1 /*- 2 * Generic routines for LSI Fusion adapters. 3 * FreeBSD Version. 4 * 5 * Copyright (c) 2000, 2001 by Greg Ansley 6 * 7 * Redistribution and use in source and binary forms, with or without 8 * modification, are permitted provided that the following conditions 9 * are met: 10 * 1. Redistributions of source code must retain the above copyright 11 * notice immediately at the beginning of the file, without modification, 12 * this list of conditions, and the following disclaimer. 13 * 2. The name of the author may not be used to endorse or promote products 14 * derived from this software without specific prior written permission. 15 * 16 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND 17 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 18 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 19 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR 20 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 21 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 22 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 23 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 24 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 25 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 26 * SUCH DAMAGE. 27 */ 28 /*- 29 * Copyright (c) 2002, 2006 by Matthew Jacob 30 * All rights reserved. 31 * 32 * Redistribution and use in source and binary forms, with or without 33 * modification, are permitted provided that the following conditions are 34 * met: 35 * 1. Redistributions of source code must retain the above copyright 36 * notice, this list of conditions and the following disclaimer. 37 * 2. Redistributions in binary form must reproduce at minimum a disclaimer 38 * substantially similar to the "NO WARRANTY" disclaimer below 39 * ("Disclaimer") and any redistribution must be conditioned upon including 40 * a substantially similar Disclaimer requirement for further binary 41 * redistribution. 42 * 3. Neither the names of the above listed copyright holders nor the names 43 * of any contributors may be used to endorse or promote products derived 44 * from this software without specific prior written permission. 45 * 46 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" 47 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 48 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 49 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE 50 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 51 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 52 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 53 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 54 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 55 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF THE COPYRIGHT 56 * OWNER OR CONTRIBUTOR IS ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 57 * 58 * Support from Chris Ellsworth in order to make SAS adapters work 59 * is gratefully acknowledged. 60 * 61 * 62 * Support from LSI-Logic has also gone a great deal toward making this a 63 * workable subsystem and is gratefully acknowledged. 64 */ 65 /*- 66 * Copyright (c) 2004, Avid Technology, Inc. and its contributors. 67 * Copyright (c) 2005, WHEEL Sp. z o.o. 68 * Copyright (c) 2004, 2005 Justin T. Gibbs 69 * All rights reserved. 70 * 71 * Redistribution and use in source and binary forms, with or without 72 * modification, are permitted provided that the following conditions are 73 * met: 74 * 1. Redistributions of source code must retain the above copyright 75 * notice, this list of conditions and the following disclaimer. 76 * 2. Redistributions in binary form must reproduce at minimum a disclaimer 77 * substantially similar to the "NO WARRANTY" disclaimer below 78 * ("Disclaimer") and any redistribution must be conditioned upon including 79 * a substantially similar Disclaimer requirement for further binary 80 * redistribution. 81 * 3. Neither the names of the above listed copyright holders nor the names 82 * of any contributors may be used to endorse or promote products derived 83 * from this software without specific prior written permission. 84 * 85 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" 86 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 87 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 88 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE 89 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 90 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 91 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 92 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 93 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 94 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF THE COPYRIGHT 95 * OWNER OR CONTRIBUTOR IS ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 96 */ 97 98 #include <sys/cdefs.h> 99 __FBSDID("$FreeBSD$"); 100 101 #include <dev/mpt/mpt.h> 102 #include <dev/mpt/mpt_cam.h> /* XXX For static handler registration */ 103 #include <dev/mpt/mpt_raid.h> /* XXX For static handler registration */ 104 105 #include <dev/mpt/mpilib/mpi.h> 106 #include <dev/mpt/mpilib/mpi_ioc.h> 107 #include <dev/mpt/mpilib/mpi_fc.h> 108 #include <dev/mpt/mpilib/mpi_targ.h> 109 110 #include <sys/sysctl.h> 111 112 #define MPT_MAX_TRYS 3 113 #define MPT_MAX_WAIT 300000 114 115 static int maxwait_ack = 0; 116 static int maxwait_int = 0; 117 static int maxwait_state = 0; 118 119 static TAILQ_HEAD(, mpt_softc) mpt_tailq = TAILQ_HEAD_INITIALIZER(mpt_tailq); 120 mpt_reply_handler_t *mpt_reply_handlers[MPT_NUM_REPLY_HANDLERS]; 121 122 static mpt_reply_handler_t mpt_default_reply_handler; 123 static mpt_reply_handler_t mpt_config_reply_handler; 124 static mpt_reply_handler_t mpt_handshake_reply_handler; 125 static mpt_reply_handler_t mpt_event_reply_handler; 126 static void mpt_send_event_ack(struct mpt_softc *mpt, request_t *ack_req, 127 MSG_EVENT_NOTIFY_REPLY *msg, uint32_t context); 128 static int mpt_send_event_request(struct mpt_softc *mpt, int onoff); 129 static int mpt_soft_reset(struct mpt_softc *mpt); 130 static void mpt_hard_reset(struct mpt_softc *mpt); 131 static int mpt_configure_ioc(struct mpt_softc *mpt); 132 static int mpt_enable_ioc(struct mpt_softc *mpt, int); 133 134 /************************* Personality Module Support *************************/ 135 /* 136 * We include one extra entry that is guaranteed to be NULL 137 * to simplify our itterator. 138 */ 139 static struct mpt_personality *mpt_personalities[MPT_MAX_PERSONALITIES + 1]; 140 static __inline struct mpt_personality* 141 mpt_pers_find(struct mpt_softc *, u_int); 142 static __inline struct mpt_personality* 143 mpt_pers_find_reverse(struct mpt_softc *, u_int); 144 145 static __inline struct mpt_personality * 146 mpt_pers_find(struct mpt_softc *mpt, u_int start_at) 147 { 148 KASSERT(start_at <= MPT_MAX_PERSONALITIES, 149 ("mpt_pers_find: starting position out of range\n")); 150 151 while (start_at < MPT_MAX_PERSONALITIES 152 && (mpt->mpt_pers_mask & (0x1 << start_at)) == 0) { 153 start_at++; 154 } 155 return (mpt_personalities[start_at]); 156 } 157 158 /* 159 * Used infrequently, so no need to optimize like a forward 160 * traversal where we use the MAX+1 is guaranteed to be NULL 161 * trick. 162 */ 163 static __inline struct mpt_personality * 164 mpt_pers_find_reverse(struct mpt_softc *mpt, u_int start_at) 165 { 166 while (start_at < MPT_MAX_PERSONALITIES 167 && (mpt->mpt_pers_mask & (0x1 << start_at)) == 0) { 168 start_at--; 169 } 170 if (start_at < MPT_MAX_PERSONALITIES) 171 return (mpt_personalities[start_at]); 172 return (NULL); 173 } 174 175 #define MPT_PERS_FOREACH(mpt, pers) \ 176 for (pers = mpt_pers_find(mpt, /*start_at*/0); \ 177 pers != NULL; \ 178 pers = mpt_pers_find(mpt, /*start_at*/pers->id+1)) 179 180 #define MPT_PERS_FOREACH_REVERSE(mpt, pers) \ 181 for (pers = mpt_pers_find_reverse(mpt, MPT_MAX_PERSONALITIES-1);\ 182 pers != NULL; \ 183 pers = mpt_pers_find_reverse(mpt, /*start_at*/pers->id-1)) 184 185 static mpt_load_handler_t mpt_stdload; 186 static mpt_probe_handler_t mpt_stdprobe; 187 static mpt_attach_handler_t mpt_stdattach; 188 static mpt_enable_handler_t mpt_stdenable; 189 static mpt_ready_handler_t mpt_stdready; 190 static mpt_event_handler_t mpt_stdevent; 191 static mpt_reset_handler_t mpt_stdreset; 192 static mpt_shutdown_handler_t mpt_stdshutdown; 193 static mpt_detach_handler_t mpt_stddetach; 194 static mpt_unload_handler_t mpt_stdunload; 195 static struct mpt_personality mpt_default_personality = 196 { 197 .load = mpt_stdload, 198 .probe = mpt_stdprobe, 199 .attach = mpt_stdattach, 200 .enable = mpt_stdenable, 201 .ready = mpt_stdready, 202 .event = mpt_stdevent, 203 .reset = mpt_stdreset, 204 .shutdown = mpt_stdshutdown, 205 .detach = mpt_stddetach, 206 .unload = mpt_stdunload 207 }; 208 209 static mpt_load_handler_t mpt_core_load; 210 static mpt_attach_handler_t mpt_core_attach; 211 static mpt_enable_handler_t mpt_core_enable; 212 static mpt_reset_handler_t mpt_core_ioc_reset; 213 static mpt_event_handler_t mpt_core_event; 214 static mpt_shutdown_handler_t mpt_core_shutdown; 215 static mpt_shutdown_handler_t mpt_core_detach; 216 static mpt_unload_handler_t mpt_core_unload; 217 static struct mpt_personality mpt_core_personality = 218 { 219 .name = "mpt_core", 220 .load = mpt_core_load, 221 .attach = mpt_core_attach, 222 .enable = mpt_core_enable, 223 .event = mpt_core_event, 224 .reset = mpt_core_ioc_reset, 225 .shutdown = mpt_core_shutdown, 226 .detach = mpt_core_detach, 227 .unload = mpt_core_unload, 228 }; 229 230 /* 231 * Manual declaration so that DECLARE_MPT_PERSONALITY doesn't need 232 * ordering information. We want the core to always register FIRST. 233 * other modules are set to SI_ORDER_SECOND. 234 */ 235 static moduledata_t mpt_core_mod = { 236 "mpt_core", mpt_modevent, &mpt_core_personality 237 }; 238 DECLARE_MODULE(mpt_core, mpt_core_mod, SI_SUB_DRIVERS, SI_ORDER_FIRST); 239 MODULE_VERSION(mpt_core, 1); 240 241 #define MPT_PERS_ATTACHED(pers, mpt) ((mpt)->mpt_pers_mask & (0x1 << pers->id)) 242 243 int 244 mpt_modevent(module_t mod, int type, void *data) 245 { 246 struct mpt_personality *pers; 247 int error; 248 249 pers = (struct mpt_personality *)data; 250 251 error = 0; 252 switch (type) { 253 case MOD_LOAD: 254 { 255 mpt_load_handler_t **def_handler; 256 mpt_load_handler_t **pers_handler; 257 int i; 258 259 for (i = 0; i < MPT_MAX_PERSONALITIES; i++) { 260 if (mpt_personalities[i] == NULL) 261 break; 262 } 263 if (i >= MPT_MAX_PERSONALITIES) { 264 error = ENOMEM; 265 break; 266 } 267 pers->id = i; 268 mpt_personalities[i] = pers; 269 270 /* Install standard/noop handlers for any NULL entries. */ 271 def_handler = MPT_PERS_FIRST_HANDLER(&mpt_default_personality); 272 pers_handler = MPT_PERS_FIRST_HANDLER(pers); 273 while (pers_handler <= MPT_PERS_LAST_HANDLER(pers)) { 274 if (*pers_handler == NULL) 275 *pers_handler = *def_handler; 276 pers_handler++; 277 def_handler++; 278 } 279 280 error = (pers->load(pers)); 281 if (error != 0) 282 mpt_personalities[i] = NULL; 283 break; 284 } 285 case MOD_SHUTDOWN: 286 break; 287 #if __FreeBSD_version >= 500000 288 case MOD_QUIESCE: 289 break; 290 #endif 291 case MOD_UNLOAD: 292 error = pers->unload(pers); 293 mpt_personalities[pers->id] = NULL; 294 break; 295 default: 296 error = EINVAL; 297 break; 298 } 299 return (error); 300 } 301 302 int 303 mpt_stdload(struct mpt_personality *pers) 304 { 305 /* Load is always successfull. */ 306 return (0); 307 } 308 309 int 310 mpt_stdprobe(struct mpt_softc *mpt) 311 { 312 /* Probe is always successfull. */ 313 return (0); 314 } 315 316 int 317 mpt_stdattach(struct mpt_softc *mpt) 318 { 319 /* Attach is always successfull. */ 320 return (0); 321 } 322 323 int 324 mpt_stdenable(struct mpt_softc *mpt) 325 { 326 /* Enable is always successfull. */ 327 return (0); 328 } 329 330 void 331 mpt_stdready(struct mpt_softc *mpt) 332 { 333 } 334 335 336 int 337 mpt_stdevent(struct mpt_softc *mpt, request_t *req, MSG_EVENT_NOTIFY_REPLY *msg) 338 { 339 mpt_lprt(mpt, MPT_PRT_DEBUG, "mpt_stdevent: 0x%x\n", msg->Event & 0xFF); 340 /* Event was not for us. */ 341 return (0); 342 } 343 344 void 345 mpt_stdreset(struct mpt_softc *mpt, int type) 346 { 347 } 348 349 void 350 mpt_stdshutdown(struct mpt_softc *mpt) 351 { 352 } 353 354 void 355 mpt_stddetach(struct mpt_softc *mpt) 356 { 357 } 358 359 int 360 mpt_stdunload(struct mpt_personality *pers) 361 { 362 /* Unload is always successfull. */ 363 return (0); 364 } 365 366 /* 367 * Post driver attachment, we may want to perform some global actions. 368 * Here is the hook to do so. 369 */ 370 371 static void 372 mpt_postattach(void *unused) 373 { 374 struct mpt_softc *mpt; 375 struct mpt_personality *pers; 376 377 TAILQ_FOREACH(mpt, &mpt_tailq, links) { 378 MPT_PERS_FOREACH(mpt, pers) 379 pers->ready(mpt); 380 } 381 } 382 SYSINIT(mptdev, SI_SUB_CONFIGURE, SI_ORDER_MIDDLE, mpt_postattach, NULL); 383 384 385 /******************************* Bus DMA Support ******************************/ 386 void 387 mpt_map_rquest(void *arg, bus_dma_segment_t *segs, int nseg, int error) 388 { 389 struct mpt_map_info *map_info; 390 391 map_info = (struct mpt_map_info *)arg; 392 map_info->error = error; 393 map_info->phys = segs->ds_addr; 394 } 395 396 /**************************** Reply/Event Handling ****************************/ 397 int 398 mpt_register_handler(struct mpt_softc *mpt, mpt_handler_type type, 399 mpt_handler_t handler, uint32_t *phandler_id) 400 { 401 402 switch (type) { 403 case MPT_HANDLER_REPLY: 404 { 405 u_int cbi; 406 u_int free_cbi; 407 408 if (phandler_id == NULL) 409 return (EINVAL); 410 411 free_cbi = MPT_HANDLER_ID_NONE; 412 for (cbi = 0; cbi < MPT_NUM_REPLY_HANDLERS; cbi++) { 413 /* 414 * If the same handler is registered multiple 415 * times, don't error out. Just return the 416 * index of the original registration. 417 */ 418 if (mpt_reply_handlers[cbi] == handler.reply_handler) { 419 *phandler_id = MPT_CBI_TO_HID(cbi); 420 return (0); 421 } 422 423 /* 424 * Fill from the front in the hope that 425 * all registered handlers consume only a 426 * single cache line. 427 * 428 * We don't break on the first empty slot so 429 * that the full table is checked to see if 430 * this handler was previously registered. 431 */ 432 if (free_cbi == MPT_HANDLER_ID_NONE && 433 (mpt_reply_handlers[cbi] 434 == mpt_default_reply_handler)) 435 free_cbi = cbi; 436 } 437 if (free_cbi == MPT_HANDLER_ID_NONE) { 438 return (ENOMEM); 439 } 440 mpt_reply_handlers[free_cbi] = handler.reply_handler; 441 *phandler_id = MPT_CBI_TO_HID(free_cbi); 442 break; 443 } 444 default: 445 mpt_prt(mpt, "mpt_register_handler unknown type %d\n", type); 446 return (EINVAL); 447 } 448 return (0); 449 } 450 451 int 452 mpt_deregister_handler(struct mpt_softc *mpt, mpt_handler_type type, 453 mpt_handler_t handler, uint32_t handler_id) 454 { 455 456 switch (type) { 457 case MPT_HANDLER_REPLY: 458 { 459 u_int cbi; 460 461 cbi = MPT_CBI(handler_id); 462 if (cbi >= MPT_NUM_REPLY_HANDLERS 463 || mpt_reply_handlers[cbi] != handler.reply_handler) 464 return (ENOENT); 465 mpt_reply_handlers[cbi] = mpt_default_reply_handler; 466 break; 467 } 468 default: 469 mpt_prt(mpt, "mpt_deregister_handler unknown type %d\n", type); 470 return (EINVAL); 471 } 472 return (0); 473 } 474 475 static int 476 mpt_default_reply_handler(struct mpt_softc *mpt, request_t *req, 477 uint32_t reply_desc, MSG_DEFAULT_REPLY *reply_frame) 478 { 479 mpt_prt(mpt, 480 "Default Handler Called: req=%p:%u reply_descriptor=%x frame=%p\n", 481 req, req->serno, reply_desc, reply_frame); 482 483 if (reply_frame != NULL) 484 mpt_dump_reply_frame(mpt, reply_frame); 485 486 mpt_prt(mpt, "Reply Frame Ignored\n"); 487 488 return (/*free_reply*/TRUE); 489 } 490 491 static int 492 mpt_config_reply_handler(struct mpt_softc *mpt, request_t *req, 493 uint32_t reply_desc, MSG_DEFAULT_REPLY *reply_frame) 494 { 495 if (req != NULL) { 496 497 if (reply_frame != NULL) { 498 MSG_CONFIG *cfgp; 499 MSG_CONFIG_REPLY *reply; 500 501 cfgp = (MSG_CONFIG *)req->req_vbuf; 502 reply = (MSG_CONFIG_REPLY *)reply_frame; 503 req->IOCStatus = le16toh(reply_frame->IOCStatus); 504 bcopy(&reply->Header, &cfgp->Header, 505 sizeof(cfgp->Header)); 506 } 507 req->state &= ~REQ_STATE_QUEUED; 508 req->state |= REQ_STATE_DONE; 509 TAILQ_REMOVE(&mpt->request_pending_list, req, links); 510 if ((req->state & REQ_STATE_NEED_WAKEUP) != 0) { 511 wakeup(req); 512 } else if ((req->state & REQ_STATE_TIMEDOUT) != 0) { 513 /* 514 * Whew- we can free this request (late completion) 515 */ 516 mpt_free_request(mpt, req); 517 } 518 } 519 520 return (TRUE); 521 } 522 523 static int 524 mpt_handshake_reply_handler(struct mpt_softc *mpt, request_t *req, 525 uint32_t reply_desc, MSG_DEFAULT_REPLY *reply_frame) 526 { 527 /* Nothing to be done. */ 528 return (TRUE); 529 } 530 531 static int 532 mpt_event_reply_handler(struct mpt_softc *mpt, request_t *req, 533 uint32_t reply_desc, MSG_DEFAULT_REPLY *reply_frame) 534 { 535 int free_reply; 536 537 KASSERT(reply_frame != NULL, ("null reply in mpt_event_reply_handler")); 538 KASSERT(req != NULL, ("null request in mpt_event_reply_handler")); 539 540 free_reply = TRUE; 541 switch (reply_frame->Function) { 542 case MPI_FUNCTION_EVENT_NOTIFICATION: 543 { 544 MSG_EVENT_NOTIFY_REPLY *msg; 545 struct mpt_personality *pers; 546 u_int handled; 547 548 handled = 0; 549 msg = (MSG_EVENT_NOTIFY_REPLY *)reply_frame; 550 MPT_PERS_FOREACH(mpt, pers) 551 handled += pers->event(mpt, req, msg); 552 553 if (handled == 0 && mpt->mpt_pers_mask == 0) { 554 mpt_lprt(mpt, MPT_PRT_INFO, 555 "No Handlers For Any Event Notify Frames. " 556 "Event %#x (ACK %sequired).\n", 557 msg->Event, msg->AckRequired? "r" : "not r"); 558 } else if (handled == 0) { 559 mpt_lprt(mpt, MPT_PRT_WARN, 560 "Unhandled Event Notify Frame. Event %#x " 561 "(ACK %sequired).\n", 562 msg->Event, msg->AckRequired? "r" : "not r"); 563 } 564 565 if (msg->AckRequired) { 566 request_t *ack_req; 567 uint32_t context; 568 569 context = htole32(req->index|MPT_REPLY_HANDLER_EVENTS); 570 ack_req = mpt_get_request(mpt, FALSE); 571 if (ack_req == NULL) { 572 struct mpt_evtf_record *evtf; 573 574 evtf = (struct mpt_evtf_record *)reply_frame; 575 evtf->context = context; 576 LIST_INSERT_HEAD(&mpt->ack_frames, evtf, links); 577 free_reply = FALSE; 578 break; 579 } 580 mpt_send_event_ack(mpt, ack_req, msg, context); 581 /* 582 * Don't check for CONTINUATION_REPLY here 583 */ 584 return (free_reply); 585 } 586 break; 587 } 588 case MPI_FUNCTION_PORT_ENABLE: 589 mpt_lprt(mpt, MPT_PRT_DEBUG , "enable port reply\n"); 590 break; 591 case MPI_FUNCTION_EVENT_ACK: 592 break; 593 default: 594 mpt_prt(mpt, "unknown event function: %x\n", 595 reply_frame->Function); 596 break; 597 } 598 599 /* 600 * I'm not sure that this continuation stuff works as it should. 601 * 602 * I've had FC async events occur that free the frame up because 603 * the continuation bit isn't set, and then additional async events 604 * then occur using the same context. As you might imagine, this 605 * leads to Very Bad Thing. 606 * 607 * Let's just be safe for now and not free them up until we figure 608 * out what's actually happening here. 609 */ 610 #if 0 611 if ((reply_frame->MsgFlags & MPI_MSGFLAGS_CONTINUATION_REPLY) == 0) { 612 TAILQ_REMOVE(&mpt->request_pending_list, req, links); 613 mpt_free_request(mpt, req); 614 mpt_prt(mpt, "event_reply %x for req %p:%u NOT a continuation", 615 reply_frame->Function, req, req->serno); 616 if (reply_frame->Function == MPI_FUNCTION_EVENT_NOTIFICATION) { 617 MSG_EVENT_NOTIFY_REPLY *msg = 618 (MSG_EVENT_NOTIFY_REPLY *)reply_frame; 619 mpt_prtc(mpt, " Event=0x%x AckReq=%d", 620 msg->Event, msg->AckRequired); 621 } 622 } else { 623 mpt_prt(mpt, "event_reply %x for %p:%u IS a continuation", 624 reply_frame->Function, req, req->serno); 625 if (reply_frame->Function == MPI_FUNCTION_EVENT_NOTIFICATION) { 626 MSG_EVENT_NOTIFY_REPLY *msg = 627 (MSG_EVENT_NOTIFY_REPLY *)reply_frame; 628 mpt_prtc(mpt, " Event=0x%x AckReq=%d", 629 msg->Event, msg->AckRequired); 630 } 631 mpt_prtc(mpt, "\n"); 632 } 633 #endif 634 return (free_reply); 635 } 636 637 /* 638 * Process an asynchronous event from the IOC. 639 */ 640 static int 641 mpt_core_event(struct mpt_softc *mpt, request_t *req, 642 MSG_EVENT_NOTIFY_REPLY *msg) 643 { 644 mpt_lprt(mpt, MPT_PRT_DEBUG, "mpt_core_event: 0x%x\n", 645 msg->Event & 0xFF); 646 switch(msg->Event & 0xFF) { 647 case MPI_EVENT_NONE: 648 break; 649 case MPI_EVENT_LOG_DATA: 650 { 651 int i; 652 653 /* Some error occured that LSI wants logged */ 654 mpt_prt(mpt, "EvtLogData: IOCLogInfo: 0x%08x\n", 655 msg->IOCLogInfo); 656 mpt_prt(mpt, "\tEvtLogData: Event Data:"); 657 for (i = 0; i < msg->EventDataLength; i++) 658 mpt_prtc(mpt, " %08x", msg->Data[i]); 659 mpt_prtc(mpt, "\n"); 660 break; 661 } 662 case MPI_EVENT_EVENT_CHANGE: 663 /* 664 * This is just an acknowledgement 665 * of our mpt_send_event_request. 666 */ 667 break; 668 case MPI_EVENT_SAS_DEVICE_STATUS_CHANGE: 669 break; 670 default: 671 return (0); 672 break; 673 } 674 return (1); 675 } 676 677 static void 678 mpt_send_event_ack(struct mpt_softc *mpt, request_t *ack_req, 679 MSG_EVENT_NOTIFY_REPLY *msg, uint32_t context) 680 { 681 MSG_EVENT_ACK *ackp; 682 683 ackp = (MSG_EVENT_ACK *)ack_req->req_vbuf; 684 memset(ackp, 0, sizeof (*ackp)); 685 ackp->Function = MPI_FUNCTION_EVENT_ACK; 686 ackp->Event = msg->Event; 687 ackp->EventContext = msg->EventContext; 688 ackp->MsgContext = context; 689 mpt_check_doorbell(mpt); 690 mpt_send_cmd(mpt, ack_req); 691 } 692 693 /***************************** Interrupt Handling *****************************/ 694 void 695 mpt_intr(void *arg) 696 { 697 struct mpt_softc *mpt; 698 uint32_t reply_desc; 699 int ntrips = 0; 700 701 mpt = (struct mpt_softc *)arg; 702 mpt_lprt(mpt, MPT_PRT_DEBUG2, "enter mpt_intr\n"); 703 while ((reply_desc = mpt_pop_reply_queue(mpt)) != MPT_REPLY_EMPTY) { 704 request_t *req; 705 MSG_DEFAULT_REPLY *reply_frame; 706 uint32_t reply_baddr; 707 uint32_t ctxt_idx; 708 u_int cb_index; 709 u_int req_index; 710 int free_rf; 711 712 req = NULL; 713 reply_frame = NULL; 714 reply_baddr = 0; 715 if ((reply_desc & MPI_ADDRESS_REPLY_A_BIT) != 0) { 716 u_int offset; 717 /* 718 * Insure that the reply frame is coherent. 719 */ 720 reply_baddr = MPT_REPLY_BADDR(reply_desc); 721 offset = reply_baddr - (mpt->reply_phys & 0xFFFFFFFF); 722 bus_dmamap_sync_range(mpt->reply_dmat, 723 mpt->reply_dmap, offset, MPT_REPLY_SIZE, 724 BUS_DMASYNC_POSTREAD); 725 reply_frame = MPT_REPLY_OTOV(mpt, offset); 726 ctxt_idx = le32toh(reply_frame->MsgContext); 727 } else { 728 uint32_t type; 729 730 type = MPI_GET_CONTEXT_REPLY_TYPE(reply_desc); 731 ctxt_idx = reply_desc; 732 mpt_lprt(mpt, MPT_PRT_DEBUG1, "Context Reply: 0x%08x\n", 733 reply_desc); 734 735 switch (type) { 736 case MPI_CONTEXT_REPLY_TYPE_SCSI_INIT: 737 ctxt_idx &= MPI_CONTEXT_REPLY_CONTEXT_MASK; 738 break; 739 case MPI_CONTEXT_REPLY_TYPE_SCSI_TARGET: 740 ctxt_idx = GET_IO_INDEX(reply_desc); 741 if (mpt->tgt_cmd_ptrs == NULL) { 742 mpt_prt(mpt, 743 "mpt_intr: no target cmd ptrs\n"); 744 reply_desc = MPT_REPLY_EMPTY; 745 break; 746 } 747 if (ctxt_idx >= mpt->tgt_cmds_allocated) { 748 mpt_prt(mpt, 749 "mpt_intr: bad tgt cmd ctxt %u\n", 750 ctxt_idx); 751 reply_desc = MPT_REPLY_EMPTY; 752 ntrips = 1000; 753 break; 754 } 755 req = mpt->tgt_cmd_ptrs[ctxt_idx]; 756 if (req == NULL) { 757 mpt_prt(mpt, "no request backpointer " 758 "at index %u", ctxt_idx); 759 reply_desc = MPT_REPLY_EMPTY; 760 ntrips = 1000; 761 break; 762 } 763 /* 764 * Reformulate ctxt_idx to be just as if 765 * it were another type of context reply 766 * so the code below will find the request 767 * via indexing into the pool. 768 */ 769 ctxt_idx = 770 req->index | mpt->scsi_tgt_handler_id; 771 req = NULL; 772 break; 773 case MPI_CONTEXT_REPLY_TYPE_LAN: 774 mpt_prt(mpt, "LAN CONTEXT REPLY: 0x%08x\n", 775 reply_desc); 776 reply_desc = MPT_REPLY_EMPTY; 777 break; 778 default: 779 mpt_prt(mpt, "Context Reply 0x%08x?\n", type); 780 reply_desc = MPT_REPLY_EMPTY; 781 break; 782 } 783 if (reply_desc == MPT_REPLY_EMPTY) { 784 if (ntrips++ > 1000) { 785 break; 786 } 787 continue; 788 } 789 } 790 791 cb_index = MPT_CONTEXT_TO_CBI(ctxt_idx); 792 req_index = MPT_CONTEXT_TO_REQI(ctxt_idx); 793 if (req_index < MPT_MAX_REQUESTS(mpt)) { 794 req = &mpt->request_pool[req_index]; 795 } else { 796 mpt_prt(mpt, "WARN: mpt_intr index == %d (reply_desc ==" 797 " 0x%x)\n", req_index, reply_desc); 798 } 799 800 free_rf = mpt_reply_handlers[cb_index](mpt, req, 801 reply_desc, reply_frame); 802 803 if (reply_frame != NULL && free_rf) { 804 mpt_free_reply(mpt, reply_baddr); 805 } 806 807 /* 808 * If we got ourselves disabled, don't get stuck in a loop 809 */ 810 if (mpt->disabled) { 811 mpt_disable_ints(mpt); 812 break; 813 } 814 if (ntrips++ > 1000) { 815 break; 816 } 817 } 818 mpt_lprt(mpt, MPT_PRT_DEBUG2, "exit mpt_intr\n"); 819 } 820 821 /******************************* Error Recovery *******************************/ 822 void 823 mpt_complete_request_chain(struct mpt_softc *mpt, struct req_queue *chain, 824 u_int iocstatus) 825 { 826 MSG_DEFAULT_REPLY ioc_status_frame; 827 request_t *req; 828 829 memset(&ioc_status_frame, 0, sizeof(ioc_status_frame)); 830 ioc_status_frame.MsgLength = roundup2(sizeof(ioc_status_frame), 4); 831 ioc_status_frame.IOCStatus = iocstatus; 832 while((req = TAILQ_FIRST(chain)) != NULL) { 833 MSG_REQUEST_HEADER *msg_hdr; 834 u_int cb_index; 835 836 TAILQ_REMOVE(chain, req, links); 837 msg_hdr = (MSG_REQUEST_HEADER *)req->req_vbuf; 838 ioc_status_frame.Function = msg_hdr->Function; 839 ioc_status_frame.MsgContext = msg_hdr->MsgContext; 840 cb_index = MPT_CONTEXT_TO_CBI(le32toh(msg_hdr->MsgContext)); 841 mpt_reply_handlers[cb_index](mpt, req, msg_hdr->MsgContext, 842 &ioc_status_frame); 843 } 844 } 845 846 /********************************* Diagnostics ********************************/ 847 /* 848 * Perform a diagnostic dump of a reply frame. 849 */ 850 void 851 mpt_dump_reply_frame(struct mpt_softc *mpt, MSG_DEFAULT_REPLY *reply_frame) 852 { 853 mpt_prt(mpt, "Address Reply:\n"); 854 mpt_print_reply(reply_frame); 855 } 856 857 /******************************* Doorbell Access ******************************/ 858 static __inline uint32_t mpt_rd_db(struct mpt_softc *mpt); 859 static __inline uint32_t mpt_rd_intr(struct mpt_softc *mpt); 860 861 static __inline uint32_t 862 mpt_rd_db(struct mpt_softc *mpt) 863 { 864 return mpt_read(mpt, MPT_OFFSET_DOORBELL); 865 } 866 867 static __inline uint32_t 868 mpt_rd_intr(struct mpt_softc *mpt) 869 { 870 return mpt_read(mpt, MPT_OFFSET_INTR_STATUS); 871 } 872 873 /* Busy wait for a door bell to be read by IOC */ 874 static int 875 mpt_wait_db_ack(struct mpt_softc *mpt) 876 { 877 int i; 878 for (i=0; i < MPT_MAX_WAIT; i++) { 879 if (!MPT_DB_IS_BUSY(mpt_rd_intr(mpt))) { 880 maxwait_ack = i > maxwait_ack ? i : maxwait_ack; 881 return (MPT_OK); 882 } 883 DELAY(200); 884 } 885 return (MPT_FAIL); 886 } 887 888 /* Busy wait for a door bell interrupt */ 889 static int 890 mpt_wait_db_int(struct mpt_softc *mpt) 891 { 892 int i; 893 for (i=0; i < MPT_MAX_WAIT; i++) { 894 if (MPT_DB_INTR(mpt_rd_intr(mpt))) { 895 maxwait_int = i > maxwait_int ? i : maxwait_int; 896 return MPT_OK; 897 } 898 DELAY(100); 899 } 900 return (MPT_FAIL); 901 } 902 903 /* Wait for IOC to transition to a give state */ 904 void 905 mpt_check_doorbell(struct mpt_softc *mpt) 906 { 907 uint32_t db = mpt_rd_db(mpt); 908 if (MPT_STATE(db) != MPT_DB_STATE_RUNNING) { 909 mpt_prt(mpt, "Device not running\n"); 910 mpt_print_db(db); 911 } 912 } 913 914 /* Wait for IOC to transition to a give state */ 915 static int 916 mpt_wait_state(struct mpt_softc *mpt, enum DB_STATE_BITS state) 917 { 918 int i; 919 920 for (i = 0; i < MPT_MAX_WAIT; i++) { 921 uint32_t db = mpt_rd_db(mpt); 922 if (MPT_STATE(db) == state) { 923 maxwait_state = i > maxwait_state ? i : maxwait_state; 924 return (MPT_OK); 925 } 926 DELAY(100); 927 } 928 return (MPT_FAIL); 929 } 930 931 932 /************************* Intialization/Configuration ************************/ 933 static int mpt_download_fw(struct mpt_softc *mpt); 934 935 /* Issue the reset COMMAND to the IOC */ 936 static int 937 mpt_soft_reset(struct mpt_softc *mpt) 938 { 939 mpt_lprt(mpt, MPT_PRT_DEBUG, "soft reset\n"); 940 941 /* Have to use hard reset if we are not in Running state */ 942 if (MPT_STATE(mpt_rd_db(mpt)) != MPT_DB_STATE_RUNNING) { 943 mpt_prt(mpt, "soft reset failed: device not running\n"); 944 return (MPT_FAIL); 945 } 946 947 /* If door bell is in use we don't have a chance of getting 948 * a word in since the IOC probably crashed in message 949 * processing. So don't waste our time. 950 */ 951 if (MPT_DB_IS_IN_USE(mpt_rd_db(mpt))) { 952 mpt_prt(mpt, "soft reset failed: doorbell wedged\n"); 953 return (MPT_FAIL); 954 } 955 956 /* Send the reset request to the IOC */ 957 mpt_write(mpt, MPT_OFFSET_DOORBELL, 958 MPI_FUNCTION_IOC_MESSAGE_UNIT_RESET << MPI_DOORBELL_FUNCTION_SHIFT); 959 if (mpt_wait_db_ack(mpt) != MPT_OK) { 960 mpt_prt(mpt, "soft reset failed: ack timeout\n"); 961 return (MPT_FAIL); 962 } 963 964 /* Wait for the IOC to reload and come out of reset state */ 965 if (mpt_wait_state(mpt, MPT_DB_STATE_READY) != MPT_OK) { 966 mpt_prt(mpt, "soft reset failed: device did not restart\n"); 967 return (MPT_FAIL); 968 } 969 970 return MPT_OK; 971 } 972 973 static int 974 mpt_enable_diag_mode(struct mpt_softc *mpt) 975 { 976 int try; 977 978 try = 20; 979 while (--try) { 980 981 if ((mpt_read(mpt, MPT_OFFSET_DIAGNOSTIC) & MPI_DIAG_DRWE) != 0) 982 break; 983 984 /* Enable diagnostic registers */ 985 mpt_write(mpt, MPT_OFFSET_SEQUENCE, 0xFF); 986 mpt_write(mpt, MPT_OFFSET_SEQUENCE, MPI_WRSEQ_1ST_KEY_VALUE); 987 mpt_write(mpt, MPT_OFFSET_SEQUENCE, MPI_WRSEQ_2ND_KEY_VALUE); 988 mpt_write(mpt, MPT_OFFSET_SEQUENCE, MPI_WRSEQ_3RD_KEY_VALUE); 989 mpt_write(mpt, MPT_OFFSET_SEQUENCE, MPI_WRSEQ_4TH_KEY_VALUE); 990 mpt_write(mpt, MPT_OFFSET_SEQUENCE, MPI_WRSEQ_5TH_KEY_VALUE); 991 992 DELAY(100000); 993 } 994 if (try == 0) 995 return (EIO); 996 return (0); 997 } 998 999 static void 1000 mpt_disable_diag_mode(struct mpt_softc *mpt) 1001 { 1002 mpt_write(mpt, MPT_OFFSET_SEQUENCE, 0xFFFFFFFF); 1003 } 1004 1005 /* This is a magic diagnostic reset that resets all the ARM 1006 * processors in the chip. 1007 */ 1008 static void 1009 mpt_hard_reset(struct mpt_softc *mpt) 1010 { 1011 int error; 1012 int wait; 1013 uint32_t diagreg; 1014 1015 mpt_lprt(mpt, MPT_PRT_DEBUG, "hard reset\n"); 1016 1017 error = mpt_enable_diag_mode(mpt); 1018 if (error) { 1019 mpt_prt(mpt, "WARNING - Could not enter diagnostic mode !\n"); 1020 mpt_prt(mpt, "Trying to reset anyway.\n"); 1021 } 1022 1023 diagreg = mpt_read(mpt, MPT_OFFSET_DIAGNOSTIC); 1024 1025 /* 1026 * This appears to be a workaround required for some 1027 * firmware or hardware revs. 1028 */ 1029 mpt_write(mpt, MPT_OFFSET_DIAGNOSTIC, diagreg | MPI_DIAG_DISABLE_ARM); 1030 DELAY(1000); 1031 1032 /* Diag. port is now active so we can now hit the reset bit */ 1033 mpt_write(mpt, MPT_OFFSET_DIAGNOSTIC, diagreg | MPI_DIAG_RESET_ADAPTER); 1034 1035 /* 1036 * Ensure that the reset has finished. We delay 1ms 1037 * prior to reading the register to make sure the chip 1038 * has sufficiently completed its reset to handle register 1039 * accesses. 1040 */ 1041 wait = 5000; 1042 do { 1043 DELAY(1000); 1044 diagreg = mpt_read(mpt, MPT_OFFSET_DIAGNOSTIC); 1045 } while (--wait && (diagreg & MPI_DIAG_RESET_ADAPTER) == 0); 1046 1047 if (wait == 0) { 1048 mpt_prt(mpt, "WARNING - Failed hard reset! " 1049 "Trying to initialize anyway.\n"); 1050 } 1051 1052 /* 1053 * If we have firmware to download, it must be loaded before 1054 * the controller will become operational. Do so now. 1055 */ 1056 if (mpt->fw_image != NULL) { 1057 1058 error = mpt_download_fw(mpt); 1059 1060 if (error) { 1061 mpt_prt(mpt, "WARNING - Firmware Download Failed!\n"); 1062 mpt_prt(mpt, "Trying to initialize anyway.\n"); 1063 } 1064 } 1065 1066 /* 1067 * Reseting the controller should have disabled write 1068 * access to the diagnostic registers, but disable 1069 * manually to be sure. 1070 */ 1071 mpt_disable_diag_mode(mpt); 1072 } 1073 1074 static void 1075 mpt_core_ioc_reset(struct mpt_softc *mpt, int type) 1076 { 1077 /* 1078 * Complete all pending requests with a status 1079 * appropriate for an IOC reset. 1080 */ 1081 mpt_complete_request_chain(mpt, &mpt->request_pending_list, 1082 MPI_IOCSTATUS_INVALID_STATE); 1083 } 1084 1085 1086 /* 1087 * Reset the IOC when needed. Try software command first then if needed 1088 * poke at the magic diagnostic reset. Note that a hard reset resets 1089 * *both* IOCs on dual function chips (FC929 && LSI1030) as well as 1090 * fouls up the PCI configuration registers. 1091 */ 1092 int 1093 mpt_reset(struct mpt_softc *mpt, int reinit) 1094 { 1095 struct mpt_personality *pers; 1096 int ret; 1097 int retry_cnt = 0; 1098 1099 /* 1100 * Try a soft reset. If that fails, get out the big hammer. 1101 */ 1102 again: 1103 if ((ret = mpt_soft_reset(mpt)) != MPT_OK) { 1104 int cnt; 1105 for (cnt = 0; cnt < 5; cnt++) { 1106 /* Failed; do a hard reset */ 1107 mpt_hard_reset(mpt); 1108 1109 /* 1110 * Wait for the IOC to reload 1111 * and come out of reset state 1112 */ 1113 ret = mpt_wait_state(mpt, MPT_DB_STATE_READY); 1114 if (ret == MPT_OK) { 1115 break; 1116 } 1117 /* 1118 * Okay- try to check again... 1119 */ 1120 ret = mpt_wait_state(mpt, MPT_DB_STATE_READY); 1121 if (ret == MPT_OK) { 1122 break; 1123 } 1124 mpt_prt(mpt, "mpt_reset: failed hard reset (%d:%d)\n", 1125 retry_cnt, cnt); 1126 } 1127 } 1128 1129 if (retry_cnt == 0) { 1130 /* 1131 * Invoke reset handlers. We bump the reset count so 1132 * that mpt_wait_req() understands that regardless of 1133 * the specified wait condition, it should stop its wait. 1134 */ 1135 mpt->reset_cnt++; 1136 MPT_PERS_FOREACH(mpt, pers) 1137 pers->reset(mpt, ret); 1138 } 1139 1140 if (reinit) { 1141 ret = mpt_enable_ioc(mpt, 1); 1142 if (ret == MPT_OK) { 1143 mpt_enable_ints(mpt); 1144 } 1145 } 1146 if (ret != MPT_OK && retry_cnt++ < 2) { 1147 goto again; 1148 } 1149 return ret; 1150 } 1151 1152 /* Return a command buffer to the free queue */ 1153 void 1154 mpt_free_request(struct mpt_softc *mpt, request_t *req) 1155 { 1156 request_t *nxt; 1157 struct mpt_evtf_record *record; 1158 uint32_t reply_baddr; 1159 1160 if (req == NULL || req != &mpt->request_pool[req->index]) { 1161 panic("mpt_free_request bad req ptr\n"); 1162 return; 1163 } 1164 if ((nxt = req->chain) != NULL) { 1165 req->chain = NULL; 1166 mpt_free_request(mpt, nxt); /* NB: recursion */ 1167 } 1168 KASSERT(req->state != REQ_STATE_FREE, ("freeing free request")); 1169 KASSERT(!(req->state & REQ_STATE_LOCKED), ("freeing locked request")); 1170 KASSERT(MPT_OWNED(mpt), ("mpt_free_request: mpt not locked\n")); 1171 KASSERT(mpt_req_on_free_list(mpt, req) == 0, 1172 ("mpt_free_request: req %p:%u func %x already on freelist", 1173 req, req->serno, ((MSG_REQUEST_HEADER *)req->req_vbuf)->Function)); 1174 KASSERT(mpt_req_on_pending_list(mpt, req) == 0, 1175 ("mpt_free_request: req %p:%u func %x on pending list", 1176 req, req->serno, ((MSG_REQUEST_HEADER *)req->req_vbuf)->Function)); 1177 #ifdef INVARIANTS 1178 mpt_req_not_spcl(mpt, req, "mpt_free_request", __LINE__); 1179 #endif 1180 1181 req->ccb = NULL; 1182 if (LIST_EMPTY(&mpt->ack_frames)) { 1183 /* 1184 * Insert free ones at the tail 1185 */ 1186 req->serno = 0; 1187 req->state = REQ_STATE_FREE; 1188 #ifdef INVARIANTS 1189 memset(req->req_vbuf, 0xff, sizeof (MSG_REQUEST_HEADER)); 1190 #endif 1191 TAILQ_INSERT_TAIL(&mpt->request_free_list, req, links); 1192 if (mpt->getreqwaiter != 0) { 1193 mpt->getreqwaiter = 0; 1194 wakeup(&mpt->request_free_list); 1195 } 1196 return; 1197 } 1198 1199 /* 1200 * Process an ack frame deferred due to resource shortage. 1201 */ 1202 record = LIST_FIRST(&mpt->ack_frames); 1203 LIST_REMOVE(record, links); 1204 req->state = REQ_STATE_ALLOCATED; 1205 mpt_assign_serno(mpt, req); 1206 mpt_send_event_ack(mpt, req, &record->reply, record->context); 1207 reply_baddr = (uint32_t)((uint8_t *)record - mpt->reply) 1208 + (mpt->reply_phys & 0xFFFFFFFF); 1209 mpt_free_reply(mpt, reply_baddr); 1210 } 1211 1212 /* Get a command buffer from the free queue */ 1213 request_t * 1214 mpt_get_request(struct mpt_softc *mpt, int sleep_ok) 1215 { 1216 request_t *req; 1217 1218 retry: 1219 KASSERT(MPT_OWNED(mpt), ("mpt_get_request: mpt not locked\n")); 1220 req = TAILQ_FIRST(&mpt->request_free_list); 1221 if (req != NULL) { 1222 KASSERT(req == &mpt->request_pool[req->index], 1223 ("mpt_get_request: corrupted request free list\n")); 1224 KASSERT(req->state == REQ_STATE_FREE, 1225 ("req %p:%u not free on free list %x index %d function %x", 1226 req, req->serno, req->state, req->index, 1227 ((MSG_REQUEST_HEADER *)req->req_vbuf)->Function)); 1228 TAILQ_REMOVE(&mpt->request_free_list, req, links); 1229 req->state = REQ_STATE_ALLOCATED; 1230 req->chain = NULL; 1231 mpt_assign_serno(mpt, req); 1232 } else if (sleep_ok != 0) { 1233 mpt->getreqwaiter = 1; 1234 mpt_sleep(mpt, &mpt->request_free_list, PUSER, "mptgreq", 0); 1235 goto retry; 1236 } 1237 return (req); 1238 } 1239 1240 /* Pass the command to the IOC */ 1241 void 1242 mpt_send_cmd(struct mpt_softc *mpt, request_t *req) 1243 { 1244 if (mpt->verbose > MPT_PRT_DEBUG2) { 1245 mpt_dump_request(mpt, req); 1246 } 1247 bus_dmamap_sync(mpt->request_dmat, mpt->request_dmap, 1248 BUS_DMASYNC_PREWRITE); 1249 req->state |= REQ_STATE_QUEUED; 1250 KASSERT(mpt_req_on_free_list(mpt, req) == 0, 1251 ("req %p:%u func %x on freelist list in mpt_send_cmd", 1252 req, req->serno, ((MSG_REQUEST_HEADER *)req->req_vbuf)->Function)); 1253 KASSERT(mpt_req_on_pending_list(mpt, req) == 0, 1254 ("req %p:%u func %x already on pending list in mpt_send_cmd", 1255 req, req->serno, ((MSG_REQUEST_HEADER *)req->req_vbuf)->Function)); 1256 TAILQ_INSERT_HEAD(&mpt->request_pending_list, req, links); 1257 mpt_write(mpt, MPT_OFFSET_REQUEST_Q, (uint32_t) req->req_pbuf); 1258 } 1259 1260 /* 1261 * Wait for a request to complete. 1262 * 1263 * Inputs: 1264 * mpt softc of controller executing request 1265 * req request to wait for 1266 * sleep_ok nonzero implies may sleep in this context 1267 * time_ms timeout in ms. 0 implies no timeout. 1268 * 1269 * Return Values: 1270 * 0 Request completed 1271 * non-0 Timeout fired before request completion. 1272 */ 1273 int 1274 mpt_wait_req(struct mpt_softc *mpt, request_t *req, 1275 mpt_req_state_t state, mpt_req_state_t mask, 1276 int sleep_ok, int time_ms) 1277 { 1278 int error; 1279 int timeout; 1280 u_int saved_cnt; 1281 1282 /* 1283 * timeout is in ms. 0 indicates infinite wait. 1284 * Convert to ticks or 500us units depending on 1285 * our sleep mode. 1286 */ 1287 if (sleep_ok != 0) { 1288 timeout = (time_ms * hz) / 1000; 1289 } else { 1290 timeout = time_ms * 2; 1291 } 1292 req->state |= REQ_STATE_NEED_WAKEUP; 1293 mask &= ~REQ_STATE_NEED_WAKEUP; 1294 saved_cnt = mpt->reset_cnt; 1295 while ((req->state & mask) != state && mpt->reset_cnt == saved_cnt) { 1296 if (sleep_ok != 0) { 1297 error = mpt_sleep(mpt, req, PUSER, "mptreq", timeout); 1298 if (error == EWOULDBLOCK) { 1299 timeout = 0; 1300 break; 1301 } 1302 } else { 1303 if (time_ms != 0 && --timeout == 0) { 1304 break; 1305 } 1306 DELAY(500); 1307 mpt_intr(mpt); 1308 } 1309 } 1310 req->state &= ~REQ_STATE_NEED_WAKEUP; 1311 if (mpt->reset_cnt != saved_cnt) { 1312 return (EIO); 1313 } 1314 if (time_ms && timeout <= 0) { 1315 MSG_REQUEST_HEADER *msg_hdr = req->req_vbuf; 1316 req->state |= REQ_STATE_TIMEDOUT; 1317 mpt_prt(mpt, "mpt_wait_req(%x) timed out\n", msg_hdr->Function); 1318 return (ETIMEDOUT); 1319 } 1320 return (0); 1321 } 1322 1323 /* 1324 * Send a command to the IOC via the handshake register. 1325 * 1326 * Only done at initialization time and for certain unusual 1327 * commands such as device/bus reset as specified by LSI. 1328 */ 1329 int 1330 mpt_send_handshake_cmd(struct mpt_softc *mpt, size_t len, void *cmd) 1331 { 1332 int i; 1333 uint32_t data, *data32; 1334 1335 /* Check condition of the IOC */ 1336 data = mpt_rd_db(mpt); 1337 if ((MPT_STATE(data) != MPT_DB_STATE_READY 1338 && MPT_STATE(data) != MPT_DB_STATE_RUNNING 1339 && MPT_STATE(data) != MPT_DB_STATE_FAULT) 1340 || MPT_DB_IS_IN_USE(data)) { 1341 mpt_prt(mpt, "handshake aborted - invalid doorbell state\n"); 1342 mpt_print_db(data); 1343 return (EBUSY); 1344 } 1345 1346 /* We move things in 32 bit chunks */ 1347 len = (len + 3) >> 2; 1348 data32 = cmd; 1349 1350 /* Clear any left over pending doorbell interupts */ 1351 if (MPT_DB_INTR(mpt_rd_intr(mpt))) 1352 mpt_write(mpt, MPT_OFFSET_INTR_STATUS, 0); 1353 1354 /* 1355 * Tell the handshake reg. we are going to send a command 1356 * and how long it is going to be. 1357 */ 1358 data = (MPI_FUNCTION_HANDSHAKE << MPI_DOORBELL_FUNCTION_SHIFT) | 1359 (len << MPI_DOORBELL_ADD_DWORDS_SHIFT); 1360 mpt_write(mpt, MPT_OFFSET_DOORBELL, data); 1361 1362 /* Wait for the chip to notice */ 1363 if (mpt_wait_db_int(mpt) != MPT_OK) { 1364 mpt_prt(mpt, "mpt_send_handshake_cmd timeout1\n"); 1365 return (ETIMEDOUT); 1366 } 1367 1368 /* Clear the interrupt */ 1369 mpt_write(mpt, MPT_OFFSET_INTR_STATUS, 0); 1370 1371 if (mpt_wait_db_ack(mpt) != MPT_OK) { 1372 mpt_prt(mpt, "mpt_send_handshake_cmd timeout2\n"); 1373 return (ETIMEDOUT); 1374 } 1375 1376 /* Send the command */ 1377 for (i = 0; i < len; i++) { 1378 mpt_write(mpt, MPT_OFFSET_DOORBELL, *data32++); 1379 if (mpt_wait_db_ack(mpt) != MPT_OK) { 1380 mpt_prt(mpt, 1381 "mpt_send_handshake_cmd timeout! index = %d\n", 1382 i); 1383 return (ETIMEDOUT); 1384 } 1385 } 1386 return MPT_OK; 1387 } 1388 1389 /* Get the response from the handshake register */ 1390 int 1391 mpt_recv_handshake_reply(struct mpt_softc *mpt, size_t reply_len, void *reply) 1392 { 1393 int left, reply_left; 1394 u_int16_t *data16; 1395 MSG_DEFAULT_REPLY *hdr; 1396 1397 /* We move things out in 16 bit chunks */ 1398 reply_len >>= 1; 1399 data16 = (u_int16_t *)reply; 1400 1401 hdr = (MSG_DEFAULT_REPLY *)reply; 1402 1403 /* Get first word */ 1404 if (mpt_wait_db_int(mpt) != MPT_OK) { 1405 mpt_prt(mpt, "mpt_recv_handshake_cmd timeout1\n"); 1406 return ETIMEDOUT; 1407 } 1408 *data16++ = mpt_read(mpt, MPT_OFFSET_DOORBELL) & MPT_DB_DATA_MASK; 1409 mpt_write(mpt, MPT_OFFSET_INTR_STATUS, 0); 1410 1411 /* Get Second Word */ 1412 if (mpt_wait_db_int(mpt) != MPT_OK) { 1413 mpt_prt(mpt, "mpt_recv_handshake_cmd timeout2\n"); 1414 return ETIMEDOUT; 1415 } 1416 *data16++ = mpt_read(mpt, MPT_OFFSET_DOORBELL) & MPT_DB_DATA_MASK; 1417 mpt_write(mpt, MPT_OFFSET_INTR_STATUS, 0); 1418 1419 /* 1420 * With the second word, we can now look at the length. 1421 * Warn about a reply that's too short (except for IOC FACTS REPLY) 1422 */ 1423 if ((reply_len >> 1) != hdr->MsgLength && 1424 (hdr->Function != MPI_FUNCTION_IOC_FACTS)){ 1425 #if __FreeBSD_version >= 500000 1426 mpt_prt(mpt, "reply length does not match message length: " 1427 "got %x; expected %zx for function %x\n", 1428 hdr->MsgLength << 2, reply_len << 1, hdr->Function); 1429 #else 1430 mpt_prt(mpt, "reply length does not match message length: " 1431 "got %x; expected %x for function %x\n", 1432 hdr->MsgLength << 2, reply_len << 1, hdr->Function); 1433 #endif 1434 } 1435 1436 /* Get rest of the reply; but don't overflow the provided buffer */ 1437 left = (hdr->MsgLength << 1) - 2; 1438 reply_left = reply_len - 2; 1439 while (left--) { 1440 u_int16_t datum; 1441 1442 if (mpt_wait_db_int(mpt) != MPT_OK) { 1443 mpt_prt(mpt, "mpt_recv_handshake_cmd timeout3\n"); 1444 return ETIMEDOUT; 1445 } 1446 datum = mpt_read(mpt, MPT_OFFSET_DOORBELL); 1447 1448 if (reply_left-- > 0) 1449 *data16++ = datum & MPT_DB_DATA_MASK; 1450 1451 mpt_write(mpt, MPT_OFFSET_INTR_STATUS, 0); 1452 } 1453 1454 /* One more wait & clear at the end */ 1455 if (mpt_wait_db_int(mpt) != MPT_OK) { 1456 mpt_prt(mpt, "mpt_recv_handshake_cmd timeout4\n"); 1457 return ETIMEDOUT; 1458 } 1459 mpt_write(mpt, MPT_OFFSET_INTR_STATUS, 0); 1460 1461 if ((hdr->IOCStatus & MPI_IOCSTATUS_MASK) != MPI_IOCSTATUS_SUCCESS) { 1462 if (mpt->verbose >= MPT_PRT_TRACE) 1463 mpt_print_reply(hdr); 1464 return (MPT_FAIL | hdr->IOCStatus); 1465 } 1466 1467 return (0); 1468 } 1469 1470 static int 1471 mpt_get_iocfacts(struct mpt_softc *mpt, MSG_IOC_FACTS_REPLY *freplp) 1472 { 1473 MSG_IOC_FACTS f_req; 1474 int error; 1475 1476 memset(&f_req, 0, sizeof f_req); 1477 f_req.Function = MPI_FUNCTION_IOC_FACTS; 1478 f_req.MsgContext = htole32(MPT_REPLY_HANDLER_HANDSHAKE); 1479 error = mpt_send_handshake_cmd(mpt, sizeof f_req, &f_req); 1480 if (error) 1481 return(error); 1482 error = mpt_recv_handshake_reply(mpt, sizeof (*freplp), freplp); 1483 return (error); 1484 } 1485 1486 static int 1487 mpt_get_portfacts(struct mpt_softc *mpt, MSG_PORT_FACTS_REPLY *freplp) 1488 { 1489 MSG_PORT_FACTS f_req; 1490 int error; 1491 1492 /* XXX: Only getting PORT FACTS for Port 0 */ 1493 memset(&f_req, 0, sizeof f_req); 1494 f_req.Function = MPI_FUNCTION_PORT_FACTS; 1495 f_req.MsgContext = htole32(MPT_REPLY_HANDLER_HANDSHAKE); 1496 error = mpt_send_handshake_cmd(mpt, sizeof f_req, &f_req); 1497 if (error) 1498 return(error); 1499 error = mpt_recv_handshake_reply(mpt, sizeof (*freplp), freplp); 1500 return (error); 1501 } 1502 1503 /* 1504 * Send the initialization request. This is where we specify how many 1505 * SCSI busses and how many devices per bus we wish to emulate. 1506 * This is also the command that specifies the max size of the reply 1507 * frames from the IOC that we will be allocating. 1508 */ 1509 static int 1510 mpt_send_ioc_init(struct mpt_softc *mpt, uint32_t who) 1511 { 1512 int error = 0; 1513 MSG_IOC_INIT init; 1514 MSG_IOC_INIT_REPLY reply; 1515 1516 memset(&init, 0, sizeof init); 1517 init.WhoInit = who; 1518 init.Function = MPI_FUNCTION_IOC_INIT; 1519 init.MaxDevices = mpt->mpt_max_devices; 1520 init.MaxBuses = 1; 1521 1522 init.MsgVersion = htole16(MPI_VERSION); 1523 init.HeaderVersion = htole16(MPI_HEADER_VERSION); 1524 init.ReplyFrameSize = htole16(MPT_REPLY_SIZE); 1525 init.MsgContext = htole32(MPT_REPLY_HANDLER_HANDSHAKE); 1526 1527 if ((error = mpt_send_handshake_cmd(mpt, sizeof init, &init)) != 0) { 1528 return(error); 1529 } 1530 1531 error = mpt_recv_handshake_reply(mpt, sizeof reply, &reply); 1532 return (error); 1533 } 1534 1535 1536 /* 1537 * Utiltity routine to read configuration headers and pages 1538 */ 1539 int 1540 mpt_issue_cfg_req(struct mpt_softc *mpt, request_t *req, u_int Action, 1541 u_int PageVersion, u_int PageLength, u_int PageNumber, 1542 u_int PageType, uint32_t PageAddress, bus_addr_t addr, 1543 bus_size_t len, int sleep_ok, int timeout_ms) 1544 { 1545 MSG_CONFIG *cfgp; 1546 SGE_SIMPLE32 *se; 1547 1548 cfgp = req->req_vbuf; 1549 memset(cfgp, 0, sizeof *cfgp); 1550 cfgp->Action = Action; 1551 cfgp->Function = MPI_FUNCTION_CONFIG; 1552 cfgp->Header.PageVersion = PageVersion; 1553 cfgp->Header.PageLength = PageLength; 1554 cfgp->Header.PageNumber = PageNumber; 1555 cfgp->Header.PageType = PageType; 1556 cfgp->PageAddress = PageAddress; 1557 se = (SGE_SIMPLE32 *)&cfgp->PageBufferSGE; 1558 se->Address = addr; 1559 MPI_pSGE_SET_LENGTH(se, len); 1560 MPI_pSGE_SET_FLAGS(se, (MPI_SGE_FLAGS_SIMPLE_ELEMENT | 1561 MPI_SGE_FLAGS_LAST_ELEMENT | MPI_SGE_FLAGS_END_OF_BUFFER | 1562 MPI_SGE_FLAGS_END_OF_LIST | 1563 ((Action == MPI_CONFIG_ACTION_PAGE_WRITE_CURRENT 1564 || Action == MPI_CONFIG_ACTION_PAGE_WRITE_NVRAM) 1565 ? MPI_SGE_FLAGS_HOST_TO_IOC : MPI_SGE_FLAGS_IOC_TO_HOST))); 1566 cfgp->MsgContext = htole32(req->index | MPT_REPLY_HANDLER_CONFIG); 1567 1568 mpt_check_doorbell(mpt); 1569 mpt_send_cmd(mpt, req); 1570 return (mpt_wait_req(mpt, req, REQ_STATE_DONE, REQ_STATE_DONE, 1571 sleep_ok, timeout_ms)); 1572 } 1573 1574 1575 int 1576 mpt_read_cfg_header(struct mpt_softc *mpt, int PageType, int PageNumber, 1577 uint32_t PageAddress, CONFIG_PAGE_HEADER *rslt, 1578 int sleep_ok, int timeout_ms) 1579 { 1580 request_t *req; 1581 MSG_CONFIG *cfgp; 1582 int error; 1583 1584 req = mpt_get_request(mpt, sleep_ok); 1585 if (req == NULL) { 1586 mpt_prt(mpt, "mpt_read_cfg_header: Get request failed!\n"); 1587 return (ENOMEM); 1588 } 1589 1590 error = mpt_issue_cfg_req(mpt, req, MPI_CONFIG_ACTION_PAGE_HEADER, 1591 /*PageVersion*/0, /*PageLength*/0, PageNumber, 1592 PageType, PageAddress, /*addr*/0, /*len*/0, 1593 sleep_ok, timeout_ms); 1594 if (error != 0) { 1595 /* 1596 * Leave the request. Without resetting the chip, it's 1597 * still owned by it and we'll just get into trouble 1598 * freeing it now. Mark it as abandoned so that if it 1599 * shows up later it can be freed. 1600 */ 1601 mpt_prt(mpt, "read_cfg_header timed out\n"); 1602 return (ETIMEDOUT); 1603 } 1604 1605 switch (req->IOCStatus & MPI_IOCSTATUS_MASK) { 1606 case MPI_IOCSTATUS_SUCCESS: 1607 cfgp = req->req_vbuf; 1608 bcopy(&cfgp->Header, rslt, sizeof(*rslt)); 1609 error = 0; 1610 break; 1611 case MPI_IOCSTATUS_CONFIG_INVALID_PAGE: 1612 mpt_lprt(mpt, MPT_PRT_DEBUG, 1613 "Invalid Page Type %d Number %d Addr 0x%0x\n", 1614 PageType, PageNumber, PageAddress); 1615 error = EINVAL; 1616 break; 1617 default: 1618 mpt_prt(mpt, "mpt_read_cfg_header: Config Info Status %x\n", 1619 req->IOCStatus); 1620 error = EIO; 1621 break; 1622 } 1623 mpt_free_request(mpt, req); 1624 return (error); 1625 } 1626 1627 int 1628 mpt_read_cfg_page(struct mpt_softc *mpt, int Action, uint32_t PageAddress, 1629 CONFIG_PAGE_HEADER *hdr, size_t len, int sleep_ok, 1630 int timeout_ms) 1631 { 1632 request_t *req; 1633 int error; 1634 1635 req = mpt_get_request(mpt, sleep_ok); 1636 if (req == NULL) { 1637 mpt_prt(mpt, "mpt_read_cfg_page: Get request failed!\n"); 1638 return (-1); 1639 } 1640 1641 error = mpt_issue_cfg_req(mpt, req, Action, hdr->PageVersion, 1642 hdr->PageLength, hdr->PageNumber, 1643 hdr->PageType & MPI_CONFIG_PAGETYPE_MASK, 1644 PageAddress, req->req_pbuf + MPT_RQSL(mpt), 1645 len, sleep_ok, timeout_ms); 1646 if (error != 0) { 1647 mpt_prt(mpt, "read_cfg_page(%d) timed out\n", Action); 1648 return (-1); 1649 } 1650 1651 if ((req->IOCStatus & MPI_IOCSTATUS_MASK) != MPI_IOCSTATUS_SUCCESS) { 1652 mpt_prt(mpt, "mpt_read_cfg_page: Config Info Status %x\n", 1653 req->IOCStatus); 1654 mpt_free_request(mpt, req); 1655 return (-1); 1656 } 1657 bus_dmamap_sync(mpt->request_dmat, mpt->request_dmap, 1658 BUS_DMASYNC_POSTREAD); 1659 memcpy(hdr, ((uint8_t *)req->req_vbuf)+MPT_RQSL(mpt), len); 1660 mpt_free_request(mpt, req); 1661 return (0); 1662 } 1663 1664 int 1665 mpt_write_cfg_page(struct mpt_softc *mpt, int Action, uint32_t PageAddress, 1666 CONFIG_PAGE_HEADER *hdr, size_t len, int sleep_ok, 1667 int timeout_ms) 1668 { 1669 request_t *req; 1670 u_int hdr_attr; 1671 int error; 1672 1673 hdr_attr = hdr->PageType & MPI_CONFIG_PAGEATTR_MASK; 1674 if (hdr_attr != MPI_CONFIG_PAGEATTR_CHANGEABLE && 1675 hdr_attr != MPI_CONFIG_PAGEATTR_PERSISTENT) { 1676 mpt_prt(mpt, "page type 0x%x not changeable\n", 1677 hdr->PageType & MPI_CONFIG_PAGETYPE_MASK); 1678 return (-1); 1679 } 1680 1681 #if 0 1682 /* 1683 * We shouldn't mask off other bits here. 1684 */ 1685 hdr->PageType &= MPI_CONFIG_PAGETYPE_MASK; 1686 #endif 1687 1688 req = mpt_get_request(mpt, sleep_ok); 1689 if (req == NULL) 1690 return (-1); 1691 1692 memcpy(((caddr_t)req->req_vbuf) + MPT_RQSL(mpt), hdr, len); 1693 1694 /* 1695 * There isn't any point in restoring stripped out attributes 1696 * if you then mask them going down to issue the request. 1697 */ 1698 1699 #if 0 1700 /* Restore stripped out attributes */ 1701 hdr->PageType |= hdr_attr; 1702 1703 error = mpt_issue_cfg_req(mpt, req, Action, hdr->PageVersion, 1704 hdr->PageLength, hdr->PageNumber, 1705 hdr->PageType & MPI_CONFIG_PAGETYPE_MASK, 1706 PageAddress, req->req_pbuf + MPT_RQSL(mpt), 1707 len, sleep_ok, timeout_ms); 1708 #else 1709 error = mpt_issue_cfg_req(mpt, req, Action, hdr->PageVersion, 1710 hdr->PageLength, hdr->PageNumber, 1711 hdr->PageType, PageAddress, 1712 req->req_pbuf + MPT_RQSL(mpt), 1713 len, sleep_ok, timeout_ms); 1714 #endif 1715 if (error != 0) { 1716 mpt_prt(mpt, "mpt_write_cfg_page timed out\n"); 1717 return (-1); 1718 } 1719 1720 if ((req->IOCStatus & MPI_IOCSTATUS_MASK) != MPI_IOCSTATUS_SUCCESS) { 1721 mpt_prt(mpt, "mpt_write_cfg_page: Config Info Status %x\n", 1722 req->IOCStatus); 1723 mpt_free_request(mpt, req); 1724 return (-1); 1725 } 1726 mpt_free_request(mpt, req); 1727 return (0); 1728 } 1729 1730 /* 1731 * Read IOC configuration information 1732 */ 1733 static int 1734 mpt_read_config_info_ioc(struct mpt_softc *mpt) 1735 { 1736 CONFIG_PAGE_HEADER hdr; 1737 struct mpt_raid_volume *mpt_raid; 1738 int rv; 1739 int i; 1740 size_t len; 1741 1742 rv = mpt_read_cfg_header(mpt, MPI_CONFIG_PAGETYPE_IOC, 1743 2, 0, &hdr, FALSE, 5000); 1744 /* 1745 * If it's an invalid page, so what? Not a supported function.... 1746 */ 1747 if (rv == EINVAL) { 1748 return (0); 1749 } 1750 if (rv) { 1751 return (rv); 1752 } 1753 1754 #if __FreeBSD_version >= 500000 1755 mpt_lprt(mpt, MPT_PRT_DEBUG, "IOC Page 2 Header: ver %x, len %zx, " 1756 "num %x, type %x\n", hdr.PageVersion, 1757 hdr.PageLength * sizeof(uint32_t), 1758 hdr.PageNumber, hdr.PageType); 1759 #else 1760 mpt_lprt(mpt, MPT_PRT_DEBUG, "IOC Page 2 Header: ver %x, len %z, " 1761 "num %x, type %x\n", hdr.PageVersion, 1762 hdr.PageLength * sizeof(uint32_t), 1763 hdr.PageNumber, hdr.PageType); 1764 #endif 1765 1766 len = hdr.PageLength * sizeof(uint32_t); 1767 mpt->ioc_page2 = malloc(len, M_DEVBUF, M_NOWAIT | M_ZERO); 1768 if (mpt->ioc_page2 == NULL) { 1769 mpt_prt(mpt, "unable to allocate memory for IOC page 2\n"); 1770 mpt_raid_free_mem(mpt); 1771 return (ENOMEM); 1772 } 1773 memcpy(&mpt->ioc_page2->Header, &hdr, sizeof(hdr)); 1774 rv = mpt_read_cur_cfg_page(mpt, 0, 1775 &mpt->ioc_page2->Header, len, FALSE, 5000); 1776 if (rv) { 1777 mpt_prt(mpt, "failed to read IOC Page 2\n"); 1778 mpt_raid_free_mem(mpt); 1779 return (EIO); 1780 } 1781 1782 if (mpt->ioc_page2->CapabilitiesFlags != 0) { 1783 uint32_t mask; 1784 1785 mpt_prt(mpt, "Capabilities: ("); 1786 for (mask = 1; mask != 0; mask <<= 1) { 1787 if ((mpt->ioc_page2->CapabilitiesFlags & mask) == 0) { 1788 continue; 1789 } 1790 switch (mask) { 1791 case MPI_IOCPAGE2_CAP_FLAGS_IS_SUPPORT: 1792 mpt_prtc(mpt, " RAID-0"); 1793 break; 1794 case MPI_IOCPAGE2_CAP_FLAGS_IME_SUPPORT: 1795 mpt_prtc(mpt, " RAID-1E"); 1796 break; 1797 case MPI_IOCPAGE2_CAP_FLAGS_IM_SUPPORT: 1798 mpt_prtc(mpt, " RAID-1"); 1799 break; 1800 case MPI_IOCPAGE2_CAP_FLAGS_SES_SUPPORT: 1801 mpt_prtc(mpt, " SES"); 1802 break; 1803 case MPI_IOCPAGE2_CAP_FLAGS_SAFTE_SUPPORT: 1804 mpt_prtc(mpt, " SAFTE"); 1805 break; 1806 case MPI_IOCPAGE2_CAP_FLAGS_CROSS_CHANNEL_SUPPORT: 1807 mpt_prtc(mpt, " Multi-Channel-Arrays"); 1808 default: 1809 break; 1810 } 1811 } 1812 mpt_prtc(mpt, " )\n"); 1813 if ((mpt->ioc_page2->CapabilitiesFlags 1814 & (MPI_IOCPAGE2_CAP_FLAGS_IS_SUPPORT 1815 | MPI_IOCPAGE2_CAP_FLAGS_IME_SUPPORT 1816 | MPI_IOCPAGE2_CAP_FLAGS_IM_SUPPORT)) != 0) { 1817 mpt_prt(mpt, "%d Active Volume%s(%d Max)\n", 1818 mpt->ioc_page2->NumActiveVolumes, 1819 mpt->ioc_page2->NumActiveVolumes != 1 1820 ? "s " : " ", 1821 mpt->ioc_page2->MaxVolumes); 1822 mpt_prt(mpt, "%d Hidden Drive Member%s(%d Max)\n", 1823 mpt->ioc_page2->NumActivePhysDisks, 1824 mpt->ioc_page2->NumActivePhysDisks != 1 1825 ? "s " : " ", 1826 mpt->ioc_page2->MaxPhysDisks); 1827 } 1828 } 1829 1830 len = mpt->ioc_page2->MaxVolumes * sizeof(struct mpt_raid_volume); 1831 mpt->raid_volumes = malloc(len, M_DEVBUF, M_NOWAIT | M_ZERO); 1832 if (mpt->raid_volumes == NULL) { 1833 mpt_prt(mpt, "Could not allocate RAID volume data\n"); 1834 mpt_raid_free_mem(mpt); 1835 return (ENOMEM); 1836 } 1837 1838 /* 1839 * Copy critical data out of ioc_page2 so that we can 1840 * safely refresh the page without windows of unreliable 1841 * data. 1842 */ 1843 mpt->raid_max_volumes = mpt->ioc_page2->MaxVolumes; 1844 1845 len = sizeof(*mpt->raid_volumes->config_page) + 1846 (sizeof (RAID_VOL0_PHYS_DISK) * (mpt->ioc_page2->MaxPhysDisks - 1)); 1847 for (i = 0; i < mpt->ioc_page2->MaxVolumes; i++) { 1848 mpt_raid = &mpt->raid_volumes[i]; 1849 mpt_raid->config_page = 1850 malloc(len, M_DEVBUF, M_NOWAIT | M_ZERO); 1851 if (mpt_raid->config_page == NULL) { 1852 mpt_prt(mpt, "Could not allocate RAID page data\n"); 1853 mpt_raid_free_mem(mpt); 1854 return (ENOMEM); 1855 } 1856 } 1857 mpt->raid_page0_len = len; 1858 1859 len = mpt->ioc_page2->MaxPhysDisks * sizeof(struct mpt_raid_disk); 1860 mpt->raid_disks = malloc(len, M_DEVBUF, M_NOWAIT | M_ZERO); 1861 if (mpt->raid_disks == NULL) { 1862 mpt_prt(mpt, "Could not allocate RAID disk data\n"); 1863 mpt_raid_free_mem(mpt); 1864 return (ENOMEM); 1865 } 1866 mpt->raid_max_disks = mpt->ioc_page2->MaxPhysDisks; 1867 1868 /* 1869 * Load page 3. 1870 */ 1871 rv = mpt_read_cfg_header(mpt, MPI_CONFIG_PAGETYPE_IOC, 1872 3, 0, &hdr, FALSE, 5000); 1873 if (rv) { 1874 mpt_raid_free_mem(mpt); 1875 return (EIO); 1876 } 1877 1878 mpt_lprt(mpt, MPT_PRT_DEBUG, "IOC Page 3 Header: %x %x %x %x\n", 1879 hdr.PageVersion, hdr.PageLength, hdr.PageNumber, hdr.PageType); 1880 1881 len = hdr.PageLength * sizeof(uint32_t); 1882 mpt->ioc_page3 = malloc(len, M_DEVBUF, M_NOWAIT | M_ZERO); 1883 if (mpt->ioc_page3 == NULL) { 1884 mpt_prt(mpt, "unable to allocate memory for IOC page 3\n"); 1885 mpt_raid_free_mem(mpt); 1886 return (ENOMEM); 1887 } 1888 memcpy(&mpt->ioc_page3->Header, &hdr, sizeof(hdr)); 1889 rv = mpt_read_cur_cfg_page(mpt, 0, 1890 &mpt->ioc_page3->Header, len, FALSE, 5000); 1891 if (rv) { 1892 mpt_raid_free_mem(mpt); 1893 return (EIO); 1894 } 1895 mpt_raid_wakeup(mpt); 1896 return (0); 1897 } 1898 1899 /* 1900 * Enable IOC port 1901 */ 1902 static int 1903 mpt_send_port_enable(struct mpt_softc *mpt, int port) 1904 { 1905 request_t *req; 1906 MSG_PORT_ENABLE *enable_req; 1907 int error; 1908 1909 req = mpt_get_request(mpt, /*sleep_ok*/FALSE); 1910 if (req == NULL) 1911 return (-1); 1912 1913 enable_req = req->req_vbuf; 1914 memset(enable_req, 0, MPT_RQSL(mpt)); 1915 1916 enable_req->Function = MPI_FUNCTION_PORT_ENABLE; 1917 enable_req->MsgContext = htole32(req->index | MPT_REPLY_HANDLER_CONFIG); 1918 enable_req->PortNumber = port; 1919 1920 mpt_check_doorbell(mpt); 1921 mpt_lprt(mpt, MPT_PRT_DEBUG, "enabling port %d\n", port); 1922 1923 mpt_send_cmd(mpt, req); 1924 error = mpt_wait_req(mpt, req, REQ_STATE_DONE, REQ_STATE_DONE, 1925 FALSE, (mpt->is_sas || mpt->is_fc)? 30000 : 3000); 1926 if (error != 0) { 1927 mpt_prt(mpt, "port %d enable timed out\n", port); 1928 return (-1); 1929 } 1930 mpt_free_request(mpt, req); 1931 mpt_lprt(mpt, MPT_PRT_DEBUG, "enabled port %d\n", port); 1932 return (0); 1933 } 1934 1935 /* 1936 * Enable/Disable asynchronous event reporting. 1937 */ 1938 static int 1939 mpt_send_event_request(struct mpt_softc *mpt, int onoff) 1940 { 1941 request_t *req; 1942 MSG_EVENT_NOTIFY *enable_req; 1943 1944 req = mpt_get_request(mpt, FALSE); 1945 if (req == NULL) { 1946 return (ENOMEM); 1947 } 1948 enable_req = req->req_vbuf; 1949 memset(enable_req, 0, sizeof *enable_req); 1950 1951 enable_req->Function = MPI_FUNCTION_EVENT_NOTIFICATION; 1952 enable_req->MsgContext = htole32(req->index | MPT_REPLY_HANDLER_EVENTS); 1953 enable_req->Switch = onoff; 1954 1955 mpt_check_doorbell(mpt); 1956 mpt_lprt(mpt, MPT_PRT_DEBUG, "%sabling async events\n", 1957 onoff ? "en" : "dis"); 1958 /* 1959 * Send the command off, but don't wait for it. 1960 */ 1961 mpt_send_cmd(mpt, req); 1962 return (0); 1963 } 1964 1965 /* 1966 * Un-mask the interupts on the chip. 1967 */ 1968 void 1969 mpt_enable_ints(struct mpt_softc *mpt) 1970 { 1971 /* Unmask every thing except door bell int */ 1972 mpt_write(mpt, MPT_OFFSET_INTR_MASK, MPT_INTR_DB_MASK); 1973 } 1974 1975 /* 1976 * Mask the interupts on the chip. 1977 */ 1978 void 1979 mpt_disable_ints(struct mpt_softc *mpt) 1980 { 1981 /* Mask all interrupts */ 1982 mpt_write(mpt, MPT_OFFSET_INTR_MASK, 1983 MPT_INTR_REPLY_MASK | MPT_INTR_DB_MASK); 1984 } 1985 1986 static void 1987 mpt_sysctl_attach(struct mpt_softc *mpt) 1988 { 1989 #if __FreeBSD_version >= 500000 1990 struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(mpt->dev); 1991 struct sysctl_oid *tree = device_get_sysctl_tree(mpt->dev); 1992 1993 SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO, 1994 "debug", CTLFLAG_RW, &mpt->verbose, 0, 1995 "Debugging/Verbose level"); 1996 SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO, 1997 "role", CTLFLAG_RD, &mpt->role, 0, 1998 "HBA role"); 1999 #endif 2000 } 2001 2002 int 2003 mpt_attach(struct mpt_softc *mpt) 2004 { 2005 struct mpt_personality *pers; 2006 int i; 2007 int error; 2008 2009 TAILQ_INSERT_TAIL(&mpt_tailq, mpt, links); 2010 for (i = 0; i < MPT_MAX_PERSONALITIES; i++) { 2011 pers = mpt_personalities[i]; 2012 if (pers == NULL) { 2013 continue; 2014 } 2015 if (pers->probe(mpt) == 0) { 2016 error = pers->attach(mpt); 2017 if (error != 0) { 2018 mpt_detach(mpt); 2019 return (error); 2020 } 2021 mpt->mpt_pers_mask |= (0x1 << pers->id); 2022 pers->use_count++; 2023 } 2024 } 2025 2026 /* 2027 * Now that we've attached everything, do the enable function 2028 * for all of the personalities. This allows the personalities 2029 * to do setups that are appropriate for them prior to enabling 2030 * any ports. 2031 */ 2032 for (i = 0; i < MPT_MAX_PERSONALITIES; i++) { 2033 pers = mpt_personalities[i]; 2034 if (pers != NULL && MPT_PERS_ATTACHED(pers, mpt) != 0) { 2035 error = pers->enable(mpt); 2036 if (error != 0) { 2037 mpt_prt(mpt, "personality %s attached but would" 2038 " not enable (%d)\n", pers->name, error); 2039 mpt_detach(mpt); 2040 return (error); 2041 } 2042 } 2043 } 2044 return (0); 2045 } 2046 2047 int 2048 mpt_shutdown(struct mpt_softc *mpt) 2049 { 2050 struct mpt_personality *pers; 2051 2052 MPT_PERS_FOREACH_REVERSE(mpt, pers) { 2053 pers->shutdown(mpt); 2054 } 2055 return (0); 2056 } 2057 2058 int 2059 mpt_detach(struct mpt_softc *mpt) 2060 { 2061 struct mpt_personality *pers; 2062 2063 MPT_PERS_FOREACH_REVERSE(mpt, pers) { 2064 pers->detach(mpt); 2065 mpt->mpt_pers_mask &= ~(0x1 << pers->id); 2066 pers->use_count--; 2067 } 2068 TAILQ_REMOVE(&mpt_tailq, mpt, links); 2069 return (0); 2070 } 2071 2072 int 2073 mpt_core_load(struct mpt_personality *pers) 2074 { 2075 int i; 2076 2077 /* 2078 * Setup core handlers and insert the default handler 2079 * into all "empty slots". 2080 */ 2081 for (i = 0; i < MPT_NUM_REPLY_HANDLERS; i++) { 2082 mpt_reply_handlers[i] = mpt_default_reply_handler; 2083 } 2084 2085 mpt_reply_handlers[MPT_CBI(MPT_REPLY_HANDLER_EVENTS)] = 2086 mpt_event_reply_handler; 2087 mpt_reply_handlers[MPT_CBI(MPT_REPLY_HANDLER_CONFIG)] = 2088 mpt_config_reply_handler; 2089 mpt_reply_handlers[MPT_CBI(MPT_REPLY_HANDLER_HANDSHAKE)] = 2090 mpt_handshake_reply_handler; 2091 return (0); 2092 } 2093 2094 /* 2095 * Initialize per-instance driver data and perform 2096 * initial controller configuration. 2097 */ 2098 int 2099 mpt_core_attach(struct mpt_softc *mpt) 2100 { 2101 int val; 2102 int error; 2103 2104 2105 LIST_INIT(&mpt->ack_frames); 2106 2107 /* Put all request buffers on the free list */ 2108 TAILQ_INIT(&mpt->request_pending_list); 2109 TAILQ_INIT(&mpt->request_free_list); 2110 TAILQ_INIT(&mpt->request_timeout_list); 2111 for (val = 0; val < MPT_MAX_REQUESTS(mpt); val++) { 2112 request_t *req = &mpt->request_pool[val]; 2113 req->state = REQ_STATE_ALLOCATED; 2114 mpt_free_request(mpt, req); 2115 } 2116 2117 for (val = 0; val < MPT_MAX_LUNS; val++) { 2118 STAILQ_INIT(&mpt->trt[val].atios); 2119 STAILQ_INIT(&mpt->trt[val].inots); 2120 } 2121 STAILQ_INIT(&mpt->trt_wildcard.atios); 2122 STAILQ_INIT(&mpt->trt_wildcard.inots); 2123 2124 mpt->scsi_tgt_handler_id = MPT_HANDLER_ID_NONE; 2125 2126 mpt_sysctl_attach(mpt); 2127 2128 mpt_lprt(mpt, MPT_PRT_DEBUG, "doorbell req = %s\n", 2129 mpt_ioc_diag(mpt_read(mpt, MPT_OFFSET_DOORBELL))); 2130 2131 error = mpt_configure_ioc(mpt); 2132 2133 return (error); 2134 } 2135 2136 int 2137 mpt_core_enable(struct mpt_softc *mpt) 2138 { 2139 /* 2140 * We enter with the IOC enabled, but async events 2141 * not enabled, ports not enabled and interrupts 2142 * not enabled. 2143 */ 2144 2145 /* 2146 * Enable asynchronous event reporting- all personalities 2147 * have attached so that they should be able to now field 2148 * async events. 2149 */ 2150 mpt_send_event_request(mpt, 1); 2151 2152 /* 2153 * Catch any pending interrupts 2154 * 2155 * This seems to be crucial- otherwise 2156 * the portenable below times out. 2157 */ 2158 mpt_intr(mpt); 2159 2160 /* 2161 * Enable Interrupts 2162 */ 2163 mpt_enable_ints(mpt); 2164 2165 /* 2166 * Catch any pending interrupts 2167 * 2168 * This seems to be crucial- otherwise 2169 * the portenable below times out. 2170 */ 2171 mpt_intr(mpt); 2172 2173 /* 2174 * Enable the port. 2175 */ 2176 if (mpt_send_port_enable(mpt, 0) != MPT_OK) { 2177 mpt_prt(mpt, "failed to enable port 0\n"); 2178 return (ENXIO); 2179 } 2180 return (0); 2181 } 2182 2183 void 2184 mpt_core_shutdown(struct mpt_softc *mpt) 2185 { 2186 mpt_disable_ints(mpt); 2187 } 2188 2189 void 2190 mpt_core_detach(struct mpt_softc *mpt) 2191 { 2192 mpt_disable_ints(mpt); 2193 } 2194 2195 int 2196 mpt_core_unload(struct mpt_personality *pers) 2197 { 2198 /* Unload is always successfull. */ 2199 return (0); 2200 } 2201 2202 #define FW_UPLOAD_REQ_SIZE \ 2203 (sizeof(MSG_FW_UPLOAD) - sizeof(SGE_MPI_UNION) \ 2204 + sizeof(FW_UPLOAD_TCSGE) + sizeof(SGE_SIMPLE32)) 2205 2206 static int 2207 mpt_upload_fw(struct mpt_softc *mpt) 2208 { 2209 uint8_t fw_req_buf[FW_UPLOAD_REQ_SIZE]; 2210 MSG_FW_UPLOAD_REPLY fw_reply; 2211 MSG_FW_UPLOAD *fw_req; 2212 FW_UPLOAD_TCSGE *tsge; 2213 SGE_SIMPLE32 *sge; 2214 uint32_t flags; 2215 int error; 2216 2217 memset(&fw_req_buf, 0, sizeof(fw_req_buf)); 2218 fw_req = (MSG_FW_UPLOAD *)fw_req_buf; 2219 fw_req->ImageType = MPI_FW_UPLOAD_ITYPE_FW_IOC_MEM; 2220 fw_req->Function = MPI_FUNCTION_FW_UPLOAD; 2221 fw_req->MsgContext = htole32(MPT_REPLY_HANDLER_HANDSHAKE); 2222 tsge = (FW_UPLOAD_TCSGE *)&fw_req->SGL; 2223 tsge->DetailsLength = 12; 2224 tsge->Flags = MPI_SGE_FLAGS_TRANSACTION_ELEMENT; 2225 tsge->ImageSize = htole32(mpt->fw_image_size); 2226 sge = (SGE_SIMPLE32 *)(tsge + 1); 2227 flags = (MPI_SGE_FLAGS_LAST_ELEMENT | MPI_SGE_FLAGS_END_OF_BUFFER 2228 | MPI_SGE_FLAGS_END_OF_LIST | MPI_SGE_FLAGS_SIMPLE_ELEMENT 2229 | MPI_SGE_FLAGS_32_BIT_ADDRESSING | MPI_SGE_FLAGS_IOC_TO_HOST); 2230 flags <<= MPI_SGE_FLAGS_SHIFT; 2231 sge->FlagsLength = htole32(flags | mpt->fw_image_size); 2232 sge->Address = htole32(mpt->fw_phys); 2233 error = mpt_send_handshake_cmd(mpt, sizeof(fw_req_buf), &fw_req_buf); 2234 if (error) 2235 return(error); 2236 error = mpt_recv_handshake_reply(mpt, sizeof(fw_reply), &fw_reply); 2237 return (error); 2238 } 2239 2240 static void 2241 mpt_diag_outsl(struct mpt_softc *mpt, uint32_t addr, 2242 uint32_t *data, bus_size_t len) 2243 { 2244 uint32_t *data_end; 2245 2246 data_end = data + (roundup2(len, sizeof(uint32_t)) / 4); 2247 if (mpt->is_sas) { 2248 pci_enable_io(mpt->dev, SYS_RES_IOPORT); 2249 } 2250 mpt_pio_write(mpt, MPT_OFFSET_DIAG_ADDR, addr); 2251 while (data != data_end) { 2252 mpt_pio_write(mpt, MPT_OFFSET_DIAG_DATA, *data); 2253 data++; 2254 } 2255 if (mpt->is_sas) { 2256 pci_disable_io(mpt->dev, SYS_RES_IOPORT); 2257 } 2258 } 2259 2260 static int 2261 mpt_download_fw(struct mpt_softc *mpt) 2262 { 2263 MpiFwHeader_t *fw_hdr; 2264 int error; 2265 uint32_t ext_offset; 2266 uint32_t data; 2267 2268 mpt_prt(mpt, "Downloading Firmware - Image Size %d\n", 2269 mpt->fw_image_size); 2270 2271 error = mpt_enable_diag_mode(mpt); 2272 if (error != 0) { 2273 mpt_prt(mpt, "Could not enter diagnostic mode!\n"); 2274 return (EIO); 2275 } 2276 2277 mpt_write(mpt, MPT_OFFSET_DIAGNOSTIC, 2278 MPI_DIAG_RW_ENABLE|MPI_DIAG_DISABLE_ARM); 2279 2280 fw_hdr = (MpiFwHeader_t *)mpt->fw_image; 2281 mpt_diag_outsl(mpt, fw_hdr->LoadStartAddress, (uint32_t*)fw_hdr, 2282 fw_hdr->ImageSize); 2283 2284 ext_offset = fw_hdr->NextImageHeaderOffset; 2285 while (ext_offset != 0) { 2286 MpiExtImageHeader_t *ext; 2287 2288 ext = (MpiExtImageHeader_t *)((uintptr_t)fw_hdr + ext_offset); 2289 ext_offset = ext->NextImageHeaderOffset; 2290 2291 mpt_diag_outsl(mpt, ext->LoadStartAddress, (uint32_t*)ext, 2292 ext->ImageSize); 2293 } 2294 2295 if (mpt->is_sas) { 2296 pci_enable_io(mpt->dev, SYS_RES_IOPORT); 2297 } 2298 /* Setup the address to jump to on reset. */ 2299 mpt_pio_write(mpt, MPT_OFFSET_DIAG_ADDR, fw_hdr->IopResetRegAddr); 2300 mpt_pio_write(mpt, MPT_OFFSET_DIAG_DATA, fw_hdr->IopResetVectorValue); 2301 2302 /* 2303 * The controller sets the "flash bad" status after attempting 2304 * to auto-boot from flash. Clear the status so that the controller 2305 * will continue the boot process with our newly installed firmware. 2306 */ 2307 mpt_pio_write(mpt, MPT_OFFSET_DIAG_ADDR, MPT_DIAG_MEM_CFG_BASE); 2308 data = mpt_pio_read(mpt, MPT_OFFSET_DIAG_DATA) | MPT_DIAG_MEM_CFG_BADFL; 2309 mpt_pio_write(mpt, MPT_OFFSET_DIAG_ADDR, MPT_DIAG_MEM_CFG_BASE); 2310 mpt_pio_write(mpt, MPT_OFFSET_DIAG_DATA, data); 2311 2312 if (mpt->is_sas) { 2313 pci_disable_io(mpt->dev, SYS_RES_IOPORT); 2314 } 2315 2316 /* 2317 * Re-enable the processor and clear the boot halt flag. 2318 */ 2319 data = mpt_read(mpt, MPT_OFFSET_DIAGNOSTIC); 2320 data &= ~(MPI_DIAG_PREVENT_IOC_BOOT|MPI_DIAG_DISABLE_ARM); 2321 mpt_write(mpt, MPT_OFFSET_DIAGNOSTIC, data); 2322 2323 mpt_disable_diag_mode(mpt); 2324 return (0); 2325 } 2326 2327 /* 2328 * Allocate/Initialize data structures for the controller. Called 2329 * once at instance startup. 2330 */ 2331 static int 2332 mpt_configure_ioc(struct mpt_softc *mpt) 2333 { 2334 MSG_PORT_FACTS_REPLY pfp; 2335 MSG_IOC_FACTS_REPLY facts; 2336 int try; 2337 int needreset; 2338 uint32_t max_chain_depth; 2339 2340 needreset = 0; 2341 for (try = 0; try < MPT_MAX_TRYS; try++) { 2342 2343 /* 2344 * No need to reset if the IOC is already in the READY state. 2345 * 2346 * Force reset if initialization failed previously. 2347 * Note that a hard_reset of the second channel of a '929 2348 * will stop operation of the first channel. Hopefully, if the 2349 * first channel is ok, the second will not require a hard 2350 * reset. 2351 */ 2352 if (needreset || MPT_STATE(mpt_rd_db(mpt)) != 2353 MPT_DB_STATE_READY) { 2354 if (mpt_reset(mpt, FALSE) != MPT_OK) { 2355 continue; 2356 } 2357 } 2358 needreset = 0; 2359 2360 if (mpt_get_iocfacts(mpt, &facts) != MPT_OK) { 2361 mpt_prt(mpt, "mpt_get_iocfacts failed\n"); 2362 needreset = 1; 2363 continue; 2364 } 2365 2366 mpt->mpt_global_credits = le16toh(facts.GlobalCredits); 2367 mpt->request_frame_size = le16toh(facts.RequestFrameSize); 2368 mpt->ioc_facts_flags = facts.Flags; 2369 mpt_prt(mpt, "MPI Version=%d.%d.%d.%d\n", 2370 le16toh(facts.MsgVersion) >> 8, 2371 le16toh(facts.MsgVersion) & 0xFF, 2372 le16toh(facts.HeaderVersion) >> 8, 2373 le16toh(facts.HeaderVersion) & 0xFF); 2374 2375 /* 2376 * Now that we know request frame size, we can calculate 2377 * the actual (reasonable) segment limit for read/write I/O. 2378 * 2379 * This limit is constrained by: 2380 * 2381 * + The size of each area we allocate per command (and how 2382 * many chain segments we can fit into it). 2383 * + The total number of areas we've set up. 2384 * + The actual chain depth the card will allow. 2385 * 2386 * The first area's segment count is limited by the I/O request 2387 * at the head of it. We cannot allocate realistically more 2388 * than MPT_MAX_REQUESTS areas. Therefore, to account for both 2389 * conditions, we'll just start out with MPT_MAX_REQUESTS-2. 2390 * 2391 */ 2392 max_chain_depth = facts.MaxChainDepth; 2393 2394 /* total number of request areas we (can) allocate */ 2395 mpt->max_seg_cnt = MPT_MAX_REQUESTS(mpt) - 2; 2396 2397 /* converted to the number of chain areas possible */ 2398 mpt->max_seg_cnt *= MPT_NRFM(mpt); 2399 2400 /* limited by the number of chain areas the card will support */ 2401 if (mpt->max_seg_cnt > max_chain_depth) { 2402 mpt_lprt(mpt, MPT_PRT_DEBUG, 2403 "chain depth limited to %u (from %u)\n", 2404 max_chain_depth, mpt->max_seg_cnt); 2405 mpt->max_seg_cnt = max_chain_depth; 2406 } 2407 2408 /* converted to the number of simple sges in chain segments. */ 2409 mpt->max_seg_cnt *= (MPT_NSGL(mpt) - 1); 2410 2411 mpt_lprt(mpt, MPT_PRT_DEBUG, 2412 "Maximum Segment Count: %u\n", mpt->max_seg_cnt); 2413 mpt_lprt(mpt, MPT_PRT_DEBUG, 2414 "MsgLength=%u IOCNumber = %d\n", 2415 facts.MsgLength, facts.IOCNumber); 2416 mpt_lprt(mpt, MPT_PRT_DEBUG, 2417 "IOCFACTS: GlobalCredits=%d BlockSize=%u bytes " 2418 "Request Frame Size %u bytes Max Chain Depth %u\n", 2419 mpt->mpt_global_credits, facts.BlockSize, 2420 mpt->request_frame_size << 2, max_chain_depth); 2421 mpt_lprt(mpt, MPT_PRT_DEBUG, 2422 "IOCFACTS: Num Ports %d, FWImageSize %d, " 2423 "Flags=%#x\n", facts.NumberOfPorts, 2424 le32toh(facts.FWImageSize), facts.Flags); 2425 2426 2427 if ((facts.Flags & MPI_IOCFACTS_FLAGS_FW_DOWNLOAD_BOOT) != 0) { 2428 struct mpt_map_info mi; 2429 int error; 2430 2431 /* 2432 * In some configurations, the IOC's firmware is 2433 * stored in a shared piece of system NVRAM that 2434 * is only accessable via the BIOS. In this 2435 * case, the firmware keeps a copy of firmware in 2436 * RAM until the OS driver retrieves it. Once 2437 * retrieved, we are responsible for re-downloading 2438 * the firmware after any hard-reset. 2439 */ 2440 mpt->fw_image_size = le32toh(facts.FWImageSize); 2441 error = mpt_dma_tag_create(mpt, mpt->parent_dmat, 2442 /*alignment*/1, /*boundary*/0, 2443 /*lowaddr*/BUS_SPACE_MAXADDR_32BIT, 2444 /*highaddr*/BUS_SPACE_MAXADDR, /*filter*/NULL, 2445 /*filterarg*/NULL, mpt->fw_image_size, 2446 /*nsegments*/1, /*maxsegsz*/mpt->fw_image_size, 2447 /*flags*/0, &mpt->fw_dmat); 2448 if (error != 0) { 2449 mpt_prt(mpt, "cannot create fw dma tag\n"); 2450 return (ENOMEM); 2451 } 2452 error = bus_dmamem_alloc(mpt->fw_dmat, 2453 (void **)&mpt->fw_image, BUS_DMA_NOWAIT, 2454 &mpt->fw_dmap); 2455 if (error != 0) { 2456 mpt_prt(mpt, "cannot allocate fw mem.\n"); 2457 bus_dma_tag_destroy(mpt->fw_dmat); 2458 return (ENOMEM); 2459 } 2460 mi.mpt = mpt; 2461 mi.error = 0; 2462 bus_dmamap_load(mpt->fw_dmat, mpt->fw_dmap, 2463 mpt->fw_image, mpt->fw_image_size, mpt_map_rquest, 2464 &mi, 0); 2465 mpt->fw_phys = mi.phys; 2466 2467 error = mpt_upload_fw(mpt); 2468 if (error != 0) { 2469 mpt_prt(mpt, "fw upload failed.\n"); 2470 bus_dmamap_unload(mpt->fw_dmat, mpt->fw_dmap); 2471 bus_dmamem_free(mpt->fw_dmat, mpt->fw_image, 2472 mpt->fw_dmap); 2473 bus_dma_tag_destroy(mpt->fw_dmat); 2474 mpt->fw_image = NULL; 2475 return (EIO); 2476 } 2477 } 2478 2479 if (mpt_get_portfacts(mpt, &pfp) != MPT_OK) { 2480 mpt_prt(mpt, "mpt_get_portfacts failed\n"); 2481 needreset = 1; 2482 continue; 2483 } 2484 2485 mpt_lprt(mpt, MPT_PRT_DEBUG, 2486 "PORTFACTS: Type %x PFlags %x IID %d MaxDev %d\n", 2487 pfp.PortType, pfp.ProtocolFlags, pfp.PortSCSIID, 2488 pfp.MaxDevices); 2489 2490 mpt->mpt_port_type = pfp.PortType; 2491 mpt->mpt_proto_flags = pfp.ProtocolFlags; 2492 if (pfp.PortType != MPI_PORTFACTS_PORTTYPE_SCSI && 2493 pfp.PortType != MPI_PORTFACTS_PORTTYPE_SAS && 2494 pfp.PortType != MPI_PORTFACTS_PORTTYPE_FC) { 2495 mpt_prt(mpt, "Unsupported Port Type (%x)\n", 2496 pfp.PortType); 2497 return (ENXIO); 2498 } 2499 mpt->mpt_max_tgtcmds = le16toh(pfp.MaxPostedCmdBuffers); 2500 2501 if (pfp.PortType == MPI_PORTFACTS_PORTTYPE_FC) { 2502 mpt->is_fc = 1; 2503 mpt->is_sas = 0; 2504 mpt->is_spi = 0; 2505 } else if (pfp.PortType == MPI_PORTFACTS_PORTTYPE_SAS) { 2506 mpt->is_fc = 0; 2507 mpt->is_sas = 1; 2508 mpt->is_spi = 0; 2509 } else { 2510 mpt->is_fc = 0; 2511 mpt->is_sas = 0; 2512 mpt->is_spi = 1; 2513 } 2514 mpt->mpt_ini_id = pfp.PortSCSIID; 2515 mpt->mpt_max_devices = pfp.MaxDevices; 2516 2517 /* 2518 * Set our role with what this port supports. 2519 * 2520 * Note this might be changed later in different modules 2521 * if this is different from what is wanted. 2522 */ 2523 mpt->role = MPT_ROLE_NONE; 2524 if (pfp.ProtocolFlags & MPI_PORTFACTS_PROTOCOL_INITIATOR) { 2525 mpt->role |= MPT_ROLE_INITIATOR; 2526 } 2527 if (pfp.ProtocolFlags & MPI_PORTFACTS_PROTOCOL_TARGET) { 2528 mpt->role |= MPT_ROLE_TARGET; 2529 } 2530 if (mpt_enable_ioc(mpt, 0) != MPT_OK) { 2531 mpt_prt(mpt, "unable to initialize IOC\n"); 2532 return (ENXIO); 2533 } 2534 2535 /* 2536 * Read IOC configuration information. 2537 * 2538 * We need this to determine whether or not we have certain 2539 * settings for Integrated Mirroring (e.g.). 2540 */ 2541 mpt_read_config_info_ioc(mpt); 2542 2543 /* Everything worked */ 2544 break; 2545 } 2546 2547 if (try >= MPT_MAX_TRYS) { 2548 mpt_prt(mpt, "failed to initialize IOC"); 2549 return (EIO); 2550 } 2551 2552 return (0); 2553 } 2554 2555 static int 2556 mpt_enable_ioc(struct mpt_softc *mpt, int portenable) 2557 { 2558 uint32_t pptr; 2559 int val; 2560 2561 if (mpt_send_ioc_init(mpt, MPI_WHOINIT_HOST_DRIVER) != MPT_OK) { 2562 mpt_prt(mpt, "mpt_send_ioc_init failed\n"); 2563 return (EIO); 2564 } 2565 2566 mpt_lprt(mpt, MPT_PRT_DEBUG, "mpt_send_ioc_init ok\n"); 2567 2568 if (mpt_wait_state(mpt, MPT_DB_STATE_RUNNING) != MPT_OK) { 2569 mpt_prt(mpt, "IOC failed to go to run state\n"); 2570 return (ENXIO); 2571 } 2572 mpt_lprt(mpt, MPT_PRT_DEBUG, "IOC now at RUNSTATE\n"); 2573 2574 /* 2575 * Give it reply buffers 2576 * 2577 * Do *not* exceed global credits. 2578 */ 2579 for (val = 0, pptr = mpt->reply_phys; 2580 (pptr + MPT_REPLY_SIZE) < (mpt->reply_phys + PAGE_SIZE); 2581 pptr += MPT_REPLY_SIZE) { 2582 mpt_free_reply(mpt, pptr); 2583 if (++val == mpt->mpt_global_credits - 1) 2584 break; 2585 } 2586 2587 2588 /* 2589 * Enable the port if asked. This is only done if we're resetting 2590 * the IOC after initial startup. 2591 */ 2592 if (portenable) { 2593 /* 2594 * Enable asynchronous event reporting 2595 */ 2596 mpt_send_event_request(mpt, 1); 2597 2598 if (mpt_send_port_enable(mpt, 0) != MPT_OK) { 2599 mpt_prt(mpt, "failed to enable port 0\n"); 2600 return (ENXIO); 2601 } 2602 } 2603 return (MPT_OK); 2604 } 2605