1 /*- 2 * Generic routines for LSI Fusion adapters. 3 * FreeBSD Version. 4 * 5 * Copyright (c) 2000, 2001 by Greg Ansley 6 * 7 * Redistribution and use in source and binary forms, with or without 8 * modification, are permitted provided that the following conditions 9 * are met: 10 * 1. Redistributions of source code must retain the above copyright 11 * notice immediately at the beginning of the file, without modification, 12 * this list of conditions, and the following disclaimer. 13 * 2. The name of the author may not be used to endorse or promote products 14 * derived from this software without specific prior written permission. 15 * 16 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND 17 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 18 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 19 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR 20 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 21 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 22 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 23 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 24 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 25 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 26 * SUCH DAMAGE. 27 */ 28 /*- 29 * Copyright (c) 2002, 2006 by Matthew Jacob 30 * All rights reserved. 31 * 32 * Redistribution and use in source and binary forms, with or without 33 * modification, are permitted provided that the following conditions are 34 * met: 35 * 1. Redistributions of source code must retain the above copyright 36 * notice, this list of conditions and the following disclaimer. 37 * 2. Redistributions in binary form must reproduce at minimum a disclaimer 38 * substantially similar to the "NO WARRANTY" disclaimer below 39 * ("Disclaimer") and any redistribution must be conditioned upon including 40 * a substantially similar Disclaimer requirement for further binary 41 * redistribution. 42 * 3. Neither the names of the above listed copyright holders nor the names 43 * of any contributors may be used to endorse or promote products derived 44 * from this software without specific prior written permission. 45 * 46 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" 47 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 48 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 49 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE 50 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 51 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 52 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 53 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 54 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 55 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF THE COPYRIGHT 56 * OWNER OR CONTRIBUTOR IS ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 57 * 58 * Support from Chris Ellsworth in order to make SAS adapters work 59 * is gratefully acknowledged. 60 */ 61 /*- 62 * Copyright (c) 2004, Avid Technology, Inc. and its contributors. 63 * Copyright (c) 2005, WHEEL Sp. z o.o. 64 * Copyright (c) 2004, 2005 Justin T. Gibbs 65 * All rights reserved. 66 * 67 * Redistribution and use in source and binary forms, with or without 68 * modification, are permitted provided that the following conditions are 69 * met: 70 * 1. Redistributions of source code must retain the above copyright 71 * notice, this list of conditions and the following disclaimer. 72 * 2. Redistributions in binary form must reproduce at minimum a disclaimer 73 * substantially similar to the "NO WARRANTY" disclaimer below 74 * ("Disclaimer") and any redistribution must be conditioned upon including 75 * a substantially similar Disclaimer requirement for further binary 76 * redistribution. 77 * 3. Neither the names of the above listed copyright holders nor the names 78 * of any contributors may be used to endorse or promote products derived 79 * from this software without specific prior written permission. 80 * 81 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" 82 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 83 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 84 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE 85 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 86 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 87 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 88 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 89 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 90 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF THE COPYRIGHT 91 * OWNER OR CONTRIBUTOR IS ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 92 */ 93 94 #include <sys/cdefs.h> 95 __FBSDID("$FreeBSD$"); 96 97 #include <dev/mpt/mpt.h> 98 #include <dev/mpt/mpt_cam.h> /* XXX For static handler registration */ 99 #include <dev/mpt/mpt_raid.h> /* XXX For static handler registration */ 100 101 #include <dev/mpt/mpilib/mpi.h> 102 #include <dev/mpt/mpilib/mpi_ioc.h> 103 #include <dev/mpt/mpilib/mpi_fc.h> 104 #include <dev/mpt/mpilib/mpi_targ.h> 105 106 #include <sys/sysctl.h> 107 108 #define MPT_MAX_TRYS 3 109 #define MPT_MAX_WAIT 300000 110 111 static int maxwait_ack = 0; 112 static int maxwait_int = 0; 113 static int maxwait_state = 0; 114 115 TAILQ_HEAD(, mpt_softc) mpt_tailq = TAILQ_HEAD_INITIALIZER(mpt_tailq); 116 mpt_reply_handler_t *mpt_reply_handlers[MPT_NUM_REPLY_HANDLERS]; 117 118 static mpt_reply_handler_t mpt_default_reply_handler; 119 static mpt_reply_handler_t mpt_config_reply_handler; 120 static mpt_reply_handler_t mpt_handshake_reply_handler; 121 static mpt_reply_handler_t mpt_event_reply_handler; 122 static void mpt_send_event_ack(struct mpt_softc *mpt, request_t *ack_req, 123 MSG_EVENT_NOTIFY_REPLY *msg, uint32_t context); 124 static int mpt_send_event_request(struct mpt_softc *mpt, int onoff); 125 static int mpt_soft_reset(struct mpt_softc *mpt); 126 static void mpt_hard_reset(struct mpt_softc *mpt); 127 static int mpt_configure_ioc(struct mpt_softc *mpt); 128 static int mpt_enable_ioc(struct mpt_softc *mpt, int); 129 130 /************************* Personality Module Support *************************/ 131 /* 132 * We include one extra entry that is guaranteed to be NULL 133 * to simplify our itterator. 134 */ 135 static struct mpt_personality *mpt_personalities[MPT_MAX_PERSONALITIES + 1]; 136 static __inline struct mpt_personality* 137 mpt_pers_find(struct mpt_softc *, u_int); 138 static __inline struct mpt_personality* 139 mpt_pers_find_reverse(struct mpt_softc *, u_int); 140 141 static __inline struct mpt_personality * 142 mpt_pers_find(struct mpt_softc *mpt, u_int start_at) 143 { 144 KASSERT(start_at <= MPT_MAX_PERSONALITIES, 145 ("mpt_pers_find: starting position out of range\n")); 146 147 while (start_at < MPT_MAX_PERSONALITIES 148 && (mpt->mpt_pers_mask & (0x1 << start_at)) == 0) { 149 start_at++; 150 } 151 return (mpt_personalities[start_at]); 152 } 153 154 /* 155 * Used infrequently, so no need to optimize like a forward 156 * traversal where we use the MAX+1 is guaranteed to be NULL 157 * trick. 158 */ 159 static __inline struct mpt_personality * 160 mpt_pers_find_reverse(struct mpt_softc *mpt, u_int start_at) 161 { 162 while (start_at < MPT_MAX_PERSONALITIES 163 && (mpt->mpt_pers_mask & (0x1 << start_at)) == 0) { 164 start_at--; 165 } 166 if (start_at < MPT_MAX_PERSONALITIES) 167 return (mpt_personalities[start_at]); 168 return (NULL); 169 } 170 171 #define MPT_PERS_FOREACH(mpt, pers) \ 172 for (pers = mpt_pers_find(mpt, /*start_at*/0); \ 173 pers != NULL; \ 174 pers = mpt_pers_find(mpt, /*start_at*/pers->id+1)) 175 176 #define MPT_PERS_FOREACH_REVERSE(mpt, pers) \ 177 for (pers = mpt_pers_find_reverse(mpt, MPT_MAX_PERSONALITIES-1);\ 178 pers != NULL; \ 179 pers = mpt_pers_find_reverse(mpt, /*start_at*/pers->id-1)) 180 181 static mpt_load_handler_t mpt_stdload; 182 static mpt_probe_handler_t mpt_stdprobe; 183 static mpt_attach_handler_t mpt_stdattach; 184 static mpt_enable_handler_t mpt_stdenable; 185 static mpt_event_handler_t mpt_stdevent; 186 static mpt_reset_handler_t mpt_stdreset; 187 static mpt_shutdown_handler_t mpt_stdshutdown; 188 static mpt_detach_handler_t mpt_stddetach; 189 static mpt_unload_handler_t mpt_stdunload; 190 static struct mpt_personality mpt_default_personality = 191 { 192 .load = mpt_stdload, 193 .probe = mpt_stdprobe, 194 .attach = mpt_stdattach, 195 .enable = mpt_stdenable, 196 .event = mpt_stdevent, 197 .reset = mpt_stdreset, 198 .shutdown = mpt_stdshutdown, 199 .detach = mpt_stddetach, 200 .unload = mpt_stdunload 201 }; 202 203 static mpt_load_handler_t mpt_core_load; 204 static mpt_attach_handler_t mpt_core_attach; 205 static mpt_enable_handler_t mpt_core_enable; 206 static mpt_reset_handler_t mpt_core_ioc_reset; 207 static mpt_event_handler_t mpt_core_event; 208 static mpt_shutdown_handler_t mpt_core_shutdown; 209 static mpt_shutdown_handler_t mpt_core_detach; 210 static mpt_unload_handler_t mpt_core_unload; 211 static struct mpt_personality mpt_core_personality = 212 { 213 .name = "mpt_core", 214 .load = mpt_core_load, 215 .attach = mpt_core_attach, 216 .enable = mpt_core_enable, 217 .event = mpt_core_event, 218 .reset = mpt_core_ioc_reset, 219 .shutdown = mpt_core_shutdown, 220 .detach = mpt_core_detach, 221 .unload = mpt_core_unload, 222 }; 223 224 /* 225 * Manual declaration so that DECLARE_MPT_PERSONALITY doesn't need 226 * ordering information. We want the core to always register FIRST. 227 * other modules are set to SI_ORDER_SECOND. 228 */ 229 static moduledata_t mpt_core_mod = { 230 "mpt_core", mpt_modevent, &mpt_core_personality 231 }; 232 DECLARE_MODULE(mpt_core, mpt_core_mod, SI_SUB_DRIVERS, SI_ORDER_FIRST); 233 MODULE_VERSION(mpt_core, 1); 234 235 #define MPT_PERS_ATTACHED(pers, mpt) ((mpt)->mpt_pers_mask & (0x1 << pers->id)) 236 237 238 int 239 mpt_modevent(module_t mod, int type, void *data) 240 { 241 struct mpt_personality *pers; 242 int error; 243 244 pers = (struct mpt_personality *)data; 245 246 error = 0; 247 switch (type) { 248 case MOD_LOAD: 249 { 250 mpt_load_handler_t **def_handler; 251 mpt_load_handler_t **pers_handler; 252 int i; 253 254 for (i = 0; i < MPT_MAX_PERSONALITIES; i++) { 255 if (mpt_personalities[i] == NULL) 256 break; 257 } 258 if (i >= MPT_MAX_PERSONALITIES) { 259 error = ENOMEM; 260 break; 261 } 262 pers->id = i; 263 mpt_personalities[i] = pers; 264 265 /* Install standard/noop handlers for any NULL entries. */ 266 def_handler = MPT_PERS_FIRST_HANDLER(&mpt_default_personality); 267 pers_handler = MPT_PERS_FIRST_HANDLER(pers); 268 while (pers_handler <= MPT_PERS_LAST_HANDLER(pers)) { 269 if (*pers_handler == NULL) 270 *pers_handler = *def_handler; 271 pers_handler++; 272 def_handler++; 273 } 274 275 error = (pers->load(pers)); 276 if (error != 0) 277 mpt_personalities[i] = NULL; 278 break; 279 } 280 case MOD_SHUTDOWN: 281 break; 282 #if __FreeBSD_version >= 500000 283 case MOD_QUIESCE: 284 break; 285 #endif 286 case MOD_UNLOAD: 287 error = pers->unload(pers); 288 mpt_personalities[pers->id] = NULL; 289 break; 290 default: 291 error = EINVAL; 292 break; 293 } 294 return (error); 295 } 296 297 int 298 mpt_stdload(struct mpt_personality *pers) 299 { 300 /* Load is always successfull. */ 301 return (0); 302 } 303 304 int 305 mpt_stdprobe(struct mpt_softc *mpt) 306 { 307 /* Probe is always successfull. */ 308 return (0); 309 } 310 311 int 312 mpt_stdattach(struct mpt_softc *mpt) 313 { 314 /* Attach is always successfull. */ 315 return (0); 316 } 317 318 int 319 mpt_stdenable(struct mpt_softc *mpt) 320 { 321 /* Enable is always successfull. */ 322 return (0); 323 } 324 325 int 326 mpt_stdevent(struct mpt_softc *mpt, request_t *req, MSG_EVENT_NOTIFY_REPLY *msg) 327 { 328 mpt_lprt(mpt, MPT_PRT_DEBUG, "mpt_stdevent: 0x%x\n", msg->Event & 0xFF); 329 /* Event was not for us. */ 330 return (0); 331 } 332 333 void 334 mpt_stdreset(struct mpt_softc *mpt, int type) 335 { 336 } 337 338 void 339 mpt_stdshutdown(struct mpt_softc *mpt) 340 { 341 } 342 343 void 344 mpt_stddetach(struct mpt_softc *mpt) 345 { 346 } 347 348 int 349 mpt_stdunload(struct mpt_personality *pers) 350 { 351 /* Unload is always successfull. */ 352 return (0); 353 } 354 355 /******************************* Bus DMA Support ******************************/ 356 void 357 mpt_map_rquest(void *arg, bus_dma_segment_t *segs, int nseg, int error) 358 { 359 struct mpt_map_info *map_info; 360 361 map_info = (struct mpt_map_info *)arg; 362 map_info->error = error; 363 map_info->phys = segs->ds_addr; 364 } 365 366 /**************************** Reply/Event Handling ****************************/ 367 int 368 mpt_register_handler(struct mpt_softc *mpt, mpt_handler_type type, 369 mpt_handler_t handler, uint32_t *phandler_id) 370 { 371 372 switch (type) { 373 case MPT_HANDLER_REPLY: 374 { 375 u_int cbi; 376 u_int free_cbi; 377 378 if (phandler_id == NULL) 379 return (EINVAL); 380 381 free_cbi = MPT_HANDLER_ID_NONE; 382 for (cbi = 0; cbi < MPT_NUM_REPLY_HANDLERS; cbi++) { 383 /* 384 * If the same handler is registered multiple 385 * times, don't error out. Just return the 386 * index of the original registration. 387 */ 388 if (mpt_reply_handlers[cbi] == handler.reply_handler) { 389 *phandler_id = MPT_CBI_TO_HID(cbi); 390 return (0); 391 } 392 393 /* 394 * Fill from the front in the hope that 395 * all registered handlers consume only a 396 * single cache line. 397 * 398 * We don't break on the first empty slot so 399 * that the full table is checked to see if 400 * this handler was previously registered. 401 */ 402 if (free_cbi == MPT_HANDLER_ID_NONE && 403 (mpt_reply_handlers[cbi] 404 == mpt_default_reply_handler)) 405 free_cbi = cbi; 406 } 407 if (free_cbi == MPT_HANDLER_ID_NONE) { 408 return (ENOMEM); 409 } 410 mpt_reply_handlers[free_cbi] = handler.reply_handler; 411 *phandler_id = MPT_CBI_TO_HID(free_cbi); 412 break; 413 } 414 default: 415 mpt_prt(mpt, "mpt_register_handler unknown type %d\n", type); 416 return (EINVAL); 417 } 418 return (0); 419 } 420 421 int 422 mpt_deregister_handler(struct mpt_softc *mpt, mpt_handler_type type, 423 mpt_handler_t handler, uint32_t handler_id) 424 { 425 426 switch (type) { 427 case MPT_HANDLER_REPLY: 428 { 429 u_int cbi; 430 431 cbi = MPT_CBI(handler_id); 432 if (cbi >= MPT_NUM_REPLY_HANDLERS 433 || mpt_reply_handlers[cbi] != handler.reply_handler) 434 return (ENOENT); 435 mpt_reply_handlers[cbi] = mpt_default_reply_handler; 436 break; 437 } 438 default: 439 mpt_prt(mpt, "mpt_deregister_handler unknown type %d\n", type); 440 return (EINVAL); 441 } 442 return (0); 443 } 444 445 static int 446 mpt_default_reply_handler(struct mpt_softc *mpt, request_t *req, 447 uint32_t reply_desc, MSG_DEFAULT_REPLY *reply_frame) 448 { 449 mpt_prt(mpt, 450 "Default Handler Called: req=%p:%u reply_descriptor=%x frame=%p\n", 451 req, req->serno, reply_desc, reply_frame); 452 453 if (reply_frame != NULL) 454 mpt_dump_reply_frame(mpt, reply_frame); 455 456 mpt_prt(mpt, "Reply Frame Ignored\n"); 457 458 return (/*free_reply*/TRUE); 459 } 460 461 static int 462 mpt_config_reply_handler(struct mpt_softc *mpt, request_t *req, 463 uint32_t reply_desc, MSG_DEFAULT_REPLY *reply_frame) 464 { 465 if (req != NULL) { 466 467 if (reply_frame != NULL) { 468 MSG_CONFIG *cfgp; 469 MSG_CONFIG_REPLY *reply; 470 471 cfgp = (MSG_CONFIG *)req->req_vbuf; 472 reply = (MSG_CONFIG_REPLY *)reply_frame; 473 req->IOCStatus = le16toh(reply_frame->IOCStatus); 474 bcopy(&reply->Header, &cfgp->Header, 475 sizeof(cfgp->Header)); 476 } 477 req->state &= ~REQ_STATE_QUEUED; 478 req->state |= REQ_STATE_DONE; 479 TAILQ_REMOVE(&mpt->request_pending_list, req, links); 480 if ((req->state & REQ_STATE_NEED_WAKEUP) != 0) { 481 wakeup(req); 482 } 483 } 484 485 return (TRUE); 486 } 487 488 static int 489 mpt_handshake_reply_handler(struct mpt_softc *mpt, request_t *req, 490 uint32_t reply_desc, MSG_DEFAULT_REPLY *reply_frame) 491 { 492 /* Nothing to be done. */ 493 return (TRUE); 494 } 495 496 static int 497 mpt_event_reply_handler(struct mpt_softc *mpt, request_t *req, 498 uint32_t reply_desc, MSG_DEFAULT_REPLY *reply_frame) 499 { 500 int free_reply; 501 502 KASSERT(reply_frame != NULL, ("null reply in mpt_event_reply_handler")); 503 KASSERT(req != NULL, ("null request in mpt_event_reply_handler")); 504 505 free_reply = TRUE; 506 switch (reply_frame->Function) { 507 case MPI_FUNCTION_EVENT_NOTIFICATION: 508 { 509 MSG_EVENT_NOTIFY_REPLY *msg; 510 struct mpt_personality *pers; 511 u_int handled; 512 513 handled = 0; 514 msg = (MSG_EVENT_NOTIFY_REPLY *)reply_frame; 515 MPT_PERS_FOREACH(mpt, pers) 516 handled += pers->event(mpt, req, msg); 517 518 if (handled == 0 && mpt->mpt_pers_mask == 0) { 519 mpt_lprt(mpt, MPT_PRT_INFO, 520 "No Handlers For Any Event Notify Frames. " 521 "Event %#x (ACK %sequired).\n", 522 msg->Event, msg->AckRequired? "r" : "not r"); 523 } else if (handled == 0) { 524 mpt_lprt(mpt, MPT_PRT_WARN, 525 "Unhandled Event Notify Frame. Event %#x " 526 "(ACK %sequired).\n", 527 msg->Event, msg->AckRequired? "r" : "not r"); 528 } 529 530 if (msg->AckRequired) { 531 request_t *ack_req; 532 uint32_t context; 533 534 context = htole32(req->index|MPT_REPLY_HANDLER_EVENTS); 535 ack_req = mpt_get_request(mpt, FALSE); 536 if (ack_req == NULL) { 537 struct mpt_evtf_record *evtf; 538 539 evtf = (struct mpt_evtf_record *)reply_frame; 540 evtf->context = context; 541 LIST_INSERT_HEAD(&mpt->ack_frames, evtf, links); 542 free_reply = FALSE; 543 break; 544 } 545 mpt_send_event_ack(mpt, ack_req, msg, context); 546 /* 547 * Don't check for CONTINUATION_REPLY here 548 */ 549 return (free_reply); 550 } 551 break; 552 } 553 case MPI_FUNCTION_PORT_ENABLE: 554 mpt_lprt(mpt, MPT_PRT_DEBUG , "enable port reply\n"); 555 break; 556 case MPI_FUNCTION_EVENT_ACK: 557 break; 558 default: 559 mpt_prt(mpt, "unknown event function: %x\n", 560 reply_frame->Function); 561 break; 562 } 563 564 /* 565 * I'm not sure that this continuation stuff works as it should. 566 * 567 * I've had FC async events occur that free the frame up because 568 * the continuation bit isn't set, and then additional async events 569 * then occur using the same context. As you might imagine, this 570 * leads to Very Bad Thing. 571 * 572 * Let's just be safe for now and not free them up until we figure 573 * out what's actually happening here. 574 */ 575 #if 0 576 if ((reply_frame->MsgFlags & MPI_MSGFLAGS_CONTINUATION_REPLY) == 0) { 577 TAILQ_REMOVE(&mpt->request_pending_list, req, links); 578 mpt_free_request(mpt, req); 579 mpt_prt(mpt, "event_reply %x for req %p:%u NOT a continuation", 580 reply_frame->Function, req, req->serno); 581 if (reply_frame->Function == MPI_FUNCTION_EVENT_NOTIFICATION) { 582 MSG_EVENT_NOTIFY_REPLY *msg = 583 (MSG_EVENT_NOTIFY_REPLY *)reply_frame; 584 mpt_prtc(mpt, " Event=0x%x AckReq=%d", 585 msg->Event, msg->AckRequired); 586 } 587 } else { 588 mpt_prt(mpt, "event_reply %x for %p:%u IS a continuation", 589 reply_frame->Function, req, req->serno); 590 if (reply_frame->Function == MPI_FUNCTION_EVENT_NOTIFICATION) { 591 MSG_EVENT_NOTIFY_REPLY *msg = 592 (MSG_EVENT_NOTIFY_REPLY *)reply_frame; 593 mpt_prtc(mpt, " Event=0x%x AckReq=%d", 594 msg->Event, msg->AckRequired); 595 } 596 mpt_prtc(mpt, "\n"); 597 } 598 #endif 599 return (free_reply); 600 } 601 602 /* 603 * Process an asynchronous event from the IOC. 604 */ 605 static int 606 mpt_core_event(struct mpt_softc *mpt, request_t *req, 607 MSG_EVENT_NOTIFY_REPLY *msg) 608 { 609 mpt_lprt(mpt, MPT_PRT_DEBUG, "mpt_core_event: 0x%x\n", 610 msg->Event & 0xFF); 611 switch(msg->Event & 0xFF) { 612 case MPI_EVENT_NONE: 613 break; 614 case MPI_EVENT_LOG_DATA: 615 { 616 int i; 617 618 /* Some error occured that LSI wants logged */ 619 mpt_prt(mpt, "EvtLogData: IOCLogInfo: 0x%08x\n", 620 msg->IOCLogInfo); 621 mpt_prt(mpt, "\tEvtLogData: Event Data:"); 622 for (i = 0; i < msg->EventDataLength; i++) 623 mpt_prtc(mpt, " %08x", msg->Data[i]); 624 mpt_prtc(mpt, "\n"); 625 break; 626 } 627 case MPI_EVENT_EVENT_CHANGE: 628 /* 629 * This is just an acknowledgement 630 * of our mpt_send_event_request. 631 */ 632 break; 633 case MPI_EVENT_SAS_DEVICE_STATUS_CHANGE: 634 break; 635 default: 636 return (0); 637 break; 638 } 639 return (1); 640 } 641 642 static void 643 mpt_send_event_ack(struct mpt_softc *mpt, request_t *ack_req, 644 MSG_EVENT_NOTIFY_REPLY *msg, uint32_t context) 645 { 646 MSG_EVENT_ACK *ackp; 647 648 ackp = (MSG_EVENT_ACK *)ack_req->req_vbuf; 649 memset(ackp, 0, sizeof (*ackp)); 650 ackp->Function = MPI_FUNCTION_EVENT_ACK; 651 ackp->Event = msg->Event; 652 ackp->EventContext = msg->EventContext; 653 ackp->MsgContext = context; 654 mpt_check_doorbell(mpt); 655 mpt_send_cmd(mpt, ack_req); 656 } 657 658 /***************************** Interrupt Handling *****************************/ 659 void 660 mpt_intr(void *arg) 661 { 662 struct mpt_softc *mpt; 663 uint32_t reply_desc; 664 int ntrips = 0; 665 666 mpt = (struct mpt_softc *)arg; 667 mpt_lprt(mpt, MPT_PRT_DEBUG2, "enter mpt_intr\n"); 668 while ((reply_desc = mpt_pop_reply_queue(mpt)) != MPT_REPLY_EMPTY) { 669 request_t *req; 670 MSG_DEFAULT_REPLY *reply_frame; 671 uint32_t reply_baddr; 672 uint32_t ctxt_idx; 673 u_int cb_index; 674 u_int req_index; 675 int free_rf; 676 677 req = NULL; 678 reply_frame = NULL; 679 reply_baddr = 0; 680 if ((reply_desc & MPI_ADDRESS_REPLY_A_BIT) != 0) { 681 u_int offset; 682 /* 683 * Insure that the reply frame is coherent. 684 */ 685 reply_baddr = MPT_REPLY_BADDR(reply_desc); 686 offset = reply_baddr - (mpt->reply_phys & 0xFFFFFFFF); 687 bus_dmamap_sync_range(mpt->reply_dmat, 688 mpt->reply_dmap, offset, MPT_REPLY_SIZE, 689 BUS_DMASYNC_POSTREAD); 690 reply_frame = MPT_REPLY_OTOV(mpt, offset); 691 ctxt_idx = le32toh(reply_frame->MsgContext); 692 } else { 693 uint32_t type; 694 695 type = MPI_GET_CONTEXT_REPLY_TYPE(reply_desc); 696 ctxt_idx = reply_desc; 697 mpt_lprt(mpt, MPT_PRT_DEBUG1, "Context Reply: 0x%08x\n", 698 reply_desc); 699 700 switch (type) { 701 case MPI_CONTEXT_REPLY_TYPE_SCSI_INIT: 702 ctxt_idx &= MPI_CONTEXT_REPLY_CONTEXT_MASK; 703 break; 704 case MPI_CONTEXT_REPLY_TYPE_SCSI_TARGET: 705 ctxt_idx = GET_IO_INDEX(reply_desc); 706 if (mpt->tgt_cmd_ptrs == NULL) { 707 mpt_prt(mpt, 708 "mpt_intr: no target cmd ptrs\n"); 709 reply_desc = MPT_REPLY_EMPTY; 710 break; 711 } 712 if (ctxt_idx >= mpt->tgt_cmds_allocated) { 713 mpt_prt(mpt, 714 "mpt_intr: bad tgt cmd ctxt %u\n", 715 ctxt_idx); 716 reply_desc = MPT_REPLY_EMPTY; 717 ntrips = 1000; 718 break; 719 } 720 req = mpt->tgt_cmd_ptrs[ctxt_idx]; 721 if (req == NULL) { 722 mpt_prt(mpt, "no request backpointer " 723 "at index %u", ctxt_idx); 724 reply_desc = MPT_REPLY_EMPTY; 725 ntrips = 1000; 726 break; 727 } 728 /* 729 * Reformulate ctxt_idx to be just as if 730 * it were another type of context reply 731 * so the code below will find the request 732 * via indexing into the pool. 733 */ 734 ctxt_idx = 735 req->index | mpt->scsi_tgt_handler_id; 736 req = NULL; 737 break; 738 case MPI_CONTEXT_REPLY_TYPE_LAN: 739 mpt_prt(mpt, "LAN CONTEXT REPLY: 0x%08x\n", 740 reply_desc); 741 reply_desc = MPT_REPLY_EMPTY; 742 break; 743 default: 744 mpt_prt(mpt, "Context Reply 0x%08x?\n", type); 745 reply_desc = MPT_REPLY_EMPTY; 746 break; 747 } 748 if (reply_desc == MPT_REPLY_EMPTY) { 749 if (ntrips++ > 1000) { 750 break; 751 } 752 continue; 753 } 754 } 755 756 cb_index = MPT_CONTEXT_TO_CBI(ctxt_idx); 757 req_index = MPT_CONTEXT_TO_REQI(ctxt_idx); 758 if (req_index < MPT_MAX_REQUESTS(mpt)) { 759 req = &mpt->request_pool[req_index]; 760 } else { 761 mpt_prt(mpt, "WARN: mpt_intr index == %d (reply_desc ==" 762 " 0x%x)\n", req_index, reply_desc); 763 } 764 765 free_rf = mpt_reply_handlers[cb_index](mpt, req, 766 reply_desc, reply_frame); 767 768 if (reply_frame != NULL && free_rf) { 769 mpt_free_reply(mpt, reply_baddr); 770 } 771 772 /* 773 * If we got ourselves disabled, don't get stuck in a loop 774 */ 775 if (mpt->disabled) { 776 mpt_disable_ints(mpt); 777 break; 778 } 779 if (ntrips++ > 1000) { 780 break; 781 } 782 } 783 mpt_lprt(mpt, MPT_PRT_DEBUG2, "exit mpt_intr\n"); 784 } 785 786 /******************************* Error Recovery *******************************/ 787 void 788 mpt_complete_request_chain(struct mpt_softc *mpt, struct req_queue *chain, 789 u_int iocstatus) 790 { 791 MSG_DEFAULT_REPLY ioc_status_frame; 792 request_t *req; 793 794 memset(&ioc_status_frame, 0, sizeof(ioc_status_frame)); 795 ioc_status_frame.MsgLength = roundup2(sizeof(ioc_status_frame), 4); 796 ioc_status_frame.IOCStatus = iocstatus; 797 while((req = TAILQ_FIRST(chain)) != NULL) { 798 MSG_REQUEST_HEADER *msg_hdr; 799 u_int cb_index; 800 801 TAILQ_REMOVE(chain, req, links); 802 msg_hdr = (MSG_REQUEST_HEADER *)req->req_vbuf; 803 ioc_status_frame.Function = msg_hdr->Function; 804 ioc_status_frame.MsgContext = msg_hdr->MsgContext; 805 cb_index = MPT_CONTEXT_TO_CBI(le32toh(msg_hdr->MsgContext)); 806 mpt_reply_handlers[cb_index](mpt, req, msg_hdr->MsgContext, 807 &ioc_status_frame); 808 } 809 } 810 811 /********************************* Diagnostics ********************************/ 812 /* 813 * Perform a diagnostic dump of a reply frame. 814 */ 815 void 816 mpt_dump_reply_frame(struct mpt_softc *mpt, MSG_DEFAULT_REPLY *reply_frame) 817 { 818 mpt_prt(mpt, "Address Reply:\n"); 819 mpt_print_reply(reply_frame); 820 } 821 822 /******************************* Doorbell Access ******************************/ 823 static __inline uint32_t mpt_rd_db(struct mpt_softc *mpt); 824 static __inline uint32_t mpt_rd_intr(struct mpt_softc *mpt); 825 826 static __inline uint32_t 827 mpt_rd_db(struct mpt_softc *mpt) 828 { 829 return mpt_read(mpt, MPT_OFFSET_DOORBELL); 830 } 831 832 static __inline uint32_t 833 mpt_rd_intr(struct mpt_softc *mpt) 834 { 835 return mpt_read(mpt, MPT_OFFSET_INTR_STATUS); 836 } 837 838 /* Busy wait for a door bell to be read by IOC */ 839 static int 840 mpt_wait_db_ack(struct mpt_softc *mpt) 841 { 842 int i; 843 for (i=0; i < MPT_MAX_WAIT; i++) { 844 if (!MPT_DB_IS_BUSY(mpt_rd_intr(mpt))) { 845 maxwait_ack = i > maxwait_ack ? i : maxwait_ack; 846 return (MPT_OK); 847 } 848 DELAY(200); 849 } 850 return (MPT_FAIL); 851 } 852 853 /* Busy wait for a door bell interrupt */ 854 static int 855 mpt_wait_db_int(struct mpt_softc *mpt) 856 { 857 int i; 858 for (i=0; i < MPT_MAX_WAIT; i++) { 859 if (MPT_DB_INTR(mpt_rd_intr(mpt))) { 860 maxwait_int = i > maxwait_int ? i : maxwait_int; 861 return MPT_OK; 862 } 863 DELAY(100); 864 } 865 return (MPT_FAIL); 866 } 867 868 /* Wait for IOC to transition to a give state */ 869 void 870 mpt_check_doorbell(struct mpt_softc *mpt) 871 { 872 uint32_t db = mpt_rd_db(mpt); 873 if (MPT_STATE(db) != MPT_DB_STATE_RUNNING) { 874 mpt_prt(mpt, "Device not running\n"); 875 mpt_print_db(db); 876 } 877 } 878 879 /* Wait for IOC to transition to a give state */ 880 static int 881 mpt_wait_state(struct mpt_softc *mpt, enum DB_STATE_BITS state) 882 { 883 int i; 884 885 for (i = 0; i < MPT_MAX_WAIT; i++) { 886 uint32_t db = mpt_rd_db(mpt); 887 if (MPT_STATE(db) == state) { 888 maxwait_state = i > maxwait_state ? i : maxwait_state; 889 return (MPT_OK); 890 } 891 DELAY(100); 892 } 893 return (MPT_FAIL); 894 } 895 896 897 /************************* Intialization/Configuration ************************/ 898 static int mpt_download_fw(struct mpt_softc *mpt); 899 900 /* Issue the reset COMMAND to the IOC */ 901 static int 902 mpt_soft_reset(struct mpt_softc *mpt) 903 { 904 mpt_lprt(mpt, MPT_PRT_DEBUG, "soft reset\n"); 905 906 /* Have to use hard reset if we are not in Running state */ 907 if (MPT_STATE(mpt_rd_db(mpt)) != MPT_DB_STATE_RUNNING) { 908 mpt_prt(mpt, "soft reset failed: device not running\n"); 909 return (MPT_FAIL); 910 } 911 912 /* If door bell is in use we don't have a chance of getting 913 * a word in since the IOC probably crashed in message 914 * processing. So don't waste our time. 915 */ 916 if (MPT_DB_IS_IN_USE(mpt_rd_db(mpt))) { 917 mpt_prt(mpt, "soft reset failed: doorbell wedged\n"); 918 return (MPT_FAIL); 919 } 920 921 /* Send the reset request to the IOC */ 922 mpt_write(mpt, MPT_OFFSET_DOORBELL, 923 MPI_FUNCTION_IOC_MESSAGE_UNIT_RESET << MPI_DOORBELL_FUNCTION_SHIFT); 924 if (mpt_wait_db_ack(mpt) != MPT_OK) { 925 mpt_prt(mpt, "soft reset failed: ack timeout\n"); 926 return (MPT_FAIL); 927 } 928 929 /* Wait for the IOC to reload and come out of reset state */ 930 if (mpt_wait_state(mpt, MPT_DB_STATE_READY) != MPT_OK) { 931 mpt_prt(mpt, "soft reset failed: device did not restart\n"); 932 return (MPT_FAIL); 933 } 934 935 return MPT_OK; 936 } 937 938 static int 939 mpt_enable_diag_mode(struct mpt_softc *mpt) 940 { 941 int try; 942 943 try = 20; 944 while (--try) { 945 946 if ((mpt_read(mpt, MPT_OFFSET_DIAGNOSTIC) & MPI_DIAG_DRWE) != 0) 947 break; 948 949 /* Enable diagnostic registers */ 950 mpt_write(mpt, MPT_OFFSET_SEQUENCE, 0xFF); 951 mpt_write(mpt, MPT_OFFSET_SEQUENCE, MPI_WRSEQ_1ST_KEY_VALUE); 952 mpt_write(mpt, MPT_OFFSET_SEQUENCE, MPI_WRSEQ_2ND_KEY_VALUE); 953 mpt_write(mpt, MPT_OFFSET_SEQUENCE, MPI_WRSEQ_3RD_KEY_VALUE); 954 mpt_write(mpt, MPT_OFFSET_SEQUENCE, MPI_WRSEQ_4TH_KEY_VALUE); 955 mpt_write(mpt, MPT_OFFSET_SEQUENCE, MPI_WRSEQ_5TH_KEY_VALUE); 956 957 DELAY(100000); 958 } 959 if (try == 0) 960 return (EIO); 961 return (0); 962 } 963 964 static void 965 mpt_disable_diag_mode(struct mpt_softc *mpt) 966 { 967 mpt_write(mpt, MPT_OFFSET_SEQUENCE, 0xFFFFFFFF); 968 } 969 970 /* This is a magic diagnostic reset that resets all the ARM 971 * processors in the chip. 972 */ 973 static void 974 mpt_hard_reset(struct mpt_softc *mpt) 975 { 976 int error; 977 int wait; 978 uint32_t diagreg; 979 980 mpt_lprt(mpt, MPT_PRT_DEBUG, "hard reset\n"); 981 982 error = mpt_enable_diag_mode(mpt); 983 if (error) { 984 mpt_prt(mpt, "WARNING - Could not enter diagnostic mode !\n"); 985 mpt_prt(mpt, "Trying to reset anyway.\n"); 986 } 987 988 diagreg = mpt_read(mpt, MPT_OFFSET_DIAGNOSTIC); 989 990 /* 991 * This appears to be a workaround required for some 992 * firmware or hardware revs. 993 */ 994 mpt_write(mpt, MPT_OFFSET_DIAGNOSTIC, diagreg | MPI_DIAG_DISABLE_ARM); 995 DELAY(1000); 996 997 /* Diag. port is now active so we can now hit the reset bit */ 998 mpt_write(mpt, MPT_OFFSET_DIAGNOSTIC, diagreg | MPI_DIAG_RESET_ADAPTER); 999 1000 /* 1001 * Ensure that the reset has finished. We delay 1ms 1002 * prior to reading the register to make sure the chip 1003 * has sufficiently completed its reset to handle register 1004 * accesses. 1005 */ 1006 wait = 5000; 1007 do { 1008 DELAY(1000); 1009 diagreg = mpt_read(mpt, MPT_OFFSET_DIAGNOSTIC); 1010 } while (--wait && (diagreg & MPI_DIAG_RESET_ADAPTER) == 0); 1011 1012 if (wait == 0) { 1013 mpt_prt(mpt, "WARNING - Failed hard reset! " 1014 "Trying to initialize anyway.\n"); 1015 } 1016 1017 /* 1018 * If we have firmware to download, it must be loaded before 1019 * the controller will become operational. Do so now. 1020 */ 1021 if (mpt->fw_image != NULL) { 1022 1023 error = mpt_download_fw(mpt); 1024 1025 if (error) { 1026 mpt_prt(mpt, "WARNING - Firmware Download Failed!\n"); 1027 mpt_prt(mpt, "Trying to initialize anyway.\n"); 1028 } 1029 } 1030 1031 /* 1032 * Reseting the controller should have disabled write 1033 * access to the diagnostic registers, but disable 1034 * manually to be sure. 1035 */ 1036 mpt_disable_diag_mode(mpt); 1037 } 1038 1039 static void 1040 mpt_core_ioc_reset(struct mpt_softc *mpt, int type) 1041 { 1042 /* 1043 * Complete all pending requests with a status 1044 * appropriate for an IOC reset. 1045 */ 1046 mpt_complete_request_chain(mpt, &mpt->request_pending_list, 1047 MPI_IOCSTATUS_INVALID_STATE); 1048 } 1049 1050 1051 /* 1052 * Reset the IOC when needed. Try software command first then if needed 1053 * poke at the magic diagnostic reset. Note that a hard reset resets 1054 * *both* IOCs on dual function chips (FC929 && LSI1030) as well as 1055 * fouls up the PCI configuration registers. 1056 */ 1057 int 1058 mpt_reset(struct mpt_softc *mpt, int reinit) 1059 { 1060 struct mpt_personality *pers; 1061 int ret; 1062 int retry_cnt = 0; 1063 1064 /* 1065 * Try a soft reset. If that fails, get out the big hammer. 1066 */ 1067 again: 1068 if ((ret = mpt_soft_reset(mpt)) != MPT_OK) { 1069 int cnt; 1070 for (cnt = 0; cnt < 5; cnt++) { 1071 /* Failed; do a hard reset */ 1072 mpt_hard_reset(mpt); 1073 1074 /* 1075 * Wait for the IOC to reload 1076 * and come out of reset state 1077 */ 1078 ret = mpt_wait_state(mpt, MPT_DB_STATE_READY); 1079 if (ret == MPT_OK) { 1080 break; 1081 } 1082 /* 1083 * Okay- try to check again... 1084 */ 1085 ret = mpt_wait_state(mpt, MPT_DB_STATE_READY); 1086 if (ret == MPT_OK) { 1087 break; 1088 } 1089 mpt_prt(mpt, "mpt_reset: failed hard reset (%d:%d)\n", 1090 retry_cnt, cnt); 1091 } 1092 } 1093 1094 if (retry_cnt == 0) { 1095 /* 1096 * Invoke reset handlers. We bump the reset count so 1097 * that mpt_wait_req() understands that regardless of 1098 * the specified wait condition, it should stop its wait. 1099 */ 1100 mpt->reset_cnt++; 1101 MPT_PERS_FOREACH(mpt, pers) 1102 pers->reset(mpt, ret); 1103 } 1104 1105 if (reinit) { 1106 ret = mpt_enable_ioc(mpt, 1); 1107 if (ret == MPT_OK) { 1108 mpt_enable_ints(mpt); 1109 } 1110 } 1111 if (ret != MPT_OK && retry_cnt++ < 2) { 1112 goto again; 1113 } 1114 return ret; 1115 } 1116 1117 /* Return a command buffer to the free queue */ 1118 void 1119 mpt_free_request(struct mpt_softc *mpt, request_t *req) 1120 { 1121 request_t *nxt; 1122 struct mpt_evtf_record *record; 1123 uint32_t reply_baddr; 1124 1125 if (req == NULL || req != &mpt->request_pool[req->index]) { 1126 panic("mpt_free_request bad req ptr\n"); 1127 return; 1128 } 1129 if ((nxt = req->chain) != NULL) { 1130 req->chain = NULL; 1131 mpt_free_request(mpt, nxt); /* NB: recursion */ 1132 } 1133 KASSERT(req->state != REQ_STATE_FREE, ("freeing free request")); 1134 KASSERT(!(req->state & REQ_STATE_LOCKED), ("freeing locked request")); 1135 KASSERT(MPT_OWNED(mpt), ("mpt_free_request: mpt not locked\n")); 1136 KASSERT(mpt_req_on_free_list(mpt, req) == 0, 1137 ("mpt_free_request: req %p:%u func %x already on freelist", 1138 req, req->serno, ((MSG_REQUEST_HEADER *)req->req_vbuf)->Function)); 1139 KASSERT(mpt_req_on_pending_list(mpt, req) == 0, 1140 ("mpt_free_request: req %p:%u func %x on pending list", 1141 req, req->serno, ((MSG_REQUEST_HEADER *)req->req_vbuf)->Function)); 1142 #ifdef INVARIANTS 1143 mpt_req_not_spcl(mpt, req, "mpt_free_request", __LINE__); 1144 #endif 1145 1146 req->ccb = NULL; 1147 if (LIST_EMPTY(&mpt->ack_frames)) { 1148 /* 1149 * Insert free ones at the tail 1150 */ 1151 req->serno = 0; 1152 req->state = REQ_STATE_FREE; 1153 #ifdef INVARIANTS 1154 memset(req->req_vbuf, 0xff, sizeof (MSG_REQUEST_HEADER)); 1155 #endif 1156 TAILQ_INSERT_TAIL(&mpt->request_free_list, req, links); 1157 if (mpt->getreqwaiter != 0) { 1158 mpt->getreqwaiter = 0; 1159 wakeup(&mpt->request_free_list); 1160 } 1161 return; 1162 } 1163 1164 /* 1165 * Process an ack frame deferred due to resource shortage. 1166 */ 1167 record = LIST_FIRST(&mpt->ack_frames); 1168 LIST_REMOVE(record, links); 1169 req->state = REQ_STATE_ALLOCATED; 1170 mpt_assign_serno(mpt, req); 1171 mpt_send_event_ack(mpt, req, &record->reply, record->context); 1172 reply_baddr = (uint32_t)((uint8_t *)record - mpt->reply) 1173 + (mpt->reply_phys & 0xFFFFFFFF); 1174 mpt_free_reply(mpt, reply_baddr); 1175 } 1176 1177 /* Get a command buffer from the free queue */ 1178 request_t * 1179 mpt_get_request(struct mpt_softc *mpt, int sleep_ok) 1180 { 1181 request_t *req; 1182 1183 retry: 1184 KASSERT(MPT_OWNED(mpt), ("mpt_get_request: mpt not locked\n")); 1185 req = TAILQ_FIRST(&mpt->request_free_list); 1186 if (req != NULL) { 1187 KASSERT(req == &mpt->request_pool[req->index], 1188 ("mpt_get_request: corrupted request free list\n")); 1189 KASSERT(req->state == REQ_STATE_FREE, 1190 ("req %p:%u not free on free list %x index %d function %x", 1191 req, req->serno, req->state, req->index, 1192 ((MSG_REQUEST_HEADER *)req->req_vbuf)->Function)); 1193 TAILQ_REMOVE(&mpt->request_free_list, req, links); 1194 req->state = REQ_STATE_ALLOCATED; 1195 req->chain = NULL; 1196 mpt_assign_serno(mpt, req); 1197 } else if (sleep_ok != 0) { 1198 mpt->getreqwaiter = 1; 1199 mpt_sleep(mpt, &mpt->request_free_list, PUSER, "mptgreq", 0); 1200 goto retry; 1201 } 1202 return (req); 1203 } 1204 1205 /* Pass the command to the IOC */ 1206 void 1207 mpt_send_cmd(struct mpt_softc *mpt, request_t *req) 1208 { 1209 uint32_t *pReq; 1210 1211 pReq = req->req_vbuf; 1212 if (mpt->verbose > MPT_PRT_TRACE) { 1213 int offset; 1214 #if __FreeBSD_version >= 500000 1215 mpt_prt(mpt, "Send Request %d (%jx):", 1216 req->index, (uintmax_t) req->req_pbuf); 1217 #else 1218 mpt_prt(mpt, "Send Request %d (%llx):", 1219 req->index, (unsigned long long) req->req_pbuf); 1220 #endif 1221 for (offset = 0; offset < mpt->request_frame_size; offset++) { 1222 if ((offset & 0x7) == 0) { 1223 mpt_prtc(mpt, "\n"); 1224 mpt_prt(mpt, " "); 1225 } 1226 mpt_prtc(mpt, " %08x", pReq[offset]); 1227 } 1228 mpt_prtc(mpt, "\n"); 1229 } 1230 bus_dmamap_sync(mpt->request_dmat, mpt->request_dmap, 1231 BUS_DMASYNC_PREWRITE); 1232 req->state |= REQ_STATE_QUEUED; 1233 KASSERT(mpt_req_on_free_list(mpt, req) == 0, 1234 ("req %p:%u func %x on freelist list in mpt_send_cmd", 1235 req, req->serno, ((MSG_REQUEST_HEADER *)req->req_vbuf)->Function)); 1236 KASSERT(mpt_req_on_pending_list(mpt, req) == 0, 1237 ("req %p:%u func %x already on pending list in mpt_send_cmd", 1238 req, req->serno, ((MSG_REQUEST_HEADER *)req->req_vbuf)->Function)); 1239 TAILQ_INSERT_HEAD(&mpt->request_pending_list, req, links); 1240 mpt_write(mpt, MPT_OFFSET_REQUEST_Q, (uint32_t) req->req_pbuf); 1241 } 1242 1243 /* 1244 * Wait for a request to complete. 1245 * 1246 * Inputs: 1247 * mpt softc of controller executing request 1248 * req request to wait for 1249 * sleep_ok nonzero implies may sleep in this context 1250 * time_ms timeout in ms. 0 implies no timeout. 1251 * 1252 * Return Values: 1253 * 0 Request completed 1254 * non-0 Timeout fired before request completion. 1255 */ 1256 int 1257 mpt_wait_req(struct mpt_softc *mpt, request_t *req, 1258 mpt_req_state_t state, mpt_req_state_t mask, 1259 int sleep_ok, int time_ms) 1260 { 1261 int error; 1262 int timeout; 1263 u_int saved_cnt; 1264 1265 /* 1266 * timeout is in ms. 0 indicates infinite wait. 1267 * Convert to ticks or 500us units depending on 1268 * our sleep mode. 1269 */ 1270 if (sleep_ok != 0) { 1271 timeout = (time_ms * hz) / 1000; 1272 } else { 1273 timeout = time_ms * 2; 1274 } 1275 req->state |= REQ_STATE_NEED_WAKEUP; 1276 mask &= ~REQ_STATE_NEED_WAKEUP; 1277 saved_cnt = mpt->reset_cnt; 1278 while ((req->state & mask) != state && mpt->reset_cnt == saved_cnt) { 1279 if (sleep_ok != 0) { 1280 error = mpt_sleep(mpt, req, PUSER, "mptreq", timeout); 1281 if (error == EWOULDBLOCK) { 1282 timeout = 0; 1283 break; 1284 } 1285 } else { 1286 if (time_ms != 0 && --timeout == 0) { 1287 break; 1288 } 1289 DELAY(500); 1290 mpt_intr(mpt); 1291 } 1292 } 1293 req->state &= ~REQ_STATE_NEED_WAKEUP; 1294 if (mpt->reset_cnt != saved_cnt) { 1295 return (EIO); 1296 } 1297 if (time_ms && timeout <= 0) { 1298 MSG_REQUEST_HEADER *msg_hdr = req->req_vbuf; 1299 mpt_prt(mpt, "mpt_wait_req(%x) timed out\n", msg_hdr->Function); 1300 return (ETIMEDOUT); 1301 } 1302 return (0); 1303 } 1304 1305 /* 1306 * Send a command to the IOC via the handshake register. 1307 * 1308 * Only done at initialization time and for certain unusual 1309 * commands such as device/bus reset as specified by LSI. 1310 */ 1311 int 1312 mpt_send_handshake_cmd(struct mpt_softc *mpt, size_t len, void *cmd) 1313 { 1314 int i; 1315 uint32_t data, *data32; 1316 1317 /* Check condition of the IOC */ 1318 data = mpt_rd_db(mpt); 1319 if ((MPT_STATE(data) != MPT_DB_STATE_READY 1320 && MPT_STATE(data) != MPT_DB_STATE_RUNNING 1321 && MPT_STATE(data) != MPT_DB_STATE_FAULT) 1322 || MPT_DB_IS_IN_USE(data)) { 1323 mpt_prt(mpt, "handshake aborted - invalid doorbell state\n"); 1324 mpt_print_db(data); 1325 return (EBUSY); 1326 } 1327 1328 /* We move things in 32 bit chunks */ 1329 len = (len + 3) >> 2; 1330 data32 = cmd; 1331 1332 /* Clear any left over pending doorbell interupts */ 1333 if (MPT_DB_INTR(mpt_rd_intr(mpt))) 1334 mpt_write(mpt, MPT_OFFSET_INTR_STATUS, 0); 1335 1336 /* 1337 * Tell the handshake reg. we are going to send a command 1338 * and how long it is going to be. 1339 */ 1340 data = (MPI_FUNCTION_HANDSHAKE << MPI_DOORBELL_FUNCTION_SHIFT) | 1341 (len << MPI_DOORBELL_ADD_DWORDS_SHIFT); 1342 mpt_write(mpt, MPT_OFFSET_DOORBELL, data); 1343 1344 /* Wait for the chip to notice */ 1345 if (mpt_wait_db_int(mpt) != MPT_OK) { 1346 mpt_prt(mpt, "mpt_send_handshake_cmd timeout1\n"); 1347 return (ETIMEDOUT); 1348 } 1349 1350 /* Clear the interrupt */ 1351 mpt_write(mpt, MPT_OFFSET_INTR_STATUS, 0); 1352 1353 if (mpt_wait_db_ack(mpt) != MPT_OK) { 1354 mpt_prt(mpt, "mpt_send_handshake_cmd timeout2\n"); 1355 return (ETIMEDOUT); 1356 } 1357 1358 /* Send the command */ 1359 for (i = 0; i < len; i++) { 1360 mpt_write(mpt, MPT_OFFSET_DOORBELL, *data32++); 1361 if (mpt_wait_db_ack(mpt) != MPT_OK) { 1362 mpt_prt(mpt, 1363 "mpt_send_handshake_cmd timeout! index = %d\n", 1364 i); 1365 return (ETIMEDOUT); 1366 } 1367 } 1368 return MPT_OK; 1369 } 1370 1371 /* Get the response from the handshake register */ 1372 int 1373 mpt_recv_handshake_reply(struct mpt_softc *mpt, size_t reply_len, void *reply) 1374 { 1375 int left, reply_left; 1376 u_int16_t *data16; 1377 MSG_DEFAULT_REPLY *hdr; 1378 1379 /* We move things out in 16 bit chunks */ 1380 reply_len >>= 1; 1381 data16 = (u_int16_t *)reply; 1382 1383 hdr = (MSG_DEFAULT_REPLY *)reply; 1384 1385 /* Get first word */ 1386 if (mpt_wait_db_int(mpt) != MPT_OK) { 1387 mpt_prt(mpt, "mpt_recv_handshake_cmd timeout1\n"); 1388 return ETIMEDOUT; 1389 } 1390 *data16++ = mpt_read(mpt, MPT_OFFSET_DOORBELL) & MPT_DB_DATA_MASK; 1391 mpt_write(mpt, MPT_OFFSET_INTR_STATUS, 0); 1392 1393 /* Get Second Word */ 1394 if (mpt_wait_db_int(mpt) != MPT_OK) { 1395 mpt_prt(mpt, "mpt_recv_handshake_cmd timeout2\n"); 1396 return ETIMEDOUT; 1397 } 1398 *data16++ = mpt_read(mpt, MPT_OFFSET_DOORBELL) & MPT_DB_DATA_MASK; 1399 mpt_write(mpt, MPT_OFFSET_INTR_STATUS, 0); 1400 1401 /* 1402 * With the second word, we can now look at the length. 1403 * Warn about a reply that's too short (except for IOC FACTS REPLY) 1404 */ 1405 if ((reply_len >> 1) != hdr->MsgLength && 1406 (hdr->Function != MPI_FUNCTION_IOC_FACTS)){ 1407 #if __FreeBSD_version >= 500000 1408 mpt_prt(mpt, "reply length does not match message length: " 1409 "got %x; expected %zx for function %x\n", 1410 hdr->MsgLength << 2, reply_len << 1, hdr->Function); 1411 #else 1412 mpt_prt(mpt, "reply length does not match message length: " 1413 "got %x; expected %x for function %x\n", 1414 hdr->MsgLength << 2, reply_len << 1, hdr->Function); 1415 #endif 1416 } 1417 1418 /* Get rest of the reply; but don't overflow the provided buffer */ 1419 left = (hdr->MsgLength << 1) - 2; 1420 reply_left = reply_len - 2; 1421 while (left--) { 1422 u_int16_t datum; 1423 1424 if (mpt_wait_db_int(mpt) != MPT_OK) { 1425 mpt_prt(mpt, "mpt_recv_handshake_cmd timeout3\n"); 1426 return ETIMEDOUT; 1427 } 1428 datum = mpt_read(mpt, MPT_OFFSET_DOORBELL); 1429 1430 if (reply_left-- > 0) 1431 *data16++ = datum & MPT_DB_DATA_MASK; 1432 1433 mpt_write(mpt, MPT_OFFSET_INTR_STATUS, 0); 1434 } 1435 1436 /* One more wait & clear at the end */ 1437 if (mpt_wait_db_int(mpt) != MPT_OK) { 1438 mpt_prt(mpt, "mpt_recv_handshake_cmd timeout4\n"); 1439 return ETIMEDOUT; 1440 } 1441 mpt_write(mpt, MPT_OFFSET_INTR_STATUS, 0); 1442 1443 if ((hdr->IOCStatus & MPI_IOCSTATUS_MASK) != MPI_IOCSTATUS_SUCCESS) { 1444 if (mpt->verbose >= MPT_PRT_TRACE) 1445 mpt_print_reply(hdr); 1446 return (MPT_FAIL | hdr->IOCStatus); 1447 } 1448 1449 return (0); 1450 } 1451 1452 static int 1453 mpt_get_iocfacts(struct mpt_softc *mpt, MSG_IOC_FACTS_REPLY *freplp) 1454 { 1455 MSG_IOC_FACTS f_req; 1456 int error; 1457 1458 memset(&f_req, 0, sizeof f_req); 1459 f_req.Function = MPI_FUNCTION_IOC_FACTS; 1460 f_req.MsgContext = htole32(MPT_REPLY_HANDLER_HANDSHAKE); 1461 error = mpt_send_handshake_cmd(mpt, sizeof f_req, &f_req); 1462 if (error) 1463 return(error); 1464 error = mpt_recv_handshake_reply(mpt, sizeof (*freplp), freplp); 1465 return (error); 1466 } 1467 1468 static int 1469 mpt_get_portfacts(struct mpt_softc *mpt, MSG_PORT_FACTS_REPLY *freplp) 1470 { 1471 MSG_PORT_FACTS f_req; 1472 int error; 1473 1474 /* XXX: Only getting PORT FACTS for Port 0 */ 1475 memset(&f_req, 0, sizeof f_req); 1476 f_req.Function = MPI_FUNCTION_PORT_FACTS; 1477 f_req.MsgContext = htole32(MPT_REPLY_HANDLER_HANDSHAKE); 1478 error = mpt_send_handshake_cmd(mpt, sizeof f_req, &f_req); 1479 if (error) 1480 return(error); 1481 error = mpt_recv_handshake_reply(mpt, sizeof (*freplp), freplp); 1482 return (error); 1483 } 1484 1485 /* 1486 * Send the initialization request. This is where we specify how many 1487 * SCSI busses and how many devices per bus we wish to emulate. 1488 * This is also the command that specifies the max size of the reply 1489 * frames from the IOC that we will be allocating. 1490 */ 1491 static int 1492 mpt_send_ioc_init(struct mpt_softc *mpt, uint32_t who) 1493 { 1494 int error = 0; 1495 MSG_IOC_INIT init; 1496 MSG_IOC_INIT_REPLY reply; 1497 1498 memset(&init, 0, sizeof init); 1499 init.WhoInit = who; 1500 init.Function = MPI_FUNCTION_IOC_INIT; 1501 if (mpt->is_fc) { 1502 init.MaxDevices = 255; 1503 } else if (mpt->is_sas) { 1504 init.MaxDevices = mpt->mpt_max_devices; 1505 } else { 1506 init.MaxDevices = 16; 1507 } 1508 init.MaxBuses = 1; 1509 1510 init.MsgVersion = htole16(MPI_VERSION); 1511 init.HeaderVersion = htole16(MPI_HEADER_VERSION); 1512 init.ReplyFrameSize = htole16(MPT_REPLY_SIZE); 1513 init.MsgContext = htole32(MPT_REPLY_HANDLER_HANDSHAKE); 1514 1515 if ((error = mpt_send_handshake_cmd(mpt, sizeof init, &init)) != 0) { 1516 return(error); 1517 } 1518 1519 error = mpt_recv_handshake_reply(mpt, sizeof reply, &reply); 1520 return (error); 1521 } 1522 1523 1524 /* 1525 * Utiltity routine to read configuration headers and pages 1526 */ 1527 int 1528 mpt_issue_cfg_req(struct mpt_softc *mpt, request_t *req, u_int Action, 1529 u_int PageVersion, u_int PageLength, u_int PageNumber, 1530 u_int PageType, uint32_t PageAddress, bus_addr_t addr, 1531 bus_size_t len, int sleep_ok, int timeout_ms) 1532 { 1533 MSG_CONFIG *cfgp; 1534 SGE_SIMPLE32 *se; 1535 1536 cfgp = req->req_vbuf; 1537 memset(cfgp, 0, sizeof *cfgp); 1538 cfgp->Action = Action; 1539 cfgp->Function = MPI_FUNCTION_CONFIG; 1540 cfgp->Header.PageVersion = PageVersion; 1541 cfgp->Header.PageLength = PageLength; 1542 cfgp->Header.PageNumber = PageNumber; 1543 cfgp->Header.PageType = PageType; 1544 cfgp->PageAddress = PageAddress; 1545 se = (SGE_SIMPLE32 *)&cfgp->PageBufferSGE; 1546 se->Address = addr; 1547 MPI_pSGE_SET_LENGTH(se, len); 1548 MPI_pSGE_SET_FLAGS(se, (MPI_SGE_FLAGS_SIMPLE_ELEMENT | 1549 MPI_SGE_FLAGS_LAST_ELEMENT | MPI_SGE_FLAGS_END_OF_BUFFER | 1550 MPI_SGE_FLAGS_END_OF_LIST | 1551 ((Action == MPI_CONFIG_ACTION_PAGE_WRITE_CURRENT 1552 || Action == MPI_CONFIG_ACTION_PAGE_WRITE_NVRAM) 1553 ? MPI_SGE_FLAGS_HOST_TO_IOC : MPI_SGE_FLAGS_IOC_TO_HOST))); 1554 cfgp->MsgContext = htole32(req->index | MPT_REPLY_HANDLER_CONFIG); 1555 1556 mpt_check_doorbell(mpt); 1557 mpt_send_cmd(mpt, req); 1558 return (mpt_wait_req(mpt, req, REQ_STATE_DONE, REQ_STATE_DONE, 1559 sleep_ok, timeout_ms)); 1560 } 1561 1562 1563 int 1564 mpt_read_cfg_header(struct mpt_softc *mpt, int PageType, int PageNumber, 1565 uint32_t PageAddress, CONFIG_PAGE_HEADER *rslt, 1566 int sleep_ok, int timeout_ms) 1567 { 1568 request_t *req; 1569 MSG_CONFIG *cfgp; 1570 int error; 1571 1572 req = mpt_get_request(mpt, sleep_ok); 1573 if (req == NULL) { 1574 mpt_prt(mpt, "mpt_read_cfg_header: Get request failed!\n"); 1575 return (ENOMEM); 1576 } 1577 1578 error = mpt_issue_cfg_req(mpt, req, MPI_CONFIG_ACTION_PAGE_HEADER, 1579 /*PageVersion*/0, /*PageLength*/0, PageNumber, 1580 PageType, PageAddress, /*addr*/0, /*len*/0, 1581 sleep_ok, timeout_ms); 1582 if (error != 0) { 1583 mpt_free_request(mpt, req); 1584 mpt_prt(mpt, "read_cfg_header timed out\n"); 1585 return (ETIMEDOUT); 1586 } 1587 1588 switch (req->IOCStatus & MPI_IOCSTATUS_MASK) { 1589 case MPI_IOCSTATUS_SUCCESS: 1590 cfgp = req->req_vbuf; 1591 bcopy(&cfgp->Header, rslt, sizeof(*rslt)); 1592 error = 0; 1593 break; 1594 case MPI_IOCSTATUS_CONFIG_INVALID_PAGE: 1595 mpt_lprt(mpt, MPT_PRT_DEBUG, 1596 "Invalid Page Type %d Number %d Addr 0x%0x\n", 1597 PageType, PageNumber, PageAddress); 1598 error = EINVAL; 1599 break; 1600 default: 1601 mpt_prt(mpt, "mpt_read_cfg_header: Config Info Status %x\n", 1602 req->IOCStatus); 1603 error = EIO; 1604 break; 1605 } 1606 mpt_free_request(mpt, req); 1607 return (error); 1608 } 1609 1610 int 1611 mpt_read_cfg_page(struct mpt_softc *mpt, int Action, uint32_t PageAddress, 1612 CONFIG_PAGE_HEADER *hdr, size_t len, int sleep_ok, 1613 int timeout_ms) 1614 { 1615 request_t *req; 1616 int error; 1617 1618 req = mpt_get_request(mpt, sleep_ok); 1619 if (req == NULL) { 1620 mpt_prt(mpt, "mpt_read_cfg_page: Get request failed!\n"); 1621 return (-1); 1622 } 1623 1624 error = mpt_issue_cfg_req(mpt, req, Action, hdr->PageVersion, 1625 hdr->PageLength, hdr->PageNumber, 1626 hdr->PageType & MPI_CONFIG_PAGETYPE_MASK, 1627 PageAddress, req->req_pbuf + MPT_RQSL(mpt), 1628 len, sleep_ok, timeout_ms); 1629 if (error != 0) { 1630 mpt_prt(mpt, "read_cfg_page(%d) timed out\n", Action); 1631 return (-1); 1632 } 1633 1634 if ((req->IOCStatus & MPI_IOCSTATUS_MASK) != MPI_IOCSTATUS_SUCCESS) { 1635 mpt_prt(mpt, "mpt_read_cfg_page: Config Info Status %x\n", 1636 req->IOCStatus); 1637 mpt_free_request(mpt, req); 1638 return (-1); 1639 } 1640 bus_dmamap_sync(mpt->request_dmat, mpt->request_dmap, 1641 BUS_DMASYNC_POSTREAD); 1642 memcpy(hdr, ((uint8_t *)req->req_vbuf)+MPT_RQSL(mpt), len); 1643 mpt_free_request(mpt, req); 1644 return (0); 1645 } 1646 1647 int 1648 mpt_write_cfg_page(struct mpt_softc *mpt, int Action, uint32_t PageAddress, 1649 CONFIG_PAGE_HEADER *hdr, size_t len, int sleep_ok, 1650 int timeout_ms) 1651 { 1652 request_t *req; 1653 u_int hdr_attr; 1654 int error; 1655 1656 hdr_attr = hdr->PageType & MPI_CONFIG_PAGEATTR_MASK; 1657 if (hdr_attr != MPI_CONFIG_PAGEATTR_CHANGEABLE && 1658 hdr_attr != MPI_CONFIG_PAGEATTR_PERSISTENT) { 1659 mpt_prt(mpt, "page type 0x%x not changeable\n", 1660 hdr->PageType & MPI_CONFIG_PAGETYPE_MASK); 1661 return (-1); 1662 } 1663 hdr->PageType &= MPI_CONFIG_PAGETYPE_MASK, 1664 1665 req = mpt_get_request(mpt, sleep_ok); 1666 if (req == NULL) 1667 return (-1); 1668 1669 memcpy(((caddr_t)req->req_vbuf)+MPT_RQSL(mpt), hdr, len); 1670 /* Restore stripped out attributes */ 1671 hdr->PageType |= hdr_attr; 1672 1673 error = mpt_issue_cfg_req(mpt, req, Action, hdr->PageVersion, 1674 hdr->PageLength, hdr->PageNumber, 1675 hdr->PageType & MPI_CONFIG_PAGETYPE_MASK, 1676 PageAddress, req->req_pbuf + MPT_RQSL(mpt), 1677 len, sleep_ok, timeout_ms); 1678 if (error != 0) { 1679 mpt_prt(mpt, "mpt_write_cfg_page timed out\n"); 1680 return (-1); 1681 } 1682 1683 if ((req->IOCStatus & MPI_IOCSTATUS_MASK) != MPI_IOCSTATUS_SUCCESS) { 1684 mpt_prt(mpt, "mpt_write_cfg_page: Config Info Status %x\n", 1685 req->IOCStatus); 1686 mpt_free_request(mpt, req); 1687 return (-1); 1688 } 1689 mpt_free_request(mpt, req); 1690 return (0); 1691 } 1692 1693 /* 1694 * Read IOC configuration information 1695 */ 1696 static int 1697 mpt_read_config_info_ioc(struct mpt_softc *mpt) 1698 { 1699 CONFIG_PAGE_HEADER hdr; 1700 struct mpt_raid_volume *mpt_raid; 1701 int rv; 1702 int i; 1703 size_t len; 1704 1705 rv = mpt_read_cfg_header(mpt, MPI_CONFIG_PAGETYPE_IOC, 1706 /*PageNumber*/2, /*PageAddress*/0, &hdr, 1707 /*sleep_ok*/FALSE, /*timeout_ms*/5000); 1708 /* 1709 * If it's an invalid page, so what? Not a supported function.... 1710 */ 1711 if (rv == EINVAL) 1712 return (0); 1713 if (rv) 1714 return (rv); 1715 1716 #if __FreeBSD_version >= 500000 1717 mpt_lprt(mpt, MPT_PRT_DEBUG, "IOC Page 2 Header: ver %x, len %zx, " 1718 "num %x, type %x\n", hdr.PageVersion, 1719 hdr.PageLength * sizeof(uint32_t), 1720 hdr.PageNumber, hdr.PageType); 1721 #else 1722 mpt_lprt(mpt, MPT_PRT_DEBUG, "IOC Page 2 Header: ver %x, len %z, " 1723 "num %x, type %x\n", hdr.PageVersion, 1724 hdr.PageLength * sizeof(uint32_t), 1725 hdr.PageNumber, hdr.PageType); 1726 #endif 1727 1728 len = hdr.PageLength * sizeof(uint32_t); 1729 mpt->ioc_page2 = malloc(len, M_DEVBUF, M_NOWAIT | M_ZERO); 1730 if (mpt->ioc_page2 == NULL) 1731 return (ENOMEM); 1732 memcpy(&mpt->ioc_page2->Header, &hdr, sizeof(hdr)); 1733 rv = mpt_read_cur_cfg_page(mpt, /*PageAddress*/0, 1734 &mpt->ioc_page2->Header, len, 1735 /*sleep_ok*/FALSE, /*timeout_ms*/5000); 1736 if (rv) { 1737 mpt_prt(mpt, "failed to read IOC Page 2\n"); 1738 } else if (mpt->ioc_page2->CapabilitiesFlags != 0) { 1739 uint32_t mask; 1740 1741 mpt_prt(mpt, "Capabilities: ("); 1742 for (mask = 1; mask != 0; mask <<= 1) { 1743 if ((mpt->ioc_page2->CapabilitiesFlags & mask) == 0) 1744 continue; 1745 1746 switch (mask) { 1747 case MPI_IOCPAGE2_CAP_FLAGS_IS_SUPPORT: 1748 mpt_prtc(mpt, " RAID-0"); 1749 break; 1750 case MPI_IOCPAGE2_CAP_FLAGS_IME_SUPPORT: 1751 mpt_prtc(mpt, " RAID-1E"); 1752 break; 1753 case MPI_IOCPAGE2_CAP_FLAGS_IM_SUPPORT: 1754 mpt_prtc(mpt, " RAID-1"); 1755 break; 1756 case MPI_IOCPAGE2_CAP_FLAGS_SES_SUPPORT: 1757 mpt_prtc(mpt, " SES"); 1758 break; 1759 case MPI_IOCPAGE2_CAP_FLAGS_SAFTE_SUPPORT: 1760 mpt_prtc(mpt, " SAFTE"); 1761 break; 1762 case MPI_IOCPAGE2_CAP_FLAGS_CROSS_CHANNEL_SUPPORT: 1763 mpt_prtc(mpt, " Multi-Channel-Arrays"); 1764 default: 1765 break; 1766 } 1767 } 1768 mpt_prtc(mpt, " )\n"); 1769 if ((mpt->ioc_page2->CapabilitiesFlags 1770 & (MPI_IOCPAGE2_CAP_FLAGS_IS_SUPPORT 1771 | MPI_IOCPAGE2_CAP_FLAGS_IME_SUPPORT 1772 | MPI_IOCPAGE2_CAP_FLAGS_IM_SUPPORT)) != 0) { 1773 mpt_prt(mpt, "%d Active Volume%s(%d Max)\n", 1774 mpt->ioc_page2->NumActiveVolumes, 1775 mpt->ioc_page2->NumActiveVolumes != 1 1776 ? "s " : " ", 1777 mpt->ioc_page2->MaxVolumes); 1778 mpt_prt(mpt, "%d Hidden Drive Member%s(%d Max)\n", 1779 mpt->ioc_page2->NumActivePhysDisks, 1780 mpt->ioc_page2->NumActivePhysDisks != 1 1781 ? "s " : " ", 1782 mpt->ioc_page2->MaxPhysDisks); 1783 } 1784 } 1785 1786 len = mpt->ioc_page2->MaxVolumes * sizeof(struct mpt_raid_volume); 1787 mpt->raid_volumes = malloc(len, M_DEVBUF, M_NOWAIT); 1788 if (mpt->raid_volumes == NULL) { 1789 mpt_prt(mpt, "Could not allocate RAID volume data\n"); 1790 } else { 1791 memset(mpt->raid_volumes, 0, len); 1792 } 1793 1794 /* 1795 * Copy critical data out of ioc_page2 so that we can 1796 * safely refresh the page without windows of unreliable 1797 * data. 1798 */ 1799 mpt->raid_max_volumes = mpt->ioc_page2->MaxVolumes; 1800 1801 len = sizeof(*mpt->raid_volumes->config_page) 1802 + (sizeof(RAID_VOL0_PHYS_DISK)*(mpt->ioc_page2->MaxPhysDisks - 1)); 1803 for (i = 0; i < mpt->ioc_page2->MaxVolumes; i++) { 1804 mpt_raid = &mpt->raid_volumes[i]; 1805 mpt_raid->config_page = malloc(len, M_DEVBUF, M_NOWAIT); 1806 if (mpt_raid->config_page == NULL) { 1807 mpt_prt(mpt, "Could not allocate RAID page data\n"); 1808 break; 1809 } 1810 memset(mpt_raid->config_page, 0, len); 1811 } 1812 mpt->raid_page0_len = len; 1813 1814 len = mpt->ioc_page2->MaxPhysDisks * sizeof(struct mpt_raid_disk); 1815 mpt->raid_disks = malloc(len, M_DEVBUF, M_NOWAIT); 1816 if (mpt->raid_disks == NULL) { 1817 mpt_prt(mpt, "Could not allocate RAID disk data\n"); 1818 } else { 1819 memset(mpt->raid_disks, 0, len); 1820 } 1821 1822 mpt->raid_max_disks = mpt->ioc_page2->MaxPhysDisks; 1823 1824 rv = mpt_read_cfg_header(mpt, MPI_CONFIG_PAGETYPE_IOC, 1825 /*PageNumber*/3, /*PageAddress*/0, &hdr, 1826 /*sleep_ok*/FALSE, /*timeout_ms*/5000); 1827 if (rv) 1828 return (EIO); 1829 1830 mpt_lprt(mpt, MPT_PRT_DEBUG, "IOC Page 3 Header: %x %x %x %x\n", 1831 hdr.PageVersion, hdr.PageLength, hdr.PageNumber, hdr.PageType); 1832 1833 if (mpt->ioc_page3 != NULL) 1834 free(mpt->ioc_page3, M_DEVBUF); 1835 len = hdr.PageLength * sizeof(uint32_t); 1836 mpt->ioc_page3 = malloc(len, M_DEVBUF, M_NOWAIT | M_ZERO); 1837 if (mpt->ioc_page3 == NULL) 1838 return (-1); 1839 memcpy(&mpt->ioc_page3->Header, &hdr, sizeof(hdr)); 1840 rv = mpt_read_cur_cfg_page(mpt, /*PageAddress*/0, 1841 &mpt->ioc_page3->Header, len, 1842 /*sleep_ok*/FALSE, /*timeout_ms*/5000); 1843 if (rv) { 1844 mpt_prt(mpt, "failed to read IOC Page 3\n"); 1845 } 1846 1847 mpt_raid_wakeup(mpt); 1848 1849 return (0); 1850 } 1851 1852 /* 1853 * Enable IOC port 1854 */ 1855 static int 1856 mpt_send_port_enable(struct mpt_softc *mpt, int port) 1857 { 1858 request_t *req; 1859 MSG_PORT_ENABLE *enable_req; 1860 int error; 1861 1862 req = mpt_get_request(mpt, /*sleep_ok*/FALSE); 1863 if (req == NULL) 1864 return (-1); 1865 1866 enable_req = req->req_vbuf; 1867 memset(enable_req, 0, MPT_RQSL(mpt)); 1868 1869 enable_req->Function = MPI_FUNCTION_PORT_ENABLE; 1870 enable_req->MsgContext = htole32(req->index | MPT_REPLY_HANDLER_CONFIG); 1871 enable_req->PortNumber = port; 1872 1873 mpt_check_doorbell(mpt); 1874 mpt_lprt(mpt, MPT_PRT_DEBUG, "enabling port %d\n", port); 1875 1876 mpt_send_cmd(mpt, req); 1877 error = mpt_wait_req(mpt, req, REQ_STATE_DONE, REQ_STATE_DONE, 1878 FALSE, (mpt->is_sas || mpt->is_fc)? 30000 : 3000); 1879 if (error != 0) { 1880 mpt_prt(mpt, "port %d enable timed out\n", port); 1881 return (-1); 1882 } 1883 mpt_free_request(mpt, req); 1884 mpt_lprt(mpt, MPT_PRT_DEBUG, "enabled port %d\n", port); 1885 return (0); 1886 } 1887 1888 /* 1889 * Enable/Disable asynchronous event reporting. 1890 */ 1891 static int 1892 mpt_send_event_request(struct mpt_softc *mpt, int onoff) 1893 { 1894 request_t *req; 1895 MSG_EVENT_NOTIFY *enable_req; 1896 1897 req = mpt_get_request(mpt, FALSE); 1898 if (req == NULL) { 1899 return (ENOMEM); 1900 } 1901 enable_req = req->req_vbuf; 1902 memset(enable_req, 0, sizeof *enable_req); 1903 1904 enable_req->Function = MPI_FUNCTION_EVENT_NOTIFICATION; 1905 enable_req->MsgContext = htole32(req->index | MPT_REPLY_HANDLER_EVENTS); 1906 enable_req->Switch = onoff; 1907 1908 mpt_check_doorbell(mpt); 1909 mpt_lprt(mpt, MPT_PRT_DEBUG, "%sabling async events\n", 1910 onoff ? "en" : "dis"); 1911 /* 1912 * Send the command off, but don't wait for it. 1913 */ 1914 mpt_send_cmd(mpt, req); 1915 return (0); 1916 } 1917 1918 /* 1919 * Un-mask the interupts on the chip. 1920 */ 1921 void 1922 mpt_enable_ints(struct mpt_softc *mpt) 1923 { 1924 /* Unmask every thing except door bell int */ 1925 mpt_write(mpt, MPT_OFFSET_INTR_MASK, MPT_INTR_DB_MASK); 1926 } 1927 1928 /* 1929 * Mask the interupts on the chip. 1930 */ 1931 void 1932 mpt_disable_ints(struct mpt_softc *mpt) 1933 { 1934 /* Mask all interrupts */ 1935 mpt_write(mpt, MPT_OFFSET_INTR_MASK, 1936 MPT_INTR_REPLY_MASK | MPT_INTR_DB_MASK); 1937 } 1938 1939 static void 1940 mpt_sysctl_attach(struct mpt_softc *mpt) 1941 { 1942 #if __FreeBSD_version >= 500000 1943 struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(mpt->dev); 1944 struct sysctl_oid *tree = device_get_sysctl_tree(mpt->dev); 1945 1946 SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO, 1947 "debug", CTLFLAG_RW, &mpt->verbose, 0, 1948 "Debugging/Verbose level"); 1949 #endif 1950 } 1951 1952 int 1953 mpt_attach(struct mpt_softc *mpt) 1954 { 1955 struct mpt_personality *pers; 1956 int i; 1957 int error; 1958 1959 for (i = 0; i < MPT_MAX_PERSONALITIES; i++) { 1960 pers = mpt_personalities[i]; 1961 if (pers == NULL) { 1962 continue; 1963 } 1964 if (pers->probe(mpt) == 0) { 1965 error = pers->attach(mpt); 1966 if (error != 0) { 1967 mpt_detach(mpt); 1968 return (error); 1969 } 1970 mpt->mpt_pers_mask |= (0x1 << pers->id); 1971 pers->use_count++; 1972 } 1973 } 1974 1975 /* 1976 * Now that we've attached everything, do the enable function 1977 * for all of the personalities. This allows the personalities 1978 * to do setups that are appropriate for them prior to enabling 1979 * any ports. 1980 */ 1981 for (i = 0; i < MPT_MAX_PERSONALITIES; i++) { 1982 pers = mpt_personalities[i]; 1983 if (pers != NULL && MPT_PERS_ATTACHED(pers, mpt) != 0) { 1984 error = pers->enable(mpt); 1985 if (error != 0) { 1986 mpt_prt(mpt, "personality %s attached but would" 1987 " not enable (%d)\n", pers->name, error); 1988 mpt_detach(mpt); 1989 return (error); 1990 } 1991 } 1992 } 1993 return (0); 1994 } 1995 1996 int 1997 mpt_shutdown(struct mpt_softc *mpt) 1998 { 1999 struct mpt_personality *pers; 2000 2001 MPT_PERS_FOREACH_REVERSE(mpt, pers) { 2002 pers->shutdown(mpt); 2003 } 2004 return (0); 2005 } 2006 2007 int 2008 mpt_detach(struct mpt_softc *mpt) 2009 { 2010 struct mpt_personality *pers; 2011 2012 MPT_PERS_FOREACH_REVERSE(mpt, pers) { 2013 pers->detach(mpt); 2014 mpt->mpt_pers_mask &= ~(0x1 << pers->id); 2015 pers->use_count--; 2016 } 2017 2018 return (0); 2019 } 2020 2021 int 2022 mpt_core_load(struct mpt_personality *pers) 2023 { 2024 int i; 2025 2026 /* 2027 * Setup core handlers and insert the default handler 2028 * into all "empty slots". 2029 */ 2030 for (i = 0; i < MPT_NUM_REPLY_HANDLERS; i++) { 2031 mpt_reply_handlers[i] = mpt_default_reply_handler; 2032 } 2033 2034 mpt_reply_handlers[MPT_CBI(MPT_REPLY_HANDLER_EVENTS)] = 2035 mpt_event_reply_handler; 2036 mpt_reply_handlers[MPT_CBI(MPT_REPLY_HANDLER_CONFIG)] = 2037 mpt_config_reply_handler; 2038 mpt_reply_handlers[MPT_CBI(MPT_REPLY_HANDLER_HANDSHAKE)] = 2039 mpt_handshake_reply_handler; 2040 return (0); 2041 } 2042 2043 /* 2044 * Initialize per-instance driver data and perform 2045 * initial controller configuration. 2046 */ 2047 int 2048 mpt_core_attach(struct mpt_softc *mpt) 2049 { 2050 int val; 2051 int error; 2052 2053 2054 LIST_INIT(&mpt->ack_frames); 2055 2056 /* Put all request buffers on the free list */ 2057 TAILQ_INIT(&mpt->request_pending_list); 2058 TAILQ_INIT(&mpt->request_free_list); 2059 TAILQ_INIT(&mpt->request_timeout_list); 2060 for (val = 0; val < MPT_MAX_REQUESTS(mpt); val++) { 2061 request_t *req = &mpt->request_pool[val]; 2062 req->state = REQ_STATE_ALLOCATED; 2063 mpt_free_request(mpt, req); 2064 } 2065 2066 for (val = 0; val < MPT_MAX_LUNS; val++) { 2067 STAILQ_INIT(&mpt->trt[val].atios); 2068 STAILQ_INIT(&mpt->trt[val].inots); 2069 } 2070 STAILQ_INIT(&mpt->trt_wildcard.atios); 2071 STAILQ_INIT(&mpt->trt_wildcard.inots); 2072 2073 mpt->scsi_tgt_handler_id = MPT_HANDLER_ID_NONE; 2074 2075 mpt_sysctl_attach(mpt); 2076 2077 mpt_lprt(mpt, MPT_PRT_DEBUG, "doorbell req = %s\n", 2078 mpt_ioc_diag(mpt_read(mpt, MPT_OFFSET_DOORBELL))); 2079 2080 error = mpt_configure_ioc(mpt); 2081 2082 return (error); 2083 } 2084 2085 int 2086 mpt_core_enable(struct mpt_softc *mpt) 2087 { 2088 /* 2089 * We enter with the IOC enabled, but async events 2090 * not enabled, ports not enabled and interrupts 2091 * not enabled. 2092 */ 2093 2094 /* 2095 * Enable asynchronous event reporting- all personalities 2096 * have attached so that they should be able to now field 2097 * async events. 2098 */ 2099 mpt_send_event_request(mpt, 1); 2100 2101 /* 2102 * Catch any pending interrupts 2103 * 2104 * This seems to be crucial- otherwise 2105 * the portenable below times out. 2106 */ 2107 mpt_intr(mpt); 2108 2109 /* 2110 * Enable Interrupts 2111 */ 2112 mpt_enable_ints(mpt); 2113 2114 /* 2115 * Catch any pending interrupts 2116 * 2117 * This seems to be crucial- otherwise 2118 * the portenable below times out. 2119 */ 2120 mpt_intr(mpt); 2121 2122 /* 2123 * Enable the port. 2124 */ 2125 if (mpt_send_port_enable(mpt, 0) != MPT_OK) { 2126 mpt_prt(mpt, "failed to enable port 0\n"); 2127 return (ENXIO); 2128 } 2129 return (0); 2130 } 2131 2132 void 2133 mpt_core_shutdown(struct mpt_softc *mpt) 2134 { 2135 mpt_disable_ints(mpt); 2136 } 2137 2138 void 2139 mpt_core_detach(struct mpt_softc *mpt) 2140 { 2141 mpt_disable_ints(mpt); 2142 } 2143 2144 int 2145 mpt_core_unload(struct mpt_personality *pers) 2146 { 2147 /* Unload is always successfull. */ 2148 return (0); 2149 } 2150 2151 #define FW_UPLOAD_REQ_SIZE \ 2152 (sizeof(MSG_FW_UPLOAD) - sizeof(SGE_MPI_UNION) \ 2153 + sizeof(FW_UPLOAD_TCSGE) + sizeof(SGE_SIMPLE32)) 2154 2155 static int 2156 mpt_upload_fw(struct mpt_softc *mpt) 2157 { 2158 uint8_t fw_req_buf[FW_UPLOAD_REQ_SIZE]; 2159 MSG_FW_UPLOAD_REPLY fw_reply; 2160 MSG_FW_UPLOAD *fw_req; 2161 FW_UPLOAD_TCSGE *tsge; 2162 SGE_SIMPLE32 *sge; 2163 uint32_t flags; 2164 int error; 2165 2166 memset(&fw_req_buf, 0, sizeof(fw_req_buf)); 2167 fw_req = (MSG_FW_UPLOAD *)fw_req_buf; 2168 fw_req->ImageType = MPI_FW_UPLOAD_ITYPE_FW_IOC_MEM; 2169 fw_req->Function = MPI_FUNCTION_FW_UPLOAD; 2170 fw_req->MsgContext = htole32(MPT_REPLY_HANDLER_HANDSHAKE); 2171 tsge = (FW_UPLOAD_TCSGE *)&fw_req->SGL; 2172 tsge->DetailsLength = 12; 2173 tsge->Flags = MPI_SGE_FLAGS_TRANSACTION_ELEMENT; 2174 tsge->ImageSize = htole32(mpt->fw_image_size); 2175 sge = (SGE_SIMPLE32 *)(tsge + 1); 2176 flags = (MPI_SGE_FLAGS_LAST_ELEMENT | MPI_SGE_FLAGS_END_OF_BUFFER 2177 | MPI_SGE_FLAGS_END_OF_LIST | MPI_SGE_FLAGS_SIMPLE_ELEMENT 2178 | MPI_SGE_FLAGS_32_BIT_ADDRESSING | MPI_SGE_FLAGS_IOC_TO_HOST); 2179 flags <<= MPI_SGE_FLAGS_SHIFT; 2180 sge->FlagsLength = htole32(flags | mpt->fw_image_size); 2181 sge->Address = htole32(mpt->fw_phys); 2182 error = mpt_send_handshake_cmd(mpt, sizeof(fw_req_buf), &fw_req_buf); 2183 if (error) 2184 return(error); 2185 error = mpt_recv_handshake_reply(mpt, sizeof(fw_reply), &fw_reply); 2186 return (error); 2187 } 2188 2189 static void 2190 mpt_diag_outsl(struct mpt_softc *mpt, uint32_t addr, 2191 uint32_t *data, bus_size_t len) 2192 { 2193 uint32_t *data_end; 2194 2195 data_end = data + (roundup2(len, sizeof(uint32_t)) / 4); 2196 pci_enable_io(mpt->dev, SYS_RES_IOPORT); 2197 mpt_pio_write(mpt, MPT_OFFSET_DIAG_ADDR, addr); 2198 while (data != data_end) { 2199 mpt_pio_write(mpt, MPT_OFFSET_DIAG_DATA, *data); 2200 data++; 2201 } 2202 pci_disable_io(mpt->dev, SYS_RES_IOPORT); 2203 } 2204 2205 static int 2206 mpt_download_fw(struct mpt_softc *mpt) 2207 { 2208 MpiFwHeader_t *fw_hdr; 2209 int error; 2210 uint32_t ext_offset; 2211 uint32_t data; 2212 2213 mpt_prt(mpt, "Downloading Firmware - Image Size %d\n", 2214 mpt->fw_image_size); 2215 2216 error = mpt_enable_diag_mode(mpt); 2217 if (error != 0) { 2218 mpt_prt(mpt, "Could not enter diagnostic mode!\n"); 2219 return (EIO); 2220 } 2221 2222 mpt_write(mpt, MPT_OFFSET_DIAGNOSTIC, 2223 MPI_DIAG_RW_ENABLE|MPI_DIAG_DISABLE_ARM); 2224 2225 fw_hdr = (MpiFwHeader_t *)mpt->fw_image; 2226 mpt_diag_outsl(mpt, fw_hdr->LoadStartAddress, (uint32_t*)fw_hdr, 2227 fw_hdr->ImageSize); 2228 2229 ext_offset = fw_hdr->NextImageHeaderOffset; 2230 while (ext_offset != 0) { 2231 MpiExtImageHeader_t *ext; 2232 2233 ext = (MpiExtImageHeader_t *)((uintptr_t)fw_hdr + ext_offset); 2234 ext_offset = ext->NextImageHeaderOffset; 2235 2236 mpt_diag_outsl(mpt, ext->LoadStartAddress, (uint32_t*)ext, 2237 ext->ImageSize); 2238 } 2239 2240 pci_enable_io(mpt->dev, SYS_RES_IOPORT); 2241 /* Setup the address to jump to on reset. */ 2242 mpt_pio_write(mpt, MPT_OFFSET_DIAG_ADDR, fw_hdr->IopResetRegAddr); 2243 mpt_pio_write(mpt, MPT_OFFSET_DIAG_DATA, fw_hdr->IopResetVectorValue); 2244 2245 /* 2246 * The controller sets the "flash bad" status after attempting 2247 * to auto-boot from flash. Clear the status so that the controller 2248 * will continue the boot process with our newly installed firmware. 2249 */ 2250 mpt_pio_write(mpt, MPT_OFFSET_DIAG_ADDR, MPT_DIAG_MEM_CFG_BASE); 2251 data = mpt_pio_read(mpt, MPT_OFFSET_DIAG_DATA) | MPT_DIAG_MEM_CFG_BADFL; 2252 mpt_pio_write(mpt, MPT_OFFSET_DIAG_ADDR, MPT_DIAG_MEM_CFG_BASE); 2253 mpt_pio_write(mpt, MPT_OFFSET_DIAG_DATA, data); 2254 2255 pci_disable_io(mpt->dev, SYS_RES_IOPORT); 2256 2257 /* 2258 * Re-enable the processor and clear the boot halt flag. 2259 */ 2260 data = mpt_read(mpt, MPT_OFFSET_DIAGNOSTIC); 2261 data &= ~(MPI_DIAG_PREVENT_IOC_BOOT|MPI_DIAG_DISABLE_ARM); 2262 mpt_write(mpt, MPT_OFFSET_DIAGNOSTIC, data); 2263 2264 mpt_disable_diag_mode(mpt); 2265 return (0); 2266 } 2267 2268 /* 2269 * Allocate/Initialize data structures for the controller. Called 2270 * once at instance startup. 2271 */ 2272 static int 2273 mpt_configure_ioc(struct mpt_softc *mpt) 2274 { 2275 MSG_PORT_FACTS_REPLY pfp; 2276 MSG_IOC_FACTS_REPLY facts; 2277 int try; 2278 int needreset; 2279 uint32_t max_chain_depth; 2280 2281 needreset = 0; 2282 for (try = 0; try < MPT_MAX_TRYS; try++) { 2283 2284 /* 2285 * No need to reset if the IOC is already in the READY state. 2286 * 2287 * Force reset if initialization failed previously. 2288 * Note that a hard_reset of the second channel of a '929 2289 * will stop operation of the first channel. Hopefully, if the 2290 * first channel is ok, the second will not require a hard 2291 * reset. 2292 */ 2293 if (needreset || MPT_STATE(mpt_rd_db(mpt)) != 2294 MPT_DB_STATE_READY) { 2295 if (mpt_reset(mpt, FALSE) != MPT_OK) { 2296 continue; 2297 } 2298 } 2299 needreset = 0; 2300 2301 if (mpt_get_iocfacts(mpt, &facts) != MPT_OK) { 2302 mpt_prt(mpt, "mpt_get_iocfacts failed\n"); 2303 needreset = 1; 2304 continue; 2305 } 2306 2307 mpt->mpt_global_credits = le16toh(facts.GlobalCredits); 2308 mpt->request_frame_size = le16toh(facts.RequestFrameSize); 2309 mpt->ioc_facts_flags = facts.Flags; 2310 mpt_prt(mpt, "MPI Version=%d.%d.%d.%d\n", 2311 le16toh(facts.MsgVersion) >> 8, 2312 le16toh(facts.MsgVersion) & 0xFF, 2313 le16toh(facts.HeaderVersion) >> 8, 2314 le16toh(facts.HeaderVersion) & 0xFF); 2315 2316 /* 2317 * Now that we know request frame size, we can calculate 2318 * the actual (reasonable) segment limit for read/write I/O. 2319 * 2320 * This limit is constrained by: 2321 * 2322 * + The size of each area we allocate per command (and how 2323 * many chain segments we can fit into it). 2324 * + The total number of areas we've set up. 2325 * + The actual chain depth the card will allow. 2326 * 2327 * The first area's segment count is limited by the I/O request 2328 * at the head of it. We cannot allocate realistically more 2329 * than MPT_MAX_REQUESTS areas. Therefore, to account for both 2330 * conditions, we'll just start out with MPT_MAX_REQUESTS-2. 2331 * 2332 */ 2333 max_chain_depth = facts.MaxChainDepth; 2334 2335 /* total number of request areas we (can) allocate */ 2336 mpt->max_seg_cnt = MPT_MAX_REQUESTS(mpt) - 2; 2337 2338 /* converted to the number of chain areas possible */ 2339 mpt->max_seg_cnt *= MPT_NRFM(mpt); 2340 2341 /* limited by the number of chain areas the card will support */ 2342 if (mpt->max_seg_cnt > max_chain_depth) { 2343 mpt_lprt(mpt, MPT_PRT_DEBUG, 2344 "chain depth limited to %u (from %u)\n", 2345 max_chain_depth, mpt->max_seg_cnt); 2346 mpt->max_seg_cnt = max_chain_depth; 2347 } 2348 2349 /* converted to the number of simple sges in chain segments. */ 2350 mpt->max_seg_cnt *= (MPT_NSGL(mpt) - 1); 2351 2352 mpt_lprt(mpt, MPT_PRT_DEBUG, 2353 "Maximum Segment Count: %u\n", mpt->max_seg_cnt); 2354 mpt_lprt(mpt, MPT_PRT_DEBUG, 2355 "MsgLength=%u IOCNumber = %d\n", 2356 facts.MsgLength, facts.IOCNumber); 2357 mpt_lprt(mpt, MPT_PRT_DEBUG, 2358 "IOCFACTS: GlobalCredits=%d BlockSize=%u bytes " 2359 "Request Frame Size %u bytes Max Chain Depth %u\n", 2360 mpt->mpt_global_credits, facts.BlockSize, 2361 mpt->request_frame_size << 2, max_chain_depth); 2362 mpt_lprt(mpt, MPT_PRT_DEBUG, 2363 "IOCFACTS: Num Ports %d, FWImageSize %d, " 2364 "Flags=%#x\n", facts.NumberOfPorts, 2365 le32toh(facts.FWImageSize), facts.Flags); 2366 2367 2368 if ((facts.Flags & MPI_IOCFACTS_FLAGS_FW_DOWNLOAD_BOOT) != 0) { 2369 struct mpt_map_info mi; 2370 int error; 2371 2372 /* 2373 * In some configurations, the IOC's firmware is 2374 * stored in a shared piece of system NVRAM that 2375 * is only accessable via the BIOS. In this 2376 * case, the firmware keeps a copy of firmware in 2377 * RAM until the OS driver retrieves it. Once 2378 * retrieved, we are responsible for re-downloading 2379 * the firmware after any hard-reset. 2380 */ 2381 mpt->fw_image_size = le32toh(facts.FWImageSize); 2382 error = mpt_dma_tag_create(mpt, mpt->parent_dmat, 2383 /*alignment*/1, /*boundary*/0, 2384 /*lowaddr*/BUS_SPACE_MAXADDR_32BIT, 2385 /*highaddr*/BUS_SPACE_MAXADDR, /*filter*/NULL, 2386 /*filterarg*/NULL, mpt->fw_image_size, 2387 /*nsegments*/1, /*maxsegsz*/mpt->fw_image_size, 2388 /*flags*/0, &mpt->fw_dmat); 2389 if (error != 0) { 2390 mpt_prt(mpt, "cannot create fw dma tag\n"); 2391 return (ENOMEM); 2392 } 2393 error = bus_dmamem_alloc(mpt->fw_dmat, 2394 (void **)&mpt->fw_image, BUS_DMA_NOWAIT, 2395 &mpt->fw_dmap); 2396 if (error != 0) { 2397 mpt_prt(mpt, "cannot allocate fw mem.\n"); 2398 bus_dma_tag_destroy(mpt->fw_dmat); 2399 return (ENOMEM); 2400 } 2401 mi.mpt = mpt; 2402 mi.error = 0; 2403 bus_dmamap_load(mpt->fw_dmat, mpt->fw_dmap, 2404 mpt->fw_image, mpt->fw_image_size, mpt_map_rquest, 2405 &mi, 0); 2406 mpt->fw_phys = mi.phys; 2407 2408 error = mpt_upload_fw(mpt); 2409 if (error != 0) { 2410 mpt_prt(mpt, "fw upload failed.\n"); 2411 bus_dmamap_unload(mpt->fw_dmat, mpt->fw_dmap); 2412 bus_dmamem_free(mpt->fw_dmat, mpt->fw_image, 2413 mpt->fw_dmap); 2414 bus_dma_tag_destroy(mpt->fw_dmat); 2415 mpt->fw_image = NULL; 2416 return (EIO); 2417 } 2418 } 2419 2420 if (mpt_get_portfacts(mpt, &pfp) != MPT_OK) { 2421 mpt_prt(mpt, "mpt_get_portfacts failed\n"); 2422 needreset = 1; 2423 continue; 2424 } 2425 2426 mpt_lprt(mpt, MPT_PRT_DEBUG, 2427 "PORTFACTS: Type %x PFlags %x IID %d MaxDev %d\n", 2428 pfp.PortType, pfp.ProtocolFlags, pfp.PortSCSIID, 2429 pfp.MaxDevices); 2430 2431 mpt->mpt_port_type = pfp.PortType; 2432 mpt->mpt_proto_flags = pfp.ProtocolFlags; 2433 if (pfp.PortType != MPI_PORTFACTS_PORTTYPE_SCSI && 2434 pfp.PortType != MPI_PORTFACTS_PORTTYPE_SAS && 2435 pfp.PortType != MPI_PORTFACTS_PORTTYPE_FC) { 2436 mpt_prt(mpt, "Unsupported Port Type (%x)\n", 2437 pfp.PortType); 2438 return (ENXIO); 2439 } 2440 mpt->mpt_max_tgtcmds = le16toh(pfp.MaxPostedCmdBuffers); 2441 2442 if (pfp.PortType == MPI_PORTFACTS_PORTTYPE_FC) { 2443 mpt->is_fc = 1; 2444 mpt->is_sas = 0; 2445 } else if (pfp.PortType == MPI_PORTFACTS_PORTTYPE_SAS) { 2446 mpt->is_fc = 0; 2447 mpt->is_sas = 1; 2448 } else { 2449 mpt->is_fc = 0; 2450 mpt->is_sas = 0; 2451 } 2452 mpt->mpt_ini_id = pfp.PortSCSIID; 2453 mpt->mpt_max_devices = pfp.MaxDevices; 2454 2455 /* 2456 * Set our expected role with what this port supports. 2457 */ 2458 2459 mpt->role = MPT_ROLE_NONE; 2460 if (pfp.ProtocolFlags & MPI_PORTFACTS_PROTOCOL_INITIATOR) { 2461 mpt->role |= MPT_ROLE_INITIATOR; 2462 } 2463 if (pfp.ProtocolFlags & MPI_PORTFACTS_PROTOCOL_TARGET) { 2464 mpt->role |= MPT_ROLE_TARGET; 2465 } 2466 if (mpt->role == MPT_ROLE_NONE) { 2467 mpt_prt(mpt, "port does not support either target or " 2468 "initiator role\n"); 2469 return (ENXIO); 2470 } 2471 2472 if (mpt_enable_ioc(mpt, 0) != MPT_OK) { 2473 mpt_prt(mpt, "unable to initialize IOC\n"); 2474 return (ENXIO); 2475 } 2476 2477 /* 2478 * Read IOC configuration information. 2479 */ 2480 mpt_read_config_info_ioc(mpt); 2481 2482 /* Everything worked */ 2483 break; 2484 } 2485 2486 if (try >= MPT_MAX_TRYS) { 2487 mpt_prt(mpt, "failed to initialize IOC"); 2488 return (EIO); 2489 } 2490 2491 return (0); 2492 } 2493 2494 static int 2495 mpt_enable_ioc(struct mpt_softc *mpt, int portenable) 2496 { 2497 uint32_t pptr; 2498 int val; 2499 2500 if (mpt_send_ioc_init(mpt, MPI_WHOINIT_HOST_DRIVER) != MPT_OK) { 2501 mpt_prt(mpt, "mpt_send_ioc_init failed\n"); 2502 return (EIO); 2503 } 2504 2505 mpt_lprt(mpt, MPT_PRT_DEBUG, "mpt_send_ioc_init ok\n"); 2506 2507 if (mpt_wait_state(mpt, MPT_DB_STATE_RUNNING) != MPT_OK) { 2508 mpt_prt(mpt, "IOC failed to go to run state\n"); 2509 return (ENXIO); 2510 } 2511 mpt_lprt(mpt, MPT_PRT_DEBUG, "IOC now at RUNSTATE\n"); 2512 2513 /* 2514 * Give it reply buffers 2515 * 2516 * Do *not* exceed global credits. 2517 */ 2518 for (val = 0, pptr = mpt->reply_phys; 2519 (pptr + MPT_REPLY_SIZE) < (mpt->reply_phys + PAGE_SIZE); 2520 pptr += MPT_REPLY_SIZE) { 2521 mpt_free_reply(mpt, pptr); 2522 if (++val == mpt->mpt_global_credits - 1) 2523 break; 2524 } 2525 2526 2527 /* 2528 * Enable the port if asked. This is only done if we're resetting 2529 * the IOC after initial startup. 2530 */ 2531 if (portenable) { 2532 /* 2533 * Enable asynchronous event reporting 2534 */ 2535 mpt_send_event_request(mpt, 1); 2536 2537 if (mpt_send_port_enable(mpt, 0) != MPT_OK) { 2538 mpt_prt(mpt, "failed to enable port 0\n"); 2539 return (ENXIO); 2540 } 2541 } 2542 return (MPT_OK); 2543 } 2544