/*-
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * Copyright 1994-2009 The FreeBSD Project.
 * All rights reserved.
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE FREEBSD PROJECT ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FREEBSD PROJECT OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
 * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * The views and conclusions contained in the software and documentation
 * are those of the authors and should not be interpreted as representing
 * official policies, either expressed or implied, of the FreeBSD Project.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_mfi.h"

#include <sys/param.h>
#include <sys/types.h>
#include <sys/kernel.h>
#include <sys/selinfo.h>
#include <sys/bus.h>
#include <sys/conf.h>
#include <sys/bio.h>
#include <sys/ioccom.h>
#include <sys/eventhandler.h>
#include <sys/callout.h>
#include <sys/uio.h>
#include <machine/bus.h>
#include <sys/sysctl.h>
#include <sys/systm.h>
#include <sys/malloc.h>

#include <dev/mfi/mfireg.h>
#include <dev/mfi/mfi_ioctl.h>
#include <dev/mfi/mfivar.h>

struct mfi_cmd_tbolt *mfi_tbolt_get_cmd(struct mfi_softc *sc,
    struct mfi_command *);
union mfi_mpi2_request_descriptor *
mfi_tbolt_get_request_descriptor(struct mfi_softc *sc, uint16_t index);
void mfi_tbolt_complete_cmd(struct mfi_softc *sc);
int mfi_tbolt_build_io(struct mfi_softc *sc, struct mfi_command *mfi_cmd,
    struct mfi_cmd_tbolt *cmd);
union mfi_mpi2_request_descriptor *mfi_tbolt_build_mpt_cmd(struct mfi_softc
    *sc, struct mfi_command *cmd);
uint8_t
mfi_build_mpt_pass_thru(struct mfi_softc *sc, struct mfi_command *mfi_cmd);
union mfi_mpi2_request_descriptor *mfi_build_and_issue_cmd(struct mfi_softc
    *sc, struct mfi_command *mfi_cmd);
void mfi_tbolt_build_ldio(struct mfi_softc *sc, struct mfi_command *mfi_cmd,
    struct mfi_cmd_tbolt *cmd);
static int mfi_tbolt_make_sgl(struct mfi_softc *sc, struct mfi_command
    *mfi_cmd, pMpi25IeeeSgeChain64_t sgl_ptr, struct mfi_cmd_tbolt *cmd);
void
map_tbolt_cmd_status(struct mfi_command *mfi_cmd, uint8_t status,
    uint8_t ext_status);
static void mfi_issue_pending_cmds_again(struct mfi_softc *sc);
static void mfi_kill_hba(struct mfi_softc *sc);
static void mfi_process_fw_state_chg_isr(void *arg);
static void mfi_sync_map_complete(struct mfi_command *);
static void mfi_queue_map_sync(struct mfi_softc *sc);

#define MFI_FUSION_ENABLE_INTERRUPT_MASK	(0x00000008)

extern int mfi_polled_cmd_timeout;
static int mfi_fw_reset_test = 0;
#ifdef MFI_DEBUG
TUNABLE_INT("hw.mfi.fw_reset_test", &mfi_fw_reset_test);
SYSCTL_INT(_hw_mfi, OID_AUTO, fw_reset_test, CTLFLAG_RWTUN, &mfi_fw_reset_test,
    0, "Force a firmware reset condition");
#endif
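
/*
 * Illustrative usage, not part of the driver: with MFI_DEBUG compiled in,
 * the knob above is registered both as a loader tunable and as a read-write
 * sysctl (CTLFLAG_RWTUN), so a firmware-reset recovery can be forced from
 * user space, e.g.:
 *
 *	hw.mfi.fw_reset_test=1		(in /boot/loader.conf)
 *	sysctl hw.mfi.fw_reset_test=1	(at runtime)
 *
 * mfi_tbolt_reset() below treats a non-zero value like a firmware fault.
 */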

void
mfi_tbolt_enable_intr_ppc(struct mfi_softc *sc)
{
	MFI_WRITE4(sc, MFI_OMSK, ~MFI_FUSION_ENABLE_INTERRUPT_MASK);
	MFI_READ4(sc, MFI_OMSK);
}

void
mfi_tbolt_disable_intr_ppc(struct mfi_softc *sc)
{
	MFI_WRITE4(sc, MFI_OMSK, 0xFFFFFFFF);
	MFI_READ4(sc, MFI_OMSK);
}

int32_t
mfi_tbolt_read_fw_status_ppc(struct mfi_softc *sc)
{
	return MFI_READ4(sc, MFI_OSP0);
}

int32_t
mfi_tbolt_check_clear_intr_ppc(struct mfi_softc *sc)
{
	int32_t status, mfi_status = 0;

	status = MFI_READ4(sc, MFI_OSTS);

	if (status & 1) {
		MFI_WRITE4(sc, MFI_OSTS, status);
		MFI_READ4(sc, MFI_OSTS);
		if (status & MFI_STATE_CHANGE_INTERRUPT) {
			mfi_status |= MFI_FIRMWARE_STATE_CHANGE;
		}

		return mfi_status;
	}
	if (!(status & MFI_FUSION_ENABLE_INTERRUPT_MASK))
		return 1;

	MFI_READ4(sc, MFI_OSTS);
	return 0;
}

void
mfi_tbolt_issue_cmd_ppc(struct mfi_softc *sc, bus_addr_t bus_add,
    uint32_t frame_cnt)
{
	bus_add |= (MFI_REQ_DESCRIPT_FLAGS_MFA
	    << MFI_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
	MFI_WRITE4(sc, MFI_IQPL, (uint32_t)bus_add);
	MFI_WRITE4(sc, MFI_IQPH, (uint32_t)((uint64_t)bus_add >> 32));
}

/*
 * mfi_tbolt_adp_reset - For controller reset
 * @regs: MFI register set
 */
int
mfi_tbolt_adp_reset(struct mfi_softc *sc)
{
	int retry = 0, i = 0, j = 0;
	int HostDiag;

	MFI_WRITE4(sc, MFI_WSR, 0xF);
	MFI_WRITE4(sc, MFI_WSR, 4);
	MFI_WRITE4(sc, MFI_WSR, 0xB);
	MFI_WRITE4(sc, MFI_WSR, 2);
	MFI_WRITE4(sc, MFI_WSR, 7);
	MFI_WRITE4(sc, MFI_WSR, 0xD);

	for (i = 0; i < 10000; i++)
		;

	HostDiag = (uint32_t)MFI_READ4(sc, MFI_HDR);

	while (!(HostDiag & DIAG_WRITE_ENABLE)) {
		for (i = 0; i < 1000; i++)
			;
		HostDiag = (uint32_t)MFI_READ4(sc, MFI_HDR);
		device_printf(sc->mfi_dev, "ADP_RESET_TBOLT: retry time=%d, "
		    "hostdiag=%#x\n", retry, HostDiag);

		if (retry++ >= 100)
			return 1;
	}

	device_printf(sc->mfi_dev, "ADP_RESET_TBOLT: HostDiag=%#x\n", HostDiag);

	MFI_WRITE4(sc, MFI_HDR, (HostDiag | DIAG_RESET_ADAPTER));

	for (i = 0; i < 10; i++) {
		for (j = 0; j < 10000; j++)
			;
	}

	HostDiag = (uint32_t)MFI_READ4(sc, MFI_RSR);
	while (HostDiag & DIAG_RESET_ADAPTER) {
		for (i = 0; i < 1000; i++)
			;
		HostDiag = (uint32_t)MFI_READ4(sc, MFI_RSR);
		device_printf(sc->mfi_dev, "ADP_RESET_TBOLT: retry time=%d, "
		    "hostdiag=%#x\n", retry, HostDiag);

		if (retry++ >= 1000)
			return 1;
	}
	return 0;
}

/*
 * This routine initializes Thunderbolt-specific device information.
 */
void
mfi_tbolt_init_globals(struct mfi_softc *sc)
{
	/* Initialize single reply size and message size */
	sc->reply_size = MEGASAS_THUNDERBOLT_REPLY_SIZE;
	sc->raid_io_msg_size = MEGASAS_THUNDERBOLT_NEW_MSG_SIZE;

	/*
	 * Calculate how many SGEs are allowed in an allocated main message:
	 * (size of the message - RAID SCSI IO message size (except SGE))
	 * / size of SGE
	 * (0x100 - (0x90 - 0x10)) / 0x10 = 8
	 */
	sc->max_SGEs_in_main_message =
	    (uint8_t)((sc->raid_io_msg_size
	    - (sizeof(struct mfi_mpi2_request_raid_scsi_io)
	    - sizeof(MPI2_SGE_IO_UNION))) / sizeof(MPI2_SGE_IO_UNION));
	/*
	 * (Command frame size allocated in SRB ext - RAID SCSI IO message
	 * size) / size of SGL;
	 * (1280 - 256) / 16 = 64
	 */
	sc->max_SGEs_in_chain_message = (MR_COMMAND_SIZE
	    - sc->raid_io_msg_size) / sizeof(MPI2_SGE_IO_UNION);
	/*
	 * (0x08 - 1) + 0x40 = 0x47 - 0x01 = 0x46; one is left for command
	 * coalescing.
	 */
	sc->mfi_max_sge = (sc->max_SGEs_in_main_message - 1)
	    + sc->max_SGEs_in_chain_message - 1;
	/*
	 * This is the offset in number of 4 * 32bit words to the next chain:
	 * (0x100 - 0x10) / 0x10 = 0xF (15)
	 */
	sc->chain_offset_value_for_main_message = (sc->raid_io_msg_size
	    - sizeof(MPI2_SGE_IO_UNION)) / 16;
	sc->chain_offset_value_for_mpt_ptmsg
	    = offsetof(struct mfi_mpi2_request_raid_scsi_io, SGL) / 16;
	sc->mfi_cmd_pool_tbolt = NULL;
	sc->request_desc_pool = NULL;
}
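
/*
 * Worked example of the sizing above, plugging in the values quoted in the
 * comments (illustrative; the real numbers come from the structure sizes at
 * compile time):
 *
 *	max_SGEs_in_main_message  = (0x100 - (0x90 - 0x10)) / 0x10 = 8
 *	max_SGEs_in_chain_message = (1280 - 256) / 16 = 64
 *	mfi_max_sge               = (8 - 1) + 64 - 1 = 70 (0x46)
 *	chain offset (main msg)   = (0x100 - 0x10) / 0x10 = 15 (0xF)
 */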

/*
 * This function calculates the memory requirement for the Thunderbolt
 * controller and returns the total required memory in bytes.
 */
uint32_t
mfi_tbolt_get_memory_requirement(struct mfi_softc *sc)
{
	uint32_t size;

	size = MEGASAS_THUNDERBOLT_MSG_ALLIGNMENT;	/* for Alignment */
	size += sc->raid_io_msg_size * (sc->mfi_max_fw_cmds + 1);
	size += sc->reply_size * sc->mfi_max_fw_cmds;
	/* this is for SGLs */
	size += MEGASAS_MAX_SZ_CHAIN_FRAME * sc->mfi_max_fw_cmds;
	return size;
}

/*
 * Description:
 *	This function will prepare message pools for the Thunderbolt
 *	controller.
 * Arguments:
 *	DevExt - HBA miniport driver's adapter data storage structure
 *	pMemLocation - start of the memory allocated for Thunderbolt.
 * Return Value:
 *	TRUE if successful
 *	FALSE if failed
 */
int
mfi_tbolt_init_desc_pool(struct mfi_softc *sc, uint8_t *mem_location,
    uint32_t tbolt_contg_length)
{
	uint32_t offset = 0;
	uint8_t *addr = mem_location;

	/* Request Descriptor Base physical Address */

	/* For Request Descriptors Virtual Memory */
	/* Initialize the aligned IO Frames Virtual Memory Pointer */
	if (((uintptr_t)addr) & (0xFF)) {
		addr = &addr[sc->raid_io_msg_size];
		addr = (uint8_t *)((uintptr_t)addr & (~0xFF));
		sc->request_message_pool_align = addr;
	} else
		sc->request_message_pool_align = addr;

	offset = sc->request_message_pool_align - sc->request_message_pool;
	sc->request_msg_busaddr = sc->mfi_tb_busaddr + offset;

	/* DJA XXX should this be bus dma ??? */
	/* Skip request message pool */
	addr = &addr[sc->raid_io_msg_size * (sc->mfi_max_fw_cmds + 1)];
	/* Reply Frame Pool is initialized */
	sc->reply_frame_pool = (struct mfi_mpi2_reply_header *) addr;
	if (((uintptr_t)addr) & (0xFF)) {
		addr = &addr[sc->reply_size];
		addr = (uint8_t *)((uintptr_t)addr & (~0xFF));
	}
	sc->reply_frame_pool_align
	    = (struct mfi_mpi2_reply_header *)addr;

	offset = (uintptr_t)sc->reply_frame_pool_align
	    - (uintptr_t)sc->request_message_pool;
	sc->reply_frame_busaddr = sc->mfi_tb_busaddr + offset;

	/* Skip Reply Frame Pool */
	addr += sc->reply_size * sc->mfi_max_fw_cmds;
	sc->reply_pool_limit = addr;

	/* initializing reply address to 0xFFFFFFFF */
	memset((uint8_t *)sc->reply_frame_pool, 0xFF,
	    (sc->reply_size * sc->mfi_max_fw_cmds));

	offset = sc->reply_size * sc->mfi_max_fw_cmds;
	sc->sg_frame_busaddr = sc->reply_frame_busaddr + offset;
	/* initialize the last_reply_idx to 0 */
	sc->last_reply_idx = 0;
	MFI_WRITE4(sc, MFI_RFPI, sc->mfi_max_fw_cmds - 1);
	MFI_WRITE4(sc, MFI_RPI, sc->last_reply_idx);
	offset = (sc->sg_frame_busaddr + (MEGASAS_MAX_SZ_CHAIN_FRAME *
	    sc->mfi_max_fw_cmds)) - sc->mfi_tb_busaddr;
	if (offset > tbolt_contg_length)
		device_printf(sc->mfi_dev, "Error: initialized more than "
		    "allocated\n");
	return 0;
}
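
/*
 * Sketch of how the contiguous DMA region is carved up by the function
 * above, mirroring mfi_tbolt_get_memory_requirement(); each pool is aligned
 * up to the next 256-byte boundary:
 *
 *	mfi_tb_busaddr
 *	  |  alignment pad
 *	  |  request message pool: raid_io_msg_size * (mfi_max_fw_cmds + 1)
 *	  |	(slot 0, SMID 0, is reserved and never used for I/O)
 *	  |  alignment pad
 *	  |  reply frame pool:     reply_size * mfi_max_fw_cmds
 *	  |  SG chain frames:      MEGASAS_MAX_SZ_CHAIN_FRAME * mfi_max_fw_cmds
 */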

/*
 * This routine prepares and issues the INIT2 frame to the firmware.
 */
int
mfi_tbolt_init_MFI_queue(struct mfi_softc *sc)
{
	struct MPI2_IOC_INIT_REQUEST *mpi2IocInit;
	struct mfi_init_frame *mfi_init;
	uintptr_t offset = 0;
	bus_addr_t phyAddress;
	MFI_ADDRESS *mfiAddressTemp;
	struct mfi_command *cm, cmd_tmp;
	int error;

	mtx_assert(&sc->mfi_io_lock, MA_OWNED);

	/* Check if initialization is already completed */
	if (sc->MFA_enabled) {
		device_printf(sc->mfi_dev, "tbolt_init already initialized!\n");
		return 1;
	}

	if ((cm = mfi_dequeue_free(sc)) == NULL) {
		device_printf(sc->mfi_dev, "tbolt_init failed to get command "
		    "entry!\n");
		return (EBUSY);
	}

	cmd_tmp.cm_frame = cm->cm_frame;
	cmd_tmp.cm_frame_busaddr = cm->cm_frame_busaddr;
	cmd_tmp.cm_dmamap = cm->cm_dmamap;

	cm->cm_frame = (union mfi_frame *)((uintptr_t)sc->mfi_tb_init);
	cm->cm_frame_busaddr = sc->mfi_tb_init_busaddr;
	cm->cm_dmamap = sc->mfi_tb_init_dmamap;
	cm->cm_frame->header.context = 0;

	/*
	 * Abuse the SG list area of the frame to hold the init_qinfo
	 * object.
	 */
	mfi_init = &cm->cm_frame->init;

	mpi2IocInit = (struct MPI2_IOC_INIT_REQUEST *)sc->mfi_tb_ioc_init_desc;
	bzero(mpi2IocInit, sizeof(struct MPI2_IOC_INIT_REQUEST));
	mpi2IocInit->Function = MPI2_FUNCTION_IOC_INIT;
	mpi2IocInit->WhoInit = MPI2_WHOINIT_HOST_DRIVER;

	/* set MsgVersion and HeaderVersion host driver was built with */
	mpi2IocInit->MsgVersion = MPI2_VERSION;
	mpi2IocInit->HeaderVersion = MPI2_HEADER_VERSION;
	mpi2IocInit->SystemRequestFrameSize = sc->raid_io_msg_size / 4;
	mpi2IocInit->ReplyDescriptorPostQueueDepth
	    = (uint16_t)sc->mfi_max_fw_cmds;
	mpi2IocInit->ReplyFreeQueueDepth = 0;	/* Not supported by MR. */

	/* Get physical address of reply frame pool */
	offset = (uintptr_t)sc->reply_frame_pool_align
	    - (uintptr_t)sc->request_message_pool;
	phyAddress = sc->mfi_tb_busaddr + offset;
	mfiAddressTemp =
	    (MFI_ADDRESS *)&mpi2IocInit->ReplyDescriptorPostQueueAddress;
	mfiAddressTemp->u.addressLow = (uint32_t)phyAddress;
	mfiAddressTemp->u.addressHigh = (uint32_t)((uint64_t)phyAddress >> 32);

	/* Get physical address of request message pool */
	offset = sc->request_message_pool_align - sc->request_message_pool;
	phyAddress = sc->mfi_tb_busaddr + offset;
	mfiAddressTemp = (MFI_ADDRESS *)&mpi2IocInit->SystemRequestFrameBaseAddress;
	mfiAddressTemp->u.addressLow = (uint32_t)phyAddress;
	mfiAddressTemp->u.addressHigh = (uint32_t)((uint64_t)phyAddress >> 32);
	mpi2IocInit->ReplyFreeQueueAddress = 0;	/* Not supported by MR. */
	mpi2IocInit->TimeStamp = time_uptime;

	if (sc->verbuf) {
		snprintf((char *)sc->verbuf, strlen(MEGASAS_VERSION) + 2, "%s\n",
		    MEGASAS_VERSION);
		mfi_init->driver_ver_lo = (uint32_t)sc->verbuf_h_busaddr;
		mfi_init->driver_ver_hi =
		    (uint32_t)((uint64_t)sc->verbuf_h_busaddr >> 32);
	}
	/* Get the physical address of the mpi2 ioc init command */
	phyAddress = sc->mfi_tb_ioc_init_busaddr;
	mfi_init->qinfo_new_addr_lo = (uint32_t)phyAddress;
	mfi_init->qinfo_new_addr_hi = (uint32_t)((uint64_t)phyAddress >> 32);
	mfi_init->header.flags |= MFI_FRAME_DONT_POST_IN_REPLY_QUEUE;

	mfi_init->header.cmd = MFI_CMD_INIT;
	mfi_init->header.data_len = sizeof(struct MPI2_IOC_INIT_REQUEST);
	mfi_init->header.cmd_status = MFI_STAT_INVALID_STATUS;

	cm->cm_data = NULL;
	cm->cm_flags |= MFI_CMD_POLLED;
	cm->cm_timestamp = time_uptime;
	if ((error = mfi_mapcmd(sc, cm)) != 0) {
		device_printf(sc->mfi_dev, "failed to send IOC init2 "
		    "command %d at %lx\n", error, (long)cm->cm_frame_busaddr);
		goto out;
	}

	if (mfi_init->header.cmd_status == MFI_STAT_OK) {
		sc->MFA_enabled = 1;
	} else {
		device_printf(sc->mfi_dev, "Init command failed %#x\n",
		    mfi_init->header.cmd_status);
		error = mfi_init->header.cmd_status;
		goto out;
	}

out:
	cm->cm_frame = cmd_tmp.cm_frame;
	cm->cm_frame_busaddr = cmd_tmp.cm_frame_busaddr;
	cm->cm_dmamap = cmd_tmp.cm_dmamap;
	mfi_release_command(cm);

	return (error);
}

int
mfi_tbolt_alloc_cmd(struct mfi_softc *sc)
{
	struct mfi_cmd_tbolt *cmd;
	bus_addr_t io_req_base_phys;
	uint8_t *io_req_base;
	int i = 0, j = 0, offset = 0;

	/*
	 * sc->mfi_cmd_pool_tbolt is an array of struct mfi_cmd_tbolt pointers.
	 * Allocate the dynamic array first and then allocate individual
	 * commands.
	 */
	sc->request_desc_pool = malloc(sizeof(
	    union mfi_mpi2_request_descriptor) * sc->mfi_max_fw_cmds,
	    M_MFIBUF, M_NOWAIT|M_ZERO);

	if (sc->request_desc_pool == NULL) {
		device_printf(sc->mfi_dev, "Could not alloc "
		    "memory for request_desc_pool\n");
		return (ENOMEM);
	}

	sc->mfi_cmd_pool_tbolt = malloc(sizeof(struct mfi_cmd_tbolt *)
	    * sc->mfi_max_fw_cmds, M_MFIBUF, M_NOWAIT|M_ZERO);

	if (sc->mfi_cmd_pool_tbolt == NULL) {
		free(sc->request_desc_pool, M_MFIBUF);
		device_printf(sc->mfi_dev, "Could not alloc "
		    "memory for cmd_pool_tbolt\n");
		return (ENOMEM);
	}

	for (i = 0; i < sc->mfi_max_fw_cmds; i++) {
		sc->mfi_cmd_pool_tbolt[i] = malloc(sizeof(
		    struct mfi_cmd_tbolt), M_MFIBUF, M_NOWAIT|M_ZERO);

		if (!sc->mfi_cmd_pool_tbolt[i]) {
			device_printf(sc->mfi_dev, "Could not alloc "
			    "cmd_pool_tbolt entry\n");

			for (j = 0; j < i; j++)
				free(sc->mfi_cmd_pool_tbolt[j], M_MFIBUF);

			free(sc->request_desc_pool, M_MFIBUF);
			sc->request_desc_pool = NULL;
			free(sc->mfi_cmd_pool_tbolt, M_MFIBUF);
			sc->mfi_cmd_pool_tbolt = NULL;

			return (ENOMEM);
		}
	}

	/*
	 * The first 256 bytes (SMID 0) is not used.  Don't add it to the
	 * cmd list.
	 */
	io_req_base = sc->request_message_pool_align
	    + MEGASAS_THUNDERBOLT_NEW_MSG_SIZE;
	io_req_base_phys = sc->request_msg_busaddr
	    + MEGASAS_THUNDERBOLT_NEW_MSG_SIZE;

	/*
	 * Add all the commands to the command pool (instance->cmd_pool).
	 * SMID 0 is reserved, so set SMID/index from 1.
	 */
	for (i = 0; i < sc->mfi_max_fw_cmds; i++) {
		cmd = sc->mfi_cmd_pool_tbolt[i];
		offset = MEGASAS_THUNDERBOLT_NEW_MSG_SIZE * i;
		cmd->index = i + 1;
		cmd->request_desc = (union mfi_mpi2_request_descriptor *)
		    (sc->request_desc_pool + i);
		cmd->io_request = (struct mfi_mpi2_request_raid_scsi_io *)
		    (io_req_base + offset);
		cmd->io_request_phys_addr = io_req_base_phys + offset;
		cmd->sg_frame = (MPI2_SGE_IO_UNION *)(sc->reply_pool_limit
		    + i * MEGASAS_MAX_SZ_CHAIN_FRAME);
		cmd->sg_frame_phys_addr = sc->sg_frame_busaddr + i
		    * MEGASAS_MAX_SZ_CHAIN_FRAME;
		cmd->sync_cmd_idx = sc->mfi_max_fw_cmds;

		TAILQ_INSERT_TAIL(&(sc->mfi_cmd_tbolt_tqh), cmd, next);
	}
	return 0;
}
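
/*
 * Quick reference for the SMID/index mapping established above; these
 * relationships are relied on by mfi_tbolt_complete_cmd() and
 * mfi_tbolt_get_request_descriptor():
 *
 *	pool slot i		<->	cmd->index == SMID == i + 1
 *	reply with SMID s	->	sc->mfi_cmd_pool_tbolt[s - 1]
 *	request descriptor	->	slot (SMID - 1) of request_desc_pool
 */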

int
mfi_tbolt_reset(struct mfi_softc *sc)
{
	uint32_t fw_state;

	mtx_lock(&sc->mfi_io_lock);
	if (sc->hw_crit_error) {
		device_printf(sc->mfi_dev, "HW CRITICAL ERROR\n");
		mtx_unlock(&sc->mfi_io_lock);
		return 1;
	}

	if (sc->mfi_flags & MFI_FLAGS_TBOLT) {
		fw_state = sc->mfi_read_fw_status(sc);
		if ((fw_state & MFI_FWSTATE_FAULT) == MFI_FWSTATE_FAULT ||
		    mfi_fw_reset_test) {
			if ((sc->disableOnlineCtrlReset == 0)
			    && (sc->adpreset == 0)) {
				device_printf(sc->mfi_dev, "Adapter RESET "
				    "condition is detected\n");
				sc->adpreset = 1;
				sc->issuepend_done = 0;
				sc->MFA_enabled = 0;
				sc->last_reply_idx = 0;
				mfi_process_fw_state_chg_isr((void *) sc);
			}
			mtx_unlock(&sc->mfi_io_lock);
			return 0;
		}
	}
	mtx_unlock(&sc->mfi_io_lock);
	return 1;
}

/*
 * mfi_intr_tbolt - isr entry point
 */
void
mfi_intr_tbolt(void *arg)
{
	struct mfi_softc *sc = (struct mfi_softc *)arg;

	if (sc->mfi_check_clear_intr(sc) == 1) {
		return;
	}
	if (sc->mfi_detaching)
		return;
	mtx_lock(&sc->mfi_io_lock);
	mfi_tbolt_complete_cmd(sc);
	sc->mfi_flags &= ~MFI_FLAGS_QFRZN;
	mfi_startio(sc);
	mtx_unlock(&sc->mfi_io_lock);
	return;
}

/*
 * map_tbolt_cmd_status - Maps FW cmd status to OS cmd status
 * @cmd : Pointer to cmd
 * @status : status of cmd returned by FW
 * @ext_status : ext status of cmd returned by FW
 */
void
map_tbolt_cmd_status(struct mfi_command *mfi_cmd, uint8_t status,
    uint8_t ext_status)
{
	switch (status) {
	case MFI_STAT_OK:
		mfi_cmd->cm_frame->header.cmd_status = MFI_STAT_OK;
		mfi_cmd->cm_frame->dcmd.header.cmd_status = MFI_STAT_OK;
		mfi_cmd->cm_error = MFI_STAT_OK;
		break;

	case MFI_STAT_SCSI_IO_FAILED:
	case MFI_STAT_LD_INIT_IN_PROGRESS:
		mfi_cmd->cm_frame->header.cmd_status = status;
		mfi_cmd->cm_frame->header.scsi_status = ext_status;
		mfi_cmd->cm_frame->dcmd.header.cmd_status = status;
		mfi_cmd->cm_frame->dcmd.header.scsi_status
		    = ext_status;
		break;

	case MFI_STAT_SCSI_DONE_WITH_ERROR:
		mfi_cmd->cm_frame->header.cmd_status = ext_status;
		mfi_cmd->cm_frame->dcmd.header.cmd_status = ext_status;
		break;

	case MFI_STAT_LD_OFFLINE:
	case MFI_STAT_DEVICE_NOT_FOUND:
		mfi_cmd->cm_frame->header.cmd_status = status;
		mfi_cmd->cm_frame->dcmd.header.cmd_status = status;
		break;

	default:
		mfi_cmd->cm_frame->header.cmd_status = status;
		mfi_cmd->cm_frame->dcmd.header.cmd_status = status;
		break;
	}
}

/*
 * mfi_tbolt_return_cmd - Return a cmd to the free command pool
 * @instance: Adapter soft state
 * @tbolt_cmd: Tbolt command packet to be returned to the free command pool
 * @mfi_cmd: Owning MFI command packet
 */
void
mfi_tbolt_return_cmd(struct mfi_softc *sc, struct mfi_cmd_tbolt *tbolt_cmd,
    struct mfi_command *mfi_cmd)
{
	mtx_assert(&sc->mfi_io_lock, MA_OWNED);

	mfi_cmd->cm_flags &= ~MFI_CMD_TBOLT;
	mfi_cmd->cm_extra_frames = 0;
	tbolt_cmd->sync_cmd_idx = sc->mfi_max_fw_cmds;

	TAILQ_INSERT_TAIL(&sc->mfi_cmd_tbolt_tqh, tbolt_cmd, next);
}

void
mfi_tbolt_complete_cmd(struct mfi_softc *sc)
{
	struct mfi_mpi2_reply_header *desc, *reply_desc;
	struct mfi_command *cmd_mfi;	/* For MFA Cmds */
	struct mfi_cmd_tbolt *cmd_tbolt;
	uint16_t smid;
	uint8_t reply_descript_type;
	struct mfi_mpi2_request_raid_scsi_io *scsi_io_req;
	uint32_t status, extStatus;
	uint16_t num_completed;
	union desc_value val;

	mtx_assert(&sc->mfi_io_lock, MA_OWNED);

	desc = (struct mfi_mpi2_reply_header *)
	    ((uintptr_t)sc->reply_frame_pool_align
	    + sc->last_reply_idx * sc->reply_size);
	reply_desc = desc;

	if (reply_desc == NULL) {
		device_printf(sc->mfi_dev, "reply desc is NULL!!\n");
		return;
	}

	reply_descript_type = reply_desc->ReplyFlags
	    & MPI2_RPY_DESCRIPT_FLAGS_TYPE_MASK;
	if (reply_descript_type == MPI2_RPY_DESCRIPT_FLAGS_UNUSED)
		return;

	num_completed = 0;
	val.word = ((union mfi_mpi2_reply_descriptor *)desc)->words;

	/* Read Reply descriptor */
	while ((val.u.low != 0xFFFFFFFF) && (val.u.high != 0xFFFFFFFF)) {
		smid = reply_desc->SMID;
		if (smid == 0 || smid > sc->mfi_max_fw_cmds) {
			device_printf(sc->mfi_dev, "smid is %d cannot "
			    "proceed - skipping\n", smid);
			goto next;
		}
		cmd_tbolt = sc->mfi_cmd_pool_tbolt[smid - 1];
		if (cmd_tbolt->sync_cmd_idx == sc->mfi_max_fw_cmds) {
			device_printf(sc->mfi_dev, "cmd_tbolt %p "
			    "has invalid sync_cmd_idx=%d - skipping\n",
			    cmd_tbolt, cmd_tbolt->sync_cmd_idx);
			goto next;
		}
		cmd_mfi = &sc->mfi_commands[cmd_tbolt->sync_cmd_idx];
		scsi_io_req = cmd_tbolt->io_request;

		status = cmd_mfi->cm_frame->dcmd.header.cmd_status;
		extStatus = cmd_mfi->cm_frame->dcmd.header.scsi_status;
		map_tbolt_cmd_status(cmd_mfi, status, extStatus);

		/* mfi_tbolt_return_cmd is handled by mfi complete / return */
		if ((cmd_mfi->cm_flags & MFI_CMD_SCSI) != 0 &&
		    (cmd_mfi->cm_flags & MFI_CMD_POLLED) != 0) {
			/* polled LD/SYSPD IO command */
			/* XXX mark okay for now DJA */
			cmd_mfi->cm_frame->header.cmd_status = MFI_STAT_OK;

		} else {
			/* remove command from busy queue if not polled */
			if ((cmd_mfi->cm_flags & MFI_ON_MFIQ_BUSY) != 0)
				mfi_remove_busy(cmd_mfi);

			/* complete the command */
			mfi_complete(sc, cmd_mfi);
		}

next:
		sc->last_reply_idx++;
		if (sc->last_reply_idx >= sc->mfi_max_fw_cmds) {
			MFI_WRITE4(sc, MFI_RPI, sc->last_reply_idx);
			sc->last_reply_idx = 0;
		}

		/* Set it back to all 0xFF */
		((union mfi_mpi2_reply_descriptor *)desc)->words =
		    ~((uint64_t)0x00);

		num_completed++;

		/* Get the next reply descriptor */
		desc = (struct mfi_mpi2_reply_header *)
		    ((uintptr_t)sc->reply_frame_pool_align
		    + sc->last_reply_idx * sc->reply_size);
		reply_desc = desc;
		val.word = ((union mfi_mpi2_reply_descriptor *)desc)->words;
		reply_descript_type = reply_desc->ReplyFlags
		    & MPI2_RPY_DESCRIPT_FLAGS_TYPE_MASK;
		if (reply_descript_type == MPI2_RPY_DESCRIPT_FLAGS_UNUSED)
			break;
	}

	if (!num_completed)
		return;

	/* update replyIndex to FW */
	if (sc->last_reply_idx)
		MFI_WRITE4(sc, MFI_RPI, sc->last_reply_idx);

	return;
}
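
/*
 * A minimal sketch of the reply-ring convention used above (illustrative
 * only, not compiled into the driver): a slot is "empty" when both 32-bit
 * halves read 0xFFFFFFFF, which is why mfi_tbolt_init_desc_pool() fills the
 * pool with 0xFF and why consumed slots are reset to ~0.
 */
#if 0
static __inline int
mfi_tbolt_reply_is_empty(union mfi_mpi2_reply_descriptor *d)
{

	return (d->words == ~(uint64_t)0);
}
#endif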

/*
 * mfi_tbolt_get_cmd - Get a command from the free pool
 * @instance: Adapter soft state
 *
 * Returns a free command from the pool
 */
struct mfi_cmd_tbolt *
mfi_tbolt_get_cmd(struct mfi_softc *sc, struct mfi_command *mfi_cmd)
{
	struct mfi_cmd_tbolt *cmd = NULL;

	mtx_assert(&sc->mfi_io_lock, MA_OWNED);

	if ((cmd = TAILQ_FIRST(&sc->mfi_cmd_tbolt_tqh)) == NULL)
		return (NULL);
	TAILQ_REMOVE(&sc->mfi_cmd_tbolt_tqh, cmd, next);
	memset((uint8_t *)cmd->sg_frame, 0, MEGASAS_MAX_SZ_CHAIN_FRAME);
	memset((uint8_t *)cmd->io_request, 0,
	    MEGASAS_THUNDERBOLT_NEW_MSG_SIZE);

	cmd->sync_cmd_idx = mfi_cmd->cm_index;
	mfi_cmd->cm_extra_frames = cmd->index;	/* Frame count used as SMID */
	mfi_cmd->cm_flags |= MFI_CMD_TBOLT;

	return cmd;
}

union mfi_mpi2_request_descriptor *
mfi_tbolt_get_request_descriptor(struct mfi_softc *sc, uint16_t index)
{
	uint8_t *p;

	if (index >= sc->mfi_max_fw_cmds) {
		device_printf(sc->mfi_dev, "Invalid SMID (0x%x) request "
		    "for descriptor\n", index);
		return NULL;
	}
	p = sc->request_desc_pool + sizeof(union mfi_mpi2_request_descriptor)
	    * index;
	memset(p, 0, sizeof(union mfi_mpi2_request_descriptor));
	return (union mfi_mpi2_request_descriptor *)p;
}

/* Used to build IOCTL cmd */
uint8_t
mfi_build_mpt_pass_thru(struct mfi_softc *sc, struct mfi_command *mfi_cmd)
{
	MPI25_IEEE_SGE_CHAIN64 *mpi25_ieee_chain;
	struct mfi_mpi2_request_raid_scsi_io *io_req;
	struct mfi_cmd_tbolt *cmd;

	cmd = mfi_tbolt_get_cmd(sc, mfi_cmd);
	if (!cmd)
		return EBUSY;
	io_req = cmd->io_request;
	mpi25_ieee_chain = (MPI25_IEEE_SGE_CHAIN64 *)&io_req->SGL.IeeeChain;

	io_req->Function = MPI2_FUNCTION_PASSTHRU_IO_REQUEST;
	io_req->SGLOffset0 = offsetof(struct mfi_mpi2_request_raid_scsi_io,
	    SGL) / 4;
	io_req->ChainOffset = sc->chain_offset_value_for_mpt_ptmsg;

	mpi25_ieee_chain->Address = mfi_cmd->cm_frame_busaddr;

	/*
	 * In MFI pass thru, nextChainOffset will always be zero to
	 * indicate the end of the chain.
	 */
	mpi25_ieee_chain->Flags = MPI2_IEEE_SGE_FLAGS_CHAIN_ELEMENT
	    | MPI2_IEEE_SGE_FLAGS_IOCPLBNTA_ADDR;

	/* setting the length to the maximum length */
	mpi25_ieee_chain->Length = 1024;

	return 0;
}
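
/*
 * Note on the pass-through above: instead of copying the legacy MFI frame
 * into the MPT message, its bus address is placed in a single IEEE chain
 * element whose NextChainOffset stays zero, so the firmware fetches the
 * original frame directly and treats it as the end of the chain; the
 * 1024-byte Length is just an upper bound on the frame size.
 */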

void
mfi_tbolt_build_ldio(struct mfi_softc *sc, struct mfi_command *mfi_cmd,
    struct mfi_cmd_tbolt *cmd)
{
	uint32_t start_lba_lo = 0, start_lba_hi = 0, device_id;
	struct mfi_mpi2_request_raid_scsi_io *io_request;
	struct IO_REQUEST_INFO io_info;

	device_id = mfi_cmd->cm_frame->io.header.target_id;
	io_request = cmd->io_request;
	io_request->RaidContext.TargetID = device_id;
	io_request->RaidContext.Status = 0;
	io_request->RaidContext.exStatus = 0;

	start_lba_lo = mfi_cmd->cm_frame->io.lba_lo;
	start_lba_hi = mfi_cmd->cm_frame->io.lba_hi;

	memset(&io_info, 0, sizeof(struct IO_REQUEST_INFO));
	io_info.ldStartBlock = ((uint64_t)start_lba_hi << 32) | start_lba_lo;
	io_info.numBlocks = mfi_cmd->cm_frame->io.header.data_len;
	io_info.ldTgtId = device_id;
	if ((mfi_cmd->cm_frame->header.flags & MFI_FRAME_DIR_READ) ==
	    MFI_FRAME_DIR_READ)
		io_info.isRead = 1;

	io_request->RaidContext.timeoutValue
	    = MFI_FUSION_FP_DEFAULT_TIMEOUT;
	io_request->Function = MPI2_FUNCTION_LD_IO_REQUEST;
	io_request->DevHandle = device_id;
	cmd->request_desc->header.RequestFlags
	    = (MFI_REQ_DESCRIPT_FLAGS_LD_IO
	    << MFI_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
	if ((io_request->IoFlags == 6) && (io_info.numBlocks == 0))
		io_request->RaidContext.RegLockLength = 0x100;
	io_request->DataLength = mfi_cmd->cm_frame->io.header.data_len
	    * MFI_SECTOR_LEN;
}

int
mfi_tbolt_build_io(struct mfi_softc *sc, struct mfi_command *mfi_cmd,
    struct mfi_cmd_tbolt *cmd)
{
	struct mfi_mpi2_request_raid_scsi_io *io_request;
	uint32_t sge_count;
	uint8_t cdb_len;
	int readop;
	u_int64_t lba;

	io_request = cmd->io_request;
	if (!(mfi_cmd->cm_frame->header.cmd == MFI_CMD_LD_READ
	    || mfi_cmd->cm_frame->header.cmd == MFI_CMD_LD_WRITE))
		return 1;

	mfi_tbolt_build_ldio(sc, mfi_cmd, cmd);

	/* Convert to SCSI command CDB */
	bzero(io_request->CDB.CDB32, sizeof(io_request->CDB.CDB32));
	if (mfi_cmd->cm_frame->header.cmd == MFI_CMD_LD_WRITE)
		readop = 0;
	else
		readop = 1;

	lba = mfi_cmd->cm_frame->io.lba_hi;
	lba = (lba << 32) + mfi_cmd->cm_frame->io.lba_lo;
	cdb_len = mfi_build_cdb(readop, 0, lba,
	    mfi_cmd->cm_frame->io.header.data_len, io_request->CDB.CDB32);

	/* Just the CDB length, rest of the Flags are zero */
	io_request->IoFlags = cdb_len;

	/*
	 * Construct SGL
	 */
	sge_count = mfi_tbolt_make_sgl(sc, mfi_cmd,
	    (pMpi25IeeeSgeChain64_t) &io_request->SGL, cmd);
	if (sge_count > sc->mfi_max_sge) {
		device_printf(sc->mfi_dev, "Error. sge_count (0x%x) exceeds "
		    "max (0x%x) allowed\n", sge_count, sc->mfi_max_sge);
		return 1;
	}
	io_request->RaidContext.numSGE = sge_count;
	io_request->SGLFlags = MPI2_SGE_FLAGS_64_BIT_ADDRESSING;

	if (mfi_cmd->cm_frame->header.cmd == MFI_CMD_LD_WRITE)
		io_request->Control = MPI2_SCSIIO_CONTROL_WRITE;
	else
		io_request->Control = MPI2_SCSIIO_CONTROL_READ;

	io_request->SGLOffset0 = offsetof(
	    struct mfi_mpi2_request_raid_scsi_io, SGL) / 4;

	io_request->SenseBufferLowAddress = mfi_cmd->cm_sense_busaddr;
	io_request->SenseBufferLength = MFI_SENSE_LEN;
	io_request->RaidContext.Status = MFI_STAT_INVALID_STATUS;
	io_request->RaidContext.exStatus = MFI_STAT_INVALID_STATUS;

	return 0;
}

static int
mfi_tbolt_make_sgl(struct mfi_softc *sc, struct mfi_command *mfi_cmd,
    pMpi25IeeeSgeChain64_t sgl_ptr, struct mfi_cmd_tbolt *cmd)
{
	uint8_t i, sg_processed, sg_to_process;
	uint8_t sge_count, sge_idx;
	union mfi_sgl *os_sgl;

	/*
	 * Return 0 if there is no data transfer
	 */
	if (!mfi_cmd->cm_sg || !mfi_cmd->cm_len) {
		device_printf(sc->mfi_dev, "Buffer empty\n");
		return 0;
	}
	os_sgl = mfi_cmd->cm_sg;
	sge_count = mfi_cmd->cm_frame->header.sg_count;

	if (sge_count > sc->mfi_max_sge) {
		device_printf(sc->mfi_dev, "sgl ptr %p sg_cnt %d\n",
		    os_sgl, sge_count);
		return sge_count;
	}

	if (sge_count > sc->max_SGEs_in_main_message)
		/* One element to store the chain info */
		sge_idx = sc->max_SGEs_in_main_message - 1;
	else
		sge_idx = sge_count;

	/*
	 * On 32-bit BSD the OS hands us 32-bit SGLs, but the firmware only
	 * accepts 64-bit SGEs, so copy from the 32-bit entries into 64-bit
	 * IEEE SGEs.
	 */
	for (i = 0; i < sge_idx; i++) {
		if (sc->mfi_flags & MFI_FLAGS_SKINNY) {
			sgl_ptr->Length = os_sgl->sg_skinny[i].len;
			sgl_ptr->Address = os_sgl->sg_skinny[i].addr;
		} else {
			sgl_ptr->Length = os_sgl->sg32[i].len;
			sgl_ptr->Address = os_sgl->sg32[i].addr;
		}
		sgl_ptr->Flags = 0;
		sgl_ptr++;
		cmd->io_request->ChainOffset = 0;
	}

	sg_processed = i;

	if (sg_processed < sge_count) {
		pMpi25IeeeSgeChain64_t sg_chain;

		sg_to_process = sge_count - sg_processed;
		cmd->io_request->ChainOffset =
		    sc->chain_offset_value_for_main_message;
		sg_chain = sgl_ptr;
		/* Prepare chain element */
		sg_chain->NextChainOffset = 0;
		sg_chain->Flags = (MPI2_IEEE_SGE_FLAGS_CHAIN_ELEMENT |
		    MPI2_IEEE_SGE_FLAGS_IOCPLBNTA_ADDR);
		sg_chain->Length = (sizeof(MPI2_SGE_IO_UNION) *
		    (sge_count - sg_processed));
		sg_chain->Address = cmd->sg_frame_phys_addr;
		sgl_ptr = (pMpi25IeeeSgeChain64_t)cmd->sg_frame;
		for (; i < sge_count; i++) {
			if (sc->mfi_flags & MFI_FLAGS_SKINNY) {
				sgl_ptr->Length = os_sgl->sg_skinny[i].len;
				sgl_ptr->Address = os_sgl->sg_skinny[i].addr;
			} else {
				sgl_ptr->Length = os_sgl->sg32[i].len;
				sgl_ptr->Address = os_sgl->sg32[i].addr;
			}
			sgl_ptr->Flags = 0;
			sgl_ptr++;
		}
	}
	return sge_count;
}
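
/*
 * Worked example of the chaining above, assuming the sizing from
 * mfi_tbolt_init_globals() where max_SGEs_in_main_message is 8: a 20-entry
 * OS SGL is emitted as 7 inline SGEs, then one chain element whose Address
 * points at cmd->sg_frame, followed by the remaining 13 SGEs in that chain
 * frame.  ChainOffset tells the firmware, in 16-byte units, where the chain
 * element sits inside the main message.
 */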

union mfi_mpi2_request_descriptor *
mfi_build_and_issue_cmd(struct mfi_softc *sc, struct mfi_command *mfi_cmd)
{
	struct mfi_cmd_tbolt *cmd;
	union mfi_mpi2_request_descriptor *req_desc = NULL;
	uint16_t index;

	cmd = mfi_tbolt_get_cmd(sc, mfi_cmd);
	if (cmd == NULL)
		return (NULL);

	index = cmd->index;
	req_desc = mfi_tbolt_get_request_descriptor(sc, index - 1);
	if (req_desc == NULL) {
		mfi_tbolt_return_cmd(sc, cmd, mfi_cmd);
		return (NULL);
	}

	if (mfi_tbolt_build_io(sc, mfi_cmd, cmd) != 0) {
		mfi_tbolt_return_cmd(sc, cmd, mfi_cmd);
		return (NULL);
	}
	req_desc->header.SMID = index;
	return req_desc;
}

union mfi_mpi2_request_descriptor *
mfi_tbolt_build_mpt_cmd(struct mfi_softc *sc, struct mfi_command *cmd)
{
	union mfi_mpi2_request_descriptor *req_desc = NULL;
	uint16_t index;

	if (mfi_build_mpt_pass_thru(sc, cmd)) {
		device_printf(sc->mfi_dev, "Couldn't build MFI pass thru "
		    "cmd\n");
		return NULL;
	}
	/* For fusion the frame_count variable is used for SMID */
	index = cmd->cm_extra_frames;

	req_desc = mfi_tbolt_get_request_descriptor(sc, index - 1);
	if (req_desc == NULL)
		return NULL;

	bzero(req_desc, sizeof(*req_desc));
	req_desc->header.RequestFlags = (MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO <<
	    MFI_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
	req_desc->header.SMID = index;
	return req_desc;
}

int
mfi_tbolt_send_frame(struct mfi_softc *sc, struct mfi_command *cm)
{
	struct mfi_frame_header *hdr;
	uint8_t *cdb;
	union mfi_mpi2_request_descriptor *req_desc = NULL;
	int tm = mfi_polled_cmd_timeout * 1000;

	hdr = &cm->cm_frame->header;
	cdb = cm->cm_frame->pass.cdb;
	if (sc->adpreset)
		return 1;
	if ((cm->cm_flags & MFI_CMD_POLLED) == 0) {
		cm->cm_timestamp = time_uptime;
		mfi_enqueue_busy(cm);
	} else {	/* still get interrupts for it */
		hdr->cmd_status = MFI_STAT_INVALID_STATUS;
		hdr->flags |= MFI_FRAME_DONT_POST_IN_REPLY_QUEUE;
	}

	if (hdr->cmd == MFI_CMD_PD_SCSI_IO) {
		/* check for inquiry commands coming from CLI */
		if (cdb[0] != 0x28 || cdb[0] != 0x2A) {
			if ((req_desc = mfi_tbolt_build_mpt_cmd(sc, cm)) ==
			    NULL) {
				device_printf(sc->mfi_dev, "Mapping from MFI "
				    "to MPT Failed\n");
				return 1;
			}
		}
		else
			device_printf(sc->mfi_dev, "DJA NA XXX SYSPDIO\n");
	} else if (hdr->cmd == MFI_CMD_LD_SCSI_IO ||
	    hdr->cmd == MFI_CMD_LD_READ || hdr->cmd == MFI_CMD_LD_WRITE) {
		cm->cm_flags |= MFI_CMD_SCSI;
		if ((req_desc = mfi_build_and_issue_cmd(sc, cm)) == NULL) {
			device_printf(sc->mfi_dev, "LDIO Failed\n");
			return 1;
		}
	} else if ((req_desc = mfi_tbolt_build_mpt_cmd(sc, cm)) == NULL) {
		device_printf(sc->mfi_dev, "Mapping from MFI to MPT Failed\n");
		return (1);
	}

	if (cm->cm_flags & MFI_CMD_SCSI) {
		/*
		 * LD IO needs to be posted since it doesn't get
		 * acknowledged via a status update so have the
		 * controller reply via mfi_tbolt_complete_cmd.
		 */
		hdr->flags &= ~MFI_FRAME_DONT_POST_IN_REPLY_QUEUE;
	}

	MFI_WRITE4(sc, MFI_ILQP, (req_desc->words & 0xFFFFFFFF));
	MFI_WRITE4(sc, MFI_IHQP, (req_desc->words >> 0x20));

	if ((cm->cm_flags & MFI_CMD_POLLED) == 0)
		return 0;

	/*
	 * This is a polled command, so busy-wait for it to complete.
	 *
	 * The value of hdr->cmd_status is updated directly by the hardware
	 * so there is no guarantee that mfi_tbolt_complete_cmd is called
	 * prior to this value changing.
	 */
	while (hdr->cmd_status == MFI_STAT_INVALID_STATUS) {
		DELAY(1000);
		tm -= 1;
		if (tm <= 0)
			break;
		if (cm->cm_flags & MFI_CMD_SCSI) {
			/*
			 * Force a check of the reply queue.
			 * This ensures that dump works correctly.
			 */
			mfi_tbolt_complete_cmd(sc);
		}
	}

	/* ensure the command cleanup has been processed before returning */
	mfi_tbolt_complete_cmd(sc);

	if (hdr->cmd_status == MFI_STAT_INVALID_STATUS) {
		device_printf(sc->mfi_dev, "Frame %p timed out "
		    "command 0x%X\n", hdr, cm->cm_frame->dcmd.opcode);
		return (ETIMEDOUT);
	}
	return 0;
}
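
/*
 * A minimal sketch of how a request descriptor reaches the controller
 * (illustrative only, not compiled; it mirrors the MFI_ILQP/MFI_IHQP writes
 * in mfi_tbolt_send_frame() above): the 64-bit descriptor packing the
 * request flags and SMID is posted as two 32-bit register writes, low word
 * first.
 */
#if 0
static __inline void
mfi_tbolt_post_desc(struct mfi_softc *sc,
    union mfi_mpi2_request_descriptor *req_desc)
{

	MFI_WRITE4(sc, MFI_ILQP, (uint32_t)(req_desc->words & 0xFFFFFFFF));
	MFI_WRITE4(sc, MFI_IHQP, (uint32_t)(req_desc->words >> 32));
}
#endif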

static void
mfi_issue_pending_cmds_again(struct mfi_softc *sc)
{
	struct mfi_command *cm, *tmp;
	struct mfi_cmd_tbolt *cmd;

	mtx_assert(&sc->mfi_io_lock, MA_OWNED);
	TAILQ_FOREACH_REVERSE_SAFE(cm, &sc->mfi_busy, BUSYQ, cm_link, tmp) {
		cm->retry_for_fw_reset++;

		/*
		 * If a command has continuously been tried multiple times
		 * and keeps causing a FW reset condition, no further
		 * recoveries should be performed on the controller.
		 */
		if (cm->retry_for_fw_reset == 3) {
			device_printf(sc->mfi_dev, "megaraid_sas: command %p "
			    "index=%d was tried multiple times during adapter "
			    "reset - Shutting down the HBA\n", cm, cm->cm_index);
			mfi_kill_hba(sc);
			sc->hw_crit_error = 1;
			return;
		}

		mfi_remove_busy(cm);
		if ((cm->cm_flags & MFI_CMD_TBOLT) != 0) {
			if (cm->cm_extra_frames != 0 && cm->cm_extra_frames <=
			    sc->mfi_max_fw_cmds) {
				cmd = sc->mfi_cmd_pool_tbolt[cm->cm_extra_frames - 1];
				mfi_tbolt_return_cmd(sc, cmd, cm);
			} else {
				device_printf(sc->mfi_dev,
				    "Invalid extra_frames: %d detected\n",
				    cm->cm_extra_frames);
			}
		}

		if (cm->cm_frame->dcmd.opcode != MFI_DCMD_CTRL_EVENT_WAIT) {
			device_printf(sc->mfi_dev,
			    "APJ ****requeue command %p index=%d\n",
			    cm, cm->cm_index);
			mfi_requeue_ready(cm);
		} else
			mfi_release_command(cm);
	}
	mfi_startio(sc);
}

static void
mfi_kill_hba(struct mfi_softc *sc)
{
	if (sc->mfi_flags & MFI_FLAGS_TBOLT)
		MFI_WRITE4(sc, 0x00, MFI_STOP_ADP);
	else
		MFI_WRITE4(sc, MFI_IDB, MFI_STOP_ADP);
}

static void
mfi_process_fw_state_chg_isr(void *arg)
{
	struct mfi_softc *sc = (struct mfi_softc *)arg;
	int error, status;

	if (sc->adpreset == 1) {
		device_printf(sc->mfi_dev, "First stage of FW reset "
		    "initiated...\n");

		sc->mfi_adp_reset(sc);
		sc->mfi_enable_intr(sc);

		device_printf(sc->mfi_dev, "First stage of reset complete, "
		    "second stage initiated...\n");

		sc->adpreset = 2;

		/* wait about 20 seconds before starting the second init */
		for (int wait = 0; wait < 20000; wait++)
			DELAY(1000);
		device_printf(sc->mfi_dev, "Second stage of FW reset "
		    "initiated...\n");
		while ((status = MFI_READ4(sc, MFI_RSR)) & 0x04)
			;

		sc->mfi_disable_intr(sc);

		/* We expect the FW state to be READY */
		if (mfi_transition_firmware(sc)) {
			device_printf(sc->mfi_dev, "controller is not in "
			    "ready state\n");
			mfi_kill_hba(sc);
			sc->hw_crit_error = 1;
			return;
		}
		if ((error = mfi_tbolt_init_MFI_queue(sc)) != 0) {
			device_printf(sc->mfi_dev, "Failed to initialize MFI "
			    "queue\n");
			mfi_kill_hba(sc);
			sc->hw_crit_error = 1;
			return;
		}

		/* Init last reply index and max */
		MFI_WRITE4(sc, MFI_RFPI, sc->mfi_max_fw_cmds - 1);
		MFI_WRITE4(sc, MFI_RPI, sc->last_reply_idx);

		sc->mfi_enable_intr(sc);
		sc->adpreset = 0;
		if (sc->mfi_aen_cm != NULL) {
			free(sc->mfi_aen_cm->cm_data, M_MFIBUF);
			mfi_remove_busy(sc->mfi_aen_cm);
			mfi_release_command(sc->mfi_aen_cm);
			sc->mfi_aen_cm = NULL;
		}

		if (sc->mfi_map_sync_cm != NULL) {
			mfi_remove_busy(sc->mfi_map_sync_cm);
			mfi_release_command(sc->mfi_map_sync_cm);
			sc->mfi_map_sync_cm = NULL;
		}
		mfi_issue_pending_cmds_again(sc);

		/*
		 * Issuing pending commands can result in the adapter being
		 * marked dead because of too many retries.  Check for that
		 * condition before clearing the reset condition on the FW.
		 */
		if (!sc->hw_crit_error) {
			/*
			 * Initiate AEN (Asynchronous Event Notification) &
			 * Sync Map
			 */
			mfi_aen_setup(sc, sc->last_seq_num);
			mfi_tbolt_sync_map_info(sc);

			sc->issuepend_done = 1;
			device_printf(sc->mfi_dev, "second stage of reset "
			    "complete, FW is ready now.\n");
		} else {
			device_printf(sc->mfi_dev, "second stage of reset "
			    "never completed, hba was marked offline.\n");
		}
	} else {
		device_printf(sc->mfi_dev, "mfi_process_fw_state_chg_isr "
		    "called with unhandled value:%d\n", sc->adpreset);
	}
}

/*
 * The ThunderBolt HW has an option for the driver to directly
 * access the underlying disks and operate on the RAID.  To
 * do this there needs to be a capability to keep the RAID controller
 * and driver in sync.  The FreeBSD driver does not take advantage
 * of this feature since it adds a lot of complexity and slows down
 * performance.  Performance is gained by using the controller's
 * cache etc.
 *
 * Even though this driver doesn't access the disks directly, an
 * AEN-like command is used to inform the RAID firmware to "sync"
 * with all LDs via the MFI_DCMD_LD_MAP_GET_INFO command.  This
 * command in write mode will return when the RAID firmware has
 * detected a change to the RAID state.  Examples of this type
 * of change are removing a disk.  Once the command returns then
 * the driver needs to acknowledge this and "sync" all LDs again.
 * This repeats until we shutdown.  Then we need to cancel this
 * pending command.
 *
 * If this is not done right the RAID firmware will not remove a
 * pulled drive and the RAID won't go degraded, etc.  Effectively,
 * it stops any RAID management functions from working.
 *
 * Doing another LD sync requires the use of an event since the
 * driver needs to do a mfi_wait_command and can't do that in an
 * interrupt thread.
 *
 * The driver could get the RAID state via the MFI_DCMD_LD_MAP_GET_INFO
 * command, but that requires a lot of structure and it is simpler
 * to just do MFI_DCMD_LD_GET_LIST versus walking the RAID map.
 */
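
/*
 * The resulting cycle, in brief: mfi_tbolt_sync_map_info() below issues
 * MFI_DCMD_LD_MAP_GET_INFO in write mode with the pend flag set; the
 * firmware completes it only once the RAID state changes;
 * mfi_sync_map_complete() then re-arms the next round through
 * mfi_queue_map_sync()'s taskqueue, until the command is aborted at
 * shutdown (cm_map_abort).
 */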

void
mfi_tbolt_sync_map_info(struct mfi_softc *sc)
{
	int error = 0, i;
	struct mfi_command *cmd = NULL;
	struct mfi_dcmd_frame *dcmd = NULL;
	uint32_t context = 0;
	union mfi_ld_ref *ld_sync = NULL;
	size_t ld_size;
	struct mfi_frame_header *hdr;
	struct mfi_command *cm = NULL;
	struct mfi_ld_list *list = NULL;

	mtx_assert(&sc->mfi_io_lock, MA_OWNED);

	if (sc->mfi_map_sync_cm != NULL || sc->cm_map_abort)
		return;

	error = mfi_dcmd_command(sc, &cm, MFI_DCMD_LD_GET_LIST,
	    (void **)&list, sizeof(*list));
	if (error)
		goto out;

	cm->cm_flags = MFI_CMD_POLLED | MFI_CMD_DATAIN;

	if (mfi_wait_command(sc, cm) != 0) {
		device_printf(sc->mfi_dev, "Failed to get device listing\n");
		goto out;
	}

	hdr = &cm->cm_frame->header;
	if (hdr->cmd_status != MFI_STAT_OK) {
		device_printf(sc->mfi_dev, "MFI_DCMD_LD_GET_LIST failed %x\n",
		    hdr->cmd_status);
		goto out;
	}

	ld_size = sizeof(*ld_sync) * list->ld_count;
	ld_sync = (union mfi_ld_ref *) malloc(ld_size, M_MFIBUF,
	    M_NOWAIT | M_ZERO);
	if (ld_sync == NULL) {
		device_printf(sc->mfi_dev, "Failed to allocate sync\n");
		goto out;
	}
	for (i = 0; i < list->ld_count; i++)
		ld_sync[i].ref = list->ld_list[i].ld.ref;

	if ((cmd = mfi_dequeue_free(sc)) == NULL) {
		device_printf(sc->mfi_dev, "Failed to get command\n");
		free(ld_sync, M_MFIBUF);
		goto out;
	}

	context = cmd->cm_frame->header.context;
	bzero(cmd->cm_frame, sizeof(union mfi_frame));
	cmd->cm_frame->header.context = context;

	dcmd = &cmd->cm_frame->dcmd;
	bzero(dcmd->mbox, MFI_MBOX_SIZE);
	dcmd->header.cmd = MFI_CMD_DCMD;
	dcmd->header.flags = MFI_FRAME_DIR_WRITE;
	dcmd->header.timeout = 0;
	dcmd->header.data_len = ld_size;
	dcmd->header.scsi_status = 0;
	dcmd->opcode = MFI_DCMD_LD_MAP_GET_INFO;
	cmd->cm_sg = &dcmd->sgl;
	cmd->cm_total_frame_size = MFI_DCMD_FRAME_SIZE;
	cmd->cm_data = ld_sync;
	cmd->cm_private = ld_sync;

	cmd->cm_len = ld_size;
	cmd->cm_complete = mfi_sync_map_complete;
	sc->mfi_map_sync_cm = cmd;

	cmd->cm_flags = MFI_CMD_DATAOUT;
	cmd->cm_frame->dcmd.mbox[0] = list->ld_count;
	cmd->cm_frame->dcmd.mbox[1] = MFI_DCMD_MBOX_PEND_FLAG;

	if ((error = mfi_mapcmd(sc, cmd)) != 0) {
		device_printf(sc->mfi_dev, "failed to send map sync\n");
		free(ld_sync, M_MFIBUF);
		sc->mfi_map_sync_cm = NULL;
		mfi_release_command(cmd);
		goto out;
	}

out:
	if (list)
		free(list, M_MFIBUF);
	if (cm)
		mfi_release_command(cm);
}

static void
mfi_sync_map_complete(struct mfi_command *cm)
{
	struct mfi_frame_header *hdr;
	struct mfi_softc *sc;
	int aborted = 0;

	sc = cm->cm_sc;
	mtx_assert(&sc->mfi_io_lock, MA_OWNED);

	hdr = &cm->cm_frame->header;

	if (sc->mfi_map_sync_cm == NULL)
		return;

	if (sc->cm_map_abort ||
	    hdr->cmd_status == MFI_STAT_INVALID_STATUS) {
		sc->cm_map_abort = 0;
		aborted = 1;
	}

	free(cm->cm_data, M_MFIBUF);
	wakeup(&sc->mfi_map_sync_cm);
	sc->mfi_map_sync_cm = NULL;
	mfi_release_command(cm);

	/* set it up again so the driver can catch more events */
	if (!aborted)
		mfi_queue_map_sync(sc);
}

static void
mfi_queue_map_sync(struct mfi_softc *sc)
{
	mtx_assert(&sc->mfi_io_lock, MA_OWNED);
	taskqueue_enqueue(taskqueue_swi, &sc->mfi_map_sync_task);
}

void
mfi_handle_map_sync(void *context, int pending)
{
	struct mfi_softc *sc;

	sc = context;
	mtx_lock(&sc->mfi_io_lock);
	mfi_tbolt_sync_map_info(sc);
	mtx_unlock(&sc->mfi_io_lock);
}