/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * Copyright 1994-2009 The FreeBSD Project.
 * All rights reserved.
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE FREEBSD PROJECT ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FREEBSD PROJECT OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
 * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * The views and conclusions contained in the software and documentation
 * are those of the authors and should not be interpreted as representing
 * official policies, either expressed or implied, of the FreeBSD Project.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_mfi.h"

#include <sys/param.h>
#include <sys/types.h>
#include <sys/kernel.h>
#include <sys/selinfo.h>
#include <sys/bus.h>
#include <sys/conf.h>
#include <sys/bio.h>
#include <sys/ioccom.h>
#include <sys/eventhandler.h>
#include <sys/callout.h>
#include <sys/uio.h>
#include <machine/bus.h>
#include <sys/sysctl.h>
#include <sys/systm.h>
#include <sys/malloc.h>

#include <dev/mfi/mfireg.h>
#include <dev/mfi/mfi_ioctl.h>
#include <dev/mfi/mfivar.h>

struct mfi_cmd_tbolt *mfi_tbolt_get_cmd(struct mfi_softc *sc,
    struct mfi_command *);
union mfi_mpi2_request_descriptor *
mfi_tbolt_get_request_descriptor(struct mfi_softc *sc, uint16_t index);
void mfi_tbolt_complete_cmd(struct mfi_softc *sc);
int mfi_tbolt_build_io(struct mfi_softc *sc, struct mfi_command *mfi_cmd,
    struct mfi_cmd_tbolt *cmd);
union mfi_mpi2_request_descriptor *mfi_tbolt_build_mpt_cmd(struct mfi_softc
    *sc, struct mfi_command *cmd);
uint8_t mfi_build_mpt_pass_thru(struct mfi_softc *sc,
    struct mfi_command *mfi_cmd);
union mfi_mpi2_request_descriptor *mfi_build_and_issue_cmd(struct mfi_softc
    *sc, struct mfi_command *mfi_cmd);
void mfi_tbolt_build_ldio(struct mfi_softc *sc, struct mfi_command *mfi_cmd,
    struct mfi_cmd_tbolt *cmd);
static int mfi_tbolt_make_sgl(struct mfi_softc *sc, struct mfi_command
    *mfi_cmd, pMpi25IeeeSgeChain64_t sgl_ptr, struct mfi_cmd_tbolt *cmd);
void map_tbolt_cmd_status(struct mfi_command *mfi_cmd, uint8_t status,
    uint8_t ext_status);
static void mfi_issue_pending_cmds_again(struct mfi_softc *sc);
static void mfi_kill_hba(struct mfi_softc *sc);
static void mfi_process_fw_state_chg_isr(void *arg);
static void mfi_sync_map_complete(struct mfi_command *);
static void mfi_queue_map_sync(struct mfi_softc *sc);

#define MFI_FUSION_ENABLE_INTERRUPT_MASK	(0x00000008)

extern int	mfi_polled_cmd_timeout;
static int	mfi_fw_reset_test = 0;
#ifdef MFI_DEBUG
SYSCTL_INT(_hw_mfi, OID_AUTO, fw_reset_test, CTLFLAG_RWTUN, &mfi_fw_reset_test,
	    0, "Force a firmware reset condition");
#endif

void
mfi_tbolt_enable_intr_ppc(struct mfi_softc *sc)
{
	MFI_WRITE4(sc, MFI_OMSK, ~MFI_FUSION_ENABLE_INTERRUPT_MASK);
	MFI_READ4(sc, MFI_OMSK);
}

void
mfi_tbolt_disable_intr_ppc(struct mfi_softc *sc)
{
	MFI_WRITE4(sc, MFI_OMSK, 0xFFFFFFFF);
	MFI_READ4(sc, MFI_OMSK);
}

int32_t
mfi_tbolt_read_fw_status_ppc(struct mfi_softc *sc)
{
	return MFI_READ4(sc, MFI_OSP0);
}

int32_t
mfi_tbolt_check_clear_intr_ppc(struct mfi_softc *sc)
{
	int32_t status, mfi_status = 0;

	status = MFI_READ4(sc, MFI_OSTS);

	if (status & 1) {
		MFI_WRITE4(sc, MFI_OSTS, status);
		MFI_READ4(sc, MFI_OSTS);
		if (status & MFI_STATE_CHANGE_INTERRUPT) {
			mfi_status |= MFI_FIRMWARE_STATE_CHANGE;
		}

		return mfi_status;
	}
	if (!(status & MFI_FUSION_ENABLE_INTERRUPT_MASK))
		return 1;

	MFI_READ4(sc, MFI_OSTS);
	return 0;
}

void
mfi_tbolt_issue_cmd_ppc(struct mfi_softc *sc, bus_addr_t bus_add,
    uint32_t frame_cnt)
{
	bus_add |= (MFI_REQ_DESCRIPT_FLAGS_MFA
	    << MFI_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
	MFI_WRITE4(sc, MFI_IQPL, (uint32_t)bus_add);
	MFI_WRITE4(sc, MFI_IQPH, (uint32_t)((uint64_t)bus_add >> 32));
}

/*
 * mfi_tbolt_adp_reset - For controller reset
 * @regs: MFI register set
 */
int
mfi_tbolt_adp_reset(struct mfi_softc *sc)
{
	int retry = 0, i = 0, j = 0;
	int HostDiag;

	/* Magic unlock sequence for the write sequence register */
	MFI_WRITE4(sc, MFI_WSR, 0xF);
	MFI_WRITE4(sc, MFI_WSR, 4);
	MFI_WRITE4(sc, MFI_WSR, 0xB);
	MFI_WRITE4(sc, MFI_WSR, 2);
	MFI_WRITE4(sc, MFI_WSR, 7);
	MFI_WRITE4(sc, MFI_WSR, 0xD);

	for (i = 0; i < 10000; i++)
		;

	HostDiag = (uint32_t)MFI_READ4(sc, MFI_HDR);

	while (!(HostDiag & DIAG_WRITE_ENABLE)) {
		for (i = 0; i < 1000; i++)
			;
		HostDiag = (uint32_t)MFI_READ4(sc, MFI_HDR);
		device_printf(sc->mfi_dev, "ADP_RESET_TBOLT: retry time=%d, "
		    "hostdiag=%#x\n", retry, HostDiag);

		if (retry++ >= 100)
			return 1;
	}

	device_printf(sc->mfi_dev, "ADP_RESET_TBOLT: HostDiag=%#x\n", HostDiag);

	MFI_WRITE4(sc, MFI_HDR, (HostDiag | DIAG_RESET_ADAPTER));

	/*
	 * Use a separate counter for the inner delay loop; reusing 'i'
	 * clobbered the outer loop counter and cut the delay short.
	 */
	for (i = 0; i < 10; i++) {
		for (j = 0; j < 10000; j++)
			;
	}

	HostDiag = (uint32_t)MFI_READ4(sc, MFI_RSR);
	while (HostDiag & DIAG_RESET_ADAPTER) {
		for (i = 0; i < 1000; i++)
			;
		HostDiag = (uint32_t)MFI_READ4(sc, MFI_RSR);
		device_printf(sc->mfi_dev, "ADP_RESET_TBOLT: retry time=%d, "
		    "hostdiag=%#x\n", retry, HostDiag);

		if (retry++ >= 1000)
			return 1;
	}
	return 0;
}

/*
 * This routine initializes Thunderbolt-specific device information.
 */
void
mfi_tbolt_init_globals(struct mfi_softc *sc)
{
	/* Initialize single reply size and message size */
	sc->reply_size = MEGASAS_THUNDERBOLT_REPLY_SIZE;
	sc->raid_io_msg_size = MEGASAS_THUNDERBOLT_NEW_MSG_SIZE;

	/*
	 * Calculate how many SGEs are allowed in the allocated main message:
	 * (size of the message - RAID SCSI IO message size (except SGE))
	 * / size of SGE
	 * (0x100 - (0x90 - 0x10)) / 0x10 = 8
	 */
	sc->max_SGEs_in_main_message =
	    (uint8_t)((sc->raid_io_msg_size
	    - (sizeof(struct mfi_mpi2_request_raid_scsi_io)
	    - sizeof(MPI2_SGE_IO_UNION))) / sizeof(MPI2_SGE_IO_UNION));
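	/*
	 * Worked example (illustrative, using the sizes from the comment
	 * above: 0x100-byte main message, 0x90-byte RAID SCSI IO message
	 * whose tail holds one 0x10-byte SGE):
	 *
	 *	(0x100 - (0x90 - 0x10)) / 0x10 = (256 - 128) / 16 = 8
	 *
	 * i.e. eight SGEs fit in the main message.
	 */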
	/*
	 * (Command frame size allocated in SRB ext - RAID SCSI IO message
	 * size) / size of SGL;
	 * (1280 - 256) / 16 = 64
	 */
	sc->max_SGEs_in_chain_message = (MR_COMMAND_SIZE
	    - sc->raid_io_msg_size) / sizeof(MPI2_SGE_IO_UNION);
	/*
	 * (0x08 - 1) + 0x40 = 0x47; 0x47 - 0x01 = 0x46, as one SGE is left
	 * for command coalescing
	 */
	sc->mfi_max_sge = (sc->max_SGEs_in_main_message - 1)
	    + sc->max_SGEs_in_chain_message - 1;
	/*
	 * This is the offset in number of 4 * 32bit words to the next chain:
	 * (0x100 - 0x10) / 0x10 = 0xF (15)
	 */
	sc->chain_offset_value_for_main_message = (sc->raid_io_msg_size
	    - sizeof(MPI2_SGE_IO_UNION)) / 16;
	sc->chain_offset_value_for_mpt_ptmsg
	    = offsetof(struct mfi_mpi2_request_raid_scsi_io, SGL) / 16;
	sc->mfi_cmd_pool_tbolt = NULL;
	sc->request_desc_pool = NULL;
}

/*
 * This function calculates the memory requirement for the Thunderbolt
 * controller and returns the total required memory in bytes.
 */
uint32_t
mfi_tbolt_get_memory_requirement(struct mfi_softc *sc)
{
	uint32_t size;

	size = MEGASAS_THUNDERBOLT_MSG_ALLIGNMENT;	/* for alignment */
	size += sc->raid_io_msg_size * (sc->mfi_max_fw_cmds + 1);
	size += sc->reply_size * sc->mfi_max_fw_cmds;
	/* this is for SGLs */
	size += MEGASAS_MAX_SZ_CHAIN_FRAME * sc->mfi_max_fw_cmds;
	return size;
}

/*
 * Description:
 *	This function will prepare message pools for the Thunderbolt
 *	controller.
 * Arguments:
 *	DevExt - HBA miniport driver's adapter data storage structure
 *	pMemLocation - start of the memory allocated for Thunderbolt.
 * Return Value:
 *	TRUE if successful
 *	FALSE if failed
 */
int
mfi_tbolt_init_desc_pool(struct mfi_softc *sc, uint8_t* mem_location,
    uint32_t tbolt_contg_length)
{
	uint32_t offset = 0;
	uint8_t *addr = mem_location;

	/* Request Descriptor Base physical Address */

	/* For Request Descriptors Virtual Memory */
	/* Initialize the aligned IO Frames Virtual Memory Pointer */
	if (((uintptr_t)addr) & (0xFF)) {
		addr = &addr[sc->raid_io_msg_size];
		addr = (uint8_t *)((uintptr_t)addr & (~0xFF));
		sc->request_message_pool_align = addr;
	} else
		sc->request_message_pool_align = addr;

	offset = sc->request_message_pool_align - sc->request_message_pool;
	sc->request_msg_busaddr = sc->mfi_tb_busaddr + offset;

	/* DJA XXX should this be bus dma ??? */
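	/*
	 * Illustrative layout of the single contiguous DMA region being
	 * carved up here; the sizes mirror those summed in
	 * mfi_tbolt_get_memory_requirement() above:
	 *
	 *	[ alignment padding, up to 256 bytes                   ]
	 *	[ request frames:  raid_io_msg_size * (max_fw_cmds + 1)]
	 *	[ reply frames:    reply_size * max_fw_cmds            ]
	 *	[ SG chain frames: MEGASAS_MAX_SZ_CHAIN_FRAME * max    ]
	 */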
	/* Skip request message pool */
	addr = &addr[sc->raid_io_msg_size * (sc->mfi_max_fw_cmds + 1)];
	/* Reply Frame Pool is initialized */
	sc->reply_frame_pool = (struct mfi_mpi2_reply_header *) addr;
	if (((uintptr_t)addr) & (0xFF)) {
		addr = &addr[sc->reply_size];
		addr = (uint8_t *)((uintptr_t)addr & (~0xFF));
	}
	sc->reply_frame_pool_align
	    = (struct mfi_mpi2_reply_header *)addr;

	offset = (uintptr_t)sc->reply_frame_pool_align
	    - (uintptr_t)sc->request_message_pool;
	sc->reply_frame_busaddr = sc->mfi_tb_busaddr + offset;

	/* Skip Reply Frame Pool */
	addr += sc->reply_size * sc->mfi_max_fw_cmds;
	sc->reply_pool_limit = addr;

	/* initialize reply addresses to 0xFFFFFFFF */
	memset((uint8_t *)sc->reply_frame_pool, 0xFF,
	    (sc->reply_size * sc->mfi_max_fw_cmds));

	offset = sc->reply_size * sc->mfi_max_fw_cmds;
	sc->sg_frame_busaddr = sc->reply_frame_busaddr + offset;
	/* initialize the last_reply_idx to 0 */
	sc->last_reply_idx = 0;
	MFI_WRITE4(sc, MFI_RFPI, sc->mfi_max_fw_cmds - 1);
	MFI_WRITE4(sc, MFI_RPI, sc->last_reply_idx);
	offset = (sc->sg_frame_busaddr + (MEGASAS_MAX_SZ_CHAIN_FRAME *
	    sc->mfi_max_fw_cmds)) - sc->mfi_tb_busaddr;
	if (offset > tbolt_contg_length)
		device_printf(sc->mfi_dev, "Error: initialized more than "
		    "allocated\n");
	return 0;
}

/*
 * This routine prepares and issues the INIT2 frame to the firmware.
 */
int
mfi_tbolt_init_MFI_queue(struct mfi_softc *sc)
{
	struct MPI2_IOC_INIT_REQUEST *mpi2IocInit;
	struct mfi_init_frame *mfi_init;
	uintptr_t offset = 0;
	bus_addr_t phyAddress;
	MFI_ADDRESS *mfiAddressTemp;
	struct mfi_command *cm, cmd_tmp;
	int error;

	mtx_assert(&sc->mfi_io_lock, MA_OWNED);

	/* Check if initialization is already completed */
	if (sc->MFA_enabled) {
		device_printf(sc->mfi_dev, "tbolt_init already initialised!\n");
		return 1;
	}

	if ((cm = mfi_dequeue_free(sc)) == NULL) {
		device_printf(sc->mfi_dev, "tbolt_init failed to get command "
		    "entry!\n");
		return (EBUSY);
	}

	/* Stash the command's own frame so it can be restored afterwards */
	cmd_tmp.cm_frame = cm->cm_frame;
	cmd_tmp.cm_frame_busaddr = cm->cm_frame_busaddr;
	cmd_tmp.cm_dmamap = cm->cm_dmamap;

	cm->cm_frame = (union mfi_frame *)((uintptr_t)sc->mfi_tb_init);
	cm->cm_frame_busaddr = sc->mfi_tb_init_busaddr;
	cm->cm_dmamap = sc->mfi_tb_init_dmamap;
	cm->cm_frame->header.context = 0;

	/*
	 * Abuse the SG list area of the frame to hold the init_qinfo
	 * object.
	 */
	mfi_init = &cm->cm_frame->init;

	mpi2IocInit = (struct MPI2_IOC_INIT_REQUEST *)sc->mfi_tb_ioc_init_desc;
	bzero(mpi2IocInit, sizeof(struct MPI2_IOC_INIT_REQUEST));
	mpi2IocInit->Function = MPI2_FUNCTION_IOC_INIT;
	mpi2IocInit->WhoInit = MPI2_WHOINIT_HOST_DRIVER;

	/* set MsgVersion and HeaderVersion host driver was built with */
	mpi2IocInit->MsgVersion = MPI2_VERSION;
	mpi2IocInit->HeaderVersion = MPI2_HEADER_VERSION;
	mpi2IocInit->SystemRequestFrameSize = sc->raid_io_msg_size / 4;
	mpi2IocInit->ReplyDescriptorPostQueueDepth
	    = (uint16_t)sc->mfi_max_fw_cmds;
	mpi2IocInit->ReplyFreeQueueDepth = 0;	/* Not supported by MR. */
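	/*
	 * The queue addresses handed to the firmware below are the physical
	 * addresses of the reply and request pools carved out in
	 * mfi_tbolt_init_desc_pool(); the firmware posts reply descriptors
	 * into the former and fetches request frames by SMID from the
	 * latter.
	 */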
	/* Get physical address of reply frame pool */
	offset = (uintptr_t)sc->reply_frame_pool_align
	    - (uintptr_t)sc->request_message_pool;
	phyAddress = sc->mfi_tb_busaddr + offset;
	mfiAddressTemp =
	    (MFI_ADDRESS *)&mpi2IocInit->ReplyDescriptorPostQueueAddress;
	mfiAddressTemp->u.addressLow = (uint32_t)phyAddress;
	mfiAddressTemp->u.addressHigh = (uint32_t)((uint64_t)phyAddress >> 32);

	/* Get physical address of request message pool */
	offset = sc->request_message_pool_align - sc->request_message_pool;
	phyAddress = sc->mfi_tb_busaddr + offset;
	mfiAddressTemp = (MFI_ADDRESS *)&mpi2IocInit->SystemRequestFrameBaseAddress;
	mfiAddressTemp->u.addressLow = (uint32_t)phyAddress;
	mfiAddressTemp->u.addressHigh = (uint32_t)((uint64_t)phyAddress >> 32);
	mpi2IocInit->ReplyFreeQueueAddress = 0;	/* Not supported by MR. */
	mpi2IocInit->TimeStamp = time_uptime;

	if (sc->verbuf) {
		snprintf((char *)sc->verbuf, strlen(MEGASAS_VERSION) + 2, "%s\n",
		    MEGASAS_VERSION);
		mfi_init->driver_ver_lo = (uint32_t)sc->verbuf_h_busaddr;
		mfi_init->driver_ver_hi =
		    (uint32_t)((uint64_t)sc->verbuf_h_busaddr >> 32);
	}
	/* Get the physical address of the mpi2 ioc init command */
	phyAddress = sc->mfi_tb_ioc_init_busaddr;
	mfi_init->qinfo_new_addr_lo = (uint32_t)phyAddress;
	mfi_init->qinfo_new_addr_hi = (uint32_t)((uint64_t)phyAddress >> 32);
	mfi_init->header.flags |= MFI_FRAME_DONT_POST_IN_REPLY_QUEUE;

	mfi_init->header.cmd = MFI_CMD_INIT;
	mfi_init->header.data_len = sizeof(struct MPI2_IOC_INIT_REQUEST);
	mfi_init->header.cmd_status = MFI_STAT_INVALID_STATUS;

	cm->cm_data = NULL;
	cm->cm_flags |= MFI_CMD_POLLED;
	cm->cm_timestamp = time_uptime;
	if ((error = mfi_mapcmd(sc, cm)) != 0) {
		device_printf(sc->mfi_dev, "failed to send IOC init2 "
		    "command %d at %lx\n", error, (long)cm->cm_frame_busaddr);
		goto out;
	}

	if (mfi_init->header.cmd_status == MFI_STAT_OK) {
		sc->MFA_enabled = 1;
	} else {
		device_printf(sc->mfi_dev, "Init command failed %#x\n",
		    mfi_init->header.cmd_status);
		error = mfi_init->header.cmd_status;
		goto out;
	}

out:
	cm->cm_frame = cmd_tmp.cm_frame;
	cm->cm_frame_busaddr = cmd_tmp.cm_frame_busaddr;
	cm->cm_dmamap = cmd_tmp.cm_dmamap;
	mfi_release_command(cm);

	return (error);
}

int
mfi_tbolt_alloc_cmd(struct mfi_softc *sc)
{
	struct mfi_cmd_tbolt *cmd;
	bus_addr_t io_req_base_phys;
	uint8_t *io_req_base;
	int i = 0, j = 0, offset = 0;

	/*
	 * sc->mfi_cmd_pool_tbolt is an array of struct mfi_cmd_tbolt
	 * pointers.  Allocate the dynamic array first and then allocate
	 * individual commands.
	 */
	sc->request_desc_pool = malloc(sizeof(
	    union mfi_mpi2_request_descriptor) * sc->mfi_max_fw_cmds,
	    M_MFIBUF, M_NOWAIT|M_ZERO);

	if (sc->request_desc_pool == NULL) {
		device_printf(sc->mfi_dev, "Could not alloc "
		    "memory for request_desc_pool\n");
		return (ENOMEM);
	}

	sc->mfi_cmd_pool_tbolt = malloc(sizeof(struct mfi_cmd_tbolt*)
	    * sc->mfi_max_fw_cmds, M_MFIBUF, M_NOWAIT|M_ZERO);

	if (sc->mfi_cmd_pool_tbolt == NULL) {
		free(sc->request_desc_pool, M_MFIBUF);
		device_printf(sc->mfi_dev, "Could not alloc "
		    "memory for cmd_pool_tbolt\n");
		return (ENOMEM);
	}

	for (i = 0; i < sc->mfi_max_fw_cmds; i++) {
		sc->mfi_cmd_pool_tbolt[i] = malloc(sizeof(
		    struct mfi_cmd_tbolt), M_MFIBUF, M_NOWAIT|M_ZERO);

		if (!sc->mfi_cmd_pool_tbolt[i]) {
			device_printf(sc->mfi_dev, "Could not alloc "
			    "cmd_pool_tbolt entry\n");

			/* Unwind everything allocated so far */
			for (j = 0; j < i; j++)
				free(sc->mfi_cmd_pool_tbolt[j], M_MFIBUF);

			free(sc->request_desc_pool, M_MFIBUF);
			sc->request_desc_pool = NULL;
			free(sc->mfi_cmd_pool_tbolt, M_MFIBUF);
			sc->mfi_cmd_pool_tbolt = NULL;

			return (ENOMEM);
		}
	}

	/*
	 * The first 256 bytes (SMID 0) are not used.  Don't add them to the
	 * cmd list.
	 */
	io_req_base = sc->request_message_pool_align
	    + MEGASAS_THUNDERBOLT_NEW_MSG_SIZE;
	io_req_base_phys = sc->request_msg_busaddr
	    + MEGASAS_THUNDERBOLT_NEW_MSG_SIZE;

	/*
	 * Add all the commands to the command pool (instance->cmd_pool).
	 * SMID 0 is reserved, so SMIDs/indices start from 1.
	 */
	for (i = 0; i < sc->mfi_max_fw_cmds; i++) {
		cmd = sc->mfi_cmd_pool_tbolt[i];
		offset = MEGASAS_THUNDERBOLT_NEW_MSG_SIZE * i;
		cmd->index = i + 1;
		cmd->request_desc = (union mfi_mpi2_request_descriptor *)
		    (sc->request_desc_pool + i);
		cmd->io_request = (struct mfi_mpi2_request_raid_scsi_io *)
		    (io_req_base + offset);
		cmd->io_request_phys_addr = io_req_base_phys + offset;
		cmd->sg_frame = (MPI2_SGE_IO_UNION *)(sc->reply_pool_limit
		    + i * MEGASAS_MAX_SZ_CHAIN_FRAME);
		cmd->sg_frame_phys_addr = sc->sg_frame_busaddr + i
		    * MEGASAS_MAX_SZ_CHAIN_FRAME;
		cmd->sync_cmd_idx = sc->mfi_max_fw_cmds;

		TAILQ_INSERT_TAIL(&(sc->mfi_cmd_tbolt_tqh), cmd, next);
	}
	return 0;
}

int
mfi_tbolt_reset(struct mfi_softc *sc)
{
	uint32_t fw_state;

	mtx_lock(&sc->mfi_io_lock);
	if (sc->hw_crit_error) {
		device_printf(sc->mfi_dev, "HW CRITICAL ERROR\n");
		mtx_unlock(&sc->mfi_io_lock);
		return 1;
	}

	if (sc->mfi_flags & MFI_FLAGS_TBOLT) {
		fw_state = sc->mfi_read_fw_status(sc);
		if ((fw_state & MFI_FWSTATE_FAULT) == MFI_FWSTATE_FAULT ||
		    mfi_fw_reset_test) {
			if ((sc->disableOnlineCtrlReset == 0)
			    && (sc->adpreset == 0)) {
				device_printf(sc->mfi_dev, "Adapter RESET "
				    "condition detected\n");
				sc->adpreset = 1;
				sc->issuepend_done = 0;
				sc->MFA_enabled = 0;
				sc->last_reply_idx = 0;
				mfi_process_fw_state_chg_isr((void *)sc);
			}
			mtx_unlock(&sc->mfi_io_lock);
			return 0;
		}
	}
	mtx_unlock(&sc->mfi_io_lock);
	return 1;
}

/*
 * mfi_intr_tbolt - isr entry point
 */
void
mfi_intr_tbolt(void *arg)
{
	struct mfi_softc *sc = (struct mfi_softc *)arg;

	if (sc->mfi_check_clear_intr(sc) == 1) {
		return;
	}
	if (sc->mfi_detaching)
		return;
	mtx_lock(&sc->mfi_io_lock);
	mfi_tbolt_complete_cmd(sc);
	sc->mfi_flags &= ~MFI_FLAGS_QFRZN;
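	/* Completions above may have freed resources; restart queued I/O */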
	mfi_startio(sc);
	mtx_unlock(&sc->mfi_io_lock);
	return;
}

/*
 * map_cmd_status - Maps FW cmd status to OS cmd status
 * @cmd : Pointer to cmd
 * @status : status of cmd returned by FW
 * @ext_status : ext status of cmd returned by FW
 */
void
map_tbolt_cmd_status(struct mfi_command *mfi_cmd, uint8_t status,
    uint8_t ext_status)
{
	switch (status) {
	case MFI_STAT_OK:
		mfi_cmd->cm_frame->header.cmd_status = MFI_STAT_OK;
		mfi_cmd->cm_frame->dcmd.header.cmd_status = MFI_STAT_OK;
		mfi_cmd->cm_error = MFI_STAT_OK;
		break;

	case MFI_STAT_SCSI_IO_FAILED:
	case MFI_STAT_LD_INIT_IN_PROGRESS:
		mfi_cmd->cm_frame->header.cmd_status = status;
		mfi_cmd->cm_frame->header.scsi_status = ext_status;
		mfi_cmd->cm_frame->dcmd.header.cmd_status = status;
		mfi_cmd->cm_frame->dcmd.header.scsi_status
		    = ext_status;
		break;

	case MFI_STAT_SCSI_DONE_WITH_ERROR:
		mfi_cmd->cm_frame->header.cmd_status = ext_status;
		mfi_cmd->cm_frame->dcmd.header.cmd_status = ext_status;
		break;

	case MFI_STAT_LD_OFFLINE:
	case MFI_STAT_DEVICE_NOT_FOUND:
		mfi_cmd->cm_frame->header.cmd_status = status;
		mfi_cmd->cm_frame->dcmd.header.cmd_status = status;
		break;

	default:
		mfi_cmd->cm_frame->header.cmd_status = status;
		mfi_cmd->cm_frame->dcmd.header.cmd_status = status;
		break;
	}
}

/*
 * mfi_tbolt_return_cmd - Return a cmd to free command pool
 * @instance: Adapter soft state
 * @tbolt_cmd: Tbolt command packet to be returned to free command pool
 * @mfi_cmd: Owning MFI command packet
 */
void
mfi_tbolt_return_cmd(struct mfi_softc *sc, struct mfi_cmd_tbolt *tbolt_cmd,
    struct mfi_command *mfi_cmd)
{
	mtx_assert(&sc->mfi_io_lock, MA_OWNED);

	mfi_cmd->cm_flags &= ~MFI_CMD_TBOLT;
	mfi_cmd->cm_extra_frames = 0;
	tbolt_cmd->sync_cmd_idx = sc->mfi_max_fw_cmds;

	TAILQ_INSERT_TAIL(&sc->mfi_cmd_tbolt_tqh, tbolt_cmd, next);
}

void
mfi_tbolt_complete_cmd(struct mfi_softc *sc)
{
	struct mfi_mpi2_reply_header *desc, *reply_desc;
	struct mfi_command *cmd_mfi;	/* For MFA Cmds */
	struct mfi_cmd_tbolt *cmd_tbolt;
	uint16_t smid;
	uint8_t reply_descript_type;
	struct mfi_mpi2_request_raid_scsi_io *scsi_io_req;
	uint32_t status, extStatus;
	uint16_t num_completed;
	union desc_value val;

	mtx_assert(&sc->mfi_io_lock, MA_OWNED);

	desc = (struct mfi_mpi2_reply_header *)
	    ((uintptr_t)sc->reply_frame_pool_align
	    + sc->last_reply_idx * sc->reply_size);
	reply_desc = desc;

	if (reply_desc == NULL) {
		device_printf(sc->mfi_dev, "reply desc is NULL!!\n");
		return;
	}

	reply_descript_type = reply_desc->ReplyFlags
	    & MPI2_RPY_DESCRIPT_FLAGS_TYPE_MASK;
	if (reply_descript_type == MPI2_RPY_DESCRIPT_FLAGS_UNUSED)
		return;

	num_completed = 0;
	val.word = ((union mfi_mpi2_reply_descriptor *)desc)->words;

	/* Read reply descriptors until an unused one is reached */
	while ((val.u.low != 0xFFFFFFFF) && (val.u.high != 0xFFFFFFFF)) {
		smid = reply_desc->SMID;
		if (smid == 0 || smid > sc->mfi_max_fw_cmds) {
			device_printf(sc->mfi_dev, "smid is %d, cannot "
			    "proceed - skipping\n", smid);
			goto next;
		}
		cmd_tbolt = sc->mfi_cmd_pool_tbolt[smid - 1];
		if (cmd_tbolt->sync_cmd_idx == sc->mfi_max_fw_cmds) {
			device_printf(sc->mfi_dev, "cmd_tbolt %p "
			    "has invalid sync_cmd_idx=%d - skipping\n",
			    cmd_tbolt, cmd_tbolt->sync_cmd_idx);
			goto next;
		}
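		/* Resolve the owning MFI command via the tbolt cmd's index */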
		cmd_mfi = &sc->mfi_commands[cmd_tbolt->sync_cmd_idx];
		scsi_io_req = cmd_tbolt->io_request;

		status = cmd_mfi->cm_frame->dcmd.header.cmd_status;
		extStatus = cmd_mfi->cm_frame->dcmd.header.scsi_status;
		map_tbolt_cmd_status(cmd_mfi, status, extStatus);

		/* mfi_tbolt_return_cmd is handled by mfi complete / return */
		if ((cmd_mfi->cm_flags & MFI_CMD_SCSI) != 0 &&
		    (cmd_mfi->cm_flags & MFI_CMD_POLLED) != 0) {
			/* polled LD/SYSPD IO command */
			/* XXX mark okay for now DJA */
			cmd_mfi->cm_frame->header.cmd_status = MFI_STAT_OK;
		} else {
			/* remove command from busy queue if not polled */
			if ((cmd_mfi->cm_flags & MFI_ON_MFIQ_BUSY) != 0)
				mfi_remove_busy(cmd_mfi);

			/* complete the command */
			mfi_complete(sc, cmd_mfi);
		}

next:
		sc->last_reply_idx++;
		if (sc->last_reply_idx >= sc->mfi_max_fw_cmds) {
			MFI_WRITE4(sc, MFI_RPI, sc->last_reply_idx);
			sc->last_reply_idx = 0;
		}

		/* Set the descriptor back to all ones */
		((union mfi_mpi2_reply_descriptor *)desc)->words =
		    ~((uint64_t)0x00);

		num_completed++;

		/* Get the next reply descriptor */
		desc = (struct mfi_mpi2_reply_header *)
		    ((uintptr_t)sc->reply_frame_pool_align
		    + sc->last_reply_idx * sc->reply_size);
		reply_desc = desc;
		val.word = ((union mfi_mpi2_reply_descriptor *)desc)->words;
		reply_descript_type = reply_desc->ReplyFlags
		    & MPI2_RPY_DESCRIPT_FLAGS_TYPE_MASK;
		if (reply_descript_type == MPI2_RPY_DESCRIPT_FLAGS_UNUSED)
			break;
	}

	if (!num_completed)
		return;

	/* update replyIndex to FW */
	if (sc->last_reply_idx)
		MFI_WRITE4(sc, MFI_RPI, sc->last_reply_idx);

	return;
}

/*
 * mfi_get_cmd - Get a command from the free pool
 * @instance: Adapter soft state
 *
 * Returns a free command from the pool.
 */
struct mfi_cmd_tbolt *
mfi_tbolt_get_cmd(struct mfi_softc *sc, struct mfi_command *mfi_cmd)
{
	struct mfi_cmd_tbolt *cmd = NULL;

	mtx_assert(&sc->mfi_io_lock, MA_OWNED);

	if ((cmd = TAILQ_FIRST(&sc->mfi_cmd_tbolt_tqh)) == NULL)
		return (NULL);
	TAILQ_REMOVE(&sc->mfi_cmd_tbolt_tqh, cmd, next);
	memset((uint8_t *)cmd->sg_frame, 0, MEGASAS_MAX_SZ_CHAIN_FRAME);
	memset((uint8_t *)cmd->io_request, 0,
	    MEGASAS_THUNDERBOLT_NEW_MSG_SIZE);

	cmd->sync_cmd_idx = mfi_cmd->cm_index;
	mfi_cmd->cm_extra_frames = cmd->index;	/* Frame count used as SMID */
	mfi_cmd->cm_flags |= MFI_CMD_TBOLT;

	return cmd;
}

union mfi_mpi2_request_descriptor *
mfi_tbolt_get_request_descriptor(struct mfi_softc *sc, uint16_t index)
{
	uint8_t *p;

	if (index >= sc->mfi_max_fw_cmds) {
		device_printf(sc->mfi_dev, "Invalid SMID (0x%x) request "
		    "for descriptor\n", index);
		return NULL;
	}
	p = sc->request_desc_pool + sizeof(union mfi_mpi2_request_descriptor)
	    * index;
	memset(p, 0, sizeof(union mfi_mpi2_request_descriptor));
	return (union mfi_mpi2_request_descriptor *)p;
}

/* Used to build an IOCTL cmd */
uint8_t
mfi_build_mpt_pass_thru(struct mfi_softc *sc, struct mfi_command *mfi_cmd)
{
	MPI25_IEEE_SGE_CHAIN64 *mpi25_ieee_chain;
	struct mfi_mpi2_request_raid_scsi_io *io_req;
	struct mfi_cmd_tbolt *cmd;

	cmd = mfi_tbolt_get_cmd(sc, mfi_cmd);
	if (!cmd)
		return EBUSY;
	io_req = cmd->io_request;
	mpi25_ieee_chain = (MPI25_IEEE_SGE_CHAIN64 *)&io_req->SGL.IeeeChain;

	io_req->Function = MPI2_FUNCTION_PASSTHRU_IO_REQUEST;
	io_req->SGLOffset0 = offsetof(struct mfi_mpi2_request_raid_scsi_io,
	    SGL) / 4;
	io_req->ChainOffset = sc->chain_offset_value_for_mpt_ptmsg;

	mpi25_ieee_chain->Address = mfi_cmd->cm_frame_busaddr;

	/*
	 * In MFI pass thru, nextChainOffset will always be zero to
	 * indicate the end of the chain.
	 */
	mpi25_ieee_chain->Flags = MPI2_IEEE_SGE_FLAGS_CHAIN_ELEMENT
	    | MPI2_IEEE_SGE_FLAGS_IOCPLBNTA_ADDR;

	/* setting the length to the maximum length */
	mpi25_ieee_chain->Length = 1024;

	return 0;
}

void
mfi_tbolt_build_ldio(struct mfi_softc *sc, struct mfi_command *mfi_cmd,
    struct mfi_cmd_tbolt *cmd)
{
	uint32_t start_lba_lo = 0, start_lba_hi = 0, device_id;
	struct mfi_mpi2_request_raid_scsi_io *io_request;
	struct IO_REQUEST_INFO io_info;

	device_id = mfi_cmd->cm_frame->io.header.target_id;
	io_request = cmd->io_request;
	io_request->RaidContext.TargetID = device_id;
	io_request->RaidContext.Status = 0;
	io_request->RaidContext.exStatus = 0;
	io_request->RaidContext.regLockFlags = 0;

	start_lba_lo = mfi_cmd->cm_frame->io.lba_lo;
	start_lba_hi = mfi_cmd->cm_frame->io.lba_hi;

	memset(&io_info, 0, sizeof(struct IO_REQUEST_INFO));
	io_info.ldStartBlock = ((uint64_t)start_lba_hi << 32) | start_lba_lo;
	io_info.numBlocks = mfi_cmd->cm_frame->io.header.data_len;
	io_info.ldTgtId = device_id;
	if ((mfi_cmd->cm_frame->header.flags & MFI_FRAME_DIR_READ) ==
	    MFI_FRAME_DIR_READ)
		io_info.isRead = 1;

	io_request->RaidContext.timeoutValue
	    = MFI_FUSION_FP_DEFAULT_TIMEOUT;
	io_request->Function = MPI2_FUNCTION_LD_IO_REQUEST;
	io_request->DevHandle = device_id;
	cmd->request_desc->header.RequestFlags
	    = (MFI_REQ_DESCRIPT_FLAGS_LD_IO
	    << MFI_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
	if ((io_request->IoFlags == 6) && (io_info.numBlocks == 0))
		io_request->RaidContext.RegLockLength = 0x100;
	io_request->DataLength = mfi_cmd->cm_frame->io.header.data_len
	    * MFI_SECTOR_LEN;
}

int
mfi_tbolt_build_io(struct mfi_softc *sc, struct mfi_command *mfi_cmd,
    struct mfi_cmd_tbolt *cmd)
{
	struct mfi_mpi2_request_raid_scsi_io *io_request;
	uint32_t sge_count;
	uint8_t cdb_len;
	int readop;
	u_int64_t lba;

	io_request = cmd->io_request;
	if (!(mfi_cmd->cm_frame->header.cmd == MFI_CMD_LD_READ
	    || mfi_cmd->cm_frame->header.cmd == MFI_CMD_LD_WRITE))
		return 1;

	mfi_tbolt_build_ldio(sc, mfi_cmd, cmd);

	/* Convert to SCSI command CDB */
	bzero(io_request->CDB.CDB32, sizeof(io_request->CDB.CDB32));
	if (mfi_cmd->cm_frame->header.cmd == MFI_CMD_LD_WRITE)
		readop = 0;
	else
		readop = 1;

	lba = mfi_cmd->cm_frame->io.lba_hi;
	lba = (lba << 32) + mfi_cmd->cm_frame->io.lba_lo;
	cdb_len = mfi_build_cdb(readop, 0, lba,
	    mfi_cmd->cm_frame->io.header.data_len, io_request->CDB.CDB32);

	/* Just the CDB length, rest of the flags are zero */
	io_request->IoFlags = cdb_len;

	/* Construct SGL */
	sge_count = mfi_tbolt_make_sgl(sc, mfi_cmd,
	    (pMpi25IeeeSgeChain64_t)&io_request->SGL, cmd);
	if (sge_count > sc->mfi_max_sge) {
		device_printf(sc->mfi_dev, "Error: sge_count (0x%x) exceeds "
		    "max (0x%x) allowed\n", sge_count, sc->mfi_max_sge);
		return 1;
	}
	io_request->RaidContext.numSGE = sge_count;
	io_request->SGLFlags = MPI2_SGE_FLAGS_64_BIT_ADDRESSING;

	if (mfi_cmd->cm_frame->header.cmd == MFI_CMD_LD_WRITE)
		io_request->Control = MPI2_SCSIIO_CONTROL_WRITE;
	else
		io_request->Control = MPI2_SCSIIO_CONTROL_READ;

	io_request->SGLOffset0 = offsetof(
	    struct mfi_mpi2_request_raid_scsi_io, SGL) / 4;

	io_request->SenseBufferLowAddress = mfi_cmd->cm_sense_busaddr;
	io_request->SenseBufferLength = MFI_SENSE_LEN;
	io_request->RaidContext.Status = MFI_STAT_INVALID_STATUS;
	io_request->RaidContext.exStatus = MFI_STAT_INVALID_STATUS;

	return 0;
}

static int
mfi_tbolt_make_sgl(struct mfi_softc *sc, struct mfi_command *mfi_cmd,
    pMpi25IeeeSgeChain64_t sgl_ptr, struct mfi_cmd_tbolt *cmd)
{
	uint8_t i, sg_processed, sg_to_process;
	uint8_t sge_count, sge_idx;
	union mfi_sgl *os_sgl;
	pMpi25IeeeSgeChain64_t sgl_end;

	/* Return 0 if there is no data transfer */
	if (!mfi_cmd->cm_sg || !mfi_cmd->cm_len) {
		device_printf(sc->mfi_dev, "Buffer empty\n");
		return 0;
	}
	os_sgl = mfi_cmd->cm_sg;
	sge_count = mfi_cmd->cm_frame->header.sg_count;

	if (sge_count > sc->mfi_max_sge) {
		device_printf(sc->mfi_dev, "sgl ptr %p sg_cnt %d\n",
		    os_sgl, sge_count);
		return sge_count;
	}

	if (sge_count > sc->max_SGEs_in_main_message)
		/* One element is reserved to store the chain info */
		sge_idx = sc->max_SGEs_in_main_message - 1;
	else
		sge_idx = sge_count;

	if (sc->mfi_flags & (MFI_FLAGS_INVADER | MFI_FLAGS_FURY)) {
		sgl_end = sgl_ptr + (sc->max_SGEs_in_main_message - 1);
		sgl_end->Flags = 0;
	}

	for (i = 0; i < sge_idx; i++) {
		/*
		 * On 32-bit BSD we get 32-bit SGLs from the OS, but the
		 * firmware only takes 64-bit SGLs, so copy from the 32-bit
		 * SGLs into 64-bit ones.
		 */
		if (sc->mfi_flags & MFI_FLAGS_SKINNY) {
			sgl_ptr->Length = os_sgl->sg_skinny[i].len;
			sgl_ptr->Address = os_sgl->sg_skinny[i].addr;
		} else {
			sgl_ptr->Length = os_sgl->sg32[i].len;
			sgl_ptr->Address = os_sgl->sg32[i].addr;
		}
		if (i == sge_count - 1 &&
		    (sc->mfi_flags & (MFI_FLAGS_INVADER | MFI_FLAGS_FURY)))
			sgl_ptr->Flags = MPI25_IEEE_SGE_FLAGS_END_OF_LIST;
		else
			sgl_ptr->Flags = 0;
		sgl_ptr++;
		cmd->io_request->ChainOffset = 0;
	}

	sg_processed = i;

	if (sg_processed < sge_count) {
		pMpi25IeeeSgeChain64_t sg_chain;

		sg_to_process = sge_count - sg_processed;
		cmd->io_request->ChainOffset =
		    sc->chain_offset_value_for_main_message;
		sg_chain = sgl_ptr;
		/* Prepare chain element */
		sg_chain->NextChainOffset = 0;
		if (sc->mfi_flags & (MFI_FLAGS_INVADER | MFI_FLAGS_FURY))
			sg_chain->Flags = MPI2_IEEE_SGE_FLAGS_CHAIN_ELEMENT;
		else
			sg_chain->Flags = MPI2_IEEE_SGE_FLAGS_CHAIN_ELEMENT |
			    MPI2_IEEE_SGE_FLAGS_IOCPLBNTA_ADDR;
		sg_chain->Length = (sizeof(MPI2_SGE_IO_UNION) *
		    (sge_count - sg_processed));
		sg_chain->Address = cmd->sg_frame_phys_addr;
		sgl_ptr = (pMpi25IeeeSgeChain64_t)cmd->sg_frame;
		for (; i < sge_count; i++) {
			if (sc->mfi_flags & MFI_FLAGS_SKINNY) {
				sgl_ptr->Length = os_sgl->sg_skinny[i].len;
				sgl_ptr->Address = os_sgl->sg_skinny[i].addr;
			} else {
				sgl_ptr->Length = os_sgl->sg32[i].len;
				sgl_ptr->Address = os_sgl->sg32[i].addr;
			}
			if (i == sge_count - 1 &&
			    (sc->mfi_flags &
			    (MFI_FLAGS_INVADER | MFI_FLAGS_FURY)))
				sgl_ptr->Flags =
				    MPI25_IEEE_SGE_FLAGS_END_OF_LIST;
			else
				sgl_ptr->Flags = 0;
			sgl_ptr++;
		}
	}
	return sge_count;
}

union mfi_mpi2_request_descriptor *
mfi_build_and_issue_cmd(struct mfi_softc *sc, struct mfi_command *mfi_cmd)
{
	struct mfi_cmd_tbolt *cmd;
	union mfi_mpi2_request_descriptor *req_desc = NULL;
	uint16_t index;

	cmd = mfi_tbolt_get_cmd(sc, mfi_cmd);
	if (cmd == NULL)
		return (NULL);

	index = cmd->index;
	req_desc = mfi_tbolt_get_request_descriptor(sc, index - 1);
	if (req_desc == NULL) {
		mfi_tbolt_return_cmd(sc, cmd, mfi_cmd);
		return (NULL);
	}

	if (mfi_tbolt_build_io(sc, mfi_cmd, cmd) != 0) {
		mfi_tbolt_return_cmd(sc, cmd, mfi_cmd);
		return (NULL);
	}
	req_desc->header.SMID = index;
	return req_desc;
}

union mfi_mpi2_request_descriptor *
mfi_tbolt_build_mpt_cmd(struct mfi_softc *sc, struct mfi_command *cmd)
{
	union mfi_mpi2_request_descriptor *req_desc = NULL;
	uint16_t index;

	if (mfi_build_mpt_pass_thru(sc, cmd)) {
		device_printf(sc->mfi_dev, "Couldn't build MFI pass thru "
		    "cmd\n");
		return NULL;
	}
	/* For fusion the frame_count variable is used as the SMID */
	index = cmd->cm_extra_frames;

	req_desc = mfi_tbolt_get_request_descriptor(sc, index - 1);
	if (req_desc == NULL)
		return NULL;

	bzero(req_desc, sizeof(*req_desc));
	req_desc->header.RequestFlags = (MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO <<
	    MFI_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
	req_desc->header.SMID = index;
	return req_desc;
}

int
mfi_tbolt_send_frame(struct mfi_softc *sc, struct mfi_command *cm)
{
	struct mfi_frame_header *hdr;
	uint8_t *cdb;
	union mfi_mpi2_request_descriptor *req_desc = NULL;
	int tm = mfi_polled_cmd_timeout * 1000;

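	/*
	 * Illustrative: a Thunderbolt request descriptor is a single 64-bit
	 * word whose header carries the request-type flags and the SMID
	 * assigned in mfi_tbolt_get_cmd(); it is handed to the firmware as
	 * two 32-bit register writes (MFI_ILQP for the low half, MFI_IHQP
	 * for the high half) near the bottom of this function.
	 */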
	hdr = &cm->cm_frame->header;
	cdb = cm->cm_frame->pass.cdb;
	if (sc->adpreset)
		return 1;
	if ((cm->cm_flags & MFI_CMD_POLLED) == 0) {
		cm->cm_timestamp = time_uptime;
		mfi_enqueue_busy(cm);
	} else {	/* still get interrupts for it */
		hdr->cmd_status = MFI_STAT_INVALID_STATUS;
		hdr->flags |= MFI_FRAME_DONT_POST_IN_REPLY_QUEUE;
	}

	if (hdr->cmd == MFI_CMD_PD_SCSI_IO) {
		/* check for inquiry commands coming from CLI */
		if ((req_desc = mfi_tbolt_build_mpt_cmd(sc, cm)) == NULL) {
			device_printf(sc->mfi_dev, "Mapping from MFI "
			    "to MPT failed\n");
			return 1;
		}
	} else if (hdr->cmd == MFI_CMD_LD_SCSI_IO ||
	    hdr->cmd == MFI_CMD_LD_READ || hdr->cmd == MFI_CMD_LD_WRITE) {
		cm->cm_flags |= MFI_CMD_SCSI;
		if ((req_desc = mfi_build_and_issue_cmd(sc, cm)) == NULL) {
			device_printf(sc->mfi_dev, "LDIO failed\n");
			return 1;
		}
	} else if ((req_desc = mfi_tbolt_build_mpt_cmd(sc, cm)) == NULL) {
		device_printf(sc->mfi_dev, "Mapping from MFI to MPT failed\n");
		return (1);
	}

	if (cm->cm_flags & MFI_CMD_SCSI) {
		/*
		 * LD IO needs to be posted since it doesn't get
		 * acknowledged via a status update so have the
		 * controller reply via mfi_tbolt_complete_cmd.
		 */
		hdr->flags &= ~MFI_FRAME_DONT_POST_IN_REPLY_QUEUE;
	}

	MFI_WRITE4(sc, MFI_ILQP, (req_desc->words & 0xFFFFFFFF));
	MFI_WRITE4(sc, MFI_IHQP, (req_desc->words >> 0x20));

	if ((cm->cm_flags & MFI_CMD_POLLED) == 0)
		return 0;

	/*
	 * This is a polled command, so busy-wait for it to complete.
	 *
	 * The value of hdr->cmd_status is updated directly by the hardware
	 * so there is no guarantee that mfi_tbolt_complete_cmd is called
	 * prior to this value changing.
	 */
	while (hdr->cmd_status == MFI_STAT_INVALID_STATUS) {
		DELAY(1000);
		tm -= 1;
		if (tm <= 0)
			break;
		if (cm->cm_flags & MFI_CMD_SCSI) {
			/*
			 * Force a check of the reply queue;
			 * this ensures that dump works correctly.
			 */
			mfi_tbolt_complete_cmd(sc);
		}
	}

	/* ensure the command cleanup has been processed before returning */
	mfi_tbolt_complete_cmd(sc);

	if (hdr->cmd_status == MFI_STAT_INVALID_STATUS) {
		device_printf(sc->mfi_dev, "Frame %p timed out "
		    "command 0x%X\n", hdr, cm->cm_frame->dcmd.opcode);
		return (ETIMEDOUT);
	}
	return 0;
}

static void
mfi_issue_pending_cmds_again(struct mfi_softc *sc)
{
	struct mfi_command *cm, *tmp;
	struct mfi_cmd_tbolt *cmd;

	mtx_assert(&sc->mfi_io_lock, MA_OWNED);
	TAILQ_FOREACH_REVERSE_SAFE(cm, &sc->mfi_busy, BUSYQ, cm_link, tmp) {
		cm->retry_for_fw_reset++;

		/*
		 * If a command has repeatedly been retried and keeps causing
		 * a FW reset condition, no further recoveries should be
		 * performed on the controller
		 */
		if (cm->retry_for_fw_reset == 3) {
			device_printf(sc->mfi_dev, "megaraid_sas: command %p "
			    "index=%d was tried multiple times during adapter "
			    "reset - shutting down the HBA\n", cm, cm->cm_index);
			mfi_kill_hba(sc);
			sc->hw_crit_error = 1;
			return;
		}

		mfi_remove_busy(cm);
		if ((cm->cm_flags & MFI_CMD_TBOLT) != 0) {
			if (cm->cm_extra_frames != 0 && cm->cm_extra_frames <=
			    sc->mfi_max_fw_cmds) {
				cmd = sc->mfi_cmd_pool_tbolt[cm->cm_extra_frames - 1];
				mfi_tbolt_return_cmd(sc, cmd, cm);
			} else {
				device_printf(sc->mfi_dev,
				    "Invalid extra_frames: %d detected\n",
				    cm->cm_extra_frames);
			}
		}

		if (cm->cm_frame->dcmd.opcode != MFI_DCMD_CTRL_EVENT_WAIT) {
			device_printf(sc->mfi_dev,
			    "APJ ****requeue command %p index=%d\n",
			    cm, cm->cm_index);
			mfi_requeue_ready(cm);
		} else
			mfi_release_command(cm);
	}
	mfi_startio(sc);
}

static void
mfi_kill_hba(struct mfi_softc *sc)
{
	if (sc->mfi_flags & MFI_FLAGS_TBOLT)
		MFI_WRITE4(sc, 0x00, MFI_STOP_ADP);
	else
		MFI_WRITE4(sc, MFI_IDB, MFI_STOP_ADP);
}

static void
mfi_process_fw_state_chg_isr(void *arg)
{
	struct mfi_softc *sc = (struct mfi_softc *)arg;
	int error, status;

	if (sc->adpreset == 1) {
		device_printf(sc->mfi_dev, "First stage of FW reset "
		    "initiated...\n");

		sc->mfi_adp_reset(sc);
		sc->mfi_enable_intr(sc);

		device_printf(sc->mfi_dev, "First stage of reset complete, "
		    "second stage initiated...\n");

		sc->adpreset = 2;

		/* wait about 20 seconds before starting the second init */
		for (int wait = 0; wait < 20000; wait++)
			DELAY(1000);
		device_printf(sc->mfi_dev, "Second stage of FW reset "
		    "initiated...\n");
		while ((status = MFI_READ4(sc, MFI_RSR)) & 0x04)
			;

		sc->mfi_disable_intr(sc);

		/* We expect the FW state to be READY */
		if (mfi_transition_firmware(sc)) {
			device_printf(sc->mfi_dev, "controller is not in "
			    "ready state\n");
			mfi_kill_hba(sc);
			sc->hw_crit_error = 1;
			return;
		}
		if ((error = mfi_tbolt_init_MFI_queue(sc)) != 0) {
			device_printf(sc->mfi_dev, "Failed to initialise MFI "
			    "queue\n");
			mfi_kill_hba(sc);
			sc->hw_crit_error = 1;
			return;
		}

		/* Init last reply index and max */
		MFI_WRITE4(sc, MFI_RFPI, sc->mfi_max_fw_cmds - 1);
		MFI_WRITE4(sc, MFI_RPI, sc->last_reply_idx);

		sc->mfi_enable_intr(sc);
		sc->adpreset = 0;
		if (sc->mfi_aen_cm != NULL) {
			free(sc->mfi_aen_cm->cm_data, M_MFIBUF);
			mfi_remove_busy(sc->mfi_aen_cm);
			mfi_release_command(sc->mfi_aen_cm);
			sc->mfi_aen_cm = NULL;
		}

		if (sc->mfi_map_sync_cm != NULL) {
			mfi_remove_busy(sc->mfi_map_sync_cm);
			mfi_release_command(sc->mfi_map_sync_cm);
			sc->mfi_map_sync_cm = NULL;
		}
		mfi_issue_pending_cmds_again(sc);

		/*
		 * Issuing pending commands can result in the adapter being
		 * marked dead because of too many retries.  Check for that
		 * condition before clearing the reset condition on the FW.
		 */
		if (!sc->hw_crit_error) {
			/*
			 * Initiate AEN (Asynchronous Event Notification) &
			 * Sync Map
			 */
			mfi_aen_setup(sc, sc->last_seq_num);
			mfi_tbolt_sync_map_info(sc);

			sc->issuepend_done = 1;
			device_printf(sc->mfi_dev, "second stage of reset "
			    "complete, FW is ready now.\n");
		} else {
			device_printf(sc->mfi_dev, "second stage of reset "
			    "never completed, HBA was marked offline.\n");
		}
	} else {
		device_printf(sc->mfi_dev, "mfi_process_fw_state_chg_isr "
		    "called with unhandled value:%d\n", sc->adpreset);
	}
}

/*
 * The ThunderBolt HW has an option for the driver to directly
 * access the underlying disks and operate on the RAID.  To
 * do this there needs to be a capability to keep the RAID controller
 * and driver in sync.  The FreeBSD driver does not take advantage
 * of this feature since it adds a lot of complexity and slows down
 * performance.  Performance is gained by using the controller's
 * cache, etc.
 *
 * Even though this driver doesn't access the disks directly, an
 * AEN-like command is used to inform the RAID firmware to "sync"
 * with all LDs via the MFI_DCMD_LD_MAP_GET_INFO command.  This
 * command in write mode will return when the RAID firmware has
 * detected a change to the RAID state.  Examples of this type
 * of change are removing a disk.  Once the command returns, the
 * driver needs to acknowledge this and "sync" all LDs again.
 * This repeats until we shut down.  Then we need to cancel this
 * pending command.
 *
 * If this is not done right the RAID firmware will not remove a
 * pulled drive and the RAID won't go degraded, etc.  Effectively,
 * it stops any RAID management functions from working.
 *
 * Doing another LD sync requires the use of an event since the
 * driver needs to do a mfi_wait_command and can't do that in an
 * interrupt thread.
 *
 * The driver could get the RAID state via the MFI_DCMD_LD_MAP_GET_INFO
 * command, but that requires a bunch of structure and it is simpler
 * to just do MFI_DCMD_LD_GET_LIST versus walking the RAID map.
 */
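/*
 * Sketch of the resulting sync loop (an illustrative summary of the
 * functions below, not additional logic):
 *
 *	mfi_tbolt_sync_map_info()	issue MFI_DCMD_LD_MAP_GET_INFO in
 *					write mode; it completes only when
 *					the firmware sees a RAID state change
 *	mfi_sync_map_complete()		free the data, then unless aborted
 *	mfi_queue_map_sync()		schedule the sync task
 *	mfi_handle_map_sync()		re-issue mfi_tbolt_sync_map_info()
 */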
void
mfi_tbolt_sync_map_info(struct mfi_softc *sc)
{
	int error = 0, i;
	struct mfi_command *cmd = NULL;
	struct mfi_dcmd_frame *dcmd = NULL;
	uint32_t context = 0;
	union mfi_ld_ref *ld_sync = NULL;
	size_t ld_size;
	struct mfi_frame_header *hdr;
	struct mfi_command *cm = NULL;
	struct mfi_ld_list *list = NULL;

	mtx_assert(&sc->mfi_io_lock, MA_OWNED);

	if (sc->mfi_map_sync_cm != NULL || sc->cm_map_abort)
		return;

	error = mfi_dcmd_command(sc, &cm, MFI_DCMD_LD_GET_LIST,
	    (void **)&list, sizeof(*list));
	if (error)
		goto out;

	cm->cm_flags = MFI_CMD_POLLED | MFI_CMD_DATAIN;

	if (mfi_wait_command(sc, cm) != 0) {
		device_printf(sc->mfi_dev, "Failed to get device listing\n");
		goto out;
	}

	hdr = &cm->cm_frame->header;
	if (hdr->cmd_status != MFI_STAT_OK) {
		device_printf(sc->mfi_dev, "MFI_DCMD_LD_GET_LIST failed %x\n",
		    hdr->cmd_status);
		goto out;
	}

	ld_size = sizeof(*ld_sync) * list->ld_count;
	ld_sync = (union mfi_ld_ref *)malloc(ld_size, M_MFIBUF,
	    M_NOWAIT | M_ZERO);
	if (ld_sync == NULL) {
		device_printf(sc->mfi_dev, "Failed to allocate sync\n");
		goto out;
	}
	for (i = 0; i < list->ld_count; i++)
		ld_sync[i].ref = list->ld_list[i].ld.ref;

	if ((cmd = mfi_dequeue_free(sc)) == NULL) {
		device_printf(sc->mfi_dev, "Failed to get command\n");
		free(ld_sync, M_MFIBUF);
		goto out;
	}

	context = cmd->cm_frame->header.context;
	bzero(cmd->cm_frame, sizeof(union mfi_frame));
	cmd->cm_frame->header.context = context;

	dcmd = &cmd->cm_frame->dcmd;
	bzero(dcmd->mbox, MFI_MBOX_SIZE);
	dcmd->header.cmd = MFI_CMD_DCMD;
	dcmd->header.flags = MFI_FRAME_DIR_WRITE;
	dcmd->header.timeout = 0;
	dcmd->header.data_len = ld_size;
	dcmd->header.scsi_status = 0;
	dcmd->opcode = MFI_DCMD_LD_MAP_GET_INFO;
	cmd->cm_sg = &dcmd->sgl;
	cmd->cm_total_frame_size = MFI_DCMD_FRAME_SIZE;
	cmd->cm_data = ld_sync;
	cmd->cm_private = ld_sync;

	cmd->cm_len = ld_size;
	cmd->cm_complete = mfi_sync_map_complete;
	sc->mfi_map_sync_cm = cmd;

	cmd->cm_flags = MFI_CMD_DATAOUT;
	cmd->cm_frame->dcmd.mbox[0] = list->ld_count;
	cmd->cm_frame->dcmd.mbox[1] = MFI_DCMD_MBOX_PEND_FLAG;

	if ((error = mfi_mapcmd(sc, cmd)) != 0) {
		device_printf(sc->mfi_dev, "failed to send map sync\n");
		free(ld_sync, M_MFIBUF);
		sc->mfi_map_sync_cm = NULL;
		mfi_release_command(cmd);
		goto out;
	}

out:
	if (list)
		free(list, M_MFIBUF);
	if (cm)
		mfi_release_command(cm);
}

static void
mfi_sync_map_complete(struct mfi_command *cm)
{
	struct mfi_frame_header *hdr;
	struct mfi_softc *sc;
	int aborted = 0;

	sc = cm->cm_sc;
	mtx_assert(&sc->mfi_io_lock, MA_OWNED);

	hdr = &cm->cm_frame->header;

	if (sc->mfi_map_sync_cm == NULL)
		return;

	if (sc->cm_map_abort ||
	    hdr->cmd_status == MFI_STAT_INVALID_STATUS) {
		sc->cm_map_abort = 0;
		aborted = 1;
	}

	free(cm->cm_data, M_MFIBUF);
	wakeup(&sc->mfi_map_sync_cm);
	sc->mfi_map_sync_cm = NULL;
	mfi_release_command(cm);

	/* set it up again so the driver can catch more events */
	if (!aborted)
		mfi_queue_map_sync(sc);
}
static void
mfi_queue_map_sync(struct mfi_softc *sc)
{
	mtx_assert(&sc->mfi_io_lock, MA_OWNED);
	taskqueue_enqueue(taskqueue_swi, &sc->mfi_map_sync_task);
}

void
mfi_handle_map_sync(void *context, int pending)
{
	struct mfi_softc *sc;

	sc = context;
	mtx_lock(&sc->mfi_io_lock);
	mfi_tbolt_sync_map_info(sc);
	mtx_unlock(&sc->mfi_io_lock);
}
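/*
 * Illustrative summary of the Thunderbolt fast-path command flow
 * implemented in this file (a reading aid, not additional logic):
 *
 *	mfi_tbolt_send_frame()
 *	    -> mfi_build_and_issue_cmd() or mfi_tbolt_build_mpt_cmd()
 *		-> mfi_tbolt_get_cmd()		reserve tbolt cmd + SMID
 *		-> mfi_tbolt_build_io()		fill MPI2 RAID SCSI IO + SGL
 *		-> mfi_tbolt_get_request_descriptor()
 *	    -> MFI_WRITE4(MFI_ILQP/MFI_IHQP)	post descriptor to firmware
 *	mfi_intr_tbolt()
 *	    -> mfi_tbolt_complete_cmd()		reap reply descriptors
 */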