/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *            Copyright 1994-2009 The FreeBSD Project.
 *            All rights reserved.
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE FREEBSD PROJECT ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FREEBSD PROJECT OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
 * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * The views and conclusions contained in the software and documentation
 * are those of the authors and should not be interpreted as representing
 * official policies, either expressed or implied, of the FreeBSD Project.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_mfi.h"

#include <sys/param.h>
#include <sys/types.h>
#include <sys/kernel.h>
#include <sys/selinfo.h>
#include <sys/bus.h>
#include <sys/conf.h>
#include <sys/bio.h>
#include <sys/ioccom.h>
#include <sys/eventhandler.h>
#include <sys/callout.h>
#include <sys/uio.h>
#include <machine/bus.h>
#include <sys/sysctl.h>
#include <sys/systm.h>
#include <sys/malloc.h>

#include <dev/mfi/mfireg.h>
#include <dev/mfi/mfi_ioctl.h>
#include <dev/mfi/mfivar.h>

struct mfi_cmd_tbolt *mfi_tbolt_get_cmd(struct mfi_softc *sc,
    struct mfi_command *);
union mfi_mpi2_request_descriptor *
mfi_tbolt_get_request_descriptor(struct mfi_softc *sc, uint16_t index);
void mfi_tbolt_complete_cmd(struct mfi_softc *sc);
int mfi_tbolt_build_io(struct mfi_softc *sc, struct mfi_command *mfi_cmd,
    struct mfi_cmd_tbolt *cmd);
union mfi_mpi2_request_descriptor *mfi_tbolt_build_mpt_cmd(struct mfi_softc
    *sc, struct mfi_command *cmd);
uint8_t mfi_build_mpt_pass_thru(struct mfi_softc *sc,
    struct mfi_command *mfi_cmd);
union mfi_mpi2_request_descriptor *mfi_build_and_issue_cmd(struct mfi_softc
    *sc, struct mfi_command *mfi_cmd);
void mfi_tbolt_build_ldio(struct mfi_softc *sc, struct mfi_command *mfi_cmd,
    struct mfi_cmd_tbolt *cmd);
static int mfi_tbolt_make_sgl(struct mfi_softc *sc, struct mfi_command
    *mfi_cmd, pMpi25IeeeSgeChain64_t sgl_ptr, struct mfi_cmd_tbolt *cmd);
void map_tbolt_cmd_status(struct mfi_command *mfi_cmd, uint8_t status,
    uint8_t ext_status);
static void mfi_issue_pending_cmds_again(struct mfi_softc *sc);
static void mfi_kill_hba(struct mfi_softc *sc);
static void mfi_process_fw_state_chg_isr(void *arg);
static void mfi_sync_map_complete(struct mfi_command *);
static void mfi_queue_map_sync(struct mfi_softc *sc);

#define MFI_FUSION_ENABLE_INTERRUPT_MASK	(0x00000008)

extern int	mfi_polled_cmd_timeout;
static int	mfi_fw_reset_test = 0;
#ifdef MFI_DEBUG
SYSCTL_INT(_hw_mfi, OID_AUTO, fw_reset_test, CTLFLAG_RWTUN, &mfi_fw_reset_test,
    0, "Force a firmware reset condition");
#endif

void
mfi_tbolt_enable_intr_ppc(struct mfi_softc *sc)
{
	MFI_WRITE4(sc, MFI_OMSK, ~MFI_FUSION_ENABLE_INTERRUPT_MASK);
	MFI_READ4(sc, MFI_OMSK);
}

void
mfi_tbolt_disable_intr_ppc(struct mfi_softc *sc)
{
	MFI_WRITE4(sc, MFI_OMSK, 0xFFFFFFFF);
	MFI_READ4(sc, MFI_OMSK);
}

int32_t
mfi_tbolt_read_fw_status_ppc(struct mfi_softc *sc)
{
	return MFI_READ4(sc, MFI_OSP0);
}

int32_t
mfi_tbolt_check_clear_intr_ppc(struct mfi_softc *sc)
{
	int32_t status, mfi_status = 0;

	status = MFI_READ4(sc, MFI_OSTS);

	if (status & 1) {
		MFI_WRITE4(sc, MFI_OSTS, status);
		MFI_READ4(sc, MFI_OSTS);
		if (status & MFI_STATE_CHANGE_INTERRUPT) {
			mfi_status |= MFI_FIRMWARE_STATE_CHANGE;
		}

		return mfi_status;
	}
	if (!(status & MFI_FUSION_ENABLE_INTERRUPT_MASK))
		return 1;

	MFI_READ4(sc, MFI_OSTS);
	return 0;
}

void
mfi_tbolt_issue_cmd_ppc(struct mfi_softc *sc, bus_addr_t bus_add,
    uint32_t frame_cnt)
{
	bus_add |= (MFI_REQ_DESCRIPT_FLAGS_MFA
	    << MFI_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
	MFI_WRITE4(sc, MFI_IQPL, (uint32_t)bus_add);
	MFI_WRITE4(sc, MFI_IQPH, (uint32_t)((uint64_t)bus_add >> 32));
}

/*
 * mfi_tbolt_adp_reset - For controller reset
 * @regs: MFI register set
 */
int
mfi_tbolt_adp_reset(struct mfi_softc *sc)
{
	int retry = 0, i = 0, j = 0;
	int HostDiag;

	/*
	 * Write the diagnostic unlock key sequence to the write-sequence
	 * register so the host diagnostic register becomes writable.
	 */
	MFI_WRITE4(sc, MFI_WSR, 0xF);
	MFI_WRITE4(sc, MFI_WSR, 4);
	MFI_WRITE4(sc, MFI_WSR, 0xB);
	MFI_WRITE4(sc, MFI_WSR, 2);
	MFI_WRITE4(sc, MFI_WSR, 7);
	MFI_WRITE4(sc, MFI_WSR, 0xD);

	for (i = 0; i < 10000; i++)
		;

	HostDiag = (uint32_t)MFI_READ4(sc, MFI_HDR);

	while (!(HostDiag & DIAG_WRITE_ENABLE)) {
		for (i = 0; i < 1000; i++)
			;
		HostDiag = (uint32_t)MFI_READ4(sc, MFI_HDR);
		device_printf(sc->mfi_dev, "ADP_RESET_TBOLT: retry time=%d, "
		    "hostdiag=%#x\n", retry, HostDiag);

		if (retry++ >= 100)
			return 1;
	}

	device_printf(sc->mfi_dev, "ADP_RESET_TBOLT: HostDiag=%#x\n", HostDiag);

	MFI_WRITE4(sc, MFI_HDR, (HostDiag | DIAG_RESET_ADAPTER));

	/*
	 * Use a separate counter for the inner delay loop; reusing 'i' for
	 * both loops made the outer loop terminate after one pass.
	 */
	for (j = 0; j < 10; j++) {
		for (i = 0; i < 10000; i++)
			;
	}

	HostDiag = (uint32_t)MFI_READ4(sc, MFI_RSR);
	while (HostDiag & DIAG_RESET_ADAPTER) {
		for (i = 0; i < 1000; i++)
			;
		HostDiag = (uint32_t)MFI_READ4(sc, MFI_RSR);
		device_printf(sc->mfi_dev, "ADP_RESET_TBOLT: retry time=%d, "
		    "hostdiag=%#x\n", retry, HostDiag);

		if (retry++ >= 1000)
			return 1;
	}
	return 0;
}
/*
 * This routine initializes Thunderbolt-specific device information.
 */
void
mfi_tbolt_init_globals(struct mfi_softc *sc)
{
	/* Initialize single reply size and Message size */
	sc->reply_size = MEGASAS_THUNDERBOLT_REPLY_SIZE;
	sc->raid_io_msg_size = MEGASAS_THUNDERBOLT_NEW_MSG_SIZE;

	/*
	 * Calculate how many SGEs are allowed in an allocated main message:
	 * (size of the message - RAID SCSI IO message size (less the SGE))
	 *     / size of SGE
	 * (0x100 - (0x90 - 0x10)) / 0x10 = 8
	 */
	sc->max_SGEs_in_main_message =
	    (uint8_t)((sc->raid_io_msg_size
	    - (sizeof(struct mfi_mpi2_request_raid_scsi_io)
	    - sizeof(MPI2_SGE_IO_UNION))) / sizeof(MPI2_SGE_IO_UNION));
	/*
	 * (Command frame size allocated in SRB ext - RAID SCSI IO message
	 * size) / size of SGL:
	 * (1280 - 256) / 16 = 64
	 */
	sc->max_SGEs_in_chain_message = (MR_COMMAND_SIZE
	    - sc->raid_io_msg_size) / sizeof(MPI2_SGE_IO_UNION);
	/*
	 * (0x08 - 1) + 0x40 = 0x47; one more is reserved for command
	 * coalescing, giving 0x46.
	 */
	sc->mfi_max_sge = (sc->max_SGEs_in_main_message - 1)
	    + sc->max_SGEs_in_chain_message - 1;
	/*
	 * This is the offset in number of 4 * 32bit words to the next chain:
	 * (0x100 - 0x10) / 0x10 = 0xF (15)
	 */
	sc->chain_offset_value_for_main_message = (sc->raid_io_msg_size
	    - sizeof(MPI2_SGE_IO_UNION)) / 16;
	sc->chain_offset_value_for_mpt_ptmsg
	    = offsetof(struct mfi_mpi2_request_raid_scsi_io, SGL) / 16;
	sc->mfi_cmd_pool_tbolt = NULL;
	sc->request_desc_pool = NULL;
}

/*
 * This function calculates the memory requirement for the Thunderbolt
 * controller and returns the total required memory in bytes.
 */
uint32_t
mfi_tbolt_get_memory_requirement(struct mfi_softc *sc)
{
	uint32_t size;

	size = MEGASAS_THUNDERBOLT_MSG_ALLIGNMENT;	/* for Alignment */
	size += sc->raid_io_msg_size * (sc->mfi_max_fw_cmds + 1);
	size += sc->reply_size * sc->mfi_max_fw_cmds;
	/* this is for SGL's */
	size += MEGASAS_MAX_SZ_CHAIN_FRAME * sc->mfi_max_fw_cmds;
	return size;
}
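/*
 * Worked example of the computation above.  The sizes are the ones
 * implied by the comments in mfi_tbolt_init_globals() (0x100-byte
 * request frames, 0x400-byte chain frames) plus an illustrative
 * 0x10-byte reply descriptor and a hypothetical firmware limit of
 * 1024 commands; the real values come from the firmware at attach time:
 *
 *	size = 0x100			alignment slack
 *	     + 0x100 * (1024 + 1)	request frames (SMID 0 unused)
 *	     + 0x10  * 1024		reply descriptors
 *	     + 0x400 * 1024		chain/SG frames
 *	     = 1,327,616 bytes (~1.3 MB) of contiguous DMA memory.
 */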
/*
 * Description:
 *	This function will prepare message pools for the Thunderbolt
 *	controller.
 * Arguments:
 *	DevExt - HBA miniport driver's adapter data storage structure
 *	pMemLocation - start of the memory allocated for Thunderbolt.
 * Return Value:
 *	TRUE if successful
 *	FALSE if failed
 */
int
mfi_tbolt_init_desc_pool(struct mfi_softc *sc, uint8_t* mem_location,
    uint32_t tbolt_contg_length)
{
	uint32_t offset = 0;
	uint8_t *addr = mem_location;

	/* Request Descriptor Base physical Address */

	/* For Request Descriptors Virtual Memory */
	/* Initialise the aligned IO Frames Virtual Memory Pointer */
	if (((uintptr_t)addr) & (0xFF)) {
		addr = &addr[sc->raid_io_msg_size];
		addr = (uint8_t *)((uintptr_t)addr & (~0xFF));
		sc->request_message_pool_align = addr;
	} else
		sc->request_message_pool_align = addr;

	offset = sc->request_message_pool_align - sc->request_message_pool;
	sc->request_msg_busaddr = sc->mfi_tb_busaddr + offset;

	/* DJA XXX should this be bus dma ??? */
	/* Skip request message pool */
	addr = &addr[sc->raid_io_msg_size * (sc->mfi_max_fw_cmds + 1)];
	/* Reply Frame Pool is initialized */
	sc->reply_frame_pool = (struct mfi_mpi2_reply_header *) addr;
	if (((uintptr_t)addr) & (0xFF)) {
		addr = &addr[sc->reply_size];
		addr = (uint8_t *)((uintptr_t)addr & (~0xFF));
	}
	sc->reply_frame_pool_align
	    = (struct mfi_mpi2_reply_header *)addr;

	offset = (uintptr_t)sc->reply_frame_pool_align
	    - (uintptr_t)sc->request_message_pool;
	sc->reply_frame_busaddr = sc->mfi_tb_busaddr + offset;

	/* Skip Reply Frame Pool */
	addr += sc->reply_size * sc->mfi_max_fw_cmds;
	sc->reply_pool_limit = addr;

	/* initializing reply address to 0xFFFFFFFF */
	memset((uint8_t *)sc->reply_frame_pool, 0xFF,
	    (sc->reply_size * sc->mfi_max_fw_cmds));

	offset = sc->reply_size * sc->mfi_max_fw_cmds;
	sc->sg_frame_busaddr = sc->reply_frame_busaddr + offset;
	/* initialize the last_reply_idx to 0 */
	sc->last_reply_idx = 0;
	MFI_WRITE4(sc, MFI_RFPI, sc->mfi_max_fw_cmds - 1);
	MFI_WRITE4(sc, MFI_RPI, sc->last_reply_idx);
	offset = (sc->sg_frame_busaddr + (MEGASAS_MAX_SZ_CHAIN_FRAME *
	    sc->mfi_max_fw_cmds)) - sc->mfi_tb_busaddr;
	if (offset > tbolt_contg_length)
		device_printf(sc->mfi_dev, "Error: Initialized more than "
		    "allocated\n");
	return 0;
}
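/*
 * A note on the 256-byte alignment done above: advancing the pointer by
 * one full message and then masking works because raid_io_msg_size is
 * at least 0x100.  The generic align-up idiom, shown here only as an
 * illustrative sketch with a hypothetical address p, gives the same
 * result:
 *
 *	uintptr_t p = 0x1004;
 *	p = (p + 0xFF) & ~(uintptr_t)0xFF;	p is now 0x1100
 *
 * while an already aligned 0x1000 would be left unchanged.
 */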
/*
 * This routine prepares and issues the INIT2 frame to the firmware.
 */
int
mfi_tbolt_init_MFI_queue(struct mfi_softc *sc)
{
	struct MPI2_IOC_INIT_REQUEST *mpi2IocInit;
	struct mfi_init_frame *mfi_init;
	uintptr_t offset = 0;
	bus_addr_t phyAddress;
	MFI_ADDRESS *mfiAddressTemp;
	struct mfi_command *cm, cmd_tmp;
	int error;

	mtx_assert(&sc->mfi_io_lock, MA_OWNED);

	/* Check if initialization is already completed */
	if (sc->MFA_enabled) {
		device_printf(sc->mfi_dev, "tbolt_init already initialised!\n");
		return 1;
	}

	if ((cm = mfi_dequeue_free(sc)) == NULL) {
		device_printf(sc->mfi_dev, "tbolt_init failed to get command"
		    " entry!\n");
		return (EBUSY);
	}

	cmd_tmp.cm_frame = cm->cm_frame;
	cmd_tmp.cm_frame_busaddr = cm->cm_frame_busaddr;
	cmd_tmp.cm_dmamap = cm->cm_dmamap;

	cm->cm_frame = (union mfi_frame *)((uintptr_t)sc->mfi_tb_init);
	cm->cm_frame_busaddr = sc->mfi_tb_init_busaddr;
	cm->cm_dmamap = sc->mfi_tb_init_dmamap;
	cm->cm_frame->header.context = 0;

	/*
	 * Abuse the SG list area of the frame to hold the init_qinfo
	 * object.
	 */
	mfi_init = &cm->cm_frame->init;

	mpi2IocInit = (struct MPI2_IOC_INIT_REQUEST *)sc->mfi_tb_ioc_init_desc;
	bzero(mpi2IocInit, sizeof(struct MPI2_IOC_INIT_REQUEST));
	mpi2IocInit->Function = MPI2_FUNCTION_IOC_INIT;
	mpi2IocInit->WhoInit = MPI2_WHOINIT_HOST_DRIVER;

	/* set MsgVersion and HeaderVersion host driver was built with */
	mpi2IocInit->MsgVersion = MPI2_VERSION;
	mpi2IocInit->HeaderVersion = MPI2_HEADER_VERSION;
	mpi2IocInit->SystemRequestFrameSize = sc->raid_io_msg_size / 4;
	mpi2IocInit->ReplyDescriptorPostQueueDepth
	    = (uint16_t)sc->mfi_max_fw_cmds;
	mpi2IocInit->ReplyFreeQueueDepth = 0;	/* Not supported by MR. */

	/* Get physical address of reply frame pool */
	offset = (uintptr_t) sc->reply_frame_pool_align
	    - (uintptr_t)sc->request_message_pool;
	phyAddress = sc->mfi_tb_busaddr + offset;
	mfiAddressTemp =
	    (MFI_ADDRESS *)&mpi2IocInit->ReplyDescriptorPostQueueAddress;
	mfiAddressTemp->u.addressLow = (uint32_t)phyAddress;
	mfiAddressTemp->u.addressHigh = (uint32_t)((uint64_t)phyAddress >> 32);

	/* Get physical address of request message pool */
	offset = sc->request_message_pool_align - sc->request_message_pool;
	phyAddress = sc->mfi_tb_busaddr + offset;
	mfiAddressTemp = (MFI_ADDRESS *)&mpi2IocInit->SystemRequestFrameBaseAddress;
	mfiAddressTemp->u.addressLow = (uint32_t)phyAddress;
	mfiAddressTemp->u.addressHigh = (uint32_t)((uint64_t)phyAddress >> 32);
	mpi2IocInit->ReplyFreeQueueAddress = 0;	/* Not supported by MR. */
	mpi2IocInit->TimeStamp = time_uptime;

	if (sc->verbuf) {
		snprintf((char *)sc->verbuf, strlen(MEGASAS_VERSION) + 2, "%s\n",
		    MEGASAS_VERSION);
		mfi_init->driver_ver_lo = (uint32_t)sc->verbuf_h_busaddr;
		mfi_init->driver_ver_hi =
		    (uint32_t)((uint64_t)sc->verbuf_h_busaddr >> 32);
	}
	/* Get the physical address of the mpi2 ioc init command */
	phyAddress = sc->mfi_tb_ioc_init_busaddr;
	mfi_init->qinfo_new_addr_lo = (uint32_t)phyAddress;
	mfi_init->qinfo_new_addr_hi = (uint32_t)((uint64_t)phyAddress >> 32);
	mfi_init->header.flags |= MFI_FRAME_DONT_POST_IN_REPLY_QUEUE;

	mfi_init->header.cmd = MFI_CMD_INIT;
	mfi_init->header.data_len = sizeof(struct MPI2_IOC_INIT_REQUEST);
	mfi_init->header.cmd_status = MFI_STAT_INVALID_STATUS;

	cm->cm_data = NULL;
	cm->cm_flags |= MFI_CMD_POLLED;
	cm->cm_timestamp = time_uptime;
	if ((error = mfi_mapcmd(sc, cm)) != 0) {
		device_printf(sc->mfi_dev, "failed to send IOC init2 "
		    "command %d at %lx\n", error, (long)cm->cm_frame_busaddr);
		goto out;
	}

	if (mfi_init->header.cmd_status == MFI_STAT_OK) {
		sc->MFA_enabled = 1;
	} else {
		device_printf(sc->mfi_dev, "Init command failed %#x\n",
		    mfi_init->header.cmd_status);
		error = mfi_init->header.cmd_status;
		goto out;
	}

out:
	cm->cm_frame = cmd_tmp.cm_frame;
	cm->cm_frame_busaddr = cmd_tmp.cm_frame_busaddr;
	cm->cm_dmamap = cmd_tmp.cm_dmamap;
	mfi_release_command(cm);

	return (error);
}
int
mfi_tbolt_alloc_cmd(struct mfi_softc *sc)
{
	struct mfi_cmd_tbolt *cmd;
	bus_addr_t io_req_base_phys;
	uint8_t *io_req_base;
	int i = 0, j = 0, offset = 0;

	/*
	 * sc->mfi_cmd_pool_tbolt is an array of struct mfi_cmd_tbolt pointers.
	 * Allocate the dynamic array first and then allocate individual
	 * commands.
	 */
	sc->request_desc_pool = malloc(sizeof(
	    union mfi_mpi2_request_descriptor) * sc->mfi_max_fw_cmds,
	    M_MFIBUF, M_NOWAIT|M_ZERO);

	if (sc->request_desc_pool == NULL) {
		device_printf(sc->mfi_dev, "Could not alloc "
		    "memory for request_desc_pool\n");
		return (ENOMEM);
	}

	sc->mfi_cmd_pool_tbolt = malloc(sizeof(struct mfi_cmd_tbolt*)
	    * sc->mfi_max_fw_cmds, M_MFIBUF, M_NOWAIT|M_ZERO);

	if (sc->mfi_cmd_pool_tbolt == NULL) {
		free(sc->request_desc_pool, M_MFIBUF);
		device_printf(sc->mfi_dev, "Could not alloc "
		    "memory for cmd_pool_tbolt\n");
		return (ENOMEM);
	}

	for (i = 0; i < sc->mfi_max_fw_cmds; i++) {
		sc->mfi_cmd_pool_tbolt[i] = malloc(sizeof(
		    struct mfi_cmd_tbolt), M_MFIBUF, M_NOWAIT|M_ZERO);

		if (!sc->mfi_cmd_pool_tbolt[i]) {
			device_printf(sc->mfi_dev, "Could not alloc "
			    "cmd_pool_tbolt entry\n");

			for (j = 0; j < i; j++)
				free(sc->mfi_cmd_pool_tbolt[j], M_MFIBUF);

			free(sc->request_desc_pool, M_MFIBUF);
			sc->request_desc_pool = NULL;
			free(sc->mfi_cmd_pool_tbolt, M_MFIBUF);
			sc->mfi_cmd_pool_tbolt = NULL;

			return (ENOMEM);
		}
	}

	/*
	 * The first 256 bytes (SMID 0) are not used.  Don't add them to
	 * the command list.
	 */
	io_req_base = sc->request_message_pool_align
	    + MEGASAS_THUNDERBOLT_NEW_MSG_SIZE;
	io_req_base_phys = sc->request_msg_busaddr
	    + MEGASAS_THUNDERBOLT_NEW_MSG_SIZE;

	/*
	 * Add all the commands to the command pool (instance->cmd_pool).
	 * SMID 0 is reserved, so SMIDs/indices start from 1.
	 */
	for (i = 0; i < sc->mfi_max_fw_cmds; i++) {
		cmd = sc->mfi_cmd_pool_tbolt[i];
		offset = MEGASAS_THUNDERBOLT_NEW_MSG_SIZE * i;
		cmd->index = i + 1;
		cmd->request_desc = (union mfi_mpi2_request_descriptor *)
		    (sc->request_desc_pool + i);
		cmd->io_request = (struct mfi_mpi2_request_raid_scsi_io *)
		    (io_req_base + offset);
		cmd->io_request_phys_addr = io_req_base_phys + offset;
		cmd->sg_frame = (MPI2_SGE_IO_UNION *)(sc->reply_pool_limit
		    + i * MEGASAS_MAX_SZ_CHAIN_FRAME);
		cmd->sg_frame_phys_addr = sc->sg_frame_busaddr + i
		    * MEGASAS_MAX_SZ_CHAIN_FRAME;
		cmd->sync_cmd_idx = sc->mfi_max_fw_cmds;

		TAILQ_INSERT_TAIL(&(sc->mfi_cmd_tbolt_tqh), cmd, next);
	}
	return 0;
}
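/*
 * Illustration of the SMID <-> pool-index mapping established above (a
 * restatement of the existing invariants, not new driver state):
 *
 *	mfi_cmd_pool_tbolt[0]   -> cmd->index (SMID) 1, request frame 1
 *	mfi_cmd_pool_tbolt[1]   -> cmd->index (SMID) 2, request frame 2
 *	...
 *	mfi_cmd_pool_tbolt[N-1] -> SMID N
 *
 * so a completion carrying SMID s resolves to
 * sc->mfi_cmd_pool_tbolt[s - 1], which is exactly the lookup that
 * mfi_tbolt_complete_cmd() performs.
 */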
int
mfi_tbolt_reset(struct mfi_softc *sc)
{
	uint32_t fw_state;

	mtx_lock(&sc->mfi_io_lock);
	if (sc->hw_crit_error) {
		device_printf(sc->mfi_dev, "HW CRITICAL ERROR\n");
		mtx_unlock(&sc->mfi_io_lock);
		return 1;
	}

	if (sc->mfi_flags & MFI_FLAGS_TBOLT) {
		fw_state = sc->mfi_read_fw_status(sc);
		if ((fw_state & MFI_FWSTATE_FAULT) == MFI_FWSTATE_FAULT ||
		    mfi_fw_reset_test) {
			if ((sc->disableOnlineCtrlReset == 0)
			    && (sc->adpreset == 0)) {
				device_printf(sc->mfi_dev, "Adapter RESET "
				    "condition is detected\n");
				sc->adpreset = 1;
				sc->issuepend_done = 0;
				sc->MFA_enabled = 0;
				sc->last_reply_idx = 0;
				mfi_process_fw_state_chg_isr((void *) sc);
			}
			mtx_unlock(&sc->mfi_io_lock);
			return 0;
		}
	}
	mtx_unlock(&sc->mfi_io_lock);
	return 1;
}

/*
 * mfi_intr_tbolt - isr entry point
 */
void
mfi_intr_tbolt(void *arg)
{
	struct mfi_softc *sc = (struct mfi_softc *)arg;

	if (sc->mfi_check_clear_intr(sc) == 1) {
		return;
	}
	if (sc->mfi_detaching)
		return;
	mtx_lock(&sc->mfi_io_lock);
	mfi_tbolt_complete_cmd(sc);
	sc->mfi_flags &= ~MFI_FLAGS_QFRZN;
	mfi_startio(sc);
	mtx_unlock(&sc->mfi_io_lock);
	return;
}

/*
 * map_tbolt_cmd_status - Maps FW cmd status to OS cmd status
 * @mfi_cmd	: Pointer to cmd
 * @status	: status of cmd returned by FW
 * @ext_status	: ext status of cmd returned by FW
 */
void
map_tbolt_cmd_status(struct mfi_command *mfi_cmd, uint8_t status,
    uint8_t ext_status)
{
	switch (status) {
	case MFI_STAT_OK:
		mfi_cmd->cm_frame->header.cmd_status = MFI_STAT_OK;
		mfi_cmd->cm_frame->dcmd.header.cmd_status = MFI_STAT_OK;
		mfi_cmd->cm_error = MFI_STAT_OK;
		break;

	case MFI_STAT_SCSI_IO_FAILED:
	case MFI_STAT_LD_INIT_IN_PROGRESS:
		mfi_cmd->cm_frame->header.cmd_status = status;
		mfi_cmd->cm_frame->header.scsi_status = ext_status;
		mfi_cmd->cm_frame->dcmd.header.cmd_status = status;
		mfi_cmd->cm_frame->dcmd.header.scsi_status
		    = ext_status;
		break;

	case MFI_STAT_SCSI_DONE_WITH_ERROR:
		mfi_cmd->cm_frame->header.cmd_status = ext_status;
		mfi_cmd->cm_frame->dcmd.header.cmd_status = ext_status;
		break;

	case MFI_STAT_LD_OFFLINE:
	case MFI_STAT_DEVICE_NOT_FOUND:
		mfi_cmd->cm_frame->header.cmd_status = status;
		mfi_cmd->cm_frame->dcmd.header.cmd_status = status;
		break;

	default:
		mfi_cmd->cm_frame->header.cmd_status = status;
		mfi_cmd->cm_frame->dcmd.header.cmd_status = status;
		break;
	}
}

/*
 * mfi_tbolt_return_cmd - Return a cmd to the free command pool
 * @sc		: Adapter soft state
 * @tbolt_cmd	: Thunderbolt command packet to return to the free pool
 * @mfi_cmd	: Owning MFI command packet
 */
void
mfi_tbolt_return_cmd(struct mfi_softc *sc, struct mfi_cmd_tbolt *tbolt_cmd,
    struct mfi_command *mfi_cmd)
{
	mtx_assert(&sc->mfi_io_lock, MA_OWNED);

	mfi_cmd->cm_flags &= ~MFI_CMD_TBOLT;
	mfi_cmd->cm_extra_frames = 0;
	tbolt_cmd->sync_cmd_idx = sc->mfi_max_fw_cmds;

	TAILQ_INSERT_TAIL(&sc->mfi_cmd_tbolt_tqh, tbolt_cmd, next);
}
void
mfi_tbolt_complete_cmd(struct mfi_softc *sc)
{
	struct mfi_mpi2_reply_header *desc, *reply_desc;
	struct mfi_command *cmd_mfi;	/* For MFA Cmds */
	struct mfi_cmd_tbolt *cmd_tbolt;
	uint16_t smid;
	uint8_t reply_descript_type;
	struct mfi_mpi2_request_raid_scsi_io *scsi_io_req;
	uint32_t status, extStatus;
	uint16_t num_completed;
	union desc_value val;

	mtx_assert(&sc->mfi_io_lock, MA_OWNED);

	desc = (struct mfi_mpi2_reply_header *)
	    ((uintptr_t)sc->reply_frame_pool_align
	    + sc->last_reply_idx * sc->reply_size);
	reply_desc = desc;

	if (reply_desc == NULL) {
		device_printf(sc->mfi_dev, "reply desc is NULL!!\n");
		return;
	}

	reply_descript_type = reply_desc->ReplyFlags
	    & MPI2_RPY_DESCRIPT_FLAGS_TYPE_MASK;
	if (reply_descript_type == MPI2_RPY_DESCRIPT_FLAGS_UNUSED)
		return;

	num_completed = 0;
	val.word = ((union mfi_mpi2_reply_descriptor *)desc)->words;

	/* Read Reply descriptor */
	while ((val.u.low != 0xFFFFFFFF) && (val.u.high != 0xFFFFFFFF)) {
		smid = reply_desc->SMID;
		if (smid == 0 || smid > sc->mfi_max_fw_cmds) {
			device_printf(sc->mfi_dev, "smid is %d cannot "
			    "proceed - skipping\n", smid);
			goto next;
		}
		cmd_tbolt = sc->mfi_cmd_pool_tbolt[smid - 1];
		if (cmd_tbolt->sync_cmd_idx == sc->mfi_max_fw_cmds) {
			device_printf(sc->mfi_dev, "cmd_tbolt %p "
			    "has invalid sync_cmd_idx=%d - skipping\n",
			    cmd_tbolt, cmd_tbolt->sync_cmd_idx);
			goto next;
		}
		cmd_mfi = &sc->mfi_commands[cmd_tbolt->sync_cmd_idx];
		scsi_io_req = cmd_tbolt->io_request;

		status = cmd_mfi->cm_frame->dcmd.header.cmd_status;
		extStatus = cmd_mfi->cm_frame->dcmd.header.scsi_status;
		map_tbolt_cmd_status(cmd_mfi, status, extStatus);

		/* mfi_tbolt_return_cmd is handled by mfi complete / return */
		if ((cmd_mfi->cm_flags & MFI_CMD_SCSI) != 0 &&
		    (cmd_mfi->cm_flags & MFI_CMD_POLLED) != 0) {
			/* polled LD/SYSPD IO command */
			/* XXX mark okay for now DJA */
			cmd_mfi->cm_frame->header.cmd_status = MFI_STAT_OK;

		} else {
			/* remove command from busy queue if not polled */
			if ((cmd_mfi->cm_flags & MFI_ON_MFIQ_BUSY) != 0)
				mfi_remove_busy(cmd_mfi);

			/* complete the command */
			mfi_complete(sc, cmd_mfi);
		}

next:
		sc->last_reply_idx++;
		if (sc->last_reply_idx >= sc->mfi_max_fw_cmds) {
			MFI_WRITE4(sc, MFI_RPI, sc->last_reply_idx);
			sc->last_reply_idx = 0;
		}

		/* Set the reply descriptor back to all 0xFF bytes */
		((union mfi_mpi2_reply_descriptor *)desc)->words =
		    ~((uint64_t)0x00);

		num_completed++;

		/* Get the next reply descriptor */
		desc = (struct mfi_mpi2_reply_header *)
		    ((uintptr_t)sc->reply_frame_pool_align
		    + sc->last_reply_idx * sc->reply_size);
		reply_desc = desc;
		val.word = ((union mfi_mpi2_reply_descriptor *)desc)->words;
		reply_descript_type = reply_desc->ReplyFlags
		    & MPI2_RPY_DESCRIPT_FLAGS_TYPE_MASK;
		if (reply_descript_type == MPI2_RPY_DESCRIPT_FLAGS_UNUSED)
			break;
	}

	if (!num_completed)
		return;

	/* update replyIndex to FW */
	if (sc->last_reply_idx)
		MFI_WRITE4(sc, MFI_RPI, sc->last_reply_idx);

	return;
}
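/*
 * Compressed sketch of the reply-ring protocol implemented by
 * mfi_tbolt_complete_cmd() above, under the same assumptions the code
 * makes (reply_size-sized slots, an all-ones descriptor meaning
 * "unused"); the names are local to the sketch:
 *
 *	slot = &ring[last_reply_idx];
 *	while (slot->words != ~0ULL) {           new completion?
 *		handle(slot->SMID);
 *		slot->words = ~0ULL;             hand the slot back
 *		if (++last_reply_idx >= depth) { wrapping: publish early
 *			MFI_WRITE4(sc, MFI_RPI, last_reply_idx);
 *			last_reply_idx = 0;
 *		}
 *		slot = &ring[last_reply_idx];
 *	}
 *	MFI_WRITE4(sc, MFI_RPI, last_reply_idx); publish consumer index
 */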
/*
 * mfi_tbolt_get_cmd - Get a command from the free pool
 * @sc		: Adapter soft state
 * @mfi_cmd	: Owning MFI command
 *
 * Returns a free command from the pool.
 */
struct mfi_cmd_tbolt *
mfi_tbolt_get_cmd(struct mfi_softc *sc, struct mfi_command *mfi_cmd)
{
	struct mfi_cmd_tbolt *cmd = NULL;

	mtx_assert(&sc->mfi_io_lock, MA_OWNED);

	if ((cmd = TAILQ_FIRST(&sc->mfi_cmd_tbolt_tqh)) == NULL)
		return (NULL);
	TAILQ_REMOVE(&sc->mfi_cmd_tbolt_tqh, cmd, next);
	memset((uint8_t *)cmd->sg_frame, 0, MEGASAS_MAX_SZ_CHAIN_FRAME);
	memset((uint8_t *)cmd->io_request, 0,
	    MEGASAS_THUNDERBOLT_NEW_MSG_SIZE);

	cmd->sync_cmd_idx = mfi_cmd->cm_index;
	mfi_cmd->cm_extra_frames = cmd->index;	/* Frame count used as SMID */
	mfi_cmd->cm_flags |= MFI_CMD_TBOLT;

	return cmd;
}

union mfi_mpi2_request_descriptor *
mfi_tbolt_get_request_descriptor(struct mfi_softc *sc, uint16_t index)
{
	uint8_t *p;

	if (index >= sc->mfi_max_fw_cmds) {
		device_printf(sc->mfi_dev, "Invalid SMID (0x%x) request "
		    "for descriptor\n", index);
		return NULL;
	}
	p = sc->request_desc_pool + sizeof(union mfi_mpi2_request_descriptor)
	    * index;
	memset(p, 0, sizeof(union mfi_mpi2_request_descriptor));
	return (union mfi_mpi2_request_descriptor *)p;
}

/* Used to build IOCTL cmd */
uint8_t
mfi_build_mpt_pass_thru(struct mfi_softc *sc, struct mfi_command *mfi_cmd)
{
	MPI25_IEEE_SGE_CHAIN64 *mpi25_ieee_chain;
	struct mfi_mpi2_request_raid_scsi_io *io_req;
	struct mfi_cmd_tbolt *cmd;

	cmd = mfi_tbolt_get_cmd(sc, mfi_cmd);
	if (!cmd)
		return EBUSY;
	io_req = cmd->io_request;
	mpi25_ieee_chain = (MPI25_IEEE_SGE_CHAIN64 *)&io_req->SGL.IeeeChain;

	io_req->Function = MPI2_FUNCTION_PASSTHRU_IO_REQUEST;
	io_req->SGLOffset0 = offsetof(struct mfi_mpi2_request_raid_scsi_io,
	    SGL) / 4;
	io_req->ChainOffset = sc->chain_offset_value_for_mpt_ptmsg;

	mpi25_ieee_chain->Address = mfi_cmd->cm_frame_busaddr;

	/*
	 * In an MFI pass-through, NextChainOffset will always be zero to
	 * indicate the end of the chain.
	 */
	mpi25_ieee_chain->Flags = MPI2_IEEE_SGE_FLAGS_CHAIN_ELEMENT
	    | MPI2_IEEE_SGE_FLAGS_IOCPLBNTA_ADDR;

	/* setting the length to the maximum length */
	mpi25_ieee_chain->Length = 1024;

	return 0;
}
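/*
 * Schematic of the frame produced by mfi_build_mpt_pass_thru() above,
 * as this reads: the MPT request carries no payload of its own; its
 * single IEEE chain element points at the legacy MFI frame, whose own
 * SGL in turn describes the data buffers.
 *
 *	MPT request (SMID n)                  legacy MFI frame
 *	+----------------------------+        +--------------------+
 *	| Function = PASSTHRU_IO     |   +--> | header / dcmd      |
 *	| ChainOffset = ptmsg offset |   |    | sgl -> data bufs   |
 *	| IEEE chain: Address -------+---+    +--------------------+
 *	|   Flags  = CHAIN|IOCPLBNTA |
 *	|   Length = 1024 (maximum)  |
 *	+----------------------------+
 */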
void
mfi_tbolt_build_ldio(struct mfi_softc *sc, struct mfi_command *mfi_cmd,
    struct mfi_cmd_tbolt *cmd)
{
	uint32_t start_lba_lo = 0, start_lba_hi = 0, device_id;
	struct mfi_mpi2_request_raid_scsi_io *io_request;
	struct IO_REQUEST_INFO io_info;

	device_id = mfi_cmd->cm_frame->io.header.target_id;
	io_request = cmd->io_request;
	io_request->RaidContext.TargetID = device_id;
	io_request->RaidContext.Status = 0;
	io_request->RaidContext.exStatus = 0;
	io_request->RaidContext.regLockFlags = 0;

	start_lba_lo = mfi_cmd->cm_frame->io.lba_lo;
	start_lba_hi = mfi_cmd->cm_frame->io.lba_hi;

	memset(&io_info, 0, sizeof(struct IO_REQUEST_INFO));
	io_info.ldStartBlock = ((uint64_t)start_lba_hi << 32) | start_lba_lo;
	io_info.numBlocks = mfi_cmd->cm_frame->io.header.data_len;
	io_info.ldTgtId = device_id;
	if ((mfi_cmd->cm_frame->header.flags & MFI_FRAME_DIR_READ) ==
	    MFI_FRAME_DIR_READ)
		io_info.isRead = 1;

	io_request->RaidContext.timeoutValue
	    = MFI_FUSION_FP_DEFAULT_TIMEOUT;
	io_request->Function = MPI2_FUNCTION_LD_IO_REQUEST;
	io_request->DevHandle = device_id;
	cmd->request_desc->header.RequestFlags
	    = (MFI_REQ_DESCRIPT_FLAGS_LD_IO
	    << MFI_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
	if ((io_request->IoFlags == 6) && (io_info.numBlocks == 0))
		io_request->RaidContext.RegLockLength = 0x100;
	io_request->DataLength = mfi_cmd->cm_frame->io.header.data_len
	    * MFI_SECTOR_LEN;
}

int
mfi_tbolt_build_io(struct mfi_softc *sc, struct mfi_command *mfi_cmd,
    struct mfi_cmd_tbolt *cmd)
{
	struct mfi_mpi2_request_raid_scsi_io *io_request;
	uint32_t sge_count;
	uint8_t cdb_len;
	int readop;
	u_int64_t lba;

	io_request = cmd->io_request;
	if (!(mfi_cmd->cm_frame->header.cmd == MFI_CMD_LD_READ
	    || mfi_cmd->cm_frame->header.cmd == MFI_CMD_LD_WRITE))
		return 1;

	mfi_tbolt_build_ldio(sc, mfi_cmd, cmd);

	/* Convert to SCSI command CDB */
	bzero(io_request->CDB.CDB32, sizeof(io_request->CDB.CDB32));
	if (mfi_cmd->cm_frame->header.cmd == MFI_CMD_LD_WRITE)
		readop = 0;
	else
		readop = 1;

	lba = mfi_cmd->cm_frame->io.lba_hi;
	lba = (lba << 32) + mfi_cmd->cm_frame->io.lba_lo;
	cdb_len = mfi_build_cdb(readop, 0, lba,
	    mfi_cmd->cm_frame->io.header.data_len, io_request->CDB.CDB32);

	/* Just the CDB length, rest of the Flags are zero */
	io_request->IoFlags = cdb_len;

	/*
	 * Construct SGL
	 */
	sge_count = mfi_tbolt_make_sgl(sc, mfi_cmd,
	    (pMpi25IeeeSgeChain64_t) &io_request->SGL, cmd);
	if (sge_count > sc->mfi_max_sge) {
		device_printf(sc->mfi_dev, "Error: sge_count (0x%x) exceeds "
		    "max (0x%x) allowed\n", sge_count, sc->mfi_max_sge);
		return 1;
	}
	io_request->RaidContext.numSGE = sge_count;
	io_request->SGLFlags = MPI2_SGE_FLAGS_64_BIT_ADDRESSING;

	if (mfi_cmd->cm_frame->header.cmd == MFI_CMD_LD_WRITE)
		io_request->Control = MPI2_SCSIIO_CONTROL_WRITE;
	else
		io_request->Control = MPI2_SCSIIO_CONTROL_READ;

	io_request->SGLOffset0 = offsetof(
	    struct mfi_mpi2_request_raid_scsi_io, SGL) / 4;

	io_request->SenseBufferLowAddress = mfi_cmd->cm_sense_busaddr;
	io_request->SenseBufferLength = MFI_SENSE_LEN;
	io_request->RaidContext.Status = MFI_STAT_INVALID_STATUS;
	io_request->RaidContext.exStatus = MFI_STAT_INVALID_STATUS;

	return 0;
}
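/*
 * For reference, the kind of CDB mfi_build_cdb() is expected to emit
 * for the LD read/write path above (illustrative; the actual opcode
 * selection lives in mfi_build_cdb()).  Reading 8 blocks at LBA 0x1234
 * fits READ(10):
 *
 *	CDB[0]    = 0x28		READ(10)
 *	CDB[2..5] = 00 00 12 34		big-endian LBA
 *	CDB[7..8] = 00 08		transfer length in blocks
 *
 * and io_request->IoFlags then carries just the CDB length (10).
 */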
984 */ 985 if (sc->mfi_flags & MFI_FLAGS_SKINNY) { 986 sgl_ptr->Length = os_sgl->sg_skinny[i].len; 987 sgl_ptr->Address = os_sgl->sg_skinny[i].addr; 988 } else { 989 sgl_ptr->Length = os_sgl->sg32[i].len; 990 sgl_ptr->Address = os_sgl->sg32[i].addr; 991 } 992 if (i == sge_count - 1 && 993 (sc->mfi_flags & (MFI_FLAGS_INVADER | MFI_FLAGS_FURY))) 994 sgl_ptr->Flags = MPI25_IEEE_SGE_FLAGS_END_OF_LIST; 995 else 996 sgl_ptr->Flags = 0; 997 sgl_ptr++; 998 cmd->io_request->ChainOffset = 0; 999 } 1000 1001 sg_processed = i; 1002 1003 if (sg_processed < sge_count) { 1004 pMpi25IeeeSgeChain64_t sg_chain; 1005 sg_to_process = sge_count - sg_processed; 1006 cmd->io_request->ChainOffset = 1007 sc->chain_offset_value_for_main_message; 1008 sg_chain = sgl_ptr; 1009 /* Prepare chain element */ 1010 sg_chain->NextChainOffset = 0; 1011 if (sc->mfi_flags & (MFI_FLAGS_INVADER | MFI_FLAGS_FURY)) 1012 sg_chain->Flags = MPI2_IEEE_SGE_FLAGS_CHAIN_ELEMENT; 1013 else 1014 sg_chain->Flags = MPI2_IEEE_SGE_FLAGS_CHAIN_ELEMENT | 1015 MPI2_IEEE_SGE_FLAGS_IOCPLBNTA_ADDR; 1016 sg_chain->Length = (sizeof(MPI2_SGE_IO_UNION) * 1017 (sge_count - sg_processed)); 1018 sg_chain->Address = cmd->sg_frame_phys_addr; 1019 sgl_ptr = (pMpi25IeeeSgeChain64_t)cmd->sg_frame; 1020 for (; i < sge_count; i++) { 1021 if (sc->mfi_flags & MFI_FLAGS_SKINNY) { 1022 sgl_ptr->Length = os_sgl->sg_skinny[i].len; 1023 sgl_ptr->Address = os_sgl->sg_skinny[i].addr; 1024 } else { 1025 sgl_ptr->Length = os_sgl->sg32[i].len; 1026 sgl_ptr->Address = os_sgl->sg32[i].addr; 1027 } 1028 if (i == sge_count - 1 && 1029 (sc->mfi_flags & 1030 (MFI_FLAGS_INVADER | MFI_FLAGS_FURY))) 1031 sgl_ptr->Flags = 1032 MPI25_IEEE_SGE_FLAGS_END_OF_LIST; 1033 else 1034 sgl_ptr->Flags = 0; 1035 sgl_ptr++; 1036 } 1037 } 1038 return sge_count; 1039 } 1040 1041 union mfi_mpi2_request_descriptor * 1042 mfi_build_and_issue_cmd(struct mfi_softc *sc, struct mfi_command *mfi_cmd) 1043 { 1044 struct mfi_cmd_tbolt *cmd; 1045 union mfi_mpi2_request_descriptor *req_desc = NULL; 1046 uint16_t index; 1047 cmd = mfi_tbolt_get_cmd(sc, mfi_cmd); 1048 if (cmd == NULL) 1049 return (NULL); 1050 1051 index = cmd->index; 1052 req_desc = mfi_tbolt_get_request_descriptor(sc, index-1); 1053 if (req_desc == NULL) { 1054 mfi_tbolt_return_cmd(sc, cmd, mfi_cmd); 1055 return (NULL); 1056 } 1057 1058 if (mfi_tbolt_build_io(sc, mfi_cmd, cmd) != 0) { 1059 mfi_tbolt_return_cmd(sc, cmd, mfi_cmd); 1060 return (NULL); 1061 } 1062 req_desc->header.SMID = index; 1063 return req_desc; 1064 } 1065 1066 union mfi_mpi2_request_descriptor * 1067 mfi_tbolt_build_mpt_cmd(struct mfi_softc *sc, struct mfi_command *cmd) 1068 { 1069 union mfi_mpi2_request_descriptor *req_desc = NULL; 1070 uint16_t index; 1071 if (mfi_build_mpt_pass_thru(sc, cmd)) { 1072 device_printf(sc->mfi_dev, "Couldn't build MFI pass thru " 1073 "cmd\n"); 1074 return NULL; 1075 } 1076 /* For fusion the frame_count variable is used for SMID */ 1077 index = cmd->cm_extra_frames; 1078 1079 req_desc = mfi_tbolt_get_request_descriptor(sc, index - 1); 1080 if (req_desc == NULL) 1081 return NULL; 1082 1083 bzero(req_desc, sizeof(*req_desc)); 1084 req_desc->header.RequestFlags = (MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO << 1085 MFI_REQ_DESCRIPT_FLAGS_TYPE_SHIFT); 1086 req_desc->header.SMID = index; 1087 return req_desc; 1088 } 1089 1090 int 1091 mfi_tbolt_send_frame(struct mfi_softc *sc, struct mfi_command *cm) 1092 { 1093 struct mfi_frame_header *hdr; 1094 uint8_t *cdb; 1095 union mfi_mpi2_request_descriptor *req_desc = NULL; 1096 int tm = mfi_polled_cmd_timeout * 1000; 
union mfi_mpi2_request_descriptor *
mfi_build_and_issue_cmd(struct mfi_softc *sc, struct mfi_command *mfi_cmd)
{
	struct mfi_cmd_tbolt *cmd;
	union mfi_mpi2_request_descriptor *req_desc = NULL;
	uint16_t index;

	cmd = mfi_tbolt_get_cmd(sc, mfi_cmd);
	if (cmd == NULL)
		return (NULL);

	index = cmd->index;
	req_desc = mfi_tbolt_get_request_descriptor(sc, index - 1);
	if (req_desc == NULL) {
		mfi_tbolt_return_cmd(sc, cmd, mfi_cmd);
		return (NULL);
	}

	if (mfi_tbolt_build_io(sc, mfi_cmd, cmd) != 0) {
		mfi_tbolt_return_cmd(sc, cmd, mfi_cmd);
		return (NULL);
	}
	req_desc->header.SMID = index;
	return req_desc;
}

union mfi_mpi2_request_descriptor *
mfi_tbolt_build_mpt_cmd(struct mfi_softc *sc, struct mfi_command *cmd)
{
	union mfi_mpi2_request_descriptor *req_desc = NULL;
	uint16_t index;

	if (mfi_build_mpt_pass_thru(sc, cmd)) {
		device_printf(sc->mfi_dev, "Couldn't build MFI pass thru "
		    "cmd\n");
		return NULL;
	}
	/* For fusion the frame_count variable is used for the SMID */
	index = cmd->cm_extra_frames;

	req_desc = mfi_tbolt_get_request_descriptor(sc, index - 1);
	if (req_desc == NULL)
		return NULL;

	bzero(req_desc, sizeof(*req_desc));
	req_desc->header.RequestFlags = (MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO <<
	    MFI_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
	req_desc->header.SMID = index;
	return req_desc;
}
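/*
 * Shape of the 64-bit request descriptor built above, assuming the
 * usual MPI2 default request-descriptor layout (an educated reading of
 * the header structs, not documented fact):
 *
 *	bits  0..7	RequestFlags (type << FLAGS_TYPE_SHIFT)
 *	bits 16..31	SMID
 *	bits 32..63	upper half of the descriptor
 *
 * mfi_tbolt_send_frame() below writes the low word to MFI_ILQP first;
 * the write of the high word to MFI_IHQP is what hands the descriptor
 * to the firmware.
 */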
int
mfi_tbolt_send_frame(struct mfi_softc *sc, struct mfi_command *cm)
{
	struct mfi_frame_header *hdr;
	uint8_t *cdb;
	union mfi_mpi2_request_descriptor *req_desc = NULL;
	int tm = mfi_polled_cmd_timeout * 1000;

	hdr = &cm->cm_frame->header;
	cdb = cm->cm_frame->pass.cdb;
	if (sc->adpreset)
		return 1;
	if ((cm->cm_flags & MFI_CMD_POLLED) == 0) {
		cm->cm_timestamp = time_uptime;
		mfi_enqueue_busy(cm);
	} else {	/* still get interrupts for it */
		hdr->cmd_status = MFI_STAT_INVALID_STATUS;
		hdr->flags |= MFI_FRAME_DONT_POST_IN_REPLY_QUEUE;
	}

	if (hdr->cmd == MFI_CMD_PD_SCSI_IO) {
		/*
		 * Check for inquiry commands coming from the CLI.  Note
		 * that, as written, this condition is always true, so all
		 * PD commands take the MPT pass-through path.
		 */
		if (cdb[0] != 0x28 || cdb[0] != 0x2A) {
			if ((req_desc = mfi_tbolt_build_mpt_cmd(sc, cm)) ==
			    NULL) {
				device_printf(sc->mfi_dev, "Mapping from MFI "
				    "to MPT Failed\n");
				return 1;
			}
		} else
			device_printf(sc->mfi_dev, "DJA NA XXX SYSPDIO\n");
	} else if (hdr->cmd == MFI_CMD_LD_SCSI_IO ||
	    hdr->cmd == MFI_CMD_LD_READ || hdr->cmd == MFI_CMD_LD_WRITE) {
		cm->cm_flags |= MFI_CMD_SCSI;
		if ((req_desc = mfi_build_and_issue_cmd(sc, cm)) == NULL) {
			device_printf(sc->mfi_dev, "LDIO Failed\n");
			return 1;
		}
	} else if ((req_desc = mfi_tbolt_build_mpt_cmd(sc, cm)) == NULL) {
		device_printf(sc->mfi_dev, "Mapping from MFI to MPT Failed\n");
		return (1);
	}

	if (cm->cm_flags & MFI_CMD_SCSI) {
		/*
		 * LD IO needs to be posted since it doesn't get
		 * acknowledged via a status update so have the
		 * controller reply via mfi_tbolt_complete_cmd.
		 */
		hdr->flags &= ~MFI_FRAME_DONT_POST_IN_REPLY_QUEUE;
	}

	MFI_WRITE4(sc, MFI_ILQP, (req_desc->words & 0xFFFFFFFF));
	MFI_WRITE4(sc, MFI_IHQP, (req_desc->words >> 0x20));

	if ((cm->cm_flags & MFI_CMD_POLLED) == 0)
		return 0;

	/*
	 * This is a polled command, so busy-wait for it to complete.
	 *
	 * The value of hdr->cmd_status is updated directly by the hardware
	 * so there is no guarantee that mfi_tbolt_complete_cmd is called
	 * prior to this value changing.
	 */
	while (hdr->cmd_status == MFI_STAT_INVALID_STATUS) {
		DELAY(1000);
		tm -= 1;
		if (tm <= 0)
			break;
		if (cm->cm_flags & MFI_CMD_SCSI) {
			/*
			 * Force a check of the reply queue.
			 * This ensures that dump works correctly.
			 */
			mfi_tbolt_complete_cmd(sc);
		}
	}

	/* ensure the command cleanup has been processed before returning */
	mfi_tbolt_complete_cmd(sc);

	if (hdr->cmd_status == MFI_STAT_INVALID_STATUS) {
		device_printf(sc->mfi_dev, "Frame %p timed out "
		    "command 0x%X\n", hdr, cm->cm_frame->dcmd.opcode);
		return (ETIMEDOUT);
	}
	return 0;
}

static void
mfi_issue_pending_cmds_again(struct mfi_softc *sc)
{
	struct mfi_command *cm, *tmp;
	struct mfi_cmd_tbolt *cmd;

	mtx_assert(&sc->mfi_io_lock, MA_OWNED);
	TAILQ_FOREACH_REVERSE_SAFE(cm, &sc->mfi_busy, BUSYQ, cm_link, tmp) {
		cm->retry_for_fw_reset++;

		/*
		 * If a command has continuously been tried multiple times
		 * and is causing a FW reset condition, no further recoveries
		 * should be performed on the controller.
		 */
		if (cm->retry_for_fw_reset == 3) {
			device_printf(sc->mfi_dev, "megaraid_sas: command %p "
			    "index=%d was tried multiple times during adapter "
			    "reset - Shutting down the HBA\n", cm, cm->cm_index);
			mfi_kill_hba(sc);
			sc->hw_crit_error = 1;
			return;
		}

		mfi_remove_busy(cm);
		if ((cm->cm_flags & MFI_CMD_TBOLT) != 0) {
			if (cm->cm_extra_frames != 0 && cm->cm_extra_frames <=
			    sc->mfi_max_fw_cmds) {
				cmd = sc->mfi_cmd_pool_tbolt[cm->cm_extra_frames - 1];
				mfi_tbolt_return_cmd(sc, cmd, cm);
			} else {
				device_printf(sc->mfi_dev,
				    "Invalid extra_frames: %d detected\n",
				    cm->cm_extra_frames);
			}
		}

		if (cm->cm_frame->dcmd.opcode != MFI_DCMD_CTRL_EVENT_WAIT) {
			device_printf(sc->mfi_dev,
			    "APJ ****requeue command %p index=%d\n",
			    cm, cm->cm_index);
			mfi_requeue_ready(cm);
		} else
			mfi_release_command(cm);
	}
	mfi_startio(sc);
}

static void
mfi_kill_hba(struct mfi_softc *sc)
{
	if (sc->mfi_flags & MFI_FLAGS_TBOLT)
		MFI_WRITE4(sc, 0x00, MFI_STOP_ADP);
	else
		MFI_WRITE4(sc, MFI_IDB, MFI_STOP_ADP);
}
static void
mfi_process_fw_state_chg_isr(void *arg)
{
	struct mfi_softc *sc = (struct mfi_softc *)arg;
	int error, status;

	if (sc->adpreset == 1) {
		device_printf(sc->mfi_dev, "First stage of FW reset "
		    "initiated...\n");

		sc->mfi_adp_reset(sc);
		sc->mfi_enable_intr(sc);

		device_printf(sc->mfi_dev, "First stage of reset complete, "
		    "second stage initiated...\n");

		sc->adpreset = 2;

		/* wait about 20 seconds before starting the second init */
		for (int wait = 0; wait < 20000; wait++)
			DELAY(1000);
		device_printf(sc->mfi_dev, "Second stage of FW reset "
		    "initiated...\n");
		while ((status = MFI_READ4(sc, MFI_RSR)) & 0x04)
			;

		sc->mfi_disable_intr(sc);

		/* We expect the FW state to be READY */
		if (mfi_transition_firmware(sc)) {
			device_printf(sc->mfi_dev, "controller is not in "
			    "ready state\n");
			mfi_kill_hba(sc);
			sc->hw_crit_error = 1;
			return;
		}
		if ((error = mfi_tbolt_init_MFI_queue(sc)) != 0) {
			device_printf(sc->mfi_dev, "Failed to initialise MFI "
			    "queue\n");
			mfi_kill_hba(sc);
			sc->hw_crit_error = 1;
			return;
		}

		/* Init last reply index and max */
		MFI_WRITE4(sc, MFI_RFPI, sc->mfi_max_fw_cmds - 1);
		MFI_WRITE4(sc, MFI_RPI, sc->last_reply_idx);

		sc->mfi_enable_intr(sc);
		sc->adpreset = 0;
		if (sc->mfi_aen_cm != NULL) {
			free(sc->mfi_aen_cm->cm_data, M_MFIBUF);
			mfi_remove_busy(sc->mfi_aen_cm);
			mfi_release_command(sc->mfi_aen_cm);
			sc->mfi_aen_cm = NULL;
		}

		if (sc->mfi_map_sync_cm != NULL) {
			mfi_remove_busy(sc->mfi_map_sync_cm);
			mfi_release_command(sc->mfi_map_sync_cm);
			sc->mfi_map_sync_cm = NULL;
		}
		mfi_issue_pending_cmds_again(sc);

		/*
		 * Issuing pending commands can result in the adapter being
		 * marked dead because of too many retries.  Check for that
		 * condition before clearing the reset condition on the FW.
		 */
		if (!sc->hw_crit_error) {
			/*
			 * Initiate AEN (Asynchronous Event Notification) &
			 * Sync Map
			 */
			mfi_aen_setup(sc, sc->last_seq_num);
			mfi_tbolt_sync_map_info(sc);

			sc->issuepend_done = 1;
			device_printf(sc->mfi_dev, "second stage of reset "
			    "complete, FW is ready now.\n");
		} else {
			device_printf(sc->mfi_dev, "second stage of reset "
			    "never completed, hba was marked offline.\n");
		}
	} else {
		device_printf(sc->mfi_dev, "mfi_process_fw_state_chg_isr "
		    "called with unhandled value:%d\n", sc->adpreset);
	}
}

/*
 * The ThunderBolt HW has an option for the driver to directly
 * access the underlying disks and operate on the RAID.  To
 * do this there needs to be a capability to keep the RAID controller
 * and driver in sync.  The FreeBSD driver does not take advantage
 * of this feature since it adds a lot of complexity and slows down
 * performance.  Performance is gained by using the controller's
 * cache etc.
 *
 * Even though this driver doesn't access the disks directly, an
 * AEN-like command is used to inform the RAID firmware to "sync"
 * with all LD's via the MFI_DCMD_LD_MAP_GET_INFO command.  This
 * command in write mode will return when the RAID firmware has
 * detected a change to the RAID state.  Examples of this type
 * of change are removing a disk.  Once the command returns then
 * the driver needs to acknowledge this and "sync" all LD's again.
 * This repeats until we shutdown.  Then we need to cancel this
 * pending command.
 *
 * If this is not done right the RAID firmware will not remove a
 * pulled drive and the RAID won't go degraded etc.  Effectively,
 * it stops any RAID management functions from working.
 *
 * Doing another LD sync requires the use of an event since the
 * driver needs to do a mfi_wait_command and can't do that in an
 * interrupt thread.
 *
 * The driver could get the RAID state via the MFI_DCMD_LD_MAP_GET_INFO
 * command, but that requires a bunch of structures and it is simpler
 * to just do the MFI_DCMD_LD_GET_LIST versus walking the RAID map.
 */
1358 */ 1359 1360 void 1361 mfi_tbolt_sync_map_info(struct mfi_softc *sc) 1362 { 1363 int error = 0, i; 1364 struct mfi_command *cmd = NULL; 1365 struct mfi_dcmd_frame *dcmd = NULL; 1366 uint32_t context = 0; 1367 union mfi_ld_ref *ld_sync = NULL; 1368 size_t ld_size; 1369 struct mfi_frame_header *hdr; 1370 struct mfi_command *cm = NULL; 1371 struct mfi_ld_list *list = NULL; 1372 1373 mtx_assert(&sc->mfi_io_lock, MA_OWNED); 1374 1375 if (sc->mfi_map_sync_cm != NULL || sc->cm_map_abort) 1376 return; 1377 1378 error = mfi_dcmd_command(sc, &cm, MFI_DCMD_LD_GET_LIST, 1379 (void **)&list, sizeof(*list)); 1380 if (error) 1381 goto out; 1382 1383 cm->cm_flags = MFI_CMD_POLLED | MFI_CMD_DATAIN; 1384 1385 if (mfi_wait_command(sc, cm) != 0) { 1386 device_printf(sc->mfi_dev, "Failed to get device listing\n"); 1387 goto out; 1388 } 1389 1390 hdr = &cm->cm_frame->header; 1391 if (hdr->cmd_status != MFI_STAT_OK) { 1392 device_printf(sc->mfi_dev, "MFI_DCMD_LD_GET_LIST failed %x\n", 1393 hdr->cmd_status); 1394 goto out; 1395 } 1396 1397 ld_size = sizeof(*ld_sync) * list->ld_count; 1398 ld_sync = (union mfi_ld_ref *) malloc(ld_size, M_MFIBUF, 1399 M_NOWAIT | M_ZERO); 1400 if (ld_sync == NULL) { 1401 device_printf(sc->mfi_dev, "Failed to allocate sync\n"); 1402 goto out; 1403 } 1404 for (i = 0; i < list->ld_count; i++) 1405 ld_sync[i].ref = list->ld_list[i].ld.ref; 1406 1407 if ((cmd = mfi_dequeue_free(sc)) == NULL) { 1408 device_printf(sc->mfi_dev, "Failed to get command\n"); 1409 free(ld_sync, M_MFIBUF); 1410 goto out; 1411 } 1412 1413 context = cmd->cm_frame->header.context; 1414 bzero(cmd->cm_frame, sizeof(union mfi_frame)); 1415 cmd->cm_frame->header.context = context; 1416 1417 dcmd = &cmd->cm_frame->dcmd; 1418 bzero(dcmd->mbox, MFI_MBOX_SIZE); 1419 dcmd->header.cmd = MFI_CMD_DCMD; 1420 dcmd->header.flags = MFI_FRAME_DIR_WRITE; 1421 dcmd->header.timeout = 0; 1422 dcmd->header.data_len = ld_size; 1423 dcmd->header.scsi_status = 0; 1424 dcmd->opcode = MFI_DCMD_LD_MAP_GET_INFO; 1425 cmd->cm_sg = &dcmd->sgl; 1426 cmd->cm_total_frame_size = MFI_DCMD_FRAME_SIZE; 1427 cmd->cm_data = ld_sync; 1428 cmd->cm_private = ld_sync; 1429 1430 cmd->cm_len = ld_size; 1431 cmd->cm_complete = mfi_sync_map_complete; 1432 sc->mfi_map_sync_cm = cmd; 1433 1434 cmd->cm_flags = MFI_CMD_DATAOUT; 1435 cmd->cm_frame->dcmd.mbox[0] = list->ld_count; 1436 cmd->cm_frame->dcmd.mbox[1] = MFI_DCMD_MBOX_PEND_FLAG; 1437 1438 if ((error = mfi_mapcmd(sc, cmd)) != 0) { 1439 device_printf(sc->mfi_dev, "failed to send map sync\n"); 1440 free(ld_sync, M_MFIBUF); 1441 sc->mfi_map_sync_cm = NULL; 1442 mfi_release_command(cmd); 1443 goto out; 1444 } 1445 1446 out: 1447 if (list) 1448 free(list, M_MFIBUF); 1449 if (cm) 1450 mfi_release_command(cm); 1451 } 1452 1453 static void 1454 mfi_sync_map_complete(struct mfi_command *cm) 1455 { 1456 struct mfi_frame_header *hdr; 1457 struct mfi_softc *sc; 1458 int aborted = 0; 1459 1460 sc = cm->cm_sc; 1461 mtx_assert(&sc->mfi_io_lock, MA_OWNED); 1462 1463 hdr = &cm->cm_frame->header; 1464 1465 if (sc->mfi_map_sync_cm == NULL) 1466 return; 1467 1468 if (sc->cm_map_abort || 1469 hdr->cmd_status == MFI_STAT_INVALID_STATUS) { 1470 sc->cm_map_abort = 0; 1471 aborted = 1; 1472 } 1473 1474 free(cm->cm_data, M_MFIBUF); 1475 wakeup(&sc->mfi_map_sync_cm); 1476 sc->mfi_map_sync_cm = NULL; 1477 mfi_release_command(cm); 1478 1479 /* set it up again so the driver can catch more events */ 1480 if (!aborted) 1481 mfi_queue_map_sync(sc); 1482 } 1483 1484 static void 1485 mfi_queue_map_sync(struct mfi_softc *sc) 1486 { 1487 
static void
mfi_queue_map_sync(struct mfi_softc *sc)
{
	mtx_assert(&sc->mfi_io_lock, MA_OWNED);
	taskqueue_enqueue(taskqueue_swi, &sc->mfi_map_sync_task);
}

void
mfi_handle_map_sync(void *context, int pending)
{
	struct mfi_softc *sc;

	sc = context;
	mtx_lock(&sc->mfi_io_lock);
	mfi_tbolt_sync_map_info(sc);
	mtx_unlock(&sc->mfi_io_lock);
}