/*
 * This is the Fusion MPT base driver providing common API layer interface
 * for access to MPT (Message Passing Technology) firmware.
 *
 * This code is based on drivers/scsi/mpt3sas/mpt3sas_base.c
 * Copyright (C) 2012-2014  LSI Corporation
 * Copyright (C) 2013-2014 Avago Technologies
 *  (mailto: MPT-FusionLinux.pdl@avagotech.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * NO WARRANTY
 * THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR
 * CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT
 * LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT,
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is
 * solely responsible for determining the appropriateness of using and
 * distributing the Program and assumes all risks associated with its
 * exercise of rights under this Agreement, including but not limited to
 * the risks and costs of program errors, damage to or loss of data,
 * programs or equipment, and unavailability or interruption of operations.
 *
 * DISCLAIMER OF LIABILITY
 * NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY
 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND
 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
 * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
 * USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED
 * HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301,
 * USA.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/kdev_t.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/dma-mapping.h>
#include <linux/io.h>
#include <linux/time.h>
#include <linux/ktime.h>
#include <linux/kthread.h>
#include <asm/page.h>	/* To get host page size per arch */


#include "mpt3sas_base.h"

static MPT_CALLBACK	mpt_callbacks[MPT_MAX_CALLBACKS];


#define FAULT_POLLING_INTERVAL 1000 /* in milliseconds */

 /* maximum controller queue depth */
#define MAX_HBA_QUEUE_DEPTH	30000
#define MAX_CHAIN_DEPTH		100000
static int max_queue_depth = -1;
module_param(max_queue_depth, int, 0444);
MODULE_PARM_DESC(max_queue_depth, " max controller queue depth ");

static int max_sgl_entries = -1;
module_param(max_sgl_entries, int, 0444);
MODULE_PARM_DESC(max_sgl_entries, " max sg entries ");

static int msix_disable = -1;
module_param(msix_disable, int, 0444);
MODULE_PARM_DESC(msix_disable, " disable msix routed interrupts (default=0)");

static int smp_affinity_enable = 1;
module_param(smp_affinity_enable, int, 0444);
MODULE_PARM_DESC(smp_affinity_enable, "SMP affinity feature enable/disable Default: enable(1)");

static int max_msix_vectors = -1;
module_param(max_msix_vectors, int, 0444);
MODULE_PARM_DESC(max_msix_vectors,
	" max msix vectors");

static int irqpoll_weight = -1;
module_param(irqpoll_weight, int, 0444);
MODULE_PARM_DESC(irqpoll_weight,
	"irq poll weight (default= one fourth of HBA queue depth)");

static int mpt3sas_fwfault_debug;
MODULE_PARM_DESC(mpt3sas_fwfault_debug,
	" enable detection of firmware fault and halt firmware - (default=0)");

static int perf_mode = -1;
module_param(perf_mode, int, 0444);
MODULE_PARM_DESC(perf_mode,
	"Performance mode (only for Aero/Sea Generation), options:\n\t\t"
	"0 - balanced: high iops mode is enabled &\n\t\t"
	"interrupt coalescing is enabled only on high iops queues,\n\t\t"
	"1 - iops: high iops mode is disabled &\n\t\t"
	"interrupt coalescing is enabled on all queues,\n\t\t"
	"2 - latency: high iops mode is disabled &\n\t\t"
	"interrupt coalescing is enabled on all queues with timeout value 0xA,\n"
	"\t\tdefault - default perf_mode is 'balanced'"
	);

static int poll_queues;
module_param(poll_queues, int, 0444);
MODULE_PARM_DESC(poll_queues, "Number of queues to be used for io_uring poll mode.\n\t\t"
	"This parameter is effective only if host_tagset_enable=1.\n\t\t"
	"When poll_queues are enabled, perf_mode is set to latency mode.\n\t\t"
	);

enum mpt3sas_perf_mode {
	MPT_PERF_MODE_DEFAULT	= -1,
	MPT_PERF_MODE_BALANCED	= 0,
	MPT_PERF_MODE_IOPS	= 1,
	MPT_PERF_MODE_LATENCY	= 2,
};

static int
_base_wait_on_iocstate(struct MPT3SAS_ADAPTER *ioc,
		u32 ioc_state, int timeout);
static int
_base_get_ioc_facts(struct MPT3SAS_ADAPTER *ioc);
static void
_base_clear_outstanding_commands(struct MPT3SAS_ADAPTER *ioc);

static u32
_base_readl_ext_retry(const void __iomem *addr);

/**
 * mpt3sas_base_check_cmd_timeout - Function
 *		to check timeout and command termination due
 *		to Host reset.
 *
 * @ioc:	per adapter object.
 * @status:	Status of issued command.
 * @mpi_request: mf request pointer.
 * @sz:		size of buffer.
 *
 * Return: 1 if a reset should be issued, else 0.
 */
u8
mpt3sas_base_check_cmd_timeout(struct MPT3SAS_ADAPTER *ioc,
		u8 status, void *mpi_request, int sz)
{
	u8 issue_reset = 0;

	if (!(status & MPT3_CMD_RESET))
		issue_reset = 1;

	ioc_err(ioc, "Command %s\n",
		issue_reset == 0 ? "terminated due to Host Reset" : "Timeout");
	_debug_dump_mf(mpi_request, sz);

	return issue_reset;
}

/**
 * _scsih_set_fwfault_debug - global setting of ioc->fwfault_debug.
 * @val: new value, passed in as a string
 * @kp: kernel parameter descriptor
 *
 * Return: 0 on success, negative errno otherwise.
 */
static int
_scsih_set_fwfault_debug(const char *val, const struct kernel_param *kp)
{
	int ret = param_set_int(val, kp);
	struct MPT3SAS_ADAPTER *ioc;

	if (ret)
		return ret;

	/* global ioc spinlock to protect controller list on list operations */
	pr_info("setting fwfault_debug(%d)\n", mpt3sas_fwfault_debug);
	spin_lock(&gioc_lock);
	list_for_each_entry(ioc, &mpt3sas_ioc_list, list)
		ioc->fwfault_debug = mpt3sas_fwfault_debug;
	spin_unlock(&gioc_lock);
	return 0;
}
module_param_call(mpt3sas_fwfault_debug, _scsih_set_fwfault_debug,
	param_get_int, &mpt3sas_fwfault_debug, 0644);

/**
 * _base_readl_aero - retry readl for max three times.
 * @addr: MPT Fusion system interface register address
 *
 * Retry the readl() up to three times if it returns zero
 * while reading the system interface register.
 */
static inline u32
_base_readl_aero(const void __iomem *addr)
{
	u32 i = 0, ret_val;

	do {
		ret_val = readl(addr);
		i++;
	} while (ret_val == 0 && i < 3);

	return ret_val;
}

static u32
_base_readl_ext_retry(const void __iomem *addr)
{
	u32 i, ret_val;

	for (i = 0 ; i < 30 ; i++) {
		ret_val = readl(addr);
		if (ret_val != 0)
			break;
	}

	return ret_val;
}

static inline u32
_base_readl(const void __iomem *addr)
{
	return readl(addr);
}

/**
 * _base_clone_reply_to_sys_mem - copies reply to reply free iomem
 *				  in BAR0 space.
 *
 * @ioc: per adapter object
 * @reply: reply message frame(lower 32bit addr)
 * @index: System request message index.
 */
static void
_base_clone_reply_to_sys_mem(struct MPT3SAS_ADAPTER *ioc, u32 reply,
		u32 index)
{
	/*
	 * 256 is offset within sys register.
	 * 256 offset MPI frame starts. Max MPI frame supported is 32.
	 * 32 * 128 = 4K. From here, Clone of reply free for mcpu starts
	 */
	u16 cmd_credit = ioc->facts.RequestCredit + 1;
	void __iomem *reply_free_iomem = (void __iomem *)ioc->chip +
			MPI_FRAME_START_OFFSET +
			(cmd_credit * ioc->request_sz) + (index * sizeof(u32));

	writel(reply, reply_free_iomem);
}

/**
 * _base_clone_mpi_to_sys_mem - Writes/copies MPI frames
 *				to system/BAR0 region.
 *
 * @dst_iomem: Pointer to the destination location in BAR0 space.
 * @src: Pointer to the Source data.
 * @size: Size of data to be copied.
 */
static void
_base_clone_mpi_to_sys_mem(void *dst_iomem, void *src, u32 size)
{
	int i;
	u32 *src_virt_mem = (u32 *)src;

	for (i = 0; i < size/4; i++)
		writel((u32)src_virt_mem[i],
				(void __iomem *)dst_iomem + (i * 4));
}

/**
 * _base_clone_to_sys_mem - Writes/copies data to system/BAR0 region
 *
 * @dst_iomem: Pointer to the destination location in BAR0 space.
 * @src: Pointer to the Source data.
 * @size: Size of data to be copied.
 */
static void
_base_clone_to_sys_mem(void __iomem *dst_iomem, void *src, u32 size)
{
	int i;
	u32 *src_virt_mem = (u32 *)(src);

	for (i = 0; i < size/4; i++)
		writel((u32)src_virt_mem[i],
			(void __iomem *)dst_iomem + (i * 4));
}

/**
 * _base_get_chain - Calculates and Returns virtual chain address
 *			 for the provided smid in BAR0 space.
 *
 * @ioc: per adapter object
 * @smid: system request message index
 * @sge_chain_count: Scatter gather chain count.
 *
 * Return: the chain address.
 */
static inline void __iomem*
_base_get_chain(struct MPT3SAS_ADAPTER *ioc, u16 smid,
		u8 sge_chain_count)
{
	void __iomem *base_chain, *chain_virt;
	u16 cmd_credit = ioc->facts.RequestCredit + 1;

	base_chain = (void __iomem *)ioc->chip + MPI_FRAME_START_OFFSET +
		(cmd_credit * ioc->request_sz) +
		REPLY_FREE_POOL_SIZE;
	chain_virt = base_chain + (smid * ioc->facts.MaxChainDepth *
			ioc->request_sz) + (sge_chain_count * ioc->request_sz);
	return chain_virt;
}

/**
 * _base_get_chain_phys - Calculates and Returns physical address
 *			in BAR0 for scatter gather chains, for
 *			the provided smid.
 *
 * @ioc: per adapter object
 * @smid: system request message index
 * @sge_chain_count: Scatter gather chain count.
 *
 * Return: Physical chain address.
 */
static inline phys_addr_t
_base_get_chain_phys(struct MPT3SAS_ADAPTER *ioc, u16 smid,
		u8 sge_chain_count)
{
	phys_addr_t base_chain_phys, chain_phys;
	u16 cmd_credit = ioc->facts.RequestCredit + 1;

	base_chain_phys = ioc->chip_phys + MPI_FRAME_START_OFFSET +
		(cmd_credit * ioc->request_sz) +
		REPLY_FREE_POOL_SIZE;
	chain_phys = base_chain_phys + (smid * ioc->facts.MaxChainDepth *
			ioc->request_sz) + (sge_chain_count * ioc->request_sz);
	return chain_phys;
}

/**
 * _base_get_buffer_bar0 - Calculates and Returns BAR0 mapped Host
 *			buffer address for the provided smid.
 *			(Each smid gets 64K, starting at offset 17024)
 *
 * @ioc: per adapter object
 * @smid: system request message index
 *
 * Return: Pointer to buffer location in BAR0.
 */

static void __iomem *
_base_get_buffer_bar0(struct MPT3SAS_ADAPTER *ioc, u16 smid)
{
	u16 cmd_credit = ioc->facts.RequestCredit + 1;
	// Added extra 1 to reach end of chain.
	void __iomem *chain_end = _base_get_chain(ioc,
			cmd_credit + 1,
			ioc->facts.MaxChainDepth);
	return chain_end + (smid * 64 * 1024);
}

/**
 * _base_get_buffer_phys_bar0 - Calculates and Returns BAR0 mapped
 *			Host buffer Physical address for the provided smid.
 *			(Each smid gets 64K, starting at offset 17024)
 *
 * @ioc: per adapter object
 * @smid: system request message index
 *
 * Return: Pointer to buffer location in BAR0.
 */
static phys_addr_t
_base_get_buffer_phys_bar0(struct MPT3SAS_ADAPTER *ioc, u16 smid)
{
	u16 cmd_credit = ioc->facts.RequestCredit + 1;
	phys_addr_t chain_end_phys = _base_get_chain_phys(ioc,
			cmd_credit + 1,
			ioc->facts.MaxChainDepth);
	return chain_end_phys + (smid * 64 * 1024);
}

/**
 * _base_get_chain_buffer_dma_to_chain_buffer - Iterates chain
 *			lookup list and Provides chain_buffer
 *			address for the matching dma address.
 *			(Each smid gets 64K, starting at offset 17024)
 *
 * @ioc: per adapter object
 * @chain_buffer_dma: Chain buffer dma address.
 *
 * Return: Pointer to chain buffer, or NULL on failure.
 */
static void *
_base_get_chain_buffer_dma_to_chain_buffer(struct MPT3SAS_ADAPTER *ioc,
		dma_addr_t chain_buffer_dma)
{
	u16 index, j;
	struct chain_tracker *ct;

	for (index = 0; index < ioc->scsiio_depth; index++) {
		for (j = 0; j < ioc->chains_needed_per_io; j++) {
			ct = &ioc->chain_lookup[index].chains_per_smid[j];
			if (ct && ct->chain_buffer_dma == chain_buffer_dma)
				return ct->chain_buffer;
		}
	}
	ioc_info(ioc, "Provided chain_buffer_dma address is not in the lookup list\n");
	return NULL;
}

/**
 * _clone_sg_entries - MPI EP's scsiio and config requests
 *			are handled here. Base function for
 *			double buffering, before submitting
 *			the requests.
 *
 * @ioc: per adapter object.
 * @mpi_request: mf request pointer.
 * @smid: system request message index.
 */
static void _clone_sg_entries(struct MPT3SAS_ADAPTER *ioc,
		void *mpi_request, u16 smid)
{
	Mpi2SGESimple32_t *sgel, *sgel_next;
	u32 sgl_flags, sge_chain_count = 0;
	bool is_write = false;
	u16 i = 0;
	void __iomem *buffer_iomem;
	phys_addr_t buffer_iomem_phys;
	void __iomem *buff_ptr;
	phys_addr_t buff_ptr_phys;
	void __iomem *dst_chain_addr[MCPU_MAX_CHAINS_PER_IO];
	void *src_chain_addr[MCPU_MAX_CHAINS_PER_IO];
	phys_addr_t dst_addr_phys;
	MPI2RequestHeader_t *request_hdr;
	struct scsi_cmnd *scmd;
	struct scatterlist *sg_scmd = NULL;
	int is_scsiio_req = 0;

	request_hdr = (MPI2RequestHeader_t *) mpi_request;

	if (request_hdr->Function == MPI2_FUNCTION_SCSI_IO_REQUEST) {
		Mpi25SCSIIORequest_t *scsiio_request =
			(Mpi25SCSIIORequest_t *)mpi_request;
		sgel = (Mpi2SGESimple32_t *) &scsiio_request->SGL;
		is_scsiio_req = 1;
	} else if (request_hdr->Function == MPI2_FUNCTION_CONFIG) {
		Mpi2ConfigRequest_t *config_req =
			(Mpi2ConfigRequest_t *)mpi_request;
		sgel = (Mpi2SGESimple32_t *) &config_req->PageBufferSGE;
	} else
		return;

	/* From smid we can get scsi_cmd, once we have sg_scmd,
	 * we just need to get sg_virt and sg_next to get virtual
	 * address associated with sgel->Address.
	 */

	if (is_scsiio_req) {
		/* Get scsi_cmd using smid */
		scmd = mpt3sas_scsih_scsi_lookup_get(ioc, smid);
		if (scmd == NULL) {
			ioc_err(ioc, "scmd is NULL\n");
			return;
		}

		/* Get sg_scmd from scmd provided */
		sg_scmd = scsi_sglist(scmd);
	}

	/*
	 * 0 - 255	System register
	 * 256 - 4352	MPI Frame. (This is based on maxCredit 32)
	 * 4352 - 4864	Reply_free pool (512 byte is reserved
	 *		considering maxCredit 32. Reply need extra
	 *		room, for mCPU case kept four times of
	 *		maxCredit).
	 * 4864 - 17152	SGE chain element. (32cmd * 3 chain of
	 *		128 byte size = 12288)
	 * 17152 - x	Host buffer mapped with smid.
	 *		(Each smid can have 64K Max IO.)
	 * BAR0+Last 1K MSIX Addr and Data
	 * Total size in use 2113664 bytes of 4MB BAR0
	 */

	buffer_iomem = _base_get_buffer_bar0(ioc, smid);
	buffer_iomem_phys = _base_get_buffer_phys_bar0(ioc, smid);

	buff_ptr = buffer_iomem;
	buff_ptr_phys = buffer_iomem_phys;
	WARN_ON(buff_ptr_phys > U32_MAX);

	if (le32_to_cpu(sgel->FlagsLength) &
			(MPI2_SGE_FLAGS_HOST_TO_IOC << MPI2_SGE_FLAGS_SHIFT))
		is_write = true;

	for (i = 0; i < MPT_MIN_PHYS_SEGMENTS + ioc->facts.MaxChainDepth; i++) {

		sgl_flags =
		    (le32_to_cpu(sgel->FlagsLength) >> MPI2_SGE_FLAGS_SHIFT);

		switch (sgl_flags & MPI2_SGE_FLAGS_ELEMENT_MASK) {
		case MPI2_SGE_FLAGS_CHAIN_ELEMENT:
			/*
			 * Helper function which on passing
			 * chain_buffer_dma returns chain_buffer. Get
			 * the virtual address for sgel->Address
			 */
			sgel_next =
				_base_get_chain_buffer_dma_to_chain_buffer(ioc,
						le32_to_cpu(sgel->Address));
			if (sgel_next == NULL)
				return;
			/*
			 * This is copying a 128 byte chain
			 * frame (not a host buffer)
			 */
			dst_chain_addr[sge_chain_count] =
				_base_get_chain(ioc,
					smid, sge_chain_count);
			src_chain_addr[sge_chain_count] =
						(void *) sgel_next;
			dst_addr_phys = _base_get_chain_phys(ioc,
						smid, sge_chain_count);
			WARN_ON(dst_addr_phys > U32_MAX);
			sgel->Address =
				cpu_to_le32(lower_32_bits(dst_addr_phys));
			sgel = sgel_next;
			sge_chain_count++;
			break;
		case MPI2_SGE_FLAGS_SIMPLE_ELEMENT:
			if (is_write) {
				if (is_scsiio_req) {
					_base_clone_to_sys_mem(buff_ptr,
					    sg_virt(sg_scmd),
					    (le32_to_cpu(sgel->FlagsLength) &
					    0x00ffffff));
					/*
					 * FIXME: this relies on a zero
					 * PCI mem_offset.
					 */
					sgel->Address =
					    cpu_to_le32((u32)buff_ptr_phys);
				} else {
					_base_clone_to_sys_mem(buff_ptr,
					    ioc->config_vaddr,
					    (le32_to_cpu(sgel->FlagsLength) &
					    0x00ffffff));
					sgel->Address =
					    cpu_to_le32((u32)buff_ptr_phys);
				}
			}
			buff_ptr += (le32_to_cpu(sgel->FlagsLength) &
			    0x00ffffff);
			buff_ptr_phys += (le32_to_cpu(sgel->FlagsLength) &
			    0x00ffffff);
			if ((le32_to_cpu(sgel->FlagsLength) &
			    (MPI2_SGE_FLAGS_END_OF_BUFFER
					<< MPI2_SGE_FLAGS_SHIFT)))
				goto eob_clone_chain;
			else {
				/*
				 * Every simple element in MPT has an
				 * associated sg_next. Sanity-check that
				 * sg_next is not NULL; a NULL here would
				 * be a bug.
				 */
				if (is_scsiio_req) {
					sg_scmd = sg_next(sg_scmd);
					if (sg_scmd)
						sgel++;
					else
						goto eob_clone_chain;
				}
			}
			break;
		}
	}

eob_clone_chain:
	for (i = 0; i < sge_chain_count; i++) {
		if (is_scsiio_req)
			_base_clone_to_sys_mem(dst_chain_addr[i],
				src_chain_addr[i], ioc->request_sz);
	}
}

/**
 * mpt3sas_remove_dead_ioc_func - kthread context to remove dead ioc
 * @arg: input argument, used to derive ioc
 *
 * Return:
 * 0 if controller is removed from pci subsystem.
 * -1 for other case.
 */
static int mpt3sas_remove_dead_ioc_func(void *arg)
{
	struct MPT3SAS_ADAPTER *ioc = (struct MPT3SAS_ADAPTER *)arg;
	struct pci_dev *pdev;

	if (!ioc)
		return -1;

	pdev = ioc->pdev;
	if (!pdev)
		return -1;
	pci_stop_and_remove_bus_device_locked(pdev);
	return 0;
}

/**
 * _base_sync_drv_fw_timestamp - Sync Driver-FW TimeStamp.
 * @ioc: Per Adapter Object
 *
 * Return: nothing.
 */
static void _base_sync_drv_fw_timestamp(struct MPT3SAS_ADAPTER *ioc)
{
	Mpi26IoUnitControlRequest_t *mpi_request;
	Mpi26IoUnitControlReply_t *mpi_reply;
	u16 smid;
	ktime_t current_time;
	u64 TimeStamp = 0;
	u8 issue_reset = 0;

	mutex_lock(&ioc->scsih_cmds.mutex);
	if (ioc->scsih_cmds.status != MPT3_CMD_NOT_USED) {
		ioc_err(ioc, "scsih_cmd in use %s\n", __func__);
		goto out;
	}
	ioc->scsih_cmds.status = MPT3_CMD_PENDING;
	smid = mpt3sas_base_get_smid(ioc, ioc->scsih_cb_idx);
	if (!smid) {
		ioc_err(ioc, "Failed obtaining a smid %s\n", __func__);
		ioc->scsih_cmds.status = MPT3_CMD_NOT_USED;
		goto out;
	}
	mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
	ioc->scsih_cmds.smid = smid;
	memset(mpi_request, 0, sizeof(Mpi26IoUnitControlRequest_t));
	mpi_request->Function = MPI2_FUNCTION_IO_UNIT_CONTROL;
	mpi_request->Operation = MPI26_CTRL_OP_SET_IOC_PARAMETER;
	mpi_request->IOCParameter = MPI26_SET_IOC_PARAMETER_SYNC_TIMESTAMP;
	current_time = ktime_get_real();
	TimeStamp = ktime_to_ms(current_time);
	mpi_request->Reserved7 = cpu_to_le32(TimeStamp >> 32);
	mpi_request->IOCParameterValue = cpu_to_le32(TimeStamp & 0xFFFFFFFF);
	init_completion(&ioc->scsih_cmds.done);
	ioc->put_smid_default(ioc, smid);
	dinitprintk(ioc, ioc_info(ioc,
	    "Io Unit Control Sync TimeStamp (sending), @time %lld ms\n",
	    TimeStamp));
	wait_for_completion_timeout(&ioc->scsih_cmds.done,
		MPT3SAS_TIMESYNC_TIMEOUT_SECONDS*HZ);
	if (!(ioc->scsih_cmds.status & MPT3_CMD_COMPLETE)) {
		mpt3sas_check_cmd_timeout(ioc,
		    ioc->scsih_cmds.status, mpi_request,
		    sizeof(Mpi2SasIoUnitControlRequest_t)/4, issue_reset);
		goto issue_host_reset;
	}
	if (ioc->scsih_cmds.status & MPT3_CMD_REPLY_VALID) {
		mpi_reply = ioc->scsih_cmds.reply;
		dinitprintk(ioc, ioc_info(ioc,
		    "Io Unit Control sync timestamp (complete): ioc_status(0x%04x), loginfo(0x%08x)\n",
		    le16_to_cpu(mpi_reply->IOCStatus),
		    le32_to_cpu(mpi_reply->IOCLogInfo)));
	}
issue_host_reset:
	if (issue_reset)
		mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER);
	ioc->scsih_cmds.status = MPT3_CMD_NOT_USED;
out:
	mutex_unlock(&ioc->scsih_cmds.mutex);
}

/**
 * _base_fault_reset_work - workq handling ioc fault conditions
 * @work: input argument, used to derive ioc
 *
 * Context: sleep.
 */
static void
_base_fault_reset_work(struct work_struct *work)
{
	struct MPT3SAS_ADAPTER *ioc =
	    container_of(work, struct MPT3SAS_ADAPTER, fault_reset_work.work);
	unsigned long flags;
	u32 doorbell;
	int rc;
	struct task_struct *p;


	spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags);
	if ((ioc->shost_recovery && (ioc->ioc_coredump_loop == 0)) ||
			ioc->pci_error_recovery)
		goto rearm_timer;
	spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags);

	doorbell = mpt3sas_base_get_iocstate(ioc, 0);
	if ((doorbell & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_MASK) {
		ioc_err(ioc, "SAS host is non-operational !!!!\n");

		/* It may be possible that EEH recovery can resolve some of
		 * the pci bus failure issues rather than removing the dead
		 * ioc function by considering the controller to be in a
		 * non-operational state. So here priority is given to the
		 * EEH recovery. If it doesn't resolve this issue, the
		 * mpt3sas driver will consider this controller to be
		 * non-operational and remove the dead ioc function.
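		 *
		 * In effect, EEH gets five fault-polling intervals (see the
		 * non_operational_loop check below) before the dead-IOC
		 * removal path is taken.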
		 */
		if (ioc->non_operational_loop++ < 5) {
			spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock,
							 flags);
			goto rearm_timer;
		}

		/*
		 * Call _scsih_flush_pending_cmds callback so that we flush all
		 * pending commands back to OS. This call is required to avoid
		 * deadlock at block layer. Dead IOC will fail to do diag reset,
		 * and this call is safe since dead ioc will never return any
		 * command back from HW.
		 */
		mpt3sas_base_pause_mq_polling(ioc);
		ioc->schedule_dead_ioc_flush_running_cmds(ioc);
		/*
		 * Set remove_host flag early since kernel thread will
		 * take some time to execute.
		 */
		ioc->remove_host = 1;
		/*Remove the Dead Host */
		p = kthread_run(mpt3sas_remove_dead_ioc_func, ioc,
		    "%s_dead_ioc_%d", ioc->driver_name, ioc->id);
		if (IS_ERR(p))
			ioc_err(ioc, "%s: Running mpt3sas_dead_ioc thread failed !!!!\n",
				__func__);
		else
			ioc_err(ioc, "%s: Running mpt3sas_dead_ioc thread success !!!!\n",
				__func__);
		return; /* don't rearm timer */
	}

	if ((doorbell & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_COREDUMP) {
		u8 timeout = (ioc->manu_pg11.CoreDumpTOSec) ?
		    ioc->manu_pg11.CoreDumpTOSec :
		    MPT3SAS_DEFAULT_COREDUMP_TIMEOUT_SECONDS;

		timeout /= (FAULT_POLLING_INTERVAL/1000);

		if (ioc->ioc_coredump_loop == 0) {
			mpt3sas_print_coredump_info(ioc,
			    doorbell & MPI2_DOORBELL_DATA_MASK);
			/* do not accept any IOs and disable the interrupts */
			spin_lock_irqsave(
			    &ioc->ioc_reset_in_progress_lock, flags);
			ioc->shost_recovery = 1;
			spin_unlock_irqrestore(
			    &ioc->ioc_reset_in_progress_lock, flags);
			mpt3sas_base_mask_interrupts(ioc);
			mpt3sas_base_pause_mq_polling(ioc);
			_base_clear_outstanding_commands(ioc);
		}

		ioc_info(ioc, "%s: CoreDump loop %d.",
		    __func__, ioc->ioc_coredump_loop);

		/* Wait until CoreDump completes or times out */
		if (ioc->ioc_coredump_loop++ < timeout) {
			spin_lock_irqsave(
			    &ioc->ioc_reset_in_progress_lock, flags);
			goto rearm_timer;
		}
	}

	if (ioc->ioc_coredump_loop) {
		if ((doorbell & MPI2_IOC_STATE_MASK) != MPI2_IOC_STATE_COREDUMP)
			ioc_err(ioc, "%s: CoreDump completed. LoopCount: %d",
			    __func__, ioc->ioc_coredump_loop);
		else
			ioc_err(ioc, "%s: CoreDump Timed out. LoopCount: %d",
			    __func__, ioc->ioc_coredump_loop);
		ioc->ioc_coredump_loop = MPT3SAS_COREDUMP_LOOP_DONE;
	}
	ioc->non_operational_loop = 0;
	if ((doorbell & MPI2_IOC_STATE_MASK) != MPI2_IOC_STATE_OPERATIONAL) {
		rc = mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER);
		ioc_warn(ioc, "%s: hard reset: %s\n",
			 __func__, rc == 0 ? "success" : "failed");
		doorbell = mpt3sas_base_get_iocstate(ioc, 0);
		if ((doorbell & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_FAULT) {
			mpt3sas_print_fault_code(ioc, doorbell &
			    MPI2_DOORBELL_DATA_MASK);
		} else if ((doorbell & MPI2_IOC_STATE_MASK) ==
		    MPI2_IOC_STATE_COREDUMP)
			mpt3sas_print_coredump_info(ioc, doorbell &
			    MPI2_DOORBELL_DATA_MASK);
		if (rc && (doorbell & MPI2_IOC_STATE_MASK) !=
		    MPI2_IOC_STATE_OPERATIONAL)
			return; /* don't rearm timer */
	}
	ioc->ioc_coredump_loop = 0;
	if (ioc->time_sync_interval &&
	    ++ioc->timestamp_update_count >= ioc->time_sync_interval) {
		ioc->timestamp_update_count = 0;
		_base_sync_drv_fw_timestamp(ioc);
	}
	spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags);
 rearm_timer:
	if (ioc->fault_reset_work_q)
		queue_delayed_work(ioc->fault_reset_work_q,
		    &ioc->fault_reset_work,
		    msecs_to_jiffies(FAULT_POLLING_INTERVAL));
	spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags);
}

/**
 * mpt3sas_base_start_watchdog - start the fault_reset_work_q
 * @ioc: per adapter object
 *
 * Context: sleep.
 */
void
mpt3sas_base_start_watchdog(struct MPT3SAS_ADAPTER *ioc)
{
	unsigned long flags;

	if (ioc->fault_reset_work_q)
		return;

	ioc->timestamp_update_count = 0;
	/* initialize fault polling */

	INIT_DELAYED_WORK(&ioc->fault_reset_work, _base_fault_reset_work);
	snprintf(ioc->fault_reset_work_q_name,
	    sizeof(ioc->fault_reset_work_q_name), "poll_%s%d_status",
	    ioc->driver_name, ioc->id);
	ioc->fault_reset_work_q = alloc_ordered_workqueue(
		"%s", WQ_MEM_RECLAIM, ioc->fault_reset_work_q_name);
	if (!ioc->fault_reset_work_q) {
		ioc_err(ioc, "%s: failed (line=%d)\n", __func__, __LINE__);
		return;
	}
	spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags);
	if (ioc->fault_reset_work_q)
		queue_delayed_work(ioc->fault_reset_work_q,
		    &ioc->fault_reset_work,
		    msecs_to_jiffies(FAULT_POLLING_INTERVAL));
	spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags);
}

/**
 * mpt3sas_base_stop_watchdog - stop the fault_reset_work_q
 * @ioc: per adapter object
 *
 * Context: sleep.
 */
void
mpt3sas_base_stop_watchdog(struct MPT3SAS_ADAPTER *ioc)
{
	unsigned long flags;
	struct workqueue_struct *wq;

	spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags);
	wq = ioc->fault_reset_work_q;
	ioc->fault_reset_work_q = NULL;
	spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags);
	if (wq) {
		if (!cancel_delayed_work_sync(&ioc->fault_reset_work))
			flush_workqueue(wq);
		destroy_workqueue(wq);
	}
}

/**
 * mpt3sas_base_fault_info - verbose translation of firmware FAULT code
 * @ioc: per adapter object
 * @fault_code: fault code
 */
void
mpt3sas_base_fault_info(struct MPT3SAS_ADAPTER *ioc, u16 fault_code)
{
	ioc_err(ioc, "fault_state(0x%04x)!\n", fault_code);
}

/**
 * mpt3sas_base_coredump_info - verbose translation of firmware CoreDump state
 * @ioc: per adapter object
 * @fault_code: fault code
 *
 * Return: nothing.
 */
void
mpt3sas_base_coredump_info(struct MPT3SAS_ADAPTER *ioc, u16 fault_code)
{
	ioc_err(ioc, "coredump_state(0x%04x)!\n", fault_code);
}

/**
 * mpt3sas_base_wait_for_coredump_completion - Wait until coredump
 * completes or times out
 * @ioc: per adapter object
 * @caller: caller function name
 *
 * Return: 0 for success, non-zero for failure.
 */
int
mpt3sas_base_wait_for_coredump_completion(struct MPT3SAS_ADAPTER *ioc,
		const char *caller)
{
	u8 timeout = (ioc->manu_pg11.CoreDumpTOSec) ?
			ioc->manu_pg11.CoreDumpTOSec :
			MPT3SAS_DEFAULT_COREDUMP_TIMEOUT_SECONDS;

	int ioc_state = _base_wait_on_iocstate(ioc, MPI2_IOC_STATE_FAULT,
					timeout);

	if (ioc_state)
		ioc_err(ioc,
		    "%s: CoreDump timed out. (ioc_state=0x%x)\n",
		    caller, ioc_state);
	else
		ioc_info(ioc,
		    "%s: CoreDump completed. (ioc_state=0x%x)\n",
		    caller, ioc_state);

	return ioc_state;
}

/**
 * mpt3sas_halt_firmware - halts the mpt controller firmware
 * @ioc: per adapter object
 *
 * For debugging timeout related issues. Writing 0xC0FFEE00
 * to the doorbell register will halt controller firmware. With
 * the purpose to stop both driver and firmware, the end user can
 * obtain a ring buffer from the controller UART.
 */
void
mpt3sas_halt_firmware(struct MPT3SAS_ADAPTER *ioc)
{
	u32 doorbell;

	if (!ioc->fwfault_debug)
		return;

	dump_stack();

	doorbell = ioc->base_readl_ext_retry(&ioc->chip->Doorbell);
	if ((doorbell & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_FAULT) {
		mpt3sas_print_fault_code(ioc, doorbell &
		    MPI2_DOORBELL_DATA_MASK);
	} else if ((doorbell & MPI2_IOC_STATE_MASK) ==
	    MPI2_IOC_STATE_COREDUMP) {
		mpt3sas_print_coredump_info(ioc, doorbell &
		    MPI2_DOORBELL_DATA_MASK);
	} else {
		writel(0xC0FFEE00, &ioc->chip->Doorbell);
		ioc_err(ioc, "Firmware is halted due to command timeout\n");
	}

	if (ioc->fwfault_debug == 2)
		for (;;)
			;
	else
		panic("panic in %s\n", __func__);
}

/**
 * _base_sas_ioc_info - verbose translation of the ioc status
 * @ioc: per adapter object
 * @mpi_reply: reply mf payload returned from firmware
 * @request_hdr: request mf
 */
static void
_base_sas_ioc_info(struct MPT3SAS_ADAPTER *ioc, MPI2DefaultReply_t *mpi_reply,
	MPI2RequestHeader_t *request_hdr)
{
	u16 ioc_status = le16_to_cpu(mpi_reply->IOCStatus) &
	    MPI2_IOCSTATUS_MASK;
	char *desc = NULL;
	u16 frame_sz;
	char *func_str = NULL;

	/* SCSI_IO, RAID_PASS are handled from _scsih_scsi_ioc_info */
	if (request_hdr->Function == MPI2_FUNCTION_SCSI_IO_REQUEST ||
	    request_hdr->Function == MPI2_FUNCTION_RAID_SCSI_IO_PASSTHROUGH ||
	    request_hdr->Function == MPI2_FUNCTION_EVENT_NOTIFICATION)
		return;

	if (ioc_status == MPI2_IOCSTATUS_CONFIG_INVALID_PAGE)
		return;
	/*
	 * Older firmware versions don't support driver trigger pages, so
	 * skip displaying the 'config invalid type' type
	 * of error message.
	 */
	if (request_hdr->Function == MPI2_FUNCTION_CONFIG) {
		Mpi2ConfigRequest_t *rqst = (Mpi2ConfigRequest_t *)request_hdr;

		if ((rqst->ExtPageType ==
		    MPI2_CONFIG_EXTPAGETYPE_DRIVER_PERSISTENT_TRIGGER) &&
		    !(ioc->logging_level & MPT_DEBUG_CONFIG)) {
			return;
		}
	}

	switch (ioc_status) {

/****************************************************************************
*  Common IOCStatus values for all replies
****************************************************************************/

	case MPI2_IOCSTATUS_INVALID_FUNCTION:
		desc = "invalid function";
		break;
	case MPI2_IOCSTATUS_BUSY:
		desc = "busy";
		break;
	case MPI2_IOCSTATUS_INVALID_SGL:
		desc = "invalid sgl";
		break;
	case MPI2_IOCSTATUS_INTERNAL_ERROR:
		desc = "internal error";
		break;
	case MPI2_IOCSTATUS_INVALID_VPID:
		desc = "invalid vpid";
		break;
	case MPI2_IOCSTATUS_INSUFFICIENT_RESOURCES:
		desc = "insufficient resources";
		break;
	case MPI2_IOCSTATUS_INSUFFICIENT_POWER:
		desc = "insufficient power";
		break;
	case MPI2_IOCSTATUS_INVALID_FIELD:
		desc = "invalid field";
		break;
	case MPI2_IOCSTATUS_INVALID_STATE:
		desc = "invalid state";
		break;
	case MPI2_IOCSTATUS_OP_STATE_NOT_SUPPORTED:
		desc = "op state not supported";
		break;

/****************************************************************************
*  Config IOCStatus values
****************************************************************************/

	case MPI2_IOCSTATUS_CONFIG_INVALID_ACTION:
		desc = "config invalid action";
		break;
	case MPI2_IOCSTATUS_CONFIG_INVALID_TYPE:
		desc = "config invalid type";
		break;
	case MPI2_IOCSTATUS_CONFIG_INVALID_PAGE:
		desc = "config invalid page";
		break;
	case MPI2_IOCSTATUS_CONFIG_INVALID_DATA:
		desc = "config invalid data";
		break;
	case MPI2_IOCSTATUS_CONFIG_NO_DEFAULTS:
		desc = "config no defaults";
		break;
	case MPI2_IOCSTATUS_CONFIG_CANT_COMMIT:
		desc = "config can't commit";
		break;

/****************************************************************************
*  SCSI IO Reply
****************************************************************************/

	case MPI2_IOCSTATUS_SCSI_RECOVERED_ERROR:
	case MPI2_IOCSTATUS_SCSI_INVALID_DEVHANDLE:
	case MPI2_IOCSTATUS_SCSI_DEVICE_NOT_THERE:
	case MPI2_IOCSTATUS_SCSI_DATA_OVERRUN:
	case MPI2_IOCSTATUS_SCSI_DATA_UNDERRUN:
	case MPI2_IOCSTATUS_SCSI_IO_DATA_ERROR:
	case MPI2_IOCSTATUS_SCSI_PROTOCOL_ERROR:
	case MPI2_IOCSTATUS_SCSI_TASK_TERMINATED:
	case MPI2_IOCSTATUS_SCSI_RESIDUAL_MISMATCH:
	case MPI2_IOCSTATUS_SCSI_TASK_MGMT_FAILED:
	case MPI2_IOCSTATUS_SCSI_IOC_TERMINATED:
	case MPI2_IOCSTATUS_SCSI_EXT_TERMINATED:
		break;

/****************************************************************************
*  For use by SCSI Initiator and SCSI Target end-to-end data protection
****************************************************************************/

	case MPI2_IOCSTATUS_EEDP_GUARD_ERROR:
		desc = "eedp guard error";
		break;
	case MPI2_IOCSTATUS_EEDP_REF_TAG_ERROR:
		desc = "eedp ref tag error";
		break;
	case MPI2_IOCSTATUS_EEDP_APP_TAG_ERROR:
		desc = "eedp app tag error";
		break;

/****************************************************************************
*  SCSI Target values
****************************************************************************/

	case MPI2_IOCSTATUS_TARGET_INVALID_IO_INDEX:
		desc = "target invalid io index";
		break;
	case MPI2_IOCSTATUS_TARGET_ABORTED:
		desc = "target aborted";
		break;
	case MPI2_IOCSTATUS_TARGET_NO_CONN_RETRYABLE:
		desc = "target no conn retryable";
		break;
	case MPI2_IOCSTATUS_TARGET_NO_CONNECTION:
		desc = "target no connection";
		break;
	case MPI2_IOCSTATUS_TARGET_XFER_COUNT_MISMATCH:
		desc = "target xfer count mismatch";
		break;
	case MPI2_IOCSTATUS_TARGET_DATA_OFFSET_ERROR:
		desc = "target data offset error";
		break;
	case MPI2_IOCSTATUS_TARGET_TOO_MUCH_WRITE_DATA:
		desc = "target too much write data";
		break;
	case MPI2_IOCSTATUS_TARGET_IU_TOO_SHORT:
		desc = "target iu too short";
		break;
	case MPI2_IOCSTATUS_TARGET_ACK_NAK_TIMEOUT:
		desc = "target ack nak timeout";
		break;
	case MPI2_IOCSTATUS_TARGET_NAK_RECEIVED:
		desc = "target nak received";
		break;

/****************************************************************************
*  Serial Attached SCSI values
****************************************************************************/

	case MPI2_IOCSTATUS_SAS_SMP_REQUEST_FAILED:
		desc = "smp request failed";
		break;
	case MPI2_IOCSTATUS_SAS_SMP_DATA_OVERRUN:
		desc = "smp data overrun";
		break;

/****************************************************************************
*  Diagnostic Buffer Post / Diagnostic Release values
****************************************************************************/

	case MPI2_IOCSTATUS_DIAGNOSTIC_RELEASED:
		desc = "diagnostic released";
		break;
	default:
		break;
	}

	if (!desc)
		return;

	switch (request_hdr->Function) {
	case MPI2_FUNCTION_CONFIG:
		frame_sz = sizeof(Mpi2ConfigRequest_t) + ioc->sge_size;
		func_str = "config_page";
		break;
	case MPI2_FUNCTION_SCSI_TASK_MGMT:
		frame_sz = sizeof(Mpi2SCSITaskManagementRequest_t);
		func_str = "task_mgmt";
		break;
	case MPI2_FUNCTION_SAS_IO_UNIT_CONTROL:
		frame_sz = sizeof(Mpi2SasIoUnitControlRequest_t);
		func_str = "sas_iounit_ctl";
		break;
	case MPI2_FUNCTION_SCSI_ENCLOSURE_PROCESSOR:
		frame_sz = sizeof(Mpi2SepRequest_t);
		func_str = "enclosure";
		break;
	case MPI2_FUNCTION_IOC_INIT:
		frame_sz = sizeof(Mpi2IOCInitRequest_t);
		func_str = "ioc_init";
		break;
	case MPI2_FUNCTION_PORT_ENABLE:
		frame_sz = sizeof(Mpi2PortEnableRequest_t);
		func_str = "port_enable";
		break;
	case MPI2_FUNCTION_SMP_PASSTHROUGH:
		frame_sz = sizeof(Mpi2SmpPassthroughRequest_t) + ioc->sge_size;
		func_str = "smp_passthru";
		break;
	case MPI2_FUNCTION_NVME_ENCAPSULATED:
		frame_sz = sizeof(Mpi26NVMeEncapsulatedRequest_t) +
		    ioc->sge_size;
		func_str = "nvme_encapsulated";
		break;
	case MPI2_FUNCTION_MCTP_PASSTHROUGH:
		frame_sz = sizeof(Mpi26MctpPassthroughRequest_t) +
		    ioc->sge_size;
		func_str = "mctp_passthru";
		break;
	default:
		frame_sz = 32;
		func_str = "unknown";
		break;
	}

	ioc_warn(ioc, "ioc_status: %s(0x%04x), request(0x%p),(%s)\n",
		 desc, ioc_status, request_hdr, func_str);

	_debug_dump_mf(request_hdr, frame_sz/4);
}

/**
 * _base_display_event_data - verbose translation of firmware async events
 * @ioc: per adapter object
 * @mpi_reply: reply mf payload returned from firmware
 */
static void
_base_display_event_data(struct MPT3SAS_ADAPTER *ioc,
	Mpi2EventNotificationReply_t *mpi_reply)
{
	char *desc = NULL;
	u16 event;

	if (!(ioc->logging_level & MPT_DEBUG_EVENTS))
		return;

	event = le16_to_cpu(mpi_reply->Event);

	switch (event) {
	case MPI2_EVENT_LOG_DATA:
		desc = "Log Data";
		break;
	case MPI2_EVENT_STATE_CHANGE:
		desc = "Status Change";
		break;
	case MPI2_EVENT_HARD_RESET_RECEIVED:
		desc = "Hard Reset Received";
		break;
	case MPI2_EVENT_EVENT_CHANGE:
		desc = "Event Change";
		break;
	case MPI2_EVENT_SAS_DEVICE_STATUS_CHANGE:
		desc = "Device Status Change";
		break;
	case MPI2_EVENT_IR_OPERATION_STATUS:
		if (!ioc->hide_ir_msg)
			desc = "IR Operation Status";
		break;
	case MPI2_EVENT_SAS_DISCOVERY:
	{
		Mpi2EventDataSasDiscovery_t *event_data =
		    (Mpi2EventDataSasDiscovery_t *)mpi_reply->EventData;
		ioc_info(ioc, "Discovery: (%s)",
			 event_data->ReasonCode == MPI2_EVENT_SAS_DISC_RC_STARTED ?
			 "start" : "stop");
		if (event_data->DiscoveryStatus)
			pr_cont(" discovery_status(0x%08x)",
				le32_to_cpu(event_data->DiscoveryStatus));
		pr_cont("\n");
		return;
	}
	case MPI2_EVENT_SAS_BROADCAST_PRIMITIVE:
		desc = "SAS Broadcast Primitive";
		break;
	case MPI2_EVENT_SAS_INIT_DEVICE_STATUS_CHANGE:
		desc = "SAS Init Device Status Change";
		break;
	case MPI2_EVENT_SAS_INIT_TABLE_OVERFLOW:
		desc = "SAS Init Table Overflow";
		break;
	case MPI2_EVENT_SAS_TOPOLOGY_CHANGE_LIST:
		desc = "SAS Topology Change List";
		break;
	case MPI2_EVENT_SAS_ENCL_DEVICE_STATUS_CHANGE:
		desc = "SAS Enclosure Device Status Change";
		break;
	case MPI2_EVENT_IR_VOLUME:
		if (!ioc->hide_ir_msg)
			desc = "IR Volume";
		break;
	case MPI2_EVENT_IR_PHYSICAL_DISK:
		if (!ioc->hide_ir_msg)
			desc = "IR Physical Disk";
		break;
	case MPI2_EVENT_IR_CONFIGURATION_CHANGE_LIST:
		if (!ioc->hide_ir_msg)
			desc = "IR Configuration Change List";
		break;
	case MPI2_EVENT_LOG_ENTRY_ADDED:
		if (!ioc->hide_ir_msg)
			desc = "Log Entry Added";
		break;
	case MPI2_EVENT_TEMP_THRESHOLD:
		desc = "Temperature Threshold";
		break;
	case MPI2_EVENT_ACTIVE_CABLE_EXCEPTION:
		desc = "Cable Event";
		break;
	case MPI2_EVENT_SAS_DEVICE_DISCOVERY_ERROR:
		desc = "SAS Device Discovery Error";
		break;
	case MPI2_EVENT_PCIE_DEVICE_STATUS_CHANGE:
		desc = "PCIE Device Status Change";
		break;
	case MPI2_EVENT_PCIE_ENUMERATION:
	{
		Mpi26EventDataPCIeEnumeration_t *event_data =
			(Mpi26EventDataPCIeEnumeration_t *)mpi_reply->EventData;
		ioc_info(ioc, "PCIE Enumeration: (%s)",
			 event_data->ReasonCode == MPI26_EVENT_PCIE_ENUM_RC_STARTED ?
			 "start" : "stop");
		if (event_data->EnumerationStatus)
			pr_cont("enumeration_status(0x%08x)",
				le32_to_cpu(event_data->EnumerationStatus));
		pr_cont("\n");
		return;
	}
	case MPI2_EVENT_PCIE_TOPOLOGY_CHANGE_LIST:
		desc = "PCIE Topology Change List";
		break;
	}

	if (!desc)
		return;

	ioc_info(ioc, "%s\n", desc);
}

/**
 * _base_sas_log_info - verbose translation of firmware log info
 * @ioc: per adapter object
 * @log_info: log info
 */
static void
_base_sas_log_info(struct MPT3SAS_ADAPTER *ioc, u32 log_info)
{
	union loginfo_type {
		u32	loginfo;
		struct {
			u32	subcode:16;
			u32	code:8;
			u32	originator:4;
			u32	bus_type:4;
		} dw;
	};
	union loginfo_type sas_loginfo;
	char *originator_str = NULL;

	sas_loginfo.loginfo = log_info;
	if (sas_loginfo.dw.bus_type != 3 /*SAS*/)
		return;

	/* each nexus loss loginfo */
	if (log_info == 0x31170000)
		return;

	/* eat the loginfos associated with task aborts */
	if (ioc->ignore_loginfos && (log_info == 0x30050000 || log_info ==
	    0x31140000 || log_info == 0x31130000))
		return;

	switch (sas_loginfo.dw.originator) {
	case 0:
		originator_str = "IOP";
		break;
	case 1:
		originator_str = "PL";
		break;
	case 2:
		if (!ioc->hide_ir_msg)
			originator_str = "IR";
		else
			originator_str = "WarpDrive";
		break;
	}

	ioc_warn(ioc, "log_info(0x%08x): originator(%s), code(0x%02x), sub_code(0x%04x)\n",
		 log_info,
		 originator_str, sas_loginfo.dw.code, sas_loginfo.dw.subcode);
}

/**
 * _base_display_reply_info - handle reply descriptors depending on IOC Status
 * @ioc: per adapter object
 * @smid: system request message index
 * @msix_index: MSIX table index supplied by the OS
 * @reply: reply message frame (lower 32bit addr)
 */
static void
_base_display_reply_info(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index,
	u32 reply)
{
	MPI2DefaultReply_t *mpi_reply;
	u16 ioc_status;
	u32 loginfo = 0;

	mpi_reply = mpt3sas_base_get_reply_virt_addr(ioc, reply);
	if (unlikely(!mpi_reply)) {
		ioc_err(ioc, "mpi_reply not valid at %s:%d/%s()!\n",
			__FILE__, __LINE__, __func__);
		return;
	}
	ioc_status = le16_to_cpu(mpi_reply->IOCStatus);

	if ((ioc_status & MPI2_IOCSTATUS_MASK) &&
	    (ioc->logging_level & MPT_DEBUG_REPLY)) {
		_base_sas_ioc_info(ioc, mpi_reply,
		    mpt3sas_base_get_msg_frame(ioc, smid));
	}

	if (ioc_status & MPI2_IOCSTATUS_FLAG_LOG_INFO_AVAILABLE) {
		loginfo = le32_to_cpu(mpi_reply->IOCLogInfo);
		_base_sas_log_info(ioc, loginfo);
	}

	if (ioc_status || loginfo) {
		ioc_status &= MPI2_IOCSTATUS_MASK;
		mpt3sas_trigger_mpi(ioc, ioc_status, loginfo);
	}
}

/**
 * mpt3sas_base_done - base internal command completion routine
 * @ioc: per adapter object
 * @smid: system request message index
 * @msix_index: MSIX table index supplied by the OS
 * @reply: reply message frame(lower 32bit addr)
 *
 * Return:
 * 1 meaning mf should be freed from _base_interrupt
 * 0 means the mf is freed from this function.
 */
u8
mpt3sas_base_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index,
	u32 reply)
{
	MPI2DefaultReply_t *mpi_reply;

	mpi_reply = mpt3sas_base_get_reply_virt_addr(ioc, reply);
	if (mpi_reply && mpi_reply->Function == MPI2_FUNCTION_EVENT_ACK)
		return mpt3sas_check_for_pending_internal_cmds(ioc, smid);

	if (ioc->base_cmds.status == MPT3_CMD_NOT_USED)
		return 1;

	ioc->base_cmds.status |= MPT3_CMD_COMPLETE;
	if (mpi_reply) {
		ioc->base_cmds.status |= MPT3_CMD_REPLY_VALID;
		memcpy(ioc->base_cmds.reply, mpi_reply, mpi_reply->MsgLength*4);
	}
	ioc->base_cmds.status &= ~MPT3_CMD_PENDING;

	complete(&ioc->base_cmds.done);
	return 1;
}

/**
 * _base_async_event - main callback handler for firmware async events
 * @ioc: per adapter object
 * @msix_index: MSIX table index supplied by the OS
 * @reply: reply message frame(lower 32bit addr)
 *
 * Return:
 * 1 meaning mf should be freed from _base_interrupt
 * 0 means the mf is freed from this function.
 */
static u8
_base_async_event(struct MPT3SAS_ADAPTER *ioc, u8 msix_index, u32 reply)
{
	Mpi2EventNotificationReply_t *mpi_reply;
	Mpi2EventAckRequest_t *ack_request;
	u16 smid;
	struct _event_ack_list *delayed_event_ack;

	mpi_reply = mpt3sas_base_get_reply_virt_addr(ioc, reply);
	if (!mpi_reply)
		return 1;
	if (mpi_reply->Function != MPI2_FUNCTION_EVENT_NOTIFICATION)
		return 1;

	_base_display_event_data(ioc, mpi_reply);

	if (!(mpi_reply->AckRequired & MPI2_EVENT_NOTIFICATION_ACK_REQUIRED))
		goto out;
	smid = mpt3sas_base_get_smid(ioc, ioc->base_cb_idx);
	if (!smid) {
		delayed_event_ack = kzalloc(sizeof(*delayed_event_ack),
					GFP_ATOMIC);
		if (!delayed_event_ack)
			goto out;
		INIT_LIST_HEAD(&delayed_event_ack->list);
		delayed_event_ack->Event = mpi_reply->Event;
		delayed_event_ack->EventContext = mpi_reply->EventContext;
		list_add_tail(&delayed_event_ack->list,
				&ioc->delayed_event_ack_list);
		dewtprintk(ioc,
			   ioc_info(ioc, "DELAYED: EVENT ACK: event (0x%04x)\n",
				    le16_to_cpu(mpi_reply->Event)));
		goto out;
	}

	ack_request = mpt3sas_base_get_msg_frame(ioc, smid);
	memset(ack_request, 0, sizeof(Mpi2EventAckRequest_t));
	ack_request->Function = MPI2_FUNCTION_EVENT_ACK;
	ack_request->Event = mpi_reply->Event;
	ack_request->EventContext = mpi_reply->EventContext;
	ack_request->VF_ID = 0;  /* TODO */
	ack_request->VP_ID = 0;
	ioc->put_smid_default(ioc, smid);

 out:

	/* scsih callback handler */
	mpt3sas_scsih_event_callback(ioc, msix_index, reply);

	/* ctl callback handler */
	mpt3sas_ctl_event_callback(ioc, msix_index, reply);

	return 1;
}

static struct scsiio_tracker *
_get_st_from_smid(struct MPT3SAS_ADAPTER *ioc, u16 smid)
{
	struct scsi_cmnd *cmd;

	if (WARN_ON(!smid) ||
	    WARN_ON(smid >= ioc->hi_priority_smid))
		return NULL;

	cmd = mpt3sas_scsih_scsi_lookup_get(ioc, smid);
	if (cmd)
		return scsi_cmd_priv(cmd);

	return NULL;
}

/**
 * _base_get_cb_idx - obtain the callback index
 * @ioc: per adapter object
 * @smid: system request message index
 *
 * Return: callback index.
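 *
 * SMIDs below hi_priority_smid are SCSI IO requests (with the internal
 * ctl command living at ctl_smid); higher SMIDs are resolved through the
 * high-priority and internal lookup tables respectively.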
 */
static u8
_base_get_cb_idx(struct MPT3SAS_ADAPTER *ioc, u16 smid)
{
	int i;
	u16 ctl_smid = ioc->scsiio_depth - INTERNAL_SCSIIO_CMDS_COUNT + 1;
	u8 cb_idx = 0xFF;

	if (smid < ioc->hi_priority_smid) {
		struct scsiio_tracker *st;

		if (smid < ctl_smid) {
			st = _get_st_from_smid(ioc, smid);
			if (st)
				cb_idx = st->cb_idx;
		} else if (smid == ctl_smid)
			cb_idx = ioc->ctl_cb_idx;
	} else if (smid < ioc->internal_smid) {
		i = smid - ioc->hi_priority_smid;
		cb_idx = ioc->hpr_lookup[i].cb_idx;
	} else if (smid <= ioc->hba_queue_depth) {
		i = smid - ioc->internal_smid;
		cb_idx = ioc->internal_lookup[i].cb_idx;
	}
	return cb_idx;
}

/**
 * mpt3sas_base_pause_mq_polling - pause polling on the mq poll queues
 *				when driver is flushing out the IOs.
 * @ioc: per adapter object
 *
 * Pause polling on the mq poll (io uring) queues when driver is flushing
 * out the IOs. Otherwise we may see the race condition of completing the same
 * IO from two paths.
 *
 * Returns nothing.
 */
void
mpt3sas_base_pause_mq_polling(struct MPT3SAS_ADAPTER *ioc)
{
	int iopoll_q_count =
	    ioc->reply_queue_count - ioc->iopoll_q_start_index;
	int qid;

	for (qid = 0; qid < iopoll_q_count; qid++)
		atomic_set(&ioc->io_uring_poll_queues[qid].pause, 1);

	/*
	 * wait for current poll to complete.
	 */
	for (qid = 0; qid < iopoll_q_count; qid++) {
		while (atomic_read(&ioc->io_uring_poll_queues[qid].busy)) {
			cpu_relax();
			udelay(500);
		}
	}
}

/**
 * mpt3sas_base_resume_mq_polling - Resume polling on mq poll queues.
 * @ioc: per adapter object
 *
 * Returns nothing.
 */
void
mpt3sas_base_resume_mq_polling(struct MPT3SAS_ADAPTER *ioc)
{
	int iopoll_q_count =
	    ioc->reply_queue_count - ioc->iopoll_q_start_index;
	int qid;

	for (qid = 0; qid < iopoll_q_count; qid++)
		atomic_set(&ioc->io_uring_poll_queues[qid].pause, 0);
}

/**
 * mpt3sas_base_mask_interrupts - disable interrupts
 * @ioc: per adapter object
 *
 * Disabling ResetIRQ, Reply and Doorbell Interrupts
 */
void
mpt3sas_base_mask_interrupts(struct MPT3SAS_ADAPTER *ioc)
{
	u32 him_register;

	ioc->mask_interrupts = 1;
	him_register = ioc->base_readl(&ioc->chip->HostInterruptMask);
	him_register |= MPI2_HIM_DIM + MPI2_HIM_RIM + MPI2_HIM_RESET_IRQ_MASK;
	writel(him_register, &ioc->chip->HostInterruptMask);
	ioc->base_readl(&ioc->chip->HostInterruptMask);
}

/**
 * mpt3sas_base_unmask_interrupts - enable interrupts
 * @ioc: per adapter object
 *
 * Enabling only Reply Interrupts
 */
void
mpt3sas_base_unmask_interrupts(struct MPT3SAS_ADAPTER *ioc)
{
	u32 him_register;

	him_register = ioc->base_readl(&ioc->chip->HostInterruptMask);
	him_register &= ~MPI2_HIM_RIM;
	writel(him_register, &ioc->chip->HostInterruptMask);
	ioc->mask_interrupts = 0;
}

union reply_descriptor {
	u64 word;
	struct {
		u32 low;
		u32 high;
	} u;
};

static u32 base_mod64(u64 dividend, u32 divisor)
{
	u32 remainder;

	if (!divisor)
		pr_err("mpt3sas: DIVISOR is zero, in div fn\n");
	remainder = do_div(dividend, divisor);
	return remainder;
}

/**
 * _base_process_reply_queue - Process reply descriptors from reply
 *		descriptor post queue.
 * @reply_q: per IRQ's reply queue object.
 *
 * Return: number of reply descriptors processed from reply
 * descriptor queue.
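 *
 * Only one context processes a given reply queue at a time; if the
 * queue's busy counter is already held, this returns 0 immediately.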
 */
static int
_base_process_reply_queue(struct adapter_reply_queue *reply_q)
{
	union reply_descriptor rd;
	u64 completed_cmds;
	u8 request_descript_type;
	u16 smid;
	u8 cb_idx;
	u32 reply;
	u8 msix_index = reply_q->msix_index;
	struct MPT3SAS_ADAPTER *ioc = reply_q->ioc;
	Mpi2ReplyDescriptorsUnion_t *rpf;
	u8 rc;

	completed_cmds = 0;
	if (!atomic_add_unless(&reply_q->busy, 1, 1))
		return completed_cmds;

	rpf = &reply_q->reply_post_free[reply_q->reply_post_host_index];
	request_descript_type = rpf->Default.ReplyFlags
	     & MPI2_RPY_DESCRIPT_FLAGS_TYPE_MASK;
	if (request_descript_type == MPI2_RPY_DESCRIPT_FLAGS_UNUSED) {
		atomic_dec(&reply_q->busy);
		return completed_cmds;
	}

	cb_idx = 0xFF;
	do {
		rd.word = le64_to_cpu(rpf->Words);
		if (rd.u.low == UINT_MAX || rd.u.high == UINT_MAX)
			goto out;
		reply = 0;
		smid = le16_to_cpu(rpf->Default.DescriptorTypeDependent1);
		if (request_descript_type ==
		    MPI25_RPY_DESCRIPT_FLAGS_FAST_PATH_SCSI_IO_SUCCESS ||
		    request_descript_type ==
		    MPI2_RPY_DESCRIPT_FLAGS_SCSI_IO_SUCCESS ||
		    request_descript_type ==
		    MPI26_RPY_DESCRIPT_FLAGS_PCIE_ENCAPSULATED_SUCCESS) {
			cb_idx = _base_get_cb_idx(ioc, smid);
			if ((likely(cb_idx < MPT_MAX_CALLBACKS)) &&
			    (likely(mpt_callbacks[cb_idx] != NULL))) {
				rc = mpt_callbacks[cb_idx](ioc, smid,
				    msix_index, 0);
				if (rc)
					mpt3sas_base_free_smid(ioc, smid);
			}
		} else if (request_descript_type ==
		    MPI2_RPY_DESCRIPT_FLAGS_ADDRESS_REPLY) {
			reply = le32_to_cpu(
			    rpf->AddressReply.ReplyFrameAddress);
			if (reply > ioc->reply_dma_max_address ||
			    reply < ioc->reply_dma_min_address)
				reply = 0;
			if (smid) {
				cb_idx = _base_get_cb_idx(ioc, smid);
				if ((likely(cb_idx < MPT_MAX_CALLBACKS)) &&
				    (likely(mpt_callbacks[cb_idx] != NULL))) {
					rc = mpt_callbacks[cb_idx](ioc, smid,
					    msix_index, reply);
					if (reply)
						_base_display_reply_info(ioc,
						    smid, msix_index, reply);
					if (rc)
						mpt3sas_base_free_smid(ioc,
						    smid);
				}
			} else {
				_base_async_event(ioc, msix_index, reply);
			}

			/* reply free queue handling */
			if (reply) {
				ioc->reply_free_host_index =
				    (ioc->reply_free_host_index ==
				    (ioc->reply_free_queue_depth - 1)) ?
				    0 : ioc->reply_free_host_index + 1;
				ioc->reply_free[ioc->reply_free_host_index] =
				    cpu_to_le32(reply);
				if (ioc->is_mcpu_endpoint)
					_base_clone_reply_to_sys_mem(ioc,
						reply,
						ioc->reply_free_host_index);
				writel(ioc->reply_free_host_index,
				    &ioc->chip->ReplyFreeHostIndex);
			}
		}

		rpf->Words = cpu_to_le64(ULLONG_MAX);
		reply_q->reply_post_host_index =
		    (reply_q->reply_post_host_index ==
		    (ioc->reply_post_queue_depth - 1)) ? 0 :
		    reply_q->reply_post_host_index + 1;
		request_descript_type =
		    reply_q->reply_post_free[reply_q->reply_post_host_index].
		    Default.ReplyFlags & MPI2_RPY_DESCRIPT_FLAGS_TYPE_MASK;
		completed_cmds++;
		/* Update the reply post host index after continuously
		 * processing the threshold number of Reply Descriptors.
		 * So that FW can find enough entries to post the Reply
		 * Descriptors in the reply descriptor post queue.
		 */
		if (completed_cmds >= ioc->thresh_hold) {
			if (ioc->combined_reply_queue) {
				writel(reply_q->reply_post_host_index |
						((msix_index & 7) <<
						 MPI2_RPHI_MSIX_INDEX_SHIFT),
				    ioc->replyPostRegisterIndex[msix_index/8]);
			} else {
				writel(reply_q->reply_post_host_index |
						(msix_index <<
						 MPI2_RPHI_MSIX_INDEX_SHIFT),
						&ioc->chip->ReplyPostHostIndex);
			}
			if (!reply_q->is_iouring_poll_q &&
			    !reply_q->irq_poll_scheduled) {
				reply_q->irq_poll_scheduled = true;
				irq_poll_sched(&reply_q->irqpoll);
			}
			atomic_dec(&reply_q->busy);
			return completed_cmds;
		}
		if (request_descript_type == MPI2_RPY_DESCRIPT_FLAGS_UNUSED)
			goto out;
		if (!reply_q->reply_post_host_index)
			rpf = reply_q->reply_post_free;
		else
			rpf++;
	} while (1);

 out:

	if (!completed_cmds) {
		atomic_dec(&reply_q->busy);
		return completed_cmds;
	}

	if (ioc->is_warpdrive) {
		writel(reply_q->reply_post_host_index,
		    ioc->reply_post_host_index[msix_index]);
		atomic_dec(&reply_q->busy);
		return completed_cmds;
	}

	/* Update Reply Post Host Index.
	 * For those HBA's which support combined reply queue feature
	 * 1. Get the correct Supplemental Reply Post Host Index Register.
	 *    i.e. (msix_index / 8)th entry from Supplemental Reply Post Host
	 *    Index Register address bank i.e replyPostRegisterIndex[],
	 * 2. Then update this register with new reply host index value
	 *    in ReplyPostIndex field and the MSIxIndex field with
	 *    msix_index value reduced to a value between 0 and 7,
	 *    using a modulo 8 operation. Since each Supplemental Reply Post
	 *    Host Index Register supports 8 MSI-X vectors.
	 *
	 * For other HBA's just update the Reply Post Host Index register with
	 * new reply host index value in ReplyPostIndex Field and msix_index
	 * value in MSIxIndex field.
	 */
	if (ioc->combined_reply_queue)
		writel(reply_q->reply_post_host_index | ((msix_index & 7) <<
			MPI2_RPHI_MSIX_INDEX_SHIFT),
			ioc->replyPostRegisterIndex[msix_index/8]);
	else
		writel(reply_q->reply_post_host_index | (msix_index <<
			MPI2_RPHI_MSIX_INDEX_SHIFT),
			&ioc->chip->ReplyPostHostIndex);
	atomic_dec(&reply_q->busy);
	return completed_cmds;
}

/**
 * mpt3sas_blk_mq_poll - poll the blk mq poll queue
 * @shost: Scsi_Host object
 * @queue_num: hw ctx queue number
 *
 * Return number of entries that have been processed from poll queue.
 */
int mpt3sas_blk_mq_poll(struct Scsi_Host *shost, unsigned int queue_num)
{
	struct MPT3SAS_ADAPTER *ioc =
	    (struct MPT3SAS_ADAPTER *)shost->hostdata;
	struct adapter_reply_queue *reply_q;
	int num_entries = 0;
	int qid = queue_num - ioc->iopoll_q_start_index;

	if (atomic_read(&ioc->io_uring_poll_queues[qid].pause) ||
	    !atomic_add_unless(&ioc->io_uring_poll_queues[qid].busy, 1, 1))
		return 0;

	reply_q = ioc->io_uring_poll_queues[qid].reply_q;

	num_entries = _base_process_reply_queue(reply_q);
	atomic_dec(&ioc->io_uring_poll_queues[qid].busy);

	return num_entries;
}

/**
 * _base_interrupt - MPT adapter (IOC) specific interrupt handler.
 * @irq: irq number (not used)
 * @bus_id: bus identifier cookie == pointer to MPT_ADAPTER structure
 *
 * Return: IRQ_HANDLED if processed, else IRQ_NONE.
1895 */
1896 static irqreturn_t
1897 _base_interrupt(int irq, void *bus_id)
1898 {
1899 struct adapter_reply_queue *reply_q = bus_id;
1900 struct MPT3SAS_ADAPTER *ioc = reply_q->ioc;
1901
1902 if (ioc->mask_interrupts)
1903 return IRQ_NONE;
1904 if (reply_q->irq_poll_scheduled)
1905 return IRQ_HANDLED;
1906 return ((_base_process_reply_queue(reply_q) > 0) ?
1907 IRQ_HANDLED : IRQ_NONE);
1908 }
1909
1910 /**
1911 * _base_irqpoll - IRQ poll callback handler
1912 * @irqpoll: irq_poll object
1913 * @budget: irq poll weight
1914 *
1915 * Return: number of reply descriptors processed
1916 */
1917 static int
1918 _base_irqpoll(struct irq_poll *irqpoll, int budget)
1919 {
1920 struct adapter_reply_queue *reply_q;
1921 int num_entries = 0;
1922
1923 reply_q = container_of(irqpoll, struct adapter_reply_queue,
1924 irqpoll);
1925 if (reply_q->irq_line_enable) {
1926 disable_irq_nosync(reply_q->os_irq);
1927 reply_q->irq_line_enable = false;
1928 }
1929 num_entries = _base_process_reply_queue(reply_q);
1930 if (num_entries < budget) {
1931 irq_poll_complete(irqpoll);
1932 reply_q->irq_poll_scheduled = false;
1933 reply_q->irq_line_enable = true;
1934 enable_irq(reply_q->os_irq);
1935 /*
1936 * Go for one more round of processing the
1937 * reply descriptor post queue in case the HBA
1938 * Firmware has posted some reply descriptors
1939 * while reenabling the IRQ.
1940 */
1941 _base_process_reply_queue(reply_q);
1942 }
1943
1944 return num_entries;
1945 }
1946
1947 /**
1948 * _base_init_irqpolls - initialize IRQ polls
1949 * @ioc: per adapter object
1950 *
1951 * Return: nothing
1952 */
1953 static void
1954 _base_init_irqpolls(struct MPT3SAS_ADAPTER *ioc)
1955 {
1956 struct adapter_reply_queue *reply_q, *next;
1957
1958 if (list_empty(&ioc->reply_queue_list))
1959 return;
1960
1961 list_for_each_entry_safe(reply_q, next, &ioc->reply_queue_list, list) {
1962 if (reply_q->is_iouring_poll_q)
1963 continue;
1964 irq_poll_init(&reply_q->irqpoll,
1965 ioc->hba_queue_depth/4, _base_irqpoll);
1966 reply_q->irq_poll_scheduled = false;
1967 reply_q->irq_line_enable = true;
1968 reply_q->os_irq = pci_irq_vector(ioc->pdev,
1969 reply_q->msix_index);
1970 }
1971 }
1972
1973 /**
1974 * _base_is_controller_msix_enabled - check whether the controller supports multi-reply queues
1975 * @ioc: per adapter object
1976 *
1977 * Return: Whether or not MSI/X is enabled.
1978 */
1979 static inline int
1980 _base_is_controller_msix_enabled(struct MPT3SAS_ADAPTER *ioc)
1981 {
1982 return (ioc->facts.IOCCapabilities &
1983 MPI2_IOCFACTS_CAPABILITY_MSI_X_INDEX) && ioc->msix_enable;
1984 }
1985
1986 /**
1987 * mpt3sas_base_sync_reply_irqs - flush pending MSIX interrupts
1988 * @ioc: per adapter object
1989 * @poll: poll over reply descriptor pools in case the interrupt for a
1990 * timed-out SCSI command got delayed
1991 * Context: non-ISR context
1992 *
1993 * Called when a Task Management request has completed.
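 *
 * Illustrative usage (hypothetical call site; the real callers live in
 * the SCSI host portion of this driver): after a Task Management request
 * for a timed-out command completes, one might call
 *
 *	mpt3sas_base_sync_reply_irqs(ioc, 1);
 *
 * with @poll set so every reply queue is walked once more in case the
 * completion interrupt was delayed or lost.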
1994 */ 1995 void 1996 mpt3sas_base_sync_reply_irqs(struct MPT3SAS_ADAPTER *ioc, u8 poll) 1997 { 1998 struct adapter_reply_queue *reply_q; 1999 2000 /* If MSIX capability is turned off 2001 * then multi-queues are not enabled 2002 */ 2003 if (!_base_is_controller_msix_enabled(ioc)) 2004 return; 2005 2006 list_for_each_entry(reply_q, &ioc->reply_queue_list, list) { 2007 if (ioc->shost_recovery || ioc->remove_host || 2008 ioc->pci_error_recovery) 2009 return; 2010 /* TMs are on msix_index == 0 */ 2011 if (reply_q->msix_index == 0) 2012 continue; 2013 2014 if (reply_q->is_iouring_poll_q) { 2015 _base_process_reply_queue(reply_q); 2016 continue; 2017 } 2018 2019 synchronize_irq(pci_irq_vector(ioc->pdev, reply_q->msix_index)); 2020 if (reply_q->irq_poll_scheduled) { 2021 /* Calling irq_poll_disable will wait for any pending 2022 * callbacks to have completed. 2023 */ 2024 irq_poll_disable(&reply_q->irqpoll); 2025 irq_poll_enable(&reply_q->irqpoll); 2026 /* check how the scheduled poll has ended, 2027 * clean up only if necessary 2028 */ 2029 if (reply_q->irq_poll_scheduled) { 2030 reply_q->irq_poll_scheduled = false; 2031 reply_q->irq_line_enable = true; 2032 enable_irq(reply_q->os_irq); 2033 } 2034 } 2035 2036 if (poll) 2037 _base_process_reply_queue(reply_q); 2038 } 2039 } 2040 2041 /** 2042 * mpt3sas_base_release_callback_handler - clear interrupt callback handler 2043 * @cb_idx: callback index 2044 */ 2045 void 2046 mpt3sas_base_release_callback_handler(u8 cb_idx) 2047 { 2048 mpt_callbacks[cb_idx] = NULL; 2049 } 2050 2051 /** 2052 * mpt3sas_base_register_callback_handler - obtain index for the interrupt callback handler 2053 * @cb_func: callback function 2054 * 2055 * Return: Index of @cb_func. 2056 */ 2057 u8 2058 mpt3sas_base_register_callback_handler(MPT_CALLBACK cb_func) 2059 { 2060 u8 cb_idx; 2061 2062 for (cb_idx = MPT_MAX_CALLBACKS-1; cb_idx; cb_idx--) 2063 if (mpt_callbacks[cb_idx] == NULL) 2064 break; 2065 2066 mpt_callbacks[cb_idx] = cb_func; 2067 return cb_idx; 2068 } 2069 2070 /** 2071 * mpt3sas_base_initialize_callback_handler - initialize the interrupt callback handler 2072 */ 2073 void 2074 mpt3sas_base_initialize_callback_handler(void) 2075 { 2076 u8 cb_idx; 2077 2078 for (cb_idx = 0; cb_idx < MPT_MAX_CALLBACKS; cb_idx++) 2079 mpt3sas_base_release_callback_handler(cb_idx); 2080 } 2081 2082 2083 /** 2084 * _base_build_zero_len_sge - build zero length sg entry 2085 * @ioc: per adapter object 2086 * @paddr: virtual address for SGE 2087 * 2088 * Create a zero length scatter gather entry to insure the IOCs hardware has 2089 * something to use if the target device goes brain dead and tries 2090 * to send data even when none is asked for. 2091 */ 2092 static void 2093 _base_build_zero_len_sge(struct MPT3SAS_ADAPTER *ioc, void *paddr) 2094 { 2095 u32 flags_length = (u32)((MPI2_SGE_FLAGS_LAST_ELEMENT | 2096 MPI2_SGE_FLAGS_END_OF_BUFFER | MPI2_SGE_FLAGS_END_OF_LIST | 2097 MPI2_SGE_FLAGS_SIMPLE_ELEMENT) << 2098 MPI2_SGE_FLAGS_SHIFT); 2099 ioc->base_add_sg_single(paddr, flags_length, -1); 2100 } 2101 2102 /** 2103 * _base_add_sg_single_32 - Place a simple 32 bit SGE at address pAddr. 
2104 * @paddr: virtual address for SGE 2105 * @flags_length: SGE flags and data transfer length 2106 * @dma_addr: Physical address 2107 */ 2108 static void 2109 _base_add_sg_single_32(void *paddr, u32 flags_length, dma_addr_t dma_addr) 2110 { 2111 Mpi2SGESimple32_t *sgel = paddr; 2112 2113 flags_length |= (MPI2_SGE_FLAGS_32_BIT_ADDRESSING | 2114 MPI2_SGE_FLAGS_SYSTEM_ADDRESS) << MPI2_SGE_FLAGS_SHIFT; 2115 sgel->FlagsLength = cpu_to_le32(flags_length); 2116 sgel->Address = cpu_to_le32(dma_addr); 2117 } 2118 2119 2120 /** 2121 * _base_add_sg_single_64 - Place a simple 64 bit SGE at address pAddr. 2122 * @paddr: virtual address for SGE 2123 * @flags_length: SGE flags and data transfer length 2124 * @dma_addr: Physical address 2125 */ 2126 static void 2127 _base_add_sg_single_64(void *paddr, u32 flags_length, dma_addr_t dma_addr) 2128 { 2129 Mpi2SGESimple64_t *sgel = paddr; 2130 2131 flags_length |= (MPI2_SGE_FLAGS_64_BIT_ADDRESSING | 2132 MPI2_SGE_FLAGS_SYSTEM_ADDRESS) << MPI2_SGE_FLAGS_SHIFT; 2133 sgel->FlagsLength = cpu_to_le32(flags_length); 2134 sgel->Address = cpu_to_le64(dma_addr); 2135 } 2136 2137 /** 2138 * _base_get_chain_buffer_tracker - obtain chain tracker 2139 * @ioc: per adapter object 2140 * @scmd: SCSI commands of the IO request 2141 * 2142 * Return: chain tracker from chain_lookup table using key as 2143 * smid and smid's chain_offset. 2144 */ 2145 static struct chain_tracker * 2146 _base_get_chain_buffer_tracker(struct MPT3SAS_ADAPTER *ioc, 2147 struct scsi_cmnd *scmd) 2148 { 2149 struct chain_tracker *chain_req; 2150 struct scsiio_tracker *st = scsi_cmd_priv(scmd); 2151 u16 smid = st->smid; 2152 u8 chain_offset = 2153 atomic_read(&ioc->chain_lookup[smid - 1].chain_offset); 2154 2155 if (chain_offset == ioc->chains_needed_per_io) 2156 return NULL; 2157 2158 chain_req = &ioc->chain_lookup[smid - 1].chains_per_smid[chain_offset]; 2159 atomic_inc(&ioc->chain_lookup[smid - 1].chain_offset); 2160 return chain_req; 2161 } 2162 2163 2164 /** 2165 * _base_build_sg - build generic sg 2166 * @ioc: per adapter object 2167 * @psge: virtual address for SGE 2168 * @data_out_dma: physical address for WRITES 2169 * @data_out_sz: data xfer size for WRITES 2170 * @data_in_dma: physical address for READS 2171 * @data_in_sz: data xfer size for READS 2172 */ 2173 static void 2174 _base_build_sg(struct MPT3SAS_ADAPTER *ioc, void *psge, 2175 dma_addr_t data_out_dma, size_t data_out_sz, dma_addr_t data_in_dma, 2176 size_t data_in_sz) 2177 { 2178 u32 sgl_flags; 2179 2180 if (!data_out_sz && !data_in_sz) { 2181 _base_build_zero_len_sge(ioc, psge); 2182 return; 2183 } 2184 2185 if (data_out_sz && data_in_sz) { 2186 /* WRITE sgel first */ 2187 sgl_flags = (MPI2_SGE_FLAGS_SIMPLE_ELEMENT | 2188 MPI2_SGE_FLAGS_END_OF_BUFFER | MPI2_SGE_FLAGS_HOST_TO_IOC); 2189 sgl_flags = sgl_flags << MPI2_SGE_FLAGS_SHIFT; 2190 ioc->base_add_sg_single(psge, sgl_flags | 2191 data_out_sz, data_out_dma); 2192 2193 /* incr sgel */ 2194 psge += ioc->sge_size; 2195 2196 /* READ sgel last */ 2197 sgl_flags = (MPI2_SGE_FLAGS_SIMPLE_ELEMENT | 2198 MPI2_SGE_FLAGS_LAST_ELEMENT | MPI2_SGE_FLAGS_END_OF_BUFFER | 2199 MPI2_SGE_FLAGS_END_OF_LIST); 2200 sgl_flags = sgl_flags << MPI2_SGE_FLAGS_SHIFT; 2201 ioc->base_add_sg_single(psge, sgl_flags | 2202 data_in_sz, data_in_dma); 2203 } else if (data_out_sz) /* WRITE */ { 2204 sgl_flags = (MPI2_SGE_FLAGS_SIMPLE_ELEMENT | 2205 MPI2_SGE_FLAGS_LAST_ELEMENT | MPI2_SGE_FLAGS_END_OF_BUFFER | 2206 MPI2_SGE_FLAGS_END_OF_LIST | MPI2_SGE_FLAGS_HOST_TO_IOC); 2207 sgl_flags = sgl_flags << 
MPI2_SGE_FLAGS_SHIFT;
2208 ioc->base_add_sg_single(psge, sgl_flags |
2209 data_out_sz, data_out_dma);
2210 } else if (data_in_sz) /* READ */ {
2211 sgl_flags = (MPI2_SGE_FLAGS_SIMPLE_ELEMENT |
2212 MPI2_SGE_FLAGS_LAST_ELEMENT | MPI2_SGE_FLAGS_END_OF_BUFFER |
2213 MPI2_SGE_FLAGS_END_OF_LIST);
2214 sgl_flags = sgl_flags << MPI2_SGE_FLAGS_SHIFT;
2215 ioc->base_add_sg_single(psge, sgl_flags |
2216 data_in_sz, data_in_dma);
2217 }
2218 }
2219
2220 /* IEEE format sgls */
2221
2222 /**
2223 * _base_build_nvme_prp - This function is called for NVMe end devices to build
2224 * a native SGL (NVMe PRP).
2225 * @ioc: per adapter object
2226 * @smid: system request message index for getting associated SGL
2227 * @nvme_encap_request: the NVMe request msg frame pointer
2228 * @data_out_dma: physical address for WRITES
2229 * @data_out_sz: data xfer size for WRITES
2230 * @data_in_dma: physical address for READS
2231 * @data_in_sz: data xfer size for READS
2232 *
2233 * The native SGL is built starting in the first PRP
2234 * entry of the NVMe message (PRP1). If the data buffer is small enough to be
2235 * described entirely using PRP1, then PRP2 is not used. If needed, PRP2 is
2236 * used to describe a larger data buffer. If the data buffer is too large to
2237 * describe using the two PRP entries inside the NVMe message, then PRP1
2238 * describes the first data memory segment, and PRP2 contains a pointer to a PRP
2239 * list located elsewhere in memory to describe the remaining data memory
2240 * segments. The PRP list will be contiguous.
2241 *
2242 * The native SGL for NVMe devices is a Physical Region Page (PRP). A PRP
2243 * consists of a list of PRP entries to describe a number of noncontiguous
2244 * physical memory segments as a single memory buffer, just as an SGL does. Note
2245 * however, that this function is only used by the IOCTL call, so the memory
2246 * given will be guaranteed to be contiguous. There is no need to translate
2247 * non-contiguous SGL into a PRP in this case. All PRPs will describe
2248 * contiguous space that is one page size each.
2249 *
2250 * Each NVMe message contains two PRP entries. The first (PRP1) either contains
2251 * a PRP list pointer or a PRP element, depending upon the command. PRP2
2252 * contains the second PRP element if the memory being described fits within 2
2253 * PRP entries, or a PRP list pointer if the PRP spans more than two entries.
2254 *
2255 * A PRP list pointer contains the address of a PRP list, structured as a linear
2256 * array of PRP entries. Each PRP entry in this list describes a segment of
2257 * physical memory.
2258 *
2259 * Each 64-bit PRP entry comprises an address and an offset field. The address
2260 * always points at the beginning of a 4KB physical memory page, and the offset
2261 * describes where within that 4KB page the memory segment begins. Only the
2262 * first element in a PRP list may contain a non-zero offset, implying that all
2263 * memory segments following the first begin at the start of a 4KB page.
2264 *
2265 * Each PRP element normally describes 4KB of physical memory, with exceptions
2266 * for the first and last elements in the list. If the memory being described
2267 * by the list begins at a non-zero offset within the first 4KB page, then the
2268 * first PRP element will contain a non-zero offset indicating where the region
2269 * begins within the 4KB page.
The last memory segment may end before the end 2270 * of the 4KB segment, depending upon the overall size of the memory being 2271 * described by the PRP list. 2272 * 2273 * Since PRP entries lack any indication of size, the overall data buffer length 2274 * is used to determine where the end of the data memory buffer is located, and 2275 * how many PRP entries are required to describe it. 2276 */ 2277 static void 2278 _base_build_nvme_prp(struct MPT3SAS_ADAPTER *ioc, u16 smid, 2279 Mpi26NVMeEncapsulatedRequest_t *nvme_encap_request, 2280 dma_addr_t data_out_dma, size_t data_out_sz, dma_addr_t data_in_dma, 2281 size_t data_in_sz) 2282 { 2283 int prp_size = NVME_PRP_SIZE; 2284 __le64 *prp_entry, *prp1_entry, *prp2_entry; 2285 __le64 *prp_page; 2286 dma_addr_t prp_entry_dma, prp_page_dma, dma_addr; 2287 u32 offset, entry_len; 2288 u32 page_mask_result, page_mask; 2289 size_t length; 2290 struct mpt3sas_nvme_cmd *nvme_cmd = 2291 (void *)nvme_encap_request->NVMe_Command; 2292 2293 /* 2294 * Not all commands require a data transfer. If no data, just return 2295 * without constructing any PRP. 2296 */ 2297 if (!data_in_sz && !data_out_sz) 2298 return; 2299 prp1_entry = &nvme_cmd->prp1; 2300 prp2_entry = &nvme_cmd->prp2; 2301 prp_entry = prp1_entry; 2302 /* 2303 * For the PRP entries, use the specially allocated buffer of 2304 * contiguous memory. 2305 */ 2306 prp_page = (__le64 *)mpt3sas_base_get_pcie_sgl(ioc, smid); 2307 prp_page_dma = mpt3sas_base_get_pcie_sgl_dma(ioc, smid); 2308 2309 /* 2310 * Check if we are within 1 entry of a page boundary we don't 2311 * want our first entry to be a PRP List entry. 2312 */ 2313 page_mask = ioc->page_size - 1; 2314 page_mask_result = (uintptr_t)((u8 *)prp_page + prp_size) & page_mask; 2315 if (!page_mask_result) { 2316 /* Bump up to next page boundary. */ 2317 prp_page = (__le64 *)((u8 *)prp_page + prp_size); 2318 prp_page_dma = prp_page_dma + prp_size; 2319 } 2320 2321 /* 2322 * Set PRP physical pointer, which initially points to the current PRP 2323 * DMA memory page. 2324 */ 2325 prp_entry_dma = prp_page_dma; 2326 2327 /* Get physical address and length of the data buffer. */ 2328 if (data_in_sz) { 2329 dma_addr = data_in_dma; 2330 length = data_in_sz; 2331 } else { 2332 dma_addr = data_out_dma; 2333 length = data_out_sz; 2334 } 2335 2336 /* Loop while the length is not zero. */ 2337 while (length) { 2338 /* 2339 * Check if we need to put a list pointer here if we are at 2340 * page boundary - prp_size (8 bytes). 2341 */ 2342 page_mask_result = (prp_entry_dma + prp_size) & page_mask; 2343 if (!page_mask_result) { 2344 /* 2345 * This is the last entry in a PRP List, so we need to 2346 * put a PRP list pointer here. What this does is: 2347 * - bump the current memory pointer to the next 2348 * address, which will be the next full page. 2349 * - set the PRP Entry to point to that page. This 2350 * is now the PRP List pointer. 2351 * - bump the PRP Entry pointer the start of the 2352 * next page. Since all of this PRP memory is 2353 * contiguous, no need to get a new page - it's 2354 * just the next address. 2355 */ 2356 prp_entry_dma++; 2357 *prp_entry = cpu_to_le64(prp_entry_dma); 2358 prp_entry++; 2359 } 2360 2361 /* Need to handle if entry will be part of a page. */ 2362 offset = dma_addr & page_mask; 2363 entry_len = ioc->page_size - offset; 2364 2365 if (prp_entry == prp1_entry) { 2366 /* 2367 * Must fill in the first PRP pointer (PRP1) before 2368 * moving on. 
2369 */ 2370 *prp1_entry = cpu_to_le64(dma_addr); 2371 2372 /* 2373 * Now point to the second PRP entry within the 2374 * command (PRP2). 2375 */ 2376 prp_entry = prp2_entry; 2377 } else if (prp_entry == prp2_entry) { 2378 /* 2379 * Should the PRP2 entry be a PRP List pointer or just 2380 * a regular PRP pointer? If there is more than one 2381 * more page of data, must use a PRP List pointer. 2382 */ 2383 if (length > ioc->page_size) { 2384 /* 2385 * PRP2 will contain a PRP List pointer because 2386 * more PRP's are needed with this command. The 2387 * list will start at the beginning of the 2388 * contiguous buffer. 2389 */ 2390 *prp2_entry = cpu_to_le64(prp_entry_dma); 2391 2392 /* 2393 * The next PRP Entry will be the start of the 2394 * first PRP List. 2395 */ 2396 prp_entry = prp_page; 2397 } else { 2398 /* 2399 * After this, the PRP Entries are complete. 2400 * This command uses 2 PRP's and no PRP list. 2401 */ 2402 *prp2_entry = cpu_to_le64(dma_addr); 2403 } 2404 } else { 2405 /* 2406 * Put entry in list and bump the addresses. 2407 * 2408 * After PRP1 and PRP2 are filled in, this will fill in 2409 * all remaining PRP entries in a PRP List, one per 2410 * each time through the loop. 2411 */ 2412 *prp_entry = cpu_to_le64(dma_addr); 2413 prp_entry++; 2414 prp_entry_dma++; 2415 } 2416 2417 /* 2418 * Bump the phys address of the command's data buffer by the 2419 * entry_len. 2420 */ 2421 dma_addr += entry_len; 2422 2423 /* Decrement length accounting for last partial page. */ 2424 if (entry_len > length) 2425 length = 0; 2426 else 2427 length -= entry_len; 2428 } 2429 } 2430 2431 /** 2432 * base_make_prp_nvme - Prepare PRPs (Physical Region Page) - 2433 * SGLs specific to NVMe drives only 2434 * 2435 * @ioc: per adapter object 2436 * @scmd: SCSI command from the mid-layer 2437 * @mpi_request: mpi request 2438 * @smid: msg Index 2439 * @sge_count: scatter gather element count. 2440 * 2441 * Return: true: PRPs are built 2442 * false: IEEE SGLs needs to be built 2443 */ 2444 static void 2445 base_make_prp_nvme(struct MPT3SAS_ADAPTER *ioc, 2446 struct scsi_cmnd *scmd, 2447 Mpi25SCSIIORequest_t *mpi_request, 2448 u16 smid, int sge_count) 2449 { 2450 int sge_len, num_prp_in_chain = 0; 2451 Mpi25IeeeSgeChain64_t *main_chain_element, *ptr_first_sgl; 2452 __le64 *curr_buff; 2453 dma_addr_t msg_dma, sge_addr, offset; 2454 u32 page_mask, page_mask_result; 2455 struct scatterlist *sg_scmd; 2456 u32 first_prp_len; 2457 int data_len = scsi_bufflen(scmd); 2458 u32 nvme_pg_size; 2459 2460 nvme_pg_size = max_t(u32, ioc->page_size, NVME_PRP_PAGE_SIZE); 2461 /* 2462 * Nvme has a very convoluted prp format. One prp is required 2463 * for each page or partial page. Driver need to split up OS sg_list 2464 * entries if it is longer than one page or cross a page 2465 * boundary. Driver also have to insert a PRP list pointer entry as 2466 * the last entry in each physical page of the PRP list. 2467 * 2468 * NOTE: The first PRP "entry" is actually placed in the first 2469 * SGL entry in the main message as IEEE 64 format. The 2nd 2470 * entry in the main message is the chain element, and the rest 2471 * of the PRP entries are built in the contiguous pcie buffer. 2472 */ 2473 page_mask = nvme_pg_size - 1; 2474 2475 /* 2476 * Native SGL is needed. 2477 * Put a chain element in main message frame that points to the first 2478 * chain buffer. 2479 * 2480 * NOTE: The ChainOffset field must be 0 when using a chain pointer to 2481 * a native SGL. 
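 *
 * Rough picture of what is built below (layout only, sizes illustrative):
 *
 *	main message SGL[0]: IEEE simple SGE -> first fragment of the data
 *	                     buffer, up to the next NVMe page boundary
 *	main message SGL[1]: IEEE chain SGE  -> contiguous PCIe SGL buffer
 *	                     holding the remaining PRP entries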
2482 */ 2483 2484 /* Set main message chain element pointer */ 2485 main_chain_element = (pMpi25IeeeSgeChain64_t)&mpi_request->SGL; 2486 /* 2487 * For NVMe the chain element needs to be the 2nd SG entry in the main 2488 * message. 2489 */ 2490 main_chain_element = (Mpi25IeeeSgeChain64_t *) 2491 ((u8 *)main_chain_element + sizeof(MPI25_IEEE_SGE_CHAIN64)); 2492 2493 /* 2494 * For the PRP entries, use the specially allocated buffer of 2495 * contiguous memory. Normal chain buffers can't be used 2496 * because each chain buffer would need to be the size of an OS 2497 * page (4k). 2498 */ 2499 curr_buff = mpt3sas_base_get_pcie_sgl(ioc, smid); 2500 msg_dma = mpt3sas_base_get_pcie_sgl_dma(ioc, smid); 2501 2502 main_chain_element->Address = cpu_to_le64(msg_dma); 2503 main_chain_element->NextChainOffset = 0; 2504 main_chain_element->Flags = MPI2_IEEE_SGE_FLAGS_CHAIN_ELEMENT | 2505 MPI2_IEEE_SGE_FLAGS_SYSTEM_ADDR | 2506 MPI26_IEEE_SGE_FLAGS_NSF_NVME_PRP; 2507 2508 /* Build first prp, sge need not to be page aligned*/ 2509 ptr_first_sgl = (pMpi25IeeeSgeChain64_t)&mpi_request->SGL; 2510 sg_scmd = scsi_sglist(scmd); 2511 sge_addr = sg_dma_address(sg_scmd); 2512 sge_len = sg_dma_len(sg_scmd); 2513 2514 offset = sge_addr & page_mask; 2515 first_prp_len = nvme_pg_size - offset; 2516 2517 ptr_first_sgl->Address = cpu_to_le64(sge_addr); 2518 ptr_first_sgl->Length = cpu_to_le32(first_prp_len); 2519 2520 data_len -= first_prp_len; 2521 2522 if (sge_len > first_prp_len) { 2523 sge_addr += first_prp_len; 2524 sge_len -= first_prp_len; 2525 } else if (data_len && (sge_len == first_prp_len)) { 2526 sg_scmd = sg_next(sg_scmd); 2527 sge_addr = sg_dma_address(sg_scmd); 2528 sge_len = sg_dma_len(sg_scmd); 2529 } 2530 2531 for (;;) { 2532 offset = sge_addr & page_mask; 2533 2534 /* Put PRP pointer due to page boundary*/ 2535 page_mask_result = (uintptr_t)(curr_buff + 1) & page_mask; 2536 if (unlikely(!page_mask_result)) { 2537 scmd_printk(KERN_NOTICE, 2538 scmd, "page boundary curr_buff: 0x%p\n", 2539 curr_buff); 2540 msg_dma += 8; 2541 *curr_buff = cpu_to_le64(msg_dma); 2542 curr_buff++; 2543 num_prp_in_chain++; 2544 } 2545 2546 *curr_buff = cpu_to_le64(sge_addr); 2547 curr_buff++; 2548 msg_dma += 8; 2549 num_prp_in_chain++; 2550 2551 sge_addr += nvme_pg_size; 2552 sge_len -= nvme_pg_size; 2553 data_len -= nvme_pg_size; 2554 2555 if (data_len <= 0) 2556 break; 2557 2558 if (sge_len > 0) 2559 continue; 2560 2561 sg_scmd = sg_next(sg_scmd); 2562 sge_addr = sg_dma_address(sg_scmd); 2563 sge_len = sg_dma_len(sg_scmd); 2564 } 2565 2566 main_chain_element->Length = 2567 cpu_to_le32(num_prp_in_chain * sizeof(u64)); 2568 return; 2569 } 2570 2571 static bool 2572 base_is_prp_possible(struct MPT3SAS_ADAPTER *ioc, 2573 struct _pcie_device *pcie_device, struct scsi_cmnd *scmd, int sge_count) 2574 { 2575 u32 data_length = 0; 2576 bool build_prp = true; 2577 2578 data_length = scsi_bufflen(scmd); 2579 if (pcie_device && 2580 (mpt3sas_scsih_is_pcie_scsi_device(pcie_device->device_info))) { 2581 build_prp = false; 2582 return build_prp; 2583 } 2584 2585 /* If Datalenth is <= 16K and number of SGE’s entries are <= 2 2586 * we built IEEE SGL 2587 */ 2588 if ((data_length <= NVME_PRP_PAGE_SIZE*4) && (sge_count <= 2)) 2589 build_prp = false; 2590 2591 return build_prp; 2592 } 2593 2594 /** 2595 * _base_check_pcie_native_sgl - This function is called for PCIe end devices to 2596 * determine if the driver needs to build a native SGL. 
If so, that native 2597 * SGL is built in the special contiguous buffers allocated especially for 2598 * PCIe SGL creation. If the driver will not build a native SGL, return 2599 * TRUE and a normal IEEE SGL will be built. Currently this routine 2600 * supports NVMe. 2601 * @ioc: per adapter object 2602 * @mpi_request: mf request pointer 2603 * @smid: system request message index 2604 * @scmd: scsi command 2605 * @pcie_device: points to the PCIe device's info 2606 * 2607 * Return: 0 if native SGL was built, 1 if no SGL was built 2608 */ 2609 static int 2610 _base_check_pcie_native_sgl(struct MPT3SAS_ADAPTER *ioc, 2611 Mpi25SCSIIORequest_t *mpi_request, u16 smid, struct scsi_cmnd *scmd, 2612 struct _pcie_device *pcie_device) 2613 { 2614 int sges_left; 2615 2616 /* Get the SG list pointer and info. */ 2617 sges_left = scsi_dma_map(scmd); 2618 if (sges_left < 0) 2619 return 1; 2620 2621 /* Check if we need to build a native SG list. */ 2622 if (!base_is_prp_possible(ioc, pcie_device, 2623 scmd, sges_left)) { 2624 /* We built a native SG list, just return. */ 2625 goto out; 2626 } 2627 2628 /* 2629 * Build native NVMe PRP. 2630 */ 2631 base_make_prp_nvme(ioc, scmd, mpi_request, 2632 smid, sges_left); 2633 2634 return 0; 2635 out: 2636 scsi_dma_unmap(scmd); 2637 return 1; 2638 } 2639 2640 /** 2641 * _base_add_sg_single_ieee - add sg element for IEEE format 2642 * @paddr: virtual address for SGE 2643 * @flags: SGE flags 2644 * @chain_offset: number of 128 byte elements from start of segment 2645 * @length: data transfer length 2646 * @dma_addr: Physical address 2647 */ 2648 static void 2649 _base_add_sg_single_ieee(void *paddr, u8 flags, u8 chain_offset, u32 length, 2650 dma_addr_t dma_addr) 2651 { 2652 Mpi25IeeeSgeChain64_t *sgel = paddr; 2653 2654 sgel->Flags = flags; 2655 sgel->NextChainOffset = chain_offset; 2656 sgel->Length = cpu_to_le32(length); 2657 sgel->Address = cpu_to_le64(dma_addr); 2658 } 2659 2660 /** 2661 * _base_build_zero_len_sge_ieee - build zero length sg entry for IEEE format 2662 * @ioc: per adapter object 2663 * @paddr: virtual address for SGE 2664 * 2665 * Create a zero length scatter gather entry to insure the IOCs hardware has 2666 * something to use if the target device goes brain dead and tries 2667 * to send data even when none is asked for. 2668 */ 2669 static void 2670 _base_build_zero_len_sge_ieee(struct MPT3SAS_ADAPTER *ioc, void *paddr) 2671 { 2672 u8 sgl_flags = (MPI2_IEEE_SGE_FLAGS_SIMPLE_ELEMENT | 2673 MPI2_IEEE_SGE_FLAGS_SYSTEM_ADDR | 2674 MPI25_IEEE_SGE_FLAGS_END_OF_LIST); 2675 2676 _base_add_sg_single_ieee(paddr, sgl_flags, 0, 0, -1); 2677 } 2678 2679 static inline int _base_scsi_dma_map(struct scsi_cmnd *cmd) 2680 { 2681 /* 2682 * Some firmware versions byte-swap the REPORT ZONES command reply from 2683 * ATA-ZAC devices by directly accessing in the host buffer. This does 2684 * not respect the default command DMA direction and causes IOMMU page 2685 * faults on some architectures with an IOMMU enforcing write mappings 2686 * (e.g. AMD hosts). Avoid such issue by making the report zones buffer 2687 * mapping bi-directional. 2688 */ 2689 if (cmd->cmnd[0] == ZBC_IN && cmd->cmnd[1] == ZI_REPORT_ZONES) 2690 cmd->sc_data_direction = DMA_BIDIRECTIONAL; 2691 2692 return scsi_dma_map(cmd); 2693 } 2694 2695 /** 2696 * _base_build_sg_scmd - main sg creation routine 2697 * pcie_device is unused here! 2698 * @ioc: per adapter object 2699 * @scmd: scsi command 2700 * @smid: system request message index 2701 * @unused: unused pcie_device pointer 2702 * Context: none. 
2703 * 2704 * The main routine that builds scatter gather table from a given 2705 * scsi request sent via the .queuecommand main handler. 2706 * 2707 * Return: 0 success, anything else error 2708 */ 2709 static int 2710 _base_build_sg_scmd(struct MPT3SAS_ADAPTER *ioc, 2711 struct scsi_cmnd *scmd, u16 smid, struct _pcie_device *unused) 2712 { 2713 Mpi2SCSIIORequest_t *mpi_request; 2714 dma_addr_t chain_dma; 2715 struct scatterlist *sg_scmd; 2716 void *sg_local, *chain; 2717 u32 chain_offset; 2718 u32 chain_length; 2719 u32 chain_flags; 2720 int sges_left; 2721 u32 sges_in_segment; 2722 u32 sgl_flags; 2723 u32 sgl_flags_last_element; 2724 u32 sgl_flags_end_buffer; 2725 struct chain_tracker *chain_req; 2726 2727 mpi_request = mpt3sas_base_get_msg_frame(ioc, smid); 2728 2729 /* init scatter gather flags */ 2730 sgl_flags = MPI2_SGE_FLAGS_SIMPLE_ELEMENT; 2731 if (scmd->sc_data_direction == DMA_TO_DEVICE) 2732 sgl_flags |= MPI2_SGE_FLAGS_HOST_TO_IOC; 2733 sgl_flags_last_element = (sgl_flags | MPI2_SGE_FLAGS_LAST_ELEMENT) 2734 << MPI2_SGE_FLAGS_SHIFT; 2735 sgl_flags_end_buffer = (sgl_flags | MPI2_SGE_FLAGS_LAST_ELEMENT | 2736 MPI2_SGE_FLAGS_END_OF_BUFFER | MPI2_SGE_FLAGS_END_OF_LIST) 2737 << MPI2_SGE_FLAGS_SHIFT; 2738 sgl_flags = sgl_flags << MPI2_SGE_FLAGS_SHIFT; 2739 2740 sg_scmd = scsi_sglist(scmd); 2741 sges_left = _base_scsi_dma_map(scmd); 2742 if (sges_left < 0) 2743 return -ENOMEM; 2744 2745 sg_local = &mpi_request->SGL; 2746 sges_in_segment = ioc->max_sges_in_main_message; 2747 if (sges_left <= sges_in_segment) 2748 goto fill_in_last_segment; 2749 2750 mpi_request->ChainOffset = (offsetof(Mpi2SCSIIORequest_t, SGL) + 2751 (sges_in_segment * ioc->sge_size))/4; 2752 2753 /* fill in main message segment when there is a chain following */ 2754 while (sges_in_segment) { 2755 if (sges_in_segment == 1) 2756 ioc->base_add_sg_single(sg_local, 2757 sgl_flags_last_element | sg_dma_len(sg_scmd), 2758 sg_dma_address(sg_scmd)); 2759 else 2760 ioc->base_add_sg_single(sg_local, sgl_flags | 2761 sg_dma_len(sg_scmd), sg_dma_address(sg_scmd)); 2762 sg_scmd = sg_next(sg_scmd); 2763 sg_local += ioc->sge_size; 2764 sges_left--; 2765 sges_in_segment--; 2766 } 2767 2768 /* initializing the chain flags and pointers */ 2769 chain_flags = MPI2_SGE_FLAGS_CHAIN_ELEMENT << MPI2_SGE_FLAGS_SHIFT; 2770 chain_req = _base_get_chain_buffer_tracker(ioc, scmd); 2771 if (!chain_req) 2772 return -1; 2773 chain = chain_req->chain_buffer; 2774 chain_dma = chain_req->chain_buffer_dma; 2775 do { 2776 sges_in_segment = (sges_left <= 2777 ioc->max_sges_in_chain_message) ? sges_left : 2778 ioc->max_sges_in_chain_message; 2779 chain_offset = (sges_left == sges_in_segment) ? 
2780 0 : (sges_in_segment * ioc->sge_size)/4; 2781 chain_length = sges_in_segment * ioc->sge_size; 2782 if (chain_offset) { 2783 chain_offset = chain_offset << 2784 MPI2_SGE_CHAIN_OFFSET_SHIFT; 2785 chain_length += ioc->sge_size; 2786 } 2787 ioc->base_add_sg_single(sg_local, chain_flags | chain_offset | 2788 chain_length, chain_dma); 2789 sg_local = chain; 2790 if (!chain_offset) 2791 goto fill_in_last_segment; 2792 2793 /* fill in chain segments */ 2794 while (sges_in_segment) { 2795 if (sges_in_segment == 1) 2796 ioc->base_add_sg_single(sg_local, 2797 sgl_flags_last_element | 2798 sg_dma_len(sg_scmd), 2799 sg_dma_address(sg_scmd)); 2800 else 2801 ioc->base_add_sg_single(sg_local, sgl_flags | 2802 sg_dma_len(sg_scmd), 2803 sg_dma_address(sg_scmd)); 2804 sg_scmd = sg_next(sg_scmd); 2805 sg_local += ioc->sge_size; 2806 sges_left--; 2807 sges_in_segment--; 2808 } 2809 2810 chain_req = _base_get_chain_buffer_tracker(ioc, scmd); 2811 if (!chain_req) 2812 return -1; 2813 chain = chain_req->chain_buffer; 2814 chain_dma = chain_req->chain_buffer_dma; 2815 } while (1); 2816 2817 2818 fill_in_last_segment: 2819 2820 /* fill the last segment */ 2821 while (sges_left) { 2822 if (sges_left == 1) 2823 ioc->base_add_sg_single(sg_local, sgl_flags_end_buffer | 2824 sg_dma_len(sg_scmd), sg_dma_address(sg_scmd)); 2825 else 2826 ioc->base_add_sg_single(sg_local, sgl_flags | 2827 sg_dma_len(sg_scmd), sg_dma_address(sg_scmd)); 2828 sg_scmd = sg_next(sg_scmd); 2829 sg_local += ioc->sge_size; 2830 sges_left--; 2831 } 2832 2833 return 0; 2834 } 2835 2836 /** 2837 * _base_build_sg_scmd_ieee - main sg creation routine for IEEE format 2838 * @ioc: per adapter object 2839 * @scmd: scsi command 2840 * @smid: system request message index 2841 * @pcie_device: Pointer to pcie_device. If set, the pcie native sgl will be 2842 * constructed on need. 2843 * Context: none. 2844 * 2845 * The main routine that builds scatter gather table from a given 2846 * scsi request sent via the .queuecommand main handler. 2847 * 2848 * Return: 0 success, anything else error 2849 */ 2850 static int 2851 _base_build_sg_scmd_ieee(struct MPT3SAS_ADAPTER *ioc, 2852 struct scsi_cmnd *scmd, u16 smid, struct _pcie_device *pcie_device) 2853 { 2854 Mpi25SCSIIORequest_t *mpi_request; 2855 dma_addr_t chain_dma; 2856 struct scatterlist *sg_scmd; 2857 void *sg_local, *chain; 2858 u32 chain_offset; 2859 u32 chain_length; 2860 int sges_left; 2861 u32 sges_in_segment; 2862 u8 simple_sgl_flags; 2863 u8 simple_sgl_flags_last; 2864 u8 chain_sgl_flags; 2865 struct chain_tracker *chain_req; 2866 2867 mpi_request = mpt3sas_base_get_msg_frame(ioc, smid); 2868 2869 /* init scatter gather flags */ 2870 simple_sgl_flags = MPI2_IEEE_SGE_FLAGS_SIMPLE_ELEMENT | 2871 MPI2_IEEE_SGE_FLAGS_SYSTEM_ADDR; 2872 simple_sgl_flags_last = simple_sgl_flags | 2873 MPI25_IEEE_SGE_FLAGS_END_OF_LIST; 2874 chain_sgl_flags = MPI2_IEEE_SGE_FLAGS_CHAIN_ELEMENT | 2875 MPI2_IEEE_SGE_FLAGS_SYSTEM_ADDR; 2876 2877 /* Check if we need to build a native SG list. */ 2878 if ((pcie_device) && (_base_check_pcie_native_sgl(ioc, mpi_request, 2879 smid, scmd, pcie_device) == 0)) { 2880 /* We built a native SG list, just return. 
*/ 2881 return 0; 2882 } 2883 2884 sg_scmd = scsi_sglist(scmd); 2885 sges_left = _base_scsi_dma_map(scmd); 2886 if (sges_left < 0) 2887 return -ENOMEM; 2888 2889 sg_local = &mpi_request->SGL; 2890 sges_in_segment = (ioc->request_sz - 2891 offsetof(Mpi25SCSIIORequest_t, SGL))/ioc->sge_size_ieee; 2892 if (sges_left <= sges_in_segment) 2893 goto fill_in_last_segment; 2894 2895 mpi_request->ChainOffset = (sges_in_segment - 1 /* chain element */) + 2896 (offsetof(Mpi25SCSIIORequest_t, SGL)/ioc->sge_size_ieee); 2897 2898 /* fill in main message segment when there is a chain following */ 2899 while (sges_in_segment > 1) { 2900 _base_add_sg_single_ieee(sg_local, simple_sgl_flags, 0, 2901 sg_dma_len(sg_scmd), sg_dma_address(sg_scmd)); 2902 sg_scmd = sg_next(sg_scmd); 2903 sg_local += ioc->sge_size_ieee; 2904 sges_left--; 2905 sges_in_segment--; 2906 } 2907 2908 /* initializing the pointers */ 2909 chain_req = _base_get_chain_buffer_tracker(ioc, scmd); 2910 if (!chain_req) 2911 return -1; 2912 chain = chain_req->chain_buffer; 2913 chain_dma = chain_req->chain_buffer_dma; 2914 do { 2915 sges_in_segment = (sges_left <= 2916 ioc->max_sges_in_chain_message) ? sges_left : 2917 ioc->max_sges_in_chain_message; 2918 chain_offset = (sges_left == sges_in_segment) ? 2919 0 : sges_in_segment; 2920 chain_length = sges_in_segment * ioc->sge_size_ieee; 2921 if (chain_offset) 2922 chain_length += ioc->sge_size_ieee; 2923 _base_add_sg_single_ieee(sg_local, chain_sgl_flags, 2924 chain_offset, chain_length, chain_dma); 2925 2926 sg_local = chain; 2927 if (!chain_offset) 2928 goto fill_in_last_segment; 2929 2930 /* fill in chain segments */ 2931 while (sges_in_segment) { 2932 _base_add_sg_single_ieee(sg_local, simple_sgl_flags, 0, 2933 sg_dma_len(sg_scmd), sg_dma_address(sg_scmd)); 2934 sg_scmd = sg_next(sg_scmd); 2935 sg_local += ioc->sge_size_ieee; 2936 sges_left--; 2937 sges_in_segment--; 2938 } 2939 2940 chain_req = _base_get_chain_buffer_tracker(ioc, scmd); 2941 if (!chain_req) 2942 return -1; 2943 chain = chain_req->chain_buffer; 2944 chain_dma = chain_req->chain_buffer_dma; 2945 } while (1); 2946 2947 2948 fill_in_last_segment: 2949 2950 /* fill the last segment */ 2951 while (sges_left > 0) { 2952 if (sges_left == 1) 2953 _base_add_sg_single_ieee(sg_local, 2954 simple_sgl_flags_last, 0, sg_dma_len(sg_scmd), 2955 sg_dma_address(sg_scmd)); 2956 else 2957 _base_add_sg_single_ieee(sg_local, simple_sgl_flags, 0, 2958 sg_dma_len(sg_scmd), sg_dma_address(sg_scmd)); 2959 sg_scmd = sg_next(sg_scmd); 2960 sg_local += ioc->sge_size_ieee; 2961 sges_left--; 2962 } 2963 2964 return 0; 2965 } 2966 2967 /** 2968 * _base_build_sg_ieee - build generic sg for IEEE format 2969 * @ioc: per adapter object 2970 * @psge: virtual address for SGE 2971 * @data_out_dma: physical address for WRITES 2972 * @data_out_sz: data xfer size for WRITES 2973 * @data_in_dma: physical address for READS 2974 * @data_in_sz: data xfer size for READS 2975 */ 2976 static void 2977 _base_build_sg_ieee(struct MPT3SAS_ADAPTER *ioc, void *psge, 2978 dma_addr_t data_out_dma, size_t data_out_sz, dma_addr_t data_in_dma, 2979 size_t data_in_sz) 2980 { 2981 u8 sgl_flags; 2982 2983 if (!data_out_sz && !data_in_sz) { 2984 _base_build_zero_len_sge_ieee(ioc, psge); 2985 return; 2986 } 2987 2988 if (data_out_sz && data_in_sz) { 2989 /* WRITE sgel first */ 2990 sgl_flags = MPI2_IEEE_SGE_FLAGS_SIMPLE_ELEMENT | 2991 MPI2_IEEE_SGE_FLAGS_SYSTEM_ADDR; 2992 _base_add_sg_single_ieee(psge, sgl_flags, 0, data_out_sz, 2993 data_out_dma); 2994 2995 /* incr sgel */ 2996 psge += 
ioc->sge_size_ieee; 2997 2998 /* READ sgel last */ 2999 sgl_flags |= MPI25_IEEE_SGE_FLAGS_END_OF_LIST; 3000 _base_add_sg_single_ieee(psge, sgl_flags, 0, data_in_sz, 3001 data_in_dma); 3002 } else if (data_out_sz) /* WRITE */ { 3003 sgl_flags = MPI2_IEEE_SGE_FLAGS_SIMPLE_ELEMENT | 3004 MPI25_IEEE_SGE_FLAGS_END_OF_LIST | 3005 MPI2_IEEE_SGE_FLAGS_SYSTEM_ADDR; 3006 _base_add_sg_single_ieee(psge, sgl_flags, 0, data_out_sz, 3007 data_out_dma); 3008 } else if (data_in_sz) /* READ */ { 3009 sgl_flags = MPI2_IEEE_SGE_FLAGS_SIMPLE_ELEMENT | 3010 MPI25_IEEE_SGE_FLAGS_END_OF_LIST | 3011 MPI2_IEEE_SGE_FLAGS_SYSTEM_ADDR; 3012 _base_add_sg_single_ieee(psge, sgl_flags, 0, data_in_sz, 3013 data_in_dma); 3014 } 3015 } 3016 3017 #define convert_to_kb(x) ((x) << (PAGE_SHIFT - 10)) 3018 3019 /** 3020 * _base_config_dma_addressing - set dma addressing 3021 * @ioc: per adapter object 3022 * @pdev: PCI device struct 3023 * 3024 * Return: 0 for success, non-zero for failure. 3025 */ 3026 static int 3027 _base_config_dma_addressing(struct MPT3SAS_ADAPTER *ioc, struct pci_dev *pdev) 3028 { 3029 struct sysinfo s; 3030 u64 coherent_dma_mask, dma_mask; 3031 3032 if (ioc->is_mcpu_endpoint || sizeof(dma_addr_t) == 4) { 3033 ioc->dma_mask = 32; 3034 coherent_dma_mask = dma_mask = DMA_BIT_MASK(32); 3035 /* Set 63 bit DMA mask for all SAS3 and SAS35 controllers */ 3036 } else if (ioc->hba_mpi_version_belonged > MPI2_VERSION) { 3037 ioc->dma_mask = 63; 3038 coherent_dma_mask = dma_mask = DMA_BIT_MASK(63); 3039 } else { 3040 ioc->dma_mask = 64; 3041 coherent_dma_mask = dma_mask = DMA_BIT_MASK(64); 3042 } 3043 3044 if (ioc->use_32bit_dma) 3045 coherent_dma_mask = DMA_BIT_MASK(32); 3046 3047 if (dma_set_mask(&pdev->dev, dma_mask) || 3048 dma_set_coherent_mask(&pdev->dev, coherent_dma_mask)) 3049 return -ENODEV; 3050 3051 if (ioc->dma_mask > 32) { 3052 ioc->base_add_sg_single = &_base_add_sg_single_64; 3053 ioc->sge_size = sizeof(Mpi2SGESimple64_t); 3054 } else { 3055 ioc->base_add_sg_single = &_base_add_sg_single_32; 3056 ioc->sge_size = sizeof(Mpi2SGESimple32_t); 3057 } 3058 3059 si_meminfo(&s); 3060 ioc_info(ioc, "%d BIT PCI BUS DMA ADDRESSING SUPPORTED, total mem (%ld kB)\n", 3061 ioc->dma_mask, convert_to_kb(s.totalram)); 3062 3063 return 0; 3064 } 3065 3066 /** 3067 * _base_check_enable_msix - checks MSIX capabable. 
3068 * @ioc: per adapter object 3069 * 3070 * Check to see if card is capable of MSIX, and set number 3071 * of available msix vectors 3072 */ 3073 static int 3074 _base_check_enable_msix(struct MPT3SAS_ADAPTER *ioc) 3075 { 3076 int base; 3077 u16 message_control; 3078 3079 /* Check whether controller SAS2008 B0 controller, 3080 * if it is SAS2008 B0 controller use IO-APIC instead of MSIX 3081 */ 3082 if (ioc->pdev->device == MPI2_MFGPAGE_DEVID_SAS2008 && 3083 ioc->pdev->revision == SAS2_PCI_DEVICE_B0_REVISION) { 3084 return -EINVAL; 3085 } 3086 3087 base = pci_find_capability(ioc->pdev, PCI_CAP_ID_MSIX); 3088 if (!base) { 3089 dfailprintk(ioc, ioc_info(ioc, "msix not supported\n")); 3090 return -EINVAL; 3091 } 3092 3093 /* get msix vector count */ 3094 /* NUMA_IO not supported for older controllers */ 3095 if (ioc->pdev->device == MPI2_MFGPAGE_DEVID_SAS2004 || 3096 ioc->pdev->device == MPI2_MFGPAGE_DEVID_SAS2008 || 3097 ioc->pdev->device == MPI2_MFGPAGE_DEVID_SAS2108_1 || 3098 ioc->pdev->device == MPI2_MFGPAGE_DEVID_SAS2108_2 || 3099 ioc->pdev->device == MPI2_MFGPAGE_DEVID_SAS2108_3 || 3100 ioc->pdev->device == MPI2_MFGPAGE_DEVID_SAS2116_1 || 3101 ioc->pdev->device == MPI2_MFGPAGE_DEVID_SAS2116_2) 3102 ioc->msix_vector_count = 1; 3103 else { 3104 pci_read_config_word(ioc->pdev, base + 2, &message_control); 3105 ioc->msix_vector_count = (message_control & 0x3FF) + 1; 3106 } 3107 dinitprintk(ioc, ioc_info(ioc, "msix is supported, vector_count(%d)\n", 3108 ioc->msix_vector_count)); 3109 return 0; 3110 } 3111 3112 /** 3113 * mpt3sas_base_free_irq - free irq 3114 * @ioc: per adapter object 3115 * 3116 * Freeing respective reply_queue from the list. 3117 */ 3118 void 3119 mpt3sas_base_free_irq(struct MPT3SAS_ADAPTER *ioc) 3120 { 3121 unsigned int irq; 3122 struct adapter_reply_queue *reply_q, *next; 3123 3124 if (list_empty(&ioc->reply_queue_list)) 3125 return; 3126 3127 list_for_each_entry_safe(reply_q, next, &ioc->reply_queue_list, list) { 3128 list_del(&reply_q->list); 3129 if (reply_q->is_iouring_poll_q) { 3130 kfree(reply_q); 3131 continue; 3132 } 3133 3134 if (ioc->smp_affinity_enable) { 3135 irq = pci_irq_vector(ioc->pdev, reply_q->msix_index); 3136 irq_update_affinity_hint(irq, NULL); 3137 } 3138 free_irq(pci_irq_vector(ioc->pdev, reply_q->msix_index), 3139 reply_q); 3140 kfree(reply_q); 3141 } 3142 } 3143 3144 /** 3145 * _base_request_irq - request irq 3146 * @ioc: per adapter object 3147 * @index: msix index into vector table 3148 * 3149 * Inserting respective reply_queue into the list. 
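 *
 * For illustration: on an adapter whose driver_name is "mpt3sas" and id
 * is 0, the snprintf() calls below name the vectors "mpt3sas0-msix0",
 * "mpt3sas0-msix1", ... and the io_uring poll queues "mpt3sas0-mq-poll0",
 * "mpt3sas0-mq-poll1", ...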
3150 */ 3151 static int 3152 _base_request_irq(struct MPT3SAS_ADAPTER *ioc, u8 index) 3153 { 3154 struct pci_dev *pdev = ioc->pdev; 3155 struct adapter_reply_queue *reply_q; 3156 int r, qid; 3157 3158 reply_q = kzalloc(sizeof(struct adapter_reply_queue), GFP_KERNEL); 3159 if (!reply_q) { 3160 ioc_err(ioc, "unable to allocate memory %zu!\n", 3161 sizeof(struct adapter_reply_queue)); 3162 return -ENOMEM; 3163 } 3164 reply_q->ioc = ioc; 3165 reply_q->msix_index = index; 3166 3167 atomic_set(&reply_q->busy, 0); 3168 3169 if (index >= ioc->iopoll_q_start_index) { 3170 qid = index - ioc->iopoll_q_start_index; 3171 snprintf(reply_q->name, MPT_NAME_LENGTH, "%s%d-mq-poll%d", 3172 ioc->driver_name, ioc->id, qid); 3173 reply_q->is_iouring_poll_q = 1; 3174 ioc->io_uring_poll_queues[qid].reply_q = reply_q; 3175 goto out; 3176 } 3177 3178 3179 if (ioc->msix_enable) 3180 snprintf(reply_q->name, MPT_NAME_LENGTH, "%s%d-msix%d", 3181 ioc->driver_name, ioc->id, index); 3182 else 3183 snprintf(reply_q->name, MPT_NAME_LENGTH, "%s%d", 3184 ioc->driver_name, ioc->id); 3185 r = request_irq(pci_irq_vector(pdev, index), _base_interrupt, 3186 IRQF_SHARED, reply_q->name, reply_q); 3187 if (r) { 3188 pr_err("%s: unable to allocate interrupt %d!\n", 3189 reply_q->name, pci_irq_vector(pdev, index)); 3190 kfree(reply_q); 3191 return -EBUSY; 3192 } 3193 out: 3194 INIT_LIST_HEAD(&reply_q->list); 3195 list_add_tail(&reply_q->list, &ioc->reply_queue_list); 3196 return 0; 3197 } 3198 3199 /** 3200 * _base_assign_reply_queues - assigning msix index for each cpu 3201 * @ioc: per adapter object 3202 * 3203 * The enduser would need to set the affinity via /proc/irq/#/smp_affinity 3204 */ 3205 static void 3206 _base_assign_reply_queues(struct MPT3SAS_ADAPTER *ioc) 3207 { 3208 unsigned int cpu, nr_cpus, nr_msix, index = 0, irq; 3209 struct adapter_reply_queue *reply_q; 3210 int iopoll_q_count = ioc->reply_queue_count - 3211 ioc->iopoll_q_start_index; 3212 const struct cpumask *mask; 3213 3214 if (!_base_is_controller_msix_enabled(ioc)) 3215 return; 3216 3217 if (ioc->msix_load_balance) 3218 return; 3219 3220 memset(ioc->cpu_msix_table, 0, ioc->cpu_msix_table_sz); 3221 3222 nr_cpus = num_online_cpus(); 3223 nr_msix = ioc->reply_queue_count = min(ioc->reply_queue_count, 3224 ioc->facts.MaxMSIxVectors); 3225 if (!nr_msix) 3226 return; 3227 3228 if (ioc->smp_affinity_enable) { 3229 3230 /* 3231 * set irq affinity to local numa node for those irqs 3232 * corresponding to high iops queues. 
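 *
 * Sketch of the intent (numbers made up): with, say, 8 high iops queues
 * on a two-node system, vectors 0..7 are hinted to the CPUs of the HBA's
 * local NUMA node via irq_set_affinity_and_hint(), while the remaining
 * vectors keep the spread chosen by pci_alloc_irq_vectors_affinity(),
 * which is then mirrored into cpu_msix_table[] further below.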
3233 */ 3234 if (ioc->high_iops_queues) { 3235 mask = cpumask_of_node(dev_to_node(&ioc->pdev->dev)); 3236 for (index = 0; index < ioc->high_iops_queues; 3237 index++) { 3238 irq = pci_irq_vector(ioc->pdev, index); 3239 irq_set_affinity_and_hint(irq, mask); 3240 } 3241 } 3242 3243 list_for_each_entry(reply_q, &ioc->reply_queue_list, list) { 3244 const cpumask_t *mask; 3245 3246 if (reply_q->msix_index < ioc->high_iops_queues || 3247 reply_q->msix_index >= ioc->iopoll_q_start_index) 3248 continue; 3249 3250 mask = pci_irq_get_affinity(ioc->pdev, 3251 reply_q->msix_index); 3252 if (!mask) { 3253 ioc_warn(ioc, "no affinity for msi %x\n", 3254 reply_q->msix_index); 3255 goto fall_back; 3256 } 3257 3258 for_each_cpu_and(cpu, mask, cpu_online_mask) { 3259 if (cpu >= ioc->cpu_msix_table_sz) 3260 break; 3261 ioc->cpu_msix_table[cpu] = reply_q->msix_index; 3262 } 3263 } 3264 return; 3265 } 3266 3267 fall_back: 3268 cpu = cpumask_first(cpu_online_mask); 3269 nr_msix -= (ioc->high_iops_queues - iopoll_q_count); 3270 index = 0; 3271 3272 list_for_each_entry(reply_q, &ioc->reply_queue_list, list) { 3273 unsigned int i, group = nr_cpus / nr_msix; 3274 3275 if (reply_q->msix_index < ioc->high_iops_queues || 3276 reply_q->msix_index >= ioc->iopoll_q_start_index) 3277 continue; 3278 3279 if (cpu >= nr_cpus) 3280 break; 3281 3282 if (index < nr_cpus % nr_msix) 3283 group++; 3284 3285 for (i = 0 ; i < group ; i++) { 3286 ioc->cpu_msix_table[cpu] = reply_q->msix_index; 3287 cpu = cpumask_next(cpu, cpu_online_mask); 3288 } 3289 index++; 3290 } 3291 } 3292 3293 /** 3294 * _base_check_and_enable_high_iops_queues - enable high iops mode 3295 * @ioc: per adapter object 3296 * @hba_msix_vector_count: msix vectors supported by HBA 3297 * 3298 * Enable high iops queues only if 3299 * - HBA is a SEA/AERO controller and 3300 * - MSI-Xs vector supported by the HBA is 128 and 3301 * - total CPU count in the system >=16 and 3302 * - loaded driver with default max_msix_vectors module parameter and 3303 * - system booted in non kdump mode 3304 * 3305 * Return: nothing. 3306 */ 3307 static void 3308 _base_check_and_enable_high_iops_queues(struct MPT3SAS_ADAPTER *ioc, 3309 int hba_msix_vector_count) 3310 { 3311 u16 lnksta, speed; 3312 3313 /* 3314 * Disable high iops queues if io uring poll queues are enabled. 
3315 */ 3316 if (perf_mode == MPT_PERF_MODE_IOPS || 3317 perf_mode == MPT_PERF_MODE_LATENCY || 3318 ioc->io_uring_poll_queues) { 3319 ioc->high_iops_queues = 0; 3320 return; 3321 } 3322 3323 if (perf_mode == MPT_PERF_MODE_DEFAULT) { 3324 3325 pcie_capability_read_word(ioc->pdev, PCI_EXP_LNKSTA, &lnksta); 3326 speed = lnksta & PCI_EXP_LNKSTA_CLS; 3327 3328 if (speed < 0x4) { 3329 ioc->high_iops_queues = 0; 3330 return; 3331 } 3332 } 3333 3334 if (!reset_devices && ioc->is_aero_ioc && 3335 hba_msix_vector_count == MPT3SAS_GEN35_MAX_MSIX_QUEUES && 3336 num_online_cpus() >= MPT3SAS_HIGH_IOPS_REPLY_QUEUES && 3337 max_msix_vectors == -1) 3338 ioc->high_iops_queues = MPT3SAS_HIGH_IOPS_REPLY_QUEUES; 3339 else 3340 ioc->high_iops_queues = 0; 3341 } 3342 3343 /** 3344 * mpt3sas_base_disable_msix - disables msix 3345 * @ioc: per adapter object 3346 * 3347 */ 3348 void 3349 mpt3sas_base_disable_msix(struct MPT3SAS_ADAPTER *ioc) 3350 { 3351 if (!ioc->msix_enable) 3352 return; 3353 pci_free_irq_vectors(ioc->pdev); 3354 ioc->msix_enable = 0; 3355 kfree(ioc->io_uring_poll_queues); 3356 } 3357 3358 /** 3359 * _base_alloc_irq_vectors - allocate msix vectors 3360 * @ioc: per adapter object 3361 * 3362 */ 3363 static int 3364 _base_alloc_irq_vectors(struct MPT3SAS_ADAPTER *ioc) 3365 { 3366 int i, irq_flags = PCI_IRQ_MSIX; 3367 struct irq_affinity desc = { .pre_vectors = ioc->high_iops_queues }; 3368 struct irq_affinity *descp = &desc; 3369 /* 3370 * Don't allocate msix vectors for poll_queues. 3371 * msix_vectors is always within a range of FW supported reply queue. 3372 */ 3373 int nr_msix_vectors = ioc->iopoll_q_start_index; 3374 3375 3376 if (ioc->smp_affinity_enable) 3377 irq_flags |= PCI_IRQ_AFFINITY | PCI_IRQ_ALL_TYPES; 3378 else 3379 descp = NULL; 3380 3381 ioc_info(ioc, " %d %d %d\n", ioc->high_iops_queues, 3382 ioc->reply_queue_count, nr_msix_vectors); 3383 3384 i = pci_alloc_irq_vectors_affinity(ioc->pdev, 3385 ioc->high_iops_queues, 3386 nr_msix_vectors, irq_flags, descp); 3387 3388 return i; 3389 } 3390 3391 /** 3392 * _base_enable_msix - enables msix, failback to io_apic 3393 * @ioc: per adapter object 3394 * 3395 */ 3396 static int 3397 _base_enable_msix(struct MPT3SAS_ADAPTER *ioc) 3398 { 3399 int r; 3400 int i, local_max_msix_vectors; 3401 u8 try_msix = 0; 3402 int iopoll_q_count = 0; 3403 3404 ioc->msix_load_balance = false; 3405 3406 if (msix_disable == -1 || msix_disable == 0) 3407 try_msix = 1; 3408 3409 if (!try_msix) 3410 goto try_ioapic; 3411 3412 if (_base_check_enable_msix(ioc) != 0) 3413 goto try_ioapic; 3414 3415 ioc_info(ioc, "MSI-X vectors supported: %d\n", ioc->msix_vector_count); 3416 pr_info("\t no of cores: %d, max_msix_vectors: %d\n", 3417 ioc->cpu_count, max_msix_vectors); 3418 3419 ioc->reply_queue_count = 3420 min_t(int, ioc->cpu_count, ioc->msix_vector_count); 3421 3422 if (!ioc->rdpq_array_enable && max_msix_vectors == -1) 3423 local_max_msix_vectors = (reset_devices) ? 1 : 8; 3424 else 3425 local_max_msix_vectors = max_msix_vectors; 3426 3427 if (local_max_msix_vectors == 0) 3428 goto try_ioapic; 3429 3430 /* 3431 * Enable msix_load_balance only if combined reply queue mode is 3432 * disabled on SAS3 & above generation HBA devices. 3433 */ 3434 if (!ioc->combined_reply_queue && 3435 ioc->hba_mpi_version_belonged != MPI2_VERSION) { 3436 ioc_info(ioc, 3437 "combined ReplyQueue is off, Enabling msix load balance\n"); 3438 ioc->msix_load_balance = true; 3439 } 3440 3441 /* 3442 * smp affinity setting is not need when msix load balance 3443 * is enabled. 
3444 */ 3445 if (ioc->msix_load_balance) 3446 ioc->smp_affinity_enable = 0; 3447 3448 if (!ioc->smp_affinity_enable || ioc->reply_queue_count <= 1) 3449 ioc->shost->host_tagset = 0; 3450 3451 /* 3452 * Enable io uring poll queues only if host_tagset is enabled. 3453 */ 3454 if (ioc->shost->host_tagset) 3455 iopoll_q_count = poll_queues; 3456 3457 if (iopoll_q_count) { 3458 ioc->io_uring_poll_queues = kcalloc(iopoll_q_count, 3459 sizeof(struct io_uring_poll_queue), GFP_KERNEL); 3460 if (!ioc->io_uring_poll_queues) 3461 iopoll_q_count = 0; 3462 } 3463 3464 if (ioc->is_aero_ioc) 3465 _base_check_and_enable_high_iops_queues(ioc, 3466 ioc->msix_vector_count); 3467 3468 /* 3469 * Add high iops queues count to reply queue count if high iops queues 3470 * are enabled. 3471 */ 3472 ioc->reply_queue_count = min_t(int, 3473 ioc->reply_queue_count + ioc->high_iops_queues, 3474 ioc->msix_vector_count); 3475 3476 /* 3477 * Adjust the reply queue count incase reply queue count 3478 * exceeds the user provided MSIx vectors count. 3479 */ 3480 if (local_max_msix_vectors > 0) 3481 ioc->reply_queue_count = min_t(int, local_max_msix_vectors, 3482 ioc->reply_queue_count); 3483 /* 3484 * Add io uring poll queues count to reply queues count 3485 * if io uring is enabled in driver. 3486 */ 3487 if (iopoll_q_count) { 3488 if (ioc->reply_queue_count < (iopoll_q_count + MPT3_MIN_IRQS)) 3489 iopoll_q_count = 0; 3490 ioc->reply_queue_count = min_t(int, 3491 ioc->reply_queue_count + iopoll_q_count, 3492 ioc->msix_vector_count); 3493 } 3494 3495 /* 3496 * Starting index of io uring poll queues in reply queue list. 3497 */ 3498 ioc->iopoll_q_start_index = 3499 ioc->reply_queue_count - iopoll_q_count; 3500 3501 r = _base_alloc_irq_vectors(ioc); 3502 if (r < 0) { 3503 ioc_info(ioc, "pci_alloc_irq_vectors failed (r=%d) !!!\n", r); 3504 goto try_ioapic; 3505 } 3506 3507 /* 3508 * Adjust the reply queue count if the allocated 3509 * MSIx vectors is less then the requested number 3510 * of MSIx vectors. 3511 */ 3512 if (r < ioc->iopoll_q_start_index) { 3513 ioc->reply_queue_count = r + iopoll_q_count; 3514 ioc->iopoll_q_start_index = 3515 ioc->reply_queue_count - iopoll_q_count; 3516 } 3517 3518 ioc->msix_enable = 1; 3519 for (i = 0; i < ioc->reply_queue_count; i++) { 3520 r = _base_request_irq(ioc, i); 3521 if (r) { 3522 mpt3sas_base_free_irq(ioc); 3523 mpt3sas_base_disable_msix(ioc); 3524 goto try_ioapic; 3525 } 3526 } 3527 3528 ioc_info(ioc, "High IOPs queues : %s\n", 3529 ioc->high_iops_queues ? 
"enabled" : "disabled"); 3530 3531 return 0; 3532 3533 /* failback to io_apic interrupt routing */ 3534 try_ioapic: 3535 ioc->high_iops_queues = 0; 3536 ioc_info(ioc, "High IOPs queues : disabled\n"); 3537 ioc->reply_queue_count = 1; 3538 ioc->iopoll_q_start_index = ioc->reply_queue_count - 0; 3539 r = pci_alloc_irq_vectors(ioc->pdev, 1, 1, PCI_IRQ_INTX); 3540 if (r < 0) { 3541 dfailprintk(ioc, 3542 ioc_info(ioc, "pci_alloc_irq_vector(legacy) failed (r=%d) !!!\n", 3543 r)); 3544 } else 3545 r = _base_request_irq(ioc, 0); 3546 3547 return r; 3548 } 3549 3550 /** 3551 * mpt3sas_base_unmap_resources - free controller resources 3552 * @ioc: per adapter object 3553 */ 3554 static void 3555 mpt3sas_base_unmap_resources(struct MPT3SAS_ADAPTER *ioc) 3556 { 3557 struct pci_dev *pdev = ioc->pdev; 3558 3559 dexitprintk(ioc, ioc_info(ioc, "%s\n", __func__)); 3560 3561 mpt3sas_base_free_irq(ioc); 3562 mpt3sas_base_disable_msix(ioc); 3563 3564 kfree(ioc->replyPostRegisterIndex); 3565 ioc->replyPostRegisterIndex = NULL; 3566 3567 3568 if (ioc->chip_phys) { 3569 iounmap(ioc->chip); 3570 ioc->chip_phys = 0; 3571 } 3572 3573 if (pci_is_enabled(pdev)) { 3574 pci_release_selected_regions(ioc->pdev, ioc->bars); 3575 pci_disable_device(pdev); 3576 } 3577 } 3578 3579 static int 3580 _base_diag_reset(struct MPT3SAS_ADAPTER *ioc); 3581 3582 /** 3583 * mpt3sas_base_check_for_fault_and_issue_reset - check if IOC is in fault state 3584 * and if it is in fault state then issue diag reset. 3585 * @ioc: per adapter object 3586 * 3587 * Return: 0 for success, non-zero for failure. 3588 */ 3589 int 3590 mpt3sas_base_check_for_fault_and_issue_reset(struct MPT3SAS_ADAPTER *ioc) 3591 { 3592 u32 ioc_state; 3593 int rc = -EFAULT; 3594 3595 dinitprintk(ioc, pr_info("%s\n", __func__)); 3596 if (ioc->pci_error_recovery) 3597 return 0; 3598 ioc_state = mpt3sas_base_get_iocstate(ioc, 0); 3599 dhsprintk(ioc, pr_info("%s: ioc_state(0x%08x)\n", __func__, ioc_state)); 3600 3601 if ((ioc_state & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_FAULT) { 3602 mpt3sas_print_fault_code(ioc, ioc_state & 3603 MPI2_DOORBELL_DATA_MASK); 3604 mpt3sas_base_mask_interrupts(ioc); 3605 rc = _base_diag_reset(ioc); 3606 } else if ((ioc_state & MPI2_IOC_STATE_MASK) == 3607 MPI2_IOC_STATE_COREDUMP) { 3608 mpt3sas_print_coredump_info(ioc, ioc_state & 3609 MPI2_DOORBELL_DATA_MASK); 3610 mpt3sas_base_wait_for_coredump_completion(ioc, __func__); 3611 mpt3sas_base_mask_interrupts(ioc); 3612 rc = _base_diag_reset(ioc); 3613 } 3614 3615 return rc; 3616 } 3617 3618 /** 3619 * mpt3sas_base_map_resources - map in controller resources (io/irq/memap) 3620 * @ioc: per adapter object 3621 * 3622 * Return: 0 for success, non-zero for failure. 
3623 */ 3624 int 3625 mpt3sas_base_map_resources(struct MPT3SAS_ADAPTER *ioc) 3626 { 3627 struct pci_dev *pdev = ioc->pdev; 3628 u32 memap_sz; 3629 u32 pio_sz; 3630 int i, r = 0, rc; 3631 u64 pio_chip = 0; 3632 phys_addr_t chip_phys = 0; 3633 struct adapter_reply_queue *reply_q; 3634 int iopoll_q_count = 0; 3635 3636 dinitprintk(ioc, ioc_info(ioc, "%s\n", __func__)); 3637 3638 ioc->bars = pci_select_bars(pdev, IORESOURCE_MEM); 3639 if (pci_enable_device_mem(pdev)) { 3640 ioc_warn(ioc, "pci_enable_device_mem: failed\n"); 3641 ioc->bars = 0; 3642 return -ENODEV; 3643 } 3644 3645 3646 if (pci_request_selected_regions(pdev, ioc->bars, 3647 ioc->driver_name)) { 3648 ioc_warn(ioc, "pci_request_selected_regions: failed\n"); 3649 ioc->bars = 0; 3650 r = -ENODEV; 3651 goto out_fail; 3652 } 3653 3654 pci_set_master(pdev); 3655 3656 3657 if (_base_config_dma_addressing(ioc, pdev) != 0) { 3658 ioc_warn(ioc, "no suitable DMA mask for %s\n", pci_name(pdev)); 3659 r = -ENODEV; 3660 goto out_fail; 3661 } 3662 3663 for (i = 0, memap_sz = 0, pio_sz = 0; (i < DEVICE_COUNT_RESOURCE) && 3664 (!memap_sz || !pio_sz); i++) { 3665 if (pci_resource_flags(pdev, i) & IORESOURCE_IO) { 3666 if (pio_sz) 3667 continue; 3668 pio_chip = (u64)pci_resource_start(pdev, i); 3669 pio_sz = pci_resource_len(pdev, i); 3670 } else if (pci_resource_flags(pdev, i) & IORESOURCE_MEM) { 3671 if (memap_sz) 3672 continue; 3673 ioc->chip_phys = pci_resource_start(pdev, i); 3674 chip_phys = ioc->chip_phys; 3675 memap_sz = pci_resource_len(pdev, i); 3676 ioc->chip = ioremap(ioc->chip_phys, memap_sz); 3677 } 3678 } 3679 3680 if (ioc->chip == NULL) { 3681 ioc_err(ioc, 3682 "unable to map adapter memory! or resource not found\n"); 3683 r = -EINVAL; 3684 goto out_fail; 3685 } 3686 3687 mpt3sas_base_mask_interrupts(ioc); 3688 3689 r = _base_get_ioc_facts(ioc); 3690 if (r) { 3691 rc = mpt3sas_base_check_for_fault_and_issue_reset(ioc); 3692 if (rc || (_base_get_ioc_facts(ioc))) 3693 goto out_fail; 3694 } 3695 3696 if (!ioc->rdpq_array_enable_assigned) { 3697 ioc->rdpq_array_enable = ioc->rdpq_array_capable; 3698 ioc->rdpq_array_enable_assigned = 1; 3699 } 3700 3701 r = _base_enable_msix(ioc); 3702 if (r) 3703 goto out_fail; 3704 3705 iopoll_q_count = ioc->reply_queue_count - ioc->iopoll_q_start_index; 3706 for (i = 0; i < iopoll_q_count; i++) { 3707 atomic_set(&ioc->io_uring_poll_queues[i].busy, 0); 3708 atomic_set(&ioc->io_uring_poll_queues[i].pause, 0); 3709 } 3710 3711 if (!ioc->is_driver_loading) 3712 _base_init_irqpolls(ioc); 3713 /* Use the Combined reply queue feature only for SAS3 C0 & higher 3714 * revision HBAs and also only when reply queue count is greater than 8 3715 */ 3716 if (ioc->combined_reply_queue) { 3717 /* Determine the Supplemental Reply Post Host Index Registers 3718 * Addresse. Supplemental Reply Post Host Index Registers 3719 * starts at offset MPI25_SUP_REPLY_POST_HOST_INDEX_OFFSET and 3720 * each register is at offset bytes of 3721 * MPT3_SUP_REPLY_POST_HOST_INDEX_REG_OFFSET from previous one. 
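 *
 * Equivalently (illustrative formula mirroring the loop below):
 *
 *	replyPostRegisterIndex[i] = (u8 *)&chip->Doorbell
 *		+ MPI25_SUP_REPLY_POST_HOST_INDEX_OFFSET
 *		+ i * MPT3_SUP_REPLY_POST_HOST_INDEX_REG_OFFSET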
3722 */ 3723 ioc->replyPostRegisterIndex = kcalloc( 3724 ioc->combined_reply_index_count, 3725 sizeof(resource_size_t *), GFP_KERNEL); 3726 if (!ioc->replyPostRegisterIndex) { 3727 ioc_err(ioc, 3728 "allocation for replyPostRegisterIndex failed!\n"); 3729 r = -ENOMEM; 3730 goto out_fail; 3731 } 3732 3733 for (i = 0; i < ioc->combined_reply_index_count; i++) { 3734 ioc->replyPostRegisterIndex[i] = 3735 (resource_size_t __iomem *) 3736 ((u8 __force *)&ioc->chip->Doorbell + 3737 MPI25_SUP_REPLY_POST_HOST_INDEX_OFFSET + 3738 (i * MPT3_SUP_REPLY_POST_HOST_INDEX_REG_OFFSET)); 3739 } 3740 } 3741 3742 if (ioc->is_warpdrive) { 3743 ioc->reply_post_host_index[0] = (resource_size_t __iomem *) 3744 &ioc->chip->ReplyPostHostIndex; 3745 3746 for (i = 1; i < ioc->cpu_msix_table_sz; i++) 3747 ioc->reply_post_host_index[i] = 3748 (resource_size_t __iomem *) 3749 ((u8 __iomem *)&ioc->chip->Doorbell + (0x4000 + ((i - 1) 3750 * 4))); 3751 } 3752 3753 list_for_each_entry(reply_q, &ioc->reply_queue_list, list) { 3754 if (reply_q->msix_index >= ioc->iopoll_q_start_index) { 3755 pr_info("%s: enabled: index: %d\n", 3756 reply_q->name, reply_q->msix_index); 3757 continue; 3758 } 3759 3760 pr_info("%s: %s enabled: IRQ %d\n", 3761 reply_q->name, 3762 ioc->msix_enable ? "PCI-MSI-X" : "IO-APIC", 3763 pci_irq_vector(ioc->pdev, reply_q->msix_index)); 3764 } 3765 3766 ioc_info(ioc, "iomem(%pap), mapped(0x%p), size(%d)\n", 3767 &chip_phys, ioc->chip, memap_sz); 3768 ioc_info(ioc, "ioport(0x%016llx), size(%d)\n", 3769 (unsigned long long)pio_chip, pio_sz); 3770 3771 /* Save PCI configuration state for recovery from PCI AER/EEH errors */ 3772 pci_save_state(pdev); 3773 return 0; 3774 3775 out_fail: 3776 mpt3sas_base_unmap_resources(ioc); 3777 return r; 3778 } 3779 3780 /** 3781 * mpt3sas_base_get_msg_frame - obtain request mf pointer 3782 * @ioc: per adapter object 3783 * @smid: system request message index(smid zero is invalid) 3784 * 3785 * Return: virt pointer to message frame. 3786 */ 3787 void * 3788 mpt3sas_base_get_msg_frame(struct MPT3SAS_ADAPTER *ioc, u16 smid) 3789 { 3790 return (void *)(ioc->request + (smid * ioc->request_sz)); 3791 } 3792 3793 /** 3794 * mpt3sas_base_get_sense_buffer - obtain a sense buffer virt addr 3795 * @ioc: per adapter object 3796 * @smid: system request message index 3797 * 3798 * Return: virt pointer to sense buffer. 3799 */ 3800 void * 3801 mpt3sas_base_get_sense_buffer(struct MPT3SAS_ADAPTER *ioc, u16 smid) 3802 { 3803 return (void *)(ioc->sense + ((smid - 1) * SCSI_SENSE_BUFFERSIZE)); 3804 } 3805 3806 /** 3807 * mpt3sas_base_get_sense_buffer_dma - obtain a sense buffer dma addr 3808 * @ioc: per adapter object 3809 * @smid: system request message index 3810 * 3811 * Return: phys pointer to the low 32bit address of the sense buffer. 3812 */ 3813 __le32 3814 mpt3sas_base_get_sense_buffer_dma(struct MPT3SAS_ADAPTER *ioc, u16 smid) 3815 { 3816 return cpu_to_le32(ioc->sense_dma + ((smid - 1) * 3817 SCSI_SENSE_BUFFERSIZE)); 3818 } 3819 3820 /** 3821 * mpt3sas_base_get_pcie_sgl - obtain a PCIe SGL virt addr 3822 * @ioc: per adapter object 3823 * @smid: system request message index 3824 * 3825 * Return: virt pointer to a PCIe SGL. 
3826 */ 3827 void * 3828 mpt3sas_base_get_pcie_sgl(struct MPT3SAS_ADAPTER *ioc, u16 smid) 3829 { 3830 return (void *)(ioc->pcie_sg_lookup[smid - 1].pcie_sgl); 3831 } 3832 3833 /** 3834 * mpt3sas_base_get_pcie_sgl_dma - obtain a PCIe SGL dma addr 3835 * @ioc: per adapter object 3836 * @smid: system request message index 3837 * 3838 * Return: phys pointer to the address of the PCIe buffer. 3839 */ 3840 dma_addr_t 3841 mpt3sas_base_get_pcie_sgl_dma(struct MPT3SAS_ADAPTER *ioc, u16 smid) 3842 { 3843 return ioc->pcie_sg_lookup[smid - 1].pcie_sgl_dma; 3844 } 3845 3846 /** 3847 * mpt3sas_base_get_reply_virt_addr - obtain reply frames virt address 3848 * @ioc: per adapter object 3849 * @phys_addr: lower 32 physical addr of the reply 3850 * 3851 * Converts 32bit lower physical addr into a virt address. 3852 */ 3853 void * 3854 mpt3sas_base_get_reply_virt_addr(struct MPT3SAS_ADAPTER *ioc, u32 phys_addr) 3855 { 3856 if (!phys_addr) 3857 return NULL; 3858 return ioc->reply + (phys_addr - (u32)ioc->reply_dma); 3859 } 3860 3861 /** 3862 * _base_get_msix_index - get the msix index 3863 * @ioc: per adapter object 3864 * @scmd: scsi_cmnd object 3865 * 3866 * Return: msix index of general reply queues, 3867 * i.e. reply queue on which IO request's reply 3868 * should be posted by the HBA firmware. 3869 */ 3870 static inline u8 3871 _base_get_msix_index(struct MPT3SAS_ADAPTER *ioc, 3872 struct scsi_cmnd *scmd) 3873 { 3874 /* Enables reply_queue load balancing */ 3875 if (ioc->msix_load_balance) 3876 return ioc->reply_queue_count ? 3877 base_mod64(atomic64_add_return(1, 3878 &ioc->total_io_cnt), ioc->reply_queue_count) : 0; 3879 3880 if (scmd && ioc->shost->nr_hw_queues > 1) { 3881 u32 tag = blk_mq_unique_tag(scsi_cmd_to_rq(scmd)); 3882 3883 return blk_mq_unique_tag_to_hwq(tag) + 3884 ioc->high_iops_queues; 3885 } 3886 3887 return ioc->cpu_msix_table[raw_smp_processor_id()]; 3888 } 3889 3890 /** 3891 * _base_get_high_iops_msix_index - get the msix index of 3892 * high iops queues 3893 * @ioc: per adapter object 3894 * @scmd: scsi_cmnd object 3895 * 3896 * Return: msix index of high iops reply queues. 3897 * i.e. high iops reply queue on which IO request's 3898 * reply should be posted by the HBA firmware. 3899 */ 3900 static inline u8 3901 _base_get_high_iops_msix_index(struct MPT3SAS_ADAPTER *ioc, 3902 struct scsi_cmnd *scmd) 3903 { 3904 /** 3905 * Round robin the IO interrupts among the high iops 3906 * reply queues in terms of batch count 16 when outstanding 3907 * IOs on the target device is >=8. 
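	 *
	 * As a sketch of the resulting mapping (constants assumed from the
	 * description above):
	 *
	 *   index = (high_iops_outstanding / MPT3SAS_HIGH_IOPS_BATCH_COUNT)
	 *           % MPT3SAS_HIGH_IOPS_REPLY_QUEUES
	 *
	 * so each batch of MPT3SAS_HIGH_IOPS_BATCH_COUNT submissions rotates
	 * to the next high iops reply queue.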
3908 */ 3909 3910 if (scsi_device_busy(scmd->device) > MPT3SAS_DEVICE_HIGH_IOPS_DEPTH) 3911 return base_mod64(( 3912 atomic64_add_return(1, &ioc->high_iops_outstanding) / 3913 MPT3SAS_HIGH_IOPS_BATCH_COUNT), 3914 MPT3SAS_HIGH_IOPS_REPLY_QUEUES); 3915 3916 return _base_get_msix_index(ioc, scmd); 3917 } 3918 3919 /** 3920 * mpt3sas_base_get_smid - obtain a free smid from internal queue 3921 * @ioc: per adapter object 3922 * @cb_idx: callback index 3923 * 3924 * Return: smid (zero is invalid) 3925 */ 3926 u16 3927 mpt3sas_base_get_smid(struct MPT3SAS_ADAPTER *ioc, u8 cb_idx) 3928 { 3929 unsigned long flags; 3930 struct request_tracker *request; 3931 u16 smid; 3932 3933 spin_lock_irqsave(&ioc->scsi_lookup_lock, flags); 3934 if (list_empty(&ioc->internal_free_list)) { 3935 spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags); 3936 ioc_err(ioc, "%s: smid not available\n", __func__); 3937 return 0; 3938 } 3939 3940 request = list_entry(ioc->internal_free_list.next, 3941 struct request_tracker, tracker_list); 3942 request->cb_idx = cb_idx; 3943 smid = request->smid; 3944 list_del(&request->tracker_list); 3945 spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags); 3946 return smid; 3947 } 3948 3949 /** 3950 * mpt3sas_base_get_smid_scsiio - obtain a free smid from scsiio queue 3951 * @ioc: per adapter object 3952 * @cb_idx: callback index 3953 * @scmd: pointer to scsi command object 3954 * 3955 * Return: smid (zero is invalid) 3956 */ 3957 u16 3958 mpt3sas_base_get_smid_scsiio(struct MPT3SAS_ADAPTER *ioc, u8 cb_idx, 3959 struct scsi_cmnd *scmd) 3960 { 3961 struct scsiio_tracker *request = scsi_cmd_priv(scmd); 3962 u16 smid; 3963 u32 tag, unique_tag; 3964 3965 unique_tag = blk_mq_unique_tag(scsi_cmd_to_rq(scmd)); 3966 tag = blk_mq_unique_tag_to_tag(unique_tag); 3967 3968 /* 3969 * Store hw queue number corresponding to the tag. 3970 * This hw queue number is used later to determine 3971 * the unique_tag using the logic below. This unique_tag 3972 * is used to retrieve the scmd pointer corresponding 3973 * to tag using scsi_host_find_tag() API. 3974 * 3975 * tag = smid - 1; 3976 * unique_tag = ioc->io_queue_num[tag] << BLK_MQ_UNIQUE_TAG_BITS | tag; 3977 */ 3978 ioc->io_queue_num[tag] = blk_mq_unique_tag_to_hwq(unique_tag); 3979 3980 smid = tag + 1; 3981 request->cb_idx = cb_idx; 3982 request->smid = smid; 3983 request->scmd = scmd; 3984 INIT_LIST_HEAD(&request->chain_list); 3985 return smid; 3986 } 3987 3988 /** 3989 * mpt3sas_base_get_smid_hpr - obtain a free smid from hi-priority queue 3990 * @ioc: per adapter object 3991 * @cb_idx: callback index 3992 * 3993 * Return: smid (zero is invalid) 3994 */ 3995 u16 3996 mpt3sas_base_get_smid_hpr(struct MPT3SAS_ADAPTER *ioc, u8 cb_idx) 3997 { 3998 unsigned long flags; 3999 struct request_tracker *request; 4000 u16 smid; 4001 4002 spin_lock_irqsave(&ioc->scsi_lookup_lock, flags); 4003 if (list_empty(&ioc->hpr_free_list)) { 4004 spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags); 4005 return 0; 4006 } 4007 4008 request = list_entry(ioc->hpr_free_list.next, 4009 struct request_tracker, tracker_list); 4010 request->cb_idx = cb_idx; 4011 smid = request->smid; 4012 list_del(&request->tracker_list); 4013 spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags); 4014 return smid; 4015 } 4016 4017 static void 4018 _base_recovery_check(struct MPT3SAS_ADAPTER *ioc) 4019 { 4020 /* 4021 * See _wait_for_commands_to_complete() call with regards to this code. 
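	 *
	 * In short (editorial note): while a host reset is in progress the
	 * reset path waits on reset_wq with a non-zero pending_io_count; each
	 * freed smid re-samples scsi_host_busy() here and wakes the waiter
	 * once the outstanding count reaches zero.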
4022 */ 4023 if (ioc->shost_recovery && ioc->pending_io_count) { 4024 ioc->pending_io_count = scsi_host_busy(ioc->shost); 4025 if (ioc->pending_io_count == 0) 4026 wake_up(&ioc->reset_wq); 4027 } 4028 } 4029 4030 void mpt3sas_base_clear_st(struct MPT3SAS_ADAPTER *ioc, 4031 struct scsiio_tracker *st) 4032 { 4033 if (WARN_ON(st->smid == 0)) 4034 return; 4035 st->cb_idx = 0xFF; 4036 st->direct_io = 0; 4037 st->scmd = NULL; 4038 atomic_set(&ioc->chain_lookup[st->smid - 1].chain_offset, 0); 4039 st->smid = 0; 4040 } 4041 4042 /** 4043 * mpt3sas_base_free_smid - put smid back on free_list 4044 * @ioc: per adapter object 4045 * @smid: system request message index 4046 */ 4047 void 4048 mpt3sas_base_free_smid(struct MPT3SAS_ADAPTER *ioc, u16 smid) 4049 { 4050 unsigned long flags; 4051 int i; 4052 4053 if (smid < ioc->hi_priority_smid) { 4054 struct scsiio_tracker *st; 4055 void *request; 4056 4057 st = _get_st_from_smid(ioc, smid); 4058 if (!st) { 4059 _base_recovery_check(ioc); 4060 return; 4061 } 4062 4063 /* Clear MPI request frame */ 4064 request = mpt3sas_base_get_msg_frame(ioc, smid); 4065 memset(request, 0, ioc->request_sz); 4066 4067 mpt3sas_base_clear_st(ioc, st); 4068 _base_recovery_check(ioc); 4069 ioc->io_queue_num[smid - 1] = 0; 4070 return; 4071 } 4072 4073 spin_lock_irqsave(&ioc->scsi_lookup_lock, flags); 4074 if (smid < ioc->internal_smid) { 4075 /* hi-priority */ 4076 i = smid - ioc->hi_priority_smid; 4077 ioc->hpr_lookup[i].cb_idx = 0xFF; 4078 list_add(&ioc->hpr_lookup[i].tracker_list, &ioc->hpr_free_list); 4079 } else if (smid <= ioc->hba_queue_depth) { 4080 /* internal queue */ 4081 i = smid - ioc->internal_smid; 4082 ioc->internal_lookup[i].cb_idx = 0xFF; 4083 list_add(&ioc->internal_lookup[i].tracker_list, 4084 &ioc->internal_free_list); 4085 } 4086 spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags); 4087 } 4088 4089 /** 4090 * _base_mpi_ep_writeq - 32 bit write to MMIO 4091 * @b: data payload 4092 * @addr: address in MMIO space 4093 * @writeq_lock: spin lock 4094 * 4095 * This special handling for MPI EP to take care of 32 bit 4096 * environment where its not quarenteed to send the entire word 4097 * in one transfer. 4098 */ 4099 static inline void 4100 _base_mpi_ep_writeq(__u64 b, volatile void __iomem *addr, 4101 spinlock_t *writeq_lock) 4102 { 4103 unsigned long flags; 4104 4105 spin_lock_irqsave(writeq_lock, flags); 4106 __raw_writel((u32)(b), addr); 4107 __raw_writel((u32)(b >> 32), (addr + 4)); 4108 spin_unlock_irqrestore(writeq_lock, flags); 4109 } 4110 4111 /** 4112 * _base_writeq - 64 bit write to MMIO 4113 * @b: data payload 4114 * @addr: address in MMIO space 4115 * @writeq_lock: spin lock 4116 * 4117 * Glue for handling an atomic 64 bit word to MMIO. This special handling takes 4118 * care of 32 bit environment where its not quarenteed to send the entire word 4119 * in one transfer. 4120 */ 4121 #if defined(writeq) && defined(CONFIG_64BIT) 4122 static inline void 4123 _base_writeq(__u64 b, volatile void __iomem *addr, spinlock_t *writeq_lock) 4124 { 4125 wmb(); 4126 __raw_writeq(b, addr); 4127 barrier(); 4128 } 4129 #else 4130 static inline void 4131 _base_writeq(__u64 b, volatile void __iomem *addr, spinlock_t *writeq_lock) 4132 { 4133 _base_mpi_ep_writeq(b, addr, writeq_lock); 4134 } 4135 #endif 4136 4137 /** 4138 * _base_set_and_get_msix_index - get the msix index and assign to msix_io 4139 * variable of scsi tracker 4140 * @ioc: per adapter object 4141 * @smid: system request message index 4142 * 4143 * Return: msix index. 
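 *
 * Typical use in the request-posting helpers below (sketch):
 *
 *   descriptor.SCSIIO.MSIxIndex = _base_set_and_get_msix_index(ioc, smid);
 *
 * so the chosen reply queue is both recorded in the scsiio_tracker and
 * written into the request descriptor handed to the firmware.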
4144 */ 4145 static u8 4146 _base_set_and_get_msix_index(struct MPT3SAS_ADAPTER *ioc, u16 smid) 4147 { 4148 struct scsiio_tracker *st = NULL; 4149 4150 if (smid < ioc->hi_priority_smid) 4151 st = _get_st_from_smid(ioc, smid); 4152 4153 if (st == NULL) 4154 return _base_get_msix_index(ioc, NULL); 4155 4156 st->msix_io = ioc->get_msix_index_for_smlio(ioc, st->scmd); 4157 return st->msix_io; 4158 } 4159 4160 /** 4161 * _base_put_smid_mpi_ep_scsi_io - send SCSI_IO request to firmware 4162 * @ioc: per adapter object 4163 * @smid: system request message index 4164 * @handle: device handle 4165 */ 4166 static void 4167 _base_put_smid_mpi_ep_scsi_io(struct MPT3SAS_ADAPTER *ioc, 4168 u16 smid, u16 handle) 4169 { 4170 Mpi2RequestDescriptorUnion_t descriptor; 4171 u64 *request = (u64 *)&descriptor; 4172 void *mpi_req_iomem; 4173 __le32 *mfp = (__le32 *)mpt3sas_base_get_msg_frame(ioc, smid); 4174 4175 _clone_sg_entries(ioc, (void *) mfp, smid); 4176 mpi_req_iomem = (void __force *)ioc->chip + 4177 MPI_FRAME_START_OFFSET + (smid * ioc->request_sz); 4178 _base_clone_mpi_to_sys_mem(mpi_req_iomem, (void *)mfp, 4179 ioc->request_sz); 4180 descriptor.SCSIIO.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO; 4181 descriptor.SCSIIO.MSIxIndex = _base_set_and_get_msix_index(ioc, smid); 4182 descriptor.SCSIIO.SMID = cpu_to_le16(smid); 4183 descriptor.SCSIIO.DevHandle = cpu_to_le16(handle); 4184 descriptor.SCSIIO.LMID = 0; 4185 _base_mpi_ep_writeq(*request, &ioc->chip->RequestDescriptorPostLow, 4186 &ioc->scsi_lookup_lock); 4187 } 4188 4189 /** 4190 * _base_put_smid_scsi_io - send SCSI_IO request to firmware 4191 * @ioc: per adapter object 4192 * @smid: system request message index 4193 * @handle: device handle 4194 */ 4195 static void 4196 _base_put_smid_scsi_io(struct MPT3SAS_ADAPTER *ioc, u16 smid, u16 handle) 4197 { 4198 Mpi2RequestDescriptorUnion_t descriptor; 4199 u64 *request = (u64 *)&descriptor; 4200 4201 4202 descriptor.SCSIIO.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO; 4203 descriptor.SCSIIO.MSIxIndex = _base_set_and_get_msix_index(ioc, smid); 4204 descriptor.SCSIIO.SMID = cpu_to_le16(smid); 4205 descriptor.SCSIIO.DevHandle = cpu_to_le16(handle); 4206 descriptor.SCSIIO.LMID = 0; 4207 _base_writeq(*request, &ioc->chip->RequestDescriptorPostLow, 4208 &ioc->scsi_lookup_lock); 4209 } 4210 4211 /** 4212 * _base_put_smid_fast_path - send fast path request to firmware 4213 * @ioc: per adapter object 4214 * @smid: system request message index 4215 * @handle: device handle 4216 */ 4217 static void 4218 _base_put_smid_fast_path(struct MPT3SAS_ADAPTER *ioc, u16 smid, 4219 u16 handle) 4220 { 4221 Mpi2RequestDescriptorUnion_t descriptor; 4222 u64 *request = (u64 *)&descriptor; 4223 4224 descriptor.SCSIIO.RequestFlags = 4225 MPI25_REQ_DESCRIPT_FLAGS_FAST_PATH_SCSI_IO; 4226 descriptor.SCSIIO.MSIxIndex = _base_set_and_get_msix_index(ioc, smid); 4227 descriptor.SCSIIO.SMID = cpu_to_le16(smid); 4228 descriptor.SCSIIO.DevHandle = cpu_to_le16(handle); 4229 descriptor.SCSIIO.LMID = 0; 4230 _base_writeq(*request, &ioc->chip->RequestDescriptorPostLow, 4231 &ioc->scsi_lookup_lock); 4232 } 4233 4234 /** 4235 * _base_put_smid_hi_priority - send Task Management request to firmware 4236 * @ioc: per adapter object 4237 * @smid: system request message index 4238 * @msix_task: msix_task will be same as msix of IO in case of task abort else 0 4239 */ 4240 static void 4241 _base_put_smid_hi_priority(struct MPT3SAS_ADAPTER *ioc, u16 smid, 4242 u16 msix_task) 4243 { 4244 Mpi2RequestDescriptorUnion_t descriptor; 4245 void 
*mpi_req_iomem; 4246 u64 *request; 4247 4248 if (ioc->is_mcpu_endpoint) { 4249 __le32 *mfp = (__le32 *)mpt3sas_base_get_msg_frame(ioc, smid); 4250 4251 /* TBD 256 is offset within sys register. */ 4252 mpi_req_iomem = (void __force *)ioc->chip 4253 + MPI_FRAME_START_OFFSET 4254 + (smid * ioc->request_sz); 4255 _base_clone_mpi_to_sys_mem(mpi_req_iomem, (void *)mfp, 4256 ioc->request_sz); 4257 } 4258 4259 request = (u64 *)&descriptor; 4260 4261 descriptor.HighPriority.RequestFlags = 4262 MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY; 4263 descriptor.HighPriority.MSIxIndex = msix_task; 4264 descriptor.HighPriority.SMID = cpu_to_le16(smid); 4265 descriptor.HighPriority.LMID = 0; 4266 descriptor.HighPriority.Reserved1 = 0; 4267 if (ioc->is_mcpu_endpoint) 4268 _base_mpi_ep_writeq(*request, 4269 &ioc->chip->RequestDescriptorPostLow, 4270 &ioc->scsi_lookup_lock); 4271 else 4272 _base_writeq(*request, &ioc->chip->RequestDescriptorPostLow, 4273 &ioc->scsi_lookup_lock); 4274 } 4275 4276 /** 4277 * mpt3sas_base_put_smid_nvme_encap - send NVMe encapsulated request to 4278 * firmware 4279 * @ioc: per adapter object 4280 * @smid: system request message index 4281 */ 4282 void 4283 mpt3sas_base_put_smid_nvme_encap(struct MPT3SAS_ADAPTER *ioc, u16 smid) 4284 { 4285 Mpi2RequestDescriptorUnion_t descriptor; 4286 u64 *request = (u64 *)&descriptor; 4287 4288 descriptor.Default.RequestFlags = 4289 MPI26_REQ_DESCRIPT_FLAGS_PCIE_ENCAPSULATED; 4290 descriptor.Default.MSIxIndex = _base_set_and_get_msix_index(ioc, smid); 4291 descriptor.Default.SMID = cpu_to_le16(smid); 4292 descriptor.Default.LMID = 0; 4293 descriptor.Default.DescriptorTypeDependent = 0; 4294 _base_writeq(*request, &ioc->chip->RequestDescriptorPostLow, 4295 &ioc->scsi_lookup_lock); 4296 } 4297 4298 /** 4299 * _base_put_smid_default - Default, primarily used for config pages 4300 * @ioc: per adapter object 4301 * @smid: system request message index 4302 */ 4303 static void 4304 _base_put_smid_default(struct MPT3SAS_ADAPTER *ioc, u16 smid) 4305 { 4306 Mpi2RequestDescriptorUnion_t descriptor; 4307 void *mpi_req_iomem; 4308 u64 *request; 4309 4310 if (ioc->is_mcpu_endpoint) { 4311 __le32 *mfp = (__le32 *)mpt3sas_base_get_msg_frame(ioc, smid); 4312 4313 _clone_sg_entries(ioc, (void *) mfp, smid); 4314 /* TBD 256 is offset within sys register */ 4315 mpi_req_iomem = (void __force *)ioc->chip + 4316 MPI_FRAME_START_OFFSET + (smid * ioc->request_sz); 4317 _base_clone_mpi_to_sys_mem(mpi_req_iomem, (void *)mfp, 4318 ioc->request_sz); 4319 } 4320 request = (u64 *)&descriptor; 4321 descriptor.Default.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE; 4322 descriptor.Default.MSIxIndex = _base_set_and_get_msix_index(ioc, smid); 4323 descriptor.Default.SMID = cpu_to_le16(smid); 4324 descriptor.Default.LMID = 0; 4325 descriptor.Default.DescriptorTypeDependent = 0; 4326 if (ioc->is_mcpu_endpoint) 4327 _base_mpi_ep_writeq(*request, 4328 &ioc->chip->RequestDescriptorPostLow, 4329 &ioc->scsi_lookup_lock); 4330 else 4331 _base_writeq(*request, &ioc->chip->RequestDescriptorPostLow, 4332 &ioc->scsi_lookup_lock); 4333 } 4334 4335 /** 4336 * _base_put_smid_scsi_io_atomic - send SCSI_IO request to firmware using 4337 * Atomic Request Descriptor 4338 * @ioc: per adapter object 4339 * @smid: system request message index 4340 * @handle: device handle, unused in this function, for function type match 4341 * 4342 * Return: nothing. 
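 *
 * Editorial note: unlike the non-atomic variants above, the whole
 * Mpi26AtomicRequestDescriptor_t fits in 32 bits, so posting is a single
 *
 *   writel(cpu_to_le32(*request), &ioc->chip->AtomicRequestDescriptorPost);
 *
 * rather than a 64 bit write to RequestDescriptorPostLow.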
4343 */ 4344 static void 4345 _base_put_smid_scsi_io_atomic(struct MPT3SAS_ADAPTER *ioc, u16 smid, 4346 u16 handle) 4347 { 4348 Mpi26AtomicRequestDescriptor_t descriptor; 4349 u32 *request = (u32 *)&descriptor; 4350 4351 descriptor.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO; 4352 descriptor.MSIxIndex = _base_set_and_get_msix_index(ioc, smid); 4353 descriptor.SMID = cpu_to_le16(smid); 4354 4355 writel(cpu_to_le32(*request), &ioc->chip->AtomicRequestDescriptorPost); 4356 } 4357 4358 /** 4359 * _base_put_smid_fast_path_atomic - send fast path request to firmware 4360 * using Atomic Request Descriptor 4361 * @ioc: per adapter object 4362 * @smid: system request message index 4363 * @handle: device handle, unused in this function, for function type match 4364 * Return: nothing 4365 */ 4366 static void 4367 _base_put_smid_fast_path_atomic(struct MPT3SAS_ADAPTER *ioc, u16 smid, 4368 u16 handle) 4369 { 4370 Mpi26AtomicRequestDescriptor_t descriptor; 4371 u32 *request = (u32 *)&descriptor; 4372 4373 descriptor.RequestFlags = MPI25_REQ_DESCRIPT_FLAGS_FAST_PATH_SCSI_IO; 4374 descriptor.MSIxIndex = _base_set_and_get_msix_index(ioc, smid); 4375 descriptor.SMID = cpu_to_le16(smid); 4376 4377 writel(cpu_to_le32(*request), &ioc->chip->AtomicRequestDescriptorPost); 4378 } 4379 4380 /** 4381 * _base_put_smid_hi_priority_atomic - send Task Management request to 4382 * firmware using Atomic Request Descriptor 4383 * @ioc: per adapter object 4384 * @smid: system request message index 4385 * @msix_task: msix_task will be same as msix of IO in case of task abort else 0 4386 * 4387 * Return: nothing. 4388 */ 4389 static void 4390 _base_put_smid_hi_priority_atomic(struct MPT3SAS_ADAPTER *ioc, u16 smid, 4391 u16 msix_task) 4392 { 4393 Mpi26AtomicRequestDescriptor_t descriptor; 4394 u32 *request = (u32 *)&descriptor; 4395 4396 descriptor.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY; 4397 descriptor.MSIxIndex = msix_task; 4398 descriptor.SMID = cpu_to_le16(smid); 4399 4400 writel(cpu_to_le32(*request), &ioc->chip->AtomicRequestDescriptorPost); 4401 } 4402 4403 /** 4404 * _base_put_smid_default_atomic - Default, primarily used for config pages 4405 * use Atomic Request Descriptor 4406 * @ioc: per adapter object 4407 * @smid: system request message index 4408 * 4409 * Return: nothing. 
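 *
 * Editorial note (assumption based on the naming, not stated in this file):
 * the driver is expected to install either these *_atomic helpers or the
 * non-atomic ones behind function pointers such as ioc->put_smid_default,
 * depending on whether the controller reports support for atomic request
 * descriptors.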
 */
static void
_base_put_smid_default_atomic(struct MPT3SAS_ADAPTER *ioc, u16 smid)
{
	Mpi26AtomicRequestDescriptor_t descriptor;
	u32 *request = (u32 *)&descriptor;

	descriptor.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE;
	descriptor.MSIxIndex = _base_set_and_get_msix_index(ioc, smid);
	descriptor.SMID = cpu_to_le16(smid);

	writel(cpu_to_le32(*request), &ioc->chip->AtomicRequestDescriptorPost);
}

/**
 * _base_display_OEMs_branding - Display branding string
 * @ioc: per adapter object
 */
static void
_base_display_OEMs_branding(struct MPT3SAS_ADAPTER *ioc)
{
	switch (ioc->pdev->subsystem_vendor) {
	case PCI_VENDOR_ID_INTEL:
		switch (ioc->pdev->device) {
		case MPI2_MFGPAGE_DEVID_SAS2008:
			switch (ioc->pdev->subsystem_device) {
			case MPT2SAS_INTEL_RMS2LL080_SSDID:
				ioc_info(ioc, "%s\n",
					 MPT2SAS_INTEL_RMS2LL080_BRANDING);
				break;
			case MPT2SAS_INTEL_RMS2LL040_SSDID:
				ioc_info(ioc, "%s\n",
					 MPT2SAS_INTEL_RMS2LL040_BRANDING);
				break;
			case MPT2SAS_INTEL_SSD910_SSDID:
				ioc_info(ioc, "%s\n",
					 MPT2SAS_INTEL_SSD910_BRANDING);
				break;
			default:
				ioc_info(ioc, "Intel(R) Controller: Subsystem ID: 0x%X\n",
					 ioc->pdev->subsystem_device);
				break;
			}
			break;
		case MPI2_MFGPAGE_DEVID_SAS2308_2:
			switch (ioc->pdev->subsystem_device) {
			case MPT2SAS_INTEL_RS25GB008_SSDID:
				ioc_info(ioc, "%s\n",
					 MPT2SAS_INTEL_RS25GB008_BRANDING);
				break;
			case MPT2SAS_INTEL_RMS25JB080_SSDID:
				ioc_info(ioc, "%s\n",
					 MPT2SAS_INTEL_RMS25JB080_BRANDING);
				break;
			case MPT2SAS_INTEL_RMS25JB040_SSDID:
				ioc_info(ioc, "%s\n",
					 MPT2SAS_INTEL_RMS25JB040_BRANDING);
				break;
			case MPT2SAS_INTEL_RMS25KB080_SSDID:
				ioc_info(ioc, "%s\n",
					 MPT2SAS_INTEL_RMS25KB080_BRANDING);
				break;
			case MPT2SAS_INTEL_RMS25KB040_SSDID:
				ioc_info(ioc, "%s\n",
					 MPT2SAS_INTEL_RMS25KB040_BRANDING);
				break;
			case MPT2SAS_INTEL_RMS25LB040_SSDID:
				ioc_info(ioc, "%s\n",
					 MPT2SAS_INTEL_RMS25LB040_BRANDING);
				break;
			case MPT2SAS_INTEL_RMS25LB080_SSDID:
				ioc_info(ioc, "%s\n",
					 MPT2SAS_INTEL_RMS25LB080_BRANDING);
				break;
			default:
				ioc_info(ioc, "Intel(R) Controller: Subsystem ID: 0x%X\n",
					 ioc->pdev->subsystem_device);
				break;
			}
			break;
		case MPI25_MFGPAGE_DEVID_SAS3008:
			switch (ioc->pdev->subsystem_device) {
			case MPT3SAS_INTEL_RMS3JC080_SSDID:
				ioc_info(ioc, "%s\n",
					 MPT3SAS_INTEL_RMS3JC080_BRANDING);
				break;

			case MPT3SAS_INTEL_RS3GC008_SSDID:
				ioc_info(ioc, "%s\n",
					 MPT3SAS_INTEL_RS3GC008_BRANDING);
				break;
			case MPT3SAS_INTEL_RS3FC044_SSDID:
				ioc_info(ioc, "%s\n",
					 MPT3SAS_INTEL_RS3FC044_BRANDING);
				break;
			case MPT3SAS_INTEL_RS3UC080_SSDID:
				ioc_info(ioc, "%s\n",
					 MPT3SAS_INTEL_RS3UC080_BRANDING);
				break;
			default:
				ioc_info(ioc, "Intel(R) Controller: Subsystem ID: 0x%X\n",
					 ioc->pdev->subsystem_device);
				break;
			}
			break;
		default:
			ioc_info(ioc, "Intel(R) Controller: Subsystem ID: 0x%X\n",
				 ioc->pdev->subsystem_device);
			break;
		}
		break;
	case PCI_VENDOR_ID_DELL:
		switch (ioc->pdev->device) {
		case MPI2_MFGPAGE_DEVID_SAS2008:
			switch (ioc->pdev->subsystem_device) {
			case
MPT2SAS_DELL_6GBPS_SAS_HBA_SSDID: 4529 ioc_info(ioc, "%s\n", 4530 MPT2SAS_DELL_6GBPS_SAS_HBA_BRANDING); 4531 break; 4532 case MPT2SAS_DELL_PERC_H200_ADAPTER_SSDID: 4533 ioc_info(ioc, "%s\n", 4534 MPT2SAS_DELL_PERC_H200_ADAPTER_BRANDING); 4535 break; 4536 case MPT2SAS_DELL_PERC_H200_INTEGRATED_SSDID: 4537 ioc_info(ioc, "%s\n", 4538 MPT2SAS_DELL_PERC_H200_INTEGRATED_BRANDING); 4539 break; 4540 case MPT2SAS_DELL_PERC_H200_MODULAR_SSDID: 4541 ioc_info(ioc, "%s\n", 4542 MPT2SAS_DELL_PERC_H200_MODULAR_BRANDING); 4543 break; 4544 case MPT2SAS_DELL_PERC_H200_EMBEDDED_SSDID: 4545 ioc_info(ioc, "%s\n", 4546 MPT2SAS_DELL_PERC_H200_EMBEDDED_BRANDING); 4547 break; 4548 case MPT2SAS_DELL_PERC_H200_SSDID: 4549 ioc_info(ioc, "%s\n", 4550 MPT2SAS_DELL_PERC_H200_BRANDING); 4551 break; 4552 case MPT2SAS_DELL_6GBPS_SAS_SSDID: 4553 ioc_info(ioc, "%s\n", 4554 MPT2SAS_DELL_6GBPS_SAS_BRANDING); 4555 break; 4556 default: 4557 ioc_info(ioc, "Dell 6Gbps HBA: Subsystem ID: 0x%X\n", 4558 ioc->pdev->subsystem_device); 4559 break; 4560 } 4561 break; 4562 case MPI25_MFGPAGE_DEVID_SAS3008: 4563 switch (ioc->pdev->subsystem_device) { 4564 case MPT3SAS_DELL_12G_HBA_SSDID: 4565 ioc_info(ioc, "%s\n", 4566 MPT3SAS_DELL_12G_HBA_BRANDING); 4567 break; 4568 default: 4569 ioc_info(ioc, "Dell 12Gbps HBA: Subsystem ID: 0x%X\n", 4570 ioc->pdev->subsystem_device); 4571 break; 4572 } 4573 break; 4574 default: 4575 ioc_info(ioc, "Dell HBA: Subsystem ID: 0x%X\n", 4576 ioc->pdev->subsystem_device); 4577 break; 4578 } 4579 break; 4580 case PCI_VENDOR_ID_CISCO: 4581 switch (ioc->pdev->device) { 4582 case MPI25_MFGPAGE_DEVID_SAS3008: 4583 switch (ioc->pdev->subsystem_device) { 4584 case MPT3SAS_CISCO_12G_8E_HBA_SSDID: 4585 ioc_info(ioc, "%s\n", 4586 MPT3SAS_CISCO_12G_8E_HBA_BRANDING); 4587 break; 4588 case MPT3SAS_CISCO_12G_8I_HBA_SSDID: 4589 ioc_info(ioc, "%s\n", 4590 MPT3SAS_CISCO_12G_8I_HBA_BRANDING); 4591 break; 4592 case MPT3SAS_CISCO_12G_AVILA_HBA_SSDID: 4593 ioc_info(ioc, "%s\n", 4594 MPT3SAS_CISCO_12G_AVILA_HBA_BRANDING); 4595 break; 4596 default: 4597 ioc_info(ioc, "Cisco 12Gbps SAS HBA: Subsystem ID: 0x%X\n", 4598 ioc->pdev->subsystem_device); 4599 break; 4600 } 4601 break; 4602 case MPI25_MFGPAGE_DEVID_SAS3108_1: 4603 switch (ioc->pdev->subsystem_device) { 4604 case MPT3SAS_CISCO_12G_AVILA_HBA_SSDID: 4605 ioc_info(ioc, "%s\n", 4606 MPT3SAS_CISCO_12G_AVILA_HBA_BRANDING); 4607 break; 4608 case MPT3SAS_CISCO_12G_COLUSA_MEZZANINE_HBA_SSDID: 4609 ioc_info(ioc, "%s\n", 4610 MPT3SAS_CISCO_12G_COLUSA_MEZZANINE_HBA_BRANDING); 4611 break; 4612 default: 4613 ioc_info(ioc, "Cisco 12Gbps SAS HBA: Subsystem ID: 0x%X\n", 4614 ioc->pdev->subsystem_device); 4615 break; 4616 } 4617 break; 4618 default: 4619 ioc_info(ioc, "Cisco SAS HBA: Subsystem ID: 0x%X\n", 4620 ioc->pdev->subsystem_device); 4621 break; 4622 } 4623 break; 4624 case MPT2SAS_HP_3PAR_SSVID: 4625 switch (ioc->pdev->device) { 4626 case MPI2_MFGPAGE_DEVID_SAS2004: 4627 switch (ioc->pdev->subsystem_device) { 4628 case MPT2SAS_HP_DAUGHTER_2_4_INTERNAL_SSDID: 4629 ioc_info(ioc, "%s\n", 4630 MPT2SAS_HP_DAUGHTER_2_4_INTERNAL_BRANDING); 4631 break; 4632 default: 4633 ioc_info(ioc, "HP 6Gbps SAS HBA: Subsystem ID: 0x%X\n", 4634 ioc->pdev->subsystem_device); 4635 break; 4636 } 4637 break; 4638 case MPI2_MFGPAGE_DEVID_SAS2308_2: 4639 switch (ioc->pdev->subsystem_device) { 4640 case MPT2SAS_HP_2_4_INTERNAL_SSDID: 4641 ioc_info(ioc, "%s\n", 4642 MPT2SAS_HP_2_4_INTERNAL_BRANDING); 4643 break; 4644 case MPT2SAS_HP_2_4_EXTERNAL_SSDID: 4645 ioc_info(ioc, "%s\n", 4646 
MPT2SAS_HP_2_4_EXTERNAL_BRANDING); 4647 break; 4648 case MPT2SAS_HP_1_4_INTERNAL_1_4_EXTERNAL_SSDID: 4649 ioc_info(ioc, "%s\n", 4650 MPT2SAS_HP_1_4_INTERNAL_1_4_EXTERNAL_BRANDING); 4651 break; 4652 case MPT2SAS_HP_EMBEDDED_2_4_INTERNAL_SSDID: 4653 ioc_info(ioc, "%s\n", 4654 MPT2SAS_HP_EMBEDDED_2_4_INTERNAL_BRANDING); 4655 break; 4656 default: 4657 ioc_info(ioc, "HP 6Gbps SAS HBA: Subsystem ID: 0x%X\n", 4658 ioc->pdev->subsystem_device); 4659 break; 4660 } 4661 break; 4662 default: 4663 ioc_info(ioc, "HP SAS HBA: Subsystem ID: 0x%X\n", 4664 ioc->pdev->subsystem_device); 4665 break; 4666 } 4667 break; 4668 default: 4669 break; 4670 } 4671 } 4672 4673 /** 4674 * _base_display_fwpkg_version - sends FWUpload request to pull FWPkg 4675 * version from FW Image Header. 4676 * @ioc: per adapter object 4677 * 4678 * Return: 0 for success, non-zero for failure. 4679 */ 4680 static int 4681 _base_display_fwpkg_version(struct MPT3SAS_ADAPTER *ioc) 4682 { 4683 Mpi2FWImageHeader_t *fw_img_hdr; 4684 Mpi26ComponentImageHeader_t *cmp_img_hdr; 4685 Mpi25FWUploadRequest_t *mpi_request; 4686 Mpi2FWUploadReply_t mpi_reply; 4687 int r = 0, issue_diag_reset = 0; 4688 u32 package_version = 0; 4689 void *fwpkg_data = NULL; 4690 dma_addr_t fwpkg_data_dma; 4691 u16 smid, ioc_status; 4692 size_t data_length; 4693 4694 dinitprintk(ioc, ioc_info(ioc, "%s\n", __func__)); 4695 4696 if (ioc->base_cmds.status & MPT3_CMD_PENDING) { 4697 ioc_err(ioc, "%s: internal command already in use\n", __func__); 4698 return -EAGAIN; 4699 } 4700 4701 data_length = sizeof(Mpi2FWImageHeader_t); 4702 fwpkg_data = dma_alloc_coherent(&ioc->pdev->dev, data_length, 4703 &fwpkg_data_dma, GFP_KERNEL); 4704 if (!fwpkg_data) { 4705 ioc_err(ioc, 4706 "Memory allocation for fwpkg data failed at %s:%d/%s()!\n", 4707 __FILE__, __LINE__, __func__); 4708 return -ENOMEM; 4709 } 4710 4711 smid = mpt3sas_base_get_smid(ioc, ioc->base_cb_idx); 4712 if (!smid) { 4713 ioc_err(ioc, "%s: failed obtaining a smid\n", __func__); 4714 r = -EAGAIN; 4715 goto out; 4716 } 4717 4718 ioc->base_cmds.status = MPT3_CMD_PENDING; 4719 mpi_request = mpt3sas_base_get_msg_frame(ioc, smid); 4720 ioc->base_cmds.smid = smid; 4721 memset(mpi_request, 0, sizeof(Mpi25FWUploadRequest_t)); 4722 mpi_request->Function = MPI2_FUNCTION_FW_UPLOAD; 4723 mpi_request->ImageType = MPI2_FW_UPLOAD_ITYPE_FW_FLASH; 4724 mpi_request->ImageSize = cpu_to_le32(data_length); 4725 ioc->build_sg(ioc, &mpi_request->SGL, 0, 0, fwpkg_data_dma, 4726 data_length); 4727 init_completion(&ioc->base_cmds.done); 4728 ioc->put_smid_default(ioc, smid); 4729 /* Wait for 15 seconds */ 4730 wait_for_completion_timeout(&ioc->base_cmds.done, 4731 FW_IMG_HDR_READ_TIMEOUT*HZ); 4732 ioc_info(ioc, "%s: complete\n", __func__); 4733 if (!(ioc->base_cmds.status & MPT3_CMD_COMPLETE)) { 4734 ioc_err(ioc, "%s: timeout\n", __func__); 4735 _debug_dump_mf(mpi_request, 4736 sizeof(Mpi25FWUploadRequest_t)/4); 4737 issue_diag_reset = 1; 4738 } else { 4739 memset(&mpi_reply, 0, sizeof(Mpi2FWUploadReply_t)); 4740 if (ioc->base_cmds.status & MPT3_CMD_REPLY_VALID) { 4741 memcpy(&mpi_reply, ioc->base_cmds.reply, 4742 sizeof(Mpi2FWUploadReply_t)); 4743 ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & 4744 MPI2_IOCSTATUS_MASK; 4745 if (ioc_status == MPI2_IOCSTATUS_SUCCESS) { 4746 fw_img_hdr = (Mpi2FWImageHeader_t *)fwpkg_data; 4747 if (le32_to_cpu(fw_img_hdr->Signature) == 4748 MPI26_IMAGE_HEADER_SIGNATURE0_MPI26) { 4749 cmp_img_hdr = 4750 (Mpi26ComponentImageHeader_t *) 4751 (fwpkg_data); 4752 package_version = 4753 le32_to_cpu( 4754 
cmp_img_hdr->ApplicationSpecific); 4755 } else 4756 package_version = 4757 le32_to_cpu( 4758 fw_img_hdr->PackageVersion.Word); 4759 if (package_version) 4760 ioc_info(ioc, 4761 "FW Package Ver(%02d.%02d.%02d.%02d)\n", 4762 ((package_version) & 0xFF000000) >> 24, 4763 ((package_version) & 0x00FF0000) >> 16, 4764 ((package_version) & 0x0000FF00) >> 8, 4765 (package_version) & 0x000000FF); 4766 } else { 4767 _debug_dump_mf(&mpi_reply, 4768 sizeof(Mpi2FWUploadReply_t)/4); 4769 } 4770 } 4771 } 4772 ioc->base_cmds.status = MPT3_CMD_NOT_USED; 4773 out: 4774 if (fwpkg_data) 4775 dma_free_coherent(&ioc->pdev->dev, data_length, fwpkg_data, 4776 fwpkg_data_dma); 4777 if (issue_diag_reset) { 4778 if (ioc->drv_internal_flags & MPT_DRV_INTERNAL_FIRST_PE_ISSUED) 4779 return -EFAULT; 4780 if (mpt3sas_base_check_for_fault_and_issue_reset(ioc)) 4781 return -EFAULT; 4782 r = -EAGAIN; 4783 } 4784 return r; 4785 } 4786 4787 /** 4788 * _base_display_ioc_capabilities - Display IOC's capabilities. 4789 * @ioc: per adapter object 4790 */ 4791 static void 4792 _base_display_ioc_capabilities(struct MPT3SAS_ADAPTER *ioc) 4793 { 4794 int i = 0; 4795 char desc[17] = {0}; 4796 u32 iounit_pg1_flags; 4797 4798 memtostr(desc, ioc->manu_pg0.ChipName); 4799 ioc_info(ioc, "%s: FWVersion(%02d.%02d.%02d.%02d), ChipRevision(0x%02x)\n", 4800 desc, 4801 (ioc->facts.FWVersion.Word & 0xFF000000) >> 24, 4802 (ioc->facts.FWVersion.Word & 0x00FF0000) >> 16, 4803 (ioc->facts.FWVersion.Word & 0x0000FF00) >> 8, 4804 ioc->facts.FWVersion.Word & 0x000000FF, 4805 ioc->pdev->revision); 4806 4807 _base_display_OEMs_branding(ioc); 4808 4809 if (ioc->facts.ProtocolFlags & MPI2_IOCFACTS_PROTOCOL_NVME_DEVICES) { 4810 pr_info("%sNVMe", i ? "," : ""); 4811 i++; 4812 } 4813 4814 ioc_info(ioc, "Protocol=("); 4815 4816 if (ioc->facts.ProtocolFlags & MPI2_IOCFACTS_PROTOCOL_SCSI_INITIATOR) { 4817 pr_cont("Initiator"); 4818 i++; 4819 } 4820 4821 if (ioc->facts.ProtocolFlags & MPI2_IOCFACTS_PROTOCOL_SCSI_TARGET) { 4822 pr_cont("%sTarget", i ? "," : ""); 4823 i++; 4824 } 4825 4826 i = 0; 4827 pr_cont("), Capabilities=("); 4828 4829 if (!ioc->hide_ir_msg) { 4830 if (ioc->facts.IOCCapabilities & 4831 MPI2_IOCFACTS_CAPABILITY_INTEGRATED_RAID) { 4832 pr_cont("Raid"); 4833 i++; 4834 } 4835 } 4836 4837 if (ioc->facts.IOCCapabilities & MPI2_IOCFACTS_CAPABILITY_TLR) { 4838 pr_cont("%sTLR", i ? "," : ""); 4839 i++; 4840 } 4841 4842 if (ioc->facts.IOCCapabilities & MPI2_IOCFACTS_CAPABILITY_MULTICAST) { 4843 pr_cont("%sMulticast", i ? "," : ""); 4844 i++; 4845 } 4846 4847 if (ioc->facts.IOCCapabilities & 4848 MPI2_IOCFACTS_CAPABILITY_BIDIRECTIONAL_TARGET) { 4849 pr_cont("%sBIDI Target", i ? "," : ""); 4850 i++; 4851 } 4852 4853 if (ioc->facts.IOCCapabilities & MPI2_IOCFACTS_CAPABILITY_EEDP) { 4854 pr_cont("%sEEDP", i ? "," : ""); 4855 i++; 4856 } 4857 4858 if (ioc->facts.IOCCapabilities & 4859 MPI2_IOCFACTS_CAPABILITY_SNAPSHOT_BUFFER) { 4860 pr_cont("%sSnapshot Buffer", i ? "," : ""); 4861 i++; 4862 } 4863 4864 if (ioc->facts.IOCCapabilities & 4865 MPI2_IOCFACTS_CAPABILITY_DIAG_TRACE_BUFFER) { 4866 pr_cont("%sDiag Trace Buffer", i ? "," : ""); 4867 i++; 4868 } 4869 4870 if (ioc->facts.IOCCapabilities & 4871 MPI2_IOCFACTS_CAPABILITY_EXTENDED_BUFFER) { 4872 pr_cont("%sDiag Extended Buffer", i ? "," : ""); 4873 i++; 4874 } 4875 4876 if (ioc->facts.IOCCapabilities & 4877 MPI2_IOCFACTS_CAPABILITY_TASK_SET_FULL_HANDLING) { 4878 pr_cont("%sTask Set Full", i ? 
"," : ""); 4879 i++; 4880 } 4881 4882 if (ioc->facts.IOCCapabilities & 4883 MPI26_IOCFACTS_CAPABILITY_MCTP_PASSTHRU) { 4884 pr_cont("%sMCTP Passthru", i ? "," : ""); 4885 i++; 4886 } 4887 4888 iounit_pg1_flags = le32_to_cpu(ioc->iounit_pg1.Flags); 4889 if (!(iounit_pg1_flags & MPI2_IOUNITPAGE1_NATIVE_COMMAND_Q_DISABLE)) { 4890 pr_cont("%sNCQ", i ? "," : ""); 4891 i++; 4892 } 4893 4894 pr_cont(")\n"); 4895 } 4896 4897 /** 4898 * mpt3sas_base_update_missing_delay - change the missing delay timers 4899 * @ioc: per adapter object 4900 * @device_missing_delay: amount of time till device is reported missing 4901 * @io_missing_delay: interval IO is returned when there is a missing device 4902 * 4903 * Passed on the command line, this function will modify the device missing 4904 * delay, as well as the io missing delay. This should be called at driver 4905 * load time. 4906 */ 4907 void 4908 mpt3sas_base_update_missing_delay(struct MPT3SAS_ADAPTER *ioc, 4909 u16 device_missing_delay, u8 io_missing_delay) 4910 { 4911 u16 dmd, dmd_new, dmd_orignal; 4912 u8 io_missing_delay_original; 4913 u16 sz; 4914 Mpi2SasIOUnitPage1_t *sas_iounit_pg1 = NULL; 4915 Mpi2ConfigReply_t mpi_reply; 4916 u8 num_phys = 0; 4917 u16 ioc_status; 4918 4919 mpt3sas_config_get_number_hba_phys(ioc, &num_phys); 4920 if (!num_phys) 4921 return; 4922 4923 sz = struct_size(sas_iounit_pg1, PhyData, num_phys); 4924 sas_iounit_pg1 = kzalloc(sz, GFP_KERNEL); 4925 if (!sas_iounit_pg1) { 4926 ioc_err(ioc, "failure at %s:%d/%s()!\n", 4927 __FILE__, __LINE__, __func__); 4928 goto out; 4929 } 4930 if ((mpt3sas_config_get_sas_iounit_pg1(ioc, &mpi_reply, 4931 sas_iounit_pg1, sz))) { 4932 ioc_err(ioc, "failure at %s:%d/%s()!\n", 4933 __FILE__, __LINE__, __func__); 4934 goto out; 4935 } 4936 ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & 4937 MPI2_IOCSTATUS_MASK; 4938 if (ioc_status != MPI2_IOCSTATUS_SUCCESS) { 4939 ioc_err(ioc, "failure at %s:%d/%s()!\n", 4940 __FILE__, __LINE__, __func__); 4941 goto out; 4942 } 4943 4944 /* device missing delay */ 4945 dmd = sas_iounit_pg1->ReportDeviceMissingDelay; 4946 if (dmd & MPI2_SASIOUNIT1_REPORT_MISSING_UNIT_16) 4947 dmd = (dmd & MPI2_SASIOUNIT1_REPORT_MISSING_TIMEOUT_MASK) * 16; 4948 else 4949 dmd = dmd & MPI2_SASIOUNIT1_REPORT_MISSING_TIMEOUT_MASK; 4950 dmd_orignal = dmd; 4951 if (device_missing_delay > 0x7F) { 4952 dmd = (device_missing_delay > 0x7F0) ? 0x7F0 : 4953 device_missing_delay; 4954 dmd = dmd / 16; 4955 dmd |= MPI2_SASIOUNIT1_REPORT_MISSING_UNIT_16; 4956 } else 4957 dmd = device_missing_delay; 4958 sas_iounit_pg1->ReportDeviceMissingDelay = dmd; 4959 4960 /* io missing delay */ 4961 io_missing_delay_original = sas_iounit_pg1->IODeviceMissingDelay; 4962 sas_iounit_pg1->IODeviceMissingDelay = io_missing_delay; 4963 4964 if (!mpt3sas_config_set_sas_iounit_pg1(ioc, &mpi_reply, sas_iounit_pg1, 4965 sz)) { 4966 if (dmd & MPI2_SASIOUNIT1_REPORT_MISSING_UNIT_16) 4967 dmd_new = (dmd & 4968 MPI2_SASIOUNIT1_REPORT_MISSING_TIMEOUT_MASK) * 16; 4969 else 4970 dmd_new = 4971 dmd & MPI2_SASIOUNIT1_REPORT_MISSING_TIMEOUT_MASK; 4972 ioc_info(ioc, "device_missing_delay: old(%d), new(%d)\n", 4973 dmd_orignal, dmd_new); 4974 ioc_info(ioc, "ioc_missing_delay: old(%d), new(%d)\n", 4975 io_missing_delay_original, 4976 io_missing_delay); 4977 ioc->device_missing_delay = dmd_new; 4978 ioc->io_missing_delay = io_missing_delay; 4979 } 4980 4981 out: 4982 kfree(sas_iounit_pg1); 4983 } 4984 4985 /** 4986 * _base_update_ioc_page1_inlinewith_perf_mode - Update IOC Page1 fields 4987 * according to performance mode. 
4988 * @ioc : per adapter object 4989 * 4990 * Return: zero on success; otherwise return EAGAIN error code asking the 4991 * caller to retry. 4992 */ 4993 static int 4994 _base_update_ioc_page1_inlinewith_perf_mode(struct MPT3SAS_ADAPTER *ioc) 4995 { 4996 Mpi2IOCPage1_t ioc_pg1; 4997 Mpi2ConfigReply_t mpi_reply; 4998 int rc; 4999 5000 rc = mpt3sas_config_get_ioc_pg1(ioc, &mpi_reply, &ioc->ioc_pg1_copy); 5001 if (rc) 5002 return rc; 5003 memcpy(&ioc_pg1, &ioc->ioc_pg1_copy, sizeof(Mpi2IOCPage1_t)); 5004 5005 switch (perf_mode) { 5006 case MPT_PERF_MODE_DEFAULT: 5007 case MPT_PERF_MODE_BALANCED: 5008 if (ioc->high_iops_queues) { 5009 ioc_info(ioc, 5010 "Enable interrupt coalescing only for first\t" 5011 "%d reply queues\n", 5012 MPT3SAS_HIGH_IOPS_REPLY_QUEUES); 5013 /* 5014 * If 31st bit is zero then interrupt coalescing is 5015 * enabled for all reply descriptor post queues. 5016 * If 31st bit is set to one then user can 5017 * enable/disable interrupt coalescing on per reply 5018 * descriptor post queue group(8) basis. So to enable 5019 * interrupt coalescing only on first reply descriptor 5020 * post queue group 31st bit and zero th bit is enabled. 5021 */ 5022 ioc_pg1.ProductSpecific = cpu_to_le32(0x80000000 | 5023 ((1 << MPT3SAS_HIGH_IOPS_REPLY_QUEUES/8) - 1)); 5024 rc = mpt3sas_config_set_ioc_pg1(ioc, &mpi_reply, &ioc_pg1); 5025 if (rc) 5026 return rc; 5027 ioc_info(ioc, "performance mode: balanced\n"); 5028 return 0; 5029 } 5030 fallthrough; 5031 case MPT_PERF_MODE_LATENCY: 5032 /* 5033 * Enable interrupt coalescing on all reply queues 5034 * with timeout value 0xA 5035 */ 5036 ioc_pg1.CoalescingTimeout = cpu_to_le32(0xa); 5037 ioc_pg1.Flags |= cpu_to_le32(MPI2_IOCPAGE1_REPLY_COALESCING); 5038 ioc_pg1.ProductSpecific = 0; 5039 rc = mpt3sas_config_set_ioc_pg1(ioc, &mpi_reply, &ioc_pg1); 5040 if (rc) 5041 return rc; 5042 ioc_info(ioc, "performance mode: latency\n"); 5043 break; 5044 case MPT_PERF_MODE_IOPS: 5045 /* 5046 * Enable interrupt coalescing on all reply queues. 5047 */ 5048 ioc_info(ioc, 5049 "performance mode: iops with coalescing timeout: 0x%x\n", 5050 le32_to_cpu(ioc_pg1.CoalescingTimeout)); 5051 ioc_pg1.Flags |= cpu_to_le32(MPI2_IOCPAGE1_REPLY_COALESCING); 5052 ioc_pg1.ProductSpecific = 0; 5053 rc = mpt3sas_config_set_ioc_pg1(ioc, &mpi_reply, &ioc_pg1); 5054 if (rc) 5055 return rc; 5056 break; 5057 } 5058 return 0; 5059 } 5060 5061 /** 5062 * _base_get_event_diag_triggers - get event diag trigger values from 5063 * persistent pages 5064 * @ioc : per adapter object 5065 * 5066 * Return: nothing. 
5067 */ 5068 static int 5069 _base_get_event_diag_triggers(struct MPT3SAS_ADAPTER *ioc) 5070 { 5071 Mpi26DriverTriggerPage2_t trigger_pg2; 5072 struct SL_WH_EVENT_TRIGGER_T *event_tg; 5073 MPI26_DRIVER_MPI_EVENT_TRIGGER_ENTRY *mpi_event_tg; 5074 Mpi2ConfigReply_t mpi_reply; 5075 int r = 0, i = 0; 5076 u16 count = 0; 5077 u16 ioc_status; 5078 5079 r = mpt3sas_config_get_driver_trigger_pg2(ioc, &mpi_reply, 5080 &trigger_pg2); 5081 if (r) 5082 return r; 5083 5084 ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & 5085 MPI2_IOCSTATUS_MASK; 5086 if (ioc_status != MPI2_IOCSTATUS_SUCCESS) { 5087 dinitprintk(ioc, 5088 ioc_err(ioc, 5089 "%s: Failed to get trigger pg2, ioc_status(0x%04x)\n", 5090 __func__, ioc_status)); 5091 return 0; 5092 } 5093 5094 if (le16_to_cpu(trigger_pg2.NumMPIEventTrigger)) { 5095 count = le16_to_cpu(trigger_pg2.NumMPIEventTrigger); 5096 count = min_t(u16, NUM_VALID_ENTRIES, count); 5097 ioc->diag_trigger_event.ValidEntries = count; 5098 5099 event_tg = &ioc->diag_trigger_event.EventTriggerEntry[0]; 5100 mpi_event_tg = &trigger_pg2.MPIEventTriggers[0]; 5101 for (i = 0; i < count; i++) { 5102 event_tg->EventValue = le16_to_cpu( 5103 mpi_event_tg->MPIEventCode); 5104 event_tg->LogEntryQualifier = le16_to_cpu( 5105 mpi_event_tg->MPIEventCodeSpecific); 5106 event_tg++; 5107 mpi_event_tg++; 5108 } 5109 } 5110 return 0; 5111 } 5112 5113 /** 5114 * _base_get_scsi_diag_triggers - get scsi diag trigger values from 5115 * persistent pages 5116 * @ioc : per adapter object 5117 * 5118 * Return: 0 on success; otherwise return failure status. 5119 */ 5120 static int 5121 _base_get_scsi_diag_triggers(struct MPT3SAS_ADAPTER *ioc) 5122 { 5123 Mpi26DriverTriggerPage3_t trigger_pg3; 5124 struct SL_WH_SCSI_TRIGGER_T *scsi_tg; 5125 MPI26_DRIVER_SCSI_SENSE_TRIGGER_ENTRY *mpi_scsi_tg; 5126 Mpi2ConfigReply_t mpi_reply; 5127 int r = 0, i = 0; 5128 u16 count = 0; 5129 u16 ioc_status; 5130 5131 r = mpt3sas_config_get_driver_trigger_pg3(ioc, &mpi_reply, 5132 &trigger_pg3); 5133 if (r) 5134 return r; 5135 5136 ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & 5137 MPI2_IOCSTATUS_MASK; 5138 if (ioc_status != MPI2_IOCSTATUS_SUCCESS) { 5139 dinitprintk(ioc, 5140 ioc_err(ioc, 5141 "%s: Failed to get trigger pg3, ioc_status(0x%04x)\n", 5142 __func__, ioc_status)); 5143 return 0; 5144 } 5145 5146 if (le16_to_cpu(trigger_pg3.NumSCSISenseTrigger)) { 5147 count = le16_to_cpu(trigger_pg3.NumSCSISenseTrigger); 5148 count = min_t(u16, NUM_VALID_ENTRIES, count); 5149 ioc->diag_trigger_scsi.ValidEntries = count; 5150 5151 scsi_tg = &ioc->diag_trigger_scsi.SCSITriggerEntry[0]; 5152 mpi_scsi_tg = &trigger_pg3.SCSISenseTriggers[0]; 5153 for (i = 0; i < count; i++) { 5154 scsi_tg->ASCQ = mpi_scsi_tg->ASCQ; 5155 scsi_tg->ASC = mpi_scsi_tg->ASC; 5156 scsi_tg->SenseKey = mpi_scsi_tg->SenseKey; 5157 5158 scsi_tg++; 5159 mpi_scsi_tg++; 5160 } 5161 } 5162 return 0; 5163 } 5164 5165 /** 5166 * _base_get_mpi_diag_triggers - get mpi diag trigger values from 5167 * persistent pages 5168 * @ioc : per adapter object 5169 * 5170 * Return: 0 on success; otherwise return failure status. 
5171 */ 5172 static int 5173 _base_get_mpi_diag_triggers(struct MPT3SAS_ADAPTER *ioc) 5174 { 5175 Mpi26DriverTriggerPage4_t trigger_pg4; 5176 struct SL_WH_MPI_TRIGGER_T *status_tg; 5177 MPI26_DRIVER_IOCSTATUS_LOGINFO_TRIGGER_ENTRY *mpi_status_tg; 5178 Mpi2ConfigReply_t mpi_reply; 5179 int r = 0, i = 0; 5180 u16 count = 0; 5181 u16 ioc_status; 5182 5183 r = mpt3sas_config_get_driver_trigger_pg4(ioc, &mpi_reply, 5184 &trigger_pg4); 5185 if (r) 5186 return r; 5187 5188 ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & 5189 MPI2_IOCSTATUS_MASK; 5190 if (ioc_status != MPI2_IOCSTATUS_SUCCESS) { 5191 dinitprintk(ioc, 5192 ioc_err(ioc, 5193 "%s: Failed to get trigger pg4, ioc_status(0x%04x)\n", 5194 __func__, ioc_status)); 5195 return 0; 5196 } 5197 5198 if (le16_to_cpu(trigger_pg4.NumIOCStatusLogInfoTrigger)) { 5199 count = le16_to_cpu(trigger_pg4.NumIOCStatusLogInfoTrigger); 5200 count = min_t(u16, NUM_VALID_ENTRIES, count); 5201 ioc->diag_trigger_mpi.ValidEntries = count; 5202 5203 status_tg = &ioc->diag_trigger_mpi.MPITriggerEntry[0]; 5204 mpi_status_tg = &trigger_pg4.IOCStatusLoginfoTriggers[0]; 5205 5206 for (i = 0; i < count; i++) { 5207 status_tg->IOCStatus = le16_to_cpu( 5208 mpi_status_tg->IOCStatus); 5209 status_tg->IocLogInfo = le32_to_cpu( 5210 mpi_status_tg->LogInfo); 5211 5212 status_tg++; 5213 mpi_status_tg++; 5214 } 5215 } 5216 return 0; 5217 } 5218 5219 /** 5220 * _base_get_master_diag_triggers - get master diag trigger values from 5221 * persistent pages 5222 * @ioc : per adapter object 5223 * 5224 * Return: nothing. 5225 */ 5226 static int 5227 _base_get_master_diag_triggers(struct MPT3SAS_ADAPTER *ioc) 5228 { 5229 Mpi26DriverTriggerPage1_t trigger_pg1; 5230 Mpi2ConfigReply_t mpi_reply; 5231 int r; 5232 u16 ioc_status; 5233 5234 r = mpt3sas_config_get_driver_trigger_pg1(ioc, &mpi_reply, 5235 &trigger_pg1); 5236 if (r) 5237 return r; 5238 5239 ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & 5240 MPI2_IOCSTATUS_MASK; 5241 if (ioc_status != MPI2_IOCSTATUS_SUCCESS) { 5242 dinitprintk(ioc, 5243 ioc_err(ioc, 5244 "%s: Failed to get trigger pg1, ioc_status(0x%04x)\n", 5245 __func__, ioc_status)); 5246 return 0; 5247 } 5248 5249 if (le16_to_cpu(trigger_pg1.NumMasterTrigger)) 5250 ioc->diag_trigger_master.MasterData |= 5251 le32_to_cpu( 5252 trigger_pg1.MasterTriggers[0].MasterTriggerFlags); 5253 return 0; 5254 } 5255 5256 /** 5257 * _base_check_for_trigger_pages_support - checks whether HBA FW supports 5258 * driver trigger pages or not 5259 * @ioc : per adapter object 5260 * @trigger_flags : address where trigger page0's TriggerFlags value is copied 5261 * 5262 * Return: trigger flags mask if HBA FW supports driver trigger pages; 5263 * otherwise returns %-EFAULT if driver trigger pages are not supported by FW or 5264 * return EAGAIN if diag reset occurred due to FW fault and asking the 5265 * caller to retry the command. 
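 *
 * Usage sketch (this mirrors _base_get_diag_triggers() further down): the
 * caller checks the return value first and only then tests the validity bits
 * in *trigger_flags, e.g. MPI26_DRIVER_TRIGGER0_FLAG_MASTER_TRIGGER_VALID,
 * before reading the corresponding driver trigger page.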
5266 * 5267 */ 5268 static int 5269 _base_check_for_trigger_pages_support(struct MPT3SAS_ADAPTER *ioc, u32 *trigger_flags) 5270 { 5271 Mpi26DriverTriggerPage0_t trigger_pg0; 5272 int r = 0; 5273 Mpi2ConfigReply_t mpi_reply; 5274 u16 ioc_status; 5275 5276 r = mpt3sas_config_get_driver_trigger_pg0(ioc, &mpi_reply, 5277 &trigger_pg0); 5278 if (r) 5279 return r; 5280 5281 ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & 5282 MPI2_IOCSTATUS_MASK; 5283 if (ioc_status != MPI2_IOCSTATUS_SUCCESS) 5284 return -EFAULT; 5285 5286 *trigger_flags = le16_to_cpu(trigger_pg0.TriggerFlags); 5287 return 0; 5288 } 5289 5290 /** 5291 * _base_get_diag_triggers - Retrieve diag trigger values from 5292 * persistent pages. 5293 * @ioc : per adapter object 5294 * 5295 * Return: zero on success; otherwise return EAGAIN error codes 5296 * asking the caller to retry. 5297 */ 5298 static int 5299 _base_get_diag_triggers(struct MPT3SAS_ADAPTER *ioc) 5300 { 5301 int trigger_flags; 5302 int r; 5303 5304 /* 5305 * Default setting of master trigger. 5306 */ 5307 ioc->diag_trigger_master.MasterData = 5308 (MASTER_TRIGGER_FW_FAULT + MASTER_TRIGGER_ADAPTER_RESET); 5309 5310 r = _base_check_for_trigger_pages_support(ioc, &trigger_flags); 5311 if (r) { 5312 if (r == -EAGAIN) 5313 return r; 5314 /* 5315 * Don't go for error handling when FW doesn't support 5316 * driver trigger pages. 5317 */ 5318 return 0; 5319 } 5320 5321 ioc->supports_trigger_pages = 1; 5322 5323 /* 5324 * Retrieve master diag trigger values from driver trigger pg1 5325 * if master trigger bit enabled in TriggerFlags. 5326 */ 5327 if ((u16)trigger_flags & 5328 MPI26_DRIVER_TRIGGER0_FLAG_MASTER_TRIGGER_VALID) { 5329 r = _base_get_master_diag_triggers(ioc); 5330 if (r) 5331 return r; 5332 } 5333 5334 /* 5335 * Retrieve event diag trigger values from driver trigger pg2 5336 * if event trigger bit enabled in TriggerFlags. 5337 */ 5338 if ((u16)trigger_flags & 5339 MPI26_DRIVER_TRIGGER0_FLAG_MPI_EVENT_TRIGGER_VALID) { 5340 r = _base_get_event_diag_triggers(ioc); 5341 if (r) 5342 return r; 5343 } 5344 5345 /* 5346 * Retrieve scsi diag trigger values from driver trigger pg3 5347 * if scsi trigger bit enabled in TriggerFlags. 5348 */ 5349 if ((u16)trigger_flags & 5350 MPI26_DRIVER_TRIGGER0_FLAG_SCSI_SENSE_TRIGGER_VALID) { 5351 r = _base_get_scsi_diag_triggers(ioc); 5352 if (r) 5353 return r; 5354 } 5355 /* 5356 * Retrieve mpi error diag trigger values from driver trigger pg4 5357 * if loginfo trigger bit enabled in TriggerFlags. 5358 */ 5359 if ((u16)trigger_flags & 5360 MPI26_DRIVER_TRIGGER0_FLAG_LOGINFO_TRIGGER_VALID) { 5361 r = _base_get_mpi_diag_triggers(ioc); 5362 if (r) 5363 return r; 5364 } 5365 return 0; 5366 } 5367 5368 /** 5369 * _base_update_diag_trigger_pages - Update the driver trigger pages after 5370 * online FW update, in case updated FW supports driver 5371 * trigger pages. 5372 * @ioc : per adapter object 5373 * 5374 * Return: nothing. 
5375 */ 5376 static void 5377 _base_update_diag_trigger_pages(struct MPT3SAS_ADAPTER *ioc) 5378 { 5379 5380 if (ioc->diag_trigger_master.MasterData) 5381 mpt3sas_config_update_driver_trigger_pg1(ioc, 5382 &ioc->diag_trigger_master, 1); 5383 5384 if (ioc->diag_trigger_event.ValidEntries) 5385 mpt3sas_config_update_driver_trigger_pg2(ioc, 5386 &ioc->diag_trigger_event, 1); 5387 5388 if (ioc->diag_trigger_scsi.ValidEntries) 5389 mpt3sas_config_update_driver_trigger_pg3(ioc, 5390 &ioc->diag_trigger_scsi, 1); 5391 5392 if (ioc->diag_trigger_mpi.ValidEntries) 5393 mpt3sas_config_update_driver_trigger_pg4(ioc, 5394 &ioc->diag_trigger_mpi, 1); 5395 } 5396 5397 /** 5398 * _base_assign_fw_reported_qd - Get FW reported QD for SAS/SATA devices. 5399 * - On failure set default QD values. 5400 * @ioc : per adapter object 5401 * 5402 * Returns 0 for success, non-zero for failure. 5403 * 5404 */ 5405 static int _base_assign_fw_reported_qd(struct MPT3SAS_ADAPTER *ioc) 5406 { 5407 Mpi2ConfigReply_t mpi_reply; 5408 Mpi2SasIOUnitPage1_t sas_iounit_pg1; 5409 Mpi26PCIeIOUnitPage1_t pcie_iounit_pg1; 5410 u16 depth; 5411 int rc = 0; 5412 5413 ioc->max_wideport_qd = MPT3SAS_SAS_QUEUE_DEPTH; 5414 ioc->max_narrowport_qd = MPT3SAS_SAS_QUEUE_DEPTH; 5415 ioc->max_sata_qd = MPT3SAS_SATA_QUEUE_DEPTH; 5416 ioc->max_nvme_qd = MPT3SAS_NVME_QUEUE_DEPTH; 5417 if (!ioc->is_gen35_ioc) 5418 goto out; 5419 /* sas iounit page 1 */ 5420 rc = mpt3sas_config_get_sas_iounit_pg1(ioc, &mpi_reply, 5421 &sas_iounit_pg1, sizeof(Mpi2SasIOUnitPage1_t)); 5422 if (rc) { 5423 pr_err("%s: failure at %s:%d/%s()!\n", 5424 ioc->name, __FILE__, __LINE__, __func__); 5425 goto out; 5426 } 5427 5428 depth = le16_to_cpu(sas_iounit_pg1.SASWideMaxQueueDepth); 5429 ioc->max_wideport_qd = (depth ? depth : MPT3SAS_SAS_QUEUE_DEPTH); 5430 5431 depth = le16_to_cpu(sas_iounit_pg1.SASNarrowMaxQueueDepth); 5432 ioc->max_narrowport_qd = (depth ? depth : MPT3SAS_SAS_QUEUE_DEPTH); 5433 5434 depth = sas_iounit_pg1.SATAMaxQDepth; 5435 ioc->max_sata_qd = (depth ? depth : MPT3SAS_SATA_QUEUE_DEPTH); 5436 5437 /* pcie iounit page 1 */ 5438 rc = mpt3sas_config_get_pcie_iounit_pg1(ioc, &mpi_reply, 5439 &pcie_iounit_pg1, sizeof(Mpi26PCIeIOUnitPage1_t)); 5440 if (rc) { 5441 pr_err("%s: failure at %s:%d/%s()!\n", 5442 ioc->name, __FILE__, __LINE__, __func__); 5443 goto out; 5444 } 5445 ioc->max_nvme_qd = (le16_to_cpu(pcie_iounit_pg1.NVMeMaxQueueDepth)) ? 5446 (le16_to_cpu(pcie_iounit_pg1.NVMeMaxQueueDepth)) : 5447 MPT3SAS_NVME_QUEUE_DEPTH; 5448 out: 5449 dinitprintk(ioc, pr_err( 5450 "MaxWidePortQD: 0x%x MaxNarrowPortQD: 0x%x MaxSataQD: 0x%x MaxNvmeQD: 0x%x\n", 5451 ioc->max_wideport_qd, ioc->max_narrowport_qd, 5452 ioc->max_sata_qd, ioc->max_nvme_qd)); 5453 return rc; 5454 } 5455 5456 /** 5457 * mpt3sas_atto_validate_nvram - validate the ATTO nvram read from mfg pg1 5458 * 5459 * @ioc : per adapter object 5460 * @n : ptr to the ATTO nvram structure 5461 * Return: 0 for success, non-zero for failure. 
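 *
 * Validation sketch: starting from ATTO_SASNVR_CKSUM_SEED, the u8 sum over
 * every byte of the structure must wrap to zero, i.e.
 *
 *   (u8)(ATTO_SASNVR_CKSUM_SEED + sum_of_all_bytes(n)) == 0
 *
 * otherwise the NVRAM image is rejected as corrupt before the signature,
 * version and SAS address checks below are applied.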
5462 */ 5463 static int 5464 mpt3sas_atto_validate_nvram(struct MPT3SAS_ADAPTER *ioc, 5465 struct ATTO_SAS_NVRAM *n) 5466 { 5467 int r = -EINVAL; 5468 union ATTO_SAS_ADDRESS *s1; 5469 u32 len; 5470 u8 *pb; 5471 u8 ckSum; 5472 5473 /* validate nvram checksum */ 5474 pb = (u8 *) n; 5475 ckSum = ATTO_SASNVR_CKSUM_SEED; 5476 len = sizeof(struct ATTO_SAS_NVRAM); 5477 5478 while (len--) 5479 ckSum = ckSum + pb[len]; 5480 5481 if (ckSum) { 5482 ioc_err(ioc, "Invalid ATTO NVRAM checksum\n"); 5483 return r; 5484 } 5485 5486 s1 = (union ATTO_SAS_ADDRESS *) n->SasAddr; 5487 5488 if (n->Signature[0] != 'E' 5489 || n->Signature[1] != 'S' 5490 || n->Signature[2] != 'A' 5491 || n->Signature[3] != 'S') 5492 ioc_err(ioc, "Invalid ATTO NVRAM signature\n"); 5493 else if (n->Version > ATTO_SASNVR_VERSION) 5494 ioc_info(ioc, "Invalid ATTO NVRAM version"); 5495 else if ((n->SasAddr[7] & (ATTO_SAS_ADDR_ALIGN - 1)) 5496 || s1->b[0] != 0x50 5497 || s1->b[1] != 0x01 5498 || s1->b[2] != 0x08 5499 || (s1->b[3] & 0xF0) != 0x60 5500 || ((s1->b[3] & 0x0F) | le32_to_cpu(s1->d[1])) == 0) { 5501 ioc_err(ioc, "Invalid ATTO SAS address\n"); 5502 } else 5503 r = 0; 5504 return r; 5505 } 5506 5507 /** 5508 * mpt3sas_atto_get_sas_addr - get the ATTO SAS address from mfg page 1 5509 * 5510 * @ioc : per adapter object 5511 * @sas_addr : return sas address 5512 * Return: 0 for success, non-zero for failure. 5513 */ 5514 static int 5515 mpt3sas_atto_get_sas_addr(struct MPT3SAS_ADAPTER *ioc, union ATTO_SAS_ADDRESS *sas_addr) 5516 { 5517 Mpi2ManufacturingPage1_t mfg_pg1; 5518 Mpi2ConfigReply_t mpi_reply; 5519 struct ATTO_SAS_NVRAM *nvram; 5520 int r; 5521 __be64 addr; 5522 5523 r = mpt3sas_config_get_manufacturing_pg1(ioc, &mpi_reply, &mfg_pg1); 5524 if (r) { 5525 ioc_err(ioc, "Failed to read manufacturing page 1\n"); 5526 return r; 5527 } 5528 5529 /* validate nvram */ 5530 nvram = (struct ATTO_SAS_NVRAM *) mfg_pg1.VPD; 5531 r = mpt3sas_atto_validate_nvram(ioc, nvram); 5532 if (r) 5533 return r; 5534 5535 addr = *((__be64 *) nvram->SasAddr); 5536 sas_addr->q = cpu_to_le64(be64_to_cpu(addr)); 5537 return r; 5538 } 5539 5540 /** 5541 * mpt3sas_atto_init - perform initializaion for ATTO branded 5542 * adapter. 5543 * @ioc : per adapter object 5544 *5 5545 * Return: 0 for success, non-zero for failure. 
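 *
 * Rough outline of the steps below (editorial summary): fetch the adapter
 * SAS address from Manufacturing Page 1, read BIOS Page 4, then for each phy
 * patch
 *
 *   ReassignmentWWID       = sas_addr with the phy index added to byte 7
 *   ReassignmentDeviceName = sas_addr with ATTO_SAS_ADDR_DEVNAME_BIAS added
 *
 * and finally write BIOS Page 4 back.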
5546 */ 5547 static int 5548 mpt3sas_atto_init(struct MPT3SAS_ADAPTER *ioc) 5549 { 5550 int sz = 0; 5551 Mpi2BiosPage4_t *bios_pg4 = NULL; 5552 Mpi2ConfigReply_t mpi_reply; 5553 int r; 5554 int ix; 5555 union ATTO_SAS_ADDRESS sas_addr; 5556 union ATTO_SAS_ADDRESS temp; 5557 union ATTO_SAS_ADDRESS bias; 5558 5559 r = mpt3sas_atto_get_sas_addr(ioc, &sas_addr); 5560 if (r) 5561 return r; 5562 5563 /* get header first to get size */ 5564 r = mpt3sas_config_get_bios_pg4(ioc, &mpi_reply, NULL, 0); 5565 if (r) { 5566 ioc_err(ioc, "Failed to read ATTO bios page 4 header.\n"); 5567 return r; 5568 } 5569 5570 sz = mpi_reply.Header.PageLength * sizeof(u32); 5571 bios_pg4 = kzalloc(sz, GFP_KERNEL); 5572 if (!bios_pg4) { 5573 ioc_err(ioc, "Failed to allocate memory for ATTO bios page.\n"); 5574 return -ENOMEM; 5575 } 5576 5577 /* read bios page 4 */ 5578 r = mpt3sas_config_get_bios_pg4(ioc, &mpi_reply, bios_pg4, sz); 5579 if (r) { 5580 ioc_err(ioc, "Failed to read ATTO bios page 4\n"); 5581 goto out; 5582 } 5583 5584 /* Update bios page 4 with the ATTO WWID */ 5585 bias.q = sas_addr.q; 5586 bias.b[7] += ATTO_SAS_ADDR_DEVNAME_BIAS; 5587 5588 for (ix = 0; ix < bios_pg4->NumPhys; ix++) { 5589 temp.q = sas_addr.q; 5590 temp.b[7] += ix; 5591 bios_pg4->Phy[ix].ReassignmentWWID = temp.q; 5592 bios_pg4->Phy[ix].ReassignmentDeviceName = bias.q; 5593 } 5594 r = mpt3sas_config_set_bios_pg4(ioc, &mpi_reply, bios_pg4, sz); 5595 5596 out: 5597 kfree(bios_pg4); 5598 return r; 5599 } 5600 5601 /** 5602 * _base_static_config_pages - static start of day config pages 5603 * @ioc: per adapter object 5604 */ 5605 static int 5606 _base_static_config_pages(struct MPT3SAS_ADAPTER *ioc) 5607 { 5608 Mpi2IOUnitPage8_t iounit_pg8; 5609 Mpi2ConfigReply_t mpi_reply; 5610 u32 iounit_pg1_flags; 5611 int tg_flags = 0; 5612 int rc; 5613 ioc->nvme_abort_timeout = 30; 5614 5615 rc = mpt3sas_config_get_manufacturing_pg0(ioc, &mpi_reply, 5616 &ioc->manu_pg0); 5617 if (rc) 5618 return rc; 5619 if (ioc->ir_firmware) { 5620 rc = mpt3sas_config_get_manufacturing_pg10(ioc, &mpi_reply, 5621 &ioc->manu_pg10); 5622 if (rc) 5623 return rc; 5624 } 5625 5626 if (ioc->pdev->vendor == MPI2_MFGPAGE_VENDORID_ATTO) { 5627 rc = mpt3sas_atto_init(ioc); 5628 if (rc) 5629 return rc; 5630 } 5631 5632 /* 5633 * Ensure correct T10 PI operation if vendor left EEDPTagMode 5634 * flag unset in NVDATA. 
5635 */ 5636 rc = mpt3sas_config_get_manufacturing_pg11(ioc, &mpi_reply, 5637 &ioc->manu_pg11); 5638 if (rc) 5639 return rc; 5640 if (!ioc->is_gen35_ioc && ioc->manu_pg11.EEDPTagMode == 0) { 5641 pr_err("%s: overriding NVDATA EEDPTagMode setting from 0 to 1\n", 5642 ioc->name); 5643 ioc->manu_pg11.EEDPTagMode = 0x1; 5644 mpt3sas_config_set_manufacturing_pg11(ioc, &mpi_reply, 5645 &ioc->manu_pg11); 5646 } 5647 if (ioc->manu_pg11.AddlFlags2 & NVME_TASK_MNGT_CUSTOM_MASK) 5648 ioc->tm_custom_handling = 1; 5649 else { 5650 ioc->tm_custom_handling = 0; 5651 if (ioc->manu_pg11.NVMeAbortTO < NVME_TASK_ABORT_MIN_TIMEOUT) 5652 ioc->nvme_abort_timeout = NVME_TASK_ABORT_MIN_TIMEOUT; 5653 else if (ioc->manu_pg11.NVMeAbortTO > 5654 NVME_TASK_ABORT_MAX_TIMEOUT) 5655 ioc->nvme_abort_timeout = NVME_TASK_ABORT_MAX_TIMEOUT; 5656 else 5657 ioc->nvme_abort_timeout = ioc->manu_pg11.NVMeAbortTO; 5658 } 5659 ioc->time_sync_interval = 5660 ioc->manu_pg11.TimeSyncInterval & MPT3SAS_TIMESYNC_MASK; 5661 if (ioc->time_sync_interval) { 5662 if (ioc->manu_pg11.TimeSyncInterval & MPT3SAS_TIMESYNC_UNIT_MASK) 5663 ioc->time_sync_interval = 5664 ioc->time_sync_interval * SECONDS_PER_HOUR; 5665 else 5666 ioc->time_sync_interval = 5667 ioc->time_sync_interval * SECONDS_PER_MIN; 5668 dinitprintk(ioc, ioc_info(ioc, 5669 "Driver-FW TimeSync interval is %d seconds. ManuPg11 TimeSync Unit is in %s\n", 5670 ioc->time_sync_interval, (ioc->manu_pg11.TimeSyncInterval & 5671 MPT3SAS_TIMESYNC_UNIT_MASK) ? "Hour" : "Minute")); 5672 } else { 5673 if (ioc->is_gen35_ioc) 5674 ioc_warn(ioc, 5675 "TimeSync Interval in Manuf page-11 is not enabled. Periodic Time-Sync will be disabled\n"); 5676 } 5677 rc = _base_assign_fw_reported_qd(ioc); 5678 if (rc) 5679 return rc; 5680 5681 /* 5682 * ATTO doesn't use bios pages 2 and 3 for bios settings. 5683 */ 5684 if (ioc->pdev->vendor == MPI2_MFGPAGE_VENDORID_ATTO) 5685 ioc->bios_pg3.BiosVersion = 0; 5686 else { 5687 rc = mpt3sas_config_get_bios_pg2(ioc, &mpi_reply, &ioc->bios_pg2); 5688 if (rc) 5689 return rc; 5690 rc = mpt3sas_config_get_bios_pg3(ioc, &mpi_reply, &ioc->bios_pg3); 5691 if (rc) 5692 return rc; 5693 } 5694 5695 rc = mpt3sas_config_get_ioc_pg8(ioc, &mpi_reply, &ioc->ioc_pg8); 5696 if (rc) 5697 return rc; 5698 rc = mpt3sas_config_get_iounit_pg0(ioc, &mpi_reply, &ioc->iounit_pg0); 5699 if (rc) 5700 return rc; 5701 rc = mpt3sas_config_get_iounit_pg1(ioc, &mpi_reply, &ioc->iounit_pg1); 5702 if (rc) 5703 return rc; 5704 rc = mpt3sas_config_get_iounit_pg8(ioc, &mpi_reply, &iounit_pg8); 5705 if (rc) 5706 return rc; 5707 _base_display_ioc_capabilities(ioc); 5708 5709 /* 5710 * Enable task_set_full handling in iounit_pg1 when the 5711 * facts capabilities indicate that it's supported.
5712 */ 5713 iounit_pg1_flags = le32_to_cpu(ioc->iounit_pg1.Flags); 5714 if ((ioc->facts.IOCCapabilities & 5715 MPI2_IOCFACTS_CAPABILITY_TASK_SET_FULL_HANDLING)) 5716 iounit_pg1_flags &= 5717 ~MPI2_IOUNITPAGE1_DISABLE_TASK_SET_FULL_HANDLING; 5718 else 5719 iounit_pg1_flags |= 5720 MPI2_IOUNITPAGE1_DISABLE_TASK_SET_FULL_HANDLING; 5721 ioc->iounit_pg1.Flags = cpu_to_le32(iounit_pg1_flags); 5722 rc = mpt3sas_config_set_iounit_pg1(ioc, &mpi_reply, &ioc->iounit_pg1); 5723 if (rc) 5724 return rc; 5725 5726 if (iounit_pg8.NumSensors) 5727 ioc->temp_sensors_count = iounit_pg8.NumSensors; 5728 if (ioc->is_aero_ioc) { 5729 rc = _base_update_ioc_page1_inlinewith_perf_mode(ioc); 5730 if (rc) 5731 return rc; 5732 } 5733 if (ioc->is_gen35_ioc) { 5734 if (ioc->is_driver_loading) { 5735 rc = _base_get_diag_triggers(ioc); 5736 if (rc) 5737 return rc; 5738 } else { 5739 /* 5740 * In case of online HBA FW update operation, 5741 * check whether updated FW supports the driver trigger 5742 * pages or not. 5743 * - If previous FW has not supported driver trigger 5744 * pages and newer FW supports them then update these 5745 * pages with current diag trigger values. 5746 * - If previous FW has supported driver trigger pages 5747 * and new FW doesn't support them then disable 5748 * support_trigger_pages flag. 5749 */ 5750 _base_check_for_trigger_pages_support(ioc, &tg_flags); 5751 if (!ioc->supports_trigger_pages && tg_flags != -EFAULT) 5752 _base_update_diag_trigger_pages(ioc); 5753 else if (ioc->supports_trigger_pages && 5754 tg_flags == -EFAULT) 5755 ioc->supports_trigger_pages = 0; 5756 } 5757 } 5758 return 0; 5759 } 5760 5761 /** 5762 * mpt3sas_free_enclosure_list - release memory 5763 * @ioc: per adapter object 5764 * 5765 * Free memory allocated during enclosure add. 5766 */ 5767 void 5768 mpt3sas_free_enclosure_list(struct MPT3SAS_ADAPTER *ioc) 5769 { 5770 struct _enclosure_node *enclosure_dev, *enclosure_dev_next; 5771 5772 /* Free enclosure list */ 5773 list_for_each_entry_safe(enclosure_dev, 5774 enclosure_dev_next, &ioc->enclosure_list, list) { 5775 list_del(&enclosure_dev->list); 5776 kfree(enclosure_dev); 5777 } 5778 } 5779 5780 /** 5781 * _base_release_memory_pools - release memory 5782 * @ioc: per adapter object 5783 * 5784 * Free memory allocated from _base_allocate_memory_pools. 5785 */ 5786 static void 5787 _base_release_memory_pools(struct MPT3SAS_ADAPTER *ioc) 5788 { 5789 int i = 0; 5790 int j = 0; 5791 int dma_alloc_count = 0; 5792 struct chain_tracker *ct; 5793 int count = ioc->rdpq_array_enable ? 
ioc->reply_queue_count : 1; 5794 5795 dexitprintk(ioc, ioc_info(ioc, "%s\n", __func__)); 5796 5797 if (ioc->request) { 5798 dma_free_coherent(&ioc->pdev->dev, ioc->request_dma_sz, 5799 ioc->request, ioc->request_dma); 5800 dexitprintk(ioc, 5801 ioc_info(ioc, "request_pool(0x%p): free\n", 5802 ioc->request)); 5803 ioc->request = NULL; 5804 } 5805 5806 if (ioc->sense) { 5807 dma_pool_free(ioc->sense_dma_pool, ioc->sense, ioc->sense_dma); 5808 dma_pool_destroy(ioc->sense_dma_pool); 5809 dexitprintk(ioc, 5810 ioc_info(ioc, "sense_pool(0x%p): free\n", 5811 ioc->sense)); 5812 ioc->sense = NULL; 5813 } 5814 5815 if (ioc->reply) { 5816 dma_pool_free(ioc->reply_dma_pool, ioc->reply, ioc->reply_dma); 5817 dma_pool_destroy(ioc->reply_dma_pool); 5818 dexitprintk(ioc, 5819 ioc_info(ioc, "reply_pool(0x%p): free\n", 5820 ioc->reply)); 5821 ioc->reply = NULL; 5822 } 5823 5824 if (ioc->reply_free) { 5825 dma_pool_free(ioc->reply_free_dma_pool, ioc->reply_free, 5826 ioc->reply_free_dma); 5827 dma_pool_destroy(ioc->reply_free_dma_pool); 5828 dexitprintk(ioc, 5829 ioc_info(ioc, "reply_free_pool(0x%p): free\n", 5830 ioc->reply_free)); 5831 ioc->reply_free = NULL; 5832 } 5833 5834 if (ioc->reply_post) { 5835 dma_alloc_count = DIV_ROUND_UP(count, 5836 RDPQ_MAX_INDEX_IN_ONE_CHUNK); 5837 for (i = 0; i < count; i++) { 5838 if (i % RDPQ_MAX_INDEX_IN_ONE_CHUNK == 0 5839 && dma_alloc_count) { 5840 if (ioc->reply_post[i].reply_post_free) { 5841 dma_pool_free( 5842 ioc->reply_post_free_dma_pool, 5843 ioc->reply_post[i].reply_post_free, 5844 ioc->reply_post[i].reply_post_free_dma); 5845 dexitprintk(ioc, ioc_info(ioc, 5846 "reply_post_free_pool(0x%p): free\n", 5847 ioc->reply_post[i].reply_post_free)); 5848 ioc->reply_post[i].reply_post_free = 5849 NULL; 5850 } 5851 --dma_alloc_count; 5852 } 5853 } 5854 dma_pool_destroy(ioc->reply_post_free_dma_pool); 5855 if (ioc->reply_post_free_array && 5856 ioc->rdpq_array_enable) { 5857 dma_pool_free(ioc->reply_post_free_array_dma_pool, 5858 ioc->reply_post_free_array, 5859 ioc->reply_post_free_array_dma); 5860 ioc->reply_post_free_array = NULL; 5861 } 5862 dma_pool_destroy(ioc->reply_post_free_array_dma_pool); 5863 kfree(ioc->reply_post); 5864 } 5865 5866 if (ioc->pcie_sgl_dma_pool) { 5867 for (i = 0; i < ioc->scsiio_depth; i++) { 5868 dma_pool_free(ioc->pcie_sgl_dma_pool, 5869 ioc->pcie_sg_lookup[i].pcie_sgl, 5870 ioc->pcie_sg_lookup[i].pcie_sgl_dma); 5871 ioc->pcie_sg_lookup[i].pcie_sgl = NULL; 5872 } 5873 dma_pool_destroy(ioc->pcie_sgl_dma_pool); 5874 } 5875 kfree(ioc->pcie_sg_lookup); 5876 ioc->pcie_sg_lookup = NULL; 5877 5878 if (ioc->config_page) { 5879 dexitprintk(ioc, 5880 ioc_info(ioc, "config_page(0x%p): free\n", 5881 ioc->config_page)); 5882 dma_free_coherent(&ioc->pdev->dev, ioc->config_page_sz, 5883 ioc->config_page, ioc->config_page_dma); 5884 } 5885 5886 kfree(ioc->hpr_lookup); 5887 ioc->hpr_lookup = NULL; 5888 kfree(ioc->internal_lookup); 5889 ioc->internal_lookup = NULL; 5890 if (ioc->chain_lookup) { 5891 for (i = 0; i < ioc->scsiio_depth; i++) { 5892 for (j = ioc->chains_per_prp_buffer; 5893 j < ioc->chains_needed_per_io; j++) { 5894 ct = &ioc->chain_lookup[i].chains_per_smid[j]; 5895 if (ct && ct->chain_buffer) 5896 dma_pool_free(ioc->chain_dma_pool, 5897 ct->chain_buffer, 5898 ct->chain_buffer_dma); 5899 } 5900 kfree(ioc->chain_lookup[i].chains_per_smid); 5901 } 5902 dma_pool_destroy(ioc->chain_dma_pool); 5903 kfree(ioc->chain_lookup); 5904 ioc->chain_lookup = NULL; 5905 } 5906 5907 kfree(ioc->io_queue_num); 5908 ioc->io_queue_num = NULL; 5909 } 5910 5911 /** 
5912 * mpt3sas_check_same_4gb_region - checks whether all reply queues in a set 5913 * have the same upper 32 bits in their base memory address. 5914 * @start_address: Base address of a reply queue set 5915 * @pool_sz: Size of a single Reply Descriptor Post Queue pool 5916 * 5917 * Return: 1 if the reply queues in a set have the same upper 32 bits in their 5918 * base memory address, else 0. 5919 */ 5920 static int 5921 mpt3sas_check_same_4gb_region(dma_addr_t start_address, u32 pool_sz) 5922 { 5923 dma_addr_t end_address; 5924 5925 end_address = start_address + pool_sz - 1; 5926 5927 if (upper_32_bits(start_address) == upper_32_bits(end_address)) 5928 return 1; 5929 else 5930 return 0; 5931 } 5932 5933 /** 5934 * _base_reduce_hba_queue_depth - Retry with reduced queue depth 5935 * @ioc: Adapter object 5936 * 5937 * Return: 0 for success, non-zero for failure. 5938 **/ 5939 static inline int 5940 _base_reduce_hba_queue_depth(struct MPT3SAS_ADAPTER *ioc) 5941 { 5942 int reduce_sz = 64; 5943 5944 if ((ioc->hba_queue_depth - reduce_sz) > 5945 (ioc->internal_depth + INTERNAL_SCSIIO_CMDS_COUNT)) { 5946 ioc->hba_queue_depth -= reduce_sz; 5947 return 0; 5948 } else 5949 return -ENOMEM; 5950 } 5951 5952 /** 5953 * _base_allocate_pcie_sgl_pool - Allocating DMA'able memory 5954 * for pcie sgl pools. 5955 * @ioc: Adapter object 5956 * @sz: DMA Pool size 5957 * 5958 * Return: 0 for success, non-zero for failure. 5959 */ 5960 5961 static int 5962 _base_allocate_pcie_sgl_pool(struct MPT3SAS_ADAPTER *ioc, u32 sz) 5963 { 5964 int i = 0, j = 0; 5965 struct chain_tracker *ct; 5966 5967 ioc->pcie_sgl_dma_pool = 5968 dma_pool_create("PCIe SGL pool", &ioc->pdev->dev, sz, 5969 ioc->page_size, 0); 5970 if (!ioc->pcie_sgl_dma_pool) { 5971 ioc_err(ioc, "PCIe SGL pool: dma_pool_create failed\n"); 5972 return -ENOMEM; 5973 } 5974 5975 ioc->chains_per_prp_buffer = sz/ioc->chain_segment_sz; 5976 ioc->chains_per_prp_buffer = 5977 min(ioc->chains_per_prp_buffer, ioc->chains_needed_per_io); 5978 for (i = 0; i < ioc->scsiio_depth; i++) { 5979 ioc->pcie_sg_lookup[i].pcie_sgl = 5980 dma_pool_alloc(ioc->pcie_sgl_dma_pool, GFP_KERNEL, 5981 &ioc->pcie_sg_lookup[i].pcie_sgl_dma); 5982 if (!ioc->pcie_sg_lookup[i].pcie_sgl) { 5983 ioc_err(ioc, "PCIe SGL pool: dma_pool_alloc failed\n"); 5984 return -EAGAIN; 5985 } 5986 5987 if (!mpt3sas_check_same_4gb_region( 5988 ioc->pcie_sg_lookup[i].pcie_sgl_dma, sz)) { 5989 ioc_err(ioc, "PCIE SGLs are not in same 4G !! pcie sgl (0x%p) dma = (0x%llx)\n", 5990 ioc->pcie_sg_lookup[i].pcie_sgl, 5991 (unsigned long long) 5992 ioc->pcie_sg_lookup[i].pcie_sgl_dma); 5993 ioc->use_32bit_dma = true; 5994 return -EAGAIN; 5995 } 5996 5997 for (j = 0; j < ioc->chains_per_prp_buffer; j++) { 5998 ct = &ioc->chain_lookup[i].chains_per_smid[j]; 5999 ct->chain_buffer = 6000 ioc->pcie_sg_lookup[i].pcie_sgl + 6001 (j * ioc->chain_segment_sz); 6002 ct->chain_buffer_dma = 6003 ioc->pcie_sg_lookup[i].pcie_sgl_dma + 6004 (j * ioc->chain_segment_sz); 6005 } 6006 } 6007 dinitprintk(ioc, ioc_info(ioc, 6008 "PCIe sgl pool depth(%d), element_size(%d), pool_size(%d kB)\n", 6009 ioc->scsiio_depth, sz, (sz * ioc->scsiio_depth)/1024)); 6010 dinitprintk(ioc, ioc_info(ioc, 6011 "Number of chains can fit in a PRP page(%d)\n", 6012 ioc->chains_per_prp_buffer)); 6013 return 0; 6014 } 6015 6016 /** 6017 * _base_allocate_chain_dma_pool - Allocating DMA'able memory 6018 * for chain dma pool. 6019 * @ioc: Adapter object 6020 * @sz: DMA Pool size 6021 * 6022 * Return: 0 for success, non-zero for failure.
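 *
 * Every buffer handed out from this pool must stay inside one 4GB region.
 * Worked example (hypothetical addresses and segment size): a chain buffer
 * starting at DMA address 0x1_FFFF_F000 with chain_segment_sz = 0x2000
 * would end at 0x2_0000_0FFF; upper_32_bits() of the start (0x1) and end
 * (0x2) differ, so mpt3sas_check_same_4gb_region() returns 0, the
 * allocation is rejected with -EAGAIN, and the caller falls back to a
 * 32-bit DMA mask and reallocates.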
6023 */ 6024 static int 6025 _base_allocate_chain_dma_pool(struct MPT3SAS_ADAPTER *ioc, u32 sz) 6026 { 6027 int i = 0, j = 0; 6028 struct chain_tracker *ctr; 6029 6030 ioc->chain_dma_pool = dma_pool_create("chain pool", &ioc->pdev->dev, 6031 ioc->chain_segment_sz, 16, 0); 6032 if (!ioc->chain_dma_pool) 6033 return -ENOMEM; 6034 6035 for (i = 0; i < ioc->scsiio_depth; i++) { 6036 for (j = ioc->chains_per_prp_buffer; 6037 j < ioc->chains_needed_per_io; j++) { 6038 ctr = &ioc->chain_lookup[i].chains_per_smid[j]; 6039 ctr->chain_buffer = dma_pool_alloc(ioc->chain_dma_pool, 6040 GFP_KERNEL, &ctr->chain_buffer_dma); 6041 if (!ctr->chain_buffer) 6042 return -EAGAIN; 6043 if (!mpt3sas_check_same_4gb_region( 6044 ctr->chain_buffer_dma, ioc->chain_segment_sz)) { 6045 ioc_err(ioc, 6046 "Chain buffers are not in same 4G !!! Chain buff (0x%p) dma = (0x%llx)\n", 6047 ctr->chain_buffer, 6048 (unsigned long long)ctr->chain_buffer_dma); 6049 ioc->use_32bit_dma = true; 6050 return -EAGAIN; 6051 } 6052 } 6053 } 6054 dinitprintk(ioc, ioc_info(ioc, 6055 "chain_lookup depth (%d), frame_size(%d), pool_size(%d kB)\n", 6056 ioc->scsiio_depth, ioc->chain_segment_sz, ((ioc->scsiio_depth * 6057 (ioc->chains_needed_per_io - ioc->chains_per_prp_buffer) * 6058 ioc->chain_segment_sz))/1024)); 6059 return 0; 6060 } 6061 6062 /** 6063 * _base_allocate_sense_dma_pool - Allocating DMA'able memory 6064 * for sense dma pool. 6065 * @ioc: Adapter object 6066 * @sz: DMA Pool size 6067 * Return: 0 for success, non-zero for failure. 6068 */ 6069 static int 6070 _base_allocate_sense_dma_pool(struct MPT3SAS_ADAPTER *ioc, u32 sz) 6071 { 6072 ioc->sense_dma_pool = 6073 dma_pool_create("sense pool", &ioc->pdev->dev, sz, 4, 0); 6074 if (!ioc->sense_dma_pool) 6075 return -ENOMEM; 6076 ioc->sense = dma_pool_alloc(ioc->sense_dma_pool, 6077 GFP_KERNEL, &ioc->sense_dma); 6078 if (!ioc->sense) 6079 return -EAGAIN; 6080 if (!mpt3sas_check_same_4gb_region(ioc->sense_dma, sz)) { 6081 dinitprintk(ioc, pr_err( 6082 "Bad Sense Pool! sense (0x%p) sense_dma = (0x%llx)\n", 6083 ioc->sense, (unsigned long long) ioc->sense_dma)); 6084 ioc->use_32bit_dma = true; 6085 return -EAGAIN; 6086 } 6087 ioc_info(ioc, 6088 "sense pool(0x%p) - dma(0x%llx): depth(%d), element_size(%d), pool_size (%d kB)\n", 6089 ioc->sense, (unsigned long long)ioc->sense_dma, 6090 ioc->scsiio_depth, SCSI_SENSE_BUFFERSIZE, sz/1024); 6091 return 0; 6092 } 6093 6094 /** 6095 * _base_allocate_reply_pool - Allocating DMA'able memory 6096 * for reply pool. 6097 * @ioc: Adapter object 6098 * @sz: DMA Pool size 6099 * Return: 0 for success, non-zero for failure. 6100 */ 6101 static int 6102 _base_allocate_reply_pool(struct MPT3SAS_ADAPTER *ioc, u32 sz) 6103 { 6104 /* reply pool, 4 byte align */ 6105 ioc->reply_dma_pool = dma_pool_create("reply pool", 6106 &ioc->pdev->dev, sz, 4, 0); 6107 if (!ioc->reply_dma_pool) 6108 return -ENOMEM; 6109 ioc->reply = dma_pool_alloc(ioc->reply_dma_pool, GFP_KERNEL, 6110 &ioc->reply_dma); 6111 if (!ioc->reply) 6112 return -EAGAIN; 6113 if (!mpt3sas_check_same_4gb_region(ioc->reply_dma, sz)) { 6114 dinitprintk(ioc, pr_err( 6115 "Bad Reply Pool! 
Reply (0x%p) Reply dma = (0x%llx)\n", 6116 ioc->reply, (unsigned long long) ioc->reply_dma)); 6117 ioc->use_32bit_dma = true; 6118 return -EAGAIN; 6119 } 6120 ioc->reply_dma_min_address = (u32)(ioc->reply_dma); 6121 ioc->reply_dma_max_address = (u32)(ioc->reply_dma) + sz; 6122 ioc_info(ioc, 6123 "reply pool(0x%p) - dma(0x%llx): depth(%d), frame_size(%d), pool_size(%d kB)\n", 6124 ioc->reply, (unsigned long long)ioc->reply_dma, 6125 ioc->reply_free_queue_depth, ioc->reply_sz, sz/1024); 6126 return 0; 6127 } 6128 6129 /** 6130 * _base_allocate_reply_free_dma_pool - Allocating DMA'able memory 6131 * for reply free dma pool. 6132 * @ioc: Adapter object 6133 * @sz: DMA Pool size 6134 * Return: 0 for success, non-zero for failure. 6135 */ 6136 static int 6137 _base_allocate_reply_free_dma_pool(struct MPT3SAS_ADAPTER *ioc, u32 sz) 6138 { 6139 /* reply free queue, 16 byte align */ 6140 ioc->reply_free_dma_pool = dma_pool_create( 6141 "reply_free pool", &ioc->pdev->dev, sz, 16, 0); 6142 if (!ioc->reply_free_dma_pool) 6143 return -ENOMEM; 6144 ioc->reply_free = dma_pool_alloc(ioc->reply_free_dma_pool, 6145 GFP_KERNEL, &ioc->reply_free_dma); 6146 if (!ioc->reply_free) 6147 return -EAGAIN; 6148 if (!mpt3sas_check_same_4gb_region(ioc->reply_free_dma, sz)) { 6149 dinitprintk(ioc, 6150 pr_err("Bad Reply Free Pool! Reply Free (0x%p) Reply Free dma = (0x%llx)\n", 6151 ioc->reply_free, (unsigned long long) ioc->reply_free_dma)); 6152 ioc->use_32bit_dma = true; 6153 return -EAGAIN; 6154 } 6155 memset(ioc->reply_free, 0, sz); 6156 dinitprintk(ioc, ioc_info(ioc, 6157 "reply_free pool(0x%p): depth(%d), element_size(%d), pool_size(%d kB)\n", 6158 ioc->reply_free, ioc->reply_free_queue_depth, 4, sz/1024)); 6159 dinitprintk(ioc, ioc_info(ioc, 6160 "reply_free_dma (0x%llx)\n", 6161 (unsigned long long)ioc->reply_free_dma)); 6162 return 0; 6163 } 6164 6165 /** 6166 * _base_allocate_reply_post_free_array - Allocating DMA'able memory 6167 * for reply post free array. 6168 * @ioc: Adapter object 6169 * @reply_post_free_array_sz: DMA Pool size 6170 * Return: 0 for success, non-zero for failure. 6171 */ 6172 6173 static int 6174 _base_allocate_reply_post_free_array(struct MPT3SAS_ADAPTER *ioc, 6175 u32 reply_post_free_array_sz) 6176 { 6177 ioc->reply_post_free_array_dma_pool = 6178 dma_pool_create("reply_post_free_array pool", 6179 &ioc->pdev->dev, reply_post_free_array_sz, 16, 0); 6180 if (!ioc->reply_post_free_array_dma_pool) 6181 return -ENOMEM; 6182 ioc->reply_post_free_array = 6183 dma_pool_alloc(ioc->reply_post_free_array_dma_pool, 6184 GFP_KERNEL, &ioc->reply_post_free_array_dma); 6185 if (!ioc->reply_post_free_array) 6186 return -EAGAIN; 6187 if (!mpt3sas_check_same_4gb_region(ioc->reply_post_free_array_dma, 6188 reply_post_free_array_sz)) { 6189 dinitprintk(ioc, pr_err( 6190 "Bad Reply Post Free Array! Reply Post Free Array (0x%p) dma = (0x%llx)\n", 6191 ioc->reply_post_free_array, 6192 (unsigned long long) ioc->reply_post_free_array_dma)); 6193 ioc->use_32bit_dma = true; 6194 return -EAGAIN; 6195 } 6196 return 0; 6197 } 6198 /** 6199 * base_alloc_rdpq_dma_pool - Allocating DMA'able memory 6200 * for reply queues. 6201 * @ioc: per adapter object 6202 * @sz: DMA Pool size 6203 * Return: 0 for success, non-zero for failure. 6204 */ 6205 static int 6206 base_alloc_rdpq_dma_pool(struct MPT3SAS_ADAPTER *ioc, int sz) 6207 { 6208 int i = 0; 6209 u32 dma_alloc_count = 0; 6210 int reply_post_free_sz = ioc->reply_post_queue_depth * 6211 sizeof(Mpi2DefaultReplyDescriptor_t); 6212 int count = ioc->rdpq_array_enable ?
ioc->reply_queue_count : 1; 6213 6214 ioc->reply_post = kcalloc(count, sizeof(struct reply_post_struct), 6215 GFP_KERNEL); 6216 if (!ioc->reply_post) 6217 return -ENOMEM; 6218 /* 6219 * For INVADER_SERIES each set of 8 reply queues (0-7, 8-15, ..) and for 6220 * VENTURA_SERIES each set of 16 reply queues (0-15, 16-31, ..) should 6221 * be within a 4GB boundary, i.e. reply queues in a set must have the 6222 * same upper 32 bits in their memory address. So here the driver 6223 * allocates the DMA'able memory for reply queues accordingly. 6224 * The driver uses the limitation of 6225 * VENTURA_SERIES to manage INVADER_SERIES as well. 6226 */ 6227 dma_alloc_count = DIV_ROUND_UP(count, 6228 RDPQ_MAX_INDEX_IN_ONE_CHUNK); 6229 ioc->reply_post_free_dma_pool = 6230 dma_pool_create("reply_post_free pool", 6231 &ioc->pdev->dev, sz, 16, 0); 6232 if (!ioc->reply_post_free_dma_pool) 6233 return -ENOMEM; 6234 for (i = 0; i < count; i++) { 6235 if ((i % RDPQ_MAX_INDEX_IN_ONE_CHUNK == 0) && dma_alloc_count) { 6236 ioc->reply_post[i].reply_post_free = 6237 dma_pool_zalloc(ioc->reply_post_free_dma_pool, 6238 GFP_KERNEL, 6239 &ioc->reply_post[i].reply_post_free_dma); 6240 if (!ioc->reply_post[i].reply_post_free) 6241 return -ENOMEM; 6242 /* 6243 * Each set of RDPQ pool must satisfy the 4GB boundary 6244 * restriction. 6245 * 1) Check if allocated resources for the RDPQ pool are in 6246 * the same 4GB range. 6247 * 2) If #1 is true, continue with 64 bit DMA. 6248 * 3) If #1 is false, return -EAGAIN, which means free all the 6249 * resources, set the DMA mask to 32 bit and allocate again. 6250 */ 6251 if (!mpt3sas_check_same_4gb_region( 6252 ioc->reply_post[i].reply_post_free_dma, sz)) { 6253 dinitprintk(ioc, 6254 ioc_err(ioc, "bad reply post free pool(0x%p) " 6255 "reply_post_free_dma = (0x%llx)\n", 6256 ioc->reply_post[i].reply_post_free, 6257 (unsigned long long) 6258 ioc->reply_post[i].reply_post_free_dma)); 6259 return -EAGAIN; 6260 } 6261 dma_alloc_count--; 6262 6263 } else { 6264 ioc->reply_post[i].reply_post_free = 6265 (Mpi2ReplyDescriptorsUnion_t *) 6266 ((long)ioc->reply_post[i-1].reply_post_free 6267 + reply_post_free_sz); 6268 ioc->reply_post[i].reply_post_free_dma = 6269 (dma_addr_t) 6270 (ioc->reply_post[i-1].reply_post_free_dma + 6271 reply_post_free_sz); 6272 } 6273 } 6274 return 0; 6275 } 6276 6277 /** 6278 * _base_allocate_memory_pools - allocate start of day memory pools 6279 * @ioc: per adapter object 6280 * 6281 * Return: 0 success, anything else error.
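 *
 * Rough sizing sketch with hypothetical IOC facts (illustration only):
 * HighPriorityCredit = 120, RequestCredit = 10000, no module parameters:
 *
 *   internal_depth     = min(120 + 5, 10000 / 4)         = 125
 *   hi_priority_depth  = 125 - 5                          = 120
 *   max_request_credit = min(10000, MAX_HBA_QUEUE_DEPTH)  = 10000
 *   hba_queue_depth    = 10000 + 120                      = 10120
 *   scsiio_depth       = 10120 - 120 - 125                = 9875
 *   shost->can_queue   = 9875 - INTERNAL_SCSIIO_CMDS_COUNT
 *
 * The request frame, chain, sense, reply and RDPQ pools are then sized
 * from these depths as computed step by step below.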
6282 */ 6283 static int 6284 _base_allocate_memory_pools(struct MPT3SAS_ADAPTER *ioc) 6285 { 6286 struct mpt3sas_facts *facts; 6287 u16 max_sge_elements; 6288 u16 chains_needed_per_io; 6289 u32 sz, total_sz, reply_post_free_sz, reply_post_free_array_sz; 6290 u32 retry_sz; 6291 u32 rdpq_sz = 0, sense_sz = 0; 6292 u16 max_request_credit, nvme_blocks_needed; 6293 unsigned short sg_tablesize; 6294 u16 sge_size; 6295 int i; 6296 int ret = 0, rc = 0; 6297 6298 dinitprintk(ioc, ioc_info(ioc, "%s\n", __func__)); 6299 6300 6301 retry_sz = 0; 6302 facts = &ioc->facts; 6303 6304 /* command line tunables for max sgl entries */ 6305 if (max_sgl_entries != -1) 6306 sg_tablesize = max_sgl_entries; 6307 else { 6308 if (ioc->hba_mpi_version_belonged == MPI2_VERSION) 6309 sg_tablesize = MPT2SAS_SG_DEPTH; 6310 else 6311 sg_tablesize = MPT3SAS_SG_DEPTH; 6312 } 6313 6314 /* max sgl entries <= MPT_KDUMP_MIN_PHYS_SEGMENTS in KDUMP mode */ 6315 if (reset_devices) 6316 sg_tablesize = min_t(unsigned short, sg_tablesize, 6317 MPT_KDUMP_MIN_PHYS_SEGMENTS); 6318 6319 if (ioc->is_mcpu_endpoint) 6320 ioc->shost->sg_tablesize = MPT_MIN_PHYS_SEGMENTS; 6321 else { 6322 if (sg_tablesize < MPT_MIN_PHYS_SEGMENTS) 6323 sg_tablesize = MPT_MIN_PHYS_SEGMENTS; 6324 else if (sg_tablesize > MPT_MAX_PHYS_SEGMENTS) { 6325 sg_tablesize = min_t(unsigned short, sg_tablesize, 6326 SG_MAX_SEGMENTS); 6327 ioc_warn(ioc, "sg_tablesize(%u) is bigger than kernel defined SG_CHUNK_SIZE(%u)\n", 6328 sg_tablesize, MPT_MAX_PHYS_SEGMENTS); 6329 } 6330 ioc->shost->sg_tablesize = sg_tablesize; 6331 } 6332 6333 ioc->internal_depth = min_t(int, (facts->HighPriorityCredit + (5)), 6334 (facts->RequestCredit / 4)); 6335 if (ioc->internal_depth < INTERNAL_CMDS_COUNT) { 6336 if (facts->RequestCredit <= (INTERNAL_CMDS_COUNT + 6337 INTERNAL_SCSIIO_CMDS_COUNT)) { 6338 ioc_err(ioc, "IOC doesn't have enough Request Credits, it has just %d credits\n", 6339 facts->RequestCredit); 6340 return -ENOMEM; 6341 } 6342 ioc->internal_depth = 10; 6343 } 6344 6345 ioc->hi_priority_depth = ioc->internal_depth - (5); 6346 /* command line tunables for max controller queue depth */ 6347 if (max_queue_depth != -1 && max_queue_depth != 0) { 6348 max_request_credit = min_t(u16, max_queue_depth + 6349 ioc->internal_depth, facts->RequestCredit); 6350 if (max_request_credit > MAX_HBA_QUEUE_DEPTH) 6351 max_request_credit = MAX_HBA_QUEUE_DEPTH; 6352 } else if (reset_devices) 6353 max_request_credit = min_t(u16, facts->RequestCredit, 6354 (MPT3SAS_KDUMP_SCSI_IO_DEPTH + ioc->internal_depth)); 6355 else 6356 max_request_credit = min_t(u16, facts->RequestCredit, 6357 MAX_HBA_QUEUE_DEPTH); 6358 6359 /* Firmware maintains additional facts->HighPriorityCredit number of 6360 * credits for HiPriority Request messages, so the hba queue depth will 6361 * be the sum of max_request_credit and the high priority queue depth.
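 *
 * Continuing the hypothetical example above (illustration only), with
 * max_request_credit = 10000 and hi_priority_depth = 120:
 *
 *   hba_queue_depth        = 10000 + 120        = 10120
 *   reply_free_queue_depth = 10120 + 64         = 10184
 *   reply_post_queue_depth = 10120 + 10184 + 1  = 20305
 *                            rounded up to a multiple of 16 -> 20320
 *
 * reply_post_queue_depth remains subject to the firmware limit
 * facts->MaxReplyDescriptorPostQueueDepth checked further below.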
6362 */ 6363 ioc->hba_queue_depth = max_request_credit + ioc->hi_priority_depth; 6364 6365 /* request frame size */ 6366 ioc->request_sz = facts->IOCRequestFrameSize * 4; 6367 6368 /* reply frame size */ 6369 ioc->reply_sz = facts->ReplyFrameSize * 4; 6370 6371 /* chain segment size */ 6372 if (ioc->hba_mpi_version_belonged != MPI2_VERSION) { 6373 if (facts->IOCMaxChainSegmentSize) 6374 ioc->chain_segment_sz = 6375 facts->IOCMaxChainSegmentSize * 6376 MAX_CHAIN_ELEMT_SZ; 6377 else 6378 /* set to 128 bytes size if IOCMaxChainSegmentSize is zero */ 6379 ioc->chain_segment_sz = DEFAULT_NUM_FWCHAIN_ELEMTS * 6380 MAX_CHAIN_ELEMT_SZ; 6381 } else 6382 ioc->chain_segment_sz = ioc->request_sz; 6383 6384 /* calculate the max scatter element size */ 6385 sge_size = max_t(u16, ioc->sge_size, ioc->sge_size_ieee); 6386 6387 retry_allocation: 6388 total_sz = 0; 6389 /* calculate number of sg elements left over in the 1st frame */ 6390 max_sge_elements = ioc->request_sz - ((sizeof(Mpi2SCSIIORequest_t) - 6391 sizeof(Mpi2SGEIOUnion_t)) + sge_size); 6392 ioc->max_sges_in_main_message = max_sge_elements/sge_size; 6393 6394 /* now do the same for a chain buffer */ 6395 max_sge_elements = ioc->chain_segment_sz - sge_size; 6396 ioc->max_sges_in_chain_message = max_sge_elements/sge_size; 6397 6398 /* 6399 * MPT3SAS_SG_DEPTH = CONFIG_FUSION_MAX_SGE 6400 */ 6401 chains_needed_per_io = ((ioc->shost->sg_tablesize - 6402 ioc->max_sges_in_main_message)/ioc->max_sges_in_chain_message) 6403 + 1; 6404 if (chains_needed_per_io > facts->MaxChainDepth) { 6405 chains_needed_per_io = facts->MaxChainDepth; 6406 ioc->shost->sg_tablesize = min_t(u16, 6407 ioc->max_sges_in_main_message + (ioc->max_sges_in_chain_message 6408 * chains_needed_per_io), ioc->shost->sg_tablesize); 6409 } 6410 ioc->chains_needed_per_io = chains_needed_per_io; 6411 6412 /* reply free queue sizing - taking into account for 64 FW events */ 6413 ioc->reply_free_queue_depth = ioc->hba_queue_depth + 64; 6414 6415 /* mCPU manage single counters for simplicity */ 6416 if (ioc->is_mcpu_endpoint) 6417 ioc->reply_post_queue_depth = ioc->reply_free_queue_depth; 6418 else { 6419 /* calculate reply descriptor post queue depth */ 6420 ioc->reply_post_queue_depth = ioc->hba_queue_depth + 6421 ioc->reply_free_queue_depth + 1; 6422 /* align the reply post queue on the next 16 count boundary */ 6423 if (ioc->reply_post_queue_depth % 16) 6424 ioc->reply_post_queue_depth += 16 - 6425 (ioc->reply_post_queue_depth % 16); 6426 } 6427 6428 if (ioc->reply_post_queue_depth > 6429 facts->MaxReplyDescriptorPostQueueDepth) { 6430 ioc->reply_post_queue_depth = 6431 facts->MaxReplyDescriptorPostQueueDepth - 6432 (facts->MaxReplyDescriptorPostQueueDepth % 16); 6433 ioc->hba_queue_depth = 6434 ((ioc->reply_post_queue_depth - 64) / 2) - 1; 6435 ioc->reply_free_queue_depth = ioc->hba_queue_depth + 64; 6436 } 6437 6438 ioc_info(ioc, 6439 "scatter gather: sge_in_main_msg(%d), sge_per_chain(%d), " 6440 "sge_per_io(%d), chains_per_io(%d)\n", 6441 ioc->max_sges_in_main_message, 6442 ioc->max_sges_in_chain_message, 6443 ioc->shost->sg_tablesize, 6444 ioc->chains_needed_per_io); 6445 6446 /* reply post queue, 16 byte align */ 6447 reply_post_free_sz = ioc->reply_post_queue_depth * 6448 sizeof(Mpi2DefaultReplyDescriptor_t); 6449 rdpq_sz = reply_post_free_sz * RDPQ_MAX_INDEX_IN_ONE_CHUNK; 6450 if ((_base_is_controller_msix_enabled(ioc) && !ioc->rdpq_array_enable) 6451 || (ioc->reply_queue_count < RDPQ_MAX_INDEX_IN_ONE_CHUNK)) 6452 rdpq_sz = reply_post_free_sz * ioc->reply_queue_count; 6453 ret = 
base_alloc_rdpq_dma_pool(ioc, rdpq_sz); 6454 if (ret == -EAGAIN) { 6455 /* 6456 * Free allocated bad RDPQ memory pools. 6457 * Change dma coherent mask to 32 bit and reallocate RDPQ 6458 */ 6459 _base_release_memory_pools(ioc); 6460 ioc->use_32bit_dma = true; 6461 if (_base_config_dma_addressing(ioc, ioc->pdev) != 0) { 6462 ioc_err(ioc, 6463 "32 DMA mask failed %s\n", pci_name(ioc->pdev)); 6464 return -ENODEV; 6465 } 6466 if (base_alloc_rdpq_dma_pool(ioc, rdpq_sz)) 6467 return -ENOMEM; 6468 } else if (ret == -ENOMEM) 6469 return -ENOMEM; 6470 total_sz = rdpq_sz * (!ioc->rdpq_array_enable ? 1 : 6471 DIV_ROUND_UP(ioc->reply_queue_count, RDPQ_MAX_INDEX_IN_ONE_CHUNK)); 6472 ioc->scsiio_depth = ioc->hba_queue_depth - 6473 ioc->hi_priority_depth - ioc->internal_depth; 6474 6475 /* set the scsi host can_queue depth 6476 * with some internal commands that could be outstanding 6477 */ 6478 ioc->shost->can_queue = ioc->scsiio_depth - INTERNAL_SCSIIO_CMDS_COUNT; 6479 dinitprintk(ioc, 6480 ioc_info(ioc, "scsi host: can_queue depth (%d)\n", 6481 ioc->shost->can_queue)); 6482 6483 /* contiguous pool for request and chains, 16 byte align, one extra " 6484 * "frame for smid=0 6485 */ 6486 ioc->chain_depth = ioc->chains_needed_per_io * ioc->scsiio_depth; 6487 sz = ((ioc->scsiio_depth + 1) * ioc->request_sz); 6488 6489 /* hi-priority queue */ 6490 sz += (ioc->hi_priority_depth * ioc->request_sz); 6491 6492 /* internal queue */ 6493 sz += (ioc->internal_depth * ioc->request_sz); 6494 6495 ioc->request_dma_sz = sz; 6496 ioc->request = dma_alloc_coherent(&ioc->pdev->dev, sz, 6497 &ioc->request_dma, GFP_KERNEL); 6498 if (!ioc->request) { 6499 ioc_err(ioc, "request pool: dma_alloc_coherent failed: hba_depth(%d), chains_per_io(%d), frame_sz(%d), total(%d kB)\n", 6500 ioc->hba_queue_depth, ioc->chains_needed_per_io, 6501 ioc->request_sz, sz / 1024); 6502 if (ioc->scsiio_depth < MPT3SAS_SAS_QUEUE_DEPTH) 6503 goto out; 6504 retry_sz = 64; 6505 ioc->hba_queue_depth -= retry_sz; 6506 _base_release_memory_pools(ioc); 6507 goto retry_allocation; 6508 } 6509 6510 if (retry_sz) 6511 ioc_err(ioc, "request pool: dma_alloc_coherent succeed: hba_depth(%d), chains_per_io(%d), frame_sz(%d), total(%d kb)\n", 6512 ioc->hba_queue_depth, ioc->chains_needed_per_io, 6513 ioc->request_sz, sz / 1024); 6514 6515 /* hi-priority queue */ 6516 ioc->hi_priority = ioc->request + ((ioc->scsiio_depth + 1) * 6517 ioc->request_sz); 6518 ioc->hi_priority_dma = ioc->request_dma + ((ioc->scsiio_depth + 1) * 6519 ioc->request_sz); 6520 6521 /* internal queue */ 6522 ioc->internal = ioc->hi_priority + (ioc->hi_priority_depth * 6523 ioc->request_sz); 6524 ioc->internal_dma = ioc->hi_priority_dma + (ioc->hi_priority_depth * 6525 ioc->request_sz); 6526 6527 ioc_info(ioc, 6528 "request pool(0x%p) - dma(0x%llx): " 6529 "depth(%d), frame_size(%d), pool_size(%d kB)\n", 6530 ioc->request, (unsigned long long) ioc->request_dma, 6531 ioc->hba_queue_depth, ioc->request_sz, 6532 (ioc->hba_queue_depth * ioc->request_sz) / 1024); 6533 6534 total_sz += sz; 6535 6536 dinitprintk(ioc, 6537 ioc_info(ioc, "scsiio(0x%p): depth(%d)\n", 6538 ioc->request, ioc->scsiio_depth)); 6539 6540 ioc->chain_depth = min_t(u32, ioc->chain_depth, MAX_CHAIN_DEPTH); 6541 sz = ioc->scsiio_depth * sizeof(struct chain_lookup); 6542 ioc->chain_lookup = kzalloc(sz, GFP_KERNEL); 6543 if (!ioc->chain_lookup) { 6544 ioc_err(ioc, "chain_lookup: __get_free_pages failed\n"); 6545 goto out; 6546 } 6547 6548 sz = ioc->chains_needed_per_io * sizeof(struct chain_tracker); 6549 for (i = 0; i < 
ioc->scsiio_depth; i++) { 6550 ioc->chain_lookup[i].chains_per_smid = kzalloc(sz, GFP_KERNEL); 6551 if (!ioc->chain_lookup[i].chains_per_smid) { 6552 ioc_err(ioc, "chain_lookup: kzalloc failed\n"); 6553 goto out; 6554 } 6555 } 6556 6557 /* initialize hi-priority queue smid's */ 6558 ioc->hpr_lookup = kcalloc(ioc->hi_priority_depth, 6559 sizeof(struct request_tracker), GFP_KERNEL); 6560 if (!ioc->hpr_lookup) { 6561 ioc_err(ioc, "hpr_lookup: kcalloc failed\n"); 6562 goto out; 6563 } 6564 ioc->hi_priority_smid = ioc->scsiio_depth + 1; 6565 dinitprintk(ioc, 6566 ioc_info(ioc, "hi_priority(0x%p): depth(%d), start smid(%d)\n", 6567 ioc->hi_priority, 6568 ioc->hi_priority_depth, ioc->hi_priority_smid)); 6569 6570 /* initialize internal queue smid's */ 6571 ioc->internal_lookup = kcalloc(ioc->internal_depth, 6572 sizeof(struct request_tracker), GFP_KERNEL); 6573 if (!ioc->internal_lookup) { 6574 ioc_err(ioc, "internal_lookup: kcalloc failed\n"); 6575 goto out; 6576 } 6577 ioc->internal_smid = ioc->hi_priority_smid + ioc->hi_priority_depth; 6578 dinitprintk(ioc, 6579 ioc_info(ioc, "internal(0x%p): depth(%d), start smid(%d)\n", 6580 ioc->internal, 6581 ioc->internal_depth, ioc->internal_smid)); 6582 6583 ioc->io_queue_num = kcalloc(ioc->scsiio_depth, 6584 sizeof(u16), GFP_KERNEL); 6585 if (!ioc->io_queue_num) 6586 goto out; 6587 /* 6588 * The number of NVMe page sized blocks needed is: 6589 * (((sg_tablesize * 8) - 1) / (page_size - 8)) + 1 6590 * ((sg_tablesize * 8) - 1) is the max PRP's minus the first PRP entry 6591 * that is placed in the main message frame. 8 is the size of each PRP 6592 * entry or PRP list pointer entry. 8 is subtracted from page_size 6593 * because of the PRP list pointer entry at the end of a page, so this 6594 * is not counted as a PRP entry. The 1 added page is a round up. 6595 * 6596 * To avoid allocation failures due to the amount of memory that could 6597 * be required for NVMe PRP's, only each set of NVMe blocks will be 6598 * contiguous, so a new set is allocated for each possible I/O. 
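 *
 * Worked example (illustrative values): with sg_tablesize = 128 and a
 * 4 KiB IOC page size the formula gives
 *
 *   nvme_blocks_needed = ((128 * 8) - 1) / (4096 - 8) + 1
 *                      = 1023 / 4088 + 1 = 1
 *
 * i.e. a single page-sized block per I/O is enough to hold all PRP
 * entries, so the PCIe SGL pool below allocates one such buffer for each
 * of the scsiio_depth possible commands.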
6599 */ 6600 6601 ioc->chains_per_prp_buffer = 0; 6602 if (ioc->facts.ProtocolFlags & MPI2_IOCFACTS_PROTOCOL_NVME_DEVICES) { 6603 nvme_blocks_needed = 6604 (ioc->shost->sg_tablesize * NVME_PRP_SIZE) - 1; 6605 nvme_blocks_needed /= (ioc->page_size - NVME_PRP_SIZE); 6606 nvme_blocks_needed++; 6607 6608 sz = sizeof(struct pcie_sg_list) * ioc->scsiio_depth; 6609 ioc->pcie_sg_lookup = kzalloc(sz, GFP_KERNEL); 6610 if (!ioc->pcie_sg_lookup) { 6611 ioc_info(ioc, "PCIe SGL lookup: kzalloc failed\n"); 6612 goto out; 6613 } 6614 sz = nvme_blocks_needed * ioc->page_size; 6615 rc = _base_allocate_pcie_sgl_pool(ioc, sz); 6616 if (rc == -ENOMEM) 6617 return -ENOMEM; 6618 else if (rc == -EAGAIN) 6619 goto try_32bit_dma; 6620 total_sz += sz * ioc->scsiio_depth; 6621 } 6622 6623 rc = _base_allocate_chain_dma_pool(ioc, ioc->chain_segment_sz); 6624 if (rc == -ENOMEM) 6625 return -ENOMEM; 6626 else if (rc == -EAGAIN) 6627 goto try_32bit_dma; 6628 total_sz += ioc->chain_segment_sz * ((ioc->chains_needed_per_io - 6629 ioc->chains_per_prp_buffer) * ioc->scsiio_depth); 6630 dinitprintk(ioc, 6631 ioc_info(ioc, "chain pool depth(%d), frame_size(%d), pool_size(%d kB)\n", 6632 ioc->chain_depth, ioc->chain_segment_sz, 6633 (ioc->chain_depth * ioc->chain_segment_sz) / 1024)); 6634 /* sense buffers, 4 byte align */ 6635 sense_sz = ioc->scsiio_depth * SCSI_SENSE_BUFFERSIZE; 6636 rc = _base_allocate_sense_dma_pool(ioc, sense_sz); 6637 if (rc == -ENOMEM) 6638 return -ENOMEM; 6639 else if (rc == -EAGAIN) 6640 goto try_32bit_dma; 6641 total_sz += sense_sz; 6642 /* reply pool, 4 byte align */ 6643 sz = ioc->reply_free_queue_depth * ioc->reply_sz; 6644 rc = _base_allocate_reply_pool(ioc, sz); 6645 if (rc == -ENOMEM) 6646 return -ENOMEM; 6647 else if (rc == -EAGAIN) 6648 goto try_32bit_dma; 6649 total_sz += sz; 6650 6651 /* reply free queue, 16 byte align */ 6652 sz = ioc->reply_free_queue_depth * 4; 6653 rc = _base_allocate_reply_free_dma_pool(ioc, sz); 6654 if (rc == -ENOMEM) 6655 return -ENOMEM; 6656 else if (rc == -EAGAIN) 6657 goto try_32bit_dma; 6658 dinitprintk(ioc, 6659 ioc_info(ioc, "reply_free_dma (0x%llx)\n", 6660 (unsigned long long)ioc->reply_free_dma)); 6661 total_sz += sz; 6662 if (ioc->rdpq_array_enable) { 6663 reply_post_free_array_sz = ioc->reply_queue_count * 6664 sizeof(Mpi2IOCInitRDPQArrayEntry); 6665 rc = _base_allocate_reply_post_free_array(ioc, 6666 reply_post_free_array_sz); 6667 if (rc == -ENOMEM) 6668 return -ENOMEM; 6669 else if (rc == -EAGAIN) 6670 goto try_32bit_dma; 6671 } 6672 ioc->config_page_sz = 512; 6673 ioc->config_page = dma_alloc_coherent(&ioc->pdev->dev, 6674 ioc->config_page_sz, &ioc->config_page_dma, GFP_KERNEL); 6675 if (!ioc->config_page) { 6676 ioc_err(ioc, "config page: dma_pool_alloc failed\n"); 6677 goto out; 6678 } 6679 6680 ioc_info(ioc, "config page(0x%p) - dma(0x%llx): size(%d)\n", 6681 ioc->config_page, (unsigned long long)ioc->config_page_dma, 6682 ioc->config_page_sz); 6683 total_sz += ioc->config_page_sz; 6684 6685 ioc_info(ioc, "Allocated physical memory: size(%d kB)\n", 6686 total_sz / 1024); 6687 ioc_info(ioc, "Current Controller Queue Depth(%d),Max Controller Queue Depth(%d)\n", 6688 ioc->shost->can_queue, facts->RequestCredit); 6689 ioc_info(ioc, "Scatter Gather Elements per IO(%d)\n", 6690 ioc->shost->sg_tablesize); 6691 return 0; 6692 6693 try_32bit_dma: 6694 _base_release_memory_pools(ioc); 6695 if (ioc->use_32bit_dma && (ioc->dma_mask > 32)) { 6696 /* Change dma coherent mask to 32 bit and reallocate */ 6697 if (_base_config_dma_addressing(ioc, ioc->pdev) != 0) { 
6698 pr_err("Setting 32 bit coherent DMA mask Failed %s\n", 6699 pci_name(ioc->pdev)); 6700 return -ENODEV; 6701 } 6702 } else if (_base_reduce_hba_queue_depth(ioc) != 0) 6703 return -ENOMEM; 6704 goto retry_allocation; 6705 6706 out: 6707 return -ENOMEM; 6708 } 6709 6710 /** 6711 * mpt3sas_base_get_iocstate - Get the current state of a MPT adapter. 6712 * @ioc: Pointer to MPT3SAS_ADAPTER structure 6713 * @cooked: Request raw or cooked IOC state 6714 * 6715 * Return: all IOC Doorbell register bits if cooked==0, else just the 6716 * Doorbell bits in MPI2_IOC_STATE_MASK. 6717 */ 6718 u32 6719 mpt3sas_base_get_iocstate(struct MPT3SAS_ADAPTER *ioc, int cooked) 6720 { 6721 u32 s, sc; 6722 6723 s = ioc->base_readl_ext_retry(&ioc->chip->Doorbell); 6724 sc = s & MPI2_IOC_STATE_MASK; 6725 return cooked ? sc : s; 6726 } 6727 6728 /** 6729 * _base_wait_on_iocstate - waiting on a particular ioc state 6730 * @ioc: per adapter object 6731 * @ioc_state: controller state { READY, OPERATIONAL, or RESET } 6732 * @timeout: timeout in seconds 6733 * 6734 * Return: 0 for success, non-zero for failure. 6735 */ 6736 static int 6737 _base_wait_on_iocstate(struct MPT3SAS_ADAPTER *ioc, u32 ioc_state, int timeout) 6738 { 6739 u32 count, cntdn; 6740 u32 current_state; 6741 6742 count = 0; 6743 cntdn = 1000 * timeout; 6744 do { 6745 current_state = mpt3sas_base_get_iocstate(ioc, 1); 6746 if (current_state == ioc_state) 6747 return 0; 6748 if (count && current_state == MPI2_IOC_STATE_FAULT) 6749 break; 6750 if (count && current_state == MPI2_IOC_STATE_COREDUMP) 6751 break; 6752 6753 usleep_range(1000, 1500); 6754 count++; 6755 } while (--cntdn); 6756 6757 return current_state; 6758 } 6759 6760 /** 6761 * _base_dump_reg_set - This function prints a hexdump of the register set. 6762 * @ioc: per adapter object 6763 * 6764 * Return: nothing. 6765 */ 6766 static inline void 6767 _base_dump_reg_set(struct MPT3SAS_ADAPTER *ioc) 6768 { 6769 unsigned int i, sz = 256; 6770 u32 __iomem *reg = (u32 __iomem *)ioc->chip; 6771 6772 ioc_info(ioc, "System Register set:\n"); 6773 for (i = 0; i < (sz / sizeof(u32)); i++) 6774 pr_info("%08x: %08x\n", (i * 4), readl(&reg[i])); 6775 } 6776 6777 /** 6778 * _base_wait_for_doorbell_int - waiting for controller interrupt (generated by 6779 * a write to the doorbell) 6780 * @ioc: per adapter object 6781 * @timeout: timeout in seconds 6782 * 6783 * Return: 0 for success, non-zero for failure. 6784 * 6785 * Notes: MPI2_HIS_IOC2SYS_DB_STATUS - set to one when IOC writes to doorbell.
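 *
 * Timing sketch (approximate): the loop below polls HostInterruptStatus up
 * to cntdn = 1000 * timeout times with a ~1 ms sleep per iteration, so a
 * timeout of 5 gives roughly 5 seconds (up to ~7.5 s worst case, since
 * usleep_range() may stretch each step to 1.5 ms). The spin variant below
 * it uses 2000 iterations per second with a 500 us busy-wait instead, for
 * callers that cannot sleep.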
6786 */ 6787 6788 static int 6789 _base_wait_for_doorbell_int(struct MPT3SAS_ADAPTER *ioc, int timeout) 6790 { 6791 u32 cntdn, count; 6792 u32 int_status; 6793 6794 count = 0; 6795 cntdn = 1000 * timeout; 6796 do { 6797 int_status = ioc->base_readl(&ioc->chip->HostInterruptStatus); 6798 if (int_status & MPI2_HIS_IOC2SYS_DB_STATUS) { 6799 dhsprintk(ioc, 6800 ioc_info(ioc, "%s: successful count(%d), timeout(%d)\n", 6801 __func__, count, timeout)); 6802 return 0; 6803 } 6804 6805 usleep_range(1000, 1500); 6806 count++; 6807 } while (--cntdn); 6808 6809 ioc_err(ioc, "%s: failed due to timeout count(%d), int_status(%x)!\n", 6810 __func__, count, int_status); 6811 return -EFAULT; 6812 } 6813 6814 static int 6815 _base_spin_on_doorbell_int(struct MPT3SAS_ADAPTER *ioc, int timeout) 6816 { 6817 u32 cntdn, count; 6818 u32 int_status; 6819 6820 count = 0; 6821 cntdn = 2000 * timeout; 6822 do { 6823 int_status = ioc->base_readl(&ioc->chip->HostInterruptStatus); 6824 if (int_status & MPI2_HIS_IOC2SYS_DB_STATUS) { 6825 dhsprintk(ioc, 6826 ioc_info(ioc, "%s: successful count(%d), timeout(%d)\n", 6827 __func__, count, timeout)); 6828 return 0; 6829 } 6830 6831 udelay(500); 6832 count++; 6833 } while (--cntdn); 6834 6835 ioc_err(ioc, "%s: failed due to timeout count(%d), int_status(%x)!\n", 6836 __func__, count, int_status); 6837 return -EFAULT; 6838 6839 } 6840 6841 /** 6842 * _base_wait_for_doorbell_ack - waiting for controller to read the doorbell. 6843 * @ioc: per adapter object 6844 * @timeout: timeout in second 6845 * 6846 * Return: 0 for success, non-zero for failure. 6847 * 6848 * Notes: MPI2_HIS_SYS2IOC_DB_STATUS - set to one when host writes to 6849 * doorbell. 6850 */ 6851 static int 6852 _base_wait_for_doorbell_ack(struct MPT3SAS_ADAPTER *ioc, int timeout) 6853 { 6854 u32 cntdn, count; 6855 u32 int_status; 6856 u32 doorbell; 6857 6858 count = 0; 6859 cntdn = 1000 * timeout; 6860 do { 6861 int_status = ioc->base_readl(&ioc->chip->HostInterruptStatus); 6862 if (!(int_status & MPI2_HIS_SYS2IOC_DB_STATUS)) { 6863 dhsprintk(ioc, 6864 ioc_info(ioc, "%s: successful count(%d), timeout(%d)\n", 6865 __func__, count, timeout)); 6866 return 0; 6867 } else if (int_status & MPI2_HIS_IOC2SYS_DB_STATUS) { 6868 doorbell = ioc->base_readl_ext_retry(&ioc->chip->Doorbell); 6869 if ((doorbell & MPI2_IOC_STATE_MASK) == 6870 MPI2_IOC_STATE_FAULT) { 6871 mpt3sas_print_fault_code(ioc, doorbell); 6872 return -EFAULT; 6873 } 6874 if ((doorbell & MPI2_IOC_STATE_MASK) == 6875 MPI2_IOC_STATE_COREDUMP) { 6876 mpt3sas_print_coredump_info(ioc, doorbell); 6877 return -EFAULT; 6878 } 6879 } else if (int_status == 0xFFFFFFFF) 6880 goto out; 6881 6882 usleep_range(1000, 1500); 6883 count++; 6884 } while (--cntdn); 6885 6886 out: 6887 ioc_err(ioc, "%s: failed due to timeout count(%d), int_status(%x)!\n", 6888 __func__, count, int_status); 6889 return -EFAULT; 6890 } 6891 6892 /** 6893 * _base_wait_for_doorbell_not_used - waiting for doorbell to not be in use 6894 * @ioc: per adapter object 6895 * @timeout: timeout in second 6896 * 6897 * Return: 0 for success, non-zero for failure. 
6898 */ 6899 static int 6900 _base_wait_for_doorbell_not_used(struct MPT3SAS_ADAPTER *ioc, int timeout) 6901 { 6902 u32 cntdn, count; 6903 u32 doorbell_reg; 6904 6905 count = 0; 6906 cntdn = 1000 * timeout; 6907 do { 6908 doorbell_reg = ioc->base_readl_ext_retry(&ioc->chip->Doorbell); 6909 if (!(doorbell_reg & MPI2_DOORBELL_USED)) { 6910 dhsprintk(ioc, 6911 ioc_info(ioc, "%s: successful count(%d), timeout(%d)\n", 6912 __func__, count, timeout)); 6913 return 0; 6914 } 6915 6916 usleep_range(1000, 1500); 6917 count++; 6918 } while (--cntdn); 6919 6920 ioc_err(ioc, "%s: failed due to timeout count(%d), doorbell_reg(%x)!\n", 6921 __func__, count, doorbell_reg); 6922 return -EFAULT; 6923 } 6924 6925 /** 6926 * _base_send_ioc_reset - send doorbell reset 6927 * @ioc: per adapter object 6928 * @reset_type: currently only supports: MPI2_FUNCTION_IOC_MESSAGE_UNIT_RESET 6929 * @timeout: timeout in second 6930 * 6931 * Return: 0 for success, non-zero for failure. 6932 */ 6933 static int 6934 _base_send_ioc_reset(struct MPT3SAS_ADAPTER *ioc, u8 reset_type, int timeout) 6935 { 6936 u32 ioc_state; 6937 int r = 0; 6938 unsigned long flags; 6939 6940 if (reset_type != MPI2_FUNCTION_IOC_MESSAGE_UNIT_RESET) { 6941 ioc_err(ioc, "%s: unknown reset_type\n", __func__); 6942 return -EFAULT; 6943 } 6944 6945 if (!(ioc->facts.IOCCapabilities & 6946 MPI2_IOCFACTS_CAPABILITY_EVENT_REPLAY)) 6947 return -EFAULT; 6948 6949 ioc_info(ioc, "sending message unit reset !!\n"); 6950 6951 writel(reset_type << MPI2_DOORBELL_FUNCTION_SHIFT, 6952 &ioc->chip->Doorbell); 6953 if ((_base_wait_for_doorbell_ack(ioc, 15))) { 6954 r = -EFAULT; 6955 goto out; 6956 } 6957 6958 ioc_state = _base_wait_on_iocstate(ioc, MPI2_IOC_STATE_READY, timeout); 6959 if (ioc_state) { 6960 ioc_err(ioc, "%s: failed going to ready state (ioc_state=0x%x)\n", 6961 __func__, ioc_state); 6962 r = -EFAULT; 6963 goto out; 6964 } 6965 out: 6966 if (r != 0) { 6967 ioc_state = mpt3sas_base_get_iocstate(ioc, 0); 6968 spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags); 6969 /* 6970 * Wait for IOC state CoreDump to clear only during 6971 * HBA initialization & release time. 6972 */ 6973 if ((ioc_state & MPI2_IOC_STATE_MASK) == 6974 MPI2_IOC_STATE_COREDUMP && (ioc->is_driver_loading == 1 || 6975 ioc->fault_reset_work_q == NULL)) { 6976 spin_unlock_irqrestore( 6977 &ioc->ioc_reset_in_progress_lock, flags); 6978 mpt3sas_print_coredump_info(ioc, ioc_state); 6979 mpt3sas_base_wait_for_coredump_completion(ioc, 6980 __func__); 6981 spin_lock_irqsave( 6982 &ioc->ioc_reset_in_progress_lock, flags); 6983 } 6984 spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags); 6985 } 6986 ioc_info(ioc, "message unit reset: %s\n", 6987 r == 0 ? "SUCCESS" : "FAILED"); 6988 return r; 6989 } 6990 6991 /** 6992 * mpt3sas_wait_for_ioc - IOC's operational state is checked here. 6993 * @ioc: per adapter object 6994 * @timeout: timeout in seconds 6995 * 6996 * Return: Waits up to timeout seconds for the IOC to 6997 * become operational. Returns 0 if IOC is present 6998 * and operational; otherwise returns %-EFAULT. 6999 */ 7000 7001 int 7002 mpt3sas_wait_for_ioc(struct MPT3SAS_ADAPTER *ioc, int timeout) 7003 { 7004 int wait_state_count = 0; 7005 u32 ioc_state; 7006 7007 do { 7008 ioc_state = mpt3sas_base_get_iocstate(ioc, 1); 7009 if (ioc_state == MPI2_IOC_STATE_OPERATIONAL) 7010 break; 7011 7012 /* 7013 * Watchdog thread will be started after IOC Initialization, so 7014 * no need to wait here for IOC state to become operational 7015 * when IOC Initialization is on. 
Instead the driver will 7016 * return ETIME status, so that calling function can issue 7017 * diag reset operation and retry the command. 7018 */ 7019 if (ioc->is_driver_loading) 7020 return -ETIME; 7021 7022 ssleep(1); 7023 ioc_info(ioc, "%s: waiting for operational state(count=%d)\n", 7024 __func__, ++wait_state_count); 7025 } while (--timeout); 7026 if (!timeout) { 7027 ioc_err(ioc, "%s: failed due to ioc not operational\n", __func__); 7028 return -EFAULT; 7029 } 7030 if (wait_state_count) 7031 ioc_info(ioc, "ioc is operational\n"); 7032 return 0; 7033 } 7034 7035 /** 7036 * _base_handshake_req_reply_wait - send request thru doorbell interface 7037 * @ioc: per adapter object 7038 * @request_bytes: request length 7039 * @request: pointer having request payload 7040 * @reply_bytes: reply length 7041 * @reply: pointer to reply payload 7042 * @timeout: timeout in second 7043 * 7044 * Return: 0 for success, non-zero for failure. 7045 */ 7046 static int 7047 _base_handshake_req_reply_wait(struct MPT3SAS_ADAPTER *ioc, int request_bytes, 7048 u32 *request, int reply_bytes, u16 *reply, int timeout) 7049 { 7050 MPI2DefaultReply_t *default_reply = (MPI2DefaultReply_t *)reply; 7051 int i; 7052 u8 failed; 7053 __le32 *mfp; 7054 int ret_val; 7055 7056 /* make sure doorbell is not in use */ 7057 if ((ioc->base_readl_ext_retry(&ioc->chip->Doorbell) & MPI2_DOORBELL_USED)) { 7058 ioc_err(ioc, "doorbell is in use (line=%d)\n", __LINE__); 7059 goto doorbell_diag_reset; 7060 } 7061 7062 /* clear pending doorbell interrupts from previous state changes */ 7063 if (ioc->base_readl(&ioc->chip->HostInterruptStatus) & 7064 MPI2_HIS_IOC2SYS_DB_STATUS) 7065 writel(0, &ioc->chip->HostInterruptStatus); 7066 7067 /* send message to ioc */ 7068 writel(((MPI2_FUNCTION_HANDSHAKE<<MPI2_DOORBELL_FUNCTION_SHIFT) | 7069 ((request_bytes/4)<<MPI2_DOORBELL_ADD_DWORDS_SHIFT)), 7070 &ioc->chip->Doorbell); 7071 7072 if ((_base_spin_on_doorbell_int(ioc, 5))) { 7073 ioc_err(ioc, "doorbell handshake int failed (line=%d)\n", 7074 __LINE__); 7075 return -EFAULT; 7076 } 7077 writel(0, &ioc->chip->HostInterruptStatus); 7078 7079 if ((_base_wait_for_doorbell_ack(ioc, 5))) { 7080 ioc_err(ioc, "doorbell handshake ack failed (line=%d)\n", 7081 __LINE__); 7082 return -EFAULT; 7083 } 7084 7085 /* send message 32-bits at a time */ 7086 for (i = 0, failed = 0; i < request_bytes/4 && !failed; i++) { 7087 writel(cpu_to_le32(request[i]), &ioc->chip->Doorbell); 7088 if ((_base_wait_for_doorbell_ack(ioc, 5))) 7089 failed = 1; 7090 } 7091 7092 if (failed) { 7093 ioc_err(ioc, "doorbell handshake sending request failed (line=%d)\n", 7094 __LINE__); 7095 return -EFAULT; 7096 } 7097 7098 /* now wait for the reply */ 7099 if ((_base_wait_for_doorbell_int(ioc, timeout))) { 7100 ioc_err(ioc, "doorbell handshake int failed (line=%d)\n", 7101 __LINE__); 7102 return -EFAULT; 7103 } 7104 7105 /* read the first two 16-bits, it gives the total length of the reply */ 7106 reply[0] = le16_to_cpu(ioc->base_readl_ext_retry(&ioc->chip->Doorbell) 7107 & MPI2_DOORBELL_DATA_MASK); 7108 writel(0, &ioc->chip->HostInterruptStatus); 7109 if ((_base_wait_for_doorbell_int(ioc, 5))) { 7110 ioc_err(ioc, "doorbell handshake int failed (line=%d)\n", 7111 __LINE__); 7112 return -EFAULT; 7113 } 7114 reply[1] = le16_to_cpu(ioc->base_readl_ext_retry(&ioc->chip->Doorbell) 7115 & MPI2_DOORBELL_DATA_MASK); 7116 writel(0, &ioc->chip->HostInterruptStatus); 7117 7118 for (i = 2; i < default_reply->MsgLength * 2; i++) { 7119 if ((_base_wait_for_doorbell_int(ioc, 5))) { 7120 ioc_err(ioc, 
"doorbell handshake int failed (line=%d)\n", 7121 __LINE__); 7122 return -EFAULT; 7123 } 7124 if (i >= reply_bytes/2) /* overflow case */ 7125 ioc->base_readl_ext_retry(&ioc->chip->Doorbell); 7126 else 7127 reply[i] = le16_to_cpu( 7128 ioc->base_readl_ext_retry(&ioc->chip->Doorbell) 7129 & MPI2_DOORBELL_DATA_MASK); 7130 writel(0, &ioc->chip->HostInterruptStatus); 7131 } 7132 7133 _base_wait_for_doorbell_int(ioc, 5); 7134 if (_base_wait_for_doorbell_not_used(ioc, 5) != 0) { 7135 dhsprintk(ioc, 7136 ioc_info(ioc, "doorbell is in use (line=%d)\n", 7137 __LINE__)); 7138 } 7139 writel(0, &ioc->chip->HostInterruptStatus); 7140 7141 if (ioc->logging_level & MPT_DEBUG_INIT) { 7142 mfp = (__le32 *)reply; 7143 pr_info("\toffset:data\n"); 7144 for (i = 0; i < reply_bytes/4; i++) 7145 ioc_info(ioc, "\t[0x%02x]:%08x\n", i*4, 7146 le32_to_cpu(mfp[i])); 7147 } 7148 return 0; 7149 7150 doorbell_diag_reset: 7151 ret_val = _base_diag_reset(ioc); 7152 return ret_val; 7153 } 7154 7155 /** 7156 * mpt3sas_base_sas_iounit_control - send sas iounit control to FW 7157 * @ioc: per adapter object 7158 * @mpi_reply: the reply payload from FW 7159 * @mpi_request: the request payload sent to FW 7160 * 7161 * The SAS IO Unit Control Request message allows the host to perform low-level 7162 * operations, such as resets on the PHYs of the IO Unit, also allows the host 7163 * to obtain the IOC assigned device handles for a device if it has other 7164 * identifying information about the device, in addition allows the host to 7165 * remove IOC resources associated with the device. 7166 * 7167 * Return: 0 for success, non-zero for failure. 7168 */ 7169 int 7170 mpt3sas_base_sas_iounit_control(struct MPT3SAS_ADAPTER *ioc, 7171 Mpi2SasIoUnitControlReply_t *mpi_reply, 7172 Mpi2SasIoUnitControlRequest_t *mpi_request) 7173 { 7174 u16 smid; 7175 u8 issue_reset = 0; 7176 int rc; 7177 void *request; 7178 7179 dinitprintk(ioc, ioc_info(ioc, "%s\n", __func__)); 7180 7181 mutex_lock(&ioc->base_cmds.mutex); 7182 7183 if (ioc->base_cmds.status != MPT3_CMD_NOT_USED) { 7184 ioc_err(ioc, "%s: base_cmd in use\n", __func__); 7185 rc = -EAGAIN; 7186 goto out; 7187 } 7188 7189 rc = mpt3sas_wait_for_ioc(ioc, IOC_OPERATIONAL_WAIT_COUNT); 7190 if (rc) 7191 goto out; 7192 7193 smid = mpt3sas_base_get_smid(ioc, ioc->base_cb_idx); 7194 if (!smid) { 7195 ioc_err(ioc, "%s: failed obtaining a smid\n", __func__); 7196 rc = -EAGAIN; 7197 goto out; 7198 } 7199 7200 rc = 0; 7201 ioc->base_cmds.status = MPT3_CMD_PENDING; 7202 request = mpt3sas_base_get_msg_frame(ioc, smid); 7203 ioc->base_cmds.smid = smid; 7204 memcpy(request, mpi_request, sizeof(Mpi2SasIoUnitControlRequest_t)); 7205 if (mpi_request->Operation == MPI2_SAS_OP_PHY_HARD_RESET || 7206 mpi_request->Operation == MPI2_SAS_OP_PHY_LINK_RESET) 7207 ioc->ioc_link_reset_in_progress = 1; 7208 init_completion(&ioc->base_cmds.done); 7209 ioc->put_smid_default(ioc, smid); 7210 wait_for_completion_timeout(&ioc->base_cmds.done, 7211 msecs_to_jiffies(10000)); 7212 if ((mpi_request->Operation == MPI2_SAS_OP_PHY_HARD_RESET || 7213 mpi_request->Operation == MPI2_SAS_OP_PHY_LINK_RESET) && 7214 ioc->ioc_link_reset_in_progress) 7215 ioc->ioc_link_reset_in_progress = 0; 7216 if (!(ioc->base_cmds.status & MPT3_CMD_COMPLETE)) { 7217 mpt3sas_check_cmd_timeout(ioc, ioc->base_cmds.status, 7218 mpi_request, sizeof(Mpi2SasIoUnitControlRequest_t)/4, 7219 issue_reset); 7220 goto issue_host_reset; 7221 } 7222 if (ioc->base_cmds.status & MPT3_CMD_REPLY_VALID) 7223 memcpy(mpi_reply, ioc->base_cmds.reply, 7224 
sizeof(Mpi2SasIoUnitControlReply_t)); 7225 else 7226 memset(mpi_reply, 0, sizeof(Mpi2SasIoUnitControlReply_t)); 7227 ioc->base_cmds.status = MPT3_CMD_NOT_USED; 7228 goto out; 7229 7230 issue_host_reset: 7231 if (issue_reset) 7232 mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER); 7233 ioc->base_cmds.status = MPT3_CMD_NOT_USED; 7234 rc = -EFAULT; 7235 out: 7236 mutex_unlock(&ioc->base_cmds.mutex); 7237 return rc; 7238 } 7239 7240 /** 7241 * mpt3sas_base_scsi_enclosure_processor - sending request to sep device 7242 * @ioc: per adapter object 7243 * @mpi_reply: the reply payload from FW 7244 * @mpi_request: the request payload sent to FW 7245 * 7246 * The SCSI Enclosure Processor request message causes the IOC to 7247 * communicate with SES devices to control LED status signals. 7248 * 7249 * Return: 0 for success, non-zero for failure. 7250 */ 7251 int 7252 mpt3sas_base_scsi_enclosure_processor(struct MPT3SAS_ADAPTER *ioc, 7253 Mpi2SepReply_t *mpi_reply, Mpi2SepRequest_t *mpi_request) 7254 { 7255 u16 smid; 7256 u8 issue_reset = 0; 7257 int rc; 7258 void *request; 7259 7260 dinitprintk(ioc, ioc_info(ioc, "%s\n", __func__)); 7261 7262 mutex_lock(&ioc->base_cmds.mutex); 7263 7264 if (ioc->base_cmds.status != MPT3_CMD_NOT_USED) { 7265 ioc_err(ioc, "%s: base_cmd in use\n", __func__); 7266 rc = -EAGAIN; 7267 goto out; 7268 } 7269 7270 rc = mpt3sas_wait_for_ioc(ioc, IOC_OPERATIONAL_WAIT_COUNT); 7271 if (rc) 7272 goto out; 7273 7274 smid = mpt3sas_base_get_smid(ioc, ioc->base_cb_idx); 7275 if (!smid) { 7276 ioc_err(ioc, "%s: failed obtaining a smid\n", __func__); 7277 rc = -EAGAIN; 7278 goto out; 7279 } 7280 7281 rc = 0; 7282 ioc->base_cmds.status = MPT3_CMD_PENDING; 7283 request = mpt3sas_base_get_msg_frame(ioc, smid); 7284 ioc->base_cmds.smid = smid; 7285 memset(request, 0, ioc->request_sz); 7286 memcpy(request, mpi_request, sizeof(Mpi2SepRequest_t)); 7287 init_completion(&ioc->base_cmds.done); 7288 ioc->put_smid_default(ioc, smid); 7289 wait_for_completion_timeout(&ioc->base_cmds.done, 7290 msecs_to_jiffies(10000)); 7291 if (!(ioc->base_cmds.status & MPT3_CMD_COMPLETE)) { 7292 mpt3sas_check_cmd_timeout(ioc, 7293 ioc->base_cmds.status, mpi_request, 7294 sizeof(Mpi2SepRequest_t)/4, issue_reset); 7295 goto issue_host_reset; 7296 } 7297 if (ioc->base_cmds.status & MPT3_CMD_REPLY_VALID) 7298 memcpy(mpi_reply, ioc->base_cmds.reply, 7299 sizeof(Mpi2SepReply_t)); 7300 else 7301 memset(mpi_reply, 0, sizeof(Mpi2SepReply_t)); 7302 ioc->base_cmds.status = MPT3_CMD_NOT_USED; 7303 goto out; 7304 7305 issue_host_reset: 7306 if (issue_reset) 7307 mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER); 7308 ioc->base_cmds.status = MPT3_CMD_NOT_USED; 7309 rc = -EFAULT; 7310 out: 7311 mutex_unlock(&ioc->base_cmds.mutex); 7312 return rc; 7313 } 7314 7315 /** 7316 * _base_get_port_facts - obtain port facts reply and save in ioc 7317 * @ioc: per adapter object 7318 * @port: port number 7319 * 7320 * Return: 0 for success, non-zero for failure.
7321 */ 7322 static int 7323 _base_get_port_facts(struct MPT3SAS_ADAPTER *ioc, int port) 7324 { 7325 Mpi2PortFactsRequest_t mpi_request; 7326 Mpi2PortFactsReply_t mpi_reply; 7327 struct mpt3sas_port_facts *pfacts; 7328 int mpi_reply_sz, mpi_request_sz, r; 7329 7330 dinitprintk(ioc, ioc_info(ioc, "%s\n", __func__)); 7331 7332 mpi_reply_sz = sizeof(Mpi2PortFactsReply_t); 7333 mpi_request_sz = sizeof(Mpi2PortFactsRequest_t); 7334 memset(&mpi_request, 0, mpi_request_sz); 7335 mpi_request.Function = MPI2_FUNCTION_PORT_FACTS; 7336 mpi_request.PortNumber = port; 7337 r = _base_handshake_req_reply_wait(ioc, mpi_request_sz, 7338 (u32 *)&mpi_request, mpi_reply_sz, (u16 *)&mpi_reply, 5); 7339 7340 if (r != 0) { 7341 ioc_err(ioc, "%s: handshake failed (r=%d)\n", __func__, r); 7342 return r; 7343 } 7344 7345 pfacts = &ioc->pfacts[port]; 7346 memset(pfacts, 0, sizeof(struct mpt3sas_port_facts)); 7347 pfacts->PortNumber = mpi_reply.PortNumber; 7348 pfacts->VP_ID = mpi_reply.VP_ID; 7349 pfacts->VF_ID = mpi_reply.VF_ID; 7350 pfacts->MaxPostedCmdBuffers = 7351 le16_to_cpu(mpi_reply.MaxPostedCmdBuffers); 7352 7353 return 0; 7354 } 7355 7356 /** 7357 * _base_wait_for_iocstate - Wait until the card is in READY or OPERATIONAL 7358 * @ioc: per adapter object 7359 * @timeout: timeout in seconds 7360 * 7361 * Return: 0 for success, non-zero for failure. 7362 */ 7363 static int 7364 _base_wait_for_iocstate(struct MPT3SAS_ADAPTER *ioc, int timeout) 7365 { 7366 u32 ioc_state; 7367 int rc; 7368 7369 dinitprintk(ioc, ioc_info(ioc, "%s\n", __func__)); 7370 7371 if (ioc->pci_error_recovery) { 7372 dfailprintk(ioc, 7373 ioc_info(ioc, "%s: host in pci error recovery\n", 7374 __func__)); 7375 return -EFAULT; 7376 } 7377 7378 ioc_state = mpt3sas_base_get_iocstate(ioc, 0); 7379 dhsprintk(ioc, 7380 ioc_info(ioc, "%s: ioc_state(0x%08x)\n", 7381 __func__, ioc_state)); 7382 7383 if (((ioc_state & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_READY) || 7384 (ioc_state & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_OPERATIONAL) 7385 return 0; 7386 7387 if (ioc_state & MPI2_DOORBELL_USED) { 7388 dhsprintk(ioc, ioc_info(ioc, "unexpected doorbell active!\n")); 7389 goto issue_diag_reset; 7390 } 7391 7392 if ((ioc_state & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_FAULT) { 7393 mpt3sas_print_fault_code(ioc, ioc_state & 7394 MPI2_DOORBELL_DATA_MASK); 7395 goto issue_diag_reset; 7396 } else if ((ioc_state & MPI2_IOC_STATE_MASK) == 7397 MPI2_IOC_STATE_COREDUMP) { 7398 ioc_info(ioc, 7399 "%s: Skipping the diag reset here. (ioc_state=0x%x)\n", 7400 __func__, ioc_state); 7401 return -EFAULT; 7402 } 7403 7404 ioc_state = _base_wait_on_iocstate(ioc, MPI2_IOC_STATE_READY, timeout); 7405 if (ioc_state) { 7406 dfailprintk(ioc, 7407 ioc_info(ioc, "%s: failed going to ready state (ioc_state=0x%x)\n", 7408 __func__, ioc_state)); 7409 return -EFAULT; 7410 } 7411 7412 return 0; 7413 7414 issue_diag_reset: 7415 rc = _base_diag_reset(ioc); 7416 return rc; 7417 } 7418 7419 /** 7420 * _base_get_ioc_facts - obtain ioc facts reply and save in ioc 7421 * @ioc: per adapter object 7422 * 7423 * Return: 0 for success, non-zero for failure.
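 *
 * Note on units (illustration only): several IOC Facts fields are reported
 * in 32-bit words rather than bytes. For example an IOCRequestFrameSize of
 * 32 means 32 * 4 = 128-byte request frames, and a ReplyFrameSize of 32
 * likewise means 128-byte reply frames; _base_allocate_memory_pools()
 * applies the "* 4" conversion when deriving ioc->request_sz and
 * ioc->reply_sz from these facts.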
7424 */ 7425 static int 7426 _base_get_ioc_facts(struct MPT3SAS_ADAPTER *ioc) 7427 { 7428 Mpi2IOCFactsRequest_t mpi_request; 7429 Mpi2IOCFactsReply_t mpi_reply; 7430 struct mpt3sas_facts *facts; 7431 int mpi_reply_sz, mpi_request_sz, r; 7432 7433 dinitprintk(ioc, ioc_info(ioc, "%s\n", __func__)); 7434 7435 r = _base_wait_for_iocstate(ioc, 10); 7436 if (r) { 7437 dfailprintk(ioc, 7438 ioc_info(ioc, "%s: failed getting to correct state\n", 7439 __func__)); 7440 return r; 7441 } 7442 mpi_reply_sz = sizeof(Mpi2IOCFactsReply_t); 7443 mpi_request_sz = sizeof(Mpi2IOCFactsRequest_t); 7444 memset(&mpi_request, 0, mpi_request_sz); 7445 mpi_request.Function = MPI2_FUNCTION_IOC_FACTS; 7446 r = _base_handshake_req_reply_wait(ioc, mpi_request_sz, 7447 (u32 *)&mpi_request, mpi_reply_sz, (u16 *)&mpi_reply, 5); 7448 7449 if (r != 0) { 7450 ioc_err(ioc, "%s: handshake failed (r=%d)\n", __func__, r); 7451 return r; 7452 } 7453 7454 facts = &ioc->facts; 7455 memset(facts, 0, sizeof(struct mpt3sas_facts)); 7456 facts->MsgVersion = le16_to_cpu(mpi_reply.MsgVersion); 7457 facts->HeaderVersion = le16_to_cpu(mpi_reply.HeaderVersion); 7458 facts->VP_ID = mpi_reply.VP_ID; 7459 facts->VF_ID = mpi_reply.VF_ID; 7460 facts->IOCExceptions = le16_to_cpu(mpi_reply.IOCExceptions); 7461 facts->MaxChainDepth = mpi_reply.MaxChainDepth; 7462 facts->WhoInit = mpi_reply.WhoInit; 7463 facts->NumberOfPorts = mpi_reply.NumberOfPorts; 7464 facts->MaxMSIxVectors = mpi_reply.MaxMSIxVectors; 7465 if (ioc->msix_enable && (facts->MaxMSIxVectors <= 7466 MAX_COMBINED_MSIX_VECTORS(ioc->is_gen35_ioc))) 7467 ioc->combined_reply_queue = 0; 7468 facts->RequestCredit = le16_to_cpu(mpi_reply.RequestCredit); 7469 facts->MaxReplyDescriptorPostQueueDepth = 7470 le16_to_cpu(mpi_reply.MaxReplyDescriptorPostQueueDepth); 7471 facts->ProductID = le16_to_cpu(mpi_reply.ProductID); 7472 facts->IOCCapabilities = le32_to_cpu(mpi_reply.IOCCapabilities); 7473 if ((facts->IOCCapabilities & MPI2_IOCFACTS_CAPABILITY_INTEGRATED_RAID)) 7474 ioc->ir_firmware = 1; 7475 if ((facts->IOCCapabilities & 7476 MPI2_IOCFACTS_CAPABILITY_RDPQ_ARRAY_CAPABLE) && (!reset_devices)) 7477 ioc->rdpq_array_capable = 1; 7478 if ((facts->IOCCapabilities & MPI26_IOCFACTS_CAPABILITY_ATOMIC_REQ) 7479 && ioc->is_aero_ioc) 7480 ioc->atomic_desc_capable = 1; 7481 facts->FWVersion.Word = le32_to_cpu(mpi_reply.FWVersion.Word); 7482 facts->IOCRequestFrameSize = 7483 le16_to_cpu(mpi_reply.IOCRequestFrameSize); 7484 if (ioc->hba_mpi_version_belonged != MPI2_VERSION) { 7485 facts->IOCMaxChainSegmentSize = 7486 le16_to_cpu(mpi_reply.IOCMaxChainSegmentSize); 7487 } 7488 facts->MaxInitiators = le16_to_cpu(mpi_reply.MaxInitiators); 7489 facts->MaxTargets = le16_to_cpu(mpi_reply.MaxTargets); 7490 ioc->shost->max_id = -1; 7491 facts->MaxSasExpanders = le16_to_cpu(mpi_reply.MaxSasExpanders); 7492 facts->MaxEnclosures = le16_to_cpu(mpi_reply.MaxEnclosures); 7493 facts->ProtocolFlags = le16_to_cpu(mpi_reply.ProtocolFlags); 7494 facts->HighPriorityCredit = 7495 le16_to_cpu(mpi_reply.HighPriorityCredit); 7496 facts->ReplyFrameSize = mpi_reply.ReplyFrameSize; 7497 facts->MaxDevHandle = le16_to_cpu(mpi_reply.MaxDevHandle); 7498 facts->CurrentHostPageSize = mpi_reply.CurrentHostPageSize; 7499 7500 /* 7501 * Get the Page Size from IOC Facts. If it's 0, default to 4k. 
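 * CurrentHostPageSize is an exponent: the IOC page size is
 * 2^CurrentHostPageSize bytes (e.g. a reported value of 12 yields
 * 1 << 12 = 4096).  A computed page_size of 1 (exponent 0) means the
 * field was not reported, so fall back to MPT3SAS_HOST_PAGE_SIZE_4K.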
7502 */ 7503 ioc->page_size = 1 << facts->CurrentHostPageSize; 7504 if (ioc->page_size == 1) { 7505 ioc_info(ioc, "CurrentHostPageSize is 0: Setting default host page size to 4k\n"); 7506 ioc->page_size = 1 << MPT3SAS_HOST_PAGE_SIZE_4K; 7507 } 7508 dinitprintk(ioc, 7509 ioc_info(ioc, "CurrentHostPageSize(%d)\n", 7510 facts->CurrentHostPageSize)); 7511 7512 dinitprintk(ioc, 7513 ioc_info(ioc, "hba queue depth(%d), max chains per io(%d)\n", 7514 facts->RequestCredit, facts->MaxChainDepth)); 7515 dinitprintk(ioc, 7516 ioc_info(ioc, "request frame size(%d), reply frame size(%d)\n", 7517 facts->IOCRequestFrameSize * 4, 7518 facts->ReplyFrameSize * 4)); 7519 return 0; 7520 } 7521 7522 /** 7523 * _base_send_ioc_init - send ioc_init to firmware 7524 * @ioc: per adapter object 7525 * 7526 * Return: 0 for success, non-zero for failure. 7527 */ 7528 static int 7529 _base_send_ioc_init(struct MPT3SAS_ADAPTER *ioc) 7530 { 7531 Mpi2IOCInitRequest_t mpi_request; 7532 Mpi2IOCInitReply_t mpi_reply; 7533 int i, r = 0; 7534 ktime_t current_time; 7535 u16 ioc_status; 7536 u32 reply_post_free_array_sz = 0; 7537 7538 dinitprintk(ioc, ioc_info(ioc, "%s\n", __func__)); 7539 7540 memset(&mpi_request, 0, sizeof(Mpi2IOCInitRequest_t)); 7541 mpi_request.Function = MPI2_FUNCTION_IOC_INIT; 7542 mpi_request.WhoInit = MPI2_WHOINIT_HOST_DRIVER; 7543 mpi_request.VF_ID = 0; /* TODO */ 7544 mpi_request.VP_ID = 0; 7545 mpi_request.MsgVersion = cpu_to_le16(ioc->hba_mpi_version_belonged); 7546 mpi_request.HeaderVersion = cpu_to_le16(MPI2_HEADER_VERSION); 7547 mpi_request.HostPageSize = MPT3SAS_HOST_PAGE_SIZE_4K; 7548 7549 if (_base_is_controller_msix_enabled(ioc)) 7550 mpi_request.HostMSIxVectors = ioc->reply_queue_count; 7551 mpi_request.SystemRequestFrameSize = cpu_to_le16(ioc->request_sz/4); 7552 mpi_request.ReplyDescriptorPostQueueDepth = 7553 cpu_to_le16(ioc->reply_post_queue_depth); 7554 mpi_request.ReplyFreeQueueDepth = 7555 cpu_to_le16(ioc->reply_free_queue_depth); 7556 7557 mpi_request.SenseBufferAddressHigh = 7558 cpu_to_le32((u64)ioc->sense_dma >> 32); 7559 mpi_request.SystemReplyAddressHigh = 7560 cpu_to_le32((u64)ioc->reply_dma >> 32); 7561 mpi_request.SystemRequestFrameBaseAddress = 7562 cpu_to_le64((u64)ioc->request_dma); 7563 mpi_request.ReplyFreeQueueAddress = 7564 cpu_to_le64((u64)ioc->reply_free_dma); 7565 7566 if (ioc->rdpq_array_enable) { 7567 reply_post_free_array_sz = ioc->reply_queue_count * 7568 sizeof(Mpi2IOCInitRDPQArrayEntry); 7569 memset(ioc->reply_post_free_array, 0, reply_post_free_array_sz); 7570 for (i = 0; i < ioc->reply_queue_count; i++) 7571 ioc->reply_post_free_array[i].RDPQBaseAddress = 7572 cpu_to_le64( 7573 (u64)ioc->reply_post[i].reply_post_free_dma); 7574 mpi_request.MsgFlags = MPI2_IOCINIT_MSGFLAG_RDPQ_ARRAY_MODE; 7575 mpi_request.ReplyDescriptorPostQueueAddress = 7576 cpu_to_le64((u64)ioc->reply_post_free_array_dma); 7577 } else { 7578 mpi_request.ReplyDescriptorPostQueueAddress = 7579 cpu_to_le64((u64)ioc->reply_post[0].reply_post_free_dma); 7580 } 7581 7582 /* 7583 * Set the flag to enable CoreDump state feature in IOC firmware. 7584 */ 7585 mpi_request.ConfigurationFlags |= 7586 cpu_to_le16(MPI26_IOCINIT_CFGFLAGS_COREDUMP_ENABLE); 7587 7588 /* This time stamp specifies number of milliseconds 7589 * since epoch ~ midnight January 1, 1970. 
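 * The value is taken with ktime_get_real(), converted to milliseconds
 * and stored little-endian in the IOCInit TimeStamp field; the
 * TimeSync counter (ioc->timestamp_update_count) is reset once the
 * IOCInit handshake completes.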
7590 */ 7591 current_time = ktime_get_real(); 7592 mpi_request.TimeStamp = cpu_to_le64(ktime_to_ms(current_time)); 7593 7594 if (ioc->logging_level & MPT_DEBUG_INIT) { 7595 __le32 *mfp; 7596 int i; 7597 7598 mfp = (__le32 *)&mpi_request; 7599 ioc_info(ioc, "\toffset:data\n"); 7600 for (i = 0; i < sizeof(Mpi2IOCInitRequest_t)/4; i++) 7601 ioc_info(ioc, "\t[0x%02x]:%08x\n", i*4, 7602 le32_to_cpu(mfp[i])); 7603 } 7604 7605 r = _base_handshake_req_reply_wait(ioc, 7606 sizeof(Mpi2IOCInitRequest_t), (u32 *)&mpi_request, 7607 sizeof(Mpi2IOCInitReply_t), (u16 *)&mpi_reply, 30); 7608 7609 if (r != 0) { 7610 ioc_err(ioc, "%s: handshake failed (r=%d)\n", __func__, r); 7611 return r; 7612 } 7613 7614 ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & MPI2_IOCSTATUS_MASK; 7615 if (ioc_status != MPI2_IOCSTATUS_SUCCESS || 7616 mpi_reply.IOCLogInfo) { 7617 ioc_err(ioc, "%s: failed\n", __func__); 7618 r = -EIO; 7619 } 7620 7621 /* Reset TimeSync Counter*/ 7622 ioc->timestamp_update_count = 0; 7623 return r; 7624 } 7625 7626 /** 7627 * mpt3sas_port_enable_done - command completion routine for port enable 7628 * @ioc: per adapter object 7629 * @smid: system request message index 7630 * @msix_index: MSIX table index supplied by the OS 7631 * @reply: reply message frame(lower 32bit addr) 7632 * 7633 * Return: 1 meaning mf should be freed from _base_interrupt 7634 * 0 means the mf is freed from this function. 7635 */ 7636 u8 7637 mpt3sas_port_enable_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index, 7638 u32 reply) 7639 { 7640 MPI2DefaultReply_t *mpi_reply; 7641 u16 ioc_status; 7642 7643 if (ioc->port_enable_cmds.status == MPT3_CMD_NOT_USED) 7644 return 1; 7645 7646 mpi_reply = mpt3sas_base_get_reply_virt_addr(ioc, reply); 7647 if (!mpi_reply) 7648 return 1; 7649 7650 if (mpi_reply->Function != MPI2_FUNCTION_PORT_ENABLE) 7651 return 1; 7652 7653 ioc->port_enable_cmds.status &= ~MPT3_CMD_PENDING; 7654 ioc->port_enable_cmds.status |= MPT3_CMD_COMPLETE; 7655 ioc->port_enable_cmds.status |= MPT3_CMD_REPLY_VALID; 7656 memcpy(ioc->port_enable_cmds.reply, mpi_reply, mpi_reply->MsgLength*4); 7657 ioc_status = le16_to_cpu(mpi_reply->IOCStatus) & MPI2_IOCSTATUS_MASK; 7658 if (ioc_status != MPI2_IOCSTATUS_SUCCESS) 7659 ioc->port_enable_failed = 1; 7660 7661 if (ioc->port_enable_cmds.status & MPT3_CMD_COMPLETE_ASYNC) { 7662 ioc->port_enable_cmds.status &= ~MPT3_CMD_COMPLETE_ASYNC; 7663 if (ioc_status == MPI2_IOCSTATUS_SUCCESS) { 7664 mpt3sas_port_enable_complete(ioc); 7665 return 1; 7666 } else { 7667 ioc->start_scan_failed = ioc_status; 7668 ioc->start_scan = 0; 7669 return 1; 7670 } 7671 } 7672 complete(&ioc->port_enable_cmds.done); 7673 return 1; 7674 } 7675 7676 /** 7677 * _base_send_port_enable - send port_enable(discovery stuff) to firmware 7678 * @ioc: per adapter object 7679 * 7680 * Return: 0 for success, non-zero for failure. 
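 *
 * This is the synchronous variant: it blocks on port_enable_cmds.done for
 * up to 300 seconds waiting for the PortEnable reply.  mpt3sas_port_enable()
 * below posts the same request asynchronously (MPT3_CMD_COMPLETE_ASYNC),
 * with completion handled in mpt3sas_port_enable_done().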
7681 */ 7682 static int 7683 _base_send_port_enable(struct MPT3SAS_ADAPTER *ioc) 7684 { 7685 Mpi2PortEnableRequest_t *mpi_request; 7686 Mpi2PortEnableReply_t *mpi_reply; 7687 int r = 0; 7688 u16 smid; 7689 u16 ioc_status; 7690 7691 ioc_info(ioc, "sending port enable !!\n"); 7692 7693 if (ioc->port_enable_cmds.status & MPT3_CMD_PENDING) { 7694 ioc_err(ioc, "%s: internal command already in use\n", __func__); 7695 return -EAGAIN; 7696 } 7697 7698 smid = mpt3sas_base_get_smid(ioc, ioc->port_enable_cb_idx); 7699 if (!smid) { 7700 ioc_err(ioc, "%s: failed obtaining a smid\n", __func__); 7701 return -EAGAIN; 7702 } 7703 7704 ioc->port_enable_cmds.status = MPT3_CMD_PENDING; 7705 mpi_request = mpt3sas_base_get_msg_frame(ioc, smid); 7706 ioc->port_enable_cmds.smid = smid; 7707 memset(mpi_request, 0, sizeof(Mpi2PortEnableRequest_t)); 7708 mpi_request->Function = MPI2_FUNCTION_PORT_ENABLE; 7709 7710 init_completion(&ioc->port_enable_cmds.done); 7711 ioc->put_smid_default(ioc, smid); 7712 wait_for_completion_timeout(&ioc->port_enable_cmds.done, 300*HZ); 7713 if (!(ioc->port_enable_cmds.status & MPT3_CMD_COMPLETE)) { 7714 ioc_err(ioc, "%s: timeout\n", __func__); 7715 _debug_dump_mf(mpi_request, 7716 sizeof(Mpi2PortEnableRequest_t)/4); 7717 if (ioc->port_enable_cmds.status & MPT3_CMD_RESET) 7718 r = -EFAULT; 7719 else 7720 r = -ETIME; 7721 goto out; 7722 } 7723 7724 mpi_reply = ioc->port_enable_cmds.reply; 7725 ioc_status = le16_to_cpu(mpi_reply->IOCStatus) & MPI2_IOCSTATUS_MASK; 7726 if (ioc_status != MPI2_IOCSTATUS_SUCCESS) { 7727 ioc_err(ioc, "%s: failed with (ioc_status=0x%08x)\n", 7728 __func__, ioc_status); 7729 r = -EFAULT; 7730 goto out; 7731 } 7732 7733 out: 7734 ioc->port_enable_cmds.status = MPT3_CMD_NOT_USED; 7735 ioc_info(ioc, "port enable: %s\n", r == 0 ? "SUCCESS" : "FAILED"); 7736 return r; 7737 } 7738 7739 /** 7740 * mpt3sas_port_enable - initiate firmware discovery (don't wait for reply) 7741 * @ioc: per adapter object 7742 * 7743 * Return: 0 for success, non-zero for failure. 7744 */ 7745 int 7746 mpt3sas_port_enable(struct MPT3SAS_ADAPTER *ioc) 7747 { 7748 Mpi2PortEnableRequest_t *mpi_request; 7749 u16 smid; 7750 7751 ioc_info(ioc, "sending port enable !!\n"); 7752 7753 if (ioc->port_enable_cmds.status & MPT3_CMD_PENDING) { 7754 ioc_err(ioc, "%s: internal command already in use\n", __func__); 7755 return -EAGAIN; 7756 } 7757 7758 smid = mpt3sas_base_get_smid(ioc, ioc->port_enable_cb_idx); 7759 if (!smid) { 7760 ioc_err(ioc, "%s: failed obtaining a smid\n", __func__); 7761 return -EAGAIN; 7762 } 7763 ioc->drv_internal_flags |= MPT_DRV_INTERNAL_FIRST_PE_ISSUED; 7764 ioc->port_enable_cmds.status = MPT3_CMD_PENDING; 7765 ioc->port_enable_cmds.status |= MPT3_CMD_COMPLETE_ASYNC; 7766 mpi_request = mpt3sas_base_get_msg_frame(ioc, smid); 7767 ioc->port_enable_cmds.smid = smid; 7768 memset(mpi_request, 0, sizeof(Mpi2PortEnableRequest_t)); 7769 mpi_request->Function = MPI2_FUNCTION_PORT_ENABLE; 7770 7771 ioc->put_smid_default(ioc, smid); 7772 return 0; 7773 } 7774 7775 /** 7776 * _base_determine_wait_on_discovery - desposition 7777 * @ioc: per adapter object 7778 * 7779 * Decide whether to wait on discovery to complete. Used to either 7780 * locate boot device, or report volumes ahead of physical devices. 7781 * 7782 * Return: 1 for wait, 0 for don't wait. 7783 */ 7784 static int 7785 _base_determine_wait_on_discovery(struct MPT3SAS_ADAPTER *ioc) 7786 { 7787 /* We wait for discovery to complete if IR firmware is loaded. 
7788 * The sas topology events arrive before PD events, so we need time to 7789 * turn on the bit in ioc->pd_handles to indicate PD 7790 * Also, it maybe required to report Volumes ahead of physical 7791 * devices when MPI2_IOCPAGE8_IRFLAGS_LOW_VOLUME_MAPPING is set. 7792 */ 7793 if (ioc->ir_firmware) 7794 return 1; 7795 7796 /* if no Bios, then we don't need to wait */ 7797 if (!ioc->bios_pg3.BiosVersion) 7798 return 0; 7799 7800 /* Bios is present, then we drop down here. 7801 * 7802 * If there any entries in the Bios Page 2, then we wait 7803 * for discovery to complete. 7804 */ 7805 7806 /* Current Boot Device */ 7807 if ((ioc->bios_pg2.CurrentBootDeviceForm & 7808 MPI2_BIOSPAGE2_FORM_MASK) == 7809 MPI2_BIOSPAGE2_FORM_NO_DEVICE_SPECIFIED && 7810 /* Request Boot Device */ 7811 (ioc->bios_pg2.ReqBootDeviceForm & 7812 MPI2_BIOSPAGE2_FORM_MASK) == 7813 MPI2_BIOSPAGE2_FORM_NO_DEVICE_SPECIFIED && 7814 /* Alternate Request Boot Device */ 7815 (ioc->bios_pg2.ReqAltBootDeviceForm & 7816 MPI2_BIOSPAGE2_FORM_MASK) == 7817 MPI2_BIOSPAGE2_FORM_NO_DEVICE_SPECIFIED) 7818 return 0; 7819 7820 return 1; 7821 } 7822 7823 /** 7824 * _base_unmask_events - turn on notification for this event 7825 * @ioc: per adapter object 7826 * @event: firmware event 7827 * 7828 * The mask is stored in ioc->event_masks. 7829 */ 7830 static void 7831 _base_unmask_events(struct MPT3SAS_ADAPTER *ioc, u16 event) 7832 { 7833 u32 desired_event; 7834 7835 if (event >= 128) 7836 return; 7837 7838 desired_event = (1 << (event % 32)); 7839 7840 if (event < 32) 7841 ioc->event_masks[0] &= ~desired_event; 7842 else if (event < 64) 7843 ioc->event_masks[1] &= ~desired_event; 7844 else if (event < 96) 7845 ioc->event_masks[2] &= ~desired_event; 7846 else if (event < 128) 7847 ioc->event_masks[3] &= ~desired_event; 7848 } 7849 7850 /** 7851 * _base_event_notification - send event notification 7852 * @ioc: per adapter object 7853 * 7854 * Return: 0 for success, non-zero for failure. 
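 *
 * Sends an EventNotification request carrying the current ioc->event_masks[]
 * (event n is controlled by bit n % 32 of mask word n / 32; a cleared bit
 * means the event is enabled, see _base_unmask_events()).  The request uses
 * the internal base command frame and waits up to 30 seconds; on timeout the
 * IOC is checked for a fault and a diag reset may be issued, unless the
 * command was already terminated by a host reset.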
7855 */ 7856 static int 7857 _base_event_notification(struct MPT3SAS_ADAPTER *ioc) 7858 { 7859 Mpi2EventNotificationRequest_t *mpi_request; 7860 u16 smid; 7861 int r = 0; 7862 int i, issue_diag_reset = 0; 7863 7864 dinitprintk(ioc, ioc_info(ioc, "%s\n", __func__)); 7865 7866 if (ioc->base_cmds.status & MPT3_CMD_PENDING) { 7867 ioc_err(ioc, "%s: internal command already in use\n", __func__); 7868 return -EAGAIN; 7869 } 7870 7871 smid = mpt3sas_base_get_smid(ioc, ioc->base_cb_idx); 7872 if (!smid) { 7873 ioc_err(ioc, "%s: failed obtaining a smid\n", __func__); 7874 return -EAGAIN; 7875 } 7876 ioc->base_cmds.status = MPT3_CMD_PENDING; 7877 mpi_request = mpt3sas_base_get_msg_frame(ioc, smid); 7878 ioc->base_cmds.smid = smid; 7879 memset(mpi_request, 0, sizeof(Mpi2EventNotificationRequest_t)); 7880 mpi_request->Function = MPI2_FUNCTION_EVENT_NOTIFICATION; 7881 mpi_request->VF_ID = 0; /* TODO */ 7882 mpi_request->VP_ID = 0; 7883 for (i = 0; i < MPI2_EVENT_NOTIFY_EVENTMASK_WORDS; i++) 7884 mpi_request->EventMasks[i] = 7885 cpu_to_le32(ioc->event_masks[i]); 7886 init_completion(&ioc->base_cmds.done); 7887 ioc->put_smid_default(ioc, smid); 7888 wait_for_completion_timeout(&ioc->base_cmds.done, 30*HZ); 7889 if (!(ioc->base_cmds.status & MPT3_CMD_COMPLETE)) { 7890 ioc_err(ioc, "%s: timeout\n", __func__); 7891 _debug_dump_mf(mpi_request, 7892 sizeof(Mpi2EventNotificationRequest_t)/4); 7893 if (ioc->base_cmds.status & MPT3_CMD_RESET) 7894 r = -EFAULT; 7895 else 7896 issue_diag_reset = 1; 7897 7898 } else 7899 dinitprintk(ioc, ioc_info(ioc, "%s: complete\n", __func__)); 7900 ioc->base_cmds.status = MPT3_CMD_NOT_USED; 7901 7902 if (issue_diag_reset) { 7903 if (ioc->drv_internal_flags & MPT_DRV_INTERNAL_FIRST_PE_ISSUED) 7904 return -EFAULT; 7905 if (mpt3sas_base_check_for_fault_and_issue_reset(ioc)) 7906 return -EFAULT; 7907 r = -EAGAIN; 7908 } 7909 return r; 7910 } 7911 7912 /** 7913 * mpt3sas_base_validate_event_type - validating event types 7914 * @ioc: per adapter object 7915 * @event_type: firmware event 7916 * 7917 * This will turn on firmware event notification when application 7918 * ask for that event. We don't mask events that are already enabled. 7919 */ 7920 void 7921 mpt3sas_base_validate_event_type(struct MPT3SAS_ADAPTER *ioc, u32 *event_type) 7922 { 7923 int i, j; 7924 u32 event_mask, desired_event; 7925 u8 send_update_to_fw; 7926 7927 for (i = 0, send_update_to_fw = 0; i < 7928 MPI2_EVENT_NOTIFY_EVENTMASK_WORDS; i++) { 7929 event_mask = ~event_type[i]; 7930 desired_event = 1; 7931 for (j = 0; j < 32; j++) { 7932 if (!(event_mask & desired_event) && 7933 (ioc->event_masks[i] & desired_event)) { 7934 ioc->event_masks[i] &= ~desired_event; 7935 send_update_to_fw = 1; 7936 } 7937 desired_event = (desired_event << 1); 7938 } 7939 } 7940 7941 if (!send_update_to_fw) 7942 return; 7943 7944 mutex_lock(&ioc->base_cmds.mutex); 7945 _base_event_notification(ioc); 7946 mutex_unlock(&ioc->base_cmds.mutex); 7947 } 7948 7949 /** 7950 * mpt3sas_base_unlock_and_get_host_diagnostic- enable Host Diagnostic Register writes 7951 * @ioc: per adapter object 7952 * @host_diagnostic: host diagnostic register content 7953 * 7954 * Return: 0 for success, non-zero for failure. 
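 *
 * Writes the flush key followed by the six magic keys to the WriteSequence
 * register and then polls HostDiagnostic until DIAG_WRITE_ENABLE is set,
 * retrying up to 20 times with a 100 msec delay between attempts.  The
 * caller (see _base_diag_reset()) holds ioc->hostdiag_unlock_mutex and
 * pairs this with mpt3sas_base_lock_host_diagnostic().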
7955 */ 7956 7957 int 7958 mpt3sas_base_unlock_and_get_host_diagnostic(struct MPT3SAS_ADAPTER *ioc, 7959 u32 *host_diagnostic) 7960 { 7961 7962 u32 count; 7963 *host_diagnostic = 0; 7964 count = 0; 7965 7966 do { 7967 /* Write magic sequence to WriteSequence register 7968 * Loop until in diagnostic mode 7969 */ 7970 drsprintk(ioc, ioc_info(ioc, "write magic sequence\n")); 7971 writel(MPI2_WRSEQ_FLUSH_KEY_VALUE, &ioc->chip->WriteSequence); 7972 writel(MPI2_WRSEQ_1ST_KEY_VALUE, &ioc->chip->WriteSequence); 7973 writel(MPI2_WRSEQ_2ND_KEY_VALUE, &ioc->chip->WriteSequence); 7974 writel(MPI2_WRSEQ_3RD_KEY_VALUE, &ioc->chip->WriteSequence); 7975 writel(MPI2_WRSEQ_4TH_KEY_VALUE, &ioc->chip->WriteSequence); 7976 writel(MPI2_WRSEQ_5TH_KEY_VALUE, &ioc->chip->WriteSequence); 7977 writel(MPI2_WRSEQ_6TH_KEY_VALUE, &ioc->chip->WriteSequence); 7978 7979 /* wait 100 msec */ 7980 msleep(100); 7981 7982 if (count++ > 20) { 7983 ioc_info(ioc, 7984 "Stop writing magic sequence after 20 retries\n"); 7985 _base_dump_reg_set(ioc); 7986 return -EFAULT; 7987 } 7988 7989 *host_diagnostic = ioc->base_readl_ext_retry(&ioc->chip->HostDiagnostic); 7990 drsprintk(ioc, 7991 ioc_info(ioc, "wrote magic sequence: count(%d), host_diagnostic(0x%08x)\n", 7992 count, *host_diagnostic)); 7993 7994 } while ((*host_diagnostic & MPI2_DIAG_DIAG_WRITE_ENABLE) == 0); 7995 return 0; 7996 } 7997 7998 /** 7999 * mpt3sas_base_lock_host_diagnostic: Disable Host Diagnostic Register writes 8000 * @ioc: per adapter object 8001 */ 8002 8003 void 8004 mpt3sas_base_lock_host_diagnostic(struct MPT3SAS_ADAPTER *ioc) 8005 { 8006 drsprintk(ioc, ioc_info(ioc, "disable writes to the diagnostic register\n")); 8007 writel(MPI2_WRSEQ_FLUSH_KEY_VALUE, &ioc->chip->WriteSequence); 8008 } 8009 8010 /** 8011 * _base_diag_reset - the "big hammer" start of day reset 8012 * @ioc: per adapter object 8013 * 8014 * Return: 0 for success, non-zero for failure. 
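 *
 * Sequence: lock PCI config space access, unlock the host diagnostic
 * register, set MPI2_DIAG_RESET_ADAPTER, poll (for roughly 300 seconds at
 * most) until the bit clears, re-enable the HCDW if the IOC comes back in
 * HCB mode, clear HOLD_IOC_RESET to restart the adapter, re-lock the
 * diagnostic register and finally wait up to 20 seconds for the IOC to
 * reach the READY state.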
8015 */ 8016 static int 8017 _base_diag_reset(struct MPT3SAS_ADAPTER *ioc) 8018 { 8019 u32 host_diagnostic; 8020 u32 ioc_state; 8021 u32 count; 8022 u32 hcb_size; 8023 8024 ioc_info(ioc, "sending diag reset !!\n"); 8025 8026 pci_cfg_access_lock(ioc->pdev); 8027 8028 drsprintk(ioc, ioc_info(ioc, "clear interrupts\n")); 8029 8030 mutex_lock(&ioc->hostdiag_unlock_mutex); 8031 if (mpt3sas_base_unlock_and_get_host_diagnostic(ioc, &host_diagnostic)) 8032 goto unlock; 8033 8034 hcb_size = ioc->base_readl(&ioc->chip->HCBSize); 8035 drsprintk(ioc, ioc_info(ioc, "diag reset: issued\n")); 8036 writel(host_diagnostic | MPI2_DIAG_RESET_ADAPTER, 8037 &ioc->chip->HostDiagnostic); 8038 8039 /* This delay allows the chip PCIe hardware time to finish reset tasks */ 8040 msleep(MPI2_HARD_RESET_PCIE_FIRST_READ_DELAY_MICRO_SEC/1000); 8041 8042 /* Approximately 300 second max wait */ 8043 for (count = 0; count < (300000000 / 8044 MPI2_HARD_RESET_PCIE_SECOND_READ_DELAY_MICRO_SEC); count++) { 8045 8046 host_diagnostic = ioc->base_readl_ext_retry(&ioc->chip->HostDiagnostic); 8047 8048 if (host_diagnostic == 0xFFFFFFFF) { 8049 ioc_info(ioc, 8050 "Invalid host diagnostic register value\n"); 8051 _base_dump_reg_set(ioc); 8052 goto unlock; 8053 } 8054 if (!(host_diagnostic & MPI2_DIAG_RESET_ADAPTER)) 8055 break; 8056 8057 /* Wait to pass the second read delay window */ 8058 msleep(MPI2_HARD_RESET_PCIE_SECOND_READ_DELAY_MICRO_SEC/1000); 8059 } 8060 8061 if (host_diagnostic & MPI2_DIAG_HCB_MODE) { 8062 8063 drsprintk(ioc, 8064 ioc_info(ioc, "restart the adapter assuming the\n" 8065 "HCB Address points to good F/W\n")); 8066 host_diagnostic &= ~MPI2_DIAG_BOOT_DEVICE_SELECT_MASK; 8067 host_diagnostic |= MPI2_DIAG_BOOT_DEVICE_SELECT_HCDW; 8068 writel(host_diagnostic, &ioc->chip->HostDiagnostic); 8069 8070 drsprintk(ioc, ioc_info(ioc, "re-enable the HCDW\n")); 8071 writel(hcb_size | MPI2_HCB_SIZE_HCB_ENABLE, 8072 &ioc->chip->HCBSize); 8073 } 8074 8075 drsprintk(ioc, ioc_info(ioc, "restart the adapter\n")); 8076 writel(host_diagnostic & ~MPI2_DIAG_HOLD_IOC_RESET, 8077 &ioc->chip->HostDiagnostic); 8078 8079 mpt3sas_base_lock_host_diagnostic(ioc); 8080 mutex_unlock(&ioc->hostdiag_unlock_mutex); 8081 8082 drsprintk(ioc, ioc_info(ioc, "Wait for FW to go to the READY state\n")); 8083 ioc_state = _base_wait_on_iocstate(ioc, MPI2_IOC_STATE_READY, 20); 8084 if (ioc_state) { 8085 ioc_err(ioc, "%s: failed going to ready state (ioc_state=0x%x)\n", 8086 __func__, ioc_state); 8087 _base_dump_reg_set(ioc); 8088 goto fail; 8089 } 8090 8091 pci_cfg_access_unlock(ioc->pdev); 8092 ioc_info(ioc, "diag reset: SUCCESS\n"); 8093 return 0; 8094 8095 unlock: 8096 mutex_unlock(&ioc->hostdiag_unlock_mutex); 8097 8098 fail: 8099 pci_cfg_access_unlock(ioc->pdev); 8100 ioc_err(ioc, "diag reset: FAILED\n"); 8101 return -EFAULT; 8102 } 8103 8104 /** 8105 * mpt3sas_base_make_ioc_ready - put controller in READY state 8106 * @ioc: per adapter object 8107 * @type: FORCE_BIG_HAMMER or SOFT_RESET 8108 * 8109 * Return: 0 for success, non-zero for failure. 
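 *
 * An IOC found in RESET is given up to 10 seconds to transition to READY
 * on its own.  FAULT and COREDUMP states (after waiting for coredump
 * collection when one is in progress) fall back to a diag reset, as does
 * type == FORCE_BIG_HAMMER; otherwise an OPERATIONAL IOC is first asked
 * for a message unit reset.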
8110 */ 8111 int 8112 mpt3sas_base_make_ioc_ready(struct MPT3SAS_ADAPTER *ioc, enum reset_type type) 8113 { 8114 u32 ioc_state; 8115 int rc; 8116 int count; 8117 8118 dinitprintk(ioc, ioc_info(ioc, "%s\n", __func__)); 8119 8120 if (ioc->pci_error_recovery) 8121 return 0; 8122 8123 ioc_state = mpt3sas_base_get_iocstate(ioc, 0); 8124 dhsprintk(ioc, 8125 ioc_info(ioc, "%s: ioc_state(0x%08x)\n", 8126 __func__, ioc_state)); 8127 8128 /* if in RESET state, it should move to READY state shortly */ 8129 count = 0; 8130 if ((ioc_state & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_RESET) { 8131 while ((ioc_state & MPI2_IOC_STATE_MASK) != 8132 MPI2_IOC_STATE_READY) { 8133 if (count++ == 10) { 8134 ioc_err(ioc, "%s: failed going to ready state (ioc_state=0x%x)\n", 8135 __func__, ioc_state); 8136 return -EFAULT; 8137 } 8138 ssleep(1); 8139 ioc_state = mpt3sas_base_get_iocstate(ioc, 0); 8140 } 8141 } 8142 8143 if ((ioc_state & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_READY) 8144 return 0; 8145 8146 if (ioc_state & MPI2_DOORBELL_USED) { 8147 ioc_info(ioc, "unexpected doorbell active!\n"); 8148 goto issue_diag_reset; 8149 } 8150 8151 if ((ioc_state & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_FAULT) { 8152 mpt3sas_print_fault_code(ioc, ioc_state & 8153 MPI2_DOORBELL_DATA_MASK); 8154 goto issue_diag_reset; 8155 } 8156 8157 if ((ioc_state & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_COREDUMP) { 8158 /* 8159 * if host reset is invoked while watch dog thread is waiting 8160 * for IOC state to be changed to Fault state then driver has 8161 * to wait here for CoreDump state to clear otherwise reset 8162 * will be issued to the FW and FW move the IOC state to 8163 * reset state without copying the FW logs to coredump region. 8164 */ 8165 if (ioc->ioc_coredump_loop != MPT3SAS_COREDUMP_LOOP_DONE) { 8166 mpt3sas_print_coredump_info(ioc, ioc_state & 8167 MPI2_DOORBELL_DATA_MASK); 8168 mpt3sas_base_wait_for_coredump_completion(ioc, 8169 __func__); 8170 } 8171 goto issue_diag_reset; 8172 } 8173 8174 if (type == FORCE_BIG_HAMMER) 8175 goto issue_diag_reset; 8176 8177 if ((ioc_state & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_OPERATIONAL) 8178 if (!(_base_send_ioc_reset(ioc, 8179 MPI2_FUNCTION_IOC_MESSAGE_UNIT_RESET, 15))) { 8180 return 0; 8181 } 8182 8183 issue_diag_reset: 8184 rc = _base_diag_reset(ioc); 8185 return rc; 8186 } 8187 8188 /** 8189 * _base_make_ioc_operational - put controller in OPERATIONAL state 8190 * @ioc: per adapter object 8191 * 8192 * Return: 0 for success, non-zero for failure. 
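 *
 * Rebuilds the driver-side queues and re-initializes the IOC: the delayed
 * target-reset/sas-control/event-ack lists are purged, the hi-priority and
 * internal SMID free lists are rebuilt, the Reply Free and Reply Descriptor
 * Post queues are re-seeded, IOCInit is sent, the reply host index registers
 * are programmed, interrupts are unmasked and event notification plus port
 * enable are issued (port enable is deferred to the SCSI scan when the
 * driver is not in host recovery).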
8193 */ 8194 static int 8195 _base_make_ioc_operational(struct MPT3SAS_ADAPTER *ioc) 8196 { 8197 int r, i, index, rc; 8198 unsigned long flags; 8199 u32 reply_address; 8200 u16 smid; 8201 struct _tr_list *delayed_tr, *delayed_tr_next; 8202 struct _sc_list *delayed_sc, *delayed_sc_next; 8203 struct _event_ack_list *delayed_event_ack, *delayed_event_ack_next; 8204 u8 hide_flag; 8205 struct adapter_reply_queue *reply_q; 8206 Mpi2ReplyDescriptorsUnion_t *reply_post_free_contig; 8207 8208 dinitprintk(ioc, ioc_info(ioc, "%s\n", __func__)); 8209 8210 /* clean the delayed target reset list */ 8211 list_for_each_entry_safe(delayed_tr, delayed_tr_next, 8212 &ioc->delayed_tr_list, list) { 8213 list_del(&delayed_tr->list); 8214 kfree(delayed_tr); 8215 } 8216 8217 8218 list_for_each_entry_safe(delayed_tr, delayed_tr_next, 8219 &ioc->delayed_tr_volume_list, list) { 8220 list_del(&delayed_tr->list); 8221 kfree(delayed_tr); 8222 } 8223 8224 list_for_each_entry_safe(delayed_sc, delayed_sc_next, 8225 &ioc->delayed_sc_list, list) { 8226 list_del(&delayed_sc->list); 8227 kfree(delayed_sc); 8228 } 8229 8230 list_for_each_entry_safe(delayed_event_ack, delayed_event_ack_next, 8231 &ioc->delayed_event_ack_list, list) { 8232 list_del(&delayed_event_ack->list); 8233 kfree(delayed_event_ack); 8234 } 8235 8236 spin_lock_irqsave(&ioc->scsi_lookup_lock, flags); 8237 8238 /* hi-priority queue */ 8239 INIT_LIST_HEAD(&ioc->hpr_free_list); 8240 smid = ioc->hi_priority_smid; 8241 for (i = 0; i < ioc->hi_priority_depth; i++, smid++) { 8242 ioc->hpr_lookup[i].cb_idx = 0xFF; 8243 ioc->hpr_lookup[i].smid = smid; 8244 list_add_tail(&ioc->hpr_lookup[i].tracker_list, 8245 &ioc->hpr_free_list); 8246 } 8247 8248 /* internal queue */ 8249 INIT_LIST_HEAD(&ioc->internal_free_list); 8250 smid = ioc->internal_smid; 8251 for (i = 0; i < ioc->internal_depth; i++, smid++) { 8252 ioc->internal_lookup[i].cb_idx = 0xFF; 8253 ioc->internal_lookup[i].smid = smid; 8254 list_add_tail(&ioc->internal_lookup[i].tracker_list, 8255 &ioc->internal_free_list); 8256 } 8257 8258 spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags); 8259 8260 /* initialize Reply Free Queue */ 8261 for (i = 0, reply_address = (u32)ioc->reply_dma ; 8262 i < ioc->reply_free_queue_depth ; i++, reply_address += 8263 ioc->reply_sz) { 8264 ioc->reply_free[i] = cpu_to_le32(reply_address); 8265 if (ioc->is_mcpu_endpoint) 8266 _base_clone_reply_to_sys_mem(ioc, 8267 reply_address, i); 8268 } 8269 8270 /* initialize reply queues */ 8271 if (ioc->is_driver_loading) 8272 _base_assign_reply_queues(ioc); 8273 8274 /* initialize Reply Post Free Queue */ 8275 index = 0; 8276 reply_post_free_contig = ioc->reply_post[0].reply_post_free; 8277 list_for_each_entry(reply_q, &ioc->reply_queue_list, list) { 8278 /* 8279 * If RDPQ is enabled, switch to the next allocation. 8280 * Otherwise advance within the contiguous region. 
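 * With RDPQ each reply queue gets its own DMA allocation of
 * reply_post_queue_depth descriptors; without it all reply queues
 * share a single contiguous array and we simply step through it.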
8281 */ 8282 if (ioc->rdpq_array_enable) { 8283 reply_q->reply_post_free = 8284 ioc->reply_post[index++].reply_post_free; 8285 } else { 8286 reply_q->reply_post_free = reply_post_free_contig; 8287 reply_post_free_contig += ioc->reply_post_queue_depth; 8288 } 8289 8290 reply_q->reply_post_host_index = 0; 8291 for (i = 0; i < ioc->reply_post_queue_depth; i++) 8292 reply_q->reply_post_free[i].Words = 8293 cpu_to_le64(ULLONG_MAX); 8294 if (!_base_is_controller_msix_enabled(ioc)) 8295 goto skip_init_reply_post_free_queue; 8296 } 8297 skip_init_reply_post_free_queue: 8298 8299 r = _base_send_ioc_init(ioc); 8300 if (r) { 8301 /* 8302 * No need to check IOC state for fault state & issue 8303 * diag reset during host reset. This check is need 8304 * only during driver load time. 8305 */ 8306 if (!ioc->is_driver_loading) 8307 return r; 8308 8309 rc = mpt3sas_base_check_for_fault_and_issue_reset(ioc); 8310 if (rc || (_base_send_ioc_init(ioc))) 8311 return r; 8312 } 8313 8314 /* initialize reply free host index */ 8315 ioc->reply_free_host_index = ioc->reply_free_queue_depth - 1; 8316 writel(ioc->reply_free_host_index, &ioc->chip->ReplyFreeHostIndex); 8317 8318 /* initialize reply post host index */ 8319 list_for_each_entry(reply_q, &ioc->reply_queue_list, list) { 8320 if (ioc->combined_reply_queue) 8321 writel((reply_q->msix_index & 7)<< 8322 MPI2_RPHI_MSIX_INDEX_SHIFT, 8323 ioc->replyPostRegisterIndex[reply_q->msix_index/8]); 8324 else 8325 writel(reply_q->msix_index << 8326 MPI2_RPHI_MSIX_INDEX_SHIFT, 8327 &ioc->chip->ReplyPostHostIndex); 8328 8329 if (!_base_is_controller_msix_enabled(ioc)) 8330 goto skip_init_reply_post_host_index; 8331 } 8332 8333 skip_init_reply_post_host_index: 8334 8335 mpt3sas_base_unmask_interrupts(ioc); 8336 8337 if (ioc->hba_mpi_version_belonged != MPI2_VERSION) { 8338 r = _base_display_fwpkg_version(ioc); 8339 if (r) 8340 return r; 8341 } 8342 8343 r = _base_static_config_pages(ioc); 8344 if (r) 8345 return r; 8346 8347 r = _base_event_notification(ioc); 8348 if (r) 8349 return r; 8350 8351 if (!ioc->shost_recovery) { 8352 8353 if (ioc->is_warpdrive && ioc->manu_pg10.OEMIdentifier 8354 == 0x80) { 8355 hide_flag = (u8) ( 8356 le32_to_cpu(ioc->manu_pg10.OEMSpecificFlags0) & 8357 MFG_PAGE10_HIDE_SSDS_MASK); 8358 if (hide_flag != MFG_PAGE10_HIDE_SSDS_MASK) 8359 ioc->mfg_pg10_hide_flag = hide_flag; 8360 } 8361 8362 ioc->wait_for_discovery_to_complete = 8363 _base_determine_wait_on_discovery(ioc); 8364 8365 return r; /* scan_start and scan_finished support */ 8366 } 8367 8368 r = _base_send_port_enable(ioc); 8369 if (r) 8370 return r; 8371 8372 return r; 8373 } 8374 8375 /** 8376 * mpt3sas_base_free_resources - free resources controller resources 8377 * @ioc: per adapter object 8378 */ 8379 void 8380 mpt3sas_base_free_resources(struct MPT3SAS_ADAPTER *ioc) 8381 { 8382 dexitprintk(ioc, ioc_info(ioc, "%s\n", __func__)); 8383 8384 /* synchronizing freeing resource with pci_access_mutex lock */ 8385 mutex_lock(&ioc->pci_access_mutex); 8386 if (ioc->chip_phys && ioc->chip) { 8387 mpt3sas_base_mask_interrupts(ioc); 8388 ioc->shost_recovery = 1; 8389 mpt3sas_base_make_ioc_ready(ioc, SOFT_RESET); 8390 ioc->shost_recovery = 0; 8391 } 8392 8393 mpt3sas_base_unmap_resources(ioc); 8394 mutex_unlock(&ioc->pci_access_mutex); 8395 return; 8396 } 8397 8398 /** 8399 * mpt3sas_base_attach - attach controller instance 8400 * @ioc: per adapter object 8401 * 8402 * Return: 0 for success, non-zero for failure. 
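 *
 * Attach flow: build the cpu-to-msix table, map PCI resources, read IOC
 * Facts and per-port Port Facts (retrying once after a fault check), select
 * the SG-construction and request-descriptor-posting function pointers
 * based on the MPI version and atomic descriptor capability, allocate the
 * memory pools, device-handle bitmaps and internal command buffers, unmask
 * the events of interest and finally bring the IOC operational (retried
 * once on -EAGAIN).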
8403 */ 8404 int 8405 mpt3sas_base_attach(struct MPT3SAS_ADAPTER *ioc) 8406 { 8407 int r, i, rc; 8408 int cpu_id, last_cpu_id = 0; 8409 8410 dinitprintk(ioc, ioc_info(ioc, "%s\n", __func__)); 8411 8412 /* setup cpu_msix_table */ 8413 ioc->cpu_count = num_online_cpus(); 8414 for_each_online_cpu(cpu_id) 8415 last_cpu_id = cpu_id; 8416 ioc->cpu_msix_table_sz = last_cpu_id + 1; 8417 ioc->cpu_msix_table = kzalloc(ioc->cpu_msix_table_sz, GFP_KERNEL); 8418 ioc->reply_queue_count = 1; 8419 if (!ioc->cpu_msix_table) { 8420 ioc_info(ioc, "Allocation for cpu_msix_table failed!!!\n"); 8421 r = -ENOMEM; 8422 goto out_free_resources; 8423 } 8424 8425 if (ioc->is_warpdrive) { 8426 ioc->reply_post_host_index = kcalloc(ioc->cpu_msix_table_sz, 8427 sizeof(resource_size_t *), GFP_KERNEL); 8428 if (!ioc->reply_post_host_index) { 8429 ioc_info(ioc, "Allocation for reply_post_host_index failed!!!\n"); 8430 r = -ENOMEM; 8431 goto out_free_resources; 8432 } 8433 } 8434 8435 ioc->smp_affinity_enable = smp_affinity_enable; 8436 8437 ioc->rdpq_array_enable_assigned = 0; 8438 ioc->use_32bit_dma = false; 8439 ioc->dma_mask = 64; 8440 if (ioc->is_aero_ioc) { 8441 ioc->base_readl = &_base_readl_aero; 8442 ioc->base_readl_ext_retry = &_base_readl_ext_retry; 8443 } else { 8444 ioc->base_readl = &_base_readl; 8445 ioc->base_readl_ext_retry = &_base_readl; 8446 } 8447 r = mpt3sas_base_map_resources(ioc); 8448 if (r) 8449 goto out_free_resources; 8450 8451 pci_set_drvdata(ioc->pdev, ioc->shost); 8452 r = _base_get_ioc_facts(ioc); 8453 if (r) { 8454 rc = mpt3sas_base_check_for_fault_and_issue_reset(ioc); 8455 if (rc || (_base_get_ioc_facts(ioc))) 8456 goto out_free_resources; 8457 } 8458 8459 switch (ioc->hba_mpi_version_belonged) { 8460 case MPI2_VERSION: 8461 ioc->build_sg_scmd = &_base_build_sg_scmd; 8462 ioc->build_sg = &_base_build_sg; 8463 ioc->build_zero_len_sge = &_base_build_zero_len_sge; 8464 ioc->get_msix_index_for_smlio = &_base_get_msix_index; 8465 break; 8466 case MPI25_VERSION: 8467 case MPI26_VERSION: 8468 /* 8469 * In SAS3.0, 8470 * SCSI_IO, SMP_PASSTHRU, SATA_PASSTHRU, Target Assist, and 8471 * Target Status - all require the IEEE formatted scatter gather 8472 * elements. 8473 */ 8474 ioc->build_sg_scmd = &_base_build_sg_scmd_ieee; 8475 ioc->build_sg = &_base_build_sg_ieee; 8476 ioc->build_nvme_prp = &_base_build_nvme_prp; 8477 ioc->build_zero_len_sge = &_base_build_zero_len_sge_ieee; 8478 ioc->sge_size_ieee = sizeof(Mpi2IeeeSgeSimple64_t); 8479 if (ioc->high_iops_queues) 8480 ioc->get_msix_index_for_smlio = 8481 &_base_get_high_iops_msix_index; 8482 else 8483 ioc->get_msix_index_for_smlio = &_base_get_msix_index; 8484 break; 8485 } 8486 if (ioc->atomic_desc_capable) { 8487 ioc->put_smid_default = &_base_put_smid_default_atomic; 8488 ioc->put_smid_scsi_io = &_base_put_smid_scsi_io_atomic; 8489 ioc->put_smid_fast_path = 8490 &_base_put_smid_fast_path_atomic; 8491 ioc->put_smid_hi_priority = 8492 &_base_put_smid_hi_priority_atomic; 8493 } else { 8494 ioc->put_smid_default = &_base_put_smid_default; 8495 ioc->put_smid_fast_path = &_base_put_smid_fast_path; 8496 ioc->put_smid_hi_priority = &_base_put_smid_hi_priority; 8497 if (ioc->is_mcpu_endpoint) 8498 ioc->put_smid_scsi_io = 8499 &_base_put_smid_mpi_ep_scsi_io; 8500 else 8501 ioc->put_smid_scsi_io = &_base_put_smid_scsi_io; 8502 } 8503 /* 8504 * These function pointers for other requests that don't 8505 * the require IEEE scatter gather elements. 8506 * 8507 * For example Configuration Pages and SAS IOUNIT Control don't. 
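 * Those requests always go through the MPI (non-IEEE) SGE builders
 * assigned below, independent of the HBA generation selected above.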
8508 */ 8509 ioc->build_sg_mpi = &_base_build_sg; 8510 ioc->build_zero_len_sge_mpi = &_base_build_zero_len_sge; 8511 8512 r = mpt3sas_base_make_ioc_ready(ioc, SOFT_RESET); 8513 if (r) 8514 goto out_free_resources; 8515 8516 ioc->pfacts = kcalloc(ioc->facts.NumberOfPorts, 8517 sizeof(struct mpt3sas_port_facts), GFP_KERNEL); 8518 if (!ioc->pfacts) { 8519 r = -ENOMEM; 8520 goto out_free_resources; 8521 } 8522 8523 for (i = 0 ; i < ioc->facts.NumberOfPorts; i++) { 8524 r = _base_get_port_facts(ioc, i); 8525 if (r) { 8526 rc = mpt3sas_base_check_for_fault_and_issue_reset(ioc); 8527 if (rc || (_base_get_port_facts(ioc, i))) 8528 goto out_free_resources; 8529 } 8530 } 8531 8532 r = _base_allocate_memory_pools(ioc); 8533 if (r) 8534 goto out_free_resources; 8535 8536 if (irqpoll_weight > 0) 8537 ioc->thresh_hold = irqpoll_weight; 8538 else 8539 ioc->thresh_hold = ioc->hba_queue_depth/4; 8540 8541 _base_init_irqpolls(ioc); 8542 init_waitqueue_head(&ioc->reset_wq); 8543 8544 /* allocate memory pd handle bitmask list */ 8545 ioc->pd_handles_sz = (ioc->facts.MaxDevHandle / 8); 8546 if (ioc->facts.MaxDevHandle % 8) 8547 ioc->pd_handles_sz++; 8548 /* 8549 * pd_handles_sz should have, at least, the minimal room for 8550 * set_bit()/test_bit(), otherwise out-of-memory touch may occur. 8551 */ 8552 ioc->pd_handles_sz = ALIGN(ioc->pd_handles_sz, sizeof(unsigned long)); 8553 8554 ioc->pd_handles = kzalloc(ioc->pd_handles_sz, 8555 GFP_KERNEL); 8556 if (!ioc->pd_handles) { 8557 r = -ENOMEM; 8558 goto out_free_resources; 8559 } 8560 ioc->blocking_handles = kzalloc(ioc->pd_handles_sz, 8561 GFP_KERNEL); 8562 if (!ioc->blocking_handles) { 8563 r = -ENOMEM; 8564 goto out_free_resources; 8565 } 8566 8567 /* allocate memory for pending OS device add list */ 8568 ioc->pend_os_device_add_sz = (ioc->facts.MaxDevHandle / 8); 8569 if (ioc->facts.MaxDevHandle % 8) 8570 ioc->pend_os_device_add_sz++; 8571 8572 /* 8573 * pend_os_device_add_sz should have, at least, the minimal room for 8574 * set_bit()/test_bit(), otherwise out-of-memory may occur. 
8575 */ 8576 ioc->pend_os_device_add_sz = ALIGN(ioc->pend_os_device_add_sz, 8577 sizeof(unsigned long)); 8578 ioc->pend_os_device_add = kzalloc(ioc->pend_os_device_add_sz, 8579 GFP_KERNEL); 8580 if (!ioc->pend_os_device_add) { 8581 r = -ENOMEM; 8582 goto out_free_resources; 8583 } 8584 8585 ioc->device_remove_in_progress_sz = ioc->pend_os_device_add_sz; 8586 ioc->device_remove_in_progress = 8587 kzalloc(ioc->device_remove_in_progress_sz, GFP_KERNEL); 8588 if (!ioc->device_remove_in_progress) { 8589 r = -ENOMEM; 8590 goto out_free_resources; 8591 } 8592 8593 ioc->fwfault_debug = mpt3sas_fwfault_debug; 8594 8595 /* base internal command bits */ 8596 mutex_init(&ioc->base_cmds.mutex); 8597 ioc->base_cmds.reply = kzalloc(ioc->reply_sz, GFP_KERNEL); 8598 ioc->base_cmds.status = MPT3_CMD_NOT_USED; 8599 8600 /* port_enable command bits */ 8601 ioc->port_enable_cmds.reply = kzalloc(ioc->reply_sz, GFP_KERNEL); 8602 ioc->port_enable_cmds.status = MPT3_CMD_NOT_USED; 8603 8604 /* transport internal command bits */ 8605 ioc->transport_cmds.reply = kzalloc(ioc->reply_sz, GFP_KERNEL); 8606 ioc->transport_cmds.status = MPT3_CMD_NOT_USED; 8607 mutex_init(&ioc->transport_cmds.mutex); 8608 8609 /* scsih internal command bits */ 8610 ioc->scsih_cmds.reply = kzalloc(ioc->reply_sz, GFP_KERNEL); 8611 ioc->scsih_cmds.status = MPT3_CMD_NOT_USED; 8612 mutex_init(&ioc->scsih_cmds.mutex); 8613 8614 /* task management internal command bits */ 8615 ioc->tm_cmds.reply = kzalloc(ioc->reply_sz, GFP_KERNEL); 8616 ioc->tm_cmds.status = MPT3_CMD_NOT_USED; 8617 mutex_init(&ioc->tm_cmds.mutex); 8618 8619 /* config page internal command bits */ 8620 ioc->config_cmds.reply = kzalloc(ioc->reply_sz, GFP_KERNEL); 8621 ioc->config_cmds.status = MPT3_CMD_NOT_USED; 8622 mutex_init(&ioc->config_cmds.mutex); 8623 8624 /* ctl module internal command bits */ 8625 ioc->ctl_cmds.reply = kzalloc(ioc->reply_sz, GFP_KERNEL); 8626 ioc->ctl_cmds.sense = kzalloc(SCSI_SENSE_BUFFERSIZE, GFP_KERNEL); 8627 ioc->ctl_cmds.status = MPT3_CMD_NOT_USED; 8628 mutex_init(&ioc->ctl_cmds.mutex); 8629 8630 if (!ioc->base_cmds.reply || !ioc->port_enable_cmds.reply || 8631 !ioc->transport_cmds.reply || !ioc->scsih_cmds.reply || 8632 !ioc->tm_cmds.reply || !ioc->config_cmds.reply || 8633 !ioc->ctl_cmds.reply || !ioc->ctl_cmds.sense) { 8634 r = -ENOMEM; 8635 goto out_free_resources; 8636 } 8637 8638 for (i = 0; i < MPI2_EVENT_NOTIFY_EVENTMASK_WORDS; i++) 8639 ioc->event_masks[i] = -1; 8640 8641 /* here we enable the events we care about */ 8642 _base_unmask_events(ioc, MPI2_EVENT_SAS_DISCOVERY); 8643 _base_unmask_events(ioc, MPI2_EVENT_SAS_BROADCAST_PRIMITIVE); 8644 _base_unmask_events(ioc, MPI2_EVENT_SAS_TOPOLOGY_CHANGE_LIST); 8645 _base_unmask_events(ioc, MPI2_EVENT_SAS_DEVICE_STATUS_CHANGE); 8646 _base_unmask_events(ioc, MPI2_EVENT_SAS_ENCL_DEVICE_STATUS_CHANGE); 8647 _base_unmask_events(ioc, MPI2_EVENT_IR_CONFIGURATION_CHANGE_LIST); 8648 _base_unmask_events(ioc, MPI2_EVENT_IR_VOLUME); 8649 _base_unmask_events(ioc, MPI2_EVENT_IR_PHYSICAL_DISK); 8650 _base_unmask_events(ioc, MPI2_EVENT_IR_OPERATION_STATUS); 8651 _base_unmask_events(ioc, MPI2_EVENT_LOG_ENTRY_ADDED); 8652 _base_unmask_events(ioc, MPI2_EVENT_TEMP_THRESHOLD); 8653 _base_unmask_events(ioc, MPI2_EVENT_ACTIVE_CABLE_EXCEPTION); 8654 _base_unmask_events(ioc, MPI2_EVENT_SAS_DEVICE_DISCOVERY_ERROR); 8655 if (ioc->hba_mpi_version_belonged == MPI26_VERSION) { 8656 if (ioc->is_gen35_ioc) { 8657 _base_unmask_events(ioc, 8658 MPI2_EVENT_PCIE_DEVICE_STATUS_CHANGE); 8659 _base_unmask_events(ioc, 
MPI2_EVENT_PCIE_ENUMERATION); 8660 _base_unmask_events(ioc, 8661 MPI2_EVENT_PCIE_TOPOLOGY_CHANGE_LIST); 8662 } 8663 } 8664 r = _base_make_ioc_operational(ioc); 8665 if (r == -EAGAIN) { 8666 r = _base_make_ioc_operational(ioc); 8667 if (r) 8668 goto out_free_resources; 8669 } 8670 8671 /* 8672 * Copy current copy of IOCFacts in prev_fw_facts 8673 * and it will be used during online firmware upgrade. 8674 */ 8675 memcpy(&ioc->prev_fw_facts, &ioc->facts, 8676 sizeof(struct mpt3sas_facts)); 8677 8678 ioc->non_operational_loop = 0; 8679 ioc->ioc_coredump_loop = 0; 8680 ioc->got_task_abort_from_ioctl = 0; 8681 return 0; 8682 8683 out_free_resources: 8684 8685 ioc->remove_host = 1; 8686 8687 mpt3sas_base_free_resources(ioc); 8688 _base_release_memory_pools(ioc); 8689 pci_set_drvdata(ioc->pdev, NULL); 8690 kfree(ioc->cpu_msix_table); 8691 if (ioc->is_warpdrive) 8692 kfree(ioc->reply_post_host_index); 8693 kfree(ioc->pd_handles); 8694 kfree(ioc->blocking_handles); 8695 kfree(ioc->device_remove_in_progress); 8696 kfree(ioc->pend_os_device_add); 8697 kfree(ioc->tm_cmds.reply); 8698 kfree(ioc->transport_cmds.reply); 8699 kfree(ioc->scsih_cmds.reply); 8700 kfree(ioc->config_cmds.reply); 8701 kfree(ioc->base_cmds.reply); 8702 kfree(ioc->port_enable_cmds.reply); 8703 kfree(ioc->ctl_cmds.reply); 8704 kfree(ioc->ctl_cmds.sense); 8705 kfree(ioc->pfacts); 8706 ioc->ctl_cmds.reply = NULL; 8707 ioc->base_cmds.reply = NULL; 8708 ioc->tm_cmds.reply = NULL; 8709 ioc->scsih_cmds.reply = NULL; 8710 ioc->transport_cmds.reply = NULL; 8711 ioc->config_cmds.reply = NULL; 8712 ioc->pfacts = NULL; 8713 return r; 8714 } 8715 8716 8717 /** 8718 * mpt3sas_base_detach - remove controller instance 8719 * @ioc: per adapter object 8720 */ 8721 void 8722 mpt3sas_base_detach(struct MPT3SAS_ADAPTER *ioc) 8723 { 8724 dexitprintk(ioc, ioc_info(ioc, "%s\n", __func__)); 8725 8726 mpt3sas_base_stop_watchdog(ioc); 8727 mpt3sas_base_free_resources(ioc); 8728 _base_release_memory_pools(ioc); 8729 mpt3sas_free_enclosure_list(ioc); 8730 pci_set_drvdata(ioc->pdev, NULL); 8731 kfree(ioc->cpu_msix_table); 8732 if (ioc->is_warpdrive) 8733 kfree(ioc->reply_post_host_index); 8734 kfree(ioc->pd_handles); 8735 kfree(ioc->blocking_handles); 8736 kfree(ioc->device_remove_in_progress); 8737 kfree(ioc->pend_os_device_add); 8738 kfree(ioc->pfacts); 8739 kfree(ioc->ctl_cmds.reply); 8740 kfree(ioc->ctl_cmds.sense); 8741 kfree(ioc->base_cmds.reply); 8742 kfree(ioc->port_enable_cmds.reply); 8743 kfree(ioc->tm_cmds.reply); 8744 kfree(ioc->transport_cmds.reply); 8745 kfree(ioc->scsih_cmds.reply); 8746 kfree(ioc->config_cmds.reply); 8747 } 8748 8749 /** 8750 * _base_pre_reset_handler - pre reset handler 8751 * @ioc: per adapter object 8752 */ 8753 static void _base_pre_reset_handler(struct MPT3SAS_ADAPTER *ioc) 8754 { 8755 mpt3sas_scsih_pre_reset_handler(ioc); 8756 mpt3sas_ctl_pre_reset_handler(ioc); 8757 dtmprintk(ioc, ioc_info(ioc, "%s: MPT3_IOC_PRE_RESET\n", __func__)); 8758 } 8759 8760 /** 8761 * _base_clear_outstanding_mpt_commands - clears outstanding mpt commands 8762 * @ioc: per adapter object 8763 */ 8764 static void 8765 _base_clear_outstanding_mpt_commands(struct MPT3SAS_ADAPTER *ioc) 8766 { 8767 dtmprintk(ioc, 8768 ioc_info(ioc, "%s: clear outstanding mpt cmds\n", __func__)); 8769 if (ioc->transport_cmds.status & MPT3_CMD_PENDING) { 8770 ioc->transport_cmds.status |= MPT3_CMD_RESET; 8771 mpt3sas_base_free_smid(ioc, ioc->transport_cmds.smid); 8772 complete(&ioc->transport_cmds.done); 8773 } 8774 if (ioc->base_cmds.status & MPT3_CMD_PENDING) { 8775 
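/* mark the pending base command as terminated by the reset, release its smid and wake the waiter */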
ioc->base_cmds.status |= MPT3_CMD_RESET; 8776 mpt3sas_base_free_smid(ioc, ioc->base_cmds.smid); 8777 complete(&ioc->base_cmds.done); 8778 } 8779 if (ioc->port_enable_cmds.status & MPT3_CMD_PENDING) { 8780 ioc->port_enable_failed = 1; 8781 ioc->port_enable_cmds.status |= MPT3_CMD_RESET; 8782 mpt3sas_base_free_smid(ioc, ioc->port_enable_cmds.smid); 8783 if (ioc->is_driver_loading) { 8784 ioc->start_scan_failed = 8785 MPI2_IOCSTATUS_INTERNAL_ERROR; 8786 ioc->start_scan = 0; 8787 } else { 8788 complete(&ioc->port_enable_cmds.done); 8789 } 8790 } 8791 if (ioc->config_cmds.status & MPT3_CMD_PENDING) { 8792 ioc->config_cmds.status |= MPT3_CMD_RESET; 8793 mpt3sas_base_free_smid(ioc, ioc->config_cmds.smid); 8794 ioc->config_cmds.smid = USHRT_MAX; 8795 complete(&ioc->config_cmds.done); 8796 } 8797 } 8798 8799 /** 8800 * _base_clear_outstanding_commands - clear all outstanding commands 8801 * @ioc: per adapter object 8802 */ 8803 static void _base_clear_outstanding_commands(struct MPT3SAS_ADAPTER *ioc) 8804 { 8805 mpt3sas_scsih_clear_outstanding_scsi_tm_commands(ioc); 8806 mpt3sas_ctl_clear_outstanding_ioctls(ioc); 8807 _base_clear_outstanding_mpt_commands(ioc); 8808 } 8809 8810 /** 8811 * _base_reset_done_handler - reset done handler 8812 * @ioc: per adapter object 8813 */ 8814 static void _base_reset_done_handler(struct MPT3SAS_ADAPTER *ioc) 8815 { 8816 mpt3sas_scsih_reset_done_handler(ioc); 8817 mpt3sas_ctl_reset_done_handler(ioc); 8818 dtmprintk(ioc, ioc_info(ioc, "%s: MPT3_IOC_DONE_RESET\n", __func__)); 8819 } 8820 8821 /** 8822 * mpt3sas_wait_for_commands_to_complete - reset controller 8823 * @ioc: Pointer to MPT_ADAPTER structure 8824 * 8825 * This function is waiting 10s for all pending commands to complete 8826 * prior to putting controller in reset. 8827 */ 8828 void 8829 mpt3sas_wait_for_commands_to_complete(struct MPT3SAS_ADAPTER *ioc) 8830 { 8831 u32 ioc_state; 8832 8833 ioc->pending_io_count = 0; 8834 8835 ioc_state = mpt3sas_base_get_iocstate(ioc, 0); 8836 if ((ioc_state & MPI2_IOC_STATE_MASK) != MPI2_IOC_STATE_OPERATIONAL) 8837 return; 8838 8839 /* pending command count */ 8840 ioc->pending_io_count = scsi_host_busy(ioc->shost); 8841 8842 if (!ioc->pending_io_count) 8843 return; 8844 8845 /* wait for pending commands to complete */ 8846 wait_event_timeout(ioc->reset_wq, ioc->pending_io_count == 0, 10 * HZ); 8847 } 8848 8849 /** 8850 * _base_check_ioc_facts_changes - Look for increase/decrease of IOCFacts 8851 * attributes during online firmware upgrade and update the corresponding 8852 * IOC variables accordingly. 8853 * 8854 * @ioc: Pointer to MPT_ADAPTER structure 8855 */ 8856 static int 8857 _base_check_ioc_facts_changes(struct MPT3SAS_ADAPTER *ioc) 8858 { 8859 u16 pd_handles_sz; 8860 void *pd_handles = NULL, *blocking_handles = NULL; 8861 void *pend_os_device_add = NULL, *device_remove_in_progress = NULL; 8862 struct mpt3sas_facts *old_facts = &ioc->prev_fw_facts; 8863 8864 if (ioc->facts.MaxDevHandle > old_facts->MaxDevHandle) { 8865 pd_handles_sz = (ioc->facts.MaxDevHandle / 8); 8866 if (ioc->facts.MaxDevHandle % 8) 8867 pd_handles_sz++; 8868 8869 /* 8870 * pd_handles should have, at least, the minimal room for 8871 * set_bit()/test_bit(), otherwise out-of-memory touch may 8872 * occur. 
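 * The bitmaps are only grown here (the new firmware reported a larger
 * MaxDevHandle), and since krealloc() does not zero the added bytes,
 * the tail beyond the old size is cleared explicitly after each
 * reallocation.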
8873 */ 8874 pd_handles_sz = ALIGN(pd_handles_sz, sizeof(unsigned long)); 8875 pd_handles = krealloc(ioc->pd_handles, pd_handles_sz, 8876 GFP_KERNEL); 8877 if (!pd_handles) { 8878 ioc_info(ioc, 8879 "Unable to allocate the memory for pd_handles of sz: %d\n", 8880 pd_handles_sz); 8881 return -ENOMEM; 8882 } 8883 memset(pd_handles + ioc->pd_handles_sz, 0, 8884 (pd_handles_sz - ioc->pd_handles_sz)); 8885 ioc->pd_handles = pd_handles; 8886 8887 blocking_handles = krealloc(ioc->blocking_handles, 8888 pd_handles_sz, GFP_KERNEL); 8889 if (!blocking_handles) { 8890 ioc_info(ioc, 8891 "Unable to allocate the memory for " 8892 "blocking_handles of sz: %d\n", 8893 pd_handles_sz); 8894 return -ENOMEM; 8895 } 8896 memset(blocking_handles + ioc->pd_handles_sz, 0, 8897 (pd_handles_sz - ioc->pd_handles_sz)); 8898 ioc->blocking_handles = blocking_handles; 8899 ioc->pd_handles_sz = pd_handles_sz; 8900 8901 pend_os_device_add = krealloc(ioc->pend_os_device_add, 8902 pd_handles_sz, GFP_KERNEL); 8903 if (!pend_os_device_add) { 8904 ioc_info(ioc, 8905 "Unable to allocate the memory for pend_os_device_add of sz: %d\n", 8906 pd_handles_sz); 8907 return -ENOMEM; 8908 } 8909 memset(pend_os_device_add + ioc->pend_os_device_add_sz, 0, 8910 (pd_handles_sz - ioc->pend_os_device_add_sz)); 8911 ioc->pend_os_device_add = pend_os_device_add; 8912 ioc->pend_os_device_add_sz = pd_handles_sz; 8913 8914 device_remove_in_progress = krealloc( 8915 ioc->device_remove_in_progress, pd_handles_sz, GFP_KERNEL); 8916 if (!device_remove_in_progress) { 8917 ioc_info(ioc, 8918 "Unable to allocate the memory for device_remove_in_progress of sz: %d\n", 8919 pd_handles_sz); 8920 return -ENOMEM; 8921 } 8922 memset(device_remove_in_progress + 8923 ioc->device_remove_in_progress_sz, 0, 8924 (pd_handles_sz - ioc->device_remove_in_progress_sz)); 8925 ioc->device_remove_in_progress = device_remove_in_progress; 8926 ioc->device_remove_in_progress_sz = pd_handles_sz; 8927 } 8928 8929 memcpy(&ioc->prev_fw_facts, &ioc->facts, sizeof(struct mpt3sas_facts)); 8930 return 0; 8931 } 8932 8933 /** 8934 * mpt3sas_base_hard_reset_handler - reset controller 8935 * @ioc: Pointer to MPT_ADAPTER structure 8936 * @type: FORCE_BIG_HAMMER or SOFT_RESET 8937 * 8938 * Return: 0 for success, non-zero for failure. 
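 *
 * Resets are serialized on ioc->reset_in_progress_mutex.  The handler sets
 * shost_recovery, runs the pre-reset callbacks, waits up to 10 seconds for
 * outstanding commands, masks interrupts and pauses mq polling, brings the
 * IOC back to READY (a diag reset for FORCE_BIG_HAMMER), re-reads IOC
 * Facts, makes the IOC operational again and runs the reset-done callbacks;
 * if a trace diag buffer was registered, a master trigger event is fired on
 * success.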
8939 */ 8940 int 8941 mpt3sas_base_hard_reset_handler(struct MPT3SAS_ADAPTER *ioc, 8942 enum reset_type type) 8943 { 8944 int r; 8945 unsigned long flags; 8946 u32 ioc_state; 8947 u8 is_fault = 0, is_trigger = 0; 8948 8949 dtmprintk(ioc, ioc_info(ioc, "%s: enter\n", __func__)); 8950 8951 if (ioc->pci_error_recovery) { 8952 ioc_err(ioc, "%s: pci error recovery reset\n", __func__); 8953 r = 0; 8954 goto out_unlocked; 8955 } 8956 8957 if (mpt3sas_fwfault_debug) 8958 mpt3sas_halt_firmware(ioc); 8959 8960 /* wait for an active reset in progress to complete */ 8961 mutex_lock(&ioc->reset_in_progress_mutex); 8962 8963 spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags); 8964 ioc->shost_recovery = 1; 8965 spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags); 8966 8967 if ((ioc->diag_buffer_status[MPI2_DIAG_BUF_TYPE_TRACE] & 8968 MPT3_DIAG_BUFFER_IS_REGISTERED) && 8969 (!(ioc->diag_buffer_status[MPI2_DIAG_BUF_TYPE_TRACE] & 8970 MPT3_DIAG_BUFFER_IS_RELEASED))) { 8971 is_trigger = 1; 8972 ioc_state = mpt3sas_base_get_iocstate(ioc, 0); 8973 if ((ioc_state & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_FAULT || 8974 (ioc_state & MPI2_IOC_STATE_MASK) == 8975 MPI2_IOC_STATE_COREDUMP) { 8976 is_fault = 1; 8977 ioc->htb_rel.trigger_info_dwords[1] = 8978 (ioc_state & MPI2_DOORBELL_DATA_MASK); 8979 } 8980 } 8981 _base_pre_reset_handler(ioc); 8982 mpt3sas_wait_for_commands_to_complete(ioc); 8983 mpt3sas_base_mask_interrupts(ioc); 8984 mpt3sas_base_pause_mq_polling(ioc); 8985 r = mpt3sas_base_make_ioc_ready(ioc, type); 8986 if (r) 8987 goto out; 8988 _base_clear_outstanding_commands(ioc); 8989 8990 /* If this hard reset is called while port enable is active, then 8991 * there is no reason to call make_ioc_operational 8992 */ 8993 if (ioc->is_driver_loading && ioc->port_enable_failed) { 8994 ioc->remove_host = 1; 8995 r = -EFAULT; 8996 goto out; 8997 } 8998 r = _base_get_ioc_facts(ioc); 8999 if (r) 9000 goto out; 9001 9002 r = _base_check_ioc_facts_changes(ioc); 9003 if (r) { 9004 ioc_info(ioc, 9005 "Some of the parameters got changed in this new firmware" 9006 " image and it requires system reboot\n"); 9007 goto out; 9008 } 9009 if (ioc->rdpq_array_enable && !ioc->rdpq_array_capable) 9010 panic("%s: Issue occurred with flashing controller firmware." 9011 "Please reboot the system and ensure that the correct" 9012 " firmware version is running\n", ioc->name); 9013 9014 r = _base_make_ioc_operational(ioc); 9015 if (!r) 9016 _base_reset_done_handler(ioc); 9017 9018 out: 9019 ioc_info(ioc, "%s: %s\n", __func__, r == 0 ? "SUCCESS" : "FAILED"); 9020 9021 spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags); 9022 ioc->shost_recovery = 0; 9023 spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags); 9024 ioc->ioc_reset_count++; 9025 mutex_unlock(&ioc->reset_in_progress_mutex); 9026 mpt3sas_base_resume_mq_polling(ioc); 9027 9028 out_unlocked: 9029 if ((r == 0) && is_trigger) { 9030 if (is_fault) 9031 mpt3sas_trigger_master(ioc, MASTER_TRIGGER_FW_FAULT); 9032 else 9033 mpt3sas_trigger_master(ioc, 9034 MASTER_TRIGGER_ADAPTER_RESET); 9035 } 9036 dtmprintk(ioc, ioc_info(ioc, "%s: exit\n", __func__)); 9037 return r; 9038 } 9039