// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Driver for Broadcom MPI3 Storage Controllers
 *
 * Copyright (C) 2017-2023 Broadcom Inc.
 *  (mailto: mpi3mr-linuxdrv.pdl@broadcom.com)
 *
 */

#include "mpi3mr.h"
#include <linux/io-64-nonatomic-lo-hi.h>

/* Forward declarations for routines defined later in this file. */
static int
mpi3mr_issue_reset(struct mpi3mr_ioc *mrioc, u16 reset_type, u16 reset_reason);
static int mpi3mr_setup_admin_qpair(struct mpi3mr_ioc *mrioc);
static void mpi3mr_process_factsdata(struct mpi3mr_ioc *mrioc,
	struct mpi3_ioc_facts_data *facts_data);
static void mpi3mr_pel_wait_complete(struct mpi3mr_ioc *mrioc,
	struct mpi3mr_drv_cmd *drv_cmd);

/* Number of reply queues reserved for io_uring poll mode (0 = disabled). */
static int poll_queues;
module_param(poll_queues, int, 0444);
MODULE_PARM_DESC(poll_queues, "Number of queues for io_uring poll mode. (Range 1 - 126)");

#if defined(writeq) && defined(CONFIG_64BIT)
/**
 * mpi3mr_writeq - 64-bit MMIO register write helper
 * @b: 64-bit value to write
 * @addr: MMIO address to write to
 *
 * Uses the native writeq() when the platform provides one.
 */
static inline void mpi3mr_writeq(__u64 b, volatile void __iomem *addr)
{
	writeq(b, addr);
}
#else
/**
 * mpi3mr_writeq - 64-bit MMIO register write helper (32-bit fallback)
 * @b: 64-bit value to write
 * @addr: MMIO address to write to
 *
 * Issues the write as two 32-bit writes, low dword first.
 */
static inline void mpi3mr_writeq(__u64 b, volatile void __iomem *addr)
{
	__u64 data_out = b;

	writel((u32)(data_out), addr);
	writel((u32)(data_out >> 32), (addr + 4));
}
#endif

/**
 * mpi3mr_check_req_qfull - Check if an operational request queue is full
 * @op_req_q: Operational request queue info
 *
 * The queue is full when the producer index (pi) is exactly one slot
 * behind the consumer index (ci), accounting for wrap-around at
 * num_requests entries.
 *
 * Return: true if the queue is full, false otherwise.
 */
static inline bool
mpi3mr_check_req_qfull(struct op_req_qinfo *op_req_q)
{
	u16 pi, ci, max_entries;
	bool is_qfull = false;

	pi = op_req_q->pi;
	ci = READ_ONCE(op_req_q->ci);
	max_entries = op_req_q->num_requests;

	if ((ci == (pi + 1)) || ((!ci) && (pi == (max_entries - 1))))
		is_qfull = true;

	return is_qfull;
}

/**
 * mpi3mr_sync_irqs - Wait for in-flight interrupt handlers to finish
 * @mrioc: Adapter instance reference
 *
 * Calls synchronize_irq() on every allocated MSI-X vector.
 *
 * Return: Nothing.
 */
static void mpi3mr_sync_irqs(struct mpi3mr_ioc *mrioc)
{
	u16 i, max_vectors;

	max_vectors = mrioc->intr_info_count;

	for (i = 0; i < max_vectors; i++)
		synchronize_irq(pci_irq_vector(mrioc->pdev, i));
}

/**
 * mpi3mr_ioc_disable_intr - Disable driver interrupt handling
 * @mrioc: Adapter instance reference
 *
 * Clears the software interrupt-enabled flag (the ISRs bail out when
 * it is clear) and then synchronizes with any handlers already running.
 *
 * Return: Nothing.
 */
void mpi3mr_ioc_disable_intr(struct mpi3mr_ioc *mrioc)
{
	mrioc->intr_enabled = 0;
	mpi3mr_sync_irqs(mrioc);
}

/**
 * mpi3mr_ioc_enable_intr - Enable driver interrupt handling
 * @mrioc: Adapter instance reference
 *
 * Return: Nothing.
 */
void mpi3mr_ioc_enable_intr(struct mpi3mr_ioc *mrioc)
{
	mrioc->intr_enabled = 1;
}
/**
 * mpi3mr_cleanup_isr - Free IRQs and interrupt tracking info
 * @mrioc: Adapter instance reference
 *
 * Disables interrupts, frees every registered IRQ together with its
 * per-vector tracking structure, and releases the PCI IRQ vectors.
 *
 * Return: Nothing.
 */
static void mpi3mr_cleanup_isr(struct mpi3mr_ioc *mrioc)
{
	u16 i;

	mpi3mr_ioc_disable_intr(mrioc);

	if (!mrioc->intr_info)
		return;

	for (i = 0; i < mrioc->intr_info_count; i++)
		free_irq(pci_irq_vector(mrioc->pdev, i),
		    (mrioc->intr_info + i));

	kfree(mrioc->intr_info);
	mrioc->intr_info = NULL;
	mrioc->intr_info_count = 0;
	mrioc->is_intr_info_set = false;
	pci_free_irq_vectors(mrioc->pdev);
}

/**
 * mpi3mr_add_sg_single - Populate a simple SGE
 * @paddr: Virtual address of the SGE to populate
 * @flags: SGE flags
 * @length: Transfer length described by the SGE
 * @dma_addr: DMA address described by the SGE
 *
 * Return: Nothing.
 */
void mpi3mr_add_sg_single(void *paddr, u8 flags, u32 length,
	dma_addr_t dma_addr)
{
	struct mpi3_sge_common *sgel = paddr;

	sgel->flags = flags;
	sgel->length = cpu_to_le32(length);
	sgel->address = cpu_to_le64(dma_addr);
}

/**
 * mpi3mr_build_zero_len_sge - Build a zero-length end-of-list SGE
 * @paddr: Virtual address of the SGE to populate
 *
 * Return: Nothing.
 */
void mpi3mr_build_zero_len_sge(void *paddr)
{
	u8 sgl_flags = MPI3MR_SGEFLAGS_SYSTEM_SIMPLE_END_OF_LIST;

	mpi3mr_add_sg_single(paddr, sgl_flags, 0, -1);
}

/**
 * mpi3mr_get_reply_virt_addr - Map a reply DMA address to virtual
 * @mrioc: Adapter instance reference
 * @phys_addr: DMA address of a reply frame
 *
 * Return: Virtual address of the reply frame, or NULL when the DMA
 * address is zero or outside the reply buffer pool's range.
 */
void *mpi3mr_get_reply_virt_addr(struct mpi3mr_ioc *mrioc,
	dma_addr_t phys_addr)
{
	if (!phys_addr)
		return NULL;

	if ((phys_addr < mrioc->reply_buf_dma) ||
	    (phys_addr > mrioc->reply_buf_dma_max_address))
		return NULL;

	return mrioc->reply_buf + (phys_addr - mrioc->reply_buf_dma);
}

/**
 * mpi3mr_get_sensebuf_virt_addr - Map a sense buffer DMA address to virtual
 * @mrioc: Adapter instance reference
 * @phys_addr: DMA address of a sense buffer
 *
 * Return: Virtual address of the sense buffer, or NULL when the DMA
 * address is zero.
 */
void *mpi3mr_get_sensebuf_virt_addr(struct mpi3mr_ioc *mrioc,
	dma_addr_t phys_addr)
{
	if (!phys_addr)
		return NULL;

	return mrioc->sense_buf + (phys_addr - mrioc->sense_buf_dma);
}

/**
 * mpi3mr_repost_reply_buf - Return a reply frame to the free queue
 * @mrioc: Adapter instance reference
 * @reply_dma: DMA address of the reply frame to repost
 *
 * Places the reply frame back on the reply free queue and informs the
 * firmware by updating the host index register. The index update and
 * register write are done under reply_free_queue_lock.
 *
 * Return: Nothing.
 */
static void mpi3mr_repost_reply_buf(struct mpi3mr_ioc *mrioc,
	u64 reply_dma)
{
	u32 old_idx = 0;
	unsigned long flags;

	spin_lock_irqsave(&mrioc->reply_free_queue_lock, flags);
	old_idx = mrioc->reply_free_queue_host_index;
	mrioc->reply_free_queue_host_index = (
	    (mrioc->reply_free_queue_host_index ==
	    (mrioc->reply_free_qsz - 1)) ? 0 :
	    (mrioc->reply_free_queue_host_index + 1));
	mrioc->reply_free_q[old_idx] = cpu_to_le64(reply_dma);
	writel(mrioc->reply_free_queue_host_index,
	    &mrioc->sysif_regs->reply_free_host_index);
	spin_unlock_irqrestore(&mrioc->reply_free_queue_lock, flags);
}

/**
 * mpi3mr_repost_sense_buf - Return a sense buffer to the free queue
 * @mrioc: Adapter instance reference
 * @sense_buf_dma: DMA address of the sense buffer to repost
 *
 * Places the sense buffer back on the sense buffer free queue and
 * informs the firmware by updating the host index register, under
 * sbq_lock.
 *
 * Return: Nothing.
 */
void mpi3mr_repost_sense_buf(struct mpi3mr_ioc *mrioc,
	u64 sense_buf_dma)
{
	u32 old_idx = 0;
	unsigned long flags;

	spin_lock_irqsave(&mrioc->sbq_lock, flags);
	old_idx = mrioc->sbq_host_index;
	mrioc->sbq_host_index = ((mrioc->sbq_host_index ==
	    (mrioc->sense_buf_q_sz - 1)) ? 0 :
	    (mrioc->sbq_host_index + 1));
	mrioc->sense_buf_q[old_idx] = cpu_to_le64(sense_buf_dma);
	writel(mrioc->sbq_host_index,
	    &mrioc->sysif_regs->sense_buffer_free_host_index);
	spin_unlock_irqrestore(&mrioc->sbq_lock, flags);
}

/**
 * mpi3mr_print_event_data - Log a received firmware event
 * @mrioc: Adapter instance reference
 * @event_reply: Event notification reply frame
 *
 * Prints a human-readable description of the event. Events that carry
 * interesting payload (device add/change, SAS discovery, PCIe
 * enumeration) are logged with details and return early; the rest are
 * logged by name only.
 *
 * Return: Nothing.
 */
static void mpi3mr_print_event_data(struct mpi3mr_ioc *mrioc,
	struct mpi3_event_notification_reply *event_reply)
{
	char *desc = NULL;
	u16 event;

	event = event_reply->event;

	switch (event) {
	case MPI3_EVENT_LOG_DATA:
		desc = "Log Data";
		break;
	case MPI3_EVENT_CHANGE:
		desc = "Event Change";
		break;
	case MPI3_EVENT_GPIO_INTERRUPT:
		desc = "GPIO Interrupt";
		break;
	case MPI3_EVENT_CABLE_MGMT:
		desc = "Cable Management";
		break;
	case MPI3_EVENT_ENERGY_PACK_CHANGE:
		desc = "Energy Pack Change";
		break;
	case MPI3_EVENT_DEVICE_ADDED:
	{
		struct mpi3_device_page0 *event_data =
		    (struct mpi3_device_page0 *)event_reply->event_data;
		ioc_info(mrioc, "Device Added: dev=0x%04x Form=0x%x\n",
		    event_data->dev_handle, event_data->device_form);
		return;
	}
	case MPI3_EVENT_DEVICE_INFO_CHANGED:
	{
		struct mpi3_device_page0 *event_data =
		    (struct mpi3_device_page0 *)event_reply->event_data;
		ioc_info(mrioc, "Device Info Changed: dev=0x%04x Form=0x%x\n",
		    event_data->dev_handle, event_data->device_form);
		return;
	}
	case MPI3_EVENT_DEVICE_STATUS_CHANGE:
	{
		struct mpi3_event_data_device_status_change *event_data =
		    (struct mpi3_event_data_device_status_change *)event_reply->event_data;
		ioc_info(mrioc, "Device status Change: dev=0x%04x RC=0x%x\n",
		    event_data->dev_handle, event_data->reason_code);
		return;
	}
	case MPI3_EVENT_SAS_DISCOVERY:
	{
		struct mpi3_event_data_sas_discovery *event_data =
		    (struct mpi3_event_data_sas_discovery *)event_reply->event_data;
		ioc_info(mrioc, "SAS Discovery: (%s) status (0x%08x)\n",
		    (event_data->reason_code == MPI3_EVENT_SAS_DISC_RC_STARTED) ?
		    "start" : "stop",
		    le32_to_cpu(event_data->discovery_status));
		return;
	}
	case MPI3_EVENT_SAS_BROADCAST_PRIMITIVE:
		desc = "SAS Broadcast Primitive";
		break;
	case MPI3_EVENT_SAS_NOTIFY_PRIMITIVE:
		desc = "SAS Notify Primitive";
		break;
	case MPI3_EVENT_SAS_INIT_DEVICE_STATUS_CHANGE:
		desc = "SAS Init Device Status Change";
		break;
	case MPI3_EVENT_SAS_INIT_TABLE_OVERFLOW:
		desc = "SAS Init Table Overflow";
		break;
	case MPI3_EVENT_SAS_TOPOLOGY_CHANGE_LIST:
		desc = "SAS Topology Change List";
		break;
	case MPI3_EVENT_ENCL_DEVICE_STATUS_CHANGE:
		desc = "Enclosure Device Status Change";
		break;
	case MPI3_EVENT_ENCL_DEVICE_ADDED:
		desc = "Enclosure Added";
		break;
	case MPI3_EVENT_HARD_RESET_RECEIVED:
		desc = "Hard Reset Received";
		break;
	case MPI3_EVENT_SAS_PHY_COUNTER:
		desc = "SAS PHY Counter";
		break;
	case MPI3_EVENT_SAS_DEVICE_DISCOVERY_ERROR:
		desc = "SAS Device Discovery Error";
		break;
	case MPI3_EVENT_PCIE_TOPOLOGY_CHANGE_LIST:
		desc = "PCIE Topology Change List";
		break;
	case MPI3_EVENT_PCIE_ENUMERATION:
	{
		struct mpi3_event_data_pcie_enumeration *event_data =
		    (struct mpi3_event_data_pcie_enumeration *)event_reply->event_data;
		ioc_info(mrioc, "PCIE Enumeration: (%s)",
		    (event_data->reason_code ==
		    MPI3_EVENT_PCIE_ENUM_RC_STARTED) ? "start" : "stop");
		if (event_data->enumeration_status)
			ioc_info(mrioc, "enumeration_status(0x%08x)\n",
			    le32_to_cpu(event_data->enumeration_status));
		return;
	}
	case MPI3_EVENT_PREPARE_FOR_RESET:
		desc = "Prepare For Reset";
		break;
	}

	if (!desc)
		return;

	ioc_info(mrioc, "%s\n", desc);
}

/**
 * mpi3mr_handle_events - Top-level firmware event handler
 * @mrioc: Adapter instance reference
 * @def_reply: Event notification reply frame
 *
 * Records the IOC change count carried by the event, logs the event
 * and forwards it to the OS-specific event handler.
 *
 * Return: Nothing.
 */
static void mpi3mr_handle_events(struct mpi3mr_ioc *mrioc,
	struct mpi3_default_reply *def_reply)
{
	struct mpi3_event_notification_reply *event_reply =
	    (struct mpi3_event_notification_reply *)def_reply;

	mrioc->change_count = le16_to_cpu(event_reply->ioc_change_count);
	mpi3mr_print_event_data(mrioc, event_reply);
	mpi3mr_os_handle_events(mrioc, event_reply);
}

/**
 * mpi3mr_get_drv_cmd - Map a host tag to its driver command tracker
 * @mrioc: Adapter instance reference
 * @host_tag: Host tag from the reply descriptor
 * @def_reply: Reply frame, if the descriptor carried one
 *
 * Resolves well-known host tags and the device-removal / event-ack
 * command tag ranges to their internal tracker structures. An invalid
 * host tag carrying an event notification is dispatched to
 * mpi3mr_handle_events() here.
 *
 * Return: Matching driver command tracker, or NULL when there is none.
 */
static struct mpi3mr_drv_cmd *
mpi3mr_get_drv_cmd(struct mpi3mr_ioc *mrioc, u16 host_tag,
	struct mpi3_default_reply *def_reply)
{
	u16 idx;

	switch (host_tag) {
	case MPI3MR_HOSTTAG_INITCMDS:
		return &mrioc->init_cmds;
	case MPI3MR_HOSTTAG_CFG_CMDS:
		return &mrioc->cfg_cmds;
	case MPI3MR_HOSTTAG_BSG_CMDS:
		return &mrioc->bsg_cmds;
	case MPI3MR_HOSTTAG_BLK_TMS:
		return &mrioc->host_tm_cmds;
	case MPI3MR_HOSTTAG_PEL_ABORT:
		return &mrioc->pel_abort_cmd;
	case MPI3MR_HOSTTAG_PEL_WAIT:
		return &mrioc->pel_cmds;
	case MPI3MR_HOSTTAG_TRANSPORT_CMDS:
		return &mrioc->transport_cmds;
	case MPI3MR_HOSTTAG_INVALID:
		if (def_reply && def_reply->function ==
		    MPI3_FUNCTION_EVENT_NOTIFICATION)
			mpi3mr_handle_events(mrioc, def_reply);
		return NULL;
	default:
		break;
	}
	if (host_tag >= MPI3MR_HOSTTAG_DEVRMCMD_MIN &&
	    host_tag <= MPI3MR_HOSTTAG_DEVRMCMD_MAX) {
		idx = host_tag - MPI3MR_HOSTTAG_DEVRMCMD_MIN;
		return &mrioc->dev_rmhs_cmds[idx];
	}

	if (host_tag >= MPI3MR_HOSTTAG_EVTACKCMD_MIN &&
	    host_tag <= MPI3MR_HOSTTAG_EVTACKCMD_MAX) {
		idx = host_tag - MPI3MR_HOSTTAG_EVTACKCMD_MIN;
		return &mrioc->evtack_cmds[idx];
	}

	return NULL;
}

/**
 * mpi3mr_process_admin_reply_desc - Process one admin reply descriptor
 * @mrioc: Adapter instance reference
 * @reply_desc: Admin reply descriptor to process
 * @reply_dma: Output; DMA address of the reply frame, if the descriptor
 *             is an address reply (caller reposts it), else 0
 *
 * Decodes the descriptor type (status / address reply / success),
 * extracts host tag, IOC status and log info, and completes the
 * matching driver command tracker: copies the reply frame and sense
 * data when present, then wakes a waiter or invokes the callback.
 *
 * Return: Nothing.
 */
static void mpi3mr_process_admin_reply_desc(struct mpi3mr_ioc *mrioc,
	struct mpi3_default_reply_descriptor *reply_desc, u64 *reply_dma)
{
	u16 reply_desc_type, host_tag = 0;
	u16 ioc_status = MPI3_IOCSTATUS_SUCCESS;
	u32 ioc_loginfo = 0;
	struct mpi3_status_reply_descriptor *status_desc;
	struct mpi3_address_reply_descriptor *addr_desc;
	struct mpi3_success_reply_descriptor *success_desc;
	struct mpi3_default_reply *def_reply = NULL;
	struct mpi3mr_drv_cmd *cmdptr = NULL;
	struct mpi3_scsi_io_reply *scsi_reply;
	u8 *sense_buf = NULL;

	*reply_dma = 0;
	reply_desc_type = le16_to_cpu(reply_desc->reply_flags) &
	    MPI3_REPLY_DESCRIPT_FLAGS_TYPE_MASK;
	switch (reply_desc_type) {
	case MPI3_REPLY_DESCRIPT_FLAGS_TYPE_STATUS:
		status_desc = (struct mpi3_status_reply_descriptor *)reply_desc;
		host_tag = le16_to_cpu(status_desc->host_tag);
		ioc_status = le16_to_cpu(status_desc->ioc_status);
		if (ioc_status &
		    MPI3_REPLY_DESCRIPT_STATUS_IOCSTATUS_LOGINFOAVAIL)
			ioc_loginfo = le32_to_cpu(status_desc->ioc_log_info);
		ioc_status &= MPI3_REPLY_DESCRIPT_STATUS_IOCSTATUS_STATUS_MASK;
		break;
	case MPI3_REPLY_DESCRIPT_FLAGS_TYPE_ADDRESS_REPLY:
		addr_desc = (struct mpi3_address_reply_descriptor *)reply_desc;
		*reply_dma = le64_to_cpu(addr_desc->reply_frame_address);
		def_reply = mpi3mr_get_reply_virt_addr(mrioc, *reply_dma);
		if (!def_reply)
			goto out;
		host_tag = le16_to_cpu(def_reply->host_tag);
		ioc_status = le16_to_cpu(def_reply->ioc_status);
		if (ioc_status &
		    MPI3_REPLY_DESCRIPT_STATUS_IOCSTATUS_LOGINFOAVAIL)
			ioc_loginfo = le32_to_cpu(def_reply->ioc_log_info);
		ioc_status &= MPI3_REPLY_DESCRIPT_STATUS_IOCSTATUS_STATUS_MASK;
		if (def_reply->function == MPI3_FUNCTION_SCSI_IO) {
			scsi_reply = (struct mpi3_scsi_io_reply *)def_reply;
			sense_buf = mpi3mr_get_sensebuf_virt_addr(mrioc,
			    le64_to_cpu(scsi_reply->sense_data_buffer_address));
		}
		break;
	case MPI3_REPLY_DESCRIPT_FLAGS_TYPE_SUCCESS:
		success_desc = (struct mpi3_success_reply_descriptor *)reply_desc;
		host_tag = le16_to_cpu(success_desc->host_tag);
		break;
	default:
		break;
	}

	cmdptr = mpi3mr_get_drv_cmd(mrioc, host_tag, def_reply);
	if (cmdptr) {
		if (cmdptr->state & MPI3MR_CMD_PENDING) {
			cmdptr->state |= MPI3MR_CMD_COMPLETE;
			cmdptr->ioc_loginfo = ioc_loginfo;
			cmdptr->ioc_status = ioc_status;
			cmdptr->state &= ~MPI3MR_CMD_PENDING;
			if (def_reply) {
				cmdptr->state |= MPI3MR_CMD_REPLY_VALID;
				memcpy((u8 *)cmdptr->reply, (u8 *)def_reply,
				    mrioc->reply_sz);
			}
			if (sense_buf && cmdptr->sensebuf) {
				cmdptr->is_sense = 1;
				memcpy(cmdptr->sensebuf, sense_buf,
				    MPI3MR_SENSE_BUF_SZ);
			}
			if (cmdptr->is_waiting) {
				complete(&cmdptr->done);
				cmdptr->is_waiting = 0;
			} else if (cmdptr->callback)
				cmdptr->callback(mrioc, cmdptr);
		}
	}
out:
	/* sense_buf non-NULL implies scsi_reply was set above */
	if (sense_buf)
		mpi3mr_repost_sense_buf(mrioc,
		    le64_to_cpu(scsi_reply->sense_data_buffer_address));
}

/**
 * mpi3mr_process_admin_reply_q - Drain the admin reply queue
 * @mrioc: Adapter instance reference
 *
 * Processes admin reply descriptors until the expected phase bit no
 * longer matches (queue empty), reposting reply frames as it goes,
 * then writes the updated consumer index to the controller. Guarded
 * by admin_reply_q_in_use so only one context drains at a time.
 *
 * Return: 0 if the queue was empty or already being processed,
 * otherwise the number of reply descriptors processed.
 */
int mpi3mr_process_admin_reply_q(struct mpi3mr_ioc *mrioc)
{
	u32 exp_phase = mrioc->admin_reply_ephase;
	u32 admin_reply_ci = mrioc->admin_reply_ci;
	u32 num_admin_replies = 0;
	u64 reply_dma = 0;
	struct mpi3_default_reply_descriptor *reply_desc;

	if (!atomic_add_unless(&mrioc->admin_reply_q_in_use, 1, 1))
		return 0;

	reply_desc = (struct mpi3_default_reply_descriptor *)mrioc->admin_reply_base +
	    admin_reply_ci;

	if ((le16_to_cpu(reply_desc->reply_flags) &
	    MPI3_REPLY_DESCRIPT_FLAGS_PHASE_MASK) != exp_phase) {
		atomic_dec(&mrioc->admin_reply_q_in_use);
		return 0;
	}

	do {
		if (mrioc->unrecoverable)
			break;

		mrioc->admin_req_ci = le16_to_cpu(reply_desc->request_queue_ci);
		mpi3mr_process_admin_reply_desc(mrioc, reply_desc, &reply_dma);
		if (reply_dma)
			mpi3mr_repost_reply_buf(mrioc, reply_dma);
		num_admin_replies++;
		if (++admin_reply_ci == mrioc->num_admin_replies) {
			admin_reply_ci = 0;
			exp_phase ^= 1;
		}
		reply_desc =
		    (struct mpi3_default_reply_descriptor *)mrioc->admin_reply_base +
		    admin_reply_ci;
		if ((le16_to_cpu(reply_desc->reply_flags) &
		    MPI3_REPLY_DESCRIPT_FLAGS_PHASE_MASK) != exp_phase)
			break;
	} while (1);

	writel(admin_reply_ci, &mrioc->sysif_regs->admin_reply_queue_ci);
	mrioc->admin_reply_ci = admin_reply_ci;
	mrioc->admin_reply_ephase = exp_phase;
	atomic_dec(&mrioc->admin_reply_q_in_use);

	return num_admin_replies;
}

/**
 * mpi3mr_get_reply_desc - get reply descriptor frame corresponding to
 * queue's consumer index from operational reply descriptor queue.
 * @op_reply_q: op_reply_qinfo object
 * @reply_ci: operational reply descriptor's queue consumer index
 *
 * Returns: reply descriptor frame address
 */
static inline struct mpi3_default_reply_descriptor *
mpi3mr_get_reply_desc(struct op_reply_qinfo *op_reply_q, u32 reply_ci)
{
	void *segment_base_addr;
	struct segments *segments = op_reply_q->q_segments;
	struct mpi3_default_reply_descriptor *reply_desc = NULL;

	segment_base_addr =
	    segments[reply_ci / op_reply_q->segment_qd].segment;
	reply_desc = (struct mpi3_default_reply_descriptor *)segment_base_addr +
	    (reply_ci % op_reply_q->segment_qd);
	return reply_desc;
}

/**
 * mpi3mr_process_op_reply_q - Operational reply queue handler
 * @mrioc: Adapter instance reference
 * @op_reply_q: Operational reply queue info
 *
 * Checks the specific operational reply queue and drains the
 * reply queue entries until the queue is empty and process the
 * individual reply descriptors.
 *
 * Return: 0 if queue is already processed,or number of reply
 * descriptors processed.
 */
int mpi3mr_process_op_reply_q(struct mpi3mr_ioc *mrioc,
	struct op_reply_qinfo *op_reply_q)
{
	struct op_req_qinfo *op_req_q;
	u32 exp_phase;
	u32 reply_ci;
	u32 num_op_reply = 0;
	u64 reply_dma = 0;
	struct mpi3_default_reply_descriptor *reply_desc;
	u16 req_q_idx = 0, reply_qidx;

	reply_qidx = op_reply_q->qid - 1;

	/* Only one context may drain this queue at a time */
	if (!atomic_add_unless(&op_reply_q->in_use, 1, 1))
		return 0;

	exp_phase = op_reply_q->ephase;
	reply_ci = op_reply_q->ci;

	reply_desc = mpi3mr_get_reply_desc(op_reply_q, reply_ci);
	if ((le16_to_cpu(reply_desc->reply_flags) &
	    MPI3_REPLY_DESCRIPT_FLAGS_PHASE_MASK) != exp_phase) {
		atomic_dec(&op_reply_q->in_use);
		return 0;
	}

	do {
		if (mrioc->unrecoverable)
			break;

		req_q_idx = le16_to_cpu(reply_desc->request_queue_id) - 1;
		op_req_q = &mrioc->req_qinfo[req_q_idx];

		WRITE_ONCE(op_req_q->ci, le16_to_cpu(reply_desc->request_queue_ci));
		mpi3mr_process_op_reply_desc(mrioc, reply_desc, &reply_dma,
		    reply_qidx);
		atomic_dec(&op_reply_q->pend_ios);
		if (reply_dma)
			mpi3mr_repost_reply_buf(mrioc, reply_dma);
		num_op_reply++;

		if (++reply_ci == op_reply_q->num_replies) {
			reply_ci = 0;
			exp_phase ^= 1;
		}

		reply_desc = mpi3mr_get_reply_desc(op_reply_q, reply_ci);

		if ((le16_to_cpu(reply_desc->reply_flags) &
		    MPI3_REPLY_DESCRIPT_FLAGS_PHASE_MASK) != exp_phase)
			break;
#ifndef CONFIG_PREEMPT_RT
		/*
		 * Exit completion loop to avoid CPU lockup
		 * Ensure remaining completion happens from threaded ISR.
		 */
		if (num_op_reply > mrioc->max_host_ios) {
			op_reply_q->enable_irq_poll = true;
			break;
		}
#endif
	} while (1);

	writel(reply_ci,
	    &mrioc->sysif_regs->oper_queue_indexes[reply_qidx].consumer_index);
	op_reply_q->ci = reply_ci;
	op_reply_q->ephase = exp_phase;

	atomic_dec(&op_reply_q->in_use);
	return num_op_reply;
}

/**
 * mpi3mr_blk_mq_poll - Operational reply queue handler
 * @shost: SCSI Host reference
 * @queue_num: Request queue number (w.r.t OS it is hardware context number)
 *
 * Checks the specific operational reply queue and drains the
 * reply queue entries until the queue is empty and process the
 * individual reply descriptors.
 *
 * Return: 0 if queue is already processed,or number of reply
 * descriptors processed.
 */
int mpi3mr_blk_mq_poll(struct Scsi_Host *shost, unsigned int queue_num)
{
	int num_entries = 0;
	struct mpi3mr_ioc *mrioc;

	mrioc = (struct mpi3mr_ioc *)shost->hostdata;

	if ((mrioc->reset_in_progress || mrioc->prepare_for_reset ||
	    mrioc->unrecoverable))
		return 0;

	num_entries = mpi3mr_process_op_reply_q(mrioc,
	    &mrioc->op_reply_qinfo[queue_num]);

	return num_entries;
}

/**
 * mpi3mr_isr_primary - Primary (hard) interrupt handler
 * @irq: IRQ number
 * @privdata: Per-vector interrupt info (struct mpi3mr_intr_info)
 *
 * Drains the admin reply queue (vector 0 only) and the operational
 * reply queue associated with this vector, if any.
 *
 * Return: IRQ_HANDLED if any replies were processed, else IRQ_NONE.
 */
static irqreturn_t mpi3mr_isr_primary(int irq, void *privdata)
{
	struct mpi3mr_intr_info *intr_info = privdata;
	struct mpi3mr_ioc *mrioc;
	u16 midx;
	u32 num_admin_replies = 0, num_op_reply = 0;

	if (!intr_info)
		return IRQ_NONE;

	mrioc = intr_info->mrioc;

	if (!mrioc->intr_enabled)
		return IRQ_NONE;

	midx = intr_info->msix_index;

	if (!midx)
		num_admin_replies = mpi3mr_process_admin_reply_q(mrioc);
	if (intr_info->op_reply_q)
		num_op_reply = mpi3mr_process_op_reply_q(mrioc,
		    intr_info->op_reply_q);

	if (num_admin_replies || num_op_reply)
		return IRQ_HANDLED;
	else
		return IRQ_NONE;
}

#ifndef CONFIG_PREEMPT_RT
638 static irqreturn_t mpi3mr_isr(int irq, void *privdata) 639 { 640 struct mpi3mr_intr_info *intr_info = privdata; 641 int ret; 642 643 if (!intr_info) 644 return IRQ_NONE; 645 646 /* Call primary ISR routine */ 647 ret = mpi3mr_isr_primary(irq, privdata); 648 649 /* 650 * If more IOs are expected, schedule IRQ polling thread. 651 * Otherwise exit from ISR. 652 */ 653 if (!intr_info->op_reply_q) 654 return ret; 655 656 if (!intr_info->op_reply_q->enable_irq_poll || 657 !atomic_read(&intr_info->op_reply_q->pend_ios)) 658 return ret; 659 660 disable_irq_nosync(intr_info->os_irq); 661 662 return IRQ_WAKE_THREAD; 663 } 664 665 /** 666 * mpi3mr_isr_poll - Reply queue polling routine 667 * @irq: IRQ 668 * @privdata: Interrupt info 669 * 670 * poll for pending I/O completions in a loop until pending I/Os 671 * present or controller queue depth I/Os are processed. 672 * 673 * Return: IRQ_NONE or IRQ_HANDLED 674 */ 675 static irqreturn_t mpi3mr_isr_poll(int irq, void *privdata) 676 { 677 struct mpi3mr_intr_info *intr_info = privdata; 678 struct mpi3mr_ioc *mrioc; 679 u16 midx; 680 u32 num_op_reply = 0; 681 682 if (!intr_info || !intr_info->op_reply_q) 683 return IRQ_NONE; 684 685 mrioc = intr_info->mrioc; 686 midx = intr_info->msix_index; 687 688 /* Poll for pending IOs completions */ 689 do { 690 if (!mrioc->intr_enabled || mrioc->unrecoverable) 691 break; 692 693 if (!midx) 694 mpi3mr_process_admin_reply_q(mrioc); 695 if (intr_info->op_reply_q) 696 num_op_reply += 697 mpi3mr_process_op_reply_q(mrioc, 698 intr_info->op_reply_q); 699 700 usleep_range(MPI3MR_IRQ_POLL_SLEEP, 10 * MPI3MR_IRQ_POLL_SLEEP); 701 702 } while (atomic_read(&intr_info->op_reply_q->pend_ios) && 703 (num_op_reply < mrioc->max_host_ios)); 704 705 intr_info->op_reply_q->enable_irq_poll = false; 706 enable_irq(intr_info->os_irq); 707 708 return IRQ_HANDLED; 709 } 710 711 #endif 712 713 /** 714 * mpi3mr_request_irq - Request IRQ and register ISR 715 * @mrioc: Adapter instance reference 716 * @index: IRQ 
vector index 717 * 718 * Request threaded ISR with primary ISR and secondary 719 * 720 * Return: 0 on success and non zero on failures. 721 */ 722 static inline int mpi3mr_request_irq(struct mpi3mr_ioc *mrioc, u16 index) 723 { 724 struct pci_dev *pdev = mrioc->pdev; 725 struct mpi3mr_intr_info *intr_info = mrioc->intr_info + index; 726 int retval = 0; 727 728 intr_info->mrioc = mrioc; 729 intr_info->msix_index = index; 730 intr_info->op_reply_q = NULL; 731 732 snprintf(intr_info->name, MPI3MR_NAME_LENGTH, "%s%d-msix%d", 733 mrioc->driver_name, mrioc->id, index); 734 735 #ifndef CONFIG_PREEMPT_RT 736 retval = request_threaded_irq(pci_irq_vector(pdev, index), mpi3mr_isr, 737 mpi3mr_isr_poll, IRQF_SHARED, intr_info->name, intr_info); 738 #else 739 retval = request_threaded_irq(pci_irq_vector(pdev, index), mpi3mr_isr_primary, 740 NULL, IRQF_SHARED, intr_info->name, intr_info); 741 #endif 742 if (retval) { 743 ioc_err(mrioc, "%s: Unable to allocate interrupt %d!\n", 744 intr_info->name, pci_irq_vector(pdev, index)); 745 return retval; 746 } 747 748 intr_info->os_irq = pci_irq_vector(pdev, index); 749 return retval; 750 } 751 752 static void mpi3mr_calc_poll_queues(struct mpi3mr_ioc *mrioc, u16 max_vectors) 753 { 754 if (!mrioc->requested_poll_qcount) 755 return; 756 757 /* Reserved for Admin and Default Queue */ 758 if (max_vectors > 2 && 759 (mrioc->requested_poll_qcount < max_vectors - 2)) { 760 ioc_info(mrioc, 761 "enabled polled queues (%d) msix (%d)\n", 762 mrioc->requested_poll_qcount, max_vectors); 763 } else { 764 ioc_info(mrioc, 765 "disabled polled queues (%d) msix (%d) because of no resources for default queue\n", 766 mrioc->requested_poll_qcount, max_vectors); 767 mrioc->requested_poll_qcount = 0; 768 } 769 } 770 771 /** 772 * mpi3mr_setup_isr - Setup ISR for the controller 773 * @mrioc: Adapter instance reference 774 * @setup_one: Request one IRQ or more 775 * 776 * Allocate IRQ vectors and call mpi3mr_request_irq to setup ISR 777 * 778 * Return: 0 on 
success and non zero on failures. 779 */ 780 static int mpi3mr_setup_isr(struct mpi3mr_ioc *mrioc, u8 setup_one) 781 { 782 unsigned int irq_flags = PCI_IRQ_MSIX; 783 int max_vectors, min_vec; 784 int retval; 785 int i; 786 struct irq_affinity desc = { .pre_vectors = 1, .post_vectors = 1 }; 787 788 if (mrioc->is_intr_info_set) 789 return 0; 790 791 mpi3mr_cleanup_isr(mrioc); 792 793 if (setup_one || reset_devices) { 794 max_vectors = 1; 795 retval = pci_alloc_irq_vectors(mrioc->pdev, 796 1, max_vectors, irq_flags); 797 if (retval < 0) { 798 ioc_err(mrioc, "cannot allocate irq vectors, ret %d\n", 799 retval); 800 goto out_failed; 801 } 802 } else { 803 max_vectors = 804 min_t(int, mrioc->cpu_count + 1 + 805 mrioc->requested_poll_qcount, mrioc->msix_count); 806 807 mpi3mr_calc_poll_queues(mrioc, max_vectors); 808 809 ioc_info(mrioc, 810 "MSI-X vectors supported: %d, no of cores: %d,", 811 mrioc->msix_count, mrioc->cpu_count); 812 ioc_info(mrioc, 813 "MSI-x vectors requested: %d poll_queues %d\n", 814 max_vectors, mrioc->requested_poll_qcount); 815 816 desc.post_vectors = mrioc->requested_poll_qcount; 817 min_vec = desc.pre_vectors + desc.post_vectors; 818 irq_flags |= PCI_IRQ_AFFINITY | PCI_IRQ_ALL_TYPES; 819 820 retval = pci_alloc_irq_vectors_affinity(mrioc->pdev, 821 min_vec, max_vectors, irq_flags, &desc); 822 823 if (retval < 0) { 824 ioc_err(mrioc, "cannot allocate irq vectors, ret %d\n", 825 retval); 826 goto out_failed; 827 } 828 829 830 /* 831 * If only one MSI-x is allocated, then MSI-x 0 will be shared 832 * between Admin queue and operational queue 833 */ 834 if (retval == min_vec) 835 mrioc->op_reply_q_offset = 0; 836 else if (retval != (max_vectors)) { 837 ioc_info(mrioc, 838 "allocated vectors (%d) are less than configured (%d)\n", 839 retval, max_vectors); 840 } 841 842 max_vectors = retval; 843 mrioc->op_reply_q_offset = (max_vectors > 1) ? 
1 : 0; 844 845 mpi3mr_calc_poll_queues(mrioc, max_vectors); 846 847 } 848 849 mrioc->intr_info = kzalloc(sizeof(struct mpi3mr_intr_info) * max_vectors, 850 GFP_KERNEL); 851 if (!mrioc->intr_info) { 852 retval = -ENOMEM; 853 pci_free_irq_vectors(mrioc->pdev); 854 goto out_failed; 855 } 856 for (i = 0; i < max_vectors; i++) { 857 retval = mpi3mr_request_irq(mrioc, i); 858 if (retval) { 859 mrioc->intr_info_count = i; 860 goto out_failed; 861 } 862 } 863 if (reset_devices || !setup_one) 864 mrioc->is_intr_info_set = true; 865 mrioc->intr_info_count = max_vectors; 866 mpi3mr_ioc_enable_intr(mrioc); 867 return 0; 868 869 out_failed: 870 mpi3mr_cleanup_isr(mrioc); 871 872 return retval; 873 } 874 875 static const struct { 876 enum mpi3mr_iocstate value; 877 char *name; 878 } mrioc_states[] = { 879 { MRIOC_STATE_READY, "ready" }, 880 { MRIOC_STATE_FAULT, "fault" }, 881 { MRIOC_STATE_RESET, "reset" }, 882 { MRIOC_STATE_BECOMING_READY, "becoming ready" }, 883 { MRIOC_STATE_RESET_REQUESTED, "reset requested" }, 884 { MRIOC_STATE_UNRECOVERABLE, "unrecoverable error" }, 885 }; 886 887 static const char *mpi3mr_iocstate_name(enum mpi3mr_iocstate mrioc_state) 888 { 889 int i; 890 char *name = NULL; 891 892 for (i = 0; i < ARRAY_SIZE(mrioc_states); i++) { 893 if (mrioc_states[i].value == mrioc_state) { 894 name = mrioc_states[i].name; 895 break; 896 } 897 } 898 return name; 899 } 900 901 /* Reset reason to name mapper structure*/ 902 static const struct { 903 enum mpi3mr_reset_reason value; 904 char *name; 905 } mpi3mr_reset_reason_codes[] = { 906 { MPI3MR_RESET_FROM_BRINGUP, "timeout in bringup" }, 907 { MPI3MR_RESET_FROM_FAULT_WATCH, "fault" }, 908 { MPI3MR_RESET_FROM_APP, "application invocation" }, 909 { MPI3MR_RESET_FROM_EH_HOS, "error handling" }, 910 { MPI3MR_RESET_FROM_TM_TIMEOUT, "TM timeout" }, 911 { MPI3MR_RESET_FROM_APP_TIMEOUT, "application command timeout" }, 912 { MPI3MR_RESET_FROM_MUR_FAILURE, "MUR failure" }, 913 { MPI3MR_RESET_FROM_CTLR_CLEANUP, "timeout in 
controller cleanup" }, 914 { MPI3MR_RESET_FROM_CIACTIV_FAULT, "component image activation fault" }, 915 { MPI3MR_RESET_FROM_PE_TIMEOUT, "port enable timeout" }, 916 { MPI3MR_RESET_FROM_TSU_TIMEOUT, "time stamp update timeout" }, 917 { MPI3MR_RESET_FROM_DELREQQ_TIMEOUT, "delete request queue timeout" }, 918 { MPI3MR_RESET_FROM_DELREPQ_TIMEOUT, "delete reply queue timeout" }, 919 { 920 MPI3MR_RESET_FROM_CREATEREPQ_TIMEOUT, 921 "create request queue timeout" 922 }, 923 { 924 MPI3MR_RESET_FROM_CREATEREQQ_TIMEOUT, 925 "create reply queue timeout" 926 }, 927 { MPI3MR_RESET_FROM_IOCFACTS_TIMEOUT, "IOC facts timeout" }, 928 { MPI3MR_RESET_FROM_IOCINIT_TIMEOUT, "IOC init timeout" }, 929 { MPI3MR_RESET_FROM_EVTNOTIFY_TIMEOUT, "event notify timeout" }, 930 { MPI3MR_RESET_FROM_EVTACK_TIMEOUT, "event acknowledgment timeout" }, 931 { 932 MPI3MR_RESET_FROM_CIACTVRST_TIMER, 933 "component image activation timeout" 934 }, 935 { 936 MPI3MR_RESET_FROM_GETPKGVER_TIMEOUT, 937 "get package version timeout" 938 }, 939 { MPI3MR_RESET_FROM_SYSFS, "sysfs invocation" }, 940 { MPI3MR_RESET_FROM_SYSFS_TIMEOUT, "sysfs TM timeout" }, 941 { MPI3MR_RESET_FROM_FIRMWARE, "firmware asynchronous reset" }, 942 { MPI3MR_RESET_FROM_CFG_REQ_TIMEOUT, "configuration request timeout"}, 943 { MPI3MR_RESET_FROM_SAS_TRANSPORT_TIMEOUT, "timeout of a SAS transport layer request" }, 944 }; 945 946 /** 947 * mpi3mr_reset_rc_name - get reset reason code name 948 * @reason_code: reset reason code value 949 * 950 * Map reset reason to an NULL terminated ASCII string 951 * 952 * Return: name corresponding to reset reason value or NULL. 
953 */ 954 static const char *mpi3mr_reset_rc_name(enum mpi3mr_reset_reason reason_code) 955 { 956 int i; 957 char *name = NULL; 958 959 for (i = 0; i < ARRAY_SIZE(mpi3mr_reset_reason_codes); i++) { 960 if (mpi3mr_reset_reason_codes[i].value == reason_code) { 961 name = mpi3mr_reset_reason_codes[i].name; 962 break; 963 } 964 } 965 return name; 966 } 967 968 /* Reset type to name mapper structure*/ 969 static const struct { 970 u16 reset_type; 971 char *name; 972 } mpi3mr_reset_types[] = { 973 { MPI3_SYSIF_HOST_DIAG_RESET_ACTION_SOFT_RESET, "soft" }, 974 { MPI3_SYSIF_HOST_DIAG_RESET_ACTION_DIAG_FAULT, "diag fault" }, 975 }; 976 977 /** 978 * mpi3mr_reset_type_name - get reset type name 979 * @reset_type: reset type value 980 * 981 * Map reset type to an NULL terminated ASCII string 982 * 983 * Return: name corresponding to reset type value or NULL. 984 */ 985 static const char *mpi3mr_reset_type_name(u16 reset_type) 986 { 987 int i; 988 char *name = NULL; 989 990 for (i = 0; i < ARRAY_SIZE(mpi3mr_reset_types); i++) { 991 if (mpi3mr_reset_types[i].reset_type == reset_type) { 992 name = mpi3mr_reset_types[i].name; 993 break; 994 } 995 } 996 return name; 997 } 998 999 /** 1000 * mpi3mr_print_fault_info - Display fault information 1001 * @mrioc: Adapter instance reference 1002 * 1003 * Display the controller fault information if there is a 1004 * controller fault. 1005 * 1006 * Return: Nothing. 
1007 */ 1008 void mpi3mr_print_fault_info(struct mpi3mr_ioc *mrioc) 1009 { 1010 u32 ioc_status, code, code1, code2, code3; 1011 1012 ioc_status = readl(&mrioc->sysif_regs->ioc_status); 1013 1014 if (ioc_status & MPI3_SYSIF_IOC_STATUS_FAULT) { 1015 code = readl(&mrioc->sysif_regs->fault); 1016 code1 = readl(&mrioc->sysif_regs->fault_info[0]); 1017 code2 = readl(&mrioc->sysif_regs->fault_info[1]); 1018 code3 = readl(&mrioc->sysif_regs->fault_info[2]); 1019 1020 ioc_info(mrioc, 1021 "fault code(0x%08X): Additional code: (0x%08X:0x%08X:0x%08X)\n", 1022 code, code1, code2, code3); 1023 } 1024 } 1025 1026 /** 1027 * mpi3mr_get_iocstate - Get IOC State 1028 * @mrioc: Adapter instance reference 1029 * 1030 * Return a proper IOC state enum based on the IOC status and 1031 * IOC configuration and unrcoverable state of the controller. 1032 * 1033 * Return: Current IOC state. 1034 */ 1035 enum mpi3mr_iocstate mpi3mr_get_iocstate(struct mpi3mr_ioc *mrioc) 1036 { 1037 u32 ioc_status, ioc_config; 1038 u8 ready, enabled; 1039 1040 ioc_status = readl(&mrioc->sysif_regs->ioc_status); 1041 ioc_config = readl(&mrioc->sysif_regs->ioc_configuration); 1042 1043 if (mrioc->unrecoverable) 1044 return MRIOC_STATE_UNRECOVERABLE; 1045 if (ioc_status & MPI3_SYSIF_IOC_STATUS_FAULT) 1046 return MRIOC_STATE_FAULT; 1047 1048 ready = (ioc_status & MPI3_SYSIF_IOC_STATUS_READY); 1049 enabled = (ioc_config & MPI3_SYSIF_IOC_CONFIG_ENABLE_IOC); 1050 1051 if (ready && enabled) 1052 return MRIOC_STATE_READY; 1053 if ((!ready) && (!enabled)) 1054 return MRIOC_STATE_RESET; 1055 if ((!ready) && (enabled)) 1056 return MRIOC_STATE_BECOMING_READY; 1057 1058 return MRIOC_STATE_RESET_REQUESTED; 1059 } 1060 1061 /** 1062 * mpi3mr_free_ioctl_dma_memory - free memory for ioctl dma 1063 * @mrioc: Adapter instance reference 1064 * 1065 * Free the DMA memory allocated for IOCTL handling purpose. 
1066 * 1067 * Return: None 1068 */ 1069 static void mpi3mr_free_ioctl_dma_memory(struct mpi3mr_ioc *mrioc) 1070 { 1071 struct dma_memory_desc *mem_desc; 1072 u16 i; 1073 1074 if (!mrioc->ioctl_dma_pool) 1075 return; 1076 1077 for (i = 0; i < MPI3MR_NUM_IOCTL_SGE; i++) { 1078 mem_desc = &mrioc->ioctl_sge[i]; 1079 if (mem_desc->addr) { 1080 dma_pool_free(mrioc->ioctl_dma_pool, 1081 mem_desc->addr, 1082 mem_desc->dma_addr); 1083 mem_desc->addr = NULL; 1084 } 1085 } 1086 dma_pool_destroy(mrioc->ioctl_dma_pool); 1087 mrioc->ioctl_dma_pool = NULL; 1088 mem_desc = &mrioc->ioctl_chain_sge; 1089 1090 if (mem_desc->addr) { 1091 dma_free_coherent(&mrioc->pdev->dev, mem_desc->size, 1092 mem_desc->addr, mem_desc->dma_addr); 1093 mem_desc->addr = NULL; 1094 } 1095 mem_desc = &mrioc->ioctl_resp_sge; 1096 if (mem_desc->addr) { 1097 dma_free_coherent(&mrioc->pdev->dev, mem_desc->size, 1098 mem_desc->addr, mem_desc->dma_addr); 1099 mem_desc->addr = NULL; 1100 } 1101 1102 mrioc->ioctl_sges_allocated = false; 1103 } 1104 1105 /** 1106 * mpi3mr_alloc_ioctl_dma_memory - Alloc memory for ioctl dma 1107 * @mrioc: Adapter instance reference 1108 * 1109 * This function allocates dmaable memory required to handle the 1110 * application issued MPI3 IOCTL requests. 
1111 * 1112 * Return: None 1113 */ 1114 static void mpi3mr_alloc_ioctl_dma_memory(struct mpi3mr_ioc *mrioc) 1115 1116 { 1117 struct dma_memory_desc *mem_desc; 1118 u16 i; 1119 1120 mrioc->ioctl_dma_pool = dma_pool_create("ioctl dma pool", 1121 &mrioc->pdev->dev, 1122 MPI3MR_IOCTL_SGE_SIZE, 1123 MPI3MR_PAGE_SIZE_4K, 0); 1124 1125 if (!mrioc->ioctl_dma_pool) { 1126 ioc_err(mrioc, "ioctl_dma_pool: dma_pool_create failed\n"); 1127 goto out_failed; 1128 } 1129 1130 for (i = 0; i < MPI3MR_NUM_IOCTL_SGE; i++) { 1131 mem_desc = &mrioc->ioctl_sge[i]; 1132 mem_desc->size = MPI3MR_IOCTL_SGE_SIZE; 1133 mem_desc->addr = dma_pool_zalloc(mrioc->ioctl_dma_pool, 1134 GFP_KERNEL, 1135 &mem_desc->dma_addr); 1136 if (!mem_desc->addr) 1137 goto out_failed; 1138 } 1139 1140 mem_desc = &mrioc->ioctl_chain_sge; 1141 mem_desc->size = MPI3MR_PAGE_SIZE_4K; 1142 mem_desc->addr = dma_alloc_coherent(&mrioc->pdev->dev, 1143 mem_desc->size, 1144 &mem_desc->dma_addr, 1145 GFP_KERNEL); 1146 if (!mem_desc->addr) 1147 goto out_failed; 1148 1149 mem_desc = &mrioc->ioctl_resp_sge; 1150 mem_desc->size = MPI3MR_PAGE_SIZE_4K; 1151 mem_desc->addr = dma_alloc_coherent(&mrioc->pdev->dev, 1152 mem_desc->size, 1153 &mem_desc->dma_addr, 1154 GFP_KERNEL); 1155 if (!mem_desc->addr) 1156 goto out_failed; 1157 1158 mrioc->ioctl_sges_allocated = true; 1159 1160 return; 1161 out_failed: 1162 ioc_warn(mrioc, "cannot allocate DMA memory for the mpt commands\n" 1163 "from the applications, application interface for MPT command is disabled\n"); 1164 mpi3mr_free_ioctl_dma_memory(mrioc); 1165 } 1166 1167 /** 1168 * mpi3mr_clear_reset_history - clear reset history 1169 * @mrioc: Adapter instance reference 1170 * 1171 * Write the reset history bit in IOC status to clear the bit, 1172 * if it is already set. 1173 * 1174 * Return: Nothing. 
 */
static inline void mpi3mr_clear_reset_history(struct mpi3mr_ioc *mrioc)
{
	u32 ioc_status;

	/* The reset-history bit is write-1-to-clear; write back the
	 * status value only when the bit is actually set. */
	ioc_status = readl(&mrioc->sysif_regs->ioc_status);
	if (ioc_status & MPI3_SYSIF_IOC_STATUS_RESET_HISTORY)
		writel(ioc_status, &mrioc->sysif_regs->ioc_status);
}

/**
 * mpi3mr_issue_and_process_mur - Message unit Reset handler
 * @mrioc: Adapter instance reference
 * @reset_reason: Reset reason code
 *
 * Issue Message unit Reset to the controller and wait for it to
 * be complete.
 *
 * Return: 0 on success, -1 on failure.
 */
static int mpi3mr_issue_and_process_mur(struct mpi3mr_ioc *mrioc,
	u32 reset_reason)
{
	u32 ioc_config, timeout, ioc_status, scratch_pad0;
	int retval = -1;

	ioc_info(mrioc, "Issuing Message unit Reset(MUR)\n");
	if (mrioc->unrecoverable) {
		ioc_info(mrioc, "IOC is unrecoverable MUR not issued\n");
		return retval;
	}
	mpi3mr_clear_reset_history(mrioc);
	/* Encode OS type, IOC number and the reason into scratchpad[0]
	 * so firmware can log why the host reset it. */
	scratch_pad0 = ((MPI3MR_RESET_REASON_OSTYPE_LINUX <<
	    MPI3MR_RESET_REASON_OSTYPE_SHIFT) |
	    (mrioc->facts.ioc_num <<
	    MPI3MR_RESET_REASON_IOCNUM_SHIFT) | reset_reason);
	writel(scratch_pad0, &mrioc->sysif_regs->scratchpad[0]);
	/* Clearing ENABLE_IOC triggers the message unit reset. */
	ioc_config = readl(&mrioc->sysif_regs->ioc_configuration);
	ioc_config &= ~MPI3_SYSIF_IOC_CONFIG_ENABLE_IOC;
	writel(ioc_config, &mrioc->sysif_regs->ioc_configuration);

	/* Poll every 100ms; timeout counts 100ms ticks. */
	timeout = MPI3MR_MUR_TIMEOUT * 10;
	do {
		ioc_status = readl(&mrioc->sysif_regs->ioc_status);
		if ((ioc_status & MPI3_SYSIF_IOC_STATUS_RESET_HISTORY)) {
			mpi3mr_clear_reset_history(mrioc);
			break;
		}
		if (ioc_status & MPI3_SYSIF_IOC_STATUS_FAULT) {
			mpi3mr_print_fault_info(mrioc);
			break;
		}
		msleep(100);
	} while (--timeout);

	/* Success = no timeout, and READY/FAULT/ENABLE_IOC all clear. */
	ioc_config = readl(&mrioc->sysif_regs->ioc_configuration);
	if (timeout && !((ioc_status & MPI3_SYSIF_IOC_STATUS_READY) ||
	    (ioc_status & MPI3_SYSIF_IOC_STATUS_FAULT) ||
	    (ioc_config & MPI3_SYSIF_IOC_CONFIG_ENABLE_IOC)))
		retval = 0;

	ioc_info(mrioc, "Base IOC Sts/Config after %s MUR is (0x%x)/(0x%x)\n",
	    (!retval) ? "successful" : "failed", ioc_status, ioc_config);
	return retval;
}

/**
 * mpi3mr_revalidate_factsdata - validate IOCFacts parameters
 * during reset/resume
 * @mrioc: Adapter instance reference
 *
 * Reject firmware facts that shrink below what the driver has
 * already sized its resources for; grow the remove-pending device
 * handle bitmap when the handle space expands.
 *
 * Return: zero if the new IOCFacts parameters value is compatible with
 * older values else return -EPERM
 */
static int
mpi3mr_revalidate_factsdata(struct mpi3mr_ioc *mrioc)
{
	unsigned long *removepend_bitmap;

	/* Reply buffers were sized from the old reply_sz; a larger
	 * firmware reply size would overflow them. */
	if (mrioc->facts.reply_sz > mrioc->reply_sz) {
		ioc_err(mrioc,
		    "cannot increase reply size from %d to %d\n",
		    mrioc->reply_sz, mrioc->facts.reply_sz);
		return -EPERM;
	}

	if (mrioc->facts.max_op_reply_q < mrioc->num_op_reply_q) {
		ioc_err(mrioc,
		    "cannot reduce number of operational reply queues from %d to %d\n",
		    mrioc->num_op_reply_q,
		    mrioc->facts.max_op_reply_q);
		return -EPERM;
	}

	if (mrioc->facts.max_op_req_q < mrioc->num_op_req_q) {
		ioc_err(mrioc,
		    "cannot reduce number of operational request queues from %d to %d\n",
		    mrioc->num_op_req_q, mrioc->facts.max_op_req_q);
		return -EPERM;
	}

	/* max_sectors is fixed at shost allocation; only warn here. */
	if (mrioc->shost->max_sectors != (mrioc->facts.max_data_length / 512))
		ioc_err(mrioc, "Warning: The maximum data transfer length\n"
		    "\tchanged after reset: previous(%d), new(%d),\n"
		    "the driver cannot change this at run time\n",
		    mrioc->shost->max_sectors * 512, mrioc->facts.max_data_length);

	if ((mrioc->sas_transport_enabled) && (mrioc->facts.ioc_capabilities &
	    MPI3_IOCFACTS_CAPABILITY_MULTIPATH_SUPPORTED))
		ioc_err(mrioc,
		    "critical error: multipath capability is enabled at the\n"
		    "\tcontroller while sas transport support is enabled at the\n"
		    "\tdriver, please reboot the system or reload the driver\n");

	/* Handle space grew: reallocate the remove-pending bitmap before
	 * adopting the new size, keeping the old one on failure. */
	if (mrioc->facts.max_devhandle > mrioc->dev_handle_bitmap_bits) {
		removepend_bitmap = bitmap_zalloc(mrioc->facts.max_devhandle,
		    GFP_KERNEL);
		if (!removepend_bitmap) {
			ioc_err(mrioc,
			    "failed to increase removepend_bitmap bits from %d to %d\n",
			    mrioc->dev_handle_bitmap_bits,
			    mrioc->facts.max_devhandle);
			return -EPERM;
		}
		bitmap_free(mrioc->removepend_bitmap);
		mrioc->removepend_bitmap = removepend_bitmap;
		ioc_info(mrioc,
		    "increased bits of dev_handle_bitmap from %d to %d\n",
		    mrioc->dev_handle_bitmap_bits,
		    mrioc->facts.max_devhandle);
		mrioc->dev_handle_bitmap_bits = mrioc->facts.max_devhandle;
	}

	return 0;
}

/**
 * mpi3mr_bring_ioc_ready - Bring controller to ready state
 * @mrioc: Adapter instance reference
 *
 * Set Enable IOC bit in IOC configuration register and wait for
 * the controller to become ready.
 *
 * Return: 0 on success, appropriate error on failure.
1319 */ 1320 static int mpi3mr_bring_ioc_ready(struct mpi3mr_ioc *mrioc) 1321 { 1322 u32 ioc_config, ioc_status, timeout, host_diagnostic; 1323 int retval = 0; 1324 enum mpi3mr_iocstate ioc_state; 1325 u64 base_info; 1326 1327 ioc_status = readl(&mrioc->sysif_regs->ioc_status); 1328 ioc_config = readl(&mrioc->sysif_regs->ioc_configuration); 1329 base_info = lo_hi_readq(&mrioc->sysif_regs->ioc_information); 1330 ioc_info(mrioc, "ioc_status(0x%08x), ioc_config(0x%08x), ioc_info(0x%016llx) at the bringup\n", 1331 ioc_status, ioc_config, base_info); 1332 1333 /*The timeout value is in 2sec unit, changing it to seconds*/ 1334 mrioc->ready_timeout = 1335 ((base_info & MPI3_SYSIF_IOC_INFO_LOW_TIMEOUT_MASK) >> 1336 MPI3_SYSIF_IOC_INFO_LOW_TIMEOUT_SHIFT) * 2; 1337 1338 ioc_info(mrioc, "ready timeout: %d seconds\n", mrioc->ready_timeout); 1339 1340 ioc_state = mpi3mr_get_iocstate(mrioc); 1341 ioc_info(mrioc, "controller is in %s state during detection\n", 1342 mpi3mr_iocstate_name(ioc_state)); 1343 1344 if (ioc_state == MRIOC_STATE_BECOMING_READY || 1345 ioc_state == MRIOC_STATE_RESET_REQUESTED) { 1346 timeout = mrioc->ready_timeout * 10; 1347 do { 1348 msleep(100); 1349 } while (--timeout); 1350 1351 if (!pci_device_is_present(mrioc->pdev)) { 1352 mrioc->unrecoverable = 1; 1353 ioc_err(mrioc, 1354 "controller is not present while waiting to reset\n"); 1355 retval = -1; 1356 goto out_device_not_present; 1357 } 1358 1359 ioc_state = mpi3mr_get_iocstate(mrioc); 1360 ioc_info(mrioc, 1361 "controller is in %s state after waiting to reset\n", 1362 mpi3mr_iocstate_name(ioc_state)); 1363 } 1364 1365 if (ioc_state == MRIOC_STATE_READY) { 1366 ioc_info(mrioc, "issuing message unit reset (MUR) to bring to reset state\n"); 1367 retval = mpi3mr_issue_and_process_mur(mrioc, 1368 MPI3MR_RESET_FROM_BRINGUP); 1369 ioc_state = mpi3mr_get_iocstate(mrioc); 1370 if (retval) 1371 ioc_err(mrioc, 1372 "message unit reset failed with error %d current state %s\n", 1373 retval, 
mpi3mr_iocstate_name(ioc_state)); 1374 } 1375 if (ioc_state != MRIOC_STATE_RESET) { 1376 if (ioc_state == MRIOC_STATE_FAULT) { 1377 timeout = MPI3_SYSIF_DIAG_SAVE_TIMEOUT * 10; 1378 mpi3mr_print_fault_info(mrioc); 1379 do { 1380 host_diagnostic = 1381 readl(&mrioc->sysif_regs->host_diagnostic); 1382 if (!(host_diagnostic & 1383 MPI3_SYSIF_HOST_DIAG_SAVE_IN_PROGRESS)) 1384 break; 1385 if (!pci_device_is_present(mrioc->pdev)) { 1386 mrioc->unrecoverable = 1; 1387 ioc_err(mrioc, "controller is not present at the bringup\n"); 1388 goto out_device_not_present; 1389 } 1390 msleep(100); 1391 } while (--timeout); 1392 } 1393 mpi3mr_print_fault_info(mrioc); 1394 ioc_info(mrioc, "issuing soft reset to bring to reset state\n"); 1395 retval = mpi3mr_issue_reset(mrioc, 1396 MPI3_SYSIF_HOST_DIAG_RESET_ACTION_SOFT_RESET, 1397 MPI3MR_RESET_FROM_BRINGUP); 1398 if (retval) { 1399 ioc_err(mrioc, 1400 "soft reset failed with error %d\n", retval); 1401 goto out_failed; 1402 } 1403 } 1404 ioc_state = mpi3mr_get_iocstate(mrioc); 1405 if (ioc_state != MRIOC_STATE_RESET) { 1406 ioc_err(mrioc, 1407 "cannot bring controller to reset state, current state: %s\n", 1408 mpi3mr_iocstate_name(ioc_state)); 1409 goto out_failed; 1410 } 1411 mpi3mr_clear_reset_history(mrioc); 1412 retval = mpi3mr_setup_admin_qpair(mrioc); 1413 if (retval) { 1414 ioc_err(mrioc, "failed to setup admin queues: error %d\n", 1415 retval); 1416 goto out_failed; 1417 } 1418 1419 ioc_info(mrioc, "bringing controller to ready state\n"); 1420 ioc_config = readl(&mrioc->sysif_regs->ioc_configuration); 1421 ioc_config |= MPI3_SYSIF_IOC_CONFIG_ENABLE_IOC; 1422 writel(ioc_config, &mrioc->sysif_regs->ioc_configuration); 1423 1424 timeout = mrioc->ready_timeout * 10; 1425 do { 1426 ioc_state = mpi3mr_get_iocstate(mrioc); 1427 if (ioc_state == MRIOC_STATE_READY) { 1428 ioc_info(mrioc, 1429 "successfully transitioned to %s state\n", 1430 mpi3mr_iocstate_name(ioc_state)); 1431 return 0; 1432 } 1433 if 
(!pci_device_is_present(mrioc->pdev)) { 1434 mrioc->unrecoverable = 1; 1435 ioc_err(mrioc, 1436 "controller is not present at the bringup\n"); 1437 retval = -1; 1438 goto out_device_not_present; 1439 } 1440 msleep(100); 1441 } while (--timeout); 1442 1443 out_failed: 1444 ioc_state = mpi3mr_get_iocstate(mrioc); 1445 ioc_err(mrioc, 1446 "failed to bring to ready state, current state: %s\n", 1447 mpi3mr_iocstate_name(ioc_state)); 1448 out_device_not_present: 1449 return retval; 1450 } 1451 1452 /** 1453 * mpi3mr_soft_reset_success - Check softreset is success or not 1454 * @ioc_status: IOC status register value 1455 * @ioc_config: IOC config register value 1456 * 1457 * Check whether the soft reset is successful or not based on 1458 * IOC status and IOC config register values. 1459 * 1460 * Return: True when the soft reset is success, false otherwise. 1461 */ 1462 static inline bool 1463 mpi3mr_soft_reset_success(u32 ioc_status, u32 ioc_config) 1464 { 1465 if (!((ioc_status & MPI3_SYSIF_IOC_STATUS_READY) || 1466 (ioc_config & MPI3_SYSIF_IOC_CONFIG_ENABLE_IOC))) 1467 return true; 1468 return false; 1469 } 1470 1471 /** 1472 * mpi3mr_diagfault_success - Check diag fault is success or not 1473 * @mrioc: Adapter reference 1474 * @ioc_status: IOC status register value 1475 * 1476 * Check whether the controller hit diag reset fault code. 1477 * 1478 * Return: True when there is diag fault, false otherwise. 
1479 */ 1480 static inline bool mpi3mr_diagfault_success(struct mpi3mr_ioc *mrioc, 1481 u32 ioc_status) 1482 { 1483 u32 fault; 1484 1485 if (!(ioc_status & MPI3_SYSIF_IOC_STATUS_FAULT)) 1486 return false; 1487 fault = readl(&mrioc->sysif_regs->fault) & MPI3_SYSIF_FAULT_CODE_MASK; 1488 if (fault == MPI3_SYSIF_FAULT_CODE_DIAG_FAULT_RESET) { 1489 mpi3mr_print_fault_info(mrioc); 1490 return true; 1491 } 1492 return false; 1493 } 1494 1495 /** 1496 * mpi3mr_set_diagsave - Set diag save bit for snapdump 1497 * @mrioc: Adapter reference 1498 * 1499 * Set diag save bit in IOC configuration register to enable 1500 * snapdump. 1501 * 1502 * Return: Nothing. 1503 */ 1504 static inline void mpi3mr_set_diagsave(struct mpi3mr_ioc *mrioc) 1505 { 1506 u32 ioc_config; 1507 1508 ioc_config = readl(&mrioc->sysif_regs->ioc_configuration); 1509 ioc_config |= MPI3_SYSIF_IOC_CONFIG_DIAG_SAVE; 1510 writel(ioc_config, &mrioc->sysif_regs->ioc_configuration); 1511 } 1512 1513 /** 1514 * mpi3mr_issue_reset - Issue reset to the controller 1515 * @mrioc: Adapter reference 1516 * @reset_type: Reset type 1517 * @reset_reason: Reset reason code 1518 * 1519 * Unlock the host diagnostic registers and write the specific 1520 * reset type to that, wait for reset acknowledgment from the 1521 * controller, if the reset is not successful retry for the 1522 * predefined number of times. 1523 * 1524 * Return: 0 on success, non-zero on failure. 
1525 */ 1526 static int mpi3mr_issue_reset(struct mpi3mr_ioc *mrioc, u16 reset_type, 1527 u16 reset_reason) 1528 { 1529 int retval = -1; 1530 u8 unlock_retry_count = 0; 1531 u32 host_diagnostic, ioc_status, ioc_config, scratch_pad0; 1532 u32 timeout = MPI3MR_RESET_ACK_TIMEOUT * 10; 1533 1534 if ((reset_type != MPI3_SYSIF_HOST_DIAG_RESET_ACTION_SOFT_RESET) && 1535 (reset_type != MPI3_SYSIF_HOST_DIAG_RESET_ACTION_DIAG_FAULT)) 1536 return retval; 1537 if (mrioc->unrecoverable) 1538 return retval; 1539 if (reset_reason == MPI3MR_RESET_FROM_FIRMWARE) { 1540 retval = 0; 1541 return retval; 1542 } 1543 1544 ioc_info(mrioc, "%s reset due to %s(0x%x)\n", 1545 mpi3mr_reset_type_name(reset_type), 1546 mpi3mr_reset_rc_name(reset_reason), reset_reason); 1547 1548 mpi3mr_clear_reset_history(mrioc); 1549 do { 1550 ioc_info(mrioc, 1551 "Write magic sequence to unlock host diag register (retry=%d)\n", 1552 ++unlock_retry_count); 1553 if (unlock_retry_count >= MPI3MR_HOSTDIAG_UNLOCK_RETRY_COUNT) { 1554 ioc_err(mrioc, 1555 "%s reset failed due to unlock failure, host_diagnostic(0x%08x)\n", 1556 mpi3mr_reset_type_name(reset_type), 1557 host_diagnostic); 1558 mrioc->unrecoverable = 1; 1559 return retval; 1560 } 1561 1562 writel(MPI3_SYSIF_WRITE_SEQUENCE_KEY_VALUE_FLUSH, 1563 &mrioc->sysif_regs->write_sequence); 1564 writel(MPI3_SYSIF_WRITE_SEQUENCE_KEY_VALUE_1ST, 1565 &mrioc->sysif_regs->write_sequence); 1566 writel(MPI3_SYSIF_WRITE_SEQUENCE_KEY_VALUE_2ND, 1567 &mrioc->sysif_regs->write_sequence); 1568 writel(MPI3_SYSIF_WRITE_SEQUENCE_KEY_VALUE_3RD, 1569 &mrioc->sysif_regs->write_sequence); 1570 writel(MPI3_SYSIF_WRITE_SEQUENCE_KEY_VALUE_4TH, 1571 &mrioc->sysif_regs->write_sequence); 1572 writel(MPI3_SYSIF_WRITE_SEQUENCE_KEY_VALUE_5TH, 1573 &mrioc->sysif_regs->write_sequence); 1574 writel(MPI3_SYSIF_WRITE_SEQUENCE_KEY_VALUE_6TH, 1575 &mrioc->sysif_regs->write_sequence); 1576 usleep_range(1000, 1100); 1577 host_diagnostic = readl(&mrioc->sysif_regs->host_diagnostic); 1578 
ioc_info(mrioc, 1579 "wrote magic sequence: retry_count(%d), host_diagnostic(0x%08x)\n", 1580 unlock_retry_count, host_diagnostic); 1581 } while (!(host_diagnostic & MPI3_SYSIF_HOST_DIAG_DIAG_WRITE_ENABLE)); 1582 1583 scratch_pad0 = ((MPI3MR_RESET_REASON_OSTYPE_LINUX << 1584 MPI3MR_RESET_REASON_OSTYPE_SHIFT) | (mrioc->facts.ioc_num << 1585 MPI3MR_RESET_REASON_IOCNUM_SHIFT) | reset_reason); 1586 writel(reset_reason, &mrioc->sysif_regs->scratchpad[0]); 1587 writel(host_diagnostic | reset_type, 1588 &mrioc->sysif_regs->host_diagnostic); 1589 switch (reset_type) { 1590 case MPI3_SYSIF_HOST_DIAG_RESET_ACTION_SOFT_RESET: 1591 do { 1592 ioc_status = readl(&mrioc->sysif_regs->ioc_status); 1593 ioc_config = 1594 readl(&mrioc->sysif_regs->ioc_configuration); 1595 if ((ioc_status & MPI3_SYSIF_IOC_STATUS_RESET_HISTORY) 1596 && mpi3mr_soft_reset_success(ioc_status, ioc_config) 1597 ) { 1598 mpi3mr_clear_reset_history(mrioc); 1599 retval = 0; 1600 break; 1601 } 1602 msleep(100); 1603 } while (--timeout); 1604 mpi3mr_print_fault_info(mrioc); 1605 break; 1606 case MPI3_SYSIF_HOST_DIAG_RESET_ACTION_DIAG_FAULT: 1607 do { 1608 ioc_status = readl(&mrioc->sysif_regs->ioc_status); 1609 if (mpi3mr_diagfault_success(mrioc, ioc_status)) { 1610 retval = 0; 1611 break; 1612 } 1613 msleep(100); 1614 } while (--timeout); 1615 break; 1616 default: 1617 break; 1618 } 1619 1620 writel(MPI3_SYSIF_WRITE_SEQUENCE_KEY_VALUE_2ND, 1621 &mrioc->sysif_regs->write_sequence); 1622 1623 ioc_config = readl(&mrioc->sysif_regs->ioc_configuration); 1624 ioc_status = readl(&mrioc->sysif_regs->ioc_status); 1625 ioc_info(mrioc, 1626 "ioc_status/ioc_onfig after %s reset is (0x%x)/(0x%x)\n", 1627 (!retval)?"successful":"failed", ioc_status, 1628 ioc_config); 1629 if (retval) 1630 mrioc->unrecoverable = 1; 1631 return retval; 1632 } 1633 1634 /** 1635 * mpi3mr_admin_request_post - Post request to admin queue 1636 * @mrioc: Adapter reference 1637 * @admin_req: MPI3 request 1638 * @admin_req_sz: Request size 1639 * 
@ignore_reset: Ignore reset in process 1640 * 1641 * Post the MPI3 request into admin request queue and 1642 * inform the controller, if the queue is full return 1643 * appropriate error. 1644 * 1645 * Return: 0 on success, non-zero on failure. 1646 */ 1647 int mpi3mr_admin_request_post(struct mpi3mr_ioc *mrioc, void *admin_req, 1648 u16 admin_req_sz, u8 ignore_reset) 1649 { 1650 u16 areq_pi = 0, areq_ci = 0, max_entries = 0; 1651 int retval = 0; 1652 unsigned long flags; 1653 u8 *areq_entry; 1654 1655 if (mrioc->unrecoverable) { 1656 ioc_err(mrioc, "%s : Unrecoverable controller\n", __func__); 1657 return -EFAULT; 1658 } 1659 1660 spin_lock_irqsave(&mrioc->admin_req_lock, flags); 1661 areq_pi = mrioc->admin_req_pi; 1662 areq_ci = mrioc->admin_req_ci; 1663 max_entries = mrioc->num_admin_req; 1664 if ((areq_ci == (areq_pi + 1)) || ((!areq_ci) && 1665 (areq_pi == (max_entries - 1)))) { 1666 ioc_err(mrioc, "AdminReqQ full condition detected\n"); 1667 retval = -EAGAIN; 1668 goto out; 1669 } 1670 if (!ignore_reset && mrioc->reset_in_progress) { 1671 ioc_err(mrioc, "AdminReqQ submit reset in progress\n"); 1672 retval = -EAGAIN; 1673 goto out; 1674 } 1675 areq_entry = (u8 *)mrioc->admin_req_base + 1676 (areq_pi * MPI3MR_ADMIN_REQ_FRAME_SZ); 1677 memset(areq_entry, 0, MPI3MR_ADMIN_REQ_FRAME_SZ); 1678 memcpy(areq_entry, (u8 *)admin_req, admin_req_sz); 1679 1680 if (++areq_pi == max_entries) 1681 areq_pi = 0; 1682 mrioc->admin_req_pi = areq_pi; 1683 1684 writel(mrioc->admin_req_pi, &mrioc->sysif_regs->admin_request_queue_pi); 1685 1686 out: 1687 spin_unlock_irqrestore(&mrioc->admin_req_lock, flags); 1688 1689 return retval; 1690 } 1691 1692 /** 1693 * mpi3mr_free_op_req_q_segments - free request memory segments 1694 * @mrioc: Adapter instance reference 1695 * @q_idx: operational request queue index 1696 * 1697 * Free memory segments allocated for operational request queue 1698 * 1699 * Return: Nothing. 
1700 */ 1701 static void mpi3mr_free_op_req_q_segments(struct mpi3mr_ioc *mrioc, u16 q_idx) 1702 { 1703 u16 j; 1704 int size; 1705 struct segments *segments; 1706 1707 segments = mrioc->req_qinfo[q_idx].q_segments; 1708 if (!segments) 1709 return; 1710 1711 if (mrioc->enable_segqueue) { 1712 size = MPI3MR_OP_REQ_Q_SEG_SIZE; 1713 if (mrioc->req_qinfo[q_idx].q_segment_list) { 1714 dma_free_coherent(&mrioc->pdev->dev, 1715 MPI3MR_MAX_SEG_LIST_SIZE, 1716 mrioc->req_qinfo[q_idx].q_segment_list, 1717 mrioc->req_qinfo[q_idx].q_segment_list_dma); 1718 mrioc->req_qinfo[q_idx].q_segment_list = NULL; 1719 } 1720 } else 1721 size = mrioc->req_qinfo[q_idx].segment_qd * 1722 mrioc->facts.op_req_sz; 1723 1724 for (j = 0; j < mrioc->req_qinfo[q_idx].num_segments; j++) { 1725 if (!segments[j].segment) 1726 continue; 1727 dma_free_coherent(&mrioc->pdev->dev, 1728 size, segments[j].segment, segments[j].segment_dma); 1729 segments[j].segment = NULL; 1730 } 1731 kfree(mrioc->req_qinfo[q_idx].q_segments); 1732 mrioc->req_qinfo[q_idx].q_segments = NULL; 1733 mrioc->req_qinfo[q_idx].qid = 0; 1734 } 1735 1736 /** 1737 * mpi3mr_free_op_reply_q_segments - free reply memory segments 1738 * @mrioc: Adapter instance reference 1739 * @q_idx: operational reply queue index 1740 * 1741 * Free memory segments allocated for operational reply queue 1742 * 1743 * Return: Nothing. 
1744 */ 1745 static void mpi3mr_free_op_reply_q_segments(struct mpi3mr_ioc *mrioc, u16 q_idx) 1746 { 1747 u16 j; 1748 int size; 1749 struct segments *segments; 1750 1751 segments = mrioc->op_reply_qinfo[q_idx].q_segments; 1752 if (!segments) 1753 return; 1754 1755 if (mrioc->enable_segqueue) { 1756 size = MPI3MR_OP_REP_Q_SEG_SIZE; 1757 if (mrioc->op_reply_qinfo[q_idx].q_segment_list) { 1758 dma_free_coherent(&mrioc->pdev->dev, 1759 MPI3MR_MAX_SEG_LIST_SIZE, 1760 mrioc->op_reply_qinfo[q_idx].q_segment_list, 1761 mrioc->op_reply_qinfo[q_idx].q_segment_list_dma); 1762 mrioc->op_reply_qinfo[q_idx].q_segment_list = NULL; 1763 } 1764 } else 1765 size = mrioc->op_reply_qinfo[q_idx].segment_qd * 1766 mrioc->op_reply_desc_sz; 1767 1768 for (j = 0; j < mrioc->op_reply_qinfo[q_idx].num_segments; j++) { 1769 if (!segments[j].segment) 1770 continue; 1771 dma_free_coherent(&mrioc->pdev->dev, 1772 size, segments[j].segment, segments[j].segment_dma); 1773 segments[j].segment = NULL; 1774 } 1775 1776 kfree(mrioc->op_reply_qinfo[q_idx].q_segments); 1777 mrioc->op_reply_qinfo[q_idx].q_segments = NULL; 1778 mrioc->op_reply_qinfo[q_idx].qid = 0; 1779 } 1780 1781 /** 1782 * mpi3mr_delete_op_reply_q - delete operational reply queue 1783 * @mrioc: Adapter instance reference 1784 * @qidx: operational reply queue index 1785 * 1786 * Delete operatinal reply queue by issuing MPI request 1787 * through admin queue. 1788 * 1789 * Return: 0 on success, non-zero on failure. 
 */
static int mpi3mr_delete_op_reply_q(struct mpi3mr_ioc *mrioc, u16 qidx)
{
	struct mpi3_delete_reply_queue_request delq_req;
	struct op_reply_qinfo *op_reply_q = mrioc->op_reply_qinfo + qidx;
	int retval = 0;
	u16 reply_qid = 0, midx;

	reply_qid = op_reply_q->qid;

	midx = REPLY_QUEUE_IDX_TO_MSIX_IDX(qidx, mrioc->op_reply_q_offset);

	if (!reply_qid) {
		retval = -1;
		ioc_err(mrioc, "Issue DelRepQ: called with invalid ReqQID\n");
		goto out;
	}

	/* Mirror of the increment done on successful queue creation. */
	(op_reply_q->qtype == MPI3MR_DEFAULT_QUEUE) ? mrioc->default_qcount-- :
	    mrioc->active_poll_qcount--;

	memset(&delq_req, 0, sizeof(delq_req));
	/* init_cmds is the single shared slot for internal admin commands;
	 * the mutex serializes its use. */
	mutex_lock(&mrioc->init_cmds.mutex);
	if (mrioc->init_cmds.state & MPI3MR_CMD_PENDING) {
		retval = -1;
		ioc_err(mrioc, "Issue DelRepQ: Init command is in use\n");
		mutex_unlock(&mrioc->init_cmds.mutex);
		goto out;
	}
	mrioc->init_cmds.state = MPI3MR_CMD_PENDING;
	mrioc->init_cmds.is_waiting = 1;
	mrioc->init_cmds.callback = NULL;
	delq_req.host_tag = cpu_to_le16(MPI3MR_HOSTTAG_INITCMDS);
	delq_req.function = MPI3_FUNCTION_DELETE_REPLY_QUEUE;
	delq_req.queue_id = cpu_to_le16(reply_qid);

	init_completion(&mrioc->init_cmds.done);
	/* ignore_reset=1: queue deletion is part of reset processing. */
	retval = mpi3mr_admin_request_post(mrioc, &delq_req, sizeof(delq_req),
	    1);
	if (retval) {
		ioc_err(mrioc, "Issue DelRepQ: Admin Post failed\n");
		goto out_unlock;
	}
	wait_for_completion_timeout(&mrioc->init_cmds.done,
	    (MPI3MR_INTADMCMD_TIMEOUT * HZ));
	if (!(mrioc->init_cmds.state & MPI3MR_CMD_COMPLETE)) {
		ioc_err(mrioc, "delete reply queue timed out\n");
		mpi3mr_check_rh_fault_ioc(mrioc,
		    MPI3MR_RESET_FROM_DELREPQ_TIMEOUT);
		retval = -1;
		goto out_unlock;
	}
	if ((mrioc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK)
	    != MPI3_IOCSTATUS_SUCCESS) {
		ioc_err(mrioc,
		    "Issue DelRepQ: Failed ioc_status(0x%04x) Loginfo(0x%08x)\n",
		    (mrioc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK),
		    mrioc->init_cmds.ioc_loginfo);
		retval = -1;
		goto out_unlock;
	}
	mrioc->intr_info[midx].op_reply_q = NULL;

	mpi3mr_free_op_reply_q_segments(mrioc, qidx);
out_unlock:
	mrioc->init_cmds.state = MPI3MR_CMD_NOTUSED;
	mutex_unlock(&mrioc->init_cmds.mutex);
out:

	return retval;
}

/**
 * mpi3mr_alloc_op_reply_q_segments -Alloc segmented reply pool
 * @mrioc: Adapter instance reference
 * @qidx: request queue index
 *
 * Allocate segmented memory pools for operational reply
 * queue.
 *
 * Return: 0 on success, non-zero on failure.
 */
static int mpi3mr_alloc_op_reply_q_segments(struct mpi3mr_ioc *mrioc, u16 qidx)
{
	struct op_reply_qinfo *op_reply_q = mrioc->op_reply_qinfo + qidx;
	int i, size;
	u64 *q_segment_list_entry = NULL;
	struct segments *segments;

	if (mrioc->enable_segqueue) {
		/* Fixed-size segments; the list buffer holds one DMA
		 * address per segment for the firmware to walk. */
		op_reply_q->segment_qd =
		    MPI3MR_OP_REP_Q_SEG_SIZE / mrioc->op_reply_desc_sz;

		size = MPI3MR_OP_REP_Q_SEG_SIZE;

		op_reply_q->q_segment_list = dma_alloc_coherent(&mrioc->pdev->dev,
		    MPI3MR_MAX_SEG_LIST_SIZE, &op_reply_q->q_segment_list_dma,
		    GFP_KERNEL);
		if (!op_reply_q->q_segment_list)
			return -ENOMEM;
		q_segment_list_entry = (u64 *)op_reply_q->q_segment_list;
	} else {
		/* Non-segmented mode: one contiguous buffer. */
		op_reply_q->segment_qd = op_reply_q->num_replies;
		size = op_reply_q->num_replies * mrioc->op_reply_desc_sz;
	}

	op_reply_q->num_segments = DIV_ROUND_UP(op_reply_q->num_replies,
	    op_reply_q->segment_qd);

	op_reply_q->q_segments = kcalloc(op_reply_q->num_segments,
	    sizeof(struct segments), GFP_KERNEL);
	if (!op_reply_q->q_segments)
		return -ENOMEM;

	/* NOTE(review): on allocation failure below, partially allocated
	 * segments are left for the caller to release via
	 * mpi3mr_free_op_reply_q_segments(). */
	segments = op_reply_q->q_segments;
	for (i = 0; i < op_reply_q->num_segments; i++) {
		segments[i].segment =
		    dma_alloc_coherent(&mrioc->pdev->dev,
		    size, &segments[i].segment_dma, GFP_KERNEL);
		if (!segments[i].segment)
			return -ENOMEM;
		if (mrioc->enable_segqueue)
			q_segment_list_entry[i] =
			    (unsigned long)segments[i].segment_dma;
	}

	return 0;
}

/**
 * mpi3mr_alloc_op_req_q_segments - Alloc segmented req pool.
 * @mrioc: Adapter instance reference
 * @qidx: request queue index
 *
 * Allocate segmented memory pools for operational request
 * queue.
 *
 * Return: 0 on success, non-zero on failure.
 */
static int mpi3mr_alloc_op_req_q_segments(struct mpi3mr_ioc *mrioc, u16 qidx)
{
	struct op_req_qinfo *op_req_q = mrioc->req_qinfo + qidx;
	int i, size;
	u64 *q_segment_list_entry = NULL;
	struct segments *segments;

	if (mrioc->enable_segqueue) {
		/* Same layout scheme as the reply queue segments. */
		op_req_q->segment_qd =
		    MPI3MR_OP_REQ_Q_SEG_SIZE / mrioc->facts.op_req_sz;

		size = MPI3MR_OP_REQ_Q_SEG_SIZE;

		op_req_q->q_segment_list = dma_alloc_coherent(&mrioc->pdev->dev,
		    MPI3MR_MAX_SEG_LIST_SIZE, &op_req_q->q_segment_list_dma,
		    GFP_KERNEL);
		if (!op_req_q->q_segment_list)
			return -ENOMEM;
		q_segment_list_entry = (u64 *)op_req_q->q_segment_list;

	} else {
		op_req_q->segment_qd = op_req_q->num_requests;
		size = op_req_q->num_requests * mrioc->facts.op_req_sz;
	}

	op_req_q->num_segments = DIV_ROUND_UP(op_req_q->num_requests,
	    op_req_q->segment_qd);

	op_req_q->q_segments = kcalloc(op_req_q->num_segments,
	    sizeof(struct segments), GFP_KERNEL);
	if (!op_req_q->q_segments)
		return -ENOMEM;

	segments = op_req_q->q_segments;
	for (i = 0; i < op_req_q->num_segments; i++) {
		segments[i].segment =
		    dma_alloc_coherent(&mrioc->pdev->dev,
		    size, &segments[i].segment_dma, GFP_KERNEL);
		if (!segments[i].segment)
			return -ENOMEM;
		if (mrioc->enable_segqueue)
			q_segment_list_entry[i] =
			    (unsigned long)segments[i].segment_dma;
	}

	return 0;
}

/**
 * mpi3mr_create_op_reply_q - create operational reply queue
 * @mrioc: Adapter instance reference
 * @qidx: operational reply queue index
 *
 * Create operational reply queue by issuing MPI request
 * through admin queue.
 *
 * Return: 0 on success, non-zero on failure.
 */
static int mpi3mr_create_op_reply_q(struct mpi3mr_ioc *mrioc, u16 qidx)
{
	struct mpi3_create_reply_queue_request create_req;
	struct op_reply_qinfo *op_reply_q = mrioc->op_reply_qinfo + qidx;
	int retval = 0;
	u16 reply_qid = 0, midx;

	reply_qid = op_reply_q->qid;

	midx = REPLY_QUEUE_IDX_TO_MSIX_IDX(qidx, mrioc->op_reply_q_offset);

	/* A non-zero qid means this queue was already created. */
	if (reply_qid) {
		retval = -1;
		ioc_err(mrioc, "CreateRepQ: called for duplicate qid %d\n",
		    reply_qid);

		return retval;
	}

	/* Queue IDs are 1-based on the wire. */
	reply_qid = qidx + 1;
	op_reply_q->num_replies = MPI3MR_OP_REP_Q_QD;
	/* Reduced depth for revision-0 SAS4116 parts. */
	if ((mrioc->pdev->device == MPI3_MFGPAGE_DEVID_SAS4116) &&
	    !mrioc->pdev->revision)
		op_reply_q->num_replies = MPI3MR_OP_REP_Q_QD4K;
	op_reply_q->ci = 0;
	op_reply_q->ephase = 1;
	atomic_set(&op_reply_q->pend_ios, 0);
	atomic_set(&op_reply_q->in_use, 0);
	op_reply_q->enable_irq_poll = false;

	/* Segments survive a reset; only allocate when absent. */
	if (!op_reply_q->q_segments) {
		retval = mpi3mr_alloc_op_reply_q_segments(mrioc, qidx);
		if (retval) {
			mpi3mr_free_op_reply_q_segments(mrioc, qidx);
			goto out;
		}
	}

	memset(&create_req, 0, sizeof(create_req));
	mutex_lock(&mrioc->init_cmds.mutex);
	if (mrioc->init_cmds.state & MPI3MR_CMD_PENDING) {
		retval = -1;
		ioc_err(mrioc, "CreateRepQ: Init command is in use\n");
		goto out_unlock;
	}
	mrioc->init_cmds.state = MPI3MR_CMD_PENDING;
	mrioc->init_cmds.is_waiting = 1;
	mrioc->init_cmds.callback = NULL;
	create_req.host_tag = cpu_to_le16(MPI3MR_HOSTTAG_INITCMDS);
	create_req.function = MPI3_FUNCTION_CREATE_REPLY_QUEUE;
	create_req.queue_id = cpu_to_le16(reply_qid);

	/* MSI-X indices below the poll-queue tail are interrupt driven. */
	if (midx < (mrioc->intr_info_count - mrioc->requested_poll_qcount))
		op_reply_q->qtype = MPI3MR_DEFAULT_QUEUE;
	else
		op_reply_q->qtype = MPI3MR_POLL_QUEUE;

	if (op_reply_q->qtype == MPI3MR_DEFAULT_QUEUE) {
		create_req.flags =
		    MPI3_CREATE_REPLY_QUEUE_FLAGS_INT_ENABLE_ENABLE;
		create_req.msix_index =
		    cpu_to_le16(mrioc->intr_info[midx].msix_index);
	} else {
		/* Poll queues piggyback on the last vector, which is kept
		 * disabled while any poll queue is active. */
		create_req.msix_index = cpu_to_le16(mrioc->intr_info_count - 1);
		ioc_info(mrioc, "create reply queue(polled): for qid(%d), midx(%d)\n",
		    reply_qid, midx);
		if (!mrioc->active_poll_qcount)
			disable_irq_nosync(pci_irq_vector(mrioc->pdev,
			    mrioc->intr_info_count - 1));
	}

	if (mrioc->enable_segqueue) {
		/* NOTE(review): the REQUEST-queue segmented flag define is
		 * used here for a reply queue; presumably its value matches
		 * MPI3_CREATE_REPLY_QUEUE_FLAGS_SEGMENTED_SEGMENTED —
		 * confirm against mpi3_ioc.h. */
		create_req.flags |=
		    MPI3_CREATE_REQUEST_QUEUE_FLAGS_SEGMENTED_SEGMENTED;
		create_req.base_address = cpu_to_le64(
		    op_reply_q->q_segment_list_dma);
	} else
		create_req.base_address = cpu_to_le64(
		    op_reply_q->q_segments[0].segment_dma);

	create_req.size = cpu_to_le16(op_reply_q->num_replies);

	init_completion(&mrioc->init_cmds.done);
	retval = mpi3mr_admin_request_post(mrioc, &create_req,
	    sizeof(create_req), 1);
	if (retval) {
		ioc_err(mrioc, "CreateRepQ: Admin Post failed\n");
		goto out_unlock;
	}
	wait_for_completion_timeout(&mrioc->init_cmds.done,
	    (MPI3MR_INTADMCMD_TIMEOUT * HZ));
	if (!(mrioc->init_cmds.state & MPI3MR_CMD_COMPLETE)) {
		ioc_err(mrioc, "create reply queue timed out\n");
		mpi3mr_check_rh_fault_ioc(mrioc,
		    MPI3MR_RESET_FROM_CREATEREPQ_TIMEOUT);
		retval = -1;
		goto out_unlock;
	}
	if ((mrioc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK)
	    != MPI3_IOCSTATUS_SUCCESS) {
		ioc_err(mrioc,
		    "CreateRepQ: Failed ioc_status(0x%04x) Loginfo(0x%08x)\n",
		    (mrioc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK),
		    mrioc->init_cmds.ioc_loginfo);
		retval = -1;
		goto out_unlock;
	}
	op_reply_q->qid = reply_qid;
	if (midx < mrioc->intr_info_count)
		mrioc->intr_info[midx].op_reply_q = op_reply_q;

	(op_reply_q->qtype == MPI3MR_DEFAULT_QUEUE) ? mrioc->default_qcount++ :
	    mrioc->active_poll_qcount++;

out_unlock:
	mrioc->init_cmds.state = MPI3MR_CMD_NOTUSED;
	mutex_unlock(&mrioc->init_cmds.mutex);
out:

	return retval;
}

/**
 * mpi3mr_create_op_req_q - create operational request queue
 * @mrioc: Adapter instance reference
 * @idx: operational request queue index
 * @reply_qid: Reply queue ID
 *
 * Create operational request queue by issuing MPI request
 * through admin queue.
 *
 * Return: 0 on success, non-zero on failure.
 */
static int mpi3mr_create_op_req_q(struct mpi3mr_ioc *mrioc, u16 idx,
	u16 reply_qid)
{
	struct mpi3_create_request_queue_request create_req;
	struct op_req_qinfo *op_req_q = mrioc->req_qinfo + idx;
	int retval = 0;
	u16 req_qid = 0;

	req_qid = op_req_q->qid;

	/* A non-zero qid means this queue was already created. */
	if (req_qid) {
		retval = -1;
		ioc_err(mrioc, "CreateReqQ: called for duplicate qid %d\n",
		    req_qid);

		return retval;
	}
	/* Queue IDs are 1-based on the wire. */
	req_qid = idx + 1;

	op_req_q->num_requests = MPI3MR_OP_REQ_Q_QD;
	op_req_q->ci = 0;
	op_req_q->pi = 0;
	op_req_q->reply_qid = reply_qid;
	spin_lock_init(&op_req_q->q_lock);

	/* Segments survive a reset; only allocate when absent. */
	if (!op_req_q->q_segments) {
		retval = mpi3mr_alloc_op_req_q_segments(mrioc, idx);
		if (retval) {
			mpi3mr_free_op_req_q_segments(mrioc, idx);
			goto out;
		}
	}

	memset(&create_req, 0, sizeof(create_req));
	mutex_lock(&mrioc->init_cmds.mutex);
	if (mrioc->init_cmds.state & MPI3MR_CMD_PENDING) {
		retval = -1;
		ioc_err(mrioc, "CreateReqQ: Init command is in use\n");
		goto out_unlock;
	}
	mrioc->init_cmds.state = MPI3MR_CMD_PENDING;
	mrioc->init_cmds.is_waiting = 1;
	mrioc->init_cmds.callback = NULL;
	create_req.host_tag = cpu_to_le16(MPI3MR_HOSTTAG_INITCMDS);
	create_req.function = MPI3_FUNCTION_CREATE_REQUEST_QUEUE;
	create_req.queue_id = cpu_to_le16(req_qid);
	if (mrioc->enable_segqueue) {
		create_req.flags =
		    MPI3_CREATE_REQUEST_QUEUE_FLAGS_SEGMENTED_SEGMENTED;
		create_req.base_address = cpu_to_le64(
		    op_req_q->q_segment_list_dma);
	} else
		create_req.base_address = cpu_to_le64(
		    op_req_q->q_segments[0].segment_dma);
	create_req.reply_queue_id = cpu_to_le16(reply_qid);
	create_req.size = cpu_to_le16(op_req_q->num_requests);

	init_completion(&mrioc->init_cmds.done);
	retval = mpi3mr_admin_request_post(mrioc, &create_req,
	    sizeof(create_req), 1);
	if (retval) {
		ioc_err(mrioc, "CreateReqQ: Admin Post failed\n");
		goto out_unlock;
	}
	wait_for_completion_timeout(&mrioc->init_cmds.done,
	    (MPI3MR_INTADMCMD_TIMEOUT * HZ));
	if (!(mrioc->init_cmds.state & MPI3MR_CMD_COMPLETE)) {
		ioc_err(mrioc, "create request queue timed out\n");
		mpi3mr_check_rh_fault_ioc(mrioc,
		    MPI3MR_RESET_FROM_CREATEREQQ_TIMEOUT);
		retval = -1;
		goto out_unlock;
	}
	if ((mrioc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK)
	    != MPI3_IOCSTATUS_SUCCESS) {
		ioc_err(mrioc,
		    "CreateReqQ: Failed ioc_status(0x%04x) Loginfo(0x%08x)\n",
		    (mrioc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK),
		    mrioc->init_cmds.ioc_loginfo);
		retval = -1;
		goto out_unlock;
	}
	op_req_q->qid = req_qid;

out_unlock:
	mrioc->init_cmds.state = MPI3MR_CMD_NOTUSED;
	mutex_unlock(&mrioc->init_cmds.mutex);
out:

	return retval;
}

/**
 * mpi3mr_create_op_queues - create operational queue pairs
 * @mrioc: Adapter instance reference
 *
 * Allocate memory for operational queue meta
data and call 2217 * create request and reply queue functions. 2218 * 2219 * Return: 0 on success, non-zero on failures. 2220 */ 2221 static int mpi3mr_create_op_queues(struct mpi3mr_ioc *mrioc) 2222 { 2223 int retval = 0; 2224 u16 num_queues = 0, i = 0, msix_count_op_q = 1; 2225 2226 num_queues = min_t(int, mrioc->facts.max_op_reply_q, 2227 mrioc->facts.max_op_req_q); 2228 2229 msix_count_op_q = 2230 mrioc->intr_info_count - mrioc->op_reply_q_offset; 2231 if (!mrioc->num_queues) 2232 mrioc->num_queues = min_t(int, num_queues, msix_count_op_q); 2233 /* 2234 * During reset set the num_queues to the number of queues 2235 * that was set before the reset. 2236 */ 2237 num_queues = mrioc->num_op_reply_q ? 2238 mrioc->num_op_reply_q : mrioc->num_queues; 2239 ioc_info(mrioc, "trying to create %d operational queue pairs\n", 2240 num_queues); 2241 2242 if (!mrioc->req_qinfo) { 2243 mrioc->req_qinfo = kcalloc(num_queues, 2244 sizeof(struct op_req_qinfo), GFP_KERNEL); 2245 if (!mrioc->req_qinfo) { 2246 retval = -1; 2247 goto out_failed; 2248 } 2249 2250 mrioc->op_reply_qinfo = kzalloc(sizeof(struct op_reply_qinfo) * 2251 num_queues, GFP_KERNEL); 2252 if (!mrioc->op_reply_qinfo) { 2253 retval = -1; 2254 goto out_failed; 2255 } 2256 } 2257 2258 if (mrioc->enable_segqueue) 2259 ioc_info(mrioc, 2260 "allocating operational queues through segmented queues\n"); 2261 2262 for (i = 0; i < num_queues; i++) { 2263 if (mpi3mr_create_op_reply_q(mrioc, i)) { 2264 ioc_err(mrioc, "Cannot create OP RepQ %d\n", i); 2265 break; 2266 } 2267 if (mpi3mr_create_op_req_q(mrioc, i, 2268 mrioc->op_reply_qinfo[i].qid)) { 2269 ioc_err(mrioc, "Cannot create OP ReqQ %d\n", i); 2270 mpi3mr_delete_op_reply_q(mrioc, i); 2271 break; 2272 } 2273 } 2274 2275 if (i == 0) { 2276 /* Not even one queue is created successfully*/ 2277 retval = -1; 2278 goto out_failed; 2279 } 2280 mrioc->num_op_reply_q = mrioc->num_op_req_q = i; 2281 ioc_info(mrioc, 2282 "successfully created %d operational queue 
pairs(default/polled) queue = (%d/%d)\n", 2283 mrioc->num_op_reply_q, mrioc->default_qcount, 2284 mrioc->active_poll_qcount); 2285 2286 return retval; 2287 out_failed: 2288 kfree(mrioc->req_qinfo); 2289 mrioc->req_qinfo = NULL; 2290 2291 kfree(mrioc->op_reply_qinfo); 2292 mrioc->op_reply_qinfo = NULL; 2293 2294 return retval; 2295 } 2296 2297 /** 2298 * mpi3mr_op_request_post - Post request to operational queue 2299 * @mrioc: Adapter reference 2300 * @op_req_q: Operational request queue info 2301 * @req: MPI3 request 2302 * 2303 * Post the MPI3 request into operational request queue and 2304 * inform the controller, if the queue is full return 2305 * appropriate error. 2306 * 2307 * Return: 0 on success, non-zero on failure. 2308 */ 2309 int mpi3mr_op_request_post(struct mpi3mr_ioc *mrioc, 2310 struct op_req_qinfo *op_req_q, u8 *req) 2311 { 2312 u16 pi = 0, max_entries, reply_qidx = 0, midx; 2313 int retval = 0; 2314 unsigned long flags; 2315 u8 *req_entry; 2316 void *segment_base_addr; 2317 u16 req_sz = mrioc->facts.op_req_sz; 2318 struct segments *segments = op_req_q->q_segments; 2319 2320 reply_qidx = op_req_q->reply_qid - 1; 2321 2322 if (mrioc->unrecoverable) 2323 return -EFAULT; 2324 2325 spin_lock_irqsave(&op_req_q->q_lock, flags); 2326 pi = op_req_q->pi; 2327 max_entries = op_req_q->num_requests; 2328 2329 if (mpi3mr_check_req_qfull(op_req_q)) { 2330 midx = REPLY_QUEUE_IDX_TO_MSIX_IDX( 2331 reply_qidx, mrioc->op_reply_q_offset); 2332 mpi3mr_process_op_reply_q(mrioc, mrioc->intr_info[midx].op_reply_q); 2333 2334 if (mpi3mr_check_req_qfull(op_req_q)) { 2335 retval = -EAGAIN; 2336 goto out; 2337 } 2338 } 2339 2340 if (mrioc->reset_in_progress) { 2341 ioc_err(mrioc, "OpReqQ submit reset in progress\n"); 2342 retval = -EAGAIN; 2343 goto out; 2344 } 2345 2346 segment_base_addr = segments[pi / op_req_q->segment_qd].segment; 2347 req_entry = (u8 *)segment_base_addr + 2348 ((pi % op_req_q->segment_qd) * req_sz); 2349 2350 memset(req_entry, 0, req_sz); 2351 
memcpy(req_entry, req, MPI3MR_ADMIN_REQ_FRAME_SZ); 2352 2353 if (++pi == max_entries) 2354 pi = 0; 2355 op_req_q->pi = pi; 2356 2357 #ifndef CONFIG_PREEMPT_RT 2358 if (atomic_inc_return(&mrioc->op_reply_qinfo[reply_qidx].pend_ios) 2359 > MPI3MR_IRQ_POLL_TRIGGER_IOCOUNT) 2360 mrioc->op_reply_qinfo[reply_qidx].enable_irq_poll = true; 2361 #else 2362 atomic_inc_return(&mrioc->op_reply_qinfo[reply_qidx].pend_ios); 2363 #endif 2364 2365 writel(op_req_q->pi, 2366 &mrioc->sysif_regs->oper_queue_indexes[reply_qidx].producer_index); 2367 2368 out: 2369 spin_unlock_irqrestore(&op_req_q->q_lock, flags); 2370 return retval; 2371 } 2372 2373 /** 2374 * mpi3mr_check_rh_fault_ioc - check reset history and fault 2375 * controller 2376 * @mrioc: Adapter instance reference 2377 * @reason_code: reason code for the fault. 2378 * 2379 * This routine will save snapdump and fault the controller with 2380 * the given reason code if it is not already in the fault or 2381 * not asynchronosuly reset. This will be used to handle 2382 * initilaization time faults/resets/timeout as in those cases 2383 * immediate soft reset invocation is not required. 2384 * 2385 * Return: None. 
 */
void mpi3mr_check_rh_fault_ioc(struct mpi3mr_ioc *mrioc, u32 reason_code)
{
	u32 ioc_status, host_diagnostic, timeout;

	if (mrioc->unrecoverable) {
		ioc_err(mrioc, "controller is unrecoverable\n");
		return;
	}

	if (!pci_device_is_present(mrioc->pdev)) {
		mrioc->unrecoverable = 1;
		ioc_err(mrioc, "controller is not present\n");
		return;
	}

	ioc_status = readl(&mrioc->sysif_regs->ioc_status);
	/* If a reset was already observed or the IOC is already faulted,
	 * only log the existing fault instead of issuing a new diag fault.
	 */
	if ((ioc_status & MPI3_SYSIF_IOC_STATUS_RESET_HISTORY) ||
	    (ioc_status & MPI3_SYSIF_IOC_STATUS_FAULT)) {
		mpi3mr_print_fault_info(mrioc);
		return;
	}
	mpi3mr_set_diagsave(mrioc);
	mpi3mr_issue_reset(mrioc, MPI3_SYSIF_HOST_DIAG_RESET_ACTION_DIAG_FAULT,
	    reason_code);
	/* Poll (in 100ms steps) until the snapdump save completes or the
	 * timeout budget is exhausted.
	 */
	timeout = MPI3_SYSIF_DIAG_SAVE_TIMEOUT * 10;
	do {
		host_diagnostic = readl(&mrioc->sysif_regs->host_diagnostic);
		if (!(host_diagnostic & MPI3_SYSIF_HOST_DIAG_SAVE_IN_PROGRESS))
			break;
		msleep(100);
	} while (--timeout);
}

/**
 * mpi3mr_sync_timestamp - Issue time stamp sync request
 * @mrioc: Adapter reference
 *
 * Issue IO unit control MPI request to synchornize firmware
 * timestamp with host time.
 *
 * Return: 0 on success, non-zero on failure.
 */
static int mpi3mr_sync_timestamp(struct mpi3mr_ioc *mrioc)
{
	ktime_t current_time;
	struct mpi3_iounit_control_request iou_ctrl;
	int retval = 0;

	memset(&iou_ctrl, 0, sizeof(iou_ctrl));
	mutex_lock(&mrioc->init_cmds.mutex);
	if (mrioc->init_cmds.state & MPI3MR_CMD_PENDING) {
		retval = -1;
		ioc_err(mrioc, "Issue IOUCTL time_stamp: command is in use\n");
		mutex_unlock(&mrioc->init_cmds.mutex);
		goto out;
	}
	mrioc->init_cmds.state = MPI3MR_CMD_PENDING;
	mrioc->init_cmds.is_waiting = 1;
	mrioc->init_cmds.callback = NULL;
	iou_ctrl.host_tag = cpu_to_le16(MPI3MR_HOSTTAG_INITCMDS);
	iou_ctrl.function = MPI3_FUNCTION_IO_UNIT_CONTROL;
	iou_ctrl.operation = MPI3_CTRL_OP_UPDATE_TIMESTAMP;
	/* Firmware takes the host wall-clock time in milliseconds */
	current_time = ktime_get_real();
	iou_ctrl.param64[0] = cpu_to_le64(ktime_to_ms(current_time));

	init_completion(&mrioc->init_cmds.done);
	retval = mpi3mr_admin_request_post(mrioc, &iou_ctrl,
	    sizeof(iou_ctrl), 0);
	if (retval) {
		ioc_err(mrioc, "Issue IOUCTL time_stamp: Admin Post failed\n");
		goto out_unlock;
	}

	wait_for_completion_timeout(&mrioc->init_cmds.done,
	    (MPI3MR_INTADMCMD_TIMEOUT * HZ));
	if (!(mrioc->init_cmds.state & MPI3MR_CMD_COMPLETE)) {
		ioc_err(mrioc, "Issue IOUCTL time_stamp: command timed out\n");
		mrioc->init_cmds.is_waiting = 0;
		/* Only fault the IOC if the command was not cleaned up by an
		 * in-flight reset.
		 */
		if (!(mrioc->init_cmds.state & MPI3MR_CMD_RESET))
			mpi3mr_check_rh_fault_ioc(mrioc,
			    MPI3MR_RESET_FROM_TSU_TIMEOUT);
		retval = -1;
		goto out_unlock;
	}
	if ((mrioc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK)
	    != MPI3_IOCSTATUS_SUCCESS) {
		ioc_err(mrioc,
		    "Issue IOUCTL time_stamp: Failed ioc_status(0x%04x) Loginfo(0x%08x)\n",
		    (mrioc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK),
		    mrioc->init_cmds.ioc_loginfo);
		retval = -1;
		goto out_unlock;
	}

out_unlock:
	mrioc->init_cmds.state = MPI3MR_CMD_NOTUSED;
	mutex_unlock(&mrioc->init_cmds.mutex);

out:
	return retval;
}

/**
 * mpi3mr_print_pkg_ver - display controller fw package version
 * @mrioc: Adapter reference
 *
 * Retrieve firmware package version from the component image
 * header of the controller flash and display it.
 *
 * Return: 0 on success and non-zero on failure.
 */
static int mpi3mr_print_pkg_ver(struct mpi3mr_ioc *mrioc)
{
	struct mpi3_ci_upload_request ci_upload;
	int retval = -1;
	void *data = NULL;
	dma_addr_t data_dma;
	struct mpi3_ci_manifest_mpi *manifest;
	u32 data_len = sizeof(struct mpi3_ci_manifest_mpi);
	u8 sgl_flags = MPI3MR_SGEFLAGS_SYSTEM_SIMPLE_END_OF_LIST;

	/* DMA-able bounce buffer that receives the uploaded manifest */
	data = dma_alloc_coherent(&mrioc->pdev->dev, data_len, &data_dma,
	    GFP_KERNEL);
	if (!data)
		return -ENOMEM;

	memset(&ci_upload, 0, sizeof(ci_upload));
	mutex_lock(&mrioc->init_cmds.mutex);
	if (mrioc->init_cmds.state & MPI3MR_CMD_PENDING) {
		ioc_err(mrioc, "sending get package version failed due to command in use\n");
		mutex_unlock(&mrioc->init_cmds.mutex);
		goto out;
	}
	mrioc->init_cmds.state = MPI3MR_CMD_PENDING;
	mrioc->init_cmds.is_waiting = 1;
	mrioc->init_cmds.callback = NULL;
	ci_upload.host_tag = cpu_to_le16(MPI3MR_HOSTTAG_INITCMDS);
	ci_upload.function = MPI3_FUNCTION_CI_UPLOAD;
	ci_upload.msg_flags = MPI3_CI_UPLOAD_MSGFLAGS_LOCATION_PRIMARY;
	ci_upload.signature1 = cpu_to_le32(MPI3_IMAGE_HEADER_SIGNATURE1_MANIFEST);
	ci_upload.image_offset = cpu_to_le32(MPI3_IMAGE_HEADER_SIZE);
	ci_upload.segment_size = cpu_to_le32(data_len);

	mpi3mr_add_sg_single(&ci_upload.sgl, sgl_flags, data_len,
	    data_dma);
	init_completion(&mrioc->init_cmds.done);
	retval = mpi3mr_admin_request_post(mrioc, &ci_upload,
	    sizeof(ci_upload), 1);
	if (retval) {
		ioc_err(mrioc, "posting get package version failed\n");
		goto out_unlock;
	}
	wait_for_completion_timeout(&mrioc->init_cmds.done,
	    (MPI3MR_INTADMCMD_TIMEOUT * HZ));
	if (!(mrioc->init_cmds.state & MPI3MR_CMD_COMPLETE)) {
		ioc_err(mrioc, "get package version timed out\n");
		mpi3mr_check_rh_fault_ioc(mrioc,
		    MPI3MR_RESET_FROM_GETPKGVER_TIMEOUT);
		retval = -1;
		goto out_unlock;
	}
	/* Version display is best-effort: a non-success IOC status is not
	 * treated as an error for the caller.
	 */
	if ((mrioc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK)
	    == MPI3_IOCSTATUS_SUCCESS) {
		manifest = (struct mpi3_ci_manifest_mpi *) data;
		if (manifest->manifest_type == MPI3_CI_MANIFEST_TYPE_MPI) {
			ioc_info(mrioc,
			    "firmware package version(%d.%d.%d.%d.%05d-%05d)\n",
			    manifest->package_version.gen_major,
			    manifest->package_version.gen_minor,
			    manifest->package_version.phase_major,
			    manifest->package_version.phase_minor,
			    manifest->package_version.customer_id,
			    manifest->package_version.build_num);
		}
	}
	retval = 0;
out_unlock:
	mrioc->init_cmds.state = MPI3MR_CMD_NOTUSED;
	mutex_unlock(&mrioc->init_cmds.mutex);

out:
	if (data)
		dma_free_coherent(&mrioc->pdev->dev, data_len, data,
		    data_dma);
	return retval;
}

/**
 * mpi3mr_watchdog_work - watchdog thread to monitor faults
 * @work: work struct
 *
 * Watch dog work periodically executed (1 second interval) to
 * monitor firmware fault and to issue periodic timer sync to
 * the firmware.
 *
 * Return: Nothing.
2583 */ 2584 static void mpi3mr_watchdog_work(struct work_struct *work) 2585 { 2586 struct mpi3mr_ioc *mrioc = 2587 container_of(work, struct mpi3mr_ioc, watchdog_work.work); 2588 unsigned long flags; 2589 enum mpi3mr_iocstate ioc_state; 2590 u32 fault, host_diagnostic, ioc_status; 2591 u16 reset_reason = MPI3MR_RESET_FROM_FAULT_WATCH; 2592 2593 if (mrioc->reset_in_progress) 2594 return; 2595 2596 if (!mrioc->unrecoverable && !pci_device_is_present(mrioc->pdev)) { 2597 ioc_err(mrioc, "watchdog could not detect the controller\n"); 2598 mrioc->unrecoverable = 1; 2599 } 2600 2601 if (mrioc->unrecoverable) { 2602 ioc_err(mrioc, 2603 "flush pending commands for unrecoverable controller\n"); 2604 mpi3mr_flush_cmds_for_unrecovered_controller(mrioc); 2605 return; 2606 } 2607 2608 if (mrioc->ts_update_counter++ >= MPI3MR_TSUPDATE_INTERVAL) { 2609 mrioc->ts_update_counter = 0; 2610 mpi3mr_sync_timestamp(mrioc); 2611 } 2612 2613 if ((mrioc->prepare_for_reset) && 2614 ((mrioc->prepare_for_reset_timeout_counter++) >= 2615 MPI3MR_PREPARE_FOR_RESET_TIMEOUT)) { 2616 mpi3mr_soft_reset_handler(mrioc, 2617 MPI3MR_RESET_FROM_CIACTVRST_TIMER, 1); 2618 return; 2619 } 2620 2621 ioc_status = readl(&mrioc->sysif_regs->ioc_status); 2622 if (ioc_status & MPI3_SYSIF_IOC_STATUS_RESET_HISTORY) { 2623 mpi3mr_soft_reset_handler(mrioc, MPI3MR_RESET_FROM_FIRMWARE, 0); 2624 return; 2625 } 2626 2627 /*Check for fault state every one second and issue Soft reset*/ 2628 ioc_state = mpi3mr_get_iocstate(mrioc); 2629 if (ioc_state != MRIOC_STATE_FAULT) 2630 goto schedule_work; 2631 2632 fault = readl(&mrioc->sysif_regs->fault) & MPI3_SYSIF_FAULT_CODE_MASK; 2633 host_diagnostic = readl(&mrioc->sysif_regs->host_diagnostic); 2634 if (host_diagnostic & MPI3_SYSIF_HOST_DIAG_SAVE_IN_PROGRESS) { 2635 if (!mrioc->diagsave_timeout) { 2636 mpi3mr_print_fault_info(mrioc); 2637 ioc_warn(mrioc, "diag save in progress\n"); 2638 } 2639 if ((mrioc->diagsave_timeout++) <= MPI3_SYSIF_DIAG_SAVE_TIMEOUT) 2640 goto 
schedule_work; 2641 } 2642 2643 mpi3mr_print_fault_info(mrioc); 2644 mrioc->diagsave_timeout = 0; 2645 2646 switch (fault) { 2647 case MPI3_SYSIF_FAULT_CODE_COMPLETE_RESET_NEEDED: 2648 case MPI3_SYSIF_FAULT_CODE_POWER_CYCLE_REQUIRED: 2649 ioc_warn(mrioc, 2650 "controller requires system power cycle, marking controller as unrecoverable\n"); 2651 mrioc->unrecoverable = 1; 2652 goto schedule_work; 2653 case MPI3_SYSIF_FAULT_CODE_SOFT_RESET_IN_PROGRESS: 2654 goto schedule_work; 2655 case MPI3_SYSIF_FAULT_CODE_CI_ACTIVATION_RESET: 2656 reset_reason = MPI3MR_RESET_FROM_CIACTIV_FAULT; 2657 break; 2658 default: 2659 break; 2660 } 2661 mpi3mr_soft_reset_handler(mrioc, reset_reason, 0); 2662 return; 2663 2664 schedule_work: 2665 spin_lock_irqsave(&mrioc->watchdog_lock, flags); 2666 if (mrioc->watchdog_work_q) 2667 queue_delayed_work(mrioc->watchdog_work_q, 2668 &mrioc->watchdog_work, 2669 msecs_to_jiffies(MPI3MR_WATCHDOG_INTERVAL)); 2670 spin_unlock_irqrestore(&mrioc->watchdog_lock, flags); 2671 return; 2672 } 2673 2674 /** 2675 * mpi3mr_start_watchdog - Start watchdog 2676 * @mrioc: Adapter instance reference 2677 * 2678 * Create and start the watchdog thread to monitor controller 2679 * faults. 2680 * 2681 * Return: Nothing. 
2682 */ 2683 void mpi3mr_start_watchdog(struct mpi3mr_ioc *mrioc) 2684 { 2685 if (mrioc->watchdog_work_q) 2686 return; 2687 2688 INIT_DELAYED_WORK(&mrioc->watchdog_work, mpi3mr_watchdog_work); 2689 snprintf(mrioc->watchdog_work_q_name, 2690 sizeof(mrioc->watchdog_work_q_name), "watchdog_%s%d", mrioc->name, 2691 mrioc->id); 2692 mrioc->watchdog_work_q = 2693 create_singlethread_workqueue(mrioc->watchdog_work_q_name); 2694 if (!mrioc->watchdog_work_q) { 2695 ioc_err(mrioc, "%s: failed (line=%d)\n", __func__, __LINE__); 2696 return; 2697 } 2698 2699 if (mrioc->watchdog_work_q) 2700 queue_delayed_work(mrioc->watchdog_work_q, 2701 &mrioc->watchdog_work, 2702 msecs_to_jiffies(MPI3MR_WATCHDOG_INTERVAL)); 2703 } 2704 2705 /** 2706 * mpi3mr_stop_watchdog - Stop watchdog 2707 * @mrioc: Adapter instance reference 2708 * 2709 * Stop the watchdog thread created to monitor controller 2710 * faults. 2711 * 2712 * Return: Nothing. 2713 */ 2714 void mpi3mr_stop_watchdog(struct mpi3mr_ioc *mrioc) 2715 { 2716 unsigned long flags; 2717 struct workqueue_struct *wq; 2718 2719 spin_lock_irqsave(&mrioc->watchdog_lock, flags); 2720 wq = mrioc->watchdog_work_q; 2721 mrioc->watchdog_work_q = NULL; 2722 spin_unlock_irqrestore(&mrioc->watchdog_lock, flags); 2723 if (wq) { 2724 if (!cancel_delayed_work_sync(&mrioc->watchdog_work)) 2725 flush_workqueue(wq); 2726 destroy_workqueue(wq); 2727 } 2728 } 2729 2730 /** 2731 * mpi3mr_setup_admin_qpair - Setup admin queue pair 2732 * @mrioc: Adapter instance reference 2733 * 2734 * Allocate memory for admin queue pair if required and register 2735 * the admin queue with the controller. 2736 * 2737 * Return: 0 on success, non-zero on failures. 
 */
static int mpi3mr_setup_admin_qpair(struct mpi3mr_ioc *mrioc)
{
	int retval = 0;
	u32 num_admin_entries = 0;

	mrioc->admin_req_q_sz = MPI3MR_ADMIN_REQ_Q_SIZE;
	mrioc->num_admin_req = mrioc->admin_req_q_sz /
	    MPI3MR_ADMIN_REQ_FRAME_SZ;
	mrioc->admin_req_ci = mrioc->admin_req_pi = 0;

	mrioc->admin_reply_q_sz = MPI3MR_ADMIN_REPLY_Q_SIZE;
	mrioc->num_admin_replies = mrioc->admin_reply_q_sz /
	    MPI3MR_ADMIN_REPLY_FRAME_SZ;
	mrioc->admin_reply_ci = 0;
	mrioc->admin_reply_ephase = 1;
	atomic_set(&mrioc->admin_reply_q_in_use, 0);

	/* Buffers are reused across resets; allocate only on first call */
	if (!mrioc->admin_req_base) {
		mrioc->admin_req_base = dma_alloc_coherent(&mrioc->pdev->dev,
		    mrioc->admin_req_q_sz, &mrioc->admin_req_dma, GFP_KERNEL);

		if (!mrioc->admin_req_base) {
			retval = -1;
			goto out_failed;
		}

		mrioc->admin_reply_base = dma_alloc_coherent(&mrioc->pdev->dev,
		    mrioc->admin_reply_q_sz, &mrioc->admin_reply_dma,
		    GFP_KERNEL);

		if (!mrioc->admin_reply_base) {
			retval = -1;
			goto out_failed;
		}
	}

	/* Register the admin queue pair with the controller: entry counts
	 * (replies in the upper 16 bits), both base addresses, then the
	 * initial producer/consumer indexes.
	 */
	num_admin_entries = (mrioc->num_admin_replies << 16) |
	    (mrioc->num_admin_req);
	writel(num_admin_entries, &mrioc->sysif_regs->admin_queue_num_entries);
	mpi3mr_writeq(mrioc->admin_req_dma,
	    &mrioc->sysif_regs->admin_request_queue_address);
	mpi3mr_writeq(mrioc->admin_reply_dma,
	    &mrioc->sysif_regs->admin_reply_queue_address);
	writel(mrioc->admin_req_pi, &mrioc->sysif_regs->admin_request_queue_pi);
	writel(mrioc->admin_reply_ci, &mrioc->sysif_regs->admin_reply_queue_ci);
	return retval;

out_failed:

	if (mrioc->admin_reply_base) {
		dma_free_coherent(&mrioc->pdev->dev, mrioc->admin_reply_q_sz,
		    mrioc->admin_reply_base, mrioc->admin_reply_dma);
		mrioc->admin_reply_base = NULL;
	}
	if (mrioc->admin_req_base) {
		dma_free_coherent(&mrioc->pdev->dev, mrioc->admin_req_q_sz,
		    mrioc->admin_req_base, mrioc->admin_req_dma);
		mrioc->admin_req_base = NULL;
	}
	return retval;
}

/**
 * mpi3mr_issue_iocfacts - Send IOC Facts
 * @mrioc: Adapter instance reference
 * @facts_data: Cached IOC facts data
 *
 * Issue IOC Facts MPI request through admin queue and wait for
 * the completion of it or time out.
 *
 * Return: 0 on success, non-zero on failures.
 */
static int mpi3mr_issue_iocfacts(struct mpi3mr_ioc *mrioc,
	struct mpi3_ioc_facts_data *facts_data)
{
	struct mpi3_ioc_facts_request iocfacts_req;
	void *data = NULL;
	dma_addr_t data_dma;
	u32 data_len = sizeof(*facts_data);
	int retval = 0;
	u8 sgl_flags = MPI3MR_SGEFLAGS_SYSTEM_SIMPLE_END_OF_LIST;

	/* DMA-able bounce buffer that receives the facts payload */
	data = dma_alloc_coherent(&mrioc->pdev->dev, data_len, &data_dma,
	    GFP_KERNEL);

	if (!data) {
		retval = -1;
		goto out;
	}

	memset(&iocfacts_req, 0, sizeof(iocfacts_req));
	mutex_lock(&mrioc->init_cmds.mutex);
	if (mrioc->init_cmds.state & MPI3MR_CMD_PENDING) {
		retval = -1;
		ioc_err(mrioc, "Issue IOCFacts: Init command is in use\n");
		mutex_unlock(&mrioc->init_cmds.mutex);
		goto out;
	}
	mrioc->init_cmds.state = MPI3MR_CMD_PENDING;
	mrioc->init_cmds.is_waiting = 1;
	mrioc->init_cmds.callback = NULL;
	iocfacts_req.host_tag = cpu_to_le16(MPI3MR_HOSTTAG_INITCMDS);
	iocfacts_req.function = MPI3_FUNCTION_IOC_FACTS;

	mpi3mr_add_sg_single(&iocfacts_req.sgl, sgl_flags, data_len,
	    data_dma);

	init_completion(&mrioc->init_cmds.done);
	retval = mpi3mr_admin_request_post(mrioc, &iocfacts_req,
	    sizeof(iocfacts_req), 1);
	if (retval) {
		ioc_err(mrioc, "Issue IOCFacts: Admin Post failed\n");
		goto out_unlock;
	}
	wait_for_completion_timeout(&mrioc->init_cmds.done,
	    (MPI3MR_INTADMCMD_TIMEOUT * HZ));
	if (!(mrioc->init_cmds.state & MPI3MR_CMD_COMPLETE)) {
		ioc_err(mrioc, "ioc_facts timed out\n");
		mpi3mr_check_rh_fault_ioc(mrioc,
		    MPI3MR_RESET_FROM_IOCFACTS_TIMEOUT);
		retval = -1;
		goto out_unlock;
	}
	if ((mrioc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK)
	    != MPI3_IOCSTATUS_SUCCESS) {
		ioc_err(mrioc,
		    "Issue IOCFacts: Failed ioc_status(0x%04x) Loginfo(0x%08x)\n",
		    (mrioc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK),
		    mrioc->init_cmds.ioc_loginfo);
		retval = -1;
		goto out_unlock;
	}
	/* Cache the raw facts and decode them into mrioc->facts */
	memcpy(facts_data, (u8 *)data, data_len);
	mpi3mr_process_factsdata(mrioc, facts_data);
out_unlock:
	mrioc->init_cmds.state = MPI3MR_CMD_NOTUSED;
	mutex_unlock(&mrioc->init_cmds.mutex);

out:
	if (data)
		dma_free_coherent(&mrioc->pdev->dev, data_len, data, data_dma);

	return retval;
}

/**
 * mpi3mr_check_reset_dma_mask - Process IOC facts data
 * @mrioc: Adapter instance reference
 *
 * Check whether the new DMA mask requested through IOCFacts by
 * firmware needs to be set, if so set it .
 *
 * Return: 0 on success, non-zero on failure.
 */
static inline int mpi3mr_check_reset_dma_mask(struct mpi3mr_ioc *mrioc)
{
	struct pci_dev *pdev = mrioc->pdev;
	int r;
	u64 facts_dma_mask = DMA_BIT_MASK(mrioc->facts.dma_mask);

	/* Shrink the mask only when firmware asks for a narrower one than
	 * what is currently set.
	 */
	if (!mrioc->facts.dma_mask || (mrioc->dma_mask <= facts_dma_mask))
		return 0;

	ioc_info(mrioc, "Changing DMA mask from 0x%016llx to 0x%016llx\n",
	    mrioc->dma_mask, facts_dma_mask);

	r = dma_set_mask_and_coherent(&pdev->dev, facts_dma_mask);
	if (r) {
		ioc_err(mrioc, "Setting DMA mask to 0x%016llx failed: %d\n",
		    facts_dma_mask, r);
		return r;
	}
	mrioc->dma_mask = facts_dma_mask;
	return r;
}

/**
 * mpi3mr_process_factsdata - Process IOC facts data
 * @mrioc: Adapter instance reference
 * @facts_data: Cached IOC facts data
 *
 * Convert IOC facts data into cpu endianness and cache it in
 * the driver .
 *
 * Return: Nothing.
 */
static void mpi3mr_process_factsdata(struct mpi3mr_ioc *mrioc,
	struct mpi3_ioc_facts_data *facts_data)
{
	u32 ioc_config, req_sz, facts_flags;

	/* Sanity-check the payload length reported by firmware (in dwords)
	 * against the driver's structure size; mismatch is only logged.
	 */
	if ((le16_to_cpu(facts_data->ioc_facts_data_length)) !=
	    (sizeof(*facts_data) / 4)) {
		ioc_warn(mrioc,
		    "IOCFactsdata length mismatch driver_sz(%zu) firmware_sz(%d)\n",
		    sizeof(*facts_data),
		    le16_to_cpu(facts_data->ioc_facts_data_length) * 4);
	}

	/* Operational request entry size is encoded as a power of two in
	 * the IOC configuration register.
	 */
	ioc_config = readl(&mrioc->sysif_regs->ioc_configuration);
	req_sz = 1 << ((ioc_config & MPI3_SYSIF_IOC_CONFIG_OPER_REQ_ENT_SZ) >>
	    MPI3_SYSIF_IOC_CONFIG_OPER_REQ_ENT_SZ_SHIFT);
	if (le16_to_cpu(facts_data->ioc_request_frame_size) != (req_sz / 4)) {
		ioc_err(mrioc,
		    "IOCFacts data reqFrameSize mismatch hw_size(%d) firmware_sz(%d)\n",
		    req_sz / 4, le16_to_cpu(facts_data->ioc_request_frame_size));
	}

	memset(&mrioc->facts, 0, sizeof(mrioc->facts));

	/* Mechanical little-endian to CPU conversion of the facts fields */
	facts_flags = le32_to_cpu(facts_data->flags);
	mrioc->facts.op_req_sz = req_sz;
	mrioc->op_reply_desc_sz = 1 << ((ioc_config &
	    MPI3_SYSIF_IOC_CONFIG_OPER_RPY_ENT_SZ) >>
	    MPI3_SYSIF_IOC_CONFIG_OPER_RPY_ENT_SZ_SHIFT);

	mrioc->facts.ioc_num = facts_data->ioc_number;
	mrioc->facts.who_init = facts_data->who_init;
	mrioc->facts.max_msix_vectors = le16_to_cpu(facts_data->max_msix_vectors);
	mrioc->facts.personality = (facts_flags &
	    MPI3_IOCFACTS_FLAGS_PERSONALITY_MASK);
	mrioc->facts.dma_mask = (facts_flags &
	    MPI3_IOCFACTS_FLAGS_DMA_ADDRESS_WIDTH_MASK) >>
	    MPI3_IOCFACTS_FLAGS_DMA_ADDRESS_WIDTH_SHIFT;
	mrioc->facts.protocol_flags = facts_data->protocol_flags;
	mrioc->facts.mpi_version = le32_to_cpu(facts_data->mpi_version.word);
	mrioc->facts.max_reqs = le16_to_cpu(facts_data->max_outstanding_requests);
	mrioc->facts.product_id = le16_to_cpu(facts_data->product_id);
	mrioc->facts.reply_sz = le16_to_cpu(facts_data->reply_frame_size) * 4;
	mrioc->facts.exceptions = le16_to_cpu(facts_data->ioc_exceptions);
	mrioc->facts.max_perids = le16_to_cpu(facts_data->max_persistent_id);
	mrioc->facts.max_vds = le16_to_cpu(facts_data->max_vds);
	mrioc->facts.max_hpds = le16_to_cpu(facts_data->max_host_pds);
	mrioc->facts.max_advhpds = le16_to_cpu(facts_data->max_adv_host_pds);
	mrioc->facts.max_raid_pds = le16_to_cpu(facts_data->max_raid_pds);
	mrioc->facts.max_nvme = le16_to_cpu(facts_data->max_nvme);
	mrioc->facts.max_pcie_switches =
	    le16_to_cpu(facts_data->max_pcie_switches);
	mrioc->facts.max_sasexpanders =
	    le16_to_cpu(facts_data->max_sas_expanders);
	mrioc->facts.max_data_length = le16_to_cpu(facts_data->max_data_length);
	mrioc->facts.max_sasinitiators =
	    le16_to_cpu(facts_data->max_sas_initiators);
	mrioc->facts.max_enclosures = le16_to_cpu(facts_data->max_enclosures);
	mrioc->facts.min_devhandle = le16_to_cpu(facts_data->min_dev_handle);
	mrioc->facts.max_devhandle = le16_to_cpu(facts_data->max_dev_handle);
	mrioc->facts.max_op_req_q =
	    le16_to_cpu(facts_data->max_operational_request_queues);
	mrioc->facts.max_op_reply_q =
	    le16_to_cpu(facts_data->max_operational_reply_queues);
	mrioc->facts.ioc_capabilities =
	    le32_to_cpu(facts_data->ioc_capabilities);
	mrioc->facts.fw_ver.build_num =
	    le16_to_cpu(facts_data->fw_version.build_num);
	mrioc->facts.fw_ver.cust_id =
	    le16_to_cpu(facts_data->fw_version.customer_id);
	mrioc->facts.fw_ver.ph_minor = facts_data->fw_version.phase_minor;
	mrioc->facts.fw_ver.ph_major = facts_data->fw_version.phase_major;
	mrioc->facts.fw_ver.gen_minor = facts_data->fw_version.gen_minor;
	mrioc->facts.fw_ver.gen_major = facts_data->fw_version.gen_major;
	/* Never use more MSI-X vectors than the controller supports */
	mrioc->msix_count = min_t(int, mrioc->msix_count,
	    mrioc->facts.max_msix_vectors);
	mrioc->facts.sge_mod_mask = facts_data->sge_modifier_mask;
	mrioc->facts.sge_mod_value = facts_data->sge_modifier_value;
	mrioc->facts.sge_mod_shift = facts_data->sge_modifier_shift;
	mrioc->facts.shutdown_timeout =
	    le16_to_cpu(facts_data->shutdown_timeout);

	mrioc->facts.max_dev_per_tg =
	    facts_data->max_devices_per_throttle_group;
	mrioc->facts.io_throttle_data_length =
	    le16_to_cpu(facts_data->io_throttle_data_length);
	mrioc->facts.max_io_throttle_group =
	    le16_to_cpu(facts_data->max_io_throttle_group);
	mrioc->facts.io_throttle_low = le16_to_cpu(facts_data->io_throttle_low);
	mrioc->facts.io_throttle_high =
	    le16_to_cpu(facts_data->io_throttle_high);

	/* max_data_length is reported in 4K pages; fall back to the driver
	 * default when firmware does not report it.
	 */
	if (mrioc->facts.max_data_length ==
	    MPI3_IOCFACTS_MAX_DATA_LENGTH_NOT_REPORTED)
		mrioc->facts.max_data_length = MPI3MR_DEFAULT_MAX_IO_SIZE;
	else
		mrioc->facts.max_data_length *= MPI3MR_PAGE_SIZE_4K;
	/* Store in 512b block count */
	if (mrioc->facts.io_throttle_data_length)
		mrioc->io_throttle_data_length =
		    (mrioc->facts.io_throttle_data_length * 2 * 4);
	else
		/* set the length to 1MB + 1K to disable throttle */
		mrioc->io_throttle_data_length = (mrioc->facts.max_data_length / 512) + 2;

	mrioc->io_throttle_high = (mrioc->facts.io_throttle_high * 2 * 1024);
	mrioc->io_throttle_low = (mrioc->facts.io_throttle_low * 2 * 1024);

	ioc_info(mrioc, "ioc_num(%d), maxopQ(%d), maxopRepQ(%d), maxdh(%d),",
	    mrioc->facts.ioc_num, mrioc->facts.max_op_req_q,
	    mrioc->facts.max_op_reply_q, mrioc->facts.max_devhandle);
	ioc_info(mrioc,
	    "maxreqs(%d), mindh(%d) maxvectors(%d) maxperids(%d)\n",
	    mrioc->facts.max_reqs, mrioc->facts.min_devhandle,
	    mrioc->facts.max_msix_vectors, mrioc->facts.max_perids);
	ioc_info(mrioc, "SGEModMask 0x%x SGEModVal 0x%x SGEModShift 0x%x ",
	    mrioc->facts.sge_mod_mask, mrioc->facts.sge_mod_value,
	    mrioc->facts.sge_mod_shift);
	ioc_info(mrioc, "DMA mask %d InitialPE status 0x%x max_data_len (%d)\n",
	    mrioc->facts.dma_mask, (facts_flags &
	    MPI3_IOCFACTS_FLAGS_INITIAL_PORT_ENABLE_MASK), mrioc->facts.max_data_length);
	ioc_info(mrioc,
	    "max_dev_per_throttle_group(%d), max_throttle_groups(%d)\n",
	    mrioc->facts.max_dev_per_tg, mrioc->facts.max_io_throttle_group);
	ioc_info(mrioc,
	    "io_throttle_data_len(%dKiB), io_throttle_high(%dMiB), io_throttle_low(%dMiB)\n",
	    mrioc->facts.io_throttle_data_length * 4,
	    mrioc->facts.io_throttle_high, mrioc->facts.io_throttle_low);
}

/**
 * mpi3mr_alloc_reply_sense_bufs - Send IOC Init
 * @mrioc: Adapter instance reference
 *
 * Allocate and initialize the reply free buffers, sense
 * buffers, reply free queue and sense buffer queue.
 *
 * Return: 0 on success, non-zero on failures.
3063 */ 3064 static int mpi3mr_alloc_reply_sense_bufs(struct mpi3mr_ioc *mrioc) 3065 { 3066 int retval = 0; 3067 u32 sz, i; 3068 3069 if (mrioc->init_cmds.reply) 3070 return retval; 3071 3072 mrioc->init_cmds.reply = kzalloc(mrioc->reply_sz, GFP_KERNEL); 3073 if (!mrioc->init_cmds.reply) 3074 goto out_failed; 3075 3076 mrioc->bsg_cmds.reply = kzalloc(mrioc->reply_sz, GFP_KERNEL); 3077 if (!mrioc->bsg_cmds.reply) 3078 goto out_failed; 3079 3080 mrioc->transport_cmds.reply = kzalloc(mrioc->reply_sz, GFP_KERNEL); 3081 if (!mrioc->transport_cmds.reply) 3082 goto out_failed; 3083 3084 for (i = 0; i < MPI3MR_NUM_DEVRMCMD; i++) { 3085 mrioc->dev_rmhs_cmds[i].reply = kzalloc(mrioc->reply_sz, 3086 GFP_KERNEL); 3087 if (!mrioc->dev_rmhs_cmds[i].reply) 3088 goto out_failed; 3089 } 3090 3091 for (i = 0; i < MPI3MR_NUM_EVTACKCMD; i++) { 3092 mrioc->evtack_cmds[i].reply = kzalloc(mrioc->reply_sz, 3093 GFP_KERNEL); 3094 if (!mrioc->evtack_cmds[i].reply) 3095 goto out_failed; 3096 } 3097 3098 mrioc->host_tm_cmds.reply = kzalloc(mrioc->reply_sz, GFP_KERNEL); 3099 if (!mrioc->host_tm_cmds.reply) 3100 goto out_failed; 3101 3102 mrioc->pel_cmds.reply = kzalloc(mrioc->reply_sz, GFP_KERNEL); 3103 if (!mrioc->pel_cmds.reply) 3104 goto out_failed; 3105 3106 mrioc->pel_abort_cmd.reply = kzalloc(mrioc->reply_sz, GFP_KERNEL); 3107 if (!mrioc->pel_abort_cmd.reply) 3108 goto out_failed; 3109 3110 mrioc->dev_handle_bitmap_bits = mrioc->facts.max_devhandle; 3111 mrioc->removepend_bitmap = bitmap_zalloc(mrioc->dev_handle_bitmap_bits, 3112 GFP_KERNEL); 3113 if (!mrioc->removepend_bitmap) 3114 goto out_failed; 3115 3116 mrioc->devrem_bitmap = bitmap_zalloc(MPI3MR_NUM_DEVRMCMD, GFP_KERNEL); 3117 if (!mrioc->devrem_bitmap) 3118 goto out_failed; 3119 3120 mrioc->evtack_cmds_bitmap = bitmap_zalloc(MPI3MR_NUM_EVTACKCMD, 3121 GFP_KERNEL); 3122 if (!mrioc->evtack_cmds_bitmap) 3123 goto out_failed; 3124 3125 mrioc->num_reply_bufs = mrioc->facts.max_reqs + MPI3MR_NUM_EVT_REPLIES; 3126 mrioc->reply_free_qsz 
= mrioc->num_reply_bufs + 1; 3127 mrioc->num_sense_bufs = mrioc->facts.max_reqs / MPI3MR_SENSEBUF_FACTOR; 3128 mrioc->sense_buf_q_sz = mrioc->num_sense_bufs + 1; 3129 3130 /* reply buffer pool, 16 byte align */ 3131 sz = mrioc->num_reply_bufs * mrioc->reply_sz; 3132 mrioc->reply_buf_pool = dma_pool_create("reply_buf pool", 3133 &mrioc->pdev->dev, sz, 16, 0); 3134 if (!mrioc->reply_buf_pool) { 3135 ioc_err(mrioc, "reply buf pool: dma_pool_create failed\n"); 3136 goto out_failed; 3137 } 3138 3139 mrioc->reply_buf = dma_pool_zalloc(mrioc->reply_buf_pool, GFP_KERNEL, 3140 &mrioc->reply_buf_dma); 3141 if (!mrioc->reply_buf) 3142 goto out_failed; 3143 3144 mrioc->reply_buf_dma_max_address = mrioc->reply_buf_dma + sz; 3145 3146 /* reply free queue, 8 byte align */ 3147 sz = mrioc->reply_free_qsz * 8; 3148 mrioc->reply_free_q_pool = dma_pool_create("reply_free_q pool", 3149 &mrioc->pdev->dev, sz, 8, 0); 3150 if (!mrioc->reply_free_q_pool) { 3151 ioc_err(mrioc, "reply_free_q pool: dma_pool_create failed\n"); 3152 goto out_failed; 3153 } 3154 mrioc->reply_free_q = dma_pool_zalloc(mrioc->reply_free_q_pool, 3155 GFP_KERNEL, &mrioc->reply_free_q_dma); 3156 if (!mrioc->reply_free_q) 3157 goto out_failed; 3158 3159 /* sense buffer pool, 4 byte align */ 3160 sz = mrioc->num_sense_bufs * MPI3MR_SENSE_BUF_SZ; 3161 mrioc->sense_buf_pool = dma_pool_create("sense_buf pool", 3162 &mrioc->pdev->dev, sz, 4, 0); 3163 if (!mrioc->sense_buf_pool) { 3164 ioc_err(mrioc, "sense_buf pool: dma_pool_create failed\n"); 3165 goto out_failed; 3166 } 3167 mrioc->sense_buf = dma_pool_zalloc(mrioc->sense_buf_pool, GFP_KERNEL, 3168 &mrioc->sense_buf_dma); 3169 if (!mrioc->sense_buf) 3170 goto out_failed; 3171 3172 /* sense buffer queue, 8 byte align */ 3173 sz = mrioc->sense_buf_q_sz * 8; 3174 mrioc->sense_buf_q_pool = dma_pool_create("sense_buf_q pool", 3175 &mrioc->pdev->dev, sz, 8, 0); 3176 if (!mrioc->sense_buf_q_pool) { 3177 ioc_err(mrioc, "sense_buf_q pool: dma_pool_create failed\n"); 3178 goto 
out_failed; 3179 } 3180 mrioc->sense_buf_q = dma_pool_zalloc(mrioc->sense_buf_q_pool, 3181 GFP_KERNEL, &mrioc->sense_buf_q_dma); 3182 if (!mrioc->sense_buf_q) 3183 goto out_failed; 3184 3185 return retval; 3186 3187 out_failed: 3188 retval = -1; 3189 return retval; 3190 } 3191 3192 /** 3193 * mpimr_initialize_reply_sbuf_queues - initialize reply sense 3194 * buffers 3195 * @mrioc: Adapter instance reference 3196 * 3197 * Helper function to initialize reply and sense buffers along 3198 * with some debug prints. 3199 * 3200 * Return: None. 3201 */ 3202 static void mpimr_initialize_reply_sbuf_queues(struct mpi3mr_ioc *mrioc) 3203 { 3204 u32 sz, i; 3205 dma_addr_t phy_addr; 3206 3207 sz = mrioc->num_reply_bufs * mrioc->reply_sz; 3208 ioc_info(mrioc, 3209 "reply buf pool(0x%p): depth(%d), frame_size(%d), pool_size(%d kB), reply_dma(0x%llx)\n", 3210 mrioc->reply_buf, mrioc->num_reply_bufs, mrioc->reply_sz, 3211 (sz / 1024), (unsigned long long)mrioc->reply_buf_dma); 3212 sz = mrioc->reply_free_qsz * 8; 3213 ioc_info(mrioc, 3214 "reply_free_q pool(0x%p): depth(%d), frame_size(%d), pool_size(%d kB), reply_dma(0x%llx)\n", 3215 mrioc->reply_free_q, mrioc->reply_free_qsz, 8, (sz / 1024), 3216 (unsigned long long)mrioc->reply_free_q_dma); 3217 sz = mrioc->num_sense_bufs * MPI3MR_SENSE_BUF_SZ; 3218 ioc_info(mrioc, 3219 "sense_buf pool(0x%p): depth(%d), frame_size(%d), pool_size(%d kB), sense_dma(0x%llx)\n", 3220 mrioc->sense_buf, mrioc->num_sense_bufs, MPI3MR_SENSE_BUF_SZ, 3221 (sz / 1024), (unsigned long long)mrioc->sense_buf_dma); 3222 sz = mrioc->sense_buf_q_sz * 8; 3223 ioc_info(mrioc, 3224 "sense_buf_q pool(0x%p): depth(%d), frame_size(%d), pool_size(%d kB), sense_dma(0x%llx)\n", 3225 mrioc->sense_buf_q, mrioc->sense_buf_q_sz, 8, (sz / 1024), 3226 (unsigned long long)mrioc->sense_buf_q_dma); 3227 3228 /* initialize Reply buffer Queue */ 3229 for (i = 0, phy_addr = mrioc->reply_buf_dma; 3230 i < mrioc->num_reply_bufs; i++, phy_addr += mrioc->reply_sz) 3231 
mrioc->reply_free_q[i] = cpu_to_le64(phy_addr); 3232 mrioc->reply_free_q[i] = cpu_to_le64(0); 3233 3234 /* initialize Sense Buffer Queue */ 3235 for (i = 0, phy_addr = mrioc->sense_buf_dma; 3236 i < mrioc->num_sense_bufs; i++, phy_addr += MPI3MR_SENSE_BUF_SZ) 3237 mrioc->sense_buf_q[i] = cpu_to_le64(phy_addr); 3238 mrioc->sense_buf_q[i] = cpu_to_le64(0); 3239 } 3240 3241 /** 3242 * mpi3mr_issue_iocinit - Send IOC Init 3243 * @mrioc: Adapter instance reference 3244 * 3245 * Issue IOC Init MPI request through admin queue and wait for 3246 * the completion of it or time out. 3247 * 3248 * Return: 0 on success, non-zero on failures. 3249 */ 3250 static int mpi3mr_issue_iocinit(struct mpi3mr_ioc *mrioc) 3251 { 3252 struct mpi3_ioc_init_request iocinit_req; 3253 struct mpi3_driver_info_layout *drv_info; 3254 dma_addr_t data_dma; 3255 u32 data_len = sizeof(*drv_info); 3256 int retval = 0; 3257 ktime_t current_time; 3258 3259 drv_info = dma_alloc_coherent(&mrioc->pdev->dev, data_len, &data_dma, 3260 GFP_KERNEL); 3261 if (!drv_info) { 3262 retval = -1; 3263 goto out; 3264 } 3265 mpimr_initialize_reply_sbuf_queues(mrioc); 3266 3267 drv_info->information_length = cpu_to_le32(data_len); 3268 strscpy(drv_info->driver_signature, "Broadcom", sizeof(drv_info->driver_signature)); 3269 strscpy(drv_info->os_name, utsname()->sysname, sizeof(drv_info->os_name)); 3270 strscpy(drv_info->os_version, utsname()->release, sizeof(drv_info->os_version)); 3271 strscpy(drv_info->driver_name, MPI3MR_DRIVER_NAME, sizeof(drv_info->driver_name)); 3272 strscpy(drv_info->driver_version, MPI3MR_DRIVER_VERSION, sizeof(drv_info->driver_version)); 3273 strscpy(drv_info->driver_release_date, MPI3MR_DRIVER_RELDATE, 3274 sizeof(drv_info->driver_release_date)); 3275 drv_info->driver_capabilities = 0; 3276 memcpy((u8 *)&mrioc->driver_info, (u8 *)drv_info, 3277 sizeof(mrioc->driver_info)); 3278 3279 memset(&iocinit_req, 0, sizeof(iocinit_req)); 3280 mutex_lock(&mrioc->init_cmds.mutex); 3281 if 
(mrioc->init_cmds.state & MPI3MR_CMD_PENDING) { 3282 retval = -1; 3283 ioc_err(mrioc, "Issue IOCInit: Init command is in use\n"); 3284 mutex_unlock(&mrioc->init_cmds.mutex); 3285 goto out; 3286 } 3287 mrioc->init_cmds.state = MPI3MR_CMD_PENDING; 3288 mrioc->init_cmds.is_waiting = 1; 3289 mrioc->init_cmds.callback = NULL; 3290 iocinit_req.host_tag = cpu_to_le16(MPI3MR_HOSTTAG_INITCMDS); 3291 iocinit_req.function = MPI3_FUNCTION_IOC_INIT; 3292 iocinit_req.mpi_version.mpi3_version.dev = MPI3_VERSION_DEV; 3293 iocinit_req.mpi_version.mpi3_version.unit = MPI3_VERSION_UNIT; 3294 iocinit_req.mpi_version.mpi3_version.major = MPI3_VERSION_MAJOR; 3295 iocinit_req.mpi_version.mpi3_version.minor = MPI3_VERSION_MINOR; 3296 iocinit_req.who_init = MPI3_WHOINIT_HOST_DRIVER; 3297 iocinit_req.reply_free_queue_depth = cpu_to_le16(mrioc->reply_free_qsz); 3298 iocinit_req.reply_free_queue_address = 3299 cpu_to_le64(mrioc->reply_free_q_dma); 3300 iocinit_req.sense_buffer_length = cpu_to_le16(MPI3MR_SENSE_BUF_SZ); 3301 iocinit_req.sense_buffer_free_queue_depth = 3302 cpu_to_le16(mrioc->sense_buf_q_sz); 3303 iocinit_req.sense_buffer_free_queue_address = 3304 cpu_to_le64(mrioc->sense_buf_q_dma); 3305 iocinit_req.driver_information_address = cpu_to_le64(data_dma); 3306 3307 current_time = ktime_get_real(); 3308 iocinit_req.time_stamp = cpu_to_le64(ktime_to_ms(current_time)); 3309 3310 iocinit_req.msg_flags |= 3311 MPI3_IOCINIT_MSGFLAGS_SCSIIOSTATUSREPLY_SUPPORTED; 3312 iocinit_req.msg_flags |= 3313 MPI3_IOCINIT_MSGFLAGS_WRITESAMEDIVERT_SUPPORTED; 3314 3315 init_completion(&mrioc->init_cmds.done); 3316 retval = mpi3mr_admin_request_post(mrioc, &iocinit_req, 3317 sizeof(iocinit_req), 1); 3318 if (retval) { 3319 ioc_err(mrioc, "Issue IOCInit: Admin Post failed\n"); 3320 goto out_unlock; 3321 } 3322 wait_for_completion_timeout(&mrioc->init_cmds.done, 3323 (MPI3MR_INTADMCMD_TIMEOUT * HZ)); 3324 if (!(mrioc->init_cmds.state & MPI3MR_CMD_COMPLETE)) { 3325 mpi3mr_check_rh_fault_ioc(mrioc, 3326 
MPI3MR_RESET_FROM_IOCINIT_TIMEOUT); 3327 ioc_err(mrioc, "ioc_init timed out\n"); 3328 retval = -1; 3329 goto out_unlock; 3330 } 3331 if ((mrioc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK) 3332 != MPI3_IOCSTATUS_SUCCESS) { 3333 ioc_err(mrioc, 3334 "Issue IOCInit: Failed ioc_status(0x%04x) Loginfo(0x%08x)\n", 3335 (mrioc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK), 3336 mrioc->init_cmds.ioc_loginfo); 3337 retval = -1; 3338 goto out_unlock; 3339 } 3340 3341 mrioc->reply_free_queue_host_index = mrioc->num_reply_bufs; 3342 writel(mrioc->reply_free_queue_host_index, 3343 &mrioc->sysif_regs->reply_free_host_index); 3344 3345 mrioc->sbq_host_index = mrioc->num_sense_bufs; 3346 writel(mrioc->sbq_host_index, 3347 &mrioc->sysif_regs->sense_buffer_free_host_index); 3348 out_unlock: 3349 mrioc->init_cmds.state = MPI3MR_CMD_NOTUSED; 3350 mutex_unlock(&mrioc->init_cmds.mutex); 3351 3352 out: 3353 if (drv_info) 3354 dma_free_coherent(&mrioc->pdev->dev, data_len, drv_info, 3355 data_dma); 3356 3357 return retval; 3358 } 3359 3360 /** 3361 * mpi3mr_unmask_events - Unmask events in event mask bitmap 3362 * @mrioc: Adapter instance reference 3363 * @event: MPI event ID 3364 * 3365 * Un mask the specific event by resetting the event_mask 3366 * bitmap. 3367 * 3368 * Return: 0 on success, non-zero on failures. 3369 */ 3370 static void mpi3mr_unmask_events(struct mpi3mr_ioc *mrioc, u16 event) 3371 { 3372 u32 desired_event; 3373 u8 word; 3374 3375 if (event >= 128) 3376 return; 3377 3378 desired_event = (1 << (event % 32)); 3379 word = event / 32; 3380 3381 mrioc->event_masks[word] &= ~desired_event; 3382 } 3383 3384 /** 3385 * mpi3mr_issue_event_notification - Send event notification 3386 * @mrioc: Adapter instance reference 3387 * 3388 * Issue event notification MPI request through admin queue and 3389 * wait for the completion of it or time out. 3390 * 3391 * Return: 0 on success, non-zero on failures. 
3392 */ 3393 static int mpi3mr_issue_event_notification(struct mpi3mr_ioc *mrioc) 3394 { 3395 struct mpi3_event_notification_request evtnotify_req; 3396 int retval = 0; 3397 u8 i; 3398 3399 memset(&evtnotify_req, 0, sizeof(evtnotify_req)); 3400 mutex_lock(&mrioc->init_cmds.mutex); 3401 if (mrioc->init_cmds.state & MPI3MR_CMD_PENDING) { 3402 retval = -1; 3403 ioc_err(mrioc, "Issue EvtNotify: Init command is in use\n"); 3404 mutex_unlock(&mrioc->init_cmds.mutex); 3405 goto out; 3406 } 3407 mrioc->init_cmds.state = MPI3MR_CMD_PENDING; 3408 mrioc->init_cmds.is_waiting = 1; 3409 mrioc->init_cmds.callback = NULL; 3410 evtnotify_req.host_tag = cpu_to_le16(MPI3MR_HOSTTAG_INITCMDS); 3411 evtnotify_req.function = MPI3_FUNCTION_EVENT_NOTIFICATION; 3412 for (i = 0; i < MPI3_EVENT_NOTIFY_EVENTMASK_WORDS; i++) 3413 evtnotify_req.event_masks[i] = 3414 cpu_to_le32(mrioc->event_masks[i]); 3415 init_completion(&mrioc->init_cmds.done); 3416 retval = mpi3mr_admin_request_post(mrioc, &evtnotify_req, 3417 sizeof(evtnotify_req), 1); 3418 if (retval) { 3419 ioc_err(mrioc, "Issue EvtNotify: Admin Post failed\n"); 3420 goto out_unlock; 3421 } 3422 wait_for_completion_timeout(&mrioc->init_cmds.done, 3423 (MPI3MR_INTADMCMD_TIMEOUT * HZ)); 3424 if (!(mrioc->init_cmds.state & MPI3MR_CMD_COMPLETE)) { 3425 ioc_err(mrioc, "event notification timed out\n"); 3426 mpi3mr_check_rh_fault_ioc(mrioc, 3427 MPI3MR_RESET_FROM_EVTNOTIFY_TIMEOUT); 3428 retval = -1; 3429 goto out_unlock; 3430 } 3431 if ((mrioc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK) 3432 != MPI3_IOCSTATUS_SUCCESS) { 3433 ioc_err(mrioc, 3434 "Issue EvtNotify: Failed ioc_status(0x%04x) Loginfo(0x%08x)\n", 3435 (mrioc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK), 3436 mrioc->init_cmds.ioc_loginfo); 3437 retval = -1; 3438 goto out_unlock; 3439 } 3440 3441 out_unlock: 3442 mrioc->init_cmds.state = MPI3MR_CMD_NOTUSED; 3443 mutex_unlock(&mrioc->init_cmds.mutex); 3444 out: 3445 return retval; 3446 } 3447 3448 /** 3449 * 
mpi3mr_process_event_ack - Process event acknowledgment 3450 * @mrioc: Adapter instance reference 3451 * @event: MPI3 event ID 3452 * @event_ctx: event context 3453 * 3454 * Send event acknowledgment through admin queue and wait for 3455 * it to complete. 3456 * 3457 * Return: 0 on success, non-zero on failures. 3458 */ 3459 int mpi3mr_process_event_ack(struct mpi3mr_ioc *mrioc, u8 event, 3460 u32 event_ctx) 3461 { 3462 struct mpi3_event_ack_request evtack_req; 3463 int retval = 0; 3464 3465 memset(&evtack_req, 0, sizeof(evtack_req)); 3466 mutex_lock(&mrioc->init_cmds.mutex); 3467 if (mrioc->init_cmds.state & MPI3MR_CMD_PENDING) { 3468 retval = -1; 3469 ioc_err(mrioc, "Send EvtAck: Init command is in use\n"); 3470 mutex_unlock(&mrioc->init_cmds.mutex); 3471 goto out; 3472 } 3473 mrioc->init_cmds.state = MPI3MR_CMD_PENDING; 3474 mrioc->init_cmds.is_waiting = 1; 3475 mrioc->init_cmds.callback = NULL; 3476 evtack_req.host_tag = cpu_to_le16(MPI3MR_HOSTTAG_INITCMDS); 3477 evtack_req.function = MPI3_FUNCTION_EVENT_ACK; 3478 evtack_req.event = event; 3479 evtack_req.event_context = cpu_to_le32(event_ctx); 3480 3481 init_completion(&mrioc->init_cmds.done); 3482 retval = mpi3mr_admin_request_post(mrioc, &evtack_req, 3483 sizeof(evtack_req), 1); 3484 if (retval) { 3485 ioc_err(mrioc, "Send EvtAck: Admin Post failed\n"); 3486 goto out_unlock; 3487 } 3488 wait_for_completion_timeout(&mrioc->init_cmds.done, 3489 (MPI3MR_INTADMCMD_TIMEOUT * HZ)); 3490 if (!(mrioc->init_cmds.state & MPI3MR_CMD_COMPLETE)) { 3491 ioc_err(mrioc, "Issue EvtNotify: command timed out\n"); 3492 if (!(mrioc->init_cmds.state & MPI3MR_CMD_RESET)) 3493 mpi3mr_check_rh_fault_ioc(mrioc, 3494 MPI3MR_RESET_FROM_EVTACK_TIMEOUT); 3495 retval = -1; 3496 goto out_unlock; 3497 } 3498 if ((mrioc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK) 3499 != MPI3_IOCSTATUS_SUCCESS) { 3500 ioc_err(mrioc, 3501 "Send EvtAck: Failed ioc_status(0x%04x) Loginfo(0x%08x)\n", 3502 (mrioc->init_cmds.ioc_status & 
MPI3_IOCSTATUS_STATUS_MASK), 3503 mrioc->init_cmds.ioc_loginfo); 3504 retval = -1; 3505 goto out_unlock; 3506 } 3507 3508 out_unlock: 3509 mrioc->init_cmds.state = MPI3MR_CMD_NOTUSED; 3510 mutex_unlock(&mrioc->init_cmds.mutex); 3511 out: 3512 return retval; 3513 } 3514 3515 /** 3516 * mpi3mr_alloc_chain_bufs - Allocate chain buffers 3517 * @mrioc: Adapter instance reference 3518 * 3519 * Allocate chain buffers and set a bitmap to indicate free 3520 * chain buffers. Chain buffers are used to pass the SGE 3521 * information along with MPI3 SCSI IO requests for host I/O. 3522 * 3523 * Return: 0 on success, non-zero on failure 3524 */ 3525 static int mpi3mr_alloc_chain_bufs(struct mpi3mr_ioc *mrioc) 3526 { 3527 int retval = 0; 3528 u32 sz, i; 3529 u16 num_chains; 3530 3531 if (mrioc->chain_sgl_list) 3532 return retval; 3533 3534 num_chains = mrioc->max_host_ios / MPI3MR_CHAINBUF_FACTOR; 3535 3536 if (prot_mask & (SHOST_DIX_TYPE0_PROTECTION 3537 | SHOST_DIX_TYPE1_PROTECTION 3538 | SHOST_DIX_TYPE2_PROTECTION 3539 | SHOST_DIX_TYPE3_PROTECTION)) 3540 num_chains += (num_chains / MPI3MR_CHAINBUFDIX_FACTOR); 3541 3542 mrioc->chain_buf_count = num_chains; 3543 sz = sizeof(struct chain_element) * num_chains; 3544 mrioc->chain_sgl_list = kzalloc(sz, GFP_KERNEL); 3545 if (!mrioc->chain_sgl_list) 3546 goto out_failed; 3547 3548 if (mrioc->max_sgl_entries > (mrioc->facts.max_data_length / 3549 MPI3MR_PAGE_SIZE_4K)) 3550 mrioc->max_sgl_entries = mrioc->facts.max_data_length / 3551 MPI3MR_PAGE_SIZE_4K; 3552 sz = mrioc->max_sgl_entries * sizeof(struct mpi3_sge_common); 3553 ioc_info(mrioc, "number of sgl entries=%d chain buffer size=%dKB\n", 3554 mrioc->max_sgl_entries, sz/1024); 3555 3556 mrioc->chain_buf_pool = dma_pool_create("chain_buf pool", 3557 &mrioc->pdev->dev, sz, 16, 0); 3558 if (!mrioc->chain_buf_pool) { 3559 ioc_err(mrioc, "chain buf pool: dma_pool_create failed\n"); 3560 goto out_failed; 3561 } 3562 3563 for (i = 0; i < num_chains; i++) { 3564 
mrioc->chain_sgl_list[i].addr = 3565 dma_pool_zalloc(mrioc->chain_buf_pool, GFP_KERNEL, 3566 &mrioc->chain_sgl_list[i].dma_addr); 3567 3568 if (!mrioc->chain_sgl_list[i].addr) 3569 goto out_failed; 3570 } 3571 mrioc->chain_bitmap = bitmap_zalloc(num_chains, GFP_KERNEL); 3572 if (!mrioc->chain_bitmap) 3573 goto out_failed; 3574 return retval; 3575 out_failed: 3576 retval = -1; 3577 return retval; 3578 } 3579 3580 /** 3581 * mpi3mr_port_enable_complete - Mark port enable complete 3582 * @mrioc: Adapter instance reference 3583 * @drv_cmd: Internal command tracker 3584 * 3585 * Call back for asynchronous port enable request sets the 3586 * driver command to indicate port enable request is complete. 3587 * 3588 * Return: Nothing 3589 */ 3590 static void mpi3mr_port_enable_complete(struct mpi3mr_ioc *mrioc, 3591 struct mpi3mr_drv_cmd *drv_cmd) 3592 { 3593 drv_cmd->callback = NULL; 3594 mrioc->scan_started = 0; 3595 if (drv_cmd->state & MPI3MR_CMD_RESET) 3596 mrioc->scan_failed = MPI3_IOCSTATUS_INTERNAL_ERROR; 3597 else 3598 mrioc->scan_failed = drv_cmd->ioc_status; 3599 drv_cmd->state = MPI3MR_CMD_NOTUSED; 3600 } 3601 3602 /** 3603 * mpi3mr_issue_port_enable - Issue Port Enable 3604 * @mrioc: Adapter instance reference 3605 * @async: Flag to wait for completion or not 3606 * 3607 * Issue Port Enable MPI request through admin queue and if the 3608 * async flag is not set wait for the completion of the port 3609 * enable or time out. 3610 * 3611 * Return: 0 on success, non-zero on failures. 
3612 */ 3613 int mpi3mr_issue_port_enable(struct mpi3mr_ioc *mrioc, u8 async) 3614 { 3615 struct mpi3_port_enable_request pe_req; 3616 int retval = 0; 3617 u32 pe_timeout = MPI3MR_PORTENABLE_TIMEOUT; 3618 3619 memset(&pe_req, 0, sizeof(pe_req)); 3620 mutex_lock(&mrioc->init_cmds.mutex); 3621 if (mrioc->init_cmds.state & MPI3MR_CMD_PENDING) { 3622 retval = -1; 3623 ioc_err(mrioc, "Issue PortEnable: Init command is in use\n"); 3624 mutex_unlock(&mrioc->init_cmds.mutex); 3625 goto out; 3626 } 3627 mrioc->init_cmds.state = MPI3MR_CMD_PENDING; 3628 if (async) { 3629 mrioc->init_cmds.is_waiting = 0; 3630 mrioc->init_cmds.callback = mpi3mr_port_enable_complete; 3631 } else { 3632 mrioc->init_cmds.is_waiting = 1; 3633 mrioc->init_cmds.callback = NULL; 3634 init_completion(&mrioc->init_cmds.done); 3635 } 3636 pe_req.host_tag = cpu_to_le16(MPI3MR_HOSTTAG_INITCMDS); 3637 pe_req.function = MPI3_FUNCTION_PORT_ENABLE; 3638 3639 retval = mpi3mr_admin_request_post(mrioc, &pe_req, sizeof(pe_req), 1); 3640 if (retval) { 3641 ioc_err(mrioc, "Issue PortEnable: Admin Post failed\n"); 3642 goto out_unlock; 3643 } 3644 if (async) { 3645 mutex_unlock(&mrioc->init_cmds.mutex); 3646 goto out; 3647 } 3648 3649 wait_for_completion_timeout(&mrioc->init_cmds.done, (pe_timeout * HZ)); 3650 if (!(mrioc->init_cmds.state & MPI3MR_CMD_COMPLETE)) { 3651 ioc_err(mrioc, "port enable timed out\n"); 3652 retval = -1; 3653 mpi3mr_check_rh_fault_ioc(mrioc, MPI3MR_RESET_FROM_PE_TIMEOUT); 3654 goto out_unlock; 3655 } 3656 mpi3mr_port_enable_complete(mrioc, &mrioc->init_cmds); 3657 3658 out_unlock: 3659 mrioc->init_cmds.state = MPI3MR_CMD_NOTUSED; 3660 mutex_unlock(&mrioc->init_cmds.mutex); 3661 out: 3662 return retval; 3663 } 3664 3665 /* Protocol type to name mapper structure */ 3666 static const struct { 3667 u8 protocol; 3668 char *name; 3669 } mpi3mr_protocols[] = { 3670 { MPI3_IOCFACTS_PROTOCOL_SCSI_INITIATOR, "Initiator" }, 3671 { MPI3_IOCFACTS_PROTOCOL_SCSI_TARGET, "Target" }, 3672 { 
MPI3_IOCFACTS_PROTOCOL_NVME, "NVMe attachment" }, 3673 }; 3674 3675 /* Capability to name mapper structure*/ 3676 static const struct { 3677 u32 capability; 3678 char *name; 3679 } mpi3mr_capabilities[] = { 3680 { MPI3_IOCFACTS_CAPABILITY_RAID_SUPPORTED, "RAID" }, 3681 { MPI3_IOCFACTS_CAPABILITY_MULTIPATH_SUPPORTED, "MultiPath" }, 3682 }; 3683 3684 /** 3685 * mpi3mr_print_ioc_info - Display controller information 3686 * @mrioc: Adapter instance reference 3687 * 3688 * Display controller personality, capability, supported 3689 * protocols etc. 3690 * 3691 * Return: Nothing 3692 */ 3693 static void 3694 mpi3mr_print_ioc_info(struct mpi3mr_ioc *mrioc) 3695 { 3696 int i = 0, bytes_written = 0; 3697 const char *personality; 3698 char protocol[50] = {0}; 3699 char capabilities[100] = {0}; 3700 struct mpi3mr_compimg_ver *fwver = &mrioc->facts.fw_ver; 3701 3702 switch (mrioc->facts.personality) { 3703 case MPI3_IOCFACTS_FLAGS_PERSONALITY_EHBA: 3704 personality = "Enhanced HBA"; 3705 break; 3706 case MPI3_IOCFACTS_FLAGS_PERSONALITY_RAID_DDR: 3707 personality = "RAID"; 3708 break; 3709 default: 3710 personality = "Unknown"; 3711 break; 3712 } 3713 3714 ioc_info(mrioc, "Running in %s Personality", personality); 3715 3716 ioc_info(mrioc, "FW version(%d.%d.%d.%d.%d.%d)\n", 3717 fwver->gen_major, fwver->gen_minor, fwver->ph_major, 3718 fwver->ph_minor, fwver->cust_id, fwver->build_num); 3719 3720 for (i = 0; i < ARRAY_SIZE(mpi3mr_protocols); i++) { 3721 if (mrioc->facts.protocol_flags & 3722 mpi3mr_protocols[i].protocol) { 3723 bytes_written += scnprintf(protocol + bytes_written, 3724 sizeof(protocol) - bytes_written, "%s%s", 3725 bytes_written ? 
"," : "", 3726 mpi3mr_protocols[i].name); 3727 } 3728 } 3729 3730 bytes_written = 0; 3731 for (i = 0; i < ARRAY_SIZE(mpi3mr_capabilities); i++) { 3732 if (mrioc->facts.protocol_flags & 3733 mpi3mr_capabilities[i].capability) { 3734 bytes_written += scnprintf(capabilities + bytes_written, 3735 sizeof(capabilities) - bytes_written, "%s%s", 3736 bytes_written ? "," : "", 3737 mpi3mr_capabilities[i].name); 3738 } 3739 } 3740 3741 ioc_info(mrioc, "Protocol=(%s), Capabilities=(%s)\n", 3742 protocol, capabilities); 3743 } 3744 3745 /** 3746 * mpi3mr_cleanup_resources - Free PCI resources 3747 * @mrioc: Adapter instance reference 3748 * 3749 * Unmap PCI device memory and disable PCI device. 3750 * 3751 * Return: 0 on success and non-zero on failure. 3752 */ 3753 void mpi3mr_cleanup_resources(struct mpi3mr_ioc *mrioc) 3754 { 3755 struct pci_dev *pdev = mrioc->pdev; 3756 3757 mpi3mr_cleanup_isr(mrioc); 3758 3759 if (mrioc->sysif_regs) { 3760 iounmap((void __iomem *)mrioc->sysif_regs); 3761 mrioc->sysif_regs = NULL; 3762 } 3763 3764 if (pci_is_enabled(pdev)) { 3765 if (mrioc->bars) 3766 pci_release_selected_regions(pdev, mrioc->bars); 3767 pci_disable_device(pdev); 3768 } 3769 } 3770 3771 /** 3772 * mpi3mr_setup_resources - Enable PCI resources 3773 * @mrioc: Adapter instance reference 3774 * 3775 * Enable PCI device memory, MSI-x registers and set DMA mask. 3776 * 3777 * Return: 0 on success and non-zero on failure. 3778 */ 3779 int mpi3mr_setup_resources(struct mpi3mr_ioc *mrioc) 3780 { 3781 struct pci_dev *pdev = mrioc->pdev; 3782 u32 memap_sz = 0; 3783 int i, retval = 0, capb = 0; 3784 u16 message_control; 3785 u64 dma_mask = mrioc->dma_mask ? mrioc->dma_mask : 3786 ((sizeof(dma_addr_t) > 4) ? 
DMA_BIT_MASK(64) : DMA_BIT_MASK(32)); 3787 3788 if (pci_enable_device_mem(pdev)) { 3789 ioc_err(mrioc, "pci_enable_device_mem: failed\n"); 3790 retval = -ENODEV; 3791 goto out_failed; 3792 } 3793 3794 capb = pci_find_capability(pdev, PCI_CAP_ID_MSIX); 3795 if (!capb) { 3796 ioc_err(mrioc, "Unable to find MSI-X Capabilities\n"); 3797 retval = -ENODEV; 3798 goto out_failed; 3799 } 3800 mrioc->bars = pci_select_bars(pdev, IORESOURCE_MEM); 3801 3802 if (pci_request_selected_regions(pdev, mrioc->bars, 3803 mrioc->driver_name)) { 3804 ioc_err(mrioc, "pci_request_selected_regions: failed\n"); 3805 retval = -ENODEV; 3806 goto out_failed; 3807 } 3808 3809 for (i = 0; (i < DEVICE_COUNT_RESOURCE); i++) { 3810 if (pci_resource_flags(pdev, i) & IORESOURCE_MEM) { 3811 mrioc->sysif_regs_phys = pci_resource_start(pdev, i); 3812 memap_sz = pci_resource_len(pdev, i); 3813 mrioc->sysif_regs = 3814 ioremap(mrioc->sysif_regs_phys, memap_sz); 3815 break; 3816 } 3817 } 3818 3819 pci_set_master(pdev); 3820 3821 retval = dma_set_mask_and_coherent(&pdev->dev, dma_mask); 3822 if (retval) { 3823 if (dma_mask != DMA_BIT_MASK(32)) { 3824 ioc_warn(mrioc, "Setting 64 bit DMA mask failed\n"); 3825 dma_mask = DMA_BIT_MASK(32); 3826 retval = dma_set_mask_and_coherent(&pdev->dev, 3827 dma_mask); 3828 } 3829 if (retval) { 3830 mrioc->dma_mask = 0; 3831 ioc_err(mrioc, "Setting 32 bit DMA mask also failed\n"); 3832 goto out_failed; 3833 } 3834 } 3835 mrioc->dma_mask = dma_mask; 3836 3837 if (!mrioc->sysif_regs) { 3838 ioc_err(mrioc, 3839 "Unable to map adapter memory or resource not found\n"); 3840 retval = -EINVAL; 3841 goto out_failed; 3842 } 3843 3844 pci_read_config_word(pdev, capb + 2, &message_control); 3845 mrioc->msix_count = (message_control & 0x3FF) + 1; 3846 3847 pci_save_state(pdev); 3848 3849 pci_set_drvdata(pdev, mrioc->shost); 3850 3851 mpi3mr_ioc_disable_intr(mrioc); 3852 3853 ioc_info(mrioc, "iomem(0x%016llx), mapped(0x%p), size(%d)\n", 3854 (unsigned long long)mrioc->sysif_regs_phys, 
3855 mrioc->sysif_regs, memap_sz); 3856 ioc_info(mrioc, "Number of MSI-X vectors found in capabilities: (%d)\n", 3857 mrioc->msix_count); 3858 3859 if (!reset_devices && poll_queues > 0) 3860 mrioc->requested_poll_qcount = min_t(int, poll_queues, 3861 mrioc->msix_count - 2); 3862 return retval; 3863 3864 out_failed: 3865 mpi3mr_cleanup_resources(mrioc); 3866 return retval; 3867 } 3868 3869 /** 3870 * mpi3mr_enable_events - Enable required events 3871 * @mrioc: Adapter instance reference 3872 * 3873 * This routine unmasks the events required by the driver by 3874 * sennding appropriate event mask bitmapt through an event 3875 * notification request. 3876 * 3877 * Return: 0 on success and non-zero on failure. 3878 */ 3879 static int mpi3mr_enable_events(struct mpi3mr_ioc *mrioc) 3880 { 3881 int retval = 0; 3882 u32 i; 3883 3884 for (i = 0; i < MPI3_EVENT_NOTIFY_EVENTMASK_WORDS; i++) 3885 mrioc->event_masks[i] = -1; 3886 3887 mpi3mr_unmask_events(mrioc, MPI3_EVENT_DEVICE_ADDED); 3888 mpi3mr_unmask_events(mrioc, MPI3_EVENT_DEVICE_INFO_CHANGED); 3889 mpi3mr_unmask_events(mrioc, MPI3_EVENT_DEVICE_STATUS_CHANGE); 3890 mpi3mr_unmask_events(mrioc, MPI3_EVENT_ENCL_DEVICE_STATUS_CHANGE); 3891 mpi3mr_unmask_events(mrioc, MPI3_EVENT_ENCL_DEVICE_ADDED); 3892 mpi3mr_unmask_events(mrioc, MPI3_EVENT_SAS_TOPOLOGY_CHANGE_LIST); 3893 mpi3mr_unmask_events(mrioc, MPI3_EVENT_SAS_DISCOVERY); 3894 mpi3mr_unmask_events(mrioc, MPI3_EVENT_SAS_DEVICE_DISCOVERY_ERROR); 3895 mpi3mr_unmask_events(mrioc, MPI3_EVENT_SAS_BROADCAST_PRIMITIVE); 3896 mpi3mr_unmask_events(mrioc, MPI3_EVENT_PCIE_TOPOLOGY_CHANGE_LIST); 3897 mpi3mr_unmask_events(mrioc, MPI3_EVENT_PCIE_ENUMERATION); 3898 mpi3mr_unmask_events(mrioc, MPI3_EVENT_PREPARE_FOR_RESET); 3899 mpi3mr_unmask_events(mrioc, MPI3_EVENT_CABLE_MGMT); 3900 mpi3mr_unmask_events(mrioc, MPI3_EVENT_ENERGY_PACK_CHANGE); 3901 3902 retval = mpi3mr_issue_event_notification(mrioc); 3903 if (retval) 3904 ioc_err(mrioc, "failed to issue event notification %d\n", 3905 
		retval);
	return retval;
}

/**
 * mpi3mr_init_ioc - Initialize the controller
 * @mrioc: Adapter instance reference
 *
 * This is the controller initialization routine, executed either
 * after soft reset or from pci probe callback.
 * Setup the required resources, memory map the controller
 * registers, create admin and operational reply queue pairs,
 * allocate required memory for reply pool, sense buffer pool,
 * issue IOC init request to the firmware, unmask the events and
 * issue port enable to discover SAS/SATA/NVMe devices and RAID
 * volumes.
 *
 * Return: 0 on success and non-zero on failure.
 */
int mpi3mr_init_ioc(struct mpi3mr_ioc *mrioc)
{
	int retval = 0;
	u8 retry = 0;
	struct mpi3_ioc_facts_data facts_data;
	u32 sz;

retry_init:
	/* Transition the controller to the READY state before anything else */
	retval = mpi3mr_bring_ioc_ready(mrioc);
	if (retval) {
		ioc_err(mrioc, "Failed to bring ioc ready: error %d\n",
		    retval);
		goto out_failed_noretry;
	}

	/* Single vector first; re-done with full vector count further down */
	retval = mpi3mr_setup_isr(mrioc, 1);
	if (retval) {
		ioc_err(mrioc, "Failed to setup ISR error %d\n",
		    retval);
		goto out_failed_noretry;
	}

	retval = mpi3mr_issue_iocfacts(mrioc, &facts_data);
	if (retval) {
		ioc_err(mrioc, "Failed to Issue IOC Facts %d\n",
		    retval);
		goto out_failed;
	}

	/* Derive host I/O limits and throttling setup from IOC facts */
	mrioc->max_host_ios = mrioc->facts.max_reqs - MPI3MR_INTERNAL_CMDS_RESVD;
	mrioc->shost->max_sectors = mrioc->facts.max_data_length / 512;
	mrioc->num_io_throttle_group = mrioc->facts.max_io_throttle_group;
	atomic_set(&mrioc->pend_large_data_sz, 0);

	if (reset_devices)
		/* kdump kernel: keep the outstanding-I/O footprint small */
		mrioc->max_host_ios = min_t(int, mrioc->max_host_ios,
		    MPI3MR_HOST_IOS_KDUMP);

	if (!(mrioc->facts.ioc_capabilities &
	    MPI3_IOCFACTS_CAPABILITY_MULTIPATH_SUPPORTED)) {
		/* SAS transport layer is exposed only without multipath */
		mrioc->sas_transport_enabled = 1;
		mrioc->scsi_device_channel = 1;
		mrioc->shost->max_channel = 1;
		mrioc->shost->transportt = mpi3mr_transport_template;
	}

	mrioc->reply_sz = mrioc->facts.reply_sz;

	retval = mpi3mr_check_reset_dma_mask(mrioc);
	if (retval) {
		ioc_err(mrioc, "Resetting dma mask failed %d\n",
		    retval);
		goto out_failed_noretry;
	}

	mpi3mr_print_ioc_info(mrioc);

	/* Allocations below are guarded so a retry pass reuses prior buffers */
	if (!mrioc->cfg_page) {
		dprint_init(mrioc, "allocating config page buffers\n");
		mrioc->cfg_page_sz = MPI3MR_DEFAULT_CFG_PAGE_SZ;
		mrioc->cfg_page = dma_alloc_coherent(&mrioc->pdev->dev,
		    mrioc->cfg_page_sz, &mrioc->cfg_page_dma, GFP_KERNEL);
		if (!mrioc->cfg_page) {
			retval = -1;
			goto out_failed_noretry;
		}
	}

	dprint_init(mrioc, "allocating ioctl dma buffers\n");
	mpi3mr_alloc_ioctl_dma_memory(mrioc);

	if (!mrioc->init_cmds.reply) {
		retval = mpi3mr_alloc_reply_sense_bufs(mrioc);
		if (retval) {
			ioc_err(mrioc,
			    "%s :Failed to allocated reply sense buffers %d\n",
			    __func__, retval);
			goto out_failed_noretry;
		}
	}

	if (!mrioc->chain_sgl_list) {
		retval = mpi3mr_alloc_chain_bufs(mrioc);
		if (retval) {
			ioc_err(mrioc, "Failed to allocated chain buffers %d\n",
			    retval);
			goto out_failed_noretry;
		}
	}

	retval = mpi3mr_issue_iocinit(mrioc);
	if (retval) {
		ioc_err(mrioc, "Failed to Issue IOC Init %d\n",
		    retval);
		goto out_failed;
	}

	retval = mpi3mr_print_pkg_ver(mrioc);
	if (retval) {
		ioc_err(mrioc, "failed to get package version\n");
		goto out_failed;
	}

	/* Re-setup ISR with the full vector count now that facts are known */
	retval = mpi3mr_setup_isr(mrioc, 0);
	if (retval) {
		ioc_err(mrioc, "Failed to re-setup ISR, error %d\n",
		    retval);
		goto out_failed_noretry;
	}

	retval = mpi3mr_create_op_queues(mrioc);
	if (retval) {
		ioc_err(mrioc, "Failed to create OpQueues error %d\n",
		    retval);
		goto out_failed;
	}

	if (!mrioc->pel_seqnum_virt) {
		dprint_init(mrioc, "allocating memory for pel_seqnum_virt\n");
		mrioc->pel_seqnum_sz = sizeof(struct mpi3_pel_seq);
		mrioc->pel_seqnum_virt = dma_alloc_coherent(&mrioc->pdev->dev,
		    mrioc->pel_seqnum_sz, &mrioc->pel_seqnum_dma,
		    GFP_KERNEL);
		if (!mrioc->pel_seqnum_virt) {
			retval = -ENOMEM;
			goto out_failed_noretry;
		}
	}

	if (!mrioc->throttle_groups && mrioc->num_io_throttle_group) {
		dprint_init(mrioc, "allocating memory for throttle groups\n");
		sz = sizeof(struct mpi3mr_throttle_group_info);
		mrioc->throttle_groups = kcalloc(mrioc->num_io_throttle_group, sz, GFP_KERNEL);
		if (!mrioc->throttle_groups) {
			retval = -1;
			goto out_failed_noretry;
		}
	}

	retval = mpi3mr_enable_events(mrioc);
	if (retval) {
		ioc_err(mrioc, "failed to enable events %d\n",
		    retval);
		goto out_failed;
	}

	ioc_info(mrioc, "controller initialization completed successfully\n");
	return retval;
out_failed:
	/*
	 * Firmware-interaction failures are retried up to twice after
	 * clearing the reusable buffers; allocation/ISR failures jump
	 * straight to out_failed_noretry since retrying cannot help.
	 */
	if (retry < 2) {
		retry++;
		ioc_warn(mrioc, "retrying controller initialization, retry_count:%d\n",
		    retry);
		mpi3mr_memset_buffers(mrioc);
		goto retry_init;
	}
	retval = -1;
out_failed_noretry:
	ioc_err(mrioc, "controller initialization failed\n");
	mpi3mr_issue_reset(mrioc, MPI3_SYSIF_HOST_DIAG_RESET_ACTION_DIAG_FAULT,
	    MPI3MR_RESET_FROM_CTLR_CLEANUP);
	/* Controller is left in diag-fault state and marked unusable */
	mrioc->unrecoverable = 1;
	return retval;
}

/**
 * mpi3mr_reinit_ioc - Re-Initialize the controller
 * @mrioc: Adapter instance reference
 * @is_resume: Called from resume or reset path
 *
 * This is the controller re-initialization routine, executed from
 * the soft reset handler or resume callback.
 * Creates
 * operational reply queue pairs, allocate required memory for
 * reply pool, sense buffer pool, issue IOC init request to the
 * firmware, unmask the events and issue port enable to discover
 * SAS/SATA/NVMe devices and RAID volumes.
 *
 * Return: 0 on success and non-zero on failure.
 */
int mpi3mr_reinit_ioc(struct mpi3mr_ioc *mrioc, u8 is_resume)
{
	int retval = 0;
	u8 retry = 0;
	struct mpi3_ioc_facts_data facts_data;
	u32 pe_timeout, ioc_status;

retry_init:
	/* Number of poll iterations for the port-enable wait loop below */
	pe_timeout =
	    (MPI3MR_PORTENABLE_TIMEOUT / MPI3MR_PORTENABLE_POLL_INTERVAL);

	dprint_reset(mrioc, "bringing up the controller to ready state\n");
	retval = mpi3mr_bring_ioc_ready(mrioc);
	if (retval) {
		ioc_err(mrioc, "failed to bring to ready state\n");
		goto out_failed_noretry;
	}

	if (is_resume) {
		/* Resume lost the vectors; start with a single one */
		dprint_reset(mrioc, "setting up single ISR\n");
		retval = mpi3mr_setup_isr(mrioc, 1);
		if (retval) {
			ioc_err(mrioc, "failed to setup ISR\n");
			goto out_failed_noretry;
		}
	} else
		/* Reset path kept the vectors; just re-enable them */
		mpi3mr_ioc_enable_intr(mrioc);

	dprint_reset(mrioc, "getting ioc_facts\n");
	retval = mpi3mr_issue_iocfacts(mrioc, &facts_data);
	if (retval) {
		ioc_err(mrioc, "failed to get ioc_facts\n");
		goto out_failed;
	}

	/* Firmware may have changed across reset; limits must still fit */
	dprint_reset(mrioc, "validating ioc_facts\n");
	retval = mpi3mr_revalidate_factsdata(mrioc);
	if (retval) {
		ioc_err(mrioc, "failed to revalidate ioc_facts data\n");
		goto out_failed_noretry;
	}

	mpi3mr_print_ioc_info(mrioc);

	dprint_reset(mrioc, "sending ioc_init\n");
	retval = mpi3mr_issue_iocinit(mrioc);
	if (retval) {
		ioc_err(mrioc, "failed to send ioc_init\n");
		goto out_failed;
	}

	dprint_reset(mrioc, "getting package version\n");
	retval = mpi3mr_print_pkg_ver(mrioc);
	if (retval) {
		ioc_err(mrioc, "failed to get package version\n");
		goto out_failed;
	}

	if (is_resume) {
		dprint_reset(mrioc, "setting up multiple ISR\n");
		retval = mpi3mr_setup_isr(mrioc, 0);
		if (retval) {
			ioc_err(mrioc, "failed to re-setup ISR\n");
			goto out_failed_noretry;
		}
	}

	dprint_reset(mrioc, "creating operational queue pairs\n");
	retval = mpi3mr_create_op_queues(mrioc);
	if (retval) {
		ioc_err(mrioc, "failed to create operational queue pairs\n");
		goto out_failed;
	}

	if (!mrioc->pel_seqnum_virt) {
		dprint_reset(mrioc, "allocating memory for pel_seqnum_virt\n");
		mrioc->pel_seqnum_sz = sizeof(struct mpi3_pel_seq);
		mrioc->pel_seqnum_virt = dma_alloc_coherent(&mrioc->pdev->dev,
		    mrioc->pel_seqnum_sz, &mrioc->pel_seqnum_dma,
		    GFP_KERNEL);
		if (!mrioc->pel_seqnum_virt) {
			retval = -ENOMEM;
			goto out_failed_noretry;
		}
	}

	/* Cannot shrink below the queue count the SCSI host was sized for */
	if (mrioc->shost->nr_hw_queues > mrioc->num_op_reply_q) {
		ioc_err(mrioc,
		    "cannot create minimum number of operational queues expected:%d created:%d\n",
		    mrioc->shost->nr_hw_queues, mrioc->num_op_reply_q);
		retval = -1;
		goto out_failed_noretry;
	}

	dprint_reset(mrioc, "enabling events\n");
	retval = mpi3mr_enable_events(mrioc);
	if (retval) {
		ioc_err(mrioc, "failed to enable events\n");
		goto out_failed;
	}

	mrioc->device_refresh_on = 1;
	mpi3mr_add_event_wait_for_device_refresh(mrioc);

	ioc_info(mrioc, "sending port enable\n");
	retval = mpi3mr_issue_port_enable(mrioc, 1);
	if (retval) {
		ioc_err(mrioc, "failed to issue port enable\n");
		goto out_failed;
	}
	/*
	 * Poll for port enable completion: init_cmds returning to NOTUSED
	 * means done; device loss or a fault/reset-history in ioc_status
	 * aborts the wait.
	 */
	do {
		ssleep(MPI3MR_PORTENABLE_POLL_INTERVAL);
		if (mrioc->init_cmds.state == MPI3MR_CMD_NOTUSED)
			break;
		if (!pci_device_is_present(mrioc->pdev))
			mrioc->unrecoverable = 1;
		if (mrioc->unrecoverable) {
			retval = -1;
			goto out_failed_noretry;
		}
		ioc_status = readl(&mrioc->sysif_regs->ioc_status);
		if ((ioc_status & MPI3_SYSIF_IOC_STATUS_RESET_HISTORY) ||
		    (ioc_status & MPI3_SYSIF_IOC_STATUS_FAULT)) {
			mpi3mr_print_fault_info(mrioc);
			mrioc->init_cmds.is_waiting = 0;
			mrioc->init_cmds.callback = NULL;
			mrioc->init_cmds.state = MPI3MR_CMD_NOTUSED;
			goto out_failed;
		}
	} while (--pe_timeout);

	if (!pe_timeout) {
		ioc_err(mrioc, "port enable timed out\n");
		mpi3mr_check_rh_fault_ioc(mrioc,
		    MPI3MR_RESET_FROM_PE_TIMEOUT);
		mrioc->init_cmds.is_waiting = 0;
		mrioc->init_cmds.callback = NULL;
		mrioc->init_cmds.state = MPI3MR_CMD_NOTUSED;
		goto out_failed;
	} else if (mrioc->scan_failed) {
		/* Port enable finished but device scan reported a failure */
		ioc_err(mrioc,
		    "port enable failed with status=0x%04x\n",
		    mrioc->scan_failed);
	} else
		ioc_info(mrioc, "port enable completed successfully\n");

	ioc_info(mrioc, "controller %s completed successfully\n",
	    (is_resume)?"resume":"re-initialization");
	return retval;
out_failed:
	/* Up to two retries after clearing reusable buffers */
	if (retry < 2) {
		retry++;
		ioc_warn(mrioc, "retrying controller %s, retry_count:%d\n",
		    (is_resume)?"resume":"re-initialization", retry);
		mpi3mr_memset_buffers(mrioc);
		goto retry_init;
	}
	retval = -1;
out_failed_noretry:
	ioc_err(mrioc, "controller %s is failed\n",
	    (is_resume)?"resume":"re-initialization");
	mpi3mr_issue_reset(mrioc, MPI3_SYSIF_HOST_DIAG_RESET_ACTION_DIAG_FAULT,
	    MPI3MR_RESET_FROM_CTLR_CLEANUP);
	mrioc->unrecoverable = 1;
	return retval;
}

/**
 * mpi3mr_memset_op_reply_q_buffers - memset the operational reply queue's
 *	segments
 * @mrioc: Adapter instance reference
 * @qidx: Operational reply queue index
 *
 * Return: Nothing.
4277 */ 4278 static void mpi3mr_memset_op_reply_q_buffers(struct mpi3mr_ioc *mrioc, u16 qidx) 4279 { 4280 struct op_reply_qinfo *op_reply_q = mrioc->op_reply_qinfo + qidx; 4281 struct segments *segments; 4282 int i, size; 4283 4284 if (!op_reply_q->q_segments) 4285 return; 4286 4287 size = op_reply_q->segment_qd * mrioc->op_reply_desc_sz; 4288 segments = op_reply_q->q_segments; 4289 for (i = 0; i < op_reply_q->num_segments; i++) 4290 memset(segments[i].segment, 0, size); 4291 } 4292 4293 /** 4294 * mpi3mr_memset_op_req_q_buffers - memset the operational request queue's 4295 * segments 4296 * @mrioc: Adapter instance reference 4297 * @qidx: Operational request queue index 4298 * 4299 * Return: Nothing. 4300 */ 4301 static void mpi3mr_memset_op_req_q_buffers(struct mpi3mr_ioc *mrioc, u16 qidx) 4302 { 4303 struct op_req_qinfo *op_req_q = mrioc->req_qinfo + qidx; 4304 struct segments *segments; 4305 int i, size; 4306 4307 if (!op_req_q->q_segments) 4308 return; 4309 4310 size = op_req_q->segment_qd * mrioc->facts.op_req_sz; 4311 segments = op_req_q->q_segments; 4312 for (i = 0; i < op_req_q->num_segments; i++) 4313 memset(segments[i].segment, 0, size); 4314 } 4315 4316 /** 4317 * mpi3mr_memset_buffers - memset memory for a controller 4318 * @mrioc: Adapter instance reference 4319 * 4320 * clear all the memory allocated for a controller, typically 4321 * called post reset to reuse the memory allocated during the 4322 * controller init. 4323 * 4324 * Return: Nothing. 
4325 */ 4326 void mpi3mr_memset_buffers(struct mpi3mr_ioc *mrioc) 4327 { 4328 u16 i; 4329 struct mpi3mr_throttle_group_info *tg; 4330 4331 mrioc->change_count = 0; 4332 mrioc->active_poll_qcount = 0; 4333 mrioc->default_qcount = 0; 4334 if (mrioc->admin_req_base) 4335 memset(mrioc->admin_req_base, 0, mrioc->admin_req_q_sz); 4336 if (mrioc->admin_reply_base) 4337 memset(mrioc->admin_reply_base, 0, mrioc->admin_reply_q_sz); 4338 atomic_set(&mrioc->admin_reply_q_in_use, 0); 4339 4340 if (mrioc->init_cmds.reply) { 4341 memset(mrioc->init_cmds.reply, 0, sizeof(*mrioc->init_cmds.reply)); 4342 memset(mrioc->bsg_cmds.reply, 0, 4343 sizeof(*mrioc->bsg_cmds.reply)); 4344 memset(mrioc->host_tm_cmds.reply, 0, 4345 sizeof(*mrioc->host_tm_cmds.reply)); 4346 memset(mrioc->pel_cmds.reply, 0, 4347 sizeof(*mrioc->pel_cmds.reply)); 4348 memset(mrioc->pel_abort_cmd.reply, 0, 4349 sizeof(*mrioc->pel_abort_cmd.reply)); 4350 memset(mrioc->transport_cmds.reply, 0, 4351 sizeof(*mrioc->transport_cmds.reply)); 4352 for (i = 0; i < MPI3MR_NUM_DEVRMCMD; i++) 4353 memset(mrioc->dev_rmhs_cmds[i].reply, 0, 4354 sizeof(*mrioc->dev_rmhs_cmds[i].reply)); 4355 for (i = 0; i < MPI3MR_NUM_EVTACKCMD; i++) 4356 memset(mrioc->evtack_cmds[i].reply, 0, 4357 sizeof(*mrioc->evtack_cmds[i].reply)); 4358 bitmap_clear(mrioc->removepend_bitmap, 0, 4359 mrioc->dev_handle_bitmap_bits); 4360 bitmap_clear(mrioc->devrem_bitmap, 0, MPI3MR_NUM_DEVRMCMD); 4361 bitmap_clear(mrioc->evtack_cmds_bitmap, 0, 4362 MPI3MR_NUM_EVTACKCMD); 4363 } 4364 4365 for (i = 0; i < mrioc->num_queues; i++) { 4366 mrioc->op_reply_qinfo[i].qid = 0; 4367 mrioc->op_reply_qinfo[i].ci = 0; 4368 mrioc->op_reply_qinfo[i].num_replies = 0; 4369 mrioc->op_reply_qinfo[i].ephase = 0; 4370 atomic_set(&mrioc->op_reply_qinfo[i].pend_ios, 0); 4371 atomic_set(&mrioc->op_reply_qinfo[i].in_use, 0); 4372 mpi3mr_memset_op_reply_q_buffers(mrioc, i); 4373 4374 mrioc->req_qinfo[i].ci = 0; 4375 mrioc->req_qinfo[i].pi = 0; 4376 mrioc->req_qinfo[i].num_requests = 0; 
4377 mrioc->req_qinfo[i].qid = 0; 4378 mrioc->req_qinfo[i].reply_qid = 0; 4379 spin_lock_init(&mrioc->req_qinfo[i].q_lock); 4380 mpi3mr_memset_op_req_q_buffers(mrioc, i); 4381 } 4382 4383 atomic_set(&mrioc->pend_large_data_sz, 0); 4384 if (mrioc->throttle_groups) { 4385 tg = mrioc->throttle_groups; 4386 for (i = 0; i < mrioc->num_io_throttle_group; i++, tg++) { 4387 tg->id = 0; 4388 tg->fw_qd = 0; 4389 tg->modified_qd = 0; 4390 tg->io_divert = 0; 4391 tg->need_qd_reduction = 0; 4392 tg->high = 0; 4393 tg->low = 0; 4394 tg->qd_reduction = 0; 4395 atomic_set(&tg->pend_large_data_sz, 0); 4396 } 4397 } 4398 } 4399 4400 /** 4401 * mpi3mr_free_mem - Free memory allocated for a controller 4402 * @mrioc: Adapter instance reference 4403 * 4404 * Free all the memory allocated for a controller. 4405 * 4406 * Return: Nothing. 4407 */ 4408 void mpi3mr_free_mem(struct mpi3mr_ioc *mrioc) 4409 { 4410 u16 i; 4411 struct mpi3mr_intr_info *intr_info; 4412 4413 mpi3mr_free_enclosure_list(mrioc); 4414 mpi3mr_free_ioctl_dma_memory(mrioc); 4415 4416 if (mrioc->sense_buf_pool) { 4417 if (mrioc->sense_buf) 4418 dma_pool_free(mrioc->sense_buf_pool, mrioc->sense_buf, 4419 mrioc->sense_buf_dma); 4420 dma_pool_destroy(mrioc->sense_buf_pool); 4421 mrioc->sense_buf = NULL; 4422 mrioc->sense_buf_pool = NULL; 4423 } 4424 if (mrioc->sense_buf_q_pool) { 4425 if (mrioc->sense_buf_q) 4426 dma_pool_free(mrioc->sense_buf_q_pool, 4427 mrioc->sense_buf_q, mrioc->sense_buf_q_dma); 4428 dma_pool_destroy(mrioc->sense_buf_q_pool); 4429 mrioc->sense_buf_q = NULL; 4430 mrioc->sense_buf_q_pool = NULL; 4431 } 4432 4433 if (mrioc->reply_buf_pool) { 4434 if (mrioc->reply_buf) 4435 dma_pool_free(mrioc->reply_buf_pool, mrioc->reply_buf, 4436 mrioc->reply_buf_dma); 4437 dma_pool_destroy(mrioc->reply_buf_pool); 4438 mrioc->reply_buf = NULL; 4439 mrioc->reply_buf_pool = NULL; 4440 } 4441 if (mrioc->reply_free_q_pool) { 4442 if (mrioc->reply_free_q) 4443 dma_pool_free(mrioc->reply_free_q_pool, 4444 mrioc->reply_free_q, 
mrioc->reply_free_q_dma); 4445 dma_pool_destroy(mrioc->reply_free_q_pool); 4446 mrioc->reply_free_q = NULL; 4447 mrioc->reply_free_q_pool = NULL; 4448 } 4449 4450 for (i = 0; i < mrioc->num_op_req_q; i++) 4451 mpi3mr_free_op_req_q_segments(mrioc, i); 4452 4453 for (i = 0; i < mrioc->num_op_reply_q; i++) 4454 mpi3mr_free_op_reply_q_segments(mrioc, i); 4455 4456 for (i = 0; i < mrioc->intr_info_count; i++) { 4457 intr_info = mrioc->intr_info + i; 4458 intr_info->op_reply_q = NULL; 4459 } 4460 4461 kfree(mrioc->req_qinfo); 4462 mrioc->req_qinfo = NULL; 4463 mrioc->num_op_req_q = 0; 4464 4465 kfree(mrioc->op_reply_qinfo); 4466 mrioc->op_reply_qinfo = NULL; 4467 mrioc->num_op_reply_q = 0; 4468 4469 kfree(mrioc->init_cmds.reply); 4470 mrioc->init_cmds.reply = NULL; 4471 4472 kfree(mrioc->bsg_cmds.reply); 4473 mrioc->bsg_cmds.reply = NULL; 4474 4475 kfree(mrioc->host_tm_cmds.reply); 4476 mrioc->host_tm_cmds.reply = NULL; 4477 4478 kfree(mrioc->pel_cmds.reply); 4479 mrioc->pel_cmds.reply = NULL; 4480 4481 kfree(mrioc->pel_abort_cmd.reply); 4482 mrioc->pel_abort_cmd.reply = NULL; 4483 4484 for (i = 0; i < MPI3MR_NUM_EVTACKCMD; i++) { 4485 kfree(mrioc->evtack_cmds[i].reply); 4486 mrioc->evtack_cmds[i].reply = NULL; 4487 } 4488 4489 bitmap_free(mrioc->removepend_bitmap); 4490 mrioc->removepend_bitmap = NULL; 4491 4492 bitmap_free(mrioc->devrem_bitmap); 4493 mrioc->devrem_bitmap = NULL; 4494 4495 bitmap_free(mrioc->evtack_cmds_bitmap); 4496 mrioc->evtack_cmds_bitmap = NULL; 4497 4498 bitmap_free(mrioc->chain_bitmap); 4499 mrioc->chain_bitmap = NULL; 4500 4501 kfree(mrioc->transport_cmds.reply); 4502 mrioc->transport_cmds.reply = NULL; 4503 4504 for (i = 0; i < MPI3MR_NUM_DEVRMCMD; i++) { 4505 kfree(mrioc->dev_rmhs_cmds[i].reply); 4506 mrioc->dev_rmhs_cmds[i].reply = NULL; 4507 } 4508 4509 if (mrioc->chain_buf_pool) { 4510 for (i = 0; i < mrioc->chain_buf_count; i++) { 4511 if (mrioc->chain_sgl_list[i].addr) { 4512 dma_pool_free(mrioc->chain_buf_pool, 4513 
mrioc->chain_sgl_list[i].addr, 4514 mrioc->chain_sgl_list[i].dma_addr); 4515 mrioc->chain_sgl_list[i].addr = NULL; 4516 } 4517 } 4518 dma_pool_destroy(mrioc->chain_buf_pool); 4519 mrioc->chain_buf_pool = NULL; 4520 } 4521 4522 kfree(mrioc->chain_sgl_list); 4523 mrioc->chain_sgl_list = NULL; 4524 4525 if (mrioc->admin_reply_base) { 4526 dma_free_coherent(&mrioc->pdev->dev, mrioc->admin_reply_q_sz, 4527 mrioc->admin_reply_base, mrioc->admin_reply_dma); 4528 mrioc->admin_reply_base = NULL; 4529 } 4530 if (mrioc->admin_req_base) { 4531 dma_free_coherent(&mrioc->pdev->dev, mrioc->admin_req_q_sz, 4532 mrioc->admin_req_base, mrioc->admin_req_dma); 4533 mrioc->admin_req_base = NULL; 4534 } 4535 if (mrioc->cfg_page) { 4536 dma_free_coherent(&mrioc->pdev->dev, mrioc->cfg_page_sz, 4537 mrioc->cfg_page, mrioc->cfg_page_dma); 4538 mrioc->cfg_page = NULL; 4539 } 4540 if (mrioc->pel_seqnum_virt) { 4541 dma_free_coherent(&mrioc->pdev->dev, mrioc->pel_seqnum_sz, 4542 mrioc->pel_seqnum_virt, mrioc->pel_seqnum_dma); 4543 mrioc->pel_seqnum_virt = NULL; 4544 } 4545 4546 kfree(mrioc->throttle_groups); 4547 mrioc->throttle_groups = NULL; 4548 4549 kfree(mrioc->logdata_buf); 4550 mrioc->logdata_buf = NULL; 4551 4552 } 4553 4554 /** 4555 * mpi3mr_issue_ioc_shutdown - shutdown controller 4556 * @mrioc: Adapter instance reference 4557 * 4558 * Send shutodwn notification to the controller and wait for the 4559 * shutdown_timeout for it to be completed. 4560 * 4561 * Return: Nothing. 
 */
static void mpi3mr_issue_ioc_shutdown(struct mpi3mr_ioc *mrioc)
{
	u32 ioc_config, ioc_status;
	u8 retval = 1;
	/* Poll granularity is 100ms, hence timeout counts in tenths of a second */
	u32 timeout = MPI3MR_DEFAULT_SHUTDOWN_TIME * 10;

	ioc_info(mrioc, "Issuing shutdown Notification\n");
	if (mrioc->unrecoverable) {
		ioc_warn(mrioc,
		    "IOC is unrecoverable shutdown is not issued\n");
		return;
	}
	ioc_status = readl(&mrioc->sysif_regs->ioc_status);
	if ((ioc_status & MPI3_SYSIF_IOC_STATUS_SHUTDOWN_MASK)
	    == MPI3_SYSIF_IOC_STATUS_SHUTDOWN_IN_PROGRESS) {
		ioc_info(mrioc, "shutdown already in progress\n");
		return;
	}

	/* Request a normal device shutdown via the IOC configuration register */
	ioc_config = readl(&mrioc->sysif_regs->ioc_configuration);
	ioc_config |= MPI3_SYSIF_IOC_CONFIG_SHUTDOWN_NORMAL;
	ioc_config |= MPI3_SYSIF_IOC_CONFIG_DEVICE_SHUTDOWN_SEND_REQ;

	writel(ioc_config, &mrioc->sysif_regs->ioc_configuration);

	/* Firmware-advertised timeout (in seconds) overrides the default */
	if (mrioc->facts.shutdown_timeout)
		timeout = mrioc->facts.shutdown_timeout * 10;

	do {
		ioc_status = readl(&mrioc->sysif_regs->ioc_status);
		if ((ioc_status & MPI3_SYSIF_IOC_STATUS_SHUTDOWN_MASK)
		    == MPI3_SYSIF_IOC_STATUS_SHUTDOWN_COMPLETE) {
			retval = 0;
			break;
		}
		msleep(100);
	} while (--timeout);

	ioc_status = readl(&mrioc->sysif_regs->ioc_status);
	ioc_config = readl(&mrioc->sysif_regs->ioc_configuration);

	if (retval) {
		if ((ioc_status & MPI3_SYSIF_IOC_STATUS_SHUTDOWN_MASK)
		    == MPI3_SYSIF_IOC_STATUS_SHUTDOWN_IN_PROGRESS)
			ioc_warn(mrioc,
			    "shutdown still in progress after timeout\n");
	}

	ioc_info(mrioc,
	    "Base IOC Sts/Config after %s shutdown is (0x%x)/(0x%x)\n",
	    (!retval) ? "successful" : "failed", ioc_status,
	    ioc_config);
}

/**
 * mpi3mr_cleanup_ioc - Cleanup controller
 * @mrioc: Adapter instance reference
 *
 * controller cleanup handler, Message unit reset or soft reset
 * and shutdown notification is issued to the controller.
 *
 * Return: Nothing.
 */
void mpi3mr_cleanup_ioc(struct mpi3mr_ioc *mrioc)
{
	enum mpi3mr_iocstate ioc_state;

	dprint_exit(mrioc, "cleaning up the controller\n");
	mpi3mr_ioc_disable_intr(mrioc);

	ioc_state = mpi3mr_get_iocstate(mrioc);

	/* Only a healthy, idle, READY controller gets MUR + shutdown */
	if ((!mrioc->unrecoverable) && (!mrioc->reset_in_progress) &&
	    (ioc_state == MRIOC_STATE_READY)) {
		/* Fall back to soft reset if message unit reset fails */
		if (mpi3mr_issue_and_process_mur(mrioc,
		    MPI3MR_RESET_FROM_CTLR_CLEANUP))
			mpi3mr_issue_reset(mrioc,
			    MPI3_SYSIF_HOST_DIAG_RESET_ACTION_SOFT_RESET,
			    MPI3MR_RESET_FROM_MUR_FAILURE);
		mpi3mr_issue_ioc_shutdown(mrioc);
	}
	dprint_exit(mrioc, "controller cleanup completed\n");
}

/**
 * mpi3mr_drv_cmd_comp_reset - Flush a internal driver command
 * @mrioc: Adapter instance reference
 * @cmdptr: Internal command tracker
 *
 * Complete an internal driver commands with state indicating it
 * is completed due to reset.
 *
 * Return: Nothing.
 */
static inline void mpi3mr_drv_cmd_comp_reset(struct mpi3mr_ioc *mrioc,
	struct mpi3mr_drv_cmd *cmdptr)
{
	if (cmdptr->state & MPI3MR_CMD_PENDING) {
		cmdptr->state |= MPI3MR_CMD_RESET;
		cmdptr->state &= ~MPI3MR_CMD_PENDING;
		/* Wake a synchronous waiter, else fire the async callback */
		if (cmdptr->is_waiting) {
			complete(&cmdptr->done);
			cmdptr->is_waiting = 0;
		} else if (cmdptr->callback)
			cmdptr->callback(mrioc, cmdptr);
	}
}

/**
 * mpi3mr_flush_drv_cmds - Flush internaldriver commands
 * @mrioc: Adapter instance reference
 *
 * Flush all internal driver commands post reset
 *
 * Return: Nothing.
4678 */ 4679 void mpi3mr_flush_drv_cmds(struct mpi3mr_ioc *mrioc) 4680 { 4681 struct mpi3mr_drv_cmd *cmdptr; 4682 u8 i; 4683 4684 cmdptr = &mrioc->init_cmds; 4685 mpi3mr_drv_cmd_comp_reset(mrioc, cmdptr); 4686 4687 cmdptr = &mrioc->cfg_cmds; 4688 mpi3mr_drv_cmd_comp_reset(mrioc, cmdptr); 4689 4690 cmdptr = &mrioc->bsg_cmds; 4691 mpi3mr_drv_cmd_comp_reset(mrioc, cmdptr); 4692 cmdptr = &mrioc->host_tm_cmds; 4693 mpi3mr_drv_cmd_comp_reset(mrioc, cmdptr); 4694 4695 for (i = 0; i < MPI3MR_NUM_DEVRMCMD; i++) { 4696 cmdptr = &mrioc->dev_rmhs_cmds[i]; 4697 mpi3mr_drv_cmd_comp_reset(mrioc, cmdptr); 4698 } 4699 4700 for (i = 0; i < MPI3MR_NUM_EVTACKCMD; i++) { 4701 cmdptr = &mrioc->evtack_cmds[i]; 4702 mpi3mr_drv_cmd_comp_reset(mrioc, cmdptr); 4703 } 4704 4705 cmdptr = &mrioc->pel_cmds; 4706 mpi3mr_drv_cmd_comp_reset(mrioc, cmdptr); 4707 4708 cmdptr = &mrioc->pel_abort_cmd; 4709 mpi3mr_drv_cmd_comp_reset(mrioc, cmdptr); 4710 4711 cmdptr = &mrioc->transport_cmds; 4712 mpi3mr_drv_cmd_comp_reset(mrioc, cmdptr); 4713 } 4714 4715 /** 4716 * mpi3mr_pel_wait_post - Issue PEL Wait 4717 * @mrioc: Adapter instance reference 4718 * @drv_cmd: Internal command tracker 4719 * 4720 * Issue PEL Wait MPI request through admin queue and return. 4721 * 4722 * Return: Nothing. 
4723 */ 4724 static void mpi3mr_pel_wait_post(struct mpi3mr_ioc *mrioc, 4725 struct mpi3mr_drv_cmd *drv_cmd) 4726 { 4727 struct mpi3_pel_req_action_wait pel_wait; 4728 4729 mrioc->pel_abort_requested = false; 4730 4731 memset(&pel_wait, 0, sizeof(pel_wait)); 4732 drv_cmd->state = MPI3MR_CMD_PENDING; 4733 drv_cmd->is_waiting = 0; 4734 drv_cmd->callback = mpi3mr_pel_wait_complete; 4735 drv_cmd->ioc_status = 0; 4736 drv_cmd->ioc_loginfo = 0; 4737 pel_wait.host_tag = cpu_to_le16(MPI3MR_HOSTTAG_PEL_WAIT); 4738 pel_wait.function = MPI3_FUNCTION_PERSISTENT_EVENT_LOG; 4739 pel_wait.action = MPI3_PEL_ACTION_WAIT; 4740 pel_wait.starting_sequence_number = cpu_to_le32(mrioc->pel_newest_seqnum); 4741 pel_wait.locale = cpu_to_le16(mrioc->pel_locale); 4742 pel_wait.class = cpu_to_le16(mrioc->pel_class); 4743 pel_wait.wait_time = MPI3_PEL_WAITTIME_INFINITE_WAIT; 4744 dprint_bsg_info(mrioc, "sending pel_wait seqnum(%d), class(%d), locale(0x%08x)\n", 4745 mrioc->pel_newest_seqnum, mrioc->pel_class, mrioc->pel_locale); 4746 4747 if (mpi3mr_admin_request_post(mrioc, &pel_wait, sizeof(pel_wait), 0)) { 4748 dprint_bsg_err(mrioc, 4749 "Issuing PELWait: Admin post failed\n"); 4750 drv_cmd->state = MPI3MR_CMD_NOTUSED; 4751 drv_cmd->callback = NULL; 4752 drv_cmd->retry_count = 0; 4753 mrioc->pel_enabled = false; 4754 } 4755 } 4756 4757 /** 4758 * mpi3mr_pel_get_seqnum_post - Issue PEL Get Sequence number 4759 * @mrioc: Adapter instance reference 4760 * @drv_cmd: Internal command tracker 4761 * 4762 * Issue PEL get sequence number MPI request through admin queue 4763 * and return. 4764 * 4765 * Return: 0 on success, non-zero on failure. 
4766 */ 4767 int mpi3mr_pel_get_seqnum_post(struct mpi3mr_ioc *mrioc, 4768 struct mpi3mr_drv_cmd *drv_cmd) 4769 { 4770 struct mpi3_pel_req_action_get_sequence_numbers pel_getseq_req; 4771 u8 sgl_flags = MPI3MR_SGEFLAGS_SYSTEM_SIMPLE_END_OF_LIST; 4772 int retval = 0; 4773 4774 memset(&pel_getseq_req, 0, sizeof(pel_getseq_req)); 4775 mrioc->pel_cmds.state = MPI3MR_CMD_PENDING; 4776 mrioc->pel_cmds.is_waiting = 0; 4777 mrioc->pel_cmds.ioc_status = 0; 4778 mrioc->pel_cmds.ioc_loginfo = 0; 4779 mrioc->pel_cmds.callback = mpi3mr_pel_get_seqnum_complete; 4780 pel_getseq_req.host_tag = cpu_to_le16(MPI3MR_HOSTTAG_PEL_WAIT); 4781 pel_getseq_req.function = MPI3_FUNCTION_PERSISTENT_EVENT_LOG; 4782 pel_getseq_req.action = MPI3_PEL_ACTION_GET_SEQNUM; 4783 mpi3mr_add_sg_single(&pel_getseq_req.sgl, sgl_flags, 4784 mrioc->pel_seqnum_sz, mrioc->pel_seqnum_dma); 4785 4786 retval = mpi3mr_admin_request_post(mrioc, &pel_getseq_req, 4787 sizeof(pel_getseq_req), 0); 4788 if (retval) { 4789 if (drv_cmd) { 4790 drv_cmd->state = MPI3MR_CMD_NOTUSED; 4791 drv_cmd->callback = NULL; 4792 drv_cmd->retry_count = 0; 4793 } 4794 mrioc->pel_enabled = false; 4795 } 4796 4797 return retval; 4798 } 4799 4800 /** 4801 * mpi3mr_pel_wait_complete - PELWait Completion callback 4802 * @mrioc: Adapter instance reference 4803 * @drv_cmd: Internal command tracker 4804 * 4805 * This is a callback handler for the PELWait request and 4806 * firmware completes a PELWait request when it is aborted or a 4807 * new PEL entry is available. This sends AEN to the application 4808 * and if the PELwait completion is not due to PELAbort then 4809 * this will send a request for new PEL Sequence number 4810 * 4811 * Return: Nothing. 
 */
static void mpi3mr_pel_wait_complete(struct mpi3mr_ioc *mrioc,
	struct mpi3mr_drv_cmd *drv_cmd)
{
	struct mpi3_pel_reply *pel_reply = NULL;
	u16 ioc_status, pe_log_status;
	bool do_retry = false;

	/* Completed due to controller reset — just release the tracker */
	if (drv_cmd->state & MPI3MR_CMD_RESET)
		goto cleanup_drv_cmd;

	ioc_status = drv_cmd->ioc_status & MPI3_IOCSTATUS_STATUS_MASK;
	if (ioc_status != MPI3_IOCSTATUS_SUCCESS) {
		ioc_err(mrioc, "%s: Failed ioc_status(0x%04x) Loginfo(0x%08x)\n",
		    __func__, ioc_status, drv_cmd->ioc_loginfo);
		dprint_bsg_err(mrioc,
		    "pel_wait: failed with ioc_status(0x%04x), log_info(0x%08x)\n",
		    ioc_status, drv_cmd->ioc_loginfo);
		do_retry = true;
	}

	if (drv_cmd->state & MPI3MR_CMD_REPLY_VALID)
		pel_reply = (struct mpi3_pel_reply *)drv_cmd->reply;

	if (!pel_reply) {
		dprint_bsg_err(mrioc,
		    "pel_wait: failed due to no reply\n");
		goto out_failed;
	}

	/* ABORTED is a normal completion (PEL abort path), not an error */
	pe_log_status = le16_to_cpu(pel_reply->pe_log_status);
	if ((pe_log_status != MPI3_PEL_STATUS_SUCCESS) &&
	    (pe_log_status != MPI3_PEL_STATUS_ABORTED)) {
		ioc_err(mrioc, "%s: Failed pe_log_status(0x%04x)\n",
		    __func__, pe_log_status);
		dprint_bsg_err(mrioc,
		    "pel_wait: failed due to pel_log_status(0x%04x)\n",
		    pe_log_status);
		do_retry = true;
	}

	if (do_retry) {
		/* Re-post the wait up to MPI3MR_PEL_RETRY_COUNT times */
		if (drv_cmd->retry_count < MPI3MR_PEL_RETRY_COUNT) {
			drv_cmd->retry_count++;
			dprint_bsg_err(mrioc, "pel_wait: retrying(%d)\n",
			    drv_cmd->retry_count);
			mpi3mr_pel_wait_post(mrioc, drv_cmd);
			return;
		}
		dprint_bsg_err(mrioc,
		    "pel_wait: failed after all retries(%d)\n",
		    drv_cmd->retry_count);
		goto out_failed;
	}
	/* Notify applications polling on event_counter of the new PEL entry */
	atomic64_inc(&event_counter);
	if (!mrioc->pel_abort_requested) {
		/* Not aborted: fetch the new sequence number and re-arm */
		mrioc->pel_cmds.retry_count = 0;
		mpi3mr_pel_get_seqnum_post(mrioc, &mrioc->pel_cmds);
	}

	return;
out_failed:
	mrioc->pel_enabled = false;
cleanup_drv_cmd:
	drv_cmd->state = MPI3MR_CMD_NOTUSED;
	drv_cmd->callback = NULL;
	drv_cmd->retry_count = 0;
}

/**
 * mpi3mr_pel_get_seqnum_complete - PELGetSeqNum Completion callback
 * @mrioc: Adapter instance reference
 * @drv_cmd: Internal command tracker
 *
 * This is a callback handler for the PEL get sequence number
 * request and a new PEL wait request will be issued to the
 * firmware from this
 *
 * Return: Nothing.
 */
void mpi3mr_pel_get_seqnum_complete(struct mpi3mr_ioc *mrioc,
	struct mpi3mr_drv_cmd *drv_cmd)
{
	struct mpi3_pel_reply *pel_reply = NULL;
	struct mpi3_pel_seq *pel_seqnum_virt;
	u16 ioc_status;
	bool do_retry = false;

	/* Firmware DMA'd the sequence numbers into this host buffer */
	pel_seqnum_virt = (struct mpi3_pel_seq *)mrioc->pel_seqnum_virt;

	if (drv_cmd->state & MPI3MR_CMD_RESET)
		goto cleanup_drv_cmd;

	ioc_status = drv_cmd->ioc_status & MPI3_IOCSTATUS_STATUS_MASK;
	if (ioc_status != MPI3_IOCSTATUS_SUCCESS) {
		dprint_bsg_err(mrioc,
		    "pel_get_seqnum: failed with ioc_status(0x%04x), log_info(0x%08x)\n",
		    ioc_status, drv_cmd->ioc_loginfo);
		do_retry = true;
	}

	if (drv_cmd->state & MPI3MR_CMD_REPLY_VALID)
		pel_reply = (struct mpi3_pel_reply *)drv_cmd->reply;
	if (!pel_reply) {
		dprint_bsg_err(mrioc,
		    "pel_get_seqnum: failed due to no reply\n");
		goto out_failed;
	}

	if (le16_to_cpu(pel_reply->pe_log_status) != MPI3_PEL_STATUS_SUCCESS) {
		dprint_bsg_err(mrioc,
		    "pel_get_seqnum: failed due to pel_log_status(0x%04x)\n",
		    le16_to_cpu(pel_reply->pe_log_status));
		do_retry = true;
	}

	if (do_retry) {
		if (drv_cmd->retry_count < MPI3MR_PEL_RETRY_COUNT) {
			drv_cmd->retry_count++;
			dprint_bsg_err(mrioc,
			    "pel_get_seqnum: retrying(%d)\n",
			    drv_cmd->retry_count);
			mpi3mr_pel_get_seqnum_post(mrioc, drv_cmd);
			return;
		}

		dprint_bsg_err(mrioc,
		    "pel_get_seqnum: failed after all retries(%d)\n",
		    drv_cmd->retry_count);
		goto out_failed;
	}
	/* Next PEL wait starts one past the newest logged entry */
	mrioc->pel_newest_seqnum = le32_to_cpu(pel_seqnum_virt->newest) + 1;
	drv_cmd->retry_count = 0;
	mpi3mr_pel_wait_post(mrioc, drv_cmd);

	return;
out_failed:
	mrioc->pel_enabled = false;
cleanup_drv_cmd:
	drv_cmd->state = MPI3MR_CMD_NOTUSED;
	drv_cmd->callback = NULL;
	drv_cmd->retry_count = 0;
}

/**
 * mpi3mr_soft_reset_handler - Reset the controller
 * @mrioc: Adapter instance reference
 * @reset_reason: Reset reason code
 * @snapdump: Flag to generate snapdump in firmware or not
 *
 * This is an handler for recovering controller by issuing soft
 * reset are diag fault reset. This is a blocking function and
 * when one reset is executed if any other resets they will be
 * blocked. All BSG requests will be blocked during the reset. If
 * controller reset is successful then the controller will be
 * reinitalized, otherwise the controller will be marked as not
 * recoverable
 *
 * In snapdump bit is set, the controller is issued with diag
 * fault reset so that the firmware can create a snap dump and
 * post that the firmware will result in F000 fault and the
 * driver will issue soft reset to recover from that.
 *
 * Return: 0 on success, non-zero on failure.
 */
int mpi3mr_soft_reset_handler(struct mpi3mr_ioc *mrioc,
	u16 reset_reason, u8 snapdump)
{
	int retval = 0, i;
	unsigned long flags;
	u32 host_diagnostic, timeout = MPI3_SYSIF_DIAG_SAVE_TIMEOUT * 10;

	/* Block the reset handler until diag save in progress*/
	dprint_reset(mrioc,
	    "soft_reset_handler: check and block on diagsave_timeout(%d)\n",
	    mrioc->diagsave_timeout);
	while (mrioc->diagsave_timeout)
		ssleep(1);
	/*
	 * Block new resets until the currently executing one is finished and
	 * return the status of the existing reset for all blocked resets
	 */
	dprint_reset(mrioc, "soft_reset_handler: acquiring reset_mutex\n");
	if (!mutex_trylock(&mrioc->reset_mutex)) {
		/* Another reset owns the mutex: wait for it to finish and
		 * report its result instead of starting a second reset.
		 */
		ioc_info(mrioc,
		    "controller reset triggered by %s is blocked due to another reset in progress\n",
		    mpi3mr_reset_rc_name(reset_reason));
		do {
			ssleep(1);
		} while (mrioc->reset_in_progress == 1);
		ioc_info(mrioc,
		    "returning previous reset result(%d) for the reset triggered by %s\n",
		    mrioc->prev_reset_result,
		    mpi3mr_reset_rc_name(reset_reason));
		return mrioc->prev_reset_result;
	}
	ioc_info(mrioc, "controller reset is triggered by %s\n",
	    mpi3mr_reset_rc_name(reset_reason));

	mrioc->device_refresh_on = 0;
	mrioc->reset_in_progress = 1;
	mrioc->stop_bsgs = 1;
	mrioc->prev_reset_result = -1;

	/* For host-initiated resets (not firmware/fault driven), mask all
	 * events so no new firmware events arrive while tearing down.
	 */
	if ((!snapdump) && (reset_reason != MPI3MR_RESET_FROM_FAULT_WATCH) &&
	    (reset_reason != MPI3MR_RESET_FROM_FIRMWARE) &&
	    (reset_reason != MPI3MR_RESET_FROM_CIACTIV_FAULT)) {
		for (i = 0; i < MPI3_EVENT_NOTIFY_EVENTMASK_WORDS; i++)
			mrioc->event_masks[i] = -1;

		dprint_reset(mrioc, "soft_reset_handler: masking events\n");
		mpi3mr_issue_event_notification(mrioc);
	}

	mpi3mr_wait_for_host_io(mrioc, MPI3MR_RESET_HOST_IOWAIT_TIMEOUT);

	mpi3mr_ioc_disable_intr(mrioc);

	if (snapdump) {
		/* Fault the controller first and poll until the firmware's
		 * diag save completes (or the save timeout expires) before
		 * issuing the actual soft reset below.
		 */
		mpi3mr_set_diagsave(mrioc);
		retval = mpi3mr_issue_reset(mrioc,
		    MPI3_SYSIF_HOST_DIAG_RESET_ACTION_DIAG_FAULT, reset_reason);
		if (!retval) {
			do {
				host_diagnostic =
				    readl(&mrioc->sysif_regs->host_diagnostic);
				if (!(host_diagnostic &
				    MPI3_SYSIF_HOST_DIAG_SAVE_IN_PROGRESS))
					break;
				msleep(100);
			} while (--timeout);
		}
	}

	retval = mpi3mr_issue_reset(mrioc,
	    MPI3_SYSIF_HOST_DIAG_RESET_ACTION_SOFT_RESET, reset_reason);
	if (retval) {
		ioc_err(mrioc, "Failed to issue soft reset to the ioc\n");
		goto out;
	}
	if (mrioc->num_io_throttle_group !=
	    mrioc->facts.max_io_throttle_group) {
		ioc_err(mrioc,
		    "max io throttle group doesn't match old(%d), new(%d)\n",
		    mrioc->num_io_throttle_group,
		    mrioc->facts.max_io_throttle_group);
		retval = -EPERM;
		goto out;
	}

	/* Flush every outstanding internal/driver/host command and drop all
	 * cached device state before re-initializing the controller.
	 */
	mpi3mr_flush_delayed_cmd_lists(mrioc);
	mpi3mr_flush_drv_cmds(mrioc);
	bitmap_clear(mrioc->devrem_bitmap, 0, MPI3MR_NUM_DEVRMCMD);
	bitmap_clear(mrioc->removepend_bitmap, 0,
		     mrioc->dev_handle_bitmap_bits);
	bitmap_clear(mrioc->evtack_cmds_bitmap, 0, MPI3MR_NUM_EVTACKCMD);
	mpi3mr_flush_host_io(mrioc);
	mpi3mr_cleanup_fwevt_list(mrioc);
	mpi3mr_invalidate_devhandles(mrioc);
	mpi3mr_free_enclosure_list(mrioc);

	if (mrioc->prepare_for_reset) {
		mrioc->prepare_for_reset = 0;
		mrioc->prepare_for_reset_timeout_counter = 0;
	}
	mpi3mr_memset_buffers(mrioc);
	retval = mpi3mr_reinit_ioc(mrioc, 0);
	if (retval) {
		pr_err(IOCNAME "reinit after soft reset failed: reason %d\n",
		    mrioc->name, reset_reason);
		goto out;
	}
	/* Give the topology time to settle before declaring success. */
	ssleep(MPI3MR_RESET_TOPOLOGY_SETTLE_TIME);

out:
	if (!retval) {
		/* Success: re-arm PEL wait, restart the watchdog and unblock
		 * BSG requests.
		 */
		mrioc->diagsave_timeout = 0;
		mrioc->reset_in_progress = 0;
		mrioc->pel_abort_requested = 0;
		if (mrioc->pel_enabled) {
			mrioc->pel_cmds.retry_count = 0;
			mpi3mr_pel_wait_post(mrioc, &mrioc->pel_cmds);
		}

		mrioc->device_refresh_on = 0;

		mrioc->ts_update_counter = 0;
		spin_lock_irqsave(&mrioc->watchdog_lock, flags);
		if (mrioc->watchdog_work_q)
			queue_delayed_work(mrioc->watchdog_work_q,
			    &mrioc->watchdog_work,
			    msecs_to_jiffies(MPI3MR_WATCHDOG_INTERVAL));
		spin_unlock_irqrestore(&mrioc->watchdog_lock, flags);
		mrioc->stop_bsgs = 0;
		if (mrioc->pel_enabled)
			atomic64_inc(&event_counter);
	} else {
		/* Recovery failed: fault the controller for diagnosis and
		 * mark it unrecoverable.
		 */
		mpi3mr_issue_reset(mrioc,
		    MPI3_SYSIF_HOST_DIAG_RESET_ACTION_DIAG_FAULT, reset_reason);
		mrioc->device_refresh_on = 0;
		mrioc->unrecoverable = 1;
		mrioc->reset_in_progress = 0;
		mrioc->stop_bsgs = 0;
		retval = -1;
		mpi3mr_flush_cmds_for_unrecovered_controller(mrioc);
	}
	mrioc->prev_reset_result = retval;
	mutex_unlock(&mrioc->reset_mutex);
	ioc_info(mrioc, "controller reset is %s\n",
	    ((retval == 0) ? "successful" : "failed"));
	return retval;
}


/**
 * mpi3mr_free_config_dma_memory - free memory for config page
 * @mrioc: Adapter instance reference
 * @mem_desc: memory descriptor structure
 *
 * Check whether the size of the buffer specified by the memory
 * descriptor is greater than the default page size; if so, free
 * the memory pointed to by the descriptor.
 *
 * Return: Nothing.
5136 */ 5137 static void mpi3mr_free_config_dma_memory(struct mpi3mr_ioc *mrioc, 5138 struct dma_memory_desc *mem_desc) 5139 { 5140 if ((mem_desc->size > mrioc->cfg_page_sz) && mem_desc->addr) { 5141 dma_free_coherent(&mrioc->pdev->dev, mem_desc->size, 5142 mem_desc->addr, mem_desc->dma_addr); 5143 mem_desc->addr = NULL; 5144 } 5145 } 5146 5147 /** 5148 * mpi3mr_alloc_config_dma_memory - Alloc memory for config page 5149 * @mrioc: Adapter instance reference 5150 * @mem_desc: Memory descriptor to hold dma memory info 5151 * 5152 * This function allocates new dmaable memory or provides the 5153 * default config page dmaable memory based on the memory size 5154 * described by the descriptor. 5155 * 5156 * Return: 0 on success, non-zero on failure. 5157 */ 5158 static int mpi3mr_alloc_config_dma_memory(struct mpi3mr_ioc *mrioc, 5159 struct dma_memory_desc *mem_desc) 5160 { 5161 if (mem_desc->size > mrioc->cfg_page_sz) { 5162 mem_desc->addr = dma_alloc_coherent(&mrioc->pdev->dev, 5163 mem_desc->size, &mem_desc->dma_addr, GFP_KERNEL); 5164 if (!mem_desc->addr) 5165 return -ENOMEM; 5166 } else { 5167 mem_desc->addr = mrioc->cfg_page; 5168 mem_desc->dma_addr = mrioc->cfg_page_dma; 5169 memset(mem_desc->addr, 0, mrioc->cfg_page_sz); 5170 } 5171 return 0; 5172 } 5173 5174 /** 5175 * mpi3mr_post_cfg_req - Issue config requests and wait 5176 * @mrioc: Adapter instance reference 5177 * @cfg_req: Configuration request 5178 * @timeout: Timeout in seconds 5179 * @ioc_status: Pointer to return ioc status 5180 * 5181 * A generic function for posting MPI3 configuration request to 5182 * the firmware. This blocks for the completion of request for 5183 * timeout seconds and if the request times out this function 5184 * faults the controller with proper reason code. 5185 * 5186 * On successful completion of the request this function returns 5187 * appropriate ioc status from the firmware back to the caller. 5188 * 5189 * Return: 0 on success, non-zero on failure. 
 */
static int mpi3mr_post_cfg_req(struct mpi3mr_ioc *mrioc,
	struct mpi3_config_request *cfg_req, int timeout, u16 *ioc_status)
{
	int retval = 0;

	/* cfg_cmds is a single shared command slot; serialize all users. */
	mutex_lock(&mrioc->cfg_cmds.mutex);
	if (mrioc->cfg_cmds.state & MPI3MR_CMD_PENDING) {
		retval = -1;
		ioc_err(mrioc, "sending config request failed due to command in use\n");
		mutex_unlock(&mrioc->cfg_cmds.mutex);
		goto out;
	}
	mrioc->cfg_cmds.state = MPI3MR_CMD_PENDING;
	mrioc->cfg_cmds.is_waiting = 1;
	mrioc->cfg_cmds.callback = NULL;
	mrioc->cfg_cmds.ioc_status = 0;
	mrioc->cfg_cmds.ioc_loginfo = 0;

	/* Host tag and function are fixed for config requests; the caller
	 * populates the rest of the request.
	 */
	cfg_req->host_tag = cpu_to_le16(MPI3MR_HOSTTAG_CFG_CMDS);
	cfg_req->function = MPI3_FUNCTION_CONFIG;

	init_completion(&mrioc->cfg_cmds.done);
	dprint_cfg_info(mrioc, "posting config request\n");
	if (mrioc->logging_level & MPI3_DEBUG_CFG_INFO)
		dprint_dump(cfg_req, sizeof(struct mpi3_config_request),
		    "mpi3_cfg_req");
	retval = mpi3mr_admin_request_post(mrioc, cfg_req, sizeof(*cfg_req), 1);
	if (retval) {
		ioc_err(mrioc, "posting config request failed\n");
		goto out_unlock;
	}
	wait_for_completion_timeout(&mrioc->cfg_cmds.done, (timeout * HZ));
	if (!(mrioc->cfg_cmds.state & MPI3MR_CMD_COMPLETE)) {
		/* No completion within the timeout: fault the controller so
		 * the firmware state gets captured for diagnosis.
		 */
		mpi3mr_check_rh_fault_ioc(mrioc,
		    MPI3MR_RESET_FROM_CFG_REQ_TIMEOUT);
		ioc_err(mrioc, "config request timed out\n");
		retval = -1;
		goto out_unlock;
	}
	/* Hand the firmware's ioc_status (minus flag bits) back to the
	 * caller; non-success is only logged here, not treated as failure.
	 */
	*ioc_status = mrioc->cfg_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK;
	if ((*ioc_status) != MPI3_IOCSTATUS_SUCCESS)
		dprint_cfg_err(mrioc,
		    "cfg_page request returned with ioc_status(0x%04x), log_info(0x%08x)\n",
		    *ioc_status, mrioc->cfg_cmds.ioc_loginfo);

out_unlock:
	mrioc->cfg_cmds.state = MPI3MR_CMD_NOTUSED;
	mutex_unlock(&mrioc->cfg_cmds.mutex);

out:
	return retval;
}

/**
 * mpi3mr_process_cfg_req - config page request processor
 * @mrioc: Adapter instance reference
 * @cfg_req: Configuration request
 * @cfg_hdr: Configuration page header
 * @timeout: Timeout in seconds
 * @ioc_status: Pointer to return ioc status
 * @cfg_buf: Memory pointer to copy config page or header
 * @cfg_buf_sz: Size of the memory to get config page or header
 *
 * This is handler for config page read, write and config page
 * header read operations.
 *
 * This function expects the cfg_req to be populated with page
 * type, page number, action for the header read and with page
 * address for all other operations.
 *
 * The cfg_hdr can be passed as null for reading required header
 * details; for read/write pages the cfg_hdr should point to a valid
 * configuration page header.
 *
 * This allocates dmaable memory based on the size of the config
 * buffer and sets the SGE of the cfg_req.
 *
 * For write actions, the config page data has to be passed in
 * the cfg_buf and size of the data has to be mentioned in the
 * cfg_buf_sz.
 *
 * For read/header actions, on successful completion of the
 * request with successful ioc_status the data will be copied
 * into the cfg_buf limited to a minimum of actual page size and
 * cfg_buf_sz
 *
 *
 * Return: 0 on success, non-zero on failure.
 */
static int mpi3mr_process_cfg_req(struct mpi3mr_ioc *mrioc,
	struct mpi3_config_request *cfg_req,
	struct mpi3_config_page_header *cfg_hdr, int timeout, u16 *ioc_status,
	void *cfg_buf, u32 cfg_buf_sz)
{
	struct dma_memory_desc mem_desc;
	int retval = -1;
	u8 invalid_action = 0;
	u8 sgl_flags = MPI3MR_SGEFLAGS_SYSTEM_SIMPLE_END_OF_LIST;

	memset(&mem_desc, 0, sizeof(struct dma_memory_desc));

	/* Header reads need only a header-sized buffer; every other action
	 * is validated against, and sized from, the supplied page header.
	 */
	if (cfg_req->action == MPI3_CONFIG_ACTION_PAGE_HEADER)
		mem_desc.size = sizeof(struct mpi3_config_page_header);
	else {
		if (!cfg_hdr) {
			ioc_err(mrioc, "null config header passed for config action(%d), page_type(0x%02x), page_num(%d)\n",
			    cfg_req->action, cfg_req->page_type,
			    cfg_req->page_number);
			goto out;
		}
		/* Reject actions the page's attribute does not permit:
		 * read-only pages allow only READ_CURRENT; changeable pages
		 * forbid the persistent read/write actions.
		 */
		switch (cfg_hdr->page_attribute & MPI3_CONFIG_PAGEATTR_MASK) {
		case MPI3_CONFIG_PAGEATTR_READ_ONLY:
			if (cfg_req->action
			    != MPI3_CONFIG_ACTION_READ_CURRENT)
				invalid_action = 1;
			break;
		case MPI3_CONFIG_PAGEATTR_CHANGEABLE:
			if ((cfg_req->action ==
			     MPI3_CONFIG_ACTION_READ_PERSISTENT) ||
			    (cfg_req->action ==
			     MPI3_CONFIG_ACTION_WRITE_PERSISTENT))
				invalid_action = 1;
			break;
		case MPI3_CONFIG_PAGEATTR_PERSISTENT:
		default:
			break;
		}
		if (invalid_action) {
			ioc_err(mrioc,
			    "config action(%d) is not allowed for page_type(0x%02x), page_num(%d) with page_attribute(0x%02x)\n",
			    cfg_req->action, cfg_req->page_type,
			    cfg_req->page_number, cfg_hdr->page_attribute);
			goto out;
		}
		/* page_length is expressed in 4-byte units. */
		mem_desc.size = le16_to_cpu(cfg_hdr->page_length) * 4;
		cfg_req->page_length = cfg_hdr->page_length;
		cfg_req->page_version = cfg_hdr->page_version;
	}
	if (mpi3mr_alloc_config_dma_memory(mrioc, &mem_desc))
		goto out;

	mpi3mr_add_sg_single(&cfg_req->sgl, sgl_flags, mem_desc.size,
	    mem_desc.dma_addr);

	if ((cfg_req->action == MPI3_CONFIG_ACTION_WRITE_PERSISTENT) ||
	    (cfg_req->action == MPI3_CONFIG_ACTION_WRITE_CURRENT)) {
		/* NOTE(review): min_t(u16, ...) truncates above 64K while
		 * both operands are 32-bit — presumably config pages never
		 * exceed 64K; confirm against the MPI3 spec.
		 */
		memcpy(mem_desc.addr, cfg_buf, min_t(u16, mem_desc.size,
		    cfg_buf_sz));
		dprint_cfg_info(mrioc, "config buffer to be written\n");
		if (mrioc->logging_level & MPI3_DEBUG_CFG_INFO)
			dprint_dump(mem_desc.addr, mem_desc.size, "cfg_buf");
	}

	if (mpi3mr_post_cfg_req(mrioc, cfg_req, timeout, ioc_status))
		goto out;

	/* Posting succeeded; copy back data only for read/header actions
	 * that completed with a successful ioc_status.
	 */
	retval = 0;
	if ((*ioc_status == MPI3_IOCSTATUS_SUCCESS) &&
	    (cfg_req->action != MPI3_CONFIG_ACTION_WRITE_PERSISTENT) &&
	    (cfg_req->action != MPI3_CONFIG_ACTION_WRITE_CURRENT)) {
		memcpy(cfg_buf, mem_desc.addr, min_t(u16, mem_desc.size,
		    cfg_buf_sz));
		dprint_cfg_info(mrioc, "config buffer read\n");
		if (mrioc->logging_level & MPI3_DEBUG_CFG_INFO)
			dprint_dump(mem_desc.addr, mem_desc.size, "cfg_buf");
	}

out:
	mpi3mr_free_config_dma_memory(mrioc, &mem_desc);
	return retval;
}

/**
 * mpi3mr_cfg_get_dev_pg0 - Read current device page0
 * @mrioc: Adapter instance reference
 * @ioc_status: Pointer to return ioc status
 * @dev_pg0: Pointer to return device page 0
 * @pg_sz: Size of the memory allocated to the page pointer
 * @form: The form to be used for addressing the page
 * @form_spec: Form specific information like device handle
 *
 * This is handler for config page read for a specific device
 * page0. The ioc_status has the controller returned ioc_status.
 * This routine doesn't check ioc_status to decide whether the
 * page read is success or not and it is the callers
 * responsibility.
 *
 * Return: 0 on success, non-zero on failure.
5379 */ 5380 int mpi3mr_cfg_get_dev_pg0(struct mpi3mr_ioc *mrioc, u16 *ioc_status, 5381 struct mpi3_device_page0 *dev_pg0, u16 pg_sz, u32 form, u32 form_spec) 5382 { 5383 struct mpi3_config_page_header cfg_hdr; 5384 struct mpi3_config_request cfg_req; 5385 u32 page_address; 5386 5387 memset(dev_pg0, 0, pg_sz); 5388 memset(&cfg_hdr, 0, sizeof(cfg_hdr)); 5389 memset(&cfg_req, 0, sizeof(cfg_req)); 5390 5391 cfg_req.function = MPI3_FUNCTION_CONFIG; 5392 cfg_req.action = MPI3_CONFIG_ACTION_PAGE_HEADER; 5393 cfg_req.page_type = MPI3_CONFIG_PAGETYPE_DEVICE; 5394 cfg_req.page_number = 0; 5395 cfg_req.page_address = 0; 5396 5397 if (mpi3mr_process_cfg_req(mrioc, &cfg_req, NULL, 5398 MPI3MR_INTADMCMD_TIMEOUT, ioc_status, &cfg_hdr, sizeof(cfg_hdr))) { 5399 ioc_err(mrioc, "device page0 header read failed\n"); 5400 goto out_failed; 5401 } 5402 if (*ioc_status != MPI3_IOCSTATUS_SUCCESS) { 5403 ioc_err(mrioc, "device page0 header read failed with ioc_status(0x%04x)\n", 5404 *ioc_status); 5405 goto out_failed; 5406 } 5407 cfg_req.action = MPI3_CONFIG_ACTION_READ_CURRENT; 5408 page_address = ((form & MPI3_DEVICE_PGAD_FORM_MASK) | 5409 (form_spec & MPI3_DEVICE_PGAD_HANDLE_MASK)); 5410 cfg_req.page_address = cpu_to_le32(page_address); 5411 if (mpi3mr_process_cfg_req(mrioc, &cfg_req, &cfg_hdr, 5412 MPI3MR_INTADMCMD_TIMEOUT, ioc_status, dev_pg0, pg_sz)) { 5413 ioc_err(mrioc, "device page0 read failed\n"); 5414 goto out_failed; 5415 } 5416 return 0; 5417 out_failed: 5418 return -1; 5419 } 5420 5421 5422 /** 5423 * mpi3mr_cfg_get_sas_phy_pg0 - Read current SAS Phy page0 5424 * @mrioc: Adapter instance reference 5425 * @ioc_status: Pointer to return ioc status 5426 * @phy_pg0: Pointer to return SAS Phy page 0 5427 * @pg_sz: Size of the memory allocated to the page pointer 5428 * @form: The form to be used for addressing the page 5429 * @form_spec: Form specific information like phy number 5430 * 5431 * This is handler for config page read for a specific SAS Phy 5432 * page0. 
The ioc_status has the controller returned ioc_status. 5433 * This routine doesn't check ioc_status to decide whether the 5434 * page read is success or not and it is the callers 5435 * responsibility. 5436 * 5437 * Return: 0 on success, non-zero on failure. 5438 */ 5439 int mpi3mr_cfg_get_sas_phy_pg0(struct mpi3mr_ioc *mrioc, u16 *ioc_status, 5440 struct mpi3_sas_phy_page0 *phy_pg0, u16 pg_sz, u32 form, 5441 u32 form_spec) 5442 { 5443 struct mpi3_config_page_header cfg_hdr; 5444 struct mpi3_config_request cfg_req; 5445 u32 page_address; 5446 5447 memset(phy_pg0, 0, pg_sz); 5448 memset(&cfg_hdr, 0, sizeof(cfg_hdr)); 5449 memset(&cfg_req, 0, sizeof(cfg_req)); 5450 5451 cfg_req.function = MPI3_FUNCTION_CONFIG; 5452 cfg_req.action = MPI3_CONFIG_ACTION_PAGE_HEADER; 5453 cfg_req.page_type = MPI3_CONFIG_PAGETYPE_SAS_PHY; 5454 cfg_req.page_number = 0; 5455 cfg_req.page_address = 0; 5456 5457 if (mpi3mr_process_cfg_req(mrioc, &cfg_req, NULL, 5458 MPI3MR_INTADMCMD_TIMEOUT, ioc_status, &cfg_hdr, sizeof(cfg_hdr))) { 5459 ioc_err(mrioc, "sas phy page0 header read failed\n"); 5460 goto out_failed; 5461 } 5462 if (*ioc_status != MPI3_IOCSTATUS_SUCCESS) { 5463 ioc_err(mrioc, "sas phy page0 header read failed with ioc_status(0x%04x)\n", 5464 *ioc_status); 5465 goto out_failed; 5466 } 5467 cfg_req.action = MPI3_CONFIG_ACTION_READ_CURRENT; 5468 page_address = ((form & MPI3_SAS_PHY_PGAD_FORM_MASK) | 5469 (form_spec & MPI3_SAS_PHY_PGAD_PHY_NUMBER_MASK)); 5470 cfg_req.page_address = cpu_to_le32(page_address); 5471 if (mpi3mr_process_cfg_req(mrioc, &cfg_req, &cfg_hdr, 5472 MPI3MR_INTADMCMD_TIMEOUT, ioc_status, phy_pg0, pg_sz)) { 5473 ioc_err(mrioc, "sas phy page0 read failed\n"); 5474 goto out_failed; 5475 } 5476 return 0; 5477 out_failed: 5478 return -1; 5479 } 5480 5481 /** 5482 * mpi3mr_cfg_get_sas_phy_pg1 - Read current SAS Phy page1 5483 * @mrioc: Adapter instance reference 5484 * @ioc_status: Pointer to return ioc status 5485 * @phy_pg1: Pointer to return SAS Phy page 1 5486 * 
@pg_sz: Size of the memory allocated to the page pointer 5487 * @form: The form to be used for addressing the page 5488 * @form_spec: Form specific information like phy number 5489 * 5490 * This is handler for config page read for a specific SAS Phy 5491 * page1. The ioc_status has the controller returned ioc_status. 5492 * This routine doesn't check ioc_status to decide whether the 5493 * page read is success or not and it is the callers 5494 * responsibility. 5495 * 5496 * Return: 0 on success, non-zero on failure. 5497 */ 5498 int mpi3mr_cfg_get_sas_phy_pg1(struct mpi3mr_ioc *mrioc, u16 *ioc_status, 5499 struct mpi3_sas_phy_page1 *phy_pg1, u16 pg_sz, u32 form, 5500 u32 form_spec) 5501 { 5502 struct mpi3_config_page_header cfg_hdr; 5503 struct mpi3_config_request cfg_req; 5504 u32 page_address; 5505 5506 memset(phy_pg1, 0, pg_sz); 5507 memset(&cfg_hdr, 0, sizeof(cfg_hdr)); 5508 memset(&cfg_req, 0, sizeof(cfg_req)); 5509 5510 cfg_req.function = MPI3_FUNCTION_CONFIG; 5511 cfg_req.action = MPI3_CONFIG_ACTION_PAGE_HEADER; 5512 cfg_req.page_type = MPI3_CONFIG_PAGETYPE_SAS_PHY; 5513 cfg_req.page_number = 1; 5514 cfg_req.page_address = 0; 5515 5516 if (mpi3mr_process_cfg_req(mrioc, &cfg_req, NULL, 5517 MPI3MR_INTADMCMD_TIMEOUT, ioc_status, &cfg_hdr, sizeof(cfg_hdr))) { 5518 ioc_err(mrioc, "sas phy page1 header read failed\n"); 5519 goto out_failed; 5520 } 5521 if (*ioc_status != MPI3_IOCSTATUS_SUCCESS) { 5522 ioc_err(mrioc, "sas phy page1 header read failed with ioc_status(0x%04x)\n", 5523 *ioc_status); 5524 goto out_failed; 5525 } 5526 cfg_req.action = MPI3_CONFIG_ACTION_READ_CURRENT; 5527 page_address = ((form & MPI3_SAS_PHY_PGAD_FORM_MASK) | 5528 (form_spec & MPI3_SAS_PHY_PGAD_PHY_NUMBER_MASK)); 5529 cfg_req.page_address = cpu_to_le32(page_address); 5530 if (mpi3mr_process_cfg_req(mrioc, &cfg_req, &cfg_hdr, 5531 MPI3MR_INTADMCMD_TIMEOUT, ioc_status, phy_pg1, pg_sz)) { 5532 ioc_err(mrioc, "sas phy page1 read failed\n"); 5533 goto out_failed; 5534 } 5535 return 0; 5536 
out_failed: 5537 return -1; 5538 } 5539 5540 5541 /** 5542 * mpi3mr_cfg_get_sas_exp_pg0 - Read current SAS Expander page0 5543 * @mrioc: Adapter instance reference 5544 * @ioc_status: Pointer to return ioc status 5545 * @exp_pg0: Pointer to return SAS Expander page 0 5546 * @pg_sz: Size of the memory allocated to the page pointer 5547 * @form: The form to be used for addressing the page 5548 * @form_spec: Form specific information like device handle 5549 * 5550 * This is handler for config page read for a specific SAS 5551 * Expander page0. The ioc_status has the controller returned 5552 * ioc_status. This routine doesn't check ioc_status to decide 5553 * whether the page read is success or not and it is the callers 5554 * responsibility. 5555 * 5556 * Return: 0 on success, non-zero on failure. 5557 */ 5558 int mpi3mr_cfg_get_sas_exp_pg0(struct mpi3mr_ioc *mrioc, u16 *ioc_status, 5559 struct mpi3_sas_expander_page0 *exp_pg0, u16 pg_sz, u32 form, 5560 u32 form_spec) 5561 { 5562 struct mpi3_config_page_header cfg_hdr; 5563 struct mpi3_config_request cfg_req; 5564 u32 page_address; 5565 5566 memset(exp_pg0, 0, pg_sz); 5567 memset(&cfg_hdr, 0, sizeof(cfg_hdr)); 5568 memset(&cfg_req, 0, sizeof(cfg_req)); 5569 5570 cfg_req.function = MPI3_FUNCTION_CONFIG; 5571 cfg_req.action = MPI3_CONFIG_ACTION_PAGE_HEADER; 5572 cfg_req.page_type = MPI3_CONFIG_PAGETYPE_SAS_EXPANDER; 5573 cfg_req.page_number = 0; 5574 cfg_req.page_address = 0; 5575 5576 if (mpi3mr_process_cfg_req(mrioc, &cfg_req, NULL, 5577 MPI3MR_INTADMCMD_TIMEOUT, ioc_status, &cfg_hdr, sizeof(cfg_hdr))) { 5578 ioc_err(mrioc, "expander page0 header read failed\n"); 5579 goto out_failed; 5580 } 5581 if (*ioc_status != MPI3_IOCSTATUS_SUCCESS) { 5582 ioc_err(mrioc, "expander page0 header read failed with ioc_status(0x%04x)\n", 5583 *ioc_status); 5584 goto out_failed; 5585 } 5586 cfg_req.action = MPI3_CONFIG_ACTION_READ_CURRENT; 5587 page_address = ((form & MPI3_SAS_EXPAND_PGAD_FORM_MASK) | 5588 (form_spec & 
(MPI3_SAS_EXPAND_PGAD_PHYNUM_MASK | 5589 MPI3_SAS_EXPAND_PGAD_HANDLE_MASK))); 5590 cfg_req.page_address = cpu_to_le32(page_address); 5591 if (mpi3mr_process_cfg_req(mrioc, &cfg_req, &cfg_hdr, 5592 MPI3MR_INTADMCMD_TIMEOUT, ioc_status, exp_pg0, pg_sz)) { 5593 ioc_err(mrioc, "expander page0 read failed\n"); 5594 goto out_failed; 5595 } 5596 return 0; 5597 out_failed: 5598 return -1; 5599 } 5600 5601 /** 5602 * mpi3mr_cfg_get_sas_exp_pg1 - Read current SAS Expander page1 5603 * @mrioc: Adapter instance reference 5604 * @ioc_status: Pointer to return ioc status 5605 * @exp_pg1: Pointer to return SAS Expander page 1 5606 * @pg_sz: Size of the memory allocated to the page pointer 5607 * @form: The form to be used for addressing the page 5608 * @form_spec: Form specific information like phy number 5609 * 5610 * This is handler for config page read for a specific SAS 5611 * Expander page1. The ioc_status has the controller returned 5612 * ioc_status. This routine doesn't check ioc_status to decide 5613 * whether the page read is success or not and it is the callers 5614 * responsibility. 5615 * 5616 * Return: 0 on success, non-zero on failure. 
5617 */ 5618 int mpi3mr_cfg_get_sas_exp_pg1(struct mpi3mr_ioc *mrioc, u16 *ioc_status, 5619 struct mpi3_sas_expander_page1 *exp_pg1, u16 pg_sz, u32 form, 5620 u32 form_spec) 5621 { 5622 struct mpi3_config_page_header cfg_hdr; 5623 struct mpi3_config_request cfg_req; 5624 u32 page_address; 5625 5626 memset(exp_pg1, 0, pg_sz); 5627 memset(&cfg_hdr, 0, sizeof(cfg_hdr)); 5628 memset(&cfg_req, 0, sizeof(cfg_req)); 5629 5630 cfg_req.function = MPI3_FUNCTION_CONFIG; 5631 cfg_req.action = MPI3_CONFIG_ACTION_PAGE_HEADER; 5632 cfg_req.page_type = MPI3_CONFIG_PAGETYPE_SAS_EXPANDER; 5633 cfg_req.page_number = 1; 5634 cfg_req.page_address = 0; 5635 5636 if (mpi3mr_process_cfg_req(mrioc, &cfg_req, NULL, 5637 MPI3MR_INTADMCMD_TIMEOUT, ioc_status, &cfg_hdr, sizeof(cfg_hdr))) { 5638 ioc_err(mrioc, "expander page1 header read failed\n"); 5639 goto out_failed; 5640 } 5641 if (*ioc_status != MPI3_IOCSTATUS_SUCCESS) { 5642 ioc_err(mrioc, "expander page1 header read failed with ioc_status(0x%04x)\n", 5643 *ioc_status); 5644 goto out_failed; 5645 } 5646 cfg_req.action = MPI3_CONFIG_ACTION_READ_CURRENT; 5647 page_address = ((form & MPI3_SAS_EXPAND_PGAD_FORM_MASK) | 5648 (form_spec & (MPI3_SAS_EXPAND_PGAD_PHYNUM_MASK | 5649 MPI3_SAS_EXPAND_PGAD_HANDLE_MASK))); 5650 cfg_req.page_address = cpu_to_le32(page_address); 5651 if (mpi3mr_process_cfg_req(mrioc, &cfg_req, &cfg_hdr, 5652 MPI3MR_INTADMCMD_TIMEOUT, ioc_status, exp_pg1, pg_sz)) { 5653 ioc_err(mrioc, "expander page1 read failed\n"); 5654 goto out_failed; 5655 } 5656 return 0; 5657 out_failed: 5658 return -1; 5659 } 5660 5661 /** 5662 * mpi3mr_cfg_get_enclosure_pg0 - Read current Enclosure page0 5663 * @mrioc: Adapter instance reference 5664 * @ioc_status: Pointer to return ioc status 5665 * @encl_pg0: Pointer to return Enclosure page 0 5666 * @pg_sz: Size of the memory allocated to the page pointer 5667 * @form: The form to be used for addressing the page 5668 * @form_spec: Form specific information like device handle 5669 * 5670 * This 
is handler for config page read for a specific Enclosure 5671 * page0. The ioc_status has the controller returned ioc_status. 5672 * This routine doesn't check ioc_status to decide whether the 5673 * page read is success or not and it is the callers 5674 * responsibility. 5675 * 5676 * Return: 0 on success, non-zero on failure. 5677 */ 5678 int mpi3mr_cfg_get_enclosure_pg0(struct mpi3mr_ioc *mrioc, u16 *ioc_status, 5679 struct mpi3_enclosure_page0 *encl_pg0, u16 pg_sz, u32 form, 5680 u32 form_spec) 5681 { 5682 struct mpi3_config_page_header cfg_hdr; 5683 struct mpi3_config_request cfg_req; 5684 u32 page_address; 5685 5686 memset(encl_pg0, 0, pg_sz); 5687 memset(&cfg_hdr, 0, sizeof(cfg_hdr)); 5688 memset(&cfg_req, 0, sizeof(cfg_req)); 5689 5690 cfg_req.function = MPI3_FUNCTION_CONFIG; 5691 cfg_req.action = MPI3_CONFIG_ACTION_PAGE_HEADER; 5692 cfg_req.page_type = MPI3_CONFIG_PAGETYPE_ENCLOSURE; 5693 cfg_req.page_number = 0; 5694 cfg_req.page_address = 0; 5695 5696 if (mpi3mr_process_cfg_req(mrioc, &cfg_req, NULL, 5697 MPI3MR_INTADMCMD_TIMEOUT, ioc_status, &cfg_hdr, sizeof(cfg_hdr))) { 5698 ioc_err(mrioc, "enclosure page0 header read failed\n"); 5699 goto out_failed; 5700 } 5701 if (*ioc_status != MPI3_IOCSTATUS_SUCCESS) { 5702 ioc_err(mrioc, "enclosure page0 header read failed with ioc_status(0x%04x)\n", 5703 *ioc_status); 5704 goto out_failed; 5705 } 5706 cfg_req.action = MPI3_CONFIG_ACTION_READ_CURRENT; 5707 page_address = ((form & MPI3_ENCLOS_PGAD_FORM_MASK) | 5708 (form_spec & MPI3_ENCLOS_PGAD_HANDLE_MASK)); 5709 cfg_req.page_address = cpu_to_le32(page_address); 5710 if (mpi3mr_process_cfg_req(mrioc, &cfg_req, &cfg_hdr, 5711 MPI3MR_INTADMCMD_TIMEOUT, ioc_status, encl_pg0, pg_sz)) { 5712 ioc_err(mrioc, "enclosure page0 read failed\n"); 5713 goto out_failed; 5714 } 5715 return 0; 5716 out_failed: 5717 return -1; 5718 } 5719 5720 5721 /** 5722 * mpi3mr_cfg_get_sas_io_unit_pg0 - Read current SASIOUnit page0 5723 * @mrioc: Adapter instance reference 5724 * 
@sas_io_unit_pg0: Pointer to return SAS IO Unit page 0 5725 * @pg_sz: Size of the memory allocated to the page pointer 5726 * 5727 * This is handler for config page read for the SAS IO Unit 5728 * page0. This routine checks ioc_status to decide whether the 5729 * page read is success or not. 5730 * 5731 * Return: 0 on success, non-zero on failure. 5732 */ 5733 int mpi3mr_cfg_get_sas_io_unit_pg0(struct mpi3mr_ioc *mrioc, 5734 struct mpi3_sas_io_unit_page0 *sas_io_unit_pg0, u16 pg_sz) 5735 { 5736 struct mpi3_config_page_header cfg_hdr; 5737 struct mpi3_config_request cfg_req; 5738 u16 ioc_status = 0; 5739 5740 memset(sas_io_unit_pg0, 0, pg_sz); 5741 memset(&cfg_hdr, 0, sizeof(cfg_hdr)); 5742 memset(&cfg_req, 0, sizeof(cfg_req)); 5743 5744 cfg_req.function = MPI3_FUNCTION_CONFIG; 5745 cfg_req.action = MPI3_CONFIG_ACTION_PAGE_HEADER; 5746 cfg_req.page_type = MPI3_CONFIG_PAGETYPE_SAS_IO_UNIT; 5747 cfg_req.page_number = 0; 5748 cfg_req.page_address = 0; 5749 5750 if (mpi3mr_process_cfg_req(mrioc, &cfg_req, NULL, 5751 MPI3MR_INTADMCMD_TIMEOUT, &ioc_status, &cfg_hdr, sizeof(cfg_hdr))) { 5752 ioc_err(mrioc, "sas io unit page0 header read failed\n"); 5753 goto out_failed; 5754 } 5755 if (ioc_status != MPI3_IOCSTATUS_SUCCESS) { 5756 ioc_err(mrioc, "sas io unit page0 header read failed with ioc_status(0x%04x)\n", 5757 ioc_status); 5758 goto out_failed; 5759 } 5760 cfg_req.action = MPI3_CONFIG_ACTION_READ_CURRENT; 5761 5762 if (mpi3mr_process_cfg_req(mrioc, &cfg_req, &cfg_hdr, 5763 MPI3MR_INTADMCMD_TIMEOUT, &ioc_status, sas_io_unit_pg0, pg_sz)) { 5764 ioc_err(mrioc, "sas io unit page0 read failed\n"); 5765 goto out_failed; 5766 } 5767 if (ioc_status != MPI3_IOCSTATUS_SUCCESS) { 5768 ioc_err(mrioc, "sas io unit page0 read failed with ioc_status(0x%04x)\n", 5769 ioc_status); 5770 goto out_failed; 5771 } 5772 return 0; 5773 out_failed: 5774 return -1; 5775 } 5776 5777 /** 5778 * mpi3mr_cfg_get_sas_io_unit_pg1 - Read current SASIOUnit page1 5779 * @mrioc: Adapter instance reference 
5780 * @sas_io_unit_pg1: Pointer to return SAS IO Unit page 1 5781 * @pg_sz: Size of the memory allocated to the page pointer 5782 * 5783 * This is handler for config page read for the SAS IO Unit 5784 * page1. This routine checks ioc_status to decide whether the 5785 * page read is success or not. 5786 * 5787 * Return: 0 on success, non-zero on failure. 5788 */ 5789 int mpi3mr_cfg_get_sas_io_unit_pg1(struct mpi3mr_ioc *mrioc, 5790 struct mpi3_sas_io_unit_page1 *sas_io_unit_pg1, u16 pg_sz) 5791 { 5792 struct mpi3_config_page_header cfg_hdr; 5793 struct mpi3_config_request cfg_req; 5794 u16 ioc_status = 0; 5795 5796 memset(sas_io_unit_pg1, 0, pg_sz); 5797 memset(&cfg_hdr, 0, sizeof(cfg_hdr)); 5798 memset(&cfg_req, 0, sizeof(cfg_req)); 5799 5800 cfg_req.function = MPI3_FUNCTION_CONFIG; 5801 cfg_req.action = MPI3_CONFIG_ACTION_PAGE_HEADER; 5802 cfg_req.page_type = MPI3_CONFIG_PAGETYPE_SAS_IO_UNIT; 5803 cfg_req.page_number = 1; 5804 cfg_req.page_address = 0; 5805 5806 if (mpi3mr_process_cfg_req(mrioc, &cfg_req, NULL, 5807 MPI3MR_INTADMCMD_TIMEOUT, &ioc_status, &cfg_hdr, sizeof(cfg_hdr))) { 5808 ioc_err(mrioc, "sas io unit page1 header read failed\n"); 5809 goto out_failed; 5810 } 5811 if (ioc_status != MPI3_IOCSTATUS_SUCCESS) { 5812 ioc_err(mrioc, "sas io unit page1 header read failed with ioc_status(0x%04x)\n", 5813 ioc_status); 5814 goto out_failed; 5815 } 5816 cfg_req.action = MPI3_CONFIG_ACTION_READ_CURRENT; 5817 5818 if (mpi3mr_process_cfg_req(mrioc, &cfg_req, &cfg_hdr, 5819 MPI3MR_INTADMCMD_TIMEOUT, &ioc_status, sas_io_unit_pg1, pg_sz)) { 5820 ioc_err(mrioc, "sas io unit page1 read failed\n"); 5821 goto out_failed; 5822 } 5823 if (ioc_status != MPI3_IOCSTATUS_SUCCESS) { 5824 ioc_err(mrioc, "sas io unit page1 read failed with ioc_status(0x%04x)\n", 5825 ioc_status); 5826 goto out_failed; 5827 } 5828 return 0; 5829 out_failed: 5830 return -1; 5831 } 5832 5833 /** 5834 * mpi3mr_cfg_set_sas_io_unit_pg1 - Write SASIOUnit page1 5835 * @mrioc: Adapter instance reference 
5836 * @sas_io_unit_pg1: Pointer to the SAS IO Unit page 1 to write 5837 * @pg_sz: Size of the memory allocated to the page pointer 5838 * 5839 * This is handler for config page write for the SAS IO Unit 5840 * page1. This routine checks ioc_status to decide whether the 5841 * page read is success or not. This will modify both current 5842 * and persistent page. 5843 * 5844 * Return: 0 on success, non-zero on failure. 5845 */ 5846 int mpi3mr_cfg_set_sas_io_unit_pg1(struct mpi3mr_ioc *mrioc, 5847 struct mpi3_sas_io_unit_page1 *sas_io_unit_pg1, u16 pg_sz) 5848 { 5849 struct mpi3_config_page_header cfg_hdr; 5850 struct mpi3_config_request cfg_req; 5851 u16 ioc_status = 0; 5852 5853 memset(&cfg_hdr, 0, sizeof(cfg_hdr)); 5854 memset(&cfg_req, 0, sizeof(cfg_req)); 5855 5856 cfg_req.function = MPI3_FUNCTION_CONFIG; 5857 cfg_req.action = MPI3_CONFIG_ACTION_PAGE_HEADER; 5858 cfg_req.page_type = MPI3_CONFIG_PAGETYPE_SAS_IO_UNIT; 5859 cfg_req.page_number = 1; 5860 cfg_req.page_address = 0; 5861 5862 if (mpi3mr_process_cfg_req(mrioc, &cfg_req, NULL, 5863 MPI3MR_INTADMCMD_TIMEOUT, &ioc_status, &cfg_hdr, sizeof(cfg_hdr))) { 5864 ioc_err(mrioc, "sas io unit page1 header read failed\n"); 5865 goto out_failed; 5866 } 5867 if (ioc_status != MPI3_IOCSTATUS_SUCCESS) { 5868 ioc_err(mrioc, "sas io unit page1 header read failed with ioc_status(0x%04x)\n", 5869 ioc_status); 5870 goto out_failed; 5871 } 5872 cfg_req.action = MPI3_CONFIG_ACTION_WRITE_CURRENT; 5873 5874 if (mpi3mr_process_cfg_req(mrioc, &cfg_req, &cfg_hdr, 5875 MPI3MR_INTADMCMD_TIMEOUT, &ioc_status, sas_io_unit_pg1, pg_sz)) { 5876 ioc_err(mrioc, "sas io unit page1 write current failed\n"); 5877 goto out_failed; 5878 } 5879 if (ioc_status != MPI3_IOCSTATUS_SUCCESS) { 5880 ioc_err(mrioc, "sas io unit page1 write current failed with ioc_status(0x%04x)\n", 5881 ioc_status); 5882 goto out_failed; 5883 } 5884 5885 cfg_req.action = MPI3_CONFIG_ACTION_WRITE_PERSISTENT; 5886 5887 if (mpi3mr_process_cfg_req(mrioc, &cfg_req, &cfg_hdr, 
5888 MPI3MR_INTADMCMD_TIMEOUT, &ioc_status, sas_io_unit_pg1, pg_sz)) { 5889 ioc_err(mrioc, "sas io unit page1 write persistent failed\n"); 5890 goto out_failed; 5891 } 5892 if (ioc_status != MPI3_IOCSTATUS_SUCCESS) { 5893 ioc_err(mrioc, "sas io unit page1 write persistent failed with ioc_status(0x%04x)\n", 5894 ioc_status); 5895 goto out_failed; 5896 } 5897 return 0; 5898 out_failed: 5899 return -1; 5900 } 5901 5902 /** 5903 * mpi3mr_cfg_get_driver_pg1 - Read current Driver page1 5904 * @mrioc: Adapter instance reference 5905 * @driver_pg1: Pointer to return Driver page 1 5906 * @pg_sz: Size of the memory allocated to the page pointer 5907 * 5908 * This is handler for config page read for the Driver page1. 5909 * This routine checks ioc_status to decide whether the page 5910 * read is success or not. 5911 * 5912 * Return: 0 on success, non-zero on failure. 5913 */ 5914 int mpi3mr_cfg_get_driver_pg1(struct mpi3mr_ioc *mrioc, 5915 struct mpi3_driver_page1 *driver_pg1, u16 pg_sz) 5916 { 5917 struct mpi3_config_page_header cfg_hdr; 5918 struct mpi3_config_request cfg_req; 5919 u16 ioc_status = 0; 5920 5921 memset(driver_pg1, 0, pg_sz); 5922 memset(&cfg_hdr, 0, sizeof(cfg_hdr)); 5923 memset(&cfg_req, 0, sizeof(cfg_req)); 5924 5925 cfg_req.function = MPI3_FUNCTION_CONFIG; 5926 cfg_req.action = MPI3_CONFIG_ACTION_PAGE_HEADER; 5927 cfg_req.page_type = MPI3_CONFIG_PAGETYPE_DRIVER; 5928 cfg_req.page_number = 1; 5929 cfg_req.page_address = 0; 5930 5931 if (mpi3mr_process_cfg_req(mrioc, &cfg_req, NULL, 5932 MPI3MR_INTADMCMD_TIMEOUT, &ioc_status, &cfg_hdr, sizeof(cfg_hdr))) { 5933 ioc_err(mrioc, "driver page1 header read failed\n"); 5934 goto out_failed; 5935 } 5936 if (ioc_status != MPI3_IOCSTATUS_SUCCESS) { 5937 ioc_err(mrioc, "driver page1 header read failed with ioc_status(0x%04x)\n", 5938 ioc_status); 5939 goto out_failed; 5940 } 5941 cfg_req.action = MPI3_CONFIG_ACTION_READ_CURRENT; 5942 5943 if (mpi3mr_process_cfg_req(mrioc, &cfg_req, &cfg_hdr, 5944 
MPI3MR_INTADMCMD_TIMEOUT, &ioc_status, driver_pg1, pg_sz)) { 5945 ioc_err(mrioc, "driver page1 read failed\n"); 5946 goto out_failed; 5947 } 5948 if (ioc_status != MPI3_IOCSTATUS_SUCCESS) { 5949 ioc_err(mrioc, "driver page1 read failed with ioc_status(0x%04x)\n", 5950 ioc_status); 5951 goto out_failed; 5952 } 5953 return 0; 5954 out_failed: 5955 return -1; 5956 } 5957