// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Driver for Broadcom MPI3 Storage Controllers
 *
 * Copyright (C) 2017-2022 Broadcom Inc.
 *  (mailto: mpi3mr-linuxdrv.pdl@broadcom.com)
 *
 */

#include "mpi3mr.h"
#include <linux/io-64-nonatomic-lo-hi.h>

/* Forward declarations for helpers defined later in this file. */
static int
mpi3mr_issue_reset(struct mpi3mr_ioc *mrioc, u16 reset_type, u32 reset_reason);
static int mpi3mr_setup_admin_qpair(struct mpi3mr_ioc *mrioc);
static void mpi3mr_process_factsdata(struct mpi3mr_ioc *mrioc,
	struct mpi3_ioc_facts_data *facts_data);
static void mpi3mr_pel_wait_complete(struct mpi3mr_ioc *mrioc,
	struct mpi3mr_drv_cmd *drv_cmd);

/* Number of io_uring poll queues requested via module parameter (read-only). */
static int poll_queues;
module_param(poll_queues, int, 0444);
MODULE_PARM_DESC(poll_queues, "Number of queues for io_uring poll mode. (Range 1 - 126)");

#if defined(writeq) && defined(CONFIG_64BIT)
/**
 * mpi3mr_writeq - 64-bit MMIO write
 * @b: 64-bit value to write
 * @addr: iomapped register address
 *
 * Uses the native writeq() when the platform provides one.
 */
static inline void mpi3mr_writeq(__u64 b, volatile void __iomem *addr)
{
	writeq(b, addr);
}
#else
/**
 * mpi3mr_writeq - 64-bit MMIO write emulated as two 32-bit writes
 * @b: 64-bit value to write
 * @addr: iomapped register address
 *
 * Low dword is written first, then the high dword at addr + 4
 * (lo-hi ordering, matching the io-64-nonatomic-lo-hi.h include above).
 */
static inline void mpi3mr_writeq(__u64 b, volatile void __iomem *addr)
{
	__u64 data_out = b;

	writel((u32)(data_out), addr);
	writel((u32)(data_out >> 32), (addr + 4));
}
#endif

/**
 * mpi3mr_check_req_qfull - check whether an operational request queue is full
 * @op_req_q: operational request queue info
 *
 * The queue is full when the consumer index is exactly one ahead of
 * the producer index (including the wrap-around case where pi is at
 * the last entry and ci is 0).
 *
 * Return: true if the queue is full, false otherwise.
 */
static inline bool
mpi3mr_check_req_qfull(struct op_req_qinfo *op_req_q)
{
	u16 pi, ci, max_entries;
	bool is_qfull = false;

	pi = op_req_q->pi;
	/* ci is updated from reply processing context; read it once. */
	ci = READ_ONCE(op_req_q->ci);
	max_entries = op_req_q->num_requests;

	if ((ci == (pi + 1)) || ((!ci) && (pi == (max_entries - 1))))
		is_qfull = true;

	return is_qfull;
}

/**
 * mpi3mr_sync_irqs - wait for in-flight interrupt handlers to finish
 * @mrioc: Adapter instance reference
 *
 * Calls synchronize_irq() on every allocated MSI-X vector.
 */
static void mpi3mr_sync_irqs(struct mpi3mr_ioc *mrioc)
{
	u16 i, max_vectors;

	max_vectors = mrioc->intr_info_count;

	for (i = 0; i < max_vectors; i++)
		synchronize_irq(pci_irq_vector(mrioc->pdev, i));
}

/**
 * mpi3mr_ioc_disable_intr - mark interrupts disabled and drain handlers
 * @mrioc: Adapter instance reference
 *
 * Clears the software intr_enabled flag (checked by the ISRs) and then
 * synchronizes so no handler is still running when this returns.
 */
void mpi3mr_ioc_disable_intr(struct mpi3mr_ioc *mrioc)
{
	mrioc->intr_enabled = 0;
	mpi3mr_sync_irqs(mrioc);
}

/**
 * mpi3mr_ioc_enable_intr - mark interrupts enabled
 * @mrioc: Adapter instance reference
 *
 * Sets the software flag that allows the ISRs to process replies.
 */
void mpi3mr_ioc_enable_intr(struct mpi3mr_ioc *mrioc)
{
	mrioc->intr_enabled = 1;
}

/**
 * mpi3mr_cleanup_isr - free IRQs and interrupt bookkeeping
 * @mrioc: Adapter instance reference
 *
 * Disables interrupts, frees every requested IRQ, releases the
 * intr_info array and the PCI IRQ vectors.
 */
static void mpi3mr_cleanup_isr(struct mpi3mr_ioc *mrioc)
{
	u16 i;

	mpi3mr_ioc_disable_intr(mrioc);

	if (!mrioc->intr_info)
		return;

	for (i = 0; i < mrioc->intr_info_count; i++)
		free_irq(pci_irq_vector(mrioc->pdev, i),
		    (mrioc->intr_info + i));

	kfree(mrioc->intr_info);
	mrioc->intr_info = NULL;
	mrioc->intr_info_count = 0;
	mrioc->is_intr_info_set = false;
	pci_free_irq_vectors(mrioc->pdev);
}

/**
 * mpi3mr_add_sg_single - populate one simple SGE in place
 * @paddr: virtual address of the SGE to fill
 * @flags: MPI3 SGE flags
 * @length: data length in bytes
 * @dma_addr: DMA address of the data buffer
 *
 * Fields are stored little-endian as required by the controller.
 */
void mpi3mr_add_sg_single(void *paddr, u8 flags, u32 length,
	dma_addr_t dma_addr)
{
	struct mpi3_sge_common *sgel = paddr;

	sgel->flags = flags;
	sgel->length = cpu_to_le32(length);
	sgel->address = cpu_to_le64(dma_addr);
}

/**
 * mpi3mr_build_zero_len_sge - build a zero-length end-of-list SGE
 * @paddr: virtual address of the SGE to fill
 *
 * Used for requests that carry no data; address is set to all-ones (-1).
 */
void mpi3mr_build_zero_len_sge(void *paddr)
{
	u8 sgl_flags = MPI3MR_SGEFLAGS_SYSTEM_SIMPLE_END_OF_LIST;

	mpi3mr_add_sg_single(paddr, sgl_flags, 0, -1);
}

/**
 * mpi3mr_get_reply_virt_addr - map a reply-frame DMA address to a virtual one
 * @mrioc: Adapter instance reference
 * @phys_addr: DMA address reported by the controller
 *
 * Return: virtual address inside the reply buffer pool, or NULL if
 * @phys_addr is zero or outside the pool's DMA range.
 */
void *mpi3mr_get_reply_virt_addr(struct mpi3mr_ioc *mrioc,
	dma_addr_t phys_addr)
{
	if (!phys_addr)
		return NULL;

	/* Reject addresses outside the reply buffer pool. */
	if ((phys_addr < mrioc->reply_buf_dma) ||
	    (phys_addr > mrioc->reply_buf_dma_max_address))
		return NULL;

	return mrioc->reply_buf + (phys_addr - mrioc->reply_buf_dma);
}

/**
 * mpi3mr_get_sensebuf_virt_addr - map a sense-buffer DMA address to virtual
 * @mrioc: Adapter instance reference
 * @phys_addr: DMA address reported by the controller
 *
 * Return: virtual address inside the sense buffer pool, or NULL for a
 * zero address. NOTE(review): unlike the reply variant, no upper-bound
 * check is done here — presumably the firmware only returns addresses
 * it was given; confirm before relying on this with untrusted input.
 */
void *mpi3mr_get_sensebuf_virt_addr(struct mpi3mr_ioc *mrioc,
	dma_addr_t phys_addr)
{
	if (!phys_addr)
		return NULL;

	return mrioc->sense_buf + (phys_addr - mrioc->sense_buf_dma);
}

/**
 * mpi3mr_repost_reply_buf - return a reply frame to the free queue
 * @mrioc: Adapter instance reference
 * @reply_dma: DMA address of the consumed reply frame
 *
 * Stores the frame address at the current host index, advances the
 * index with wrap-around and informs the controller by writing the
 * new index to the sysif register, all under the free-queue lock.
 */
static void mpi3mr_repost_reply_buf(struct mpi3mr_ioc *mrioc,
	u64 reply_dma)
{
	u32 old_idx = 0;
	unsigned long flags;

	spin_lock_irqsave(&mrioc->reply_free_queue_lock, flags);
	old_idx = mrioc->reply_free_queue_host_index;
	mrioc->reply_free_queue_host_index = (
	    (mrioc->reply_free_queue_host_index ==
	    (mrioc->reply_free_qsz - 1)) ? 0 :
	    (mrioc->reply_free_queue_host_index + 1));
	mrioc->reply_free_q[old_idx] = cpu_to_le64(reply_dma);
	writel(mrioc->reply_free_queue_host_index,
	    &mrioc->sysif_regs->reply_free_host_index);
	spin_unlock_irqrestore(&mrioc->reply_free_queue_lock, flags);
}

/**
 * mpi3mr_repost_sense_buf - return a sense buffer to the free queue
 * @mrioc: Adapter instance reference
 * @sense_buf_dma: DMA address of the consumed sense buffer
 *
 * Same scheme as mpi3mr_repost_reply_buf() but for the sense buffer
 * free queue and its host index register.
 */
void mpi3mr_repost_sense_buf(struct mpi3mr_ioc *mrioc,
	u64 sense_buf_dma)
{
	u32 old_idx = 0;
	unsigned long flags;

	spin_lock_irqsave(&mrioc->sbq_lock, flags);
	old_idx = mrioc->sbq_host_index;
	mrioc->sbq_host_index = ((mrioc->sbq_host_index ==
	    (mrioc->sense_buf_q_sz - 1)) ? 0 :
	    (mrioc->sbq_host_index + 1));
	mrioc->sense_buf_q[old_idx] = cpu_to_le64(sense_buf_dma);
	writel(mrioc->sbq_host_index,
	    &mrioc->sysif_regs->sense_buffer_free_host_index);
	spin_unlock_irqrestore(&mrioc->sbq_lock, flags);
}

/**
 * mpi3mr_print_event_data - log a human-readable line for an event
 * @mrioc: Adapter instance reference
 * @event_reply: event notification reply frame
 *
 * Events carrying interesting payload get a formatted message and
 * return early; the rest map to a short description printed at the end.
 */
static void mpi3mr_print_event_data(struct mpi3mr_ioc *mrioc,
	struct mpi3_event_notification_reply *event_reply)
{
	char *desc = NULL;
	u16 event;

	event = event_reply->event;

	switch (event) {
	case MPI3_EVENT_LOG_DATA:
		desc = "Log Data";
		break;
	case MPI3_EVENT_CHANGE:
		desc = "Event Change";
		break;
	case MPI3_EVENT_GPIO_INTERRUPT:
		desc = "GPIO Interrupt";
		break;
	case MPI3_EVENT_CABLE_MGMT:
		desc = "Cable Management";
		break;
	case MPI3_EVENT_ENERGY_PACK_CHANGE:
		desc = "Energy Pack Change";
		break;
	case MPI3_EVENT_DEVICE_ADDED:
	{
		struct mpi3_device_page0 *event_data =
		    (struct mpi3_device_page0 *)event_reply->event_data;
		ioc_info(mrioc, "Device Added: dev=0x%04x Form=0x%x\n",
		    event_data->dev_handle, event_data->device_form);
		return;
	}
	case MPI3_EVENT_DEVICE_INFO_CHANGED:
	{
		struct mpi3_device_page0 *event_data =
		    (struct mpi3_device_page0 *)event_reply->event_data;
		ioc_info(mrioc, "Device Info Changed: dev=0x%04x Form=0x%x\n",
		    event_data->dev_handle, event_data->device_form);
		return;
	}
	case MPI3_EVENT_DEVICE_STATUS_CHANGE:
	{
		struct mpi3_event_data_device_status_change *event_data =
		    (struct mpi3_event_data_device_status_change *)event_reply->event_data;
		ioc_info(mrioc, "Device status Change: dev=0x%04x RC=0x%x\n",
		    event_data->dev_handle, event_data->reason_code);
		return;
	}
	case MPI3_EVENT_SAS_DISCOVERY:
	{
		struct mpi3_event_data_sas_discovery *event_data =
		    (struct mpi3_event_data_sas_discovery *)event_reply->event_data;
		ioc_info(mrioc, "SAS Discovery: (%s) status (0x%08x)\n",
		    (event_data->reason_code == MPI3_EVENT_SAS_DISC_RC_STARTED) ?
		    "start" : "stop",
		    le32_to_cpu(event_data->discovery_status));
		return;
	}
	case MPI3_EVENT_SAS_BROADCAST_PRIMITIVE:
		desc = "SAS Broadcast Primitive";
		break;
	case MPI3_EVENT_SAS_NOTIFY_PRIMITIVE:
		desc = "SAS Notify Primitive";
		break;
	case MPI3_EVENT_SAS_INIT_DEVICE_STATUS_CHANGE:
		desc = "SAS Init Device Status Change";
		break;
	case MPI3_EVENT_SAS_INIT_TABLE_OVERFLOW:
		desc = "SAS Init Table Overflow";
		break;
	case MPI3_EVENT_SAS_TOPOLOGY_CHANGE_LIST:
		desc = "SAS Topology Change List";
		break;
	case MPI3_EVENT_ENCL_DEVICE_STATUS_CHANGE:
		desc = "Enclosure Device Status Change";
		break;
	case MPI3_EVENT_ENCL_DEVICE_ADDED:
		desc = "Enclosure Added";
		break;
	case MPI3_EVENT_HARD_RESET_RECEIVED:
		desc = "Hard Reset Received";
		break;
	case MPI3_EVENT_SAS_PHY_COUNTER:
		desc = "SAS PHY Counter";
		break;
	case MPI3_EVENT_SAS_DEVICE_DISCOVERY_ERROR:
		desc = "SAS Device Discovery Error";
		break;
	case MPI3_EVENT_PCIE_TOPOLOGY_CHANGE_LIST:
		desc = "PCIE Topology Change List";
		break;
	case MPI3_EVENT_PCIE_ENUMERATION:
	{
		struct mpi3_event_data_pcie_enumeration *event_data =
		    (struct mpi3_event_data_pcie_enumeration *)event_reply->event_data;
		ioc_info(mrioc, "PCIE Enumeration: (%s)",
		    (event_data->reason_code ==
		    MPI3_EVENT_PCIE_ENUM_RC_STARTED) ? "start" : "stop");
		if (event_data->enumeration_status)
			ioc_info(mrioc, "enumeration_status(0x%08x)\n",
			    le32_to_cpu(event_data->enumeration_status));
		return;
	}
	case MPI3_EVENT_PREPARE_FOR_RESET:
		desc = "Prepare For Reset";
		break;
	}

	if (!desc)
		return;

	ioc_info(mrioc, "%s\n", desc);
}

/**
 * mpi3mr_handle_events - record change count, log and dispatch an event
 * @mrioc: Adapter instance reference
 * @def_reply: default reply frame carrying an event notification
 */
static void mpi3mr_handle_events(struct mpi3mr_ioc *mrioc,
	struct mpi3_default_reply *def_reply)
{
	struct mpi3_event_notification_reply *event_reply =
	    (struct mpi3_event_notification_reply *)def_reply;

	mrioc->change_count = le16_to_cpu(event_reply->ioc_change_count);
	mpi3mr_print_event_data(mrioc, event_reply);
	mpi3mr_os_handle_events(mrioc, event_reply);
}

/**
 * mpi3mr_get_drv_cmd - map a host tag to its driver command tracker
 * @mrioc: Adapter instance reference
 * @host_tag: host tag from the reply descriptor/frame
 * @def_reply: reply frame, used only for the INVALID-tag event path
 *
 * Fixed tags map to dedicated trackers; device-removal and event-ack
 * tags index into their respective arrays. An INVALID tag carrying an
 * event notification is dispatched to mpi3mr_handle_events() here.
 *
 * Return: matching driver command tracker or NULL.
 */
static struct mpi3mr_drv_cmd *
mpi3mr_get_drv_cmd(struct mpi3mr_ioc *mrioc, u16 host_tag,
	struct mpi3_default_reply *def_reply)
{
	u16 idx;

	switch (host_tag) {
	case MPI3MR_HOSTTAG_INITCMDS:
		return &mrioc->init_cmds;
	case MPI3MR_HOSTTAG_CFG_CMDS:
		return &mrioc->cfg_cmds;
	case MPI3MR_HOSTTAG_BSG_CMDS:
		return &mrioc->bsg_cmds;
	case MPI3MR_HOSTTAG_BLK_TMS:
		return &mrioc->host_tm_cmds;
	case MPI3MR_HOSTTAG_PEL_ABORT:
		return &mrioc->pel_abort_cmd;
	case MPI3MR_HOSTTAG_PEL_WAIT:
		return &mrioc->pel_cmds;
	case MPI3MR_HOSTTAG_TRANSPORT_CMDS:
		return &mrioc->transport_cmds;
	case MPI3MR_HOSTTAG_INVALID:
		if (def_reply && def_reply->function ==
		    MPI3_FUNCTION_EVENT_NOTIFICATION)
			mpi3mr_handle_events(mrioc, def_reply);
		return NULL;
	default:
		break;
	}
	if (host_tag >= MPI3MR_HOSTTAG_DEVRMCMD_MIN &&
	    host_tag <= MPI3MR_HOSTTAG_DEVRMCMD_MAX) {
		idx = host_tag - MPI3MR_HOSTTAG_DEVRMCMD_MIN;
		return &mrioc->dev_rmhs_cmds[idx];
	}

	if (host_tag >= MPI3MR_HOSTTAG_EVTACKCMD_MIN &&
	    host_tag <= MPI3MR_HOSTTAG_EVTACKCMD_MAX) {
		idx = host_tag - MPI3MR_HOSTTAG_EVTACKCMD_MIN;
		return &mrioc->evtack_cmds[idx];
	}

	return NULL;
}

/**
 * mpi3mr_process_admin_reply_desc - process one admin reply descriptor
 * @mrioc: Adapter instance reference
 * @reply_desc: reply descriptor to decode
 * @reply_dma: out parameter; DMA address of an address-type reply frame
 *             (0 if none), so the caller can repost it
 *
 * Decodes status/address/success descriptor types, extracts host tag,
 * ioc_status and loginfo, copies the reply frame into the matching
 * driver command tracker and completes/calls back its waiter. For SCSI
 * IO replies the referenced sense buffer is reposted at the end.
 */
static void mpi3mr_process_admin_reply_desc(struct mpi3mr_ioc *mrioc,
	struct mpi3_default_reply_descriptor *reply_desc, u64 *reply_dma)
{
	u16 reply_desc_type, host_tag = 0;
	u16 ioc_status = MPI3_IOCSTATUS_SUCCESS;
	u32 ioc_loginfo = 0;
	struct mpi3_status_reply_descriptor *status_desc;
	struct mpi3_address_reply_descriptor *addr_desc;
	struct mpi3_success_reply_descriptor *success_desc;
	struct mpi3_default_reply *def_reply = NULL;
	struct mpi3mr_drv_cmd *cmdptr = NULL;
	struct mpi3_scsi_io_reply *scsi_reply;
	u8 *sense_buf = NULL;

	*reply_dma = 0;
	reply_desc_type = le16_to_cpu(reply_desc->reply_flags) &
	    MPI3_REPLY_DESCRIPT_FLAGS_TYPE_MASK;
	switch (reply_desc_type) {
	case MPI3_REPLY_DESCRIPT_FLAGS_TYPE_STATUS:
		status_desc = (struct mpi3_status_reply_descriptor *)reply_desc;
		host_tag = le16_to_cpu(status_desc->host_tag);
		ioc_status = le16_to_cpu(status_desc->ioc_status);
		/* Log-info is only valid when the LOGINFOAVAIL bit is set. */
		if (ioc_status &
		    MPI3_REPLY_DESCRIPT_STATUS_IOCSTATUS_LOGINFOAVAIL)
			ioc_loginfo = le32_to_cpu(status_desc->ioc_log_info);
		ioc_status &= MPI3_REPLY_DESCRIPT_STATUS_IOCSTATUS_STATUS_MASK;
		break;
	case MPI3_REPLY_DESCRIPT_FLAGS_TYPE_ADDRESS_REPLY:
		addr_desc = (struct mpi3_address_reply_descriptor *)reply_desc;
		*reply_dma = le64_to_cpu(addr_desc->reply_frame_address);
		def_reply = mpi3mr_get_reply_virt_addr(mrioc, *reply_dma);
		if (!def_reply)
			goto out;
		host_tag = le16_to_cpu(def_reply->host_tag);
		ioc_status = le16_to_cpu(def_reply->ioc_status);
		if (ioc_status &
		    MPI3_REPLY_DESCRIPT_STATUS_IOCSTATUS_LOGINFOAVAIL)
			ioc_loginfo = le32_to_cpu(def_reply->ioc_log_info);
		ioc_status &= MPI3_REPLY_DESCRIPT_STATUS_IOCSTATUS_STATUS_MASK;
		if (def_reply->function == MPI3_FUNCTION_SCSI_IO) {
			scsi_reply = (struct mpi3_scsi_io_reply *)def_reply;
			sense_buf = mpi3mr_get_sensebuf_virt_addr(mrioc,
			    le64_to_cpu(scsi_reply->sense_data_buffer_address));
		}
		break;
	case MPI3_REPLY_DESCRIPT_FLAGS_TYPE_SUCCESS:
		success_desc = (struct mpi3_success_reply_descriptor *)reply_desc;
		host_tag = le16_to_cpu(success_desc->host_tag);
		break;
	default:
		break;
	}

	cmdptr = mpi3mr_get_drv_cmd(mrioc, host_tag, def_reply);
	if (cmdptr) {
		if (cmdptr->state & MPI3MR_CMD_PENDING) {
			cmdptr->state |= MPI3MR_CMD_COMPLETE;
			cmdptr->ioc_loginfo = ioc_loginfo;
			cmdptr->ioc_status = ioc_status;
			cmdptr->state &= ~MPI3MR_CMD_PENDING;
			if (def_reply) {
				cmdptr->state |= MPI3MR_CMD_REPLY_VALID;
				memcpy((u8 *)cmdptr->reply, (u8 *)def_reply,
				    mrioc->reply_sz);
			}
			if (cmdptr->is_waiting) {
				complete(&cmdptr->done);
				cmdptr->is_waiting = 0;
			} else if (cmdptr->callback)
				cmdptr->callback(mrioc, cmdptr);
		}
	}
out:
	/* sense_buf is only set on the SCSI IO path, so scsi_reply is valid. */
	if (sense_buf)
		mpi3mr_repost_sense_buf(mrioc,
		    le64_to_cpu(scsi_reply->sense_data_buffer_address));
}

/**
 * mpi3mr_process_admin_reply_q - drain the admin reply queue
 * @mrioc: Adapter instance reference
 *
 * Walks reply descriptors while their phase bit matches the expected
 * phase, processing each and reposting consumed reply frames. The
 * phase flips on every wrap of the consumer index. The new consumer
 * index is written to the controller once at the end.
 *
 * Return: number of reply descriptors processed (0 if none pending).
 */
static int mpi3mr_process_admin_reply_q(struct mpi3mr_ioc *mrioc)
{
	u32 exp_phase = mrioc->admin_reply_ephase;
	u32 admin_reply_ci = mrioc->admin_reply_ci;
	u32 num_admin_replies = 0;
	u64 reply_dma = 0;
	struct mpi3_default_reply_descriptor *reply_desc;

	reply_desc = (struct mpi3_default_reply_descriptor *)mrioc->admin_reply_base +
	    admin_reply_ci;

	/* Phase mismatch means the controller has not posted a new entry. */
	if ((le16_to_cpu(reply_desc->reply_flags) &
	    MPI3_REPLY_DESCRIPT_FLAGS_PHASE_MASK) != exp_phase)
		return 0;

	do {
		mrioc->admin_req_ci = le16_to_cpu(reply_desc->request_queue_ci);
		mpi3mr_process_admin_reply_desc(mrioc, reply_desc, &reply_dma);
		if (reply_dma)
			mpi3mr_repost_reply_buf(mrioc, reply_dma);
		num_admin_replies++;
		if (++admin_reply_ci == mrioc->num_admin_replies) {
			admin_reply_ci = 0;
			exp_phase ^= 1;
		}
		reply_desc =
		    (struct mpi3_default_reply_descriptor *)mrioc->admin_reply_base +
		    admin_reply_ci;
		if ((le16_to_cpu(reply_desc->reply_flags) &
		    MPI3_REPLY_DESCRIPT_FLAGS_PHASE_MASK) != exp_phase)
			break;
	} while (1);

	writel(admin_reply_ci, &mrioc->sysif_regs->admin_reply_queue_ci);
	mrioc->admin_reply_ci = admin_reply_ci;
	mrioc->admin_reply_ephase = exp_phase;

	return num_admin_replies;
}

/**
 * mpi3mr_get_reply_desc - get reply descriptor frame corresponding to
 * queue's consumer index from operational reply descriptor queue.
 * @op_reply_q: op_reply_qinfo object
 * @reply_ci: operational reply descriptor's queue consumer index
 *
 * The queue is segmented; the segment is selected by ci / segment_qd
 * and the entry within it by ci % segment_qd.
 *
 * Returns reply descriptor frame address
 */
static inline struct mpi3_default_reply_descriptor *
mpi3mr_get_reply_desc(struct op_reply_qinfo *op_reply_q, u32 reply_ci)
{
	void *segment_base_addr;
	struct segments *segments = op_reply_q->q_segments;
	struct mpi3_default_reply_descriptor *reply_desc = NULL;

	segment_base_addr =
	    segments[reply_ci / op_reply_q->segment_qd].segment;
	reply_desc = (struct mpi3_default_reply_descriptor *)segment_base_addr +
	    (reply_ci % op_reply_q->segment_qd);
	return reply_desc;
}

/**
 * mpi3mr_process_op_reply_q - Operational reply queue handler
 * @mrioc: Adapter instance reference
 * @op_reply_q: Operational reply queue info
 *
 * Checks the specific operational reply queue and drains the
 * reply queue entries until the queue is empty and process the
 * individual reply descriptors.
 *
 * Return: 0 if queue is already processed,or number of reply
 * descriptors processed.
 */
int mpi3mr_process_op_reply_q(struct mpi3mr_ioc *mrioc,
	struct op_reply_qinfo *op_reply_q)
{
	struct op_req_qinfo *op_req_q;
	u32 exp_phase;
	u32 reply_ci;
	u32 num_op_reply = 0;
	u64 reply_dma = 0;
	struct mpi3_default_reply_descriptor *reply_desc;
	u16 req_q_idx = 0, reply_qidx;

	reply_qidx = op_reply_q->qid - 1;

	/* Single-consumer guard: bail out if someone else is draining. */
	if (!atomic_add_unless(&op_reply_q->in_use, 1, 1))
		return 0;

	exp_phase = op_reply_q->ephase;
	reply_ci = op_reply_q->ci;

	reply_desc = mpi3mr_get_reply_desc(op_reply_q, reply_ci);
	if ((le16_to_cpu(reply_desc->reply_flags) &
	    MPI3_REPLY_DESCRIPT_FLAGS_PHASE_MASK) != exp_phase) {
		atomic_dec(&op_reply_q->in_use);
		return 0;
	}

	do {
		req_q_idx = le16_to_cpu(reply_desc->request_queue_id) - 1;
		op_req_q = &mrioc->req_qinfo[req_q_idx];

		/* Publish the request-queue ci the controller reported. */
		WRITE_ONCE(op_req_q->ci, le16_to_cpu(reply_desc->request_queue_ci));
		mpi3mr_process_op_reply_desc(mrioc, reply_desc, &reply_dma,
		    reply_qidx);
		atomic_dec(&op_reply_q->pend_ios);
		if (reply_dma)
			mpi3mr_repost_reply_buf(mrioc, reply_dma);
		num_op_reply++;

		if (++reply_ci == op_reply_q->num_replies) {
			reply_ci = 0;
			exp_phase ^= 1;
		}

		reply_desc = mpi3mr_get_reply_desc(op_reply_q, reply_ci);

		if ((le16_to_cpu(reply_desc->reply_flags) &
		    MPI3_REPLY_DESCRIPT_FLAGS_PHASE_MASK) != exp_phase)
			break;
		/*
		 * Exit completion loop to avoid CPU lockup
		 * Ensure remaining completion happens from threaded ISR.
		 */
		if (num_op_reply > mrioc->max_host_ios) {
			op_reply_q->enable_irq_poll = true;
			break;
		}

	} while (1);

	writel(reply_ci,
	    &mrioc->sysif_regs->oper_queue_indexes[reply_qidx].consumer_index);
	op_reply_q->ci = reply_ci;
	op_reply_q->ephase = exp_phase;

	atomic_dec(&op_reply_q->in_use);
	return num_op_reply;
}

/**
 * mpi3mr_blk_mq_poll - Operational reply queue handler
 * @shost: SCSI Host reference
 * @queue_num: Request queue number (w.r.t OS it is hardware context number)
 *
 * Checks the specific operational reply queue and drains the
 * reply queue entries until the queue is empty and process the
 * individual reply descriptors.
 *
 * Return: 0 if queue is already processed, or number of reply
 * descriptors processed.
 */
int mpi3mr_blk_mq_poll(struct Scsi_Host *shost, unsigned int queue_num)
{
	int num_entries = 0;
	struct mpi3mr_ioc *mrioc;

	mrioc = (struct mpi3mr_ioc *)shost->hostdata;

	/* Do not touch the queues while a reset is in flight. */
	if ((mrioc->reset_in_progress || mrioc->prepare_for_reset))
		return 0;

	num_entries = mpi3mr_process_op_reply_q(mrioc,
	    &mrioc->op_reply_qinfo[queue_num]);

	return num_entries;
}

/**
 * mpi3mr_isr_primary - primary (hard) interrupt handler
 * @irq: IRQ number
 * @privdata: Interrupt info (per-vector)
 *
 * MSI-X vector 0 also services the admin reply queue; every vector
 * with an attached operational reply queue drains that queue.
 *
 * Return: IRQ_HANDLED if any reply was processed, else IRQ_NONE.
 */
static irqreturn_t mpi3mr_isr_primary(int irq, void *privdata)
{
	struct mpi3mr_intr_info *intr_info = privdata;
	struct mpi3mr_ioc *mrioc;
	u16 midx;
	u32 num_admin_replies = 0, num_op_reply = 0;

	if (!intr_info)
		return IRQ_NONE;

	mrioc = intr_info->mrioc;

	if (!mrioc->intr_enabled)
		return IRQ_NONE;

	midx = intr_info->msix_index;

	if (!midx)
		num_admin_replies = mpi3mr_process_admin_reply_q(mrioc);
	if (intr_info->op_reply_q)
		num_op_reply = mpi3mr_process_op_reply_q(mrioc,
		    intr_info->op_reply_q);

	if (num_admin_replies || num_op_reply)
		return IRQ_HANDLED;
	else
		return IRQ_NONE;
}

/**
 * mpi3mr_isr - top-level interrupt handler
 * @irq: IRQ number
 * @privdata: Interrupt info (per-vector)
 *
 * Runs the primary handler, then — if polling was enabled for the
 * queue and I/Os are still pending — masks the vector and wakes the
 * threaded handler (mpi3mr_isr_poll) to continue draining.
 *
 * Return: primary handler's result, or IRQ_WAKE_THREAD.
 */
static irqreturn_t mpi3mr_isr(int irq, void *privdata)
{
	struct mpi3mr_intr_info *intr_info = privdata;
	struct mpi3mr_ioc *mrioc;
	u16 midx;
	int ret;

	if (!intr_info)
		return IRQ_NONE;

	mrioc = intr_info->mrioc;
	midx = intr_info->msix_index;
	/* Call primary ISR routine */
	ret = mpi3mr_isr_primary(irq, privdata);

	/*
	 * If more IOs are expected, schedule IRQ polling thread.
	 * Otherwise exit from ISR.
	 */
	if (!intr_info->op_reply_q)
		return ret;

	if (!intr_info->op_reply_q->enable_irq_poll ||
	    !atomic_read(&intr_info->op_reply_q->pend_ios))
		return ret;

	/* Keep the vector quiet until the threaded handler re-enables it. */
	disable_irq_nosync(pci_irq_vector(mrioc->pdev, midx));

	return IRQ_WAKE_THREAD;
}

/**
 * mpi3mr_isr_poll - Reply queue polling routine
 * @irq: IRQ
 * @privdata: Interrupt info
 *
 * poll for pending I/O completions in a loop until pending I/Os
 * present or controller queue depth I/Os are processed.
 *
 * Return: IRQ_NONE or IRQ_HANDLED
 */
static irqreturn_t mpi3mr_isr_poll(int irq, void *privdata)
{
	struct mpi3mr_intr_info *intr_info = privdata;
	struct mpi3mr_ioc *mrioc;
	u16 midx;
	u32 num_op_reply = 0;

	if (!intr_info || !intr_info->op_reply_q)
		return IRQ_NONE;

	mrioc = intr_info->mrioc;
	midx = intr_info->msix_index;

	/* Poll for pending IOs completions */
	do {
		if (!mrioc->intr_enabled)
			break;

		if (!midx)
			mpi3mr_process_admin_reply_q(mrioc);
		if (intr_info->op_reply_q)
			num_op_reply +=
			    mpi3mr_process_op_reply_q(mrioc,
				intr_info->op_reply_q);

		/* Back off briefly between polls. */
		usleep_range(MPI3MR_IRQ_POLL_SLEEP, 10 * MPI3MR_IRQ_POLL_SLEEP);

	} while (atomic_read(&intr_info->op_reply_q->pend_ios) &&
	    (num_op_reply < mrioc->max_host_ios));

	intr_info->op_reply_q->enable_irq_poll = false;
	/* Re-enable the vector that mpi3mr_isr() masked. */
	enable_irq(pci_irq_vector(mrioc->pdev, midx));

	return IRQ_HANDLED;
}

/**
 * mpi3mr_request_irq - Request IRQ and register ISR
 * @mrioc: Adapter
 * instance reference
 * @index: IRQ vector index
 *
 * Request threaded ISR with primary ISR and secondary
 * (polling) handler for the given vector.
 *
 * Return: 0 on success and non zero on failures.
 */
static inline int mpi3mr_request_irq(struct mpi3mr_ioc *mrioc, u16 index)
{
	struct pci_dev *pdev = mrioc->pdev;
	struct mpi3mr_intr_info *intr_info = mrioc->intr_info + index;
	int retval = 0;

	intr_info->mrioc = mrioc;
	intr_info->msix_index = index;
	intr_info->op_reply_q = NULL;

	/* e.g. "mpi3mr0-msix3" — shown in /proc/interrupts. */
	snprintf(intr_info->name, MPI3MR_NAME_LENGTH, "%s%d-msix%d",
	    mrioc->driver_name, mrioc->id, index);

	retval = request_threaded_irq(pci_irq_vector(pdev, index), mpi3mr_isr,
	    mpi3mr_isr_poll, IRQF_SHARED, intr_info->name, intr_info);
	if (retval) {
		ioc_err(mrioc, "%s: Unable to allocate interrupt %d!\n",
		    intr_info->name, pci_irq_vector(pdev, index));
		return retval;
	}

	return retval;
}

/**
 * mpi3mr_calc_poll_queues - validate the requested poll queue count
 * @mrioc: Adapter instance reference
 * @max_vectors: number of MSI-X vectors available
 *
 * Keeps the requested io_uring poll queue count only if enough vectors
 * remain for the admin and default queues; otherwise zeroes it.
 */
static void mpi3mr_calc_poll_queues(struct mpi3mr_ioc *mrioc, u16 max_vectors)
{
	if (!mrioc->requested_poll_qcount)
		return;

	/* Reserved for Admin and Default Queue */
	if (max_vectors > 2 &&
	    (mrioc->requested_poll_qcount < max_vectors - 2)) {
		ioc_info(mrioc,
		    "enabled polled queues (%d) msix (%d)\n",
		    mrioc->requested_poll_qcount, max_vectors);
	} else {
		ioc_info(mrioc,
		    "disabled polled queues (%d) msix (%d) because of no resources for default queue\n",
		    mrioc->requested_poll_qcount, max_vectors);
		mrioc->requested_poll_qcount = 0;
	}
}

/**
 * mpi3mr_setup_isr - Setup ISR for the controller
 * @mrioc: Adapter instance reference
 * @setup_one: Request one IRQ or more
 *
 * Allocate IRQ vectors and call mpi3mr_request_irq to setup ISR
 *
 * Return: 0 on success and non zero on failures.
754 */ 755 static int mpi3mr_setup_isr(struct mpi3mr_ioc *mrioc, u8 setup_one) 756 { 757 unsigned int irq_flags = PCI_IRQ_MSIX; 758 int max_vectors, min_vec; 759 int retval; 760 int i; 761 struct irq_affinity desc = { .pre_vectors = 1, .post_vectors = 1 }; 762 763 if (mrioc->is_intr_info_set) 764 return 0; 765 766 mpi3mr_cleanup_isr(mrioc); 767 768 if (setup_one || reset_devices) { 769 max_vectors = 1; 770 retval = pci_alloc_irq_vectors(mrioc->pdev, 771 1, max_vectors, irq_flags); 772 if (retval < 0) { 773 ioc_err(mrioc, "cannot allocate irq vectors, ret %d\n", 774 retval); 775 goto out_failed; 776 } 777 } else { 778 max_vectors = 779 min_t(int, mrioc->cpu_count + 1 + 780 mrioc->requested_poll_qcount, mrioc->msix_count); 781 782 mpi3mr_calc_poll_queues(mrioc, max_vectors); 783 784 ioc_info(mrioc, 785 "MSI-X vectors supported: %d, no of cores: %d,", 786 mrioc->msix_count, mrioc->cpu_count); 787 ioc_info(mrioc, 788 "MSI-x vectors requested: %d poll_queues %d\n", 789 max_vectors, mrioc->requested_poll_qcount); 790 791 desc.post_vectors = mrioc->requested_poll_qcount; 792 min_vec = desc.pre_vectors + desc.post_vectors; 793 irq_flags |= PCI_IRQ_AFFINITY | PCI_IRQ_ALL_TYPES; 794 795 retval = pci_alloc_irq_vectors_affinity(mrioc->pdev, 796 min_vec, max_vectors, irq_flags, &desc); 797 798 if (retval < 0) { 799 ioc_err(mrioc, "cannot allocate irq vectors, ret %d\n", 800 retval); 801 goto out_failed; 802 } 803 804 805 /* 806 * If only one MSI-x is allocated, then MSI-x 0 will be shared 807 * between Admin queue and operational queue 808 */ 809 if (retval == min_vec) 810 mrioc->op_reply_q_offset = 0; 811 else if (retval != (max_vectors)) { 812 ioc_info(mrioc, 813 "allocated vectors (%d) are less than configured (%d)\n", 814 retval, max_vectors); 815 } 816 817 max_vectors = retval; 818 mrioc->op_reply_q_offset = (max_vectors > 1) ? 
1 : 0; 819 820 mpi3mr_calc_poll_queues(mrioc, max_vectors); 821 822 } 823 824 mrioc->intr_info = kzalloc(sizeof(struct mpi3mr_intr_info) * max_vectors, 825 GFP_KERNEL); 826 if (!mrioc->intr_info) { 827 retval = -ENOMEM; 828 pci_free_irq_vectors(mrioc->pdev); 829 goto out_failed; 830 } 831 for (i = 0; i < max_vectors; i++) { 832 retval = mpi3mr_request_irq(mrioc, i); 833 if (retval) { 834 mrioc->intr_info_count = i; 835 goto out_failed; 836 } 837 } 838 if (reset_devices || !setup_one) 839 mrioc->is_intr_info_set = true; 840 mrioc->intr_info_count = max_vectors; 841 mpi3mr_ioc_enable_intr(mrioc); 842 return 0; 843 844 out_failed: 845 mpi3mr_cleanup_isr(mrioc); 846 847 return retval; 848 } 849 850 static const struct { 851 enum mpi3mr_iocstate value; 852 char *name; 853 } mrioc_states[] = { 854 { MRIOC_STATE_READY, "ready" }, 855 { MRIOC_STATE_FAULT, "fault" }, 856 { MRIOC_STATE_RESET, "reset" }, 857 { MRIOC_STATE_BECOMING_READY, "becoming ready" }, 858 { MRIOC_STATE_RESET_REQUESTED, "reset requested" }, 859 { MRIOC_STATE_UNRECOVERABLE, "unrecoverable error" }, 860 }; 861 862 static const char *mpi3mr_iocstate_name(enum mpi3mr_iocstate mrioc_state) 863 { 864 int i; 865 char *name = NULL; 866 867 for (i = 0; i < ARRAY_SIZE(mrioc_states); i++) { 868 if (mrioc_states[i].value == mrioc_state) { 869 name = mrioc_states[i].name; 870 break; 871 } 872 } 873 return name; 874 } 875 876 /* Reset reason to name mapper structure*/ 877 static const struct { 878 enum mpi3mr_reset_reason value; 879 char *name; 880 } mpi3mr_reset_reason_codes[] = { 881 { MPI3MR_RESET_FROM_BRINGUP, "timeout in bringup" }, 882 { MPI3MR_RESET_FROM_FAULT_WATCH, "fault" }, 883 { MPI3MR_RESET_FROM_APP, "application invocation" }, 884 { MPI3MR_RESET_FROM_EH_HOS, "error handling" }, 885 { MPI3MR_RESET_FROM_TM_TIMEOUT, "TM timeout" }, 886 { MPI3MR_RESET_FROM_APP_TIMEOUT, "application command timeout" }, 887 { MPI3MR_RESET_FROM_MUR_FAILURE, "MUR failure" }, 888 { MPI3MR_RESET_FROM_CTLR_CLEANUP, "timeout in 
controller cleanup" }, 889 { MPI3MR_RESET_FROM_CIACTIV_FAULT, "component image activation fault" }, 890 { MPI3MR_RESET_FROM_PE_TIMEOUT, "port enable timeout" }, 891 { MPI3MR_RESET_FROM_TSU_TIMEOUT, "time stamp update timeout" }, 892 { MPI3MR_RESET_FROM_DELREQQ_TIMEOUT, "delete request queue timeout" }, 893 { MPI3MR_RESET_FROM_DELREPQ_TIMEOUT, "delete reply queue timeout" }, 894 { 895 MPI3MR_RESET_FROM_CREATEREPQ_TIMEOUT, 896 "create request queue timeout" 897 }, 898 { 899 MPI3MR_RESET_FROM_CREATEREQQ_TIMEOUT, 900 "create reply queue timeout" 901 }, 902 { MPI3MR_RESET_FROM_IOCFACTS_TIMEOUT, "IOC facts timeout" }, 903 { MPI3MR_RESET_FROM_IOCINIT_TIMEOUT, "IOC init timeout" }, 904 { MPI3MR_RESET_FROM_EVTNOTIFY_TIMEOUT, "event notify timeout" }, 905 { MPI3MR_RESET_FROM_EVTACK_TIMEOUT, "event acknowledgment timeout" }, 906 { 907 MPI3MR_RESET_FROM_CIACTVRST_TIMER, 908 "component image activation timeout" 909 }, 910 { 911 MPI3MR_RESET_FROM_GETPKGVER_TIMEOUT, 912 "get package version timeout" 913 }, 914 { MPI3MR_RESET_FROM_SYSFS, "sysfs invocation" }, 915 { MPI3MR_RESET_FROM_SYSFS_TIMEOUT, "sysfs TM timeout" }, 916 { MPI3MR_RESET_FROM_FIRMWARE, "firmware asynchronous reset" }, 917 { MPI3MR_RESET_FROM_CFG_REQ_TIMEOUT, "configuration request timeout"}, 918 { MPI3MR_RESET_FROM_SAS_TRANSPORT_TIMEOUT, "timeout of a SAS transport layer request" }, 919 }; 920 921 /** 922 * mpi3mr_reset_rc_name - get reset reason code name 923 * @reason_code: reset reason code value 924 * 925 * Map reset reason to an NULL terminated ASCII string 926 * 927 * Return: name corresponding to reset reason value or NULL. 
 */
static const char *mpi3mr_reset_rc_name(enum mpi3mr_reset_reason reason_code)
{
	int i;
	char *name = NULL;

	/* Linear scan of the reason-code table; NULL if not found. */
	for (i = 0; i < ARRAY_SIZE(mpi3mr_reset_reason_codes); i++) {
		if (mpi3mr_reset_reason_codes[i].value == reason_code) {
			name = mpi3mr_reset_reason_codes[i].name;
			break;
		}
	}
	return name;
}

/* Reset type to name mapper structure*/
static const struct {
	u16 reset_type;
	char *name;
} mpi3mr_reset_types[] = {
	{ MPI3_SYSIF_HOST_DIAG_RESET_ACTION_SOFT_RESET, "soft" },
	{ MPI3_SYSIF_HOST_DIAG_RESET_ACTION_DIAG_FAULT, "diag fault" },
};

/**
 * mpi3mr_reset_type_name - get reset type name
 * @reset_type: reset type value
 *
 * Map reset type to a NULL-terminated ASCII string
 *
 * Return: name corresponding to reset type value or NULL.
 */
static const char *mpi3mr_reset_type_name(u16 reset_type)
{
	int i;
	char *name = NULL;

	for (i = 0; i < ARRAY_SIZE(mpi3mr_reset_types); i++) {
		if (mpi3mr_reset_types[i].reset_type == reset_type) {
			name = mpi3mr_reset_types[i].name;
			break;
		}
	}
	return name;
}

/**
 * mpi3mr_print_fault_info - Display fault information
 * @mrioc: Adapter instance reference
 *
 * Display the controller fault information if there is a
 * controller fault.
 *
 * Return: Nothing.
 */
void mpi3mr_print_fault_info(struct mpi3mr_ioc *mrioc)
{
	u32 ioc_status, code, code1, code2, code3;

	ioc_status = readl(&mrioc->sysif_regs->ioc_status);

	/* Only read the fault registers when the FAULT status bit is set. */
	if (ioc_status & MPI3_SYSIF_IOC_STATUS_FAULT) {
		code = readl(&mrioc->sysif_regs->fault);
		code1 = readl(&mrioc->sysif_regs->fault_info[0]);
		code2 = readl(&mrioc->sysif_regs->fault_info[1]);
		code3 = readl(&mrioc->sysif_regs->fault_info[2]);

		ioc_info(mrioc,
		    "fault code(0x%08X): Additional code: (0x%08X:0x%08X:0x%08X)\n",
		    code, code1, code2, code3);
	}
}

/**
 * mpi3mr_get_iocstate - Get IOC State
 * @mrioc: Adapter instance reference
 *
 * Return a proper IOC state enum based on the IOC status and
 * IOC configuration and unrecoverable state of the controller.
 *
 * Return: Current IOC state.
 */
enum mpi3mr_iocstate mpi3mr_get_iocstate(struct mpi3mr_ioc *mrioc)
{
	u32 ioc_status, ioc_config;
	u8 ready, enabled;

	ioc_status = readl(&mrioc->sysif_regs->ioc_status);
	ioc_config = readl(&mrioc->sysif_regs->ioc_configuration);

	/* Driver-side flags take precedence over register state. */
	if (mrioc->unrecoverable)
		return MRIOC_STATE_UNRECOVERABLE;
	if (ioc_status & MPI3_SYSIF_IOC_STATUS_FAULT)
		return MRIOC_STATE_FAULT;

	ready = (ioc_status & MPI3_SYSIF_IOC_STATUS_READY);
	enabled = (ioc_config & MPI3_SYSIF_IOC_CONFIG_ENABLE_IOC);

	/* Decode the ready/enabled combination into a state. */
	if (ready && enabled)
		return MRIOC_STATE_READY;
	if ((!ready) && (!enabled))
		return MRIOC_STATE_RESET;
	if ((!ready) && (enabled))
		return MRIOC_STATE_BECOMING_READY;

	/* ready && !enabled: host asked for a reset. */
	return MRIOC_STATE_RESET_REQUESTED;
}

/**
 * mpi3mr_clear_reset_history - clear reset history
 * @mrioc: Adapter instance reference
 *
 * Write the reset history bit in IOC status to clear the bit,
 * if it is already set.
 *
 * Return: Nothing.
 */
static inline void mpi3mr_clear_reset_history(struct mpi3mr_ioc *mrioc)
{
	u32 ioc_status;

	/* The bit is write-1-to-clear: writing the read-back value clears it. */
	ioc_status = readl(&mrioc->sysif_regs->ioc_status);
	if (ioc_status & MPI3_SYSIF_IOC_STATUS_RESET_HISTORY)
		writel(ioc_status, &mrioc->sysif_regs->ioc_status);
}

/**
 * mpi3mr_issue_and_process_mur - Message unit Reset handler
 * @mrioc: Adapter instance reference
 * @reset_reason: Reset reason code
 *
 * Issue Message unit Reset to the controller and wait for it to
 * be complete.
 *
 * Return: 0 on success, -1 on failure.
 */
static int mpi3mr_issue_and_process_mur(struct mpi3mr_ioc *mrioc,
	u32 reset_reason)
{
	u32 ioc_config, timeout, ioc_status;
	int retval = -1;

	ioc_info(mrioc, "Issuing Message unit Reset(MUR)\n");
	if (mrioc->unrecoverable) {
		ioc_info(mrioc, "IOC is unrecoverable MUR not issued\n");
		return retval;
	}
	mpi3mr_clear_reset_history(mrioc);
	/* Record the reason in scratchpad 0, then clear ENABLE_IOC to
	 * trigger the message unit reset. */
	writel(reset_reason, &mrioc->sysif_regs->scratchpad[0]);
	ioc_config = readl(&mrioc->sysif_regs->ioc_configuration);
	ioc_config &= ~MPI3_SYSIF_IOC_CONFIG_ENABLE_IOC;
	writel(ioc_config, &mrioc->sysif_regs->ioc_configuration);

	/* Poll every 100ms; timeout constant is in seconds, hence * 10. */
	timeout = MPI3MR_RESET_ACK_TIMEOUT * 10;
	do {
		ioc_status = readl(&mrioc->sysif_regs->ioc_status);
		if ((ioc_status & MPI3_SYSIF_IOC_STATUS_RESET_HISTORY)) {
			mpi3mr_clear_reset_history(mrioc);
			break;
		}
		if (ioc_status & MPI3_SYSIF_IOC_STATUS_FAULT) {
			mpi3mr_print_fault_info(mrioc);
			break;
		}
		msleep(100);
	} while (--timeout);

	/* Success only if we did not time out and the IOC is neither
	 * ready, faulted, nor still enabled. */
	ioc_config = readl(&mrioc->sysif_regs->ioc_configuration);
	if (timeout && !((ioc_status & MPI3_SYSIF_IOC_STATUS_READY) ||
	    (ioc_status & MPI3_SYSIF_IOC_STATUS_FAULT) ||
	    (ioc_config & MPI3_SYSIF_IOC_CONFIG_ENABLE_IOC)))
		retval = 0;

	ioc_info(mrioc, "Base IOC Sts/Config after %s MUR is (0x%x)/(0x%x)\n",
	    (!retval) ? "successful" : "failed", ioc_status, ioc_config);
	return retval;
}

/**
 * mpi3mr_revalidate_factsdata - validate IOCFacts parameters
 * during reset/resume
 * @mrioc: Adapter instance reference
 *
 * Return zero if the new IOCFacts parameters value is compatible with
 * older values else return -EPERM
 */
static int
mpi3mr_revalidate_factsdata(struct mpi3mr_ioc *mrioc)
{
	u16 dev_handle_bitmap_sz;
	void *removepend_bitmap;

	/* Shrinking any of these resources after a reset would break
	 * already-configured queues/buffers, so refuse. */
	if (mrioc->facts.reply_sz > mrioc->reply_sz) {
		ioc_err(mrioc,
		    "cannot increase reply size from %d to %d\n",
		    mrioc->reply_sz, mrioc->facts.reply_sz);
		return -EPERM;
	}

	if (mrioc->facts.max_op_reply_q < mrioc->num_op_reply_q) {
		ioc_err(mrioc,
		    "cannot reduce number of operational reply queues from %d to %d\n",
		    mrioc->num_op_reply_q,
		    mrioc->facts.max_op_reply_q);
		return -EPERM;
	}

	if (mrioc->facts.max_op_req_q < mrioc->num_op_req_q) {
		ioc_err(mrioc,
		    "cannot reduce number of operational request queues from %d to %d\n",
		    mrioc->num_op_req_q, mrioc->facts.max_op_req_q);
		return -EPERM;
	}

	if ((mrioc->sas_transport_enabled) && (mrioc->facts.ioc_capabilities &
	    MPI3_IOCFACTS_CAPABILITY_MULTIPATH_ENABLED))
		ioc_err(mrioc,
		    "critical error: multipath capability is enabled at the\n"
		    "\tcontroller while sas transport support is enabled at the\n"
		    "\tdriver, please reboot the system or reload the driver\n");

	/* One bit per device handle, rounded up to whole bytes. */
	dev_handle_bitmap_sz = mrioc->facts.max_devhandle / 8;
	if (mrioc->facts.max_devhandle % 8)
		dev_handle_bitmap_sz++;
	if (dev_handle_bitmap_sz > mrioc->dev_handle_bitmap_sz) {
		removepend_bitmap = krealloc(mrioc->removepend_bitmap,
		    dev_handle_bitmap_sz, GFP_KERNEL);
		if (!removepend_bitmap) {
			ioc_err(mrioc,
			    "failed to increase removepend_bitmap sz from: %d to %d\n",
			    mrioc->dev_handle_bitmap_sz, dev_handle_bitmap_sz);
			return -EPERM;
		}
		/* krealloc does not zero the grown tail; clear it. */
		memset(removepend_bitmap + mrioc->dev_handle_bitmap_sz, 0,
		    dev_handle_bitmap_sz - mrioc->dev_handle_bitmap_sz);
		mrioc->removepend_bitmap = removepend_bitmap;
		ioc_info(mrioc,
		    "increased dev_handle_bitmap_sz from %d to %d\n",
		    mrioc->dev_handle_bitmap_sz, dev_handle_bitmap_sz);
		mrioc->dev_handle_bitmap_sz = dev_handle_bitmap_sz;
	}

	return 0;
}

/**
 * mpi3mr_bring_ioc_ready - Bring controller to ready state
 * @mrioc: Adapter instance reference
 *
 * Set Enable IOC bit in IOC configuration register and wait for
 * the controller to become ready.
 *
 * Return: 0 on success, appropriate error on failure.
 */
static int mpi3mr_bring_ioc_ready(struct mpi3mr_ioc *mrioc)
{
	u32 ioc_config, ioc_status, timeout;
	int retval = 0;
	enum mpi3mr_iocstate ioc_state;
	u64 base_info;

	ioc_status = readl(&mrioc->sysif_regs->ioc_status);
	ioc_config = readl(&mrioc->sysif_regs->ioc_configuration);
	base_info = lo_hi_readq(&mrioc->sysif_regs->ioc_information);
	ioc_info(mrioc, "ioc_status(0x%08x), ioc_config(0x%08x), ioc_info(0x%016llx) at the bringup\n",
	    ioc_status, ioc_config, base_info);

	/*The timeout value is in 2sec unit, changing it to seconds*/
	mrioc->ready_timeout =
	    ((base_info & MPI3_SYSIF_IOC_INFO_LOW_TIMEOUT_MASK) >>
	    MPI3_SYSIF_IOC_INFO_LOW_TIMEOUT_SHIFT) * 2;

	ioc_info(mrioc, "ready timeout: %d seconds\n", mrioc->ready_timeout);

	ioc_state = mpi3mr_get_iocstate(mrioc);
	ioc_info(mrioc, "controller is in %s state during detection\n",
	    mpi3mr_iocstate_name(ioc_state));

	/* If a transition is already in flight, wait out the ready timeout
	 * before re-sampling the state. */
	if (ioc_state == MRIOC_STATE_BECOMING_READY ||
	    ioc_state == MRIOC_STATE_RESET_REQUESTED) {
		timeout = mrioc->ready_timeout * 10;
		do {
			msleep(100);
		} while (--timeout);

		ioc_state = mpi3mr_get_iocstate(mrioc);
ioc_info(mrioc, 1215 "controller is in %s state after waiting to reset\n", 1216 mpi3mr_iocstate_name(ioc_state)); 1217 } 1218 1219 if (ioc_state == MRIOC_STATE_READY) { 1220 ioc_info(mrioc, "issuing message unit reset (MUR) to bring to reset state\n"); 1221 retval = mpi3mr_issue_and_process_mur(mrioc, 1222 MPI3MR_RESET_FROM_BRINGUP); 1223 ioc_state = mpi3mr_get_iocstate(mrioc); 1224 if (retval) 1225 ioc_err(mrioc, 1226 "message unit reset failed with error %d current state %s\n", 1227 retval, mpi3mr_iocstate_name(ioc_state)); 1228 } 1229 if (ioc_state != MRIOC_STATE_RESET) { 1230 mpi3mr_print_fault_info(mrioc); 1231 ioc_info(mrioc, "issuing soft reset to bring to reset state\n"); 1232 retval = mpi3mr_issue_reset(mrioc, 1233 MPI3_SYSIF_HOST_DIAG_RESET_ACTION_SOFT_RESET, 1234 MPI3MR_RESET_FROM_BRINGUP); 1235 if (retval) { 1236 ioc_err(mrioc, 1237 "soft reset failed with error %d\n", retval); 1238 goto out_failed; 1239 } 1240 } 1241 ioc_state = mpi3mr_get_iocstate(mrioc); 1242 if (ioc_state != MRIOC_STATE_RESET) { 1243 ioc_err(mrioc, 1244 "cannot bring controller to reset state, current state: %s\n", 1245 mpi3mr_iocstate_name(ioc_state)); 1246 goto out_failed; 1247 } 1248 mpi3mr_clear_reset_history(mrioc); 1249 retval = mpi3mr_setup_admin_qpair(mrioc); 1250 if (retval) { 1251 ioc_err(mrioc, "failed to setup admin queues: error %d\n", 1252 retval); 1253 goto out_failed; 1254 } 1255 1256 ioc_info(mrioc, "bringing controller to ready state\n"); 1257 ioc_config = readl(&mrioc->sysif_regs->ioc_configuration); 1258 ioc_config |= MPI3_SYSIF_IOC_CONFIG_ENABLE_IOC; 1259 writel(ioc_config, &mrioc->sysif_regs->ioc_configuration); 1260 1261 timeout = mrioc->ready_timeout * 10; 1262 do { 1263 ioc_state = mpi3mr_get_iocstate(mrioc); 1264 if (ioc_state == MRIOC_STATE_READY) { 1265 ioc_info(mrioc, 1266 "successfully transitioned to %s state\n", 1267 mpi3mr_iocstate_name(ioc_state)); 1268 return 0; 1269 } 1270 msleep(100); 1271 } while (--timeout); 1272 1273 out_failed: 1274 ioc_state 
= mpi3mr_get_iocstate(mrioc); 1275 ioc_err(mrioc, 1276 "failed to bring to ready state, current state: %s\n", 1277 mpi3mr_iocstate_name(ioc_state)); 1278 return retval; 1279 } 1280 1281 /** 1282 * mpi3mr_soft_reset_success - Check softreset is success or not 1283 * @ioc_status: IOC status register value 1284 * @ioc_config: IOC config register value 1285 * 1286 * Check whether the soft reset is successful or not based on 1287 * IOC status and IOC config register values. 1288 * 1289 * Return: True when the soft reset is success, false otherwise. 1290 */ 1291 static inline bool 1292 mpi3mr_soft_reset_success(u32 ioc_status, u32 ioc_config) 1293 { 1294 if (!((ioc_status & MPI3_SYSIF_IOC_STATUS_READY) || 1295 (ioc_config & MPI3_SYSIF_IOC_CONFIG_ENABLE_IOC))) 1296 return true; 1297 return false; 1298 } 1299 1300 /** 1301 * mpi3mr_diagfault_success - Check diag fault is success or not 1302 * @mrioc: Adapter reference 1303 * @ioc_status: IOC status register value 1304 * 1305 * Check whether the controller hit diag reset fault code. 1306 * 1307 * Return: True when there is diag fault, false otherwise. 1308 */ 1309 static inline bool mpi3mr_diagfault_success(struct mpi3mr_ioc *mrioc, 1310 u32 ioc_status) 1311 { 1312 u32 fault; 1313 1314 if (!(ioc_status & MPI3_SYSIF_IOC_STATUS_FAULT)) 1315 return false; 1316 fault = readl(&mrioc->sysif_regs->fault) & MPI3_SYSIF_FAULT_CODE_MASK; 1317 if (fault == MPI3_SYSIF_FAULT_CODE_DIAG_FAULT_RESET) { 1318 mpi3mr_print_fault_info(mrioc); 1319 return true; 1320 } 1321 return false; 1322 } 1323 1324 /** 1325 * mpi3mr_set_diagsave - Set diag save bit for snapdump 1326 * @mrioc: Adapter reference 1327 * 1328 * Set diag save bit in IOC configuration register to enable 1329 * snapdump. 1330 * 1331 * Return: Nothing. 
1332 */ 1333 static inline void mpi3mr_set_diagsave(struct mpi3mr_ioc *mrioc) 1334 { 1335 u32 ioc_config; 1336 1337 ioc_config = readl(&mrioc->sysif_regs->ioc_configuration); 1338 ioc_config |= MPI3_SYSIF_IOC_CONFIG_DIAG_SAVE; 1339 writel(ioc_config, &mrioc->sysif_regs->ioc_configuration); 1340 } 1341 1342 /** 1343 * mpi3mr_issue_reset - Issue reset to the controller 1344 * @mrioc: Adapter reference 1345 * @reset_type: Reset type 1346 * @reset_reason: Reset reason code 1347 * 1348 * Unlock the host diagnostic registers and write the specific 1349 * reset type to that, wait for reset acknowledgment from the 1350 * controller, if the reset is not successful retry for the 1351 * predefined number of times. 1352 * 1353 * Return: 0 on success, non-zero on failure. 1354 */ 1355 static int mpi3mr_issue_reset(struct mpi3mr_ioc *mrioc, u16 reset_type, 1356 u32 reset_reason) 1357 { 1358 int retval = -1; 1359 u8 unlock_retry_count = 0; 1360 u32 host_diagnostic, ioc_status, ioc_config; 1361 u32 timeout = MPI3MR_RESET_ACK_TIMEOUT * 10; 1362 1363 if ((reset_type != MPI3_SYSIF_HOST_DIAG_RESET_ACTION_SOFT_RESET) && 1364 (reset_type != MPI3_SYSIF_HOST_DIAG_RESET_ACTION_DIAG_FAULT)) 1365 return retval; 1366 if (mrioc->unrecoverable) 1367 return retval; 1368 if (reset_reason == MPI3MR_RESET_FROM_FIRMWARE) { 1369 retval = 0; 1370 return retval; 1371 } 1372 1373 ioc_info(mrioc, "%s reset due to %s(0x%x)\n", 1374 mpi3mr_reset_type_name(reset_type), 1375 mpi3mr_reset_rc_name(reset_reason), reset_reason); 1376 1377 mpi3mr_clear_reset_history(mrioc); 1378 do { 1379 ioc_info(mrioc, 1380 "Write magic sequence to unlock host diag register (retry=%d)\n", 1381 ++unlock_retry_count); 1382 if (unlock_retry_count >= MPI3MR_HOSTDIAG_UNLOCK_RETRY_COUNT) { 1383 ioc_err(mrioc, 1384 "%s reset failed due to unlock failure, host_diagnostic(0x%08x)\n", 1385 mpi3mr_reset_type_name(reset_type), 1386 host_diagnostic); 1387 mrioc->unrecoverable = 1; 1388 return retval; 1389 } 1390 1391 
writel(MPI3_SYSIF_WRITE_SEQUENCE_KEY_VALUE_FLUSH, 1392 &mrioc->sysif_regs->write_sequence); 1393 writel(MPI3_SYSIF_WRITE_SEQUENCE_KEY_VALUE_1ST, 1394 &mrioc->sysif_regs->write_sequence); 1395 writel(MPI3_SYSIF_WRITE_SEQUENCE_KEY_VALUE_2ND, 1396 &mrioc->sysif_regs->write_sequence); 1397 writel(MPI3_SYSIF_WRITE_SEQUENCE_KEY_VALUE_3RD, 1398 &mrioc->sysif_regs->write_sequence); 1399 writel(MPI3_SYSIF_WRITE_SEQUENCE_KEY_VALUE_4TH, 1400 &mrioc->sysif_regs->write_sequence); 1401 writel(MPI3_SYSIF_WRITE_SEQUENCE_KEY_VALUE_5TH, 1402 &mrioc->sysif_regs->write_sequence); 1403 writel(MPI3_SYSIF_WRITE_SEQUENCE_KEY_VALUE_6TH, 1404 &mrioc->sysif_regs->write_sequence); 1405 usleep_range(1000, 1100); 1406 host_diagnostic = readl(&mrioc->sysif_regs->host_diagnostic); 1407 ioc_info(mrioc, 1408 "wrote magic sequence: retry_count(%d), host_diagnostic(0x%08x)\n", 1409 unlock_retry_count, host_diagnostic); 1410 } while (!(host_diagnostic & MPI3_SYSIF_HOST_DIAG_DIAG_WRITE_ENABLE)); 1411 1412 writel(reset_reason, &mrioc->sysif_regs->scratchpad[0]); 1413 writel(host_diagnostic | reset_type, 1414 &mrioc->sysif_regs->host_diagnostic); 1415 switch (reset_type) { 1416 case MPI3_SYSIF_HOST_DIAG_RESET_ACTION_SOFT_RESET: 1417 do { 1418 ioc_status = readl(&mrioc->sysif_regs->ioc_status); 1419 ioc_config = 1420 readl(&mrioc->sysif_regs->ioc_configuration); 1421 if ((ioc_status & MPI3_SYSIF_IOC_STATUS_RESET_HISTORY) 1422 && mpi3mr_soft_reset_success(ioc_status, ioc_config) 1423 ) { 1424 mpi3mr_clear_reset_history(mrioc); 1425 retval = 0; 1426 break; 1427 } 1428 msleep(100); 1429 } while (--timeout); 1430 mpi3mr_print_fault_info(mrioc); 1431 break; 1432 case MPI3_SYSIF_HOST_DIAG_RESET_ACTION_DIAG_FAULT: 1433 do { 1434 ioc_status = readl(&mrioc->sysif_regs->ioc_status); 1435 if (mpi3mr_diagfault_success(mrioc, ioc_status)) { 1436 retval = 0; 1437 break; 1438 } 1439 msleep(100); 1440 } while (--timeout); 1441 break; 1442 default: 1443 break; 1444 } 1445 1446 
writel(MPI3_SYSIF_WRITE_SEQUENCE_KEY_VALUE_2ND, 1447 &mrioc->sysif_regs->write_sequence); 1448 1449 ioc_config = readl(&mrioc->sysif_regs->ioc_configuration); 1450 ioc_status = readl(&mrioc->sysif_regs->ioc_status); 1451 ioc_info(mrioc, 1452 "ioc_status/ioc_onfig after %s reset is (0x%x)/(0x%x)\n", 1453 (!retval)?"successful":"failed", ioc_status, 1454 ioc_config); 1455 if (retval) 1456 mrioc->unrecoverable = 1; 1457 return retval; 1458 } 1459 1460 /** 1461 * mpi3mr_admin_request_post - Post request to admin queue 1462 * @mrioc: Adapter reference 1463 * @admin_req: MPI3 request 1464 * @admin_req_sz: Request size 1465 * @ignore_reset: Ignore reset in process 1466 * 1467 * Post the MPI3 request into admin request queue and 1468 * inform the controller, if the queue is full return 1469 * appropriate error. 1470 * 1471 * Return: 0 on success, non-zero on failure. 1472 */ 1473 int mpi3mr_admin_request_post(struct mpi3mr_ioc *mrioc, void *admin_req, 1474 u16 admin_req_sz, u8 ignore_reset) 1475 { 1476 u16 areq_pi = 0, areq_ci = 0, max_entries = 0; 1477 int retval = 0; 1478 unsigned long flags; 1479 u8 *areq_entry; 1480 1481 if (mrioc->unrecoverable) { 1482 ioc_err(mrioc, "%s : Unrecoverable controller\n", __func__); 1483 return -EFAULT; 1484 } 1485 1486 spin_lock_irqsave(&mrioc->admin_req_lock, flags); 1487 areq_pi = mrioc->admin_req_pi; 1488 areq_ci = mrioc->admin_req_ci; 1489 max_entries = mrioc->num_admin_req; 1490 if ((areq_ci == (areq_pi + 1)) || ((!areq_ci) && 1491 (areq_pi == (max_entries - 1)))) { 1492 ioc_err(mrioc, "AdminReqQ full condition detected\n"); 1493 retval = -EAGAIN; 1494 goto out; 1495 } 1496 if (!ignore_reset && mrioc->reset_in_progress) { 1497 ioc_err(mrioc, "AdminReqQ submit reset in progress\n"); 1498 retval = -EAGAIN; 1499 goto out; 1500 } 1501 areq_entry = (u8 *)mrioc->admin_req_base + 1502 (areq_pi * MPI3MR_ADMIN_REQ_FRAME_SZ); 1503 memset(areq_entry, 0, MPI3MR_ADMIN_REQ_FRAME_SZ); 1504 memcpy(areq_entry, (u8 *)admin_req, admin_req_sz); 1505 
1506 if (++areq_pi == max_entries) 1507 areq_pi = 0; 1508 mrioc->admin_req_pi = areq_pi; 1509 1510 writel(mrioc->admin_req_pi, &mrioc->sysif_regs->admin_request_queue_pi); 1511 1512 out: 1513 spin_unlock_irqrestore(&mrioc->admin_req_lock, flags); 1514 1515 return retval; 1516 } 1517 1518 /** 1519 * mpi3mr_free_op_req_q_segments - free request memory segments 1520 * @mrioc: Adapter instance reference 1521 * @q_idx: operational request queue index 1522 * 1523 * Free memory segments allocated for operational request queue 1524 * 1525 * Return: Nothing. 1526 */ 1527 static void mpi3mr_free_op_req_q_segments(struct mpi3mr_ioc *mrioc, u16 q_idx) 1528 { 1529 u16 j; 1530 int size; 1531 struct segments *segments; 1532 1533 segments = mrioc->req_qinfo[q_idx].q_segments; 1534 if (!segments) 1535 return; 1536 1537 if (mrioc->enable_segqueue) { 1538 size = MPI3MR_OP_REQ_Q_SEG_SIZE; 1539 if (mrioc->req_qinfo[q_idx].q_segment_list) { 1540 dma_free_coherent(&mrioc->pdev->dev, 1541 MPI3MR_MAX_SEG_LIST_SIZE, 1542 mrioc->req_qinfo[q_idx].q_segment_list, 1543 mrioc->req_qinfo[q_idx].q_segment_list_dma); 1544 mrioc->req_qinfo[q_idx].q_segment_list = NULL; 1545 } 1546 } else 1547 size = mrioc->req_qinfo[q_idx].segment_qd * 1548 mrioc->facts.op_req_sz; 1549 1550 for (j = 0; j < mrioc->req_qinfo[q_idx].num_segments; j++) { 1551 if (!segments[j].segment) 1552 continue; 1553 dma_free_coherent(&mrioc->pdev->dev, 1554 size, segments[j].segment, segments[j].segment_dma); 1555 segments[j].segment = NULL; 1556 } 1557 kfree(mrioc->req_qinfo[q_idx].q_segments); 1558 mrioc->req_qinfo[q_idx].q_segments = NULL; 1559 mrioc->req_qinfo[q_idx].qid = 0; 1560 } 1561 1562 /** 1563 * mpi3mr_free_op_reply_q_segments - free reply memory segments 1564 * @mrioc: Adapter instance reference 1565 * @q_idx: operational reply queue index 1566 * 1567 * Free memory segments allocated for operational reply queue 1568 * 1569 * Return: Nothing. 
1570 */ 1571 static void mpi3mr_free_op_reply_q_segments(struct mpi3mr_ioc *mrioc, u16 q_idx) 1572 { 1573 u16 j; 1574 int size; 1575 struct segments *segments; 1576 1577 segments = mrioc->op_reply_qinfo[q_idx].q_segments; 1578 if (!segments) 1579 return; 1580 1581 if (mrioc->enable_segqueue) { 1582 size = MPI3MR_OP_REP_Q_SEG_SIZE; 1583 if (mrioc->op_reply_qinfo[q_idx].q_segment_list) { 1584 dma_free_coherent(&mrioc->pdev->dev, 1585 MPI3MR_MAX_SEG_LIST_SIZE, 1586 mrioc->op_reply_qinfo[q_idx].q_segment_list, 1587 mrioc->op_reply_qinfo[q_idx].q_segment_list_dma); 1588 mrioc->op_reply_qinfo[q_idx].q_segment_list = NULL; 1589 } 1590 } else 1591 size = mrioc->op_reply_qinfo[q_idx].segment_qd * 1592 mrioc->op_reply_desc_sz; 1593 1594 for (j = 0; j < mrioc->op_reply_qinfo[q_idx].num_segments; j++) { 1595 if (!segments[j].segment) 1596 continue; 1597 dma_free_coherent(&mrioc->pdev->dev, 1598 size, segments[j].segment, segments[j].segment_dma); 1599 segments[j].segment = NULL; 1600 } 1601 1602 kfree(mrioc->op_reply_qinfo[q_idx].q_segments); 1603 mrioc->op_reply_qinfo[q_idx].q_segments = NULL; 1604 mrioc->op_reply_qinfo[q_idx].qid = 0; 1605 } 1606 1607 /** 1608 * mpi3mr_delete_op_reply_q - delete operational reply queue 1609 * @mrioc: Adapter instance reference 1610 * @qidx: operational reply queue index 1611 * 1612 * Delete operatinal reply queue by issuing MPI request 1613 * through admin queue. 1614 * 1615 * Return: 0 on success, non-zero on failure. 
1616 */ 1617 static int mpi3mr_delete_op_reply_q(struct mpi3mr_ioc *mrioc, u16 qidx) 1618 { 1619 struct mpi3_delete_reply_queue_request delq_req; 1620 struct op_reply_qinfo *op_reply_q = mrioc->op_reply_qinfo + qidx; 1621 int retval = 0; 1622 u16 reply_qid = 0, midx; 1623 1624 reply_qid = op_reply_q->qid; 1625 1626 midx = REPLY_QUEUE_IDX_TO_MSIX_IDX(qidx, mrioc->op_reply_q_offset); 1627 1628 if (!reply_qid) { 1629 retval = -1; 1630 ioc_err(mrioc, "Issue DelRepQ: called with invalid ReqQID\n"); 1631 goto out; 1632 } 1633 1634 (op_reply_q->qtype == MPI3MR_DEFAULT_QUEUE) ? mrioc->default_qcount-- : 1635 mrioc->active_poll_qcount--; 1636 1637 memset(&delq_req, 0, sizeof(delq_req)); 1638 mutex_lock(&mrioc->init_cmds.mutex); 1639 if (mrioc->init_cmds.state & MPI3MR_CMD_PENDING) { 1640 retval = -1; 1641 ioc_err(mrioc, "Issue DelRepQ: Init command is in use\n"); 1642 mutex_unlock(&mrioc->init_cmds.mutex); 1643 goto out; 1644 } 1645 mrioc->init_cmds.state = MPI3MR_CMD_PENDING; 1646 mrioc->init_cmds.is_waiting = 1; 1647 mrioc->init_cmds.callback = NULL; 1648 delq_req.host_tag = cpu_to_le16(MPI3MR_HOSTTAG_INITCMDS); 1649 delq_req.function = MPI3_FUNCTION_DELETE_REPLY_QUEUE; 1650 delq_req.queue_id = cpu_to_le16(reply_qid); 1651 1652 init_completion(&mrioc->init_cmds.done); 1653 retval = mpi3mr_admin_request_post(mrioc, &delq_req, sizeof(delq_req), 1654 1); 1655 if (retval) { 1656 ioc_err(mrioc, "Issue DelRepQ: Admin Post failed\n"); 1657 goto out_unlock; 1658 } 1659 wait_for_completion_timeout(&mrioc->init_cmds.done, 1660 (MPI3MR_INTADMCMD_TIMEOUT * HZ)); 1661 if (!(mrioc->init_cmds.state & MPI3MR_CMD_COMPLETE)) { 1662 ioc_err(mrioc, "delete reply queue timed out\n"); 1663 mpi3mr_check_rh_fault_ioc(mrioc, 1664 MPI3MR_RESET_FROM_DELREPQ_TIMEOUT); 1665 retval = -1; 1666 goto out_unlock; 1667 } 1668 if ((mrioc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK) 1669 != MPI3_IOCSTATUS_SUCCESS) { 1670 ioc_err(mrioc, 1671 "Issue DelRepQ: Failed ioc_status(0x%04x) Loginfo(0x%08x)\n", 
1672 (mrioc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK), 1673 mrioc->init_cmds.ioc_loginfo); 1674 retval = -1; 1675 goto out_unlock; 1676 } 1677 mrioc->intr_info[midx].op_reply_q = NULL; 1678 1679 mpi3mr_free_op_reply_q_segments(mrioc, qidx); 1680 out_unlock: 1681 mrioc->init_cmds.state = MPI3MR_CMD_NOTUSED; 1682 mutex_unlock(&mrioc->init_cmds.mutex); 1683 out: 1684 1685 return retval; 1686 } 1687 1688 /** 1689 * mpi3mr_alloc_op_reply_q_segments -Alloc segmented reply pool 1690 * @mrioc: Adapter instance reference 1691 * @qidx: request queue index 1692 * 1693 * Allocate segmented memory pools for operational reply 1694 * queue. 1695 * 1696 * Return: 0 on success, non-zero on failure. 1697 */ 1698 static int mpi3mr_alloc_op_reply_q_segments(struct mpi3mr_ioc *mrioc, u16 qidx) 1699 { 1700 struct op_reply_qinfo *op_reply_q = mrioc->op_reply_qinfo + qidx; 1701 int i, size; 1702 u64 *q_segment_list_entry = NULL; 1703 struct segments *segments; 1704 1705 if (mrioc->enable_segqueue) { 1706 op_reply_q->segment_qd = 1707 MPI3MR_OP_REP_Q_SEG_SIZE / mrioc->op_reply_desc_sz; 1708 1709 size = MPI3MR_OP_REP_Q_SEG_SIZE; 1710 1711 op_reply_q->q_segment_list = dma_alloc_coherent(&mrioc->pdev->dev, 1712 MPI3MR_MAX_SEG_LIST_SIZE, &op_reply_q->q_segment_list_dma, 1713 GFP_KERNEL); 1714 if (!op_reply_q->q_segment_list) 1715 return -ENOMEM; 1716 q_segment_list_entry = (u64 *)op_reply_q->q_segment_list; 1717 } else { 1718 op_reply_q->segment_qd = op_reply_q->num_replies; 1719 size = op_reply_q->num_replies * mrioc->op_reply_desc_sz; 1720 } 1721 1722 op_reply_q->num_segments = DIV_ROUND_UP(op_reply_q->num_replies, 1723 op_reply_q->segment_qd); 1724 1725 op_reply_q->q_segments = kcalloc(op_reply_q->num_segments, 1726 sizeof(struct segments), GFP_KERNEL); 1727 if (!op_reply_q->q_segments) 1728 return -ENOMEM; 1729 1730 segments = op_reply_q->q_segments; 1731 for (i = 0; i < op_reply_q->num_segments; i++) { 1732 segments[i].segment = 1733 dma_alloc_coherent(&mrioc->pdev->dev, 1734 
size, &segments[i].segment_dma, GFP_KERNEL); 1735 if (!segments[i].segment) 1736 return -ENOMEM; 1737 if (mrioc->enable_segqueue) 1738 q_segment_list_entry[i] = 1739 (unsigned long)segments[i].segment_dma; 1740 } 1741 1742 return 0; 1743 } 1744 1745 /** 1746 * mpi3mr_alloc_op_req_q_segments - Alloc segmented req pool. 1747 * @mrioc: Adapter instance reference 1748 * @qidx: request queue index 1749 * 1750 * Allocate segmented memory pools for operational request 1751 * queue. 1752 * 1753 * Return: 0 on success, non-zero on failure. 1754 */ 1755 static int mpi3mr_alloc_op_req_q_segments(struct mpi3mr_ioc *mrioc, u16 qidx) 1756 { 1757 struct op_req_qinfo *op_req_q = mrioc->req_qinfo + qidx; 1758 int i, size; 1759 u64 *q_segment_list_entry = NULL; 1760 struct segments *segments; 1761 1762 if (mrioc->enable_segqueue) { 1763 op_req_q->segment_qd = 1764 MPI3MR_OP_REQ_Q_SEG_SIZE / mrioc->facts.op_req_sz; 1765 1766 size = MPI3MR_OP_REQ_Q_SEG_SIZE; 1767 1768 op_req_q->q_segment_list = dma_alloc_coherent(&mrioc->pdev->dev, 1769 MPI3MR_MAX_SEG_LIST_SIZE, &op_req_q->q_segment_list_dma, 1770 GFP_KERNEL); 1771 if (!op_req_q->q_segment_list) 1772 return -ENOMEM; 1773 q_segment_list_entry = (u64 *)op_req_q->q_segment_list; 1774 1775 } else { 1776 op_req_q->segment_qd = op_req_q->num_requests; 1777 size = op_req_q->num_requests * mrioc->facts.op_req_sz; 1778 } 1779 1780 op_req_q->num_segments = DIV_ROUND_UP(op_req_q->num_requests, 1781 op_req_q->segment_qd); 1782 1783 op_req_q->q_segments = kcalloc(op_req_q->num_segments, 1784 sizeof(struct segments), GFP_KERNEL); 1785 if (!op_req_q->q_segments) 1786 return -ENOMEM; 1787 1788 segments = op_req_q->q_segments; 1789 for (i = 0; i < op_req_q->num_segments; i++) { 1790 segments[i].segment = 1791 dma_alloc_coherent(&mrioc->pdev->dev, 1792 size, &segments[i].segment_dma, GFP_KERNEL); 1793 if (!segments[i].segment) 1794 return -ENOMEM; 1795 if (mrioc->enable_segqueue) 1796 q_segment_list_entry[i] = 1797 (unsigned 
long)segments[i].segment_dma; 1798 } 1799 1800 return 0; 1801 } 1802 1803 /** 1804 * mpi3mr_create_op_reply_q - create operational reply queue 1805 * @mrioc: Adapter instance reference 1806 * @qidx: operational reply queue index 1807 * 1808 * Create operatinal reply queue by issuing MPI request 1809 * through admin queue. 1810 * 1811 * Return: 0 on success, non-zero on failure. 1812 */ 1813 static int mpi3mr_create_op_reply_q(struct mpi3mr_ioc *mrioc, u16 qidx) 1814 { 1815 struct mpi3_create_reply_queue_request create_req; 1816 struct op_reply_qinfo *op_reply_q = mrioc->op_reply_qinfo + qidx; 1817 int retval = 0; 1818 u16 reply_qid = 0, midx; 1819 1820 reply_qid = op_reply_q->qid; 1821 1822 midx = REPLY_QUEUE_IDX_TO_MSIX_IDX(qidx, mrioc->op_reply_q_offset); 1823 1824 if (reply_qid) { 1825 retval = -1; 1826 ioc_err(mrioc, "CreateRepQ: called for duplicate qid %d\n", 1827 reply_qid); 1828 1829 return retval; 1830 } 1831 1832 reply_qid = qidx + 1; 1833 op_reply_q->num_replies = MPI3MR_OP_REP_Q_QD; 1834 if (!mrioc->pdev->revision) 1835 op_reply_q->num_replies = MPI3MR_OP_REP_Q_QD4K; 1836 op_reply_q->ci = 0; 1837 op_reply_q->ephase = 1; 1838 atomic_set(&op_reply_q->pend_ios, 0); 1839 atomic_set(&op_reply_q->in_use, 0); 1840 op_reply_q->enable_irq_poll = false; 1841 1842 if (!op_reply_q->q_segments) { 1843 retval = mpi3mr_alloc_op_reply_q_segments(mrioc, qidx); 1844 if (retval) { 1845 mpi3mr_free_op_reply_q_segments(mrioc, qidx); 1846 goto out; 1847 } 1848 } 1849 1850 memset(&create_req, 0, sizeof(create_req)); 1851 mutex_lock(&mrioc->init_cmds.mutex); 1852 if (mrioc->init_cmds.state & MPI3MR_CMD_PENDING) { 1853 retval = -1; 1854 ioc_err(mrioc, "CreateRepQ: Init command is in use\n"); 1855 goto out_unlock; 1856 } 1857 mrioc->init_cmds.state = MPI3MR_CMD_PENDING; 1858 mrioc->init_cmds.is_waiting = 1; 1859 mrioc->init_cmds.callback = NULL; 1860 create_req.host_tag = cpu_to_le16(MPI3MR_HOSTTAG_INITCMDS); 1861 create_req.function = MPI3_FUNCTION_CREATE_REPLY_QUEUE; 1862 
create_req.queue_id = cpu_to_le16(reply_qid); 1863 1864 if (midx < (mrioc->intr_info_count - mrioc->requested_poll_qcount)) 1865 op_reply_q->qtype = MPI3MR_DEFAULT_QUEUE; 1866 else 1867 op_reply_q->qtype = MPI3MR_POLL_QUEUE; 1868 1869 if (op_reply_q->qtype == MPI3MR_DEFAULT_QUEUE) { 1870 create_req.flags = 1871 MPI3_CREATE_REPLY_QUEUE_FLAGS_INT_ENABLE_ENABLE; 1872 create_req.msix_index = 1873 cpu_to_le16(mrioc->intr_info[midx].msix_index); 1874 } else { 1875 create_req.msix_index = cpu_to_le16(mrioc->intr_info_count - 1); 1876 ioc_info(mrioc, "create reply queue(polled): for qid(%d), midx(%d)\n", 1877 reply_qid, midx); 1878 if (!mrioc->active_poll_qcount) 1879 disable_irq_nosync(pci_irq_vector(mrioc->pdev, 1880 mrioc->intr_info_count - 1)); 1881 } 1882 1883 if (mrioc->enable_segqueue) { 1884 create_req.flags |= 1885 MPI3_CREATE_REQUEST_QUEUE_FLAGS_SEGMENTED_SEGMENTED; 1886 create_req.base_address = cpu_to_le64( 1887 op_reply_q->q_segment_list_dma); 1888 } else 1889 create_req.base_address = cpu_to_le64( 1890 op_reply_q->q_segments[0].segment_dma); 1891 1892 create_req.size = cpu_to_le16(op_reply_q->num_replies); 1893 1894 init_completion(&mrioc->init_cmds.done); 1895 retval = mpi3mr_admin_request_post(mrioc, &create_req, 1896 sizeof(create_req), 1); 1897 if (retval) { 1898 ioc_err(mrioc, "CreateRepQ: Admin Post failed\n"); 1899 goto out_unlock; 1900 } 1901 wait_for_completion_timeout(&mrioc->init_cmds.done, 1902 (MPI3MR_INTADMCMD_TIMEOUT * HZ)); 1903 if (!(mrioc->init_cmds.state & MPI3MR_CMD_COMPLETE)) { 1904 ioc_err(mrioc, "create reply queue timed out\n"); 1905 mpi3mr_check_rh_fault_ioc(mrioc, 1906 MPI3MR_RESET_FROM_CREATEREPQ_TIMEOUT); 1907 retval = -1; 1908 goto out_unlock; 1909 } 1910 if ((mrioc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK) 1911 != MPI3_IOCSTATUS_SUCCESS) { 1912 ioc_err(mrioc, 1913 "CreateRepQ: Failed ioc_status(0x%04x) Loginfo(0x%08x)\n", 1914 (mrioc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK), 1915 
mrioc->init_cmds.ioc_loginfo); 1916 retval = -1; 1917 goto out_unlock; 1918 } 1919 op_reply_q->qid = reply_qid; 1920 if (midx < mrioc->intr_info_count) 1921 mrioc->intr_info[midx].op_reply_q = op_reply_q; 1922 1923 (op_reply_q->qtype == MPI3MR_DEFAULT_QUEUE) ? mrioc->default_qcount++ : 1924 mrioc->active_poll_qcount++; 1925 1926 out_unlock: 1927 mrioc->init_cmds.state = MPI3MR_CMD_NOTUSED; 1928 mutex_unlock(&mrioc->init_cmds.mutex); 1929 out: 1930 1931 return retval; 1932 } 1933 1934 /** 1935 * mpi3mr_create_op_req_q - create operational request queue 1936 * @mrioc: Adapter instance reference 1937 * @idx: operational request queue index 1938 * @reply_qid: Reply queue ID 1939 * 1940 * Create operatinal request queue by issuing MPI request 1941 * through admin queue. 1942 * 1943 * Return: 0 on success, non-zero on failure. 1944 */ 1945 static int mpi3mr_create_op_req_q(struct mpi3mr_ioc *mrioc, u16 idx, 1946 u16 reply_qid) 1947 { 1948 struct mpi3_create_request_queue_request create_req; 1949 struct op_req_qinfo *op_req_q = mrioc->req_qinfo + idx; 1950 int retval = 0; 1951 u16 req_qid = 0; 1952 1953 req_qid = op_req_q->qid; 1954 1955 if (req_qid) { 1956 retval = -1; 1957 ioc_err(mrioc, "CreateReqQ: called for duplicate qid %d\n", 1958 req_qid); 1959 1960 return retval; 1961 } 1962 req_qid = idx + 1; 1963 1964 op_req_q->num_requests = MPI3MR_OP_REQ_Q_QD; 1965 op_req_q->ci = 0; 1966 op_req_q->pi = 0; 1967 op_req_q->reply_qid = reply_qid; 1968 spin_lock_init(&op_req_q->q_lock); 1969 1970 if (!op_req_q->q_segments) { 1971 retval = mpi3mr_alloc_op_req_q_segments(mrioc, idx); 1972 if (retval) { 1973 mpi3mr_free_op_req_q_segments(mrioc, idx); 1974 goto out; 1975 } 1976 } 1977 1978 memset(&create_req, 0, sizeof(create_req)); 1979 mutex_lock(&mrioc->init_cmds.mutex); 1980 if (mrioc->init_cmds.state & MPI3MR_CMD_PENDING) { 1981 retval = -1; 1982 ioc_err(mrioc, "CreateReqQ: Init command is in use\n"); 1983 goto out_unlock; 1984 } 1985 mrioc->init_cmds.state = MPI3MR_CMD_PENDING; 
1986 mrioc->init_cmds.is_waiting = 1; 1987 mrioc->init_cmds.callback = NULL; 1988 create_req.host_tag = cpu_to_le16(MPI3MR_HOSTTAG_INITCMDS); 1989 create_req.function = MPI3_FUNCTION_CREATE_REQUEST_QUEUE; 1990 create_req.queue_id = cpu_to_le16(req_qid); 1991 if (mrioc->enable_segqueue) { 1992 create_req.flags = 1993 MPI3_CREATE_REQUEST_QUEUE_FLAGS_SEGMENTED_SEGMENTED; 1994 create_req.base_address = cpu_to_le64( 1995 op_req_q->q_segment_list_dma); 1996 } else 1997 create_req.base_address = cpu_to_le64( 1998 op_req_q->q_segments[0].segment_dma); 1999 create_req.reply_queue_id = cpu_to_le16(reply_qid); 2000 create_req.size = cpu_to_le16(op_req_q->num_requests); 2001 2002 init_completion(&mrioc->init_cmds.done); 2003 retval = mpi3mr_admin_request_post(mrioc, &create_req, 2004 sizeof(create_req), 1); 2005 if (retval) { 2006 ioc_err(mrioc, "CreateReqQ: Admin Post failed\n"); 2007 goto out_unlock; 2008 } 2009 wait_for_completion_timeout(&mrioc->init_cmds.done, 2010 (MPI3MR_INTADMCMD_TIMEOUT * HZ)); 2011 if (!(mrioc->init_cmds.state & MPI3MR_CMD_COMPLETE)) { 2012 ioc_err(mrioc, "create request queue timed out\n"); 2013 mpi3mr_check_rh_fault_ioc(mrioc, 2014 MPI3MR_RESET_FROM_CREATEREQQ_TIMEOUT); 2015 retval = -1; 2016 goto out_unlock; 2017 } 2018 if ((mrioc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK) 2019 != MPI3_IOCSTATUS_SUCCESS) { 2020 ioc_err(mrioc, 2021 "CreateReqQ: Failed ioc_status(0x%04x) Loginfo(0x%08x)\n", 2022 (mrioc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK), 2023 mrioc->init_cmds.ioc_loginfo); 2024 retval = -1; 2025 goto out_unlock; 2026 } 2027 op_req_q->qid = req_qid; 2028 2029 out_unlock: 2030 mrioc->init_cmds.state = MPI3MR_CMD_NOTUSED; 2031 mutex_unlock(&mrioc->init_cmds.mutex); 2032 out: 2033 2034 return retval; 2035 } 2036 2037 /** 2038 * mpi3mr_create_op_queues - create operational queue pairs 2039 * @mrioc: Adapter instance reference 2040 * 2041 * Allocate memory for operational queue meta data and call 2042 * create request and reply 
queue functions. 2043 * 2044 * Return: 0 on success, non-zero on failures. 2045 */ 2046 static int mpi3mr_create_op_queues(struct mpi3mr_ioc *mrioc) 2047 { 2048 int retval = 0; 2049 u16 num_queues = 0, i = 0, msix_count_op_q = 1; 2050 2051 num_queues = min_t(int, mrioc->facts.max_op_reply_q, 2052 mrioc->facts.max_op_req_q); 2053 2054 msix_count_op_q = 2055 mrioc->intr_info_count - mrioc->op_reply_q_offset; 2056 if (!mrioc->num_queues) 2057 mrioc->num_queues = min_t(int, num_queues, msix_count_op_q); 2058 /* 2059 * During reset set the num_queues to the number of queues 2060 * that was set before the reset. 2061 */ 2062 num_queues = mrioc->num_op_reply_q ? 2063 mrioc->num_op_reply_q : mrioc->num_queues; 2064 ioc_info(mrioc, "trying to create %d operational queue pairs\n", 2065 num_queues); 2066 2067 if (!mrioc->req_qinfo) { 2068 mrioc->req_qinfo = kcalloc(num_queues, 2069 sizeof(struct op_req_qinfo), GFP_KERNEL); 2070 if (!mrioc->req_qinfo) { 2071 retval = -1; 2072 goto out_failed; 2073 } 2074 2075 mrioc->op_reply_qinfo = kzalloc(sizeof(struct op_reply_qinfo) * 2076 num_queues, GFP_KERNEL); 2077 if (!mrioc->op_reply_qinfo) { 2078 retval = -1; 2079 goto out_failed; 2080 } 2081 } 2082 2083 if (mrioc->enable_segqueue) 2084 ioc_info(mrioc, 2085 "allocating operational queues through segmented queues\n"); 2086 2087 for (i = 0; i < num_queues; i++) { 2088 if (mpi3mr_create_op_reply_q(mrioc, i)) { 2089 ioc_err(mrioc, "Cannot create OP RepQ %d\n", i); 2090 break; 2091 } 2092 if (mpi3mr_create_op_req_q(mrioc, i, 2093 mrioc->op_reply_qinfo[i].qid)) { 2094 ioc_err(mrioc, "Cannot create OP ReqQ %d\n", i); 2095 mpi3mr_delete_op_reply_q(mrioc, i); 2096 break; 2097 } 2098 } 2099 2100 if (i == 0) { 2101 /* Not even one queue is created successfully*/ 2102 retval = -1; 2103 goto out_failed; 2104 } 2105 mrioc->num_op_reply_q = mrioc->num_op_req_q = i; 2106 ioc_info(mrioc, 2107 "successfully created %d operational queue pairs(default/polled) queue = (%d/%d)\n", 2108 
mrioc->num_op_reply_q, mrioc->default_qcount, 2109 mrioc->active_poll_qcount); 2110 2111 return retval; 2112 out_failed: 2113 kfree(mrioc->req_qinfo); 2114 mrioc->req_qinfo = NULL; 2115 2116 kfree(mrioc->op_reply_qinfo); 2117 mrioc->op_reply_qinfo = NULL; 2118 2119 return retval; 2120 } 2121 2122 /** 2123 * mpi3mr_op_request_post - Post request to operational queue 2124 * @mrioc: Adapter reference 2125 * @op_req_q: Operational request queue info 2126 * @req: MPI3 request 2127 * 2128 * Post the MPI3 request into operational request queue and 2129 * inform the controller, if the queue is full return 2130 * appropriate error. 2131 * 2132 * Return: 0 on success, non-zero on failure. 2133 */ 2134 int mpi3mr_op_request_post(struct mpi3mr_ioc *mrioc, 2135 struct op_req_qinfo *op_req_q, u8 *req) 2136 { 2137 u16 pi = 0, max_entries, reply_qidx = 0, midx; 2138 int retval = 0; 2139 unsigned long flags; 2140 u8 *req_entry; 2141 void *segment_base_addr; 2142 u16 req_sz = mrioc->facts.op_req_sz; 2143 struct segments *segments = op_req_q->q_segments; 2144 2145 reply_qidx = op_req_q->reply_qid - 1; 2146 2147 if (mrioc->unrecoverable) 2148 return -EFAULT; 2149 2150 spin_lock_irqsave(&op_req_q->q_lock, flags); 2151 pi = op_req_q->pi; 2152 max_entries = op_req_q->num_requests; 2153 2154 if (mpi3mr_check_req_qfull(op_req_q)) { 2155 midx = REPLY_QUEUE_IDX_TO_MSIX_IDX( 2156 reply_qidx, mrioc->op_reply_q_offset); 2157 mpi3mr_process_op_reply_q(mrioc, mrioc->intr_info[midx].op_reply_q); 2158 2159 if (mpi3mr_check_req_qfull(op_req_q)) { 2160 retval = -EAGAIN; 2161 goto out; 2162 } 2163 } 2164 2165 if (mrioc->reset_in_progress) { 2166 ioc_err(mrioc, "OpReqQ submit reset in progress\n"); 2167 retval = -EAGAIN; 2168 goto out; 2169 } 2170 2171 segment_base_addr = segments[pi / op_req_q->segment_qd].segment; 2172 req_entry = (u8 *)segment_base_addr + 2173 ((pi % op_req_q->segment_qd) * req_sz); 2174 2175 memset(req_entry, 0, req_sz); 2176 memcpy(req_entry, req, MPI3MR_ADMIN_REQ_FRAME_SZ); 2177 
2178 if (++pi == max_entries) 2179 pi = 0; 2180 op_req_q->pi = pi; 2181 2182 if (atomic_inc_return(&mrioc->op_reply_qinfo[reply_qidx].pend_ios) 2183 > MPI3MR_IRQ_POLL_TRIGGER_IOCOUNT) 2184 mrioc->op_reply_qinfo[reply_qidx].enable_irq_poll = true; 2185 2186 writel(op_req_q->pi, 2187 &mrioc->sysif_regs->oper_queue_indexes[reply_qidx].producer_index); 2188 2189 out: 2190 spin_unlock_irqrestore(&op_req_q->q_lock, flags); 2191 return retval; 2192 } 2193 2194 /** 2195 * mpi3mr_check_rh_fault_ioc - check reset history and fault 2196 * controller 2197 * @mrioc: Adapter instance reference 2198 * @reason_code: reason code for the fault. 2199 * 2200 * This routine will save snapdump and fault the controller with 2201 * the given reason code if it is not already in the fault or 2202 * not asynchronosuly reset. This will be used to handle 2203 * initilaization time faults/resets/timeout as in those cases 2204 * immediate soft reset invocation is not required. 2205 * 2206 * Return: None. 2207 */ 2208 void mpi3mr_check_rh_fault_ioc(struct mpi3mr_ioc *mrioc, u32 reason_code) 2209 { 2210 u32 ioc_status, host_diagnostic, timeout; 2211 2212 ioc_status = readl(&mrioc->sysif_regs->ioc_status); 2213 if ((ioc_status & MPI3_SYSIF_IOC_STATUS_RESET_HISTORY) || 2214 (ioc_status & MPI3_SYSIF_IOC_STATUS_FAULT)) { 2215 mpi3mr_print_fault_info(mrioc); 2216 return; 2217 } 2218 mpi3mr_set_diagsave(mrioc); 2219 mpi3mr_issue_reset(mrioc, MPI3_SYSIF_HOST_DIAG_RESET_ACTION_DIAG_FAULT, 2220 reason_code); 2221 timeout = MPI3_SYSIF_DIAG_SAVE_TIMEOUT * 10; 2222 do { 2223 host_diagnostic = readl(&mrioc->sysif_regs->host_diagnostic); 2224 if (!(host_diagnostic & MPI3_SYSIF_HOST_DIAG_SAVE_IN_PROGRESS)) 2225 break; 2226 msleep(100); 2227 } while (--timeout); 2228 } 2229 2230 /** 2231 * mpi3mr_sync_timestamp - Issue time stamp sync request 2232 * @mrioc: Adapter reference 2233 * 2234 * Issue IO unit control MPI request to synchornize firmware 2235 * timestamp with host time. 
 *
 * Return: 0 on success, non-zero on failure.
 */
static int mpi3mr_sync_timestamp(struct mpi3mr_ioc *mrioc)
{
	ktime_t current_time;
	struct mpi3_iounit_control_request iou_ctrl;
	int retval = 0;

	memset(&iou_ctrl, 0, sizeof(iou_ctrl));
	/* init_cmds is a single shared slot; serialize on its mutex. */
	mutex_lock(&mrioc->init_cmds.mutex);
	if (mrioc->init_cmds.state & MPI3MR_CMD_PENDING) {
		retval = -1;
		ioc_err(mrioc, "Issue IOUCTL time_stamp: command is in use\n");
		mutex_unlock(&mrioc->init_cmds.mutex);
		goto out;
	}
	mrioc->init_cmds.state = MPI3MR_CMD_PENDING;
	mrioc->init_cmds.is_waiting = 1;
	mrioc->init_cmds.callback = NULL;
	iou_ctrl.host_tag = cpu_to_le16(MPI3MR_HOSTTAG_INITCMDS);
	iou_ctrl.function = MPI3_FUNCTION_IO_UNIT_CONTROL;
	iou_ctrl.operation = MPI3_CTRL_OP_UPDATE_TIMESTAMP;
	/* Firmware timestamp is kept in milliseconds of host wall time. */
	current_time = ktime_get_real();
	iou_ctrl.param64[0] = cpu_to_le64(ktime_to_ms(current_time));

	init_completion(&mrioc->init_cmds.done);
	retval = mpi3mr_admin_request_post(mrioc, &iou_ctrl,
	    sizeof(iou_ctrl), 0);
	if (retval) {
		ioc_err(mrioc, "Issue IOUCTL time_stamp: Admin Post failed\n");
		goto out_unlock;
	}

	wait_for_completion_timeout(&mrioc->init_cmds.done,
	    (MPI3MR_INTADMCMD_TIMEOUT * HZ));
	if (!(mrioc->init_cmds.state & MPI3MR_CMD_COMPLETE)) {
		ioc_err(mrioc, "Issue IOUCTL time_stamp: command timed out\n");
		mrioc->init_cmds.is_waiting = 0;
		/* Escalate to a soft reset unless a reset already cleaned
		 * up this command. */
		if (!(mrioc->init_cmds.state & MPI3MR_CMD_RESET))
			mpi3mr_soft_reset_handler(mrioc,
			    MPI3MR_RESET_FROM_TSU_TIMEOUT, 1);
		retval = -1;
		goto out_unlock;
	}
	if ((mrioc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK)
	    != MPI3_IOCSTATUS_SUCCESS) {
		ioc_err(mrioc,
		    "Issue IOUCTL time_stamp: Failed ioc_status(0x%04x) Loginfo(0x%08x)\n",
		    (mrioc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK),
		    mrioc->init_cmds.ioc_loginfo);
		retval = -1;
		goto out_unlock;
	}

out_unlock:
	mrioc->init_cmds.state = MPI3MR_CMD_NOTUSED;
	mutex_unlock(&mrioc->init_cmds.mutex);

out:
	return retval;
}

/**
 * mpi3mr_print_pkg_ver - display controller fw package version
 * @mrioc: Adapter reference
 *
 * Retrieve firmware package version from the component image
 * header of the controller flash and display it.
 *
 * Return: 0 on success and non-zero on failure.
 */
static int mpi3mr_print_pkg_ver(struct mpi3mr_ioc *mrioc)
{
	struct mpi3_ci_upload_request ci_upload;
	int retval = -1;
	void *data = NULL;
	dma_addr_t data_dma;
	struct mpi3_ci_manifest_mpi *manifest;
	u32 data_len = sizeof(struct mpi3_ci_manifest_mpi);
	u8 sgl_flags = MPI3MR_SGEFLAGS_SYSTEM_SIMPLE_END_OF_LIST;

	/* DMA buffer into which firmware uploads the manifest. */
	data = dma_alloc_coherent(&mrioc->pdev->dev, data_len, &data_dma,
	    GFP_KERNEL);
	if (!data)
		return -ENOMEM;

	memset(&ci_upload, 0, sizeof(ci_upload));
	mutex_lock(&mrioc->init_cmds.mutex);
	if (mrioc->init_cmds.state & MPI3MR_CMD_PENDING) {
		ioc_err(mrioc, "sending get package version failed due to command in use\n");
		mutex_unlock(&mrioc->init_cmds.mutex);
		goto out;
	}
	mrioc->init_cmds.state = MPI3MR_CMD_PENDING;
	mrioc->init_cmds.is_waiting = 1;
	mrioc->init_cmds.callback = NULL;
	ci_upload.host_tag = cpu_to_le16(MPI3MR_HOSTTAG_INITCMDS);
	ci_upload.function = MPI3_FUNCTION_CI_UPLOAD;
	ci_upload.msg_flags = MPI3_CI_UPLOAD_MSGFLAGS_LOCATION_PRIMARY;
	ci_upload.signature1 = cpu_to_le32(MPI3_IMAGE_HEADER_SIGNATURE1_MANIFEST);
	ci_upload.image_offset = cpu_to_le32(MPI3_IMAGE_HEADER_SIZE);
	ci_upload.segment_size = cpu_to_le32(data_len);

	mpi3mr_add_sg_single(&ci_upload.sgl, sgl_flags, data_len,
	    data_dma);
	init_completion(&mrioc->init_cmds.done);
	retval = mpi3mr_admin_request_post(mrioc, &ci_upload,
	    sizeof(ci_upload), 1);
	if (retval) {
		ioc_err(mrioc, "posting get package version failed\n");
		goto out_unlock;
	}
	wait_for_completion_timeout(&mrioc->init_cmds.done,
	    (MPI3MR_INTADMCMD_TIMEOUT * HZ));
	if (!(mrioc->init_cmds.state & MPI3MR_CMD_COMPLETE)) {
		ioc_err(mrioc, "get package version timed out\n");
		/* Init-time timeout: fault the IOC instead of soft reset. */
		mpi3mr_check_rh_fault_ioc(mrioc,
		    MPI3MR_RESET_FROM_GETPKGVER_TIMEOUT);
		retval = -1;
		goto out_unlock;
	}
	if ((mrioc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK)
	    == MPI3_IOCSTATUS_SUCCESS) {
		manifest = (struct mpi3_ci_manifest_mpi *) data;
		if (manifest->manifest_type == MPI3_CI_MANIFEST_TYPE_MPI) {
			ioc_info(mrioc,
			    "firmware package version(%d.%d.%d.%d.%05d-%05d)\n",
			    manifest->package_version.gen_major,
			    manifest->package_version.gen_minor,
			    manifest->package_version.phase_major,
			    manifest->package_version.phase_minor,
			    manifest->package_version.customer_id,
			    manifest->package_version.build_num);
		}
	}
	/* Failure to read the version is informational only. */
	retval = 0;
out_unlock:
	mrioc->init_cmds.state = MPI3MR_CMD_NOTUSED;
	mutex_unlock(&mrioc->init_cmds.mutex);

out:
	if (data)
		dma_free_coherent(&mrioc->pdev->dev, data_len, data,
		    data_dma);
	return retval;
}

/**
 * mpi3mr_watchdog_work - watchdog thread to monitor faults
 * @work: work struct
 *
 * Watch dog work periodically executed (1 second interval) to
 * monitor firmware fault and to issue periodic timer sync to
 * the firmware.
 *
 * Return: Nothing.
2393 */ 2394 static void mpi3mr_watchdog_work(struct work_struct *work) 2395 { 2396 struct mpi3mr_ioc *mrioc = 2397 container_of(work, struct mpi3mr_ioc, watchdog_work.work); 2398 unsigned long flags; 2399 enum mpi3mr_iocstate ioc_state; 2400 u32 fault, host_diagnostic, ioc_status; 2401 u32 reset_reason = MPI3MR_RESET_FROM_FAULT_WATCH; 2402 2403 if (mrioc->reset_in_progress || mrioc->unrecoverable) 2404 return; 2405 2406 if (mrioc->ts_update_counter++ >= MPI3MR_TSUPDATE_INTERVAL) { 2407 mrioc->ts_update_counter = 0; 2408 mpi3mr_sync_timestamp(mrioc); 2409 } 2410 2411 if ((mrioc->prepare_for_reset) && 2412 ((mrioc->prepare_for_reset_timeout_counter++) >= 2413 MPI3MR_PREPARE_FOR_RESET_TIMEOUT)) { 2414 mpi3mr_soft_reset_handler(mrioc, 2415 MPI3MR_RESET_FROM_CIACTVRST_TIMER, 1); 2416 return; 2417 } 2418 2419 ioc_status = readl(&mrioc->sysif_regs->ioc_status); 2420 if (ioc_status & MPI3_SYSIF_IOC_STATUS_RESET_HISTORY) { 2421 mpi3mr_soft_reset_handler(mrioc, MPI3MR_RESET_FROM_FIRMWARE, 0); 2422 return; 2423 } 2424 2425 /*Check for fault state every one second and issue Soft reset*/ 2426 ioc_state = mpi3mr_get_iocstate(mrioc); 2427 if (ioc_state != MRIOC_STATE_FAULT) 2428 goto schedule_work; 2429 2430 fault = readl(&mrioc->sysif_regs->fault) & MPI3_SYSIF_FAULT_CODE_MASK; 2431 host_diagnostic = readl(&mrioc->sysif_regs->host_diagnostic); 2432 if (host_diagnostic & MPI3_SYSIF_HOST_DIAG_SAVE_IN_PROGRESS) { 2433 if (!mrioc->diagsave_timeout) { 2434 mpi3mr_print_fault_info(mrioc); 2435 ioc_warn(mrioc, "diag save in progress\n"); 2436 } 2437 if ((mrioc->diagsave_timeout++) <= MPI3_SYSIF_DIAG_SAVE_TIMEOUT) 2438 goto schedule_work; 2439 } 2440 2441 mpi3mr_print_fault_info(mrioc); 2442 mrioc->diagsave_timeout = 0; 2443 2444 switch (fault) { 2445 case MPI3_SYSIF_FAULT_CODE_POWER_CYCLE_REQUIRED: 2446 ioc_info(mrioc, 2447 "controller requires system power cycle, marking controller as unrecoverable\n"); 2448 mrioc->unrecoverable = 1; 2449 return; 2450 case 
MPI3_SYSIF_FAULT_CODE_SOFT_RESET_IN_PROGRESS: 2451 return; 2452 case MPI3_SYSIF_FAULT_CODE_CI_ACTIVATION_RESET: 2453 reset_reason = MPI3MR_RESET_FROM_CIACTIV_FAULT; 2454 break; 2455 default: 2456 break; 2457 } 2458 mpi3mr_soft_reset_handler(mrioc, reset_reason, 0); 2459 return; 2460 2461 schedule_work: 2462 spin_lock_irqsave(&mrioc->watchdog_lock, flags); 2463 if (mrioc->watchdog_work_q) 2464 queue_delayed_work(mrioc->watchdog_work_q, 2465 &mrioc->watchdog_work, 2466 msecs_to_jiffies(MPI3MR_WATCHDOG_INTERVAL)); 2467 spin_unlock_irqrestore(&mrioc->watchdog_lock, flags); 2468 return; 2469 } 2470 2471 /** 2472 * mpi3mr_start_watchdog - Start watchdog 2473 * @mrioc: Adapter instance reference 2474 * 2475 * Create and start the watchdog thread to monitor controller 2476 * faults. 2477 * 2478 * Return: Nothing. 2479 */ 2480 void mpi3mr_start_watchdog(struct mpi3mr_ioc *mrioc) 2481 { 2482 if (mrioc->watchdog_work_q) 2483 return; 2484 2485 INIT_DELAYED_WORK(&mrioc->watchdog_work, mpi3mr_watchdog_work); 2486 snprintf(mrioc->watchdog_work_q_name, 2487 sizeof(mrioc->watchdog_work_q_name), "watchdog_%s%d", mrioc->name, 2488 mrioc->id); 2489 mrioc->watchdog_work_q = 2490 create_singlethread_workqueue(mrioc->watchdog_work_q_name); 2491 if (!mrioc->watchdog_work_q) { 2492 ioc_err(mrioc, "%s: failed (line=%d)\n", __func__, __LINE__); 2493 return; 2494 } 2495 2496 if (mrioc->watchdog_work_q) 2497 queue_delayed_work(mrioc->watchdog_work_q, 2498 &mrioc->watchdog_work, 2499 msecs_to_jiffies(MPI3MR_WATCHDOG_INTERVAL)); 2500 } 2501 2502 /** 2503 * mpi3mr_stop_watchdog - Stop watchdog 2504 * @mrioc: Adapter instance reference 2505 * 2506 * Stop the watchdog thread created to monitor controller 2507 * faults. 2508 * 2509 * Return: Nothing. 
 */
void mpi3mr_stop_watchdog(struct mpi3mr_ioc *mrioc)
{
	unsigned long flags;
	struct workqueue_struct *wq;

	/* Clear the pointer under the lock so a concurrently running
	 * watchdog_work cannot re-queue itself after this point. */
	spin_lock_irqsave(&mrioc->watchdog_lock, flags);
	wq = mrioc->watchdog_work_q;
	mrioc->watchdog_work_q = NULL;
	spin_unlock_irqrestore(&mrioc->watchdog_lock, flags);
	if (wq) {
		/* If cancel missed an in-flight execution, flush it. */
		if (!cancel_delayed_work_sync(&mrioc->watchdog_work))
			flush_workqueue(wq);
		destroy_workqueue(wq);
	}
}

/**
 * mpi3mr_setup_admin_qpair - Setup admin queue pair
 * @mrioc: Adapter instance reference
 *
 * Allocate memory for admin queue pair if required and register
 * the admin queue with the controller.
 *
 * Return: 0 on success, non-zero on failures.
 */
static int mpi3mr_setup_admin_qpair(struct mpi3mr_ioc *mrioc)
{
	int retval = 0;
	u32 num_admin_entries = 0;

	mrioc->admin_req_q_sz = MPI3MR_ADMIN_REQ_Q_SIZE;
	mrioc->num_admin_req = mrioc->admin_req_q_sz /
	    MPI3MR_ADMIN_REQ_FRAME_SZ;
	mrioc->admin_req_ci = mrioc->admin_req_pi = 0;
	mrioc->admin_req_base = NULL;

	mrioc->admin_reply_q_sz = MPI3MR_ADMIN_REPLY_Q_SIZE;
	mrioc->num_admin_replies = mrioc->admin_reply_q_sz /
	    MPI3MR_ADMIN_REPLY_FRAME_SZ;
	mrioc->admin_reply_ci = 0;
	mrioc->admin_reply_ephase = 1;
	mrioc->admin_reply_base = NULL;

	/* NOTE(review): admin_req_base was set to NULL just above, so this
	 * condition is always true as written — verify against upstream
	 * whether the reset-reuse path was intended here. */
	if (!mrioc->admin_req_base) {
		mrioc->admin_req_base = dma_alloc_coherent(&mrioc->pdev->dev,
		    mrioc->admin_req_q_sz, &mrioc->admin_req_dma, GFP_KERNEL);

		if (!mrioc->admin_req_base) {
			retval = -1;
			goto out_failed;
		}

		mrioc->admin_reply_base = dma_alloc_coherent(&mrioc->pdev->dev,
		    mrioc->admin_reply_q_sz, &mrioc->admin_reply_dma,
		    GFP_KERNEL);

		if (!mrioc->admin_reply_base) {
			retval = -1;
			goto out_failed;
		}
	}

	/* Program queue depths (replies in high 16 bits, requests in low)
	 * and DMA base addresses into the system interface registers. */
	num_admin_entries = (mrioc->num_admin_replies << 16) |
	    (mrioc->num_admin_req);
	writel(num_admin_entries, &mrioc->sysif_regs->admin_queue_num_entries);
	mpi3mr_writeq(mrioc->admin_req_dma,
	    &mrioc->sysif_regs->admin_request_queue_address);
	mpi3mr_writeq(mrioc->admin_reply_dma,
	    &mrioc->sysif_regs->admin_reply_queue_address);
	writel(mrioc->admin_req_pi, &mrioc->sysif_regs->admin_request_queue_pi);
	writel(mrioc->admin_reply_ci, &mrioc->sysif_regs->admin_reply_queue_ci);
	return retval;

out_failed:

	if (mrioc->admin_reply_base) {
		dma_free_coherent(&mrioc->pdev->dev, mrioc->admin_reply_q_sz,
		    mrioc->admin_reply_base, mrioc->admin_reply_dma);
		mrioc->admin_reply_base = NULL;
	}
	if (mrioc->admin_req_base) {
		dma_free_coherent(&mrioc->pdev->dev, mrioc->admin_req_q_sz,
		    mrioc->admin_req_base, mrioc->admin_req_dma);
		mrioc->admin_req_base = NULL;
	}
	return retval;
}

/**
 * mpi3mr_issue_iocfacts - Send IOC Facts
 * @mrioc: Adapter instance reference
 * @facts_data: Cached IOC facts data
 *
 * Issue IOC Facts MPI request through admin queue and wait for
 * the completion of it or time out.
 *
 * Return: 0 on success, non-zero on failures.
 */
static int mpi3mr_issue_iocfacts(struct mpi3mr_ioc *mrioc,
	struct mpi3_ioc_facts_data *facts_data)
{
	struct mpi3_ioc_facts_request iocfacts_req;
	void *data = NULL;
	dma_addr_t data_dma;
	u32 data_len = sizeof(*facts_data);
	int retval = 0;
	u8 sgl_flags = MPI3MR_SGEFLAGS_SYSTEM_SIMPLE_END_OF_LIST;

	/* DMA buffer into which firmware writes the facts payload. */
	data = dma_alloc_coherent(&mrioc->pdev->dev, data_len, &data_dma,
	    GFP_KERNEL);

	if (!data) {
		retval = -1;
		goto out;
	}

	memset(&iocfacts_req, 0, sizeof(iocfacts_req));
	mutex_lock(&mrioc->init_cmds.mutex);
	if (mrioc->init_cmds.state & MPI3MR_CMD_PENDING) {
		retval = -1;
		ioc_err(mrioc, "Issue IOCFacts: Init command is in use\n");
		mutex_unlock(&mrioc->init_cmds.mutex);
		goto out;
	}
	mrioc->init_cmds.state = MPI3MR_CMD_PENDING;
	mrioc->init_cmds.is_waiting = 1;
	mrioc->init_cmds.callback = NULL;
	iocfacts_req.host_tag = cpu_to_le16(MPI3MR_HOSTTAG_INITCMDS);
	iocfacts_req.function = MPI3_FUNCTION_IOC_FACTS;

	mpi3mr_add_sg_single(&iocfacts_req.sgl, sgl_flags, data_len,
	    data_dma);

	init_completion(&mrioc->init_cmds.done);
	retval = mpi3mr_admin_request_post(mrioc, &iocfacts_req,
	    sizeof(iocfacts_req), 1);
	if (retval) {
		ioc_err(mrioc, "Issue IOCFacts: Admin Post failed\n");
		goto out_unlock;
	}
	wait_for_completion_timeout(&mrioc->init_cmds.done,
	    (MPI3MR_INTADMCMD_TIMEOUT * HZ));
	if (!(mrioc->init_cmds.state & MPI3MR_CMD_COMPLETE)) {
		ioc_err(mrioc, "ioc_facts timed out\n");
		/* Init-time timeout: fault the IOC instead of soft reset. */
		mpi3mr_check_rh_fault_ioc(mrioc,
		    MPI3MR_RESET_FROM_IOCFACTS_TIMEOUT);
		retval = -1;
		goto out_unlock;
	}
	if ((mrioc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK)
	    != MPI3_IOCSTATUS_SUCCESS) {
		ioc_err(mrioc,
		    "Issue IOCFacts: Failed ioc_status(0x%04x) Loginfo(0x%08x)\n",
		    (mrioc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK),
		    mrioc->init_cmds.ioc_loginfo);
		retval = -1;
		goto out_unlock;
	}
	/* Copy out of the DMA buffer, then cache in cpu endianness. */
	memcpy(facts_data, (u8 *)data, data_len);
	mpi3mr_process_factsdata(mrioc, facts_data);
out_unlock:
	mrioc->init_cmds.state = MPI3MR_CMD_NOTUSED;
	mutex_unlock(&mrioc->init_cmds.mutex);

out:
	if (data)
		dma_free_coherent(&mrioc->pdev->dev, data_len, data, data_dma);

	return retval;
}

/**
 * mpi3mr_check_reset_dma_mask - Process IOC facts data
 * @mrioc: Adapter instance reference
 *
 * Check whether the new DMA mask requested through IOCFacts by
 * firmware needs to be set, if so set it.
 *
 * Return: 0 on success, non-zero on failure.
 */
static inline int mpi3mr_check_reset_dma_mask(struct mpi3mr_ioc *mrioc)
{
	struct pci_dev *pdev = mrioc->pdev;
	int r;
	u64 facts_dma_mask = DMA_BIT_MASK(mrioc->facts.dma_mask);

	/* Only narrow the mask; a current mask already within the
	 * firmware-requested width needs no change. */
	if (!mrioc->facts.dma_mask || (mrioc->dma_mask <= facts_dma_mask))
		return 0;

	ioc_info(mrioc, "Changing DMA mask from 0x%016llx to 0x%016llx\n",
	    mrioc->dma_mask, facts_dma_mask);

	r = dma_set_mask_and_coherent(&pdev->dev, facts_dma_mask);
	if (r) {
		ioc_err(mrioc, "Setting DMA mask to 0x%016llx failed: %d\n",
		    facts_dma_mask, r);
		return r;
	}
	mrioc->dma_mask = facts_dma_mask;
	return r;
}

/**
 * mpi3mr_process_factsdata - Process IOC facts data
 * @mrioc: Adapter instance reference
 * @facts_data: Cached IOC facts data
 *
 * Convert IOC facts data into cpu endianness and cache it in
 * the driver.
 *
 * Return: Nothing.
 */
static void mpi3mr_process_factsdata(struct mpi3mr_ioc *mrioc,
	struct mpi3_ioc_facts_data *facts_data)
{
	u32 ioc_config, req_sz, facts_flags;

	/* Sanity: firmware reports the payload length in 4-byte words. */
	if ((le16_to_cpu(facts_data->ioc_facts_data_length)) !=
	    (sizeof(*facts_data) / 4)) {
		ioc_warn(mrioc,
		    "IOCFactsdata length mismatch driver_sz(%zu) firmware_sz(%d)\n",
		    sizeof(*facts_data),
		    le16_to_cpu(facts_data->ioc_facts_data_length) * 4);
	}

	/* Operational request entry size comes from the IOC config
	 * register (log2-encoded); cross-check against the facts value. */
	ioc_config = readl(&mrioc->sysif_regs->ioc_configuration);
	req_sz = 1 << ((ioc_config & MPI3_SYSIF_IOC_CONFIG_OPER_REQ_ENT_SZ) >>
	    MPI3_SYSIF_IOC_CONFIG_OPER_REQ_ENT_SZ_SHIFT);
	if (le16_to_cpu(facts_data->ioc_request_frame_size) != (req_sz / 4)) {
		ioc_err(mrioc,
		    "IOCFacts data reqFrameSize mismatch hw_size(%d) firmware_sz(%d)\n",
		    req_sz / 4, le16_to_cpu(facts_data->ioc_request_frame_size));
	}

	memset(&mrioc->facts, 0, sizeof(mrioc->facts));

	/* Field-by-field conversion from little-endian wire format into
	 * the driver's cpu-endian cached copy. */
	facts_flags = le32_to_cpu(facts_data->flags);
	mrioc->facts.op_req_sz = req_sz;
	mrioc->op_reply_desc_sz = 1 << ((ioc_config &
	    MPI3_SYSIF_IOC_CONFIG_OPER_RPY_ENT_SZ) >>
	    MPI3_SYSIF_IOC_CONFIG_OPER_RPY_ENT_SZ_SHIFT);

	mrioc->facts.ioc_num = facts_data->ioc_number;
	mrioc->facts.who_init = facts_data->who_init;
	mrioc->facts.max_msix_vectors = le16_to_cpu(facts_data->max_msix_vectors);
	mrioc->facts.personality = (facts_flags &
	    MPI3_IOCFACTS_FLAGS_PERSONALITY_MASK);
	mrioc->facts.dma_mask = (facts_flags &
	    MPI3_IOCFACTS_FLAGS_DMA_ADDRESS_WIDTH_MASK) >>
	    MPI3_IOCFACTS_FLAGS_DMA_ADDRESS_WIDTH_SHIFT;
	mrioc->facts.protocol_flags = facts_data->protocol_flags;
	mrioc->facts.mpi_version = le32_to_cpu(facts_data->mpi_version.word);
	mrioc->facts.max_reqs = le16_to_cpu(facts_data->max_outstanding_requests);
	mrioc->facts.product_id = le16_to_cpu(facts_data->product_id);
	mrioc->facts.reply_sz = le16_to_cpu(facts_data->reply_frame_size) * 4;
	mrioc->facts.exceptions = le16_to_cpu(facts_data->ioc_exceptions);
	mrioc->facts.max_perids = le16_to_cpu(facts_data->max_persistent_id);
	mrioc->facts.max_vds = le16_to_cpu(facts_data->max_vds);
	mrioc->facts.max_hpds = le16_to_cpu(facts_data->max_host_pds);
	mrioc->facts.max_advhpds = le16_to_cpu(facts_data->max_adv_host_pds);
	mrioc->facts.max_raid_pds = le16_to_cpu(facts_data->max_raid_pds);
	mrioc->facts.max_nvme = le16_to_cpu(facts_data->max_nvme);
	mrioc->facts.max_pcie_switches =
	    le16_to_cpu(facts_data->max_pcie_switches);
	mrioc->facts.max_sasexpanders =
	    le16_to_cpu(facts_data->max_sas_expanders);
	mrioc->facts.max_sasinitiators =
	    le16_to_cpu(facts_data->max_sas_initiators);
	mrioc->facts.max_enclosures = le16_to_cpu(facts_data->max_enclosures);
	mrioc->facts.min_devhandle = le16_to_cpu(facts_data->min_dev_handle);
	mrioc->facts.max_devhandle = le16_to_cpu(facts_data->max_dev_handle);
	mrioc->facts.max_op_req_q =
	    le16_to_cpu(facts_data->max_operational_request_queues);
	mrioc->facts.max_op_reply_q =
	    le16_to_cpu(facts_data->max_operational_reply_queues);
	mrioc->facts.ioc_capabilities =
	    le32_to_cpu(facts_data->ioc_capabilities);
	mrioc->facts.fw_ver.build_num =
	    le16_to_cpu(facts_data->fw_version.build_num);
	mrioc->facts.fw_ver.cust_id =
	    le16_to_cpu(facts_data->fw_version.customer_id);
	mrioc->facts.fw_ver.ph_minor = facts_data->fw_version.phase_minor;
	mrioc->facts.fw_ver.ph_major = facts_data->fw_version.phase_major;
	mrioc->facts.fw_ver.gen_minor = facts_data->fw_version.gen_minor;
	mrioc->facts.fw_ver.gen_major = facts_data->fw_version.gen_major;
	/* Clamp the driver's MSI-X usage to what firmware supports. */
	mrioc->msix_count = min_t(int, mrioc->msix_count,
	    mrioc->facts.max_msix_vectors);
	mrioc->facts.sge_mod_mask = facts_data->sge_modifier_mask;
	mrioc->facts.sge_mod_value = facts_data->sge_modifier_value;
	mrioc->facts.sge_mod_shift = facts_data->sge_modifier_shift;
	mrioc->facts.shutdown_timeout =
	    le16_to_cpu(facts_data->shutdown_timeout);

	mrioc->facts.max_dev_per_tg =
	    facts_data->max_devices_per_throttle_group;
	mrioc->facts.io_throttle_data_length =
	    le16_to_cpu(facts_data->io_throttle_data_length);
	mrioc->facts.max_io_throttle_group =
	    le16_to_cpu(facts_data->max_io_throttle_group);
	mrioc->facts.io_throttle_low = le16_to_cpu(facts_data->io_throttle_low);
	mrioc->facts.io_throttle_high =
	    le16_to_cpu(facts_data->io_throttle_high);

	/* Store in 512b block count */
	if (mrioc->facts.io_throttle_data_length)
		mrioc->io_throttle_data_length =
		    (mrioc->facts.io_throttle_data_length * 2 * 4);
	else
		/* set the length to 1MB + 1K to disable throttle */
		mrioc->io_throttle_data_length = MPI3MR_MAX_SECTORS + 2;

	mrioc->io_throttle_high = (mrioc->facts.io_throttle_high * 2 * 1024);
	mrioc->io_throttle_low = (mrioc->facts.io_throttle_low * 2 * 1024);

	ioc_info(mrioc, "ioc_num(%d), maxopQ(%d), maxopRepQ(%d), maxdh(%d),",
	    mrioc->facts.ioc_num, mrioc->facts.max_op_req_q,
	    mrioc->facts.max_op_reply_q, mrioc->facts.max_devhandle);
	ioc_info(mrioc,
	    "maxreqs(%d), mindh(%d) maxvectors(%d) maxperids(%d)\n",
	    mrioc->facts.max_reqs, mrioc->facts.min_devhandle,
	    mrioc->facts.max_msix_vectors, mrioc->facts.max_perids);
	ioc_info(mrioc, "SGEModMask 0x%x SGEModVal 0x%x SGEModShift 0x%x ",
	    mrioc->facts.sge_mod_mask, mrioc->facts.sge_mod_value,
	    mrioc->facts.sge_mod_shift);
	ioc_info(mrioc, "DMA mask %d InitialPE status 0x%x\n",
	    mrioc->facts.dma_mask, (facts_flags &
	    MPI3_IOCFACTS_FLAGS_INITIAL_PORT_ENABLE_MASK));
	ioc_info(mrioc,
	    "max_dev_per_throttle_group(%d), max_throttle_groups(%d)\n",
	    mrioc->facts.max_dev_per_tg, mrioc->facts.max_io_throttle_group);
	ioc_info(mrioc,
	    "io_throttle_data_len(%dKiB), io_throttle_high(%dMiB), io_throttle_low(%dMiB)\n",
	    mrioc->facts.io_throttle_data_length * 4,
	    mrioc->facts.io_throttle_high, mrioc->facts.io_throttle_low);
}

/**
 * mpi3mr_alloc_reply_sense_bufs - Send IOC Init
 * @mrioc: Adapter instance reference
 *
 * Allocate and initialize the reply free buffers, sense
 * buffers, reply free queue and sense buffer queue.
 *
 * Return: 0 on success, non-zero on failures.
 */
static int mpi3mr_alloc_reply_sense_bufs(struct mpi3mr_ioc *mrioc)
{
	int retval = 0;
	u32 sz, i;

	/* Already allocated (e.g. re-init after reset): nothing to do. */
	if (mrioc->init_cmds.reply)
		return retval;

	mrioc->init_cmds.reply = kzalloc(mrioc->reply_sz, GFP_KERNEL);
	if (!mrioc->init_cmds.reply)
		goto out_failed;

	mrioc->bsg_cmds.reply = kzalloc(mrioc->reply_sz, GFP_KERNEL);
	if (!mrioc->bsg_cmds.reply)
		goto out_failed;

	mrioc->transport_cmds.reply = kzalloc(mrioc->reply_sz, GFP_KERNEL);
	if (!mrioc->transport_cmds.reply)
		goto out_failed;

	for (i = 0; i < MPI3MR_NUM_DEVRMCMD; i++) {
		mrioc->dev_rmhs_cmds[i].reply = kzalloc(mrioc->reply_sz,
		    GFP_KERNEL);
		if (!mrioc->dev_rmhs_cmds[i].reply)
			goto out_failed;
	}

	for (i = 0; i < MPI3MR_NUM_EVTACKCMD; i++) {
		mrioc->evtack_cmds[i].reply = kzalloc(mrioc->reply_sz,
		    GFP_KERNEL);
		if (!mrioc->evtack_cmds[i].reply)
			goto out_failed;
	}

	mrioc->host_tm_cmds.reply = kzalloc(mrioc->reply_sz, GFP_KERNEL);
	if (!mrioc->host_tm_cmds.reply)
		goto out_failed;

	mrioc->pel_cmds.reply = kzalloc(mrioc->reply_sz, GFP_KERNEL);
	if (!mrioc->pel_cmds.reply)
		goto out_failed;

	mrioc->pel_abort_cmd.reply = kzalloc(mrioc->reply_sz, GFP_KERNEL);
	if (!mrioc->pel_abort_cmd.reply)
		goto out_failed;

	/* Bitmaps sized in bytes, rounded up to cover all bits. */
	mrioc->dev_handle_bitmap_sz = mrioc->facts.max_devhandle / 8;
	if (mrioc->facts.max_devhandle % 8)
		mrioc->dev_handle_bitmap_sz++;
	mrioc->removepend_bitmap = kzalloc(mrioc->dev_handle_bitmap_sz,
	    GFP_KERNEL);
	if (!mrioc->removepend_bitmap)
		goto out_failed;

	mrioc->devrem_bitmap_sz = MPI3MR_NUM_DEVRMCMD / 8;
	if (MPI3MR_NUM_DEVRMCMD % 8)
		mrioc->devrem_bitmap_sz++;
	mrioc->devrem_bitmap = kzalloc(mrioc->devrem_bitmap_sz,
	    GFP_KERNEL);
	if (!mrioc->devrem_bitmap)
		goto out_failed;

	mrioc->evtack_cmds_bitmap_sz = MPI3MR_NUM_EVTACKCMD / 8;
	if (MPI3MR_NUM_EVTACKCMD % 8)
		mrioc->evtack_cmds_bitmap_sz++;
	mrioc->evtack_cmds_bitmap = kzalloc(mrioc->evtack_cmds_bitmap_sz,
	    GFP_KERNEL);
	if (!mrioc->evtack_cmds_bitmap)
		goto out_failed;

	/* Queue depths: +1 slot keeps producer/consumer indexes from
	 * aliasing when the queue is full. */
	mrioc->num_reply_bufs = mrioc->facts.max_reqs + MPI3MR_NUM_EVT_REPLIES;
	mrioc->reply_free_qsz = mrioc->num_reply_bufs + 1;
	mrioc->num_sense_bufs = mrioc->facts.max_reqs / MPI3MR_SENSEBUF_FACTOR;
	mrioc->sense_buf_q_sz = mrioc->num_sense_bufs + 1;

	/* reply buffer pool, 16 byte align */
	sz = mrioc->num_reply_bufs * mrioc->reply_sz;
	mrioc->reply_buf_pool = dma_pool_create("reply_buf pool",
	    &mrioc->pdev->dev, sz, 16, 0);
	if (!mrioc->reply_buf_pool) {
		ioc_err(mrioc, "reply buf pool: dma_pool_create failed\n");
		goto out_failed;
	}

	mrioc->reply_buf = dma_pool_zalloc(mrioc->reply_buf_pool, GFP_KERNEL,
	    &mrioc->reply_buf_dma);
	if (!mrioc->reply_buf)
		goto out_failed;

	mrioc->reply_buf_dma_max_address = mrioc->reply_buf_dma + sz;

	/* reply free queue, 8 byte align */
	sz = mrioc->reply_free_qsz * 8;
	mrioc->reply_free_q_pool = dma_pool_create("reply_free_q pool",
	    &mrioc->pdev->dev, sz, 8, 0);
	if (!mrioc->reply_free_q_pool) {
		ioc_err(mrioc, "reply_free_q pool: dma_pool_create failed\n");
		goto out_failed;
	}
	mrioc->reply_free_q = dma_pool_zalloc(mrioc->reply_free_q_pool,
	    GFP_KERNEL, &mrioc->reply_free_q_dma);
	if (!mrioc->reply_free_q)
		goto out_failed;

	/* sense buffer pool, 4 byte align */
	sz = mrioc->num_sense_bufs * MPI3MR_SENSE_BUF_SZ;
	mrioc->sense_buf_pool = dma_pool_create("sense_buf pool",
	    &mrioc->pdev->dev, sz, 4, 0);
	if (!mrioc->sense_buf_pool) {
		ioc_err(mrioc, "sense_buf pool: dma_pool_create failed\n");
		goto out_failed;
	}
	mrioc->sense_buf = dma_pool_zalloc(mrioc->sense_buf_pool, GFP_KERNEL,
	    &mrioc->sense_buf_dma);
	if (!mrioc->sense_buf)
		goto out_failed;

	/* sense buffer queue, 8 byte align */
	sz = mrioc->sense_buf_q_sz * 8;
	mrioc->sense_buf_q_pool = dma_pool_create("sense_buf_q pool",
	    &mrioc->pdev->dev, sz, 8, 0);
	if (!mrioc->sense_buf_q_pool) {
		ioc_err(mrioc, "sense_buf_q pool: dma_pool_create failed\n");
		goto out_failed;
	}
	mrioc->sense_buf_q = dma_pool_zalloc(mrioc->sense_buf_q_pool,
	    GFP_KERNEL, &mrioc->sense_buf_q_dma);
	if (!mrioc->sense_buf_q)
		goto out_failed;

	return retval;

out_failed:
	/* Partial allocations are released by the caller's teardown path. */
	retval = -1;
	return retval;
}

/**
 * mpimr_initialize_reply_sbuf_queues - initialize reply sense
 * buffers
 * @mrioc: Adapter instance reference
 *
 * Helper function to initialize reply and sense buffers along
 * with some debug prints.
 *
 * Return: None.
 */
static void mpimr_initialize_reply_sbuf_queues(struct mpi3mr_ioc *mrioc)
{
	u32 sz, i;
	dma_addr_t phy_addr;

	/* Debug prints: pool sizes below are reported in kB */
	sz = mrioc->num_reply_bufs * mrioc->reply_sz;
	ioc_info(mrioc,
	    "reply buf pool(0x%p): depth(%d), frame_size(%d), pool_size(%d kB), reply_dma(0x%llx)\n",
	    mrioc->reply_buf, mrioc->num_reply_bufs, mrioc->reply_sz,
	    (sz / 1024), (unsigned long long)mrioc->reply_buf_dma);
	sz = mrioc->reply_free_qsz * 8;
	ioc_info(mrioc,
	    "reply_free_q pool(0x%p): depth(%d), frame_size(%d), pool_size(%d kB), reply_dma(0x%llx)\n",
	    mrioc->reply_free_q, mrioc->reply_free_qsz, 8, (sz / 1024),
	    (unsigned long long)mrioc->reply_free_q_dma);
	sz = mrioc->num_sense_bufs * MPI3MR_SENSE_BUF_SZ;
	ioc_info(mrioc,
	    "sense_buf pool(0x%p): depth(%d), frame_size(%d), pool_size(%d kB), sense_dma(0x%llx)\n",
	    mrioc->sense_buf, mrioc->num_sense_bufs, MPI3MR_SENSE_BUF_SZ,
	    (sz / 1024), (unsigned long long)mrioc->sense_buf_dma);
	sz = mrioc->sense_buf_q_sz * 8;
	ioc_info(mrioc,
	    "sense_buf_q pool(0x%p): depth(%d), frame_size(%d), pool_size(%d kB), sense_dma(0x%llx)\n",
	    mrioc->sense_buf_q, mrioc->sense_buf_q_sz, 8, (sz / 1024),
	    (unsigned long long)mrioc->sense_buf_q_dma);

	/*
	 * initialize Reply buffer Queue: fill every entry with the DMA
	 * address of a reply frame; the entry after the last one is
	 * zeroed (the queue is sized one larger than num_reply_bufs).
	 */
	for (i = 0, phy_addr = mrioc->reply_buf_dma;
	    i < mrioc->num_reply_bufs; i++, phy_addr += mrioc->reply_sz)
		mrioc->reply_free_q[i] = cpu_to_le64(phy_addr);
	mrioc->reply_free_q[i] = cpu_to_le64(0);

	/* initialize Sense Buffer Queue the same way */
	for (i = 0, phy_addr = mrioc->sense_buf_dma;
	    i < mrioc->num_sense_bufs; i++, phy_addr += MPI3MR_SENSE_BUF_SZ)
		mrioc->sense_buf_q[i] = cpu_to_le64(phy_addr);
	mrioc->sense_buf_q[i] = cpu_to_le64(0);
}

/**
 * mpi3mr_issue_iocinit - Send IOC Init
 * @mrioc: Adapter instance reference
 *
 * Issue IOC Init MPI request through admin queue and wait for
 * the completion of it or time out. Also populates the driver
 * information layout (OS/driver name and versions) that is
 * passed to the firmware with the request.
 *
 * Return: 0 on success, non-zero on failures.
 */
static int mpi3mr_issue_iocinit(struct mpi3mr_ioc *mrioc)
{
	struct mpi3_ioc_init_request iocinit_req;
	struct mpi3_driver_info_layout *drv_info;
	dma_addr_t data_dma;
	u32 data_len = sizeof(*drv_info);
	int retval = 0;
	ktime_t current_time;

	drv_info = dma_alloc_coherent(&mrioc->pdev->dev, data_len, &data_dma,
	    GFP_KERNEL);
	if (!drv_info) {
		retval = -1;
		goto out;
	}
	/* reply/sense queues must be (re)primed before IOC Init is sent */
	mpimr_initialize_reply_sbuf_queues(mrioc);

	drv_info->information_length = cpu_to_le32(data_len);
	strscpy(drv_info->driver_signature, "Broadcom", sizeof(drv_info->driver_signature));
	strscpy(drv_info->os_name, utsname()->sysname, sizeof(drv_info->os_name));
	strscpy(drv_info->os_version, utsname()->release, sizeof(drv_info->os_version));
	strscpy(drv_info->driver_name, MPI3MR_DRIVER_NAME, sizeof(drv_info->driver_name));
	strscpy(drv_info->driver_version, MPI3MR_DRIVER_VERSION, sizeof(drv_info->driver_version));
	strscpy(drv_info->driver_release_date, MPI3MR_DRIVER_RELDATE,
	    sizeof(drv_info->driver_release_date));
	drv_info->driver_capabilities = 0;
	/* keep a host-side copy of what was reported to the firmware */
	memcpy((u8 *)&mrioc->driver_info, (u8 *)drv_info,
	    sizeof(mrioc->driver_info));

	memset(&iocinit_req, 0, sizeof(iocinit_req));
	mutex_lock(&mrioc->init_cmds.mutex);
	if (mrioc->init_cmds.state & MPI3MR_CMD_PENDING) {
		retval = -1;
		ioc_err(mrioc, "Issue IOCInit: Init command is in use\n");
		mutex_unlock(&mrioc->init_cmds.mutex);
		goto out;
	}
	mrioc->init_cmds.state = MPI3MR_CMD_PENDING;
	mrioc->init_cmds.is_waiting = 1;
	mrioc->init_cmds.callback = NULL;
	iocinit_req.host_tag = cpu_to_le16(MPI3MR_HOSTTAG_INITCMDS);
	iocinit_req.function = MPI3_FUNCTION_IOC_INIT;
	iocinit_req.mpi_version.mpi3_version.dev = MPI3_VERSION_DEV;
	iocinit_req.mpi_version.mpi3_version.unit = MPI3_VERSION_UNIT;
	iocinit_req.mpi_version.mpi3_version.major = MPI3_VERSION_MAJOR;
	iocinit_req.mpi_version.mpi3_version.minor = MPI3_VERSION_MINOR;
	iocinit_req.who_init = MPI3_WHOINIT_HOST_DRIVER;
	iocinit_req.reply_free_queue_depth = cpu_to_le16(mrioc->reply_free_qsz);
	iocinit_req.reply_free_queue_address =
	    cpu_to_le64(mrioc->reply_free_q_dma);
	iocinit_req.sense_buffer_length = cpu_to_le16(MPI3MR_SENSE_BUF_SZ);
	iocinit_req.sense_buffer_free_queue_depth =
	    cpu_to_le16(mrioc->sense_buf_q_sz);
	iocinit_req.sense_buffer_free_queue_address =
	    cpu_to_le64(mrioc->sense_buf_q_dma);
	iocinit_req.driver_information_address = cpu_to_le64(data_dma);

	/* wall-clock timestamp in milliseconds for the firmware */
	current_time = ktime_get_real();
	iocinit_req.time_stamp = cpu_to_le64(ktime_to_ms(current_time));

	init_completion(&mrioc->init_cmds.done);
	retval = mpi3mr_admin_request_post(mrioc, &iocinit_req,
	    sizeof(iocinit_req), 1);
	if (retval) {
		ioc_err(mrioc, "Issue IOCInit: Admin Post failed\n");
		goto out_unlock;
	}
	wait_for_completion_timeout(&mrioc->init_cmds.done,
	    (MPI3MR_INTADMCMD_TIMEOUT * HZ));
	if (!(mrioc->init_cmds.state & MPI3MR_CMD_COMPLETE)) {
		mpi3mr_check_rh_fault_ioc(mrioc,
		    MPI3MR_RESET_FROM_IOCINIT_TIMEOUT);
		ioc_err(mrioc, "ioc_init timed out\n");
		retval = -1;
		goto out_unlock;
	}
	if ((mrioc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK)
	    != MPI3_IOCSTATUS_SUCCESS) {
		ioc_err(mrioc,
		    "Issue IOCInit: Failed ioc_status(0x%04x) Loginfo(0x%08x)\n",
		    (mrioc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK),
		    mrioc->init_cmds.ioc_loginfo);
		retval = -1;
		goto out_unlock;
	}

	/*
	 * Publish the host indices so the firmware sees all reply/sense
	 * buffers as free after a successful IOC Init.
	 */
	mrioc->reply_free_queue_host_index = mrioc->num_reply_bufs;
	writel(mrioc->reply_free_queue_host_index,
	    &mrioc->sysif_regs->reply_free_host_index);

	mrioc->sbq_host_index = mrioc->num_sense_bufs;
	writel(mrioc->sbq_host_index,
	    &mrioc->sysif_regs->sense_buffer_free_host_index);
out_unlock:
	mrioc->init_cmds.state = MPI3MR_CMD_NOTUSED;
	mutex_unlock(&mrioc->init_cmds.mutex);

out:
	if (drv_info)
		dma_free_coherent(&mrioc->pdev->dev, data_len, drv_info,
		    data_dma);

	return retval;
}

/**
 * mpi3mr_unmask_events - Unmask events in event mask bitmap
 * @mrioc: Adapter instance reference
 * @event: MPI event ID
 *
 * Un mask the specific event by resetting the event_mask
 * bitmap.
 *
 * Return: Nothing.
 */
static void mpi3mr_unmask_events(struct mpi3mr_ioc *mrioc, u16 event)
{
	u32 desired_event;
	u8 word;

	/* only 128 event IDs are representable in the 4x32-bit mask */
	if (event >= 128)
		return;

	desired_event = (1 << (event % 32));
	word = event / 32;

	mrioc->event_masks[word] &= ~desired_event;
}

/**
 * mpi3mr_issue_event_notification - Send event notification
 * @mrioc: Adapter instance reference
 *
 * Issue event notification MPI request through admin queue and
 * wait for the completion of it or time out.
 *
 * Return: 0 on success, non-zero on failures.
3188 */ 3189 static int mpi3mr_issue_event_notification(struct mpi3mr_ioc *mrioc) 3190 { 3191 struct mpi3_event_notification_request evtnotify_req; 3192 int retval = 0; 3193 u8 i; 3194 3195 memset(&evtnotify_req, 0, sizeof(evtnotify_req)); 3196 mutex_lock(&mrioc->init_cmds.mutex); 3197 if (mrioc->init_cmds.state & MPI3MR_CMD_PENDING) { 3198 retval = -1; 3199 ioc_err(mrioc, "Issue EvtNotify: Init command is in use\n"); 3200 mutex_unlock(&mrioc->init_cmds.mutex); 3201 goto out; 3202 } 3203 mrioc->init_cmds.state = MPI3MR_CMD_PENDING; 3204 mrioc->init_cmds.is_waiting = 1; 3205 mrioc->init_cmds.callback = NULL; 3206 evtnotify_req.host_tag = cpu_to_le16(MPI3MR_HOSTTAG_INITCMDS); 3207 evtnotify_req.function = MPI3_FUNCTION_EVENT_NOTIFICATION; 3208 for (i = 0; i < MPI3_EVENT_NOTIFY_EVENTMASK_WORDS; i++) 3209 evtnotify_req.event_masks[i] = 3210 cpu_to_le32(mrioc->event_masks[i]); 3211 init_completion(&mrioc->init_cmds.done); 3212 retval = mpi3mr_admin_request_post(mrioc, &evtnotify_req, 3213 sizeof(evtnotify_req), 1); 3214 if (retval) { 3215 ioc_err(mrioc, "Issue EvtNotify: Admin Post failed\n"); 3216 goto out_unlock; 3217 } 3218 wait_for_completion_timeout(&mrioc->init_cmds.done, 3219 (MPI3MR_INTADMCMD_TIMEOUT * HZ)); 3220 if (!(mrioc->init_cmds.state & MPI3MR_CMD_COMPLETE)) { 3221 ioc_err(mrioc, "event notification timed out\n"); 3222 mpi3mr_check_rh_fault_ioc(mrioc, 3223 MPI3MR_RESET_FROM_EVTNOTIFY_TIMEOUT); 3224 retval = -1; 3225 goto out_unlock; 3226 } 3227 if ((mrioc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK) 3228 != MPI3_IOCSTATUS_SUCCESS) { 3229 ioc_err(mrioc, 3230 "Issue EvtNotify: Failed ioc_status(0x%04x) Loginfo(0x%08x)\n", 3231 (mrioc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK), 3232 mrioc->init_cmds.ioc_loginfo); 3233 retval = -1; 3234 goto out_unlock; 3235 } 3236 3237 out_unlock: 3238 mrioc->init_cmds.state = MPI3MR_CMD_NOTUSED; 3239 mutex_unlock(&mrioc->init_cmds.mutex); 3240 out: 3241 return retval; 3242 } 3243 3244 /** 3245 * 
mpi3mr_process_event_ack - Process event acknowledgment 3246 * @mrioc: Adapter instance reference 3247 * @event: MPI3 event ID 3248 * @event_ctx: event context 3249 * 3250 * Send event acknowledgment through admin queue and wait for 3251 * it to complete. 3252 * 3253 * Return: 0 on success, non-zero on failures. 3254 */ 3255 int mpi3mr_process_event_ack(struct mpi3mr_ioc *mrioc, u8 event, 3256 u32 event_ctx) 3257 { 3258 struct mpi3_event_ack_request evtack_req; 3259 int retval = 0; 3260 3261 memset(&evtack_req, 0, sizeof(evtack_req)); 3262 mutex_lock(&mrioc->init_cmds.mutex); 3263 if (mrioc->init_cmds.state & MPI3MR_CMD_PENDING) { 3264 retval = -1; 3265 ioc_err(mrioc, "Send EvtAck: Init command is in use\n"); 3266 mutex_unlock(&mrioc->init_cmds.mutex); 3267 goto out; 3268 } 3269 mrioc->init_cmds.state = MPI3MR_CMD_PENDING; 3270 mrioc->init_cmds.is_waiting = 1; 3271 mrioc->init_cmds.callback = NULL; 3272 evtack_req.host_tag = cpu_to_le16(MPI3MR_HOSTTAG_INITCMDS); 3273 evtack_req.function = MPI3_FUNCTION_EVENT_ACK; 3274 evtack_req.event = event; 3275 evtack_req.event_context = cpu_to_le32(event_ctx); 3276 3277 init_completion(&mrioc->init_cmds.done); 3278 retval = mpi3mr_admin_request_post(mrioc, &evtack_req, 3279 sizeof(evtack_req), 1); 3280 if (retval) { 3281 ioc_err(mrioc, "Send EvtAck: Admin Post failed\n"); 3282 goto out_unlock; 3283 } 3284 wait_for_completion_timeout(&mrioc->init_cmds.done, 3285 (MPI3MR_INTADMCMD_TIMEOUT * HZ)); 3286 if (!(mrioc->init_cmds.state & MPI3MR_CMD_COMPLETE)) { 3287 ioc_err(mrioc, "Issue EvtNotify: command timed out\n"); 3288 if (!(mrioc->init_cmds.state & MPI3MR_CMD_RESET)) 3289 mpi3mr_soft_reset_handler(mrioc, 3290 MPI3MR_RESET_FROM_EVTACK_TIMEOUT, 1); 3291 retval = -1; 3292 goto out_unlock; 3293 } 3294 if ((mrioc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK) 3295 != MPI3_IOCSTATUS_SUCCESS) { 3296 ioc_err(mrioc, 3297 "Send EvtAck: Failed ioc_status(0x%04x) Loginfo(0x%08x)\n", 3298 (mrioc->init_cmds.ioc_status & 
MPI3_IOCSTATUS_STATUS_MASK), 3299 mrioc->init_cmds.ioc_loginfo); 3300 retval = -1; 3301 goto out_unlock; 3302 } 3303 3304 out_unlock: 3305 mrioc->init_cmds.state = MPI3MR_CMD_NOTUSED; 3306 mutex_unlock(&mrioc->init_cmds.mutex); 3307 out: 3308 return retval; 3309 } 3310 3311 /** 3312 * mpi3mr_alloc_chain_bufs - Allocate chain buffers 3313 * @mrioc: Adapter instance reference 3314 * 3315 * Allocate chain buffers and set a bitmap to indicate free 3316 * chain buffers. Chain buffers are used to pass the SGE 3317 * information along with MPI3 SCSI IO requests for host I/O. 3318 * 3319 * Return: 0 on success, non-zero on failure 3320 */ 3321 static int mpi3mr_alloc_chain_bufs(struct mpi3mr_ioc *mrioc) 3322 { 3323 int retval = 0; 3324 u32 sz, i; 3325 u16 num_chains; 3326 3327 if (mrioc->chain_sgl_list) 3328 return retval; 3329 3330 num_chains = mrioc->max_host_ios / MPI3MR_CHAINBUF_FACTOR; 3331 3332 if (prot_mask & (SHOST_DIX_TYPE0_PROTECTION 3333 | SHOST_DIX_TYPE1_PROTECTION 3334 | SHOST_DIX_TYPE2_PROTECTION 3335 | SHOST_DIX_TYPE3_PROTECTION)) 3336 num_chains += (num_chains / MPI3MR_CHAINBUFDIX_FACTOR); 3337 3338 mrioc->chain_buf_count = num_chains; 3339 sz = sizeof(struct chain_element) * num_chains; 3340 mrioc->chain_sgl_list = kzalloc(sz, GFP_KERNEL); 3341 if (!mrioc->chain_sgl_list) 3342 goto out_failed; 3343 3344 sz = MPI3MR_PAGE_SIZE_4K; 3345 mrioc->chain_buf_pool = dma_pool_create("chain_buf pool", 3346 &mrioc->pdev->dev, sz, 16, 0); 3347 if (!mrioc->chain_buf_pool) { 3348 ioc_err(mrioc, "chain buf pool: dma_pool_create failed\n"); 3349 goto out_failed; 3350 } 3351 3352 for (i = 0; i < num_chains; i++) { 3353 mrioc->chain_sgl_list[i].addr = 3354 dma_pool_zalloc(mrioc->chain_buf_pool, GFP_KERNEL, 3355 &mrioc->chain_sgl_list[i].dma_addr); 3356 3357 if (!mrioc->chain_sgl_list[i].addr) 3358 goto out_failed; 3359 } 3360 mrioc->chain_bitmap_sz = num_chains / 8; 3361 if (num_chains % 8) 3362 mrioc->chain_bitmap_sz++; 3363 mrioc->chain_bitmap = 
kzalloc(mrioc->chain_bitmap_sz, GFP_KERNEL); 3364 if (!mrioc->chain_bitmap) 3365 goto out_failed; 3366 return retval; 3367 out_failed: 3368 retval = -1; 3369 return retval; 3370 } 3371 3372 /** 3373 * mpi3mr_port_enable_complete - Mark port enable complete 3374 * @mrioc: Adapter instance reference 3375 * @drv_cmd: Internal command tracker 3376 * 3377 * Call back for asynchronous port enable request sets the 3378 * driver command to indicate port enable request is complete. 3379 * 3380 * Return: Nothing 3381 */ 3382 static void mpi3mr_port_enable_complete(struct mpi3mr_ioc *mrioc, 3383 struct mpi3mr_drv_cmd *drv_cmd) 3384 { 3385 drv_cmd->state = MPI3MR_CMD_NOTUSED; 3386 drv_cmd->callback = NULL; 3387 mrioc->scan_failed = drv_cmd->ioc_status; 3388 mrioc->scan_started = 0; 3389 } 3390 3391 /** 3392 * mpi3mr_issue_port_enable - Issue Port Enable 3393 * @mrioc: Adapter instance reference 3394 * @async: Flag to wait for completion or not 3395 * 3396 * Issue Port Enable MPI request through admin queue and if the 3397 * async flag is not set wait for the completion of the port 3398 * enable or time out. 3399 * 3400 * Return: 0 on success, non-zero on failures. 
3401 */ 3402 int mpi3mr_issue_port_enable(struct mpi3mr_ioc *mrioc, u8 async) 3403 { 3404 struct mpi3_port_enable_request pe_req; 3405 int retval = 0; 3406 u32 pe_timeout = MPI3MR_PORTENABLE_TIMEOUT; 3407 3408 memset(&pe_req, 0, sizeof(pe_req)); 3409 mutex_lock(&mrioc->init_cmds.mutex); 3410 if (mrioc->init_cmds.state & MPI3MR_CMD_PENDING) { 3411 retval = -1; 3412 ioc_err(mrioc, "Issue PortEnable: Init command is in use\n"); 3413 mutex_unlock(&mrioc->init_cmds.mutex); 3414 goto out; 3415 } 3416 mrioc->init_cmds.state = MPI3MR_CMD_PENDING; 3417 if (async) { 3418 mrioc->init_cmds.is_waiting = 0; 3419 mrioc->init_cmds.callback = mpi3mr_port_enable_complete; 3420 } else { 3421 mrioc->init_cmds.is_waiting = 1; 3422 mrioc->init_cmds.callback = NULL; 3423 init_completion(&mrioc->init_cmds.done); 3424 } 3425 pe_req.host_tag = cpu_to_le16(MPI3MR_HOSTTAG_INITCMDS); 3426 pe_req.function = MPI3_FUNCTION_PORT_ENABLE; 3427 3428 retval = mpi3mr_admin_request_post(mrioc, &pe_req, sizeof(pe_req), 1); 3429 if (retval) { 3430 ioc_err(mrioc, "Issue PortEnable: Admin Post failed\n"); 3431 goto out_unlock; 3432 } 3433 if (async) { 3434 mutex_unlock(&mrioc->init_cmds.mutex); 3435 goto out; 3436 } 3437 3438 wait_for_completion_timeout(&mrioc->init_cmds.done, (pe_timeout * HZ)); 3439 if (!(mrioc->init_cmds.state & MPI3MR_CMD_COMPLETE)) { 3440 ioc_err(mrioc, "port enable timed out\n"); 3441 retval = -1; 3442 mpi3mr_check_rh_fault_ioc(mrioc, MPI3MR_RESET_FROM_PE_TIMEOUT); 3443 goto out_unlock; 3444 } 3445 mpi3mr_port_enable_complete(mrioc, &mrioc->init_cmds); 3446 3447 out_unlock: 3448 mrioc->init_cmds.state = MPI3MR_CMD_NOTUSED; 3449 mutex_unlock(&mrioc->init_cmds.mutex); 3450 out: 3451 return retval; 3452 } 3453 3454 /* Protocol type to name mapper structure */ 3455 static const struct { 3456 u8 protocol; 3457 char *name; 3458 } mpi3mr_protocols[] = { 3459 { MPI3_IOCFACTS_PROTOCOL_SCSI_INITIATOR, "Initiator" }, 3460 { MPI3_IOCFACTS_PROTOCOL_SCSI_TARGET, "Target" }, 3461 { 
MPI3_IOCFACTS_PROTOCOL_NVME, "NVMe attachment" }, 3462 }; 3463 3464 /* Capability to name mapper structure*/ 3465 static const struct { 3466 u32 capability; 3467 char *name; 3468 } mpi3mr_capabilities[] = { 3469 { MPI3_IOCFACTS_CAPABILITY_RAID_CAPABLE, "RAID" }, 3470 { MPI3_IOCFACTS_CAPABILITY_MULTIPATH_ENABLED, "MultiPath" }, 3471 }; 3472 3473 /** 3474 * mpi3mr_print_ioc_info - Display controller information 3475 * @mrioc: Adapter instance reference 3476 * 3477 * Display controller personalit, capability, supported 3478 * protocols etc. 3479 * 3480 * Return: Nothing 3481 */ 3482 static void 3483 mpi3mr_print_ioc_info(struct mpi3mr_ioc *mrioc) 3484 { 3485 int i = 0, bytes_written = 0; 3486 char personality[16]; 3487 char protocol[50] = {0}; 3488 char capabilities[100] = {0}; 3489 struct mpi3mr_compimg_ver *fwver = &mrioc->facts.fw_ver; 3490 3491 switch (mrioc->facts.personality) { 3492 case MPI3_IOCFACTS_FLAGS_PERSONALITY_EHBA: 3493 strncpy(personality, "Enhanced HBA", sizeof(personality)); 3494 break; 3495 case MPI3_IOCFACTS_FLAGS_PERSONALITY_RAID_DDR: 3496 strncpy(personality, "RAID", sizeof(personality)); 3497 break; 3498 default: 3499 strncpy(personality, "Unknown", sizeof(personality)); 3500 break; 3501 } 3502 3503 ioc_info(mrioc, "Running in %s Personality", personality); 3504 3505 ioc_info(mrioc, "FW version(%d.%d.%d.%d.%d.%d)\n", 3506 fwver->gen_major, fwver->gen_minor, fwver->ph_major, 3507 fwver->ph_minor, fwver->cust_id, fwver->build_num); 3508 3509 for (i = 0; i < ARRAY_SIZE(mpi3mr_protocols); i++) { 3510 if (mrioc->facts.protocol_flags & 3511 mpi3mr_protocols[i].protocol) { 3512 bytes_written += scnprintf(protocol + bytes_written, 3513 sizeof(protocol) - bytes_written, "%s%s", 3514 bytes_written ? 
"," : "", 3515 mpi3mr_protocols[i].name); 3516 } 3517 } 3518 3519 bytes_written = 0; 3520 for (i = 0; i < ARRAY_SIZE(mpi3mr_capabilities); i++) { 3521 if (mrioc->facts.protocol_flags & 3522 mpi3mr_capabilities[i].capability) { 3523 bytes_written += scnprintf(capabilities + bytes_written, 3524 sizeof(capabilities) - bytes_written, "%s%s", 3525 bytes_written ? "," : "", 3526 mpi3mr_capabilities[i].name); 3527 } 3528 } 3529 3530 ioc_info(mrioc, "Protocol=(%s), Capabilities=(%s)\n", 3531 protocol, capabilities); 3532 } 3533 3534 /** 3535 * mpi3mr_cleanup_resources - Free PCI resources 3536 * @mrioc: Adapter instance reference 3537 * 3538 * Unmap PCI device memory and disable PCI device. 3539 * 3540 * Return: 0 on success and non-zero on failure. 3541 */ 3542 void mpi3mr_cleanup_resources(struct mpi3mr_ioc *mrioc) 3543 { 3544 struct pci_dev *pdev = mrioc->pdev; 3545 3546 mpi3mr_cleanup_isr(mrioc); 3547 3548 if (mrioc->sysif_regs) { 3549 iounmap((void __iomem *)mrioc->sysif_regs); 3550 mrioc->sysif_regs = NULL; 3551 } 3552 3553 if (pci_is_enabled(pdev)) { 3554 if (mrioc->bars) 3555 pci_release_selected_regions(pdev, mrioc->bars); 3556 pci_disable_device(pdev); 3557 } 3558 } 3559 3560 /** 3561 * mpi3mr_setup_resources - Enable PCI resources 3562 * @mrioc: Adapter instance reference 3563 * 3564 * Enable PCI device memory, MSI-x registers and set DMA mask. 3565 * 3566 * Return: 0 on success and non-zero on failure. 3567 */ 3568 int mpi3mr_setup_resources(struct mpi3mr_ioc *mrioc) 3569 { 3570 struct pci_dev *pdev = mrioc->pdev; 3571 u32 memap_sz = 0; 3572 int i, retval = 0, capb = 0; 3573 u16 message_control; 3574 u64 dma_mask = mrioc->dma_mask ? mrioc->dma_mask : 3575 (((dma_get_required_mask(&pdev->dev) > DMA_BIT_MASK(32)) && 3576 (sizeof(dma_addr_t) > 4)) ? 
DMA_BIT_MASK(64) : DMA_BIT_MASK(32)); 3577 3578 if (pci_enable_device_mem(pdev)) { 3579 ioc_err(mrioc, "pci_enable_device_mem: failed\n"); 3580 retval = -ENODEV; 3581 goto out_failed; 3582 } 3583 3584 capb = pci_find_capability(pdev, PCI_CAP_ID_MSIX); 3585 if (!capb) { 3586 ioc_err(mrioc, "Unable to find MSI-X Capabilities\n"); 3587 retval = -ENODEV; 3588 goto out_failed; 3589 } 3590 mrioc->bars = pci_select_bars(pdev, IORESOURCE_MEM); 3591 3592 if (pci_request_selected_regions(pdev, mrioc->bars, 3593 mrioc->driver_name)) { 3594 ioc_err(mrioc, "pci_request_selected_regions: failed\n"); 3595 retval = -ENODEV; 3596 goto out_failed; 3597 } 3598 3599 for (i = 0; (i < DEVICE_COUNT_RESOURCE); i++) { 3600 if (pci_resource_flags(pdev, i) & IORESOURCE_MEM) { 3601 mrioc->sysif_regs_phys = pci_resource_start(pdev, i); 3602 memap_sz = pci_resource_len(pdev, i); 3603 mrioc->sysif_regs = 3604 ioremap(mrioc->sysif_regs_phys, memap_sz); 3605 break; 3606 } 3607 } 3608 3609 pci_set_master(pdev); 3610 3611 retval = dma_set_mask_and_coherent(&pdev->dev, dma_mask); 3612 if (retval) { 3613 if (dma_mask != DMA_BIT_MASK(32)) { 3614 ioc_warn(mrioc, "Setting 64 bit DMA mask failed\n"); 3615 dma_mask = DMA_BIT_MASK(32); 3616 retval = dma_set_mask_and_coherent(&pdev->dev, 3617 dma_mask); 3618 } 3619 if (retval) { 3620 mrioc->dma_mask = 0; 3621 ioc_err(mrioc, "Setting 32 bit DMA mask also failed\n"); 3622 goto out_failed; 3623 } 3624 } 3625 mrioc->dma_mask = dma_mask; 3626 3627 if (!mrioc->sysif_regs) { 3628 ioc_err(mrioc, 3629 "Unable to map adapter memory or resource not found\n"); 3630 retval = -EINVAL; 3631 goto out_failed; 3632 } 3633 3634 pci_read_config_word(pdev, capb + 2, &message_control); 3635 mrioc->msix_count = (message_control & 0x3FF) + 1; 3636 3637 pci_save_state(pdev); 3638 3639 pci_set_drvdata(pdev, mrioc->shost); 3640 3641 mpi3mr_ioc_disable_intr(mrioc); 3642 3643 ioc_info(mrioc, "iomem(0x%016llx), mapped(0x%p), size(%d)\n", 3644 (unsigned long long)mrioc->sysif_regs_phys, 
3645 mrioc->sysif_regs, memap_sz); 3646 ioc_info(mrioc, "Number of MSI-X vectors found in capabilities: (%d)\n", 3647 mrioc->msix_count); 3648 3649 if (!reset_devices && poll_queues > 0) 3650 mrioc->requested_poll_qcount = min_t(int, poll_queues, 3651 mrioc->msix_count - 2); 3652 return retval; 3653 3654 out_failed: 3655 mpi3mr_cleanup_resources(mrioc); 3656 return retval; 3657 } 3658 3659 /** 3660 * mpi3mr_enable_events - Enable required events 3661 * @mrioc: Adapter instance reference 3662 * 3663 * This routine unmasks the events required by the driver by 3664 * sennding appropriate event mask bitmapt through an event 3665 * notification request. 3666 * 3667 * Return: 0 on success and non-zero on failure. 3668 */ 3669 static int mpi3mr_enable_events(struct mpi3mr_ioc *mrioc) 3670 { 3671 int retval = 0; 3672 u32 i; 3673 3674 for (i = 0; i < MPI3_EVENT_NOTIFY_EVENTMASK_WORDS; i++) 3675 mrioc->event_masks[i] = -1; 3676 3677 mpi3mr_unmask_events(mrioc, MPI3_EVENT_DEVICE_ADDED); 3678 mpi3mr_unmask_events(mrioc, MPI3_EVENT_DEVICE_INFO_CHANGED); 3679 mpi3mr_unmask_events(mrioc, MPI3_EVENT_DEVICE_STATUS_CHANGE); 3680 mpi3mr_unmask_events(mrioc, MPI3_EVENT_ENCL_DEVICE_STATUS_CHANGE); 3681 mpi3mr_unmask_events(mrioc, MPI3_EVENT_ENCL_DEVICE_ADDED); 3682 mpi3mr_unmask_events(mrioc, MPI3_EVENT_SAS_TOPOLOGY_CHANGE_LIST); 3683 mpi3mr_unmask_events(mrioc, MPI3_EVENT_SAS_DISCOVERY); 3684 mpi3mr_unmask_events(mrioc, MPI3_EVENT_SAS_DEVICE_DISCOVERY_ERROR); 3685 mpi3mr_unmask_events(mrioc, MPI3_EVENT_SAS_BROADCAST_PRIMITIVE); 3686 mpi3mr_unmask_events(mrioc, MPI3_EVENT_PCIE_TOPOLOGY_CHANGE_LIST); 3687 mpi3mr_unmask_events(mrioc, MPI3_EVENT_PCIE_ENUMERATION); 3688 mpi3mr_unmask_events(mrioc, MPI3_EVENT_PREPARE_FOR_RESET); 3689 mpi3mr_unmask_events(mrioc, MPI3_EVENT_CABLE_MGMT); 3690 mpi3mr_unmask_events(mrioc, MPI3_EVENT_ENERGY_PACK_CHANGE); 3691 3692 retval = mpi3mr_issue_event_notification(mrioc); 3693 if (retval) 3694 ioc_err(mrioc, "failed to issue event notification %d\n", 3695 
retval); 3696 return retval; 3697 } 3698 3699 /** 3700 * mpi3mr_init_ioc - Initialize the controller 3701 * @mrioc: Adapter instance reference 3702 * 3703 * This the controller initialization routine, executed either 3704 * after soft reset or from pci probe callback. 3705 * Setup the required resources, memory map the controller 3706 * registers, create admin and operational reply queue pairs, 3707 * allocate required memory for reply pool, sense buffer pool, 3708 * issue IOC init request to the firmware, unmask the events and 3709 * issue port enable to discover SAS/SATA/NVMe devies and RAID 3710 * volumes. 3711 * 3712 * Return: 0 on success and non-zero on failure. 3713 */ 3714 int mpi3mr_init_ioc(struct mpi3mr_ioc *mrioc) 3715 { 3716 int retval = 0; 3717 u8 retry = 0; 3718 struct mpi3_ioc_facts_data facts_data; 3719 u32 sz; 3720 3721 retry_init: 3722 retval = mpi3mr_bring_ioc_ready(mrioc); 3723 if (retval) { 3724 ioc_err(mrioc, "Failed to bring ioc ready: error %d\n", 3725 retval); 3726 goto out_failed_noretry; 3727 } 3728 3729 retval = mpi3mr_setup_isr(mrioc, 1); 3730 if (retval) { 3731 ioc_err(mrioc, "Failed to setup ISR error %d\n", 3732 retval); 3733 goto out_failed_noretry; 3734 } 3735 3736 retval = mpi3mr_issue_iocfacts(mrioc, &facts_data); 3737 if (retval) { 3738 ioc_err(mrioc, "Failed to Issue IOC Facts %d\n", 3739 retval); 3740 goto out_failed; 3741 } 3742 3743 mrioc->max_host_ios = mrioc->facts.max_reqs - MPI3MR_INTERNAL_CMDS_RESVD; 3744 3745 mrioc->num_io_throttle_group = mrioc->facts.max_io_throttle_group; 3746 atomic_set(&mrioc->pend_large_data_sz, 0); 3747 3748 if (reset_devices) 3749 mrioc->max_host_ios = min_t(int, mrioc->max_host_ios, 3750 MPI3MR_HOST_IOS_KDUMP); 3751 3752 if (!(mrioc->facts.ioc_capabilities & 3753 MPI3_IOCFACTS_CAPABILITY_MULTIPATH_ENABLED)) { 3754 mrioc->sas_transport_enabled = 1; 3755 mrioc->scsi_device_channel = 1; 3756 mrioc->shost->max_channel = 1; 3757 mrioc->shost->transportt = mpi3mr_transport_template; 3758 } 3759 
3760 mrioc->reply_sz = mrioc->facts.reply_sz; 3761 3762 retval = mpi3mr_check_reset_dma_mask(mrioc); 3763 if (retval) { 3764 ioc_err(mrioc, "Resetting dma mask failed %d\n", 3765 retval); 3766 goto out_failed_noretry; 3767 } 3768 3769 mpi3mr_print_ioc_info(mrioc); 3770 3771 dprint_init(mrioc, "allocating config page buffers\n"); 3772 mrioc->cfg_page = dma_alloc_coherent(&mrioc->pdev->dev, 3773 MPI3MR_DEFAULT_CFG_PAGE_SZ, &mrioc->cfg_page_dma, GFP_KERNEL); 3774 if (!mrioc->cfg_page) 3775 goto out_failed_noretry; 3776 3777 mrioc->cfg_page_sz = MPI3MR_DEFAULT_CFG_PAGE_SZ; 3778 3779 retval = mpi3mr_alloc_reply_sense_bufs(mrioc); 3780 if (retval) { 3781 ioc_err(mrioc, 3782 "%s :Failed to allocated reply sense buffers %d\n", 3783 __func__, retval); 3784 goto out_failed_noretry; 3785 } 3786 3787 retval = mpi3mr_alloc_chain_bufs(mrioc); 3788 if (retval) { 3789 ioc_err(mrioc, "Failed to allocated chain buffers %d\n", 3790 retval); 3791 goto out_failed_noretry; 3792 } 3793 3794 retval = mpi3mr_issue_iocinit(mrioc); 3795 if (retval) { 3796 ioc_err(mrioc, "Failed to Issue IOC Init %d\n", 3797 retval); 3798 goto out_failed; 3799 } 3800 3801 retval = mpi3mr_print_pkg_ver(mrioc); 3802 if (retval) { 3803 ioc_err(mrioc, "failed to get package version\n"); 3804 goto out_failed; 3805 } 3806 3807 retval = mpi3mr_setup_isr(mrioc, 0); 3808 if (retval) { 3809 ioc_err(mrioc, "Failed to re-setup ISR, error %d\n", 3810 retval); 3811 goto out_failed_noretry; 3812 } 3813 3814 retval = mpi3mr_create_op_queues(mrioc); 3815 if (retval) { 3816 ioc_err(mrioc, "Failed to create OpQueues error %d\n", 3817 retval); 3818 goto out_failed; 3819 } 3820 3821 if (!mrioc->pel_seqnum_virt) { 3822 dprint_init(mrioc, "allocating memory for pel_seqnum_virt\n"); 3823 mrioc->pel_seqnum_sz = sizeof(struct mpi3_pel_seq); 3824 mrioc->pel_seqnum_virt = dma_alloc_coherent(&mrioc->pdev->dev, 3825 mrioc->pel_seqnum_sz, &mrioc->pel_seqnum_dma, 3826 GFP_KERNEL); 3827 if (!mrioc->pel_seqnum_virt) { 3828 retval = -ENOMEM; 
3829 goto out_failed_noretry; 3830 } 3831 } 3832 3833 if (!mrioc->throttle_groups && mrioc->num_io_throttle_group) { 3834 dprint_init(mrioc, "allocating memory for throttle groups\n"); 3835 sz = sizeof(struct mpi3mr_throttle_group_info); 3836 mrioc->throttle_groups = (struct mpi3mr_throttle_group_info *) 3837 kcalloc(mrioc->num_io_throttle_group, sz, GFP_KERNEL); 3838 if (!mrioc->throttle_groups) 3839 goto out_failed_noretry; 3840 } 3841 3842 retval = mpi3mr_enable_events(mrioc); 3843 if (retval) { 3844 ioc_err(mrioc, "failed to enable events %d\n", 3845 retval); 3846 goto out_failed; 3847 } 3848 3849 ioc_info(mrioc, "controller initialization completed successfully\n"); 3850 return retval; 3851 out_failed: 3852 if (retry < 2) { 3853 retry++; 3854 ioc_warn(mrioc, "retrying controller initialization, retry_count:%d\n", 3855 retry); 3856 mpi3mr_memset_buffers(mrioc); 3857 goto retry_init; 3858 } 3859 out_failed_noretry: 3860 ioc_err(mrioc, "controller initialization failed\n"); 3861 mpi3mr_issue_reset(mrioc, MPI3_SYSIF_HOST_DIAG_RESET_ACTION_DIAG_FAULT, 3862 MPI3MR_RESET_FROM_CTLR_CLEANUP); 3863 mrioc->unrecoverable = 1; 3864 return retval; 3865 } 3866 3867 /** 3868 * mpi3mr_reinit_ioc - Re-Initialize the controller 3869 * @mrioc: Adapter instance reference 3870 * @is_resume: Called from resume or reset path 3871 * 3872 * This the controller re-initialization routine, executed from 3873 * the soft reset handler or resume callback. Creates 3874 * operational reply queue pairs, allocate required memory for 3875 * reply pool, sense buffer pool, issue IOC init request to the 3876 * firmware, unmask the events and issue port enable to discover 3877 * SAS/SATA/NVMe devices and RAID volumes. 3878 * 3879 * Return: 0 on success and non-zero on failure. 
3880 */ 3881 int mpi3mr_reinit_ioc(struct mpi3mr_ioc *mrioc, u8 is_resume) 3882 { 3883 int retval = 0; 3884 u8 retry = 0; 3885 struct mpi3_ioc_facts_data facts_data; 3886 3887 retry_init: 3888 dprint_reset(mrioc, "bringing up the controller to ready state\n"); 3889 retval = mpi3mr_bring_ioc_ready(mrioc); 3890 if (retval) { 3891 ioc_err(mrioc, "failed to bring to ready state\n"); 3892 goto out_failed_noretry; 3893 } 3894 3895 if (is_resume) { 3896 dprint_reset(mrioc, "setting up single ISR\n"); 3897 retval = mpi3mr_setup_isr(mrioc, 1); 3898 if (retval) { 3899 ioc_err(mrioc, "failed to setup ISR\n"); 3900 goto out_failed_noretry; 3901 } 3902 } else 3903 mpi3mr_ioc_enable_intr(mrioc); 3904 3905 dprint_reset(mrioc, "getting ioc_facts\n"); 3906 retval = mpi3mr_issue_iocfacts(mrioc, &facts_data); 3907 if (retval) { 3908 ioc_err(mrioc, "failed to get ioc_facts\n"); 3909 goto out_failed; 3910 } 3911 3912 dprint_reset(mrioc, "validating ioc_facts\n"); 3913 retval = mpi3mr_revalidate_factsdata(mrioc); 3914 if (retval) { 3915 ioc_err(mrioc, "failed to revalidate ioc_facts data\n"); 3916 goto out_failed_noretry; 3917 } 3918 3919 mpi3mr_print_ioc_info(mrioc); 3920 3921 dprint_reset(mrioc, "sending ioc_init\n"); 3922 retval = mpi3mr_issue_iocinit(mrioc); 3923 if (retval) { 3924 ioc_err(mrioc, "failed to send ioc_init\n"); 3925 goto out_failed; 3926 } 3927 3928 dprint_reset(mrioc, "getting package version\n"); 3929 retval = mpi3mr_print_pkg_ver(mrioc); 3930 if (retval) { 3931 ioc_err(mrioc, "failed to get package version\n"); 3932 goto out_failed; 3933 } 3934 3935 if (is_resume) { 3936 dprint_reset(mrioc, "setting up multiple ISR\n"); 3937 retval = mpi3mr_setup_isr(mrioc, 0); 3938 if (retval) { 3939 ioc_err(mrioc, "failed to re-setup ISR\n"); 3940 goto out_failed_noretry; 3941 } 3942 } 3943 3944 dprint_reset(mrioc, "creating operational queue pairs\n"); 3945 retval = mpi3mr_create_op_queues(mrioc); 3946 if (retval) { 3947 ioc_err(mrioc, "failed to create operational queue 
pairs\n"); 3948 goto out_failed; 3949 } 3950 3951 if (!mrioc->pel_seqnum_virt) { 3952 dprint_reset(mrioc, "allocating memory for pel_seqnum_virt\n"); 3953 mrioc->pel_seqnum_sz = sizeof(struct mpi3_pel_seq); 3954 mrioc->pel_seqnum_virt = dma_alloc_coherent(&mrioc->pdev->dev, 3955 mrioc->pel_seqnum_sz, &mrioc->pel_seqnum_dma, 3956 GFP_KERNEL); 3957 if (!mrioc->pel_seqnum_virt) { 3958 retval = -ENOMEM; 3959 goto out_failed_noretry; 3960 } 3961 } 3962 3963 if (mrioc->shost->nr_hw_queues > mrioc->num_op_reply_q) { 3964 ioc_err(mrioc, 3965 "cannot create minimum number of operational queues expected:%d created:%d\n", 3966 mrioc->shost->nr_hw_queues, mrioc->num_op_reply_q); 3967 goto out_failed_noretry; 3968 } 3969 3970 dprint_reset(mrioc, "enabling events\n"); 3971 retval = mpi3mr_enable_events(mrioc); 3972 if (retval) { 3973 ioc_err(mrioc, "failed to enable events\n"); 3974 goto out_failed; 3975 } 3976 3977 if (!is_resume) { 3978 mrioc->device_refresh_on = 1; 3979 mpi3mr_add_event_wait_for_device_refresh(mrioc); 3980 } 3981 3982 ioc_info(mrioc, "sending port enable\n"); 3983 retval = mpi3mr_issue_port_enable(mrioc, 0); 3984 if (retval) { 3985 ioc_err(mrioc, "failed to issue port enable\n"); 3986 goto out_failed; 3987 } 3988 3989 ioc_info(mrioc, "controller %s completed successfully\n", 3990 (is_resume)?"resume":"re-initialization"); 3991 return retval; 3992 out_failed: 3993 if (retry < 2) { 3994 retry++; 3995 ioc_warn(mrioc, "retrying controller %s, retry_count:%d\n", 3996 (is_resume)?"resume":"re-initialization", retry); 3997 mpi3mr_memset_buffers(mrioc); 3998 goto retry_init; 3999 } 4000 out_failed_noretry: 4001 ioc_err(mrioc, "controller %s is failed\n", 4002 (is_resume)?"resume":"re-initialization"); 4003 mpi3mr_issue_reset(mrioc, MPI3_SYSIF_HOST_DIAG_RESET_ACTION_DIAG_FAULT, 4004 MPI3MR_RESET_FROM_CTLR_CLEANUP); 4005 mrioc->unrecoverable = 1; 4006 return retval; 4007 } 4008 4009 /** 4010 * mpi3mr_memset_op_reply_q_buffers - memset the operational reply queue's 4011 
* segments 4012 * @mrioc: Adapter instance reference 4013 * @qidx: Operational reply queue index 4014 * 4015 * Return: Nothing. 4016 */ 4017 static void mpi3mr_memset_op_reply_q_buffers(struct mpi3mr_ioc *mrioc, u16 qidx) 4018 { 4019 struct op_reply_qinfo *op_reply_q = mrioc->op_reply_qinfo + qidx; 4020 struct segments *segments; 4021 int i, size; 4022 4023 if (!op_reply_q->q_segments) 4024 return; 4025 4026 size = op_reply_q->segment_qd * mrioc->op_reply_desc_sz; 4027 segments = op_reply_q->q_segments; 4028 for (i = 0; i < op_reply_q->num_segments; i++) 4029 memset(segments[i].segment, 0, size); 4030 } 4031 4032 /** 4033 * mpi3mr_memset_op_req_q_buffers - memset the operational request queue's 4034 * segments 4035 * @mrioc: Adapter instance reference 4036 * @qidx: Operational request queue index 4037 * 4038 * Return: Nothing. 4039 */ 4040 static void mpi3mr_memset_op_req_q_buffers(struct mpi3mr_ioc *mrioc, u16 qidx) 4041 { 4042 struct op_req_qinfo *op_req_q = mrioc->req_qinfo + qidx; 4043 struct segments *segments; 4044 int i, size; 4045 4046 if (!op_req_q->q_segments) 4047 return; 4048 4049 size = op_req_q->segment_qd * mrioc->facts.op_req_sz; 4050 segments = op_req_q->q_segments; 4051 for (i = 0; i < op_req_q->num_segments; i++) 4052 memset(segments[i].segment, 0, size); 4053 } 4054 4055 /** 4056 * mpi3mr_memset_buffers - memset memory for a controller 4057 * @mrioc: Adapter instance reference 4058 * 4059 * clear all the memory allocated for a controller, typically 4060 * called post reset to reuse the memory allocated during the 4061 * controller init. 4062 * 4063 * Return: Nothing. 
 */
void mpi3mr_memset_buffers(struct mpi3mr_ioc *mrioc)
{
	u16 i;
	struct mpi3mr_throttle_group_info *tg;

	mrioc->change_count = 0;
	mrioc->active_poll_qcount = 0;
	mrioc->default_qcount = 0;
	if (mrioc->admin_req_base)
		memset(mrioc->admin_req_base, 0, mrioc->admin_req_q_sz);
	if (mrioc->admin_reply_base)
		memset(mrioc->admin_reply_base, 0, mrioc->admin_reply_q_sz);

	/*
	 * init_cmds.reply is used as the sentinel for "internal command
	 * buffers were allocated"; the sibling reply buffers and bitmaps
	 * below are cleared only when it is present.
	 * NOTE(review): the memsets assume sizeof(*...reply) equals the
	 * allocated reply buffer size — confirm against the allocation site.
	 */
	if (mrioc->init_cmds.reply) {
		memset(mrioc->init_cmds.reply, 0, sizeof(*mrioc->init_cmds.reply));
		memset(mrioc->bsg_cmds.reply, 0,
		    sizeof(*mrioc->bsg_cmds.reply));
		memset(mrioc->host_tm_cmds.reply, 0,
		    sizeof(*mrioc->host_tm_cmds.reply));
		memset(mrioc->pel_cmds.reply, 0,
		    sizeof(*mrioc->pel_cmds.reply));
		memset(mrioc->pel_abort_cmd.reply, 0,
		    sizeof(*mrioc->pel_abort_cmd.reply));
		memset(mrioc->transport_cmds.reply, 0,
		    sizeof(*mrioc->transport_cmds.reply));
		for (i = 0; i < MPI3MR_NUM_DEVRMCMD; i++)
			memset(mrioc->dev_rmhs_cmds[i].reply, 0,
			    sizeof(*mrioc->dev_rmhs_cmds[i].reply));
		for (i = 0; i < MPI3MR_NUM_EVTACKCMD; i++)
			memset(mrioc->evtack_cmds[i].reply, 0,
			    sizeof(*mrioc->evtack_cmds[i].reply));
		memset(mrioc->removepend_bitmap, 0, mrioc->dev_handle_bitmap_sz);
		memset(mrioc->devrem_bitmap, 0, mrioc->devrem_bitmap_sz);
		memset(mrioc->evtack_cmds_bitmap, 0,
		    mrioc->evtack_cmds_bitmap_sz);
	}

	/* Reset per-queue bookkeeping and zero the queue DMA segments. */
	for (i = 0; i < mrioc->num_queues; i++) {
		mrioc->op_reply_qinfo[i].qid = 0;
		mrioc->op_reply_qinfo[i].ci = 0;
		mrioc->op_reply_qinfo[i].num_replies = 0;
		mrioc->op_reply_qinfo[i].ephase = 0;
		atomic_set(&mrioc->op_reply_qinfo[i].pend_ios, 0);
		atomic_set(&mrioc->op_reply_qinfo[i].in_use, 0);
		mpi3mr_memset_op_reply_q_buffers(mrioc, i);

		mrioc->req_qinfo[i].ci = 0;
		mrioc->req_qinfo[i].pi = 0;
		mrioc->req_qinfo[i].num_requests = 0;
		mrioc->req_qinfo[i].qid = 0;
		mrioc->req_qinfo[i].reply_qid = 0;
		spin_lock_init(&mrioc->req_qinfo[i].q_lock);
		mpi3mr_memset_op_req_q_buffers(mrioc, i);
	}

	/* Reset the IO throttling state tracked per throttle group. */
	atomic_set(&mrioc->pend_large_data_sz, 0);
	if (mrioc->throttle_groups) {
		tg = mrioc->throttle_groups;
		for (i = 0; i < mrioc->num_io_throttle_group; i++, tg++) {
			tg->id = 0;
			tg->fw_qd = 0;
			tg->modified_qd = 0;
			tg->io_divert = 0;
			tg->need_qd_reduction = 0;
			tg->high = 0;
			tg->low = 0;
			tg->qd_reduction = 0;
			atomic_set(&tg->pend_large_data_sz, 0);
		}
	}
}

/**
 * mpi3mr_free_mem - Free memory allocated for a controller
 * @mrioc: Adapter instance reference
 *
 * Free all the memory allocated for a controller.
 *
 * Return: Nothing.
 */
void mpi3mr_free_mem(struct mpi3mr_ioc *mrioc)
{
	u16 i;
	struct mpi3mr_intr_info *intr_info;

	/*
	 * DMA-pool backed buffers: free the element first (if allocated),
	 * then destroy the pool itself, and NULL the pointers so a second
	 * call is harmless.
	 */
	if (mrioc->sense_buf_pool) {
		if (mrioc->sense_buf)
			dma_pool_free(mrioc->sense_buf_pool, mrioc->sense_buf,
			    mrioc->sense_buf_dma);
		dma_pool_destroy(mrioc->sense_buf_pool);
		mrioc->sense_buf = NULL;
		mrioc->sense_buf_pool = NULL;
	}
	if (mrioc->sense_buf_q_pool) {
		if (mrioc->sense_buf_q)
			dma_pool_free(mrioc->sense_buf_q_pool,
			    mrioc->sense_buf_q, mrioc->sense_buf_q_dma);
		dma_pool_destroy(mrioc->sense_buf_q_pool);
		mrioc->sense_buf_q = NULL;
		mrioc->sense_buf_q_pool = NULL;
	}

	if (mrioc->reply_buf_pool) {
		if (mrioc->reply_buf)
			dma_pool_free(mrioc->reply_buf_pool, mrioc->reply_buf,
			    mrioc->reply_buf_dma);
		dma_pool_destroy(mrioc->reply_buf_pool);
		mrioc->reply_buf = NULL;
		mrioc->reply_buf_pool = NULL;
	}
	if (mrioc->reply_free_q_pool) {
		if (mrioc->reply_free_q)
			dma_pool_free(mrioc->reply_free_q_pool,
			    mrioc->reply_free_q, mrioc->reply_free_q_dma);
		dma_pool_destroy(mrioc->reply_free_q_pool);
		mrioc->reply_free_q = NULL;
		mrioc->reply_free_q_pool = NULL;
	}

	for (i = 0; i < mrioc->num_op_req_q; i++)
		mpi3mr_free_op_req_q_segments(mrioc, i);

	for (i = 0; i < mrioc->num_op_reply_q; i++)
		mpi3mr_free_op_reply_q_segments(mrioc, i);

	/* Drop interrupt handlers' references to the reply queues. */
	for (i = 0; i < mrioc->intr_info_count; i++) {
		intr_info = mrioc->intr_info + i;
		intr_info->op_reply_q = NULL;
	}

	kfree(mrioc->req_qinfo);
	mrioc->req_qinfo = NULL;
	mrioc->num_op_req_q = 0;

	kfree(mrioc->op_reply_qinfo);
	mrioc->op_reply_qinfo = NULL;
	mrioc->num_op_reply_q = 0;

	kfree(mrioc->init_cmds.reply);
	mrioc->init_cmds.reply = NULL;

	kfree(mrioc->bsg_cmds.reply);
	mrioc->bsg_cmds.reply = NULL;

	kfree(mrioc->host_tm_cmds.reply);
	mrioc->host_tm_cmds.reply = NULL;

	kfree(mrioc->pel_cmds.reply);
	mrioc->pel_cmds.reply = NULL;

	kfree(mrioc->pel_abort_cmd.reply);
	mrioc->pel_abort_cmd.reply = NULL;

	for (i = 0; i < MPI3MR_NUM_EVTACKCMD; i++) {
		kfree(mrioc->evtack_cmds[i].reply);
		mrioc->evtack_cmds[i].reply = NULL;
	}

	kfree(mrioc->removepend_bitmap);
	mrioc->removepend_bitmap = NULL;

	kfree(mrioc->devrem_bitmap);
	mrioc->devrem_bitmap = NULL;

	kfree(mrioc->evtack_cmds_bitmap);
	mrioc->evtack_cmds_bitmap = NULL;

	kfree(mrioc->chain_bitmap);
	mrioc->chain_bitmap = NULL;

	kfree(mrioc->transport_cmds.reply);
	mrioc->transport_cmds.reply = NULL;

	for (i = 0; i < MPI3MR_NUM_DEVRMCMD; i++) {
		kfree(mrioc->dev_rmhs_cmds[i].reply);
		mrioc->dev_rmhs_cmds[i].reply = NULL;
	}

	/* Release every chain SGL element before destroying the pool. */
	if (mrioc->chain_buf_pool) {
		for (i = 0; i < mrioc->chain_buf_count; i++) {
			if (mrioc->chain_sgl_list[i].addr) {
				dma_pool_free(mrioc->chain_buf_pool,
				    mrioc->chain_sgl_list[i].addr,
				    mrioc->chain_sgl_list[i].dma_addr);
				mrioc->chain_sgl_list[i].addr = NULL;
			}
		}
		dma_pool_destroy(mrioc->chain_buf_pool);
		mrioc->chain_buf_pool = NULL;
	}

	kfree(mrioc->chain_sgl_list);
	mrioc->chain_sgl_list = NULL;

	if (mrioc->admin_reply_base) {
		dma_free_coherent(&mrioc->pdev->dev, mrioc->admin_reply_q_sz,
		    mrioc->admin_reply_base, mrioc->admin_reply_dma);
		mrioc->admin_reply_base = NULL;
	}
	if (mrioc->admin_req_base) {
		dma_free_coherent(&mrioc->pdev->dev, mrioc->admin_req_q_sz,
		    mrioc->admin_req_base, mrioc->admin_req_dma);
		mrioc->admin_req_base = NULL;
	}

	if (mrioc->pel_seqnum_virt) {
		dma_free_coherent(&mrioc->pdev->dev, mrioc->pel_seqnum_sz,
		    mrioc->pel_seqnum_virt, mrioc->pel_seqnum_dma);
		mrioc->pel_seqnum_virt = NULL;
	}

	kfree(mrioc->logdata_buf);
	mrioc->logdata_buf = NULL;

}

/**
 * mpi3mr_issue_ioc_shutdown - shutdown controller
 * @mrioc: Adapter instance reference
 *
 * Send shutdown notification to the controller and wait for the
 * shutdown_timeout for it to be completed.
 *
 * Return: Nothing.
 */
static void mpi3mr_issue_ioc_shutdown(struct mpi3mr_ioc *mrioc)
{
	u32 ioc_config, ioc_status;
	u8 retval = 1;
	/* Poll interval is 100ms, so timeout is kept in 100ms units. */
	u32 timeout = MPI3MR_DEFAULT_SHUTDOWN_TIME * 10;

	ioc_info(mrioc, "Issuing shutdown Notification\n");
	if (mrioc->unrecoverable) {
		ioc_warn(mrioc,
		    "IOC is unrecoverable shutdown is not issued\n");
		return;
	}
	/* Skip the request entirely if firmware already reports shutdown. */
	ioc_status = readl(&mrioc->sysif_regs->ioc_status);
	if ((ioc_status & MPI3_SYSIF_IOC_STATUS_SHUTDOWN_MASK)
	    == MPI3_SYSIF_IOC_STATUS_SHUTDOWN_IN_PROGRESS) {
		ioc_info(mrioc, "shutdown already in progress\n");
		return;
	}

	/* Request a normal device shutdown via the IOC configuration register. */
	ioc_config = readl(&mrioc->sysif_regs->ioc_configuration);
	ioc_config |= MPI3_SYSIF_IOC_CONFIG_SHUTDOWN_NORMAL;
	ioc_config |= MPI3_SYSIF_IOC_CONFIG_DEVICE_SHUTDOWN_SEND_REQ;

	writel(ioc_config, &mrioc->sysif_regs->ioc_configuration);

	/* Prefer the firmware-reported shutdown timeout when available. */
	if (mrioc->facts.shutdown_timeout)
		timeout = mrioc->facts.shutdown_timeout * 10;

	do {
		ioc_status = readl(&mrioc->sysif_regs->ioc_status);
		if ((ioc_status & MPI3_SYSIF_IOC_STATUS_SHUTDOWN_MASK)
		    == MPI3_SYSIF_IOC_STATUS_SHUTDOWN_COMPLETE) {
			retval = 0;
			break;
		}
		msleep(100);
	} while (--timeout);

	ioc_status = readl(&mrioc->sysif_regs->ioc_status);
	ioc_config = readl(&mrioc->sysif_regs->ioc_configuration);

	if (retval) {
		if ((ioc_status & MPI3_SYSIF_IOC_STATUS_SHUTDOWN_MASK)
		    == MPI3_SYSIF_IOC_STATUS_SHUTDOWN_IN_PROGRESS)
			ioc_warn(mrioc,
			    "shutdown still in progress after timeout\n");
	}

	ioc_info(mrioc,
	    "Base IOC Sts/Config after %s shutdown is (0x%x)/(0x%x)\n",
	    (!retval) ? "successful" : "failed", ioc_status,
	    ioc_config);
}

/**
 * mpi3mr_cleanup_ioc - Cleanup controller
 * @mrioc: Adapter instance reference
 *
 * Controller cleanup handler: a message unit reset (or soft reset
 * on MUR failure) and a shutdown notification are issued to the
 * controller.
 *
 * Return: Nothing.
 */
void mpi3mr_cleanup_ioc(struct mpi3mr_ioc *mrioc)
{
	enum mpi3mr_iocstate ioc_state;

	dprint_exit(mrioc, "cleaning up the controller\n");
	mpi3mr_ioc_disable_intr(mrioc);

	ioc_state = mpi3mr_get_iocstate(mrioc);

	/* Only a healthy, ready controller is reset and shut down. */
	if ((!mrioc->unrecoverable) && (!mrioc->reset_in_progress) &&
	    (ioc_state == MRIOC_STATE_READY)) {
		if (mpi3mr_issue_and_process_mur(mrioc,
		    MPI3MR_RESET_FROM_CTLR_CLEANUP))
			mpi3mr_issue_reset(mrioc,
			    MPI3_SYSIF_HOST_DIAG_RESET_ACTION_SOFT_RESET,
			    MPI3MR_RESET_FROM_MUR_FAILURE);
		mpi3mr_issue_ioc_shutdown(mrioc);
	}
	dprint_exit(mrioc, "controller cleanup completed\n");
}

/**
 * mpi3mr_drv_cmd_comp_reset - Flush an internal driver command
 * @mrioc: Adapter instance reference
 * @cmdptr: Internal command tracker
 *
 * Complete an internal driver command with state indicating it
 * is completed due to reset.
 *
 * Return: Nothing.
 */
static inline void mpi3mr_drv_cmd_comp_reset(struct mpi3mr_ioc *mrioc,
	struct mpi3mr_drv_cmd *cmdptr)
{
	if (cmdptr->state & MPI3MR_CMD_PENDING) {
		cmdptr->state |= MPI3MR_CMD_RESET;
		cmdptr->state &= ~MPI3MR_CMD_PENDING;
		/* Wake a synchronous waiter, or fire the async callback. */
		if (cmdptr->is_waiting) {
			complete(&cmdptr->done);
			cmdptr->is_waiting = 0;
		} else if (cmdptr->callback)
			cmdptr->callback(mrioc, cmdptr);
	}
}

/**
 * mpi3mr_flush_drv_cmds - Flush internal driver commands
 * @mrioc: Adapter instance reference
 *
 * Flush all internal driver commands post reset.
 *
 * Return: Nothing.
 */
static void mpi3mr_flush_drv_cmds(struct mpi3mr_ioc *mrioc)
{
	struct mpi3mr_drv_cmd *cmdptr;
	u8 i;

	cmdptr = &mrioc->init_cmds;
	mpi3mr_drv_cmd_comp_reset(mrioc, cmdptr);

	cmdptr = &mrioc->cfg_cmds;
	mpi3mr_drv_cmd_comp_reset(mrioc, cmdptr);

	cmdptr = &mrioc->bsg_cmds;
	mpi3mr_drv_cmd_comp_reset(mrioc, cmdptr);
	cmdptr = &mrioc->host_tm_cmds;
	mpi3mr_drv_cmd_comp_reset(mrioc, cmdptr);

	for (i = 0; i < MPI3MR_NUM_DEVRMCMD; i++) {
		cmdptr = &mrioc->dev_rmhs_cmds[i];
		mpi3mr_drv_cmd_comp_reset(mrioc, cmdptr);
	}

	for (i = 0; i < MPI3MR_NUM_EVTACKCMD; i++) {
		cmdptr = &mrioc->evtack_cmds[i];
		mpi3mr_drv_cmd_comp_reset(mrioc, cmdptr);
	}

	cmdptr = &mrioc->pel_cmds;
	mpi3mr_drv_cmd_comp_reset(mrioc, cmdptr);

	cmdptr = &mrioc->pel_abort_cmd;
	mpi3mr_drv_cmd_comp_reset(mrioc, cmdptr);

	cmdptr = &mrioc->transport_cmds;
	mpi3mr_drv_cmd_comp_reset(mrioc, cmdptr);
}

/**
 * mpi3mr_pel_wait_post - Issue PEL Wait
 * @mrioc: Adapter instance reference
 * @drv_cmd: Internal command tracker
 *
 * Issue PEL Wait MPI request through admin queue and return.
 *
 * Return: Nothing.
 */
static void mpi3mr_pel_wait_post(struct mpi3mr_ioc *mrioc,
	struct mpi3mr_drv_cmd *drv_cmd)
{
	struct mpi3_pel_req_action_wait pel_wait;

	/* A fresh wait request supersedes any pending abort intent. */
	mrioc->pel_abort_requested = false;

	memset(&pel_wait, 0, sizeof(pel_wait));
	/* Arm the tracker for asynchronous completion via callback. */
	drv_cmd->state = MPI3MR_CMD_PENDING;
	drv_cmd->is_waiting = 0;
	drv_cmd->callback = mpi3mr_pel_wait_complete;
	drv_cmd->ioc_status = 0;
	drv_cmd->ioc_loginfo = 0;
	pel_wait.host_tag = cpu_to_le16(MPI3MR_HOSTTAG_PEL_WAIT);
	pel_wait.function = MPI3_FUNCTION_PERSISTENT_EVENT_LOG;
	pel_wait.action = MPI3_PEL_ACTION_WAIT;
	pel_wait.starting_sequence_number = cpu_to_le32(mrioc->pel_newest_seqnum);
	pel_wait.locale = cpu_to_le16(mrioc->pel_locale);
	pel_wait.class = cpu_to_le16(mrioc->pel_class);
	pel_wait.wait_time = MPI3_PEL_WAITTIME_INFINITE_WAIT;
	dprint_bsg_info(mrioc, "sending pel_wait seqnum(%d), class(%d), locale(0x%08x)\n",
	    mrioc->pel_newest_seqnum, mrioc->pel_class, mrioc->pel_locale);

	/* On post failure, release the tracker and disable PEL entirely. */
	if (mpi3mr_admin_request_post(mrioc, &pel_wait, sizeof(pel_wait), 0)) {
		dprint_bsg_err(mrioc,
		    "Issuing PELWait: Admin post failed\n");
		drv_cmd->state = MPI3MR_CMD_NOTUSED;
		drv_cmd->callback = NULL;
		drv_cmd->retry_count = 0;
		mrioc->pel_enabled = false;
	}
}

/**
 * mpi3mr_pel_get_seqnum_post - Issue PEL Get Sequence number
 * @mrioc: Adapter instance reference
 * @drv_cmd: Internal command tracker
 *
 * Issue PEL get sequence number MPI request through admin queue
 * and return. The firmware writes the sequence numbers into the
 * pre-allocated pel_seqnum_virt DMA buffer referenced by the SGE.
 *
 * Return: 0 on success, non-zero on failure.
 */
int mpi3mr_pel_get_seqnum_post(struct mpi3mr_ioc *mrioc,
	struct mpi3mr_drv_cmd *drv_cmd)
{
	struct mpi3_pel_req_action_get_sequence_numbers pel_getseq_req;
	u8 sgl_flags = MPI3MR_SGEFLAGS_SYSTEM_SIMPLE_END_OF_LIST;
	int retval = 0;

	memset(&pel_getseq_req, 0, sizeof(pel_getseq_req));
	/*
	 * The pel_cmds tracker is armed directly (not through drv_cmd);
	 * drv_cmd is only used to clean up the caller's tracker on failure.
	 */
	mrioc->pel_cmds.state = MPI3MR_CMD_PENDING;
	mrioc->pel_cmds.is_waiting = 0;
	mrioc->pel_cmds.ioc_status = 0;
	mrioc->pel_cmds.ioc_loginfo = 0;
	mrioc->pel_cmds.callback = mpi3mr_pel_get_seqnum_complete;
	pel_getseq_req.host_tag = cpu_to_le16(MPI3MR_HOSTTAG_PEL_WAIT);
	pel_getseq_req.function = MPI3_FUNCTION_PERSISTENT_EVENT_LOG;
	pel_getseq_req.action = MPI3_PEL_ACTION_GET_SEQNUM;
	mpi3mr_add_sg_single(&pel_getseq_req.sgl, sgl_flags,
	    mrioc->pel_seqnum_sz, mrioc->pel_seqnum_dma);

	retval = mpi3mr_admin_request_post(mrioc, &pel_getseq_req,
	    sizeof(pel_getseq_req), 0);
	if (retval) {
		if (drv_cmd) {
			drv_cmd->state = MPI3MR_CMD_NOTUSED;
			drv_cmd->callback = NULL;
			drv_cmd->retry_count = 0;
		}
		mrioc->pel_enabled = false;
	}

	return retval;
}

/**
 * mpi3mr_pel_wait_complete - PELWait Completion callback
 * @mrioc: Adapter instance reference
 * @drv_cmd: Internal command tracker
 *
 * This is a callback handler for the PELWait request and
 * firmware completes a PELWait request when it is aborted or a
 * new PEL entry is available. This sends AEN to the application
 * and if the PELwait completion is not due to PELAbort then
 * this will send a request for new PEL Sequence number
 *
 * Return: Nothing.
 */
static void mpi3mr_pel_wait_complete(struct mpi3mr_ioc *mrioc,
	struct mpi3mr_drv_cmd *drv_cmd)
{
	struct mpi3_pel_reply *pel_reply = NULL;
	u16 ioc_status, pe_log_status;
	bool do_retry = false;

	/* Reset flushed the command: just release the tracker. */
	if (drv_cmd->state & MPI3MR_CMD_RESET)
		goto cleanup_drv_cmd;

	ioc_status = drv_cmd->ioc_status & MPI3_IOCSTATUS_STATUS_MASK;
	if (ioc_status != MPI3_IOCSTATUS_SUCCESS) {
		ioc_err(mrioc, "%s: Failed ioc_status(0x%04x) Loginfo(0x%08x)\n",
		    __func__, ioc_status, drv_cmd->ioc_loginfo);
		dprint_bsg_err(mrioc,
		    "pel_wait: failed with ioc_status(0x%04x), log_info(0x%08x)\n",
		    ioc_status, drv_cmd->ioc_loginfo);
		do_retry = true;
	}

	if (drv_cmd->state & MPI3MR_CMD_REPLY_VALID)
		pel_reply = (struct mpi3_pel_reply *)drv_cmd->reply;

	/* No reply at all is treated as a hard failure — no retry. */
	if (!pel_reply) {
		dprint_bsg_err(mrioc,
		    "pel_wait: failed due to no reply\n");
		goto out_failed;
	}

	/* ABORTED is an expected completion (PELAbort), not an error. */
	pe_log_status = le16_to_cpu(pel_reply->pe_log_status);
	if ((pe_log_status != MPI3_PEL_STATUS_SUCCESS) &&
	    (pe_log_status != MPI3_PEL_STATUS_ABORTED)) {
		ioc_err(mrioc, "%s: Failed pe_log_status(0x%04x)\n",
		    __func__, pe_log_status);
		dprint_bsg_err(mrioc,
		    "pel_wait: failed due to pel_log_status(0x%04x)\n",
		    pe_log_status);
		do_retry = true;
	}

	if (do_retry) {
		if (drv_cmd->retry_count < MPI3MR_PEL_RETRY_COUNT) {
			drv_cmd->retry_count++;
			dprint_bsg_err(mrioc, "pel_wait: retrying(%d)\n",
			    drv_cmd->retry_count);
			mpi3mr_pel_wait_post(mrioc, drv_cmd);
			return;
		}
		dprint_bsg_err(mrioc,
		    "pel_wait: failed after all retries(%d)\n",
		    drv_cmd->retry_count);
		goto out_failed;
	}
	/* Signal applications that a new PEL event arrived. */
	atomic64_inc(&event_counter);
	if (!mrioc->pel_abort_requested) {
		mrioc->pel_cmds.retry_count = 0;
		mpi3mr_pel_get_seqnum_post(mrioc, &mrioc->pel_cmds);
	}

	return;
out_failed:
	mrioc->pel_enabled = false;
cleanup_drv_cmd:
	drv_cmd->state = MPI3MR_CMD_NOTUSED;
	drv_cmd->callback = NULL;
	drv_cmd->retry_count = 0;
}

/**
 * mpi3mr_pel_get_seqnum_complete - PELGetSeqNum Completion callback
 * @mrioc: Adapter instance reference
 * @drv_cmd: Internal command tracker
 *
 * This is a callback handler for the PEL get sequence number
 * request and a new PEL wait request will be issued to the
 * firmware from this
 *
 * Return: Nothing.
 */
void mpi3mr_pel_get_seqnum_complete(struct mpi3mr_ioc *mrioc,
	struct mpi3mr_drv_cmd *drv_cmd)
{
	struct mpi3_pel_reply *pel_reply = NULL;
	struct mpi3_pel_seq *pel_seqnum_virt;
	u16 ioc_status;
	bool do_retry = false;

	/* Firmware filled this DMA buffer with the sequence numbers. */
	pel_seqnum_virt = (struct mpi3_pel_seq *)mrioc->pel_seqnum_virt;

	/* Reset flushed the command: just release the tracker. */
	if (drv_cmd->state & MPI3MR_CMD_RESET)
		goto cleanup_drv_cmd;

	ioc_status = drv_cmd->ioc_status & MPI3_IOCSTATUS_STATUS_MASK;
	if (ioc_status != MPI3_IOCSTATUS_SUCCESS) {
		dprint_bsg_err(mrioc,
		    "pel_get_seqnum: failed with ioc_status(0x%04x), log_info(0x%08x)\n",
		    ioc_status, drv_cmd->ioc_loginfo);
		do_retry = true;
	}

	if (drv_cmd->state & MPI3MR_CMD_REPLY_VALID)
		pel_reply = (struct mpi3_pel_reply *)drv_cmd->reply;
	if (!pel_reply) {
		dprint_bsg_err(mrioc,
		    "pel_get_seqnum: failed due to no reply\n");
		goto out_failed;
	}

	if (le16_to_cpu(pel_reply->pe_log_status) != MPI3_PEL_STATUS_SUCCESS) {
		dprint_bsg_err(mrioc,
		    "pel_get_seqnum: failed due to pel_log_status(0x%04x)\n",
		    le16_to_cpu(pel_reply->pe_log_status));
		do_retry = true;
	}

	if (do_retry) {
		if (drv_cmd->retry_count < MPI3MR_PEL_RETRY_COUNT) {
			drv_cmd->retry_count++;
			dprint_bsg_err(mrioc,
			    "pel_get_seqnum: retrying(%d)\n",
			    drv_cmd->retry_count);
			mpi3mr_pel_get_seqnum_post(mrioc, drv_cmd);
			return;
		}

		dprint_bsg_err(mrioc,
		    "pel_get_seqnum: failed after all retries(%d)\n",
		    drv_cmd->retry_count);
		goto out_failed;
	}
	/* Next wait starts one past the newest logged entry. */
	mrioc->pel_newest_seqnum = le32_to_cpu(pel_seqnum_virt->newest) + 1;
	drv_cmd->retry_count = 0;
	mpi3mr_pel_wait_post(mrioc, drv_cmd);

	return;
out_failed:
	mrioc->pel_enabled = false;
cleanup_drv_cmd:
	drv_cmd->state = MPI3MR_CMD_NOTUSED;
	drv_cmd->callback = NULL;
	drv_cmd->retry_count = 0;
}

/**
 * mpi3mr_soft_reset_handler - Reset the controller
 * @mrioc: Adapter instance reference
 * @reset_reason: Reset reason code
 * @snapdump: Flag to generate snapdump in firmware or not
 *
 * This is a handler for recovering the controller by issuing a
 * soft reset or a diag fault reset. This is a blocking function:
 * when one reset is executing, any other reset requests are
 * blocked. All BSG requests will be blocked during the reset. If
 * the controller reset is successful then the controller will be
 * reinitialized, otherwise the controller will be marked as not
 * recoverable.
 *
 * If the snapdump bit is set, the controller is issued a diag
 * fault reset so that the firmware can create a snap dump; after
 * that the firmware will result in an F000 fault and the driver
 * will issue a soft reset to recover from that.
 *
 * Return: 0 on success, non-zero on failure.
 */
int mpi3mr_soft_reset_handler(struct mpi3mr_ioc *mrioc,
	u32 reset_reason, u8 snapdump)
{
	int retval = 0, i;
	unsigned long flags;
	/* Diag-save poll interval is 100ms, so timeout is in 100ms units. */
	u32 host_diagnostic, timeout = MPI3_SYSIF_DIAG_SAVE_TIMEOUT * 10;

	/* Block the reset handler while a diag save is in progress. */
	dprint_reset(mrioc,
	    "soft_reset_handler: check and block on diagsave_timeout(%d)\n",
	    mrioc->diagsave_timeout);
	while (mrioc->diagsave_timeout)
		ssleep(1);
	/*
	 * Block new resets until the currently executing one is finished and
	 * return the status of the existing reset for all blocked resets
	 */
	dprint_reset(mrioc, "soft_reset_handler: acquiring reset_mutex\n");
	if (!mutex_trylock(&mrioc->reset_mutex)) {
		ioc_info(mrioc,
		    "controller reset triggered by %s is blocked due to another reset in progress\n",
		    mpi3mr_reset_rc_name(reset_reason));
		do {
			ssleep(1);
		} while (mrioc->reset_in_progress == 1);
		ioc_info(mrioc,
		    "returning previous reset result(%d) for the reset triggered by %s\n",
		    mrioc->prev_reset_result,
		    mpi3mr_reset_rc_name(reset_reason));
		return mrioc->prev_reset_result;
	}
	ioc_info(mrioc, "controller reset is triggered by %s\n",
	    mpi3mr_reset_rc_name(reset_reason));

	mrioc->device_refresh_on = 0;
	mrioc->reset_in_progress = 1;
	mrioc->stop_bsgs = 1;
	mrioc->prev_reset_result = -1;

	/*
	 * Mask all events before resetting, except when the firmware is
	 * already faulted/resetting (it cannot service the request then).
	 */
	if ((!snapdump) && (reset_reason != MPI3MR_RESET_FROM_FAULT_WATCH) &&
	    (reset_reason != MPI3MR_RESET_FROM_FIRMWARE) &&
	    (reset_reason != MPI3MR_RESET_FROM_CIACTIV_FAULT)) {
		for (i = 0; i < MPI3_EVENT_NOTIFY_EVENTMASK_WORDS; i++)
			mrioc->event_masks[i] = -1;

		dprint_reset(mrioc, "soft_reset_handler: masking events\n");
		mpi3mr_issue_event_notification(mrioc);
	}

	mpi3mr_wait_for_host_io(mrioc, MPI3MR_RESET_HOST_IOWAIT_TIMEOUT);

	mpi3mr_ioc_disable_intr(mrioc);

	/*
	 * For snapdump: trigger a diag fault and poll until the firmware
	 * finishes saving the diagnostic data before the soft reset.
	 */
	if (snapdump) {
		mpi3mr_set_diagsave(mrioc);
		retval = mpi3mr_issue_reset(mrioc,
		    MPI3_SYSIF_HOST_DIAG_RESET_ACTION_DIAG_FAULT, reset_reason);
		if (!retval) {
			do {
				host_diagnostic =
				    readl(&mrioc->sysif_regs->host_diagnostic);
				if (!(host_diagnostic &
				    MPI3_SYSIF_HOST_DIAG_SAVE_IN_PROGRESS))
					break;
				msleep(100);
			} while (--timeout);
		}
	}

	retval = mpi3mr_issue_reset(mrioc,
	    MPI3_SYSIF_HOST_DIAG_RESET_ACTION_SOFT_RESET, reset_reason);
	if (retval) {
		ioc_err(mrioc, "Failed to issue soft reset to the ioc\n");
		goto out;
	}
	/* Throttle group count must not change across a reset. */
	if (mrioc->num_io_throttle_group !=
	    mrioc->facts.max_io_throttle_group) {
		ioc_err(mrioc,
		    "max io throttle group doesn't match old(%d), new(%d)\n",
		    mrioc->num_io_throttle_group,
		    mrioc->facts.max_io_throttle_group);
		retval = -EPERM;
		goto out;
	}

	/* Flush all outstanding driver and host I/O state, then re-init. */
	mpi3mr_flush_delayed_cmd_lists(mrioc);
	mpi3mr_flush_drv_cmds(mrioc);
	memset(mrioc->devrem_bitmap, 0, mrioc->devrem_bitmap_sz);
	memset(mrioc->removepend_bitmap, 0, mrioc->dev_handle_bitmap_sz);
	memset(mrioc->evtack_cmds_bitmap, 0, mrioc->evtack_cmds_bitmap_sz);
	mpi3mr_flush_host_io(mrioc);
	mpi3mr_cleanup_fwevt_list(mrioc);
	mpi3mr_invalidate_devhandles(mrioc);
	if (mrioc->prepare_for_reset) {
		mrioc->prepare_for_reset = 0;
		mrioc->prepare_for_reset_timeout_counter = 0;
	}
	mpi3mr_memset_buffers(mrioc);
	retval = mpi3mr_reinit_ioc(mrioc, 0);
	if (retval) {
		pr_err(IOCNAME "reinit after soft reset failed: reason %d\n",
		    mrioc->name, reset_reason);
		goto out;
	}
	ssleep(10);

out:
	if (!retval) {
		mrioc->diagsave_timeout = 0;
		mrioc->reset_in_progress = 0;
		mrioc->pel_abort_requested = 0;
		/* Re-arm the persistent event log wait if it was enabled. */
		if (mrioc->pel_enabled) {
			mrioc->pel_cmds.retry_count = 0;
			mpi3mr_pel_wait_post(mrioc, &mrioc->pel_cmds);
		}

		mrioc->device_refresh_on = 0;

		/* Restart the watchdog that was stopped for the reset. */
		mrioc->ts_update_counter = 0;
		spin_lock_irqsave(&mrioc->watchdog_lock, flags);
		if (mrioc->watchdog_work_q)
			queue_delayed_work(mrioc->watchdog_work_q,
			    &mrioc->watchdog_work,
			    msecs_to_jiffies(MPI3MR_WATCHDOG_INTERVAL));
		spin_unlock_irqrestore(&mrioc->watchdog_lock, flags);
		mrioc->stop_bsgs = 0;
		if (mrioc->pel_enabled)
			atomic64_inc(&event_counter);
	} else {
		/* Recovery failed: fault the firmware and give up. */
		mpi3mr_issue_reset(mrioc,
		    MPI3_SYSIF_HOST_DIAG_RESET_ACTION_DIAG_FAULT, reset_reason);
		mrioc->device_refresh_on = 0;
		mrioc->unrecoverable = 1;
		mrioc->reset_in_progress = 0;
		retval = -1;
	}
	mrioc->prev_reset_result = retval;
	mutex_unlock(&mrioc->reset_mutex);
	ioc_info(mrioc, "controller reset is %s\n",
	    ((retval == 0) ? "successful" : "failed"));
	return retval;
}


/**
 * mpi3mr_free_config_dma_memory - free memory for config page
 * @mrioc: Adapter instance reference
 * @mem_desc: memory descriptor structure
 *
 * Check whether the size of the buffer specified by the memory
 * descriptor is greater than the default page size; if so then
 * free the memory pointed to by the descriptor. (Buffers at or
 * below the default size alias the shared cfg_page and are not
 * freed here.)
 *
 * Return: Nothing.
 */
static void mpi3mr_free_config_dma_memory(struct mpi3mr_ioc *mrioc,
	struct dma_memory_desc *mem_desc)
{
	if ((mem_desc->size > mrioc->cfg_page_sz) && mem_desc->addr) {
		dma_free_coherent(&mrioc->pdev->dev, mem_desc->size,
		    mem_desc->addr, mem_desc->dma_addr);
		mem_desc->addr = NULL;
	}
}

/**
 * mpi3mr_alloc_config_dma_memory - Alloc memory for config page
 * @mrioc: Adapter instance reference
 * @mem_desc: Memory descriptor to hold dma memory info
 *
 * This function allocates new dmaable memory or provides the
 * default config page dmaable memory based on the memory size
 * described by the descriptor.
 *
 * Return: 0 on success, non-zero on failure.
 */
static int mpi3mr_alloc_config_dma_memory(struct mpi3mr_ioc *mrioc,
	struct dma_memory_desc *mem_desc)
{
	if (mem_desc->size > mrioc->cfg_page_sz) {
		/* Oversized request: allocate a dedicated coherent buffer. */
		mem_desc->addr = dma_alloc_coherent(&mrioc->pdev->dev,
		    mem_desc->size, &mem_desc->dma_addr, GFP_KERNEL);
		if (!mem_desc->addr)
			return -ENOMEM;
	} else {
		/* Reuse the pre-allocated default config page, zeroed. */
		mem_desc->addr = mrioc->cfg_page;
		mem_desc->dma_addr = mrioc->cfg_page_dma;
		memset(mem_desc->addr, 0, mrioc->cfg_page_sz);
	}
	return 0;
}

/**
 * mpi3mr_post_cfg_req - Issue config requests and wait
 * @mrioc: Adapter instance reference
 * @cfg_req: Configuration request
 * @timeout: Timeout in seconds
 * @ioc_status: Pointer to return ioc status
 *
 * A generic function for posting MPI3 configuration requests to
 * the firmware. This blocks for the completion of the request for
 * timeout seconds and if the request times out this function
 * faults the controller with a proper reason code.
 *
 * On successful completion of the request this function returns
 * the appropriate ioc status from the firmware back to the caller.
 *
 * Return: 0 on success, non-zero on failure.
 */
static int mpi3mr_post_cfg_req(struct mpi3mr_ioc *mrioc,
	struct mpi3_config_request *cfg_req, int timeout, u16 *ioc_status)
{
	int retval = 0;

	/* cfg_cmds.mutex serializes all config requests on this adapter. */
	mutex_lock(&mrioc->cfg_cmds.mutex);
	if (mrioc->cfg_cmds.state & MPI3MR_CMD_PENDING) {
		retval = -1;
		ioc_err(mrioc, "sending config request failed due to command in use\n");
		mutex_unlock(&mrioc->cfg_cmds.mutex);
		goto out;
	}
	/* Arm the tracker for a synchronous (completion-based) wait. */
	mrioc->cfg_cmds.state = MPI3MR_CMD_PENDING;
	mrioc->cfg_cmds.is_waiting = 1;
	mrioc->cfg_cmds.callback = NULL;
	mrioc->cfg_cmds.ioc_status = 0;
	mrioc->cfg_cmds.ioc_loginfo = 0;

	cfg_req->host_tag = cpu_to_le16(MPI3MR_HOSTTAG_CFG_CMDS);
	cfg_req->function = MPI3_FUNCTION_CONFIG;

	init_completion(&mrioc->cfg_cmds.done);
	dprint_cfg_info(mrioc, "posting config request\n");
	if (mrioc->logging_level & MPI3_DEBUG_CFG_INFO)
		dprint_dump(cfg_req, sizeof(struct mpi3_config_request),
		    "mpi3_cfg_req");
	retval = mpi3mr_admin_request_post(mrioc, cfg_req, sizeof(*cfg_req), 1);
	if (retval) {
		ioc_err(mrioc, "posting config request failed\n");
		goto out_unlock;
	}
	wait_for_completion_timeout(&mrioc->cfg_cmds.done, (timeout * HZ));
	if (!(mrioc->cfg_cmds.state & MPI3MR_CMD_COMPLETE)) {
		/* Timed out: fault the controller with the reason code. */
		mpi3mr_check_rh_fault_ioc(mrioc,
		    MPI3MR_RESET_FROM_CFG_REQ_TIMEOUT);
		ioc_err(mrioc, "config request timed out\n");
		retval = -1;
		goto out_unlock;
	}
	*ioc_status = mrioc->cfg_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK;
	if ((*ioc_status) != MPI3_IOCSTATUS_SUCCESS)
		dprint_cfg_err(mrioc,
		    "cfg_page request returned with ioc_status(0x%04x), log_info(0x%08x)\n",
		    *ioc_status, mrioc->cfg_cmds.ioc_loginfo);

out_unlock:
	mrioc->cfg_cmds.state = MPI3MR_CMD_NOTUSED;
	mutex_unlock(&mrioc->cfg_cmds.mutex);

out:
	return retval;
}

/**
 * mpi3mr_process_cfg_req - config page request processor
 *
@mrioc: Adapter instance reference
 * @cfg_req: Configuration request
 * @cfg_hdr: Configuration page header
 * @timeout: Timeout in seconds
 * @ioc_status: Pointer to return ioc status
 * @cfg_buf: Memory pointer to copy config page or header
 * @cfg_buf_sz: Size of the memory to get config page or header
 *
 * This is the handler for config page read, write and config
 * page header read operations.
 *
 * This function expects the cfg_req to be populated with page
 * type, page number, action for the header read and with page
 * address for all other operations.
 *
 * The cfg_hdr can be passed as null for reading the required
 * header details; for read/write pages the cfg_hdr should point
 * to a valid configuration page header.
 *
 * This allocates dmaable memory based on the size of the config
 * buffer and sets the SGE of the cfg_req.
 *
 * For write actions, the config page data has to be passed in
 * the cfg_buf and the size of the data has to be mentioned in
 * the cfg_buf_sz.
 *
 * For read/header actions, on successful completion of the
 * request with successful ioc_status, the data will be copied
 * into the cfg_buf, limited to the minimum of the actual page
 * size and cfg_buf_sz.
 *
 * Return: 0 on success, non-zero on failure.
 */
static int mpi3mr_process_cfg_req(struct mpi3mr_ioc *mrioc,
	struct mpi3_config_request *cfg_req,
	struct mpi3_config_page_header *cfg_hdr, int timeout, u16 *ioc_status,
	void *cfg_buf, u32 cfg_buf_sz)
{
	struct dma_memory_desc mem_desc;
	int retval = -1;
	u8 invalid_action = 0;
	u8 sgl_flags = MPI3MR_SGEFLAGS_SYSTEM_SIMPLE_END_OF_LIST;

	memset(&mem_desc, 0, sizeof(struct dma_memory_desc));

	/* Header reads need only a header-sized DMA buffer. */
	if (cfg_req->action == MPI3_CONFIG_ACTION_PAGE_HEADER)
		mem_desc.size = sizeof(struct mpi3_config_page_header);
	else {
		/* All other actions require a previously read header. */
		if (!cfg_hdr) {
			ioc_err(mrioc, "null config header passed for config action(%d), page_type(0x%02x), page_num(%d)\n",
			    cfg_req->action, cfg_req->page_type,
			    cfg_req->page_number);
			goto out;
		}
		/*
		 * Reject actions the page's attribute does not permit:
		 * read-only pages accept only READ_CURRENT; changeable
		 * pages have no persistent variants.
		 */
		switch (cfg_hdr->page_attribute & MPI3_CONFIG_PAGEATTR_MASK) {
		case MPI3_CONFIG_PAGEATTR_READ_ONLY:
			if (cfg_req->action
			    != MPI3_CONFIG_ACTION_READ_CURRENT)
				invalid_action = 1;
			break;
		case MPI3_CONFIG_PAGEATTR_CHANGEABLE:
			if ((cfg_req->action ==
			     MPI3_CONFIG_ACTION_READ_PERSISTENT) ||
			    (cfg_req->action ==
			     MPI3_CONFIG_ACTION_WRITE_PERSISTENT))
				invalid_action = 1;
			break;
		case MPI3_CONFIG_PAGEATTR_PERSISTENT:
		default:
			break;
		}
		if (invalid_action) {
			ioc_err(mrioc,
			    "config action(%d) is not allowed for page_type(0x%02x), page_num(%d) with page_attribute(0x%02x)\n",
			    cfg_req->action, cfg_req->page_type,
			    cfg_req->page_number, cfg_hdr->page_attribute);
			goto out;
		}
		/* Page length is reported in units of 4 bytes. */
		mem_desc.size = le16_to_cpu(cfg_hdr->page_length) * 4;
		cfg_req->page_length = cfg_hdr->page_length;
		cfg_req->page_version = cfg_hdr->page_version;
	}
	if (mpi3mr_alloc_config_dma_memory(mrioc, &mem_desc))
		goto out;

	/* Single simple SGE covering the whole config buffer. */
	mpi3mr_add_sg_single(&cfg_req->sgl, sgl_flags, mem_desc.size,
	    mem_desc.dma_addr);

	/*
	 * For writes, stage the caller's data into the DMA buffer.
	 * NOTE(review): the copy length is clamped with min_t(u16, ...);
	 * assumes page_length * 4 fits in 16 bits — confirm against the
	 * MPI3 spec's maximum page length.
	 */
	if ((cfg_req->action == MPI3_CONFIG_ACTION_WRITE_PERSISTENT) ||
	    (cfg_req->action == MPI3_CONFIG_ACTION_WRITE_CURRENT)) {
		memcpy(mem_desc.addr, cfg_buf, min_t(u16, mem_desc.size,
		    cfg_buf_sz));
		dprint_cfg_info(mrioc, "config buffer to be written\n");
		if (mrioc->logging_level & MPI3_DEBUG_CFG_INFO)
			dprint_dump(mem_desc.addr, mem_desc.size, "cfg_buf");
	}

	if (mpi3mr_post_cfg_req(mrioc, cfg_req, timeout, ioc_status))
		goto out;

	/* Request completed; copy out data for read/header actions. */
	retval = 0;
	if ((*ioc_status == MPI3_IOCSTATUS_SUCCESS) &&
	    (cfg_req->action != MPI3_CONFIG_ACTION_WRITE_PERSISTENT) &&
	    (cfg_req->action != MPI3_CONFIG_ACTION_WRITE_CURRENT)) {
		memcpy(cfg_buf, mem_desc.addr, min_t(u16, mem_desc.size,
		    cfg_buf_sz));
		dprint_cfg_info(mrioc, "config buffer read\n");
		if (mrioc->logging_level & MPI3_DEBUG_CFG_INFO)
			dprint_dump(mem_desc.addr, mem_desc.size, "cfg_buf");
	}

out:
	/* Safe on both paths: the descriptor was zeroed up front. */
	mpi3mr_free_config_dma_memory(mrioc, &mem_desc);
	return retval;
}

/**
 * mpi3mr_cfg_get_dev_pg0 - Read current device page0
 * @mrioc: Adapter instance reference
 * @ioc_status: Pointer to return ioc status
 * @dev_pg0: Pointer to return device page 0
 * @pg_sz: Size of the memory allocated to the page pointer
 * @form: The form to be used for addressing the page
 * @form_spec: Form specific information like device handle
 *
 * This is handler for config page read for a specific device
 * page0. The ioc_status has the controller returned ioc_status.
 * This routine doesn't check ioc_status to decide whether the
 * page read is success or not and it is the callers
 * responsibility.
 *
 * Return: 0 on success, non-zero on failure.
5101 */ 5102 int mpi3mr_cfg_get_dev_pg0(struct mpi3mr_ioc *mrioc, u16 *ioc_status, 5103 struct mpi3_device_page0 *dev_pg0, u16 pg_sz, u32 form, u32 form_spec) 5104 { 5105 struct mpi3_config_page_header cfg_hdr; 5106 struct mpi3_config_request cfg_req; 5107 u32 page_address; 5108 5109 memset(dev_pg0, 0, pg_sz); 5110 memset(&cfg_hdr, 0, sizeof(cfg_hdr)); 5111 memset(&cfg_req, 0, sizeof(cfg_req)); 5112 5113 cfg_req.function = MPI3_FUNCTION_CONFIG; 5114 cfg_req.action = MPI3_CONFIG_ACTION_PAGE_HEADER; 5115 cfg_req.page_type = MPI3_CONFIG_PAGETYPE_DEVICE; 5116 cfg_req.page_number = 0; 5117 cfg_req.page_address = 0; 5118 5119 if (mpi3mr_process_cfg_req(mrioc, &cfg_req, NULL, 5120 MPI3MR_INTADMCMD_TIMEOUT, ioc_status, &cfg_hdr, sizeof(cfg_hdr))) { 5121 ioc_err(mrioc, "device page0 header read failed\n"); 5122 goto out_failed; 5123 } 5124 if (*ioc_status != MPI3_IOCSTATUS_SUCCESS) { 5125 ioc_err(mrioc, "device page0 header read failed with ioc_status(0x%04x)\n", 5126 *ioc_status); 5127 goto out_failed; 5128 } 5129 cfg_req.action = MPI3_CONFIG_ACTION_READ_CURRENT; 5130 page_address = ((form & MPI3_DEVICE_PGAD_FORM_MASK) | 5131 (form_spec & MPI3_DEVICE_PGAD_HANDLE_MASK)); 5132 cfg_req.page_address = cpu_to_le32(page_address); 5133 if (mpi3mr_process_cfg_req(mrioc, &cfg_req, &cfg_hdr, 5134 MPI3MR_INTADMCMD_TIMEOUT, ioc_status, dev_pg0, pg_sz)) { 5135 ioc_err(mrioc, "device page0 read failed\n"); 5136 goto out_failed; 5137 } 5138 return 0; 5139 out_failed: 5140 return -1; 5141 } 5142 5143 5144 /** 5145 * mpi3mr_cfg_get_sas_phy_pg0 - Read current SAS Phy page0 5146 * @mrioc: Adapter instance reference 5147 * @ioc_status: Pointer to return ioc status 5148 * @phy_pg0: Pointer to return SAS Phy page 0 5149 * @pg_sz: Size of the memory allocated to the page pointer 5150 * @form: The form to be used for addressing the page 5151 * @form_spec: Form specific information like phy number 5152 * 5153 * This is handler for config page read for a specific SAS Phy 5154 * page0. 
The ioc_status has the controller returned ioc_status. 5155 * This routine doesn't check ioc_status to decide whether the 5156 * page read is success or not and it is the callers 5157 * responsibility. 5158 * 5159 * Return: 0 on success, non-zero on failure. 5160 */ 5161 int mpi3mr_cfg_get_sas_phy_pg0(struct mpi3mr_ioc *mrioc, u16 *ioc_status, 5162 struct mpi3_sas_phy_page0 *phy_pg0, u16 pg_sz, u32 form, 5163 u32 form_spec) 5164 { 5165 struct mpi3_config_page_header cfg_hdr; 5166 struct mpi3_config_request cfg_req; 5167 u32 page_address; 5168 5169 memset(phy_pg0, 0, pg_sz); 5170 memset(&cfg_hdr, 0, sizeof(cfg_hdr)); 5171 memset(&cfg_req, 0, sizeof(cfg_req)); 5172 5173 cfg_req.function = MPI3_FUNCTION_CONFIG; 5174 cfg_req.action = MPI3_CONFIG_ACTION_PAGE_HEADER; 5175 cfg_req.page_type = MPI3_CONFIG_PAGETYPE_SAS_PHY; 5176 cfg_req.page_number = 0; 5177 cfg_req.page_address = 0; 5178 5179 if (mpi3mr_process_cfg_req(mrioc, &cfg_req, NULL, 5180 MPI3MR_INTADMCMD_TIMEOUT, ioc_status, &cfg_hdr, sizeof(cfg_hdr))) { 5181 ioc_err(mrioc, "sas phy page0 header read failed\n"); 5182 goto out_failed; 5183 } 5184 if (*ioc_status != MPI3_IOCSTATUS_SUCCESS) { 5185 ioc_err(mrioc, "sas phy page0 header read failed with ioc_status(0x%04x)\n", 5186 *ioc_status); 5187 goto out_failed; 5188 } 5189 cfg_req.action = MPI3_CONFIG_ACTION_READ_CURRENT; 5190 page_address = ((form & MPI3_SAS_PHY_PGAD_FORM_MASK) | 5191 (form_spec & MPI3_SAS_PHY_PGAD_PHY_NUMBER_MASK)); 5192 cfg_req.page_address = cpu_to_le32(page_address); 5193 if (mpi3mr_process_cfg_req(mrioc, &cfg_req, &cfg_hdr, 5194 MPI3MR_INTADMCMD_TIMEOUT, ioc_status, phy_pg0, pg_sz)) { 5195 ioc_err(mrioc, "sas phy page0 read failed\n"); 5196 goto out_failed; 5197 } 5198 return 0; 5199 out_failed: 5200 return -1; 5201 } 5202 5203 /** 5204 * mpi3mr_cfg_get_sas_phy_pg1 - Read current SAS Phy page1 5205 * @mrioc: Adapter instance reference 5206 * @ioc_status: Pointer to return ioc status 5207 * @phy_pg1: Pointer to return SAS Phy page 1 5208 * 
@pg_sz: Size of the memory allocated to the page pointer 5209 * @form: The form to be used for addressing the page 5210 * @form_spec: Form specific information like phy number 5211 * 5212 * This is handler for config page read for a specific SAS Phy 5213 * page1. The ioc_status has the controller returned ioc_status. 5214 * This routine doesn't check ioc_status to decide whether the 5215 * page read is success or not and it is the callers 5216 * responsibility. 5217 * 5218 * Return: 0 on success, non-zero on failure. 5219 */ 5220 int mpi3mr_cfg_get_sas_phy_pg1(struct mpi3mr_ioc *mrioc, u16 *ioc_status, 5221 struct mpi3_sas_phy_page1 *phy_pg1, u16 pg_sz, u32 form, 5222 u32 form_spec) 5223 { 5224 struct mpi3_config_page_header cfg_hdr; 5225 struct mpi3_config_request cfg_req; 5226 u32 page_address; 5227 5228 memset(phy_pg1, 0, pg_sz); 5229 memset(&cfg_hdr, 0, sizeof(cfg_hdr)); 5230 memset(&cfg_req, 0, sizeof(cfg_req)); 5231 5232 cfg_req.function = MPI3_FUNCTION_CONFIG; 5233 cfg_req.action = MPI3_CONFIG_ACTION_PAGE_HEADER; 5234 cfg_req.page_type = MPI3_CONFIG_PAGETYPE_SAS_PHY; 5235 cfg_req.page_number = 1; 5236 cfg_req.page_address = 0; 5237 5238 if (mpi3mr_process_cfg_req(mrioc, &cfg_req, NULL, 5239 MPI3MR_INTADMCMD_TIMEOUT, ioc_status, &cfg_hdr, sizeof(cfg_hdr))) { 5240 ioc_err(mrioc, "sas phy page1 header read failed\n"); 5241 goto out_failed; 5242 } 5243 if (*ioc_status != MPI3_IOCSTATUS_SUCCESS) { 5244 ioc_err(mrioc, "sas phy page1 header read failed with ioc_status(0x%04x)\n", 5245 *ioc_status); 5246 goto out_failed; 5247 } 5248 cfg_req.action = MPI3_CONFIG_ACTION_READ_CURRENT; 5249 page_address = ((form & MPI3_SAS_PHY_PGAD_FORM_MASK) | 5250 (form_spec & MPI3_SAS_PHY_PGAD_PHY_NUMBER_MASK)); 5251 cfg_req.page_address = cpu_to_le32(page_address); 5252 if (mpi3mr_process_cfg_req(mrioc, &cfg_req, &cfg_hdr, 5253 MPI3MR_INTADMCMD_TIMEOUT, ioc_status, phy_pg1, pg_sz)) { 5254 ioc_err(mrioc, "sas phy page1 read failed\n"); 5255 goto out_failed; 5256 } 5257 return 0; 5258 
out_failed: 5259 return -1; 5260 } 5261 5262 5263 /** 5264 * mpi3mr_cfg_get_sas_exp_pg0 - Read current SAS Expander page0 5265 * @mrioc: Adapter instance reference 5266 * @ioc_status: Pointer to return ioc status 5267 * @exp_pg0: Pointer to return SAS Expander page 0 5268 * @pg_sz: Size of the memory allocated to the page pointer 5269 * @form: The form to be used for addressing the page 5270 * @form_spec: Form specific information like device handle 5271 * 5272 * This is handler for config page read for a specific SAS 5273 * Expander page0. The ioc_status has the controller returned 5274 * ioc_status. This routine doesn't check ioc_status to decide 5275 * whether the page read is success or not and it is the callers 5276 * responsibility. 5277 * 5278 * Return: 0 on success, non-zero on failure. 5279 */ 5280 int mpi3mr_cfg_get_sas_exp_pg0(struct mpi3mr_ioc *mrioc, u16 *ioc_status, 5281 struct mpi3_sas_expander_page0 *exp_pg0, u16 pg_sz, u32 form, 5282 u32 form_spec) 5283 { 5284 struct mpi3_config_page_header cfg_hdr; 5285 struct mpi3_config_request cfg_req; 5286 u32 page_address; 5287 5288 memset(exp_pg0, 0, pg_sz); 5289 memset(&cfg_hdr, 0, sizeof(cfg_hdr)); 5290 memset(&cfg_req, 0, sizeof(cfg_req)); 5291 5292 cfg_req.function = MPI3_FUNCTION_CONFIG; 5293 cfg_req.action = MPI3_CONFIG_ACTION_PAGE_HEADER; 5294 cfg_req.page_type = MPI3_CONFIG_PAGETYPE_SAS_EXPANDER; 5295 cfg_req.page_number = 0; 5296 cfg_req.page_address = 0; 5297 5298 if (mpi3mr_process_cfg_req(mrioc, &cfg_req, NULL, 5299 MPI3MR_INTADMCMD_TIMEOUT, ioc_status, &cfg_hdr, sizeof(cfg_hdr))) { 5300 ioc_err(mrioc, "expander page0 header read failed\n"); 5301 goto out_failed; 5302 } 5303 if (*ioc_status != MPI3_IOCSTATUS_SUCCESS) { 5304 ioc_err(mrioc, "expander page0 header read failed with ioc_status(0x%04x)\n", 5305 *ioc_status); 5306 goto out_failed; 5307 } 5308 cfg_req.action = MPI3_CONFIG_ACTION_READ_CURRENT; 5309 page_address = ((form & MPI3_SAS_EXPAND_PGAD_FORM_MASK) | 5310 (form_spec & 
(MPI3_SAS_EXPAND_PGAD_PHYNUM_MASK | 5311 MPI3_SAS_EXPAND_PGAD_HANDLE_MASK))); 5312 cfg_req.page_address = cpu_to_le32(page_address); 5313 if (mpi3mr_process_cfg_req(mrioc, &cfg_req, &cfg_hdr, 5314 MPI3MR_INTADMCMD_TIMEOUT, ioc_status, exp_pg0, pg_sz)) { 5315 ioc_err(mrioc, "expander page0 read failed\n"); 5316 goto out_failed; 5317 } 5318 return 0; 5319 out_failed: 5320 return -1; 5321 } 5322 5323 /** 5324 * mpi3mr_cfg_get_sas_exp_pg1 - Read current SAS Expander page1 5325 * @mrioc: Adapter instance reference 5326 * @ioc_status: Pointer to return ioc status 5327 * @exp_pg1: Pointer to return SAS Expander page 1 5328 * @pg_sz: Size of the memory allocated to the page pointer 5329 * @form: The form to be used for addressing the page 5330 * @form_spec: Form specific information like phy number 5331 * 5332 * This is handler for config page read for a specific SAS 5333 * Expander page1. The ioc_status has the controller returned 5334 * ioc_status. This routine doesn't check ioc_status to decide 5335 * whether the page read is success or not and it is the callers 5336 * responsibility. 5337 * 5338 * Return: 0 on success, non-zero on failure. 
5339 */ 5340 int mpi3mr_cfg_get_sas_exp_pg1(struct mpi3mr_ioc *mrioc, u16 *ioc_status, 5341 struct mpi3_sas_expander_page1 *exp_pg1, u16 pg_sz, u32 form, 5342 u32 form_spec) 5343 { 5344 struct mpi3_config_page_header cfg_hdr; 5345 struct mpi3_config_request cfg_req; 5346 u32 page_address; 5347 5348 memset(exp_pg1, 0, pg_sz); 5349 memset(&cfg_hdr, 0, sizeof(cfg_hdr)); 5350 memset(&cfg_req, 0, sizeof(cfg_req)); 5351 5352 cfg_req.function = MPI3_FUNCTION_CONFIG; 5353 cfg_req.action = MPI3_CONFIG_ACTION_PAGE_HEADER; 5354 cfg_req.page_type = MPI3_CONFIG_PAGETYPE_SAS_EXPANDER; 5355 cfg_req.page_number = 1; 5356 cfg_req.page_address = 0; 5357 5358 if (mpi3mr_process_cfg_req(mrioc, &cfg_req, NULL, 5359 MPI3MR_INTADMCMD_TIMEOUT, ioc_status, &cfg_hdr, sizeof(cfg_hdr))) { 5360 ioc_err(mrioc, "expander page1 header read failed\n"); 5361 goto out_failed; 5362 } 5363 if (*ioc_status != MPI3_IOCSTATUS_SUCCESS) { 5364 ioc_err(mrioc, "expander page1 header read failed with ioc_status(0x%04x)\n", 5365 *ioc_status); 5366 goto out_failed; 5367 } 5368 cfg_req.action = MPI3_CONFIG_ACTION_READ_CURRENT; 5369 page_address = ((form & MPI3_SAS_EXPAND_PGAD_FORM_MASK) | 5370 (form_spec & (MPI3_SAS_EXPAND_PGAD_PHYNUM_MASK | 5371 MPI3_SAS_EXPAND_PGAD_HANDLE_MASK))); 5372 cfg_req.page_address = cpu_to_le32(page_address); 5373 if (mpi3mr_process_cfg_req(mrioc, &cfg_req, &cfg_hdr, 5374 MPI3MR_INTADMCMD_TIMEOUT, ioc_status, exp_pg1, pg_sz)) { 5375 ioc_err(mrioc, "expander page1 read failed\n"); 5376 goto out_failed; 5377 } 5378 return 0; 5379 out_failed: 5380 return -1; 5381 } 5382 5383 /** 5384 * mpi3mr_cfg_get_enclosure_pg0 - Read current Enclosure page0 5385 * @mrioc: Adapter instance reference 5386 * @ioc_status: Pointer to return ioc status 5387 * @encl_pg0: Pointer to return Enclosure page 0 5388 * @pg_sz: Size of the memory allocated to the page pointer 5389 * @form: The form to be used for addressing the page 5390 * @form_spec: Form specific information like device handle 5391 * 5392 * This 
is handler for config page read for a specific Enclosure 5393 * page0. The ioc_status has the controller returned ioc_status. 5394 * This routine doesn't check ioc_status to decide whether the 5395 * page read is success or not and it is the callers 5396 * responsibility. 5397 * 5398 * Return: 0 on success, non-zero on failure. 5399 */ 5400 int mpi3mr_cfg_get_enclosure_pg0(struct mpi3mr_ioc *mrioc, u16 *ioc_status, 5401 struct mpi3_enclosure_page0 *encl_pg0, u16 pg_sz, u32 form, 5402 u32 form_spec) 5403 { 5404 struct mpi3_config_page_header cfg_hdr; 5405 struct mpi3_config_request cfg_req; 5406 u32 page_address; 5407 5408 memset(encl_pg0, 0, pg_sz); 5409 memset(&cfg_hdr, 0, sizeof(cfg_hdr)); 5410 memset(&cfg_req, 0, sizeof(cfg_req)); 5411 5412 cfg_req.function = MPI3_FUNCTION_CONFIG; 5413 cfg_req.action = MPI3_CONFIG_ACTION_PAGE_HEADER; 5414 cfg_req.page_type = MPI3_CONFIG_PAGETYPE_ENCLOSURE; 5415 cfg_req.page_number = 0; 5416 cfg_req.page_address = 0; 5417 5418 if (mpi3mr_process_cfg_req(mrioc, &cfg_req, NULL, 5419 MPI3MR_INTADMCMD_TIMEOUT, ioc_status, &cfg_hdr, sizeof(cfg_hdr))) { 5420 ioc_err(mrioc, "enclosure page0 header read failed\n"); 5421 goto out_failed; 5422 } 5423 if (*ioc_status != MPI3_IOCSTATUS_SUCCESS) { 5424 ioc_err(mrioc, "enclosure page0 header read failed with ioc_status(0x%04x)\n", 5425 *ioc_status); 5426 goto out_failed; 5427 } 5428 cfg_req.action = MPI3_CONFIG_ACTION_READ_CURRENT; 5429 page_address = ((form & MPI3_ENCLOS_PGAD_FORM_MASK) | 5430 (form_spec & MPI3_ENCLOS_PGAD_HANDLE_MASK)); 5431 cfg_req.page_address = cpu_to_le32(page_address); 5432 if (mpi3mr_process_cfg_req(mrioc, &cfg_req, &cfg_hdr, 5433 MPI3MR_INTADMCMD_TIMEOUT, ioc_status, encl_pg0, pg_sz)) { 5434 ioc_err(mrioc, "enclosure page0 read failed\n"); 5435 goto out_failed; 5436 } 5437 return 0; 5438 out_failed: 5439 return -1; 5440 } 5441 5442 5443 /** 5444 * mpi3mr_cfg_get_sas_io_unit_pg0 - Read current SASIOUnit page0 5445 * @mrioc: Adapter instance reference 5446 * 
@sas_io_unit_pg0: Pointer to return SAS IO Unit page 0 5447 * @pg_sz: Size of the memory allocated to the page pointer 5448 * 5449 * This is handler for config page read for the SAS IO Unit 5450 * page0. This routine checks ioc_status to decide whether the 5451 * page read is success or not. 5452 * 5453 * Return: 0 on success, non-zero on failure. 5454 */ 5455 int mpi3mr_cfg_get_sas_io_unit_pg0(struct mpi3mr_ioc *mrioc, 5456 struct mpi3_sas_io_unit_page0 *sas_io_unit_pg0, u16 pg_sz) 5457 { 5458 struct mpi3_config_page_header cfg_hdr; 5459 struct mpi3_config_request cfg_req; 5460 u16 ioc_status = 0; 5461 5462 memset(sas_io_unit_pg0, 0, pg_sz); 5463 memset(&cfg_hdr, 0, sizeof(cfg_hdr)); 5464 memset(&cfg_req, 0, sizeof(cfg_req)); 5465 5466 cfg_req.function = MPI3_FUNCTION_CONFIG; 5467 cfg_req.action = MPI3_CONFIG_ACTION_PAGE_HEADER; 5468 cfg_req.page_type = MPI3_CONFIG_PAGETYPE_SAS_IO_UNIT; 5469 cfg_req.page_number = 0; 5470 cfg_req.page_address = 0; 5471 5472 if (mpi3mr_process_cfg_req(mrioc, &cfg_req, NULL, 5473 MPI3MR_INTADMCMD_TIMEOUT, &ioc_status, &cfg_hdr, sizeof(cfg_hdr))) { 5474 ioc_err(mrioc, "sas io unit page0 header read failed\n"); 5475 goto out_failed; 5476 } 5477 if (ioc_status != MPI3_IOCSTATUS_SUCCESS) { 5478 ioc_err(mrioc, "sas io unit page0 header read failed with ioc_status(0x%04x)\n", 5479 ioc_status); 5480 goto out_failed; 5481 } 5482 cfg_req.action = MPI3_CONFIG_ACTION_READ_CURRENT; 5483 5484 if (mpi3mr_process_cfg_req(mrioc, &cfg_req, &cfg_hdr, 5485 MPI3MR_INTADMCMD_TIMEOUT, &ioc_status, sas_io_unit_pg0, pg_sz)) { 5486 ioc_err(mrioc, "sas io unit page0 read failed\n"); 5487 goto out_failed; 5488 } 5489 if (ioc_status != MPI3_IOCSTATUS_SUCCESS) { 5490 ioc_err(mrioc, "sas io unit page0 read failed with ioc_status(0x%04x)\n", 5491 ioc_status); 5492 goto out_failed; 5493 } 5494 return 0; 5495 out_failed: 5496 return -1; 5497 } 5498 5499 /** 5500 * mpi3mr_cfg_get_sas_io_unit_pg1 - Read current SASIOUnit page1 5501 * @mrioc: Adapter instance reference 
5502 * @sas_io_unit_pg1: Pointer to return SAS IO Unit page 1 5503 * @pg_sz: Size of the memory allocated to the page pointer 5504 * 5505 * This is handler for config page read for the SAS IO Unit 5506 * page1. This routine checks ioc_status to decide whether the 5507 * page read is success or not. 5508 * 5509 * Return: 0 on success, non-zero on failure. 5510 */ 5511 int mpi3mr_cfg_get_sas_io_unit_pg1(struct mpi3mr_ioc *mrioc, 5512 struct mpi3_sas_io_unit_page1 *sas_io_unit_pg1, u16 pg_sz) 5513 { 5514 struct mpi3_config_page_header cfg_hdr; 5515 struct mpi3_config_request cfg_req; 5516 u16 ioc_status = 0; 5517 5518 memset(sas_io_unit_pg1, 0, pg_sz); 5519 memset(&cfg_hdr, 0, sizeof(cfg_hdr)); 5520 memset(&cfg_req, 0, sizeof(cfg_req)); 5521 5522 cfg_req.function = MPI3_FUNCTION_CONFIG; 5523 cfg_req.action = MPI3_CONFIG_ACTION_PAGE_HEADER; 5524 cfg_req.page_type = MPI3_CONFIG_PAGETYPE_SAS_IO_UNIT; 5525 cfg_req.page_number = 1; 5526 cfg_req.page_address = 0; 5527 5528 if (mpi3mr_process_cfg_req(mrioc, &cfg_req, NULL, 5529 MPI3MR_INTADMCMD_TIMEOUT, &ioc_status, &cfg_hdr, sizeof(cfg_hdr))) { 5530 ioc_err(mrioc, "sas io unit page1 header read failed\n"); 5531 goto out_failed; 5532 } 5533 if (ioc_status != MPI3_IOCSTATUS_SUCCESS) { 5534 ioc_err(mrioc, "sas io unit page1 header read failed with ioc_status(0x%04x)\n", 5535 ioc_status); 5536 goto out_failed; 5537 } 5538 cfg_req.action = MPI3_CONFIG_ACTION_READ_CURRENT; 5539 5540 if (mpi3mr_process_cfg_req(mrioc, &cfg_req, &cfg_hdr, 5541 MPI3MR_INTADMCMD_TIMEOUT, &ioc_status, sas_io_unit_pg1, pg_sz)) { 5542 ioc_err(mrioc, "sas io unit page1 read failed\n"); 5543 goto out_failed; 5544 } 5545 if (ioc_status != MPI3_IOCSTATUS_SUCCESS) { 5546 ioc_err(mrioc, "sas io unit page1 read failed with ioc_status(0x%04x)\n", 5547 ioc_status); 5548 goto out_failed; 5549 } 5550 return 0; 5551 out_failed: 5552 return -1; 5553 } 5554 5555 /** 5556 * mpi3mr_cfg_set_sas_io_unit_pg1 - Write SASIOUnit page1 5557 * @mrioc: Adapter instance reference 
5558 * @sas_io_unit_pg1: Pointer to the SAS IO Unit page 1 to write 5559 * @pg_sz: Size of the memory allocated to the page pointer 5560 * 5561 * This is handler for config page write for the SAS IO Unit 5562 * page1. This routine checks ioc_status to decide whether the 5563 * page read is success or not. This will modify both current 5564 * and persistent page. 5565 * 5566 * Return: 0 on success, non-zero on failure. 5567 */ 5568 int mpi3mr_cfg_set_sas_io_unit_pg1(struct mpi3mr_ioc *mrioc, 5569 struct mpi3_sas_io_unit_page1 *sas_io_unit_pg1, u16 pg_sz) 5570 { 5571 struct mpi3_config_page_header cfg_hdr; 5572 struct mpi3_config_request cfg_req; 5573 u16 ioc_status = 0; 5574 5575 memset(&cfg_hdr, 0, sizeof(cfg_hdr)); 5576 memset(&cfg_req, 0, sizeof(cfg_req)); 5577 5578 cfg_req.function = MPI3_FUNCTION_CONFIG; 5579 cfg_req.action = MPI3_CONFIG_ACTION_PAGE_HEADER; 5580 cfg_req.page_type = MPI3_CONFIG_PAGETYPE_SAS_IO_UNIT; 5581 cfg_req.page_number = 1; 5582 cfg_req.page_address = 0; 5583 5584 if (mpi3mr_process_cfg_req(mrioc, &cfg_req, NULL, 5585 MPI3MR_INTADMCMD_TIMEOUT, &ioc_status, &cfg_hdr, sizeof(cfg_hdr))) { 5586 ioc_err(mrioc, "sas io unit page1 header read failed\n"); 5587 goto out_failed; 5588 } 5589 if (ioc_status != MPI3_IOCSTATUS_SUCCESS) { 5590 ioc_err(mrioc, "sas io unit page1 header read failed with ioc_status(0x%04x)\n", 5591 ioc_status); 5592 goto out_failed; 5593 } 5594 cfg_req.action = MPI3_CONFIG_ACTION_WRITE_CURRENT; 5595 5596 if (mpi3mr_process_cfg_req(mrioc, &cfg_req, &cfg_hdr, 5597 MPI3MR_INTADMCMD_TIMEOUT, &ioc_status, sas_io_unit_pg1, pg_sz)) { 5598 ioc_err(mrioc, "sas io unit page1 write current failed\n"); 5599 goto out_failed; 5600 } 5601 if (ioc_status != MPI3_IOCSTATUS_SUCCESS) { 5602 ioc_err(mrioc, "sas io unit page1 write current failed with ioc_status(0x%04x)\n", 5603 ioc_status); 5604 goto out_failed; 5605 } 5606 5607 cfg_req.action = MPI3_CONFIG_ACTION_WRITE_PERSISTENT; 5608 5609 if (mpi3mr_process_cfg_req(mrioc, &cfg_req, &cfg_hdr, 
5610 MPI3MR_INTADMCMD_TIMEOUT, &ioc_status, sas_io_unit_pg1, pg_sz)) { 5611 ioc_err(mrioc, "sas io unit page1 write persistent failed\n"); 5612 goto out_failed; 5613 } 5614 if (ioc_status != MPI3_IOCSTATUS_SUCCESS) { 5615 ioc_err(mrioc, "sas io unit page1 write persistent failed with ioc_status(0x%04x)\n", 5616 ioc_status); 5617 goto out_failed; 5618 } 5619 return 0; 5620 out_failed: 5621 return -1; 5622 } 5623 5624 /** 5625 * mpi3mr_cfg_get_driver_pg1 - Read current Driver page1 5626 * @mrioc: Adapter instance reference 5627 * @driver_pg1: Pointer to return Driver page 1 5628 * @pg_sz: Size of the memory allocated to the page pointer 5629 * 5630 * This is handler for config page read for the Driver page1. 5631 * This routine checks ioc_status to decide whether the page 5632 * read is success or not. 5633 * 5634 * Return: 0 on success, non-zero on failure. 5635 */ 5636 int mpi3mr_cfg_get_driver_pg1(struct mpi3mr_ioc *mrioc, 5637 struct mpi3_driver_page1 *driver_pg1, u16 pg_sz) 5638 { 5639 struct mpi3_config_page_header cfg_hdr; 5640 struct mpi3_config_request cfg_req; 5641 u16 ioc_status = 0; 5642 5643 memset(driver_pg1, 0, pg_sz); 5644 memset(&cfg_hdr, 0, sizeof(cfg_hdr)); 5645 memset(&cfg_req, 0, sizeof(cfg_req)); 5646 5647 cfg_req.function = MPI3_FUNCTION_CONFIG; 5648 cfg_req.action = MPI3_CONFIG_ACTION_PAGE_HEADER; 5649 cfg_req.page_type = MPI3_CONFIG_PAGETYPE_DRIVER; 5650 cfg_req.page_number = 1; 5651 cfg_req.page_address = 0; 5652 5653 if (mpi3mr_process_cfg_req(mrioc, &cfg_req, NULL, 5654 MPI3MR_INTADMCMD_TIMEOUT, &ioc_status, &cfg_hdr, sizeof(cfg_hdr))) { 5655 ioc_err(mrioc, "driver page1 header read failed\n"); 5656 goto out_failed; 5657 } 5658 if (ioc_status != MPI3_IOCSTATUS_SUCCESS) { 5659 ioc_err(mrioc, "driver page1 header read failed with ioc_status(0x%04x)\n", 5660 ioc_status); 5661 goto out_failed; 5662 } 5663 cfg_req.action = MPI3_CONFIG_ACTION_READ_CURRENT; 5664 5665 if (mpi3mr_process_cfg_req(mrioc, &cfg_req, &cfg_hdr, 5666 
MPI3MR_INTADMCMD_TIMEOUT, &ioc_status, driver_pg1, pg_sz)) { 5667 ioc_err(mrioc, "driver page1 read failed\n"); 5668 goto out_failed; 5669 } 5670 if (ioc_status != MPI3_IOCSTATUS_SUCCESS) { 5671 ioc_err(mrioc, "driver page1 read failed with ioc_status(0x%04x)\n", 5672 ioc_status); 5673 goto out_failed; 5674 } 5675 return 0; 5676 out_failed: 5677 return -1; 5678 } 5679