1 /* 2 * Scsi Host Layer for MPT (Message Passing Technology) based controllers 3 * 4 * This code is based on drivers/scsi/mpt3sas/mpt3sas_scsih.c 5 * Copyright (C) 2012-2014 LSI Corporation 6 * Copyright (C) 2013-2014 Avago Technologies 7 * (mailto: MPT-FusionLinux.pdl@avagotech.com) 8 * 9 * This program is free software; you can redistribute it and/or 10 * modify it under the terms of the GNU General Public License 11 * as published by the Free Software Foundation; either version 2 12 * of the License, or (at your option) any later version. 13 * 14 * This program is distributed in the hope that it will be useful, 15 * but WITHOUT ANY WARRANTY; without even the implied warranty of 16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 17 * GNU General Public License for more details. 18 * 19 * NO WARRANTY 20 * THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR 21 * CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT 22 * LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT, 23 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is 24 * solely responsible for determining the appropriateness of using and 25 * distributing the Program and assumes all risks associated with its 26 * exercise of rights under this Agreement, including but not limited to 27 * the risks and costs of program errors, damage to or loss of data, 28 * programs or equipment, and unavailability or interruption of operations. 
 * DISCLAIMER OF LIABILITY
 * NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY
 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND
 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
 * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
 * USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED
 * HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301,
 * USA.
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/blkdev.h>
#include <linux/sched.h>
#include <linux/workqueue.h>
#include <linux/delay.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/raid_class.h>
#include <linux/blk-mq-pci.h>
#include <asm/unaligned.h>

#include "mpt3sas_base.h"

/* Driver-internal (virtual) scsi channel numbers: RAID volumes are reported
 * on channel 1 and PCIe devices on channel 2 (see
 * _scsih_determine_boot_device() for how these are dispatched).
 */
#define RAID_CHANNEL 1

#define PCIE_CHANNEL 2

/* forward proto's */
static void _scsih_expander_node_remove(struct MPT3SAS_ADAPTER *ioc,
	struct _sas_node *sas_expander);
static void _firmware_event_work(struct work_struct *work);

static void _scsih_remove_device(struct MPT3SAS_ADAPTER *ioc,
	struct _sas_device *sas_device);
static int _scsih_add_device(struct MPT3SAS_ADAPTER *ioc, u16 handle,
	u8 retry_count, u8 is_pd);
static int _scsih_pcie_add_device(struct MPT3SAS_ADAPTER *ioc, u16 handle);
static void _scsih_pcie_device_remove_from_sml(struct MPT3SAS_ADAPTER *ioc,
	struct _pcie_device *pcie_device);
static void
_scsih_pcie_check_device(struct MPT3SAS_ADAPTER *ioc, u16 handle);
static u8 _scsih_check_for_pending_tm(struct MPT3SAS_ADAPTER *ioc, u16 smid);
static void _scsih_complete_devices_scanning(struct MPT3SAS_ADAPTER *ioc);

/* global parameters */
LIST_HEAD(mpt3sas_ioc_list);
/* global ioc lock for list operations */
DEFINE_SPINLOCK(gioc_lock);

MODULE_AUTHOR(MPT3SAS_AUTHOR);
MODULE_DESCRIPTION(MPT3SAS_DESCRIPTION);
MODULE_LICENSE("GPL");
MODULE_VERSION(MPT3SAS_DRIVER_VERSION);
MODULE_ALIAS("mpt2sas");

/* local parameters */
/* callback indexes registered with the base driver for each request class;
 * -1 (i.e. 0xFF, since these are u8) means "not registered yet"
 */
static u8 scsi_io_cb_idx = -1;
static u8 tm_cb_idx = -1;
static u8 ctl_cb_idx = -1;
static u8 base_cb_idx = -1;
static u8 port_enable_cb_idx = -1;
static u8 transport_cb_idx = -1;
static u8 scsih_cb_idx = -1;
static u8 config_cb_idx = -1;
/* counters used to assign unique ids to SAS2 / SAS3 generation adapters */
static int mpt2_ids;
static int mpt3_ids;

static u8 tm_tr_cb_idx = -1;
static u8 tm_tr_volume_cb_idx = -1;
static u8 tm_sas_control_cb_idx = -1;

/* command line options */
/* NOTE: module_param registration for logging_level is done below via
 * module_param_call(), so that _scsih_set_debug_level() can propagate a
 * runtime change to every ioc on mpt3sas_ioc_list.
 */
static u32 logging_level;
MODULE_PARM_DESC(logging_level,
	" bits for enabling additional logging info (default=0)");


static ushort max_sectors = 0xFFFF;
module_param(max_sectors, ushort, 0444);
MODULE_PARM_DESC(max_sectors, "max sectors, range 64 to 32767 default=32767");


static int missing_delay[2] = {-1, -1};
module_param_array(missing_delay, int, NULL, 0444);
MODULE_PARM_DESC(missing_delay, " device missing delay , io missing delay");

/* scsi-mid layer global parameter is max_report_luns, which is 511 */
#define MPT3SAS_MAX_LUN (16895)
static u64 max_lun = MPT3SAS_MAX_LUN;
module_param(max_lun, ullong, 0444);
MODULE_PARM_DESC(max_lun, " max lun, default=16895 ");

static ushort hbas_to_enumerate;
module_param(hbas_to_enumerate, ushort, 0444);
MODULE_PARM_DESC(hbas_to_enumerate,
		" 0 - enumerates both SAS 2.0 & SAS 3.0 generation HBAs\n \
		1 - enumerates only SAS 2.0 generation HBAs\n \
		2 - enumerates only SAS 3.0 generation HBAs (default=0)");

/*
 * diag_buffer_enable is bitwise
 * bit 0 set = TRACE
 * bit 1 set = SNAPSHOT
 * bit 2 set = EXTENDED
 *
 * Either bit can be set, or both
 */
static int diag_buffer_enable = -1;
module_param(diag_buffer_enable, int, 0444);
MODULE_PARM_DESC(diag_buffer_enable,
	" post diag buffers (TRACE=1/SNAPSHOT=2/EXTENDED=4/default=0)");
static int disable_discovery = -1;
module_param(disable_discovery, int, 0444);
MODULE_PARM_DESC(disable_discovery, " disable discovery ");


/* permit overriding the host protection capabilities mask (EEDP/T10 PI) */
static int prot_mask = -1;
module_param(prot_mask, int, 0444);
MODULE_PARM_DESC(prot_mask, " host protection capabilities mask, def=7 ");

static bool enable_sdev_max_qd;
module_param(enable_sdev_max_qd, bool, 0444);
MODULE_PARM_DESC(enable_sdev_max_qd,
	"Enable sdev max qd as can_queue, def=disabled(0)");

static int multipath_on_hba = -1;
module_param(multipath_on_hba, int, 0);
MODULE_PARM_DESC(multipath_on_hba,
	"Multipath support to add same target device\n\t\t"
	"as many times as it is visible to HBA from various paths\n\t\t"
	"(by default:\n\t\t"
	"\t SAS 2.0 & SAS 3.0 HBA - This will be disabled,\n\t\t"
	"\t SAS 3.5 HBA - This will be enabled)");

static int host_tagset_enable = 1;
module_param(host_tagset_enable, int, 0444);
MODULE_PARM_DESC(host_tagset_enable,
	"Shared host tagset enable/disable Default: enable(1)");

/* raid transport support */
static struct raid_template *mpt3sas_raid_template;
static struct raid_template *mpt2sas_raid_template;


/**
 * struct sense_info - common structure for obtaining sense keys
 * @skey: sense key
 * @asc: additional sense code
 * @ascq: additional sense code qualifier
 */
struct sense_info {
	u8 skey;
	u8 asc;
	u8 ascq;
};

/* Driver-defined event codes, chosen above the firmware MPI2_EVENT_XXX
 * range; presumably processed through the same fw_event_work path as real
 * firmware events (see struct fw_event_work below) — confirm against
 * _firmware_event_work().
 */
#define MPT3SAS_PROCESS_TRIGGER_DIAG (0xFFFB)
#define MPT3SAS_TURN_ON_PFA_LED (0xFFFC)
#define MPT3SAS_PORT_ENABLE_COMPLETE (0xFFFD)
#define MPT3SAS_ABRT_TASK_SET (0xFFFE)
#define MPT3SAS_REMOVE_UNRESPONDING_DEVICES (0xFFFF)
/**
 * struct fw_event_work - firmware event struct
 * @list: link list framework
 * @work: work object (ioc->fault_reset_work_q)
 * @ioc: per adapter object
 * @device_handle: device handle
 * @VF_ID: virtual function id
 * @VP_ID: virtual port id
 * @ignore: flag meaning this event has been marked to ignore
 * @event: firmware event MPI2_EVENT_XXX defined in mpi2_ioc.h
 * @refcount: kref for this event
 * @event_data: reply event data payload follows
 *
 * This object stored on ioc->fw_event_list.
 */
struct fw_event_work {
	struct list_head list;
	struct work_struct work;

	struct MPT3SAS_ADAPTER *ioc;
	u16 device_handle;
	u8 VF_ID;
	u8 VP_ID;
	u8 ignore;
	u16 event;
	struct kref refcount;
	/* flexible array member; allocated by alloc_fw_event_work(len) */
	char event_data[] __aligned(4);
};

/* kref release callback: frees the whole event (struct + event_data) */
static void fw_event_work_free(struct kref *r)
{
	kfree(container_of(r, struct fw_event_work, refcount));
}

static void fw_event_work_get(struct fw_event_work *fw_work)
{
	kref_get(&fw_work->refcount);
}

static void fw_event_work_put(struct fw_event_work *fw_work)
{
	kref_put(&fw_work->refcount, fw_event_work_free);
}

/* Allocate a fw_event_work with @len bytes of trailing event_data.
 * GFP_ATOMIC because events may be allocated from non-sleeping context.
 * The returned object starts with a single kref reference.
 */
static struct fw_event_work *alloc_fw_event_work(int len)
{
	struct fw_event_work *fw_event;

	fw_event = kzalloc(sizeof(*fw_event) + len, GFP_ATOMIC);
	if (!fw_event)
		return NULL;

	kref_init(&fw_event->refcount);
	return fw_event;
}

/**
 * struct _scsi_io_transfer - scsi io transfer
 * @handle: sas device handle (assigned by firmware)
 * @is_raid: flag set for hidden raid components
 * @dir: DMA_TO_DEVICE, DMA_FROM_DEVICE,
 * @data_length: data transfer length
 * @data_dma: dma pointer to data
 * @sense: sense data
 * @lun: lun number
 * @cdb_length: cdb length
 * @cdb: cdb contents
 * @timeout: timeout for this command
 * @VF_ID: virtual function id
 * @VP_ID: virtual port id
 * @valid_reply: flag set for reply message
 * @sense_length: sense length
 * @ioc_status: ioc status
 * @scsi_state: scsi state
 * @scsi_status: scsi status
 * @log_info: log information
 * @transfer_length: data length transfer when there is a reply message
 *
 * Used for sending internal scsi commands to devices within this module.
 * Refer to _scsi_send_scsi_io().
 */
struct _scsi_io_transfer {
	u16 handle;
	u8 is_raid;
	enum dma_data_direction dir;
	u32 data_length;
	dma_addr_t data_dma;
	u8 sense[SCSI_SENSE_BUFFERSIZE];
	u32 lun;
	u8 cdb_length;
	u8 cdb[32];
	u8 timeout;
	u8 VF_ID;
	u8 VP_ID;
	u8 valid_reply;
	/* the following bits are only valid when 'valid_reply = 1' */
	u32 sense_length;
	u16 ioc_status;
	u8 scsi_state;
	u8 scsi_status;
	u32 log_info;
	u32 transfer_length;
};

/**
 * _scsih_set_debug_level - global setting of ioc->logging_level.
 * @val: string value written to the module parameter (parsed by
 *	param_set_int())
 * @kp: kernel_param descriptor for logging_level
 *
 * Note: The logging levels are defined in mpt3sas_debug.h.
 */
static int
_scsih_set_debug_level(const char *val, const struct kernel_param *kp)
{
	int ret = param_set_int(val, kp);
	struct MPT3SAS_ADAPTER *ioc;

	if (ret)
		return ret;

	/* propagate the new level to every registered adapter */
	pr_info("setting logging_level(0x%08x)\n", logging_level);
	spin_lock(&gioc_lock);
	list_for_each_entry(ioc, &mpt3sas_ioc_list, list)
		ioc->logging_level = logging_level;
	spin_unlock(&gioc_lock);
	return 0;
}
module_param_call(logging_level, _scsih_set_debug_level, param_get_int,
	&logging_level, 0644);

/**
 * _scsih_srch_boot_sas_address - search based on sas_address
 * @sas_address: sas address
 * @boot_device: boot device object from bios page 2
 *
 * Return: 1 when there's a match, 0 means no match.
 */
static inline int
_scsih_srch_boot_sas_address(u64 sas_address,
	Mpi2BootDeviceSasWwid_t *boot_device)
{
	return (sas_address == le64_to_cpu(boot_device->SASAddress)) ? 1 : 0;
}

/**
 * _scsih_srch_boot_device_name - search based on device name
 * @device_name: device name specified in IDENTIFY frame
 * @boot_device: boot device object from bios page 2
 *
 * Return: 1 when there's a match, 0 means no match.
 */
static inline int
_scsih_srch_boot_device_name(u64 device_name,
	Mpi2BootDeviceDeviceName_t *boot_device)
{
	return (device_name == le64_to_cpu(boot_device->DeviceName)) ? 1 : 0;
}

/**
 * _scsih_srch_boot_encl_slot - search based on enclosure_logical_id/slot
 * @enclosure_logical_id: enclosure logical id
 * @slot_number: slot number
 * @boot_device: boot device object from bios page 2
 *
 * Return: 1 when there's a match, 0 means no match.
 */
static inline int
_scsih_srch_boot_encl_slot(u64 enclosure_logical_id, u16 slot_number,
	Mpi2BootDeviceEnclosureSlot_t *boot_device)
{
	/* both the enclosure id and the slot number must match */
	return (enclosure_logical_id == le64_to_cpu(boot_device->
	    EnclosureLogicalID) && slot_number == le16_to_cpu(boot_device->
	    SlotNumber)) ? 1 : 0;
}

/**
 * mpt3sas_get_port_by_id - get hba port entry corresponding to provided
 *			  port number from port list
 * @ioc: per adapter object
 * @port_id: port number
 * @bypass_dirty_port_flag: when set, look for the matching hba port entry
 *			  even if the hba port entry is marked as dirty.
 *
 * Search for hba port entry corresponding to provided port number,
 * if available return port object otherwise return NULL.
 */
struct hba_port *
mpt3sas_get_port_by_id(struct MPT3SAS_ADAPTER *ioc,
	u8 port_id, u8 bypass_dirty_port_flag)
{
	struct hba_port *port, *port_next;

	/*
	 * When multipath_on_hba is disabled then
	 * search the hba_port entry using default
	 * port id i.e. 255
	 */
	if (!ioc->multipath_on_hba)
		port_id = MULTIPATH_DISABLED_PORT_ID;

	list_for_each_entry_safe(port, port_next,
	    &ioc->port_table_list, list) {
		if (port->port_id != port_id)
			continue;
		if (bypass_dirty_port_flag)
			return port;
		if (port->flags & HBA_PORT_FLAG_DIRTY_PORT)
			continue;
		return port;
	}

	/*
	 * Allocate hba_port object for default port id (i.e. 255)
	 * when multipath_on_hba is disabled for the HBA.
	 * And add this object to port_table_list.
	 */
	if (!ioc->multipath_on_hba) {
		/* GFP_ATOMIC — may be called from non-sleeping context */
		port = kzalloc(sizeof(struct hba_port), GFP_ATOMIC);
		if (!port)
			return NULL;

		port->port_id = port_id;
		ioc_info(ioc,
		    "hba_port entry: %p, port: %d is added to hba_port list\n",
		    port, port->port_id);
		list_add_tail(&port->list,
		    &ioc->port_table_list);
		return port;
	}
	return NULL;
}

/**
 * mpt3sas_get_vphy_by_phy - get virtual_phy object corresponding to phy number
 * @ioc: per adapter object
 * @port: hba_port object
 * @phy: phy number
 *
 * Return virtual_phy object corresponding to phy number.
 */
struct virtual_phy *
mpt3sas_get_vphy_by_phy(struct MPT3SAS_ADAPTER *ioc,
	struct hba_port *port, u32 phy)
{
	struct virtual_phy *vphy, *vphy_next;

	if (!port->vphys_mask)
		return NULL;

	list_for_each_entry_safe(vphy, vphy_next, &port->vphys_list, list) {
		if (vphy->phy_mask & (1 << phy))
			return vphy;
	}
	return NULL;
}

/**
 * _scsih_is_boot_device - search for matching boot device.
 * @sas_address: sas address
 * @device_name: device name specified in IDENTIFY frame
 * @enclosure_logical_id: enclosure logical id
 * @slot: slot number
 * @form: specifies boot device form
 * @boot_device: boot device object from bios page 2
 *
 * Return: 1 when there's a match, 0 means no match.
 */
static int
_scsih_is_boot_device(u64 sas_address, u64 device_name,
	u64 enclosure_logical_id, u16 slot, u8 form,
	Mpi2BiosPage2BootDevice_t *boot_device)
{
	int rc = 0;

	/* dispatch on the bios page 2 "form" to the matching comparator;
	 * a zero key (no address/name/id) never matches
	 */
	switch (form) {
	case MPI2_BIOSPAGE2_FORM_SAS_WWID:
		if (!sas_address)
			break;
		rc = _scsih_srch_boot_sas_address(
		    sas_address, &boot_device->SasWwid);
		break;
	case MPI2_BIOSPAGE2_FORM_ENCLOSURE_SLOT:
		if (!enclosure_logical_id)
			break;
		rc = _scsih_srch_boot_encl_slot(
		    enclosure_logical_id,
		    slot, &boot_device->EnclosureSlot);
		break;
	case MPI2_BIOSPAGE2_FORM_DEVICE_NAME:
		if (!device_name)
			break;
		rc = _scsih_srch_boot_device_name(
		    device_name, &boot_device->DeviceName);
		break;
	case MPI2_BIOSPAGE2_FORM_NO_DEVICE_SPECIFIED:
		break;
	}

	return rc;
}

/**
 * _scsih_get_sas_address - set the sas_address for given device handle
 * @ioc: per adapter object
 * @handle: device handle
 * @sas_address: sas address (output; set to 0 on failure)
 *
 * Return: 0 success, non-zero when failure
 */
static int
_scsih_get_sas_address(struct MPT3SAS_ADAPTER *ioc, u16 handle,
	u64 *sas_address)
{
	Mpi2SasDevicePage0_t sas_device_pg0;
	Mpi2ConfigReply_t mpi_reply;
	u32 ioc_status;

	*sas_address = 0;

	if ((mpt3sas_config_get_sas_device_pg0(ioc, &mpi_reply, &sas_device_pg0,
	    MPI2_SAS_DEVICE_PGAD_FORM_HANDLE, handle))) {
		ioc_err(ioc, "failure at %s:%d/%s()!\n",
			__FILE__, __LINE__, __func__);
		return -ENXIO;
	}

	ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & MPI2_IOCSTATUS_MASK;
	if (ioc_status == MPI2_IOCSTATUS_SUCCESS) {
		/* For HBA, vSES doesn't return HBA SAS address. Instead return
		 * vSES's sas address.
		 */
		if ((handle <= ioc->sas_hba.num_phys) &&
		    (!(le32_to_cpu(sas_device_pg0.DeviceInfo) &
		    MPI2_SAS_DEVICE_INFO_SEP)))
			*sas_address = ioc->sas_hba.sas_address;
		else
			*sas_address = le64_to_cpu(sas_device_pg0.SASAddress);
		return 0;
	}

	/* we hit this because the given parent handle doesn't exist */
	if (ioc_status == MPI2_IOCSTATUS_CONFIG_INVALID_PAGE)
		return -ENXIO;

	/* else error case */
	ioc_err(ioc, "handle(0x%04x), ioc_status(0x%04x), failure at %s:%d/%s()!\n",
		handle, ioc_status, __FILE__, __LINE__, __func__);
	return -EIO;
}

/**
 * _scsih_determine_boot_device - determine boot device.
 * @ioc: per adapter object
 * @device: sas_device or pcie_device object
 * @channel: SAS or PCIe channel
 *
 * Determines whether this device should be first reported device to
 * scsi-ml or sas transport, this purpose is for persistent boot device.
 * There are primary, alternate, and current entries in bios page 2. The order
 * priority is primary, alternate, then current. This routine saves
 * the corresponding device object.
 * The saved data to be used later in _scsih_probe_boot_devices().
 */
static void
_scsih_determine_boot_device(struct MPT3SAS_ADAPTER *ioc, void *device,
	u32 channel)
{
	struct _sas_device *sas_device;
	struct _pcie_device *pcie_device;
	struct _raid_device *raid_device;
	u64 sas_address;
	u64 device_name;
	u64 enclosure_logical_id;
	u16 slot;

	/* only process this function when driver loads */
	if (!ioc->is_driver_loading)
		return;

	/* no Bios, return immediately */
	if (!ioc->bios_pg3.BiosVersion)
		return;

	/* @device is typed by @channel: RAID volume, PCIe device, or (default)
	 * SAS device; only SAS devices carry name/enclosure/slot keys
	 */
	if (channel == RAID_CHANNEL) {
		raid_device = device;
		sas_address = raid_device->wwid;
		device_name = 0;
		enclosure_logical_id = 0;
		slot = 0;
	} else if (channel == PCIE_CHANNEL) {
		pcie_device = device;
		sas_address = pcie_device->wwid;
		device_name = 0;
		enclosure_logical_id = 0;
		slot = 0;
	} else {
		sas_device = device;
		sas_address = sas_device->sas_address;
		device_name = sas_device->device_name;
		enclosure_logical_id = sas_device->enclosure_logical_id;
		slot = sas_device->slot;
	}

	/* first match wins for each of the three bios page 2 entries */
	if (!ioc->req_boot_device.device) {
		if (_scsih_is_boot_device(sas_address, device_name,
		    enclosure_logical_id, slot,
		    (ioc->bios_pg2.ReqBootDeviceForm &
		    MPI2_BIOSPAGE2_FORM_MASK),
		    &ioc->bios_pg2.RequestedBootDevice)) {
			dinitprintk(ioc,
				    ioc_info(ioc, "%s: req_boot_device(0x%016llx)\n",
					     __func__, (u64)sas_address));
			ioc->req_boot_device.device = device;
			ioc->req_boot_device.channel = channel;
		}
	}

	if (!ioc->req_alt_boot_device.device) {
		if (_scsih_is_boot_device(sas_address, device_name,
		    enclosure_logical_id, slot,
		    (ioc->bios_pg2.ReqAltBootDeviceForm &
		    MPI2_BIOSPAGE2_FORM_MASK),
		    &ioc->bios_pg2.RequestedAltBootDevice)) {
			dinitprintk(ioc,
				    ioc_info(ioc, "%s: req_alt_boot_device(0x%016llx)\n",
					     __func__, (u64)sas_address));
			ioc->req_alt_boot_device.device = device;
			ioc->req_alt_boot_device.channel = channel;
		}
	}

	if (!ioc->current_boot_device.device) {
		if (_scsih_is_boot_device(sas_address, device_name,
		    enclosure_logical_id, slot,
		    (ioc->bios_pg2.CurrentBootDeviceForm &
		    MPI2_BIOSPAGE2_FORM_MASK),
		    &ioc->bios_pg2.CurrentBootDevice)) {
			dinitprintk(ioc,
				    ioc_info(ioc, "%s: current_boot_device(0x%016llx)\n",
					     __func__, (u64)sas_address));
			ioc->current_boot_device.device = device;
			ioc->current_boot_device.channel = channel;
		}
	}
}

/* Lock-free variant: caller must hold ioc->sas_device_lock (checked with
 * assert_spin_locked()).  Takes a reference on the returned sas_device.
 */
static struct _sas_device *
__mpt3sas_get_sdev_from_target(struct MPT3SAS_ADAPTER *ioc,
	struct MPT3SAS_TARGET *tgt_priv)
{
	struct _sas_device *ret;

	assert_spin_locked(&ioc->sas_device_lock);

	ret = tgt_priv->sas_dev;
	if (ret)
		sas_device_get(ret);

	return ret;
}

/* Locked variant: acquires ioc->sas_device_lock around the lookup.
 * The caller owns the returned reference (drop with sas_device_put()).
 */
static struct _sas_device *
mpt3sas_get_sdev_from_target(struct MPT3SAS_ADAPTER *ioc,
	struct MPT3SAS_TARGET *tgt_priv)
{
	struct _sas_device *ret;
	unsigned long flags;

	spin_lock_irqsave(&ioc->sas_device_lock, flags);
	ret = __mpt3sas_get_sdev_from_target(ioc, tgt_priv);
	spin_unlock_irqrestore(&ioc->sas_device_lock, flags);

	return ret;
}

/* Lock-free variant: caller must hold ioc->pcie_device_lock (checked with
 * assert_spin_locked()).  Takes a reference on the returned pcie_device.
 */
static struct _pcie_device *
__mpt3sas_get_pdev_from_target(struct MPT3SAS_ADAPTER *ioc,
	struct MPT3SAS_TARGET *tgt_priv)
{
	struct _pcie_device *ret;

	assert_spin_locked(&ioc->pcie_device_lock);

	ret = tgt_priv->pcie_dev;
	if (ret)
		pcie_device_get(ret);

	return ret;
}

/**
 * mpt3sas_get_pdev_from_target - pcie device search
 * @ioc: per adapter object
 * @tgt_priv: starget private object
 *
 * Context: This function will acquire ioc->pcie_device_lock and will release
 * before returning the pcie_device object.
 *
 * This searches for pcie_device from target, then return pcie_device object.
 */
static struct _pcie_device *
mpt3sas_get_pdev_from_target(struct MPT3SAS_ADAPTER *ioc,
	struct MPT3SAS_TARGET *tgt_priv)
{
	struct _pcie_device *ret;
	unsigned long flags;

	spin_lock_irqsave(&ioc->pcie_device_lock, flags);
	ret = __mpt3sas_get_pdev_from_target(ioc, tgt_priv);
	spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);

	return ret;
}


/**
 * __mpt3sas_get_sdev_by_rphy - sas device search
 * @ioc: per adapter object
 * @rphy: sas_rphy pointer
 *
 * Context: Caller must already hold ioc->sas_device_lock (enforced via
 * assert_spin_locked()); the lock is NOT acquired here.
 *
 * This searches for sas_device from rphy object
 * then return sas_device object (with a reference taken).
 */
struct _sas_device *
__mpt3sas_get_sdev_by_rphy(struct MPT3SAS_ADAPTER *ioc,
	struct sas_rphy *rphy)
{
	struct _sas_device *sas_device;

	assert_spin_locked(&ioc->sas_device_lock);

	/* check the main list first, then the driver-load-time init list */
	list_for_each_entry(sas_device, &ioc->sas_device_list, list) {
		if (sas_device->rphy != rphy)
			continue;
		sas_device_get(sas_device);
		return sas_device;
	}

	sas_device = NULL;
	list_for_each_entry(sas_device, &ioc->sas_device_init_list, list) {
		if (sas_device->rphy != rphy)
			continue;
		sas_device_get(sas_device);
		return sas_device;
	}

	return NULL;
}

/**
 * __mpt3sas_get_sdev_by_addr - get _sas_device object corresponding to provided
 *				sas address from sas_device_list list
 * @ioc: per adapter object
 * @sas_address: device sas address
 * @port: hba port entry
 *
 * Search for _sas_device object corresponding to provided sas address,
 * if available return _sas_device object address otherwise return NULL.
 */
struct _sas_device *
__mpt3sas_get_sdev_by_addr(struct MPT3SAS_ADAPTER *ioc,
	u64 sas_address, struct hba_port *port)
{
	struct _sas_device *sas_device;

	if (!port)
		return NULL;

	/* caller must hold ioc->sas_device_lock */
	assert_spin_locked(&ioc->sas_device_lock);

	/* both sas address and hba port must match; check the main list
	 * first, then the driver-load-time init list
	 */
	list_for_each_entry(sas_device, &ioc->sas_device_list, list) {
		if (sas_device->sas_address != sas_address)
			continue;
		if (sas_device->port != port)
			continue;
		sas_device_get(sas_device);
		return sas_device;
	}

	list_for_each_entry(sas_device, &ioc->sas_device_init_list, list) {
		if (sas_device->sas_address != sas_address)
			continue;
		if (sas_device->port != port)
			continue;
		sas_device_get(sas_device);
		return sas_device;
	}

	return NULL;
}

/**
 * mpt3sas_get_sdev_by_addr - sas device search
 * @ioc: per adapter object
 * @sas_address: sas address
 * @port: hba port entry
 * Context: Acquires ioc->sas_device_lock internally; caller must NOT
 * already hold it.
 *
 * This searches for sas_device based on sas_address & port number,
 * then return sas_device object.
 */
struct _sas_device *
mpt3sas_get_sdev_by_addr(struct MPT3SAS_ADAPTER *ioc,
	u64 sas_address, struct hba_port *port)
{
	struct _sas_device *sas_device;
	unsigned long flags;

	spin_lock_irqsave(&ioc->sas_device_lock, flags);
	sas_device = __mpt3sas_get_sdev_by_addr(ioc,
	    sas_address, port);
	spin_unlock_irqrestore(&ioc->sas_device_lock, flags);

	return sas_device;
}

/* Lock-free handle lookup: caller must hold ioc->sas_device_lock.
 * Searches both the main and the driver-load-time init lists and takes a
 * reference on the match.
 */
static struct _sas_device *
__mpt3sas_get_sdev_by_handle(struct MPT3SAS_ADAPTER *ioc, u16 handle)
{
	struct _sas_device *sas_device;

	assert_spin_locked(&ioc->sas_device_lock);

	list_for_each_entry(sas_device, &ioc->sas_device_list, list)
		if (sas_device->handle == handle)
			goto found_device;

	list_for_each_entry(sas_device, &ioc->sas_device_init_list, list)
		if (sas_device->handle == handle)
			goto found_device;

	return NULL;

found_device:
	sas_device_get(sas_device);
	return sas_device;
}

/**
 * mpt3sas_get_sdev_by_handle - sas device search
 * @ioc: per adapter object
 * @handle: sas device handle (assigned by firmware)
 * Context: Acquires ioc->sas_device_lock internally; caller must NOT
 * already hold it.
 *
 * This searches for sas_device based on handle, then return sas_device
 * object.
 */
struct _sas_device *
mpt3sas_get_sdev_by_handle(struct MPT3SAS_ADAPTER *ioc, u16 handle)
{
	struct _sas_device *sas_device;
	unsigned long flags;

	spin_lock_irqsave(&ioc->sas_device_lock, flags);
	sas_device = __mpt3sas_get_sdev_by_handle(ioc, handle);
	spin_unlock_irqrestore(&ioc->sas_device_lock, flags);

	return sas_device;
}

/**
 * _scsih_display_enclosure_chassis_info - display device location info
 * @ioc: per adapter object
 * @sas_device: per sas device object
 * @sdev: scsi device struct
 * @starget: scsi target struct
 *
 * Prints through @sdev when given, else through @starget, else through the
 * ioc; only valid fields (non-zero handle, non-empty connector name, valid
 * chassis slot) are reported.
 */
static void
_scsih_display_enclosure_chassis_info(struct MPT3SAS_ADAPTER *ioc,
	struct _sas_device *sas_device, struct scsi_device *sdev,
	struct scsi_target *starget)
{
	if (sdev) {
		if (sas_device->enclosure_handle != 0)
			sdev_printk(KERN_INFO, sdev,
			    "enclosure logical id (0x%016llx), slot(%d) \n",
			    (unsigned long long)
			    sas_device->enclosure_logical_id,
			    sas_device->slot);
		if (sas_device->connector_name[0] != '\0')
			sdev_printk(KERN_INFO, sdev,
			    "enclosure level(0x%04x), connector name( %s)\n",
			    sas_device->enclosure_level,
			    sas_device->connector_name);
		if (sas_device->is_chassis_slot_valid)
			sdev_printk(KERN_INFO, sdev, "chassis slot(0x%04x)\n",
			    sas_device->chassis_slot);
	} else if (starget) {
		if (sas_device->enclosure_handle != 0)
			starget_printk(KERN_INFO, starget,
			    "enclosure logical id(0x%016llx), slot(%d) \n",
			    (unsigned long long)
			    sas_device->enclosure_logical_id,
			    sas_device->slot);
		if (sas_device->connector_name[0] != '\0')
			starget_printk(KERN_INFO, starget,
			    "enclosure level(0x%04x), connector name( %s)\n",
			    sas_device->enclosure_level,
			    sas_device->connector_name);
		if (sas_device->is_chassis_slot_valid)
			starget_printk(KERN_INFO, starget,
			    "chassis slot(0x%04x)\n",
			    sas_device->chassis_slot);
	} else {
		if (sas_device->enclosure_handle != 0)
			ioc_info(ioc, "enclosure logical id(0x%016llx), slot(%d)\n",
				 (u64)sas_device->enclosure_logical_id,
				 sas_device->slot);
		if (sas_device->connector_name[0] != '\0')
			ioc_info(ioc, "enclosure level(0x%04x), connector name( %s)\n",
				 sas_device->enclosure_level,
				 sas_device->connector_name);
		if (sas_device->is_chassis_slot_valid)
			ioc_info(ioc, "chassis slot(0x%04x)\n",
				 sas_device->chassis_slot);
	}
}

/**
 * _scsih_sas_device_remove - remove sas_device from list.
 * @ioc: per adapter object
 * @sas_device: the sas_device object
 * Context: This function will acquire ioc->sas_device_lock.
 *
 * If sas_device is on the list, remove it and decrement its reference count.
 */
static void
_scsih_sas_device_remove(struct MPT3SAS_ADAPTER *ioc,
	struct _sas_device *sas_device)
{
	unsigned long flags;

	if (!sas_device)
		return;
	ioc_info(ioc, "removing handle(0x%04x), sas_addr(0x%016llx)\n",
		 sas_device->handle, (u64)sas_device->sas_address);

	_scsih_display_enclosure_chassis_info(ioc, sas_device, NULL, NULL);

	/*
	 * The lock serializes access to the list, but we still need to verify
	 * that nobody removed the entry while we were waiting on the lock.
	 */
	spin_lock_irqsave(&ioc->sas_device_lock, flags);
	if (!list_empty(&sas_device->list)) {
		list_del_init(&sas_device->list);
		sas_device_put(sas_device);
	}
	spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
}

/**
 * _scsih_device_remove_by_handle - removing device object by handle
 * @ioc: per adapter object
 * @handle: device handle
 *
 * No-op while host recovery is in progress.
 */
static void
_scsih_device_remove_by_handle(struct MPT3SAS_ADAPTER *ioc, u16 handle)
{
	struct _sas_device *sas_device;
	unsigned long flags;

	if (ioc->shost_recovery)
		return;

	/* unlink under the lock; tear down outside it */
	spin_lock_irqsave(&ioc->sas_device_lock, flags);
	sas_device = __mpt3sas_get_sdev_by_handle(ioc, handle);
	if (sas_device) {
		list_del_init(&sas_device->list);
		sas_device_put(sas_device);
	}
	spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
	if (sas_device) {
		_scsih_remove_device(ioc, sas_device);
		sas_device_put(sas_device);
	}
}

/**
 * mpt3sas_device_remove_by_sas_address - removing device object by
 *					sas address & port number
 * @ioc: per adapter object
 * @sas_address: device sas_address
 * @port: hba port entry
 *
 * Return nothing.
 */
void
mpt3sas_device_remove_by_sas_address(struct MPT3SAS_ADAPTER *ioc,
	u64 sas_address, struct hba_port *port)
{
	struct _sas_device *sas_device;
	unsigned long flags;

	if (ioc->shost_recovery)
		return;

	/* unlink under the lock; tear down outside it */
	spin_lock_irqsave(&ioc->sas_device_lock, flags);
	sas_device = __mpt3sas_get_sdev_by_addr(ioc, sas_address, port);
	if (sas_device) {
		list_del_init(&sas_device->list);
		sas_device_put(sas_device);
	}
	spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
	if (sas_device) {
		_scsih_remove_device(ioc, sas_device);
		sas_device_put(sas_device);
	}
}

/**
 * _scsih_sas_device_add - insert sas_device to the list.
 * @ioc: per adapter object
 * @sas_device: the sas_device object
 * Context: This function will acquire ioc->sas_device_lock.
 *
 * Adding new object to the ioc->sas_device_list.
 */
static void
_scsih_sas_device_add(struct MPT3SAS_ADAPTER *ioc,
	struct _sas_device *sas_device)
{
	unsigned long flags;

	dewtprintk(ioc,
		   ioc_info(ioc, "%s: handle(0x%04x), sas_addr(0x%016llx)\n",
			    __func__, sas_device->handle,
			    (u64)sas_device->sas_address));

	dewtprintk(ioc, _scsih_display_enclosure_chassis_info(ioc, sas_device,
	    NULL, NULL));

	/* the list holds its own reference on the device */
	spin_lock_irqsave(&ioc->sas_device_lock, flags);
	sas_device_get(sas_device);
	list_add_tail(&sas_device->list, &ioc->sas_device_list);
	spin_unlock_irqrestore(&ioc->sas_device_lock, flags);

	if (ioc->hide_drives) {
		clear_bit(sas_device->handle, ioc->pend_os_device_add);
		return;
	}

	/* on transport-add failure, undo the list insertion above */
	if (!mpt3sas_transport_port_add(ioc, sas_device->handle,
	    sas_device->sas_address_parent, sas_device->port)) {
		_scsih_sas_device_remove(ioc, sas_device);
	} else if (!sas_device->starget) {
		/*
		 * When async scanning is enabled, its not possible to remove
		 * devices while scanning is turned on due to an oops in
		 * scsi_sysfs_add_sdev()->add_device()->sysfs_addrm_start()
		 */
		if (!ioc->is_driver_loading) {
			mpt3sas_transport_port_remove(ioc,
			    sas_device->sas_address,
			    sas_device->sas_address_parent,
			    sas_device->port);
			_scsih_sas_device_remove(ioc, sas_device);
		}
	} else
		clear_bit(sas_device->handle, ioc->pend_os_device_add);
}

/**
 * _scsih_sas_device_init_add - insert sas_device to the list.
 * @ioc: per adapter object
 * @sas_device: the sas_device object
 * Context: This function will acquire ioc->sas_device_lock.
 *
 * Adding new object at driver load time to the ioc->sas_device_init_list.
 */
static void
_scsih_sas_device_init_add(struct MPT3SAS_ADAPTER *ioc,
	struct _sas_device *sas_device)
{
	unsigned long flags;

	dewtprintk(ioc,
		   ioc_info(ioc, "%s: handle(0x%04x), sas_addr(0x%016llx)\n",
			    __func__, sas_device->handle,
			    (u64)sas_device->sas_address));

	dewtprintk(ioc, _scsih_display_enclosure_chassis_info(ioc, sas_device,
	    NULL, NULL));

	/* init list takes a reference; also evaluate as boot device (channel 0) */
	spin_lock_irqsave(&ioc->sas_device_lock, flags);
	sas_device_get(sas_device);
	list_add_tail(&sas_device->list, &ioc->sas_device_init_list);
	_scsih_determine_boot_device(ioc, sas_device, 0);
	spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
}


/*
 * __mpt3sas_get_pdev_by_wwid - look up a pcie_device by wwid on both the
 * main and the init lists.
 * Context: caller must hold ioc->pcie_device_lock.
 *
 * Returns the pcie_device with an extra reference taken, or NULL.
 */
static struct _pcie_device *
__mpt3sas_get_pdev_by_wwid(struct MPT3SAS_ADAPTER *ioc, u64 wwid)
{
	struct _pcie_device *pcie_device;

	assert_spin_locked(&ioc->pcie_device_lock);

	list_for_each_entry(pcie_device, &ioc->pcie_device_list, list)
		if (pcie_device->wwid == wwid)
			goto found_device;

	list_for_each_entry(pcie_device, &ioc->pcie_device_init_list, list)
		if (pcie_device->wwid == wwid)
			goto found_device;

	return NULL;

found_device:
	pcie_device_get(pcie_device);
	return pcie_device;
}


/**
 * mpt3sas_get_pdev_by_wwid - pcie device search
 * @ioc: per adapter object
 * @wwid: wwid
 *
 * Context: This function will acquire ioc->pcie_device_lock and will release
 * before returning the pcie_device object.
 *
 * This searches for pcie_device based on wwid, then return pcie_device object.
1126 */ 1127 static struct _pcie_device * 1128 mpt3sas_get_pdev_by_wwid(struct MPT3SAS_ADAPTER *ioc, u64 wwid) 1129 { 1130 struct _pcie_device *pcie_device; 1131 unsigned long flags; 1132 1133 spin_lock_irqsave(&ioc->pcie_device_lock, flags); 1134 pcie_device = __mpt3sas_get_pdev_by_wwid(ioc, wwid); 1135 spin_unlock_irqrestore(&ioc->pcie_device_lock, flags); 1136 1137 return pcie_device; 1138 } 1139 1140 1141 static struct _pcie_device * 1142 __mpt3sas_get_pdev_by_idchannel(struct MPT3SAS_ADAPTER *ioc, int id, 1143 int channel) 1144 { 1145 struct _pcie_device *pcie_device; 1146 1147 assert_spin_locked(&ioc->pcie_device_lock); 1148 1149 list_for_each_entry(pcie_device, &ioc->pcie_device_list, list) 1150 if (pcie_device->id == id && pcie_device->channel == channel) 1151 goto found_device; 1152 1153 list_for_each_entry(pcie_device, &ioc->pcie_device_init_list, list) 1154 if (pcie_device->id == id && pcie_device->channel == channel) 1155 goto found_device; 1156 1157 return NULL; 1158 1159 found_device: 1160 pcie_device_get(pcie_device); 1161 return pcie_device; 1162 } 1163 1164 static struct _pcie_device * 1165 __mpt3sas_get_pdev_by_handle(struct MPT3SAS_ADAPTER *ioc, u16 handle) 1166 { 1167 struct _pcie_device *pcie_device; 1168 1169 assert_spin_locked(&ioc->pcie_device_lock); 1170 1171 list_for_each_entry(pcie_device, &ioc->pcie_device_list, list) 1172 if (pcie_device->handle == handle) 1173 goto found_device; 1174 1175 list_for_each_entry(pcie_device, &ioc->pcie_device_init_list, list) 1176 if (pcie_device->handle == handle) 1177 goto found_device; 1178 1179 return NULL; 1180 1181 found_device: 1182 pcie_device_get(pcie_device); 1183 return pcie_device; 1184 } 1185 1186 1187 /** 1188 * mpt3sas_get_pdev_by_handle - pcie device search 1189 * @ioc: per adapter object 1190 * @handle: Firmware device handle 1191 * 1192 * Context: This function will acquire ioc->pcie_device_lock and will release 1193 * before returning the pcie_device object. 
 *
 * This searches for pcie_device based on handle, then return pcie_device
 * object.
 */
struct _pcie_device *
mpt3sas_get_pdev_by_handle(struct MPT3SAS_ADAPTER *ioc, u16 handle)
{
	struct _pcie_device *pcie_device;
	unsigned long flags;

	spin_lock_irqsave(&ioc->pcie_device_lock, flags);
	pcie_device = __mpt3sas_get_pdev_by_handle(ioc, handle);
	spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);

	return pcie_device;
}

/**
 * _scsih_set_nvme_max_shutdown_latency - Update max_shutdown_latency.
 * @ioc: per adapter object
 * Context: This function will acquire ioc->pcie_device_lock
 *
 * Update ioc->max_shutdown_latency to that NVMe drives RTD3 Entry Latency
 * which has reported maximum among all available NVMe drives.
 * Minimum max_shutdown_latency will be six seconds.
 */
static void
_scsih_set_nvme_max_shutdown_latency(struct MPT3SAS_ADAPTER *ioc)
{
	struct _pcie_device *pcie_device;
	unsigned long flags;
	/* floor: default IO unit control shutdown timeout */
	u16 shutdown_latency = IO_UNIT_CONTROL_SHUTDOWN_TIMEOUT;

	spin_lock_irqsave(&ioc->pcie_device_lock, flags);
	list_for_each_entry(pcie_device, &ioc->pcie_device_list, list) {
		if (pcie_device->shutdown_latency) {
			if (shutdown_latency < pcie_device->shutdown_latency)
				shutdown_latency =
				    pcie_device->shutdown_latency;
		}
	}
	ioc->max_shutdown_latency = shutdown_latency;
	spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
}

/**
 * _scsih_pcie_device_remove - remove pcie_device from list.
 * @ioc: per adapter object
 * @pcie_device: the pcie_device object
 * Context: This function will acquire ioc->pcie_device_lock.
 *
 * If pcie_device is on the list, remove it and decrement its reference count.
 */
static void
_scsih_pcie_device_remove(struct MPT3SAS_ADAPTER *ioc,
	struct _pcie_device *pcie_device)
{
	unsigned long flags;
	int was_on_pcie_device_list = 0;
	u8 update_latency = 0;

	if (!pcie_device)
		return;
	ioc_info(ioc, "removing handle(0x%04x), wwid(0x%016llx)\n",
		 pcie_device->handle, (u64)pcie_device->wwid);
	if (pcie_device->enclosure_handle != 0)
		ioc_info(ioc, "removing enclosure logical id(0x%016llx), slot(%d)\n",
			 (u64)pcie_device->enclosure_logical_id,
			 pcie_device->slot);
	if (pcie_device->connector_name[0] != '\0')
		ioc_info(ioc, "removing enclosure level(0x%04x), connector name( %s)\n",
			 pcie_device->enclosure_level,
			 pcie_device->connector_name);

	/* unlink under lock; decide latency recompute while still locked */
	spin_lock_irqsave(&ioc->pcie_device_lock, flags);
	if (!list_empty(&pcie_device->list)) {
		list_del_init(&pcie_device->list);
		was_on_pcie_device_list = 1;
	}
	if (pcie_device->shutdown_latency == ioc->max_shutdown_latency)
		update_latency = 1;
	spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
	if (was_on_pcie_device_list) {
		kfree(pcie_device->serial_number);
		/* drop the reference the list held */
		pcie_device_put(pcie_device);
	}

	/*
	 * This device's RTD3 Entry Latency matches IOC's
	 * max_shutdown_latency. Recalculate IOC's max_shutdown_latency
	 * from the available drives as current drive is getting removed.
	 */
	if (update_latency)
		_scsih_set_nvme_max_shutdown_latency(ioc);
}


/**
 * _scsih_pcie_device_remove_by_handle - removing pcie device object by handle
 * @ioc: per adapter object
 * @handle: device handle
 */
static void
_scsih_pcie_device_remove_by_handle(struct MPT3SAS_ADAPTER *ioc, u16 handle)
{
	struct _pcie_device *pcie_device;
	unsigned long flags;
	int was_on_pcie_device_list = 0;
	u8 update_latency = 0;

	if (ioc->shost_recovery)
		return;

	spin_lock_irqsave(&ioc->pcie_device_lock, flags);
	pcie_device = __mpt3sas_get_pdev_by_handle(ioc, handle);
	if (pcie_device) {
		if (!list_empty(&pcie_device->list)) {
			list_del_init(&pcie_device->list);
			was_on_pcie_device_list = 1;
			/* drop the reference the list held */
			pcie_device_put(pcie_device);
		}
		if (pcie_device->shutdown_latency == ioc->max_shutdown_latency)
			update_latency = 1;
	}
	spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
	if (was_on_pcie_device_list) {
		/* detach from SML outside the lock, then drop lookup ref */
		_scsih_pcie_device_remove_from_sml(ioc, pcie_device);
		pcie_device_put(pcie_device);
	}

	/*
	 * This device's RTD3 Entry Latency matches IOC's
	 * max_shutdown_latency. Recalculate IOC's max_shutdown_latency
	 * from the available drives as current drive is getting removed.
	 */
	if (update_latency)
		_scsih_set_nvme_max_shutdown_latency(ioc);
}

/**
 * _scsih_pcie_device_add - add pcie_device object
 * @ioc: per adapter object
 * @pcie_device: pcie_device object
 *
 * This is added to the pcie_device_list link list.
 */
static void
_scsih_pcie_device_add(struct MPT3SAS_ADAPTER *ioc,
	struct _pcie_device *pcie_device)
{
	unsigned long flags;

	dewtprintk(ioc,
		   ioc_info(ioc, "%s: handle (0x%04x), wwid(0x%016llx)\n",
			    __func__,
			    pcie_device->handle, (u64)pcie_device->wwid));
	if (pcie_device->enclosure_handle != 0)
		dewtprintk(ioc,
			   ioc_info(ioc, "%s: enclosure logical id(0x%016llx), slot( %d)\n",
				    __func__,
				    (u64)pcie_device->enclosure_logical_id,
				    pcie_device->slot));
	if (pcie_device->connector_name[0] != '\0')
		dewtprintk(ioc,
			   ioc_info(ioc, "%s: enclosure level(0x%04x), connector name( %s)\n",
				    __func__, pcie_device->enclosure_level,
				    pcie_device->connector_name));

	/* the list takes its own reference to the device */
	spin_lock_irqsave(&ioc->pcie_device_lock, flags);
	pcie_device_get(pcie_device);
	list_add_tail(&pcie_device->list, &ioc->pcie_device_list);
	spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);

	if (pcie_device->access_status ==
	    MPI26_PCIEDEV0_ASTATUS_DEVICE_BLOCKED) {
		/* blocked devices are tracked but not exposed to the SML */
		clear_bit(pcie_device->handle, ioc->pend_os_device_add);
		return;
	}
	if (scsi_add_device(ioc->shost, PCIE_CHANNEL, pcie_device->id, 0)) {
		/* mid-layer registration failed: undo the list insertion */
		_scsih_pcie_device_remove(ioc, pcie_device);
	} else if (!pcie_device->starget) {
		if (!ioc->is_driver_loading) {
			/*TODO-- Need to find out whether this condition will occur or not*/
			clear_bit(pcie_device->handle, ioc->pend_os_device_add);
		}
	} else
		clear_bit(pcie_device->handle, ioc->pend_os_device_add);
}

/*
 * _scsih_pcie_device_init_add - insert pcie_device to the init list.
 * @ioc: per adapter object
 * @pcie_device: the pcie_device object
 * Context: This function will acquire ioc->pcie_device_lock.
 *
 * Adding new object at driver load time to the ioc->pcie_device_init_list.
 */
static void
_scsih_pcie_device_init_add(struct MPT3SAS_ADAPTER *ioc,
	struct _pcie_device *pcie_device)
{
	unsigned long flags;

	dewtprintk(ioc,
		   ioc_info(ioc, "%s: handle (0x%04x), wwid(0x%016llx)\n",
			    __func__,
			    pcie_device->handle, (u64)pcie_device->wwid));
	if (pcie_device->enclosure_handle != 0)
		dewtprintk(ioc,
			   ioc_info(ioc, "%s: enclosure logical id(0x%016llx), slot( %d)\n",
				    __func__,
				    (u64)pcie_device->enclosure_logical_id,
				    pcie_device->slot));
	if (pcie_device->connector_name[0] != '\0')
		dewtprintk(ioc,
			   ioc_info(ioc, "%s: enclosure level(0x%04x), connector name( %s)\n",
				    __func__, pcie_device->enclosure_level,
				    pcie_device->connector_name));

	/*
	 * Init list takes a reference; non-blocked devices are also
	 * evaluated as boot-device candidates on the PCIe channel.
	 */
	spin_lock_irqsave(&ioc->pcie_device_lock, flags);
	pcie_device_get(pcie_device);
	list_add_tail(&pcie_device->list, &ioc->pcie_device_init_list);
	if (pcie_device->access_status !=
	    MPI26_PCIEDEV0_ASTATUS_DEVICE_BLOCKED)
		_scsih_determine_boot_device(ioc, pcie_device, PCIE_CHANNEL);
	spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
}
/**
 * _scsih_raid_device_find_by_id - raid device search
 * @ioc: per adapter object
 * @id: sas device target id
 * @channel: sas device channel
 * Context: Calling function should acquire ioc->raid_device_lock
 *
 * This searches for raid_device based on target id, then return raid_device
 * object.
1430 */ 1431 static struct _raid_device * 1432 _scsih_raid_device_find_by_id(struct MPT3SAS_ADAPTER *ioc, int id, int channel) 1433 { 1434 struct _raid_device *raid_device, *r; 1435 1436 r = NULL; 1437 list_for_each_entry(raid_device, &ioc->raid_device_list, list) { 1438 if (raid_device->id == id && raid_device->channel == channel) { 1439 r = raid_device; 1440 goto out; 1441 } 1442 } 1443 1444 out: 1445 return r; 1446 } 1447 1448 /** 1449 * mpt3sas_raid_device_find_by_handle - raid device search 1450 * @ioc: per adapter object 1451 * @handle: sas device handle (assigned by firmware) 1452 * Context: Calling function should acquire ioc->raid_device_lock 1453 * 1454 * This searches for raid_device based on handle, then return raid_device 1455 * object. 1456 */ 1457 struct _raid_device * 1458 mpt3sas_raid_device_find_by_handle(struct MPT3SAS_ADAPTER *ioc, u16 handle) 1459 { 1460 struct _raid_device *raid_device, *r; 1461 1462 r = NULL; 1463 list_for_each_entry(raid_device, &ioc->raid_device_list, list) { 1464 if (raid_device->handle != handle) 1465 continue; 1466 r = raid_device; 1467 goto out; 1468 } 1469 1470 out: 1471 return r; 1472 } 1473 1474 /** 1475 * _scsih_raid_device_find_by_wwid - raid device search 1476 * @ioc: per adapter object 1477 * @wwid: ? 1478 * Context: Calling function should acquire ioc->raid_device_lock 1479 * 1480 * This searches for raid_device based on wwid, then return raid_device 1481 * object. 
1482 */ 1483 static struct _raid_device * 1484 _scsih_raid_device_find_by_wwid(struct MPT3SAS_ADAPTER *ioc, u64 wwid) 1485 { 1486 struct _raid_device *raid_device, *r; 1487 1488 r = NULL; 1489 list_for_each_entry(raid_device, &ioc->raid_device_list, list) { 1490 if (raid_device->wwid != wwid) 1491 continue; 1492 r = raid_device; 1493 goto out; 1494 } 1495 1496 out: 1497 return r; 1498 } 1499 1500 /** 1501 * _scsih_raid_device_add - add raid_device object 1502 * @ioc: per adapter object 1503 * @raid_device: raid_device object 1504 * 1505 * This is added to the raid_device_list link list. 1506 */ 1507 static void 1508 _scsih_raid_device_add(struct MPT3SAS_ADAPTER *ioc, 1509 struct _raid_device *raid_device) 1510 { 1511 unsigned long flags; 1512 1513 dewtprintk(ioc, 1514 ioc_info(ioc, "%s: handle(0x%04x), wwid(0x%016llx)\n", 1515 __func__, 1516 raid_device->handle, (u64)raid_device->wwid)); 1517 1518 spin_lock_irqsave(&ioc->raid_device_lock, flags); 1519 list_add_tail(&raid_device->list, &ioc->raid_device_list); 1520 spin_unlock_irqrestore(&ioc->raid_device_lock, flags); 1521 } 1522 1523 /** 1524 * _scsih_raid_device_remove - delete raid_device object 1525 * @ioc: per adapter object 1526 * @raid_device: raid_device object 1527 * 1528 */ 1529 static void 1530 _scsih_raid_device_remove(struct MPT3SAS_ADAPTER *ioc, 1531 struct _raid_device *raid_device) 1532 { 1533 unsigned long flags; 1534 1535 spin_lock_irqsave(&ioc->raid_device_lock, flags); 1536 list_del(&raid_device->list); 1537 kfree(raid_device); 1538 spin_unlock_irqrestore(&ioc->raid_device_lock, flags); 1539 } 1540 1541 /** 1542 * mpt3sas_scsih_expander_find_by_handle - expander device search 1543 * @ioc: per adapter object 1544 * @handle: expander handle (assigned by firmware) 1545 * Context: Calling function should acquire ioc->sas_device_lock 1546 * 1547 * This searches for expander device based on handle, then returns the 1548 * sas_node object. 
1549 */ 1550 struct _sas_node * 1551 mpt3sas_scsih_expander_find_by_handle(struct MPT3SAS_ADAPTER *ioc, u16 handle) 1552 { 1553 struct _sas_node *sas_expander, *r; 1554 1555 r = NULL; 1556 list_for_each_entry(sas_expander, &ioc->sas_expander_list, list) { 1557 if (sas_expander->handle != handle) 1558 continue; 1559 r = sas_expander; 1560 goto out; 1561 } 1562 out: 1563 return r; 1564 } 1565 1566 /** 1567 * mpt3sas_scsih_enclosure_find_by_handle - exclosure device search 1568 * @ioc: per adapter object 1569 * @handle: enclosure handle (assigned by firmware) 1570 * Context: Calling function should acquire ioc->sas_device_lock 1571 * 1572 * This searches for enclosure device based on handle, then returns the 1573 * enclosure object. 1574 */ 1575 static struct _enclosure_node * 1576 mpt3sas_scsih_enclosure_find_by_handle(struct MPT3SAS_ADAPTER *ioc, u16 handle) 1577 { 1578 struct _enclosure_node *enclosure_dev, *r; 1579 1580 r = NULL; 1581 list_for_each_entry(enclosure_dev, &ioc->enclosure_list, list) { 1582 if (le16_to_cpu(enclosure_dev->pg0.EnclosureHandle) != handle) 1583 continue; 1584 r = enclosure_dev; 1585 goto out; 1586 } 1587 out: 1588 return r; 1589 } 1590 /** 1591 * mpt3sas_scsih_expander_find_by_sas_address - expander device search 1592 * @ioc: per adapter object 1593 * @sas_address: sas address 1594 * @port: hba port entry 1595 * Context: Calling function should acquire ioc->sas_node_lock. 1596 * 1597 * This searches for expander device based on sas_address & port number, 1598 * then returns the sas_node object. 
1599 */ 1600 struct _sas_node * 1601 mpt3sas_scsih_expander_find_by_sas_address(struct MPT3SAS_ADAPTER *ioc, 1602 u64 sas_address, struct hba_port *port) 1603 { 1604 struct _sas_node *sas_expander, *r = NULL; 1605 1606 if (!port) 1607 return r; 1608 1609 list_for_each_entry(sas_expander, &ioc->sas_expander_list, list) { 1610 if (sas_expander->sas_address != sas_address) 1611 continue; 1612 if (sas_expander->port != port) 1613 continue; 1614 r = sas_expander; 1615 goto out; 1616 } 1617 out: 1618 return r; 1619 } 1620 1621 /** 1622 * _scsih_expander_node_add - insert expander device to the list. 1623 * @ioc: per adapter object 1624 * @sas_expander: the sas_device object 1625 * Context: This function will acquire ioc->sas_node_lock. 1626 * 1627 * Adding new object to the ioc->sas_expander_list. 1628 */ 1629 static void 1630 _scsih_expander_node_add(struct MPT3SAS_ADAPTER *ioc, 1631 struct _sas_node *sas_expander) 1632 { 1633 unsigned long flags; 1634 1635 spin_lock_irqsave(&ioc->sas_node_lock, flags); 1636 list_add_tail(&sas_expander->list, &ioc->sas_expander_list); 1637 spin_unlock_irqrestore(&ioc->sas_node_lock, flags); 1638 } 1639 1640 /** 1641 * _scsih_is_end_device - determines if device is an end device 1642 * @device_info: bitfield providing information about the device. 1643 * Context: none 1644 * 1645 * Return: 1 if end device. 1646 */ 1647 static int 1648 _scsih_is_end_device(u32 device_info) 1649 { 1650 if (device_info & MPI2_SAS_DEVICE_INFO_END_DEVICE && 1651 ((device_info & MPI2_SAS_DEVICE_INFO_SSP_TARGET) | 1652 (device_info & MPI2_SAS_DEVICE_INFO_STP_TARGET) | 1653 (device_info & MPI2_SAS_DEVICE_INFO_SATA_DEVICE))) 1654 return 1; 1655 else 1656 return 0; 1657 } 1658 1659 /** 1660 * _scsih_is_nvme_pciescsi_device - determines if 1661 * device is an pcie nvme/scsi device 1662 * @device_info: bitfield providing information about the device. 1663 * Context: none 1664 * 1665 * Returns 1 if device is pcie device type nvme/scsi. 
1666 */ 1667 static int 1668 _scsih_is_nvme_pciescsi_device(u32 device_info) 1669 { 1670 if (((device_info & MPI26_PCIE_DEVINFO_MASK_DEVICE_TYPE) 1671 == MPI26_PCIE_DEVINFO_NVME) || 1672 ((device_info & MPI26_PCIE_DEVINFO_MASK_DEVICE_TYPE) 1673 == MPI26_PCIE_DEVINFO_SCSI)) 1674 return 1; 1675 else 1676 return 0; 1677 } 1678 1679 /** 1680 * _scsih_scsi_lookup_find_by_target - search for matching channel:id 1681 * @ioc: per adapter object 1682 * @id: target id 1683 * @channel: channel 1684 * Context: This function will acquire ioc->scsi_lookup_lock. 1685 * 1686 * This will search for a matching channel:id in the scsi_lookup array, 1687 * returning 1 if found. 1688 */ 1689 static u8 1690 _scsih_scsi_lookup_find_by_target(struct MPT3SAS_ADAPTER *ioc, int id, 1691 int channel) 1692 { 1693 int smid; 1694 struct scsi_cmnd *scmd; 1695 1696 for (smid = 1; 1697 smid <= ioc->shost->can_queue; smid++) { 1698 scmd = mpt3sas_scsih_scsi_lookup_get(ioc, smid); 1699 if (!scmd) 1700 continue; 1701 if (scmd->device->id == id && 1702 scmd->device->channel == channel) 1703 return 1; 1704 } 1705 return 0; 1706 } 1707 1708 /** 1709 * _scsih_scsi_lookup_find_by_lun - search for matching channel:id:lun 1710 * @ioc: per adapter object 1711 * @id: target id 1712 * @lun: lun number 1713 * @channel: channel 1714 * Context: This function will acquire ioc->scsi_lookup_lock. 1715 * 1716 * This will search for a matching channel:id:lun in the scsi_lookup array, 1717 * returning 1 if found. 
 */
static u8
_scsih_scsi_lookup_find_by_lun(struct MPT3SAS_ADAPTER *ioc, int id,
	unsigned int lun, int channel)
{
	int smid;
	struct scsi_cmnd *scmd;

	/* smids are 1-based; walk every possible outstanding request */
	for (smid = 1; smid <= ioc->shost->can_queue; smid++) {

		scmd = mpt3sas_scsih_scsi_lookup_get(ioc, smid);
		if (!scmd)
			continue;
		if (scmd->device->id == id &&
		    scmd->device->channel == channel &&
		    scmd->device->lun == lun)
			return 1;
	}
	return 0;
}

/**
 * mpt3sas_scsih_scsi_lookup_get - returns scmd entry
 * @ioc: per adapter object
 * @smid: system request message index
 *
 * Return: the smid stored scmd pointer.
 * Then will dereference the stored scmd pointer.
 */
struct scsi_cmnd *
mpt3sas_scsih_scsi_lookup_get(struct MPT3SAS_ADAPTER *ioc, u16 smid)
{
	struct scsi_cmnd *scmd = NULL;
	struct scsiio_tracker *st;
	Mpi25SCSIIORequest_t *mpi_request;
	u16 tag = smid - 1;	/* block-layer tags are 0-based */

	/* only SCSI IO smids; the trailing internal smids are excluded */
	if (smid > 0 &&
	    smid <= ioc->scsiio_depth - INTERNAL_SCSIIO_CMDS_COUNT) {
		/* rebuild the blk-mq unique tag from hwq number + tag */
		u32 unique_tag =
		    ioc->io_queue_num[tag] << BLK_MQ_UNIQUE_TAG_BITS | tag;

		mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);

		/*
		 * If SCSI IO request is outstanding at driver level then
		 * DevHandle filed must be non-zero. If DevHandle is zero
		 * then it means that this smid is free at driver level,
		 * so return NULL.
		 */
		if (!mpi_request->DevHandle)
			return scmd;

		scmd = scsi_host_find_tag(ioc->shost, unique_tag);
		if (scmd) {
			st = scsi_cmd_priv(scmd);
			/* reject commands not actually owned by this driver */
			if (st->cb_idx == 0xFF || st->smid == 0)
				scmd = NULL;
		}
	}
	return scmd;
}

/**
 * scsih_change_queue_depth - setting device queue depth
 * @sdev: scsi device struct
 * @qdepth: requested queue depth
 *
 * Return: queue depth.
 */
static int
scsih_change_queue_depth(struct scsi_device *sdev, int qdepth)
{
	struct Scsi_Host *shost = sdev->host;
	int max_depth;
	struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
	struct MPT3SAS_DEVICE *sas_device_priv_data;
	struct MPT3SAS_TARGET *sas_target_priv_data;
	struct _sas_device *sas_device;
	unsigned long flags;

	max_depth = shost->can_queue;

	/*
	 * limit max device queue for SATA to 32 if enable_sdev_max_qd
	 * is disabled.
	 */
	if (ioc->enable_sdev_max_qd || ioc->is_gen35_ioc)
		goto not_sata;

	sas_device_priv_data = sdev->hostdata;
	if (!sas_device_priv_data)
		goto not_sata;
	sas_target_priv_data = sas_device_priv_data->sas_target;
	if (!sas_target_priv_data)
		goto not_sata;
	/* RAID volumes are not subject to the SATA depth cap */
	if ((sas_target_priv_data->flags & MPT_TARGET_FLAGS_VOLUME))
		goto not_sata;

	spin_lock_irqsave(&ioc->sas_device_lock, flags);
	sas_device = __mpt3sas_get_sdev_from_target(ioc, sas_target_priv_data);
	if (sas_device) {
		if (sas_device->device_info & MPI2_SAS_DEVICE_INFO_SATA_DEVICE)
			max_depth = MPT3SAS_SATA_QUEUE_DEPTH;

		sas_device_put(sas_device);
	}
	spin_unlock_irqrestore(&ioc->sas_device_lock, flags);

 not_sata:

	if (!sdev->tagged_supported)
		max_depth = 1;
	if (qdepth > max_depth)
		qdepth = max_depth;
	scsi_change_queue_depth(sdev, qdepth);
	sdev_printk(KERN_INFO, sdev,
	    "qdepth(%d), tagged(%d), scsi_level(%d), cmd_que(%d)\n",
	    sdev->queue_depth, sdev->tagged_supported,
	    sdev->scsi_level, ((sdev->inquiry[7] & 2) >> 1));
	return sdev->queue_depth;
}

/**
 * mpt3sas_scsih_change_queue_depth - setting device queue depth
 * @sdev: scsi device struct
 * @qdepth: requested queue depth
 *
 * Returns nothing.
 */
void
mpt3sas_scsih_change_queue_depth(struct scsi_device *sdev, int qdepth)
{
	struct Scsi_Host *shost = sdev->host;
	struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);

	/* when max-qd mode is on, always request the full host depth */
	if (ioc->enable_sdev_max_qd)
		qdepth = shost->can_queue;

	scsih_change_queue_depth(sdev, qdepth);
}

/**
 * scsih_target_alloc - target add routine
 * @starget: scsi target struct
 *
 * Return: 0 if ok. Any other return is assumed to be an error and
 * the device is ignored.
 */
static int
scsih_target_alloc(struct scsi_target *starget)
{
	struct Scsi_Host *shost = dev_to_shost(&starget->dev);
	struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
	struct MPT3SAS_TARGET *sas_target_priv_data;
	struct _sas_device *sas_device;
	struct _raid_device *raid_device;
	struct _pcie_device *pcie_device;
	unsigned long flags;
	struct sas_rphy *rphy;

	sas_target_priv_data = kzalloc(sizeof(*sas_target_priv_data),
	    GFP_KERNEL);
	if (!sas_target_priv_data)
		return -ENOMEM;

	starget->hostdata = sas_target_priv_data;
	sas_target_priv_data->starget = starget;
	sas_target_priv_data->handle = MPT3SAS_INVALID_DEVICE_HANDLE;

	/* RAID volumes */
	if (starget->channel == RAID_CHANNEL) {
		spin_lock_irqsave(&ioc->raid_device_lock, flags);
		raid_device = _scsih_raid_device_find_by_id(ioc, starget->id,
		    starget->channel);
		if (raid_device) {
			sas_target_priv_data->handle = raid_device->handle;
			sas_target_priv_data->sas_address = raid_device->wwid;
			sas_target_priv_data->flags |= MPT_TARGET_FLAGS_VOLUME;
			if (ioc->is_warpdrive)
				sas_target_priv_data->raid_device = raid_device;
			raid_device->starget = starget;
		}
		spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
		return 0;
	}

	/* PCIe devices */
	if (starget->channel == PCIE_CHANNEL) {
		spin_lock_irqsave(&ioc->pcie_device_lock, flags);
		pcie_device = __mpt3sas_get_pdev_by_idchannel(ioc, starget->id,
		    starget->channel);
		if (pcie_device) {
			/*
			 * The lookup reference is retained in pcie_dev and
			 * released in scsih_target_destroy().
			 */
			sas_target_priv_data->handle = pcie_device->handle;
			sas_target_priv_data->sas_address = pcie_device->wwid;
			sas_target_priv_data->port = NULL;
			sas_target_priv_data->pcie_dev = pcie_device;
			pcie_device->starget = starget;
			pcie_device->id = starget->id;
			pcie_device->channel = starget->channel;
			sas_target_priv_data->flags |=
			    MPT_TARGET_FLAGS_PCIE_DEVICE;
			if (pcie_device->fast_path)
				sas_target_priv_data->flags |=
				    MPT_TARGET_FASTPATH_IO;
		}
		spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
		return 0;
	}

	/* sas/sata devices */
	spin_lock_irqsave(&ioc->sas_device_lock, flags);
	rphy = dev_to_rphy(starget->dev.parent);
	sas_device = __mpt3sas_get_sdev_by_rphy(ioc, rphy);

	if (sas_device) {
		/*
		 * The lookup reference is retained in sas_dev and released
		 * in scsih_target_destroy().
		 */
		sas_target_priv_data->handle = sas_device->handle;
		sas_target_priv_data->sas_address = sas_device->sas_address;
		sas_target_priv_data->port = sas_device->port;
		sas_target_priv_data->sas_dev = sas_device;
		sas_device->starget = starget;
		sas_device->id = starget->id;
		sas_device->channel = starget->channel;
		if (test_bit(sas_device->handle, ioc->pd_handles))
			sas_target_priv_data->flags |=
			    MPT_TARGET_FLAGS_RAID_COMPONENT;
		if (sas_device->fast_path)
			sas_target_priv_data->flags |=
			    MPT_TARGET_FASTPATH_IO;
	}
	spin_unlock_irqrestore(&ioc->sas_device_lock, flags);

	return 0;
}

/**
 * scsih_target_destroy - target destroy routine
 * @starget: scsi target struct
 */
static void
scsih_target_destroy(struct scsi_target *starget)
{
	struct Scsi_Host *shost = dev_to_shost(&starget->dev);
	struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
	struct MPT3SAS_TARGET *sas_target_priv_data;
	struct _sas_device *sas_device;
	struct _raid_device *raid_device;
	struct _pcie_device *pcie_device;
	unsigned long flags;

	sas_target_priv_data = starget->hostdata;
	if (!sas_target_priv_data)
		return;

	if (starget->channel == RAID_CHANNEL) {
		spin_lock_irqsave(&ioc->raid_device_lock, flags);
		raid_device = _scsih_raid_device_find_by_id(ioc, starget->id,
		    starget->channel);
		if (raid_device) {
			raid_device->starget = NULL;
			raid_device->sdev = NULL;
		}
		spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
		goto out;
	}

	if (starget->channel == PCIE_CHANNEL) {
		spin_lock_irqsave(&ioc->pcie_device_lock, flags);
		pcie_device = __mpt3sas_get_pdev_from_target(ioc,
		    sas_target_priv_data);
		if (pcie_device && (pcie_device->starget == starget) &&
		    (pcie_device->id == starget->id) &&
		    (pcie_device->channel == starget->channel))
			pcie_device->starget = NULL;

		if (pcie_device) {
			/*
			 * Corresponding get() is in _scsih_target_alloc()
			 */
			sas_target_priv_data->pcie_dev = NULL;
			/* drop both the alloc-time and the lookup reference */
			pcie_device_put(pcie_device);
			pcie_device_put(pcie_device);
		}
		spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
		goto out;
	}

	spin_lock_irqsave(&ioc->sas_device_lock, flags);
	sas_device = __mpt3sas_get_sdev_from_target(ioc, sas_target_priv_data);
	if (sas_device && (sas_device->starget == starget) &&
	    (sas_device->id == starget->id) &&
	    (sas_device->channel == starget->channel))
		sas_device->starget = NULL;

	if (sas_device) {
		/*
		 * Corresponding get() is in _scsih_target_alloc()
		 */
		sas_target_priv_data->sas_dev = NULL;
		/* drop both the alloc-time and the lookup reference */
		sas_device_put(sas_device);

		sas_device_put(sas_device);
	}
	spin_unlock_irqrestore(&ioc->sas_device_lock, flags);

 out:
	kfree(sas_target_priv_data);
	starget->hostdata = NULL;
}

/**
 * scsih_slave_alloc -
device add routine
 * @sdev: scsi device struct
 *
 * Return: 0 if ok. Any other return is assumed to be an error and
 * the device is ignored.
 */
static int
scsih_slave_alloc(struct scsi_device *sdev)
{
	struct Scsi_Host *shost;
	struct MPT3SAS_ADAPTER *ioc;
	struct MPT3SAS_TARGET *sas_target_priv_data;
	struct MPT3SAS_DEVICE *sas_device_priv_data;
	struct scsi_target *starget;
	struct _raid_device *raid_device;
	struct _sas_device *sas_device;
	struct _pcie_device *pcie_device;
	unsigned long flags;

	sas_device_priv_data = kzalloc(sizeof(*sas_device_priv_data),
	    GFP_KERNEL);
	if (!sas_device_priv_data)
		return -ENOMEM;

	sas_device_priv_data->lun = sdev->lun;
	sas_device_priv_data->flags = MPT_DEVICE_FLAGS_INIT;

	starget = scsi_target(sdev);
	sas_target_priv_data = starget->hostdata;
	sas_target_priv_data->num_luns++;
	sas_device_priv_data->sas_target = sas_target_priv_data;
	sdev->hostdata = sas_device_priv_data;
	/* hidden RAID components get no upper-level driver attached */
	if ((sas_target_priv_data->flags & MPT_TARGET_FLAGS_RAID_COMPONENT))
		sdev->no_uld_attach = 1;

	shost = dev_to_shost(&starget->dev);
	ioc = shost_priv(shost);
	if (starget->channel == RAID_CHANNEL) {
		spin_lock_irqsave(&ioc->raid_device_lock, flags);
		raid_device = _scsih_raid_device_find_by_id(ioc,
		    starget->id, starget->channel);
		if (raid_device)
			raid_device->sdev = sdev; /* raid is single lun */
		spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
	}
	if (starget->channel == PCIE_CHANNEL) {
		spin_lock_irqsave(&ioc->pcie_device_lock, flags);
		pcie_device = __mpt3sas_get_pdev_by_wwid(ioc,
		    sas_target_priv_data->sas_address);
		if (pcie_device && (pcie_device->starget == NULL)) {
			sdev_printk(KERN_INFO, sdev,
			    "%s : pcie_device->starget set to starget @ %d\n",
			    __func__, __LINE__);
			pcie_device->starget = starget;
		}

		if (pcie_device)
			pcie_device_put(pcie_device);
		spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);

	} else if (!(sas_target_priv_data->flags & MPT_TARGET_FLAGS_VOLUME)) {
		spin_lock_irqsave(&ioc->sas_device_lock, flags);
		sas_device = __mpt3sas_get_sdev_by_addr(ioc,
		    sas_target_priv_data->sas_address,
		    sas_target_priv_data->port);
		if (sas_device && (sas_device->starget == NULL)) {
			sdev_printk(KERN_INFO, sdev,
			    "%s : sas_device->starget set to starget @ %d\n",
			    __func__, __LINE__);
			sas_device->starget = starget;
		}

		if (sas_device)
			sas_device_put(sas_device);

		spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
	}

	return 0;
}

/**
 * scsih_slave_destroy - device destroy routine
 * @sdev: scsi device struct
 */
static void
scsih_slave_destroy(struct scsi_device *sdev)
{
	struct MPT3SAS_TARGET *sas_target_priv_data;
	struct scsi_target *starget;
	struct Scsi_Host *shost;
	struct MPT3SAS_ADAPTER *ioc;
	struct _sas_device *sas_device;
	struct _pcie_device *pcie_device;
	unsigned long flags;

	if (!sdev->hostdata)
		return;

	starget = scsi_target(sdev);
	sas_target_priv_data = starget->hostdata;
	sas_target_priv_data->num_luns--;

	shost = dev_to_shost(&starget->dev);
	ioc = shost_priv(shost);

	if (sas_target_priv_data->flags & MPT_TARGET_FLAGS_PCIE_DEVICE) {
		spin_lock_irqsave(&ioc->pcie_device_lock, flags);
		pcie_device = __mpt3sas_get_pdev_from_target(ioc,
		    sas_target_priv_data);
		/* clear starget only when the last lun is gone */
		if (pcie_device && !sas_target_priv_data->num_luns)
			pcie_device->starget = NULL;

		if (pcie_device)
			pcie_device_put(pcie_device);

		spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);

	} else if (!(sas_target_priv_data->flags & MPT_TARGET_FLAGS_VOLUME)) {
		spin_lock_irqsave(&ioc->sas_device_lock, flags);
		sas_device
= __mpt3sas_get_sdev_from_target(ioc, 2150 sas_target_priv_data); 2151 if (sas_device && !sas_target_priv_data->num_luns) 2152 sas_device->starget = NULL; 2153 2154 if (sas_device) 2155 sas_device_put(sas_device); 2156 spin_unlock_irqrestore(&ioc->sas_device_lock, flags); 2157 } 2158 2159 kfree(sdev->hostdata); 2160 sdev->hostdata = NULL; 2161 } 2162 2163 /** 2164 * _scsih_display_sata_capabilities - sata capabilities 2165 * @ioc: per adapter object 2166 * @handle: device handle 2167 * @sdev: scsi device struct 2168 */ 2169 static void 2170 _scsih_display_sata_capabilities(struct MPT3SAS_ADAPTER *ioc, 2171 u16 handle, struct scsi_device *sdev) 2172 { 2173 Mpi2ConfigReply_t mpi_reply; 2174 Mpi2SasDevicePage0_t sas_device_pg0; 2175 u32 ioc_status; 2176 u16 flags; 2177 u32 device_info; 2178 2179 if ((mpt3sas_config_get_sas_device_pg0(ioc, &mpi_reply, &sas_device_pg0, 2180 MPI2_SAS_DEVICE_PGAD_FORM_HANDLE, handle))) { 2181 ioc_err(ioc, "failure at %s:%d/%s()!\n", 2182 __FILE__, __LINE__, __func__); 2183 return; 2184 } 2185 2186 ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & 2187 MPI2_IOCSTATUS_MASK; 2188 if (ioc_status != MPI2_IOCSTATUS_SUCCESS) { 2189 ioc_err(ioc, "failure at %s:%d/%s()!\n", 2190 __FILE__, __LINE__, __func__); 2191 return; 2192 } 2193 2194 flags = le16_to_cpu(sas_device_pg0.Flags); 2195 device_info = le32_to_cpu(sas_device_pg0.DeviceInfo); 2196 2197 sdev_printk(KERN_INFO, sdev, 2198 "atapi(%s), ncq(%s), asyn_notify(%s), smart(%s), fua(%s), " 2199 "sw_preserve(%s)\n", 2200 (device_info & MPI2_SAS_DEVICE_INFO_ATAPI_DEVICE) ? "y" : "n", 2201 (flags & MPI2_SAS_DEVICE0_FLAGS_SATA_NCQ_SUPPORTED) ? "y" : "n", 2202 (flags & MPI2_SAS_DEVICE0_FLAGS_SATA_ASYNCHRONOUS_NOTIFY) ? "y" : 2203 "n", 2204 (flags & MPI2_SAS_DEVICE0_FLAGS_SATA_SMART_SUPPORTED) ? "y" : "n", 2205 (flags & MPI2_SAS_DEVICE0_FLAGS_SATA_FUA_SUPPORTED) ? "y" : "n", 2206 (flags & MPI2_SAS_DEVICE0_FLAGS_SATA_SW_PRESERVE) ? 
"y" : "n"); 2207 } 2208 2209 /* 2210 * raid transport support - 2211 * Enabled for SLES11 and newer, in older kernels the driver will panic when 2212 * unloading the driver followed by a load - I believe that the subroutine 2213 * raid_class_release() is not cleaning up properly. 2214 */ 2215 2216 /** 2217 * scsih_is_raid - return boolean indicating device is raid volume 2218 * @dev: the device struct object 2219 */ 2220 static int 2221 scsih_is_raid(struct device *dev) 2222 { 2223 struct scsi_device *sdev = to_scsi_device(dev); 2224 struct MPT3SAS_ADAPTER *ioc = shost_priv(sdev->host); 2225 2226 if (ioc->is_warpdrive) 2227 return 0; 2228 return (sdev->channel == RAID_CHANNEL) ? 1 : 0; 2229 } 2230 2231 static int 2232 scsih_is_nvme(struct device *dev) 2233 { 2234 struct scsi_device *sdev = to_scsi_device(dev); 2235 2236 return (sdev->channel == PCIE_CHANNEL) ? 1 : 0; 2237 } 2238 2239 /** 2240 * scsih_get_resync - get raid volume resync percent complete 2241 * @dev: the device struct object 2242 */ 2243 static void 2244 scsih_get_resync(struct device *dev) 2245 { 2246 struct scsi_device *sdev = to_scsi_device(dev); 2247 struct MPT3SAS_ADAPTER *ioc = shost_priv(sdev->host); 2248 static struct _raid_device *raid_device; 2249 unsigned long flags; 2250 Mpi2RaidVolPage0_t vol_pg0; 2251 Mpi2ConfigReply_t mpi_reply; 2252 u32 volume_status_flags; 2253 u8 percent_complete; 2254 u16 handle; 2255 2256 percent_complete = 0; 2257 handle = 0; 2258 if (ioc->is_warpdrive) 2259 goto out; 2260 2261 spin_lock_irqsave(&ioc->raid_device_lock, flags); 2262 raid_device = _scsih_raid_device_find_by_id(ioc, sdev->id, 2263 sdev->channel); 2264 if (raid_device) { 2265 handle = raid_device->handle; 2266 percent_complete = raid_device->percent_complete; 2267 } 2268 spin_unlock_irqrestore(&ioc->raid_device_lock, flags); 2269 2270 if (!handle) 2271 goto out; 2272 2273 if (mpt3sas_config_get_raid_volume_pg0(ioc, &mpi_reply, &vol_pg0, 2274 MPI2_RAID_VOLUME_PGAD_FORM_HANDLE, handle, 2275 
sizeof(Mpi2RaidVolPage0_t))) { 2276 ioc_err(ioc, "failure at %s:%d/%s()!\n", 2277 __FILE__, __LINE__, __func__); 2278 percent_complete = 0; 2279 goto out; 2280 } 2281 2282 volume_status_flags = le32_to_cpu(vol_pg0.VolumeStatusFlags); 2283 if (!(volume_status_flags & 2284 MPI2_RAIDVOL0_STATUS_FLAG_RESYNC_IN_PROGRESS)) 2285 percent_complete = 0; 2286 2287 out: 2288 2289 switch (ioc->hba_mpi_version_belonged) { 2290 case MPI2_VERSION: 2291 raid_set_resync(mpt2sas_raid_template, dev, percent_complete); 2292 break; 2293 case MPI25_VERSION: 2294 case MPI26_VERSION: 2295 raid_set_resync(mpt3sas_raid_template, dev, percent_complete); 2296 break; 2297 } 2298 } 2299 2300 /** 2301 * scsih_get_state - get raid volume level 2302 * @dev: the device struct object 2303 */ 2304 static void 2305 scsih_get_state(struct device *dev) 2306 { 2307 struct scsi_device *sdev = to_scsi_device(dev); 2308 struct MPT3SAS_ADAPTER *ioc = shost_priv(sdev->host); 2309 static struct _raid_device *raid_device; 2310 unsigned long flags; 2311 Mpi2RaidVolPage0_t vol_pg0; 2312 Mpi2ConfigReply_t mpi_reply; 2313 u32 volstate; 2314 enum raid_state state = RAID_STATE_UNKNOWN; 2315 u16 handle = 0; 2316 2317 spin_lock_irqsave(&ioc->raid_device_lock, flags); 2318 raid_device = _scsih_raid_device_find_by_id(ioc, sdev->id, 2319 sdev->channel); 2320 if (raid_device) 2321 handle = raid_device->handle; 2322 spin_unlock_irqrestore(&ioc->raid_device_lock, flags); 2323 2324 if (!raid_device) 2325 goto out; 2326 2327 if (mpt3sas_config_get_raid_volume_pg0(ioc, &mpi_reply, &vol_pg0, 2328 MPI2_RAID_VOLUME_PGAD_FORM_HANDLE, handle, 2329 sizeof(Mpi2RaidVolPage0_t))) { 2330 ioc_err(ioc, "failure at %s:%d/%s()!\n", 2331 __FILE__, __LINE__, __func__); 2332 goto out; 2333 } 2334 2335 volstate = le32_to_cpu(vol_pg0.VolumeStatusFlags); 2336 if (volstate & MPI2_RAIDVOL0_STATUS_FLAG_RESYNC_IN_PROGRESS) { 2337 state = RAID_STATE_RESYNCING; 2338 goto out; 2339 } 2340 2341 switch (vol_pg0.VolumeState) { 2342 case 
MPI2_RAID_VOL_STATE_OPTIMAL: 2343 case MPI2_RAID_VOL_STATE_ONLINE: 2344 state = RAID_STATE_ACTIVE; 2345 break; 2346 case MPI2_RAID_VOL_STATE_DEGRADED: 2347 state = RAID_STATE_DEGRADED; 2348 break; 2349 case MPI2_RAID_VOL_STATE_FAILED: 2350 case MPI2_RAID_VOL_STATE_MISSING: 2351 state = RAID_STATE_OFFLINE; 2352 break; 2353 } 2354 out: 2355 switch (ioc->hba_mpi_version_belonged) { 2356 case MPI2_VERSION: 2357 raid_set_state(mpt2sas_raid_template, dev, state); 2358 break; 2359 case MPI25_VERSION: 2360 case MPI26_VERSION: 2361 raid_set_state(mpt3sas_raid_template, dev, state); 2362 break; 2363 } 2364 } 2365 2366 /** 2367 * _scsih_set_level - set raid level 2368 * @ioc: ? 2369 * @sdev: scsi device struct 2370 * @volume_type: volume type 2371 */ 2372 static void 2373 _scsih_set_level(struct MPT3SAS_ADAPTER *ioc, 2374 struct scsi_device *sdev, u8 volume_type) 2375 { 2376 enum raid_level level = RAID_LEVEL_UNKNOWN; 2377 2378 switch (volume_type) { 2379 case MPI2_RAID_VOL_TYPE_RAID0: 2380 level = RAID_LEVEL_0; 2381 break; 2382 case MPI2_RAID_VOL_TYPE_RAID10: 2383 level = RAID_LEVEL_10; 2384 break; 2385 case MPI2_RAID_VOL_TYPE_RAID1E: 2386 level = RAID_LEVEL_1E; 2387 break; 2388 case MPI2_RAID_VOL_TYPE_RAID1: 2389 level = RAID_LEVEL_1; 2390 break; 2391 } 2392 2393 switch (ioc->hba_mpi_version_belonged) { 2394 case MPI2_VERSION: 2395 raid_set_level(mpt2sas_raid_template, 2396 &sdev->sdev_gendev, level); 2397 break; 2398 case MPI25_VERSION: 2399 case MPI26_VERSION: 2400 raid_set_level(mpt3sas_raid_template, 2401 &sdev->sdev_gendev, level); 2402 break; 2403 } 2404 } 2405 2406 2407 /** 2408 * _scsih_get_volume_capabilities - volume capabilities 2409 * @ioc: per adapter object 2410 * @raid_device: the raid_device object 2411 * 2412 * Return: 0 for success, else 1 2413 */ 2414 static int 2415 _scsih_get_volume_capabilities(struct MPT3SAS_ADAPTER *ioc, 2416 struct _raid_device *raid_device) 2417 { 2418 Mpi2RaidVolPage0_t *vol_pg0; 2419 Mpi2RaidPhysDiskPage0_t pd_pg0; 2420 
Mpi2SasDevicePage0_t sas_device_pg0; 2421 Mpi2ConfigReply_t mpi_reply; 2422 u16 sz; 2423 u8 num_pds; 2424 2425 if ((mpt3sas_config_get_number_pds(ioc, raid_device->handle, 2426 &num_pds)) || !num_pds) { 2427 dfailprintk(ioc, 2428 ioc_warn(ioc, "failure at %s:%d/%s()!\n", 2429 __FILE__, __LINE__, __func__)); 2430 return 1; 2431 } 2432 2433 raid_device->num_pds = num_pds; 2434 sz = struct_size(vol_pg0, PhysDisk, num_pds); 2435 vol_pg0 = kzalloc(sz, GFP_KERNEL); 2436 if (!vol_pg0) { 2437 dfailprintk(ioc, 2438 ioc_warn(ioc, "failure at %s:%d/%s()!\n", 2439 __FILE__, __LINE__, __func__)); 2440 return 1; 2441 } 2442 2443 if ((mpt3sas_config_get_raid_volume_pg0(ioc, &mpi_reply, vol_pg0, 2444 MPI2_RAID_VOLUME_PGAD_FORM_HANDLE, raid_device->handle, sz))) { 2445 dfailprintk(ioc, 2446 ioc_warn(ioc, "failure at %s:%d/%s()!\n", 2447 __FILE__, __LINE__, __func__)); 2448 kfree(vol_pg0); 2449 return 1; 2450 } 2451 2452 raid_device->volume_type = vol_pg0->VolumeType; 2453 2454 /* figure out what the underlying devices are by 2455 * obtaining the device_info bits for the 1st device 2456 */ 2457 if (!(mpt3sas_config_get_phys_disk_pg0(ioc, &mpi_reply, 2458 &pd_pg0, MPI2_PHYSDISK_PGAD_FORM_PHYSDISKNUM, 2459 vol_pg0->PhysDisk[0].PhysDiskNum))) { 2460 if (!(mpt3sas_config_get_sas_device_pg0(ioc, &mpi_reply, 2461 &sas_device_pg0, MPI2_SAS_DEVICE_PGAD_FORM_HANDLE, 2462 le16_to_cpu(pd_pg0.DevHandle)))) { 2463 raid_device->device_info = 2464 le32_to_cpu(sas_device_pg0.DeviceInfo); 2465 } 2466 } 2467 2468 kfree(vol_pg0); 2469 return 0; 2470 } 2471 2472 /** 2473 * _scsih_enable_tlr - setting TLR flags 2474 * @ioc: per adapter object 2475 * @sdev: scsi device struct 2476 * 2477 * Enabling Transaction Layer Retries for tape devices when 2478 * vpd page 0x90 is present 2479 * 2480 */ 2481 static void 2482 _scsih_enable_tlr(struct MPT3SAS_ADAPTER *ioc, struct scsi_device *sdev) 2483 { 2484 2485 /* only for TAPE */ 2486 if (sdev->type != TYPE_TAPE) 2487 return; 2488 2489 if 
(!(ioc->facts.IOCCapabilities & MPI2_IOCFACTS_CAPABILITY_TLR)) 2490 return; 2491 2492 sas_enable_tlr(sdev); 2493 sdev_printk(KERN_INFO, sdev, "TLR %s\n", 2494 sas_is_tlr_enabled(sdev) ? "Enabled" : "Disabled"); 2495 return; 2496 2497 } 2498 2499 /** 2500 * scsih_device_configure - device configure routine. 2501 * @sdev: scsi device struct 2502 * @lim: queue limits 2503 * 2504 * Return: 0 if ok. Any other return is assumed to be an error and 2505 * the device is ignored. 2506 */ 2507 static int 2508 scsih_device_configure(struct scsi_device *sdev, struct queue_limits *lim) 2509 { 2510 struct Scsi_Host *shost = sdev->host; 2511 struct MPT3SAS_ADAPTER *ioc = shost_priv(shost); 2512 struct MPT3SAS_DEVICE *sas_device_priv_data; 2513 struct MPT3SAS_TARGET *sas_target_priv_data; 2514 struct _sas_device *sas_device; 2515 struct _pcie_device *pcie_device; 2516 struct _raid_device *raid_device; 2517 unsigned long flags; 2518 int qdepth; 2519 u8 ssp_target = 0; 2520 char *ds = ""; 2521 char *r_level = ""; 2522 u16 handle, volume_handle = 0; 2523 u64 volume_wwid = 0; 2524 2525 qdepth = 1; 2526 sas_device_priv_data = sdev->hostdata; 2527 sas_device_priv_data->configured_lun = 1; 2528 sas_device_priv_data->flags &= ~MPT_DEVICE_FLAGS_INIT; 2529 sas_target_priv_data = sas_device_priv_data->sas_target; 2530 handle = sas_target_priv_data->handle; 2531 2532 /* raid volume handling */ 2533 if (sas_target_priv_data->flags & MPT_TARGET_FLAGS_VOLUME) { 2534 2535 spin_lock_irqsave(&ioc->raid_device_lock, flags); 2536 raid_device = mpt3sas_raid_device_find_by_handle(ioc, handle); 2537 spin_unlock_irqrestore(&ioc->raid_device_lock, flags); 2538 if (!raid_device) { 2539 dfailprintk(ioc, 2540 ioc_warn(ioc, "failure at %s:%d/%s()!\n", 2541 __FILE__, __LINE__, __func__)); 2542 return 1; 2543 } 2544 2545 if (_scsih_get_volume_capabilities(ioc, raid_device)) { 2546 dfailprintk(ioc, 2547 ioc_warn(ioc, "failure at %s:%d/%s()!\n", 2548 __FILE__, __LINE__, __func__)); 2549 return 1; 2550 } 2551 2552 
/* 2553 * WARPDRIVE: Initialize the required data for Direct IO 2554 */ 2555 mpt3sas_init_warpdrive_properties(ioc, raid_device); 2556 2557 /* RAID Queue Depth Support 2558 * IS volume = underlying qdepth of drive type, either 2559 * MPT3SAS_SAS_QUEUE_DEPTH or MPT3SAS_SATA_QUEUE_DEPTH 2560 * IM/IME/R10 = 128 (MPT3SAS_RAID_QUEUE_DEPTH) 2561 */ 2562 if (raid_device->device_info & 2563 MPI2_SAS_DEVICE_INFO_SSP_TARGET) { 2564 qdepth = MPT3SAS_SAS_QUEUE_DEPTH; 2565 ds = "SSP"; 2566 } else { 2567 qdepth = MPT3SAS_SATA_QUEUE_DEPTH; 2568 if (raid_device->device_info & 2569 MPI2_SAS_DEVICE_INFO_SATA_DEVICE) 2570 ds = "SATA"; 2571 else 2572 ds = "STP"; 2573 } 2574 2575 switch (raid_device->volume_type) { 2576 case MPI2_RAID_VOL_TYPE_RAID0: 2577 r_level = "RAID0"; 2578 break; 2579 case MPI2_RAID_VOL_TYPE_RAID1E: 2580 qdepth = MPT3SAS_RAID_QUEUE_DEPTH; 2581 if (ioc->manu_pg10.OEMIdentifier && 2582 (le32_to_cpu(ioc->manu_pg10.GenericFlags0) & 2583 MFG10_GF0_R10_DISPLAY) && 2584 !(raid_device->num_pds % 2)) 2585 r_level = "RAID10"; 2586 else 2587 r_level = "RAID1E"; 2588 break; 2589 case MPI2_RAID_VOL_TYPE_RAID1: 2590 qdepth = MPT3SAS_RAID_QUEUE_DEPTH; 2591 r_level = "RAID1"; 2592 break; 2593 case MPI2_RAID_VOL_TYPE_RAID10: 2594 qdepth = MPT3SAS_RAID_QUEUE_DEPTH; 2595 r_level = "RAID10"; 2596 break; 2597 case MPI2_RAID_VOL_TYPE_UNKNOWN: 2598 default: 2599 qdepth = MPT3SAS_RAID_QUEUE_DEPTH; 2600 r_level = "RAIDX"; 2601 break; 2602 } 2603 2604 if (!ioc->hide_ir_msg) 2605 sdev_printk(KERN_INFO, sdev, 2606 "%s: handle(0x%04x), wwid(0x%016llx)," 2607 " pd_count(%d), type(%s)\n", 2608 r_level, raid_device->handle, 2609 (unsigned long long)raid_device->wwid, 2610 raid_device->num_pds, ds); 2611 2612 if (shost->max_sectors > MPT3SAS_RAID_MAX_SECTORS) { 2613 lim->max_hw_sectors = MPT3SAS_RAID_MAX_SECTORS; 2614 sdev_printk(KERN_INFO, sdev, 2615 "Set queue's max_sector to: %u\n", 2616 MPT3SAS_RAID_MAX_SECTORS); 2617 } 2618 2619 mpt3sas_scsih_change_queue_depth(sdev, qdepth); 2620 2621 /* 
raid transport support */ 2622 if (!ioc->is_warpdrive) 2623 _scsih_set_level(ioc, sdev, raid_device->volume_type); 2624 return 0; 2625 } 2626 2627 /* non-raid handling */ 2628 if (sas_target_priv_data->flags & MPT_TARGET_FLAGS_RAID_COMPONENT) { 2629 if (mpt3sas_config_get_volume_handle(ioc, handle, 2630 &volume_handle)) { 2631 dfailprintk(ioc, 2632 ioc_warn(ioc, "failure at %s:%d/%s()!\n", 2633 __FILE__, __LINE__, __func__)); 2634 return 1; 2635 } 2636 if (volume_handle && mpt3sas_config_get_volume_wwid(ioc, 2637 volume_handle, &volume_wwid)) { 2638 dfailprintk(ioc, 2639 ioc_warn(ioc, "failure at %s:%d/%s()!\n", 2640 __FILE__, __LINE__, __func__)); 2641 return 1; 2642 } 2643 } 2644 2645 /* PCIe handling */ 2646 if (sas_target_priv_data->flags & MPT_TARGET_FLAGS_PCIE_DEVICE) { 2647 spin_lock_irqsave(&ioc->pcie_device_lock, flags); 2648 pcie_device = __mpt3sas_get_pdev_by_wwid(ioc, 2649 sas_device_priv_data->sas_target->sas_address); 2650 if (!pcie_device) { 2651 spin_unlock_irqrestore(&ioc->pcie_device_lock, flags); 2652 dfailprintk(ioc, 2653 ioc_warn(ioc, "failure at %s:%d/%s()!\n", 2654 __FILE__, __LINE__, __func__)); 2655 return 1; 2656 } 2657 2658 qdepth = ioc->max_nvme_qd; 2659 ds = "NVMe"; 2660 sdev_printk(KERN_INFO, sdev, 2661 "%s: handle(0x%04x), wwid(0x%016llx), port(%d)\n", 2662 ds, handle, (unsigned long long)pcie_device->wwid, 2663 pcie_device->port_num); 2664 if (pcie_device->enclosure_handle != 0) 2665 sdev_printk(KERN_INFO, sdev, 2666 "%s: enclosure logical id(0x%016llx), slot(%d)\n", 2667 ds, 2668 (unsigned long long)pcie_device->enclosure_logical_id, 2669 pcie_device->slot); 2670 if (pcie_device->connector_name[0] != '\0') 2671 sdev_printk(KERN_INFO, sdev, 2672 "%s: enclosure level(0x%04x)," 2673 "connector name( %s)\n", ds, 2674 pcie_device->enclosure_level, 2675 pcie_device->connector_name); 2676 2677 if (pcie_device->nvme_mdts) 2678 lim->max_hw_sectors = pcie_device->nvme_mdts / 512; 2679 2680 pcie_device_put(pcie_device); 2681 
spin_unlock_irqrestore(&ioc->pcie_device_lock, flags); 2682 mpt3sas_scsih_change_queue_depth(sdev, qdepth); 2683 /* Enable QUEUE_FLAG_NOMERGES flag, so that IOs won't be 2684 ** merged and can eliminate holes created during merging 2685 ** operation. 2686 **/ 2687 blk_queue_flag_set(QUEUE_FLAG_NOMERGES, 2688 sdev->request_queue); 2689 lim->virt_boundary_mask = ioc->page_size - 1; 2690 return 0; 2691 } 2692 2693 spin_lock_irqsave(&ioc->sas_device_lock, flags); 2694 sas_device = __mpt3sas_get_sdev_by_addr(ioc, 2695 sas_device_priv_data->sas_target->sas_address, 2696 sas_device_priv_data->sas_target->port); 2697 if (!sas_device) { 2698 spin_unlock_irqrestore(&ioc->sas_device_lock, flags); 2699 dfailprintk(ioc, 2700 ioc_warn(ioc, "failure at %s:%d/%s()!\n", 2701 __FILE__, __LINE__, __func__)); 2702 return 1; 2703 } 2704 2705 sas_device->volume_handle = volume_handle; 2706 sas_device->volume_wwid = volume_wwid; 2707 if (sas_device->device_info & MPI2_SAS_DEVICE_INFO_SSP_TARGET) { 2708 qdepth = (sas_device->port_type > 1) ? 
2709 ioc->max_wideport_qd : ioc->max_narrowport_qd; 2710 ssp_target = 1; 2711 if (sas_device->device_info & 2712 MPI2_SAS_DEVICE_INFO_SEP) { 2713 sdev_printk(KERN_WARNING, sdev, 2714 "set ignore_delay_remove for handle(0x%04x)\n", 2715 sas_device_priv_data->sas_target->handle); 2716 sas_device_priv_data->ignore_delay_remove = 1; 2717 ds = "SES"; 2718 } else 2719 ds = "SSP"; 2720 } else { 2721 qdepth = ioc->max_sata_qd; 2722 if (sas_device->device_info & MPI2_SAS_DEVICE_INFO_STP_TARGET) 2723 ds = "STP"; 2724 else if (sas_device->device_info & 2725 MPI2_SAS_DEVICE_INFO_SATA_DEVICE) 2726 ds = "SATA"; 2727 } 2728 2729 sdev_printk(KERN_INFO, sdev, "%s: handle(0x%04x), " \ 2730 "sas_addr(0x%016llx), phy(%d), device_name(0x%016llx)\n", 2731 ds, handle, (unsigned long long)sas_device->sas_address, 2732 sas_device->phy, (unsigned long long)sas_device->device_name); 2733 2734 _scsih_display_enclosure_chassis_info(NULL, sas_device, sdev, NULL); 2735 2736 sas_device_put(sas_device); 2737 spin_unlock_irqrestore(&ioc->sas_device_lock, flags); 2738 2739 if (!ssp_target) 2740 _scsih_display_sata_capabilities(ioc, handle, sdev); 2741 2742 2743 mpt3sas_scsih_change_queue_depth(sdev, qdepth); 2744 2745 if (ssp_target) { 2746 sas_read_port_mode_page(sdev); 2747 _scsih_enable_tlr(ioc, sdev); 2748 } 2749 2750 return 0; 2751 } 2752 2753 /** 2754 * scsih_bios_param - fetch head, sector, cylinder info for a disk 2755 * @sdev: scsi device struct 2756 * @bdev: pointer to block device context 2757 * @capacity: device size (in 512 byte sectors) 2758 * @params: three element array to place output: 2759 * params[0] number of heads (max 255) 2760 * params[1] number of sectors (max 63) 2761 * params[2] number of cylinders 2762 */ 2763 static int 2764 scsih_bios_param(struct scsi_device *sdev, struct block_device *bdev, 2765 sector_t capacity, int params[]) 2766 { 2767 int heads; 2768 int sectors; 2769 sector_t cylinders; 2770 ulong dummy; 2771 2772 heads = 64; 2773 sectors = 32; 2774 2775 dummy = 
heads * sectors; 2776 cylinders = capacity; 2777 sector_div(cylinders, dummy); 2778 2779 /* 2780 * Handle extended translation size for logical drives 2781 * > 1Gb 2782 */ 2783 if ((ulong)capacity >= 0x200000) { 2784 heads = 255; 2785 sectors = 63; 2786 dummy = heads * sectors; 2787 cylinders = capacity; 2788 sector_div(cylinders, dummy); 2789 } 2790 2791 /* return result */ 2792 params[0] = heads; 2793 params[1] = sectors; 2794 params[2] = cylinders; 2795 2796 return 0; 2797 } 2798 2799 /** 2800 * _scsih_response_code - translation of device response code 2801 * @ioc: per adapter object 2802 * @response_code: response code returned by the device 2803 */ 2804 static void 2805 _scsih_response_code(struct MPT3SAS_ADAPTER *ioc, u8 response_code) 2806 { 2807 char *desc; 2808 2809 switch (response_code) { 2810 case MPI2_SCSITASKMGMT_RSP_TM_COMPLETE: 2811 desc = "task management request completed"; 2812 break; 2813 case MPI2_SCSITASKMGMT_RSP_INVALID_FRAME: 2814 desc = "invalid frame"; 2815 break; 2816 case MPI2_SCSITASKMGMT_RSP_TM_NOT_SUPPORTED: 2817 desc = "task management request not supported"; 2818 break; 2819 case MPI2_SCSITASKMGMT_RSP_TM_FAILED: 2820 desc = "task management request failed"; 2821 break; 2822 case MPI2_SCSITASKMGMT_RSP_TM_SUCCEEDED: 2823 desc = "task management request succeeded"; 2824 break; 2825 case MPI2_SCSITASKMGMT_RSP_TM_INVALID_LUN: 2826 desc = "invalid lun"; 2827 break; 2828 case 0xA: 2829 desc = "overlapped tag attempted"; 2830 break; 2831 case MPI2_SCSITASKMGMT_RSP_IO_QUEUED_ON_IOC: 2832 desc = "task queued, however not sent to target"; 2833 break; 2834 default: 2835 desc = "unknown"; 2836 break; 2837 } 2838 ioc_warn(ioc, "response_code(0x%01x): %s\n", response_code, desc); 2839 } 2840 2841 /** 2842 * _scsih_tm_done - tm completion routine 2843 * @ioc: per adapter object 2844 * @smid: system request message index 2845 * @msix_index: MSIX table index supplied by the OS 2846 * @reply: reply message frame(lower 32bit addr) 2847 * Context: 
none. 2848 * 2849 * The callback handler when using scsih_issue_tm. 2850 * 2851 * Return: 1 meaning mf should be freed from _base_interrupt 2852 * 0 means the mf is freed from this function. 2853 */ 2854 static u8 2855 _scsih_tm_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index, u32 reply) 2856 { 2857 MPI2DefaultReply_t *mpi_reply; 2858 2859 if (ioc->tm_cmds.status == MPT3_CMD_NOT_USED) 2860 return 1; 2861 if (ioc->tm_cmds.smid != smid) 2862 return 1; 2863 ioc->tm_cmds.status |= MPT3_CMD_COMPLETE; 2864 mpi_reply = mpt3sas_base_get_reply_virt_addr(ioc, reply); 2865 if (mpi_reply) { 2866 memcpy(ioc->tm_cmds.reply, mpi_reply, mpi_reply->MsgLength*4); 2867 ioc->tm_cmds.status |= MPT3_CMD_REPLY_VALID; 2868 } 2869 ioc->tm_cmds.status &= ~MPT3_CMD_PENDING; 2870 complete(&ioc->tm_cmds.done); 2871 return 1; 2872 } 2873 2874 /** 2875 * mpt3sas_scsih_set_tm_flag - set per target tm_busy 2876 * @ioc: per adapter object 2877 * @handle: device handle 2878 * 2879 * During taskmangement request, we need to freeze the device queue. 2880 */ 2881 void 2882 mpt3sas_scsih_set_tm_flag(struct MPT3SAS_ADAPTER *ioc, u16 handle) 2883 { 2884 struct MPT3SAS_DEVICE *sas_device_priv_data; 2885 struct scsi_device *sdev; 2886 u8 skip = 0; 2887 2888 shost_for_each_device(sdev, ioc->shost) { 2889 if (skip) 2890 continue; 2891 sas_device_priv_data = sdev->hostdata; 2892 if (!sas_device_priv_data) 2893 continue; 2894 if (sas_device_priv_data->sas_target->handle == handle) { 2895 sas_device_priv_data->sas_target->tm_busy = 1; 2896 skip = 1; 2897 ioc->ignore_loginfos = 1; 2898 } 2899 } 2900 } 2901 2902 /** 2903 * mpt3sas_scsih_clear_tm_flag - clear per target tm_busy 2904 * @ioc: per adapter object 2905 * @handle: device handle 2906 * 2907 * During taskmangement request, we need to freeze the device queue. 
2908 */ 2909 void 2910 mpt3sas_scsih_clear_tm_flag(struct MPT3SAS_ADAPTER *ioc, u16 handle) 2911 { 2912 struct MPT3SAS_DEVICE *sas_device_priv_data; 2913 struct scsi_device *sdev; 2914 u8 skip = 0; 2915 2916 shost_for_each_device(sdev, ioc->shost) { 2917 if (skip) 2918 continue; 2919 sas_device_priv_data = sdev->hostdata; 2920 if (!sas_device_priv_data) 2921 continue; 2922 if (sas_device_priv_data->sas_target->handle == handle) { 2923 sas_device_priv_data->sas_target->tm_busy = 0; 2924 skip = 1; 2925 ioc->ignore_loginfos = 0; 2926 } 2927 } 2928 } 2929 2930 /** 2931 * scsih_tm_cmd_map_status - map the target reset & LUN reset TM status 2932 * @ioc: per adapter object 2933 * @channel: the channel assigned by the OS 2934 * @id: the id assigned by the OS 2935 * @lun: lun number 2936 * @type: MPI2_SCSITASKMGMT_TASKTYPE__XXX (defined in mpi2_init.h) 2937 * @smid_task: smid assigned to the task 2938 * 2939 * Look whether TM has aborted the timed out SCSI command, if 2940 * TM has aborted the IO then return SUCCESS else return FAILED. 
2941 */ 2942 static int 2943 scsih_tm_cmd_map_status(struct MPT3SAS_ADAPTER *ioc, uint channel, 2944 uint id, uint lun, u8 type, u16 smid_task) 2945 { 2946 2947 if (smid_task <= ioc->shost->can_queue) { 2948 switch (type) { 2949 case MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET: 2950 if (!(_scsih_scsi_lookup_find_by_target(ioc, 2951 id, channel))) 2952 return SUCCESS; 2953 break; 2954 case MPI2_SCSITASKMGMT_TASKTYPE_ABRT_TASK_SET: 2955 case MPI2_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET: 2956 if (!(_scsih_scsi_lookup_find_by_lun(ioc, id, 2957 lun, channel))) 2958 return SUCCESS; 2959 break; 2960 default: 2961 return SUCCESS; 2962 } 2963 } else if (smid_task == ioc->scsih_cmds.smid) { 2964 if ((ioc->scsih_cmds.status & MPT3_CMD_COMPLETE) || 2965 (ioc->scsih_cmds.status & MPT3_CMD_NOT_USED)) 2966 return SUCCESS; 2967 } else if (smid_task == ioc->ctl_cmds.smid) { 2968 if ((ioc->ctl_cmds.status & MPT3_CMD_COMPLETE) || 2969 (ioc->ctl_cmds.status & MPT3_CMD_NOT_USED)) 2970 return SUCCESS; 2971 } 2972 2973 return FAILED; 2974 } 2975 2976 /** 2977 * scsih_tm_post_processing - post processing of target & LUN reset 2978 * @ioc: per adapter object 2979 * @handle: device handle 2980 * @channel: the channel assigned by the OS 2981 * @id: the id assigned by the OS 2982 * @lun: lun number 2983 * @type: MPI2_SCSITASKMGMT_TASKTYPE__XXX (defined in mpi2_init.h) 2984 * @smid_task: smid assigned to the task 2985 * 2986 * Post processing of target & LUN reset. Due to interrupt latency 2987 * issue it possible that interrupt for aborted IO might not be 2988 * received yet. So before returning failure status, poll the 2989 * reply descriptor pools for the reply of timed out SCSI command. 2990 * Return FAILED status if reply for timed out is not received 2991 * otherwise return SUCCESS. 
2992 */ 2993 static int 2994 scsih_tm_post_processing(struct MPT3SAS_ADAPTER *ioc, u16 handle, 2995 uint channel, uint id, uint lun, u8 type, u16 smid_task) 2996 { 2997 int rc; 2998 2999 rc = scsih_tm_cmd_map_status(ioc, channel, id, lun, type, smid_task); 3000 if (rc == SUCCESS) 3001 return rc; 3002 3003 ioc_info(ioc, 3004 "Poll ReplyDescriptor queues for completion of" 3005 " smid(%d), task_type(0x%02x), handle(0x%04x)\n", 3006 smid_task, type, handle); 3007 3008 /* 3009 * Due to interrupt latency issues, driver may receive interrupt for 3010 * TM first and then for aborted SCSI IO command. So, poll all the 3011 * ReplyDescriptor pools before returning the FAILED status to SML. 3012 */ 3013 mpt3sas_base_mask_interrupts(ioc); 3014 mpt3sas_base_sync_reply_irqs(ioc, 1); 3015 mpt3sas_base_unmask_interrupts(ioc); 3016 3017 return scsih_tm_cmd_map_status(ioc, channel, id, lun, type, smid_task); 3018 } 3019 3020 /** 3021 * mpt3sas_scsih_issue_tm - main routine for sending tm requests 3022 * @ioc: per adapter struct 3023 * @handle: device handle 3024 * @channel: the channel assigned by the OS 3025 * @id: the id assigned by the OS 3026 * @lun: lun number 3027 * @type: MPI2_SCSITASKMGMT_TASKTYPE__XXX (defined in mpi2_init.h) 3028 * @smid_task: smid assigned to the task 3029 * @msix_task: MSIX table index supplied by the OS 3030 * @timeout: timeout in seconds 3031 * @tr_method: Target Reset Method 3032 * Context: user 3033 * 3034 * A generic API for sending task management requests to firmware. 3035 * 3036 * The callback index is set inside `ioc->tm_cb_idx`. 3037 * The caller is responsible to check for outstanding commands. 3038 * 3039 * Return: SUCCESS or FAILED. 
 */
int
mpt3sas_scsih_issue_tm(struct MPT3SAS_ADAPTER *ioc, u16 handle, uint channel,
	uint id, u64 lun, u8 type, u16 smid_task, u16 msix_task,
	u8 timeout, u8 tr_method)
{
	Mpi2SCSITaskManagementRequest_t *mpi_request;
	Mpi2SCSITaskManagementReply_t *mpi_reply;
	Mpi25SCSIIORequest_t *request;
	u16 smid = 0;
	u32 ioc_state;
	int rc;
	u8 issue_reset = 0;

	/* Caller must hold tm_cmds.mutex; the lockless variant is only
	 * reached via mpt3sas_scsih_issue_locked_tm() below. */
	lockdep_assert_held(&ioc->tm_cmds.mutex);

	/* Only one TM command may be in flight per adapter. */
	if (ioc->tm_cmds.status != MPT3_CMD_NOT_USED) {
		ioc_info(ioc, "%s: tm_cmd busy!!!\n", __func__);
		return FAILED;
	}

	if (ioc->shost_recovery || ioc->remove_host ||
	    ioc->pci_error_recovery) {
		ioc_info(ioc, "%s: host reset in progress!\n", __func__);
		return FAILED;
	}

	/* If the IOC is in a bad state (doorbell in use, FAULT, or
	 * COREDUMP) a TM cannot be issued; escalate to a hard reset and
	 * report its outcome instead. */
	ioc_state = mpt3sas_base_get_iocstate(ioc, 0);
	if (ioc_state & MPI2_DOORBELL_USED) {
		dhsprintk(ioc, ioc_info(ioc, "unexpected doorbell active!\n"));
		rc = mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER);
		return (!rc) ? SUCCESS : FAILED;
	}

	if ((ioc_state & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_FAULT) {
		mpt3sas_print_fault_code(ioc, ioc_state &
		    MPI2_DOORBELL_DATA_MASK);
		rc = mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER);
		return (!rc) ? SUCCESS : FAILED;
	} else if ((ioc_state & MPI2_IOC_STATE_MASK) ==
	    MPI2_IOC_STATE_COREDUMP) {
		mpt3sas_print_coredump_info(ioc, ioc_state &
		    MPI2_DOORBELL_DATA_MASK);
		rc = mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER);
		return (!rc) ? SUCCESS : FAILED;
	}

	/* TM requests travel on the high-priority request queue. */
	smid = mpt3sas_base_get_smid_hpr(ioc, ioc->tm_cb_idx);
	if (!smid) {
		ioc_err(ioc, "%s: failed obtaining a smid\n", __func__);
		return FAILED;
	}

	dtmprintk(ioc,
		  ioc_info(ioc, "sending tm: handle(0x%04x), task_type(0x%02x), smid(%d), timeout(%d), tr_method(0x%x)\n",
			   handle, type, smid_task, timeout, tr_method));
	ioc->tm_cmds.status = MPT3_CMD_PENDING;
	mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
	ioc->tm_cmds.smid = smid;
	memset(mpi_request, 0, sizeof(Mpi2SCSITaskManagementRequest_t));
	memset(ioc->tm_cmds.reply, 0, sizeof(Mpi2SCSITaskManagementReply_t));
	mpi_request->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
	mpi_request->DevHandle = cpu_to_le16(handle);
	mpi_request->TaskType = type;
	/* tr_method (e.g. NVMe protocol-level reset) is only meaningful
	 * for abort/query task types here. */
	if (type == MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK ||
	    type == MPI2_SCSITASKMGMT_TASKTYPE_QUERY_TASK)
		mpi_request->MsgFlags = tr_method;
	mpi_request->TaskMID = cpu_to_le16(smid_task);
	int_to_scsilun(lun, (struct scsi_lun *)mpi_request->LUN);
	mpt3sas_scsih_set_tm_flag(ioc, handle);
	init_completion(&ioc->tm_cmds.done);
	ioc->put_smid_hi_priority(ioc, smid, msix_task);
	wait_for_completion_timeout(&ioc->tm_cmds.done, timeout*HZ);
	if (!(ioc->tm_cmds.status & MPT3_CMD_COMPLETE)) {
		/* TM itself timed out: decide (via issue_reset) whether a
		 * hard reset is needed to recover the controller. */
		mpt3sas_check_cmd_timeout(ioc,
		    ioc->tm_cmds.status, mpi_request,
		    sizeof(Mpi2SCSITaskManagementRequest_t)/4, issue_reset);
		if (issue_reset) {
			rc = mpt3sas_base_hard_reset_handler(ioc,
					FORCE_BIG_HAMMER);
			rc = (!rc) ? SUCCESS : FAILED;
			goto out;
		}
	}

	/* sync IRQs in case those were busy during flush. */
	mpt3sas_base_sync_reply_irqs(ioc, 0);

	if (ioc->tm_cmds.status & MPT3_CMD_REPLY_VALID) {
		mpt3sas_trigger_master(ioc, MASTER_TRIGGER_TASK_MANAGMENT);
		mpi_reply = ioc->tm_cmds.reply;
		dtmprintk(ioc,
			  ioc_info(ioc, "complete tm: ioc_status(0x%04x), loginfo(0x%08x), term_count(0x%08x)\n",
				   le16_to_cpu(mpi_reply->IOCStatus),
				   le32_to_cpu(mpi_reply->IOCLogInfo),
				   le32_to_cpu(mpi_reply->TerminationCount)));
		if (ioc->logging_level & MPT_DEBUG_TM) {
			_scsih_response_code(ioc, mpi_reply->ResponseCode);
			if (mpi_reply->IOCStatus)
				_debug_dump_mf(mpi_request,
				    sizeof(Mpi2SCSITaskManagementRequest_t)/4);
		}
	}

	switch (type) {
	case MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK:
		rc = SUCCESS;
		/*
		 * If the DevHandle field in smid_task's entry of the request
		 * pool doesn't match the device handle on which this task
		 * abort TM was issued, then the TM successfully aborted the
		 * timed out command: smid_task's entry in the request pool
		 * is memset to zero once the timed out command is returned
		 * to the SML. If the command was not aborted then
		 * smid_task's entry won't be cleared, it will still carry
		 * the same DevHandle this task abort TM was issued on, and
		 * the driver returns the TM status as FAILED.
		 */
		request = mpt3sas_base_get_msg_frame(ioc, smid_task);
		if (le16_to_cpu(request->DevHandle) != handle)
			break;

		ioc_info(ioc, "Task abort tm failed: handle(0x%04x),"
		    "timeout(%d) tr_method(0x%x) smid(%d) msix_index(%d)\n",
		    handle, timeout, tr_method, smid_task, msix_task);
		rc = FAILED;
		break;

	case MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET:
	case MPI2_SCSITASKMGMT_TASKTYPE_ABRT_TASK_SET:
	case MPI2_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET:
		rc = scsih_tm_post_processing(ioc, handle, channel, id, lun,
		    type, smid_task);
		break;
	case MPI2_SCSITASKMGMT_TASKTYPE_QUERY_TASK:
		rc = SUCCESS;
		break;
	default:
		rc = FAILED;
		break;
	}

out:
	/* Always clear the per-handle TM flag and release the TM slot. */
	mpt3sas_scsih_clear_tm_flag(ioc, handle);
	ioc->tm_cmds.status = MPT3_CMD_NOT_USED;
	return rc;
}

/**
 * mpt3sas_scsih_issue_locked_tm - issue a TM while holding tm_cmds.mutex
 * @ioc: per adapter struct
 * @handle: device handle
 * @channel: the channel assigned by the OS
 * @id: the id assigned by the OS
 * @lun: lun number
 * @type: MPI2_SCSITASKMGMT_TASKTYPE__XXX
 * @smid_task: smid assigned to the task
 * @msix_task: MSIX table index supplied by the OS
 * @timeout: timeout in seconds
 * @tr_method: task reset method, e.g. protocol-level reset for NVMe
 *
 * Convenience wrapper: takes ioc->tm_cmds.mutex around
 * mpt3sas_scsih_issue_tm() so callers outside the TM path don't have to.
 *
 * Return: SUCCESS or FAILED.
 */
int mpt3sas_scsih_issue_locked_tm(struct MPT3SAS_ADAPTER *ioc, u16 handle,
	uint channel, uint id, u64 lun, u8 type, u16 smid_task,
	u16 msix_task, u8 timeout, u8 tr_method)
{
	int ret;

	mutex_lock(&ioc->tm_cmds.mutex);
	ret = mpt3sas_scsih_issue_tm(ioc, handle, channel, id, lun, type,
	    smid_task, msix_task, timeout, tr_method);
	mutex_unlock(&ioc->tm_cmds.mutex);

	return ret;
}

/**
 * _scsih_tm_display_info - displays info about the device
 * @ioc: per adapter struct
 * @scmd: pointer to scsi command object
 *
 * Called by task management callback handlers.
 */
static void
_scsih_tm_display_info(struct MPT3SAS_ADAPTER *ioc, struct scsi_cmnd *scmd)
{
	struct scsi_target *starget = scmd->device->sdev_target;
	struct MPT3SAS_TARGET *priv_target = starget->hostdata;
	struct _sas_device *sas_device = NULL;
	struct _pcie_device *pcie_device = NULL;
	unsigned long flags;
	char *device_str = NULL;

	if (!priv_target)
		return;
	if (ioc->hide_ir_msg)
		device_str = "WarpDrive";
	else
		device_str = "volume";

	scsi_print_command(scmd);
	if (priv_target->flags & MPT_TARGET_FLAGS_VOLUME) {
		starget_printk(KERN_INFO, starget,
		    "%s handle(0x%04x), %s wwid(0x%016llx)\n",
		    device_str, priv_target->handle,
		    device_str, (unsigned long long)priv_target->sas_address);

	} else if (priv_target->flags & MPT_TARGET_FLAGS_PCIE_DEVICE) {
		/* Device lookup and printing under pcie_device_lock; the
		 * returned reference is dropped before unlocking. */
		spin_lock_irqsave(&ioc->pcie_device_lock, flags);
		pcie_device = __mpt3sas_get_pdev_from_target(ioc, priv_target);
		if (pcie_device) {
			starget_printk(KERN_INFO, starget,
			    "handle(0x%04x), wwid(0x%016llx), port(%d)\n",
			    pcie_device->handle,
			    (unsigned long long)pcie_device->wwid,
			    pcie_device->port_num);
			if (pcie_device->enclosure_handle != 0)
				starget_printk(KERN_INFO, starget,
				    "enclosure logical id(0x%016llx), slot(%d)\n",
				    (unsigned long long)
				    pcie_device->enclosure_logical_id,
				    pcie_device->slot);
			if (pcie_device->connector_name[0] != '\0')
				starget_printk(KERN_INFO, starget,
				    "enclosure level(0x%04x), connector name( %s)\n",
				    pcie_device->enclosure_level,
				    pcie_device->connector_name);
			pcie_device_put(pcie_device);
		}
		spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);

	} else {
		spin_lock_irqsave(&ioc->sas_device_lock, flags);
		sas_device = __mpt3sas_get_sdev_from_target(ioc, priv_target);
		if (sas_device) {
			if (priv_target->flags &
			    MPT_TARGET_FLAGS_RAID_COMPONENT) {
				starget_printk(KERN_INFO, starget,
				    "volume handle(0x%04x), "
				    "volume wwid(0x%016llx)\n",
				    sas_device->volume_handle,
				    (unsigned long long)sas_device->volume_wwid);
			}
			starget_printk(KERN_INFO, starget,
			    "handle(0x%04x), sas_address(0x%016llx), phy(%d)\n",
			    sas_device->handle,
			    (unsigned long long)sas_device->sas_address,
			    sas_device->phy);

			_scsih_display_enclosure_chassis_info(NULL, sas_device,
			    NULL, starget);

			sas_device_put(sas_device);
		}
		spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
	}
}

/**
 * scsih_abort - eh threads main abort routine
 * @scmd: pointer to scsi command object
 *
 * Return: SUCCESS if command aborted else FAILED
 */
static int
scsih_abort(struct scsi_cmnd *scmd)
{
	struct MPT3SAS_ADAPTER *ioc = shost_priv(scmd->device->host);
	struct MPT3SAS_DEVICE *sas_device_priv_data;
	struct scsiio_tracker *st = scsi_cmd_priv(scmd);
	u16 handle;
	int r;

	u8 timeout = 30;
	struct _pcie_device *pcie_device = NULL;
	sdev_printk(KERN_INFO, scmd->device, "attempting task abort!"
	    "scmd(0x%p), outstanding for %u ms & timeout %u ms\n",
	    scmd, jiffies_to_msecs(jiffies - scmd->jiffies_at_alloc),
	    (scsi_cmd_to_rq(scmd)->timeout / HZ) * 1000);
	_scsih_tm_display_info(ioc, scmd);

	sas_device_priv_data = scmd->device->hostdata;
	if (!sas_device_priv_data || !sas_device_priv_data->sas_target ||
	    ioc->remove_host) {
		sdev_printk(KERN_INFO, scmd->device,
		    "device been deleted! scmd(0x%p)\n", scmd);
		scmd->result = DID_NO_CONNECT << 16;
		scsi_done(scmd);
		r = SUCCESS;
		goto out;
	}

	/* check for completed command */
	if (st == NULL || st->cb_idx == 0xFF) {
		sdev_printk(KERN_INFO, scmd->device, "No reference found at "
		    "driver, assuming scmd(0x%p) might have completed\n", scmd);
		scmd->result = DID_RESET << 16;
		r = SUCCESS;
		goto out;
	}

	/* for hidden raid components and volumes this is not supported */
	if (sas_device_priv_data->sas_target->flags &
	    MPT_TARGET_FLAGS_RAID_COMPONENT ||
	    sas_device_priv_data->sas_target->flags & MPT_TARGET_FLAGS_VOLUME) {
		scmd->result = DID_RESET << 16;
		r = FAILED;
		goto out;
	}

	mpt3sas_halt_firmware(ioc);

	handle = sas_device_priv_data->sas_target->handle;
	pcie_device = mpt3sas_get_pdev_by_handle(ioc, handle);
	/* NVMe (non-SCSI) PCIe devices get the adapter's NVMe-specific
	 * abort timeout unless custom TM handling is enabled. */
	if (pcie_device && (!ioc->tm_custom_handling) &&
	    (!(mpt3sas_scsih_is_pcie_scsi_device(pcie_device->device_info))))
		timeout = ioc->nvme_abort_timeout;
	r = mpt3sas_scsih_issue_locked_tm(ioc, handle, scmd->device->channel,
	    scmd->device->id, scmd->device->lun,
	    MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK,
	    st->smid, st->msix_io, timeout, 0);
	/* Command must be cleared after abort */
	if (r == SUCCESS && st->cb_idx != 0xFF)
		r = FAILED;
 out:
	sdev_printk(KERN_INFO, scmd->device, "task abort: %s scmd(0x%p)\n",
	    ((r == SUCCESS) ? "SUCCESS" : "FAILED"), scmd);
	if (pcie_device)
		pcie_device_put(pcie_device);
	return r;
}

/**
 * scsih_dev_reset - eh threads main device reset routine
 * @scmd: pointer to scsi command object
 *
 * Return: SUCCESS if command aborted else FAILED
 */
static int
scsih_dev_reset(struct scsi_cmnd *scmd)
{
	struct MPT3SAS_ADAPTER *ioc = shost_priv(scmd->device->host);
	struct MPT3SAS_DEVICE *sas_device_priv_data;
	struct _sas_device *sas_device = NULL;
	struct _pcie_device *pcie_device = NULL;
	u16 handle;
	u8 tr_method = 0;
	u8 tr_timeout = 30;
	int r;

	struct scsi_target *starget = scmd->device->sdev_target;
	struct MPT3SAS_TARGET *target_priv_data = starget->hostdata;

	sdev_printk(KERN_INFO, scmd->device,
	    "attempting device reset! scmd(0x%p)\n", scmd);
	_scsih_tm_display_info(ioc, scmd);

	sas_device_priv_data = scmd->device->hostdata;
	if (!sas_device_priv_data || !sas_device_priv_data->sas_target ||
	    ioc->remove_host) {
		sdev_printk(KERN_INFO, scmd->device,
		    "device been deleted! scmd(0x%p)\n", scmd);
		scmd->result = DID_NO_CONNECT << 16;
		scsi_done(scmd);
		r = SUCCESS;
		goto out;
	}

	/* for hidden raid components obtain the volume_handle */
	handle = 0;
	if (sas_device_priv_data->sas_target->flags &
	    MPT_TARGET_FLAGS_RAID_COMPONENT) {
		sas_device = mpt3sas_get_sdev_from_target(ioc,
				target_priv_data);
		if (sas_device)
			handle = sas_device->volume_handle;
	} else
		handle = sas_device_priv_data->sas_target->handle;

	if (!handle) {
		scmd->result = DID_RESET << 16;
		r = FAILED;
		goto out;
	}

	pcie_device = mpt3sas_get_pdev_by_handle(ioc, handle);

	/* NVMe devices use the device-reported reset timeout and a
	 * protocol-level reset; everything else gets a link reset. */
	if (pcie_device && (!ioc->tm_custom_handling) &&
	    (!(mpt3sas_scsih_is_pcie_scsi_device(pcie_device->device_info)))) {
		tr_timeout = pcie_device->reset_timeout;
		tr_method = MPI26_SCSITASKMGMT_MSGFLAGS_PROTOCOL_LVL_RST_PCIE;
	} else
		tr_method = MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET;

	r = mpt3sas_scsih_issue_locked_tm(ioc, handle, scmd->device->channel,
	    scmd->device->id, scmd->device->lun,
	    MPI2_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET, 0, 0,
	    tr_timeout, tr_method);
	/* Check for busy commands after reset */
	if (r == SUCCESS && scsi_device_busy(scmd->device))
		r = FAILED;
 out:
	sdev_printk(KERN_INFO, scmd->device, "device reset: %s scmd(0x%p)\n",
	    ((r == SUCCESS) ? "SUCCESS" : "FAILED"), scmd);

	if (sas_device)
		sas_device_put(sas_device);
	if (pcie_device)
		pcie_device_put(pcie_device);

	return r;
}

/**
 * scsih_target_reset - eh threads main target reset routine
 * @scmd: pointer to scsi command object
 *
 * Return: SUCCESS if command aborted else FAILED
 */
static int
scsih_target_reset(struct scsi_cmnd *scmd)
{
	struct MPT3SAS_ADAPTER *ioc = shost_priv(scmd->device->host);
	struct MPT3SAS_DEVICE *sas_device_priv_data;
	struct _sas_device *sas_device = NULL;
	struct _pcie_device *pcie_device = NULL;
	u16 handle;
	u8 tr_method = 0;
	u8 tr_timeout = 30;
	int r;
	struct scsi_target *starget = scmd->device->sdev_target;
	struct MPT3SAS_TARGET *target_priv_data = starget->hostdata;

	starget_printk(KERN_INFO, starget,
	    "attempting target reset! scmd(0x%p)\n", scmd);
	_scsih_tm_display_info(ioc, scmd);

	sas_device_priv_data = scmd->device->hostdata;
	if (!sas_device_priv_data || !sas_device_priv_data->sas_target ||
	    ioc->remove_host) {
		starget_printk(KERN_INFO, starget,
		    "target been deleted! scmd(0x%p)\n", scmd);
		scmd->result = DID_NO_CONNECT << 16;
		scsi_done(scmd);
		r = SUCCESS;
		goto out;
	}

	/* for hidden raid components obtain the volume_handle */
	handle = 0;
	if (sas_device_priv_data->sas_target->flags &
	    MPT_TARGET_FLAGS_RAID_COMPONENT) {
		sas_device = mpt3sas_get_sdev_from_target(ioc,
				target_priv_data);
		if (sas_device)
			handle = sas_device->volume_handle;
	} else
		handle = sas_device_priv_data->sas_target->handle;

	if (!handle) {
		scmd->result = DID_RESET << 16;
		r = FAILED;
		goto out;
	}

	pcie_device = mpt3sas_get_pdev_by_handle(ioc, handle);

	if (pcie_device && (!ioc->tm_custom_handling) &&
	    (!(mpt3sas_scsih_is_pcie_scsi_device(pcie_device->device_info)))) {
		tr_timeout = pcie_device->reset_timeout;
		tr_method = MPI26_SCSITASKMGMT_MSGFLAGS_PROTOCOL_LVL_RST_PCIE;
	} else
		tr_method = MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET;
	r = mpt3sas_scsih_issue_locked_tm(ioc, handle, scmd->device->channel,
	    scmd->device->id, 0,
	    MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET, 0, 0,
	    tr_timeout, tr_method);
	/* Check for busy commands after reset */
	if (r == SUCCESS && atomic_read(&starget->target_busy))
		r = FAILED;
 out:
	starget_printk(KERN_INFO, starget, "target reset: %s scmd(0x%p)\n",
	    ((r == SUCCESS) ? "SUCCESS" : "FAILED"), scmd);

	if (sas_device)
		sas_device_put(sas_device);
	if (pcie_device)
		pcie_device_put(pcie_device);
	return r;
}


/**
 * scsih_host_reset - eh threads main host reset routine
 * @scmd: pointer to scsi command object
 *
 * Return: SUCCESS if command aborted else FAILED
 */
static int
scsih_host_reset(struct scsi_cmnd *scmd)
{
	struct MPT3SAS_ADAPTER *ioc = shost_priv(scmd->device->host);
	int r, retval;

	ioc_info(ioc, "attempting host reset! scmd(0x%p)\n", scmd);
	scsi_print_command(scmd);

	/* Refuse a host reset while the driver is still loading or the
	 * host is being removed. */
	if (ioc->is_driver_loading || ioc->remove_host) {
		ioc_info(ioc, "Blocking the host reset\n");
		r = FAILED;
		goto out;
	}

	retval = mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER);
	r = (retval < 0) ? FAILED : SUCCESS;
out:
	ioc_info(ioc, "host reset: %s scmd(0x%p)\n",
		 r == SUCCESS ? "SUCCESS" : "FAILED", scmd);

	return r;
}

/**
 * _scsih_fw_event_add - insert and queue up fw_event
 * @ioc: per adapter object
 * @fw_event: object describing the event
 * Context: This function will acquire ioc->fw_event_lock.
 *
 * This adds the firmware event object into link list, then queues it up to
 * be processed from user context.
 */
static void
_scsih_fw_event_add(struct MPT3SAS_ADAPTER *ioc, struct fw_event_work *fw_event)
{
	unsigned long flags;

	if (ioc->firmware_event_thread == NULL)
		return;

	spin_lock_irqsave(&ioc->fw_event_lock, flags);
	/* One reference for the list membership ... */
	fw_event_work_get(fw_event);
	INIT_LIST_HEAD(&fw_event->list);
	list_add_tail(&fw_event->list, &ioc->fw_event_list);
	INIT_WORK(&fw_event->work, _firmware_event_work);
	/* ... and one for the queued work item. */
	fw_event_work_get(fw_event);
	queue_work(ioc->firmware_event_thread, &fw_event->work);
	spin_unlock_irqrestore(&ioc->fw_event_lock, flags);
}

/**
 * _scsih_fw_event_del_from_list - delete fw_event from the list
 * @ioc: per adapter object
 * @fw_event: object describing the event
 * Context: This function will acquire ioc->fw_event_lock.
 *
 * If the fw_event is on the fw_event_list, remove it and do a put.
 */
static void
_scsih_fw_event_del_from_list(struct MPT3SAS_ADAPTER *ioc, struct fw_event_work
	*fw_event)
{
	unsigned long flags;

	spin_lock_irqsave(&ioc->fw_event_lock, flags);
	if (!list_empty(&fw_event->list)) {
		list_del_init(&fw_event->list);
		/* Drop the reference the list held (see _scsih_fw_event_add). */
		fw_event_work_put(fw_event);
	}
	spin_unlock_irqrestore(&ioc->fw_event_lock, flags);
}


/**
 * mpt3sas_send_trigger_data_event - send event for processing trigger data
 * @ioc: per adapter object
 * @event_data: trigger event data
 */
void
mpt3sas_send_trigger_data_event(struct MPT3SAS_ADAPTER *ioc,
	struct SL_WH_TRIGGERS_EVENT_DATA_T *event_data)
{
	struct fw_event_work *fw_event;
	u16 sz;

	if (ioc->is_driver_loading)
		return;
	sz = sizeof(*event_data);
	fw_event = alloc_fw_event_work(sz);
	if (!fw_event)
		return;
	fw_event->event = MPT3SAS_PROCESS_TRIGGER_DIAG;
	fw_event->ioc = ioc;
	memcpy(fw_event->event_data, event_data, sizeof(*event_data));
	_scsih_fw_event_add(ioc, fw_event);
	/* Release our allocation reference; the queue now owns the event. */
	fw_event_work_put(fw_event);
}

/**
 * _scsih_error_recovery_delete_devices - remove devices not responding
 * @ioc: per adapter object
 */
static void
_scsih_error_recovery_delete_devices(struct MPT3SAS_ADAPTER *ioc)
{
	struct fw_event_work *fw_event;

	fw_event = alloc_fw_event_work(0);
	if (!fw_event)
		return;
	fw_event->event = MPT3SAS_REMOVE_UNRESPONDING_DEVICES;
	fw_event->ioc = ioc;
	_scsih_fw_event_add(ioc, fw_event);
	fw_event_work_put(fw_event);
}

/**
 * mpt3sas_port_enable_complete - port enable completed (fake event)
 * @ioc: per adapter object
 */
void
mpt3sas_port_enable_complete(struct MPT3SAS_ADAPTER *ioc)
{
	struct fw_event_work *fw_event;

	fw_event = alloc_fw_event_work(0);
	if (!fw_event)
		return;
	fw_event->event = MPT3SAS_PORT_ENABLE_COMPLETE;
	fw_event->ioc = ioc;
	_scsih_fw_event_add(ioc, fw_event);
	fw_event_work_put(fw_event);
}

/* Pop the first event off fw_event_list (dropping the list's reference);
 * the work-item reference taken in _scsih_fw_event_add keeps the object
 * alive for the caller. Returns NULL when the list is empty. */
static struct fw_event_work *dequeue_next_fw_event(struct MPT3SAS_ADAPTER *ioc)
{
	unsigned long flags;
	struct fw_event_work *fw_event = NULL;

	spin_lock_irqsave(&ioc->fw_event_lock, flags);
	if (!list_empty(&ioc->fw_event_list)) {
		fw_event = list_first_entry(&ioc->fw_event_list,
				struct fw_event_work, list);
		list_del_init(&fw_event->list);
		fw_event_work_put(fw_event);
	}
	spin_unlock_irqrestore(&ioc->fw_event_lock, flags);

	return fw_event;
}

/**
 * _scsih_fw_event_cleanup_queue - cleanup event queue
 * @ioc: per adapter object
 *
 * Walk the firmware event queue, either killing timers, or waiting
 * for outstanding events to complete
 *
 * Context: task, can sleep
 */
static void
_scsih_fw_event_cleanup_queue(struct MPT3SAS_ADAPTER *ioc)
{
	struct fw_event_work *fw_event;

	if ((list_empty(&ioc->fw_event_list) && !ioc->current_event) ||
	    !ioc->firmware_event_thread)
		return;
	/*
	 * Set current running event as ignore, so that
	 * current running event will exit quickly.
	 * As diag reset has occurred it is of no use
	 * to process remaining stale event data entries.
	 */
	if (ioc->shost_recovery && ioc->current_event)
		ioc->current_event->ignore = 1;

	ioc->fw_events_cleanup = 1;
	while ((fw_event = dequeue_next_fw_event(ioc)) ||
	    (fw_event = ioc->current_event)) {

		/*
		 * Don't call cancel_work_sync() for current_event
		 * other than MPT3SAS_REMOVE_UNRESPONDING_DEVICES;
		 * otherwise we may observe deadlock if current
		 * hard reset issued as part of processing the current_event.
		 *
		 * Original logic of cleaning the current_event is added
		 * for handling the back to back host reset issued by the user.
		 * i.e. during back to back host reset, driver use to process
		 * the two instances of MPT3SAS_REMOVE_UNRESPONDING_DEVICES
		 * event back to back and this made the drives to unregister
		 * the devices from SML.
		 */

		if (fw_event == ioc->current_event &&
		    ioc->current_event->event !=
		    MPT3SAS_REMOVE_UNRESPONDING_DEVICES) {
			ioc->current_event = NULL;
			continue;
		}

		/*
		 * Driver has to clear ioc->start_scan flag when
		 * it is cleaning up MPT3SAS_PORT_ENABLE_COMPLETE,
		 * otherwise scsi_scan_host() API waits for the
		 * 5 minute timer to expire. If we exit from
		 * scsi_scan_host() early then we can issue the
		 * new port enable request as part of current diag reset.
		 */
		if (fw_event->event == MPT3SAS_PORT_ENABLE_COMPLETE) {
			ioc->port_enable_cmds.status |= MPT3_CMD_RESET;
			ioc->start_scan = 0;
		}

		/*
		 * Wait on the fw_event to complete. If this returns 1, then
		 * the event was never executed, and we need a put for the
		 * reference the work had on the fw_event.
		 *
		 * If it did execute, we wait for it to finish, and the put will
		 * happen from _firmware_event_work()
		 */
		if (cancel_work_sync(&fw_event->work))
			fw_event_work_put(fw_event);

	}
	ioc->fw_events_cleanup = 0;
}

/**
 * _scsih_internal_device_block - block the sdev device
 * @sdev: per device object
 * @sas_device_priv_data : per device driver private data
 *
 * make sure device is blocked without error, if not
 * print an error
 */
static void
_scsih_internal_device_block(struct scsi_device *sdev,
			struct MPT3SAS_DEVICE *sas_device_priv_data)
{
	int r = 0;

	sdev_printk(KERN_INFO, sdev, "device_block, handle(0x%04x)\n",
	    sas_device_priv_data->sas_target->handle);
	sas_device_priv_data->block = 1;

	r = scsi_internal_device_block_nowait(sdev);
	if (r == -EINVAL)
		sdev_printk(KERN_WARNING, sdev,
		    "device_block failed with return(%d) for handle(0x%04x)\n",
		    r, sas_device_priv_data->sas_target->handle);
}

/**
 * _scsih_internal_device_unblock - unblock the sdev device
 * @sdev: per device object
 * @sas_device_priv_data : per device driver private data
 * make sure device is unblocked without error, if not retry
 * by blocking and then unblocking
 */

static void
_scsih_internal_device_unblock(struct scsi_device *sdev,
			struct MPT3SAS_DEVICE *sas_device_priv_data)
{
	int r = 0;

	sdev_printk(KERN_WARNING, sdev, "device_unblock and setting to running, "
	    "handle(0x%04x)\n", sas_device_priv_data->sas_target->handle);
	sas_device_priv_data->block = 0;
	r = scsi_internal_device_unblock_nowait(sdev, SDEV_RUNNING);
	if (r == -EINVAL) {
		/* The device has been set to SDEV_RUNNING by SD layer during
		 * device addition but the request queue is still stopped by
		 * our earlier block call. We need to perform a block again
		 * to get the device to SDEV_BLOCK and then to SDEV_RUNNING */

		sdev_printk(KERN_WARNING, sdev,
		    "device_unblock failed with return(%d) for handle(0x%04x) "
		    "performing a block followed by an unblock\n",
		    r, sas_device_priv_data->sas_target->handle);
		sas_device_priv_data->block = 1;
		r = scsi_internal_device_block_nowait(sdev);
		if (r)
			sdev_printk(KERN_WARNING, sdev, "retried device_block "
			    "failed with return(%d) for handle(0x%04x)\n",
			    r, sas_device_priv_data->sas_target->handle);

		sas_device_priv_data->block = 0;
		r = scsi_internal_device_unblock_nowait(sdev, SDEV_RUNNING);
		if (r)
			sdev_printk(KERN_WARNING, sdev, "retried device_unblock"
			    " failed with return(%d) for handle(0x%04x)\n",
			    r, sas_device_priv_data->sas_target->handle);
	}
}

/**
 * _scsih_ublock_io_all_device - unblock every device
 * @ioc: per adapter object
 *
 * change the device state from block to running
 */
static void
_scsih_ublock_io_all_device(struct MPT3SAS_ADAPTER *ioc)
{
	struct MPT3SAS_DEVICE *sas_device_priv_data;
	struct scsi_device *sdev;

	shost_for_each_device(sdev, ioc->shost) {
		sas_device_priv_data = sdev->hostdata;
		if (!sas_device_priv_data)
			continue;
		if (!sas_device_priv_data->block)
			continue;

		dewtprintk(ioc, sdev_printk(KERN_INFO, sdev,
			"device_running, handle(0x%04x)\n",
		    sas_device_priv_data->sas_target->handle));
		_scsih_internal_device_unblock(sdev, sas_device_priv_data);
	}
}


/**
 * _scsih_ublock_io_device - prepare device to be deleted
 * @ioc: per adapter object
 * @sas_address: sas address
 * @port: hba port entry
 *
 * unblock then put device in offline state
 */
static void
_scsih_ublock_io_device(struct MPT3SAS_ADAPTER *ioc,
	u64 sas_address, struct hba_port *port)
{
	struct MPT3SAS_DEVICE *sas_device_priv_data;
	struct scsi_device *sdev;

	shost_for_each_device(sdev, ioc->shost) {
		sas_device_priv_data = sdev->hostdata;
		if (!sas_device_priv_data || !sas_device_priv_data->sas_target)
			continue;
		/* Match on both SAS address and HBA port. */
		if (sas_device_priv_data->sas_target->sas_address
		    != sas_address)
			continue;
		if (sas_device_priv_data->sas_target->port != port)
			continue;
		if (sas_device_priv_data->block)
			_scsih_internal_device_unblock(sdev,
				sas_device_priv_data);
	}
}

/**
 * _scsih_block_io_all_device - set the device state to SDEV_BLOCK
 * @ioc: per adapter object
 *
 * During device pull we need to appropriately set the sdev state.
 */
static void
_scsih_block_io_all_device(struct MPT3SAS_ADAPTER *ioc)
{
	struct MPT3SAS_DEVICE *sas_device_priv_data;
	struct scsi_device *sdev;

	shost_for_each_device(sdev, ioc->shost) {
		sas_device_priv_data = sdev->hostdata;
		if (!sas_device_priv_data)
			continue;
		if (sas_device_priv_data->block)
			continue;
		/* Never block SES (enclosure) devices. */
		if (sas_device_priv_data->ignore_delay_remove) {
			sdev_printk(KERN_INFO, sdev,
			    "%s skip device_block for SES handle(0x%04x)\n",
			    __func__, sas_device_priv_data->sas_target->handle);
			continue;
		}
		_scsih_internal_device_block(sdev, sas_device_priv_data);
	}
}

/**
 * _scsih_block_io_device - set the device state to SDEV_BLOCK
 * @ioc: per adapter object
 * @handle: device handle
 *
 * During device pull we need to appropriately set the sdev state.
 */
static void
_scsih_block_io_device(struct MPT3SAS_ADAPTER *ioc, u16 handle)
{
	struct MPT3SAS_DEVICE *sas_device_priv_data;
	struct scsi_device *sdev;
	struct _sas_device *sas_device;

	sas_device = mpt3sas_get_sdev_by_handle(ioc, handle);

	shost_for_each_device(sdev, ioc->shost) {
		sas_device_priv_data = sdev->hostdata;
		if (!sas_device_priv_data)
			continue;
		if (sas_device_priv_data->sas_target->handle != handle)
			continue;
		if (sas_device_priv_data->block)
			continue;
		/* Skip devices whose sas rphy addition is still pending. */
		if (sas_device && sas_device->pend_sas_rphy_add)
			continue;
		/* Never block SES (enclosure) devices. */
		if (sas_device_priv_data->ignore_delay_remove) {
			sdev_printk(KERN_INFO, sdev,
			    "%s skip device_block for SES handle(0x%04x)\n",
			    __func__, sas_device_priv_data->sas_target->handle);
			continue;
		}
		_scsih_internal_device_block(sdev, sas_device_priv_data);
	}

	if (sas_device)
		sas_device_put(sas_device);
}

/**
 * _scsih_block_io_to_children_attached_to_ex
 * @ioc: per adapter object
 * @sas_expander: the sas_device object
 *
 * This routine set sdev state to SDEV_BLOCK for all devices
 * attached to this expander. This function called when expander is
 * pulled.
 */
static void
_scsih_block_io_to_children_attached_to_ex(struct MPT3SAS_ADAPTER *ioc,
	struct _sas_node *sas_expander)
{
	struct _sas_port *mpt3sas_port;
	struct _sas_device *sas_device;
	struct _sas_node *expander_sibling;
	unsigned long flags;

	if (!sas_expander)
		return;

	/* First pass: mark every end device on this expander for blocking. */
	list_for_each_entry(mpt3sas_port,
	   &sas_expander->sas_port_list, port_list) {
		if (mpt3sas_port->remote_identify.device_type ==
		    SAS_END_DEVICE) {
			spin_lock_irqsave(&ioc->sas_device_lock, flags);
			sas_device = __mpt3sas_get_sdev_by_addr(ioc,
			    mpt3sas_port->remote_identify.sas_address,
			    mpt3sas_port->hba_port);
			if (sas_device) {
				set_bit(sas_device->handle,
						ioc->blocking_handles);
				sas_device_put(sas_device);
			}
			spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
		}
	}

	/* Second pass: recurse into any child expanders. */
	list_for_each_entry(mpt3sas_port,
	   &sas_expander->sas_port_list, port_list) {

		if (mpt3sas_port->remote_identify.device_type ==
		    SAS_EDGE_EXPANDER_DEVICE ||
		    mpt3sas_port->remote_identify.device_type ==
		    SAS_FANOUT_EXPANDER_DEVICE) {
			expander_sibling =
			    mpt3sas_scsih_expander_find_by_sas_address(
			    ioc, mpt3sas_port->remote_identify.sas_address,
			    mpt3sas_port->hba_port);
			_scsih_block_io_to_children_attached_to_ex(ioc,
			    expander_sibling);
		}
	}
}

/**
 * _scsih_block_io_to_children_attached_directly
 * @ioc: per adapter object
 * @event_data: topology change event data
 *
 * This routine set sdev state to SDEV_BLOCK for all devices
 * direct attached during device pull.
 */
static void
_scsih_block_io_to_children_attached_directly(struct MPT3SAS_ADAPTER *ioc,
	Mpi2EventDataSasTopologyChangeList_t *event_data)
{
	int i;
	u16 handle;
	u16 reason_code;

	for (i = 0; i < event_data->NumEntries; i++) {
		handle = le16_to_cpu(event_data->PHY[i].AttachedDevHandle);
		if (!handle)
			continue;
		reason_code = event_data->PHY[i].PhyStatus &
		    MPI2_EVENT_SAS_TOPO_RC_MASK;
		if (reason_code == MPI2_EVENT_SAS_TOPO_RC_DELAY_NOT_RESPONDING)
			_scsih_block_io_device(ioc, handle);
	}
}

/**
 * _scsih_block_io_to_pcie_children_attached_directly
 * @ioc: per adapter object
 * @event_data: topology change event data
 *
 * This routine set sdev state to SDEV_BLOCK for all devices
 * direct attached during device pull/reconnect.
 */
static void
_scsih_block_io_to_pcie_children_attached_directly(struct MPT3SAS_ADAPTER *ioc,
		Mpi26EventDataPCIeTopologyChangeList_t *event_data)
{
	int i;
	u16 handle;
	u16 reason_code;

	for (i = 0; i < event_data->NumEntries; i++) {
		handle =
			le16_to_cpu(event_data->PortEntry[i].AttachedDevHandle);
		if (!handle)
			continue;
		reason_code = event_data->PortEntry[i].PortStatus;
		if (reason_code ==
				MPI26_EVENT_PCIE_TOPO_PS_DELAY_NOT_RESPONDING)
			_scsih_block_io_device(ioc, handle);
	}
}
/**
 * _scsih_tm_tr_send - send task management request
 * @ioc: per adapter object
 * @handle: device handle
 * Context: interrupt time.
 *
 * This code is to initiate the device removal handshake protocol
 * with controller firmware. This function will issue target reset
 * using high priority request queue. It will send a sas iounit
 * control request (MPI2_SAS_OP_REMOVE_DEVICE) from this completion.
 *
 * This is designed to send muliple task management request at the same
 * time to the fifo. If the fifo is full, we will append the request,
 * and process it in a future completion.
 */
static void
_scsih_tm_tr_send(struct MPT3SAS_ADAPTER *ioc, u16 handle)
{
	Mpi2SCSITaskManagementRequest_t *mpi_request;
	u16 smid;
	struct _sas_device *sas_device = NULL;
	struct _pcie_device *pcie_device = NULL;
	struct MPT3SAS_TARGET *sas_target_priv_data = NULL;
	u64 sas_address = 0;
	unsigned long flags;
	struct _tr_list *delayed_tr;
	u32 ioc_state;
	u8 tr_method = 0;
	struct hba_port *port = NULL;

	if (ioc->pci_error_recovery) {
		dewtprintk(ioc,
			   ioc_info(ioc, "%s: host in pci error recovery: handle(0x%04x)\n",
				    __func__, handle));
		return;
	}
	ioc_state = mpt3sas_base_get_iocstate(ioc, 1);
	if (ioc_state != MPI2_IOC_STATE_OPERATIONAL) {
		dewtprintk(ioc,
			   ioc_info(ioc, "%s: host is not operational: handle(0x%04x)\n",
				    __func__, handle));
		return;
	}

	/* if PD, then return */
	if (test_bit(handle, ioc->pd_handles))
		return;

	clear_bit(handle, ioc->pend_os_device_add);

	/* Look the handle up as a SAS device first, then as a PCIe
	 * device; mark the matching target deleted under its lock. */
	spin_lock_irqsave(&ioc->sas_device_lock, flags);
	sas_device = __mpt3sas_get_sdev_by_handle(ioc, handle);
	if (sas_device && sas_device->starget &&
	    sas_device->starget->hostdata) {
		sas_target_priv_data = sas_device->starget->hostdata;
		sas_target_priv_data->deleted = 1;
		sas_address = sas_device->sas_address;
		port = sas_device->port;
	}
	spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
	if (!sas_device) {
		spin_lock_irqsave(&ioc->pcie_device_lock, flags);
		pcie_device = __mpt3sas_get_pdev_by_handle(ioc, handle);
		if (pcie_device && pcie_device->starget &&
		    pcie_device->starget->hostdata) {
			sas_target_priv_data = pcie_device->starget->hostdata;
			sas_target_priv_data->deleted = 1;
			sas_address = pcie_device->wwid;
		}
		spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
		if (pcie_device && (!ioc->tm_custom_handling) &&
		    (!(mpt3sas_scsih_is_pcie_scsi_device(
		    pcie_device->device_info))))
			tr_method =
			    MPI26_SCSITASKMGMT_MSGFLAGS_PROTOCOL_LVL_RST_PCIE;
		else
			tr_method = MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET;
	}
	if (sas_target_priv_data) {
		dewtprintk(ioc,
			   ioc_info(ioc, "setting delete flag: handle(0x%04x), sas_addr(0x%016llx)\n",
				    handle, (u64)sas_address));
		if (sas_device) {
			if (sas_device->enclosure_handle != 0)
				dewtprintk(ioc,
					   ioc_info(ioc, "setting delete flag:enclosure logical id(0x%016llx), slot(%d)\n",
						    (u64)sas_device->enclosure_logical_id,
						    sas_device->slot));
			if (sas_device->connector_name[0] != '\0')
				dewtprintk(ioc,
					   ioc_info(ioc, "setting delete flag: enclosure level(0x%04x), connector name( %s)\n",
						    sas_device->enclosure_level,
						    sas_device->connector_name));
		} else if (pcie_device) {
			if (pcie_device->enclosure_handle != 0)
				dewtprintk(ioc,
					   ioc_info(ioc, "setting delete flag: logical id(0x%016llx), slot(%d)\n",
						    (u64)pcie_device->enclosure_logical_id,
						    pcie_device->slot));
			if (pcie_device->connector_name[0] != '\0')
				dewtprintk(ioc,
					   ioc_info(ioc, "setting delete flag:, enclosure level(0x%04x), connector name( %s)\n",
						    pcie_device->enclosure_level,
						    pcie_device->connector_name));
		}
		_scsih_ublock_io_device(ioc, sas_address, port);
		sas_target_priv_data->handle = MPT3SAS_INVALID_DEVICE_HANDLE;
	}

	smid = mpt3sas_base_get_smid_hpr(ioc, ioc->tm_tr_cb_idx);
	if (!smid) {
		/* No free high-priority smid: queue the handle on the
		 * delayed-TR list and retry from a future completion. */
		delayed_tr = kzalloc(sizeof(*delayed_tr), GFP_ATOMIC);
		if (!delayed_tr)
			goto out;
		INIT_LIST_HEAD(&delayed_tr->list);
		delayed_tr->handle = handle;
		list_add_tail(&delayed_tr->list, &ioc->delayed_tr_list);
		dewtprintk(ioc,
			   ioc_info(ioc, "DELAYED:tr:handle(0x%04x), (open)\n",
				    handle));
		goto out;
	}

	dewtprintk(ioc,
		   ioc_info(ioc, "tr_send:handle(0x%04x), (open), smid(%d), cb(%d)\n",
			    handle, smid, ioc->tm_tr_cb_idx));
	mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
	memset(mpi_request, 0, sizeof(Mpi2SCSITaskManagementRequest_t));
	mpi_request->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
	mpi_request->DevHandle = cpu_to_le16(handle);
	mpi_request->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET;
	mpi_request->MsgFlags = tr_method;
	set_bit(handle, ioc->device_remove_in_progress);
	ioc->put_smid_hi_priority(ioc, smid, 0);
	mpt3sas_trigger_master(ioc, MASTER_TRIGGER_DEVICE_REMOVAL);

out:
	if (sas_device)
		sas_device_put(sas_device);
	if (pcie_device)
		pcie_device_put(pcie_device);
}

/**
 * _scsih_tm_tr_complete -
 * @ioc: per adapter object
 * @smid: system request message index
 * @msix_index: MSIX table index supplied by the OS
 * @reply: reply message frame(lower 32bit addr)
 * Context: interrupt time.
 *
 * This is the target reset completion routine.
 * This code is part of the code to initiate the device removal
 * handshake protocol with controller firmware.
 * It will send a sas iounit control request (MPI2_SAS_OP_REMOVE_DEVICE)
 *
 * Return: 1 meaning mf should be freed from _base_interrupt
 *         0 means the mf is freed from this function.
*/
static u8
_scsih_tm_tr_complete(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index,
    u32 reply)
{
    u16 handle;
    Mpi2SCSITaskManagementRequest_t *mpi_request_tm;
    Mpi2SCSITaskManagementReply_t *mpi_reply =
        mpt3sas_base_get_reply_virt_addr(ioc, reply);
    Mpi2SasIoUnitControlRequest_t *mpi_request;
    u16 smid_sas_ctrl;
    u32 ioc_state;
    struct _sc_list *delayed_sc;

    /* Do not continue the handshake when the controller is unusable. */
    if (ioc->pci_error_recovery) {
        dewtprintk(ioc,
            ioc_info(ioc, "%s: host in pci error recovery\n",
                __func__));
        return 1;
    }
    ioc_state = mpt3sas_base_get_iocstate(ioc, 1);
    if (ioc_state != MPI2_IOC_STATE_OPERATIONAL) {
        dewtprintk(ioc,
            ioc_info(ioc, "%s: host is not operational\n",
                __func__));
        return 1;
    }
    if (unlikely(!mpi_reply)) {
        ioc_err(ioc, "mpi_reply not valid at %s:%d/%s()!\n",
            __FILE__, __LINE__, __func__);
        return 1;
    }
    /* Cross-check the reply against the original request frame; a
     * mismatched handle indicates a spurious completion.
     */
    mpi_request_tm = mpt3sas_base_get_msg_frame(ioc, smid);
    handle = le16_to_cpu(mpi_request_tm->DevHandle);
    if (handle != le16_to_cpu(mpi_reply->DevHandle)) {
        dewtprintk(ioc,
            ioc_err(ioc, "spurious interrupt: handle(0x%04x:0x%04x), smid(%d)!!!\n",
                handle,
                le16_to_cpu(mpi_reply->DevHandle), smid));
        return 0;
    }

    mpt3sas_trigger_master(ioc, MASTER_TRIGGER_TASK_MANAGMENT);
    dewtprintk(ioc,
        ioc_info(ioc, "tr_complete:handle(0x%04x), (open) smid(%d), ioc_status(0x%04x), loginfo(0x%08x), completed(%d)\n",
            handle, smid, le16_to_cpu(mpi_reply->IOCStatus),
            le32_to_cpu(mpi_reply->IOCLogInfo),
            le32_to_cpu(mpi_reply->TerminationCount)));

    /* Second half of the handshake: send SAS_OP_REMOVE_DEVICE. If no
     * smid is free, queue it on delayed_sc_list for later replay.
     */
    smid_sas_ctrl = mpt3sas_base_get_smid(ioc, ioc->tm_sas_control_cb_idx);
    if (!smid_sas_ctrl) {
        delayed_sc = kzalloc(sizeof(*delayed_sc), GFP_ATOMIC);
        if (!delayed_sc)
            return _scsih_check_for_pending_tm(ioc, smid);
        INIT_LIST_HEAD(&delayed_sc->list);
        delayed_sc->handle = le16_to_cpu(mpi_request_tm->DevHandle);
        list_add_tail(&delayed_sc->list, &ioc->delayed_sc_list);
        dewtprintk(ioc,
            ioc_info(ioc, "DELAYED:sc:handle(0x%04x), (open)\n",
                handle));
        return _scsih_check_for_pending_tm(ioc, smid);
    }

    dewtprintk(ioc,
        ioc_info(ioc, "sc_send:handle(0x%04x), (open), smid(%d), cb(%d)\n",
            handle, smid_sas_ctrl, ioc->tm_sas_control_cb_idx));
    mpi_request = mpt3sas_base_get_msg_frame(ioc, smid_sas_ctrl);
    memset(mpi_request, 0, sizeof(Mpi2SasIoUnitControlRequest_t));
    mpi_request->Function = MPI2_FUNCTION_SAS_IO_UNIT_CONTROL;
    mpi_request->Operation = MPI2_SAS_OP_REMOVE_DEVICE;
    /* Already little-endian in the request frame; copy as-is. */
    mpi_request->DevHandle = mpi_request_tm->DevHandle;
    ioc->put_smid_default(ioc, smid_sas_ctrl);

    return _scsih_check_for_pending_tm(ioc, smid);
}

/** _scsih_allow_scmd_to_device - check whether scmd needs to
 *                                issue to IOC or not.
 * @ioc: per adapter object
 * @scmd: pointer to scsi command object
 *
 * Returns true if scmd can be issued to IOC otherwise returns false.
 */
inline bool _scsih_allow_scmd_to_device(struct MPT3SAS_ADAPTER *ioc,
    struct scsi_cmnd *scmd)
{

    if (ioc->pci_error_recovery)
        return false;

    /* MPI2 (SAS 2.0) generation: block everything during host removal. */
    if (ioc->hba_mpi_version_belonged == MPI2_VERSION) {
        if (ioc->remove_host)
            return false;

        return true;
    }

    /* Later generations: while the host is being removed, still allow
     * cache-flush and start/stop so devices can shut down cleanly.
     */
    if (ioc->remove_host) {

        switch (scmd->cmnd[0]) {
        case SYNCHRONIZE_CACHE:
        case START_STOP:
            return true;
        default:
            return false;
        }
    }

    return true;
}

/**
 * _scsih_sas_control_complete - completion routine
 * @ioc: per adapter object
 * @smid: system request message index
 * @msix_index: MSIX table index supplied by the OS
 * @reply: reply message frame(lower 32bit addr)
 * Context: interrupt time.
 *
 * This is the sas iounit control completion routine.
 * This code is part of the code to initiate the device removal
 * handshake protocol with controller firmware.
 *
 * Return: 1 meaning mf should be freed from _base_interrupt
 * 0 means the mf is freed from this function.
 */
static u8
_scsih_sas_control_complete(struct MPT3SAS_ADAPTER *ioc, u16 smid,
    u8 msix_index, u32 reply)
{
    Mpi2SasIoUnitControlReply_t *mpi_reply =
        mpt3sas_base_get_reply_virt_addr(ioc, reply);

    if (likely(mpi_reply)) {
        dewtprintk(ioc,
            ioc_info(ioc, "sc_complete:handle(0x%04x), (open) smid(%d), ioc_status(0x%04x), loginfo(0x%08x)\n",
                le16_to_cpu(mpi_reply->DevHandle), smid,
                le16_to_cpu(mpi_reply->IOCStatus),
                le32_to_cpu(mpi_reply->IOCLogInfo)));
        /* Removal finished successfully: clear the in-progress bit set
         * by _scsih_tm_tr_send so the handle can be reused.
         */
        if (le16_to_cpu(mpi_reply->IOCStatus) ==
            MPI2_IOCSTATUS_SUCCESS) {
            clear_bit(le16_to_cpu(mpi_reply->DevHandle),
                ioc->device_remove_in_progress);
        }
    } else {
        ioc_err(ioc, "mpi_reply not valid at %s:%d/%s()!\n",
            __FILE__, __LINE__, __func__);
    }
    return mpt3sas_check_for_pending_internal_cmds(ioc, smid);
}

/**
 * _scsih_tm_tr_volume_send - send target reset request for volumes
 * @ioc: per adapter object
 * @handle: device handle
 * Context: interrupt time.
 *
 * This is designed to send muliple task management request at the same
 * time to the fifo. If the fifo is full, we will append the request,
 * and process it in a future completion.
*/
static void
_scsih_tm_tr_volume_send(struct MPT3SAS_ADAPTER *ioc, u16 handle)
{
    Mpi2SCSITaskManagementRequest_t *mpi_request;
    u16 smid;
    struct _tr_list *delayed_tr;

    if (ioc->pci_error_recovery) {
        dewtprintk(ioc,
            ioc_info(ioc, "%s: host reset in progress!\n",
                __func__));
        return;
    }

    /* No free high-priority smid: park the request on the volume TR
     * list; _scsih_check_for_pending_tm replays it later.
     */
    smid = mpt3sas_base_get_smid_hpr(ioc, ioc->tm_tr_volume_cb_idx);
    if (!smid) {
        delayed_tr = kzalloc(sizeof(*delayed_tr), GFP_ATOMIC);
        if (!delayed_tr)
            return;
        INIT_LIST_HEAD(&delayed_tr->list);
        delayed_tr->handle = handle;
        list_add_tail(&delayed_tr->list, &ioc->delayed_tr_volume_list);
        dewtprintk(ioc,
            ioc_info(ioc, "DELAYED:tr:handle(0x%04x), (open)\n",
                handle));
        return;
    }

    dewtprintk(ioc,
        ioc_info(ioc, "tr_send:handle(0x%04x), (open), smid(%d), cb(%d)\n",
            handle, smid, ioc->tm_tr_volume_cb_idx));
    mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
    memset(mpi_request, 0, sizeof(Mpi2SCSITaskManagementRequest_t));
    mpi_request->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
    mpi_request->DevHandle = cpu_to_le16(handle);
    mpi_request->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET;
    ioc->put_smid_hi_priority(ioc, smid, 0);
}

/**
 * _scsih_tm_volume_tr_complete - target reset completion
 * @ioc: per adapter object
 * @smid: system request message index
 * @msix_index: MSIX table index supplied by the OS
 * @reply: reply message frame(lower 32bit addr)
 * Context: interrupt time.
 *
 * Return: 1 meaning mf should be freed from _base_interrupt
 * 0 means the mf is freed from this function.
 */
static u8
_scsih_tm_volume_tr_complete(struct MPT3SAS_ADAPTER *ioc, u16 smid,
    u8 msix_index, u32 reply)
{
    u16 handle;
    Mpi2SCSITaskManagementRequest_t *mpi_request_tm;
    Mpi2SCSITaskManagementReply_t *mpi_reply =
        mpt3sas_base_get_reply_virt_addr(ioc, reply);

    if (ioc->shost_recovery || ioc->pci_error_recovery) {
        dewtprintk(ioc,
            ioc_info(ioc, "%s: host reset in progress!\n",
                __func__));
        return 1;
    }
    if (unlikely(!mpi_reply)) {
        ioc_err(ioc, "mpi_reply not valid at %s:%d/%s()!\n",
            __FILE__, __LINE__, __func__);
        return 1;
    }

    /* Validate the reply against the original request frame. */
    mpi_request_tm = mpt3sas_base_get_msg_frame(ioc, smid);
    handle = le16_to_cpu(mpi_request_tm->DevHandle);
    if (handle != le16_to_cpu(mpi_reply->DevHandle)) {
        dewtprintk(ioc,
            ioc_err(ioc, "spurious interrupt: handle(0x%04x:0x%04x), smid(%d)!!!\n",
                handle, le16_to_cpu(mpi_reply->DevHandle),
                smid));
        return 0;
    }

    dewtprintk(ioc,
        ioc_info(ioc, "tr_complete:handle(0x%04x), (open) smid(%d), ioc_status(0x%04x), loginfo(0x%08x), completed(%d)\n",
            handle, smid, le16_to_cpu(mpi_reply->IOCStatus),
            le32_to_cpu(mpi_reply->IOCLogInfo),
            le32_to_cpu(mpi_reply->TerminationCount)));

    return _scsih_check_for_pending_tm(ioc, smid);
}

/**
 * _scsih_issue_delayed_event_ack - issue delayed Event ACK messages
 * @ioc: per adapter object
 * @smid: system request message index
 * @event: Event ID
 * @event_context: used to track events uniquely
 *
 * Context - processed in interrupt context.
*/
static void
_scsih_issue_delayed_event_ack(struct MPT3SAS_ADAPTER *ioc, u16 smid, U16 event,
    U32 event_context)
{
    Mpi2EventAckRequest_t *ack_request;
    int i = smid - ioc->internal_smid;
    unsigned long flags;

    /* Without releasing the smid just update the
     * call back index and reuse the same smid for
     * processing this delayed request
     */
    spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
    ioc->internal_lookup[i].cb_idx = ioc->base_cb_idx;
    spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);

    /* NOTE(review): 'event' is a U16 value swapped here only for the
     * debug print; it is stored into the frame unconverted below --
     * confirm the stored byte order matches the firmware expectation.
     */
    dewtprintk(ioc,
        ioc_info(ioc, "EVENT ACK: event(0x%04x), smid(%d), cb(%d)\n",
            le16_to_cpu(event), smid, ioc->base_cb_idx));
    ack_request = mpt3sas_base_get_msg_frame(ioc, smid);
    memset(ack_request, 0, sizeof(Mpi2EventAckRequest_t));
    ack_request->Function = MPI2_FUNCTION_EVENT_ACK;
    ack_request->Event = event;
    ack_request->EventContext = event_context;
    ack_request->VF_ID = 0;  /* TODO */
    ack_request->VP_ID = 0;
    ioc->put_smid_default(ioc, smid);
}

/**
 * _scsih_issue_delayed_sas_io_unit_ctrl - issue delayed
 *                                         sas_io_unit_ctrl messages
 * @ioc: per adapter object
 * @smid: system request message index
 * @handle: device handle
 *
 * Context - processed in interrupt context.
 */
static void
_scsih_issue_delayed_sas_io_unit_ctrl(struct MPT3SAS_ADAPTER *ioc,
    u16 smid, u16 handle)
{
    Mpi2SasIoUnitControlRequest_t *mpi_request;
    u32 ioc_state;
    int i = smid - ioc->internal_smid;
    unsigned long flags;

    /* Skip the delayed request if the controller can no longer take it. */
    if (ioc->remove_host) {
        dewtprintk(ioc,
            ioc_info(ioc, "%s: host has been removed\n",
                __func__));
        return;
    } else if (ioc->pci_error_recovery) {
        dewtprintk(ioc,
            ioc_info(ioc, "%s: host in pci error recovery\n",
                __func__));
        return;
    }
    ioc_state = mpt3sas_base_get_iocstate(ioc, 1);
    if (ioc_state != MPI2_IOC_STATE_OPERATIONAL) {
        dewtprintk(ioc,
            ioc_info(ioc, "%s: host is not operational\n",
                __func__));
        return;
    }

    /* Without releasing the smid just update the
     * call back index and reuse the same smid for
     * processing this delayed request
     */
    spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
    ioc->internal_lookup[i].cb_idx = ioc->tm_sas_control_cb_idx;
    spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);

    dewtprintk(ioc,
        ioc_info(ioc, "sc_send:handle(0x%04x), (open), smid(%d), cb(%d)\n",
            handle, smid, ioc->tm_sas_control_cb_idx));
    mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
    memset(mpi_request, 0, sizeof(Mpi2SasIoUnitControlRequest_t));
    mpi_request->Function = MPI2_FUNCTION_SAS_IO_UNIT_CONTROL;
    mpi_request->Operation = MPI2_SAS_OP_REMOVE_DEVICE;
    mpi_request->DevHandle = cpu_to_le16(handle);
    ioc->put_smid_default(ioc, smid);
}

/**
 * mpt3sas_check_for_pending_internal_cmds - check for pending internal messages
 * @ioc: per adapter object
 * @smid: system request message index
 *
 * Context: Executed in interrupt context
 *
 * This will check delayed internal messages list, and process the
 * next request.
 *
 * Return: 1 meaning mf should be freed from _base_interrupt
 * 0 means the mf is freed from this function.
 */
u8
mpt3sas_check_for_pending_internal_cmds(struct MPT3SAS_ADAPTER *ioc, u16 smid)
{
    struct _sc_list *delayed_sc;
    struct _event_ack_list *delayed_event_ack;

    /* Event ACKs take priority; the smid is reused in place by the
     * issue helper, so return 0 (mf not freed by caller).
     */
    if (!list_empty(&ioc->delayed_event_ack_list)) {
        delayed_event_ack = list_entry(ioc->delayed_event_ack_list.next,
            struct _event_ack_list, list);
        _scsih_issue_delayed_event_ack(ioc, smid,
            delayed_event_ack->Event, delayed_event_ack->EventContext);
        list_del(&delayed_event_ack->list);
        kfree(delayed_event_ack);
        return 0;
    }

    if (!list_empty(&ioc->delayed_sc_list)) {
        delayed_sc = list_entry(ioc->delayed_sc_list.next,
            struct _sc_list, list);
        _scsih_issue_delayed_sas_io_unit_ctrl(ioc, smid,
            delayed_sc->handle);
        list_del(&delayed_sc->list);
        kfree(delayed_sc);
        return 0;
    }
    return 1;
}

/**
 * _scsih_check_for_pending_tm - check for pending task management
 * @ioc: per adapter object
 * @smid: system request message index
 *
 * This will check delayed target reset list, and feed the
 * next reqeust.
 *
 * Return: 1 meaning mf should be freed from _base_interrupt
 * 0 means the mf is freed from this function.
 */
static u8
_scsih_check_for_pending_tm(struct MPT3SAS_ADAPTER *ioc, u16 smid)
{
    struct _tr_list *delayed_tr;

    /* Volume TRs first; the send helpers acquire their own smid, so the
     * current one is freed here before replaying.
     */
    if (!list_empty(&ioc->delayed_tr_volume_list)) {
        delayed_tr = list_entry(ioc->delayed_tr_volume_list.next,
            struct _tr_list, list);
        mpt3sas_base_free_smid(ioc, smid);
        _scsih_tm_tr_volume_send(ioc, delayed_tr->handle);
        list_del(&delayed_tr->list);
        kfree(delayed_tr);
        return 0;
    }

    if (!list_empty(&ioc->delayed_tr_list)) {
        delayed_tr = list_entry(ioc->delayed_tr_list.next,
            struct _tr_list, list);
        mpt3sas_base_free_smid(ioc, smid);
        _scsih_tm_tr_send(ioc, delayed_tr->handle);
        list_del(&delayed_tr->list);
        kfree(delayed_tr);
        return 0;
    }

    return 1;
}

/**
 * _scsih_check_topo_delete_events - sanity check on topo events
 * @ioc: per adapter object
 * @event_data: the event data payload
 *
 * This routine added to better handle cable breaker.
 *
 * This handles the case where driver receives multiple expander
 * add and delete events in a single shot. When there is a delete event
 * the routine will void any pending add events waiting in the event queue.
*/
static void
_scsih_check_topo_delete_events(struct MPT3SAS_ADAPTER *ioc,
    Mpi2EventDataSasTopologyChangeList_t *event_data)
{
    struct fw_event_work *fw_event;
    Mpi2EventDataSasTopologyChangeList_t *local_event_data;
    u16 expander_handle;
    struct _sas_node *sas_expander;
    unsigned long flags;
    int i, reason_code;
    u16 handle;

    /* Kick off the removal handshake for every non-responding device. */
    for (i = 0 ; i < event_data->NumEntries; i++) {
        handle = le16_to_cpu(event_data->PHY[i].AttachedDevHandle);
        if (!handle)
            continue;
        reason_code = event_data->PHY[i].PhyStatus &
            MPI2_EVENT_SAS_TOPO_RC_MASK;
        if (reason_code == MPI2_EVENT_SAS_TOPO_RC_TARG_NOT_RESPONDING)
            _scsih_tm_tr_send(ioc, handle);
    }

    /* A handle below num_phys means the HBA itself, i.e. the devices
     * are direct attached (no expander in between).
     */
    expander_handle = le16_to_cpu(event_data->ExpanderDevHandle);
    if (expander_handle < ioc->sas_hba.num_phys) {
        _scsih_block_io_to_children_attached_directly(ioc, event_data);
        return;
    }
    if (event_data->ExpStatus ==
        MPI2_EVENT_SAS_TOPO_ES_DELAY_NOT_RESPONDING) {
        /* put expander attached devices into blocking state */
        spin_lock_irqsave(&ioc->sas_node_lock, flags);
        sas_expander = mpt3sas_scsih_expander_find_by_handle(ioc,
            expander_handle);
        _scsih_block_io_to_children_attached_to_ex(ioc, sas_expander);
        spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
        /* Drain the blocking_handles bitmap populated above. */
        do {
            handle = find_first_bit(ioc->blocking_handles,
                ioc->facts.MaxDevHandle);
            if (handle < ioc->facts.MaxDevHandle)
                _scsih_block_io_device(ioc, handle);
        } while (test_and_clear_bit(handle, ioc->blocking_handles));
    } else if (event_data->ExpStatus == MPI2_EVENT_SAS_TOPO_ES_RESPONDING)
        _scsih_block_io_to_children_attached_directly(ioc, event_data);

    if (event_data->ExpStatus != MPI2_EVENT_SAS_TOPO_ES_NOT_RESPONDING)
        return;

    /* mark ignore flag for pending events */
    spin_lock_irqsave(&ioc->fw_event_lock, flags);
    list_for_each_entry(fw_event, &ioc->fw_event_list, list) {
        if (fw_event->event != MPI2_EVENT_SAS_TOPOLOGY_CHANGE_LIST ||
            fw_event->ignore)
            continue;
        local_event_data = (Mpi2EventDataSasTopologyChangeList_t *)
            fw_event->event_data;
        if (local_event_data->ExpStatus ==
            MPI2_EVENT_SAS_TOPO_ES_ADDED ||
            local_event_data->ExpStatus ==
            MPI2_EVENT_SAS_TOPO_ES_RESPONDING) {
            if (le16_to_cpu(local_event_data->ExpanderDevHandle) ==
                expander_handle) {
                dewtprintk(ioc,
                    ioc_info(ioc, "setting ignoring flag\n"));
                fw_event->ignore = 1;
            }
        }
    }
    spin_unlock_irqrestore(&ioc->fw_event_lock, flags);
}

/**
 * _scsih_check_pcie_topo_remove_events - sanity check on topo
 *                                        events
 * @ioc: per adapter object
 * @event_data: the event data payload
 *
 * This handles the case where driver receives multiple switch
 * or device add and delete events in a single shot. When there
 * is a delete event the routine will void any pending add
 * events waiting in the event queue.
 */
static void
_scsih_check_pcie_topo_remove_events(struct MPT3SAS_ADAPTER *ioc,
    Mpi26EventDataPCIeTopologyChangeList_t *event_data)
{
    struct fw_event_work *fw_event;
    Mpi26EventDataPCIeTopologyChangeList_t *local_event_data;
    unsigned long flags;
    int i, reason_code;
    u16 handle, switch_handle;

    /* Start the removal handshake for every non-responding device. */
    for (i = 0; i < event_data->NumEntries; i++) {
        handle =
            le16_to_cpu(event_data->PortEntry[i].AttachedDevHandle);
        if (!handle)
            continue;
        reason_code = event_data->PortEntry[i].PortStatus;
        if (reason_code == MPI26_EVENT_PCIE_TOPO_PS_NOT_RESPONDING)
            _scsih_tm_tr_send(ioc, handle);
    }

    /* No switch handle: devices are attached directly to the HBA. */
    switch_handle = le16_to_cpu(event_data->SwitchDevHandle);
    if (!switch_handle) {
        _scsih_block_io_to_pcie_children_attached_directly(
            ioc, event_data);
        return;
    }
    /* TODO We are not supporting cascaded PCIe Switch removal yet*/
    if ((event_data->SwitchStatus
        == MPI26_EVENT_PCIE_TOPO_SS_DELAY_NOT_RESPONDING) ||
        (event_data->SwitchStatus ==
        MPI26_EVENT_PCIE_TOPO_SS_RESPONDING))
        _scsih_block_io_to_pcie_children_attached_directly(
            ioc, event_data);

    /* NOTE(review): SwitchStatus is compared against the SAS topology
     * MPI2_EVENT_SAS_TOPO_ES_* constants here and below -- confirm their
     * values match the MPI26_EVENT_PCIE_TOPO_SS_* equivalents.
     */
    if (event_data->SwitchStatus != MPI2_EVENT_SAS_TOPO_ES_NOT_RESPONDING)
        return;

    /* mark ignore flag for pending events */
    spin_lock_irqsave(&ioc->fw_event_lock, flags);
    list_for_each_entry(fw_event, &ioc->fw_event_list, list) {
        if (fw_event->event != MPI2_EVENT_PCIE_TOPOLOGY_CHANGE_LIST ||
            fw_event->ignore)
            continue;
        local_event_data =
            (Mpi26EventDataPCIeTopologyChangeList_t *)
            fw_event->event_data;
        if (local_event_data->SwitchStatus ==
            MPI2_EVENT_SAS_TOPO_ES_ADDED ||
            local_event_data->SwitchStatus ==
            MPI2_EVENT_SAS_TOPO_ES_RESPONDING) {
            if (le16_to_cpu(local_event_data->SwitchDevHandle) ==
                switch_handle) {
                dewtprintk(ioc,
                    ioc_info(ioc, "setting ignoring flag for switch event\n"));
                fw_event->ignore = 1;
            }
        }
    }
    spin_unlock_irqrestore(&ioc->fw_event_lock, flags);
}

/**
 * _scsih_set_volume_delete_flag - setting volume delete flag
 * @ioc: per adapter object
 * @handle: device handle
 *
 * This returns nothing.
 */
static void
_scsih_set_volume_delete_flag(struct MPT3SAS_ADAPTER *ioc, u16 handle)
{
    struct _raid_device *raid_device;
    struct MPT3SAS_TARGET *sas_target_priv_data;
    unsigned long flags;

    spin_lock_irqsave(&ioc->raid_device_lock, flags);
    raid_device = mpt3sas_raid_device_find_by_handle(ioc, handle);
    if (raid_device && raid_device->starget &&
        raid_device->starget->hostdata) {
        sas_target_priv_data =
            raid_device->starget->hostdata;
        /* Stops normal I/O from being issued to this target. */
        sas_target_priv_data->deleted = 1;
        dewtprintk(ioc,
            ioc_info(ioc, "setting delete flag: handle(0x%04x), wwid(0x%016llx)\n",
                handle, (u64)raid_device->wwid));
    }
    spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
}

/**
 * _scsih_set_volume_handle_for_tr - set handle for target reset to volume
 * @handle: input handle
 * @a: handle for volume a
 * @b: handle for volume b
 *
 * IR firmware only supports two raid volumes. The purpose of this
 * routine is to set the volume handle in either a or b. When the given
 * input handle is non-zero, or when a and b have not been set before.
 */
static void
_scsih_set_volume_handle_for_tr(u16 handle, u16 *a, u16 *b)
{
    /* Ignore empty handles and duplicates of what is already recorded. */
    if (!handle || handle == *a || handle == *b)
        return;
    if (!*a)
        *a = handle;
    else if (!*b)
        *b = handle;
}

/**
 * _scsih_check_ir_config_unhide_events - check for UNHIDE events
 * @ioc: per adapter object
 * @event_data: the event data payload
 * Context: interrupt time.
 *
 * This routine will send target reset to volume, followed by target
 * resets to the PDs.
* This is called when a PD has been removed, or volume has been deleted
 * or removed. When the target reset is sent to volume, the PD target
 * resets need to be queued to start upon completion of the volume target
 * reset.
 */
static void
_scsih_check_ir_config_unhide_events(struct MPT3SAS_ADAPTER *ioc,
    Mpi2EventDataIrConfigChangeList_t *event_data)
{
    Mpi2EventIrConfigElement_t *element;
    int i;
    u16 handle, volume_handle, a, b;
    struct _tr_list *delayed_tr;

    a = 0;
    b = 0;

    if (ioc->is_warpdrive)
        return;

    /* Volume Resets for Deleted or Removed */
    element = (Mpi2EventIrConfigElement_t *)&event_data->ConfigElement[0];
    for (i = 0; i < event_data->NumElements; i++, element++) {
        /* Foreign configurations are not ours to reset. */
        if (le32_to_cpu(event_data->Flags) &
            MPI2_EVENT_IR_CHANGE_FLAGS_FOREIGN_CONFIG)
            continue;
        if (element->ReasonCode ==
            MPI2_EVENT_IR_CHANGE_RC_VOLUME_DELETED ||
            element->ReasonCode ==
            MPI2_EVENT_IR_CHANGE_RC_REMOVED) {
            volume_handle = le16_to_cpu(element->VolDevHandle);
            _scsih_set_volume_delete_flag(ioc, volume_handle);
            _scsih_set_volume_handle_for_tr(volume_handle, &a, &b);
        }
    }

    /* Volume Resets for UNHIDE events */
    element = (Mpi2EventIrConfigElement_t *)&event_data->ConfigElement[0];
    for (i = 0; i < event_data->NumElements; i++, element++) {
        if (le32_to_cpu(event_data->Flags) &
            MPI2_EVENT_IR_CHANGE_FLAGS_FOREIGN_CONFIG)
            continue;
        if (element->ReasonCode == MPI2_EVENT_IR_CHANGE_RC_UNHIDE) {
            volume_handle = le16_to_cpu(element->VolDevHandle);
            _scsih_set_volume_handle_for_tr(volume_handle, &a, &b);
        }
    }

    /* IR firmware supports at most two volumes (a and b). */
    if (a)
        _scsih_tm_tr_volume_send(ioc, a);
    if (b)
        _scsih_tm_tr_volume_send(ioc, b);

    /* PD target resets */
    element = (Mpi2EventIrConfigElement_t *)&event_data->ConfigElement[0];
    for (i = 0; i < event_data->NumElements; i++, element++) {
        if (element->ReasonCode != MPI2_EVENT_IR_CHANGE_RC_UNHIDE)
            continue;
        handle = le16_to_cpu(element->PhysDiskDevHandle);
        volume_handle = le16_to_cpu(element->VolDevHandle);
        clear_bit(handle, ioc->pd_handles);
        if (!volume_handle)
            _scsih_tm_tr_send(ioc, handle);
        else if (volume_handle == a || volume_handle == b) {
            /* PD belongs to a volume being reset: queue its TR to
             * run after the volume TR completes.
             * NOTE(review): BUG_ON on GFP_ATOMIC allocation failure
             * crashes the kernel on OOM -- consider graceful handling.
             */
            delayed_tr = kzalloc(sizeof(*delayed_tr), GFP_ATOMIC);
            BUG_ON(!delayed_tr);
            INIT_LIST_HEAD(&delayed_tr->list);
            delayed_tr->handle = handle;
            list_add_tail(&delayed_tr->list, &ioc->delayed_tr_list);
            dewtprintk(ioc,
                ioc_info(ioc, "DELAYED:tr:handle(0x%04x), (open)\n",
                    handle));
        } else
            _scsih_tm_tr_send(ioc, handle);
    }
}


/**
 * _scsih_check_volume_delete_events - set delete flag for volumes
 * @ioc: per adapter object
 * @event_data: the event data payload
 * Context: interrupt time.
 *
 * This will handle the case when the cable connected to entire volume is
 * pulled. We will take care of setting the deleted flag so normal IO will
 * not be sent.
 */
static void
_scsih_check_volume_delete_events(struct MPT3SAS_ADAPTER *ioc,
    Mpi2EventDataIrVolume_t *event_data)
{
    u32 state;

    if (event_data->ReasonCode != MPI2_EVENT_IR_VOLUME_RC_STATE_CHANGED)
        return;
    state = le32_to_cpu(event_data->NewValue);
    if (state == MPI2_RAID_VOL_STATE_MISSING || state ==
        MPI2_RAID_VOL_STATE_FAILED)
        _scsih_set_volume_delete_flag(ioc,
            le16_to_cpu(event_data->VolDevHandle));
}

/**
 * _scsih_temp_threshold_events - display temperature threshold exceeded events
 * @ioc: per adapter object
 * @event_data: the temp threshold event data
 * Context: interrupt time.
 */
static void
_scsih_temp_threshold_events(struct MPT3SAS_ADAPTER *ioc,
    Mpi2EventDataTemperature_t *event_data)
{
    u32 doorbell;
    if (ioc->temp_sensors_count >= event_data->SensorNum) {
        /* Status bits 0-3 map to threshold flags 0-3. */
        ioc_err(ioc, "Temperature Threshold flags %s%s%s%s exceeded for Sensor: %d !!!\n",
            le16_to_cpu(event_data->Status) & 0x1 ? "0 " : " ",
            le16_to_cpu(event_data->Status) & 0x2 ? "1 " : " ",
            le16_to_cpu(event_data->Status) & 0x4 ? "2 " : " ",
            le16_to_cpu(event_data->Status) & 0x8 ? "3 " : " ",
            event_data->SensorNum);
        ioc_err(ioc, "Current Temp In Celsius: %d\n",
            event_data->CurrentTemperature);
        /* On MPI2.5+ parts, also report a fault/coredump state that the
         * over-temperature condition may have caused.
         */
        if (ioc->hba_mpi_version_belonged != MPI2_VERSION) {
            doorbell = mpt3sas_base_get_iocstate(ioc, 0);
            if ((doorbell & MPI2_IOC_STATE_MASK) ==
                MPI2_IOC_STATE_FAULT) {
                mpt3sas_print_fault_code(ioc,
                    doorbell & MPI2_DOORBELL_DATA_MASK);
            } else if ((doorbell & MPI2_IOC_STATE_MASK) ==
                MPI2_IOC_STATE_COREDUMP) {
                mpt3sas_print_coredump_info(ioc,
                    doorbell & MPI2_DOORBELL_DATA_MASK);
            }
        }
    }
}

/* Track (or clear) an outstanding ATA passthrough command on the device.
 * When @pending is true, returns non-zero if an ATA command was already
 * pending (caller should requeue); bit 0 of ata_command_pending is the flag.
 */
static int _scsih_set_satl_pending(struct scsi_cmnd *scmd, bool pending)
{
    struct MPT3SAS_DEVICE *priv = scmd->device->hostdata;

    /* Only ATA_12/ATA_16 passthrough commands are serialized. */
    if (scmd->cmnd[0] != ATA_12 && scmd->cmnd[0] != ATA_16)
        return 0;

    if (pending)
        return test_and_set_bit(0, &priv->ata_command_pending);

    clear_bit(0, &priv->ata_command_pending);
    return 0;
}

/**
 * _scsih_flush_running_cmds - completing outstanding commands.
 * @ioc: per adapter object
 *
 * The flushing out of all pending scmd commands following host reset,
 * where all IO is dropped to the floor.
*/
static void
_scsih_flush_running_cmds(struct MPT3SAS_ADAPTER *ioc)
{
    struct scsi_cmnd *scmd;
    struct scsiio_tracker *st;
    u16 smid;
    int count = 0;

    /* Walk every SCSI I/O smid and complete whatever is outstanding. */
    for (smid = 1; smid <= ioc->scsiio_depth; smid++) {
        scmd = mpt3sas_scsih_scsi_lookup_get(ioc, smid);
        if (!scmd)
            continue;
        count++;
        _scsih_set_satl_pending(scmd, false);
        st = scsi_cmd_priv(scmd);
        mpt3sas_base_clear_st(ioc, st);
        scsi_dma_unmap(scmd);
        /* DID_NO_CONNECT if the adapter is going away, otherwise let
         * the midlayer retry after the reset.
         */
        if (ioc->pci_error_recovery || ioc->remove_host)
            scmd->result = DID_NO_CONNECT << 16;
        else
            scmd->result = DID_RESET << 16;
        scsi_done(scmd);
    }
    dtmprintk(ioc, ioc_info(ioc, "completing %d cmds\n", count));
}

/**
 * _scsih_setup_eedp - setup MPI request for EEDP transfer
 * @ioc: per adapter object
 * @scmd: pointer to scsi command object
 * @mpi_request: pointer to the SCSI_IO request message frame
 *
 * Supporting protection 1 and 3.
 */
static void
_scsih_setup_eedp(struct MPT3SAS_ADAPTER *ioc, struct scsi_cmnd *scmd,
    Mpi25SCSIIORequest_t *mpi_request)
{
    u16 eedp_flags;
    Mpi25SCSIIORequest_t *mpi_request_3v =
        (Mpi25SCSIIORequest_t *)mpi_request;

    switch (scsi_get_prot_op(scmd)) {
    case SCSI_PROT_READ_STRIP:
        eedp_flags = MPI2_SCSIIO_EEDPFLAGS_CHECK_REMOVE_OP;
        break;
    case SCSI_PROT_WRITE_INSERT:
        eedp_flags = MPI2_SCSIIO_EEDPFLAGS_INSERT_OP;
        break;
    default:
        /* No EEDP action for this protection op. */
        return;
    }

    if (scmd->prot_flags & SCSI_PROT_GUARD_CHECK)
        eedp_flags |= MPI2_SCSIIO_EEDPFLAGS_CHECK_GUARD;

    if (scmd->prot_flags & SCSI_PROT_REF_CHECK)
        eedp_flags |= MPI2_SCSIIO_EEDPFLAGS_CHECK_REFTAG;

    if (scmd->prot_flags & SCSI_PROT_REF_INCREMENT) {
        eedp_flags |= MPI2_SCSIIO_EEDPFLAGS_INC_PRI_REFTAG;

        /* NOTE(review): reference tag is stored big-endian while the
         * rest of the frame is little-endian -- confirm this matches
         * the firmware's expected T10 PI layout.
         */
        mpi_request->CDB.EEDP32.PrimaryReferenceTag =
            cpu_to_be32(scsi_prot_ref_tag(scmd));
    }

    mpi_request_3v->EEDPBlockSize = cpu_to_le16(scsi_prot_interval(scmd));

    if (ioc->is_gen35_ioc)
        eedp_flags |= MPI25_SCSIIO_EEDPFLAGS_APPTAG_DISABLE_MODE;
    mpi_request->EEDPFlags = cpu_to_le16(eedp_flags);
}

/**
 * _scsih_eedp_error_handling - return sense code for EEDP errors
 * @scmd: pointer to scsi command object
 * @ioc_status: ioc status
 */
static void
_scsih_eedp_error_handling(struct scsi_cmnd *scmd, u16 ioc_status)
{
    u8 ascq;

    /* Map the IOC's EEDP error to the matching T10 ASCQ for
     * asc 0x10 (ID CRC or ECC error).
     */
    switch (ioc_status) {
    case MPI2_IOCSTATUS_EEDP_GUARD_ERROR:
        ascq = 0x01;
        break;
    case MPI2_IOCSTATUS_EEDP_APP_TAG_ERROR:
        ascq = 0x02;
        break;
    case MPI2_IOCSTATUS_EEDP_REF_TAG_ERROR:
        ascq = 0x03;
        break;
    default:
        ascq = 0x00;
        break;
    }
    scsi_build_sense(scmd, 0, ILLEGAL_REQUEST, 0x10, ascq);
    set_host_byte(scmd, DID_ABORT);
}

/**
 * scsih_qcmd - main scsi request entry point
 * @shost: SCSI host pointer
 * @scmd: pointer to scsi command object
 *
 * The callback index is set inside `ioc->scsi_io_cb_idx`.
 *
 * Return: 0 on success.
If there's a failure, return either: 5117 * SCSI_MLQUEUE_DEVICE_BUSY if the device queue is full, or 5118 * SCSI_MLQUEUE_HOST_BUSY if the entire host queue is full 5119 */ 5120 static int 5121 scsih_qcmd(struct Scsi_Host *shost, struct scsi_cmnd *scmd) 5122 { 5123 struct MPT3SAS_ADAPTER *ioc = shost_priv(shost); 5124 struct MPT3SAS_DEVICE *sas_device_priv_data; 5125 struct MPT3SAS_TARGET *sas_target_priv_data; 5126 struct _raid_device *raid_device; 5127 struct request *rq = scsi_cmd_to_rq(scmd); 5128 int class; 5129 Mpi25SCSIIORequest_t *mpi_request; 5130 struct _pcie_device *pcie_device = NULL; 5131 u32 mpi_control; 5132 u16 smid; 5133 u16 handle; 5134 5135 if (ioc->logging_level & MPT_DEBUG_SCSI) 5136 scsi_print_command(scmd); 5137 5138 sas_device_priv_data = scmd->device->hostdata; 5139 if (!sas_device_priv_data || !sas_device_priv_data->sas_target) { 5140 scmd->result = DID_NO_CONNECT << 16; 5141 scsi_done(scmd); 5142 return 0; 5143 } 5144 5145 if (!(_scsih_allow_scmd_to_device(ioc, scmd))) { 5146 scmd->result = DID_NO_CONNECT << 16; 5147 scsi_done(scmd); 5148 return 0; 5149 } 5150 5151 sas_target_priv_data = sas_device_priv_data->sas_target; 5152 5153 /* invalid device handle */ 5154 handle = sas_target_priv_data->handle; 5155 5156 /* 5157 * Avoid error handling escallation when device is disconnected 5158 */ 5159 if (handle == MPT3SAS_INVALID_DEVICE_HANDLE || sas_device_priv_data->block) { 5160 if (scmd->device->host->shost_state == SHOST_RECOVERY && 5161 scmd->cmnd[0] == TEST_UNIT_READY) { 5162 scsi_build_sense(scmd, 0, UNIT_ATTENTION, 0x29, 0x07); 5163 scsi_done(scmd); 5164 return 0; 5165 } 5166 } 5167 5168 if (handle == MPT3SAS_INVALID_DEVICE_HANDLE) { 5169 scmd->result = DID_NO_CONNECT << 16; 5170 scsi_done(scmd); 5171 return 0; 5172 } 5173 5174 5175 if (ioc->shost_recovery || ioc->ioc_link_reset_in_progress) { 5176 /* host recovery or link resets sent via IOCTLs */ 5177 return SCSI_MLQUEUE_HOST_BUSY; 5178 } else if (sas_target_priv_data->deleted) { 5179 
/* device has been deleted */ 5180 scmd->result = DID_NO_CONNECT << 16; 5181 scsi_done(scmd); 5182 return 0; 5183 } else if (sas_target_priv_data->tm_busy || 5184 sas_device_priv_data->block) { 5185 /* device busy with task management */ 5186 return SCSI_MLQUEUE_DEVICE_BUSY; 5187 } 5188 5189 /* 5190 * Bug work around for firmware SATL handling. The loop 5191 * is based on atomic operations and ensures consistency 5192 * since we're lockless at this point 5193 */ 5194 do { 5195 if (test_bit(0, &sas_device_priv_data->ata_command_pending)) 5196 return SCSI_MLQUEUE_DEVICE_BUSY; 5197 } while (_scsih_set_satl_pending(scmd, true)); 5198 5199 if (scmd->sc_data_direction == DMA_FROM_DEVICE) 5200 mpi_control = MPI2_SCSIIO_CONTROL_READ; 5201 else if (scmd->sc_data_direction == DMA_TO_DEVICE) 5202 mpi_control = MPI2_SCSIIO_CONTROL_WRITE; 5203 else 5204 mpi_control = MPI2_SCSIIO_CONTROL_NODATATRANSFER; 5205 5206 /* set tags */ 5207 mpi_control |= MPI2_SCSIIO_CONTROL_SIMPLEQ; 5208 /* NCQ Prio supported, make sure control indicated high priority */ 5209 if (sas_device_priv_data->ncq_prio_enable) { 5210 class = IOPRIO_PRIO_CLASS(req_get_ioprio(rq)); 5211 if (class == IOPRIO_CLASS_RT) 5212 mpi_control |= 1 << MPI2_SCSIIO_CONTROL_CMDPRI_SHIFT; 5213 } 5214 /* Make sure Device is not raid volume. 5215 * We do not expose raid functionality to upper layer for warpdrive. 
5216 */ 5217 if (((!ioc->is_warpdrive && !scsih_is_raid(&scmd->device->sdev_gendev)) 5218 && !scsih_is_nvme(&scmd->device->sdev_gendev)) 5219 && sas_is_tlr_enabled(scmd->device) && scmd->cmd_len != 32) 5220 mpi_control |= MPI2_SCSIIO_CONTROL_TLR_ON; 5221 5222 smid = mpt3sas_base_get_smid_scsiio(ioc, ioc->scsi_io_cb_idx, scmd); 5223 if (!smid) { 5224 ioc_err(ioc, "%s: failed obtaining a smid\n", __func__); 5225 _scsih_set_satl_pending(scmd, false); 5226 goto out; 5227 } 5228 mpi_request = mpt3sas_base_get_msg_frame(ioc, smid); 5229 memset(mpi_request, 0, ioc->request_sz); 5230 _scsih_setup_eedp(ioc, scmd, mpi_request); 5231 5232 if (scmd->cmd_len == 32) 5233 mpi_control |= 4 << MPI2_SCSIIO_CONTROL_ADDCDBLEN_SHIFT; 5234 mpi_request->Function = MPI2_FUNCTION_SCSI_IO_REQUEST; 5235 if (sas_device_priv_data->sas_target->flags & 5236 MPT_TARGET_FLAGS_RAID_COMPONENT) 5237 mpi_request->Function = MPI2_FUNCTION_RAID_SCSI_IO_PASSTHROUGH; 5238 else 5239 mpi_request->Function = MPI2_FUNCTION_SCSI_IO_REQUEST; 5240 mpi_request->DevHandle = cpu_to_le16(handle); 5241 mpi_request->DataLength = cpu_to_le32(scsi_bufflen(scmd)); 5242 mpi_request->Control = cpu_to_le32(mpi_control); 5243 mpi_request->IoFlags = cpu_to_le16(scmd->cmd_len); 5244 mpi_request->MsgFlags = MPI2_SCSIIO_MSGFLAGS_SYSTEM_SENSE_ADDR; 5245 mpi_request->SenseBufferLength = SCSI_SENSE_BUFFERSIZE; 5246 mpi_request->SenseBufferLowAddress = 5247 mpt3sas_base_get_sense_buffer_dma(ioc, smid); 5248 mpi_request->SGLOffset0 = offsetof(Mpi25SCSIIORequest_t, SGL) / 4; 5249 int_to_scsilun(sas_device_priv_data->lun, (struct scsi_lun *) 5250 mpi_request->LUN); 5251 memcpy(mpi_request->CDB.CDB32, scmd->cmnd, scmd->cmd_len); 5252 5253 if (mpi_request->DataLength) { 5254 pcie_device = sas_target_priv_data->pcie_dev; 5255 if (ioc->build_sg_scmd(ioc, scmd, smid, pcie_device)) { 5256 mpt3sas_base_free_smid(ioc, smid); 5257 _scsih_set_satl_pending(scmd, false); 5258 goto out; 5259 } 5260 } else 5261 ioc->build_zero_len_sge(ioc, 
&mpi_request->SGL); 5262 5263 raid_device = sas_target_priv_data->raid_device; 5264 if (raid_device && raid_device->direct_io_enabled) 5265 mpt3sas_setup_direct_io(ioc, scmd, 5266 raid_device, mpi_request); 5267 5268 if (likely(mpi_request->Function == MPI2_FUNCTION_SCSI_IO_REQUEST)) { 5269 if (sas_target_priv_data->flags & MPT_TARGET_FASTPATH_IO) { 5270 mpi_request->IoFlags = cpu_to_le16(scmd->cmd_len | 5271 MPI25_SCSIIO_IOFLAGS_FAST_PATH); 5272 ioc->put_smid_fast_path(ioc, smid, handle); 5273 } else 5274 ioc->put_smid_scsi_io(ioc, smid, 5275 le16_to_cpu(mpi_request->DevHandle)); 5276 } else 5277 ioc->put_smid_default(ioc, smid); 5278 return 0; 5279 5280 out: 5281 return SCSI_MLQUEUE_HOST_BUSY; 5282 } 5283 5284 /** 5285 * _scsih_normalize_sense - normalize descriptor and fixed format sense data 5286 * @sense_buffer: sense data returned by target 5287 * @data: normalized skey/asc/ascq 5288 */ 5289 static void 5290 _scsih_normalize_sense(char *sense_buffer, struct sense_info *data) 5291 { 5292 if ((sense_buffer[0] & 0x7F) >= 0x72) { 5293 /* descriptor format */ 5294 data->skey = sense_buffer[1] & 0x0F; 5295 data->asc = sense_buffer[2]; 5296 data->ascq = sense_buffer[3]; 5297 } else { 5298 /* fixed format */ 5299 data->skey = sense_buffer[2] & 0x0F; 5300 data->asc = sense_buffer[12]; 5301 data->ascq = sense_buffer[13]; 5302 } 5303 } 5304 5305 /** 5306 * _scsih_scsi_ioc_info - translated non-successful SCSI_IO request 5307 * @ioc: per adapter object 5308 * @scmd: pointer to scsi command object 5309 * @mpi_reply: reply mf payload returned from firmware 5310 * @smid: ? 
 *
 * scsi_status - SCSI Status code returned from target device
 * scsi_state - state info associated with SCSI_IO determined by ioc
 * ioc_status - ioc supplied status info
 */
static void
_scsih_scsi_ioc_info(struct MPT3SAS_ADAPTER *ioc, struct scsi_cmnd *scmd,
	Mpi2SCSIIOReply_t *mpi_reply, u16 smid)
{
	u32 response_info;
	u8 *response_bytes;
	u16 ioc_status = le16_to_cpu(mpi_reply->IOCStatus) &
	    MPI2_IOCSTATUS_MASK;
	u8 scsi_state = mpi_reply->SCSIState;
	u8 scsi_status = mpi_reply->SCSIStatus;
	/* Scratch buffer owned by the adapter; built up via strcat below. */
	char *desc_ioc_state = NULL;
	char *desc_scsi_status = NULL;
	char *desc_scsi_state = ioc->tmp_string;
	u32 log_info = le32_to_cpu(mpi_reply->IOCLogInfo);
	struct _sas_device *sas_device = NULL;
	struct _pcie_device *pcie_device = NULL;
	struct scsi_target *starget = scmd->device->sdev_target;
	struct MPT3SAS_TARGET *priv_target = starget->hostdata;
	char *device_str = NULL;

	if (!priv_target)
		return;
	if (ioc->hide_ir_msg)
		device_str = "WarpDrive";
	else
		device_str = "volume";

	/* 0x31170000: suppress this well-known noisy loginfo code. */
	if (log_info == 0x31170000)
		return;

	switch (ioc_status) {
	case MPI2_IOCSTATUS_SUCCESS:
		desc_ioc_state = "success";
		break;
	case MPI2_IOCSTATUS_INVALID_FUNCTION:
		desc_ioc_state = "invalid function";
		break;
	case MPI2_IOCSTATUS_SCSI_RECOVERED_ERROR:
		desc_ioc_state = "scsi recovered error";
		break;
	case MPI2_IOCSTATUS_SCSI_INVALID_DEVHANDLE:
		desc_ioc_state = "scsi invalid dev handle";
		break;
	case MPI2_IOCSTATUS_SCSI_DEVICE_NOT_THERE:
		desc_ioc_state = "scsi device not there";
		break;
	case MPI2_IOCSTATUS_SCSI_DATA_OVERRUN:
		desc_ioc_state = "scsi data overrun";
		break;
	case MPI2_IOCSTATUS_SCSI_DATA_UNDERRUN:
		desc_ioc_state = "scsi data underrun";
		break;
	case MPI2_IOCSTATUS_SCSI_IO_DATA_ERROR:
		desc_ioc_state = "scsi io data error";
		break;
	case MPI2_IOCSTATUS_SCSI_PROTOCOL_ERROR:
		desc_ioc_state = "scsi protocol error";
		break;
	case MPI2_IOCSTATUS_SCSI_TASK_TERMINATED:
		desc_ioc_state = "scsi task terminated";
		break;
	case MPI2_IOCSTATUS_SCSI_RESIDUAL_MISMATCH:
		desc_ioc_state = "scsi residual mismatch";
		break;
	case MPI2_IOCSTATUS_SCSI_TASK_MGMT_FAILED:
		desc_ioc_state = "scsi task mgmt failed";
		break;
	case MPI2_IOCSTATUS_SCSI_IOC_TERMINATED:
		desc_ioc_state = "scsi ioc terminated";
		break;
	case MPI2_IOCSTATUS_SCSI_EXT_TERMINATED:
		desc_ioc_state = "scsi ext terminated";
		break;
	case MPI2_IOCSTATUS_EEDP_GUARD_ERROR:
		desc_ioc_state = "eedp guard error";
		break;
	case MPI2_IOCSTATUS_EEDP_REF_TAG_ERROR:
		desc_ioc_state = "eedp ref tag error";
		break;
	case MPI2_IOCSTATUS_EEDP_APP_TAG_ERROR:
		desc_ioc_state = "eedp app tag error";
		break;
	case MPI2_IOCSTATUS_INSUFFICIENT_POWER:
		desc_ioc_state = "insufficient power";
		break;
	default:
		desc_ioc_state = "unknown";
		break;
	}

	switch (scsi_status) {
	case MPI2_SCSI_STATUS_GOOD:
		desc_scsi_status = "good";
		break;
	case MPI2_SCSI_STATUS_CHECK_CONDITION:
		desc_scsi_status = "check condition";
		break;
	case MPI2_SCSI_STATUS_CONDITION_MET:
		desc_scsi_status = "condition met";
		break;
	case MPI2_SCSI_STATUS_BUSY:
		desc_scsi_status = "busy";
		break;
	case MPI2_SCSI_STATUS_INTERMEDIATE:
		desc_scsi_status = "intermediate";
		break;
	case MPI2_SCSI_STATUS_INTERMEDIATE_CONDMET:
		desc_scsi_status = "intermediate condmet";
		break;
	case MPI2_SCSI_STATUS_RESERVATION_CONFLICT:
		desc_scsi_status = "reservation conflict";
		break;
	case MPI2_SCSI_STATUS_COMMAND_TERMINATED:
		desc_scsi_status = "command terminated";
		break;
	case MPI2_SCSI_STATUS_TASK_SET_FULL:
		desc_scsi_status = "task set full";
		break;
	case MPI2_SCSI_STATUS_ACA_ACTIVE:
		desc_scsi_status = "aca active";
		break;
	case MPI2_SCSI_STATUS_TASK_ABORTED:
		desc_scsi_status = "task aborted";
		break;
	default:
		desc_scsi_status = "unknown";
		break;
	}

	/*
	 * Build a space-separated list of the SCSIState flag names.
	 * When no flags are set, point at a literal instead of the
	 * (now empty) scratch buffer.
	 */
	desc_scsi_state[0] = '\0';
	if (!scsi_state)
		desc_scsi_state = " ";
	if (scsi_state & MPI2_SCSI_STATE_RESPONSE_INFO_VALID)
		strcat(desc_scsi_state, "response info ");
	if (scsi_state & MPI2_SCSI_STATE_TERMINATED)
		strcat(desc_scsi_state, "state terminated ");
	if (scsi_state & MPI2_SCSI_STATE_NO_SCSI_STATUS)
		strcat(desc_scsi_state, "no status ");
	if (scsi_state & MPI2_SCSI_STATE_AUTOSENSE_FAILED)
		strcat(desc_scsi_state, "autosense failed ");
	if (scsi_state & MPI2_SCSI_STATE_AUTOSENSE_VALID)
		strcat(desc_scsi_state, "autosense valid ");

	scsi_print_command(scmd);

	if (priv_target->flags & MPT_TARGET_FLAGS_VOLUME) {
		ioc_warn(ioc, "\t%s wwid(0x%016llx)\n",
			 device_str, (u64)priv_target->sas_address);
	} else if (priv_target->flags & MPT_TARGET_FLAGS_PCIE_DEVICE) {
		pcie_device = mpt3sas_get_pdev_from_target(ioc, priv_target);
		if (pcie_device) {
			ioc_info(ioc, "\twwid(0x%016llx), port(%d)\n",
				 (u64)pcie_device->wwid, pcie_device->port_num);
			if (pcie_device->enclosure_handle != 0)
				ioc_info(ioc, "\tenclosure logical id(0x%016llx), slot(%d)\n",
					 (u64)pcie_device->enclosure_logical_id,
					 pcie_device->slot);
			if (pcie_device->connector_name[0])
				ioc_info(ioc, "\tenclosure level(0x%04x), connector name( %s)\n",
					 pcie_device->enclosure_level,
					 pcie_device->connector_name);
			pcie_device_put(pcie_device);
		}
	} else {
		sas_device = mpt3sas_get_sdev_from_target(ioc, priv_target);
		if (sas_device) {
			ioc_warn(ioc, "\tsas_address(0x%016llx), phy(%d)\n",
				 (u64)sas_device->sas_address, sas_device->phy);

			_scsih_display_enclosure_chassis_info(ioc, sas_device,
			    NULL, NULL);

			sas_device_put(sas_device);
		}
	}

	ioc_warn(ioc, "\thandle(0x%04x), ioc_status(%s)(0x%04x), smid(%d)\n",
		 le16_to_cpu(mpi_reply->DevHandle),
		 desc_ioc_state, ioc_status, smid);
	ioc_warn(ioc, "\trequest_len(%d), underflow(%d), resid(%d)\n",
		 scsi_bufflen(scmd), scmd->underflow, scsi_get_resid(scmd));
	ioc_warn(ioc, "\ttag(%d), transfer_count(%d), sc->result(0x%08x)\n",
		 le16_to_cpu(mpi_reply->TaskTag),
		 le32_to_cpu(mpi_reply->TransferCount), scmd->result);
	ioc_warn(ioc, "\tscsi_status(%s)(0x%02x), scsi_state(%s)(0x%02x)\n",
		 desc_scsi_status, scsi_status, desc_scsi_state, scsi_state);

	if (scsi_state & MPI2_SCSI_STATE_AUTOSENSE_VALID) {
		struct sense_info data;
		_scsih_normalize_sense(scmd->sense_buffer, &data);
		ioc_warn(ioc, "\t[sense_key,asc,ascq]: [0x%02x,0x%02x,0x%02x], count(%d)\n",
			 data.skey, data.asc, data.ascq,
			 le32_to_cpu(mpi_reply->SenseCount));
	}
	if (scsi_state & MPI2_SCSI_STATE_RESPONSE_INFO_VALID) {
		response_info = le32_to_cpu(mpi_reply->ResponseInfo);
		response_bytes = (u8 *)&response_info;
		_scsih_response_code(ioc, response_bytes[0]);
	}
}

/**
 * _scsih_turn_on_pfa_led - illuminate PFA LED
 * @ioc: per adapter object
 * @handle: device handle
 * Context: process
 */
static void
_scsih_turn_on_pfa_led(struct MPT3SAS_ADAPTER *ioc, u16 handle)
{
	Mpi2SepReply_t mpi_reply;
	Mpi2SepRequest_t mpi_request;
	struct _sas_device *sas_device;

	/* Takes a reference; dropped at 'out' on every path. */
	sas_device = mpt3sas_get_sdev_by_handle(ioc, handle);
	if (!sas_device)
		return;

	memset(&mpi_request, 0, sizeof(Mpi2SepRequest_t));
	mpi_request.Function = MPI2_FUNCTION_SCSI_ENCLOSURE_PROCESSOR;
	mpi_request.Action = MPI2_SEP_REQ_ACTION_WRITE_STATUS;
	mpi_request.SlotStatus =
	    cpu_to_le32(MPI2_SEP_REQ_SLOTSTATUS_PREDICTED_FAULT);
	mpi_request.DevHandle = cpu_to_le16(handle);
	mpi_request.Flags = MPI2_SEP_REQ_FLAGS_DEVHANDLE_ADDRESS;
	if ((mpt3sas_base_scsi_enclosure_processor(ioc, &mpi_reply,
	    &mpi_request)) != 0) {
		ioc_err(ioc, "failure at %s:%d/%s()!\n",
			__FILE__, __LINE__, __func__);
		goto out;
	}
	/* Remember the LED state so it can be turned off on removal. */
	sas_device->pfa_led_on = 1;

	if (mpi_reply.IOCStatus || mpi_reply.IOCLogInfo) {
		dewtprintk(ioc,
			   ioc_info(ioc, "enclosure_processor: ioc_status (0x%04x), loginfo(0x%08x)\n",
				    le16_to_cpu(mpi_reply.IOCStatus),
				    le32_to_cpu(mpi_reply.IOCLogInfo)));
		goto out;
	}
 out:
	sas_device_put(sas_device);
}

/**
 * _scsih_turn_off_pfa_led - turn off Fault LED
 * @ioc: per adapter object
 * @sas_device: sas device whose PFA LED has to turned off
 * Context: process
 */
static void
_scsih_turn_off_pfa_led(struct MPT3SAS_ADAPTER *ioc,
	struct _sas_device *sas_device)
{
	Mpi2SepReply_t mpi_reply;
	Mpi2SepRequest_t mpi_request;

	memset(&mpi_request, 0, sizeof(Mpi2SepRequest_t));
	mpi_request.Function = MPI2_FUNCTION_SCSI_ENCLOSURE_PROCESSOR;
	mpi_request.Action = MPI2_SEP_REQ_ACTION_WRITE_STATUS;
	mpi_request.SlotStatus = 0;
	mpi_request.Slot = cpu_to_le16(sas_device->slot);
	/* Device may already be gone: address by enclosure/slot, not handle. */
	mpi_request.DevHandle = 0;
	mpi_request.EnclosureHandle = cpu_to_le16(sas_device->enclosure_handle);
	mpi_request.Flags = MPI2_SEP_REQ_FLAGS_ENCLOSURE_SLOT_ADDRESS;
	if ((mpt3sas_base_scsi_enclosure_processor(ioc, &mpi_reply,
	    &mpi_request)) != 0) {
		ioc_err(ioc, "failure at %s:%d/%s()!\n",
			__FILE__, __LINE__, __func__);
		return;
	}

	if (mpi_reply.IOCStatus || mpi_reply.IOCLogInfo) {
		dewtprintk(ioc,
			   ioc_info(ioc, "enclosure_processor: ioc_status (0x%04x), loginfo(0x%08x)\n",
				    le16_to_cpu(mpi_reply.IOCStatus),
				    le32_to_cpu(mpi_reply.IOCLogInfo)));
		return;
	}
}

/**
 * _scsih_send_event_to_turn_on_pfa_led - fire delayed event
 * @ioc:
 *	per adapter object
 * @handle: device handle
 * Context: interrupt.
 */
static void
_scsih_send_event_to_turn_on_pfa_led(struct MPT3SAS_ADAPTER *ioc, u16 handle)
{
	struct fw_event_work *fw_event;

	fw_event = alloc_fw_event_work(0);
	if (!fw_event)
		return;
	fw_event->event = MPT3SAS_TURN_ON_PFA_LED;
	fw_event->device_handle = handle;
	fw_event->ioc = ioc;
	_scsih_fw_event_add(ioc, fw_event);
	/* _scsih_fw_event_add took its own reference; drop ours. */
	fw_event_work_put(fw_event);
}

/**
 * _scsih_smart_predicted_fault - process smart errors
 * @ioc: per adapter object
 * @handle: device handle
 * Context: interrupt.
 */
static void
_scsih_smart_predicted_fault(struct MPT3SAS_ADAPTER *ioc, u16 handle)
{
	struct scsi_target *starget;
	struct MPT3SAS_TARGET *sas_target_priv_data;
	Mpi2EventNotificationReply_t *event_reply;
	Mpi2EventDataSasDeviceStatusChange_t *event_data;
	struct _sas_device *sas_device;
	ssize_t sz;
	unsigned long flags;

	/* only handle non-raid devices */
	spin_lock_irqsave(&ioc->sas_device_lock, flags);
	sas_device = __mpt3sas_get_sdev_by_handle(ioc, handle);
	if (!sas_device)
		goto out_unlock;

	starget = sas_device->starget;
	sas_target_priv_data = starget->hostdata;

	if ((sas_target_priv_data->flags & MPT_TARGET_FLAGS_RAID_COMPONENT) ||
	    ((sas_target_priv_data->flags & MPT_TARGET_FLAGS_VOLUME)))
		goto out_unlock;

	_scsih_display_enclosure_chassis_info(NULL, sas_device, NULL, starget);

	spin_unlock_irqrestore(&ioc->sas_device_lock, flags);

	/* IBM OEM platforms also light the predicted-fault LED. */
	if (ioc->pdev->subsystem_vendor == PCI_VENDOR_ID_IBM)
		_scsih_send_event_to_turn_on_pfa_led(ioc, handle);

	/* insert into event log */
	sz = offsetof(Mpi2EventNotificationReply_t, EventData) +
	    sizeof(Mpi2EventDataSasDeviceStatusChange_t);
	/* GFP_ATOMIC: may be called from interrupt context. */
	event_reply = kzalloc(sz, GFP_ATOMIC);
	if (!event_reply) {
		ioc_err(ioc, "failure at %s:%d/%s()!\n",
			__FILE__, __LINE__, __func__);
		goto out;
	}

	event_reply->Function = MPI2_FUNCTION_EVENT_NOTIFICATION;
	event_reply->Event =
	    cpu_to_le16(MPI2_EVENT_SAS_DEVICE_STATUS_CHANGE);
	event_reply->MsgLength = sz/4;
	event_reply->EventDataLength =
	    cpu_to_le16(sizeof(Mpi2EventDataSasDeviceStatusChange_t)/4);
	event_data = (Mpi2EventDataSasDeviceStatusChange_t *)
	    event_reply->EventData;
	/* ASC 0x5D: SMART failure prediction threshold exceeded. */
	event_data->ReasonCode = MPI2_EVENT_SAS_DEV_STAT_RC_SMART_DATA;
	event_data->ASC = 0x5D;
	event_data->DevHandle = cpu_to_le16(handle);
	event_data->SASAddress = cpu_to_le64(sas_target_priv_data->sas_address);
	mpt3sas_ctl_add_to_event_log(ioc, event_reply);
	kfree(event_reply);
 out:
	if (sas_device)
		sas_device_put(sas_device);
	return;

 out_unlock:
	spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
	goto out;
}

/**
 * _scsih_io_done - scsi request callback
 * @ioc: per adapter object
 * @smid: system request message index
 * @msix_index: MSIX table index supplied by the OS
 * @reply: reply message frame(lower 32bit addr)
 *
 * Callback handler when using _scsih_qcmd.
 *
 * Return: 1 meaning mf should be freed from _base_interrupt
 *	0 means the mf is freed from this function.
 */
static u8
_scsih_io_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index, u32 reply)
{
	Mpi25SCSIIORequest_t *mpi_request;
	Mpi2SCSIIOReply_t *mpi_reply;
	struct scsi_cmnd *scmd;
	struct scsiio_tracker *st;
	u16 ioc_status;
	u32 xfer_cnt;
	u8 scsi_state;
	u8 scsi_status;
	u32 log_info;
	struct MPT3SAS_DEVICE *sas_device_priv_data;
	u32 response_code = 0;

	mpi_reply = mpt3sas_base_get_reply_virt_addr(ioc, reply);

	scmd = mpt3sas_scsih_scsi_lookup_get(ioc, smid);
	if (scmd == NULL)
		return 1;

	/* Release the per-device ATA pass-through gate, if held. */
	_scsih_set_satl_pending(scmd, false);

	mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);

	/* No reply frame: treat as success (e.g. turbo reply). */
	if (mpi_reply == NULL) {
		scmd->result = DID_OK << 16;
		goto out;
	}

	sas_device_priv_data = scmd->device->hostdata;
	if (!sas_device_priv_data || !sas_device_priv_data->sas_target ||
	    sas_device_priv_data->sas_target->deleted) {
		scmd->result = DID_NO_CONNECT << 16;
		goto out;
	}
	ioc_status = le16_to_cpu(mpi_reply->IOCStatus);

	/*
	 * WARPDRIVE: If direct_io is set then it is directIO,
	 * the failed direct I/O should be redirected to volume
	 */
	st = scsi_cmd_priv(scmd);
	if (st->direct_io &&
	    ((ioc_status & MPI2_IOCSTATUS_MASK)
	     != MPI2_IOCSTATUS_SCSI_TASK_TERMINATED)) {
		/* Re-issue the same smid at the volume handle; the command
		 * is not completed here, so return 0 (mf stays in use). */
		st->direct_io = 0;
		st->scmd = scmd;
		memcpy(mpi_request->CDB.CDB32, scmd->cmnd, scmd->cmd_len);
		mpi_request->DevHandle =
		    cpu_to_le16(sas_device_priv_data->sas_target->handle);
		ioc->put_smid_scsi_io(ioc, smid,
		    sas_device_priv_data->sas_target->handle);
		return 0;
	}
	/* turning off TLR */
	scsi_state = mpi_reply->SCSIState;
	if (scsi_state & MPI2_SCSI_STATE_RESPONSE_INFO_VALID)
		response_code =
		    le32_to_cpu(mpi_reply->ResponseInfo) & 0xFF;
	/* TLR snoop runs only once per device (first completion). */
	if (!sas_device_priv_data->tlr_snoop_check) {
		sas_device_priv_data->tlr_snoop_check++;
		if ((!ioc->is_warpdrive &&
		    !scsih_is_raid(&scmd->device->sdev_gendev) &&
		    !scsih_is_nvme(&scmd->device->sdev_gendev))
		    && sas_is_tlr_enabled(scmd->device) &&
		    response_code == MPI2_SCSITASKMGMT_RSP_INVALID_FRAME) {
			sas_disable_tlr(scmd->device);
			sdev_printk(KERN_INFO, scmd->device, "TLR disabled\n");
		}
	}

	xfer_cnt = le32_to_cpu(mpi_reply->TransferCount);
	scsi_set_resid(scmd, scsi_bufflen(scmd) - xfer_cnt);
	if (ioc_status & MPI2_IOCSTATUS_FLAG_LOG_INFO_AVAILABLE)
		log_info = le32_to_cpu(mpi_reply->IOCLogInfo);
	else
		log_info = 0;
	ioc_status &= MPI2_IOCSTATUS_MASK;
	scsi_status = mpi_reply->SCSIStatus;

	/*
	 * Underrun with zero transfer and a busy-class status means the
	 * device never moved data; report the SCSI status as-is.
	 */
	if (ioc_status == MPI2_IOCSTATUS_SCSI_DATA_UNDERRUN && xfer_cnt == 0 &&
	    (scsi_status == MPI2_SCSI_STATUS_BUSY ||
	     scsi_status == MPI2_SCSI_STATUS_RESERVATION_CONFLICT ||
	     scsi_status == MPI2_SCSI_STATUS_TASK_SET_FULL)) {
		ioc_status = MPI2_IOCSTATUS_SUCCESS;
	}

	if (scsi_state & MPI2_SCSI_STATE_AUTOSENSE_VALID) {
		struct sense_info data;
		const void *sense_data = mpt3sas_base_get_sense_buffer(ioc,
		    smid);
		u32 sz = min_t(u32, SCSI_SENSE_BUFFERSIZE,
		    le32_to_cpu(mpi_reply->SenseCount));
		memcpy(scmd->sense_buffer, sense_data, sz);
		_scsih_normalize_sense(scmd->sense_buffer, &data);
		/* failure prediction threshold exceeded */
		if (data.asc == 0x5D)
			_scsih_smart_predicted_fault(ioc,
			    le16_to_cpu(mpi_reply->DevHandle));
		mpt3sas_trigger_scsi(ioc, data.skey, data.asc, data.ascq);

		if ((ioc->logging_level & MPT_DEBUG_REPLY) &&
		    ((scmd->sense_buffer[2] == UNIT_ATTENTION) ||
		     (scmd->sense_buffer[2] == MEDIUM_ERROR) ||
		     (scmd->sense_buffer[2] == HARDWARE_ERROR)))
			_scsih_scsi_ioc_info(ioc, scmd, mpi_reply, smid);
	}
	switch (ioc_status) {
	case MPI2_IOCSTATUS_BUSY:
	case MPI2_IOCSTATUS_INSUFFICIENT_RESOURCES:
		scmd->result = SAM_STAT_BUSY;
		break;

	case MPI2_IOCSTATUS_SCSI_DEVICE_NOT_THERE:
		scmd->result = DID_NO_CONNECT << 16;
		break;

	case MPI2_IOCSTATUS_SCSI_IOC_TERMINATED:
		if (sas_device_priv_data->block) {
			scmd->result = DID_TRANSPORT_DISRUPTED << 16;
			goto out;
		}
		/* 0x31110630: firmware loginfo for a persistent path fault;
		 * offline the device after a few retries. */
		if (log_info == 0x31110630) {
			if (scmd->retries > 2) {
				scmd->result = DID_NO_CONNECT << 16;
				scsi_device_set_state(scmd->device,
						      SDEV_OFFLINE);
			} else {
				scmd->result = DID_SOFT_ERROR << 16;
				scmd->device->expecting_cc_ua = 1;
			}
			break;
		} else if (log_info == VIRTUAL_IO_FAILED_RETRY) {
			scmd->result = DID_RESET << 16;
			break;
		} else if ((scmd->device->channel == RAID_CHANNEL) &&
			   (scsi_state == (MPI2_SCSI_STATE_TERMINATED |
			    MPI2_SCSI_STATE_NO_SCSI_STATUS))) {
			scmd->result = DID_RESET << 16;
			break;
		}
		scmd->result = DID_SOFT_ERROR << 16;
		break;
	case MPI2_IOCSTATUS_SCSI_TASK_TERMINATED:
	case MPI2_IOCSTATUS_SCSI_EXT_TERMINATED:
		scmd->result = DID_RESET << 16;
		break;

	case MPI2_IOCSTATUS_SCSI_RESIDUAL_MISMATCH:
		if ((xfer_cnt == 0) || (scmd->underflow > xfer_cnt))
			scmd->result = DID_SOFT_ERROR << 16;
		else
			scmd->result = (DID_OK << 16) | scsi_status;
		break;

	case MPI2_IOCSTATUS_SCSI_DATA_UNDERRUN:
		scmd->result = (DID_OK << 16) | scsi_status;

		if ((scsi_state & MPI2_SCSI_STATE_AUTOSENSE_VALID))
			break;

		if (xfer_cnt < scmd->underflow) {
			if (scsi_status == SAM_STAT_BUSY)
				scmd->result = SAM_STAT_BUSY;
			else
				scmd->result = DID_SOFT_ERROR << 16;
		} else if (scsi_state & (MPI2_SCSI_STATE_AUTOSENSE_FAILED |
		     MPI2_SCSI_STATE_NO_SCSI_STATUS))
			scmd->result = DID_SOFT_ERROR << 16;
		else if (scsi_state & MPI2_SCSI_STATE_TERMINATED)
			scmd->result = DID_RESET << 16;
		else if (!xfer_cnt && scmd->cmnd[0] == REPORT_LUNS) {
			/* Fake a CHECK CONDITION / invalid opcode so the
			 * midlayer falls back for REPORT LUNS. */
			mpi_reply->SCSIState = MPI2_SCSI_STATE_AUTOSENSE_VALID;
			mpi_reply->SCSIStatus = SAM_STAT_CHECK_CONDITION;
			scsi_build_sense(scmd, 0, ILLEGAL_REQUEST,
					 0x20, 0);
		}
		break;

	case MPI2_IOCSTATUS_SCSI_DATA_OVERRUN:
		scsi_set_resid(scmd, 0);
		fallthrough;
	case MPI2_IOCSTATUS_SCSI_RECOVERED_ERROR:
	case MPI2_IOCSTATUS_SUCCESS:
		scmd->result = (DID_OK << 16) | scsi_status;
		if (response_code ==
		    MPI2_SCSITASKMGMT_RSP_INVALID_FRAME ||
		    (scsi_state & (MPI2_SCSI_STATE_AUTOSENSE_FAILED |
		     MPI2_SCSI_STATE_NO_SCSI_STATUS)))
			scmd->result = DID_SOFT_ERROR << 16;
		else if (scsi_state & MPI2_SCSI_STATE_TERMINATED)
			scmd->result = DID_RESET << 16;
		break;

	case MPI2_IOCSTATUS_EEDP_GUARD_ERROR:
	case MPI2_IOCSTATUS_EEDP_REF_TAG_ERROR:
	case MPI2_IOCSTATUS_EEDP_APP_TAG_ERROR:
		_scsih_eedp_error_handling(scmd, ioc_status);
		break;

	case MPI2_IOCSTATUS_SCSI_PROTOCOL_ERROR:
	case MPI2_IOCSTATUS_INVALID_FUNCTION:
	case MPI2_IOCSTATUS_INVALID_SGL:
	case MPI2_IOCSTATUS_INTERNAL_ERROR:
	case MPI2_IOCSTATUS_INVALID_FIELD:
	case MPI2_IOCSTATUS_INVALID_STATE:
	case MPI2_IOCSTATUS_SCSI_IO_DATA_ERROR:
	case MPI2_IOCSTATUS_SCSI_TASK_MGMT_FAILED:
	case MPI2_IOCSTATUS_INSUFFICIENT_POWER:
	default:
		scmd->result = DID_SOFT_ERROR << 16;
		break;

	}

	if (scmd->result && (ioc->logging_level & MPT_DEBUG_REPLY))
		_scsih_scsi_ioc_info(ioc, scmd, mpi_reply, smid);

 out:

	scsi_dma_unmap(scmd);
	mpt3sas_base_free_smid(ioc, smid);
	scsi_done(scmd);
	return 0;
}

/**
 * _scsih_update_vphys_after_reset - update the Port's
 *	vphys_list after reset
 * @ioc: per adapter object
 *
 * Returns nothing.
 */
static void
_scsih_update_vphys_after_reset(struct MPT3SAS_ADAPTER *ioc)
{
	u16 sz, ioc_status;
	int i;
	Mpi2ConfigReply_t mpi_reply;
	Mpi2SasIOUnitPage0_t *sas_iounit_pg0 = NULL;
	u16 attached_handle;
	u64 attached_sas_addr;
	u8 found = 0, port_id;
	Mpi2SasPhyPage0_t phy_pg0;
	struct hba_port *port, *port_next, *mport;
	struct virtual_phy *vphy, *vphy_next;
	struct _sas_device *sas_device;

	/*
	 * Mark all the vphys objects as dirty.
	 */
	list_for_each_entry_safe(port, port_next,
	    &ioc->port_table_list, list) {
		if (!port->vphys_mask)
			continue;
		list_for_each_entry_safe(vphy, vphy_next,
		    &port->vphys_list, list) {
			vphy->flags |= MPT_VPHY_FLAG_DIRTY_PHY;
		}
	}

	/*
	 * Read SASIOUnitPage0 to get each HBA Phy's data.
	 */
	sz = struct_size(sas_iounit_pg0, PhyData, ioc->sas_hba.num_phys);
	sas_iounit_pg0 = kzalloc(sz, GFP_KERNEL);
	if (!sas_iounit_pg0) {
		ioc_err(ioc, "failure at %s:%d/%s()!\n",
			__FILE__, __LINE__, __func__);
		return;
	}
	if ((mpt3sas_config_get_sas_iounit_pg0(ioc, &mpi_reply,
	    sas_iounit_pg0, sz)) != 0)
		goto out;
	ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & MPI2_IOCSTATUS_MASK;
	if (ioc_status != MPI2_IOCSTATUS_SUCCESS)
		goto out;
	/*
	 * Loop over each HBA Phy.
	 */
	for (i = 0; i < ioc->sas_hba.num_phys; i++) {
		/*
		 * Check whether Phy's Negotiation Link Rate is > 1.5G or not.
		 */
		if ((sas_iounit_pg0->PhyData[i].NegotiatedLinkRate >> 4) <
		    MPI2_SAS_NEG_LINK_RATE_1_5)
			continue;
		/*
		 * Check whether Phy is connected to SEP device or not,
		 * if it is SEP device then read the Phy's SASPHYPage0 data to
		 * determine whether Phy is a virtual Phy or not. if it is
		 * virtual phy then it is conformed that the attached remote
		 * device is a HBA's vSES device.
		 */
		if (!(le32_to_cpu(
		    sas_iounit_pg0->PhyData[i].ControllerPhyDeviceInfo) &
		    MPI2_SAS_DEVICE_INFO_SEP))
			continue;

		if ((mpt3sas_config_get_phy_pg0(ioc, &mpi_reply, &phy_pg0,
		    i))) {
			ioc_err(ioc, "failure at %s:%d/%s()!\n",
				__FILE__, __LINE__, __func__);
			continue;
		}

		if (!(le32_to_cpu(phy_pg0.PhyInfo) &
		    MPI2_SAS_PHYINFO_VIRTUAL_PHY))
			continue;
		/*
		 * Get the vSES device's SAS Address.
		 */
		attached_handle = le16_to_cpu(
		    sas_iounit_pg0->PhyData[i].AttachedDevHandle);
		if (_scsih_get_sas_address(ioc, attached_handle,
		    &attached_sas_addr) != 0) {
			ioc_err(ioc, "failure at %s:%d/%s()!\n",
				__FILE__, __LINE__, __func__);
			continue;
		}

		found = 0;
		port = port_next = NULL;
		/*
		 * Loop over each virtual_phy object from
		 * each port's vphys_list.
		 */
		list_for_each_entry_safe(port,
		    port_next, &ioc->port_table_list, list) {
			if (!port->vphys_mask)
				continue;
			list_for_each_entry_safe(vphy, vphy_next,
			    &port->vphys_list, list) {
				/*
				 * Continue with next virtual_phy object
				 * if the object is not marked as dirty.
				 */
				if (!(vphy->flags & MPT_VPHY_FLAG_DIRTY_PHY))
					continue;

				/*
				 * Continue with next virtual_phy object
				 * if the object's SAS Address is not equals
				 * to current Phy's vSES device SAS Address.
				 */
				if (vphy->sas_address != attached_sas_addr)
					continue;
				/*
				 * Enable current Phy number bit in object's
				 * phy_mask field.
				 */
				if (!(vphy->phy_mask & (1 << i)))
					vphy->phy_mask = (1 << i);
				/*
				 * Get hba_port object from hba_port table
				 * corresponding to current phy's Port ID.
				 * if there is no hba_port object corresponding
				 * to Phy's Port ID then create a new hba_port
				 * object & add to hba_port table.
				 */
				port_id = sas_iounit_pg0->PhyData[i].Port;
				mport = mpt3sas_get_port_by_id(ioc, port_id, 1);
				if (!mport) {
					mport = kzalloc(
					    sizeof(struct hba_port), GFP_KERNEL);
					if (!mport)
						break;
					mport->port_id = port_id;
					ioc_info(ioc,
					    "%s: hba_port entry: %p, port: %d is added to hba_port list\n",
					    __func__, mport, mport->port_id);
					list_add_tail(&mport->list,
					    &ioc->port_table_list);
				}
				/*
				 * If mport & port pointers are not pointing to
				 * same hba_port object then it means that vSES
				 * device's Port ID got changed after reset and
				 * hence move current virtual_phy object from
				 * port's vphys_list to mport's vphys_list.
				 */
				if (port != mport) {
					if (!mport->vphys_mask)
						INIT_LIST_HEAD(
						    &mport->vphys_list);
					mport->vphys_mask |= (1 << i);
					port->vphys_mask &= ~(1 << i);
					list_move(&vphy->list,
					    &mport->vphys_list);
					sas_device = mpt3sas_get_sdev_by_addr(
					    ioc, attached_sas_addr, port);
					if (sas_device)
						sas_device->port = mport;
				}
				/*
				 * Earlier while updating the hba_port table,
				 * it is determined that there is no other
				 * direct attached device with mport's Port ID,
				 * Hence mport was marked as dirty. Only vSES
				 * device has this Port ID, so unmark the mport
				 * as dirt.
				 */
				if (mport->flags & HBA_PORT_FLAG_DIRTY_PORT) {
					mport->sas_address = 0;
					mport->phy_mask = 0;
					mport->flags &=
					    ~HBA_PORT_FLAG_DIRTY_PORT;
				}
				/*
				 * Unmark current virtual_phy object as dirty.
				 */
				vphy->flags &= ~MPT_VPHY_FLAG_DIRTY_PHY;
				found = 1;
				break;
			}
			if (found)
				break;
		}
	}
 out:
	kfree(sas_iounit_pg0);
}

/**
 * _scsih_get_port_table_after_reset - Construct temporary port table
 * @ioc: per adapter object
 * @port_table: address where port table needs to be constructed
 *
 * return number of HBA port entries available after reset.
 */
static int
_scsih_get_port_table_after_reset(struct MPT3SAS_ADAPTER *ioc,
	struct hba_port *port_table)
{
	u16 sz, ioc_status;
	int i, j;
	Mpi2ConfigReply_t mpi_reply;
	Mpi2SasIOUnitPage0_t *sas_iounit_pg0 = NULL;
	u16 attached_handle;
	u64 attached_sas_addr;
	u8 found = 0, port_count = 0, port_id;

	sz = struct_size(sas_iounit_pg0, PhyData, ioc->sas_hba.num_phys);
	sas_iounit_pg0 = kzalloc(sz, GFP_KERNEL);
	if (!sas_iounit_pg0) {
		ioc_err(ioc, "failure at %s:%d/%s()!\n",
			__FILE__, __LINE__, __func__);
		return port_count;
	}

	if ((mpt3sas_config_get_sas_iounit_pg0(ioc, &mpi_reply,
	    sas_iounit_pg0, sz)) != 0)
		goto out;
	ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & MPI2_IOCSTATUS_MASK;
	if (ioc_status != MPI2_IOCSTATUS_SUCCESS)
		goto out;
	for (i = 0; i < ioc->sas_hba.num_phys; i++) {
		found = 0;
		if ((sas_iounit_pg0->PhyData[i].NegotiatedLinkRate >> 4) <
		    MPI2_SAS_NEG_LINK_RATE_1_5)
			continue;
		attached_handle =
		    le16_to_cpu(sas_iounit_pg0->PhyData[i].AttachedDevHandle);
		if (_scsih_get_sas_address(
		    ioc, attached_handle, &attached_sas_addr) != 0) {
			ioc_err(ioc, "failure at %s:%d/%s()!\n",
				__FILE__, __LINE__, __func__);
			continue;
		}

		for (j = 0; j < port_count; j++) {
			port_id = sas_iounit_pg0->PhyData[i].Port;
			if (port_table[j].port_id == port_id &&
			    port_table[j].sas_address == attached_sas_addr) {
				port_table[j].phy_mask |= (1 << i);
6177 found = 1; 6178 break; 6179 } 6180 } 6181 6182 if (found) 6183 continue; 6184 6185 port_id = sas_iounit_pg0->PhyData[i].Port; 6186 port_table[port_count].port_id = port_id; 6187 port_table[port_count].phy_mask = (1 << i); 6188 port_table[port_count].sas_address = attached_sas_addr; 6189 port_count++; 6190 } 6191 out: 6192 kfree(sas_iounit_pg0); 6193 return port_count; 6194 } 6195 6196 enum hba_port_matched_codes { 6197 NOT_MATCHED = 0, 6198 MATCHED_WITH_ADDR_AND_PHYMASK, 6199 MATCHED_WITH_ADDR_SUBPHYMASK_AND_PORT, 6200 MATCHED_WITH_ADDR_AND_SUBPHYMASK, 6201 MATCHED_WITH_ADDR, 6202 }; 6203 6204 /** 6205 * _scsih_look_and_get_matched_port_entry - Get matched hba port entry 6206 * from HBA port table 6207 * @ioc: per adapter object 6208 * @port_entry: hba port entry from temporary port table which needs to be 6209 * searched for matched entry in the HBA port table 6210 * @matched_port_entry: save matched hba port entry here 6211 * @count: count of matched entries 6212 * 6213 * return type of matched entry found. 
6214 */ 6215 static enum hba_port_matched_codes 6216 _scsih_look_and_get_matched_port_entry(struct MPT3SAS_ADAPTER *ioc, 6217 struct hba_port *port_entry, 6218 struct hba_port **matched_port_entry, int *count) 6219 { 6220 struct hba_port *port_table_entry, *matched_port = NULL; 6221 enum hba_port_matched_codes matched_code = NOT_MATCHED; 6222 int lcount = 0; 6223 *matched_port_entry = NULL; 6224 6225 list_for_each_entry(port_table_entry, &ioc->port_table_list, list) { 6226 if (!(port_table_entry->flags & HBA_PORT_FLAG_DIRTY_PORT)) 6227 continue; 6228 6229 if ((port_table_entry->sas_address == port_entry->sas_address) 6230 && (port_table_entry->phy_mask == port_entry->phy_mask)) { 6231 matched_code = MATCHED_WITH_ADDR_AND_PHYMASK; 6232 matched_port = port_table_entry; 6233 break; 6234 } 6235 6236 if ((port_table_entry->sas_address == port_entry->sas_address) 6237 && (port_table_entry->phy_mask & port_entry->phy_mask) 6238 && (port_table_entry->port_id == port_entry->port_id)) { 6239 matched_code = MATCHED_WITH_ADDR_SUBPHYMASK_AND_PORT; 6240 matched_port = port_table_entry; 6241 continue; 6242 } 6243 6244 if ((port_table_entry->sas_address == port_entry->sas_address) 6245 && (port_table_entry->phy_mask & port_entry->phy_mask)) { 6246 if (matched_code == 6247 MATCHED_WITH_ADDR_SUBPHYMASK_AND_PORT) 6248 continue; 6249 matched_code = MATCHED_WITH_ADDR_AND_SUBPHYMASK; 6250 matched_port = port_table_entry; 6251 continue; 6252 } 6253 6254 if (port_table_entry->sas_address == port_entry->sas_address) { 6255 if (matched_code == 6256 MATCHED_WITH_ADDR_SUBPHYMASK_AND_PORT) 6257 continue; 6258 if (matched_code == MATCHED_WITH_ADDR_AND_SUBPHYMASK) 6259 continue; 6260 matched_code = MATCHED_WITH_ADDR; 6261 matched_port = port_table_entry; 6262 lcount++; 6263 } 6264 } 6265 6266 *matched_port_entry = matched_port; 6267 if (matched_code == MATCHED_WITH_ADDR) 6268 *count = lcount; 6269 return matched_code; 6270 } 6271 6272 /** 6273 * _scsih_del_phy_part_of_anther_port - remove phy if 
it 6274 * is a part of anther port 6275 *@ioc: per adapter object 6276 *@port_table: port table after reset 6277 *@index: hba port entry index 6278 *@port_count: number of ports available after host reset 6279 *@offset: HBA phy bit offset 6280 * 6281 */ 6282 static void 6283 _scsih_del_phy_part_of_anther_port(struct MPT3SAS_ADAPTER *ioc, 6284 struct hba_port *port_table, 6285 int index, u8 port_count, int offset) 6286 { 6287 struct _sas_node *sas_node = &ioc->sas_hba; 6288 u32 i, found = 0; 6289 6290 for (i = 0; i < port_count; i++) { 6291 if (i == index) 6292 continue; 6293 6294 if (port_table[i].phy_mask & (1 << offset)) { 6295 mpt3sas_transport_del_phy_from_an_existing_port( 6296 ioc, sas_node, &sas_node->phy[offset]); 6297 found = 1; 6298 break; 6299 } 6300 } 6301 if (!found) 6302 port_table[index].phy_mask |= (1 << offset); 6303 } 6304 6305 /** 6306 * _scsih_add_or_del_phys_from_existing_port - add/remove phy to/from 6307 * right port 6308 *@ioc: per adapter object 6309 *@hba_port_entry: hba port table entry 6310 *@port_table: temporary port table 6311 *@index: hba port entry index 6312 *@port_count: number of ports available after host reset 6313 * 6314 */ 6315 static void 6316 _scsih_add_or_del_phys_from_existing_port(struct MPT3SAS_ADAPTER *ioc, 6317 struct hba_port *hba_port_entry, struct hba_port *port_table, 6318 int index, int port_count) 6319 { 6320 u32 phy_mask, offset = 0; 6321 struct _sas_node *sas_node = &ioc->sas_hba; 6322 6323 phy_mask = hba_port_entry->phy_mask ^ port_table[index].phy_mask; 6324 6325 for (offset = 0; offset < ioc->sas_hba.num_phys; offset++) { 6326 if (phy_mask & (1 << offset)) { 6327 if (!(port_table[index].phy_mask & (1 << offset))) { 6328 _scsih_del_phy_part_of_anther_port( 6329 ioc, port_table, index, port_count, 6330 offset); 6331 continue; 6332 } 6333 if (sas_node->phy[offset].phy_belongs_to_port) 6334 mpt3sas_transport_del_phy_from_an_existing_port( 6335 ioc, sas_node, &sas_node->phy[offset]); 6336 
mpt3sas_transport_add_phy_to_an_existing_port( 6337 ioc, sas_node, &sas_node->phy[offset], 6338 hba_port_entry->sas_address, 6339 hba_port_entry); 6340 } 6341 } 6342 } 6343 6344 /** 6345 * _scsih_del_dirty_vphy - delete virtual_phy objects marked as dirty. 6346 * @ioc: per adapter object 6347 * 6348 * Returns nothing. 6349 */ 6350 static void 6351 _scsih_del_dirty_vphy(struct MPT3SAS_ADAPTER *ioc) 6352 { 6353 struct hba_port *port, *port_next; 6354 struct virtual_phy *vphy, *vphy_next; 6355 6356 list_for_each_entry_safe(port, port_next, 6357 &ioc->port_table_list, list) { 6358 if (!port->vphys_mask) 6359 continue; 6360 list_for_each_entry_safe(vphy, vphy_next, 6361 &port->vphys_list, list) { 6362 if (vphy->flags & MPT_VPHY_FLAG_DIRTY_PHY) { 6363 drsprintk(ioc, ioc_info(ioc, 6364 "Deleting vphy %p entry from port id: %d\t, Phy_mask 0x%08x\n", 6365 vphy, port->port_id, 6366 vphy->phy_mask)); 6367 port->vphys_mask &= ~vphy->phy_mask; 6368 list_del(&vphy->list); 6369 kfree(vphy); 6370 } 6371 } 6372 if (!port->vphys_mask && !port->sas_address) 6373 port->flags |= HBA_PORT_FLAG_DIRTY_PORT; 6374 } 6375 } 6376 6377 /** 6378 * _scsih_del_dirty_port_entries - delete dirty port entries from port list 6379 * after host reset 6380 *@ioc: per adapter object 6381 * 6382 */ 6383 static void 6384 _scsih_del_dirty_port_entries(struct MPT3SAS_ADAPTER *ioc) 6385 { 6386 struct hba_port *port, *port_next; 6387 6388 list_for_each_entry_safe(port, port_next, 6389 &ioc->port_table_list, list) { 6390 if (!(port->flags & HBA_PORT_FLAG_DIRTY_PORT) || 6391 port->flags & HBA_PORT_FLAG_NEW_PORT) 6392 continue; 6393 6394 drsprintk(ioc, ioc_info(ioc, 6395 "Deleting port table entry %p having Port: %d\t Phy_mask 0x%08x\n", 6396 port, port->port_id, port->phy_mask)); 6397 list_del(&port->list); 6398 kfree(port); 6399 } 6400 } 6401 6402 /** 6403 * _scsih_sas_port_refresh - Update HBA port table after host reset 6404 * @ioc: per adapter object 6405 */ 6406 static void 6407 
_scsih_sas_port_refresh(struct MPT3SAS_ADAPTER *ioc) 6408 { 6409 u32 port_count = 0; 6410 struct hba_port *port_table; 6411 struct hba_port *port_table_entry; 6412 struct hba_port *port_entry = NULL; 6413 int i, j, count = 0, lcount = 0; 6414 int ret; 6415 u64 sas_addr; 6416 u8 num_phys; 6417 6418 drsprintk(ioc, ioc_info(ioc, 6419 "updating ports for sas_host(0x%016llx)\n", 6420 (unsigned long long)ioc->sas_hba.sas_address)); 6421 6422 mpt3sas_config_get_number_hba_phys(ioc, &num_phys); 6423 if (!num_phys) { 6424 ioc_err(ioc, "failure at %s:%d/%s()!\n", 6425 __FILE__, __LINE__, __func__); 6426 return; 6427 } 6428 6429 if (num_phys > ioc->sas_hba.nr_phys_allocated) { 6430 ioc_err(ioc, "failure at %s:%d/%s()!\n", 6431 __FILE__, __LINE__, __func__); 6432 return; 6433 } 6434 ioc->sas_hba.num_phys = num_phys; 6435 6436 port_table = kcalloc(ioc->sas_hba.num_phys, 6437 sizeof(struct hba_port), GFP_KERNEL); 6438 if (!port_table) 6439 return; 6440 6441 port_count = _scsih_get_port_table_after_reset(ioc, port_table); 6442 if (!port_count) 6443 return; 6444 6445 drsprintk(ioc, ioc_info(ioc, "New Port table\n")); 6446 for (j = 0; j < port_count; j++) 6447 drsprintk(ioc, ioc_info(ioc, 6448 "Port: %d\t Phy_mask 0x%08x\t sas_addr(0x%016llx)\n", 6449 port_table[j].port_id, 6450 port_table[j].phy_mask, port_table[j].sas_address)); 6451 6452 list_for_each_entry(port_table_entry, &ioc->port_table_list, list) 6453 port_table_entry->flags |= HBA_PORT_FLAG_DIRTY_PORT; 6454 6455 drsprintk(ioc, ioc_info(ioc, "Old Port table\n")); 6456 port_table_entry = NULL; 6457 list_for_each_entry(port_table_entry, &ioc->port_table_list, list) { 6458 drsprintk(ioc, ioc_info(ioc, 6459 "Port: %d\t Phy_mask 0x%08x\t sas_addr(0x%016llx)\n", 6460 port_table_entry->port_id, 6461 port_table_entry->phy_mask, 6462 port_table_entry->sas_address)); 6463 } 6464 6465 for (j = 0; j < port_count; j++) { 6466 ret = _scsih_look_and_get_matched_port_entry(ioc, 6467 &port_table[j], &port_entry, &count); 6468 if 
(!port_entry) { 6469 drsprintk(ioc, ioc_info(ioc, 6470 "No Matched entry for sas_addr(0x%16llx), Port:%d\n", 6471 port_table[j].sas_address, 6472 port_table[j].port_id)); 6473 continue; 6474 } 6475 6476 switch (ret) { 6477 case MATCHED_WITH_ADDR_SUBPHYMASK_AND_PORT: 6478 case MATCHED_WITH_ADDR_AND_SUBPHYMASK: 6479 _scsih_add_or_del_phys_from_existing_port(ioc, 6480 port_entry, port_table, j, port_count); 6481 break; 6482 case MATCHED_WITH_ADDR: 6483 sas_addr = port_table[j].sas_address; 6484 for (i = 0; i < port_count; i++) { 6485 if (port_table[i].sas_address == sas_addr) 6486 lcount++; 6487 } 6488 6489 if (count > 1 || lcount > 1) 6490 port_entry = NULL; 6491 else 6492 _scsih_add_or_del_phys_from_existing_port(ioc, 6493 port_entry, port_table, j, port_count); 6494 } 6495 6496 if (!port_entry) 6497 continue; 6498 6499 if (port_entry->port_id != port_table[j].port_id) 6500 port_entry->port_id = port_table[j].port_id; 6501 port_entry->flags &= ~HBA_PORT_FLAG_DIRTY_PORT; 6502 port_entry->phy_mask = port_table[j].phy_mask; 6503 } 6504 6505 port_table_entry = NULL; 6506 } 6507 6508 /** 6509 * _scsih_alloc_vphy - allocate virtual_phy object 6510 * @ioc: per adapter object 6511 * @port_id: Port ID number 6512 * @phy_num: HBA Phy number 6513 * 6514 * Returns allocated virtual_phy object. 6515 */ 6516 static struct virtual_phy * 6517 _scsih_alloc_vphy(struct MPT3SAS_ADAPTER *ioc, u8 port_id, u8 phy_num) 6518 { 6519 struct virtual_phy *vphy; 6520 struct hba_port *port; 6521 6522 port = mpt3sas_get_port_by_id(ioc, port_id, 0); 6523 if (!port) 6524 return NULL; 6525 6526 vphy = mpt3sas_get_vphy_by_phy(ioc, port, phy_num); 6527 if (!vphy) { 6528 vphy = kzalloc(sizeof(struct virtual_phy), GFP_KERNEL); 6529 if (!vphy) 6530 return NULL; 6531 6532 if (!port->vphys_mask) 6533 INIT_LIST_HEAD(&port->vphys_list); 6534 6535 /* 6536 * Enable bit corresponding to HBA phy number on its 6537 * parent hba_port object's vphys_mask field. 
		 */
		port->vphys_mask |= (1 << phy_num);
		vphy->phy_mask |= (1 << phy_num);

		list_add_tail(&vphy->list, &port->vphys_list);

		ioc_info(ioc,
		    "vphy entry: %p, port id: %d, phy:%d is added to port's vphys_list\n",
		    vphy, port->port_id, phy_num);
	}
	return vphy;
}

/**
 * _scsih_sas_host_refresh - refreshing sas host object contents
 * @ioc: per adapter object
 * Context: user
 *
 * During port enable, fw will send topology events for every device. Its
 * possible that the handles may change from the previous setting, so this
 * code keeping handles updating if changed.
 */
static void
_scsih_sas_host_refresh(struct MPT3SAS_ADAPTER *ioc)
{
	u16 sz;
	u16 ioc_status;
	int i;
	Mpi2ConfigReply_t mpi_reply;
	Mpi2SasIOUnitPage0_t *sas_iounit_pg0 = NULL;
	u16 attached_handle;
	u8 link_rate, port_id;
	struct hba_port *port;
	Mpi2SasPhyPage0_t phy_pg0;

	dtmprintk(ioc,
		  ioc_info(ioc, "updating handles for sas_host(0x%016llx)\n",
			   (u64)ioc->sas_hba.sas_address));

	sz = struct_size(sas_iounit_pg0, PhyData, ioc->sas_hba.num_phys);
	sas_iounit_pg0 = kzalloc(sz, GFP_KERNEL);
	if (!sas_iounit_pg0) {
		ioc_err(ioc, "failure at %s:%d/%s()!\n",
		    __FILE__, __LINE__, __func__);
		return;
	}

	if ((mpt3sas_config_get_sas_iounit_pg0(ioc, &mpi_reply,
	    sas_iounit_pg0, sz)) != 0)
		goto out;
	ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & MPI2_IOCSTATUS_MASK;
	if (ioc_status != MPI2_IOCSTATUS_SUCCESS)
		goto out;
	for (i = 0; i < ioc->sas_hba.num_phys ; i++) {
		/* NegotiatedLinkRate: logical rate lives in the upper nibble */
		link_rate = sas_iounit_pg0->PhyData[i].NegotiatedLinkRate >> 4;
		if (i == 0)
			ioc->sas_hba.handle = le16_to_cpu(
			    sas_iounit_pg0->PhyData[0].ControllerDevHandle);
		port_id = sas_iounit_pg0->PhyData[i].Port;
		if (!(mpt3sas_get_port_by_id(ioc, port_id, 0))) {
			port = kzalloc(sizeof(struct hba_port), GFP_KERNEL);
			if (!port)
				goto out;

			port->port_id = port_id;
			ioc_info(ioc,
			    "hba_port entry: %p, port: %d is added to hba_port list\n",
			    port, port->port_id);
			if (ioc->shost_recovery)
				port->flags = HBA_PORT_FLAG_NEW_PORT;
			list_add_tail(&port->list, &ioc->port_table_list);
		}
		/*
		 * Check whether current Phy belongs to HBA vSES device or not.
		 */
		if (le32_to_cpu(sas_iounit_pg0->PhyData[i].ControllerPhyDeviceInfo) &
		    MPI2_SAS_DEVICE_INFO_SEP &&
		    (link_rate >=  MPI2_SAS_NEG_LINK_RATE_1_5)) {
			if ((mpt3sas_config_get_phy_pg0(ioc, &mpi_reply,
			    &phy_pg0, i))) {
				ioc_err(ioc,
				    "failure at %s:%d/%s()!\n",
				    __FILE__, __LINE__, __func__);
				goto out;
			}
			if (!(le32_to_cpu(phy_pg0.PhyInfo) &
			    MPI2_SAS_PHYINFO_VIRTUAL_PHY))
				continue;
			/*
			 * Allocate a virtual_phy object for vSES device, if
			 * this vSES device is hot added.
			 */
			if (!_scsih_alloc_vphy(ioc, port_id, i))
				goto out;
			ioc->sas_hba.phy[i].hba_vphy = 1;
		}

		/*
		 * Add new HBA phys to STL if these new phys got added as part
		 * of HBA Firmware upgrade/downgrade operation.
		 */
		if (!ioc->sas_hba.phy[i].phy) {
			if ((mpt3sas_config_get_phy_pg0(ioc, &mpi_reply,
			    &phy_pg0, i))) {
				ioc_err(ioc, "failure at %s:%d/%s()!\n",
				    __FILE__, __LINE__, __func__);
				continue;
			}
			ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
			    MPI2_IOCSTATUS_MASK;
			if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
				ioc_err(ioc, "failure at %s:%d/%s()!\n",
				    __FILE__, __LINE__, __func__);
				continue;
			}
			ioc->sas_hba.phy[i].phy_id = i;
			mpt3sas_transport_add_host_phy(ioc,
			    &ioc->sas_hba.phy[i], phy_pg0,
			    ioc->sas_hba.parent_dev);
			continue;
		}
		ioc->sas_hba.phy[i].handle = ioc->sas_hba.handle;
		attached_handle = le16_to_cpu(sas_iounit_pg0->PhyData[i].
		    AttachedDevHandle);
		if (attached_handle && link_rate < MPI2_SAS_NEG_LINK_RATE_1_5)
			link_rate = MPI2_SAS_NEG_LINK_RATE_1_5;
		ioc->sas_hba.phy[i].port =
		    mpt3sas_get_port_by_id(ioc, port_id, 0);
		mpt3sas_transport_update_links(ioc, ioc->sas_hba.sas_address,
		    attached_handle, i, link_rate,
		    ioc->sas_hba.phy[i].port);
	}
	/*
	 * Clear the phy details if this phy got disabled as part of
	 * HBA Firmware upgrade/downgrade operation.
	 */
	for (i = ioc->sas_hba.num_phys;
	     i < ioc->sas_hba.nr_phys_allocated; i++) {
		if (ioc->sas_hba.phy[i].phy &&
		    ioc->sas_hba.phy[i].phy->negotiated_linkrate >=
		    SAS_LINK_RATE_1_5_GBPS)
			mpt3sas_transport_update_links(ioc,
			    ioc->sas_hba.sas_address, 0, i,
			    MPI2_SAS_NEG_LINK_RATE_PHY_DISABLED, NULL);
	}
 out:
	kfree(sas_iounit_pg0);
}

/**
 * _scsih_sas_host_add - create sas host object
 * @ioc: per adapter object
 *
 * Creating host side data object, stored in ioc->sas_hba
 */
static void
_scsih_sas_host_add(struct MPT3SAS_ADAPTER *ioc)
{
	int i;
	Mpi2ConfigReply_t mpi_reply;
	Mpi2SasIOUnitPage0_t *sas_iounit_pg0 = NULL;
	Mpi2SasIOUnitPage1_t *sas_iounit_pg1 = NULL;
	Mpi2SasPhyPage0_t phy_pg0;
	Mpi2SasDevicePage0_t sas_device_pg0;
	Mpi2SasEnclosurePage0_t enclosure_pg0;
	u16 ioc_status;
	u16 sz;
	u8 device_missing_delay;
	u8 num_phys, port_id;
	struct hba_port *port;

	mpt3sas_config_get_number_hba_phys(ioc, &num_phys);
	if (!num_phys) {
		ioc_err(ioc, "failure at %s:%d/%s()!\n",
		    __FILE__, __LINE__, __func__);
		return;
	}

	/* Over-allocate so a later firmware upgrade can expose more phys. */
	ioc->sas_hba.nr_phys_allocated = max_t(u8,
	    MPT_MAX_HBA_NUM_PHYS, num_phys);
	ioc->sas_hba.phy = kcalloc(ioc->sas_hba.nr_phys_allocated,
	    sizeof(struct _sas_phy), GFP_KERNEL);
	if (!ioc->sas_hba.phy) {
		ioc_err(ioc, "failure at %s:%d/%s()!\n",
		    __FILE__, __LINE__,
__func__);
		goto out;
	}
	ioc->sas_hba.num_phys = num_phys;

	/* sas_iounit page 0 */
	sz = struct_size(sas_iounit_pg0, PhyData, ioc->sas_hba.num_phys);
	sas_iounit_pg0 = kzalloc(sz, GFP_KERNEL);
	if (!sas_iounit_pg0) {
		ioc_err(ioc, "failure at %s:%d/%s()!\n",
		    __FILE__, __LINE__, __func__);
		return;
	}
	if ((mpt3sas_config_get_sas_iounit_pg0(ioc, &mpi_reply,
	    sas_iounit_pg0, sz))) {
		ioc_err(ioc, "failure at %s:%d/%s()!\n",
		    __FILE__, __LINE__, __func__);
		goto out;
	}
	ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
	    MPI2_IOCSTATUS_MASK;
	if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
		ioc_err(ioc, "failure at %s:%d/%s()!\n",
		    __FILE__, __LINE__, __func__);
		goto out;
	}

	/* sas_iounit page 1 */
	sz = struct_size(sas_iounit_pg1, PhyData, ioc->sas_hba.num_phys);
	sas_iounit_pg1 = kzalloc(sz, GFP_KERNEL);
	if (!sas_iounit_pg1) {
		ioc_err(ioc, "failure at %s:%d/%s()!\n",
		    __FILE__, __LINE__, __func__);
		goto out;
	}
	if ((mpt3sas_config_get_sas_iounit_pg1(ioc, &mpi_reply,
	    sas_iounit_pg1, sz))) {
		ioc_err(ioc, "failure at %s:%d/%s()!\n",
		    __FILE__, __LINE__, __func__);
		goto out;
	}
	ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
	    MPI2_IOCSTATUS_MASK;
	if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
		ioc_err(ioc, "failure at %s:%d/%s()!\n",
		    __FILE__, __LINE__, __func__);
		goto out;
	}

	/* Cache the device-missing delays configured in the firmware. */
	ioc->io_missing_delay =
	    sas_iounit_pg1->IODeviceMissingDelay;
	device_missing_delay =
	    sas_iounit_pg1->ReportDeviceMissingDelay;
	if (device_missing_delay & MPI2_SASIOUNIT1_REPORT_MISSING_UNIT_16)
		ioc->device_missing_delay = (device_missing_delay &
		    MPI2_SASIOUNIT1_REPORT_MISSING_TIMEOUT_MASK) * 16;
	else
		ioc->device_missing_delay = device_missing_delay &
		    MPI2_SASIOUNIT1_REPORT_MISSING_TIMEOUT_MASK;

	ioc->sas_hba.parent_dev = &ioc->shost->shost_gendev;
	for (i = 0; i < ioc->sas_hba.num_phys ; i++) {
		if ((mpt3sas_config_get_phy_pg0(ioc, &mpi_reply, &phy_pg0,
		    i))) {
			ioc_err(ioc, "failure at %s:%d/%s()!\n",
			    __FILE__, __LINE__, __func__);
			goto out;
		}
		ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
		    MPI2_IOCSTATUS_MASK;
		if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
			ioc_err(ioc, "failure at %s:%d/%s()!\n",
			    __FILE__, __LINE__, __func__);
			goto out;
		}

		if (i == 0)
			ioc->sas_hba.handle = le16_to_cpu(sas_iounit_pg0->
			    PhyData[0].ControllerDevHandle);

		port_id = sas_iounit_pg0->PhyData[i].Port;
		if (!(mpt3sas_get_port_by_id(ioc, port_id, 0))) {
			port = kzalloc(sizeof(struct hba_port), GFP_KERNEL);
			if (!port)
				goto out;

			port->port_id = port_id;
			ioc_info(ioc,
			    "hba_port entry: %p, port: %d is added to hba_port list\n",
			    port, port->port_id);
			list_add_tail(&port->list,
			    &ioc->port_table_list);
		}

		/*
		 * Check whether current Phy belongs to HBA vSES device or not.
		 */
		if ((le32_to_cpu(phy_pg0.PhyInfo) &
		    MPI2_SAS_PHYINFO_VIRTUAL_PHY) &&
		    (phy_pg0.NegotiatedLinkRate >> 4) >=
		    MPI2_SAS_NEG_LINK_RATE_1_5) {
			/*
			 * Allocate a virtual_phy object for vSES device.
			 */
			if (!_scsih_alloc_vphy(ioc, port_id, i))
				goto out;
			ioc->sas_hba.phy[i].hba_vphy = 1;
		}

		ioc->sas_hba.phy[i].handle = ioc->sas_hba.handle;
		ioc->sas_hba.phy[i].phy_id = i;
		ioc->sas_hba.phy[i].port =
		    mpt3sas_get_port_by_id(ioc, port_id, 0);
		mpt3sas_transport_add_host_phy(ioc, &ioc->sas_hba.phy[i],
		    phy_pg0, ioc->sas_hba.parent_dev);
	}
	if ((mpt3sas_config_get_sas_device_pg0(ioc, &mpi_reply, &sas_device_pg0,
	    MPI2_SAS_DEVICE_PGAD_FORM_HANDLE, ioc->sas_hba.handle))) {
		ioc_err(ioc, "failure at %s:%d/%s()!\n",
		    __FILE__, __LINE__, __func__);
		goto out;
	}
	ioc->sas_hba.enclosure_handle =
	    le16_to_cpu(sas_device_pg0.EnclosureHandle);
	ioc->sas_hba.sas_address = le64_to_cpu(sas_device_pg0.SASAddress);
	ioc_info(ioc, "host_add: handle(0x%04x), sas_addr(0x%016llx), phys(%d)\n",
		 ioc->sas_hba.handle,
		 (u64)ioc->sas_hba.sas_address,
		 ioc->sas_hba.num_phys);

	if (ioc->sas_hba.enclosure_handle) {
		if (!(mpt3sas_config_get_enclosure_pg0(ioc, &mpi_reply,
		    &enclosure_pg0, MPI2_SAS_ENCLOS_PGAD_FORM_HANDLE,
		    ioc->sas_hba.enclosure_handle)))
			ioc->sas_hba.enclosure_logical_id =
			    le64_to_cpu(enclosure_pg0.EnclosureLogicalID);
	}

 out:
	kfree(sas_iounit_pg1);
	kfree(sas_iounit_pg0);
}

/**
 * _scsih_expander_add - creating expander object
 * @ioc: per adapter object
 * @handle: expander handle
 *
 * Creating expander object, stored in ioc->sas_expander_list.
 *
 * Return: 0 for success, else error.
6873 */ 6874 static int 6875 _scsih_expander_add(struct MPT3SAS_ADAPTER *ioc, u16 handle) 6876 { 6877 struct _sas_node *sas_expander; 6878 struct _enclosure_node *enclosure_dev; 6879 Mpi2ConfigReply_t mpi_reply; 6880 Mpi2ExpanderPage0_t expander_pg0; 6881 Mpi2ExpanderPage1_t expander_pg1; 6882 u32 ioc_status; 6883 u16 parent_handle; 6884 u64 sas_address, sas_address_parent = 0; 6885 int i; 6886 unsigned long flags; 6887 struct _sas_port *mpt3sas_port = NULL; 6888 u8 port_id; 6889 6890 int rc = 0; 6891 6892 if (!handle) 6893 return -1; 6894 6895 if (ioc->shost_recovery || ioc->pci_error_recovery) 6896 return -1; 6897 6898 if ((mpt3sas_config_get_expander_pg0(ioc, &mpi_reply, &expander_pg0, 6899 MPI2_SAS_EXPAND_PGAD_FORM_HNDL, handle))) { 6900 ioc_err(ioc, "failure at %s:%d/%s()!\n", 6901 __FILE__, __LINE__, __func__); 6902 return -1; 6903 } 6904 6905 ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & 6906 MPI2_IOCSTATUS_MASK; 6907 if (ioc_status != MPI2_IOCSTATUS_SUCCESS) { 6908 ioc_err(ioc, "failure at %s:%d/%s()!\n", 6909 __FILE__, __LINE__, __func__); 6910 return -1; 6911 } 6912 6913 /* handle out of order topology events */ 6914 parent_handle = le16_to_cpu(expander_pg0.ParentDevHandle); 6915 if (_scsih_get_sas_address(ioc, parent_handle, &sas_address_parent) 6916 != 0) { 6917 ioc_err(ioc, "failure at %s:%d/%s()!\n", 6918 __FILE__, __LINE__, __func__); 6919 return -1; 6920 } 6921 6922 port_id = expander_pg0.PhysicalPort; 6923 if (sas_address_parent != ioc->sas_hba.sas_address) { 6924 spin_lock_irqsave(&ioc->sas_node_lock, flags); 6925 sas_expander = mpt3sas_scsih_expander_find_by_sas_address(ioc, 6926 sas_address_parent, 6927 mpt3sas_get_port_by_id(ioc, port_id, 0)); 6928 spin_unlock_irqrestore(&ioc->sas_node_lock, flags); 6929 if (!sas_expander) { 6930 rc = _scsih_expander_add(ioc, parent_handle); 6931 if (rc != 0) 6932 return rc; 6933 } 6934 } 6935 6936 spin_lock_irqsave(&ioc->sas_node_lock, flags); 6937 sas_address = le64_to_cpu(expander_pg0.SASAddress); 6938 
sas_expander = mpt3sas_scsih_expander_find_by_sas_address(ioc, 6939 sas_address, mpt3sas_get_port_by_id(ioc, port_id, 0)); 6940 spin_unlock_irqrestore(&ioc->sas_node_lock, flags); 6941 6942 if (sas_expander) 6943 return 0; 6944 6945 sas_expander = kzalloc(sizeof(struct _sas_node), 6946 GFP_KERNEL); 6947 if (!sas_expander) { 6948 ioc_err(ioc, "failure at %s:%d/%s()!\n", 6949 __FILE__, __LINE__, __func__); 6950 return -1; 6951 } 6952 6953 sas_expander->handle = handle; 6954 sas_expander->num_phys = expander_pg0.NumPhys; 6955 sas_expander->sas_address_parent = sas_address_parent; 6956 sas_expander->sas_address = sas_address; 6957 sas_expander->port = mpt3sas_get_port_by_id(ioc, port_id, 0); 6958 if (!sas_expander->port) { 6959 ioc_err(ioc, "failure at %s:%d/%s()!\n", 6960 __FILE__, __LINE__, __func__); 6961 rc = -1; 6962 goto out_fail; 6963 } 6964 6965 ioc_info(ioc, "expander_add: handle(0x%04x), parent(0x%04x), sas_addr(0x%016llx), phys(%d)\n", 6966 handle, parent_handle, 6967 (u64)sas_expander->sas_address, sas_expander->num_phys); 6968 6969 if (!sas_expander->num_phys) { 6970 rc = -1; 6971 goto out_fail; 6972 } 6973 sas_expander->phy = kcalloc(sas_expander->num_phys, 6974 sizeof(struct _sas_phy), GFP_KERNEL); 6975 if (!sas_expander->phy) { 6976 ioc_err(ioc, "failure at %s:%d/%s()!\n", 6977 __FILE__, __LINE__, __func__); 6978 rc = -1; 6979 goto out_fail; 6980 } 6981 6982 INIT_LIST_HEAD(&sas_expander->sas_port_list); 6983 mpt3sas_port = mpt3sas_transport_port_add(ioc, handle, 6984 sas_address_parent, sas_expander->port); 6985 if (!mpt3sas_port) { 6986 ioc_err(ioc, "failure at %s:%d/%s()!\n", 6987 __FILE__, __LINE__, __func__); 6988 rc = -1; 6989 goto out_fail; 6990 } 6991 sas_expander->parent_dev = &mpt3sas_port->rphy->dev; 6992 sas_expander->rphy = mpt3sas_port->rphy; 6993 6994 for (i = 0 ; i < sas_expander->num_phys ; i++) { 6995 if ((mpt3sas_config_get_expander_pg1(ioc, &mpi_reply, 6996 &expander_pg1, i, handle))) { 6997 ioc_err(ioc, "failure at %s:%d/%s()!\n", 
6998 __FILE__, __LINE__, __func__); 6999 rc = -1; 7000 goto out_fail; 7001 } 7002 sas_expander->phy[i].handle = handle; 7003 sas_expander->phy[i].phy_id = i; 7004 sas_expander->phy[i].port = 7005 mpt3sas_get_port_by_id(ioc, port_id, 0); 7006 7007 if ((mpt3sas_transport_add_expander_phy(ioc, 7008 &sas_expander->phy[i], expander_pg1, 7009 sas_expander->parent_dev))) { 7010 ioc_err(ioc, "failure at %s:%d/%s()!\n", 7011 __FILE__, __LINE__, __func__); 7012 rc = -1; 7013 goto out_fail; 7014 } 7015 } 7016 7017 if (sas_expander->enclosure_handle) { 7018 enclosure_dev = 7019 mpt3sas_scsih_enclosure_find_by_handle(ioc, 7020 sas_expander->enclosure_handle); 7021 if (enclosure_dev) 7022 sas_expander->enclosure_logical_id = 7023 le64_to_cpu(enclosure_dev->pg0.EnclosureLogicalID); 7024 } 7025 7026 _scsih_expander_node_add(ioc, sas_expander); 7027 return 0; 7028 7029 out_fail: 7030 7031 if (mpt3sas_port) 7032 mpt3sas_transport_port_remove(ioc, sas_expander->sas_address, 7033 sas_address_parent, sas_expander->port); 7034 kfree(sas_expander); 7035 return rc; 7036 } 7037 7038 /** 7039 * mpt3sas_expander_remove - removing expander object 7040 * @ioc: per adapter object 7041 * @sas_address: expander sas_address 7042 * @port: hba port entry 7043 */ 7044 void 7045 mpt3sas_expander_remove(struct MPT3SAS_ADAPTER *ioc, u64 sas_address, 7046 struct hba_port *port) 7047 { 7048 struct _sas_node *sas_expander; 7049 unsigned long flags; 7050 7051 if (ioc->shost_recovery) 7052 return; 7053 7054 if (!port) 7055 return; 7056 7057 spin_lock_irqsave(&ioc->sas_node_lock, flags); 7058 sas_expander = mpt3sas_scsih_expander_find_by_sas_address(ioc, 7059 sas_address, port); 7060 spin_unlock_irqrestore(&ioc->sas_node_lock, flags); 7061 if (sas_expander) 7062 _scsih_expander_node_remove(ioc, sas_expander); 7063 } 7064 7065 /** 7066 * _scsih_done - internal SCSI_IO callback handler. 
 * @ioc: per adapter object
 * @smid: system request message index
 * @msix_index: MSIX table index supplied by the OS
 * @reply: reply message frame(lower 32bit addr)
 *
 * Callback handler when sending internal generated SCSI_IO.
 * The callback index passed is `ioc->scsih_cb_idx`
 *
 * Return: 1 meaning mf should be freed from _base_interrupt
 *         0 means the mf is freed from this function.
 */
static u8
_scsih_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index, u32 reply)
{
	MPI2DefaultReply_t *mpi_reply;

	mpi_reply =  mpt3sas_base_get_reply_virt_addr(ioc, reply);
	/* Ignore stale replies that don't belong to the pending command. */
	if (ioc->scsih_cmds.status == MPT3_CMD_NOT_USED)
		return 1;
	if (ioc->scsih_cmds.smid != smid)
		return 1;
	ioc->scsih_cmds.status |= MPT3_CMD_COMPLETE;
	if (mpi_reply) {
		/* NOTE(review): MsgLength*4 presumably converts 32-bit words
		 * to bytes - confirm against the MPI spec. */
		memcpy(ioc->scsih_cmds.reply, mpi_reply,
		    mpi_reply->MsgLength*4);
		ioc->scsih_cmds.status |= MPT3_CMD_REPLY_VALID;
	}
	ioc->scsih_cmds.status &= ~MPT3_CMD_PENDING;
	complete(&ioc->scsih_cmds.done);
	return 1;
}




#define MPT3_MAX_LUNS (255)


/**
 * _scsih_check_access_status - check access flags
 * @ioc: per adapter object
 * @sas_address: sas address
 * @handle: sas device handle
 * @access_status: errors returned during discovery of the device
 *
 * Return: 0 for success, else failure
 */
static u8
_scsih_check_access_status(struct MPT3SAS_ADAPTER *ioc, u64 sas_address,
	u16 handle, u8 access_status)
{
	u8 rc = 1;
	char *desc = NULL;

	switch (access_status) {
	case MPI2_SAS_DEVICE0_ASTATUS_NO_ERRORS:
	case MPI2_SAS_DEVICE0_ASTATUS_SATA_NEEDS_INITIALIZATION:
		rc = 0;
		break;
	case MPI2_SAS_DEVICE0_ASTATUS_SATA_CAPABILITY_FAILED:
		desc = "sata capability failed";
		break;
	case MPI2_SAS_DEVICE0_ASTATUS_SATA_AFFILIATION_CONFLICT:
		desc = "sata affiliation conflict";
		break;
	case MPI2_SAS_DEVICE0_ASTATUS_ROUTE_NOT_ADDRESSABLE:
		desc = "route not addressable";
		break;
	case MPI2_SAS_DEVICE0_ASTATUS_SMP_ERROR_NOT_ADDRESSABLE:
		desc = "smp error not addressable";
		break;
	case MPI2_SAS_DEVICE0_ASTATUS_DEVICE_BLOCKED:
		desc = "device blocked";
		break;
	/* All SATA initialization failure codes share one description. */
	case MPI2_SAS_DEVICE0_ASTATUS_SATA_INIT_FAILED:
	case MPI2_SAS_DEVICE0_ASTATUS_SIF_UNKNOWN:
	case MPI2_SAS_DEVICE0_ASTATUS_SIF_AFFILIATION_CONFLICT:
	case MPI2_SAS_DEVICE0_ASTATUS_SIF_DIAG:
	case MPI2_SAS_DEVICE0_ASTATUS_SIF_IDENTIFICATION:
	case MPI2_SAS_DEVICE0_ASTATUS_SIF_CHECK_POWER:
	case MPI2_SAS_DEVICE0_ASTATUS_SIF_PIO_SN:
	case MPI2_SAS_DEVICE0_ASTATUS_SIF_MDMA_SN:
	case MPI2_SAS_DEVICE0_ASTATUS_SIF_UDMA_SN:
	case MPI2_SAS_DEVICE0_ASTATUS_SIF_ZONING_VIOLATION:
	case MPI2_SAS_DEVICE0_ASTATUS_SIF_NOT_ADDRESSABLE:
	case MPI2_SAS_DEVICE0_ASTATUS_SIF_MAX:
		desc = "sata initialization failed";
		break;
	default:
		desc = "unknown";
		break;
	}

	if (!rc)
		return 0;

	ioc_err(ioc, "discovery errors(%s): sas_address(0x%016llx), handle(0x%04x)\n",
		desc, (u64)sas_address, handle);
	return rc;
}

/**
 * _scsih_check_device - checking device responsiveness
 * @ioc: per adapter object
 * @parent_sas_address: sas address of parent expander or sas host
 * @handle: attached device handle
 * @phy_number: phy number
 * @link_rate: new link rate
 */
static void
_scsih_check_device(struct MPT3SAS_ADAPTER *ioc,
	u64 parent_sas_address, u16 handle, u8 phy_number, u8 link_rate)
{
	Mpi2ConfigReply_t mpi_reply;
	Mpi2SasDevicePage0_t sas_device_pg0;
	struct _sas_device *sas_device = NULL;
	struct _enclosure_node *enclosure_dev = NULL;
	u32 ioc_status;
	unsigned long flags;
	u64 sas_address;
	struct scsi_target *starget;
	struct MPT3SAS_TARGET
	    *sas_target_priv_data;
	u32 device_info;
	struct hba_port *port;

	if ((mpt3sas_config_get_sas_device_pg0(ioc, &mpi_reply, &sas_device_pg0,
	    MPI2_SAS_DEVICE_PGAD_FORM_HANDLE, handle)))
		return;

	ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & MPI2_IOCSTATUS_MASK;
	if (ioc_status != MPI2_IOCSTATUS_SUCCESS)
		return;

	/* wide port handling ~ we need only handle device once for the phy that
	 * is matched in sas device page zero
	 */
	if (phy_number != sas_device_pg0.PhyNum)
		return;

	/* check if this is end device */
	device_info = le32_to_cpu(sas_device_pg0.DeviceInfo);
	if (!(_scsih_is_end_device(device_info)))
		return;

	spin_lock_irqsave(&ioc->sas_device_lock, flags);
	sas_address = le64_to_cpu(sas_device_pg0.SASAddress);
	port = mpt3sas_get_port_by_id(ioc, sas_device_pg0.PhysicalPort, 0);
	if (!port)
		goto out_unlock;
	sas_device = __mpt3sas_get_sdev_by_addr(ioc,
	    sas_address, port);

	if (!sas_device)
		goto out_unlock;

	/* Firmware may have reassigned the device handle (e.g. after a
	 * reset); resync our cached handle and enclosure info from the
	 * freshly read sas device page 0.
	 */
	if (unlikely(sas_device->handle != handle)) {
		starget = sas_device->starget;
		sas_target_priv_data = starget->hostdata;
		starget_printk(KERN_INFO, starget,
			"handle changed from(0x%04x) to (0x%04x)!!!\n",
			sas_device->handle, handle);
		sas_target_priv_data->handle = handle;
		sas_device->handle = handle;
		if (le16_to_cpu(sas_device_pg0.Flags) &
		    MPI2_SAS_DEVICE0_FLAGS_ENCL_LEVEL_VALID) {
			sas_device->enclosure_level =
				sas_device_pg0.EnclosureLevel;
			memcpy(sas_device->connector_name,
				sas_device_pg0.ConnectorName, 4);
			sas_device->connector_name[4] = '\0';
		} else {
			sas_device->enclosure_level = 0;
			sas_device->connector_name[0] = '\0';
		}

		sas_device->enclosure_handle =
			le16_to_cpu(sas_device_pg0.EnclosureHandle);
		sas_device->is_chassis_slot_valid = 0;
		enclosure_dev = mpt3sas_scsih_enclosure_find_by_handle(ioc,
			sas_device->enclosure_handle);
		if (enclosure_dev) {
			sas_device->enclosure_logical_id =
			    le64_to_cpu(enclosure_dev->pg0.EnclosureLogicalID);
			if (le16_to_cpu(enclosure_dev->pg0.Flags) &
			    MPI2_SAS_ENCLS0_FLAGS_CHASSIS_SLOT_VALID) {
				sas_device->is_chassis_slot_valid = 1;
				sas_device->chassis_slot =
					enclosure_dev->pg0.ChassisSlot;
			}
		}
	}

	/* check if device is present */
	if (!(le16_to_cpu(sas_device_pg0.Flags) &
	    MPI2_SAS_DEVICE0_FLAGS_DEVICE_PRESENT)) {
		ioc_err(ioc, "device is not present handle(0x%04x), flags!!!\n",
			handle);
		goto out_unlock;
	}

	/* check if there were any issues with discovery */
	if (_scsih_check_access_status(ioc, sas_address, handle,
	    sas_device_pg0.AccessStatus))
		goto out_unlock;

	/* device is responsive: release the lock, then resume blocked I/O */
	spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
	_scsih_ublock_io_device(ioc, sas_address, port);

	if (sas_device)
		sas_device_put(sas_device);
	return;

out_unlock:
	spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
	if (sas_device)
		sas_device_put(sas_device);
}

/**
 * _scsih_add_device - creating sas device object
 * @ioc: per adapter object
 * @handle: sas device handle
 * @phy_num: phy number end device attached to
 * @is_pd: is this hidden raid component
 *
 * Creating end device object, stored in ioc->sas_device_list.
 *
 * Return: 0 for success, non-zero for failure.
7295 */ 7296 static int 7297 _scsih_add_device(struct MPT3SAS_ADAPTER *ioc, u16 handle, u8 phy_num, 7298 u8 is_pd) 7299 { 7300 Mpi2ConfigReply_t mpi_reply; 7301 Mpi2SasDevicePage0_t sas_device_pg0; 7302 struct _sas_device *sas_device; 7303 struct _enclosure_node *enclosure_dev = NULL; 7304 u32 ioc_status; 7305 u64 sas_address; 7306 u32 device_info; 7307 u8 port_id; 7308 7309 if ((mpt3sas_config_get_sas_device_pg0(ioc, &mpi_reply, &sas_device_pg0, 7310 MPI2_SAS_DEVICE_PGAD_FORM_HANDLE, handle))) { 7311 ioc_err(ioc, "failure at %s:%d/%s()!\n", 7312 __FILE__, __LINE__, __func__); 7313 return -1; 7314 } 7315 7316 ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & 7317 MPI2_IOCSTATUS_MASK; 7318 if (ioc_status != MPI2_IOCSTATUS_SUCCESS) { 7319 ioc_err(ioc, "failure at %s:%d/%s()!\n", 7320 __FILE__, __LINE__, __func__); 7321 return -1; 7322 } 7323 7324 /* check if this is end device */ 7325 device_info = le32_to_cpu(sas_device_pg0.DeviceInfo); 7326 if (!(_scsih_is_end_device(device_info))) 7327 return -1; 7328 set_bit(handle, ioc->pend_os_device_add); 7329 sas_address = le64_to_cpu(sas_device_pg0.SASAddress); 7330 7331 /* check if device is present */ 7332 if (!(le16_to_cpu(sas_device_pg0.Flags) & 7333 MPI2_SAS_DEVICE0_FLAGS_DEVICE_PRESENT)) { 7334 ioc_err(ioc, "device is not present handle(0x04%x)!!!\n", 7335 handle); 7336 return -1; 7337 } 7338 7339 /* check if there were any issues with discovery */ 7340 if (_scsih_check_access_status(ioc, sas_address, handle, 7341 sas_device_pg0.AccessStatus)) 7342 return -1; 7343 7344 port_id = sas_device_pg0.PhysicalPort; 7345 sas_device = mpt3sas_get_sdev_by_addr(ioc, 7346 sas_address, mpt3sas_get_port_by_id(ioc, port_id, 0)); 7347 if (sas_device) { 7348 clear_bit(handle, ioc->pend_os_device_add); 7349 sas_device_put(sas_device); 7350 return -1; 7351 } 7352 7353 if (sas_device_pg0.EnclosureHandle) { 7354 enclosure_dev = 7355 mpt3sas_scsih_enclosure_find_by_handle(ioc, 7356 le16_to_cpu(sas_device_pg0.EnclosureHandle)); 7357 if 
(enclosure_dev == NULL) 7358 ioc_info(ioc, "Enclosure handle(0x%04x) doesn't match with enclosure device!\n", 7359 sas_device_pg0.EnclosureHandle); 7360 } 7361 7362 sas_device = kzalloc(sizeof(struct _sas_device), 7363 GFP_KERNEL); 7364 if (!sas_device) { 7365 ioc_err(ioc, "failure at %s:%d/%s()!\n", 7366 __FILE__, __LINE__, __func__); 7367 return 0; 7368 } 7369 7370 kref_init(&sas_device->refcount); 7371 sas_device->handle = handle; 7372 if (_scsih_get_sas_address(ioc, 7373 le16_to_cpu(sas_device_pg0.ParentDevHandle), 7374 &sas_device->sas_address_parent) != 0) 7375 ioc_err(ioc, "failure at %s:%d/%s()!\n", 7376 __FILE__, __LINE__, __func__); 7377 sas_device->enclosure_handle = 7378 le16_to_cpu(sas_device_pg0.EnclosureHandle); 7379 if (sas_device->enclosure_handle != 0) 7380 sas_device->slot = 7381 le16_to_cpu(sas_device_pg0.Slot); 7382 sas_device->device_info = device_info; 7383 sas_device->sas_address = sas_address; 7384 sas_device->phy = sas_device_pg0.PhyNum; 7385 sas_device->fast_path = (le16_to_cpu(sas_device_pg0.Flags) & 7386 MPI25_SAS_DEVICE0_FLAGS_FAST_PATH_CAPABLE) ? 
1 : 0; 7387 sas_device->port = mpt3sas_get_port_by_id(ioc, port_id, 0); 7388 if (!sas_device->port) { 7389 ioc_err(ioc, "failure at %s:%d/%s()!\n", 7390 __FILE__, __LINE__, __func__); 7391 goto out; 7392 } 7393 7394 if (le16_to_cpu(sas_device_pg0.Flags) 7395 & MPI2_SAS_DEVICE0_FLAGS_ENCL_LEVEL_VALID) { 7396 sas_device->enclosure_level = 7397 sas_device_pg0.EnclosureLevel; 7398 memcpy(sas_device->connector_name, 7399 sas_device_pg0.ConnectorName, 4); 7400 sas_device->connector_name[4] = '\0'; 7401 } else { 7402 sas_device->enclosure_level = 0; 7403 sas_device->connector_name[0] = '\0'; 7404 } 7405 /* get enclosure_logical_id & chassis_slot*/ 7406 sas_device->is_chassis_slot_valid = 0; 7407 if (enclosure_dev) { 7408 sas_device->enclosure_logical_id = 7409 le64_to_cpu(enclosure_dev->pg0.EnclosureLogicalID); 7410 if (le16_to_cpu(enclosure_dev->pg0.Flags) & 7411 MPI2_SAS_ENCLS0_FLAGS_CHASSIS_SLOT_VALID) { 7412 sas_device->is_chassis_slot_valid = 1; 7413 sas_device->chassis_slot = 7414 enclosure_dev->pg0.ChassisSlot; 7415 } 7416 } 7417 7418 /* get device name */ 7419 sas_device->device_name = le64_to_cpu(sas_device_pg0.DeviceName); 7420 sas_device->port_type = sas_device_pg0.MaxPortConnections; 7421 ioc_info(ioc, 7422 "handle(0x%0x) sas_address(0x%016llx) port_type(0x%0x)\n", 7423 handle, sas_device->sas_address, sas_device->port_type); 7424 7425 if (ioc->wait_for_discovery_to_complete) 7426 _scsih_sas_device_init_add(ioc, sas_device); 7427 else 7428 _scsih_sas_device_add(ioc, sas_device); 7429 7430 out: 7431 sas_device_put(sas_device); 7432 return 0; 7433 } 7434 7435 /** 7436 * _scsih_remove_device - removing sas device object 7437 * @ioc: per adapter object 7438 * @sas_device: the sas_device object 7439 */ 7440 static void 7441 _scsih_remove_device(struct MPT3SAS_ADAPTER *ioc, 7442 struct _sas_device *sas_device) 7443 { 7444 struct MPT3SAS_TARGET *sas_target_priv_data; 7445 7446 if ((ioc->pdev->subsystem_vendor == PCI_VENDOR_ID_IBM) && 7447 (sas_device->pfa_led_on)) { 
		_scsih_turn_off_pfa_led(ioc, sas_device);
		sas_device->pfa_led_on = 0;
	}

	dewtprintk(ioc,
		   ioc_info(ioc, "%s: enter: handle(0x%04x), sas_addr(0x%016llx)\n",
			    __func__,
			    sas_device->handle, (u64)sas_device->sas_address));

	dewtprintk(ioc, _scsih_display_enclosure_chassis_info(ioc, sas_device,
	    NULL, NULL));

	/* mark the target deleted and unblock any I/O queued against it so
	 * outstanding commands can complete/fail quickly
	 */
	if (sas_device->starget && sas_device->starget->hostdata) {
		sas_target_priv_data = sas_device->starget->hostdata;
		sas_target_priv_data->deleted = 1;
		_scsih_ublock_io_device(ioc, sas_device->sas_address,
		    sas_device->port);
		sas_target_priv_data->handle =
		     MPT3SAS_INVALID_DEVICE_HANDLE;
	}

	if (!ioc->hide_drives)
		mpt3sas_transport_port_remove(ioc,
		    sas_device->sas_address,
		    sas_device->sas_address_parent,
		    sas_device->port);

	ioc_info(ioc, "removing handle(0x%04x), sas_addr(0x%016llx)\n",
		 sas_device->handle, (u64)sas_device->sas_address);

	_scsih_display_enclosure_chassis_info(ioc, sas_device, NULL, NULL);

	dewtprintk(ioc,
		   ioc_info(ioc, "%s: exit: handle(0x%04x), sas_addr(0x%016llx)\n",
			    __func__,
			    sas_device->handle, (u64)sas_device->sas_address));
	dewtprintk(ioc, _scsih_display_enclosure_chassis_info(ioc, sas_device,
	    NULL, NULL));
}

/**
 * _scsih_sas_topology_change_event_debug - debug for topology event
 * @ioc: per adapter object
 * @event_data: event data payload
 * Context: user.
 */
static void
_scsih_sas_topology_change_event_debug(struct MPT3SAS_ADAPTER *ioc,
	Mpi2EventDataSasTopologyChangeList_t *event_data)
{
	int i;
	u16 handle;
	u16 reason_code;
	u8 phy_number;
	char *status_str = NULL;
	u8 link_rate, prev_link_rate;

	switch (event_data->ExpStatus) {
	case MPI2_EVENT_SAS_TOPO_ES_ADDED:
		status_str = "add";
		break;
	case MPI2_EVENT_SAS_TOPO_ES_NOT_RESPONDING:
		status_str = "remove";
		break;
	case MPI2_EVENT_SAS_TOPO_ES_RESPONDING:
	case 0:
		status_str = "responding";
		break;
	case MPI2_EVENT_SAS_TOPO_ES_DELAY_NOT_RESPONDING:
		status_str = "remove delay";
		break;
	default:
		status_str = "unknown status";
		break;
	}
	ioc_info(ioc, "sas topology change: (%s)\n", status_str);
	pr_info("\thandle(0x%04x), enclosure_handle(0x%04x) " \
	    "start_phy(%02d), count(%d)\n",
	    le16_to_cpu(event_data->ExpanderDevHandle),
	    le16_to_cpu(event_data->EnclosureHandle),
	    event_data->StartPhyNum, event_data->NumEntries);
	/* one log line per per-phy entry in the event */
	for (i = 0; i < event_data->NumEntries; i++) {
		handle = le16_to_cpu(event_data->PHY[i].AttachedDevHandle);
		if (!handle)
			continue;
		phy_number = event_data->StartPhyNum + i;
		reason_code = event_data->PHY[i].PhyStatus &
		    MPI2_EVENT_SAS_TOPO_RC_MASK;
		switch (reason_code) {
		case MPI2_EVENT_SAS_TOPO_RC_TARG_ADDED:
			status_str = "target add";
			break;
		case MPI2_EVENT_SAS_TOPO_RC_TARG_NOT_RESPONDING:
			status_str = "target remove";
			break;
		case MPI2_EVENT_SAS_TOPO_RC_DELAY_NOT_RESPONDING:
			status_str = "delay target remove";
			break;
		case MPI2_EVENT_SAS_TOPO_RC_PHY_CHANGED:
			status_str = "link rate change";
			break;
		case MPI2_EVENT_SAS_TOPO_RC_NO_CHANGE:
			status_str = "target responding";
			break;
		default:
			status_str = "unknown";
			break;
		}
		/* LinkRate: new rate in the high nibble, old in the low */
		link_rate = event_data->PHY[i].LinkRate >> 4;
		prev_link_rate = event_data->PHY[i].LinkRate & 0xF;
		pr_info("\tphy(%02d), attached_handle(0x%04x): %s:" \
		    " link rate: new(0x%02x), old(0x%02x)\n", phy_number,
		    handle, status_str, link_rate, prev_link_rate);

	}
}

/**
 * _scsih_sas_topology_change_event - handle topology changes
 * @ioc: per adapter object
 * @fw_event: The fw_event_work object
 * Context: user.
 *
 */
static int
_scsih_sas_topology_change_event(struct MPT3SAS_ADAPTER *ioc,
	struct fw_event_work *fw_event)
{
	int i;
	u16 parent_handle, handle;
	u16 reason_code;
	u8 phy_number, max_phys;
	struct _sas_node *sas_expander;
	u64 sas_address;
	unsigned long flags;
	u8 link_rate, prev_link_rate;
	struct hba_port *port;
	Mpi2EventDataSasTopologyChangeList_t *event_data =
		(Mpi2EventDataSasTopologyChangeList_t *)
		fw_event->event_data;

	if (ioc->logging_level & MPT_DEBUG_EVENT_WORK_TASK)
		_scsih_sas_topology_change_event_debug(ioc, event_data);

	if (ioc->shost_recovery || ioc->remove_host || ioc->pci_error_recovery)
		return 0;

	if (!ioc->sas_hba.num_phys)
		_scsih_sas_host_add(ioc);
	else
		_scsih_sas_host_refresh(ioc);

	if (fw_event->ignore) {
		dewtprintk(ioc, ioc_info(ioc, "ignoring expander event\n"));
		return 0;
	}

	parent_handle = le16_to_cpu(event_data->ExpanderDevHandle);
	port = mpt3sas_get_port_by_id(ioc, event_data->PhysicalPort, 0);

	/* handle expander add */
	if (event_data->ExpStatus == MPI2_EVENT_SAS_TOPO_ES_ADDED)
		if (_scsih_expander_add(ioc, parent_handle) != 0)
			return 0;

	/* resolve the parent: either a known expander or the sas host */
	spin_lock_irqsave(&ioc->sas_node_lock, flags);
	sas_expander = mpt3sas_scsih_expander_find_by_handle(ioc,
	    parent_handle);
	if (sas_expander) {
		sas_address = sas_expander->sas_address;
		max_phys = sas_expander->num_phys;
		port = sas_expander->port;
	}
	else if (parent_handle < ioc->sas_hba.num_phys) {
		sas_address = ioc->sas_hba.sas_address;
		max_phys = ioc->sas_hba.num_phys;
	} else {
		spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
		return 0;
	}
	spin_unlock_irqrestore(&ioc->sas_node_lock, flags);

	/* handle siblings events */
	for (i = 0; i < event_data->NumEntries; i++) {
		if (fw_event->ignore) {
			dewtprintk(ioc,
				   ioc_info(ioc, "ignoring expander event\n"));
			return 0;
		}
		if (ioc->remove_host || ioc->pci_error_recovery)
			return 0;
		phy_number = event_data->StartPhyNum + i;
		if (phy_number >= max_phys)
			continue;
		reason_code = event_data->PHY[i].PhyStatus &
		    MPI2_EVENT_SAS_TOPO_RC_MASK;
		/* skip vacant phys unless the target stopped responding */
		if ((event_data->PHY[i].PhyStatus &
		    MPI2_EVENT_SAS_TOPO_PHYSTATUS_VACANT) && (reason_code !=
		    MPI2_EVENT_SAS_TOPO_RC_TARG_NOT_RESPONDING))
			continue;
		handle = le16_to_cpu(event_data->PHY[i].AttachedDevHandle);
		if (!handle)
			continue;
		/* LinkRate: new rate in high nibble, previous in low */
		link_rate = event_data->PHY[i].LinkRate >> 4;
		prev_link_rate = event_data->PHY[i].LinkRate & 0xF;
		switch (reason_code) {
		case MPI2_EVENT_SAS_TOPO_RC_PHY_CHANGED:

			if (ioc->shost_recovery)
				break;

			if (link_rate == prev_link_rate)
				break;

			mpt3sas_transport_update_links(ioc, sas_address,
			    handle, phy_number, link_rate, port);

			if (link_rate < MPI2_SAS_NEG_LINK_RATE_1_5)
				break;

			_scsih_check_device(ioc, sas_address, handle,
			    phy_number, link_rate);

			/* fall through to the add path only when the OS-side
			 * add of this handle is still pending
			 */
			if (!test_bit(handle, ioc->pend_os_device_add))
				break;

			fallthrough;

		case MPI2_EVENT_SAS_TOPO_RC_TARG_ADDED:

			if (ioc->shost_recovery)
				break;

			mpt3sas_transport_update_links(ioc, sas_address,
			    handle, phy_number, link_rate, port);

			_scsih_add_device(ioc, handle, phy_number, 0);

			break;
		case MPI2_EVENT_SAS_TOPO_RC_TARG_NOT_RESPONDING:

			_scsih_device_remove_by_handle(ioc, handle);
			break;
		}
	}

	/* handle expander removal */
	if (event_data->ExpStatus == MPI2_EVENT_SAS_TOPO_ES_NOT_RESPONDING &&
	    sas_expander)
		mpt3sas_expander_remove(ioc, sas_address, port);

	return 0;
}

/**
 * _scsih_sas_device_status_change_event_debug - debug for device event
 * @ioc: per adapter object
 * @event_data: event data payload
 * Context: user.
 */
static void
_scsih_sas_device_status_change_event_debug(struct MPT3SAS_ADAPTER *ioc,
	Mpi2EventDataSasDeviceStatusChange_t *event_data)
{
	char *reason_str = NULL;

	switch (event_data->ReasonCode) {
	case MPI2_EVENT_SAS_DEV_STAT_RC_SMART_DATA:
		reason_str = "smart data";
		break;
	case MPI2_EVENT_SAS_DEV_STAT_RC_UNSUPPORTED:
		reason_str = "unsupported device discovered";
		break;
	case MPI2_EVENT_SAS_DEV_STAT_RC_INTERNAL_DEVICE_RESET:
		reason_str = "internal device reset";
		break;
	case MPI2_EVENT_SAS_DEV_STAT_RC_TASK_ABORT_INTERNAL:
		reason_str = "internal task abort";
		break;
	case MPI2_EVENT_SAS_DEV_STAT_RC_ABORT_TASK_SET_INTERNAL:
		reason_str = "internal task abort set";
		break;
	case MPI2_EVENT_SAS_DEV_STAT_RC_CLEAR_TASK_SET_INTERNAL:
		reason_str = "internal clear task set";
		break;
	case MPI2_EVENT_SAS_DEV_STAT_RC_QUERY_TASK_INTERNAL:
		reason_str = "internal query task";
		break;
	case MPI2_EVENT_SAS_DEV_STAT_RC_SATA_INIT_FAILURE:
		reason_str = "sata init failure";
		break;
	case MPI2_EVENT_SAS_DEV_STAT_RC_CMP_INTERNAL_DEV_RESET:
		reason_str = "internal device reset complete";
		break;
	case MPI2_EVENT_SAS_DEV_STAT_RC_CMP_TASK_ABORT_INTERNAL:
		reason_str = "internal task abort complete";
		break;
	case MPI2_EVENT_SAS_DEV_STAT_RC_ASYNC_NOTIFICATION:
		reason_str = "internal async notification";
		break;
	case
	     MPI2_EVENT_SAS_DEV_STAT_RC_EXPANDER_REDUCED_FUNCTIONALITY:
		reason_str = "expander reduced functionality";
		break;
	case MPI2_EVENT_SAS_DEV_STAT_RC_CMP_EXPANDER_REDUCED_FUNCTIONALITY:
		reason_str = "expander reduced functionality complete";
		break;
	default:
		reason_str = "unknown reason";
		break;
	}
	ioc_info(ioc, "device status change: (%s)\thandle(0x%04x), sas address(0x%016llx), tag(%d)",
		 reason_str, le16_to_cpu(event_data->DevHandle),
		 (u64)le64_to_cpu(event_data->SASAddress),
		 le16_to_cpu(event_data->TaskTag));
	if (event_data->ReasonCode == MPI2_EVENT_SAS_DEV_STAT_RC_SMART_DATA)
		pr_cont(", ASC(0x%x), ASCQ(0x%x)\n",
			event_data->ASC, event_data->ASCQ);
	pr_cont("\n");
}

/**
 * _scsih_sas_device_status_change_event - handle device status change
 * @ioc: per adapter object
 * @event_data: The fw event
 * Context: user.
 *
 * Tracks internal device reset start/complete events so that task
 * management against the target is held off (tm_busy) while the
 * firmware's internal reset is in flight.
 */
static void
_scsih_sas_device_status_change_event(struct MPT3SAS_ADAPTER *ioc,
	Mpi2EventDataSasDeviceStatusChange_t *event_data)
{
	struct MPT3SAS_TARGET *target_priv_data;
	struct _sas_device *sas_device;
	u64 sas_address;
	unsigned long flags;

	/* In MPI Revision K (0xC), the internal device reset complete was
	 * implemented, so avoid setting tm_busy flag for older firmware.
	 */
	if ((ioc->facts.HeaderVersion >> 8) < 0xC)
		return;

	if (event_data->ReasonCode !=
	    MPI2_EVENT_SAS_DEV_STAT_RC_INTERNAL_DEVICE_RESET &&
	   event_data->ReasonCode !=
	    MPI2_EVENT_SAS_DEV_STAT_RC_CMP_INTERNAL_DEV_RESET)
		return;

	spin_lock_irqsave(&ioc->sas_device_lock, flags);
	sas_address = le64_to_cpu(event_data->SASAddress);
	sas_device = __mpt3sas_get_sdev_by_addr(ioc,
	    sas_address,
	    mpt3sas_get_port_by_id(ioc, event_data->PhysicalPort, 0));

	if (!sas_device || !sas_device->starget)
		goto out;

	target_priv_data = sas_device->starget->hostdata;
	if (!target_priv_data)
		goto out;

	/* reset started -> busy; reset complete -> not busy */
	if (event_data->ReasonCode ==
	    MPI2_EVENT_SAS_DEV_STAT_RC_INTERNAL_DEVICE_RESET)
		target_priv_data->tm_busy = 1;
	else
		target_priv_data->tm_busy = 0;

	if (ioc->logging_level & MPT_DEBUG_EVENT_WORK_TASK)
		ioc_info(ioc,
			 "%s tm_busy flag for handle(0x%04x)\n",
			 (target_priv_data->tm_busy == 1) ? "Enable" : "Disable",
			 target_priv_data->handle);

 out:
	if (sas_device)
		sas_device_put(sas_device);

	spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
}


/**
 * _scsih_check_pcie_access_status - check access flags
 * @ioc: per adapter object
 * @wwid: wwid
 * @handle: sas device handle
 * @access_status: errors returned during discovery of the device
 *
 * Return: 0 for success, else failure
 */
static u8
_scsih_check_pcie_access_status(struct MPT3SAS_ADAPTER *ioc, u64 wwid,
	u16 handle, u8 access_status)
{
	u8 rc = 1;
	char *desc = NULL;

	switch (access_status) {
	case MPI26_PCIEDEV0_ASTATUS_NO_ERRORS:
	case MPI26_PCIEDEV0_ASTATUS_NEEDS_INITIALIZATION:
		rc = 0;
		break;
	case MPI26_PCIEDEV0_ASTATUS_CAPABILITY_FAILED:
		desc = "PCIe device capability failed";
		break;
	case MPI26_PCIEDEV0_ASTATUS_DEVICE_BLOCKED:
		desc = "PCIe device blocked";
		/* blocked devices are not an error; they are still tracked
		 * on the internal list (rc stays 0).
		 * NOTE(review): message text looks garbled ("\n ll only") —
		 * presumably meant "will only be added to the internal
		 * list"; confirm intent before changing the string.
		 */
		ioc_info(ioc,
		    "Device with Access Status (%s): wwid(0x%016llx), "
		    "handle(0x%04x)\n ll only be added to the internal list",
		    desc, (u64)wwid, handle);
		rc = 0;
		break;
	case MPI26_PCIEDEV0_ASTATUS_MEMORY_SPACE_ACCESS_FAILED:
		desc = "PCIe device mem space access failed";
		break;
	case MPI26_PCIEDEV0_ASTATUS_UNSUPPORTED_DEVICE:
		desc = "PCIe device unsupported";
		break;
	case MPI26_PCIEDEV0_ASTATUS_MSIX_REQUIRED:
		desc = "PCIe device MSIx Required";
		break;
	case MPI26_PCIEDEV0_ASTATUS_INIT_FAIL_MAX:
		desc = "PCIe device init fail max";
		break;
	case MPI26_PCIEDEV0_ASTATUS_UNKNOWN:
		desc = "PCIe device status unknown";
		break;
	case MPI26_PCIEDEV0_ASTATUS_NVME_READY_TIMEOUT:
		desc = "nvme ready timeout";
		break;
	case MPI26_PCIEDEV0_ASTATUS_NVME_DEVCFG_UNSUPPORTED:
		desc = "nvme device configuration unsupported";
		break;
	case
	     MPI26_PCIEDEV0_ASTATUS_NVME_IDENTIFY_FAILED:
		desc = "nvme identify failed";
		break;
	case MPI26_PCIEDEV0_ASTATUS_NVME_QCONFIG_FAILED:
		desc = "nvme qconfig failed";
		break;
	case MPI26_PCIEDEV0_ASTATUS_NVME_QCREATION_FAILED:
		desc = "nvme qcreation failed";
		break;
	case MPI26_PCIEDEV0_ASTATUS_NVME_EVENTCFG_FAILED:
		desc = "nvme eventcfg failed";
		break;
	case MPI26_PCIEDEV0_ASTATUS_NVME_GET_FEATURE_STAT_FAILED:
		desc = "nvme get feature stat failed";
		break;
	case MPI26_PCIEDEV0_ASTATUS_NVME_IDLE_TIMEOUT:
		desc = "nvme idle timeout";
		break;
	case MPI26_PCIEDEV0_ASTATUS_NVME_FAILURE_STATUS:
		desc = "nvme failure status";
		break;
	default:
		/* unrecognized status: log the raw value instead of a name */
		ioc_err(ioc, "NVMe discovery error(0x%02x): wwid(0x%016llx), handle(0x%04x)\n",
			access_status, (u64)wwid, handle);
		return rc;
	}

	if (!rc)
		return rc;

	ioc_info(ioc, "NVMe discovery error(%s): wwid(0x%016llx), handle(0x%04x)\n",
		 desc, (u64)wwid, handle);
	return rc;
}

/**
 * _scsih_pcie_device_remove_from_sml - removing pcie device
 * from SML and free up associated memory
 * @ioc: per adapter object
 * @pcie_device: the pcie_device object
 */
static void
_scsih_pcie_device_remove_from_sml(struct MPT3SAS_ADAPTER *ioc,
	struct _pcie_device *pcie_device)
{
	struct MPT3SAS_TARGET *sas_target_priv_data;

	dewtprintk(ioc,
		   ioc_info(ioc, "%s: enter: handle(0x%04x), wwid(0x%016llx)\n",
			    __func__,
			    pcie_device->handle, (u64)pcie_device->wwid));
	if (pcie_device->enclosure_handle != 0)
		dewtprintk(ioc,
			   ioc_info(ioc, "%s: enter: enclosure logical id(0x%016llx), slot(%d)\n",
				    __func__,
				    (u64)pcie_device->enclosure_logical_id,
				    pcie_device->slot));
	if (pcie_device->connector_name[0] != '\0')
		dewtprintk(ioc,
			   ioc_info(ioc, "%s: enter: enclosure level(0x%04x), connector name(%s)\n",
				    __func__,
				    pcie_device->enclosure_level,
				    pcie_device->connector_name));

	/* mark target deleted and unblock queued I/O so it can drain */
	if (pcie_device->starget && pcie_device->starget->hostdata) {
		sas_target_priv_data = pcie_device->starget->hostdata;
		sas_target_priv_data->deleted = 1;
		_scsih_ublock_io_device(ioc, pcie_device->wwid, NULL);
		sas_target_priv_data->handle = MPT3SAS_INVALID_DEVICE_HANDLE;
	}

	ioc_info(ioc, "removing handle(0x%04x), wwid(0x%016llx)\n",
		 pcie_device->handle, (u64)pcie_device->wwid);
	if (pcie_device->enclosure_handle != 0)
		ioc_info(ioc, "removing : enclosure logical id(0x%016llx), slot(%d)\n",
			 (u64)pcie_device->enclosure_logical_id,
			 pcie_device->slot);
	if (pcie_device->connector_name[0] != '\0')
		ioc_info(ioc, "removing: enclosure level(0x%04x), connector name( %s)\n",
			 pcie_device->enclosure_level,
			 pcie_device->connector_name);

	/* blocked devices were never exposed to the SCSI midlayer, so
	 * there is no upper-layer target to remove for them
	 */
	if (pcie_device->starget && (pcie_device->access_status !=
				MPI26_PCIEDEV0_ASTATUS_DEVICE_BLOCKED))
		scsi_remove_target(&pcie_device->starget->dev);
	dewtprintk(ioc,
		   ioc_info(ioc, "%s: exit: handle(0x%04x), wwid(0x%016llx)\n",
			    __func__,
			    pcie_device->handle, (u64)pcie_device->wwid));
	if (pcie_device->enclosure_handle != 0)
		dewtprintk(ioc,
			   ioc_info(ioc, "%s: exit: enclosure logical id(0x%016llx), slot(%d)\n",
				    __func__,
				    (u64)pcie_device->enclosure_logical_id,
				    pcie_device->slot));
	if (pcie_device->connector_name[0] != '\0')
		dewtprintk(ioc,
			   ioc_info(ioc, "%s: exit: enclosure level(0x%04x), connector name( %s)\n",
				    __func__,
				    pcie_device->enclosure_level,
				    pcie_device->connector_name));

	kfree(pcie_device->serial_number);
}


/**
 * _scsih_pcie_check_device - checking device responsiveness
 * @ioc: per adapter object
 * @handle: attached device handle
 */
static void
_scsih_pcie_check_device(struct MPT3SAS_ADAPTER *ioc, u16 handle)
{
	Mpi2ConfigReply_t mpi_reply;
	Mpi26PCIeDevicePage0_t pcie_device_pg0;
	u32 ioc_status;
	struct _pcie_device *pcie_device;
	u64 wwid;
	unsigned long flags;
	struct scsi_target *starget;
	struct MPT3SAS_TARGET *sas_target_priv_data;
	u32 device_info;

	if ((mpt3sas_config_get_pcie_device_pg0(ioc, &mpi_reply,
	    &pcie_device_pg0, MPI26_PCIE_DEVICE_PGAD_FORM_HANDLE, handle)))
		return;

	ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & MPI2_IOCSTATUS_MASK;
	if (ioc_status != MPI2_IOCSTATUS_SUCCESS)
		return;

	/* check if this is end device */
	device_info = le32_to_cpu(pcie_device_pg0.DeviceInfo);
	if (!(_scsih_is_nvme_pciescsi_device(device_info)))
		return;

	wwid = le64_to_cpu(pcie_device_pg0.WWID);
	spin_lock_irqsave(&ioc->pcie_device_lock, flags);
	pcie_device = __mpt3sas_get_pdev_by_wwid(ioc, wwid);

	if (!pcie_device) {
		spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
		return;
	}

	/* firmware may reassign the handle; resync cached handle and
	 * enclosure info from the freshly read pcie device page 0
	 */
	if (unlikely(pcie_device->handle != handle)) {
		starget = pcie_device->starget;
		sas_target_priv_data = starget->hostdata;
		pcie_device->access_status = pcie_device_pg0.AccessStatus;
		starget_printk(KERN_INFO, starget,
		    "handle changed from(0x%04x) to (0x%04x)!!!\n",
		    pcie_device->handle, handle);
		sas_target_priv_data->handle = handle;
		pcie_device->handle = handle;

		if (le32_to_cpu(pcie_device_pg0.Flags) &
		    MPI26_PCIEDEV0_FLAGS_ENCL_LEVEL_VALID) {
			pcie_device->enclosure_level =
			    pcie_device_pg0.EnclosureLevel;
			memcpy(&pcie_device->connector_name[0],
			    &pcie_device_pg0.ConnectorName[0], 4);
		} else {
			pcie_device->enclosure_level = 0;
			pcie_device->connector_name[0] = '\0';
		}
	}

	/* check if device is present */
	if (!(le32_to_cpu(pcie_device_pg0.Flags) &
	    MPI26_PCIEDEV0_FLAGS_DEVICE_PRESENT)) {
		ioc_info(ioc, "device is not present handle(0x%04x), flags!!!\n",
			 handle);
		spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
		pcie_device_put(pcie_device);
		return;
	}

	/* check if there were any issues with discovery */
	if (_scsih_check_pcie_access_status(ioc, wwid, handle,
	    pcie_device_pg0.AccessStatus)) {
		spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
		pcie_device_put(pcie_device);
		return;
	}

	spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
	pcie_device_put(pcie_device);

	/* device responded: resume I/O that was blocked against it */
	_scsih_ublock_io_device(ioc, wwid, NULL);

	return;
}

/**
 * _scsih_pcie_add_device - creating pcie device object
 * @ioc: per adapter object
 * @handle: pcie device handle
 *
 * Creating end device object, stored in ioc->pcie_device_list.
 *
 * Return: 1 means queue the event later, 0 means complete the event
 */
static int
_scsih_pcie_add_device(struct MPT3SAS_ADAPTER *ioc, u16 handle)
{
	Mpi26PCIeDevicePage0_t pcie_device_pg0;
	Mpi26PCIeDevicePage2_t pcie_device_pg2;
	Mpi2ConfigReply_t mpi_reply;
	struct _pcie_device *pcie_device;
	struct _enclosure_node *enclosure_dev;
	u32 ioc_status;
	u64 wwid;

	if ((mpt3sas_config_get_pcie_device_pg0(ioc, &mpi_reply,
	    &pcie_device_pg0, MPI26_PCIE_DEVICE_PGAD_FORM_HANDLE, handle))) {
		ioc_err(ioc, "failure at %s:%d/%s()!\n",
			__FILE__, __LINE__, __func__);
		return 0;
	}
	ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
	    MPI2_IOCSTATUS_MASK;
	if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
		ioc_err(ioc, "failure at %s:%d/%s()!\n",
			__FILE__, __LINE__, __func__);
		return 0;
	}

	set_bit(handle, ioc->pend_os_device_add);
	wwid = le64_to_cpu(pcie_device_pg0.WWID);

	/* check if device is present */
	if (!(le32_to_cpu(pcie_device_pg0.Flags) &
	    MPI26_PCIEDEV0_FLAGS_DEVICE_PRESENT)) {
		/* NOTE(review): "0x04%x" looks like a typo for "0x%04x" */
		ioc_err(ioc, "device is not present handle(0x04%x)!!!\n",
			handle);
		return 0;
	}

	/* check if there were any issues with discovery */
	if (_scsih_check_pcie_access_status(ioc, wwid, handle,
	    pcie_device_pg0.AccessStatus))
		return 0;

	if (!(_scsih_is_nvme_pciescsi_device(le32_to_cpu
	    (pcie_device_pg0.DeviceInfo))))
		return 0;

	/* already on our list? nothing more to do */
	pcie_device = mpt3sas_get_pdev_by_wwid(ioc, wwid);
	if (pcie_device) {
		clear_bit(handle, ioc->pend_os_device_add);
		pcie_device_put(pcie_device);
		return 0;
	}

	/* PCIe Device Page 2 contains read-only information about a
	 * specific NVMe device; therefore, this page is only
	 * valid for NVMe devices and skip for pcie devices of type scsi.
	 */
	if (!(mpt3sas_scsih_is_pcie_scsi_device(
		le32_to_cpu(pcie_device_pg0.DeviceInfo)))) {
		if (mpt3sas_config_get_pcie_device_pg2(ioc, &mpi_reply,
		    &pcie_device_pg2, MPI2_SAS_DEVICE_PGAD_FORM_HANDLE,
		    handle)) {
			ioc_err(ioc,
			    "failure at %s:%d/%s()!\n", __FILE__,
			    __LINE__, __func__);
			return 0;
		}

		ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
		    MPI2_IOCSTATUS_MASK;
		if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
			ioc_err(ioc,
			    "failure at %s:%d/%s()!\n", __FILE__,
			    __LINE__, __func__);
			return 0;
		}
	}

	pcie_device = kzalloc(sizeof(struct _pcie_device), GFP_KERNEL);
	if (!pcie_device) {
		ioc_err(ioc, "failure at %s:%d/%s()!\n",
			__FILE__, __LINE__, __func__);
		return 0;
	}

	kref_init(&pcie_device->refcount);
	pcie_device->id = ioc->pcie_target_id++;
	pcie_device->channel = PCIE_CHANNEL;
	pcie_device->handle = handle;
	pcie_device->access_status = pcie_device_pg0.AccessStatus;
	pcie_device->device_info = le32_to_cpu(pcie_device_pg0.DeviceInfo);
	pcie_device->wwid = wwid;
	pcie_device->port_num = pcie_device_pg0.PortNum;
	pcie_device->fast_path = (le32_to_cpu(pcie_device_pg0.Flags) &
	    MPI26_PCIEDEV0_FLAGS_FAST_PATH_CAPABLE) ? 1 : 0;

	pcie_device->enclosure_handle =
	    le16_to_cpu(pcie_device_pg0.EnclosureHandle);
	if (pcie_device->enclosure_handle != 0)
		pcie_device->slot = le16_to_cpu(pcie_device_pg0.Slot);

	if (le32_to_cpu(pcie_device_pg0.Flags) &
	    MPI26_PCIEDEV0_FLAGS_ENCL_LEVEL_VALID) {
		pcie_device->enclosure_level = pcie_device_pg0.EnclosureLevel;
		memcpy(&pcie_device->connector_name[0],
		    &pcie_device_pg0.ConnectorName[0], 4);
	} else {
		pcie_device->enclosure_level = 0;
		pcie_device->connector_name[0] = '\0';
	}

	/* get enclosure_logical_id */
	if (pcie_device->enclosure_handle) {
		enclosure_dev =
			mpt3sas_scsih_enclosure_find_by_handle(ioc,
					pcie_device->enclosure_handle);
		if (enclosure_dev)
			pcie_device->enclosure_logical_id =
			    le64_to_cpu(enclosure_dev->pg0.EnclosureLogicalID);
	}
	/* TODO -- Add device name once FW supports it */
	if (!(mpt3sas_scsih_is_pcie_scsi_device(
	    le32_to_cpu(pcie_device_pg0.DeviceInfo)))) {
		pcie_device->nvme_mdts =
		    le32_to_cpu(pcie_device_pg2.MaximumDataTransferSize);
		pcie_device->shutdown_latency =
		    le16_to_cpu(pcie_device_pg2.ShutdownLatency);
		/*
		 * Set IOC's max_shutdown_latency to drive's RTD3 Entry Latency
		 * if drive's RTD3 Entry Latency is greater then IOC's
		 * max_shutdown_latency.
8213 */ 8214 if (pcie_device->shutdown_latency > ioc->max_shutdown_latency) 8215 ioc->max_shutdown_latency = 8216 pcie_device->shutdown_latency; 8217 if (pcie_device_pg2.ControllerResetTO) 8218 pcie_device->reset_timeout = 8219 pcie_device_pg2.ControllerResetTO; 8220 else 8221 pcie_device->reset_timeout = 30; 8222 } else 8223 pcie_device->reset_timeout = 30; 8224 8225 if (ioc->wait_for_discovery_to_complete) 8226 _scsih_pcie_device_init_add(ioc, pcie_device); 8227 else 8228 _scsih_pcie_device_add(ioc, pcie_device); 8229 8230 pcie_device_put(pcie_device); 8231 return 0; 8232 } 8233 8234 /** 8235 * _scsih_pcie_topology_change_event_debug - debug for topology 8236 * event 8237 * @ioc: per adapter object 8238 * @event_data: event data payload 8239 * Context: user. 8240 */ 8241 static void 8242 _scsih_pcie_topology_change_event_debug(struct MPT3SAS_ADAPTER *ioc, 8243 Mpi26EventDataPCIeTopologyChangeList_t *event_data) 8244 { 8245 int i; 8246 u16 handle; 8247 u16 reason_code; 8248 u8 port_number; 8249 char *status_str = NULL; 8250 u8 link_rate, prev_link_rate; 8251 8252 switch (event_data->SwitchStatus) { 8253 case MPI26_EVENT_PCIE_TOPO_SS_ADDED: 8254 status_str = "add"; 8255 break; 8256 case MPI26_EVENT_PCIE_TOPO_SS_NOT_RESPONDING: 8257 status_str = "remove"; 8258 break; 8259 case MPI26_EVENT_PCIE_TOPO_SS_RESPONDING: 8260 case 0: 8261 status_str = "responding"; 8262 break; 8263 case MPI26_EVENT_PCIE_TOPO_SS_DELAY_NOT_RESPONDING: 8264 status_str = "remove delay"; 8265 break; 8266 default: 8267 status_str = "unknown status"; 8268 break; 8269 } 8270 ioc_info(ioc, "pcie topology change: (%s)\n", status_str); 8271 pr_info("\tswitch_handle(0x%04x), enclosure_handle(0x%04x)" 8272 "start_port(%02d), count(%d)\n", 8273 le16_to_cpu(event_data->SwitchDevHandle), 8274 le16_to_cpu(event_data->EnclosureHandle), 8275 event_data->StartPortNum, event_data->NumEntries); 8276 for (i = 0; i < event_data->NumEntries; i++) { 8277 handle = 8278 
le16_to_cpu(event_data->PortEntry[i].AttachedDevHandle); 8279 if (!handle) 8280 continue; 8281 port_number = event_data->StartPortNum + i; 8282 reason_code = event_data->PortEntry[i].PortStatus; 8283 switch (reason_code) { 8284 case MPI26_EVENT_PCIE_TOPO_PS_DEV_ADDED: 8285 status_str = "target add"; 8286 break; 8287 case MPI26_EVENT_PCIE_TOPO_PS_NOT_RESPONDING: 8288 status_str = "target remove"; 8289 break; 8290 case MPI26_EVENT_PCIE_TOPO_PS_DELAY_NOT_RESPONDING: 8291 status_str = "delay target remove"; 8292 break; 8293 case MPI26_EVENT_PCIE_TOPO_PS_PORT_CHANGED: 8294 status_str = "link rate change"; 8295 break; 8296 case MPI26_EVENT_PCIE_TOPO_PS_NO_CHANGE: 8297 status_str = "target responding"; 8298 break; 8299 default: 8300 status_str = "unknown"; 8301 break; 8302 } 8303 link_rate = event_data->PortEntry[i].CurrentPortInfo & 8304 MPI26_EVENT_PCIE_TOPO_PI_RATE_MASK; 8305 prev_link_rate = event_data->PortEntry[i].PreviousPortInfo & 8306 MPI26_EVENT_PCIE_TOPO_PI_RATE_MASK; 8307 pr_info("\tport(%02d), attached_handle(0x%04x): %s:" 8308 " link rate: new(0x%02x), old(0x%02x)\n", port_number, 8309 handle, status_str, link_rate, prev_link_rate); 8310 } 8311 } 8312 8313 /** 8314 * _scsih_pcie_topology_change_event - handle PCIe topology 8315 * changes 8316 * @ioc: per adapter object 8317 * @fw_event: The fw_event_work object 8318 * Context: user. 
 *
 */
static void
_scsih_pcie_topology_change_event(struct MPT3SAS_ADAPTER *ioc,
	struct fw_event_work *fw_event)
{
	int i;
	u16 handle;
	u16 reason_code;
	u8 link_rate, prev_link_rate;
	unsigned long flags;
	int rc;
	Mpi26EventDataPCIeTopologyChangeList_t *event_data =
		(Mpi26EventDataPCIeTopologyChangeList_t *) fw_event->event_data;
	struct _pcie_device *pcie_device;

	if (ioc->logging_level & MPT_DEBUG_EVENT_WORK_TASK)
		_scsih_pcie_topology_change_event_debug(ioc, event_data);

	/* nothing to do while the host is being torn down or recovered */
	if (ioc->shost_recovery || ioc->remove_host ||
	    ioc->pci_error_recovery)
		return;

	if (fw_event->ignore) {
		dewtprintk(ioc, ioc_info(ioc, "ignoring switch event\n"));
		return;
	}

	/* handle siblings events */
	for (i = 0; i < event_data->NumEntries; i++) {
		/* re-check per entry: flags can change while we iterate */
		if (fw_event->ignore) {
			dewtprintk(ioc,
				   ioc_info(ioc, "ignoring switch event\n"));
			return;
		}
		if (ioc->remove_host || ioc->pci_error_recovery)
			return;
		reason_code = event_data->PortEntry[i].PortStatus;
		handle =
			le16_to_cpu(event_data->PortEntry[i].AttachedDevHandle);
		if (!handle)
			continue;

		link_rate = event_data->PortEntry[i].CurrentPortInfo
			& MPI26_EVENT_PCIE_TOPO_PI_RATE_MASK;
		prev_link_rate = event_data->PortEntry[i].PreviousPortInfo
			& MPI26_EVENT_PCIE_TOPO_PI_RATE_MASK;

		switch (reason_code) {
		case MPI26_EVENT_PCIE_TOPO_PS_PORT_CHANGED:
			if (ioc->shost_recovery)
				break;
			if (link_rate == prev_link_rate)
				break;
			if (link_rate < MPI26_EVENT_PCIE_TOPO_PI_RATE_2_5)
				break;

			_scsih_pcie_check_device(ioc, handle);

			/* This code after this point handles the test case
			 * where a device has been added, however its returning
			 * BUSY for sometime. Then before the Device Missing
			 * Delay expires and the device becomes READY, the
			 * device is removed and added back.
			 */
			spin_lock_irqsave(&ioc->pcie_device_lock, flags);
			pcie_device = __mpt3sas_get_pdev_by_handle(ioc, handle);
			spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);

			if (pcie_device) {
				pcie_device_put(pcie_device);
				break;
			}

			if (!test_bit(handle, ioc->pend_os_device_add))
				break;

			/* device is still pending for the OS: rewrite this
			 * entry's status so it is processed as a device add
			 * and fall through to the add handler below.
			 */
			dewtprintk(ioc,
				   ioc_info(ioc, "handle(0x%04x) device not found: convert event to a device add\n",
					    handle));
			event_data->PortEntry[i].PortStatus &= 0xF0;
			event_data->PortEntry[i].PortStatus |=
				MPI26_EVENT_PCIE_TOPO_PS_DEV_ADDED;
			fallthrough;
		case MPI26_EVENT_PCIE_TOPO_PS_DEV_ADDED:
			if (ioc->shost_recovery)
				break;
			if (link_rate < MPI26_EVENT_PCIE_TOPO_PI_RATE_2_5)
				break;

			rc = _scsih_pcie_add_device(ioc, handle);
			if (!rc) {
				/* mark entry vacant */
				/* TODO This needs to be reviewed and fixed,
				 * we dont have an entry
				 * to make an event void like vacant
				 */
				event_data->PortEntry[i].PortStatus |=
					MPI26_EVENT_PCIE_TOPO_PS_NO_CHANGE;
			}
			break;
		case MPI26_EVENT_PCIE_TOPO_PS_NOT_RESPONDING:
			_scsih_pcie_device_remove_by_handle(ioc, handle);
			break;
		}
	}
}

/**
 * _scsih_pcie_device_status_change_event_debug - debug for device event
 * @ioc: per adapter object
 * @event_data: event data payload
 * Context: user.
 */
static void
_scsih_pcie_device_status_change_event_debug(struct MPT3SAS_ADAPTER *ioc,
	Mpi26EventDataPCIeDeviceStatusChange_t *event_data)
{
	char *reason_str = NULL;

	/* translate the firmware reason code to a human readable string */
	switch (event_data->ReasonCode) {
	case MPI26_EVENT_PCIDEV_STAT_RC_SMART_DATA:
		reason_str = "smart data";
		break;
	case MPI26_EVENT_PCIDEV_STAT_RC_UNSUPPORTED:
		reason_str = "unsupported device discovered";
		break;
	case MPI26_EVENT_PCIDEV_STAT_RC_INTERNAL_DEVICE_RESET:
		reason_str = "internal device reset";
		break;
	case MPI26_EVENT_PCIDEV_STAT_RC_TASK_ABORT_INTERNAL:
		reason_str = "internal task abort";
		break;
	case MPI26_EVENT_PCIDEV_STAT_RC_ABORT_TASK_SET_INTERNAL:
		reason_str = "internal task abort set";
		break;
	case MPI26_EVENT_PCIDEV_STAT_RC_CLEAR_TASK_SET_INTERNAL:
		reason_str = "internal clear task set";
		break;
	case MPI26_EVENT_PCIDEV_STAT_RC_QUERY_TASK_INTERNAL:
		reason_str = "internal query task";
		break;
	case MPI26_EVENT_PCIDEV_STAT_RC_DEV_INIT_FAILURE:
		reason_str = "device init failure";
		break;
	case MPI26_EVENT_PCIDEV_STAT_RC_CMP_INTERNAL_DEV_RESET:
		reason_str = "internal device reset complete";
		break;
	case MPI26_EVENT_PCIDEV_STAT_RC_CMP_TASK_ABORT_INTERNAL:
		reason_str = "internal task abort complete";
		break;
	case MPI26_EVENT_PCIDEV_STAT_RC_ASYNC_NOTIFICATION:
		reason_str = "internal async notification";
		break;
	case MPI26_EVENT_PCIDEV_STAT_RC_PCIE_HOT_RESET_FAILED:
		reason_str = "pcie hot reset failed";
		break;
	default:
		reason_str = "unknown reason";
		break;
	}

	ioc_info(ioc, "PCIE device status change: (%s)\n"
		 "\thandle(0x%04x), WWID(0x%016llx), tag(%d)",
		 reason_str, le16_to_cpu(event_data->DevHandle),
		 (u64)le64_to_cpu(event_data->WWID),
		 le16_to_cpu(event_data->TaskTag));
	if (event_data->ReasonCode == MPI26_EVENT_PCIDEV_STAT_RC_SMART_DATA)
		pr_cont(", ASC(0x%x), ASCQ(0x%x)\n",
			event_data->ASC, event_data->ASCQ);
	pr_cont("\n");
}

/**
 * _scsih_pcie_device_status_change_event - handle device status
 * change
 * @ioc: per adapter object
 * @fw_event: The fw_event_work object
 * Context: user.
 */
static void
_scsih_pcie_device_status_change_event(struct MPT3SAS_ADAPTER *ioc,
	struct fw_event_work *fw_event)
{
	struct MPT3SAS_TARGET *target_priv_data;
	struct _pcie_device *pcie_device;
	u64 wwid;
	unsigned long flags;
	Mpi26EventDataPCIeDeviceStatusChange_t *event_data =
		(Mpi26EventDataPCIeDeviceStatusChange_t *)fw_event->event_data;
	if (ioc->logging_level & MPT_DEBUG_EVENT_WORK_TASK)
		_scsih_pcie_device_status_change_event_debug(ioc,
			event_data);

	/* only the internal-device-reset start/complete pair is acted on;
	 * all other reason codes are log-only (handled by the debug hook).
	 */
	if (event_data->ReasonCode !=
	    MPI26_EVENT_PCIDEV_STAT_RC_INTERNAL_DEVICE_RESET &&
	    event_data->ReasonCode !=
	    MPI26_EVENT_PCIDEV_STAT_RC_CMP_INTERNAL_DEV_RESET)
		return;

	spin_lock_irqsave(&ioc->pcie_device_lock, flags);
	wwid = le64_to_cpu(event_data->WWID);
	pcie_device = __mpt3sas_get_pdev_by_wwid(ioc, wwid);

	if (!pcie_device || !pcie_device->starget)
		goto out;

	target_priv_data = pcie_device->starget->hostdata;
	if (!target_priv_data)
		goto out;

	/* tm_busy is set while the firmware resets the device and cleared
	 * when the reset completes.
	 */
	if (event_data->ReasonCode ==
	    MPI26_EVENT_PCIDEV_STAT_RC_INTERNAL_DEVICE_RESET)
		target_priv_data->tm_busy = 1;
	else
		target_priv_data->tm_busy = 0;
out:
	if (pcie_device)
		pcie_device_put(pcie_device);

	spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
}

/**
 * _scsih_sas_enclosure_dev_status_change_event_debug - debug for enclosure
 * event
 * @ioc: per adapter object
 * @event_data: event data payload
 * Context: user.
8548 */ 8549 static void 8550 _scsih_sas_enclosure_dev_status_change_event_debug(struct MPT3SAS_ADAPTER *ioc, 8551 Mpi2EventDataSasEnclDevStatusChange_t *event_data) 8552 { 8553 char *reason_str = NULL; 8554 8555 switch (event_data->ReasonCode) { 8556 case MPI2_EVENT_SAS_ENCL_RC_ADDED: 8557 reason_str = "enclosure add"; 8558 break; 8559 case MPI2_EVENT_SAS_ENCL_RC_NOT_RESPONDING: 8560 reason_str = "enclosure remove"; 8561 break; 8562 default: 8563 reason_str = "unknown reason"; 8564 break; 8565 } 8566 8567 ioc_info(ioc, "enclosure status change: (%s)\n" 8568 "\thandle(0x%04x), enclosure logical id(0x%016llx) number slots(%d)\n", 8569 reason_str, 8570 le16_to_cpu(event_data->EnclosureHandle), 8571 (u64)le64_to_cpu(event_data->EnclosureLogicalID), 8572 le16_to_cpu(event_data->StartSlot)); 8573 } 8574 8575 /** 8576 * _scsih_sas_enclosure_dev_status_change_event - handle enclosure events 8577 * @ioc: per adapter object 8578 * @fw_event: The fw_event_work object 8579 * Context: user. 8580 */ 8581 static void 8582 _scsih_sas_enclosure_dev_status_change_event(struct MPT3SAS_ADAPTER *ioc, 8583 struct fw_event_work *fw_event) 8584 { 8585 Mpi2ConfigReply_t mpi_reply; 8586 struct _enclosure_node *enclosure_dev = NULL; 8587 Mpi2EventDataSasEnclDevStatusChange_t *event_data = 8588 (Mpi2EventDataSasEnclDevStatusChange_t *)fw_event->event_data; 8589 int rc; 8590 u16 enclosure_handle = le16_to_cpu(event_data->EnclosureHandle); 8591 8592 if (ioc->logging_level & MPT_DEBUG_EVENT_WORK_TASK) 8593 _scsih_sas_enclosure_dev_status_change_event_debug(ioc, 8594 (Mpi2EventDataSasEnclDevStatusChange_t *) 8595 fw_event->event_data); 8596 if (ioc->shost_recovery) 8597 return; 8598 8599 if (enclosure_handle) 8600 enclosure_dev = 8601 mpt3sas_scsih_enclosure_find_by_handle(ioc, 8602 enclosure_handle); 8603 switch (event_data->ReasonCode) { 8604 case MPI2_EVENT_SAS_ENCL_RC_ADDED: 8605 if (!enclosure_dev) { 8606 enclosure_dev = 8607 kzalloc(sizeof(struct _enclosure_node), 8608 GFP_KERNEL); 8609 if 
(!enclosure_dev) { 8610 ioc_info(ioc, "failure at %s:%d/%s()!\n", 8611 __FILE__, __LINE__, __func__); 8612 return; 8613 } 8614 rc = mpt3sas_config_get_enclosure_pg0(ioc, &mpi_reply, 8615 &enclosure_dev->pg0, 8616 MPI2_SAS_ENCLOS_PGAD_FORM_HANDLE, 8617 enclosure_handle); 8618 8619 if (rc || (le16_to_cpu(mpi_reply.IOCStatus) & 8620 MPI2_IOCSTATUS_MASK)) { 8621 kfree(enclosure_dev); 8622 return; 8623 } 8624 8625 list_add_tail(&enclosure_dev->list, 8626 &ioc->enclosure_list); 8627 } 8628 break; 8629 case MPI2_EVENT_SAS_ENCL_RC_NOT_RESPONDING: 8630 if (enclosure_dev) { 8631 list_del(&enclosure_dev->list); 8632 kfree(enclosure_dev); 8633 } 8634 break; 8635 default: 8636 break; 8637 } 8638 } 8639 8640 /** 8641 * _scsih_sas_broadcast_primitive_event - handle broadcast events 8642 * @ioc: per adapter object 8643 * @fw_event: The fw_event_work object 8644 * Context: user. 8645 */ 8646 static void 8647 _scsih_sas_broadcast_primitive_event(struct MPT3SAS_ADAPTER *ioc, 8648 struct fw_event_work *fw_event) 8649 { 8650 struct scsi_cmnd *scmd; 8651 struct scsi_device *sdev; 8652 struct scsiio_tracker *st; 8653 u16 smid, handle; 8654 u32 lun; 8655 struct MPT3SAS_DEVICE *sas_device_priv_data; 8656 u32 termination_count; 8657 u32 query_count; 8658 Mpi2SCSITaskManagementReply_t *mpi_reply; 8659 Mpi2EventDataSasBroadcastPrimitive_t *event_data = 8660 (Mpi2EventDataSasBroadcastPrimitive_t *) 8661 fw_event->event_data; 8662 u16 ioc_status; 8663 unsigned long flags; 8664 int r; 8665 u8 max_retries = 0; 8666 u8 task_abort_retries; 8667 8668 mutex_lock(&ioc->tm_cmds.mutex); 8669 ioc_info(ioc, "%s: enter: phy number(%d), width(%d)\n", 8670 __func__, event_data->PhyNum, event_data->PortWidth); 8671 8672 _scsih_block_io_all_device(ioc); 8673 8674 spin_lock_irqsave(&ioc->scsi_lookup_lock, flags); 8675 mpi_reply = ioc->tm_cmds.reply; 8676 broadcast_aen_retry: 8677 8678 /* sanity checks for retrying this loop */ 8679 if (max_retries++ == 5) { 8680 dewtprintk(ioc, ioc_info(ioc, "%s: giving up\n", 
__func__)); 8681 goto out; 8682 } else if (max_retries > 1) 8683 dewtprintk(ioc, 8684 ioc_info(ioc, "%s: %d retry\n", 8685 __func__, max_retries - 1)); 8686 8687 termination_count = 0; 8688 query_count = 0; 8689 for (smid = 1; smid <= ioc->scsiio_depth; smid++) { 8690 if (ioc->shost_recovery) 8691 goto out; 8692 scmd = mpt3sas_scsih_scsi_lookup_get(ioc, smid); 8693 if (!scmd) 8694 continue; 8695 st = scsi_cmd_priv(scmd); 8696 sdev = scmd->device; 8697 sas_device_priv_data = sdev->hostdata; 8698 if (!sas_device_priv_data || !sas_device_priv_data->sas_target) 8699 continue; 8700 /* skip hidden raid components */ 8701 if (sas_device_priv_data->sas_target->flags & 8702 MPT_TARGET_FLAGS_RAID_COMPONENT) 8703 continue; 8704 /* skip volumes */ 8705 if (sas_device_priv_data->sas_target->flags & 8706 MPT_TARGET_FLAGS_VOLUME) 8707 continue; 8708 /* skip PCIe devices */ 8709 if (sas_device_priv_data->sas_target->flags & 8710 MPT_TARGET_FLAGS_PCIE_DEVICE) 8711 continue; 8712 8713 handle = sas_device_priv_data->sas_target->handle; 8714 lun = sas_device_priv_data->lun; 8715 query_count++; 8716 8717 if (ioc->shost_recovery) 8718 goto out; 8719 8720 spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags); 8721 r = mpt3sas_scsih_issue_tm(ioc, handle, 0, 0, lun, 8722 MPI2_SCSITASKMGMT_TASKTYPE_QUERY_TASK, st->smid, 8723 st->msix_io, 30, 0); 8724 if (r == FAILED) { 8725 sdev_printk(KERN_WARNING, sdev, 8726 "mpt3sas_scsih_issue_tm: FAILED when sending " 8727 "QUERY_TASK: scmd(%p)\n", scmd); 8728 spin_lock_irqsave(&ioc->scsi_lookup_lock, flags); 8729 goto broadcast_aen_retry; 8730 } 8731 ioc_status = le16_to_cpu(mpi_reply->IOCStatus) 8732 & MPI2_IOCSTATUS_MASK; 8733 if (ioc_status != MPI2_IOCSTATUS_SUCCESS) { 8734 sdev_printk(KERN_WARNING, sdev, 8735 "query task: FAILED with IOCSTATUS(0x%04x), scmd(%p)\n", 8736 ioc_status, scmd); 8737 spin_lock_irqsave(&ioc->scsi_lookup_lock, flags); 8738 goto broadcast_aen_retry; 8739 } 8740 8741 /* see if IO is still owned by IOC and target */ 8742 if 
(mpi_reply->ResponseCode == 8743 MPI2_SCSITASKMGMT_RSP_TM_SUCCEEDED || 8744 mpi_reply->ResponseCode == 8745 MPI2_SCSITASKMGMT_RSP_IO_QUEUED_ON_IOC) { 8746 spin_lock_irqsave(&ioc->scsi_lookup_lock, flags); 8747 continue; 8748 } 8749 task_abort_retries = 0; 8750 tm_retry: 8751 if (task_abort_retries++ == 60) { 8752 dewtprintk(ioc, 8753 ioc_info(ioc, "%s: ABORT_TASK: giving up\n", 8754 __func__)); 8755 spin_lock_irqsave(&ioc->scsi_lookup_lock, flags); 8756 goto broadcast_aen_retry; 8757 } 8758 8759 if (ioc->shost_recovery) 8760 goto out_no_lock; 8761 8762 r = mpt3sas_scsih_issue_tm(ioc, handle, sdev->channel, sdev->id, 8763 sdev->lun, MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK, 8764 st->smid, st->msix_io, 30, 0); 8765 if (r == FAILED || st->cb_idx != 0xFF) { 8766 sdev_printk(KERN_WARNING, sdev, 8767 "mpt3sas_scsih_issue_tm: ABORT_TASK: FAILED : " 8768 "scmd(%p)\n", scmd); 8769 goto tm_retry; 8770 } 8771 8772 if (task_abort_retries > 1) 8773 sdev_printk(KERN_WARNING, sdev, 8774 "mpt3sas_scsih_issue_tm: ABORT_TASK: RETRIES (%d):" 8775 " scmd(%p)\n", 8776 task_abort_retries - 1, scmd); 8777 8778 termination_count += le32_to_cpu(mpi_reply->TerminationCount); 8779 spin_lock_irqsave(&ioc->scsi_lookup_lock, flags); 8780 } 8781 8782 if (ioc->broadcast_aen_pending) { 8783 dewtprintk(ioc, 8784 ioc_info(ioc, 8785 "%s: loop back due to pending AEN\n", 8786 __func__)); 8787 ioc->broadcast_aen_pending = 0; 8788 goto broadcast_aen_retry; 8789 } 8790 8791 out: 8792 spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags); 8793 out_no_lock: 8794 8795 dewtprintk(ioc, 8796 ioc_info(ioc, "%s - exit, query_count = %d termination_count = %d\n", 8797 __func__, query_count, termination_count)); 8798 8799 ioc->broadcast_aen_busy = 0; 8800 if (!ioc->shost_recovery) 8801 _scsih_ublock_io_all_device(ioc); 8802 mutex_unlock(&ioc->tm_cmds.mutex); 8803 } 8804 8805 /** 8806 * _scsih_sas_discovery_event - handle discovery events 8807 * @ioc: per adapter object 8808 * @fw_event: The fw_event_work object 8809 
* Context: user. 8810 */ 8811 static void 8812 _scsih_sas_discovery_event(struct MPT3SAS_ADAPTER *ioc, 8813 struct fw_event_work *fw_event) 8814 { 8815 Mpi2EventDataSasDiscovery_t *event_data = 8816 (Mpi2EventDataSasDiscovery_t *) fw_event->event_data; 8817 8818 if (ioc->logging_level & MPT_DEBUG_EVENT_WORK_TASK) { 8819 ioc_info(ioc, "discovery event: (%s)", 8820 event_data->ReasonCode == MPI2_EVENT_SAS_DISC_RC_STARTED ? 8821 "start" : "stop"); 8822 if (event_data->DiscoveryStatus) 8823 pr_cont("discovery_status(0x%08x)", 8824 le32_to_cpu(event_data->DiscoveryStatus)); 8825 pr_cont("\n"); 8826 } 8827 8828 if (event_data->ReasonCode == MPI2_EVENT_SAS_DISC_RC_STARTED && 8829 !ioc->sas_hba.num_phys) { 8830 if (disable_discovery > 0 && ioc->shost_recovery) { 8831 /* Wait for the reset to complete */ 8832 while (ioc->shost_recovery) 8833 ssleep(1); 8834 } 8835 _scsih_sas_host_add(ioc); 8836 } 8837 } 8838 8839 /** 8840 * _scsih_sas_device_discovery_error_event - display SAS device discovery error 8841 * events 8842 * @ioc: per adapter object 8843 * @fw_event: The fw_event_work object 8844 * Context: user. 
 */
static void
_scsih_sas_device_discovery_error_event(struct MPT3SAS_ADAPTER *ioc,
	struct fw_event_work *fw_event)
{
	Mpi25EventDataSasDeviceDiscoveryError_t *event_data =
		(Mpi25EventDataSasDeviceDiscoveryError_t *)fw_event->event_data;

	/* log-only handler: warn about SMP failures reported by firmware */
	switch (event_data->ReasonCode) {
	case MPI25_EVENT_SAS_DISC_ERR_SMP_FAILED:
		ioc_warn(ioc, "SMP command sent to the expander (handle:0x%04x, sas_address:0x%016llx, physical_port:0x%02x) has failed\n",
			 le16_to_cpu(event_data->DevHandle),
			 (u64)le64_to_cpu(event_data->SASAddress),
			 event_data->PhysicalPort);
		break;
	case MPI25_EVENT_SAS_DISC_ERR_SMP_TIMEOUT:
		ioc_warn(ioc, "SMP command sent to the expander (handle:0x%04x, sas_address:0x%016llx, physical_port:0x%02x) has timed out\n",
			 le16_to_cpu(event_data->DevHandle),
			 (u64)le64_to_cpu(event_data->SASAddress),
			 event_data->PhysicalPort);
		break;
	default:
		break;
	}
}

/**
 * _scsih_pcie_enumeration_event - handle enumeration events
 * @ioc: per adapter object
 * @fw_event: The fw_event_work object
 * Context: user.
 */
static void
_scsih_pcie_enumeration_event(struct MPT3SAS_ADAPTER *ioc,
	struct fw_event_work *fw_event)
{
	Mpi26EventDataPCIeEnumeration_t *event_data =
		(Mpi26EventDataPCIeEnumeration_t *)fw_event->event_data;

	/* log-only event; skip entirely unless event debugging is enabled */
	if (!(ioc->logging_level & MPT_DEBUG_EVENT_WORK_TASK))
		return;

	ioc_info(ioc, "pcie enumeration event: (%s) Flag 0x%02x",
		 (event_data->ReasonCode == MPI26_EVENT_PCIE_ENUM_RC_STARTED) ?
		 "started" : "completed",
		 event_data->Flags);
	if (event_data->EnumerationStatus)
		pr_cont("enumeration_status(0x%08x)",
			le32_to_cpu(event_data->EnumerationStatus));
	pr_cont("\n");
}

/**
 * _scsih_ir_fastpath - turn on fastpath for IR physdisk
 * @ioc: per adapter object
 * @handle: device handle for physical disk
 * @phys_disk_num: physical disk number
 *
 * Return: 0 for success, else failure.
 */
static int
_scsih_ir_fastpath(struct MPT3SAS_ADAPTER *ioc, u16 handle, u8 phys_disk_num)
{
	Mpi2RaidActionRequest_t *mpi_request;
	Mpi2RaidActionReply_t *mpi_reply;
	u16 smid;
	u8 issue_reset = 0;
	int rc = 0;
	u16 ioc_status;
	u32 log_info;

	/* not applicable on MPI2 generation controllers */
	if (ioc->hba_mpi_version_belonged == MPI2_VERSION)
		return rc;

	mutex_lock(&ioc->scsih_cmds.mutex);

	if (ioc->scsih_cmds.status != MPT3_CMD_NOT_USED) {
		ioc_err(ioc, "%s: scsih_cmd in use\n", __func__);
		rc = -EAGAIN;
		goto out;
	}
	ioc->scsih_cmds.status = MPT3_CMD_PENDING;

	smid = mpt3sas_base_get_smid(ioc, ioc->scsih_cb_idx);
	if (!smid) {
		ioc_err(ioc, "%s: failed obtaining a smid\n", __func__);
		ioc->scsih_cmds.status = MPT3_CMD_NOT_USED;
		rc = -EAGAIN;
		goto out;
	}

	/* build the RAID_ACTION request frame */
	mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
	ioc->scsih_cmds.smid = smid;
	memset(mpi_request, 0, sizeof(Mpi2RaidActionRequest_t));

	mpi_request->Function = MPI2_FUNCTION_RAID_ACTION;
	mpi_request->Action = MPI2_RAID_ACTION_PHYSDISK_HIDDEN;
	mpi_request->PhysDiskNum = phys_disk_num;

	dewtprintk(ioc,
		   ioc_info(ioc, "IR RAID_ACTION: turning fast path on for handle(0x%04x), phys_disk_num (0x%02x)\n",
			    handle, phys_disk_num));

	/* send and wait up to 10 seconds for the firmware reply */
	init_completion(&ioc->scsih_cmds.done);
	ioc->put_smid_default(ioc, smid);
	wait_for_completion_timeout(&ioc->scsih_cmds.done, 10*HZ);

	if (!(ioc->scsih_cmds.status & MPT3_CMD_COMPLETE)) {
		mpt3sas_check_cmd_timeout(ioc,
			ioc->scsih_cmds.status, mpi_request,
			sizeof(Mpi2RaidActionRequest_t)/4, issue_reset);
		rc = -EFAULT;
		goto out;
	}

	if (ioc->scsih_cmds.status & MPT3_CMD_REPLY_VALID) {

		mpi_reply = ioc->scsih_cmds.reply;
		ioc_status = le16_to_cpu(mpi_reply->IOCStatus);
		if (ioc_status & MPI2_IOCSTATUS_FLAG_LOG_INFO_AVAILABLE)
			log_info = le32_to_cpu(mpi_reply->IOCLogInfo);
		else
			log_info = 0;
		ioc_status &= MPI2_IOCSTATUS_MASK;
		if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
			dewtprintk(ioc,
				   ioc_info(ioc, "IR RAID_ACTION: failed: ioc_status(0x%04x), loginfo(0x%08x)!!!\n",
					    ioc_status, log_info));
			rc = -EFAULT;
		} else
			dewtprintk(ioc,
				   ioc_info(ioc, "IR RAID_ACTION: completed successfully\n"));
	}

 out:
	ioc->scsih_cmds.status = MPT3_CMD_NOT_USED;
	mutex_unlock(&ioc->scsih_cmds.mutex);

	/* issue_reset may be set by mpt3sas_check_cmd_timeout() above */
	if (issue_reset)
		mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER);
	return rc;
}

/**
 * _scsih_reprobe_lun - reprobing lun
 * @sdev: scsi device struct
 * @no_uld_attach: sdev->no_uld_attach flag setting
 *
 **/
static void
_scsih_reprobe_lun(struct scsi_device *sdev, void *no_uld_attach)
{
	sdev->no_uld_attach = no_uld_attach ? 1 : 0;
	sdev_printk(KERN_INFO, sdev, "%s raid component\n",
		    sdev->no_uld_attach ? "hiding" : "exposing");
	WARN_ON(scsi_device_reprobe(sdev));
}

/**
 * _scsih_sas_volume_add - add new volume
 * @ioc: per adapter object
 * @element: IR config element data
 * Context: user.
 */
static void
_scsih_sas_volume_add(struct MPT3SAS_ADAPTER *ioc,
	Mpi2EventIrConfigElement_t *element)
{
	struct _raid_device *raid_device;
	unsigned long flags;
	u64 wwid;
	u16 handle = le16_to_cpu(element->VolDevHandle);
	int rc;

	mpt3sas_config_get_volume_wwid(ioc, handle, &wwid);
	if (!wwid) {
		ioc_err(ioc, "failure at %s:%d/%s()!\n",
			__FILE__, __LINE__, __func__);
		return;
	}

	/* already tracked? then there is nothing to add */
	spin_lock_irqsave(&ioc->raid_device_lock, flags);
	raid_device = _scsih_raid_device_find_by_wwid(ioc, wwid);
	spin_unlock_irqrestore(&ioc->raid_device_lock, flags);

	if (raid_device)
		return;

	raid_device = kzalloc(sizeof(struct _raid_device), GFP_KERNEL);
	if (!raid_device) {
		ioc_err(ioc, "failure at %s:%d/%s()!\n",
			__FILE__, __LINE__, __func__);
		return;
	}

	raid_device->id = ioc->sas_id++;
	raid_device->channel = RAID_CHANNEL;
	raid_device->handle = handle;
	raid_device->wwid = wwid;
	_scsih_raid_device_add(ioc, raid_device);
	if (!ioc->wait_for_discovery_to_complete) {
		/* register with the SCSI midlayer right away; on failure the
		 * tracking entry is removed again
		 */
		rc = scsi_add_device(ioc->shost, RAID_CHANNEL,
				     raid_device->id, 0);
		if (rc)
			_scsih_raid_device_remove(ioc, raid_device);
	} else {
		/* during discovery, defer registration and only consider the
		 * volume as a boot device candidate
		 */
		spin_lock_irqsave(&ioc->raid_device_lock, flags);
		_scsih_determine_boot_device(ioc, raid_device, 1);
		spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
	}
}

/**
 * _scsih_sas_volume_delete - delete volume
 * @ioc: per adapter object
 * @handle: volume device handle
 * Context: user.
9062 */ 9063 static void 9064 _scsih_sas_volume_delete(struct MPT3SAS_ADAPTER *ioc, u16 handle) 9065 { 9066 struct _raid_device *raid_device; 9067 unsigned long flags; 9068 struct MPT3SAS_TARGET *sas_target_priv_data; 9069 struct scsi_target *starget = NULL; 9070 9071 spin_lock_irqsave(&ioc->raid_device_lock, flags); 9072 raid_device = mpt3sas_raid_device_find_by_handle(ioc, handle); 9073 if (raid_device) { 9074 if (raid_device->starget) { 9075 starget = raid_device->starget; 9076 sas_target_priv_data = starget->hostdata; 9077 sas_target_priv_data->deleted = 1; 9078 } 9079 ioc_info(ioc, "removing handle(0x%04x), wwid(0x%016llx)\n", 9080 raid_device->handle, (u64)raid_device->wwid); 9081 list_del(&raid_device->list); 9082 kfree(raid_device); 9083 } 9084 spin_unlock_irqrestore(&ioc->raid_device_lock, flags); 9085 if (starget) 9086 scsi_remove_target(&starget->dev); 9087 } 9088 9089 /** 9090 * _scsih_sas_pd_expose - expose pd component to /dev/sdX 9091 * @ioc: per adapter object 9092 * @element: IR config element data 9093 * Context: user. 
 */
static void
_scsih_sas_pd_expose(struct MPT3SAS_ADAPTER *ioc,
	Mpi2EventIrConfigElement_t *element)
{
	struct _sas_device *sas_device;
	struct scsi_target *starget = NULL;
	struct MPT3SAS_TARGET *sas_target_priv_data;
	unsigned long flags;
	u16 handle = le16_to_cpu(element->PhysDiskDevHandle);

	spin_lock_irqsave(&ioc->sas_device_lock, flags);
	sas_device = __mpt3sas_get_sdev_by_handle(ioc, handle);
	if (sas_device) {
		/* the disk is no longer a volume member */
		sas_device->volume_handle = 0;
		sas_device->volume_wwid = 0;
		clear_bit(handle, ioc->pd_handles);
		if (sas_device->starget && sas_device->starget->hostdata) {
			starget = sas_device->starget;
			sas_target_priv_data = starget->hostdata;
			sas_target_priv_data->flags &=
				~MPT_TARGET_FLAGS_RAID_COMPONENT;
		}
	}
	spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
	if (!sas_device)
		return;

	/* exposing raid component */
	if (starget)
		starget_for_each_device(starget, NULL, _scsih_reprobe_lun);

	sas_device_put(sas_device);
}

/**
 * _scsih_sas_pd_hide - hide pd component from /dev/sdX
 * @ioc: per adapter object
 * @element: IR config element data
 * Context: user.
9134 */ 9135 static void 9136 _scsih_sas_pd_hide(struct MPT3SAS_ADAPTER *ioc, 9137 Mpi2EventIrConfigElement_t *element) 9138 { 9139 struct _sas_device *sas_device; 9140 struct scsi_target *starget = NULL; 9141 struct MPT3SAS_TARGET *sas_target_priv_data; 9142 unsigned long flags; 9143 u16 handle = le16_to_cpu(element->PhysDiskDevHandle); 9144 u16 volume_handle = 0; 9145 u64 volume_wwid = 0; 9146 9147 mpt3sas_config_get_volume_handle(ioc, handle, &volume_handle); 9148 if (volume_handle) 9149 mpt3sas_config_get_volume_wwid(ioc, volume_handle, 9150 &volume_wwid); 9151 9152 spin_lock_irqsave(&ioc->sas_device_lock, flags); 9153 sas_device = __mpt3sas_get_sdev_by_handle(ioc, handle); 9154 if (sas_device) { 9155 set_bit(handle, ioc->pd_handles); 9156 if (sas_device->starget && sas_device->starget->hostdata) { 9157 starget = sas_device->starget; 9158 sas_target_priv_data = starget->hostdata; 9159 sas_target_priv_data->flags |= 9160 MPT_TARGET_FLAGS_RAID_COMPONENT; 9161 sas_device->volume_handle = volume_handle; 9162 sas_device->volume_wwid = volume_wwid; 9163 } 9164 } 9165 spin_unlock_irqrestore(&ioc->sas_device_lock, flags); 9166 if (!sas_device) 9167 return; 9168 9169 /* hiding raid component */ 9170 _scsih_ir_fastpath(ioc, handle, element->PhysDiskNum); 9171 9172 if (starget) 9173 starget_for_each_device(starget, (void *)1, _scsih_reprobe_lun); 9174 9175 sas_device_put(sas_device); 9176 } 9177 9178 /** 9179 * _scsih_sas_pd_delete - delete pd component 9180 * @ioc: per adapter object 9181 * @element: IR config element data 9182 * Context: user. 9183 */ 9184 static void 9185 _scsih_sas_pd_delete(struct MPT3SAS_ADAPTER *ioc, 9186 Mpi2EventIrConfigElement_t *element) 9187 { 9188 u16 handle = le16_to_cpu(element->PhysDiskDevHandle); 9189 9190 _scsih_device_remove_by_handle(ioc, handle); 9191 } 9192 9193 /** 9194 * _scsih_sas_pd_add - remove pd component 9195 * @ioc: per adapter object 9196 * @element: IR config element data 9197 * Context: user. 
9198 */ 9199 static void 9200 _scsih_sas_pd_add(struct MPT3SAS_ADAPTER *ioc, 9201 Mpi2EventIrConfigElement_t *element) 9202 { 9203 struct _sas_device *sas_device; 9204 u16 handle = le16_to_cpu(element->PhysDiskDevHandle); 9205 Mpi2ConfigReply_t mpi_reply; 9206 Mpi2SasDevicePage0_t sas_device_pg0; 9207 u32 ioc_status; 9208 u64 sas_address; 9209 u16 parent_handle; 9210 9211 set_bit(handle, ioc->pd_handles); 9212 9213 sas_device = mpt3sas_get_sdev_by_handle(ioc, handle); 9214 if (sas_device) { 9215 _scsih_ir_fastpath(ioc, handle, element->PhysDiskNum); 9216 sas_device_put(sas_device); 9217 return; 9218 } 9219 9220 if ((mpt3sas_config_get_sas_device_pg0(ioc, &mpi_reply, &sas_device_pg0, 9221 MPI2_SAS_DEVICE_PGAD_FORM_HANDLE, handle))) { 9222 ioc_err(ioc, "failure at %s:%d/%s()!\n", 9223 __FILE__, __LINE__, __func__); 9224 return; 9225 } 9226 9227 ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & 9228 MPI2_IOCSTATUS_MASK; 9229 if (ioc_status != MPI2_IOCSTATUS_SUCCESS) { 9230 ioc_err(ioc, "failure at %s:%d/%s()!\n", 9231 __FILE__, __LINE__, __func__); 9232 return; 9233 } 9234 9235 parent_handle = le16_to_cpu(sas_device_pg0.ParentDevHandle); 9236 if (!_scsih_get_sas_address(ioc, parent_handle, &sas_address)) 9237 mpt3sas_transport_update_links(ioc, sas_address, handle, 9238 sas_device_pg0.PhyNum, MPI2_SAS_NEG_LINK_RATE_1_5, 9239 mpt3sas_get_port_by_id(ioc, 9240 sas_device_pg0.PhysicalPort, 0)); 9241 9242 _scsih_ir_fastpath(ioc, handle, element->PhysDiskNum); 9243 _scsih_add_device(ioc, handle, 0, 1); 9244 } 9245 9246 /** 9247 * _scsih_sas_ir_config_change_event_debug - debug for IR Config Change events 9248 * @ioc: per adapter object 9249 * @event_data: event data payload 9250 * Context: user. 
9251 */ 9252 static void 9253 _scsih_sas_ir_config_change_event_debug(struct MPT3SAS_ADAPTER *ioc, 9254 Mpi2EventDataIrConfigChangeList_t *event_data) 9255 { 9256 Mpi2EventIrConfigElement_t *element; 9257 u8 element_type; 9258 int i; 9259 char *reason_str = NULL, *element_str = NULL; 9260 9261 element = (Mpi2EventIrConfigElement_t *)&event_data->ConfigElement[0]; 9262 9263 ioc_info(ioc, "raid config change: (%s), elements(%d)\n", 9264 le32_to_cpu(event_data->Flags) & MPI2_EVENT_IR_CHANGE_FLAGS_FOREIGN_CONFIG ? 9265 "foreign" : "native", 9266 event_data->NumElements); 9267 for (i = 0; i < event_data->NumElements; i++, element++) { 9268 switch (element->ReasonCode) { 9269 case MPI2_EVENT_IR_CHANGE_RC_ADDED: 9270 reason_str = "add"; 9271 break; 9272 case MPI2_EVENT_IR_CHANGE_RC_REMOVED: 9273 reason_str = "remove"; 9274 break; 9275 case MPI2_EVENT_IR_CHANGE_RC_NO_CHANGE: 9276 reason_str = "no change"; 9277 break; 9278 case MPI2_EVENT_IR_CHANGE_RC_HIDE: 9279 reason_str = "hide"; 9280 break; 9281 case MPI2_EVENT_IR_CHANGE_RC_UNHIDE: 9282 reason_str = "unhide"; 9283 break; 9284 case MPI2_EVENT_IR_CHANGE_RC_VOLUME_CREATED: 9285 reason_str = "volume_created"; 9286 break; 9287 case MPI2_EVENT_IR_CHANGE_RC_VOLUME_DELETED: 9288 reason_str = "volume_deleted"; 9289 break; 9290 case MPI2_EVENT_IR_CHANGE_RC_PD_CREATED: 9291 reason_str = "pd_created"; 9292 break; 9293 case MPI2_EVENT_IR_CHANGE_RC_PD_DELETED: 9294 reason_str = "pd_deleted"; 9295 break; 9296 default: 9297 reason_str = "unknown reason"; 9298 break; 9299 } 9300 element_type = le16_to_cpu(element->ElementFlags) & 9301 MPI2_EVENT_IR_CHANGE_EFLAGS_ELEMENT_TYPE_MASK; 9302 switch (element_type) { 9303 case MPI2_EVENT_IR_CHANGE_EFLAGS_VOLUME_ELEMENT: 9304 element_str = "volume"; 9305 break; 9306 case MPI2_EVENT_IR_CHANGE_EFLAGS_VOLPHYSDISK_ELEMENT: 9307 element_str = "phys disk"; 9308 break; 9309 case MPI2_EVENT_IR_CHANGE_EFLAGS_HOTSPARE_ELEMENT: 9310 element_str = "hot spare"; 9311 break; 9312 default: 9313 element_str = 
"unknown element"; 9314 break; 9315 } 9316 pr_info("\t(%s:%s), vol handle(0x%04x), " \ 9317 "pd handle(0x%04x), pd num(0x%02x)\n", element_str, 9318 reason_str, le16_to_cpu(element->VolDevHandle), 9319 le16_to_cpu(element->PhysDiskDevHandle), 9320 element->PhysDiskNum); 9321 } 9322 } 9323 9324 /** 9325 * _scsih_sas_ir_config_change_event - handle ir configuration change events 9326 * @ioc: per adapter object 9327 * @fw_event: The fw_event_work object 9328 * Context: user. 9329 */ 9330 static void 9331 _scsih_sas_ir_config_change_event(struct MPT3SAS_ADAPTER *ioc, 9332 struct fw_event_work *fw_event) 9333 { 9334 Mpi2EventIrConfigElement_t *element; 9335 int i; 9336 u8 foreign_config; 9337 Mpi2EventDataIrConfigChangeList_t *event_data = 9338 (Mpi2EventDataIrConfigChangeList_t *) 9339 fw_event->event_data; 9340 9341 if ((ioc->logging_level & MPT_DEBUG_EVENT_WORK_TASK) && 9342 (!ioc->hide_ir_msg)) 9343 _scsih_sas_ir_config_change_event_debug(ioc, event_data); 9344 9345 foreign_config = (le32_to_cpu(event_data->Flags) & 9346 MPI2_EVENT_IR_CHANGE_FLAGS_FOREIGN_CONFIG) ? 
1 : 0; 9347 9348 element = (Mpi2EventIrConfigElement_t *)&event_data->ConfigElement[0]; 9349 if (ioc->shost_recovery && 9350 ioc->hba_mpi_version_belonged != MPI2_VERSION) { 9351 for (i = 0; i < event_data->NumElements; i++, element++) { 9352 if (element->ReasonCode == MPI2_EVENT_IR_CHANGE_RC_HIDE) 9353 _scsih_ir_fastpath(ioc, 9354 le16_to_cpu(element->PhysDiskDevHandle), 9355 element->PhysDiskNum); 9356 } 9357 return; 9358 } 9359 9360 for (i = 0; i < event_data->NumElements; i++, element++) { 9361 9362 switch (element->ReasonCode) { 9363 case MPI2_EVENT_IR_CHANGE_RC_VOLUME_CREATED: 9364 case MPI2_EVENT_IR_CHANGE_RC_ADDED: 9365 if (!foreign_config) 9366 _scsih_sas_volume_add(ioc, element); 9367 break; 9368 case MPI2_EVENT_IR_CHANGE_RC_VOLUME_DELETED: 9369 case MPI2_EVENT_IR_CHANGE_RC_REMOVED: 9370 if (!foreign_config) 9371 _scsih_sas_volume_delete(ioc, 9372 le16_to_cpu(element->VolDevHandle)); 9373 break; 9374 case MPI2_EVENT_IR_CHANGE_RC_PD_CREATED: 9375 if (!ioc->is_warpdrive) 9376 _scsih_sas_pd_hide(ioc, element); 9377 break; 9378 case MPI2_EVENT_IR_CHANGE_RC_PD_DELETED: 9379 if (!ioc->is_warpdrive) 9380 _scsih_sas_pd_expose(ioc, element); 9381 break; 9382 case MPI2_EVENT_IR_CHANGE_RC_HIDE: 9383 if (!ioc->is_warpdrive) 9384 _scsih_sas_pd_add(ioc, element); 9385 break; 9386 case MPI2_EVENT_IR_CHANGE_RC_UNHIDE: 9387 if (!ioc->is_warpdrive) 9388 _scsih_sas_pd_delete(ioc, element); 9389 break; 9390 } 9391 } 9392 } 9393 9394 /** 9395 * _scsih_sas_ir_volume_event - IR volume event 9396 * @ioc: per adapter object 9397 * @fw_event: The fw_event_work object 9398 * Context: user. 
9399 */ 9400 static void 9401 _scsih_sas_ir_volume_event(struct MPT3SAS_ADAPTER *ioc, 9402 struct fw_event_work *fw_event) 9403 { 9404 u64 wwid; 9405 unsigned long flags; 9406 struct _raid_device *raid_device; 9407 u16 handle; 9408 u32 state; 9409 int rc; 9410 Mpi2EventDataIrVolume_t *event_data = 9411 (Mpi2EventDataIrVolume_t *) fw_event->event_data; 9412 9413 if (ioc->shost_recovery) 9414 return; 9415 9416 if (event_data->ReasonCode != MPI2_EVENT_IR_VOLUME_RC_STATE_CHANGED) 9417 return; 9418 9419 handle = le16_to_cpu(event_data->VolDevHandle); 9420 state = le32_to_cpu(event_data->NewValue); 9421 if (!ioc->hide_ir_msg) 9422 dewtprintk(ioc, 9423 ioc_info(ioc, "%s: handle(0x%04x), old(0x%08x), new(0x%08x)\n", 9424 __func__, handle, 9425 le32_to_cpu(event_data->PreviousValue), 9426 state)); 9427 switch (state) { 9428 case MPI2_RAID_VOL_STATE_MISSING: 9429 case MPI2_RAID_VOL_STATE_FAILED: 9430 _scsih_sas_volume_delete(ioc, handle); 9431 break; 9432 9433 case MPI2_RAID_VOL_STATE_ONLINE: 9434 case MPI2_RAID_VOL_STATE_DEGRADED: 9435 case MPI2_RAID_VOL_STATE_OPTIMAL: 9436 9437 spin_lock_irqsave(&ioc->raid_device_lock, flags); 9438 raid_device = mpt3sas_raid_device_find_by_handle(ioc, handle); 9439 spin_unlock_irqrestore(&ioc->raid_device_lock, flags); 9440 9441 if (raid_device) 9442 break; 9443 9444 mpt3sas_config_get_volume_wwid(ioc, handle, &wwid); 9445 if (!wwid) { 9446 ioc_err(ioc, "failure at %s:%d/%s()!\n", 9447 __FILE__, __LINE__, __func__); 9448 break; 9449 } 9450 9451 raid_device = kzalloc(sizeof(struct _raid_device), GFP_KERNEL); 9452 if (!raid_device) { 9453 ioc_err(ioc, "failure at %s:%d/%s()!\n", 9454 __FILE__, __LINE__, __func__); 9455 break; 9456 } 9457 9458 raid_device->id = ioc->sas_id++; 9459 raid_device->channel = RAID_CHANNEL; 9460 raid_device->handle = handle; 9461 raid_device->wwid = wwid; 9462 _scsih_raid_device_add(ioc, raid_device); 9463 rc = scsi_add_device(ioc->shost, RAID_CHANNEL, 9464 raid_device->id, 0); 9465 if (rc) 9466 
_scsih_raid_device_remove(ioc, raid_device); 9467 break; 9468 9469 case MPI2_RAID_VOL_STATE_INITIALIZING: 9470 default: 9471 break; 9472 } 9473 } 9474 9475 /** 9476 * _scsih_sas_ir_physical_disk_event - PD event 9477 * @ioc: per adapter object 9478 * @fw_event: The fw_event_work object 9479 * Context: user. 9480 */ 9481 static void 9482 _scsih_sas_ir_physical_disk_event(struct MPT3SAS_ADAPTER *ioc, 9483 struct fw_event_work *fw_event) 9484 { 9485 u16 handle, parent_handle; 9486 u32 state; 9487 struct _sas_device *sas_device; 9488 Mpi2ConfigReply_t mpi_reply; 9489 Mpi2SasDevicePage0_t sas_device_pg0; 9490 u32 ioc_status; 9491 Mpi2EventDataIrPhysicalDisk_t *event_data = 9492 (Mpi2EventDataIrPhysicalDisk_t *) fw_event->event_data; 9493 u64 sas_address; 9494 9495 if (ioc->shost_recovery) 9496 return; 9497 9498 if (event_data->ReasonCode != MPI2_EVENT_IR_PHYSDISK_RC_STATE_CHANGED) 9499 return; 9500 9501 handle = le16_to_cpu(event_data->PhysDiskDevHandle); 9502 state = le32_to_cpu(event_data->NewValue); 9503 9504 if (!ioc->hide_ir_msg) 9505 dewtprintk(ioc, 9506 ioc_info(ioc, "%s: handle(0x%04x), old(0x%08x), new(0x%08x)\n", 9507 __func__, handle, 9508 le32_to_cpu(event_data->PreviousValue), 9509 state)); 9510 9511 switch (state) { 9512 case MPI2_RAID_PD_STATE_ONLINE: 9513 case MPI2_RAID_PD_STATE_DEGRADED: 9514 case MPI2_RAID_PD_STATE_REBUILDING: 9515 case MPI2_RAID_PD_STATE_OPTIMAL: 9516 case MPI2_RAID_PD_STATE_HOT_SPARE: 9517 9518 if (!ioc->is_warpdrive) 9519 set_bit(handle, ioc->pd_handles); 9520 9521 sas_device = mpt3sas_get_sdev_by_handle(ioc, handle); 9522 if (sas_device) { 9523 sas_device_put(sas_device); 9524 return; 9525 } 9526 9527 if ((mpt3sas_config_get_sas_device_pg0(ioc, &mpi_reply, 9528 &sas_device_pg0, MPI2_SAS_DEVICE_PGAD_FORM_HANDLE, 9529 handle))) { 9530 ioc_err(ioc, "failure at %s:%d/%s()!\n", 9531 __FILE__, __LINE__, __func__); 9532 return; 9533 } 9534 9535 ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & 9536 MPI2_IOCSTATUS_MASK; 9537 if (ioc_status 
!= MPI2_IOCSTATUS_SUCCESS) { 9538 ioc_err(ioc, "failure at %s:%d/%s()!\n", 9539 __FILE__, __LINE__, __func__); 9540 return; 9541 } 9542 9543 parent_handle = le16_to_cpu(sas_device_pg0.ParentDevHandle); 9544 if (!_scsih_get_sas_address(ioc, parent_handle, &sas_address)) 9545 mpt3sas_transport_update_links(ioc, sas_address, handle, 9546 sas_device_pg0.PhyNum, MPI2_SAS_NEG_LINK_RATE_1_5, 9547 mpt3sas_get_port_by_id(ioc, 9548 sas_device_pg0.PhysicalPort, 0)); 9549 9550 _scsih_add_device(ioc, handle, 0, 1); 9551 9552 break; 9553 9554 case MPI2_RAID_PD_STATE_OFFLINE: 9555 case MPI2_RAID_PD_STATE_NOT_CONFIGURED: 9556 case MPI2_RAID_PD_STATE_NOT_COMPATIBLE: 9557 default: 9558 break; 9559 } 9560 } 9561 9562 /** 9563 * _scsih_sas_ir_operation_status_event_debug - debug for IR op event 9564 * @ioc: per adapter object 9565 * @event_data: event data payload 9566 * Context: user. 9567 */ 9568 static void 9569 _scsih_sas_ir_operation_status_event_debug(struct MPT3SAS_ADAPTER *ioc, 9570 Mpi2EventDataIrOperationStatus_t *event_data) 9571 { 9572 char *reason_str = NULL; 9573 9574 switch (event_data->RAIDOperation) { 9575 case MPI2_EVENT_IR_RAIDOP_RESYNC: 9576 reason_str = "resync"; 9577 break; 9578 case MPI2_EVENT_IR_RAIDOP_ONLINE_CAP_EXPANSION: 9579 reason_str = "online capacity expansion"; 9580 break; 9581 case MPI2_EVENT_IR_RAIDOP_CONSISTENCY_CHECK: 9582 reason_str = "consistency check"; 9583 break; 9584 case MPI2_EVENT_IR_RAIDOP_BACKGROUND_INIT: 9585 reason_str = "background init"; 9586 break; 9587 case MPI2_EVENT_IR_RAIDOP_MAKE_DATA_CONSISTENT: 9588 reason_str = "make data consistent"; 9589 break; 9590 } 9591 9592 if (!reason_str) 9593 return; 9594 9595 ioc_info(ioc, "raid operational status: (%s)\thandle(0x%04x), percent complete(%d)\n", 9596 reason_str, 9597 le16_to_cpu(event_data->VolDevHandle), 9598 event_data->PercentComplete); 9599 } 9600 9601 /** 9602 * _scsih_sas_ir_operation_status_event - handle RAID operation events 9603 * @ioc: per adapter object 9604 * @fw_event: 
The fw_event_work object 9605 * Context: user. 9606 */ 9607 static void 9608 _scsih_sas_ir_operation_status_event(struct MPT3SAS_ADAPTER *ioc, 9609 struct fw_event_work *fw_event) 9610 { 9611 Mpi2EventDataIrOperationStatus_t *event_data = 9612 (Mpi2EventDataIrOperationStatus_t *) 9613 fw_event->event_data; 9614 static struct _raid_device *raid_device; 9615 unsigned long flags; 9616 u16 handle; 9617 9618 if ((ioc->logging_level & MPT_DEBUG_EVENT_WORK_TASK) && 9619 (!ioc->hide_ir_msg)) 9620 _scsih_sas_ir_operation_status_event_debug(ioc, 9621 event_data); 9622 9623 /* code added for raid transport support */ 9624 if (event_data->RAIDOperation == MPI2_EVENT_IR_RAIDOP_RESYNC) { 9625 9626 spin_lock_irqsave(&ioc->raid_device_lock, flags); 9627 handle = le16_to_cpu(event_data->VolDevHandle); 9628 raid_device = mpt3sas_raid_device_find_by_handle(ioc, handle); 9629 if (raid_device) 9630 raid_device->percent_complete = 9631 event_data->PercentComplete; 9632 spin_unlock_irqrestore(&ioc->raid_device_lock, flags); 9633 } 9634 } 9635 9636 /** 9637 * _scsih_prep_device_scan - initialize parameters prior to device scan 9638 * @ioc: per adapter object 9639 * 9640 * Set the deleted flag prior to device scan. If the device is found during 9641 * the scan, then we clear the deleted flag. 9642 */ 9643 static void 9644 _scsih_prep_device_scan(struct MPT3SAS_ADAPTER *ioc) 9645 { 9646 struct MPT3SAS_DEVICE *sas_device_priv_data; 9647 struct scsi_device *sdev; 9648 9649 shost_for_each_device(sdev, ioc->shost) { 9650 sas_device_priv_data = sdev->hostdata; 9651 if (sas_device_priv_data && sas_device_priv_data->sas_target) 9652 sas_device_priv_data->sas_target->deleted = 1; 9653 } 9654 } 9655 9656 /** 9657 * _scsih_update_device_qdepth - Update QD during Reset. 
9658 * @ioc: per adapter object 9659 * 9660 */ 9661 static void 9662 _scsih_update_device_qdepth(struct MPT3SAS_ADAPTER *ioc) 9663 { 9664 struct MPT3SAS_DEVICE *sas_device_priv_data; 9665 struct MPT3SAS_TARGET *sas_target_priv_data; 9666 struct _sas_device *sas_device; 9667 struct scsi_device *sdev; 9668 u16 qdepth; 9669 9670 ioc_info(ioc, "Update devices with firmware reported queue depth\n"); 9671 shost_for_each_device(sdev, ioc->shost) { 9672 sas_device_priv_data = sdev->hostdata; 9673 if (sas_device_priv_data && sas_device_priv_data->sas_target) { 9674 sas_target_priv_data = sas_device_priv_data->sas_target; 9675 sas_device = sas_device_priv_data->sas_target->sas_dev; 9676 if (sas_target_priv_data->flags & MPT_TARGET_FLAGS_PCIE_DEVICE) 9677 qdepth = ioc->max_nvme_qd; 9678 else if (sas_device && 9679 sas_device->device_info & MPI2_SAS_DEVICE_INFO_SSP_TARGET) 9680 qdepth = (sas_device->port_type > 1) ? 9681 ioc->max_wideport_qd : ioc->max_narrowport_qd; 9682 else if (sas_device && 9683 sas_device->device_info & MPI2_SAS_DEVICE_INFO_SATA_DEVICE) 9684 qdepth = ioc->max_sata_qd; 9685 else 9686 continue; 9687 mpt3sas_scsih_change_queue_depth(sdev, qdepth); 9688 } 9689 } 9690 } 9691 9692 /** 9693 * _scsih_mark_responding_sas_device - mark a sas_devices as responding 9694 * @ioc: per adapter object 9695 * @sas_device_pg0: SAS Device page 0 9696 * 9697 * After host reset, find out whether devices are still responding. 9698 * Used in _scsih_remove_unresponsive_sas_devices. 
9699 */ 9700 static void 9701 _scsih_mark_responding_sas_device(struct MPT3SAS_ADAPTER *ioc, 9702 Mpi2SasDevicePage0_t *sas_device_pg0) 9703 { 9704 struct MPT3SAS_TARGET *sas_target_priv_data = NULL; 9705 struct scsi_target *starget; 9706 struct _sas_device *sas_device = NULL; 9707 struct _enclosure_node *enclosure_dev = NULL; 9708 unsigned long flags; 9709 struct hba_port *port = mpt3sas_get_port_by_id( 9710 ioc, sas_device_pg0->PhysicalPort, 0); 9711 9712 if (sas_device_pg0->EnclosureHandle) { 9713 enclosure_dev = 9714 mpt3sas_scsih_enclosure_find_by_handle(ioc, 9715 le16_to_cpu(sas_device_pg0->EnclosureHandle)); 9716 if (enclosure_dev == NULL) 9717 ioc_info(ioc, "Enclosure handle(0x%04x) doesn't match with enclosure device!\n", 9718 sas_device_pg0->EnclosureHandle); 9719 } 9720 spin_lock_irqsave(&ioc->sas_device_lock, flags); 9721 list_for_each_entry(sas_device, &ioc->sas_device_list, list) { 9722 if (sas_device->sas_address != le64_to_cpu( 9723 sas_device_pg0->SASAddress)) 9724 continue; 9725 if (sas_device->slot != le16_to_cpu(sas_device_pg0->Slot)) 9726 continue; 9727 if (sas_device->port != port) 9728 continue; 9729 sas_device->responding = 1; 9730 starget = sas_device->starget; 9731 if (starget && starget->hostdata) { 9732 sas_target_priv_data = starget->hostdata; 9733 sas_target_priv_data->tm_busy = 0; 9734 sas_target_priv_data->deleted = 0; 9735 } else 9736 sas_target_priv_data = NULL; 9737 if (starget) { 9738 starget_printk(KERN_INFO, starget, 9739 "handle(0x%04x), sas_addr(0x%016llx)\n", 9740 le16_to_cpu(sas_device_pg0->DevHandle), 9741 (unsigned long long) 9742 sas_device->sas_address); 9743 9744 if (sas_device->enclosure_handle != 0) 9745 starget_printk(KERN_INFO, starget, 9746 "enclosure logical id(0x%016llx), slot(%d)\n", 9747 (unsigned long long) 9748 sas_device->enclosure_logical_id, 9749 sas_device->slot); 9750 } 9751 if (le16_to_cpu(sas_device_pg0->Flags) & 9752 MPI2_SAS_DEVICE0_FLAGS_ENCL_LEVEL_VALID) { 9753 sas_device->enclosure_level = 9754 
sas_device_pg0->EnclosureLevel; 9755 memcpy(&sas_device->connector_name[0], 9756 &sas_device_pg0->ConnectorName[0], 4); 9757 } else { 9758 sas_device->enclosure_level = 0; 9759 sas_device->connector_name[0] = '\0'; 9760 } 9761 9762 sas_device->enclosure_handle = 9763 le16_to_cpu(sas_device_pg0->EnclosureHandle); 9764 sas_device->is_chassis_slot_valid = 0; 9765 if (enclosure_dev) { 9766 sas_device->enclosure_logical_id = le64_to_cpu( 9767 enclosure_dev->pg0.EnclosureLogicalID); 9768 if (le16_to_cpu(enclosure_dev->pg0.Flags) & 9769 MPI2_SAS_ENCLS0_FLAGS_CHASSIS_SLOT_VALID) { 9770 sas_device->is_chassis_slot_valid = 1; 9771 sas_device->chassis_slot = 9772 enclosure_dev->pg0.ChassisSlot; 9773 } 9774 } 9775 9776 if (sas_device->handle == le16_to_cpu( 9777 sas_device_pg0->DevHandle)) 9778 goto out; 9779 pr_info("\thandle changed from(0x%04x)!!!\n", 9780 sas_device->handle); 9781 sas_device->handle = le16_to_cpu( 9782 sas_device_pg0->DevHandle); 9783 if (sas_target_priv_data) 9784 sas_target_priv_data->handle = 9785 le16_to_cpu(sas_device_pg0->DevHandle); 9786 goto out; 9787 } 9788 out: 9789 spin_unlock_irqrestore(&ioc->sas_device_lock, flags); 9790 } 9791 9792 /** 9793 * _scsih_create_enclosure_list_after_reset - Free Existing list, 9794 * And create enclosure list by scanning all Enclosure Page(0)s 9795 * @ioc: per adapter object 9796 */ 9797 static void 9798 _scsih_create_enclosure_list_after_reset(struct MPT3SAS_ADAPTER *ioc) 9799 { 9800 struct _enclosure_node *enclosure_dev; 9801 Mpi2ConfigReply_t mpi_reply; 9802 u16 enclosure_handle; 9803 int rc; 9804 9805 /* Free existing enclosure list */ 9806 mpt3sas_free_enclosure_list(ioc); 9807 9808 /* Re constructing enclosure list after reset*/ 9809 enclosure_handle = 0xFFFF; 9810 do { 9811 enclosure_dev = 9812 kzalloc(sizeof(struct _enclosure_node), GFP_KERNEL); 9813 if (!enclosure_dev) { 9814 ioc_err(ioc, "failure at %s:%d/%s()!\n", 9815 __FILE__, __LINE__, __func__); 9816 return; 9817 } 9818 rc = 
mpt3sas_config_get_enclosure_pg0(ioc, &mpi_reply, 9819 &enclosure_dev->pg0, 9820 MPI2_SAS_ENCLOS_PGAD_FORM_GET_NEXT_HANDLE, 9821 enclosure_handle); 9822 9823 if (rc || (le16_to_cpu(mpi_reply.IOCStatus) & 9824 MPI2_IOCSTATUS_MASK)) { 9825 kfree(enclosure_dev); 9826 return; 9827 } 9828 list_add_tail(&enclosure_dev->list, 9829 &ioc->enclosure_list); 9830 enclosure_handle = 9831 le16_to_cpu(enclosure_dev->pg0.EnclosureHandle); 9832 } while (1); 9833 } 9834 9835 /** 9836 * _scsih_search_responding_sas_devices - 9837 * @ioc: per adapter object 9838 * 9839 * After host reset, find out whether devices are still responding. 9840 * If not remove. 9841 */ 9842 static void 9843 _scsih_search_responding_sas_devices(struct MPT3SAS_ADAPTER *ioc) 9844 { 9845 Mpi2SasDevicePage0_t sas_device_pg0; 9846 Mpi2ConfigReply_t mpi_reply; 9847 u16 ioc_status; 9848 u16 handle; 9849 u32 device_info; 9850 9851 ioc_info(ioc, "search for end-devices: start\n"); 9852 9853 if (list_empty(&ioc->sas_device_list)) 9854 goto out; 9855 9856 handle = 0xFFFF; 9857 while (!(mpt3sas_config_get_sas_device_pg0(ioc, &mpi_reply, 9858 &sas_device_pg0, MPI2_SAS_DEVICE_PGAD_FORM_GET_NEXT_HANDLE, 9859 handle))) { 9860 ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & 9861 MPI2_IOCSTATUS_MASK; 9862 if (ioc_status != MPI2_IOCSTATUS_SUCCESS) 9863 break; 9864 handle = le16_to_cpu(sas_device_pg0.DevHandle); 9865 device_info = le32_to_cpu(sas_device_pg0.DeviceInfo); 9866 if (!(_scsih_is_end_device(device_info))) 9867 continue; 9868 _scsih_mark_responding_sas_device(ioc, &sas_device_pg0); 9869 } 9870 9871 out: 9872 ioc_info(ioc, "search for end-devices: complete\n"); 9873 } 9874 9875 /** 9876 * _scsih_mark_responding_pcie_device - mark a pcie_device as responding 9877 * @ioc: per adapter object 9878 * @pcie_device_pg0: PCIe Device page 0 9879 * 9880 * After host reset, find out whether devices are still responding. 9881 * Used in _scsih_remove_unresponding_devices. 
9882 */ 9883 static void 9884 _scsih_mark_responding_pcie_device(struct MPT3SAS_ADAPTER *ioc, 9885 Mpi26PCIeDevicePage0_t *pcie_device_pg0) 9886 { 9887 struct MPT3SAS_TARGET *sas_target_priv_data = NULL; 9888 struct scsi_target *starget; 9889 struct _pcie_device *pcie_device; 9890 unsigned long flags; 9891 9892 spin_lock_irqsave(&ioc->pcie_device_lock, flags); 9893 list_for_each_entry(pcie_device, &ioc->pcie_device_list, list) { 9894 if ((pcie_device->wwid == le64_to_cpu(pcie_device_pg0->WWID)) 9895 && (pcie_device->slot == le16_to_cpu( 9896 pcie_device_pg0->Slot))) { 9897 pcie_device->access_status = 9898 pcie_device_pg0->AccessStatus; 9899 pcie_device->responding = 1; 9900 starget = pcie_device->starget; 9901 if (starget && starget->hostdata) { 9902 sas_target_priv_data = starget->hostdata; 9903 sas_target_priv_data->tm_busy = 0; 9904 sas_target_priv_data->deleted = 0; 9905 } else 9906 sas_target_priv_data = NULL; 9907 if (starget) { 9908 starget_printk(KERN_INFO, starget, 9909 "handle(0x%04x), wwid(0x%016llx) ", 9910 pcie_device->handle, 9911 (unsigned long long)pcie_device->wwid); 9912 if (pcie_device->enclosure_handle != 0) 9913 starget_printk(KERN_INFO, starget, 9914 "enclosure logical id(0x%016llx), " 9915 "slot(%d)\n", 9916 (unsigned long long) 9917 pcie_device->enclosure_logical_id, 9918 pcie_device->slot); 9919 } 9920 9921 if (((le32_to_cpu(pcie_device_pg0->Flags)) & 9922 MPI26_PCIEDEV0_FLAGS_ENCL_LEVEL_VALID) && 9923 (ioc->hba_mpi_version_belonged != MPI2_VERSION)) { 9924 pcie_device->enclosure_level = 9925 pcie_device_pg0->EnclosureLevel; 9926 memcpy(&pcie_device->connector_name[0], 9927 &pcie_device_pg0->ConnectorName[0], 4); 9928 } else { 9929 pcie_device->enclosure_level = 0; 9930 pcie_device->connector_name[0] = '\0'; 9931 } 9932 9933 if (pcie_device->handle == le16_to_cpu( 9934 pcie_device_pg0->DevHandle)) 9935 goto out; 9936 pr_info("\thandle changed from(0x%04x)!!!\n", 9937 pcie_device->handle); 9938 pcie_device->handle = le16_to_cpu( 9939 
pcie_device_pg0->DevHandle); 9940 if (sas_target_priv_data) 9941 sas_target_priv_data->handle = 9942 le16_to_cpu(pcie_device_pg0->DevHandle); 9943 goto out; 9944 } 9945 } 9946 9947 out: 9948 spin_unlock_irqrestore(&ioc->pcie_device_lock, flags); 9949 } 9950 9951 /** 9952 * _scsih_search_responding_pcie_devices - 9953 * @ioc: per adapter object 9954 * 9955 * After host reset, find out whether devices are still responding. 9956 * If not remove. 9957 */ 9958 static void 9959 _scsih_search_responding_pcie_devices(struct MPT3SAS_ADAPTER *ioc) 9960 { 9961 Mpi26PCIeDevicePage0_t pcie_device_pg0; 9962 Mpi2ConfigReply_t mpi_reply; 9963 u16 ioc_status; 9964 u16 handle; 9965 u32 device_info; 9966 9967 ioc_info(ioc, "search for end-devices: start\n"); 9968 9969 if (list_empty(&ioc->pcie_device_list)) 9970 goto out; 9971 9972 handle = 0xFFFF; 9973 while (!(mpt3sas_config_get_pcie_device_pg0(ioc, &mpi_reply, 9974 &pcie_device_pg0, MPI26_PCIE_DEVICE_PGAD_FORM_GET_NEXT_HANDLE, 9975 handle))) { 9976 ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & 9977 MPI2_IOCSTATUS_MASK; 9978 if (ioc_status != MPI2_IOCSTATUS_SUCCESS) { 9979 ioc_info(ioc, "\tbreak from %s: ioc_status(0x%04x), loginfo(0x%08x)\n", 9980 __func__, ioc_status, 9981 le32_to_cpu(mpi_reply.IOCLogInfo)); 9982 break; 9983 } 9984 handle = le16_to_cpu(pcie_device_pg0.DevHandle); 9985 device_info = le32_to_cpu(pcie_device_pg0.DeviceInfo); 9986 if (!(_scsih_is_nvme_pciescsi_device(device_info))) 9987 continue; 9988 _scsih_mark_responding_pcie_device(ioc, &pcie_device_pg0); 9989 } 9990 out: 9991 ioc_info(ioc, "search for PCIe end-devices: complete\n"); 9992 } 9993 9994 /** 9995 * _scsih_mark_responding_raid_device - mark a raid_device as responding 9996 * @ioc: per adapter object 9997 * @wwid: world wide identifier for raid volume 9998 * @handle: device handle 9999 * 10000 * After host reset, find out whether devices are still responding. 10001 * Used in _scsih_remove_unresponsive_raid_devices. 
10002 */ 10003 static void 10004 _scsih_mark_responding_raid_device(struct MPT3SAS_ADAPTER *ioc, u64 wwid, 10005 u16 handle) 10006 { 10007 struct MPT3SAS_TARGET *sas_target_priv_data = NULL; 10008 struct scsi_target *starget; 10009 struct _raid_device *raid_device; 10010 unsigned long flags; 10011 10012 spin_lock_irqsave(&ioc->raid_device_lock, flags); 10013 list_for_each_entry(raid_device, &ioc->raid_device_list, list) { 10014 if (raid_device->wwid == wwid && raid_device->starget) { 10015 starget = raid_device->starget; 10016 if (starget && starget->hostdata) { 10017 sas_target_priv_data = starget->hostdata; 10018 sas_target_priv_data->deleted = 0; 10019 } else 10020 sas_target_priv_data = NULL; 10021 raid_device->responding = 1; 10022 spin_unlock_irqrestore(&ioc->raid_device_lock, flags); 10023 starget_printk(KERN_INFO, raid_device->starget, 10024 "handle(0x%04x), wwid(0x%016llx)\n", handle, 10025 (unsigned long long)raid_device->wwid); 10026 10027 /* 10028 * WARPDRIVE: The handles of the PDs might have changed 10029 * across the host reset so re-initialize the 10030 * required data for Direct IO 10031 */ 10032 mpt3sas_init_warpdrive_properties(ioc, raid_device); 10033 spin_lock_irqsave(&ioc->raid_device_lock, flags); 10034 if (raid_device->handle == handle) { 10035 spin_unlock_irqrestore(&ioc->raid_device_lock, 10036 flags); 10037 return; 10038 } 10039 pr_info("\thandle changed from(0x%04x)!!!\n", 10040 raid_device->handle); 10041 raid_device->handle = handle; 10042 if (sas_target_priv_data) 10043 sas_target_priv_data->handle = handle; 10044 spin_unlock_irqrestore(&ioc->raid_device_lock, flags); 10045 return; 10046 } 10047 } 10048 spin_unlock_irqrestore(&ioc->raid_device_lock, flags); 10049 } 10050 10051 /** 10052 * _scsih_search_responding_raid_devices - 10053 * @ioc: per adapter object 10054 * 10055 * After host reset, find out whether devices are still responding. 10056 * If not remove. 
 */
static void
_scsih_search_responding_raid_devices(struct MPT3SAS_ADAPTER *ioc)
{
	Mpi2RaidVolPage1_t volume_pg1;
	Mpi2RaidVolPage0_t volume_pg0;
	Mpi2RaidPhysDiskPage0_t pd_pg0;
	Mpi2ConfigReply_t mpi_reply;
	u16 ioc_status;
	u16 handle;
	u8 phys_disk_num;

	/* nothing to do unless the IR (Integrated RAID) firmware is loaded */
	if (!ioc->ir_firmware)
		return;

	ioc_info(ioc, "search for raid volumes: start\n");

	if (list_empty(&ioc->raid_device_list))
		goto out;

	/* walk all RAID volumes via GET_NEXT_HANDLE, starting from 0xFFFF */
	handle = 0xFFFF;
	while (!(mpt3sas_config_get_raid_volume_pg1(ioc, &mpi_reply,
	    &volume_pg1, MPI2_RAID_VOLUME_PGAD_FORM_GET_NEXT_HANDLE, handle))) {
		ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
		    MPI2_IOCSTATUS_MASK;
		if (ioc_status != MPI2_IOCSTATUS_SUCCESS)
			break;
		handle = le16_to_cpu(volume_pg1.DevHandle);

		if (mpt3sas_config_get_raid_volume_pg0(ioc, &mpi_reply,
		    &volume_pg0, MPI2_RAID_VOLUME_PGAD_FORM_HANDLE, handle,
		    sizeof(Mpi2RaidVolPage0_t)))
			continue;

		/* only volumes in a usable state are marked responding */
		if (volume_pg0.VolumeState == MPI2_RAID_VOL_STATE_OPTIMAL ||
		    volume_pg0.VolumeState == MPI2_RAID_VOL_STATE_ONLINE ||
		    volume_pg0.VolumeState == MPI2_RAID_VOL_STATE_DEGRADED)
			_scsih_mark_responding_raid_device(ioc,
			    le64_to_cpu(volume_pg1.WWID), handle);
	}

	/* refresh the pd_handles */
	if (!ioc->is_warpdrive) {
		phys_disk_num = 0xFF;
		memset(ioc->pd_handles, 0, ioc->pd_handles_sz);
		while (!(mpt3sas_config_get_phys_disk_pg0(ioc, &mpi_reply,
		    &pd_pg0, MPI2_PHYSDISK_PGAD_FORM_GET_NEXT_PHYSDISKNUM,
		    phys_disk_num))) {
			ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
			    MPI2_IOCSTATUS_MASK;
			if (ioc_status != MPI2_IOCSTATUS_SUCCESS)
				break;
			phys_disk_num = pd_pg0.PhysDiskNum;
			handle = le16_to_cpu(pd_pg0.DevHandle);
			set_bit(handle, ioc->pd_handles);
		}
	}
 out:
	ioc_info(ioc, "search for responding raid volumes: complete\n");
}

/**
 * _scsih_mark_responding_expander - mark a expander as responding
 * @ioc: per adapter object
 * @expander_pg0: SAS Expander Config Page0
 *
 * After host reset, find out whether devices are still responding.
 * Used in _scsih_remove_unresponsive_expanders.
 */
static void
_scsih_mark_responding_expander(struct MPT3SAS_ADAPTER *ioc,
	Mpi2ExpanderPage0_t *expander_pg0)
{
	struct _sas_node *sas_expander = NULL;
	unsigned long flags;
	int i;
	struct _enclosure_node *enclosure_dev = NULL;
	u16 handle = le16_to_cpu(expander_pg0->DevHandle);
	u16 enclosure_handle = le16_to_cpu(expander_pg0->EnclosureHandle);
	u64 sas_address = le64_to_cpu(expander_pg0->SASAddress);
	struct hba_port *port = mpt3sas_get_port_by_id(
	    ioc, expander_pg0->PhysicalPort, 0);

	if (enclosure_handle)
		enclosure_dev =
			mpt3sas_scsih_enclosure_find_by_handle(ioc,
			    enclosure_handle);

	spin_lock_irqsave(&ioc->sas_node_lock, flags);
	list_for_each_entry(sas_expander, &ioc->sas_expander_list, list) {
		/* match on both SAS address and hba_port */
		if (sas_expander->sas_address != sas_address)
			continue;
		if (sas_expander->port != port)
			continue;
		sas_expander->responding = 1;

		if (enclosure_dev) {
			sas_expander->enclosure_logical_id =
			    le64_to_cpu(enclosure_dev->pg0.EnclosureLogicalID);
			sas_expander->enclosure_handle =
			    le16_to_cpu(expander_pg0->EnclosureHandle);
		}

		if (sas_expander->handle == handle)
			goto out;
		/* firmware may assign a new handle after reset; propagate it
		 * to the cached expander object and all of its phys
		 */
		pr_info("\texpander(0x%016llx): handle changed" \
		    " from(0x%04x) to (0x%04x)!!!\n",
		    (unsigned long long)sas_expander->sas_address,
		    sas_expander->handle, handle);
		sas_expander->handle = handle;
		for (i = 0 ; i < sas_expander->num_phys ; i++)
			sas_expander->phy[i].handle = handle;
		goto out;
	}
 out:
	spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
}

/**
 * _scsih_search_responding_expanders -
 * @ioc: per adapter object
 *
 * After host reset, find out whether devices are still responding.
 * If not remove.
 */
static void
_scsih_search_responding_expanders(struct MPT3SAS_ADAPTER *ioc)
{
	Mpi2ExpanderPage0_t expander_pg0;
	Mpi2ConfigReply_t mpi_reply;
	u16 ioc_status;
	u64 sas_address;
	u16 handle;
	u8 port;

	ioc_info(ioc, "search for expanders: start\n");

	if (list_empty(&ioc->sas_expander_list))
		goto out;

	/* iterate firmware's expander list via GET_NEXT_HNDL */
	handle = 0xFFFF;
	while (!(mpt3sas_config_get_expander_pg0(ioc, &mpi_reply, &expander_pg0,
	    MPI2_SAS_EXPAND_PGAD_FORM_GET_NEXT_HNDL, handle))) {

		ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
		    MPI2_IOCSTATUS_MASK;
		if (ioc_status != MPI2_IOCSTATUS_SUCCESS)
			break;

		handle = le16_to_cpu(expander_pg0.DevHandle);
		sas_address = le64_to_cpu(expander_pg0.SASAddress);
		port = expander_pg0.PhysicalPort;
		/* the real port id is only meaningful with multipath_on_hba */
		pr_info(
		    "\texpander present: handle(0x%04x), sas_addr(0x%016llx), port:%d\n",
		    handle, (unsigned long long)sas_address,
		    (ioc->multipath_on_hba ?
		    port : MULTIPATH_DISABLED_PORT_ID));
		_scsih_mark_responding_expander(ioc, &expander_pg0);
	}

 out:
	ioc_info(ioc, "search for expanders: complete\n");
}

/**
 * _scsih_remove_unresponding_devices - removing unresponding devices
 * @ioc: per adapter object
 */
static void
_scsih_remove_unresponding_devices(struct MPT3SAS_ADAPTER *ioc)
{
	struct _sas_device *sas_device, *sas_device_next;
	struct _sas_node *sas_expander, *sas_expander_next;
	struct _raid_device *raid_device, *raid_device_next;
	struct _pcie_device *pcie_device, *pcie_device_next;
	struct list_head tmp_list;
	unsigned long flags;
	LIST_HEAD(head);

	ioc_info(ioc, "removing unresponding devices: start\n");

	/* removing unresponding end devices */
	ioc_info(ioc, "removing unresponding devices: end-devices\n");
	/*
	 * Iterate, pulling off devices marked as non-responding. We become the
	 * owner for the reference the list had on any object we prune.
	 */
	spin_lock_irqsave(&ioc->sas_device_lock, flags);

	/*
	 * Clean up the sas_device_init_list list as
	 * driver goes for fresh scan as part of diag reset.
	 */
	list_for_each_entry_safe(sas_device, sas_device_next,
	    &ioc->sas_device_init_list, list) {
		list_del_init(&sas_device->list);
		sas_device_put(sas_device);
	}

	list_for_each_entry_safe(sas_device, sas_device_next,
	    &ioc->sas_device_list, list) {
		if (!sas_device->responding)
			list_move_tail(&sas_device->list, &head);
		else
			sas_device->responding = 0;
	}
	spin_unlock_irqrestore(&ioc->sas_device_lock, flags);

	/*
	 * Now, uninitialize and remove the unresponding devices we pruned.
	 * (done outside the spinlock; we own the pruned references)
	 */
	list_for_each_entry_safe(sas_device, sas_device_next, &head, list) {
		_scsih_remove_device(ioc, sas_device);
		list_del_init(&sas_device->list);
		sas_device_put(sas_device);
	}

	ioc_info(ioc, "Removing unresponding devices: pcie end-devices\n");
	INIT_LIST_HEAD(&head);
	spin_lock_irqsave(&ioc->pcie_device_lock, flags);
	/*
	 * Clean up the pcie_device_init_list list as
	 * driver goes for fresh scan as part of diag reset.
	 */
	list_for_each_entry_safe(pcie_device, pcie_device_next,
	    &ioc->pcie_device_init_list, list) {
		list_del_init(&pcie_device->list);
		pcie_device_put(pcie_device);
	}

	list_for_each_entry_safe(pcie_device, pcie_device_next,
	    &ioc->pcie_device_list, list) {
		if (!pcie_device->responding)
			list_move_tail(&pcie_device->list, &head);
		else
			pcie_device->responding = 0;
	}
	spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);

	list_for_each_entry_safe(pcie_device, pcie_device_next, &head, list) {
		_scsih_pcie_device_remove_from_sml(ioc, pcie_device);
		list_del_init(&pcie_device->list);
		pcie_device_put(pcie_device);
	}

	/* removing unresponding volumes */
	if (ioc->ir_firmware) {
		ioc_info(ioc, "removing unresponding devices: volumes\n");
		list_for_each_entry_safe(raid_device, raid_device_next,
		    &ioc->raid_device_list, list) {
			if (!raid_device->responding)
				_scsih_sas_volume_delete(ioc,
				    raid_device->handle);
			else
				raid_device->responding = 0;
		}
	}

	/* removing unresponding expanders */
	ioc_info(ioc, "removing unresponding devices: expanders\n");
	spin_lock_irqsave(&ioc->sas_node_lock, flags);
	INIT_LIST_HEAD(&tmp_list);
	list_for_each_entry_safe(sas_expander, sas_expander_next,
	    &ioc->sas_expander_list, list) {
		if (!sas_expander->responding)
			list_move_tail(&sas_expander->list, &tmp_list);
		else
			sas_expander->responding = 0;
	}
	spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
	list_for_each_entry_safe(sas_expander, sas_expander_next, &tmp_list,
	    list) {
		_scsih_expander_node_remove(ioc, sas_expander);
	}

	ioc_info(ioc, "removing unresponding devices: complete\n");

	/* unblock devices */
	_scsih_ublock_io_all_device(ioc);
}

/**
 * _scsih_refresh_expander_links - refresh an expander's phy links
 * @ioc: per adapter object
 * @sas_expander: the expander's cached _sas_node object
 * @handle: expander device handle
 *
 * Re-reads SAS Expander Page 1 for each phy of the expander and pushes
 * the attached device handle and negotiated link rate to the SAS
 * transport layer.  Bails out on the first config-page read failure.
 */
static void
_scsih_refresh_expander_links(struct MPT3SAS_ADAPTER *ioc,
	struct _sas_node *sas_expander, u16 handle)
{
	Mpi2ExpanderPage1_t expander_pg1;
	Mpi2ConfigReply_t mpi_reply;
	int i;

	for (i = 0 ; i < sas_expander->num_phys ; i++) {
		if ((mpt3sas_config_get_expander_pg1(ioc, &mpi_reply,
		    &expander_pg1, i, handle))) {
			ioc_err(ioc, "failure at %s:%d/%s()!\n",
			    __FILE__, __LINE__, __func__);
			return;
		}

		mpt3sas_transport_update_links(ioc, sas_expander->sas_address,
		    le16_to_cpu(expander_pg1.AttachedDevHandle), i,
		    expander_pg1.NegotiatedLinkRate >> 4,
		    sas_expander->port);
	}
}

/**
 * _scsih_scan_for_devices_after_reset - scan for devices after host reset
 * @ioc: per adapter object
 */
static void
_scsih_scan_for_devices_after_reset(struct MPT3SAS_ADAPTER *ioc)
{
	Mpi2ExpanderPage0_t expander_pg0;
	Mpi2SasDevicePage0_t sas_device_pg0;
	Mpi26PCIeDevicePage0_t pcie_device_pg0;
	Mpi2RaidVolPage1_t volume_pg1;
	Mpi2RaidVolPage0_t volume_pg0;
	Mpi2RaidPhysDiskPage0_t pd_pg0;
	Mpi2EventIrConfigElement_t element;
	Mpi2ConfigReply_t mpi_reply;
	u8 phys_disk_num, port_id;
	u16 ioc_status;
	u16 handle, parent_handle;
	u64 sas_address;
	struct _sas_device *sas_device;
	struct _pcie_device *pcie_device;
	struct _sas_node *expander_device;
	/* NOTE(review): 'static' makes this pointer persist across calls;
	 * it is always reassigned before use below, so the qualifier looks
	 * unnecessary — confirm before removing.
	 */
	static struct _raid_device *raid_device;
	u8 retry_count;
	unsigned long flags;

	ioc_info(ioc, "scan devices: start\n");

	_scsih_sas_host_refresh(ioc);

	ioc_info(ioc, "\tscan devices: expanders start\n");

	/* expanders */
	handle = 0xFFFF;
	while (!(mpt3sas_config_get_expander_pg0(ioc, &mpi_reply, &expander_pg0,
	    MPI2_SAS_EXPAND_PGAD_FORM_GET_NEXT_HNDL, handle))) {
		ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
		    MPI2_IOCSTATUS_MASK;
		if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
			ioc_info(ioc, "\tbreak from expander scan: ioc_status(0x%04x), loginfo(0x%08x)\n",
			    ioc_status, le32_to_cpu(mpi_reply.IOCLogInfo));
			break;
		}
		handle = le16_to_cpu(expander_pg0.DevHandle);
		spin_lock_irqsave(&ioc->sas_node_lock, flags);
		port_id = expander_pg0.PhysicalPort;
		expander_device = mpt3sas_scsih_expander_find_by_sas_address(
		    ioc, le64_to_cpu(expander_pg0.SASAddress),
		    mpt3sas_get_port_by_id(ioc, port_id, 0));
		spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
		/* known expander: refresh its links; unknown: add it fresh */
		if (expander_device)
			_scsih_refresh_expander_links(ioc, expander_device,
			    handle);
		else {
			ioc_info(ioc, "\tBEFORE adding expander: handle (0x%04x), sas_addr(0x%016llx)\n",
			    handle,
			    (u64)le64_to_cpu(expander_pg0.SASAddress));
			_scsih_expander_add(ioc, handle);
			ioc_info(ioc, "\tAFTER adding expander: handle (0x%04x), sas_addr(0x%016llx)\n",
			    handle,
			    (u64)le64_to_cpu(expander_pg0.SASAddress));
		}
	}

	ioc_info(ioc, "\tscan devices: expanders complete\n");

	if (!ioc->ir_firmware)
		goto skip_to_sas;

	ioc_info(ioc, "\tscan devices: phys disk start\n");

	/* phys disk */
	phys_disk_num = 0xFF;
	while (!(mpt3sas_config_get_phys_disk_pg0(ioc, &mpi_reply,
	    &pd_pg0, MPI2_PHYSDISK_PGAD_FORM_GET_NEXT_PHYSDISKNUM,
	    phys_disk_num))) {
		ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
		    MPI2_IOCSTATUS_MASK;
		if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
			ioc_info(ioc, "\tbreak from phys disk scan: ioc_status(0x%04x), loginfo(0x%08x)\n",
			    ioc_status, le32_to_cpu(mpi_reply.IOCLogInfo));
			break;
		}
		phys_disk_num = pd_pg0.PhysDiskNum;
		handle = le16_to_cpu(pd_pg0.DevHandle);
		sas_device = mpt3sas_get_sdev_by_handle(ioc, handle);
		if (sas_device) {
			/* already known; drop the lookup reference */
			sas_device_put(sas_device);
			continue;
		}
		if (mpt3sas_config_get_sas_device_pg0(ioc, &mpi_reply,
		    &sas_device_pg0, MPI2_SAS_DEVICE_PGAD_FORM_HANDLE,
		    handle) != 0)
			continue;
		ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
		    MPI2_IOCSTATUS_MASK;
		if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
			ioc_info(ioc, "\tbreak from phys disk scan ioc_status(0x%04x), loginfo(0x%08x)\n",
			    ioc_status, le32_to_cpu(mpi_reply.IOCLogInfo));
			break;
		}
		parent_handle = le16_to_cpu(sas_device_pg0.ParentDevHandle);
		if (!_scsih_get_sas_address(ioc, parent_handle,
		    &sas_address)) {
			ioc_info(ioc, "\tBEFORE adding phys disk: handle (0x%04x), sas_addr(0x%016llx)\n",
			    handle,
			    (u64)le64_to_cpu(sas_device_pg0.SASAddress));
			port_id = sas_device_pg0.PhysicalPort;
			mpt3sas_transport_update_links(ioc, sas_address,
			    handle, sas_device_pg0.PhyNum,
			    MPI2_SAS_NEG_LINK_RATE_1_5,
			    mpt3sas_get_port_by_id(ioc, port_id, 0));
			set_bit(handle, ioc->pd_handles);
			retry_count = 0;
			/* This will retry adding the end device.
			 * _scsih_add_device() will decide on retries and
			 * return "1" when it should be retried
			 */
			while (_scsih_add_device(ioc, handle, retry_count++,
			    1)) {
				ssleep(1);
			}
			ioc_info(ioc, "\tAFTER adding phys disk: handle (0x%04x), sas_addr(0x%016llx)\n",
			    handle,
			    (u64)le64_to_cpu(sas_device_pg0.SASAddress));
		}
	}

	ioc_info(ioc, "\tscan devices: phys disk complete\n");

	ioc_info(ioc, "\tscan devices: volumes start\n");

	/* volumes */
	handle = 0xFFFF;
	while (!(mpt3sas_config_get_raid_volume_pg1(ioc, &mpi_reply,
	    &volume_pg1, MPI2_RAID_VOLUME_PGAD_FORM_GET_NEXT_HANDLE, handle))) {
		ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
		    MPI2_IOCSTATUS_MASK;
		if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
			ioc_info(ioc, "\tbreak from volume scan: ioc_status(0x%04x), loginfo(0x%08x)\n",
			    ioc_status, le32_to_cpu(mpi_reply.IOCLogInfo));
			break;
		}
		handle = le16_to_cpu(volume_pg1.DevHandle);
		spin_lock_irqsave(&ioc->raid_device_lock, flags);
		raid_device = _scsih_raid_device_find_by_wwid(ioc,
		    le64_to_cpu(volume_pg1.WWID));
		spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
		if (raid_device)
			continue;
		if (mpt3sas_config_get_raid_volume_pg0(ioc, &mpi_reply,
		    &volume_pg0, MPI2_RAID_VOLUME_PGAD_FORM_HANDLE, handle,
		    sizeof(Mpi2RaidVolPage0_t)))
			continue;
		ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
		    MPI2_IOCSTATUS_MASK;
		if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
			ioc_info(ioc, "\tbreak from volume scan: ioc_status(0x%04x), loginfo(0x%08x)\n",
			    ioc_status, le32_to_cpu(mpi_reply.IOCLogInfo));
			break;
		}
		if (volume_pg0.VolumeState == MPI2_RAID_VOL_STATE_OPTIMAL ||
		    volume_pg0.VolumeState == MPI2_RAID_VOL_STATE_ONLINE ||
		    volume_pg0.VolumeState == MPI2_RAID_VOL_STATE_DEGRADED) {
			/* reuse the IR-config-change "added" path to register
			 * the volume with the midlayer
			 */
			memset(&element, 0, sizeof(Mpi2EventIrConfigElement_t));
			element.ReasonCode = MPI2_EVENT_IR_CHANGE_RC_ADDED;
			element.VolDevHandle = volume_pg1.DevHandle;
			ioc_info(ioc, "\tBEFORE adding volume: handle (0x%04x)\n",
			    volume_pg1.DevHandle);
			_scsih_sas_volume_add(ioc, &element);
			ioc_info(ioc, "\tAFTER adding volume: handle (0x%04x)\n",
			    volume_pg1.DevHandle);
		}
	}

	ioc_info(ioc, "\tscan devices: volumes complete\n");

 skip_to_sas:

	ioc_info(ioc, "\tscan devices: end devices start\n");

	/* sas devices */
	handle = 0xFFFF;
	while (!(mpt3sas_config_get_sas_device_pg0(ioc, &mpi_reply,
	    &sas_device_pg0, MPI2_SAS_DEVICE_PGAD_FORM_GET_NEXT_HANDLE,
	    handle))) {
		ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
		    MPI2_IOCSTATUS_MASK;
		if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
			ioc_info(ioc, "\tbreak from end device scan: ioc_status(0x%04x), loginfo(0x%08x)\n",
			    ioc_status, le32_to_cpu(mpi_reply.IOCLogInfo));
			break;
		}
		handle = le16_to_cpu(sas_device_pg0.DevHandle);
		if (!(_scsih_is_end_device(
		    le32_to_cpu(sas_device_pg0.DeviceInfo))))
			continue;
		port_id = sas_device_pg0.PhysicalPort;
		sas_device = mpt3sas_get_sdev_by_addr(ioc,
		    le64_to_cpu(sas_device_pg0.SASAddress),
		    mpt3sas_get_port_by_id(ioc, port_id, 0));
		if (sas_device) {
			sas_device_put(sas_device);
			continue;
		}
		parent_handle = le16_to_cpu(sas_device_pg0.ParentDevHandle);
		if (!_scsih_get_sas_address(ioc, parent_handle, &sas_address)) {
			ioc_info(ioc, "\tBEFORE adding end device: handle (0x%04x), sas_addr(0x%016llx)\n",
			    handle,
			    (u64)le64_to_cpu(sas_device_pg0.SASAddress));
			mpt3sas_transport_update_links(ioc, sas_address, handle,
			    sas_device_pg0.PhyNum, MPI2_SAS_NEG_LINK_RATE_1_5,
			    mpt3sas_get_port_by_id(ioc, port_id, 0));
			retry_count = 0;
			/* This will retry adding the end device.
			 * _scsih_add_device() will decide on retries and
			 * return "1" when it should be retried
			 */
			while (_scsih_add_device(ioc, handle, retry_count++,
			    0)) {
				ssleep(1);
			}
			ioc_info(ioc, "\tAFTER adding end device: handle (0x%04x), sas_addr(0x%016llx)\n",
			    handle,
			    (u64)le64_to_cpu(sas_device_pg0.SASAddress));
		}
	}
	ioc_info(ioc, "\tscan devices: end devices complete\n");
	ioc_info(ioc, "\tscan devices: pcie end devices start\n");

	/* pcie devices */
	handle = 0xFFFF;
	while (!(mpt3sas_config_get_pcie_device_pg0(ioc, &mpi_reply,
	    &pcie_device_pg0, MPI26_PCIE_DEVICE_PGAD_FORM_GET_NEXT_HANDLE,
	    handle))) {
		ioc_status = le16_to_cpu(mpi_reply.IOCStatus)
		    & MPI2_IOCSTATUS_MASK;
		if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
			ioc_info(ioc, "\tbreak from pcie end device scan: ioc_status(0x%04x), loginfo(0x%08x)\n",
			    ioc_status, le32_to_cpu(mpi_reply.IOCLogInfo));
			break;
		}
		handle = le16_to_cpu(pcie_device_pg0.DevHandle);
		if (!(_scsih_is_nvme_pciescsi_device(
		    le32_to_cpu(pcie_device_pg0.DeviceInfo))))
			continue;
		pcie_device = mpt3sas_get_pdev_by_wwid(ioc,
		    le64_to_cpu(pcie_device_pg0.WWID));
		if (pcie_device) {
			pcie_device_put(pcie_device);
			continue;
		}
		retry_count = 0;
		/* NOTE(review): parent_handle is assigned but not read in
		 * this loop — confirm whether it can be dropped.
		 */
		parent_handle = le16_to_cpu(pcie_device_pg0.ParentDevHandle);
		_scsih_pcie_add_device(ioc, handle);

		ioc_info(ioc, "\tAFTER adding pcie end device: handle (0x%04x), wwid(0x%016llx)\n",
		    handle, (u64)le64_to_cpu(pcie_device_pg0.WWID));
	}

	ioc_info(ioc, "\tpcie devices: pcie end devices complete\n");
	ioc_info(ioc, "scan devices: complete\n");
}

/**
 * mpt3sas_scsih_pre_reset_handler - reset callback handler (for scsih)
 * @ioc: per adapter object
 *
 * The handler for doing any required cleanup or initialization.
 */
void mpt3sas_scsih_pre_reset_handler(struct MPT3SAS_ADAPTER *ioc)
{
	dtmprintk(ioc, ioc_info(ioc, "%s: MPT3_IOC_PRE_RESET\n", __func__));
}

/**
 * mpt3sas_scsih_clear_outstanding_scsi_tm_commands - clears outstanding
 * scsi & tm cmds.
 * @ioc: per adapter object
 *
 * The handler for doing any required cleanup or initialization.
 */
void
mpt3sas_scsih_clear_outstanding_scsi_tm_commands(struct MPT3SAS_ADAPTER *ioc)
{
	dtmprintk(ioc,
	    ioc_info(ioc, "%s: clear outstanding scsi & tm cmds\n", __func__));
	/* abort any in-flight scsih internal command */
	if (ioc->scsih_cmds.status & MPT3_CMD_PENDING) {
		ioc->scsih_cmds.status |= MPT3_CMD_RESET;
		mpt3sas_base_free_smid(ioc, ioc->scsih_cmds.smid);
		complete(&ioc->scsih_cmds.done);
	}
	/* abort any in-flight task-management command */
	if (ioc->tm_cmds.status & MPT3_CMD_PENDING) {
		ioc->tm_cmds.status |= MPT3_CMD_RESET;
		mpt3sas_base_free_smid(ioc, ioc->tm_cmds.smid);
		complete(&ioc->tm_cmds.done);
	}

	memset(ioc->pend_os_device_add, 0, ioc->pend_os_device_add_sz);
	memset(ioc->device_remove_in_progress, 0,
	    ioc->device_remove_in_progress_sz);
	_scsih_fw_event_cleanup_queue(ioc);
	_scsih_flush_running_cmds(ioc);
}

/**
 * mpt3sas_scsih_reset_done_handler - reset callback handler (for scsih)
 * @ioc: per adapter object
 *
 * The handler for doing any required cleanup or initialization.
 */
void
mpt3sas_scsih_reset_done_handler(struct MPT3SAS_ADAPTER *ioc)
{
	dtmprintk(ioc, ioc_info(ioc, "%s: MPT3_IOC_DONE_RESET\n", __func__));
	if (!(disable_discovery > 0 && !ioc->sas_hba.num_phys)) {
		if (ioc->multipath_on_hba) {
			_scsih_sas_port_refresh(ioc);
			_scsih_update_vphys_after_reset(ioc);
		}
		_scsih_prep_device_scan(ioc);
		_scsih_create_enclosure_list_after_reset(ioc);
		_scsih_search_responding_sas_devices(ioc);
		_scsih_search_responding_pcie_devices(ioc);
		_scsih_search_responding_raid_devices(ioc);
		_scsih_search_responding_expanders(ioc);
		_scsih_error_recovery_delete_devices(ioc);
	}
}

/**
 * _mpt3sas_fw_work - delayed task for processing firmware events
 * @ioc: per adapter object
 * @fw_event: The fw_event_work object
 * Context: user.
 */
static void
_mpt3sas_fw_work(struct MPT3SAS_ADAPTER *ioc, struct fw_event_work *fw_event)
{
	ioc->current_event = fw_event;
	_scsih_fw_event_del_from_list(ioc, fw_event);

	/* the queue is being flushed so ignore this event */
	if (ioc->remove_host || ioc->pci_error_recovery) {
		fw_event_work_put(fw_event);
		ioc->current_event = NULL;
		return;
	}

	switch (fw_event->event) {
	case MPT3SAS_PROCESS_TRIGGER_DIAG:
		mpt3sas_process_trigger_data(ioc,
		    (struct SL_WH_TRIGGERS_EVENT_DATA_T *)
		    fw_event->event_data);
		break;
	case MPT3SAS_REMOVE_UNRESPONDING_DEVICES:
		/* wait for any in-progress host recovery to finish before
		 * rescanning
		 */
		while (scsi_host_in_recovery(ioc->shost) ||
		    ioc->shost_recovery) {
			/*
			 * If we're unloading or cancelling the work, bail.
			 * Otherwise, this can become an infinite loop.
			 */
			if (ioc->remove_host || ioc->fw_events_cleanup)
				goto out;
			ssleep(1);
		}
		_scsih_remove_unresponding_devices(ioc);
		_scsih_del_dirty_vphy(ioc);
		_scsih_del_dirty_port_entries(ioc);
		if (ioc->is_gen35_ioc)
			_scsih_update_device_qdepth(ioc);
		_scsih_scan_for_devices_after_reset(ioc);
		/*
		 * If diag reset has occurred during the driver load
		 * then driver has to complete the driver load operation
		 * by executing the following items:
		 *- Register the devices from sas_device_init_list to SML
		 *- clear is_driver_loading flag,
		 *- start the watchdog thread.
		 * In happy driver load path, above things are taken care of when
		 * driver executes scsih_scan_finished().
		 */
		if (ioc->is_driver_loading)
			_scsih_complete_devices_scanning(ioc);
		_scsih_set_nvme_max_shutdown_latency(ioc);
		break;
	case MPT3SAS_PORT_ENABLE_COMPLETE:
		ioc->start_scan = 0;
		if (missing_delay[0] != -1 && missing_delay[1] != -1)
			mpt3sas_base_update_missing_delay(ioc, missing_delay[0],
			    missing_delay[1]);
		dewtprintk(ioc,
		    ioc_info(ioc, "port enable: complete from worker thread\n"));
		break;
	case MPT3SAS_TURN_ON_PFA_LED:
		_scsih_turn_on_pfa_led(ioc, fw_event->device_handle);
		break;
	case MPI2_EVENT_SAS_TOPOLOGY_CHANGE_LIST:
		_scsih_sas_topology_change_event(ioc, fw_event);
		break;
	case MPI2_EVENT_SAS_DEVICE_STATUS_CHANGE:
		if (ioc->logging_level & MPT_DEBUG_EVENT_WORK_TASK)
			_scsih_sas_device_status_change_event_debug(ioc,
			    (Mpi2EventDataSasDeviceStatusChange_t *)
			    fw_event->event_data);
		break;
	case MPI2_EVENT_SAS_DISCOVERY:
		_scsih_sas_discovery_event(ioc, fw_event);
		break;
	case MPI2_EVENT_SAS_DEVICE_DISCOVERY_ERROR:
		_scsih_sas_device_discovery_error_event(ioc, fw_event);
		break;
	case MPI2_EVENT_SAS_BROADCAST_PRIMITIVE:
		_scsih_sas_broadcast_primitive_event(ioc, fw_event);
		break;
	case MPI2_EVENT_SAS_ENCL_DEVICE_STATUS_CHANGE:
		_scsih_sas_enclosure_dev_status_change_event(ioc,
		    fw_event);
		break;
	case MPI2_EVENT_IR_CONFIGURATION_CHANGE_LIST:
		_scsih_sas_ir_config_change_event(ioc, fw_event);
		break;
	case MPI2_EVENT_IR_VOLUME:
		_scsih_sas_ir_volume_event(ioc, fw_event);
		break;
	case MPI2_EVENT_IR_PHYSICAL_DISK:
		_scsih_sas_ir_physical_disk_event(ioc, fw_event);
		break;
	case MPI2_EVENT_IR_OPERATION_STATUS:
		_scsih_sas_ir_operation_status_event(ioc, fw_event);
		break;
	case MPI2_EVENT_PCIE_DEVICE_STATUS_CHANGE:
		_scsih_pcie_device_status_change_event(ioc, fw_event);
		break;
	case MPI2_EVENT_PCIE_ENUMERATION:
		_scsih_pcie_enumeration_event(ioc, fw_event);
		break;
	case MPI2_EVENT_PCIE_TOPOLOGY_CHANGE_LIST:
		_scsih_pcie_topology_change_event(ioc, fw_event);
		/* NOTE(review): returns here without the fw_event_work_put()
		 * done at 'out' — presumably the event handler consumes the
		 * reference itself; confirm against its implementation.
		 */
		ioc->current_event = NULL;
		return;
	}
out:
	fw_event_work_put(fw_event);
	ioc->current_event = NULL;
}

/**
 * _firmware_event_work
 * @work: The fw_event_work object
 * Context: user.
 *
 * wrappers for the work thread handling firmware events
 */

static void
_firmware_event_work(struct work_struct *work)
{
	struct fw_event_work *fw_event = container_of(work,
	    struct fw_event_work, work);

	_mpt3sas_fw_work(fw_event->ioc, fw_event);
}

/**
 * mpt3sas_scsih_event_callback - firmware event handler (called at ISR time)
 * @ioc: per adapter object
 * @msix_index: MSIX table index supplied by the OS (unused here)
 * @reply: reply message frame(lower 32bit addr)
 * Context: interrupt.
 *
 * This function merely adds a new work task into ioc->firmware_event_thread.
 * The tasks are worked from _firmware_event_work in user context.
 *
 * Return: 1 meaning mf should be freed from _base_interrupt
 *         0 means the mf is freed from this function.
 */
u8
mpt3sas_scsih_event_callback(struct MPT3SAS_ADAPTER *ioc, u8 msix_index,
	u32 reply)
{
	struct fw_event_work *fw_event;
	Mpi2EventNotificationReply_t *mpi_reply;
	u16 event;
	u16 sz;
	Mpi26EventDataActiveCableExcept_t *ActiveCableEventData;

	/* events turned off due to host reset */
	if (ioc->pci_error_recovery)
		return 1;

	mpi_reply = mpt3sas_base_get_reply_virt_addr(ioc, reply);

	if (unlikely(!mpi_reply)) {
		ioc_err(ioc, "mpi_reply not valid at %s:%d/%s()!\n",
		    __FILE__, __LINE__, __func__);
		return 1;
	}

	event = le16_to_cpu(mpi_reply->Event);

	if (event != MPI2_EVENT_LOG_ENTRY_ADDED)
		mpt3sas_trigger_event(ioc, event, 0);

	switch (event) {
	/* handle these */
	case MPI2_EVENT_SAS_BROADCAST_PRIMITIVE:
	{
		Mpi2EventDataSasBroadcastPrimitive_t *baen_data =
		    (Mpi2EventDataSasBroadcastPrimitive_t *)
		    mpi_reply->EventData;

		if (baen_data->Primitive !=
		    MPI2_EVENT_PRIMITIVE_ASYNCHRONOUS_EVENT)
			return 1;

		/* only one broadcast AEN is processed at a time; extras are
		 * counted as pending
		 */
		if (ioc->broadcast_aen_busy) {
			ioc->broadcast_aen_pending++;
			return 1;
		} else
			ioc->broadcast_aen_busy = 1;
		break;
	}

	case MPI2_EVENT_SAS_TOPOLOGY_CHANGE_LIST:
		_scsih_check_topo_delete_events(ioc,
		    (Mpi2EventDataSasTopologyChangeList_t *)
		    mpi_reply->EventData);
		/*
		 * No need to add the topology change list
		 * event to fw event work queue when
		 * diag reset is going on. Since during diag
		 * reset driver scan the devices by reading
		 * sas device page0's not by processing the
		 * events.
		 */
		if (ioc->shost_recovery)
			return 1;
		break;
	case MPI2_EVENT_PCIE_TOPOLOGY_CHANGE_LIST:
		_scsih_check_pcie_topo_remove_events(ioc,
		    (Mpi26EventDataPCIeTopologyChangeList_t *)
		    mpi_reply->EventData);
		if (ioc->shost_recovery)
			return 1;
		break;
	case MPI2_EVENT_IR_CONFIGURATION_CHANGE_LIST:
		_scsih_check_ir_config_unhide_events(ioc,
		    (Mpi2EventDataIrConfigChangeList_t *)
		    mpi_reply->EventData);
		break;
	case MPI2_EVENT_IR_VOLUME:
		_scsih_check_volume_delete_events(ioc,
		    (Mpi2EventDataIrVolume_t *)
		    mpi_reply->EventData);
		break;
	case MPI2_EVENT_LOG_ENTRY_ADDED:
	{
		Mpi2EventDataLogEntryAdded_t *log_entry;
		u32 log_code;

		/* WarpDrive-specific log entries only */
		if (!ioc->is_warpdrive)
			break;

		log_entry = (Mpi2EventDataLogEntryAdded_t *)
		    mpi_reply->EventData;
		log_code = le32_to_cpu(*(__le32 *)log_entry->LogData);

		if (le16_to_cpu(log_entry->LogEntryQualifier)
		    != MPT2_WARPDRIVE_LOGENTRY)
			break;

		switch (log_code) {
		case MPT2_WARPDRIVE_LC_SSDT:
			ioc_warn(ioc, "WarpDrive Warning: IO Throttling has occurred in the WarpDrive subsystem. Check WarpDrive documentation for additional details.\n");
			break;
		case MPT2_WARPDRIVE_LC_SSDLW:
			ioc_warn(ioc, "WarpDrive Warning: Program/Erase Cycles for the WarpDrive subsystem in degraded range. Check WarpDrive documentation for additional details.\n");
			break;
		case MPT2_WARPDRIVE_LC_SSDLF:
			ioc_err(ioc, "WarpDrive Fatal Error: There are no Program/Erase Cycles for the WarpDrive subsystem. The storage device will be in read-only mode. Check WarpDrive documentation for additional details.\n");
			break;
		case MPT2_WARPDRIVE_LC_BRMF:
			ioc_err(ioc, "WarpDrive Fatal Error: The Backup Rail Monitor has failed on the WarpDrive subsystem. Check WarpDrive documentation for additional details.\n");
			break;
		}

		break;
	}
	case MPI2_EVENT_SAS_DEVICE_STATUS_CHANGE:
		_scsih_sas_device_status_change_event(ioc,
		    (Mpi2EventDataSasDeviceStatusChange_t *)
		    mpi_reply->EventData);
		break;
	case MPI2_EVENT_IR_OPERATION_STATUS:
	case MPI2_EVENT_SAS_DISCOVERY:
	case MPI2_EVENT_SAS_DEVICE_DISCOVERY_ERROR:
	case MPI2_EVENT_SAS_ENCL_DEVICE_STATUS_CHANGE:
	case MPI2_EVENT_IR_PHYSICAL_DISK:
	case MPI2_EVENT_PCIE_ENUMERATION:
	case MPI2_EVENT_PCIE_DEVICE_STATUS_CHANGE:
		/* no ISR-time processing; still queued to the worker below */
		break;

	case MPI2_EVENT_TEMP_THRESHOLD:
		_scsih_temp_threshold_events(ioc,
		    (Mpi2EventDataTemperature_t *)
		    mpi_reply->EventData);
		break;
	case MPI2_EVENT_ACTIVE_CABLE_EXCEPTION:
		ActiveCableEventData =
		    (Mpi26EventDataActiveCableExcept_t *) mpi_reply->EventData;
		switch (ActiveCableEventData->ReasonCode) {
		case MPI26_EVENT_ACTIVE_CABLE_INSUFFICIENT_POWER:
			ioc_notice(ioc, "Currently an active cable with ReceptacleID %d\n",
			    ActiveCableEventData->ReceptacleID);
			pr_notice("cannot be powered and devices connected\n");
			pr_notice("to this active cable will not be seen\n");
			pr_notice("This active cable requires %d mW of power\n",
			    le32_to_cpu(
			    ActiveCableEventData->ActiveCablePowerRequirement));
			break;

		case MPI26_EVENT_ACTIVE_CABLE_DEGRADED:
			ioc_notice(ioc, "Currently a cable with ReceptacleID %d\n",
			    ActiveCableEventData->ReceptacleID);
			pr_notice(
			    "is not running at optimal speed(12 Gb/s rate)\n");
			break;
		}

		break;

	default: /* ignore the rest */
		return 1;
	}

	/* EventDataLength is in 32-bit words; queue a copy for the worker */
	sz = le16_to_cpu(mpi_reply->EventDataLength) * 4;
	fw_event = alloc_fw_event_work(sz);
	if (!fw_event) {
		ioc_err(ioc, "failure at %s:%d/%s()!\n",
		    __FILE__, __LINE__, __func__);
		return 1;
	}

	memcpy(fw_event->event_data, mpi_reply->EventData, sz);
	fw_event->ioc = ioc;
	fw_event->VF_ID = mpi_reply->VF_ID;
	fw_event->VP_ID = mpi_reply->VP_ID;
	fw_event->event = event;
	_scsih_fw_event_add(ioc, fw_event);
	fw_event_work_put(fw_event);
	return 1;
}

/**
 * _scsih_expander_node_remove - removing expander device from list.
 * @ioc: per adapter object
 * @sas_expander: the sas_device object
 *
 * Removing object and freeing associated memory from the
 * ioc->sas_expander_list.
 */
static void
_scsih_expander_node_remove(struct MPT3SAS_ADAPTER *ioc,
	struct _sas_node *sas_expander)
{
	struct _sas_port *mpt3sas_port, *next;
	unsigned long flags;
	int port_id;

	/* remove sibling ports attached to this expander */
	list_for_each_entry_safe(mpt3sas_port, next,
	    &sas_expander->sas_port_list, port_list) {
		if (ioc->shost_recovery)
			return;
		if (mpt3sas_port->remote_identify.device_type ==
		    SAS_END_DEVICE)
			mpt3sas_device_remove_by_sas_address(ioc,
			    mpt3sas_port->remote_identify.sas_address,
			    mpt3sas_port->hba_port);
		else if (mpt3sas_port->remote_identify.device_type ==
		    SAS_EDGE_EXPANDER_DEVICE ||
		    mpt3sas_port->remote_identify.device_type ==
		    SAS_FANOUT_EXPANDER_DEVICE)
			/* recursively remove downstream expanders */
			mpt3sas_expander_remove(ioc,
			    mpt3sas_port->remote_identify.sas_address,
			    mpt3sas_port->hba_port);
	}

	port_id = sas_expander->port->port_id;

	mpt3sas_transport_port_remove(ioc, sas_expander->sas_address,
	    sas_expander->sas_address_parent, sas_expander->port);

	ioc_info(ioc,
	    "expander_remove: handle(0x%04x), sas_addr(0x%016llx), port:%d\n",
	    sas_expander->handle, (unsigned long long)
	    sas_expander->sas_address,
	    port_id);

	spin_lock_irqsave(&ioc->sas_node_lock, flags);
	list_del(&sas_expander->list);
	spin_unlock_irqrestore(&ioc->sas_node_lock, flags);

	kfree(sas_expander->phy);
	kfree(sas_expander);
}

/**
 * _scsih_nvme_shutdown - NVMe shutdown notification
 * @ioc: per adapter object
 *
 * Sending IoUnitControl request with shutdown operation code to alert IOC that
 * the host system is shutting down so that IOC can issue NVMe shutdown to
 * NVMe drives attached to it.
 */
static void
_scsih_nvme_shutdown(struct MPT3SAS_ADAPTER *ioc)
{
	Mpi26IoUnitControlRequest_t *mpi_request;
	Mpi26IoUnitControlReply_t *mpi_reply;
	u16 smid;

	/* are there any NVMe devices ? */
	if (list_empty(&ioc->pcie_device_list))
		return;

	mutex_lock(&ioc->scsih_cmds.mutex);

	if (ioc->scsih_cmds.status != MPT3_CMD_NOT_USED) {
		ioc_err(ioc, "%s: scsih_cmd in use\n", __func__);
		goto out;
	}

	ioc->scsih_cmds.status = MPT3_CMD_PENDING;

	smid = mpt3sas_base_get_smid(ioc, ioc->scsih_cb_idx);
	if (!smid) {
		ioc_err(ioc,
		    "%s: failed obtaining a smid\n", __func__);
		ioc->scsih_cmds.status = MPT3_CMD_NOT_USED;
		goto out;
	}

	mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
	ioc->scsih_cmds.smid = smid;
	memset(mpi_request, 0, sizeof(Mpi26IoUnitControlRequest_t));
	mpi_request->Function = MPI2_FUNCTION_IO_UNIT_CONTROL;
	mpi_request->Operation = MPI26_CTRL_OP_SHUTDOWN;

	init_completion(&ioc->scsih_cmds.done);
	ioc->put_smid_default(ioc, smid);
	/* Wait for max_shutdown_latency seconds */
	ioc_info(ioc,
	    "Io Unit Control shutdown (sending), Shutdown latency %d sec\n",
	    ioc->max_shutdown_latency);
	wait_for_completion_timeout(&ioc->scsih_cmds.done,
	    ioc->max_shutdown_latency*HZ);

	if (!(ioc->scsih_cmds.status & MPT3_CMD_COMPLETE)) {
		ioc_err(ioc, "%s: timeout\n", __func__);
		goto out;
	}

	if (ioc->scsih_cmds.status & MPT3_CMD_REPLY_VALID) {
		mpi_reply = ioc->scsih_cmds.reply;
		ioc_info(ioc, "Io Unit Control shutdown (complete):"
		    "ioc_status(0x%04x), loginfo(0x%08x)\n",
		    le16_to_cpu(mpi_reply->IOCStatus),
		    le32_to_cpu(mpi_reply->IOCLogInfo));
	}
out:
	ioc->scsih_cmds.status = MPT3_CMD_NOT_USED;
	mutex_unlock(&ioc->scsih_cmds.mutex);
}


/**
 * _scsih_ir_shutdown - IR shutdown notification
 * @ioc: per adapter object
 *
 * Sending RAID Action to alert the Integrated RAID subsystem of the IOC that
 * the host system is shutting down.
 */
static void
_scsih_ir_shutdown(struct MPT3SAS_ADAPTER *ioc)
{
	Mpi2RaidActionRequest_t *mpi_request;
	Mpi2RaidActionReply_t *mpi_reply;
	u16 smid;

	/* is IR firmware build loaded ? */
	if (!ioc->ir_firmware)
		return;

	/* are there any volumes ? */
	if (list_empty(&ioc->raid_device_list))
		return;

	mutex_lock(&ioc->scsih_cmds.mutex);

	if (ioc->scsih_cmds.status != MPT3_CMD_NOT_USED) {
		ioc_err(ioc, "%s: scsih_cmd in use\n", __func__);
		goto out;
	}
	ioc->scsih_cmds.status = MPT3_CMD_PENDING;

	smid = mpt3sas_base_get_smid(ioc, ioc->scsih_cb_idx);
	if (!smid) {
		ioc_err(ioc, "%s: failed obtaining a smid\n", __func__);
		ioc->scsih_cmds.status = MPT3_CMD_NOT_USED;
		goto out;
	}

	mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
	ioc->scsih_cmds.smid = smid;
	memset(mpi_request, 0, sizeof(Mpi2RaidActionRequest_t));

	mpi_request->Function = MPI2_FUNCTION_RAID_ACTION;
	mpi_request->Action = MPI2_RAID_ACTION_SYSTEM_SHUTDOWN_INITIATED;

	/* WarpDrive hides IR messages from the user */
	if (!ioc->hide_ir_msg)
		ioc_info(ioc, "IR shutdown (sending)\n");
	init_completion(&ioc->scsih_cmds.done);
	ioc->put_smid_default(ioc, smid);
	wait_for_completion_timeout(&ioc->scsih_cmds.done, 10*HZ);

	if (!(ioc->scsih_cmds.status & MPT3_CMD_COMPLETE)) {
		ioc_err(ioc, "%s: timeout\n", __func__);
		goto out;
	}

	if (ioc->scsih_cmds.status & MPT3_CMD_REPLY_VALID) {
		mpi_reply = ioc->scsih_cmds.reply;
		if (!ioc->hide_ir_msg)
			ioc_info(ioc, "IR shutdown (complete): ioc_status(0x%04x), loginfo(0x%08x)\n",
			    le16_to_cpu(mpi_reply->IOCStatus),
			    le32_to_cpu(mpi_reply->IOCLogInfo));
	}

 out:
	ioc->scsih_cmds.status = MPT3_CMD_NOT_USED;
	mutex_unlock(&ioc->scsih_cmds.mutex);
}

/**
 * _scsih_get_shost_and_ioc - get shost and ioc
 * and verify whether they are NULL or not
 * @pdev: PCI device struct
 * @shost: address of scsi host pointer
 * @ioc: address of HBA adapter pointer
 *
 * Return zero if *shost and *ioc are not NULL otherwise return error number.
11214 */ 11215 static int 11216 _scsih_get_shost_and_ioc(struct pci_dev *pdev, 11217 struct Scsi_Host **shost, struct MPT3SAS_ADAPTER **ioc) 11218 { 11219 *shost = pci_get_drvdata(pdev); 11220 if (*shost == NULL) { 11221 dev_err(&pdev->dev, "pdev's driver data is null\n"); 11222 return -ENXIO; 11223 } 11224 11225 *ioc = shost_priv(*shost); 11226 if (*ioc == NULL) { 11227 dev_err(&pdev->dev, "shost's private data is null\n"); 11228 return -ENXIO; 11229 } 11230 11231 return 0; 11232 } 11233 11234 /** 11235 * scsih_remove - detach and remove add host 11236 * @pdev: PCI device struct 11237 * 11238 * Routine called when unloading the driver. 11239 */ 11240 static void scsih_remove(struct pci_dev *pdev) 11241 { 11242 struct Scsi_Host *shost; 11243 struct MPT3SAS_ADAPTER *ioc; 11244 struct _sas_port *mpt3sas_port, *next_port; 11245 struct _raid_device *raid_device, *next; 11246 struct MPT3SAS_TARGET *sas_target_priv_data; 11247 struct _pcie_device *pcie_device, *pcienext; 11248 struct workqueue_struct *wq; 11249 unsigned long flags; 11250 Mpi2ConfigReply_t mpi_reply; 11251 struct hba_port *port, *port_next; 11252 11253 if (_scsih_get_shost_and_ioc(pdev, &shost, &ioc)) 11254 return; 11255 11256 ioc->remove_host = 1; 11257 11258 if (!pci_device_is_present(pdev)) { 11259 mpt3sas_base_pause_mq_polling(ioc); 11260 _scsih_flush_running_cmds(ioc); 11261 } 11262 11263 _scsih_fw_event_cleanup_queue(ioc); 11264 11265 spin_lock_irqsave(&ioc->fw_event_lock, flags); 11266 wq = ioc->firmware_event_thread; 11267 ioc->firmware_event_thread = NULL; 11268 spin_unlock_irqrestore(&ioc->fw_event_lock, flags); 11269 if (wq) 11270 destroy_workqueue(wq); 11271 /* 11272 * Copy back the unmodified ioc page1. so that on next driver load, 11273 * current modified changes on ioc page1 won't take effect. 
11274 */ 11275 if (ioc->is_aero_ioc) 11276 mpt3sas_config_set_ioc_pg1(ioc, &mpi_reply, 11277 &ioc->ioc_pg1_copy); 11278 /* release all the volumes */ 11279 _scsih_ir_shutdown(ioc); 11280 mpt3sas_destroy_debugfs(ioc); 11281 sas_remove_host(shost); 11282 list_for_each_entry_safe(raid_device, next, &ioc->raid_device_list, 11283 list) { 11284 if (raid_device->starget) { 11285 sas_target_priv_data = 11286 raid_device->starget->hostdata; 11287 sas_target_priv_data->deleted = 1; 11288 scsi_remove_target(&raid_device->starget->dev); 11289 } 11290 ioc_info(ioc, "removing handle(0x%04x), wwid(0x%016llx)\n", 11291 raid_device->handle, (u64)raid_device->wwid); 11292 _scsih_raid_device_remove(ioc, raid_device); 11293 } 11294 list_for_each_entry_safe(pcie_device, pcienext, &ioc->pcie_device_list, 11295 list) { 11296 _scsih_pcie_device_remove_from_sml(ioc, pcie_device); 11297 list_del_init(&pcie_device->list); 11298 pcie_device_put(pcie_device); 11299 } 11300 11301 /* free ports attached to the sas_host */ 11302 list_for_each_entry_safe(mpt3sas_port, next_port, 11303 &ioc->sas_hba.sas_port_list, port_list) { 11304 if (mpt3sas_port->remote_identify.device_type == 11305 SAS_END_DEVICE) 11306 mpt3sas_device_remove_by_sas_address(ioc, 11307 mpt3sas_port->remote_identify.sas_address, 11308 mpt3sas_port->hba_port); 11309 else if (mpt3sas_port->remote_identify.device_type == 11310 SAS_EDGE_EXPANDER_DEVICE || 11311 mpt3sas_port->remote_identify.device_type == 11312 SAS_FANOUT_EXPANDER_DEVICE) 11313 mpt3sas_expander_remove(ioc, 11314 mpt3sas_port->remote_identify.sas_address, 11315 mpt3sas_port->hba_port); 11316 } 11317 11318 list_for_each_entry_safe(port, port_next, 11319 &ioc->port_table_list, list) { 11320 list_del(&port->list); 11321 kfree(port); 11322 } 11323 11324 /* free phys attached to the sas_host */ 11325 if (ioc->sas_hba.num_phys) { 11326 kfree(ioc->sas_hba.phy); 11327 ioc->sas_hba.phy = NULL; 11328 ioc->sas_hba.num_phys = 0; 11329 } 11330 11331 mpt3sas_base_detach(ioc); 11332 
mpt3sas_ctl_release(ioc); 11333 spin_lock(&gioc_lock); 11334 list_del(&ioc->list); 11335 spin_unlock(&gioc_lock); 11336 scsi_host_put(shost); 11337 } 11338 11339 /** 11340 * scsih_shutdown - routine call during system shutdown 11341 * @pdev: PCI device struct 11342 */ 11343 static void 11344 scsih_shutdown(struct pci_dev *pdev) 11345 { 11346 struct Scsi_Host *shost; 11347 struct MPT3SAS_ADAPTER *ioc; 11348 struct workqueue_struct *wq; 11349 unsigned long flags; 11350 Mpi2ConfigReply_t mpi_reply; 11351 11352 if (_scsih_get_shost_and_ioc(pdev, &shost, &ioc)) 11353 return; 11354 11355 ioc->remove_host = 1; 11356 11357 if (!pci_device_is_present(pdev)) { 11358 mpt3sas_base_pause_mq_polling(ioc); 11359 _scsih_flush_running_cmds(ioc); 11360 } 11361 11362 _scsih_fw_event_cleanup_queue(ioc); 11363 11364 spin_lock_irqsave(&ioc->fw_event_lock, flags); 11365 wq = ioc->firmware_event_thread; 11366 ioc->firmware_event_thread = NULL; 11367 spin_unlock_irqrestore(&ioc->fw_event_lock, flags); 11368 if (wq) 11369 destroy_workqueue(wq); 11370 /* 11371 * Copy back the unmodified ioc page1 so that on next driver load, 11372 * current modified changes on ioc page1 won't take effect. 11373 */ 11374 if (ioc->is_aero_ioc) 11375 mpt3sas_config_set_ioc_pg1(ioc, &mpi_reply, 11376 &ioc->ioc_pg1_copy); 11377 11378 _scsih_ir_shutdown(ioc); 11379 _scsih_nvme_shutdown(ioc); 11380 mpt3sas_base_mask_interrupts(ioc); 11381 mpt3sas_base_stop_watchdog(ioc); 11382 ioc->shost_recovery = 1; 11383 mpt3sas_base_make_ioc_ready(ioc, SOFT_RESET); 11384 ioc->shost_recovery = 0; 11385 mpt3sas_base_free_irq(ioc); 11386 mpt3sas_base_disable_msix(ioc); 11387 } 11388 11389 11390 /** 11391 * _scsih_probe_boot_devices - reports 1st device 11392 * @ioc: per adapter object 11393 * 11394 * If specified in bios page 2, this routine reports the 1st 11395 * device scsi-ml or sas transport for persistent boot device 11396 * purposes. 
Please refer to function _scsih_determine_boot_device() 11397 */ 11398 static void 11399 _scsih_probe_boot_devices(struct MPT3SAS_ADAPTER *ioc) 11400 { 11401 u32 channel; 11402 void *device; 11403 struct _sas_device *sas_device; 11404 struct _raid_device *raid_device; 11405 struct _pcie_device *pcie_device; 11406 u16 handle; 11407 u64 sas_address_parent; 11408 u64 sas_address; 11409 unsigned long flags; 11410 int rc; 11411 int tid; 11412 struct hba_port *port; 11413 11414 /* no Bios, return immediately */ 11415 if (!ioc->bios_pg3.BiosVersion) 11416 return; 11417 11418 device = NULL; 11419 if (ioc->req_boot_device.device) { 11420 device = ioc->req_boot_device.device; 11421 channel = ioc->req_boot_device.channel; 11422 } else if (ioc->req_alt_boot_device.device) { 11423 device = ioc->req_alt_boot_device.device; 11424 channel = ioc->req_alt_boot_device.channel; 11425 } else if (ioc->current_boot_device.device) { 11426 device = ioc->current_boot_device.device; 11427 channel = ioc->current_boot_device.channel; 11428 } 11429 11430 if (!device) 11431 return; 11432 11433 if (channel == RAID_CHANNEL) { 11434 raid_device = device; 11435 /* 11436 * If this boot vd is already registered with SML then 11437 * no need to register it again as part of device scanning 11438 * after diag reset during driver load operation. 11439 */ 11440 if (raid_device->starget) 11441 return; 11442 rc = scsi_add_device(ioc->shost, RAID_CHANNEL, 11443 raid_device->id, 0); 11444 if (rc) 11445 _scsih_raid_device_remove(ioc, raid_device); 11446 } else if (channel == PCIE_CHANNEL) { 11447 pcie_device = device; 11448 /* 11449 * If this boot NVMe device is already registered with SML then 11450 * no need to register it again as part of device scanning 11451 * after diag reset during driver load operation. 
11452 */ 11453 if (pcie_device->starget) 11454 return; 11455 spin_lock_irqsave(&ioc->pcie_device_lock, flags); 11456 tid = pcie_device->id; 11457 list_move_tail(&pcie_device->list, &ioc->pcie_device_list); 11458 spin_unlock_irqrestore(&ioc->pcie_device_lock, flags); 11459 rc = scsi_add_device(ioc->shost, PCIE_CHANNEL, tid, 0); 11460 if (rc) 11461 _scsih_pcie_device_remove(ioc, pcie_device); 11462 } else { 11463 sas_device = device; 11464 /* 11465 * If this boot sas/sata device is already registered with SML 11466 * then no need to register it again as part of device scanning 11467 * after diag reset during driver load operation. 11468 */ 11469 if (sas_device->starget) 11470 return; 11471 spin_lock_irqsave(&ioc->sas_device_lock, flags); 11472 handle = sas_device->handle; 11473 sas_address_parent = sas_device->sas_address_parent; 11474 sas_address = sas_device->sas_address; 11475 port = sas_device->port; 11476 list_move_tail(&sas_device->list, &ioc->sas_device_list); 11477 spin_unlock_irqrestore(&ioc->sas_device_lock, flags); 11478 11479 if (ioc->hide_drives) 11480 return; 11481 11482 if (!port) 11483 return; 11484 11485 if (!mpt3sas_transport_port_add(ioc, handle, 11486 sas_address_parent, port)) { 11487 _scsih_sas_device_remove(ioc, sas_device); 11488 } else if (!sas_device->starget) { 11489 if (!ioc->is_driver_loading) { 11490 mpt3sas_transport_port_remove(ioc, 11491 sas_address, 11492 sas_address_parent, port); 11493 _scsih_sas_device_remove(ioc, sas_device); 11494 } 11495 } 11496 } 11497 } 11498 11499 /** 11500 * _scsih_probe_raid - reporting raid volumes to scsi-ml 11501 * @ioc: per adapter object 11502 * 11503 * Called during initial loading of the driver. 
11504 */ 11505 static void 11506 _scsih_probe_raid(struct MPT3SAS_ADAPTER *ioc) 11507 { 11508 struct _raid_device *raid_device, *raid_next; 11509 int rc; 11510 11511 list_for_each_entry_safe(raid_device, raid_next, 11512 &ioc->raid_device_list, list) { 11513 if (raid_device->starget) 11514 continue; 11515 rc = scsi_add_device(ioc->shost, RAID_CHANNEL, 11516 raid_device->id, 0); 11517 if (rc) 11518 _scsih_raid_device_remove(ioc, raid_device); 11519 } 11520 } 11521 11522 static struct _sas_device *get_next_sas_device(struct MPT3SAS_ADAPTER *ioc) 11523 { 11524 struct _sas_device *sas_device = NULL; 11525 unsigned long flags; 11526 11527 spin_lock_irqsave(&ioc->sas_device_lock, flags); 11528 if (!list_empty(&ioc->sas_device_init_list)) { 11529 sas_device = list_first_entry(&ioc->sas_device_init_list, 11530 struct _sas_device, list); 11531 sas_device_get(sas_device); 11532 } 11533 spin_unlock_irqrestore(&ioc->sas_device_lock, flags); 11534 11535 return sas_device; 11536 } 11537 11538 static void sas_device_make_active(struct MPT3SAS_ADAPTER *ioc, 11539 struct _sas_device *sas_device) 11540 { 11541 unsigned long flags; 11542 11543 spin_lock_irqsave(&ioc->sas_device_lock, flags); 11544 11545 /* 11546 * Since we dropped the lock during the call to port_add(), we need to 11547 * be careful here that somebody else didn't move or delete this item 11548 * while we were busy with other things. 11549 * 11550 * If it was on the list, we need a put() for the reference the list 11551 * had. Either way, we need a get() for the destination list. 
11552 */ 11553 if (!list_empty(&sas_device->list)) { 11554 list_del_init(&sas_device->list); 11555 sas_device_put(sas_device); 11556 } 11557 11558 sas_device_get(sas_device); 11559 list_add_tail(&sas_device->list, &ioc->sas_device_list); 11560 11561 spin_unlock_irqrestore(&ioc->sas_device_lock, flags); 11562 } 11563 11564 /** 11565 * _scsih_probe_sas - reporting sas devices to sas transport 11566 * @ioc: per adapter object 11567 * 11568 * Called during initial loading of the driver. 11569 */ 11570 static void 11571 _scsih_probe_sas(struct MPT3SAS_ADAPTER *ioc) 11572 { 11573 struct _sas_device *sas_device; 11574 11575 if (ioc->hide_drives) 11576 return; 11577 11578 while ((sas_device = get_next_sas_device(ioc))) { 11579 if (!mpt3sas_transport_port_add(ioc, sas_device->handle, 11580 sas_device->sas_address_parent, sas_device->port)) { 11581 _scsih_sas_device_remove(ioc, sas_device); 11582 sas_device_put(sas_device); 11583 continue; 11584 } else if (!sas_device->starget) { 11585 /* 11586 * When asyn scanning is enabled, its not possible to 11587 * remove devices while scanning is turned on due to an 11588 * oops in scsi_sysfs_add_sdev()->add_device()-> 11589 * sysfs_addrm_start() 11590 */ 11591 if (!ioc->is_driver_loading) { 11592 mpt3sas_transport_port_remove(ioc, 11593 sas_device->sas_address, 11594 sas_device->sas_address_parent, 11595 sas_device->port); 11596 _scsih_sas_device_remove(ioc, sas_device); 11597 sas_device_put(sas_device); 11598 continue; 11599 } 11600 } 11601 sas_device_make_active(ioc, sas_device); 11602 sas_device_put(sas_device); 11603 } 11604 } 11605 11606 /** 11607 * get_next_pcie_device - Get the next pcie device 11608 * @ioc: per adapter object 11609 * 11610 * Get the next pcie device from pcie_device_init_list list. 
11611 * 11612 * Return: pcie device structure if pcie_device_init_list list is not empty 11613 * otherwise returns NULL 11614 */ 11615 static struct _pcie_device *get_next_pcie_device(struct MPT3SAS_ADAPTER *ioc) 11616 { 11617 struct _pcie_device *pcie_device = NULL; 11618 unsigned long flags; 11619 11620 spin_lock_irqsave(&ioc->pcie_device_lock, flags); 11621 if (!list_empty(&ioc->pcie_device_init_list)) { 11622 pcie_device = list_first_entry(&ioc->pcie_device_init_list, 11623 struct _pcie_device, list); 11624 pcie_device_get(pcie_device); 11625 } 11626 spin_unlock_irqrestore(&ioc->pcie_device_lock, flags); 11627 11628 return pcie_device; 11629 } 11630 11631 /** 11632 * pcie_device_make_active - Add pcie device to pcie_device_list list 11633 * @ioc: per adapter object 11634 * @pcie_device: pcie device object 11635 * 11636 * Add the pcie device which has registered with SCSI Transport Later to 11637 * pcie_device_list list 11638 */ 11639 static void pcie_device_make_active(struct MPT3SAS_ADAPTER *ioc, 11640 struct _pcie_device *pcie_device) 11641 { 11642 unsigned long flags; 11643 11644 spin_lock_irqsave(&ioc->pcie_device_lock, flags); 11645 11646 if (!list_empty(&pcie_device->list)) { 11647 list_del_init(&pcie_device->list); 11648 pcie_device_put(pcie_device); 11649 } 11650 pcie_device_get(pcie_device); 11651 list_add_tail(&pcie_device->list, &ioc->pcie_device_list); 11652 11653 spin_unlock_irqrestore(&ioc->pcie_device_lock, flags); 11654 } 11655 11656 /** 11657 * _scsih_probe_pcie - reporting PCIe devices to scsi-ml 11658 * @ioc: per adapter object 11659 * 11660 * Called during initial loading of the driver. 
11661 */ 11662 static void 11663 _scsih_probe_pcie(struct MPT3SAS_ADAPTER *ioc) 11664 { 11665 struct _pcie_device *pcie_device; 11666 int rc; 11667 11668 /* PCIe Device List */ 11669 while ((pcie_device = get_next_pcie_device(ioc))) { 11670 if (pcie_device->starget) { 11671 pcie_device_put(pcie_device); 11672 continue; 11673 } 11674 if (pcie_device->access_status == 11675 MPI26_PCIEDEV0_ASTATUS_DEVICE_BLOCKED) { 11676 pcie_device_make_active(ioc, pcie_device); 11677 pcie_device_put(pcie_device); 11678 continue; 11679 } 11680 rc = scsi_add_device(ioc->shost, PCIE_CHANNEL, 11681 pcie_device->id, 0); 11682 if (rc) { 11683 _scsih_pcie_device_remove(ioc, pcie_device); 11684 pcie_device_put(pcie_device); 11685 continue; 11686 } else if (!pcie_device->starget) { 11687 /* 11688 * When async scanning is enabled, its not possible to 11689 * remove devices while scanning is turned on due to an 11690 * oops in scsi_sysfs_add_sdev()->add_device()-> 11691 * sysfs_addrm_start() 11692 */ 11693 if (!ioc->is_driver_loading) { 11694 /* TODO-- Need to find out whether this condition will 11695 * occur or not 11696 */ 11697 _scsih_pcie_device_remove(ioc, pcie_device); 11698 pcie_device_put(pcie_device); 11699 continue; 11700 } 11701 } 11702 pcie_device_make_active(ioc, pcie_device); 11703 pcie_device_put(pcie_device); 11704 } 11705 } 11706 11707 /** 11708 * _scsih_probe_devices - probing for devices 11709 * @ioc: per adapter object 11710 * 11711 * Called during initial loading of the driver. 
11712 */ 11713 static void 11714 _scsih_probe_devices(struct MPT3SAS_ADAPTER *ioc) 11715 { 11716 u16 volume_mapping_flags; 11717 11718 if (!(ioc->facts.ProtocolFlags & MPI2_IOCFACTS_PROTOCOL_SCSI_INITIATOR)) 11719 return; /* return when IOC doesn't support initiator mode */ 11720 11721 _scsih_probe_boot_devices(ioc); 11722 11723 if (ioc->ir_firmware) { 11724 volume_mapping_flags = 11725 le16_to_cpu(ioc->ioc_pg8.IRVolumeMappingFlags) & 11726 MPI2_IOCPAGE8_IRFLAGS_MASK_VOLUME_MAPPING_MODE; 11727 if (volume_mapping_flags == 11728 MPI2_IOCPAGE8_IRFLAGS_LOW_VOLUME_MAPPING) { 11729 _scsih_probe_raid(ioc); 11730 _scsih_probe_sas(ioc); 11731 } else { 11732 _scsih_probe_sas(ioc); 11733 _scsih_probe_raid(ioc); 11734 } 11735 } else { 11736 _scsih_probe_sas(ioc); 11737 _scsih_probe_pcie(ioc); 11738 } 11739 } 11740 11741 /** 11742 * scsih_scan_start - scsi lld callback for .scan_start 11743 * @shost: SCSI host pointer 11744 * 11745 * The shost has the ability to discover targets on its own instead 11746 * of scanning the entire bus. In our implemention, we will kick off 11747 * firmware discovery. 11748 */ 11749 static void 11750 scsih_scan_start(struct Scsi_Host *shost) 11751 { 11752 struct MPT3SAS_ADAPTER *ioc = shost_priv(shost); 11753 int rc; 11754 if (diag_buffer_enable != -1 && diag_buffer_enable != 0) 11755 mpt3sas_enable_diag_buffer(ioc, diag_buffer_enable); 11756 else if (ioc->manu_pg11.HostTraceBufferMaxSizeKB != 0) 11757 mpt3sas_enable_diag_buffer(ioc, 1); 11758 11759 if (disable_discovery > 0) 11760 return; 11761 11762 ioc->start_scan = 1; 11763 rc = mpt3sas_port_enable(ioc); 11764 11765 if (rc != 0) 11766 ioc_info(ioc, "port enable: FAILED\n"); 11767 } 11768 11769 /** 11770 * _scsih_complete_devices_scanning - add the devices to sml and 11771 * complete ioc initialization. 11772 * @ioc: per adapter object 11773 * 11774 * Return nothing. 
11775 */ 11776 static void _scsih_complete_devices_scanning(struct MPT3SAS_ADAPTER *ioc) 11777 { 11778 11779 if (ioc->wait_for_discovery_to_complete) { 11780 ioc->wait_for_discovery_to_complete = 0; 11781 _scsih_probe_devices(ioc); 11782 } 11783 11784 mpt3sas_base_start_watchdog(ioc); 11785 ioc->is_driver_loading = 0; 11786 } 11787 11788 /** 11789 * scsih_scan_finished - scsi lld callback for .scan_finished 11790 * @shost: SCSI host pointer 11791 * @time: elapsed time of the scan in jiffies 11792 * 11793 * This function will be called periodicallyn until it returns 1 with the 11794 * scsi_host and the elapsed time of the scan in jiffies. In our implemention, 11795 * we wait for firmware discovery to complete, then return 1. 11796 */ 11797 static int 11798 scsih_scan_finished(struct Scsi_Host *shost, unsigned long time) 11799 { 11800 struct MPT3SAS_ADAPTER *ioc = shost_priv(shost); 11801 u32 ioc_state; 11802 int issue_hard_reset = 0; 11803 11804 if (disable_discovery > 0) { 11805 ioc->is_driver_loading = 0; 11806 ioc->wait_for_discovery_to_complete = 0; 11807 return 1; 11808 } 11809 11810 if (time >= (300 * HZ)) { 11811 ioc->port_enable_cmds.status = MPT3_CMD_NOT_USED; 11812 ioc_info(ioc, "port enable: FAILED with timeout (timeout=300s)\n"); 11813 ioc->is_driver_loading = 0; 11814 return 1; 11815 } 11816 11817 if (ioc->start_scan) { 11818 ioc_state = mpt3sas_base_get_iocstate(ioc, 0); 11819 if ((ioc_state & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_FAULT) { 11820 mpt3sas_print_fault_code(ioc, ioc_state & 11821 MPI2_DOORBELL_DATA_MASK); 11822 issue_hard_reset = 1; 11823 goto out; 11824 } else if ((ioc_state & MPI2_IOC_STATE_MASK) == 11825 MPI2_IOC_STATE_COREDUMP) { 11826 mpt3sas_base_coredump_info(ioc, ioc_state & 11827 MPI2_DOORBELL_DATA_MASK); 11828 mpt3sas_base_wait_for_coredump_completion(ioc, __func__); 11829 issue_hard_reset = 1; 11830 goto out; 11831 } 11832 return 0; 11833 } 11834 11835 if (ioc->port_enable_cmds.status & MPT3_CMD_RESET) { 11836 ioc_info(ioc, 
11837 "port enable: aborted due to diag reset\n"); 11838 ioc->port_enable_cmds.status = MPT3_CMD_NOT_USED; 11839 goto out; 11840 } 11841 if (ioc->start_scan_failed) { 11842 ioc_info(ioc, "port enable: FAILED with (ioc_status=0x%08x)\n", 11843 ioc->start_scan_failed); 11844 ioc->is_driver_loading = 0; 11845 ioc->wait_for_discovery_to_complete = 0; 11846 ioc->remove_host = 1; 11847 return 1; 11848 } 11849 11850 ioc_info(ioc, "port enable: SUCCESS\n"); 11851 ioc->port_enable_cmds.status = MPT3_CMD_NOT_USED; 11852 _scsih_complete_devices_scanning(ioc); 11853 11854 out: 11855 if (issue_hard_reset) { 11856 ioc->port_enable_cmds.status = MPT3_CMD_NOT_USED; 11857 if (mpt3sas_base_hard_reset_handler(ioc, SOFT_RESET)) 11858 ioc->is_driver_loading = 0; 11859 } 11860 return 1; 11861 } 11862 11863 /** 11864 * scsih_map_queues - map reply queues with request queues 11865 * @shost: SCSI host pointer 11866 */ 11867 static void scsih_map_queues(struct Scsi_Host *shost) 11868 { 11869 struct MPT3SAS_ADAPTER *ioc = 11870 (struct MPT3SAS_ADAPTER *)shost->hostdata; 11871 struct blk_mq_queue_map *map; 11872 int i, qoff, offset; 11873 int nr_msix_vectors = ioc->iopoll_q_start_index; 11874 int iopoll_q_count = ioc->reply_queue_count - nr_msix_vectors; 11875 11876 if (shost->nr_hw_queues == 1) 11877 return; 11878 11879 for (i = 0, qoff = 0; i < shost->nr_maps; i++) { 11880 map = &shost->tag_set.map[i]; 11881 map->nr_queues = 0; 11882 offset = 0; 11883 if (i == HCTX_TYPE_DEFAULT) { 11884 map->nr_queues = 11885 nr_msix_vectors - ioc->high_iops_queues; 11886 offset = ioc->high_iops_queues; 11887 } else if (i == HCTX_TYPE_POLL) 11888 map->nr_queues = iopoll_q_count; 11889 11890 if (!map->nr_queues) 11891 BUG_ON(i == HCTX_TYPE_DEFAULT); 11892 11893 /* 11894 * The poll queue(s) doesn't have an IRQ (and hence IRQ 11895 * affinity), so use the regular blk-mq cpu mapping 11896 */ 11897 map->queue_offset = qoff; 11898 if (i != HCTX_TYPE_POLL) 11899 blk_mq_pci_map_queues(map, ioc->pdev, offset); 11900 
else 11901 blk_mq_map_queues(map); 11902 11903 qoff += map->nr_queues; 11904 } 11905 } 11906 11907 /* shost template for SAS 2.0 HBA devices */ 11908 static const struct scsi_host_template mpt2sas_driver_template = { 11909 .module = THIS_MODULE, 11910 .name = "Fusion MPT SAS Host", 11911 .proc_name = MPT2SAS_DRIVER_NAME, 11912 .queuecommand = scsih_qcmd, 11913 .target_alloc = scsih_target_alloc, 11914 .slave_alloc = scsih_slave_alloc, 11915 .device_configure = scsih_device_configure, 11916 .target_destroy = scsih_target_destroy, 11917 .slave_destroy = scsih_slave_destroy, 11918 .scan_finished = scsih_scan_finished, 11919 .scan_start = scsih_scan_start, 11920 .change_queue_depth = scsih_change_queue_depth, 11921 .eh_abort_handler = scsih_abort, 11922 .eh_device_reset_handler = scsih_dev_reset, 11923 .eh_target_reset_handler = scsih_target_reset, 11924 .eh_host_reset_handler = scsih_host_reset, 11925 .bios_param = scsih_bios_param, 11926 .can_queue = 1, 11927 .this_id = -1, 11928 .sg_tablesize = MPT2SAS_SG_DEPTH, 11929 .max_sectors = 32767, 11930 .cmd_per_lun = 7, 11931 .shost_groups = mpt3sas_host_groups, 11932 .sdev_groups = mpt3sas_dev_groups, 11933 .track_queue_depth = 1, 11934 .cmd_size = sizeof(struct scsiio_tracker), 11935 }; 11936 11937 /* raid transport support for SAS 2.0 HBA devices */ 11938 static struct raid_function_template mpt2sas_raid_functions = { 11939 .cookie = &mpt2sas_driver_template, 11940 .is_raid = scsih_is_raid, 11941 .get_resync = scsih_get_resync, 11942 .get_state = scsih_get_state, 11943 }; 11944 11945 /* shost template for SAS 3.0 HBA devices */ 11946 static const struct scsi_host_template mpt3sas_driver_template = { 11947 .module = THIS_MODULE, 11948 .name = "Fusion MPT SAS Host", 11949 .proc_name = MPT3SAS_DRIVER_NAME, 11950 .queuecommand = scsih_qcmd, 11951 .target_alloc = scsih_target_alloc, 11952 .slave_alloc = scsih_slave_alloc, 11953 .device_configure = scsih_device_configure, 11954 .target_destroy = scsih_target_destroy, 11955 
.slave_destroy = scsih_slave_destroy, 11956 .scan_finished = scsih_scan_finished, 11957 .scan_start = scsih_scan_start, 11958 .change_queue_depth = scsih_change_queue_depth, 11959 .eh_abort_handler = scsih_abort, 11960 .eh_device_reset_handler = scsih_dev_reset, 11961 .eh_target_reset_handler = scsih_target_reset, 11962 .eh_host_reset_handler = scsih_host_reset, 11963 .bios_param = scsih_bios_param, 11964 .can_queue = 1, 11965 .this_id = -1, 11966 .sg_tablesize = MPT3SAS_SG_DEPTH, 11967 .max_sectors = 32767, 11968 .max_segment_size = 0xffffffff, 11969 .cmd_per_lun = 128, 11970 .shost_groups = mpt3sas_host_groups, 11971 .sdev_groups = mpt3sas_dev_groups, 11972 .track_queue_depth = 1, 11973 .cmd_size = sizeof(struct scsiio_tracker), 11974 .map_queues = scsih_map_queues, 11975 .mq_poll = mpt3sas_blk_mq_poll, 11976 }; 11977 11978 /* raid transport support for SAS 3.0 HBA devices */ 11979 static struct raid_function_template mpt3sas_raid_functions = { 11980 .cookie = &mpt3sas_driver_template, 11981 .is_raid = scsih_is_raid, 11982 .get_resync = scsih_get_resync, 11983 .get_state = scsih_get_state, 11984 }; 11985 11986 /** 11987 * _scsih_determine_hba_mpi_version - determine in which MPI version class 11988 * this device belongs to. 
11989 * @pdev: PCI device struct 11990 * 11991 * return MPI2_VERSION for SAS 2.0 HBA devices, 11992 * MPI25_VERSION for SAS 3.0 HBA devices, and 11993 * MPI26 VERSION for Cutlass & Invader SAS 3.0 HBA devices 11994 */ 11995 static u16 11996 _scsih_determine_hba_mpi_version(struct pci_dev *pdev) 11997 { 11998 11999 switch (pdev->device) { 12000 case MPI2_MFGPAGE_DEVID_SSS6200: 12001 case MPI2_MFGPAGE_DEVID_SAS2004: 12002 case MPI2_MFGPAGE_DEVID_SAS2008: 12003 case MPI2_MFGPAGE_DEVID_SAS2108_1: 12004 case MPI2_MFGPAGE_DEVID_SAS2108_2: 12005 case MPI2_MFGPAGE_DEVID_SAS2108_3: 12006 case MPI2_MFGPAGE_DEVID_SAS2116_1: 12007 case MPI2_MFGPAGE_DEVID_SAS2116_2: 12008 case MPI2_MFGPAGE_DEVID_SAS2208_1: 12009 case MPI2_MFGPAGE_DEVID_SAS2208_2: 12010 case MPI2_MFGPAGE_DEVID_SAS2208_3: 12011 case MPI2_MFGPAGE_DEVID_SAS2208_4: 12012 case MPI2_MFGPAGE_DEVID_SAS2208_5: 12013 case MPI2_MFGPAGE_DEVID_SAS2208_6: 12014 case MPI2_MFGPAGE_DEVID_SAS2308_1: 12015 case MPI2_MFGPAGE_DEVID_SAS2308_2: 12016 case MPI2_MFGPAGE_DEVID_SAS2308_3: 12017 case MPI2_MFGPAGE_DEVID_SWITCH_MPI_EP: 12018 case MPI2_MFGPAGE_DEVID_SWITCH_MPI_EP_1: 12019 return MPI2_VERSION; 12020 case MPI25_MFGPAGE_DEVID_SAS3004: 12021 case MPI25_MFGPAGE_DEVID_SAS3008: 12022 case MPI25_MFGPAGE_DEVID_SAS3108_1: 12023 case MPI25_MFGPAGE_DEVID_SAS3108_2: 12024 case MPI25_MFGPAGE_DEVID_SAS3108_5: 12025 case MPI25_MFGPAGE_DEVID_SAS3108_6: 12026 return MPI25_VERSION; 12027 case MPI26_MFGPAGE_DEVID_SAS3216: 12028 case MPI26_MFGPAGE_DEVID_SAS3224: 12029 case MPI26_MFGPAGE_DEVID_SAS3316_1: 12030 case MPI26_MFGPAGE_DEVID_SAS3316_2: 12031 case MPI26_MFGPAGE_DEVID_SAS3316_3: 12032 case MPI26_MFGPAGE_DEVID_SAS3316_4: 12033 case MPI26_MFGPAGE_DEVID_SAS3324_1: 12034 case MPI26_MFGPAGE_DEVID_SAS3324_2: 12035 case MPI26_MFGPAGE_DEVID_SAS3324_3: 12036 case MPI26_MFGPAGE_DEVID_SAS3324_4: 12037 case MPI26_MFGPAGE_DEVID_SAS3508: 12038 case MPI26_MFGPAGE_DEVID_SAS3508_1: 12039 case MPI26_MFGPAGE_DEVID_SAS3408: 12040 case 
MPI26_MFGPAGE_DEVID_SAS3516: 12041 case MPI26_MFGPAGE_DEVID_SAS3516_1: 12042 case MPI26_MFGPAGE_DEVID_SAS3416: 12043 case MPI26_MFGPAGE_DEVID_SAS3616: 12044 case MPI26_ATLAS_PCIe_SWITCH_DEVID: 12045 case MPI26_MFGPAGE_DEVID_CFG_SEC_3916: 12046 case MPI26_MFGPAGE_DEVID_HARD_SEC_3916: 12047 case MPI26_MFGPAGE_DEVID_CFG_SEC_3816: 12048 case MPI26_MFGPAGE_DEVID_HARD_SEC_3816: 12049 case MPI26_MFGPAGE_DEVID_INVALID0_3916: 12050 case MPI26_MFGPAGE_DEVID_INVALID1_3916: 12051 case MPI26_MFGPAGE_DEVID_INVALID0_3816: 12052 case MPI26_MFGPAGE_DEVID_INVALID1_3816: 12053 return MPI26_VERSION; 12054 } 12055 return 0; 12056 } 12057 12058 /** 12059 * _scsih_probe - attach and add scsi host 12060 * @pdev: PCI device struct 12061 * @id: pci device id 12062 * 12063 * Return: 0 success, anything else error. 12064 */ 12065 static int 12066 _scsih_probe(struct pci_dev *pdev, const struct pci_device_id *id) 12067 { 12068 struct MPT3SAS_ADAPTER *ioc; 12069 struct Scsi_Host *shost = NULL; 12070 int rv; 12071 u16 hba_mpi_version; 12072 int iopoll_q_count = 0; 12073 12074 /* Determine in which MPI version class this pci device belongs */ 12075 hba_mpi_version = _scsih_determine_hba_mpi_version(pdev); 12076 if (hba_mpi_version == 0) 12077 return -ENODEV; 12078 12079 /* Enumerate only SAS 2.0 HBA's if hbas_to_enumerate is one, 12080 * for other generation HBA's return with -ENODEV 12081 */ 12082 if ((hbas_to_enumerate == 1) && (hba_mpi_version != MPI2_VERSION)) 12083 return -ENODEV; 12084 12085 /* Enumerate only SAS 3.0 HBA's if hbas_to_enumerate is two, 12086 * for other generation HBA's return with -ENODEV 12087 */ 12088 if ((hbas_to_enumerate == 2) && (!(hba_mpi_version == MPI25_VERSION 12089 || hba_mpi_version == MPI26_VERSION))) 12090 return -ENODEV; 12091 12092 switch (hba_mpi_version) { 12093 case MPI2_VERSION: 12094 pci_disable_link_state(pdev, PCIE_LINK_STATE_L0S | 12095 PCIE_LINK_STATE_L1 | PCIE_LINK_STATE_CLKPM); 12096 /* Use mpt2sas driver host template for SAS 2.0 HBA's */ 12097 
shost = scsi_host_alloc(&mpt2sas_driver_template, 12098 sizeof(struct MPT3SAS_ADAPTER)); 12099 if (!shost) 12100 return -ENODEV; 12101 ioc = shost_priv(shost); 12102 memset(ioc, 0, sizeof(struct MPT3SAS_ADAPTER)); 12103 ioc->hba_mpi_version_belonged = hba_mpi_version; 12104 ioc->id = mpt2_ids++; 12105 sprintf(ioc->driver_name, "%s", MPT2SAS_DRIVER_NAME); 12106 switch (pdev->device) { 12107 case MPI2_MFGPAGE_DEVID_SSS6200: 12108 ioc->is_warpdrive = 1; 12109 ioc->hide_ir_msg = 1; 12110 break; 12111 case MPI2_MFGPAGE_DEVID_SWITCH_MPI_EP: 12112 case MPI2_MFGPAGE_DEVID_SWITCH_MPI_EP_1: 12113 ioc->is_mcpu_endpoint = 1; 12114 break; 12115 default: 12116 ioc->mfg_pg10_hide_flag = MFG_PAGE10_EXPOSE_ALL_DISKS; 12117 break; 12118 } 12119 12120 if (multipath_on_hba == -1 || multipath_on_hba == 0) 12121 ioc->multipath_on_hba = 0; 12122 else 12123 ioc->multipath_on_hba = 1; 12124 12125 break; 12126 case MPI25_VERSION: 12127 case MPI26_VERSION: 12128 /* Use mpt3sas driver host template for SAS 3.0 HBA's */ 12129 shost = scsi_host_alloc(&mpt3sas_driver_template, 12130 sizeof(struct MPT3SAS_ADAPTER)); 12131 if (!shost) 12132 return -ENODEV; 12133 ioc = shost_priv(shost); 12134 memset(ioc, 0, sizeof(struct MPT3SAS_ADAPTER)); 12135 ioc->hba_mpi_version_belonged = hba_mpi_version; 12136 ioc->id = mpt3_ids++; 12137 sprintf(ioc->driver_name, "%s", MPT3SAS_DRIVER_NAME); 12138 switch (pdev->device) { 12139 case MPI26_MFGPAGE_DEVID_SAS3508: 12140 case MPI26_MFGPAGE_DEVID_SAS3508_1: 12141 case MPI26_MFGPAGE_DEVID_SAS3408: 12142 case MPI26_MFGPAGE_DEVID_SAS3516: 12143 case MPI26_MFGPAGE_DEVID_SAS3516_1: 12144 case MPI26_MFGPAGE_DEVID_SAS3416: 12145 case MPI26_MFGPAGE_DEVID_SAS3616: 12146 case MPI26_ATLAS_PCIe_SWITCH_DEVID: 12147 ioc->is_gen35_ioc = 1; 12148 break; 12149 case MPI26_MFGPAGE_DEVID_INVALID0_3816: 12150 case MPI26_MFGPAGE_DEVID_INVALID0_3916: 12151 dev_err(&pdev->dev, 12152 "HBA with DeviceId 0x%04x, sub VendorId 0x%04x, sub DeviceId 0x%04x is Invalid", 12153 pdev->device, 
pdev->subsystem_vendor, 12154 pdev->subsystem_device); 12155 return 1; 12156 case MPI26_MFGPAGE_DEVID_INVALID1_3816: 12157 case MPI26_MFGPAGE_DEVID_INVALID1_3916: 12158 dev_err(&pdev->dev, 12159 "HBA with DeviceId 0x%04x, sub VendorId 0x%04x, sub DeviceId 0x%04x is Tampered", 12160 pdev->device, pdev->subsystem_vendor, 12161 pdev->subsystem_device); 12162 return 1; 12163 case MPI26_MFGPAGE_DEVID_CFG_SEC_3816: 12164 case MPI26_MFGPAGE_DEVID_CFG_SEC_3916: 12165 dev_info(&pdev->dev, 12166 "HBA is in Configurable Secure mode\n"); 12167 fallthrough; 12168 case MPI26_MFGPAGE_DEVID_HARD_SEC_3816: 12169 case MPI26_MFGPAGE_DEVID_HARD_SEC_3916: 12170 ioc->is_aero_ioc = ioc->is_gen35_ioc = 1; 12171 break; 12172 default: 12173 ioc->is_gen35_ioc = ioc->is_aero_ioc = 0; 12174 } 12175 if ((ioc->hba_mpi_version_belonged == MPI25_VERSION && 12176 pdev->revision >= SAS3_PCI_DEVICE_C0_REVISION) || 12177 (ioc->hba_mpi_version_belonged == MPI26_VERSION)) { 12178 ioc->combined_reply_queue = 1; 12179 if (ioc->is_gen35_ioc) 12180 ioc->combined_reply_index_count = 12181 MPT3_SUP_REPLY_POST_HOST_INDEX_REG_COUNT_G35; 12182 else 12183 ioc->combined_reply_index_count = 12184 MPT3_SUP_REPLY_POST_HOST_INDEX_REG_COUNT_G3; 12185 } 12186 12187 switch (ioc->is_gen35_ioc) { 12188 case 0: 12189 if (multipath_on_hba == -1 || multipath_on_hba == 0) 12190 ioc->multipath_on_hba = 0; 12191 else 12192 ioc->multipath_on_hba = 1; 12193 break; 12194 case 1: 12195 if (multipath_on_hba == -1 || multipath_on_hba > 0) 12196 ioc->multipath_on_hba = 1; 12197 else 12198 ioc->multipath_on_hba = 0; 12199 break; 12200 default: 12201 break; 12202 } 12203 12204 break; 12205 default: 12206 return -ENODEV; 12207 } 12208 12209 INIT_LIST_HEAD(&ioc->list); 12210 spin_lock(&gioc_lock); 12211 list_add_tail(&ioc->list, &mpt3sas_ioc_list); 12212 spin_unlock(&gioc_lock); 12213 ioc->shost = shost; 12214 ioc->pdev = pdev; 12215 ioc->scsi_io_cb_idx = scsi_io_cb_idx; 12216 ioc->tm_cb_idx = tm_cb_idx; 12217 ioc->ctl_cb_idx = ctl_cb_idx; 
12218 ioc->base_cb_idx = base_cb_idx; 12219 ioc->port_enable_cb_idx = port_enable_cb_idx; 12220 ioc->transport_cb_idx = transport_cb_idx; 12221 ioc->scsih_cb_idx = scsih_cb_idx; 12222 ioc->config_cb_idx = config_cb_idx; 12223 ioc->tm_tr_cb_idx = tm_tr_cb_idx; 12224 ioc->tm_tr_volume_cb_idx = tm_tr_volume_cb_idx; 12225 ioc->tm_sas_control_cb_idx = tm_sas_control_cb_idx; 12226 ioc->logging_level = logging_level; 12227 ioc->schedule_dead_ioc_flush_running_cmds = &_scsih_flush_running_cmds; 12228 /* Host waits for minimum of six seconds */ 12229 ioc->max_shutdown_latency = IO_UNIT_CONTROL_SHUTDOWN_TIMEOUT; 12230 /* 12231 * Enable MEMORY MOVE support flag. 12232 */ 12233 ioc->drv_support_bitmap |= MPT_DRV_SUPPORT_BITMAP_MEMMOVE; 12234 /* Enable ADDITIONAL QUERY support flag. */ 12235 ioc->drv_support_bitmap |= MPT_DRV_SUPPORT_BITMAP_ADDNLQUERY; 12236 12237 ioc->enable_sdev_max_qd = enable_sdev_max_qd; 12238 12239 /* misc semaphores and spin locks */ 12240 mutex_init(&ioc->reset_in_progress_mutex); 12241 mutex_init(&ioc->hostdiag_unlock_mutex); 12242 /* initializing pci_access_mutex lock */ 12243 mutex_init(&ioc->pci_access_mutex); 12244 spin_lock_init(&ioc->ioc_reset_in_progress_lock); 12245 spin_lock_init(&ioc->scsi_lookup_lock); 12246 spin_lock_init(&ioc->sas_device_lock); 12247 spin_lock_init(&ioc->sas_node_lock); 12248 spin_lock_init(&ioc->fw_event_lock); 12249 spin_lock_init(&ioc->raid_device_lock); 12250 spin_lock_init(&ioc->pcie_device_lock); 12251 spin_lock_init(&ioc->diag_trigger_lock); 12252 12253 INIT_LIST_HEAD(&ioc->sas_device_list); 12254 INIT_LIST_HEAD(&ioc->sas_device_init_list); 12255 INIT_LIST_HEAD(&ioc->sas_expander_list); 12256 INIT_LIST_HEAD(&ioc->enclosure_list); 12257 INIT_LIST_HEAD(&ioc->pcie_device_list); 12258 INIT_LIST_HEAD(&ioc->pcie_device_init_list); 12259 INIT_LIST_HEAD(&ioc->fw_event_list); 12260 INIT_LIST_HEAD(&ioc->raid_device_list); 12261 INIT_LIST_HEAD(&ioc->sas_hba.sas_port_list); 12262 INIT_LIST_HEAD(&ioc->delayed_tr_list); 12263 
INIT_LIST_HEAD(&ioc->delayed_sc_list); 12264 INIT_LIST_HEAD(&ioc->delayed_event_ack_list); 12265 INIT_LIST_HEAD(&ioc->delayed_tr_volume_list); 12266 INIT_LIST_HEAD(&ioc->reply_queue_list); 12267 INIT_LIST_HEAD(&ioc->port_table_list); 12268 12269 sprintf(ioc->name, "%s_cm%d", ioc->driver_name, ioc->id); 12270 12271 /* init shost parameters */ 12272 shost->max_cmd_len = 32; 12273 shost->max_lun = max_lun; 12274 shost->transportt = mpt3sas_transport_template; 12275 shost->unique_id = ioc->id; 12276 12277 if (ioc->is_mcpu_endpoint) { 12278 /* mCPU MPI support 64K max IO */ 12279 shost->max_sectors = 128; 12280 ioc_info(ioc, "The max_sectors value is set to %d\n", 12281 shost->max_sectors); 12282 } else { 12283 if (max_sectors != 0xFFFF) { 12284 if (max_sectors < 64) { 12285 shost->max_sectors = 64; 12286 ioc_warn(ioc, "Invalid value %d passed for max_sectors, range is 64 to 32767. Assigning value of 64.\n", 12287 max_sectors); 12288 } else if (max_sectors > 32767) { 12289 shost->max_sectors = 32767; 12290 ioc_warn(ioc, "Invalid value %d passed for max_sectors, range is 64 to 32767.Assigning default value of 32767.\n", 12291 max_sectors); 12292 } else { 12293 shost->max_sectors = max_sectors & 0xFFFE; 12294 ioc_info(ioc, "The max_sectors value is set to %d\n", 12295 shost->max_sectors); 12296 } 12297 } 12298 } 12299 /* register EEDP capabilities with SCSI layer */ 12300 if (prot_mask >= 0) 12301 scsi_host_set_prot(shost, (prot_mask & 0x07)); 12302 else 12303 scsi_host_set_prot(shost, SHOST_DIF_TYPE1_PROTECTION 12304 | SHOST_DIF_TYPE2_PROTECTION 12305 | SHOST_DIF_TYPE3_PROTECTION); 12306 12307 scsi_host_set_guard(shost, SHOST_DIX_GUARD_CRC); 12308 12309 /* event thread */ 12310 snprintf(ioc->firmware_event_name, sizeof(ioc->firmware_event_name), 12311 "fw_event_%s%d", ioc->driver_name, ioc->id); 12312 ioc->firmware_event_thread = alloc_ordered_workqueue( 12313 ioc->firmware_event_name, 0); 12314 if (!ioc->firmware_event_thread) { 12315 ioc_err(ioc, "failure at 
%s:%d/%s()!\n", 12316 __FILE__, __LINE__, __func__); 12317 rv = -ENODEV; 12318 goto out_thread_fail; 12319 } 12320 12321 shost->host_tagset = 0; 12322 12323 if (ioc->is_gen35_ioc && host_tagset_enable) 12324 shost->host_tagset = 1; 12325 12326 ioc->is_driver_loading = 1; 12327 if ((mpt3sas_base_attach(ioc))) { 12328 ioc_err(ioc, "failure at %s:%d/%s()!\n", 12329 __FILE__, __LINE__, __func__); 12330 rv = -ENODEV; 12331 goto out_attach_fail; 12332 } 12333 12334 if (ioc->is_warpdrive) { 12335 if (ioc->mfg_pg10_hide_flag == MFG_PAGE10_EXPOSE_ALL_DISKS) 12336 ioc->hide_drives = 0; 12337 else if (ioc->mfg_pg10_hide_flag == MFG_PAGE10_HIDE_ALL_DISKS) 12338 ioc->hide_drives = 1; 12339 else { 12340 if (mpt3sas_get_num_volumes(ioc)) 12341 ioc->hide_drives = 1; 12342 else 12343 ioc->hide_drives = 0; 12344 } 12345 } else 12346 ioc->hide_drives = 0; 12347 12348 shost->nr_hw_queues = 1; 12349 12350 if (shost->host_tagset) { 12351 shost->nr_hw_queues = 12352 ioc->reply_queue_count - ioc->high_iops_queues; 12353 12354 iopoll_q_count = 12355 ioc->reply_queue_count - ioc->iopoll_q_start_index; 12356 12357 shost->nr_maps = iopoll_q_count ? 
3 : 1; 12358 12359 dev_info(&ioc->pdev->dev, 12360 "Max SCSIIO MPT commands: %d shared with nr_hw_queues = %d\n", 12361 shost->can_queue, shost->nr_hw_queues); 12362 } 12363 12364 rv = scsi_add_host(shost, &pdev->dev); 12365 if (rv) { 12366 ioc_err(ioc, "failure at %s:%d/%s()!\n", 12367 __FILE__, __LINE__, __func__); 12368 goto out_add_shost_fail; 12369 } 12370 12371 scsi_scan_host(shost); 12372 mpt3sas_setup_debugfs(ioc); 12373 return 0; 12374 out_add_shost_fail: 12375 mpt3sas_base_detach(ioc); 12376 out_attach_fail: 12377 destroy_workqueue(ioc->firmware_event_thread); 12378 out_thread_fail: 12379 spin_lock(&gioc_lock); 12380 list_del(&ioc->list); 12381 spin_unlock(&gioc_lock); 12382 scsi_host_put(shost); 12383 return rv; 12384 } 12385 12386 /** 12387 * scsih_suspend - power management suspend main entry point 12388 * @dev: Device struct 12389 * 12390 * Return: 0 success, anything else error. 12391 */ 12392 static int __maybe_unused 12393 scsih_suspend(struct device *dev) 12394 { 12395 struct pci_dev *pdev = to_pci_dev(dev); 12396 struct Scsi_Host *shost; 12397 struct MPT3SAS_ADAPTER *ioc; 12398 int rc; 12399 12400 rc = _scsih_get_shost_and_ioc(pdev, &shost, &ioc); 12401 if (rc) 12402 return rc; 12403 12404 mpt3sas_base_stop_watchdog(ioc); 12405 scsi_block_requests(shost); 12406 _scsih_nvme_shutdown(ioc); 12407 ioc_info(ioc, "pdev=0x%p, slot=%s, entering operating state\n", 12408 pdev, pci_name(pdev)); 12409 12410 mpt3sas_base_free_resources(ioc); 12411 return 0; 12412 } 12413 12414 /** 12415 * scsih_resume - power management resume main entry point 12416 * @dev: Device struct 12417 * 12418 * Return: 0 success, anything else error. 
12419 */ 12420 static int __maybe_unused 12421 scsih_resume(struct device *dev) 12422 { 12423 struct pci_dev *pdev = to_pci_dev(dev); 12424 struct Scsi_Host *shost; 12425 struct MPT3SAS_ADAPTER *ioc; 12426 pci_power_t device_state = pdev->current_state; 12427 int r; 12428 12429 r = _scsih_get_shost_and_ioc(pdev, &shost, &ioc); 12430 if (r) 12431 return r; 12432 12433 ioc_info(ioc, "pdev=0x%p, slot=%s, previous operating state [D%d]\n", 12434 pdev, pci_name(pdev), device_state); 12435 12436 ioc->pdev = pdev; 12437 r = mpt3sas_base_map_resources(ioc); 12438 if (r) 12439 return r; 12440 ioc_info(ioc, "Issuing Hard Reset as part of OS Resume\n"); 12441 mpt3sas_base_hard_reset_handler(ioc, SOFT_RESET); 12442 scsi_unblock_requests(shost); 12443 mpt3sas_base_start_watchdog(ioc); 12444 return 0; 12445 } 12446 12447 /** 12448 * scsih_pci_error_detected - Called when a PCI error is detected. 12449 * @pdev: PCI device struct 12450 * @state: PCI channel state 12451 * 12452 * Description: Called when a PCI error is detected. 12453 * 12454 * Return: PCI_ERS_RESULT_NEED_RESET or PCI_ERS_RESULT_DISCONNECT. 
12455 */ 12456 static pci_ers_result_t 12457 scsih_pci_error_detected(struct pci_dev *pdev, pci_channel_state_t state) 12458 { 12459 struct Scsi_Host *shost; 12460 struct MPT3SAS_ADAPTER *ioc; 12461 12462 if (_scsih_get_shost_and_ioc(pdev, &shost, &ioc)) 12463 return PCI_ERS_RESULT_DISCONNECT; 12464 12465 ioc_info(ioc, "PCI error: detected callback, state(%d)!!\n", state); 12466 12467 switch (state) { 12468 case pci_channel_io_normal: 12469 return PCI_ERS_RESULT_CAN_RECOVER; 12470 case pci_channel_io_frozen: 12471 /* Fatal error, prepare for slot reset */ 12472 ioc->pci_error_recovery = 1; 12473 scsi_block_requests(ioc->shost); 12474 mpt3sas_base_stop_watchdog(ioc); 12475 mpt3sas_base_free_resources(ioc); 12476 return PCI_ERS_RESULT_NEED_RESET; 12477 case pci_channel_io_perm_failure: 12478 /* Permanent error, prepare for device removal */ 12479 ioc->pci_error_recovery = 1; 12480 mpt3sas_base_stop_watchdog(ioc); 12481 mpt3sas_base_pause_mq_polling(ioc); 12482 _scsih_flush_running_cmds(ioc); 12483 return PCI_ERS_RESULT_DISCONNECT; 12484 } 12485 return PCI_ERS_RESULT_NEED_RESET; 12486 } 12487 12488 /** 12489 * scsih_pci_slot_reset - Called when PCI slot has been reset. 12490 * @pdev: PCI device struct 12491 * 12492 * Description: This routine is called by the pci error recovery 12493 * code after the PCI slot has been reset, just before we 12494 * should resume normal operations. 
12495 */ 12496 static pci_ers_result_t 12497 scsih_pci_slot_reset(struct pci_dev *pdev) 12498 { 12499 struct Scsi_Host *shost; 12500 struct MPT3SAS_ADAPTER *ioc; 12501 int rc; 12502 12503 if (_scsih_get_shost_and_ioc(pdev, &shost, &ioc)) 12504 return PCI_ERS_RESULT_DISCONNECT; 12505 12506 ioc_info(ioc, "PCI error: slot reset callback!!\n"); 12507 12508 ioc->pci_error_recovery = 0; 12509 ioc->pdev = pdev; 12510 pci_restore_state(pdev); 12511 rc = mpt3sas_base_map_resources(ioc); 12512 if (rc) 12513 return PCI_ERS_RESULT_DISCONNECT; 12514 12515 ioc_info(ioc, "Issuing Hard Reset as part of PCI Slot Reset\n"); 12516 rc = mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER); 12517 12518 ioc_warn(ioc, "hard reset: %s\n", 12519 (rc == 0) ? "success" : "failed"); 12520 12521 if (!rc) 12522 return PCI_ERS_RESULT_RECOVERED; 12523 else 12524 return PCI_ERS_RESULT_DISCONNECT; 12525 } 12526 12527 /** 12528 * scsih_pci_resume() - resume normal ops after PCI reset 12529 * @pdev: pointer to PCI device 12530 * 12531 * Called when the error recovery driver tells us that its 12532 * OK to resume normal operation. Use completion to allow 12533 * halted scsi ops to resume. 
12534 */ 12535 static void 12536 scsih_pci_resume(struct pci_dev *pdev) 12537 { 12538 struct Scsi_Host *shost; 12539 struct MPT3SAS_ADAPTER *ioc; 12540 12541 if (_scsih_get_shost_and_ioc(pdev, &shost, &ioc)) 12542 return; 12543 12544 ioc_info(ioc, "PCI error: resume callback!!\n"); 12545 12546 mpt3sas_base_start_watchdog(ioc); 12547 scsi_unblock_requests(ioc->shost); 12548 } 12549 12550 /** 12551 * scsih_pci_mmio_enabled - Enable MMIO and dump debug registers 12552 * @pdev: pointer to PCI device 12553 */ 12554 static pci_ers_result_t 12555 scsih_pci_mmio_enabled(struct pci_dev *pdev) 12556 { 12557 struct Scsi_Host *shost; 12558 struct MPT3SAS_ADAPTER *ioc; 12559 12560 if (_scsih_get_shost_and_ioc(pdev, &shost, &ioc)) 12561 return PCI_ERS_RESULT_DISCONNECT; 12562 12563 ioc_info(ioc, "PCI error: mmio enabled callback!!\n"); 12564 12565 /* TODO - dump whatever for debugging purposes */ 12566 12567 /* This called only if scsih_pci_error_detected returns 12568 * PCI_ERS_RESULT_CAN_RECOVER. Read/write to the device still 12569 * works, no need to reset slot. 12570 */ 12571 return PCI_ERS_RESULT_RECOVERED; 12572 } 12573 12574 /** 12575 * scsih_ncq_prio_supp - Check for NCQ command priority support 12576 * @sdev: scsi device struct 12577 * 12578 * This is called when a user indicates they would like to enable 12579 * ncq command priorities. This works only on SATA devices. 12580 */ 12581 bool scsih_ncq_prio_supp(struct scsi_device *sdev) 12582 { 12583 struct scsi_vpd *vpd; 12584 bool ncq_prio_supp = false; 12585 12586 rcu_read_lock(); 12587 vpd = rcu_dereference(sdev->vpd_pg89); 12588 if (!vpd || vpd->len < 214) 12589 goto out; 12590 12591 ncq_prio_supp = (vpd->data[213] >> 4) & 1; 12592 out: 12593 rcu_read_unlock(); 12594 12595 return ncq_prio_supp; 12596 } 12597 /* 12598 * The pci device ids are defined in mpi/mpi2_cnfg.h. 
12599 */ 12600 static const struct pci_device_id mpt3sas_pci_table[] = { 12601 /* Spitfire ~ 2004 */ 12602 { MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2004, 12603 PCI_ANY_ID, PCI_ANY_ID }, 12604 /* Falcon ~ 2008 */ 12605 { MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2008, 12606 PCI_ANY_ID, PCI_ANY_ID }, 12607 /* Liberator ~ 2108 */ 12608 { MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2108_1, 12609 PCI_ANY_ID, PCI_ANY_ID }, 12610 { MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2108_2, 12611 PCI_ANY_ID, PCI_ANY_ID }, 12612 { MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2108_3, 12613 PCI_ANY_ID, PCI_ANY_ID }, 12614 /* Meteor ~ 2116 */ 12615 { MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2116_1, 12616 PCI_ANY_ID, PCI_ANY_ID }, 12617 { MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2116_2, 12618 PCI_ANY_ID, PCI_ANY_ID }, 12619 /* Thunderbolt ~ 2208 */ 12620 { MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2208_1, 12621 PCI_ANY_ID, PCI_ANY_ID }, 12622 { MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2208_2, 12623 PCI_ANY_ID, PCI_ANY_ID }, 12624 { MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2208_3, 12625 PCI_ANY_ID, PCI_ANY_ID }, 12626 { MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2208_4, 12627 PCI_ANY_ID, PCI_ANY_ID }, 12628 { MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2208_5, 12629 PCI_ANY_ID, PCI_ANY_ID }, 12630 { MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2208_6, 12631 PCI_ANY_ID, PCI_ANY_ID }, 12632 /* Mustang ~ 2308 */ 12633 { MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2308_1, 12634 PCI_ANY_ID, PCI_ANY_ID }, 12635 { MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2308_2, 12636 PCI_ANY_ID, PCI_ANY_ID }, 12637 { MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2308_3, 12638 PCI_ANY_ID, PCI_ANY_ID }, 12639 { MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SWITCH_MPI_EP, 12640 PCI_ANY_ID, PCI_ANY_ID }, 12641 { MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SWITCH_MPI_EP_1, 12642 PCI_ANY_ID, PCI_ANY_ID }, 12643 
/* SSS6200 */ 12644 { MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SSS6200, 12645 PCI_ANY_ID, PCI_ANY_ID }, 12646 /* Fury ~ 3004 and 3008 */ 12647 { MPI2_MFGPAGE_VENDORID_LSI, MPI25_MFGPAGE_DEVID_SAS3004, 12648 PCI_ANY_ID, PCI_ANY_ID }, 12649 { MPI2_MFGPAGE_VENDORID_LSI, MPI25_MFGPAGE_DEVID_SAS3008, 12650 PCI_ANY_ID, PCI_ANY_ID }, 12651 /* Invader ~ 3108 */ 12652 { MPI2_MFGPAGE_VENDORID_LSI, MPI25_MFGPAGE_DEVID_SAS3108_1, 12653 PCI_ANY_ID, PCI_ANY_ID }, 12654 { MPI2_MFGPAGE_VENDORID_LSI, MPI25_MFGPAGE_DEVID_SAS3108_2, 12655 PCI_ANY_ID, PCI_ANY_ID }, 12656 { MPI2_MFGPAGE_VENDORID_LSI, MPI25_MFGPAGE_DEVID_SAS3108_5, 12657 PCI_ANY_ID, PCI_ANY_ID }, 12658 { MPI2_MFGPAGE_VENDORID_LSI, MPI25_MFGPAGE_DEVID_SAS3108_6, 12659 PCI_ANY_ID, PCI_ANY_ID }, 12660 /* Cutlass ~ 3216 and 3224 */ 12661 { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3216, 12662 PCI_ANY_ID, PCI_ANY_ID }, 12663 { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3224, 12664 PCI_ANY_ID, PCI_ANY_ID }, 12665 /* Intruder ~ 3316 and 3324 */ 12666 { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3316_1, 12667 PCI_ANY_ID, PCI_ANY_ID }, 12668 { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3316_2, 12669 PCI_ANY_ID, PCI_ANY_ID }, 12670 { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3316_3, 12671 PCI_ANY_ID, PCI_ANY_ID }, 12672 { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3316_4, 12673 PCI_ANY_ID, PCI_ANY_ID }, 12674 { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3324_1, 12675 PCI_ANY_ID, PCI_ANY_ID }, 12676 { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3324_2, 12677 PCI_ANY_ID, PCI_ANY_ID }, 12678 { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3324_3, 12679 PCI_ANY_ID, PCI_ANY_ID }, 12680 { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3324_4, 12681 PCI_ANY_ID, PCI_ANY_ID }, 12682 /* Ventura, Crusader, Harpoon & Tomcat ~ 3516, 3416, 3508 & 3408*/ 12683 { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3508, 12684 PCI_ANY_ID, PCI_ANY_ID }, 12685 { 
MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3508_1, 12686 PCI_ANY_ID, PCI_ANY_ID }, 12687 { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3408, 12688 PCI_ANY_ID, PCI_ANY_ID }, 12689 { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3516, 12690 PCI_ANY_ID, PCI_ANY_ID }, 12691 { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3516_1, 12692 PCI_ANY_ID, PCI_ANY_ID }, 12693 { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3416, 12694 PCI_ANY_ID, PCI_ANY_ID }, 12695 /* Mercator ~ 3616*/ 12696 { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3616, 12697 PCI_ANY_ID, PCI_ANY_ID }, 12698 12699 /* Aero SI 0x00E1 Configurable Secure 12700 * 0x00E2 Hard Secure 12701 */ 12702 { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_CFG_SEC_3916, 12703 PCI_ANY_ID, PCI_ANY_ID }, 12704 { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_HARD_SEC_3916, 12705 PCI_ANY_ID, PCI_ANY_ID }, 12706 12707 /* 12708 * Aero SI –> 0x00E0 Invalid, 0x00E3 Tampered 12709 */ 12710 { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_INVALID0_3916, 12711 PCI_ANY_ID, PCI_ANY_ID }, 12712 { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_INVALID1_3916, 12713 PCI_ANY_ID, PCI_ANY_ID }, 12714 12715 /* Atlas PCIe Switch Management Port */ 12716 { MPI2_MFGPAGE_VENDORID_LSI, MPI26_ATLAS_PCIe_SWITCH_DEVID, 12717 PCI_ANY_ID, PCI_ANY_ID }, 12718 12719 /* Sea SI 0x00E5 Configurable Secure 12720 * 0x00E6 Hard Secure 12721 */ 12722 { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_CFG_SEC_3816, 12723 PCI_ANY_ID, PCI_ANY_ID }, 12724 { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_HARD_SEC_3816, 12725 PCI_ANY_ID, PCI_ANY_ID }, 12726 12727 /* 12728 * ATTO Branded ExpressSAS H12xx GT 12729 */ 12730 { MPI2_MFGPAGE_VENDORID_ATTO, MPI26_MFGPAGE_DEVID_HARD_SEC_3816, 12731 PCI_ANY_ID, PCI_ANY_ID }, 12732 12733 /* 12734 * Sea SI –> 0x00E4 Invalid, 0x00E7 Tampered 12735 */ 12736 { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_INVALID0_3816, 12737 PCI_ANY_ID, PCI_ANY_ID }, 12738 { MPI2_MFGPAGE_VENDORID_LSI, 
MPI26_MFGPAGE_DEVID_INVALID1_3816, 12739 PCI_ANY_ID, PCI_ANY_ID }, 12740 12741 {0} /* Terminating entry */ 12742 }; 12743 MODULE_DEVICE_TABLE(pci, mpt3sas_pci_table); 12744 12745 static struct pci_error_handlers _mpt3sas_err_handler = { 12746 .error_detected = scsih_pci_error_detected, 12747 .mmio_enabled = scsih_pci_mmio_enabled, 12748 .slot_reset = scsih_pci_slot_reset, 12749 .resume = scsih_pci_resume, 12750 }; 12751 12752 static SIMPLE_DEV_PM_OPS(scsih_pm_ops, scsih_suspend, scsih_resume); 12753 12754 static struct pci_driver mpt3sas_driver = { 12755 .name = MPT3SAS_DRIVER_NAME, 12756 .id_table = mpt3sas_pci_table, 12757 .probe = _scsih_probe, 12758 .remove = scsih_remove, 12759 .shutdown = scsih_shutdown, 12760 .err_handler = &_mpt3sas_err_handler, 12761 .driver.pm = &scsih_pm_ops, 12762 }; 12763 12764 /** 12765 * scsih_init - main entry point for this driver. 12766 * 12767 * Return: 0 success, anything else error. 12768 */ 12769 static int 12770 scsih_init(void) 12771 { 12772 mpt2_ids = 0; 12773 mpt3_ids = 0; 12774 12775 mpt3sas_base_initialize_callback_handler(); 12776 12777 /* queuecommand callback hander */ 12778 scsi_io_cb_idx = mpt3sas_base_register_callback_handler(_scsih_io_done); 12779 12780 /* task management callback handler */ 12781 tm_cb_idx = mpt3sas_base_register_callback_handler(_scsih_tm_done); 12782 12783 /* base internal commands callback handler */ 12784 base_cb_idx = mpt3sas_base_register_callback_handler(mpt3sas_base_done); 12785 port_enable_cb_idx = mpt3sas_base_register_callback_handler( 12786 mpt3sas_port_enable_done); 12787 12788 /* transport internal commands callback handler */ 12789 transport_cb_idx = mpt3sas_base_register_callback_handler( 12790 mpt3sas_transport_done); 12791 12792 /* scsih internal commands callback handler */ 12793 scsih_cb_idx = mpt3sas_base_register_callback_handler(_scsih_done); 12794 12795 /* configuration page API internal commands callback handler */ 12796 config_cb_idx = 
mpt3sas_base_register_callback_handler( 12797 mpt3sas_config_done); 12798 12799 /* ctl module callback handler */ 12800 ctl_cb_idx = mpt3sas_base_register_callback_handler(mpt3sas_ctl_done); 12801 12802 tm_tr_cb_idx = mpt3sas_base_register_callback_handler( 12803 _scsih_tm_tr_complete); 12804 12805 tm_tr_volume_cb_idx = mpt3sas_base_register_callback_handler( 12806 _scsih_tm_volume_tr_complete); 12807 12808 tm_sas_control_cb_idx = mpt3sas_base_register_callback_handler( 12809 _scsih_sas_control_complete); 12810 12811 mpt3sas_init_debugfs(); 12812 return 0; 12813 } 12814 12815 /** 12816 * scsih_exit - exit point for this driver (when it is a module). 12817 * 12818 * Return: 0 success, anything else error. 12819 */ 12820 static void 12821 scsih_exit(void) 12822 { 12823 12824 mpt3sas_base_release_callback_handler(scsi_io_cb_idx); 12825 mpt3sas_base_release_callback_handler(tm_cb_idx); 12826 mpt3sas_base_release_callback_handler(base_cb_idx); 12827 mpt3sas_base_release_callback_handler(port_enable_cb_idx); 12828 mpt3sas_base_release_callback_handler(transport_cb_idx); 12829 mpt3sas_base_release_callback_handler(scsih_cb_idx); 12830 mpt3sas_base_release_callback_handler(config_cb_idx); 12831 mpt3sas_base_release_callback_handler(ctl_cb_idx); 12832 12833 mpt3sas_base_release_callback_handler(tm_tr_cb_idx); 12834 mpt3sas_base_release_callback_handler(tm_tr_volume_cb_idx); 12835 mpt3sas_base_release_callback_handler(tm_sas_control_cb_idx); 12836 12837 /* raid transport support */ 12838 if (hbas_to_enumerate != 1) 12839 raid_class_release(mpt3sas_raid_template); 12840 if (hbas_to_enumerate != 2) 12841 raid_class_release(mpt2sas_raid_template); 12842 sas_release_transport(mpt3sas_transport_template); 12843 mpt3sas_exit_debugfs(); 12844 } 12845 12846 /** 12847 * _mpt3sas_init - main entry point for this driver. 12848 * 12849 * Return: 0 success, anything else error. 
12850 */ 12851 static int __init 12852 _mpt3sas_init(void) 12853 { 12854 int error; 12855 12856 pr_info("%s version %s loaded\n", MPT3SAS_DRIVER_NAME, 12857 MPT3SAS_DRIVER_VERSION); 12858 12859 mpt3sas_transport_template = 12860 sas_attach_transport(&mpt3sas_transport_functions); 12861 if (!mpt3sas_transport_template) 12862 return -ENODEV; 12863 12864 /* No need attach mpt3sas raid functions template 12865 * if hbas_to_enumarate value is one. 12866 */ 12867 if (hbas_to_enumerate != 1) { 12868 mpt3sas_raid_template = 12869 raid_class_attach(&mpt3sas_raid_functions); 12870 if (!mpt3sas_raid_template) { 12871 sas_release_transport(mpt3sas_transport_template); 12872 return -ENODEV; 12873 } 12874 } 12875 12876 /* No need to attach mpt2sas raid functions template 12877 * if hbas_to_enumarate value is two 12878 */ 12879 if (hbas_to_enumerate != 2) { 12880 mpt2sas_raid_template = 12881 raid_class_attach(&mpt2sas_raid_functions); 12882 if (!mpt2sas_raid_template) { 12883 sas_release_transport(mpt3sas_transport_template); 12884 return -ENODEV; 12885 } 12886 } 12887 12888 error = scsih_init(); 12889 if (error) { 12890 scsih_exit(); 12891 return error; 12892 } 12893 12894 mpt3sas_ctl_init(hbas_to_enumerate); 12895 12896 error = pci_register_driver(&mpt3sas_driver); 12897 if (error) { 12898 mpt3sas_ctl_exit(hbas_to_enumerate); 12899 scsih_exit(); 12900 } 12901 12902 return error; 12903 } 12904 12905 /** 12906 * _mpt3sas_exit - exit point for this driver (when it is a module). 12907 * 12908 */ 12909 static void __exit 12910 _mpt3sas_exit(void) 12911 { 12912 pr_info("mpt3sas version %s unloading\n", 12913 MPT3SAS_DRIVER_VERSION); 12914 12915 pci_unregister_driver(&mpt3sas_driver); 12916 12917 mpt3sas_ctl_exit(hbas_to_enumerate); 12918 12919 scsih_exit(); 12920 } 12921 12922 module_init(_mpt3sas_init); 12923 module_exit(_mpt3sas_exit); 12924