1 /* 2 * Scsi Host Layer for MPT (Message Passing Technology) based controllers 3 * 4 * This code is based on drivers/scsi/mpt3sas/mpt3sas_scsih.c 5 * Copyright (C) 2012-2014 LSI Corporation 6 * Copyright (C) 2013-2014 Avago Technologies 7 * (mailto: MPT-FusionLinux.pdl@avagotech.com) 8 * 9 * This program is free software; you can redistribute it and/or 10 * modify it under the terms of the GNU General Public License 11 * as published by the Free Software Foundation; either version 2 12 * of the License, or (at your option) any later version. 13 * 14 * This program is distributed in the hope that it will be useful, 15 * but WITHOUT ANY WARRANTY; without even the implied warranty of 16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 17 * GNU General Public License for more details. 18 * 19 * NO WARRANTY 20 * THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR 21 * CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT 22 * LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT, 23 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is 24 * solely responsible for determining the appropriateness of using and 25 * distributing the Program and assumes all risks associated with its 26 * exercise of rights under this Agreement, including but not limited to 27 * the risks and costs of program errors, damage to or loss of data, 28 * programs or equipment, and unavailability or interruption of operations. 
 * DISCLAIMER OF LIABILITY
 * NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY
 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND
 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
 * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
 * USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED
 * HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301,
 * USA.
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/blkdev.h>
#include <linux/sched.h>
#include <linux/workqueue.h>
#include <linux/delay.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/raid_class.h>
#include <linux/blk-mq-pci.h>
#include <asm/unaligned.h>

#include "mpt3sas_base.h"

/* virtual channel numbers used to separate RAID volumes and NVMe
 * devices from bare SAS/SATA devices on channel 0
 */
#define RAID_CHANNEL 1

#define PCIE_CHANNEL 2

/* forward prototypes */
static void _scsih_expander_node_remove(struct MPT3SAS_ADAPTER *ioc,
	struct _sas_node *sas_expander);
static void _firmware_event_work(struct work_struct *work);

static void _scsih_remove_device(struct MPT3SAS_ADAPTER *ioc,
	struct _sas_device *sas_device);
static int _scsih_add_device(struct MPT3SAS_ADAPTER *ioc, u16 handle,
	u8 retry_count, u8 is_pd);
static int _scsih_pcie_add_device(struct MPT3SAS_ADAPTER *ioc, u16 handle);
static void _scsih_pcie_device_remove_from_sml(struct MPT3SAS_ADAPTER *ioc,
	struct _pcie_device *pcie_device);
static void
_scsih_pcie_check_device(struct MPT3SAS_ADAPTER *ioc, u16 handle);
static u8 _scsih_check_for_pending_tm(struct MPT3SAS_ADAPTER *ioc, u16 smid);
static void _scsih_complete_devices_scanning(struct MPT3SAS_ADAPTER *ioc);

/* global parameters */
LIST_HEAD(mpt3sas_ioc_list);
/* global ioc lock for list operations */
DEFINE_SPINLOCK(gioc_lock);

MODULE_AUTHOR(MPT3SAS_AUTHOR);
MODULE_DESCRIPTION(MPT3SAS_DESCRIPTION);
MODULE_LICENSE("GPL");
MODULE_VERSION(MPT3SAS_DRIVER_VERSION);
MODULE_ALIAS("mpt2sas");

/* local parameters */
/* callback indexes registered with the base driver; -1 (0xFF) = unused */
static u8 scsi_io_cb_idx = -1;
static u8 tm_cb_idx = -1;
static u8 ctl_cb_idx = -1;
static u8 base_cb_idx = -1;
static u8 port_enable_cb_idx = -1;
static u8 transport_cb_idx = -1;
static u8 scsih_cb_idx = -1;
static u8 config_cb_idx = -1;
static int mpt2_ids;
static int mpt3_ids;

static u8 tm_tr_cb_idx = -1 ;
static u8 tm_tr_volume_cb_idx = -1 ;
static u8 tm_sas_control_cb_idx = -1;

/* command line options */
/* module_param for logging_level is registered later via module_param_call()
 * so that writes propagate to every ioc; see _scsih_set_debug_level().
 */
static u32 logging_level;
MODULE_PARM_DESC(logging_level,
	" bits for enabling additional logging info (default=0)");


static ushort max_sectors = 0xFFFF;
module_param(max_sectors, ushort, 0444);
MODULE_PARM_DESC(max_sectors, "max sectors, range 64 to 32767 default=32767");


static int missing_delay[2] = {-1, -1};
module_param_array(missing_delay, int, NULL, 0444);
MODULE_PARM_DESC(missing_delay, " device missing delay , io missing delay");

/* scsi-mid layer global parameter is max_report_luns, which is 511 */
#define MPT3SAS_MAX_LUN (16895)
static u64 max_lun = MPT3SAS_MAX_LUN;
module_param(max_lun, ullong, 0444);
MODULE_PARM_DESC(max_lun, " max lun, default=16895 ");

static ushort hbas_to_enumerate;
module_param(hbas_to_enumerate, ushort, 0444);
MODULE_PARM_DESC(hbas_to_enumerate,
	" 0 - enumerates both SAS 2.0 & SAS 3.0 generation HBAs\n \
	1 - enumerates only SAS 2.0 generation HBAs\n \
	2 - enumerates only SAS 3.0 generation HBAs (default=0)");

/* diag_buffer_enable is bitwise
 * bit 0 set = TRACE
 * bit 1 set = SNAPSHOT
 * bit 2 set = EXTENDED
 *
 * Either bit can be set, or both
 */
static int diag_buffer_enable = -1;
module_param(diag_buffer_enable, int, 0444);
MODULE_PARM_DESC(diag_buffer_enable,
	" post diag buffers (TRACE=1/SNAPSHOT=2/EXTENDED=4/default=0)");
static int disable_discovery = -1;
module_param(disable_discovery, int, 0444);
MODULE_PARM_DESC(disable_discovery, " disable discovery ");


/* permit overriding the host protection capabilities mask (EEDP/T10 PI) */
static int prot_mask = -1;
module_param(prot_mask, int, 0444);
MODULE_PARM_DESC(prot_mask, " host protection capabilities mask, def=7 ");

static bool enable_sdev_max_qd;
module_param(enable_sdev_max_qd, bool, 0444);
MODULE_PARM_DESC(enable_sdev_max_qd,
	"Enable sdev max qd as can_queue, def=disabled(0)");

static int multipath_on_hba = -1;
module_param(multipath_on_hba, int, 0);
MODULE_PARM_DESC(multipath_on_hba,
	"Multipath support to add same target device\n\t\t"
	"as many times as it is visible to HBA from various paths\n\t\t"
	"(by default:\n\t\t"
	"\t SAS 2.0 & SAS 3.0 HBA - This will be disabled,\n\t\t"
	"\t SAS 3.5 HBA - This will be enabled)");

static int host_tagset_enable = 1;
module_param(host_tagset_enable, int, 0444);
MODULE_PARM_DESC(host_tagset_enable,
	"Shared host tagset enable/disable Default: enable(1)");

/* raid transport support */
static struct raid_template *mpt3sas_raid_template;
static struct raid_template *mpt2sas_raid_template;


/**
 * struct sense_info - common structure for obtaining sense keys
 * @skey: sense key
 * @asc: additional sense code
 * @ascq: additional sense code qualifier
 */
struct sense_info {
	u8 skey;
	u8 asc;
	u8 ascq;
};

/* NOTE(review): these look like driver-internal event codes, chosen above
 * the MPI2_EVENT_* range so they can share struct fw_event_work::event —
 * confirm against the firmware-event dispatch code further down the file.
 */
#define MPT3SAS_PROCESS_TRIGGER_DIAG (0xFFFB)
#define MPT3SAS_TURN_ON_PFA_LED (0xFFFC)
#define MPT3SAS_PORT_ENABLE_COMPLETE (0xFFFD)
#define MPT3SAS_ABRT_TASK_SET (0xFFFE)
#define MPT3SAS_REMOVE_UNRESPONDING_DEVICES (0xFFFF)
/**
 * struct fw_event_work - firmware event struct
 * @list: link list framework
 * @work: work object (ioc->fault_reset_work_q)
 * @ioc: per adapter object
 * @device_handle: device handle
 * @VF_ID: virtual function id
 * @VP_ID: virtual port id
 * @ignore: flag meaning this event has been marked to ignore
 * @event: firmware event MPI2_EVENT_XXX defined in mpi2_ioc.h
 * @refcount: kref for this event
 * @event_data: reply event data payload follows
 *
 * This object stored on ioc->fw_event_list.
 */
struct fw_event_work {
	struct list_head list;
	struct work_struct work;

	struct MPT3SAS_ADAPTER *ioc;
	u16 device_handle;
	u8 VF_ID;
	u8 VP_ID;
	u8 ignore;
	u16 event;
	struct kref refcount;
	char event_data[] __aligned(4);	/* flexible array, sized at alloc */
};

/* kref release callback: frees the event (and its trailing event_data) */
static void fw_event_work_free(struct kref *r)
{
	kfree(container_of(r, struct fw_event_work, refcount));
}

/* take a reference on a firmware event */
static void fw_event_work_get(struct fw_event_work *fw_work)
{
	kref_get(&fw_work->refcount);
}

/* drop a reference; frees the event when the count hits zero */
static void fw_event_work_put(struct fw_event_work *fw_work)
{
	kref_put(&fw_work->refcount, fw_event_work_free);
}

/* allocate a firmware event with room for @len bytes of event_data;
 * returned object starts with a reference count of one.
 * GFP_ATOMIC because events can be allocated from interrupt context.
 */
static struct fw_event_work *alloc_fw_event_work(int len)
{
	struct fw_event_work *fw_event;

	fw_event = kzalloc(sizeof(*fw_event) + len, GFP_ATOMIC);
	if (!fw_event)
		return NULL;

	kref_init(&fw_event->refcount);
	return fw_event;
}

/**
 * struct _scsi_io_transfer - scsi io transfer
 * @handle: sas device handle (assigned by firmware)
 * @is_raid: flag set for hidden raid components
 * @dir: DMA_TO_DEVICE, DMA_FROM_DEVICE,
 * @data_length: data transfer length
 * @data_dma: dma pointer to data
 * @sense: sense data
 * @lun: lun
 * number
 * @cdb_length: cdb length
 * @cdb: cdb contents
 * @timeout: timeout for this command
 * @VF_ID: virtual function id
 * @VP_ID: virtual port id
 * @valid_reply: flag set for reply message
 * @sense_length: sense length
 * @ioc_status: ioc status
 * @scsi_state: scsi state
 * @scsi_status: scsi status
 * @log_info: log information
 * @transfer_length: data length transfer when there is a reply message
 *
 * Used for sending internal scsi commands to devices within this module.
 * Refer to _scsi_send_scsi_io().
 */
struct _scsi_io_transfer {
	u16 handle;
	u8 is_raid;
	enum dma_data_direction dir;
	u32 data_length;
	dma_addr_t data_dma;
	u8 sense[SCSI_SENSE_BUFFERSIZE];
	u32 lun;
	u8 cdb_length;
	u8 cdb[32];
	u8 timeout;
	u8 VF_ID;
	u8 VP_ID;
	u8 valid_reply;
	/* the following bits are only valid when 'valid_reply = 1' */
	u32 sense_length;
	u16 ioc_status;
	u8 scsi_state;
	u8 scsi_status;
	u32 log_info;
	u32 transfer_length;
};

/**
 * _scsih_set_debug_level - global setting of ioc->logging_level.
 * @val: new value as a string, parsed by param_set_int()
 * @kp: kernel_param backing the logging_level module parameter
 *
 * Propagates the new logging_level to every registered ioc, under
 * gioc_lock so the list walk is safe against concurrent add/remove.
 *
 * Note: The logging levels are defined in mpt3sas_debug.h.
 *
 * Return: 0 on success, otherwise the error from param_set_int().
 */
static int
_scsih_set_debug_level(const char *val, const struct kernel_param *kp)
{
	int ret = param_set_int(val, kp);
	struct MPT3SAS_ADAPTER *ioc;

	if (ret)
		return ret;

	pr_info("setting logging_level(0x%08x)\n", logging_level);
	spin_lock(&gioc_lock);
	list_for_each_entry(ioc, &mpt3sas_ioc_list, list)
		ioc->logging_level = logging_level;
	spin_unlock(&gioc_lock);
	return 0;
}
module_param_call(logging_level, _scsih_set_debug_level, param_get_int,
	&logging_level, 0644);

/**
 * _scsih_srch_boot_sas_address - search based on sas_address
 * @sas_address: sas address
 * @boot_device: boot device object from bios page 2
 *
 * Return: 1 when there's a match, 0 means no match.
 */
static inline int
_scsih_srch_boot_sas_address(u64 sas_address,
	Mpi2BootDeviceSasWwid_t *boot_device)
{
	return (sas_address == le64_to_cpu(boot_device->SASAddress)) ? 1 : 0;
}

/**
 * _scsih_srch_boot_device_name - search based on device name
 * @device_name: device name specified in IDENTIFY frame
 * @boot_device: boot device object from bios page 2
 *
 * Return: 1 when there's a match, 0 means no match.
 */
static inline int
_scsih_srch_boot_device_name(u64 device_name,
	Mpi2BootDeviceDeviceName_t *boot_device)
{
	return (device_name == le64_to_cpu(boot_device->DeviceName)) ? 1 : 0;
}

/**
 * _scsih_srch_boot_encl_slot - search based on enclosure_logical_id/slot
 * @enclosure_logical_id: enclosure logical id
 * @slot_number: slot number
 * @boot_device: boot device object from bios page 2
 *
 * Return: 1 when there's a match, 0 means no match.
 */
static inline int
_scsih_srch_boot_encl_slot(u64 enclosure_logical_id, u16 slot_number,
	Mpi2BootDeviceEnclosureSlot_t *boot_device)
{
	/* both the enclosure id and the slot must match */
	return (enclosure_logical_id == le64_to_cpu(boot_device->
	    EnclosureLogicalID) && slot_number == le16_to_cpu(boot_device->
	    SlotNumber)) ? 1 : 0;
}

/**
 * mpt3sas_get_port_by_id - get hba port entry corresponding to provided
 *			  port number from port list
 * @ioc: per adapter object
 * @port_id: port number
 * @bypass_dirty_port_flag: when set look the matching hba port entry even
 *			  if hba port entry is marked as dirty.
 *
 * Search for hba port entry corresponding to provided port number,
 * if available return port object otherwise return NULL.
 */
struct hba_port *
mpt3sas_get_port_by_id(struct MPT3SAS_ADAPTER *ioc,
	u8 port_id, u8 bypass_dirty_port_flag)
{
	struct hba_port *port, *port_next;

	/*
	 * When multipath_on_hba is disabled then
	 * search the hba_port entry using default
	 * port id i.e. 255
	 */
	if (!ioc->multipath_on_hba)
		port_id = MULTIPATH_DISABLED_PORT_ID;

	list_for_each_entry_safe(port, port_next,
	    &ioc->port_table_list, list) {
		if (port->port_id != port_id)
			continue;
		/* dirty entries only match when the caller asks to bypass */
		if (bypass_dirty_port_flag)
			return port;
		if (port->flags & HBA_PORT_FLAG_DIRTY_PORT)
			continue;
		return port;
	}

	/*
	 * Allocate hba_port object for default port id (i.e. 255)
	 * when multipath_on_hba is disabled for the HBA.
	 * And add this object to port_table_list.
	 */
	if (!ioc->multipath_on_hba) {
		/* GFP_ATOMIC: callers may hold spinlocks */
		port = kzalloc(sizeof(struct hba_port), GFP_ATOMIC);
		if (!port)
			return NULL;

		port->port_id = port_id;
		ioc_info(ioc,
		    "hba_port entry: %p, port: %d is added to hba_port list\n",
		    port, port->port_id);
		list_add_tail(&port->list,
		    &ioc->port_table_list);
		return port;
	}
	return NULL;
}

/**
 * mpt3sas_get_vphy_by_phy - get virtual_phy object corresponding to phy number
 * @ioc: per adapter object
 * @port: hba_port object
 * @phy: phy number
 *
 * Return: virtual_phy object corresponding to phy number, or NULL when the
 * port has no virtual phys or none covers this phy.
 */
struct virtual_phy *
mpt3sas_get_vphy_by_phy(struct MPT3SAS_ADAPTER *ioc,
	struct hba_port *port, u32 phy)
{
	struct virtual_phy *vphy, *vphy_next;

	if (!port->vphys_mask)
		return NULL;

	list_for_each_entry_safe(vphy, vphy_next, &port->vphys_list, list) {
		/* each vphy carries a bitmask of the phys it covers */
		if (vphy->phy_mask & (1 << phy))
			return vphy;
	}
	return NULL;
}

/**
 * _scsih_is_boot_device - search for matching boot device.
 * @sas_address: sas address
 * @device_name: device name specified in IDENTIFY frame
 * @enclosure_logical_id: enclosure logical id
 * @slot: slot number
 * @form: specifies boot device form
 * @boot_device: boot device object from bios page 2
 *
 * Return: 1 when there's a match, 0 means no match.
 */
static int
_scsih_is_boot_device(u64 sas_address, u64 device_name,
	u64 enclosure_logical_id, u16 slot, u8 form,
	Mpi2BiosPage2BootDevice_t *boot_device)
{
	int rc = 0;

	/* dispatch on the bios page 2 "form" to the matching comparator;
	 * a zero key (no address/name/id) never matches
	 */
	switch (form) {
	case MPI2_BIOSPAGE2_FORM_SAS_WWID:
		if (!sas_address)
			break;
		rc = _scsih_srch_boot_sas_address(
		    sas_address, &boot_device->SasWwid);
		break;
	case MPI2_BIOSPAGE2_FORM_ENCLOSURE_SLOT:
		if (!enclosure_logical_id)
			break;
		rc = _scsih_srch_boot_encl_slot(
		    enclosure_logical_id,
		    slot, &boot_device->EnclosureSlot);
		break;
	case MPI2_BIOSPAGE2_FORM_DEVICE_NAME:
		if (!device_name)
			break;
		rc = _scsih_srch_boot_device_name(
		    device_name, &boot_device->DeviceName);
		break;
	case MPI2_BIOSPAGE2_FORM_NO_DEVICE_SPECIFIED:
		break;
	}

	return rc;
}

/**
 * _scsih_get_sas_address - set the sas_address for given device handle
 * @ioc: per adapter object
 * @handle: device handle
 * @sas_address: sas address (output; zeroed on failure)
 *
 * Return: 0 success, -ENXIO when the handle/page doesn't exist,
 * -EIO on other config-page failures.
 */
static int
_scsih_get_sas_address(struct MPT3SAS_ADAPTER *ioc, u16 handle,
	u64 *sas_address)
{
	Mpi2SasDevicePage0_t sas_device_pg0;
	Mpi2ConfigReply_t mpi_reply;
	u32 ioc_status;

	*sas_address = 0;

	if ((mpt3sas_config_get_sas_device_pg0(ioc, &mpi_reply, &sas_device_pg0,
	    MPI2_SAS_DEVICE_PGAD_FORM_HANDLE, handle))) {
		ioc_err(ioc, "failure at %s:%d/%s()!\n",
			__FILE__, __LINE__, __func__);
		return -ENXIO;
	}

	ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & MPI2_IOCSTATUS_MASK;
	if (ioc_status == MPI2_IOCSTATUS_SUCCESS) {
		/* For HBA, vSES doesn't return HBA SAS address. Instead return
		 * vSES's sas address.
		 */
		if ((handle <= ioc->sas_hba.num_phys) &&
		    (!(le32_to_cpu(sas_device_pg0.DeviceInfo) &
		    MPI2_SAS_DEVICE_INFO_SEP)))
			*sas_address = ioc->sas_hba.sas_address;
		else
			*sas_address = le64_to_cpu(sas_device_pg0.SASAddress);
		return 0;
	}

	/* we hit this because the given parent handle doesn't exist */
	if (ioc_status == MPI2_IOCSTATUS_CONFIG_INVALID_PAGE)
		return -ENXIO;

	/* else error case */
	ioc_err(ioc, "handle(0x%04x), ioc_status(0x%04x), failure at %s:%d/%s()!\n",
		handle, ioc_status, __FILE__, __LINE__, __func__);
	return -EIO;
}

/**
 * _scsih_determine_boot_device - determine boot device.
 * @ioc: per adapter object
 * @device: sas_device or pcie_device object
 * @channel: SAS or PCIe channel
 *
 * Determines whether this device should be first reported device to
 * scsi-ml or sas transport, this purpose is for persistent boot device.
 * There are primary, alternate, and current entries in bios page 2. The order
 * priority is primary, alternate, then current.  This routine saves
 * the corresponding device object.
 * The saved data to be used later in _scsih_probe_boot_devices().
 */
static void
_scsih_determine_boot_device(struct MPT3SAS_ADAPTER *ioc, void *device,
	u32 channel)
{
	struct _sas_device *sas_device;
	struct _pcie_device *pcie_device;
	struct _raid_device *raid_device;
	u64 sas_address;
	u64 device_name;
	u64 enclosure_logical_id;
	u16 slot;

	/* only process this function when driver loads */
	if (!ioc->is_driver_loading)
		return;

	/* no Bios, return immediately */
	if (!ioc->bios_pg3.BiosVersion)
		return;

	/* @channel selects how to interpret the opaque @device pointer */
	if (channel == RAID_CHANNEL) {
		raid_device = device;
		sas_address = raid_device->wwid;
		device_name = 0;
		enclosure_logical_id = 0;
		slot = 0;
	} else if (channel == PCIE_CHANNEL) {
		pcie_device = device;
		sas_address = pcie_device->wwid;
		device_name = 0;
		enclosure_logical_id = 0;
		slot = 0;
	} else {
		sas_device = device;
		sas_address = sas_device->sas_address;
		device_name = sas_device->device_name;
		enclosure_logical_id = sas_device->enclosure_logical_id;
		slot = sas_device->slot;
	}

	/* each slot latches the first matching device only */
	if (!ioc->req_boot_device.device) {
		if (_scsih_is_boot_device(sas_address, device_name,
		    enclosure_logical_id, slot,
		    (ioc->bios_pg2.ReqBootDeviceForm &
		    MPI2_BIOSPAGE2_FORM_MASK),
		    &ioc->bios_pg2.RequestedBootDevice)) {
			dinitprintk(ioc,
				    ioc_info(ioc, "%s: req_boot_device(0x%016llx)\n",
					     __func__, (u64)sas_address));
			ioc->req_boot_device.device = device;
			ioc->req_boot_device.channel = channel;
		}
	}

	if (!ioc->req_alt_boot_device.device) {
		if (_scsih_is_boot_device(sas_address, device_name,
		    enclosure_logical_id, slot,
		    (ioc->bios_pg2.ReqAltBootDeviceForm &
		    MPI2_BIOSPAGE2_FORM_MASK),
		    &ioc->bios_pg2.RequestedAltBootDevice)) {
			dinitprintk(ioc,
				    ioc_info(ioc, "%s: req_alt_boot_device(0x%016llx)\n",
					     __func__, (u64)sas_address));
			ioc->req_alt_boot_device.device = device;
			ioc->req_alt_boot_device.channel = channel;
		}
	}

	if (!ioc->current_boot_device.device) {
		if (_scsih_is_boot_device(sas_address, device_name,
		    enclosure_logical_id, slot,
		    (ioc->bios_pg2.CurrentBootDeviceForm &
		    MPI2_BIOSPAGE2_FORM_MASK),
		    &ioc->bios_pg2.CurrentBootDevice)) {
			dinitprintk(ioc,
				    ioc_info(ioc, "%s: current_boot_device(0x%016llx)\n",
					     __func__, (u64)sas_address));
			ioc->current_boot_device.device = device;
			ioc->current_boot_device.channel = channel;
		}
	}
}

/* lockless variant: caller must hold ioc->sas_device_lock;
 * returns the target's sas_device with an extra reference, or NULL
 */
static struct _sas_device *
__mpt3sas_get_sdev_from_target(struct MPT3SAS_ADAPTER *ioc,
	struct MPT3SAS_TARGET *tgt_priv)
{
	struct _sas_device *ret;

	assert_spin_locked(&ioc->sas_device_lock);

	ret = tgt_priv->sas_dev;
	if (ret)
		sas_device_get(ret);

	return ret;
}

/* locked wrapper around __mpt3sas_get_sdev_from_target() */
static struct _sas_device *
mpt3sas_get_sdev_from_target(struct MPT3SAS_ADAPTER *ioc,
	struct MPT3SAS_TARGET *tgt_priv)
{
	struct _sas_device *ret;
	unsigned long flags;

	spin_lock_irqsave(&ioc->sas_device_lock, flags);
	ret = __mpt3sas_get_sdev_from_target(ioc, tgt_priv);
	spin_unlock_irqrestore(&ioc->sas_device_lock, flags);

	return ret;
}

/* lockless variant: caller must hold ioc->pcie_device_lock;
 * returns the target's pcie_device with an extra reference, or NULL
 */
static struct _pcie_device *
__mpt3sas_get_pdev_from_target(struct MPT3SAS_ADAPTER *ioc,
	struct MPT3SAS_TARGET *tgt_priv)
{
	struct _pcie_device *ret;

	assert_spin_locked(&ioc->pcie_device_lock);

	ret = tgt_priv->pcie_dev;
	if (ret)
		pcie_device_get(ret);

	return ret;
}

/**
 * mpt3sas_get_pdev_from_target - pcie device search
 * @ioc: per adapter object
 * @tgt_priv: starget private object
 *
 * Context: This function will acquire ioc->pcie_device_lock and will release
 * before returning the pcie_device object.
 *
 * This searches for pcie_device from target, then return pcie_device object.
 */
static struct _pcie_device *
mpt3sas_get_pdev_from_target(struct MPT3SAS_ADAPTER *ioc,
	struct MPT3SAS_TARGET *tgt_priv)
{
	struct _pcie_device *ret;
	unsigned long flags;

	spin_lock_irqsave(&ioc->pcie_device_lock, flags);
	ret = __mpt3sas_get_pdev_from_target(ioc, tgt_priv);
	spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);

	return ret;
}


/**
 * __mpt3sas_get_sdev_by_rphy - sas device search
 * @ioc: per adapter object
 * @rphy: sas_rphy pointer
 *
 * Context: Caller must hold ioc->sas_device_lock (asserted below).
 *
 * This searches for sas_device from rphy object, checking both the main
 * sas_device_list and the driver-load-time sas_device_init_list, then
 * returns the sas_device object with an extra reference taken.
 */
struct _sas_device *
__mpt3sas_get_sdev_by_rphy(struct MPT3SAS_ADAPTER *ioc,
	struct sas_rphy *rphy)
{
	struct _sas_device *sas_device;

	assert_spin_locked(&ioc->sas_device_lock);

	list_for_each_entry(sas_device, &ioc->sas_device_list, list) {
		if (sas_device->rphy != rphy)
			continue;
		sas_device_get(sas_device);
		return sas_device;
	}

	sas_device = NULL;
	list_for_each_entry(sas_device, &ioc->sas_device_init_list, list) {
		if (sas_device->rphy != rphy)
			continue;
		sas_device_get(sas_device);
		return sas_device;
	}

	return NULL;
}

/**
 * __mpt3sas_get_sdev_by_addr - get _sas_device object corresponding to provided
 *				sas address from sas_device_list list
 * @ioc: per adapter object
 * @sas_address: device sas address
 * @port: port number
 *
 * Context: Caller must hold ioc->sas_device_lock (asserted below).
 *
 * Search for _sas_device object corresponding to provided sas address,
 * if available return _sas_device object address (with an extra reference
 * taken) otherwise return NULL.
 */
struct _sas_device *
__mpt3sas_get_sdev_by_addr(struct MPT3SAS_ADAPTER *ioc,
	u64 sas_address, struct hba_port *port)
{
	struct _sas_device *sas_device;

	if (!port)
		return NULL;

	assert_spin_locked(&ioc->sas_device_lock);

	/* devices must match both sas address and hba port */
	list_for_each_entry(sas_device, &ioc->sas_device_list, list) {
		if (sas_device->sas_address != sas_address)
			continue;
		if (sas_device->port != port)
			continue;
		sas_device_get(sas_device);
		return sas_device;
	}

	list_for_each_entry(sas_device, &ioc->sas_device_init_list, list) {
		if (sas_device->sas_address != sas_address)
			continue;
		if (sas_device->port != port)
			continue;
		sas_device_get(sas_device);
		return sas_device;
	}

	return NULL;
}

/**
 * mpt3sas_get_sdev_by_addr - sas device search
 * @ioc: per adapter object
 * @sas_address: sas address
 * @port: hba port entry
 *
 * Context: This function acquires and releases ioc->sas_device_lock itself;
 * the caller must NOT hold it.
 *
 * This searches for sas_device based on sas_address & port number,
 * then return sas_device object (with an extra reference taken).
 */
struct _sas_device *
mpt3sas_get_sdev_by_addr(struct MPT3SAS_ADAPTER *ioc,
	u64 sas_address, struct hba_port *port)
{
	struct _sas_device *sas_device;
	unsigned long flags;

	spin_lock_irqsave(&ioc->sas_device_lock, flags);
	sas_device = __mpt3sas_get_sdev_by_addr(ioc,
	    sas_address, port);
	spin_unlock_irqrestore(&ioc->sas_device_lock, flags);

	return sas_device;
}

/* lockless variant: caller must hold ioc->sas_device_lock;
 * searches both lists by firmware handle, returns with extra reference
 */
static struct _sas_device *
__mpt3sas_get_sdev_by_handle(struct MPT3SAS_ADAPTER *ioc, u16 handle)
{
	struct _sas_device *sas_device;

	assert_spin_locked(&ioc->sas_device_lock);

	list_for_each_entry(sas_device, &ioc->sas_device_list, list)
		if (sas_device->handle == handle)
			goto found_device;

	list_for_each_entry(sas_device, &ioc->sas_device_init_list, list)
		if (sas_device->handle == handle)
			goto found_device;

	return NULL;

 found_device:
	sas_device_get(sas_device);
	return sas_device;
}

/**
 * mpt3sas_get_sdev_by_handle - sas device search
 * @ioc: per adapter object
 * @handle: sas device handle (assigned by firmware)
 *
 * Context: This function acquires and releases ioc->sas_device_lock itself;
 * the caller must NOT hold it.
 *
 * This searches for sas_device based on handle, then return sas_device
 * object (with an extra reference taken).
 */
struct _sas_device *
mpt3sas_get_sdev_by_handle(struct MPT3SAS_ADAPTER *ioc, u16 handle)
{
	struct _sas_device *sas_device;
	unsigned long flags;

	spin_lock_irqsave(&ioc->sas_device_lock, flags);
	sas_device = __mpt3sas_get_sdev_by_handle(ioc, handle);
	spin_unlock_irqrestore(&ioc->sas_device_lock, flags);

	return sas_device;
}

/**
 * _scsih_display_enclosure_chassis_info - display device location info
 * @ioc: per adapter object
 * @sas_device: per sas device object
 * @sdev: scsi device struct
 * @starget: scsi target struct
 *
 * Prints enclosure logical id/slot, enclosure level/connector name and
 * chassis slot (each only when valid).  The output sink is chosen by
 * whichever of @sdev / @starget is non-NULL, falling back to ioc_info().
 */
static void
_scsih_display_enclosure_chassis_info(struct MPT3SAS_ADAPTER *ioc,
	struct _sas_device *sas_device, struct scsi_device *sdev,
	struct scsi_target *starget)
{
	if (sdev) {
		if (sas_device->enclosure_handle != 0)
			sdev_printk(KERN_INFO, sdev,
			    "enclosure logical id (0x%016llx), slot(%d) \n",
			    (unsigned long long)
			    sas_device->enclosure_logical_id,
			    sas_device->slot);
		if (sas_device->connector_name[0] != '\0')
			sdev_printk(KERN_INFO, sdev,
			    "enclosure level(0x%04x), connector name( %s)\n",
			    sas_device->enclosure_level,
			    sas_device->connector_name);
		if (sas_device->is_chassis_slot_valid)
			sdev_printk(KERN_INFO, sdev, "chassis slot(0x%04x)\n",
			    sas_device->chassis_slot);
	} else if (starget) {
		if (sas_device->enclosure_handle != 0)
			starget_printk(KERN_INFO, starget,
			    "enclosure logical id(0x%016llx), slot(%d) \n",
			    (unsigned long long)
			    sas_device->enclosure_logical_id,
			    sas_device->slot);
		if (sas_device->connector_name[0] != '\0')
			starget_printk(KERN_INFO, starget,
			    "enclosure level(0x%04x), connector name( %s)\n",
			    sas_device->enclosure_level,
			    sas_device->connector_name);
		if (sas_device->is_chassis_slot_valid)
			starget_printk(KERN_INFO, starget,
			    "chassis slot(0x%04x)\n",
			    sas_device->chassis_slot);
	} else {
		if (sas_device->enclosure_handle != 0)
			ioc_info(ioc, "enclosure logical id(0x%016llx), slot(%d)\n",
				 (u64)sas_device->enclosure_logical_id,
				 sas_device->slot);
		if (sas_device->connector_name[0] != '\0')
			ioc_info(ioc, "enclosure level(0x%04x), connector name( %s)\n",
				 sas_device->enclosure_level,
				 sas_device->connector_name);
		if (sas_device->is_chassis_slot_valid)
			ioc_info(ioc, "chassis slot(0x%04x)\n",
				 sas_device->chassis_slot);
	}
}

/**
 * _scsih_sas_device_remove - remove sas_device from list.
 * @ioc: per adapter object
 * @sas_device: the sas_device object
 * Context: This function will acquire ioc->sas_device_lock.
 *
 * If sas_device is on the list, remove it and decrement its reference count.
 */
static void
_scsih_sas_device_remove(struct MPT3SAS_ADAPTER *ioc,
	struct _sas_device *sas_device)
{
	unsigned long flags;

	if (!sas_device)
		return;
	ioc_info(ioc, "removing handle(0x%04x), sas_addr(0x%016llx)\n",
		 sas_device->handle, (u64)sas_device->sas_address);

	_scsih_display_enclosure_chassis_info(ioc, sas_device, NULL, NULL);

	/*
	 * The lock serializes access to the list, but we still need to verify
	 * that nobody removed the entry while we were waiting on the lock.
	 */
	spin_lock_irqsave(&ioc->sas_device_lock, flags);
	if (!list_empty(&sas_device->list)) {
		list_del_init(&sas_device->list);
		sas_device_put(sas_device);	/* drop the list's reference */
	}
	spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
}

/**
 * _scsih_device_remove_by_handle - removing device object by handle
 * @ioc: per adapter object
 * @handle: device handle
 *
 * No-op while host recovery is in progress.  Otherwise unlinks the device
 * from the list (dropping the list's reference) and tears it down via
 * _scsih_remove_device(); the lookup reference is dropped at the end.
 */
static void
_scsih_device_remove_by_handle(struct MPT3SAS_ADAPTER *ioc, u16 handle)
{
	struct _sas_device *sas_device;
	unsigned long flags;

	if (ioc->shost_recovery)
		return;

	spin_lock_irqsave(&ioc->sas_device_lock, flags);
	sas_device = __mpt3sas_get_sdev_by_handle(ioc, handle);
	if (sas_device) {
		list_del_init(&sas_device->list);
		sas_device_put(sas_device);	/* drop the list's reference */
	}
	spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
	if (sas_device) {
		_scsih_remove_device(ioc, sas_device);
		sas_device_put(sas_device);	/* drop the lookup reference */
	}
}

/**
 * mpt3sas_device_remove_by_sas_address - removing device object by
 *					sas address & port number
 * @ioc: per adapter object
 * @sas_address: device sas_address
 * @port: hba port entry
 *
 * Return nothing.
 */
void
mpt3sas_device_remove_by_sas_address(struct MPT3SAS_ADAPTER *ioc,
	u64 sas_address, struct hba_port *port)
{
	struct _sas_device *sas_device;
	unsigned long flags;

	if (ioc->shost_recovery)
		return;

	spin_lock_irqsave(&ioc->sas_device_lock, flags);
	sas_device = __mpt3sas_get_sdev_by_addr(ioc, sas_address, port);
	if (sas_device) {
		list_del_init(&sas_device->list);
		sas_device_put(sas_device);	/* drop the list's reference */
	}
	spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
	if (sas_device) {
		_scsih_remove_device(ioc, sas_device);
		sas_device_put(sas_device);	/* drop the lookup reference */
	}
}

/**
 * _scsih_sas_device_add - insert sas_device to the list.
1014 * @ioc: per adapter object 1015 * @sas_device: the sas_device object 1016 * Context: This function will acquire ioc->sas_device_lock. 1017 * 1018 * Adding new object to the ioc->sas_device_list. 1019 */ 1020 static void 1021 _scsih_sas_device_add(struct MPT3SAS_ADAPTER *ioc, 1022 struct _sas_device *sas_device) 1023 { 1024 unsigned long flags; 1025 1026 dewtprintk(ioc, 1027 ioc_info(ioc, "%s: handle(0x%04x), sas_addr(0x%016llx)\n", 1028 __func__, sas_device->handle, 1029 (u64)sas_device->sas_address)); 1030 1031 dewtprintk(ioc, _scsih_display_enclosure_chassis_info(ioc, sas_device, 1032 NULL, NULL)); 1033 1034 spin_lock_irqsave(&ioc->sas_device_lock, flags); 1035 sas_device_get(sas_device); 1036 list_add_tail(&sas_device->list, &ioc->sas_device_list); 1037 spin_unlock_irqrestore(&ioc->sas_device_lock, flags); 1038 1039 if (ioc->hide_drives) { 1040 clear_bit(sas_device->handle, ioc->pend_os_device_add); 1041 return; 1042 } 1043 1044 if (!mpt3sas_transport_port_add(ioc, sas_device->handle, 1045 sas_device->sas_address_parent, sas_device->port)) { 1046 _scsih_sas_device_remove(ioc, sas_device); 1047 } else if (!sas_device->starget) { 1048 /* 1049 * When asyn scanning is enabled, its not possible to remove 1050 * devices while scanning is turned on due to an oops in 1051 * scsi_sysfs_add_sdev()->add_device()->sysfs_addrm_start() 1052 */ 1053 if (!ioc->is_driver_loading) { 1054 mpt3sas_transport_port_remove(ioc, 1055 sas_device->sas_address, 1056 sas_device->sas_address_parent, 1057 sas_device->port); 1058 _scsih_sas_device_remove(ioc, sas_device); 1059 } 1060 } else 1061 clear_bit(sas_device->handle, ioc->pend_os_device_add); 1062 } 1063 1064 /** 1065 * _scsih_sas_device_init_add - insert sas_device to the list. 1066 * @ioc: per adapter object 1067 * @sas_device: the sas_device object 1068 * Context: This function will acquire ioc->sas_device_lock. 1069 * 1070 * Adding new object at driver load time to the ioc->sas_device_init_list. 
 */
static void
_scsih_sas_device_init_add(struct MPT3SAS_ADAPTER *ioc,
	struct _sas_device *sas_device)
{
	unsigned long flags;

	dewtprintk(ioc,
		   ioc_info(ioc, "%s: handle(0x%04x), sas_addr(0x%016llx)\n",
			    __func__, sas_device->handle,
			    (u64)sas_device->sas_address));

	dewtprintk(ioc, _scsih_display_enclosure_chassis_info(ioc, sas_device,
	    NULL, NULL));

	spin_lock_irqsave(&ioc->sas_device_lock, flags);
	/* The init list owns a reference while the device is linked on it. */
	sas_device_get(sas_device);
	list_add_tail(&sas_device->list, &ioc->sas_device_init_list);
	_scsih_determine_boot_device(ioc, sas_device, 0);
	spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
}

/**
 * __mpt3sas_get_pdev_by_wwid - pcie device search by wwid
 * @ioc: per adapter object
 * @wwid: wwid
 * Context: Caller must hold ioc->pcie_device_lock.
 *
 * Searches both ioc->pcie_device_list and ioc->pcie_device_init_list.
 *
 * Return: pcie_device object with an elevated reference count, or NULL.
 */
static struct _pcie_device *
__mpt3sas_get_pdev_by_wwid(struct MPT3SAS_ADAPTER *ioc, u64 wwid)
{
	struct _pcie_device *pcie_device;

	assert_spin_locked(&ioc->pcie_device_lock);

	list_for_each_entry(pcie_device, &ioc->pcie_device_list, list)
		if (pcie_device->wwid == wwid)
			goto found_device;

	list_for_each_entry(pcie_device, &ioc->pcie_device_init_list, list)
		if (pcie_device->wwid == wwid)
			goto found_device;

	return NULL;

 found_device:
	/* Caller is responsible for the matching pcie_device_put(). */
	pcie_device_get(pcie_device);
	return pcie_device;
}


/**
 * mpt3sas_get_pdev_by_wwid - pcie device search
 * @ioc: per adapter object
 * @wwid: wwid
 *
 * Context: This function will acquire ioc->pcie_device_lock and will release
 * before returning the pcie_device object.
 *
 * This searches for pcie_device based on wwid, then return pcie_device object.
 */
static struct _pcie_device *
mpt3sas_get_pdev_by_wwid(struct MPT3SAS_ADAPTER *ioc, u64 wwid)
{
	struct _pcie_device *pcie_device;
	unsigned long flags;

	spin_lock_irqsave(&ioc->pcie_device_lock, flags);
	/* Returned object carries a reference; caller must put it. */
	pcie_device = __mpt3sas_get_pdev_by_wwid(ioc, wwid);
	spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);

	return pcie_device;
}


/**
 * __mpt3sas_get_pdev_by_idchannel - pcie device search by id:channel
 * @ioc: per adapter object
 * @id: target id
 * @channel: channel number
 * Context: Caller must hold ioc->pcie_device_lock.
 *
 * Searches both ioc->pcie_device_list and ioc->pcie_device_init_list.
 *
 * Return: pcie_device object with an elevated reference count, or NULL.
 */
static struct _pcie_device *
__mpt3sas_get_pdev_by_idchannel(struct MPT3SAS_ADAPTER *ioc, int id,
	int channel)
{
	struct _pcie_device *pcie_device;

	assert_spin_locked(&ioc->pcie_device_lock);

	list_for_each_entry(pcie_device, &ioc->pcie_device_list, list)
		if (pcie_device->id == id && pcie_device->channel == channel)
			goto found_device;

	list_for_each_entry(pcie_device, &ioc->pcie_device_init_list, list)
		if (pcie_device->id == id && pcie_device->channel == channel)
			goto found_device;

	return NULL;

 found_device:
	/* Caller is responsible for the matching pcie_device_put(). */
	pcie_device_get(pcie_device);
	return pcie_device;
}

/**
 * __mpt3sas_get_pdev_by_handle - pcie device search by firmware handle
 * @ioc: per adapter object
 * @handle: Firmware device handle
 * Context: Caller must hold ioc->pcie_device_lock.
 *
 * Searches both ioc->pcie_device_list and ioc->pcie_device_init_list.
 *
 * Return: pcie_device object with an elevated reference count, or NULL.
 */
static struct _pcie_device *
__mpt3sas_get_pdev_by_handle(struct MPT3SAS_ADAPTER *ioc, u16 handle)
{
	struct _pcie_device *pcie_device;

	assert_spin_locked(&ioc->pcie_device_lock);

	list_for_each_entry(pcie_device, &ioc->pcie_device_list, list)
		if (pcie_device->handle == handle)
			goto found_device;

	list_for_each_entry(pcie_device, &ioc->pcie_device_init_list, list)
		if (pcie_device->handle == handle)
			goto found_device;

	return NULL;

 found_device:
	/* Caller is responsible for the matching pcie_device_put(). */
	pcie_device_get(pcie_device);
	return pcie_device;
}


/**
 * mpt3sas_get_pdev_by_handle - pcie device search
 * @ioc: per adapter object
 * @handle: Firmware device handle
 *
 * Context: This function will acquire ioc->pcie_device_lock and will release
 * before returning the pcie_device object.
 *
 * This searches for pcie_device based on handle, then return pcie_device
 * object.
 */
struct _pcie_device *
mpt3sas_get_pdev_by_handle(struct MPT3SAS_ADAPTER *ioc, u16 handle)
{
	struct _pcie_device *pcie_device;
	unsigned long flags;

	spin_lock_irqsave(&ioc->pcie_device_lock, flags);
	/* Returned object carries a reference; caller must put it. */
	pcie_device = __mpt3sas_get_pdev_by_handle(ioc, handle);
	spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);

	return pcie_device;
}

/**
 * _scsih_set_nvme_max_shutdown_latency - Update max_shutdown_latency.
 * @ioc: per adapter object
 * Context: This function will acquire ioc->pcie_device_lock
 *
 * Update ioc->max_shutdown_latency to that NVMe drives RTD3 Entry Latency
 * which has reported maximum among all available NVMe drives.
 * Minimum max_shutdown_latency will be six seconds.
 */
static void
_scsih_set_nvme_max_shutdown_latency(struct MPT3SAS_ADAPTER *ioc)
{
	struct _pcie_device *pcie_device;
	unsigned long flags;
	/* Floor value; never report less than the IOC default timeout. */
	u16 shutdown_latency = IO_UNIT_CONTROL_SHUTDOWN_TIMEOUT;

	spin_lock_irqsave(&ioc->pcie_device_lock, flags);
	/* Take the maximum reported latency across all listed devices. */
	list_for_each_entry(pcie_device, &ioc->pcie_device_list, list) {
		if (pcie_device->shutdown_latency) {
			if (shutdown_latency < pcie_device->shutdown_latency)
				shutdown_latency =
					pcie_device->shutdown_latency;
		}
	}
	ioc->max_shutdown_latency = shutdown_latency;
	spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
}

/**
 * _scsih_pcie_device_remove - remove pcie_device from list.
 * @ioc: per adapter object
 * @pcie_device: the pcie_device object
 * Context: This function will acquire ioc->pcie_device_lock.
 *
 * If pcie_device is on the list, remove it and decrement its reference count.
 */
static void
_scsih_pcie_device_remove(struct MPT3SAS_ADAPTER *ioc,
	struct _pcie_device *pcie_device)
{
	unsigned long flags;
	int was_on_pcie_device_list = 0;
	u8 update_latency = 0;

	if (!pcie_device)
		return;
	ioc_info(ioc, "removing handle(0x%04x), wwid(0x%016llx)\n",
		 pcie_device->handle, (u64)pcie_device->wwid);
	if (pcie_device->enclosure_handle != 0)
		ioc_info(ioc, "removing enclosure logical id(0x%016llx), slot(%d)\n",
			 (u64)pcie_device->enclosure_logical_id,
			 pcie_device->slot);
	if (pcie_device->connector_name[0] != '\0')
		ioc_info(ioc, "removing enclosure level(0x%04x), connector name( %s)\n",
			 pcie_device->enclosure_level,
			 pcie_device->connector_name);

	spin_lock_irqsave(&ioc->pcie_device_lock, flags);
	if (!list_empty(&pcie_device->list)) {
		list_del_init(&pcie_device->list);
		was_on_pcie_device_list = 1;
	}
	/* Remember whether this device set the IOC-wide maximum latency. */
	if (pcie_device->shutdown_latency == ioc->max_shutdown_latency)
		update_latency = 1;
	spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
	if (was_on_pcie_device_list) {
		kfree(pcie_device->serial_number);
		/* Drop the reference that was held by the list. */
		pcie_device_put(pcie_device);
	}

	/*
	 * This device's RTD3 Entry Latency matches IOC's
	 * max_shutdown_latency. Recalculate IOC's max_shutdown_latency
	 * from the available drives as current drive is getting removed.
	 */
	if (update_latency)
		_scsih_set_nvme_max_shutdown_latency(ioc);
}


/**
 * _scsih_pcie_device_remove_by_handle - removing pcie device object by handle
 * @ioc: per adapter object
 * @handle: device handle
 *
 * Unlinks the matching pcie_device and removes it from the SCSI midlayer.
 * No-op while host recovery is in progress.
 */
static void
_scsih_pcie_device_remove_by_handle(struct MPT3SAS_ADAPTER *ioc, u16 handle)
{
	struct _pcie_device *pcie_device;
	unsigned long flags;
	int was_on_pcie_device_list = 0;
	u8 update_latency = 0;

	if (ioc->shost_recovery)
		return;

	spin_lock_irqsave(&ioc->pcie_device_lock, flags);
	/* Lookup returns with an elevated reference count. */
	pcie_device = __mpt3sas_get_pdev_by_handle(ioc, handle);
	if (pcie_device) {
		if (!list_empty(&pcie_device->list)) {
			list_del_init(&pcie_device->list);
			was_on_pcie_device_list = 1;
			/* Drop the list's reference; lookup ref remains. */
			pcie_device_put(pcie_device);
		}
		if (pcie_device->shutdown_latency == ioc->max_shutdown_latency)
			update_latency = 1;
	}
	spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
	if (was_on_pcie_device_list) {
		_scsih_pcie_device_remove_from_sml(ioc, pcie_device);
		/* Drop the lookup reference taken above. */
		pcie_device_put(pcie_device);
	}

	/*
	 * This device's RTD3 Entry Latency matches IOC's
	 * max_shutdown_latency. Recalculate IOC's max_shutdown_latency
	 * from the available drives as current drive is getting removed.
	 */
	if (update_latency)
		_scsih_set_nvme_max_shutdown_latency(ioc);
}

/**
 * _scsih_pcie_device_add - add pcie_device object
 * @ioc: per adapter object
 * @pcie_device: pcie_device object
 *
 * This is added to the pcie_device_list link list.
 */
static void
_scsih_pcie_device_add(struct MPT3SAS_ADAPTER *ioc,
	struct _pcie_device *pcie_device)
{
	unsigned long flags;

	dewtprintk(ioc,
		   ioc_info(ioc, "%s: handle (0x%04x), wwid(0x%016llx)\n",
			    __func__,
			    pcie_device->handle, (u64)pcie_device->wwid));
	if (pcie_device->enclosure_handle != 0)
		dewtprintk(ioc,
			   ioc_info(ioc, "%s: enclosure logical id(0x%016llx), slot( %d)\n",
				    __func__,
				    (u64)pcie_device->enclosure_logical_id,
				    pcie_device->slot));
	if (pcie_device->connector_name[0] != '\0')
		dewtprintk(ioc,
			   ioc_info(ioc, "%s: enclosure level(0x%04x), connector name( %s)\n",
				    __func__, pcie_device->enclosure_level,
				    pcie_device->connector_name));

	spin_lock_irqsave(&ioc->pcie_device_lock, flags);
	/* The list owns a reference for as long as the device is linked. */
	pcie_device_get(pcie_device);
	list_add_tail(&pcie_device->list, &ioc->pcie_device_list);
	spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);

	if (pcie_device->access_status ==
	    MPI26_PCIEDEV0_ASTATUS_DEVICE_BLOCKED) {
		/* Blocked devices stay on the list but are not exposed. */
		clear_bit(pcie_device->handle, ioc->pend_os_device_add);
		return;
	}
	if (scsi_add_device(ioc->shost, PCIE_CHANNEL, pcie_device->id, 0)) {
		/* Midlayer rejected the device; unlink it again. */
		_scsih_pcie_device_remove(ioc, pcie_device);
	} else if (!pcie_device->starget) {
		if (!ioc->is_driver_loading) {
			/*TODO-- Need to find out whether this condition will occur or not*/
			clear_bit(pcie_device->handle, ioc->pend_os_device_add);
		}
	} else
		clear_bit(pcie_device->handle, ioc->pend_os_device_add);
}

/**
 * _scsih_pcie_device_init_add - insert pcie_device to the init list.
 * @ioc: per adapter object
 * @pcie_device: the pcie_device object
 * Context: This function will acquire ioc->pcie_device_lock.
 *
 * Adding new object at driver load time to the ioc->pcie_device_init_list.
 */
static void
_scsih_pcie_device_init_add(struct MPT3SAS_ADAPTER *ioc,
	struct _pcie_device *pcie_device)
{
	unsigned long flags;

	dewtprintk(ioc,
		   ioc_info(ioc, "%s: handle (0x%04x), wwid(0x%016llx)\n",
			    __func__,
			    pcie_device->handle, (u64)pcie_device->wwid));
	if (pcie_device->enclosure_handle != 0)
		dewtprintk(ioc,
			   ioc_info(ioc, "%s: enclosure logical id(0x%016llx), slot( %d)\n",
				    __func__,
				    (u64)pcie_device->enclosure_logical_id,
				    pcie_device->slot));
	if (pcie_device->connector_name[0] != '\0')
		dewtprintk(ioc,
			   ioc_info(ioc, "%s: enclosure level(0x%04x), connector name( %s)\n",
				    __func__, pcie_device->enclosure_level,
				    pcie_device->connector_name));

	spin_lock_irqsave(&ioc->pcie_device_lock, flags);
	/* The init list owns a reference while the device is linked on it. */
	pcie_device_get(pcie_device);
	list_add_tail(&pcie_device->list, &ioc->pcie_device_init_list);
	/* Blocked devices are not considered as boot device candidates. */
	if (pcie_device->access_status !=
	    MPI26_PCIEDEV0_ASTATUS_DEVICE_BLOCKED)
		_scsih_determine_boot_device(ioc, pcie_device, PCIE_CHANNEL);
	spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
}
/**
 * _scsih_raid_device_find_by_id - raid device search
 * @ioc: per adapter object
 * @id: sas device target id
 * @channel: sas device channel
 * Context: Calling function should acquire ioc->raid_device_lock
 *
 * This searches for raid_device based on target id, then return raid_device
 * object.
1430 */ 1431 static struct _raid_device * 1432 _scsih_raid_device_find_by_id(struct MPT3SAS_ADAPTER *ioc, int id, int channel) 1433 { 1434 struct _raid_device *raid_device, *r; 1435 1436 r = NULL; 1437 list_for_each_entry(raid_device, &ioc->raid_device_list, list) { 1438 if (raid_device->id == id && raid_device->channel == channel) { 1439 r = raid_device; 1440 goto out; 1441 } 1442 } 1443 1444 out: 1445 return r; 1446 } 1447 1448 /** 1449 * mpt3sas_raid_device_find_by_handle - raid device search 1450 * @ioc: per adapter object 1451 * @handle: sas device handle (assigned by firmware) 1452 * Context: Calling function should acquire ioc->raid_device_lock 1453 * 1454 * This searches for raid_device based on handle, then return raid_device 1455 * object. 1456 */ 1457 struct _raid_device * 1458 mpt3sas_raid_device_find_by_handle(struct MPT3SAS_ADAPTER *ioc, u16 handle) 1459 { 1460 struct _raid_device *raid_device, *r; 1461 1462 r = NULL; 1463 list_for_each_entry(raid_device, &ioc->raid_device_list, list) { 1464 if (raid_device->handle != handle) 1465 continue; 1466 r = raid_device; 1467 goto out; 1468 } 1469 1470 out: 1471 return r; 1472 } 1473 1474 /** 1475 * _scsih_raid_device_find_by_wwid - raid device search 1476 * @ioc: per adapter object 1477 * @wwid: ? 1478 * Context: Calling function should acquire ioc->raid_device_lock 1479 * 1480 * This searches for raid_device based on wwid, then return raid_device 1481 * object. 
1482 */ 1483 static struct _raid_device * 1484 _scsih_raid_device_find_by_wwid(struct MPT3SAS_ADAPTER *ioc, u64 wwid) 1485 { 1486 struct _raid_device *raid_device, *r; 1487 1488 r = NULL; 1489 list_for_each_entry(raid_device, &ioc->raid_device_list, list) { 1490 if (raid_device->wwid != wwid) 1491 continue; 1492 r = raid_device; 1493 goto out; 1494 } 1495 1496 out: 1497 return r; 1498 } 1499 1500 /** 1501 * _scsih_raid_device_add - add raid_device object 1502 * @ioc: per adapter object 1503 * @raid_device: raid_device object 1504 * 1505 * This is added to the raid_device_list link list. 1506 */ 1507 static void 1508 _scsih_raid_device_add(struct MPT3SAS_ADAPTER *ioc, 1509 struct _raid_device *raid_device) 1510 { 1511 unsigned long flags; 1512 1513 dewtprintk(ioc, 1514 ioc_info(ioc, "%s: handle(0x%04x), wwid(0x%016llx)\n", 1515 __func__, 1516 raid_device->handle, (u64)raid_device->wwid)); 1517 1518 spin_lock_irqsave(&ioc->raid_device_lock, flags); 1519 list_add_tail(&raid_device->list, &ioc->raid_device_list); 1520 spin_unlock_irqrestore(&ioc->raid_device_lock, flags); 1521 } 1522 1523 /** 1524 * _scsih_raid_device_remove - delete raid_device object 1525 * @ioc: per adapter object 1526 * @raid_device: raid_device object 1527 * 1528 */ 1529 static void 1530 _scsih_raid_device_remove(struct MPT3SAS_ADAPTER *ioc, 1531 struct _raid_device *raid_device) 1532 { 1533 unsigned long flags; 1534 1535 spin_lock_irqsave(&ioc->raid_device_lock, flags); 1536 list_del(&raid_device->list); 1537 kfree(raid_device); 1538 spin_unlock_irqrestore(&ioc->raid_device_lock, flags); 1539 } 1540 1541 /** 1542 * mpt3sas_scsih_expander_find_by_handle - expander device search 1543 * @ioc: per adapter object 1544 * @handle: expander handle (assigned by firmware) 1545 * Context: Calling function should acquire ioc->sas_device_lock 1546 * 1547 * This searches for expander device based on handle, then returns the 1548 * sas_node object. 
1549 */ 1550 struct _sas_node * 1551 mpt3sas_scsih_expander_find_by_handle(struct MPT3SAS_ADAPTER *ioc, u16 handle) 1552 { 1553 struct _sas_node *sas_expander, *r; 1554 1555 r = NULL; 1556 list_for_each_entry(sas_expander, &ioc->sas_expander_list, list) { 1557 if (sas_expander->handle != handle) 1558 continue; 1559 r = sas_expander; 1560 goto out; 1561 } 1562 out: 1563 return r; 1564 } 1565 1566 /** 1567 * mpt3sas_scsih_enclosure_find_by_handle - exclosure device search 1568 * @ioc: per adapter object 1569 * @handle: enclosure handle (assigned by firmware) 1570 * Context: Calling function should acquire ioc->sas_device_lock 1571 * 1572 * This searches for enclosure device based on handle, then returns the 1573 * enclosure object. 1574 */ 1575 static struct _enclosure_node * 1576 mpt3sas_scsih_enclosure_find_by_handle(struct MPT3SAS_ADAPTER *ioc, u16 handle) 1577 { 1578 struct _enclosure_node *enclosure_dev, *r; 1579 1580 r = NULL; 1581 list_for_each_entry(enclosure_dev, &ioc->enclosure_list, list) { 1582 if (le16_to_cpu(enclosure_dev->pg0.EnclosureHandle) != handle) 1583 continue; 1584 r = enclosure_dev; 1585 goto out; 1586 } 1587 out: 1588 return r; 1589 } 1590 /** 1591 * mpt3sas_scsih_expander_find_by_sas_address - expander device search 1592 * @ioc: per adapter object 1593 * @sas_address: sas address 1594 * @port: hba port entry 1595 * Context: Calling function should acquire ioc->sas_node_lock. 1596 * 1597 * This searches for expander device based on sas_address & port number, 1598 * then returns the sas_node object. 
1599 */ 1600 struct _sas_node * 1601 mpt3sas_scsih_expander_find_by_sas_address(struct MPT3SAS_ADAPTER *ioc, 1602 u64 sas_address, struct hba_port *port) 1603 { 1604 struct _sas_node *sas_expander, *r = NULL; 1605 1606 if (!port) 1607 return r; 1608 1609 list_for_each_entry(sas_expander, &ioc->sas_expander_list, list) { 1610 if (sas_expander->sas_address != sas_address) 1611 continue; 1612 if (sas_expander->port != port) 1613 continue; 1614 r = sas_expander; 1615 goto out; 1616 } 1617 out: 1618 return r; 1619 } 1620 1621 /** 1622 * _scsih_expander_node_add - insert expander device to the list. 1623 * @ioc: per adapter object 1624 * @sas_expander: the sas_device object 1625 * Context: This function will acquire ioc->sas_node_lock. 1626 * 1627 * Adding new object to the ioc->sas_expander_list. 1628 */ 1629 static void 1630 _scsih_expander_node_add(struct MPT3SAS_ADAPTER *ioc, 1631 struct _sas_node *sas_expander) 1632 { 1633 unsigned long flags; 1634 1635 spin_lock_irqsave(&ioc->sas_node_lock, flags); 1636 list_add_tail(&sas_expander->list, &ioc->sas_expander_list); 1637 spin_unlock_irqrestore(&ioc->sas_node_lock, flags); 1638 } 1639 1640 /** 1641 * _scsih_is_end_device - determines if device is an end device 1642 * @device_info: bitfield providing information about the device. 1643 * Context: none 1644 * 1645 * Return: 1 if end device. 1646 */ 1647 static int 1648 _scsih_is_end_device(u32 device_info) 1649 { 1650 if (device_info & MPI2_SAS_DEVICE_INFO_END_DEVICE && 1651 ((device_info & MPI2_SAS_DEVICE_INFO_SSP_TARGET) | 1652 (device_info & MPI2_SAS_DEVICE_INFO_STP_TARGET) | 1653 (device_info & MPI2_SAS_DEVICE_INFO_SATA_DEVICE))) 1654 return 1; 1655 else 1656 return 0; 1657 } 1658 1659 /** 1660 * _scsih_is_nvme_pciescsi_device - determines if 1661 * device is an pcie nvme/scsi device 1662 * @device_info: bitfield providing information about the device. 1663 * Context: none 1664 * 1665 * Returns 1 if device is pcie device type nvme/scsi. 
 */
static int
_scsih_is_nvme_pciescsi_device(u32 device_info)
{
	/* Device type is a masked subfield; accept both NVMe and SCSI. */
	if (((device_info & MPI26_PCIE_DEVINFO_MASK_DEVICE_TYPE)
	    == MPI26_PCIE_DEVINFO_NVME) ||
	    ((device_info & MPI26_PCIE_DEVINFO_MASK_DEVICE_TYPE)
	    == MPI26_PCIE_DEVINFO_SCSI))
		return 1;
	else
		return 0;
}

/**
 * _scsih_scsi_lookup_find_by_target - search for matching channel:id
 * @ioc: per adapter object
 * @id: target id
 * @channel: channel
 * Context: This function will acquire ioc->scsi_lookup_lock.
 *
 * NOTE(review): no lock is actually taken here; the Context line above
 * looks stale — confirm against the locking scheme of the callers.
 *
 * This will search for a matching channel:id in the scsi_lookup array,
 * returning 1 if found.
 */
static u8
_scsih_scsi_lookup_find_by_target(struct MPT3SAS_ADAPTER *ioc, int id,
	int channel)
{
	int smid;
	struct scsi_cmnd *scmd;

	/*
	 * Walk every possible smid; mpt3sas_scsih_scsi_lookup_get() returns
	 * NULL for slots without an outstanding request.
	 */
	for (smid = 1;
	     smid <= ioc->shost->can_queue; smid++) {
		scmd = mpt3sas_scsih_scsi_lookup_get(ioc, smid);
		if (!scmd)
			continue;
		if (scmd->device->id == id &&
		    scmd->device->channel == channel)
			return 1;
	}
	return 0;
}

/**
 * _scsih_scsi_lookup_find_by_lun - search for matching channel:id:lun
 * @ioc: per adapter object
 * @id: target id
 * @lun: lun number
 * @channel: channel
 * Context: This function will acquire ioc->scsi_lookup_lock.
 *
 * This will search for a matching channel:id:lun in the scsi_lookup array,
 * returning 1 if found.
 */
static u8
_scsih_scsi_lookup_find_by_lun(struct MPT3SAS_ADAPTER *ioc, int id,
	unsigned int lun, int channel)
{
	int smid;
	struct scsi_cmnd *scmd;

	/* Walk every smid; NULL means that slot has no outstanding I/O. */
	for (smid = 1; smid <= ioc->shost->can_queue; smid++) {

		scmd = mpt3sas_scsih_scsi_lookup_get(ioc, smid);
		if (!scmd)
			continue;
		if (scmd->device->id == id &&
		    scmd->device->channel == channel &&
		    scmd->device->lun == lun)
			return 1;
	}
	return 0;
}

/**
 * mpt3sas_scsih_scsi_lookup_get - returns scmd entry
 * @ioc: per adapter object
 * @smid: system request message index
 *
 * Return: the smid stored scmd pointer.
 * Then will dereference the stored scmd pointer.
 */
struct scsi_cmnd *
mpt3sas_scsih_scsi_lookup_get(struct MPT3SAS_ADAPTER *ioc, u16 smid)
{
	struct scsi_cmnd *scmd = NULL;
	struct scsiio_tracker *st;
	Mpi25SCSIIORequest_t *mpi_request;
	u16 tag = smid - 1;	/* block layer tags are zero based */

	/* Internal smids (beyond scsiio_depth minus reserved) never map
	 * to midlayer commands.
	 */
	if (smid > 0 &&
	    smid <= ioc->scsiio_depth - INTERNAL_SCSIIO_CMDS_COUNT) {
		/* Rebuild the blk-mq unique tag from hw queue number + tag. */
		u32 unique_tag =
		    ioc->io_queue_num[tag] << BLK_MQ_UNIQUE_TAG_BITS | tag;

		mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);

		/*
		 * If SCSI IO request is outstanding at driver level then
		 * DevHandle filed must be non-zero. If DevHandle is zero
		 * then it means that this smid is free at driver level,
		 * so return NULL.
		 */
		if (!mpi_request->DevHandle)
			return scmd;

		scmd = scsi_host_find_tag(ioc->shost, unique_tag);
		if (scmd) {
			st = scsi_cmd_priv(scmd);
			/* Slot is not owned by an active driver request. */
			if (st->cb_idx == 0xFF || st->smid == 0)
				scmd = NULL;
		}
	}
	return scmd;
}

/**
 * scsih_change_queue_depth - setting device queue depth
 * @sdev: scsi device struct
 * @qdepth: requested queue depth
 *
 * Return: queue depth.
 */
static int
scsih_change_queue_depth(struct scsi_device *sdev, int qdepth)
{
	struct Scsi_Host *shost = sdev->host;
	int max_depth;
	struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
	struct MPT3SAS_DEVICE *sas_device_priv_data;
	struct MPT3SAS_TARGET *sas_target_priv_data;
	struct _sas_device *sas_device;
	unsigned long flags;

	max_depth = shost->can_queue;

	/*
	 * limit max device queue for SATA to 32 if enable_sdev_max_qd
	 * is disabled.
	 */
	if (ioc->enable_sdev_max_qd || ioc->is_gen35_ioc)
		goto not_sata;

	sas_device_priv_data = sdev->hostdata;
	if (!sas_device_priv_data)
		goto not_sata;
	sas_target_priv_data = sas_device_priv_data->sas_target;
	if (!sas_target_priv_data)
		goto not_sata;
	/* RAID volumes are not subject to the SATA depth cap. */
	if ((sas_target_priv_data->flags & MPT_TARGET_FLAGS_VOLUME))
		goto not_sata;

	spin_lock_irqsave(&ioc->sas_device_lock, flags);
	sas_device = __mpt3sas_get_sdev_from_target(ioc, sas_target_priv_data);
	if (sas_device) {
		if (sas_device->device_info & MPI2_SAS_DEVICE_INFO_SATA_DEVICE)
			max_depth = MPT3SAS_SATA_QUEUE_DEPTH;

		/* Drop the reference taken by the lookup above. */
		sas_device_put(sas_device);
	}
	spin_unlock_irqrestore(&ioc->sas_device_lock, flags);

 not_sata:

	if (!sdev->tagged_supported)
		max_depth = 1;
	if (qdepth > max_depth)
		qdepth = max_depth;
	scsi_change_queue_depth(sdev, qdepth);
	sdev_printk(KERN_INFO, sdev,
	    "qdepth(%d), tagged(%d), scsi_level(%d), cmd_que(%d)\n",
	    sdev->queue_depth, sdev->tagged_supported,
	    sdev->scsi_level, ((sdev->inquiry[7] & 2) >> 1));
	return sdev->queue_depth;
}

/**
 * mpt3sas_scsih_change_queue_depth - setting device queue depth
 * @sdev: scsi device struct
 * @qdepth: requested queue depth
 *
 * Returns nothing.
 */
void
mpt3sas_scsih_change_queue_depth(struct scsi_device *sdev, int qdepth)
{
	struct Scsi_Host *shost = sdev->host;
	struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);

	/* With enable_sdev_max_qd, always request the full host depth. */
	if (ioc->enable_sdev_max_qd)
		qdepth = shost->can_queue;

	scsih_change_queue_depth(sdev, qdepth);
}

/**
 * scsih_target_alloc - target add routine
 * @starget: scsi target struct
 *
 * Return: 0 if ok. Any other return is assumed to be an error and
 * the device is ignored.
 */
static int
scsih_target_alloc(struct scsi_target *starget)
{
	struct Scsi_Host *shost = dev_to_shost(&starget->dev);
	struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
	struct MPT3SAS_TARGET *sas_target_priv_data;
	struct _sas_device *sas_device;
	struct _raid_device *raid_device;
	struct _pcie_device *pcie_device;
	unsigned long flags;
	struct sas_rphy *rphy;

	sas_target_priv_data = kzalloc(sizeof(*sas_target_priv_data),
				       GFP_KERNEL);
	if (!sas_target_priv_data)
		return -ENOMEM;

	starget->hostdata = sas_target_priv_data;
	sas_target_priv_data->starget = starget;
	sas_target_priv_data->handle = MPT3SAS_INVALID_DEVICE_HANDLE;

	/* RAID volumes */
	if (starget->channel == RAID_CHANNEL) {
		spin_lock_irqsave(&ioc->raid_device_lock, flags);
		raid_device = _scsih_raid_device_find_by_id(ioc, starget->id,
		    starget->channel);
		if (raid_device) {
			sas_target_priv_data->handle = raid_device->handle;
			sas_target_priv_data->sas_address = raid_device->wwid;
			sas_target_priv_data->flags |= MPT_TARGET_FLAGS_VOLUME;
			if (ioc->is_warpdrive)
				sas_target_priv_data->raid_device = raid_device;
			raid_device->starget = starget;
		}
		spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
		return 0;
	}

	/* PCIe devices */
	if (starget->channel == PCIE_CHANNEL) {
		spin_lock_irqsave(&ioc->pcie_device_lock, flags);
		/* Lookup reference is kept; dropped in scsih_target_destroy(). */
		pcie_device = __mpt3sas_get_pdev_by_idchannel(ioc, starget->id,
		    starget->channel);
		if (pcie_device) {
			sas_target_priv_data->handle = pcie_device->handle;
			sas_target_priv_data->sas_address = pcie_device->wwid;
			sas_target_priv_data->port = NULL;
			sas_target_priv_data->pcie_dev = pcie_device;
			pcie_device->starget = starget;
			pcie_device->id = starget->id;
			pcie_device->channel = starget->channel;
			sas_target_priv_data->flags |=
			    MPT_TARGET_FLAGS_PCIE_DEVICE;
			if (pcie_device->fast_path)
				sas_target_priv_data->flags |=
				    MPT_TARGET_FASTPATH_IO;
		}
		spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
		return 0;
	}

	/* sas/sata devices */
	spin_lock_irqsave(&ioc->sas_device_lock, flags);
	rphy = dev_to_rphy(starget->dev.parent);
	/* Lookup reference is kept; dropped in scsih_target_destroy(). */
	sas_device = __mpt3sas_get_sdev_by_rphy(ioc, rphy);

	if (sas_device) {
		sas_target_priv_data->handle = sas_device->handle;
		sas_target_priv_data->sas_address = sas_device->sas_address;
		sas_target_priv_data->port = sas_device->port;
		sas_target_priv_data->sas_dev = sas_device;
		sas_device->starget = starget;
		sas_device->id = starget->id;
		sas_device->channel = starget->channel;
		/* pd_handles bit set means this is a RAID member disk. */
		if (test_bit(sas_device->handle, ioc->pd_handles))
			sas_target_priv_data->flags |=
			    MPT_TARGET_FLAGS_RAID_COMPONENT;
		if (sas_device->fast_path)
			sas_target_priv_data->flags |=
			    MPT_TARGET_FASTPATH_IO;
	}
	spin_unlock_irqrestore(&ioc->sas_device_lock, flags);

	return 0;
}

/**
 * scsih_target_destroy - target destroy routine
 * @starget: scsi target struct
 */
static void
scsih_target_destroy(struct scsi_target *starget)
{
	struct Scsi_Host *shost = dev_to_shost(&starget->dev);
	struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
	struct MPT3SAS_TARGET *sas_target_priv_data;
	struct _sas_device *sas_device;
	struct _raid_device *raid_device;
	struct _pcie_device *pcie_device;
	unsigned long flags;

	sas_target_priv_data = starget->hostdata;
	if (!sas_target_priv_data)
		return;

	if (starget->channel == RAID_CHANNEL) {
		spin_lock_irqsave(&ioc->raid_device_lock, flags);
		raid_device = _scsih_raid_device_find_by_id(ioc, starget->id,
		    starget->channel);
		if (raid_device) {
			raid_device->starget = NULL;
			raid_device->sdev = NULL;
		}
		spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
		goto out;
	}

	if (starget->channel == PCIE_CHANNEL) {
		spin_lock_irqsave(&ioc->pcie_device_lock, flags);
		pcie_device = __mpt3sas_get_pdev_from_target(ioc,
		    sas_target_priv_data);
		if (pcie_device && (pcie_device->starget == starget) &&
		    (pcie_device->id == starget->id) &&
		    (pcie_device->channel == starget->channel))
			pcie_device->starget = NULL;

		if (pcie_device) {
			/*
			 * Corresponding get() is in _scsih_target_alloc()
			 */
			sas_target_priv_data->pcie_dev = NULL;
			/* One put for this lookup, one for target_alloc's. */
			pcie_device_put(pcie_device);
			pcie_device_put(pcie_device);
		}
		spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
		goto out;
	}

	spin_lock_irqsave(&ioc->sas_device_lock, flags);
	sas_device = __mpt3sas_get_sdev_from_target(ioc, sas_target_priv_data);
	if (sas_device && (sas_device->starget == starget) &&
	    (sas_device->id == starget->id) &&
	    (sas_device->channel == starget->channel))
		sas_device->starget = NULL;

	if (sas_device) {
		/*
		 * Corresponding get() is in _scsih_target_alloc()
		 */
		sas_target_priv_data->sas_dev = NULL;
		/* One put for this lookup, one for target_alloc's. */
		sas_device_put(sas_device);

		sas_device_put(sas_device);
	}
	spin_unlock_irqrestore(&ioc->sas_device_lock, flags);

 out:
	kfree(sas_target_priv_data);
	starget->hostdata = NULL;
}

/**
 * scsih_slave_alloc -
 device add routine
 * @sdev: scsi device struct
 *
 * Return: 0 if ok. Any other return is assumed to be an error and
 * the device is ignored.
 */
static int
scsih_slave_alloc(struct scsi_device *sdev)
{
	struct Scsi_Host *shost;
	struct MPT3SAS_ADAPTER *ioc;
	struct MPT3SAS_TARGET *sas_target_priv_data;
	struct MPT3SAS_DEVICE *sas_device_priv_data;
	struct scsi_target *starget;
	struct _raid_device *raid_device;
	struct _sas_device *sas_device;
	struct _pcie_device *pcie_device;
	unsigned long flags;

	sas_device_priv_data = kzalloc(sizeof(*sas_device_priv_data),
				       GFP_KERNEL);
	if (!sas_device_priv_data)
		return -ENOMEM;

	sas_device_priv_data->lun = sdev->lun;
	sas_device_priv_data->flags = MPT_DEVICE_FLAGS_INIT;

	starget = scsi_target(sdev);
	sas_target_priv_data = starget->hostdata;
	sas_target_priv_data->num_luns++;
	sas_device_priv_data->sas_target = sas_target_priv_data;
	sdev->hostdata = sas_device_priv_data;
	/* RAID member disks are not bound to upper-level drivers. */
	if ((sas_target_priv_data->flags & MPT_TARGET_FLAGS_RAID_COMPONENT))
		sdev->no_uld_attach = 1;

	shost = dev_to_shost(&starget->dev);
	ioc = shost_priv(shost);
	if (starget->channel == RAID_CHANNEL) {
		spin_lock_irqsave(&ioc->raid_device_lock, flags);
		raid_device = _scsih_raid_device_find_by_id(ioc,
		    starget->id, starget->channel);
		if (raid_device)
			raid_device->sdev = sdev; /* raid is single lun */
		spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
	}
	if (starget->channel == PCIE_CHANNEL) {
		spin_lock_irqsave(&ioc->pcie_device_lock, flags);
		/* For PCIe targets, sas_address holds the device wwid. */
		pcie_device = __mpt3sas_get_pdev_by_wwid(ioc,
		    sas_target_priv_data->sas_address);
		if (pcie_device && (pcie_device->starget == NULL)) {
			sdev_printk(KERN_INFO, sdev,
			    "%s : pcie_device->starget set to starget @ %d\n",
			    __func__, __LINE__);
			pcie_device->starget = starget;
		}

		if (pcie_device)
			/* Drop the reference taken by the lookup above. */
			pcie_device_put(pcie_device);
		spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);

	} else if (!(sas_target_priv_data->flags & MPT_TARGET_FLAGS_VOLUME)) {
		spin_lock_irqsave(&ioc->sas_device_lock, flags);
		sas_device = __mpt3sas_get_sdev_by_addr(ioc,
		    sas_target_priv_data->sas_address,
		    sas_target_priv_data->port);
		if (sas_device && (sas_device->starget == NULL)) {
			sdev_printk(KERN_INFO, sdev,
			    "%s : sas_device->starget set to starget @ %d\n",
			    __func__, __LINE__);
			sas_device->starget = starget;
		}

		if (sas_device)
			/* Drop the reference taken by the lookup above. */
			sas_device_put(sas_device);

		spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
	}

	return 0;
}

/**
 * scsih_slave_destroy - device destroy routine
 * @sdev: scsi device struct
 */
static void
scsih_slave_destroy(struct scsi_device *sdev)
{
	struct MPT3SAS_TARGET *sas_target_priv_data;
	struct scsi_target *starget;
	struct Scsi_Host *shost;
	struct MPT3SAS_ADAPTER *ioc;
	struct _sas_device *sas_device;
	struct _pcie_device *pcie_device;
	unsigned long flags;

	if (!sdev->hostdata)
		return;

	starget = scsi_target(sdev);
	sas_target_priv_data = starget->hostdata;
	sas_target_priv_data->num_luns--;

	shost = dev_to_shost(&starget->dev);
	ioc = shost_priv(shost);

	if (sas_target_priv_data->flags & MPT_TARGET_FLAGS_PCIE_DEVICE) {
		spin_lock_irqsave(&ioc->pcie_device_lock, flags);
		pcie_device = __mpt3sas_get_pdev_from_target(ioc,
		    sas_target_priv_data);
		/* Only clear starget once the last lun is gone. */
		if (pcie_device && !sas_target_priv_data->num_luns)
			pcie_device->starget = NULL;

		if (pcie_device)
			pcie_device_put(pcie_device);

		spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);

	} else if (!(sas_target_priv_data->flags & MPT_TARGET_FLAGS_VOLUME)) {
		spin_lock_irqsave(&ioc->sas_device_lock, flags);
		sas_device
= __mpt3sas_get_sdev_from_target(ioc, 2150 sas_target_priv_data); 2151 if (sas_device && !sas_target_priv_data->num_luns) 2152 sas_device->starget = NULL; 2153 2154 if (sas_device) 2155 sas_device_put(sas_device); 2156 spin_unlock_irqrestore(&ioc->sas_device_lock, flags); 2157 } 2158 2159 kfree(sdev->hostdata); 2160 sdev->hostdata = NULL; 2161 } 2162 2163 /** 2164 * _scsih_display_sata_capabilities - sata capabilities 2165 * @ioc: per adapter object 2166 * @handle: device handle 2167 * @sdev: scsi device struct 2168 */ 2169 static void 2170 _scsih_display_sata_capabilities(struct MPT3SAS_ADAPTER *ioc, 2171 u16 handle, struct scsi_device *sdev) 2172 { 2173 Mpi2ConfigReply_t mpi_reply; 2174 Mpi2SasDevicePage0_t sas_device_pg0; 2175 u32 ioc_status; 2176 u16 flags; 2177 u32 device_info; 2178 2179 if ((mpt3sas_config_get_sas_device_pg0(ioc, &mpi_reply, &sas_device_pg0, 2180 MPI2_SAS_DEVICE_PGAD_FORM_HANDLE, handle))) { 2181 ioc_err(ioc, "failure at %s:%d/%s()!\n", 2182 __FILE__, __LINE__, __func__); 2183 return; 2184 } 2185 2186 ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & 2187 MPI2_IOCSTATUS_MASK; 2188 if (ioc_status != MPI2_IOCSTATUS_SUCCESS) { 2189 ioc_err(ioc, "failure at %s:%d/%s()!\n", 2190 __FILE__, __LINE__, __func__); 2191 return; 2192 } 2193 2194 flags = le16_to_cpu(sas_device_pg0.Flags); 2195 device_info = le32_to_cpu(sas_device_pg0.DeviceInfo); 2196 2197 sdev_printk(KERN_INFO, sdev, 2198 "atapi(%s), ncq(%s), asyn_notify(%s), smart(%s), fua(%s), " 2199 "sw_preserve(%s)\n", 2200 (device_info & MPI2_SAS_DEVICE_INFO_ATAPI_DEVICE) ? "y" : "n", 2201 (flags & MPI2_SAS_DEVICE0_FLAGS_SATA_NCQ_SUPPORTED) ? "y" : "n", 2202 (flags & MPI2_SAS_DEVICE0_FLAGS_SATA_ASYNCHRONOUS_NOTIFY) ? "y" : 2203 "n", 2204 (flags & MPI2_SAS_DEVICE0_FLAGS_SATA_SMART_SUPPORTED) ? "y" : "n", 2205 (flags & MPI2_SAS_DEVICE0_FLAGS_SATA_FUA_SUPPORTED) ? "y" : "n", 2206 (flags & MPI2_SAS_DEVICE0_FLAGS_SATA_SW_PRESERVE) ? 
"y" : "n"); 2207 } 2208 2209 /* 2210 * raid transport support - 2211 * Enabled for SLES11 and newer, in older kernels the driver will panic when 2212 * unloading the driver followed by a load - I believe that the subroutine 2213 * raid_class_release() is not cleaning up properly. 2214 */ 2215 2216 /** 2217 * scsih_is_raid - return boolean indicating device is raid volume 2218 * @dev: the device struct object 2219 */ 2220 static int 2221 scsih_is_raid(struct device *dev) 2222 { 2223 struct scsi_device *sdev = to_scsi_device(dev); 2224 struct MPT3SAS_ADAPTER *ioc = shost_priv(sdev->host); 2225 2226 if (ioc->is_warpdrive) 2227 return 0; 2228 return (sdev->channel == RAID_CHANNEL) ? 1 : 0; 2229 } 2230 2231 static int 2232 scsih_is_nvme(struct device *dev) 2233 { 2234 struct scsi_device *sdev = to_scsi_device(dev); 2235 2236 return (sdev->channel == PCIE_CHANNEL) ? 1 : 0; 2237 } 2238 2239 /** 2240 * scsih_get_resync - get raid volume resync percent complete 2241 * @dev: the device struct object 2242 */ 2243 static void 2244 scsih_get_resync(struct device *dev) 2245 { 2246 struct scsi_device *sdev = to_scsi_device(dev); 2247 struct MPT3SAS_ADAPTER *ioc = shost_priv(sdev->host); 2248 static struct _raid_device *raid_device; 2249 unsigned long flags; 2250 Mpi2RaidVolPage0_t vol_pg0; 2251 Mpi2ConfigReply_t mpi_reply; 2252 u32 volume_status_flags; 2253 u8 percent_complete; 2254 u16 handle; 2255 2256 percent_complete = 0; 2257 handle = 0; 2258 if (ioc->is_warpdrive) 2259 goto out; 2260 2261 spin_lock_irqsave(&ioc->raid_device_lock, flags); 2262 raid_device = _scsih_raid_device_find_by_id(ioc, sdev->id, 2263 sdev->channel); 2264 if (raid_device) { 2265 handle = raid_device->handle; 2266 percent_complete = raid_device->percent_complete; 2267 } 2268 spin_unlock_irqrestore(&ioc->raid_device_lock, flags); 2269 2270 if (!handle) 2271 goto out; 2272 2273 if (mpt3sas_config_get_raid_volume_pg0(ioc, &mpi_reply, &vol_pg0, 2274 MPI2_RAID_VOLUME_PGAD_FORM_HANDLE, handle, 2275 
sizeof(Mpi2RaidVolPage0_t))) { 2276 ioc_err(ioc, "failure at %s:%d/%s()!\n", 2277 __FILE__, __LINE__, __func__); 2278 percent_complete = 0; 2279 goto out; 2280 } 2281 2282 volume_status_flags = le32_to_cpu(vol_pg0.VolumeStatusFlags); 2283 if (!(volume_status_flags & 2284 MPI2_RAIDVOL0_STATUS_FLAG_RESYNC_IN_PROGRESS)) 2285 percent_complete = 0; 2286 2287 out: 2288 2289 switch (ioc->hba_mpi_version_belonged) { 2290 case MPI2_VERSION: 2291 raid_set_resync(mpt2sas_raid_template, dev, percent_complete); 2292 break; 2293 case MPI25_VERSION: 2294 case MPI26_VERSION: 2295 raid_set_resync(mpt3sas_raid_template, dev, percent_complete); 2296 break; 2297 } 2298 } 2299 2300 /** 2301 * scsih_get_state - get raid volume level 2302 * @dev: the device struct object 2303 */ 2304 static void 2305 scsih_get_state(struct device *dev) 2306 { 2307 struct scsi_device *sdev = to_scsi_device(dev); 2308 struct MPT3SAS_ADAPTER *ioc = shost_priv(sdev->host); 2309 static struct _raid_device *raid_device; 2310 unsigned long flags; 2311 Mpi2RaidVolPage0_t vol_pg0; 2312 Mpi2ConfigReply_t mpi_reply; 2313 u32 volstate; 2314 enum raid_state state = RAID_STATE_UNKNOWN; 2315 u16 handle = 0; 2316 2317 spin_lock_irqsave(&ioc->raid_device_lock, flags); 2318 raid_device = _scsih_raid_device_find_by_id(ioc, sdev->id, 2319 sdev->channel); 2320 if (raid_device) 2321 handle = raid_device->handle; 2322 spin_unlock_irqrestore(&ioc->raid_device_lock, flags); 2323 2324 if (!raid_device) 2325 goto out; 2326 2327 if (mpt3sas_config_get_raid_volume_pg0(ioc, &mpi_reply, &vol_pg0, 2328 MPI2_RAID_VOLUME_PGAD_FORM_HANDLE, handle, 2329 sizeof(Mpi2RaidVolPage0_t))) { 2330 ioc_err(ioc, "failure at %s:%d/%s()!\n", 2331 __FILE__, __LINE__, __func__); 2332 goto out; 2333 } 2334 2335 volstate = le32_to_cpu(vol_pg0.VolumeStatusFlags); 2336 if (volstate & MPI2_RAIDVOL0_STATUS_FLAG_RESYNC_IN_PROGRESS) { 2337 state = RAID_STATE_RESYNCING; 2338 goto out; 2339 } 2340 2341 switch (vol_pg0.VolumeState) { 2342 case 
MPI2_RAID_VOL_STATE_OPTIMAL:
	case MPI2_RAID_VOL_STATE_ONLINE:
		state = RAID_STATE_ACTIVE;
		break;
	case MPI2_RAID_VOL_STATE_DEGRADED:
		state = RAID_STATE_DEGRADED;
		break;
	case MPI2_RAID_VOL_STATE_FAILED:
	case MPI2_RAID_VOL_STATE_MISSING:
		state = RAID_STATE_OFFLINE;
		break;
	}
 out:
	/* report the mapped state via the matching raid transport template */
	switch (ioc->hba_mpi_version_belonged) {
	case MPI2_VERSION:
		raid_set_state(mpt2sas_raid_template, dev, state);
		break;
	case MPI25_VERSION:
	case MPI26_VERSION:
		raid_set_state(mpt3sas_raid_template, dev, state);
		break;
	}
}

/**
 * _scsih_set_level - set raid level
 * @ioc: per adapter object
 * @sdev: scsi device struct
 * @volume_type: volume type
 *
 * Maps the MPI volume type onto enum raid_level and reports it to the
 * raid transport class; unmatched types report RAID_LEVEL_UNKNOWN.
 */
static void
_scsih_set_level(struct MPT3SAS_ADAPTER *ioc,
	struct scsi_device *sdev, u8 volume_type)
{
	enum raid_level level = RAID_LEVEL_UNKNOWN;

	switch (volume_type) {
	case MPI2_RAID_VOL_TYPE_RAID0:
		level = RAID_LEVEL_0;
		break;
	case MPI2_RAID_VOL_TYPE_RAID10:
		level = RAID_LEVEL_10;
		break;
	case MPI2_RAID_VOL_TYPE_RAID1E:
		level = RAID_LEVEL_1E;
		break;
	case MPI2_RAID_VOL_TYPE_RAID1:
		level = RAID_LEVEL_1;
		break;
	}

	switch (ioc->hba_mpi_version_belonged) {
	case MPI2_VERSION:
		raid_set_level(mpt2sas_raid_template,
		    &sdev->sdev_gendev, level);
		break;
	case MPI25_VERSION:
	case MPI26_VERSION:
		raid_set_level(mpt3sas_raid_template,
		    &sdev->sdev_gendev, level);
		break;
	}
}


/**
 * _scsih_get_volume_capabilities - volume capabilities
 * @ioc: per adapter object
 * @raid_device: the raid_device object
 *
 * Fills @raid_device->num_pds, ->volume_type and ->device_info from the
 * firmware config pages (RAID Volume Page 0, Phys Disk Page 0, SAS Device
 * Page 0 of the first member disk).
 *
 * Return: 0 for success, else 1
 */
static int
_scsih_get_volume_capabilities(struct MPT3SAS_ADAPTER *ioc,
	struct _raid_device *raid_device)
{
	Mpi2RaidVolPage0_t *vol_pg0;
	Mpi2RaidPhysDiskPage0_t pd_pg0;
	Mpi2SasDevicePage0_t sas_device_pg0;
	Mpi2ConfigReply_t mpi_reply;
	u16 sz;
	u8 num_pds;

	if ((mpt3sas_config_get_number_pds(ioc, raid_device->handle,
	    &num_pds)) || !num_pds) {
		dfailprintk(ioc,
			    ioc_warn(ioc, "failure at %s:%d/%s()!\n",
				     __FILE__, __LINE__, __func__));
		return 1;
	}

	raid_device->num_pds = num_pds;
	/* page 0 carries a flexible PhysDisk[] array, sized per num_pds */
	sz = struct_size(vol_pg0, PhysDisk, num_pds);
	vol_pg0 = kzalloc(sz, GFP_KERNEL);
	if (!vol_pg0) {
		dfailprintk(ioc,
			    ioc_warn(ioc, "failure at %s:%d/%s()!\n",
				     __FILE__, __LINE__, __func__));
		return 1;
	}

	if ((mpt3sas_config_get_raid_volume_pg0(ioc, &mpi_reply, vol_pg0,
	     MPI2_RAID_VOLUME_PGAD_FORM_HANDLE, raid_device->handle, sz))) {
		dfailprintk(ioc,
			    ioc_warn(ioc, "failure at %s:%d/%s()!\n",
				     __FILE__, __LINE__, __func__));
		kfree(vol_pg0);
		return 1;
	}

	raid_device->volume_type = vol_pg0->VolumeType;

	/* figure out what the underlying devices are by
	 * obtaining the device_info bits for the 1st device
	 */
	if (!(mpt3sas_config_get_phys_disk_pg0(ioc, &mpi_reply,
	    &pd_pg0, MPI2_PHYSDISK_PGAD_FORM_PHYSDISKNUM,
	    vol_pg0->PhysDisk[0].PhysDiskNum))) {
		if (!(mpt3sas_config_get_sas_device_pg0(ioc, &mpi_reply,
		    &sas_device_pg0, MPI2_SAS_DEVICE_PGAD_FORM_HANDLE,
		    le16_to_cpu(pd_pg0.DevHandle)))) {
			raid_device->device_info =
			    le32_to_cpu(sas_device_pg0.DeviceInfo);
		}
	}

	/* temporary page buffer is freed on every exit path */
	kfree(vol_pg0);
	return 0;
}

/**
 * _scsih_enable_tlr - setting TLR flags
 * @ioc: per adapter object
 * @sdev: scsi device struct
 *
 * Enabling Transaction Layer Retries for tape devices when
 * vpd page 0x90 is present
 *
 */
static void
_scsih_enable_tlr(struct MPT3SAS_ADAPTER *ioc, struct scsi_device *sdev)
{

	/* only for TAPE */
	if (sdev->type != TYPE_TAPE)
		return;

	if
(!(ioc->facts.IOCCapabilities & MPI2_IOCFACTS_CAPABILITY_TLR))
		return;

	sas_enable_tlr(sdev);
	sdev_printk(KERN_INFO, sdev, "TLR %s\n",
	    sas_is_tlr_enabled(sdev) ? "Enabled" : "Disabled");
	return;

}

/**
 * scsih_slave_configure - device configure routine.
 * @sdev: scsi device struct
 *
 * Picks a queue depth, reports device identity, and applies per-class
 * request-queue settings for the three device classes this driver exposes:
 * RAID volumes, NVMe (PCIe channel) drives and plain SAS/SATA devices.
 *
 * Return: 0 if ok. Any other return is assumed to be an error and
 * the device is ignored.
 */
static int
scsih_slave_configure(struct scsi_device *sdev)
{
	struct Scsi_Host *shost = sdev->host;
	struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
	struct MPT3SAS_DEVICE *sas_device_priv_data;
	struct MPT3SAS_TARGET *sas_target_priv_data;
	struct _sas_device *sas_device;
	struct _pcie_device *pcie_device;
	struct _raid_device *raid_device;
	unsigned long flags;
	int qdepth;
	u8 ssp_target = 0;
	char *ds = "";
	char *r_level = "";
	u16 handle, volume_handle = 0;
	u64 volume_wwid = 0;

	/* conservative default until the device class is known */
	qdepth = 1;
	sas_device_priv_data = sdev->hostdata;
	sas_device_priv_data->configured_lun = 1;
	sas_device_priv_data->flags &= ~MPT_DEVICE_FLAGS_INIT;
	sas_target_priv_data = sas_device_priv_data->sas_target;
	handle = sas_target_priv_data->handle;

	/* raid volume handling */
	if (sas_target_priv_data->flags & MPT_TARGET_FLAGS_VOLUME) {

		spin_lock_irqsave(&ioc->raid_device_lock, flags);
		raid_device = mpt3sas_raid_device_find_by_handle(ioc, handle);
		spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
		if (!raid_device) {
			dfailprintk(ioc,
				    ioc_warn(ioc, "failure at %s:%d/%s()!\n",
					     __FILE__, __LINE__, __func__));
			return 1;
		}

		if (_scsih_get_volume_capabilities(ioc, raid_device)) {
			dfailprintk(ioc,
				    ioc_warn(ioc, "failure at %s:%d/%s()!\n",
					     __FILE__, __LINE__, __func__));
			return 1;
		}

		/*
		 * WARPDRIVE: Initialize the required data for Direct IO
		 */
		mpt3sas_init_warpdrive_properties(ioc, raid_device);

		/* RAID Queue Depth Support
		 * IS volume = underlying qdepth of drive type, either
		 * MPT3SAS_SAS_QUEUE_DEPTH or MPT3SAS_SATA_QUEUE_DEPTH
		 * IM/IME/R10 = 128 (MPT3SAS_RAID_QUEUE_DEPTH)
		 */
		if (raid_device->device_info &
		    MPI2_SAS_DEVICE_INFO_SSP_TARGET) {
			qdepth = MPT3SAS_SAS_QUEUE_DEPTH;
			ds = "SSP";
		} else {
			qdepth = MPT3SAS_SATA_QUEUE_DEPTH;
			if (raid_device->device_info &
			    MPI2_SAS_DEVICE_INFO_SATA_DEVICE)
				ds = "SATA";
			else
				ds = "STP";
		}

		switch (raid_device->volume_type) {
		case MPI2_RAID_VOL_TYPE_RAID0:
			r_level = "RAID0";
			break;
		case MPI2_RAID_VOL_TYPE_RAID1E:
			qdepth = MPT3SAS_RAID_QUEUE_DEPTH;
			/* OEM option: show even-disk RAID1E volumes as RAID10 */
			if (ioc->manu_pg10.OEMIdentifier &&
			    (le32_to_cpu(ioc->manu_pg10.GenericFlags0) &
			    MFG10_GF0_R10_DISPLAY) &&
			    !(raid_device->num_pds % 2))
				r_level = "RAID10";
			else
				r_level = "RAID1E";
			break;
		case MPI2_RAID_VOL_TYPE_RAID1:
			qdepth = MPT3SAS_RAID_QUEUE_DEPTH;
			r_level = "RAID1";
			break;
		case MPI2_RAID_VOL_TYPE_RAID10:
			qdepth = MPT3SAS_RAID_QUEUE_DEPTH;
			r_level = "RAID10";
			break;
		case MPI2_RAID_VOL_TYPE_UNKNOWN:
		default:
			qdepth = MPT3SAS_RAID_QUEUE_DEPTH;
			r_level = "RAIDX";
			break;
		}

		if (!ioc->hide_ir_msg)
			sdev_printk(KERN_INFO, sdev,
			    "%s: handle(0x%04x), wwid(0x%016llx),"
			    " pd_count(%d), type(%s)\n",
			    r_level, raid_device->handle,
			    (unsigned long long)raid_device->wwid,
			    raid_device->num_pds, ds);

		if (shost->max_sectors > MPT3SAS_RAID_MAX_SECTORS) {
			blk_queue_max_hw_sectors(sdev->request_queue,
			    MPT3SAS_RAID_MAX_SECTORS);
			sdev_printk(KERN_INFO, sdev,
			    "Set queue's max_sector to: %u\n",
			    MPT3SAS_RAID_MAX_SECTORS);
		}

		mpt3sas_scsih_change_queue_depth(sdev, qdepth);

		/* raid transport support */
		if (!ioc->is_warpdrive)
			_scsih_set_level(ioc, sdev, raid_device->volume_type);
		return 0;
	}

	/* non-raid handling */
	if (sas_target_priv_data->flags & MPT_TARGET_FLAGS_RAID_COMPONENT) {
		if (mpt3sas_config_get_volume_handle(ioc, handle,
		    &volume_handle)) {
			dfailprintk(ioc,
				    ioc_warn(ioc, "failure at %s:%d/%s()!\n",
					     __FILE__, __LINE__, __func__));
			return 1;
		}
		if (volume_handle && mpt3sas_config_get_volume_wwid(ioc,
		    volume_handle, &volume_wwid)) {
			dfailprintk(ioc,
				    ioc_warn(ioc, "failure at %s:%d/%s()!\n",
					     __FILE__, __LINE__, __func__));
			return 1;
		}
	}

	/* PCIe handling */
	if (sas_target_priv_data->flags & MPT_TARGET_FLAGS_PCIE_DEVICE) {
		spin_lock_irqsave(&ioc->pcie_device_lock, flags);
		pcie_device = __mpt3sas_get_pdev_by_wwid(ioc,
		    sas_device_priv_data->sas_target->sas_address);
		if (!pcie_device) {
			spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
			dfailprintk(ioc,
				    ioc_warn(ioc, "failure at %s:%d/%s()!\n",
					     __FILE__, __LINE__, __func__));
			return 1;
		}

		qdepth = ioc->max_nvme_qd;
		ds = "NVMe";
		sdev_printk(KERN_INFO, sdev,
			"%s: handle(0x%04x), wwid(0x%016llx), port(%d)\n",
			ds, handle, (unsigned long long)pcie_device->wwid,
			pcie_device->port_num);
		if (pcie_device->enclosure_handle != 0)
			sdev_printk(KERN_INFO, sdev,
			"%s: enclosure logical id(0x%016llx), slot(%d)\n",
			ds,
			(unsigned long long)pcie_device->enclosure_logical_id,
			pcie_device->slot);
		if (pcie_device->connector_name[0] != '\0')
			sdev_printk(KERN_INFO, sdev,
				"%s: enclosure level(0x%04x),"
				"connector name( %s)\n", ds,
				pcie_device->enclosure_level,
				pcie_device->connector_name);

		/* cap transfer size to the drive's MDTS (bytes -> sectors) */
		if (pcie_device->nvme_mdts)
			blk_queue_max_hw_sectors(sdev->request_queue,
					pcie_device->nvme_mdts/512);

		pcie_device_put(pcie_device);
		spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
		mpt3sas_scsih_change_queue_depth(sdev, qdepth);
		/* Enable QUEUE_FLAG_NOMERGES flag, so that IOs won't be
		** merged and can eliminate holes created during merging
		** operation.
		**/
		blk_queue_flag_set(QUEUE_FLAG_NOMERGES,
				sdev->request_queue);
		blk_queue_virt_boundary(sdev->request_queue,
				ioc->page_size - 1);
		return 0;
	}

	/* plain SAS/SATA device handling */
	spin_lock_irqsave(&ioc->sas_device_lock, flags);
	sas_device = __mpt3sas_get_sdev_by_addr(ioc,
	   sas_device_priv_data->sas_target->sas_address,
	   sas_device_priv_data->sas_target->port);
	if (!sas_device) {
		spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
		dfailprintk(ioc,
			    ioc_warn(ioc, "failure at %s:%d/%s()!\n",
				     __FILE__, __LINE__, __func__));
		return 1;
	}

	sas_device->volume_handle = volume_handle;
	sas_device->volume_wwid = volume_wwid;
	if (sas_device->device_info & MPI2_SAS_DEVICE_INFO_SSP_TARGET) {
		/* wide vs narrow port queue depth for SSP targets */
		qdepth = (sas_device->port_type > 1) ?
		    ioc->max_wideport_qd : ioc->max_narrowport_qd;
		ssp_target = 1;
		if (sas_device->device_info &
		    MPI2_SAS_DEVICE_INFO_SEP) {
			sdev_printk(KERN_WARNING, sdev,
			    "set ignore_delay_remove for handle(0x%04x)\n",
			    sas_device_priv_data->sas_target->handle);
			sas_device_priv_data->ignore_delay_remove = 1;
			ds = "SES";
		} else
			ds = "SSP";
	} else {
		qdepth = ioc->max_sata_qd;
		if (sas_device->device_info & MPI2_SAS_DEVICE_INFO_STP_TARGET)
			ds = "STP";
		else if (sas_device->device_info &
		    MPI2_SAS_DEVICE_INFO_SATA_DEVICE)
			ds = "SATA";
	}

	sdev_printk(KERN_INFO, sdev, "%s: handle(0x%04x), " \
	    "sas_addr(0x%016llx), phy(%d), device_name(0x%016llx)\n",
	    ds, handle, (unsigned long long)sas_device->sas_address,
	    sas_device->phy, (unsigned long long)sas_device->device_name);

	_scsih_display_enclosure_chassis_info(NULL, sas_device, sdev, NULL);

	sas_device_put(sas_device);
	spin_unlock_irqrestore(&ioc->sas_device_lock, flags);

	if (!ssp_target)
		_scsih_display_sata_capabilities(ioc, handle, sdev);


	mpt3sas_scsih_change_queue_depth(sdev, qdepth);

	if (ssp_target) {
		sas_read_port_mode_page(sdev);
		_scsih_enable_tlr(ioc, sdev);
	}

	return 0;
}

/**
 * scsih_bios_param - fetch head, sector, cylinder info for a disk
 * @sdev: scsi device struct
 * @bdev: pointer to block device context
 * @capacity: device size (in 512 byte sectors)
 * @params: three element array to place output:
 *		params[0] number of heads (max 255)
 *		params[1] number of sectors (max 63)
 *		params[2] number of cylinders
 */
static int
scsih_bios_param(struct scsi_device *sdev, struct block_device *bdev,
	sector_t capacity, int params[])
{
	int heads;
	int sectors;
	sector_t cylinders;
	ulong dummy;

	heads = 64;
	sectors = 32;

	dummy =
heads * sectors;
	cylinders = capacity;
	sector_div(cylinders, dummy);

	/*
	 * Handle extended translation size for logical drives
	 * > 1Gb
	 */
	if ((ulong)capacity >= 0x200000) {
		heads = 255;
		sectors = 63;
		dummy = heads * sectors;
		cylinders = capacity;
		sector_div(cylinders, dummy);
	}

	/* return result */
	params[0] = heads;
	params[1] = sectors;
	params[2] = cylinders;

	return 0;
}

/**
 * _scsih_response_code - translation of device response code
 * @ioc: per adapter object
 * @response_code: response code returned by the device
 *
 * Logs a human-readable description of a task management response code.
 */
static void
_scsih_response_code(struct MPT3SAS_ADAPTER *ioc, u8 response_code)
{
	char *desc;

	switch (response_code) {
	case MPI2_SCSITASKMGMT_RSP_TM_COMPLETE:
		desc = "task management request completed";
		break;
	case MPI2_SCSITASKMGMT_RSP_INVALID_FRAME:
		desc = "invalid frame";
		break;
	case MPI2_SCSITASKMGMT_RSP_TM_NOT_SUPPORTED:
		desc = "task management request not supported";
		break;
	case MPI2_SCSITASKMGMT_RSP_TM_FAILED:
		desc = "task management request failed";
		break;
	case MPI2_SCSITASKMGMT_RSP_TM_SUCCEEDED:
		desc = "task management request succeeded";
		break;
	case MPI2_SCSITASKMGMT_RSP_TM_INVALID_LUN:
		desc = "invalid lun";
		break;
	case 0xA:
		/* no MPI2_* name for this code in the headers used here */
		desc = "overlapped tag attempted";
		break;
	case MPI2_SCSITASKMGMT_RSP_IO_QUEUED_ON_IOC:
		desc = "task queued, however not sent to target";
		break;
	default:
		desc = "unknown";
		break;
	}
	ioc_warn(ioc, "response_code(0x%01x): %s\n", response_code, desc);
}

/**
 * _scsih_tm_done - tm completion routine
 * @ioc: per adapter object
 * @smid: system request message index
 * @msix_index: MSIX table index supplied by the OS
 * @reply: reply message frame(lower 32bit addr)
 * Context: none.
 *
 * The callback handler when using scsih_issue_tm.
 *
 * Return: 1 meaning mf should be freed from _base_interrupt
 *         0 means the mf is freed from this function.
 */
static u8
_scsih_tm_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index, u32 reply)
{
	MPI2DefaultReply_t *mpi_reply;

	/* ignore completions that don't belong to the pending tm command */
	if (ioc->tm_cmds.status == MPT3_CMD_NOT_USED)
		return 1;
	if (ioc->tm_cmds.smid != smid)
		return 1;
	ioc->tm_cmds.status |= MPT3_CMD_COMPLETE;
	mpi_reply = mpt3sas_base_get_reply_virt_addr(ioc, reply);
	if (mpi_reply) {
		/* MsgLength is in 32-bit dwords */
		memcpy(ioc->tm_cmds.reply, mpi_reply, mpi_reply->MsgLength*4);
		ioc->tm_cmds.status |= MPT3_CMD_REPLY_VALID;
	}
	ioc->tm_cmds.status &= ~MPT3_CMD_PENDING;
	complete(&ioc->tm_cmds.done);
	return 1;
}

/**
 * mpt3sas_scsih_set_tm_flag - set per target tm_busy
 * @ioc: per adapter object
 * @handle: device handle
 *
 * During a task management request, we need to freeze the device queue.
 */
void
mpt3sas_scsih_set_tm_flag(struct MPT3SAS_ADAPTER *ioc, u16 handle)
{
	struct MPT3SAS_DEVICE *sas_device_priv_data;
	struct scsi_device *sdev;
	u8 skip = 0;

	/*
	 * Once a match is found remaining devices are only skipped, not
	 * broken out of — shost_for_each_device() holds a reference that
	 * must be released by completing the iteration.
	 */
	shost_for_each_device(sdev, ioc->shost) {
		if (skip)
			continue;
		sas_device_priv_data = sdev->hostdata;
		if (!sas_device_priv_data)
			continue;
		if (sas_device_priv_data->sas_target->handle == handle) {
			sas_device_priv_data->sas_target->tm_busy = 1;
			skip = 1;
			ioc->ignore_loginfos = 1;
		}
	}
}

/**
 * mpt3sas_scsih_clear_tm_flag - clear per target tm_busy
 * @ioc: per adapter object
 * @handle: device handle
 *
 * During a task management request, we need to freeze the device queue.
 */
void
mpt3sas_scsih_clear_tm_flag(struct MPT3SAS_ADAPTER *ioc, u16 handle)
{
	struct MPT3SAS_DEVICE *sas_device_priv_data;
	struct scsi_device *sdev;
	u8 skip = 0;

	/* mirror of mpt3sas_scsih_set_tm_flag(): clear tm_busy on the match */
	shost_for_each_device(sdev, ioc->shost) {
		if (skip)
			continue;
		sas_device_priv_data = sdev->hostdata;
		if (!sas_device_priv_data)
			continue;
		if (sas_device_priv_data->sas_target->handle == handle) {
			sas_device_priv_data->sas_target->tm_busy = 0;
			skip = 1;
			ioc->ignore_loginfos = 0;
		}
	}
}

/**
 * scsih_tm_cmd_map_status - map the target reset & LUN reset TM status
 * @ioc: per adapter object
 * @channel: the channel assigned by the OS
 * @id: the id assigned by the OS
 * @lun: lun number
 * @type: MPI2_SCSITASKMGMT_TASKTYPE__XXX (defined in mpi2_init.h)
 * @smid_task: smid assigned to the task
 *
 * Look whether TM has aborted the timed out SCSI command, if
 * TM has aborted the IO then return SUCCESS else return FAILED.
 */
static int
scsih_tm_cmd_map_status(struct MPT3SAS_ADAPTER *ioc, uint channel,
	uint id, uint lun, u8 type, u16 smid_task)
{

	/* smids <= can_queue belong to SCSI IO requests */
	if (smid_task <= ioc->shost->can_queue) {
		switch (type) {
		case MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET:
			/* no outstanding IO on the target => it was aborted */
			if (!(_scsih_scsi_lookup_find_by_target(ioc,
			    id, channel)))
				return SUCCESS;
			break;
		case MPI2_SCSITASKMGMT_TASKTYPE_ABRT_TASK_SET:
		case MPI2_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET:
			if (!(_scsih_scsi_lookup_find_by_lun(ioc, id,
			    lun, channel)))
				return SUCCESS;
			break;
		default:
			return SUCCESS;
		}
	} else if (smid_task == ioc->scsih_cmds.smid) {
		/* internal scsih command: done or slot already released */
		if ((ioc->scsih_cmds.status & MPT3_CMD_COMPLETE) ||
		    (ioc->scsih_cmds.status & MPT3_CMD_NOT_USED))
			return SUCCESS;
	} else if (smid_task == ioc->ctl_cmds.smid) {
		if ((ioc->ctl_cmds.status & MPT3_CMD_COMPLETE) ||
		    (ioc->ctl_cmds.status & MPT3_CMD_NOT_USED))
			return SUCCESS;
	}

	return FAILED;
}

/**
 * scsih_tm_post_processing - post processing of target & LUN reset
 * @ioc: per adapter object
 * @handle: device handle
 * @channel: the channel assigned by the OS
 * @id: the id assigned by the OS
 * @lun: lun number
 * @type: MPI2_SCSITASKMGMT_TASKTYPE__XXX (defined in mpi2_init.h)
 * @smid_task: smid assigned to the task
 *
 * Post processing of target & LUN reset. Due to interrupt latency
 * issues it is possible that the interrupt for an aborted IO might not be
 * received yet. So before returning failure status, poll the
 * reply descriptor pools for the reply of timed out SCSI command.
 * Return FAILED status if reply for timed out is not received
 * otherwise return SUCCESS.
 */
static int
scsih_tm_post_processing(struct MPT3SAS_ADAPTER *ioc, u16 handle,
	uint channel, uint id, uint lun, u8 type, u16 smid_task)
{
	int rc;

	rc = scsih_tm_cmd_map_status(ioc, channel, id, lun, type, smid_task);
	if (rc == SUCCESS)
		return rc;

	ioc_info(ioc,
	    "Poll ReplyDescriptor queues for completion of"
	    " smid(%d), task_type(0x%02x), handle(0x%04x)\n",
	    smid_task, type, handle);

	/*
	 * Due to interrupt latency issues, driver may receive interrupt for
	 * TM first and then for aborted SCSI IO command. So, poll all the
	 * ReplyDescriptor pools before returning the FAILED status to SML.
	 */
	mpt3sas_base_mask_interrupts(ioc);
	mpt3sas_base_sync_reply_irqs(ioc, 1);
	mpt3sas_base_unmask_interrupts(ioc);

	/* re-check now that any straggler replies have been drained */
	return scsih_tm_cmd_map_status(ioc, channel, id, lun, type, smid_task);
}

/**
 * mpt3sas_scsih_issue_tm - main routine for sending tm requests
 * @ioc: per adapter struct
 * @handle: device handle
 * @channel: the channel assigned by the OS
 * @id: the id assigned by the OS
 * @lun: lun number
 * @type: MPI2_SCSITASKMGMT_TASKTYPE__XXX (defined in mpi2_init.h)
 * @smid_task: smid assigned to the task
 * @msix_task: MSIX table index supplied by the OS
 * @timeout: timeout in seconds
 * @tr_method: Target Reset Method
 * Context: user
 *
 * A generic API for sending task management requests to firmware.
 *
 * The callback index is set inside `ioc->tm_cb_idx`.
 * The caller is responsible to check for outstanding commands.
 *
 * Return: SUCCESS or FAILED.
 */
int
mpt3sas_scsih_issue_tm(struct MPT3SAS_ADAPTER *ioc, u16 handle, uint channel,
	uint id, u64 lun, u8 type, u16 smid_task, u16 msix_task,
	u8 timeout, u8 tr_method)
{
	Mpi2SCSITaskManagementRequest_t *mpi_request;
	Mpi2SCSITaskManagementReply_t *mpi_reply;
	Mpi25SCSIIORequest_t *request;
	u16 smid = 0;
	u32 ioc_state;
	int rc;
	u8 issue_reset = 0;

	/* caller must hold tm_cmds.mutex (see issue_locked_tm wrapper) */
	lockdep_assert_held(&ioc->tm_cmds.mutex);

	if (ioc->tm_cmds.status != MPT3_CMD_NOT_USED) {
		ioc_info(ioc, "%s: tm_cmd busy!!!\n", __func__);
		return FAILED;
	}

	if (ioc->shost_recovery || ioc->remove_host ||
	    ioc->pci_error_recovery) {
		ioc_info(ioc, "%s: host reset in progress!\n", __func__);
		return FAILED;
	}

	/* an unhealthy IOC gets a hard reset instead of a TM request */
	ioc_state = mpt3sas_base_get_iocstate(ioc, 0);
	if (ioc_state & MPI2_DOORBELL_USED) {
		dhsprintk(ioc, ioc_info(ioc, "unexpected doorbell active!\n"));
		rc = mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER);
		return (!rc) ? SUCCESS : FAILED;
	}

	if ((ioc_state & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_FAULT) {
		mpt3sas_print_fault_code(ioc, ioc_state &
		    MPI2_DOORBELL_DATA_MASK);
		rc = mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER);
		return (!rc) ? SUCCESS : FAILED;
	} else if ((ioc_state & MPI2_IOC_STATE_MASK) ==
	    MPI2_IOC_STATE_COREDUMP) {
		mpt3sas_print_coredump_info(ioc, ioc_state &
		    MPI2_DOORBELL_DATA_MASK);
		rc = mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER);
		return (!rc) ? SUCCESS : FAILED;
	}

	/* TM requests travel on the high-priority queue */
	smid = mpt3sas_base_get_smid_hpr(ioc, ioc->tm_cb_idx);
	if (!smid) {
		ioc_err(ioc, "%s: failed obtaining a smid\n", __func__);
		return FAILED;
	}

	dtmprintk(ioc,
		  ioc_info(ioc, "sending tm: handle(0x%04x), task_type(0x%02x), smid(%d), timeout(%d), tr_method(0x%x)\n",
			   handle, type, smid_task, timeout, tr_method));
	ioc->tm_cmds.status = MPT3_CMD_PENDING;
	mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
	ioc->tm_cmds.smid = smid;
	memset(mpi_request, 0, sizeof(Mpi2SCSITaskManagementRequest_t));
	memset(ioc->tm_cmds.reply, 0, sizeof(Mpi2SCSITaskManagementReply_t));
	mpi_request->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
	mpi_request->DevHandle = cpu_to_le16(handle);
	mpi_request->TaskType = type;
	if (type == MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK ||
	    type == MPI2_SCSITASKMGMT_TASKTYPE_QUERY_TASK)
		mpi_request->MsgFlags = tr_method;
	mpi_request->TaskMID = cpu_to_le16(smid_task);
	int_to_scsilun(lun, (struct scsi_lun *)mpi_request->LUN);
	mpt3sas_scsih_set_tm_flag(ioc, handle);
	init_completion(&ioc->tm_cmds.done);
	ioc->put_smid_hi_priority(ioc, smid, msix_task);
	wait_for_completion_timeout(&ioc->tm_cmds.done, timeout*HZ);
	if (!(ioc->tm_cmds.status & MPT3_CMD_COMPLETE)) {
		mpt3sas_check_cmd_timeout(ioc,
		    ioc->tm_cmds.status, mpi_request,
		    sizeof(Mpi2SCSITaskManagementRequest_t)/4, issue_reset);
		if (issue_reset) {
			rc = mpt3sas_base_hard_reset_handler(ioc,
			    FORCE_BIG_HAMMER);
			rc = (!rc) ? SUCCESS : FAILED;
			goto out;
		}
	}

	/* sync IRQs in case those were busy during flush. */
	mpt3sas_base_sync_reply_irqs(ioc, 0);

	if (ioc->tm_cmds.status & MPT3_CMD_REPLY_VALID) {
		mpt3sas_trigger_master(ioc, MASTER_TRIGGER_TASK_MANAGMENT);
		mpi_reply = ioc->tm_cmds.reply;
		dtmprintk(ioc,
			  ioc_info(ioc, "complete tm: ioc_status(0x%04x), loginfo(0x%08x), term_count(0x%08x)\n",
				   le16_to_cpu(mpi_reply->IOCStatus),
				   le32_to_cpu(mpi_reply->IOCLogInfo),
				   le32_to_cpu(mpi_reply->TerminationCount)));
		if (ioc->logging_level & MPT_DEBUG_TM) {
			_scsih_response_code(ioc, mpi_reply->ResponseCode);
			if (mpi_reply->IOCStatus)
				_debug_dump_mf(mpi_request,
				    sizeof(Mpi2SCSITaskManagementRequest_t)/4);
		}
	}

	switch (type) {
	case MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK:
		rc = SUCCESS;
		/*
		 * If the DevHandle field in smid_task's entry of the request
		 * pool doesn't match the device handle on which this task
		 * abort TM is received then it means that TM has successfully
		 * aborted the timed out command, since smid_task's entry in
		 * the request pool will be memset to zero once the timed out
		 * command is returned to the SML. If the command is not
		 * aborted then smid_task's entry won't be cleared and it
		 * will have same DevHandle value on which this task abort TM
		 * is received and driver will return the TM status as FAILED.
		 */
		request = mpt3sas_base_get_msg_frame(ioc, smid_task);
		if (le16_to_cpu(request->DevHandle) != handle)
			break;

		ioc_info(ioc, "Task abort tm failed: handle(0x%04x),"
		    "timeout(%d) tr_method(0x%x) smid(%d) msix_index(%d)\n",
		    handle, timeout, tr_method, smid_task, msix_task);
		rc = FAILED;
		break;

	case MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET:
	case MPI2_SCSITASKMGMT_TASKTYPE_ABRT_TASK_SET:
	case MPI2_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET:
		rc = scsih_tm_post_processing(ioc, handle, channel, id, lun,
		    type, smid_task);
		break;
	case MPI2_SCSITASKMGMT_TASKTYPE_QUERY_TASK:
		rc = SUCCESS;
		break;
	default:
		rc = FAILED;
		break;
	}

out:
	mpt3sas_scsih_clear_tm_flag(ioc, handle);
	ioc->tm_cmds.status = MPT3_CMD_NOT_USED;
	return rc;
}

/**
 * mpt3sas_scsih_issue_locked_tm - wrapper that takes tm_cmds.mutex around
 * mpt3sas_scsih_issue_tm(); same parameters and return value.
 */
int mpt3sas_scsih_issue_locked_tm(struct MPT3SAS_ADAPTER *ioc, u16 handle,
	uint channel, uint id, u64 lun, u8 type, u16 smid_task,
	u16 msix_task, u8 timeout, u8 tr_method)
{
	int ret;

	mutex_lock(&ioc->tm_cmds.mutex);
	ret = mpt3sas_scsih_issue_tm(ioc, handle, channel, id, lun, type,
	    smid_task, msix_task, timeout, tr_method);
	mutex_unlock(&ioc->tm_cmds.mutex);

	return ret;
}

/**
 * _scsih_tm_display_info - displays info about the device
 * @ioc: per adapter struct
 * @scmd: pointer to scsi command object
 *
 * Called by task management callback handlers.
 */
static void
_scsih_tm_display_info(struct MPT3SAS_ADAPTER *ioc, struct scsi_cmnd *scmd)
{
	struct scsi_target *starget = scmd->device->sdev_target;
	struct MPT3SAS_TARGET *priv_target = starget->hostdata;
	struct _sas_device *sas_device = NULL;
	struct _pcie_device *pcie_device = NULL;
	unsigned long flags;
	char *device_str = NULL;

	if (!priv_target)
		return;
	/* hide_ir_msg selects the WarpDrive wording for volume messages */
	if (ioc->hide_ir_msg)
		device_str = "WarpDrive";
	else
		device_str = "volume";

	scsi_print_command(scmd);
	if (priv_target->flags & MPT_TARGET_FLAGS_VOLUME) {
		starget_printk(KERN_INFO, starget,
		    "%s handle(0x%04x), %s wwid(0x%016llx)\n",
		    device_str, priv_target->handle,
		    device_str, (unsigned long long)priv_target->sas_address);

	} else if (priv_target->flags & MPT_TARGET_FLAGS_PCIE_DEVICE) {
		/* NVMe/PCIe attached device: look it up under pcie_device_lock */
		spin_lock_irqsave(&ioc->pcie_device_lock, flags);
		pcie_device = __mpt3sas_get_pdev_from_target(ioc, priv_target);
		if (pcie_device) {
			starget_printk(KERN_INFO, starget,
			    "handle(0x%04x), wwid(0x%016llx), port(%d)\n",
			    pcie_device->handle,
			    (unsigned long long)pcie_device->wwid,
			    pcie_device->port_num);
			if (pcie_device->enclosure_handle != 0)
				starget_printk(KERN_INFO, starget,
				    "enclosure logical id(0x%016llx), slot(%d)\n",
				    (unsigned long long)
				    pcie_device->enclosure_logical_id,
				    pcie_device->slot);
			if (pcie_device->connector_name[0] != '\0')
				starget_printk(KERN_INFO, starget,
				    "enclosure level(0x%04x), connector name( %s)\n",
				    pcie_device->enclosure_level,
				    pcie_device->connector_name);
			/* drop the reference taken by the lookup above */
			pcie_device_put(pcie_device);
		}
		spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);

	} else {
		/* SAS/SATA attached device: look it up under sas_device_lock */
		spin_lock_irqsave(&ioc->sas_device_lock, flags);
		sas_device = __mpt3sas_get_sdev_from_target(ioc, priv_target);
		if (sas_device) {
			if (priv_target->flags &
			    MPT_TARGET_FLAGS_RAID_COMPONENT) {
				starget_printk(KERN_INFO, starget,
				    "volume handle(0x%04x), "
				    "volume wwid(0x%016llx)\n",
				    sas_device->volume_handle,
				    (unsigned long long)sas_device->volume_wwid);
			}
			starget_printk(KERN_INFO, starget,
			    "handle(0x%04x), sas_address(0x%016llx), phy(%d)\n",
			    sas_device->handle,
			    (unsigned long long)sas_device->sas_address,
			    sas_device->phy);

			_scsih_display_enclosure_chassis_info(NULL, sas_device,
			    NULL, starget);

			/* drop the reference taken by the lookup above */
			sas_device_put(sas_device);
		}
		spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
	}
}

/**
 * scsih_abort - eh threads main abort routine
 * @scmd: pointer to scsi command object
 *
 * Return: SUCCESS if command aborted else FAILED
 */
static int
scsih_abort(struct scsi_cmnd *scmd)
{
	struct MPT3SAS_ADAPTER *ioc = shost_priv(scmd->device->host);
	struct MPT3SAS_DEVICE *sas_device_priv_data;
	struct scsiio_tracker *st = scsi_cmd_priv(scmd);
	u16 handle;
	int r;

	/* default TM timeout; overridden below for NVMe devices */
	u8 timeout = 30;
	struct _pcie_device *pcie_device = NULL;
	sdev_printk(KERN_INFO, scmd->device, "attempting task abort!"
	    "scmd(0x%p), outstanding for %u ms & timeout %u ms\n",
	    scmd, jiffies_to_msecs(jiffies - scmd->jiffies_at_alloc),
	    (scsi_cmd_to_rq(scmd)->timeout / HZ) * 1000);
	_scsih_tm_display_info(ioc, scmd);

	sas_device_priv_data = scmd->device->hostdata;
	if (!sas_device_priv_data || !sas_device_priv_data->sas_target ||
	    ioc->remove_host) {
		sdev_printk(KERN_INFO, scmd->device,
		    "device been deleted! scmd(0x%p)\n", scmd);
		scmd->result = DID_NO_CONNECT << 16;
		scsi_done(scmd);
		r = SUCCESS;
		goto out;
	}

	/* check for completed command */
	if (st == NULL || st->cb_idx == 0xFF) {
		sdev_printk(KERN_INFO, scmd->device, "No reference found at "
		    "driver, assuming scmd(0x%p) might have completed\n", scmd);
		scmd->result = DID_RESET << 16;
		r = SUCCESS;
		goto out;
	}

	/* for hidden raid components and volumes this is not supported */
	if (sas_device_priv_data->sas_target->flags &
	    MPT_TARGET_FLAGS_RAID_COMPONENT ||
	    sas_device_priv_data->sas_target->flags & MPT_TARGET_FLAGS_VOLUME) {
		scmd->result = DID_RESET << 16;
		r = FAILED;
		goto out;
	}

	mpt3sas_halt_firmware(ioc);

	handle = sas_device_priv_data->sas_target->handle;
	pcie_device = mpt3sas_get_pdev_by_handle(ioc, handle);
	/* real NVMe devices (not PCIe-SCSI) use the NVMe abort timeout */
	if (pcie_device && (!ioc->tm_custom_handling) &&
	    (!(mpt3sas_scsih_is_pcie_scsi_device(pcie_device->device_info))))
		timeout = ioc->nvme_abort_timeout;
	r = mpt3sas_scsih_issue_locked_tm(ioc, handle, scmd->device->channel,
	    scmd->device->id, scmd->device->lun,
	    MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK,
	    st->smid, st->msix_io, timeout, 0);
	/* Command must be cleared after abort */
	if (r == SUCCESS && st->cb_idx != 0xFF)
		r = FAILED;
out:
	sdev_printk(KERN_INFO, scmd->device, "task abort: %s scmd(0x%p)\n",
	    ((r == SUCCESS) ? "SUCCESS" : "FAILED"), scmd);
	if (pcie_device)
		pcie_device_put(pcie_device);
	return r;
}

/**
 * scsih_dev_reset - eh threads main device reset routine
 * @scmd: pointer to scsi command object
 *
 * Return: SUCCESS if command aborted else FAILED
 */
static int
scsih_dev_reset(struct scsi_cmnd *scmd)
{
	struct MPT3SAS_ADAPTER *ioc = shost_priv(scmd->device->host);
	struct MPT3SAS_DEVICE *sas_device_priv_data;
	struct _sas_device *sas_device = NULL;
	struct _pcie_device *pcie_device = NULL;
	u16 handle;
	u8 tr_method = 0;
	u8 tr_timeout = 30;
	int r;

	struct scsi_target *starget = scmd->device->sdev_target;
	struct MPT3SAS_TARGET *target_priv_data = starget->hostdata;

	sdev_printk(KERN_INFO, scmd->device,
	    "attempting device reset! scmd(0x%p)\n", scmd);
	_scsih_tm_display_info(ioc, scmd);

	sas_device_priv_data = scmd->device->hostdata;
	if (!sas_device_priv_data || !sas_device_priv_data->sas_target ||
	    ioc->remove_host) {
		sdev_printk(KERN_INFO, scmd->device,
		    "device been deleted! scmd(0x%p)\n", scmd);
		scmd->result = DID_NO_CONNECT << 16;
		scsi_done(scmd);
		r = SUCCESS;
		goto out;
	}

	/* for hidden raid components obtain the volume_handle */
	handle = 0;
	if (sas_device_priv_data->sas_target->flags &
	    MPT_TARGET_FLAGS_RAID_COMPONENT) {
		sas_device = mpt3sas_get_sdev_from_target(ioc,
		    target_priv_data);
		if (sas_device)
			handle = sas_device->volume_handle;
	} else
		handle = sas_device_priv_data->sas_target->handle;

	if (!handle) {
		scmd->result = DID_RESET << 16;
		r = FAILED;
		goto out;
	}

	pcie_device = mpt3sas_get_pdev_by_handle(ioc, handle);

	/* NVMe devices get their per-device reset timeout and PCIe method */
	if (pcie_device && (!ioc->tm_custom_handling) &&
	    (!(mpt3sas_scsih_is_pcie_scsi_device(pcie_device->device_info)))) {
		tr_timeout = pcie_device->reset_timeout;
		tr_method = MPI26_SCSITASKMGMT_MSGFLAGS_PROTOCOL_LVL_RST_PCIE;
	} else
		tr_method = MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET;

	r = mpt3sas_scsih_issue_locked_tm(ioc, handle, scmd->device->channel,
	    scmd->device->id, scmd->device->lun,
	    MPI2_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET, 0, 0,
	    tr_timeout, tr_method);
	/* Check for busy commands after reset */
	if (r == SUCCESS && scsi_device_busy(scmd->device))
		r = FAILED;
out:
	sdev_printk(KERN_INFO, scmd->device, "device reset: %s scmd(0x%p)\n",
	    ((r == SUCCESS) ? "SUCCESS" : "FAILED"), scmd);

	if (sas_device)
		sas_device_put(sas_device);
	if (pcie_device)
		pcie_device_put(pcie_device);

	return r;
}

/**
 * scsih_target_reset - eh threads main target reset routine
 * @scmd: pointer to scsi command object
 *
 * Return: SUCCESS if command aborted else FAILED
 */
static int
scsih_target_reset(struct scsi_cmnd *scmd)
{
	struct MPT3SAS_ADAPTER *ioc = shost_priv(scmd->device->host);
	struct MPT3SAS_DEVICE *sas_device_priv_data;
	struct _sas_device *sas_device = NULL;
	struct _pcie_device *pcie_device = NULL;
	u16 handle;
	u8 tr_method = 0;
	u8 tr_timeout = 30;
	int r;
	struct scsi_target *starget = scmd->device->sdev_target;
	struct MPT3SAS_TARGET *target_priv_data = starget->hostdata;

	starget_printk(KERN_INFO, starget,
	    "attempting target reset! scmd(0x%p)\n", scmd);
	_scsih_tm_display_info(ioc, scmd);

	sas_device_priv_data = scmd->device->hostdata;
	if (!sas_device_priv_data || !sas_device_priv_data->sas_target ||
	    ioc->remove_host) {
		starget_printk(KERN_INFO, starget,
		    "target been deleted! scmd(0x%p)\n", scmd);
		scmd->result = DID_NO_CONNECT << 16;
		scsi_done(scmd);
		r = SUCCESS;
		goto out;
	}

	/* for hidden raid components obtain the volume_handle */
	handle = 0;
	if (sas_device_priv_data->sas_target->flags &
	    MPT_TARGET_FLAGS_RAID_COMPONENT) {
		sas_device = mpt3sas_get_sdev_from_target(ioc,
		    target_priv_data);
		if (sas_device)
			handle = sas_device->volume_handle;
	} else
		handle = sas_device_priv_data->sas_target->handle;

	if (!handle) {
		scmd->result = DID_RESET << 16;
		r = FAILED;
		goto out;
	}

	pcie_device = mpt3sas_get_pdev_by_handle(ioc, handle);

	/* NVMe devices get their per-device reset timeout and PCIe method */
	if (pcie_device && (!ioc->tm_custom_handling) &&
	    (!(mpt3sas_scsih_is_pcie_scsi_device(pcie_device->device_info)))) {
		tr_timeout = pcie_device->reset_timeout;
		tr_method = MPI26_SCSITASKMGMT_MSGFLAGS_PROTOCOL_LVL_RST_PCIE;
	} else
		tr_method = MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET;
	/* whole-target reset: lun argument is passed as 0 */
	r = mpt3sas_scsih_issue_locked_tm(ioc, handle, scmd->device->channel,
	    scmd->device->id, 0,
	    MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET, 0, 0,
	    tr_timeout, tr_method);
	/* Check for busy commands after reset */
	if (r == SUCCESS && atomic_read(&starget->target_busy))
		r = FAILED;
out:
	starget_printk(KERN_INFO, starget, "target reset: %s scmd(0x%p)\n",
	    ((r == SUCCESS) ? "SUCCESS" : "FAILED"), scmd);

	if (sas_device)
		sas_device_put(sas_device);
	if (pcie_device)
		pcie_device_put(pcie_device);
	return r;
}


/**
 * scsih_host_reset - eh threads main host reset routine
 * @scmd: pointer to scsi command object
 *
 * Return: SUCCESS if command aborted else FAILED
 */
static int
scsih_host_reset(struct scsi_cmnd *scmd)
{
	struct MPT3SAS_ADAPTER *ioc = shost_priv(scmd->device->host);
	int r, retval;

	ioc_info(ioc, "attempting host reset! scmd(0x%p)\n", scmd);
	scsi_print_command(scmd);

	/* refuse resets while the driver is still loading or unloading */
	if (ioc->is_driver_loading || ioc->remove_host) {
		ioc_info(ioc, "Blocking the host reset\n");
		r = FAILED;
		goto out;
	}

	retval = mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER);
	r = (retval < 0) ? FAILED : SUCCESS;
out:
	ioc_info(ioc, "host reset: %s scmd(0x%p)\n",
	    r == SUCCESS ? "SUCCESS" : "FAILED", scmd);

	return r;
}

/**
 * _scsih_fw_event_add - insert and queue up fw_event
 * @ioc: per adapter object
 * @fw_event: object describing the event
 * Context: This function will acquire ioc->fw_event_lock.
 *
 * This adds the firmware event object into link list, then queues it up to
 * be processed from user context.
 */
static void
_scsih_fw_event_add(struct MPT3SAS_ADAPTER *ioc, struct fw_event_work *fw_event)
{
	unsigned long flags;

	if (ioc->firmware_event_thread == NULL)
		return;

	spin_lock_irqsave(&ioc->fw_event_lock, flags);
	/* one reference for the list ... */
	fw_event_work_get(fw_event);
	INIT_LIST_HEAD(&fw_event->list);
	list_add_tail(&fw_event->list, &ioc->fw_event_list);
	INIT_WORK(&fw_event->work, _firmware_event_work);
	/* ... and one reference for the queued work item */
	fw_event_work_get(fw_event);
	queue_work(ioc->firmware_event_thread, &fw_event->work);
	spin_unlock_irqrestore(&ioc->fw_event_lock, flags);
}

/**
 * _scsih_fw_event_del_from_list - delete fw_event from the list
 * @ioc: per adapter object
 * @fw_event: object describing the event
 * Context: This function will acquire ioc->fw_event_lock.
 *
 * If the fw_event is on the fw_event_list, remove it and do a put.
 */
static void
_scsih_fw_event_del_from_list(struct MPT3SAS_ADAPTER *ioc, struct fw_event_work
	*fw_event)
{
	unsigned long flags;

	spin_lock_irqsave(&ioc->fw_event_lock, flags);
	if (!list_empty(&fw_event->list)) {
		list_del_init(&fw_event->list);
		/* drop the reference the list held */
		fw_event_work_put(fw_event);
	}
	spin_unlock_irqrestore(&ioc->fw_event_lock, flags);
}


/**
 * mpt3sas_send_trigger_data_event - send event for processing trigger data
 * @ioc: per adapter object
 * @event_data: trigger event data
 */
void
mpt3sas_send_trigger_data_event(struct MPT3SAS_ADAPTER *ioc,
	struct SL_WH_TRIGGERS_EVENT_DATA_T *event_data)
{
	struct fw_event_work *fw_event;
	u16 sz;

	if (ioc->is_driver_loading)
		return;
	sz = sizeof(*event_data);
	fw_event = alloc_fw_event_work(sz);
	if (!fw_event)
		return;
	fw_event->event = MPT3SAS_PROCESS_TRIGGER_DIAG;
	fw_event->ioc = ioc;
	memcpy(fw_event->event_data, event_data, sizeof(*event_data));
	_scsih_fw_event_add(ioc, fw_event);
	/* drop the allocation reference; the queue now owns the event */
	fw_event_work_put(fw_event);
}

/**
 * _scsih_error_recovery_delete_devices - remove devices not responding
 * @ioc: per adapter object
 */
static void
_scsih_error_recovery_delete_devices(struct MPT3SAS_ADAPTER *ioc)
{
	struct fw_event_work *fw_event;

	fw_event = alloc_fw_event_work(0);
	if (!fw_event)
		return;
	fw_event->event = MPT3SAS_REMOVE_UNRESPONDING_DEVICES;
	fw_event->ioc = ioc;
	_scsih_fw_event_add(ioc, fw_event);
	/* drop the allocation reference; the queue now owns the event */
	fw_event_work_put(fw_event);
}

/**
 * mpt3sas_port_enable_complete - port enable completed (fake event)
 * @ioc: per adapter object
 */
void
mpt3sas_port_enable_complete(struct MPT3SAS_ADAPTER *ioc)
{
	struct fw_event_work *fw_event;

	fw_event = alloc_fw_event_work(0);
	if (!fw_event)
		return;
	fw_event->event = MPT3SAS_PORT_ENABLE_COMPLETE;
	fw_event->ioc = ioc;
	_scsih_fw_event_add(ioc, fw_event);
	/* drop the allocation reference; the queue now owns the event */
	fw_event_work_put(fw_event);
}

/*
 * dequeue_next_fw_event - pop the first event off ioc->fw_event_list
 *
 * Drops the list's reference on the event; the returned pointer is
 * kept alive by the reference taken for the queued work item in
 * _scsih_fw_event_add(). Returns NULL when the list is empty.
 */
static struct fw_event_work *dequeue_next_fw_event(struct MPT3SAS_ADAPTER *ioc)
{
	unsigned long flags;
	struct fw_event_work *fw_event = NULL;

	spin_lock_irqsave(&ioc->fw_event_lock, flags);
	if (!list_empty(&ioc->fw_event_list)) {
		fw_event = list_first_entry(&ioc->fw_event_list,
		    struct fw_event_work, list);
		list_del_init(&fw_event->list);
		fw_event_work_put(fw_event);
	}
	spin_unlock_irqrestore(&ioc->fw_event_lock, flags);

	return fw_event;
}

/**
 * _scsih_fw_event_cleanup_queue - cleanup event queue
 * @ioc: per adapter object
 *
 * Walk the firmware event queue, either killing timers, or waiting
 * for outstanding events to complete
 *
 * Context: task, can sleep
 */
static void
_scsih_fw_event_cleanup_queue(struct MPT3SAS_ADAPTER *ioc)
{
	struct fw_event_work *fw_event;

	if ((list_empty(&ioc->fw_event_list) && !ioc->current_event) ||
	    !ioc->firmware_event_thread)
		return;
	/*
	 * Set current running event as ignore, so that
	 * current running event will exit quickly.
	 * As diag reset has occurred it is of no use
	 * to process remaining stale event data entries.
	 */
	if (ioc->shost_recovery && ioc->current_event)
		ioc->current_event->ignore = 1;

	ioc->fw_events_cleanup = 1;
	while ((fw_event = dequeue_next_fw_event(ioc)) ||
	    (fw_event = ioc->current_event)) {

		/*
		 * Don't call cancel_work_sync() for current_event
		 * other than MPT3SAS_REMOVE_UNRESPONDING_DEVICES;
		 * otherwise we may observe deadlock if current
		 * hard reset issued as part of processing the current_event.
		 *
		 * Original logic of cleaning the current_event is added
		 * for handling the back to back host reset issued by the user.
		 * i.e. during back to back host reset, driver used to process
		 * the two instances of MPT3SAS_REMOVE_UNRESPONDING_DEVICES
		 * event back to back and this made the drives to unregister
		 * the devices from SML.
		 */

		if (fw_event == ioc->current_event &&
		    ioc->current_event->event !=
		    MPT3SAS_REMOVE_UNRESPONDING_DEVICES) {
			ioc->current_event = NULL;
			continue;
		}

		/*
		 * Driver has to clear ioc->start_scan flag when
		 * it is cleaning up MPT3SAS_PORT_ENABLE_COMPLETE,
		 * otherwise scsi_scan_host() API waits for the
		 * 5 minute timer to expire. If we exit from
		 * scsi_scan_host() early then we can issue the
		 * new port enable request as part of current diag reset.
		 */
		if (fw_event->event == MPT3SAS_PORT_ENABLE_COMPLETE) {
			ioc->port_enable_cmds.status |= MPT3_CMD_RESET;
			ioc->start_scan = 0;
		}

		/*
		 * Wait on the fw_event to complete. If this returns 1, then
		 * the event was never executed, and we need a put for the
		 * reference the work had on the fw_event.
		 *
		 * If it did execute, we wait for it to finish, and the put will
		 * happen from _firmware_event_work()
		 */
		if (cancel_work_sync(&fw_event->work))
			fw_event_work_put(fw_event);

	}
	ioc->fw_events_cleanup = 0;
}

/**
 * _scsih_internal_device_block - block the sdev device
 * @sdev: per device object
 * @sas_device_priv_data : per device driver private data
 *
 * make sure device is blocked without error, if not
 * print an error
 */
static void
_scsih_internal_device_block(struct scsi_device *sdev,
	struct MPT3SAS_DEVICE *sas_device_priv_data)
{
	int r = 0;

	sdev_printk(KERN_INFO, sdev, "device_block, handle(0x%04x)\n",
	    sas_device_priv_data->sas_target->handle);
	sas_device_priv_data->block = 1;

	r = scsi_internal_device_block_nowait(sdev);
	if (r == -EINVAL)
		sdev_printk(KERN_WARNING, sdev,
		    "device_block failed with return(%d) for handle(0x%04x)\n",
		    r, sas_device_priv_data->sas_target->handle);
}

/**
 * _scsih_internal_device_unblock - unblock the sdev device
 * @sdev: per device object
 * @sas_device_priv_data : per device driver private data
 * make sure device is unblocked without error, if not retry
 * by blocking and then unblocking
 */

static void
_scsih_internal_device_unblock(struct scsi_device *sdev,
	struct MPT3SAS_DEVICE *sas_device_priv_data)
{
	int r = 0;

	sdev_printk(KERN_WARNING, sdev, "device_unblock and setting to running, "
	    "handle(0x%04x)\n", sas_device_priv_data->sas_target->handle);
	sas_device_priv_data->block = 0;
	r = scsi_internal_device_unblock_nowait(sdev, SDEV_RUNNING);
	if (r == -EINVAL) {
		/* The device has been set to SDEV_RUNNING by SD layer during
		 * device addition but the request queue is still stopped by
		 * our earlier block call.
We need to perform a block again 3804 * to get the device to SDEV_BLOCK and then to SDEV_RUNNING */ 3805 3806 sdev_printk(KERN_WARNING, sdev, 3807 "device_unblock failed with return(%d) for handle(0x%04x) " 3808 "performing a block followed by an unblock\n", 3809 r, sas_device_priv_data->sas_target->handle); 3810 sas_device_priv_data->block = 1; 3811 r = scsi_internal_device_block_nowait(sdev); 3812 if (r) 3813 sdev_printk(KERN_WARNING, sdev, "retried device_block " 3814 "failed with return(%d) for handle(0x%04x)\n", 3815 r, sas_device_priv_data->sas_target->handle); 3816 3817 sas_device_priv_data->block = 0; 3818 r = scsi_internal_device_unblock_nowait(sdev, SDEV_RUNNING); 3819 if (r) 3820 sdev_printk(KERN_WARNING, sdev, "retried device_unblock" 3821 " failed with return(%d) for handle(0x%04x)\n", 3822 r, sas_device_priv_data->sas_target->handle); 3823 } 3824 } 3825 3826 /** 3827 * _scsih_ublock_io_all_device - unblock every device 3828 * @ioc: per adapter object 3829 * 3830 * change the device state from block to running 3831 */ 3832 static void 3833 _scsih_ublock_io_all_device(struct MPT3SAS_ADAPTER *ioc) 3834 { 3835 struct MPT3SAS_DEVICE *sas_device_priv_data; 3836 struct scsi_device *sdev; 3837 3838 shost_for_each_device(sdev, ioc->shost) { 3839 sas_device_priv_data = sdev->hostdata; 3840 if (!sas_device_priv_data) 3841 continue; 3842 if (!sas_device_priv_data->block) 3843 continue; 3844 3845 dewtprintk(ioc, sdev_printk(KERN_INFO, sdev, 3846 "device_running, handle(0x%04x)\n", 3847 sas_device_priv_data->sas_target->handle)); 3848 _scsih_internal_device_unblock(sdev, sas_device_priv_data); 3849 } 3850 } 3851 3852 3853 /** 3854 * _scsih_ublock_io_device - prepare device to be deleted 3855 * @ioc: per adapter object 3856 * @sas_address: sas address 3857 * @port: hba port entry 3858 * 3859 * unblock then put device in offline state 3860 */ 3861 static void 3862 _scsih_ublock_io_device(struct MPT3SAS_ADAPTER *ioc, 3863 u64 sas_address, struct hba_port *port) 3864 { 
	struct MPT3SAS_DEVICE *sas_device_priv_data;
	struct scsi_device *sdev;

	/* unblock every sdev that matches both the sas address and hba port */
	shost_for_each_device(sdev, ioc->shost) {
		sas_device_priv_data = sdev->hostdata;
		if (!sas_device_priv_data || !sas_device_priv_data->sas_target)
			continue;
		if (sas_device_priv_data->sas_target->sas_address
		    != sas_address)
			continue;
		if (sas_device_priv_data->sas_target->port != port)
			continue;
		if (sas_device_priv_data->block)
			_scsih_internal_device_unblock(sdev,
			    sas_device_priv_data);
	}
}

/**
 * _scsih_block_io_all_device - set the device state to SDEV_BLOCK
 * @ioc: per adapter object
 *
 * During device pull we need to appropriately set the sdev state.
 */
static void
_scsih_block_io_all_device(struct MPT3SAS_ADAPTER *ioc)
{
	struct MPT3SAS_DEVICE *sas_device_priv_data;
	struct scsi_device *sdev;

	shost_for_each_device(sdev, ioc->shost) {
		sas_device_priv_data = sdev->hostdata;
		if (!sas_device_priv_data)
			continue;
		if (sas_device_priv_data->block)
			continue;
		/* SES devices flagged ignore_delay_remove are never blocked */
		if (sas_device_priv_data->ignore_delay_remove) {
			sdev_printk(KERN_INFO, sdev,
			    "%s skip device_block for SES handle(0x%04x)\n",
			    __func__, sas_device_priv_data->sas_target->handle);
			continue;
		}
		_scsih_internal_device_block(sdev, sas_device_priv_data);
	}
}

/**
 * _scsih_block_io_device - set the device state to SDEV_BLOCK
 * @ioc: per adapter object
 * @handle: device handle
 *
 * During device pull we need to appropriately set the sdev state.
 */
static void
_scsih_block_io_device(struct MPT3SAS_ADAPTER *ioc, u16 handle)
{
	struct MPT3SAS_DEVICE *sas_device_priv_data;
	struct scsi_device *sdev;
	struct _sas_device *sas_device;

	sas_device = mpt3sas_get_sdev_by_handle(ioc, handle);

	shost_for_each_device(sdev, ioc->shost) {
		sas_device_priv_data = sdev->hostdata;
		if (!sas_device_priv_data)
			continue;
		if (sas_device_priv_data->sas_target->handle != handle)
			continue;
		if (sas_device_priv_data->block)
			continue;
		/* don't block while a sas_rphy addition is still pending */
		if (sas_device && sas_device->pend_sas_rphy_add)
			continue;
		/* SES devices flagged ignore_delay_remove are never blocked */
		if (sas_device_priv_data->ignore_delay_remove) {
			sdev_printk(KERN_INFO, sdev,
			    "%s skip device_block for SES handle(0x%04x)\n",
			    __func__, sas_device_priv_data->sas_target->handle);
			continue;
		}
		_scsih_internal_device_block(sdev, sas_device_priv_data);
	}

	if (sas_device)
		sas_device_put(sas_device);
}

/**
 * _scsih_block_io_to_children_attached_to_ex
 * @ioc: per adapter object
 * @sas_expander: the sas_device object
 *
 * This routine set sdev state to SDEV_BLOCK for all devices
 * attached to this expander. This function called when expander is
 * pulled.
 */
static void
_scsih_block_io_to_children_attached_to_ex(struct MPT3SAS_ADAPTER *ioc,
	struct _sas_node *sas_expander)
{
	struct _sas_port *mpt3sas_port;
	struct _sas_device *sas_device;
	struct _sas_node *expander_sibling;
	unsigned long flags;

	if (!sas_expander)
		return;

	/* first pass: mark all directly attached end devices for blocking */
	list_for_each_entry(mpt3sas_port,
	    &sas_expander->sas_port_list, port_list) {
		if (mpt3sas_port->remote_identify.device_type ==
		    SAS_END_DEVICE) {
			spin_lock_irqsave(&ioc->sas_device_lock, flags);
			sas_device = __mpt3sas_get_sdev_by_addr(ioc,
			    mpt3sas_port->remote_identify.sas_address,
			    mpt3sas_port->hba_port);
			if (sas_device) {
				set_bit(sas_device->handle,
				    ioc->blocking_handles);
				sas_device_put(sas_device);
			}
			spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
		}
	}

	/* second pass: recurse into child expanders */
	list_for_each_entry(mpt3sas_port,
	    &sas_expander->sas_port_list, port_list) {

		if (mpt3sas_port->remote_identify.device_type ==
		    SAS_EDGE_EXPANDER_DEVICE ||
		    mpt3sas_port->remote_identify.device_type ==
		    SAS_FANOUT_EXPANDER_DEVICE) {
			expander_sibling =
			    mpt3sas_scsih_expander_find_by_sas_address(
			    ioc, mpt3sas_port->remote_identify.sas_address,
			    mpt3sas_port->hba_port);
			_scsih_block_io_to_children_attached_to_ex(ioc,
			    expander_sibling);
		}
	}
}

/**
 * _scsih_block_io_to_children_attached_directly
 * @ioc: per adapter object
 * @event_data: topology change event data
 *
 * This routine set sdev state to SDEV_BLOCK for all devices
 * direct attached during device pull.
 */
static void
_scsih_block_io_to_children_attached_directly(struct MPT3SAS_ADAPTER *ioc,
	Mpi2EventDataSasTopologyChangeList_t *event_data)
{
	int i;
	u16 handle;
	u16 reason_code;

	for (i = 0; i < event_data->NumEntries; i++) {
		handle = le16_to_cpu(event_data->PHY[i].AttachedDevHandle);
		if (!handle)
			continue;
		reason_code = event_data->PHY[i].PhyStatus &
		    MPI2_EVENT_SAS_TOPO_RC_MASK;
		if (reason_code == MPI2_EVENT_SAS_TOPO_RC_DELAY_NOT_RESPONDING)
			_scsih_block_io_device(ioc, handle);
	}
}

/**
 * _scsih_block_io_to_pcie_children_attached_directly
 * @ioc: per adapter object
 * @event_data: topology change event data
 *
 * This routine set sdev state to SDEV_BLOCK for all devices
 * direct attached during device pull/reconnect.
 */
static void
_scsih_block_io_to_pcie_children_attached_directly(struct MPT3SAS_ADAPTER *ioc,
	Mpi26EventDataPCIeTopologyChangeList_t *event_data)
{
	int i;
	u16 handle;
	u16 reason_code;

	for (i = 0; i < event_data->NumEntries; i++) {
		handle =
		    le16_to_cpu(event_data->PortEntry[i].AttachedDevHandle);
		if (!handle)
			continue;
		reason_code = event_data->PortEntry[i].PortStatus;
		if (reason_code ==
		    MPI26_EVENT_PCIE_TOPO_PS_DELAY_NOT_RESPONDING)
			_scsih_block_io_device(ioc, handle);
	}
}
/**
 * _scsih_tm_tr_send - send task management request
 * @ioc: per adapter object
 * @handle: device handle
 * Context: interrupt time.
 *
 * This code is to initiate the device removal handshake protocol
 * with controller firmware. This function will issue target reset
 * using high priority request queue. It will send a sas iounit
 * control request (MPI2_SAS_OP_REMOVE_DEVICE) from this completion.
 *
 * This is designed to send multiple task management request at the same
 * time to the fifo.
 * If the fifo is full, we will append the request,
 * and process it in a future completion.
 */
static void
_scsih_tm_tr_send(struct MPT3SAS_ADAPTER *ioc, u16 handle)
{
	Mpi2SCSITaskManagementRequest_t *mpi_request;
	u16 smid;
	struct _sas_device *sas_device = NULL;
	struct _pcie_device *pcie_device = NULL;
	struct MPT3SAS_TARGET *sas_target_priv_data = NULL;
	u64 sas_address = 0;
	unsigned long flags;
	struct _tr_list *delayed_tr;
	u32 ioc_state;
	u8 tr_method = 0;
	struct hba_port *port = NULL;

	if (ioc->pci_error_recovery) {
		dewtprintk(ioc,
		    ioc_info(ioc, "%s: host in pci error recovery: handle(0x%04x)\n",
			     __func__, handle));
		return;
	}
	ioc_state = mpt3sas_base_get_iocstate(ioc, 1);
	if (ioc_state != MPI2_IOC_STATE_OPERATIONAL) {
		dewtprintk(ioc,
		    ioc_info(ioc, "%s: host is not operational: handle(0x%04x)\n",
			     __func__, handle));
		return;
	}

	/* if PD, then return */
	if (test_bit(handle, ioc->pd_handles))
		return;

	clear_bit(handle, ioc->pend_os_device_add);

	/* look up the departing device; mark its target as deleted */
	spin_lock_irqsave(&ioc->sas_device_lock, flags);
	sas_device = __mpt3sas_get_sdev_by_handle(ioc, handle);
	if (sas_device && sas_device->starget &&
	    sas_device->starget->hostdata) {
		sas_target_priv_data = sas_device->starget->hostdata;
		sas_target_priv_data->deleted = 1;
		sas_address = sas_device->sas_address;
		port = sas_device->port;
	}
	spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
	if (!sas_device) {
		/* not a SAS device: try the PCIe (NVMe) device list */
		spin_lock_irqsave(&ioc->pcie_device_lock, flags);
		pcie_device = __mpt3sas_get_pdev_by_handle(ioc, handle);
		if (pcie_device && pcie_device->starget &&
		    pcie_device->starget->hostdata) {
			sas_target_priv_data = pcie_device->starget->hostdata;
			sas_target_priv_data->deleted = 1;
			sas_address = pcie_device->wwid;
		}
		spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
		if (pcie_device && (!ioc->tm_custom_handling) &&
		    (!(mpt3sas_scsih_is_pcie_scsi_device(
		    pcie_device->device_info))))
			tr_method =
			    MPI26_SCSITASKMGMT_MSGFLAGS_PROTOCOL_LVL_RST_PCIE;
		else
			tr_method = MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET;
	}
	if (sas_target_priv_data) {
		dewtprintk(ioc,
		    ioc_info(ioc, "setting delete flag: handle(0x%04x), sas_addr(0x%016llx)\n",
			     handle, (u64)sas_address));
		if (sas_device) {
			if (sas_device->enclosure_handle != 0)
				dewtprintk(ioc,
				    ioc_info(ioc, "setting delete flag:enclosure logical id(0x%016llx), slot(%d)\n",
					     (u64)sas_device->enclosure_logical_id,
					     sas_device->slot));
			if (sas_device->connector_name[0] != '\0')
				dewtprintk(ioc,
				    ioc_info(ioc, "setting delete flag: enclosure level(0x%04x), connector name( %s)\n",
					     sas_device->enclosure_level,
					     sas_device->connector_name));
		} else if (pcie_device) {
			if (pcie_device->enclosure_handle != 0)
				dewtprintk(ioc,
				    ioc_info(ioc, "setting delete flag: logical id(0x%016llx), slot(%d)\n",
					     (u64)pcie_device->enclosure_logical_id,
					     pcie_device->slot));
			if (pcie_device->connector_name[0] != '\0')
				dewtprintk(ioc,
				    ioc_info(ioc, "setting delete flag:, enclosure level(0x%04x), connector name( %s)\n",
					     pcie_device->enclosure_level,
					     pcie_device->connector_name));
		}
		/* let any blocked I/O fail back before the device goes away */
		_scsih_ublock_io_device(ioc, sas_address, port);
		sas_target_priv_data->handle = MPT3SAS_INVALID_DEVICE_HANDLE;
	}

	smid = mpt3sas_base_get_smid_hpr(ioc, ioc->tm_tr_cb_idx);
	if (!smid) {
		/*
		 * No free high-priority smid: queue the target reset on
		 * delayed_tr_list; it is re-issued from a later completion.
		 */
		delayed_tr = kzalloc(sizeof(*delayed_tr), GFP_ATOMIC);
		if (!delayed_tr)
			goto out;
		INIT_LIST_HEAD(&delayed_tr->list);
		delayed_tr->handle = handle;
		list_add_tail(&delayed_tr->list, &ioc->delayed_tr_list);
		dewtprintk(ioc,
		    ioc_info(ioc, "DELAYED:tr:handle(0x%04x), (open)\n",
			     handle));
		goto out;
	}

	dewtprintk(ioc,
	    ioc_info(ioc, "tr_send:handle(0x%04x), (open), smid(%d), cb(%d)\n",
		     handle, smid, ioc->tm_tr_cb_idx));
	mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
	memset(mpi_request, 0, sizeof(Mpi2SCSITaskManagementRequest_t));
	mpi_request->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
	mpi_request->DevHandle = cpu_to_le16(handle);
	mpi_request->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET;
	mpi_request->MsgFlags = tr_method;
	set_bit(handle, ioc->device_remove_in_progress);
	ioc->put_smid_hi_priority(ioc, smid, 0);
	mpt3sas_trigger_master(ioc, MASTER_TRIGGER_DEVICE_REMOVAL);

out:
	if (sas_device)
		sas_device_put(sas_device);
	if (pcie_device)
		pcie_device_put(pcie_device);
}

/**
 * _scsih_tm_tr_complete -
 * @ioc: per adapter object
 * @smid: system request message index
 * @msix_index: MSIX table index supplied by the OS
 * @reply: reply message frame(lower 32bit addr)
 * Context: interrupt time.
 *
 * This is the target reset completion routine.
 * This code is part of the code to initiate the device removal
 * handshake protocol with controller firmware.
 * It will send a sas iounit control request (MPI2_SAS_OP_REMOVE_DEVICE)
 *
 * Return: 1 meaning mf should be freed from _base_interrupt
 *	   0 means the mf is freed from this function.
 */
static u8
_scsih_tm_tr_complete(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index,
	u32 reply)
{
	u16 handle;
	Mpi2SCSITaskManagementRequest_t *mpi_request_tm;
	Mpi2SCSITaskManagementReply_t *mpi_reply =
	    mpt3sas_base_get_reply_virt_addr(ioc, reply);
	Mpi2SasIoUnitControlRequest_t *mpi_request;
	u16 smid_sas_ctrl;
	u32 ioc_state;
	struct _sc_list *delayed_sc;

	/* nothing to follow up while the adapter is unusable */
	if (ioc->pci_error_recovery) {
		dewtprintk(ioc,
		    ioc_info(ioc, "%s: host in pci error recovery\n",
			     __func__));
		return 1;
	}
	ioc_state = mpt3sas_base_get_iocstate(ioc, 1);
	if (ioc_state != MPI2_IOC_STATE_OPERATIONAL) {
		dewtprintk(ioc,
		    ioc_info(ioc, "%s: host is not operational\n",
			     __func__));
		return 1;
	}
	if (unlikely(!mpi_reply)) {
		ioc_err(ioc, "mpi_reply not valid at %s:%d/%s()!\n",
			__FILE__, __LINE__, __func__);
		return 1;
	}
	/* sanity: reply must correspond to the TR request at this smid */
	mpi_request_tm = mpt3sas_base_get_msg_frame(ioc, smid);
	handle = le16_to_cpu(mpi_request_tm->DevHandle);
	if (handle != le16_to_cpu(mpi_reply->DevHandle)) {
		dewtprintk(ioc,
		    ioc_err(ioc, "spurious interrupt: handle(0x%04x:0x%04x), smid(%d)!!!\n",
			    handle,
			    le16_to_cpu(mpi_reply->DevHandle), smid));
		return 0;
	}

	mpt3sas_trigger_master(ioc, MASTER_TRIGGER_TASK_MANAGMENT);
	dewtprintk(ioc,
	    ioc_info(ioc, "tr_complete:handle(0x%04x), (open) smid(%d), ioc_status(0x%04x), loginfo(0x%08x), completed(%d)\n",
		     handle, smid, le16_to_cpu(mpi_reply->IOCStatus),
		     le32_to_cpu(mpi_reply->IOCLogInfo),
		     le32_to_cpu(mpi_reply->TerminationCount)));

	/* second half of the handshake: issue MPI2_SAS_OP_REMOVE_DEVICE;
	 * if no internal smid is free, queue it on delayed_sc_list
	 */
	smid_sas_ctrl = mpt3sas_base_get_smid(ioc, ioc->tm_sas_control_cb_idx);
	if (!smid_sas_ctrl) {
		delayed_sc = kzalloc(sizeof(*delayed_sc), GFP_ATOMIC);
		if (!delayed_sc)
			return _scsih_check_for_pending_tm(ioc, smid);
		INIT_LIST_HEAD(&delayed_sc->list);
		delayed_sc->handle = le16_to_cpu(mpi_request_tm->DevHandle);
		list_add_tail(&delayed_sc->list, &ioc->delayed_sc_list);
		dewtprintk(ioc,
		    ioc_info(ioc, "DELAYED:sc:handle(0x%04x), (open)\n",
			     handle));
		return _scsih_check_for_pending_tm(ioc, smid);
	}

	dewtprintk(ioc,
	    ioc_info(ioc, "sc_send:handle(0x%04x), (open), smid(%d), cb(%d)\n",
		     handle, smid_sas_ctrl, ioc->tm_sas_control_cb_idx));
	mpi_request = mpt3sas_base_get_msg_frame(ioc, smid_sas_ctrl);
	memset(mpi_request, 0, sizeof(Mpi2SasIoUnitControlRequest_t));
	mpi_request->Function = MPI2_FUNCTION_SAS_IO_UNIT_CONTROL;
	mpi_request->Operation = MPI2_SAS_OP_REMOVE_DEVICE;
	/* DevHandle copied as-is: both fields are little-endian on the wire */
	mpi_request->DevHandle = mpi_request_tm->DevHandle;
	ioc->put_smid_default(ioc, smid_sas_ctrl);

	return _scsih_check_for_pending_tm(ioc, smid);
}

/**
 * _scsih_allow_scmd_to_device - check whether scmd needs to
 *	issue to IOC or not.
 * @ioc: per adapter object
 * @scmd: pointer to scsi command object
 *
 * Returns true if scmd can be issued to IOC otherwise returns false.
 */
inline bool _scsih_allow_scmd_to_device(struct MPT3SAS_ADAPTER *ioc,
	struct scsi_cmnd *scmd)
{

	if (ioc->pci_error_recovery)
		return false;

	/* MPI2 (SAS2) generation: any command is allowed unless the host
	 * is being removed
	 */
	if (ioc->hba_mpi_version_belonged == MPI2_VERSION) {
		if (ioc->remove_host)
			return false;

		return true;
	}

	/* MPI2.5/2.6 generation: during host removal still let cache-sync
	 * and start/stop through so devices can be shut down cleanly
	 */
	if (ioc->remove_host) {

		switch (scmd->cmnd[0]) {
		case SYNCHRONIZE_CACHE:
		case START_STOP:
			return true;
		default:
			return false;
		}
	}

	return true;
}

/**
 * _scsih_sas_control_complete - completion routine
 * @ioc: per adapter object
 * @smid: system request message index
 * @msix_index: MSIX table index supplied by the OS
 * @reply: reply message frame(lower 32bit addr)
 * Context: interrupt time.
 *
 * This is the sas iounit control completion routine.
 * This code is part of the code to initiate the device removal
 * handshake protocol with controller firmware.
 *
 * Return: 1 meaning mf should be freed from _base_interrupt
 *	   0 means the mf is freed from this function.
 */
static u8
_scsih_sas_control_complete(struct MPT3SAS_ADAPTER *ioc, u16 smid,
	u8 msix_index, u32 reply)
{
	Mpi2SasIoUnitControlReply_t *mpi_reply =
	    mpt3sas_base_get_reply_virt_addr(ioc, reply);

	if (likely(mpi_reply)) {
		dewtprintk(ioc,
		    ioc_info(ioc, "sc_complete:handle(0x%04x), (open) smid(%d), ioc_status(0x%04x), loginfo(0x%08x)\n",
			     le16_to_cpu(mpi_reply->DevHandle), smid,
			     le16_to_cpu(mpi_reply->IOCStatus),
			     le32_to_cpu(mpi_reply->IOCLogInfo)));
		/* removal handshake finished: the handle may be reused by
		 * firmware, so clear the in-progress bit only on success
		 */
		if (le16_to_cpu(mpi_reply->IOCStatus) ==
		    MPI2_IOCSTATUS_SUCCESS) {
			clear_bit(le16_to_cpu(mpi_reply->DevHandle),
				  ioc->device_remove_in_progress);
		}
	} else {
		ioc_err(ioc, "mpi_reply not valid at %s:%d/%s()!\n",
			__FILE__, __LINE__, __func__);
	}
	/* reuse this smid for any queued event-ack/sas-control work */
	return mpt3sas_check_for_pending_internal_cmds(ioc, smid);
}

/**
 * _scsih_tm_tr_volume_send - send target reset request for volumes
 * @ioc: per adapter object
 * @handle: device handle
 * Context: interrupt time.
 *
 * This is designed to send multiple task management request at the same
 * time to the fifo. If the fifo is full, we will append the request,
 * and process it in a future completion.
 */
static void
_scsih_tm_tr_volume_send(struct MPT3SAS_ADAPTER *ioc, u16 handle)
{
	Mpi2SCSITaskManagementRequest_t *mpi_request;
	u16 smid;
	struct _tr_list *delayed_tr;

	if (ioc->pci_error_recovery) {
		dewtprintk(ioc,
		    ioc_info(ioc, "%s: host reset in progress!\n",
			     __func__));
		return;
	}

	/* no free high-priority smid: park the request on the volume TR
	 * list; it is replayed from _scsih_check_for_pending_tm()
	 */
	smid = mpt3sas_base_get_smid_hpr(ioc, ioc->tm_tr_volume_cb_idx);
	if (!smid) {
		delayed_tr = kzalloc(sizeof(*delayed_tr), GFP_ATOMIC);
		if (!delayed_tr)
			return;
		INIT_LIST_HEAD(&delayed_tr->list);
		delayed_tr->handle = handle;
		list_add_tail(&delayed_tr->list, &ioc->delayed_tr_volume_list);
		dewtprintk(ioc,
		    ioc_info(ioc, "DELAYED:tr:handle(0x%04x), (open)\n",
			     handle));
		return;
	}

	dewtprintk(ioc,
	    ioc_info(ioc, "tr_send:handle(0x%04x), (open), smid(%d), cb(%d)\n",
		     handle, smid, ioc->tm_tr_volume_cb_idx));
	mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
	memset(mpi_request, 0, sizeof(Mpi2SCSITaskManagementRequest_t));
	mpi_request->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
	mpi_request->DevHandle = cpu_to_le16(handle);
	mpi_request->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET;
	ioc->put_smid_hi_priority(ioc, smid, 0);
}

/**
 * _scsih_tm_volume_tr_complete - target reset completion
 * @ioc: per adapter object
 * @smid: system request message index
 * @msix_index: MSIX table index supplied by the OS
 * @reply: reply message frame(lower 32bit addr)
 * Context: interrupt time.
 *
 * Return: 1 meaning mf should be freed from _base_interrupt
 *	   0 means the mf is freed from this function.
 */
static u8
_scsih_tm_volume_tr_complete(struct MPT3SAS_ADAPTER *ioc, u16 smid,
	u8 msix_index, u32 reply)
{
	u16 handle;
	Mpi2SCSITaskManagementRequest_t *mpi_request_tm;
	Mpi2SCSITaskManagementReply_t *mpi_reply =
	    mpt3sas_base_get_reply_virt_addr(ioc, reply);

	if (ioc->shost_recovery || ioc->pci_error_recovery) {
		dewtprintk(ioc,
		    ioc_info(ioc, "%s: host reset in progress!\n",
			     __func__));
		return 1;
	}
	if (unlikely(!mpi_reply)) {
		ioc_err(ioc, "mpi_reply not valid at %s:%d/%s()!\n",
			__FILE__, __LINE__, __func__);
		return 1;
	}

	/* sanity: reply must match the request frame at this smid */
	mpi_request_tm = mpt3sas_base_get_msg_frame(ioc, smid);
	handle = le16_to_cpu(mpi_request_tm->DevHandle);
	if (handle != le16_to_cpu(mpi_reply->DevHandle)) {
		dewtprintk(ioc,
		    ioc_err(ioc, "spurious interrupt: handle(0x%04x:0x%04x), smid(%d)!!!\n",
			    handle, le16_to_cpu(mpi_reply->DevHandle),
			    smid));
		return 0;
	}

	dewtprintk(ioc,
	    ioc_info(ioc, "tr_complete:handle(0x%04x), (open) smid(%d), ioc_status(0x%04x), loginfo(0x%08x), completed(%d)\n",
		     handle, smid, le16_to_cpu(mpi_reply->IOCStatus),
		     le32_to_cpu(mpi_reply->IOCLogInfo),
		     le32_to_cpu(mpi_reply->TerminationCount)));

	/* kick any queued (delayed) target resets using this smid */
	return _scsih_check_for_pending_tm(ioc, smid);
}

/**
 * _scsih_issue_delayed_event_ack - issue delayed Event ACK messages
 * @ioc: per adapter object
 * @smid: system request message index
 * @event: Event ID
 * @event_context: used to track events uniquely
 *
 * Context - processed in interrupt context.
 */
static void
_scsih_issue_delayed_event_ack(struct MPT3SAS_ADAPTER *ioc, u16 smid, U16 event,
	U32 event_context)
{
	Mpi2EventAckRequest_t *ack_request;
	int i = smid - ioc->internal_smid;
	unsigned long flags;

	/* Without releasing the smid just update the
	 * call back index and reuse the same smid for
	 * processing this delayed request
	 */
	spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
	ioc->internal_lookup[i].cb_idx = ioc->base_cb_idx;
	spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);

	dewtprintk(ioc,
	    ioc_info(ioc, "EVENT ACK: event(0x%04x), smid(%d), cb(%d)\n",
		     le16_to_cpu(event), smid, ioc->base_cb_idx));
	ack_request = mpt3sas_base_get_msg_frame(ioc, smid);
	memset(ack_request, 0, sizeof(Mpi2EventAckRequest_t));
	ack_request->Function = MPI2_FUNCTION_EVENT_ACK;
	/* event/event_context are already little-endian (U16/U32 from the
	 * original event frame), so they are copied without conversion
	 */
	ack_request->Event = event;
	ack_request->EventContext = event_context;
	ack_request->VF_ID = 0;  /* TODO */
	ack_request->VP_ID = 0;
	ioc->put_smid_default(ioc, smid);
}

/**
 * _scsih_issue_delayed_sas_io_unit_ctrl - issue delayed
 *	sas_io_unit_ctrl messages
 * @ioc: per adapter object
 * @smid: system request message index
 * @handle: device handle
 *
 * Context - processed in interrupt context.
 */
static void
_scsih_issue_delayed_sas_io_unit_ctrl(struct MPT3SAS_ADAPTER *ioc,
	u16 smid, u16 handle)
{
	Mpi2SasIoUnitControlRequest_t *mpi_request;
	u32 ioc_state;
	int i = smid - ioc->internal_smid;
	unsigned long flags;

	/* skip the removal handshake when the adapter is gone or unusable */
	if (ioc->remove_host) {
		dewtprintk(ioc,
		    ioc_info(ioc, "%s: host has been removed\n",
			     __func__));
		return;
	} else if (ioc->pci_error_recovery) {
		dewtprintk(ioc,
		    ioc_info(ioc, "%s: host in pci error recovery\n",
			     __func__));
		return;
	}
	ioc_state = mpt3sas_base_get_iocstate(ioc, 1);
	if (ioc_state != MPI2_IOC_STATE_OPERATIONAL) {
		dewtprintk(ioc,
		    ioc_info(ioc, "%s: host is not operational\n",
			     __func__));
		return;
	}

	/* Without releasing the smid just update the
	 * call back index and reuse the same smid for
	 * processing this delayed request
	 */
	spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
	ioc->internal_lookup[i].cb_idx = ioc->tm_sas_control_cb_idx;
	spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);

	dewtprintk(ioc,
	    ioc_info(ioc, "sc_send:handle(0x%04x), (open), smid(%d), cb(%d)\n",
		     handle, smid, ioc->tm_sas_control_cb_idx));
	mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
	memset(mpi_request, 0, sizeof(Mpi2SasIoUnitControlRequest_t));
	mpi_request->Function = MPI2_FUNCTION_SAS_IO_UNIT_CONTROL;
	mpi_request->Operation = MPI2_SAS_OP_REMOVE_DEVICE;
	mpi_request->DevHandle = cpu_to_le16(handle);
	ioc->put_smid_default(ioc, smid);
}

/**
 * mpt3sas_check_for_pending_internal_cmds - check for pending internal messages
 * @ioc: per adapter object
 * @smid: system request message index
 *
 * Context: Executed in interrupt context
 *
 * This will check delayed internal messages list, and process the
 * next request.
 *
 * Return: 1 meaning mf should be freed from _base_interrupt
 *	   0 means the mf is freed from this function.
 */
u8
mpt3sas_check_for_pending_internal_cmds(struct MPT3SAS_ADAPTER *ioc, u16 smid)
{
	struct _sc_list *delayed_sc;
	struct _event_ack_list *delayed_event_ack;

	/* delayed event ACKs take priority; the smid is reused in place
	 * (no free/re-alloc), so return 0: the mf must not be freed here
	 */
	if (!list_empty(&ioc->delayed_event_ack_list)) {
		delayed_event_ack = list_entry(ioc->delayed_event_ack_list.next,
		    struct _event_ack_list, list);
		_scsih_issue_delayed_event_ack(ioc, smid,
		    delayed_event_ack->Event, delayed_event_ack->EventContext);
		list_del(&delayed_event_ack->list);
		kfree(delayed_event_ack);
		return 0;
	}

	/* then any queued SAS IO unit control (device remove) requests */
	if (!list_empty(&ioc->delayed_sc_list)) {
		delayed_sc = list_entry(ioc->delayed_sc_list.next,
		    struct _sc_list, list);
		_scsih_issue_delayed_sas_io_unit_ctrl(ioc, smid,
		    delayed_sc->handle);
		list_del(&delayed_sc->list);
		kfree(delayed_sc);
		return 0;
	}
	/* nothing pending: caller's interrupt path frees the mf */
	return 1;
}

/**
 * _scsih_check_for_pending_tm - check for pending task management
 * @ioc: per adapter object
 * @smid: system request message index
 *
 * This will check delayed target reset list, and feed the
 * next request.
 *
 * Return: 1 meaning mf should be freed from _base_interrupt
 *	   0 means the mf is freed from this function.
 */
static u8
_scsih_check_for_pending_tm(struct MPT3SAS_ADAPTER *ioc, u16 smid)
{
	struct _tr_list *delayed_tr;

	/* volume target resets are replayed first; the current smid is
	 * freed and the send path acquires a fresh high-priority smid
	 */
	if (!list_empty(&ioc->delayed_tr_volume_list)) {
		delayed_tr = list_entry(ioc->delayed_tr_volume_list.next,
		    struct _tr_list, list);
		mpt3sas_base_free_smid(ioc, smid);
		_scsih_tm_tr_volume_send(ioc, delayed_tr->handle);
		list_del(&delayed_tr->list);
		kfree(delayed_tr);
		return 0;
	}

	/* then delayed device target resets */
	if (!list_empty(&ioc->delayed_tr_list)) {
		delayed_tr = list_entry(ioc->delayed_tr_list.next,
		    struct _tr_list, list);
		mpt3sas_base_free_smid(ioc, smid);
		_scsih_tm_tr_send(ioc, delayed_tr->handle);
		list_del(&delayed_tr->list);
		kfree(delayed_tr);
		return 0;
	}

	/* nothing pending: caller's interrupt path frees the mf */
	return 1;
}

/**
 * _scsih_check_topo_delete_events - sanity check on topo events
 * @ioc: per adapter object
 * @event_data: the event data payload
 *
 * This routine added to better handle cable breaker.
 *
 * This handles the case where driver receives multiple expander
 * add and delete events in a single shot. When there is a delete event
 * the routine will void any pending add events waiting in the event queue.
 */
static void
_scsih_check_topo_delete_events(struct MPT3SAS_ADAPTER *ioc,
	Mpi2EventDataSasTopologyChangeList_t *event_data)
{
	struct fw_event_work *fw_event;
	Mpi2EventDataSasTopologyChangeList_t *local_event_data;
	u16 expander_handle;
	struct _sas_node *sas_expander;
	unsigned long flags;
	int i, reason_code;
	u16 handle;

	/* kick off the removal handshake for every device reported gone */
	for (i = 0 ; i < event_data->NumEntries; i++) {
		handle = le16_to_cpu(event_data->PHY[i].AttachedDevHandle);
		if (!handle)
			continue;
		reason_code = event_data->PHY[i].PhyStatus &
		    MPI2_EVENT_SAS_TOPO_RC_MASK;
		if (reason_code == MPI2_EVENT_SAS_TOPO_RC_TARG_NOT_RESPONDING)
			_scsih_tm_tr_send(ioc, handle);
	}

	/* a handle below num_phys means the event came from the HBA's own
	 * phys (direct attached), not from an expander
	 */
	expander_handle = le16_to_cpu(event_data->ExpanderDevHandle);
	if (expander_handle < ioc->sas_hba.num_phys) {
		_scsih_block_io_to_children_attached_directly(ioc, event_data);
		return;
	}
	if (event_data->ExpStatus ==
	    MPI2_EVENT_SAS_TOPO_ES_DELAY_NOT_RESPONDING) {
		/* put expander attached devices into blocking state */
		spin_lock_irqsave(&ioc->sas_node_lock, flags);
		sas_expander = mpt3sas_scsih_expander_find_by_handle(ioc,
		    expander_handle);
		_scsih_block_io_to_children_attached_to_ex(ioc, sas_expander);
		spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
		/* blocking_handles was populated under the lock above;
		 * drain it bit by bit, blocking each device
		 */
		do {
			handle = find_first_bit(ioc->blocking_handles,
			    ioc->facts.MaxDevHandle);
			if (handle < ioc->facts.MaxDevHandle)
				_scsih_block_io_device(ioc, handle);
		} while (test_and_clear_bit(handle, ioc->blocking_handles));
	} else if (event_data->ExpStatus == MPI2_EVENT_SAS_TOPO_ES_RESPONDING)
		_scsih_block_io_to_children_attached_directly(ioc, event_data);

	/* only a fully non-responding expander voids queued add events */
	if (event_data->ExpStatus != MPI2_EVENT_SAS_TOPO_ES_NOT_RESPONDING)
		return;

	/* mark ignore flag for pending events */
	spin_lock_irqsave(&ioc->fw_event_lock, flags);
	list_for_each_entry(fw_event, &ioc->fw_event_list, list) {
		if (fw_event->event != MPI2_EVENT_SAS_TOPOLOGY_CHANGE_LIST ||
		    fw_event->ignore)
			continue;
		local_event_data = (Mpi2EventDataSasTopologyChangeList_t *)
		    fw_event->event_data;
		if (local_event_data->ExpStatus ==
		    MPI2_EVENT_SAS_TOPO_ES_ADDED ||
		    local_event_data->ExpStatus ==
		    MPI2_EVENT_SAS_TOPO_ES_RESPONDING) {
			if (le16_to_cpu(local_event_data->ExpanderDevHandle) ==
			    expander_handle) {
				dewtprintk(ioc,
				    ioc_info(ioc, "setting ignoring flag\n"));
				fw_event->ignore = 1;
			}
		}
	}
	spin_unlock_irqrestore(&ioc->fw_event_lock, flags);
}

/**
 * _scsih_check_pcie_topo_remove_events - sanity check on topo
 *	events
 * @ioc: per adapter object
 * @event_data: the event data payload
 *
 * This handles the case where driver receives multiple switch
 * or device add and delete events in a single shot. When there
 * is a delete event the routine will void any pending add
 * events waiting in the event queue.
 */
static void
_scsih_check_pcie_topo_remove_events(struct MPT3SAS_ADAPTER *ioc,
	Mpi26EventDataPCIeTopologyChangeList_t *event_data)
{
	struct fw_event_work *fw_event;
	Mpi26EventDataPCIeTopologyChangeList_t *local_event_data;
	unsigned long flags;
	int i, reason_code;
	u16 handle, switch_handle;

	/* kick off the removal handshake for every device reported gone */
	for (i = 0; i < event_data->NumEntries; i++) {
		handle =
		    le16_to_cpu(event_data->PortEntry[i].AttachedDevHandle);
		if (!handle)
			continue;
		reason_code = event_data->PortEntry[i].PortStatus;
		if (reason_code == MPI26_EVENT_PCIE_TOPO_PS_NOT_RESPONDING)
			_scsih_tm_tr_send(ioc, handle);
	}

	/* no switch handle: devices are attached directly to the HBA */
	switch_handle = le16_to_cpu(event_data->SwitchDevHandle);
	if (!switch_handle) {
		_scsih_block_io_to_pcie_children_attached_directly(
		    ioc, event_data);
		return;
	}
	/* TODO We are not supporting cascaded PCIe Switch removal yet*/
	if ((event_data->SwitchStatus
	    == MPI26_EVENT_PCIE_TOPO_SS_DELAY_NOT_RESPONDING) ||
	    (event_data->SwitchStatus ==
	    MPI26_EVENT_PCIE_TOPO_SS_RESPONDING))
		_scsih_block_io_to_pcie_children_attached_directly(
		    ioc, event_data);

	/* NOTE(review): the comparisons below use the SAS topology constants
	 * (MPI2_EVENT_SAS_TOPO_ES_*) against a PCIe SwitchStatus field —
	 * presumably the numeric values coincide with the PCIe SS_* codes;
	 * confirm against the MPI 2.6 headers before changing.
	 */
	if (event_data->SwitchStatus != MPI2_EVENT_SAS_TOPO_ES_NOT_RESPONDING)
		return;

	/* mark ignore flag for pending events */
	spin_lock_irqsave(&ioc->fw_event_lock, flags);
	list_for_each_entry(fw_event, &ioc->fw_event_list, list) {
		if (fw_event->event != MPI2_EVENT_PCIE_TOPOLOGY_CHANGE_LIST ||
		    fw_event->ignore)
			continue;
		local_event_data =
		    (Mpi26EventDataPCIeTopologyChangeList_t *)
		    fw_event->event_data;
		if (local_event_data->SwitchStatus ==
		    MPI2_EVENT_SAS_TOPO_ES_ADDED ||
		    local_event_data->SwitchStatus ==
		    MPI2_EVENT_SAS_TOPO_ES_RESPONDING) {
			if (le16_to_cpu(local_event_data->SwitchDevHandle) ==
			    switch_handle) {
				dewtprintk(ioc,
				    ioc_info(ioc, "setting ignoring flag for switch event\n"));
				fw_event->ignore = 1;
			}
		}
	}
	spin_unlock_irqrestore(&ioc->fw_event_lock, flags);
}

/**
 * _scsih_set_volume_delete_flag - setting volume delete flag
 * @ioc: per adapter object
 * @handle: device handle
 *
 * This returns nothing.
 */
static void
_scsih_set_volume_delete_flag(struct MPT3SAS_ADAPTER *ioc, u16 handle)
{
	struct _raid_device *raid_device;
	struct MPT3SAS_TARGET *sas_target_priv_data;
	unsigned long flags;

	/* mark the volume's target deleted so no further I/O is issued;
	 * all accesses stay under raid_device_lock
	 */
	spin_lock_irqsave(&ioc->raid_device_lock, flags);
	raid_device = mpt3sas_raid_device_find_by_handle(ioc, handle);
	if (raid_device && raid_device->starget &&
	    raid_device->starget->hostdata) {
		sas_target_priv_data =
		    raid_device->starget->hostdata;
		sas_target_priv_data->deleted = 1;
		dewtprintk(ioc,
		    ioc_info(ioc, "setting delete flag: handle(0x%04x), wwid(0x%016llx)\n",
			     handle, (u64)raid_device->wwid));
	}
	spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
}

/**
 * _scsih_set_volume_handle_for_tr - set handle for target reset to volume
 * @handle: input handle
 * @a: handle for volume a
 * @b: handle for volume b
 *
 * IR firmware only supports two raid volumes. The purpose of this
 * routine is to set the volume handle in either a or b. When the given
 * input handle is non-zero, or when a and b have not been set before.
 */
static void
_scsih_set_volume_handle_for_tr(u16 handle, u16 *a, u16 *b)
{
	/* ignore zero handles and duplicates already recorded in a or b */
	if (!handle || handle == *a || handle == *b)
		return;
	if (!*a)
		*a = handle;
	else if (!*b)
		*b = handle;
}

/**
 * _scsih_check_ir_config_unhide_events - check for UNHIDE events
 * @ioc: per adapter object
 * @event_data: the event data payload
 * Context: interrupt time.
 *
 * This routine will send target reset to volume, followed by target
 * resets to the PDs.
 * This is called when a PD has been removed, or
 * volume has been deleted or removed. When the target reset is sent
 * to volume, the PD target resets need to be queued to start upon
 * completion of the volume target reset.
 */
static void
_scsih_check_ir_config_unhide_events(struct MPT3SAS_ADAPTER *ioc,
	Mpi2EventDataIrConfigChangeList_t *event_data)
{
	Mpi2EventIrConfigElement_t *element;
	int i;
	u16 handle, volume_handle, a, b;
	struct _tr_list *delayed_tr;

	/* a/b hold the (at most two) volume handles needing a target reset */
	a = 0;
	b = 0;

	if (ioc->is_warpdrive)
		return;

	/* Volume Resets for Deleted or Removed */
	element = (Mpi2EventIrConfigElement_t *)&event_data->ConfigElement[0];
	for (i = 0; i < event_data->NumElements; i++, element++) {
		if (le32_to_cpu(event_data->Flags) &
		    MPI2_EVENT_IR_CHANGE_FLAGS_FOREIGN_CONFIG)
			continue;
		if (element->ReasonCode ==
		    MPI2_EVENT_IR_CHANGE_RC_VOLUME_DELETED ||
		    element->ReasonCode ==
		    MPI2_EVENT_IR_CHANGE_RC_REMOVED) {
			volume_handle = le16_to_cpu(element->VolDevHandle);
			_scsih_set_volume_delete_flag(ioc, volume_handle);
			_scsih_set_volume_handle_for_tr(volume_handle, &a, &b);
		}
	}

	/* Volume Resets for UNHIDE events */
	element = (Mpi2EventIrConfigElement_t *)&event_data->ConfigElement[0];
	for (i = 0; i < event_data->NumElements; i++, element++) {
		if (le32_to_cpu(event_data->Flags) &
		    MPI2_EVENT_IR_CHANGE_FLAGS_FOREIGN_CONFIG)
			continue;
		if (element->ReasonCode == MPI2_EVENT_IR_CHANGE_RC_UNHIDE) {
			volume_handle = le16_to_cpu(element->VolDevHandle);
			_scsih_set_volume_handle_for_tr(volume_handle, &a, &b);
		}
	}

	if (a)
		_scsih_tm_tr_volume_send(ioc, a);
	if (b)
		_scsih_tm_tr_volume_send(ioc, b);

	/* PD target resets */
	element = (Mpi2EventIrConfigElement_t *)&event_data->ConfigElement[0];
	for (i = 0; i < event_data->NumElements; i++, element++) {
		if (element->ReasonCode != MPI2_EVENT_IR_CHANGE_RC_UNHIDE)
			continue;
		handle = le16_to_cpu(element->PhysDiskDevHandle);
		volume_handle = le16_to_cpu(element->VolDevHandle);
		clear_bit(handle, ioc->pd_handles);
		if (!volume_handle)
			_scsih_tm_tr_send(ioc, handle);
		else if (volume_handle == a || volume_handle == b) {
			/* PD belongs to a volume being reset: queue its TR
			 * to run after the volume TR completes.
			 * NOTE(review): BUG_ON after a GFP_ATOMIC alloc will
			 * crash the kernel on allocation failure in interrupt
			 * context — consider a graceful fallback; verify
			 * against current upstream before changing.
			 */
			delayed_tr = kzalloc(sizeof(*delayed_tr), GFP_ATOMIC);
			BUG_ON(!delayed_tr);
			INIT_LIST_HEAD(&delayed_tr->list);
			delayed_tr->handle = handle;
			list_add_tail(&delayed_tr->list, &ioc->delayed_tr_list);
			dewtprintk(ioc,
			    ioc_info(ioc, "DELAYED:tr:handle(0x%04x), (open)\n",
				     handle));
		} else
			_scsih_tm_tr_send(ioc, handle);
	}
}


/**
 * _scsih_check_volume_delete_events - set delete flag for volumes
 * @ioc: per adapter object
 * @event_data: the event data payload
 * Context: interrupt time.
 *
 * This will handle the case when the cable connected to entire volume is
 * pulled. We will take care of setting the deleted flag so normal IO will
 * not be sent.
 */
static void
_scsih_check_volume_delete_events(struct MPT3SAS_ADAPTER *ioc,
	Mpi2EventDataIrVolume_t *event_data)
{
	u32 state;

	/* only volume state transitions are of interest here */
	if (event_data->ReasonCode != MPI2_EVENT_IR_VOLUME_RC_STATE_CHANGED)
		return;
	state = le32_to_cpu(event_data->NewValue);
	if (state == MPI2_RAID_VOL_STATE_MISSING || state ==
	    MPI2_RAID_VOL_STATE_FAILED)
		_scsih_set_volume_delete_flag(ioc,
		    le16_to_cpu(event_data->VolDevHandle));
}

/**
 * _scsih_temp_threshold_events - display temperature threshold exceeded events
 * @ioc: per adapter object
 * @event_data: the temp threshold event data
 * Context: interrupt time.
 */
static void
_scsih_temp_threshold_events(struct MPT3SAS_ADAPTER *ioc,
	Mpi2EventDataTemperature_t *event_data)
{
	u32 doorbell;
	/* only report sensors this adapter actually exposes */
	if (ioc->temp_sensors_count >= event_data->SensorNum) {
		/* Status bits 0-3 indicate which of the four configured
		 * thresholds was exceeded
		 */
		ioc_err(ioc, "Temperature Threshold flags %s%s%s%s exceeded for Sensor: %d !!!\n",
			le16_to_cpu(event_data->Status) & 0x1 ? "0 " : " ",
			le16_to_cpu(event_data->Status) & 0x2 ? "1 " : " ",
			le16_to_cpu(event_data->Status) & 0x4 ? "2 " : " ",
			le16_to_cpu(event_data->Status) & 0x8 ? "3 " : " ",
			event_data->SensorNum);
		ioc_err(ioc, "Current Temp In Celsius: %d\n",
			event_data->CurrentTemperature);
		/* on gen3+ parts an over-temp may have faulted the IOC:
		 * read the doorbell and dump fault/coredump info if so
		 */
		if (ioc->hba_mpi_version_belonged != MPI2_VERSION) {
			doorbell = mpt3sas_base_get_iocstate(ioc, 0);
			if ((doorbell & MPI2_IOC_STATE_MASK) ==
			    MPI2_IOC_STATE_FAULT) {
				mpt3sas_print_fault_code(ioc,
				    doorbell & MPI2_DOORBELL_DATA_MASK);
			} else if ((doorbell & MPI2_IOC_STATE_MASK) ==
			    MPI2_IOC_STATE_COREDUMP) {
				mpt3sas_print_coredump_info(ioc,
				    doorbell & MPI2_DOORBELL_DATA_MASK);
			}
		}
	}
}

/**
 * _scsih_set_satl_pending - track an in-flight ATA passthrough command
 * @scmd: pointer to scsi command object
 * @pending: true to mark pending, false to clear
 *
 * Only one ATA_12/ATA_16 passthrough may be outstanding per device.
 * Returns the previous pending state when setting (non-zero means one
 * was already in flight); always returns 0 when clearing or when the
 * command is not an ATA passthrough.
 */
static int _scsih_set_satl_pending(struct scsi_cmnd *scmd, bool pending)
{
	struct MPT3SAS_DEVICE *priv = scmd->device->hostdata;

	if (scmd->cmnd[0] != ATA_12 && scmd->cmnd[0] != ATA_16)
		return 0;

	if (pending)
		return test_and_set_bit(0, &priv->ata_command_pending);

	clear_bit(0, &priv->ata_command_pending);
	return 0;
}

/**
 * _scsih_flush_running_cmds - completing outstanding commands.
 * @ioc: per adapter object
 *
 * The flushing out of all pending scmd commands following host reset,
 * where all IO is dropped to the floor.
 */
static void
_scsih_flush_running_cmds(struct MPT3SAS_ADAPTER *ioc)
{
	struct scsi_cmnd *scmd;
	struct scsiio_tracker *st;
	u16 smid;
	int count = 0;

	/* walk every SCSI I/O smid and complete whatever is outstanding */
	for (smid = 1; smid <= ioc->scsiio_depth; smid++) {
		scmd = mpt3sas_scsih_scsi_lookup_get(ioc, smid);
		if (!scmd)
			continue;
		count++;
		/* release any ATA passthrough serialization on this device */
		_scsih_set_satl_pending(scmd, false);
		st = scsi_cmd_priv(scmd);
		mpt3sas_base_clear_st(ioc, st);
		scsi_dma_unmap(scmd);
		/* DID_RESET lets the midlayer retry; DID_NO_CONNECT when the
		 * adapter is going away for good
		 */
		if (ioc->pci_error_recovery || ioc->remove_host)
			scmd->result = DID_NO_CONNECT << 16;
		else
			scmd->result = DID_RESET << 16;
		scsi_done(scmd);
	}
	dtmprintk(ioc, ioc_info(ioc, "completing %d cmds\n", count));
}

/**
 * _scsih_setup_eedp - setup MPI request for EEDP transfer
 * @ioc: per adapter object
 * @scmd: pointer to scsi command object
 * @mpi_request: pointer to the SCSI_IO request message frame
 *
 * Supporting protection 1 and 3.
 */
static void
_scsih_setup_eedp(struct MPT3SAS_ADAPTER *ioc, struct scsi_cmnd *scmd,
	Mpi25SCSIIORequest_t *mpi_request)
{
	u16 eedp_flags;
	Mpi25SCSIIORequest_t *mpi_request_3v =
	    (Mpi25SCSIIORequest_t *)mpi_request;

	/* only READ_STRIP / WRITE_INSERT protection ops are offloaded;
	 * everything else leaves the frame untouched
	 */
	switch (scsi_get_prot_op(scmd)) {
	case SCSI_PROT_READ_STRIP:
		eedp_flags = MPI2_SCSIIO_EEDPFLAGS_CHECK_REMOVE_OP;
		break;
	case SCSI_PROT_WRITE_INSERT:
		eedp_flags = MPI2_SCSIIO_EEDPFLAGS_INSERT_OP;
		break;
	default:
		return;
	}

	if (scmd->prot_flags & SCSI_PROT_GUARD_CHECK)
		eedp_flags |= MPI2_SCSIIO_EEDPFLAGS_CHECK_GUARD;

	if (scmd->prot_flags & SCSI_PROT_REF_CHECK)
		eedp_flags |= MPI2_SCSIIO_EEDPFLAGS_CHECK_REFTAG;

	if (scmd->prot_flags & SCSI_PROT_REF_INCREMENT) {
		eedp_flags |= MPI2_SCSIIO_EEDPFLAGS_INC_PRI_REFTAG;

		/* reference tag is big-endian in the CDB-embedded EEDP field */
		mpi_request->CDB.EEDP32.PrimaryReferenceTag =
		    cpu_to_be32(scsi_prot_ref_tag(scmd));
	}

	mpi_request_3v->EEDPBlockSize = cpu_to_le16(scsi_prot_interval(scmd));

	if (ioc->is_gen35_ioc)
		eedp_flags |= MPI25_SCSIIO_EEDPFLAGS_APPTAG_DISABLE_MODE;
	mpi_request->EEDPFlags = cpu_to_le16(eedp_flags);
}

/**
 * _scsih_eedp_error_handling - return sense code for EEDP errors
 * @scmd: pointer to scsi command object
 * @ioc_status: ioc status
 *
 * Builds ILLEGAL_REQUEST sense (ASC 0x10) with the ASCQ mapped from the
 * specific EEDP failure, and sets DID_ABORT in the host byte.
 */
static void
_scsih_eedp_error_handling(struct scsi_cmnd *scmd, u16 ioc_status)
{
	u8 ascq;

	switch (ioc_status) {
	case MPI2_IOCSTATUS_EEDP_GUARD_ERROR:
		ascq = 0x01;
		break;
	case MPI2_IOCSTATUS_EEDP_APP_TAG_ERROR:
		ascq = 0x02;
		break;
	case MPI2_IOCSTATUS_EEDP_REF_TAG_ERROR:
		ascq = 0x03;
		break;
	default:
		ascq = 0x00;
		break;
	}
	scsi_build_sense(scmd, 0, ILLEGAL_REQUEST, 0x10, ascq);
	set_host_byte(scmd, DID_ABORT);
}

/**
 * scsih_qcmd - main scsi request entry point
 * @shost: SCSI host pointer
 * @scmd: pointer to scsi command object
 *
 * The callback index is set inside `ioc->scsi_io_cb_idx`.
 *
 * Return: 0 on success.
If there's a failure, return either: 5119 * SCSI_MLQUEUE_DEVICE_BUSY if the device queue is full, or 5120 * SCSI_MLQUEUE_HOST_BUSY if the entire host queue is full 5121 */ 5122 static int 5123 scsih_qcmd(struct Scsi_Host *shost, struct scsi_cmnd *scmd) 5124 { 5125 struct MPT3SAS_ADAPTER *ioc = shost_priv(shost); 5126 struct MPT3SAS_DEVICE *sas_device_priv_data; 5127 struct MPT3SAS_TARGET *sas_target_priv_data; 5128 struct _raid_device *raid_device; 5129 struct request *rq = scsi_cmd_to_rq(scmd); 5130 int class; 5131 Mpi25SCSIIORequest_t *mpi_request; 5132 struct _pcie_device *pcie_device = NULL; 5133 u32 mpi_control; 5134 u16 smid; 5135 u16 handle; 5136 5137 if (ioc->logging_level & MPT_DEBUG_SCSI) 5138 scsi_print_command(scmd); 5139 5140 sas_device_priv_data = scmd->device->hostdata; 5141 if (!sas_device_priv_data || !sas_device_priv_data->sas_target) { 5142 scmd->result = DID_NO_CONNECT << 16; 5143 scsi_done(scmd); 5144 return 0; 5145 } 5146 5147 if (!(_scsih_allow_scmd_to_device(ioc, scmd))) { 5148 scmd->result = DID_NO_CONNECT << 16; 5149 scsi_done(scmd); 5150 return 0; 5151 } 5152 5153 sas_target_priv_data = sas_device_priv_data->sas_target; 5154 5155 /* invalid device handle */ 5156 handle = sas_target_priv_data->handle; 5157 5158 /* 5159 * Avoid error handling escallation when device is disconnected 5160 */ 5161 if (handle == MPT3SAS_INVALID_DEVICE_HANDLE || sas_device_priv_data->block) { 5162 if (scmd->device->host->shost_state == SHOST_RECOVERY && 5163 scmd->cmnd[0] == TEST_UNIT_READY) { 5164 scsi_build_sense(scmd, 0, UNIT_ATTENTION, 0x29, 0x07); 5165 scsi_done(scmd); 5166 return 0; 5167 } 5168 } 5169 5170 if (handle == MPT3SAS_INVALID_DEVICE_HANDLE) { 5171 scmd->result = DID_NO_CONNECT << 16; 5172 scsi_done(scmd); 5173 return 0; 5174 } 5175 5176 5177 if (ioc->shost_recovery || ioc->ioc_link_reset_in_progress) { 5178 /* host recovery or link resets sent via IOCTLs */ 5179 return SCSI_MLQUEUE_HOST_BUSY; 5180 } else if (sas_target_priv_data->deleted) { 5181 
/* device has been deleted */ 5182 scmd->result = DID_NO_CONNECT << 16; 5183 scsi_done(scmd); 5184 return 0; 5185 } else if (sas_target_priv_data->tm_busy || 5186 sas_device_priv_data->block) { 5187 /* device busy with task management */ 5188 return SCSI_MLQUEUE_DEVICE_BUSY; 5189 } 5190 5191 /* 5192 * Bug work around for firmware SATL handling. The loop 5193 * is based on atomic operations and ensures consistency 5194 * since we're lockless at this point 5195 */ 5196 do { 5197 if (test_bit(0, &sas_device_priv_data->ata_command_pending)) 5198 return SCSI_MLQUEUE_DEVICE_BUSY; 5199 } while (_scsih_set_satl_pending(scmd, true)); 5200 5201 if (scmd->sc_data_direction == DMA_FROM_DEVICE) 5202 mpi_control = MPI2_SCSIIO_CONTROL_READ; 5203 else if (scmd->sc_data_direction == DMA_TO_DEVICE) 5204 mpi_control = MPI2_SCSIIO_CONTROL_WRITE; 5205 else 5206 mpi_control = MPI2_SCSIIO_CONTROL_NODATATRANSFER; 5207 5208 /* set tags */ 5209 mpi_control |= MPI2_SCSIIO_CONTROL_SIMPLEQ; 5210 /* NCQ Prio supported, make sure control indicated high priority */ 5211 if (sas_device_priv_data->ncq_prio_enable) { 5212 class = IOPRIO_PRIO_CLASS(req_get_ioprio(rq)); 5213 if (class == IOPRIO_CLASS_RT) 5214 mpi_control |= 1 << MPI2_SCSIIO_CONTROL_CMDPRI_SHIFT; 5215 } 5216 /* Make sure Device is not raid volume. 5217 * We do not expose raid functionality to upper layer for warpdrive. 
5218 */ 5219 if (((!ioc->is_warpdrive && !scsih_is_raid(&scmd->device->sdev_gendev)) 5220 && !scsih_is_nvme(&scmd->device->sdev_gendev)) 5221 && sas_is_tlr_enabled(scmd->device) && scmd->cmd_len != 32) 5222 mpi_control |= MPI2_SCSIIO_CONTROL_TLR_ON; 5223 5224 smid = mpt3sas_base_get_smid_scsiio(ioc, ioc->scsi_io_cb_idx, scmd); 5225 if (!smid) { 5226 ioc_err(ioc, "%s: failed obtaining a smid\n", __func__); 5227 _scsih_set_satl_pending(scmd, false); 5228 goto out; 5229 } 5230 mpi_request = mpt3sas_base_get_msg_frame(ioc, smid); 5231 memset(mpi_request, 0, ioc->request_sz); 5232 _scsih_setup_eedp(ioc, scmd, mpi_request); 5233 5234 if (scmd->cmd_len == 32) 5235 mpi_control |= 4 << MPI2_SCSIIO_CONTROL_ADDCDBLEN_SHIFT; 5236 mpi_request->Function = MPI2_FUNCTION_SCSI_IO_REQUEST; 5237 if (sas_device_priv_data->sas_target->flags & 5238 MPT_TARGET_FLAGS_RAID_COMPONENT) 5239 mpi_request->Function = MPI2_FUNCTION_RAID_SCSI_IO_PASSTHROUGH; 5240 else 5241 mpi_request->Function = MPI2_FUNCTION_SCSI_IO_REQUEST; 5242 mpi_request->DevHandle = cpu_to_le16(handle); 5243 mpi_request->DataLength = cpu_to_le32(scsi_bufflen(scmd)); 5244 mpi_request->Control = cpu_to_le32(mpi_control); 5245 mpi_request->IoFlags = cpu_to_le16(scmd->cmd_len); 5246 mpi_request->MsgFlags = MPI2_SCSIIO_MSGFLAGS_SYSTEM_SENSE_ADDR; 5247 mpi_request->SenseBufferLength = SCSI_SENSE_BUFFERSIZE; 5248 mpi_request->SenseBufferLowAddress = 5249 mpt3sas_base_get_sense_buffer_dma(ioc, smid); 5250 mpi_request->SGLOffset0 = offsetof(Mpi25SCSIIORequest_t, SGL) / 4; 5251 int_to_scsilun(sas_device_priv_data->lun, (struct scsi_lun *) 5252 mpi_request->LUN); 5253 memcpy(mpi_request->CDB.CDB32, scmd->cmnd, scmd->cmd_len); 5254 5255 if (mpi_request->DataLength) { 5256 pcie_device = sas_target_priv_data->pcie_dev; 5257 if (ioc->build_sg_scmd(ioc, scmd, smid, pcie_device)) { 5258 mpt3sas_base_free_smid(ioc, smid); 5259 _scsih_set_satl_pending(scmd, false); 5260 goto out; 5261 } 5262 } else 5263 ioc->build_zero_len_sge(ioc, 
&mpi_request->SGL); 5264 5265 raid_device = sas_target_priv_data->raid_device; 5266 if (raid_device && raid_device->direct_io_enabled) 5267 mpt3sas_setup_direct_io(ioc, scmd, 5268 raid_device, mpi_request); 5269 5270 if (likely(mpi_request->Function == MPI2_FUNCTION_SCSI_IO_REQUEST)) { 5271 if (sas_target_priv_data->flags & MPT_TARGET_FASTPATH_IO) { 5272 mpi_request->IoFlags = cpu_to_le16(scmd->cmd_len | 5273 MPI25_SCSIIO_IOFLAGS_FAST_PATH); 5274 ioc->put_smid_fast_path(ioc, smid, handle); 5275 } else 5276 ioc->put_smid_scsi_io(ioc, smid, 5277 le16_to_cpu(mpi_request->DevHandle)); 5278 } else 5279 ioc->put_smid_default(ioc, smid); 5280 return 0; 5281 5282 out: 5283 return SCSI_MLQUEUE_HOST_BUSY; 5284 } 5285 5286 /** 5287 * _scsih_normalize_sense - normalize descriptor and fixed format sense data 5288 * @sense_buffer: sense data returned by target 5289 * @data: normalized skey/asc/ascq 5290 */ 5291 static void 5292 _scsih_normalize_sense(char *sense_buffer, struct sense_info *data) 5293 { 5294 if ((sense_buffer[0] & 0x7F) >= 0x72) { 5295 /* descriptor format */ 5296 data->skey = sense_buffer[1] & 0x0F; 5297 data->asc = sense_buffer[2]; 5298 data->ascq = sense_buffer[3]; 5299 } else { 5300 /* fixed format */ 5301 data->skey = sense_buffer[2] & 0x0F; 5302 data->asc = sense_buffer[12]; 5303 data->ascq = sense_buffer[13]; 5304 } 5305 } 5306 5307 /** 5308 * _scsih_scsi_ioc_info - translated non-successful SCSI_IO request 5309 * @ioc: per adapter object 5310 * @scmd: pointer to scsi command object 5311 * @mpi_reply: reply mf payload returned from firmware 5312 * @smid: ? 
 *
 * scsi_status - SCSI Status code returned from target device
 * scsi_state - state info associated with SCSI_IO determined by ioc
 * ioc_status - ioc supplied status info
 */
static void
_scsih_scsi_ioc_info(struct MPT3SAS_ADAPTER *ioc, struct scsi_cmnd *scmd,
	Mpi2SCSIIOReply_t *mpi_reply, u16 smid)
{
	u32 response_info;
	u8 *response_bytes;
	u16 ioc_status = le16_to_cpu(mpi_reply->IOCStatus) &
	    MPI2_IOCSTATUS_MASK;
	u8 scsi_state = mpi_reply->SCSIState;
	u8 scsi_status = mpi_reply->SCSIStatus;
	char *desc_ioc_state = NULL;
	char *desc_scsi_status = NULL;
	char *desc_scsi_state = ioc->tmp_string;
	u32 log_info = le32_to_cpu(mpi_reply->IOCLogInfo);
	struct _sas_device *sas_device = NULL;
	struct _pcie_device *pcie_device = NULL;
	struct scsi_target *starget = scmd->device->sdev_target;
	struct MPT3SAS_TARGET *priv_target = starget->hostdata;
	char *device_str = NULL;

	if (!priv_target)
		return;
	if (ioc->hide_ir_msg)
		device_str = "WarpDrive";
	else
		device_str = "volume";

	/* loginfo 0x31170000 is suppressed from the log (no decode emitted) */
	if (log_info == 0x31170000)
		return;

	/* Translate the raw IOCStatus into a human-readable string. */
	switch (ioc_status) {
	case MPI2_IOCSTATUS_SUCCESS:
		desc_ioc_state = "success";
		break;
	case MPI2_IOCSTATUS_INVALID_FUNCTION:
		desc_ioc_state = "invalid function";
		break;
	case MPI2_IOCSTATUS_SCSI_RECOVERED_ERROR:
		desc_ioc_state = "scsi recovered error";
		break;
	case MPI2_IOCSTATUS_SCSI_INVALID_DEVHANDLE:
		desc_ioc_state = "scsi invalid dev handle";
		break;
	case MPI2_IOCSTATUS_SCSI_DEVICE_NOT_THERE:
		desc_ioc_state = "scsi device not there";
		break;
	case MPI2_IOCSTATUS_SCSI_DATA_OVERRUN:
		desc_ioc_state = "scsi data overrun";
		break;
	case MPI2_IOCSTATUS_SCSI_DATA_UNDERRUN:
		desc_ioc_state = "scsi data underrun";
		break;
	case MPI2_IOCSTATUS_SCSI_IO_DATA_ERROR:
		desc_ioc_state = "scsi io data error";
		break;
	case MPI2_IOCSTATUS_SCSI_PROTOCOL_ERROR:
		desc_ioc_state = "scsi protocol error";
		break;
	case MPI2_IOCSTATUS_SCSI_TASK_TERMINATED:
		desc_ioc_state = "scsi task terminated";
		break;
	case MPI2_IOCSTATUS_SCSI_RESIDUAL_MISMATCH:
		desc_ioc_state = "scsi residual mismatch";
		break;
	case MPI2_IOCSTATUS_SCSI_TASK_MGMT_FAILED:
		desc_ioc_state = "scsi task mgmt failed";
		break;
	case MPI2_IOCSTATUS_SCSI_IOC_TERMINATED:
		desc_ioc_state = "scsi ioc terminated";
		break;
	case MPI2_IOCSTATUS_SCSI_EXT_TERMINATED:
		desc_ioc_state = "scsi ext terminated";
		break;
	case MPI2_IOCSTATUS_EEDP_GUARD_ERROR:
		desc_ioc_state = "eedp guard error";
		break;
	case MPI2_IOCSTATUS_EEDP_REF_TAG_ERROR:
		desc_ioc_state = "eedp ref tag error";
		break;
	case MPI2_IOCSTATUS_EEDP_APP_TAG_ERROR:
		desc_ioc_state = "eedp app tag error";
		break;
	case MPI2_IOCSTATUS_INSUFFICIENT_POWER:
		desc_ioc_state = "insufficient power";
		break;
	default:
		desc_ioc_state = "unknown";
		break;
	}

	/* Translate the SCSI status byte into a human-readable string. */
	switch (scsi_status) {
	case MPI2_SCSI_STATUS_GOOD:
		desc_scsi_status = "good";
		break;
	case MPI2_SCSI_STATUS_CHECK_CONDITION:
		desc_scsi_status = "check condition";
		break;
	case MPI2_SCSI_STATUS_CONDITION_MET:
		desc_scsi_status = "condition met";
		break;
	case MPI2_SCSI_STATUS_BUSY:
		desc_scsi_status = "busy";
		break;
	case MPI2_SCSI_STATUS_INTERMEDIATE:
		desc_scsi_status = "intermediate";
		break;
	case MPI2_SCSI_STATUS_INTERMEDIATE_CONDMET:
		desc_scsi_status = "intermediate condmet";
		break;
	case MPI2_SCSI_STATUS_RESERVATION_CONFLICT:
		desc_scsi_status = "reservation conflict";
		break;
	case MPI2_SCSI_STATUS_COMMAND_TERMINATED:
		desc_scsi_status = "command terminated";
		break;
	case MPI2_SCSI_STATUS_TASK_SET_FULL:
		desc_scsi_status = "task set full";
		break;
	case MPI2_SCSI_STATUS_ACA_ACTIVE:
		desc_scsi_status = "aca active";
		break;
	case MPI2_SCSI_STATUS_TASK_ABORTED:
		desc_scsi_status = "task aborted";
		break;
	default:
		desc_scsi_status = "unknown";
		break;
	}

	/* Accumulate the set SCSIState bits into ioc->tmp_string. */
	desc_scsi_state[0] = '\0';
	if (!scsi_state)
		desc_scsi_state = " ";
	if (scsi_state & MPI2_SCSI_STATE_RESPONSE_INFO_VALID)
		strcat(desc_scsi_state, "response info ");
	if (scsi_state & MPI2_SCSI_STATE_TERMINATED)
		strcat(desc_scsi_state, "state terminated ");
	if (scsi_state & MPI2_SCSI_STATE_NO_SCSI_STATUS)
		strcat(desc_scsi_state, "no status ");
	if (scsi_state & MPI2_SCSI_STATE_AUTOSENSE_FAILED)
		strcat(desc_scsi_state, "autosense failed ");
	if (scsi_state & MPI2_SCSI_STATE_AUTOSENSE_VALID)
		strcat(desc_scsi_state, "autosense valid ");

	scsi_print_command(scmd);

	/* Identify the target: RAID volume, NVMe (PCIe) device, or SAS device. */
	if (priv_target->flags & MPT_TARGET_FLAGS_VOLUME) {
		ioc_warn(ioc, "\t%s wwid(0x%016llx)\n",
			 device_str, (u64)priv_target->sas_address);
	} else if (priv_target->flags & MPT_TARGET_FLAGS_PCIE_DEVICE) {
		pcie_device = mpt3sas_get_pdev_from_target(ioc, priv_target);
		if (pcie_device) {
			ioc_info(ioc, "\twwid(0x%016llx), port(%d)\n",
				 (u64)pcie_device->wwid, pcie_device->port_num);
			if (pcie_device->enclosure_handle != 0)
				ioc_info(ioc, "\tenclosure logical id(0x%016llx), slot(%d)\n",
					 (u64)pcie_device->enclosure_logical_id,
					 pcie_device->slot);
			if (pcie_device->connector_name[0])
				ioc_info(ioc, "\tenclosure level(0x%04x), connector name( %s)\n",
					 pcie_device->enclosure_level,
					 pcie_device->connector_name);
			pcie_device_put(pcie_device);
		}
	} else {
		sas_device = mpt3sas_get_sdev_from_target(ioc, priv_target);
		if (sas_device) {
			ioc_warn(ioc, "\tsas_address(0x%016llx), phy(%d)\n",
				 (u64)sas_device->sas_address, sas_device->phy);

			_scsih_display_enclosure_chassis_info(ioc, sas_device,
			    NULL, NULL);

			sas_device_put(sas_device);
		}
	}

	ioc_warn(ioc, "\thandle(0x%04x), ioc_status(%s)(0x%04x), smid(%d)\n",
		 le16_to_cpu(mpi_reply->DevHandle),
		 desc_ioc_state, ioc_status, smid);
	ioc_warn(ioc, "\trequest_len(%d), underflow(%d), resid(%d)\n",
		 scsi_bufflen(scmd), scmd->underflow, scsi_get_resid(scmd));
	ioc_warn(ioc, "\ttag(%d), transfer_count(%d), sc->result(0x%08x)\n",
		 le16_to_cpu(mpi_reply->TaskTag),
		 le32_to_cpu(mpi_reply->TransferCount), scmd->result);
	ioc_warn(ioc, "\tscsi_status(%s)(0x%02x), scsi_state(%s)(0x%02x)\n",
		 desc_scsi_status, scsi_status, desc_scsi_state, scsi_state);

	if (scsi_state & MPI2_SCSI_STATE_AUTOSENSE_VALID) {
		struct sense_info data;
		_scsih_normalize_sense(scmd->sense_buffer, &data);
		ioc_warn(ioc, "\t[sense_key,asc,ascq]: [0x%02x,0x%02x,0x%02x], count(%d)\n",
			 data.skey, data.asc, data.ascq,
			 le32_to_cpu(mpi_reply->SenseCount));
	}
	if (scsi_state & MPI2_SCSI_STATE_RESPONSE_INFO_VALID) {
		response_info = le32_to_cpu(mpi_reply->ResponseInfo);
		response_bytes = (u8 *)&response_info;
		_scsih_response_code(ioc, response_bytes[0]);
	}
}

/**
 * _scsih_turn_on_pfa_led - illuminate PFA LED
 * @ioc: per adapter object
 * @handle: device handle
 * Context: process
 */
static void
_scsih_turn_on_pfa_led(struct MPT3SAS_ADAPTER *ioc, u16 handle)
{
	Mpi2SepReply_t mpi_reply;
	Mpi2SepRequest_t mpi_request;
	struct _sas_device *sas_device;

	/* Takes a reference on sas_device; dropped at out:. */
	sas_device = mpt3sas_get_sdev_by_handle(ioc, handle);
	if (!sas_device)
		return;

	memset(&mpi_request, 0, sizeof(Mpi2SepRequest_t));
	mpi_request.Function = MPI2_FUNCTION_SCSI_ENCLOSURE_PROCESSOR;
	mpi_request.Action = MPI2_SEP_REQ_ACTION_WRITE_STATUS;
	mpi_request.SlotStatus =
	    cpu_to_le32(MPI2_SEP_REQ_SLOTSTATUS_PREDICTED_FAULT);
	mpi_request.DevHandle = cpu_to_le16(handle);
	mpi_request.Flags = MPI2_SEP_REQ_FLAGS_DEVHANDLE_ADDRESS;
	if ((mpt3sas_base_scsi_enclosure_processor(ioc, &mpi_reply,
	    &mpi_request)) != 0) {
		ioc_err(ioc, "failure at %s:%d/%s()!\n",
			__FILE__, __LINE__, __func__);
		goto out;
	}
	/* Remembered so the LED can be turned off when the device goes away. */
	sas_device->pfa_led_on = 1;

	if (mpi_reply.IOCStatus || mpi_reply.IOCLogInfo) {
		dewtprintk(ioc,
			   ioc_info(ioc, "enclosure_processor: ioc_status (0x%04x), loginfo(0x%08x)\n",
				    le16_to_cpu(mpi_reply.IOCStatus),
				    le32_to_cpu(mpi_reply.IOCLogInfo)));
		goto out;
	}
 out:
	sas_device_put(sas_device);
}

/**
 * _scsih_turn_off_pfa_led - turn off Fault LED
 * @ioc: per adapter object
 * @sas_device: sas device whose PFA LED has to turned off
 * Context: process
 */
static void
_scsih_turn_off_pfa_led(struct MPT3SAS_ADAPTER *ioc,
	struct _sas_device *sas_device)
{
	Mpi2SepReply_t mpi_reply;
	Mpi2SepRequest_t mpi_request;

	/*
	 * Addressed by enclosure/slot (not DevHandle) because the device
	 * handle may already be gone by the time the LED is cleared.
	 */
	memset(&mpi_request, 0, sizeof(Mpi2SepRequest_t));
	mpi_request.Function = MPI2_FUNCTION_SCSI_ENCLOSURE_PROCESSOR;
	mpi_request.Action = MPI2_SEP_REQ_ACTION_WRITE_STATUS;
	mpi_request.SlotStatus = 0;
	mpi_request.Slot = cpu_to_le16(sas_device->slot);
	mpi_request.DevHandle = 0;
	mpi_request.EnclosureHandle = cpu_to_le16(sas_device->enclosure_handle);
	mpi_request.Flags = MPI2_SEP_REQ_FLAGS_ENCLOSURE_SLOT_ADDRESS;
	if ((mpt3sas_base_scsi_enclosure_processor(ioc, &mpi_reply,
	    &mpi_request)) != 0) {
		ioc_err(ioc, "failure at %s:%d/%s()!\n",
			__FILE__, __LINE__, __func__);
		return;
	}

	if (mpi_reply.IOCStatus || mpi_reply.IOCLogInfo) {
		dewtprintk(ioc,
			   ioc_info(ioc, "enclosure_processor: ioc_status (0x%04x), loginfo(0x%08x)\n",
				    le16_to_cpu(mpi_reply.IOCStatus),
				    le32_to_cpu(mpi_reply.IOCLogInfo)));
		return;
	}
}

/**
 * _scsih_send_event_to_turn_on_pfa_led - fire delayed event
 * @ioc:
per adapter object
 * @handle: device handle
 * Context: interrupt.
 */
static void
_scsih_send_event_to_turn_on_pfa_led(struct MPT3SAS_ADAPTER *ioc, u16 handle)
{
	struct fw_event_work *fw_event;

	/* Defer the SEP request to the fw-event worker (process context). */
	fw_event = alloc_fw_event_work(0);
	if (!fw_event)
		return;
	fw_event->event = MPT3SAS_TURN_ON_PFA_LED;
	fw_event->device_handle = handle;
	fw_event->ioc = ioc;
	_scsih_fw_event_add(ioc, fw_event);
	fw_event_work_put(fw_event);
}

/**
 * _scsih_smart_predicted_fault - process smart errors
 * @ioc: per adapter object
 * @handle: device handle
 * Context: interrupt.
 */
static void
_scsih_smart_predicted_fault(struct MPT3SAS_ADAPTER *ioc, u16 handle)
{
	struct scsi_target *starget;
	struct MPT3SAS_TARGET *sas_target_priv_data;
	Mpi2EventNotificationReply_t *event_reply;
	Mpi2EventDataSasDeviceStatusChange_t *event_data;
	struct _sas_device *sas_device;
	ssize_t sz;
	unsigned long flags;

	/* only handle non-raid devices */
	spin_lock_irqsave(&ioc->sas_device_lock, flags);
	sas_device = __mpt3sas_get_sdev_by_handle(ioc, handle);
	if (!sas_device)
		goto out_unlock;

	starget = sas_device->starget;
	sas_target_priv_data = starget->hostdata;

	if ((sas_target_priv_data->flags & MPT_TARGET_FLAGS_RAID_COMPONENT) ||
	   ((sas_target_priv_data->flags & MPT_TARGET_FLAGS_VOLUME)))
		goto out_unlock;

	_scsih_display_enclosure_chassis_info(NULL, sas_device, NULL, starget);

	spin_unlock_irqrestore(&ioc->sas_device_lock, flags);

	/* IBM platforms get the predicted-fault LED lit via a deferred event. */
	if (ioc->pdev->subsystem_vendor == PCI_VENDOR_ID_IBM)
		_scsih_send_event_to_turn_on_pfa_led(ioc, handle);

	/* insert into event log */
	sz = offsetof(Mpi2EventNotificationReply_t, EventData) +
	     sizeof(Mpi2EventDataSasDeviceStatusChange_t);
	/* GFP_ATOMIC: may be called from interrupt context (see above). */
	event_reply = kzalloc(sz, GFP_ATOMIC);
	if (!event_reply) {
		ioc_err(ioc, "failure at %s:%d/%s()!\n",
			__FILE__, __LINE__, __func__);
		goto out;
	}

	event_reply->Function = MPI2_FUNCTION_EVENT_NOTIFICATION;
	event_reply->Event =
	    cpu_to_le16(MPI2_EVENT_SAS_DEVICE_STATUS_CHANGE);
	event_reply->MsgLength = sz/4;
	event_reply->EventDataLength =
	    cpu_to_le16(sizeof(Mpi2EventDataSasDeviceStatusChange_t)/4);
	event_data = (Mpi2EventDataSasDeviceStatusChange_t *)
	    event_reply->EventData;
	event_data->ReasonCode = MPI2_EVENT_SAS_DEV_STAT_RC_SMART_DATA;
	event_data->ASC = 0x5D;	/* T10: failure prediction threshold exceeded */
	event_data->DevHandle = cpu_to_le16(handle);
	event_data->SASAddress = cpu_to_le64(sas_target_priv_data->sas_address);
	mpt3sas_ctl_add_to_event_log(ioc, event_reply);
	kfree(event_reply);
out:
	/* Drop the reference taken by __mpt3sas_get_sdev_by_handle(). */
	if (sas_device)
		sas_device_put(sas_device);
	return;

out_unlock:
	spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
	goto out;
}

/**
 * _scsih_io_done - scsi request callback
 * @ioc: per adapter object
 * @smid: system request message index
 * @msix_index: MSIX table index supplied by the OS
 * @reply: reply message frame(lower 32bit addr)
 *
 * Callback handler when using _scsih_qcmd.
 *
 * Return: 1 meaning mf should be freed from _base_interrupt
 * 0 means the mf is freed from this function.
 */
static u8
_scsih_io_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index, u32 reply)
{
	Mpi25SCSIIORequest_t *mpi_request;
	Mpi2SCSIIOReply_t *mpi_reply;
	struct scsi_cmnd *scmd;
	struct scsiio_tracker *st;
	u16 ioc_status;
	u32 xfer_cnt;
	u8 scsi_state;
	u8 scsi_status;
	u32 log_info;
	struct MPT3SAS_DEVICE *sas_device_priv_data;
	u32 response_code = 0;

	mpi_reply = mpt3sas_base_get_reply_virt_addr(ioc, reply);

	scmd = mpt3sas_scsih_scsi_lookup_get(ioc, smid);
	if (scmd == NULL)
		return 1;

	/* Command is completing; clear any SATL-pending marker. */
	_scsih_set_satl_pending(scmd, false);

	mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);

	/* No reply frame means success (firmware completed without status). */
	if (mpi_reply == NULL) {
		scmd->result = DID_OK << 16;
		goto out;
	}

	sas_device_priv_data = scmd->device->hostdata;
	if (!sas_device_priv_data || !sas_device_priv_data->sas_target ||
	     sas_device_priv_data->sas_target->deleted) {
		scmd->result = DID_NO_CONNECT << 16;
		goto out;
	}
	ioc_status = le16_to_cpu(mpi_reply->IOCStatus);

	/*
	 * WARPDRIVE: If direct_io is set then it is directIO,
	 * the failed direct I/O should be redirected to volume
	 */
	st = scsi_cmd_priv(scmd);
	if (st->direct_io &&
	    ((ioc_status & MPI2_IOCSTATUS_MASK)
	     != MPI2_IOCSTATUS_SCSI_TASK_TERMINATED)) {
		/* Re-issue the original CDB against the volume handle. */
		st->direct_io = 0;
		st->scmd = scmd;
		memcpy(mpi_request->CDB.CDB32, scmd->cmnd, scmd->cmd_len);
		mpi_request->DevHandle =
		    cpu_to_le16(sas_device_priv_data->sas_target->handle);
		ioc->put_smid_scsi_io(ioc, smid,
		    sas_device_priv_data->sas_target->handle);
		return 0;
	}
	/* turning off TLR */
	scsi_state = mpi_reply->SCSIState;
	if (scsi_state & MPI2_SCSI_STATE_RESPONSE_INFO_VALID)
		response_code =
		    le32_to_cpu(mpi_reply->ResponseInfo) & 0xFF;
	/* Snoop only the first completion per device for TLR rejection. */
	if (!sas_device_priv_data->tlr_snoop_check) {
		sas_device_priv_data->tlr_snoop_check++;
		if ((!ioc->is_warpdrive &&
		    !scsih_is_raid(&scmd->device->sdev_gendev) &&
		    !scsih_is_nvme(&scmd->device->sdev_gendev))
		    && sas_is_tlr_enabled(scmd->device) &&
		    response_code == MPI2_SCSITASKMGMT_RSP_INVALID_FRAME) {
			sas_disable_tlr(scmd->device);
			sdev_printk(KERN_INFO, scmd->device, "TLR disabled\n");
		}
	}

	xfer_cnt = le32_to_cpu(mpi_reply->TransferCount);
	scsi_set_resid(scmd, scsi_bufflen(scmd) - xfer_cnt);
	if (ioc_status & MPI2_IOCSTATUS_FLAG_LOG_INFO_AVAILABLE)
		log_info = le32_to_cpu(mpi_reply->IOCLogInfo);
	else
		log_info = 0;
	ioc_status &= MPI2_IOCSTATUS_MASK;
	scsi_status = mpi_reply->SCSIStatus;

	/*
	 * A zero-transfer underrun with busy/conflict/set-full status is
	 * treated as success so the SCSI status byte alone drives retry.
	 */
	if (ioc_status == MPI2_IOCSTATUS_SCSI_DATA_UNDERRUN && xfer_cnt == 0 &&
	    (scsi_status == MPI2_SCSI_STATUS_BUSY ||
	     scsi_status == MPI2_SCSI_STATUS_RESERVATION_CONFLICT ||
	     scsi_status == MPI2_SCSI_STATUS_TASK_SET_FULL)) {
		ioc_status = MPI2_IOCSTATUS_SUCCESS;
	}

	if (scsi_state & MPI2_SCSI_STATE_AUTOSENSE_VALID) {
		struct sense_info data;
		const void *sense_data = mpt3sas_base_get_sense_buffer(ioc,
		    smid);
		u32 sz = min_t(u32, SCSI_SENSE_BUFFERSIZE,
		    le32_to_cpu(mpi_reply->SenseCount));
		memcpy(scmd->sense_buffer, sense_data, sz);
		_scsih_normalize_sense(scmd->sense_buffer, &data);
		/* failure prediction threshold exceeded */
		if (data.asc == 0x5D)
			_scsih_smart_predicted_fault(ioc,
			    le16_to_cpu(mpi_reply->DevHandle));
		mpt3sas_trigger_scsi(ioc, data.skey, data.asc, data.ascq);

		if ((ioc->logging_level & MPT_DEBUG_REPLY) &&
		    ((scmd->sense_buffer[2] == UNIT_ATTENTION) ||
		    (scmd->sense_buffer[2] == MEDIUM_ERROR) ||
		    (scmd->sense_buffer[2] == HARDWARE_ERROR)))
			_scsih_scsi_ioc_info(ioc, scmd, mpi_reply, smid);
	}
	/* Map firmware IOCStatus onto a midlayer result code. */
	switch (ioc_status) {
	case MPI2_IOCSTATUS_BUSY:
	case MPI2_IOCSTATUS_INSUFFICIENT_RESOURCES:
		scmd->result = SAM_STAT_BUSY;
		break;

	case MPI2_IOCSTATUS_SCSI_DEVICE_NOT_THERE:
		scmd->result = DID_NO_CONNECT << 16;
		break;

	case MPI2_IOCSTATUS_SCSI_IOC_TERMINATED:
		if (sas_device_priv_data->block) {
			scmd->result = DID_TRANSPORT_DISRUPTED << 16;
			goto out;
		}
		/* firmware loginfo 0x31110630: offline device after 2 retries */
		if (log_info == 0x31110630) {
			if (scmd->retries > 2) {
				scmd->result = DID_NO_CONNECT << 16;
				scsi_device_set_state(scmd->device,
				    SDEV_OFFLINE);
			} else {
				scmd->result = DID_SOFT_ERROR << 16;
				scmd->device->expecting_cc_ua = 1;
			}
			break;
		} else if (log_info == VIRTUAL_IO_FAILED_RETRY) {
			scmd->result = DID_RESET << 16;
			break;
		} else if ((scmd->device->channel == RAID_CHANNEL) &&
		   (scsi_state == (MPI2_SCSI_STATE_TERMINATED |
		   MPI2_SCSI_STATE_NO_SCSI_STATUS))) {
			scmd->result = DID_RESET << 16;
			break;
		}
		scmd->result = DID_SOFT_ERROR << 16;
		break;
	case MPI2_IOCSTATUS_SCSI_TASK_TERMINATED:
	case MPI2_IOCSTATUS_SCSI_EXT_TERMINATED:
		scmd->result = DID_RESET << 16;
		break;

	case MPI2_IOCSTATUS_SCSI_RESIDUAL_MISMATCH:
		if ((xfer_cnt == 0) || (scmd->underflow > xfer_cnt))
			scmd->result = DID_SOFT_ERROR << 16;
		else
			scmd->result = (DID_OK << 16) | scsi_status;
		break;

	case MPI2_IOCSTATUS_SCSI_DATA_UNDERRUN:
		scmd->result = (DID_OK << 16) | scsi_status;

		/* Valid sense data supersedes underrun handling. */
		if ((scsi_state & MPI2_SCSI_STATE_AUTOSENSE_VALID))
			break;

		if (xfer_cnt < scmd->underflow) {
			if (scsi_status == SAM_STAT_BUSY)
				scmd->result = SAM_STAT_BUSY;
			else
				scmd->result = DID_SOFT_ERROR << 16;
		} else if (scsi_state & (MPI2_SCSI_STATE_AUTOSENSE_FAILED |
		     MPI2_SCSI_STATE_NO_SCSI_STATUS))
			scmd->result = DID_SOFT_ERROR << 16;
		else if (scsi_state & MPI2_SCSI_STATE_TERMINATED)
			scmd->result = DID_RESET << 16;
		else if (!xfer_cnt && scmd->cmnd[0] == REPORT_LUNS) {
			/* Zero-byte REPORT LUNS: synthesize invalid-opcode sense. */
			mpi_reply->SCSIState = MPI2_SCSI_STATE_AUTOSENSE_VALID;
			mpi_reply->SCSIStatus = SAM_STAT_CHECK_CONDITION;
			scsi_build_sense(scmd, 0, ILLEGAL_REQUEST,
					 0x20, 0);
		}
		break;

	case MPI2_IOCSTATUS_SCSI_DATA_OVERRUN:
		scsi_set_resid(scmd, 0);
		fallthrough;
	case MPI2_IOCSTATUS_SCSI_RECOVERED_ERROR:
	case MPI2_IOCSTATUS_SUCCESS:
		scmd->result = (DID_OK << 16) | scsi_status;
		if (response_code ==
		    MPI2_SCSITASKMGMT_RSP_INVALID_FRAME ||
		    (scsi_state & (MPI2_SCSI_STATE_AUTOSENSE_FAILED |
		     MPI2_SCSI_STATE_NO_SCSI_STATUS)))
			scmd->result = DID_SOFT_ERROR << 16;
		else if (scsi_state & MPI2_SCSI_STATE_TERMINATED)
			scmd->result = DID_RESET << 16;
		break;

	case MPI2_IOCSTATUS_EEDP_GUARD_ERROR:
	case MPI2_IOCSTATUS_EEDP_REF_TAG_ERROR:
	case MPI2_IOCSTATUS_EEDP_APP_TAG_ERROR:
		_scsih_eedp_error_handling(scmd, ioc_status);
		break;

	case MPI2_IOCSTATUS_SCSI_PROTOCOL_ERROR:
	case MPI2_IOCSTATUS_INVALID_FUNCTION:
	case MPI2_IOCSTATUS_INVALID_SGL:
	case MPI2_IOCSTATUS_INTERNAL_ERROR:
	case MPI2_IOCSTATUS_INVALID_FIELD:
	case MPI2_IOCSTATUS_INVALID_STATE:
	case MPI2_IOCSTATUS_SCSI_IO_DATA_ERROR:
	case MPI2_IOCSTATUS_SCSI_TASK_MGMT_FAILED:
	case MPI2_IOCSTATUS_INSUFFICIENT_POWER:
	default:
		scmd->result = DID_SOFT_ERROR << 16;
		break;

	}

	if (scmd->result && (ioc->logging_level & MPT_DEBUG_REPLY))
		_scsih_scsi_ioc_info(ioc , scmd, mpi_reply, smid);

 out:

	scsi_dma_unmap(scmd);
	mpt3sas_base_free_smid(ioc, smid);
	scsi_done(scmd);
	return 0;
}

/**
 * _scsih_update_vphys_after_reset - update the Port's
 * vphys_list after reset
 * @ioc: per adapter object
 *
 * Returns nothing.
 */
static void
_scsih_update_vphys_after_reset(struct MPT3SAS_ADAPTER *ioc)
{
	u16 sz, ioc_status;
	int i;
	Mpi2ConfigReply_t mpi_reply;
	Mpi2SasIOUnitPage0_t *sas_iounit_pg0 = NULL;
	u16 attached_handle;
	u64 attached_sas_addr;
	u8 found = 0, port_id;
	Mpi2SasPhyPage0_t phy_pg0;
	struct hba_port *port, *port_next, *mport;
	struct virtual_phy *vphy, *vphy_next;
	struct _sas_device *sas_device;

	/*
	 * Mark all the vphys objects as dirty.
	 */
	list_for_each_entry_safe(port, port_next,
	    &ioc->port_table_list, list) {
		if (!port->vphys_mask)
			continue;
		list_for_each_entry_safe(vphy, vphy_next,
		    &port->vphys_list, list) {
			vphy->flags |= MPT_VPHY_FLAG_DIRTY_PHY;
		}
	}

	/*
	 * Read SASIOUnitPage0 to get each HBA Phy's data.
	 */
	sz = struct_size(sas_iounit_pg0, PhyData, ioc->sas_hba.num_phys);
	sas_iounit_pg0 = kzalloc(sz, GFP_KERNEL);
	if (!sas_iounit_pg0) {
		ioc_err(ioc, "failure at %s:%d/%s()!\n",
			__FILE__, __LINE__, __func__);
		return;
	}
	if ((mpt3sas_config_get_sas_iounit_pg0(ioc, &mpi_reply,
	    sas_iounit_pg0, sz)) != 0)
		goto out;
	ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & MPI2_IOCSTATUS_MASK;
	if (ioc_status != MPI2_IOCSTATUS_SUCCESS)
		goto out;
	/*
	 * Loop over each HBA Phy.
	 */
	for (i = 0; i < ioc->sas_hba.num_phys; i++) {
		/*
		 * Check whether Phy's Negotiation Link Rate is > 1.5G or not.
		 */
		if ((sas_iounit_pg0->PhyData[i].NegotiatedLinkRate >> 4) <
		    MPI2_SAS_NEG_LINK_RATE_1_5)
			continue;
		/*
		 * Check whether Phy is connected to SEP device or not,
		 * if it is SEP device then read the Phy's SASPHYPage0 data to
		 * determine whether Phy is a virtual Phy or not. if it is
		 * virtual phy then it is conformed that the attached remote
		 * device is a HBA's vSES device.
		 */
		if (!(le32_to_cpu(
		    sas_iounit_pg0->PhyData[i].ControllerPhyDeviceInfo) &
		    MPI2_SAS_DEVICE_INFO_SEP))
			continue;

		if ((mpt3sas_config_get_phy_pg0(ioc, &mpi_reply, &phy_pg0,
		    i))) {
			ioc_err(ioc, "failure at %s:%d/%s()!\n",
				__FILE__, __LINE__, __func__);
			continue;
		}

		if (!(le32_to_cpu(phy_pg0.PhyInfo) &
		    MPI2_SAS_PHYINFO_VIRTUAL_PHY))
			continue;
		/*
		 * Get the vSES device's SAS Address.
		 */
		attached_handle = le16_to_cpu(
		    sas_iounit_pg0->PhyData[i].AttachedDevHandle);
		if (_scsih_get_sas_address(ioc, attached_handle,
		    &attached_sas_addr) != 0) {
			ioc_err(ioc, "failure at %s:%d/%s()!\n",
				__FILE__, __LINE__, __func__);
			continue;
		}

		found = 0;
		port = port_next = NULL;
		/*
		 * Loop over each virtual_phy object from
		 * each port's vphys_list.
		 */
		list_for_each_entry_safe(port,
		    port_next, &ioc->port_table_list, list) {
			if (!port->vphys_mask)
				continue;
			list_for_each_entry_safe(vphy, vphy_next,
			    &port->vphys_list, list) {
				/*
				 * Continue with next virtual_phy object
				 * if the object is not marked as dirty.
				 */
				if (!(vphy->flags & MPT_VPHY_FLAG_DIRTY_PHY))
					continue;

				/*
				 * Continue with next virtual_phy object
				 * if the object's SAS Address is not equals
				 * to current Phy's vSES device SAS Address.
				 */
				if (vphy->sas_address != attached_sas_addr)
					continue;
				/*
				 * Enable current Phy number bit in object's
				 * phy_mask field.
				 */
				if (!(vphy->phy_mask & (1 << i)))
					vphy->phy_mask = (1 << i);
				/*
				 * Get hba_port object from hba_port table
				 * corresponding to current phy's Port ID.
				 * if there is no hba_port object corresponding
				 * to Phy's Port ID then create a new hba_port
				 * object & add to hba_port table.
				 */
				port_id = sas_iounit_pg0->PhyData[i].Port;
				mport = mpt3sas_get_port_by_id(ioc, port_id, 1);
				if (!mport) {
					mport = kzalloc(
					    sizeof(struct hba_port), GFP_KERNEL);
					if (!mport)
						break;
					mport->port_id = port_id;
					ioc_info(ioc,
					    "%s: hba_port entry: %p, port: %d is added to hba_port list\n",
					    __func__, mport, mport->port_id);
					list_add_tail(&mport->list,
						&ioc->port_table_list);
				}
				/*
				 * If mport & port pointers are not pointing to
				 * same hba_port object then it means that vSES
				 * device's Port ID got changed after reset and
				 * hence move current virtual_phy object from
				 * port's vphys_list to mport's vphys_list.
				 */
				if (port != mport) {
					if (!mport->vphys_mask)
						INIT_LIST_HEAD(
						    &mport->vphys_list);
					mport->vphys_mask |= (1 << i);
					port->vphys_mask &= ~(1 << i);
					list_move(&vphy->list,
					    &mport->vphys_list);
					sas_device = mpt3sas_get_sdev_by_addr(
					    ioc, attached_sas_addr, port);
					if (sas_device)
						sas_device->port = mport;
				}
				/*
				 * Earlier while updating the hba_port table,
				 * it is determined that there is no other
				 * direct attached device with mport's Port ID,
				 * Hence mport was marked as dirty. Only vSES
				 * device has this Port ID, so unmark the mport
				 * as dirt.
				 */
				if (mport->flags & HBA_PORT_FLAG_DIRTY_PORT) {
					mport->sas_address = 0;
					mport->phy_mask = 0;
					mport->flags &=
					    ~HBA_PORT_FLAG_DIRTY_PORT;
				}
				/*
				 * Unmark current virtual_phy object as dirty.
				 */
				vphy->flags &= ~MPT_VPHY_FLAG_DIRTY_PHY;
				found = 1;
				break;
			}
			if (found)
				break;
		}
	}
out:
	kfree(sas_iounit_pg0);
}

/**
 * _scsih_get_port_table_after_reset - Construct temporary port table
 * @ioc: per adapter object
 * @port_table: address where port table needs to be constructed
 *
 * return number of HBA port entries available after reset.
 */
static int
_scsih_get_port_table_after_reset(struct MPT3SAS_ADAPTER *ioc,
	struct hba_port *port_table)
{
	u16 sz, ioc_status;
	int i, j;
	Mpi2ConfigReply_t mpi_reply;
	Mpi2SasIOUnitPage0_t *sas_iounit_pg0 = NULL;
	u16 attached_handle;
	u64 attached_sas_addr;
	u8 found = 0, port_count = 0, port_id;

	sz = struct_size(sas_iounit_pg0, PhyData, ioc->sas_hba.num_phys);
	sas_iounit_pg0 = kzalloc(sz, GFP_KERNEL);
	if (!sas_iounit_pg0) {
		ioc_err(ioc, "failure at %s:%d/%s()!\n",
			__FILE__, __LINE__, __func__);
		return port_count;
	}

	if ((mpt3sas_config_get_sas_iounit_pg0(ioc, &mpi_reply,
	    sas_iounit_pg0, sz)) != 0)
		goto out;
	ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & MPI2_IOCSTATUS_MASK;
	if (ioc_status != MPI2_IOCSTATUS_SUCCESS)
		goto out;
	for (i = 0; i < ioc->sas_hba.num_phys; i++) {
		found = 0;
		if ((sas_iounit_pg0->PhyData[i].NegotiatedLinkRate >> 4) <
		    MPI2_SAS_NEG_LINK_RATE_1_5)
			continue;
		attached_handle =
		    le16_to_cpu(sas_iounit_pg0->PhyData[i].AttachedDevHandle);
		if (_scsih_get_sas_address(
		    ioc, attached_handle, &attached_sas_addr) != 0) {
			ioc_err(ioc, "failure at %s:%d/%s()!\n",
				__FILE__, __LINE__, __func__);
			continue;
		}

		for (j = 0; j < port_count; j++) {
			port_id = sas_iounit_pg0->PhyData[i].Port;
			if (port_table[j].port_id == port_id &&
			    port_table[j].sas_address == attached_sas_addr) {
				port_table[j].phy_mask |= (1 << i);
6179 found = 1; 6180 break; 6181 } 6182 } 6183 6184 if (found) 6185 continue; 6186 6187 port_id = sas_iounit_pg0->PhyData[i].Port; 6188 port_table[port_count].port_id = port_id; 6189 port_table[port_count].phy_mask = (1 << i); 6190 port_table[port_count].sas_address = attached_sas_addr; 6191 port_count++; 6192 } 6193 out: 6194 kfree(sas_iounit_pg0); 6195 return port_count; 6196 } 6197 6198 enum hba_port_matched_codes { 6199 NOT_MATCHED = 0, 6200 MATCHED_WITH_ADDR_AND_PHYMASK, 6201 MATCHED_WITH_ADDR_SUBPHYMASK_AND_PORT, 6202 MATCHED_WITH_ADDR_AND_SUBPHYMASK, 6203 MATCHED_WITH_ADDR, 6204 }; 6205 6206 /** 6207 * _scsih_look_and_get_matched_port_entry - Get matched hba port entry 6208 * from HBA port table 6209 * @ioc: per adapter object 6210 * @port_entry: hba port entry from temporary port table which needs to be 6211 * searched for matched entry in the HBA port table 6212 * @matched_port_entry: save matched hba port entry here 6213 * @count: count of matched entries 6214 * 6215 * return type of matched entry found. 
6216 */ 6217 static enum hba_port_matched_codes 6218 _scsih_look_and_get_matched_port_entry(struct MPT3SAS_ADAPTER *ioc, 6219 struct hba_port *port_entry, 6220 struct hba_port **matched_port_entry, int *count) 6221 { 6222 struct hba_port *port_table_entry, *matched_port = NULL; 6223 enum hba_port_matched_codes matched_code = NOT_MATCHED; 6224 int lcount = 0; 6225 *matched_port_entry = NULL; 6226 6227 list_for_each_entry(port_table_entry, &ioc->port_table_list, list) { 6228 if (!(port_table_entry->flags & HBA_PORT_FLAG_DIRTY_PORT)) 6229 continue; 6230 6231 if ((port_table_entry->sas_address == port_entry->sas_address) 6232 && (port_table_entry->phy_mask == port_entry->phy_mask)) { 6233 matched_code = MATCHED_WITH_ADDR_AND_PHYMASK; 6234 matched_port = port_table_entry; 6235 break; 6236 } 6237 6238 if ((port_table_entry->sas_address == port_entry->sas_address) 6239 && (port_table_entry->phy_mask & port_entry->phy_mask) 6240 && (port_table_entry->port_id == port_entry->port_id)) { 6241 matched_code = MATCHED_WITH_ADDR_SUBPHYMASK_AND_PORT; 6242 matched_port = port_table_entry; 6243 continue; 6244 } 6245 6246 if ((port_table_entry->sas_address == port_entry->sas_address) 6247 && (port_table_entry->phy_mask & port_entry->phy_mask)) { 6248 if (matched_code == 6249 MATCHED_WITH_ADDR_SUBPHYMASK_AND_PORT) 6250 continue; 6251 matched_code = MATCHED_WITH_ADDR_AND_SUBPHYMASK; 6252 matched_port = port_table_entry; 6253 continue; 6254 } 6255 6256 if (port_table_entry->sas_address == port_entry->sas_address) { 6257 if (matched_code == 6258 MATCHED_WITH_ADDR_SUBPHYMASK_AND_PORT) 6259 continue; 6260 if (matched_code == MATCHED_WITH_ADDR_AND_SUBPHYMASK) 6261 continue; 6262 matched_code = MATCHED_WITH_ADDR; 6263 matched_port = port_table_entry; 6264 lcount++; 6265 } 6266 } 6267 6268 *matched_port_entry = matched_port; 6269 if (matched_code == MATCHED_WITH_ADDR) 6270 *count = lcount; 6271 return matched_code; 6272 } 6273 6274 /** 6275 * _scsih_del_phy_part_of_anther_port - remove phy if 
it 6276 * is a part of anther port 6277 *@ioc: per adapter object 6278 *@port_table: port table after reset 6279 *@index: hba port entry index 6280 *@port_count: number of ports available after host reset 6281 *@offset: HBA phy bit offset 6282 * 6283 */ 6284 static void 6285 _scsih_del_phy_part_of_anther_port(struct MPT3SAS_ADAPTER *ioc, 6286 struct hba_port *port_table, 6287 int index, u8 port_count, int offset) 6288 { 6289 struct _sas_node *sas_node = &ioc->sas_hba; 6290 u32 i, found = 0; 6291 6292 for (i = 0; i < port_count; i++) { 6293 if (i == index) 6294 continue; 6295 6296 if (port_table[i].phy_mask & (1 << offset)) { 6297 mpt3sas_transport_del_phy_from_an_existing_port( 6298 ioc, sas_node, &sas_node->phy[offset]); 6299 found = 1; 6300 break; 6301 } 6302 } 6303 if (!found) 6304 port_table[index].phy_mask |= (1 << offset); 6305 } 6306 6307 /** 6308 * _scsih_add_or_del_phys_from_existing_port - add/remove phy to/from 6309 * right port 6310 *@ioc: per adapter object 6311 *@hba_port_entry: hba port table entry 6312 *@port_table: temporary port table 6313 *@index: hba port entry index 6314 *@port_count: number of ports available after host reset 6315 * 6316 */ 6317 static void 6318 _scsih_add_or_del_phys_from_existing_port(struct MPT3SAS_ADAPTER *ioc, 6319 struct hba_port *hba_port_entry, struct hba_port *port_table, 6320 int index, int port_count) 6321 { 6322 u32 phy_mask, offset = 0; 6323 struct _sas_node *sas_node = &ioc->sas_hba; 6324 6325 phy_mask = hba_port_entry->phy_mask ^ port_table[index].phy_mask; 6326 6327 for (offset = 0; offset < ioc->sas_hba.num_phys; offset++) { 6328 if (phy_mask & (1 << offset)) { 6329 if (!(port_table[index].phy_mask & (1 << offset))) { 6330 _scsih_del_phy_part_of_anther_port( 6331 ioc, port_table, index, port_count, 6332 offset); 6333 continue; 6334 } 6335 if (sas_node->phy[offset].phy_belongs_to_port) 6336 mpt3sas_transport_del_phy_from_an_existing_port( 6337 ioc, sas_node, &sas_node->phy[offset]); 6338 
mpt3sas_transport_add_phy_to_an_existing_port( 6339 ioc, sas_node, &sas_node->phy[offset], 6340 hba_port_entry->sas_address, 6341 hba_port_entry); 6342 } 6343 } 6344 } 6345 6346 /** 6347 * _scsih_del_dirty_vphy - delete virtual_phy objects marked as dirty. 6348 * @ioc: per adapter object 6349 * 6350 * Returns nothing. 6351 */ 6352 static void 6353 _scsih_del_dirty_vphy(struct MPT3SAS_ADAPTER *ioc) 6354 { 6355 struct hba_port *port, *port_next; 6356 struct virtual_phy *vphy, *vphy_next; 6357 6358 list_for_each_entry_safe(port, port_next, 6359 &ioc->port_table_list, list) { 6360 if (!port->vphys_mask) 6361 continue; 6362 list_for_each_entry_safe(vphy, vphy_next, 6363 &port->vphys_list, list) { 6364 if (vphy->flags & MPT_VPHY_FLAG_DIRTY_PHY) { 6365 drsprintk(ioc, ioc_info(ioc, 6366 "Deleting vphy %p entry from port id: %d\t, Phy_mask 0x%08x\n", 6367 vphy, port->port_id, 6368 vphy->phy_mask)); 6369 port->vphys_mask &= ~vphy->phy_mask; 6370 list_del(&vphy->list); 6371 kfree(vphy); 6372 } 6373 } 6374 if (!port->vphys_mask && !port->sas_address) 6375 port->flags |= HBA_PORT_FLAG_DIRTY_PORT; 6376 } 6377 } 6378 6379 /** 6380 * _scsih_del_dirty_port_entries - delete dirty port entries from port list 6381 * after host reset 6382 *@ioc: per adapter object 6383 * 6384 */ 6385 static void 6386 _scsih_del_dirty_port_entries(struct MPT3SAS_ADAPTER *ioc) 6387 { 6388 struct hba_port *port, *port_next; 6389 6390 list_for_each_entry_safe(port, port_next, 6391 &ioc->port_table_list, list) { 6392 if (!(port->flags & HBA_PORT_FLAG_DIRTY_PORT) || 6393 port->flags & HBA_PORT_FLAG_NEW_PORT) 6394 continue; 6395 6396 drsprintk(ioc, ioc_info(ioc, 6397 "Deleting port table entry %p having Port: %d\t Phy_mask 0x%08x\n", 6398 port, port->port_id, port->phy_mask)); 6399 list_del(&port->list); 6400 kfree(port); 6401 } 6402 } 6403 6404 /** 6405 * _scsih_sas_port_refresh - Update HBA port table after host reset 6406 * @ioc: per adapter object 6407 */ 6408 static void 6409 
_scsih_sas_port_refresh(struct MPT3SAS_ADAPTER *ioc) 6410 { 6411 u32 port_count = 0; 6412 struct hba_port *port_table; 6413 struct hba_port *port_table_entry; 6414 struct hba_port *port_entry = NULL; 6415 int i, j, count = 0, lcount = 0; 6416 int ret; 6417 u64 sas_addr; 6418 u8 num_phys; 6419 6420 drsprintk(ioc, ioc_info(ioc, 6421 "updating ports for sas_host(0x%016llx)\n", 6422 (unsigned long long)ioc->sas_hba.sas_address)); 6423 6424 mpt3sas_config_get_number_hba_phys(ioc, &num_phys); 6425 if (!num_phys) { 6426 ioc_err(ioc, "failure at %s:%d/%s()!\n", 6427 __FILE__, __LINE__, __func__); 6428 return; 6429 } 6430 6431 if (num_phys > ioc->sas_hba.nr_phys_allocated) { 6432 ioc_err(ioc, "failure at %s:%d/%s()!\n", 6433 __FILE__, __LINE__, __func__); 6434 return; 6435 } 6436 ioc->sas_hba.num_phys = num_phys; 6437 6438 port_table = kcalloc(ioc->sas_hba.num_phys, 6439 sizeof(struct hba_port), GFP_KERNEL); 6440 if (!port_table) 6441 return; 6442 6443 port_count = _scsih_get_port_table_after_reset(ioc, port_table); 6444 if (!port_count) 6445 return; 6446 6447 drsprintk(ioc, ioc_info(ioc, "New Port table\n")); 6448 for (j = 0; j < port_count; j++) 6449 drsprintk(ioc, ioc_info(ioc, 6450 "Port: %d\t Phy_mask 0x%08x\t sas_addr(0x%016llx)\n", 6451 port_table[j].port_id, 6452 port_table[j].phy_mask, port_table[j].sas_address)); 6453 6454 list_for_each_entry(port_table_entry, &ioc->port_table_list, list) 6455 port_table_entry->flags |= HBA_PORT_FLAG_DIRTY_PORT; 6456 6457 drsprintk(ioc, ioc_info(ioc, "Old Port table\n")); 6458 port_table_entry = NULL; 6459 list_for_each_entry(port_table_entry, &ioc->port_table_list, list) { 6460 drsprintk(ioc, ioc_info(ioc, 6461 "Port: %d\t Phy_mask 0x%08x\t sas_addr(0x%016llx)\n", 6462 port_table_entry->port_id, 6463 port_table_entry->phy_mask, 6464 port_table_entry->sas_address)); 6465 } 6466 6467 for (j = 0; j < port_count; j++) { 6468 ret = _scsih_look_and_get_matched_port_entry(ioc, 6469 &port_table[j], &port_entry, &count); 6470 if 
(!port_entry) { 6471 drsprintk(ioc, ioc_info(ioc, 6472 "No Matched entry for sas_addr(0x%16llx), Port:%d\n", 6473 port_table[j].sas_address, 6474 port_table[j].port_id)); 6475 continue; 6476 } 6477 6478 switch (ret) { 6479 case MATCHED_WITH_ADDR_SUBPHYMASK_AND_PORT: 6480 case MATCHED_WITH_ADDR_AND_SUBPHYMASK: 6481 _scsih_add_or_del_phys_from_existing_port(ioc, 6482 port_entry, port_table, j, port_count); 6483 break; 6484 case MATCHED_WITH_ADDR: 6485 sas_addr = port_table[j].sas_address; 6486 for (i = 0; i < port_count; i++) { 6487 if (port_table[i].sas_address == sas_addr) 6488 lcount++; 6489 } 6490 6491 if (count > 1 || lcount > 1) 6492 port_entry = NULL; 6493 else 6494 _scsih_add_or_del_phys_from_existing_port(ioc, 6495 port_entry, port_table, j, port_count); 6496 } 6497 6498 if (!port_entry) 6499 continue; 6500 6501 if (port_entry->port_id != port_table[j].port_id) 6502 port_entry->port_id = port_table[j].port_id; 6503 port_entry->flags &= ~HBA_PORT_FLAG_DIRTY_PORT; 6504 port_entry->phy_mask = port_table[j].phy_mask; 6505 } 6506 6507 port_table_entry = NULL; 6508 } 6509 6510 /** 6511 * _scsih_alloc_vphy - allocate virtual_phy object 6512 * @ioc: per adapter object 6513 * @port_id: Port ID number 6514 * @phy_num: HBA Phy number 6515 * 6516 * Returns allocated virtual_phy object. 6517 */ 6518 static struct virtual_phy * 6519 _scsih_alloc_vphy(struct MPT3SAS_ADAPTER *ioc, u8 port_id, u8 phy_num) 6520 { 6521 struct virtual_phy *vphy; 6522 struct hba_port *port; 6523 6524 port = mpt3sas_get_port_by_id(ioc, port_id, 0); 6525 if (!port) 6526 return NULL; 6527 6528 vphy = mpt3sas_get_vphy_by_phy(ioc, port, phy_num); 6529 if (!vphy) { 6530 vphy = kzalloc(sizeof(struct virtual_phy), GFP_KERNEL); 6531 if (!vphy) 6532 return NULL; 6533 6534 if (!port->vphys_mask) 6535 INIT_LIST_HEAD(&port->vphys_list); 6536 6537 /* 6538 * Enable bit corresponding to HBA phy number on its 6539 * parent hba_port object's vphys_mask field. 
6540 */ 6541 port->vphys_mask |= (1 << phy_num); 6542 vphy->phy_mask |= (1 << phy_num); 6543 6544 list_add_tail(&vphy->list, &port->vphys_list); 6545 6546 ioc_info(ioc, 6547 "vphy entry: %p, port id: %d, phy:%d is added to port's vphys_list\n", 6548 vphy, port->port_id, phy_num); 6549 } 6550 return vphy; 6551 } 6552 6553 /** 6554 * _scsih_sas_host_refresh - refreshing sas host object contents 6555 * @ioc: per adapter object 6556 * Context: user 6557 * 6558 * During port enable, fw will send topology events for every device. Its 6559 * possible that the handles may change from the previous setting, so this 6560 * code keeping handles updating if changed. 6561 */ 6562 static void 6563 _scsih_sas_host_refresh(struct MPT3SAS_ADAPTER *ioc) 6564 { 6565 u16 sz; 6566 u16 ioc_status; 6567 int i; 6568 Mpi2ConfigReply_t mpi_reply; 6569 Mpi2SasIOUnitPage0_t *sas_iounit_pg0 = NULL; 6570 u16 attached_handle; 6571 u8 link_rate, port_id; 6572 struct hba_port *port; 6573 Mpi2SasPhyPage0_t phy_pg0; 6574 6575 dtmprintk(ioc, 6576 ioc_info(ioc, "updating handles for sas_host(0x%016llx)\n", 6577 (u64)ioc->sas_hba.sas_address)); 6578 6579 sz = struct_size(sas_iounit_pg0, PhyData, ioc->sas_hba.num_phys); 6580 sas_iounit_pg0 = kzalloc(sz, GFP_KERNEL); 6581 if (!sas_iounit_pg0) { 6582 ioc_err(ioc, "failure at %s:%d/%s()!\n", 6583 __FILE__, __LINE__, __func__); 6584 return; 6585 } 6586 6587 if ((mpt3sas_config_get_sas_iounit_pg0(ioc, &mpi_reply, 6588 sas_iounit_pg0, sz)) != 0) 6589 goto out; 6590 ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & MPI2_IOCSTATUS_MASK; 6591 if (ioc_status != MPI2_IOCSTATUS_SUCCESS) 6592 goto out; 6593 for (i = 0; i < ioc->sas_hba.num_phys ; i++) { 6594 link_rate = sas_iounit_pg0->PhyData[i].NegotiatedLinkRate >> 4; 6595 if (i == 0) 6596 ioc->sas_hba.handle = le16_to_cpu( 6597 sas_iounit_pg0->PhyData[0].ControllerDevHandle); 6598 port_id = sas_iounit_pg0->PhyData[i].Port; 6599 if (!(mpt3sas_get_port_by_id(ioc, port_id, 0))) { 6600 port = kzalloc(sizeof(struct 
hba_port), GFP_KERNEL); 6601 if (!port) 6602 goto out; 6603 6604 port->port_id = port_id; 6605 ioc_info(ioc, 6606 "hba_port entry: %p, port: %d is added to hba_port list\n", 6607 port, port->port_id); 6608 if (ioc->shost_recovery) 6609 port->flags = HBA_PORT_FLAG_NEW_PORT; 6610 list_add_tail(&port->list, &ioc->port_table_list); 6611 } 6612 /* 6613 * Check whether current Phy belongs to HBA vSES device or not. 6614 */ 6615 if (le32_to_cpu(sas_iounit_pg0->PhyData[i].ControllerPhyDeviceInfo) & 6616 MPI2_SAS_DEVICE_INFO_SEP && 6617 (link_rate >= MPI2_SAS_NEG_LINK_RATE_1_5)) { 6618 if ((mpt3sas_config_get_phy_pg0(ioc, &mpi_reply, 6619 &phy_pg0, i))) { 6620 ioc_err(ioc, 6621 "failure at %s:%d/%s()!\n", 6622 __FILE__, __LINE__, __func__); 6623 goto out; 6624 } 6625 if (!(le32_to_cpu(phy_pg0.PhyInfo) & 6626 MPI2_SAS_PHYINFO_VIRTUAL_PHY)) 6627 continue; 6628 /* 6629 * Allocate a virtual_phy object for vSES device, if 6630 * this vSES device is hot added. 6631 */ 6632 if (!_scsih_alloc_vphy(ioc, port_id, i)) 6633 goto out; 6634 ioc->sas_hba.phy[i].hba_vphy = 1; 6635 } 6636 6637 /* 6638 * Add new HBA phys to STL if these new phys got added as part 6639 * of HBA Firmware upgrade/downgrade operation. 6640 */ 6641 if (!ioc->sas_hba.phy[i].phy) { 6642 if ((mpt3sas_config_get_phy_pg0(ioc, &mpi_reply, 6643 &phy_pg0, i))) { 6644 ioc_err(ioc, "failure at %s:%d/%s()!\n", 6645 __FILE__, __LINE__, __func__); 6646 continue; 6647 } 6648 ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & 6649 MPI2_IOCSTATUS_MASK; 6650 if (ioc_status != MPI2_IOCSTATUS_SUCCESS) { 6651 ioc_err(ioc, "failure at %s:%d/%s()!\n", 6652 __FILE__, __LINE__, __func__); 6653 continue; 6654 } 6655 ioc->sas_hba.phy[i].phy_id = i; 6656 mpt3sas_transport_add_host_phy(ioc, 6657 &ioc->sas_hba.phy[i], phy_pg0, 6658 ioc->sas_hba.parent_dev); 6659 continue; 6660 } 6661 ioc->sas_hba.phy[i].handle = ioc->sas_hba.handle; 6662 attached_handle = le16_to_cpu(sas_iounit_pg0->PhyData[i]. 
6663 AttachedDevHandle); 6664 if (attached_handle && link_rate < MPI2_SAS_NEG_LINK_RATE_1_5) 6665 link_rate = MPI2_SAS_NEG_LINK_RATE_1_5; 6666 ioc->sas_hba.phy[i].port = 6667 mpt3sas_get_port_by_id(ioc, port_id, 0); 6668 mpt3sas_transport_update_links(ioc, ioc->sas_hba.sas_address, 6669 attached_handle, i, link_rate, 6670 ioc->sas_hba.phy[i].port); 6671 } 6672 /* 6673 * Clear the phy details if this phy got disabled as part of 6674 * HBA Firmware upgrade/downgrade operation. 6675 */ 6676 for (i = ioc->sas_hba.num_phys; 6677 i < ioc->sas_hba.nr_phys_allocated; i++) { 6678 if (ioc->sas_hba.phy[i].phy && 6679 ioc->sas_hba.phy[i].phy->negotiated_linkrate >= 6680 SAS_LINK_RATE_1_5_GBPS) 6681 mpt3sas_transport_update_links(ioc, 6682 ioc->sas_hba.sas_address, 0, i, 6683 MPI2_SAS_NEG_LINK_RATE_PHY_DISABLED, NULL); 6684 } 6685 out: 6686 kfree(sas_iounit_pg0); 6687 } 6688 6689 /** 6690 * _scsih_sas_host_add - create sas host object 6691 * @ioc: per adapter object 6692 * 6693 * Creating host side data object, stored in ioc->sas_hba 6694 */ 6695 static void 6696 _scsih_sas_host_add(struct MPT3SAS_ADAPTER *ioc) 6697 { 6698 int i; 6699 Mpi2ConfigReply_t mpi_reply; 6700 Mpi2SasIOUnitPage0_t *sas_iounit_pg0 = NULL; 6701 Mpi2SasIOUnitPage1_t *sas_iounit_pg1 = NULL; 6702 Mpi2SasPhyPage0_t phy_pg0; 6703 Mpi2SasDevicePage0_t sas_device_pg0; 6704 Mpi2SasEnclosurePage0_t enclosure_pg0; 6705 u16 ioc_status; 6706 u16 sz; 6707 u8 device_missing_delay; 6708 u8 num_phys, port_id; 6709 struct hba_port *port; 6710 6711 mpt3sas_config_get_number_hba_phys(ioc, &num_phys); 6712 if (!num_phys) { 6713 ioc_err(ioc, "failure at %s:%d/%s()!\n", 6714 __FILE__, __LINE__, __func__); 6715 return; 6716 } 6717 6718 ioc->sas_hba.nr_phys_allocated = max_t(u8, 6719 MPT_MAX_HBA_NUM_PHYS, num_phys); 6720 ioc->sas_hba.phy = kcalloc(ioc->sas_hba.nr_phys_allocated, 6721 sizeof(struct _sas_phy), GFP_KERNEL); 6722 if (!ioc->sas_hba.phy) { 6723 ioc_err(ioc, "failure at %s:%d/%s()!\n", 6724 __FILE__, __LINE__, 
__func__); 6725 goto out; 6726 } 6727 ioc->sas_hba.num_phys = num_phys; 6728 6729 /* sas_iounit page 0 */ 6730 sz = struct_size(sas_iounit_pg0, PhyData, ioc->sas_hba.num_phys); 6731 sas_iounit_pg0 = kzalloc(sz, GFP_KERNEL); 6732 if (!sas_iounit_pg0) { 6733 ioc_err(ioc, "failure at %s:%d/%s()!\n", 6734 __FILE__, __LINE__, __func__); 6735 return; 6736 } 6737 if ((mpt3sas_config_get_sas_iounit_pg0(ioc, &mpi_reply, 6738 sas_iounit_pg0, sz))) { 6739 ioc_err(ioc, "failure at %s:%d/%s()!\n", 6740 __FILE__, __LINE__, __func__); 6741 goto out; 6742 } 6743 ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & 6744 MPI2_IOCSTATUS_MASK; 6745 if (ioc_status != MPI2_IOCSTATUS_SUCCESS) { 6746 ioc_err(ioc, "failure at %s:%d/%s()!\n", 6747 __FILE__, __LINE__, __func__); 6748 goto out; 6749 } 6750 6751 /* sas_iounit page 1 */ 6752 sz = struct_size(sas_iounit_pg1, PhyData, ioc->sas_hba.num_phys); 6753 sas_iounit_pg1 = kzalloc(sz, GFP_KERNEL); 6754 if (!sas_iounit_pg1) { 6755 ioc_err(ioc, "failure at %s:%d/%s()!\n", 6756 __FILE__, __LINE__, __func__); 6757 goto out; 6758 } 6759 if ((mpt3sas_config_get_sas_iounit_pg1(ioc, &mpi_reply, 6760 sas_iounit_pg1, sz))) { 6761 ioc_err(ioc, "failure at %s:%d/%s()!\n", 6762 __FILE__, __LINE__, __func__); 6763 goto out; 6764 } 6765 ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & 6766 MPI2_IOCSTATUS_MASK; 6767 if (ioc_status != MPI2_IOCSTATUS_SUCCESS) { 6768 ioc_err(ioc, "failure at %s:%d/%s()!\n", 6769 __FILE__, __LINE__, __func__); 6770 goto out; 6771 } 6772 6773 ioc->io_missing_delay = 6774 sas_iounit_pg1->IODeviceMissingDelay; 6775 device_missing_delay = 6776 sas_iounit_pg1->ReportDeviceMissingDelay; 6777 if (device_missing_delay & MPI2_SASIOUNIT1_REPORT_MISSING_UNIT_16) 6778 ioc->device_missing_delay = (device_missing_delay & 6779 MPI2_SASIOUNIT1_REPORT_MISSING_TIMEOUT_MASK) * 16; 6780 else 6781 ioc->device_missing_delay = device_missing_delay & 6782 MPI2_SASIOUNIT1_REPORT_MISSING_TIMEOUT_MASK; 6783 6784 ioc->sas_hba.parent_dev = 
&ioc->shost->shost_gendev; 6785 for (i = 0; i < ioc->sas_hba.num_phys ; i++) { 6786 if ((mpt3sas_config_get_phy_pg0(ioc, &mpi_reply, &phy_pg0, 6787 i))) { 6788 ioc_err(ioc, "failure at %s:%d/%s()!\n", 6789 __FILE__, __LINE__, __func__); 6790 goto out; 6791 } 6792 ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & 6793 MPI2_IOCSTATUS_MASK; 6794 if (ioc_status != MPI2_IOCSTATUS_SUCCESS) { 6795 ioc_err(ioc, "failure at %s:%d/%s()!\n", 6796 __FILE__, __LINE__, __func__); 6797 goto out; 6798 } 6799 6800 if (i == 0) 6801 ioc->sas_hba.handle = le16_to_cpu(sas_iounit_pg0-> 6802 PhyData[0].ControllerDevHandle); 6803 6804 port_id = sas_iounit_pg0->PhyData[i].Port; 6805 if (!(mpt3sas_get_port_by_id(ioc, port_id, 0))) { 6806 port = kzalloc(sizeof(struct hba_port), GFP_KERNEL); 6807 if (!port) 6808 goto out; 6809 6810 port->port_id = port_id; 6811 ioc_info(ioc, 6812 "hba_port entry: %p, port: %d is added to hba_port list\n", 6813 port, port->port_id); 6814 list_add_tail(&port->list, 6815 &ioc->port_table_list); 6816 } 6817 6818 /* 6819 * Check whether current Phy belongs to HBA vSES device or not. 6820 */ 6821 if ((le32_to_cpu(phy_pg0.PhyInfo) & 6822 MPI2_SAS_PHYINFO_VIRTUAL_PHY) && 6823 (phy_pg0.NegotiatedLinkRate >> 4) >= 6824 MPI2_SAS_NEG_LINK_RATE_1_5) { 6825 /* 6826 * Allocate a virtual_phy object for vSES device. 
6827 */ 6828 if (!_scsih_alloc_vphy(ioc, port_id, i)) 6829 goto out; 6830 ioc->sas_hba.phy[i].hba_vphy = 1; 6831 } 6832 6833 ioc->sas_hba.phy[i].handle = ioc->sas_hba.handle; 6834 ioc->sas_hba.phy[i].phy_id = i; 6835 ioc->sas_hba.phy[i].port = 6836 mpt3sas_get_port_by_id(ioc, port_id, 0); 6837 mpt3sas_transport_add_host_phy(ioc, &ioc->sas_hba.phy[i], 6838 phy_pg0, ioc->sas_hba.parent_dev); 6839 } 6840 if ((mpt3sas_config_get_sas_device_pg0(ioc, &mpi_reply, &sas_device_pg0, 6841 MPI2_SAS_DEVICE_PGAD_FORM_HANDLE, ioc->sas_hba.handle))) { 6842 ioc_err(ioc, "failure at %s:%d/%s()!\n", 6843 __FILE__, __LINE__, __func__); 6844 goto out; 6845 } 6846 ioc->sas_hba.enclosure_handle = 6847 le16_to_cpu(sas_device_pg0.EnclosureHandle); 6848 ioc->sas_hba.sas_address = le64_to_cpu(sas_device_pg0.SASAddress); 6849 ioc_info(ioc, "host_add: handle(0x%04x), sas_addr(0x%016llx), phys(%d)\n", 6850 ioc->sas_hba.handle, 6851 (u64)ioc->sas_hba.sas_address, 6852 ioc->sas_hba.num_phys); 6853 6854 if (ioc->sas_hba.enclosure_handle) { 6855 if (!(mpt3sas_config_get_enclosure_pg0(ioc, &mpi_reply, 6856 &enclosure_pg0, MPI2_SAS_ENCLOS_PGAD_FORM_HANDLE, 6857 ioc->sas_hba.enclosure_handle))) 6858 ioc->sas_hba.enclosure_logical_id = 6859 le64_to_cpu(enclosure_pg0.EnclosureLogicalID); 6860 } 6861 6862 out: 6863 kfree(sas_iounit_pg1); 6864 kfree(sas_iounit_pg0); 6865 } 6866 6867 /** 6868 * _scsih_expander_add - creating expander object 6869 * @ioc: per adapter object 6870 * @handle: expander handle 6871 * 6872 * Creating expander object, stored in ioc->sas_expander_list. 6873 * 6874 * Return: 0 for success, else error. 
6875 */ 6876 static int 6877 _scsih_expander_add(struct MPT3SAS_ADAPTER *ioc, u16 handle) 6878 { 6879 struct _sas_node *sas_expander; 6880 struct _enclosure_node *enclosure_dev; 6881 Mpi2ConfigReply_t mpi_reply; 6882 Mpi2ExpanderPage0_t expander_pg0; 6883 Mpi2ExpanderPage1_t expander_pg1; 6884 u32 ioc_status; 6885 u16 parent_handle; 6886 u64 sas_address, sas_address_parent = 0; 6887 int i; 6888 unsigned long flags; 6889 struct _sas_port *mpt3sas_port = NULL; 6890 u8 port_id; 6891 6892 int rc = 0; 6893 6894 if (!handle) 6895 return -1; 6896 6897 if (ioc->shost_recovery || ioc->pci_error_recovery) 6898 return -1; 6899 6900 if ((mpt3sas_config_get_expander_pg0(ioc, &mpi_reply, &expander_pg0, 6901 MPI2_SAS_EXPAND_PGAD_FORM_HNDL, handle))) { 6902 ioc_err(ioc, "failure at %s:%d/%s()!\n", 6903 __FILE__, __LINE__, __func__); 6904 return -1; 6905 } 6906 6907 ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & 6908 MPI2_IOCSTATUS_MASK; 6909 if (ioc_status != MPI2_IOCSTATUS_SUCCESS) { 6910 ioc_err(ioc, "failure at %s:%d/%s()!\n", 6911 __FILE__, __LINE__, __func__); 6912 return -1; 6913 } 6914 6915 /* handle out of order topology events */ 6916 parent_handle = le16_to_cpu(expander_pg0.ParentDevHandle); 6917 if (_scsih_get_sas_address(ioc, parent_handle, &sas_address_parent) 6918 != 0) { 6919 ioc_err(ioc, "failure at %s:%d/%s()!\n", 6920 __FILE__, __LINE__, __func__); 6921 return -1; 6922 } 6923 6924 port_id = expander_pg0.PhysicalPort; 6925 if (sas_address_parent != ioc->sas_hba.sas_address) { 6926 spin_lock_irqsave(&ioc->sas_node_lock, flags); 6927 sas_expander = mpt3sas_scsih_expander_find_by_sas_address(ioc, 6928 sas_address_parent, 6929 mpt3sas_get_port_by_id(ioc, port_id, 0)); 6930 spin_unlock_irqrestore(&ioc->sas_node_lock, flags); 6931 if (!sas_expander) { 6932 rc = _scsih_expander_add(ioc, parent_handle); 6933 if (rc != 0) 6934 return rc; 6935 } 6936 } 6937 6938 spin_lock_irqsave(&ioc->sas_node_lock, flags); 6939 sas_address = le64_to_cpu(expander_pg0.SASAddress); 6940 
sas_expander = mpt3sas_scsih_expander_find_by_sas_address(ioc, 6941 sas_address, mpt3sas_get_port_by_id(ioc, port_id, 0)); 6942 spin_unlock_irqrestore(&ioc->sas_node_lock, flags); 6943 6944 if (sas_expander) 6945 return 0; 6946 6947 sas_expander = kzalloc(sizeof(struct _sas_node), 6948 GFP_KERNEL); 6949 if (!sas_expander) { 6950 ioc_err(ioc, "failure at %s:%d/%s()!\n", 6951 __FILE__, __LINE__, __func__); 6952 return -1; 6953 } 6954 6955 sas_expander->handle = handle; 6956 sas_expander->num_phys = expander_pg0.NumPhys; 6957 sas_expander->sas_address_parent = sas_address_parent; 6958 sas_expander->sas_address = sas_address; 6959 sas_expander->port = mpt3sas_get_port_by_id(ioc, port_id, 0); 6960 if (!sas_expander->port) { 6961 ioc_err(ioc, "failure at %s:%d/%s()!\n", 6962 __FILE__, __LINE__, __func__); 6963 rc = -1; 6964 goto out_fail; 6965 } 6966 6967 ioc_info(ioc, "expander_add: handle(0x%04x), parent(0x%04x), sas_addr(0x%016llx), phys(%d)\n", 6968 handle, parent_handle, 6969 (u64)sas_expander->sas_address, sas_expander->num_phys); 6970 6971 if (!sas_expander->num_phys) { 6972 rc = -1; 6973 goto out_fail; 6974 } 6975 sas_expander->phy = kcalloc(sas_expander->num_phys, 6976 sizeof(struct _sas_phy), GFP_KERNEL); 6977 if (!sas_expander->phy) { 6978 ioc_err(ioc, "failure at %s:%d/%s()!\n", 6979 __FILE__, __LINE__, __func__); 6980 rc = -1; 6981 goto out_fail; 6982 } 6983 6984 INIT_LIST_HEAD(&sas_expander->sas_port_list); 6985 mpt3sas_port = mpt3sas_transport_port_add(ioc, handle, 6986 sas_address_parent, sas_expander->port); 6987 if (!mpt3sas_port) { 6988 ioc_err(ioc, "failure at %s:%d/%s()!\n", 6989 __FILE__, __LINE__, __func__); 6990 rc = -1; 6991 goto out_fail; 6992 } 6993 sas_expander->parent_dev = &mpt3sas_port->rphy->dev; 6994 sas_expander->rphy = mpt3sas_port->rphy; 6995 6996 for (i = 0 ; i < sas_expander->num_phys ; i++) { 6997 if ((mpt3sas_config_get_expander_pg1(ioc, &mpi_reply, 6998 &expander_pg1, i, handle))) { 6999 ioc_err(ioc, "failure at %s:%d/%s()!\n", 
7000 __FILE__, __LINE__, __func__); 7001 rc = -1; 7002 goto out_fail; 7003 } 7004 sas_expander->phy[i].handle = handle; 7005 sas_expander->phy[i].phy_id = i; 7006 sas_expander->phy[i].port = 7007 mpt3sas_get_port_by_id(ioc, port_id, 0); 7008 7009 if ((mpt3sas_transport_add_expander_phy(ioc, 7010 &sas_expander->phy[i], expander_pg1, 7011 sas_expander->parent_dev))) { 7012 ioc_err(ioc, "failure at %s:%d/%s()!\n", 7013 __FILE__, __LINE__, __func__); 7014 rc = -1; 7015 goto out_fail; 7016 } 7017 } 7018 7019 if (sas_expander->enclosure_handle) { 7020 enclosure_dev = 7021 mpt3sas_scsih_enclosure_find_by_handle(ioc, 7022 sas_expander->enclosure_handle); 7023 if (enclosure_dev) 7024 sas_expander->enclosure_logical_id = 7025 le64_to_cpu(enclosure_dev->pg0.EnclosureLogicalID); 7026 } 7027 7028 _scsih_expander_node_add(ioc, sas_expander); 7029 return 0; 7030 7031 out_fail: 7032 7033 if (mpt3sas_port) 7034 mpt3sas_transport_port_remove(ioc, sas_expander->sas_address, 7035 sas_address_parent, sas_expander->port); 7036 kfree(sas_expander); 7037 return rc; 7038 } 7039 7040 /** 7041 * mpt3sas_expander_remove - removing expander object 7042 * @ioc: per adapter object 7043 * @sas_address: expander sas_address 7044 * @port: hba port entry 7045 */ 7046 void 7047 mpt3sas_expander_remove(struct MPT3SAS_ADAPTER *ioc, u64 sas_address, 7048 struct hba_port *port) 7049 { 7050 struct _sas_node *sas_expander; 7051 unsigned long flags; 7052 7053 if (ioc->shost_recovery) 7054 return; 7055 7056 if (!port) 7057 return; 7058 7059 spin_lock_irqsave(&ioc->sas_node_lock, flags); 7060 sas_expander = mpt3sas_scsih_expander_find_by_sas_address(ioc, 7061 sas_address, port); 7062 spin_unlock_irqrestore(&ioc->sas_node_lock, flags); 7063 if (sas_expander) 7064 _scsih_expander_node_remove(ioc, sas_expander); 7065 } 7066 7067 /** 7068 * _scsih_done - internal SCSI_IO callback handler. 
 * @ioc: per adapter object
 * @smid: system request message index
 * @msix_index: MSIX table index supplied by the OS
 * @reply: reply message frame(lower 32bit addr)
 *
 * Callback handler when sending internal generated SCSI_IO.
 * The callback index passed is `ioc->scsih_cb_idx`
 *
 * Return: 1 meaning mf should be freed from _base_interrupt
 *         0 means the mf is freed from this function.
 *         (Note: this implementation always returns 1.)
 */
static u8
_scsih_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index, u32 reply)
{
	MPI2DefaultReply_t *mpi_reply;

	mpi_reply = mpt3sas_base_get_reply_virt_addr(ioc, reply);
	/* ignore replies that don't match the outstanding internal command */
	if (ioc->scsih_cmds.status == MPT3_CMD_NOT_USED)
		return 1;
	if (ioc->scsih_cmds.smid != smid)
		return 1;
	ioc->scsih_cmds.status |= MPT3_CMD_COMPLETE;
	if (mpi_reply) {
		/* MsgLength is presumably in 32-bit dwords (hence *4) --
		 * consistent with other users of MsgLength in this driver
		 */
		memcpy(ioc->scsih_cmds.reply, mpi_reply,
		    mpi_reply->MsgLength*4);
		ioc->scsih_cmds.status |= MPT3_CMD_REPLY_VALID;
	}
	ioc->scsih_cmds.status &= ~MPT3_CMD_PENDING;
	/* wake the issuing thread waiting on scsih_cmds.done */
	complete(&ioc->scsih_cmds.done);
	return 1;
}




#define MPT3_MAX_LUNS (255)


/**
 * _scsih_check_access_status - check access flags
 * @ioc: per adapter object
 * @sas_address: sas address
 * @handle: sas device handle
 * @access_status: errors returned during discovery of the device
 *
 * Maps the SAS Device Page 0 AccessStatus to a human readable string
 * and logs a discovery error for anything other than "no error" /
 * "needs initialization".
 *
 * Return: 0 for success, else failure
 */
static u8
_scsih_check_access_status(struct MPT3SAS_ADAPTER *ioc, u64 sas_address,
	u16 handle, u8 access_status)
{
	u8 rc = 1;
	char *desc = NULL;

	switch (access_status) {
	case MPI2_SAS_DEVICE0_ASTATUS_NO_ERRORS:
	case MPI2_SAS_DEVICE0_ASTATUS_SATA_NEEDS_INITIALIZATION:
		rc = 0;
		break;
	case MPI2_SAS_DEVICE0_ASTATUS_SATA_CAPABILITY_FAILED:
		desc = "sata capability failed";
		break;
	case MPI2_SAS_DEVICE0_ASTATUS_SATA_AFFILIATION_CONFLICT:
		desc = "sata affiliation conflict";
		break;
	case MPI2_SAS_DEVICE0_ASTATUS_ROUTE_NOT_ADDRESSABLE:
		desc = "route not addressable";
		break;
	case MPI2_SAS_DEVICE0_ASTATUS_SMP_ERROR_NOT_ADDRESSABLE:
		desc = "smp error not addressable";
		break;
	case MPI2_SAS_DEVICE0_ASTATUS_DEVICE_BLOCKED:
		desc = "device blocked";
		break;
	/* all SATA Initialization Failure (SIF) sub-codes share one text */
	case MPI2_SAS_DEVICE0_ASTATUS_SATA_INIT_FAILED:
	case MPI2_SAS_DEVICE0_ASTATUS_SIF_UNKNOWN:
	case MPI2_SAS_DEVICE0_ASTATUS_SIF_AFFILIATION_CONFLICT:
	case MPI2_SAS_DEVICE0_ASTATUS_SIF_DIAG:
	case MPI2_SAS_DEVICE0_ASTATUS_SIF_IDENTIFICATION:
	case MPI2_SAS_DEVICE0_ASTATUS_SIF_CHECK_POWER:
	case MPI2_SAS_DEVICE0_ASTATUS_SIF_PIO_SN:
	case MPI2_SAS_DEVICE0_ASTATUS_SIF_MDMA_SN:
	case MPI2_SAS_DEVICE0_ASTATUS_SIF_UDMA_SN:
	case MPI2_SAS_DEVICE0_ASTATUS_SIF_ZONING_VIOLATION:
	case MPI2_SAS_DEVICE0_ASTATUS_SIF_NOT_ADDRESSABLE:
	case MPI2_SAS_DEVICE0_ASTATUS_SIF_MAX:
		desc = "sata initialization failed";
		break;
	default:
		desc = "unknown";
		break;
	}

	if (!rc)
		return 0;

	ioc_err(ioc, "discovery errors(%s): sas_address(0x%016llx), handle(0x%04x)\n",
		desc, (u64)sas_address, handle);
	return rc;
}

/**
 * _scsih_check_device - checking device responsiveness
 * @ioc: per adapter object
 * @parent_sas_address: sas address of parent expander or sas host
 * @handle: attached device handle
 * @phy_number: phy number
 * @link_rate: new link rate
 *
 * Re-reads SAS Device Page 0 for @handle and, if the device is still
 * present and healthy, unblocks I/O to it.  Also re-syncs a firmware
 * handle change back into the cached _sas_device and its starget
 * private data.
 */
static void
_scsih_check_device(struct MPT3SAS_ADAPTER *ioc,
	u64 parent_sas_address, u16 handle, u8 phy_number, u8 link_rate)
{
	Mpi2ConfigReply_t mpi_reply;
	Mpi2SasDevicePage0_t sas_device_pg0;
	struct _sas_device *sas_device = NULL;
	struct _enclosure_node *enclosure_dev = NULL;
	u32 ioc_status;
	unsigned long flags;
	u64 sas_address;
	struct scsi_target *starget;
	struct MPT3SAS_TARGET *sas_target_priv_data;
	u32 device_info;
	struct hba_port *port;

	if ((mpt3sas_config_get_sas_device_pg0(ioc, &mpi_reply, &sas_device_pg0,
	    MPI2_SAS_DEVICE_PGAD_FORM_HANDLE, handle)))
		return;

	ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & MPI2_IOCSTATUS_MASK;
	if (ioc_status != MPI2_IOCSTATUS_SUCCESS)
		return;

	/* wide port handling ~ we need only handle device once for the phy that
	 * is matched in sas device page zero
	 */
	if (phy_number != sas_device_pg0.PhyNum)
		return;

	/* check if this is end device */
	device_info = le32_to_cpu(sas_device_pg0.DeviceInfo);
	if (!(_scsih_is_end_device(device_info)))
		return;

	/* sas_device_lock is held for the rest of the lookup/update below */
	spin_lock_irqsave(&ioc->sas_device_lock, flags);
	sas_address = le64_to_cpu(sas_device_pg0.SASAddress);
	port = mpt3sas_get_port_by_id(ioc, sas_device_pg0.PhysicalPort, 0);
	if (!port)
		goto out_unlock;
	sas_device = __mpt3sas_get_sdev_by_addr(ioc,
	    sas_address, port);

	if (!sas_device)
		goto out_unlock;

	/* firmware may reassign the device handle; propagate the change */
	if (unlikely(sas_device->handle != handle)) {
		starget = sas_device->starget;
		sas_target_priv_data = starget->hostdata;
		starget_printk(KERN_INFO, starget,
			"handle changed from(0x%04x) to (0x%04x)!!!\n",
			sas_device->handle, handle);
		sas_target_priv_data->handle = handle;
		sas_device->handle = handle;
		if (le16_to_cpu(sas_device_pg0.Flags) &
		    MPI2_SAS_DEVICE0_FLAGS_ENCL_LEVEL_VALID) {
			sas_device->enclosure_level =
				sas_device_pg0.EnclosureLevel;
			memcpy(sas_device->connector_name,
				sas_device_pg0.ConnectorName, 4);
			sas_device->connector_name[4] = '\0';
		} else {
			sas_device->enclosure_level = 0;
			sas_device->connector_name[0] = '\0';
		}

		sas_device->enclosure_handle =
			le16_to_cpu(sas_device_pg0.EnclosureHandle);
		sas_device->is_chassis_slot_valid = 0;
		enclosure_dev = mpt3sas_scsih_enclosure_find_by_handle(ioc,
			sas_device->enclosure_handle);
		if (enclosure_dev) {
			sas_device->enclosure_logical_id =
			    le64_to_cpu(enclosure_dev->pg0.EnclosureLogicalID);
			if (le16_to_cpu(enclosure_dev->pg0.Flags) &
			    MPI2_SAS_ENCLS0_FLAGS_CHASSIS_SLOT_VALID) {
				sas_device->is_chassis_slot_valid = 1;
				sas_device->chassis_slot =
					enclosure_dev->pg0.ChassisSlot;
			}
		}
	}

	/* check if device is present */
	if (!(le16_to_cpu(sas_device_pg0.Flags) &
	    MPI2_SAS_DEVICE0_FLAGS_DEVICE_PRESENT)) {
		ioc_err(ioc, "device is not present handle(0x%04x), flags!!!\n",
			handle);
		goto out_unlock;
	}

	/* check if there were any issues with discovery */
	if (_scsih_check_access_status(ioc, sas_address, handle,
	    sas_device_pg0.AccessStatus))
		goto out_unlock;

	/* device is responsive: drop the lock, then unblock I/O */
	spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
	_scsih_ublock_io_device(ioc, sas_address, port);

	if (sas_device)
		sas_device_put(sas_device);
	return;

out_unlock:
	spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
	if (sas_device)
		sas_device_put(sas_device);
}

/**
 * _scsih_add_device - creating sas device object
 * @ioc: per adapter object
 * @handle: sas device handle
 * @phy_num: phy number end device attached to
 * @is_pd: is this hidden raid component
 *
 * Creating end device object, stored in ioc->sas_device_list.
 *
 * Return: 0 for success, non-zero for failure.
7297 */ 7298 static int 7299 _scsih_add_device(struct MPT3SAS_ADAPTER *ioc, u16 handle, u8 phy_num, 7300 u8 is_pd) 7301 { 7302 Mpi2ConfigReply_t mpi_reply; 7303 Mpi2SasDevicePage0_t sas_device_pg0; 7304 struct _sas_device *sas_device; 7305 struct _enclosure_node *enclosure_dev = NULL; 7306 u32 ioc_status; 7307 u64 sas_address; 7308 u32 device_info; 7309 u8 port_id; 7310 7311 if ((mpt3sas_config_get_sas_device_pg0(ioc, &mpi_reply, &sas_device_pg0, 7312 MPI2_SAS_DEVICE_PGAD_FORM_HANDLE, handle))) { 7313 ioc_err(ioc, "failure at %s:%d/%s()!\n", 7314 __FILE__, __LINE__, __func__); 7315 return -1; 7316 } 7317 7318 ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & 7319 MPI2_IOCSTATUS_MASK; 7320 if (ioc_status != MPI2_IOCSTATUS_SUCCESS) { 7321 ioc_err(ioc, "failure at %s:%d/%s()!\n", 7322 __FILE__, __LINE__, __func__); 7323 return -1; 7324 } 7325 7326 /* check if this is end device */ 7327 device_info = le32_to_cpu(sas_device_pg0.DeviceInfo); 7328 if (!(_scsih_is_end_device(device_info))) 7329 return -1; 7330 set_bit(handle, ioc->pend_os_device_add); 7331 sas_address = le64_to_cpu(sas_device_pg0.SASAddress); 7332 7333 /* check if device is present */ 7334 if (!(le16_to_cpu(sas_device_pg0.Flags) & 7335 MPI2_SAS_DEVICE0_FLAGS_DEVICE_PRESENT)) { 7336 ioc_err(ioc, "device is not present handle(0x04%x)!!!\n", 7337 handle); 7338 return -1; 7339 } 7340 7341 /* check if there were any issues with discovery */ 7342 if (_scsih_check_access_status(ioc, sas_address, handle, 7343 sas_device_pg0.AccessStatus)) 7344 return -1; 7345 7346 port_id = sas_device_pg0.PhysicalPort; 7347 sas_device = mpt3sas_get_sdev_by_addr(ioc, 7348 sas_address, mpt3sas_get_port_by_id(ioc, port_id, 0)); 7349 if (sas_device) { 7350 clear_bit(handle, ioc->pend_os_device_add); 7351 sas_device_put(sas_device); 7352 return -1; 7353 } 7354 7355 if (sas_device_pg0.EnclosureHandle) { 7356 enclosure_dev = 7357 mpt3sas_scsih_enclosure_find_by_handle(ioc, 7358 le16_to_cpu(sas_device_pg0.EnclosureHandle)); 7359 if 
(enclosure_dev == NULL) 7360 ioc_info(ioc, "Enclosure handle(0x%04x) doesn't match with enclosure device!\n", 7361 sas_device_pg0.EnclosureHandle); 7362 } 7363 7364 sas_device = kzalloc(sizeof(struct _sas_device), 7365 GFP_KERNEL); 7366 if (!sas_device) { 7367 ioc_err(ioc, "failure at %s:%d/%s()!\n", 7368 __FILE__, __LINE__, __func__); 7369 return 0; 7370 } 7371 7372 kref_init(&sas_device->refcount); 7373 sas_device->handle = handle; 7374 if (_scsih_get_sas_address(ioc, 7375 le16_to_cpu(sas_device_pg0.ParentDevHandle), 7376 &sas_device->sas_address_parent) != 0) 7377 ioc_err(ioc, "failure at %s:%d/%s()!\n", 7378 __FILE__, __LINE__, __func__); 7379 sas_device->enclosure_handle = 7380 le16_to_cpu(sas_device_pg0.EnclosureHandle); 7381 if (sas_device->enclosure_handle != 0) 7382 sas_device->slot = 7383 le16_to_cpu(sas_device_pg0.Slot); 7384 sas_device->device_info = device_info; 7385 sas_device->sas_address = sas_address; 7386 sas_device->phy = sas_device_pg0.PhyNum; 7387 sas_device->fast_path = (le16_to_cpu(sas_device_pg0.Flags) & 7388 MPI25_SAS_DEVICE0_FLAGS_FAST_PATH_CAPABLE) ? 
1 : 0; 7389 sas_device->port = mpt3sas_get_port_by_id(ioc, port_id, 0); 7390 if (!sas_device->port) { 7391 ioc_err(ioc, "failure at %s:%d/%s()!\n", 7392 __FILE__, __LINE__, __func__); 7393 goto out; 7394 } 7395 7396 if (le16_to_cpu(sas_device_pg0.Flags) 7397 & MPI2_SAS_DEVICE0_FLAGS_ENCL_LEVEL_VALID) { 7398 sas_device->enclosure_level = 7399 sas_device_pg0.EnclosureLevel; 7400 memcpy(sas_device->connector_name, 7401 sas_device_pg0.ConnectorName, 4); 7402 sas_device->connector_name[4] = '\0'; 7403 } else { 7404 sas_device->enclosure_level = 0; 7405 sas_device->connector_name[0] = '\0'; 7406 } 7407 /* get enclosure_logical_id & chassis_slot*/ 7408 sas_device->is_chassis_slot_valid = 0; 7409 if (enclosure_dev) { 7410 sas_device->enclosure_logical_id = 7411 le64_to_cpu(enclosure_dev->pg0.EnclosureLogicalID); 7412 if (le16_to_cpu(enclosure_dev->pg0.Flags) & 7413 MPI2_SAS_ENCLS0_FLAGS_CHASSIS_SLOT_VALID) { 7414 sas_device->is_chassis_slot_valid = 1; 7415 sas_device->chassis_slot = 7416 enclosure_dev->pg0.ChassisSlot; 7417 } 7418 } 7419 7420 /* get device name */ 7421 sas_device->device_name = le64_to_cpu(sas_device_pg0.DeviceName); 7422 sas_device->port_type = sas_device_pg0.MaxPortConnections; 7423 ioc_info(ioc, 7424 "handle(0x%0x) sas_address(0x%016llx) port_type(0x%0x)\n", 7425 handle, sas_device->sas_address, sas_device->port_type); 7426 7427 if (ioc->wait_for_discovery_to_complete) 7428 _scsih_sas_device_init_add(ioc, sas_device); 7429 else 7430 _scsih_sas_device_add(ioc, sas_device); 7431 7432 out: 7433 sas_device_put(sas_device); 7434 return 0; 7435 } 7436 7437 /** 7438 * _scsih_remove_device - removing sas device object 7439 * @ioc: per adapter object 7440 * @sas_device: the sas_device object 7441 */ 7442 static void 7443 _scsih_remove_device(struct MPT3SAS_ADAPTER *ioc, 7444 struct _sas_device *sas_device) 7445 { 7446 struct MPT3SAS_TARGET *sas_target_priv_data; 7447 7448 if ((ioc->pdev->subsystem_vendor == PCI_VENDOR_ID_IBM) && 7449 (sas_device->pfa_led_on)) { 
		_scsih_turn_off_pfa_led(ioc, sas_device);
		sas_device->pfa_led_on = 0;
	}

	dewtprintk(ioc,
		   ioc_info(ioc, "%s: enter: handle(0x%04x), sas_addr(0x%016llx)\n",
			    __func__,
			    sas_device->handle, (u64)sas_device->sas_address));

	dewtprintk(ioc, _scsih_display_enclosure_chassis_info(ioc, sas_device,
	    NULL, NULL));

	/* mark the target deleted and unblock so outstanding I/O can fail */
	if (sas_device->starget && sas_device->starget->hostdata) {
		sas_target_priv_data = sas_device->starget->hostdata;
		sas_target_priv_data->deleted = 1;
		_scsih_ublock_io_device(ioc, sas_device->sas_address,
		    sas_device->port);
		sas_target_priv_data->handle =
		     MPT3SAS_INVALID_DEVICE_HANDLE;
	}

	if (!ioc->hide_drives)
		mpt3sas_transport_port_remove(ioc,
		    sas_device->sas_address,
		    sas_device->sas_address_parent,
		    sas_device->port);

	ioc_info(ioc, "removing handle(0x%04x), sas_addr(0x%016llx)\n",
		 sas_device->handle, (u64)sas_device->sas_address);

	_scsih_display_enclosure_chassis_info(ioc, sas_device, NULL, NULL);

	dewtprintk(ioc,
		   ioc_info(ioc, "%s: exit: handle(0x%04x), sas_addr(0x%016llx)\n",
			    __func__,
			    sas_device->handle, (u64)sas_device->sas_address));
	dewtprintk(ioc, _scsih_display_enclosure_chassis_info(ioc, sas_device,
	    NULL, NULL));
}

/**
 * _scsih_sas_topology_change_event_debug - debug for topology event
 * @ioc: per adapter object
 * @event_data: event data payload
 * Context: user.
 *
 * Logs the expander status and each per-phy entry of a SAS topology
 * change list event in human readable form.
 */
static void
_scsih_sas_topology_change_event_debug(struct MPT3SAS_ADAPTER *ioc,
	Mpi2EventDataSasTopologyChangeList_t *event_data)
{
	int i;
	u16 handle;
	u16 reason_code;
	u8 phy_number;
	char *status_str = NULL;
	u8 link_rate, prev_link_rate;

	switch (event_data->ExpStatus) {
	case MPI2_EVENT_SAS_TOPO_ES_ADDED:
		status_str = "add";
		break;
	case MPI2_EVENT_SAS_TOPO_ES_NOT_RESPONDING:
		status_str = "remove";
		break;
	case MPI2_EVENT_SAS_TOPO_ES_RESPONDING:
	case 0:
		status_str = "responding";
		break;
	case MPI2_EVENT_SAS_TOPO_ES_DELAY_NOT_RESPONDING:
		status_str = "remove delay";
		break;
	default:
		status_str = "unknown status";
		break;
	}
	ioc_info(ioc, "sas topology change: (%s)\n", status_str);
	pr_info("\thandle(0x%04x), enclosure_handle(0x%04x) " \
	    "start_phy(%02d), count(%d)\n",
	    le16_to_cpu(event_data->ExpanderDevHandle),
	    le16_to_cpu(event_data->EnclosureHandle),
	    event_data->StartPhyNum, event_data->NumEntries);
	for (i = 0; i < event_data->NumEntries; i++) {
		handle = le16_to_cpu(event_data->PHY[i].AttachedDevHandle);
		if (!handle)
			continue;
		phy_number = event_data->StartPhyNum + i;
		reason_code = event_data->PHY[i].PhyStatus &
		    MPI2_EVENT_SAS_TOPO_RC_MASK;
		switch (reason_code) {
		case MPI2_EVENT_SAS_TOPO_RC_TARG_ADDED:
			status_str = "target add";
			break;
		case MPI2_EVENT_SAS_TOPO_RC_TARG_NOT_RESPONDING:
			status_str = "target remove";
			break;
		case MPI2_EVENT_SAS_TOPO_RC_DELAY_NOT_RESPONDING:
			status_str = "delay target remove";
			break;
		case MPI2_EVENT_SAS_TOPO_RC_PHY_CHANGED:
			status_str = "link rate change";
			break;
		case MPI2_EVENT_SAS_TOPO_RC_NO_CHANGE:
			status_str = "target responding";
			break;
		default:
			status_str = "unknown";
			break;
		}
		/* LinkRate: upper nibble = current, lower nibble = previous */
		link_rate = event_data->PHY[i].LinkRate >> 4;
		prev_link_rate = event_data->PHY[i].LinkRate & 0xF;
		pr_info("\tphy(%02d), attached_handle(0x%04x): %s:" \
		    " link rate: new(0x%02x), old(0x%02x)\n", phy_number,
		    handle, status_str, link_rate, prev_link_rate);

	}
}

/**
 * _scsih_sas_topology_change_event - handle topology changes
 * @ioc: per adapter object
 * @fw_event: The fw_event_work object
 * Context: user.
 *
 * Walks the per-phy entries of the event: adds devices, updates link
 * rates, re-checks device responsiveness, removes non-responding
 * targets, and finally removes the expander itself if it stopped
 * responding.
 *
 * Return: 0 (always; the event is completed here).
 */
static int
_scsih_sas_topology_change_event(struct MPT3SAS_ADAPTER *ioc,
	struct fw_event_work *fw_event)
{
	int i;
	u16 parent_handle, handle;
	u16 reason_code;
	u8 phy_number, max_phys;
	struct _sas_node *sas_expander;
	u64 sas_address;
	unsigned long flags;
	u8 link_rate, prev_link_rate;
	struct hba_port *port;
	Mpi2EventDataSasTopologyChangeList_t *event_data =
		(Mpi2EventDataSasTopologyChangeList_t *)
		fw_event->event_data;

	if (ioc->logging_level & MPT_DEBUG_EVENT_WORK_TASK)
		_scsih_sas_topology_change_event_debug(ioc, event_data);

	if (ioc->shost_recovery || ioc->remove_host || ioc->pci_error_recovery)
		return 0;

	if (!ioc->sas_hba.num_phys)
		_scsih_sas_host_add(ioc);
	else
		_scsih_sas_host_refresh(ioc);

	if (fw_event->ignore) {
		dewtprintk(ioc, ioc_info(ioc, "ignoring expander event\n"));
		return 0;
	}

	parent_handle = le16_to_cpu(event_data->ExpanderDevHandle);
	port = mpt3sas_get_port_by_id(ioc, event_data->PhysicalPort, 0);

	/* handle expander add */
	if (event_data->ExpStatus == MPI2_EVENT_SAS_TOPO_ES_ADDED)
		if (_scsih_expander_add(ioc, parent_handle) != 0)
			return 0;

	spin_lock_irqsave(&ioc->sas_node_lock, flags);
	sas_expander = mpt3sas_scsih_expander_find_by_handle(ioc,
	    parent_handle);
	if (sas_expander) {
		sas_address = sas_expander->sas_address;
		max_phys = sas_expander->num_phys;
		port = sas_expander->port;
	} else if (parent_handle < ioc->sas_hba.num_phys) {
		/* parent is the HBA itself (direct attached) */
		sas_address = ioc->sas_hba.sas_address;
		max_phys = ioc->sas_hba.num_phys;
	} else {
		spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
		return 0;
	}
	spin_unlock_irqrestore(&ioc->sas_node_lock, flags);

	/* handle siblings events */
	for (i = 0; i < event_data->NumEntries; i++) {
		if (fw_event->ignore) {
			dewtprintk(ioc,
				   ioc_info(ioc, "ignoring expander event\n"));
			return 0;
		}
		if (ioc->remove_host || ioc->pci_error_recovery)
			return 0;
		phy_number = event_data->StartPhyNum + i;
		if (phy_number >= max_phys)
			continue;
		reason_code = event_data->PHY[i].PhyStatus &
		    MPI2_EVENT_SAS_TOPO_RC_MASK;
		/* skip vacant phys unless the target stopped responding */
		if ((event_data->PHY[i].PhyStatus &
		    MPI2_EVENT_SAS_TOPO_PHYSTATUS_VACANT) && (reason_code !=
		    MPI2_EVENT_SAS_TOPO_RC_TARG_NOT_RESPONDING))
			continue;
		handle = le16_to_cpu(event_data->PHY[i].AttachedDevHandle);
		if (!handle)
			continue;
		link_rate = event_data->PHY[i].LinkRate >> 4;
		prev_link_rate = event_data->PHY[i].LinkRate & 0xF;
		switch (reason_code) {
		case MPI2_EVENT_SAS_TOPO_RC_PHY_CHANGED:

			if (ioc->shost_recovery)
				break;

			if (link_rate == prev_link_rate)
				break;

			mpt3sas_transport_update_links(ioc, sas_address,
			    handle, phy_number, link_rate, port);

			if (link_rate < MPI2_SAS_NEG_LINK_RATE_1_5)
				break;

			_scsih_check_device(ioc, sas_address, handle,
			    phy_number, link_rate);

			/* device still pending OS add: fall through and
			 * treat the phy change as a target-add
			 */
			if (!test_bit(handle, ioc->pend_os_device_add))
				break;

			fallthrough;

		case MPI2_EVENT_SAS_TOPO_RC_TARG_ADDED:

			if (ioc->shost_recovery)
				break;

			mpt3sas_transport_update_links(ioc, sas_address,
			    handle, phy_number, link_rate, port);

			_scsih_add_device(ioc, handle, phy_number, 0);

			break;
		case MPI2_EVENT_SAS_TOPO_RC_TARG_NOT_RESPONDING:

			_scsih_device_remove_by_handle(ioc, handle);
			break;
		}
	}

	/* handle expander removal */
	if (event_data->ExpStatus == MPI2_EVENT_SAS_TOPO_ES_NOT_RESPONDING &&
	    sas_expander)
		mpt3sas_expander_remove(ioc, sas_address, port);

	return 0;
}

/**
 * _scsih_sas_device_status_change_event_debug - debug for device event
 * @ioc: per adapter object
 * @event_data: event data payload
 * Context: user.
 */
static void
_scsih_sas_device_status_change_event_debug(struct MPT3SAS_ADAPTER *ioc,
	Mpi2EventDataSasDeviceStatusChange_t *event_data)
{
	char *reason_str = NULL;

	switch (event_data->ReasonCode) {
	case MPI2_EVENT_SAS_DEV_STAT_RC_SMART_DATA:
		reason_str = "smart data";
		break;
	case MPI2_EVENT_SAS_DEV_STAT_RC_UNSUPPORTED:
		reason_str = "unsupported device discovered";
		break;
	case MPI2_EVENT_SAS_DEV_STAT_RC_INTERNAL_DEVICE_RESET:
		reason_str = "internal device reset";
		break;
	case MPI2_EVENT_SAS_DEV_STAT_RC_TASK_ABORT_INTERNAL:
		reason_str = "internal task abort";
		break;
	case MPI2_EVENT_SAS_DEV_STAT_RC_ABORT_TASK_SET_INTERNAL:
		reason_str = "internal task abort set";
		break;
	case MPI2_EVENT_SAS_DEV_STAT_RC_CLEAR_TASK_SET_INTERNAL:
		reason_str = "internal clear task set";
		break;
	case MPI2_EVENT_SAS_DEV_STAT_RC_QUERY_TASK_INTERNAL:
		reason_str = "internal query task";
		break;
	case MPI2_EVENT_SAS_DEV_STAT_RC_SATA_INIT_FAILURE:
		reason_str = "sata init failure";
		break;
	case MPI2_EVENT_SAS_DEV_STAT_RC_CMP_INTERNAL_DEV_RESET:
		reason_str = "internal device reset complete";
		break;
	case MPI2_EVENT_SAS_DEV_STAT_RC_CMP_TASK_ABORT_INTERNAL:
		reason_str = "internal task abort complete";
		break;
	case MPI2_EVENT_SAS_DEV_STAT_RC_ASYNC_NOTIFICATION:
		reason_str = "internal async notification";
		break;
	case MPI2_EVENT_SAS_DEV_STAT_RC_EXPANDER_REDUCED_FUNCTIONALITY:
		reason_str = "expander reduced functionality";
		break;
	case MPI2_EVENT_SAS_DEV_STAT_RC_CMP_EXPANDER_REDUCED_FUNCTIONALITY:
		reason_str = "expander reduced functionality complete";
		break;
	default:
		reason_str = "unknown reason";
		break;
	}
	/* no trailing newline here: pr_cont below may append ASC/ASCQ */
	ioc_info(ioc, "device status change: (%s)\thandle(0x%04x), sas address(0x%016llx), tag(%d)",
		 reason_str, le16_to_cpu(event_data->DevHandle),
		 (u64)le64_to_cpu(event_data->SASAddress),
		 le16_to_cpu(event_data->TaskTag));
	if (event_data->ReasonCode == MPI2_EVENT_SAS_DEV_STAT_RC_SMART_DATA)
		pr_cont(", ASC(0x%x), ASCQ(0x%x)\n",
			event_data->ASC, event_data->ASCQ);
	pr_cont("\n");
}

/**
 * _scsih_sas_device_status_change_event - handle device status change
 * @ioc: per adapter object
 * @event_data: The fw event
 * Context: user.
 *
 * Toggles the target's tm_busy flag around firmware-internal device
 * resets so task management is not issued while one is in flight.
 */
static void
_scsih_sas_device_status_change_event(struct MPT3SAS_ADAPTER *ioc,
	Mpi2EventDataSasDeviceStatusChange_t *event_data)
{
	struct MPT3SAS_TARGET *target_priv_data;
	struct _sas_device *sas_device;
	u64 sas_address;
	unsigned long flags;

	/* In MPI Revision K (0xC), the internal device reset complete was
	 * implemented, so avoid setting tm_busy flag for older firmware.
	 */
	if ((ioc->facts.HeaderVersion >> 8) < 0xC)
		return;

	/* only internal-device-reset start/complete are of interest */
	if (event_data->ReasonCode !=
	    MPI2_EVENT_SAS_DEV_STAT_RC_INTERNAL_DEVICE_RESET &&
	    event_data->ReasonCode !=
	    MPI2_EVENT_SAS_DEV_STAT_RC_CMP_INTERNAL_DEV_RESET)
		return;

	spin_lock_irqsave(&ioc->sas_device_lock, flags);
	sas_address = le64_to_cpu(event_data->SASAddress);
	sas_device = __mpt3sas_get_sdev_by_addr(ioc,
	    sas_address,
	    mpt3sas_get_port_by_id(ioc, event_data->PhysicalPort, 0));

	if (!sas_device || !sas_device->starget)
		goto out;

	target_priv_data = sas_device->starget->hostdata;
	if (!target_priv_data)
		goto out;

	/* set while a firmware-internal device reset is outstanding,
	 * cleared on its completion event
	 */
	if (event_data->ReasonCode ==
	    MPI2_EVENT_SAS_DEV_STAT_RC_INTERNAL_DEVICE_RESET)
		target_priv_data->tm_busy = 1;
	else
		target_priv_data->tm_busy = 0;

	if (ioc->logging_level & MPT_DEBUG_EVENT_WORK_TASK)
		ioc_info(ioc,
		    "%s tm_busy flag for handle(0x%04x)\n",
		    (target_priv_data->tm_busy == 1) ? "Enable" : "Disable",
		    target_priv_data->handle);

out:
	if (sas_device)
		sas_device_put(sas_device);

	spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
}


/**
 * _scsih_check_pcie_access_status - check access flags
 * @ioc: per adapter object
 * @wwid: wwid
 * @handle: sas device handle
 * @access_status: errors returned during discovery of the device
 *
 * Return: 0 for success, else failure
 */
static u8
_scsih_check_pcie_access_status(struct MPT3SAS_ADAPTER *ioc, u64 wwid,
	u16 handle, u8 access_status)
{
	u8 rc = 1;
	char *desc = NULL;

	switch (access_status) {
	case MPI26_PCIEDEV0_ASTATUS_NO_ERRORS:
	case MPI26_PCIEDEV0_ASTATUS_NEEDS_INITIALIZATION:
		rc = 0;
		break;
	case MPI26_PCIEDEV0_ASTATUS_CAPABILITY_FAILED:
		desc = "PCIe device capability failed";
		break;
	case MPI26_PCIEDEV0_ASTATUS_DEVICE_BLOCKED:
		desc = "PCIe device blocked";
		/* blocked devices are still tracked internally (rc = 0) */
		/* NOTE(review): message text below looks truncated
		 * ("... ll only be added ...") -- verify against the
		 * upstream driver before changing it
		 */
		ioc_info(ioc,
		    "Device with Access Status (%s): wwid(0x%016llx), "
		    "handle(0x%04x)\n ll only be added to the internal list",
		    desc, (u64)wwid, handle);
		rc = 0;
		break;
	case MPI26_PCIEDEV0_ASTATUS_MEMORY_SPACE_ACCESS_FAILED:
		desc = "PCIe device mem space access failed";
		break;
	case MPI26_PCIEDEV0_ASTATUS_UNSUPPORTED_DEVICE:
		desc = "PCIe device unsupported";
		break;
	case MPI26_PCIEDEV0_ASTATUS_MSIX_REQUIRED:
		desc = "PCIe device MSIx Required";
		break;
	case MPI26_PCIEDEV0_ASTATUS_INIT_FAIL_MAX:
		desc = "PCIe device init fail max";
		break;
	case MPI26_PCIEDEV0_ASTATUS_UNKNOWN:
		desc = "PCIe device status unknown";
		break;
	case MPI26_PCIEDEV0_ASTATUS_NVME_READY_TIMEOUT:
		desc = "nvme ready timeout";
		break;
	case MPI26_PCIEDEV0_ASTATUS_NVME_DEVCFG_UNSUPPORTED:
		desc = "nvme device configuration unsupported";
		break;
	case MPI26_PCIEDEV0_ASTATUS_NVME_IDENTIFY_FAILED:
		desc = "nvme identify failed";
		break;
	case MPI26_PCIEDEV0_ASTATUS_NVME_QCONFIG_FAILED:
		desc = "nvme qconfig failed";
		break;
	case MPI26_PCIEDEV0_ASTATUS_NVME_QCREATION_FAILED:
		desc = "nvme qcreation failed";
		break;
	case MPI26_PCIEDEV0_ASTATUS_NVME_EVENTCFG_FAILED:
		desc = "nvme eventcfg failed";
		break;
	case MPI26_PCIEDEV0_ASTATUS_NVME_GET_FEATURE_STAT_FAILED:
		desc = "nvme get feature stat failed";
		break;
	case MPI26_PCIEDEV0_ASTATUS_NVME_IDLE_TIMEOUT:
		desc = "nvme idle timeout";
		break;
	case MPI26_PCIEDEV0_ASTATUS_NVME_FAILURE_STATUS:
		desc = "nvme failure status";
		break;
	default:
		ioc_err(ioc, "NVMe discovery error(0x%02x): wwid(0x%016llx), handle(0x%04x)\n",
			access_status, (u64)wwid, handle);
		return rc;
	}

	if (!rc)
		return rc;

	ioc_info(ioc, "NVMe discovery error(%s): wwid(0x%016llx), handle(0x%04x)\n",
		 desc, (u64)wwid, handle);
	return rc;
}

/**
 * _scsih_pcie_device_remove_from_sml -  removing pcie device
 * from SML and free up associated memory
 * @ioc: per adapter object
 * @pcie_device: the pcie_device object
 *
 * Marks the target deleted, unblocks I/O so it can fail out, removes
 * the scsi target (unless the device was "blocked", i.e. never exposed
 * to the SML), and frees the cached serial number.
 */
static void
_scsih_pcie_device_remove_from_sml(struct MPT3SAS_ADAPTER *ioc,
	struct _pcie_device *pcie_device)
{
	struct MPT3SAS_TARGET *sas_target_priv_data;

	dewtprintk(ioc,
		   ioc_info(ioc, "%s: enter: handle(0x%04x), wwid(0x%016llx)\n",
			    __func__,
			    pcie_device->handle, (u64)pcie_device->wwid));
	if (pcie_device->enclosure_handle != 0)
		dewtprintk(ioc,
			   ioc_info(ioc, "%s: enter: enclosure logical id(0x%016llx), slot(%d)\n",
				    __func__,
				    (u64)pcie_device->enclosure_logical_id,
				    pcie_device->slot));
	if (pcie_device->connector_name[0] != '\0')
		dewtprintk(ioc,
			   ioc_info(ioc, "%s: enter: enclosure level(0x%04x), connector name(%s)\n",
				    __func__,
				    pcie_device->enclosure_level,
				    pcie_device->connector_name));

	if (pcie_device->starget && pcie_device->starget->hostdata) {
		sas_target_priv_data = pcie_device->starget->hostdata;
		sas_target_priv_data->deleted = 1;
		_scsih_ublock_io_device(ioc, pcie_device->wwid, NULL);
		sas_target_priv_data->handle = MPT3SAS_INVALID_DEVICE_HANDLE;
	}

	ioc_info(ioc, "removing handle(0x%04x), wwid(0x%016llx)\n",
		 pcie_device->handle, (u64)pcie_device->wwid);
	if (pcie_device->enclosure_handle != 0)
		ioc_info(ioc, "removing : enclosure logical id(0x%016llx), slot(%d)\n",
			 (u64)pcie_device->enclosure_logical_id,
			 pcie_device->slot);
	if (pcie_device->connector_name[0] != '\0')
		ioc_info(ioc, "removing: enclosure level(0x%04x), connector name( %s)\n",
			 pcie_device->enclosure_level,
			 pcie_device->connector_name);

	/* blocked devices were never registered with the SML */
	if (pcie_device->starget && (pcie_device->access_status !=
				MPI26_PCIEDEV0_ASTATUS_DEVICE_BLOCKED))
		scsi_remove_target(&pcie_device->starget->dev);
	dewtprintk(ioc,
		   ioc_info(ioc, "%s: exit: handle(0x%04x), wwid(0x%016llx)\n",
			    __func__,
			    pcie_device->handle, (u64)pcie_device->wwid));
	if (pcie_device->enclosure_handle != 0)
		dewtprintk(ioc,
			   ioc_info(ioc, "%s: exit: enclosure logical id(0x%016llx), slot(%d)\n",
				    __func__,
				    (u64)pcie_device->enclosure_logical_id,
				    pcie_device->slot));
	if (pcie_device->connector_name[0] != '\0')
		dewtprintk(ioc,
			   ioc_info(ioc, "%s: exit: enclosure level(0x%04x), connector name( %s)\n",
				    __func__,
				    pcie_device->enclosure_level,
				    pcie_device->connector_name));

	kfree(pcie_device->serial_number);
}


/**
 * _scsih_pcie_check_device - checking device responsiveness
 * @ioc: per adapter object
 * @handle: attached device handle
 */
static void
_scsih_pcie_check_device(struct MPT3SAS_ADAPTER *ioc, u16 handle)
{
Mpi2ConfigReply_t mpi_reply; 7997 Mpi26PCIeDevicePage0_t pcie_device_pg0; 7998 u32 ioc_status; 7999 struct _pcie_device *pcie_device; 8000 u64 wwid; 8001 unsigned long flags; 8002 struct scsi_target *starget; 8003 struct MPT3SAS_TARGET *sas_target_priv_data; 8004 u32 device_info; 8005 8006 if ((mpt3sas_config_get_pcie_device_pg0(ioc, &mpi_reply, 8007 &pcie_device_pg0, MPI26_PCIE_DEVICE_PGAD_FORM_HANDLE, handle))) 8008 return; 8009 8010 ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & MPI2_IOCSTATUS_MASK; 8011 if (ioc_status != MPI2_IOCSTATUS_SUCCESS) 8012 return; 8013 8014 /* check if this is end device */ 8015 device_info = le32_to_cpu(pcie_device_pg0.DeviceInfo); 8016 if (!(_scsih_is_nvme_pciescsi_device(device_info))) 8017 return; 8018 8019 wwid = le64_to_cpu(pcie_device_pg0.WWID); 8020 spin_lock_irqsave(&ioc->pcie_device_lock, flags); 8021 pcie_device = __mpt3sas_get_pdev_by_wwid(ioc, wwid); 8022 8023 if (!pcie_device) { 8024 spin_unlock_irqrestore(&ioc->pcie_device_lock, flags); 8025 return; 8026 } 8027 8028 if (unlikely(pcie_device->handle != handle)) { 8029 starget = pcie_device->starget; 8030 sas_target_priv_data = starget->hostdata; 8031 pcie_device->access_status = pcie_device_pg0.AccessStatus; 8032 starget_printk(KERN_INFO, starget, 8033 "handle changed from(0x%04x) to (0x%04x)!!!\n", 8034 pcie_device->handle, handle); 8035 sas_target_priv_data->handle = handle; 8036 pcie_device->handle = handle; 8037 8038 if (le32_to_cpu(pcie_device_pg0.Flags) & 8039 MPI26_PCIEDEV0_FLAGS_ENCL_LEVEL_VALID) { 8040 pcie_device->enclosure_level = 8041 pcie_device_pg0.EnclosureLevel; 8042 memcpy(&pcie_device->connector_name[0], 8043 &pcie_device_pg0.ConnectorName[0], 4); 8044 } else { 8045 pcie_device->enclosure_level = 0; 8046 pcie_device->connector_name[0] = '\0'; 8047 } 8048 } 8049 8050 /* check if device is present */ 8051 if (!(le32_to_cpu(pcie_device_pg0.Flags) & 8052 MPI26_PCIEDEV0_FLAGS_DEVICE_PRESENT)) { 8053 ioc_info(ioc, "device is not present handle(0x%04x), 
flags!!!\n", 8054 handle); 8055 spin_unlock_irqrestore(&ioc->pcie_device_lock, flags); 8056 pcie_device_put(pcie_device); 8057 return; 8058 } 8059 8060 /* check if there were any issues with discovery */ 8061 if (_scsih_check_pcie_access_status(ioc, wwid, handle, 8062 pcie_device_pg0.AccessStatus)) { 8063 spin_unlock_irqrestore(&ioc->pcie_device_lock, flags); 8064 pcie_device_put(pcie_device); 8065 return; 8066 } 8067 8068 spin_unlock_irqrestore(&ioc->pcie_device_lock, flags); 8069 pcie_device_put(pcie_device); 8070 8071 _scsih_ublock_io_device(ioc, wwid, NULL); 8072 8073 return; 8074 } 8075 8076 /** 8077 * _scsih_pcie_add_device - creating pcie device object 8078 * @ioc: per adapter object 8079 * @handle: pcie device handle 8080 * 8081 * Creating end device object, stored in ioc->pcie_device_list. 8082 * 8083 * Return: 1 means queue the event later, 0 means complete the event 8084 */ 8085 static int 8086 _scsih_pcie_add_device(struct MPT3SAS_ADAPTER *ioc, u16 handle) 8087 { 8088 Mpi26PCIeDevicePage0_t pcie_device_pg0; 8089 Mpi26PCIeDevicePage2_t pcie_device_pg2; 8090 Mpi2ConfigReply_t mpi_reply; 8091 struct _pcie_device *pcie_device; 8092 struct _enclosure_node *enclosure_dev; 8093 u32 ioc_status; 8094 u64 wwid; 8095 8096 if ((mpt3sas_config_get_pcie_device_pg0(ioc, &mpi_reply, 8097 &pcie_device_pg0, MPI26_PCIE_DEVICE_PGAD_FORM_HANDLE, handle))) { 8098 ioc_err(ioc, "failure at %s:%d/%s()!\n", 8099 __FILE__, __LINE__, __func__); 8100 return 0; 8101 } 8102 ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & 8103 MPI2_IOCSTATUS_MASK; 8104 if (ioc_status != MPI2_IOCSTATUS_SUCCESS) { 8105 ioc_err(ioc, "failure at %s:%d/%s()!\n", 8106 __FILE__, __LINE__, __func__); 8107 return 0; 8108 } 8109 8110 set_bit(handle, ioc->pend_os_device_add); 8111 wwid = le64_to_cpu(pcie_device_pg0.WWID); 8112 8113 /* check if device is present */ 8114 if (!(le32_to_cpu(pcie_device_pg0.Flags) & 8115 MPI26_PCIEDEV0_FLAGS_DEVICE_PRESENT)) { 8116 ioc_err(ioc, "device is not present 
handle(0x04%x)!!!\n", 8117 handle); 8118 return 0; 8119 } 8120 8121 /* check if there were any issues with discovery */ 8122 if (_scsih_check_pcie_access_status(ioc, wwid, handle, 8123 pcie_device_pg0.AccessStatus)) 8124 return 0; 8125 8126 if (!(_scsih_is_nvme_pciescsi_device(le32_to_cpu 8127 (pcie_device_pg0.DeviceInfo)))) 8128 return 0; 8129 8130 pcie_device = mpt3sas_get_pdev_by_wwid(ioc, wwid); 8131 if (pcie_device) { 8132 clear_bit(handle, ioc->pend_os_device_add); 8133 pcie_device_put(pcie_device); 8134 return 0; 8135 } 8136 8137 /* PCIe Device Page 2 contains read-only information about a 8138 * specific NVMe device; therefore, this page is only 8139 * valid for NVMe devices and skip for pcie devices of type scsi. 8140 */ 8141 if (!(mpt3sas_scsih_is_pcie_scsi_device( 8142 le32_to_cpu(pcie_device_pg0.DeviceInfo)))) { 8143 if (mpt3sas_config_get_pcie_device_pg2(ioc, &mpi_reply, 8144 &pcie_device_pg2, MPI2_SAS_DEVICE_PGAD_FORM_HANDLE, 8145 handle)) { 8146 ioc_err(ioc, 8147 "failure at %s:%d/%s()!\n", __FILE__, 8148 __LINE__, __func__); 8149 return 0; 8150 } 8151 8152 ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & 8153 MPI2_IOCSTATUS_MASK; 8154 if (ioc_status != MPI2_IOCSTATUS_SUCCESS) { 8155 ioc_err(ioc, 8156 "failure at %s:%d/%s()!\n", __FILE__, 8157 __LINE__, __func__); 8158 return 0; 8159 } 8160 } 8161 8162 pcie_device = kzalloc(sizeof(struct _pcie_device), GFP_KERNEL); 8163 if (!pcie_device) { 8164 ioc_err(ioc, "failure at %s:%d/%s()!\n", 8165 __FILE__, __LINE__, __func__); 8166 return 0; 8167 } 8168 8169 kref_init(&pcie_device->refcount); 8170 pcie_device->id = ioc->pcie_target_id++; 8171 pcie_device->channel = PCIE_CHANNEL; 8172 pcie_device->handle = handle; 8173 pcie_device->access_status = pcie_device_pg0.AccessStatus; 8174 pcie_device->device_info = le32_to_cpu(pcie_device_pg0.DeviceInfo); 8175 pcie_device->wwid = wwid; 8176 pcie_device->port_num = pcie_device_pg0.PortNum; 8177 pcie_device->fast_path = (le32_to_cpu(pcie_device_pg0.Flags) & 8178 
MPI26_PCIEDEV0_FLAGS_FAST_PATH_CAPABLE) ? 1 : 0; 8179 8180 pcie_device->enclosure_handle = 8181 le16_to_cpu(pcie_device_pg0.EnclosureHandle); 8182 if (pcie_device->enclosure_handle != 0) 8183 pcie_device->slot = le16_to_cpu(pcie_device_pg0.Slot); 8184 8185 if (le32_to_cpu(pcie_device_pg0.Flags) & 8186 MPI26_PCIEDEV0_FLAGS_ENCL_LEVEL_VALID) { 8187 pcie_device->enclosure_level = pcie_device_pg0.EnclosureLevel; 8188 memcpy(&pcie_device->connector_name[0], 8189 &pcie_device_pg0.ConnectorName[0], 4); 8190 } else { 8191 pcie_device->enclosure_level = 0; 8192 pcie_device->connector_name[0] = '\0'; 8193 } 8194 8195 /* get enclosure_logical_id */ 8196 if (pcie_device->enclosure_handle) { 8197 enclosure_dev = 8198 mpt3sas_scsih_enclosure_find_by_handle(ioc, 8199 pcie_device->enclosure_handle); 8200 if (enclosure_dev) 8201 pcie_device->enclosure_logical_id = 8202 le64_to_cpu(enclosure_dev->pg0.EnclosureLogicalID); 8203 } 8204 /* TODO -- Add device name once FW supports it */ 8205 if (!(mpt3sas_scsih_is_pcie_scsi_device( 8206 le32_to_cpu(pcie_device_pg0.DeviceInfo)))) { 8207 pcie_device->nvme_mdts = 8208 le32_to_cpu(pcie_device_pg2.MaximumDataTransferSize); 8209 pcie_device->shutdown_latency = 8210 le16_to_cpu(pcie_device_pg2.ShutdownLatency); 8211 /* 8212 * Set IOC's max_shutdown_latency to drive's RTD3 Entry Latency 8213 * if drive's RTD3 Entry Latency is greater then IOC's 8214 * max_shutdown_latency. 
8215 */ 8216 if (pcie_device->shutdown_latency > ioc->max_shutdown_latency) 8217 ioc->max_shutdown_latency = 8218 pcie_device->shutdown_latency; 8219 if (pcie_device_pg2.ControllerResetTO) 8220 pcie_device->reset_timeout = 8221 pcie_device_pg2.ControllerResetTO; 8222 else 8223 pcie_device->reset_timeout = 30; 8224 } else 8225 pcie_device->reset_timeout = 30; 8226 8227 if (ioc->wait_for_discovery_to_complete) 8228 _scsih_pcie_device_init_add(ioc, pcie_device); 8229 else 8230 _scsih_pcie_device_add(ioc, pcie_device); 8231 8232 pcie_device_put(pcie_device); 8233 return 0; 8234 } 8235 8236 /** 8237 * _scsih_pcie_topology_change_event_debug - debug for topology 8238 * event 8239 * @ioc: per adapter object 8240 * @event_data: event data payload 8241 * Context: user. 8242 */ 8243 static void 8244 _scsih_pcie_topology_change_event_debug(struct MPT3SAS_ADAPTER *ioc, 8245 Mpi26EventDataPCIeTopologyChangeList_t *event_data) 8246 { 8247 int i; 8248 u16 handle; 8249 u16 reason_code; 8250 u8 port_number; 8251 char *status_str = NULL; 8252 u8 link_rate, prev_link_rate; 8253 8254 switch (event_data->SwitchStatus) { 8255 case MPI26_EVENT_PCIE_TOPO_SS_ADDED: 8256 status_str = "add"; 8257 break; 8258 case MPI26_EVENT_PCIE_TOPO_SS_NOT_RESPONDING: 8259 status_str = "remove"; 8260 break; 8261 case MPI26_EVENT_PCIE_TOPO_SS_RESPONDING: 8262 case 0: 8263 status_str = "responding"; 8264 break; 8265 case MPI26_EVENT_PCIE_TOPO_SS_DELAY_NOT_RESPONDING: 8266 status_str = "remove delay"; 8267 break; 8268 default: 8269 status_str = "unknown status"; 8270 break; 8271 } 8272 ioc_info(ioc, "pcie topology change: (%s)\n", status_str); 8273 pr_info("\tswitch_handle(0x%04x), enclosure_handle(0x%04x)" 8274 "start_port(%02d), count(%d)\n", 8275 le16_to_cpu(event_data->SwitchDevHandle), 8276 le16_to_cpu(event_data->EnclosureHandle), 8277 event_data->StartPortNum, event_data->NumEntries); 8278 for (i = 0; i < event_data->NumEntries; i++) { 8279 handle = 8280 
le16_to_cpu(event_data->PortEntry[i].AttachedDevHandle); 8281 if (!handle) 8282 continue; 8283 port_number = event_data->StartPortNum + i; 8284 reason_code = event_data->PortEntry[i].PortStatus; 8285 switch (reason_code) { 8286 case MPI26_EVENT_PCIE_TOPO_PS_DEV_ADDED: 8287 status_str = "target add"; 8288 break; 8289 case MPI26_EVENT_PCIE_TOPO_PS_NOT_RESPONDING: 8290 status_str = "target remove"; 8291 break; 8292 case MPI26_EVENT_PCIE_TOPO_PS_DELAY_NOT_RESPONDING: 8293 status_str = "delay target remove"; 8294 break; 8295 case MPI26_EVENT_PCIE_TOPO_PS_PORT_CHANGED: 8296 status_str = "link rate change"; 8297 break; 8298 case MPI26_EVENT_PCIE_TOPO_PS_NO_CHANGE: 8299 status_str = "target responding"; 8300 break; 8301 default: 8302 status_str = "unknown"; 8303 break; 8304 } 8305 link_rate = event_data->PortEntry[i].CurrentPortInfo & 8306 MPI26_EVENT_PCIE_TOPO_PI_RATE_MASK; 8307 prev_link_rate = event_data->PortEntry[i].PreviousPortInfo & 8308 MPI26_EVENT_PCIE_TOPO_PI_RATE_MASK; 8309 pr_info("\tport(%02d), attached_handle(0x%04x): %s:" 8310 " link rate: new(0x%02x), old(0x%02x)\n", port_number, 8311 handle, status_str, link_rate, prev_link_rate); 8312 } 8313 } 8314 8315 /** 8316 * _scsih_pcie_topology_change_event - handle PCIe topology 8317 * changes 8318 * @ioc: per adapter object 8319 * @fw_event: The fw_event_work object 8320 * Context: user. 
 *
 */
static void
_scsih_pcie_topology_change_event(struct MPT3SAS_ADAPTER *ioc,
	struct fw_event_work *fw_event)
{
	int i;
	u16 handle;
	u16 reason_code;
	u8 link_rate, prev_link_rate;
	unsigned long flags;
	int rc;
	Mpi26EventDataPCIeTopologyChangeList_t *event_data =
		(Mpi26EventDataPCIeTopologyChangeList_t *) fw_event->event_data;
	struct _pcie_device *pcie_device;

	if (ioc->logging_level & MPT_DEBUG_EVENT_WORK_TASK)
		_scsih_pcie_topology_change_event_debug(ioc, event_data);

	/* Bail out while the adapter is resetting, being removed, or in
	 * PCI error recovery — the topology will be rescanned afterwards.
	 */
	if (ioc->shost_recovery || ioc->remove_host ||
	    ioc->pci_error_recovery)
		return;

	if (fw_event->ignore) {
		dewtprintk(ioc, ioc_info(ioc, "ignoring switch event\n"));
		return;
	}

	/* handle siblings events */
	for (i = 0; i < event_data->NumEntries; i++) {
		/* fw_event->ignore may be set asynchronously; re-check on
		 * every iteration so a stale event is abandoned promptly.
		 */
		if (fw_event->ignore) {
			dewtprintk(ioc,
				   ioc_info(ioc, "ignoring switch event\n"));
			return;
		}
		if (ioc->remove_host || ioc->pci_error_recovery)
			return;
		reason_code = event_data->PortEntry[i].PortStatus;
		handle =
			le16_to_cpu(event_data->PortEntry[i].AttachedDevHandle);
		if (!handle)
			continue;

		link_rate = event_data->PortEntry[i].CurrentPortInfo
			& MPI26_EVENT_PCIE_TOPO_PI_RATE_MASK;
		prev_link_rate = event_data->PortEntry[i].PreviousPortInfo
			& MPI26_EVENT_PCIE_TOPO_PI_RATE_MASK;

		switch (reason_code) {
		case MPI26_EVENT_PCIE_TOPO_PS_PORT_CHANGED:
			if (ioc->shost_recovery)
				break;
			if (link_rate == prev_link_rate)
				break;
			if (link_rate < MPI26_EVENT_PCIE_TOPO_PI_RATE_2_5)
				break;

			_scsih_pcie_check_device(ioc, handle);

			/* This code after this point handles the test case
			 * where a device has been added, however its returning
			 * BUSY for sometime. Then before the Device Missing
			 * Delay expires and the device becomes READY, the
			 * device is removed and added back.
			 */
			spin_lock_irqsave(&ioc->pcie_device_lock, flags);
			pcie_device = __mpt3sas_get_pdev_by_handle(ioc, handle);
			spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);

			/* Device already known — nothing more to do. */
			if (pcie_device) {
				pcie_device_put(pcie_device);
				break;
			}

			if (!test_bit(handle, ioc->pend_os_device_add))
				break;

			dewtprintk(ioc,
				   ioc_info(ioc, "handle(0x%04x) device not found: convert event to a device add\n",
					    handle));
			/* Rewrite this entry in place so the DEV_ADDED case
			 * below processes it as a fresh device-add.
			 */
			event_data->PortEntry[i].PortStatus &= 0xF0;
			event_data->PortEntry[i].PortStatus |=
				MPI26_EVENT_PCIE_TOPO_PS_DEV_ADDED;
			fallthrough;
		case MPI26_EVENT_PCIE_TOPO_PS_DEV_ADDED:
			if (ioc->shost_recovery)
				break;
			if (link_rate < MPI26_EVENT_PCIE_TOPO_PI_RATE_2_5)
				break;

			rc = _scsih_pcie_add_device(ioc, handle);
			if (!rc) {
				/* mark entry vacant */
				/* TODO This needs to be reviewed and fixed,
				 * we dont have an entry
				 * to make an event void like vacant
				 */
				event_data->PortEntry[i].PortStatus |=
					MPI26_EVENT_PCIE_TOPO_PS_NO_CHANGE;
			}
			break;
		case MPI26_EVENT_PCIE_TOPO_PS_NOT_RESPONDING:
			_scsih_pcie_device_remove_by_handle(ioc, handle);
			break;
		}
	}
}

/**
 * _scsih_pcie_device_status_change_event_debug - debug for device event
 * @ioc: per adapter object
 * @event_data: event data payload
 * Context: user.
8434 */ 8435 static void 8436 _scsih_pcie_device_status_change_event_debug(struct MPT3SAS_ADAPTER *ioc, 8437 Mpi26EventDataPCIeDeviceStatusChange_t *event_data) 8438 { 8439 char *reason_str = NULL; 8440 8441 switch (event_data->ReasonCode) { 8442 case MPI26_EVENT_PCIDEV_STAT_RC_SMART_DATA: 8443 reason_str = "smart data"; 8444 break; 8445 case MPI26_EVENT_PCIDEV_STAT_RC_UNSUPPORTED: 8446 reason_str = "unsupported device discovered"; 8447 break; 8448 case MPI26_EVENT_PCIDEV_STAT_RC_INTERNAL_DEVICE_RESET: 8449 reason_str = "internal device reset"; 8450 break; 8451 case MPI26_EVENT_PCIDEV_STAT_RC_TASK_ABORT_INTERNAL: 8452 reason_str = "internal task abort"; 8453 break; 8454 case MPI26_EVENT_PCIDEV_STAT_RC_ABORT_TASK_SET_INTERNAL: 8455 reason_str = "internal task abort set"; 8456 break; 8457 case MPI26_EVENT_PCIDEV_STAT_RC_CLEAR_TASK_SET_INTERNAL: 8458 reason_str = "internal clear task set"; 8459 break; 8460 case MPI26_EVENT_PCIDEV_STAT_RC_QUERY_TASK_INTERNAL: 8461 reason_str = "internal query task"; 8462 break; 8463 case MPI26_EVENT_PCIDEV_STAT_RC_DEV_INIT_FAILURE: 8464 reason_str = "device init failure"; 8465 break; 8466 case MPI26_EVENT_PCIDEV_STAT_RC_CMP_INTERNAL_DEV_RESET: 8467 reason_str = "internal device reset complete"; 8468 break; 8469 case MPI26_EVENT_PCIDEV_STAT_RC_CMP_TASK_ABORT_INTERNAL: 8470 reason_str = "internal task abort complete"; 8471 break; 8472 case MPI26_EVENT_PCIDEV_STAT_RC_ASYNC_NOTIFICATION: 8473 reason_str = "internal async notification"; 8474 break; 8475 case MPI26_EVENT_PCIDEV_STAT_RC_PCIE_HOT_RESET_FAILED: 8476 reason_str = "pcie hot reset failed"; 8477 break; 8478 default: 8479 reason_str = "unknown reason"; 8480 break; 8481 } 8482 8483 ioc_info(ioc, "PCIE device status change: (%s)\n" 8484 "\thandle(0x%04x), WWID(0x%016llx), tag(%d)", 8485 reason_str, le16_to_cpu(event_data->DevHandle), 8486 (u64)le64_to_cpu(event_data->WWID), 8487 le16_to_cpu(event_data->TaskTag)); 8488 if (event_data->ReasonCode == 
MPI26_EVENT_PCIDEV_STAT_RC_SMART_DATA) 8489 pr_cont(", ASC(0x%x), ASCQ(0x%x)\n", 8490 event_data->ASC, event_data->ASCQ); 8491 pr_cont("\n"); 8492 } 8493 8494 /** 8495 * _scsih_pcie_device_status_change_event - handle device status 8496 * change 8497 * @ioc: per adapter object 8498 * @fw_event: The fw_event_work object 8499 * Context: user. 8500 */ 8501 static void 8502 _scsih_pcie_device_status_change_event(struct MPT3SAS_ADAPTER *ioc, 8503 struct fw_event_work *fw_event) 8504 { 8505 struct MPT3SAS_TARGET *target_priv_data; 8506 struct _pcie_device *pcie_device; 8507 u64 wwid; 8508 unsigned long flags; 8509 Mpi26EventDataPCIeDeviceStatusChange_t *event_data = 8510 (Mpi26EventDataPCIeDeviceStatusChange_t *)fw_event->event_data; 8511 if (ioc->logging_level & MPT_DEBUG_EVENT_WORK_TASK) 8512 _scsih_pcie_device_status_change_event_debug(ioc, 8513 event_data); 8514 8515 if (event_data->ReasonCode != 8516 MPI26_EVENT_PCIDEV_STAT_RC_INTERNAL_DEVICE_RESET && 8517 event_data->ReasonCode != 8518 MPI26_EVENT_PCIDEV_STAT_RC_CMP_INTERNAL_DEV_RESET) 8519 return; 8520 8521 spin_lock_irqsave(&ioc->pcie_device_lock, flags); 8522 wwid = le64_to_cpu(event_data->WWID); 8523 pcie_device = __mpt3sas_get_pdev_by_wwid(ioc, wwid); 8524 8525 if (!pcie_device || !pcie_device->starget) 8526 goto out; 8527 8528 target_priv_data = pcie_device->starget->hostdata; 8529 if (!target_priv_data) 8530 goto out; 8531 8532 if (event_data->ReasonCode == 8533 MPI26_EVENT_PCIDEV_STAT_RC_INTERNAL_DEVICE_RESET) 8534 target_priv_data->tm_busy = 1; 8535 else 8536 target_priv_data->tm_busy = 0; 8537 out: 8538 if (pcie_device) 8539 pcie_device_put(pcie_device); 8540 8541 spin_unlock_irqrestore(&ioc->pcie_device_lock, flags); 8542 } 8543 8544 /** 8545 * _scsih_sas_enclosure_dev_status_change_event_debug - debug for enclosure 8546 * event 8547 * @ioc: per adapter object 8548 * @event_data: event data payload 8549 * Context: user. 
 */
static void
_scsih_sas_enclosure_dev_status_change_event_debug(struct MPT3SAS_ADAPTER *ioc,
	Mpi2EventDataSasEnclDevStatusChange_t *event_data)
{
	char *reason_str = NULL;

	switch (event_data->ReasonCode) {
	case MPI2_EVENT_SAS_ENCL_RC_ADDED:
		reason_str = "enclosure add";
		break;
	case MPI2_EVENT_SAS_ENCL_RC_NOT_RESPONDING:
		reason_str = "enclosure remove";
		break;
	default:
		reason_str = "unknown reason";
		break;
	}

	ioc_info(ioc, "enclosure status change: (%s)\n"
		 "\thandle(0x%04x), enclosure logical id(0x%016llx) number slots(%d)\n",
		 reason_str,
		 le16_to_cpu(event_data->EnclosureHandle),
		 (u64)le64_to_cpu(event_data->EnclosureLogicalID),
		 le16_to_cpu(event_data->StartSlot));
}

/**
 * _scsih_sas_enclosure_dev_status_change_event - handle enclosure events
 * @ioc: per adapter object
 * @fw_event: The fw_event_work object
 * Context: user.
 */
static void
_scsih_sas_enclosure_dev_status_change_event(struct MPT3SAS_ADAPTER *ioc,
	struct fw_event_work *fw_event)
{
	Mpi2ConfigReply_t mpi_reply;
	struct _enclosure_node *enclosure_dev = NULL;
	Mpi2EventDataSasEnclDevStatusChange_t *event_data =
		(Mpi2EventDataSasEnclDevStatusChange_t *)fw_event->event_data;
	int rc;
	u16 enclosure_handle = le16_to_cpu(event_data->EnclosureHandle);

	if (ioc->logging_level & MPT_DEBUG_EVENT_WORK_TASK)
		_scsih_sas_enclosure_dev_status_change_event_debug(ioc,
		     (Mpi2EventDataSasEnclDevStatusChange_t *)
		     fw_event->event_data);
	if (ioc->shost_recovery)
		return;

	if (enclosure_handle)
		enclosure_dev =
			mpt3sas_scsih_enclosure_find_by_handle(ioc,
						enclosure_handle);
	switch (event_data->ReasonCode) {
	case MPI2_EVENT_SAS_ENCL_RC_ADDED:
		/* Allocate and cache enclosure page 0 only if this
		 * enclosure handle is not already tracked.
		 */
		if (!enclosure_dev) {
			enclosure_dev =
				kzalloc(sizeof(struct _enclosure_node),
					GFP_KERNEL);
			if (!enclosure_dev) {
				ioc_info(ioc, "failure at %s:%d/%s()!\n",
					 __FILE__, __LINE__, __func__);
				return;
			}
			rc = mpt3sas_config_get_enclosure_pg0(ioc, &mpi_reply,
				&enclosure_dev->pg0,
				MPI2_SAS_ENCLOS_PGAD_FORM_HANDLE,
				enclosure_handle);

			if (rc || (le16_to_cpu(mpi_reply.IOCStatus) &
				   MPI2_IOCSTATUS_MASK)) {
				kfree(enclosure_dev);
				return;
			}

			list_add_tail(&enclosure_dev->list,
				      &ioc->enclosure_list);
		}
		break;
	case MPI2_EVENT_SAS_ENCL_RC_NOT_RESPONDING:
		if (enclosure_dev) {
			list_del(&enclosure_dev->list);
			kfree(enclosure_dev);
		}
		break;
	default:
		break;
	}
}

/**
 * _scsih_sas_broadcast_primitive_event - handle broadcast events
 * @ioc: per adapter object
 * @fw_event: The fw_event_work object
 * Context: user.
 */
static void
_scsih_sas_broadcast_primitive_event(struct MPT3SAS_ADAPTER *ioc,
	struct fw_event_work *fw_event)
{
	struct scsi_cmnd *scmd;
	struct scsi_device *sdev;
	struct scsiio_tracker *st;
	u16 smid, handle;
	u32 lun;
	struct MPT3SAS_DEVICE *sas_device_priv_data;
	u32 termination_count;
	u32 query_count;
	Mpi2SCSITaskManagementReply_t *mpi_reply;
	Mpi2EventDataSasBroadcastPrimitive_t *event_data =
		(Mpi2EventDataSasBroadcastPrimitive_t *)
		fw_event->event_data;
	u16 ioc_status;
	unsigned long flags;
	int r;
	u8 max_retries = 0;
	u8 task_abort_retries;

	mutex_lock(&ioc->tm_cmds.mutex);
	ioc_info(ioc, "%s: enter: phy number(%d), width(%d)\n",
		 __func__, event_data->PhyNum, event_data->PortWidth);

	_scsih_block_io_all_device(ioc);

	spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
	mpi_reply = ioc->tm_cmds.reply;
 broadcast_aen_retry:

	/* sanity checks for retrying this loop */
	if (max_retries++ == 5) {
		dewtprintk(ioc, ioc_info(ioc, "%s: giving up\n", __func__));
		goto out;
	} else if (max_retries > 1)
		dewtprintk(ioc,
			   ioc_info(ioc, "%s: %d retry\n",
				    __func__, max_retries - 1));

	termination_count = 0;
	query_count = 0;
	/* Walk every outstanding SCSI IO; the lookup lock is dropped
	 * around each TM request and retaken afterwards — the retry
	 * labels below depend on this exact lock choreography.
	 */
	for (smid = 1; smid <= ioc->scsiio_depth; smid++) {
		if (ioc->shost_recovery)
			goto out;
		scmd = mpt3sas_scsih_scsi_lookup_get(ioc, smid);
		if (!scmd)
			continue;
		st = scsi_cmd_priv(scmd);
		sdev = scmd->device;
		sas_device_priv_data = sdev->hostdata;
		if (!sas_device_priv_data || !sas_device_priv_data->sas_target)
			continue;
		/* skip hidden raid components */
		if (sas_device_priv_data->sas_target->flags &
		    MPT_TARGET_FLAGS_RAID_COMPONENT)
			continue;
		/* skip volumes */
		if (sas_device_priv_data->sas_target->flags &
		    MPT_TARGET_FLAGS_VOLUME)
			continue;
		/* skip PCIe devices */
		if (sas_device_priv_data->sas_target->flags &
		    MPT_TARGET_FLAGS_PCIE_DEVICE)
			continue;

		handle = sas_device_priv_data->sas_target->handle;
		lun = sas_device_priv_data->lun;
		query_count++;

		if (ioc->shost_recovery)
			goto out;

		spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
		r = mpt3sas_scsih_issue_tm(ioc, handle, 0, 0, lun,
			MPI2_SCSITASKMGMT_TASKTYPE_QUERY_TASK, st->smid,
			st->msix_io, 30, 0);
		if (r == FAILED) {
			sdev_printk(KERN_WARNING, sdev,
			    "mpt3sas_scsih_issue_tm: FAILED when sending "
			    "QUERY_TASK: scmd(%p)\n", scmd);
			spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
			goto broadcast_aen_retry;
		}
		ioc_status = le16_to_cpu(mpi_reply->IOCStatus)
			& MPI2_IOCSTATUS_MASK;
		if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
			sdev_printk(KERN_WARNING, sdev,
				    "query task: FAILED with IOCSTATUS(0x%04x), scmd(%p)\n",
				    ioc_status, scmd);
			spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
			goto broadcast_aen_retry;
		}

		/* see if IO is still owned by IOC and target */
		if (mpi_reply->ResponseCode ==
		     MPI2_SCSITASKMGMT_RSP_TM_SUCCEEDED ||
		     mpi_reply->ResponseCode ==
		     MPI2_SCSITASKMGMT_RSP_IO_QUEUED_ON_IOC) {
			spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
			continue;
		}
		task_abort_retries = 0;
 tm_retry:
		if (task_abort_retries++ == 60) {
			dewtprintk(ioc,
				   ioc_info(ioc, "%s: ABORT_TASK: giving up\n",
					    __func__));
			spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
			goto broadcast_aen_retry;
		}

		if (ioc->shost_recovery)
			goto out_no_lock;

		r = mpt3sas_scsih_issue_tm(ioc, handle, sdev->channel, sdev->id,
			sdev->lun, MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK,
			st->smid, st->msix_io, 30, 0);
		if (r == FAILED || st->cb_idx != 0xFF) {
			sdev_printk(KERN_WARNING, sdev,
			    "mpt3sas_scsih_issue_tm: ABORT_TASK: FAILED : "
			    "scmd(%p)\n", scmd);
			goto tm_retry;
		}

		if (task_abort_retries > 1)
			sdev_printk(KERN_WARNING, sdev,
			    "mpt3sas_scsih_issue_tm: ABORT_TASK: RETRIES (%d):"
			    " scmd(%p)\n",
			    task_abort_retries - 1, scmd);

		termination_count += le32_to_cpu(mpi_reply->TerminationCount);
		spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
	}

	/* A new broadcast AEN arrived while we were processing — run the
	 * whole scan again rather than risk missing a queued IO.
	 */
	if (ioc->broadcast_aen_pending) {
		dewtprintk(ioc,
			   ioc_info(ioc,
				    "%s: loop back due to pending AEN\n",
				    __func__));
		ioc->broadcast_aen_pending = 0;
		goto broadcast_aen_retry;
	}

 out:
	spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
 out_no_lock:

	dewtprintk(ioc,
		   ioc_info(ioc, "%s - exit, query_count = %d termination_count = %d\n",
			    __func__, query_count, termination_count));

	ioc->broadcast_aen_busy = 0;
	if (!ioc->shost_recovery)
		_scsih_ublock_io_all_device(ioc);
	mutex_unlock(&ioc->tm_cmds.mutex);
}

/**
 * _scsih_sas_discovery_event - handle discovery events
 * @ioc: per adapter object
 * @fw_event: The fw_event_work object
* Context: user. 8812 */ 8813 static void 8814 _scsih_sas_discovery_event(struct MPT3SAS_ADAPTER *ioc, 8815 struct fw_event_work *fw_event) 8816 { 8817 Mpi2EventDataSasDiscovery_t *event_data = 8818 (Mpi2EventDataSasDiscovery_t *) fw_event->event_data; 8819 8820 if (ioc->logging_level & MPT_DEBUG_EVENT_WORK_TASK) { 8821 ioc_info(ioc, "discovery event: (%s)", 8822 event_data->ReasonCode == MPI2_EVENT_SAS_DISC_RC_STARTED ? 8823 "start" : "stop"); 8824 if (event_data->DiscoveryStatus) 8825 pr_cont("discovery_status(0x%08x)", 8826 le32_to_cpu(event_data->DiscoveryStatus)); 8827 pr_cont("\n"); 8828 } 8829 8830 if (event_data->ReasonCode == MPI2_EVENT_SAS_DISC_RC_STARTED && 8831 !ioc->sas_hba.num_phys) { 8832 if (disable_discovery > 0 && ioc->shost_recovery) { 8833 /* Wait for the reset to complete */ 8834 while (ioc->shost_recovery) 8835 ssleep(1); 8836 } 8837 _scsih_sas_host_add(ioc); 8838 } 8839 } 8840 8841 /** 8842 * _scsih_sas_device_discovery_error_event - display SAS device discovery error 8843 * events 8844 * @ioc: per adapter object 8845 * @fw_event: The fw_event_work object 8846 * Context: user. 
 */
static void
_scsih_sas_device_discovery_error_event(struct MPT3SAS_ADAPTER *ioc,
	struct fw_event_work *fw_event)
{
	Mpi25EventDataSasDeviceDiscoveryError_t *event_data =
		(Mpi25EventDataSasDeviceDiscoveryError_t *)fw_event->event_data;

	switch (event_data->ReasonCode) {
	case MPI25_EVENT_SAS_DISC_ERR_SMP_FAILED:
		ioc_warn(ioc, "SMP command sent to the expander (handle:0x%04x, sas_address:0x%016llx, physical_port:0x%02x) has failed\n",
			 le16_to_cpu(event_data->DevHandle),
			 (u64)le64_to_cpu(event_data->SASAddress),
			 event_data->PhysicalPort);
		break;
	case MPI25_EVENT_SAS_DISC_ERR_SMP_TIMEOUT:
		ioc_warn(ioc, "SMP command sent to the expander (handle:0x%04x, sas_address:0x%016llx, physical_port:0x%02x) has timed out\n",
			 le16_to_cpu(event_data->DevHandle),
			 (u64)le64_to_cpu(event_data->SASAddress),
			 event_data->PhysicalPort);
		break;
	default:
		break;
	}
}

/**
 * _scsih_pcie_enumeration_event - handle enumeration events
 * @ioc: per adapter object
 * @fw_event: The fw_event_work object
 * Context: user.
 */
static void
_scsih_pcie_enumeration_event(struct MPT3SAS_ADAPTER *ioc,
	struct fw_event_work *fw_event)
{
	Mpi26EventDataPCIeEnumeration_t *event_data =
		(Mpi26EventDataPCIeEnumeration_t *)fw_event->event_data;

	/* Informational only: printed when work-task debugging is on. */
	if (!(ioc->logging_level & MPT_DEBUG_EVENT_WORK_TASK))
		return;

	ioc_info(ioc, "pcie enumeration event: (%s) Flag 0x%02x",
		 (event_data->ReasonCode == MPI26_EVENT_PCIE_ENUM_RC_STARTED) ?
		 "started" : "completed",
		 event_data->Flags);
	if (event_data->EnumerationStatus)
		pr_cont("enumeration_status(0x%08x)",
			le32_to_cpu(event_data->EnumerationStatus));
	pr_cont("\n");
}

/**
 * _scsih_ir_fastpath - turn on fastpath for IR physdisk
 * @ioc: per adapter object
 * @handle: device handle for physical disk
 * @phys_disk_num: physical disk number
 *
 * Return: 0 for success, else failure.
 */
static int
_scsih_ir_fastpath(struct MPT3SAS_ADAPTER *ioc, u16 handle, u8 phys_disk_num)
{
	Mpi2RaidActionRequest_t *mpi_request;
	Mpi2RaidActionReply_t *mpi_reply;
	u16 smid;
	u8 issue_reset = 0;
	int rc = 0;
	u16 ioc_status;
	u32 log_info;

	/* Not applicable on MPI 2.0 generation controllers. */
	if (ioc->hba_mpi_version_belonged == MPI2_VERSION)
		return rc;

	/* scsih_cmds is a single-slot command channel; serialize users. */
	mutex_lock(&ioc->scsih_cmds.mutex);

	if (ioc->scsih_cmds.status != MPT3_CMD_NOT_USED) {
		ioc_err(ioc, "%s: scsih_cmd in use\n", __func__);
		rc = -EAGAIN;
		goto out;
	}
	ioc->scsih_cmds.status = MPT3_CMD_PENDING;

	smid = mpt3sas_base_get_smid(ioc, ioc->scsih_cb_idx);
	if (!smid) {
		ioc_err(ioc, "%s: failed obtaining a smid\n", __func__);
		ioc->scsih_cmds.status = MPT3_CMD_NOT_USED;
		rc = -EAGAIN;
		goto out;
	}

	mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
	ioc->scsih_cmds.smid = smid;
	memset(mpi_request, 0, sizeof(Mpi2RaidActionRequest_t));

	mpi_request->Function = MPI2_FUNCTION_RAID_ACTION;
	mpi_request->Action = MPI2_RAID_ACTION_PHYSDISK_HIDDEN;
	mpi_request->PhysDiskNum = phys_disk_num;

	dewtprintk(ioc,
		   ioc_info(ioc, "IR RAID_ACTION: turning fast path on for handle(0x%04x), phys_disk_num (0x%02x)\n",
			    handle, phys_disk_num));

	init_completion(&ioc->scsih_cmds.done);
	ioc->put_smid_default(ioc, smid);
	wait_for_completion_timeout(&ioc->scsih_cmds.done, 10*HZ);

	if (!(ioc->scsih_cmds.status & MPT3_CMD_COMPLETE)) {
		/* NOTE(review): presumably sets issue_reset on timeout
		 * (macro side effect) — confirm against base driver.
		 */
		mpt3sas_check_cmd_timeout(ioc,
		    ioc->scsih_cmds.status, mpi_request,
		    sizeof(Mpi2RaidActionRequest_t)/4, issue_reset);
		rc = -EFAULT;
		goto out;
	}

	if (ioc->scsih_cmds.status & MPT3_CMD_REPLY_VALID) {

		mpi_reply = ioc->scsih_cmds.reply;
		ioc_status = le16_to_cpu(mpi_reply->IOCStatus);
		if (ioc_status & MPI2_IOCSTATUS_FLAG_LOG_INFO_AVAILABLE)
			log_info = le32_to_cpu(mpi_reply->IOCLogInfo);
		else
			log_info = 0;
		ioc_status &= MPI2_IOCSTATUS_MASK;
		if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
			dewtprintk(ioc,
				   ioc_info(ioc, "IR RAID_ACTION: failed: ioc_status(0x%04x), loginfo(0x%08x)!!!\n",
					    ioc_status, log_info));
			rc = -EFAULT;
		} else
			dewtprintk(ioc,
				   ioc_info(ioc, "IR RAID_ACTION: completed successfully\n"));
	}

 out:
	ioc->scsih_cmds.status = MPT3_CMD_NOT_USED;
	mutex_unlock(&ioc->scsih_cmds.mutex);

	if (issue_reset)
		mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER);
	return rc;
}

/**
 * _scsih_reprobe_lun - reprobing lun
 * @sdev: scsi device struct
 * @no_uld_attach: sdev->no_uld_attach flag setting
 *
 **/
static void
_scsih_reprobe_lun(struct scsi_device *sdev, void *no_uld_attach)
{
	/* Toggle upper-level-driver attachment and force a reprobe so
	 * the change takes effect (used when hiding/exposing RAID
	 * component disks).
	 */
	sdev->no_uld_attach = no_uld_attach ? 1 : 0;
	sdev_printk(KERN_INFO, sdev, "%s raid component\n",
		    sdev->no_uld_attach ? "hiding" : "exposing");
	WARN_ON(scsi_device_reprobe(sdev));
}

/**
 * _scsih_sas_volume_add - add new volume
 * @ioc: per adapter object
 * @element: IR config element data
 * Context: user.
9010 */ 9011 static void 9012 _scsih_sas_volume_add(struct MPT3SAS_ADAPTER *ioc, 9013 Mpi2EventIrConfigElement_t *element) 9014 { 9015 struct _raid_device *raid_device; 9016 unsigned long flags; 9017 u64 wwid; 9018 u16 handle = le16_to_cpu(element->VolDevHandle); 9019 int rc; 9020 9021 mpt3sas_config_get_volume_wwid(ioc, handle, &wwid); 9022 if (!wwid) { 9023 ioc_err(ioc, "failure at %s:%d/%s()!\n", 9024 __FILE__, __LINE__, __func__); 9025 return; 9026 } 9027 9028 spin_lock_irqsave(&ioc->raid_device_lock, flags); 9029 raid_device = _scsih_raid_device_find_by_wwid(ioc, wwid); 9030 spin_unlock_irqrestore(&ioc->raid_device_lock, flags); 9031 9032 if (raid_device) 9033 return; 9034 9035 raid_device = kzalloc(sizeof(struct _raid_device), GFP_KERNEL); 9036 if (!raid_device) { 9037 ioc_err(ioc, "failure at %s:%d/%s()!\n", 9038 __FILE__, __LINE__, __func__); 9039 return; 9040 } 9041 9042 raid_device->id = ioc->sas_id++; 9043 raid_device->channel = RAID_CHANNEL; 9044 raid_device->handle = handle; 9045 raid_device->wwid = wwid; 9046 _scsih_raid_device_add(ioc, raid_device); 9047 if (!ioc->wait_for_discovery_to_complete) { 9048 rc = scsi_add_device(ioc->shost, RAID_CHANNEL, 9049 raid_device->id, 0); 9050 if (rc) 9051 _scsih_raid_device_remove(ioc, raid_device); 9052 } else { 9053 spin_lock_irqsave(&ioc->raid_device_lock, flags); 9054 _scsih_determine_boot_device(ioc, raid_device, 1); 9055 spin_unlock_irqrestore(&ioc->raid_device_lock, flags); 9056 } 9057 } 9058 9059 /** 9060 * _scsih_sas_volume_delete - delete volume 9061 * @ioc: per adapter object 9062 * @handle: volume device handle 9063 * Context: user. 
9064 */ 9065 static void 9066 _scsih_sas_volume_delete(struct MPT3SAS_ADAPTER *ioc, u16 handle) 9067 { 9068 struct _raid_device *raid_device; 9069 unsigned long flags; 9070 struct MPT3SAS_TARGET *sas_target_priv_data; 9071 struct scsi_target *starget = NULL; 9072 9073 spin_lock_irqsave(&ioc->raid_device_lock, flags); 9074 raid_device = mpt3sas_raid_device_find_by_handle(ioc, handle); 9075 if (raid_device) { 9076 if (raid_device->starget) { 9077 starget = raid_device->starget; 9078 sas_target_priv_data = starget->hostdata; 9079 sas_target_priv_data->deleted = 1; 9080 } 9081 ioc_info(ioc, "removing handle(0x%04x), wwid(0x%016llx)\n", 9082 raid_device->handle, (u64)raid_device->wwid); 9083 list_del(&raid_device->list); 9084 kfree(raid_device); 9085 } 9086 spin_unlock_irqrestore(&ioc->raid_device_lock, flags); 9087 if (starget) 9088 scsi_remove_target(&starget->dev); 9089 } 9090 9091 /** 9092 * _scsih_sas_pd_expose - expose pd component to /dev/sdX 9093 * @ioc: per adapter object 9094 * @element: IR config element data 9095 * Context: user. 
9096 */ 9097 static void 9098 _scsih_sas_pd_expose(struct MPT3SAS_ADAPTER *ioc, 9099 Mpi2EventIrConfigElement_t *element) 9100 { 9101 struct _sas_device *sas_device; 9102 struct scsi_target *starget = NULL; 9103 struct MPT3SAS_TARGET *sas_target_priv_data; 9104 unsigned long flags; 9105 u16 handle = le16_to_cpu(element->PhysDiskDevHandle); 9106 9107 spin_lock_irqsave(&ioc->sas_device_lock, flags); 9108 sas_device = __mpt3sas_get_sdev_by_handle(ioc, handle); 9109 if (sas_device) { 9110 sas_device->volume_handle = 0; 9111 sas_device->volume_wwid = 0; 9112 clear_bit(handle, ioc->pd_handles); 9113 if (sas_device->starget && sas_device->starget->hostdata) { 9114 starget = sas_device->starget; 9115 sas_target_priv_data = starget->hostdata; 9116 sas_target_priv_data->flags &= 9117 ~MPT_TARGET_FLAGS_RAID_COMPONENT; 9118 } 9119 } 9120 spin_unlock_irqrestore(&ioc->sas_device_lock, flags); 9121 if (!sas_device) 9122 return; 9123 9124 /* exposing raid component */ 9125 if (starget) 9126 starget_for_each_device(starget, NULL, _scsih_reprobe_lun); 9127 9128 sas_device_put(sas_device); 9129 } 9130 9131 /** 9132 * _scsih_sas_pd_hide - hide pd component from /dev/sdX 9133 * @ioc: per adapter object 9134 * @element: IR config element data 9135 * Context: user. 
 */
static void
_scsih_sas_pd_hide(struct MPT3SAS_ADAPTER *ioc,
	Mpi2EventIrConfigElement_t *element)
{
	struct _sas_device *sas_device;
	struct scsi_target *starget = NULL;
	struct MPT3SAS_TARGET *sas_target_priv_data;
	unsigned long flags;
	u16 handle = le16_to_cpu(element->PhysDiskDevHandle);
	u16 volume_handle = 0;
	u64 volume_wwid = 0;

	/* look up the owning volume before taking the device lock */
	mpt3sas_config_get_volume_handle(ioc, handle, &volume_handle);
	if (volume_handle)
		mpt3sas_config_get_volume_wwid(ioc, volume_handle,
		    &volume_wwid);

	spin_lock_irqsave(&ioc->sas_device_lock, flags);
	sas_device = __mpt3sas_get_sdev_by_handle(ioc, handle);
	if (sas_device) {
		set_bit(handle, ioc->pd_handles);
		if (sas_device->starget && sas_device->starget->hostdata) {
			starget = sas_device->starget;
			sas_target_priv_data = starget->hostdata;
			sas_target_priv_data->flags |=
			    MPT_TARGET_FLAGS_RAID_COMPONENT;
			sas_device->volume_handle = volume_handle;
			sas_device->volume_wwid = volume_wwid;
		}
	}
	spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
	if (!sas_device)
		return;

	/* hiding raid component */
	_scsih_ir_fastpath(ioc, handle, element->PhysDiskNum);

	/*
	 * Non-NULL data arg selects the "hide" path in _scsih_reprobe_lun
	 * (pd_expose passes NULL for the opposite) — presumably sets
	 * no_uld_attach; confirm against _scsih_reprobe_lun.
	 */
	if (starget)
		starget_for_each_device(starget, (void *)1, _scsih_reprobe_lun);

	/* drop the reference taken by __mpt3sas_get_sdev_by_handle() */
	sas_device_put(sas_device);
}

/**
 * _scsih_sas_pd_delete - delete pd component
 * @ioc: per adapter object
 * @element: IR config element data
 * Context: user.
 */
static void
_scsih_sas_pd_delete(struct MPT3SAS_ADAPTER *ioc,
	Mpi2EventIrConfigElement_t *element)
{
	u16 handle = le16_to_cpu(element->PhysDiskDevHandle);

	_scsih_device_remove_by_handle(ioc, handle);
}

/**
 * _scsih_sas_pd_add - add pd component
 * @ioc: per adapter object
 * @element: IR config element data
 * Context: user.
 */
static void
_scsih_sas_pd_add(struct MPT3SAS_ADAPTER *ioc,
	Mpi2EventIrConfigElement_t *element)
{
	struct _sas_device *sas_device;
	u16 handle = le16_to_cpu(element->PhysDiskDevHandle);
	Mpi2ConfigReply_t mpi_reply;
	Mpi2SasDevicePage0_t sas_device_pg0;
	u32 ioc_status;
	u64 sas_address;
	u16 parent_handle;

	set_bit(handle, ioc->pd_handles);

	/* already known: just run the fastpath notification and bail */
	sas_device = mpt3sas_get_sdev_by_handle(ioc, handle);
	if (sas_device) {
		_scsih_ir_fastpath(ioc, handle, element->PhysDiskNum);
		sas_device_put(sas_device);
		return;
	}

	/* unknown device: fetch SAS Device Page 0 to discover it */
	if ((mpt3sas_config_get_sas_device_pg0(ioc, &mpi_reply, &sas_device_pg0,
	    MPI2_SAS_DEVICE_PGAD_FORM_HANDLE, handle))) {
		ioc_err(ioc, "failure at %s:%d/%s()!\n",
			__FILE__, __LINE__, __func__);
		return;
	}

	ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
	    MPI2_IOCSTATUS_MASK;
	if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
		ioc_err(ioc, "failure at %s:%d/%s()!\n",
			__FILE__, __LINE__, __func__);
		return;
	}

	/* refresh the transport-layer link state for the parent port */
	parent_handle = le16_to_cpu(sas_device_pg0.ParentDevHandle);
	if (!_scsih_get_sas_address(ioc, parent_handle, &sas_address))
		mpt3sas_transport_update_links(ioc, sas_address, handle,
		    sas_device_pg0.PhyNum, MPI2_SAS_NEG_LINK_RATE_1_5,
		    mpt3sas_get_port_by_id(ioc,
		    sas_device_pg0.PhysicalPort, 0));

	_scsih_ir_fastpath(ioc, handle, element->PhysDiskNum);
	/* retry_count = 0, is_pd = 1 */
	_scsih_add_device(ioc, handle, 0, 1);
}

/**
 * _scsih_sas_ir_config_change_event_debug - debug for IR Config Change events
 * @ioc: per adapter object
 * @event_data: event data payload
 * Context: user.
 */
static void
_scsih_sas_ir_config_change_event_debug(struct MPT3SAS_ADAPTER *ioc,
	Mpi2EventDataIrConfigChangeList_t *event_data)
{
	Mpi2EventIrConfigElement_t *element;
	u8 element_type;
	int i;
	char *reason_str = NULL, *element_str = NULL;

	element = (Mpi2EventIrConfigElement_t *)&event_data->ConfigElement[0];

	ioc_info(ioc, "raid config change: (%s), elements(%d)\n",
		 le32_to_cpu(event_data->Flags) & MPI2_EVENT_IR_CHANGE_FLAGS_FOREIGN_CONFIG ?
		 "foreign" : "native",
		 event_data->NumElements);
	/* decode each config element's reason code and element type */
	for (i = 0; i < event_data->NumElements; i++, element++) {
		switch (element->ReasonCode) {
		case MPI2_EVENT_IR_CHANGE_RC_ADDED:
			reason_str = "add";
			break;
		case MPI2_EVENT_IR_CHANGE_RC_REMOVED:
			reason_str = "remove";
			break;
		case MPI2_EVENT_IR_CHANGE_RC_NO_CHANGE:
			reason_str = "no change";
			break;
		case MPI2_EVENT_IR_CHANGE_RC_HIDE:
			reason_str = "hide";
			break;
		case MPI2_EVENT_IR_CHANGE_RC_UNHIDE:
			reason_str = "unhide";
			break;
		case MPI2_EVENT_IR_CHANGE_RC_VOLUME_CREATED:
			reason_str = "volume_created";
			break;
		case MPI2_EVENT_IR_CHANGE_RC_VOLUME_DELETED:
			reason_str = "volume_deleted";
			break;
		case MPI2_EVENT_IR_CHANGE_RC_PD_CREATED:
			reason_str = "pd_created";
			break;
		case MPI2_EVENT_IR_CHANGE_RC_PD_DELETED:
			reason_str = "pd_deleted";
			break;
		default:
			reason_str = "unknown reason";
			break;
		}
		element_type = le16_to_cpu(element->ElementFlags) &
		    MPI2_EVENT_IR_CHANGE_EFLAGS_ELEMENT_TYPE_MASK;
		switch (element_type) {
		case MPI2_EVENT_IR_CHANGE_EFLAGS_VOLUME_ELEMENT:
			element_str = "volume";
			break;
		case MPI2_EVENT_IR_CHANGE_EFLAGS_VOLPHYSDISK_ELEMENT:
			element_str = "phys disk";
			break;
		case MPI2_EVENT_IR_CHANGE_EFLAGS_HOTSPARE_ELEMENT:
			element_str = "hot spare";
			break;
		default:
			element_str = "unknown element";
			break;
		}
		pr_info("\t(%s:%s), vol handle(0x%04x), " \
		    "pd handle(0x%04x), pd num(0x%02x)\n", element_str,
		    reason_str, le16_to_cpu(element->VolDevHandle),
		    le16_to_cpu(element->PhysDiskDevHandle),
		    element->PhysDiskNum);
	}
}

/**
 * _scsih_sas_ir_config_change_event - handle ir configuration change events
 * @ioc: per adapter object
 * @fw_event: The fw_event_work object
 * Context: user.
 */
static void
_scsih_sas_ir_config_change_event(struct MPT3SAS_ADAPTER *ioc,
	struct fw_event_work *fw_event)
{
	Mpi2EventIrConfigElement_t *element;
	int i;
	u8 foreign_config;
	Mpi2EventDataIrConfigChangeList_t *event_data =
		(Mpi2EventDataIrConfigChangeList_t *)
		fw_event->event_data;

	if ((ioc->logging_level & MPT_DEBUG_EVENT_WORK_TASK) &&
	    (!ioc->hide_ir_msg))
		_scsih_sas_ir_config_change_event_debug(ioc, event_data);

	/* foreign configs are not managed here; only fastpath/skip below */
	foreign_config = (le32_to_cpu(event_data->Flags) &
	    MPI2_EVENT_IR_CHANGE_FLAGS_FOREIGN_CONFIG) ? 1 : 0;

	element = (Mpi2EventIrConfigElement_t *)&event_data->ConfigElement[0];
	/*
	 * During host recovery (non-MPI2 HBAs) only service RC_HIDE elements
	 * via the IR fastpath; topology changes are picked up by the
	 * post-reset rescan instead.
	 */
	if (ioc->shost_recovery &&
	    ioc->hba_mpi_version_belonged != MPI2_VERSION) {
		for (i = 0; i < event_data->NumElements; i++, element++) {
			if (element->ReasonCode == MPI2_EVENT_IR_CHANGE_RC_HIDE)
				_scsih_ir_fastpath(ioc,
				    le16_to_cpu(element->PhysDiskDevHandle),
				    element->PhysDiskNum);
		}
		return;
	}

	for (i = 0; i < event_data->NumElements; i++, element++) {

		switch (element->ReasonCode) {
		case MPI2_EVENT_IR_CHANGE_RC_VOLUME_CREATED:
		case MPI2_EVENT_IR_CHANGE_RC_ADDED:
			if (!foreign_config)
				_scsih_sas_volume_add(ioc, element);
			break;
		case MPI2_EVENT_IR_CHANGE_RC_VOLUME_DELETED:
		case MPI2_EVENT_IR_CHANGE_RC_REMOVED:
			if (!foreign_config)
				_scsih_sas_volume_delete(ioc,
				    le16_to_cpu(element->VolDevHandle));
			break;
		case MPI2_EVENT_IR_CHANGE_RC_PD_CREATED:
			/* new volume member: hide it from the OS */
			if (!ioc->is_warpdrive)
				_scsih_sas_pd_hide(ioc, element);
			break;
		case MPI2_EVENT_IR_CHANGE_RC_PD_DELETED:
			/* no longer a volume member: expose it */
			if (!ioc->is_warpdrive)
				_scsih_sas_pd_expose(ioc, element);
			break;
		case MPI2_EVENT_IR_CHANGE_RC_HIDE:
			if (!ioc->is_warpdrive)
				_scsih_sas_pd_add(ioc, element);
			break;
		case MPI2_EVENT_IR_CHANGE_RC_UNHIDE:
			if (!ioc->is_warpdrive)
				_scsih_sas_pd_delete(ioc, element);
			break;
		}
	}
}

/**
 * _scsih_sas_ir_volume_event - IR volume event
 * @ioc: per adapter object
 * @fw_event: The fw_event_work object
 * Context: user.
 */
static void
_scsih_sas_ir_volume_event(struct MPT3SAS_ADAPTER *ioc,
	struct fw_event_work *fw_event)
{
	u64 wwid;
	unsigned long flags;
	struct _raid_device *raid_device;
	u16 handle;
	u32 state;
	int rc;
	Mpi2EventDataIrVolume_t *event_data =
		(Mpi2EventDataIrVolume_t *) fw_event->event_data;

	if (ioc->shost_recovery)
		return;

	if (event_data->ReasonCode != MPI2_EVENT_IR_VOLUME_RC_STATE_CHANGED)
		return;

	handle = le16_to_cpu(event_data->VolDevHandle);
	state = le32_to_cpu(event_data->NewValue);
	if (!ioc->hide_ir_msg)
		dewtprintk(ioc,
			   ioc_info(ioc, "%s: handle(0x%04x), old(0x%08x), new(0x%08x)\n",
				    __func__, handle,
				    le32_to_cpu(event_data->PreviousValue),
				    state));
	switch (state) {
	case MPI2_RAID_VOL_STATE_MISSING:
	case MPI2_RAID_VOL_STATE_FAILED:
		/* volume is gone: tear it down */
		_scsih_sas_volume_delete(ioc, handle);
		break;

	case MPI2_RAID_VOL_STATE_ONLINE:
	case MPI2_RAID_VOL_STATE_DEGRADED:
	case MPI2_RAID_VOL_STATE_OPTIMAL:

		spin_lock_irqsave(&ioc->raid_device_lock, flags);
		raid_device = mpt3sas_raid_device_find_by_handle(ioc, handle);
		spin_unlock_irqrestore(&ioc->raid_device_lock, flags);

		/* already known: nothing to do */
		if (raid_device)
			break;

		mpt3sas_config_get_volume_wwid(ioc, handle, &wwid);
		if (!wwid) {
			ioc_err(ioc, "failure at %s:%d/%s()!\n",
				__FILE__, __LINE__, __func__);
			break;
		}

		raid_device = kzalloc(sizeof(struct _raid_device), GFP_KERNEL);
		if (!raid_device) {
			ioc_err(ioc, "failure at %s:%d/%s()!\n",
				__FILE__, __LINE__, __func__);
			break;
		}

		raid_device->id = ioc->sas_id++;
		raid_device->channel = RAID_CHANNEL;
		raid_device->handle = handle;
		raid_device->wwid = wwid;
		_scsih_raid_device_add(ioc, raid_device);
		rc = scsi_add_device(ioc->shost, RAID_CHANNEL,
		    raid_device->id, 0);
		/* roll back the list addition if the SML rejects the device */
		if (rc)
			_scsih_raid_device_remove(ioc, raid_device);
		break;

	case MPI2_RAID_VOL_STATE_INITIALIZING:
	default:
		break;
	}
}

/**
 * _scsih_sas_ir_physical_disk_event - PD event
 * @ioc: per adapter object
 * @fw_event: The fw_event_work object
 * Context: user.
 */
static void
_scsih_sas_ir_physical_disk_event(struct MPT3SAS_ADAPTER *ioc,
	struct fw_event_work *fw_event)
{
	u16 handle, parent_handle;
	u32 state;
	struct _sas_device *sas_device;
	Mpi2ConfigReply_t mpi_reply;
	Mpi2SasDevicePage0_t sas_device_pg0;
	u32 ioc_status;
	Mpi2EventDataIrPhysicalDisk_t *event_data =
		(Mpi2EventDataIrPhysicalDisk_t *) fw_event->event_data;
	u64 sas_address;

	if (ioc->shost_recovery)
		return;

	if (event_data->ReasonCode != MPI2_EVENT_IR_PHYSDISK_RC_STATE_CHANGED)
		return;

	handle = le16_to_cpu(event_data->PhysDiskDevHandle);
	state = le32_to_cpu(event_data->NewValue);

	if (!ioc->hide_ir_msg)
		dewtprintk(ioc,
			   ioc_info(ioc, "%s: handle(0x%04x), old(0x%08x), new(0x%08x)\n",
				    __func__, handle,
				    le32_to_cpu(event_data->PreviousValue),
				    state));

	switch (state) {
	case MPI2_RAID_PD_STATE_ONLINE:
	case MPI2_RAID_PD_STATE_DEGRADED:
	case MPI2_RAID_PD_STATE_REBUILDING:
	case MPI2_RAID_PD_STATE_OPTIMAL:
	case MPI2_RAID_PD_STATE_HOT_SPARE:

		if (!ioc->is_warpdrive)
			set_bit(handle, ioc->pd_handles);

		/* already known: nothing to do */
		sas_device = mpt3sas_get_sdev_by_handle(ioc, handle);
		if (sas_device) {
			sas_device_put(sas_device);
			return;
		}

		if ((mpt3sas_config_get_sas_device_pg0(ioc, &mpi_reply,
		    &sas_device_pg0, MPI2_SAS_DEVICE_PGAD_FORM_HANDLE,
		    handle))) {
			ioc_err(ioc, "failure at %s:%d/%s()!\n",
				__FILE__, __LINE__, __func__);
			return;
		}

		ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
		    MPI2_IOCSTATUS_MASK;
		if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
			ioc_err(ioc, "failure at %s:%d/%s()!\n",
				__FILE__, __LINE__, __func__);
			return;
		}

		/* refresh the transport-layer link state for the parent port */
		parent_handle = le16_to_cpu(sas_device_pg0.ParentDevHandle);
		if (!_scsih_get_sas_address(ioc, parent_handle, &sas_address))
			mpt3sas_transport_update_links(ioc, sas_address, handle,
			    sas_device_pg0.PhyNum, MPI2_SAS_NEG_LINK_RATE_1_5,
			    mpt3sas_get_port_by_id(ioc,
			    sas_device_pg0.PhysicalPort, 0));

		/* retry_count = 0, is_pd = 1 */
		_scsih_add_device(ioc, handle, 0, 1);

		break;

	case MPI2_RAID_PD_STATE_OFFLINE:
	case MPI2_RAID_PD_STATE_NOT_CONFIGURED:
	case MPI2_RAID_PD_STATE_NOT_COMPATIBLE:
	default:
		break;
	}
}

/**
 * _scsih_sas_ir_operation_status_event_debug - debug for IR op event
 * @ioc: per adapter object
 * @event_data: event data payload
 * Context: user.
 */
static void
_scsih_sas_ir_operation_status_event_debug(struct MPT3SAS_ADAPTER *ioc,
	Mpi2EventDataIrOperationStatus_t *event_data)
{
	char *reason_str = NULL;

	switch (event_data->RAIDOperation) {
	case MPI2_EVENT_IR_RAIDOP_RESYNC:
		reason_str = "resync";
		break;
	case MPI2_EVENT_IR_RAIDOP_ONLINE_CAP_EXPANSION:
		reason_str = "online capacity expansion";
		break;
	case MPI2_EVENT_IR_RAIDOP_CONSISTENCY_CHECK:
		reason_str = "consistency check";
		break;
	case MPI2_EVENT_IR_RAIDOP_BACKGROUND_INIT:
		reason_str = "background init";
		break;
	case MPI2_EVENT_IR_RAIDOP_MAKE_DATA_CONSISTENT:
		reason_str = "make data consistent";
		break;
	}

	/* silently ignore operations we have no string for */
	if (!reason_str)
		return;

	ioc_info(ioc, "raid operational status: (%s)\thandle(0x%04x), percent complete(%d)\n",
		 reason_str,
		 le16_to_cpu(event_data->VolDevHandle),
		 event_data->PercentComplete);
}

/**
 * _scsih_sas_ir_operation_status_event - handle RAID operation events
 * @ioc: per adapter object
 * @fw_event:
The fw_event_work object 9607 * Context: user. 9608 */ 9609 static void 9610 _scsih_sas_ir_operation_status_event(struct MPT3SAS_ADAPTER *ioc, 9611 struct fw_event_work *fw_event) 9612 { 9613 Mpi2EventDataIrOperationStatus_t *event_data = 9614 (Mpi2EventDataIrOperationStatus_t *) 9615 fw_event->event_data; 9616 static struct _raid_device *raid_device; 9617 unsigned long flags; 9618 u16 handle; 9619 9620 if ((ioc->logging_level & MPT_DEBUG_EVENT_WORK_TASK) && 9621 (!ioc->hide_ir_msg)) 9622 _scsih_sas_ir_operation_status_event_debug(ioc, 9623 event_data); 9624 9625 /* code added for raid transport support */ 9626 if (event_data->RAIDOperation == MPI2_EVENT_IR_RAIDOP_RESYNC) { 9627 9628 spin_lock_irqsave(&ioc->raid_device_lock, flags); 9629 handle = le16_to_cpu(event_data->VolDevHandle); 9630 raid_device = mpt3sas_raid_device_find_by_handle(ioc, handle); 9631 if (raid_device) 9632 raid_device->percent_complete = 9633 event_data->PercentComplete; 9634 spin_unlock_irqrestore(&ioc->raid_device_lock, flags); 9635 } 9636 } 9637 9638 /** 9639 * _scsih_prep_device_scan - initialize parameters prior to device scan 9640 * @ioc: per adapter object 9641 * 9642 * Set the deleted flag prior to device scan. If the device is found during 9643 * the scan, then we clear the deleted flag. 9644 */ 9645 static void 9646 _scsih_prep_device_scan(struct MPT3SAS_ADAPTER *ioc) 9647 { 9648 struct MPT3SAS_DEVICE *sas_device_priv_data; 9649 struct scsi_device *sdev; 9650 9651 shost_for_each_device(sdev, ioc->shost) { 9652 sas_device_priv_data = sdev->hostdata; 9653 if (sas_device_priv_data && sas_device_priv_data->sas_target) 9654 sas_device_priv_data->sas_target->deleted = 1; 9655 } 9656 } 9657 9658 /** 9659 * _scsih_update_device_qdepth - Update QD during Reset. 
9660 * @ioc: per adapter object 9661 * 9662 */ 9663 static void 9664 _scsih_update_device_qdepth(struct MPT3SAS_ADAPTER *ioc) 9665 { 9666 struct MPT3SAS_DEVICE *sas_device_priv_data; 9667 struct MPT3SAS_TARGET *sas_target_priv_data; 9668 struct _sas_device *sas_device; 9669 struct scsi_device *sdev; 9670 u16 qdepth; 9671 9672 ioc_info(ioc, "Update devices with firmware reported queue depth\n"); 9673 shost_for_each_device(sdev, ioc->shost) { 9674 sas_device_priv_data = sdev->hostdata; 9675 if (sas_device_priv_data && sas_device_priv_data->sas_target) { 9676 sas_target_priv_data = sas_device_priv_data->sas_target; 9677 sas_device = sas_device_priv_data->sas_target->sas_dev; 9678 if (sas_target_priv_data->flags & MPT_TARGET_FLAGS_PCIE_DEVICE) 9679 qdepth = ioc->max_nvme_qd; 9680 else if (sas_device && 9681 sas_device->device_info & MPI2_SAS_DEVICE_INFO_SSP_TARGET) 9682 qdepth = (sas_device->port_type > 1) ? 9683 ioc->max_wideport_qd : ioc->max_narrowport_qd; 9684 else if (sas_device && 9685 sas_device->device_info & MPI2_SAS_DEVICE_INFO_SATA_DEVICE) 9686 qdepth = ioc->max_sata_qd; 9687 else 9688 continue; 9689 mpt3sas_scsih_change_queue_depth(sdev, qdepth); 9690 } 9691 } 9692 } 9693 9694 /** 9695 * _scsih_mark_responding_sas_device - mark a sas_devices as responding 9696 * @ioc: per adapter object 9697 * @sas_device_pg0: SAS Device page 0 9698 * 9699 * After host reset, find out whether devices are still responding. 9700 * Used in _scsih_remove_unresponsive_sas_devices. 
 */
static void
_scsih_mark_responding_sas_device(struct MPT3SAS_ADAPTER *ioc,
	Mpi2SasDevicePage0_t *sas_device_pg0)
{
	struct MPT3SAS_TARGET *sas_target_priv_data = NULL;
	struct scsi_target *starget;
	struct _sas_device *sas_device = NULL;
	struct _enclosure_node *enclosure_dev = NULL;
	unsigned long flags;
	struct hba_port *port = mpt3sas_get_port_by_id(
	    ioc, sas_device_pg0->PhysicalPort, 0);

	if (sas_device_pg0->EnclosureHandle) {
		enclosure_dev =
			mpt3sas_scsih_enclosure_find_by_handle(ioc,
			    le16_to_cpu(sas_device_pg0->EnclosureHandle));
		if (enclosure_dev == NULL)
			/*
			 * NOTE(review): EnclosureHandle is a __le16 printed
			 * without le16_to_cpu() — value is byte-swapped on
			 * big-endian hosts; confirm/fix upstream.
			 */
			ioc_info(ioc, "Enclosure handle(0x%04x) doesn't match with enclosure device!\n",
				 sas_device_pg0->EnclosureHandle);
	}
	spin_lock_irqsave(&ioc->sas_device_lock, flags);
	list_for_each_entry(sas_device, &ioc->sas_device_list, list) {
		/* match on SAS address + slot + hba port */
		if (sas_device->sas_address != le64_to_cpu(
		    sas_device_pg0->SASAddress))
			continue;
		if (sas_device->slot != le16_to_cpu(sas_device_pg0->Slot))
			continue;
		if (sas_device->port != port)
			continue;
		sas_device->responding = 1;
		starget = sas_device->starget;
		if (starget && starget->hostdata) {
			sas_target_priv_data = starget->hostdata;
			/* device survived the reset: clear stale flags */
			sas_target_priv_data->tm_busy = 0;
			sas_target_priv_data->deleted = 0;
		} else
			sas_target_priv_data = NULL;
		if (starget) {
			starget_printk(KERN_INFO, starget,
			    "handle(0x%04x), sas_addr(0x%016llx)\n",
			    le16_to_cpu(sas_device_pg0->DevHandle),
			    (unsigned long long)
			    sas_device->sas_address);

			if (sas_device->enclosure_handle != 0)
				starget_printk(KERN_INFO, starget,
				    "enclosure logical id(0x%016llx), slot(%d)\n",
				    (unsigned long long)
				    sas_device->enclosure_logical_id,
				    sas_device->slot);
		}
		if (le16_to_cpu(sas_device_pg0->Flags) &
		    MPI2_SAS_DEVICE0_FLAGS_ENCL_LEVEL_VALID) {
			sas_device->enclosure_level =
			    sas_device_pg0->EnclosureLevel;
			memcpy(&sas_device->connector_name[0],
			    &sas_device_pg0->ConnectorName[0], 4);
		} else {
			sas_device->enclosure_level = 0;
			sas_device->connector_name[0] = '\0';
		}

		/* refresh enclosure info, which may have changed over reset */
		sas_device->enclosure_handle =
		    le16_to_cpu(sas_device_pg0->EnclosureHandle);
		sas_device->is_chassis_slot_valid = 0;
		if (enclosure_dev) {
			sas_device->enclosure_logical_id = le64_to_cpu(
			    enclosure_dev->pg0.EnclosureLogicalID);
			if (le16_to_cpu(enclosure_dev->pg0.Flags) &
			    MPI2_SAS_ENCLS0_FLAGS_CHASSIS_SLOT_VALID) {
				sas_device->is_chassis_slot_valid = 1;
				sas_device->chassis_slot =
				    enclosure_dev->pg0.ChassisSlot;
			}
		}

		/* the firmware may assign a new handle across a reset */
		if (sas_device->handle == le16_to_cpu(
		    sas_device_pg0->DevHandle))
			goto out;
		pr_info("\thandle changed from(0x%04x)!!!\n",
		    sas_device->handle);
		sas_device->handle = le16_to_cpu(
		    sas_device_pg0->DevHandle);
		if (sas_target_priv_data)
			sas_target_priv_data->handle =
			    le16_to_cpu(sas_device_pg0->DevHandle);
		goto out;
	}
 out:
	spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
}

/**
 * _scsih_create_enclosure_list_after_reset - Free Existing list,
 *	And create enclosure list by scanning all Enclosure Page(0)s
 * @ioc: per adapter object
 */
static void
_scsih_create_enclosure_list_after_reset(struct MPT3SAS_ADAPTER *ioc)
{
	struct _enclosure_node *enclosure_dev;
	Mpi2ConfigReply_t mpi_reply;
	u16 enclosure_handle;
	int rc;

	/* Free existing enclosure list */
	mpt3sas_free_enclosure_list(ioc);

	/* Re constructing enclosure list after reset*/
	enclosure_handle = 0xFFFF;
	do {
		enclosure_dev =
			kzalloc(sizeof(struct _enclosure_node), GFP_KERNEL);
		if (!enclosure_dev) {
			ioc_err(ioc, "failure at %s:%d/%s()!\n",
				__FILE__, __LINE__, __func__);
			return;
		}
		rc = mpt3sas_config_get_enclosure_pg0(ioc, &mpi_reply,
		    &enclosure_dev->pg0,
		    MPI2_SAS_ENCLOS_PGAD_FORM_GET_NEXT_HANDLE,
		    enclosure_handle);

		/* config failure (or end of enclosure list) ends the walk */
		if (rc || (le16_to_cpu(mpi_reply.IOCStatus) &
		    MPI2_IOCSTATUS_MASK)) {
			kfree(enclosure_dev);
			return;
		}
		list_add_tail(&enclosure_dev->list,
		    &ioc->enclosure_list);
		enclosure_handle =
		    le16_to_cpu(enclosure_dev->pg0.EnclosureHandle);
	} while (1);
}

/**
 * _scsih_search_responding_sas_devices -
 * @ioc: per adapter object
 *
 * After host reset, find out whether devices are still responding.
 * If not remove.
 */
static void
_scsih_search_responding_sas_devices(struct MPT3SAS_ADAPTER *ioc)
{
	Mpi2SasDevicePage0_t sas_device_pg0;
	Mpi2ConfigReply_t mpi_reply;
	u16 ioc_status;
	u16 handle;
	u32 device_info;

	ioc_info(ioc, "search for end-devices: start\n");

	if (list_empty(&ioc->sas_device_list))
		goto out;

	/* walk all SAS device pages by GET_NEXT_HANDLE iteration */
	handle = 0xFFFF;
	while (!(mpt3sas_config_get_sas_device_pg0(ioc, &mpi_reply,
	    &sas_device_pg0, MPI2_SAS_DEVICE_PGAD_FORM_GET_NEXT_HANDLE,
	    handle))) {
		ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
		    MPI2_IOCSTATUS_MASK;
		if (ioc_status != MPI2_IOCSTATUS_SUCCESS)
			break;
		handle = le16_to_cpu(sas_device_pg0.DevHandle);
		device_info = le32_to_cpu(sas_device_pg0.DeviceInfo);
		if (!(_scsih_is_end_device(device_info)))
			continue;
		_scsih_mark_responding_sas_device(ioc, &sas_device_pg0);
	}

 out:
	ioc_info(ioc, "search for end-devices: complete\n");
}

/**
 * _scsih_mark_responding_pcie_device - mark a pcie_device as responding
 * @ioc: per adapter object
 * @pcie_device_pg0: PCIe Device page 0
 *
 * After host reset, find out whether devices are still responding.
 * Used in _scsih_remove_unresponding_devices.
9884 */ 9885 static void 9886 _scsih_mark_responding_pcie_device(struct MPT3SAS_ADAPTER *ioc, 9887 Mpi26PCIeDevicePage0_t *pcie_device_pg0) 9888 { 9889 struct MPT3SAS_TARGET *sas_target_priv_data = NULL; 9890 struct scsi_target *starget; 9891 struct _pcie_device *pcie_device; 9892 unsigned long flags; 9893 9894 spin_lock_irqsave(&ioc->pcie_device_lock, flags); 9895 list_for_each_entry(pcie_device, &ioc->pcie_device_list, list) { 9896 if ((pcie_device->wwid == le64_to_cpu(pcie_device_pg0->WWID)) 9897 && (pcie_device->slot == le16_to_cpu( 9898 pcie_device_pg0->Slot))) { 9899 pcie_device->access_status = 9900 pcie_device_pg0->AccessStatus; 9901 pcie_device->responding = 1; 9902 starget = pcie_device->starget; 9903 if (starget && starget->hostdata) { 9904 sas_target_priv_data = starget->hostdata; 9905 sas_target_priv_data->tm_busy = 0; 9906 sas_target_priv_data->deleted = 0; 9907 } else 9908 sas_target_priv_data = NULL; 9909 if (starget) { 9910 starget_printk(KERN_INFO, starget, 9911 "handle(0x%04x), wwid(0x%016llx) ", 9912 pcie_device->handle, 9913 (unsigned long long)pcie_device->wwid); 9914 if (pcie_device->enclosure_handle != 0) 9915 starget_printk(KERN_INFO, starget, 9916 "enclosure logical id(0x%016llx), " 9917 "slot(%d)\n", 9918 (unsigned long long) 9919 pcie_device->enclosure_logical_id, 9920 pcie_device->slot); 9921 } 9922 9923 if (((le32_to_cpu(pcie_device_pg0->Flags)) & 9924 MPI26_PCIEDEV0_FLAGS_ENCL_LEVEL_VALID) && 9925 (ioc->hba_mpi_version_belonged != MPI2_VERSION)) { 9926 pcie_device->enclosure_level = 9927 pcie_device_pg0->EnclosureLevel; 9928 memcpy(&pcie_device->connector_name[0], 9929 &pcie_device_pg0->ConnectorName[0], 4); 9930 } else { 9931 pcie_device->enclosure_level = 0; 9932 pcie_device->connector_name[0] = '\0'; 9933 } 9934 9935 if (pcie_device->handle == le16_to_cpu( 9936 pcie_device_pg0->DevHandle)) 9937 goto out; 9938 pr_info("\thandle changed from(0x%04x)!!!\n", 9939 pcie_device->handle); 9940 pcie_device->handle = le16_to_cpu( 9941 
pcie_device_pg0->DevHandle); 9942 if (sas_target_priv_data) 9943 sas_target_priv_data->handle = 9944 le16_to_cpu(pcie_device_pg0->DevHandle); 9945 goto out; 9946 } 9947 } 9948 9949 out: 9950 spin_unlock_irqrestore(&ioc->pcie_device_lock, flags); 9951 } 9952 9953 /** 9954 * _scsih_search_responding_pcie_devices - 9955 * @ioc: per adapter object 9956 * 9957 * After host reset, find out whether devices are still responding. 9958 * If not remove. 9959 */ 9960 static void 9961 _scsih_search_responding_pcie_devices(struct MPT3SAS_ADAPTER *ioc) 9962 { 9963 Mpi26PCIeDevicePage0_t pcie_device_pg0; 9964 Mpi2ConfigReply_t mpi_reply; 9965 u16 ioc_status; 9966 u16 handle; 9967 u32 device_info; 9968 9969 ioc_info(ioc, "search for end-devices: start\n"); 9970 9971 if (list_empty(&ioc->pcie_device_list)) 9972 goto out; 9973 9974 handle = 0xFFFF; 9975 while (!(mpt3sas_config_get_pcie_device_pg0(ioc, &mpi_reply, 9976 &pcie_device_pg0, MPI26_PCIE_DEVICE_PGAD_FORM_GET_NEXT_HANDLE, 9977 handle))) { 9978 ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & 9979 MPI2_IOCSTATUS_MASK; 9980 if (ioc_status != MPI2_IOCSTATUS_SUCCESS) { 9981 ioc_info(ioc, "\tbreak from %s: ioc_status(0x%04x), loginfo(0x%08x)\n", 9982 __func__, ioc_status, 9983 le32_to_cpu(mpi_reply.IOCLogInfo)); 9984 break; 9985 } 9986 handle = le16_to_cpu(pcie_device_pg0.DevHandle); 9987 device_info = le32_to_cpu(pcie_device_pg0.DeviceInfo); 9988 if (!(_scsih_is_nvme_pciescsi_device(device_info))) 9989 continue; 9990 _scsih_mark_responding_pcie_device(ioc, &pcie_device_pg0); 9991 } 9992 out: 9993 ioc_info(ioc, "search for PCIe end-devices: complete\n"); 9994 } 9995 9996 /** 9997 * _scsih_mark_responding_raid_device - mark a raid_device as responding 9998 * @ioc: per adapter object 9999 * @wwid: world wide identifier for raid volume 10000 * @handle: device handle 10001 * 10002 * After host reset, find out whether devices are still responding. 10003 * Used in _scsih_remove_unresponsive_raid_devices. 
 */
static void
_scsih_mark_responding_raid_device(struct MPT3SAS_ADAPTER *ioc, u64 wwid,
	u16 handle)
{
	struct MPT3SAS_TARGET *sas_target_priv_data = NULL;
	struct scsi_target *starget;
	struct _raid_device *raid_device;
	unsigned long flags;

	spin_lock_irqsave(&ioc->raid_device_lock, flags);
	list_for_each_entry(raid_device, &ioc->raid_device_list, list) {
		if (raid_device->wwid == wwid && raid_device->starget) {
			starget = raid_device->starget;
			if (starget && starget->hostdata) {
				sas_target_priv_data = starget->hostdata;
				/* volume survived the reset */
				sas_target_priv_data->deleted = 0;
			} else
				sas_target_priv_data = NULL;
			raid_device->responding = 1;
			/* drop the lock: the calls below may sleep */
			spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
			starget_printk(KERN_INFO, raid_device->starget,
			    "handle(0x%04x), wwid(0x%016llx)\n", handle,
			    (unsigned long long)raid_device->wwid);

			/*
			 * WARPDRIVE: The handles of the PDs might have changed
			 * across the host reset so re-initialize the
			 * required data for Direct IO
			 */
			mpt3sas_init_warpdrive_properties(ioc, raid_device);
			spin_lock_irqsave(&ioc->raid_device_lock, flags);
			if (raid_device->handle == handle) {
				spin_unlock_irqrestore(&ioc->raid_device_lock,
				    flags);
				return;
			}
			/* firmware assigned a new handle across the reset */
			pr_info("\thandle changed from(0x%04x)!!!\n",
			    raid_device->handle);
			raid_device->handle = handle;
			if (sas_target_priv_data)
				sas_target_priv_data->handle = handle;
			spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
			return;
		}
	}
	spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
}

/**
 * _scsih_search_responding_raid_devices -
 * @ioc: per adapter object
 *
 * After host reset, find out whether devices are still responding.
 * If not remove.
 */
static void
_scsih_search_responding_raid_devices(struct MPT3SAS_ADAPTER *ioc)
{
	Mpi2RaidVolPage1_t volume_pg1;
	Mpi2RaidVolPage0_t volume_pg0;
	Mpi2RaidPhysDiskPage0_t pd_pg0;
	Mpi2ConfigReply_t mpi_reply;
	u16 ioc_status;
	u16 handle;
	u8 phys_disk_num;

	/* nothing to do without IR (integrated RAID) firmware */
	if (!ioc->ir_firmware)
		return;

	ioc_info(ioc, "search for raid volumes: start\n");

	if (list_empty(&ioc->raid_device_list))
		goto out;

	/* walk all RAID volume pages by GET_NEXT_HANDLE iteration */
	handle = 0xFFFF;
	while (!(mpt3sas_config_get_raid_volume_pg1(ioc, &mpi_reply,
	    &volume_pg1, MPI2_RAID_VOLUME_PGAD_FORM_GET_NEXT_HANDLE, handle))) {
		ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
		    MPI2_IOCSTATUS_MASK;
		if (ioc_status != MPI2_IOCSTATUS_SUCCESS)
			break;
		handle = le16_to_cpu(volume_pg1.DevHandle);

		if (mpt3sas_config_get_raid_volume_pg0(ioc, &mpi_reply,
		    &volume_pg0, MPI2_RAID_VOLUME_PGAD_FORM_HANDLE, handle,
		    sizeof(Mpi2RaidVolPage0_t)))
			continue;

		/* only usable volume states count as responding */
		if (volume_pg0.VolumeState == MPI2_RAID_VOL_STATE_OPTIMAL ||
		    volume_pg0.VolumeState == MPI2_RAID_VOL_STATE_ONLINE ||
		    volume_pg0.VolumeState == MPI2_RAID_VOL_STATE_DEGRADED)
			_scsih_mark_responding_raid_device(ioc,
			    le64_to_cpu(volume_pg1.WWID), handle);
	}

	/* refresh the pd_handles */
	if (!ioc->is_warpdrive) {
		phys_disk_num = 0xFF;
		memset(ioc->pd_handles, 0, ioc->pd_handles_sz);
		while (!(mpt3sas_config_get_phys_disk_pg0(ioc, &mpi_reply,
		    &pd_pg0, MPI2_PHYSDISK_PGAD_FORM_GET_NEXT_PHYSDISKNUM,
		    phys_disk_num))) {
			ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
			    MPI2_IOCSTATUS_MASK;
			if (ioc_status != MPI2_IOCSTATUS_SUCCESS)
				break;
			phys_disk_num = pd_pg0.PhysDiskNum;
			handle = le16_to_cpu(pd_pg0.DevHandle);
			set_bit(handle, ioc->pd_handles);
		}
	}
 out:
	ioc_info(ioc, "search for responding raid volumes: complete\n");
}

/**
 * _scsih_mark_responding_expander - mark a expander as responding
 * @ioc: per adapter object
 * @expander_pg0:SAS Expander Config Page0
 *
 * After host reset, find out whether devices are still responding.
 * Used in _scsih_remove_unresponding_devices.
 */
static void
_scsih_mark_responding_expander(struct MPT3SAS_ADAPTER *ioc,
	Mpi2ExpanderPage0_t *expander_pg0)
{
	struct _sas_node *sas_expander = NULL;
	unsigned long flags;
	int i;
	struct _enclosure_node *enclosure_dev = NULL;
	u16 handle = le16_to_cpu(expander_pg0->DevHandle);
	u16 enclosure_handle = le16_to_cpu(expander_pg0->EnclosureHandle);
	u64 sas_address = le64_to_cpu(expander_pg0->SASAddress);
	struct hba_port *port = mpt3sas_get_port_by_id(
	    ioc, expander_pg0->PhysicalPort, 0);

	if (enclosure_handle)
		enclosure_dev =
			mpt3sas_scsih_enclosure_find_by_handle(ioc,
			    enclosure_handle);

	spin_lock_irqsave(&ioc->sas_node_lock, flags);
	list_for_each_entry(sas_expander, &ioc->sas_expander_list, list) {
		/* match on SAS address + hba port */
		if (sas_expander->sas_address != sas_address)
			continue;
		if (sas_expander->port != port)
			continue;
		sas_expander->responding = 1;

		if (enclosure_dev) {
			sas_expander->enclosure_logical_id =
			    le64_to_cpu(enclosure_dev->pg0.EnclosureLogicalID);
			sas_expander->enclosure_handle =
			    le16_to_cpu(expander_pg0->EnclosureHandle);
		}

		/* the firmware may assign a new handle across a reset */
		if (sas_expander->handle == handle)
			goto out;
		pr_info("\texpander(0x%016llx): handle changed" \
		    " from(0x%04x) to (0x%04x)!!!\n",
		    (unsigned long long)sas_expander->sas_address,
		    sas_expander->handle, handle);
		sas_expander->handle = handle;
		/* propagate the new handle to every phy of this expander */
		for (i = 0 ; i < sas_expander->num_phys ; i++)
			sas_expander->phy[i].handle = handle;
		goto out;
	}
 out:
	spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
}

/**
 * _scsih_search_responding_expanders -
 * @ioc: per adapter object
 *
 * After host reset, find out whether devices are still responding.
 * If not remove.
 */
static void
_scsih_search_responding_expanders(struct MPT3SAS_ADAPTER *ioc)
{
	Mpi2ExpanderPage0_t expander_pg0;
	Mpi2ConfigReply_t mpi_reply;
	u16 ioc_status;
	u64 sas_address;
	u16 handle;
	u8 port;

	ioc_info(ioc, "search for expanders: start\n");

	if (list_empty(&ioc->sas_expander_list))
		goto out;

	/* walk all expander pages by GET_NEXT_HNDL iteration */
	handle = 0xFFFF;
	while (!(mpt3sas_config_get_expander_pg0(ioc, &mpi_reply, &expander_pg0,
	    MPI2_SAS_EXPAND_PGAD_FORM_GET_NEXT_HNDL, handle))) {

		ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
		    MPI2_IOCSTATUS_MASK;
		if (ioc_status != MPI2_IOCSTATUS_SUCCESS)
			break;

		handle = le16_to_cpu(expander_pg0.DevHandle);
		sas_address = le64_to_cpu(expander_pg0.SASAddress);
		port = expander_pg0.PhysicalPort;
		pr_info(
		    "\texpander present: handle(0x%04x), sas_addr(0x%016llx), port:%d\n",
		    handle, (unsigned long long)sas_address,
		    (ioc->multipath_on_hba ?
10215 port : MULTIPATH_DISABLED_PORT_ID)); 10216 _scsih_mark_responding_expander(ioc, &expander_pg0); 10217 } 10218 10219 out: 10220 ioc_info(ioc, "search for expanders: complete\n"); 10221 } 10222 10223 /** 10224 * _scsih_remove_unresponding_devices - removing unresponding devices 10225 * @ioc: per adapter object 10226 */ 10227 static void 10228 _scsih_remove_unresponding_devices(struct MPT3SAS_ADAPTER *ioc) 10229 { 10230 struct _sas_device *sas_device, *sas_device_next; 10231 struct _sas_node *sas_expander, *sas_expander_next; 10232 struct _raid_device *raid_device, *raid_device_next; 10233 struct _pcie_device *pcie_device, *pcie_device_next; 10234 struct list_head tmp_list; 10235 unsigned long flags; 10236 LIST_HEAD(head); 10237 10238 ioc_info(ioc, "removing unresponding devices: start\n"); 10239 10240 /* removing unresponding end devices */ 10241 ioc_info(ioc, "removing unresponding devices: end-devices\n"); 10242 /* 10243 * Iterate, pulling off devices marked as non-responding. We become the 10244 * owner for the reference the list had on any object we prune. 10245 */ 10246 spin_lock_irqsave(&ioc->sas_device_lock, flags); 10247 10248 /* 10249 * Clean up the sas_device_init_list list as 10250 * driver goes for fresh scan as part of diag reset. 10251 */ 10252 list_for_each_entry_safe(sas_device, sas_device_next, 10253 &ioc->sas_device_init_list, list) { 10254 list_del_init(&sas_device->list); 10255 sas_device_put(sas_device); 10256 } 10257 10258 list_for_each_entry_safe(sas_device, sas_device_next, 10259 &ioc->sas_device_list, list) { 10260 if (!sas_device->responding) 10261 list_move_tail(&sas_device->list, &head); 10262 else 10263 sas_device->responding = 0; 10264 } 10265 spin_unlock_irqrestore(&ioc->sas_device_lock, flags); 10266 10267 /* 10268 * Now, uninitialize and remove the unresponding devices we pruned. 
10269 */ 10270 list_for_each_entry_safe(sas_device, sas_device_next, &head, list) { 10271 _scsih_remove_device(ioc, sas_device); 10272 list_del_init(&sas_device->list); 10273 sas_device_put(sas_device); 10274 } 10275 10276 ioc_info(ioc, "Removing unresponding devices: pcie end-devices\n"); 10277 INIT_LIST_HEAD(&head); 10278 spin_lock_irqsave(&ioc->pcie_device_lock, flags); 10279 /* 10280 * Clean up the pcie_device_init_list list as 10281 * driver goes for fresh scan as part of diag reset. 10282 */ 10283 list_for_each_entry_safe(pcie_device, pcie_device_next, 10284 &ioc->pcie_device_init_list, list) { 10285 list_del_init(&pcie_device->list); 10286 pcie_device_put(pcie_device); 10287 } 10288 10289 list_for_each_entry_safe(pcie_device, pcie_device_next, 10290 &ioc->pcie_device_list, list) { 10291 if (!pcie_device->responding) 10292 list_move_tail(&pcie_device->list, &head); 10293 else 10294 pcie_device->responding = 0; 10295 } 10296 spin_unlock_irqrestore(&ioc->pcie_device_lock, flags); 10297 10298 list_for_each_entry_safe(pcie_device, pcie_device_next, &head, list) { 10299 _scsih_pcie_device_remove_from_sml(ioc, pcie_device); 10300 list_del_init(&pcie_device->list); 10301 pcie_device_put(pcie_device); 10302 } 10303 10304 /* removing unresponding volumes */ 10305 if (ioc->ir_firmware) { 10306 ioc_info(ioc, "removing unresponding devices: volumes\n"); 10307 list_for_each_entry_safe(raid_device, raid_device_next, 10308 &ioc->raid_device_list, list) { 10309 if (!raid_device->responding) 10310 _scsih_sas_volume_delete(ioc, 10311 raid_device->handle); 10312 else 10313 raid_device->responding = 0; 10314 } 10315 } 10316 10317 /* removing unresponding expanders */ 10318 ioc_info(ioc, "removing unresponding devices: expanders\n"); 10319 spin_lock_irqsave(&ioc->sas_node_lock, flags); 10320 INIT_LIST_HEAD(&tmp_list); 10321 list_for_each_entry_safe(sas_expander, sas_expander_next, 10322 &ioc->sas_expander_list, list) { 10323 if (!sas_expander->responding) 10324 
list_move_tail(&sas_expander->list, &tmp_list); 10325 else 10326 sas_expander->responding = 0; 10327 } 10328 spin_unlock_irqrestore(&ioc->sas_node_lock, flags); 10329 list_for_each_entry_safe(sas_expander, sas_expander_next, &tmp_list, 10330 list) { 10331 _scsih_expander_node_remove(ioc, sas_expander); 10332 } 10333 10334 ioc_info(ioc, "removing unresponding devices: complete\n"); 10335 10336 /* unblock devices */ 10337 _scsih_ublock_io_all_device(ioc); 10338 } 10339 10340 static void 10341 _scsih_refresh_expander_links(struct MPT3SAS_ADAPTER *ioc, 10342 struct _sas_node *sas_expander, u16 handle) 10343 { 10344 Mpi2ExpanderPage1_t expander_pg1; 10345 Mpi2ConfigReply_t mpi_reply; 10346 int i; 10347 10348 for (i = 0 ; i < sas_expander->num_phys ; i++) { 10349 if ((mpt3sas_config_get_expander_pg1(ioc, &mpi_reply, 10350 &expander_pg1, i, handle))) { 10351 ioc_err(ioc, "failure at %s:%d/%s()!\n", 10352 __FILE__, __LINE__, __func__); 10353 return; 10354 } 10355 10356 mpt3sas_transport_update_links(ioc, sas_expander->sas_address, 10357 le16_to_cpu(expander_pg1.AttachedDevHandle), i, 10358 expander_pg1.NegotiatedLinkRate >> 4, 10359 sas_expander->port); 10360 } 10361 } 10362 10363 /** 10364 * _scsih_scan_for_devices_after_reset - scan for devices after host reset 10365 * @ioc: per adapter object 10366 */ 10367 static void 10368 _scsih_scan_for_devices_after_reset(struct MPT3SAS_ADAPTER *ioc) 10369 { 10370 Mpi2ExpanderPage0_t expander_pg0; 10371 Mpi2SasDevicePage0_t sas_device_pg0; 10372 Mpi26PCIeDevicePage0_t pcie_device_pg0; 10373 Mpi2RaidVolPage1_t volume_pg1; 10374 Mpi2RaidVolPage0_t volume_pg0; 10375 Mpi2RaidPhysDiskPage0_t pd_pg0; 10376 Mpi2EventIrConfigElement_t element; 10377 Mpi2ConfigReply_t mpi_reply; 10378 u8 phys_disk_num, port_id; 10379 u16 ioc_status; 10380 u16 handle, parent_handle; 10381 u64 sas_address; 10382 struct _sas_device *sas_device; 10383 struct _pcie_device *pcie_device; 10384 struct _sas_node *expander_device; 10385 static struct _raid_device 
*raid_device; 10386 u8 retry_count; 10387 unsigned long flags; 10388 10389 ioc_info(ioc, "scan devices: start\n"); 10390 10391 _scsih_sas_host_refresh(ioc); 10392 10393 ioc_info(ioc, "\tscan devices: expanders start\n"); 10394 10395 /* expanders */ 10396 handle = 0xFFFF; 10397 while (!(mpt3sas_config_get_expander_pg0(ioc, &mpi_reply, &expander_pg0, 10398 MPI2_SAS_EXPAND_PGAD_FORM_GET_NEXT_HNDL, handle))) { 10399 ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & 10400 MPI2_IOCSTATUS_MASK; 10401 if (ioc_status != MPI2_IOCSTATUS_SUCCESS) { 10402 ioc_info(ioc, "\tbreak from expander scan: ioc_status(0x%04x), loginfo(0x%08x)\n", 10403 ioc_status, le32_to_cpu(mpi_reply.IOCLogInfo)); 10404 break; 10405 } 10406 handle = le16_to_cpu(expander_pg0.DevHandle); 10407 spin_lock_irqsave(&ioc->sas_node_lock, flags); 10408 port_id = expander_pg0.PhysicalPort; 10409 expander_device = mpt3sas_scsih_expander_find_by_sas_address( 10410 ioc, le64_to_cpu(expander_pg0.SASAddress), 10411 mpt3sas_get_port_by_id(ioc, port_id, 0)); 10412 spin_unlock_irqrestore(&ioc->sas_node_lock, flags); 10413 if (expander_device) 10414 _scsih_refresh_expander_links(ioc, expander_device, 10415 handle); 10416 else { 10417 ioc_info(ioc, "\tBEFORE adding expander: handle (0x%04x), sas_addr(0x%016llx)\n", 10418 handle, 10419 (u64)le64_to_cpu(expander_pg0.SASAddress)); 10420 _scsih_expander_add(ioc, handle); 10421 ioc_info(ioc, "\tAFTER adding expander: handle (0x%04x), sas_addr(0x%016llx)\n", 10422 handle, 10423 (u64)le64_to_cpu(expander_pg0.SASAddress)); 10424 } 10425 } 10426 10427 ioc_info(ioc, "\tscan devices: expanders complete\n"); 10428 10429 if (!ioc->ir_firmware) 10430 goto skip_to_sas; 10431 10432 ioc_info(ioc, "\tscan devices: phys disk start\n"); 10433 10434 /* phys disk */ 10435 phys_disk_num = 0xFF; 10436 while (!(mpt3sas_config_get_phys_disk_pg0(ioc, &mpi_reply, 10437 &pd_pg0, MPI2_PHYSDISK_PGAD_FORM_GET_NEXT_PHYSDISKNUM, 10438 phys_disk_num))) { 10439 ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & 
10440 MPI2_IOCSTATUS_MASK; 10441 if (ioc_status != MPI2_IOCSTATUS_SUCCESS) { 10442 ioc_info(ioc, "\tbreak from phys disk scan: ioc_status(0x%04x), loginfo(0x%08x)\n", 10443 ioc_status, le32_to_cpu(mpi_reply.IOCLogInfo)); 10444 break; 10445 } 10446 phys_disk_num = pd_pg0.PhysDiskNum; 10447 handle = le16_to_cpu(pd_pg0.DevHandle); 10448 sas_device = mpt3sas_get_sdev_by_handle(ioc, handle); 10449 if (sas_device) { 10450 sas_device_put(sas_device); 10451 continue; 10452 } 10453 if (mpt3sas_config_get_sas_device_pg0(ioc, &mpi_reply, 10454 &sas_device_pg0, MPI2_SAS_DEVICE_PGAD_FORM_HANDLE, 10455 handle) != 0) 10456 continue; 10457 ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & 10458 MPI2_IOCSTATUS_MASK; 10459 if (ioc_status != MPI2_IOCSTATUS_SUCCESS) { 10460 ioc_info(ioc, "\tbreak from phys disk scan ioc_status(0x%04x), loginfo(0x%08x)\n", 10461 ioc_status, le32_to_cpu(mpi_reply.IOCLogInfo)); 10462 break; 10463 } 10464 parent_handle = le16_to_cpu(sas_device_pg0.ParentDevHandle); 10465 if (!_scsih_get_sas_address(ioc, parent_handle, 10466 &sas_address)) { 10467 ioc_info(ioc, "\tBEFORE adding phys disk: handle (0x%04x), sas_addr(0x%016llx)\n", 10468 handle, 10469 (u64)le64_to_cpu(sas_device_pg0.SASAddress)); 10470 port_id = sas_device_pg0.PhysicalPort; 10471 mpt3sas_transport_update_links(ioc, sas_address, 10472 handle, sas_device_pg0.PhyNum, 10473 MPI2_SAS_NEG_LINK_RATE_1_5, 10474 mpt3sas_get_port_by_id(ioc, port_id, 0)); 10475 set_bit(handle, ioc->pd_handles); 10476 retry_count = 0; 10477 /* This will retry adding the end device. 
10478 * _scsih_add_device() will decide on retries and 10479 * return "1" when it should be retried 10480 */ 10481 while (_scsih_add_device(ioc, handle, retry_count++, 10482 1)) { 10483 ssleep(1); 10484 } 10485 ioc_info(ioc, "\tAFTER adding phys disk: handle (0x%04x), sas_addr(0x%016llx)\n", 10486 handle, 10487 (u64)le64_to_cpu(sas_device_pg0.SASAddress)); 10488 } 10489 } 10490 10491 ioc_info(ioc, "\tscan devices: phys disk complete\n"); 10492 10493 ioc_info(ioc, "\tscan devices: volumes start\n"); 10494 10495 /* volumes */ 10496 handle = 0xFFFF; 10497 while (!(mpt3sas_config_get_raid_volume_pg1(ioc, &mpi_reply, 10498 &volume_pg1, MPI2_RAID_VOLUME_PGAD_FORM_GET_NEXT_HANDLE, handle))) { 10499 ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & 10500 MPI2_IOCSTATUS_MASK; 10501 if (ioc_status != MPI2_IOCSTATUS_SUCCESS) { 10502 ioc_info(ioc, "\tbreak from volume scan: ioc_status(0x%04x), loginfo(0x%08x)\n", 10503 ioc_status, le32_to_cpu(mpi_reply.IOCLogInfo)); 10504 break; 10505 } 10506 handle = le16_to_cpu(volume_pg1.DevHandle); 10507 spin_lock_irqsave(&ioc->raid_device_lock, flags); 10508 raid_device = _scsih_raid_device_find_by_wwid(ioc, 10509 le64_to_cpu(volume_pg1.WWID)); 10510 spin_unlock_irqrestore(&ioc->raid_device_lock, flags); 10511 if (raid_device) 10512 continue; 10513 if (mpt3sas_config_get_raid_volume_pg0(ioc, &mpi_reply, 10514 &volume_pg0, MPI2_RAID_VOLUME_PGAD_FORM_HANDLE, handle, 10515 sizeof(Mpi2RaidVolPage0_t))) 10516 continue; 10517 ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & 10518 MPI2_IOCSTATUS_MASK; 10519 if (ioc_status != MPI2_IOCSTATUS_SUCCESS) { 10520 ioc_info(ioc, "\tbreak from volume scan: ioc_status(0x%04x), loginfo(0x%08x)\n", 10521 ioc_status, le32_to_cpu(mpi_reply.IOCLogInfo)); 10522 break; 10523 } 10524 if (volume_pg0.VolumeState == MPI2_RAID_VOL_STATE_OPTIMAL || 10525 volume_pg0.VolumeState == MPI2_RAID_VOL_STATE_ONLINE || 10526 volume_pg0.VolumeState == MPI2_RAID_VOL_STATE_DEGRADED) { 10527 memset(&element, 0, 
sizeof(Mpi2EventIrConfigElement_t)); 10528 element.ReasonCode = MPI2_EVENT_IR_CHANGE_RC_ADDED; 10529 element.VolDevHandle = volume_pg1.DevHandle; 10530 ioc_info(ioc, "\tBEFORE adding volume: handle (0x%04x)\n", 10531 volume_pg1.DevHandle); 10532 _scsih_sas_volume_add(ioc, &element); 10533 ioc_info(ioc, "\tAFTER adding volume: handle (0x%04x)\n", 10534 volume_pg1.DevHandle); 10535 } 10536 } 10537 10538 ioc_info(ioc, "\tscan devices: volumes complete\n"); 10539 10540 skip_to_sas: 10541 10542 ioc_info(ioc, "\tscan devices: end devices start\n"); 10543 10544 /* sas devices */ 10545 handle = 0xFFFF; 10546 while (!(mpt3sas_config_get_sas_device_pg0(ioc, &mpi_reply, 10547 &sas_device_pg0, MPI2_SAS_DEVICE_PGAD_FORM_GET_NEXT_HANDLE, 10548 handle))) { 10549 ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & 10550 MPI2_IOCSTATUS_MASK; 10551 if (ioc_status != MPI2_IOCSTATUS_SUCCESS) { 10552 ioc_info(ioc, "\tbreak from end device scan: ioc_status(0x%04x), loginfo(0x%08x)\n", 10553 ioc_status, le32_to_cpu(mpi_reply.IOCLogInfo)); 10554 break; 10555 } 10556 handle = le16_to_cpu(sas_device_pg0.DevHandle); 10557 if (!(_scsih_is_end_device( 10558 le32_to_cpu(sas_device_pg0.DeviceInfo)))) 10559 continue; 10560 port_id = sas_device_pg0.PhysicalPort; 10561 sas_device = mpt3sas_get_sdev_by_addr(ioc, 10562 le64_to_cpu(sas_device_pg0.SASAddress), 10563 mpt3sas_get_port_by_id(ioc, port_id, 0)); 10564 if (sas_device) { 10565 sas_device_put(sas_device); 10566 continue; 10567 } 10568 parent_handle = le16_to_cpu(sas_device_pg0.ParentDevHandle); 10569 if (!_scsih_get_sas_address(ioc, parent_handle, &sas_address)) { 10570 ioc_info(ioc, "\tBEFORE adding end device: handle (0x%04x), sas_addr(0x%016llx)\n", 10571 handle, 10572 (u64)le64_to_cpu(sas_device_pg0.SASAddress)); 10573 mpt3sas_transport_update_links(ioc, sas_address, handle, 10574 sas_device_pg0.PhyNum, MPI2_SAS_NEG_LINK_RATE_1_5, 10575 mpt3sas_get_port_by_id(ioc, port_id, 0)); 10576 retry_count = 0; 10577 /* This will retry adding the end 
device. 10578 * _scsih_add_device() will decide on retries and 10579 * return "1" when it should be retried 10580 */ 10581 while (_scsih_add_device(ioc, handle, retry_count++, 10582 0)) { 10583 ssleep(1); 10584 } 10585 ioc_info(ioc, "\tAFTER adding end device: handle (0x%04x), sas_addr(0x%016llx)\n", 10586 handle, 10587 (u64)le64_to_cpu(sas_device_pg0.SASAddress)); 10588 } 10589 } 10590 ioc_info(ioc, "\tscan devices: end devices complete\n"); 10591 ioc_info(ioc, "\tscan devices: pcie end devices start\n"); 10592 10593 /* pcie devices */ 10594 handle = 0xFFFF; 10595 while (!(mpt3sas_config_get_pcie_device_pg0(ioc, &mpi_reply, 10596 &pcie_device_pg0, MPI26_PCIE_DEVICE_PGAD_FORM_GET_NEXT_HANDLE, 10597 handle))) { 10598 ioc_status = le16_to_cpu(mpi_reply.IOCStatus) 10599 & MPI2_IOCSTATUS_MASK; 10600 if (ioc_status != MPI2_IOCSTATUS_SUCCESS) { 10601 ioc_info(ioc, "\tbreak from pcie end device scan: ioc_status(0x%04x), loginfo(0x%08x)\n", 10602 ioc_status, le32_to_cpu(mpi_reply.IOCLogInfo)); 10603 break; 10604 } 10605 handle = le16_to_cpu(pcie_device_pg0.DevHandle); 10606 if (!(_scsih_is_nvme_pciescsi_device( 10607 le32_to_cpu(pcie_device_pg0.DeviceInfo)))) 10608 continue; 10609 pcie_device = mpt3sas_get_pdev_by_wwid(ioc, 10610 le64_to_cpu(pcie_device_pg0.WWID)); 10611 if (pcie_device) { 10612 pcie_device_put(pcie_device); 10613 continue; 10614 } 10615 retry_count = 0; 10616 parent_handle = le16_to_cpu(pcie_device_pg0.ParentDevHandle); 10617 _scsih_pcie_add_device(ioc, handle); 10618 10619 ioc_info(ioc, "\tAFTER adding pcie end device: handle (0x%04x), wwid(0x%016llx)\n", 10620 handle, (u64)le64_to_cpu(pcie_device_pg0.WWID)); 10621 } 10622 10623 ioc_info(ioc, "\tpcie devices: pcie end devices complete\n"); 10624 ioc_info(ioc, "scan devices: complete\n"); 10625 } 10626 10627 /** 10628 * mpt3sas_scsih_pre_reset_handler - reset callback handler (for scsih) 10629 * @ioc: per adapter object 10630 * 10631 * The handler for doing any required cleanup or initialization. 
10632 */ 10633 void mpt3sas_scsih_pre_reset_handler(struct MPT3SAS_ADAPTER *ioc) 10634 { 10635 dtmprintk(ioc, ioc_info(ioc, "%s: MPT3_IOC_PRE_RESET\n", __func__)); 10636 } 10637 10638 /** 10639 * mpt3sas_scsih_clear_outstanding_scsi_tm_commands - clears outstanding 10640 * scsi & tm cmds. 10641 * @ioc: per adapter object 10642 * 10643 * The handler for doing any required cleanup or initialization. 10644 */ 10645 void 10646 mpt3sas_scsih_clear_outstanding_scsi_tm_commands(struct MPT3SAS_ADAPTER *ioc) 10647 { 10648 dtmprintk(ioc, 10649 ioc_info(ioc, "%s: clear outstanding scsi & tm cmds\n", __func__)); 10650 if (ioc->scsih_cmds.status & MPT3_CMD_PENDING) { 10651 ioc->scsih_cmds.status |= MPT3_CMD_RESET; 10652 mpt3sas_base_free_smid(ioc, ioc->scsih_cmds.smid); 10653 complete(&ioc->scsih_cmds.done); 10654 } 10655 if (ioc->tm_cmds.status & MPT3_CMD_PENDING) { 10656 ioc->tm_cmds.status |= MPT3_CMD_RESET; 10657 mpt3sas_base_free_smid(ioc, ioc->tm_cmds.smid); 10658 complete(&ioc->tm_cmds.done); 10659 } 10660 10661 memset(ioc->pend_os_device_add, 0, ioc->pend_os_device_add_sz); 10662 memset(ioc->device_remove_in_progress, 0, 10663 ioc->device_remove_in_progress_sz); 10664 _scsih_fw_event_cleanup_queue(ioc); 10665 _scsih_flush_running_cmds(ioc); 10666 } 10667 10668 /** 10669 * mpt3sas_scsih_reset_done_handler - reset callback handler (for scsih) 10670 * @ioc: per adapter object 10671 * 10672 * The handler for doing any required cleanup or initialization. 
10673 */ 10674 void 10675 mpt3sas_scsih_reset_done_handler(struct MPT3SAS_ADAPTER *ioc) 10676 { 10677 dtmprintk(ioc, ioc_info(ioc, "%s: MPT3_IOC_DONE_RESET\n", __func__)); 10678 if (!(disable_discovery > 0 && !ioc->sas_hba.num_phys)) { 10679 if (ioc->multipath_on_hba) { 10680 _scsih_sas_port_refresh(ioc); 10681 _scsih_update_vphys_after_reset(ioc); 10682 } 10683 _scsih_prep_device_scan(ioc); 10684 _scsih_create_enclosure_list_after_reset(ioc); 10685 _scsih_search_responding_sas_devices(ioc); 10686 _scsih_search_responding_pcie_devices(ioc); 10687 _scsih_search_responding_raid_devices(ioc); 10688 _scsih_search_responding_expanders(ioc); 10689 _scsih_error_recovery_delete_devices(ioc); 10690 } 10691 } 10692 10693 /** 10694 * _mpt3sas_fw_work - delayed task for processing firmware events 10695 * @ioc: per adapter object 10696 * @fw_event: The fw_event_work object 10697 * Context: user. 10698 */ 10699 static void 10700 _mpt3sas_fw_work(struct MPT3SAS_ADAPTER *ioc, struct fw_event_work *fw_event) 10701 { 10702 ioc->current_event = fw_event; 10703 _scsih_fw_event_del_from_list(ioc, fw_event); 10704 10705 /* the queue is being flushed so ignore this event */ 10706 if (ioc->remove_host || ioc->pci_error_recovery) { 10707 fw_event_work_put(fw_event); 10708 ioc->current_event = NULL; 10709 return; 10710 } 10711 10712 switch (fw_event->event) { 10713 case MPT3SAS_PROCESS_TRIGGER_DIAG: 10714 mpt3sas_process_trigger_data(ioc, 10715 (struct SL_WH_TRIGGERS_EVENT_DATA_T *) 10716 fw_event->event_data); 10717 break; 10718 case MPT3SAS_REMOVE_UNRESPONDING_DEVICES: 10719 while (scsi_host_in_recovery(ioc->shost) || 10720 ioc->shost_recovery) { 10721 /* 10722 * If we're unloading or cancelling the work, bail. 10723 * Otherwise, this can become an infinite loop. 
10724 */ 10725 if (ioc->remove_host || ioc->fw_events_cleanup) 10726 goto out; 10727 ssleep(1); 10728 } 10729 _scsih_remove_unresponding_devices(ioc); 10730 _scsih_del_dirty_vphy(ioc); 10731 _scsih_del_dirty_port_entries(ioc); 10732 if (ioc->is_gen35_ioc) 10733 _scsih_update_device_qdepth(ioc); 10734 _scsih_scan_for_devices_after_reset(ioc); 10735 /* 10736 * If diag reset has occurred during the driver load 10737 * then driver has to complete the driver load operation 10738 * by executing the following items: 10739 *- Register the devices from sas_device_init_list to SML 10740 *- clear is_driver_loading flag, 10741 *- start the watchdog thread. 10742 * In happy driver load path, above things are taken care of when 10743 * driver executes scsih_scan_finished(). 10744 */ 10745 if (ioc->is_driver_loading) 10746 _scsih_complete_devices_scanning(ioc); 10747 _scsih_set_nvme_max_shutdown_latency(ioc); 10748 break; 10749 case MPT3SAS_PORT_ENABLE_COMPLETE: 10750 ioc->start_scan = 0; 10751 if (missing_delay[0] != -1 && missing_delay[1] != -1) 10752 mpt3sas_base_update_missing_delay(ioc, missing_delay[0], 10753 missing_delay[1]); 10754 dewtprintk(ioc, 10755 ioc_info(ioc, "port enable: complete from worker thread\n")); 10756 break; 10757 case MPT3SAS_TURN_ON_PFA_LED: 10758 _scsih_turn_on_pfa_led(ioc, fw_event->device_handle); 10759 break; 10760 case MPI2_EVENT_SAS_TOPOLOGY_CHANGE_LIST: 10761 _scsih_sas_topology_change_event(ioc, fw_event); 10762 break; 10763 case MPI2_EVENT_SAS_DEVICE_STATUS_CHANGE: 10764 if (ioc->logging_level & MPT_DEBUG_EVENT_WORK_TASK) 10765 _scsih_sas_device_status_change_event_debug(ioc, 10766 (Mpi2EventDataSasDeviceStatusChange_t *) 10767 fw_event->event_data); 10768 break; 10769 case MPI2_EVENT_SAS_DISCOVERY: 10770 _scsih_sas_discovery_event(ioc, fw_event); 10771 break; 10772 case MPI2_EVENT_SAS_DEVICE_DISCOVERY_ERROR: 10773 _scsih_sas_device_discovery_error_event(ioc, fw_event); 10774 break; 10775 case MPI2_EVENT_SAS_BROADCAST_PRIMITIVE: 10776 
_scsih_sas_broadcast_primitive_event(ioc, fw_event); 10777 break; 10778 case MPI2_EVENT_SAS_ENCL_DEVICE_STATUS_CHANGE: 10779 _scsih_sas_enclosure_dev_status_change_event(ioc, 10780 fw_event); 10781 break; 10782 case MPI2_EVENT_IR_CONFIGURATION_CHANGE_LIST: 10783 _scsih_sas_ir_config_change_event(ioc, fw_event); 10784 break; 10785 case MPI2_EVENT_IR_VOLUME: 10786 _scsih_sas_ir_volume_event(ioc, fw_event); 10787 break; 10788 case MPI2_EVENT_IR_PHYSICAL_DISK: 10789 _scsih_sas_ir_physical_disk_event(ioc, fw_event); 10790 break; 10791 case MPI2_EVENT_IR_OPERATION_STATUS: 10792 _scsih_sas_ir_operation_status_event(ioc, fw_event); 10793 break; 10794 case MPI2_EVENT_PCIE_DEVICE_STATUS_CHANGE: 10795 _scsih_pcie_device_status_change_event(ioc, fw_event); 10796 break; 10797 case MPI2_EVENT_PCIE_ENUMERATION: 10798 _scsih_pcie_enumeration_event(ioc, fw_event); 10799 break; 10800 case MPI2_EVENT_PCIE_TOPOLOGY_CHANGE_LIST: 10801 _scsih_pcie_topology_change_event(ioc, fw_event); 10802 ioc->current_event = NULL; 10803 return; 10804 } 10805 out: 10806 fw_event_work_put(fw_event); 10807 ioc->current_event = NULL; 10808 } 10809 10810 /** 10811 * _firmware_event_work 10812 * @work: The fw_event_work object 10813 * Context: user. 10814 * 10815 * wrappers for the work thread handling firmware events 10816 */ 10817 10818 static void 10819 _firmware_event_work(struct work_struct *work) 10820 { 10821 struct fw_event_work *fw_event = container_of(work, 10822 struct fw_event_work, work); 10823 10824 _mpt3sas_fw_work(fw_event->ioc, fw_event); 10825 } 10826 10827 /** 10828 * mpt3sas_scsih_event_callback - firmware event handler (called at ISR time) 10829 * @ioc: per adapter object 10830 * @msix_index: MSIX table index supplied by the OS 10831 * @reply: reply message frame(lower 32bit addr) 10832 * Context: interrupt. 10833 * 10834 * This function merely adds a new work task into ioc->firmware_event_thread. 10835 * The tasks are worked from _firmware_event_work in user context. 
10836 * 10837 * Return: 1 meaning mf should be freed from _base_interrupt 10838 * 0 means the mf is freed from this function. 10839 */ 10840 u8 10841 mpt3sas_scsih_event_callback(struct MPT3SAS_ADAPTER *ioc, u8 msix_index, 10842 u32 reply) 10843 { 10844 struct fw_event_work *fw_event; 10845 Mpi2EventNotificationReply_t *mpi_reply; 10846 u16 event; 10847 u16 sz; 10848 Mpi26EventDataActiveCableExcept_t *ActiveCableEventData; 10849 10850 /* events turned off due to host reset */ 10851 if (ioc->pci_error_recovery) 10852 return 1; 10853 10854 mpi_reply = mpt3sas_base_get_reply_virt_addr(ioc, reply); 10855 10856 if (unlikely(!mpi_reply)) { 10857 ioc_err(ioc, "mpi_reply not valid at %s:%d/%s()!\n", 10858 __FILE__, __LINE__, __func__); 10859 return 1; 10860 } 10861 10862 event = le16_to_cpu(mpi_reply->Event); 10863 10864 if (event != MPI2_EVENT_LOG_ENTRY_ADDED) 10865 mpt3sas_trigger_event(ioc, event, 0); 10866 10867 switch (event) { 10868 /* handle these */ 10869 case MPI2_EVENT_SAS_BROADCAST_PRIMITIVE: 10870 { 10871 Mpi2EventDataSasBroadcastPrimitive_t *baen_data = 10872 (Mpi2EventDataSasBroadcastPrimitive_t *) 10873 mpi_reply->EventData; 10874 10875 if (baen_data->Primitive != 10876 MPI2_EVENT_PRIMITIVE_ASYNCHRONOUS_EVENT) 10877 return 1; 10878 10879 if (ioc->broadcast_aen_busy) { 10880 ioc->broadcast_aen_pending++; 10881 return 1; 10882 } else 10883 ioc->broadcast_aen_busy = 1; 10884 break; 10885 } 10886 10887 case MPI2_EVENT_SAS_TOPOLOGY_CHANGE_LIST: 10888 _scsih_check_topo_delete_events(ioc, 10889 (Mpi2EventDataSasTopologyChangeList_t *) 10890 mpi_reply->EventData); 10891 /* 10892 * No need to add the topology change list 10893 * event to fw event work queue when 10894 * diag reset is going on. Since during diag 10895 * reset driver scan the devices by reading 10896 * sas device page0's not by processing the 10897 * events. 
10898 */ 10899 if (ioc->shost_recovery) 10900 return 1; 10901 break; 10902 case MPI2_EVENT_PCIE_TOPOLOGY_CHANGE_LIST: 10903 _scsih_check_pcie_topo_remove_events(ioc, 10904 (Mpi26EventDataPCIeTopologyChangeList_t *) 10905 mpi_reply->EventData); 10906 if (ioc->shost_recovery) 10907 return 1; 10908 break; 10909 case MPI2_EVENT_IR_CONFIGURATION_CHANGE_LIST: 10910 _scsih_check_ir_config_unhide_events(ioc, 10911 (Mpi2EventDataIrConfigChangeList_t *) 10912 mpi_reply->EventData); 10913 break; 10914 case MPI2_EVENT_IR_VOLUME: 10915 _scsih_check_volume_delete_events(ioc, 10916 (Mpi2EventDataIrVolume_t *) 10917 mpi_reply->EventData); 10918 break; 10919 case MPI2_EVENT_LOG_ENTRY_ADDED: 10920 { 10921 Mpi2EventDataLogEntryAdded_t *log_entry; 10922 u32 log_code; 10923 10924 if (!ioc->is_warpdrive) 10925 break; 10926 10927 log_entry = (Mpi2EventDataLogEntryAdded_t *) 10928 mpi_reply->EventData; 10929 log_code = le32_to_cpu(*(__le32 *)log_entry->LogData); 10930 10931 if (le16_to_cpu(log_entry->LogEntryQualifier) 10932 != MPT2_WARPDRIVE_LOGENTRY) 10933 break; 10934 10935 switch (log_code) { 10936 case MPT2_WARPDRIVE_LC_SSDT: 10937 ioc_warn(ioc, "WarpDrive Warning: IO Throttling has occurred in the WarpDrive subsystem. Check WarpDrive documentation for additional details.\n"); 10938 break; 10939 case MPT2_WARPDRIVE_LC_SSDLW: 10940 ioc_warn(ioc, "WarpDrive Warning: Program/Erase Cycles for the WarpDrive subsystem in degraded range. Check WarpDrive documentation for additional details.\n"); 10941 break; 10942 case MPT2_WARPDRIVE_LC_SSDLF: 10943 ioc_err(ioc, "WarpDrive Fatal Error: There are no Program/Erase Cycles for the WarpDrive subsystem. The storage device will be in read-only mode. Check WarpDrive documentation for additional details.\n"); 10944 break; 10945 case MPT2_WARPDRIVE_LC_BRMF: 10946 ioc_err(ioc, "WarpDrive Fatal Error: The Backup Rail Monitor has failed on the WarpDrive subsystem. 
Check WarpDrive documentation for additional details.\n"); 10947 break; 10948 } 10949 10950 break; 10951 } 10952 case MPI2_EVENT_SAS_DEVICE_STATUS_CHANGE: 10953 _scsih_sas_device_status_change_event(ioc, 10954 (Mpi2EventDataSasDeviceStatusChange_t *) 10955 mpi_reply->EventData); 10956 break; 10957 case MPI2_EVENT_IR_OPERATION_STATUS: 10958 case MPI2_EVENT_SAS_DISCOVERY: 10959 case MPI2_EVENT_SAS_DEVICE_DISCOVERY_ERROR: 10960 case MPI2_EVENT_SAS_ENCL_DEVICE_STATUS_CHANGE: 10961 case MPI2_EVENT_IR_PHYSICAL_DISK: 10962 case MPI2_EVENT_PCIE_ENUMERATION: 10963 case MPI2_EVENT_PCIE_DEVICE_STATUS_CHANGE: 10964 break; 10965 10966 case MPI2_EVENT_TEMP_THRESHOLD: 10967 _scsih_temp_threshold_events(ioc, 10968 (Mpi2EventDataTemperature_t *) 10969 mpi_reply->EventData); 10970 break; 10971 case MPI2_EVENT_ACTIVE_CABLE_EXCEPTION: 10972 ActiveCableEventData = 10973 (Mpi26EventDataActiveCableExcept_t *) mpi_reply->EventData; 10974 switch (ActiveCableEventData->ReasonCode) { 10975 case MPI26_EVENT_ACTIVE_CABLE_INSUFFICIENT_POWER: 10976 ioc_notice(ioc, "Currently an active cable with ReceptacleID %d\n", 10977 ActiveCableEventData->ReceptacleID); 10978 pr_notice("cannot be powered and devices connected\n"); 10979 pr_notice("to this active cable will not be seen\n"); 10980 pr_notice("This active cable requires %d mW of power\n", 10981 le32_to_cpu( 10982 ActiveCableEventData->ActiveCablePowerRequirement)); 10983 break; 10984 10985 case MPI26_EVENT_ACTIVE_CABLE_DEGRADED: 10986 ioc_notice(ioc, "Currently a cable with ReceptacleID %d\n", 10987 ActiveCableEventData->ReceptacleID); 10988 pr_notice( 10989 "is not running at optimal speed(12 Gb/s rate)\n"); 10990 break; 10991 } 10992 10993 break; 10994 10995 default: /* ignore the rest */ 10996 return 1; 10997 } 10998 10999 sz = le16_to_cpu(mpi_reply->EventDataLength) * 4; 11000 fw_event = alloc_fw_event_work(sz); 11001 if (!fw_event) { 11002 ioc_err(ioc, "failure at %s:%d/%s()!\n", 11003 __FILE__, __LINE__, __func__); 11004 return 1; 11005 } 
11006 11007 memcpy(fw_event->event_data, mpi_reply->EventData, sz); 11008 fw_event->ioc = ioc; 11009 fw_event->VF_ID = mpi_reply->VF_ID; 11010 fw_event->VP_ID = mpi_reply->VP_ID; 11011 fw_event->event = event; 11012 _scsih_fw_event_add(ioc, fw_event); 11013 fw_event_work_put(fw_event); 11014 return 1; 11015 } 11016 11017 /** 11018 * _scsih_expander_node_remove - removing expander device from list. 11019 * @ioc: per adapter object 11020 * @sas_expander: the sas_device object 11021 * 11022 * Removing object and freeing associated memory from the 11023 * ioc->sas_expander_list. 11024 */ 11025 static void 11026 _scsih_expander_node_remove(struct MPT3SAS_ADAPTER *ioc, 11027 struct _sas_node *sas_expander) 11028 { 11029 struct _sas_port *mpt3sas_port, *next; 11030 unsigned long flags; 11031 int port_id; 11032 11033 /* remove sibling ports attached to this expander */ 11034 list_for_each_entry_safe(mpt3sas_port, next, 11035 &sas_expander->sas_port_list, port_list) { 11036 if (ioc->shost_recovery) 11037 return; 11038 if (mpt3sas_port->remote_identify.device_type == 11039 SAS_END_DEVICE) 11040 mpt3sas_device_remove_by_sas_address(ioc, 11041 mpt3sas_port->remote_identify.sas_address, 11042 mpt3sas_port->hba_port); 11043 else if (mpt3sas_port->remote_identify.device_type == 11044 SAS_EDGE_EXPANDER_DEVICE || 11045 mpt3sas_port->remote_identify.device_type == 11046 SAS_FANOUT_EXPANDER_DEVICE) 11047 mpt3sas_expander_remove(ioc, 11048 mpt3sas_port->remote_identify.sas_address, 11049 mpt3sas_port->hba_port); 11050 } 11051 11052 port_id = sas_expander->port->port_id; 11053 11054 mpt3sas_transport_port_remove(ioc, sas_expander->sas_address, 11055 sas_expander->sas_address_parent, sas_expander->port); 11056 11057 ioc_info(ioc, 11058 "expander_remove: handle(0x%04x), sas_addr(0x%016llx), port:%d\n", 11059 sas_expander->handle, (unsigned long long) 11060 sas_expander->sas_address, 11061 port_id); 11062 11063 spin_lock_irqsave(&ioc->sas_node_lock, flags); 11064 
list_del(&sas_expander->list); 11065 spin_unlock_irqrestore(&ioc->sas_node_lock, flags); 11066 11067 kfree(sas_expander->phy); 11068 kfree(sas_expander); 11069 } 11070 11071 /** 11072 * _scsih_nvme_shutdown - NVMe shutdown notification 11073 * @ioc: per adapter object 11074 * 11075 * Sending IoUnitControl request with shutdown operation code to alert IOC that 11076 * the host system is shutting down so that IOC can issue NVMe shutdown to 11077 * NVMe drives attached to it. 11078 */ 11079 static void 11080 _scsih_nvme_shutdown(struct MPT3SAS_ADAPTER *ioc) 11081 { 11082 Mpi26IoUnitControlRequest_t *mpi_request; 11083 Mpi26IoUnitControlReply_t *mpi_reply; 11084 u16 smid; 11085 11086 /* are there any NVMe devices ? */ 11087 if (list_empty(&ioc->pcie_device_list)) 11088 return; 11089 11090 mutex_lock(&ioc->scsih_cmds.mutex); 11091 11092 if (ioc->scsih_cmds.status != MPT3_CMD_NOT_USED) { 11093 ioc_err(ioc, "%s: scsih_cmd in use\n", __func__); 11094 goto out; 11095 } 11096 11097 ioc->scsih_cmds.status = MPT3_CMD_PENDING; 11098 11099 smid = mpt3sas_base_get_smid(ioc, ioc->scsih_cb_idx); 11100 if (!smid) { 11101 ioc_err(ioc, 11102 "%s: failed obtaining a smid\n", __func__); 11103 ioc->scsih_cmds.status = MPT3_CMD_NOT_USED; 11104 goto out; 11105 } 11106 11107 mpi_request = mpt3sas_base_get_msg_frame(ioc, smid); 11108 ioc->scsih_cmds.smid = smid; 11109 memset(mpi_request, 0, sizeof(Mpi26IoUnitControlRequest_t)); 11110 mpi_request->Function = MPI2_FUNCTION_IO_UNIT_CONTROL; 11111 mpi_request->Operation = MPI26_CTRL_OP_SHUTDOWN; 11112 11113 init_completion(&ioc->scsih_cmds.done); 11114 ioc->put_smid_default(ioc, smid); 11115 /* Wait for max_shutdown_latency seconds */ 11116 ioc_info(ioc, 11117 "Io Unit Control shutdown (sending), Shutdown latency %d sec\n", 11118 ioc->max_shutdown_latency); 11119 wait_for_completion_timeout(&ioc->scsih_cmds.done, 11120 ioc->max_shutdown_latency*HZ); 11121 11122 if (!(ioc->scsih_cmds.status & MPT3_CMD_COMPLETE)) { 11123 ioc_err(ioc, "%s: 
/**
 * _scsih_ir_shutdown - IR shutdown notification
 * @ioc: per adapter object
 *
 * Sending RAID Action to alert the Integrated RAID subsystem of the IOC that
 * the host system is shutting down.
 */
static void
_scsih_ir_shutdown(struct MPT3SAS_ADAPTER *ioc)
{
	Mpi2RaidActionRequest_t *mpi_request;
	Mpi2RaidActionReply_t *mpi_reply;
	u16 smid;

	/* is IR firmware build loaded ? */
	if (!ioc->ir_firmware)
		return;

	/* are there any volumes ? */
	if (list_empty(&ioc->raid_device_list))
		return;

	/* serialize use of the shared scsih_cmds internal command slot */
	mutex_lock(&ioc->scsih_cmds.mutex);

	if (ioc->scsih_cmds.status != MPT3_CMD_NOT_USED) {
		ioc_err(ioc, "%s: scsih_cmd in use\n", __func__);
		goto out;
	}
	ioc->scsih_cmds.status = MPT3_CMD_PENDING;

	smid = mpt3sas_base_get_smid(ioc, ioc->scsih_cb_idx);
	if (!smid) {
		ioc_err(ioc, "%s: failed obtaining a smid\n", __func__);
		ioc->scsih_cmds.status = MPT3_CMD_NOT_USED;
		goto out;
	}

	mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
	ioc->scsih_cmds.smid = smid;
	memset(mpi_request, 0, sizeof(Mpi2RaidActionRequest_t));

	mpi_request->Function = MPI2_FUNCTION_RAID_ACTION;
	mpi_request->Action = MPI2_RAID_ACTION_SYSTEM_SHUTDOWN_INITIATED;

	/* hide_ir_msg suppresses IR-related log chatter (e.g. WarpDrive) */
	if (!ioc->hide_ir_msg)
		ioc_info(ioc, "IR shutdown (sending)\n");
	init_completion(&ioc->scsih_cmds.done);
	ioc->put_smid_default(ioc, smid);
	wait_for_completion_timeout(&ioc->scsih_cmds.done, 10*HZ);

	if (!(ioc->scsih_cmds.status & MPT3_CMD_COMPLETE)) {
		ioc_err(ioc, "%s: timeout\n", __func__);
		goto out;
	}

	if (ioc->scsih_cmds.status & MPT3_CMD_REPLY_VALID) {
		mpi_reply = ioc->scsih_cmds.reply;
		if (!ioc->hide_ir_msg)
			ioc_info(ioc, "IR shutdown (complete): ioc_status(0x%04x), loginfo(0x%08x)\n",
				 le16_to_cpu(mpi_reply->IOCStatus),
				 le32_to_cpu(mpi_reply->IOCLogInfo));
	}

 out:
	ioc->scsih_cmds.status = MPT3_CMD_NOT_USED;
	mutex_unlock(&ioc->scsih_cmds.mutex);
}
*/ 11159 if (list_empty(&ioc->raid_device_list)) 11160 return; 11161 11162 mutex_lock(&ioc->scsih_cmds.mutex); 11163 11164 if (ioc->scsih_cmds.status != MPT3_CMD_NOT_USED) { 11165 ioc_err(ioc, "%s: scsih_cmd in use\n", __func__); 11166 goto out; 11167 } 11168 ioc->scsih_cmds.status = MPT3_CMD_PENDING; 11169 11170 smid = mpt3sas_base_get_smid(ioc, ioc->scsih_cb_idx); 11171 if (!smid) { 11172 ioc_err(ioc, "%s: failed obtaining a smid\n", __func__); 11173 ioc->scsih_cmds.status = MPT3_CMD_NOT_USED; 11174 goto out; 11175 } 11176 11177 mpi_request = mpt3sas_base_get_msg_frame(ioc, smid); 11178 ioc->scsih_cmds.smid = smid; 11179 memset(mpi_request, 0, sizeof(Mpi2RaidActionRequest_t)); 11180 11181 mpi_request->Function = MPI2_FUNCTION_RAID_ACTION; 11182 mpi_request->Action = MPI2_RAID_ACTION_SYSTEM_SHUTDOWN_INITIATED; 11183 11184 if (!ioc->hide_ir_msg) 11185 ioc_info(ioc, "IR shutdown (sending)\n"); 11186 init_completion(&ioc->scsih_cmds.done); 11187 ioc->put_smid_default(ioc, smid); 11188 wait_for_completion_timeout(&ioc->scsih_cmds.done, 10*HZ); 11189 11190 if (!(ioc->scsih_cmds.status & MPT3_CMD_COMPLETE)) { 11191 ioc_err(ioc, "%s: timeout\n", __func__); 11192 goto out; 11193 } 11194 11195 if (ioc->scsih_cmds.status & MPT3_CMD_REPLY_VALID) { 11196 mpi_reply = ioc->scsih_cmds.reply; 11197 if (!ioc->hide_ir_msg) 11198 ioc_info(ioc, "IR shutdown (complete): ioc_status(0x%04x), loginfo(0x%08x)\n", 11199 le16_to_cpu(mpi_reply->IOCStatus), 11200 le32_to_cpu(mpi_reply->IOCLogInfo)); 11201 } 11202 11203 out: 11204 ioc->scsih_cmds.status = MPT3_CMD_NOT_USED; 11205 mutex_unlock(&ioc->scsih_cmds.mutex); 11206 } 11207 11208 /** 11209 * _scsih_get_shost_and_ioc - get shost and ioc 11210 * and verify whether they are NULL or not 11211 * @pdev: PCI device struct 11212 * @shost: address of scsi host pointer 11213 * @ioc: address of HBA adapter pointer 11214 * 11215 * Return zero if *shost and *ioc are not NULL otherwise return error number. 
11216 */ 11217 static int 11218 _scsih_get_shost_and_ioc(struct pci_dev *pdev, 11219 struct Scsi_Host **shost, struct MPT3SAS_ADAPTER **ioc) 11220 { 11221 *shost = pci_get_drvdata(pdev); 11222 if (*shost == NULL) { 11223 dev_err(&pdev->dev, "pdev's driver data is null\n"); 11224 return -ENXIO; 11225 } 11226 11227 *ioc = shost_priv(*shost); 11228 if (*ioc == NULL) { 11229 dev_err(&pdev->dev, "shost's private data is null\n"); 11230 return -ENXIO; 11231 } 11232 11233 return 0; 11234 } 11235 11236 /** 11237 * scsih_remove - detach and remove add host 11238 * @pdev: PCI device struct 11239 * 11240 * Routine called when unloading the driver. 11241 */ 11242 static void scsih_remove(struct pci_dev *pdev) 11243 { 11244 struct Scsi_Host *shost; 11245 struct MPT3SAS_ADAPTER *ioc; 11246 struct _sas_port *mpt3sas_port, *next_port; 11247 struct _raid_device *raid_device, *next; 11248 struct MPT3SAS_TARGET *sas_target_priv_data; 11249 struct _pcie_device *pcie_device, *pcienext; 11250 struct workqueue_struct *wq; 11251 unsigned long flags; 11252 Mpi2ConfigReply_t mpi_reply; 11253 struct hba_port *port, *port_next; 11254 11255 if (_scsih_get_shost_and_ioc(pdev, &shost, &ioc)) 11256 return; 11257 11258 ioc->remove_host = 1; 11259 11260 if (!pci_device_is_present(pdev)) { 11261 mpt3sas_base_pause_mq_polling(ioc); 11262 _scsih_flush_running_cmds(ioc); 11263 } 11264 11265 _scsih_fw_event_cleanup_queue(ioc); 11266 11267 spin_lock_irqsave(&ioc->fw_event_lock, flags); 11268 wq = ioc->firmware_event_thread; 11269 ioc->firmware_event_thread = NULL; 11270 spin_unlock_irqrestore(&ioc->fw_event_lock, flags); 11271 if (wq) 11272 destroy_workqueue(wq); 11273 /* 11274 * Copy back the unmodified ioc page1. so that on next driver load, 11275 * current modified changes on ioc page1 won't take effect. 
/**
 * scsih_remove - detach and remove add host
 * @pdev: PCI device struct
 *
 * Routine called when unloading the driver.
 */
static void scsih_remove(struct pci_dev *pdev)
{
	struct Scsi_Host *shost;
	struct MPT3SAS_ADAPTER *ioc;
	struct _sas_port *mpt3sas_port, *next_port;
	struct _raid_device *raid_device, *next;
	struct MPT3SAS_TARGET *sas_target_priv_data;
	struct _pcie_device *pcie_device, *pcienext;
	struct workqueue_struct *wq;
	unsigned long flags;
	Mpi2ConfigReply_t mpi_reply;
	struct hba_port *port, *port_next;

	if (_scsih_get_shost_and_ioc(pdev, &shost, &ioc))
		return;

	ioc->remove_host = 1;

	/* surprise removal: flush outstanding I/O since the hardware is gone */
	if (!pci_device_is_present(pdev)) {
		mpt3sas_base_pause_mq_polling(ioc);
		_scsih_flush_running_cmds(ioc);
	}

	_scsih_fw_event_cleanup_queue(ioc);

	/* detach the firmware-event workqueue under the lock, destroy it
	 * outside the lock
	 */
	spin_lock_irqsave(&ioc->fw_event_lock, flags);
	wq = ioc->firmware_event_thread;
	ioc->firmware_event_thread = NULL;
	spin_unlock_irqrestore(&ioc->fw_event_lock, flags);
	if (wq)
		destroy_workqueue(wq);
	/*
	 * Copy back the unmodified ioc page1. so that on next driver load,
	 * current modified changes on ioc page1 won't take effect.
	 */
	if (ioc->is_aero_ioc)
		mpt3sas_config_set_ioc_pg1(ioc, &mpi_reply,
		    &ioc->ioc_pg1_copy);
	/* release all the volumes */
	_scsih_ir_shutdown(ioc);
	mpt3sas_destroy_debugfs(ioc);
	sas_remove_host(shost);
	list_for_each_entry_safe(raid_device, next, &ioc->raid_device_list,
	    list) {
		if (raid_device->starget) {
			sas_target_priv_data =
			    raid_device->starget->hostdata;
			/* mark deleted so queued commands are rejected */
			sas_target_priv_data->deleted = 1;
			scsi_remove_target(&raid_device->starget->dev);
		}
		ioc_info(ioc, "removing handle(0x%04x), wwid(0x%016llx)\n",
			 raid_device->handle, (u64)raid_device->wwid);
		_scsih_raid_device_remove(ioc, raid_device);
	}
	list_for_each_entry_safe(pcie_device, pcienext, &ioc->pcie_device_list,
	    list) {
		_scsih_pcie_device_remove_from_sml(ioc, pcie_device);
		list_del_init(&pcie_device->list);
		/* drop the list's reference */
		pcie_device_put(pcie_device);
	}

	/* free ports attached to the sas_host */
	list_for_each_entry_safe(mpt3sas_port, next_port,
	   &ioc->sas_hba.sas_port_list, port_list) {
		if (mpt3sas_port->remote_identify.device_type ==
		    SAS_END_DEVICE)
			mpt3sas_device_remove_by_sas_address(ioc,
			    mpt3sas_port->remote_identify.sas_address,
			    mpt3sas_port->hba_port);
		else if (mpt3sas_port->remote_identify.device_type ==
		    SAS_EDGE_EXPANDER_DEVICE ||
		    mpt3sas_port->remote_identify.device_type ==
		    SAS_FANOUT_EXPANDER_DEVICE)
			mpt3sas_expander_remove(ioc,
			    mpt3sas_port->remote_identify.sas_address,
			    mpt3sas_port->hba_port);
	}

	list_for_each_entry_safe(port, port_next,
	    &ioc->port_table_list, list) {
		list_del(&port->list);
		kfree(port);
	}

	/* free phys attached to the sas_host */
	if (ioc->sas_hba.num_phys) {
		kfree(ioc->sas_hba.phy);
		ioc->sas_hba.phy = NULL;
		ioc->sas_hba.num_phys = 0;
	}

	mpt3sas_base_detach(ioc);
	mpt3sas_ctl_release(ioc);
	/* remove from the global adapter list before the final put */
	spin_lock(&gioc_lock);
	list_del(&ioc->list);
	spin_unlock(&gioc_lock);
	scsi_host_put(shost);
}
/**
 * scsih_shutdown - routine call during system shutdown
 * @pdev: PCI device struct
 */
static void
scsih_shutdown(struct pci_dev *pdev)
{
	struct Scsi_Host *shost;
	struct MPT3SAS_ADAPTER *ioc;
	struct workqueue_struct *wq;
	unsigned long flags;
	Mpi2ConfigReply_t mpi_reply;

	if (_scsih_get_shost_and_ioc(pdev, &shost, &ioc))
		return;

	ioc->remove_host = 1;

	/* surprise removal: flush outstanding I/O since the hardware is gone */
	if (!pci_device_is_present(pdev)) {
		mpt3sas_base_pause_mq_polling(ioc);
		_scsih_flush_running_cmds(ioc);
	}

	_scsih_fw_event_cleanup_queue(ioc);

	/* detach the firmware-event workqueue under the lock, destroy it
	 * outside the lock
	 */
	spin_lock_irqsave(&ioc->fw_event_lock, flags);
	wq = ioc->firmware_event_thread;
	ioc->firmware_event_thread = NULL;
	spin_unlock_irqrestore(&ioc->fw_event_lock, flags);
	if (wq)
		destroy_workqueue(wq);
	/*
	 * Copy back the unmodified ioc page1 so that on next driver load,
	 * current modified changes on ioc page1 won't take effect.
	 */
	if (ioc->is_aero_ioc)
		mpt3sas_config_set_ioc_pg1(ioc, &mpi_reply,
		    &ioc->ioc_pg1_copy);

	/* notify IR and NVMe subsystems, then quiesce the IOC */
	_scsih_ir_shutdown(ioc);
	_scsih_nvme_shutdown(ioc);
	mpt3sas_base_mask_interrupts(ioc);
	mpt3sas_base_stop_watchdog(ioc);
	/* shost_recovery gates other activity while the IOC is reset */
	ioc->shost_recovery = 1;
	mpt3sas_base_make_ioc_ready(ioc, SOFT_RESET);
	ioc->shost_recovery = 0;
	mpt3sas_base_free_irq(ioc);
	mpt3sas_base_disable_msix(ioc);
}
/**
 * _scsih_probe_boot_devices - reports 1st device
 * @ioc: per adapter object
 *
 * If specified in bios page 2, this routine reports the 1st
 * device scsi-ml or sas transport for persistent boot device
 * purposes.  Please refer to function _scsih_determine_boot_device()
 */
static void
_scsih_probe_boot_devices(struct MPT3SAS_ADAPTER *ioc)
{
	u32 channel;
	void *device;
	struct _sas_device *sas_device;
	struct _raid_device *raid_device;
	struct _pcie_device *pcie_device;
	u16 handle;
	u64 sas_address_parent;
	u64 sas_address;
	unsigned long flags;
	int rc;
	int tid;
	struct hba_port *port;

	/* no Bios, return immediately */
	if (!ioc->bios_pg3.BiosVersion)
		return;

	/* pick the boot device in priority order: requested, requested
	 * alternate, then current
	 */
	device = NULL;
	if (ioc->req_boot_device.device) {
		device =  ioc->req_boot_device.device;
		channel = ioc->req_boot_device.channel;
	} else if (ioc->req_alt_boot_device.device) {
		device =  ioc->req_alt_boot_device.device;
		channel = ioc->req_alt_boot_device.channel;
	} else if (ioc->current_boot_device.device) {
		device =  ioc->current_boot_device.device;
		channel = ioc->current_boot_device.channel;
	}

	if (!device)
		return;

	/* the channel identifies which union member 'device' really is */
	if (channel == RAID_CHANNEL) {
		raid_device = device;
		/*
		 * If this boot vd is already registered with SML then
		 * no need to register it again as part of device scanning
		 * after diag reset during driver load operation.
		 */
		if (raid_device->starget)
			return;
		rc = scsi_add_device(ioc->shost, RAID_CHANNEL,
		    raid_device->id, 0);
		if (rc)
			_scsih_raid_device_remove(ioc, raid_device);
	} else if (channel == PCIE_CHANNEL) {
		pcie_device = device;
		/*
		 * If this boot NVMe device is already registered with SML then
		 * no need to register it again as part of device scanning
		 * after diag reset during driver load operation.
		 */
		if (pcie_device->starget)
			return;
		/* move from the init list to the active list under the lock */
		spin_lock_irqsave(&ioc->pcie_device_lock, flags);
		tid = pcie_device->id;
		list_move_tail(&pcie_device->list, &ioc->pcie_device_list);
		spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
		rc = scsi_add_device(ioc->shost, PCIE_CHANNEL, tid, 0);
		if (rc)
			_scsih_pcie_device_remove(ioc, pcie_device);
	} else {
		sas_device = device;
		/*
		 * If this boot sas/sata device is already registered with SML
		 * then no need to register it again as part of device scanning
		 * after diag reset during driver load operation.
		 */
		if (sas_device->starget)
			return;
		/* snapshot fields under the lock; port_add runs unlocked */
		spin_lock_irqsave(&ioc->sas_device_lock, flags);
		handle = sas_device->handle;
		sas_address_parent = sas_device->sas_address_parent;
		sas_address = sas_device->sas_address;
		port = sas_device->port;
		list_move_tail(&sas_device->list, &ioc->sas_device_list);
		spin_unlock_irqrestore(&ioc->sas_device_lock, flags);

		if (ioc->hide_drives)
			return;

		if (!port)
			return;

		if (!mpt3sas_transport_port_add(ioc, handle,
		    sas_address_parent, port)) {
			_scsih_sas_device_remove(ioc, sas_device);
		} else if (!sas_device->starget) {
			if (!ioc->is_driver_loading) {
				mpt3sas_transport_port_remove(ioc,
				    sas_address,
				    sas_address_parent, port);
				_scsih_sas_device_remove(ioc, sas_device);
			}
		}
	}
}
/**
 * _scsih_probe_raid - reporting raid volumes to scsi-ml
 * @ioc: per adapter object
 *
 * Called during initial loading of the driver.
 */
static void
_scsih_probe_raid(struct MPT3SAS_ADAPTER *ioc)
{
	struct _raid_device *raid_device, *raid_next;
	int rc;

	list_for_each_entry_safe(raid_device, raid_next,
	    &ioc->raid_device_list, list) {
		/* skip volumes already registered with scsi-ml */
		if (raid_device->starget)
			continue;
		rc = scsi_add_device(ioc->shost, RAID_CHANNEL,
		    raid_device->id, 0);
		if (rc)
			_scsih_raid_device_remove(ioc, raid_device);
	}
}

/**
 * get_next_sas_device - get the next device on sas_device_init_list
 * @ioc: per adapter object
 *
 * Returns the first entry of ioc->sas_device_init_list with an extra
 * reference taken (caller must sas_device_put()), or NULL when the
 * list is empty.
 */
static struct _sas_device *get_next_sas_device(struct MPT3SAS_ADAPTER *ioc)
{
	struct _sas_device *sas_device = NULL;
	unsigned long flags;

	spin_lock_irqsave(&ioc->sas_device_lock, flags);
	if (!list_empty(&ioc->sas_device_init_list)) {
		sas_device = list_first_entry(&ioc->sas_device_init_list,
		    struct _sas_device, list);
		/* reference for the caller */
		sas_device_get(sas_device);
	}
	spin_unlock_irqrestore(&ioc->sas_device_lock, flags);

	return sas_device;
}

/**
 * sas_device_make_active - move a sas device onto sas_device_list
 * @ioc: per adapter object
 * @sas_device: the sas device object
 *
 * Transfers @sas_device onto ioc->sas_device_list, fixing up the list
 * references accordingly.
 */
static void sas_device_make_active(struct MPT3SAS_ADAPTER *ioc,
	struct _sas_device *sas_device)
{
	unsigned long flags;

	spin_lock_irqsave(&ioc->sas_device_lock, flags);

	/*
	 * Since we dropped the lock during the call to port_add(), we need to
	 * be careful here that somebody else didn't move or delete this item
	 * while we were busy with other things.
	 *
	 * If it was on the list, we need a put() for the reference the list
	 * had.  Either way, we need a get() for the destination list.
	 */
	if (!list_empty(&sas_device->list)) {
		list_del_init(&sas_device->list);
		sas_device_put(sas_device);
	}

	sas_device_get(sas_device);
	list_add_tail(&sas_device->list, &ioc->sas_device_list);

	spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
}
/**
 * _scsih_probe_sas - reporting sas devices to sas transport
 * @ioc: per adapter object
 *
 * Called during initial loading of the driver.
 */
static void
_scsih_probe_sas(struct MPT3SAS_ADAPTER *ioc)
{
	struct _sas_device *sas_device;

	if (ioc->hide_drives)
		return;

	/* each iteration holds the reference taken by get_next_sas_device()
	 * and must drop it via sas_device_put() on every path
	 */
	while ((sas_device = get_next_sas_device(ioc))) {
		if (!mpt3sas_transport_port_add(ioc, sas_device->handle,
		    sas_device->sas_address_parent, sas_device->port)) {
			_scsih_sas_device_remove(ioc, sas_device);
			sas_device_put(sas_device);
			continue;
		} else if (!sas_device->starget) {
			/*
			 * When async scanning is enabled, its not possible to
			 * remove devices while scanning is turned on due to an
			 * oops in scsi_sysfs_add_sdev()->add_device()->
			 * sysfs_addrm_start()
			 */
			if (!ioc->is_driver_loading) {
				mpt3sas_transport_port_remove(ioc,
				    sas_device->sas_address,
				    sas_device->sas_address_parent,
				    sas_device->port);
				_scsih_sas_device_remove(ioc, sas_device);
				sas_device_put(sas_device);
				continue;
			}
		}
		sas_device_make_active(ioc, sas_device);
		sas_device_put(sas_device);
	}
}
/**
 * get_next_pcie_device - Get the next pcie device
 * @ioc: per adapter object
 *
 * Get the next pcie device from pcie_device_init_list list.
 *
 * Return: pcie device structure if pcie_device_init_list list is not empty
 * otherwise returns NULL
 */
static struct _pcie_device *get_next_pcie_device(struct MPT3SAS_ADAPTER *ioc)
{
	struct _pcie_device *pcie_device = NULL;
	unsigned long flags;

	spin_lock_irqsave(&ioc->pcie_device_lock, flags);
	if (!list_empty(&ioc->pcie_device_init_list)) {
		pcie_device = list_first_entry(&ioc->pcie_device_init_list,
		    struct _pcie_device, list);
		/* reference for the caller; drop with pcie_device_put() */
		pcie_device_get(pcie_device);
	}
	spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);

	return pcie_device;
}

/**
 * pcie_device_make_active - Add pcie device to pcie_device_list list
 * @ioc: per adapter object
 * @pcie_device: pcie device object
 *
 * Add the pcie device which has registered with SCSI Transport Later to
 * pcie_device_list list
 */
static void pcie_device_make_active(struct MPT3SAS_ADAPTER *ioc,
	struct _pcie_device *pcie_device)
{
	unsigned long flags;

	spin_lock_irqsave(&ioc->pcie_device_lock, flags);

	/* if still on a list, drop that list's reference before taking one
	 * for the destination list
	 */
	if (!list_empty(&pcie_device->list)) {
		list_del_init(&pcie_device->list);
		pcie_device_put(pcie_device);
	}
	pcie_device_get(pcie_device);
	list_add_tail(&pcie_device->list, &ioc->pcie_device_list);

	spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
}
/**
 * _scsih_probe_pcie - reporting PCIe devices to scsi-ml
 * @ioc: per adapter object
 *
 * Called during initial loading of the driver.
 */
static void
_scsih_probe_pcie(struct MPT3SAS_ADAPTER *ioc)
{
	struct _pcie_device *pcie_device;
	int rc;

	/* PCIe Device List */
	while ((pcie_device = get_next_pcie_device(ioc))) {
		/* already registered with scsi-ml */
		if (pcie_device->starget) {
			pcie_device_put(pcie_device);
			continue;
		}
		/* blocked devices go on the active list without a starget */
		if (pcie_device->access_status ==
		    MPI26_PCIEDEV0_ASTATUS_DEVICE_BLOCKED) {
			pcie_device_make_active(ioc, pcie_device);
			pcie_device_put(pcie_device);
			continue;
		}
		rc = scsi_add_device(ioc->shost, PCIE_CHANNEL,
		    pcie_device->id, 0);
		if (rc) {
			_scsih_pcie_device_remove(ioc, pcie_device);
			pcie_device_put(pcie_device);
			continue;
		} else if (!pcie_device->starget) {
			/*
			 * When async scanning is enabled, its not possible to
			 * remove devices while scanning is turned on due to an
			 * oops in scsi_sysfs_add_sdev()->add_device()->
			 * sysfs_addrm_start()
			 */
			if (!ioc->is_driver_loading) {
				/* TODO-- Need to find out whether this condition will
				 * occur or not
				 */
				_scsih_pcie_device_remove(ioc, pcie_device);
				pcie_device_put(pcie_device);
				continue;
			}
		}
		pcie_device_make_active(ioc, pcie_device);
		pcie_device_put(pcie_device);
	}
}
/**
 * _scsih_probe_devices - probing for devices
 * @ioc: per adapter object
 *
 * Called during initial loading of the driver.
 */
static void
_scsih_probe_devices(struct MPT3SAS_ADAPTER *ioc)
{
	u16 volume_mapping_flags;

	if (!(ioc->facts.ProtocolFlags & MPI2_IOCFACTS_PROTOCOL_SCSI_INITIATOR))
		return;  /* return when IOC doesn't support initiator mode */

	_scsih_probe_boot_devices(ioc);

	if (ioc->ir_firmware) {
		/* the volume mapping mode decides whether volumes or bare
		 * sas devices get reported first
		 */
		volume_mapping_flags =
		    le16_to_cpu(ioc->ioc_pg8.IRVolumeMappingFlags) &
		    MPI2_IOCPAGE8_IRFLAGS_MASK_VOLUME_MAPPING_MODE;
		if (volume_mapping_flags ==
		    MPI2_IOCPAGE8_IRFLAGS_LOW_VOLUME_MAPPING) {
			_scsih_probe_raid(ioc);
			_scsih_probe_sas(ioc);
		} else {
			_scsih_probe_sas(ioc);
			_scsih_probe_raid(ioc);
		}
	} else {
		_scsih_probe_sas(ioc);
		_scsih_probe_pcie(ioc);
	}
}

/**
 * scsih_scan_start - scsi lld callback for .scan_start
 * @shost: SCSI host pointer
 *
 * The shost has the ability to discover targets on its own instead
 * of scanning the entire bus.  In our implementation, we will kick off
 * firmware discovery.
 */
static void
scsih_scan_start(struct Scsi_Host *shost)
{
	struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
	int rc;

	/* diag_buffer_enable / disable_discovery are module parameters
	 * (defined elsewhere in this file)
	 */
	if (diag_buffer_enable != -1 && diag_buffer_enable != 0)
		mpt3sas_enable_diag_buffer(ioc, diag_buffer_enable);
	else if (ioc->manu_pg11.HostTraceBufferMaxSizeKB != 0)
		mpt3sas_enable_diag_buffer(ioc, 1);

	if (disable_discovery > 0)
		return;

	ioc->start_scan = 1;
	rc = mpt3sas_port_enable(ioc);

	if (rc != 0)
		ioc_info(ioc, "port enable: FAILED\n");
}
/**
 * _scsih_complete_devices_scanning - add the devices to sml and
 * complete ioc initialization.
 * @ioc: per adapter object
 *
 * Return nothing.
 */
static void _scsih_complete_devices_scanning(struct MPT3SAS_ADAPTER *ioc)
{

	if (ioc->wait_for_discovery_to_complete) {
		ioc->wait_for_discovery_to_complete = 0;
		_scsih_probe_devices(ioc);
	}

	mpt3sas_base_start_watchdog(ioc);
	ioc->is_driver_loading = 0;
}

/**
 * scsih_scan_finished - scsi lld callback for .scan_finished
 * @shost: SCSI host pointer
 * @time: elapsed time of the scan in jiffies
 *
 * This function will be called periodically until it returns 1 with the
 * scsi_host and the elapsed time of the scan in jiffies.  In our
 * implementation, we wait for firmware discovery to complete, then return 1.
 */
static int
scsih_scan_finished(struct Scsi_Host *shost, unsigned long time)
{
	struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
	u32 ioc_state;
	int issue_hard_reset = 0;

	if (disable_discovery > 0) {
		ioc->is_driver_loading = 0;
		ioc->wait_for_discovery_to_complete = 0;
		return 1;
	}

	/* give up on port enable after 300 seconds */
	if (time >= (300 * HZ)) {
		ioc->port_enable_cmds.status = MPT3_CMD_NOT_USED;
		ioc_info(ioc, "port enable: FAILED with timeout (timeout=300s)\n");
		ioc->is_driver_loading = 0;
		return 1;
	}

	/* start_scan stays set until the port enable reply arrives */
	if (ioc->start_scan) {
		ioc_state = mpt3sas_base_get_iocstate(ioc, 0);
		if ((ioc_state & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_FAULT) {
			mpt3sas_print_fault_code(ioc, ioc_state &
			    MPI2_DOORBELL_DATA_MASK);
			issue_hard_reset = 1;
			goto out;
		} else if ((ioc_state & MPI2_IOC_STATE_MASK) ==
				MPI2_IOC_STATE_COREDUMP) {
			mpt3sas_base_coredump_info(ioc, ioc_state &
			    MPI2_DOORBELL_DATA_MASK);
			mpt3sas_base_wait_for_coredump_completion(ioc, __func__);
			issue_hard_reset = 1;
			goto out;
		}
		return 0;
	}

	if (ioc->port_enable_cmds.status & MPT3_CMD_RESET) {
		ioc_info(ioc,
		    "port enable: aborted due to diag reset\n");
		ioc->port_enable_cmds.status = MPT3_CMD_NOT_USED;
		goto out;
	}
	if (ioc->start_scan_failed) {
		ioc_info(ioc, "port enable: FAILED with (ioc_status=0x%08x)\n",
			 ioc->start_scan_failed);
		ioc->is_driver_loading = 0;
		ioc->wait_for_discovery_to_complete = 0;
		ioc->remove_host = 1;
		return 1;
	}

	ioc_info(ioc, "port enable: SUCCESS\n");
	ioc->port_enable_cmds.status = MPT3_CMD_NOT_USED;
	_scsih_complete_devices_scanning(ioc);

out:
	if (issue_hard_reset) {
		ioc->port_enable_cmds.status = MPT3_CMD_NOT_USED;
		if (mpt3sas_base_hard_reset_handler(ioc, SOFT_RESET))
			ioc->is_driver_loading = 0;
	}
	return 1;
}
/**
 * scsih_map_queues - map reply queues with request queues
 * @shost: SCSI host pointer
 */
static void scsih_map_queues(struct Scsi_Host *shost)
{
	struct MPT3SAS_ADAPTER *ioc =
	    (struct MPT3SAS_ADAPTER *)shost->hostdata;
	struct blk_mq_queue_map *map;
	int i, qoff, offset;
	/* reply queues below iopoll_q_start_index are MSI-X backed; the
	 * remainder are polled (no IRQ)
	 */
	int nr_msix_vectors = ioc->iopoll_q_start_index;
	int iopoll_q_count = ioc->reply_queue_count - nr_msix_vectors;

	if (shost->nr_hw_queues == 1)
		return;

	for (i = 0, qoff = 0; i < shost->nr_maps; i++) {
		map = &shost->tag_set.map[i];
		map->nr_queues = 0;
		offset = 0;
		if (i == HCTX_TYPE_DEFAULT) {
			/* high-iops queues are excluded from this map and
			 * skipped via the PCI vector offset
			 */
			map->nr_queues =
			    nr_msix_vectors - ioc->high_iops_queues;
			offset = ioc->high_iops_queues;
		} else if (i == HCTX_TYPE_POLL)
			map->nr_queues = iopoll_q_count;

		/* the default map must never end up empty */
		if (!map->nr_queues)
			BUG_ON(i == HCTX_TYPE_DEFAULT);

		/*
		 * The poll queue(s) doesn't have an IRQ (and hence IRQ
		 * affinity), so use the regular blk-mq cpu mapping
		 */
		map->queue_offset = qoff;
		if (i != HCTX_TYPE_POLL)
			blk_mq_pci_map_queues(map, ioc->pdev, offset);
		else
			blk_mq_map_queues(map);

		qoff += map->nr_queues;
	}
}
/* shost template for SAS 2.0 HBA devices */
static const struct scsi_host_template mpt2sas_driver_template = {
	.module				= THIS_MODULE,
	.name				= "Fusion MPT SAS Host",
	.proc_name			= MPT2SAS_DRIVER_NAME,
	.queuecommand			= scsih_qcmd,
	.target_alloc			= scsih_target_alloc,
	.slave_alloc			= scsih_slave_alloc,
	.slave_configure		= scsih_slave_configure,
	.target_destroy			= scsih_target_destroy,
	.slave_destroy			= scsih_slave_destroy,
	.scan_finished			= scsih_scan_finished,
	.scan_start			= scsih_scan_start,
	.change_queue_depth		= scsih_change_queue_depth,
	.eh_abort_handler		= scsih_abort,
	.eh_device_reset_handler	= scsih_dev_reset,
	.eh_target_reset_handler	= scsih_target_reset,
	.eh_host_reset_handler		= scsih_host_reset,
	.bios_param			= scsih_bios_param,
	/* can_queue of 1 is a placeholder; presumably raised once the IOC
	 * reports its real queue depth — TODO confirm against base attach
	 */
	.can_queue			= 1,
	.this_id			= -1,
	.sg_tablesize			= MPT2SAS_SG_DEPTH,
	.max_sectors			= 32767,
	.cmd_per_lun			= 7,
	.shost_groups			= mpt3sas_host_groups,
	.sdev_groups			= mpt3sas_dev_groups,
	.track_queue_depth		= 1,
	.cmd_size			= sizeof(struct scsiio_tracker),
};

/* raid transport support for SAS 2.0 HBA devices */
static struct raid_function_template mpt2sas_raid_functions = {
	.cookie		= &mpt2sas_driver_template,
	.is_raid	= scsih_is_raid,
	.get_resync	= scsih_get_resync,
	.get_state	= scsih_get_state,
};

/* shost template for SAS 3.0 HBA devices */
static const struct scsi_host_template mpt3sas_driver_template = {
	.module				= THIS_MODULE,
	.name				= "Fusion MPT SAS Host",
	.proc_name			= MPT3SAS_DRIVER_NAME,
	.queuecommand			= scsih_qcmd,
	.target_alloc			= scsih_target_alloc,
	.slave_alloc			= scsih_slave_alloc,
	.slave_configure		= scsih_slave_configure,
	.target_destroy			= scsih_target_destroy,
	.slave_destroy			= scsih_slave_destroy,
	.scan_finished			= scsih_scan_finished,
	.scan_start			= scsih_scan_start,
	.change_queue_depth		= scsih_change_queue_depth,
	.eh_abort_handler		= scsih_abort,
	.eh_device_reset_handler	= scsih_dev_reset,
	.eh_target_reset_handler	= scsih_target_reset,
	.eh_host_reset_handler		= scsih_host_reset,
	.bios_param			= scsih_bios_param,
	/* placeholder; presumably raised at attach time — TODO confirm */
	.can_queue			= 1,
	.this_id			= -1,
	.sg_tablesize			= MPT3SAS_SG_DEPTH,
	.max_sectors			= 32767,
	.max_segment_size		= 0xffffffff,
	.cmd_per_lun			= 128,
	.shost_groups			= mpt3sas_host_groups,
	.sdev_groups			= mpt3sas_dev_groups,
	.track_queue_depth		= 1,
	.cmd_size			= sizeof(struct scsiio_tracker),
	/* SAS 3.0+ only: multiqueue mapping and polled-queue support */
	.map_queues			= scsih_map_queues,
	.mq_poll			= mpt3sas_blk_mq_poll,
};

/* raid transport support for SAS 3.0 HBA devices */
static struct raid_function_template mpt3sas_raid_functions = {
	.cookie		= &mpt3sas_driver_template,
	.is_raid	= scsih_is_raid,
	.get_resync	= scsih_get_resync,
	.get_state	= scsih_get_state,
};
/**
 * _scsih_determine_hba_mpi_version - determine in which MPI version class
 *					this device belongs to.
 * @pdev: PCI device struct
 *
 * return MPI2_VERSION for SAS 2.0 HBA devices,
 *	MPI25_VERSION for SAS 3.0 HBA devices, and
 *	MPI26 VERSION for Cutlass & Invader SAS 3.0 HBA devices
 */
static u16
_scsih_determine_hba_mpi_version(struct pci_dev *pdev)
{
	/* classify purely by PCI device ID */
	switch (pdev->device) {
	case MPI2_MFGPAGE_DEVID_SSS6200:
	case MPI2_MFGPAGE_DEVID_SAS2004:
	case MPI2_MFGPAGE_DEVID_SAS2008:
	case MPI2_MFGPAGE_DEVID_SAS2108_1:
	case MPI2_MFGPAGE_DEVID_SAS2108_2:
	case MPI2_MFGPAGE_DEVID_SAS2108_3:
	case MPI2_MFGPAGE_DEVID_SAS2116_1:
	case MPI2_MFGPAGE_DEVID_SAS2116_2:
	case MPI2_MFGPAGE_DEVID_SAS2208_1:
	case MPI2_MFGPAGE_DEVID_SAS2208_2:
	case MPI2_MFGPAGE_DEVID_SAS2208_3:
	case MPI2_MFGPAGE_DEVID_SAS2208_4:
	case MPI2_MFGPAGE_DEVID_SAS2208_5:
	case MPI2_MFGPAGE_DEVID_SAS2208_6:
	case MPI2_MFGPAGE_DEVID_SAS2308_1:
	case MPI2_MFGPAGE_DEVID_SAS2308_2:
	case MPI2_MFGPAGE_DEVID_SAS2308_3:
	case MPI2_MFGPAGE_DEVID_SWITCH_MPI_EP:
	case MPI2_MFGPAGE_DEVID_SWITCH_MPI_EP_1:
		return MPI2_VERSION;
	case MPI25_MFGPAGE_DEVID_SAS3004:
	case MPI25_MFGPAGE_DEVID_SAS3008:
	case MPI25_MFGPAGE_DEVID_SAS3108_1:
	case MPI25_MFGPAGE_DEVID_SAS3108_2:
	case MPI25_MFGPAGE_DEVID_SAS3108_5:
	case MPI25_MFGPAGE_DEVID_SAS3108_6:
		return MPI25_VERSION;
	case MPI26_MFGPAGE_DEVID_SAS3216:
	case MPI26_MFGPAGE_DEVID_SAS3224:
	case MPI26_MFGPAGE_DEVID_SAS3316_1:
	case MPI26_MFGPAGE_DEVID_SAS3316_2:
	case MPI26_MFGPAGE_DEVID_SAS3316_3:
	case MPI26_MFGPAGE_DEVID_SAS3316_4:
	case MPI26_MFGPAGE_DEVID_SAS3324_1:
	case MPI26_MFGPAGE_DEVID_SAS3324_2:
	case MPI26_MFGPAGE_DEVID_SAS3324_3:
	case MPI26_MFGPAGE_DEVID_SAS3324_4:
	case MPI26_MFGPAGE_DEVID_SAS3508:
	case MPI26_MFGPAGE_DEVID_SAS3508_1:
	case MPI26_MFGPAGE_DEVID_SAS3408:
	case MPI26_MFGPAGE_DEVID_SAS3516:
	case MPI26_MFGPAGE_DEVID_SAS3516_1:
	case MPI26_MFGPAGE_DEVID_SAS3416:
	case MPI26_MFGPAGE_DEVID_SAS3616:
	case MPI26_ATLAS_PCIe_SWITCH_DEVID:
	case MPI26_MFGPAGE_DEVID_CFG_SEC_3916:
	case MPI26_MFGPAGE_DEVID_HARD_SEC_3916:
	case MPI26_MFGPAGE_DEVID_CFG_SEC_3816:
	case MPI26_MFGPAGE_DEVID_HARD_SEC_3816:
	/* INVALID IDs still classify as MPI26; they are rejected later in
	 * the probe path
	 */
	case MPI26_MFGPAGE_DEVID_INVALID0_3916:
	case MPI26_MFGPAGE_DEVID_INVALID1_3916:
	case MPI26_MFGPAGE_DEVID_INVALID0_3816:
	case MPI26_MFGPAGE_DEVID_INVALID1_3816:
		return MPI26_VERSION;
	}
	/* unknown device: not claimed by this driver */
	return 0;
}
shost = scsi_host_alloc(&mpt2sas_driver_template, 12100 sizeof(struct MPT3SAS_ADAPTER)); 12101 if (!shost) 12102 return -ENODEV; 12103 ioc = shost_priv(shost); 12104 memset(ioc, 0, sizeof(struct MPT3SAS_ADAPTER)); 12105 ioc->hba_mpi_version_belonged = hba_mpi_version; 12106 ioc->id = mpt2_ids++; 12107 sprintf(ioc->driver_name, "%s", MPT2SAS_DRIVER_NAME); 12108 switch (pdev->device) { 12109 case MPI2_MFGPAGE_DEVID_SSS6200: 12110 ioc->is_warpdrive = 1; 12111 ioc->hide_ir_msg = 1; 12112 break; 12113 case MPI2_MFGPAGE_DEVID_SWITCH_MPI_EP: 12114 case MPI2_MFGPAGE_DEVID_SWITCH_MPI_EP_1: 12115 ioc->is_mcpu_endpoint = 1; 12116 break; 12117 default: 12118 ioc->mfg_pg10_hide_flag = MFG_PAGE10_EXPOSE_ALL_DISKS; 12119 break; 12120 } 12121 12122 if (multipath_on_hba == -1 || multipath_on_hba == 0) 12123 ioc->multipath_on_hba = 0; 12124 else 12125 ioc->multipath_on_hba = 1; 12126 12127 break; 12128 case MPI25_VERSION: 12129 case MPI26_VERSION: 12130 /* Use mpt3sas driver host template for SAS 3.0 HBA's */ 12131 shost = scsi_host_alloc(&mpt3sas_driver_template, 12132 sizeof(struct MPT3SAS_ADAPTER)); 12133 if (!shost) 12134 return -ENODEV; 12135 ioc = shost_priv(shost); 12136 memset(ioc, 0, sizeof(struct MPT3SAS_ADAPTER)); 12137 ioc->hba_mpi_version_belonged = hba_mpi_version; 12138 ioc->id = mpt3_ids++; 12139 sprintf(ioc->driver_name, "%s", MPT3SAS_DRIVER_NAME); 12140 switch (pdev->device) { 12141 case MPI26_MFGPAGE_DEVID_SAS3508: 12142 case MPI26_MFGPAGE_DEVID_SAS3508_1: 12143 case MPI26_MFGPAGE_DEVID_SAS3408: 12144 case MPI26_MFGPAGE_DEVID_SAS3516: 12145 case MPI26_MFGPAGE_DEVID_SAS3516_1: 12146 case MPI26_MFGPAGE_DEVID_SAS3416: 12147 case MPI26_MFGPAGE_DEVID_SAS3616: 12148 case MPI26_ATLAS_PCIe_SWITCH_DEVID: 12149 ioc->is_gen35_ioc = 1; 12150 break; 12151 case MPI26_MFGPAGE_DEVID_INVALID0_3816: 12152 case MPI26_MFGPAGE_DEVID_INVALID0_3916: 12153 dev_err(&pdev->dev, 12154 "HBA with DeviceId 0x%04x, sub VendorId 0x%04x, sub DeviceId 0x%04x is Invalid", 12155 pdev->device, 
pdev->subsystem_vendor, 12156 pdev->subsystem_device); 12157 return 1; 12158 case MPI26_MFGPAGE_DEVID_INVALID1_3816: 12159 case MPI26_MFGPAGE_DEVID_INVALID1_3916: 12160 dev_err(&pdev->dev, 12161 "HBA with DeviceId 0x%04x, sub VendorId 0x%04x, sub DeviceId 0x%04x is Tampered", 12162 pdev->device, pdev->subsystem_vendor, 12163 pdev->subsystem_device); 12164 return 1; 12165 case MPI26_MFGPAGE_DEVID_CFG_SEC_3816: 12166 case MPI26_MFGPAGE_DEVID_CFG_SEC_3916: 12167 dev_info(&pdev->dev, 12168 "HBA is in Configurable Secure mode\n"); 12169 fallthrough; 12170 case MPI26_MFGPAGE_DEVID_HARD_SEC_3816: 12171 case MPI26_MFGPAGE_DEVID_HARD_SEC_3916: 12172 ioc->is_aero_ioc = ioc->is_gen35_ioc = 1; 12173 break; 12174 default: 12175 ioc->is_gen35_ioc = ioc->is_aero_ioc = 0; 12176 } 12177 if ((ioc->hba_mpi_version_belonged == MPI25_VERSION && 12178 pdev->revision >= SAS3_PCI_DEVICE_C0_REVISION) || 12179 (ioc->hba_mpi_version_belonged == MPI26_VERSION)) { 12180 ioc->combined_reply_queue = 1; 12181 if (ioc->is_gen35_ioc) 12182 ioc->combined_reply_index_count = 12183 MPT3_SUP_REPLY_POST_HOST_INDEX_REG_COUNT_G35; 12184 else 12185 ioc->combined_reply_index_count = 12186 MPT3_SUP_REPLY_POST_HOST_INDEX_REG_COUNT_G3; 12187 } 12188 12189 switch (ioc->is_gen35_ioc) { 12190 case 0: 12191 if (multipath_on_hba == -1 || multipath_on_hba == 0) 12192 ioc->multipath_on_hba = 0; 12193 else 12194 ioc->multipath_on_hba = 1; 12195 break; 12196 case 1: 12197 if (multipath_on_hba == -1 || multipath_on_hba > 0) 12198 ioc->multipath_on_hba = 1; 12199 else 12200 ioc->multipath_on_hba = 0; 12201 break; 12202 default: 12203 break; 12204 } 12205 12206 break; 12207 default: 12208 return -ENODEV; 12209 } 12210 12211 INIT_LIST_HEAD(&ioc->list); 12212 spin_lock(&gioc_lock); 12213 list_add_tail(&ioc->list, &mpt3sas_ioc_list); 12214 spin_unlock(&gioc_lock); 12215 ioc->shost = shost; 12216 ioc->pdev = pdev; 12217 ioc->scsi_io_cb_idx = scsi_io_cb_idx; 12218 ioc->tm_cb_idx = tm_cb_idx; 12219 ioc->ctl_cb_idx = ctl_cb_idx; 
12220 ioc->base_cb_idx = base_cb_idx; 12221 ioc->port_enable_cb_idx = port_enable_cb_idx; 12222 ioc->transport_cb_idx = transport_cb_idx; 12223 ioc->scsih_cb_idx = scsih_cb_idx; 12224 ioc->config_cb_idx = config_cb_idx; 12225 ioc->tm_tr_cb_idx = tm_tr_cb_idx; 12226 ioc->tm_tr_volume_cb_idx = tm_tr_volume_cb_idx; 12227 ioc->tm_sas_control_cb_idx = tm_sas_control_cb_idx; 12228 ioc->logging_level = logging_level; 12229 ioc->schedule_dead_ioc_flush_running_cmds = &_scsih_flush_running_cmds; 12230 /* Host waits for minimum of six seconds */ 12231 ioc->max_shutdown_latency = IO_UNIT_CONTROL_SHUTDOWN_TIMEOUT; 12232 /* 12233 * Enable MEMORY MOVE support flag. 12234 */ 12235 ioc->drv_support_bitmap |= MPT_DRV_SUPPORT_BITMAP_MEMMOVE; 12236 /* Enable ADDITIONAL QUERY support flag. */ 12237 ioc->drv_support_bitmap |= MPT_DRV_SUPPORT_BITMAP_ADDNLQUERY; 12238 12239 ioc->enable_sdev_max_qd = enable_sdev_max_qd; 12240 12241 /* misc semaphores and spin locks */ 12242 mutex_init(&ioc->reset_in_progress_mutex); 12243 mutex_init(&ioc->hostdiag_unlock_mutex); 12244 /* initializing pci_access_mutex lock */ 12245 mutex_init(&ioc->pci_access_mutex); 12246 spin_lock_init(&ioc->ioc_reset_in_progress_lock); 12247 spin_lock_init(&ioc->scsi_lookup_lock); 12248 spin_lock_init(&ioc->sas_device_lock); 12249 spin_lock_init(&ioc->sas_node_lock); 12250 spin_lock_init(&ioc->fw_event_lock); 12251 spin_lock_init(&ioc->raid_device_lock); 12252 spin_lock_init(&ioc->pcie_device_lock); 12253 spin_lock_init(&ioc->diag_trigger_lock); 12254 12255 INIT_LIST_HEAD(&ioc->sas_device_list); 12256 INIT_LIST_HEAD(&ioc->sas_device_init_list); 12257 INIT_LIST_HEAD(&ioc->sas_expander_list); 12258 INIT_LIST_HEAD(&ioc->enclosure_list); 12259 INIT_LIST_HEAD(&ioc->pcie_device_list); 12260 INIT_LIST_HEAD(&ioc->pcie_device_init_list); 12261 INIT_LIST_HEAD(&ioc->fw_event_list); 12262 INIT_LIST_HEAD(&ioc->raid_device_list); 12263 INIT_LIST_HEAD(&ioc->sas_hba.sas_port_list); 12264 INIT_LIST_HEAD(&ioc->delayed_tr_list); 12265 
INIT_LIST_HEAD(&ioc->delayed_sc_list); 12266 INIT_LIST_HEAD(&ioc->delayed_event_ack_list); 12267 INIT_LIST_HEAD(&ioc->delayed_tr_volume_list); 12268 INIT_LIST_HEAD(&ioc->reply_queue_list); 12269 INIT_LIST_HEAD(&ioc->port_table_list); 12270 12271 sprintf(ioc->name, "%s_cm%d", ioc->driver_name, ioc->id); 12272 12273 /* init shost parameters */ 12274 shost->max_cmd_len = 32; 12275 shost->max_lun = max_lun; 12276 shost->transportt = mpt3sas_transport_template; 12277 shost->unique_id = ioc->id; 12278 12279 if (ioc->is_mcpu_endpoint) { 12280 /* mCPU MPI support 64K max IO */ 12281 shost->max_sectors = 128; 12282 ioc_info(ioc, "The max_sectors value is set to %d\n", 12283 shost->max_sectors); 12284 } else { 12285 if (max_sectors != 0xFFFF) { 12286 if (max_sectors < 64) { 12287 shost->max_sectors = 64; 12288 ioc_warn(ioc, "Invalid value %d passed for max_sectors, range is 64 to 32767. Assigning value of 64.\n", 12289 max_sectors); 12290 } else if (max_sectors > 32767) { 12291 shost->max_sectors = 32767; 12292 ioc_warn(ioc, "Invalid value %d passed for max_sectors, range is 64 to 32767.Assigning default value of 32767.\n", 12293 max_sectors); 12294 } else { 12295 shost->max_sectors = max_sectors & 0xFFFE; 12296 ioc_info(ioc, "The max_sectors value is set to %d\n", 12297 shost->max_sectors); 12298 } 12299 } 12300 } 12301 /* register EEDP capabilities with SCSI layer */ 12302 if (prot_mask >= 0) 12303 scsi_host_set_prot(shost, (prot_mask & 0x07)); 12304 else 12305 scsi_host_set_prot(shost, SHOST_DIF_TYPE1_PROTECTION 12306 | SHOST_DIF_TYPE2_PROTECTION 12307 | SHOST_DIF_TYPE3_PROTECTION); 12308 12309 scsi_host_set_guard(shost, SHOST_DIX_GUARD_CRC); 12310 12311 /* event thread */ 12312 snprintf(ioc->firmware_event_name, sizeof(ioc->firmware_event_name), 12313 "fw_event_%s%d", ioc->driver_name, ioc->id); 12314 ioc->firmware_event_thread = alloc_ordered_workqueue( 12315 ioc->firmware_event_name, 0); 12316 if (!ioc->firmware_event_thread) { 12317 ioc_err(ioc, "failure at 
%s:%d/%s()!\n", 12318 __FILE__, __LINE__, __func__); 12319 rv = -ENODEV; 12320 goto out_thread_fail; 12321 } 12322 12323 shost->host_tagset = 0; 12324 12325 if (ioc->is_gen35_ioc && host_tagset_enable) 12326 shost->host_tagset = 1; 12327 12328 ioc->is_driver_loading = 1; 12329 if ((mpt3sas_base_attach(ioc))) { 12330 ioc_err(ioc, "failure at %s:%d/%s()!\n", 12331 __FILE__, __LINE__, __func__); 12332 rv = -ENODEV; 12333 goto out_attach_fail; 12334 } 12335 12336 if (ioc->is_warpdrive) { 12337 if (ioc->mfg_pg10_hide_flag == MFG_PAGE10_EXPOSE_ALL_DISKS) 12338 ioc->hide_drives = 0; 12339 else if (ioc->mfg_pg10_hide_flag == MFG_PAGE10_HIDE_ALL_DISKS) 12340 ioc->hide_drives = 1; 12341 else { 12342 if (mpt3sas_get_num_volumes(ioc)) 12343 ioc->hide_drives = 1; 12344 else 12345 ioc->hide_drives = 0; 12346 } 12347 } else 12348 ioc->hide_drives = 0; 12349 12350 shost->nr_hw_queues = 1; 12351 12352 if (shost->host_tagset) { 12353 shost->nr_hw_queues = 12354 ioc->reply_queue_count - ioc->high_iops_queues; 12355 12356 iopoll_q_count = 12357 ioc->reply_queue_count - ioc->iopoll_q_start_index; 12358 12359 shost->nr_maps = iopoll_q_count ? 
3 : 1; 12360 12361 dev_info(&ioc->pdev->dev, 12362 "Max SCSIIO MPT commands: %d shared with nr_hw_queues = %d\n", 12363 shost->can_queue, shost->nr_hw_queues); 12364 } 12365 12366 rv = scsi_add_host(shost, &pdev->dev); 12367 if (rv) { 12368 ioc_err(ioc, "failure at %s:%d/%s()!\n", 12369 __FILE__, __LINE__, __func__); 12370 goto out_add_shost_fail; 12371 } 12372 12373 scsi_scan_host(shost); 12374 mpt3sas_setup_debugfs(ioc); 12375 return 0; 12376 out_add_shost_fail: 12377 mpt3sas_base_detach(ioc); 12378 out_attach_fail: 12379 destroy_workqueue(ioc->firmware_event_thread); 12380 out_thread_fail: 12381 spin_lock(&gioc_lock); 12382 list_del(&ioc->list); 12383 spin_unlock(&gioc_lock); 12384 scsi_host_put(shost); 12385 return rv; 12386 } 12387 12388 /** 12389 * scsih_suspend - power management suspend main entry point 12390 * @dev: Device struct 12391 * 12392 * Return: 0 success, anything else error. 12393 */ 12394 static int __maybe_unused 12395 scsih_suspend(struct device *dev) 12396 { 12397 struct pci_dev *pdev = to_pci_dev(dev); 12398 struct Scsi_Host *shost; 12399 struct MPT3SAS_ADAPTER *ioc; 12400 int rc; 12401 12402 rc = _scsih_get_shost_and_ioc(pdev, &shost, &ioc); 12403 if (rc) 12404 return rc; 12405 12406 mpt3sas_base_stop_watchdog(ioc); 12407 scsi_block_requests(shost); 12408 _scsih_nvme_shutdown(ioc); 12409 ioc_info(ioc, "pdev=0x%p, slot=%s, entering operating state\n", 12410 pdev, pci_name(pdev)); 12411 12412 mpt3sas_base_free_resources(ioc); 12413 return 0; 12414 } 12415 12416 /** 12417 * scsih_resume - power management resume main entry point 12418 * @dev: Device struct 12419 * 12420 * Return: 0 success, anything else error. 
12421 */ 12422 static int __maybe_unused 12423 scsih_resume(struct device *dev) 12424 { 12425 struct pci_dev *pdev = to_pci_dev(dev); 12426 struct Scsi_Host *shost; 12427 struct MPT3SAS_ADAPTER *ioc; 12428 pci_power_t device_state = pdev->current_state; 12429 int r; 12430 12431 r = _scsih_get_shost_and_ioc(pdev, &shost, &ioc); 12432 if (r) 12433 return r; 12434 12435 ioc_info(ioc, "pdev=0x%p, slot=%s, previous operating state [D%d]\n", 12436 pdev, pci_name(pdev), device_state); 12437 12438 ioc->pdev = pdev; 12439 r = mpt3sas_base_map_resources(ioc); 12440 if (r) 12441 return r; 12442 ioc_info(ioc, "Issuing Hard Reset as part of OS Resume\n"); 12443 mpt3sas_base_hard_reset_handler(ioc, SOFT_RESET); 12444 scsi_unblock_requests(shost); 12445 mpt3sas_base_start_watchdog(ioc); 12446 return 0; 12447 } 12448 12449 /** 12450 * scsih_pci_error_detected - Called when a PCI error is detected. 12451 * @pdev: PCI device struct 12452 * @state: PCI channel state 12453 * 12454 * Description: Called when a PCI error is detected. 12455 * 12456 * Return: PCI_ERS_RESULT_NEED_RESET or PCI_ERS_RESULT_DISCONNECT. 
12457 */ 12458 static pci_ers_result_t 12459 scsih_pci_error_detected(struct pci_dev *pdev, pci_channel_state_t state) 12460 { 12461 struct Scsi_Host *shost; 12462 struct MPT3SAS_ADAPTER *ioc; 12463 12464 if (_scsih_get_shost_and_ioc(pdev, &shost, &ioc)) 12465 return PCI_ERS_RESULT_DISCONNECT; 12466 12467 ioc_info(ioc, "PCI error: detected callback, state(%d)!!\n", state); 12468 12469 switch (state) { 12470 case pci_channel_io_normal: 12471 return PCI_ERS_RESULT_CAN_RECOVER; 12472 case pci_channel_io_frozen: 12473 /* Fatal error, prepare for slot reset */ 12474 ioc->pci_error_recovery = 1; 12475 scsi_block_requests(ioc->shost); 12476 mpt3sas_base_stop_watchdog(ioc); 12477 mpt3sas_base_free_resources(ioc); 12478 return PCI_ERS_RESULT_NEED_RESET; 12479 case pci_channel_io_perm_failure: 12480 /* Permanent error, prepare for device removal */ 12481 ioc->pci_error_recovery = 1; 12482 mpt3sas_base_stop_watchdog(ioc); 12483 mpt3sas_base_pause_mq_polling(ioc); 12484 _scsih_flush_running_cmds(ioc); 12485 return PCI_ERS_RESULT_DISCONNECT; 12486 } 12487 return PCI_ERS_RESULT_NEED_RESET; 12488 } 12489 12490 /** 12491 * scsih_pci_slot_reset - Called when PCI slot has been reset. 12492 * @pdev: PCI device struct 12493 * 12494 * Description: This routine is called by the pci error recovery 12495 * code after the PCI slot has been reset, just before we 12496 * should resume normal operations. 
12497 */ 12498 static pci_ers_result_t 12499 scsih_pci_slot_reset(struct pci_dev *pdev) 12500 { 12501 struct Scsi_Host *shost; 12502 struct MPT3SAS_ADAPTER *ioc; 12503 int rc; 12504 12505 if (_scsih_get_shost_and_ioc(pdev, &shost, &ioc)) 12506 return PCI_ERS_RESULT_DISCONNECT; 12507 12508 ioc_info(ioc, "PCI error: slot reset callback!!\n"); 12509 12510 ioc->pci_error_recovery = 0; 12511 ioc->pdev = pdev; 12512 pci_restore_state(pdev); 12513 rc = mpt3sas_base_map_resources(ioc); 12514 if (rc) 12515 return PCI_ERS_RESULT_DISCONNECT; 12516 12517 ioc_info(ioc, "Issuing Hard Reset as part of PCI Slot Reset\n"); 12518 rc = mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER); 12519 12520 ioc_warn(ioc, "hard reset: %s\n", 12521 (rc == 0) ? "success" : "failed"); 12522 12523 if (!rc) 12524 return PCI_ERS_RESULT_RECOVERED; 12525 else 12526 return PCI_ERS_RESULT_DISCONNECT; 12527 } 12528 12529 /** 12530 * scsih_pci_resume() - resume normal ops after PCI reset 12531 * @pdev: pointer to PCI device 12532 * 12533 * Called when the error recovery driver tells us that its 12534 * OK to resume normal operation. Use completion to allow 12535 * halted scsi ops to resume. 
12536 */ 12537 static void 12538 scsih_pci_resume(struct pci_dev *pdev) 12539 { 12540 struct Scsi_Host *shost; 12541 struct MPT3SAS_ADAPTER *ioc; 12542 12543 if (_scsih_get_shost_and_ioc(pdev, &shost, &ioc)) 12544 return; 12545 12546 ioc_info(ioc, "PCI error: resume callback!!\n"); 12547 12548 mpt3sas_base_start_watchdog(ioc); 12549 scsi_unblock_requests(ioc->shost); 12550 } 12551 12552 /** 12553 * scsih_pci_mmio_enabled - Enable MMIO and dump debug registers 12554 * @pdev: pointer to PCI device 12555 */ 12556 static pci_ers_result_t 12557 scsih_pci_mmio_enabled(struct pci_dev *pdev) 12558 { 12559 struct Scsi_Host *shost; 12560 struct MPT3SAS_ADAPTER *ioc; 12561 12562 if (_scsih_get_shost_and_ioc(pdev, &shost, &ioc)) 12563 return PCI_ERS_RESULT_DISCONNECT; 12564 12565 ioc_info(ioc, "PCI error: mmio enabled callback!!\n"); 12566 12567 /* TODO - dump whatever for debugging purposes */ 12568 12569 /* This called only if scsih_pci_error_detected returns 12570 * PCI_ERS_RESULT_CAN_RECOVER. Read/write to the device still 12571 * works, no need to reset slot. 12572 */ 12573 return PCI_ERS_RESULT_RECOVERED; 12574 } 12575 12576 /** 12577 * scsih_ncq_prio_supp - Check for NCQ command priority support 12578 * @sdev: scsi device struct 12579 * 12580 * This is called when a user indicates they would like to enable 12581 * ncq command priorities. This works only on SATA devices. 12582 */ 12583 bool scsih_ncq_prio_supp(struct scsi_device *sdev) 12584 { 12585 struct scsi_vpd *vpd; 12586 bool ncq_prio_supp = false; 12587 12588 rcu_read_lock(); 12589 vpd = rcu_dereference(sdev->vpd_pg89); 12590 if (!vpd || vpd->len < 214) 12591 goto out; 12592 12593 ncq_prio_supp = (vpd->data[213] >> 4) & 1; 12594 out: 12595 rcu_read_unlock(); 12596 12597 return ncq_prio_supp; 12598 } 12599 /* 12600 * The pci device ids are defined in mpi/mpi2_cnfg.h. 
 */
static const struct pci_device_id mpt3sas_pci_table[] = {
	/* Spitfire ~ 2004 */
	{ MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2004,
		PCI_ANY_ID, PCI_ANY_ID },
	/* Falcon ~ 2008 */
	{ MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2008,
		PCI_ANY_ID, PCI_ANY_ID },
	/* Liberator ~ 2108 */
	{ MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2108_1,
		PCI_ANY_ID, PCI_ANY_ID },
	{ MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2108_2,
		PCI_ANY_ID, PCI_ANY_ID },
	{ MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2108_3,
		PCI_ANY_ID, PCI_ANY_ID },
	/* Meteor ~ 2116 */
	{ MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2116_1,
		PCI_ANY_ID, PCI_ANY_ID },
	{ MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2116_2,
		PCI_ANY_ID, PCI_ANY_ID },
	/* Thunderbolt ~ 2208 */
	{ MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2208_1,
		PCI_ANY_ID, PCI_ANY_ID },
	{ MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2208_2,
		PCI_ANY_ID, PCI_ANY_ID },
	{ MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2208_3,
		PCI_ANY_ID, PCI_ANY_ID },
	{ MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2208_4,
		PCI_ANY_ID, PCI_ANY_ID },
	{ MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2208_5,
		PCI_ANY_ID, PCI_ANY_ID },
	{ MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2208_6,
		PCI_ANY_ID, PCI_ANY_ID },
	/* Mustang ~ 2308 */
	{ MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2308_1,
		PCI_ANY_ID, PCI_ANY_ID },
	{ MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2308_2,
		PCI_ANY_ID, PCI_ANY_ID },
	{ MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2308_3,
		PCI_ANY_ID, PCI_ANY_ID },
	{ MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SWITCH_MPI_EP,
		PCI_ANY_ID, PCI_ANY_ID },
	{ MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SWITCH_MPI_EP_1,
		PCI_ANY_ID, PCI_ANY_ID },
	/* SSS6200 */
	{ MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SSS6200,
		PCI_ANY_ID, PCI_ANY_ID },
	/* Fury ~ 3004 and 3008 */
	{ MPI2_MFGPAGE_VENDORID_LSI, MPI25_MFGPAGE_DEVID_SAS3004,
		PCI_ANY_ID, PCI_ANY_ID },
	{ MPI2_MFGPAGE_VENDORID_LSI, MPI25_MFGPAGE_DEVID_SAS3008,
		PCI_ANY_ID, PCI_ANY_ID },
	/* Invader ~ 3108 */
	{ MPI2_MFGPAGE_VENDORID_LSI, MPI25_MFGPAGE_DEVID_SAS3108_1,
		PCI_ANY_ID, PCI_ANY_ID },
	{ MPI2_MFGPAGE_VENDORID_LSI, MPI25_MFGPAGE_DEVID_SAS3108_2,
		PCI_ANY_ID, PCI_ANY_ID },
	{ MPI2_MFGPAGE_VENDORID_LSI, MPI25_MFGPAGE_DEVID_SAS3108_5,
		PCI_ANY_ID, PCI_ANY_ID },
	{ MPI2_MFGPAGE_VENDORID_LSI, MPI25_MFGPAGE_DEVID_SAS3108_6,
		PCI_ANY_ID, PCI_ANY_ID },
	/* Cutlass ~ 3216 and 3224 */
	{ MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3216,
		PCI_ANY_ID, PCI_ANY_ID },
	{ MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3224,
		PCI_ANY_ID, PCI_ANY_ID },
	/* Intruder ~ 3316 and 3324 */
	{ MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3316_1,
		PCI_ANY_ID, PCI_ANY_ID },
	{ MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3316_2,
		PCI_ANY_ID, PCI_ANY_ID },
	{ MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3316_3,
		PCI_ANY_ID, PCI_ANY_ID },
	{ MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3316_4,
		PCI_ANY_ID, PCI_ANY_ID },
	{ MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3324_1,
		PCI_ANY_ID, PCI_ANY_ID },
	{ MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3324_2,
		PCI_ANY_ID, PCI_ANY_ID },
	{ MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3324_3,
		PCI_ANY_ID, PCI_ANY_ID },
	{ MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3324_4,
		PCI_ANY_ID, PCI_ANY_ID },
	/* Ventura, Crusader, Harpoon & Tomcat ~ 3516, 3416, 3508 & 3408*/
	{ MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3508,
		PCI_ANY_ID, PCI_ANY_ID },
	{ MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3508_1,
		PCI_ANY_ID, PCI_ANY_ID },
	{ MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3408,
		PCI_ANY_ID, PCI_ANY_ID },
	{ MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3516,
		PCI_ANY_ID, PCI_ANY_ID },
	{ MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3516_1,
		PCI_ANY_ID, PCI_ANY_ID },
	{ MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3416,
		PCI_ANY_ID, PCI_ANY_ID },
	/* Mercator ~ 3616*/
	{ MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3616,
		PCI_ANY_ID, PCI_ANY_ID },

	/* Aero SI 0x00E1 Configurable Secure
	 * 0x00E2 Hard Secure
	 */
	{ MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_CFG_SEC_3916,
		PCI_ANY_ID, PCI_ANY_ID },
	{ MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_HARD_SEC_3916,
		PCI_ANY_ID, PCI_ANY_ID },

	/*
	 * Aero SI –> 0x00E0 Invalid, 0x00E3 Tampered
	 */
	{ MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_INVALID0_3916,
		PCI_ANY_ID, PCI_ANY_ID },
	{ MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_INVALID1_3916,
		PCI_ANY_ID, PCI_ANY_ID },

	/* Atlas PCIe Switch Management Port */
	{ MPI2_MFGPAGE_VENDORID_LSI, MPI26_ATLAS_PCIe_SWITCH_DEVID,
		PCI_ANY_ID, PCI_ANY_ID },

	/* Sea SI 0x00E5 Configurable Secure
	 * 0x00E6 Hard Secure
	 */
	{ MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_CFG_SEC_3816,
		PCI_ANY_ID, PCI_ANY_ID },
	{ MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_HARD_SEC_3816,
		PCI_ANY_ID, PCI_ANY_ID },

	/*
	 * ATTO Branded ExpressSAS H12xx GT
	 */
	{ MPI2_MFGPAGE_VENDORID_ATTO, MPI26_MFGPAGE_DEVID_HARD_SEC_3816,
		PCI_ANY_ID, PCI_ANY_ID },

	/*
	 * Sea SI –> 0x00E4 Invalid, 0x00E7 Tampered
	 */
	{ MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_INVALID0_3816,
		PCI_ANY_ID, PCI_ANY_ID },
	{ MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_INVALID1_3816,
		PCI_ANY_ID, PCI_ANY_ID },

	{0}	/* Terminating entry */
};
MODULE_DEVICE_TABLE(pci, mpt3sas_pci_table);

/* PCI error recovery callbacks (AER) */
static struct pci_error_handlers _mpt3sas_err_handler = {
	.error_detected	= scsih_pci_error_detected,
	.mmio_enabled	= scsih_pci_mmio_enabled,
	.slot_reset	= scsih_pci_slot_reset,
	.resume		= scsih_pci_resume,
};

static SIMPLE_DEV_PM_OPS(scsih_pm_ops, scsih_suspend, scsih_resume);

static struct pci_driver mpt3sas_driver = {
	.name		= MPT3SAS_DRIVER_NAME,
	.id_table	= mpt3sas_pci_table,
	.probe		= _scsih_probe,
	.remove		= scsih_remove,
	.shutdown	= scsih_shutdown,
	.err_handler	= &_mpt3sas_err_handler,
	.driver.pm	= &scsih_pm_ops,
};

/**
 * scsih_init - main entry point for this driver.
 *
 * Registers all of the driver's MPT completion callback handlers with
 * the base layer and initializes debugfs support.
 *
 * Return: 0 success, anything else error.
 */
static int
scsih_init(void)
{
	mpt2_ids = 0;
	mpt3_ids = 0;

	mpt3sas_base_initialize_callback_handler();

	/* queuecommand callback handler */
	scsi_io_cb_idx = mpt3sas_base_register_callback_handler(_scsih_io_done);

	/* task management callback handler */
	tm_cb_idx = mpt3sas_base_register_callback_handler(_scsih_tm_done);

	/* base internal commands callback handler */
	base_cb_idx = mpt3sas_base_register_callback_handler(mpt3sas_base_done);
	port_enable_cb_idx = mpt3sas_base_register_callback_handler(
	    mpt3sas_port_enable_done);

	/* transport internal commands callback handler */
	transport_cb_idx = mpt3sas_base_register_callback_handler(
	    mpt3sas_transport_done);

	/* scsih internal commands callback handler */
	scsih_cb_idx = mpt3sas_base_register_callback_handler(_scsih_done);

	/* configuration page API internal commands callback handler */
	config_cb_idx = mpt3sas_base_register_callback_handler(
	    mpt3sas_config_done);

	/* ctl module callback handler */
	ctl_cb_idx = mpt3sas_base_register_callback_handler(mpt3sas_ctl_done);

	tm_tr_cb_idx = mpt3sas_base_register_callback_handler(
	    _scsih_tm_tr_complete);

	tm_tr_volume_cb_idx = mpt3sas_base_register_callback_handler(
	    _scsih_tm_volume_tr_complete);

	tm_sas_control_cb_idx = mpt3sas_base_register_callback_handler(
	    _scsih_sas_control_complete);

	mpt3sas_init_debugfs();
	return 0;
}

/**
 * scsih_exit - exit point for this driver (when it is a module).
 *
 * Releases all callback handlers, the raid class templates and the SAS
 * transport template registered during init.
 */
static void
scsih_exit(void)
{

	mpt3sas_base_release_callback_handler(scsi_io_cb_idx);
	mpt3sas_base_release_callback_handler(tm_cb_idx);
	mpt3sas_base_release_callback_handler(base_cb_idx);
	mpt3sas_base_release_callback_handler(port_enable_cb_idx);
	mpt3sas_base_release_callback_handler(transport_cb_idx);
	mpt3sas_base_release_callback_handler(scsih_cb_idx);
	mpt3sas_base_release_callback_handler(config_cb_idx);
	mpt3sas_base_release_callback_handler(ctl_cb_idx);

	mpt3sas_base_release_callback_handler(tm_tr_cb_idx);
	mpt3sas_base_release_callback_handler(tm_tr_volume_cb_idx);
	mpt3sas_base_release_callback_handler(tm_sas_control_cb_idx);

	/* raid transport support */
	if (hbas_to_enumerate != 1)
		raid_class_release(mpt3sas_raid_template);
	if (hbas_to_enumerate != 2)
		raid_class_release(mpt2sas_raid_template);
	sas_release_transport(mpt3sas_transport_template);
	mpt3sas_exit_debugfs();
}

/**
 * _mpt3sas_init - main entry point for this driver.
 *
 * Return: 0 success, anything else error.
12852 */ 12853 static int __init 12854 _mpt3sas_init(void) 12855 { 12856 int error; 12857 12858 pr_info("%s version %s loaded\n", MPT3SAS_DRIVER_NAME, 12859 MPT3SAS_DRIVER_VERSION); 12860 12861 mpt3sas_transport_template = 12862 sas_attach_transport(&mpt3sas_transport_functions); 12863 if (!mpt3sas_transport_template) 12864 return -ENODEV; 12865 12866 /* No need attach mpt3sas raid functions template 12867 * if hbas_to_enumarate value is one. 12868 */ 12869 if (hbas_to_enumerate != 1) { 12870 mpt3sas_raid_template = 12871 raid_class_attach(&mpt3sas_raid_functions); 12872 if (!mpt3sas_raid_template) { 12873 sas_release_transport(mpt3sas_transport_template); 12874 return -ENODEV; 12875 } 12876 } 12877 12878 /* No need to attach mpt2sas raid functions template 12879 * if hbas_to_enumarate value is two 12880 */ 12881 if (hbas_to_enumerate != 2) { 12882 mpt2sas_raid_template = 12883 raid_class_attach(&mpt2sas_raid_functions); 12884 if (!mpt2sas_raid_template) { 12885 sas_release_transport(mpt3sas_transport_template); 12886 return -ENODEV; 12887 } 12888 } 12889 12890 error = scsih_init(); 12891 if (error) { 12892 scsih_exit(); 12893 return error; 12894 } 12895 12896 mpt3sas_ctl_init(hbas_to_enumerate); 12897 12898 error = pci_register_driver(&mpt3sas_driver); 12899 if (error) { 12900 mpt3sas_ctl_exit(hbas_to_enumerate); 12901 scsih_exit(); 12902 } 12903 12904 return error; 12905 } 12906 12907 /** 12908 * _mpt3sas_exit - exit point for this driver (when it is a module). 12909 * 12910 */ 12911 static void __exit 12912 _mpt3sas_exit(void) 12913 { 12914 pr_info("mpt3sas version %s unloading\n", 12915 MPT3SAS_DRIVER_VERSION); 12916 12917 pci_unregister_driver(&mpt3sas_driver); 12918 12919 mpt3sas_ctl_exit(hbas_to_enumerate); 12920 12921 scsih_exit(); 12922 } 12923 12924 module_init(_mpt3sas_init); 12925 module_exit(_mpt3sas_exit); 12926