xref: /linux/drivers/scsi/mpt3sas/mpt3sas_scsih.c (revision 189f164e573e18d9f8876dbd3ad8fcbe11f93037)
1 /*
2  * Scsi Host Layer for MPT (Message Passing Technology) based controllers
3  *
4  * This code is based on drivers/scsi/mpt3sas/mpt3sas_scsih.c
5  * Copyright (C) 2012-2014  LSI Corporation
6  * Copyright (C) 2013-2014 Avago Technologies
7  *  (mailto: MPT-FusionLinux.pdl@avagotech.com)
8  *
9  * This program is free software; you can redistribute it and/or
10  * modify it under the terms of the GNU General Public License
11  * as published by the Free Software Foundation; either version 2
12  * of the License, or (at your option) any later version.
13  *
14  * This program is distributed in the hope that it will be useful,
15  * but WITHOUT ANY WARRANTY; without even the implied warranty of
16  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
17  * GNU General Public License for more details.
18  *
19  * NO WARRANTY
20  * THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR
21  * CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT
22  * LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT,
23  * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is
24  * solely responsible for determining the appropriateness of using and
25  * distributing the Program and assumes all risks associated with its
26  * exercise of rights under this Agreement, including but not limited to
27  * the risks and costs of program errors, damage to or loss of data,
28  * programs or equipment, and unavailability or interruption of operations.
29 
30  * DISCLAIMER OF LIABILITY
31  * NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY
32  * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
33  * DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND
34  * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
35  * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
36  * USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED
37  * HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES
38 
39  * You should have received a copy of the GNU General Public License
40  * along with this program; if not, write to the Free Software
41  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301,
42  * USA.
43  */
44 
45 #include <linux/module.h>
46 #include <linux/kernel.h>
47 #include <linux/init.h>
48 #include <linux/errno.h>
49 #include <linux/blkdev.h>
50 #include <linux/sched.h>
51 #include <linux/workqueue.h>
52 #include <linux/delay.h>
53 #include <linux/pci.h>
54 #include <linux/interrupt.h>
55 #include <linux/raid_class.h>
56 #include <linux/unaligned.h>
57 
58 #include "mpt3sas_base.h"
59 
60 #define RAID_CHANNEL 1
61 
62 #define PCIE_CHANNEL 2
63 
64 #define MPT3_MAX_LUNS (255)
65 
66 /* forward proto's */
67 static void _scsih_expander_node_remove(struct MPT3SAS_ADAPTER *ioc,
68 	struct _sas_node *sas_expander);
69 static void _firmware_event_work(struct work_struct *work);
70 
71 static void _scsih_remove_device(struct MPT3SAS_ADAPTER *ioc,
72 	struct _sas_device *sas_device);
73 static int _scsih_add_device(struct MPT3SAS_ADAPTER *ioc, u16 handle,
74 	u8 retry_count, u8 is_pd);
75 static int _scsih_pcie_add_device(struct MPT3SAS_ADAPTER *ioc, u16 handle,
76 	u8 retry_count);
77 static void _scsih_pcie_device_remove_from_sml(struct MPT3SAS_ADAPTER *ioc,
78 	struct _pcie_device *pcie_device);
79 static void
80 _scsih_pcie_check_device(struct MPT3SAS_ADAPTER *ioc, u16 handle);
81 static u8 _scsih_check_for_pending_tm(struct MPT3SAS_ADAPTER *ioc, u16 smid);
82 static void _scsih_complete_devices_scanning(struct MPT3SAS_ADAPTER *ioc);
83 static enum device_responsive_state
84 _scsih_wait_for_target_to_become_ready(struct MPT3SAS_ADAPTER *ioc, u16 handle,
85 	u8 retry_count, u8 is_pd, u8 tr_timeout, u8 tr_method);
86 static enum device_responsive_state
87 _scsih_ata_pass_thru_idd(struct MPT3SAS_ADAPTER *ioc, u16 handle, u8 *is_ssd_device,
88 	u8 tr_timeout, u8 tr_method);
89 static enum device_responsive_state
90 _scsih_wait_for_device_to_become_ready(struct MPT3SAS_ADAPTER *ioc, u16 handle,
91 	u8 retry_count, u8 is_pd, int lun, u8 tr_timeout, u8 tr_method);
92 static void _firmware_event_work_delayed(struct work_struct *work);
93 
94 /* global parameters */
95 LIST_HEAD(mpt3sas_ioc_list);
96 /* global ioc lock for list operations */
97 DEFINE_SPINLOCK(gioc_lock);
98 
99 MODULE_AUTHOR(MPT3SAS_AUTHOR);
100 MODULE_DESCRIPTION(MPT3SAS_DESCRIPTION);
101 MODULE_LICENSE("GPL");
102 MODULE_VERSION(MPT3SAS_DRIVER_VERSION);
103 MODULE_ALIAS("mpt2sas");
104 
105 /* local parameters */
106 static u8 scsi_io_cb_idx = -1;
107 static u8 tm_cb_idx = -1;
108 static u8 ctl_cb_idx = -1;
109 static u8 base_cb_idx = -1;
110 static u8 port_enable_cb_idx = -1;
111 static u8 transport_cb_idx = -1;
112 static u8 scsih_cb_idx = -1;
113 static u8 config_cb_idx = -1;
114 static int mpt2_ids;
115 static int mpt3_ids;
116 
117 static u8 tm_tr_cb_idx = -1 ;
118 static u8 tm_tr_volume_cb_idx = -1 ;
119 static u8 tm_sas_control_cb_idx = -1;
120 
121 /* command line options */
122 static u32 logging_level;
123 MODULE_PARM_DESC(logging_level,
124 	" bits for enabling additional logging info (default=0)");
125 
126 
127 static ushort max_sectors = 0xFFFF;
128 module_param(max_sectors, ushort, 0444);
129 MODULE_PARM_DESC(max_sectors, "max sectors, range 64 to 32767  default=32767");
130 
131 
132 static int missing_delay[2] = {-1, -1};
133 module_param_array(missing_delay, int, NULL, 0444);
134 MODULE_PARM_DESC(missing_delay, " device missing delay , io missing delay");
135 
/* scsi-mid layer global parameter is max_report_luns, which is 511 */
137 #define MPT3SAS_MAX_LUN (16895)
138 static u64 max_lun = MPT3SAS_MAX_LUN;
139 module_param(max_lun, ullong, 0444);
140 MODULE_PARM_DESC(max_lun, " max lun, default=16895 ");
141 
142 static ushort hbas_to_enumerate;
143 module_param(hbas_to_enumerate, ushort, 0444);
144 MODULE_PARM_DESC(hbas_to_enumerate,
145 		" 0 - enumerates both SAS 2.0 & SAS 3.0 generation HBAs\n \
146 		  1 - enumerates only SAS 2.0 generation HBAs\n \
147 		  2 - enumerates only SAS 3.0 generation HBAs (default=0)");
148 
149 /* diag_buffer_enable is bitwise
150  * bit 0 set = TRACE
151  * bit 1 set = SNAPSHOT
152  * bit 2 set = EXTENDED
153  *
154  * Either bit can be set, or both
155  */
156 static int diag_buffer_enable = -1;
157 module_param(diag_buffer_enable, int, 0444);
158 MODULE_PARM_DESC(diag_buffer_enable,
159 	" post diag buffers (TRACE=1/SNAPSHOT=2/EXTENDED=4/default=0)");
160 static int disable_discovery = -1;
161 module_param(disable_discovery, int, 0444);
162 MODULE_PARM_DESC(disable_discovery, " disable discovery ");
163 
164 
165 /* permit overriding the host protection capabilities mask (EEDP/T10 PI) */
166 static int prot_mask = -1;
167 module_param(prot_mask, int, 0444);
168 MODULE_PARM_DESC(prot_mask, " host protection capabilities mask, def=7 ");
169 
170 static bool enable_sdev_max_qd;
171 module_param(enable_sdev_max_qd, bool, 0444);
172 MODULE_PARM_DESC(enable_sdev_max_qd,
173 	"Enable sdev max qd as can_queue, def=disabled(0)");
174 
175 /*
176  * permit overriding the SCSI command issuing capability of
177  * the driver to bring the drive to READY state
178  */
179 static int issue_scsi_cmd_to_bringup_drive = 1;
180 module_param(issue_scsi_cmd_to_bringup_drive, int, 0444);
181 MODULE_PARM_DESC(issue_scsi_cmd_to_bringup_drive, "allow host driver to\n"
182 	"issue SCSI commands to bring the drive to READY state, default=1 ");
183 
184 static int multipath_on_hba = -1;
185 module_param(multipath_on_hba, int, 0);
186 MODULE_PARM_DESC(multipath_on_hba,
187 	"Multipath support to add same target device\n\t\t"
188 	"as many times as it is visible to HBA from various paths\n\t\t"
189 	"(by default:\n\t\t"
190 	"\t SAS 2.0 & SAS 3.0 HBA - This will be disabled,\n\t\t"
191 	"\t SAS 3.5 HBA - This will be enabled)");
192 
193 static int host_tagset_enable = 1;
194 module_param(host_tagset_enable, int, 0444);
195 MODULE_PARM_DESC(host_tagset_enable,
196 	"Shared host tagset enable/disable Default: enable(1)");
197 
198 static int command_retry_count = 144;
199 module_param(command_retry_count, int, 0444);
200 MODULE_PARM_DESC(command_retry_count, "Device discovery TUR command retry\n"
201 	"count: (default=144)");
202 
203 /* raid transport support */
204 static struct raid_template *mpt3sas_raid_template;
205 static struct raid_template *mpt2sas_raid_template;
206 
/**
 * enum device_responsive_state - responsive state
 * @DEVICE_READY: device is ready to be added
 * @DEVICE_RETRY: device can be retried later
 * @DEVICE_RETRY_UA: retry unit attentions
 * @DEVICE_START_UNIT: requires start unit
 * @DEVICE_STOP_UNIT: requires stop unit
 * @DEVICE_ERROR: device reported some fatal error
 *
 * Result codes returned by the device bring-up helpers (see
 * _scsih_wait_for_device_to_become_ready() and friends) to tell the
 * discovery path how to proceed with a device.
 */
enum device_responsive_state {
	DEVICE_READY,
	DEVICE_RETRY,
	DEVICE_RETRY_UA,
	DEVICE_START_UNIT,
	DEVICE_STOP_UNIT,
	DEVICE_ERROR,
};
225 
/**
 * struct sense_info - common structure for obtaining sense keys
 * @skey: sense key
 * @asc: additional sense code
 * @ascq: additional sense code qualifier
 *
 * Holds the decoded fixed-format sense triplet from a SCSI check
 * condition.
 */
struct sense_info {
	u8 skey;
	u8 asc;
	u8 ascq;
};
237 
238 #define MPT3SAS_PROCESS_TRIGGER_DIAG (0xFFFB)
239 #define MPT3SAS_TURN_ON_PFA_LED (0xFFFC)
240 #define MPT3SAS_PORT_ENABLE_COMPLETE (0xFFFD)
241 #define MPT3SAS_ABRT_TASK_SET (0xFFFE)
242 #define MPT3SAS_REMOVE_UNRESPONDING_DEVICES (0xFFFF)
243 
244 /*
245  * SAS Log info code for a NCQ collateral abort after an NCQ error:
246  * IOC_LOGINFO_PREFIX_PL | PL_LOGINFO_CODE_SATA_NCQ_FAIL_ALL_CMDS_AFTR_ERR
247  * See: drivers/message/fusion/lsi/mpi_log_sas.h
248  */
249 #define IOC_LOGINFO_SATA_NCQ_FAIL_AFTER_ERR	0x31080000
250 
/**
 * struct fw_event_work - firmware event struct
 * @retries: retry count for processing the event (dynamically allocated;
 *	freed together with the event in fw_event_work_free())
 * @delayed_work_active: flag indicating if delayed work is active
 * @delayed_work: delayed work item for deferred event handling
 * @list: link list framework
 * @work: work object (ioc->fault_reset_work_q)
 * @ioc: per adapter object
 * @device_handle: device handle
 * @VF_ID: virtual function id
 * @VP_ID: virtual port id
 * @ignore: flag meaning this event has been marked to ignore
 * @event: firmware event MPI2_EVENT_XXX defined in mpi2_ioc.h
 * @refcount: kref for this event
 * @event_data: reply event data payload follows
 *
 * This object stored on ioc->fw_event_list.
 */
struct fw_event_work {
	u8			*retries;
	u8                      delayed_work_active;
	struct delayed_work     delayed_work;
	struct list_head	list;
	struct work_struct	work;

	struct MPT3SAS_ADAPTER *ioc;
	u16			device_handle;
	u8			VF_ID;
	u8			VP_ID;
	u8			ignore;
	u16			event;
	struct kref		refcount;
	char			event_data[] __aligned(4);

};
286 
fw_event_work_free(struct kref * r)287 static void fw_event_work_free(struct kref *r)
288 {
289 	struct fw_event_work *fw_work;
290 
291 	fw_work = container_of(r, struct fw_event_work, refcount);
292 	kfree(fw_work->retries);
293 	kfree(fw_work);
294 }
295 
fw_event_work_get(struct fw_event_work * fw_work)296 static void fw_event_work_get(struct fw_event_work *fw_work)
297 {
298 	kref_get(&fw_work->refcount);
299 }
300 
fw_event_work_put(struct fw_event_work * fw_work)301 static void fw_event_work_put(struct fw_event_work *fw_work)
302 {
303 	kref_put(&fw_work->refcount, fw_event_work_free);
304 }
305 
alloc_fw_event_work(int len)306 static struct fw_event_work *alloc_fw_event_work(int len)
307 {
308 	struct fw_event_work *fw_event;
309 
310 	fw_event = kzalloc(sizeof(*fw_event) + len, GFP_ATOMIC);
311 	if (!fw_event)
312 		return NULL;
313 
314 	kref_init(&fw_event->refcount);
315 	return fw_event;
316 }
317 
/**
 * struct _scsi_io_transfer - scsi io transfer
 * @handle: sas device handle (assigned by firmware)
 * @is_raid: flag set for hidden raid components
 * @dir: DMA_TO_DEVICE, DMA_FROM_DEVICE,
 * @data_length: data transfer length
 * @data_dma: dma pointer to data
 * @sense: sense data
 * @lun: lun number
 * @cdb_length: cdb length
 * @cdb: cdb contents
 * @timeout: timeout for this command
 * @VF_ID: virtual function id
 * @VP_ID: virtual port id
 * @valid_reply: flag set for reply message
 * @sense_length: sense length
 * @ioc_status: ioc status
 * @scsi_state: scsi state
 * @scsi_status: scsi status
 * @log_info: log information
 * @transfer_length: data length transfer when there is a reply message
 *
 * Used for sending internal scsi commands to devices within this module.
 * Refer to _scsi_send_scsi_io().
 */
struct _scsi_io_transfer {
	u16	handle;
	u8	is_raid;
	enum dma_data_direction dir;
	u32	data_length;
	dma_addr_t data_dma;
	u8	sense[SCSI_SENSE_BUFFERSIZE];
	u32	lun;
	u8	cdb_length;
	u8	cdb[32];
	u8	timeout;
	u8	VF_ID;
	u8	VP_ID;
	u8	valid_reply;
  /* the following bits are only valid when 'valid_reply = 1' */
	u32	sense_length;
	u16	ioc_status;
	u8	scsi_state;
	u8	scsi_status;
	u32	log_info;
	u32	transfer_length;
};
365 
366 /**
367  * _scsih_set_debug_level - global setting of ioc->logging_level.
368  * @val: value of the parameter to be set
369  * @kp: pointer to kernel_param structure
370  *
371  * Note: The logging levels are defined in mpt3sas_debug.h.
372  */
373 static int
_scsih_set_debug_level(const char * val,const struct kernel_param * kp)374 _scsih_set_debug_level(const char *val, const struct kernel_param *kp)
375 {
376 	int ret = param_set_int(val, kp);
377 	struct MPT3SAS_ADAPTER *ioc;
378 
379 	if (ret)
380 		return ret;
381 
382 	pr_info("setting logging_level(0x%08x)\n", logging_level);
383 	spin_lock(&gioc_lock);
384 	list_for_each_entry(ioc, &mpt3sas_ioc_list, list)
385 		ioc->logging_level = logging_level;
386 	spin_unlock(&gioc_lock);
387 	return 0;
388 }
389 module_param_call(logging_level, _scsih_set_debug_level, param_get_int,
390 	&logging_level, 0644);
391 
392 /**
393  * _scsih_srch_boot_sas_address - search based on sas_address
394  * @sas_address: sas address
395  * @boot_device: boot device object from bios page 2
396  *
397  * Return: 1 when there's a match, 0 means no match.
398  */
399 static inline int
_scsih_srch_boot_sas_address(u64 sas_address,Mpi2BootDeviceSasWwid_t * boot_device)400 _scsih_srch_boot_sas_address(u64 sas_address,
401 	Mpi2BootDeviceSasWwid_t *boot_device)
402 {
403 	return (sas_address == le64_to_cpu(boot_device->SASAddress)) ?  1 : 0;
404 }
405 
406 /**
407  * _scsih_srch_boot_device_name - search based on device name
408  * @device_name: device name specified in INDENTIFY fram
409  * @boot_device: boot device object from bios page 2
410  *
411  * Return: 1 when there's a match, 0 means no match.
412  */
413 static inline int
_scsih_srch_boot_device_name(u64 device_name,Mpi2BootDeviceDeviceName_t * boot_device)414 _scsih_srch_boot_device_name(u64 device_name,
415 	Mpi2BootDeviceDeviceName_t *boot_device)
416 {
417 	return (device_name == le64_to_cpu(boot_device->DeviceName)) ? 1 : 0;
418 }
419 
420 /**
421  * _scsih_srch_boot_encl_slot - search based on enclosure_logical_id/slot
422  * @enclosure_logical_id: enclosure logical id
423  * @slot_number: slot number
424  * @boot_device: boot device object from bios page 2
425  *
426  * Return: 1 when there's a match, 0 means no match.
427  */
428 static inline int
_scsih_srch_boot_encl_slot(u64 enclosure_logical_id,u16 slot_number,Mpi2BootDeviceEnclosureSlot_t * boot_device)429 _scsih_srch_boot_encl_slot(u64 enclosure_logical_id, u16 slot_number,
430 	Mpi2BootDeviceEnclosureSlot_t *boot_device)
431 {
432 	return (enclosure_logical_id == le64_to_cpu(boot_device->
433 	    EnclosureLogicalID) && slot_number == le16_to_cpu(boot_device->
434 	    SlotNumber)) ? 1 : 0;
435 }
436 
437 /**
438  * mpt3sas_get_port_by_id - get hba port entry corresponding to provided
439  *			  port number from port list
440  * @ioc: per adapter object
441  * @port_id: port number
442  * @bypass_dirty_port_flag: when set look the matching hba port entry even
443  *			if hba port entry is marked as dirty.
444  *
445  * Search for hba port entry corresponding to provided port number,
446  * if available return port object otherwise return NULL.
447  */
448 struct hba_port *
mpt3sas_get_port_by_id(struct MPT3SAS_ADAPTER * ioc,u8 port_id,u8 bypass_dirty_port_flag)449 mpt3sas_get_port_by_id(struct MPT3SAS_ADAPTER *ioc,
450 	u8 port_id, u8 bypass_dirty_port_flag)
451 {
452 	struct hba_port *port, *port_next;
453 
454 	/*
455 	 * When multipath_on_hba is disabled then
456 	 * search the hba_port entry using default
457 	 * port id i.e. 255
458 	 */
459 	if (!ioc->multipath_on_hba)
460 		port_id = MULTIPATH_DISABLED_PORT_ID;
461 
462 	list_for_each_entry_safe(port, port_next,
463 	    &ioc->port_table_list, list) {
464 		if (port->port_id != port_id)
465 			continue;
466 		if (bypass_dirty_port_flag)
467 			return port;
468 		if (port->flags & HBA_PORT_FLAG_DIRTY_PORT)
469 			continue;
470 		return port;
471 	}
472 
473 	/*
474 	 * Allocate hba_port object for default port id (i.e. 255)
475 	 * when multipath_on_hba is disabled for the HBA.
476 	 * And add this object to port_table_list.
477 	 */
478 	if (!ioc->multipath_on_hba) {
479 		port = kzalloc_obj(struct hba_port, GFP_ATOMIC);
480 		if (!port)
481 			return NULL;
482 
483 		port->port_id = port_id;
484 		ioc_info(ioc,
485 		   "hba_port entry: %p, port: %d is added to hba_port list\n",
486 		   port, port->port_id);
487 		list_add_tail(&port->list,
488 		    &ioc->port_table_list);
489 		return port;
490 	}
491 	return NULL;
492 }
493 
494 /**
495  * mpt3sas_get_vphy_by_phy - get virtual_phy object corresponding to phy number
496  * @ioc: per adapter object
497  * @port: hba_port object
498  * @phy: phy number
499  *
500  * Return virtual_phy object corresponding to phy number.
501  */
502 struct virtual_phy *
mpt3sas_get_vphy_by_phy(struct MPT3SAS_ADAPTER * ioc,struct hba_port * port,u32 phy)503 mpt3sas_get_vphy_by_phy(struct MPT3SAS_ADAPTER *ioc,
504 	struct hba_port *port, u32 phy)
505 {
506 	struct virtual_phy *vphy, *vphy_next;
507 
508 	if (!port->vphys_mask)
509 		return NULL;
510 
511 	list_for_each_entry_safe(vphy, vphy_next, &port->vphys_list, list) {
512 		if (vphy->phy_mask & (1 << phy))
513 			return vphy;
514 	}
515 	return NULL;
516 }
517 
518 /**
519  * _scsih_is_boot_device - search for matching boot device.
520  * @sas_address: sas address
521  * @device_name: device name specified in INDENTIFY fram
522  * @enclosure_logical_id: enclosure logical id
523  * @slot: slot number
524  * @form: specifies boot device form
525  * @boot_device: boot device object from bios page 2
526  *
527  * Return: 1 when there's a match, 0 means no match.
528  */
529 static int
_scsih_is_boot_device(u64 sas_address,u64 device_name,u64 enclosure_logical_id,u16 slot,u8 form,Mpi2BiosPage2BootDevice_t * boot_device)530 _scsih_is_boot_device(u64 sas_address, u64 device_name,
531 	u64 enclosure_logical_id, u16 slot, u8 form,
532 	Mpi2BiosPage2BootDevice_t *boot_device)
533 {
534 	int rc = 0;
535 
536 	switch (form) {
537 	case MPI2_BIOSPAGE2_FORM_SAS_WWID:
538 		if (!sas_address)
539 			break;
540 		rc = _scsih_srch_boot_sas_address(
541 		    sas_address, &boot_device->SasWwid);
542 		break;
543 	case MPI2_BIOSPAGE2_FORM_ENCLOSURE_SLOT:
544 		if (!enclosure_logical_id)
545 			break;
546 		rc = _scsih_srch_boot_encl_slot(
547 		    enclosure_logical_id,
548 		    slot, &boot_device->EnclosureSlot);
549 		break;
550 	case MPI2_BIOSPAGE2_FORM_DEVICE_NAME:
551 		if (!device_name)
552 			break;
553 		rc = _scsih_srch_boot_device_name(
554 		    device_name, &boot_device->DeviceName);
555 		break;
556 	case MPI2_BIOSPAGE2_FORM_NO_DEVICE_SPECIFIED:
557 		break;
558 	}
559 
560 	return rc;
561 }
562 
563 /**
564  * _scsih_get_sas_address - set the sas_address for given device handle
565  * @ioc: ?
566  * @handle: device handle
567  * @sas_address: sas address
568  *
569  * Return: 0 success, non-zero when failure
570  */
571 static int
_scsih_get_sas_address(struct MPT3SAS_ADAPTER * ioc,u16 handle,u64 * sas_address)572 _scsih_get_sas_address(struct MPT3SAS_ADAPTER *ioc, u16 handle,
573 	u64 *sas_address)
574 {
575 	Mpi2SasDevicePage0_t sas_device_pg0;
576 	Mpi2ConfigReply_t mpi_reply;
577 	u32 ioc_status;
578 
579 	*sas_address = 0;
580 
581 	if ((mpt3sas_config_get_sas_device_pg0(ioc, &mpi_reply, &sas_device_pg0,
582 	    MPI2_SAS_DEVICE_PGAD_FORM_HANDLE, handle))) {
583 		ioc_err(ioc, "failure at %s:%d/%s()!\n",
584 			__FILE__, __LINE__, __func__);
585 		return -ENXIO;
586 	}
587 
588 	ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & MPI2_IOCSTATUS_MASK;
589 	if (ioc_status == MPI2_IOCSTATUS_SUCCESS) {
590 		/* For HBA, vSES doesn't return HBA SAS address. Instead return
591 		 * vSES's sas address.
592 		 */
593 		if ((handle <= ioc->sas_hba.num_phys) &&
594 		   (!(le32_to_cpu(sas_device_pg0.DeviceInfo) &
595 		   MPI2_SAS_DEVICE_INFO_SEP)))
596 			*sas_address = ioc->sas_hba.sas_address;
597 		else
598 			*sas_address = le64_to_cpu(sas_device_pg0.SASAddress);
599 		return 0;
600 	}
601 
602 	/* we hit this because the given parent handle doesn't exist */
603 	if (ioc_status == MPI2_IOCSTATUS_CONFIG_INVALID_PAGE)
604 		return -ENXIO;
605 
606 	/* else error case */
607 	ioc_err(ioc, "handle(0x%04x), ioc_status(0x%04x), failure at %s:%d/%s()!\n",
608 		handle, ioc_status, __FILE__, __LINE__, __func__);
609 	return -EIO;
610 }
611 
/**
 * _scsih_determine_boot_device - determine boot device.
 * @ioc: per adapter object
 * @device: sas_device, pcie_device, or raid_device object
 * @channel: SAS, PCIe, or RAID channel
 *
 * Determines whether this device should be first reported device to
 * scsi-ml or sas transport, this purpose is for persistent boot device.
 * There are primary, alternate, and current entries in bios page 2. The order
 * priority is primary, alternate, then current.  This routine saves
 * the corresponding device object.
 * The saved data to be used later in _scsih_probe_boot_devices().
 */
static void
_scsih_determine_boot_device(struct MPT3SAS_ADAPTER *ioc, void *device,
	u32 channel)
{
	struct _sas_device *sas_device;
	struct _pcie_device *pcie_device;
	struct _raid_device *raid_device;
	u64 sas_address;
	u64 device_name;
	u64 enclosure_logical_id;
	u16 slot;

	 /* only process this function when driver loads */
	if (!ioc->is_driver_loading)
		return;

	 /* no Bios, return immediately */
	if (!ioc->bios_pg3.BiosVersion)
		return;

	/*
	 * Extract the match keys from whichever device type @device
	 * points at; RAID and PCIe devices only carry a wwid.
	 */
	if (channel == RAID_CHANNEL) {
		raid_device = device;
		sas_address = raid_device->wwid;
		device_name = 0;
		enclosure_logical_id = 0;
		slot = 0;
	} else if (channel == PCIE_CHANNEL) {
		pcie_device = device;
		sas_address = pcie_device->wwid;
		device_name = 0;
		enclosure_logical_id = 0;
		slot = 0;
	} else {
		sas_device = device;
		sas_address = sas_device->sas_address;
		device_name = sas_device->device_name;
		enclosure_logical_id = sas_device->enclosure_logical_id;
		slot = sas_device->slot;
	}

	/* primary (requested) boot device; first match wins */
	if (!ioc->req_boot_device.device) {
		if (_scsih_is_boot_device(sas_address, device_name,
		    enclosure_logical_id, slot,
		    (ioc->bios_pg2.ReqBootDeviceForm &
		    MPI2_BIOSPAGE2_FORM_MASK),
		    &ioc->bios_pg2.RequestedBootDevice)) {
			dinitprintk(ioc,
				    ioc_info(ioc, "%s: req_boot_device(0x%016llx)\n",
					     __func__, (u64)sas_address));
			ioc->req_boot_device.device = device;
			ioc->req_boot_device.channel = channel;
		}
	}

	/* alternate (requested) boot device */
	if (!ioc->req_alt_boot_device.device) {
		if (_scsih_is_boot_device(sas_address, device_name,
		    enclosure_logical_id, slot,
		    (ioc->bios_pg2.ReqAltBootDeviceForm &
		    MPI2_BIOSPAGE2_FORM_MASK),
		    &ioc->bios_pg2.RequestedAltBootDevice)) {
			dinitprintk(ioc,
				    ioc_info(ioc, "%s: req_alt_boot_device(0x%016llx)\n",
					     __func__, (u64)sas_address));
			ioc->req_alt_boot_device.device = device;
			ioc->req_alt_boot_device.channel = channel;
		}
	}

	/* current boot device */
	if (!ioc->current_boot_device.device) {
		if (_scsih_is_boot_device(sas_address, device_name,
		    enclosure_logical_id, slot,
		    (ioc->bios_pg2.CurrentBootDeviceForm &
		    MPI2_BIOSPAGE2_FORM_MASK),
		    &ioc->bios_pg2.CurrentBootDevice)) {
			dinitprintk(ioc,
				    ioc_info(ioc, "%s: current_boot_device(0x%016llx)\n",
					     __func__, (u64)sas_address));
			ioc->current_boot_device.device = device;
			ioc->current_boot_device.channel = channel;
		}
	}
}
707 
708 static struct _sas_device *
__mpt3sas_get_sdev_from_target(struct MPT3SAS_ADAPTER * ioc,struct MPT3SAS_TARGET * tgt_priv)709 __mpt3sas_get_sdev_from_target(struct MPT3SAS_ADAPTER *ioc,
710 		struct MPT3SAS_TARGET *tgt_priv)
711 {
712 	struct _sas_device *ret;
713 
714 	assert_spin_locked(&ioc->sas_device_lock);
715 
716 	ret = tgt_priv->sas_dev;
717 	if (ret)
718 		sas_device_get(ret);
719 
720 	return ret;
721 }
722 
723 static struct _sas_device *
mpt3sas_get_sdev_from_target(struct MPT3SAS_ADAPTER * ioc,struct MPT3SAS_TARGET * tgt_priv)724 mpt3sas_get_sdev_from_target(struct MPT3SAS_ADAPTER *ioc,
725 		struct MPT3SAS_TARGET *tgt_priv)
726 {
727 	struct _sas_device *ret;
728 	unsigned long flags;
729 
730 	spin_lock_irqsave(&ioc->sas_device_lock, flags);
731 	ret = __mpt3sas_get_sdev_from_target(ioc, tgt_priv);
732 	spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
733 
734 	return ret;
735 }
736 
737 static struct _pcie_device *
__mpt3sas_get_pdev_from_target(struct MPT3SAS_ADAPTER * ioc,struct MPT3SAS_TARGET * tgt_priv)738 __mpt3sas_get_pdev_from_target(struct MPT3SAS_ADAPTER *ioc,
739 	struct MPT3SAS_TARGET *tgt_priv)
740 {
741 	struct _pcie_device *ret;
742 
743 	assert_spin_locked(&ioc->pcie_device_lock);
744 
745 	ret = tgt_priv->pcie_dev;
746 	if (ret)
747 		pcie_device_get(ret);
748 
749 	return ret;
750 }
751 
752 /**
753  * mpt3sas_get_pdev_from_target - pcie device search
754  * @ioc: per adapter object
755  * @tgt_priv: starget private object
756  *
757  * Context: This function will acquire ioc->pcie_device_lock and will release
758  * before returning the pcie_device object.
759  *
760  * This searches for pcie_device from target, then return pcie_device object.
761  */
762 static struct _pcie_device *
mpt3sas_get_pdev_from_target(struct MPT3SAS_ADAPTER * ioc,struct MPT3SAS_TARGET * tgt_priv)763 mpt3sas_get_pdev_from_target(struct MPT3SAS_ADAPTER *ioc,
764 	struct MPT3SAS_TARGET *tgt_priv)
765 {
766 	struct _pcie_device *ret;
767 	unsigned long flags;
768 
769 	spin_lock_irqsave(&ioc->pcie_device_lock, flags);
770 	ret = __mpt3sas_get_pdev_from_target(ioc, tgt_priv);
771 	spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
772 
773 	return ret;
774 }
775 
776 
777 /**
778  * __mpt3sas_get_sdev_by_rphy - sas device search
779  * @ioc: per adapter object
780  * @rphy: sas_rphy pointer
781  *
782  * Context: This function will acquire ioc->sas_device_lock and will release
783  * before returning the sas_device object.
784  *
785  * This searches for sas_device from rphy object
786  * then return sas_device object.
787  */
788 struct _sas_device *
__mpt3sas_get_sdev_by_rphy(struct MPT3SAS_ADAPTER * ioc,struct sas_rphy * rphy)789 __mpt3sas_get_sdev_by_rphy(struct MPT3SAS_ADAPTER *ioc,
790 	struct sas_rphy *rphy)
791 {
792 	struct _sas_device *sas_device;
793 
794 	assert_spin_locked(&ioc->sas_device_lock);
795 
796 	list_for_each_entry(sas_device, &ioc->sas_device_list, list) {
797 		if (sas_device->rphy != rphy)
798 			continue;
799 		sas_device_get(sas_device);
800 		return sas_device;
801 	}
802 
803 	sas_device = NULL;
804 	list_for_each_entry(sas_device, &ioc->sas_device_init_list, list) {
805 		if (sas_device->rphy != rphy)
806 			continue;
807 		sas_device_get(sas_device);
808 		return sas_device;
809 	}
810 
811 	return NULL;
812 }
813 
814 /**
815  * __mpt3sas_get_sdev_by_addr - get _sas_device object corresponding to provided
816  *				sas address from sas_device_list list
817  * @ioc: per adapter object
818  * @sas_address: device sas address
819  * @port: port number
820  *
821  * Search for _sas_device object corresponding to provided sas address,
822  * if available return _sas_device object address otherwise return NULL.
823  */
824 struct _sas_device *
__mpt3sas_get_sdev_by_addr(struct MPT3SAS_ADAPTER * ioc,u64 sas_address,struct hba_port * port)825 __mpt3sas_get_sdev_by_addr(struct MPT3SAS_ADAPTER *ioc,
826 	u64 sas_address, struct hba_port *port)
827 {
828 	struct _sas_device *sas_device;
829 
830 	if (!port)
831 		return NULL;
832 
833 	assert_spin_locked(&ioc->sas_device_lock);
834 
835 	list_for_each_entry(sas_device, &ioc->sas_device_list, list) {
836 		if (sas_device->sas_address != sas_address)
837 			continue;
838 		if (sas_device->port != port)
839 			continue;
840 		sas_device_get(sas_device);
841 		return sas_device;
842 	}
843 
844 	list_for_each_entry(sas_device, &ioc->sas_device_init_list, list) {
845 		if (sas_device->sas_address != sas_address)
846 			continue;
847 		if (sas_device->port != port)
848 			continue;
849 		sas_device_get(sas_device);
850 		return sas_device;
851 	}
852 
853 	return NULL;
854 }
855 
856 /**
857  * mpt3sas_get_sdev_by_addr - sas device search
858  * @ioc: per adapter object
859  * @sas_address: sas address
860  * @port: hba port entry
861  * Context: Calling function should acquire ioc->sas_device_lock
862  *
863  * This searches for sas_device based on sas_address & port number,
864  * then return sas_device object.
865  */
866 struct _sas_device *
mpt3sas_get_sdev_by_addr(struct MPT3SAS_ADAPTER * ioc,u64 sas_address,struct hba_port * port)867 mpt3sas_get_sdev_by_addr(struct MPT3SAS_ADAPTER *ioc,
868 	u64 sas_address, struct hba_port *port)
869 {
870 	struct _sas_device *sas_device;
871 	unsigned long flags;
872 
873 	spin_lock_irqsave(&ioc->sas_device_lock, flags);
874 	sas_device = __mpt3sas_get_sdev_by_addr(ioc,
875 	    sas_address, port);
876 	spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
877 
878 	return sas_device;
879 }
880 
881 static struct _sas_device *
__mpt3sas_get_sdev_by_handle(struct MPT3SAS_ADAPTER * ioc,u16 handle)882 __mpt3sas_get_sdev_by_handle(struct MPT3SAS_ADAPTER *ioc, u16 handle)
883 {
884 	struct _sas_device *sas_device;
885 
886 	assert_spin_locked(&ioc->sas_device_lock);
887 
888 	list_for_each_entry(sas_device, &ioc->sas_device_list, list)
889 		if (sas_device->handle == handle)
890 			goto found_device;
891 
892 	list_for_each_entry(sas_device, &ioc->sas_device_init_list, list)
893 		if (sas_device->handle == handle)
894 			goto found_device;
895 
896 	return NULL;
897 
898 found_device:
899 	sas_device_get(sas_device);
900 	return sas_device;
901 }
902 
/**
 * mpt3sas_get_sdev_by_handle - sas device search
 * @ioc: per adapter object
 * @handle: sas device handle (assigned by firmware)
 *
 * Context: acquires and releases ioc->sas_device_lock internally.
 *
 * This searches for sas_device based on handle, then returns the
 * sas_device object (with a reference taken) or NULL.
 */
struct _sas_device *
mpt3sas_get_sdev_by_handle(struct MPT3SAS_ADAPTER *ioc, u16 handle)
{
	struct _sas_device *sas_device;
	unsigned long flags;

	spin_lock_irqsave(&ioc->sas_device_lock, flags);
	sas_device = __mpt3sas_get_sdev_by_handle(ioc, handle);
	spin_unlock_irqrestore(&ioc->sas_device_lock, flags);

	return sas_device;
}
924 
925 /**
926  * _scsih_display_enclosure_chassis_info - display device location info
927  * @ioc: per adapter object
928  * @sas_device: per sas device object
929  * @sdev: scsi device struct
930  * @starget: scsi target struct
931  */
static void
_scsih_display_enclosure_chassis_info(struct MPT3SAS_ADAPTER *ioc,
	struct _sas_device *sas_device, struct scsi_device *sdev,
	struct scsi_target *starget)
{
	/*
	 * Log enclosure id/slot, enclosure level/connector name, and chassis
	 * slot for the device, using the most specific printk wrapper the
	 * caller could supply: sdev first, then starget, then the
	 * adapter-wide ioc_info() as a fallback.  Each field is printed only
	 * when the corresponding sas_device member indicates it is valid.
	 */
	if (sdev) {
		if (sas_device->enclosure_handle != 0)
			sdev_printk(KERN_INFO, sdev,
			    "enclosure logical id (0x%016llx), slot(%d) \n",
			    (unsigned long long)
			    sas_device->enclosure_logical_id,
			    sas_device->slot);
		if (sas_device->connector_name[0] != '\0')
			sdev_printk(KERN_INFO, sdev,
			    "enclosure level(0x%04x), connector name( %s)\n",
			    sas_device->enclosure_level,
			    sas_device->connector_name);
		if (sas_device->is_chassis_slot_valid)
			sdev_printk(KERN_INFO, sdev, "chassis slot(0x%04x)\n",
			    sas_device->chassis_slot);
	} else if (starget) {
		if (sas_device->enclosure_handle != 0)
			starget_printk(KERN_INFO, starget,
			    "enclosure logical id(0x%016llx), slot(%d) \n",
			    (unsigned long long)
			    sas_device->enclosure_logical_id,
			    sas_device->slot);
		if (sas_device->connector_name[0] != '\0')
			starget_printk(KERN_INFO, starget,
			    "enclosure level(0x%04x), connector name( %s)\n",
			    sas_device->enclosure_level,
			    sas_device->connector_name);
		if (sas_device->is_chassis_slot_valid)
			starget_printk(KERN_INFO, starget,
			    "chassis slot(0x%04x)\n",
			    sas_device->chassis_slot);
	} else {
		if (sas_device->enclosure_handle != 0)
			ioc_info(ioc, "enclosure logical id(0x%016llx), slot(%d)\n",
				 (u64)sas_device->enclosure_logical_id,
				 sas_device->slot);
		if (sas_device->connector_name[0] != '\0')
			ioc_info(ioc, "enclosure level(0x%04x), connector name( %s)\n",
				 sas_device->enclosure_level,
				 sas_device->connector_name);
		if (sas_device->is_chassis_slot_valid)
			ioc_info(ioc, "chassis slot(0x%04x)\n",
				 sas_device->chassis_slot);
	}
}
982 
983 /**
984  * _scsih_sas_device_remove - remove sas_device from list.
985  * @ioc: per adapter object
986  * @sas_device: the sas_device object
987  * Context: This function will acquire ioc->sas_device_lock.
988  *
989  * If sas_device is on the list, remove it and decrement its reference count.
990  */
static void
_scsih_sas_device_remove(struct MPT3SAS_ADAPTER *ioc,
	struct _sas_device *sas_device)
{
	unsigned long flags;

	if (!sas_device)
		return;
	ioc_info(ioc, "removing handle(0x%04x), sas_addr(0x%016llx)\n",
		 sas_device->handle, (u64)sas_device->sas_address);

	_scsih_display_enclosure_chassis_info(ioc, sas_device, NULL, NULL);

	/*
	 * The lock serializes access to the list, but we still need to verify
	 * that nobody removed the entry while we were waiting on the lock.
	 */
	spin_lock_irqsave(&ioc->sas_device_lock, flags);
	if (!list_empty(&sas_device->list)) {
		list_del_init(&sas_device->list);
		/* Drop the reference that was taken when the device was
		 * added to the list (see _scsih_sas_device_add). */
		sas_device_put(sas_device);
	}
	spin_unlock_irqrestore(&ioc->sas_device_lock, flags);

}
1016 
1017 /**
1018  * _scsih_device_remove_by_handle - removing device object by handle
1019  * @ioc: per adapter object
1020  * @handle: device handle
1021  */
static void
_scsih_device_remove_by_handle(struct MPT3SAS_ADAPTER *ioc, u16 handle)
{
	struct _sas_device *sas_device;
	unsigned long flags;

	/* Handles are not stable across a host reset; skip during recovery. */
	if (ioc->shost_recovery)
		return;

	spin_lock_irqsave(&ioc->sas_device_lock, flags);
	sas_device = __mpt3sas_get_sdev_by_handle(ioc, handle);
	if (sas_device) {
		/* Unlink and drop the list's reference while locked. */
		list_del_init(&sas_device->list);
		sas_device_put(sas_device);
	}
	spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
	if (sas_device) {
		/* Tear down outside the lock, then drop the reference the
		 * lookup above took for us. */
		_scsih_remove_device(ioc, sas_device);
		sas_device_put(sas_device);
	}
}
1043 
1044 /**
1045  * mpt3sas_device_remove_by_sas_address - removing device object by
1046  *					sas address & port number
1047  * @ioc: per adapter object
1048  * @sas_address: device sas_address
1049  * @port: hba port entry
1050  *
1051  * Return nothing.
1052  */
1053 void
mpt3sas_device_remove_by_sas_address(struct MPT3SAS_ADAPTER * ioc,u64 sas_address,struct hba_port * port)1054 mpt3sas_device_remove_by_sas_address(struct MPT3SAS_ADAPTER *ioc,
1055 	u64 sas_address, struct hba_port *port)
1056 {
1057 	struct _sas_device *sas_device;
1058 	unsigned long flags;
1059 
1060 	if (ioc->shost_recovery)
1061 		return;
1062 
1063 	spin_lock_irqsave(&ioc->sas_device_lock, flags);
1064 	sas_device = __mpt3sas_get_sdev_by_addr(ioc, sas_address, port);
1065 	if (sas_device) {
1066 		list_del_init(&sas_device->list);
1067 		sas_device_put(sas_device);
1068 	}
1069 	spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
1070 	if (sas_device) {
1071 		_scsih_remove_device(ioc, sas_device);
1072 		sas_device_put(sas_device);
1073 	}
1074 }
1075 
1076 /**
1077  * _scsih_sas_device_add - insert sas_device to the list.
1078  * @ioc: per adapter object
1079  * @sas_device: the sas_device object
1080  * Context: This function will acquire ioc->sas_device_lock.
1081  *
1082  * Adding new object to the ioc->sas_device_list.
1083  */
1084 static void
_scsih_sas_device_add(struct MPT3SAS_ADAPTER * ioc,struct _sas_device * sas_device)1085 _scsih_sas_device_add(struct MPT3SAS_ADAPTER *ioc,
1086 	struct _sas_device *sas_device)
1087 {
1088 	unsigned long flags;
1089 
1090 	dewtprintk(ioc,
1091 		   ioc_info(ioc, "%s: handle(0x%04x), sas_addr(0x%016llx)\n",
1092 			    __func__, sas_device->handle,
1093 			    (u64)sas_device->sas_address));
1094 
1095 	dewtprintk(ioc, _scsih_display_enclosure_chassis_info(ioc, sas_device,
1096 	    NULL, NULL));
1097 
1098 	spin_lock_irqsave(&ioc->sas_device_lock, flags);
1099 	sas_device_get(sas_device);
1100 	list_add_tail(&sas_device->list, &ioc->sas_device_list);
1101 	spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
1102 
1103 	if (ioc->hide_drives) {
1104 		clear_bit(sas_device->handle, ioc->pend_os_device_add);
1105 		return;
1106 	}
1107 
1108 	if (!mpt3sas_transport_port_add(ioc, sas_device->handle,
1109 	     sas_device->sas_address_parent, sas_device->port)) {
1110 		_scsih_sas_device_remove(ioc, sas_device);
1111 	} else if (!sas_device->starget) {
1112 		/*
1113 		 * When asyn scanning is enabled, its not possible to remove
1114 		 * devices while scanning is turned on due to an oops in
1115 		 * scsi_sysfs_add_sdev()->add_device()->sysfs_addrm_start()
1116 		 */
1117 		if (!ioc->is_driver_loading) {
1118 			mpt3sas_transport_port_remove(ioc,
1119 			    sas_device->sas_address,
1120 			    sas_device->sas_address_parent,
1121 			    sas_device->port);
1122 			_scsih_sas_device_remove(ioc, sas_device);
1123 		}
1124 	} else
1125 		clear_bit(sas_device->handle, ioc->pend_os_device_add);
1126 }
1127 
1128 /**
1129  * _scsih_sas_device_init_add - insert sas_device to the list.
1130  * @ioc: per adapter object
1131  * @sas_device: the sas_device object
1132  * Context: This function will acquire ioc->sas_device_lock.
1133  *
1134  * Adding new object at driver load time to the ioc->sas_device_init_list.
1135  */
1136 static void
_scsih_sas_device_init_add(struct MPT3SAS_ADAPTER * ioc,struct _sas_device * sas_device)1137 _scsih_sas_device_init_add(struct MPT3SAS_ADAPTER *ioc,
1138 	struct _sas_device *sas_device)
1139 {
1140 	unsigned long flags;
1141 
1142 	dewtprintk(ioc,
1143 		   ioc_info(ioc, "%s: handle(0x%04x), sas_addr(0x%016llx)\n",
1144 			    __func__, sas_device->handle,
1145 			    (u64)sas_device->sas_address));
1146 
1147 	dewtprintk(ioc, _scsih_display_enclosure_chassis_info(ioc, sas_device,
1148 	    NULL, NULL));
1149 
1150 	spin_lock_irqsave(&ioc->sas_device_lock, flags);
1151 	sas_device_get(sas_device);
1152 	list_add_tail(&sas_device->list, &ioc->sas_device_init_list);
1153 	_scsih_determine_boot_device(ioc, sas_device, 0);
1154 	spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
1155 }
1156 
1157 
1158 static struct _pcie_device *
__mpt3sas_get_pdev_by_wwid(struct MPT3SAS_ADAPTER * ioc,u64 wwid)1159 __mpt3sas_get_pdev_by_wwid(struct MPT3SAS_ADAPTER *ioc, u64 wwid)
1160 {
1161 	struct _pcie_device *pcie_device;
1162 
1163 	assert_spin_locked(&ioc->pcie_device_lock);
1164 
1165 	list_for_each_entry(pcie_device, &ioc->pcie_device_list, list)
1166 		if (pcie_device->wwid == wwid)
1167 			goto found_device;
1168 
1169 	list_for_each_entry(pcie_device, &ioc->pcie_device_init_list, list)
1170 		if (pcie_device->wwid == wwid)
1171 			goto found_device;
1172 
1173 	return NULL;
1174 
1175 found_device:
1176 	pcie_device_get(pcie_device);
1177 	return pcie_device;
1178 }
1179 
1180 
1181 /**
1182  * mpt3sas_get_pdev_by_wwid - pcie device search
1183  * @ioc: per adapter object
1184  * @wwid: wwid
1185  *
1186  * Context: This function will acquire ioc->pcie_device_lock and will release
1187  * before returning the pcie_device object.
1188  *
1189  * This searches for pcie_device based on wwid, then return pcie_device object.
1190  */
1191 static struct _pcie_device *
mpt3sas_get_pdev_by_wwid(struct MPT3SAS_ADAPTER * ioc,u64 wwid)1192 mpt3sas_get_pdev_by_wwid(struct MPT3SAS_ADAPTER *ioc, u64 wwid)
1193 {
1194 	struct _pcie_device *pcie_device;
1195 	unsigned long flags;
1196 
1197 	spin_lock_irqsave(&ioc->pcie_device_lock, flags);
1198 	pcie_device = __mpt3sas_get_pdev_by_wwid(ioc, wwid);
1199 	spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
1200 
1201 	return pcie_device;
1202 }
1203 
1204 
1205 static struct _pcie_device *
__mpt3sas_get_pdev_by_idchannel(struct MPT3SAS_ADAPTER * ioc,int id,int channel)1206 __mpt3sas_get_pdev_by_idchannel(struct MPT3SAS_ADAPTER *ioc, int id,
1207 	int channel)
1208 {
1209 	struct _pcie_device *pcie_device;
1210 
1211 	assert_spin_locked(&ioc->pcie_device_lock);
1212 
1213 	list_for_each_entry(pcie_device, &ioc->pcie_device_list, list)
1214 		if (pcie_device->id == id && pcie_device->channel == channel)
1215 			goto found_device;
1216 
1217 	list_for_each_entry(pcie_device, &ioc->pcie_device_init_list, list)
1218 		if (pcie_device->id == id && pcie_device->channel == channel)
1219 			goto found_device;
1220 
1221 	return NULL;
1222 
1223 found_device:
1224 	pcie_device_get(pcie_device);
1225 	return pcie_device;
1226 }
1227 
1228 static struct _pcie_device *
__mpt3sas_get_pdev_by_handle(struct MPT3SAS_ADAPTER * ioc,u16 handle)1229 __mpt3sas_get_pdev_by_handle(struct MPT3SAS_ADAPTER *ioc, u16 handle)
1230 {
1231 	struct _pcie_device *pcie_device;
1232 
1233 	assert_spin_locked(&ioc->pcie_device_lock);
1234 
1235 	list_for_each_entry(pcie_device, &ioc->pcie_device_list, list)
1236 		if (pcie_device->handle == handle)
1237 			goto found_device;
1238 
1239 	list_for_each_entry(pcie_device, &ioc->pcie_device_init_list, list)
1240 		if (pcie_device->handle == handle)
1241 			goto found_device;
1242 
1243 	return NULL;
1244 
1245 found_device:
1246 	pcie_device_get(pcie_device);
1247 	return pcie_device;
1248 }
1249 
1250 
1251 /**
1252  * mpt3sas_get_pdev_by_handle - pcie device search
1253  * @ioc: per adapter object
1254  * @handle: Firmware device handle
1255  *
1256  * Context: This function will acquire ioc->pcie_device_lock and will release
1257  * before returning the pcie_device object.
1258  *
1259  * This searches for pcie_device based on handle, then return pcie_device
1260  * object.
1261  */
1262 struct _pcie_device *
mpt3sas_get_pdev_by_handle(struct MPT3SAS_ADAPTER * ioc,u16 handle)1263 mpt3sas_get_pdev_by_handle(struct MPT3SAS_ADAPTER *ioc, u16 handle)
1264 {
1265 	struct _pcie_device *pcie_device;
1266 	unsigned long flags;
1267 
1268 	spin_lock_irqsave(&ioc->pcie_device_lock, flags);
1269 	pcie_device = __mpt3sas_get_pdev_by_handle(ioc, handle);
1270 	spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
1271 
1272 	return pcie_device;
1273 }
1274 
1275 /**
1276  * _scsih_set_nvme_max_shutdown_latency - Update max_shutdown_latency.
1277  * @ioc: per adapter object
1278  * Context: This function will acquire ioc->pcie_device_lock
1279  *
1280  * Update ioc->max_shutdown_latency to that NVMe drives RTD3 Entry Latency
1281  * which has reported maximum among all available NVMe drives.
1282  * Minimum max_shutdown_latency will be six seconds.
1283  */
1284 static void
_scsih_set_nvme_max_shutdown_latency(struct MPT3SAS_ADAPTER * ioc)1285 _scsih_set_nvme_max_shutdown_latency(struct MPT3SAS_ADAPTER *ioc)
1286 {
1287 	struct _pcie_device *pcie_device;
1288 	unsigned long flags;
1289 	u16 shutdown_latency = IO_UNIT_CONTROL_SHUTDOWN_TIMEOUT;
1290 
1291 	spin_lock_irqsave(&ioc->pcie_device_lock, flags);
1292 	list_for_each_entry(pcie_device, &ioc->pcie_device_list, list) {
1293 		if (pcie_device->shutdown_latency) {
1294 			if (shutdown_latency < pcie_device->shutdown_latency)
1295 				shutdown_latency =
1296 					pcie_device->shutdown_latency;
1297 		}
1298 	}
1299 	ioc->max_shutdown_latency = shutdown_latency;
1300 	spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
1301 }
1302 
1303 /**
1304  * _scsih_pcie_device_remove - remove pcie_device from list.
1305  * @ioc: per adapter object
1306  * @pcie_device: the pcie_device object
1307  * Context: This function will acquire ioc->pcie_device_lock.
1308  *
1309  * If pcie_device is on the list, remove it and decrement its reference count.
1310  */
1311 static void
_scsih_pcie_device_remove(struct MPT3SAS_ADAPTER * ioc,struct _pcie_device * pcie_device)1312 _scsih_pcie_device_remove(struct MPT3SAS_ADAPTER *ioc,
1313 	struct _pcie_device *pcie_device)
1314 {
1315 	unsigned long flags;
1316 	int was_on_pcie_device_list = 0;
1317 	u8 update_latency = 0;
1318 
1319 	if (!pcie_device)
1320 		return;
1321 	ioc_info(ioc, "removing handle(0x%04x), wwid(0x%016llx)\n",
1322 		 pcie_device->handle, (u64)pcie_device->wwid);
1323 	if (pcie_device->enclosure_handle != 0)
1324 		ioc_info(ioc, "removing enclosure logical id(0x%016llx), slot(%d)\n",
1325 			 (u64)pcie_device->enclosure_logical_id,
1326 			 pcie_device->slot);
1327 	if (pcie_device->connector_name[0] != '\0')
1328 		ioc_info(ioc, "removing enclosure level(0x%04x), connector name( %s)\n",
1329 			 pcie_device->enclosure_level,
1330 			 pcie_device->connector_name);
1331 
1332 	spin_lock_irqsave(&ioc->pcie_device_lock, flags);
1333 	if (!list_empty(&pcie_device->list)) {
1334 		list_del_init(&pcie_device->list);
1335 		was_on_pcie_device_list = 1;
1336 	}
1337 	if (pcie_device->shutdown_latency == ioc->max_shutdown_latency)
1338 		update_latency = 1;
1339 	spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
1340 	if (was_on_pcie_device_list) {
1341 		kfree(pcie_device->serial_number);
1342 		pcie_device_put(pcie_device);
1343 	}
1344 
1345 	/*
1346 	 * This device's RTD3 Entry Latency matches IOC's
1347 	 * max_shutdown_latency. Recalculate IOC's max_shutdown_latency
1348 	 * from the available drives as current drive is getting removed.
1349 	 */
1350 	if (update_latency)
1351 		_scsih_set_nvme_max_shutdown_latency(ioc);
1352 }
1353 
1354 
1355 /**
1356  * _scsih_pcie_device_remove_by_handle - removing pcie device object by handle
1357  * @ioc: per adapter object
1358  * @handle: device handle
1359  */
1360 static void
_scsih_pcie_device_remove_by_handle(struct MPT3SAS_ADAPTER * ioc,u16 handle)1361 _scsih_pcie_device_remove_by_handle(struct MPT3SAS_ADAPTER *ioc, u16 handle)
1362 {
1363 	struct _pcie_device *pcie_device;
1364 	unsigned long flags;
1365 	int was_on_pcie_device_list = 0;
1366 	u8 update_latency = 0;
1367 
1368 	if (ioc->shost_recovery)
1369 		return;
1370 
1371 	spin_lock_irqsave(&ioc->pcie_device_lock, flags);
1372 	pcie_device = __mpt3sas_get_pdev_by_handle(ioc, handle);
1373 	if (pcie_device) {
1374 		if (!list_empty(&pcie_device->list)) {
1375 			list_del_init(&pcie_device->list);
1376 			was_on_pcie_device_list = 1;
1377 			pcie_device_put(pcie_device);
1378 		}
1379 		if (pcie_device->shutdown_latency == ioc->max_shutdown_latency)
1380 			update_latency = 1;
1381 	}
1382 	spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
1383 	if (was_on_pcie_device_list) {
1384 		_scsih_pcie_device_remove_from_sml(ioc, pcie_device);
1385 		pcie_device_put(pcie_device);
1386 	}
1387 
1388 	/*
1389 	 * This device's RTD3 Entry Latency matches IOC's
1390 	 * max_shutdown_latency. Recalculate IOC's max_shutdown_latency
1391 	 * from the available drives as current drive is getting removed.
1392 	 */
1393 	if (update_latency)
1394 		_scsih_set_nvme_max_shutdown_latency(ioc);
1395 }
1396 
1397 /**
1398  * _scsih_pcie_device_add - add pcie_device object
1399  * @ioc: per adapter object
1400  * @pcie_device: pcie_device object
1401  *
1402  * This is added to the pcie_device_list link list.
1403  */
1404 static void
_scsih_pcie_device_add(struct MPT3SAS_ADAPTER * ioc,struct _pcie_device * pcie_device)1405 _scsih_pcie_device_add(struct MPT3SAS_ADAPTER *ioc,
1406 	struct _pcie_device *pcie_device)
1407 {
1408 	unsigned long flags;
1409 
1410 	dewtprintk(ioc,
1411 		   ioc_info(ioc, "%s: handle (0x%04x), wwid(0x%016llx)\n",
1412 			    __func__,
1413 			    pcie_device->handle, (u64)pcie_device->wwid));
1414 	if (pcie_device->enclosure_handle != 0)
1415 		dewtprintk(ioc,
1416 			   ioc_info(ioc, "%s: enclosure logical id(0x%016llx), slot( %d)\n",
1417 				    __func__,
1418 				    (u64)pcie_device->enclosure_logical_id,
1419 				    pcie_device->slot));
1420 	if (pcie_device->connector_name[0] != '\0')
1421 		dewtprintk(ioc,
1422 			   ioc_info(ioc, "%s: enclosure level(0x%04x), connector name( %s)\n",
1423 				    __func__, pcie_device->enclosure_level,
1424 				    pcie_device->connector_name));
1425 
1426 	spin_lock_irqsave(&ioc->pcie_device_lock, flags);
1427 	pcie_device_get(pcie_device);
1428 	list_add_tail(&pcie_device->list, &ioc->pcie_device_list);
1429 	spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
1430 
1431 	if (pcie_device->access_status ==
1432 	    MPI26_PCIEDEV0_ASTATUS_DEVICE_BLOCKED) {
1433 		clear_bit(pcie_device->handle, ioc->pend_os_device_add);
1434 		return;
1435 	}
1436 	if (scsi_add_device(ioc->shost, PCIE_CHANNEL, pcie_device->id, 0)) {
1437 		_scsih_pcie_device_remove(ioc, pcie_device);
1438 	} else if (!pcie_device->starget) {
1439 		if (!ioc->is_driver_loading) {
1440 /*TODO-- Need to find out whether this condition will occur or not*/
1441 			clear_bit(pcie_device->handle, ioc->pend_os_device_add);
1442 		}
1443 	} else
1444 		clear_bit(pcie_device->handle, ioc->pend_os_device_add);
1445 }
1446 
/**
1448  * _scsih_pcie_device_init_add - insert pcie_device to the init list.
1449  * @ioc: per adapter object
1450  * @pcie_device: the pcie_device object
1451  * Context: This function will acquire ioc->pcie_device_lock.
1452  *
1453  * Adding new object at driver load time to the ioc->pcie_device_init_list.
1454  */
1455 static void
_scsih_pcie_device_init_add(struct MPT3SAS_ADAPTER * ioc,struct _pcie_device * pcie_device)1456 _scsih_pcie_device_init_add(struct MPT3SAS_ADAPTER *ioc,
1457 				struct _pcie_device *pcie_device)
1458 {
1459 	unsigned long flags;
1460 
1461 	dewtprintk(ioc,
1462 		   ioc_info(ioc, "%s: handle (0x%04x), wwid(0x%016llx)\n",
1463 			    __func__,
1464 			    pcie_device->handle, (u64)pcie_device->wwid));
1465 	if (pcie_device->enclosure_handle != 0)
1466 		dewtprintk(ioc,
1467 			   ioc_info(ioc, "%s: enclosure logical id(0x%016llx), slot( %d)\n",
1468 				    __func__,
1469 				    (u64)pcie_device->enclosure_logical_id,
1470 				    pcie_device->slot));
1471 	if (pcie_device->connector_name[0] != '\0')
1472 		dewtprintk(ioc,
1473 			   ioc_info(ioc, "%s: enclosure level(0x%04x), connector name( %s)\n",
1474 				    __func__, pcie_device->enclosure_level,
1475 				    pcie_device->connector_name));
1476 
1477 	spin_lock_irqsave(&ioc->pcie_device_lock, flags);
1478 	pcie_device_get(pcie_device);
1479 	list_add_tail(&pcie_device->list, &ioc->pcie_device_init_list);
1480 	if (pcie_device->access_status !=
1481 	    MPI26_PCIEDEV0_ASTATUS_DEVICE_BLOCKED)
1482 		_scsih_determine_boot_device(ioc, pcie_device, PCIE_CHANNEL);
1483 	spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
1484 }
1485 /**
1486  * _scsih_raid_device_find_by_id - raid device search
1487  * @ioc: per adapter object
1488  * @id: sas device target id
1489  * @channel: sas device channel
1490  * Context: Calling function should acquire ioc->raid_device_lock
1491  *
1492  * This searches for raid_device based on target id, then return raid_device
1493  * object.
1494  */
1495 static struct _raid_device *
_scsih_raid_device_find_by_id(struct MPT3SAS_ADAPTER * ioc,int id,int channel)1496 _scsih_raid_device_find_by_id(struct MPT3SAS_ADAPTER *ioc, int id, int channel)
1497 {
1498 	struct _raid_device *raid_device, *r;
1499 
1500 	r = NULL;
1501 	list_for_each_entry(raid_device, &ioc->raid_device_list, list) {
1502 		if (raid_device->id == id && raid_device->channel == channel) {
1503 			r = raid_device;
1504 			goto out;
1505 		}
1506 	}
1507 
1508  out:
1509 	return r;
1510 }
1511 
1512 /**
1513  * mpt3sas_raid_device_find_by_handle - raid device search
1514  * @ioc: per adapter object
1515  * @handle: sas device handle (assigned by firmware)
1516  * Context: Calling function should acquire ioc->raid_device_lock
1517  *
1518  * This searches for raid_device based on handle, then return raid_device
1519  * object.
1520  */
1521 struct _raid_device *
mpt3sas_raid_device_find_by_handle(struct MPT3SAS_ADAPTER * ioc,u16 handle)1522 mpt3sas_raid_device_find_by_handle(struct MPT3SAS_ADAPTER *ioc, u16 handle)
1523 {
1524 	struct _raid_device *raid_device, *r;
1525 
1526 	r = NULL;
1527 	list_for_each_entry(raid_device, &ioc->raid_device_list, list) {
1528 		if (raid_device->handle != handle)
1529 			continue;
1530 		r = raid_device;
1531 		goto out;
1532 	}
1533 
1534  out:
1535 	return r;
1536 }
1537 
1538 /**
1539  * _scsih_raid_device_find_by_wwid - raid device search
1540  * @ioc: per adapter object
 * @wwid: world wide identifier of the raid volume
1542  * Context: Calling function should acquire ioc->raid_device_lock
1543  *
1544  * This searches for raid_device based on wwid, then return raid_device
1545  * object.
1546  */
1547 static struct _raid_device *
_scsih_raid_device_find_by_wwid(struct MPT3SAS_ADAPTER * ioc,u64 wwid)1548 _scsih_raid_device_find_by_wwid(struct MPT3SAS_ADAPTER *ioc, u64 wwid)
1549 {
1550 	struct _raid_device *raid_device, *r;
1551 
1552 	r = NULL;
1553 	list_for_each_entry(raid_device, &ioc->raid_device_list, list) {
1554 		if (raid_device->wwid != wwid)
1555 			continue;
1556 		r = raid_device;
1557 		goto out;
1558 	}
1559 
1560  out:
1561 	return r;
1562 }
1563 
1564 /**
1565  * _scsih_raid_device_add - add raid_device object
1566  * @ioc: per adapter object
1567  * @raid_device: raid_device object
1568  *
1569  * This is added to the raid_device_list link list.
1570  */
1571 static void
_scsih_raid_device_add(struct MPT3SAS_ADAPTER * ioc,struct _raid_device * raid_device)1572 _scsih_raid_device_add(struct MPT3SAS_ADAPTER *ioc,
1573 	struct _raid_device *raid_device)
1574 {
1575 	unsigned long flags;
1576 
1577 	dewtprintk(ioc,
1578 		   ioc_info(ioc, "%s: handle(0x%04x), wwid(0x%016llx)\n",
1579 			    __func__,
1580 			    raid_device->handle, (u64)raid_device->wwid));
1581 
1582 	spin_lock_irqsave(&ioc->raid_device_lock, flags);
1583 	list_add_tail(&raid_device->list, &ioc->raid_device_list);
1584 	spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
1585 }
1586 
1587 /**
1588  * _scsih_raid_device_remove - delete raid_device object
1589  * @ioc: per adapter object
1590  * @raid_device: raid_device object
1591  *
1592  */
1593 static void
_scsih_raid_device_remove(struct MPT3SAS_ADAPTER * ioc,struct _raid_device * raid_device)1594 _scsih_raid_device_remove(struct MPT3SAS_ADAPTER *ioc,
1595 	struct _raid_device *raid_device)
1596 {
1597 	unsigned long flags;
1598 
1599 	spin_lock_irqsave(&ioc->raid_device_lock, flags);
1600 	list_del(&raid_device->list);
1601 	kfree(raid_device);
1602 	spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
1603 }
1604 
1605 /**
1606  * mpt3sas_scsih_expander_find_by_handle - expander device search
1607  * @ioc: per adapter object
1608  * @handle: expander handle (assigned by firmware)
 * Context: Calling function should acquire ioc->sas_node_lock
1610  *
1611  * This searches for expander device based on handle, then returns the
1612  * sas_node object.
1613  */
1614 struct _sas_node *
mpt3sas_scsih_expander_find_by_handle(struct MPT3SAS_ADAPTER * ioc,u16 handle)1615 mpt3sas_scsih_expander_find_by_handle(struct MPT3SAS_ADAPTER *ioc, u16 handle)
1616 {
1617 	struct _sas_node *sas_expander, *r;
1618 
1619 	r = NULL;
1620 	list_for_each_entry(sas_expander, &ioc->sas_expander_list, list) {
1621 		if (sas_expander->handle != handle)
1622 			continue;
1623 		r = sas_expander;
1624 		goto out;
1625 	}
1626  out:
1627 	return r;
1628 }
1629 
1630 /**
 * mpt3sas_scsih_enclosure_find_by_handle - enclosure device search
1632  * @ioc: per adapter object
1633  * @handle: enclosure handle (assigned by firmware)
1634  * Context: Calling function should acquire ioc->sas_device_lock
1635  *
1636  * This searches for enclosure device based on handle, then returns the
1637  * enclosure object.
1638  */
1639 static struct _enclosure_node *
mpt3sas_scsih_enclosure_find_by_handle(struct MPT3SAS_ADAPTER * ioc,u16 handle)1640 mpt3sas_scsih_enclosure_find_by_handle(struct MPT3SAS_ADAPTER *ioc, u16 handle)
1641 {
1642 	struct _enclosure_node *enclosure_dev, *r;
1643 
1644 	r = NULL;
1645 	list_for_each_entry(enclosure_dev, &ioc->enclosure_list, list) {
1646 		if (le16_to_cpu(enclosure_dev->pg0.EnclosureHandle) != handle)
1647 			continue;
1648 		r = enclosure_dev;
1649 		goto out;
1650 	}
1651 out:
1652 	return r;
1653 }
1654 /**
1655  * mpt3sas_scsih_expander_find_by_sas_address - expander device search
1656  * @ioc: per adapter object
1657  * @sas_address: sas address
1658  * @port: hba port entry
1659  * Context: Calling function should acquire ioc->sas_node_lock.
1660  *
1661  * This searches for expander device based on sas_address & port number,
1662  * then returns the sas_node object.
1663  */
1664 struct _sas_node *
mpt3sas_scsih_expander_find_by_sas_address(struct MPT3SAS_ADAPTER * ioc,u64 sas_address,struct hba_port * port)1665 mpt3sas_scsih_expander_find_by_sas_address(struct MPT3SAS_ADAPTER *ioc,
1666 	u64 sas_address, struct hba_port *port)
1667 {
1668 	struct _sas_node *sas_expander, *r = NULL;
1669 
1670 	if (!port)
1671 		return r;
1672 
1673 	list_for_each_entry(sas_expander, &ioc->sas_expander_list, list) {
1674 		if (sas_expander->sas_address != sas_address)
1675 			continue;
1676 		if (sas_expander->port != port)
1677 			continue;
1678 		r = sas_expander;
1679 		goto out;
1680 	}
1681  out:
1682 	return r;
1683 }
1684 
1685 /**
1686  * _scsih_expander_node_add - insert expander device to the list.
1687  * @ioc: per adapter object
1688  * @sas_expander: the sas_device object
1689  * Context: This function will acquire ioc->sas_node_lock.
1690  *
1691  * Adding new object to the ioc->sas_expander_list.
1692  */
1693 static void
_scsih_expander_node_add(struct MPT3SAS_ADAPTER * ioc,struct _sas_node * sas_expander)1694 _scsih_expander_node_add(struct MPT3SAS_ADAPTER *ioc,
1695 	struct _sas_node *sas_expander)
1696 {
1697 	unsigned long flags;
1698 
1699 	spin_lock_irqsave(&ioc->sas_node_lock, flags);
1700 	list_add_tail(&sas_expander->list, &ioc->sas_expander_list);
1701 	spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
1702 }
1703 
1704 /**
1705  * _scsih_is_end_device - determines if device is an end device
1706  * @device_info: bitfield providing information about the device.
1707  * Context: none
1708  *
1709  * Return: 1 if end device.
1710  */
1711 static int
_scsih_is_end_device(u32 device_info)1712 _scsih_is_end_device(u32 device_info)
1713 {
1714 	if (device_info & MPI2_SAS_DEVICE_INFO_END_DEVICE &&
1715 		((device_info & MPI2_SAS_DEVICE_INFO_SSP_TARGET) |
1716 		(device_info & MPI2_SAS_DEVICE_INFO_STP_TARGET) |
1717 		(device_info & MPI2_SAS_DEVICE_INFO_SATA_DEVICE)))
1718 		return 1;
1719 	else
1720 		return 0;
1721 }
1722 
1723 /**
1724  * _scsih_is_nvme_pciescsi_device - determines if
1725  *			device is an pcie nvme/scsi device
1726  * @device_info: bitfield providing information about the device.
1727  * Context: none
1728  *
1729  * Returns 1 if device is pcie device type nvme/scsi.
1730  */
1731 static int
_scsih_is_nvme_pciescsi_device(u32 device_info)1732 _scsih_is_nvme_pciescsi_device(u32 device_info)
1733 {
1734 	if (((device_info & MPI26_PCIE_DEVINFO_MASK_DEVICE_TYPE)
1735 	    == MPI26_PCIE_DEVINFO_NVME) ||
1736 	    ((device_info & MPI26_PCIE_DEVINFO_MASK_DEVICE_TYPE)
1737 	    == MPI26_PCIE_DEVINFO_SCSI))
1738 		return 1;
1739 	else
1740 		return 0;
1741 }
1742 
1743 /**
1744  * _scsih_scsi_lookup_find_by_target - search for matching channel:id
1745  * @ioc: per adapter object
1746  * @id: target id
1747  * @channel: channel
1748  * Context: This function will acquire ioc->scsi_lookup_lock.
1749  *
1750  * This will search for a matching channel:id in the scsi_lookup array,
1751  * returning 1 if found.
1752  */
1753 static u8
_scsih_scsi_lookup_find_by_target(struct MPT3SAS_ADAPTER * ioc,int id,int channel)1754 _scsih_scsi_lookup_find_by_target(struct MPT3SAS_ADAPTER *ioc, int id,
1755 	int channel)
1756 {
1757 	int smid;
1758 	struct scsi_cmnd *scmd;
1759 
1760 	for (smid = 1;
1761 	     smid <= ioc->shost->can_queue; smid++) {
1762 		scmd = mpt3sas_scsih_scsi_lookup_get(ioc, smid);
1763 		if (!scmd)
1764 			continue;
1765 		if (scmd->device->id == id &&
1766 		    scmd->device->channel == channel)
1767 			return 1;
1768 	}
1769 	return 0;
1770 }
1771 
1772 /**
1773  * _scsih_scsi_lookup_find_by_lun - search for matching channel:id:lun
1774  * @ioc: per adapter object
1775  * @id: target id
1776  * @lun: lun number
1777  * @channel: channel
1778  * Context: This function will acquire ioc->scsi_lookup_lock.
1779  *
1780  * This will search for a matching channel:id:lun in the scsi_lookup array,
1781  * returning 1 if found.
1782  */
1783 static u8
_scsih_scsi_lookup_find_by_lun(struct MPT3SAS_ADAPTER * ioc,int id,unsigned int lun,int channel)1784 _scsih_scsi_lookup_find_by_lun(struct MPT3SAS_ADAPTER *ioc, int id,
1785 	unsigned int lun, int channel)
1786 {
1787 	int smid;
1788 	struct scsi_cmnd *scmd;
1789 
1790 	for (smid = 1; smid <= ioc->shost->can_queue; smid++) {
1791 
1792 		scmd = mpt3sas_scsih_scsi_lookup_get(ioc, smid);
1793 		if (!scmd)
1794 			continue;
1795 		if (scmd->device->id == id &&
1796 		    scmd->device->channel == channel &&
1797 		    scmd->device->lun == lun)
1798 			return 1;
1799 	}
1800 	return 0;
1801 }
1802 
1803 /**
1804  * mpt3sas_scsih_scsi_lookup_get - returns scmd entry
1805  * @ioc: per adapter object
1806  * @smid: system request message index
1807  *
1808  * Return: the smid stored scmd pointer.
1809  * Then will dereference the stored scmd pointer.
1810  */
1811 struct scsi_cmnd *
mpt3sas_scsih_scsi_lookup_get(struct MPT3SAS_ADAPTER * ioc,u16 smid)1812 mpt3sas_scsih_scsi_lookup_get(struct MPT3SAS_ADAPTER *ioc, u16 smid)
1813 {
1814 	struct scsi_cmnd *scmd = NULL;
1815 	struct scsiio_tracker *st;
1816 	Mpi25SCSIIORequest_t *mpi_request;
1817 	u16 tag = smid - 1;
1818 
1819 	if (smid > 0  &&
1820 	    smid <= ioc->scsiio_depth - INTERNAL_SCSIIO_CMDS_COUNT) {
1821 		u32 unique_tag =
1822 		    ioc->io_queue_num[tag] << BLK_MQ_UNIQUE_TAG_BITS | tag;
1823 
1824 		mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
1825 
1826 		/*
1827 		 * If SCSI IO request is outstanding at driver level then
1828 		 * DevHandle filed must be non-zero. If DevHandle is zero
1829 		 * then it means that this smid is free at driver level,
1830 		 * so return NULL.
1831 		 */
1832 		if (!mpi_request->DevHandle)
1833 			return scmd;
1834 
1835 		scmd = scsi_host_find_tag(ioc->shost, unique_tag);
1836 		if (scmd) {
1837 			st = scsi_cmd_priv(scmd);
1838 			if (st->cb_idx == 0xFF || st->smid == 0)
1839 				scmd = NULL;
1840 		}
1841 	}
1842 	return scmd;
1843 }
1844 
1845 /**
1846  * scsih_change_queue_depth - setting device queue depth
1847  * @sdev: scsi device struct
1848  * @qdepth: requested queue depth
1849  *
1850  * Return: queue depth.
1851  */
1852 static int
scsih_change_queue_depth(struct scsi_device * sdev,int qdepth)1853 scsih_change_queue_depth(struct scsi_device *sdev, int qdepth)
1854 {
1855 	struct Scsi_Host *shost = sdev->host;
1856 	int max_depth;
1857 	struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
1858 	struct MPT3SAS_DEVICE *sas_device_priv_data;
1859 	struct MPT3SAS_TARGET *sas_target_priv_data;
1860 	struct _sas_device *sas_device;
1861 	unsigned long flags;
1862 
1863 	max_depth = shost->can_queue;
1864 
1865 	/*
1866 	 * limit max device queue for SATA to 32 if enable_sdev_max_qd
1867 	 * is disabled.
1868 	 */
1869 	if (ioc->enable_sdev_max_qd || ioc->is_gen35_ioc)
1870 		goto not_sata;
1871 
1872 	sas_device_priv_data = sdev->hostdata;
1873 	if (!sas_device_priv_data)
1874 		goto not_sata;
1875 	sas_target_priv_data = sas_device_priv_data->sas_target;
1876 	if (!sas_target_priv_data)
1877 		goto not_sata;
1878 	if ((sas_target_priv_data->flags & MPT_TARGET_FLAGS_VOLUME))
1879 		goto not_sata;
1880 
1881 	spin_lock_irqsave(&ioc->sas_device_lock, flags);
1882 	sas_device = __mpt3sas_get_sdev_from_target(ioc, sas_target_priv_data);
1883 	if (sas_device) {
1884 		if (sas_device->device_info & MPI2_SAS_DEVICE_INFO_SATA_DEVICE)
1885 			max_depth = MPT3SAS_SATA_QUEUE_DEPTH;
1886 
1887 		sas_device_put(sas_device);
1888 	}
1889 	spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
1890 
1891  not_sata:
1892 
1893 	if (!sdev->tagged_supported)
1894 		max_depth = 1;
1895 	if (qdepth > max_depth)
1896 		qdepth = max_depth;
1897 	scsi_change_queue_depth(sdev, qdepth);
1898 	sdev_printk(KERN_INFO, sdev,
1899 	    "qdepth(%d), tagged(%d), scsi_level(%d), cmd_que(%d)\n",
1900 	    sdev->queue_depth, sdev->tagged_supported,
1901 	    sdev->scsi_level, ((sdev->inquiry[7] & 2) >> 1));
1902 	return sdev->queue_depth;
1903 }
1904 
1905 /**
1906  * mpt3sas_scsih_change_queue_depth - setting device queue depth
1907  * @sdev: scsi device struct
1908  * @qdepth: requested queue depth
1909  *
1910  * Returns nothing.
1911  */
1912 void
mpt3sas_scsih_change_queue_depth(struct scsi_device * sdev,int qdepth)1913 mpt3sas_scsih_change_queue_depth(struct scsi_device *sdev, int qdepth)
1914 {
1915 	struct Scsi_Host *shost = sdev->host;
1916 	struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
1917 
1918 	if (ioc->enable_sdev_max_qd)
1919 		qdepth = shost->can_queue;
1920 
1921 	scsih_change_queue_depth(sdev, qdepth);
1922 }
1923 
/**
 * scsih_target_alloc - target add routine
 * @starget: scsi target struct
 *
 * Allocates the per-target private data (freed in scsih_target_destroy)
 * and links it to the matching raid volume, PCIe device, or sas/sata
 * device object.
 *
 * Return: 0 if ok. Any other return is assumed to be an error and
 * the device is ignored.
 */
static int
scsih_target_alloc(struct scsi_target *starget)
{
	struct Scsi_Host *shost = dev_to_shost(&starget->dev);
	struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
	struct MPT3SAS_TARGET *sas_target_priv_data;
	struct _sas_device *sas_device;
	struct _raid_device *raid_device;
	struct _pcie_device *pcie_device;
	unsigned long flags;
	struct sas_rphy *rphy;

	sas_target_priv_data = kzalloc_obj(*sas_target_priv_data);
	if (!sas_target_priv_data)
		return -ENOMEM;

	starget->hostdata = sas_target_priv_data;
	sas_target_priv_data->starget = starget;
	/* stays invalid if no matching device is found below */
	sas_target_priv_data->handle = MPT3SAS_INVALID_DEVICE_HANDLE;

	/* RAID volumes */
	if (starget->channel == RAID_CHANNEL) {
		spin_lock_irqsave(&ioc->raid_device_lock, flags);
		raid_device = _scsih_raid_device_find_by_id(ioc, starget->id,
		    starget->channel);
		if (raid_device) {
			sas_target_priv_data->handle = raid_device->handle;
			sas_target_priv_data->sas_address = raid_device->wwid;
			sas_target_priv_data->flags |= MPT_TARGET_FLAGS_VOLUME;
			if (ioc->is_warpdrive)
				sas_target_priv_data->raid_device = raid_device;
			raid_device->starget = starget;
		}
		spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
		return 0;
	}

	/* PCIe devices */
	if (starget->channel == PCIE_CHANNEL) {
		spin_lock_irqsave(&ioc->pcie_device_lock, flags);
		/* the lookup takes a pcie_device reference that is kept in
		 * ->pcie_dev and dropped in scsih_target_destroy()
		 */
		pcie_device = __mpt3sas_get_pdev_by_idchannel(ioc, starget->id,
			starget->channel);
		if (pcie_device) {
			sas_target_priv_data->handle = pcie_device->handle;
			sas_target_priv_data->sas_address = pcie_device->wwid;
			sas_target_priv_data->port = NULL;
			sas_target_priv_data->pcie_dev = pcie_device;
			pcie_device->starget = starget;
			pcie_device->id = starget->id;
			pcie_device->channel = starget->channel;
			sas_target_priv_data->flags |=
				MPT_TARGET_FLAGS_PCIE_DEVICE;
			if (pcie_device->fast_path)
				sas_target_priv_data->flags |=
					MPT_TARGET_FASTPATH_IO;
		}
		spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
		return 0;
	}

	/* sas/sata devices */
	spin_lock_irqsave(&ioc->sas_device_lock, flags);
	rphy = dev_to_rphy(starget->dev.parent);
	/* the lookup takes a sas_device reference that is kept in
	 * ->sas_dev and dropped in scsih_target_destroy()
	 */
	sas_device = __mpt3sas_get_sdev_by_rphy(ioc, rphy);

	if (sas_device) {
		sas_target_priv_data->handle = sas_device->handle;
		sas_target_priv_data->sas_address = sas_device->sas_address;
		sas_target_priv_data->port = sas_device->port;
		sas_target_priv_data->sas_dev = sas_device;
		sas_device->starget = starget;
		sas_device->id = starget->id;
		sas_device->channel = starget->channel;
		/* device is a hidden raid component */
		if (test_bit(sas_device->handle, ioc->pd_handles))
			sas_target_priv_data->flags |=
			    MPT_TARGET_FLAGS_RAID_COMPONENT;
		if (sas_device->fast_path)
			sas_target_priv_data->flags |=
					MPT_TARGET_FASTPATH_IO;
	}
	spin_unlock_irqrestore(&ioc->sas_device_lock, flags);

	return 0;
}
2015 
/**
 * scsih_target_destroy - target destroy routine
 * @starget: scsi target struct
 *
 * Unlinks the target from its raid/pcie/sas device object, drops the
 * references held on it, and frees the per-target private data.
 */
static void
scsih_target_destroy(struct scsi_target *starget)
{
	struct Scsi_Host *shost = dev_to_shost(&starget->dev);
	struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
	struct MPT3SAS_TARGET *sas_target_priv_data;
	struct _sas_device *sas_device;
	struct _raid_device *raid_device;
	struct _pcie_device *pcie_device;
	unsigned long flags;

	sas_target_priv_data = starget->hostdata;
	if (!sas_target_priv_data)
		return;

	if (starget->channel == RAID_CHANNEL) {
		spin_lock_irqsave(&ioc->raid_device_lock, flags);
		raid_device = _scsih_raid_device_find_by_id(ioc, starget->id,
		    starget->channel);
		if (raid_device) {
			raid_device->starget = NULL;
			raid_device->sdev = NULL;
		}
		spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
		goto out;
	}

	if (starget->channel == PCIE_CHANNEL) {
		spin_lock_irqsave(&ioc->pcie_device_lock, flags);
		pcie_device = __mpt3sas_get_pdev_from_target(ioc,
							sas_target_priv_data);
		if (pcie_device && (pcie_device->starget == starget) &&
			(pcie_device->id == starget->id) &&
			(pcie_device->channel == starget->channel))
			pcie_device->starget = NULL;

		if (pcie_device) {
			/*
			 * Two puts are intentional: the first balances the
			 * lookup just above, the second drops the reference
			 * taken in scsih_target_alloc() (stored in
			 * ->pcie_dev). Corresponding get() is in
			 * _scsih_target_alloc()
			 */
			sas_target_priv_data->pcie_dev = NULL;
			pcie_device_put(pcie_device);
			pcie_device_put(pcie_device);
		}
		spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
		goto out;
	}

	spin_lock_irqsave(&ioc->sas_device_lock, flags);
	sas_device = __mpt3sas_get_sdev_from_target(ioc, sas_target_priv_data);
	if (sas_device && (sas_device->starget == starget) &&
	    (sas_device->id == starget->id) &&
	    (sas_device->channel == starget->channel))
		sas_device->starget = NULL;

	if (sas_device) {
		/*
		 * Two puts are intentional: the first balances the lookup
		 * just above, the second drops the reference taken in
		 * scsih_target_alloc() (stored in ->sas_dev).
		 * Corresponding get() is in _scsih_target_alloc()
		 */
		sas_target_priv_data->sas_dev = NULL;
		sas_device_put(sas_device);

		sas_device_put(sas_device);
	}
	spin_unlock_irqrestore(&ioc->sas_device_lock, flags);

 out:
	kfree(sas_target_priv_data);
	starget->hostdata = NULL;
}
2090 
/**
 * scsih_sdev_init - device add routine
 * @sdev: scsi device struct
 *
 * Allocates per-LUN private data (freed in scsih_sdev_destroy) and
 * links the scsi_target to the matching raid/pcie/sas device object.
 *
 * Return: 0 if ok. Any other return is assumed to be an error and
 * the device is ignored.
 */
static int
scsih_sdev_init(struct scsi_device *sdev)
{
	struct Scsi_Host *shost;
	struct MPT3SAS_ADAPTER *ioc;
	struct MPT3SAS_TARGET *sas_target_priv_data;
	struct MPT3SAS_DEVICE *sas_device_priv_data;
	struct scsi_target *starget;
	struct _raid_device *raid_device;
	struct _sas_device *sas_device;
	struct _pcie_device *pcie_device;
	unsigned long flags;

	sas_device_priv_data = kzalloc_obj(*sas_device_priv_data);
	if (!sas_device_priv_data)
		return -ENOMEM;

	sas_device_priv_data->lun = sdev->lun;
	sas_device_priv_data->flags = MPT_DEVICE_FLAGS_INIT;

	starget = scsi_target(sdev);
	sas_target_priv_data = starget->hostdata;
	/* counted so scsih_sdev_destroy() can clear the starget link
	 * when the last LUN goes away
	 */
	sas_target_priv_data->num_luns++;
	sas_device_priv_data->sas_target = sas_target_priv_data;
	sdev->hostdata = sas_device_priv_data;
	/* hidden raid components must not be claimed by upper drivers */
	if ((sas_target_priv_data->flags & MPT_TARGET_FLAGS_RAID_COMPONENT))
		sdev->no_uld_attach = 1;

	shost = dev_to_shost(&starget->dev);
	ioc = shost_priv(shost);
	if (starget->channel == RAID_CHANNEL) {
		spin_lock_irqsave(&ioc->raid_device_lock, flags);
		raid_device = _scsih_raid_device_find_by_id(ioc,
		    starget->id, starget->channel);
		if (raid_device)
			raid_device->sdev = sdev; /* raid is single lun */
		spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
	}
	if (starget->channel == PCIE_CHANNEL) {
		spin_lock_irqsave(&ioc->pcie_device_lock, flags);
		pcie_device = __mpt3sas_get_pdev_by_wwid(ioc,
				sas_target_priv_data->sas_address);
		if (pcie_device && (pcie_device->starget == NULL)) {
			sdev_printk(KERN_INFO, sdev,
			    "%s : pcie_device->starget set to starget @ %d\n",
			    __func__, __LINE__);
			pcie_device->starget = starget;
		}

		/* drop the lookup reference taken above */
		if (pcie_device)
			pcie_device_put(pcie_device);
		spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);

	} else  if (!(sas_target_priv_data->flags & MPT_TARGET_FLAGS_VOLUME)) {
		spin_lock_irqsave(&ioc->sas_device_lock, flags);
		sas_device = __mpt3sas_get_sdev_by_addr(ioc,
		    sas_target_priv_data->sas_address,
		    sas_target_priv_data->port);
		if (sas_device && (sas_device->starget == NULL)) {
			sdev_printk(KERN_INFO, sdev,
			"%s : sas_device->starget set to starget @ %d\n",
			     __func__, __LINE__);
			sas_device->starget = starget;
		}

		/* drop the lookup reference taken above */
		if (sas_device)
			sas_device_put(sas_device);

		spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
	}

	return 0;
}
2171 
2172 /**
2173  * scsih_sdev_destroy - device destroy routine
2174  * @sdev: scsi device struct
2175  */
2176 static void
scsih_sdev_destroy(struct scsi_device * sdev)2177 scsih_sdev_destroy(struct scsi_device *sdev)
2178 {
2179 	struct MPT3SAS_TARGET *sas_target_priv_data;
2180 	struct scsi_target *starget;
2181 	struct Scsi_Host *shost;
2182 	struct MPT3SAS_ADAPTER *ioc;
2183 	struct _sas_device *sas_device;
2184 	struct _pcie_device *pcie_device;
2185 	unsigned long flags;
2186 
2187 	if (!sdev->hostdata)
2188 		return;
2189 
2190 	starget = scsi_target(sdev);
2191 	sas_target_priv_data = starget->hostdata;
2192 	sas_target_priv_data->num_luns--;
2193 
2194 	shost = dev_to_shost(&starget->dev);
2195 	ioc = shost_priv(shost);
2196 
2197 	if (sas_target_priv_data->flags & MPT_TARGET_FLAGS_PCIE_DEVICE) {
2198 		spin_lock_irqsave(&ioc->pcie_device_lock, flags);
2199 		pcie_device = __mpt3sas_get_pdev_from_target(ioc,
2200 				sas_target_priv_data);
2201 		if (pcie_device && !sas_target_priv_data->num_luns)
2202 			pcie_device->starget = NULL;
2203 
2204 		if (pcie_device)
2205 			pcie_device_put(pcie_device);
2206 
2207 		spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
2208 
2209 	} else if (!(sas_target_priv_data->flags & MPT_TARGET_FLAGS_VOLUME)) {
2210 		spin_lock_irqsave(&ioc->sas_device_lock, flags);
2211 		sas_device = __mpt3sas_get_sdev_from_target(ioc,
2212 				sas_target_priv_data);
2213 		if (sas_device && !sas_target_priv_data->num_luns)
2214 			sas_device->starget = NULL;
2215 
2216 		if (sas_device)
2217 			sas_device_put(sas_device);
2218 		spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
2219 	}
2220 
2221 	kfree(sdev->hostdata);
2222 	sdev->hostdata = NULL;
2223 }
2224 
2225 /**
2226  * _scsih_display_sata_capabilities - sata capabilities
2227  * @ioc: per adapter object
2228  * @handle: device handle
2229  * @sdev: scsi device struct
2230  */
2231 static void
_scsih_display_sata_capabilities(struct MPT3SAS_ADAPTER * ioc,u16 handle,struct scsi_device * sdev)2232 _scsih_display_sata_capabilities(struct MPT3SAS_ADAPTER *ioc,
2233 	u16 handle, struct scsi_device *sdev)
2234 {
2235 	Mpi2ConfigReply_t mpi_reply;
2236 	Mpi2SasDevicePage0_t sas_device_pg0;
2237 	u32 ioc_status;
2238 	u16 flags;
2239 	u32 device_info;
2240 
2241 	if ((mpt3sas_config_get_sas_device_pg0(ioc, &mpi_reply, &sas_device_pg0,
2242 	    MPI2_SAS_DEVICE_PGAD_FORM_HANDLE, handle))) {
2243 		ioc_err(ioc, "failure at %s:%d/%s()!\n",
2244 			__FILE__, __LINE__, __func__);
2245 		return;
2246 	}
2247 
2248 	ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
2249 	    MPI2_IOCSTATUS_MASK;
2250 	if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
2251 		ioc_err(ioc, "failure at %s:%d/%s()!\n",
2252 			__FILE__, __LINE__, __func__);
2253 		return;
2254 	}
2255 
2256 	flags = le16_to_cpu(sas_device_pg0.Flags);
2257 	device_info = le32_to_cpu(sas_device_pg0.DeviceInfo);
2258 
2259 	sdev_printk(KERN_INFO, sdev,
2260 	    "atapi(%s), ncq(%s), asyn_notify(%s), smart(%s), fua(%s), "
2261 	    "sw_preserve(%s)\n",
2262 	    (device_info & MPI2_SAS_DEVICE_INFO_ATAPI_DEVICE) ? "y" : "n",
2263 	    (flags & MPI2_SAS_DEVICE0_FLAGS_SATA_NCQ_SUPPORTED) ? "y" : "n",
2264 	    (flags & MPI2_SAS_DEVICE0_FLAGS_SATA_ASYNCHRONOUS_NOTIFY) ? "y" :
2265 	    "n",
2266 	    (flags & MPI2_SAS_DEVICE0_FLAGS_SATA_SMART_SUPPORTED) ? "y" : "n",
2267 	    (flags & MPI2_SAS_DEVICE0_FLAGS_SATA_FUA_SUPPORTED) ? "y" : "n",
2268 	    (flags & MPI2_SAS_DEVICE0_FLAGS_SATA_SW_PRESERVE) ? "y" : "n");
2269 }
2270 
2271 /*
2272  * raid transport support -
2273  * Enabled for SLES11 and newer, in older kernels the driver will panic when
2274  * unloading the driver followed by a load - I believe that the subroutine
2275  * raid_class_release() is not cleaning up properly.
2276  */
2277 
2278 /**
2279  * scsih_is_raid - return boolean indicating device is raid volume
2280  * @dev: the device struct object
2281  */
2282 static int
scsih_is_raid(struct device * dev)2283 scsih_is_raid(struct device *dev)
2284 {
2285 	struct scsi_device *sdev = to_scsi_device(dev);
2286 	struct MPT3SAS_ADAPTER *ioc = shost_priv(sdev->host);
2287 
2288 	if (ioc->is_warpdrive)
2289 		return 0;
2290 	return (sdev->channel == RAID_CHANNEL) ? 1 : 0;
2291 }
2292 
2293 static int
scsih_is_nvme(struct device * dev)2294 scsih_is_nvme(struct device *dev)
2295 {
2296 	struct scsi_device *sdev = to_scsi_device(dev);
2297 
2298 	return (sdev->channel == PCIE_CHANNEL) ? 1 : 0;
2299 }
2300 
2301 /**
2302  * scsih_get_resync - get raid volume resync percent complete
2303  * @dev: the device struct object
2304  */
2305 static void
scsih_get_resync(struct device * dev)2306 scsih_get_resync(struct device *dev)
2307 {
2308 	struct scsi_device *sdev = to_scsi_device(dev);
2309 	struct MPT3SAS_ADAPTER *ioc = shost_priv(sdev->host);
2310 	static struct _raid_device *raid_device;
2311 	unsigned long flags;
2312 	Mpi2RaidVolPage0_t vol_pg0;
2313 	Mpi2ConfigReply_t mpi_reply;
2314 	u32 volume_status_flags;
2315 	u8 percent_complete;
2316 	u16 handle;
2317 
2318 	percent_complete = 0;
2319 	handle = 0;
2320 	if (ioc->is_warpdrive)
2321 		goto out;
2322 
2323 	spin_lock_irqsave(&ioc->raid_device_lock, flags);
2324 	raid_device = _scsih_raid_device_find_by_id(ioc, sdev->id,
2325 	    sdev->channel);
2326 	if (raid_device) {
2327 		handle = raid_device->handle;
2328 		percent_complete = raid_device->percent_complete;
2329 	}
2330 	spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
2331 
2332 	if (!handle)
2333 		goto out;
2334 
2335 	if (mpt3sas_config_get_raid_volume_pg0(ioc, &mpi_reply, &vol_pg0,
2336 	     MPI2_RAID_VOLUME_PGAD_FORM_HANDLE, handle,
2337 	     sizeof(Mpi2RaidVolPage0_t))) {
2338 		ioc_err(ioc, "failure at %s:%d/%s()!\n",
2339 			__FILE__, __LINE__, __func__);
2340 		percent_complete = 0;
2341 		goto out;
2342 	}
2343 
2344 	volume_status_flags = le32_to_cpu(vol_pg0.VolumeStatusFlags);
2345 	if (!(volume_status_flags &
2346 	    MPI2_RAIDVOL0_STATUS_FLAG_RESYNC_IN_PROGRESS))
2347 		percent_complete = 0;
2348 
2349  out:
2350 
2351 	switch (ioc->hba_mpi_version_belonged) {
2352 	case MPI2_VERSION:
2353 		raid_set_resync(mpt2sas_raid_template, dev, percent_complete);
2354 		break;
2355 	case MPI25_VERSION:
2356 	case MPI26_VERSION:
2357 		raid_set_resync(mpt3sas_raid_template, dev, percent_complete);
2358 		break;
2359 	}
2360 }
2361 
2362 /**
2363  * scsih_get_state - get raid volume level
2364  * @dev: the device struct object
2365  */
2366 static void
scsih_get_state(struct device * dev)2367 scsih_get_state(struct device *dev)
2368 {
2369 	struct scsi_device *sdev = to_scsi_device(dev);
2370 	struct MPT3SAS_ADAPTER *ioc = shost_priv(sdev->host);
2371 	static struct _raid_device *raid_device;
2372 	unsigned long flags;
2373 	Mpi2RaidVolPage0_t vol_pg0;
2374 	Mpi2ConfigReply_t mpi_reply;
2375 	u32 volstate;
2376 	enum raid_state state = RAID_STATE_UNKNOWN;
2377 	u16 handle = 0;
2378 
2379 	spin_lock_irqsave(&ioc->raid_device_lock, flags);
2380 	raid_device = _scsih_raid_device_find_by_id(ioc, sdev->id,
2381 	    sdev->channel);
2382 	if (raid_device)
2383 		handle = raid_device->handle;
2384 	spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
2385 
2386 	if (!raid_device)
2387 		goto out;
2388 
2389 	if (mpt3sas_config_get_raid_volume_pg0(ioc, &mpi_reply, &vol_pg0,
2390 	     MPI2_RAID_VOLUME_PGAD_FORM_HANDLE, handle,
2391 	     sizeof(Mpi2RaidVolPage0_t))) {
2392 		ioc_err(ioc, "failure at %s:%d/%s()!\n",
2393 			__FILE__, __LINE__, __func__);
2394 		goto out;
2395 	}
2396 
2397 	volstate = le32_to_cpu(vol_pg0.VolumeStatusFlags);
2398 	if (volstate & MPI2_RAIDVOL0_STATUS_FLAG_RESYNC_IN_PROGRESS) {
2399 		state = RAID_STATE_RESYNCING;
2400 		goto out;
2401 	}
2402 
2403 	switch (vol_pg0.VolumeState) {
2404 	case MPI2_RAID_VOL_STATE_OPTIMAL:
2405 	case MPI2_RAID_VOL_STATE_ONLINE:
2406 		state = RAID_STATE_ACTIVE;
2407 		break;
2408 	case  MPI2_RAID_VOL_STATE_DEGRADED:
2409 		state = RAID_STATE_DEGRADED;
2410 		break;
2411 	case MPI2_RAID_VOL_STATE_FAILED:
2412 	case MPI2_RAID_VOL_STATE_MISSING:
2413 		state = RAID_STATE_OFFLINE;
2414 		break;
2415 	}
2416  out:
2417 	switch (ioc->hba_mpi_version_belonged) {
2418 	case MPI2_VERSION:
2419 		raid_set_state(mpt2sas_raid_template, dev, state);
2420 		break;
2421 	case MPI25_VERSION:
2422 	case MPI26_VERSION:
2423 		raid_set_state(mpt3sas_raid_template, dev, state);
2424 		break;
2425 	}
2426 }
2427 
2428 /**
2429  * _scsih_set_level - set raid level
2430  * @ioc: ?
2431  * @sdev: scsi device struct
2432  * @volume_type: volume type
2433  */
2434 static void
_scsih_set_level(struct MPT3SAS_ADAPTER * ioc,struct scsi_device * sdev,u8 volume_type)2435 _scsih_set_level(struct MPT3SAS_ADAPTER *ioc,
2436 	struct scsi_device *sdev, u8 volume_type)
2437 {
2438 	enum raid_level level = RAID_LEVEL_UNKNOWN;
2439 
2440 	switch (volume_type) {
2441 	case MPI2_RAID_VOL_TYPE_RAID0:
2442 		level = RAID_LEVEL_0;
2443 		break;
2444 	case MPI2_RAID_VOL_TYPE_RAID10:
2445 		level = RAID_LEVEL_10;
2446 		break;
2447 	case MPI2_RAID_VOL_TYPE_RAID1E:
2448 		level = RAID_LEVEL_1E;
2449 		break;
2450 	case MPI2_RAID_VOL_TYPE_RAID1:
2451 		level = RAID_LEVEL_1;
2452 		break;
2453 	}
2454 
2455 	switch (ioc->hba_mpi_version_belonged) {
2456 	case MPI2_VERSION:
2457 		raid_set_level(mpt2sas_raid_template,
2458 			&sdev->sdev_gendev, level);
2459 		break;
2460 	case MPI25_VERSION:
2461 	case MPI26_VERSION:
2462 		raid_set_level(mpt3sas_raid_template,
2463 			&sdev->sdev_gendev, level);
2464 		break;
2465 	}
2466 }
2467 
2468 
2469 /**
2470  * _scsih_get_volume_capabilities - volume capabilities
2471  * @ioc: per adapter object
2472  * @raid_device: the raid_device object
2473  *
2474  * Return: 0 for success, else 1
2475  */
2476 static int
_scsih_get_volume_capabilities(struct MPT3SAS_ADAPTER * ioc,struct _raid_device * raid_device)2477 _scsih_get_volume_capabilities(struct MPT3SAS_ADAPTER *ioc,
2478 	struct _raid_device *raid_device)
2479 {
2480 	Mpi2RaidVolPage0_t *vol_pg0;
2481 	Mpi2RaidPhysDiskPage0_t pd_pg0;
2482 	Mpi2SasDevicePage0_t sas_device_pg0;
2483 	Mpi2ConfigReply_t mpi_reply;
2484 	u16 sz;
2485 	u8 num_pds;
2486 
2487 	if ((mpt3sas_config_get_number_pds(ioc, raid_device->handle,
2488 	    &num_pds)) || !num_pds) {
2489 		dfailprintk(ioc,
2490 			    ioc_warn(ioc, "failure at %s:%d/%s()!\n",
2491 				     __FILE__, __LINE__, __func__));
2492 		return 1;
2493 	}
2494 
2495 	raid_device->num_pds = num_pds;
2496 	sz = struct_size(vol_pg0, PhysDisk, num_pds);
2497 	vol_pg0 = kzalloc(sz, GFP_KERNEL);
2498 	if (!vol_pg0) {
2499 		dfailprintk(ioc,
2500 			    ioc_warn(ioc, "failure at %s:%d/%s()!\n",
2501 				     __FILE__, __LINE__, __func__));
2502 		return 1;
2503 	}
2504 
2505 	if ((mpt3sas_config_get_raid_volume_pg0(ioc, &mpi_reply, vol_pg0,
2506 	     MPI2_RAID_VOLUME_PGAD_FORM_HANDLE, raid_device->handle, sz))) {
2507 		dfailprintk(ioc,
2508 			    ioc_warn(ioc, "failure at %s:%d/%s()!\n",
2509 				     __FILE__, __LINE__, __func__));
2510 		kfree(vol_pg0);
2511 		return 1;
2512 	}
2513 
2514 	raid_device->volume_type = vol_pg0->VolumeType;
2515 
2516 	/* figure out what the underlying devices are by
2517 	 * obtaining the device_info bits for the 1st device
2518 	 */
2519 	if (!(mpt3sas_config_get_phys_disk_pg0(ioc, &mpi_reply,
2520 	    &pd_pg0, MPI2_PHYSDISK_PGAD_FORM_PHYSDISKNUM,
2521 	    vol_pg0->PhysDisk[0].PhysDiskNum))) {
2522 		if (!(mpt3sas_config_get_sas_device_pg0(ioc, &mpi_reply,
2523 		    &sas_device_pg0, MPI2_SAS_DEVICE_PGAD_FORM_HANDLE,
2524 		    le16_to_cpu(pd_pg0.DevHandle)))) {
2525 			raid_device->device_info =
2526 			    le32_to_cpu(sas_device_pg0.DeviceInfo);
2527 		}
2528 	}
2529 
2530 	kfree(vol_pg0);
2531 	return 0;
2532 }
2533 
2534 /**
2535  * _scsih_enable_tlr - setting TLR flags
2536  * @ioc: per adapter object
2537  * @sdev: scsi device struct
2538  *
2539  * Enabling Transaction Layer Retries for tape devices when
2540  * vpd page 0x90 is present
2541  *
2542  */
2543 static void
_scsih_enable_tlr(struct MPT3SAS_ADAPTER * ioc,struct scsi_device * sdev)2544 _scsih_enable_tlr(struct MPT3SAS_ADAPTER *ioc, struct scsi_device *sdev)
2545 {
2546 
2547 	/* only for TAPE */
2548 	if (sdev->type != TYPE_TAPE)
2549 		return;
2550 
2551 	if (!(ioc->facts.IOCCapabilities & MPI2_IOCFACTS_CAPABILITY_TLR))
2552 		return;
2553 
2554 	sas_enable_tlr(sdev);
2555 	sdev_printk(KERN_INFO, sdev, "TLR %s\n",
2556 	    sas_is_tlr_enabled(sdev) ? "Enabled" : "Disabled");
2557 	return;
2558 
2559 }
2560 
2561 /**
2562  * scsih_sdev_configure - device configure routine.
2563  * @sdev: scsi device struct
2564  * @lim: queue limits
2565  *
2566  * Return: 0 if ok. Any other return is assumed to be an error and
2567  * the device is ignored.
2568  */
2569 static int
scsih_sdev_configure(struct scsi_device * sdev,struct queue_limits * lim)2570 scsih_sdev_configure(struct scsi_device *sdev, struct queue_limits *lim)
2571 {
2572 	struct Scsi_Host *shost = sdev->host;
2573 	struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
2574 	struct MPT3SAS_DEVICE *sas_device_priv_data;
2575 	struct MPT3SAS_TARGET *sas_target_priv_data;
2576 	struct _sas_device *sas_device;
2577 	struct _pcie_device *pcie_device;
2578 	struct _raid_device *raid_device;
2579 	unsigned long flags;
2580 	int qdepth;
2581 	u8 ssp_target = 0;
2582 	char *ds = "";
2583 	char *r_level = "";
2584 	u16 handle, volume_handle = 0;
2585 	u64 volume_wwid = 0;
2586 	enum device_responsive_state retval;
2587 	u8 count = 0;
2588 
2589 	qdepth = 1;
2590 	sas_device_priv_data = sdev->hostdata;
2591 	sas_device_priv_data->configured_lun = 1;
2592 	sas_device_priv_data->flags &= ~MPT_DEVICE_FLAGS_INIT;
2593 	sas_target_priv_data = sas_device_priv_data->sas_target;
2594 	handle = sas_target_priv_data->handle;
2595 
2596 	/* raid volume handling */
2597 	if (sas_target_priv_data->flags & MPT_TARGET_FLAGS_VOLUME) {
2598 
2599 		spin_lock_irqsave(&ioc->raid_device_lock, flags);
2600 		raid_device = mpt3sas_raid_device_find_by_handle(ioc, handle);
2601 		spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
2602 		if (!raid_device) {
2603 			dfailprintk(ioc,
2604 				    ioc_warn(ioc, "failure at %s:%d/%s()!\n",
2605 					     __FILE__, __LINE__, __func__));
2606 			return 1;
2607 		}
2608 
2609 		if (_scsih_get_volume_capabilities(ioc, raid_device)) {
2610 			dfailprintk(ioc,
2611 				    ioc_warn(ioc, "failure at %s:%d/%s()!\n",
2612 					     __FILE__, __LINE__, __func__));
2613 			return 1;
2614 		}
2615 
2616 		/*
2617 		 * WARPDRIVE: Initialize the required data for Direct IO
2618 		 */
2619 		mpt3sas_init_warpdrive_properties(ioc, raid_device);
2620 
2621 		/* RAID Queue Depth Support
2622 		 * IS volume = underlying qdepth of drive type, either
2623 		 *    MPT3SAS_SAS_QUEUE_DEPTH or MPT3SAS_SATA_QUEUE_DEPTH
2624 		 * IM/IME/R10 = 128 (MPT3SAS_RAID_QUEUE_DEPTH)
2625 		 */
2626 		if (raid_device->device_info &
2627 		    MPI2_SAS_DEVICE_INFO_SSP_TARGET) {
2628 			qdepth = MPT3SAS_SAS_QUEUE_DEPTH;
2629 			ds = "SSP";
2630 		} else {
2631 			qdepth = MPT3SAS_SATA_QUEUE_DEPTH;
2632 			if (raid_device->device_info &
2633 			    MPI2_SAS_DEVICE_INFO_SATA_DEVICE)
2634 				ds = "SATA";
2635 			else
2636 				ds = "STP";
2637 		}
2638 
2639 		switch (raid_device->volume_type) {
2640 		case MPI2_RAID_VOL_TYPE_RAID0:
2641 			r_level = "RAID0";
2642 			break;
2643 		case MPI2_RAID_VOL_TYPE_RAID1E:
2644 			qdepth = MPT3SAS_RAID_QUEUE_DEPTH;
2645 			if (ioc->manu_pg10.OEMIdentifier &&
2646 			    (le32_to_cpu(ioc->manu_pg10.GenericFlags0) &
2647 			    MFG10_GF0_R10_DISPLAY) &&
2648 			    !(raid_device->num_pds % 2))
2649 				r_level = "RAID10";
2650 			else
2651 				r_level = "RAID1E";
2652 			break;
2653 		case MPI2_RAID_VOL_TYPE_RAID1:
2654 			qdepth = MPT3SAS_RAID_QUEUE_DEPTH;
2655 			r_level = "RAID1";
2656 			break;
2657 		case MPI2_RAID_VOL_TYPE_RAID10:
2658 			qdepth = MPT3SAS_RAID_QUEUE_DEPTH;
2659 			r_level = "RAID10";
2660 			break;
2661 		case MPI2_RAID_VOL_TYPE_UNKNOWN:
2662 		default:
2663 			qdepth = MPT3SAS_RAID_QUEUE_DEPTH;
2664 			r_level = "RAIDX";
2665 			break;
2666 		}
2667 
2668 		if (!ioc->hide_ir_msg)
2669 			sdev_printk(KERN_INFO, sdev,
2670 			   "%s: handle(0x%04x), wwid(0x%016llx),"
2671 			    " pd_count(%d), type(%s)\n",
2672 			    r_level, raid_device->handle,
2673 			    (unsigned long long)raid_device->wwid,
2674 			    raid_device->num_pds, ds);
2675 
2676 		if (shost->max_sectors > MPT3SAS_RAID_MAX_SECTORS) {
2677 			lim->max_hw_sectors = MPT3SAS_RAID_MAX_SECTORS;
2678 			sdev_printk(KERN_INFO, sdev,
2679 					"Set queue's max_sector to: %u\n",
2680 						MPT3SAS_RAID_MAX_SECTORS);
2681 		}
2682 
2683 		mpt3sas_scsih_change_queue_depth(sdev, qdepth);
2684 
2685 		/* raid transport support */
2686 		if (!ioc->is_warpdrive)
2687 			_scsih_set_level(ioc, sdev, raid_device->volume_type);
2688 		return 0;
2689 	}
2690 
2691 	/* non-raid handling */
2692 	if (sas_target_priv_data->flags & MPT_TARGET_FLAGS_RAID_COMPONENT) {
2693 		if (mpt3sas_config_get_volume_handle(ioc, handle,
2694 		    &volume_handle)) {
2695 			dfailprintk(ioc,
2696 				    ioc_warn(ioc, "failure at %s:%d/%s()!\n",
2697 					     __FILE__, __LINE__, __func__));
2698 			return 1;
2699 		}
2700 		if (volume_handle && mpt3sas_config_get_volume_wwid(ioc,
2701 		    volume_handle, &volume_wwid)) {
2702 			dfailprintk(ioc,
2703 				    ioc_warn(ioc, "failure at %s:%d/%s()!\n",
2704 					     __FILE__, __LINE__, __func__));
2705 			return 1;
2706 		}
2707 	}
2708 
2709 	/* PCIe handling */
2710 	if (sas_target_priv_data->flags & MPT_TARGET_FLAGS_PCIE_DEVICE) {
2711 		spin_lock_irqsave(&ioc->pcie_device_lock, flags);
2712 		pcie_device = __mpt3sas_get_pdev_by_wwid(ioc,
2713 				sas_device_priv_data->sas_target->sas_address);
2714 		if (!pcie_device) {
2715 			spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
2716 			dfailprintk(ioc,
2717 				    ioc_warn(ioc, "failure at %s:%d/%s()!\n",
2718 					     __FILE__, __LINE__, __func__));
2719 			return 1;
2720 		}
2721 
2722 		qdepth = ioc->max_nvme_qd;
2723 		ds = "NVMe";
2724 		sdev_printk(KERN_INFO, sdev,
2725 			"%s: handle(0x%04x), wwid(0x%016llx), port(%d)\n",
2726 			ds, handle, (unsigned long long)pcie_device->wwid,
2727 			pcie_device->port_num);
2728 		if (pcie_device->enclosure_handle != 0)
2729 			sdev_printk(KERN_INFO, sdev,
2730 			"%s: enclosure logical id(0x%016llx), slot(%d)\n",
2731 			ds,
2732 			(unsigned long long)pcie_device->enclosure_logical_id,
2733 			pcie_device->slot);
2734 		if (pcie_device->connector_name[0] != '\0')
2735 			sdev_printk(KERN_INFO, sdev,
2736 				"%s: enclosure level(0x%04x),"
2737 				"connector name( %s)\n", ds,
2738 				pcie_device->enclosure_level,
2739 				pcie_device->connector_name);
2740 
2741 		if (pcie_device->nvme_mdts)
2742 			lim->max_hw_sectors = pcie_device->nvme_mdts / 512;
2743 
2744 		pcie_device_put(pcie_device);
2745 		spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
2746 
2747 		mpt3sas_scsih_change_queue_depth(sdev, qdepth);
2748 		lim->virt_boundary_mask = ioc->page_size - 1;
2749 		return 0;
2750 	}
2751 
2752 	spin_lock_irqsave(&ioc->sas_device_lock, flags);
2753 	sas_device = __mpt3sas_get_sdev_by_addr(ioc,
2754 	   sas_device_priv_data->sas_target->sas_address,
2755 	   sas_device_priv_data->sas_target->port);
2756 	if (!sas_device) {
2757 		spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
2758 		dfailprintk(ioc,
2759 			    ioc_warn(ioc, "failure at %s:%d/%s()!\n",
2760 				     __FILE__, __LINE__, __func__));
2761 		return 1;
2762 	}
2763 
2764 	sas_device->volume_handle = volume_handle;
2765 	sas_device->volume_wwid = volume_wwid;
2766 	if (sas_device->device_info & MPI2_SAS_DEVICE_INFO_SSP_TARGET) {
2767 		qdepth = (sas_device->port_type > 1) ?
2768 			ioc->max_wideport_qd : ioc->max_narrowport_qd;
2769 		ssp_target = 1;
2770 		if (sas_device->device_info &
2771 				MPI2_SAS_DEVICE_INFO_SEP) {
2772 			sdev_printk(KERN_INFO, sdev,
2773 			"set ignore_delay_remove for handle(0x%04x)\n",
2774 			sas_device_priv_data->sas_target->handle);
2775 			sas_device_priv_data->ignore_delay_remove = 1;
2776 			ds = "SES";
2777 		} else
2778 			ds = "SSP";
2779 	} else {
2780 		qdepth = ioc->max_sata_qd;
2781 		if (sas_device->device_info & MPI2_SAS_DEVICE_INFO_STP_TARGET)
2782 			ds = "STP";
2783 		else if (sas_device->device_info &
2784 		    MPI2_SAS_DEVICE_INFO_SATA_DEVICE)
2785 			ds = "SATA";
2786 	}
2787 
2788 	sdev_printk(KERN_INFO, sdev, "%s: handle(0x%04x), " \
2789 	    "sas_addr(0x%016llx), phy(%d), device_name(0x%016llx)\n",
2790 	    ds, handle, (unsigned long long)sas_device->sas_address,
2791 	    sas_device->phy, (unsigned long long)sas_device->device_name);
2792 
2793 	_scsih_display_enclosure_chassis_info(NULL, sas_device, sdev, NULL);
2794 
2795 	sas_device_put(sas_device);
2796 	spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
2797 
2798 	if (!ssp_target) {
2799 		_scsih_display_sata_capabilities(ioc, handle, sdev);
2800 
2801 		do {
2802 			retval = _scsih_ata_pass_thru_idd(ioc, handle,
2803 				    &sas_device->ssd_device, 30, 0);
2804 		} while ((retval == DEVICE_RETRY || retval == DEVICE_RETRY_UA)
2805 			&& count++ < 3);
2806 	}
2807 
2808 
2809 	mpt3sas_scsih_change_queue_depth(sdev, qdepth);
2810 
2811 	if (ssp_target) {
2812 		sas_read_port_mode_page(sdev);
2813 		_scsih_enable_tlr(ioc, sdev);
2814 	}
2815 
2816 	return 0;
2817 }
2818 
2819 /**
2820  * scsih_bios_param - fetch head, sector, cylinder info for a disk
2821  * @sdev: scsi device struct
2822  * @unused: pointer to gendisk
2823  * @capacity: device size (in 512 byte sectors)
2824  * @params: three element array to place output:
2825  *              params[0] number of heads (max 255)
2826  *              params[1] number of sectors (max 63)
2827  *              params[2] number of cylinders
2828  */
2829 static int
scsih_bios_param(struct scsi_device * sdev,struct gendisk * unused,sector_t capacity,int params[])2830 scsih_bios_param(struct scsi_device *sdev, struct gendisk *unused,
2831 	sector_t capacity, int params[])
2832 {
2833 	int		heads;
2834 	int		sectors;
2835 	sector_t	cylinders;
2836 	ulong		dummy;
2837 
2838 	heads = 64;
2839 	sectors = 32;
2840 
2841 	dummy = heads * sectors;
2842 	cylinders = capacity;
2843 	sector_div(cylinders, dummy);
2844 
2845 	/*
2846 	 * Handle extended translation size for logical drives
2847 	 * > 1Gb
2848 	 */
2849 	if ((ulong)capacity >= 0x200000) {
2850 		heads = 255;
2851 		sectors = 63;
2852 		dummy = heads * sectors;
2853 		cylinders = capacity;
2854 		sector_div(cylinders, dummy);
2855 	}
2856 
2857 	/* return result */
2858 	params[0] = heads;
2859 	params[1] = sectors;
2860 	params[2] = cylinders;
2861 
2862 	return 0;
2863 }
2864 
2865 /**
2866  * _scsih_response_code - translation of device response code
2867  * @ioc: per adapter object
2868  * @response_code: response code returned by the device
2869  */
2870 static void
_scsih_response_code(struct MPT3SAS_ADAPTER * ioc,u8 response_code)2871 _scsih_response_code(struct MPT3SAS_ADAPTER *ioc, u8 response_code)
2872 {
2873 	char *desc;
2874 
2875 	switch (response_code) {
2876 	case MPI2_SCSITASKMGMT_RSP_TM_COMPLETE:
2877 		desc = "task management request completed";
2878 		break;
2879 	case MPI2_SCSITASKMGMT_RSP_INVALID_FRAME:
2880 		desc = "invalid frame";
2881 		break;
2882 	case MPI2_SCSITASKMGMT_RSP_TM_NOT_SUPPORTED:
2883 		desc = "task management request not supported";
2884 		break;
2885 	case MPI2_SCSITASKMGMT_RSP_TM_FAILED:
2886 		desc = "task management request failed";
2887 		break;
2888 	case MPI2_SCSITASKMGMT_RSP_TM_SUCCEEDED:
2889 		desc = "task management request succeeded";
2890 		break;
2891 	case MPI2_SCSITASKMGMT_RSP_TM_INVALID_LUN:
2892 		desc = "invalid lun";
2893 		break;
2894 	case 0xA:
2895 		desc = "overlapped tag attempted";
2896 		break;
2897 	case MPI2_SCSITASKMGMT_RSP_IO_QUEUED_ON_IOC:
2898 		desc = "task queued, however not sent to target";
2899 		break;
2900 	default:
2901 		desc = "unknown";
2902 		break;
2903 	}
2904 	ioc_warn(ioc, "response_code(0x%01x): %s\n", response_code, desc);
2905 }
2906 
2907 /**
2908  * _scsih_tm_done - tm completion routine
2909  * @ioc: per adapter object
2910  * @smid: system request message index
2911  * @msix_index: MSIX table index supplied by the OS
2912  * @reply: reply message frame(lower 32bit addr)
2913  * Context: none.
2914  *
2915  * The callback handler when using scsih_issue_tm.
2916  *
2917  * Return: 1 meaning mf should be freed from _base_interrupt
2918  *         0 means the mf is freed from this function.
2919  */
2920 static u8
_scsih_tm_done(struct MPT3SAS_ADAPTER * ioc,u16 smid,u8 msix_index,u32 reply)2921 _scsih_tm_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index, u32 reply)
2922 {
2923 	MPI2DefaultReply_t *mpi_reply;
2924 
2925 	if (ioc->tm_cmds.status == MPT3_CMD_NOT_USED)
2926 		return 1;
2927 	if (ioc->tm_cmds.smid != smid)
2928 		return 1;
2929 	ioc->tm_cmds.status |= MPT3_CMD_COMPLETE;
2930 	mpi_reply =  mpt3sas_base_get_reply_virt_addr(ioc, reply);
2931 	if (mpi_reply) {
2932 		memcpy(ioc->tm_cmds.reply, mpi_reply, mpi_reply->MsgLength*4);
2933 		ioc->tm_cmds.status |= MPT3_CMD_REPLY_VALID;
2934 	}
2935 	ioc->tm_cmds.status &= ~MPT3_CMD_PENDING;
2936 	complete(&ioc->tm_cmds.done);
2937 	return 1;
2938 }
2939 
/**
 * mpt3sas_scsih_set_tm_flag - set per target tm_busy
 * @ioc: per adapter object
 * @handle: device handle
 *
 * During task management request, we need to freeze the device queue.
 */
void
mpt3sas_scsih_set_tm_flag(struct MPT3SAS_ADAPTER *ioc, u16 handle)
{
	struct MPT3SAS_DEVICE *sas_device_priv_data;
	struct scsi_device *sdev;
	u8 skip = 0;	/* set once the matching target has been flagged */

	shost_for_each_device(sdev, ioc->shost) {
		/*
		 * Keep iterating (rather than breaking out) after the first
		 * match so the shost_for_each_device() iterator runs to
		 * completion; only the first matching device updates state.
		 */
		if (skip)
			continue;
		sas_device_priv_data = sdev->hostdata;
		if (!sas_device_priv_data)
			continue;
		if (sas_device_priv_data->sas_target->handle == handle) {
			/* freeze IO to this target while TM is outstanding */
			sas_device_priv_data->sas_target->tm_busy = 1;
			skip = 1;
			ioc->ignore_loginfos = 1;
		}
	}
}
2967 
/**
 * mpt3sas_scsih_clear_tm_flag - clear per target tm_busy
 * @ioc: per adapter object
 * @handle: device handle
 *
 * During task management request, we need to freeze the device queue.
 * This is the inverse of mpt3sas_scsih_set_tm_flag().
 */
void
mpt3sas_scsih_clear_tm_flag(struct MPT3SAS_ADAPTER *ioc, u16 handle)
{
	struct MPT3SAS_DEVICE *sas_device_priv_data;
	struct scsi_device *sdev;
	u8 skip = 0;	/* set once the matching target has been cleared */

	shost_for_each_device(sdev, ioc->shost) {
		/*
		 * Keep iterating (rather than breaking out) after the first
		 * match so the shost_for_each_device() iterator runs to
		 * completion; only the first matching device updates state.
		 */
		if (skip)
			continue;
		sas_device_priv_data = sdev->hostdata;
		if (!sas_device_priv_data)
			continue;
		if (sas_device_priv_data->sas_target->handle == handle) {
			/* unfreeze IO to this target now that TM completed */
			sas_device_priv_data->sas_target->tm_busy = 0;
			skip = 1;
			ioc->ignore_loginfos = 0;
		}
	}
}
2995 
2996 /**
2997  * scsih_tm_cmd_map_status - map the target reset & LUN reset TM status
2998  * @ioc: per adapter object
2999  * @channel: the channel assigned by the OS
3000  * @id: the id assigned by the OS
3001  * @lun: lun number
3002  * @type: MPI2_SCSITASKMGMT_TASKTYPE__XXX (defined in mpi2_init.h)
3003  * @smid_task: smid assigned to the task
3004  *
3005  * Look whether TM has aborted the timed out SCSI command, if
3006  * TM has aborted the IO then return SUCCESS else return FAILED.
3007  */
3008 static int
scsih_tm_cmd_map_status(struct MPT3SAS_ADAPTER * ioc,uint channel,uint id,uint lun,u8 type,u16 smid_task)3009 scsih_tm_cmd_map_status(struct MPT3SAS_ADAPTER *ioc, uint channel,
3010 	uint id, uint lun, u8 type, u16 smid_task)
3011 {
3012 
3013 	if (smid_task <= ioc->shost->can_queue) {
3014 		switch (type) {
3015 		case MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET:
3016 			if (!(_scsih_scsi_lookup_find_by_target(ioc,
3017 			    id, channel)))
3018 				return SUCCESS;
3019 			break;
3020 		case MPI2_SCSITASKMGMT_TASKTYPE_ABRT_TASK_SET:
3021 		case MPI2_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET:
3022 			if (!(_scsih_scsi_lookup_find_by_lun(ioc, id,
3023 			    lun, channel)))
3024 				return SUCCESS;
3025 			break;
3026 		default:
3027 			return SUCCESS;
3028 		}
3029 	} else if (smid_task == ioc->scsih_cmds.smid) {
3030 		if ((ioc->scsih_cmds.status & MPT3_CMD_COMPLETE) ||
3031 		    (ioc->scsih_cmds.status & MPT3_CMD_NOT_USED))
3032 			return SUCCESS;
3033 	} else if (smid_task == ioc->ctl_cmds.smid) {
3034 		if ((ioc->ctl_cmds.status & MPT3_CMD_COMPLETE) ||
3035 		    (ioc->ctl_cmds.status & MPT3_CMD_NOT_USED))
3036 			return SUCCESS;
3037 	}
3038 
3039 	return FAILED;
3040 }
3041 
3042 /**
3043  * scsih_tm_post_processing - post processing of target & LUN reset
3044  * @ioc: per adapter object
3045  * @handle: device handle
3046  * @channel: the channel assigned by the OS
3047  * @id: the id assigned by the OS
3048  * @lun: lun number
3049  * @type: MPI2_SCSITASKMGMT_TASKTYPE__XXX (defined in mpi2_init.h)
3050  * @smid_task: smid assigned to the task
3051  *
3052  * Post processing of target & LUN reset. Due to interrupt latency
3053  * issue it possible that interrupt for aborted IO might not be
3054  * received yet. So before returning failure status, poll the
3055  * reply descriptor pools for the reply of timed out SCSI command.
3056  * Return FAILED status if reply for timed out is not received
3057  * otherwise return SUCCESS.
3058  */
3059 static int
scsih_tm_post_processing(struct MPT3SAS_ADAPTER * ioc,u16 handle,uint channel,uint id,uint lun,u8 type,u16 smid_task)3060 scsih_tm_post_processing(struct MPT3SAS_ADAPTER *ioc, u16 handle,
3061 	uint channel, uint id, uint lun, u8 type, u16 smid_task)
3062 {
3063 	int rc;
3064 
3065 	rc = scsih_tm_cmd_map_status(ioc, channel, id, lun, type, smid_task);
3066 	if (rc == SUCCESS)
3067 		return rc;
3068 
3069 	ioc_info(ioc,
3070 	    "Poll ReplyDescriptor queues for completion of"
3071 	    " smid(%d), task_type(0x%02x), handle(0x%04x)\n",
3072 	    smid_task, type, handle);
3073 
3074 	/*
3075 	 * Due to interrupt latency issues, driver may receive interrupt for
3076 	 * TM first and then for aborted SCSI IO command. So, poll all the
3077 	 * ReplyDescriptor pools before returning the FAILED status to SML.
3078 	 */
3079 	mpt3sas_base_mask_interrupts(ioc);
3080 	mpt3sas_base_sync_reply_irqs(ioc, 1);
3081 	mpt3sas_base_unmask_interrupts(ioc);
3082 
3083 	return scsih_tm_cmd_map_status(ioc, channel, id, lun, type, smid_task);
3084 }
3085 
/**
 * mpt3sas_scsih_issue_tm - main routine for sending tm requests
 * @ioc: per adapter struct
 * @handle: device handle
 * @channel: the channel assigned by the OS
 * @id: the id assigned by the OS
 * @lun: lun number
 * @type: MPI2_SCSITASKMGMT_TASKTYPE__XXX (defined in mpi2_init.h)
 * @smid_task: smid assigned to the task
 * @msix_task: MSIX table index supplied by the OS
 * @timeout: timeout in seconds
 * @tr_method: Target Reset Method
 * Context: user
 *
 * A generic API for sending task management requests to firmware.
 *
 * The callback index is set inside `ioc->tm_cb_idx`.
 * The caller is responsible to check for outstanding commands.
 * The caller must hold ioc->tm_cmds.mutex (see
 * mpt3sas_scsih_issue_locked_tm() for the locked wrapper).
 *
 * Return: SUCCESS or FAILED.
 */
int
mpt3sas_scsih_issue_tm(struct MPT3SAS_ADAPTER *ioc, u16 handle, uint channel,
	uint id, u64 lun, u8 type, u16 smid_task, u16 msix_task,
	u8 timeout, u8 tr_method)
{
	Mpi2SCSITaskManagementRequest_t *mpi_request;
	Mpi2SCSITaskManagementReply_t *mpi_reply;
	Mpi25SCSIIORequest_t *request;
	u16 smid = 0;
	u32 ioc_state;
	int rc;
	u8 issue_reset = 0;

	lockdep_assert_held(&ioc->tm_cmds.mutex);

	/* only one TM command may be outstanding at a time */
	if (ioc->tm_cmds.status != MPT3_CMD_NOT_USED) {
		ioc_info(ioc, "%s: tm_cmd busy!!!\n", __func__);
		return FAILED;
	}

	if (ioc->shost_recovery || ioc->remove_host ||
	    ioc->pci_error_recovery) {
		ioc_info(ioc, "%s: host reset in progress!\n", __func__);
		return FAILED;
	}

	/*
	 * If the IOC is in an unusable state (doorbell in use, fault or
	 * coredump), escalate straight to a hard reset instead of sending
	 * the TM request.
	 */
	ioc_state = mpt3sas_base_get_iocstate(ioc, 0);
	if (ioc_state & MPI2_DOORBELL_USED) {
		dhsprintk(ioc, ioc_info(ioc, "unexpected doorbell active!\n"));
		rc = mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER);
		return (!rc) ? SUCCESS : FAILED;
	}

	if ((ioc_state & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_FAULT) {
		mpt3sas_print_fault_code(ioc, ioc_state &
		    MPI2_DOORBELL_DATA_MASK);
		rc = mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER);
		return (!rc) ? SUCCESS : FAILED;
	} else if ((ioc_state & MPI2_IOC_STATE_MASK) ==
	    MPI2_IOC_STATE_COREDUMP) {
		mpt3sas_print_coredump_info(ioc, ioc_state &
		    MPI2_DOORBELL_DATA_MASK);
		rc = mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER);
		return (!rc) ? SUCCESS : FAILED;
	}

	/* TM requests go through the high-priority request queue */
	smid = mpt3sas_base_get_smid_hpr(ioc, ioc->tm_cb_idx);
	if (!smid) {
		ioc_err(ioc, "%s: failed obtaining a smid\n", __func__);
		return FAILED;
	}

	dtmprintk(ioc,
		  ioc_info(ioc, "sending tm: handle(0x%04x), task_type(0x%02x), smid(%d), timeout(%d), tr_method(0x%x)\n",
			   handle, type, smid_task, timeout, tr_method));
	/* build the TM request frame in place */
	ioc->tm_cmds.status = MPT3_CMD_PENDING;
	mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
	ioc->tm_cmds.smid = smid;
	memset(mpi_request, 0, sizeof(Mpi2SCSITaskManagementRequest_t));
	memset(ioc->tm_cmds.reply, 0, sizeof(Mpi2SCSITaskManagementReply_t));
	mpi_request->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
	mpi_request->DevHandle = cpu_to_le16(handle);
	mpi_request->TaskType = type;
	/* tr_method is carried in MsgFlags for abort/query task types */
	if (type == MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK ||
	    type == MPI2_SCSITASKMGMT_TASKTYPE_QUERY_TASK)
		mpi_request->MsgFlags = tr_method;
	mpi_request->TaskMID = cpu_to_le16(smid_task);
	int_to_scsilun(lun, (struct scsi_lun *)mpi_request->LUN);
	/* freeze the target's queue while the TM is outstanding */
	mpt3sas_scsih_set_tm_flag(ioc, handle);
	init_completion(&ioc->tm_cmds.done);
	ioc->put_smid_hi_priority(ioc, smid, msix_task);
	wait_for_completion_timeout(&ioc->tm_cmds.done, timeout*HZ);
	if (!(ioc->tm_cmds.status & MPT3_CMD_COMPLETE)) {
		/*
		 * NOTE(review): mpt3sas_check_cmd_timeout is presumed to be
		 * a macro that can set issue_reset — confirm against
		 * mpt3sas_base.h.
		 */
		mpt3sas_check_cmd_timeout(ioc,
		    ioc->tm_cmds.status, mpi_request,
		    sizeof(Mpi2SCSITaskManagementRequest_t)/4, issue_reset);
		if (issue_reset) {
			rc = mpt3sas_base_hard_reset_handler(ioc,
					FORCE_BIG_HAMMER);
			rc = (!rc) ? SUCCESS : FAILED;
			goto out;
		}
	}

	/* sync IRQs in case those were busy during flush. */
	mpt3sas_base_sync_reply_irqs(ioc, 0);

	if (ioc->tm_cmds.status & MPT3_CMD_REPLY_VALID) {
		mpt3sas_trigger_master(ioc, MASTER_TRIGGER_TASK_MANAGMENT);
		mpi_reply = ioc->tm_cmds.reply;
		dtmprintk(ioc,
			  ioc_info(ioc, "complete tm: ioc_status(0x%04x), loginfo(0x%08x), term_count(0x%08x)\n",
				   le16_to_cpu(mpi_reply->IOCStatus),
				   le32_to_cpu(mpi_reply->IOCLogInfo),
				   le32_to_cpu(mpi_reply->TerminationCount)));
		if (ioc->logging_level & MPT_DEBUG_TM) {
			_scsih_response_code(ioc, mpi_reply->ResponseCode);
			if (mpi_reply->IOCStatus)
				_debug_dump_mf(mpi_request,
				    sizeof(Mpi2SCSITaskManagementRequest_t)/4);
		}
	}

	/* map the TM outcome to the SUCCESS/FAILED value returned to SML */
	switch (type) {
	case MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK:
		rc = SUCCESS;
		/*
		 * If DevHandle filed in smid_task's entry of request pool
		 * doesn't match with device handle on which this task abort
		 * TM is received then it means that TM has successfully
		 * aborted the timed out command. Since smid_task's entry in
		 * request pool will be memset to zero once the timed out
		 * command is returned to the SML. If the command is not
		 * aborted then smid_task's entry won't be cleared and it
		 * will have same DevHandle value on which this task abort TM
		 * is received and driver will return the TM status as FAILED.
		 */
		request = mpt3sas_base_get_msg_frame(ioc, smid_task);
		if (le16_to_cpu(request->DevHandle) != handle)
			break;

		ioc_info(ioc, "Task abort tm failed: handle(0x%04x),"
		    "timeout(%d) tr_method(0x%x) smid(%d) msix_index(%d)\n",
		    handle, timeout, tr_method, smid_task, msix_task);
		rc = FAILED;
		break;

	case MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET:
	case MPI2_SCSITASKMGMT_TASKTYPE_ABRT_TASK_SET:
	case MPI2_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET:
		rc = scsih_tm_post_processing(ioc, handle, channel, id, lun,
		    type, smid_task);
		break;
	case MPI2_SCSITASKMGMT_TASKTYPE_QUERY_TASK:
		rc = SUCCESS;
		break;
	default:
		rc = FAILED;
		break;
	}

out:
	/* unfreeze the target queue and release the TM command slot */
	mpt3sas_scsih_clear_tm_flag(ioc, handle);
	ioc->tm_cmds.status = MPT3_CMD_NOT_USED;
	return rc;
}
3253 
/**
 * mpt3sas_scsih_issue_locked_tm - locked wrapper around
 *	mpt3sas_scsih_issue_tm
 * @ioc: per adapter struct
 * @handle: device handle
 * @channel: the channel assigned by the OS
 * @id: the id assigned by the OS
 * @lun: lun number
 * @type: MPI2_SCSITASKMGMT_TASKTYPE__XXX (defined in mpi2_init.h)
 * @smid_task: smid assigned to the task
 * @msix_task: MSIX table index supplied by the OS
 * @timeout: timeout in seconds
 * @tr_method: Target Reset Method
 *
 * Acquires ioc->tm_cmds.mutex around mpt3sas_scsih_issue_tm().
 *
 * Return: SUCCESS or FAILED.
 */
int mpt3sas_scsih_issue_locked_tm(struct MPT3SAS_ADAPTER *ioc, u16 handle,
		uint channel, uint id, u64 lun, u8 type, u16 smid_task,
		u16 msix_task, u8 timeout, u8 tr_method)
{
	int rc;

	mutex_lock(&ioc->tm_cmds.mutex);
	rc = mpt3sas_scsih_issue_tm(ioc, handle, channel, id, lun, type,
	    smid_task, msix_task, timeout, tr_method);
	mutex_unlock(&ioc->tm_cmds.mutex);

	return rc;
}
3267 
/**
 * _scsih_tm_display_info - displays info about the device
 * @ioc: per adapter struct
 * @scmd: pointer to scsi command object
 *
 * Called by task management callback handlers. Prints the command and
 * identifying information (handle, address/wwid, enclosure data) for the
 * device the command was issued to; the format depends on whether the
 * target is a RAID volume, an NVMe (pcie) device or a SAS device.
 */
static void
_scsih_tm_display_info(struct MPT3SAS_ADAPTER *ioc, struct scsi_cmnd *scmd)
{
	struct scsi_target *starget = scmd->device->sdev_target;
	struct MPT3SAS_TARGET *priv_target = starget->hostdata;
	struct _sas_device *sas_device = NULL;
	struct _pcie_device *pcie_device = NULL;
	unsigned long flags;
	char *device_str = NULL;

	if (!priv_target)
		return;
	/* WarpDrive controllers hide RAID internals from the user */
	if (ioc->hide_ir_msg)
		device_str = "WarpDrive";
	else
		device_str = "volume";

	scsi_print_command(scmd);
	if (priv_target->flags & MPT_TARGET_FLAGS_VOLUME) {
		starget_printk(KERN_INFO, starget,
			"%s handle(0x%04x), %s wwid(0x%016llx)\n",
			device_str, priv_target->handle,
		    device_str, (unsigned long long)priv_target->sas_address);

	} else if (priv_target->flags & MPT_TARGET_FLAGS_PCIE_DEVICE) {
		spin_lock_irqsave(&ioc->pcie_device_lock, flags);
		pcie_device = __mpt3sas_get_pdev_from_target(ioc, priv_target);
		if (pcie_device) {
			starget_printk(KERN_INFO, starget,
				"handle(0x%04x), wwid(0x%016llx), port(%d)\n",
				pcie_device->handle,
				(unsigned long long)pcie_device->wwid,
				pcie_device->port_num);
			if (pcie_device->enclosure_handle != 0)
				starget_printk(KERN_INFO, starget,
					"enclosure logical id(0x%016llx), slot(%d)\n",
					(unsigned long long)
					pcie_device->enclosure_logical_id,
					pcie_device->slot);
			if (pcie_device->connector_name[0] != '\0')
				starget_printk(KERN_INFO, starget,
					"enclosure level(0x%04x), connector name( %s)\n",
					pcie_device->enclosure_level,
					pcie_device->connector_name);
			/* drop the reference taken by the lookup above */
			pcie_device_put(pcie_device);
		}
		spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);

	} else {
		spin_lock_irqsave(&ioc->sas_device_lock, flags);
		sas_device = __mpt3sas_get_sdev_from_target(ioc, priv_target);
		if (sas_device) {
			if (priv_target->flags &
			    MPT_TARGET_FLAGS_RAID_COMPONENT) {
				starget_printk(KERN_INFO, starget,
				    "volume handle(0x%04x), "
				    "volume wwid(0x%016llx)\n",
				    sas_device->volume_handle,
				   (unsigned long long)sas_device->volume_wwid);
			}
			starget_printk(KERN_INFO, starget,
			    "handle(0x%04x), sas_address(0x%016llx), phy(%d)\n",
			    sas_device->handle,
			    (unsigned long long)sas_device->sas_address,
			    sas_device->phy);

			_scsih_display_enclosure_chassis_info(NULL, sas_device,
			    NULL, starget);

			/* drop the reference taken by the lookup above */
			sas_device_put(sas_device);
		}
		spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
	}
}
3349 
/**
 * scsih_abort - eh threads main abort routine
 * @scmd: pointer to scsi command object
 *
 * Return: SUCCESS if command aborted else FAILED
 */
static int
scsih_abort(struct scsi_cmnd *scmd)
{
	struct MPT3SAS_ADAPTER *ioc = shost_priv(scmd->device->host);
	struct MPT3SAS_DEVICE *sas_device_priv_data;
	struct scsiio_tracker *st = scsi_cmd_priv(scmd);
	u16 handle;
	int r;

	u8 timeout = 30;	/* default TM timeout in seconds */
	struct _pcie_device *pcie_device = NULL;
	sdev_printk(KERN_INFO, scmd->device, "attempting task abort!"
	    "scmd(0x%p), outstanding for %u ms & timeout %u ms\n",
	    scmd, jiffies_to_msecs(jiffies - scmd->jiffies_at_alloc),
	    (scsi_cmd_to_rq(scmd)->timeout / HZ) * 1000);
	_scsih_tm_display_info(ioc, scmd);

	/* device already removed: complete the command instead of aborting */
	sas_device_priv_data = scmd->device->hostdata;
	if (!sas_device_priv_data || !sas_device_priv_data->sas_target ||
	    ioc->remove_host) {
		sdev_printk(KERN_INFO, scmd->device,
		    "device been deleted! scmd(0x%p)\n", scmd);
		scmd->result = DID_NO_CONNECT << 16;
		scsi_done(scmd);
		r = SUCCESS;
		goto out;
	}

	/* check for completed command */
	if (st == NULL || st->cb_idx == 0xFF) {
		sdev_printk(KERN_INFO, scmd->device, "No reference found at "
		    "driver, assuming scmd(0x%p) might have completed\n", scmd);
		scmd->result = DID_RESET << 16;
		r = SUCCESS;
		goto out;
	}

	/* for hidden raid components and volumes this is not supported */
	if (sas_device_priv_data->sas_target->flags &
	    MPT_TARGET_FLAGS_RAID_COMPONENT ||
	    sas_device_priv_data->sas_target->flags & MPT_TARGET_FLAGS_VOLUME) {
		scmd->result = DID_RESET << 16;
		r = FAILED;
		goto out;
	}

	mpt3sas_halt_firmware(ioc);

	handle = sas_device_priv_data->sas_target->handle;
	pcie_device = mpt3sas_get_pdev_by_handle(ioc, handle);
	/* NVMe devices may need a device-specific abort timeout */
	if (pcie_device && (!ioc->tm_custom_handling) &&
	    (!(mpt3sas_scsih_is_pcie_scsi_device(pcie_device->device_info))))
		timeout = ioc->nvme_abort_timeout;
	r = mpt3sas_scsih_issue_locked_tm(ioc, handle, scmd->device->channel,
		scmd->device->id, scmd->device->lun,
		MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK,
		st->smid, st->msix_io, timeout, 0);
	/* Command must be cleared after abort */
	if (r == SUCCESS && st->cb_idx != 0xFF)
		r = FAILED;
 out:
	sdev_printk(KERN_INFO, scmd->device, "task abort: %s scmd(0x%p)\n",
	    ((r == SUCCESS) ? "SUCCESS" : "FAILED"), scmd);
	if (pcie_device)
		pcie_device_put(pcie_device);
	return r;
}
3423 
/**
 * scsih_dev_reset - eh threads main device reset routine
 * @scmd: pointer to scsi command object
 *
 * Issues a LOGICAL_UNIT_RESET task management request for the device
 * the command was sent to.
 *
 * Return: SUCCESS if command aborted else FAILED
 */
static int
scsih_dev_reset(struct scsi_cmnd *scmd)
{
	struct MPT3SAS_ADAPTER *ioc = shost_priv(scmd->device->host);
	struct MPT3SAS_DEVICE *sas_device_priv_data;
	struct _sas_device *sas_device = NULL;
	struct _pcie_device *pcie_device = NULL;
	u16	handle;
	u8	tr_method = 0;
	u8	tr_timeout = 30;	/* default TM timeout in seconds */
	int r;

	struct scsi_target *starget = scmd->device->sdev_target;
	struct MPT3SAS_TARGET *target_priv_data = starget->hostdata;

	sdev_printk(KERN_INFO, scmd->device,
	    "attempting device reset! scmd(0x%p)\n", scmd);
	_scsih_tm_display_info(ioc, scmd);

	/* device already removed: complete the command instead of resetting */
	sas_device_priv_data = scmd->device->hostdata;
	if (!sas_device_priv_data || !sas_device_priv_data->sas_target ||
	    ioc->remove_host) {
		sdev_printk(KERN_INFO, scmd->device,
		    "device been deleted! scmd(0x%p)\n", scmd);
		scmd->result = DID_NO_CONNECT << 16;
		scsi_done(scmd);
		r = SUCCESS;
		goto out;
	}

	/* for hidden raid components obtain the volume_handle */
	handle = 0;
	if (sas_device_priv_data->sas_target->flags &
	    MPT_TARGET_FLAGS_RAID_COMPONENT) {
		sas_device = mpt3sas_get_sdev_from_target(ioc,
				target_priv_data);
		if (sas_device)
			handle = sas_device->volume_handle;
	} else
		handle = sas_device_priv_data->sas_target->handle;

	if (!handle) {
		scmd->result = DID_RESET << 16;
		r = FAILED;
		goto out;
	}

	pcie_device = mpt3sas_get_pdev_by_handle(ioc, handle);

	/* NVMe devices use their own reset timeout and method */
	if (pcie_device && (!ioc->tm_custom_handling) &&
	    (!(mpt3sas_scsih_is_pcie_scsi_device(pcie_device->device_info)))) {
		tr_timeout = pcie_device->reset_timeout;
		tr_method = MPI26_SCSITASKMGMT_MSGFLAGS_PROTOCOL_LVL_RST_PCIE;
	} else
		tr_method = MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET;

	r = mpt3sas_scsih_issue_locked_tm(ioc, handle, scmd->device->channel,
		scmd->device->id, scmd->device->lun,
		MPI2_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET, 0, 0,
		tr_timeout, tr_method);
	/* Check for busy commands after reset */
	if (r == SUCCESS && scsi_device_busy(scmd->device))
		r = FAILED;
 out:
	sdev_printk(KERN_INFO, scmd->device, "device reset: %s scmd(0x%p)\n",
	    ((r == SUCCESS) ? "SUCCESS" : "FAILED"), scmd);

	/* drop references taken by the lookups above */
	if (sas_device)
		sas_device_put(sas_device);
	if (pcie_device)
		pcie_device_put(pcie_device);

	return r;
}
3504 
/**
 * scsih_target_reset - eh threads main target reset routine
 * @scmd: pointer to scsi command object
 *
 * Issues a TARGET_RESET task management request for the target the
 * command was sent to.
 *
 * Return: SUCCESS if command aborted else FAILED
 */
static int
scsih_target_reset(struct scsi_cmnd *scmd)
{
	struct MPT3SAS_ADAPTER *ioc = shost_priv(scmd->device->host);
	struct MPT3SAS_DEVICE *sas_device_priv_data;
	struct _sas_device *sas_device = NULL;
	struct _pcie_device *pcie_device = NULL;
	u16	handle;
	u8	tr_method = 0;
	u8	tr_timeout = 30;	/* default TM timeout in seconds */
	int r;
	struct scsi_target *starget = scmd->device->sdev_target;
	struct MPT3SAS_TARGET *target_priv_data = starget->hostdata;

	starget_printk(KERN_INFO, starget,
	    "attempting target reset! scmd(0x%p)\n", scmd);
	_scsih_tm_display_info(ioc, scmd);

	/* target already removed: complete the command instead of resetting */
	sas_device_priv_data = scmd->device->hostdata;
	if (!sas_device_priv_data || !sas_device_priv_data->sas_target ||
	    ioc->remove_host) {
		starget_printk(KERN_INFO, starget,
		    "target been deleted! scmd(0x%p)\n", scmd);
		scmd->result = DID_NO_CONNECT << 16;
		scsi_done(scmd);
		r = SUCCESS;
		goto out;
	}

	/* for hidden raid components obtain the volume_handle */
	handle = 0;
	if (sas_device_priv_data->sas_target->flags &
	    MPT_TARGET_FLAGS_RAID_COMPONENT) {
		sas_device = mpt3sas_get_sdev_from_target(ioc,
				target_priv_data);
		if (sas_device)
			handle = sas_device->volume_handle;
	} else
		handle = sas_device_priv_data->sas_target->handle;

	if (!handle) {
		scmd->result = DID_RESET << 16;
		r = FAILED;
		goto out;
	}

	pcie_device = mpt3sas_get_pdev_by_handle(ioc, handle);

	/* NVMe devices use their own reset timeout and method */
	if (pcie_device && (!ioc->tm_custom_handling) &&
	    (!(mpt3sas_scsih_is_pcie_scsi_device(pcie_device->device_info)))) {
		tr_timeout = pcie_device->reset_timeout;
		tr_method = MPI26_SCSITASKMGMT_MSGFLAGS_PROTOCOL_LVL_RST_PCIE;
	} else
		tr_method = MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET;
	r = mpt3sas_scsih_issue_locked_tm(ioc, handle, scmd->device->channel,
		scmd->device->id, 0,
		MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET, 0, 0,
	    tr_timeout, tr_method);
	/* Check for busy commands after reset */
	if (r == SUCCESS && atomic_read(&starget->target_busy))
		r = FAILED;
 out:
	starget_printk(KERN_INFO, starget, "target reset: %s scmd(0x%p)\n",
	    ((r == SUCCESS) ? "SUCCESS" : "FAILED"), scmd);

	/* drop references taken by the lookups above */
	if (sas_device)
		sas_device_put(sas_device);
	if (pcie_device)
		pcie_device_put(pcie_device);
	return r;
}
3582 
3583 
3584 /**
3585  * scsih_host_reset - eh threads main host reset routine
3586  * @scmd: pointer to scsi command object
3587  *
3588  * Return: SUCCESS if command aborted else FAILED
3589  */
3590 static int
scsih_host_reset(struct scsi_cmnd * scmd)3591 scsih_host_reset(struct scsi_cmnd *scmd)
3592 {
3593 	struct MPT3SAS_ADAPTER *ioc = shost_priv(scmd->device->host);
3594 	int r, retval;
3595 
3596 	ioc_info(ioc, "attempting host reset! scmd(0x%p)\n", scmd);
3597 	scsi_print_command(scmd);
3598 
3599 	if (ioc->is_driver_loading || ioc->remove_host) {
3600 		ioc_info(ioc, "Blocking the host reset\n");
3601 		r = FAILED;
3602 		goto out;
3603 	}
3604 
3605 	retval = mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER);
3606 	r = (retval < 0) ? FAILED : SUCCESS;
3607 out:
3608 	ioc_info(ioc, "host reset: %s scmd(0x%p)\n",
3609 		 r == SUCCESS ? "SUCCESS" : "FAILED", scmd);
3610 
3611 	return r;
3612 }
3613 
/**
 * _scsih_fw_event_add - insert and queue up fw_event
 * @ioc: per adapter object
 * @fw_event: object describing the event
 * Context: This function will acquire ioc->fw_event_lock.
 *
 * This adds the firmware event object into link list, then queues it up to
 * be processed from user context.
 *
 * Two references are taken on @fw_event: one owned by its presence on
 * ioc->fw_event_list and one owned by the queued work item.
 */
static void
_scsih_fw_event_add(struct MPT3SAS_ADAPTER *ioc, struct fw_event_work *fw_event)
{
	unsigned long flags;

	/* no event thread means events cannot be processed; drop silently */
	if (ioc->firmware_event_thread == NULL)
		return;

	spin_lock_irqsave(&ioc->fw_event_lock, flags);
	fw_event_work_get(fw_event);	/* reference held by fw_event_list */
	INIT_LIST_HEAD(&fw_event->list);
	list_add_tail(&fw_event->list, &ioc->fw_event_list);
	INIT_WORK(&fw_event->work, _firmware_event_work);
	fw_event_work_get(fw_event);	/* reference held by the work item */
	queue_work(ioc->firmware_event_thread, &fw_event->work);
	spin_unlock_irqrestore(&ioc->fw_event_lock, flags);
}
3640 
3641 /**
3642  * _scsih_fw_event_del_from_list - delete fw_event from the list
3643  * @ioc: per adapter object
3644  * @fw_event: object describing the event
3645  * Context: This function will acquire ioc->fw_event_lock.
3646  *
3647  * If the fw_event is on the fw_event_list, remove it and do a put.
3648  */
3649 static void
_scsih_fw_event_del_from_list(struct MPT3SAS_ADAPTER * ioc,struct fw_event_work * fw_event)3650 _scsih_fw_event_del_from_list(struct MPT3SAS_ADAPTER *ioc, struct fw_event_work
3651 	*fw_event)
3652 {
3653 	unsigned long flags;
3654 
3655 	spin_lock_irqsave(&ioc->fw_event_lock, flags);
3656 	if (!list_empty(&fw_event->list)) {
3657 		list_del_init(&fw_event->list);
3658 		fw_event_work_put(fw_event);
3659 	}
3660 	spin_unlock_irqrestore(&ioc->fw_event_lock, flags);
3661 }
3662 
3663 /**
3664  * _scsih_fw_event_requeue - requeue an event
3665  * @ioc: per adapter object
3666  * @fw_event: object describing the event
3667  * @delay: time in milliseconds to wait before retrying the event
3668  *
3669  * Context: This function will acquire ioc->fw_event_lock.
3670  *
3671  * Return nothing.
3672  */
3673 static void
_scsih_fw_event_requeue(struct MPT3SAS_ADAPTER * ioc,struct fw_event_work * fw_event,unsigned long delay)3674 _scsih_fw_event_requeue(struct MPT3SAS_ADAPTER *ioc, struct fw_event_work
3675 	*fw_event, unsigned long delay)
3676 {
3677 	unsigned long flags;
3678 
3679 	if (ioc->firmware_event_thread == NULL)
3680 		return;
3681 
3682 	spin_lock_irqsave(&ioc->fw_event_lock, flags);
3683 	fw_event_work_get(fw_event);
3684 	list_add_tail(&fw_event->list, &ioc->fw_event_list);
3685 	if (!fw_event->delayed_work_active) {
3686 		fw_event->delayed_work_active = 1;
3687 		INIT_DELAYED_WORK(&fw_event->delayed_work,
3688 		    _firmware_event_work_delayed);
3689 	}
3690 	queue_delayed_work(ioc->firmware_event_thread, &fw_event->delayed_work,
3691 	    msecs_to_jiffies(delay));
3692 	spin_unlock_irqrestore(&ioc->fw_event_lock, flags);
3693 }
3694 
3695  /**
3696  * mpt3sas_send_trigger_data_event - send event for processing trigger data
3697  * @ioc: per adapter object
3698  * @event_data: trigger event data
3699  */
3700 void
mpt3sas_send_trigger_data_event(struct MPT3SAS_ADAPTER * ioc,struct SL_WH_TRIGGERS_EVENT_DATA_T * event_data)3701 mpt3sas_send_trigger_data_event(struct MPT3SAS_ADAPTER *ioc,
3702 	struct SL_WH_TRIGGERS_EVENT_DATA_T *event_data)
3703 {
3704 	struct fw_event_work *fw_event;
3705 	u16 sz;
3706 
3707 	if (ioc->is_driver_loading)
3708 		return;
3709 	sz = sizeof(*event_data);
3710 	fw_event = alloc_fw_event_work(sz);
3711 	if (!fw_event)
3712 		return;
3713 	fw_event->event = MPT3SAS_PROCESS_TRIGGER_DIAG;
3714 	fw_event->ioc = ioc;
3715 	memcpy(fw_event->event_data, event_data, sizeof(*event_data));
3716 	_scsih_fw_event_add(ioc, fw_event);
3717 	fw_event_work_put(fw_event);
3718 }
3719 
3720 /**
3721  * _scsih_error_recovery_delete_devices - remove devices not responding
3722  * @ioc: per adapter object
3723  */
3724 static void
_scsih_error_recovery_delete_devices(struct MPT3SAS_ADAPTER * ioc)3725 _scsih_error_recovery_delete_devices(struct MPT3SAS_ADAPTER *ioc)
3726 {
3727 	struct fw_event_work *fw_event;
3728 
3729 	fw_event = alloc_fw_event_work(0);
3730 	if (!fw_event)
3731 		return;
3732 	fw_event->event = MPT3SAS_REMOVE_UNRESPONDING_DEVICES;
3733 	fw_event->ioc = ioc;
3734 	_scsih_fw_event_add(ioc, fw_event);
3735 	fw_event_work_put(fw_event);
3736 }
3737 
3738 /**
3739  * mpt3sas_port_enable_complete - port enable completed (fake event)
3740  * @ioc: per adapter object
3741  */
3742 void
mpt3sas_port_enable_complete(struct MPT3SAS_ADAPTER * ioc)3743 mpt3sas_port_enable_complete(struct MPT3SAS_ADAPTER *ioc)
3744 {
3745 	struct fw_event_work *fw_event;
3746 
3747 	fw_event = alloc_fw_event_work(0);
3748 	if (!fw_event)
3749 		return;
3750 	fw_event->event = MPT3SAS_PORT_ENABLE_COMPLETE;
3751 	fw_event->ioc = ioc;
3752 	_scsih_fw_event_add(ioc, fw_event);
3753 	fw_event_work_put(fw_event);
3754 }
3755 
dequeue_next_fw_event(struct MPT3SAS_ADAPTER * ioc)3756 static struct fw_event_work *dequeue_next_fw_event(struct MPT3SAS_ADAPTER *ioc)
3757 {
3758 	unsigned long flags;
3759 	struct fw_event_work *fw_event = NULL;
3760 
3761 	spin_lock_irqsave(&ioc->fw_event_lock, flags);
3762 	if (!list_empty(&ioc->fw_event_list)) {
3763 		fw_event = list_first_entry(&ioc->fw_event_list,
3764 				struct fw_event_work, list);
3765 		list_del_init(&fw_event->list);
3766 		fw_event_work_put(fw_event);
3767 	}
3768 	spin_unlock_irqrestore(&ioc->fw_event_lock, flags);
3769 
3770 	return fw_event;
3771 }
3772 
3773 /**
3774  * _scsih_fw_event_cleanup_queue - cleanup event queue
3775  * @ioc: per adapter object
3776  *
3777  * Walk the firmware event queue, either killing timers, or waiting
3778  * for outstanding events to complete
3779  *
3780  * Context: task, can sleep
3781  */
3782 static void
_scsih_fw_event_cleanup_queue(struct MPT3SAS_ADAPTER * ioc)3783 _scsih_fw_event_cleanup_queue(struct MPT3SAS_ADAPTER *ioc)
3784 {
3785 	struct fw_event_work *fw_event;
3786 
3787 	if ((list_empty(&ioc->fw_event_list) && !ioc->current_event) ||
3788 	    !ioc->firmware_event_thread)
3789 		return;
3790 	/*
3791 	 * Set current running event as ignore, so that
3792 	 * current running event will exit quickly.
3793 	 * As diag reset has occurred it is of no use
3794 	 * to process remaining stale event data entries.
3795 	 */
3796 	if (ioc->shost_recovery && ioc->current_event)
3797 		ioc->current_event->ignore = 1;
3798 
3799 	ioc->fw_events_cleanup = 1;
3800 	while ((fw_event = dequeue_next_fw_event(ioc)) ||
3801 	     (fw_event = ioc->current_event)) {
3802 
3803 		/*
3804 		 * Don't call cancel_work_sync() for current_event
3805 		 * other than MPT3SAS_REMOVE_UNRESPONDING_DEVICES;
3806 		 * otherwise we may observe deadlock if current
3807 		 * hard reset issued as part of processing the current_event.
3808 		 *
3809 		 * Orginal logic of cleaning the current_event is added
3810 		 * for handling the back to back host reset issued by the user.
3811 		 * i.e. during back to back host reset, driver use to process
3812 		 * the two instances of MPT3SAS_REMOVE_UNRESPONDING_DEVICES
3813 		 * event back to back and this made the drives to unregister
3814 		 * the devices from SML.
3815 		 */
3816 
3817 		if (fw_event == ioc->current_event &&
3818 		    ioc->current_event->event !=
3819 		    MPT3SAS_REMOVE_UNRESPONDING_DEVICES) {
3820 			ioc->current_event = NULL;
3821 			continue;
3822 		}
3823 
3824 		/*
3825 		 * Driver has to clear ioc->start_scan flag when
3826 		 * it is cleaning up MPT3SAS_PORT_ENABLE_COMPLETE,
3827 		 * otherwise scsi_scan_host() API waits for the
3828 		 * 5 minute timer to expire. If we exit from
3829 		 * scsi_scan_host() early then we can issue the
3830 		 * new port enable request as part of current diag reset.
3831 		 */
3832 		if (fw_event->event == MPT3SAS_PORT_ENABLE_COMPLETE) {
3833 			ioc->port_enable_cmds.status |= MPT3_CMD_RESET;
3834 			ioc->start_scan = 0;
3835 		}
3836 
3837 		/*
3838 		 * Wait on the fw_event to complete. If this returns 1, then
3839 		 * the event was never executed, and we need a put for the
3840 		 * reference the work had on the fw_event.
3841 		 *
3842 		 * If it did execute, we wait for it to finish, and the put will
3843 		 * happen from _firmware_event_work()
3844 		 */
3845 		if (cancel_work_sync(&fw_event->work))
3846 			fw_event_work_put(fw_event);
3847 
3848 	}
3849 	ioc->fw_events_cleanup = 0;
3850 }
3851 
3852 /**
3853  * _scsih_internal_device_block - block the sdev device
3854  * @sdev: per device object
3855  * @sas_device_priv_data : per device driver private data
3856  *
3857  * make sure device is blocked without error, if not
3858  * print an error
3859  */
3860 static void
_scsih_internal_device_block(struct scsi_device * sdev,struct MPT3SAS_DEVICE * sas_device_priv_data)3861 _scsih_internal_device_block(struct scsi_device *sdev,
3862 			struct MPT3SAS_DEVICE *sas_device_priv_data)
3863 {
3864 	int r = 0;
3865 
3866 	sdev_printk(KERN_INFO, sdev, "device_block, handle(0x%04x)\n",
3867 	    sas_device_priv_data->sas_target->handle);
3868 	sas_device_priv_data->block = 1;
3869 
3870 	r = scsi_internal_device_block_nowait(sdev);
3871 	if (r == -EINVAL)
3872 		sdev_printk(KERN_WARNING, sdev,
3873 		    "device_block failed with return(%d) for handle(0x%04x)\n",
3874 		    r, sas_device_priv_data->sas_target->handle);
3875 }
3876 
3877 /**
3878  * _scsih_internal_device_unblock - unblock the sdev device
3879  * @sdev: per device object
3880  * @sas_device_priv_data : per device driver private data
3881  * make sure device is unblocked without error, if not retry
3882  * by blocking and then unblocking
3883  */
3884 
3885 static void
_scsih_internal_device_unblock(struct scsi_device * sdev,struct MPT3SAS_DEVICE * sas_device_priv_data)3886 _scsih_internal_device_unblock(struct scsi_device *sdev,
3887 			struct MPT3SAS_DEVICE *sas_device_priv_data)
3888 {
3889 	int r = 0;
3890 
3891 	sdev_printk(KERN_WARNING, sdev, "device_unblock and setting to running, "
3892 	    "handle(0x%04x)\n", sas_device_priv_data->sas_target->handle);
3893 	sas_device_priv_data->block = 0;
3894 	r = scsi_internal_device_unblock_nowait(sdev, SDEV_RUNNING);
3895 	if (r == -EINVAL) {
3896 		/* The device has been set to SDEV_RUNNING by SD layer during
3897 		 * device addition but the request queue is still stopped by
3898 		 * our earlier block call. We need to perform a block again
3899 		 * to get the device to SDEV_BLOCK and then to SDEV_RUNNING */
3900 
3901 		sdev_printk(KERN_WARNING, sdev,
3902 		    "device_unblock failed with return(%d) for handle(0x%04x) "
3903 		    "performing a block followed by an unblock\n",
3904 		    r, sas_device_priv_data->sas_target->handle);
3905 		sas_device_priv_data->block = 1;
3906 		r = scsi_internal_device_block_nowait(sdev);
3907 		if (r)
3908 			sdev_printk(KERN_WARNING, sdev, "retried device_block "
3909 			    "failed with return(%d) for handle(0x%04x)\n",
3910 			    r, sas_device_priv_data->sas_target->handle);
3911 
3912 		sas_device_priv_data->block = 0;
3913 		r = scsi_internal_device_unblock_nowait(sdev, SDEV_RUNNING);
3914 		if (r)
3915 			sdev_printk(KERN_WARNING, sdev, "retried device_unblock"
3916 			    " failed with return(%d) for handle(0x%04x)\n",
3917 			    r, sas_device_priv_data->sas_target->handle);
3918 	}
3919 }
3920 
3921 /**
3922  * _scsih_ublock_io_all_device - unblock every device
3923  * @ioc: per adapter object
3924  * @no_turs: flag to disable TEST UNIT READY checks during device unblocking
3925  *
3926  * change the device state from block to running
3927  */
3928 static void
_scsih_ublock_io_all_device(struct MPT3SAS_ADAPTER * ioc,u8 no_turs)3929 _scsih_ublock_io_all_device(struct MPT3SAS_ADAPTER *ioc, u8 no_turs)
3930 {
3931 	struct MPT3SAS_DEVICE *sas_device_priv_data;
3932 	struct scsi_device *sdev;
3933 	struct MPT3SAS_TARGET *sas_target;
3934 	enum device_responsive_state rc;
3935 	struct _sas_device *sas_device = NULL;
3936 	struct _pcie_device *pcie_device = NULL;
3937 	int count = 0;
3938 	u8 tr_method = 0;
3939 	u8 tr_timeout = 30;
3940 
3941 
3942 	shost_for_each_device(sdev, ioc->shost) {
3943 		sas_device_priv_data = sdev->hostdata;
3944 		if (!sas_device_priv_data)
3945 			continue;
3946 
3947 		sas_target = sas_device_priv_data->sas_target;
3948 		if (!sas_target || sas_target->deleted)
3949 			continue;
3950 
3951 		if (!sas_device_priv_data->block)
3952 			continue;
3953 
3954 		if ((no_turs) || (!issue_scsi_cmd_to_bringup_drive)) {
3955 			sdev_printk(KERN_WARNING, sdev, "device_unblocked handle(0x%04x)\n",
3956 				sas_device_priv_data->sas_target->handle);
3957 			_scsih_internal_device_unblock(sdev, sas_device_priv_data);
3958 			continue;
3959 		}
3960 
3961 		do {
3962 			pcie_device = mpt3sas_get_pdev_by_handle(ioc, sas_target->handle);
3963 			if (pcie_device && (!ioc->tm_custom_handling) &&
3964 				(!(mpt3sas_scsih_is_pcie_scsi_device(pcie_device->device_info)))) {
3965 				tr_timeout = pcie_device->reset_timeout;
3966 				tr_method = MPI26_SCSITASKMGMT_MSGFLAGS_PROTOCOL_LVL_RST_PCIE;
3967 			}
3968 			rc = _scsih_wait_for_device_to_become_ready(ioc,
3969 			    sas_target->handle, 0, (sas_target->flags &
3970 			    MPT_TARGET_FLAGS_RAID_COMPONENT), sdev->lun, tr_timeout, tr_method);
3971 			if (rc == DEVICE_RETRY || rc == DEVICE_START_UNIT ||
3972 			    rc == DEVICE_STOP_UNIT || rc == DEVICE_RETRY_UA)
3973 				ssleep(1);
3974 			if (pcie_device)
3975 				pcie_device_put(pcie_device);
3976 		} while ((rc == DEVICE_RETRY || rc == DEVICE_START_UNIT ||
3977 		    rc == DEVICE_STOP_UNIT || rc == DEVICE_RETRY_UA)
3978 			&& count++ < command_retry_count);
3979 		sas_device_priv_data->block = 0;
3980 		if (rc != DEVICE_READY)
3981 			sas_device_priv_data->deleted = 1;
3982 
3983 		_scsih_internal_device_unblock(sdev, sas_device_priv_data);
3984 
3985 		if (rc != DEVICE_READY) {
3986 			sdev_printk(KERN_WARNING, sdev, "%s: device_offlined,\n"
3987 			    "handle(0x%04x)\n",
3988 			    __func__, sas_device_priv_data->sas_target->handle);
3989 			scsi_device_set_state(sdev, SDEV_OFFLINE);
3990 			sas_device = mpt3sas_get_sdev_by_addr(ioc,
3991 					sas_device_priv_data->sas_target->sas_address,
3992 					sas_device_priv_data->sas_target->port);
3993 			if (sas_device) {
3994 				_scsih_display_enclosure_chassis_info(NULL, sas_device, sdev, NULL);
3995 				sas_device_put(sas_device);
3996 			} else {
3997 				pcie_device = mpt3sas_get_pdev_by_wwid(ioc,
3998 						    sas_device_priv_data->sas_target->sas_address);
3999 				if (pcie_device) {
4000 					if (pcie_device->enclosure_handle != 0)
4001 						sdev_printk(KERN_INFO, sdev, "enclosure logical id\n"
4002 						    "(0x%016llx), slot(%d)\n", (unsigned long long)
4003 							pcie_device->enclosure_logical_id,
4004 							pcie_device->slot);
4005 					if (pcie_device->connector_name[0] != '\0')
4006 						sdev_printk(KERN_INFO, sdev, "enclosure level(0x%04x),\n"
4007 							" connector name( %s)\n",
4008 							pcie_device->enclosure_level,
4009 							pcie_device->connector_name);
4010 					pcie_device_put(pcie_device);
4011 				}
4012 			}
4013 		} else
4014 			sdev_printk(KERN_WARNING, sdev, "device_unblocked,\n"
4015 			    "handle(0x%04x)\n",
4016 			    sas_device_priv_data->sas_target->handle);
4017 	}
4018 }
4019 
4020 /**
4021  * _scsih_ublock_io_device_wait - unblock IO for target
4022  * @ioc: per adapter object
4023  * @sas_address: sas address
4024  * @port: hba port entry
4025  *
4026  * make sure device is reponsponding before unblocking
4027  */
4028 static void
_scsih_ublock_io_device_wait(struct MPT3SAS_ADAPTER * ioc,u64 sas_address,struct hba_port * port)4029 _scsih_ublock_io_device_wait(struct MPT3SAS_ADAPTER *ioc, u64 sas_address,
4030 			     struct hba_port *port)
4031 {
4032 	struct MPT3SAS_DEVICE *sas_device_priv_data;
4033 	struct MPT3SAS_TARGET *sas_target;
4034 	enum device_responsive_state rc;
4035 	struct scsi_device *sdev;
4036 	int host_reset_completion_count;
4037 	struct _sas_device *sas_device;
4038 	struct _pcie_device *pcie_device;
4039 	u8 tr_timeout = 30;
4040 	u8 tr_method = 0;
4041 	int count = 0;
4042 
4043 	/* moving devices from SDEV_OFFLINE to SDEV_BLOCK */
4044 	shost_for_each_device(sdev, ioc->shost) {
4045 		sas_device_priv_data = sdev->hostdata;
4046 		if (!sas_device_priv_data)
4047 			continue;
4048 		sas_target = sas_device_priv_data->sas_target;
4049 		if (!sas_target)
4050 			continue;
4051 		if (sas_target->sas_address != sas_address ||
4052 		    sas_target->port != port)
4053 			continue;
4054 		if (sdev->sdev_state == SDEV_OFFLINE) {
4055 			sas_device_priv_data->block = 1;
4056 			sas_device_priv_data->deleted = 0;
4057 			scsi_device_set_state(sdev, SDEV_RUNNING);
4058 			scsi_internal_device_block_nowait(sdev);
4059 		}
4060 	}
4061 
4062 	/* moving devices from SDEV_BLOCK to SDEV_RUNNING state */
4063 	shost_for_each_device(sdev, ioc->shost) {
4064 		sas_device_priv_data = sdev->hostdata;
4065 		if (!sas_device_priv_data)
4066 			continue;
4067 		sas_target = sas_device_priv_data->sas_target;
4068 		if (!sas_target)
4069 			continue;
4070 		if (sas_target->sas_address != sas_address ||
4071 		    sas_target->port != port)
4072 			continue;
4073 		if (!sas_device_priv_data->block)
4074 			continue;
4075 
4076 		do {
4077 			host_reset_completion_count = 0;
4078 			pcie_device = mpt3sas_get_pdev_by_handle(ioc, sas_target->handle);
4079 			if (pcie_device && (!ioc->tm_custom_handling) &&
4080 				(!(mpt3sas_scsih_is_pcie_scsi_device(pcie_device->device_info)))) {
4081 				tr_timeout = pcie_device->reset_timeout;
4082 				tr_method = MPI26_SCSITASKMGMT_MSGFLAGS_PROTOCOL_LVL_RST_PCIE;
4083 			}
4084 			rc = _scsih_wait_for_device_to_become_ready(ioc,
4085 			      sas_target->handle, 0, (sas_target->flags &
4086 			      MPT_TARGET_FLAGS_RAID_COMPONENT), sdev->lun, tr_timeout, tr_method);
4087 			if (rc == DEVICE_RETRY || rc == DEVICE_START_UNIT ||
4088 			    rc == DEVICE_STOP_UNIT || rc == DEVICE_RETRY_UA) {
4089 				do {
4090 					msleep(500);
4091 					host_reset_completion_count++;
4092 				} while (rc == DEVICE_RETRY &&
4093 							ioc->shost_recovery);
4094 				if (host_reset_completion_count > 1) {
4095 					rc = _scsih_wait_for_device_to_become_ready(ioc,
4096 						sas_target->handle, 0, (sas_target->flags &
4097 						MPT_TARGET_FLAGS_RAID_COMPONENT), sdev->lun,
4098 						tr_timeout, tr_method);
4099 					if (rc == DEVICE_RETRY || rc == DEVICE_START_UNIT ||
4100 					    rc == DEVICE_STOP_UNIT || rc == DEVICE_RETRY_UA)
4101 						msleep(500);
4102 				}
4103 				continue;
4104 			}
4105 			if (pcie_device)
4106 				pcie_device_put(pcie_device);
4107 		} while ((rc == DEVICE_RETRY || rc == DEVICE_START_UNIT ||
4108 		    rc == DEVICE_STOP_UNIT || rc == DEVICE_RETRY_UA)
4109 			&& count++ <= command_retry_count);
4110 
4111 		sas_device_priv_data->block = 0;
4112 		if (rc != DEVICE_READY)
4113 			sas_device_priv_data->deleted = 1;
4114 		scsi_internal_device_unblock_nowait(sdev, SDEV_RUNNING);
4115 
4116 		if (rc != DEVICE_READY) {
4117 			sdev_printk(KERN_WARNING, sdev,
4118 			    "%s: device_offlined, handle(0x%04x)\n",
4119 			    __func__, sas_device_priv_data->sas_target->handle);
4120 
4121 			sas_device = mpt3sas_get_sdev_by_handle(ioc,
4122 				sas_device_priv_data->sas_target->handle);
4123 			if (sas_device) {
4124 				_scsih_display_enclosure_chassis_info(NULL, sas_device, sdev, NULL);
4125 				sas_device_put(sas_device);
4126 			} else {
4127 				pcie_device = mpt3sas_get_pdev_by_handle(ioc,
4128 							sas_device_priv_data->sas_target->handle);
4129 				if (pcie_device) {
4130 					if (pcie_device->enclosure_handle != 0)
4131 						sdev_printk(KERN_INFO, sdev,
4132 							"device_offlined, enclosure logical id(0x%016llx),\n"
4133 							" slot(%d)\n", (unsigned long long)
4134 							pcie_device->enclosure_logical_id,
4135 							pcie_device->slot);
4136 					if (pcie_device->connector_name[0] != '\0')
4137 						sdev_printk(KERN_WARNING, sdev,
4138 							"device_offlined, enclosure level(0x%04x),\n"
4139 							"connector name( %s)\n",
4140 							pcie_device->enclosure_level,
4141 							pcie_device->connector_name);
4142 					pcie_device_put(pcie_device);
4143 				}
4144 			}
4145 			scsi_device_set_state(sdev, SDEV_OFFLINE);
4146 		} else {
4147 			sdev_printk(KERN_WARNING, sdev,
4148 				"device_unblocked, handle(0x%04x)\n",
4149 				sas_device_priv_data->sas_target->handle);
4150 		}
4151 	}
4152 }
4153 
4154 /**
4155  * _scsih_ublock_io_device - prepare device to be deleted
4156  * @ioc: per adapter object
4157  * @sas_address: sas address
4158  * @port: hba port entry
4159  *
4160  * unblock then put device in offline state
4161  */
4162 static void
_scsih_ublock_io_device(struct MPT3SAS_ADAPTER * ioc,u64 sas_address,struct hba_port * port)4163 _scsih_ublock_io_device(struct MPT3SAS_ADAPTER *ioc,
4164 	u64 sas_address, struct hba_port *port)
4165 {
4166 	struct MPT3SAS_DEVICE *sas_device_priv_data;
4167 	struct scsi_device *sdev;
4168 
4169 	shost_for_each_device(sdev, ioc->shost) {
4170 		sas_device_priv_data = sdev->hostdata;
4171 		if (!sas_device_priv_data || !sas_device_priv_data->sas_target)
4172 			continue;
4173 		if (sas_device_priv_data->sas_target->sas_address
4174 		    != sas_address)
4175 			continue;
4176 		if (sas_device_priv_data->sas_target->port != port)
4177 			continue;
4178 		if (sas_device_priv_data->block)
4179 			_scsih_internal_device_unblock(sdev,
4180 				sas_device_priv_data);
4181 	}
4182 }
4183 
4184 /**
4185  * _scsih_block_io_all_device - set the device state to SDEV_BLOCK
4186  * @ioc: per adapter object
4187  *
4188  * During device pull we need to appropriately set the sdev state.
4189  */
4190 static void
_scsih_block_io_all_device(struct MPT3SAS_ADAPTER * ioc)4191 _scsih_block_io_all_device(struct MPT3SAS_ADAPTER *ioc)
4192 {
4193 	struct MPT3SAS_DEVICE *sas_device_priv_data;
4194 	struct scsi_device *sdev;
4195 
4196 	shost_for_each_device(sdev, ioc->shost) {
4197 		sas_device_priv_data = sdev->hostdata;
4198 		if (!sas_device_priv_data)
4199 			continue;
4200 		if (sas_device_priv_data->block)
4201 			continue;
4202 		if (sas_device_priv_data->ignore_delay_remove) {
4203 			sdev_printk(KERN_INFO, sdev,
4204 			"%s skip device_block for SES handle(0x%04x)\n",
4205 			__func__, sas_device_priv_data->sas_target->handle);
4206 			continue;
4207 		}
4208 		_scsih_internal_device_block(sdev, sas_device_priv_data);
4209 	}
4210 }
4211 
4212 /**
4213  * _scsih_block_io_device - set the device state to SDEV_BLOCK
4214  * @ioc: per adapter object
4215  * @handle: device handle
4216  *
4217  * During device pull we need to appropriately set the sdev state.
4218  */
4219 static void
_scsih_block_io_device(struct MPT3SAS_ADAPTER * ioc,u16 handle)4220 _scsih_block_io_device(struct MPT3SAS_ADAPTER *ioc, u16 handle)
4221 {
4222 	struct MPT3SAS_DEVICE *sas_device_priv_data;
4223 	struct scsi_device *sdev;
4224 	struct _sas_device *sas_device;
4225 
4226 	sas_device = mpt3sas_get_sdev_by_handle(ioc, handle);
4227 
4228 	shost_for_each_device(sdev, ioc->shost) {
4229 		sas_device_priv_data = sdev->hostdata;
4230 		if (!sas_device_priv_data)
4231 			continue;
4232 		if (sas_device_priv_data->sas_target->handle != handle)
4233 			continue;
4234 		if (sas_device_priv_data->block)
4235 			continue;
4236 		if (sas_device && sas_device->pend_sas_rphy_add)
4237 			continue;
4238 		if (sas_device_priv_data->ignore_delay_remove) {
4239 			sdev_printk(KERN_INFO, sdev,
4240 			"%s skip device_block for SES handle(0x%04x)\n",
4241 			__func__, sas_device_priv_data->sas_target->handle);
4242 			continue;
4243 		}
4244 		_scsih_internal_device_block(sdev, sas_device_priv_data);
4245 	}
4246 
4247 	if (sas_device)
4248 		sas_device_put(sas_device);
4249 }
4250 
4251 /**
4252  * _scsih_block_io_to_children_attached_to_ex
4253  * @ioc: per adapter object
4254  * @sas_expander: the sas_device object
4255  *
4256  * This routine set sdev state to SDEV_BLOCK for all devices
4257  * attached to this expander. This function called when expander is
4258  * pulled.
4259  */
4260 static void
_scsih_block_io_to_children_attached_to_ex(struct MPT3SAS_ADAPTER * ioc,struct _sas_node * sas_expander)4261 _scsih_block_io_to_children_attached_to_ex(struct MPT3SAS_ADAPTER *ioc,
4262 	struct _sas_node *sas_expander)
4263 {
4264 	struct _sas_port *mpt3sas_port;
4265 	struct _sas_device *sas_device;
4266 	struct _sas_node *expander_sibling;
4267 	unsigned long flags;
4268 
4269 	if (!sas_expander)
4270 		return;
4271 
4272 	list_for_each_entry(mpt3sas_port,
4273 	   &sas_expander->sas_port_list, port_list) {
4274 		if (mpt3sas_port->remote_identify.device_type ==
4275 		    SAS_END_DEVICE) {
4276 			spin_lock_irqsave(&ioc->sas_device_lock, flags);
4277 			sas_device = __mpt3sas_get_sdev_by_addr(ioc,
4278 			    mpt3sas_port->remote_identify.sas_address,
4279 			    mpt3sas_port->hba_port);
4280 			if (sas_device) {
4281 				set_bit(sas_device->handle,
4282 						ioc->blocking_handles);
4283 				sas_device_put(sas_device);
4284 			}
4285 			spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
4286 		}
4287 	}
4288 
4289 	list_for_each_entry(mpt3sas_port,
4290 	   &sas_expander->sas_port_list, port_list) {
4291 
4292 		if (mpt3sas_port->remote_identify.device_type ==
4293 		    SAS_EDGE_EXPANDER_DEVICE ||
4294 		    mpt3sas_port->remote_identify.device_type ==
4295 		    SAS_FANOUT_EXPANDER_DEVICE) {
4296 			expander_sibling =
4297 			    mpt3sas_scsih_expander_find_by_sas_address(
4298 			    ioc, mpt3sas_port->remote_identify.sas_address,
4299 			    mpt3sas_port->hba_port);
4300 			_scsih_block_io_to_children_attached_to_ex(ioc,
4301 			    expander_sibling);
4302 		}
4303 	}
4304 }
4305 
4306 /**
4307  * _scsih_block_io_to_children_attached_directly
4308  * @ioc: per adapter object
4309  * @event_data: topology change event data
4310  *
4311  * This routine set sdev state to SDEV_BLOCK for all devices
4312  * direct attached during device pull.
4313  */
4314 static void
_scsih_block_io_to_children_attached_directly(struct MPT3SAS_ADAPTER * ioc,Mpi2EventDataSasTopologyChangeList_t * event_data)4315 _scsih_block_io_to_children_attached_directly(struct MPT3SAS_ADAPTER *ioc,
4316 	Mpi2EventDataSasTopologyChangeList_t *event_data)
4317 {
4318 	int i;
4319 	u16 handle;
4320 	u16 reason_code;
4321 
4322 	for (i = 0; i < event_data->NumEntries; i++) {
4323 		handle = le16_to_cpu(event_data->PHY[i].AttachedDevHandle);
4324 		if (!handle)
4325 			continue;
4326 		reason_code = event_data->PHY[i].PhyStatus &
4327 		    MPI2_EVENT_SAS_TOPO_RC_MASK;
4328 		if (reason_code == MPI2_EVENT_SAS_TOPO_RC_DELAY_NOT_RESPONDING)
4329 			_scsih_block_io_device(ioc, handle);
4330 	}
4331 }
4332 
4333 /**
4334  * _scsih_block_io_to_pcie_children_attached_directly
4335  * @ioc: per adapter object
4336  * @event_data: topology change event data
4337  *
4338  * This routine set sdev state to SDEV_BLOCK for all devices
4339  * direct attached during device pull/reconnect.
4340  */
4341 static void
_scsih_block_io_to_pcie_children_attached_directly(struct MPT3SAS_ADAPTER * ioc,Mpi26EventDataPCIeTopologyChangeList_t * event_data)4342 _scsih_block_io_to_pcie_children_attached_directly(struct MPT3SAS_ADAPTER *ioc,
4343 		Mpi26EventDataPCIeTopologyChangeList_t *event_data)
4344 {
4345 	int i;
4346 	u16 handle;
4347 	u16 reason_code;
4348 
4349 	for (i = 0; i < event_data->NumEntries; i++) {
4350 		handle =
4351 			le16_to_cpu(event_data->PortEntry[i].AttachedDevHandle);
4352 		if (!handle)
4353 			continue;
4354 		reason_code = event_data->PortEntry[i].PortStatus;
4355 		if (reason_code ==
4356 				MPI26_EVENT_PCIE_TOPO_PS_DELAY_NOT_RESPONDING)
4357 			_scsih_block_io_device(ioc, handle);
4358 	}
4359 }
4360 /**
4361  * _scsih_tm_tr_send - send task management request
4362  * @ioc: per adapter object
4363  * @handle: device handle
4364  * Context: interrupt time.
4365  *
4366  * This code is to initiate the device removal handshake protocol
4367  * with controller firmware.  This function will issue target reset
4368  * using high priority request queue.  It will send a sas iounit
4369  * control request (MPI2_SAS_OP_REMOVE_DEVICE) from this completion.
4370  *
4371  * This is designed to send muliple task management request at the same
4372  * time to the fifo. If the fifo is full, we will append the request,
4373  * and process it in a future completion.
4374  */
4375 static void
_scsih_tm_tr_send(struct MPT3SAS_ADAPTER * ioc,u16 handle)4376 _scsih_tm_tr_send(struct MPT3SAS_ADAPTER *ioc, u16 handle)
4377 {
4378 	Mpi2SCSITaskManagementRequest_t *mpi_request;
4379 	u16 smid;
4380 	struct _sas_device *sas_device = NULL;
4381 	struct _pcie_device *pcie_device = NULL;
4382 	struct MPT3SAS_TARGET *sas_target_priv_data = NULL;
4383 	u64 sas_address = 0;
4384 	unsigned long flags;
4385 	struct _tr_list *delayed_tr;
4386 	u32 ioc_state;
4387 	u8 tr_method = 0;
4388 	struct hba_port *port = NULL;
4389 
4390 	if (ioc->pci_error_recovery) {
4391 		dewtprintk(ioc,
4392 			   ioc_info(ioc, "%s: host in pci error recovery: handle(0x%04x)\n",
4393 				    __func__, handle));
4394 		return;
4395 	}
4396 	ioc_state = mpt3sas_base_get_iocstate(ioc, 1);
4397 	if (ioc_state != MPI2_IOC_STATE_OPERATIONAL) {
4398 		dewtprintk(ioc,
4399 			   ioc_info(ioc, "%s: host is not operational: handle(0x%04x)\n",
4400 				    __func__, handle));
4401 		return;
4402 	}
4403 
4404 	/* if PD, then return */
4405 	if (test_bit(handle, ioc->pd_handles))
4406 		return;
4407 
4408 	clear_bit(handle, ioc->pend_os_device_add);
4409 
4410 	spin_lock_irqsave(&ioc->sas_device_lock, flags);
4411 	sas_device = __mpt3sas_get_sdev_by_handle(ioc, handle);
4412 	if (sas_device && sas_device->starget &&
4413 	    sas_device->starget->hostdata) {
4414 		sas_target_priv_data = sas_device->starget->hostdata;
4415 		sas_target_priv_data->deleted = 1;
4416 		sas_address = sas_device->sas_address;
4417 		port = sas_device->port;
4418 	}
4419 	spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
4420 	if (!sas_device) {
4421 		spin_lock_irqsave(&ioc->pcie_device_lock, flags);
4422 		pcie_device = __mpt3sas_get_pdev_by_handle(ioc, handle);
4423 		if (pcie_device && pcie_device->starget &&
4424 			pcie_device->starget->hostdata) {
4425 			sas_target_priv_data = pcie_device->starget->hostdata;
4426 			sas_target_priv_data->deleted = 1;
4427 			sas_address = pcie_device->wwid;
4428 		}
4429 		spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
4430 		if (pcie_device && (!ioc->tm_custom_handling) &&
4431 		    (!(mpt3sas_scsih_is_pcie_scsi_device(
4432 		    pcie_device->device_info))))
4433 			tr_method =
4434 			    MPI26_SCSITASKMGMT_MSGFLAGS_PROTOCOL_LVL_RST_PCIE;
4435 		else
4436 			tr_method = MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET;
4437 	}
4438 	if (sas_target_priv_data) {
4439 		dewtprintk(ioc,
4440 			   ioc_info(ioc, "setting delete flag: handle(0x%04x), sas_addr(0x%016llx)\n",
4441 				    handle, (u64)sas_address));
4442 		if (sas_device) {
4443 			if (sas_device->enclosure_handle != 0)
4444 				dewtprintk(ioc,
4445 					   ioc_info(ioc, "setting delete flag:enclosure logical id(0x%016llx), slot(%d)\n",
4446 						    (u64)sas_device->enclosure_logical_id,
4447 						    sas_device->slot));
4448 			if (sas_device->connector_name[0] != '\0')
4449 				dewtprintk(ioc,
4450 					   ioc_info(ioc, "setting delete flag: enclosure level(0x%04x), connector name( %s)\n",
4451 						    sas_device->enclosure_level,
4452 						    sas_device->connector_name));
4453 		} else if (pcie_device) {
4454 			if (pcie_device->enclosure_handle != 0)
4455 				dewtprintk(ioc,
4456 					   ioc_info(ioc, "setting delete flag: logical id(0x%016llx), slot(%d)\n",
4457 						    (u64)pcie_device->enclosure_logical_id,
4458 						    pcie_device->slot));
4459 			if (pcie_device->connector_name[0] != '\0')
4460 				dewtprintk(ioc,
4461 					   ioc_info(ioc, "setting delete flag:, enclosure level(0x%04x), connector name( %s)\n",
4462 						    pcie_device->enclosure_level,
4463 						    pcie_device->connector_name));
4464 		}
4465 		_scsih_ublock_io_device(ioc, sas_address, port);
4466 		sas_target_priv_data->handle = MPT3SAS_INVALID_DEVICE_HANDLE;
4467 	}
4468 
4469 	smid = mpt3sas_base_get_smid_hpr(ioc, ioc->tm_tr_cb_idx);
4470 	if (!smid) {
4471 		delayed_tr = kzalloc_obj(*delayed_tr, GFP_ATOMIC);
4472 		if (!delayed_tr)
4473 			goto out;
4474 		INIT_LIST_HEAD(&delayed_tr->list);
4475 		delayed_tr->handle = handle;
4476 		list_add_tail(&delayed_tr->list, &ioc->delayed_tr_list);
4477 		dewtprintk(ioc,
4478 			   ioc_info(ioc, "DELAYED:tr:handle(0x%04x), (open)\n",
4479 				    handle));
4480 		goto out;
4481 	}
4482 
4483 	dewtprintk(ioc,
4484 		   ioc_info(ioc, "tr_send:handle(0x%04x), (open), smid(%d), cb(%d)\n",
4485 			    handle, smid, ioc->tm_tr_cb_idx));
4486 	mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
4487 	memset(mpi_request, 0, sizeof(Mpi2SCSITaskManagementRequest_t));
4488 	mpi_request->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
4489 	mpi_request->DevHandle = cpu_to_le16(handle);
4490 	mpi_request->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET;
4491 	mpi_request->MsgFlags = tr_method;
4492 	set_bit(handle, ioc->device_remove_in_progress);
4493 	ioc->put_smid_hi_priority(ioc, smid, 0);
4494 	mpt3sas_trigger_master(ioc, MASTER_TRIGGER_DEVICE_REMOVAL);
4495 
4496 out:
4497 	if (sas_device)
4498 		sas_device_put(sas_device);
4499 	if (pcie_device)
4500 		pcie_device_put(pcie_device);
4501 }
4502 
4503 /**
 * _scsih_tm_tr_complete - target reset completion routine
4505  * @ioc: per adapter object
4506  * @smid: system request message index
4507  * @msix_index: MSIX table index supplied by the OS
4508  * @reply: reply message frame(lower 32bit addr)
4509  * Context: interrupt time.
4510  *
4511  * This is the target reset completion routine.
4512  * This code is part of the code to initiate the device removal
4513  * handshake protocol with controller firmware.
4514  * It will send a sas iounit control request (MPI2_SAS_OP_REMOVE_DEVICE)
4515  *
4516  * Return: 1 meaning mf should be freed from _base_interrupt
4517  *         0 means the mf is freed from this function.
4518  */
static u8
_scsih_tm_tr_complete(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index,
	u32 reply)
{
	u16 handle;
	Mpi2SCSITaskManagementRequest_t *mpi_request_tm;
	Mpi2SCSITaskManagementReply_t *mpi_reply =
	    mpt3sas_base_get_reply_virt_addr(ioc, reply);
	Mpi2SasIoUnitControlRequest_t *mpi_request;
	u16 smid_sas_ctrl;
	u32 ioc_state;
	struct _sc_list *delayed_sc;

	/* Bail out (frame freed by caller) if the adapter can no longer
	 * accept new requests.
	 */
	if (ioc->pci_error_recovery) {
		dewtprintk(ioc,
			   ioc_info(ioc, "%s: host in pci error recovery\n",
				    __func__));
		return 1;
	}
	ioc_state = mpt3sas_base_get_iocstate(ioc, 1);
	if (ioc_state != MPI2_IOC_STATE_OPERATIONAL) {
		dewtprintk(ioc,
			   ioc_info(ioc, "%s: host is not operational\n",
				    __func__));
		return 1;
	}
	if (unlikely(!mpi_reply)) {
		ioc_err(ioc, "mpi_reply not valid at %s:%d/%s()!\n",
			__FILE__, __LINE__, __func__);
		return 1;
	}
	/* Cross-check the device handle in the reply against the one in
	 * the original TM request frame; a mismatch means a spurious
	 * interrupt.
	 */
	mpi_request_tm = mpt3sas_base_get_msg_frame(ioc, smid);
	handle = le16_to_cpu(mpi_request_tm->DevHandle);
	if (handle != le16_to_cpu(mpi_reply->DevHandle)) {
		dewtprintk(ioc,
			   ioc_err(ioc, "spurious interrupt: handle(0x%04x:0x%04x), smid(%d)!!!\n",
				   handle,
				   le16_to_cpu(mpi_reply->DevHandle), smid));
		return 0;
	}

	mpt3sas_trigger_master(ioc, MASTER_TRIGGER_TASK_MANAGMENT);
	dewtprintk(ioc,
		   ioc_info(ioc, "tr_complete:handle(0x%04x), (open) smid(%d), ioc_status(0x%04x), loginfo(0x%08x), completed(%d)\n",
			    handle, smid, le16_to_cpu(mpi_reply->IOCStatus),
			    le32_to_cpu(mpi_reply->IOCLogInfo),
			    le32_to_cpu(mpi_reply->TerminationCount)));

	/* Next step of the removal handshake: send the SAS IO unit
	 * control request (OP_REMOVE_DEVICE).  If no free smid is
	 * available, queue it on delayed_sc_list; either way hand the
	 * current smid to the pending-TM feeder.
	 */
	smid_sas_ctrl = mpt3sas_base_get_smid(ioc, ioc->tm_sas_control_cb_idx);
	if (!smid_sas_ctrl) {
		delayed_sc = kzalloc_obj(*delayed_sc, GFP_ATOMIC);
		if (!delayed_sc)
			return _scsih_check_for_pending_tm(ioc, smid);
		INIT_LIST_HEAD(&delayed_sc->list);
		delayed_sc->handle = le16_to_cpu(mpi_request_tm->DevHandle);
		list_add_tail(&delayed_sc->list, &ioc->delayed_sc_list);
		dewtprintk(ioc,
			   ioc_info(ioc, "DELAYED:sc:handle(0x%04x), (open)\n",
				    handle));
		return _scsih_check_for_pending_tm(ioc, smid);
	}

	dewtprintk(ioc,
		   ioc_info(ioc, "sc_send:handle(0x%04x), (open), smid(%d), cb(%d)\n",
			    handle, smid_sas_ctrl, ioc->tm_sas_control_cb_idx));
	mpi_request = mpt3sas_base_get_msg_frame(ioc, smid_sas_ctrl);
	memset(mpi_request, 0, sizeof(Mpi2SasIoUnitControlRequest_t));
	mpi_request->Function = MPI2_FUNCTION_SAS_IO_UNIT_CONTROL;
	mpi_request->Operation = MPI2_SAS_OP_REMOVE_DEVICE;
	/* DevHandle is copied in wire (little-endian) format verbatim. */
	mpi_request->DevHandle = mpi_request_tm->DevHandle;
	ioc->put_smid_default(ioc, smid_sas_ctrl);

	return _scsih_check_for_pending_tm(ioc, smid);
}
4593 
/**
 * _scsih_allow_scmd_to_device - check whether scmd needs to
 *				 issue to IOC or not.
4596  * @ioc: per adapter object
4597  * @scmd: pointer to scsi command object
4598  *
4599  * Returns true if scmd can be issued to IOC otherwise returns false.
4600  */
_scsih_allow_scmd_to_device(struct MPT3SAS_ADAPTER * ioc,struct scsi_cmnd * scmd)4601 inline bool _scsih_allow_scmd_to_device(struct MPT3SAS_ADAPTER *ioc,
4602 	struct scsi_cmnd *scmd)
4603 {
4604 
4605 	if (ioc->pci_error_recovery)
4606 		return false;
4607 
4608 	if (ioc->hba_mpi_version_belonged == MPI2_VERSION) {
4609 		if (ioc->remove_host)
4610 			return false;
4611 
4612 		return true;
4613 	}
4614 
4615 	if (ioc->remove_host) {
4616 
4617 		switch (scmd->cmnd[0]) {
4618 		case SYNCHRONIZE_CACHE:
4619 		case START_STOP:
4620 			return true;
4621 		default:
4622 			return false;
4623 		}
4624 	}
4625 
4626 	return true;
4627 }
4628 
4629 /**
4630  * _scsih_sas_control_complete - completion routine
4631  * @ioc: per adapter object
4632  * @smid: system request message index
4633  * @msix_index: MSIX table index supplied by the OS
4634  * @reply: reply message frame(lower 32bit addr)
4635  * Context: interrupt time.
4636  *
4637  * This is the sas iounit control completion routine.
4638  * This code is part of the code to initiate the device removal
4639  * handshake protocol with controller firmware.
4640  *
4641  * Return: 1 meaning mf should be freed from _base_interrupt
4642  *         0 means the mf is freed from this function.
4643  */
4644 static u8
_scsih_sas_control_complete(struct MPT3SAS_ADAPTER * ioc,u16 smid,u8 msix_index,u32 reply)4645 _scsih_sas_control_complete(struct MPT3SAS_ADAPTER *ioc, u16 smid,
4646 	u8 msix_index, u32 reply)
4647 {
4648 	Mpi2SasIoUnitControlReply_t *mpi_reply =
4649 	    mpt3sas_base_get_reply_virt_addr(ioc, reply);
4650 
4651 	if (likely(mpi_reply)) {
4652 		dewtprintk(ioc,
4653 			   ioc_info(ioc, "sc_complete:handle(0x%04x), (open) smid(%d), ioc_status(0x%04x), loginfo(0x%08x)\n",
4654 				    le16_to_cpu(mpi_reply->DevHandle), smid,
4655 				    le16_to_cpu(mpi_reply->IOCStatus),
4656 				    le32_to_cpu(mpi_reply->IOCLogInfo)));
4657 		if (le16_to_cpu(mpi_reply->IOCStatus) ==
4658 		     MPI2_IOCSTATUS_SUCCESS) {
4659 			clear_bit(le16_to_cpu(mpi_reply->DevHandle),
4660 			    ioc->device_remove_in_progress);
4661 		}
4662 	} else {
4663 		ioc_err(ioc, "mpi_reply not valid at %s:%d/%s()!\n",
4664 			__FILE__, __LINE__, __func__);
4665 	}
4666 	return mpt3sas_check_for_pending_internal_cmds(ioc, smid);
4667 }
4668 
4669 /**
4670  * _scsih_tm_tr_volume_send - send target reset request for volumes
4671  * @ioc: per adapter object
4672  * @handle: device handle
4673  * Context: interrupt time.
4674  *
4675  * This is designed to send muliple task management request at the same
4676  * time to the fifo. If the fifo is full, we will append the request,
4677  * and process it in a future completion.
4678  */
4679 static void
_scsih_tm_tr_volume_send(struct MPT3SAS_ADAPTER * ioc,u16 handle)4680 _scsih_tm_tr_volume_send(struct MPT3SAS_ADAPTER *ioc, u16 handle)
4681 {
4682 	Mpi2SCSITaskManagementRequest_t *mpi_request;
4683 	u16 smid;
4684 	struct _tr_list *delayed_tr;
4685 
4686 	if (ioc->pci_error_recovery) {
4687 		dewtprintk(ioc,
4688 			   ioc_info(ioc, "%s: host reset in progress!\n",
4689 				    __func__));
4690 		return;
4691 	}
4692 
4693 	smid = mpt3sas_base_get_smid_hpr(ioc, ioc->tm_tr_volume_cb_idx);
4694 	if (!smid) {
4695 		delayed_tr = kzalloc_obj(*delayed_tr, GFP_ATOMIC);
4696 		if (!delayed_tr)
4697 			return;
4698 		INIT_LIST_HEAD(&delayed_tr->list);
4699 		delayed_tr->handle = handle;
4700 		list_add_tail(&delayed_tr->list, &ioc->delayed_tr_volume_list);
4701 		dewtprintk(ioc,
4702 			   ioc_info(ioc, "DELAYED:tr:handle(0x%04x), (open)\n",
4703 				    handle));
4704 		return;
4705 	}
4706 
4707 	dewtprintk(ioc,
4708 		   ioc_info(ioc, "tr_send:handle(0x%04x), (open), smid(%d), cb(%d)\n",
4709 			    handle, smid, ioc->tm_tr_volume_cb_idx));
4710 	mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
4711 	memset(mpi_request, 0, sizeof(Mpi2SCSITaskManagementRequest_t));
4712 	mpi_request->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
4713 	mpi_request->DevHandle = cpu_to_le16(handle);
4714 	mpi_request->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET;
4715 	ioc->put_smid_hi_priority(ioc, smid, 0);
4716 }
4717 
4718 /**
4719  * _scsih_tm_volume_tr_complete - target reset completion
4720  * @ioc: per adapter object
4721  * @smid: system request message index
4722  * @msix_index: MSIX table index supplied by the OS
4723  * @reply: reply message frame(lower 32bit addr)
4724  * Context: interrupt time.
4725  *
4726  * Return: 1 meaning mf should be freed from _base_interrupt
4727  *         0 means the mf is freed from this function.
4728  */
static u8
_scsih_tm_volume_tr_complete(struct MPT3SAS_ADAPTER *ioc, u16 smid,
	u8 msix_index, u32 reply)
{
	u16 handle;
	Mpi2SCSITaskManagementRequest_t *mpi_request_tm;
	Mpi2SCSITaskManagementReply_t *mpi_reply =
	    mpt3sas_base_get_reply_virt_addr(ioc, reply);

	/* Nothing to do while a host reset or PCI error recovery is in
	 * progress; tell the caller to free the message frame.
	 */
	if (ioc->shost_recovery || ioc->pci_error_recovery) {
		dewtprintk(ioc,
			   ioc_info(ioc, "%s: host reset in progress!\n",
				    __func__));
		return 1;
	}
	if (unlikely(!mpi_reply)) {
		ioc_err(ioc, "mpi_reply not valid at %s:%d/%s()!\n",
			__FILE__, __LINE__, __func__);
		return 1;
	}

	/* Validate the reply against the original TM request frame; a
	 * handle mismatch indicates a spurious interrupt.
	 */
	mpi_request_tm = mpt3sas_base_get_msg_frame(ioc, smid);
	handle = le16_to_cpu(mpi_request_tm->DevHandle);
	if (handle != le16_to_cpu(mpi_reply->DevHandle)) {
		dewtprintk(ioc,
			   ioc_err(ioc, "spurious interrupt: handle(0x%04x:0x%04x), smid(%d)!!!\n",
				   handle, le16_to_cpu(mpi_reply->DevHandle),
				   smid));
		return 0;
	}

	dewtprintk(ioc,
		   ioc_info(ioc, "tr_complete:handle(0x%04x), (open) smid(%d), ioc_status(0x%04x), loginfo(0x%08x), completed(%d)\n",
			    handle, smid, le16_to_cpu(mpi_reply->IOCStatus),
			    le32_to_cpu(mpi_reply->IOCLogInfo),
			    le32_to_cpu(mpi_reply->TerminationCount)));

	/* Feed the next queued target reset, if any. */
	return _scsih_check_for_pending_tm(ioc, smid);
}
4768 
4769 /**
4770  * _scsih_issue_delayed_event_ack - issue delayed Event ACK messages
4771  * @ioc: per adapter object
4772  * @smid: system request message index
4773  * @event: Event ID
4774  * @event_context: used to track events uniquely
4775  *
4776  * Context - processed in interrupt context.
4777  */
static void
_scsih_issue_delayed_event_ack(struct MPT3SAS_ADAPTER *ioc, u16 smid, U16 event,
				U32 event_context)
{
	Mpi2EventAckRequest_t *ack_request;
	int i = smid - ioc->internal_smid;	/* index into internal_lookup[] */
	unsigned long flags;

	/* Without releasing the smid just update the
	 * call back index and reuse the same smid for
	 * processing this delayed request
	 */
	spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
	ioc->internal_lookup[i].cb_idx = ioc->base_cb_idx;
	spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);

	dewtprintk(ioc,
		   ioc_info(ioc, "EVENT ACK: event(0x%04x), smid(%d), cb(%d)\n",
			    le16_to_cpu(event), smid, ioc->base_cb_idx));
	/* Build the EVENT_ACK frame.  'event' and 'event_context' are
	 * kept in wire (little-endian) format and copied verbatim; they
	 * are byte-swapped only for the debug print above.
	 */
	ack_request = mpt3sas_base_get_msg_frame(ioc, smid);
	memset(ack_request, 0, sizeof(Mpi2EventAckRequest_t));
	ack_request->Function = MPI2_FUNCTION_EVENT_ACK;
	ack_request->Event = event;
	ack_request->EventContext = event_context;
	ack_request->VF_ID = 0;  /* TODO */
	ack_request->VP_ID = 0;
	ioc->put_smid_default(ioc, smid);
}
4806 
4807 /**
4808  * _scsih_issue_delayed_sas_io_unit_ctrl - issue delayed
4809  *				sas_io_unit_ctrl messages
4810  * @ioc: per adapter object
4811  * @smid: system request message index
4812  * @handle: device handle
4813  *
4814  * Context - processed in interrupt context.
4815  */
static void
_scsih_issue_delayed_sas_io_unit_ctrl(struct MPT3SAS_ADAPTER *ioc,
					u16 smid, u16 handle)
{
	Mpi2SasIoUnitControlRequest_t *mpi_request;
	u32 ioc_state;
	int i = smid - ioc->internal_smid;	/* index into internal_lookup[] */
	unsigned long flags;

	/* The delayed request is pointless once the host is gone or the
	 * adapter cannot accept new commands.
	 */
	if (ioc->remove_host) {
		dewtprintk(ioc,
			   ioc_info(ioc, "%s: host has been removed\n",
				    __func__));
		return;
	} else if (ioc->pci_error_recovery) {
		dewtprintk(ioc,
			   ioc_info(ioc, "%s: host in pci error recovery\n",
				    __func__));
		return;
	}
	ioc_state = mpt3sas_base_get_iocstate(ioc, 1);
	if (ioc_state != MPI2_IOC_STATE_OPERATIONAL) {
		dewtprintk(ioc,
			   ioc_info(ioc, "%s: host is not operational\n",
				    __func__));
		return;
	}

	/* Without releasing the smid just update the
	 * call back index and reuse the same smid for
	 * processing this delayed request
	 */
	spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
	ioc->internal_lookup[i].cb_idx = ioc->tm_sas_control_cb_idx;
	spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);

	dewtprintk(ioc,
		   ioc_info(ioc, "sc_send:handle(0x%04x), (open), smid(%d), cb(%d)\n",
			    handle, smid, ioc->tm_sas_control_cb_idx));
	/* Build and post the REMOVE_DEVICE sas_io_unit_ctrl request. */
	mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
	memset(mpi_request, 0, sizeof(Mpi2SasIoUnitControlRequest_t));
	mpi_request->Function = MPI2_FUNCTION_SAS_IO_UNIT_CONTROL;
	mpi_request->Operation = MPI2_SAS_OP_REMOVE_DEVICE;
	mpi_request->DevHandle = cpu_to_le16(handle);
	ioc->put_smid_default(ioc, smid);
}
4862 
4863 /**
4864  * mpt3sas_check_for_pending_internal_cmds - check for pending internal messages
4865  * @ioc: per adapter object
4866  * @smid: system request message index
4867  *
4868  * Context: Executed in interrupt context
4869  *
4870  * This will check delayed internal messages list, and process the
4871  * next request.
4872  *
4873  * Return: 1 meaning mf should be freed from _base_interrupt
4874  *         0 means the mf is freed from this function.
4875  */
4876 u8
mpt3sas_check_for_pending_internal_cmds(struct MPT3SAS_ADAPTER * ioc,u16 smid)4877 mpt3sas_check_for_pending_internal_cmds(struct MPT3SAS_ADAPTER *ioc, u16 smid)
4878 {
4879 	struct _sc_list *delayed_sc;
4880 	struct _event_ack_list *delayed_event_ack;
4881 
4882 	if (!list_empty(&ioc->delayed_event_ack_list)) {
4883 		delayed_event_ack = list_entry(ioc->delayed_event_ack_list.next,
4884 						struct _event_ack_list, list);
4885 		_scsih_issue_delayed_event_ack(ioc, smid,
4886 		  delayed_event_ack->Event, delayed_event_ack->EventContext);
4887 		list_del(&delayed_event_ack->list);
4888 		kfree(delayed_event_ack);
4889 		return 0;
4890 	}
4891 
4892 	if (!list_empty(&ioc->delayed_sc_list)) {
4893 		delayed_sc = list_entry(ioc->delayed_sc_list.next,
4894 						struct _sc_list, list);
4895 		_scsih_issue_delayed_sas_io_unit_ctrl(ioc, smid,
4896 						 delayed_sc->handle);
4897 		list_del(&delayed_sc->list);
4898 		kfree(delayed_sc);
4899 		return 0;
4900 	}
4901 	return 1;
4902 }
4903 
4904 /**
4905  * _scsih_check_for_pending_tm - check for pending task management
4906  * @ioc: per adapter object
4907  * @smid: system request message index
4908  *
4909  * This will check delayed target reset list, and feed the
4910  * next reqeust.
4911  *
4912  * Return: 1 meaning mf should be freed from _base_interrupt
4913  *         0 means the mf is freed from this function.
4914  */
4915 static u8
_scsih_check_for_pending_tm(struct MPT3SAS_ADAPTER * ioc,u16 smid)4916 _scsih_check_for_pending_tm(struct MPT3SAS_ADAPTER *ioc, u16 smid)
4917 {
4918 	struct _tr_list *delayed_tr;
4919 
4920 	if (!list_empty(&ioc->delayed_tr_volume_list)) {
4921 		delayed_tr = list_entry(ioc->delayed_tr_volume_list.next,
4922 		    struct _tr_list, list);
4923 		mpt3sas_base_free_smid(ioc, smid);
4924 		_scsih_tm_tr_volume_send(ioc, delayed_tr->handle);
4925 		list_del(&delayed_tr->list);
4926 		kfree(delayed_tr);
4927 		return 0;
4928 	}
4929 
4930 	if (!list_empty(&ioc->delayed_tr_list)) {
4931 		delayed_tr = list_entry(ioc->delayed_tr_list.next,
4932 		    struct _tr_list, list);
4933 		mpt3sas_base_free_smid(ioc, smid);
4934 		_scsih_tm_tr_send(ioc, delayed_tr->handle);
4935 		list_del(&delayed_tr->list);
4936 		kfree(delayed_tr);
4937 		return 0;
4938 	}
4939 
4940 	return 1;
4941 }
4942 
4943 /**
4944  * _scsih_check_topo_delete_events - sanity check on topo events
4945  * @ioc: per adapter object
4946  * @event_data: the event data payload
4947  *
4948  * This routine added to better handle cable breaker.
4949  *
4950  * This handles the case where driver receives multiple expander
4951  * add and delete events in a single shot.  When there is a delete event
4952  * the routine will void any pending add events waiting in the event queue.
4953  */
static void
_scsih_check_topo_delete_events(struct MPT3SAS_ADAPTER *ioc,
	Mpi2EventDataSasTopologyChangeList_t *event_data)
{
	struct fw_event_work *fw_event;
	Mpi2EventDataSasTopologyChangeList_t *local_event_data;
	u16 expander_handle;
	struct _sas_node *sas_expander;
	unsigned long flags;
	int i, reason_code;
	u16 handle;

	/* Kick off a target reset for every attached device the event
	 * reports as not responding.
	 */
	for (i = 0 ; i < event_data->NumEntries; i++) {
		handle = le16_to_cpu(event_data->PHY[i].AttachedDevHandle);
		if (!handle)
			continue;
		reason_code = event_data->PHY[i].PhyStatus &
		    MPI2_EVENT_SAS_TOPO_RC_MASK;
		if (reason_code == MPI2_EVENT_SAS_TOPO_RC_TARG_NOT_RESPONDING)
			_scsih_tm_tr_send(ioc, handle);
	}

	expander_handle = le16_to_cpu(event_data->ExpanderDevHandle);
	if (expander_handle < ioc->sas_hba.num_phys) {
		/* NOTE(review): presumably handles below the HBA phy count
		 * denote direct-attached (non-expander) events — confirm
		 * this handle-range assumption against the firmware spec.
		 */
		_scsih_block_io_to_children_attached_directly(ioc, event_data);
		return;
	}
	if (event_data->ExpStatus ==
	    MPI2_EVENT_SAS_TOPO_ES_DELAY_NOT_RESPONDING) {
		/* put expander attached devices into blocking state */
		spin_lock_irqsave(&ioc->sas_node_lock, flags);
		sas_expander = mpt3sas_scsih_expander_find_by_handle(ioc,
		    expander_handle);
		_scsih_block_io_to_children_attached_to_ex(ioc, sas_expander);
		spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
		/* Drain every handle marked in blocking_handles, clearing
		 * each bit as it is processed.
		 */
		do {
			handle = find_first_bit(ioc->blocking_handles,
			    ioc->facts.MaxDevHandle);
			if (handle < ioc->facts.MaxDevHandle)
				_scsih_block_io_device(ioc, handle);
		} while (test_and_clear_bit(handle, ioc->blocking_handles));
	} else if (event_data->ExpStatus == MPI2_EVENT_SAS_TOPO_ES_RESPONDING)
		_scsih_block_io_to_children_attached_directly(ioc, event_data);

	/* Only a fully not-responding expander voids queued events. */
	if (event_data->ExpStatus != MPI2_EVENT_SAS_TOPO_ES_NOT_RESPONDING)
		return;

	/* mark ignore flag for pending events */
	spin_lock_irqsave(&ioc->fw_event_lock, flags);
	list_for_each_entry(fw_event, &ioc->fw_event_list, list) {
		if (fw_event->event != MPI2_EVENT_SAS_TOPOLOGY_CHANGE_LIST ||
		    fw_event->ignore)
			continue;
		local_event_data = (Mpi2EventDataSasTopologyChangeList_t *)
				   fw_event->event_data;
		/* Void any queued add/responding event for the expander
		 * that just went away.
		 */
		if (local_event_data->ExpStatus ==
		    MPI2_EVENT_SAS_TOPO_ES_ADDED ||
		    local_event_data->ExpStatus ==
		    MPI2_EVENT_SAS_TOPO_ES_RESPONDING) {
			if (le16_to_cpu(local_event_data->ExpanderDevHandle) ==
			    expander_handle) {
				dewtprintk(ioc,
					   ioc_info(ioc, "setting ignoring flag\n"));
				fw_event->ignore = 1;
			}
		}
	}
	spin_unlock_irqrestore(&ioc->fw_event_lock, flags);
}
5023 
5024 /**
5025  * _scsih_check_pcie_topo_remove_events - sanity check on topo
5026  * events
5027  * @ioc: per adapter object
5028  * @event_data: the event data payload
5029  *
5030  * This handles the case where driver receives multiple switch
5031  * or device add and delete events in a single shot.  When there
5032  * is a delete event the routine will void any pending add
5033  * events waiting in the event queue.
5034  */
static void
_scsih_check_pcie_topo_remove_events(struct MPT3SAS_ADAPTER *ioc,
	Mpi26EventDataPCIeTopologyChangeList_t *event_data)
{
	struct fw_event_work *fw_event;
	Mpi26EventDataPCIeTopologyChangeList_t *local_event_data;
	unsigned long flags;
	int i, reason_code;
	u16 handle, switch_handle;

	/* Kick off a target reset for every port whose attached device is
	 * reported as not responding.
	 */
	for (i = 0; i < event_data->NumEntries; i++) {
		handle =
			le16_to_cpu(event_data->PortEntry[i].AttachedDevHandle);
		if (!handle)
			continue;
		reason_code = event_data->PortEntry[i].PortStatus;
		if (reason_code == MPI26_EVENT_PCIE_TOPO_PS_NOT_RESPONDING)
			_scsih_tm_tr_send(ioc, handle);
	}

	switch_handle = le16_to_cpu(event_data->SwitchDevHandle);
	/* No switch handle: the devices are attached directly. */
	if (!switch_handle) {
		_scsih_block_io_to_pcie_children_attached_directly(
							ioc, event_data);
		return;
	}
    /* TODO We are not supporting cascaded PCIe Switch removal yet*/
	if ((event_data->SwitchStatus
		== MPI26_EVENT_PCIE_TOPO_SS_DELAY_NOT_RESPONDING) ||
		(event_data->SwitchStatus ==
					MPI26_EVENT_PCIE_TOPO_SS_RESPONDING))
		_scsih_block_io_to_pcie_children_attached_directly(
							ioc, event_data);

	/* NOTE(review): SAS topology ES_* constants are compared against
	 * the PCIe SwitchStatus here and below — presumably their numeric
	 * values match the MPI26_EVENT_PCIE_TOPO_SS_* equivalents; confirm
	 * against the MPI 2.6 headers.
	 */
	if (event_data->SwitchStatus != MPI2_EVENT_SAS_TOPO_ES_NOT_RESPONDING)
		return;

	/* mark ignore flag for pending events */
	spin_lock_irqsave(&ioc->fw_event_lock, flags);
	list_for_each_entry(fw_event, &ioc->fw_event_list, list) {
		if (fw_event->event != MPI2_EVENT_PCIE_TOPOLOGY_CHANGE_LIST ||
			fw_event->ignore)
			continue;
		local_event_data =
			(Mpi26EventDataPCIeTopologyChangeList_t *)
			fw_event->event_data;
		if (local_event_data->SwitchStatus ==
		    MPI2_EVENT_SAS_TOPO_ES_ADDED ||
		    local_event_data->SwitchStatus ==
		    MPI2_EVENT_SAS_TOPO_ES_RESPONDING) {
			/* Void any queued add/responding event for the
			 * switch that just went away.
			 */
			if (le16_to_cpu(local_event_data->SwitchDevHandle) ==
				switch_handle) {
				dewtprintk(ioc,
					   ioc_info(ioc, "setting ignoring flag for switch event\n"));
				fw_event->ignore = 1;
			}
		}
	}
	spin_unlock_irqrestore(&ioc->fw_event_lock, flags);
}
5095 
5096 /**
5097  * _scsih_set_volume_delete_flag - setting volume delete flag
5098  * @ioc: per adapter object
5099  * @handle: device handle
5100  *
5101  * This returns nothing.
5102  */
5103 static void
_scsih_set_volume_delete_flag(struct MPT3SAS_ADAPTER * ioc,u16 handle)5104 _scsih_set_volume_delete_flag(struct MPT3SAS_ADAPTER *ioc, u16 handle)
5105 {
5106 	struct _raid_device *raid_device;
5107 	struct MPT3SAS_TARGET *sas_target_priv_data;
5108 	unsigned long flags;
5109 
5110 	spin_lock_irqsave(&ioc->raid_device_lock, flags);
5111 	raid_device = mpt3sas_raid_device_find_by_handle(ioc, handle);
5112 	if (raid_device && raid_device->starget &&
5113 	    raid_device->starget->hostdata) {
5114 		sas_target_priv_data =
5115 		    raid_device->starget->hostdata;
5116 		sas_target_priv_data->deleted = 1;
5117 		dewtprintk(ioc,
5118 			   ioc_info(ioc, "setting delete flag: handle(0x%04x), wwid(0x%016llx)\n",
5119 				    handle, (u64)raid_device->wwid));
5120 	}
5121 	spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
5122 }
5123 
5124 /**
5125  * _scsih_set_volume_handle_for_tr - set handle for target reset to volume
5126  * @handle: input handle
5127  * @a: handle for volume a
5128  * @b: handle for volume b
5129  *
5130  * IR firmware only supports two raid volumes.  The purpose of this
5131  * routine is to set the volume handle in either a or b. When the given
5132  * input handle is non-zero, or when a and b have not been set before.
5133  */
5134 static void
_scsih_set_volume_handle_for_tr(u16 handle,u16 * a,u16 * b)5135 _scsih_set_volume_handle_for_tr(u16 handle, u16 *a, u16 *b)
5136 {
5137 	if (!handle || handle == *a || handle == *b)
5138 		return;
5139 	if (!*a)
5140 		*a = handle;
5141 	else if (!*b)
5142 		*b = handle;
5143 }
5144 
5145 /**
5146  * _scsih_check_ir_config_unhide_events - check for UNHIDE events
5147  * @ioc: per adapter object
5148  * @event_data: the event data payload
5149  * Context: interrupt time.
5150  *
5151  * This routine will send target reset to volume, followed by target
5152  * resets to the PDs. This is called when a PD has been removed, or
5153  * volume has been deleted or removed. When the target reset is sent
5154  * to volume, the PD target resets need to be queued to start upon
5155  * completion of the volume target reset.
5156  */
static void
_scsih_check_ir_config_unhide_events(struct MPT3SAS_ADAPTER *ioc,
	Mpi2EventDataIrConfigChangeList_t *event_data)
{
	Mpi2EventIrConfigElement_t *element;
	int i;
	u16 handle, volume_handle, a, b;
	struct _tr_list *delayed_tr;

	/* a/b hold up to two volume handles that receive target resets. */
	a = 0;
	b = 0;

	if (ioc->is_warpdrive)
		return;

	/* Volume Resets for Deleted or Removed */
	element = (Mpi2EventIrConfigElement_t *)&event_data->ConfigElement[0];
	for (i = 0; i < event_data->NumElements; i++, element++) {
		if (le32_to_cpu(event_data->Flags) &
		    MPI2_EVENT_IR_CHANGE_FLAGS_FOREIGN_CONFIG)
			continue;
		if (element->ReasonCode ==
		    MPI2_EVENT_IR_CHANGE_RC_VOLUME_DELETED ||
		    element->ReasonCode ==
		    MPI2_EVENT_IR_CHANGE_RC_REMOVED) {
			volume_handle = le16_to_cpu(element->VolDevHandle);
			_scsih_set_volume_delete_flag(ioc, volume_handle);
			_scsih_set_volume_handle_for_tr(volume_handle, &a, &b);
		}
	}

	/* Volume Resets for UNHIDE events */
	element = (Mpi2EventIrConfigElement_t *)&event_data->ConfigElement[0];
	for (i = 0; i < event_data->NumElements; i++, element++) {
		if (le32_to_cpu(event_data->Flags) &
		    MPI2_EVENT_IR_CHANGE_FLAGS_FOREIGN_CONFIG)
			continue;
		if (element->ReasonCode == MPI2_EVENT_IR_CHANGE_RC_UNHIDE) {
			volume_handle = le16_to_cpu(element->VolDevHandle);
			_scsih_set_volume_handle_for_tr(volume_handle, &a, &b);
		}
	}

	if (a)
		_scsih_tm_tr_volume_send(ioc, a);
	if (b)
		_scsih_tm_tr_volume_send(ioc, b);

	/* PD target resets: a PD belonging to volume a or b must wait for
	 * that volume's target reset to complete, so it is queued on
	 * delayed_tr_list instead of being sent immediately.
	 */
	element = (Mpi2EventIrConfigElement_t *)&event_data->ConfigElement[0];
	for (i = 0; i < event_data->NumElements; i++, element++) {
		if (element->ReasonCode != MPI2_EVENT_IR_CHANGE_RC_UNHIDE)
			continue;
		handle = le16_to_cpu(element->PhysDiskDevHandle);
		volume_handle = le16_to_cpu(element->VolDevHandle);
		clear_bit(handle, ioc->pd_handles);
		if (!volume_handle)
			_scsih_tm_tr_send(ioc, handle);
		else if (volume_handle == a || volume_handle == b) {
			delayed_tr = kzalloc_obj(*delayed_tr, GFP_ATOMIC);
			/* NOTE(review): a failed GFP_ATOMIC allocation here
			 * crashes the kernel via BUG_ON — consider a
			 * graceful fallback.
			 */
			BUG_ON(!delayed_tr);
			INIT_LIST_HEAD(&delayed_tr->list);
			delayed_tr->handle = handle;
			list_add_tail(&delayed_tr->list, &ioc->delayed_tr_list);
			dewtprintk(ioc,
				   ioc_info(ioc, "DELAYED:tr:handle(0x%04x), (open)\n",
					    handle));
		} else
			_scsih_tm_tr_send(ioc, handle);
	}
}
5228 
5229 
5230 /**
5231  * _scsih_check_volume_delete_events - set delete flag for volumes
5232  * @ioc: per adapter object
5233  * @event_data: the event data payload
5234  * Context: interrupt time.
5235  *
5236  * This will handle the case when the cable connected to entire volume is
5237  * pulled. We will take care of setting the deleted flag so normal IO will
5238  * not be sent.
5239  */
5240 static void
_scsih_check_volume_delete_events(struct MPT3SAS_ADAPTER * ioc,Mpi2EventDataIrVolume_t * event_data)5241 _scsih_check_volume_delete_events(struct MPT3SAS_ADAPTER *ioc,
5242 	Mpi2EventDataIrVolume_t *event_data)
5243 {
5244 	u32 state;
5245 
5246 	if (event_data->ReasonCode != MPI2_EVENT_IR_VOLUME_RC_STATE_CHANGED)
5247 		return;
5248 	state = le32_to_cpu(event_data->NewValue);
5249 	if (state == MPI2_RAID_VOL_STATE_MISSING || state ==
5250 	    MPI2_RAID_VOL_STATE_FAILED)
5251 		_scsih_set_volume_delete_flag(ioc,
5252 		    le16_to_cpu(event_data->VolDevHandle));
5253 }
5254 
5255 /**
5256  * _scsih_temp_threshold_events - display temperature threshold exceeded events
5257  * @ioc: per adapter object
5258  * @event_data: the temp threshold event data
5259  * Context: interrupt time.
5260  */
5261 static void
_scsih_temp_threshold_events(struct MPT3SAS_ADAPTER * ioc,Mpi2EventDataTemperature_t * event_data)5262 _scsih_temp_threshold_events(struct MPT3SAS_ADAPTER *ioc,
5263 	Mpi2EventDataTemperature_t *event_data)
5264 {
5265 	u32 doorbell;
5266 	if (ioc->temp_sensors_count >= event_data->SensorNum) {
5267 		ioc_err(ioc, "Temperature Threshold flags %s%s%s%s exceeded for Sensor: %d !!!\n",
5268 			le16_to_cpu(event_data->Status) & 0x1 ? "0 " : " ",
5269 			le16_to_cpu(event_data->Status) & 0x2 ? "1 " : " ",
5270 			le16_to_cpu(event_data->Status) & 0x4 ? "2 " : " ",
5271 			le16_to_cpu(event_data->Status) & 0x8 ? "3 " : " ",
5272 			event_data->SensorNum);
5273 		ioc_err(ioc, "Current Temp In Celsius: %d\n",
5274 			event_data->CurrentTemperature);
5275 		if (ioc->hba_mpi_version_belonged != MPI2_VERSION) {
5276 			doorbell = mpt3sas_base_get_iocstate(ioc, 0);
5277 			if ((doorbell & MPI2_IOC_STATE_MASK) ==
5278 			    MPI2_IOC_STATE_FAULT) {
5279 				mpt3sas_print_fault_code(ioc,
5280 				    doorbell & MPI2_DOORBELL_DATA_MASK);
5281 			} else if ((doorbell & MPI2_IOC_STATE_MASK) ==
5282 			    MPI2_IOC_STATE_COREDUMP) {
5283 				mpt3sas_print_coredump_info(ioc,
5284 				    doorbell & MPI2_DOORBELL_DATA_MASK);
5285 			}
5286 		}
5287 	}
5288 }
5289 
static int _scsih_set_satl_pending(struct scsi_cmnd *scmd, bool pending)
{
	struct MPT3SAS_DEVICE *priv = scmd->device->hostdata;
	bool is_ata_passthrough;

	/* Only ATA passthrough commands are serialized through the gate. */
	is_ata_passthrough = (scmd->cmnd[0] == ATA_12 ||
			      scmd->cmnd[0] == ATA_16);
	if (!is_ata_passthrough)
		return 0;

	if (!pending) {
		clear_bit(0, &priv->ata_command_pending);
		return 0;
	}

	/* Non-zero means another ATA passthrough is already outstanding. */
	return test_and_set_bit(0, &priv->ata_command_pending);
}
5303 
5304 /**
5305  * _scsih_flush_running_cmds - completing outstanding commands.
5306  * @ioc: per adapter object
5307  *
5308  * The flushing out of all pending scmd commands following host reset,
5309  * where all IO is dropped to the floor.
5310  */
static void
_scsih_flush_running_cmds(struct MPT3SAS_ADAPTER *ioc)
{
	struct scsi_cmnd *scmd;
	struct scsiio_tracker *st;
	u16 smid;
	int count = 0;

	/* Walk every SCSI IO smid and complete whatever is still active. */
	for (smid = 1; smid <= ioc->scsiio_depth; smid++) {
		scmd = mpt3sas_scsih_scsi_lookup_get(ioc, smid);
		if (!scmd)
			continue;
		count++;
		/* Release the SATL serialization gate for this command. */
		_scsih_set_satl_pending(scmd, false);
		st = scsi_cmd_priv(scmd);
		mpt3sas_base_clear_st(ioc, st);
		scsi_dma_unmap(scmd);
		/* DID_NO_CONNECT when the host is going away; DID_RESET
		 * otherwise (the midlayer may retry the command).
		 */
		if (ioc->pci_error_recovery || ioc->remove_host)
			scmd->result = DID_NO_CONNECT << 16;
		else
			scmd->result = DID_RESET << 16;
		scsi_done(scmd);
	}
	dtmprintk(ioc, ioc_info(ioc, "completing %d cmds\n", count));
}
5336 
5337 /**
5338  * _scsih_setup_eedp - setup MPI request for EEDP transfer
5339  * @ioc: per adapter object
5340  * @scmd: pointer to scsi command object
5341  * @mpi_request: pointer to the SCSI_IO request message frame
5342  *
5343  * Supporting protection 1 and 3.
5344  */
5345 static void
_scsih_setup_eedp(struct MPT3SAS_ADAPTER * ioc,struct scsi_cmnd * scmd,Mpi25SCSIIORequest_t * mpi_request)5346 _scsih_setup_eedp(struct MPT3SAS_ADAPTER *ioc, struct scsi_cmnd *scmd,
5347 	Mpi25SCSIIORequest_t *mpi_request)
5348 {
5349 	u16 eedp_flags;
5350 	Mpi25SCSIIORequest_t *mpi_request_3v =
5351 	   (Mpi25SCSIIORequest_t *)mpi_request;
5352 
5353 	switch (scsi_get_prot_op(scmd)) {
5354 	case SCSI_PROT_READ_STRIP:
5355 		eedp_flags = MPI2_SCSIIO_EEDPFLAGS_CHECK_REMOVE_OP;
5356 		break;
5357 	case SCSI_PROT_WRITE_INSERT:
5358 		eedp_flags = MPI2_SCSIIO_EEDPFLAGS_INSERT_OP;
5359 		break;
5360 	default:
5361 		return;
5362 	}
5363 
5364 	if (scmd->prot_flags & SCSI_PROT_GUARD_CHECK)
5365 		eedp_flags |= MPI2_SCSIIO_EEDPFLAGS_CHECK_GUARD;
5366 
5367 	if (scmd->prot_flags & SCSI_PROT_REF_CHECK)
5368 		eedp_flags |= MPI2_SCSIIO_EEDPFLAGS_CHECK_REFTAG;
5369 
5370 	if (scmd->prot_flags & SCSI_PROT_REF_INCREMENT) {
5371 		eedp_flags |= MPI2_SCSIIO_EEDPFLAGS_INC_PRI_REFTAG;
5372 
5373 		mpi_request->CDB.EEDP32.PrimaryReferenceTag =
5374 			cpu_to_be32(scsi_prot_ref_tag(scmd));
5375 	}
5376 
5377 	mpi_request_3v->EEDPBlockSize = cpu_to_le16(scsi_prot_interval(scmd));
5378 
5379 	if (ioc->is_gen35_ioc)
5380 		eedp_flags |= MPI25_SCSIIO_EEDPFLAGS_APPTAG_DISABLE_MODE;
5381 	mpi_request->EEDPFlags = cpu_to_le16(eedp_flags);
5382 }
5383 
5384 /**
5385  * _scsih_eedp_error_handling - return sense code for EEDP errors
5386  * @scmd: pointer to scsi command object
5387  * @ioc_status: ioc status
5388  */
5389 static void
_scsih_eedp_error_handling(struct scsi_cmnd * scmd,u16 ioc_status)5390 _scsih_eedp_error_handling(struct scsi_cmnd *scmd, u16 ioc_status)
5391 {
5392 	u8 ascq;
5393 
5394 	switch (ioc_status) {
5395 	case MPI2_IOCSTATUS_EEDP_GUARD_ERROR:
5396 		ascq = 0x01;
5397 		break;
5398 	case MPI2_IOCSTATUS_EEDP_APP_TAG_ERROR:
5399 		ascq = 0x02;
5400 		break;
5401 	case MPI2_IOCSTATUS_EEDP_REF_TAG_ERROR:
5402 		ascq = 0x03;
5403 		break;
5404 	default:
5405 		ascq = 0x00;
5406 		break;
5407 	}
5408 	scsi_build_sense(scmd, 0, ILLEGAL_REQUEST, 0x10, ascq);
5409 	set_host_byte(scmd, DID_ABORT);
5410 }
5411 
5412 /**
5413  * scsih_qcmd - main scsi request entry point
5414  * @shost: SCSI host pointer
5415  * @scmd: pointer to scsi command object
5416  *
5417  * The callback index is set inside `ioc->scsi_io_cb_idx`.
5418  *
5419  * Return: 0 on success.  If there's a failure, return either:
5420  * SCSI_MLQUEUE_DEVICE_BUSY if the device queue is full, or
5421  * SCSI_MLQUEUE_HOST_BUSY if the entire host queue is full
5422  */
scsih_qcmd(struct Scsi_Host * shost,struct scsi_cmnd * scmd)5423 static enum scsi_qc_status scsih_qcmd(struct Scsi_Host *shost,
5424 				      struct scsi_cmnd *scmd)
5425 {
5426 	struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
5427 	struct MPT3SAS_DEVICE *sas_device_priv_data;
5428 	struct MPT3SAS_TARGET *sas_target_priv_data;
5429 	struct _raid_device *raid_device;
5430 	struct request *rq = scsi_cmd_to_rq(scmd);
5431 	int class;
5432 	Mpi25SCSIIORequest_t *mpi_request;
5433 	struct _pcie_device *pcie_device = NULL;
5434 	u32 mpi_control;
5435 	u16 smid;
5436 	u16 handle;
5437 
5438 	if (ioc->logging_level & MPT_DEBUG_SCSI)
5439 		scsi_print_command(scmd);
5440 
5441 	sas_device_priv_data = scmd->device->hostdata;
5442 	if (!sas_device_priv_data || !sas_device_priv_data->sas_target) {
5443 		scmd->result = DID_NO_CONNECT << 16;
5444 		scsi_done(scmd);
5445 		return 0;
5446 	}
5447 
5448 	if (!(_scsih_allow_scmd_to_device(ioc, scmd))) {
5449 		scmd->result = DID_NO_CONNECT << 16;
5450 		scsi_done(scmd);
5451 		return 0;
5452 	}
5453 
5454 	sas_target_priv_data = sas_device_priv_data->sas_target;
5455 
5456 	/* invalid device handle */
5457 	handle = sas_target_priv_data->handle;
5458 
5459 	/*
5460 	 * Avoid error handling escallation when device is disconnected
5461 	 */
5462 	if (handle == MPT3SAS_INVALID_DEVICE_HANDLE || sas_device_priv_data->block) {
5463 		if (scmd->device->host->shost_state == SHOST_RECOVERY &&
5464 		    scmd->cmnd[0] == TEST_UNIT_READY) {
5465 			scsi_build_sense(scmd, 0, UNIT_ATTENTION, 0x29, 0x07);
5466 			scsi_done(scmd);
5467 			return 0;
5468 		}
5469 	}
5470 
5471 	if (handle == MPT3SAS_INVALID_DEVICE_HANDLE) {
5472 		scmd->result = DID_NO_CONNECT << 16;
5473 		scsi_done(scmd);
5474 		return 0;
5475 	}
5476 
5477 
5478 	if (ioc->shost_recovery || ioc->ioc_link_reset_in_progress) {
5479 		/* host recovery or link resets sent via IOCTLs */
5480 		return SCSI_MLQUEUE_HOST_BUSY;
5481 	} else if (sas_target_priv_data->deleted) {
5482 		/* device has been deleted */
5483 		scmd->result = DID_NO_CONNECT << 16;
5484 		scsi_done(scmd);
5485 		return 0;
5486 	} else if (sas_target_priv_data->tm_busy ||
5487 		   sas_device_priv_data->block) {
5488 		/* device busy with task management */
5489 		return SCSI_MLQUEUE_DEVICE_BUSY;
5490 	}
5491 
5492 	/*
5493 	 * Bug work around for firmware SATL handling.  The loop
5494 	 * is based on atomic operations and ensures consistency
5495 	 * since we're lockless at this point
5496 	 */
5497 	do {
5498 		if (test_bit(0, &sas_device_priv_data->ata_command_pending))
5499 			return SCSI_MLQUEUE_DEVICE_BUSY;
5500 	} while (_scsih_set_satl_pending(scmd, true));
5501 
5502 	if (scmd->sc_data_direction == DMA_FROM_DEVICE)
5503 		mpi_control = MPI2_SCSIIO_CONTROL_READ;
5504 	else if (scmd->sc_data_direction == DMA_TO_DEVICE)
5505 		mpi_control = MPI2_SCSIIO_CONTROL_WRITE;
5506 	else
5507 		mpi_control = MPI2_SCSIIO_CONTROL_NODATATRANSFER;
5508 
5509 	/* set tags */
5510 	mpi_control |= MPI2_SCSIIO_CONTROL_SIMPLEQ;
5511 	/* NCQ Prio supported, make sure control indicated high priority */
5512 	if (sas_device_priv_data->ncq_prio_enable) {
5513 		class = IOPRIO_PRIO_CLASS(req_get_ioprio(rq));
5514 		if (class == IOPRIO_CLASS_RT)
5515 			mpi_control |= 1 << MPI2_SCSIIO_CONTROL_CMDPRI_SHIFT;
5516 	}
5517 	/* Make sure Device is not raid volume.
5518 	 * We do not expose raid functionality to upper layer for warpdrive.
5519 	 */
5520 	if (((!ioc->is_warpdrive && !scsih_is_raid(&scmd->device->sdev_gendev))
5521 		&& !scsih_is_nvme(&scmd->device->sdev_gendev))
5522 		&& sas_is_tlr_enabled(scmd->device) && scmd->cmd_len != 32)
5523 		mpi_control |= MPI2_SCSIIO_CONTROL_TLR_ON;
5524 
5525 	smid = mpt3sas_base_get_smid_scsiio(ioc, ioc->scsi_io_cb_idx, scmd);
5526 	if (!smid) {
5527 		ioc_err(ioc, "%s: failed obtaining a smid\n", __func__);
5528 		_scsih_set_satl_pending(scmd, false);
5529 		goto out;
5530 	}
5531 	mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
5532 	memset(mpi_request, 0, ioc->request_sz);
5533 	_scsih_setup_eedp(ioc, scmd, mpi_request);
5534 
5535 	if (scmd->cmd_len == 32)
5536 		mpi_control |= 4 << MPI2_SCSIIO_CONTROL_ADDCDBLEN_SHIFT;
5537 	mpi_request->Function = MPI2_FUNCTION_SCSI_IO_REQUEST;
5538 	if (sas_device_priv_data->sas_target->flags &
5539 	    MPT_TARGET_FLAGS_RAID_COMPONENT)
5540 		mpi_request->Function = MPI2_FUNCTION_RAID_SCSI_IO_PASSTHROUGH;
5541 	else
5542 		mpi_request->Function = MPI2_FUNCTION_SCSI_IO_REQUEST;
5543 	mpi_request->DevHandle = cpu_to_le16(handle);
5544 	mpi_request->DataLength = cpu_to_le32(scsi_bufflen(scmd));
5545 	mpi_request->Control = cpu_to_le32(mpi_control);
5546 	mpi_request->IoFlags = cpu_to_le16(scmd->cmd_len);
5547 	mpi_request->MsgFlags = MPI2_SCSIIO_MSGFLAGS_SYSTEM_SENSE_ADDR;
5548 	mpi_request->SenseBufferLength = SCSI_SENSE_BUFFERSIZE;
5549 	mpi_request->SenseBufferLowAddress =
5550 	    mpt3sas_base_get_sense_buffer_dma(ioc, smid);
5551 	mpi_request->SGLOffset0 = offsetof(Mpi25SCSIIORequest_t, SGL) / 4;
5552 	int_to_scsilun(sas_device_priv_data->lun, (struct scsi_lun *)
5553 	    mpi_request->LUN);
5554 	memcpy(mpi_request->CDB.CDB32, scmd->cmnd, scmd->cmd_len);
5555 
5556 	if (mpi_request->DataLength) {
5557 		pcie_device = sas_target_priv_data->pcie_dev;
5558 		if (ioc->build_sg_scmd(ioc, scmd, smid, pcie_device)) {
5559 			mpt3sas_base_free_smid(ioc, smid);
5560 			_scsih_set_satl_pending(scmd, false);
5561 			goto out;
5562 		}
5563 	} else
5564 		ioc->build_zero_len_sge(ioc, &mpi_request->SGL);
5565 
5566 	raid_device = sas_target_priv_data->raid_device;
5567 	if (raid_device && raid_device->direct_io_enabled)
5568 		mpt3sas_setup_direct_io(ioc, scmd,
5569 			raid_device, mpi_request);
5570 
5571 	if (likely(mpi_request->Function == MPI2_FUNCTION_SCSI_IO_REQUEST)) {
5572 		if (sas_target_priv_data->flags & MPT_TARGET_FASTPATH_IO) {
5573 			mpi_request->IoFlags = cpu_to_le16(scmd->cmd_len |
5574 			    MPI25_SCSIIO_IOFLAGS_FAST_PATH);
5575 			ioc->put_smid_fast_path(ioc, smid, handle);
5576 		} else
5577 			ioc->put_smid_scsi_io(ioc, smid,
5578 			    le16_to_cpu(mpi_request->DevHandle));
5579 	} else
5580 		ioc->put_smid_default(ioc, smid);
5581 	return 0;
5582 
5583  out:
5584 	return SCSI_MLQUEUE_HOST_BUSY;
5585 }
5586 
5587 /**
5588  * _scsih_normalize_sense - normalize descriptor and fixed format sense data
5589  * @sense_buffer: sense data returned by target
5590  * @data: normalized skey/asc/ascq
5591  */
5592 static void
_scsih_normalize_sense(char * sense_buffer,struct sense_info * data)5593 _scsih_normalize_sense(char *sense_buffer, struct sense_info *data)
5594 {
5595 	if ((sense_buffer[0] & 0x7F) >= 0x72) {
5596 		/* descriptor format */
5597 		data->skey = sense_buffer[1] & 0x0F;
5598 		data->asc = sense_buffer[2];
5599 		data->ascq = sense_buffer[3];
5600 	} else {
5601 		/* fixed format */
5602 		data->skey = sense_buffer[2] & 0x0F;
5603 		data->asc = sense_buffer[12];
5604 		data->ascq = sense_buffer[13];
5605 	}
5606 }
5607 
5608 /**
5609  * _scsih_scsi_ioc_info - translated non-successful SCSI_IO request
5610  * @ioc: per adapter object
5611  * @scmd: pointer to scsi command object
5612  * @mpi_reply: reply mf payload returned from firmware
5613  * @smid: ?
5614  *
5615  * scsi_status - SCSI Status code returned from target device
5616  * scsi_state - state info associated with SCSI_IO determined by ioc
5617  * ioc_status - ioc supplied status info
5618  */
static void
_scsih_scsi_ioc_info(struct MPT3SAS_ADAPTER *ioc, struct scsi_cmnd *scmd,
	Mpi2SCSIIOReply_t *mpi_reply, u16 smid)
{
	u32 response_info;
	u8 *response_bytes;
	u16 ioc_status = le16_to_cpu(mpi_reply->IOCStatus) &
	    MPI2_IOCSTATUS_MASK;
	u8 scsi_state = mpi_reply->SCSIState;
	u8 scsi_status = mpi_reply->SCSIStatus;
	char *desc_ioc_state = NULL;
	char *desc_scsi_status = NULL;
	/* scratch buffer owned by the adapter object; built up via strcat */
	char *desc_scsi_state = ioc->tmp_string;
	u32 log_info = le32_to_cpu(mpi_reply->IOCLogInfo);
	struct _sas_device *sas_device = NULL;
	struct _pcie_device *pcie_device = NULL;
	struct scsi_target *starget = scmd->device->sdev_target;
	struct MPT3SAS_TARGET *priv_target = starget->hostdata;
	char *device_str = NULL;

	if (!priv_target)
		return;
	if (ioc->hide_ir_msg)
		device_str = "WarpDrive";
	else
		device_str = "volume";

	/* NOTE(review): loginfo 0x31170000 is suppressed deliberately;
	 * presumably a known benign/noisy firmware loginfo - confirm.
	 */
	if (log_info == 0x31170000)
		return;

	/* translate IOC status to a human-readable string */
	switch (ioc_status) {
	case MPI2_IOCSTATUS_SUCCESS:
		desc_ioc_state = "success";
		break;
	case MPI2_IOCSTATUS_INVALID_FUNCTION:
		desc_ioc_state = "invalid function";
		break;
	case MPI2_IOCSTATUS_SCSI_RECOVERED_ERROR:
		desc_ioc_state = "scsi recovered error";
		break;
	case MPI2_IOCSTATUS_SCSI_INVALID_DEVHANDLE:
		desc_ioc_state = "scsi invalid dev handle";
		break;
	case MPI2_IOCSTATUS_SCSI_DEVICE_NOT_THERE:
		desc_ioc_state = "scsi device not there";
		break;
	case MPI2_IOCSTATUS_SCSI_DATA_OVERRUN:
		desc_ioc_state = "scsi data overrun";
		break;
	case MPI2_IOCSTATUS_SCSI_DATA_UNDERRUN:
		desc_ioc_state = "scsi data underrun";
		break;
	case MPI2_IOCSTATUS_SCSI_IO_DATA_ERROR:
		desc_ioc_state = "scsi io data error";
		break;
	case MPI2_IOCSTATUS_SCSI_PROTOCOL_ERROR:
		desc_ioc_state = "scsi protocol error";
		break;
	case MPI2_IOCSTATUS_SCSI_TASK_TERMINATED:
		desc_ioc_state = "scsi task terminated";
		break;
	case MPI2_IOCSTATUS_SCSI_RESIDUAL_MISMATCH:
		desc_ioc_state = "scsi residual mismatch";
		break;
	case MPI2_IOCSTATUS_SCSI_TASK_MGMT_FAILED:
		desc_ioc_state = "scsi task mgmt failed";
		break;
	case MPI2_IOCSTATUS_SCSI_IOC_TERMINATED:
		desc_ioc_state = "scsi ioc terminated";
		break;
	case MPI2_IOCSTATUS_SCSI_EXT_TERMINATED:
		desc_ioc_state = "scsi ext terminated";
		break;
	case MPI2_IOCSTATUS_EEDP_GUARD_ERROR:
		desc_ioc_state = "eedp guard error";
		break;
	case MPI2_IOCSTATUS_EEDP_REF_TAG_ERROR:
		desc_ioc_state = "eedp ref tag error";
		break;
	case MPI2_IOCSTATUS_EEDP_APP_TAG_ERROR:
		desc_ioc_state = "eedp app tag error";
		break;
	case MPI2_IOCSTATUS_INSUFFICIENT_POWER:
		desc_ioc_state = "insufficient power";
		break;
	default:
		desc_ioc_state = "unknown";
		break;
	}

	/* translate the device-reported SCSI status to a string */
	switch (scsi_status) {
	case MPI2_SCSI_STATUS_GOOD:
		desc_scsi_status = "good";
		break;
	case MPI2_SCSI_STATUS_CHECK_CONDITION:
		desc_scsi_status = "check condition";
		break;
	case MPI2_SCSI_STATUS_CONDITION_MET:
		desc_scsi_status = "condition met";
		break;
	case MPI2_SCSI_STATUS_BUSY:
		desc_scsi_status = "busy";
		break;
	case MPI2_SCSI_STATUS_INTERMEDIATE:
		desc_scsi_status = "intermediate";
		break;
	case MPI2_SCSI_STATUS_INTERMEDIATE_CONDMET:
		desc_scsi_status = "intermediate condmet";
		break;
	case MPI2_SCSI_STATUS_RESERVATION_CONFLICT:
		desc_scsi_status = "reservation conflict";
		break;
	case MPI2_SCSI_STATUS_COMMAND_TERMINATED:
		desc_scsi_status = "command terminated";
		break;
	case MPI2_SCSI_STATUS_TASK_SET_FULL:
		desc_scsi_status = "task set full";
		break;
	case MPI2_SCSI_STATUS_ACA_ACTIVE:
		desc_scsi_status = "aca active";
		break;
	case MPI2_SCSI_STATUS_TASK_ABORTED:
		desc_scsi_status = "task aborted";
		break;
	default:
		desc_scsi_status = "unknown";
		break;
	}

	/* build a space-separated list of the scsi_state flag names;
	 * note the pointer is re-aimed at a literal when no flags are set
	 */
	desc_scsi_state[0] = '\0';
	if (!scsi_state)
		desc_scsi_state = " ";
	if (scsi_state & MPI2_SCSI_STATE_RESPONSE_INFO_VALID)
		strcat(desc_scsi_state, "response info ");
	if (scsi_state & MPI2_SCSI_STATE_TERMINATED)
		strcat(desc_scsi_state, "state terminated ");
	if (scsi_state & MPI2_SCSI_STATE_NO_SCSI_STATUS)
		strcat(desc_scsi_state, "no status ");
	if (scsi_state & MPI2_SCSI_STATE_AUTOSENSE_FAILED)
		strcat(desc_scsi_state, "autosense failed ");
	if (scsi_state & MPI2_SCSI_STATE_AUTOSENSE_VALID)
		strcat(desc_scsi_state, "autosense valid ");

	scsi_print_command(scmd);

	/* identify the failing device: volume, NVMe/PCIe device, or SAS */
	if (priv_target->flags & MPT_TARGET_FLAGS_VOLUME) {
		ioc_warn(ioc, "\t%s wwid(0x%016llx)\n",
			 device_str, (u64)priv_target->sas_address);
	} else if (priv_target->flags & MPT_TARGET_FLAGS_PCIE_DEVICE) {
		pcie_device = mpt3sas_get_pdev_from_target(ioc, priv_target);
		if (pcie_device) {
			ioc_info(ioc, "\twwid(0x%016llx), port(%d)\n",
				 (u64)pcie_device->wwid, pcie_device->port_num);
			if (pcie_device->enclosure_handle != 0)
				ioc_info(ioc, "\tenclosure logical id(0x%016llx), slot(%d)\n",
					 (u64)pcie_device->enclosure_logical_id,
					 pcie_device->slot);
			if (pcie_device->connector_name[0])
				ioc_info(ioc, "\tenclosure level(0x%04x), connector name( %s)\n",
					 pcie_device->enclosure_level,
					 pcie_device->connector_name);
			pcie_device_put(pcie_device);
		}
	} else {
		sas_device = mpt3sas_get_sdev_from_target(ioc, priv_target);
		if (sas_device) {
			ioc_warn(ioc, "\tsas_address(0x%016llx), phy(%d)\n",
				 (u64)sas_device->sas_address, sas_device->phy);

			_scsih_display_enclosure_chassis_info(ioc, sas_device,
			    NULL, NULL);

			sas_device_put(sas_device);
		}
	}

	ioc_warn(ioc, "\thandle(0x%04x), ioc_status(%s)(0x%04x), smid(%d)\n",
		 le16_to_cpu(mpi_reply->DevHandle),
		 desc_ioc_state, ioc_status, smid);
	ioc_warn(ioc, "\trequest_len(%d), underflow(%d), resid(%d)\n",
		 scsi_bufflen(scmd), scmd->underflow, scsi_get_resid(scmd));
	ioc_warn(ioc, "\ttag(%d), transfer_count(%d), sc->result(0x%08x)\n",
		 le16_to_cpu(mpi_reply->TaskTag),
		 le32_to_cpu(mpi_reply->TransferCount), scmd->result);
	ioc_warn(ioc, "\tscsi_status(%s)(0x%02x), scsi_state(%s)(0x%02x)\n",
		 desc_scsi_status, scsi_status, desc_scsi_state, scsi_state);

	/* decode sense data / response info when the reply carried them */
	if (scsi_state & MPI2_SCSI_STATE_AUTOSENSE_VALID) {
		struct sense_info data;
		_scsih_normalize_sense(scmd->sense_buffer, &data);
		ioc_warn(ioc, "\t[sense_key,asc,ascq]: [0x%02x,0x%02x,0x%02x], count(%d)\n",
			 data.skey, data.asc, data.ascq,
			 le32_to_cpu(mpi_reply->SenseCount));
	}
	if (scsi_state & MPI2_SCSI_STATE_RESPONSE_INFO_VALID) {
		response_info = le32_to_cpu(mpi_reply->ResponseInfo);
		response_bytes = (u8 *)&response_info;
		_scsih_response_code(ioc, response_bytes[0]);
	}
}
5819 
5820 /**
5821  * _scsih_turn_on_pfa_led - illuminate PFA LED
5822  * @ioc: per adapter object
5823  * @handle: device handle
5824  * Context: process
5825  */
5826 static void
_scsih_turn_on_pfa_led(struct MPT3SAS_ADAPTER * ioc,u16 handle)5827 _scsih_turn_on_pfa_led(struct MPT3SAS_ADAPTER *ioc, u16 handle)
5828 {
5829 	Mpi2SepReply_t mpi_reply;
5830 	Mpi2SepRequest_t mpi_request;
5831 	struct _sas_device *sas_device;
5832 
5833 	sas_device = mpt3sas_get_sdev_by_handle(ioc, handle);
5834 	if (!sas_device)
5835 		return;
5836 
5837 	memset(&mpi_request, 0, sizeof(Mpi2SepRequest_t));
5838 	mpi_request.Function = MPI2_FUNCTION_SCSI_ENCLOSURE_PROCESSOR;
5839 	mpi_request.Action = MPI2_SEP_REQ_ACTION_WRITE_STATUS;
5840 	mpi_request.SlotStatus =
5841 	    cpu_to_le32(MPI2_SEP_REQ_SLOTSTATUS_PREDICTED_FAULT);
5842 	mpi_request.DevHandle = cpu_to_le16(handle);
5843 	mpi_request.Flags = MPI2_SEP_REQ_FLAGS_DEVHANDLE_ADDRESS;
5844 	if ((mpt3sas_base_scsi_enclosure_processor(ioc, &mpi_reply,
5845 	    &mpi_request)) != 0) {
5846 		ioc_err(ioc, "failure at %s:%d/%s()!\n",
5847 			__FILE__, __LINE__, __func__);
5848 		goto out;
5849 	}
5850 	sas_device->pfa_led_on = 1;
5851 
5852 	if (mpi_reply.IOCStatus || mpi_reply.IOCLogInfo) {
5853 		dewtprintk(ioc,
5854 			   ioc_info(ioc, "enclosure_processor: ioc_status (0x%04x), loginfo(0x%08x)\n",
5855 				    le16_to_cpu(mpi_reply.IOCStatus),
5856 				    le32_to_cpu(mpi_reply.IOCLogInfo)));
5857 		goto out;
5858 	}
5859 out:
5860 	sas_device_put(sas_device);
5861 }
5862 
5863 /**
5864  * _scsih_turn_off_pfa_led - turn off Fault LED
5865  * @ioc: per adapter object
5866  * @sas_device: sas device whose PFA LED has to turned off
5867  * Context: process
5868  */
5869 static void
_scsih_turn_off_pfa_led(struct MPT3SAS_ADAPTER * ioc,struct _sas_device * sas_device)5870 _scsih_turn_off_pfa_led(struct MPT3SAS_ADAPTER *ioc,
5871 	struct _sas_device *sas_device)
5872 {
5873 	Mpi2SepReply_t mpi_reply;
5874 	Mpi2SepRequest_t mpi_request;
5875 
5876 	memset(&mpi_request, 0, sizeof(Mpi2SepRequest_t));
5877 	mpi_request.Function = MPI2_FUNCTION_SCSI_ENCLOSURE_PROCESSOR;
5878 	mpi_request.Action = MPI2_SEP_REQ_ACTION_WRITE_STATUS;
5879 	mpi_request.SlotStatus = 0;
5880 	mpi_request.Slot = cpu_to_le16(sas_device->slot);
5881 	mpi_request.DevHandle = 0;
5882 	mpi_request.EnclosureHandle = cpu_to_le16(sas_device->enclosure_handle);
5883 	mpi_request.Flags = MPI2_SEP_REQ_FLAGS_ENCLOSURE_SLOT_ADDRESS;
5884 	if ((mpt3sas_base_scsi_enclosure_processor(ioc, &mpi_reply,
5885 		&mpi_request)) != 0) {
5886 		ioc_err(ioc, "failure at %s:%d/%s()!\n",
5887 			__FILE__, __LINE__, __func__);
5888 		return;
5889 	}
5890 
5891 	if (mpi_reply.IOCStatus || mpi_reply.IOCLogInfo) {
5892 		dewtprintk(ioc,
5893 			   ioc_info(ioc, "enclosure_processor: ioc_status (0x%04x), loginfo(0x%08x)\n",
5894 				    le16_to_cpu(mpi_reply.IOCStatus),
5895 				    le32_to_cpu(mpi_reply.IOCLogInfo)));
5896 		return;
5897 	}
5898 }
5899 
5900 /**
5901  * _scsih_send_event_to_turn_on_pfa_led - fire delayed event
5902  * @ioc: per adapter object
5903  * @handle: device handle
5904  * Context: interrupt.
5905  */
5906 static void
_scsih_send_event_to_turn_on_pfa_led(struct MPT3SAS_ADAPTER * ioc,u16 handle)5907 _scsih_send_event_to_turn_on_pfa_led(struct MPT3SAS_ADAPTER *ioc, u16 handle)
5908 {
5909 	struct fw_event_work *fw_event;
5910 
5911 	fw_event = alloc_fw_event_work(0);
5912 	if (!fw_event)
5913 		return;
5914 	fw_event->event = MPT3SAS_TURN_ON_PFA_LED;
5915 	fw_event->device_handle = handle;
5916 	fw_event->ioc = ioc;
5917 	_scsih_fw_event_add(ioc, fw_event);
5918 	fw_event_work_put(fw_event);
5919 }
5920 
5921 /**
5922  * _scsih_smart_predicted_fault - process smart errors
5923  * @ioc: per adapter object
5924  * @handle: device handle
5925  * Context: interrupt.
5926  */
static void
_scsih_smart_predicted_fault(struct MPT3SAS_ADAPTER *ioc, u16 handle)
{
	struct scsi_target *starget;
	struct MPT3SAS_TARGET *sas_target_priv_data;
	Mpi2EventNotificationReply_t *event_reply;
	Mpi2EventDataSasDeviceStatusChange_t *event_data;
	struct _sas_device *sas_device;
	ssize_t sz;
	unsigned long flags;

	/* only handle non-raid devices */
	spin_lock_irqsave(&ioc->sas_device_lock, flags);
	sas_device = __mpt3sas_get_sdev_by_handle(ioc, handle);
	if (!sas_device)
		goto out_unlock;

	starget = sas_device->starget;
	sas_target_priv_data = starget->hostdata;

	/* RAID components and volumes are managed by the RAID layer;
	 * don't surface per-drive SMART events for them
	 */
	if ((sas_target_priv_data->flags & MPT_TARGET_FLAGS_RAID_COMPONENT) ||
	   ((sas_target_priv_data->flags & MPT_TARGET_FLAGS_VOLUME)))
		goto out_unlock;

	_scsih_display_enclosure_chassis_info(NULL, sas_device, NULL, starget);

	/* lock only protects the device lookup; drop it before doing
	 * allocation and event queueing below
	 */
	spin_unlock_irqrestore(&ioc->sas_device_lock, flags);

	/* NOTE(review): PFA LED handling appears to be IBM-specific
	 * subsystem behavior - confirm
	 */
	if (ioc->pdev->subsystem_vendor == PCI_VENDOR_ID_IBM)
		_scsih_send_event_to_turn_on_pfa_led(ioc, handle);

	/* insert into event log */
	sz = offsetof(Mpi2EventNotificationReply_t, EventData) +
	     sizeof(Mpi2EventDataSasDeviceStatusChange_t);
	/* GFP_ATOMIC: called from interrupt context per kernel-doc above */
	event_reply = kzalloc(sz, GFP_ATOMIC);
	if (!event_reply) {
		ioc_err(ioc, "failure at %s:%d/%s()!\n",
			__FILE__, __LINE__, __func__);
		goto out;
	}

	/* synthesize a SAS device status change event with ASC 0x5D
	 * (failure prediction threshold exceeded) for the ctl event log
	 */
	event_reply->Function = MPI2_FUNCTION_EVENT_NOTIFICATION;
	event_reply->Event =
	    cpu_to_le16(MPI2_EVENT_SAS_DEVICE_STATUS_CHANGE);
	event_reply->MsgLength = sz/4;
	event_reply->EventDataLength =
	    cpu_to_le16(sizeof(Mpi2EventDataSasDeviceStatusChange_t)/4);
	event_data = (Mpi2EventDataSasDeviceStatusChange_t *)
	    event_reply->EventData;
	event_data->ReasonCode = MPI2_EVENT_SAS_DEV_STAT_RC_SMART_DATA;
	event_data->ASC = 0x5D;
	event_data->DevHandle = cpu_to_le16(handle);
	event_data->SASAddress = cpu_to_le64(sas_target_priv_data->sas_address);
	mpt3sas_ctl_add_to_event_log(ioc, event_reply);
	kfree(event_reply);
out:
	/* drop the reference taken by __mpt3sas_get_sdev_by_handle() */
	if (sas_device)
		sas_device_put(sas_device);
	return;

out_unlock:
	spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
	goto out;
}
5991 
5992 /**
5993  * _scsih_io_done - scsi request callback
5994  * @ioc: per adapter object
5995  * @smid: system request message index
5996  * @msix_index: MSIX table index supplied by the OS
5997  * @reply: reply message frame(lower 32bit addr)
5998  *
5999  * Callback handler when using _scsih_qcmd.
6000  *
6001  * Return: 1 meaning mf should be freed from _base_interrupt
6002  *         0 means the mf is freed from this function.
6003  */
static u8
_scsih_io_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index, u32 reply)
{
	Mpi25SCSIIORequest_t *mpi_request;
	Mpi2SCSIIOReply_t *mpi_reply;
	struct scsi_cmnd *scmd;
	struct scsiio_tracker *st;
	u16 ioc_status;
	u32 xfer_cnt;
	u8 scsi_state;
	u8 scsi_status;
	u32 log_info;
	struct MPT3SAS_DEVICE *sas_device_priv_data;
	u32 response_code = 0;

	mpi_reply = mpt3sas_base_get_reply_virt_addr(ioc, reply);

	/* no scmd: the request frame must be freed by the caller */
	scmd = mpt3sas_scsih_scsi_lookup_get(ioc, smid);
	if (scmd == NULL)
		return 1;

	_scsih_set_satl_pending(scmd, false);

	mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);

	/* a NULL reply means success with no reply frame posted */
	if (mpi_reply == NULL) {
		scmd->result = DID_OK << 16;
		goto out;
	}

	sas_device_priv_data = scmd->device->hostdata;
	if (!sas_device_priv_data || !sas_device_priv_data->sas_target ||
	     sas_device_priv_data->sas_target->deleted) {
		scmd->result = DID_NO_CONNECT << 16;
		goto out;
	}
	ioc_status = le16_to_cpu(mpi_reply->IOCStatus);

	/*
	 * WARPDRIVE: If direct_io is set then it is directIO,
	 * the failed direct I/O should be redirected to volume
	 */
	st = scsi_cmd_priv(scmd);
	if (st->direct_io &&
	     ((ioc_status & MPI2_IOCSTATUS_MASK)
	      != MPI2_IOCSTATUS_SCSI_TASK_TERMINATED)) {
		/* resubmit the same smid to the volume handle; do NOT
		 * complete the command here
		 */
		st->direct_io = 0;
		st->scmd = scmd;
		memcpy(mpi_request->CDB.CDB32, scmd->cmnd, scmd->cmd_len);
		mpi_request->DevHandle =
		    cpu_to_le16(sas_device_priv_data->sas_target->handle);
		ioc->put_smid_scsi_io(ioc, smid,
		    sas_device_priv_data->sas_target->handle);
		return 0;
	}
	/* turning off TLR */
	scsi_state = mpi_reply->SCSIState;
	if (scsi_state & MPI2_SCSI_STATE_RESPONSE_INFO_VALID)
		response_code =
		    le32_to_cpu(mpi_reply->ResponseInfo) & 0xFF;
	/* one-shot check: disable TLR if the device rejected the first
	 * TLR-tagged frame
	 */
	if (!sas_device_priv_data->tlr_snoop_check) {
		sas_device_priv_data->tlr_snoop_check++;
		if ((!ioc->is_warpdrive &&
		    !scsih_is_raid(&scmd->device->sdev_gendev) &&
		    !scsih_is_nvme(&scmd->device->sdev_gendev))
		    && sas_is_tlr_enabled(scmd->device) &&
		    response_code == MPI2_SCSITASKMGMT_RSP_INVALID_FRAME) {
			sas_disable_tlr(scmd->device);
			sdev_printk(KERN_INFO, scmd->device, "TLR disabled\n");
		}
	}

	xfer_cnt = le32_to_cpu(mpi_reply->TransferCount);
	scsi_set_resid(scmd, scsi_bufflen(scmd) - xfer_cnt);
	if (ioc_status & MPI2_IOCSTATUS_FLAG_LOG_INFO_AVAILABLE)
		log_info =  le32_to_cpu(mpi_reply->IOCLogInfo);
	else
		log_info = 0;
	ioc_status &= MPI2_IOCSTATUS_MASK;
	scsi_status = mpi_reply->SCSIStatus;

	/* a zero-byte underrun with one of these SCSI statuses is really
	 * just the device pushing back; treat the IOC status as success
	 * so the SCSI status drives the outcome
	 */
	if (ioc_status == MPI2_IOCSTATUS_SCSI_DATA_UNDERRUN && xfer_cnt == 0 &&
	    (scsi_status == MPI2_SCSI_STATUS_BUSY ||
	     scsi_status == MPI2_SCSI_STATUS_RESERVATION_CONFLICT ||
	     scsi_status == MPI2_SCSI_STATUS_TASK_SET_FULL)) {
		ioc_status = MPI2_IOCSTATUS_SUCCESS;
	}

	if (scsi_state & MPI2_SCSI_STATE_AUTOSENSE_VALID) {
		struct sense_info data;
		const void *sense_data = mpt3sas_base_get_sense_buffer(ioc,
		    smid);
		u32 sz = min_t(u32, SCSI_SENSE_BUFFERSIZE,
		    le32_to_cpu(mpi_reply->SenseCount));
		memcpy(scmd->sense_buffer, sense_data, sz);
		_scsih_normalize_sense(scmd->sense_buffer, &data);
		/* failure prediction threshold exceeded */
		if (data.asc == 0x5D)
			_scsih_smart_predicted_fault(ioc,
			    le16_to_cpu(mpi_reply->DevHandle));
		mpt3sas_trigger_scsi(ioc, data.skey, data.asc, data.ascq);

		if ((ioc->logging_level & MPT_DEBUG_REPLY) &&
		     ((scmd->sense_buffer[2] == UNIT_ATTENTION) ||
		     (scmd->sense_buffer[2] == MEDIUM_ERROR) ||
		     (scmd->sense_buffer[2] == HARDWARE_ERROR)))
			_scsih_scsi_ioc_info(ioc, scmd, mpi_reply, smid);
	}
	/* translate IOC status into a SCSI mid-layer result */
	switch (ioc_status) {
	case MPI2_IOCSTATUS_BUSY:
	case MPI2_IOCSTATUS_INSUFFICIENT_RESOURCES:
		scmd->result = SAM_STAT_BUSY;
		break;

	case MPI2_IOCSTATUS_SCSI_DEVICE_NOT_THERE:
		scmd->result = DID_NO_CONNECT << 16;
		break;

	case MPI2_IOCSTATUS_SCSI_IOC_TERMINATED:
		if (sas_device_priv_data->block) {
			scmd->result = DID_TRANSPORT_DISRUPTED << 16;
			goto out;
		}
		if (log_info == IOC_LOGINFO_SATA_NCQ_FAIL_AFTER_ERR) {
			/*
			 * This is a ATA NCQ command aborted due to another NCQ
			 * command failure. We must retry this command
			 * immediately but without incrementing its retry
			 * counter.
			 */
			WARN_ON_ONCE(xfer_cnt != 0);
			scmd->result = DID_IMM_RETRY << 16;
			break;
		}
		/* NOTE(review): 0x31110630 is a specific firmware loginfo
		 * handled with retry-then-offline semantics - confirm its
		 * meaning against the firmware loginfo documentation
		 */
		if (log_info == 0x31110630) {
			if (scmd->retries > 2) {
				scmd->result = DID_NO_CONNECT << 16;
				scsi_device_set_state(scmd->device,
				    SDEV_OFFLINE);
			} else {
				scmd->result = DID_SOFT_ERROR << 16;
				scmd->device->expecting_cc_ua = 1;
			}
			break;
		} else if (log_info == VIRTUAL_IO_FAILED_RETRY) {
			scmd->result = DID_RESET << 16;
			break;
		} else if ((scmd->device->channel == RAID_CHANNEL) &&
		   (scsi_state == (MPI2_SCSI_STATE_TERMINATED |
		   MPI2_SCSI_STATE_NO_SCSI_STATUS))) {
			scmd->result = DID_RESET << 16;
			break;
		}
		scmd->result = DID_SOFT_ERROR << 16;
		break;
	case MPI2_IOCSTATUS_SCSI_TASK_TERMINATED:
	case MPI2_IOCSTATUS_SCSI_EXT_TERMINATED:
		scmd->result = DID_RESET << 16;
		break;

	case MPI2_IOCSTATUS_SCSI_RESIDUAL_MISMATCH:
		if ((xfer_cnt == 0) || (scmd->underflow > xfer_cnt))
			scmd->result = DID_SOFT_ERROR << 16;
		else
			scmd->result = (DID_OK << 16) | scsi_status;
		break;

	case MPI2_IOCSTATUS_SCSI_DATA_UNDERRUN:
		scmd->result = (DID_OK << 16) | scsi_status;

		/* sense data already describes the underrun; done */
		if ((scsi_state & MPI2_SCSI_STATE_AUTOSENSE_VALID))
			break;

		if (xfer_cnt < scmd->underflow) {
			if (scsi_status == SAM_STAT_BUSY)
				scmd->result = SAM_STAT_BUSY;
			else
				scmd->result = DID_SOFT_ERROR << 16;
		} else if (scsi_state & (MPI2_SCSI_STATE_AUTOSENSE_FAILED |
		     MPI2_SCSI_STATE_NO_SCSI_STATUS))
			scmd->result = DID_SOFT_ERROR << 16;
		else if (scsi_state & MPI2_SCSI_STATE_TERMINATED)
			scmd->result = DID_RESET << 16;
		else if (!xfer_cnt && scmd->cmnd[0] == REPORT_LUNS) {
			/* fabricate ILLEGAL REQUEST / invalid opcode sense
			 * for a zero-byte REPORT LUNS response
			 */
			mpi_reply->SCSIState = MPI2_SCSI_STATE_AUTOSENSE_VALID;
			mpi_reply->SCSIStatus = SAM_STAT_CHECK_CONDITION;
			scsi_build_sense(scmd, 0, ILLEGAL_REQUEST,
					 0x20, 0);
		}
		break;

	case MPI2_IOCSTATUS_SCSI_DATA_OVERRUN:
		scsi_set_resid(scmd, 0);
		fallthrough;
	case MPI2_IOCSTATUS_SCSI_RECOVERED_ERROR:
	case MPI2_IOCSTATUS_SUCCESS:
		scmd->result = (DID_OK << 16) | scsi_status;
		if (response_code ==
		    MPI2_SCSITASKMGMT_RSP_INVALID_FRAME ||
		    (scsi_state & (MPI2_SCSI_STATE_AUTOSENSE_FAILED |
		     MPI2_SCSI_STATE_NO_SCSI_STATUS)))
			scmd->result = DID_SOFT_ERROR << 16;
		else if (scsi_state & MPI2_SCSI_STATE_TERMINATED)
			scmd->result = DID_RESET << 16;
		break;

	case MPI2_IOCSTATUS_EEDP_GUARD_ERROR:
	case MPI2_IOCSTATUS_EEDP_REF_TAG_ERROR:
	case MPI2_IOCSTATUS_EEDP_APP_TAG_ERROR:
		_scsih_eedp_error_handling(scmd, ioc_status);
		break;

	case MPI2_IOCSTATUS_SCSI_PROTOCOL_ERROR:
	case MPI2_IOCSTATUS_INVALID_FUNCTION:
	case MPI2_IOCSTATUS_INVALID_SGL:
	case MPI2_IOCSTATUS_INTERNAL_ERROR:
	case MPI2_IOCSTATUS_INVALID_FIELD:
	case MPI2_IOCSTATUS_INVALID_STATE:
	case MPI2_IOCSTATUS_SCSI_IO_DATA_ERROR:
	case MPI2_IOCSTATUS_SCSI_TASK_MGMT_FAILED:
	case MPI2_IOCSTATUS_INSUFFICIENT_POWER:
	default:
		scmd->result = DID_SOFT_ERROR << 16;
		break;

	}

	if (scmd->result && (ioc->logging_level & MPT_DEBUG_REPLY))
		_scsih_scsi_ioc_info(ioc , scmd, mpi_reply, smid);

 out:

	scsi_dma_unmap(scmd);
	mpt3sas_base_free_smid(ioc, smid);
	scsi_done(scmd);
	return 0;
}
6241 
/**
 * _scsih_update_vphys_after_reset - update the Port's
 *			vphys_list after reset
 * @ioc: per adapter object
 *
 * After a host reset the Port ID assigned to a vSES device may change.
 * Re-read SASIOUnitPage0, re-identify each virtual phy, and move its
 * virtual_phy object to the hba_port whose Port ID now matches; any
 * virtual_phy object left marked dirty afterwards is stale.
 *
 * Returns nothing.
 */
static void
_scsih_update_vphys_after_reset(struct MPT3SAS_ADAPTER *ioc)
{
	u16 sz, ioc_status;
	int i;
	Mpi2ConfigReply_t mpi_reply;
	Mpi2SasIOUnitPage0_t *sas_iounit_pg0 = NULL;
	u16 attached_handle;
	u64 attached_sas_addr;
	u8 found = 0, port_id;
	Mpi2SasPhyPage0_t phy_pg0;
	struct hba_port *port, *port_next, *mport;
	struct virtual_phy *vphy, *vphy_next;
	struct _sas_device *sas_device;

	/*
	 * Mark all the vphys objects as dirty; each one that is still
	 * present after the reset gets unmarked in the loop below.
	 */
	list_for_each_entry_safe(port, port_next,
	    &ioc->port_table_list, list) {
		if (!port->vphys_mask)
			continue;
		list_for_each_entry_safe(vphy, vphy_next,
		    &port->vphys_list, list) {
			vphy->flags |= MPT_VPHY_FLAG_DIRTY_PHY;
		}
	}

	/*
	 * Read SASIOUnitPage0 to get each HBA Phy's data.
	 */
	sz = struct_size(sas_iounit_pg0, PhyData, ioc->sas_hba.num_phys);
	sas_iounit_pg0 = kzalloc(sz, GFP_KERNEL);
	if (!sas_iounit_pg0) {
		ioc_err(ioc, "failure at %s:%d/%s()!\n",
		    __FILE__, __LINE__, __func__);
		return;
	}
	if ((mpt3sas_config_get_sas_iounit_pg0(ioc, &mpi_reply,
	    sas_iounit_pg0, sz)) != 0)
		goto out;
	ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & MPI2_IOCSTATUS_MASK;
	if (ioc_status != MPI2_IOCSTATUS_SUCCESS)
		goto out;
	/*
	 * Loop over each HBA Phy.
	 */
	for (i = 0; i < ioc->sas_hba.num_phys; i++) {
		/*
		 * Check whether Phy's Negotiation Link Rate is > 1.5G or not.
		 */
		if ((sas_iounit_pg0->PhyData[i].NegotiatedLinkRate >> 4) <
		    MPI2_SAS_NEG_LINK_RATE_1_5)
			continue;
		/*
		 * Check whether Phy is connected to SEP device or not,
		 * if it is SEP device then read the Phy's SASPHYPage0 data to
		 * determine whether Phy is a virtual Phy or not. if it is
		 * virtual phy then it is confirmed that the attached remote
		 * device is a HBA's vSES device.
		 */
		if (!(le32_to_cpu(
		    sas_iounit_pg0->PhyData[i].ControllerPhyDeviceInfo) &
		    MPI2_SAS_DEVICE_INFO_SEP))
			continue;

		if ((mpt3sas_config_get_phy_pg0(ioc, &mpi_reply, &phy_pg0,
		    i))) {
			ioc_err(ioc, "failure at %s:%d/%s()!\n",
			    __FILE__, __LINE__, __func__);
			continue;
		}

		if (!(le32_to_cpu(phy_pg0.PhyInfo) &
		    MPI2_SAS_PHYINFO_VIRTUAL_PHY))
			continue;
		/*
		 * Get the vSES device's SAS Address.
		 */
		attached_handle = le16_to_cpu(
		    sas_iounit_pg0->PhyData[i].AttachedDevHandle);
		if (_scsih_get_sas_address(ioc, attached_handle,
		    &attached_sas_addr) != 0) {
			ioc_err(ioc, "failure at %s:%d/%s()!\n",
			    __FILE__, __LINE__, __func__);
			continue;
		}

		found = 0;
		port = port_next = NULL;
		/*
		 * Loop over each virtual_phy object from
		 * each port's vphys_list.
		 */
		list_for_each_entry_safe(port,
		    port_next, &ioc->port_table_list, list) {
			if (!port->vphys_mask)
				continue;
			list_for_each_entry_safe(vphy, vphy_next,
			    &port->vphys_list, list) {
				/*
				 * Continue with next virtual_phy object
				 * if the object is not marked as dirty.
				 */
				if (!(vphy->flags & MPT_VPHY_FLAG_DIRTY_PHY))
					continue;

				/*
				 * Continue with next virtual_phy object
				 * if the object's SAS Address is not equal
				 * to current Phy's vSES device SAS Address.
				 */
				if (vphy->sas_address != attached_sas_addr)
					continue;
				/*
				 * Enable current Phy number bit in object's
				 * phy_mask field.
				 */
				if (!(vphy->phy_mask & (1 << i)))
					vphy->phy_mask = (1 << i);
				/*
				 * Get hba_port object from hba_port table
				 * corresponding to current phy's Port ID.
				 * if there is no hba_port object corresponding
				 * to Phy's Port ID then create a new hba_port
				 * object & add to hba_port table.
				 */
				port_id = sas_iounit_pg0->PhyData[i].Port;
				mport = mpt3sas_get_port_by_id(ioc, port_id, 1);
				if (!mport) {
					mport = kzalloc_obj(struct hba_port);
					if (!mport)
						break;
					mport->port_id = port_id;
					ioc_info(ioc,
					    "%s: hba_port entry: %p, port: %d is added to hba_port list\n",
					    __func__, mport, mport->port_id);
					list_add_tail(&mport->list,
						&ioc->port_table_list);
				}
				/*
				 * If mport & port pointers are not pointing to
				 * same hba_port object then it means that vSES
				 * device's Port ID got changed after reset and
				 * hence move current virtual_phy object from
				 * port's vphys_list to mport's vphys_list.
				 */
				if (port != mport) {
					if (!mport->vphys_mask)
						INIT_LIST_HEAD(
						    &mport->vphys_list);
					mport->vphys_mask |= (1 << i);
					port->vphys_mask &= ~(1 << i);
					list_move(&vphy->list,
					    &mport->vphys_list);
					sas_device = mpt3sas_get_sdev_by_addr(
					    ioc, attached_sas_addr, port);
					if (sas_device)
						sas_device->port = mport;
				}
				/*
				 * Earlier while updating the hba_port table,
				 * it is determined that there is no other
				 * direct attached device with mport's Port ID,
				 * Hence mport was marked as dirty. Only vSES
				 * device has this Port ID, so unmark the mport
				 * as dirty.
				 */
				if (mport->flags & HBA_PORT_FLAG_DIRTY_PORT) {
					mport->sas_address = 0;
					mport->phy_mask = 0;
					mport->flags &=
					    ~HBA_PORT_FLAG_DIRTY_PORT;
				}
				/*
				 * Unmark current virtual_phy object as dirty.
				 */
				vphy->flags &= ~MPT_VPHY_FLAG_DIRTY_PHY;
				found = 1;
				break;
			}
			if (found)
				break;
		}
	}
out:
	kfree(sas_iounit_pg0);
}
6437 
6438 /**
6439  * _scsih_get_port_table_after_reset - Construct temporary port table
6440  * @ioc: per adapter object
6441  * @port_table: address where port table needs to be constructed
6442  *
6443  * return number of HBA port entries available after reset.
6444  */
6445 static int
_scsih_get_port_table_after_reset(struct MPT3SAS_ADAPTER * ioc,struct hba_port * port_table)6446 _scsih_get_port_table_after_reset(struct MPT3SAS_ADAPTER *ioc,
6447 	struct hba_port *port_table)
6448 {
6449 	u16 sz, ioc_status;
6450 	int i, j;
6451 	Mpi2ConfigReply_t mpi_reply;
6452 	Mpi2SasIOUnitPage0_t *sas_iounit_pg0 = NULL;
6453 	u16 attached_handle;
6454 	u64 attached_sas_addr;
6455 	u8 found = 0, port_count = 0, port_id;
6456 
6457 	sz = struct_size(sas_iounit_pg0, PhyData, ioc->sas_hba.num_phys);
6458 	sas_iounit_pg0 = kzalloc(sz, GFP_KERNEL);
6459 	if (!sas_iounit_pg0) {
6460 		ioc_err(ioc, "failure at %s:%d/%s()!\n",
6461 		    __FILE__, __LINE__, __func__);
6462 		return port_count;
6463 	}
6464 
6465 	if ((mpt3sas_config_get_sas_iounit_pg0(ioc, &mpi_reply,
6466 	    sas_iounit_pg0, sz)) != 0)
6467 		goto out;
6468 	ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & MPI2_IOCSTATUS_MASK;
6469 	if (ioc_status != MPI2_IOCSTATUS_SUCCESS)
6470 		goto out;
6471 	for (i = 0; i < ioc->sas_hba.num_phys; i++) {
6472 		found = 0;
6473 		if ((sas_iounit_pg0->PhyData[i].NegotiatedLinkRate >> 4) <
6474 		    MPI2_SAS_NEG_LINK_RATE_1_5)
6475 			continue;
6476 		attached_handle =
6477 		    le16_to_cpu(sas_iounit_pg0->PhyData[i].AttachedDevHandle);
6478 		if (_scsih_get_sas_address(
6479 		    ioc, attached_handle, &attached_sas_addr) != 0) {
6480 			ioc_err(ioc, "failure at %s:%d/%s()!\n",
6481 			    __FILE__, __LINE__, __func__);
6482 			continue;
6483 		}
6484 
6485 		for (j = 0; j < port_count; j++) {
6486 			port_id = sas_iounit_pg0->PhyData[i].Port;
6487 			if (port_table[j].port_id == port_id &&
6488 			    port_table[j].sas_address == attached_sas_addr) {
6489 				port_table[j].phy_mask |= (1 << i);
6490 				found = 1;
6491 				break;
6492 			}
6493 		}
6494 
6495 		if (found)
6496 			continue;
6497 
6498 		port_id = sas_iounit_pg0->PhyData[i].Port;
6499 		port_table[port_count].port_id = port_id;
6500 		port_table[port_count].phy_mask = (1 << i);
6501 		port_table[port_count].sas_address = attached_sas_addr;
6502 		port_count++;
6503 	}
6504 out:
6505 	kfree(sas_iounit_pg0);
6506 	return port_count;
6507 }
6508 
/*
 * Match quality codes returned by _scsih_look_and_get_matched_port_entry()
 * when searching the persistent HBA port table for a port entry observed
 * after host reset, ordered from strongest match to weakest.
 */
enum hba_port_matched_codes {
	NOT_MATCHED = 0,	/* no dirty entry shares the SAS address */
	MATCHED_WITH_ADDR_AND_PHYMASK,	/* SAS address + exact phy mask */
	MATCHED_WITH_ADDR_SUBPHYMASK_AND_PORT,	/* address + phy-mask subset + port id */
	MATCHED_WITH_ADDR_AND_SUBPHYMASK,	/* address + phy-mask subset */
	MATCHED_WITH_ADDR,	/* SAS address only */
};
6516 
/**
 * _scsih_look_and_get_matched_port_entry - Get matched hba port entry
 *					from HBA port table
 * @ioc: per adapter object
 * @port_entry: hba port entry from temporary port table which needs to be
 *		searched for matched entry in the HBA port table
 * @matched_port_entry: save matched hba port entry here
 * @count: count of matched entries; updated only when the best match
 *	   found is MATCHED_WITH_ADDR
 *
 * Walk the persistent port table (only entries still marked dirty are
 * candidates) looking for the entry that best matches @port_entry.
 * Preference order, strongest first: exact SAS address + phy mask;
 * address + phy-mask overlap + same port id; address + phy-mask overlap;
 * address alone. A stronger match found later replaces a weaker one
 * recorded earlier.
 *
 * return type of matched entry found.
 */
static enum hba_port_matched_codes
_scsih_look_and_get_matched_port_entry(struct MPT3SAS_ADAPTER *ioc,
	struct hba_port *port_entry,
	struct hba_port **matched_port_entry, int *count)
{
	struct hba_port *port_table_entry, *matched_port = NULL;
	enum hba_port_matched_codes matched_code = NOT_MATCHED;
	int lcount = 0;
	*matched_port_entry = NULL;

	list_for_each_entry(port_table_entry, &ioc->port_table_list, list) {
		/* Entries already claimed (not dirty) are not candidates. */
		if (!(port_table_entry->flags & HBA_PORT_FLAG_DIRTY_PORT))
			continue;

		/* Exact address + phy-mask match: best possible, stop here. */
		if ((port_table_entry->sas_address == port_entry->sas_address)
		    && (port_table_entry->phy_mask == port_entry->phy_mask)) {
			matched_code = MATCHED_WITH_ADDR_AND_PHYMASK;
			matched_port = port_table_entry;
			break;
		}

		/* Address + overlapping phy mask + same port id. */
		if ((port_table_entry->sas_address == port_entry->sas_address)
		    && (port_table_entry->phy_mask & port_entry->phy_mask)
		    && (port_table_entry->port_id == port_entry->port_id)) {
			matched_code = MATCHED_WITH_ADDR_SUBPHYMASK_AND_PORT;
			matched_port = port_table_entry;
			continue;
		}

		/* Address + overlapping phy mask; don't demote a
		 * stronger match already recorded.
		 */
		if ((port_table_entry->sas_address == port_entry->sas_address)
		    && (port_table_entry->phy_mask & port_entry->phy_mask)) {
			if (matched_code ==
			    MATCHED_WITH_ADDR_SUBPHYMASK_AND_PORT)
				continue;
			matched_code = MATCHED_WITH_ADDR_AND_SUBPHYMASK;
			matched_port = port_table_entry;
			continue;
		}

		/* Address-only match: weakest; count how many entries
		 * tie at this level so the caller can detect ambiguity.
		 */
		if (port_table_entry->sas_address == port_entry->sas_address) {
			if (matched_code ==
			    MATCHED_WITH_ADDR_SUBPHYMASK_AND_PORT)
				continue;
			if (matched_code == MATCHED_WITH_ADDR_AND_SUBPHYMASK)
				continue;
			matched_code = MATCHED_WITH_ADDR;
			matched_port = port_table_entry;
			lcount++;
		}
	}

	*matched_port_entry = matched_port;
	if (matched_code ==  MATCHED_WITH_ADDR)
		*count = lcount;
	return matched_code;
}
6584 
6585 /**
6586  * _scsih_del_phy_part_of_anther_port - remove phy if it
6587  *				is a part of anther port
6588  *@ioc: per adapter object
6589  *@port_table: port table after reset
6590  *@index: hba port entry index
6591  *@port_count: number of ports available after host reset
6592  *@offset: HBA phy bit offset
6593  *
6594  */
6595 static void
_scsih_del_phy_part_of_anther_port(struct MPT3SAS_ADAPTER * ioc,struct hba_port * port_table,int index,u8 port_count,int offset)6596 _scsih_del_phy_part_of_anther_port(struct MPT3SAS_ADAPTER *ioc,
6597 	struct hba_port *port_table,
6598 	int index, u8 port_count, int offset)
6599 {
6600 	struct _sas_node *sas_node = &ioc->sas_hba;
6601 	u32 i, found = 0;
6602 
6603 	for (i = 0; i < port_count; i++) {
6604 		if (i == index)
6605 			continue;
6606 
6607 		if (port_table[i].phy_mask & (1 << offset)) {
6608 			mpt3sas_transport_del_phy_from_an_existing_port(
6609 			    ioc, sas_node, &sas_node->phy[offset]);
6610 			found = 1;
6611 			break;
6612 		}
6613 	}
6614 	if (!found)
6615 		port_table[index].phy_mask |= (1 << offset);
6616 }
6617 
6618 /**
6619  * _scsih_add_or_del_phys_from_existing_port - add/remove phy to/from
6620  *						right port
6621  *@ioc: per adapter object
6622  *@hba_port_entry: hba port table entry
6623  *@port_table: temporary port table
6624  *@index: hba port entry index
6625  *@port_count: number of ports available after host reset
6626  *
6627  */
6628 static void
_scsih_add_or_del_phys_from_existing_port(struct MPT3SAS_ADAPTER * ioc,struct hba_port * hba_port_entry,struct hba_port * port_table,int index,int port_count)6629 _scsih_add_or_del_phys_from_existing_port(struct MPT3SAS_ADAPTER *ioc,
6630 	struct hba_port *hba_port_entry, struct hba_port *port_table,
6631 	int index, int port_count)
6632 {
6633 	u32 phy_mask, offset = 0;
6634 	struct _sas_node *sas_node = &ioc->sas_hba;
6635 
6636 	phy_mask = hba_port_entry->phy_mask ^ port_table[index].phy_mask;
6637 
6638 	for (offset = 0; offset < ioc->sas_hba.num_phys; offset++) {
6639 		if (phy_mask & (1 << offset)) {
6640 			if (!(port_table[index].phy_mask & (1 << offset))) {
6641 				_scsih_del_phy_part_of_anther_port(
6642 				    ioc, port_table, index, port_count,
6643 				    offset);
6644 				continue;
6645 			}
6646 			if (sas_node->phy[offset].phy_belongs_to_port)
6647 				mpt3sas_transport_del_phy_from_an_existing_port(
6648 				    ioc, sas_node, &sas_node->phy[offset]);
6649 			mpt3sas_transport_add_phy_to_an_existing_port(
6650 			    ioc, sas_node, &sas_node->phy[offset],
6651 			    hba_port_entry->sas_address,
6652 			    hba_port_entry);
6653 		}
6654 	}
6655 }
6656 
6657 /**
6658  * _scsih_del_dirty_vphy - delete virtual_phy objects marked as dirty.
6659  * @ioc: per adapter object
6660  *
6661  * Returns nothing.
6662  */
6663 static void
_scsih_del_dirty_vphy(struct MPT3SAS_ADAPTER * ioc)6664 _scsih_del_dirty_vphy(struct MPT3SAS_ADAPTER *ioc)
6665 {
6666 	struct hba_port *port, *port_next;
6667 	struct virtual_phy *vphy, *vphy_next;
6668 
6669 	list_for_each_entry_safe(port, port_next,
6670 	    &ioc->port_table_list, list) {
6671 		if (!port->vphys_mask)
6672 			continue;
6673 		list_for_each_entry_safe(vphy, vphy_next,
6674 		    &port->vphys_list, list) {
6675 			if (vphy->flags & MPT_VPHY_FLAG_DIRTY_PHY) {
6676 				drsprintk(ioc, ioc_info(ioc,
6677 				    "Deleting vphy %p entry from port id: %d\t, Phy_mask 0x%08x\n",
6678 				    vphy, port->port_id,
6679 				    vphy->phy_mask));
6680 				port->vphys_mask &= ~vphy->phy_mask;
6681 				list_del(&vphy->list);
6682 				kfree(vphy);
6683 			}
6684 		}
6685 		if (!port->vphys_mask && !port->sas_address)
6686 			port->flags |= HBA_PORT_FLAG_DIRTY_PORT;
6687 	}
6688 }
6689 
6690 /**
6691  * _scsih_del_dirty_port_entries - delete dirty port entries from port list
6692  *					after host reset
6693  *@ioc: per adapter object
6694  *
6695  */
6696 static void
_scsih_del_dirty_port_entries(struct MPT3SAS_ADAPTER * ioc)6697 _scsih_del_dirty_port_entries(struct MPT3SAS_ADAPTER *ioc)
6698 {
6699 	struct hba_port *port, *port_next;
6700 
6701 	list_for_each_entry_safe(port, port_next,
6702 	    &ioc->port_table_list, list) {
6703 		if (!(port->flags & HBA_PORT_FLAG_DIRTY_PORT) ||
6704 		    port->flags & HBA_PORT_FLAG_NEW_PORT)
6705 			continue;
6706 
6707 		drsprintk(ioc, ioc_info(ioc,
6708 		    "Deleting port table entry %p having Port: %d\t Phy_mask 0x%08x\n",
6709 		    port, port->port_id, port->phy_mask));
6710 		list_del(&port->list);
6711 		kfree(port);
6712 	}
6713 }
6714 
6715 /**
6716  * _scsih_sas_port_refresh - Update HBA port table after host reset
6717  * @ioc: per adapter object
6718  */
6719 static void
_scsih_sas_port_refresh(struct MPT3SAS_ADAPTER * ioc)6720 _scsih_sas_port_refresh(struct MPT3SAS_ADAPTER *ioc)
6721 {
6722 	u32 port_count = 0;
6723 	struct hba_port *port_table;
6724 	struct hba_port *port_table_entry;
6725 	struct hba_port *port_entry = NULL;
6726 	int i, j, count = 0, lcount = 0;
6727 	int ret;
6728 	u64 sas_addr;
6729 	u8 num_phys;
6730 
6731 	drsprintk(ioc, ioc_info(ioc,
6732 	    "updating ports for sas_host(0x%016llx)\n",
6733 	    (unsigned long long)ioc->sas_hba.sas_address));
6734 
6735 	mpt3sas_config_get_number_hba_phys(ioc, &num_phys);
6736 	if (!num_phys) {
6737 		ioc_err(ioc, "failure at %s:%d/%s()!\n",
6738 		    __FILE__, __LINE__, __func__);
6739 		return;
6740 	}
6741 
6742 	if (num_phys > ioc->sas_hba.nr_phys_allocated) {
6743 		ioc_err(ioc, "failure at %s:%d/%s()!\n",
6744 		   __FILE__, __LINE__, __func__);
6745 		return;
6746 	}
6747 	ioc->sas_hba.num_phys = num_phys;
6748 
6749 	port_table = kzalloc_objs(struct hba_port, ioc->sas_hba.num_phys);
6750 	if (!port_table)
6751 		return;
6752 
6753 	port_count = _scsih_get_port_table_after_reset(ioc, port_table);
6754 	if (!port_count)
6755 		return;
6756 
6757 	drsprintk(ioc, ioc_info(ioc, "New Port table\n"));
6758 	for (j = 0; j < port_count; j++)
6759 		drsprintk(ioc, ioc_info(ioc,
6760 		    "Port: %d\t Phy_mask 0x%08x\t sas_addr(0x%016llx)\n",
6761 		    port_table[j].port_id,
6762 		    port_table[j].phy_mask, port_table[j].sas_address));
6763 
6764 	list_for_each_entry(port_table_entry, &ioc->port_table_list, list)
6765 		port_table_entry->flags |= HBA_PORT_FLAG_DIRTY_PORT;
6766 
6767 	drsprintk(ioc, ioc_info(ioc, "Old Port table\n"));
6768 	port_table_entry = NULL;
6769 	list_for_each_entry(port_table_entry, &ioc->port_table_list, list) {
6770 		drsprintk(ioc, ioc_info(ioc,
6771 		    "Port: %d\t Phy_mask 0x%08x\t sas_addr(0x%016llx)\n",
6772 		    port_table_entry->port_id,
6773 		    port_table_entry->phy_mask,
6774 		    port_table_entry->sas_address));
6775 	}
6776 
6777 	for (j = 0; j < port_count; j++) {
6778 		ret = _scsih_look_and_get_matched_port_entry(ioc,
6779 		    &port_table[j], &port_entry, &count);
6780 		if (!port_entry) {
6781 			drsprintk(ioc, ioc_info(ioc,
6782 			    "No Matched entry for sas_addr(0x%16llx), Port:%d\n",
6783 			    port_table[j].sas_address,
6784 			    port_table[j].port_id));
6785 			continue;
6786 		}
6787 
6788 		switch (ret) {
6789 		case MATCHED_WITH_ADDR_SUBPHYMASK_AND_PORT:
6790 		case MATCHED_WITH_ADDR_AND_SUBPHYMASK:
6791 			_scsih_add_or_del_phys_from_existing_port(ioc,
6792 			    port_entry, port_table, j, port_count);
6793 			break;
6794 		case MATCHED_WITH_ADDR:
6795 			sas_addr = port_table[j].sas_address;
6796 			for (i = 0; i < port_count; i++) {
6797 				if (port_table[i].sas_address == sas_addr)
6798 					lcount++;
6799 			}
6800 
6801 			if (count > 1 || lcount > 1)
6802 				port_entry = NULL;
6803 			else
6804 				_scsih_add_or_del_phys_from_existing_port(ioc,
6805 				    port_entry, port_table, j, port_count);
6806 		}
6807 
6808 		if (!port_entry)
6809 			continue;
6810 
6811 		if (port_entry->port_id != port_table[j].port_id)
6812 			port_entry->port_id = port_table[j].port_id;
6813 		port_entry->flags &= ~HBA_PORT_FLAG_DIRTY_PORT;
6814 		port_entry->phy_mask = port_table[j].phy_mask;
6815 	}
6816 
6817 	port_table_entry = NULL;
6818 }
6819 
6820 /**
6821  * _scsih_alloc_vphy - allocate virtual_phy object
6822  * @ioc: per adapter object
6823  * @port_id: Port ID number
6824  * @phy_num: HBA Phy number
6825  *
6826  * Returns allocated virtual_phy object.
6827  */
6828 static struct virtual_phy *
_scsih_alloc_vphy(struct MPT3SAS_ADAPTER * ioc,u8 port_id,u8 phy_num)6829 _scsih_alloc_vphy(struct MPT3SAS_ADAPTER *ioc, u8 port_id, u8 phy_num)
6830 {
6831 	struct virtual_phy *vphy;
6832 	struct hba_port *port;
6833 
6834 	port = mpt3sas_get_port_by_id(ioc, port_id, 0);
6835 	if (!port)
6836 		return NULL;
6837 
6838 	vphy = mpt3sas_get_vphy_by_phy(ioc, port, phy_num);
6839 	if (!vphy) {
6840 		vphy = kzalloc_obj(struct virtual_phy);
6841 		if (!vphy)
6842 			return NULL;
6843 
6844 		if (!port->vphys_mask)
6845 			INIT_LIST_HEAD(&port->vphys_list);
6846 
6847 		/*
6848 		 * Enable bit corresponding to HBA phy number on its
6849 		 * parent hba_port object's vphys_mask field.
6850 		 */
6851 		port->vphys_mask |= (1 << phy_num);
6852 		vphy->phy_mask |= (1 << phy_num);
6853 
6854 		list_add_tail(&vphy->list, &port->vphys_list);
6855 
6856 		ioc_info(ioc,
6857 		    "vphy entry: %p, port id: %d, phy:%d is added to port's vphys_list\n",
6858 		    vphy, port->port_id, phy_num);
6859 	}
6860 	return vphy;
6861 }
6862 
/**
 * _scsih_sas_host_refresh - refreshing sas host object contents
 * @ioc: per adapter object
 * Context: user
 *
 * During port enable, fw will send topology events for every device. Its
 * possible that the handles may change from the previous setting, so this
 * code keeping handles updating if changed. Also creates hba_port and
 * virtual_phy objects for ports/vSES phys seen for the first time, adds
 * phys that appeared after a firmware upgrade/downgrade to the SAS
 * transport layer, and marks phys beyond the new num_phys as disabled.
 */
static void
_scsih_sas_host_refresh(struct MPT3SAS_ADAPTER *ioc)
{
	u16 sz;
	u16 ioc_status;
	int i;
	Mpi2ConfigReply_t mpi_reply;
	Mpi2SasIOUnitPage0_t *sas_iounit_pg0 = NULL;
	u16 attached_handle;
	u8 link_rate, port_id;
	struct hba_port *port;
	Mpi2SasPhyPage0_t phy_pg0;

	dtmprintk(ioc,
		  ioc_info(ioc, "updating handles for sas_host(0x%016llx)\n",
			   (u64)ioc->sas_hba.sas_address));

	sz = struct_size(sas_iounit_pg0, PhyData, ioc->sas_hba.num_phys);
	sas_iounit_pg0 = kzalloc(sz, GFP_KERNEL);
	if (!sas_iounit_pg0) {
		ioc_err(ioc, "failure at %s:%d/%s()!\n",
			__FILE__, __LINE__, __func__);
		return;
	}

	if ((mpt3sas_config_get_sas_iounit_pg0(ioc, &mpi_reply,
	    sas_iounit_pg0, sz)) != 0)
		goto out;
	ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & MPI2_IOCSTATUS_MASK;
	if (ioc_status != MPI2_IOCSTATUS_SUCCESS)
		goto out;
	for (i = 0; i < ioc->sas_hba.num_phys ; i++) {
		link_rate = sas_iounit_pg0->PhyData[i].NegotiatedLinkRate >> 4;
		/* The controller device handle is the same for all phys. */
		if (i == 0)
			ioc->sas_hba.handle = le16_to_cpu(
			    sas_iounit_pg0->PhyData[0].ControllerDevHandle);
		port_id = sas_iounit_pg0->PhyData[i].Port;
		/* Create an hba_port object for a Port ID seen first time. */
		if (!(mpt3sas_get_port_by_id(ioc, port_id, 0))) {
			port = kzalloc_obj(struct hba_port);
			if (!port)
				goto out;

			port->port_id = port_id;
			ioc_info(ioc,
			    "hba_port entry: %p, port: %d is added to hba_port list\n",
			    port, port->port_id);
			if (ioc->shost_recovery)
				port->flags = HBA_PORT_FLAG_NEW_PORT;
			list_add_tail(&port->list, &ioc->port_table_list);
		}
		/*
		 * Check whether current Phy belongs to HBA vSES device or not.
		 */
		if (le32_to_cpu(sas_iounit_pg0->PhyData[i].ControllerPhyDeviceInfo) &
		    MPI2_SAS_DEVICE_INFO_SEP &&
		    (link_rate >=  MPI2_SAS_NEG_LINK_RATE_1_5)) {
			if ((mpt3sas_config_get_phy_pg0(ioc, &mpi_reply,
			    &phy_pg0, i))) {
				ioc_err(ioc,
				    "failure at %s:%d/%s()!\n",
				     __FILE__, __LINE__, __func__);
				goto out;
			}
			if (!(le32_to_cpu(phy_pg0.PhyInfo) &
			    MPI2_SAS_PHYINFO_VIRTUAL_PHY))
				continue;
			/*
			 * Allocate a virtual_phy object for vSES device, if
			 * this vSES device is hot added.
			 */
			if (!_scsih_alloc_vphy(ioc, port_id, i))
				goto out;
			ioc->sas_hba.phy[i].hba_vphy = 1;
		}

		/*
		 * Add new HBA phys to STL if these new phys got added as part
		 * of HBA Firmware upgrade/downgrade operation.
		 */
		if (!ioc->sas_hba.phy[i].phy) {
			if ((mpt3sas_config_get_phy_pg0(ioc, &mpi_reply,
							&phy_pg0, i))) {
				ioc_err(ioc, "failure at %s:%d/%s()!\n",
					__FILE__, __LINE__, __func__);
				continue;
			}
			ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
				MPI2_IOCSTATUS_MASK;
			if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
				ioc_err(ioc, "failure at %s:%d/%s()!\n",
					__FILE__, __LINE__, __func__);
				continue;
			}
			ioc->sas_hba.phy[i].phy_id = i;
			mpt3sas_transport_add_host_phy(ioc,
				&ioc->sas_hba.phy[i], phy_pg0,
				ioc->sas_hba.parent_dev);
			continue;
		}
		ioc->sas_hba.phy[i].handle = ioc->sas_hba.handle;
		attached_handle = le16_to_cpu(sas_iounit_pg0->PhyData[i].
		    AttachedDevHandle);
		/* Report at least 1.5G for any phy with a device attached. */
		if (attached_handle && link_rate < MPI2_SAS_NEG_LINK_RATE_1_5)
			link_rate = MPI2_SAS_NEG_LINK_RATE_1_5;
		ioc->sas_hba.phy[i].port =
		    mpt3sas_get_port_by_id(ioc, port_id, 0);
		mpt3sas_transport_update_links(ioc, ioc->sas_hba.sas_address,
		    attached_handle, i, link_rate,
		    ioc->sas_hba.phy[i].port);
	}
	/*
	 * Clear the phy details if this phy got disabled as part of
	 * HBA Firmware upgrade/downgrade operation.
	 */
	for (i = ioc->sas_hba.num_phys;
	     i < ioc->sas_hba.nr_phys_allocated; i++) {
		if (ioc->sas_hba.phy[i].phy &&
		    ioc->sas_hba.phy[i].phy->negotiated_linkrate >=
		    SAS_LINK_RATE_1_5_GBPS)
			mpt3sas_transport_update_links(ioc,
				ioc->sas_hba.sas_address, 0, i,
				MPI2_SAS_NEG_LINK_RATE_PHY_DISABLED, NULL);
	}
 out:
	kfree(sas_iounit_pg0);
}
6998 
/**
 * _scsih_sas_host_add - create sas host object
 * @ioc: per adapter object
 *
 * Creating host side data object, stored in ioc->sas_hba: reads the SAS
 * IO Unit pages 0/1 and per-phy SASPhyPage0 from the controller,
 * populates the phy array and hba_port list, registers each phy with
 * the SAS transport layer, and records the host's handle, SAS address
 * and enclosure information.
 */
static void
_scsih_sas_host_add(struct MPT3SAS_ADAPTER *ioc)
{
	int i;
	Mpi2ConfigReply_t mpi_reply;
	Mpi2SasIOUnitPage0_t *sas_iounit_pg0 = NULL;
	Mpi2SasIOUnitPage1_t *sas_iounit_pg1 = NULL;
	Mpi2SasPhyPage0_t phy_pg0;
	Mpi2SasDevicePage0_t sas_device_pg0;
	Mpi2SasEnclosurePage0_t enclosure_pg0;
	u16 ioc_status;
	u16 sz;
	u8 device_missing_delay;
	u8 num_phys, port_id;
	struct hba_port *port;

	mpt3sas_config_get_number_hba_phys(ioc, &num_phys);
	if (!num_phys) {
		ioc_err(ioc, "failure at %s:%d/%s()!\n",
			__FILE__, __LINE__, __func__);
		return;
	}

	/*
	 * Over-allocate the phy array so phys added later by a firmware
	 * upgrade still fit.
	 */
	ioc->sas_hba.nr_phys_allocated = max_t(u8,
	    MPT_MAX_HBA_NUM_PHYS, num_phys);
	ioc->sas_hba.phy = kzalloc_objs(struct _sas_phy,
					ioc->sas_hba.nr_phys_allocated);
	if (!ioc->sas_hba.phy) {
		ioc_err(ioc, "failure at %s:%d/%s()!\n",
			__FILE__, __LINE__, __func__);
		goto out;
	}
	ioc->sas_hba.num_phys = num_phys;

	/* sas_iounit page 0 */
	sz = struct_size(sas_iounit_pg0, PhyData, ioc->sas_hba.num_phys);
	sas_iounit_pg0 = kzalloc(sz, GFP_KERNEL);
	if (!sas_iounit_pg0) {
		ioc_err(ioc, "failure at %s:%d/%s()!\n",
			__FILE__, __LINE__, __func__);
		return;
	}
	if ((mpt3sas_config_get_sas_iounit_pg0(ioc, &mpi_reply,
	    sas_iounit_pg0, sz))) {
		ioc_err(ioc, "failure at %s:%d/%s()!\n",
			__FILE__, __LINE__, __func__);
		goto out;
	}
	ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
	    MPI2_IOCSTATUS_MASK;
	if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
		ioc_err(ioc, "failure at %s:%d/%s()!\n",
			__FILE__, __LINE__, __func__);
		goto out;
	}

	/* sas_iounit page 1 */
	sz = struct_size(sas_iounit_pg1, PhyData, ioc->sas_hba.num_phys);
	sas_iounit_pg1 = kzalloc(sz, GFP_KERNEL);
	if (!sas_iounit_pg1) {
		ioc_err(ioc, "failure at %s:%d/%s()!\n",
			__FILE__, __LINE__, __func__);
		goto out;
	}
	if ((mpt3sas_config_get_sas_iounit_pg1(ioc, &mpi_reply,
	    sas_iounit_pg1, sz))) {
		ioc_err(ioc, "failure at %s:%d/%s()!\n",
			__FILE__, __LINE__, __func__);
		goto out;
	}
	ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
	    MPI2_IOCSTATUS_MASK;
	if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
		ioc_err(ioc, "failure at %s:%d/%s()!\n",
			__FILE__, __LINE__, __func__);
		goto out;
	}

	/*
	 * Device-missing delay: a set UNIT_16 bit means the timeout field
	 * is expressed in units of 16 seconds.
	 */
	ioc->io_missing_delay =
	    sas_iounit_pg1->IODeviceMissingDelay;
	device_missing_delay =
	    sas_iounit_pg1->ReportDeviceMissingDelay;
	if (device_missing_delay & MPI2_SASIOUNIT1_REPORT_MISSING_UNIT_16)
		ioc->device_missing_delay = (device_missing_delay &
		    MPI2_SASIOUNIT1_REPORT_MISSING_TIMEOUT_MASK) * 16;
	else
		ioc->device_missing_delay = device_missing_delay &
		    MPI2_SASIOUNIT1_REPORT_MISSING_TIMEOUT_MASK;

	ioc->sas_hba.parent_dev = &ioc->shost->shost_gendev;
	for (i = 0; i < ioc->sas_hba.num_phys ; i++) {
		if ((mpt3sas_config_get_phy_pg0(ioc, &mpi_reply, &phy_pg0,
		    i))) {
			ioc_err(ioc, "failure at %s:%d/%s()!\n",
				__FILE__, __LINE__, __func__);
			goto out;
		}
		ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
		    MPI2_IOCSTATUS_MASK;
		if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
			ioc_err(ioc, "failure at %s:%d/%s()!\n",
				__FILE__, __LINE__, __func__);
			goto out;
		}

		/* The controller device handle is the same for all phys. */
		if (i == 0)
			ioc->sas_hba.handle = le16_to_cpu(sas_iounit_pg0->
			    PhyData[0].ControllerDevHandle);

		/* Create an hba_port object for a Port ID seen first time. */
		port_id = sas_iounit_pg0->PhyData[i].Port;
		if (!(mpt3sas_get_port_by_id(ioc, port_id, 0))) {
			port = kzalloc_obj(struct hba_port);
			if (!port)
				goto out;

			port->port_id = port_id;
			ioc_info(ioc,
			   "hba_port entry: %p, port: %d is added to hba_port list\n",
			   port, port->port_id);
			list_add_tail(&port->list,
			    &ioc->port_table_list);
		}

		/*
		 * Check whether current Phy belongs to HBA vSES device or not.
		 */
		if ((le32_to_cpu(phy_pg0.PhyInfo) &
		    MPI2_SAS_PHYINFO_VIRTUAL_PHY) &&
		    (phy_pg0.NegotiatedLinkRate >> 4) >=
		    MPI2_SAS_NEG_LINK_RATE_1_5) {
			/*
			 * Allocate a virtual_phy object for vSES device.
			 */
			if (!_scsih_alloc_vphy(ioc, port_id, i))
				goto out;
			ioc->sas_hba.phy[i].hba_vphy = 1;
		}

		ioc->sas_hba.phy[i].handle = ioc->sas_hba.handle;
		ioc->sas_hba.phy[i].phy_id = i;
		ioc->sas_hba.phy[i].port =
		    mpt3sas_get_port_by_id(ioc, port_id, 0);
		mpt3sas_transport_add_host_phy(ioc, &ioc->sas_hba.phy[i],
		    phy_pg0, ioc->sas_hba.parent_dev);
	}
	if ((mpt3sas_config_get_sas_device_pg0(ioc, &mpi_reply, &sas_device_pg0,
	    MPI2_SAS_DEVICE_PGAD_FORM_HANDLE, ioc->sas_hba.handle))) {
		ioc_err(ioc, "failure at %s:%d/%s()!\n",
			__FILE__, __LINE__, __func__);
		goto out;
	}
	ioc->sas_hba.enclosure_handle =
	    le16_to_cpu(sas_device_pg0.EnclosureHandle);
	ioc->sas_hba.sas_address = le64_to_cpu(sas_device_pg0.SASAddress);
	ioc_info(ioc, "host_add: handle(0x%04x), sas_addr(0x%016llx), phys(%d)\n",
		 ioc->sas_hba.handle,
		 (u64)ioc->sas_hba.sas_address,
		 ioc->sas_hba.num_phys);

	/* Cache the enclosure logical id when the host sits in one. */
	if (ioc->sas_hba.enclosure_handle) {
		if (!(mpt3sas_config_get_enclosure_pg0(ioc, &mpi_reply,
		    &enclosure_pg0, MPI2_SAS_ENCLOS_PGAD_FORM_HANDLE,
		   ioc->sas_hba.enclosure_handle)))
			ioc->sas_hba.enclosure_logical_id =
			    le64_to_cpu(enclosure_pg0.EnclosureLogicalID);
	}

 out:
	kfree(sas_iounit_pg1);
	kfree(sas_iounit_pg0);
}
7176 
7177 /**
7178  * _scsih_expander_add -  creating expander object
7179  * @ioc: per adapter object
7180  * @handle: expander handle
7181  *
7182  * Creating expander object, stored in ioc->sas_expander_list.
7183  *
7184  * Return: 0 for success, else error.
7185  */
7186 static int
_scsih_expander_add(struct MPT3SAS_ADAPTER * ioc,u16 handle)7187 _scsih_expander_add(struct MPT3SAS_ADAPTER *ioc, u16 handle)
7188 {
7189 	struct _sas_node *sas_expander;
7190 	struct _enclosure_node *enclosure_dev;
7191 	Mpi2ConfigReply_t mpi_reply;
7192 	Mpi2ExpanderPage0_t expander_pg0;
7193 	Mpi2ExpanderPage1_t expander_pg1;
7194 	u32 ioc_status;
7195 	u16 parent_handle;
7196 	u64 sas_address, sas_address_parent = 0;
7197 	int i;
7198 	unsigned long flags;
7199 	struct _sas_port *mpt3sas_port = NULL;
7200 	u8 port_id;
7201 
7202 	int rc = 0;
7203 
7204 	if (!handle)
7205 		return -1;
7206 
7207 	if (ioc->shost_recovery || ioc->pci_error_recovery)
7208 		return -1;
7209 
7210 	if ((mpt3sas_config_get_expander_pg0(ioc, &mpi_reply, &expander_pg0,
7211 	    MPI2_SAS_EXPAND_PGAD_FORM_HNDL, handle))) {
7212 		ioc_err(ioc, "failure at %s:%d/%s()!\n",
7213 			__FILE__, __LINE__, __func__);
7214 		return -1;
7215 	}
7216 
7217 	ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
7218 	    MPI2_IOCSTATUS_MASK;
7219 	if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
7220 		ioc_err(ioc, "failure at %s:%d/%s()!\n",
7221 			__FILE__, __LINE__, __func__);
7222 		return -1;
7223 	}
7224 
7225 	/* handle out of order topology events */
7226 	parent_handle = le16_to_cpu(expander_pg0.ParentDevHandle);
7227 	if (_scsih_get_sas_address(ioc, parent_handle, &sas_address_parent)
7228 	    != 0) {
7229 		ioc_err(ioc, "failure at %s:%d/%s()!\n",
7230 			__FILE__, __LINE__, __func__);
7231 		return -1;
7232 	}
7233 
7234 	port_id = expander_pg0.PhysicalPort;
7235 	if (sas_address_parent != ioc->sas_hba.sas_address) {
7236 		spin_lock_irqsave(&ioc->sas_node_lock, flags);
7237 		sas_expander = mpt3sas_scsih_expander_find_by_sas_address(ioc,
7238 		    sas_address_parent,
7239 		    mpt3sas_get_port_by_id(ioc, port_id, 0));
7240 		spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
7241 		if (!sas_expander) {
7242 			rc = _scsih_expander_add(ioc, parent_handle);
7243 			if (rc != 0)
7244 				return rc;
7245 		}
7246 	}
7247 
7248 	spin_lock_irqsave(&ioc->sas_node_lock, flags);
7249 	sas_address = le64_to_cpu(expander_pg0.SASAddress);
7250 	sas_expander = mpt3sas_scsih_expander_find_by_sas_address(ioc,
7251 	    sas_address, mpt3sas_get_port_by_id(ioc, port_id, 0));
7252 	spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
7253 
7254 	if (sas_expander)
7255 		return 0;
7256 
7257 	sas_expander = kzalloc_obj(struct _sas_node);
7258 	if (!sas_expander) {
7259 		ioc_err(ioc, "failure at %s:%d/%s()!\n",
7260 			__FILE__, __LINE__, __func__);
7261 		return -1;
7262 	}
7263 
7264 	sas_expander->handle = handle;
7265 	sas_expander->num_phys = expander_pg0.NumPhys;
7266 	sas_expander->sas_address_parent = sas_address_parent;
7267 	sas_expander->sas_address = sas_address;
7268 	sas_expander->port = mpt3sas_get_port_by_id(ioc, port_id, 0);
7269 	if (!sas_expander->port) {
7270 		ioc_err(ioc, "failure at %s:%d/%s()!\n",
7271 		    __FILE__, __LINE__, __func__);
7272 		rc = -1;
7273 		goto out_fail;
7274 	}
7275 
7276 	ioc_info(ioc, "expander_add: handle(0x%04x), parent(0x%04x), sas_addr(0x%016llx), phys(%d)\n",
7277 		 handle, parent_handle,
7278 		 (u64)sas_expander->sas_address, sas_expander->num_phys);
7279 
7280 	if (!sas_expander->num_phys) {
7281 		rc = -1;
7282 		goto out_fail;
7283 	}
7284 	sas_expander->phy = kzalloc_objs(struct _sas_phy,
7285 					 sas_expander->num_phys);
7286 	if (!sas_expander->phy) {
7287 		ioc_err(ioc, "failure at %s:%d/%s()!\n",
7288 			__FILE__, __LINE__, __func__);
7289 		rc = -1;
7290 		goto out_fail;
7291 	}
7292 
7293 	INIT_LIST_HEAD(&sas_expander->sas_port_list);
7294 	mpt3sas_port = mpt3sas_transport_port_add(ioc, handle,
7295 	    sas_address_parent, sas_expander->port);
7296 	if (!mpt3sas_port) {
7297 		ioc_err(ioc, "failure at %s:%d/%s()!\n",
7298 			__FILE__, __LINE__, __func__);
7299 		rc = -1;
7300 		goto out_fail;
7301 	}
7302 	sas_expander->parent_dev = &mpt3sas_port->rphy->dev;
7303 	sas_expander->rphy = mpt3sas_port->rphy;
7304 
7305 	for (i = 0 ; i < sas_expander->num_phys ; i++) {
7306 		if ((mpt3sas_config_get_expander_pg1(ioc, &mpi_reply,
7307 		    &expander_pg1, i, handle))) {
7308 			ioc_err(ioc, "failure at %s:%d/%s()!\n",
7309 				__FILE__, __LINE__, __func__);
7310 			rc = -1;
7311 			goto out_fail;
7312 		}
7313 		sas_expander->phy[i].handle = handle;
7314 		sas_expander->phy[i].phy_id = i;
7315 		sas_expander->phy[i].port =
7316 		    mpt3sas_get_port_by_id(ioc, port_id, 0);
7317 
7318 		if ((mpt3sas_transport_add_expander_phy(ioc,
7319 		    &sas_expander->phy[i], expander_pg1,
7320 		    sas_expander->parent_dev))) {
7321 			ioc_err(ioc, "failure at %s:%d/%s()!\n",
7322 				__FILE__, __LINE__, __func__);
7323 			rc = -1;
7324 			goto out_fail;
7325 		}
7326 	}
7327 
7328 	if (sas_expander->enclosure_handle) {
7329 		enclosure_dev =
7330 			mpt3sas_scsih_enclosure_find_by_handle(ioc,
7331 						sas_expander->enclosure_handle);
7332 		if (enclosure_dev)
7333 			sas_expander->enclosure_logical_id =
7334 			    le64_to_cpu(enclosure_dev->pg0.EnclosureLogicalID);
7335 	}
7336 
7337 	_scsih_expander_node_add(ioc, sas_expander);
7338 	return 0;
7339 
7340  out_fail:
7341 
7342 	if (mpt3sas_port)
7343 		mpt3sas_transport_port_remove(ioc, sas_expander->sas_address,
7344 		    sas_address_parent, sas_expander->port);
7345 	kfree(sas_expander);
7346 	return rc;
7347 }
7348 
7349 /**
7350  * mpt3sas_expander_remove - removing expander object
7351  * @ioc: per adapter object
7352  * @sas_address: expander sas_address
7353  * @port: hba port entry
7354  */
7355 void
mpt3sas_expander_remove(struct MPT3SAS_ADAPTER * ioc,u64 sas_address,struct hba_port * port)7356 mpt3sas_expander_remove(struct MPT3SAS_ADAPTER *ioc, u64 sas_address,
7357 	struct hba_port *port)
7358 {
7359 	struct _sas_node *sas_expander;
7360 	unsigned long flags;
7361 
7362 	if (ioc->shost_recovery)
7363 		return;
7364 
7365 	if (!port)
7366 		return;
7367 
7368 	spin_lock_irqsave(&ioc->sas_node_lock, flags);
7369 	sas_expander = mpt3sas_scsih_expander_find_by_sas_address(ioc,
7370 	    sas_address, port);
7371 	spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
7372 	if (sas_expander)
7373 		_scsih_expander_node_remove(ioc, sas_expander);
7374 }
7375 
7376 /**
7377  * _scsih_done -  internal SCSI_IO callback handler.
7378  * @ioc: per adapter object
7379  * @smid: system request message index
7380  * @msix_index: MSIX table index supplied by the OS
7381  * @reply: reply message frame(lower 32bit addr)
7382  *
7383  * Callback handler when sending internal generated SCSI_IO.
7384  * The callback index passed is `ioc->scsih_cb_idx`
7385  *
7386  * Return: 1 meaning mf should be freed from _base_interrupt
7387  *         0 means the mf is freed from this function.
7388  */
7389 static u8
_scsih_done(struct MPT3SAS_ADAPTER * ioc,u16 smid,u8 msix_index,u32 reply)7390 _scsih_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index, u32 reply)
7391 {
7392 	MPI2DefaultReply_t *mpi_reply;
7393 
7394 	mpi_reply =  mpt3sas_base_get_reply_virt_addr(ioc, reply);
7395 	if (ioc->scsih_cmds.status == MPT3_CMD_NOT_USED)
7396 		return 1;
7397 	if (ioc->scsih_cmds.smid != smid)
7398 		return 1;
7399 	ioc->scsih_cmds.status |= MPT3_CMD_COMPLETE;
7400 	if (mpi_reply) {
7401 		memcpy(ioc->scsih_cmds.reply, mpi_reply,
7402 		    mpi_reply->MsgLength*4);
7403 		ioc->scsih_cmds.status |= MPT3_CMD_REPLY_VALID;
7404 	}
7405 	ioc->scsih_cmds.status &= ~MPT3_CMD_PENDING;
7406 	complete(&ioc->scsih_cmds.done);
7407 	return 1;
7408 }
7409 
7410 /**
7411  * _scsi_send_scsi_io - send internal SCSI_IO to target
7412  * @ioc: per adapter object
7413  * @transfer_packet: packet describing the transfer
7414  * @tr_timeout: Target Reset Timeout
7415  * @tr_method: Target Reset Method
7416  * Context: user
7417  *
7418  * Returns 0 for success, non-zero for failure.
7419  */
static int
_scsi_send_scsi_io(struct MPT3SAS_ADAPTER *ioc, struct _scsi_io_transfer
	*transfer_packet, u8 tr_timeout, u8 tr_method)
{
	Mpi2SCSIIOReply_t *mpi_reply;
	Mpi2SCSIIORequest_t *mpi_request;
	u16 smid;
	u8 issue_reset = 0;
	int rc;
	void *priv_sense;
	u32 mpi_control;
	void *psge;
	dma_addr_t data_out_dma = 0;
	dma_addr_t data_in_dma = 0;
	size_t data_in_sz = 0;
	size_t data_out_sz = 0;
	u16 handle;
	u8 retry_count = 0, host_reset_count = 0;
	int tm_return_code;

	/* Bail out early if the adapter is in an unusable state. */
	if (ioc->pci_error_recovery) {
		pr_info("%s: pci error recovery in progress!\n", __func__);
		return -EFAULT;
	}

	if (ioc->shost_recovery) {
		pr_info("%s: host recovery in progress!\n", __func__);
		return -EAGAIN;
	}

	handle = transfer_packet->handle;
	if (handle == MPT3SAS_INVALID_DEVICE_HANDLE) {
		pr_info("%s: no device!\n",  __func__);
		return -EFAULT;
	}

	/* Serializes all internal scsih I/O; one command in flight at a time. */
	mutex_lock(&ioc->scsih_cmds.mutex);

	if (ioc->scsih_cmds.status != MPT3_CMD_NOT_USED) {
		pr_err("%s: scsih_cmd in use\n", __func__);
		rc = -EAGAIN;
		goto out;
	}

	/* Re-entered from issue_target_reset after a successful target reset. */
 retry_loop:
	if (test_bit(handle, ioc->device_remove_in_progress)) {
		pr_info("%s: device removal in progress\n", __func__);
		rc = -EFAULT;
		goto out;
	}

	ioc->scsih_cmds.status = MPT3_CMD_PENDING;

	/* Wait (up to 10s) for the IOC to become operational. */
	rc = mpt3sas_wait_for_ioc(ioc, 10);
	if (rc)
		goto out;

	/* Use second reserved smid for discovery related IOs */
	smid = ioc->shost->can_queue + INTERNAL_SCSIIO_FOR_DISCOVERY;

	rc = 0;
	mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
	ioc->scsih_cmds.smid = smid;
	memset(mpi_request, 0, sizeof(Mpi2SCSIIORequest_t));
	/* Hidden RAID components must go through the RAID passthrough path. */
	if (transfer_packet->is_raid)
		mpi_request->Function = MPI2_FUNCTION_RAID_SCSI_IO_PASSTHROUGH;
	else
		mpi_request->Function = MPI2_FUNCTION_SCSI_IO_REQUEST;
	mpi_request->DevHandle = cpu_to_le16(handle);

	/* Translate the DMA direction into MPI control flags + buffers. */
	switch (transfer_packet->dir) {
	case DMA_TO_DEVICE:
		mpi_control = MPI2_SCSIIO_CONTROL_WRITE;
		data_out_dma = transfer_packet->data_dma;
		data_out_sz = transfer_packet->data_length;
		break;
	case DMA_FROM_DEVICE:
		mpi_control = MPI2_SCSIIO_CONTROL_READ;
		data_in_dma = transfer_packet->data_dma;
		data_in_sz = transfer_packet->data_length;
		break;
	case DMA_BIDIRECTIONAL:
		mpi_control = MPI2_SCSIIO_CONTROL_BIDIRECTIONAL;
		/* TODO - is BIDI support needed ?? */
		WARN_ON_ONCE(true);
		break;
	default:
	case DMA_NONE:
		mpi_control = MPI2_SCSIIO_CONTROL_NODATATRANSFER;
		break;
	}

	/* Build the scatter-gather list via the adapter-specific method. */
	psge = &mpi_request->SGL;
	ioc->build_sg(ioc, psge, data_out_dma, data_out_sz, data_in_dma,
	    data_in_sz);

	mpi_request->Control = cpu_to_le32(mpi_control |
	    MPI2_SCSIIO_CONTROL_SIMPLEQ);
	mpi_request->DataLength = cpu_to_le32(transfer_packet->data_length);
	mpi_request->MsgFlags = MPI2_SCSIIO_MSGFLAGS_SYSTEM_SENSE_ADDR;
	mpi_request->SenseBufferLength = SCSI_SENSE_BUFFERSIZE;
	mpi_request->SenseBufferLowAddress =
	    mpt3sas_base_get_sense_buffer_dma(ioc, smid);
	/* Keep a CPU-side pointer to the sense buffer for the reply copy. */
	priv_sense = mpt3sas_base_get_sense_buffer(ioc, smid);
	mpi_request->SGLOffset0 = offsetof(Mpi2SCSIIORequest_t, SGL) / 4;
	mpi_request->IoFlags = cpu_to_le16(transfer_packet->cdb_length);
	int_to_scsilun(transfer_packet->lun, (struct scsi_lun *)
	    mpi_request->LUN);
	memcpy(mpi_request->CDB.CDB32, transfer_packet->cdb,
	    transfer_packet->cdb_length);
	init_completion(&ioc->scsih_cmds.done);
	if (likely(mpi_request->Function == MPI2_FUNCTION_SCSI_IO_REQUEST))
		ioc->put_smid_scsi_io(ioc, smid, handle);
	else
		ioc->put_smid_default(ioc, smid);
	/* Completion is signalled from _scsih_done(); bounded by timeout. */
	wait_for_completion_timeout(&ioc->scsih_cmds.done,
	    transfer_packet->timeout*HZ);
	if (!(ioc->scsih_cmds.status & MPT3_CMD_COMPLETE)) {
		/*
		 * NOTE(review): mpt3sas_check_cmd_timeout appears to be a
		 * macro that updates issue_reset in place - confirm against
		 * mpt3sas_base.h; issue_reset is read below.
		 */
		mpt3sas_check_cmd_timeout(ioc,
		    ioc->scsih_cmds.status, mpi_request,
		    sizeof(Mpi2SCSIIORequest_t)/4, issue_reset);
		goto issue_target_reset;
	}
	/* Command completed: mirror the reply frame into transfer_packet. */
	if (ioc->scsih_cmds.status & MPT3_CMD_REPLY_VALID) {
		transfer_packet->valid_reply = 1;
		mpi_reply = ioc->scsih_cmds.reply;
		transfer_packet->sense_length =
		   le32_to_cpu(mpi_reply->SenseCount);
		if (transfer_packet->sense_length)
			memcpy(transfer_packet->sense, priv_sense,
			    transfer_packet->sense_length);
		transfer_packet->transfer_length =
		    le32_to_cpu(mpi_reply->TransferCount);
		transfer_packet->ioc_status =
		    le16_to_cpu(mpi_reply->IOCStatus) &
		    MPI2_IOCSTATUS_MASK;
		transfer_packet->scsi_state = mpi_reply->SCSIState;
		transfer_packet->scsi_status = mpi_reply->SCSIStatus;
		transfer_packet->log_info =
		    le32_to_cpu(mpi_reply->IOCLogInfo);
	}
	goto out;

	/* Timed out: escalate to a target reset, then possibly retry. */
 issue_target_reset:
	if (issue_reset) {
		pr_info("issue target reset: handle (0x%04x)\n", handle);
		tm_return_code =
			mpt3sas_scsih_issue_locked_tm(ioc, handle,
				0xFFFFFFFF, 0xFFFFFFFF, 0,
				MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET, smid, 0,
				tr_timeout, tr_method);

		if (tm_return_code == SUCCESS) {
			pr_info("target reset completed: handle (0x%04x)\n", handle);
			/* If the command is successfully aborted due to
			 * target reset TM then do up to three retries else
			 * command will be terminated by the host reset TM and
			 * hence retry once.
			 */
			if (((ioc->scsih_cmds.status & MPT3_CMD_COMPLETE) &&
			    retry_count++ < 3) ||
			    ((ioc->scsih_cmds.status & MPT3_CMD_RESET) &&
			    host_reset_count++ == 0)) {
				pr_info("issue retry: handle (0x%04x)\n", handle);
				goto retry_loop;
			}
		} else
			pr_info("target reset didn't complete:  handle(0x%04x)\n", handle);
		rc = -EFAULT;
	} else
		rc = -EAGAIN;

 out:
	/* Always release the reserved command slot and the mutex. */
	ioc->scsih_cmds.status = MPT3_CMD_NOT_USED;
	mutex_unlock(&ioc->scsih_cmds.mutex);
	return rc;
}
7597 
7598 /**
7599  * _scsih_determine_disposition -
7600  * @ioc: per adapter object
7601  * @transfer_packet: packet describing the transfer
7602  * Context: user
7603  *
7604  * Determines if an internal generated scsi_io is good data, or
7605  * whether it needs to be retried or treated as an error.
7606  *
7607  * Returns device_responsive_state
7608  */
7609 static enum device_responsive_state
_scsih_determine_disposition(struct MPT3SAS_ADAPTER * ioc,struct _scsi_io_transfer * transfer_packet)7610 _scsih_determine_disposition(struct MPT3SAS_ADAPTER *ioc,
7611 	struct _scsi_io_transfer *transfer_packet)
7612 {
7613 	static enum device_responsive_state rc;
7614 	struct sense_info sense_info = {0, 0, 0};
7615 	u8 check_sense = 0;
7616 	char *desc = NULL;
7617 
7618 	if (!transfer_packet->valid_reply)
7619 		return DEVICE_READY;
7620 
7621 	switch (transfer_packet->ioc_status) {
7622 	case MPI2_IOCSTATUS_BUSY:
7623 	case MPI2_IOCSTATUS_INSUFFICIENT_RESOURCES:
7624 	case MPI2_IOCSTATUS_SCSI_TASK_TERMINATED:
7625 	case MPI2_IOCSTATUS_SCSI_IO_DATA_ERROR:
7626 	case MPI2_IOCSTATUS_SCSI_EXT_TERMINATED:
7627 		rc = DEVICE_RETRY;
7628 		break;
7629 	case MPI2_IOCSTATUS_SCSI_IOC_TERMINATED:
7630 		if (transfer_packet->log_info ==  0x31170000) {
7631 			rc = DEVICE_RETRY;
7632 			break;
7633 		}
7634 		if (transfer_packet->cdb[0] == REPORT_LUNS)
7635 			rc = DEVICE_READY;
7636 		else
7637 			rc = DEVICE_RETRY;
7638 		break;
7639 	case MPI2_IOCSTATUS_SCSI_DATA_UNDERRUN:
7640 	case MPI2_IOCSTATUS_SCSI_RECOVERED_ERROR:
7641 	case MPI2_IOCSTATUS_SUCCESS:
7642 		if (!transfer_packet->scsi_state &&
7643 		    !transfer_packet->scsi_status) {
7644 			rc = DEVICE_READY;
7645 			break;
7646 		}
7647 		if (transfer_packet->scsi_state &
7648 		    MPI2_SCSI_STATE_AUTOSENSE_VALID) {
7649 			rc = DEVICE_ERROR;
7650 			check_sense = 1;
7651 			break;
7652 		}
7653 		if (transfer_packet->scsi_state &
7654 		    (MPI2_SCSI_STATE_AUTOSENSE_FAILED |
7655 		    MPI2_SCSI_STATE_NO_SCSI_STATUS |
7656 		    MPI2_SCSI_STATE_TERMINATED)) {
7657 			rc = DEVICE_RETRY;
7658 			break;
7659 		}
7660 		if (transfer_packet->scsi_status >=
7661 		    MPI2_SCSI_STATUS_BUSY) {
7662 			rc = DEVICE_RETRY;
7663 			break;
7664 		}
7665 		rc = DEVICE_READY;
7666 		break;
7667 	case MPI2_IOCSTATUS_SCSI_PROTOCOL_ERROR:
7668 		if (transfer_packet->scsi_state &
7669 		    MPI2_SCSI_STATE_TERMINATED)
7670 			rc = DEVICE_RETRY;
7671 		else
7672 			rc = DEVICE_ERROR;
7673 		break;
7674 	case MPI2_IOCSTATUS_INSUFFICIENT_POWER:
7675 	default:
7676 		rc = DEVICE_ERROR;
7677 		break;
7678 	}
7679 
7680 	if (check_sense) {
7681 		_scsih_normalize_sense(transfer_packet->sense, &sense_info);
7682 		if (sense_info.skey == UNIT_ATTENTION)
7683 			rc = DEVICE_RETRY_UA;
7684 		else if (sense_info.skey == NOT_READY) {
7685 			/* medium isn't present */
7686 			if (sense_info.asc == 0x3a)
7687 				rc = DEVICE_READY;
7688 			/* LOGICAL UNIT NOT READY */
7689 			else if (sense_info.asc == 0x04) {
7690 				if (sense_info.ascq == 0x03 ||
7691 				   sense_info.ascq == 0x0b ||
7692 				   sense_info.ascq == 0x0c) {
7693 					rc = DEVICE_ERROR;
7694 				} else
7695 					rc = DEVICE_START_UNIT;
7696 			}
7697 			/* LOGICAL UNIT HAS NOT SELF-CONFIGURED YET */
7698 			else if (sense_info.asc == 0x3e && !sense_info.ascq)
7699 				rc = DEVICE_START_UNIT;
7700 		} else if (sense_info.skey == ILLEGAL_REQUEST &&
7701 		    transfer_packet->cdb[0] == REPORT_LUNS) {
7702 			rc = DEVICE_READY;
7703 		} else if (sense_info.skey == MEDIUM_ERROR) {
7704 
7705 			/* medium is corrupt, lets add the device so
7706 			 * users can collect some info as needed
7707 			 */
7708 
7709 			if (sense_info.asc == 0x31)
7710 				rc = DEVICE_READY;
7711 		} else if (sense_info.skey == HARDWARE_ERROR) {
7712 			/* Defect List Error, still add the device */
7713 			if (sense_info.asc == 0x19)
7714 				rc = DEVICE_READY;
7715 		}
7716 	}
7717 
7718 	if (ioc->logging_level & MPT_DEBUG_EVENT_WORK_TASK) {
7719 		switch (rc) {
7720 		case DEVICE_READY:
7721 			desc = "ready";
7722 			break;
7723 		case DEVICE_RETRY:
7724 			desc = "retry";
7725 			break;
7726 		case DEVICE_RETRY_UA:
7727 			desc = "retry_ua";
7728 			break;
7729 		case DEVICE_START_UNIT:
7730 			desc = "start_unit";
7731 			break;
7732 		case DEVICE_STOP_UNIT:
7733 			desc = "stop_unit";
7734 			break;
7735 		case DEVICE_ERROR:
7736 			desc = "error";
7737 			break;
7738 		}
7739 
7740 		pr_info("ioc_status(0x%04x),\n"
7741 		    "loginfo(0x%08x), scsi_status(0x%02x),\n"
7742 		    "scsi_state(0x%02x), rc(%s)\n",
7743 			transfer_packet->ioc_status,
7744 			transfer_packet->log_info, transfer_packet->scsi_status,
7745 			transfer_packet->scsi_state, desc);
7746 
7747 		if (check_sense)
7748 			pr_info("\t[sense_key,asc,ascq]:\n"
7749 			    "[0x%02x,0x%02x,0x%02x]\n",
7750 			    sense_info.skey, sense_info.asc, sense_info.ascq);
7751 	}
7752 	return rc;
7753 }
7754 
7755 /**
7756  * _scsih_report_luns - send REPORT_LUNS to target
7757  * @ioc: per adapter object
7758  * @handle: expander handle
7759  * @data: report luns data payload
7760  * @data_length: length of data in bytes
7761  * @retry_count: Requeue count
7762  * @is_pd: is this hidden raid component
7763  * @tr_timeout: Target Reset Timeout
7764  * @tr_method: Target Reset Method
7765  * Context: user
7766  *
7767  * Returns device_responsive_state
7768  */
7769 static enum device_responsive_state
_scsih_report_luns(struct MPT3SAS_ADAPTER * ioc,u16 handle,void * data,u32 data_length,u8 retry_count,u8 is_pd,u8 tr_timeout,u8 tr_method)7770 _scsih_report_luns(struct MPT3SAS_ADAPTER *ioc, u16 handle, void *data,
7771 	u32 data_length, u8 retry_count, u8 is_pd, u8 tr_timeout, u8 tr_method)
7772 {
7773 	struct _scsi_io_transfer *transfer_packet;
7774 	enum device_responsive_state rc;
7775 	void *lun_data;
7776 	int return_code;
7777 	int retries;
7778 
7779 	lun_data = NULL;
7780 	transfer_packet = kzalloc_obj(struct _scsi_io_transfer);
7781 	if (!transfer_packet) {
7782 
7783 		ioc_err(ioc, "failure at %s:%d/%s()!\n", __FILE__, __LINE__, __func__);
7784 		rc = DEVICE_RETRY;
7785 		goto out;
7786 	}
7787 
7788 	lun_data = dma_alloc_coherent(&ioc->pdev->dev, data_length,
7789 		&transfer_packet->data_dma, GFP_ATOMIC);
7790 	if (!lun_data) {
7791 
7792 		ioc_err(ioc, "failure at %s:%d/%s()!\n", __FILE__, __LINE__, __func__);
7793 		rc = DEVICE_RETRY;
7794 		goto out;
7795 	}
7796 
7797 	for (retries = 0; retries < 4; retries++) {
7798 		rc = DEVICE_ERROR;
7799 		ioc_info(ioc, "REPORT_LUNS: handle(0x%04x),\n"
7800 		    "retries(%d)\n", handle, retries);
7801 		memset(lun_data, 0, data_length);
7802 		transfer_packet->handle = handle;
7803 		transfer_packet->dir = DMA_FROM_DEVICE;
7804 		transfer_packet->data_length = data_length;
7805 		transfer_packet->cdb_length = 12;
7806 		transfer_packet->cdb[0] = REPORT_LUNS;
7807 		transfer_packet->cdb[6] = (data_length >> 24) & 0xFF;
7808 		transfer_packet->cdb[7] = (data_length >> 16) & 0xFF;
7809 		transfer_packet->cdb[8] = (data_length >>  8) & 0xFF;
7810 		transfer_packet->cdb[9] = data_length & 0xFF;
7811 		transfer_packet->timeout = 30;
7812 		transfer_packet->is_raid = is_pd;
7813 
7814 		return_code = _scsi_send_scsi_io(ioc, transfer_packet, tr_timeout, tr_method);
7815 		switch (return_code) {
7816 		case 0:
7817 			rc = _scsih_determine_disposition(ioc, transfer_packet);
7818 			if (rc == DEVICE_READY) {
7819 				memcpy(data, lun_data, data_length);
7820 				goto out;
7821 			} else if (rc == DEVICE_ERROR)
7822 				goto out;
7823 			break;
7824 		case -EAGAIN:
7825 			rc = DEVICE_RETRY;
7826 			break;
7827 		case -EFAULT:
7828 		default:
7829 			ioc_err(ioc, "failure at %s:%d/%s()!\n", __FILE__, __LINE__, __func__);
7830 			goto out;
7831 		}
7832 	}
7833  out:
7834 
7835 	if (lun_data)
7836 		dma_free_coherent(&ioc->pdev->dev, data_length, lun_data,
7837 		    transfer_packet->data_dma);
7838 	kfree(transfer_packet);
7839 
7840 	if ((rc == DEVICE_RETRY || rc == DEVICE_START_UNIT ||
7841 	    rc == DEVICE_RETRY_UA) && retry_count >= command_retry_count)
7842 		rc = DEVICE_ERROR;
7843 
7844 	return rc;
7845 }
7846 
7847 /**
7848  * _scsih_start_unit - send START_UNIT to target
7849  * @ioc: per adapter object
7850  * @handle: expander handle
7851  * @lun: lun number
7852  * @is_pd: is this hidden raid component
7853  * @tr_timeout: Target Reset Timeout
7854  * @tr_method: Target Reset Method
7855  * Context: user
7856  *
7857  * Returns device_responsive_state
7858  */
7859 static enum device_responsive_state
_scsih_start_unit(struct MPT3SAS_ADAPTER * ioc,u16 handle,u32 lun,u8 is_pd,u8 tr_timeout,u8 tr_method)7860 _scsih_start_unit(struct MPT3SAS_ADAPTER *ioc, u16 handle, u32 lun, u8 is_pd,
7861 	u8 tr_timeout, u8 tr_method)
7862 {
7863 	struct _scsi_io_transfer *transfer_packet;
7864 	enum device_responsive_state rc;
7865 	int return_code;
7866 
7867 	transfer_packet = kzalloc_obj(struct _scsi_io_transfer);
7868 	if (!transfer_packet) {
7869 
7870 		pr_info("failure at %s:%d/%s()!\n", __FILE__, __LINE__, __func__);
7871 		rc = DEVICE_RETRY;
7872 		goto out;
7873 	}
7874 
7875 	rc = DEVICE_READY;
7876 	transfer_packet->handle = handle;
7877 	transfer_packet->dir = DMA_NONE;
7878 	transfer_packet->lun = lun;
7879 	transfer_packet->cdb_length = 6;
7880 	transfer_packet->cdb[0] = START_STOP;
7881 	transfer_packet->cdb[1] = 1;
7882 	transfer_packet->cdb[4] = 1;
7883 	transfer_packet->timeout = 30;
7884 	transfer_packet->is_raid = is_pd;
7885 
7886 	pr_info("START_UNIT: handle(0x%04x), lun(%d)\n", handle, lun);
7887 
7888 	return_code = _scsi_send_scsi_io(ioc, transfer_packet, tr_timeout, tr_method);
7889 	switch (return_code) {
7890 	case 0:
7891 		rc = _scsih_determine_disposition(ioc, transfer_packet);
7892 		break;
7893 	case -EAGAIN:
7894 		rc = DEVICE_RETRY;
7895 		break;
7896 	case -EFAULT:
7897 	default:
7898 		pr_err("failure at %s:%d/%s()!\n", __FILE__, __LINE__, __func__);
7899 		rc = DEVICE_ERROR;
7900 		break;
7901 	}
7902  out:
7903 	kfree(transfer_packet);
7904 	return rc;
7905 }
7906 
7907 /**
7908  * _scsih_test_unit_ready - send TUR to target
7909  * @ioc: per adapter object
7910  * @handle: expander handle
7911  * @lun: lun number
7912  * @is_pd: is this hidden raid component
7913  * @tr_timeout: Target Reset timeout value for Pcie devie
7914  * @tr_method: pcie device Target reset method
7915  * Context: user
7916  *
7917  * Returns device_responsive_state
7918  */
static enum device_responsive_state
_scsih_test_unit_ready(struct MPT3SAS_ADAPTER *ioc, u16 handle, u32 lun,
	u8 is_pd, u8 tr_timeout, u8 tr_method)
{
	struct _scsi_io_transfer *transfer_packet;
	enum device_responsive_state rc;
	int return_code;
	/* Allows exactly one extra attempt on SATA init timeout (see below). */
	int sata_init_failure = 0;

	transfer_packet = kzalloc_obj(struct _scsi_io_transfer);
	if (!transfer_packet) {

		pr_info("failure at %s:%d/%s()!\n", __FILE__, __LINE__, __func__);
		rc = DEVICE_RETRY;
		goto out;
	}

	/* Build a 6-byte TEST UNIT READY CDB; no data transfer. */
	rc = DEVICE_READY;
	transfer_packet->handle = handle;
	transfer_packet->dir = DMA_NONE;
	transfer_packet->lun = lun;
	transfer_packet->cdb_length = 6;
	transfer_packet->cdb[0] = TEST_UNIT_READY;
	transfer_packet->timeout = 30;
	transfer_packet->is_raid = is_pd;

 sata_init_retry:
	pr_info("TEST_UNIT_READY: handle(0x%04x) lun(%d)\n", handle, lun);

	return_code = _scsi_send_scsi_io(ioc, transfer_packet, tr_timeout, tr_method);
	switch (return_code) {
	case 0:
		rc = _scsih_determine_disposition(ioc, transfer_packet);
		/*
		 * Loginfo 0x31111000 flags a SATA initialization timeout:
		 * resend the TUR once, then give up with DEVICE_ERROR.
		 */
		if (rc == DEVICE_RETRY &&
		    transfer_packet->log_info == 0x31111000) {
			if (!sata_init_failure++) {
				pr_info("SATA Initialization Timeout sending a retry\n");
				rc = DEVICE_READY;
				goto sata_init_retry;
			} else {
				pr_err("SATA Initialization Failed\n");
				rc = DEVICE_ERROR;
			}
		}
		break;
	case -EAGAIN:
		rc = DEVICE_RETRY;
		break;
	case -EFAULT:
	default:
		pr_err("failure at %s:%d/%s()!\n", __FILE__, __LINE__, __func__);
		rc = DEVICE_ERROR;
		break;
	}
 out:
	kfree(transfer_packet);
	return rc;
}
7977 
7978 /**
7979  * _scsih_ata_pass_thru_idd - obtain SATA device Identify Device Data
7980  * @ioc: per adapter object
7981  * @handle: device handle
7982  * @is_ssd_device : is this SATA SSD device
7983  * @tr_timeout: Target Reset Timeout
7984  * @tr_method: Target Reset Method
7985  * Context: user
7986  *
7987  * Returns device_responsive_state
7988  */
7989 static enum device_responsive_state
_scsih_ata_pass_thru_idd(struct MPT3SAS_ADAPTER * ioc,u16 handle,u8 * is_ssd_device,u8 tr_timeout,u8 tr_method)7990 _scsih_ata_pass_thru_idd(struct MPT3SAS_ADAPTER *ioc, u16 handle,
7991 	u8 *is_ssd_device, u8 tr_timeout, u8 tr_method)
7992 {
7993 	struct _scsi_io_transfer *transfer_packet;
7994 	enum device_responsive_state rc;
7995 	u16 *idd_data;
7996 	int return_code;
7997 	u32 data_length;
7998 
7999 	idd_data = NULL;
8000 	transfer_packet = kzalloc_obj(struct _scsi_io_transfer);
8001 	if (!transfer_packet) {
8002 
8003 		ioc_err(ioc, "failure at %s:%d/%s()!\n", __FILE__, __LINE__, __func__);
8004 		rc = DEVICE_RETRY;
8005 		goto out;
8006 	}
8007 	data_length = 512;
8008 	idd_data = dma_alloc_coherent(&ioc->pdev->dev, data_length,
8009 		&transfer_packet->data_dma, GFP_ATOMIC);
8010 	if (!idd_data) {
8011 
8012 		ioc_err(ioc, "failure at %s:%d/%s()!\n", __FILE__, __LINE__, __func__);
8013 		rc = DEVICE_RETRY;
8014 		goto out;
8015 	}
8016 	rc = DEVICE_READY;
8017 	memset(idd_data, 0, data_length);
8018 	transfer_packet->handle = handle;
8019 	transfer_packet->dir = DMA_FROM_DEVICE;
8020 	transfer_packet->data_length = data_length;
8021 	transfer_packet->cdb_length = 12;
8022 	transfer_packet->cdb[0] = ATA_12;
8023 	transfer_packet->cdb[1] = 0x8;
8024 	transfer_packet->cdb[2] = 0xd;
8025 	transfer_packet->cdb[3] = 0x1;
8026 	transfer_packet->cdb[9] = 0xec;
8027 	transfer_packet->timeout = 30;
8028 
8029 	return_code = _scsi_send_scsi_io(ioc, transfer_packet, 30, 0);
8030 	switch (return_code) {
8031 	case 0:
8032 		rc = _scsih_determine_disposition(ioc, transfer_packet);
8033 		if (rc == DEVICE_READY) {
8034 			// Check if nominal media rotation rate is set to 1 i.e. SSD device
8035 			if (idd_data[217] == 1)
8036 				*is_ssd_device = 1;
8037 		}
8038 		break;
8039 	case -EAGAIN:
8040 		rc = DEVICE_RETRY;
8041 		break;
8042 	case -EFAULT:
8043 	default:
8044 
8045 		ioc_err(ioc, "failure at %s:%d/%s()!\n", __FILE__, __LINE__, __func__);
8046 		rc = DEVICE_ERROR;
8047 		break;
8048 	}
8049 
8050  out:
8051 	if (idd_data) {
8052 		dma_free_coherent(&ioc->pdev->dev, data_length, idd_data,
8053 		    transfer_packet->data_dma);
8054 	}
8055 	kfree(transfer_packet);
8056 	return rc;
8057 }
8058 
8059 /**
8060  * _scsih_wait_for_device_to_become_ready - handle busy devices
8061  * @ioc: per adapter object
8062  * @handle: expander handle
8063  * @retry_count: number of times this event has been retried
8064  * @is_pd: is this hidden raid component
8065  * @lun: lun number
8066  * @tr_timeout: Target Reset Timeout
8067  * @tr_method: Target Reset Method
8068  *
8069  * Some devices spend too much time in busy state, queue event later
8070  *
8071  * Return the device_responsive_state.
8072  */
8073 
8074 static enum device_responsive_state
_scsih_wait_for_device_to_become_ready(struct MPT3SAS_ADAPTER * ioc,u16 handle,u8 retry_count,u8 is_pd,int lun,u8 tr_timeout,u8 tr_method)8075 _scsih_wait_for_device_to_become_ready(struct MPT3SAS_ADAPTER *ioc, u16 handle,
8076 	u8 retry_count, u8 is_pd, int lun, u8 tr_timeout, u8 tr_method)
8077 {
8078 	enum device_responsive_state rc;
8079 
8080 	if (ioc->pci_error_recovery)
8081 		return DEVICE_ERROR;
8082 
8083 	if (ioc->shost_recovery)
8084 		return DEVICE_RETRY;
8085 
8086 	rc = _scsih_test_unit_ready(ioc, handle, lun, is_pd, tr_timeout, tr_method);
8087 	if (rc == DEVICE_READY || rc == DEVICE_ERROR)
8088 		return rc;
8089 	else if (rc == DEVICE_START_UNIT) {
8090 		rc = _scsih_start_unit(ioc, handle, lun, is_pd, tr_timeout, tr_method);
8091 		if (rc == DEVICE_ERROR)
8092 			return rc;
8093 		rc = _scsih_test_unit_ready(ioc, handle, lun, is_pd, tr_timeout, tr_method);
8094 	}
8095 
8096 	if ((rc == DEVICE_RETRY || rc == DEVICE_START_UNIT ||
8097 	    rc == DEVICE_RETRY_UA) && retry_count >= command_retry_count)
8098 		rc = DEVICE_ERROR;
8099 	return rc;
8100 }
8101 
/**
 * mpt_scsilun_to_int - convert a struct scsi_lun to an integer LUN
 * @scsilun: LUN in wire format as returned by REPORT LUNS
 *
 * Thin wrapper around the SCSI midlayer's scsilun_to_int().
 */
static inline int mpt_scsilun_to_int(struct scsi_lun *scsilun)
{
	int lun = scsilun_to_int(scsilun);

	return lun;
}
8106 
/**
 * _scsih_wait_for_target_to_become_ready - handle busy devices
 * @ioc: per adapter object
 * @handle: device handle of the target
 * @retry_count: number of times this event has been retried
 * @is_pd: is this hidden raid component
 * @tr_timeout: Target Reset timeout value
 * @tr_method: Target Reset method Hot/Protocol level.
 *
 * Some devices spend too much time in busy state, queue event later
 *
 * Issues REPORT LUNS, then probes the reported LUN(s) until one of them
 * stops returning DEVICE_ERROR.
 *
 * Return the device_responsive_state.
 */

static enum device_responsive_state
_scsih_wait_for_target_to_become_ready(struct MPT3SAS_ADAPTER *ioc, u16 handle,
	u8 retry_count, u8 is_pd, u8 tr_timeout, u8 tr_method)
{
	enum device_responsive_state rc;
	struct scsi_lun *lun_data;
	u32 length, num_luns;
	u8 *data;
	int lun;
	struct scsi_lun *lunp;

	/* Buffer for the REPORT LUNS response (8-byte header + LUN list). */
	lun_data = kzalloc_objs(struct scsi_lun, MPT3_MAX_LUNS);
	if (!lun_data) {

		ioc_err(ioc, "failure at %s:%d/%s()!\n", __FILE__, __LINE__, __func__);
		return DEVICE_RETRY;
	}

	rc = _scsih_report_luns(ioc, handle, lun_data,
	    MPT3_MAX_LUNS * sizeof(struct scsi_lun), retry_count, is_pd,
	    tr_timeout, tr_method);

	if (rc != DEVICE_READY)
		goto out;

	/* some debug bits*/
	/* The first 4 bytes of the REPORT LUNS response hold the LUN list
	 * length in bytes (big-endian); the LUN entries themselves start at
	 * the second scsi_lun slot, since the first slot is the header.
	 */
	data = (u8 *)lun_data;
	length = ((data[0] << 24) | (data[1] << 16) |
		(data[2] << 8) | (data[3] << 0));

	num_luns = (length / sizeof(struct scsi_lun));

	lunp = &lun_data[1];
	/* Probe the first reported LUN, or LUN 0 if the list is empty. */
	lun = (num_luns) ? mpt_scsilun_to_int(&lun_data[1]) : 0;
	rc = _scsih_wait_for_device_to_become_ready(ioc, handle, retry_count,
	    is_pd, lun, tr_timeout, tr_method);

	if (rc == DEVICE_ERROR) {
		struct scsi_lun *lunq;

		/* First LUN failed: walk the remaining reported LUNs and
		 * accept the target as soon as any LUN stops reporting
		 * DEVICE_ERROR.  NOTE(review): lunq starts at the entry
		 * already probed above (lunp is copied before the post-
		 * increment), so the first LUN gets one extra retry here —
		 * confirm this is intentional.
		 */
		for (lunq = lunp++; lunq <= &lun_data[num_luns]; lunq++) {

			rc = _scsih_wait_for_device_to_become_ready(ioc, handle,
					retry_count, is_pd, mpt_scsilun_to_int(lunq),
					tr_timeout, tr_method);
			if (rc != DEVICE_ERROR)
				goto out;
		}
	}
out:
	kfree(lun_data);
	return rc;
}
8173 
8174 
8175 /**
8176  * _scsih_check_access_status - check access flags
8177  * @ioc: per adapter object
8178  * @sas_address: sas address
8179  * @handle: sas device handle
8180  * @access_status: errors returned during discovery of the device
8181  *
8182  * Return: 0 for success, else failure
8183  */
8184 static u8
_scsih_check_access_status(struct MPT3SAS_ADAPTER * ioc,u64 sas_address,u16 handle,u8 access_status)8185 _scsih_check_access_status(struct MPT3SAS_ADAPTER *ioc, u64 sas_address,
8186 	u16 handle, u8 access_status)
8187 {
8188 	u8 rc = 1;
8189 	char *desc = NULL;
8190 
8191 	switch (access_status) {
8192 	case MPI2_SAS_DEVICE0_ASTATUS_NO_ERRORS:
8193 	case MPI2_SAS_DEVICE0_ASTATUS_SATA_NEEDS_INITIALIZATION:
8194 		rc = 0;
8195 		break;
8196 	case MPI2_SAS_DEVICE0_ASTATUS_SATA_CAPABILITY_FAILED:
8197 		desc = "sata capability failed";
8198 		break;
8199 	case MPI2_SAS_DEVICE0_ASTATUS_SATA_AFFILIATION_CONFLICT:
8200 		desc = "sata affiliation conflict";
8201 		break;
8202 	case MPI2_SAS_DEVICE0_ASTATUS_ROUTE_NOT_ADDRESSABLE:
8203 		desc = "route not addressable";
8204 		break;
8205 	case MPI2_SAS_DEVICE0_ASTATUS_SMP_ERROR_NOT_ADDRESSABLE:
8206 		desc = "smp error not addressable";
8207 		break;
8208 	case MPI2_SAS_DEVICE0_ASTATUS_DEVICE_BLOCKED:
8209 		desc = "device blocked";
8210 		break;
8211 	case MPI2_SAS_DEVICE0_ASTATUS_SATA_INIT_FAILED:
8212 	case MPI2_SAS_DEVICE0_ASTATUS_SIF_UNKNOWN:
8213 	case MPI2_SAS_DEVICE0_ASTATUS_SIF_AFFILIATION_CONFLICT:
8214 	case MPI2_SAS_DEVICE0_ASTATUS_SIF_DIAG:
8215 	case MPI2_SAS_DEVICE0_ASTATUS_SIF_IDENTIFICATION:
8216 	case MPI2_SAS_DEVICE0_ASTATUS_SIF_CHECK_POWER:
8217 	case MPI2_SAS_DEVICE0_ASTATUS_SIF_PIO_SN:
8218 	case MPI2_SAS_DEVICE0_ASTATUS_SIF_MDMA_SN:
8219 	case MPI2_SAS_DEVICE0_ASTATUS_SIF_UDMA_SN:
8220 	case MPI2_SAS_DEVICE0_ASTATUS_SIF_ZONING_VIOLATION:
8221 	case MPI2_SAS_DEVICE0_ASTATUS_SIF_NOT_ADDRESSABLE:
8222 	case MPI2_SAS_DEVICE0_ASTATUS_SIF_MAX:
8223 		desc = "sata initialization failed";
8224 		break;
8225 	default:
8226 		desc = "unknown";
8227 		break;
8228 	}
8229 
8230 	if (!rc)
8231 		return 0;
8232 
8233 	ioc_err(ioc, "discovery errors(%s): sas_address(0x%016llx), handle(0x%04x)\n",
8234 		desc, (u64)sas_address, handle);
8235 	return rc;
8236 }
8237 
/**
 * _scsih_check_device - checking device responsiveness
 * @ioc: per adapter object
 * @parent_sas_address: sas address of parent expander or sas host
 * @handle: attached device handle
 * @phy_number: phy number
 * @link_rate: new link rate
 *
 * Re-reads SAS Device Page 0 for @handle; if the end device is still
 * present and accessible, refreshes the cached handle/enclosure data
 * and unblocks I/O to it.
 */
static void
_scsih_check_device(struct MPT3SAS_ADAPTER *ioc,
	u64 parent_sas_address, u16 handle, u8 phy_number, u8 link_rate)
{
	Mpi2ConfigReply_t mpi_reply;
	Mpi2SasDevicePage0_t sas_device_pg0;
	struct _sas_device *sas_device = NULL;
	struct _enclosure_node *enclosure_dev = NULL;
	u32 ioc_status;
	unsigned long flags;
	u64 sas_address;
	struct scsi_target *starget;
	struct MPT3SAS_TARGET *sas_target_priv_data;
	u32 device_info;
	struct hba_port *port;

	if ((mpt3sas_config_get_sas_device_pg0(ioc, &mpi_reply, &sas_device_pg0,
	    MPI2_SAS_DEVICE_PGAD_FORM_HANDLE, handle)))
		return;

	ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & MPI2_IOCSTATUS_MASK;
	if (ioc_status != MPI2_IOCSTATUS_SUCCESS)
		return;

	/* wide port handling ~ we need only handle device once for the phy that
	 * is matched in sas device page zero
	 */
	if (phy_number != sas_device_pg0.PhyNum)
		return;

	/* check if this is end device */
	device_info = le32_to_cpu(sas_device_pg0.DeviceInfo);
	if (!(_scsih_is_end_device(device_info)))
		return;

	spin_lock_irqsave(&ioc->sas_device_lock, flags);
	sas_address = le64_to_cpu(sas_device_pg0.SASAddress);
	port = mpt3sas_get_port_by_id(ioc, sas_device_pg0.PhysicalPort, 0);
	if (!port)
		goto out_unlock;
	/* Lookup takes a reference; dropped on both exit paths below. */
	sas_device = __mpt3sas_get_sdev_by_addr(ioc,
	    sas_address, port);

	if (!sas_device)
		goto out_unlock;

	/* Firmware may have assigned a new handle to the same device
	 * (e.g. it dropped off and came back before the missing delay
	 * expired); resync the cached handle and enclosure data.
	 */
	if (unlikely(sas_device->handle != handle)) {
		starget = sas_device->starget;
		sas_target_priv_data = starget->hostdata;
		starget_printk(KERN_INFO, starget,
			"handle changed from(0x%04x) to (0x%04x)!!!\n",
			sas_device->handle, handle);
		sas_target_priv_data->handle = handle;
		sas_device->handle = handle;
		/* Enclosure level/connector name are only valid on
		 * MPI2.5+ controllers with the flag set.
		 */
		if ((le16_to_cpu(sas_device_pg0.Flags) & MPI2_SAS_DEVICE0_FLAGS_ENCL_LEVEL_VALID)
		    && (ioc->hba_mpi_version_belonged != MPI2_VERSION)) {
			sas_device->enclosure_level =
				sas_device_pg0.EnclosureLevel;
			memcpy(sas_device->connector_name,
				sas_device_pg0.ConnectorName, 4);
			sas_device->connector_name[4] = '\0';
		} else {
			sas_device->enclosure_level = 0;
			sas_device->connector_name[0] = '\0';
		}

		sas_device->enclosure_handle =
				le16_to_cpu(sas_device_pg0.EnclosureHandle);
		sas_device->is_chassis_slot_valid = 0;
		enclosure_dev = mpt3sas_scsih_enclosure_find_by_handle(ioc,
						sas_device->enclosure_handle);
		if (enclosure_dev) {
			sas_device->enclosure_logical_id =
			    le64_to_cpu(enclosure_dev->pg0.EnclosureLogicalID);
			if (le16_to_cpu(enclosure_dev->pg0.Flags) &
			    MPI2_SAS_ENCLS0_FLAGS_CHASSIS_SLOT_VALID) {
				sas_device->is_chassis_slot_valid = 1;
				sas_device->chassis_slot =
					enclosure_dev->pg0.ChassisSlot;
			}
		}
	}

	/* check if device is present */
	if (!(le16_to_cpu(sas_device_pg0.Flags) &
	    MPI2_SAS_DEVICE0_FLAGS_DEVICE_PRESENT)) {
		ioc_err(ioc, "device is not present handle(0x%04x), flags!!!\n",
			handle);
		goto out_unlock;
	}

	/* check if there were any issues with discovery */
	if (_scsih_check_access_status(ioc, sas_address, handle,
	    sas_device_pg0.AccessStatus))
		goto out_unlock;

	spin_unlock_irqrestore(&ioc->sas_device_lock, flags);

	/* Device looks healthy: let queued I/O flow again. */
	if (issue_scsi_cmd_to_bringup_drive)
		_scsih_ublock_io_device_wait(ioc, sas_address, port);
	else
		_scsih_ublock_io_device(ioc, sas_address, port);

	if (sas_device)
		sas_device_put(sas_device);
	return;

out_unlock:
	spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
	if (sas_device)
		sas_device_put(sas_device);
}
8358 
8359 /**
8360  * _scsih_add_device -  creating sas device object
8361  * @ioc: per adapter object
8362  * @handle: sas device handle
8363  * @retry_count: number of times this event has been retried
8364  * @is_pd: is this hidden raid component
8365  *
8366  * Creating end device object, stored in ioc->sas_device_list.
8367  *
8368  * Return: 0 for success, non-zero for failure.
8369  */
8370 static int
_scsih_add_device(struct MPT3SAS_ADAPTER * ioc,u16 handle,u8 retry_count,u8 is_pd)8371 _scsih_add_device(struct MPT3SAS_ADAPTER *ioc, u16 handle, u8 retry_count,
8372 	u8 is_pd)
8373 {
8374 	Mpi2ConfigReply_t mpi_reply;
8375 	Mpi2SasDevicePage0_t sas_device_pg0;
8376 	struct _sas_device *sas_device;
8377 	struct _enclosure_node *enclosure_dev = NULL;
8378 	enum device_responsive_state rc;
8379 	u32 ioc_status;
8380 	u64 sas_address;
8381 	u32 device_info;
8382 	u8 connector_name[5];
8383 	u8 port_id;
8384 
8385 	if ((mpt3sas_config_get_sas_device_pg0(ioc, &mpi_reply, &sas_device_pg0,
8386 	    MPI2_SAS_DEVICE_PGAD_FORM_HANDLE, handle))) {
8387 		ioc_err(ioc, "failure at %s:%d/%s()!\n",
8388 			__FILE__, __LINE__, __func__);
8389 		return -1;
8390 	}
8391 
8392 	ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
8393 	    MPI2_IOCSTATUS_MASK;
8394 	if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
8395 		ioc_err(ioc, "failure at %s:%d/%s()!\n",
8396 			__FILE__, __LINE__, __func__);
8397 		return -1;
8398 	}
8399 
8400 	/* check if this is end device */
8401 	device_info = le32_to_cpu(sas_device_pg0.DeviceInfo);
8402 	if (!(_scsih_is_end_device(device_info)))
8403 		return -1;
8404 	set_bit(handle, ioc->pend_os_device_add);
8405 	sas_address = le64_to_cpu(sas_device_pg0.SASAddress);
8406 
8407 	/* check if device is present */
8408 	if (!(le16_to_cpu(sas_device_pg0.Flags) &
8409 	    MPI2_SAS_DEVICE0_FLAGS_DEVICE_PRESENT)) {
8410 		ioc_err(ioc, "device is not present handle(0x04%x)!!!\n",
8411 			handle);
8412 		return -1;
8413 	}
8414 
8415 	/* check if there were any issues with discovery */
8416 	if (_scsih_check_access_status(ioc, sas_address, handle,
8417 	    sas_device_pg0.AccessStatus))
8418 		return -1;
8419 
8420 	port_id = sas_device_pg0.PhysicalPort;
8421 	sas_device = mpt3sas_get_sdev_by_addr(ioc,
8422 	    sas_address, mpt3sas_get_port_by_id(ioc, port_id, 0));
8423 	if (sas_device) {
8424 		clear_bit(handle, ioc->pend_os_device_add);
8425 		sas_device_put(sas_device);
8426 		return -1;
8427 	}
8428 
8429 	if (sas_device_pg0.EnclosureHandle) {
8430 		enclosure_dev =
8431 			mpt3sas_scsih_enclosure_find_by_handle(ioc,
8432 			    le16_to_cpu(sas_device_pg0.EnclosureHandle));
8433 		if (enclosure_dev == NULL)
8434 			ioc_info(ioc, "Enclosure handle(0x%04x) doesn't match with enclosure device!\n",
8435 				 sas_device_pg0.EnclosureHandle);
8436 	}
8437 
8438 	/*
8439 	 * Wait for device that is becoming ready
8440 	 * queue request later if device is busy.
8441 	 */
8442 	if ((!ioc->wait_for_discovery_to_complete) &&
8443 		(issue_scsi_cmd_to_bringup_drive)) {
8444 		ioc_info(ioc, "detecting: handle(0x%04x),\n"
8445 				"sas_address(0x%016llx), phy(%d)\n", handle,
8446 				(unsigned long long)sas_address, sas_device_pg0.PhyNum);
8447 		rc = _scsih_wait_for_target_to_become_ready(ioc, handle,
8448 		    retry_count, is_pd, 30, 0);
8449 		if (rc != DEVICE_READY) {
8450 			if (le16_to_cpu(sas_device_pg0.EnclosureHandle) != 0)
8451 				dewtprintk(ioc, ioc_info(ioc, "%s:\n"
8452 				    "device not ready: slot(%d)\n", __func__,
8453 				    le16_to_cpu(sas_device_pg0.Slot)));
8454 			if ((le16_to_cpu(sas_device_pg0.Flags) &
8455 			    MPI2_SAS_DEVICE0_FLAGS_ENCL_LEVEL_VALID) &&
8456 			    (ioc->hba_mpi_version_belonged != MPI2_VERSION)) {
8457 				memcpy(connector_name,
8458 					sas_device_pg0.ConnectorName, 4);
8459 				connector_name[4] = '\0';
8460 				dewtprintk(ioc, ioc_info(ioc, "%s:\n"
8461 				    "device not ready:\n"
8462 				    "enclosure level(0x%04x),\n"
8463 				    "connector name( %s)\n",  __func__,
8464 				    sas_device_pg0.EnclosureLevel, connector_name));
8465 			}
8466 
8467 			if ((enclosure_dev) && (le16_to_cpu(enclosure_dev->pg0.Flags) &
8468 			    MPI2_SAS_ENCLS0_FLAGS_CHASSIS_SLOT_VALID))
8469 				ioc_info(ioc, "chassis slot(0x%04x)\n",
8470 						enclosure_dev->pg0.ChassisSlot);
8471 
8472 			if (rc == DEVICE_RETRY || rc == DEVICE_START_UNIT ||
8473 			    rc == DEVICE_STOP_UNIT || rc == DEVICE_RETRY_UA)
8474 				return 1;
8475 			else if (rc == DEVICE_ERROR)
8476 				return 0;
8477 		}
8478 	}
8479 
8480 	sas_device = kzalloc_obj(struct _sas_device);
8481 	if (!sas_device) {
8482 		ioc_err(ioc, "failure at %s:%d/%s()!\n",
8483 			__FILE__, __LINE__, __func__);
8484 		return 0;
8485 	}
8486 
8487 	kref_init(&sas_device->refcount);
8488 	sas_device->handle = handle;
8489 	if (_scsih_get_sas_address(ioc,
8490 	    le16_to_cpu(sas_device_pg0.ParentDevHandle),
8491 	    &sas_device->sas_address_parent) != 0)
8492 		ioc_err(ioc, "failure at %s:%d/%s()!\n",
8493 			__FILE__, __LINE__, __func__);
8494 	sas_device->enclosure_handle =
8495 	    le16_to_cpu(sas_device_pg0.EnclosureHandle);
8496 	if (sas_device->enclosure_handle != 0)
8497 		sas_device->slot =
8498 		    le16_to_cpu(sas_device_pg0.Slot);
8499 	sas_device->device_info = device_info;
8500 	sas_device->sas_address = sas_address;
8501 	sas_device->phy = sas_device_pg0.PhyNum;
8502 	sas_device->fast_path = (le16_to_cpu(sas_device_pg0.Flags) &
8503 	    MPI25_SAS_DEVICE0_FLAGS_FAST_PATH_CAPABLE) ? 1 : 0;
8504 	sas_device->port = mpt3sas_get_port_by_id(ioc, port_id, 0);
8505 	if (!sas_device->port) {
8506 		ioc_err(ioc, "failure at %s:%d/%s()!\n",
8507 		    __FILE__, __LINE__, __func__);
8508 		goto out;
8509 	}
8510 
8511 	if (le16_to_cpu(sas_device_pg0.Flags)
8512 		& MPI2_SAS_DEVICE0_FLAGS_ENCL_LEVEL_VALID) {
8513 		sas_device->enclosure_level =
8514 			sas_device_pg0.EnclosureLevel;
8515 		memcpy(sas_device->connector_name,
8516 			sas_device_pg0.ConnectorName, 4);
8517 		sas_device->connector_name[4] = '\0';
8518 	} else {
8519 		sas_device->enclosure_level = 0;
8520 		sas_device->connector_name[0] = '\0';
8521 	}
8522 	/* get enclosure_logical_id & chassis_slot*/
8523 	sas_device->is_chassis_slot_valid = 0;
8524 	if (enclosure_dev) {
8525 		sas_device->enclosure_logical_id =
8526 		    le64_to_cpu(enclosure_dev->pg0.EnclosureLogicalID);
8527 		if (le16_to_cpu(enclosure_dev->pg0.Flags) &
8528 		    MPI2_SAS_ENCLS0_FLAGS_CHASSIS_SLOT_VALID) {
8529 			sas_device->is_chassis_slot_valid = 1;
8530 			sas_device->chassis_slot =
8531 					enclosure_dev->pg0.ChassisSlot;
8532 		}
8533 	}
8534 
8535 	/* get device name */
8536 	sas_device->device_name = le64_to_cpu(sas_device_pg0.DeviceName);
8537 	sas_device->port_type = sas_device_pg0.MaxPortConnections;
8538 	ioc_info(ioc,
8539 	    "handle(0x%0x) sas_address(0x%016llx) port_type(0x%0x)\n",
8540 	    handle, sas_device->sas_address, sas_device->port_type);
8541 
8542 	if (ioc->wait_for_discovery_to_complete)
8543 		_scsih_sas_device_init_add(ioc, sas_device);
8544 	else
8545 		_scsih_sas_device_add(ioc, sas_device);
8546 
8547 out:
8548 	sas_device_put(sas_device);
8549 	return 0;
8550 }
8551 
/**
 * _scsih_remove_device -  removing sas device object
 * @ioc: per adapter object
 * @sas_device: the sas_device object
 *
 * Tears down OS-visible state for a departed device: turns off the IBM
 * PFA LED if it was left on, marks the target deleted, unblocks pending
 * I/O so it can fail back to the midlayer, and removes the transport
 * port.
 */
static void
_scsih_remove_device(struct MPT3SAS_ADAPTER *ioc,
	struct _sas_device *sas_device)
{
	struct MPT3SAS_TARGET *sas_target_priv_data;

	/* IBM-branded HBAs drive a predictive-failure LED; make sure it
	 * is switched off before the device object goes away.
	 */
	if ((ioc->pdev->subsystem_vendor == PCI_VENDOR_ID_IBM) &&
	     (sas_device->pfa_led_on)) {
		_scsih_turn_off_pfa_led(ioc, sas_device);
		sas_device->pfa_led_on = 0;
	}

	dewtprintk(ioc,
		   ioc_info(ioc, "%s: enter: handle(0x%04x), sas_addr(0x%016llx)\n",
			    __func__,
			    sas_device->handle, (u64)sas_device->sas_address));

	dewtprintk(ioc, _scsih_display_enclosure_chassis_info(ioc, sas_device,
	    NULL, NULL));

	/* Mark the target deleted and release any blocked I/O so it can
	 * complete/fail before the handle is invalidated.
	 */
	if (sas_device->starget && sas_device->starget->hostdata) {
		sas_target_priv_data = sas_device->starget->hostdata;
		sas_target_priv_data->deleted = 1;
		_scsih_ublock_io_device(ioc, sas_device->sas_address,
		    sas_device->port);
		sas_target_priv_data->handle =
		     MPT3SAS_INVALID_DEVICE_HANDLE;
	}

	/* Hidden raid components have no transport port to remove. */
	if (!ioc->hide_drives)
		mpt3sas_transport_port_remove(ioc,
		    sas_device->sas_address,
		    sas_device->sas_address_parent,
		    sas_device->port);

	ioc_info(ioc, "removing handle(0x%04x), sas_addr(0x%016llx)\n",
		 sas_device->handle, (u64)sas_device->sas_address);

	_scsih_display_enclosure_chassis_info(ioc, sas_device, NULL, NULL);

	dewtprintk(ioc,
		   ioc_info(ioc, "%s: exit: handle(0x%04x), sas_addr(0x%016llx)\n",
			    __func__,
			    sas_device->handle, (u64)sas_device->sas_address));
	dewtprintk(ioc, _scsih_display_enclosure_chassis_info(ioc, sas_device,
	    NULL, NULL));
}
8604 
8605 /**
8606  * _scsih_sas_topology_change_event_debug - debug for topology event
8607  * @ioc: per adapter object
8608  * @event_data: event data payload
8609  * Context: user.
8610  */
8611 static void
_scsih_sas_topology_change_event_debug(struct MPT3SAS_ADAPTER * ioc,Mpi2EventDataSasTopologyChangeList_t * event_data)8612 _scsih_sas_topology_change_event_debug(struct MPT3SAS_ADAPTER *ioc,
8613 	Mpi2EventDataSasTopologyChangeList_t *event_data)
8614 {
8615 	int i;
8616 	u16 handle;
8617 	u16 reason_code;
8618 	u8 phy_number;
8619 	char *status_str = NULL;
8620 	u8 link_rate, prev_link_rate;
8621 
8622 	switch (event_data->ExpStatus) {
8623 	case MPI2_EVENT_SAS_TOPO_ES_ADDED:
8624 		status_str = "add";
8625 		break;
8626 	case MPI2_EVENT_SAS_TOPO_ES_NOT_RESPONDING:
8627 		status_str = "remove";
8628 		break;
8629 	case MPI2_EVENT_SAS_TOPO_ES_RESPONDING:
8630 	case 0:
8631 		status_str =  "responding";
8632 		break;
8633 	case MPI2_EVENT_SAS_TOPO_ES_DELAY_NOT_RESPONDING:
8634 		status_str = "remove delay";
8635 		break;
8636 	default:
8637 		status_str = "unknown status";
8638 		break;
8639 	}
8640 	ioc_info(ioc, "sas topology change: (%s)\n", status_str);
8641 	pr_info("\thandle(0x%04x), enclosure_handle(0x%04x) " \
8642 	    "start_phy(%02d), count(%d)\n",
8643 	    le16_to_cpu(event_data->ExpanderDevHandle),
8644 	    le16_to_cpu(event_data->EnclosureHandle),
8645 	    event_data->StartPhyNum, event_data->NumEntries);
8646 	for (i = 0; i < event_data->NumEntries; i++) {
8647 		handle = le16_to_cpu(event_data->PHY[i].AttachedDevHandle);
8648 		if (!handle)
8649 			continue;
8650 		phy_number = event_data->StartPhyNum + i;
8651 		reason_code = event_data->PHY[i].PhyStatus &
8652 		    MPI2_EVENT_SAS_TOPO_RC_MASK;
8653 		switch (reason_code) {
8654 		case MPI2_EVENT_SAS_TOPO_RC_TARG_ADDED:
8655 			status_str = "target add";
8656 			break;
8657 		case MPI2_EVENT_SAS_TOPO_RC_TARG_NOT_RESPONDING:
8658 			status_str = "target remove";
8659 			break;
8660 		case MPI2_EVENT_SAS_TOPO_RC_DELAY_NOT_RESPONDING:
8661 			status_str = "delay target remove";
8662 			break;
8663 		case MPI2_EVENT_SAS_TOPO_RC_PHY_CHANGED:
8664 			status_str = "link rate change";
8665 			break;
8666 		case MPI2_EVENT_SAS_TOPO_RC_NO_CHANGE:
8667 			status_str = "target responding";
8668 			break;
8669 		default:
8670 			status_str = "unknown";
8671 			break;
8672 		}
8673 		link_rate = event_data->PHY[i].LinkRate >> 4;
8674 		prev_link_rate = event_data->PHY[i].LinkRate & 0xF;
8675 		pr_info("\tphy(%02d), attached_handle(0x%04x): %s:" \
8676 		    " link rate: new(0x%02x), old(0x%02x)\n", phy_number,
8677 		    handle, status_str, link_rate, prev_link_rate);
8678 
8679 	}
8680 }
8681 
8682 /**
8683  * _scsih_sas_topology_change_event - handle topology changes
8684  * @ioc: per adapter object
8685  * @fw_event: The fw_event_work object
8686  * Context: user.
8687  *
8688  */
8689 static int
_scsih_sas_topology_change_event(struct MPT3SAS_ADAPTER * ioc,struct fw_event_work * fw_event)8690 _scsih_sas_topology_change_event(struct MPT3SAS_ADAPTER *ioc,
8691 	struct fw_event_work *fw_event)
8692 {
8693 	int i;
8694 	int rc;
8695 	int requeue_event;
8696 	u16 parent_handle, handle;
8697 	u16 reason_code;
8698 	u8 phy_number, max_phys;
8699 	struct _sas_node *sas_expander;
8700 	struct _sas_device *sas_device;
8701 	u64 sas_address;
8702 	unsigned long flags;
8703 	u8 link_rate, prev_link_rate;
8704 	struct hba_port *port;
8705 	Mpi2EventDataSasTopologyChangeList_t *event_data =
8706 		(Mpi2EventDataSasTopologyChangeList_t *)
8707 		fw_event->event_data;
8708 
8709 	if (ioc->logging_level & MPT_DEBUG_EVENT_WORK_TASK)
8710 		_scsih_sas_topology_change_event_debug(ioc, event_data);
8711 
8712 	if (ioc->shost_recovery || ioc->remove_host || ioc->pci_error_recovery)
8713 		return 0;
8714 
8715 	if (!ioc->sas_hba.num_phys)
8716 		_scsih_sas_host_add(ioc);
8717 	else
8718 		_scsih_sas_host_refresh(ioc);
8719 
8720 	if (fw_event->ignore) {
8721 		dewtprintk(ioc, ioc_info(ioc, "ignoring expander event\n"));
8722 		return 0;
8723 	}
8724 
8725 	parent_handle = le16_to_cpu(event_data->ExpanderDevHandle);
8726 	port = mpt3sas_get_port_by_id(ioc, event_data->PhysicalPort, 0);
8727 
8728 	/* handle expander add */
8729 	if (event_data->ExpStatus == MPI2_EVENT_SAS_TOPO_ES_ADDED)
8730 		if (_scsih_expander_add(ioc, parent_handle) != 0)
8731 			return 0;
8732 
8733 	spin_lock_irqsave(&ioc->sas_node_lock, flags);
8734 	sas_expander = mpt3sas_scsih_expander_find_by_handle(ioc,
8735 	    parent_handle);
8736 	if (sas_expander) {
8737 		sas_address = sas_expander->sas_address;
8738 		max_phys = sas_expander->num_phys;
8739 		port = sas_expander->port;
8740 	} else if (parent_handle < ioc->sas_hba.num_phys) {
8741 		sas_address = ioc->sas_hba.sas_address;
8742 		max_phys = ioc->sas_hba.num_phys;
8743 	} else {
8744 		spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
8745 		return 0;
8746 	}
8747 	spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
8748 
8749 	/* handle siblings events */
8750 	for (i = 0, requeue_event = 0; i < event_data->NumEntries; i++) {
8751 		if (fw_event->ignore) {
8752 			dewtprintk(ioc,
8753 				   ioc_info(ioc, "ignoring expander event\n"));
8754 			return 0;
8755 		}
8756 		if (ioc->remove_host || ioc->pci_error_recovery)
8757 			return 0;
8758 		phy_number = event_data->StartPhyNum + i;
8759 		if (phy_number >= max_phys)
8760 			continue;
8761 		reason_code = event_data->PHY[i].PhyStatus &
8762 		    MPI2_EVENT_SAS_TOPO_RC_MASK;
8763 		if ((event_data->PHY[i].PhyStatus &
8764 		    MPI2_EVENT_SAS_TOPO_PHYSTATUS_VACANT) && (reason_code !=
8765 		    MPI2_EVENT_SAS_TOPO_RC_TARG_NOT_RESPONDING))
8766 				continue;
8767 		if (fw_event->delayed_work_active && (reason_code ==
8768 		    MPI2_EVENT_SAS_TOPO_RC_TARG_NOT_RESPONDING)) {
8769 			dewtprintk(ioc, ioc_info(ioc, "ignoring\n"
8770 			    "Target not responding event phy in re-queued event processing\n"));
8771 			continue;
8772 		}
8773 
8774 		if (fw_event->delayed_work_active && (reason_code ==
8775 		    MPI2_EVENT_SAS_TOPO_RC_TARG_NOT_RESPONDING)) {
8776 			dewtprintk(ioc, ioc_info(ioc, "ignoring Target not responding\n"
8777 						"event phy in re-queued event processing\n"));
8778 			continue;
8779 		}
8780 
8781 		handle = le16_to_cpu(event_data->PHY[i].AttachedDevHandle);
8782 		if (!handle)
8783 			continue;
8784 		link_rate = event_data->PHY[i].LinkRate >> 4;
8785 		prev_link_rate = event_data->PHY[i].LinkRate & 0xF;
8786 		switch (reason_code) {
8787 		case MPI2_EVENT_SAS_TOPO_RC_PHY_CHANGED:
8788 
8789 			if (ioc->shost_recovery)
8790 				break;
8791 
8792 			if (link_rate == prev_link_rate)
8793 				break;
8794 
8795 			mpt3sas_transport_update_links(ioc, sas_address,
8796 			    handle, phy_number, link_rate, port);
8797 
8798 			if (link_rate < MPI2_SAS_NEG_LINK_RATE_1_5)
8799 				break;
8800 
8801 			_scsih_check_device(ioc, sas_address, handle,
8802 			    phy_number, link_rate);
8803 
8804 			/* This code after this point handles the test case
8805 			 * where a device has been added, however its returning
8806 			 * BUSY for sometime.  Then before the Device Missing
8807 			 * Delay expires and the device becomes READY, the
8808 			 * device is removed and added back.
8809 			 */
8810 			spin_lock_irqsave(&ioc->sas_device_lock, flags);
8811 			sas_device = __mpt3sas_get_sdev_by_handle(ioc,
8812 			    handle);
8813 			spin_unlock_irqrestore(&ioc->sas_device_lock,
8814 			    flags);
8815 
8816 			if (sas_device) {
8817 				sas_device_put(sas_device);
8818 				break;
8819 			}
8820 
8821 			if (!test_bit(handle, ioc->pend_os_device_add))
8822 				break;
8823 
8824 			dewtprintk(ioc, ioc_info(ioc, "handle(0x%04x) device not found: convert\n"
8825 			    "event to a device add\n", handle));
8826 			event_data->PHY[i].PhyStatus &= 0xF0;
8827 			event_data->PHY[i].PhyStatus |=
8828 						MPI2_EVENT_SAS_TOPO_RC_TARG_ADDED;
8829 
8830 			fallthrough;
8831 
8832 		case MPI2_EVENT_SAS_TOPO_RC_TARG_ADDED:
8833 
8834 			if (ioc->shost_recovery)
8835 				break;
8836 
8837 			mpt3sas_transport_update_links(ioc, sas_address,
8838 			    handle, phy_number, link_rate, port);
8839 
8840 			if (link_rate < MPI2_SAS_NEG_LINK_RATE_1_5)
8841 				break;
8842 
8843 			rc = _scsih_add_device(ioc, handle,
8844 			    fw_event->retries[i], 0);
8845 			if (rc) {/* retry due to busy device */
8846 				fw_event->retries[i]++;
8847 				requeue_event = 1;
8848 			} else {/* mark entry vacant */
8849 				event_data->PHY[i].PhyStatus |=
8850 			    MPI2_EVENT_SAS_TOPO_PHYSTATUS_VACANT;
8851 			}
8852 
8853 			break;
8854 		case MPI2_EVENT_SAS_TOPO_RC_TARG_NOT_RESPONDING:
8855 
8856 			_scsih_device_remove_by_handle(ioc, handle);
8857 			break;
8858 		}
8859 	}
8860 
8861 	/* handle expander removal */
8862 	if (event_data->ExpStatus == MPI2_EVENT_SAS_TOPO_ES_NOT_RESPONDING &&
8863 	    sas_expander)
8864 		mpt3sas_expander_remove(ioc, sas_address, port);
8865 
8866 	return requeue_event;
8867 }
8868 
8869 /**
8870  * _scsih_sas_device_status_change_event_debug - debug for device event
8871  * @ioc: ?
8872  * @event_data: event data payload
8873  * Context: user.
8874  */
8875 static void
_scsih_sas_device_status_change_event_debug(struct MPT3SAS_ADAPTER * ioc,Mpi2EventDataSasDeviceStatusChange_t * event_data)8876 _scsih_sas_device_status_change_event_debug(struct MPT3SAS_ADAPTER *ioc,
8877 	Mpi2EventDataSasDeviceStatusChange_t *event_data)
8878 {
8879 	char *reason_str = NULL;
8880 
8881 	switch (event_data->ReasonCode) {
8882 	case MPI2_EVENT_SAS_DEV_STAT_RC_SMART_DATA:
8883 		reason_str = "smart data";
8884 		break;
8885 	case MPI2_EVENT_SAS_DEV_STAT_RC_UNSUPPORTED:
8886 		reason_str = "unsupported device discovered";
8887 		break;
8888 	case MPI2_EVENT_SAS_DEV_STAT_RC_INTERNAL_DEVICE_RESET:
8889 		reason_str = "internal device reset";
8890 		break;
8891 	case MPI2_EVENT_SAS_DEV_STAT_RC_TASK_ABORT_INTERNAL:
8892 		reason_str = "internal task abort";
8893 		break;
8894 	case MPI2_EVENT_SAS_DEV_STAT_RC_ABORT_TASK_SET_INTERNAL:
8895 		reason_str = "internal task abort set";
8896 		break;
8897 	case MPI2_EVENT_SAS_DEV_STAT_RC_CLEAR_TASK_SET_INTERNAL:
8898 		reason_str = "internal clear task set";
8899 		break;
8900 	case MPI2_EVENT_SAS_DEV_STAT_RC_QUERY_TASK_INTERNAL:
8901 		reason_str = "internal query task";
8902 		break;
8903 	case MPI2_EVENT_SAS_DEV_STAT_RC_SATA_INIT_FAILURE:
8904 		reason_str = "sata init failure";
8905 		break;
8906 	case MPI2_EVENT_SAS_DEV_STAT_RC_CMP_INTERNAL_DEV_RESET:
8907 		reason_str = "internal device reset complete";
8908 		break;
8909 	case MPI2_EVENT_SAS_DEV_STAT_RC_CMP_TASK_ABORT_INTERNAL:
8910 		reason_str = "internal task abort complete";
8911 		break;
8912 	case MPI2_EVENT_SAS_DEV_STAT_RC_ASYNC_NOTIFICATION:
8913 		reason_str = "internal async notification";
8914 		break;
8915 	case MPI2_EVENT_SAS_DEV_STAT_RC_EXPANDER_REDUCED_FUNCTIONALITY:
8916 		reason_str = "expander reduced functionality";
8917 		break;
8918 	case MPI2_EVENT_SAS_DEV_STAT_RC_CMP_EXPANDER_REDUCED_FUNCTIONALITY:
8919 		reason_str = "expander reduced functionality complete";
8920 		break;
8921 	default:
8922 		reason_str = "unknown reason";
8923 		break;
8924 	}
8925 	ioc_info(ioc, "device status change: (%s)\thandle(0x%04x), sas address(0x%016llx), tag(%d)",
8926 		 reason_str, le16_to_cpu(event_data->DevHandle),
8927 		 (u64)le64_to_cpu(event_data->SASAddress),
8928 		 le16_to_cpu(event_data->TaskTag));
8929 	if (event_data->ReasonCode == MPI2_EVENT_SAS_DEV_STAT_RC_SMART_DATA)
8930 		pr_cont(", ASC(0x%x), ASCQ(0x%x)\n",
8931 			event_data->ASC, event_data->ASCQ);
8932 	pr_cont("\n");
8933 }
8934 
/**
 * _scsih_sas_device_status_change_event - handle device status change
 * @ioc: per adapter object
 * @event_data: The fw event
 * Context: user.
 *
 * Tracks firmware internal device resets by toggling the target's
 * tm_busy flag so no task management is issued while a reset is
 * outstanding.
 */
static void
_scsih_sas_device_status_change_event(struct MPT3SAS_ADAPTER *ioc,
	Mpi2EventDataSasDeviceStatusChange_t *event_data)
{
	struct MPT3SAS_TARGET *target_priv_data;
	struct _sas_device *sas_device;
	u64 sas_address;
	unsigned long flags;

	/* In MPI Revision K (0xC), the internal device reset complete was
	 * implemented, so avoid setting tm_busy flag for older firmware.
	 */
	if ((ioc->facts.HeaderVersion >> 8) < 0xC)
		return;

	/* Only the internal-device-reset start/complete pair matters
	 * here; all other reason codes are ignored.
	 */
	if (event_data->ReasonCode !=
	    MPI2_EVENT_SAS_DEV_STAT_RC_INTERNAL_DEVICE_RESET &&
	   event_data->ReasonCode !=
	    MPI2_EVENT_SAS_DEV_STAT_RC_CMP_INTERNAL_DEV_RESET)
		return;

	spin_lock_irqsave(&ioc->sas_device_lock, flags);
	sas_address = le64_to_cpu(event_data->SASAddress);
	/* Lookup takes a reference; released at "out" below. */
	sas_device = __mpt3sas_get_sdev_by_addr(ioc,
	    sas_address,
	    mpt3sas_get_port_by_id(ioc, event_data->PhysicalPort, 0));

	if (!sas_device || !sas_device->starget)
		goto out;

	target_priv_data = sas_device->starget->hostdata;
	if (!target_priv_data)
		goto out;

	/* Set tm_busy while the firmware reset runs; clear it again on
	 * the completion event.
	 */
	if (event_data->ReasonCode ==
	    MPI2_EVENT_SAS_DEV_STAT_RC_INTERNAL_DEVICE_RESET)
		target_priv_data->tm_busy = 1;
	else
		target_priv_data->tm_busy = 0;

	if (ioc->logging_level & MPT_DEBUG_EVENT_WORK_TASK)
		ioc_info(ioc,
		    "%s tm_busy flag for handle(0x%04x)\n",
		    (target_priv_data->tm_busy == 1) ? "Enable" : "Disable",
		    target_priv_data->handle);

out:
	if (sas_device)
		sas_device_put(sas_device);

	spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
}
8993 
8994 
8995 /**
8996  * _scsih_check_pcie_access_status - check access flags
8997  * @ioc: per adapter object
8998  * @wwid: wwid
8999  * @handle: sas device handle
9000  * @access_status: errors returned during discovery of the device
9001  *
9002  * Return: 0 for success, else failure
9003  */
9004 static u8
_scsih_check_pcie_access_status(struct MPT3SAS_ADAPTER * ioc,u64 wwid,u16 handle,u8 access_status)9005 _scsih_check_pcie_access_status(struct MPT3SAS_ADAPTER *ioc, u64 wwid,
9006 	u16 handle, u8 access_status)
9007 {
9008 	u8 rc = 1;
9009 	char *desc = NULL;
9010 
9011 	switch (access_status) {
9012 	case MPI26_PCIEDEV0_ASTATUS_NO_ERRORS:
9013 	case MPI26_PCIEDEV0_ASTATUS_NEEDS_INITIALIZATION:
9014 		rc = 0;
9015 		break;
9016 	case MPI26_PCIEDEV0_ASTATUS_CAPABILITY_FAILED:
9017 		desc = "PCIe device capability failed";
9018 		break;
9019 	case MPI26_PCIEDEV0_ASTATUS_DEVICE_BLOCKED:
9020 		desc = "PCIe device blocked";
9021 		ioc_info(ioc,
9022 		    "Device with Access Status (%s): wwid(0x%016llx), "
9023 		    "handle(0x%04x)\n ll only be added to the internal list",
9024 		    desc, (u64)wwid, handle);
9025 		rc = 0;
9026 		break;
9027 	case MPI26_PCIEDEV0_ASTATUS_MEMORY_SPACE_ACCESS_FAILED:
9028 		desc = "PCIe device mem space access failed";
9029 		break;
9030 	case MPI26_PCIEDEV0_ASTATUS_UNSUPPORTED_DEVICE:
9031 		desc = "PCIe device unsupported";
9032 		break;
9033 	case MPI26_PCIEDEV0_ASTATUS_MSIX_REQUIRED:
9034 		desc = "PCIe device MSIx Required";
9035 		break;
9036 	case MPI26_PCIEDEV0_ASTATUS_INIT_FAIL_MAX:
9037 		desc = "PCIe device init fail max";
9038 		break;
9039 	case MPI26_PCIEDEV0_ASTATUS_UNKNOWN:
9040 		desc = "PCIe device status unknown";
9041 		break;
9042 	case MPI26_PCIEDEV0_ASTATUS_NVME_READY_TIMEOUT:
9043 		desc = "nvme ready timeout";
9044 		break;
9045 	case MPI26_PCIEDEV0_ASTATUS_NVME_DEVCFG_UNSUPPORTED:
9046 		desc = "nvme device configuration unsupported";
9047 		break;
9048 	case MPI26_PCIEDEV0_ASTATUS_NVME_IDENTIFY_FAILED:
9049 		desc = "nvme identify failed";
9050 		break;
9051 	case MPI26_PCIEDEV0_ASTATUS_NVME_QCONFIG_FAILED:
9052 		desc = "nvme qconfig failed";
9053 		break;
9054 	case MPI26_PCIEDEV0_ASTATUS_NVME_QCREATION_FAILED:
9055 		desc = "nvme qcreation failed";
9056 		break;
9057 	case MPI26_PCIEDEV0_ASTATUS_NVME_EVENTCFG_FAILED:
9058 		desc = "nvme eventcfg failed";
9059 		break;
9060 	case MPI26_PCIEDEV0_ASTATUS_NVME_GET_FEATURE_STAT_FAILED:
9061 		desc = "nvme get feature stat failed";
9062 		break;
9063 	case MPI26_PCIEDEV0_ASTATUS_NVME_IDLE_TIMEOUT:
9064 		desc = "nvme idle timeout";
9065 		break;
9066 	case MPI26_PCIEDEV0_ASTATUS_NVME_FAILURE_STATUS:
9067 		desc = "nvme failure status";
9068 		break;
9069 	default:
9070 		ioc_err(ioc, "NVMe discovery error(0x%02x): wwid(0x%016llx), handle(0x%04x)\n",
9071 			access_status, (u64)wwid, handle);
9072 		return rc;
9073 	}
9074 
9075 	if (!rc)
9076 		return rc;
9077 
9078 	ioc_info(ioc, "NVMe discovery error(%s): wwid(0x%016llx), handle(0x%04x)\n",
9079 		 desc, (u64)wwid, handle);
9080 	return rc;
9081 }
9082 
/**
 * _scsih_pcie_device_remove_from_sml -  removing pcie device
 * from SML and free up associated memory
 * @ioc: per adapter object
 * @pcie_device: the pcie_device object
 */
static void
_scsih_pcie_device_remove_from_sml(struct MPT3SAS_ADAPTER *ioc,
	struct _pcie_device *pcie_device)
{
	struct MPT3SAS_TARGET *sas_target_priv_data;

	/* Entry trace; enclosure details are only printed for devices
	 * that actually report an enclosure / connector.
	 */
	dewtprintk(ioc,
		   ioc_info(ioc, "%s: enter: handle(0x%04x), wwid(0x%016llx)\n",
			    __func__,
			    pcie_device->handle, (u64)pcie_device->wwid));
	if (pcie_device->enclosure_handle != 0)
		dewtprintk(ioc,
			   ioc_info(ioc, "%s: enter: enclosure logical id(0x%016llx), slot(%d)\n",
				    __func__,
				    (u64)pcie_device->enclosure_logical_id,
				    pcie_device->slot));
	if (pcie_device->connector_name[0] != '\0')
		dewtprintk(ioc,
			   ioc_info(ioc, "%s: enter: enclosure level(0x%04x), connector name(%s)\n",
				    __func__,
				    pcie_device->enclosure_level,
				    pcie_device->connector_name));

	/* Mark the target deleted and invalidate its handle before the
	 * SML teardown; unblock queued I/O first so it can be failed
	 * rather than hang on a blocked device.
	 */
	if (pcie_device->starget && pcie_device->starget->hostdata) {
		sas_target_priv_data = pcie_device->starget->hostdata;
		sas_target_priv_data->deleted = 1;
		_scsih_ublock_io_device(ioc, pcie_device->wwid, NULL);
		sas_target_priv_data->handle = MPT3SAS_INVALID_DEVICE_HANDLE;
	}

	ioc_info(ioc, "removing handle(0x%04x), wwid(0x%016llx)\n",
		 pcie_device->handle, (u64)pcie_device->wwid);
	if (pcie_device->enclosure_handle != 0)
		ioc_info(ioc, "removing : enclosure logical id(0x%016llx), slot(%d)\n",
			 (u64)pcie_device->enclosure_logical_id,
			 pcie_device->slot);
	if (pcie_device->connector_name[0] != '\0')
		ioc_info(ioc, "removing: enclosure level(0x%04x), connector name( %s)\n",
			 pcie_device->enclosure_level,
			 pcie_device->connector_name);

	/* A DEVICE_BLOCKED device was never registered with the SCSI
	 * midlayer, so there is no scsi_target to remove for it.
	 */
	if (pcie_device->starget && (pcie_device->access_status !=
				MPI26_PCIEDEV0_ASTATUS_DEVICE_BLOCKED))
		scsi_remove_target(&pcie_device->starget->dev);
	dewtprintk(ioc,
		   ioc_info(ioc, "%s: exit: handle(0x%04x), wwid(0x%016llx)\n",
			    __func__,
			    pcie_device->handle, (u64)pcie_device->wwid));
	if (pcie_device->enclosure_handle != 0)
		dewtprintk(ioc,
			   ioc_info(ioc, "%s: exit: enclosure logical id(0x%016llx), slot(%d)\n",
				    __func__,
				    (u64)pcie_device->enclosure_logical_id,
				    pcie_device->slot));
	if (pcie_device->connector_name[0] != '\0')
		dewtprintk(ioc,
			   ioc_info(ioc, "%s: exit: enclosure level(0x%04x), connector name( %s)\n",
				    __func__,
				    pcie_device->enclosure_level,
				    pcie_device->connector_name));

	/* serial_number was allocated during device discovery; the
	 * pcie_device object itself is released via its refcount.
	 */
	kfree(pcie_device->serial_number);
}
9152 
9153 
9154 /**
9155  * _scsih_pcie_check_device - checking device responsiveness
9156  * @ioc: per adapter object
9157  * @handle: attached device handle
9158  */
9159 static void
_scsih_pcie_check_device(struct MPT3SAS_ADAPTER * ioc,u16 handle)9160 _scsih_pcie_check_device(struct MPT3SAS_ADAPTER *ioc, u16 handle)
9161 {
9162 	Mpi2ConfigReply_t mpi_reply;
9163 	Mpi26PCIeDevicePage0_t pcie_device_pg0;
9164 	u32 ioc_status;
9165 	struct _pcie_device *pcie_device;
9166 	u64 wwid;
9167 	unsigned long flags;
9168 	struct scsi_target *starget;
9169 	struct MPT3SAS_TARGET *sas_target_priv_data;
9170 	u32 device_info;
9171 
9172 	if ((mpt3sas_config_get_pcie_device_pg0(ioc, &mpi_reply,
9173 		&pcie_device_pg0, MPI26_PCIE_DEVICE_PGAD_FORM_HANDLE, handle)))
9174 		return;
9175 
9176 	ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & MPI2_IOCSTATUS_MASK;
9177 	if (ioc_status != MPI2_IOCSTATUS_SUCCESS)
9178 		return;
9179 
9180 	/* check if this is end device */
9181 	device_info = le32_to_cpu(pcie_device_pg0.DeviceInfo);
9182 	if (!(_scsih_is_nvme_pciescsi_device(device_info)))
9183 		return;
9184 
9185 	wwid = le64_to_cpu(pcie_device_pg0.WWID);
9186 	spin_lock_irqsave(&ioc->pcie_device_lock, flags);
9187 	pcie_device = __mpt3sas_get_pdev_by_wwid(ioc, wwid);
9188 
9189 	if (!pcie_device) {
9190 		spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
9191 		return;
9192 	}
9193 
9194 	if (unlikely(pcie_device->handle != handle)) {
9195 		starget = pcie_device->starget;
9196 		sas_target_priv_data = starget->hostdata;
9197 		pcie_device->access_status = pcie_device_pg0.AccessStatus;
9198 		starget_printk(KERN_INFO, starget,
9199 		    "handle changed from(0x%04x) to (0x%04x)!!!\n",
9200 		    pcie_device->handle, handle);
9201 		sas_target_priv_data->handle = handle;
9202 		pcie_device->handle = handle;
9203 
9204 		if (le32_to_cpu(pcie_device_pg0.Flags) &
9205 		    MPI26_PCIEDEV0_FLAGS_ENCL_LEVEL_VALID) {
9206 			pcie_device->enclosure_level =
9207 			    pcie_device_pg0.EnclosureLevel;
9208 			memcpy(&pcie_device->connector_name[0],
9209 			    &pcie_device_pg0.ConnectorName[0], 4);
9210 		} else {
9211 			pcie_device->enclosure_level = 0;
9212 			pcie_device->connector_name[0] = '\0';
9213 		}
9214 	}
9215 
9216 	/* check if device is present */
9217 	if (!(le32_to_cpu(pcie_device_pg0.Flags) &
9218 	    MPI26_PCIEDEV0_FLAGS_DEVICE_PRESENT)) {
9219 		ioc_info(ioc, "device is not present handle(0x%04x), flags!!!\n",
9220 			 handle);
9221 		spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
9222 		pcie_device_put(pcie_device);
9223 		return;
9224 	}
9225 
9226 	/* check if there were any issues with discovery */
9227 	if (_scsih_check_pcie_access_status(ioc, wwid, handle,
9228 	    pcie_device_pg0.AccessStatus)) {
9229 		spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
9230 		pcie_device_put(pcie_device);
9231 		return;
9232 	}
9233 
9234 	spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
9235 	pcie_device_put(pcie_device);
9236 
9237 	if (issue_scsi_cmd_to_bringup_drive)
9238 		_scsih_ublock_io_device_wait(ioc, wwid, NULL);
9239 	else
9240 		_scsih_ublock_io_device(ioc, wwid, NULL);
9241 
9242 	return;
9243 }
9244 
9245 /**
9246  * _scsih_pcie_add_device -  creating pcie device object
9247  * @ioc: per adapter object
9248  * @handle: pcie device handle
9249  * @retry_count: number of times this event has been retried
9250  *
9251  * Creating end device object, stored in ioc->pcie_device_list.
9252  *
9253  * Return: 1 means queue the event later, 0 means complete the event
9254  */
9255 static int
_scsih_pcie_add_device(struct MPT3SAS_ADAPTER * ioc,u16 handle,u8 retry_count)9256 _scsih_pcie_add_device(struct MPT3SAS_ADAPTER *ioc, u16 handle, u8 retry_count)
9257 {
9258 	Mpi26PCIeDevicePage0_t pcie_device_pg0;
9259 	Mpi26PCIeDevicePage2_t pcie_device_pg2;
9260 	Mpi2ConfigReply_t mpi_reply;
9261 	struct _pcie_device *pcie_device;
9262 	struct _enclosure_node *enclosure_dev;
9263 	enum device_responsive_state rc;
9264 	u8 connector_name[5];
9265 	u8 tr_timeout = 30;
9266 	u8 tr_method = 0;
9267 	u32 ioc_status;
9268 	u64 wwid;
9269 
9270 	if ((mpt3sas_config_get_pcie_device_pg0(ioc, &mpi_reply,
9271 	    &pcie_device_pg0, MPI26_PCIE_DEVICE_PGAD_FORM_HANDLE, handle))) {
9272 		ioc_err(ioc, "failure at %s:%d/%s()!\n",
9273 			__FILE__, __LINE__, __func__);
9274 		return 0;
9275 	}
9276 	ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
9277 	    MPI2_IOCSTATUS_MASK;
9278 	if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
9279 		ioc_err(ioc, "failure at %s:%d/%s()!\n",
9280 			__FILE__, __LINE__, __func__);
9281 		return 0;
9282 	}
9283 
9284 	set_bit(handle, ioc->pend_os_device_add);
9285 	wwid = le64_to_cpu(pcie_device_pg0.WWID);
9286 
9287 	/* check if device is present */
9288 	if (!(le32_to_cpu(pcie_device_pg0.Flags) &
9289 		MPI26_PCIEDEV0_FLAGS_DEVICE_PRESENT)) {
9290 		ioc_err(ioc, "device is not present handle(0x04%x)!!!\n",
9291 			handle);
9292 		return 0;
9293 	}
9294 
9295 	/* check if there were any issues with discovery */
9296 	if (_scsih_check_pcie_access_status(ioc, wwid, handle,
9297 	    pcie_device_pg0.AccessStatus))
9298 		return 0;
9299 
9300 	if (!(_scsih_is_nvme_pciescsi_device(le32_to_cpu
9301 	    (pcie_device_pg0.DeviceInfo))))
9302 		return 0;
9303 
9304 	pcie_device = mpt3sas_get_pdev_by_wwid(ioc, wwid);
9305 	if (pcie_device) {
9306 		clear_bit(handle, ioc->pend_os_device_add);
9307 		pcie_device_put(pcie_device);
9308 		return 0;
9309 	}
9310 
9311 	/* PCIe Device Page 2 contains read-only information about a
9312 	 * specific NVMe device; therefore, this page is only
9313 	 * valid for NVMe devices and skip for pcie devices of type scsi.
9314 	 */
9315 	if (!(mpt3sas_scsih_is_pcie_scsi_device(
9316 		le32_to_cpu(pcie_device_pg0.DeviceInfo)))) {
9317 		if (mpt3sas_config_get_pcie_device_pg2(ioc, &mpi_reply,
9318 		    &pcie_device_pg2, MPI2_SAS_DEVICE_PGAD_FORM_HANDLE,
9319 		    handle)) {
9320 			ioc_err(ioc,
9321 			    "failure at %s:%d/%s()!\n", __FILE__,
9322 			    __LINE__, __func__);
9323 			return 0;
9324 		}
9325 
9326 		ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
9327 					MPI2_IOCSTATUS_MASK;
9328 		if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
9329 			ioc_err(ioc,
9330 			    "failure at %s:%d/%s()!\n", __FILE__,
9331 			    __LINE__, __func__);
9332 			return 0;
9333 		}
9334 
9335 		if (!ioc->tm_custom_handling) {
9336 			tr_method = MPI26_SCSITASKMGMT_MSGFLAGS_PROTOCOL_LVL_RST_PCIE;
9337 			if (pcie_device_pg2.ControllerResetTO)
9338 				tr_timeout = pcie_device_pg2.ControllerResetTO;
9339 
9340 		}
9341 	}
9342 
9343 	/*
9344 	 * Wait for device that is becoming ready
9345 	 * queue request later if device is busy.
9346 	 */
9347 	if ((!ioc->wait_for_discovery_to_complete) &&
9348 		(issue_scsi_cmd_to_bringup_drive) &&
9349 		(pcie_device_pg0.AccessStatus !=
9350 			MPI26_PCIEDEV0_ASTATUS_DEVICE_BLOCKED)) {
9351 		ioc_info(ioc, "detecting: handle(0x%04x),\n"
9352 		    "wwid(0x%016llx), port(%d)\n", handle,
9353 		    (unsigned long long)wwid, pcie_device_pg0.PortNum);
9354 
9355 		rc = _scsih_wait_for_target_to_become_ready(ioc, handle,
9356 		    retry_count, 0, tr_timeout, tr_method);
9357 		if (rc != DEVICE_READY) {
9358 			if (le16_to_cpu(pcie_device_pg0.EnclosureHandle) != 0)
9359 				dewtprintk(ioc, ioc_info(ioc, "%s:\n"
9360 				    "device not ready: slot(%d)\n",
9361 				    __func__,
9362 				    le16_to_cpu(pcie_device_pg0.Slot)));
9363 
9364 			if (le32_to_cpu(pcie_device_pg0.Flags) &
9365 			    MPI26_PCIEDEV0_FLAGS_ENCL_LEVEL_VALID) {
9366 				memcpy(connector_name,
9367 				    pcie_device_pg0.ConnectorName, 4);
9368 				connector_name[4] = '\0';
9369 				dewtprintk(ioc, ioc_info(ioc, "%s: device not ready: enclosure\n"
9370 				    "level(0x%04x), connector name( %s)\n", __func__,
9371 				    pcie_device_pg0.EnclosureLevel,
9372 				    connector_name));
9373 			}
9374 
9375 			if (rc == DEVICE_RETRY || rc == DEVICE_START_UNIT ||
9376 				rc == DEVICE_STOP_UNIT || rc == DEVICE_RETRY_UA)
9377 				return 1;
9378 			else if (rc == DEVICE_ERROR)
9379 				return 0;
9380 		}
9381 	}
9382 
9383 	pcie_device = kzalloc_obj(struct _pcie_device);
9384 	if (!pcie_device) {
9385 		ioc_err(ioc, "failure at %s:%d/%s()!\n",
9386 			__FILE__, __LINE__, __func__);
9387 		return 0;
9388 	}
9389 
9390 	kref_init(&pcie_device->refcount);
9391 	pcie_device->id = ioc->pcie_target_id++;
9392 	pcie_device->channel = PCIE_CHANNEL;
9393 	pcie_device->handle = handle;
9394 	pcie_device->access_status = pcie_device_pg0.AccessStatus;
9395 	pcie_device->device_info = le32_to_cpu(pcie_device_pg0.DeviceInfo);
9396 	pcie_device->wwid = wwid;
9397 	pcie_device->port_num = pcie_device_pg0.PortNum;
9398 	pcie_device->fast_path = (le32_to_cpu(pcie_device_pg0.Flags) &
9399 	    MPI26_PCIEDEV0_FLAGS_FAST_PATH_CAPABLE) ? 1 : 0;
9400 
9401 	pcie_device->enclosure_handle =
9402 	    le16_to_cpu(pcie_device_pg0.EnclosureHandle);
9403 	if (pcie_device->enclosure_handle != 0)
9404 		pcie_device->slot = le16_to_cpu(pcie_device_pg0.Slot);
9405 
9406 	if (le32_to_cpu(pcie_device_pg0.Flags) &
9407 	    MPI26_PCIEDEV0_FLAGS_ENCL_LEVEL_VALID) {
9408 		pcie_device->enclosure_level = pcie_device_pg0.EnclosureLevel;
9409 		memcpy(&pcie_device->connector_name[0],
9410 		    &pcie_device_pg0.ConnectorName[0], 4);
9411 	} else {
9412 		pcie_device->enclosure_level = 0;
9413 		pcie_device->connector_name[0] = '\0';
9414 	}
9415 
9416 	/* get enclosure_logical_id */
9417 	if (pcie_device->enclosure_handle) {
9418 		enclosure_dev =
9419 			mpt3sas_scsih_enclosure_find_by_handle(ioc,
9420 						pcie_device->enclosure_handle);
9421 		if (enclosure_dev)
9422 			pcie_device->enclosure_logical_id =
9423 			    le64_to_cpu(enclosure_dev->pg0.EnclosureLogicalID);
9424 	}
9425 	/* TODO -- Add device name once FW supports it */
9426 	if (!(mpt3sas_scsih_is_pcie_scsi_device(
9427 	    le32_to_cpu(pcie_device_pg0.DeviceInfo)))) {
9428 		pcie_device->nvme_mdts =
9429 		    le32_to_cpu(pcie_device_pg2.MaximumDataTransferSize);
9430 		pcie_device->shutdown_latency =
9431 			le16_to_cpu(pcie_device_pg2.ShutdownLatency);
9432 		/*
9433 		 * Set IOC's max_shutdown_latency to drive's RTD3 Entry Latency
9434 		 * if drive's RTD3 Entry Latency is greater then IOC's
9435 		 * max_shutdown_latency.
9436 		 */
9437 		if (pcie_device->shutdown_latency > ioc->max_shutdown_latency)
9438 			ioc->max_shutdown_latency =
9439 				pcie_device->shutdown_latency;
9440 		if (pcie_device_pg2.ControllerResetTO)
9441 			pcie_device->reset_timeout =
9442 			    pcie_device_pg2.ControllerResetTO;
9443 		else
9444 			pcie_device->reset_timeout = 30;
9445 	} else
9446 		pcie_device->reset_timeout = 30;
9447 
9448 	if (ioc->wait_for_discovery_to_complete)
9449 		_scsih_pcie_device_init_add(ioc, pcie_device);
9450 	else
9451 		_scsih_pcie_device_add(ioc, pcie_device);
9452 
9453 	pcie_device_put(pcie_device);
9454 	return 0;
9455 }
9456 
9457 /**
9458  * _scsih_pcie_topology_change_event_debug - debug for topology
9459  * event
9460  * @ioc: per adapter object
9461  * @event_data: event data payload
9462  * Context: user.
9463  */
9464 static void
_scsih_pcie_topology_change_event_debug(struct MPT3SAS_ADAPTER * ioc,Mpi26EventDataPCIeTopologyChangeList_t * event_data)9465 _scsih_pcie_topology_change_event_debug(struct MPT3SAS_ADAPTER *ioc,
9466 	Mpi26EventDataPCIeTopologyChangeList_t *event_data)
9467 {
9468 	int i;
9469 	u16 handle;
9470 	u16 reason_code;
9471 	u8 port_number;
9472 	char *status_str = NULL;
9473 	u8 link_rate, prev_link_rate;
9474 
9475 	switch (event_data->SwitchStatus) {
9476 	case MPI26_EVENT_PCIE_TOPO_SS_ADDED:
9477 		status_str = "add";
9478 		break;
9479 	case MPI26_EVENT_PCIE_TOPO_SS_NOT_RESPONDING:
9480 		status_str = "remove";
9481 		break;
9482 	case MPI26_EVENT_PCIE_TOPO_SS_RESPONDING:
9483 	case 0:
9484 		status_str =  "responding";
9485 		break;
9486 	case MPI26_EVENT_PCIE_TOPO_SS_DELAY_NOT_RESPONDING:
9487 		status_str = "remove delay";
9488 		break;
9489 	default:
9490 		status_str = "unknown status";
9491 		break;
9492 	}
9493 	ioc_info(ioc, "pcie topology change: (%s)\n", status_str);
9494 	pr_info("\tswitch_handle(0x%04x), enclosure_handle(0x%04x)"
9495 		"start_port(%02d), count(%d)\n",
9496 		le16_to_cpu(event_data->SwitchDevHandle),
9497 		le16_to_cpu(event_data->EnclosureHandle),
9498 		event_data->StartPortNum, event_data->NumEntries);
9499 	for (i = 0; i < event_data->NumEntries; i++) {
9500 		handle =
9501 			le16_to_cpu(event_data->PortEntry[i].AttachedDevHandle);
9502 		if (!handle)
9503 			continue;
9504 		port_number = event_data->StartPortNum + i;
9505 		reason_code = event_data->PortEntry[i].PortStatus;
9506 		switch (reason_code) {
9507 		case MPI26_EVENT_PCIE_TOPO_PS_DEV_ADDED:
9508 			status_str = "target add";
9509 			break;
9510 		case MPI26_EVENT_PCIE_TOPO_PS_NOT_RESPONDING:
9511 			status_str = "target remove";
9512 			break;
9513 		case MPI26_EVENT_PCIE_TOPO_PS_DELAY_NOT_RESPONDING:
9514 			status_str = "delay target remove";
9515 			break;
9516 		case MPI26_EVENT_PCIE_TOPO_PS_PORT_CHANGED:
9517 			status_str = "link rate change";
9518 			break;
9519 		case MPI26_EVENT_PCIE_TOPO_PS_NO_CHANGE:
9520 			status_str = "target responding";
9521 			break;
9522 		default:
9523 			status_str = "unknown";
9524 			break;
9525 		}
9526 		link_rate = event_data->PortEntry[i].CurrentPortInfo &
9527 			MPI26_EVENT_PCIE_TOPO_PI_RATE_MASK;
9528 		prev_link_rate = event_data->PortEntry[i].PreviousPortInfo &
9529 			MPI26_EVENT_PCIE_TOPO_PI_RATE_MASK;
9530 		pr_info("\tport(%02d), attached_handle(0x%04x): %s:"
9531 			" link rate: new(0x%02x), old(0x%02x)\n", port_number,
9532 			handle, status_str, link_rate, prev_link_rate);
9533 	}
9534 }
9535 
/**
 * _scsih_pcie_topology_change_event - handle PCIe topology
 *  changes
 * @ioc: per adapter object
 * @fw_event: The fw_event_work object
 * Context: user.
 *
 * Return: 1 when the event should be requeued (a device reported busy
 * during add), otherwise 0.
 */
static int
_scsih_pcie_topology_change_event(struct MPT3SAS_ADAPTER *ioc,
	struct fw_event_work *fw_event)
{
	int i;
	u16 handle;
	u16 reason_code;
	u8 link_rate, prev_link_rate;
	unsigned long flags;
	int rc;
	int requeue_event;
	Mpi26EventDataPCIeTopologyChangeList_t *event_data =
		(Mpi26EventDataPCIeTopologyChangeList_t *) fw_event->event_data;
	struct _pcie_device *pcie_device;

	if (ioc->logging_level & MPT_DEBUG_EVENT_WORK_TASK)
		_scsih_pcie_topology_change_event_debug(ioc, event_data);

	/* Nothing to do while the host is resetting or being removed. */
	if (ioc->shost_recovery || ioc->remove_host ||
		ioc->pci_error_recovery)
		return 0;

	if (fw_event->ignore) {
		dewtprintk(ioc, ioc_info(ioc, "ignoring switch event\n"));
		return 0;
	}

	/* handle siblings events */
	for (i = 0, requeue_event = 0; i < event_data->NumEntries; i++) {
		/* ignore/remove flags can be set asynchronously while this
		 * loop runs, so re-check them on every iteration.
		 */
		if (fw_event->ignore) {
			dewtprintk(ioc,
				   ioc_info(ioc, "ignoring switch event\n"));
			return 0;
		}
		if (ioc->remove_host || ioc->pci_error_recovery)
			return 0;
		reason_code = event_data->PortEntry[i].PortStatus;
		handle =
			le16_to_cpu(event_data->PortEntry[i].AttachedDevHandle);
		if (!handle)
			continue;

		link_rate = event_data->PortEntry[i].CurrentPortInfo
			& MPI26_EVENT_PCIE_TOPO_PI_RATE_MASK;
		prev_link_rate = event_data->PortEntry[i].PreviousPortInfo
			& MPI26_EVENT_PCIE_TOPO_PI_RATE_MASK;

		switch (reason_code) {
		case MPI26_EVENT_PCIE_TOPO_PS_PORT_CHANGED:
			if (ioc->shost_recovery)
				break;
			if (link_rate == prev_link_rate)
				break;
			if (link_rate < MPI26_EVENT_PCIE_TOPO_PI_RATE_2_5)
				break;

			_scsih_pcie_check_device(ioc, handle);

			/* This code after this point handles the test case
			 * where a device has been added, however its returning
			 * BUSY for sometime.  Then before the Device Missing
			 * Delay expires and the device becomes READY, the
			 * device is removed and added back.
			 */
			spin_lock_irqsave(&ioc->pcie_device_lock, flags);
			pcie_device = __mpt3sas_get_pdev_by_handle(ioc, handle);
			spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);

			/* Device already tracked: nothing more to do. */
			if (pcie_device) {
				pcie_device_put(pcie_device);
				break;
			}

			if (!test_bit(handle, ioc->pend_os_device_add))
				break;

			/* Not in our list but an add is pending: rewrite the
			 * entry's status to DEV_ADDED and fall through to the
			 * add path below.
			 */
			dewtprintk(ioc,
				   ioc_info(ioc, "handle(0x%04x) device not found: convert event to a device add\n",
					    handle));
			event_data->PortEntry[i].PortStatus &= 0xF0;
			event_data->PortEntry[i].PortStatus |=
				MPI26_EVENT_PCIE_TOPO_PS_DEV_ADDED;
			fallthrough;
		case MPI26_EVENT_PCIE_TOPO_PS_DEV_ADDED:
			if (ioc->shost_recovery)
				break;
			if (link_rate < MPI26_EVENT_PCIE_TOPO_PI_RATE_2_5)
				break;

			rc = _scsih_pcie_add_device(ioc, handle, fw_event->retries[i]);
			if (rc) {/* retry due to busy device */
				fw_event->retries[i]++;
				requeue_event = 1;
			} else {
				/* mark entry vacant */
				/* TODO This needs to be reviewed and fixed,
				 * we dont have an entry
				 * to make an event void like vacant
				 */
				event_data->PortEntry[i].PortStatus |=
					MPI26_EVENT_PCIE_TOPO_PS_NO_CHANGE;
			}
			break;
		case MPI26_EVENT_PCIE_TOPO_PS_NOT_RESPONDING:
			_scsih_pcie_device_remove_by_handle(ioc, handle);
			break;
		}
	}
	return requeue_event;
}
9654 
9655 /**
9656  * _scsih_pcie_device_status_change_event_debug - debug for device event
9657  * @ioc: per adapter object
9658  * @event_data: event data payload
9659  * Context: user.
9660  */
9661 static void
_scsih_pcie_device_status_change_event_debug(struct MPT3SAS_ADAPTER * ioc,Mpi26EventDataPCIeDeviceStatusChange_t * event_data)9662 _scsih_pcie_device_status_change_event_debug(struct MPT3SAS_ADAPTER *ioc,
9663 	Mpi26EventDataPCIeDeviceStatusChange_t *event_data)
9664 {
9665 	char *reason_str = NULL;
9666 
9667 	switch (event_data->ReasonCode) {
9668 	case MPI26_EVENT_PCIDEV_STAT_RC_SMART_DATA:
9669 		reason_str = "smart data";
9670 		break;
9671 	case MPI26_EVENT_PCIDEV_STAT_RC_UNSUPPORTED:
9672 		reason_str = "unsupported device discovered";
9673 		break;
9674 	case MPI26_EVENT_PCIDEV_STAT_RC_INTERNAL_DEVICE_RESET:
9675 		reason_str = "internal device reset";
9676 		break;
9677 	case MPI26_EVENT_PCIDEV_STAT_RC_TASK_ABORT_INTERNAL:
9678 		reason_str = "internal task abort";
9679 		break;
9680 	case MPI26_EVENT_PCIDEV_STAT_RC_ABORT_TASK_SET_INTERNAL:
9681 		reason_str = "internal task abort set";
9682 		break;
9683 	case MPI26_EVENT_PCIDEV_STAT_RC_CLEAR_TASK_SET_INTERNAL:
9684 		reason_str = "internal clear task set";
9685 		break;
9686 	case MPI26_EVENT_PCIDEV_STAT_RC_QUERY_TASK_INTERNAL:
9687 		reason_str = "internal query task";
9688 		break;
9689 	case MPI26_EVENT_PCIDEV_STAT_RC_DEV_INIT_FAILURE:
9690 		reason_str = "device init failure";
9691 		break;
9692 	case MPI26_EVENT_PCIDEV_STAT_RC_CMP_INTERNAL_DEV_RESET:
9693 		reason_str = "internal device reset complete";
9694 		break;
9695 	case MPI26_EVENT_PCIDEV_STAT_RC_CMP_TASK_ABORT_INTERNAL:
9696 		reason_str = "internal task abort complete";
9697 		break;
9698 	case MPI26_EVENT_PCIDEV_STAT_RC_ASYNC_NOTIFICATION:
9699 		reason_str = "internal async notification";
9700 		break;
9701 	case MPI26_EVENT_PCIDEV_STAT_RC_PCIE_HOT_RESET_FAILED:
9702 		reason_str = "pcie hot reset failed";
9703 		break;
9704 	default:
9705 		reason_str = "unknown reason";
9706 		break;
9707 	}
9708 
9709 	ioc_info(ioc, "PCIE device status change: (%s)\n"
9710 		 "\thandle(0x%04x), WWID(0x%016llx), tag(%d)",
9711 		 reason_str, le16_to_cpu(event_data->DevHandle),
9712 		 (u64)le64_to_cpu(event_data->WWID),
9713 		 le16_to_cpu(event_data->TaskTag));
9714 	if (event_data->ReasonCode == MPI26_EVENT_PCIDEV_STAT_RC_SMART_DATA)
9715 		pr_cont(", ASC(0x%x), ASCQ(0x%x)\n",
9716 			event_data->ASC, event_data->ASCQ);
9717 	pr_cont("\n");
9718 }
9719 
9720 /**
9721  * _scsih_pcie_device_status_change_event - handle device status
9722  * change
9723  * @ioc: per adapter object
9724  * @fw_event: The fw_event_work object
9725  * Context: user.
9726  */
9727 static void
_scsih_pcie_device_status_change_event(struct MPT3SAS_ADAPTER * ioc,struct fw_event_work * fw_event)9728 _scsih_pcie_device_status_change_event(struct MPT3SAS_ADAPTER *ioc,
9729 	struct fw_event_work *fw_event)
9730 {
9731 	struct MPT3SAS_TARGET *target_priv_data;
9732 	struct _pcie_device *pcie_device;
9733 	u64 wwid;
9734 	unsigned long flags;
9735 	Mpi26EventDataPCIeDeviceStatusChange_t *event_data =
9736 		(Mpi26EventDataPCIeDeviceStatusChange_t *)fw_event->event_data;
9737 	if (ioc->logging_level & MPT_DEBUG_EVENT_WORK_TASK)
9738 		_scsih_pcie_device_status_change_event_debug(ioc,
9739 			event_data);
9740 
9741 	if (event_data->ReasonCode !=
9742 		MPI26_EVENT_PCIDEV_STAT_RC_INTERNAL_DEVICE_RESET &&
9743 		event_data->ReasonCode !=
9744 		MPI26_EVENT_PCIDEV_STAT_RC_CMP_INTERNAL_DEV_RESET)
9745 		return;
9746 
9747 	spin_lock_irqsave(&ioc->pcie_device_lock, flags);
9748 	wwid = le64_to_cpu(event_data->WWID);
9749 	pcie_device = __mpt3sas_get_pdev_by_wwid(ioc, wwid);
9750 
9751 	if (!pcie_device || !pcie_device->starget)
9752 		goto out;
9753 
9754 	target_priv_data = pcie_device->starget->hostdata;
9755 	if (!target_priv_data)
9756 		goto out;
9757 
9758 	if (event_data->ReasonCode ==
9759 		MPI26_EVENT_PCIDEV_STAT_RC_INTERNAL_DEVICE_RESET)
9760 		target_priv_data->tm_busy = 1;
9761 	else
9762 		target_priv_data->tm_busy = 0;
9763 out:
9764 	if (pcie_device)
9765 		pcie_device_put(pcie_device);
9766 
9767 	spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
9768 }
9769 
9770 /**
9771  * _scsih_sas_enclosure_dev_status_change_event_debug - debug for enclosure
9772  * event
9773  * @ioc: per adapter object
9774  * @event_data: event data payload
9775  * Context: user.
9776  */
9777 static void
_scsih_sas_enclosure_dev_status_change_event_debug(struct MPT3SAS_ADAPTER * ioc,Mpi2EventDataSasEnclDevStatusChange_t * event_data)9778 _scsih_sas_enclosure_dev_status_change_event_debug(struct MPT3SAS_ADAPTER *ioc,
9779 	Mpi2EventDataSasEnclDevStatusChange_t *event_data)
9780 {
9781 	char *reason_str = NULL;
9782 
9783 	switch (event_data->ReasonCode) {
9784 	case MPI2_EVENT_SAS_ENCL_RC_ADDED:
9785 		reason_str = "enclosure add";
9786 		break;
9787 	case MPI2_EVENT_SAS_ENCL_RC_NOT_RESPONDING:
9788 		reason_str = "enclosure remove";
9789 		break;
9790 	default:
9791 		reason_str = "unknown reason";
9792 		break;
9793 	}
9794 
9795 	ioc_info(ioc, "enclosure status change: (%s)\n"
9796 		 "\thandle(0x%04x), enclosure logical id(0x%016llx) number slots(%d)\n",
9797 		 reason_str,
9798 		 le16_to_cpu(event_data->EnclosureHandle),
9799 		 (u64)le64_to_cpu(event_data->EnclosureLogicalID),
9800 		 le16_to_cpu(event_data->StartSlot));
9801 }
9802 
9803 /**
9804  * _scsih_sas_enclosure_dev_status_change_event - handle enclosure events
9805  * @ioc: per adapter object
9806  * @fw_event: The fw_event_work object
9807  * Context: user.
9808  */
9809 static void
_scsih_sas_enclosure_dev_status_change_event(struct MPT3SAS_ADAPTER * ioc,struct fw_event_work * fw_event)9810 _scsih_sas_enclosure_dev_status_change_event(struct MPT3SAS_ADAPTER *ioc,
9811 	struct fw_event_work *fw_event)
9812 {
9813 	Mpi2ConfigReply_t mpi_reply;
9814 	struct _enclosure_node *enclosure_dev = NULL;
9815 	Mpi2EventDataSasEnclDevStatusChange_t *event_data =
9816 		(Mpi2EventDataSasEnclDevStatusChange_t *)fw_event->event_data;
9817 	int rc;
9818 	u16 enclosure_handle = le16_to_cpu(event_data->EnclosureHandle);
9819 
9820 	if (ioc->logging_level & MPT_DEBUG_EVENT_WORK_TASK)
9821 		_scsih_sas_enclosure_dev_status_change_event_debug(ioc,
9822 		     (Mpi2EventDataSasEnclDevStatusChange_t *)
9823 		     fw_event->event_data);
9824 	if (ioc->shost_recovery)
9825 		return;
9826 
9827 	if (enclosure_handle)
9828 		enclosure_dev =
9829 			mpt3sas_scsih_enclosure_find_by_handle(ioc,
9830 						enclosure_handle);
9831 	switch (event_data->ReasonCode) {
9832 	case MPI2_EVENT_SAS_ENCL_RC_ADDED:
9833 		if (!enclosure_dev) {
9834 			enclosure_dev =
9835 				kzalloc_obj(struct _enclosure_node);
9836 			if (!enclosure_dev) {
9837 				ioc_info(ioc, "failure at %s:%d/%s()!\n",
9838 					 __FILE__, __LINE__, __func__);
9839 				return;
9840 			}
9841 			rc = mpt3sas_config_get_enclosure_pg0(ioc, &mpi_reply,
9842 				&enclosure_dev->pg0,
9843 				MPI2_SAS_ENCLOS_PGAD_FORM_HANDLE,
9844 				enclosure_handle);
9845 
9846 			if (rc || (le16_to_cpu(mpi_reply.IOCStatus) &
9847 						MPI2_IOCSTATUS_MASK)) {
9848 				kfree(enclosure_dev);
9849 				return;
9850 			}
9851 
9852 			list_add_tail(&enclosure_dev->list,
9853 							&ioc->enclosure_list);
9854 		}
9855 		break;
9856 	case MPI2_EVENT_SAS_ENCL_RC_NOT_RESPONDING:
9857 		if (enclosure_dev) {
9858 			list_del(&enclosure_dev->list);
9859 			kfree(enclosure_dev);
9860 		}
9861 		break;
9862 	default:
9863 		break;
9864 	}
9865 }
9866 
/**
 * _scsih_sas_broadcast_primitive_event - handle broadcast events
 * @ioc: per adapter object
 * @fw_event: The fw_event_work object
 * Context: user.
 *
 * A broadcast primitive may indicate that one or more targets dropped
 * outstanding commands.  Walk every active SCSI IO, query its state with
 * a QUERY_TASK TM, and for any IO the firmware no longer owns issue
 * ABORT_TASK so the midlayer sees a completion.  IO to all devices is
 * blocked for the duration of the scan and unblocked on exit.
 */
static void
_scsih_sas_broadcast_primitive_event(struct MPT3SAS_ADAPTER *ioc,
	struct fw_event_work *fw_event)
{
	struct scsi_cmnd *scmd;
	struct scsi_device *sdev;
	struct scsiio_tracker *st;
	u16 smid, handle;
	u32 lun;
	struct MPT3SAS_DEVICE *sas_device_priv_data;
	u32 termination_count;
	u32 query_count;
	Mpi2SCSITaskManagementReply_t *mpi_reply;
	Mpi2EventDataSasBroadcastPrimitive_t *event_data =
		(Mpi2EventDataSasBroadcastPrimitive_t *)
		fw_event->event_data;
	u16 ioc_status;
	unsigned long flags;
	int r;
	u8 max_retries = 0;
	u8 task_abort_retries;

	/* serialize against every other user of the shared tm_cmds frame */
	mutex_lock(&ioc->tm_cmds.mutex);
	ioc_info(ioc, "%s: enter: phy number(%d), width(%d)\n",
		 __func__, event_data->PhyNum, event_data->PortWidth);

	_scsih_block_io_all_device(ioc);

	spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
	mpi_reply = ioc->tm_cmds.reply;
 broadcast_aen_retry:

	/* sanity checks for retrying this loop */
	if (max_retries++ == 5) {
		dewtprintk(ioc, ioc_info(ioc, "%s: giving up\n", __func__));
		goto out;
	} else if (max_retries > 1)
		dewtprintk(ioc,
			   ioc_info(ioc, "%s: %d retry\n",
				    __func__, max_retries - 1));

	termination_count = 0;
	query_count = 0;
	for (smid = 1; smid <= ioc->scsiio_depth; smid++) {
		if (ioc->shost_recovery)
			goto out;
		/* NULL when this smid carries no outstanding scsi command */
		scmd = mpt3sas_scsih_scsi_lookup_get(ioc, smid);
		if (!scmd)
			continue;
		st = scsi_cmd_priv(scmd);
		sdev = scmd->device;
		sas_device_priv_data = sdev->hostdata;
		if (!sas_device_priv_data || !sas_device_priv_data->sas_target)
			continue;
		 /* skip hidden raid components */
		if (sas_device_priv_data->sas_target->flags &
		    MPT_TARGET_FLAGS_RAID_COMPONENT)
			continue;
		 /* skip volumes */
		if (sas_device_priv_data->sas_target->flags &
		    MPT_TARGET_FLAGS_VOLUME)
			continue;
		 /* skip PCIe devices */
		if (sas_device_priv_data->sas_target->flags &
		    MPT_TARGET_FLAGS_PCIE_DEVICE)
			continue;

		handle = sas_device_priv_data->sas_target->handle;
		lun = sas_device_priv_data->lun;
		query_count++;

		if (ioc->shost_recovery)
			goto out;

		/* drop the lookup lock around the blocking TM request */
		spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
		r = mpt3sas_scsih_issue_tm(ioc, handle, 0, 0, lun,
			MPI2_SCSITASKMGMT_TASKTYPE_QUERY_TASK, st->smid,
			st->msix_io, 30, 0);
		if (r == FAILED) {
			sdev_printk(KERN_WARNING, sdev,
			    "mpt3sas_scsih_issue_tm: FAILED when sending "
			    "QUERY_TASK: scmd(%p)\n", scmd);
			spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
			goto broadcast_aen_retry;
		}
		ioc_status = le16_to_cpu(mpi_reply->IOCStatus)
		    & MPI2_IOCSTATUS_MASK;
		if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
			sdev_printk(KERN_WARNING, sdev,
				"query task: FAILED with IOCSTATUS(0x%04x), scmd(%p)\n",
				ioc_status, scmd);
			spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
			goto broadcast_aen_retry;
		}

		/* see if IO is still owned by IOC and target */
		if (mpi_reply->ResponseCode ==
		     MPI2_SCSITASKMGMT_RSP_TM_SUCCEEDED ||
		     mpi_reply->ResponseCode ==
		     MPI2_SCSITASKMGMT_RSP_IO_QUEUED_ON_IOC) {
			spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
			continue;
		}
		/* firmware dropped this IO: abort it so the midlayer recovers */
		task_abort_retries = 0;
 tm_retry:
		if (task_abort_retries++ == 60) {
			dewtprintk(ioc,
				   ioc_info(ioc, "%s: ABORT_TASK: giving up\n",
					    __func__));
			spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
			goto broadcast_aen_retry;
		}

		if (ioc->shost_recovery)
			goto out_no_lock;

		r = mpt3sas_scsih_issue_tm(ioc, handle, sdev->channel, sdev->id,
			sdev->lun, MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK,
			st->smid, st->msix_io, 30, 0);
		/* cb_idx != 0xFF means the command has still not completed */
		if (r == FAILED || st->cb_idx != 0xFF) {
			sdev_printk(KERN_WARNING, sdev,
			    "mpt3sas_scsih_issue_tm: ABORT_TASK: FAILED : "
			    "scmd(%p)\n", scmd);
			goto tm_retry;
		}

		if (task_abort_retries > 1)
			sdev_printk(KERN_WARNING, sdev,
			    "mpt3sas_scsih_issue_tm: ABORT_TASK: RETRIES (%d):"
			    " scmd(%p)\n",
			    task_abort_retries - 1, scmd);

		termination_count += le32_to_cpu(mpi_reply->TerminationCount);
		spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
	}

	/* another broadcast AEN arrived during the scan: rescan once more */
	if (ioc->broadcast_aen_pending) {
		dewtprintk(ioc,
			   ioc_info(ioc,
				    "%s: loop back due to pending AEN\n",
				    __func__));
		 ioc->broadcast_aen_pending = 0;
		 goto broadcast_aen_retry;
	}

 out:
	spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
 out_no_lock:

	dewtprintk(ioc,
		   ioc_info(ioc, "%s - exit, query_count = %d termination_count = %d\n",
			    __func__, query_count, termination_count));

	ioc->broadcast_aen_busy = 0;
	if (!ioc->shost_recovery)
		_scsih_ublock_io_all_device(ioc, 1);
	mutex_unlock(&ioc->tm_cmds.mutex);
}
10031 
10032 /**
10033  * _scsih_sas_discovery_event - handle discovery events
10034  * @ioc: per adapter object
10035  * @fw_event: The fw_event_work object
10036  * Context: user.
10037  */
10038 static void
_scsih_sas_discovery_event(struct MPT3SAS_ADAPTER * ioc,struct fw_event_work * fw_event)10039 _scsih_sas_discovery_event(struct MPT3SAS_ADAPTER *ioc,
10040 	struct fw_event_work *fw_event)
10041 {
10042 	Mpi2EventDataSasDiscovery_t *event_data =
10043 		(Mpi2EventDataSasDiscovery_t *) fw_event->event_data;
10044 
10045 	if (ioc->logging_level & MPT_DEBUG_EVENT_WORK_TASK) {
10046 		ioc_info(ioc, "discovery event: (%s)",
10047 			 event_data->ReasonCode == MPI2_EVENT_SAS_DISC_RC_STARTED ?
10048 			 "start" : "stop");
10049 		if (event_data->DiscoveryStatus)
10050 			pr_cont("discovery_status(0x%08x)",
10051 				le32_to_cpu(event_data->DiscoveryStatus));
10052 		pr_cont("\n");
10053 	}
10054 
10055 	if (event_data->ReasonCode == MPI2_EVENT_SAS_DISC_RC_STARTED &&
10056 	    !ioc->sas_hba.num_phys) {
10057 		if (disable_discovery > 0 && ioc->shost_recovery) {
10058 			/* Wait for the reset to complete */
10059 			while (ioc->shost_recovery)
10060 				ssleep(1);
10061 		}
10062 		_scsih_sas_host_add(ioc);
10063 	}
10064 }
10065 
10066 /**
10067  * _scsih_sas_device_discovery_error_event - display SAS device discovery error
10068  *						events
10069  * @ioc: per adapter object
10070  * @fw_event: The fw_event_work object
10071  * Context: user.
10072  */
10073 static void
_scsih_sas_device_discovery_error_event(struct MPT3SAS_ADAPTER * ioc,struct fw_event_work * fw_event)10074 _scsih_sas_device_discovery_error_event(struct MPT3SAS_ADAPTER *ioc,
10075 	struct fw_event_work *fw_event)
10076 {
10077 	Mpi25EventDataSasDeviceDiscoveryError_t *event_data =
10078 		(Mpi25EventDataSasDeviceDiscoveryError_t *)fw_event->event_data;
10079 
10080 	switch (event_data->ReasonCode) {
10081 	case MPI25_EVENT_SAS_DISC_ERR_SMP_FAILED:
10082 		ioc_warn(ioc, "SMP command sent to the expander (handle:0x%04x, sas_address:0x%016llx, physical_port:0x%02x) has failed\n",
10083 			 le16_to_cpu(event_data->DevHandle),
10084 			 (u64)le64_to_cpu(event_data->SASAddress),
10085 			 event_data->PhysicalPort);
10086 		break;
10087 	case MPI25_EVENT_SAS_DISC_ERR_SMP_TIMEOUT:
10088 		ioc_warn(ioc, "SMP command sent to the expander (handle:0x%04x, sas_address:0x%016llx, physical_port:0x%02x) has timed out\n",
10089 			 le16_to_cpu(event_data->DevHandle),
10090 			 (u64)le64_to_cpu(event_data->SASAddress),
10091 			 event_data->PhysicalPort);
10092 		break;
10093 	default:
10094 		break;
10095 	}
10096 }
10097 
10098 /**
10099  * _scsih_pcie_enumeration_event - handle enumeration events
10100  * @ioc: per adapter object
10101  * @fw_event: The fw_event_work object
10102  * Context: user.
10103  */
10104 static void
_scsih_pcie_enumeration_event(struct MPT3SAS_ADAPTER * ioc,struct fw_event_work * fw_event)10105 _scsih_pcie_enumeration_event(struct MPT3SAS_ADAPTER *ioc,
10106 	struct fw_event_work *fw_event)
10107 {
10108 	Mpi26EventDataPCIeEnumeration_t *event_data =
10109 		(Mpi26EventDataPCIeEnumeration_t *)fw_event->event_data;
10110 
10111 	if (!(ioc->logging_level & MPT_DEBUG_EVENT_WORK_TASK))
10112 		return;
10113 
10114 	ioc_info(ioc, "pcie enumeration event: (%s) Flag 0x%02x",
10115 		 (event_data->ReasonCode == MPI26_EVENT_PCIE_ENUM_RC_STARTED) ?
10116 		 "started" : "completed",
10117 		 event_data->Flags);
10118 	if (event_data->EnumerationStatus)
10119 		pr_cont("enumeration_status(0x%08x)",
10120 			le32_to_cpu(event_data->EnumerationStatus));
10121 	pr_cont("\n");
10122 }
10123 
/**
 * _scsih_ir_fastpath - turn on fastpath for IR physdisk
 * @ioc: per adapter object
 * @handle: device handle for physical disk
 * @phys_disk_num: physical disk number
 *
 * Sends a MPI2_RAID_ACTION_PHYSDISK_HIDDEN RAID action through the
 * shared scsih_cmds internal-command frame so the firmware enables the
 * fast path for the given hidden physical disk.  A no-op on MPI2
 * (SAS 2.0) controllers.
 *
 * Return: 0 for success, else failure.
 */
static int
_scsih_ir_fastpath(struct MPT3SAS_ADAPTER *ioc, u16 handle, u8 phys_disk_num)
{
	Mpi2RaidActionRequest_t *mpi_request;
	Mpi2RaidActionReply_t *mpi_reply;
	u16 smid;
	u8 issue_reset = 0;
	int rc = 0;
	u16 ioc_status;
	u32 log_info;

	/* fast path RAID action is not applicable to SAS 2.0 HBAs */
	if (ioc->hba_mpi_version_belonged == MPI2_VERSION)
		return rc;

	/* serialize use of the shared scsih_cmds command frame */
	mutex_lock(&ioc->scsih_cmds.mutex);

	if (ioc->scsih_cmds.status != MPT3_CMD_NOT_USED) {
		ioc_err(ioc, "%s: scsih_cmd in use\n", __func__);
		rc = -EAGAIN;
		goto out;
	}
	ioc->scsih_cmds.status = MPT3_CMD_PENDING;

	smid = mpt3sas_base_get_smid(ioc, ioc->scsih_cb_idx);
	if (!smid) {
		ioc_err(ioc, "%s: failed obtaining a smid\n", __func__);
		ioc->scsih_cmds.status = MPT3_CMD_NOT_USED;
		rc = -EAGAIN;
		goto out;
	}

	mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
	ioc->scsih_cmds.smid = smid;
	memset(mpi_request, 0, sizeof(Mpi2RaidActionRequest_t));

	mpi_request->Function = MPI2_FUNCTION_RAID_ACTION;
	mpi_request->Action = MPI2_RAID_ACTION_PHYSDISK_HIDDEN;
	mpi_request->PhysDiskNum = phys_disk_num;

	dewtprintk(ioc,
		   ioc_info(ioc, "IR RAID_ACTION: turning fast path on for handle(0x%04x), phys_disk_num (0x%02x)\n",
			    handle, phys_disk_num));

	init_completion(&ioc->scsih_cmds.done);
	ioc->put_smid_default(ioc, smid);
	wait_for_completion_timeout(&ioc->scsih_cmds.done, 10*HZ);

	if (!(ioc->scsih_cmds.status & MPT3_CMD_COMPLETE)) {
		/*
		 * NOTE(review): issue_reset is passed by name; this presumes
		 * mpt3sas_check_cmd_timeout() is a macro that can set it —
		 * confirm against mpt3sas_base.h.
		 */
		mpt3sas_check_cmd_timeout(ioc,
		    ioc->scsih_cmds.status, mpi_request,
		    sizeof(Mpi2RaidActionRequest_t)/4, issue_reset);
		rc = -EFAULT;
		goto out;
	}

	if (ioc->scsih_cmds.status & MPT3_CMD_REPLY_VALID) {

		mpi_reply = ioc->scsih_cmds.reply;
		ioc_status = le16_to_cpu(mpi_reply->IOCStatus);
		/* log info is only meaningful when the firmware flags it */
		if (ioc_status & MPI2_IOCSTATUS_FLAG_LOG_INFO_AVAILABLE)
			log_info =  le32_to_cpu(mpi_reply->IOCLogInfo);
		else
			log_info = 0;
		ioc_status &= MPI2_IOCSTATUS_MASK;
		if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
			dewtprintk(ioc,
				   ioc_info(ioc, "IR RAID_ACTION: failed: ioc_status(0x%04x), loginfo(0x%08x)!!!\n",
					    ioc_status, log_info));
			rc = -EFAULT;
		} else
			dewtprintk(ioc,
				   ioc_info(ioc, "IR RAID_ACTION: completed successfully\n"));
	}

 out:
	ioc->scsih_cmds.status = MPT3_CMD_NOT_USED;
	mutex_unlock(&ioc->scsih_cmds.mutex);

	/* escalate to a full controller reset if the command timed out */
	if (issue_reset)
		mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER);
	return rc;
}
10214 
10215 /**
10216  * _scsih_reprobe_lun - reprobing lun
10217  * @sdev: scsi device struct
10218  * @no_uld_attach: sdev->no_uld_attach flag setting
10219  *
10220  **/
10221 static void
_scsih_reprobe_lun(struct scsi_device * sdev,void * no_uld_attach)10222 _scsih_reprobe_lun(struct scsi_device *sdev, void *no_uld_attach)
10223 {
10224 	sdev->no_uld_attach = no_uld_attach ? 1 : 0;
10225 	sdev_printk(KERN_INFO, sdev, "%s raid component\n",
10226 	    sdev->no_uld_attach ? "hiding" : "exposing");
10227 	WARN_ON(scsi_device_reprobe(sdev));
10228 }
10229 
/**
 * _scsih_sas_volume_add - add new volume
 * @ioc: per adapter object
 * @element: IR config element data
 * Context: user.
 *
 * Allocates and registers a new RAID volume object for the handle in
 * @element and, unless initial discovery is still in progress, exposes
 * it to the SCSI midlayer.  Duplicate WWIDs are silently ignored.
 */
static void
_scsih_sas_volume_add(struct MPT3SAS_ADAPTER *ioc,
	Mpi2EventIrConfigElement_t *element)
{
	struct _raid_device *raid_device;
	unsigned long flags;
	u64 wwid;
	u16 handle = le16_to_cpu(element->VolDevHandle);
	int rc;

	mpt3sas_config_get_volume_wwid(ioc, handle, &wwid);
	if (!wwid) {
		ioc_err(ioc, "failure at %s:%d/%s()!\n",
			__FILE__, __LINE__, __func__);
		return;
	}

	spin_lock_irqsave(&ioc->raid_device_lock, flags);
	raid_device = _scsih_raid_device_find_by_wwid(ioc, wwid);
	spin_unlock_irqrestore(&ioc->raid_device_lock, flags);

	/* volume already known: nothing to do */
	if (raid_device)
		return;

	raid_device = kzalloc_obj(struct _raid_device);
	if (!raid_device) {
		ioc_err(ioc, "failure at %s:%d/%s()!\n",
			__FILE__, __LINE__, __func__);
		return;
	}

	raid_device->id = ioc->sas_id++;
	raid_device->channel = RAID_CHANNEL;
	raid_device->handle = handle;
	raid_device->wwid = wwid;
	_scsih_raid_device_add(ioc, raid_device);
	if (!ioc->wait_for_discovery_to_complete) {
		/* expose it to the midlayer now; unwind on failure */
		rc = scsi_add_device(ioc->shost, RAID_CHANNEL,
		    raid_device->id, 0);
		if (rc)
			_scsih_raid_device_remove(ioc, raid_device);
	} else {
		/* discovery still running: only evaluate it as boot device */
		spin_lock_irqsave(&ioc->raid_device_lock, flags);
		_scsih_determine_boot_device(ioc, raid_device, 1);
		spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
	}
}
10283 
/**
 * _scsih_sas_volume_delete - delete volume
 * @ioc: per adapter object
 * @handle: volume device handle
 * Context: user.
 *
 * Unlinks and frees the RAID volume object under raid_device_lock,
 * then removes the associated scsi_target outside the lock (the remove
 * can sleep).
 */
static void
_scsih_sas_volume_delete(struct MPT3SAS_ADAPTER *ioc, u16 handle)
{
	struct _raid_device *raid_device;
	unsigned long flags;
	struct MPT3SAS_TARGET *sas_target_priv_data;
	struct scsi_target *starget = NULL;

	spin_lock_irqsave(&ioc->raid_device_lock, flags);
	raid_device = mpt3sas_raid_device_find_by_handle(ioc, handle);
	if (raid_device) {
		if (raid_device->starget) {
			starget = raid_device->starget;
			/* stop new IO to this target while it goes away */
			sas_target_priv_data = starget->hostdata;
			sas_target_priv_data->deleted = 1;
		}
		ioc_info(ioc, "removing handle(0x%04x), wwid(0x%016llx)\n",
			 raid_device->handle, (u64)raid_device->wwid);
		list_del(&raid_device->list);
		kfree(raid_device);
	}
	spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
	/* target removal may sleep, so it must happen after unlock */
	if (starget)
		scsi_remove_target(&starget->dev);
}
10315 
/**
 * _scsih_sas_pd_expose - expose pd component to /dev/sdX
 * @ioc: per adapter object
 * @element: IR config element data
 * Context: user.
 *
 * The physical disk left a RAID volume: clear its RAID-component
 * bookkeeping and reprobe every LUN on the target so the upper-level
 * drivers attach to it again.
 */
static void
_scsih_sas_pd_expose(struct MPT3SAS_ADAPTER *ioc,
	Mpi2EventIrConfigElement_t *element)
{
	struct _sas_device *sas_device;
	struct scsi_target *starget = NULL;
	struct MPT3SAS_TARGET *sas_target_priv_data;
	unsigned long flags;
	u16 handle = le16_to_cpu(element->PhysDiskDevHandle);

	spin_lock_irqsave(&ioc->sas_device_lock, flags);
	/* takes a reference on sas_device; dropped at the end */
	sas_device = __mpt3sas_get_sdev_by_handle(ioc, handle);
	if (sas_device) {
		sas_device->volume_handle = 0;
		sas_device->volume_wwid = 0;
		clear_bit(handle, ioc->pd_handles);
		if (sas_device->starget && sas_device->starget->hostdata) {
			starget = sas_device->starget;
			sas_target_priv_data = starget->hostdata;
			sas_target_priv_data->flags &=
			    ~MPT_TARGET_FLAGS_RAID_COMPONENT;
		}
	}
	spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
	if (!sas_device)
		return;

	/* exposing raid component */
	if (starget)
		starget_for_each_device(starget, NULL, _scsih_reprobe_lun);

	sas_device_put(sas_device);
}
10355 
/**
 * _scsih_sas_pd_hide - hide pd component from /dev/sdX
 * @ioc: per adapter object
 * @element: IR config element data
 * Context: user.
 *
 * The physical disk became part of a RAID volume: record the owning
 * volume's handle/wwid, mark the target as a RAID component, turn on
 * the firmware fast path, and reprobe the LUNs so upper-level drivers
 * detach.
 */
static void
_scsih_sas_pd_hide(struct MPT3SAS_ADAPTER *ioc,
	Mpi2EventIrConfigElement_t *element)
{
	struct _sas_device *sas_device;
	struct scsi_target *starget = NULL;
	struct MPT3SAS_TARGET *sas_target_priv_data;
	unsigned long flags;
	u16 handle = le16_to_cpu(element->PhysDiskDevHandle);
	u16 volume_handle = 0;
	u64 volume_wwid = 0;

	/* resolve the owning volume before taking the device lock */
	mpt3sas_config_get_volume_handle(ioc, handle, &volume_handle);
	if (volume_handle)
		mpt3sas_config_get_volume_wwid(ioc, volume_handle,
		    &volume_wwid);

	spin_lock_irqsave(&ioc->sas_device_lock, flags);
	/* takes a reference on sas_device; dropped at the end */
	sas_device = __mpt3sas_get_sdev_by_handle(ioc, handle);
	if (sas_device) {
		set_bit(handle, ioc->pd_handles);
		if (sas_device->starget && sas_device->starget->hostdata) {
			starget = sas_device->starget;
			sas_target_priv_data = starget->hostdata;
			sas_target_priv_data->flags |=
			    MPT_TARGET_FLAGS_RAID_COMPONENT;
			sas_device->volume_handle = volume_handle;
			sas_device->volume_wwid = volume_wwid;
		}
	}
	spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
	if (!sas_device)
		return;

	/* hiding raid component */
	_scsih_ir_fastpath(ioc, handle, element->PhysDiskNum);

	if (starget)
		starget_for_each_device(starget, (void *)1, _scsih_reprobe_lun);

	sas_device_put(sas_device);
}
10404 
10405 /**
10406  * _scsih_sas_pd_delete - delete pd component
10407  * @ioc: per adapter object
10408  * @element: IR config element data
10409  * Context: user.
10410  */
10411 static void
_scsih_sas_pd_delete(struct MPT3SAS_ADAPTER * ioc,Mpi2EventIrConfigElement_t * element)10412 _scsih_sas_pd_delete(struct MPT3SAS_ADAPTER *ioc,
10413 	Mpi2EventIrConfigElement_t *element)
10414 {
10415 	u16 handle = le16_to_cpu(element->PhysDiskDevHandle);
10416 
10417 	_scsih_device_remove_by_handle(ioc, handle);
10418 }
10419 
/**
 * _scsih_sas_pd_add - add pd component
 * @ioc: per adapter object
 * @element: IR config element data
 * Context: user.
 *
 * A physical disk is being hidden into a volume.  Mark its handle as a
 * RAID component, enable the firmware fast path and, when the device is
 * not yet known, read its SAS device page 0, refresh the transport
 * links and add it (hidden) to the topology.
 */
static void
_scsih_sas_pd_add(struct MPT3SAS_ADAPTER *ioc,
	Mpi2EventIrConfigElement_t *element)
{
	struct _sas_device *sas_device;
	u16 handle = le16_to_cpu(element->PhysDiskDevHandle);
	Mpi2ConfigReply_t mpi_reply;
	Mpi2SasDevicePage0_t sas_device_pg0;
	u32 ioc_status;
	u64 sas_address;
	u16 parent_handle;

	set_bit(handle, ioc->pd_handles);

	/* device already known: only the fast path needs enabling */
	sas_device = mpt3sas_get_sdev_by_handle(ioc, handle);
	if (sas_device) {
		_scsih_ir_fastpath(ioc, handle, element->PhysDiskNum);
		sas_device_put(sas_device);
		return;
	}

	if ((mpt3sas_config_get_sas_device_pg0(ioc, &mpi_reply, &sas_device_pg0,
	    MPI2_SAS_DEVICE_PGAD_FORM_HANDLE, handle))) {
		ioc_err(ioc, "failure at %s:%d/%s()!\n",
			__FILE__, __LINE__, __func__);
		return;
	}

	ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
	    MPI2_IOCSTATUS_MASK;
	if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
		ioc_err(ioc, "failure at %s:%d/%s()!\n",
			__FILE__, __LINE__, __func__);
		return;
	}

	/* refresh transport-layer links via the parent's sas address */
	parent_handle = le16_to_cpu(sas_device_pg0.ParentDevHandle);
	if (!_scsih_get_sas_address(ioc, parent_handle, &sas_address))
		mpt3sas_transport_update_links(ioc, sas_address, handle,
		    sas_device_pg0.PhyNum, MPI2_SAS_NEG_LINK_RATE_1_5,
		    mpt3sas_get_port_by_id(ioc,
		    sas_device_pg0.PhysicalPort, 0));

	_scsih_ir_fastpath(ioc, handle, element->PhysDiskNum);
	_scsih_add_device(ioc, handle, 0, 1);
}
10472 
10473 /**
10474  * _scsih_sas_ir_config_change_event_debug - debug for IR Config Change events
10475  * @ioc: per adapter object
10476  * @event_data: event data payload
10477  * Context: user.
10478  */
10479 static void
_scsih_sas_ir_config_change_event_debug(struct MPT3SAS_ADAPTER * ioc,Mpi2EventDataIrConfigChangeList_t * event_data)10480 _scsih_sas_ir_config_change_event_debug(struct MPT3SAS_ADAPTER *ioc,
10481 	Mpi2EventDataIrConfigChangeList_t *event_data)
10482 {
10483 	Mpi2EventIrConfigElement_t *element;
10484 	u8 element_type;
10485 	int i;
10486 	char *reason_str = NULL, *element_str = NULL;
10487 
10488 	element = (Mpi2EventIrConfigElement_t *)&event_data->ConfigElement[0];
10489 
10490 	ioc_info(ioc, "raid config change: (%s), elements(%d)\n",
10491 		 le32_to_cpu(event_data->Flags) & MPI2_EVENT_IR_CHANGE_FLAGS_FOREIGN_CONFIG ?
10492 		 "foreign" : "native",
10493 		 event_data->NumElements);
10494 	for (i = 0; i < event_data->NumElements; i++, element++) {
10495 		switch (element->ReasonCode) {
10496 		case MPI2_EVENT_IR_CHANGE_RC_ADDED:
10497 			reason_str = "add";
10498 			break;
10499 		case MPI2_EVENT_IR_CHANGE_RC_REMOVED:
10500 			reason_str = "remove";
10501 			break;
10502 		case MPI2_EVENT_IR_CHANGE_RC_NO_CHANGE:
10503 			reason_str = "no change";
10504 			break;
10505 		case MPI2_EVENT_IR_CHANGE_RC_HIDE:
10506 			reason_str = "hide";
10507 			break;
10508 		case MPI2_EVENT_IR_CHANGE_RC_UNHIDE:
10509 			reason_str = "unhide";
10510 			break;
10511 		case MPI2_EVENT_IR_CHANGE_RC_VOLUME_CREATED:
10512 			reason_str = "volume_created";
10513 			break;
10514 		case MPI2_EVENT_IR_CHANGE_RC_VOLUME_DELETED:
10515 			reason_str = "volume_deleted";
10516 			break;
10517 		case MPI2_EVENT_IR_CHANGE_RC_PD_CREATED:
10518 			reason_str = "pd_created";
10519 			break;
10520 		case MPI2_EVENT_IR_CHANGE_RC_PD_DELETED:
10521 			reason_str = "pd_deleted";
10522 			break;
10523 		default:
10524 			reason_str = "unknown reason";
10525 			break;
10526 		}
10527 		element_type = le16_to_cpu(element->ElementFlags) &
10528 		    MPI2_EVENT_IR_CHANGE_EFLAGS_ELEMENT_TYPE_MASK;
10529 		switch (element_type) {
10530 		case MPI2_EVENT_IR_CHANGE_EFLAGS_VOLUME_ELEMENT:
10531 			element_str = "volume";
10532 			break;
10533 		case MPI2_EVENT_IR_CHANGE_EFLAGS_VOLPHYSDISK_ELEMENT:
10534 			element_str = "phys disk";
10535 			break;
10536 		case MPI2_EVENT_IR_CHANGE_EFLAGS_HOTSPARE_ELEMENT:
10537 			element_str = "hot spare";
10538 			break;
10539 		default:
10540 			element_str = "unknown element";
10541 			break;
10542 		}
10543 		pr_info("\t(%s:%s), vol handle(0x%04x), " \
10544 		    "pd handle(0x%04x), pd num(0x%02x)\n", element_str,
10545 		    reason_str, le16_to_cpu(element->VolDevHandle),
10546 		    le16_to_cpu(element->PhysDiskDevHandle),
10547 		    element->PhysDiskNum);
10548 	}
10549 }
10550 
/**
 * _scsih_sas_ir_config_change_event - handle ir configuration change events
 * @ioc: per adapter object
 * @fw_event: The fw_event_work object
 * Context: user.
 *
 * Walks every element of the IR config-change list and dispatches it to
 * the matching volume/physical-disk add, delete, hide or expose helper.
 * Foreign (non-native) configurations do not create or delete volumes.
 */
static void
_scsih_sas_ir_config_change_event(struct MPT3SAS_ADAPTER *ioc,
	struct fw_event_work *fw_event)
{
	Mpi2EventIrConfigElement_t *element;
	int i;
	u8 foreign_config;
	Mpi2EventDataIrConfigChangeList_t *event_data =
		(Mpi2EventDataIrConfigChangeList_t *)
		fw_event->event_data;

	if ((ioc->logging_level & MPT_DEBUG_EVENT_WORK_TASK) &&
	     (!ioc->hide_ir_msg))
		_scsih_sas_ir_config_change_event_debug(ioc, event_data);

	foreign_config = (le32_to_cpu(event_data->Flags) &
	    MPI2_EVENT_IR_CHANGE_FLAGS_FOREIGN_CONFIG) ? 1 : 0;

	element = (Mpi2EventIrConfigElement_t *)&event_data->ConfigElement[0];
	/*
	 * During host recovery on SAS 3.0+ controllers only the fast-path
	 * enabling of hidden disks is performed; topology changes are
	 * reconciled after the reset.
	 */
	if (ioc->shost_recovery &&
	    ioc->hba_mpi_version_belonged != MPI2_VERSION) {
		for (i = 0; i < event_data->NumElements; i++, element++) {
			if (element->ReasonCode == MPI2_EVENT_IR_CHANGE_RC_HIDE)
				_scsih_ir_fastpath(ioc,
					le16_to_cpu(element->PhysDiskDevHandle),
					element->PhysDiskNum);
		}
		return;
	}

	for (i = 0; i < event_data->NumElements; i++, element++) {

		switch (element->ReasonCode) {
		case MPI2_EVENT_IR_CHANGE_RC_VOLUME_CREATED:
		case MPI2_EVENT_IR_CHANGE_RC_ADDED:
			if (!foreign_config)
				_scsih_sas_volume_add(ioc, element);
			break;
		case MPI2_EVENT_IR_CHANGE_RC_VOLUME_DELETED:
		case MPI2_EVENT_IR_CHANGE_RC_REMOVED:
			if (!foreign_config)
				_scsih_sas_volume_delete(ioc,
				    le16_to_cpu(element->VolDevHandle));
			break;
		case MPI2_EVENT_IR_CHANGE_RC_PD_CREATED:
			/* warpdrive controllers manage pd visibility themselves */
			if (!ioc->is_warpdrive)
				_scsih_sas_pd_hide(ioc, element);
			break;
		case MPI2_EVENT_IR_CHANGE_RC_PD_DELETED:
			if (!ioc->is_warpdrive)
				_scsih_sas_pd_expose(ioc, element);
			break;
		case MPI2_EVENT_IR_CHANGE_RC_HIDE:
			if (!ioc->is_warpdrive)
				_scsih_sas_pd_add(ioc, element);
			break;
		case MPI2_EVENT_IR_CHANGE_RC_UNHIDE:
			if (!ioc->is_warpdrive)
				_scsih_sas_pd_delete(ioc, element);
			break;
		}
	}
}
10620 
/**
 * _scsih_sas_ir_volume_event - IR volume event
 * @ioc: per adapter object
 * @fw_event: The fw_event_work object
 * Context: user.
 *
 * Handles IR volume state changes: missing/failed volumes are removed,
 * while volumes that become online/degraded/optimal are created and
 * exposed to the SCSI midlayer if not already known.
 */
static void
_scsih_sas_ir_volume_event(struct MPT3SAS_ADAPTER *ioc,
	struct fw_event_work *fw_event)
{
	u64 wwid;
	unsigned long flags;
	struct _raid_device *raid_device;
	u16 handle;
	u32 state;
	int rc;
	Mpi2EventDataIrVolume_t *event_data =
		(Mpi2EventDataIrVolume_t *) fw_event->event_data;

	if (ioc->shost_recovery)
		return;

	/* only state-changed notifications are of interest here */
	if (event_data->ReasonCode != MPI2_EVENT_IR_VOLUME_RC_STATE_CHANGED)
		return;

	handle = le16_to_cpu(event_data->VolDevHandle);
	state = le32_to_cpu(event_data->NewValue);
	if (!ioc->hide_ir_msg)
		dewtprintk(ioc,
			   ioc_info(ioc, "%s: handle(0x%04x), old(0x%08x), new(0x%08x)\n",
				    __func__, handle,
				    le32_to_cpu(event_data->PreviousValue),
				    state));
	switch (state) {
	case MPI2_RAID_VOL_STATE_MISSING:
	case MPI2_RAID_VOL_STATE_FAILED:
		_scsih_sas_volume_delete(ioc, handle);
		break;

	case MPI2_RAID_VOL_STATE_ONLINE:
	case MPI2_RAID_VOL_STATE_DEGRADED:
	case MPI2_RAID_VOL_STATE_OPTIMAL:

		spin_lock_irqsave(&ioc->raid_device_lock, flags);
		raid_device = mpt3sas_raid_device_find_by_handle(ioc, handle);
		spin_unlock_irqrestore(&ioc->raid_device_lock, flags);

		/* already tracked: nothing to add */
		if (raid_device)
			break;

		mpt3sas_config_get_volume_wwid(ioc, handle, &wwid);
		if (!wwid) {
			ioc_err(ioc, "failure at %s:%d/%s()!\n",
				__FILE__, __LINE__, __func__);
			break;
		}

		raid_device = kzalloc_obj(struct _raid_device);
		if (!raid_device) {
			ioc_err(ioc, "failure at %s:%d/%s()!\n",
				__FILE__, __LINE__, __func__);
			break;
		}

		raid_device->id = ioc->sas_id++;
		raid_device->channel = RAID_CHANNEL;
		raid_device->handle = handle;
		raid_device->wwid = wwid;
		_scsih_raid_device_add(ioc, raid_device);
		/* expose the new volume; unwind the add on failure */
		rc = scsi_add_device(ioc->shost, RAID_CHANNEL,
		    raid_device->id, 0);
		if (rc)
			_scsih_raid_device_remove(ioc, raid_device);
		break;

	case MPI2_RAID_VOL_STATE_INITIALIZING:
	default:
		break;
	}
}
10701 
/**
 * _scsih_sas_ir_physical_disk_event - PD event
 * @ioc: per adapter object
 * @fw_event: The fw_event_work object
 * Context: user.
 *
 * Handles IR physical-disk state changes.  When a disk transitions to
 * an active state (online/degraded/rebuilding/optimal/hot spare) it is
 * marked as a RAID component and, if not yet known, added (hidden) to
 * the topology.  Offline/unconfigured states are ignored.
 */
static void
_scsih_sas_ir_physical_disk_event(struct MPT3SAS_ADAPTER *ioc,
	struct fw_event_work *fw_event)
{
	u16 handle, parent_handle;
	u32 state;
	struct _sas_device *sas_device;
	Mpi2ConfigReply_t mpi_reply;
	Mpi2SasDevicePage0_t sas_device_pg0;
	u32 ioc_status;
	Mpi2EventDataIrPhysicalDisk_t *event_data =
		(Mpi2EventDataIrPhysicalDisk_t *) fw_event->event_data;
	u64 sas_address;

	if (ioc->shost_recovery)
		return;

	/* only state-changed notifications are of interest here */
	if (event_data->ReasonCode != MPI2_EVENT_IR_PHYSDISK_RC_STATE_CHANGED)
		return;

	handle = le16_to_cpu(event_data->PhysDiskDevHandle);
	state = le32_to_cpu(event_data->NewValue);

	if (!ioc->hide_ir_msg)
		dewtprintk(ioc,
			   ioc_info(ioc, "%s: handle(0x%04x), old(0x%08x), new(0x%08x)\n",
				    __func__, handle,
				    le32_to_cpu(event_data->PreviousValue),
				    state));

	switch (state) {
	case MPI2_RAID_PD_STATE_ONLINE:
	case MPI2_RAID_PD_STATE_DEGRADED:
	case MPI2_RAID_PD_STATE_REBUILDING:
	case MPI2_RAID_PD_STATE_OPTIMAL:
	case MPI2_RAID_PD_STATE_HOT_SPARE:

		if (!ioc->is_warpdrive)
			set_bit(handle, ioc->pd_handles);

		/* device already known: nothing more to do */
		sas_device = mpt3sas_get_sdev_by_handle(ioc, handle);
		if (sas_device) {
			sas_device_put(sas_device);
			return;
		}

		if ((mpt3sas_config_get_sas_device_pg0(ioc, &mpi_reply,
		    &sas_device_pg0, MPI2_SAS_DEVICE_PGAD_FORM_HANDLE,
		    handle))) {
			ioc_err(ioc, "failure at %s:%d/%s()!\n",
				__FILE__, __LINE__, __func__);
			return;
		}

		ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
		    MPI2_IOCSTATUS_MASK;
		if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
			ioc_err(ioc, "failure at %s:%d/%s()!\n",
				__FILE__, __LINE__, __func__);
			return;
		}

		/* refresh transport-layer links via the parent's sas address */
		parent_handle = le16_to_cpu(sas_device_pg0.ParentDevHandle);
		if (!_scsih_get_sas_address(ioc, parent_handle, &sas_address))
			mpt3sas_transport_update_links(ioc, sas_address, handle,
			    sas_device_pg0.PhyNum, MPI2_SAS_NEG_LINK_RATE_1_5,
			    mpt3sas_get_port_by_id(ioc,
			    sas_device_pg0.PhysicalPort, 0));

		_scsih_add_device(ioc, handle, 0, 1);

		break;

	case MPI2_RAID_PD_STATE_OFFLINE:
	case MPI2_RAID_PD_STATE_NOT_CONFIGURED:
	case MPI2_RAID_PD_STATE_NOT_COMPATIBLE:
	default:
		break;
	}
}
10788 
10789 /**
10790  * _scsih_sas_ir_operation_status_event_debug - debug for IR op event
10791  * @ioc: per adapter object
10792  * @event_data: event data payload
10793  * Context: user.
10794  */
10795 static void
_scsih_sas_ir_operation_status_event_debug(struct MPT3SAS_ADAPTER * ioc,Mpi2EventDataIrOperationStatus_t * event_data)10796 _scsih_sas_ir_operation_status_event_debug(struct MPT3SAS_ADAPTER *ioc,
10797 	Mpi2EventDataIrOperationStatus_t *event_data)
10798 {
10799 	char *reason_str = NULL;
10800 
10801 	switch (event_data->RAIDOperation) {
10802 	case MPI2_EVENT_IR_RAIDOP_RESYNC:
10803 		reason_str = "resync";
10804 		break;
10805 	case MPI2_EVENT_IR_RAIDOP_ONLINE_CAP_EXPANSION:
10806 		reason_str = "online capacity expansion";
10807 		break;
10808 	case MPI2_EVENT_IR_RAIDOP_CONSISTENCY_CHECK:
10809 		reason_str = "consistency check";
10810 		break;
10811 	case MPI2_EVENT_IR_RAIDOP_BACKGROUND_INIT:
10812 		reason_str = "background init";
10813 		break;
10814 	case MPI2_EVENT_IR_RAIDOP_MAKE_DATA_CONSISTENT:
10815 		reason_str = "make data consistent";
10816 		break;
10817 	}
10818 
10819 	if (!reason_str)
10820 		return;
10821 
10822 	ioc_info(ioc, "raid operational status: (%s)\thandle(0x%04x), percent complete(%d)\n",
10823 		 reason_str,
10824 		 le16_to_cpu(event_data->VolDevHandle),
10825 		 event_data->PercentComplete);
10826 }
10827 
10828 /**
10829  * _scsih_sas_ir_operation_status_event - handle RAID operation events
10830  * @ioc: per adapter object
10831  * @fw_event: The fw_event_work object
10832  * Context: user.
10833  */
10834 static void
_scsih_sas_ir_operation_status_event(struct MPT3SAS_ADAPTER * ioc,struct fw_event_work * fw_event)10835 _scsih_sas_ir_operation_status_event(struct MPT3SAS_ADAPTER *ioc,
10836 	struct fw_event_work *fw_event)
10837 {
10838 	Mpi2EventDataIrOperationStatus_t *event_data =
10839 		(Mpi2EventDataIrOperationStatus_t *)
10840 		fw_event->event_data;
10841 	static struct _raid_device *raid_device;
10842 	unsigned long flags;
10843 	u16 handle;
10844 
10845 	if ((ioc->logging_level & MPT_DEBUG_EVENT_WORK_TASK) &&
10846 	    (!ioc->hide_ir_msg))
10847 		_scsih_sas_ir_operation_status_event_debug(ioc,
10848 		     event_data);
10849 
10850 	/* code added for raid transport support */
10851 	if (event_data->RAIDOperation == MPI2_EVENT_IR_RAIDOP_RESYNC) {
10852 
10853 		spin_lock_irqsave(&ioc->raid_device_lock, flags);
10854 		handle = le16_to_cpu(event_data->VolDevHandle);
10855 		raid_device = mpt3sas_raid_device_find_by_handle(ioc, handle);
10856 		if (raid_device)
10857 			raid_device->percent_complete =
10858 			    event_data->PercentComplete;
10859 		spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
10860 	}
10861 }
10862 
10863 /**
10864  * _scsih_prep_device_scan - initialize parameters prior to device scan
10865  * @ioc: per adapter object
10866  *
10867  * Set the deleted flag prior to device scan.  If the device is found during
10868  * the scan, then we clear the deleted flag.
10869  */
10870 static void
_scsih_prep_device_scan(struct MPT3SAS_ADAPTER * ioc)10871 _scsih_prep_device_scan(struct MPT3SAS_ADAPTER *ioc)
10872 {
10873 	struct MPT3SAS_DEVICE *sas_device_priv_data;
10874 	struct scsi_device *sdev;
10875 
10876 	shost_for_each_device(sdev, ioc->shost) {
10877 		sas_device_priv_data = sdev->hostdata;
10878 		if (sas_device_priv_data && sas_device_priv_data->sas_target)
10879 			sas_device_priv_data->sas_target->deleted = 1;
10880 	}
10881 }
10882 
10883 /**
10884  * _scsih_update_device_qdepth - Update QD during Reset.
10885  * @ioc: per adapter object
10886  *
10887  */
10888 static void
_scsih_update_device_qdepth(struct MPT3SAS_ADAPTER * ioc)10889 _scsih_update_device_qdepth(struct MPT3SAS_ADAPTER *ioc)
10890 {
10891 	struct MPT3SAS_DEVICE *sas_device_priv_data;
10892 	struct MPT3SAS_TARGET *sas_target_priv_data;
10893 	struct _sas_device *sas_device;
10894 	struct scsi_device *sdev;
10895 	u16 qdepth;
10896 
10897 	ioc_info(ioc, "Update devices with firmware reported queue depth\n");
10898 	shost_for_each_device(sdev, ioc->shost) {
10899 		sas_device_priv_data = sdev->hostdata;
10900 		if (sas_device_priv_data && sas_device_priv_data->sas_target) {
10901 			sas_target_priv_data = sas_device_priv_data->sas_target;
10902 			sas_device = sas_device_priv_data->sas_target->sas_dev;
10903 			if (sas_target_priv_data->flags & MPT_TARGET_FLAGS_PCIE_DEVICE)
10904 				qdepth = ioc->max_nvme_qd;
10905 			else if (sas_device &&
10906 			    sas_device->device_info & MPI2_SAS_DEVICE_INFO_SSP_TARGET)
10907 				qdepth = (sas_device->port_type > 1) ?
10908 				    ioc->max_wideport_qd : ioc->max_narrowport_qd;
10909 			else if (sas_device &&
10910 			    sas_device->device_info & MPI2_SAS_DEVICE_INFO_SATA_DEVICE)
10911 				qdepth = ioc->max_sata_qd;
10912 			else
10913 				continue;
10914 			mpt3sas_scsih_change_queue_depth(sdev, qdepth);
10915 		}
10916 	}
10917 }
10918 
/**
 * _scsih_mark_responding_sas_device - mark a sas_devices as responding
 * @ioc: per adapter object
 * @sas_device_pg0: SAS Device page 0
 *
 * After host reset, find out whether devices are still responding.
 * Used in _scsih_remove_unresponsive_sas_devices.
 *
 * Matches the firmware-reported device (SAS address + slot + hba_port)
 * against the driver's sas_device_list, flags it responding, and
 * refreshes the cached enclosure data and device handle, both of which
 * can change across a reset.
 */
static void
_scsih_mark_responding_sas_device(struct MPT3SAS_ADAPTER *ioc,
Mpi2SasDevicePage0_t *sas_device_pg0)
{
	struct MPT3SAS_TARGET *sas_target_priv_data = NULL;
	struct scsi_target *starget;
	struct _sas_device *sas_device = NULL;
	struct _enclosure_node *enclosure_dev = NULL;
	unsigned long flags;
	struct hba_port *port = mpt3sas_get_port_by_id(
	    ioc, sas_device_pg0->PhysicalPort, 0);

	/* Look up the cached enclosure page, if the device reports one. */
	if (sas_device_pg0->EnclosureHandle) {
		enclosure_dev =
			mpt3sas_scsih_enclosure_find_by_handle(ioc,
				le16_to_cpu(sas_device_pg0->EnclosureHandle));
		if (enclosure_dev == NULL)
			ioc_info(ioc, "Enclosure handle(0x%04x) doesn't match with enclosure device!\n",
				 sas_device_pg0->EnclosureHandle);
	}
	spin_lock_irqsave(&ioc->sas_device_lock, flags);
	list_for_each_entry(sas_device, &ioc->sas_device_list, list) {
		/* Identity match: SAS address, slot and port must all agree. */
		if (sas_device->sas_address != le64_to_cpu(
		    sas_device_pg0->SASAddress))
			continue;
		if (sas_device->slot != le16_to_cpu(sas_device_pg0->Slot))
			continue;
		if (sas_device->port != port)
			continue;
		sas_device->responding = 1;
		starget = sas_device->starget;
		if (starget && starget->hostdata) {
			sas_target_priv_data = starget->hostdata;
			/* Device survived reset: clear stale TM/deleted state. */
			sas_target_priv_data->tm_busy = 0;
			sas_target_priv_data->deleted = 0;
		} else
			sas_target_priv_data = NULL;
		if (starget) {
			starget_printk(KERN_INFO, starget,
			    "handle(0x%04x), sas_addr(0x%016llx)\n",
			    le16_to_cpu(sas_device_pg0->DevHandle),
			    (unsigned long long)
			    sas_device->sas_address);

			if (sas_device->enclosure_handle != 0)
				starget_printk(KERN_INFO, starget,
				 "enclosure logical id(0x%016llx), slot(%d)\n",
				 (unsigned long long)
				 sas_device->enclosure_logical_id,
				 sas_device->slot);
		}
		/* Refresh enclosure level/connector name from page 0. */
		if (le16_to_cpu(sas_device_pg0->Flags) &
		      MPI2_SAS_DEVICE0_FLAGS_ENCL_LEVEL_VALID) {
			sas_device->enclosure_level =
			   sas_device_pg0->EnclosureLevel;
			memcpy(&sas_device->connector_name[0],
				&sas_device_pg0->ConnectorName[0], 4);
		} else {
			sas_device->enclosure_level = 0;
			sas_device->connector_name[0] = '\0';
		}

		sas_device->enclosure_handle =
			le16_to_cpu(sas_device_pg0->EnclosureHandle);
		sas_device->is_chassis_slot_valid = 0;
		if (enclosure_dev) {
			sas_device->enclosure_logical_id = le64_to_cpu(
				enclosure_dev->pg0.EnclosureLogicalID);
			if (le16_to_cpu(enclosure_dev->pg0.Flags) &
			    MPI2_SAS_ENCLS0_FLAGS_CHASSIS_SLOT_VALID) {
				sas_device->is_chassis_slot_valid = 1;
				sas_device->chassis_slot =
					enclosure_dev->pg0.ChassisSlot;
			}
		}

		/* Firmware may hand out a new device handle after reset. */
		if (sas_device->handle == le16_to_cpu(
		    sas_device_pg0->DevHandle))
			goto out;
		pr_info("\thandle changed from(0x%04x)!!!\n",
		    sas_device->handle);
		sas_device->handle = le16_to_cpu(
		    sas_device_pg0->DevHandle);
		if (sas_target_priv_data)
			sas_target_priv_data->handle =
			    le16_to_cpu(sas_device_pg0->DevHandle);
		goto out;
	}
 out:
	spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
}
11018 
11019 /**
11020  * _scsih_create_enclosure_list_after_reset - Free Existing list,
11021  *	And create enclosure list by scanning all Enclosure Page(0)s
11022  * @ioc: per adapter object
11023  */
11024 static void
_scsih_create_enclosure_list_after_reset(struct MPT3SAS_ADAPTER * ioc)11025 _scsih_create_enclosure_list_after_reset(struct MPT3SAS_ADAPTER *ioc)
11026 {
11027 	struct _enclosure_node *enclosure_dev;
11028 	Mpi2ConfigReply_t mpi_reply;
11029 	u16 enclosure_handle;
11030 	int rc;
11031 
11032 	/* Free existing enclosure list */
11033 	mpt3sas_free_enclosure_list(ioc);
11034 
11035 	/* Re constructing enclosure list after reset*/
11036 	enclosure_handle = 0xFFFF;
11037 	do {
11038 		enclosure_dev =
11039 			kzalloc_obj(struct _enclosure_node);
11040 		if (!enclosure_dev) {
11041 			ioc_err(ioc, "failure at %s:%d/%s()!\n",
11042 				__FILE__, __LINE__, __func__);
11043 			return;
11044 		}
11045 		rc = mpt3sas_config_get_enclosure_pg0(ioc, &mpi_reply,
11046 				&enclosure_dev->pg0,
11047 				MPI2_SAS_ENCLOS_PGAD_FORM_GET_NEXT_HANDLE,
11048 				enclosure_handle);
11049 
11050 		if (rc || (le16_to_cpu(mpi_reply.IOCStatus) &
11051 						MPI2_IOCSTATUS_MASK)) {
11052 			kfree(enclosure_dev);
11053 			return;
11054 		}
11055 		list_add_tail(&enclosure_dev->list,
11056 						&ioc->enclosure_list);
11057 		enclosure_handle =
11058 			le16_to_cpu(enclosure_dev->pg0.EnclosureHandle);
11059 	} while (1);
11060 }
11061 
11062 /**
11063  * _scsih_search_responding_sas_devices -
11064  * @ioc: per adapter object
11065  *
11066  * After host reset, find out whether devices are still responding.
11067  * If not remove.
11068  */
11069 static void
_scsih_search_responding_sas_devices(struct MPT3SAS_ADAPTER * ioc)11070 _scsih_search_responding_sas_devices(struct MPT3SAS_ADAPTER *ioc)
11071 {
11072 	Mpi2SasDevicePage0_t sas_device_pg0;
11073 	Mpi2ConfigReply_t mpi_reply;
11074 	u16 ioc_status;
11075 	u16 handle;
11076 	u32 device_info;
11077 
11078 	ioc_info(ioc, "search for end-devices: start\n");
11079 
11080 	if (list_empty(&ioc->sas_device_list))
11081 		goto out;
11082 
11083 	handle = 0xFFFF;
11084 	while (!(mpt3sas_config_get_sas_device_pg0(ioc, &mpi_reply,
11085 	    &sas_device_pg0, MPI2_SAS_DEVICE_PGAD_FORM_GET_NEXT_HANDLE,
11086 	    handle))) {
11087 		ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
11088 		    MPI2_IOCSTATUS_MASK;
11089 		if (ioc_status != MPI2_IOCSTATUS_SUCCESS)
11090 			break;
11091 		handle = le16_to_cpu(sas_device_pg0.DevHandle);
11092 		device_info = le32_to_cpu(sas_device_pg0.DeviceInfo);
11093 		if (!(_scsih_is_end_device(device_info)))
11094 			continue;
11095 		_scsih_mark_responding_sas_device(ioc, &sas_device_pg0);
11096 	}
11097 
11098  out:
11099 	ioc_info(ioc, "search for end-devices: complete\n");
11100 }
11101 
/**
 * _scsih_mark_responding_pcie_device - mark a pcie_device as responding
 * @ioc: per adapter object
 * @pcie_device_pg0: PCIe Device page 0
 *
 * After host reset, find out whether devices are still responding.
 * Used in _scsih_remove_unresponding_devices.
 *
 * Matches the firmware-reported device (WWID + slot) against the
 * driver's pcie_device_list, flags it responding, and refreshes the
 * cached access status, enclosure data and device handle.
 */
static void
_scsih_mark_responding_pcie_device(struct MPT3SAS_ADAPTER *ioc,
	Mpi26PCIeDevicePage0_t *pcie_device_pg0)
{
	struct MPT3SAS_TARGET *sas_target_priv_data = NULL;
	struct scsi_target *starget;
	struct _pcie_device *pcie_device;
	unsigned long flags;

	spin_lock_irqsave(&ioc->pcie_device_lock, flags);
	list_for_each_entry(pcie_device, &ioc->pcie_device_list, list) {
		/* Identity match: WWID and slot must both agree. */
		if ((pcie_device->wwid == le64_to_cpu(pcie_device_pg0->WWID))
		    && (pcie_device->slot == le16_to_cpu(
		    pcie_device_pg0->Slot))) {
			pcie_device->access_status =
					pcie_device_pg0->AccessStatus;
			pcie_device->responding = 1;
			starget = pcie_device->starget;
			if (starget && starget->hostdata) {
				sas_target_priv_data = starget->hostdata;
				/* Device survived reset: clear stale state. */
				sas_target_priv_data->tm_busy = 0;
				sas_target_priv_data->deleted = 0;
			} else
				sas_target_priv_data = NULL;
			if (starget) {
				starget_printk(KERN_INFO, starget,
				    "handle(0x%04x), wwid(0x%016llx) ",
				    pcie_device->handle,
				    (unsigned long long)pcie_device->wwid);
				if (pcie_device->enclosure_handle != 0)
					starget_printk(KERN_INFO, starget,
					    "enclosure logical id(0x%016llx), "
					    "slot(%d)\n",
					    (unsigned long long)
					    pcie_device->enclosure_logical_id,
					    pcie_device->slot);
			}

			/*
			 * Enclosure level/connector name are only valid on
			 * MPI2.5+ controllers with the corresponding flag set.
			 */
			if (((le32_to_cpu(pcie_device_pg0->Flags)) &
			    MPI26_PCIEDEV0_FLAGS_ENCL_LEVEL_VALID) &&
			    (ioc->hba_mpi_version_belonged != MPI2_VERSION)) {
				pcie_device->enclosure_level =
				    pcie_device_pg0->EnclosureLevel;
				memcpy(&pcie_device->connector_name[0],
				    &pcie_device_pg0->ConnectorName[0], 4);
			} else {
				pcie_device->enclosure_level = 0;
				pcie_device->connector_name[0] = '\0';
			}

			/* Firmware may hand out a new handle after reset. */
			if (pcie_device->handle == le16_to_cpu(
			    pcie_device_pg0->DevHandle))
				goto out;
			pr_info("\thandle changed from(0x%04x)!!!\n",
			    pcie_device->handle);
			pcie_device->handle = le16_to_cpu(
			    pcie_device_pg0->DevHandle);
			if (sas_target_priv_data)
				sas_target_priv_data->handle =
				    le16_to_cpu(pcie_device_pg0->DevHandle);
			goto out;
		}
	}

 out:
	spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
}
11177 
11178 /**
11179  * _scsih_search_responding_pcie_devices -
11180  * @ioc: per adapter object
11181  *
11182  * After host reset, find out whether devices are still responding.
11183  * If not remove.
11184  */
11185 static void
_scsih_search_responding_pcie_devices(struct MPT3SAS_ADAPTER * ioc)11186 _scsih_search_responding_pcie_devices(struct MPT3SAS_ADAPTER *ioc)
11187 {
11188 	Mpi26PCIeDevicePage0_t pcie_device_pg0;
11189 	Mpi2ConfigReply_t mpi_reply;
11190 	u16 ioc_status;
11191 	u16 handle;
11192 	u32 device_info;
11193 
11194 	ioc_info(ioc, "search for end-devices: start\n");
11195 
11196 	if (list_empty(&ioc->pcie_device_list))
11197 		goto out;
11198 
11199 	handle = 0xFFFF;
11200 	while (!(mpt3sas_config_get_pcie_device_pg0(ioc, &mpi_reply,
11201 		&pcie_device_pg0, MPI26_PCIE_DEVICE_PGAD_FORM_GET_NEXT_HANDLE,
11202 		handle))) {
11203 		ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
11204 		    MPI2_IOCSTATUS_MASK;
11205 		if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
11206 			ioc_info(ioc, "\tbreak from %s: ioc_status(0x%04x), loginfo(0x%08x)\n",
11207 				 __func__, ioc_status,
11208 				 le32_to_cpu(mpi_reply.IOCLogInfo));
11209 			break;
11210 		}
11211 		handle = le16_to_cpu(pcie_device_pg0.DevHandle);
11212 		device_info = le32_to_cpu(pcie_device_pg0.DeviceInfo);
11213 		if (!(_scsih_is_nvme_pciescsi_device(device_info)))
11214 			continue;
11215 		_scsih_mark_responding_pcie_device(ioc, &pcie_device_pg0);
11216 	}
11217 out:
11218 	ioc_info(ioc, "search for PCIe end-devices: complete\n");
11219 }
11220 
/**
 * _scsih_mark_responding_raid_device - mark a raid_device as responding
 * @ioc: per adapter object
 * @wwid: world wide identifier for raid volume
 * @handle: device handle
 *
 * After host reset, find out whether devices are still responding.
 * Used in _scsih_remove_unresponsive_raid_devices.
 *
 * Matches @wwid against the driver's raid_device_list, flags the volume
 * responding and refreshes its handle if the firmware reassigned it.
 */
static void
_scsih_mark_responding_raid_device(struct MPT3SAS_ADAPTER *ioc, u64 wwid,
	u16 handle)
{
	struct MPT3SAS_TARGET *sas_target_priv_data = NULL;
	struct scsi_target *starget;
	struct _raid_device *raid_device;
	unsigned long flags;

	spin_lock_irqsave(&ioc->raid_device_lock, flags);
	list_for_each_entry(raid_device, &ioc->raid_device_list, list) {
		if (raid_device->wwid == wwid && raid_device->starget) {
			starget = raid_device->starget;
			if (starget && starget->hostdata) {
				sas_target_priv_data = starget->hostdata;
				sas_target_priv_data->deleted = 0;
			} else
				sas_target_priv_data = NULL;
			raid_device->responding = 1;
			/*
			 * Drop the lock across the printk and the warpdrive
			 * re-init below; the function returns from inside
			 * this branch, so the list walk is never resumed
			 * after the lock is retaken.
			 */
			spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
			starget_printk(KERN_INFO, raid_device->starget,
			    "handle(0x%04x), wwid(0x%016llx)\n", handle,
			    (unsigned long long)raid_device->wwid);

			/*
			 * WARPDRIVE: The handles of the PDs might have changed
			 * across the host reset so re-initialize the
			 * required data for Direct IO
			 */
			mpt3sas_init_warpdrive_properties(ioc, raid_device);
			spin_lock_irqsave(&ioc->raid_device_lock, flags);
			if (raid_device->handle == handle) {
				spin_unlock_irqrestore(&ioc->raid_device_lock,
				    flags);
				return;
			}
			/* Firmware reassigned the handle across the reset. */
			pr_info("\thandle changed from(0x%04x)!!!\n",
			    raid_device->handle);
			raid_device->handle = handle;
			if (sas_target_priv_data)
				sas_target_priv_data->handle = handle;
			spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
			return;
		}
	}
	spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
}
11277 
/**
 * _scsih_search_responding_raid_devices -
 * @ioc: per adapter object
 *
 * After host reset, find out whether devices are still responding.
 * If not remove.
 *
 * Walks raid volume page 1 (GET_NEXT_HANDLE) to mark healthy volumes as
 * responding, then rebuilds the pd_handles bitmap from the phys disk
 * pages (skipped on warpdrive controllers).
 */
static void
_scsih_search_responding_raid_devices(struct MPT3SAS_ADAPTER *ioc)
{
	Mpi2RaidVolPage1_t volume_pg1;
	Mpi2RaidVolPage0_t volume_pg0;
	Mpi2RaidPhysDiskPage0_t pd_pg0;
	Mpi2ConfigReply_t mpi_reply;
	u16 ioc_status;
	u16 handle;
	u8 phys_disk_num;

	/* Nothing to do on controllers without IR (RAID) firmware. */
	if (!ioc->ir_firmware)
		return;

	ioc_info(ioc, "search for raid volumes: start\n");

	if (list_empty(&ioc->raid_device_list))
		goto out;

	handle = 0xFFFF;
	while (!(mpt3sas_config_get_raid_volume_pg1(ioc, &mpi_reply,
	    &volume_pg1, MPI2_RAID_VOLUME_PGAD_FORM_GET_NEXT_HANDLE, handle))) {
		ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
		    MPI2_IOCSTATUS_MASK;
		if (ioc_status != MPI2_IOCSTATUS_SUCCESS)
			break;
		handle = le16_to_cpu(volume_pg1.DevHandle);

		/* Page 0 supplies the volume state for this handle. */
		if (mpt3sas_config_get_raid_volume_pg0(ioc, &mpi_reply,
		    &volume_pg0, MPI2_RAID_VOLUME_PGAD_FORM_HANDLE, handle,
		     sizeof(Mpi2RaidVolPage0_t)))
			continue;

		/* Only healthy/usable volumes are marked responding. */
		if (volume_pg0.VolumeState == MPI2_RAID_VOL_STATE_OPTIMAL ||
		    volume_pg0.VolumeState == MPI2_RAID_VOL_STATE_ONLINE ||
		    volume_pg0.VolumeState == MPI2_RAID_VOL_STATE_DEGRADED)
			_scsih_mark_responding_raid_device(ioc,
			    le64_to_cpu(volume_pg1.WWID), handle);
	}

	/* refresh the pd_handles */
	if (!ioc->is_warpdrive) {
		phys_disk_num = 0xFF;
		memset(ioc->pd_handles, 0, ioc->pd_handles_sz);
		while (!(mpt3sas_config_get_phys_disk_pg0(ioc, &mpi_reply,
		    &pd_pg0, MPI2_PHYSDISK_PGAD_FORM_GET_NEXT_PHYSDISKNUM,
		    phys_disk_num))) {
			ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
			    MPI2_IOCSTATUS_MASK;
			if (ioc_status != MPI2_IOCSTATUS_SUCCESS)
				break;
			phys_disk_num = pd_pg0.PhysDiskNum;
			handle = le16_to_cpu(pd_pg0.DevHandle);
			set_bit(handle, ioc->pd_handles);
		}
	}
 out:
	ioc_info(ioc, "search for responding raid volumes: complete\n");
}
11344 
/**
 * _scsih_mark_responding_expander - mark a expander as responding
 * @ioc: per adapter object
 * @expander_pg0:SAS Expander Config Page0
 *
 * After host reset, find out whether devices are still responding.
 * Used in _scsih_remove_unresponsive_expanders.
 *
 * Matches the firmware-reported expander (SAS address + hba_port)
 * against the driver's sas_expander_list, flags it responding, and
 * refreshes the cached enclosure data and handle (propagated to every
 * phy) if the firmware reassigned them.
 */
static void
_scsih_mark_responding_expander(struct MPT3SAS_ADAPTER *ioc,
	Mpi2ExpanderPage0_t *expander_pg0)
{
	struct _sas_node *sas_expander = NULL;
	unsigned long flags;
	int i;
	struct _enclosure_node *enclosure_dev = NULL;
	u16 handle = le16_to_cpu(expander_pg0->DevHandle);
	u16 enclosure_handle = le16_to_cpu(expander_pg0->EnclosureHandle);
	u64 sas_address = le64_to_cpu(expander_pg0->SASAddress);
	struct hba_port *port = mpt3sas_get_port_by_id(
	    ioc, expander_pg0->PhysicalPort, 0);

	/* Look up the cached enclosure page, if the expander reports one. */
	if (enclosure_handle)
		enclosure_dev =
			mpt3sas_scsih_enclosure_find_by_handle(ioc,
							enclosure_handle);

	spin_lock_irqsave(&ioc->sas_node_lock, flags);
	list_for_each_entry(sas_expander, &ioc->sas_expander_list, list) {
		if (sas_expander->sas_address != sas_address)
			continue;
		if (sas_expander->port != port)
			continue;
		sas_expander->responding = 1;

		if (enclosure_dev) {
			sas_expander->enclosure_logical_id =
			    le64_to_cpu(enclosure_dev->pg0.EnclosureLogicalID);
			sas_expander->enclosure_handle =
			    le16_to_cpu(expander_pg0->EnclosureHandle);
		}

		/* Firmware may hand out a new handle after reset. */
		if (sas_expander->handle == handle)
			goto out;
		pr_info("\texpander(0x%016llx): handle changed" \
		    " from(0x%04x) to (0x%04x)!!!\n",
		    (unsigned long long)sas_expander->sas_address,
		    sas_expander->handle, handle);
		sas_expander->handle = handle;
		/* Keep each phy's cached handle in sync with the parent. */
		for (i = 0 ; i < sas_expander->num_phys ; i++)
			sas_expander->phy[i].handle = handle;
		goto out;
	}
 out:
	spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
}
11401 
11402 /**
11403  * _scsih_search_responding_expanders -
11404  * @ioc: per adapter object
11405  *
11406  * After host reset, find out whether devices are still responding.
11407  * If not remove.
11408  */
11409 static void
_scsih_search_responding_expanders(struct MPT3SAS_ADAPTER * ioc)11410 _scsih_search_responding_expanders(struct MPT3SAS_ADAPTER *ioc)
11411 {
11412 	Mpi2ExpanderPage0_t expander_pg0;
11413 	Mpi2ConfigReply_t mpi_reply;
11414 	u16 ioc_status;
11415 	u64 sas_address;
11416 	u16 handle;
11417 	u8 port;
11418 
11419 	ioc_info(ioc, "search for expanders: start\n");
11420 
11421 	if (list_empty(&ioc->sas_expander_list))
11422 		goto out;
11423 
11424 	handle = 0xFFFF;
11425 	while (!(mpt3sas_config_get_expander_pg0(ioc, &mpi_reply, &expander_pg0,
11426 	    MPI2_SAS_EXPAND_PGAD_FORM_GET_NEXT_HNDL, handle))) {
11427 
11428 		ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
11429 		    MPI2_IOCSTATUS_MASK;
11430 		if (ioc_status != MPI2_IOCSTATUS_SUCCESS)
11431 			break;
11432 
11433 		handle = le16_to_cpu(expander_pg0.DevHandle);
11434 		sas_address = le64_to_cpu(expander_pg0.SASAddress);
11435 		port = expander_pg0.PhysicalPort;
11436 		pr_info(
11437 		    "\texpander present: handle(0x%04x), sas_addr(0x%016llx), port:%d\n",
11438 		    handle, (unsigned long long)sas_address,
11439 		    (ioc->multipath_on_hba ?
11440 		    port : MULTIPATH_DISABLED_PORT_ID));
11441 		_scsih_mark_responding_expander(ioc, &expander_pg0);
11442 	}
11443 
11444  out:
11445 	ioc_info(ioc, "search for expanders: complete\n");
11446 }
11447 
/**
 * _scsih_remove_unresponding_devices - removing unresponding devices
 * @ioc: per adapter object
 *
 * Final pass after a host reset: every device not flagged "responding"
 * by the preceding search passes is removed (SAS end devices, PCIe end
 * devices, RAID volumes, expanders); for the survivors, the responding
 * flag is cleared again for the next reset cycle.
 */
static void
_scsih_remove_unresponding_devices(struct MPT3SAS_ADAPTER *ioc)
{
	struct _sas_device *sas_device, *sas_device_next;
	struct _sas_node *sas_expander, *sas_expander_next;
	struct _raid_device *raid_device, *raid_device_next;
	struct _pcie_device *pcie_device, *pcie_device_next;
	struct list_head tmp_list;	/* staging list for pruned expanders */
	unsigned long flags;
	LIST_HEAD(head);		/* staging list for pruned end devices */

	ioc_info(ioc, "removing unresponding devices: start\n");

	/* removing unresponding end devices */
	ioc_info(ioc, "removing unresponding devices: end-devices\n");
	/*
	 * Iterate, pulling off devices marked as non-responding. We become the
	 * owner for the reference the list had on any object we prune.
	 */
	spin_lock_irqsave(&ioc->sas_device_lock, flags);

	/*
	 * Clean up the sas_device_init_list list as
	 * driver goes for fresh scan as part of diag reset.
	 */
	list_for_each_entry_safe(sas_device, sas_device_next,
	    &ioc->sas_device_init_list, list) {
		list_del_init(&sas_device->list);
		sas_device_put(sas_device);
	}

	list_for_each_entry_safe(sas_device, sas_device_next,
	    &ioc->sas_device_list, list) {
		if (!sas_device->responding)
			list_move_tail(&sas_device->list, &head);
		else
			sas_device->responding = 0;
	}
	spin_unlock_irqrestore(&ioc->sas_device_lock, flags);

	/*
	 * Now, uninitialize and remove the unresponding devices we pruned.
	 */
	list_for_each_entry_safe(sas_device, sas_device_next, &head, list) {
		_scsih_remove_device(ioc, sas_device);
		list_del_init(&sas_device->list);
		sas_device_put(sas_device);
	}

	ioc_info(ioc, "Removing unresponding devices: pcie end-devices\n");
	/* Re-initialize the staging list for the PCIe pruning pass. */
	INIT_LIST_HEAD(&head);
	spin_lock_irqsave(&ioc->pcie_device_lock, flags);
	/*
	 * Clean up the pcie_device_init_list list as
	 * driver goes for fresh scan as part of diag reset.
	 */
	list_for_each_entry_safe(pcie_device, pcie_device_next,
	    &ioc->pcie_device_init_list, list) {
		list_del_init(&pcie_device->list);
		pcie_device_put(pcie_device);
	}

	list_for_each_entry_safe(pcie_device, pcie_device_next,
	    &ioc->pcie_device_list, list) {
		if (!pcie_device->responding)
			list_move_tail(&pcie_device->list, &head);
		else
			pcie_device->responding = 0;
	}
	spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);

	list_for_each_entry_safe(pcie_device, pcie_device_next, &head, list) {
		_scsih_pcie_device_remove_from_sml(ioc, pcie_device);
		list_del_init(&pcie_device->list);
		pcie_device_put(pcie_device);
	}

	/* removing unresponding volumes */
	if (ioc->ir_firmware) {
		ioc_info(ioc, "removing unresponding devices: volumes\n");
		list_for_each_entry_safe(raid_device, raid_device_next,
		    &ioc->raid_device_list, list) {
			if (!raid_device->responding)
				_scsih_sas_volume_delete(ioc,
				    raid_device->handle);
			else
				raid_device->responding = 0;
		}
	}

	/* removing unresponding expanders */
	ioc_info(ioc, "removing unresponding devices: expanders\n");
	spin_lock_irqsave(&ioc->sas_node_lock, flags);
	INIT_LIST_HEAD(&tmp_list);
	list_for_each_entry_safe(sas_expander, sas_expander_next,
	    &ioc->sas_expander_list, list) {
		if (!sas_expander->responding)
			list_move_tail(&sas_expander->list, &tmp_list);
		else
			sas_expander->responding = 0;
	}
	spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
	/* Remove pruned expanders outside the sas_node_lock. */
	list_for_each_entry_safe(sas_expander, sas_expander_next, &tmp_list,
	    list) {
		_scsih_expander_node_remove(ioc, sas_expander);
	}

	ioc_info(ioc, "removing unresponding devices: complete\n");

	/* unblock devices */
	_scsih_ublock_io_all_device(ioc, 0);
}
11564 
11565 static void
_scsih_refresh_expander_links(struct MPT3SAS_ADAPTER * ioc,struct _sas_node * sas_expander,u16 handle)11566 _scsih_refresh_expander_links(struct MPT3SAS_ADAPTER *ioc,
11567 	struct _sas_node *sas_expander, u16 handle)
11568 {
11569 	Mpi2ExpanderPage1_t expander_pg1;
11570 	Mpi2ConfigReply_t mpi_reply;
11571 	int i;
11572 
11573 	for (i = 0 ; i < sas_expander->num_phys ; i++) {
11574 		if ((mpt3sas_config_get_expander_pg1(ioc, &mpi_reply,
11575 		    &expander_pg1, i, handle))) {
11576 			ioc_err(ioc, "failure at %s:%d/%s()!\n",
11577 				__FILE__, __LINE__, __func__);
11578 			return;
11579 		}
11580 
11581 		mpt3sas_transport_update_links(ioc, sas_expander->sas_address,
11582 		    le16_to_cpu(expander_pg1.AttachedDevHandle), i,
11583 		    expander_pg1.NegotiatedLinkRate >> 4,
11584 		    sas_expander->port);
11585 	}
11586 }
11587 
/**
 * _scsih_scan_for_devices_after_reset - scan for devices after host reset
 * @ioc: per adapter object
 *
 * After a host reset the driver does not replay firmware events; instead
 * this routine polls the firmware config pages directly and re-adds every
 * device that firmware still reports but the driver no longer tracks.
 * Scanning order: expanders, IR phys disks, RAID volumes, SAS end
 * devices, then PCIe (NVMe) end devices.  The IR sections are skipped on
 * non-IR firmware.
 */
static void
_scsih_scan_for_devices_after_reset(struct MPT3SAS_ADAPTER *ioc)
{
	Mpi2ExpanderPage0_t expander_pg0;
	Mpi2SasDevicePage0_t sas_device_pg0;
	Mpi26PCIeDevicePage0_t pcie_device_pg0;
	Mpi2RaidVolPage1_t volume_pg1;
	Mpi2RaidVolPage0_t volume_pg0;
	Mpi2RaidPhysDiskPage0_t pd_pg0;
	Mpi2EventIrConfigElement_t element;
	Mpi2ConfigReply_t mpi_reply;
	u8 phys_disk_num, port_id;
	u16 ioc_status;
	u16 handle, parent_handle;
	u64 sas_address;
	struct _sas_device *sas_device;
	struct _pcie_device *pcie_device;
	struct _sas_node *expander_device;
	/* NOTE(review): the 'static' qualifier makes this pointer persist
	 * across calls, but it is always reassigned before use below; the
	 * qualifier looks unnecessary -- confirm before removing.
	 */
	static struct _raid_device *raid_device;
	u8 retry_count;
	unsigned long flags;

	ioc_info(ioc, "scan devices: start\n");

	_scsih_sas_host_refresh(ioc);

	ioc_info(ioc, "\tscan devices: expanders start\n");

	/* expanders: walk firmware's expander pages via GET_NEXT_HNDL,
	 * starting from the 0xFFFF wildcard handle.
	 */
	handle = 0xFFFF;
	while (!(mpt3sas_config_get_expander_pg0(ioc, &mpi_reply, &expander_pg0,
	    MPI2_SAS_EXPAND_PGAD_FORM_GET_NEXT_HNDL, handle))) {
		ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
		    MPI2_IOCSTATUS_MASK;
		if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
			ioc_info(ioc, "\tbreak from expander scan: ioc_status(0x%04x), loginfo(0x%08x)\n",
				 ioc_status, le32_to_cpu(mpi_reply.IOCLogInfo));
			break;
		}
		handle = le16_to_cpu(expander_pg0.DevHandle);
		spin_lock_irqsave(&ioc->sas_node_lock, flags);
		port_id = expander_pg0.PhysicalPort;
		expander_device = mpt3sas_scsih_expander_find_by_sas_address(
		    ioc, le64_to_cpu(expander_pg0.SASAddress),
		    mpt3sas_get_port_by_id(ioc, port_id, 0));
		spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
		/* Already-known expander: refresh link state only;
		 * otherwise register it as new.
		 */
		if (expander_device)
			_scsih_refresh_expander_links(ioc, expander_device,
			    handle);
		else {
			ioc_info(ioc, "\tBEFORE adding expander: handle (0x%04x), sas_addr(0x%016llx)\n",
				 handle,
				 (u64)le64_to_cpu(expander_pg0.SASAddress));
			_scsih_expander_add(ioc, handle);
			ioc_info(ioc, "\tAFTER adding expander: handle (0x%04x), sas_addr(0x%016llx)\n",
				 handle,
				 (u64)le64_to_cpu(expander_pg0.SASAddress));
		}
	}

	ioc_info(ioc, "\tscan devices: expanders complete\n");

	/* phys disk and volume scans only apply to IR firmware builds */
	if (!ioc->ir_firmware)
		goto skip_to_sas;

	ioc_info(ioc, "\tscan devices: phys disk start\n");

	/* phys disk: iterate IR physical disks by disk number */
	phys_disk_num = 0xFF;
	while (!(mpt3sas_config_get_phys_disk_pg0(ioc, &mpi_reply,
	    &pd_pg0, MPI2_PHYSDISK_PGAD_FORM_GET_NEXT_PHYSDISKNUM,
	    phys_disk_num))) {
		ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
		    MPI2_IOCSTATUS_MASK;
		if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
			ioc_info(ioc, "\tbreak from phys disk scan: ioc_status(0x%04x), loginfo(0x%08x)\n",
				 ioc_status, le32_to_cpu(mpi_reply.IOCLogInfo));
			break;
		}
		phys_disk_num = pd_pg0.PhysDiskNum;
		handle = le16_to_cpu(pd_pg0.DevHandle);
		/* skip disks the driver already knows about */
		sas_device = mpt3sas_get_sdev_by_handle(ioc, handle);
		if (sas_device) {
			sas_device_put(sas_device);
			continue;
		}
		if (mpt3sas_config_get_sas_device_pg0(ioc, &mpi_reply,
		    &sas_device_pg0, MPI2_SAS_DEVICE_PGAD_FORM_HANDLE,
		    handle) != 0)
			continue;
		ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
		    MPI2_IOCSTATUS_MASK;
		if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
			ioc_info(ioc, "\tbreak from phys disk scan ioc_status(0x%04x), loginfo(0x%08x)\n",
				 ioc_status, le32_to_cpu(mpi_reply.IOCLogInfo));
			break;
		}
		parent_handle = le16_to_cpu(sas_device_pg0.ParentDevHandle);
		if (!_scsih_get_sas_address(ioc, parent_handle,
		    &sas_address)) {
			ioc_info(ioc, "\tBEFORE adding phys disk: handle (0x%04x), sas_addr(0x%016llx)\n",
				 handle,
				 (u64)le64_to_cpu(sas_device_pg0.SASAddress));
			port_id = sas_device_pg0.PhysicalPort;
			mpt3sas_transport_update_links(ioc, sas_address,
			    handle, sas_device_pg0.PhyNum,
			    MPI2_SAS_NEG_LINK_RATE_1_5,
			    mpt3sas_get_port_by_id(ioc, port_id, 0));
			set_bit(handle, ioc->pd_handles);
			retry_count = 0;
			/* This will retry adding the end device.
			 * _scsih_add_device() will decide on retries and
			 * return "1" when it should be retried
			 */
			while (_scsih_add_device(ioc, handle, retry_count++,
			    1)) {
				ssleep(1);
			}
			ioc_info(ioc, "\tAFTER adding phys disk: handle (0x%04x), sas_addr(0x%016llx)\n",
				 handle,
				 (u64)le64_to_cpu(sas_device_pg0.SASAddress));
		}
	}

	ioc_info(ioc, "\tscan devices: phys disk complete\n");

	ioc_info(ioc, "\tscan devices: volumes start\n");

	/* volumes: iterate RAID volume page 1 by handle, add any volume
	 * (in a usable state) that is missing from the driver's list.
	 */
	handle = 0xFFFF;
	while (!(mpt3sas_config_get_raid_volume_pg1(ioc, &mpi_reply,
	    &volume_pg1, MPI2_RAID_VOLUME_PGAD_FORM_GET_NEXT_HANDLE, handle))) {
		ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
		    MPI2_IOCSTATUS_MASK;
		if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
			ioc_info(ioc, "\tbreak from volume scan: ioc_status(0x%04x), loginfo(0x%08x)\n",
				 ioc_status, le32_to_cpu(mpi_reply.IOCLogInfo));
			break;
		}
		handle = le16_to_cpu(volume_pg1.DevHandle);
		spin_lock_irqsave(&ioc->raid_device_lock, flags);
		raid_device = _scsih_raid_device_find_by_wwid(ioc,
		    le64_to_cpu(volume_pg1.WWID));
		spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
		if (raid_device)
			continue;
		if (mpt3sas_config_get_raid_volume_pg0(ioc, &mpi_reply,
		    &volume_pg0, MPI2_RAID_VOLUME_PGAD_FORM_HANDLE, handle,
		     sizeof(Mpi2RaidVolPage0_t)))
			continue;
		ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
		    MPI2_IOCSTATUS_MASK;
		if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
			ioc_info(ioc, "\tbreak from volume scan: ioc_status(0x%04x), loginfo(0x%08x)\n",
				 ioc_status, le32_to_cpu(mpi_reply.IOCLogInfo));
			break;
		}
		if (volume_pg0.VolumeState == MPI2_RAID_VOL_STATE_OPTIMAL ||
		    volume_pg0.VolumeState == MPI2_RAID_VOL_STATE_ONLINE ||
		    volume_pg0.VolumeState == MPI2_RAID_VOL_STATE_DEGRADED) {
			/* reuse the IR config-change "added" path to
			 * register the volume
			 */
			memset(&element, 0, sizeof(Mpi2EventIrConfigElement_t));
			element.ReasonCode = MPI2_EVENT_IR_CHANGE_RC_ADDED;
			element.VolDevHandle = volume_pg1.DevHandle;
			ioc_info(ioc, "\tBEFORE adding volume: handle (0x%04x)\n",
				 volume_pg1.DevHandle);
			_scsih_sas_volume_add(ioc, &element);
			ioc_info(ioc, "\tAFTER adding volume: handle (0x%04x)\n",
				 volume_pg1.DevHandle);
		}
	}

	ioc_info(ioc, "\tscan devices: volumes complete\n");

 skip_to_sas:

	ioc_info(ioc, "\tscan devices: end devices start\n");

	/* sas devices: iterate SAS device page 0 by handle */
	handle = 0xFFFF;
	while (!(mpt3sas_config_get_sas_device_pg0(ioc, &mpi_reply,
	    &sas_device_pg0, MPI2_SAS_DEVICE_PGAD_FORM_GET_NEXT_HANDLE,
	    handle))) {
		ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
		    MPI2_IOCSTATUS_MASK;
		if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
			ioc_info(ioc, "\tbreak from end device scan: ioc_status(0x%04x), loginfo(0x%08x)\n",
				 ioc_status, le32_to_cpu(mpi_reply.IOCLogInfo));
			break;
		}
		handle = le16_to_cpu(sas_device_pg0.DevHandle);
		/* only end devices; expanders were handled above */
		if (!(_scsih_is_end_device(
		    le32_to_cpu(sas_device_pg0.DeviceInfo))))
			continue;
		port_id = sas_device_pg0.PhysicalPort;
		sas_device = mpt3sas_get_sdev_by_addr(ioc,
		    le64_to_cpu(sas_device_pg0.SASAddress),
		    mpt3sas_get_port_by_id(ioc, port_id, 0));
		if (sas_device) {
			sas_device_put(sas_device);
			continue;
		}
		parent_handle = le16_to_cpu(sas_device_pg0.ParentDevHandle);
		if (!_scsih_get_sas_address(ioc, parent_handle, &sas_address)) {
			ioc_info(ioc, "\tBEFORE adding end device: handle (0x%04x), sas_addr(0x%016llx)\n",
				 handle,
				 (u64)le64_to_cpu(sas_device_pg0.SASAddress));
			mpt3sas_transport_update_links(ioc, sas_address, handle,
			    sas_device_pg0.PhyNum, MPI2_SAS_NEG_LINK_RATE_1_5,
			    mpt3sas_get_port_by_id(ioc, port_id, 0));
			retry_count = 0;
			/* This will retry adding the end device.
			 * _scsih_add_device() will decide on retries and
			 * return "1" when it should be retried
			 */
			while (_scsih_add_device(ioc, handle, retry_count++,
			    0)) {
				ssleep(1);
			}
			ioc_info(ioc, "\tAFTER adding end device: handle (0x%04x), sas_addr(0x%016llx)\n",
				 handle,
				 (u64)le64_to_cpu(sas_device_pg0.SASAddress));
		}
	}
	ioc_info(ioc, "\tscan devices: end devices complete\n");
	ioc_info(ioc, "\tscan devices: pcie end devices start\n");

	/* pcie devices: iterate PCIe device page 0 by handle */
	handle = 0xFFFF;
	while (!(mpt3sas_config_get_pcie_device_pg0(ioc, &mpi_reply,
		&pcie_device_pg0, MPI26_PCIE_DEVICE_PGAD_FORM_GET_NEXT_HANDLE,
		handle))) {
		ioc_status = le16_to_cpu(mpi_reply.IOCStatus)
				& MPI2_IOCSTATUS_MASK;
		if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
			ioc_info(ioc, "\tbreak from pcie end device scan: ioc_status(0x%04x), loginfo(0x%08x)\n",
				 ioc_status, le32_to_cpu(mpi_reply.IOCLogInfo));
			break;
		}
		handle = le16_to_cpu(pcie_device_pg0.DevHandle);
		if (!(_scsih_is_nvme_pciescsi_device(
			le32_to_cpu(pcie_device_pg0.DeviceInfo))))
			continue;
		pcie_device = mpt3sas_get_pdev_by_wwid(ioc,
				le64_to_cpu(pcie_device_pg0.WWID));
		if (pcie_device) {
			pcie_device_put(pcie_device);
			continue;
		}
		retry_count = 0;
		/* NOTE(review): parent_handle is assigned here but not used
		 * in this loop -- confirm whether it can be dropped.
		 */
		parent_handle = le16_to_cpu(pcie_device_pg0.ParentDevHandle);
		while (_scsih_pcie_add_device(ioc, handle, retry_count++))
			ssleep(1);

		ioc_info(ioc, "\tAFTER adding pcie end device: handle (0x%04x), wwid(0x%016llx)\n",
			 handle, (u64)le64_to_cpu(pcie_device_pg0.WWID));
	}

	ioc_info(ioc, "\tpcie devices: pcie end devices complete\n");
	ioc_info(ioc, "scan devices: complete\n");
}
11852 
/**
 * mpt3sas_scsih_pre_reset_handler - reset callback handler (for scsih)
 * @ioc: per adapter object
 *
 * The handler for doing any required cleanup or initialization.
 * Currently only emits a debug trace; no state is touched before reset.
 */
void mpt3sas_scsih_pre_reset_handler(struct MPT3SAS_ADAPTER *ioc)
{
	dtmprintk(ioc, ioc_info(ioc, "%s: MPT3_IOC_PRE_RESET\n", __func__));
}
11863 
/**
 * mpt3sas_scsih_clear_outstanding_scsi_tm_commands - clears outstanding
 *							scsi & tm cmds.
 * @ioc: per adapter object
 *
 * The handler for doing any required cleanup or initialization.
 * Aborts any pending internal scsih or task-management command (marking
 * it MPT3_CMD_RESET and waking its waiter), clears the device add/remove
 * bookkeeping bitmaps, drains the firmware event queue and flushes all
 * running SCSI commands.
 */
void
mpt3sas_scsih_clear_outstanding_scsi_tm_commands(struct MPT3SAS_ADAPTER *ioc)
{
	dtmprintk(ioc,
	    ioc_info(ioc, "%s: clear outstanding scsi & tm cmds\n", __func__));
	/* pending internal scsih command: flag as killed by reset, release
	 * its message frame and wake the waiter
	 */
	if (ioc->scsih_cmds.status & MPT3_CMD_PENDING) {
		ioc->scsih_cmds.status |= MPT3_CMD_RESET;
		mpt3sas_base_free_smid(ioc, ioc->scsih_cmds.smid);
		complete(&ioc->scsih_cmds.done);
	}
	/* same for an in-flight task-management command */
	if (ioc->tm_cmds.status & MPT3_CMD_PENDING) {
		ioc->tm_cmds.status |= MPT3_CMD_RESET;
		mpt3sas_base_free_smid(ioc, ioc->tm_cmds.smid);
		complete(&ioc->tm_cmds.done);
	}

	/* drop device add/remove tracking; the post-reset rescan rebuilds it */
	memset(ioc->pend_os_device_add, 0, ioc->pend_os_device_add_sz);
	memset(ioc->device_remove_in_progress, 0,
	       ioc->device_remove_in_progress_sz);
	_scsih_fw_event_cleanup_queue(ioc);
	_scsih_flush_running_cmds(ioc);
}
11893 
11894 /**
11895  * mpt3sas_scsih_reset_done_handler - reset callback handler (for scsih)
11896  * @ioc: per adapter object
11897  *
11898  * The handler for doing any required cleanup or initialization.
11899  */
11900 void
mpt3sas_scsih_reset_done_handler(struct MPT3SAS_ADAPTER * ioc)11901 mpt3sas_scsih_reset_done_handler(struct MPT3SAS_ADAPTER *ioc)
11902 {
11903 	dtmprintk(ioc, ioc_info(ioc, "%s: MPT3_IOC_DONE_RESET\n", __func__));
11904 	if (!(disable_discovery > 0 && !ioc->sas_hba.num_phys)) {
11905 		if (ioc->multipath_on_hba) {
11906 			_scsih_sas_port_refresh(ioc);
11907 			_scsih_update_vphys_after_reset(ioc);
11908 		}
11909 		_scsih_prep_device_scan(ioc);
11910 		_scsih_create_enclosure_list_after_reset(ioc);
11911 		_scsih_search_responding_sas_devices(ioc);
11912 		_scsih_search_responding_pcie_devices(ioc);
11913 		_scsih_search_responding_raid_devices(ioc);
11914 		_scsih_search_responding_expanders(ioc);
11915 		_scsih_error_recovery_delete_devices(ioc);
11916 	}
11917 }
11918 
/**
 * _mpt3sas_fw_work - delayed task for processing firmware events
 * @ioc: per adapter object
 * @fw_event: The fw_event_work object
 * Context: user.
 *
 * Dispatches one firmware event to its handler.  On the normal path the
 * event's reference is dropped and ioc->current_event cleared before
 * returning; the topology-change requeue paths return early without the
 * final put, keeping the reference for the re-scheduled work.
 */
static void
_mpt3sas_fw_work(struct MPT3SAS_ADAPTER *ioc, struct fw_event_work *fw_event)
{
	ioc->current_event = fw_event;
	_scsih_fw_event_del_from_list(ioc, fw_event);

	/* the queue is being flushed so ignore this event */
	if (ioc->remove_host || ioc->pci_error_recovery) {
		fw_event_work_put(fw_event);
		ioc->current_event = NULL;
		return;
	}

	switch (fw_event->event) {
	case MPT3SAS_PROCESS_TRIGGER_DIAG:
		mpt3sas_process_trigger_data(ioc,
			(struct SL_WH_TRIGGERS_EVENT_DATA_T *)
			fw_event->event_data);
		break;
	case MPT3SAS_REMOVE_UNRESPONDING_DEVICES:
		/* wait out any in-progress host recovery before pruning */
		while (scsi_host_in_recovery(ioc->shost) ||
					 ioc->shost_recovery) {
			/*
			 * If we're unloading or cancelling the work, bail.
			 * Otherwise, this can become an infinite loop.
			 */
			if (ioc->remove_host || ioc->fw_events_cleanup)
				goto out;
			ssleep(1);
		}
		_scsih_remove_unresponding_devices(ioc);
		_scsih_del_dirty_vphy(ioc);
		_scsih_del_dirty_port_entries(ioc);
		if (ioc->is_gen35_ioc)
			_scsih_update_device_qdepth(ioc);
		_scsih_scan_for_devices_after_reset(ioc);
		/*
		 * If diag reset has occurred during the driver load
		 * then driver has to complete the driver load operation
		 * by executing the following items:
		 *- Register the devices from sas_device_init_list to SML
		 *- clear is_driver_loading flag,
		 *- start the watchdog thread.
		 * In happy driver load path, above things are taken care of when
		 * driver executes scsih_scan_finished().
		 */
		if (ioc->is_driver_loading)
			_scsih_complete_devices_scanning(ioc);
		_scsih_set_nvme_max_shutdown_latency(ioc);
		break;
	case MPT3SAS_PORT_ENABLE_COMPLETE:
		ioc->start_scan = 0;
		if (missing_delay[0] != -1 && missing_delay[1] != -1)
			mpt3sas_base_update_missing_delay(ioc, missing_delay[0],
			    missing_delay[1]);
		dewtprintk(ioc,
			   ioc_info(ioc, "port enable: complete from worker thread\n"));
		break;
	case MPT3SAS_TURN_ON_PFA_LED:
		_scsih_turn_on_pfa_led(ioc, fw_event->device_handle);
		break;
	case MPI2_EVENT_SAS_TOPOLOGY_CHANGE_LIST:
		/* handler asks for a retry: requeue after 1s and keep the
		 * event reference for the re-scheduled work
		 */
		if (_scsih_sas_topology_change_event(ioc, fw_event)) {
			_scsih_fw_event_requeue(ioc, fw_event, 1000);
			ioc->current_event = NULL;
			return;
		}
		break;
	case MPI2_EVENT_SAS_DEVICE_STATUS_CHANGE:
		/* debug-only reporting; the real handling happened at ISR
		 * time in mpt3sas_scsih_event_callback()
		 */
		if (ioc->logging_level & MPT_DEBUG_EVENT_WORK_TASK)
			_scsih_sas_device_status_change_event_debug(ioc,
			    (Mpi2EventDataSasDeviceStatusChange_t *)
			    fw_event->event_data);
		break;
	case MPI2_EVENT_SAS_DISCOVERY:
		_scsih_sas_discovery_event(ioc, fw_event);
		break;
	case MPI2_EVENT_SAS_DEVICE_DISCOVERY_ERROR:
		_scsih_sas_device_discovery_error_event(ioc, fw_event);
		break;
	case MPI2_EVENT_SAS_BROADCAST_PRIMITIVE:
		_scsih_sas_broadcast_primitive_event(ioc, fw_event);
		break;
	case MPI2_EVENT_SAS_ENCL_DEVICE_STATUS_CHANGE:
		_scsih_sas_enclosure_dev_status_change_event(ioc,
		    fw_event);
		break;
	case MPI2_EVENT_IR_CONFIGURATION_CHANGE_LIST:
		_scsih_sas_ir_config_change_event(ioc, fw_event);
		break;
	case MPI2_EVENT_IR_VOLUME:
		_scsih_sas_ir_volume_event(ioc, fw_event);
		break;
	case MPI2_EVENT_IR_PHYSICAL_DISK:
		_scsih_sas_ir_physical_disk_event(ioc, fw_event);
		break;
	case MPI2_EVENT_IR_OPERATION_STATUS:
		_scsih_sas_ir_operation_status_event(ioc, fw_event);
		break;
	case MPI2_EVENT_PCIE_DEVICE_STATUS_CHANGE:
		_scsih_pcie_device_status_change_event(ioc, fw_event);
		break;
	case MPI2_EVENT_PCIE_ENUMERATION:
		_scsih_pcie_enumeration_event(ioc, fw_event);
		break;
	case MPI2_EVENT_PCIE_TOPOLOGY_CHANGE_LIST:
		/* same requeue-and-keep-reference pattern as the SAS
		 * topology case above
		 */
		if (_scsih_pcie_topology_change_event(ioc, fw_event)) {
			_scsih_fw_event_requeue(ioc, fw_event, 1000);
			ioc->current_event = NULL;
			return;
		}
		break;
	}
out:
	fw_event_work_put(fw_event);
	ioc->current_event = NULL;
}
12042 
12043 /**
12044  * _firmware_event_work
12045  * @work: The fw_event_work object
12046  * Context: user.
12047  *
12048  * wrappers for the work thread handling firmware events
12049  */
12050 
12051 static void
_firmware_event_work(struct work_struct * work)12052 _firmware_event_work(struct work_struct *work)
12053 {
12054 	struct fw_event_work *fw_event = container_of(work,
12055 	    struct fw_event_work, work);
12056 
12057 	_mpt3sas_fw_work(fw_event->ioc, fw_event);
12058 }
12059 
12060 static void
_firmware_event_work_delayed(struct work_struct * work)12061 _firmware_event_work_delayed(struct work_struct *work)
12062 {
12063 	struct fw_event_work *fw_event = container_of(work,
12064 	    struct fw_event_work, delayed_work.work);
12065 
12066 	_mpt3sas_fw_work(fw_event->ioc, fw_event);
12067 }
12068 
/**
 * mpt3sas_scsih_event_callback - firmware event handler (called at ISR time)
 * @ioc: per adapter object
 * @msix_index: MSIX table index supplied by the OS
 * @reply: reply message frame(lower 32bit addr)
 * Context: interrupt.
 *
 * This function merely adds a new work task into ioc->firmware_event_thread.
 * The tasks are worked from _firmware_event_work in user context.
 * Events not listed in the switch are dropped here; a few (broadcast
 * primitive, topology changes, IR events) also get immediate filtering
 * or pre-processing before being queued.
 *
 * Return: 1 meaning mf should be freed from _base_interrupt
 *         0 means the mf is freed from this function.
 */
u8
mpt3sas_scsih_event_callback(struct MPT3SAS_ADAPTER *ioc, u8 msix_index,
	u32 reply)
{
	struct fw_event_work *fw_event;
	Mpi2EventNotificationReply_t *mpi_reply;
	u16 event;
	u16 sz;
	Mpi26EventDataActiveCableExcept_t *ActiveCableEventData;

	/* events turned off due to host reset */
	if (ioc->pci_error_recovery)
		return 1;

	mpi_reply = mpt3sas_base_get_reply_virt_addr(ioc, reply);

	if (unlikely(!mpi_reply)) {
		ioc_err(ioc, "mpi_reply not valid at %s:%d/%s()!\n",
			__FILE__, __LINE__, __func__);
		return 1;
	}

	event = le16_to_cpu(mpi_reply->Event);

	/* feed the diag-trigger engine (log-entry events handled below) */
	if (event != MPI2_EVENT_LOG_ENTRY_ADDED)
		mpt3sas_trigger_event(ioc, event, 0);

	switch (event) {
	/* handle these */
	case MPI2_EVENT_SAS_BROADCAST_PRIMITIVE:
	{
		Mpi2EventDataSasBroadcastPrimitive_t *baen_data =
		    (Mpi2EventDataSasBroadcastPrimitive_t *)
		    mpi_reply->EventData;

		/* only async-event primitives are of interest */
		if (baen_data->Primitive !=
		    MPI2_EVENT_PRIMITIVE_ASYNCHRONOUS_EVENT)
			return 1;

		/* coalesce: while one broadcast AEN is being processed,
		 * just count further ones instead of queueing them
		 */
		if (ioc->broadcast_aen_busy) {
			ioc->broadcast_aen_pending++;
			return 1;
		} else
			ioc->broadcast_aen_busy = 1;
		break;
	}

	case MPI2_EVENT_SAS_TOPOLOGY_CHANGE_LIST:
		_scsih_check_topo_delete_events(ioc,
		    (Mpi2EventDataSasTopologyChangeList_t *)
		    mpi_reply->EventData);
		/*
		 * No need to add the topology change list
		 * event to fw event work queue when
		 * diag reset is going on. Since during diag
		 * reset driver scan the devices by reading
		 * sas device page0's not by processing the
		 * events.
		 */
		if (ioc->shost_recovery)
			return 1;
		break;
	case MPI2_EVENT_PCIE_TOPOLOGY_CHANGE_LIST:
	_scsih_check_pcie_topo_remove_events(ioc,
		    (Mpi26EventDataPCIeTopologyChangeList_t *)
		    mpi_reply->EventData);
		/* as above: skip queueing during diag reset */
		if (ioc->shost_recovery)
			return 1;
		break;
	case MPI2_EVENT_IR_CONFIGURATION_CHANGE_LIST:
		_scsih_check_ir_config_unhide_events(ioc,
		    (Mpi2EventDataIrConfigChangeList_t *)
		    mpi_reply->EventData);
		break;
	case MPI2_EVENT_IR_VOLUME:
		_scsih_check_volume_delete_events(ioc,
		    (Mpi2EventDataIrVolume_t *)
		    mpi_reply->EventData);
		break;
	case MPI2_EVENT_LOG_ENTRY_ADDED:
	{
		Mpi2EventDataLogEntryAdded_t *log_entry;
		u32 log_code;

		/* WarpDrive-specific log entries only */
		if (!ioc->is_warpdrive)
			break;

		log_entry = (Mpi2EventDataLogEntryAdded_t *)
		    mpi_reply->EventData;
		log_code = le32_to_cpu(*(__le32 *)log_entry->LogData);

		if (le16_to_cpu(log_entry->LogEntryQualifier)
		    != MPT2_WARPDRIVE_LOGENTRY)
			break;

		switch (log_code) {
		case MPT2_WARPDRIVE_LC_SSDT:
			ioc_warn(ioc, "WarpDrive Warning: IO Throttling has occurred in the WarpDrive subsystem. Check WarpDrive documentation for additional details.\n");
			break;
		case MPT2_WARPDRIVE_LC_SSDLW:
			ioc_warn(ioc, "WarpDrive Warning: Program/Erase Cycles for the WarpDrive subsystem in degraded range. Check WarpDrive documentation for additional details.\n");
			break;
		case MPT2_WARPDRIVE_LC_SSDLF:
			ioc_err(ioc, "WarpDrive Fatal Error: There are no Program/Erase Cycles for the WarpDrive subsystem. The storage device will be in read-only mode. Check WarpDrive documentation for additional details.\n");
			break;
		case MPT2_WARPDRIVE_LC_BRMF:
			ioc_err(ioc, "WarpDrive Fatal Error: The Backup Rail Monitor has failed on the WarpDrive subsystem. Check WarpDrive documentation for additional details.\n");
			break;
		}

		break;
	}
	case MPI2_EVENT_SAS_DEVICE_STATUS_CHANGE:
		_scsih_sas_device_status_change_event(ioc,
		    (Mpi2EventDataSasDeviceStatusChange_t *)
		    mpi_reply->EventData);
		break;
	case MPI2_EVENT_IR_OPERATION_STATUS:
	case MPI2_EVENT_SAS_DISCOVERY:
	case MPI2_EVENT_SAS_DEVICE_DISCOVERY_ERROR:
	case MPI2_EVENT_SAS_ENCL_DEVICE_STATUS_CHANGE:
	case MPI2_EVENT_IR_PHYSICAL_DISK:
	case MPI2_EVENT_PCIE_ENUMERATION:
	case MPI2_EVENT_PCIE_DEVICE_STATUS_CHANGE:
		/* no ISR-time processing; just queue for the worker */
		break;

	case MPI2_EVENT_TEMP_THRESHOLD:
		_scsih_temp_threshold_events(ioc,
			(Mpi2EventDataTemperature_t *)
			mpi_reply->EventData);
		break;
	case MPI2_EVENT_ACTIVE_CABLE_EXCEPTION:
		ActiveCableEventData =
		    (Mpi26EventDataActiveCableExcept_t *) mpi_reply->EventData;
		switch (ActiveCableEventData->ReasonCode) {
		case MPI26_EVENT_ACTIVE_CABLE_INSUFFICIENT_POWER:
			ioc_notice(ioc, "Currently an active cable with ReceptacleID %d\n",
				   ActiveCableEventData->ReceptacleID);
			pr_notice("cannot be powered and devices connected\n");
			pr_notice("to this active cable will not be seen\n");
			pr_notice("This active cable requires %d mW of power\n",
			    le32_to_cpu(
			    ActiveCableEventData->ActiveCablePowerRequirement));
			break;

		case MPI26_EVENT_ACTIVE_CABLE_DEGRADED:
			ioc_notice(ioc, "Currently a cable with ReceptacleID %d\n",
				   ActiveCableEventData->ReceptacleID);
			pr_notice(
			    "is not running at optimal speed(12 Gb/s rate)\n");
			break;
		}

		break;

	default: /* ignore the rest */
		return 1;
	}

	/* EventDataLength is in 32-bit words */
	sz = le16_to_cpu(mpi_reply->EventDataLength) * 4;
	fw_event = alloc_fw_event_work(sz);
	if (!fw_event) {
		ioc_err(ioc, "failure at %s:%d/%s()!\n",
			__FILE__, __LINE__, __func__);
		return 1;
	}

	if (event == MPI2_EVENT_SAS_TOPOLOGY_CHANGE_LIST) {
		Mpi2EventDataSasTopologyChangeList_t *topo_event_data =
		    (Mpi2EventDataSasTopologyChangeList_t *)
		    mpi_reply->EventData;
		/* one retry counter byte per topology entry */
		fw_event->retries = kzalloc(topo_event_data->NumEntries,
		    GFP_ATOMIC);
		if (!fw_event->retries) {

			ioc_err(ioc, "failure at %s:%d/%s()!\n",  __FILE__, __LINE__, __func__);
			/* NOTE(review): event_data looks embedded in the
			 * fw_event allocation (alloc_fw_event_work(sz)), in
			 * which case this kfree() of an interior member is
			 * wrong; the PCIe branch below also omits it --
			 * confirm against alloc_fw_event_work()/the
			 * fw_event_work definition.
			 */
			kfree(fw_event->event_data);
			fw_event_work_put(fw_event);
			return 1;
		}
	}

	if (event == MPI2_EVENT_PCIE_TOPOLOGY_CHANGE_LIST) {
		Mpi26EventDataPCIeTopologyChangeList_t *topo_event_data =
			(Mpi26EventDataPCIeTopologyChangeList_t *) mpi_reply->EventData;
		/* one retry counter byte per topology entry */
		fw_event->retries = kzalloc(topo_event_data->NumEntries,
			GFP_ATOMIC);
		if (!fw_event->retries) {

			ioc_err(ioc, "failure at %s:%d/%s()!\n", __FILE__, __LINE__, __func__);
			fw_event_work_put(fw_event);
			return 1;
		}
	}

	/* snapshot the event payload and queue it for the worker thread */
	memcpy(fw_event->event_data, mpi_reply->EventData, sz);
	fw_event->ioc = ioc;
	fw_event->VF_ID = mpi_reply->VF_ID;
	fw_event->VP_ID = mpi_reply->VP_ID;
	fw_event->event = event;
	_scsih_fw_event_add(ioc, fw_event);
	/* _scsih_fw_event_add took its own reference; drop ours */
	fw_event_work_put(fw_event);
	return 1;
}
12286 
/**
 * _scsih_expander_node_remove - removing expander device from list.
 * @ioc: per adapter object
 * @sas_expander: the sas_device object
 *
 * Removing object and freeing associated memory from the
 * ioc->sas_expander_list.  First tears down every port hanging off this
 * expander (recursing into child expanders via mpt3sas_expander_remove),
 * then removes the expander's own transport port, unlinks it and frees it.
 */
static void
_scsih_expander_node_remove(struct MPT3SAS_ADAPTER *ioc,
	struct _sas_node *sas_expander)
{
	struct _sas_port *mpt3sas_port, *next;
	unsigned long flags;
	int port_id;

	/* remove sibling ports attached to this expander */
	list_for_each_entry_safe(mpt3sas_port, next,
	   &sas_expander->sas_port_list, port_list) {
		/* bail mid-teardown if a host reset starts; the expander
		 * itself is intentionally left in place in that case
		 */
		if (ioc->shost_recovery)
			return;
		if (mpt3sas_port->remote_identify.device_type ==
		    SAS_END_DEVICE)
			mpt3sas_device_remove_by_sas_address(ioc,
			    mpt3sas_port->remote_identify.sas_address,
			    mpt3sas_port->hba_port);
		else if (mpt3sas_port->remote_identify.device_type ==
		    SAS_EDGE_EXPANDER_DEVICE ||
		    mpt3sas_port->remote_identify.device_type ==
		    SAS_FANOUT_EXPANDER_DEVICE)
			mpt3sas_expander_remove(ioc,
			    mpt3sas_port->remote_identify.sas_address,
			    mpt3sas_port->hba_port);
	}

	/* capture the port id before the structure goes away (for the log) */
	port_id = sas_expander->port->port_id;

	mpt3sas_transport_port_remove(ioc, sas_expander->sas_address,
	    sas_expander->sas_address_parent, sas_expander->port);

	ioc_info(ioc,
	    "expander_remove: handle(0x%04x), sas_addr(0x%016llx), port:%d\n",
	    sas_expander->handle, (unsigned long long)
	    sas_expander->sas_address,
	    port_id);

	spin_lock_irqsave(&ioc->sas_node_lock, flags);
	list_del(&sas_expander->list);
	spin_unlock_irqrestore(&ioc->sas_node_lock, flags);

	kfree(sas_expander->phy);
	kfree(sas_expander);
}
12340 
/**
 * _scsih_nvme_shutdown - NVMe shutdown notification
 * @ioc: per adapter object
 *
 * Sending IoUnitControl request with shutdown operation code to alert IOC that
 * the host system is shutting down so that IOC can issue NVMe shutdown to
 * NVMe drives attached to it.  Serialized through the shared
 * ioc->scsih_cmds internal-command slot; waits up to
 * ioc->max_shutdown_latency seconds for the firmware reply.
 */
static void
_scsih_nvme_shutdown(struct MPT3SAS_ADAPTER *ioc)
{
	Mpi26IoUnitControlRequest_t *mpi_request;
	Mpi26IoUnitControlReply_t *mpi_reply;
	u16 smid;

	/* are there any NVMe devices ? */
	if (list_empty(&ioc->pcie_device_list))
		return;

	mutex_lock(&ioc->scsih_cmds.mutex);

	/* the shared internal-command slot must be free */
	if (ioc->scsih_cmds.status != MPT3_CMD_NOT_USED) {
		ioc_err(ioc, "%s: scsih_cmd in use\n", __func__);
		goto out;
	}

	ioc->scsih_cmds.status = MPT3_CMD_PENDING;

	smid = mpt3sas_base_get_smid(ioc, ioc->scsih_cb_idx);
	if (!smid) {
		ioc_err(ioc,
		    "%s: failed obtaining a smid\n", __func__);
		ioc->scsih_cmds.status = MPT3_CMD_NOT_USED;
		goto out;
	}

	/* build and fire the IO_UNIT_CONTROL shutdown request */
	mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
	ioc->scsih_cmds.smid = smid;
	memset(mpi_request, 0, sizeof(Mpi26IoUnitControlRequest_t));
	mpi_request->Function = MPI2_FUNCTION_IO_UNIT_CONTROL;
	mpi_request->Operation = MPI26_CTRL_OP_SHUTDOWN;

	init_completion(&ioc->scsih_cmds.done);
	ioc->put_smid_default(ioc, smid);
	/* Wait for max_shutdown_latency seconds */
	ioc_info(ioc,
		"Io Unit Control shutdown (sending), Shutdown latency %d sec\n",
		ioc->max_shutdown_latency);
	wait_for_completion_timeout(&ioc->scsih_cmds.done,
			ioc->max_shutdown_latency*HZ);

	if (!(ioc->scsih_cmds.status & MPT3_CMD_COMPLETE)) {
		ioc_err(ioc, "%s: timeout\n", __func__);
		goto out;
	}

	if (ioc->scsih_cmds.status & MPT3_CMD_REPLY_VALID) {
		mpi_reply = ioc->scsih_cmds.reply;
		ioc_info(ioc, "Io Unit Control shutdown (complete):"
			"ioc_status(0x%04x), loginfo(0x%08x)\n",
			le16_to_cpu(mpi_reply->IOCStatus),
			le32_to_cpu(mpi_reply->IOCLogInfo));
	}
 out:
	/* release the shared command slot on all paths */
	ioc->scsih_cmds.status = MPT3_CMD_NOT_USED;
	mutex_unlock(&ioc->scsih_cmds.mutex);
}
12408 

/**
 * _scsih_ir_shutdown - IR shutdown notification
 * @ioc: per adapter object
 *
 * Sending RAID Action to alert the Integrated RAID subsystem of the IOC that
 * the host system is shutting down.  No-op unless IR firmware is loaded
 * and at least one volume exists.  Uses the shared ioc->scsih_cmds slot
 * with a fixed 10 second reply timeout.
 */
static void
_scsih_ir_shutdown(struct MPT3SAS_ADAPTER *ioc)
{
	Mpi2RaidActionRequest_t *mpi_request;
	Mpi2RaidActionReply_t *mpi_reply;
	u16 smid;

	/* is IR firmware build loaded ? */
	if (!ioc->ir_firmware)
		return;

	/* are there any volumes ? */
	if (list_empty(&ioc->raid_device_list))
		return;

	mutex_lock(&ioc->scsih_cmds.mutex);

	/* the shared internal-command slot must be free */
	if (ioc->scsih_cmds.status != MPT3_CMD_NOT_USED) {
		ioc_err(ioc, "%s: scsih_cmd in use\n", __func__);
		goto out;
	}
	ioc->scsih_cmds.status = MPT3_CMD_PENDING;

	smid = mpt3sas_base_get_smid(ioc, ioc->scsih_cb_idx);
	if (!smid) {
		ioc_err(ioc, "%s: failed obtaining a smid\n", __func__);
		ioc->scsih_cmds.status = MPT3_CMD_NOT_USED;
		goto out;
	}

	/* build and fire the RAID_ACTION shutdown request */
	mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
	ioc->scsih_cmds.smid = smid;
	memset(mpi_request, 0, sizeof(Mpi2RaidActionRequest_t));

	mpi_request->Function = MPI2_FUNCTION_RAID_ACTION;
	mpi_request->Action = MPI2_RAID_ACTION_SYSTEM_SHUTDOWN_INITIATED;

	/* hide_ir_msg suppresses IR chatter (e.g. WarpDrive) */
	if (!ioc->hide_ir_msg)
		ioc_info(ioc, "IR shutdown (sending)\n");
	init_completion(&ioc->scsih_cmds.done);
	ioc->put_smid_default(ioc, smid);
	wait_for_completion_timeout(&ioc->scsih_cmds.done, 10*HZ);

	if (!(ioc->scsih_cmds.status & MPT3_CMD_COMPLETE)) {
		ioc_err(ioc, "%s: timeout\n", __func__);
		goto out;
	}

	if (ioc->scsih_cmds.status & MPT3_CMD_REPLY_VALID) {
		mpi_reply = ioc->scsih_cmds.reply;
		if (!ioc->hide_ir_msg)
			ioc_info(ioc, "IR shutdown (complete): ioc_status(0x%04x), loginfo(0x%08x)\n",
				 le16_to_cpu(mpi_reply->IOCStatus),
				 le32_to_cpu(mpi_reply->IOCLogInfo));
	}

 out:
	/* release the shared command slot on all paths */
	ioc->scsih_cmds.status = MPT3_CMD_NOT_USED;
	mutex_unlock(&ioc->scsih_cmds.mutex);
}
12477 
12478 /**
12479  * _scsih_get_shost_and_ioc - get shost and ioc
12480  *			and verify whether they are NULL or not
12481  * @pdev: PCI device struct
12482  * @shost: address of scsi host pointer
12483  * @ioc: address of HBA adapter pointer
12484  *
12485  * Return zero if *shost and *ioc are not NULL otherwise return error number.
12486  */
12487 static int
_scsih_get_shost_and_ioc(struct pci_dev * pdev,struct Scsi_Host ** shost,struct MPT3SAS_ADAPTER ** ioc)12488 _scsih_get_shost_and_ioc(struct pci_dev *pdev,
12489 	struct Scsi_Host **shost, struct MPT3SAS_ADAPTER **ioc)
12490 {
12491 	*shost = pci_get_drvdata(pdev);
12492 	if (*shost == NULL) {
12493 		dev_err(&pdev->dev, "pdev's driver data is null\n");
12494 		return -ENXIO;
12495 	}
12496 
12497 	*ioc = shost_priv(*shost);
12498 	if (*ioc == NULL) {
12499 		dev_err(&pdev->dev, "shost's private data is null\n");
12500 		return -ENXIO;
12501 	}
12502 
12503 	return 0;
12504 }
12505 
12506 /**
12507  * scsih_remove - detach and remove add host
12508  * @pdev: PCI device struct
12509  *
12510  * Routine called when unloading the driver.
12511  */
static void scsih_remove(struct pci_dev *pdev)
{
	struct Scsi_Host *shost;
	struct MPT3SAS_ADAPTER *ioc;
	struct _sas_port *mpt3sas_port, *next_port;
	struct _raid_device *raid_device, *next;
	struct MPT3SAS_TARGET *sas_target_priv_data;
	struct _pcie_device *pcie_device, *pcienext;
	struct workqueue_struct	*wq;
	unsigned long flags;
	Mpi2ConfigReply_t mpi_reply;
	struct hba_port *port, *port_next;

	if (_scsih_get_shost_and_ioc(pdev, &shost, &ioc))
		return;

	/* Tell the rest of the driver that a teardown is in progress. */
	ioc->remove_host = 1;

	/* On surprise removal nothing will ever complete from the
	 * hardware, so fail all outstanding commands now. */
	if (!pci_device_is_present(pdev)) {
		mpt3sas_base_pause_mq_polling(ioc);
		_scsih_flush_running_cmds(ioc);
	}

	_scsih_fw_event_cleanup_queue(ioc);

	/* Detach the firmware-event workqueue under the lock so no new
	 * work can be queued, then destroy it. */
	spin_lock_irqsave(&ioc->fw_event_lock, flags);
	wq = ioc->firmware_event_thread;
	ioc->firmware_event_thread = NULL;
	spin_unlock_irqrestore(&ioc->fw_event_lock, flags);
	if (wq)
		destroy_workqueue(wq);
	/*
	 * Copy back the unmodified ioc page1 so that on next driver load,
	 * current modified changes on ioc page1 won't take effect.
	 */
	if (ioc->is_aero_ioc)
		mpt3sas_config_set_ioc_pg1(ioc, &mpi_reply,
				&ioc->ioc_pg1_copy);
	/* release all the volumes */
	_scsih_ir_shutdown(ioc);
	mpt3sas_destroy_debugfs(ioc);
	sas_remove_host(shost);
	/* Unregister each RAID volume from the midlayer and free it. */
	list_for_each_entry_safe(raid_device, next, &ioc->raid_device_list,
	    list) {
		if (raid_device->starget) {
			sas_target_priv_data =
			    raid_device->starget->hostdata;
			sas_target_priv_data->deleted = 1;
			scsi_remove_target(&raid_device->starget->dev);
		}
		ioc_info(ioc, "removing handle(0x%04x), wwid(0x%016llx)\n",
			 raid_device->handle, (u64)raid_device->wwid);
		_scsih_raid_device_remove(ioc, raid_device);
	}
	/* Drop all NVMe devices and their list references. */
	list_for_each_entry_safe(pcie_device, pcienext, &ioc->pcie_device_list,
		list) {
		_scsih_pcie_device_remove_from_sml(ioc, pcie_device);
		list_del_init(&pcie_device->list);
		pcie_device_put(pcie_device);
	}

	/* free ports attached to the sas_host */
	list_for_each_entry_safe(mpt3sas_port, next_port,
	   &ioc->sas_hba.sas_port_list, port_list) {
		if (mpt3sas_port->remote_identify.device_type ==
		    SAS_END_DEVICE)
			mpt3sas_device_remove_by_sas_address(ioc,
			    mpt3sas_port->remote_identify.sas_address,
			    mpt3sas_port->hba_port);
		else if (mpt3sas_port->remote_identify.device_type ==
		    SAS_EDGE_EXPANDER_DEVICE ||
		    mpt3sas_port->remote_identify.device_type ==
		    SAS_FANOUT_EXPANDER_DEVICE)
			mpt3sas_expander_remove(ioc,
			    mpt3sas_port->remote_identify.sas_address,
			    mpt3sas_port->hba_port);
	}

	/* Free the hba_port table entries. */
	list_for_each_entry_safe(port, port_next,
	    &ioc->port_table_list, list) {
		list_del(&port->list);
		kfree(port);
	}

	/* free phys attached to the sas_host */
	if (ioc->sas_hba.num_phys) {
		kfree(ioc->sas_hba.phy);
		ioc->sas_hba.phy = NULL;
		ioc->sas_hba.num_phys = 0;
	}

	/* Tear down the base layer, drop the adapter from the global
	 * list, and release the final shost reference. */
	mpt3sas_base_detach(ioc);
	mpt3sas_ctl_release(ioc);
	spin_lock(&gioc_lock);
	list_del(&ioc->list);
	spin_unlock(&gioc_lock);
	scsi_host_put(shost);
}
12610 
12611 /**
12612  * scsih_shutdown - routine call during system shutdown
12613  * @pdev: PCI device struct
12614  */
static void
scsih_shutdown(struct pci_dev *pdev)
{
	struct Scsi_Host *shost;
	struct MPT3SAS_ADAPTER *ioc;
	struct workqueue_struct	*wq;
	unsigned long flags;
	Mpi2ConfigReply_t mpi_reply;

	if (_scsih_get_shost_and_ioc(pdev, &shost, &ioc))
		return;

	ioc->remove_host = 1;

	/* On surprise removal nothing will ever complete from the
	 * hardware, so fail all outstanding commands now. */
	if (!pci_device_is_present(pdev)) {
		mpt3sas_base_pause_mq_polling(ioc);
		_scsih_flush_running_cmds(ioc);
	}

	_scsih_fw_event_cleanup_queue(ioc);

	/* Detach and destroy the firmware-event workqueue. */
	spin_lock_irqsave(&ioc->fw_event_lock, flags);
	wq = ioc->firmware_event_thread;
	ioc->firmware_event_thread = NULL;
	spin_unlock_irqrestore(&ioc->fw_event_lock, flags);
	if (wq)
		destroy_workqueue(wq);
	/*
	 * Copy back the unmodified ioc page1 so that on next driver load,
	 * current modified changes on ioc page1 won't take effect.
	 */
	if (ioc->is_aero_ioc)
		mpt3sas_config_set_ioc_pg1(ioc, &mpi_reply,
				&ioc->ioc_pg1_copy);

	/* Notify IR firmware and NVMe devices of the shutdown, then
	 * quiesce the IOC: mask interrupts, stop the watchdog, soft
	 * reset into ready state, and release IRQ/MSI-X resources. */
	_scsih_ir_shutdown(ioc);
	_scsih_nvme_shutdown(ioc);
	mpt3sas_base_mask_interrupts(ioc);
	mpt3sas_base_stop_watchdog(ioc);
	ioc->shost_recovery = 1;
	mpt3sas_base_make_ioc_ready(ioc, SOFT_RESET);
	ioc->shost_recovery = 0;
	mpt3sas_base_free_irq(ioc);
	mpt3sas_base_disable_msix(ioc);
}
12660 
12661 
12662 /**
12663  * _scsih_probe_boot_devices - reports 1st device
12664  * @ioc: per adapter object
12665  *
12666  * If specified in bios page 2, this routine reports the 1st
12667  * device scsi-ml or sas transport for persistent boot device
12668  * purposes.  Please refer to function _scsih_determine_boot_device()
12669  */
static void
_scsih_probe_boot_devices(struct MPT3SAS_ADAPTER *ioc)
{
	u32 channel;
	void *device;
	struct _sas_device *sas_device;
	struct _raid_device *raid_device;
	struct _pcie_device *pcie_device;
	u16 handle;
	u64 sas_address_parent;
	u64 sas_address;
	unsigned long flags;
	int rc;
	int tid;
	struct hba_port *port;

	 /* no Bios, return immediately */
	if (!ioc->bios_pg3.BiosVersion)
		return;

	/* Pick the boot device in priority order: requested, requested
	 * alternate, then current boot device. */
	device = NULL;
	if (ioc->req_boot_device.device) {
		device =  ioc->req_boot_device.device;
		channel = ioc->req_boot_device.channel;
	} else if (ioc->req_alt_boot_device.device) {
		device =  ioc->req_alt_boot_device.device;
		channel = ioc->req_alt_boot_device.channel;
	} else if (ioc->current_boot_device.device) {
		device =  ioc->current_boot_device.device;
		channel = ioc->current_boot_device.channel;
	}

	if (!device)
		return;

	/* The channel tells us which union member "device" really is. */
	if (channel == RAID_CHANNEL) {
		raid_device = device;
		/*
		 * If this boot vd is already registered with SML then
		 * no need to register it again as part of device scanning
		 * after diag reset during driver load operation.
		 */
		if (raid_device->starget)
			return;
		rc = scsi_add_device(ioc->shost, RAID_CHANNEL,
		    raid_device->id, 0);
		if (rc)
			_scsih_raid_device_remove(ioc, raid_device);
	} else if (channel == PCIE_CHANNEL) {
		pcie_device = device;
		/*
		 * If this boot NVMe device is already registered with SML then
		 * no need to register it again as part of device scanning
		 * after diag reset during driver load operation.
		 */
		if (pcie_device->starget)
			return;
		/* Move the device onto the active list under the lock
		 * before exposing it to the midlayer. */
		spin_lock_irqsave(&ioc->pcie_device_lock, flags);
		tid = pcie_device->id;
		list_move_tail(&pcie_device->list, &ioc->pcie_device_list);
		spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
		rc = scsi_add_device(ioc->shost, PCIE_CHANNEL, tid, 0);
		if (rc)
			_scsih_pcie_device_remove(ioc, pcie_device);
	} else {
		sas_device = device;
		/*
		 * If this boot sas/sata device is already registered with SML
		 * then no need to register it again as part of device scanning
		 * after diag reset during driver load operation.
		 */
		if (sas_device->starget)
			return;
		/* Snapshot addressing info and move the device onto the
		 * active list while holding the lock. */
		spin_lock_irqsave(&ioc->sas_device_lock, flags);
		handle = sas_device->handle;
		sas_address_parent = sas_device->sas_address_parent;
		sas_address = sas_device->sas_address;
		port = sas_device->port;
		list_move_tail(&sas_device->list, &ioc->sas_device_list);
		spin_unlock_irqrestore(&ioc->sas_device_lock, flags);

		if (ioc->hide_drives)
			return;

		/* No hba_port resolved for this device; cannot register. */
		if (!port)
			return;

		if (!mpt3sas_transport_port_add(ioc, handle,
		    sas_address_parent, port)) {
			_scsih_sas_device_remove(ioc, sas_device);
		} else if (!sas_device->starget) {
			if (!ioc->is_driver_loading) {
				mpt3sas_transport_port_remove(ioc,
				    sas_address,
				    sas_address_parent, port);
				_scsih_sas_device_remove(ioc, sas_device);
			}
		}
	}
}
12770 
12771 /**
12772  * _scsih_probe_raid - reporting raid volumes to scsi-ml
12773  * @ioc: per adapter object
12774  *
12775  * Called during initial loading of the driver.
12776  */
12777 static void
_scsih_probe_raid(struct MPT3SAS_ADAPTER * ioc)12778 _scsih_probe_raid(struct MPT3SAS_ADAPTER *ioc)
12779 {
12780 	struct _raid_device *raid_device, *raid_next;
12781 	int rc;
12782 
12783 	list_for_each_entry_safe(raid_device, raid_next,
12784 	    &ioc->raid_device_list, list) {
12785 		if (raid_device->starget)
12786 			continue;
12787 		rc = scsi_add_device(ioc->shost, RAID_CHANNEL,
12788 		    raid_device->id, 0);
12789 		if (rc)
12790 			_scsih_raid_device_remove(ioc, raid_device);
12791 	}
12792 }
12793 
get_next_sas_device(struct MPT3SAS_ADAPTER * ioc)12794 static struct _sas_device *get_next_sas_device(struct MPT3SAS_ADAPTER *ioc)
12795 {
12796 	struct _sas_device *sas_device = NULL;
12797 	unsigned long flags;
12798 
12799 	spin_lock_irqsave(&ioc->sas_device_lock, flags);
12800 	if (!list_empty(&ioc->sas_device_init_list)) {
12801 		sas_device = list_first_entry(&ioc->sas_device_init_list,
12802 				struct _sas_device, list);
12803 		sas_device_get(sas_device);
12804 	}
12805 	spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
12806 
12807 	return sas_device;
12808 }
12809 
static void sas_device_make_active(struct MPT3SAS_ADAPTER *ioc,
		struct _sas_device *sas_device)
{
	unsigned long flags;

	spin_lock_irqsave(&ioc->sas_device_lock, flags);

	/*
	 * Since we dropped the lock during the call to port_add(), we need to
	 * be careful here that somebody else didn't move or delete this item
	 * while we were busy with other things.
	 *
	 * If it was on the list, we need a put() for the reference the list
	 * had. Either way, we need a get() for the destination list.
	 */
	if (!list_empty(&sas_device->list)) {
		list_del_init(&sas_device->list);
		sas_device_put(sas_device);
	}

	/* This reference is now held on behalf of sas_device_list. */
	sas_device_get(sas_device);
	list_add_tail(&sas_device->list, &ioc->sas_device_list);

	spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
}
12835 
12836 /**
12837  * _scsih_probe_sas - reporting sas devices to sas transport
12838  * @ioc: per adapter object
12839  *
12840  * Called during initial loading of the driver.
12841  */
static void
_scsih_probe_sas(struct MPT3SAS_ADAPTER *ioc)
{
	struct _sas_device *sas_device;

	/* Drives are hidden from the OS in this configuration; nothing
	 * to register. */
	if (ioc->hide_drives)
		return;

	/* Drain the init list: register each device with the SAS
	 * transport, then move it onto the active list.  Each iteration
	 * owns one reference from get_next_sas_device(). */
	while ((sas_device = get_next_sas_device(ioc))) {
		if (!mpt3sas_transport_port_add(ioc, sas_device->handle,
		    sas_device->sas_address_parent, sas_device->port)) {
			_scsih_sas_device_remove(ioc, sas_device);
			sas_device_put(sas_device);
			continue;
		} else if (!sas_device->starget) {
			/*
			 * When async scanning is enabled, it's not possible to
			 * remove devices while scanning is turned on due to an
			 * oops in scsi_sysfs_add_sdev()->add_device()->
			 * sysfs_addrm_start()
			 */
			if (!ioc->is_driver_loading) {
				mpt3sas_transport_port_remove(ioc,
				    sas_device->sas_address,
				    sas_device->sas_address_parent,
				    sas_device->port);
				_scsih_sas_device_remove(ioc, sas_device);
				sas_device_put(sas_device);
				continue;
			}
		}
		sas_device_make_active(ioc, sas_device);
		sas_device_put(sas_device);
	}
}
12877 
12878 /**
12879  * get_next_pcie_device - Get the next pcie device
12880  * @ioc: per adapter object
12881  *
12882  * Get the next pcie device from pcie_device_init_list list.
12883  *
12884  * Return: pcie device structure if pcie_device_init_list list is not empty
12885  * otherwise returns NULL
12886  */
get_next_pcie_device(struct MPT3SAS_ADAPTER * ioc)12887 static struct _pcie_device *get_next_pcie_device(struct MPT3SAS_ADAPTER *ioc)
12888 {
12889 	struct _pcie_device *pcie_device = NULL;
12890 	unsigned long flags;
12891 
12892 	spin_lock_irqsave(&ioc->pcie_device_lock, flags);
12893 	if (!list_empty(&ioc->pcie_device_init_list)) {
12894 		pcie_device = list_first_entry(&ioc->pcie_device_init_list,
12895 				struct _pcie_device, list);
12896 		pcie_device_get(pcie_device);
12897 	}
12898 	spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
12899 
12900 	return pcie_device;
12901 }
12902 
12903 /**
12904  * pcie_device_make_active - Add pcie device to pcie_device_list list
12905  * @ioc: per adapter object
12906  * @pcie_device: pcie device object
12907  *
12908  * Add the pcie device which has registered with SCSI Transport Later to
12909  * pcie_device_list list
12910  */
static void pcie_device_make_active(struct MPT3SAS_ADAPTER *ioc,
		struct _pcie_device *pcie_device)
{
	unsigned long flags;

	spin_lock_irqsave(&ioc->pcie_device_lock, flags);

	/* If the device is still on a list (normally the init list),
	 * unlink it and drop the reference that list held. */
	if (!list_empty(&pcie_device->list)) {
		list_del_init(&pcie_device->list);
		pcie_device_put(pcie_device);
	}
	/* Take a reference on behalf of pcie_device_list, then add. */
	pcie_device_get(pcie_device);
	list_add_tail(&pcie_device->list, &ioc->pcie_device_list);

	spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
}
12927 
12928 /**
12929  * _scsih_probe_pcie - reporting PCIe devices to scsi-ml
12930  * @ioc: per adapter object
12931  *
12932  * Called during initial loading of the driver.
12933  */
static void
_scsih_probe_pcie(struct MPT3SAS_ADAPTER *ioc)
{
	struct _pcie_device *pcie_device;
	int rc;

	/* PCIe Device List: drain the init list, registering each device
	 * with scsi-ml and moving it onto the active list.  Each
	 * iteration owns one reference from get_next_pcie_device(). */
	while ((pcie_device = get_next_pcie_device(ioc))) {
		/* Already registered with SML; just make it active. */
		if (pcie_device->starget) {
			pcie_device_put(pcie_device);
			continue;
		}
		/* Blocked devices are tracked internally but not exposed
		 * to the midlayer. */
		if (pcie_device->access_status ==
		    MPI26_PCIEDEV0_ASTATUS_DEVICE_BLOCKED) {
			pcie_device_make_active(ioc, pcie_device);
			pcie_device_put(pcie_device);
			continue;
		}
		rc = scsi_add_device(ioc->shost, PCIE_CHANNEL,
			pcie_device->id, 0);
		if (rc) {
			_scsih_pcie_device_remove(ioc, pcie_device);
			pcie_device_put(pcie_device);
			continue;
		} else if (!pcie_device->starget) {
			/*
			 * When async scanning is enabled, it's not possible to
			 * remove devices while scanning is turned on due to an
			 * oops in scsi_sysfs_add_sdev()->add_device()->
			 * sysfs_addrm_start()
			 */
			if (!ioc->is_driver_loading) {
			/* TODO-- Need to find out whether this condition will
			 * occur or not
			 */
				_scsih_pcie_device_remove(ioc, pcie_device);
				pcie_device_put(pcie_device);
				continue;
			}
		}
		pcie_device_make_active(ioc, pcie_device);
		pcie_device_put(pcie_device);
	}
}
12978 
12979 /**
12980  * _scsih_probe_devices - probing for devices
12981  * @ioc: per adapter object
12982  *
12983  * Called during initial loading of the driver.
12984  */
static void
_scsih_probe_devices(struct MPT3SAS_ADAPTER *ioc)
{
	u16 volume_mapping_flags;

	if (!(ioc->facts.ProtocolFlags & MPI2_IOCFACTS_PROTOCOL_SCSI_INITIATOR))
		return;  /* return when IOC doesn't support initiator mode */

	/* Report the BIOS-designated boot device first so it receives
	 * the lowest target id. */
	_scsih_probe_boot_devices(ioc);

	if (ioc->ir_firmware) {
		/* With IR firmware, the volume mapping mode decides
		 * whether volumes or bare drives get the low target ids,
		 * so probe in the matching order. */
		volume_mapping_flags =
		    le16_to_cpu(ioc->ioc_pg8.IRVolumeMappingFlags) &
		    MPI2_IOCPAGE8_IRFLAGS_MASK_VOLUME_MAPPING_MODE;
		if (volume_mapping_flags ==
		    MPI2_IOCPAGE8_IRFLAGS_LOW_VOLUME_MAPPING) {
			_scsih_probe_raid(ioc);
			_scsih_probe_sas(ioc);
		} else {
			_scsih_probe_sas(ioc);
			_scsih_probe_raid(ioc);
		}
	} else {
		/* NOTE(review): PCIe/NVMe devices are probed only on
		 * non-IR firmware; presumably IR-capable HBAs never have
		 * NVMe attach points — confirm. */
		_scsih_probe_sas(ioc);
		_scsih_probe_pcie(ioc);
	}
}
13012 
13013 /**
13014  * scsih_scan_start - scsi lld callback for .scan_start
13015  * @shost: SCSI host pointer
13016  *
13017  * The shost has the ability to discover targets on its own instead
 * of scanning the entire bus.  In our implementation, we will kick off
13019  * firmware discovery.
13020  */
13021 static void
scsih_scan_start(struct Scsi_Host * shost)13022 scsih_scan_start(struct Scsi_Host *shost)
13023 {
13024 	struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
13025 	int rc;
13026 	if (diag_buffer_enable != -1 && diag_buffer_enable != 0)
13027 		mpt3sas_enable_diag_buffer(ioc, diag_buffer_enable);
13028 	else if (ioc->manu_pg11.HostTraceBufferMaxSizeKB != 0)
13029 		mpt3sas_enable_diag_buffer(ioc, 1);
13030 
13031 	if (disable_discovery > 0)
13032 		return;
13033 
13034 	ioc->start_scan = 1;
13035 	rc = mpt3sas_port_enable(ioc);
13036 
13037 	if (rc != 0)
13038 		ioc_info(ioc, "port enable: FAILED\n");
13039 }
13040 
13041 /**
13042  * _scsih_complete_devices_scanning - add the devices to sml and
13043  * complete ioc initialization.
13044  * @ioc: per adapter object
13045  *
13046  * Return nothing.
13047  */
static void _scsih_complete_devices_scanning(struct MPT3SAS_ADAPTER *ioc)
{

	/* Deferred device reporting: performed at most once per load. */
	if (ioc->wait_for_discovery_to_complete) {
		ioc->wait_for_discovery_to_complete = 0;
		_scsih_probe_devices(ioc);
	}

	/* Driver load is done; start the fault watchdog. */
	mpt3sas_base_start_watchdog(ioc);
	ioc->is_driver_loading = 0;
}
13059 
13060 /**
13061  * scsih_scan_finished - scsi lld callback for .scan_finished
13062  * @shost: SCSI host pointer
13063  * @time: elapsed time of the scan in jiffies
13064  *
 * This function will be called periodically until it returns 1 with the
 * scsi_host and the elapsed time of the scan in jiffies. In our implementation,
13067  * we wait for firmware discovery to complete, then return 1.
13068  */
static int
scsih_scan_finished(struct Scsi_Host *shost, unsigned long time)
{
	struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
	u32 ioc_state;
	int issue_hard_reset = 0;

	/* Discovery disabled by module parameter: report scan done. */
	if (disable_discovery > 0) {
		ioc->is_driver_loading = 0;
		ioc->wait_for_discovery_to_complete = 0;
		return 1;
	}

	/* Allow port enable at most 300 seconds overall. */
	if (time >= (300 * HZ)) {
		ioc->port_enable_cmds.status = MPT3_CMD_NOT_USED;
		ioc_info(ioc, "port enable: FAILED with timeout (timeout=300s)\n");
		ioc->is_driver_loading = 0;
		return 1;
	}

	/* Port enable still in flight: detect fault/coredump states,
	 * otherwise return 0 so the midlayer keeps polling us. */
	if (ioc->start_scan) {
		ioc_state = mpt3sas_base_get_iocstate(ioc, 0);
		if ((ioc_state & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_FAULT) {
			mpt3sas_print_fault_code(ioc, ioc_state &
			    MPI2_DOORBELL_DATA_MASK);
			issue_hard_reset = 1;
			goto out;
		} else if ((ioc_state & MPI2_IOC_STATE_MASK) ==
				MPI2_IOC_STATE_COREDUMP) {
			mpt3sas_base_coredump_info(ioc, ioc_state &
			    MPI2_DOORBELL_DATA_MASK);
			mpt3sas_base_wait_for_coredump_completion(ioc, __func__);
			issue_hard_reset = 1;
			goto out;
		}
		return 0;
	}

	/* Port enable completed; examine how it ended. */
	if (ioc->port_enable_cmds.status & MPT3_CMD_RESET) {
		ioc_info(ioc,
		    "port enable: aborted due to diag reset\n");
		ioc->port_enable_cmds.status = MPT3_CMD_NOT_USED;
		goto out;
	}
	if (ioc->start_scan_failed) {
		ioc_info(ioc, "port enable: FAILED with (ioc_status=0x%08x)\n",
			 ioc->start_scan_failed);
		ioc->is_driver_loading = 0;
		ioc->wait_for_discovery_to_complete = 0;
		ioc->remove_host = 1;
		return 1;
	}

	ioc_info(ioc, "port enable: SUCCESS\n");
	ioc->port_enable_cmds.status = MPT3_CMD_NOT_USED;
	_scsih_complete_devices_scanning(ioc);

out:
	/* Fault/coredump path: try a reset; on failure give up loading. */
	if (issue_hard_reset) {
		ioc->port_enable_cmds.status = MPT3_CMD_NOT_USED;
		if (mpt3sas_base_hard_reset_handler(ioc, SOFT_RESET))
			ioc->is_driver_loading = 0;
	}
	return 1;
}
13134 
13135 /**
13136  * scsih_map_queues - map reply queues with request queues
13137  * @shost: SCSI host pointer
13138  */
static void scsih_map_queues(struct Scsi_Host *shost)
{
	struct MPT3SAS_ADAPTER *ioc =
	    (struct MPT3SAS_ADAPTER *)shost->hostdata;
	struct blk_mq_queue_map *map;
	int i, qoff, offset;
	/* Reply queues below iopoll_q_start_index have MSI-X vectors;
	 * the remainder are polled queues with no IRQ. */
	int nr_msix_vectors = ioc->iopoll_q_start_index;
	int iopoll_q_count = ioc->reply_queue_count - nr_msix_vectors;

	if (shost->nr_hw_queues == 1)
		return;

	for (i = 0, qoff = 0; i < shost->nr_maps; i++) {
		map = &shost->tag_set.map[i];
		map->nr_queues = 0;
		offset = 0;
		if (i == HCTX_TYPE_DEFAULT) {
			/* High-iops queues are excluded from the default
			 * map; skip their vectors via the offset. */
			map->nr_queues =
			    nr_msix_vectors - ioc->high_iops_queues;
			offset = ioc->high_iops_queues;
		} else if (i == HCTX_TYPE_POLL)
			map->nr_queues = iopoll_q_count;

		/* An empty default map would leave I/O nowhere to run. */
		if (!map->nr_queues)
			BUG_ON(i == HCTX_TYPE_DEFAULT);

		/*
		 * The poll queue(s) doesn't have an IRQ (and hence IRQ
		 * affinity), so use the regular blk-mq cpu mapping
		 */
		map->queue_offset = qoff;
		if (i != HCTX_TYPE_POLL)
			blk_mq_map_hw_queues(map, &ioc->pdev->dev, offset);
		else
			blk_mq_map_queues(map);

		qoff += map->nr_queues;
	}
}
13178 
13179 /* shost template for SAS 2.0 HBA devices */
static const struct scsi_host_template mpt2sas_driver_template = {
	.module				= THIS_MODULE,
	.name				= "Fusion MPT SAS Host",
	.proc_name			= MPT2SAS_DRIVER_NAME,
	.queuecommand			= scsih_qcmd,
	.target_alloc			= scsih_target_alloc,
	.sdev_init			= scsih_sdev_init,
	.sdev_configure			= scsih_sdev_configure,
	.target_destroy			= scsih_target_destroy,
	.sdev_destroy			= scsih_sdev_destroy,
	.scan_finished			= scsih_scan_finished,
	.scan_start			= scsih_scan_start,
	.change_queue_depth		= scsih_change_queue_depth,
	.eh_abort_handler		= scsih_abort,
	.eh_device_reset_handler	= scsih_dev_reset,
	.eh_target_reset_handler	= scsih_target_reset,
	.eh_host_reset_handler		= scsih_host_reset,
	.bios_param			= scsih_bios_param,
	/* NOTE(review): can_queue of 1 looks like a placeholder that is
	 * presumably raised once IOC facts are read — confirm. */
	.can_queue			= 1,
	.this_id			= -1,
	.sg_tablesize			= MPT2SAS_SG_DEPTH,
	.max_sectors			= 32767,
	.cmd_per_lun			= 7,	/* SAS 3.0 template uses 128 */
	.shost_groups			= mpt3sas_host_groups,
	.sdev_groups			= mpt3sas_dev_groups,
	.track_queue_depth		= 1,
	.cmd_size			= sizeof(struct scsiio_tracker),
};
13208 
13209 /* raid transport support for SAS 2.0 HBA devices */
static struct raid_function_template mpt2sas_raid_functions = {
	/* cookie is presumably used by the raid class to match hosts
	 * created from the SAS 2.0 template — confirm. */
	.cookie		= &mpt2sas_driver_template,
	.is_raid	= scsih_is_raid,
	.get_resync	= scsih_get_resync,
	.get_state	= scsih_get_state,
};
13216 
13217 /* shost template for SAS 3.0 HBA devices */
static const struct scsi_host_template mpt3sas_driver_template = {
	.module				= THIS_MODULE,
	.name				= "Fusion MPT SAS Host",
	.proc_name			= MPT3SAS_DRIVER_NAME,
	.queuecommand			= scsih_qcmd,
	.target_alloc			= scsih_target_alloc,
	.sdev_init			= scsih_sdev_init,
	.sdev_configure			= scsih_sdev_configure,
	.target_destroy			= scsih_target_destroy,
	.sdev_destroy			= scsih_sdev_destroy,
	.scan_finished			= scsih_scan_finished,
	.scan_start			= scsih_scan_start,
	.change_queue_depth		= scsih_change_queue_depth,
	.eh_abort_handler		= scsih_abort,
	.eh_device_reset_handler	= scsih_dev_reset,
	.eh_target_reset_handler	= scsih_target_reset,
	.eh_host_reset_handler		= scsih_host_reset,
	.bios_param			= scsih_bios_param,
	/* NOTE(review): can_queue of 1 looks like a placeholder that is
	 * presumably raised once IOC facts are read — confirm. */
	.can_queue			= 1,
	.this_id			= -1,
	.sg_tablesize			= MPT3SAS_SG_DEPTH,
	.max_sectors			= 32767,
	.max_segment_size		= 0xffffffff,
	.cmd_per_lun			= 128,	/* SAS 2.0 template uses 7 */
	.shost_groups			= mpt3sas_host_groups,
	.sdev_groups			= mpt3sas_dev_groups,
	.track_queue_depth		= 1,
	.cmd_size			= sizeof(struct scsiio_tracker),
	/* Multiqueue/polling support (SAS 3.0+ only). */
	.map_queues			= scsih_map_queues,
	.mq_poll			= mpt3sas_blk_mq_poll,
};
13249 
13250 /* raid transport support for SAS 3.0 HBA devices */
static struct raid_function_template mpt3sas_raid_functions = {
	/* cookie is presumably used by the raid class to match hosts
	 * created from the SAS 3.0 template — confirm. */
	.cookie		= &mpt3sas_driver_template,
	.is_raid	= scsih_is_raid,
	.get_resync	= scsih_get_resync,
	.get_state	= scsih_get_state,
};
13257 
13258 /**
13259  * _scsih_determine_hba_mpi_version - determine in which MPI version class
13260  *					this device belongs to.
13261  * @pdev: PCI device struct
13262  *
13263  * return MPI2_VERSION for SAS 2.0 HBA devices,
13264  *	MPI25_VERSION for SAS 3.0 HBA devices, and
13265  *	MPI26 VERSION for Cutlass & Invader SAS 3.0 HBA devices
13266  */
static u16
_scsih_determine_hba_mpi_version(struct pci_dev *pdev)
{

	switch (pdev->device) {
	/* SAS 2.0 generation (MPI 2.0), including SSS6200 and the
	 * MPI-endpoint switch functions. */
	case MPI2_MFGPAGE_DEVID_SSS6200:
	case MPI2_MFGPAGE_DEVID_SAS2004:
	case MPI2_MFGPAGE_DEVID_SAS2008:
	case MPI2_MFGPAGE_DEVID_SAS2108_1:
	case MPI2_MFGPAGE_DEVID_SAS2108_2:
	case MPI2_MFGPAGE_DEVID_SAS2108_3:
	case MPI2_MFGPAGE_DEVID_SAS2116_1:
	case MPI2_MFGPAGE_DEVID_SAS2116_2:
	case MPI2_MFGPAGE_DEVID_SAS2208_1:
	case MPI2_MFGPAGE_DEVID_SAS2208_2:
	case MPI2_MFGPAGE_DEVID_SAS2208_3:
	case MPI2_MFGPAGE_DEVID_SAS2208_4:
	case MPI2_MFGPAGE_DEVID_SAS2208_5:
	case MPI2_MFGPAGE_DEVID_SAS2208_6:
	case MPI2_MFGPAGE_DEVID_SAS2308_1:
	case MPI2_MFGPAGE_DEVID_SAS2308_2:
	case MPI2_MFGPAGE_DEVID_SAS2308_3:
	case MPI2_MFGPAGE_DEVID_SWITCH_MPI_EP:
	case MPI2_MFGPAGE_DEVID_SWITCH_MPI_EP_1:
		return MPI2_VERSION;
	/* SAS 3.0 generation (MPI 2.5). */
	case MPI25_MFGPAGE_DEVID_SAS3004:
	case MPI25_MFGPAGE_DEVID_SAS3008:
	case MPI25_MFGPAGE_DEVID_SAS3108_1:
	case MPI25_MFGPAGE_DEVID_SAS3108_2:
	case MPI25_MFGPAGE_DEVID_SAS3108_5:
	case MPI25_MFGPAGE_DEVID_SAS3108_6:
		return MPI25_VERSION;
	/* Later SAS 3.0/3.5 generation (MPI 2.6), including the secured
	 * and invalid/tampered 3816/3916 device ids, which are sorted
	 * out later in _scsih_probe(). */
	case MPI26_MFGPAGE_DEVID_SAS3216:
	case MPI26_MFGPAGE_DEVID_SAS3224:
	case MPI26_MFGPAGE_DEVID_SAS3316_1:
	case MPI26_MFGPAGE_DEVID_SAS3316_2:
	case MPI26_MFGPAGE_DEVID_SAS3316_3:
	case MPI26_MFGPAGE_DEVID_SAS3316_4:
	case MPI26_MFGPAGE_DEVID_SAS3324_1:
	case MPI26_MFGPAGE_DEVID_SAS3324_2:
	case MPI26_MFGPAGE_DEVID_SAS3324_3:
	case MPI26_MFGPAGE_DEVID_SAS3324_4:
	case MPI26_MFGPAGE_DEVID_SAS3508:
	case MPI26_MFGPAGE_DEVID_SAS3508_1:
	case MPI26_MFGPAGE_DEVID_SAS3408:
	case MPI26_MFGPAGE_DEVID_SAS3516:
	case MPI26_MFGPAGE_DEVID_SAS3516_1:
	case MPI26_MFGPAGE_DEVID_SAS3416:
	case MPI26_MFGPAGE_DEVID_SAS3616:
	case MPI26_ATLAS_PCIe_SWITCH_DEVID:
	case MPI26_MFGPAGE_DEVID_CFG_SEC_3916:
	case MPI26_MFGPAGE_DEVID_HARD_SEC_3916:
	case MPI26_MFGPAGE_DEVID_CFG_SEC_3816:
	case MPI26_MFGPAGE_DEVID_HARD_SEC_3816:
	case MPI26_MFGPAGE_DEVID_INVALID0_3916:
	case MPI26_MFGPAGE_DEVID_INVALID1_3916:
	case MPI26_MFGPAGE_DEVID_INVALID0_3816:
	case MPI26_MFGPAGE_DEVID_INVALID1_3816:
		return MPI26_VERSION;
	}
	/* Unknown device id: caller treats 0 as "not ours". */
	return 0;
}
13329 
13330 /**
13331  * _scsih_probe - attach and add scsi host
13332  * @pdev: PCI device struct
13333  * @id: pci device id
13334  *
13335  * Return: 0 success, anything else error.
13336  */
13337 static int
_scsih_probe(struct pci_dev * pdev,const struct pci_device_id * id)13338 _scsih_probe(struct pci_dev *pdev, const struct pci_device_id *id)
13339 {
13340 	struct MPT3SAS_ADAPTER *ioc;
13341 	struct Scsi_Host *shost = NULL;
13342 	int rv;
13343 	u16 hba_mpi_version;
13344 	int iopoll_q_count = 0;
13345 
13346 	/* Determine in which MPI version class this pci device belongs */
13347 	hba_mpi_version = _scsih_determine_hba_mpi_version(pdev);
13348 	if (hba_mpi_version == 0)
13349 		return -ENODEV;
13350 
13351 	/* Enumerate only SAS 2.0 HBA's if hbas_to_enumerate is one,
13352 	 * for other generation HBA's return with -ENODEV
13353 	 */
13354 	if ((hbas_to_enumerate == 1) && (hba_mpi_version !=  MPI2_VERSION))
13355 		return -ENODEV;
13356 
13357 	/* Enumerate only SAS 3.0 HBA's if hbas_to_enumerate is two,
13358 	 * for other generation HBA's return with -ENODEV
13359 	 */
13360 	if ((hbas_to_enumerate == 2) && (!(hba_mpi_version ==  MPI25_VERSION
13361 		|| hba_mpi_version ==  MPI26_VERSION)))
13362 		return -ENODEV;
13363 
13364 	switch (hba_mpi_version) {
13365 	case MPI2_VERSION:
13366 		pci_disable_link_state(pdev, PCIE_LINK_STATE_L0S |
13367 			PCIE_LINK_STATE_L1 | PCIE_LINK_STATE_CLKPM);
13368 		/* Use mpt2sas driver host template for SAS 2.0 HBA's */
13369 		shost = scsi_host_alloc(&mpt2sas_driver_template,
13370 		  sizeof(struct MPT3SAS_ADAPTER));
13371 		if (!shost)
13372 			return -ENODEV;
13373 		ioc = shost_priv(shost);
13374 		memset(ioc, 0, sizeof(struct MPT3SAS_ADAPTER));
13375 		ioc->hba_mpi_version_belonged = hba_mpi_version;
13376 		ioc->id = mpt2_ids++;
13377 		sprintf(ioc->driver_name, "%s", MPT2SAS_DRIVER_NAME);
13378 		switch (pdev->device) {
13379 		case MPI2_MFGPAGE_DEVID_SSS6200:
13380 			ioc->is_warpdrive = 1;
13381 			ioc->hide_ir_msg = 1;
13382 			break;
13383 		case MPI2_MFGPAGE_DEVID_SWITCH_MPI_EP:
13384 		case MPI2_MFGPAGE_DEVID_SWITCH_MPI_EP_1:
13385 			ioc->is_mcpu_endpoint = 1;
13386 			break;
13387 		default:
13388 			ioc->mfg_pg10_hide_flag = MFG_PAGE10_EXPOSE_ALL_DISKS;
13389 			break;
13390 		}
13391 
13392 		if (multipath_on_hba == -1 || multipath_on_hba == 0)
13393 			ioc->multipath_on_hba = 0;
13394 		else
13395 			ioc->multipath_on_hba = 1;
13396 
13397 		break;
13398 	case MPI25_VERSION:
13399 	case MPI26_VERSION:
13400 		/* Use mpt3sas driver host template for SAS 3.0 HBA's */
13401 		shost = scsi_host_alloc(&mpt3sas_driver_template,
13402 		  sizeof(struct MPT3SAS_ADAPTER));
13403 		if (!shost)
13404 			return -ENODEV;
13405 		ioc = shost_priv(shost);
13406 		memset(ioc, 0, sizeof(struct MPT3SAS_ADAPTER));
13407 		ioc->hba_mpi_version_belonged = hba_mpi_version;
13408 		ioc->id = mpt3_ids++;
13409 		sprintf(ioc->driver_name, "%s", MPT3SAS_DRIVER_NAME);
13410 		switch (pdev->device) {
13411 		case MPI26_MFGPAGE_DEVID_SAS3508:
13412 		case MPI26_MFGPAGE_DEVID_SAS3508_1:
13413 		case MPI26_MFGPAGE_DEVID_SAS3408:
13414 		case MPI26_MFGPAGE_DEVID_SAS3516:
13415 		case MPI26_MFGPAGE_DEVID_SAS3516_1:
13416 		case MPI26_MFGPAGE_DEVID_SAS3416:
13417 		case MPI26_MFGPAGE_DEVID_SAS3616:
13418 		case MPI26_ATLAS_PCIe_SWITCH_DEVID:
13419 			ioc->is_gen35_ioc = 1;
13420 			break;
13421 		case MPI26_MFGPAGE_DEVID_INVALID0_3816:
13422 		case MPI26_MFGPAGE_DEVID_INVALID0_3916:
13423 			dev_err(&pdev->dev,
13424 			    "HBA with DeviceId 0x%04x, sub VendorId 0x%04x, sub DeviceId 0x%04x is Invalid",
13425 			    pdev->device, pdev->subsystem_vendor,
13426 			    pdev->subsystem_device);
13427 			return 1;
13428 		case MPI26_MFGPAGE_DEVID_INVALID1_3816:
13429 		case MPI26_MFGPAGE_DEVID_INVALID1_3916:
13430 			dev_err(&pdev->dev,
13431 			    "HBA with DeviceId 0x%04x, sub VendorId 0x%04x, sub DeviceId 0x%04x is Tampered",
13432 			    pdev->device, pdev->subsystem_vendor,
13433 			    pdev->subsystem_device);
13434 			return 1;
13435 		case MPI26_MFGPAGE_DEVID_CFG_SEC_3816:
13436 		case MPI26_MFGPAGE_DEVID_CFG_SEC_3916:
13437 			dev_info(&pdev->dev,
13438 			    "HBA is in Configurable Secure mode\n");
13439 			fallthrough;
13440 		case MPI26_MFGPAGE_DEVID_HARD_SEC_3816:
13441 		case MPI26_MFGPAGE_DEVID_HARD_SEC_3916:
13442 			ioc->is_aero_ioc = ioc->is_gen35_ioc = 1;
13443 			break;
13444 		default:
13445 			ioc->is_gen35_ioc = ioc->is_aero_ioc = 0;
13446 		}
13447 		if ((ioc->hba_mpi_version_belonged == MPI25_VERSION &&
13448 			pdev->revision >= SAS3_PCI_DEVICE_C0_REVISION) ||
13449 			(ioc->hba_mpi_version_belonged == MPI26_VERSION)) {
13450 			ioc->combined_reply_queue = 1;
13451 			if (ioc->is_gen35_ioc)
13452 				ioc->combined_reply_index_count =
13453 				 MPT3_SUP_REPLY_POST_HOST_INDEX_REG_COUNT_G35;
13454 			else
13455 				ioc->combined_reply_index_count =
13456 				 MPT3_SUP_REPLY_POST_HOST_INDEX_REG_COUNT_G3;
13457 		}
13458 
13459 		switch (ioc->is_gen35_ioc) {
13460 		case 0:
13461 			if (multipath_on_hba == -1 || multipath_on_hba == 0)
13462 				ioc->multipath_on_hba = 0;
13463 			else
13464 				ioc->multipath_on_hba = 1;
13465 			break;
13466 		case 1:
13467 			if (multipath_on_hba == -1 || multipath_on_hba > 0)
13468 				ioc->multipath_on_hba = 1;
13469 			else
13470 				ioc->multipath_on_hba = 0;
13471 			break;
13472 		default:
13473 			break;
13474 		}
13475 
13476 		break;
13477 	default:
13478 		return -ENODEV;
13479 	}
13480 
13481 	INIT_LIST_HEAD(&ioc->list);
13482 	spin_lock(&gioc_lock);
13483 	list_add_tail(&ioc->list, &mpt3sas_ioc_list);
13484 	spin_unlock(&gioc_lock);
13485 	ioc->shost = shost;
13486 	ioc->pdev = pdev;
13487 	ioc->scsi_io_cb_idx = scsi_io_cb_idx;
13488 	ioc->tm_cb_idx = tm_cb_idx;
13489 	ioc->ctl_cb_idx = ctl_cb_idx;
13490 	ioc->base_cb_idx = base_cb_idx;
13491 	ioc->port_enable_cb_idx = port_enable_cb_idx;
13492 	ioc->transport_cb_idx = transport_cb_idx;
13493 	ioc->scsih_cb_idx = scsih_cb_idx;
13494 	ioc->config_cb_idx = config_cb_idx;
13495 	ioc->tm_tr_cb_idx = tm_tr_cb_idx;
13496 	ioc->tm_tr_volume_cb_idx = tm_tr_volume_cb_idx;
13497 	ioc->tm_sas_control_cb_idx = tm_sas_control_cb_idx;
13498 	ioc->logging_level = logging_level;
13499 	ioc->schedule_dead_ioc_flush_running_cmds = &_scsih_flush_running_cmds;
13500 	/* Host waits for minimum of six seconds */
13501 	ioc->max_shutdown_latency = IO_UNIT_CONTROL_SHUTDOWN_TIMEOUT;
13502 	/*
13503 	 * Enable MEMORY MOVE support flag.
13504 	 */
13505 	ioc->drv_support_bitmap |= MPT_DRV_SUPPORT_BITMAP_MEMMOVE;
13506 	/* Enable ADDITIONAL QUERY support flag. */
13507 	ioc->drv_support_bitmap |= MPT_DRV_SUPPORT_BITMAP_ADDNLQUERY;
13508 
13509 	ioc->enable_sdev_max_qd = enable_sdev_max_qd;
13510 
13511 	/* misc semaphores and spin locks */
13512 	mutex_init(&ioc->reset_in_progress_mutex);
13513 	mutex_init(&ioc->hostdiag_unlock_mutex);
13514 	/* initializing pci_access_mutex lock */
13515 	mutex_init(&ioc->pci_access_mutex);
13516 	spin_lock_init(&ioc->ioc_reset_in_progress_lock);
13517 	spin_lock_init(&ioc->scsi_lookup_lock);
13518 	spin_lock_init(&ioc->sas_device_lock);
13519 	spin_lock_init(&ioc->sas_node_lock);
13520 	spin_lock_init(&ioc->fw_event_lock);
13521 	spin_lock_init(&ioc->raid_device_lock);
13522 	spin_lock_init(&ioc->pcie_device_lock);
13523 	spin_lock_init(&ioc->diag_trigger_lock);
13524 
13525 	INIT_LIST_HEAD(&ioc->sas_device_list);
13526 	INIT_LIST_HEAD(&ioc->sas_device_init_list);
13527 	INIT_LIST_HEAD(&ioc->sas_expander_list);
13528 	INIT_LIST_HEAD(&ioc->enclosure_list);
13529 	INIT_LIST_HEAD(&ioc->pcie_device_list);
13530 	INIT_LIST_HEAD(&ioc->pcie_device_init_list);
13531 	INIT_LIST_HEAD(&ioc->fw_event_list);
13532 	INIT_LIST_HEAD(&ioc->raid_device_list);
13533 	INIT_LIST_HEAD(&ioc->sas_hba.sas_port_list);
13534 	INIT_LIST_HEAD(&ioc->delayed_tr_list);
13535 	INIT_LIST_HEAD(&ioc->delayed_sc_list);
13536 	INIT_LIST_HEAD(&ioc->delayed_event_ack_list);
13537 	INIT_LIST_HEAD(&ioc->delayed_tr_volume_list);
13538 	INIT_LIST_HEAD(&ioc->reply_queue_list);
13539 	INIT_LIST_HEAD(&ioc->port_table_list);
13540 
13541 	sprintf(ioc->name, "%s_cm%d", ioc->driver_name, ioc->id);
13542 
13543 	/* init shost parameters */
13544 	shost->max_cmd_len = 32;
13545 	shost->max_lun = max_lun;
13546 	shost->transportt = mpt3sas_transport_template;
13547 	shost->unique_id = ioc->id;
13548 
13549 	if (ioc->is_mcpu_endpoint) {
13550 		/* mCPU MPI support 64K max IO */
13551 		shost->max_sectors = 128;
13552 		ioc_info(ioc, "The max_sectors value is set to %d\n",
13553 			 shost->max_sectors);
13554 	} else {
13555 		if (max_sectors != 0xFFFF) {
13556 			if (max_sectors < 64) {
13557 				shost->max_sectors = 64;
13558 				ioc_warn(ioc, "Invalid value %d passed for max_sectors, range is 64 to 32767. Assigning value of 64.\n",
13559 					 max_sectors);
13560 			} else if (max_sectors > 32767) {
13561 				shost->max_sectors = 32767;
13562 				ioc_warn(ioc, "Invalid value %d passed for max_sectors, range is 64 to 32767.Assigning default value of 32767.\n",
13563 					 max_sectors);
13564 			} else {
13565 				shost->max_sectors = max_sectors & 0xFFFE;
13566 				ioc_info(ioc, "The max_sectors value is set to %d\n",
13567 					 shost->max_sectors);
13568 			}
13569 		}
13570 	}
13571 	/* register EEDP capabilities with SCSI layer */
13572 	if (prot_mask >= 0)
13573 		scsi_host_set_prot(shost, (prot_mask & 0x07));
13574 	else
13575 		scsi_host_set_prot(shost, SHOST_DIF_TYPE1_PROTECTION
13576 				   | SHOST_DIF_TYPE2_PROTECTION
13577 				   | SHOST_DIF_TYPE3_PROTECTION);
13578 
13579 	scsi_host_set_guard(shost, SHOST_DIX_GUARD_CRC);
13580 
13581 	/* event thread */
13582 	ioc->firmware_event_thread = alloc_ordered_workqueue(
13583 		"fw_event_%s%d", 0, ioc->driver_name, ioc->id);
13584 	if (!ioc->firmware_event_thread) {
13585 		ioc_err(ioc, "failure at %s:%d/%s()!\n",
13586 			__FILE__, __LINE__, __func__);
13587 		rv = -ENODEV;
13588 		goto out_thread_fail;
13589 	}
13590 
13591 	shost->host_tagset = 0;
13592 
13593 	if (ioc->is_gen35_ioc && host_tagset_enable)
13594 		shost->host_tagset = 1;
13595 
13596 	ioc->is_driver_loading = 1;
13597 	if ((mpt3sas_base_attach(ioc))) {
13598 		ioc_err(ioc, "failure at %s:%d/%s()!\n",
13599 			__FILE__, __LINE__, __func__);
13600 		rv = -ENODEV;
13601 		goto out_attach_fail;
13602 	}
13603 
13604 	if (ioc->is_warpdrive) {
13605 		if (ioc->mfg_pg10_hide_flag ==  MFG_PAGE10_EXPOSE_ALL_DISKS)
13606 			ioc->hide_drives = 0;
13607 		else if (ioc->mfg_pg10_hide_flag ==  MFG_PAGE10_HIDE_ALL_DISKS)
13608 			ioc->hide_drives = 1;
13609 		else {
13610 			if (mpt3sas_get_num_volumes(ioc))
13611 				ioc->hide_drives = 1;
13612 			else
13613 				ioc->hide_drives = 0;
13614 		}
13615 	} else
13616 		ioc->hide_drives = 0;
13617 
13618 	shost->nr_hw_queues = 1;
13619 
13620 	if (shost->host_tagset) {
13621 		shost->nr_hw_queues =
13622 		    ioc->reply_queue_count - ioc->high_iops_queues;
13623 
13624 		iopoll_q_count =
13625 		    ioc->reply_queue_count - ioc->iopoll_q_start_index;
13626 
13627 		shost->nr_maps = iopoll_q_count ? 3 : 1;
13628 
13629 		dev_info(&ioc->pdev->dev,
13630 		    "Max SCSIIO MPT commands: %d shared with nr_hw_queues = %d\n",
13631 		    shost->can_queue, shost->nr_hw_queues);
13632 	}
13633 
13634 	rv = scsi_add_host(shost, &pdev->dev);
13635 	if (rv) {
13636 		ioc_err(ioc, "failure at %s:%d/%s()!\n",
13637 			__FILE__, __LINE__, __func__);
13638 		goto out_add_shost_fail;
13639 	}
13640 
13641 	scsi_scan_host(shost);
13642 	mpt3sas_setup_debugfs(ioc);
13643 	return 0;
13644 out_add_shost_fail:
13645 	mpt3sas_base_detach(ioc);
13646  out_attach_fail:
13647 	destroy_workqueue(ioc->firmware_event_thread);
13648  out_thread_fail:
13649 	spin_lock(&gioc_lock);
13650 	list_del(&ioc->list);
13651 	spin_unlock(&gioc_lock);
13652 	scsi_host_put(shost);
13653 	return rv;
13654 }
13655 
13656 /**
13657  * scsih_suspend - power management suspend main entry point
13658  * @dev: Device struct
13659  *
13660  * Return: 0 success, anything else error.
13661  */
13662 static int __maybe_unused
scsih_suspend(struct device * dev)13663 scsih_suspend(struct device *dev)
13664 {
13665 	struct pci_dev *pdev = to_pci_dev(dev);
13666 	struct Scsi_Host *shost;
13667 	struct MPT3SAS_ADAPTER *ioc;
13668 	int rc;
13669 
13670 	rc = _scsih_get_shost_and_ioc(pdev, &shost, &ioc);
13671 	if (rc)
13672 		return rc;
13673 
13674 	mpt3sas_base_stop_watchdog(ioc);
13675 	scsi_block_requests(shost);
13676 	_scsih_nvme_shutdown(ioc);
13677 	ioc_info(ioc, "pdev=0x%p, slot=%s, entering operating state\n",
13678 		 pdev, pci_name(pdev));
13679 
13680 	mpt3sas_base_free_resources(ioc);
13681 	return 0;
13682 }
13683 
13684 /**
13685  * scsih_resume - power management resume main entry point
13686  * @dev: Device struct
13687  *
13688  * Return: 0 success, anything else error.
13689  */
13690 static int __maybe_unused
scsih_resume(struct device * dev)13691 scsih_resume(struct device *dev)
13692 {
13693 	struct pci_dev *pdev = to_pci_dev(dev);
13694 	struct Scsi_Host *shost;
13695 	struct MPT3SAS_ADAPTER *ioc;
13696 	pci_power_t device_state = pdev->current_state;
13697 	int r;
13698 
13699 	r = _scsih_get_shost_and_ioc(pdev, &shost, &ioc);
13700 	if (r)
13701 		return r;
13702 
13703 	ioc_info(ioc, "pdev=0x%p, slot=%s, previous operating state [D%d]\n",
13704 		 pdev, pci_name(pdev), device_state);
13705 
13706 	ioc->pdev = pdev;
13707 	r = mpt3sas_base_map_resources(ioc);
13708 	if (r)
13709 		return r;
13710 	ioc_info(ioc, "Issuing Hard Reset as part of OS Resume\n");
13711 	mpt3sas_base_hard_reset_handler(ioc, SOFT_RESET);
13712 	scsi_unblock_requests(shost);
13713 	mpt3sas_base_start_watchdog(ioc);
13714 	return 0;
13715 }
13716 
13717 /**
13718  * scsih_pci_error_detected - Called when a PCI error is detected.
13719  * @pdev: PCI device struct
13720  * @state: PCI channel state
13721  *
13722  * Description: Called when a PCI error is detected.
13723  *
13724  * Return: PCI_ERS_RESULT_NEED_RESET or PCI_ERS_RESULT_DISCONNECT.
13725  */
13726 static pci_ers_result_t
scsih_pci_error_detected(struct pci_dev * pdev,pci_channel_state_t state)13727 scsih_pci_error_detected(struct pci_dev *pdev, pci_channel_state_t state)
13728 {
13729 	struct Scsi_Host *shost;
13730 	struct MPT3SAS_ADAPTER *ioc;
13731 
13732 	if (_scsih_get_shost_and_ioc(pdev, &shost, &ioc))
13733 		return PCI_ERS_RESULT_DISCONNECT;
13734 
13735 	ioc_info(ioc, "PCI error: detected callback, state(%d)!!\n", state);
13736 
13737 	switch (state) {
13738 	case pci_channel_io_normal:
13739 		return PCI_ERS_RESULT_CAN_RECOVER;
13740 	case pci_channel_io_frozen:
13741 		/* Fatal error, prepare for slot reset */
13742 		ioc->pci_error_recovery = 1;
13743 		scsi_block_requests(ioc->shost);
13744 		mpt3sas_base_stop_watchdog(ioc);
13745 		mpt3sas_base_free_resources(ioc);
13746 		return PCI_ERS_RESULT_NEED_RESET;
13747 	case pci_channel_io_perm_failure:
13748 		/* Permanent error, prepare for device removal */
13749 		ioc->pci_error_recovery = 1;
13750 		mpt3sas_base_stop_watchdog(ioc);
13751 		mpt3sas_base_pause_mq_polling(ioc);
13752 		_scsih_flush_running_cmds(ioc);
13753 		return PCI_ERS_RESULT_DISCONNECT;
13754 	}
13755 	return PCI_ERS_RESULT_NEED_RESET;
13756 }
13757 
13758 /**
13759  * scsih_pci_slot_reset - Called when PCI slot has been reset.
13760  * @pdev: PCI device struct
13761  *
13762  * Description: This routine is called by the pci error recovery
13763  * code after the PCI slot has been reset, just before we
13764  * should resume normal operations.
13765  */
13766 static pci_ers_result_t
scsih_pci_slot_reset(struct pci_dev * pdev)13767 scsih_pci_slot_reset(struct pci_dev *pdev)
13768 {
13769 	struct Scsi_Host *shost;
13770 	struct MPT3SAS_ADAPTER *ioc;
13771 	int rc;
13772 
13773 	if (_scsih_get_shost_and_ioc(pdev, &shost, &ioc))
13774 		return PCI_ERS_RESULT_DISCONNECT;
13775 
13776 	ioc_info(ioc, "PCI error: slot reset callback!!\n");
13777 
13778 	ioc->pci_error_recovery = 0;
13779 	ioc->pdev = pdev;
13780 	pci_restore_state(pdev);
13781 	rc = mpt3sas_base_map_resources(ioc);
13782 	if (rc)
13783 		return PCI_ERS_RESULT_DISCONNECT;
13784 
13785 	ioc_info(ioc, "Issuing Hard Reset as part of PCI Slot Reset\n");
13786 	rc = mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER);
13787 
13788 	ioc_warn(ioc, "hard reset: %s\n",
13789 		 (rc == 0) ? "success" : "failed");
13790 
13791 	if (!rc)
13792 		return PCI_ERS_RESULT_RECOVERED;
13793 	else
13794 		return PCI_ERS_RESULT_DISCONNECT;
13795 }
13796 
13797 /**
13798  * scsih_pci_resume() - resume normal ops after PCI reset
13799  * @pdev: pointer to PCI device
13800  *
13801  * Called when the error recovery driver tells us that its
13802  * OK to resume normal operation. Use completion to allow
13803  * halted scsi ops to resume.
13804  */
13805 static void
scsih_pci_resume(struct pci_dev * pdev)13806 scsih_pci_resume(struct pci_dev *pdev)
13807 {
13808 	struct Scsi_Host *shost;
13809 	struct MPT3SAS_ADAPTER *ioc;
13810 
13811 	if (_scsih_get_shost_and_ioc(pdev, &shost, &ioc))
13812 		return;
13813 
13814 	ioc_info(ioc, "PCI error: resume callback!!\n");
13815 
13816 	mpt3sas_base_start_watchdog(ioc);
13817 	scsi_unblock_requests(ioc->shost);
13818 }
13819 
13820 /**
13821  * scsih_pci_mmio_enabled - Enable MMIO and dump debug registers
13822  * @pdev: pointer to PCI device
13823  */
13824 static pci_ers_result_t
scsih_pci_mmio_enabled(struct pci_dev * pdev)13825 scsih_pci_mmio_enabled(struct pci_dev *pdev)
13826 {
13827 	struct Scsi_Host *shost;
13828 	struct MPT3SAS_ADAPTER *ioc;
13829 
13830 	if (_scsih_get_shost_and_ioc(pdev, &shost, &ioc))
13831 		return PCI_ERS_RESULT_DISCONNECT;
13832 
13833 	ioc_info(ioc, "PCI error: mmio enabled callback!!\n");
13834 
13835 	/* TODO - dump whatever for debugging purposes */
13836 
13837 	/* This called only if scsih_pci_error_detected returns
13838 	 * PCI_ERS_RESULT_CAN_RECOVER. Read/write to the device still
13839 	 * works, no need to reset slot.
13840 	 */
13841 	return PCI_ERS_RESULT_RECOVERED;
13842 }
13843 
13844 /*
13845  * The pci device ids are defined in mpi/mpi2_cnfg.h.
13846  */
13847 static const struct pci_device_id mpt3sas_pci_table[] = {
13848 	/* Spitfire ~ 2004 */
13849 	{ MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2004,
13850 		PCI_ANY_ID, PCI_ANY_ID },
13851 	/* Falcon ~ 2008 */
13852 	{ MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2008,
13853 		PCI_ANY_ID, PCI_ANY_ID },
13854 	/* Liberator ~ 2108 */
13855 	{ MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2108_1,
13856 		PCI_ANY_ID, PCI_ANY_ID },
13857 	{ MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2108_2,
13858 		PCI_ANY_ID, PCI_ANY_ID },
13859 	{ MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2108_3,
13860 		PCI_ANY_ID, PCI_ANY_ID },
13861 	/* Meteor ~ 2116 */
13862 	{ MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2116_1,
13863 		PCI_ANY_ID, PCI_ANY_ID },
13864 	{ MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2116_2,
13865 		PCI_ANY_ID, PCI_ANY_ID },
13866 	/* Thunderbolt ~ 2208 */
13867 	{ MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2208_1,
13868 		PCI_ANY_ID, PCI_ANY_ID },
13869 	{ MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2208_2,
13870 		PCI_ANY_ID, PCI_ANY_ID },
13871 	{ MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2208_3,
13872 		PCI_ANY_ID, PCI_ANY_ID },
13873 	{ MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2208_4,
13874 		PCI_ANY_ID, PCI_ANY_ID },
13875 	{ MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2208_5,
13876 		PCI_ANY_ID, PCI_ANY_ID },
13877 	{ MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2208_6,
13878 		PCI_ANY_ID, PCI_ANY_ID },
13879 	/* Mustang ~ 2308 */
13880 	{ MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2308_1,
13881 		PCI_ANY_ID, PCI_ANY_ID },
13882 	{ MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2308_2,
13883 		PCI_ANY_ID, PCI_ANY_ID },
13884 	{ MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2308_3,
13885 		PCI_ANY_ID, PCI_ANY_ID },
13886 	{ MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SWITCH_MPI_EP,
13887 		PCI_ANY_ID, PCI_ANY_ID },
13888 	{ MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SWITCH_MPI_EP_1,
13889 		PCI_ANY_ID, PCI_ANY_ID },
13890 	/* SSS6200 */
13891 	{ MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SSS6200,
13892 		PCI_ANY_ID, PCI_ANY_ID },
13893 	/* Fury ~ 3004 and 3008 */
13894 	{ MPI2_MFGPAGE_VENDORID_LSI, MPI25_MFGPAGE_DEVID_SAS3004,
13895 		PCI_ANY_ID, PCI_ANY_ID },
13896 	{ MPI2_MFGPAGE_VENDORID_LSI, MPI25_MFGPAGE_DEVID_SAS3008,
13897 		PCI_ANY_ID, PCI_ANY_ID },
13898 	/* Invader ~ 3108 */
13899 	{ MPI2_MFGPAGE_VENDORID_LSI, MPI25_MFGPAGE_DEVID_SAS3108_1,
13900 		PCI_ANY_ID, PCI_ANY_ID },
13901 	{ MPI2_MFGPAGE_VENDORID_LSI, MPI25_MFGPAGE_DEVID_SAS3108_2,
13902 		PCI_ANY_ID, PCI_ANY_ID },
13903 	{ MPI2_MFGPAGE_VENDORID_LSI, MPI25_MFGPAGE_DEVID_SAS3108_5,
13904 		PCI_ANY_ID, PCI_ANY_ID },
13905 	{ MPI2_MFGPAGE_VENDORID_LSI, MPI25_MFGPAGE_DEVID_SAS3108_6,
13906 		PCI_ANY_ID, PCI_ANY_ID },
13907 	/* Cutlass ~ 3216 and 3224 */
13908 	{ MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3216,
13909 		PCI_ANY_ID, PCI_ANY_ID },
13910 	{ MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3224,
13911 		PCI_ANY_ID, PCI_ANY_ID },
13912 	/* Intruder ~ 3316 and 3324 */
13913 	{ MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3316_1,
13914 		PCI_ANY_ID, PCI_ANY_ID },
13915 	{ MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3316_2,
13916 		PCI_ANY_ID, PCI_ANY_ID },
13917 	{ MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3316_3,
13918 		PCI_ANY_ID, PCI_ANY_ID },
13919 	{ MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3316_4,
13920 		PCI_ANY_ID, PCI_ANY_ID },
13921 	{ MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3324_1,
13922 		PCI_ANY_ID, PCI_ANY_ID },
13923 	{ MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3324_2,
13924 		PCI_ANY_ID, PCI_ANY_ID },
13925 	{ MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3324_3,
13926 		PCI_ANY_ID, PCI_ANY_ID },
13927 	{ MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3324_4,
13928 		PCI_ANY_ID, PCI_ANY_ID },
13929 	/* Ventura, Crusader, Harpoon & Tomcat ~ 3516, 3416, 3508 & 3408*/
13930 	{ MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3508,
13931 		PCI_ANY_ID, PCI_ANY_ID },
13932 	{ MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3508_1,
13933 		PCI_ANY_ID, PCI_ANY_ID },
13934 	{ MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3408,
13935 		PCI_ANY_ID, PCI_ANY_ID },
13936 	{ MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3516,
13937 		PCI_ANY_ID, PCI_ANY_ID },
13938 	{ MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3516_1,
13939 		PCI_ANY_ID, PCI_ANY_ID },
13940 	{ MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3416,
13941 		PCI_ANY_ID, PCI_ANY_ID },
13942 	/* Mercator ~ 3616*/
13943 	{ MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3616,
13944 		PCI_ANY_ID, PCI_ANY_ID },
13945 
13946 	/* Aero SI 0x00E1 Configurable Secure
13947 	 * 0x00E2 Hard Secure
13948 	 */
13949 	{ MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_CFG_SEC_3916,
13950 		PCI_ANY_ID, PCI_ANY_ID },
13951 	{ MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_HARD_SEC_3916,
13952 		PCI_ANY_ID, PCI_ANY_ID },
13953 
13954 	/*
13955 	 *  Aero SI –> 0x00E0 Invalid, 0x00E3 Tampered
13956 	 */
13957 	{ MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_INVALID0_3916,
13958 		PCI_ANY_ID, PCI_ANY_ID },
13959 	{ MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_INVALID1_3916,
13960 		PCI_ANY_ID, PCI_ANY_ID },
13961 
13962 	/* Atlas PCIe Switch Management Port */
13963 	{ MPI2_MFGPAGE_VENDORID_LSI, MPI26_ATLAS_PCIe_SWITCH_DEVID,
13964 		PCI_ANY_ID, PCI_ANY_ID },
13965 
13966 	/* Sea SI 0x00E5 Configurable Secure
13967 	 * 0x00E6 Hard Secure
13968 	 */
13969 	{ MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_CFG_SEC_3816,
13970 		PCI_ANY_ID, PCI_ANY_ID },
13971 	{ MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_HARD_SEC_3816,
13972 		PCI_ANY_ID, PCI_ANY_ID },
13973 
13974 	/*
13975 	 * ATTO Branded ExpressSAS H12xx GT
13976 	 */
13977 	{ MPI2_MFGPAGE_VENDORID_ATTO, MPI26_MFGPAGE_DEVID_HARD_SEC_3816,
13978 		PCI_ANY_ID, PCI_ANY_ID },
13979 
13980 	/*
13981 	 *  Sea SI –> 0x00E4 Invalid, 0x00E7 Tampered
13982 	 */
13983 	{ MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_INVALID0_3816,
13984 		PCI_ANY_ID, PCI_ANY_ID },
13985 	{ MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_INVALID1_3816,
13986 		PCI_ANY_ID, PCI_ANY_ID },
13987 
13988 	{0}     /* Terminating entry */
13989 };
13990 MODULE_DEVICE_TABLE(pci, mpt3sas_pci_table);
13991 
/* PCI AER recovery callbacks (see Documentation/PCI/pci-error-recovery.rst) */
static const struct pci_error_handlers _mpt3sas_err_handler = {
	.error_detected	= scsih_pci_error_detected,
	.mmio_enabled	= scsih_pci_mmio_enabled,
	.slot_reset	= scsih_pci_slot_reset,
	.resume		= scsih_pci_resume,
};

/* System suspend/resume hooks; compiled out when CONFIG_PM is off. */
static SIMPLE_DEV_PM_OPS(scsih_pm_ops, scsih_suspend, scsih_resume);

/* Top-level PCI driver descriptor registered from _mpt3sas_init(). */
static struct pci_driver mpt3sas_driver = {
	.name		= MPT3SAS_DRIVER_NAME,
	.id_table	= mpt3sas_pci_table,
	.probe		= _scsih_probe,
	.remove		= scsih_remove,
	.shutdown	= scsih_shutdown,
	.err_handler	= &_mpt3sas_err_handler,
	.driver.pm	= &scsih_pm_ops,
};
14010 
14011 /**
14012  * scsih_init - main entry point for this driver.
14013  *
14014  * Return: 0 success, anything else error.
14015  */
14016 static int
scsih_init(void)14017 scsih_init(void)
14018 {
14019 	mpt2_ids = 0;
14020 	mpt3_ids = 0;
14021 
14022 	mpt3sas_base_initialize_callback_handler();
14023 
14024 	 /* queuecommand callback hander */
14025 	scsi_io_cb_idx = mpt3sas_base_register_callback_handler(_scsih_io_done);
14026 
14027 	/* task management callback handler */
14028 	tm_cb_idx = mpt3sas_base_register_callback_handler(_scsih_tm_done);
14029 
14030 	/* base internal commands callback handler */
14031 	base_cb_idx = mpt3sas_base_register_callback_handler(mpt3sas_base_done);
14032 	port_enable_cb_idx = mpt3sas_base_register_callback_handler(
14033 	    mpt3sas_port_enable_done);
14034 
14035 	/* transport internal commands callback handler */
14036 	transport_cb_idx = mpt3sas_base_register_callback_handler(
14037 	    mpt3sas_transport_done);
14038 
14039 	/* scsih internal commands callback handler */
14040 	scsih_cb_idx = mpt3sas_base_register_callback_handler(_scsih_done);
14041 
14042 	/* configuration page API internal commands callback handler */
14043 	config_cb_idx = mpt3sas_base_register_callback_handler(
14044 	    mpt3sas_config_done);
14045 
14046 	/* ctl module callback handler */
14047 	ctl_cb_idx = mpt3sas_base_register_callback_handler(mpt3sas_ctl_done);
14048 
14049 	tm_tr_cb_idx = mpt3sas_base_register_callback_handler(
14050 	    _scsih_tm_tr_complete);
14051 
14052 	tm_tr_volume_cb_idx = mpt3sas_base_register_callback_handler(
14053 	    _scsih_tm_volume_tr_complete);
14054 
14055 	tm_sas_control_cb_idx = mpt3sas_base_register_callback_handler(
14056 	    _scsih_sas_control_complete);
14057 
14058 	mpt3sas_init_debugfs();
14059 	return 0;
14060 }
14061 
14062 /**
14063  * scsih_exit - exit point for this driver (when it is a module).
14064  *
14065  * Return: 0 success, anything else error.
14066  */
14067 static void
scsih_exit(void)14068 scsih_exit(void)
14069 {
14070 
14071 	mpt3sas_base_release_callback_handler(scsi_io_cb_idx);
14072 	mpt3sas_base_release_callback_handler(tm_cb_idx);
14073 	mpt3sas_base_release_callback_handler(base_cb_idx);
14074 	mpt3sas_base_release_callback_handler(port_enable_cb_idx);
14075 	mpt3sas_base_release_callback_handler(transport_cb_idx);
14076 	mpt3sas_base_release_callback_handler(scsih_cb_idx);
14077 	mpt3sas_base_release_callback_handler(config_cb_idx);
14078 	mpt3sas_base_release_callback_handler(ctl_cb_idx);
14079 
14080 	mpt3sas_base_release_callback_handler(tm_tr_cb_idx);
14081 	mpt3sas_base_release_callback_handler(tm_tr_volume_cb_idx);
14082 	mpt3sas_base_release_callback_handler(tm_sas_control_cb_idx);
14083 
14084 /* raid transport support */
14085 	if (hbas_to_enumerate != 1)
14086 		raid_class_release(mpt3sas_raid_template);
14087 	if (hbas_to_enumerate != 2)
14088 		raid_class_release(mpt2sas_raid_template);
14089 	sas_release_transport(mpt3sas_transport_template);
14090 	mpt3sas_exit_debugfs();
14091 }
14092 
14093 /**
14094  * _mpt3sas_init - main entry point for this driver.
14095  *
14096  * Return: 0 success, anything else error.
14097  */
14098 static int __init
_mpt3sas_init(void)14099 _mpt3sas_init(void)
14100 {
14101 	int error;
14102 
14103 	pr_info("%s version %s loaded\n", MPT3SAS_DRIVER_NAME,
14104 					MPT3SAS_DRIVER_VERSION);
14105 
14106 	mpt3sas_transport_template =
14107 	    sas_attach_transport(&mpt3sas_transport_functions);
14108 	if (!mpt3sas_transport_template)
14109 		return -ENODEV;
14110 
14111 	/* No need attach mpt3sas raid functions template
14112 	 * if hbas_to_enumarate value is one.
14113 	 */
14114 	if (hbas_to_enumerate != 1) {
14115 		mpt3sas_raid_template =
14116 				raid_class_attach(&mpt3sas_raid_functions);
14117 		if (!mpt3sas_raid_template) {
14118 			sas_release_transport(mpt3sas_transport_template);
14119 			return -ENODEV;
14120 		}
14121 	}
14122 
14123 	/* No need to attach mpt2sas raid functions template
14124 	 * if hbas_to_enumarate value is two
14125 	 */
14126 	if (hbas_to_enumerate != 2) {
14127 		mpt2sas_raid_template =
14128 				raid_class_attach(&mpt2sas_raid_functions);
14129 		if (!mpt2sas_raid_template) {
14130 			sas_release_transport(mpt3sas_transport_template);
14131 			return -ENODEV;
14132 		}
14133 	}
14134 
14135 	error = scsih_init();
14136 	if (error) {
14137 		scsih_exit();
14138 		return error;
14139 	}
14140 
14141 	mpt3sas_ctl_init(hbas_to_enumerate);
14142 
14143 	error = pci_register_driver(&mpt3sas_driver);
14144 	if (error) {
14145 		mpt3sas_ctl_exit(hbas_to_enumerate);
14146 		scsih_exit();
14147 	}
14148 
14149 	return error;
14150 }
14151 
14152 /**
14153  * _mpt3sas_exit - exit point for this driver (when it is a module).
14154  *
14155  */
14156 static void __exit
_mpt3sas_exit(void)14157 _mpt3sas_exit(void)
14158 {
14159 	pr_info("mpt3sas version %s unloading\n",
14160 				MPT3SAS_DRIVER_VERSION);
14161 
14162 	pci_unregister_driver(&mpt3sas_driver);
14163 
14164 	mpt3sas_ctl_exit(hbas_to_enumerate);
14165 
14166 	scsih_exit();
14167 }
14168 
14169 module_init(_mpt3sas_init);
14170 module_exit(_mpt3sas_exit);
14171