xref: /linux/drivers/scsi/smartpqi/smartpqi_init.c (revision 7adf8b1afc14832de099f9e178f08f91dc0dd6d0)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  *    driver for Microchip PQI-based storage controllers
4  *    Copyright (c) 2019-2023 Microchip Technology Inc. and its subsidiaries
5  *    Copyright (c) 2016-2018 Microsemi Corporation
6  *    Copyright (c) 2016 PMC-Sierra, Inc.
7  *
8  *    Questions/Comments/Bugfixes to storagedev@microchip.com
9  *
10  */
11 
12 #include <linux/module.h>
13 #include <linux/kernel.h>
14 #include <linux/pci.h>
15 #include <linux/delay.h>
16 #include <linux/interrupt.h>
17 #include <linux/sched.h>
18 #include <linux/rtc.h>
19 #include <linux/bcd.h>
20 #include <linux/reboot.h>
21 #include <linux/cciss_ioctl.h>
22 #include <linux/crash_dump.h>
23 #include <scsi/scsi_host.h>
24 #include <scsi/scsi_cmnd.h>
25 #include <scsi/scsi_device.h>
26 #include <scsi/scsi_eh.h>
27 #include <scsi/scsi_transport_sas.h>
28 #include <linux/unaligned.h>
29 #include "smartpqi.h"
30 #include "smartpqi_sis.h"
31 
32 #if !defined(BUILD_TIMESTAMP)
33 #define BUILD_TIMESTAMP
34 #endif
35 
36 #define DRIVER_VERSION		"2.1.30-031"
37 #define DRIVER_MAJOR		2
38 #define DRIVER_MINOR		1
39 #define DRIVER_RELEASE		30
40 #define DRIVER_REVISION		31
41 
42 #define DRIVER_NAME		"Microchip SmartPQI Driver (v" \
43 				DRIVER_VERSION BUILD_TIMESTAMP ")"
44 #define DRIVER_NAME_SHORT	"smartpqi"
45 
46 #define PQI_EXTRA_SGL_MEMORY	(12 * sizeof(struct pqi_sg_descriptor))
47 
48 #define PQI_POST_RESET_DELAY_SECS			5
49 #define PQI_POST_OFA_RESET_DELAY_UPON_TIMEOUT_SECS	10
50 
51 #define PQI_NO_COMPLETION	((void *)-1)
52 
53 MODULE_AUTHOR("Microchip");
54 MODULE_DESCRIPTION("Driver for Microchip Smart Family Controller version "
55 	DRIVER_VERSION);
56 MODULE_VERSION(DRIVER_VERSION);
57 MODULE_LICENSE("GPL");
58 
59 struct pqi_cmd_priv {
60 	int this_residual;
61 };
62 
63 static struct pqi_cmd_priv *pqi_cmd_priv(struct scsi_cmnd *cmd)
64 {
65 	return scsi_cmd_priv(cmd);
66 }
67 
68 static void pqi_verify_structures(void);
69 static void pqi_take_ctrl_offline(struct pqi_ctrl_info *ctrl_info,
70 	enum pqi_ctrl_shutdown_reason ctrl_shutdown_reason);
71 static void pqi_ctrl_offline_worker(struct work_struct *work);
72 static int pqi_scan_scsi_devices(struct pqi_ctrl_info *ctrl_info);
73 static void pqi_scan_start(struct Scsi_Host *shost);
74 static void pqi_start_io(struct pqi_ctrl_info *ctrl_info,
75 	struct pqi_queue_group *queue_group, enum pqi_io_path path,
76 	struct pqi_io_request *io_request);
77 static int pqi_submit_raid_request_synchronous(struct pqi_ctrl_info *ctrl_info,
78 	struct pqi_iu_header *request, unsigned int flags,
79 	struct pqi_raid_error_info *error_info);
80 static int pqi_aio_submit_io(struct pqi_ctrl_info *ctrl_info,
81 	struct scsi_cmnd *scmd, u32 aio_handle, u8 *cdb,
82 	unsigned int cdb_length, struct pqi_queue_group *queue_group,
83 	struct pqi_encryption_info *encryption_info, bool raid_bypass, bool io_high_prio);
84 static int pqi_aio_submit_r1_write_io(struct pqi_ctrl_info *ctrl_info,
85 	struct scsi_cmnd *scmd, struct pqi_queue_group *queue_group,
86 	struct pqi_encryption_info *encryption_info, struct pqi_scsi_dev *device,
87 	struct pqi_scsi_dev_raid_map_data *rmd);
88 static int pqi_aio_submit_r56_write_io(struct pqi_ctrl_info *ctrl_info,
89 	struct scsi_cmnd *scmd, struct pqi_queue_group *queue_group,
90 	struct pqi_encryption_info *encryption_info, struct pqi_scsi_dev *device,
91 	struct pqi_scsi_dev_raid_map_data *rmd);
92 static void pqi_ofa_ctrl_quiesce(struct pqi_ctrl_info *ctrl_info);
93 static void pqi_ofa_ctrl_unquiesce(struct pqi_ctrl_info *ctrl_info);
94 static int pqi_ofa_ctrl_restart(struct pqi_ctrl_info *ctrl_info, unsigned int delay_secs);
95 static void pqi_host_setup_buffer(struct pqi_ctrl_info *ctrl_info, struct pqi_host_memory_descriptor *host_memory_descriptor, u32 total_size, u32 min_size);
96 static void pqi_host_free_buffer(struct pqi_ctrl_info *ctrl_info, struct pqi_host_memory_descriptor *host_memory_descriptor);
97 static int pqi_host_memory_update(struct pqi_ctrl_info *ctrl_info, struct pqi_host_memory_descriptor *host_memory_descriptor, u16 function_code);
98 static int pqi_device_wait_for_pending_io(struct pqi_ctrl_info *ctrl_info,
99 	struct pqi_scsi_dev *device, u8 lun, unsigned long timeout_msecs);
100 static void pqi_fail_all_outstanding_requests(struct pqi_ctrl_info *ctrl_info);
101 static void pqi_tmf_worker(struct work_struct *work);
102 
103 /* for flags argument to pqi_submit_raid_request_synchronous() */
104 #define PQI_SYNC_FLAGS_INTERRUPTABLE	0x1
105 
106 static struct scsi_transport_template *pqi_sas_transport_template;
107 
108 static atomic_t pqi_controller_count = ATOMIC_INIT(0);
109 
110 enum pqi_lockup_action {
111 	NONE,
112 	REBOOT,
113 	PANIC
114 };
115 
116 static enum pqi_lockup_action pqi_lockup_action = NONE;
117 
118 static struct {
119 	enum pqi_lockup_action	action;
120 	char			*name;
121 } pqi_lockup_actions[] = {
122 	{
123 		.action = NONE,
124 		.name = "none",
125 	},
126 	{
127 		.action = REBOOT,
128 		.name = "reboot",
129 	},
130 	{
131 		.action = PANIC,
132 		.name = "panic",
133 	},
134 };
135 
136 static unsigned int pqi_supported_event_types[] = {
137 	PQI_EVENT_TYPE_HOTPLUG,
138 	PQI_EVENT_TYPE_HARDWARE,
139 	PQI_EVENT_TYPE_PHYSICAL_DEVICE,
140 	PQI_EVENT_TYPE_LOGICAL_DEVICE,
141 	PQI_EVENT_TYPE_OFA,
142 	PQI_EVENT_TYPE_AIO_STATE_CHANGE,
143 	PQI_EVENT_TYPE_AIO_CONFIG_CHANGE,
144 };
145 
146 static int pqi_disable_device_id_wildcards;
147 module_param_named(disable_device_id_wildcards,
148 	pqi_disable_device_id_wildcards, int, 0644);
149 MODULE_PARM_DESC(disable_device_id_wildcards,
150 	"Disable device ID wildcards.");
151 
152 static int pqi_disable_heartbeat;
153 module_param_named(disable_heartbeat,
154 	pqi_disable_heartbeat, int, 0644);
155 MODULE_PARM_DESC(disable_heartbeat,
156 	"Disable heartbeat.");
157 
158 static int pqi_disable_ctrl_shutdown;
159 module_param_named(disable_ctrl_shutdown,
160 	pqi_disable_ctrl_shutdown, int, 0644);
161 MODULE_PARM_DESC(disable_ctrl_shutdown,
162 	"Disable controller shutdown when controller locked up.");
163 
164 static char *pqi_lockup_action_param;
165 module_param_named(lockup_action,
166 	pqi_lockup_action_param, charp, 0644);
167 MODULE_PARM_DESC(lockup_action, "Action to take when controller locked up.\n"
168 	"\t\tSupported: none, reboot, panic\n"
169 	"\t\tDefault: none");
170 
171 static int pqi_expose_ld_first;
172 module_param_named(expose_ld_first,
173 	pqi_expose_ld_first, int, 0644);
174 MODULE_PARM_DESC(expose_ld_first, "Expose logical drives before physical drives.");
175 
176 static int pqi_hide_vsep;
177 module_param_named(hide_vsep,
178 	pqi_hide_vsep, int, 0644);
179 MODULE_PARM_DESC(hide_vsep, "Hide the virtual SEP for direct attached drives.");
180 
181 static int pqi_disable_managed_interrupts;
182 module_param_named(disable_managed_interrupts,
183 	pqi_disable_managed_interrupts, int, 0644);
184 MODULE_PARM_DESC(disable_managed_interrupts,
185 	"Disable the kernel automatically assigning SMP affinity to IRQs.");
186 
187 static unsigned int pqi_ctrl_ready_timeout_secs;
188 module_param_named(ctrl_ready_timeout,
189 	pqi_ctrl_ready_timeout_secs, uint, 0644);
190 MODULE_PARM_DESC(ctrl_ready_timeout,
191 	"Timeout in seconds for driver to wait for controller ready.");
192 
193 static char *raid_levels[] = {
194 	"RAID-0",
195 	"RAID-4",
196 	"RAID-1(1+0)",
197 	"RAID-5",
198 	"RAID-5+1",
199 	"RAID-6",
200 	"RAID-1(Triple)",
201 };
202 
203 static char *pqi_raid_level_to_string(u8 raid_level)
204 {
205 	if (raid_level < ARRAY_SIZE(raid_levels))
206 		return raid_levels[raid_level];
207 
208 	return "RAID UNKNOWN";
209 }
210 
211 #define SA_RAID_0		0
212 #define SA_RAID_4		1
213 #define SA_RAID_1		2	/* also used for RAID 10 */
214 #define SA_RAID_5		3	/* also used for RAID 50 */
215 #define SA_RAID_51		4
216 #define SA_RAID_6		5	/* also used for RAID 60 */
217 #define SA_RAID_TRIPLE		6	/* also used for RAID 1+0 Triple */
218 #define SA_RAID_MAX		SA_RAID_TRIPLE
219 #define SA_RAID_UNKNOWN		0xff
220 
221 static inline void pqi_scsi_done(struct scsi_cmnd *scmd)
222 {
223 	pqi_prep_for_scsi_done(scmd);
224 	scsi_done(scmd);
225 }
226 
227 static inline void pqi_disable_write_same(struct scsi_device *sdev)
228 {
229 	sdev->no_write_same = 1;
230 }
231 
232 static inline bool pqi_scsi3addr_equal(u8 *scsi3addr1, u8 *scsi3addr2)
233 {
234 	return memcmp(scsi3addr1, scsi3addr2, 8) == 0;
235 }
236 
237 static inline bool pqi_is_logical_device(struct pqi_scsi_dev *device)
238 {
239 	return !device->is_physical_device;
240 }
241 
242 static inline bool pqi_is_external_raid_addr(u8 *scsi3addr)
243 {
244 	return scsi3addr[2] != 0;
245 }
246 
247 static inline bool pqi_ctrl_offline(struct pqi_ctrl_info *ctrl_info)
248 {
249 	return !ctrl_info->controller_online;
250 }
251 
252 static inline void pqi_check_ctrl_health(struct pqi_ctrl_info *ctrl_info)
253 {
254 	if (ctrl_info->controller_online)
255 		if (!sis_is_firmware_running(ctrl_info))
256 			pqi_take_ctrl_offline(ctrl_info, PQI_FIRMWARE_KERNEL_NOT_UP);
257 }
258 
259 static inline bool pqi_is_hba_lunid(u8 *scsi3addr)
260 {
261 	return pqi_scsi3addr_equal(scsi3addr, RAID_CTLR_LUNID);
262 }
263 
264 #define PQI_DRIVER_SCRATCH_PQI_MODE			0x1
265 #define PQI_DRIVER_SCRATCH_FW_TRIAGE_SUPPORTED		0x2
266 
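/*
 * Driver state stashed in the SIS driver scratch register: bit 0 records
 * whether the controller was left in PQI mode (vs. SIS mode), and bit 1
 * records whether firmware triage is supported. The helpers below
 * read-modify-write the register so the two bits stay independent.
 */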
267 static inline enum pqi_ctrl_mode pqi_get_ctrl_mode(struct pqi_ctrl_info *ctrl_info)
268 {
269 	return sis_read_driver_scratch(ctrl_info) & PQI_DRIVER_SCRATCH_PQI_MODE ? PQI_MODE : SIS_MODE;
270 }
271 
272 static inline void pqi_save_ctrl_mode(struct pqi_ctrl_info *ctrl_info,
273 	enum pqi_ctrl_mode mode)
274 {
275 	u32 driver_scratch;
276 
277 	driver_scratch = sis_read_driver_scratch(ctrl_info);
278 
279 	if (mode == PQI_MODE)
280 		driver_scratch |= PQI_DRIVER_SCRATCH_PQI_MODE;
281 	else
282 		driver_scratch &= ~PQI_DRIVER_SCRATCH_PQI_MODE;
283 
284 	sis_write_driver_scratch(ctrl_info, driver_scratch);
285 }
286 
287 static inline bool pqi_is_fw_triage_supported(struct pqi_ctrl_info *ctrl_info)
288 {
289 	return (sis_read_driver_scratch(ctrl_info) & PQI_DRIVER_SCRATCH_FW_TRIAGE_SUPPORTED) != 0;
290 }
291 
292 static inline void pqi_save_fw_triage_setting(struct pqi_ctrl_info *ctrl_info, bool is_supported)
293 {
294 	u32 driver_scratch;
295 
296 	driver_scratch = sis_read_driver_scratch(ctrl_info);
297 
298 	if (is_supported)
299 		driver_scratch |= PQI_DRIVER_SCRATCH_FW_TRIAGE_SUPPORTED;
300 	else
301 		driver_scratch &= ~PQI_DRIVER_SCRATCH_FW_TRIAGE_SUPPORTED;
302 
303 	sis_write_driver_scratch(ctrl_info, driver_scratch);
304 }
305 
306 static inline void pqi_ctrl_block_scan(struct pqi_ctrl_info *ctrl_info)
307 {
308 	ctrl_info->scan_blocked = true;
309 	mutex_lock(&ctrl_info->scan_mutex);
310 }
311 
312 static inline void pqi_ctrl_unblock_scan(struct pqi_ctrl_info *ctrl_info)
313 {
314 	ctrl_info->scan_blocked = false;
315 	mutex_unlock(&ctrl_info->scan_mutex);
316 }
317 
318 static inline bool pqi_ctrl_scan_blocked(struct pqi_ctrl_info *ctrl_info)
319 {
320 	return ctrl_info->scan_blocked;
321 }
322 
323 static inline void pqi_ctrl_block_device_reset(struct pqi_ctrl_info *ctrl_info)
324 {
325 	mutex_lock(&ctrl_info->lun_reset_mutex);
326 }
327 
328 static inline void pqi_ctrl_unblock_device_reset(struct pqi_ctrl_info *ctrl_info)
329 {
330 	mutex_unlock(&ctrl_info->lun_reset_mutex);
331 }
332 
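/*
 * Block new requests from the SCSI midlayer, then poll scsi_host_busy()
 * until every outstanding command has drained. Polling starts at 20 ms
 * per iteration and backs off to 500 ms after ten iterations.
 */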
333 static inline void pqi_scsi_block_requests(struct pqi_ctrl_info *ctrl_info)
334 {
335 	struct Scsi_Host *shost;
336 	unsigned int num_loops;
337 	int msecs_sleep;
338 
339 	shost = ctrl_info->scsi_host;
340 
341 	scsi_block_requests(shost);
342 
343 	num_loops = 0;
344 	msecs_sleep = 20;
345 	while (scsi_host_busy(shost)) {
346 		num_loops++;
347 		if (num_loops == 10)
348 			msecs_sleep = 500;
349 		msleep(msecs_sleep);
350 	}
351 }
352 
353 static inline void pqi_scsi_unblock_requests(struct pqi_ctrl_info *ctrl_info)
354 {
355 	scsi_unblock_requests(ctrl_info->scsi_host);
356 }
357 
358 static inline void pqi_ctrl_busy(struct pqi_ctrl_info *ctrl_info)
359 {
360 	atomic_inc(&ctrl_info->num_busy_threads);
361 }
362 
363 static inline void pqi_ctrl_unbusy(struct pqi_ctrl_info *ctrl_info)
364 {
365 	atomic_dec(&ctrl_info->num_busy_threads);
366 }
367 
368 static inline bool pqi_ctrl_blocked(struct pqi_ctrl_info *ctrl_info)
369 {
370 	return ctrl_info->block_requests;
371 }
372 
373 static inline void pqi_ctrl_block_requests(struct pqi_ctrl_info *ctrl_info)
374 {
375 	ctrl_info->block_requests = true;
376 }
377 
378 static inline void pqi_ctrl_unblock_requests(struct pqi_ctrl_info *ctrl_info)
379 {
380 	ctrl_info->block_requests = false;
381 	wake_up_all(&ctrl_info->block_requests_wait);
382 }
383 
384 static void pqi_wait_if_ctrl_blocked(struct pqi_ctrl_info *ctrl_info)
385 {
386 	if (!pqi_ctrl_blocked(ctrl_info))
387 		return;
388 
389 	atomic_inc(&ctrl_info->num_blocked_threads);
390 	wait_event(ctrl_info->block_requests_wait,
391 		!pqi_ctrl_blocked(ctrl_info));
392 	atomic_dec(&ctrl_info->num_blocked_threads);
393 }
394 
395 #define PQI_QUIESCE_WARNING_TIMEOUT_SECS		10
396 
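/*
 * Wait for in-flight driver activity to quiesce: sleep 1-2 ms per
 * iteration until the count of busy threads no longer exceeds the count
 * of threads blocked waiting, warning every 10 seconds while stuck.
 */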
397 static inline void pqi_ctrl_wait_until_quiesced(struct pqi_ctrl_info *ctrl_info)
398 {
399 	unsigned long start_jiffies;
400 	unsigned long warning_timeout;
401 	bool displayed_warning;
402 
403 	displayed_warning = false;
404 	start_jiffies = jiffies;
405 	warning_timeout = (PQI_QUIESCE_WARNING_TIMEOUT_SECS * HZ) + start_jiffies;
406 
407 	while (atomic_read(&ctrl_info->num_busy_threads) >
408 		atomic_read(&ctrl_info->num_blocked_threads)) {
409 		if (time_after(jiffies, warning_timeout)) {
410 			dev_warn(&ctrl_info->pci_dev->dev,
411 				"waiting %u seconds for driver activity to quiesce\n",
412 				jiffies_to_msecs(jiffies - start_jiffies) / 1000);
413 			displayed_warning = true;
414 			warning_timeout = (PQI_QUIESCE_WARNING_TIMEOUT_SECS * HZ) + jiffies;
415 		}
416 		usleep_range(1000, 2000);
417 	}
418 
419 	if (displayed_warning)
420 		dev_warn(&ctrl_info->pci_dev->dev,
421 			"driver activity quiesced after waiting for %u seconds\n",
422 			jiffies_to_msecs(jiffies - start_jiffies) / 1000);
423 }
424 
425 static inline bool pqi_device_offline(struct pqi_scsi_dev *device)
426 {
427 	return device->device_offline;
428 }
429 
430 static inline void pqi_ctrl_ofa_start(struct pqi_ctrl_info *ctrl_info)
431 {
432 	mutex_lock(&ctrl_info->ofa_mutex);
433 }
434 
435 static inline void pqi_ctrl_ofa_done(struct pqi_ctrl_info *ctrl_info)
436 {
437 	mutex_unlock(&ctrl_info->ofa_mutex);
438 }
439 
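/*
 * OFA (Online Firmware Activation) is serialized by ofa_mutex. Taking and
 * immediately dropping the mutex is a cheap way to wait for any OFA that
 * is currently in progress to finish.
 */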
440 static inline void pqi_wait_until_ofa_finished(struct pqi_ctrl_info *ctrl_info)
441 {
442 	mutex_lock(&ctrl_info->ofa_mutex);
443 	mutex_unlock(&ctrl_info->ofa_mutex);
444 }
445 
446 static inline bool pqi_ofa_in_progress(struct pqi_ctrl_info *ctrl_info)
447 {
448 	return mutex_is_locked(&ctrl_info->ofa_mutex);
449 }
450 
451 static inline void pqi_device_remove_start(struct pqi_scsi_dev *device)
452 {
453 	device->in_remove = true;
454 }
455 
456 static inline bool pqi_device_in_remove(struct pqi_scsi_dev *device)
457 {
458 	return device->in_remove;
459 }
460 
461 static inline void pqi_device_reset_start(struct pqi_scsi_dev *device, u8 lun)
462 {
463 	device->in_reset[lun] = true;
464 }
465 
466 static inline void pqi_device_reset_done(struct pqi_scsi_dev *device, u8 lun)
467 {
468 	device->in_reset[lun] = false;
469 }
470 
471 static inline bool pqi_device_in_reset(struct pqi_scsi_dev *device, u8 lun)
472 {
473 	return device->in_reset[lun];
474 }
475 
476 static inline int pqi_event_type_to_event_index(unsigned int event_type)
477 {
478 	int index;
479 
480 	for (index = 0; index < ARRAY_SIZE(pqi_supported_event_types); index++)
481 		if (event_type == pqi_supported_event_types[index])
482 			return index;
483 
484 	return -1;
485 }
486 
487 static inline bool pqi_is_supported_event(unsigned int event_type)
488 {
489 	return pqi_event_type_to_event_index(event_type) != -1;
490 }
491 
492 static inline void pqi_schedule_rescan_worker_with_delay(struct pqi_ctrl_info *ctrl_info,
493 	unsigned long delay)
494 {
495 	if (pqi_ctrl_offline(ctrl_info))
496 		return;
497 
498 	schedule_delayed_work(&ctrl_info->rescan_work, delay);
499 }
500 
501 static inline void pqi_schedule_rescan_worker(struct pqi_ctrl_info *ctrl_info)
502 {
503 	pqi_schedule_rescan_worker_with_delay(ctrl_info, 0);
504 }
505 
506 #define PQI_RESCAN_WORK_DELAY	(10 * HZ)
507 
508 static inline void pqi_schedule_rescan_worker_delayed(struct pqi_ctrl_info *ctrl_info)
509 {
510 	pqi_schedule_rescan_worker_with_delay(ctrl_info, PQI_RESCAN_WORK_DELAY);
511 }
512 
513 static inline void pqi_cancel_rescan_worker(struct pqi_ctrl_info *ctrl_info)
514 {
515 	cancel_delayed_work_sync(&ctrl_info->rescan_work);
516 }
517 
518 static inline u32 pqi_read_heartbeat_counter(struct pqi_ctrl_info *ctrl_info)
519 {
520 	if (!ctrl_info->heartbeat_counter)
521 		return 0;
522 
523 	return readl(ctrl_info->heartbeat_counter);
524 }
525 
526 static inline u8 pqi_read_soft_reset_status(struct pqi_ctrl_info *ctrl_info)
527 {
528 	return readb(ctrl_info->soft_reset_status);
529 }
530 
531 static inline void pqi_clear_soft_reset_status(struct pqi_ctrl_info *ctrl_info)
532 {
533 	u8 status;
534 
535 	status = pqi_read_soft_reset_status(ctrl_info);
536 	status &= ~PQI_SOFT_RESET_ABORT;
537 	writeb(status, ctrl_info->soft_reset_status);
538 }
539 
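/*
 * An I/O is flagged high priority only when the device has NCQ priority
 * enabled and the request's I/O priority class is IOPRIO_CLASS_RT, and
 * then only for READ/WRITE 6/10/12/16 CDBs.
 */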
540 static inline bool pqi_is_io_high_priority(struct pqi_scsi_dev *device, struct scsi_cmnd *scmd)
541 {
542 	bool io_high_prio;
543 	int priority_class;
544 
545 	io_high_prio = false;
546 
547 	if (device->ncq_prio_enable) {
548 		priority_class =
549 			IOPRIO_PRIO_CLASS(req_get_ioprio(scsi_cmd_to_rq(scmd)));
550 		if (priority_class == IOPRIO_CLASS_RT) {
551 			/* Set NCQ priority for read/write commands. */
552 			switch (scmd->cmnd[0]) {
553 			case WRITE_16:
554 			case READ_16:
555 			case WRITE_12:
556 			case READ_12:
557 			case WRITE_10:
558 			case READ_10:
559 			case WRITE_6:
560 			case READ_6:
561 				io_high_prio = true;
562 				break;
563 			}
564 		}
565 	}
566 
567 	return io_high_prio;
568 }
569 
570 static int pqi_map_single(struct pci_dev *pci_dev,
571 	struct pqi_sg_descriptor *sg_descriptor, void *buffer,
572 	size_t buffer_length, enum dma_data_direction data_direction)
573 {
574 	dma_addr_t bus_address;
575 
576 	if (!buffer || buffer_length == 0 || data_direction == DMA_NONE)
577 		return 0;
578 
579 	bus_address = dma_map_single(&pci_dev->dev, buffer, buffer_length,
580 		data_direction);
581 	if (dma_mapping_error(&pci_dev->dev, bus_address))
582 		return -ENOMEM;
583 
584 	put_unaligned_le64((u64)bus_address, &sg_descriptor->address);
585 	put_unaligned_le32(buffer_length, &sg_descriptor->length);
586 	put_unaligned_le32(CISS_SG_LAST, &sg_descriptor->flags);
587 
588 	return 0;
589 }
590 
591 static void pqi_pci_unmap(struct pci_dev *pci_dev,
592 	struct pqi_sg_descriptor *descriptors, int num_descriptors,
593 	enum dma_data_direction data_direction)
594 {
595 	int i;
596 
597 	if (data_direction == DMA_NONE)
598 		return;
599 
600 	for (i = 0; i < num_descriptors; i++)
601 		dma_unmap_single(&pci_dev->dev,
602 			(dma_addr_t)get_unaligned_le64(&descriptors[i].address),
603 			get_unaligned_le32(&descriptors[i].length),
604 			data_direction);
605 }
606 
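/*
 * Build a RAID-path request IU and the matching CISS/BMIC CDB for the
 * given command, then DMA-map the caller's buffer into sg_descriptors[0].
 * On success, *dir tells the caller which direction to use when unmapping.
 */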
607 static int pqi_build_raid_path_request(struct pqi_ctrl_info *ctrl_info,
608 	struct pqi_raid_path_request *request, u8 cmd,
609 	u8 *scsi3addr, void *buffer, size_t buffer_length,
610 	u16 vpd_page, enum dma_data_direction *dir)
611 {
612 	u8 *cdb;
613 	size_t cdb_length = buffer_length;
614 
615 	memset(request, 0, sizeof(*request));
616 
617 	request->header.iu_type = PQI_REQUEST_IU_RAID_PATH_IO;
618 	put_unaligned_le16(offsetof(struct pqi_raid_path_request,
619 		sg_descriptors[1]) - PQI_REQUEST_HEADER_LENGTH,
620 		&request->header.iu_length);
621 	put_unaligned_le32(buffer_length, &request->buffer_length);
622 	memcpy(request->lun_number, scsi3addr, sizeof(request->lun_number));
623 	request->task_attribute = SOP_TASK_ATTRIBUTE_SIMPLE;
624 	request->additional_cdb_bytes_usage = SOP_ADDITIONAL_CDB_BYTES_0;
625 
626 	cdb = request->cdb;
627 
628 	switch (cmd) {
629 	case INQUIRY:
630 		request->data_direction = SOP_READ_FLAG;
631 		cdb[0] = INQUIRY;
632 		if (vpd_page & VPD_PAGE) {
633 			cdb[1] = 0x1;
634 			cdb[2] = (u8)vpd_page;
635 		}
636 		cdb[4] = (u8)cdb_length;
637 		break;
638 	case CISS_REPORT_LOG:
639 	case CISS_REPORT_PHYS:
640 		request->data_direction = SOP_READ_FLAG;
641 		cdb[0] = cmd;
642 		if (cmd == CISS_REPORT_PHYS) {
643 			if (ctrl_info->rpl_extended_format_4_5_supported)
644 				cdb[1] = CISS_REPORT_PHYS_FLAG_EXTENDED_FORMAT_4;
645 			else
646 				cdb[1] = CISS_REPORT_PHYS_FLAG_EXTENDED_FORMAT_2;
647 		} else {
648 			cdb[1] = ctrl_info->ciss_report_log_flags;
649 		}
650 		put_unaligned_be32(cdb_length, &cdb[6]);
651 		break;
652 	case CISS_GET_RAID_MAP:
653 		request->data_direction = SOP_READ_FLAG;
654 		cdb[0] = CISS_READ;
655 		cdb[1] = CISS_GET_RAID_MAP;
656 		put_unaligned_be32(cdb_length, &cdb[6]);
657 		break;
658 	case SA_FLUSH_CACHE:
659 		request->header.driver_flags = PQI_DRIVER_NONBLOCKABLE_REQUEST;
660 		request->data_direction = SOP_WRITE_FLAG;
661 		cdb[0] = BMIC_WRITE;
662 		cdb[6] = BMIC_FLUSH_CACHE;
663 		put_unaligned_be16(cdb_length, &cdb[7]);
664 		break;
665 	case BMIC_SENSE_DIAG_OPTIONS:
666 		cdb_length = 0;
667 		fallthrough;
668 	case BMIC_IDENTIFY_CONTROLLER:
669 	case BMIC_IDENTIFY_PHYSICAL_DEVICE:
670 	case BMIC_SENSE_SUBSYSTEM_INFORMATION:
671 	case BMIC_SENSE_FEATURE:
672 		request->data_direction = SOP_READ_FLAG;
673 		cdb[0] = BMIC_READ;
674 		cdb[6] = cmd;
675 		put_unaligned_be16(cdb_length, &cdb[7]);
676 		break;
677 	case BMIC_SET_DIAG_OPTIONS:
678 		cdb_length = 0;
679 		fallthrough;
680 	case BMIC_WRITE_HOST_WELLNESS:
681 		request->data_direction = SOP_WRITE_FLAG;
682 		cdb[0] = BMIC_WRITE;
683 		cdb[6] = cmd;
684 		put_unaligned_be16(cdb_length, &cdb[7]);
685 		break;
686 	case BMIC_CSMI_PASSTHRU:
687 		request->data_direction = SOP_BIDIRECTIONAL;
688 		cdb[0] = BMIC_WRITE;
689 		cdb[5] = CSMI_CC_SAS_SMP_PASSTHRU;
690 		cdb[6] = cmd;
691 		put_unaligned_be16(cdb_length, &cdb[7]);
692 		break;
693 	default:
694 		dev_err(&ctrl_info->pci_dev->dev, "unknown command 0x%x\n", cmd);
695 		break;
696 	}
697 
698 	switch (request->data_direction) {
699 	case SOP_READ_FLAG:
700 		*dir = DMA_FROM_DEVICE;
701 		break;
702 	case SOP_WRITE_FLAG:
703 		*dir = DMA_TO_DEVICE;
704 		break;
705 	case SOP_NO_DIRECTION_FLAG:
706 		*dir = DMA_NONE;
707 		break;
708 	default:
709 		*dir = DMA_BIDIRECTIONAL;
710 		break;
711 	}
712 
713 	return pqi_map_single(ctrl_info->pci_dev, &request->sg_descriptors[0],
714 		buffer, buffer_length, *dir);
715 }
716 
717 static inline void pqi_reinit_io_request(struct pqi_io_request *io_request)
718 {
719 	io_request->scmd = NULL;
720 	io_request->status = 0;
721 	io_request->error_info = NULL;
722 	io_request->raid_bypass = false;
723 }
724 
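/*
 * Each slot's refcount doubles as a busy flag: an atomic 0 -> 1 transition
 * claims the slot, anything higher means it is already owned. SML commands
 * index the pool directly by block-layer tag; internal requests take one
 * of the PQI_RESERVED_IO_SLOTS slots at the end of the pool.
 */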
725 static inline struct pqi_io_request *pqi_alloc_io_request(struct pqi_ctrl_info *ctrl_info, struct scsi_cmnd *scmd)
726 {
727 	struct pqi_io_request *io_request;
728 	u16 i;
729 
730 	if (scmd) { /* SML I/O request */
731 		u32 blk_tag = blk_mq_unique_tag(scsi_cmd_to_rq(scmd));
732 
733 		i = blk_mq_unique_tag_to_tag(blk_tag);
734 		io_request = &ctrl_info->io_request_pool[i];
735 		if (atomic_inc_return(&io_request->refcount) > 1) {
736 			atomic_dec(&io_request->refcount);
737 			return NULL;
738 		}
739 	} else { /* IOCTL or driver internal request */
740 		/*
741 		 * benignly racy - may have to wait for an open slot.
742 		 * command slot range is scsi_ml_can_queue -
743 		 *         [scsi_ml_can_queue + (PQI_RESERVED_IO_SLOTS - 1)]
744 		 */
745 		i = 0;
746 		while (1) {
747 			io_request = &ctrl_info->io_request_pool[ctrl_info->scsi_ml_can_queue + i];
748 			if (atomic_inc_return(&io_request->refcount) == 1)
749 				break;
750 			atomic_dec(&io_request->refcount);
751 			i = (i + 1) % PQI_RESERVED_IO_SLOTS;
752 		}
753 	}
754 
755 	if (io_request)
756 		pqi_reinit_io_request(io_request);
757 
758 	return io_request;
759 }
760 
761 static void pqi_free_io_request(struct pqi_io_request *io_request)
762 {
763 	atomic_dec(&io_request->refcount);
764 }
765 
766 static int pqi_send_scsi_raid_request(struct pqi_ctrl_info *ctrl_info, u8 cmd,
767 	u8 *scsi3addr, void *buffer, size_t buffer_length, u16 vpd_page,
768 	struct pqi_raid_error_info *error_info)
769 {
770 	int rc;
771 	struct pqi_raid_path_request request;
772 	enum dma_data_direction dir;
773 
774 	rc = pqi_build_raid_path_request(ctrl_info, &request, cmd, scsi3addr,
775 		buffer, buffer_length, vpd_page, &dir);
776 	if (rc)
777 		return rc;
778 
779 	rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header, 0, error_info);
780 
781 	pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1, dir);
782 
783 	return rc;
784 }
785 
786 /* helper functions for pqi_send_scsi_raid_request */
787 
788 static inline int pqi_send_ctrl_raid_request(struct pqi_ctrl_info *ctrl_info,
789 	u8 cmd, void *buffer, size_t buffer_length)
790 {
791 	return pqi_send_scsi_raid_request(ctrl_info, cmd, RAID_CTLR_LUNID,
792 		buffer, buffer_length, 0, NULL);
793 }
794 
795 static inline int pqi_send_ctrl_raid_with_error(struct pqi_ctrl_info *ctrl_info,
796 	u8 cmd, void *buffer, size_t buffer_length,
797 	struct pqi_raid_error_info *error_info)
798 {
799 	return pqi_send_scsi_raid_request(ctrl_info, cmd, RAID_CTLR_LUNID,
800 		buffer, buffer_length, 0, error_info);
801 }
802 
803 static inline int pqi_identify_controller(struct pqi_ctrl_info *ctrl_info,
804 	struct bmic_identify_controller *buffer)
805 {
806 	return pqi_send_ctrl_raid_request(ctrl_info, BMIC_IDENTIFY_CONTROLLER,
807 		buffer, sizeof(*buffer));
808 }
809 
810 static inline int pqi_sense_subsystem_info(struct pqi_ctrl_info *ctrl_info,
811 	struct bmic_sense_subsystem_info *sense_info)
812 {
813 	return pqi_send_ctrl_raid_request(ctrl_info,
814 		BMIC_SENSE_SUBSYSTEM_INFORMATION, sense_info,
815 		sizeof(*sense_info));
816 }
817 
818 static inline int pqi_scsi_inquiry(struct pqi_ctrl_info *ctrl_info,
819 	u8 *scsi3addr, u16 vpd_page, void *buffer, size_t buffer_length)
820 {
821 	return pqi_send_scsi_raid_request(ctrl_info, INQUIRY, scsi3addr,
822 		buffer, buffer_length, vpd_page, NULL);
823 }
824 
825 static int pqi_identify_physical_device(struct pqi_ctrl_info *ctrl_info,
826 	struct pqi_scsi_dev *device,
827 	struct bmic_identify_physical_device *buffer, size_t buffer_length)
828 {
829 	int rc;
830 	enum dma_data_direction dir;
831 	u16 bmic_device_index;
832 	struct pqi_raid_path_request request;
833 
834 	rc = pqi_build_raid_path_request(ctrl_info, &request,
835 		BMIC_IDENTIFY_PHYSICAL_DEVICE, RAID_CTLR_LUNID, buffer,
836 		buffer_length, 0, &dir);
837 	if (rc)
838 		return rc;
839 
840 	bmic_device_index = CISS_GET_DRIVE_NUMBER(device->scsi3addr);
841 	request.cdb[2] = (u8)bmic_device_index;
842 	request.cdb[9] = (u8)(bmic_device_index >> 8);
843 
844 	rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header, 0, NULL);
845 
846 	pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1, dir);
847 
848 	return rc;
849 }
850 
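/*
 * AIO transfer limits from BMIC_SENSE_FEATURE are reported in 1 KiB units;
 * a raw value of 0 means "no limit" and is translated to ~0 bytes.
 */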
851 static inline u32 pqi_aio_limit_to_bytes(__le16 *limit)
852 {
853 	u32 bytes;
854 
855 	bytes = get_unaligned_le16(limit);
856 	if (bytes == 0)
857 		bytes = ~0;
858 	else
859 		bytes *= 1024;
860 
861 	return bytes;
862 }
863 
864 #pragma pack(1)
865 
866 struct bmic_sense_feature_buffer {
867 	struct bmic_sense_feature_buffer_header header;
868 	struct bmic_sense_feature_io_page_aio_subpage aio_subpage;
869 };
870 
871 #pragma pack()
872 
873 #define MINIMUM_AIO_SUBPAGE_BUFFER_LENGTH	\
874 	offsetofend(struct bmic_sense_feature_buffer, \
875 		aio_subpage.max_write_raid_1_10_3drive)
876 
877 #define MINIMUM_AIO_SUBPAGE_LENGTH	\
878 	(offsetofend(struct bmic_sense_feature_io_page_aio_subpage, \
879 		max_write_raid_1_10_3drive) - \
880 		sizeof_field(struct bmic_sense_feature_io_page_aio_subpage, header))
881 
882 static int pqi_get_advanced_raid_bypass_config(struct pqi_ctrl_info *ctrl_info)
883 {
884 	int rc;
885 	enum dma_data_direction dir;
886 	struct pqi_raid_path_request request;
887 	struct bmic_sense_feature_buffer *buffer;
888 
889 	buffer = kmalloc(sizeof(*buffer), GFP_KERNEL);
890 	if (!buffer)
891 		return -ENOMEM;
892 
893 	rc = pqi_build_raid_path_request(ctrl_info, &request, BMIC_SENSE_FEATURE, RAID_CTLR_LUNID,
894 		buffer, sizeof(*buffer), 0, &dir);
895 	if (rc)
896 		goto error;
897 
898 	request.cdb[2] = BMIC_SENSE_FEATURE_IO_PAGE;
899 	request.cdb[3] = BMIC_SENSE_FEATURE_IO_PAGE_AIO_SUBPAGE;
900 
901 	rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header, 0, NULL);
902 
903 	pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1, dir);
904 
905 	if (rc)
906 		goto error;
907 
908 	if (buffer->header.page_code != BMIC_SENSE_FEATURE_IO_PAGE ||
909 		buffer->header.subpage_code !=
910 			BMIC_SENSE_FEATURE_IO_PAGE_AIO_SUBPAGE ||
911 		get_unaligned_le16(&buffer->header.buffer_length) <
912 			MINIMUM_AIO_SUBPAGE_BUFFER_LENGTH ||
913 		buffer->aio_subpage.header.page_code !=
914 			BMIC_SENSE_FEATURE_IO_PAGE ||
915 		buffer->aio_subpage.header.subpage_code !=
916 			BMIC_SENSE_FEATURE_IO_PAGE_AIO_SUBPAGE ||
917 		get_unaligned_le16(&buffer->aio_subpage.header.page_length) <
918 			MINIMUM_AIO_SUBPAGE_LENGTH) {
919 		goto error;
920 	}
921 
922 	ctrl_info->max_transfer_encrypted_sas_sata =
923 		pqi_aio_limit_to_bytes(
924 			&buffer->aio_subpage.max_transfer_encrypted_sas_sata);
925 
926 	ctrl_info->max_transfer_encrypted_nvme =
927 		pqi_aio_limit_to_bytes(
928 			&buffer->aio_subpage.max_transfer_encrypted_nvme);
929 
930 	ctrl_info->max_write_raid_5_6 =
931 		pqi_aio_limit_to_bytes(
932 			&buffer->aio_subpage.max_write_raid_5_6);
933 
934 	ctrl_info->max_write_raid_1_10_2drive =
935 		pqi_aio_limit_to_bytes(
936 			&buffer->aio_subpage.max_write_raid_1_10_2drive);
937 
938 	ctrl_info->max_write_raid_1_10_3drive =
939 		pqi_aio_limit_to_bytes(
940 			&buffer->aio_subpage.max_write_raid_1_10_3drive);
941 
942 error:
943 	kfree(buffer);
944 
945 	return rc;
946 }
947 
948 static int pqi_flush_cache(struct pqi_ctrl_info *ctrl_info,
949 	enum bmic_flush_cache_shutdown_event shutdown_event)
950 {
951 	int rc;
952 	struct bmic_flush_cache *flush_cache;
953 
954 	flush_cache = kzalloc(sizeof(*flush_cache), GFP_KERNEL);
955 	if (!flush_cache)
956 		return -ENOMEM;
957 
958 	flush_cache->shutdown_event = shutdown_event;
959 
960 	rc = pqi_send_ctrl_raid_request(ctrl_info, SA_FLUSH_CACHE, flush_cache,
961 		sizeof(*flush_cache));
962 
963 	kfree(flush_cache);
964 
965 	return rc;
966 }
967 
968 int pqi_csmi_smp_passthru(struct pqi_ctrl_info *ctrl_info,
969 	struct bmic_csmi_smp_passthru_buffer *buffer, size_t buffer_length,
970 	struct pqi_raid_error_info *error_info)
971 {
972 	return pqi_send_ctrl_raid_with_error(ctrl_info, BMIC_CSMI_PASSTHRU,
973 		buffer, buffer_length, error_info);
974 }
975 
976 #define PQI_FETCH_PTRAID_DATA		(1 << 31)
977 
978 static int pqi_set_diag_rescan(struct pqi_ctrl_info *ctrl_info)
979 {
980 	int rc;
981 	struct bmic_diag_options *diag;
982 
983 	diag = kzalloc(sizeof(*diag), GFP_KERNEL);
984 	if (!diag)
985 		return -ENOMEM;
986 
987 	rc = pqi_send_ctrl_raid_request(ctrl_info, BMIC_SENSE_DIAG_OPTIONS,
988 		diag, sizeof(*diag));
989 	if (rc)
990 		goto out;
991 
992 	diag->options |= cpu_to_le32(PQI_FETCH_PTRAID_DATA);
993 
994 	rc = pqi_send_ctrl_raid_request(ctrl_info, BMIC_SET_DIAG_OPTIONS, diag,
995 		sizeof(*diag));
996 
997 out:
998 	kfree(diag);
999 
1000 	return rc;
1001 }
1002 
1003 static inline int pqi_write_host_wellness(struct pqi_ctrl_info *ctrl_info,
1004 	void *buffer, size_t buffer_length)
1005 {
1006 	return pqi_send_ctrl_raid_request(ctrl_info, BMIC_WRITE_HOST_WELLNESS,
1007 		buffer, buffer_length);
1008 }
1009 
1010 #pragma pack(1)
1011 
1012 struct bmic_host_wellness_driver_version {
1013 	u8	start_tag[4];
1014 	u8	driver_version_tag[2];
1015 	__le16	driver_version_length;
1016 	char	driver_version[32];
1017 	u8	dont_write_tag[2];
1018 	u8	end_tag[2];
1019 };
1020 
1021 #pragma pack()
1022 
1023 static int pqi_write_driver_version_to_host_wellness(
1024 	struct pqi_ctrl_info *ctrl_info)
1025 {
1026 	int rc;
1027 	struct bmic_host_wellness_driver_version *buffer;
1028 	size_t buffer_length;
1029 
1030 	buffer_length = sizeof(*buffer);
1031 
1032 	buffer = kmalloc(buffer_length, GFP_KERNEL);
1033 	if (!buffer)
1034 		return -ENOMEM;
1035 
1036 	buffer->start_tag[0] = '<';
1037 	buffer->start_tag[1] = 'H';
1038 	buffer->start_tag[2] = 'W';
1039 	buffer->start_tag[3] = '>';
1040 	buffer->driver_version_tag[0] = 'D';
1041 	buffer->driver_version_tag[1] = 'V';
1042 	put_unaligned_le16(sizeof(buffer->driver_version),
1043 		&buffer->driver_version_length);
1044 	strscpy(buffer->driver_version, "Linux " DRIVER_VERSION,
1045 		sizeof(buffer->driver_version));
1046 	buffer->dont_write_tag[0] = 'D';
1047 	buffer->dont_write_tag[1] = 'W';
1048 	buffer->end_tag[0] = 'Z';
1049 	buffer->end_tag[1] = 'Z';
1050 
1051 	rc = pqi_write_host_wellness(ctrl_info, buffer, buffer_length);
1052 
1053 	kfree(buffer);
1054 
1055 	return rc;
1056 }
1057 
1058 #pragma pack(1)
1059 
1060 struct bmic_host_wellness_time {
1061 	u8	start_tag[4];
1062 	u8	time_tag[2];
1063 	__le16	time_length;
1064 	u8	time[8];
1065 	u8	dont_write_tag[2];
1066 	u8	end_tag[2];
1067 };
1068 
1069 #pragma pack()
1070 
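/*
 * Report the host's current local time to the controller, BCD-encoded as
 * hour, minute, second, a pad byte, month, day, century, and year within
 * a tagged ("<HW>" ... "DW"/"ZZ") host-wellness buffer.
 */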
1071 static int pqi_write_current_time_to_host_wellness(
1072 	struct pqi_ctrl_info *ctrl_info)
1073 {
1074 	int rc;
1075 	struct bmic_host_wellness_time *buffer;
1076 	size_t buffer_length;
1077 	time64_t local_time;
1078 	unsigned int year;
1079 	struct tm tm;
1080 
1081 	buffer_length = sizeof(*buffer);
1082 
1083 	buffer = kmalloc(buffer_length, GFP_KERNEL);
1084 	if (!buffer)
1085 		return -ENOMEM;
1086 
1087 	buffer->start_tag[0] = '<';
1088 	buffer->start_tag[1] = 'H';
1089 	buffer->start_tag[2] = 'W';
1090 	buffer->start_tag[3] = '>';
1091 	buffer->time_tag[0] = 'T';
1092 	buffer->time_tag[1] = 'D';
1093 	put_unaligned_le16(sizeof(buffer->time),
1094 		&buffer->time_length);
1095 
1096 	local_time = ktime_get_real_seconds();
1097 	time64_to_tm(local_time, -sys_tz.tz_minuteswest * 60, &tm);
1098 	year = tm.tm_year + 1900;
1099 
1100 	buffer->time[0] = bin2bcd(tm.tm_hour);
1101 	buffer->time[1] = bin2bcd(tm.tm_min);
1102 	buffer->time[2] = bin2bcd(tm.tm_sec);
1103 	buffer->time[3] = 0;
1104 	buffer->time[4] = bin2bcd(tm.tm_mon + 1);
1105 	buffer->time[5] = bin2bcd(tm.tm_mday);
1106 	buffer->time[6] = bin2bcd(year / 100);
1107 	buffer->time[7] = bin2bcd(year % 100);
1108 
1109 	buffer->dont_write_tag[0] = 'D';
1110 	buffer->dont_write_tag[1] = 'W';
1111 	buffer->end_tag[0] = 'Z';
1112 	buffer->end_tag[1] = 'Z';
1113 
1114 	rc = pqi_write_host_wellness(ctrl_info, buffer, buffer_length);
1115 
1116 	kfree(buffer);
1117 
1118 	return rc;
1119 }
1120 
1121 #define PQI_UPDATE_TIME_WORK_INTERVAL	(24UL * 60 * 60 * HZ)
1122 
1123 static void pqi_update_time_worker(struct work_struct *work)
1124 {
1125 	int rc;
1126 	struct pqi_ctrl_info *ctrl_info;
1127 
1128 	ctrl_info = container_of(to_delayed_work(work), struct pqi_ctrl_info,
1129 		update_time_work);
1130 
1131 	rc = pqi_write_current_time_to_host_wellness(ctrl_info);
1132 	if (rc)
1133 		dev_warn(&ctrl_info->pci_dev->dev,
1134 			"error updating time on controller\n");
1135 
1136 	schedule_delayed_work(&ctrl_info->update_time_work,
1137 		PQI_UPDATE_TIME_WORK_INTERVAL);
1138 }
1139 
1140 static inline void pqi_schedule_update_time_worker(struct pqi_ctrl_info *ctrl_info)
1141 {
1142 	schedule_delayed_work(&ctrl_info->update_time_work, 0);
1143 }
1144 
1145 static inline void pqi_cancel_update_time_worker(struct pqi_ctrl_info *ctrl_info)
1146 {
1147 	cancel_delayed_work_sync(&ctrl_info->update_time_work);
1148 }
1149 
1150 static inline int pqi_report_luns(struct pqi_ctrl_info *ctrl_info, u8 cmd, void *buffer,
1151 	size_t buffer_length)
1152 {
1153 	return pqi_send_ctrl_raid_request(ctrl_info, cmd, buffer, buffer_length);
1154 }
1155 
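/*
 * Issue CISS_REPORT_PHYS/LOG twice: first with a header-sized buffer just
 * to learn the list length, then with a buffer sized for the full list.
 * If the list grew between the two commands, reallocate and retry.
 */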
1156 static int pqi_report_phys_logical_luns(struct pqi_ctrl_info *ctrl_info, u8 cmd, void **buffer)
1157 {
1158 	int rc;
1159 	size_t lun_list_length;
1160 	size_t lun_data_length;
1161 	size_t new_lun_list_length;
1162 	void *lun_data = NULL;
1163 	struct report_lun_header *report_lun_header;
1164 
1165 	report_lun_header = kmalloc(sizeof(*report_lun_header), GFP_KERNEL);
1166 	if (!report_lun_header) {
1167 		rc = -ENOMEM;
1168 		goto out;
1169 	}
1170 
1171 	rc = pqi_report_luns(ctrl_info, cmd, report_lun_header, sizeof(*report_lun_header));
1172 	if (rc)
1173 		goto out;
1174 
1175 	lun_list_length = get_unaligned_be32(&report_lun_header->list_length);
1176 
1177 again:
1178 	lun_data_length = sizeof(struct report_lun_header) + lun_list_length;
1179 
1180 	lun_data = kmalloc(lun_data_length, GFP_KERNEL);
1181 	if (!lun_data) {
1182 		rc = -ENOMEM;
1183 		goto out;
1184 	}
1185 
1186 	if (lun_list_length == 0) {
1187 		memcpy(lun_data, report_lun_header, sizeof(*report_lun_header));
1188 		goto out;
1189 	}
1190 
1191 	rc = pqi_report_luns(ctrl_info, cmd, lun_data, lun_data_length);
1192 	if (rc)
1193 		goto out;
1194 
1195 	new_lun_list_length =
1196 		get_unaligned_be32(&((struct report_lun_header *)lun_data)->list_length);
1197 
1198 	if (new_lun_list_length > lun_list_length) {
1199 		lun_list_length = new_lun_list_length;
1200 		kfree(lun_data);
1201 		goto again;
1202 	}
1203 
1204 out:
1205 	kfree(report_lun_header);
1206 
1207 	if (rc) {
1208 		kfree(lun_data);
1209 		lun_data = NULL;
1210 	}
1211 
1212 	*buffer = lun_data;
1213 
1214 	return rc;
1215 }
1216 
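/*
 * Controllers that lack extended format 4 return physical LUNs with 8-byte
 * WWIDs (format 2). Convert that list to the 16-byte WWID layout the rest
 * of the driver expects, zero-filling the upper 8 bytes of each WWID.
 */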
1217 static inline int pqi_report_phys_luns(struct pqi_ctrl_info *ctrl_info, void **buffer)
1218 {
1219 	int rc;
1220 	unsigned int i;
1221 	u8 rpl_response_format;
1222 	u32 num_physicals;
1223 	void *rpl_list;
1224 	struct report_lun_header *rpl_header;
1225 	struct report_phys_lun_8byte_wwid_list *rpl_8byte_wwid_list;
1226 	struct report_phys_lun_16byte_wwid_list *rpl_16byte_wwid_list;
1227 
1228 	rc = pqi_report_phys_logical_luns(ctrl_info, CISS_REPORT_PHYS, &rpl_list);
1229 	if (rc)
1230 		return rc;
1231 
1232 	if (ctrl_info->rpl_extended_format_4_5_supported) {
1233 		rpl_header = rpl_list;
1234 		rpl_response_format = rpl_header->flags & CISS_REPORT_PHYS_FLAG_EXTENDED_FORMAT_MASK;
1235 		if (rpl_response_format == CISS_REPORT_PHYS_FLAG_EXTENDED_FORMAT_4) {
1236 			*buffer = rpl_list;
1237 			return 0;
1238 		} else if (rpl_response_format != CISS_REPORT_PHYS_FLAG_EXTENDED_FORMAT_2) {
1239 			dev_err(&ctrl_info->pci_dev->dev,
1240 				"RPL returned unsupported data format %u\n",
1241 				rpl_response_format);
1242 			return -EINVAL;
1243 		} else {
1244 			dev_warn(&ctrl_info->pci_dev->dev,
1245 				"RPL returned extended format 2 instead of 4\n");
1246 		}
1247 	}
1248 
1249 	rpl_8byte_wwid_list = rpl_list;
1250 	num_physicals = get_unaligned_be32(&rpl_8byte_wwid_list->header.list_length) / sizeof(rpl_8byte_wwid_list->lun_entries[0]);
1251 
1252 	rpl_16byte_wwid_list = kmalloc(struct_size(rpl_16byte_wwid_list, lun_entries,
1253 						   num_physicals), GFP_KERNEL);
1254 	if (!rpl_16byte_wwid_list)
1255 		return -ENOMEM;
1256 
1257 	put_unaligned_be32(num_physicals * sizeof(struct report_phys_lun_16byte_wwid),
1258 		&rpl_16byte_wwid_list->header.list_length);
1259 	rpl_16byte_wwid_list->header.flags = rpl_8byte_wwid_list->header.flags;
1260 
1261 	for (i = 0; i < num_physicals; i++) {
1262 		memcpy(&rpl_16byte_wwid_list->lun_entries[i].lunid, &rpl_8byte_wwid_list->lun_entries[i].lunid, sizeof(rpl_8byte_wwid_list->lun_entries[i].lunid));
1263 		memcpy(&rpl_16byte_wwid_list->lun_entries[i].wwid[0], &rpl_8byte_wwid_list->lun_entries[i].wwid, sizeof(rpl_8byte_wwid_list->lun_entries[i].wwid));
1264 		memset(&rpl_16byte_wwid_list->lun_entries[i].wwid[8], 0, 8);
1265 		rpl_16byte_wwid_list->lun_entries[i].device_type = rpl_8byte_wwid_list->lun_entries[i].device_type;
1266 		rpl_16byte_wwid_list->lun_entries[i].device_flags = rpl_8byte_wwid_list->lun_entries[i].device_flags;
1267 		rpl_16byte_wwid_list->lun_entries[i].lun_count = rpl_8byte_wwid_list->lun_entries[i].lun_count;
1268 		rpl_16byte_wwid_list->lun_entries[i].redundant_paths = rpl_8byte_wwid_list->lun_entries[i].redundant_paths;
1269 		rpl_16byte_wwid_list->lun_entries[i].aio_handle = rpl_8byte_wwid_list->lun_entries[i].aio_handle;
1270 	}
1271 
1272 	kfree(rpl_8byte_wwid_list);
1273 	*buffer = rpl_16byte_wwid_list;
1274 
1275 	return 0;
1276 }
1277 
1278 static inline int pqi_report_logical_luns(struct pqi_ctrl_info *ctrl_info, void **buffer)
1279 {
1280 	return pqi_report_phys_logical_luns(ctrl_info, CISS_REPORT_LOG, buffer);
1281 }
1282 
1283 static int pqi_get_device_lists(struct pqi_ctrl_info *ctrl_info,
1284 	struct report_phys_lun_16byte_wwid_list **physdev_list,
1285 	struct report_log_lun_list **logdev_list)
1286 {
1287 	int rc;
1288 	size_t logdev_list_length;
1289 	size_t logdev_data_length;
1290 	struct report_log_lun_list *internal_logdev_list;
1291 	struct report_log_lun_list *logdev_data;
1292 	struct report_lun_header report_lun_header;
1293 
1294 	rc = pqi_report_phys_luns(ctrl_info, (void **)physdev_list);
1295 	if (rc)
1296 		dev_err(&ctrl_info->pci_dev->dev,
1297 			"report physical LUNs failed\n");
1298 
1299 	rc = pqi_report_logical_luns(ctrl_info, (void **)logdev_list);
1300 	if (rc)
1301 		dev_err(&ctrl_info->pci_dev->dev,
1302 			"report logical LUNs failed\n");
1303 
1304 	/*
1305 	 * Tack the controller itself onto the end of the logical device list
1306 	 * by adding a list entry that is all zeros.
1307 	 */
1308 
1309 	logdev_data = *logdev_list;
1310 
1311 	if (logdev_data) {
1312 		logdev_list_length =
1313 			get_unaligned_be32(&logdev_data->header.list_length);
1314 	} else {
1315 		memset(&report_lun_header, 0, sizeof(report_lun_header));
1316 		logdev_data =
1317 			(struct report_log_lun_list *)&report_lun_header;
1318 		logdev_list_length = 0;
1319 	}
1320 
1321 	logdev_data_length = sizeof(struct report_lun_header) +
1322 		logdev_list_length;
1323 
1324 	internal_logdev_list = kmalloc(logdev_data_length +
1325 		sizeof(struct report_log_lun), GFP_KERNEL);
1326 	if (!internal_logdev_list) {
1327 		kfree(*logdev_list);
1328 		*logdev_list = NULL;
1329 		return -ENOMEM;
1330 	}
1331 
1332 	memcpy(internal_logdev_list, logdev_data, logdev_data_length);
1333 	memset((u8 *)internal_logdev_list + logdev_data_length, 0,
1334 		sizeof(struct report_log_lun));
1335 	put_unaligned_be32(logdev_list_length +
1336 		sizeof(struct report_log_lun),
1337 		&internal_logdev_list->header.list_length);
1338 
1339 	kfree(*logdev_list);
1340 	*logdev_list = internal_logdev_list;
1341 
1342 	return 0;
1343 }
1344 
1345 static inline void pqi_set_bus_target_lun(struct pqi_scsi_dev *device,
1346 	int bus, int target, int lun)
1347 {
1348 	device->bus = bus;
1349 	device->target = target;
1350 	device->lun = lun;
1351 }
1352 
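/*
 * Derive bus/target/lun from the 4-byte LUN id: the controller itself goes
 * on the HBA bus, logical volumes go on a RAID volume bus with target/lun
 * unpacked from the id, and physical devices are left for the SAS
 * transport layer to place.
 */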
1353 static void pqi_assign_bus_target_lun(struct pqi_scsi_dev *device)
1354 {
1355 	u8 *scsi3addr;
1356 	u32 lunid;
1357 	int bus;
1358 	int target;
1359 	int lun;
1360 
1361 	scsi3addr = device->scsi3addr;
1362 	lunid = get_unaligned_le32(scsi3addr);
1363 
1364 	if (pqi_is_hba_lunid(scsi3addr)) {
1365 		/* The specified device is the controller. */
1366 		pqi_set_bus_target_lun(device, PQI_HBA_BUS, 0, lunid & 0x3fff);
1367 		device->target_lun_valid = true;
1368 		return;
1369 	}
1370 
1371 	if (pqi_is_logical_device(device)) {
1372 		if (device->is_external_raid_device) {
1373 			bus = PQI_EXTERNAL_RAID_VOLUME_BUS;
1374 			target = (lunid >> 16) & 0x3fff;
1375 			lun = lunid & 0xff;
1376 		} else {
1377 			bus = PQI_RAID_VOLUME_BUS;
1378 			target = 0;
1379 			lun = lunid & 0x3fff;
1380 		}
1381 		pqi_set_bus_target_lun(device, bus, target, lun);
1382 		device->target_lun_valid = true;
1383 		return;
1384 	}
1385 
1386 	/*
1387 	 * Defer target and LUN assignment for non-controller physical devices
1388 	 * because the SAS transport layer will make these assignments later.
1389 	 */
1390 	pqi_set_bus_target_lun(device, PQI_PHYSICAL_DEVICE_BUS, 0, 0);
1391 }
1392 
1393 static void pqi_get_raid_level(struct pqi_ctrl_info *ctrl_info,
1394 	struct pqi_scsi_dev *device)
1395 {
1396 	int rc;
1397 	u8 raid_level;
1398 	u8 *buffer;
1399 
1400 	raid_level = SA_RAID_UNKNOWN;
1401 
1402 	buffer = kmalloc(64, GFP_KERNEL);
1403 	if (buffer) {
1404 		rc = pqi_scsi_inquiry(ctrl_info, device->scsi3addr,
1405 			VPD_PAGE | CISS_VPD_LV_DEVICE_GEOMETRY, buffer, 64);
1406 		if (rc == 0) {
1407 			raid_level = buffer[8];
1408 			if (raid_level > SA_RAID_MAX)
1409 				raid_level = SA_RAID_UNKNOWN;
1410 		}
1411 		kfree(buffer);
1412 	}
1413 
1414 	device->raid_level = raid_level;
1415 }
1416 
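/*
 * Sanity-check a RAID map before trusting it for bypass I/O: the structure
 * must be large enough to hold the fields preceding disk_data,
 * layout_map_count must match the RAID level (2 for RAID-1, 3 for
 * RAID-1 Triple), and RAID-50/60 maps must describe a non-zero row size.
 */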
1417 static int pqi_validate_raid_map(struct pqi_ctrl_info *ctrl_info,
1418 	struct pqi_scsi_dev *device, struct raid_map *raid_map)
1419 {
1420 	char *err_msg;
1421 	u32 raid_map_size;
1422 	u32 r5or6_blocks_per_row;
1423 
1424 	raid_map_size = get_unaligned_le32(&raid_map->structure_size);
1425 
1426 	if (raid_map_size < offsetof(struct raid_map, disk_data)) {
1427 		err_msg = "RAID map too small";
1428 		goto bad_raid_map;
1429 	}
1430 
1431 	if (device->raid_level == SA_RAID_1) {
1432 		if (get_unaligned_le16(&raid_map->layout_map_count) != 2) {
1433 			err_msg = "invalid RAID-1 map";
1434 			goto bad_raid_map;
1435 		}
1436 	} else if (device->raid_level == SA_RAID_TRIPLE) {
1437 		if (get_unaligned_le16(&raid_map->layout_map_count) != 3) {
1438 			err_msg = "invalid RAID-1(Triple) map";
1439 			goto bad_raid_map;
1440 		}
1441 	} else if ((device->raid_level == SA_RAID_5 ||
1442 		device->raid_level == SA_RAID_6) &&
1443 		get_unaligned_le16(&raid_map->layout_map_count) > 1) {
1444 		/* RAID 50/60 */
1445 		r5or6_blocks_per_row =
1446 			get_unaligned_le16(&raid_map->strip_size) *
1447 			get_unaligned_le16(&raid_map->data_disks_per_row);
1448 		if (r5or6_blocks_per_row == 0) {
1449 			err_msg = "invalid RAID-5 or RAID-6 map";
1450 			goto bad_raid_map;
1451 		}
1452 	}
1453 
1454 	return 0;
1455 
1456 bad_raid_map:
1457 	dev_warn(&ctrl_info->pci_dev->dev,
1458 		"logical device %08x%08x %s\n",
1459 		*((u32 *)&device->scsi3addr),
1460 		*((u32 *)&device->scsi3addr[4]), err_msg);
1461 
1462 	return -EINVAL;
1463 }
1464 
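/*
 * Fetch the RAID map using a default-sized buffer first; if the firmware
 * reports a larger structure_size, reallocate and fetch again, failing if
 * the reported size still does not match on the second read.
 */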
1465 static int pqi_get_raid_map(struct pqi_ctrl_info *ctrl_info,
1466 	struct pqi_scsi_dev *device)
1467 {
1468 	int rc;
1469 	u32 raid_map_size;
1470 	struct raid_map *raid_map;
1471 
1472 	raid_map = kmalloc(sizeof(*raid_map), GFP_KERNEL);
1473 	if (!raid_map)
1474 		return -ENOMEM;
1475 
1476 	rc = pqi_send_scsi_raid_request(ctrl_info, CISS_GET_RAID_MAP,
1477 		device->scsi3addr, raid_map, sizeof(*raid_map), 0, NULL);
1478 	if (rc)
1479 		goto error;
1480 
1481 	raid_map_size = get_unaligned_le32(&raid_map->structure_size);
1482 
1483 	if (raid_map_size > sizeof(*raid_map)) {
1484 
1485 		kfree(raid_map);
1486 
1487 		raid_map = kmalloc(raid_map_size, GFP_KERNEL);
1488 		if (!raid_map)
1489 			return -ENOMEM;
1490 
1491 		rc = pqi_send_scsi_raid_request(ctrl_info, CISS_GET_RAID_MAP,
1492 			device->scsi3addr, raid_map, raid_map_size, 0, NULL);
1493 		if (rc)
1494 			goto error;
1495 
1496 		if (get_unaligned_le32(&raid_map->structure_size)
1497 			!= raid_map_size) {
1498 			dev_warn(&ctrl_info->pci_dev->dev,
1499 				"requested %u bytes, received %u bytes\n",
1500 				raid_map_size,
1501 				get_unaligned_le32(&raid_map->structure_size));
1502 			rc = -EINVAL;
1503 			goto error;
1504 		}
1505 	}
1506 
1507 	rc = pqi_validate_raid_map(ctrl_info, device, raid_map);
1508 	if (rc)
1509 		goto error;
1510 
1511 	device->raid_io_stats = alloc_percpu(struct pqi_raid_io_stats);
1512 	if (!device->raid_io_stats) {
1513 		rc = -ENOMEM;
1514 		goto error;
1515 	}
1516 
1517 	device->raid_map = raid_map;
1518 
1519 	return 0;
1520 
1521 error:
1522 	kfree(raid_map);
1523 
1524 	return rc;
1525 }
1526 
1527 static void pqi_set_max_transfer_encrypted(struct pqi_ctrl_info *ctrl_info,
1528 	struct pqi_scsi_dev *device)
1529 {
1530 	if (!ctrl_info->lv_drive_type_mix_valid) {
1531 		device->max_transfer_encrypted = ~0;
1532 		return;
1533 	}
1534 
1535 	switch (LV_GET_DRIVE_TYPE_MIX(device->scsi3addr)) {
1536 	case LV_DRIVE_TYPE_MIX_SAS_HDD_ONLY:
1537 	case LV_DRIVE_TYPE_MIX_SATA_HDD_ONLY:
1538 	case LV_DRIVE_TYPE_MIX_SAS_OR_SATA_SSD_ONLY:
1539 	case LV_DRIVE_TYPE_MIX_SAS_SSD_ONLY:
1540 	case LV_DRIVE_TYPE_MIX_SATA_SSD_ONLY:
1541 	case LV_DRIVE_TYPE_MIX_SAS_ONLY:
1542 	case LV_DRIVE_TYPE_MIX_SATA_ONLY:
1543 		device->max_transfer_encrypted =
1544 			ctrl_info->max_transfer_encrypted_sas_sata;
1545 		break;
1546 	case LV_DRIVE_TYPE_MIX_NVME_ONLY:
1547 		device->max_transfer_encrypted =
1548 			ctrl_info->max_transfer_encrypted_nvme;
1549 		break;
1550 	case LV_DRIVE_TYPE_MIX_UNKNOWN:
1551 	case LV_DRIVE_TYPE_MIX_NO_RESTRICTION:
1552 	default:
1553 		device->max_transfer_encrypted =
1554 			min(ctrl_info->max_transfer_encrypted_sas_sata,
1555 				ctrl_info->max_transfer_encrypted_nvme);
1556 		break;
1557 	}
1558 }
1559 
1560 static void pqi_get_raid_bypass_status(struct pqi_ctrl_info *ctrl_info,
1561 	struct pqi_scsi_dev *device)
1562 {
1563 	int rc;
1564 	u8 *buffer;
1565 	u8 bypass_status;
1566 
1567 	buffer = kmalloc(64, GFP_KERNEL);
1568 	if (!buffer)
1569 		return;
1570 
1571 	rc = pqi_scsi_inquiry(ctrl_info, device->scsi3addr,
1572 		VPD_PAGE | CISS_VPD_LV_BYPASS_STATUS, buffer, 64);
1573 	if (rc)
1574 		goto out;
1575 
1576 #define RAID_BYPASS_STATUS		4
1577 #define RAID_BYPASS_CONFIGURED		0x1
1578 #define RAID_BYPASS_ENABLED		0x2
1579 
1580 	bypass_status = buffer[RAID_BYPASS_STATUS];
1581 	device->raid_bypass_configured =
1582 		(bypass_status & RAID_BYPASS_CONFIGURED) != 0;
1583 	if (device->raid_bypass_configured &&
1584 		(bypass_status & RAID_BYPASS_ENABLED) &&
1585 		pqi_get_raid_map(ctrl_info, device) == 0) {
1586 		device->raid_bypass_enabled = true;
1587 		if (get_unaligned_le16(&device->raid_map->flags) &
1588 			RAID_MAP_ENCRYPTION_ENABLED)
1589 			pqi_set_max_transfer_encrypted(ctrl_info, device);
1590 	}
1591 
1592 out:
1593 	kfree(buffer);
1594 }
1595 
1596 /*
1597  * Use vendor-specific VPD to determine online/offline status of a volume.
1598  */
1599 
1600 static void pqi_get_volume_status(struct pqi_ctrl_info *ctrl_info,
1601 	struct pqi_scsi_dev *device)
1602 {
1603 	int rc;
1604 	size_t page_length;
1605 	u8 volume_status = CISS_LV_STATUS_UNAVAILABLE;
1606 	bool volume_offline = true;
1607 	u32 volume_flags;
1608 	struct ciss_vpd_logical_volume_status *vpd;
1609 
1610 	vpd = kmalloc(sizeof(*vpd), GFP_KERNEL);
1611 	if (!vpd)
1612 		goto no_buffer;
1613 
1614 	rc = pqi_scsi_inquiry(ctrl_info, device->scsi3addr,
1615 		VPD_PAGE | CISS_VPD_LV_STATUS, vpd, sizeof(*vpd));
1616 	if (rc)
1617 		goto out;
1618 
1619 	if (vpd->page_code != CISS_VPD_LV_STATUS)
1620 		goto out;
1621 
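	/* Make sure the device returned the complete LV status structure. */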
1622 	page_length = offsetof(struct ciss_vpd_logical_volume_status,
1623 		volume_status) + vpd->page_length;
1624 	if (page_length < sizeof(*vpd))
1625 		goto out;
1626 
1627 	volume_status = vpd->volume_status;
1628 	volume_flags = get_unaligned_be32(&vpd->flags);
1629 	volume_offline = (volume_flags & CISS_LV_FLAGS_NO_HOST_IO) != 0;
1630 
1631 out:
1632 	kfree(vpd);
1633 no_buffer:
1634 	device->volume_status = volume_status;
1635 	device->volume_offline = volume_offline;
1636 }
1637 
1638 #define PQI_DEVICE_NCQ_PRIO_SUPPORTED	0x01
1639 #define PQI_DEVICE_PHY_MAP_SUPPORTED	0x10
1640 #define PQI_DEVICE_ERASE_IN_PROGRESS	0x10
1641 
1642 static int pqi_get_physical_device_info(struct pqi_ctrl_info *ctrl_info,
1643 	struct pqi_scsi_dev *device,
1644 	struct bmic_identify_physical_device *id_phys)
1645 {
1646 	int rc;
1647 
1648 	memset(id_phys, 0, sizeof(*id_phys));
1649 
1650 	rc = pqi_identify_physical_device(ctrl_info, device,
1651 		id_phys, sizeof(*id_phys));
1652 	if (rc) {
1653 		device->queue_depth = PQI_PHYSICAL_DISK_DEFAULT_MAX_QUEUE_DEPTH;
1654 		return rc;
1655 	}
1656 
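	/* The BMIC model field holds the vendor (8 bytes) followed by the model (16 bytes). */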
1657 	scsi_sanitize_inquiry_string(&id_phys->model[0], 8);
1658 	scsi_sanitize_inquiry_string(&id_phys->model[8], 16);
1659 
1660 	memcpy(device->vendor, &id_phys->model[0], sizeof(device->vendor));
1661 	memcpy(device->model, &id_phys->model[8], sizeof(device->model));
1662 
1663 	device->box_index = id_phys->box_index;
1664 	device->phys_box_on_bus = id_phys->phys_box_on_bus;
1665 	device->phy_connected_dev_type = id_phys->phy_connected_dev_type[0];
1666 	device->queue_depth =
1667 		get_unaligned_le16(&id_phys->current_queue_depth_limit);
1668 	device->active_path_index = id_phys->active_path_number;
1669 	device->path_map = id_phys->redundant_path_present_map;
1670 	memcpy(&device->box,
1671 		&id_phys->alternate_paths_phys_box_on_port,
1672 		sizeof(device->box));
1673 	memcpy(&device->phys_connector,
1674 		&id_phys->alternate_paths_phys_connector,
1675 		sizeof(device->phys_connector));
1676 	device->bay = id_phys->phys_bay_in_box;
1677 	device->lun_count = id_phys->multi_lun_device_lun_count;
1678 	if ((id_phys->even_more_flags & PQI_DEVICE_PHY_MAP_SUPPORTED) &&
1679 		id_phys->phy_count)
1680 		device->phy_id =
1681 			id_phys->phy_to_phy_map[device->active_path_index];
1682 	else
1683 		device->phy_id = 0xFF;
1684 
1685 	device->ncq_prio_support =
1686 		((get_unaligned_le32(&id_phys->misc_drive_flags) >> 16) &
1687 		PQI_DEVICE_NCQ_PRIO_SUPPORTED);
1688 
1689 	device->erase_in_progress = !!(get_unaligned_le16(&id_phys->extra_physical_drive_flags) & PQI_DEVICE_ERASE_IN_PROGRESS);
1690 
1691 	return 0;
1692 }
1693 
1694 static int pqi_get_logical_device_info(struct pqi_ctrl_info *ctrl_info,
1695 	struct pqi_scsi_dev *device)
1696 {
1697 	int rc;
1698 	u8 *buffer;
1699 
1700 	buffer = kmalloc(64, GFP_KERNEL);
1701 	if (!buffer)
1702 		return -ENOMEM;
1703 
1704 	/* Send an inquiry to the device to see what it is. */
1705 	rc = pqi_scsi_inquiry(ctrl_info, device->scsi3addr, 0, buffer, 64);
1706 	if (rc)
1707 		goto out;
1708 
1709 	scsi_sanitize_inquiry_string(&buffer[8], 8);
1710 	scsi_sanitize_inquiry_string(&buffer[16], 16);
1711 
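	/* The low 5 bits of INQUIRY byte 0 are the peripheral device type. */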
1712 	device->devtype = buffer[0] & 0x1f;
1713 	memcpy(device->vendor, &buffer[8], sizeof(device->vendor));
1714 	memcpy(device->model, &buffer[16], sizeof(device->model));
1715 
1716 	if (device->devtype == TYPE_DISK) {
1717 		if (device->is_external_raid_device) {
1718 			device->raid_level = SA_RAID_UNKNOWN;
1719 			device->volume_status = CISS_LV_OK;
1720 			device->volume_offline = false;
1721 		} else {
1722 			pqi_get_raid_level(ctrl_info, device);
1723 			pqi_get_raid_bypass_status(ctrl_info, device);
1724 			pqi_get_volume_status(ctrl_info, device);
1725 		}
1726 	}
1727 
1728 out:
1729 	kfree(buffer);
1730 
1731 	return rc;
1732 }
1733 
1734 /*
1735  * Prevent adding a drive to the OS for some corner cases, such as a drive
1736  * undergoing a sanitize (erase) operation. Some OSes will continue to poll
1737  * the drive until the sanitize completes, which can take hours,
1738  * resulting in long bootup delays. Commands such as TUR, READ_CAP
1739  * are allowed, but READ/WRITE cause a check condition, so the OS
1740  * cannot check/read the partition table.
1741  * Note: devices that have completed sanitize must be re-enabled
1742  *       using the management utility.
1743  */
1744 static inline bool pqi_keep_device_offline(struct pqi_scsi_dev *device)
1745 {
1746 	return device->erase_in_progress;
1747 }
1748 
1749 static int pqi_get_device_info_phys_logical(struct pqi_ctrl_info *ctrl_info,
1750 	struct pqi_scsi_dev *device,
1751 	struct bmic_identify_physical_device *id_phys)
1752 {
1753 	int rc;
1754 
1755 	if (device->is_expander_smp_device)
1756 		return 0;
1757 
1758 	if (pqi_is_logical_device(device))
1759 		rc = pqi_get_logical_device_info(ctrl_info, device);
1760 	else
1761 		rc = pqi_get_physical_device_info(ctrl_info, device, id_phys);
1762 
1763 	return rc;
1764 }
1765 
1766 static int pqi_get_device_info(struct pqi_ctrl_info *ctrl_info,
1767 	struct pqi_scsi_dev *device,
1768 	struct bmic_identify_physical_device *id_phys)
1769 {
1770 	int rc;
1771 
1772 	rc = pqi_get_device_info_phys_logical(ctrl_info, device, id_phys);
1773 
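	/* Treat a reported LUN count of zero as a single-LUN device. */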
1774 	if (rc == 0 && device->lun_count == 0)
1775 		device->lun_count = 1;
1776 
1777 	return rc;
1778 }
1779 
1780 static void pqi_show_volume_status(struct pqi_ctrl_info *ctrl_info,
1781 	struct pqi_scsi_dev *device)
1782 {
1783 	char *status;
1784 	static const char unknown_state_str[] =
1785 		"Volume is in an unknown state (%u)";
1786 	char unknown_state_buffer[sizeof(unknown_state_str) + 10];
1787 
1788 	switch (device->volume_status) {
1789 	case CISS_LV_OK:
1790 		status = "Volume online";
1791 		break;
1792 	case CISS_LV_FAILED:
1793 		status = "Volume failed";
1794 		break;
1795 	case CISS_LV_NOT_CONFIGURED:
1796 		status = "Volume not configured";
1797 		break;
1798 	case CISS_LV_DEGRADED:
1799 		status = "Volume degraded";
1800 		break;
1801 	case CISS_LV_READY_FOR_RECOVERY:
1802 		status = "Volume ready for recovery operation";
1803 		break;
1804 	case CISS_LV_UNDERGOING_RECOVERY:
1805 		status = "Volume undergoing recovery";
1806 		break;
1807 	case CISS_LV_WRONG_PHYSICAL_DRIVE_REPLACED:
1808 		status = "Wrong physical drive was replaced";
1809 		break;
1810 	case CISS_LV_PHYSICAL_DRIVE_CONNECTION_PROBLEM:
1811 		status = "A physical drive not properly connected";
1812 		break;
1813 	case CISS_LV_HARDWARE_OVERHEATING:
1814 		status = "Hardware is overheating";
1815 		break;
1816 	case CISS_LV_HARDWARE_HAS_OVERHEATED:
1817 		status = "Hardware has overheated";
1818 		break;
1819 	case CISS_LV_UNDERGOING_EXPANSION:
1820 		status = "Volume undergoing expansion";
1821 		break;
1822 	case CISS_LV_NOT_AVAILABLE:
1823 		status = "Volume waiting for transforming volume";
1824 		break;
1825 	case CISS_LV_QUEUED_FOR_EXPANSION:
1826 		status = "Volume queued for expansion";
1827 		break;
1828 	case CISS_LV_DISABLED_SCSI_ID_CONFLICT:
1829 		status = "Volume disabled due to SCSI ID conflict";
1830 		break;
1831 	case CISS_LV_EJECTED:
1832 		status = "Volume has been ejected";
1833 		break;
1834 	case CISS_LV_UNDERGOING_ERASE:
1835 		status = "Volume undergoing background erase";
1836 		break;
1837 	case CISS_LV_READY_FOR_PREDICTIVE_SPARE_REBUILD:
1838 		status = "Volume ready for predictive spare rebuild";
1839 		break;
1840 	case CISS_LV_UNDERGOING_RPI:
1841 		status = "Volume undergoing rapid parity initialization";
1842 		break;
1843 	case CISS_LV_PENDING_RPI:
1844 		status = "Volume queued for rapid parity initialization";
1845 		break;
1846 	case CISS_LV_ENCRYPTED_NO_KEY:
1847 		status = "Encrypted volume inaccessible - key not present";
1848 		break;
1849 	case CISS_LV_UNDERGOING_ENCRYPTION:
1850 		status = "Volume undergoing encryption process";
1851 		break;
1852 	case CISS_LV_UNDERGOING_ENCRYPTION_REKEYING:
1853 		status = "Volume undergoing encryption re-keying process";
1854 		break;
1855 	case CISS_LV_ENCRYPTED_IN_NON_ENCRYPTED_CONTROLLER:
1856 		status = "Volume encrypted but encryption is disabled";
1857 		break;
1858 	case CISS_LV_PENDING_ENCRYPTION:
1859 		status = "Volume pending migration to encrypted state";
1860 		break;
1861 	case CISS_LV_PENDING_ENCRYPTION_REKEYING:
1862 		status = "Volume pending encryption rekeying";
1863 		break;
1864 	case CISS_LV_NOT_SUPPORTED:
1865 		status = "Volume not supported on this controller";
1866 		break;
1867 	case CISS_LV_STATUS_UNAVAILABLE:
1868 		status = "Volume status not available";
1869 		break;
1870 	default:
1871 		snprintf(unknown_state_buffer, sizeof(unknown_state_buffer),
1872 			unknown_state_str, device->volume_status);
1873 		status = unknown_state_buffer;
1874 		break;
1875 	}
1876 
1877 	dev_info(&ctrl_info->pci_dev->dev,
1878 		"scsi %d:%d:%d:%d %s\n",
1879 		ctrl_info->scsi_host->host_no,
1880 		device->bus, device->target, device->lun, status);
1881 }
1882 
1883 static void pqi_rescan_worker(struct work_struct *work)
1884 {
1885 	struct pqi_ctrl_info *ctrl_info;
1886 
1887 	ctrl_info = container_of(to_delayed_work(work), struct pqi_ctrl_info,
1888 		rescan_work);
1889 
1890 	pqi_scan_scsi_devices(ctrl_info);
1891 }
1892 
1893 static int pqi_add_device(struct pqi_ctrl_info *ctrl_info,
1894 	struct pqi_scsi_dev *device)
1895 {
1896 	int rc;
1897 
1898 	if (pqi_is_logical_device(device))
1899 		rc = scsi_add_device(ctrl_info->scsi_host, device->bus,
1900 			device->target, device->lun);
1901 	else
1902 		rc = pqi_add_sas_device(ctrl_info->sas_host, device);
1903 
1904 	return rc;
1905 }
1906 
1907 #define PQI_REMOVE_DEVICE_PENDING_IO_TIMEOUT_MSECS	(20 * 1000)
1908 
1909 static inline void pqi_remove_device(struct pqi_ctrl_info *ctrl_info, struct pqi_scsi_dev *device)
1910 {
1911 	int rc;
1912 	int lun;
1913 
1914 	for (lun = 0; lun < device->lun_count; lun++) {
1915 		rc = pqi_device_wait_for_pending_io(ctrl_info, device, lun,
1916 			PQI_REMOVE_DEVICE_PENDING_IO_TIMEOUT_MSECS);
1917 		if (rc)
1918 			dev_err(&ctrl_info->pci_dev->dev,
1919 				"scsi %d:%d:%d:%d removing device with %d outstanding command(s)\n",
1920 				ctrl_info->scsi_host->host_no, device->bus,
1921 				device->target, lun,
1922 				atomic_read(&device->scsi_cmds_outstanding[lun]));
1923 	}
1924 
1925 	if (pqi_is_logical_device(device))
1926 		scsi_remove_device(device->sdev);
1927 	else
1928 		pqi_remove_sas_device(device);
1929 
1930 	pqi_device_remove_start(device);
1931 }
1932 
1933 /* Assumes the SCSI device list lock is held. */
1934 
1935 static struct pqi_scsi_dev *pqi_find_scsi_dev(struct pqi_ctrl_info *ctrl_info,
1936 	int bus, int target, int lun)
1937 {
1938 	struct pqi_scsi_dev *device;
1939 
1940 	list_for_each_entry(device, &ctrl_info->scsi_device_list, scsi_device_list_entry)
1941 		if (device->bus == bus && device->target == target && device->lun == lun)
1942 			return device;
1943 
1944 	return NULL;
1945 }
1946 
1947 static inline bool pqi_device_equal(struct pqi_scsi_dev *dev1, struct pqi_scsi_dev *dev2)
1948 {
1949 	if (dev1->is_physical_device != dev2->is_physical_device)
1950 		return false;
1951 
1952 	if (dev1->is_physical_device)
1953 		return memcmp(dev1->wwid, dev2->wwid, sizeof(dev1->wwid)) == 0;
1954 
1955 	return memcmp(dev1->volume_id, dev2->volume_id, sizeof(dev1->volume_id)) == 0;
1956 }
1957 
1958 enum pqi_find_result {
1959 	DEVICE_NOT_FOUND,
1960 	DEVICE_CHANGED,
1961 	DEVICE_SAME,
1962 };
1963 
1964 static enum pqi_find_result pqi_scsi_find_entry(struct pqi_ctrl_info *ctrl_info,
1965 	struct pqi_scsi_dev *device_to_find, struct pqi_scsi_dev **matching_device)
1966 {
1967 	struct pqi_scsi_dev *device;
1968 
1969 	list_for_each_entry(device, &ctrl_info->scsi_device_list, scsi_device_list_entry) {
1970 		if (pqi_scsi3addr_equal(device_to_find->scsi3addr, device->scsi3addr)) {
1971 			*matching_device = device;
1972 			if (pqi_device_equal(device_to_find, device)) {
1973 				if (device_to_find->volume_offline)
1974 					return DEVICE_CHANGED;
1975 				return DEVICE_SAME;
1976 			}
1977 			return DEVICE_CHANGED;
1978 		}
1979 	}
1980 
1981 	return DEVICE_NOT_FOUND;
1982 }
1983 
1984 static inline const char *pqi_device_type(struct pqi_scsi_dev *device)
1985 {
1986 	if (device->is_expander_smp_device)
1987 		return "Enclosure SMP    ";
1988 
1989 	return scsi_device_type(device->devtype);
1990 }
1991 
1992 #define PQI_DEV_INFO_BUFFER_LENGTH	128
1993 
1994 static void pqi_dev_info(struct pqi_ctrl_info *ctrl_info,
1995 	char *action, struct pqi_scsi_dev *device)
1996 {
1997 	ssize_t count;
1998 	char buffer[PQI_DEV_INFO_BUFFER_LENGTH];
1999 
2000 	count = scnprintf(buffer, PQI_DEV_INFO_BUFFER_LENGTH,
2001 		"%d:%d:", ctrl_info->scsi_host->host_no, device->bus);
2002 
2003 	if (device->target_lun_valid)
2004 		count += scnprintf(buffer + count,
2005 			PQI_DEV_INFO_BUFFER_LENGTH - count,
2006 			"%d:%d",
2007 			device->target,
2008 			device->lun);
2009 	else
2010 		count += scnprintf(buffer + count,
2011 			PQI_DEV_INFO_BUFFER_LENGTH - count,
2012 			"-:-");
2013 
2014 	if (pqi_is_logical_device(device))
2015 		count += scnprintf(buffer + count,
2016 			PQI_DEV_INFO_BUFFER_LENGTH - count,
2017 			" %08x%08x",
2018 			*((u32 *)&device->scsi3addr),
2019 			*((u32 *)&device->scsi3addr[4]));
2020 	else
2021 		count += scnprintf(buffer + count,
2022 			PQI_DEV_INFO_BUFFER_LENGTH - count,
2023 			" %016llx%016llx",
2024 			get_unaligned_be64(&device->wwid[0]),
2025 			get_unaligned_be64(&device->wwid[8]));
2026 
2027 	count += scnprintf(buffer + count, PQI_DEV_INFO_BUFFER_LENGTH - count,
2028 		" %s %.8s %.16s ",
2029 		pqi_device_type(device),
2030 		device->vendor,
2031 		device->model);
2032 
2033 	if (pqi_is_logical_device(device)) {
2034 		if (device->devtype == TYPE_DISK)
2035 			count += scnprintf(buffer + count,
2036 				PQI_DEV_INFO_BUFFER_LENGTH - count,
2037 				"SSDSmartPathCap%c En%c %-12s",
2038 				device->raid_bypass_configured ? '+' : '-',
2039 				device->raid_bypass_enabled ? '+' : '-',
2040 				pqi_raid_level_to_string(device->raid_level));
2041 	} else {
2042 		count += scnprintf(buffer + count,
2043 			PQI_DEV_INFO_BUFFER_LENGTH - count,
2044 			"AIO%c", device->aio_enabled ? '+' : '-');
2045 		if (device->devtype == TYPE_DISK ||
2046 			device->devtype == TYPE_ZBC)
2047 			count += scnprintf(buffer + count,
2048 				PQI_DEV_INFO_BUFFER_LENGTH - count,
2049 				" qd=%-6d", device->queue_depth);
2050 	}
2051 
2052 	dev_info(&ctrl_info->pci_dev->dev, "%s %s\n", action, buffer);
2053 }
2054 
2055 static bool pqi_raid_maps_equal(struct raid_map *raid_map1, struct raid_map *raid_map2)
2056 {
2057 	u32 raid_map1_size;
2058 	u32 raid_map2_size;
2059 
2060 	if (raid_map1 == NULL || raid_map2 == NULL)
2061 		return raid_map1 == raid_map2;
2062 
2063 	raid_map1_size = get_unaligned_le32(&raid_map1->structure_size);
2064 	raid_map2_size = get_unaligned_le32(&raid_map2->structure_size);
2065 
2066 	if (raid_map1_size != raid_map2_size)
2067 		return false;
2068 
2069 	return memcmp(raid_map1, raid_map2, raid_map1_size) == 0;
2070 }
2071 
2072 /* Assumes the SCSI device list lock is held. */
2073 
2074 static void pqi_scsi_update_device(struct pqi_ctrl_info *ctrl_info,
2075 	struct pqi_scsi_dev *existing_device, struct pqi_scsi_dev *new_device)
2076 {
2077 	existing_device->device_type = new_device->device_type;
2078 	existing_device->bus = new_device->bus;
2079 	if (new_device->target_lun_valid) {
2080 		existing_device->target = new_device->target;
2081 		existing_device->lun = new_device->lun;
2082 		existing_device->target_lun_valid = true;
2083 	}
2084 
2085 	/* By definition, the scsi3addr and wwid fields are already the same. */
2086 
2087 	existing_device->is_physical_device = new_device->is_physical_device;
2088 	memcpy(existing_device->vendor, new_device->vendor, sizeof(existing_device->vendor));
2089 	memcpy(existing_device->model, new_device->model, sizeof(existing_device->model));
2090 	existing_device->sas_address = new_device->sas_address;
2091 	existing_device->queue_depth = new_device->queue_depth;
2092 	existing_device->device_offline = false;
2093 	existing_device->lun_count = new_device->lun_count;
2094 
2095 	if (pqi_is_logical_device(existing_device)) {
2096 		existing_device->is_external_raid_device = new_device->is_external_raid_device;
2097 
2098 		if (existing_device->devtype == TYPE_DISK) {
2099 			existing_device->raid_level = new_device->raid_level;
2100 			existing_device->volume_status = new_device->volume_status;
2101 			memset(existing_device->next_bypass_group, 0, sizeof(existing_device->next_bypass_group));
2102 			if (!pqi_raid_maps_equal(existing_device->raid_map, new_device->raid_map)) {
2103 				kfree(existing_device->raid_map);
2104 				existing_device->raid_map = new_device->raid_map;
2105 				/* To prevent this from being freed later. */
2106 				new_device->raid_map = NULL;
2107 			}
2108 			if (new_device->raid_bypass_enabled && existing_device->raid_io_stats == NULL) {
2109 				existing_device->raid_io_stats = new_device->raid_io_stats;
2110 				new_device->raid_io_stats = NULL;
2111 			}
2112 			existing_device->raid_bypass_configured = new_device->raid_bypass_configured;
2113 			existing_device->raid_bypass_enabled = new_device->raid_bypass_enabled;
2114 		}
2115 	} else {
2116 		existing_device->aio_enabled = new_device->aio_enabled;
2117 		existing_device->aio_handle = new_device->aio_handle;
2118 		existing_device->is_expander_smp_device = new_device->is_expander_smp_device;
2119 		existing_device->active_path_index = new_device->active_path_index;
2120 		existing_device->phy_id = new_device->phy_id;
2121 		existing_device->path_map = new_device->path_map;
2122 		existing_device->bay = new_device->bay;
2123 		existing_device->box_index = new_device->box_index;
2124 		existing_device->phys_box_on_bus = new_device->phys_box_on_bus;
2125 		existing_device->phy_connected_dev_type = new_device->phy_connected_dev_type;
2126 		memcpy(existing_device->box, new_device->box, sizeof(existing_device->box));
2127 		memcpy(existing_device->phys_connector, new_device->phys_connector, sizeof(existing_device->phys_connector));
2128 	}
2129 }
2130 
2131 static inline void pqi_free_device(struct pqi_scsi_dev *device)
2132 {
2133 	if (device) {
2134 		free_percpu(device->raid_io_stats);
2135 		kfree(device->raid_map);
2136 		kfree(device);
2137 	}
2138 }
2139 
2140 /*
2141  * Called when exposing a new device to the OS fails, in order to re-adjust
2142  * our internal SCSI device list to match the SCSI ML's view.
2143  */
2144 
2145 static inline void pqi_fixup_botched_add(struct pqi_ctrl_info *ctrl_info,
2146 	struct pqi_scsi_dev *device)
2147 {
2148 	unsigned long flags;
2149 
2150 	spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
2151 	list_del(&device->scsi_device_list_entry);
2152 	spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
2153 
2154 	/* Allow the device structure to be freed later. */
2155 	device->keep_device = false;
2156 }
2157 
2158 static inline bool pqi_is_device_added(struct pqi_scsi_dev *device)
2159 {
2160 	if (device->is_expander_smp_device)
2161 		return device->sas_port != NULL;
2162 
2163 	return device->sdev != NULL;
2164 }
2165 
2166 static inline void pqi_init_device_tmf_work(struct pqi_scsi_dev *device)
2167 {
2168 	unsigned int lun;
2169 	struct pqi_tmf_work *tmf_work;
2170 
2171 	for (lun = 0, tmf_work = device->tmf_work; lun < PQI_MAX_LUNS_PER_DEVICE; lun++, tmf_work++)
2172 		INIT_WORK(&tmf_work->work_struct, pqi_tmf_worker);
2173 }
2174 
2175 static inline bool pqi_volume_rescan_needed(struct pqi_scsi_dev *device)
2176 {
2177 	if (pqi_device_in_remove(device))
2178 		return false;
2179 
2180 	if (device->sdev == NULL)
2181 		return false;
2182 
2183 	if (!scsi_device_online(device->sdev))
2184 		return false;
2185 
2186 	return device->rescan;
2187 }
2188 
2189 static void pqi_update_device_list(struct pqi_ctrl_info *ctrl_info,
2190 	struct pqi_scsi_dev *new_device_list[], unsigned int num_new_devices)
2191 {
2192 	int rc;
2193 	unsigned int i;
2194 	unsigned long flags;
2195 	enum pqi_find_result find_result;
2196 	struct pqi_scsi_dev *device;
2197 	struct pqi_scsi_dev *next;
2198 	struct pqi_scsi_dev *matching_device;
2199 	LIST_HEAD(add_list);
2200 	LIST_HEAD(delete_list);
2201 
2202 	/*
2203 	 * The idea here is to do as little work as possible while holding the
2204 	 * spinlock.  That's why we go to great pains to defer anything other
2205 	 * than updating the internal device list until after we release the
2206 	 * spinlock.
2207 	 */
2208 
2209 	spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
2210 
2211 	/* Assume that all devices in the existing list have gone away. */
2212 	list_for_each_entry(device, &ctrl_info->scsi_device_list, scsi_device_list_entry)
2213 		device->device_gone = true;
2214 
2215 	for (i = 0; i < num_new_devices; i++) {
2216 		device = new_device_list[i];
2217 
2218 		find_result = pqi_scsi_find_entry(ctrl_info, device,
2219 			&matching_device);
2220 
2221 		switch (find_result) {
2222 		case DEVICE_SAME:
2223 			/*
2224 			 * The newly found device is already in the existing
2225 			 * device list.
2226 			 */
2227 			device->new_device = false;
2228 			matching_device->device_gone = false;
2229 			pqi_scsi_update_device(ctrl_info, matching_device, device);
2230 			break;
2231 		case DEVICE_NOT_FOUND:
2232 			/*
2233 			 * The newly found device is NOT in the existing device
2234 			 * list.
2235 			 */
2236 			device->new_device = true;
2237 			break;
2238 		case DEVICE_CHANGED:
2239 			/*
2240 			 * The original device has gone away and we need to add
2241 			 * the new device.
2242 			 */
2243 			device->new_device = true;
2244 			break;
2245 		}
2246 	}
2247 
2248 	/* Process all devices that have gone away. */
2249 	list_for_each_entry_safe(device, next, &ctrl_info->scsi_device_list,
2250 		scsi_device_list_entry) {
2251 		if (device->device_gone) {
2252 			list_del(&device->scsi_device_list_entry);
2253 			list_add_tail(&device->delete_list_entry, &delete_list);
2254 		}
2255 	}
2256 
2257 	/* Process all new devices. */
2258 	for (i = 0; i < num_new_devices; i++) {
2259 		device = new_device_list[i];
2260 		if (!device->new_device)
2261 			continue;
2262 		if (device->volume_offline)
2263 			continue;
2264 		list_add_tail(&device->scsi_device_list_entry,
2265 			&ctrl_info->scsi_device_list);
2266 		list_add_tail(&device->add_list_entry, &add_list);
2267 		/* To prevent this device structure from being freed later. */
2268 		device->keep_device = true;
2269 		pqi_init_device_tmf_work(device);
2270 	}
2271 
2272 	spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
2273 
2274 	/*
2275 	 * If OFA is in progress and there are devices that need to be deleted,
2276 	 * allow any pending reset operations to continue and unblock any SCSI
2277 	 * requests before removal.
2278 	 */
2279 	if (pqi_ofa_in_progress(ctrl_info)) {
2280 		list_for_each_entry_safe(device, next, &delete_list, delete_list_entry)
2281 			if (pqi_is_device_added(device))
2282 				pqi_device_remove_start(device);
2283 		pqi_ctrl_unblock_device_reset(ctrl_info);
2284 		pqi_scsi_unblock_requests(ctrl_info);
2285 	}
2286 
2287 	/* Remove all devices that have gone away. */
2288 	list_for_each_entry_safe(device, next, &delete_list, delete_list_entry) {
2289 		if (device->volume_offline) {
2290 			pqi_dev_info(ctrl_info, "offline", device);
2291 			pqi_show_volume_status(ctrl_info, device);
2292 		} else {
2293 			pqi_dev_info(ctrl_info, "removed", device);
2294 		}
2295 		if (pqi_is_device_added(device))
2296 			pqi_remove_device(ctrl_info, device);
2297 		list_del(&device->delete_list_entry);
2298 		pqi_free_device(device);
2299 	}
2300 
2301 	/*
2302 	 * Notify the SML of any existing device changes, such as
2303 	 * queue depth and device size.
2304 	 */
2305 	list_for_each_entry(device, &ctrl_info->scsi_device_list, scsi_device_list_entry) {
2306 		/*
2307 		 * Check for queue depth change.
2308 		 */
2309 		if (device->sdev && device->queue_depth != device->advertised_queue_depth) {
2310 			device->advertised_queue_depth = device->queue_depth;
2311 			scsi_change_queue_depth(device->sdev, device->advertised_queue_depth);
2312 		}
2313 		spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
2314 		/*
2315 		 * Check for changes in the device, such as size.
2316 		 */
2317 		if (pqi_volume_rescan_needed(device)) {
2318 			device->rescan = false;
2319 			spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
2320 			scsi_rescan_device(device->sdev);
2321 		} else {
2322 			spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
2323 		}
2324 	}
2325 
2326 	/* Expose any new devices. */
2327 	list_for_each_entry_safe(device, next, &add_list, add_list_entry) {
2328 		if (!pqi_is_device_added(device)) {
2329 			rc = pqi_add_device(ctrl_info, device);
2330 			if (rc == 0) {
2331 				pqi_dev_info(ctrl_info, "added", device);
2332 			} else {
2333 				dev_warn(&ctrl_info->pci_dev->dev,
2334 					"scsi %d:%d:%d:%d addition failed, device not added\n",
2335 					ctrl_info->scsi_host->host_no,
2336 					device->bus, device->target,
2337 					device->lun);
2338 				pqi_fixup_botched_add(ctrl_info, device);
2339 			}
2340 		}
2341 	}
2342 
2343 }
2344 
2345 static inline bool pqi_is_supported_device(struct pqi_scsi_dev *device)
2346 {
2347 	/*
2348 	 * Only support the HBA controller itself as a RAID
2349 	 * controller.  If it's a RAID controller other than
2350 	 * the HBA itself (an external RAID controller, for
2351 	 * example), we don't support it.
2352 	 */
2353 	if (device->device_type == SA_DEVICE_TYPE_CONTROLLER &&
2354 		!pqi_is_hba_lunid(device->scsi3addr))
2355 		return false;
2356 
2357 	return true;
2358 }
2359 
2360 static inline bool pqi_skip_device(u8 *scsi3addr)
2361 {
2362 	/* Ignore all masked devices. */
2363 	if (MASKED_DEVICE(scsi3addr))
2364 		return true;
2365 
2366 	return false;
2367 }
2368 
2369 static inline void pqi_mask_device(u8 *scsi3addr)
2370 {
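	/* Setting these bits causes pqi_skip_device() to ignore the device via MASKED_DEVICE(). */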
2371 	scsi3addr[3] |= 0xc0;
2372 }
2373 
2374 static inline bool pqi_expose_device(struct pqi_scsi_dev *device)
2375 {
2376 	return !device->is_physical_device || !pqi_skip_device(device->scsi3addr);
2377 }
2378 
2379 static int pqi_update_scsi_devices(struct pqi_ctrl_info *ctrl_info)
2380 {
2381 	int i;
2382 	int rc;
2383 	LIST_HEAD(new_device_list_head);
2384 	struct report_phys_lun_16byte_wwid_list *physdev_list = NULL;
2385 	struct report_log_lun_list *logdev_list = NULL;
2386 	struct report_phys_lun_16byte_wwid *phys_lun;
2387 	struct report_log_lun *log_lun;
2388 	struct bmic_identify_physical_device *id_phys = NULL;
2389 	u32 num_physicals;
2390 	u32 num_logicals;
2391 	struct pqi_scsi_dev **new_device_list = NULL;
2392 	struct pqi_scsi_dev *device;
2393 	struct pqi_scsi_dev *next;
2394 	unsigned int num_new_devices;
2395 	unsigned int num_valid_devices;
2396 	bool is_physical_device;
2397 	u8 *scsi3addr;
2398 	unsigned int physical_index;
2399 	unsigned int logical_index;
2400 	static char *out_of_memory_msg =
2401 		"failed to allocate memory, device discovery stopped";
2402 
2403 	rc = pqi_get_device_lists(ctrl_info, &physdev_list, &logdev_list);
2404 	if (rc)
2405 		goto out;
2406 
2407 	if (physdev_list)
2408 		num_physicals =
2409 			get_unaligned_be32(&physdev_list->header.list_length)
2410 				/ sizeof(physdev_list->lun_entries[0]);
2411 	else
2412 		num_physicals = 0;
2413 
2414 	if (logdev_list)
2415 		num_logicals =
2416 			get_unaligned_be32(&logdev_list->header.list_length)
2417 				/ sizeof(logdev_list->lun_entries[0]);
2418 	else
2419 		num_logicals = 0;
2420 
2421 	if (num_physicals) {
2422 		/*
2423 		 * We need this buffer for calls to pqi_get_physical_device_info()
2424 		 * below.  We allocate it here instead of inside
2425 		 * pqi_get_physical_device_info() because it's a fairly large
2426 		 * buffer.
2427 		 */
2428 		id_phys = kmalloc(sizeof(*id_phys), GFP_KERNEL);
2429 		if (!id_phys) {
2430 			dev_warn(&ctrl_info->pci_dev->dev, "%s\n",
2431 				out_of_memory_msg);
2432 			rc = -ENOMEM;
2433 			goto out;
2434 		}
2435 
2436 		if (pqi_hide_vsep) {
2437 			for (i = num_physicals - 1; i >= 0; i--) {
2438 				phys_lun = &physdev_list->lun_entries[i];
2439 				if (CISS_GET_DRIVE_NUMBER(phys_lun->lunid) == PQI_VSEP_CISS_BTL) {
2440 					pqi_mask_device(phys_lun->lunid);
2441 					break;
2442 				}
2443 			}
2444 		}
2445 	}
2446 
2447 	if (num_logicals &&
2448 		(logdev_list->header.flags & CISS_REPORT_LOG_FLAG_DRIVE_TYPE_MIX))
2449 		ctrl_info->lv_drive_type_mix_valid = true;
2450 
2451 	num_new_devices = num_physicals + num_logicals;
2452 
2453 	new_device_list = kmalloc_array(num_new_devices,
2454 					sizeof(*new_device_list),
2455 					GFP_KERNEL);
2456 	if (!new_device_list) {
2457 		dev_warn(&ctrl_info->pci_dev->dev, "%s\n", out_of_memory_msg);
2458 		rc = -ENOMEM;
2459 		goto out;
2460 	}
2461 
2462 	for (i = 0; i < num_new_devices; i++) {
2463 		device = kzalloc(sizeof(*device), GFP_KERNEL);
2464 		if (!device) {
2465 			dev_warn(&ctrl_info->pci_dev->dev, "%s\n",
2466 				out_of_memory_msg);
2467 			rc = -ENOMEM;
2468 			goto out;
2469 		}
2470 		list_add_tail(&device->new_device_list_entry,
2471 			&new_device_list_head);
2472 	}
2473 
2474 	device = NULL;
2475 	num_valid_devices = 0;
2476 	physical_index = 0;
2477 	logical_index = 0;
2478 
2479 	for (i = 0; i < num_new_devices; i++) {
2480 
2481 		if ((!pqi_expose_ld_first && i < num_physicals) ||
2482 			(pqi_expose_ld_first && i >= num_logicals)) {
2483 			is_physical_device = true;
2484 			phys_lun = &physdev_list->lun_entries[physical_index++];
2485 			log_lun = NULL;
2486 			scsi3addr = phys_lun->lunid;
2487 		} else {
2488 			is_physical_device = false;
2489 			phys_lun = NULL;
2490 			log_lun = &logdev_list->lun_entries[logical_index++];
2491 			scsi3addr = log_lun->lunid;
2492 		}
2493 
2494 		if (is_physical_device && pqi_skip_device(scsi3addr))
2495 			continue;
2496 
2497 		if (device)
2498 			device = list_next_entry(device, new_device_list_entry);
2499 		else
2500 			device = list_first_entry(&new_device_list_head,
2501 				struct pqi_scsi_dev, new_device_list_entry);
2502 
2503 		memcpy(device->scsi3addr, scsi3addr, sizeof(device->scsi3addr));
2504 		device->is_physical_device = is_physical_device;
2505 		if (is_physical_device) {
2506 			device->device_type = phys_lun->device_type;
2507 			if (device->device_type == SA_DEVICE_TYPE_EXPANDER_SMP)
2508 				device->is_expander_smp_device = true;
2509 		} else {
2510 			device->is_external_raid_device =
2511 				pqi_is_external_raid_addr(scsi3addr);
2512 		}
2513 
2514 		if (!pqi_is_supported_device(device))
2515 			continue;
2516 
2517 		/* Gather information about the device. */
2518 		rc = pqi_get_device_info(ctrl_info, device, id_phys);
2519 		if (rc == -ENOMEM) {
2520 			dev_warn(&ctrl_info->pci_dev->dev, "%s\n",
2521 				out_of_memory_msg);
2522 			goto out;
2523 		}
2524 		if (rc) {
2525 			if (device->is_physical_device)
2526 				dev_warn(&ctrl_info->pci_dev->dev,
2527 					"obtaining device info failed, skipping physical device %016llx%016llx\n",
2528 					get_unaligned_be64(&phys_lun->wwid[0]),
2529 					get_unaligned_be64(&phys_lun->wwid[8]));
2530 			else
2531 				dev_warn(&ctrl_info->pci_dev->dev,
2532 					"obtaining device info failed, skipping logical device %08x%08x\n",
2533 					*((u32 *)&device->scsi3addr),
2534 					*((u32 *)&device->scsi3addr[4]));
2535 			rc = 0;
2536 			continue;
2537 		}
2538 
2539 		/* Do not present disks that the OS cannot fully probe. */
2540 		if (pqi_keep_device_offline(device))
2541 			continue;
2542 
2543 		pqi_assign_bus_target_lun(device);
2544 
2545 		if (device->is_physical_device) {
2546 			memcpy(device->wwid, phys_lun->wwid, sizeof(device->wwid));
2547 			if ((phys_lun->device_flags &
2548 				CISS_REPORT_PHYS_DEV_FLAG_AIO_ENABLED) &&
2549 				phys_lun->aio_handle) {
2550 				device->aio_enabled = true;
2551 				device->aio_handle =
2552 					phys_lun->aio_handle;
2553 			}
2554 		} else {
2555 			memcpy(device->volume_id, log_lun->volume_id,
2556 				sizeof(device->volume_id));
2557 		}
2558 
2559 		device->sas_address = get_unaligned_be64(&device->wwid[0]);
2560 
2561 		new_device_list[num_valid_devices++] = device;
2562 	}
2563 
2564 	pqi_update_device_list(ctrl_info, new_device_list, num_valid_devices);
2565 
2566 out:
2567 	list_for_each_entry_safe(device, next, &new_device_list_head,
2568 		new_device_list_entry) {
2569 		if (device->keep_device)
2570 			continue;
2571 		list_del(&device->new_device_list_entry);
2572 		pqi_free_device(device);
2573 	}
2574 
2575 	kfree(new_device_list);
2576 	kfree(physdev_list);
2577 	kfree(logdev_list);
2578 	kfree(id_phys);
2579 
2580 	return rc;
2581 }
2582 
2583 static int pqi_scan_scsi_devices(struct pqi_ctrl_info *ctrl_info)
2584 {
2585 	int rc;
2586 	int mutex_acquired;
2587 
2588 	if (pqi_ctrl_offline(ctrl_info))
2589 		return -ENXIO;
2590 
2591 	mutex_acquired = mutex_trylock(&ctrl_info->scan_mutex);
2592 
2593 	if (!mutex_acquired) {
2594 		if (pqi_ctrl_scan_blocked(ctrl_info))
2595 			return -EBUSY;
2596 		pqi_schedule_rescan_worker_delayed(ctrl_info);
2597 		return -EINPROGRESS;
2598 	}
2599 
2600 	rc = pqi_update_scsi_devices(ctrl_info);
2601 	if (rc && !pqi_ctrl_scan_blocked(ctrl_info))
2602 		pqi_schedule_rescan_worker_delayed(ctrl_info);
2603 
2604 	mutex_unlock(&ctrl_info->scan_mutex);
2605 
2606 	return rc;
2607 }
2608 
2609 static void pqi_scan_start(struct Scsi_Host *shost)
2610 {
2611 	struct pqi_ctrl_info *ctrl_info;
2612 
2613 	ctrl_info = shost_to_hba(shost);
2614 
2615 	pqi_scan_scsi_devices(ctrl_info);
2616 }
2617 
2618 /* Returns TRUE if scan is finished. */
2619 
2620 static int pqi_scan_finished(struct Scsi_Host *shost,
2621 	unsigned long elapsed_time)
2622 {
2623 	struct pqi_ctrl_info *ctrl_info;
2624 
2625 	ctrl_info = shost_priv(shost);
2626 
2627 	return !mutex_is_locked(&ctrl_info->scan_mutex);
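	/* The scan mutex is held for the duration of a scan; unlocked means finished. */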
2628 }
2629 
2630 static inline void pqi_set_encryption_info(struct pqi_encryption_info *encryption_info,
2631 	struct raid_map *raid_map, u64 first_block)
2632 {
2633 	u32 volume_blk_size;
2634 
2635 	/*
2636 	 * Set the encryption tweak values based on logical block address.
2637 	 * If the block size is 512, the tweak value is equal to the LBA.
2638 	 * For other block sizes, tweak value is (LBA * block size) / 512.
2639 	 */
2640 	volume_blk_size = get_unaligned_le32(&raid_map->volume_blk_size);
2641 	if (volume_blk_size != 512)
2642 		first_block = (first_block * volume_blk_size) / 512;
2643 
2644 	encryption_info->data_encryption_key_index =
2645 		get_unaligned_le16(&raid_map->data_encryption_key_index);
2646 	encryption_info->encrypt_tweak_lower = lower_32_bits(first_block);
2647 	encryption_info->encrypt_tweak_upper = upper_32_bits(first_block);
2648 }
2649 
2650 /*
2651  * Attempt to perform RAID bypass mapping for a logical volume I/O.
2652  */
2653 
2654 static bool pqi_aio_raid_level_supported(struct pqi_ctrl_info *ctrl_info,
2655 	struct pqi_scsi_dev_raid_map_data *rmd)
2656 {
2657 	bool is_supported = true;
2658 
2659 	switch (rmd->raid_level) {
2660 	case SA_RAID_0:
2661 		break;
2662 	case SA_RAID_1:
2663 		if (rmd->is_write && (!ctrl_info->enable_r1_writes ||
2664 			rmd->data_length > ctrl_info->max_write_raid_1_10_2drive))
2665 			is_supported = false;
2666 		break;
2667 	case SA_RAID_TRIPLE:
2668 		if (rmd->is_write && (!ctrl_info->enable_r1_writes ||
2669 			rmd->data_length > ctrl_info->max_write_raid_1_10_3drive))
2670 			is_supported = false;
2671 		break;
2672 	case SA_RAID_5:
2673 		if (rmd->is_write && (!ctrl_info->enable_r5_writes ||
2674 			rmd->data_length > ctrl_info->max_write_raid_5_6))
2675 			is_supported = false;
2676 		break;
2677 	case SA_RAID_6:
2678 		if (rmd->is_write && (!ctrl_info->enable_r6_writes ||
2679 			rmd->data_length > ctrl_info->max_write_raid_5_6))
2680 			is_supported = false;
2681 		break;
2682 	default:
2683 		is_supported = false;
2684 		break;
2685 	}
2686 
2687 	return is_supported;
2688 }
2689 
2690 #define PQI_RAID_BYPASS_INELIGIBLE	1
2691 
2692 static int pqi_get_aio_lba_and_block_count(struct scsi_cmnd *scmd,
2693 	struct pqi_scsi_dev_raid_map_data *rmd)
2694 {
2695 	/* Check for valid opcode, get LBA and block count. */
2696 	switch (scmd->cmnd[0]) {
2697 	case WRITE_6:
2698 		rmd->is_write = true;
2699 		fallthrough;
2700 	case READ_6:
2701 		rmd->first_block = (u64)(((scmd->cmnd[1] & 0x1f) << 16) |
2702 			(scmd->cmnd[2] << 8) | scmd->cmnd[3]);
2703 		rmd->block_cnt = (u32)scmd->cmnd[4];
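		/* For READ(6)/WRITE(6), a transfer length of 0 means 256 blocks. */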
2704 		if (rmd->block_cnt == 0)
2705 			rmd->block_cnt = 256;
2706 		break;
2707 	case WRITE_10:
2708 		rmd->is_write = true;
2709 		fallthrough;
2710 	case READ_10:
2711 		rmd->first_block = (u64)get_unaligned_be32(&scmd->cmnd[2]);
2712 		rmd->block_cnt = (u32)get_unaligned_be16(&scmd->cmnd[7]);
2713 		break;
2714 	case WRITE_12:
2715 		rmd->is_write = true;
2716 		fallthrough;
2717 	case READ_12:
2718 		rmd->first_block = (u64)get_unaligned_be32(&scmd->cmnd[2]);
2719 		rmd->block_cnt = get_unaligned_be32(&scmd->cmnd[6]);
2720 		break;
2721 	case WRITE_16:
2722 		rmd->is_write = true;
2723 		fallthrough;
2724 	case READ_16:
2725 		rmd->first_block = get_unaligned_be64(&scmd->cmnd[2]);
2726 		rmd->block_cnt = get_unaligned_be32(&scmd->cmnd[10]);
2727 		break;
2728 	default:
2729 		/* Process via normal I/O path. */
2730 		return PQI_RAID_BYPASS_INELIGIBLE;
2731 	}
2732 
2733 	put_unaligned_le32(scsi_bufflen(scmd), &rmd->data_length);
2734 
2735 	return 0;
2736 }
2737 
2738 static int pci_get_aio_common_raid_map_values(struct pqi_ctrl_info *ctrl_info,
2739 	struct pqi_scsi_dev_raid_map_data *rmd, struct raid_map *raid_map)
2740 {
2741 #if BITS_PER_LONG == 32
2742 	u64 tmpdiv;
2743 #endif
2744 
2745 	rmd->last_block = rmd->first_block + rmd->block_cnt - 1;
2746 
2747 	/* Check for invalid block or wraparound. */
2748 	if (rmd->last_block >=
2749 		get_unaligned_le64(&raid_map->volume_blk_cnt) ||
2750 		rmd->last_block < rmd->first_block)
2751 		return PQI_RAID_BYPASS_INELIGIBLE;
2752 
2753 	rmd->data_disks_per_row =
2754 		get_unaligned_le16(&raid_map->data_disks_per_row);
2755 	rmd->strip_size = get_unaligned_le16(&raid_map->strip_size);
2756 	rmd->layout_map_count = get_unaligned_le16(&raid_map->layout_map_count);
2757 
2758 	/* Calculate stripe information for the request. */
2759 	rmd->blocks_per_row = rmd->data_disks_per_row * rmd->strip_size;
2760 	if (rmd->blocks_per_row == 0) /* Used as a divisor in many calculations */
2761 		return PQI_RAID_BYPASS_INELIGIBLE;
2762 #if BITS_PER_LONG == 32
2763 	tmpdiv = rmd->first_block;
2764 	do_div(tmpdiv, rmd->blocks_per_row);
2765 	rmd->first_row = tmpdiv;
2766 	tmpdiv = rmd->last_block;
2767 	do_div(tmpdiv, rmd->blocks_per_row);
2768 	rmd->last_row = tmpdiv;
2769 	rmd->first_row_offset = (u32)(rmd->first_block - (rmd->first_row * rmd->blocks_per_row));
2770 	rmd->last_row_offset = (u32)(rmd->last_block - (rmd->last_row * rmd->blocks_per_row));
2771 	tmpdiv = rmd->first_row_offset;
2772 	do_div(tmpdiv, rmd->strip_size);
2773 	rmd->first_column = tmpdiv;
2774 	tmpdiv = rmd->last_row_offset;
2775 	do_div(tmpdiv, rmd->strip_size);
2776 	rmd->last_column = tmpdiv;
2777 #else
2778 	rmd->first_row = rmd->first_block / rmd->blocks_per_row;
2779 	rmd->last_row = rmd->last_block / rmd->blocks_per_row;
2780 	rmd->first_row_offset = (u32)(rmd->first_block -
2781 		(rmd->first_row * rmd->blocks_per_row));
2782 	rmd->last_row_offset = (u32)(rmd->last_block - (rmd->last_row *
2783 		rmd->blocks_per_row));
2784 	rmd->first_column = rmd->first_row_offset / rmd->strip_size;
2785 	rmd->last_column = rmd->last_row_offset / rmd->strip_size;
2786 #endif
2787 
2788 	/* If this isn't a single row/column then give to the controller. */
2789 	if (rmd->first_row != rmd->last_row ||
2790 		rmd->first_column != rmd->last_column)
2791 		return PQI_RAID_BYPASS_INELIGIBLE;
2792 
2793 	/* Proceeding with driver mapping. */
2794 	rmd->total_disks_per_row = rmd->data_disks_per_row +
2795 		get_unaligned_le16(&raid_map->metadata_disks_per_row);
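	/* Account for parity rotation when selecting the row within the map. */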
2796 	rmd->map_row = ((u32)(rmd->first_row >>
2797 		raid_map->parity_rotation_shift)) %
2798 		get_unaligned_le16(&raid_map->row_cnt);
2799 	rmd->map_index = (rmd->map_row * rmd->total_disks_per_row) +
2800 		rmd->first_column;
2801 
2802 	return 0;
2803 }
2804 
2805 static int pqi_calc_aio_r5_or_r6(struct pqi_scsi_dev_raid_map_data *rmd,
2806 	struct raid_map *raid_map)
2807 {
2808 #if BITS_PER_LONG == 32
2809 	u64 tmpdiv;
2810 #endif
2811 
2812 	if (rmd->blocks_per_row == 0) /* Used as a divisor in many calculations */
2813 		return PQI_RAID_BYPASS_INELIGIBLE;
2814 
2815 	/* RAID 50/60 */
2816 	/* Verify that the first and last blocks are in the same RAID group. */
2817 	rmd->stripesize = rmd->blocks_per_row * rmd->layout_map_count;
2818 #if BITS_PER_LONG == 32
2819 	tmpdiv = rmd->first_block;
2820 	rmd->first_group = do_div(tmpdiv, rmd->stripesize);
2821 	tmpdiv = rmd->first_group;
2822 	do_div(tmpdiv, rmd->blocks_per_row);
2823 	rmd->first_group = tmpdiv;
2824 	tmpdiv = rmd->last_block;
2825 	rmd->last_group = do_div(tmpdiv, rmd->stripesize);
2826 	tmpdiv = rmd->last_group;
2827 	do_div(tmpdiv, rmd->blocks_per_row);
2828 	rmd->last_group = tmpdiv;
2829 #else
2830 	rmd->first_group = (rmd->first_block % rmd->stripesize) / rmd->blocks_per_row;
2831 	rmd->last_group = (rmd->last_block % rmd->stripesize) / rmd->blocks_per_row;
2832 #endif
2833 	if (rmd->first_group != rmd->last_group)
2834 		return PQI_RAID_BYPASS_INELIGIBLE;
2835 
2836 	/* Verify request is in a single row of RAID 5/6. */
2837 #if BITS_PER_LONG == 32
2838 	tmpdiv = rmd->first_block;
2839 	do_div(tmpdiv, rmd->stripesize);
2840 	rmd->first_row = tmpdiv;
2841 	rmd->r5or6_first_row = tmpdiv;
2842 	tmpdiv = rmd->last_block;
2843 	do_div(tmpdiv, rmd->stripesize);
2844 	rmd->r5or6_last_row = tmpdiv;
2845 #else
2846 	rmd->first_row = rmd->r5or6_first_row =
2847 		rmd->first_block / rmd->stripesize;
2848 	rmd->r5or6_last_row = rmd->last_block / rmd->stripesize;
2849 #endif
2850 	if (rmd->r5or6_first_row != rmd->r5or6_last_row)
2851 		return PQI_RAID_BYPASS_INELIGIBLE;
2852 
2853 	/* Verify request is in a single column. */
2854 #if BITS_PER_LONG == 32
2855 	tmpdiv = rmd->first_block;
2856 	rmd->first_row_offset = do_div(tmpdiv, rmd->stripesize);
2857 	tmpdiv = rmd->first_row_offset;
2858 	rmd->first_row_offset = (u32)do_div(tmpdiv, rmd->blocks_per_row);
2859 	rmd->r5or6_first_row_offset = rmd->first_row_offset;
2860 	tmpdiv = rmd->last_block;
2861 	rmd->r5or6_last_row_offset = do_div(tmpdiv, rmd->stripesize);
2862 	tmpdiv = rmd->r5or6_last_row_offset;
2863 	rmd->r5or6_last_row_offset = do_div(tmpdiv, rmd->blocks_per_row);
2864 	tmpdiv = rmd->r5or6_first_row_offset;
2865 	do_div(tmpdiv, rmd->strip_size);
2866 	rmd->first_column = rmd->r5or6_first_column = tmpdiv;
2867 	tmpdiv = rmd->r5or6_last_row_offset;
2868 	do_div(tmpdiv, rmd->strip_size);
2869 	rmd->r5or6_last_column = tmpdiv;
2870 #else
2871 	rmd->first_row_offset = rmd->r5or6_first_row_offset =
2872 		(u32)((rmd->first_block % rmd->stripesize) %
2873 		rmd->blocks_per_row);
2874 
2875 	rmd->r5or6_last_row_offset =
2876 		(u32)((rmd->last_block % rmd->stripesize) %
2877 		rmd->blocks_per_row);
2878 
2879 	rmd->first_column =
2880 		rmd->r5or6_first_row_offset / rmd->strip_size;
2881 	rmd->r5or6_first_column = rmd->first_column;
2882 	rmd->r5or6_last_column = rmd->r5or6_last_row_offset / rmd->strip_size;
2883 #endif
2884 	if (rmd->r5or6_first_column != rmd->r5or6_last_column)
2885 		return PQI_RAID_BYPASS_INELIGIBLE;
2886 
2887 	/* Request is eligible. */
2888 	rmd->map_row =
2889 		((u32)(rmd->first_row >> raid_map->parity_rotation_shift)) %
2890 		get_unaligned_le16(&raid_map->row_cnt);
2891 
2892 	rmd->map_index = (rmd->first_group *
2893 		(get_unaligned_le16(&raid_map->row_cnt) *
2894 		rmd->total_disks_per_row)) +
2895 		(rmd->map_row * rmd->total_disks_per_row) + rmd->first_column;
2896 
2897 	if (rmd->is_write) {
2898 		u32 index;
2899 
2900 		/*
2901 		 * p_parity_it_nexus and q_parity_it_nexus are pointers to the
2902 		 * parity entries inside the device's raid_map.
2903 		 *
2904 		 * A device's RAID map is bounded by the number of RAID disks squared.
2905 		 *
2906 		 * The device's RAID map size is checked during device
2907 		 * initialization.
2908 		 */
2909 		index = DIV_ROUND_UP(rmd->map_index + 1, rmd->total_disks_per_row);
2910 		index *= rmd->total_disks_per_row;
2911 		index -= get_unaligned_le16(&raid_map->metadata_disks_per_row);
2912 
2913 		rmd->p_parity_it_nexus = raid_map->disk_data[index].aio_handle;
2914 		if (rmd->raid_level == SA_RAID_6) {
2915 			rmd->q_parity_it_nexus = raid_map->disk_data[index + 1].aio_handle;
2916 			rmd->xor_mult = raid_map->disk_data[rmd->map_index].xor_mult[1];
2917 		}
2918 #if BITS_PER_LONG == 32
2919 		tmpdiv = rmd->first_block;
2920 		do_div(tmpdiv, rmd->blocks_per_row);
2921 		rmd->row = tmpdiv;
2922 #else
2923 		rmd->row = rmd->first_block / rmd->blocks_per_row;
2924 #endif
2925 	}
2926 
2927 	return 0;
2928 }
2929 
2930 static void pqi_set_aio_cdb(struct pqi_scsi_dev_raid_map_data *rmd)
2931 {
2932 	/* Build the new CDB for the physical disk I/O. */
2933 	if (rmd->disk_block > 0xffffffff) {
2934 		rmd->cdb[0] = rmd->is_write ? WRITE_16 : READ_16;
2935 		rmd->cdb[1] = 0;
2936 		put_unaligned_be64(rmd->disk_block, &rmd->cdb[2]);
2937 		put_unaligned_be32(rmd->disk_block_cnt, &rmd->cdb[10]);
2938 		rmd->cdb[14] = 0;
2939 		rmd->cdb[15] = 0;
2940 		rmd->cdb_length = 16;
2941 	} else {
2942 		rmd->cdb[0] = rmd->is_write ? WRITE_10 : READ_10;
2943 		rmd->cdb[1] = 0;
2944 		put_unaligned_be32((u32)rmd->disk_block, &rmd->cdb[2]);
2945 		rmd->cdb[6] = 0;
2946 		put_unaligned_be16((u16)rmd->disk_block_cnt, &rmd->cdb[7]);
2947 		rmd->cdb[9] = 0;
2948 		rmd->cdb_length = 10;
2949 	}
2950 }
2951 
2952 static void pqi_calc_aio_r1_nexus(struct raid_map *raid_map,
2953 	struct pqi_scsi_dev_raid_map_data *rmd)
2954 {
2955 	u32 index;
2956 	u32 group;
2957 
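	/* Each mirror copy of a data drive sits one full row further into the RAID map. */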
2958 	group = rmd->map_index / rmd->data_disks_per_row;
2959 
2960 	index = rmd->map_index - (group * rmd->data_disks_per_row);
2961 	rmd->it_nexus[0] = raid_map->disk_data[index].aio_handle;
2962 	index += rmd->data_disks_per_row;
2963 	rmd->it_nexus[1] = raid_map->disk_data[index].aio_handle;
2964 	if (rmd->layout_map_count > 2) {
2965 		index += rmd->data_disks_per_row;
2966 		rmd->it_nexus[2] = raid_map->disk_data[index].aio_handle;
2967 	}
2968 
2969 	rmd->num_it_nexus_entries = rmd->layout_map_count;
2970 }
2971 
2972 static int pqi_raid_bypass_submit_scsi_cmd(struct pqi_ctrl_info *ctrl_info,
2973 	struct pqi_scsi_dev *device, struct scsi_cmnd *scmd,
2974 	struct pqi_queue_group *queue_group)
2975 {
2976 	int rc;
2977 	struct raid_map *raid_map;
2978 	u32 group;
2979 	u32 next_bypass_group;
2980 	struct pqi_encryption_info *encryption_info_ptr;
2981 	struct pqi_encryption_info encryption_info;
2982 	struct pqi_scsi_dev_raid_map_data rmd = { 0 };
2983 
2984 	rc = pqi_get_aio_lba_and_block_count(scmd, &rmd);
2985 	if (rc)
2986 		return PQI_RAID_BYPASS_INELIGIBLE;
2987 
2988 	rmd.raid_level = device->raid_level;
2989 
2990 	if (!pqi_aio_raid_level_supported(ctrl_info, &rmd))
2991 		return PQI_RAID_BYPASS_INELIGIBLE;
2992 
2993 	if (unlikely(rmd.block_cnt == 0))
2994 		return PQI_RAID_BYPASS_INELIGIBLE;
2995 
2996 	raid_map = device->raid_map;
2997 
2998 	rc = pci_get_aio_common_raid_map_values(ctrl_info, &rmd, raid_map);
2999 	if (rc)
3000 		return PQI_RAID_BYPASS_INELIGIBLE;
3001 
3002 	if (device->raid_level == SA_RAID_1 ||
3003 		device->raid_level == SA_RAID_TRIPLE) {
3004 		if (rmd.is_write) {
3005 			pqi_calc_aio_r1_nexus(raid_map, &rmd);
3006 		} else {
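			/* Spread reads across the mirror copies in round-robin fashion. */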
3007 			group = device->next_bypass_group[rmd.map_index];
3008 			next_bypass_group = group + 1;
3009 			if (next_bypass_group >= rmd.layout_map_count)
3010 				next_bypass_group = 0;
3011 			device->next_bypass_group[rmd.map_index] = next_bypass_group;
3012 			rmd.map_index += group * rmd.data_disks_per_row;
3013 		}
3014 	} else if ((device->raid_level == SA_RAID_5 ||
3015 		device->raid_level == SA_RAID_6) &&
3016 		(rmd.layout_map_count > 1 || rmd.is_write)) {
3017 		rc = pqi_calc_aio_r5_or_r6(&rmd, raid_map);
3018 		if (rc)
3019 			return PQI_RAID_BYPASS_INELIGIBLE;
3020 	}
3021 
3022 	if (unlikely(rmd.map_index >= RAID_MAP_MAX_ENTRIES))
3023 		return PQI_RAID_BYPASS_INELIGIBLE;
3024 
3025 	rmd.aio_handle = raid_map->disk_data[rmd.map_index].aio_handle;
3026 	rmd.disk_block = get_unaligned_le64(&raid_map->disk_starting_blk) +
3027 		rmd.first_row * rmd.strip_size +
3028 		(rmd.first_row_offset - rmd.first_column * rmd.strip_size);
3029 	rmd.disk_block_cnt = rmd.block_cnt;
3030 
3031 	/* Handle differing logical/physical block sizes. */
3032 	if (raid_map->phys_blk_shift) {
3033 		rmd.disk_block <<= raid_map->phys_blk_shift;
3034 		rmd.disk_block_cnt <<= raid_map->phys_blk_shift;
3035 	}
3036 
3037 	if (unlikely(rmd.disk_block_cnt > 0xffff))
3038 		return PQI_RAID_BYPASS_INELIGIBLE;
3039 
3040 	pqi_set_aio_cdb(&rmd);
3041 
3042 	if (get_unaligned_le16(&raid_map->flags) & RAID_MAP_ENCRYPTION_ENABLED) {
3043 		if (rmd.data_length > device->max_transfer_encrypted)
3044 			return PQI_RAID_BYPASS_INELIGIBLE;
3045 		pqi_set_encryption_info(&encryption_info, raid_map, rmd.first_block);
3046 		encryption_info_ptr = &encryption_info;
3047 	} else {
3048 		encryption_info_ptr = NULL;
3049 	}
3050 
3051 	if (rmd.is_write) {
3052 		switch (device->raid_level) {
3053 		case SA_RAID_1:
3054 		case SA_RAID_TRIPLE:
3055 			return pqi_aio_submit_r1_write_io(ctrl_info, scmd, queue_group,
3056 				encryption_info_ptr, device, &rmd);
3057 		case SA_RAID_5:
3058 		case SA_RAID_6:
3059 			return pqi_aio_submit_r56_write_io(ctrl_info, scmd, queue_group,
3060 				encryption_info_ptr, device, &rmd);
3061 		}
3062 	}
3063 
3064 	return pqi_aio_submit_io(ctrl_info, scmd, rmd.aio_handle,
3065 		rmd.cdb, rmd.cdb_length, queue_group,
3066 		encryption_info_ptr, true, false);
3067 }
3068 
3069 #define PQI_STATUS_IDLE		0x0
3070 
3071 #define PQI_CREATE_ADMIN_QUEUE_PAIR	1
3072 #define PQI_DELETE_ADMIN_QUEUE_PAIR	2
3073 
3074 #define PQI_DEVICE_STATE_POWER_ON_AND_RESET		0x0
3075 #define PQI_DEVICE_STATE_STATUS_AVAILABLE		0x1
3076 #define PQI_DEVICE_STATE_ALL_REGISTERS_READY		0x2
3077 #define PQI_DEVICE_STATE_ADMIN_QUEUE_PAIR_READY		0x3
3078 #define PQI_DEVICE_STATE_ERROR				0x4
3079 
3080 #define PQI_MODE_READY_TIMEOUT_SECS		30
3081 #define PQI_MODE_READY_POLL_INTERVAL_MSECS	1
3082 
3083 static int pqi_wait_for_pqi_mode_ready(struct pqi_ctrl_info *ctrl_info)
3084 {
3085 	struct pqi_device_registers __iomem *pqi_registers;
3086 	unsigned long timeout;
3087 	u64 signature;
3088 	u8 status;
3089 
3090 	pqi_registers = ctrl_info->pqi_registers;
3091 	timeout = (PQI_MODE_READY_TIMEOUT_SECS * HZ) + jiffies;
3092 
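	/* Wait for the PQI signature to appear. */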
3093 	while (1) {
3094 		signature = readq(&pqi_registers->signature);
3095 		if (memcmp(&signature, PQI_DEVICE_SIGNATURE,
3096 			sizeof(signature)) == 0)
3097 			break;
3098 		if (time_after(jiffies, timeout)) {
3099 			dev_err(&ctrl_info->pci_dev->dev,
3100 				"timed out waiting for PQI signature\n");
3101 			return -ETIMEDOUT;
3102 		}
3103 		msleep(PQI_MODE_READY_POLL_INTERVAL_MSECS);
3104 	}
3105 
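	/* Wait for the controller to report idle status. */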
3106 	while (1) {
3107 		status = readb(&pqi_registers->function_and_status_code);
3108 		if (status == PQI_STATUS_IDLE)
3109 			break;
3110 		if (time_after(jiffies, timeout)) {
3111 			dev_err(&ctrl_info->pci_dev->dev,
3112 				"timed out waiting for PQI IDLE\n");
3113 			return -ETIMEDOUT;
3114 		}
3115 		msleep(PQI_MODE_READY_POLL_INTERVAL_MSECS);
3116 	}
3117 
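	/* Wait for the device state to indicate all registers are ready. */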
3118 	while (1) {
3119 		if (readl(&pqi_registers->device_status) ==
3120 			PQI_DEVICE_STATE_ALL_REGISTERS_READY)
3121 			break;
3122 		if (time_after(jiffies, timeout)) {
3123 			dev_err(&ctrl_info->pci_dev->dev,
3124 				"timed out waiting for PQI all registers ready\n");
3125 			return -ETIMEDOUT;
3126 		}
3127 		msleep(PQI_MODE_READY_POLL_INTERVAL_MSECS);
3128 	}
3129 
3130 	return 0;
3131 }
3132 
3133 static inline void pqi_aio_path_disabled(struct pqi_io_request *io_request)
3134 {
3135 	struct pqi_scsi_dev *device;
3136 
3137 	device = io_request->scmd->device->hostdata;
3138 	device->raid_bypass_enabled = false;
3139 	device->aio_enabled = false;
3140 }
3141 
3142 static inline void pqi_take_device_offline(struct scsi_device *sdev, char *path)
3143 {
3144 	struct pqi_ctrl_info *ctrl_info;
3145 	struct pqi_scsi_dev *device;
3146 
3147 	device = sdev->hostdata;
3148 	if (device->device_offline)
3149 		return;
3150 
3151 	device->device_offline = true;
3152 	ctrl_info = shost_to_hba(sdev->host);
3153 	pqi_schedule_rescan_worker(ctrl_info);
3154 	dev_err(&ctrl_info->pci_dev->dev, "re-scanning %s scsi %d:%d:%d:%d\n",
3155 		path, ctrl_info->scsi_host->host_no, device->bus,
3156 		device->target, device->lun);
3157 }
3158 
3159 static void pqi_process_raid_io_error(struct pqi_io_request *io_request)
3160 {
3161 	u8 scsi_status;
3162 	u8 host_byte;
3163 	struct scsi_cmnd *scmd;
3164 	struct pqi_raid_error_info *error_info;
3165 	size_t sense_data_length;
3166 	int residual_count;
3167 	int xfer_count;
3168 	struct scsi_sense_hdr sshdr;
3169 
3170 	scmd = io_request->scmd;
3171 	if (!scmd)
3172 		return;
3173 
3174 	error_info = io_request->error_info;
3175 	scsi_status = error_info->status;
3176 	host_byte = DID_OK;
3177 
3178 	switch (error_info->data_out_result) {
3179 	case PQI_DATA_IN_OUT_GOOD:
3180 		break;
3181 	case PQI_DATA_IN_OUT_UNDERFLOW:
3182 		xfer_count =
3183 			get_unaligned_le32(&error_info->data_out_transferred);
3184 		residual_count = scsi_bufflen(scmd) - xfer_count;
3185 		scsi_set_resid(scmd, residual_count);
3186 		if (xfer_count < scmd->underflow)
3187 			host_byte = DID_SOFT_ERROR;
3188 		break;
3189 	case PQI_DATA_IN_OUT_UNSOLICITED_ABORT:
3190 	case PQI_DATA_IN_OUT_ABORTED:
3191 		host_byte = DID_ABORT;
3192 		break;
3193 	case PQI_DATA_IN_OUT_TIMEOUT:
3194 		host_byte = DID_TIME_OUT;
3195 		break;
3196 	case PQI_DATA_IN_OUT_BUFFER_OVERFLOW:
3197 	case PQI_DATA_IN_OUT_PROTOCOL_ERROR:
3198 	case PQI_DATA_IN_OUT_BUFFER_ERROR:
3199 	case PQI_DATA_IN_OUT_BUFFER_OVERFLOW_DESCRIPTOR_AREA:
3200 	case PQI_DATA_IN_OUT_BUFFER_OVERFLOW_BRIDGE:
3201 	case PQI_DATA_IN_OUT_ERROR:
3202 	case PQI_DATA_IN_OUT_HARDWARE_ERROR:
3203 	case PQI_DATA_IN_OUT_PCIE_FABRIC_ERROR:
3204 	case PQI_DATA_IN_OUT_PCIE_COMPLETION_TIMEOUT:
3205 	case PQI_DATA_IN_OUT_PCIE_COMPLETER_ABORT_RECEIVED:
3206 	case PQI_DATA_IN_OUT_PCIE_UNSUPPORTED_REQUEST_RECEIVED:
3207 	case PQI_DATA_IN_OUT_PCIE_ECRC_CHECK_FAILED:
3208 	case PQI_DATA_IN_OUT_PCIE_UNSUPPORTED_REQUEST:
3209 	case PQI_DATA_IN_OUT_PCIE_ACS_VIOLATION:
3210 	case PQI_DATA_IN_OUT_PCIE_TLP_PREFIX_BLOCKED:
3211 	case PQI_DATA_IN_OUT_PCIE_POISONED_MEMORY_READ:
3212 	default:
3213 		host_byte = DID_ERROR;
3214 		break;
3215 	}
3216 
3217 	sense_data_length = get_unaligned_le16(&error_info->sense_data_length);
3218 	if (sense_data_length == 0)
3219 		sense_data_length =
3220 			get_unaligned_le16(&error_info->response_data_length);
3221 	if (sense_data_length) {
3222 		if (sense_data_length > sizeof(error_info->data))
3223 			sense_data_length = sizeof(error_info->data);
3224 
3225 		if (scsi_status == SAM_STAT_CHECK_CONDITION &&
3226 			scsi_normalize_sense(error_info->data,
3227 				sense_data_length, &sshdr) &&
3228 				sshdr.sense_key == HARDWARE_ERROR &&
3229 				sshdr.asc == 0x3e) {
3230 			struct pqi_ctrl_info *ctrl_info = shost_to_hba(scmd->device->host);
3231 			struct pqi_scsi_dev *device = scmd->device->hostdata;
3232 
3233 			switch (sshdr.ascq) {
3234 			case 0x1: /* LOGICAL UNIT FAILURE */
3235 				if (printk_ratelimit())
3236 					scmd_printk(KERN_ERR, scmd, "received 'logical unit failure' from controller for scsi %d:%d:%d:%d\n",
3237 						ctrl_info->scsi_host->host_no, device->bus, device->target, device->lun);
3238 				pqi_take_device_offline(scmd->device, "RAID");
3239 				host_byte = DID_NO_CONNECT;
3240 				break;
3241 
3242 			default: /* See http://www.t10.org/lists/asc-num.htm#ASC_3E */
3243 				if (printk_ratelimit())
3244 					scmd_printk(KERN_ERR, scmd, "received unhandled error %d from controller for scsi %d:%d:%d:%d\n",
3245 						sshdr.ascq, ctrl_info->scsi_host->host_no, device->bus, device->target, device->lun);
3246 				break;
3247 			}
3248 		}
3249 
3250 		if (sense_data_length > SCSI_SENSE_BUFFERSIZE)
3251 			sense_data_length = SCSI_SENSE_BUFFERSIZE;
3252 		memcpy(scmd->sense_buffer, error_info->data,
3253 			sense_data_length);
3254 	}
3255 
3256 	if (pqi_cmd_priv(scmd)->this_residual &&
3257 	    !pqi_is_logical_device(scmd->device->hostdata) &&
3258 	    scsi_status == SAM_STAT_CHECK_CONDITION &&
3259 	    host_byte == DID_OK &&
3260 	    sense_data_length &&
3261 	    scsi_normalize_sense(error_info->data, sense_data_length, &sshdr) &&
3262 	    sshdr.sense_key == ILLEGAL_REQUEST &&
3263 	    sshdr.asc == 0x26 &&
3264 	    sshdr.ascq == 0x0) {
3265 		host_byte = DID_NO_CONNECT;
3266 		pqi_take_device_offline(scmd->device, "AIO");
3267 		scsi_build_sense_buffer(0, scmd->sense_buffer, HARDWARE_ERROR, 0x3e, 0x1);
3268 	}
3269 
3270 	scmd->result = scsi_status;
3271 	set_host_byte(scmd, host_byte);
3272 }
3273 
3274 static void pqi_process_aio_io_error(struct pqi_io_request *io_request)
3275 {
3276 	u8 scsi_status;
3277 	u8 host_byte;
3278 	struct scsi_cmnd *scmd;
3279 	struct pqi_aio_error_info *error_info;
3280 	size_t sense_data_length;
3281 	int residual_count;
3282 	int xfer_count;
3283 	bool device_offline;
3284 
3285 	scmd = io_request->scmd;
3286 	error_info = io_request->error_info;
3287 	host_byte = DID_OK;
3288 	sense_data_length = 0;
3289 	device_offline = false;
3290 
3291 	switch (error_info->service_response) {
3292 	case PQI_AIO_SERV_RESPONSE_COMPLETE:
3293 		scsi_status = error_info->status;
3294 		break;
3295 	case PQI_AIO_SERV_RESPONSE_FAILURE:
3296 		switch (error_info->status) {
3297 		case PQI_AIO_STATUS_IO_ABORTED:
3298 			scsi_status = SAM_STAT_TASK_ABORTED;
3299 			break;
3300 		case PQI_AIO_STATUS_UNDERRUN:
3301 			scsi_status = SAM_STAT_GOOD;
3302 			residual_count = get_unaligned_le32(
3303 						&error_info->residual_count);
3304 			scsi_set_resid(scmd, residual_count);
3305 			xfer_count = scsi_bufflen(scmd) - residual_count;
3306 			if (xfer_count < scmd->underflow)
3307 				host_byte = DID_SOFT_ERROR;
3308 			break;
3309 		case PQI_AIO_STATUS_OVERRUN:
3310 			scsi_status = SAM_STAT_GOOD;
3311 			break;
3312 		case PQI_AIO_STATUS_AIO_PATH_DISABLED:
3313 			pqi_aio_path_disabled(io_request);
3314 			scsi_status = SAM_STAT_GOOD;
3315 			io_request->status = -EAGAIN;
3316 			break;
3317 		case PQI_AIO_STATUS_NO_PATH_TO_DEVICE:
3318 		case PQI_AIO_STATUS_INVALID_DEVICE:
3319 			if (!io_request->raid_bypass) {
3320 				device_offline = true;
3321 				pqi_take_device_offline(scmd->device, "AIO");
3322 				host_byte = DID_NO_CONNECT;
3323 			}
3324 			scsi_status = SAM_STAT_CHECK_CONDITION;
3325 			break;
3326 		case PQI_AIO_STATUS_IO_ERROR:
3327 		default:
3328 			scsi_status = SAM_STAT_CHECK_CONDITION;
3329 			break;
3330 		}
3331 		break;
3332 	case PQI_AIO_SERV_RESPONSE_TMF_COMPLETE:
3333 	case PQI_AIO_SERV_RESPONSE_TMF_SUCCEEDED:
3334 		scsi_status = SAM_STAT_GOOD;
3335 		break;
3336 	case PQI_AIO_SERV_RESPONSE_TMF_REJECTED:
3337 	case PQI_AIO_SERV_RESPONSE_TMF_INCORRECT_LUN:
3338 	default:
3339 		scsi_status = SAM_STAT_CHECK_CONDITION;
3340 		break;
3341 	}
3342 
3343 	if (error_info->data_present) {
3344 		sense_data_length =
3345 			get_unaligned_le16(&error_info->data_length);
3346 		if (sense_data_length) {
3347 			if (sense_data_length > sizeof(error_info->data))
3348 				sense_data_length = sizeof(error_info->data);
3349 			if (sense_data_length > SCSI_SENSE_BUFFERSIZE)
3350 				sense_data_length = SCSI_SENSE_BUFFERSIZE;
3351 			memcpy(scmd->sense_buffer, error_info->data,
3352 				sense_data_length);
3353 		}
3354 	}
3355 
3356 	if (device_offline && sense_data_length == 0)
3357 		scsi_build_sense(scmd, 0, HARDWARE_ERROR, 0x3e, 0x1);
3358 
3359 	scmd->result = scsi_status;
3360 	set_host_byte(scmd, host_byte);
3361 }
3362 
3363 static void pqi_process_io_error(unsigned int iu_type,
3364 	struct pqi_io_request *io_request)
3365 {
3366 	switch (iu_type) {
3367 	case PQI_RESPONSE_IU_RAID_PATH_IO_ERROR:
3368 		pqi_process_raid_io_error(io_request);
3369 		break;
3370 	case PQI_RESPONSE_IU_AIO_PATH_IO_ERROR:
3371 		pqi_process_aio_io_error(io_request);
3372 		break;
3373 	}
3374 }
3375 
3376 static int pqi_interpret_task_management_response(struct pqi_ctrl_info *ctrl_info,
3377 	struct pqi_task_management_response *response)
3378 {
3379 	int rc;
3380 
3381 	switch (response->response_code) {
3382 	case SOP_TMF_COMPLETE:
3383 	case SOP_TMF_FUNCTION_SUCCEEDED:
3384 		rc = 0;
3385 		break;
3386 	case SOP_TMF_REJECTED:
3387 		rc = -EAGAIN;
3388 		break;
3389 	case SOP_TMF_INCORRECT_LOGICAL_UNIT:
3390 		rc = -ENODEV;
3391 		break;
3392 	default:
3393 		rc = -EIO;
3394 		break;
3395 	}
3396 
3397 	if (rc)
3398 		dev_err(&ctrl_info->pci_dev->dev,
3399 			"Task Management Function error: %d (response code: %u)\n", rc, response->response_code);
3400 
3401 	return rc;
3402 }
3403 
3404 static inline void pqi_invalid_response(struct pqi_ctrl_info *ctrl_info,
3405 	enum pqi_ctrl_shutdown_reason ctrl_shutdown_reason)
3406 {
3407 	pqi_take_ctrl_offline(ctrl_info, ctrl_shutdown_reason);
3408 }
3409 
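/*
 * Drain one queue group's outbound (completion) queue. The producer
 * index is advanced by the controller; the consumer index is advanced
 * here and written back once per batch. An out-of-range producer index,
 * an unknown request ID, or an unexpected IU type is treated as fatal
 * and takes the controller offline via pqi_invalid_response().
 */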
3410 static int pqi_process_io_intr(struct pqi_ctrl_info *ctrl_info, struct pqi_queue_group *queue_group)
3411 {
3412 	int num_responses;
3413 	pqi_index_t oq_pi;
3414 	pqi_index_t oq_ci;
3415 	struct pqi_io_request *io_request;
3416 	struct pqi_io_response *response;
3417 	u16 request_id;
3418 
3419 	num_responses = 0;
3420 	oq_ci = queue_group->oq_ci_copy;
3421 
3422 	while (1) {
3423 		oq_pi = readl(queue_group->oq_pi);
3424 		if (oq_pi >= ctrl_info->num_elements_per_oq) {
3425 			pqi_invalid_response(ctrl_info, PQI_IO_PI_OUT_OF_RANGE);
3426 			dev_err(&ctrl_info->pci_dev->dev,
3427 				"I/O interrupt: producer index (%u) out of range (0-%u): consumer index: %u\n",
3428 				oq_pi, ctrl_info->num_elements_per_oq - 1, oq_ci);
3429 			return -1;
3430 		}
3431 		if (oq_pi == oq_ci)
3432 			break;
3433 
3434 		num_responses++;
3435 		response = queue_group->oq_element_array +
3436 			(oq_ci * PQI_OPERATIONAL_OQ_ELEMENT_LENGTH);
3437 
3438 		request_id = get_unaligned_le16(&response->request_id);
3439 		if (request_id >= ctrl_info->max_io_slots) {
3440 			pqi_invalid_response(ctrl_info, PQI_INVALID_REQ_ID);
3441 			dev_err(&ctrl_info->pci_dev->dev,
3442 				"request ID in response (%u) out of range (0-%u): producer index: %u  consumer index: %u\n",
3443 				request_id, ctrl_info->max_io_slots - 1, oq_pi, oq_ci);
3444 			return -1;
3445 		}
3446 
3447 		io_request = &ctrl_info->io_request_pool[request_id];
3448 		if (atomic_read(&io_request->refcount) == 0) {
3449 			pqi_invalid_response(ctrl_info, PQI_UNMATCHED_REQ_ID);
3450 			dev_err(&ctrl_info->pci_dev->dev,
3451 				"request ID in response (%u) does not match an outstanding I/O request: producer index: %u  consumer index: %u\n",
3452 				request_id, oq_pi, oq_ci);
3453 			return -1;
3454 		}
3455 
3456 		switch (response->header.iu_type) {
3457 		case PQI_RESPONSE_IU_RAID_PATH_IO_SUCCESS:
3458 		case PQI_RESPONSE_IU_AIO_PATH_IO_SUCCESS:
3459 			if (io_request->scmd)
3460 				io_request->scmd->result = 0;
3461 			fallthrough;
3462 		case PQI_RESPONSE_IU_GENERAL_MANAGEMENT:
3463 			break;
3464 		case PQI_RESPONSE_IU_VENDOR_GENERAL:
3465 			io_request->status =
3466 				get_unaligned_le16(
3467 				&((struct pqi_vendor_general_response *)response)->status);
3468 			break;
3469 		case PQI_RESPONSE_IU_TASK_MANAGEMENT:
3470 			io_request->status = pqi_interpret_task_management_response(ctrl_info,
3471 				(void *)response);
3472 			break;
3473 		case PQI_RESPONSE_IU_AIO_PATH_DISABLED:
3474 			pqi_aio_path_disabled(io_request);
3475 			io_request->status = -EAGAIN;
3476 			break;
3477 		case PQI_RESPONSE_IU_RAID_PATH_IO_ERROR:
3478 		case PQI_RESPONSE_IU_AIO_PATH_IO_ERROR:
3479 			io_request->error_info = ctrl_info->error_buffer +
3480 				(get_unaligned_le16(&response->error_index) *
3481 				PQI_ERROR_BUFFER_ELEMENT_LENGTH);
3482 			pqi_process_io_error(response->header.iu_type, io_request);
3483 			break;
3484 		default:
3485 			pqi_invalid_response(ctrl_info, PQI_UNEXPECTED_IU_TYPE);
3486 			dev_err(&ctrl_info->pci_dev->dev,
3487 				"unexpected IU type: 0x%x: producer index: %u  consumer index: %u\n",
3488 				response->header.iu_type, oq_pi, oq_ci);
3489 			return -1;
3490 		}
3491 
3492 		io_request->io_complete_callback(io_request, io_request->context);
3493 
3494 		/*
3495 		 * Note that the I/O request structure CANNOT BE TOUCHED after
3496 		 * returning from the I/O completion callback!
3497 		 */
3498 		oq_ci = (oq_ci + 1) % ctrl_info->num_elements_per_oq;
3499 	}
3500 
3501 	if (num_responses) {
3502 		queue_group->oq_ci_copy = oq_ci;
3503 		writel(oq_ci, queue_group->oq_ci);
3504 	}
3505 
3506 	return num_responses;
3507 }
3508 
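/*
 * Circular-queue accounting: one slot is intentionally left unused so
 * that pi == ci always means "empty" and never "full". For example,
 * with 8 elements, pi == 2 and ci == 5 gives 8 - (8 - 5 + 2) - 1 = 2
 * free slots.
 */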
3509 static inline unsigned int pqi_num_elements_free(unsigned int pi,
3510 	unsigned int ci, unsigned int elements_in_queue)
3511 {
3512 	unsigned int num_elements_used;
3513 
3514 	if (pi >= ci)
3515 		num_elements_used = pi - ci;
3516 	else
3517 		num_elements_used = elements_in_queue - ci + pi;
3518 
3519 	return elements_in_queue - num_elements_used - 1;
3520 }
3521 
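/*
 * Post an event acknowledgement on the RAID path of the default queue
 * group. If the inbound queue is momentarily full, keep retrying
 * (dropping the submit lock between attempts) until a slot frees up or
 * the controller goes offline.
 */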
3522 static void pqi_send_event_ack(struct pqi_ctrl_info *ctrl_info,
3523 	struct pqi_event_acknowledge_request *iu, size_t iu_length)
3524 {
3525 	pqi_index_t iq_pi;
3526 	pqi_index_t iq_ci;
3527 	unsigned long flags;
3528 	void *next_element;
3529 	struct pqi_queue_group *queue_group;
3530 
3531 	queue_group = &ctrl_info->queue_groups[PQI_DEFAULT_QUEUE_GROUP];
3532 	put_unaligned_le16(queue_group->oq_id, &iu->header.response_queue_id);
3533 
3534 	while (1) {
3535 		spin_lock_irqsave(&queue_group->submit_lock[RAID_PATH], flags);
3536 
3537 		iq_pi = queue_group->iq_pi_copy[RAID_PATH];
3538 		iq_ci = readl(queue_group->iq_ci[RAID_PATH]);
3539 
3540 		if (pqi_num_elements_free(iq_pi, iq_ci,
3541 			ctrl_info->num_elements_per_iq))
3542 			break;
3543 
3544 		spin_unlock_irqrestore(
3545 			&queue_group->submit_lock[RAID_PATH], flags);
3546 
3547 		if (pqi_ctrl_offline(ctrl_info))
3548 			return;
3549 	}
3550 
3551 	next_element = queue_group->iq_element_array[RAID_PATH] +
3552 		(iq_pi * PQI_OPERATIONAL_IQ_ELEMENT_LENGTH);
3553 
3554 	memcpy(next_element, iu, iu_length);
3555 
3556 	iq_pi = (iq_pi + 1) % ctrl_info->num_elements_per_iq;
3557 	queue_group->iq_pi_copy[RAID_PATH] = iq_pi;
3558 
3559 	/*
3560 	 * This write notifies the controller that an IU is available to be
3561 	 * processed.
3562 	 */
3563 	writel(iq_pi, queue_group->iq_pi[RAID_PATH]);
3564 
3565 	spin_unlock_irqrestore(&queue_group->submit_lock[RAID_PATH], flags);
3566 }
3567 
3568 static void pqi_acknowledge_event(struct pqi_ctrl_info *ctrl_info,
3569 	struct pqi_event *event)
3570 {
3571 	struct pqi_event_acknowledge_request request;
3572 
3573 	memset(&request, 0, sizeof(request));
3574 
3575 	request.header.iu_type = PQI_REQUEST_IU_ACKNOWLEDGE_VENDOR_EVENT;
3576 	put_unaligned_le16(sizeof(request) - PQI_REQUEST_HEADER_LENGTH,
3577 		&request.header.iu_length);
3578 	request.event_type = event->event_type;
3579 	put_unaligned_le16(event->event_id, &request.event_id);
3580 	put_unaligned_le32(event->additional_event_id, &request.additional_event_id);
3581 
3582 	pqi_send_event_ack(ctrl_info, &request, sizeof(request));
3583 }
3584 
3585 #define PQI_SOFT_RESET_STATUS_TIMEOUT_SECS		30
3586 #define PQI_SOFT_RESET_STATUS_POLL_INTERVAL_SECS	1
3587 
3588 static enum pqi_soft_reset_status pqi_poll_for_soft_reset_status(
3589 	struct pqi_ctrl_info *ctrl_info)
3590 {
3591 	u8 status;
3592 	unsigned long timeout;
3593 
3594 	timeout = (PQI_SOFT_RESET_STATUS_TIMEOUT_SECS * HZ) + jiffies;
3595 
3596 	while (1) {
3597 		status = pqi_read_soft_reset_status(ctrl_info);
3598 		if (status & PQI_SOFT_RESET_INITIATE)
3599 			return RESET_INITIATE_DRIVER;
3600 
3601 		if (status & PQI_SOFT_RESET_ABORT)
3602 			return RESET_ABORT;
3603 
3604 		if (!sis_is_firmware_running(ctrl_info))
3605 			return RESET_NORESPONSE;
3606 
3607 		if (time_after(jiffies, timeout)) {
3608 			dev_warn(&ctrl_info->pci_dev->dev,
3609 				"timed out waiting for soft reset status\n");
3610 			return RESET_TIMEDOUT;
3611 		}
3612 
3613 		ssleep(PQI_SOFT_RESET_STATUS_POLL_INTERVAL_SECS);
3614 	}
3615 }
3616 
3617 static void pqi_process_soft_reset(struct pqi_ctrl_info *ctrl_info)
3618 {
3619 	int rc;
3620 	unsigned int delay_secs;
3621 	enum pqi_soft_reset_status reset_status;
3622 
3623 	if (ctrl_info->soft_reset_handshake_supported)
3624 		reset_status = pqi_poll_for_soft_reset_status(ctrl_info);
3625 	else
3626 		reset_status = RESET_INITIATE_FIRMWARE;
3627 
3628 	delay_secs = PQI_POST_RESET_DELAY_SECS;
3629 
3630 	switch (reset_status) {
3631 	case RESET_TIMEDOUT:
3632 		delay_secs = PQI_POST_OFA_RESET_DELAY_UPON_TIMEOUT_SECS;
3633 		fallthrough;
3634 	case RESET_INITIATE_DRIVER:
3635 		dev_info(&ctrl_info->pci_dev->dev,
3636 				"Online Firmware Activation: resetting controller\n");
3637 		sis_soft_reset(ctrl_info);
3638 		fallthrough;
3639 	case RESET_INITIATE_FIRMWARE:
3640 		ctrl_info->pqi_mode_enabled = false;
3641 		pqi_save_ctrl_mode(ctrl_info, SIS_MODE);
3642 		rc = pqi_ofa_ctrl_restart(ctrl_info, delay_secs);
3643 		pqi_host_free_buffer(ctrl_info, &ctrl_info->ofa_memory);
3644 		pqi_ctrl_ofa_done(ctrl_info);
3645 		dev_info(&ctrl_info->pci_dev->dev,
3646 				"Online Firmware Activation: %s\n",
3647 				rc == 0 ? "SUCCESS" : "FAILED");
3648 		break;
3649 	case RESET_ABORT:
3650 		dev_info(&ctrl_info->pci_dev->dev,
3651 				"Online Firmware Activation ABORTED\n");
3652 		if (ctrl_info->soft_reset_handshake_supported)
3653 			pqi_clear_soft_reset_status(ctrl_info);
3654 		pqi_host_free_buffer(ctrl_info, &ctrl_info->ofa_memory);
3655 		pqi_ctrl_ofa_done(ctrl_info);
3656 		pqi_ofa_ctrl_unquiesce(ctrl_info);
3657 		break;
3658 	case RESET_NORESPONSE:
3659 		fallthrough;
3660 	default:
3661 		dev_err(&ctrl_info->pci_dev->dev,
3662 			"unexpected Online Firmware Activation reset status: 0x%x\n",
3663 			reset_status);
3664 		pqi_host_free_buffer(ctrl_info, &ctrl_info->ofa_memory);
3665 		pqi_ctrl_ofa_done(ctrl_info);
3666 		pqi_ofa_ctrl_unquiesce(ctrl_info);
3667 		pqi_take_ctrl_offline(ctrl_info, PQI_OFA_RESPONSE_TIMEOUT);
3668 		break;
3669 	}
3670 }
3671 
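/*
 * Online Firmware Activation (OFA) flow: the controller first asks the
 * host to allocate a staging buffer (memory-alloc worker below), later
 * asks the host to quiesce I/O (quiesce worker), and the quiesce worker
 * then acks the event and drives the soft-reset handshake above.
 */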
3672 static void pqi_ofa_memory_alloc_worker(struct work_struct *work)
3673 {
3674 	struct pqi_ctrl_info *ctrl_info;
3675 
3676 	ctrl_info = container_of(work, struct pqi_ctrl_info, ofa_memory_alloc_work);
3677 
3678 	pqi_ctrl_ofa_start(ctrl_info);
3679 	pqi_host_setup_buffer(ctrl_info, &ctrl_info->ofa_memory, ctrl_info->ofa_bytes_requested, ctrl_info->ofa_bytes_requested);
3680 	pqi_host_memory_update(ctrl_info, &ctrl_info->ofa_memory, PQI_VENDOR_GENERAL_OFA_MEMORY_UPDATE);
3681 }
3682 
3683 static void pqi_ofa_quiesce_worker(struct work_struct *work)
3684 {
3685 	struct pqi_ctrl_info *ctrl_info;
3686 	struct pqi_event *event;
3687 
3688 	ctrl_info = container_of(work, struct pqi_ctrl_info, ofa_quiesce_work);
3689 
3690 	event = &ctrl_info->events[pqi_event_type_to_event_index(PQI_EVENT_TYPE_OFA)];
3691 
3692 	pqi_ofa_ctrl_quiesce(ctrl_info);
3693 	pqi_acknowledge_event(ctrl_info, event);
3694 	pqi_process_soft_reset(ctrl_info);
3695 }
3696 
3697 static bool pqi_ofa_process_event(struct pqi_ctrl_info *ctrl_info,
3698 	struct pqi_event *event)
3699 {
3700 	bool ack_event;
3701 
3702 	ack_event = true;
3703 
3704 	switch (event->event_id) {
3705 	case PQI_EVENT_OFA_MEMORY_ALLOCATION:
3706 		dev_info(&ctrl_info->pci_dev->dev,
3707 			"received Online Firmware Activation memory allocation request\n");
3708 		schedule_work(&ctrl_info->ofa_memory_alloc_work);
3709 		break;
3710 	case PQI_EVENT_OFA_QUIESCE:
3711 		dev_info(&ctrl_info->pci_dev->dev,
3712 			"received Online Firmware Activation quiesce request\n");
3713 		schedule_work(&ctrl_info->ofa_quiesce_work);
3714 		ack_event = false;
3715 		break;
3716 	case PQI_EVENT_OFA_CANCELED:
3717 		dev_info(&ctrl_info->pci_dev->dev,
3718 			"received Online Firmware Activation cancel request: reason: %u\n",
3719 			ctrl_info->ofa_cancel_reason);
3720 		pqi_host_free_buffer(ctrl_info, &ctrl_info->ofa_memory);
3721 		pqi_ctrl_ofa_done(ctrl_info);
3722 		break;
3723 	default:
3724 		dev_err(&ctrl_info->pci_dev->dev,
3725 			"received unknown Online Firmware Activation request: event ID: %u\n",
3726 			event->event_id);
3727 		break;
3728 	}
3729 
3730 	return ack_event;
3731 }
3732 
3733 static void pqi_mark_volumes_for_rescan(struct pqi_ctrl_info *ctrl_info)
3734 {
3735 	unsigned long flags;
3736 	struct pqi_scsi_dev *device;
3737 
3738 	spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
3739 
3740 	list_for_each_entry(device, &ctrl_info->scsi_device_list, scsi_device_list_entry) {
3741 		if (pqi_is_logical_device(device) && device->devtype == TYPE_DISK)
3742 			device->rescan = true;
3743 	}
3744 
3745 	spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
3746 }
3747 
3748 static void pqi_disable_raid_bypass(struct pqi_ctrl_info *ctrl_info)
3749 {
3750 	unsigned long flags;
3751 	struct pqi_scsi_dev *device;
3752 
3753 	spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
3754 
3755 	list_for_each_entry(device, &ctrl_info->scsi_device_list, scsi_device_list_entry)
3756 		if (device->raid_bypass_enabled)
3757 			device->raid_bypass_enabled = false;
3758 
3759 	spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
3760 }
3761 
3762 static void pqi_event_worker(struct work_struct *work)
3763 {
3764 	unsigned int i;
3765 	bool rescan_needed;
3766 	struct pqi_ctrl_info *ctrl_info;
3767 	struct pqi_event *event;
3768 	bool ack_event;
3769 
3770 	ctrl_info = container_of(work, struct pqi_ctrl_info, event_work);
3771 
3772 	pqi_ctrl_busy(ctrl_info);
3773 	pqi_wait_if_ctrl_blocked(ctrl_info);
3774 	if (pqi_ctrl_offline(ctrl_info))
3775 		goto out;
3776 
3777 	rescan_needed = false;
3778 	event = ctrl_info->events;
3779 	for (i = 0; i < PQI_NUM_SUPPORTED_EVENTS; i++) {
3780 		if (event->pending) {
3781 			event->pending = false;
3782 			if (event->event_type == PQI_EVENT_TYPE_OFA) {
3783 				ack_event = pqi_ofa_process_event(ctrl_info, event);
3784 			} else {
3785 				ack_event = true;
3786 				rescan_needed = true;
3787 				if (event->event_type == PQI_EVENT_TYPE_LOGICAL_DEVICE)
3788 					pqi_mark_volumes_for_rescan(ctrl_info);
3789 				else if (event->event_type == PQI_EVENT_TYPE_AIO_STATE_CHANGE)
3790 					pqi_disable_raid_bypass(ctrl_info);
3791 			}
3792 			if (ack_event)
3793 				pqi_acknowledge_event(ctrl_info, event);
3794 		}
3795 		event++;
3796 	}
3797 
3798 #define PQI_RESCAN_WORK_FOR_EVENT_DELAY		(5 * HZ)
3799 
3800 	if (rescan_needed)
3801 		pqi_schedule_rescan_worker_with_delay(ctrl_info,
3802 			PQI_RESCAN_WORK_FOR_EVENT_DELAY);
3803 
3804 out:
3805 	pqi_ctrl_unbusy(ctrl_info);
3806 }
3807 
3808 #define PQI_HEARTBEAT_TIMER_INTERVAL	(10 * HZ)
3809 
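/*
 * Heartbeat watchdog: every interval, sample both the MSI-X interrupt
 * count and the controller's heartbeat counter. The controller is only
 * declared dead when neither count has advanced since the previous
 * sample, so a controller that is still completing I/O is never taken
 * offline on a stale heartbeat counter alone.
 */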
3810 static void pqi_heartbeat_timer_handler(struct timer_list *t)
3811 {
3812 	int num_interrupts;
3813 	u32 heartbeat_count;
3814 	struct pqi_ctrl_info *ctrl_info = from_timer(ctrl_info, t, heartbeat_timer);
3815 
3816 	pqi_check_ctrl_health(ctrl_info);
3817 	if (pqi_ctrl_offline(ctrl_info))
3818 		return;
3819 
3820 	num_interrupts = atomic_read(&ctrl_info->num_interrupts);
3821 	heartbeat_count = pqi_read_heartbeat_counter(ctrl_info);
3822 
3823 	if (num_interrupts == ctrl_info->previous_num_interrupts) {
3824 		if (heartbeat_count == ctrl_info->previous_heartbeat_count) {
3825 			dev_err(&ctrl_info->pci_dev->dev,
3826 				"no heartbeat detected - last heartbeat count: %u\n",
3827 				heartbeat_count);
3828 			pqi_take_ctrl_offline(ctrl_info, PQI_NO_HEARTBEAT);
3829 			return;
3830 		}
3831 	} else {
3832 		ctrl_info->previous_num_interrupts = num_interrupts;
3833 	}
3834 
3835 	ctrl_info->previous_heartbeat_count = heartbeat_count;
3836 	mod_timer(&ctrl_info->heartbeat_timer,
3837 		jiffies + PQI_HEARTBEAT_TIMER_INTERVAL);
3838 }
3839 
3840 static void pqi_start_heartbeat_timer(struct pqi_ctrl_info *ctrl_info)
3841 {
3842 	if (!ctrl_info->heartbeat_counter)
3843 		return;
3844 
3845 	ctrl_info->previous_num_interrupts =
3846 		atomic_read(&ctrl_info->num_interrupts);
3847 	ctrl_info->previous_heartbeat_count =
3848 		pqi_read_heartbeat_counter(ctrl_info);
3849 
3850 	ctrl_info->heartbeat_timer.expires =
3851 		jiffies + PQI_HEARTBEAT_TIMER_INTERVAL;
3852 	add_timer(&ctrl_info->heartbeat_timer);
3853 }
3854 
3855 static inline void pqi_stop_heartbeat_timer(struct pqi_ctrl_info *ctrl_info)
3856 {
3857 	timer_delete_sync(&ctrl_info->heartbeat_timer);
3858 }
3859 
3860 static void pqi_ofa_capture_event_payload(struct pqi_ctrl_info *ctrl_info,
3861 	struct pqi_event *event, struct pqi_event_response *response)
3862 {
3863 	switch (event->event_id) {
3864 	case PQI_EVENT_OFA_MEMORY_ALLOCATION:
3865 		ctrl_info->ofa_bytes_requested =
3866 			get_unaligned_le32(&response->data.ofa_memory_allocation.bytes_requested);
3867 		break;
3868 	case PQI_EVENT_OFA_CANCELED:
3869 		ctrl_info->ofa_cancel_reason =
3870 			get_unaligned_le16(&response->data.ofa_cancelled.reason);
3871 		break;
3872 	}
3873 }
3874 
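/*
 * Drain the dedicated event queue. A response is latched into
 * ctrl_info->events[] only when it maps to a supported event type and
 * the controller requested an acknowledgement; the actual handling
 * (and the ack) is deferred to pqi_event_worker().
 */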
3875 static int pqi_process_event_intr(struct pqi_ctrl_info *ctrl_info)
3876 {
3877 	int num_events;
3878 	pqi_index_t oq_pi;
3879 	pqi_index_t oq_ci;
3880 	struct pqi_event_queue *event_queue;
3881 	struct pqi_event_response *response;
3882 	struct pqi_event *event;
3883 	int event_index;
3884 
3885 	event_queue = &ctrl_info->event_queue;
3886 	num_events = 0;
3887 	oq_ci = event_queue->oq_ci_copy;
3888 
3889 	while (1) {
3890 		oq_pi = readl(event_queue->oq_pi);
3891 		if (oq_pi >= PQI_NUM_EVENT_QUEUE_ELEMENTS) {
3892 			pqi_invalid_response(ctrl_info, PQI_EVENT_PI_OUT_OF_RANGE);
3893 			dev_err(&ctrl_info->pci_dev->dev,
3894 				"event interrupt: producer index (%u) out of range (0-%u): consumer index: %u\n",
3895 				oq_pi, PQI_NUM_EVENT_QUEUE_ELEMENTS - 1, oq_ci);
3896 			return -1;
3897 		}
3898 
3899 		if (oq_pi == oq_ci)
3900 			break;
3901 
3902 		num_events++;
3903 		response = event_queue->oq_element_array + (oq_ci * PQI_EVENT_OQ_ELEMENT_LENGTH);
3904 
3905 		event_index = pqi_event_type_to_event_index(response->event_type);
3906 
3907 		if (event_index >= 0 && response->request_acknowledge) {
3908 			event = &ctrl_info->events[event_index];
3909 			event->pending = true;
3910 			event->event_type = response->event_type;
3911 			event->event_id = get_unaligned_le16(&response->event_id);
3912 			event->additional_event_id =
3913 				get_unaligned_le32(&response->additional_event_id);
3914 			if (event->event_type == PQI_EVENT_TYPE_OFA)
3915 				pqi_ofa_capture_event_payload(ctrl_info, event, response);
3916 		}
3917 
3918 		oq_ci = (oq_ci + 1) % PQI_NUM_EVENT_QUEUE_ELEMENTS;
3919 	}
3920 
3921 	if (num_events) {
3922 		event_queue->oq_ci_copy = oq_ci;
3923 		writel(oq_ci, event_queue->oq_ci);
3924 		schedule_work(&ctrl_info->event_work);
3925 	}
3926 
3927 	return num_events;
3928 }
3929 
3930 #define PQI_LEGACY_INTX_MASK	0x1
3931 
3932 static inline void pqi_configure_legacy_intx(struct pqi_ctrl_info *ctrl_info, bool enable_intx)
3933 {
3934 	u32 intx_mask;
3935 	struct pqi_device_registers __iomem *pqi_registers;
3936 	volatile void __iomem *register_addr;
3937 
3938 	pqi_registers = ctrl_info->pqi_registers;
3939 
3940 	if (enable_intx)
3941 		register_addr = &pqi_registers->legacy_intx_mask_clear;
3942 	else
3943 		register_addr = &pqi_registers->legacy_intx_mask_set;
3944 
3945 	intx_mask = readl(register_addr);
3946 	intx_mask |= PQI_LEGACY_INTX_MASK;
3947 	writel(intx_mask, register_addr);
3948 }
3949 
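/*
 * Interrupt-mode state machine. Each arm below performs only the
 * register writes needed for the specific old-mode -> new-mode edge;
 * same-mode transitions are deliberate no-ops.
 */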
3950 static void pqi_change_irq_mode(struct pqi_ctrl_info *ctrl_info,
3951 	enum pqi_irq_mode new_mode)
3952 {
3953 	switch (ctrl_info->irq_mode) {
3954 	case IRQ_MODE_MSIX:
3955 		switch (new_mode) {
3956 		case IRQ_MODE_MSIX:
3957 			break;
3958 		case IRQ_MODE_INTX:
3959 			pqi_configure_legacy_intx(ctrl_info, true);
3960 			sis_enable_intx(ctrl_info);
3961 			break;
3962 		case IRQ_MODE_NONE:
3963 			break;
3964 		}
3965 		break;
3966 	case IRQ_MODE_INTX:
3967 		switch (new_mode) {
3968 		case IRQ_MODE_MSIX:
3969 			pqi_configure_legacy_intx(ctrl_info, false);
3970 			sis_enable_msix(ctrl_info);
3971 			break;
3972 		case IRQ_MODE_INTX:
3973 			break;
3974 		case IRQ_MODE_NONE:
3975 			pqi_configure_legacy_intx(ctrl_info, false);
3976 			break;
3977 		}
3978 		break;
3979 	case IRQ_MODE_NONE:
3980 		switch (new_mode) {
3981 		case IRQ_MODE_MSIX:
3982 			sis_enable_msix(ctrl_info);
3983 			break;
3984 		case IRQ_MODE_INTX:
3985 			pqi_configure_legacy_intx(ctrl_info, true);
3986 			sis_enable_intx(ctrl_info);
3987 			break;
3988 		case IRQ_MODE_NONE:
3989 			break;
3990 		}
3991 		break;
3992 	}
3993 
3994 	ctrl_info->irq_mode = new_mode;
3995 }
3996 
3997 #define PQI_LEGACY_INTX_PENDING		0x1
3998 
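/*
 * With legacy INTx the interrupt line may be shared with other devices,
 * so the status register is consulted to confirm this controller
 * actually asserted the interrupt; MSI-X vectors are message-based and
 * never shared, so they are always treated as valid.
 */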
3999 static inline bool pqi_is_valid_irq(struct pqi_ctrl_info *ctrl_info)
4000 {
4001 	bool valid_irq;
4002 	u32 intx_status;
4003 
4004 	switch (ctrl_info->irq_mode) {
4005 	case IRQ_MODE_MSIX:
4006 		valid_irq = true;
4007 		break;
4008 	case IRQ_MODE_INTX:
4009 		intx_status = readl(&ctrl_info->pqi_registers->legacy_intx_status);
4010 		if (intx_status & PQI_LEGACY_INTX_PENDING)
4011 			valid_irq = true;
4012 		else
4013 			valid_irq = false;
4014 		break;
4015 	case IRQ_MODE_NONE:
4016 	default:
4017 		valid_irq = false;
4018 		break;
4019 	}
4020 
4021 	return valid_irq;
4022 }
4023 
4024 static irqreturn_t pqi_irq_handler(int irq, void *data)
4025 {
4026 	struct pqi_ctrl_info *ctrl_info;
4027 	struct pqi_queue_group *queue_group;
4028 	int num_io_responses_handled;
4029 	int num_events_handled;
4030 
4031 	queue_group = data;
4032 	ctrl_info = queue_group->ctrl_info;
4033 
4034 	if (!pqi_is_valid_irq(ctrl_info))
4035 		return IRQ_NONE;
4036 
4037 	num_io_responses_handled = pqi_process_io_intr(ctrl_info, queue_group);
4038 	if (num_io_responses_handled < 0)
4039 		goto out;
4040 
4041 	if (irq == ctrl_info->event_irq) {
4042 		num_events_handled = pqi_process_event_intr(ctrl_info);
4043 		if (num_events_handled < 0)
4044 			goto out;
4045 	} else {
4046 		num_events_handled = 0;
4047 	}
4048 
4049 	if (num_io_responses_handled + num_events_handled > 0)
4050 		atomic_inc(&ctrl_info->num_interrupts);
4051 
4052 	pqi_start_io(ctrl_info, queue_group, RAID_PATH, NULL);
4053 	pqi_start_io(ctrl_info, queue_group, AIO_PATH, NULL);
4054 
4055 out:
4056 	return IRQ_HANDLED;
4057 }
4058 
4059 static int pqi_request_irqs(struct pqi_ctrl_info *ctrl_info)
4060 {
4061 	struct pci_dev *pci_dev = ctrl_info->pci_dev;
4062 	int i;
4063 	int rc;
4064 
4065 	ctrl_info->event_irq = pci_irq_vector(pci_dev, 0);
4066 
4067 	for (i = 0; i < ctrl_info->num_msix_vectors_enabled; i++) {
4068 		rc = request_irq(pci_irq_vector(pci_dev, i), pqi_irq_handler, 0,
4069 			DRIVER_NAME_SHORT, &ctrl_info->queue_groups[i]);
4070 		if (rc) {
4071 			dev_err(&pci_dev->dev,
4072 				"irq %u init failed with error %d\n",
4073 				pci_irq_vector(pci_dev, i), rc);
4074 			return rc;
4075 		}
4076 		ctrl_info->num_msix_vectors_initialized++;
4077 	}
4078 
4079 	return 0;
4080 }
4081 
4082 static void pqi_free_irqs(struct pqi_ctrl_info *ctrl_info)
4083 {
4084 	int i;
4085 
4086 	for (i = 0; i < ctrl_info->num_msix_vectors_initialized; i++)
4087 		free_irq(pci_irq_vector(ctrl_info->pci_dev, i),
4088 			&ctrl_info->queue_groups[i]);
4089 
4090 	ctrl_info->num_msix_vectors_initialized = 0;
4091 }
4092 
4093 static int pqi_enable_msix_interrupts(struct pqi_ctrl_info *ctrl_info)
4094 {
4095 	int num_vectors_enabled;
4096 	unsigned int flags = PCI_IRQ_MSIX;
4097 
4098 	if (!pqi_disable_managed_interrupts)
4099 		flags |= PCI_IRQ_AFFINITY;
4100 
4101 	num_vectors_enabled = pci_alloc_irq_vectors(ctrl_info->pci_dev,
4102 			PQI_MIN_MSIX_VECTORS, ctrl_info->num_queue_groups,
4103 			flags);
4104 	if (num_vectors_enabled < 0) {
4105 		dev_err(&ctrl_info->pci_dev->dev,
4106 			"MSI-X init failed with error %d\n",
4107 			num_vectors_enabled);
4108 		return num_vectors_enabled;
4109 	}
4110 
4111 	ctrl_info->num_msix_vectors_enabled = num_vectors_enabled;
4112 	ctrl_info->irq_mode = IRQ_MODE_MSIX;
4113 	return 0;
4114 }
4115 
4116 static void pqi_disable_msix_interrupts(struct pqi_ctrl_info *ctrl_info)
4117 {
4118 	if (ctrl_info->num_msix_vectors_enabled) {
4119 		pci_free_irq_vectors(ctrl_info->pci_dev);
4120 		ctrl_info->num_msix_vectors_enabled = 0;
4121 	}
4122 }
4123 
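/*
 * Size and carve up a single DMA-coherent allocation for all
 * operational queues. The first pass walks a NULL pointer through the
 * layout purely to compute the aligned total length; the passes below
 * repeat the walk over the real allocation, handing out the IQ/OQ
 * element arrays and then the aligned index words (iq_ci, oq_pi) to
 * each queue group and to the event queue.
 */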
4124 static int pqi_alloc_operational_queues(struct pqi_ctrl_info *ctrl_info)
4125 {
4126 	unsigned int i;
4127 	size_t alloc_length;
4128 	size_t element_array_length_per_iq;
4129 	size_t element_array_length_per_oq;
4130 	void *element_array;
4131 	void __iomem *next_queue_index;
4132 	void *aligned_pointer;
4133 	unsigned int num_inbound_queues;
4134 	unsigned int num_outbound_queues;
4135 	unsigned int num_queue_indexes;
4136 	struct pqi_queue_group *queue_group;
4137 
4138 	element_array_length_per_iq =
4139 		PQI_OPERATIONAL_IQ_ELEMENT_LENGTH *
4140 		ctrl_info->num_elements_per_iq;
4141 	element_array_length_per_oq =
4142 		PQI_OPERATIONAL_OQ_ELEMENT_LENGTH *
4143 		ctrl_info->num_elements_per_oq;
4144 	num_inbound_queues = ctrl_info->num_queue_groups * 2;
4145 	num_outbound_queues = ctrl_info->num_queue_groups;
4146 	num_queue_indexes = (ctrl_info->num_queue_groups * 3) + 1;
4147 
4148 	aligned_pointer = NULL;
4149 
4150 	for (i = 0; i < num_inbound_queues; i++) {
4151 		aligned_pointer = PTR_ALIGN(aligned_pointer,
4152 			PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT);
4153 		aligned_pointer += element_array_length_per_iq;
4154 	}
4155 
4156 	for (i = 0; i < num_outbound_queues; i++) {
4157 		aligned_pointer = PTR_ALIGN(aligned_pointer,
4158 			PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT);
4159 		aligned_pointer += element_array_length_per_oq;
4160 	}
4161 
4162 	aligned_pointer = PTR_ALIGN(aligned_pointer,
4163 		PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT);
4164 	aligned_pointer += PQI_NUM_EVENT_QUEUE_ELEMENTS *
4165 		PQI_EVENT_OQ_ELEMENT_LENGTH;
4166 
4167 	for (i = 0; i < num_queue_indexes; i++) {
4168 		aligned_pointer = PTR_ALIGN(aligned_pointer,
4169 			PQI_OPERATIONAL_INDEX_ALIGNMENT);
4170 		aligned_pointer += sizeof(pqi_index_t);
4171 	}
4172 
4173 	alloc_length = (size_t)aligned_pointer +
4174 		PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT;
4175 
4176 	alloc_length += PQI_EXTRA_SGL_MEMORY;
4177 
4178 	ctrl_info->queue_memory_base =
4179 		dma_alloc_coherent(&ctrl_info->pci_dev->dev, alloc_length,
4180 				   &ctrl_info->queue_memory_base_dma_handle,
4181 				   GFP_KERNEL);
4182 
4183 	if (!ctrl_info->queue_memory_base)
4184 		return -ENOMEM;
4185 
4186 	ctrl_info->queue_memory_length = alloc_length;
4187 
4188 	element_array = PTR_ALIGN(ctrl_info->queue_memory_base,
4189 		PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT);
4190 
4191 	for (i = 0; i < ctrl_info->num_queue_groups; i++) {
4192 		queue_group = &ctrl_info->queue_groups[i];
4193 		queue_group->iq_element_array[RAID_PATH] = element_array;
4194 		queue_group->iq_element_array_bus_addr[RAID_PATH] =
4195 			ctrl_info->queue_memory_base_dma_handle +
4196 				(element_array - ctrl_info->queue_memory_base);
4197 		element_array += element_array_length_per_iq;
4198 		element_array = PTR_ALIGN(element_array,
4199 			PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT);
4200 		queue_group->iq_element_array[AIO_PATH] = element_array;
4201 		queue_group->iq_element_array_bus_addr[AIO_PATH] =
4202 			ctrl_info->queue_memory_base_dma_handle +
4203 			(element_array - ctrl_info->queue_memory_base);
4204 		element_array += element_array_length_per_iq;
4205 		element_array = PTR_ALIGN(element_array,
4206 			PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT);
4207 	}
4208 
4209 	for (i = 0; i < ctrl_info->num_queue_groups; i++) {
4210 		queue_group = &ctrl_info->queue_groups[i];
4211 		queue_group->oq_element_array = element_array;
4212 		queue_group->oq_element_array_bus_addr =
4213 			ctrl_info->queue_memory_base_dma_handle +
4214 			(element_array - ctrl_info->queue_memory_base);
4215 		element_array += element_array_length_per_oq;
4216 		element_array = PTR_ALIGN(element_array,
4217 			PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT);
4218 	}
4219 
4220 	ctrl_info->event_queue.oq_element_array = element_array;
4221 	ctrl_info->event_queue.oq_element_array_bus_addr =
4222 		ctrl_info->queue_memory_base_dma_handle +
4223 		(element_array - ctrl_info->queue_memory_base);
4224 	element_array += PQI_NUM_EVENT_QUEUE_ELEMENTS *
4225 		PQI_EVENT_OQ_ELEMENT_LENGTH;
4226 
4227 	next_queue_index = (void __iomem *)PTR_ALIGN(element_array,
4228 		PQI_OPERATIONAL_INDEX_ALIGNMENT);
4229 
4230 	for (i = 0; i < ctrl_info->num_queue_groups; i++) {
4231 		queue_group = &ctrl_info->queue_groups[i];
4232 		queue_group->iq_ci[RAID_PATH] = next_queue_index;
4233 		queue_group->iq_ci_bus_addr[RAID_PATH] =
4234 			ctrl_info->queue_memory_base_dma_handle +
4235 			(next_queue_index -
4236 			(void __iomem *)ctrl_info->queue_memory_base);
4237 		next_queue_index += sizeof(pqi_index_t);
4238 		next_queue_index = PTR_ALIGN(next_queue_index,
4239 			PQI_OPERATIONAL_INDEX_ALIGNMENT);
4240 		queue_group->iq_ci[AIO_PATH] = next_queue_index;
4241 		queue_group->iq_ci_bus_addr[AIO_PATH] =
4242 			ctrl_info->queue_memory_base_dma_handle +
4243 			(next_queue_index -
4244 			(void __iomem *)ctrl_info->queue_memory_base);
4245 		next_queue_index += sizeof(pqi_index_t);
4246 		next_queue_index = PTR_ALIGN(next_queue_index,
4247 			PQI_OPERATIONAL_INDEX_ALIGNMENT);
4248 		queue_group->oq_pi = next_queue_index;
4249 		queue_group->oq_pi_bus_addr =
4250 			ctrl_info->queue_memory_base_dma_handle +
4251 			(next_queue_index -
4252 			(void __iomem *)ctrl_info->queue_memory_base);
4253 		next_queue_index += sizeof(pqi_index_t);
4254 		next_queue_index = PTR_ALIGN(next_queue_index,
4255 			PQI_OPERATIONAL_INDEX_ALIGNMENT);
4256 	}
4257 
4258 	ctrl_info->event_queue.oq_pi = next_queue_index;
4259 	ctrl_info->event_queue.oq_pi_bus_addr =
4260 		ctrl_info->queue_memory_base_dma_handle +
4261 		(next_queue_index -
4262 		(void __iomem *)ctrl_info->queue_memory_base);
4263 
4264 	return 0;
4265 }
4266 
4267 static void pqi_init_operational_queues(struct pqi_ctrl_info *ctrl_info)
4268 {
4269 	unsigned int i;
4270 	u16 next_iq_id = PQI_MIN_OPERATIONAL_QUEUE_ID;
4271 	u16 next_oq_id = PQI_MIN_OPERATIONAL_QUEUE_ID;
4272 
4273 	/*
4274 	 * Initialize the backpointers to the controller structure in
4275 	 * each operational queue group structure.
4276 	 */
4277 	for (i = 0; i < ctrl_info->num_queue_groups; i++)
4278 		ctrl_info->queue_groups[i].ctrl_info = ctrl_info;
4279 
4280 	/*
4281 	 * Assign IDs to all operational queues.  Note that the IDs
4282 	 * assigned to operational IQs are independent of the IDs
4283 	 * assigned to operational OQs.
4284 	 */
4285 	ctrl_info->event_queue.oq_id = next_oq_id++;
4286 	for (i = 0; i < ctrl_info->num_queue_groups; i++) {
4287 		ctrl_info->queue_groups[i].iq_id[RAID_PATH] = next_iq_id++;
4288 		ctrl_info->queue_groups[i].iq_id[AIO_PATH] = next_iq_id++;
4289 		ctrl_info->queue_groups[i].oq_id = next_oq_id++;
4290 	}
4291 
4292 	/*
4293 	 * Assign MSI-X table entry indexes to all queues.  Note that the
4294 	 * interrupt for the event queue is shared with the first queue group.
4295 	 */
4296 	ctrl_info->event_queue.int_msg_num = 0;
4297 	for (i = 0; i < ctrl_info->num_queue_groups; i++)
4298 		ctrl_info->queue_groups[i].int_msg_num = i;
4299 
4300 	for (i = 0; i < ctrl_info->num_queue_groups; i++) {
4301 		spin_lock_init(&ctrl_info->queue_groups[i].submit_lock[0]);
4302 		spin_lock_init(&ctrl_info->queue_groups[i].submit_lock[1]);
4303 		INIT_LIST_HEAD(&ctrl_info->queue_groups[i].request_list[0]);
4304 		INIT_LIST_HEAD(&ctrl_info->queue_groups[i].request_list[1]);
4305 	}
4306 }
4307 
4308 static int pqi_alloc_admin_queues(struct pqi_ctrl_info *ctrl_info)
4309 {
4310 	size_t alloc_length;
4311 	struct pqi_admin_queues_aligned *admin_queues_aligned;
4312 	struct pqi_admin_queues *admin_queues;
4313 
4314 	alloc_length = sizeof(struct pqi_admin_queues_aligned) +
4315 		PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT;
4316 
4317 	ctrl_info->admin_queue_memory_base =
4318 		dma_alloc_coherent(&ctrl_info->pci_dev->dev, alloc_length,
4319 				   &ctrl_info->admin_queue_memory_base_dma_handle,
4320 				   GFP_KERNEL);
4321 
4322 	if (!ctrl_info->admin_queue_memory_base)
4323 		return -ENOMEM;
4324 
4325 	ctrl_info->admin_queue_memory_length = alloc_length;
4326 
4327 	admin_queues = &ctrl_info->admin_queues;
4328 	admin_queues_aligned = PTR_ALIGN(ctrl_info->admin_queue_memory_base,
4329 		PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT);
4330 	admin_queues->iq_element_array =
4331 		&admin_queues_aligned->iq_element_array;
4332 	admin_queues->oq_element_array =
4333 		&admin_queues_aligned->oq_element_array;
4334 	admin_queues->iq_ci =
4335 		(pqi_index_t __iomem *)&admin_queues_aligned->iq_ci;
4336 	admin_queues->oq_pi =
4337 		(pqi_index_t __iomem *)&admin_queues_aligned->oq_pi;
4338 
4339 	admin_queues->iq_element_array_bus_addr =
4340 		ctrl_info->admin_queue_memory_base_dma_handle +
4341 		(admin_queues->iq_element_array -
4342 		ctrl_info->admin_queue_memory_base);
4343 	admin_queues->oq_element_array_bus_addr =
4344 		ctrl_info->admin_queue_memory_base_dma_handle +
4345 		(admin_queues->oq_element_array -
4346 		ctrl_info->admin_queue_memory_base);
4347 	admin_queues->iq_ci_bus_addr =
4348 		ctrl_info->admin_queue_memory_base_dma_handle +
4349 		((void __iomem *)admin_queues->iq_ci -
4350 		(void __iomem *)ctrl_info->admin_queue_memory_base);
4351 	admin_queues->oq_pi_bus_addr =
4352 		ctrl_info->admin_queue_memory_base_dma_handle +
4353 		((void __iomem *)admin_queues->oq_pi -
4354 		(void __iomem *)ctrl_info->admin_queue_memory_base);
4355 
4356 	return 0;
4357 }
4358 
4359 #define PQI_ADMIN_QUEUE_CREATE_TIMEOUT_JIFFIES		HZ
4360 #define PQI_ADMIN_QUEUE_CREATE_POLL_INTERVAL_MSECS	1
4361 
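/*
 * Program the admin queue pair into the controller: write the element
 * array and index addresses, issue CREATE_ADMIN_QUEUE_PAIR through the
 * function and status code register, and poll for IDLE. The doorbell
 * offsets (iq_pi, oq_ci) are read back only after the create completes,
 * because they are not valid before then.
 */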
4362 static int pqi_create_admin_queues(struct pqi_ctrl_info *ctrl_info)
4363 {
4364 	struct pqi_device_registers __iomem *pqi_registers;
4365 	struct pqi_admin_queues *admin_queues;
4366 	unsigned long timeout;
4367 	u8 status;
4368 	u32 reg;
4369 
4370 	pqi_registers = ctrl_info->pqi_registers;
4371 	admin_queues = &ctrl_info->admin_queues;
4372 
4373 	writeq((u64)admin_queues->iq_element_array_bus_addr,
4374 		&pqi_registers->admin_iq_element_array_addr);
4375 	writeq((u64)admin_queues->oq_element_array_bus_addr,
4376 		&pqi_registers->admin_oq_element_array_addr);
4377 	writeq((u64)admin_queues->iq_ci_bus_addr,
4378 		&pqi_registers->admin_iq_ci_addr);
4379 	writeq((u64)admin_queues->oq_pi_bus_addr,
4380 		&pqi_registers->admin_oq_pi_addr);
4381 
4382 	reg = PQI_ADMIN_IQ_NUM_ELEMENTS |
4383 		(PQI_ADMIN_OQ_NUM_ELEMENTS << 8) |
4384 		(admin_queues->int_msg_num << 16);
4385 	writel(reg, &pqi_registers->admin_iq_num_elements);
4386 
4387 	writel(PQI_CREATE_ADMIN_QUEUE_PAIR,
4388 		&pqi_registers->function_and_status_code);
4389 
4390 	timeout = PQI_ADMIN_QUEUE_CREATE_TIMEOUT_JIFFIES + jiffies;
4391 	while (1) {
4392 		msleep(PQI_ADMIN_QUEUE_CREATE_POLL_INTERVAL_MSECS);
4393 		status = readb(&pqi_registers->function_and_status_code);
4394 		if (status == PQI_STATUS_IDLE)
4395 			break;
4396 		if (time_after(jiffies, timeout))
4397 			return -ETIMEDOUT;
4398 	}
4399 
4400 	/*
4401 	 * The offset registers are not initialized to the correct
4402 	 * offsets until *after* the create admin queue pair command
4403 	 * completes successfully.
4404 	 */
4405 	admin_queues->iq_pi = ctrl_info->iomem_base +
4406 		PQI_DEVICE_REGISTERS_OFFSET +
4407 		readq(&pqi_registers->admin_iq_pi_offset);
4408 	admin_queues->oq_ci = ctrl_info->iomem_base +
4409 		PQI_DEVICE_REGISTERS_OFFSET +
4410 		readq(&pqi_registers->admin_oq_ci_offset);
4411 
4412 	return 0;
4413 }
4414 
4415 static void pqi_submit_admin_request(struct pqi_ctrl_info *ctrl_info,
4416 	struct pqi_general_admin_request *request)
4417 {
4418 	struct pqi_admin_queues *admin_queues;
4419 	void *next_element;
4420 	pqi_index_t iq_pi;
4421 
4422 	admin_queues = &ctrl_info->admin_queues;
4423 	iq_pi = admin_queues->iq_pi_copy;
4424 
4425 	next_element = admin_queues->iq_element_array +
4426 		(iq_pi * PQI_ADMIN_IQ_ELEMENT_LENGTH);
4427 
4428 	memcpy(next_element, request, sizeof(*request));
4429 
4430 	iq_pi = (iq_pi + 1) % PQI_ADMIN_IQ_NUM_ELEMENTS;
4431 	admin_queues->iq_pi_copy = iq_pi;
4432 
4433 	/*
4434 	 * This write notifies the controller that an IU is available to be
4435 	 * processed.
4436 	 */
4437 	writel(iq_pi, admin_queues->iq_pi);
4438 }
4439 
4440 #define PQI_ADMIN_REQUEST_TIMEOUT_SECS	60
4441 
4442 static int pqi_poll_for_admin_response(struct pqi_ctrl_info *ctrl_info,
4443 	struct pqi_general_admin_response *response)
4444 {
4445 	struct pqi_admin_queues *admin_queues;
4446 	pqi_index_t oq_pi;
4447 	pqi_index_t oq_ci;
4448 	unsigned long timeout;
4449 
4450 	admin_queues = &ctrl_info->admin_queues;
4451 	oq_ci = admin_queues->oq_ci_copy;
4452 
4453 	timeout = (PQI_ADMIN_REQUEST_TIMEOUT_SECS * HZ) + jiffies;
4454 
4455 	while (1) {
4456 		oq_pi = readl(admin_queues->oq_pi);
4457 		if (oq_pi != oq_ci)
4458 			break;
4459 		if (time_after(jiffies, timeout)) {
4460 			dev_err(&ctrl_info->pci_dev->dev,
4461 				"timed out waiting for admin response\n");
4462 			return -ETIMEDOUT;
4463 		}
4464 		if (!sis_is_firmware_running(ctrl_info))
4465 			return -ENXIO;
4466 		usleep_range(1000, 2000);
4467 	}
4468 
4469 	memcpy(response, admin_queues->oq_element_array +
4470 		(oq_ci * PQI_ADMIN_OQ_ELEMENT_LENGTH), sizeof(*response));
4471 
4472 	oq_ci = (oq_ci + 1) % PQI_ADMIN_OQ_NUM_ELEMENTS;
4473 	admin_queues->oq_ci_copy = oq_ci;
4474 	writel(oq_ci, admin_queues->oq_ci);
4475 
4476 	return 0;
4477 }
4478 
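/*
 * Move queued IUs from the per-path submission list into the inbound
 * queue under the submit lock. A spanned IU that would run past the end
 * of the element array is split: the first part fills the ring to its
 * end and the remainder wraps to element 0. The doorbell is rung once
 * per batch, after the last IU has been copied.
 */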
4479 static void pqi_start_io(struct pqi_ctrl_info *ctrl_info,
4480 	struct pqi_queue_group *queue_group, enum pqi_io_path path,
4481 	struct pqi_io_request *io_request)
4482 {
4483 	struct pqi_io_request *next;
4484 	void *next_element;
4485 	pqi_index_t iq_pi;
4486 	pqi_index_t iq_ci;
4487 	size_t iu_length;
4488 	unsigned long flags;
4489 	unsigned int num_elements_needed;
4490 	unsigned int num_elements_to_end_of_queue;
4491 	size_t copy_count;
4492 	struct pqi_iu_header *request;
4493 
4494 	spin_lock_irqsave(&queue_group->submit_lock[path], flags);
4495 
4496 	if (io_request) {
4497 		io_request->queue_group = queue_group;
4498 		list_add_tail(&io_request->request_list_entry,
4499 			&queue_group->request_list[path]);
4500 	}
4501 
4502 	iq_pi = queue_group->iq_pi_copy[path];
4503 
4504 	list_for_each_entry_safe(io_request, next,
4505 		&queue_group->request_list[path], request_list_entry) {
4506 
4507 		request = io_request->iu;
4508 
4509 		iu_length = get_unaligned_le16(&request->iu_length) +
4510 			PQI_REQUEST_HEADER_LENGTH;
4511 		num_elements_needed =
4512 			DIV_ROUND_UP(iu_length,
4513 				PQI_OPERATIONAL_IQ_ELEMENT_LENGTH);
4514 
4515 		iq_ci = readl(queue_group->iq_ci[path]);
4516 
4517 		if (num_elements_needed > pqi_num_elements_free(iq_pi, iq_ci,
4518 			ctrl_info->num_elements_per_iq))
4519 			break;
4520 
4521 		put_unaligned_le16(queue_group->oq_id,
4522 			&request->response_queue_id);
4523 
4524 		next_element = queue_group->iq_element_array[path] +
4525 			(iq_pi * PQI_OPERATIONAL_IQ_ELEMENT_LENGTH);
4526 
4527 		num_elements_to_end_of_queue =
4528 			ctrl_info->num_elements_per_iq - iq_pi;
4529 
4530 		if (num_elements_needed <= num_elements_to_end_of_queue) {
4531 			memcpy(next_element, request, iu_length);
4532 		} else {
4533 			copy_count = num_elements_to_end_of_queue *
4534 				PQI_OPERATIONAL_IQ_ELEMENT_LENGTH;
4535 			memcpy(next_element, request, copy_count);
4536 			memcpy(queue_group->iq_element_array[path],
4537 				(u8 *)request + copy_count,
4538 				iu_length - copy_count);
4539 		}
4540 
4541 		iq_pi = (iq_pi + num_elements_needed) %
4542 			ctrl_info->num_elements_per_iq;
4543 
4544 		list_del(&io_request->request_list_entry);
4545 	}
4546 
4547 	if (iq_pi != queue_group->iq_pi_copy[path]) {
4548 		queue_group->iq_pi_copy[path] = iq_pi;
4549 		/*
4550 		 * This write notifies the controller that one or more IUs are
4551 		 * available to be processed.
4552 		 */
4553 		writel(iq_pi, queue_group->iq_pi[path]);
4554 	}
4555 
4556 	spin_unlock_irqrestore(&queue_group->submit_lock[path], flags);
4557 }
4558 
4559 #define PQI_WAIT_FOR_COMPLETION_IO_TIMEOUT_SECS		10
4560 
4561 static int pqi_wait_for_completion_io(struct pqi_ctrl_info *ctrl_info,
4562 	struct completion *wait)
4563 {
4564 	int rc;
4565 
4566 	while (1) {
4567 		if (wait_for_completion_io_timeout(wait,
4568 			PQI_WAIT_FOR_COMPLETION_IO_TIMEOUT_SECS * HZ)) {
4569 			rc = 0;
4570 			break;
4571 		}
4572 
4573 		pqi_check_ctrl_health(ctrl_info);
4574 		if (pqi_ctrl_offline(ctrl_info)) {
4575 			rc = -ENXIO;
4576 			break;
4577 		}
4578 	}
4579 
4580 	return rc;
4581 }
4582 
4583 static void pqi_raid_synchronous_complete(struct pqi_io_request *io_request,
4584 	void *context)
4585 {
4586 	struct completion *waiting = context;
4587 
4588 	complete(waiting);
4589 }
4590 
4591 static int pqi_process_raid_io_error_synchronous(
4592 	struct pqi_raid_error_info *error_info)
4593 {
4594 	int rc = -EIO;
4595 
4596 	switch (error_info->data_out_result) {
4597 	case PQI_DATA_IN_OUT_GOOD:
4598 		if (error_info->status == SAM_STAT_GOOD)
4599 			rc = 0;
4600 		break;
4601 	case PQI_DATA_IN_OUT_UNDERFLOW:
4602 		if (error_info->status == SAM_STAT_GOOD ||
4603 			error_info->status == SAM_STAT_CHECK_CONDITION)
4604 			rc = 0;
4605 		break;
4606 	case PQI_DATA_IN_OUT_ABORTED:
4607 		rc = PQI_CMD_STATUS_ABORTED;
4608 		break;
4609 	}
4610 
4611 	return rc;
4612 }
4613 
4614 static inline bool pqi_is_blockable_request(struct pqi_iu_header *request)
4615 {
4616 	return (request->driver_flags & PQI_DRIVER_NONBLOCKABLE_REQUEST) == 0;
4617 }
4618 
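/*
 * Synchronous RAID-path submission: sync_request_sem serializes
 * callers, pqi_ctrl_busy() holds off controller quiesce, and completion
 * is signalled from interrupt context via
 * pqi_raid_synchronous_complete(). For RAID-path IUs the request_id is
 * copied into error_index, presumably so the error buffer slot lines up
 * with the request slot.
 */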
4619 static int pqi_submit_raid_request_synchronous(struct pqi_ctrl_info *ctrl_info,
4620 	struct pqi_iu_header *request, unsigned int flags,
4621 	struct pqi_raid_error_info *error_info)
4622 {
4623 	int rc = 0;
4624 	struct pqi_io_request *io_request;
4625 	size_t iu_length;
4626 	DECLARE_COMPLETION_ONSTACK(wait);
4627 
4628 	if (flags & PQI_SYNC_FLAGS_INTERRUPTABLE) {
4629 		if (down_interruptible(&ctrl_info->sync_request_sem))
4630 			return -ERESTARTSYS;
4631 	} else {
4632 		down(&ctrl_info->sync_request_sem);
4633 	}
4634 
4635 	pqi_ctrl_busy(ctrl_info);
4636 	/*
4637 	 * Wait for other admin queue updates such as:
4638 	 * config table changes, OFA memory updates, ...
4639 	 */
4640 	if (pqi_is_blockable_request(request))
4641 		pqi_wait_if_ctrl_blocked(ctrl_info);
4642 
4643 	if (pqi_ctrl_offline(ctrl_info)) {
4644 		rc = -ENXIO;
4645 		goto out;
4646 	}
4647 
4648 	io_request = pqi_alloc_io_request(ctrl_info, NULL);
4649 
4650 	put_unaligned_le16(io_request->index,
4651 		&(((struct pqi_raid_path_request *)request)->request_id));
4652 
4653 	if (request->iu_type == PQI_REQUEST_IU_RAID_PATH_IO)
4654 		((struct pqi_raid_path_request *)request)->error_index =
4655 			((struct pqi_raid_path_request *)request)->request_id;
4656 
4657 	iu_length = get_unaligned_le16(&request->iu_length) +
4658 		PQI_REQUEST_HEADER_LENGTH;
4659 	memcpy(io_request->iu, request, iu_length);
4660 
4661 	io_request->io_complete_callback = pqi_raid_synchronous_complete;
4662 	io_request->context = &wait;
4663 
4664 	pqi_start_io(ctrl_info, &ctrl_info->queue_groups[PQI_DEFAULT_QUEUE_GROUP], RAID_PATH,
4665 		io_request);
4666 
4667 	pqi_wait_for_completion_io(ctrl_info, &wait);
4668 
4669 	if (error_info) {
4670 		if (io_request->error_info)
4671 			memcpy(error_info, io_request->error_info, sizeof(*error_info));
4672 		else
4673 			memset(error_info, 0, sizeof(*error_info));
4674 	} else if (rc == 0 && io_request->error_info) {
4675 		rc = pqi_process_raid_io_error_synchronous(io_request->error_info);
4676 	}
4677 
4678 	pqi_free_io_request(io_request);
4679 
4680 out:
4681 	pqi_ctrl_unbusy(ctrl_info);
4682 	up(&ctrl_info->sync_request_sem);
4683 
4684 	return rc;
4685 }
4686 
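/*
 * An admin response is accepted only if its IU type, IU length, echoed
 * function code, and status all match expectations; any mismatch is
 * reported to the synchronous submit path as -EINVAL.
 */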
4687 static int pqi_validate_admin_response(
4688 	struct pqi_general_admin_response *response, u8 expected_function_code)
4689 {
4690 	if (response->header.iu_type != PQI_RESPONSE_IU_GENERAL_ADMIN)
4691 		return -EINVAL;
4692 
4693 	if (get_unaligned_le16(&response->header.iu_length) !=
4694 		PQI_GENERAL_ADMIN_IU_LENGTH)
4695 		return -EINVAL;
4696 
4697 	if (response->function_code != expected_function_code)
4698 		return -EINVAL;
4699 
4700 	if (response->status != PQI_GENERAL_ADMIN_STATUS_SUCCESS)
4701 		return -EINVAL;
4702 
4703 	return 0;
4704 }
4705 
4706 static int pqi_submit_admin_request_synchronous(
4707 	struct pqi_ctrl_info *ctrl_info,
4708 	struct pqi_general_admin_request *request,
4709 	struct pqi_general_admin_response *response)
4710 {
4711 	int rc;
4712 
4713 	pqi_submit_admin_request(ctrl_info, request);
4714 
4715 	rc = pqi_poll_for_admin_response(ctrl_info, response);
4716 
4717 	if (rc == 0)
4718 		rc = pqi_validate_admin_response(response, request->function_code);
4719 
4720 	return rc;
4721 }
4722 
4723 static int pqi_report_device_capability(struct pqi_ctrl_info *ctrl_info)
4724 {
4725 	int rc;
4726 	struct pqi_general_admin_request request;
4727 	struct pqi_general_admin_response response;
4728 	struct pqi_device_capability *capability;
4729 	struct pqi_iu_layer_descriptor *sop_iu_layer_descriptor;
4730 
4731 	capability = kmalloc(sizeof(*capability), GFP_KERNEL);
4732 	if (!capability)
4733 		return -ENOMEM;
4734 
4735 	memset(&request, 0, sizeof(request));
4736 
4737 	request.header.iu_type = PQI_REQUEST_IU_GENERAL_ADMIN;
4738 	put_unaligned_le16(PQI_GENERAL_ADMIN_IU_LENGTH,
4739 		&request.header.iu_length);
4740 	request.function_code =
4741 		PQI_GENERAL_ADMIN_FUNCTION_REPORT_DEVICE_CAPABILITY;
4742 	put_unaligned_le32(sizeof(*capability),
4743 		&request.data.report_device_capability.buffer_length);
4744 
4745 	rc = pqi_map_single(ctrl_info->pci_dev,
4746 		&request.data.report_device_capability.sg_descriptor,
4747 		capability, sizeof(*capability),
4748 		DMA_FROM_DEVICE);
4749 	if (rc)
4750 		goto out;
4751 
4752 	rc = pqi_submit_admin_request_synchronous(ctrl_info, &request, &response);
4753 
4754 	pqi_pci_unmap(ctrl_info->pci_dev,
4755 		&request.data.report_device_capability.sg_descriptor, 1,
4756 		DMA_FROM_DEVICE);
4757 
4758 	if (rc)
4759 		goto out;
4760 
4761 	if (response.status != PQI_GENERAL_ADMIN_STATUS_SUCCESS) {
4762 		rc = -EIO;
4763 		goto out;
4764 	}
4765 
4766 	ctrl_info->max_inbound_queues =
4767 		get_unaligned_le16(&capability->max_inbound_queues);
4768 	ctrl_info->max_elements_per_iq =
4769 		get_unaligned_le16(&capability->max_elements_per_iq);
4770 	ctrl_info->max_iq_element_length =
4771 		get_unaligned_le16(&capability->max_iq_element_length)
4772 		* 16;
4773 	ctrl_info->max_outbound_queues =
4774 		get_unaligned_le16(&capability->max_outbound_queues);
4775 	ctrl_info->max_elements_per_oq =
4776 		get_unaligned_le16(&capability->max_elements_per_oq);
4777 	ctrl_info->max_oq_element_length =
4778 		get_unaligned_le16(&capability->max_oq_element_length)
4779 		* 16;
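	/*
	 * Note: the capability structure reports queue element lengths in
	 * units of 16 bytes, hence the multiplication by 16 above to convert
	 * them to byte counts.
	 */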
4780 
4781 	sop_iu_layer_descriptor =
4782 		&capability->iu_layer_descriptors[PQI_PROTOCOL_SOP];
4783 
4784 	ctrl_info->max_inbound_iu_length_per_firmware =
4785 		get_unaligned_le16(
4786 			&sop_iu_layer_descriptor->max_inbound_iu_length);
4787 	ctrl_info->inbound_spanning_supported =
4788 		sop_iu_layer_descriptor->inbound_spanning_supported;
4789 	ctrl_info->outbound_spanning_supported =
4790 		sop_iu_layer_descriptor->outbound_spanning_supported;
4791 
4792 out:
4793 	kfree(capability);
4794 
4795 	return rc;
4796 }
4797 
4798 static int pqi_validate_device_capability(struct pqi_ctrl_info *ctrl_info)
4799 {
4800 	if (ctrl_info->max_iq_element_length <
4801 		PQI_OPERATIONAL_IQ_ELEMENT_LENGTH) {
4802 		dev_err(&ctrl_info->pci_dev->dev,
4803 			"max. inbound queue element length of %d is less than the required length of %d\n",
4804 			ctrl_info->max_iq_element_length,
4805 			PQI_OPERATIONAL_IQ_ELEMENT_LENGTH);
4806 		return -EINVAL;
4807 	}
4808 
4809 	if (ctrl_info->max_oq_element_length <
4810 		PQI_OPERATIONAL_OQ_ELEMENT_LENGTH) {
4811 		dev_err(&ctrl_info->pci_dev->dev,
4812 			"max. outbound queue element length of %d is less than the required length of %d\n",
4813 			ctrl_info->max_oq_element_length,
4814 			PQI_OPERATIONAL_OQ_ELEMENT_LENGTH);
4815 		return -EINVAL;
4816 	}
4817 
4818 	if (ctrl_info->max_inbound_iu_length_per_firmware <
4819 		PQI_OPERATIONAL_IQ_ELEMENT_LENGTH) {
4820 		dev_err(&ctrl_info->pci_dev->dev,
4821 			"max. inbound IU length of %u is less than the min. required length of %d\n",
4822 			ctrl_info->max_inbound_iu_length_per_firmware,
4823 			PQI_OPERATIONAL_IQ_ELEMENT_LENGTH);
4824 		return -EINVAL;
4825 	}
4826 
4827 	if (!ctrl_info->inbound_spanning_supported) {
4828 		dev_err(&ctrl_info->pci_dev->dev,
4829 			"the controller does not support inbound spanning\n");
4830 		return -EINVAL;
4831 	}
4832 
4833 	if (ctrl_info->outbound_spanning_supported) {
4834 		dev_err(&ctrl_info->pci_dev->dev,
4835 			"the controller supports outbound spanning but this driver does not\n");
4836 		return -EINVAL;
4837 	}
4838 
4839 	return 0;
4840 }
4841 
4842 static int pqi_create_event_queue(struct pqi_ctrl_info *ctrl_info)
4843 {
4844 	int rc;
4845 	struct pqi_event_queue *event_queue;
4846 	struct pqi_general_admin_request request;
4847 	struct pqi_general_admin_response response;
4848 
4849 	event_queue = &ctrl_info->event_queue;
4850 
4851 	/*
4852 	 * Create an OQ (Outbound Queue - device to host queue) dedicated
4853 	 * to events.
4854 	 */
4855 	memset(&request, 0, sizeof(request));
4856 	request.header.iu_type = PQI_REQUEST_IU_GENERAL_ADMIN;
4857 	put_unaligned_le16(PQI_GENERAL_ADMIN_IU_LENGTH,
4858 		&request.header.iu_length);
4859 	request.function_code = PQI_GENERAL_ADMIN_FUNCTION_CREATE_OQ;
4860 	put_unaligned_le16(event_queue->oq_id,
4861 		&request.data.create_operational_oq.queue_id);
4862 	put_unaligned_le64((u64)event_queue->oq_element_array_bus_addr,
4863 		&request.data.create_operational_oq.element_array_addr);
4864 	put_unaligned_le64((u64)event_queue->oq_pi_bus_addr,
4865 		&request.data.create_operational_oq.pi_addr);
4866 	put_unaligned_le16(PQI_NUM_EVENT_QUEUE_ELEMENTS,
4867 		&request.data.create_operational_oq.num_elements);
4868 	put_unaligned_le16(PQI_EVENT_OQ_ELEMENT_LENGTH / 16,
4869 		&request.data.create_operational_oq.element_length);
4870 	request.data.create_operational_oq.queue_protocol = PQI_PROTOCOL_SOP;
4871 	put_unaligned_le16(event_queue->int_msg_num,
4872 		&request.data.create_operational_oq.int_msg_num);
4873 
4874 	rc = pqi_submit_admin_request_synchronous(ctrl_info, &request,
4875 		&response);
4876 	if (rc)
4877 		return rc;
4878 
4879 	event_queue->oq_ci = ctrl_info->iomem_base +
4880 		PQI_DEVICE_REGISTERS_OFFSET +
4881 		get_unaligned_le64(
4882 			&response.data.create_operational_oq.oq_ci_offset);
4883 
4884 	return 0;
4885 }
4886 
4887 static int pqi_create_queue_group(struct pqi_ctrl_info *ctrl_info,
4888 	unsigned int group_number)
4889 {
4890 	int rc;
4891 	struct pqi_queue_group *queue_group;
4892 	struct pqi_general_admin_request request;
4893 	struct pqi_general_admin_response response;
4894 
4895 	queue_group = &ctrl_info->queue_groups[group_number];
4896 
4897 	/*
4898 	 * Create IQ (Inbound Queue - host to device queue) for
4899 	 * RAID path.
4900 	 */
4901 	memset(&request, 0, sizeof(request));
4902 	request.header.iu_type = PQI_REQUEST_IU_GENERAL_ADMIN;
4903 	put_unaligned_le16(PQI_GENERAL_ADMIN_IU_LENGTH,
4904 		&request.header.iu_length);
4905 	request.function_code = PQI_GENERAL_ADMIN_FUNCTION_CREATE_IQ;
4906 	put_unaligned_le16(queue_group->iq_id[RAID_PATH],
4907 		&request.data.create_operational_iq.queue_id);
4908 	put_unaligned_le64(
4909 		(u64)queue_group->iq_element_array_bus_addr[RAID_PATH],
4910 		&request.data.create_operational_iq.element_array_addr);
4911 	put_unaligned_le64((u64)queue_group->iq_ci_bus_addr[RAID_PATH],
4912 		&request.data.create_operational_iq.ci_addr);
4913 	put_unaligned_le16(ctrl_info->num_elements_per_iq,
4914 		&request.data.create_operational_iq.num_elements);
4915 	put_unaligned_le16(PQI_OPERATIONAL_IQ_ELEMENT_LENGTH / 16,
4916 		&request.data.create_operational_iq.element_length);
4917 	request.data.create_operational_iq.queue_protocol = PQI_PROTOCOL_SOP;
4918 
4919 	rc = pqi_submit_admin_request_synchronous(ctrl_info, &request,
4920 		&response);
4921 	if (rc) {
4922 		dev_err(&ctrl_info->pci_dev->dev,
4923 			"error creating inbound RAID queue\n");
4924 		return rc;
4925 	}
4926 
4927 	queue_group->iq_pi[RAID_PATH] = ctrl_info->iomem_base +
4928 		PQI_DEVICE_REGISTERS_OFFSET +
4929 		get_unaligned_le64(
4930 			&response.data.create_operational_iq.iq_pi_offset);
4931 
4932 	/*
4933 	 * Create IQ (Inbound Queue - host to device queue) for
4934 	 * Advanced I/O (AIO) path.
4935 	 */
4936 	memset(&request, 0, sizeof(request));
4937 	request.header.iu_type = PQI_REQUEST_IU_GENERAL_ADMIN;
4938 	put_unaligned_le16(PQI_GENERAL_ADMIN_IU_LENGTH,
4939 		&request.header.iu_length);
4940 	request.function_code = PQI_GENERAL_ADMIN_FUNCTION_CREATE_IQ;
4941 	put_unaligned_le16(queue_group->iq_id[AIO_PATH],
4942 		&request.data.create_operational_iq.queue_id);
4943 	put_unaligned_le64((u64)queue_group->
4944 		iq_element_array_bus_addr[AIO_PATH],
4945 		&request.data.create_operational_iq.element_array_addr);
4946 	put_unaligned_le64((u64)queue_group->iq_ci_bus_addr[AIO_PATH],
4947 		&request.data.create_operational_iq.ci_addr);
4948 	put_unaligned_le16(ctrl_info->num_elements_per_iq,
4949 		&request.data.create_operational_iq.num_elements);
4950 	put_unaligned_le16(PQI_OPERATIONAL_IQ_ELEMENT_LENGTH / 16,
4951 		&request.data.create_operational_iq.element_length);
4952 	request.data.create_operational_iq.queue_protocol = PQI_PROTOCOL_SOP;
4953 
4954 	rc = pqi_submit_admin_request_synchronous(ctrl_info, &request,
4955 		&response);
4956 	if (rc) {
4957 		dev_err(&ctrl_info->pci_dev->dev,
4958 			"error creating inbound AIO queue\n");
4959 		return rc;
4960 	}
4961 
4962 	queue_group->iq_pi[AIO_PATH] = ctrl_info->iomem_base +
4963 		PQI_DEVICE_REGISTERS_OFFSET +
4964 		get_unaligned_le64(
4965 			&response.data.create_operational_iq.iq_pi_offset);
4966 
4967 	/*
4968 	 * Designate the 2nd IQ as the AIO path.  By default, all IQs are
4969 	 * assumed to be for RAID path I/O unless we change the queue's
4970 	 * property.
4971 	 */
4972 	memset(&request, 0, sizeof(request));
4973 	request.header.iu_type = PQI_REQUEST_IU_GENERAL_ADMIN;
4974 	put_unaligned_le16(PQI_GENERAL_ADMIN_IU_LENGTH,
4975 		&request.header.iu_length);
4976 	request.function_code = PQI_GENERAL_ADMIN_FUNCTION_CHANGE_IQ_PROPERTY;
4977 	put_unaligned_le16(queue_group->iq_id[AIO_PATH],
4978 		&request.data.change_operational_iq_properties.queue_id);
4979 	put_unaligned_le32(PQI_IQ_PROPERTY_IS_AIO_QUEUE,
4980 		&request.data.change_operational_iq_properties.vendor_specific);
4981 
4982 	rc = pqi_submit_admin_request_synchronous(ctrl_info, &request,
4983 		&response);
4984 	if (rc) {
4985 		dev_err(&ctrl_info->pci_dev->dev,
4986 			"error changing queue property\n");
4987 		return rc;
4988 	}
4989 
4990 	/*
4991 	 * Create OQ (Outbound Queue - device to host queue).
4992 	 */
4993 	memset(&request, 0, sizeof(request));
4994 	request.header.iu_type = PQI_REQUEST_IU_GENERAL_ADMIN;
4995 	put_unaligned_le16(PQI_GENERAL_ADMIN_IU_LENGTH,
4996 		&request.header.iu_length);
4997 	request.function_code = PQI_GENERAL_ADMIN_FUNCTION_CREATE_OQ;
4998 	put_unaligned_le16(queue_group->oq_id,
4999 		&request.data.create_operational_oq.queue_id);
5000 	put_unaligned_le64((u64)queue_group->oq_element_array_bus_addr,
5001 		&request.data.create_operational_oq.element_array_addr);
5002 	put_unaligned_le64((u64)queue_group->oq_pi_bus_addr,
5003 		&request.data.create_operational_oq.pi_addr);
5004 	put_unaligned_le16(ctrl_info->num_elements_per_oq,
5005 		&request.data.create_operational_oq.num_elements);
5006 	put_unaligned_le16(PQI_OPERATIONAL_OQ_ELEMENT_LENGTH / 16,
5007 		&request.data.create_operational_oq.element_length);
5008 	request.data.create_operational_oq.queue_protocol = PQI_PROTOCOL_SOP;
5009 	put_unaligned_le16(queue_group->int_msg_num,
5010 		&request.data.create_operational_oq.int_msg_num);
5011 
5012 	rc = pqi_submit_admin_request_synchronous(ctrl_info, &request,
5013 		&response);
5014 	if (rc) {
5015 		dev_err(&ctrl_info->pci_dev->dev,
5016 			"error creating outbound queue\n");
5017 		return rc;
5018 	}
5019 
5020 	queue_group->oq_ci = ctrl_info->iomem_base +
5021 		PQI_DEVICE_REGISTERS_OFFSET +
5022 		get_unaligned_le64(
5023 			&response.data.create_operational_oq.oq_ci_offset);
5024 
5025 	return 0;
5026 }
5027 
5028 static int pqi_create_queues(struct pqi_ctrl_info *ctrl_info)
5029 {
5030 	int rc;
5031 	unsigned int i;
5032 
5033 	rc = pqi_create_event_queue(ctrl_info);
5034 	if (rc) {
5035 		dev_err(&ctrl_info->pci_dev->dev,
5036 			"error creating event queue\n");
5037 		return rc;
5038 	}
5039 
5040 	for (i = 0; i < ctrl_info->num_queue_groups; i++) {
5041 		rc = pqi_create_queue_group(ctrl_info, i);
5042 		if (rc) {
5043 			dev_err(&ctrl_info->pci_dev->dev,
5044 				"error creating queue group number %u/%u\n",
5045 				i, ctrl_info->num_queue_groups);
5046 			return rc;
5047 		}
5048 	}
5049 
5050 	return 0;
5051 }
5052 
5053 #define PQI_REPORT_EVENT_CONFIG_BUFFER_LENGTH	\
5054 	struct_size_t(struct pqi_event_config, descriptors, PQI_MAX_EVENT_DESCRIPTORS)
5055 
5056 static int pqi_configure_events(struct pqi_ctrl_info *ctrl_info,
5057 	bool enable_events)
5058 {
5059 	int rc;
5060 	unsigned int i;
5061 	struct pqi_event_config *event_config;
5062 	struct pqi_event_descriptor *event_descriptor;
5063 	struct pqi_general_management_request request;
5064 
5065 	event_config = kmalloc(PQI_REPORT_EVENT_CONFIG_BUFFER_LENGTH,
5066 		GFP_KERNEL);
5067 	if (!event_config)
5068 		return -ENOMEM;
5069 
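	/*
	 * Configuring events is a read-modify-write cycle: first read the
	 * controller's current event configuration, then point each supported
	 * event descriptor at our event queue (or at OQ ID 0 to disable it),
	 * and finally write the modified configuration back.
	 */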
5070 	memset(&request, 0, sizeof(request));
5071 
5072 	request.header.iu_type = PQI_REQUEST_IU_REPORT_VENDOR_EVENT_CONFIG;
5073 	put_unaligned_le16(offsetof(struct pqi_general_management_request,
5074 		data.report_event_configuration.sg_descriptors[1]) -
5075 		PQI_REQUEST_HEADER_LENGTH, &request.header.iu_length);
5076 	put_unaligned_le32(PQI_REPORT_EVENT_CONFIG_BUFFER_LENGTH,
5077 		&request.data.report_event_configuration.buffer_length);
5078 
5079 	rc = pqi_map_single(ctrl_info->pci_dev,
5080 		request.data.report_event_configuration.sg_descriptors,
5081 		event_config, PQI_REPORT_EVENT_CONFIG_BUFFER_LENGTH,
5082 		DMA_FROM_DEVICE);
5083 	if (rc)
5084 		goto out;
5085 
5086 	rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header, 0, NULL);
5087 
5088 	pqi_pci_unmap(ctrl_info->pci_dev,
5089 		request.data.report_event_configuration.sg_descriptors, 1,
5090 		DMA_FROM_DEVICE);
5091 
5092 	if (rc)
5093 		goto out;
5094 
5095 	for (i = 0; i < event_config->num_event_descriptors; i++) {
5096 		event_descriptor = &event_config->descriptors[i];
5097 		if (enable_events &&
5098 			pqi_is_supported_event(event_descriptor->event_type))
5099 				put_unaligned_le16(ctrl_info->event_queue.oq_id,
5100 					&event_descriptor->oq_id);
5101 		else
5102 			put_unaligned_le16(0, &event_descriptor->oq_id);
5103 	}
5104 
5105 	memset(&request, 0, sizeof(request));
5106 
5107 	request.header.iu_type = PQI_REQUEST_IU_SET_VENDOR_EVENT_CONFIG;
5108 	put_unaligned_le16(offsetof(struct pqi_general_management_request,
5109 		data.report_event_configuration.sg_descriptors[1]) -
5110 		PQI_REQUEST_HEADER_LENGTH, &request.header.iu_length);
5111 	put_unaligned_le32(PQI_REPORT_EVENT_CONFIG_BUFFER_LENGTH,
5112 		&request.data.report_event_configuration.buffer_length);
5113 
5114 	rc = pqi_map_single(ctrl_info->pci_dev,
5115 		request.data.report_event_configuration.sg_descriptors,
5116 		event_config, PQI_REPORT_EVENT_CONFIG_BUFFER_LENGTH,
5117 		DMA_TO_DEVICE);
5118 	if (rc)
5119 		goto out;
5120 
5121 	rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header, 0, NULL);
5122 
5123 	pqi_pci_unmap(ctrl_info->pci_dev,
5124 		request.data.report_event_configuration.sg_descriptors, 1,
5125 		DMA_TO_DEVICE);
5126 
5127 out:
5128 	kfree(event_config);
5129 
5130 	return rc;
5131 }
5132 
5133 static inline int pqi_enable_events(struct pqi_ctrl_info *ctrl_info)
5134 {
5135 	return pqi_configure_events(ctrl_info, true);
5136 }
5137 
5138 static void pqi_free_all_io_requests(struct pqi_ctrl_info *ctrl_info)
5139 {
5140 	unsigned int i;
5141 	struct device *dev;
5142 	size_t sg_chain_buffer_length;
5143 	struct pqi_io_request *io_request;
5144 
5145 	if (!ctrl_info->io_request_pool)
5146 		return;
5147 
5148 	dev = &ctrl_info->pci_dev->dev;
5149 	sg_chain_buffer_length = ctrl_info->sg_chain_buffer_length;
5150 	io_request = ctrl_info->io_request_pool;
5151 
5152 	for (i = 0; i < ctrl_info->max_io_slots; i++) {
5153 		kfree(io_request->iu);
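		/*
		 * The pool is populated in order by pqi_alloc_io_resources(),
		 * so the first slot without a chain buffer marks the end of
		 * the successfully allocated entries.
		 */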
5154 		if (!io_request->sg_chain_buffer)
5155 			break;
5156 		dma_free_coherent(dev, sg_chain_buffer_length,
5157 			io_request->sg_chain_buffer,
5158 			io_request->sg_chain_buffer_dma_handle);
5159 		io_request++;
5160 	}
5161 
5162 	kfree(ctrl_info->io_request_pool);
5163 	ctrl_info->io_request_pool = NULL;
5164 }
5165 
5166 static inline int pqi_alloc_error_buffer(struct pqi_ctrl_info *ctrl_info)
5167 {
5168 	ctrl_info->error_buffer = dma_alloc_coherent(&ctrl_info->pci_dev->dev,
5169 				     ctrl_info->error_buffer_length,
5170 				     &ctrl_info->error_buffer_dma_handle,
5171 				     GFP_KERNEL);
5172 	if (!ctrl_info->error_buffer)
5173 		return -ENOMEM;
5174 
5175 	return 0;
5176 }
5177 
5178 static int pqi_alloc_io_resources(struct pqi_ctrl_info *ctrl_info)
5179 {
5180 	unsigned int i;
5181 	void *sg_chain_buffer;
5182 	size_t sg_chain_buffer_length;
5183 	dma_addr_t sg_chain_buffer_dma_handle;
5184 	struct device *dev;
5185 	struct pqi_io_request *io_request;
5186 
5187 	ctrl_info->io_request_pool = kcalloc(ctrl_info->max_io_slots,
5188 		sizeof(ctrl_info->io_request_pool[0]), GFP_KERNEL);
5189 
5190 	if (!ctrl_info->io_request_pool) {
5191 		dev_err(&ctrl_info->pci_dev->dev,
5192 			"failed to allocate I/O request pool\n");
5193 		goto error;
5194 	}
5195 
5196 	dev = &ctrl_info->pci_dev->dev;
5197 	sg_chain_buffer_length = ctrl_info->sg_chain_buffer_length;
5198 	io_request = ctrl_info->io_request_pool;
5199 
5200 	for (i = 0; i < ctrl_info->max_io_slots; i++) {
5201 		io_request->iu = kmalloc(ctrl_info->max_inbound_iu_length, GFP_KERNEL);
5202 
5203 		if (!io_request->iu) {
5204 			dev_err(&ctrl_info->pci_dev->dev,
5205 				"failed to allocate IU buffers\n");
5206 			goto error;
5207 		}
5208 
5209 		sg_chain_buffer = dma_alloc_coherent(dev,
5210 			sg_chain_buffer_length, &sg_chain_buffer_dma_handle,
5211 			GFP_KERNEL);
5212 
5213 		if (!sg_chain_buffer) {
5214 			dev_err(&ctrl_info->pci_dev->dev,
5215 				"failed to allocate PQI scatter-gather chain buffers\n");
5216 			goto error;
5217 		}
5218 
5219 		io_request->index = i;
5220 		io_request->sg_chain_buffer = sg_chain_buffer;
5221 		io_request->sg_chain_buffer_dma_handle = sg_chain_buffer_dma_handle;
5222 		io_request++;
5223 	}
5224 
5225 	return 0;
5226 
5227 error:
5228 	pqi_free_all_io_requests(ctrl_info);
5229 
5230 	return -ENOMEM;
5231 }
5232 
5233 /*
5234  * Calculate required resources that are sized based on max. outstanding
5235  * requests and max. transfer size.
5236  */
5237 
5238 static void pqi_calculate_io_resources(struct pqi_ctrl_info *ctrl_info)
5239 {
5240 	u32 max_transfer_size;
5241 	u32 max_sg_entries;
5242 
5243 	ctrl_info->scsi_ml_can_queue =
5244 		ctrl_info->max_outstanding_requests - PQI_RESERVED_IO_SLOTS;
5245 	ctrl_info->max_io_slots = ctrl_info->max_outstanding_requests;
5246 
5247 	ctrl_info->error_buffer_length =
5248 		ctrl_info->max_io_slots * PQI_ERROR_BUFFER_ELEMENT_LENGTH;
5249 
5250 	if (is_kdump_kernel())
5251 		max_transfer_size = min(ctrl_info->max_transfer_size,
5252 			PQI_MAX_TRANSFER_SIZE_KDUMP);
5253 	else
5254 		max_transfer_size = min(ctrl_info->max_transfer_size,
5255 			PQI_MAX_TRANSFER_SIZE);
5256 
5257 	max_sg_entries = max_transfer_size / PAGE_SIZE;
5258 
5259 	/* +1 to cover when the buffer is not page-aligned. */
5260 	max_sg_entries++;
5261 
5262 	max_sg_entries = min(ctrl_info->max_sg_entries, max_sg_entries);
5263 
5264 	max_transfer_size = (max_sg_entries - 1) * PAGE_SIZE;
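	/*
	 * Worked example (hypothetical numbers): with 4 KiB pages and a 1 MiB
	 * max. transfer size, 1 MiB / 4 KiB = 256 SG entries, +1 for a
	 * non-page-aligned buffer = 257; assuming the controller's SG limit
	 * does not clamp this, the transfer size is then recomputed as
	 * 256 pages so it always fits in the final SG entry count.
	 */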
5265 
5266 	ctrl_info->sg_chain_buffer_length =
5267 		(max_sg_entries * sizeof(struct pqi_sg_descriptor)) +
5268 		PQI_EXTRA_SGL_MEMORY;
5269 	ctrl_info->sg_tablesize = max_sg_entries;
5270 	ctrl_info->max_sectors = max_transfer_size / 512;
5271 }
5272 
5273 static void pqi_calculate_queue_resources(struct pqi_ctrl_info *ctrl_info)
5274 {
5275 	int num_queue_groups;
5276 	u16 num_elements_per_iq;
5277 	u16 num_elements_per_oq;
5278 
5279 	if (is_kdump_kernel()) {
5280 		num_queue_groups = 1;
5281 	} else {
5282 		int num_cpus;
5283 		int max_queue_groups;
5284 
5285 		max_queue_groups = min(ctrl_info->max_inbound_queues / 2,
5286 			ctrl_info->max_outbound_queues - 1);
5287 		max_queue_groups = min(max_queue_groups, PQI_MAX_QUEUE_GROUPS);
5288 
5289 		num_cpus = num_online_cpus();
5290 		num_queue_groups = min(num_cpus, ctrl_info->max_msix_vectors);
5291 		num_queue_groups = min(num_queue_groups, max_queue_groups);
5292 	}
5293 
5294 	ctrl_info->num_queue_groups = num_queue_groups;
5295 
5296 	/*
5297 	 * Make sure that the max. inbound IU length is an even multiple
5298 	 * of our inbound element length.
5299 	 */
5300 	ctrl_info->max_inbound_iu_length =
5301 		(ctrl_info->max_inbound_iu_length_per_firmware /
5302 		PQI_OPERATIONAL_IQ_ELEMENT_LENGTH) *
5303 		PQI_OPERATIONAL_IQ_ELEMENT_LENGTH;
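	/*
	 * Example (hypothetical firmware value): a firmware max. of 1064 bytes
	 * with a 128-byte element length rounds down to (1064 / 128) * 128 =
	 * 1024 bytes, i.e. 8 whole elements.
	 */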
5304 
5305 	num_elements_per_iq =
5306 		(ctrl_info->max_inbound_iu_length /
5307 		PQI_OPERATIONAL_IQ_ELEMENT_LENGTH);
5308 
5309 	/* Add one because one element in each queue is unusable. */
5310 	num_elements_per_iq++;
5311 
5312 	num_elements_per_iq = min(num_elements_per_iq,
5313 		ctrl_info->max_elements_per_iq);
5314 
5315 	num_elements_per_oq = ((num_elements_per_iq - 1) * 2) + 1;
5316 	num_elements_per_oq = min(num_elements_per_oq,
5317 		ctrl_info->max_elements_per_oq);
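	/*
	 * Each queue group has two inbound queues (RAID and AIO) feeding one
	 * outbound queue, so size the OQ to hold completions from both IQs;
	 * the -1/+1 terms account for the one unusable element per queue.
	 */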
5318 
5319 	ctrl_info->num_elements_per_iq = num_elements_per_iq;
5320 	ctrl_info->num_elements_per_oq = num_elements_per_oq;
5321 
5322 	ctrl_info->max_sg_per_iu =
5323 		((ctrl_info->max_inbound_iu_length -
5324 		PQI_OPERATIONAL_IQ_ELEMENT_LENGTH) /
5325 		sizeof(struct pqi_sg_descriptor)) +
5326 		PQI_MAX_EMBEDDED_SG_DESCRIPTORS;
5327 
5328 	ctrl_info->max_sg_per_r56_iu =
5329 		((ctrl_info->max_inbound_iu_length -
5330 		PQI_OPERATIONAL_IQ_ELEMENT_LENGTH) /
5331 		sizeof(struct pqi_sg_descriptor)) +
5332 		PQI_MAX_EMBEDDED_R56_SG_DESCRIPTORS;
5333 }
5334 
5335 static inline void pqi_set_sg_descriptor(struct pqi_sg_descriptor *sg_descriptor,
5336 	struct scatterlist *sg)
5337 {
5338 	u64 address = (u64)sg_dma_address(sg);
5339 	unsigned int length = sg_dma_len(sg);
5340 
5341 	put_unaligned_le64(address, &sg_descriptor->address);
5342 	put_unaligned_le32(length, &sg_descriptor->length);
5343 	put_unaligned_le32(0, &sg_descriptor->flags);
5344 }
5345 
5346 static unsigned int pqi_build_sg_list(struct pqi_sg_descriptor *sg_descriptor,
5347 	struct scatterlist *sg, int sg_count, struct pqi_io_request *io_request,
5348 	int max_sg_per_iu, bool *chained)
5349 {
5350 	int i;
5351 	unsigned int num_sg_in_iu;
5352 
5353 	*chained = false;
5354 	i = 0;
5355 	num_sg_in_iu = 0;
5356 	max_sg_per_iu--;	/* Subtract 1 to leave room for chain marker. */
5357 
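	/*
	 * Descriptors are written into the IU until the reserved slot is
	 * reached; that slot is then converted into a CISS_SG_CHAIN descriptor
	 * pointing at the DMA-able chain buffer, and the remaining entries are
	 * written there. num_sg_in_iu counts only the descriptors stored in
	 * the IU itself (including the chain marker), which is what the
	 * callers' IU length calculations need.
	 */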
5358 	while (1) {
5359 		pqi_set_sg_descriptor(sg_descriptor, sg);
5360 		if (!*chained)
5361 			num_sg_in_iu++;
5362 		i++;
5363 		if (i == sg_count)
5364 			break;
5365 		sg_descriptor++;
5366 		if (i == max_sg_per_iu) {
5367 			put_unaligned_le64((u64)io_request->sg_chain_buffer_dma_handle,
5368 				&sg_descriptor->address);
5369 			put_unaligned_le32((sg_count - num_sg_in_iu) * sizeof(*sg_descriptor),
5370 				&sg_descriptor->length);
5371 			put_unaligned_le32(CISS_SG_CHAIN, &sg_descriptor->flags);
5372 			*chained = true;
5373 			num_sg_in_iu++;
5374 			sg_descriptor = io_request->sg_chain_buffer;
5375 		}
5376 		sg = sg_next(sg);
5377 	}
5378 
5379 	put_unaligned_le32(CISS_SG_LAST, &sg_descriptor->flags);
5380 
5381 	return num_sg_in_iu;
5382 }
5383 
5384 static int pqi_build_raid_sg_list(struct pqi_ctrl_info *ctrl_info,
5385 	struct pqi_raid_path_request *request, struct scsi_cmnd *scmd,
5386 	struct pqi_io_request *io_request)
5387 {
5388 	u16 iu_length;
5389 	int sg_count;
5390 	bool chained;
5391 	unsigned int num_sg_in_iu;
5392 	struct scatterlist *sg;
5393 	struct pqi_sg_descriptor *sg_descriptor;
5394 
5395 	sg_count = scsi_dma_map(scmd);
5396 	if (sg_count < 0)
5397 		return sg_count;
5398 
5399 	iu_length = offsetof(struct pqi_raid_path_request, sg_descriptors) -
5400 		PQI_REQUEST_HEADER_LENGTH;
5401 
5402 	if (sg_count == 0)
5403 		goto out;
5404 
5405 	sg = scsi_sglist(scmd);
5406 	sg_descriptor = request->sg_descriptors;
5407 
5408 	num_sg_in_iu = pqi_build_sg_list(sg_descriptor, sg, sg_count, io_request,
5409 		ctrl_info->max_sg_per_iu, &chained);
5410 
5411 	request->partial = chained;
5412 	iu_length += num_sg_in_iu * sizeof(*sg_descriptor);
5413 
5414 out:
5415 	put_unaligned_le16(iu_length, &request->header.iu_length);
5416 
5417 	return 0;
5418 }
5419 
5420 static int pqi_build_aio_r1_sg_list(struct pqi_ctrl_info *ctrl_info,
5421 	struct pqi_aio_r1_path_request *request, struct scsi_cmnd *scmd,
5422 	struct pqi_io_request *io_request)
5423 {
5424 	u16 iu_length;
5425 	int sg_count;
5426 	bool chained;
5427 	unsigned int num_sg_in_iu;
5428 	struct scatterlist *sg;
5429 	struct pqi_sg_descriptor *sg_descriptor;
5430 
5431 	sg_count = scsi_dma_map(scmd);
5432 	if (sg_count < 0)
5433 		return sg_count;
5434 
5435 	iu_length = offsetof(struct pqi_aio_r1_path_request, sg_descriptors) -
5436 		PQI_REQUEST_HEADER_LENGTH;
5437 	num_sg_in_iu = 0;
5438 
5439 	if (sg_count == 0)
5440 		goto out;
5441 
5442 	sg = scsi_sglist(scmd);
5443 	sg_descriptor = request->sg_descriptors;
5444 
5445 	num_sg_in_iu = pqi_build_sg_list(sg_descriptor, sg, sg_count, io_request,
5446 		ctrl_info->max_sg_per_iu, &chained);
5447 
5448 	request->partial = chained;
5449 	iu_length += num_sg_in_iu * sizeof(*sg_descriptor);
5450 
5451 out:
5452 	put_unaligned_le16(iu_length, &request->header.iu_length);
5453 	request->num_sg_descriptors = num_sg_in_iu;
5454 
5455 	return 0;
5456 }
5457 
5458 static int pqi_build_aio_r56_sg_list(struct pqi_ctrl_info *ctrl_info,
5459 	struct pqi_aio_r56_path_request *request, struct scsi_cmnd *scmd,
5460 	struct pqi_io_request *io_request)
5461 {
5462 	u16 iu_length;
5463 	int sg_count;
5464 	bool chained;
5465 	unsigned int num_sg_in_iu;
5466 	struct scatterlist *sg;
5467 	struct pqi_sg_descriptor *sg_descriptor;
5468 
5469 	sg_count = scsi_dma_map(scmd);
5470 	if (sg_count < 0)
5471 		return sg_count;
5472 
5473 	iu_length = offsetof(struct pqi_aio_r56_path_request, sg_descriptors) -
5474 		PQI_REQUEST_HEADER_LENGTH;
5475 	num_sg_in_iu = 0;
5476 
5477 	if (sg_count != 0) {
5478 		sg = scsi_sglist(scmd);
5479 		sg_descriptor = request->sg_descriptors;
5480 
5481 		num_sg_in_iu = pqi_build_sg_list(sg_descriptor, sg, sg_count, io_request,
5482 			ctrl_info->max_sg_per_r56_iu, &chained);
5483 
5484 		request->partial = chained;
5485 		iu_length += num_sg_in_iu * sizeof(*sg_descriptor);
5486 	}
5487 
5488 	put_unaligned_le16(iu_length, &request->header.iu_length);
5489 	request->num_sg_descriptors = num_sg_in_iu;
5490 
5491 	return 0;
5492 }
5493 
5494 static int pqi_build_aio_sg_list(struct pqi_ctrl_info *ctrl_info,
5495 	struct pqi_aio_path_request *request, struct scsi_cmnd *scmd,
5496 	struct pqi_io_request *io_request)
5497 {
5498 	u16 iu_length;
5499 	int sg_count;
5500 	bool chained;
5501 	unsigned int num_sg_in_iu;
5502 	struct scatterlist *sg;
5503 	struct pqi_sg_descriptor *sg_descriptor;
5504 
5505 	sg_count = scsi_dma_map(scmd);
5506 	if (sg_count < 0)
5507 		return sg_count;
5508 
5509 	iu_length = offsetof(struct pqi_aio_path_request, sg_descriptors) -
5510 		PQI_REQUEST_HEADER_LENGTH;
5511 	num_sg_in_iu = 0;
5512 
5513 	if (sg_count == 0)
5514 		goto out;
5515 
5516 	sg = scsi_sglist(scmd);
5517 	sg_descriptor = request->sg_descriptors;
5518 
5519 	num_sg_in_iu = pqi_build_sg_list(sg_descriptor, sg, sg_count, io_request,
5520 		ctrl_info->max_sg_per_iu, &chained);
5521 
5522 	request->partial = chained;
5523 	iu_length += num_sg_in_iu * sizeof(*sg_descriptor);
5524 
5525 out:
5526 	put_unaligned_le16(iu_length, &request->header.iu_length);
5527 	request->num_sg_descriptors = num_sg_in_iu;
5528 
5529 	return 0;
5530 }
5531 
5532 static void pqi_raid_io_complete(struct pqi_io_request *io_request,
5533 	void *context)
5534 {
5535 	struct scsi_cmnd *scmd;
5536 
5537 	scmd = io_request->scmd;
5538 	pqi_free_io_request(io_request);
5539 	scsi_dma_unmap(scmd);
5540 	pqi_scsi_done(scmd);
5541 }
5542 
5543 static int pqi_raid_submit_io(struct pqi_ctrl_info *ctrl_info,
5544 	struct pqi_scsi_dev *device, struct scsi_cmnd *scmd,
5545 	struct pqi_queue_group *queue_group, bool io_high_prio)
5546 {
5547 	int rc;
5548 	size_t cdb_length;
5549 	struct pqi_io_request *io_request;
5550 	struct pqi_raid_path_request *request;
5551 
5552 	io_request = pqi_alloc_io_request(ctrl_info, scmd);
5553 	if (!io_request)
5554 		return SCSI_MLQUEUE_HOST_BUSY;
5555 
5556 	io_request->io_complete_callback = pqi_raid_io_complete;
5557 	io_request->scmd = scmd;
5558 
5559 	request = io_request->iu;
5560 	memset(request, 0, offsetof(struct pqi_raid_path_request, sg_descriptors));
5561 
5562 	request->header.iu_type = PQI_REQUEST_IU_RAID_PATH_IO;
5563 	put_unaligned_le32(scsi_bufflen(scmd), &request->buffer_length);
5564 	request->task_attribute = SOP_TASK_ATTRIBUTE_SIMPLE;
5565 	request->command_priority = io_high_prio;
5566 	put_unaligned_le16(io_request->index, &request->request_id);
5567 	request->error_index = request->request_id;
5568 	memcpy(request->lun_number, device->scsi3addr, sizeof(request->lun_number));
5569 	request->ml_device_lun_number = (u8)scmd->device->lun;
5570 
5571 	cdb_length = min_t(size_t, scmd->cmd_len, sizeof(request->cdb));
5572 	memcpy(request->cdb, scmd->cmnd, cdb_length);
5573 
5574 	switch (cdb_length) {
5575 	case 6:
5576 	case 10:
5577 	case 12:
5578 	case 16:
5579 		request->additional_cdb_bytes_usage = SOP_ADDITIONAL_CDB_BYTES_0;
5580 		break;
5581 	case 20:
5582 		request->additional_cdb_bytes_usage = SOP_ADDITIONAL_CDB_BYTES_4;
5583 		break;
5584 	case 24:
5585 		request->additional_cdb_bytes_usage = SOP_ADDITIONAL_CDB_BYTES_8;
5586 		break;
5587 	case 28:
5588 		request->additional_cdb_bytes_usage = SOP_ADDITIONAL_CDB_BYTES_12;
5589 		break;
5590 	case 32:
5591 	default:
5592 		request->additional_cdb_bytes_usage = SOP_ADDITIONAL_CDB_BYTES_16;
5593 		break;
5594 	}
5595 
5596 	switch (scmd->sc_data_direction) {
5597 	case DMA_FROM_DEVICE:
5598 		request->data_direction = SOP_READ_FLAG;
5599 		break;
5600 	case DMA_TO_DEVICE:
5601 		request->data_direction = SOP_WRITE_FLAG;
5602 		break;
5603 	case DMA_NONE:
5604 		request->data_direction = SOP_NO_DIRECTION_FLAG;
5605 		break;
5606 	case DMA_BIDIRECTIONAL:
5607 		request->data_direction = SOP_BIDIRECTIONAL;
5608 		break;
5609 	default:
5610 		dev_err(&ctrl_info->pci_dev->dev,
5611 			"unknown data direction: %d\n",
5612 			scmd->sc_data_direction);
5613 		break;
5614 	}
5615 
5616 	rc = pqi_build_raid_sg_list(ctrl_info, request, scmd, io_request);
5617 	if (rc) {
5618 		pqi_free_io_request(io_request);
5619 		return SCSI_MLQUEUE_HOST_BUSY;
5620 	}
5621 
5622 	pqi_start_io(ctrl_info, queue_group, RAID_PATH, io_request);
5623 
5624 	return 0;
5625 }
5626 
5627 static inline int pqi_raid_submit_scsi_cmd(struct pqi_ctrl_info *ctrl_info,
5628 	struct pqi_scsi_dev *device, struct scsi_cmnd *scmd,
5629 	struct pqi_queue_group *queue_group)
5630 {
5631 	bool io_high_prio;
5632 
5633 	io_high_prio = pqi_is_io_high_priority(device, scmd);
5634 
5635 	return pqi_raid_submit_io(ctrl_info, device, scmd, queue_group, io_high_prio);
5636 }
5637 
5638 static bool pqi_raid_bypass_retry_needed(struct pqi_io_request *io_request)
5639 {
5640 	struct scsi_cmnd *scmd;
5641 	struct pqi_scsi_dev *device;
5642 	struct pqi_ctrl_info *ctrl_info;
5643 
5644 	if (!io_request->raid_bypass)
5645 		return false;
5646 
5647 	scmd = io_request->scmd;
5648 	if ((scmd->result & 0xff) == SAM_STAT_GOOD)
5649 		return false;
5650 	if (host_byte(scmd->result) == DID_NO_CONNECT)
5651 		return false;
5652 
5653 	device = scmd->device->hostdata;
5654 	if (pqi_device_offline(device) || pqi_device_in_remove(device))
5655 		return false;
5656 
5657 	ctrl_info = shost_to_hba(scmd->device->host);
5658 	if (pqi_ctrl_offline(ctrl_info))
5659 		return false;
5660 
5661 	return true;
5662 }
5663 
5664 static void pqi_aio_io_complete(struct pqi_io_request *io_request,
5665 	void *context)
5666 {
5667 	struct scsi_cmnd *scmd;
5668 
5669 	scmd = io_request->scmd;
5670 	scsi_dma_unmap(scmd);
5671 	if (io_request->status == -EAGAIN || pqi_raid_bypass_retry_needed(io_request)) {
5672 		set_host_byte(scmd, DID_IMM_RETRY);
5673 		pqi_cmd_priv(scmd)->this_residual++;
5674 	}
5675 
5676 	pqi_free_io_request(io_request);
5677 	pqi_scsi_done(scmd);
5678 }
5679 
5680 static inline int pqi_aio_submit_scsi_cmd(struct pqi_ctrl_info *ctrl_info,
5681 	struct pqi_scsi_dev *device, struct scsi_cmnd *scmd,
5682 	struct pqi_queue_group *queue_group)
5683 {
5684 	bool io_high_prio;
5685 
5686 	io_high_prio = pqi_is_io_high_priority(device, scmd);
5687 
5688 	return pqi_aio_submit_io(ctrl_info, scmd, device->aio_handle,
5689 		scmd->cmnd, scmd->cmd_len, queue_group, NULL,
5690 		false, io_high_prio);
5691 }
5692 
5693 static int pqi_aio_submit_io(struct pqi_ctrl_info *ctrl_info,
5694 	struct scsi_cmnd *scmd, u32 aio_handle, u8 *cdb,
5695 	unsigned int cdb_length, struct pqi_queue_group *queue_group,
5696 	struct pqi_encryption_info *encryption_info, bool raid_bypass,
5697 	bool io_high_prio)
5698 {
5699 	int rc;
5700 	struct pqi_io_request *io_request;
5701 	struct pqi_aio_path_request *request;
5702 
5703 	io_request = pqi_alloc_io_request(ctrl_info, scmd);
5704 	if (!io_request)
5705 		return SCSI_MLQUEUE_HOST_BUSY;
5706 
5707 	io_request->io_complete_callback = pqi_aio_io_complete;
5708 	io_request->scmd = scmd;
5709 	io_request->raid_bypass = raid_bypass;
5710 
5711 	request = io_request->iu;
5712 	memset(request, 0, offsetof(struct pqi_aio_path_request, sg_descriptors));
5713 
5714 	request->header.iu_type = PQI_REQUEST_IU_AIO_PATH_IO;
5715 	put_unaligned_le32(aio_handle, &request->nexus_id);
5716 	put_unaligned_le32(scsi_bufflen(scmd), &request->buffer_length);
5717 	request->task_attribute = SOP_TASK_ATTRIBUTE_SIMPLE;
5718 	request->command_priority = io_high_prio;
5719 	put_unaligned_le16(io_request->index, &request->request_id);
5720 	request->error_index = request->request_id;
5721 	if (!raid_bypass && ctrl_info->multi_lun_device_supported)
5722 		put_unaligned_le64(scmd->device->lun << 8, &request->lun_number);
5723 	if (cdb_length > sizeof(request->cdb))
5724 		cdb_length = sizeof(request->cdb);
5725 	request->cdb_length = cdb_length;
5726 	memcpy(request->cdb, cdb, cdb_length);
5727 
5728 	switch (scmd->sc_data_direction) {
5729 	case DMA_TO_DEVICE:
5730 		request->data_direction = SOP_READ_FLAG;
5731 		break;
5732 	case DMA_FROM_DEVICE:
5733 		request->data_direction = SOP_WRITE_FLAG;
5734 		break;
5735 	case DMA_NONE:
5736 		request->data_direction = SOP_NO_DIRECTION_FLAG;
5737 		break;
5738 	case DMA_BIDIRECTIONAL:
5739 		request->data_direction = SOP_BIDIRECTIONAL;
5740 		break;
5741 	default:
5742 		dev_err(&ctrl_info->pci_dev->dev,
5743 			"unknown data direction: %d\n",
5744 			scmd->sc_data_direction);
5745 		break;
5746 	}
5747 
5748 	if (encryption_info) {
5749 		request->encryption_enable = true;
5750 		put_unaligned_le16(encryption_info->data_encryption_key_index,
5751 			&request->data_encryption_key_index);
5752 		put_unaligned_le32(encryption_info->encrypt_tweak_lower,
5753 			&request->encrypt_tweak_lower);
5754 		put_unaligned_le32(encryption_info->encrypt_tweak_upper,
5755 			&request->encrypt_tweak_upper);
5756 	}
5757 
5758 	rc = pqi_build_aio_sg_list(ctrl_info, request, scmd, io_request);
5759 	if (rc) {
5760 		pqi_free_io_request(io_request);
5761 		return SCSI_MLQUEUE_HOST_BUSY;
5762 	}
5763 
5764 	pqi_start_io(ctrl_info, queue_group, AIO_PATH, io_request);
5765 
5766 	return 0;
5767 }
5768 
5769 static int pqi_aio_submit_r1_write_io(struct pqi_ctrl_info *ctrl_info,
5770 	struct scsi_cmnd *scmd, struct pqi_queue_group *queue_group,
5771 	struct pqi_encryption_info *encryption_info, struct pqi_scsi_dev *device,
5772 	struct pqi_scsi_dev_raid_map_data *rmd)
5773 {
5774 	int rc;
5775 	struct pqi_io_request *io_request;
5776 	struct pqi_aio_r1_path_request *r1_request;
5777 
5778 	io_request = pqi_alloc_io_request(ctrl_info, scmd);
5779 	if (!io_request)
5780 		return SCSI_MLQUEUE_HOST_BUSY;
5781 
5782 	io_request->io_complete_callback = pqi_aio_io_complete;
5783 	io_request->scmd = scmd;
5784 	io_request->raid_bypass = true;
5785 
5786 	r1_request = io_request->iu;
5787 	memset(r1_request, 0, offsetof(struct pqi_aio_r1_path_request, sg_descriptors));
5788 
5789 	r1_request->header.iu_type = PQI_REQUEST_IU_AIO_PATH_RAID1_IO;
5790 	put_unaligned_le16(*(u16 *)device->scsi3addr & 0x3fff, &r1_request->volume_id);
5791 	r1_request->num_drives = rmd->num_it_nexus_entries;
5792 	put_unaligned_le32(rmd->it_nexus[0], &r1_request->it_nexus_1);
5793 	put_unaligned_le32(rmd->it_nexus[1], &r1_request->it_nexus_2);
5794 	if (rmd->num_it_nexus_entries == 3)
5795 		put_unaligned_le32(rmd->it_nexus[2], &r1_request->it_nexus_3);
5796 
5797 	put_unaligned_le32(scsi_bufflen(scmd), &r1_request->data_length);
5798 	r1_request->task_attribute = SOP_TASK_ATTRIBUTE_SIMPLE;
5799 	put_unaligned_le16(io_request->index, &r1_request->request_id);
5800 	r1_request->error_index = r1_request->request_id;
5801 	if (rmd->cdb_length > sizeof(r1_request->cdb))
5802 		rmd->cdb_length = sizeof(r1_request->cdb);
5803 	r1_request->cdb_length = rmd->cdb_length;
5804 	memcpy(r1_request->cdb, rmd->cdb, rmd->cdb_length);
5805 
5806 	/* The direction is always write; the SOP read flag is apparently from the controller's perspective. */
5807 	r1_request->data_direction = SOP_READ_FLAG;
5808 
5809 	if (encryption_info) {
5810 		r1_request->encryption_enable = true;
5811 		put_unaligned_le16(encryption_info->data_encryption_key_index,
5812 				&r1_request->data_encryption_key_index);
5813 		put_unaligned_le32(encryption_info->encrypt_tweak_lower,
5814 				&r1_request->encrypt_tweak_lower);
5815 		put_unaligned_le32(encryption_info->encrypt_tweak_upper,
5816 				&r1_request->encrypt_tweak_upper);
5817 	}
5818 
5819 	rc = pqi_build_aio_r1_sg_list(ctrl_info, r1_request, scmd, io_request);
5820 	if (rc) {
5821 		pqi_free_io_request(io_request);
5822 		return SCSI_MLQUEUE_HOST_BUSY;
5823 	}
5824 
5825 	pqi_start_io(ctrl_info, queue_group, AIO_PATH, io_request);
5826 
5827 	return 0;
5828 }
5829 
5830 static int pqi_aio_submit_r56_write_io(struct pqi_ctrl_info *ctrl_info,
5831 	struct scsi_cmnd *scmd, struct pqi_queue_group *queue_group,
5832 	struct pqi_encryption_info *encryption_info, struct pqi_scsi_dev *device,
5833 	struct pqi_scsi_dev_raid_map_data *rmd)
5834 {
5835 	int rc;
5836 	struct pqi_io_request *io_request;
5837 	struct pqi_aio_r56_path_request *r56_request;
5838 
5839 	io_request = pqi_alloc_io_request(ctrl_info, scmd);
5840 	if (!io_request)
5841 		return SCSI_MLQUEUE_HOST_BUSY;
5842 	io_request->io_complete_callback = pqi_aio_io_complete;
5843 	io_request->scmd = scmd;
5844 	io_request->raid_bypass = true;
5845 
5846 	r56_request = io_request->iu;
5847 	memset(r56_request, 0, offsetof(struct pqi_aio_r56_path_request, sg_descriptors));
5848 
5849 	if (device->raid_level == SA_RAID_5 || device->raid_level == SA_RAID_51)
5850 		r56_request->header.iu_type = PQI_REQUEST_IU_AIO_PATH_RAID5_IO;
5851 	else
5852 		r56_request->header.iu_type = PQI_REQUEST_IU_AIO_PATH_RAID6_IO;
5853 
5854 	put_unaligned_le16(*(u16 *)device->scsi3addr & 0x3fff, &r56_request->volume_id);
5855 	put_unaligned_le32(rmd->aio_handle, &r56_request->data_it_nexus);
5856 	put_unaligned_le32(rmd->p_parity_it_nexus, &r56_request->p_parity_it_nexus);
5857 	if (rmd->raid_level == SA_RAID_6) {
5858 		put_unaligned_le32(rmd->q_parity_it_nexus, &r56_request->q_parity_it_nexus);
5859 		r56_request->xor_multiplier = rmd->xor_mult;
5860 	}
5861 	put_unaligned_le32(scsi_bufflen(scmd), &r56_request->data_length);
5862 	r56_request->task_attribute = SOP_TASK_ATTRIBUTE_SIMPLE;
5863 	put_unaligned_le64(rmd->row, &r56_request->row);
5864 
5865 	put_unaligned_le16(io_request->index, &r56_request->request_id);
5866 	r56_request->error_index = r56_request->request_id;
5867 
5868 	if (rmd->cdb_length > sizeof(r56_request->cdb))
5869 		rmd->cdb_length = sizeof(r56_request->cdb);
5870 	r56_request->cdb_length = rmd->cdb_length;
5871 	memcpy(r56_request->cdb, rmd->cdb, rmd->cdb_length);
5872 
5873 	/* The direction is always write; the SOP read flag is apparently from the controller's perspective. */
5874 	r56_request->data_direction = SOP_READ_FLAG;
5875 
5876 	if (encryption_info) {
5877 		r56_request->encryption_enable = true;
5878 		put_unaligned_le16(encryption_info->data_encryption_key_index,
5879 				&r56_request->data_encryption_key_index);
5880 		put_unaligned_le32(encryption_info->encrypt_tweak_lower,
5881 				&r56_request->encrypt_tweak_lower);
5882 		put_unaligned_le32(encryption_info->encrypt_tweak_upper,
5883 				&r56_request->encrypt_tweak_upper);
5884 	}
5885 
5886 	rc = pqi_build_aio_r56_sg_list(ctrl_info, r56_request, scmd, io_request);
5887 	if (rc) {
5888 		pqi_free_io_request(io_request);
5889 		return SCSI_MLQUEUE_HOST_BUSY;
5890 	}
5891 
5892 	pqi_start_io(ctrl_info, queue_group, AIO_PATH, io_request);
5893 
5894 	return 0;
5895 }
5896 
5897 static inline u16 pqi_get_hw_queue(struct pqi_ctrl_info *ctrl_info,
5898 	struct scsi_cmnd *scmd)
5899 {
5900 	/*
5901 	 * We set host_tagset = 1 during init, so each command's unique tag encodes the hardware queue it was dispatched on.
5902 	 */
5903 	return blk_mq_unique_tag_to_hwq(blk_mq_unique_tag(scsi_cmd_to_rq(scmd)));
5904 }
5905 
5906 static inline bool pqi_is_bypass_eligible_request(struct scsi_cmnd *scmd)
5907 {
5908 	if (blk_rq_is_passthrough(scsi_cmd_to_rq(scmd)))
5909 		return false;
5910 
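	/*
	 * this_residual is incremented by pqi_aio_io_complete() each time a
	 * bypass request is kicked back for a retry, so a nonzero value means
	 * this command must take the normal RAID path.
	 */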
5911 	return pqi_cmd_priv(scmd)->this_residual == 0;
5912 }
5913 
5914 /*
5915  * This function gets called just before we hand the completed SCSI request
5916  * back to the SML.
5917  */
5918 
5919 void pqi_prep_for_scsi_done(struct scsi_cmnd *scmd)
5920 {
5921 	struct pqi_scsi_dev *device;
5922 	struct completion *wait;
5923 
5924 	if (!scmd->device) {
5925 		set_host_byte(scmd, DID_NO_CONNECT);
5926 		return;
5927 	}
5928 
5929 	device = scmd->device->hostdata;
5930 	if (!device) {
5931 		set_host_byte(scmd, DID_NO_CONNECT);
5932 		return;
5933 	}
5934 
5935 	atomic_dec(&device->scsi_cmds_outstanding[scmd->device->lun]);
5936 
5937 	wait = (struct completion *)xchg(&scmd->host_scribble, NULL);
5938 	if (wait != PQI_NO_COMPLETION)
5939 		complete(wait);
5940 }
5941 
5942 static bool pqi_is_parity_write_stream(struct pqi_ctrl_info *ctrl_info,
5943 	struct scsi_cmnd *scmd)
5944 {
5945 	u32 oldest_jiffies;
5946 	u8 lru_index;
5947 	int i;
5948 	int rc;
5949 	struct pqi_scsi_dev *device;
5950 	struct pqi_stream_data *pqi_stream_data;
5951 	struct pqi_scsi_dev_raid_map_data rmd = { 0 };
5952 
5953 	if (!ctrl_info->enable_stream_detection)
5954 		return false;
5955 
5956 	rc = pqi_get_aio_lba_and_block_count(scmd, &rmd);
5957 	if (rc)
5958 		return false;
5959 
5960 	/* Check writes only. */
5961 	if (!rmd.is_write)
5962 		return false;
5963 
5964 	device = scmd->device->hostdata;
5965 
5966 	/* Check for RAID 5/6 streams. */
5967 	if (device->raid_level != SA_RAID_5 && device->raid_level != SA_RAID_6)
5968 		return false;
5969 
5970 	/*
5971 	 * If controller does not support AIO RAID{5,6} writes, need to send
5972 	 * requests down non-AIO path.
5973 	 */
5974 	if ((device->raid_level == SA_RAID_5 && !ctrl_info->enable_r5_writes) ||
5975 		(device->raid_level == SA_RAID_6 && !ctrl_info->enable_r6_writes))
5976 		return true;
5977 
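	/*
	 * Scan the per-LUN stream trackers. A write that continues where a
	 * tracked stream left off is treated as part of a sequential parity
	 * write stream and returns true, steering it down the RAID path so
	 * the firmware can optimize full-stripe writes; otherwise the
	 * least-recently-used tracker is recycled for this request.
	 */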
5978 	lru_index = 0;
5979 	oldest_jiffies = INT_MAX;
5980 	for (i = 0; i < NUM_STREAMS_PER_LUN; i++) {
5981 		pqi_stream_data = &device->stream_data[i];
5982 		/*
5983 		 * Check for adjacent request or request is within
5984 		 * the previous request.
5985 		 */
5986 		if ((pqi_stream_data->next_lba &&
5987 			rmd.first_block >= pqi_stream_data->next_lba) &&
5988 			rmd.first_block <= pqi_stream_data->next_lba +
5989 				rmd.block_cnt) {
5990 			pqi_stream_data->next_lba = rmd.first_block +
5991 				rmd.block_cnt;
5992 			pqi_stream_data->last_accessed = jiffies;
5993 			per_cpu_ptr(device->raid_io_stats, smp_processor_id())->write_stream_cnt++;
5994 			return true;
5995 		}
5996 
5997 		/* unused entry */
5998 		if (pqi_stream_data->last_accessed == 0) {
5999 			lru_index = i;
6000 			break;
6001 		}
6002 
6003 		/* Find entry with oldest last accessed time. */
6004 		if (pqi_stream_data->last_accessed <= oldest_jiffies) {
6005 			oldest_jiffies = pqi_stream_data->last_accessed;
6006 			lru_index = i;
6007 		}
6008 	}
6009 
6010 	/* Set LRU entry. */
6011 	pqi_stream_data = &device->stream_data[lru_index];
6012 	pqi_stream_data->last_accessed = jiffies;
6013 	pqi_stream_data->next_lba = rmd.first_block + rmd.block_cnt;
6014 
6015 	return false;
6016 }
6017 
6018 static int pqi_scsi_queue_command(struct Scsi_Host *shost, struct scsi_cmnd *scmd)
6019 {
6020 	int rc;
6021 	struct pqi_ctrl_info *ctrl_info;
6022 	struct pqi_scsi_dev *device;
6023 	u16 hw_queue;
6024 	struct pqi_queue_group *queue_group;
6025 	bool raid_bypassed;
6026 	u8 lun;
6027 
6028 	scmd->host_scribble = PQI_NO_COMPLETION;
6029 
6030 	device = scmd->device->hostdata;
6031 
6032 	if (!device) {
6033 		set_host_byte(scmd, DID_NO_CONNECT);
6034 		pqi_scsi_done(scmd);
6035 		return 0;
6036 	}
6037 
6038 	lun = (u8)scmd->device->lun;
6039 
6040 	atomic_inc(&device->scsi_cmds_outstanding[lun]);
6041 
6042 	ctrl_info = shost_to_hba(shost);
6043 
6044 	if (pqi_ctrl_offline(ctrl_info) || pqi_device_offline(device) || pqi_device_in_remove(device)) {
6045 		set_host_byte(scmd, DID_NO_CONNECT);
6046 		pqi_scsi_done(scmd);
6047 		return 0;
6048 	}
6049 
6050 	if (pqi_ctrl_blocked(ctrl_info) || pqi_device_in_reset(device, lun)) {
6051 		rc = SCSI_MLQUEUE_HOST_BUSY;
6052 		goto out;
6053 	}
6054 
6055 	/*
6056 	 * This is necessary because the SML doesn't zero out this field during
6057 	 * error recovery.
6058 	 */
6059 	scmd->result = 0;
6060 
6061 	hw_queue = pqi_get_hw_queue(ctrl_info, scmd);
6062 	queue_group = &ctrl_info->queue_groups[hw_queue];
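
	/*
	 * Logical volumes may be eligible for RAID bypass (AIO straight to
	 * the underlying drives); if bypass is disabled, ineligible, or the
	 * command looks like part of a parity write stream, fall back to the
	 * RAID path. Physical devices use the AIO path whenever it is enabled.
	 */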
6063 
6064 	if (pqi_is_logical_device(device)) {
6065 		raid_bypassed = false;
6066 		if (device->raid_bypass_enabled &&
6067 			pqi_is_bypass_eligible_request(scmd) &&
6068 			!pqi_is_parity_write_stream(ctrl_info, scmd)) {
6069 			rc = pqi_raid_bypass_submit_scsi_cmd(ctrl_info, device, scmd, queue_group);
6070 			if (rc == 0 || rc == SCSI_MLQUEUE_HOST_BUSY) {
6071 				raid_bypassed = true;
6072 				per_cpu_ptr(device->raid_io_stats, smp_processor_id())->raid_bypass_cnt++;
6073 			}
6074 		}
6075 		if (!raid_bypassed)
6076 			rc = pqi_raid_submit_scsi_cmd(ctrl_info, device, scmd, queue_group);
6077 	} else {
6078 		if (device->aio_enabled)
6079 			rc = pqi_aio_submit_scsi_cmd(ctrl_info, device, scmd, queue_group);
6080 		else
6081 			rc = pqi_raid_submit_scsi_cmd(ctrl_info, device, scmd, queue_group);
6082 	}
6083 
6084 out:
6085 	if (rc) {
6086 		scmd->host_scribble = NULL;
6087 		atomic_dec(&device->scsi_cmds_outstanding[lun]);
6088 	}
6089 
6090 	return rc;
6091 }
6092 
6093 static unsigned int pqi_queued_io_count(struct pqi_ctrl_info *ctrl_info)
6094 {
6095 	unsigned int i;
6096 	unsigned int path;
6097 	unsigned long flags;
6098 	unsigned int queued_io_count;
6099 	struct pqi_queue_group *queue_group;
6100 	struct pqi_io_request *io_request;
6101 
6102 	queued_io_count = 0;
6103 
6104 	for (i = 0; i < ctrl_info->num_queue_groups; i++) {
6105 		queue_group = &ctrl_info->queue_groups[i];
6106 		for (path = 0; path < 2; path++) {
6107 			spin_lock_irqsave(&queue_group->submit_lock[path], flags);
6108 			list_for_each_entry(io_request, &queue_group->request_list[path], request_list_entry)
6109 				queued_io_count++;
6110 			spin_unlock_irqrestore(&queue_group->submit_lock[path], flags);
6111 		}
6112 	}
6113 
6114 	return queued_io_count;
6115 }
6116 
6117 static unsigned int pqi_nonempty_inbound_queue_count(struct pqi_ctrl_info *ctrl_info)
6118 {
6119 	unsigned int i;
6120 	unsigned int path;
6121 	unsigned int nonempty_inbound_queue_count;
6122 	struct pqi_queue_group *queue_group;
6123 	pqi_index_t iq_pi;
6124 	pqi_index_t iq_ci;
6125 
6126 	nonempty_inbound_queue_count = 0;
6127 
6128 	for (i = 0; i < ctrl_info->num_queue_groups; i++) {
6129 		queue_group = &ctrl_info->queue_groups[i];
6130 		for (path = 0; path < 2; path++) {
6131 			iq_pi = queue_group->iq_pi_copy[path];
6132 			iq_ci = readl(queue_group->iq_ci[path]);
6133 			if (iq_ci != iq_pi)
6134 				nonempty_inbound_queue_count++;
6135 		}
6136 	}
6137 
6138 	return nonempty_inbound_queue_count;
6139 }
6140 
6141 #define PQI_INBOUND_QUEUES_NONEMPTY_WARNING_TIMEOUT_SECS	10
6142 
6143 static int pqi_wait_until_inbound_queues_empty(struct pqi_ctrl_info *ctrl_info)
6144 {
6145 	unsigned long start_jiffies;
6146 	unsigned long warning_timeout;
6147 	unsigned int queued_io_count;
6148 	unsigned int nonempty_inbound_queue_count;
6149 	bool displayed_warning;
6150 
6151 	displayed_warning = false;
6152 	start_jiffies = jiffies;
6153 	warning_timeout = (PQI_INBOUND_QUEUES_NONEMPTY_WARNING_TIMEOUT_SECS * HZ) + start_jiffies;
6154 
6155 	while (1) {
6156 		queued_io_count = pqi_queued_io_count(ctrl_info);
6157 		nonempty_inbound_queue_count = pqi_nonempty_inbound_queue_count(ctrl_info);
6158 		if (queued_io_count == 0 && nonempty_inbound_queue_count == 0)
6159 			break;
6160 		pqi_check_ctrl_health(ctrl_info);
6161 		if (pqi_ctrl_offline(ctrl_info))
6162 			return -ENXIO;
6163 		if (time_after(jiffies, warning_timeout)) {
6164 			dev_warn(&ctrl_info->pci_dev->dev,
6165 				"waiting %u seconds for queued I/O to drain (queued I/O count: %u; non-empty inbound queue count: %u)\n",
6166 				jiffies_to_msecs(jiffies - start_jiffies) / 1000, queued_io_count, nonempty_inbound_queue_count);
6167 			displayed_warning = true;
6168 			warning_timeout = (PQI_INBOUND_QUEUES_NONEMPTY_WARNING_TIMEOUT_SECS * HZ) + jiffies;
6169 		}
6170 		usleep_range(1000, 2000);
6171 	}
6172 
6173 	if (displayed_warning)
6174 		dev_warn(&ctrl_info->pci_dev->dev,
6175 			"queued I/O drained after waiting for %u seconds\n",
6176 			jiffies_to_msecs(jiffies - start_jiffies) / 1000);
6177 
6178 	return 0;
6179 }
6180 
6181 static void pqi_fail_io_queued_for_device(struct pqi_ctrl_info *ctrl_info,
6182 	struct pqi_scsi_dev *device, u8 lun)
6183 {
6184 	unsigned int i;
6185 	unsigned int path;
6186 	struct pqi_queue_group *queue_group;
6187 	unsigned long flags;
6188 	struct pqi_io_request *io_request;
6189 	struct pqi_io_request *next;
6190 	struct scsi_cmnd *scmd;
6191 	struct pqi_scsi_dev *scsi_device;
6192 
6193 	for (i = 0; i < ctrl_info->num_queue_groups; i++) {
6194 		queue_group = &ctrl_info->queue_groups[i];
6195 
6196 		for (path = 0; path < 2; path++) {
6197 			spin_lock_irqsave(
6198 				&queue_group->submit_lock[path], flags);
6199 
6200 			list_for_each_entry_safe(io_request, next,
6201 				&queue_group->request_list[path],
6202 				request_list_entry) {
6203 
6204 				scmd = io_request->scmd;
6205 				if (!scmd)
6206 					continue;
6207 
6208 				scsi_device = scmd->device->hostdata;
6209 
6210 				list_del(&io_request->request_list_entry);
6211 				if (scsi_device == device && (u8)scmd->device->lun == lun)
6212 					set_host_byte(scmd, DID_RESET);
6213 				else
6214 					set_host_byte(scmd, DID_REQUEUE);
6215 				pqi_free_io_request(io_request);
6216 				scsi_dma_unmap(scmd);
6217 				pqi_scsi_done(scmd);
6218 			}
6219 
6220 			spin_unlock_irqrestore(
6221 				&queue_group->submit_lock[path], flags);
6222 		}
6223 	}
6224 }
6225 
6226 #define PQI_PENDING_IO_WARNING_TIMEOUT_SECS	10
6227 
6228 static int pqi_device_wait_for_pending_io(struct pqi_ctrl_info *ctrl_info,
6229 	struct pqi_scsi_dev *device, u8 lun, unsigned long timeout_msecs)
6230 {
6231 	int cmds_outstanding;
6232 	unsigned long start_jiffies;
6233 	unsigned long warning_timeout;
6234 	unsigned long msecs_waiting;
6235 
6236 	start_jiffies = jiffies;
6237 	warning_timeout = (PQI_PENDING_IO_WARNING_TIMEOUT_SECS * HZ) + start_jiffies;
6238 
6239 	while ((cmds_outstanding = atomic_read(&device->scsi_cmds_outstanding[lun])) > 0) {
6240 		if (ctrl_info->ctrl_removal_state != PQI_CTRL_GRACEFUL_REMOVAL) {
6241 			pqi_check_ctrl_health(ctrl_info);
6242 			if (pqi_ctrl_offline(ctrl_info))
6243 				return -ENXIO;
6244 		}
6245 		msecs_waiting = jiffies_to_msecs(jiffies - start_jiffies);
6246 		if (msecs_waiting >= timeout_msecs) {
6247 			dev_err(&ctrl_info->pci_dev->dev,
6248 				"scsi %d:%d:%d:%d: timed out after %lu seconds waiting for %d outstanding command(s)\n",
6249 				ctrl_info->scsi_host->host_no, device->bus, device->target,
6250 				lun, msecs_waiting / 1000, cmds_outstanding);
6251 			return -ETIMEDOUT;
6252 		}
6253 		if (time_after(jiffies, warning_timeout)) {
6254 			dev_warn(&ctrl_info->pci_dev->dev,
6255 				"scsi %d:%d:%d:%d: waiting %lu seconds for %d outstanding command(s)\n",
6256 				ctrl_info->scsi_host->host_no, device->bus, device->target,
6257 				lun, msecs_waiting / 1000, cmds_outstanding);
6258 			warning_timeout = (PQI_PENDING_IO_WARNING_TIMEOUT_SECS * HZ) + jiffies;
6259 		}
6260 		usleep_range(1000, 2000);
6261 	}
6262 
6263 	return 0;
6264 }
6265 
6266 static void pqi_lun_reset_complete(struct pqi_io_request *io_request,
6267 	void *context)
6268 {
6269 	struct completion *waiting = context;
6270 
6271 	complete(waiting);
6272 }
6273 
6274 #define PQI_LUN_RESET_POLL_COMPLETION_SECS	10
6275 
6276 static int pqi_wait_for_lun_reset_completion(struct pqi_ctrl_info *ctrl_info,
6277 	struct pqi_scsi_dev *device, u8 lun, struct completion *wait)
6278 {
6279 	int rc;
6280 	unsigned int wait_secs;
6281 	int cmds_outstanding;
6282 
6283 	wait_secs = 0;
6284 
6285 	while (1) {
6286 		if (wait_for_completion_io_timeout(wait,
6287 			PQI_LUN_RESET_POLL_COMPLETION_SECS * HZ)) {
6288 			rc = 0;
6289 			break;
6290 		}
6291 
6292 		pqi_check_ctrl_health(ctrl_info);
6293 		if (pqi_ctrl_offline(ctrl_info)) {
6294 			rc = -ENXIO;
6295 			break;
6296 		}
6297 
6298 		wait_secs += PQI_LUN_RESET_POLL_COMPLETION_SECS;
6299 		cmds_outstanding = atomic_read(&device->scsi_cmds_outstanding[lun]);
6300 		dev_warn(&ctrl_info->pci_dev->dev,
6301 			"scsi %d:%d:%d:%d: waiting %u seconds for LUN reset to complete (%d command(s) outstanding)\n",
6302 			ctrl_info->scsi_host->host_no, device->bus, device->target, lun, wait_secs, cmds_outstanding);
6303 	}
6304 
6305 	return rc;
6306 }
6307 
6308 #define PQI_LUN_RESET_FIRMWARE_TIMEOUT_SECS	30
6309 
6310 static int pqi_lun_reset(struct pqi_ctrl_info *ctrl_info, struct pqi_scsi_dev *device, u8 lun)
6311 {
6312 	int rc;
6313 	struct pqi_io_request *io_request;
6314 	DECLARE_COMPLETION_ONSTACK(wait);
6315 	struct pqi_task_management_request *request;
6316 
6317 	io_request = pqi_alloc_io_request(ctrl_info, NULL);
6318 	io_request->io_complete_callback = pqi_lun_reset_complete;
6319 	io_request->context = &wait;
6320 
6321 	request = io_request->iu;
6322 	memset(request, 0, sizeof(*request));
6323 
6324 	request->header.iu_type = PQI_REQUEST_IU_TASK_MANAGEMENT;
6325 	put_unaligned_le16(sizeof(*request) - PQI_REQUEST_HEADER_LENGTH,
6326 		&request->header.iu_length);
6327 	put_unaligned_le16(io_request->index, &request->request_id);
6328 	memcpy(request->lun_number, device->scsi3addr,
6329 		sizeof(request->lun_number));
6330 	if (!pqi_is_logical_device(device) && ctrl_info->multi_lun_device_supported)
6331 		request->ml_device_lun_number = lun;
6332 	request->task_management_function = SOP_TASK_MANAGEMENT_LUN_RESET;
6333 	if (ctrl_info->tmf_iu_timeout_supported)
6334 		put_unaligned_le16(PQI_LUN_RESET_FIRMWARE_TIMEOUT_SECS, &request->timeout);
6335 
6336 	pqi_start_io(ctrl_info, &ctrl_info->queue_groups[PQI_DEFAULT_QUEUE_GROUP], RAID_PATH,
6337 		io_request);
6338 
6339 	rc = pqi_wait_for_lun_reset_completion(ctrl_info, device, lun, &wait);
6340 	if (rc == 0)
6341 		rc = io_request->status;
6342 
6343 	pqi_free_io_request(io_request);
6344 
6345 	return rc;
6346 }
6347 
6348 #define PQI_LUN_RESET_RETRIES				3
6349 #define PQI_LUN_RESET_RETRY_INTERVAL_MSECS		(10 * 1000)
6350 #define PQI_LUN_RESET_PENDING_IO_TIMEOUT_MSECS		(10 * 60 * 1000)
6351 #define PQI_LUN_RESET_FAILED_PENDING_IO_TIMEOUT_MSECS	(2 * 60 * 1000)
6352 
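/*
 * Retry the LUN reset up to PQI_LUN_RESET_RETRIES times (unless the
 * device or controller is gone), then drain outstanding I/O; a failed
 * reset gets the shorter drain timeout.
 */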
6353 static int pqi_lun_reset_with_retries(struct pqi_ctrl_info *ctrl_info, struct pqi_scsi_dev *device, u8 lun)
6354 {
6355 	int reset_rc;
6356 	int wait_rc;
6357 	unsigned int retries;
6358 	unsigned long timeout_msecs;
6359 
6360 	for (retries = 0;;) {
6361 		reset_rc = pqi_lun_reset(ctrl_info, device, lun);
6362 		if (reset_rc == 0 || reset_rc == -ENODEV || reset_rc == -ENXIO || ++retries > PQI_LUN_RESET_RETRIES)
6363 			break;
6364 		msleep(PQI_LUN_RESET_RETRY_INTERVAL_MSECS);
6365 	}
6366 
6367 	timeout_msecs = reset_rc ? PQI_LUN_RESET_FAILED_PENDING_IO_TIMEOUT_MSECS :
6368 		PQI_LUN_RESET_PENDING_IO_TIMEOUT_MSECS;
6369 
6370 	wait_rc = pqi_device_wait_for_pending_io(ctrl_info, device, lun, timeout_msecs);
6371 	if (wait_rc && reset_rc == 0)
6372 		reset_rc = wait_rc;
6373 
6374 	return reset_rc == 0 ? SUCCESS : FAILED;
6375 }
6376 
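/*
 * Quiesce the controller and fail any I/O still queued to the device
 * before issuing the LUN reset itself.
 */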
6377 static int pqi_device_reset(struct pqi_ctrl_info *ctrl_info, struct pqi_scsi_dev *device, u8 lun)
6378 {
6379 	int rc;
6380 
6381 	pqi_ctrl_block_requests(ctrl_info);
6382 	pqi_ctrl_wait_until_quiesced(ctrl_info);
6383 	pqi_fail_io_queued_for_device(ctrl_info, device, lun);
6384 	rc = pqi_wait_until_inbound_queues_empty(ctrl_info);
6385 	pqi_device_reset_start(device, lun);
6386 	pqi_ctrl_unblock_requests(ctrl_info);
6387 	if (rc)
6388 		rc = FAILED;
6389 	else
6390 		rc = pqi_lun_reset_with_retries(ctrl_info, device, lun);
6391 	pqi_device_reset_done(device, lun);
6392 
6393 	return rc;
6394 }
6395 
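/* All resets are serialized on lun_reset_mutex; entry and outcome are logged. */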
6396 static int pqi_device_reset_handler(struct pqi_ctrl_info *ctrl_info, struct pqi_scsi_dev *device, u8 lun, struct scsi_cmnd *scmd, u8 scsi_opcode)
6397 {
6398 	int rc;
6399 
6400 	mutex_lock(&ctrl_info->lun_reset_mutex);
6401 
6402 	dev_err(&ctrl_info->pci_dev->dev,
6403 		"resetting scsi %d:%d:%d:%u SCSI cmd at %p due to cmd opcode 0x%02x\n",
6404 		ctrl_info->scsi_host->host_no, device->bus, device->target, lun, scmd, scsi_opcode);
6405 
6406 	pqi_check_ctrl_health(ctrl_info);
6407 	if (pqi_ctrl_offline(ctrl_info))
6408 		rc = FAILED;
6409 	else
6410 		rc = pqi_device_reset(ctrl_info, device, lun);
6411 
6412 	dev_err(&ctrl_info->pci_dev->dev,
6413 		"reset of scsi %d:%d:%d:%u: %s\n",
6414 		ctrl_info->scsi_host->host_no, device->bus, device->target, lun,
6415 		rc == SUCCESS ? "SUCCESS" : "FAILED");
6416 
6417 	mutex_unlock(&ctrl_info->lun_reset_mutex);
6418 
6419 	return rc;
6420 }
6421 
6422 static int pqi_eh_device_reset_handler(struct scsi_cmnd *scmd)
6423 {
6424 	struct Scsi_Host *shost;
6425 	struct pqi_ctrl_info *ctrl_info;
6426 	struct pqi_scsi_dev *device;
6427 	u8 scsi_opcode;
6428 
6429 	shost = scmd->device->host;
6430 	ctrl_info = shost_to_hba(shost);
6431 	device = scmd->device->hostdata;
6432 	scsi_opcode = scmd->cmd_len > 0 ? scmd->cmnd[0] : 0xff;
6433 
6434 	return pqi_device_reset_handler(ctrl_info, device, (u8)scmd->device->lun, scmd, scsi_opcode);
6435 }
6436 
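/*
 * Workqueue context for aborts: claim the command pointer with xchg()
 * (clearing the slot for reuse) and funnel it into the reset path.
 */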
6437 static void pqi_tmf_worker(struct work_struct *work)
6438 {
6439 	struct pqi_tmf_work *tmf_work;
6440 	struct scsi_cmnd *scmd;
6441 
6442 	tmf_work = container_of(work, struct pqi_tmf_work, work_struct);
6443 	scmd = (struct scsi_cmnd *)xchg(&tmf_work->scmd, NULL);
6444 
6445 	pqi_device_reset_handler(tmf_work->ctrl_info, tmf_work->device, tmf_work->lun, scmd, tmf_work->scsi_opcode);
6446 }
6447 
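/*
 * SCSI EH abort entry point. The abort is implemented as a LUN reset
 * run from pqi_tmf_worker(); host_scribble is flipped from
 * PQI_NO_COMPLETION to a local completion so the completion path can
 * wake the waiter, or reports that the command already finished.
 */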
6448 static int pqi_eh_abort_handler(struct scsi_cmnd *scmd)
6449 {
6450 	struct Scsi_Host *shost;
6451 	struct pqi_ctrl_info *ctrl_info;
6452 	struct pqi_scsi_dev *device;
6453 	struct pqi_tmf_work *tmf_work;
6454 	DECLARE_COMPLETION_ONSTACK(wait);
6455 
6456 	shost = scmd->device->host;
6457 	ctrl_info = shost_to_hba(shost);
6458 	device = scmd->device->hostdata;
6459 
6460 	dev_err(&ctrl_info->pci_dev->dev,
6461 		"attempting TASK ABORT on scsi %d:%d:%d:%d for SCSI cmd at %p\n",
6462 		shost->host_no, device->bus, device->target, (int)scmd->device->lun, scmd);
6463 
6464 	if (cmpxchg(&scmd->host_scribble, PQI_NO_COMPLETION, (void *)&wait) == NULL) {
6465 		dev_err(&ctrl_info->pci_dev->dev,
6466 			"scsi %d:%d:%d:%d for SCSI cmd at %p already completed\n",
6467 			shost->host_no, device->bus, device->target, (int)scmd->device->lun, scmd);
6468 		scmd->result = DID_RESET << 16;
6469 		goto out;
6470 	}
6471 
6472 	tmf_work = &device->tmf_work[scmd->device->lun];
6473 
6474 	if (cmpxchg(&tmf_work->scmd, NULL, scmd) == NULL) {
6475 		tmf_work->ctrl_info = ctrl_info;
6476 		tmf_work->device = device;
6477 		tmf_work->lun = (u8)scmd->device->lun;
6478 		tmf_work->scsi_opcode = scmd->cmd_len > 0 ? scmd->cmnd[0] : 0xff;
6479 		schedule_work(&tmf_work->work_struct);
6480 	}
6481 
6482 	wait_for_completion(&wait);
6483 
6484 	dev_err(&ctrl_info->pci_dev->dev,
6485 		"TASK ABORT on scsi %d:%d:%d:%d for SCSI cmd at %p: SUCCESS\n",
6486 		shost->host_no, device->bus, device->target, (int)scmd->device->lun, scmd);
6487 
6488 out:
6489 
6490 	return SUCCESS;
6491 }
6492 
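/*
 * Bind the midlayer scsi_device to its pqi_scsi_dev: physical devices
 * are matched via their SAS rphy, logical devices by channel/target/LUN.
 * Queue depth and WRITE SAME/restart quirks are applied here as well.
 */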
6493 static int pqi_sdev_init(struct scsi_device *sdev)
6494 {
6495 	struct pqi_scsi_dev *device;
6496 	unsigned long flags;
6497 	struct pqi_ctrl_info *ctrl_info;
6498 	struct scsi_target *starget;
6499 	struct sas_rphy *rphy;
6500 
6501 	ctrl_info = shost_to_hba(sdev->host);
6502 
6503 	spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
6504 
6505 	if (sdev_channel(sdev) == PQI_PHYSICAL_DEVICE_BUS) {
6506 		starget = scsi_target(sdev);
6507 		rphy = target_to_rphy(starget);
6508 		device = pqi_find_device_by_sas_rphy(ctrl_info, rphy);
6509 		if (device) {
6510 			if (device->target_lun_valid) {
6511 				device->ignore_device = true;
6512 			} else {
6513 				device->target = sdev_id(sdev);
6514 				device->lun = sdev->lun;
6515 				device->target_lun_valid = true;
6516 			}
6517 		}
6518 	} else {
6519 		device = pqi_find_scsi_dev(ctrl_info, sdev_channel(sdev),
6520 			sdev_id(sdev), sdev->lun);
6521 	}
6522 
6523 	if (device) {
6524 		sdev->hostdata = device;
6525 		device->sdev = sdev;
6526 		if (device->queue_depth) {
6527 			device->advertised_queue_depth = device->queue_depth;
6528 			scsi_change_queue_depth(sdev,
6529 				device->advertised_queue_depth);
6530 		}
6531 		if (pqi_is_logical_device(device)) {
6532 			pqi_disable_write_same(sdev);
6533 		} else {
6534 			sdev->allow_restart = 1;
6535 			if (device->device_type == SA_DEVICE_TYPE_NVME)
6536 				pqi_disable_write_same(sdev);
6537 		}
6538 	}
6539 
6540 	spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
6541 
6542 	return 0;
6543 }
6544 
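/*
 * Spread the hardware queues across CPUs using the PCI IRQ affinity
 * masks, unless managed interrupts have been disabled.
 */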
6545 static void pqi_map_queues(struct Scsi_Host *shost)
6546 {
6547 	struct pqi_ctrl_info *ctrl_info = shost_to_hba(shost);
6548 
6549 	if (!ctrl_info->disable_managed_interrupts)
6550 		blk_mq_map_hw_queues(&shost->tag_set.map[HCTX_TYPE_DEFAULT],
6551 				       &ctrl_info->pci_dev->dev, 0);
6552 	else
6553 		blk_mq_map_queues(&shost->tag_set.map[HCTX_TYPE_DEFAULT]);
6554 }
6555 
6556 static inline bool pqi_is_tape_changer_device(struct pqi_scsi_dev *device)
6557 {
6558 	return device->devtype == TYPE_TAPE || device->devtype == TYPE_MEDIUM_CHANGER;
6559 }
6560 
6561 static int pqi_sdev_configure(struct scsi_device *sdev,
6562 			      struct queue_limits *lim)
6563 {
6564 	int rc = 0;
6565 	struct pqi_scsi_dev *device;
6566 
6567 	device = sdev->hostdata;
6568 	device->devtype = sdev->type;
6569 
6570 	if (pqi_is_tape_changer_device(device) && device->ignore_device) {
6571 		rc = -ENXIO;
6572 		device->ignore_device = false;
6573 	}
6574 
6575 	return rc;
6576 }
6577 
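/*
 * Drop one LUN reference; the pqi_scsi_dev is unlinked and freed only
 * when the last LUN goes away. Bails out early (leaving state as-is)
 * if scan_mutex is contended.
 */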
6578 static void pqi_sdev_destroy(struct scsi_device *sdev)
6579 {
6580 	struct pqi_ctrl_info *ctrl_info;
6581 	struct pqi_scsi_dev *device;
6582 	int mutex_acquired;
6583 	unsigned long flags;
6584 
6585 	ctrl_info = shost_to_hba(sdev->host);
6586 
6587 	mutex_acquired = mutex_trylock(&ctrl_info->scan_mutex);
6588 	if (!mutex_acquired)
6589 		return;
6590 
6591 	device = sdev->hostdata;
6592 	if (!device) {
6593 		mutex_unlock(&ctrl_info->scan_mutex);
6594 		return;
6595 	}
6596 
6597 	device->lun_count--;
6598 	if (device->lun_count > 0) {
6599 		mutex_unlock(&ctrl_info->scan_mutex);
6600 		return;
6601 	}
6602 
6603 	spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
6604 	list_del(&device->scsi_device_list_entry);
6605 	spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
6606 
6607 	mutex_unlock(&ctrl_info->scan_mutex);
6608 
6609 	pqi_dev_info(ctrl_info, "removed", device);
6610 	pqi_free_device(device);
6611 }
6612 
6613 static int pqi_getpciinfo_ioctl(struct pqi_ctrl_info *ctrl_info, void __user *arg)
6614 {
6615 	struct pci_dev *pci_dev;
6616 	u32 subsystem_vendor;
6617 	u32 subsystem_device;
6618 	cciss_pci_info_struct pci_info;
6619 
6620 	if (!arg)
6621 		return -EINVAL;
6622 
6623 	pci_dev = ctrl_info->pci_dev;
6624 
6625 	pci_info.domain = pci_domain_nr(pci_dev->bus);
6626 	pci_info.bus = pci_dev->bus->number;
6627 	pci_info.dev_fn = pci_dev->devfn;
6628 	subsystem_vendor = pci_dev->subsystem_vendor;
6629 	subsystem_device = pci_dev->subsystem_device;
6630 	pci_info.board_id = ((subsystem_device << 16) & 0xffff0000) | subsystem_vendor;
6631 
6632 	if (copy_to_user(arg, &pci_info, sizeof(pci_info)))
6633 		return -EFAULT;
6634 
6635 	return 0;
6636 }
6637 
6638 static int pqi_getdrivver_ioctl(void __user *arg)
6639 {
6640 	u32 version;
6641 
6642 	if (!arg)
6643 		return -EINVAL;
6644 
6645 	version = (DRIVER_MAJOR << 28) | (DRIVER_MINOR << 24) |
6646 		(DRIVER_RELEASE << 16) | DRIVER_REVISION;
6647 
6648 	if (copy_to_user(arg, &version, sizeof(version)))
6649 		return -EFAULT;
6650 
6651 	return 0;
6652 }
6653 
6654 struct ciss_error_info {
6655 	u8	scsi_status;
6656 	int	command_status;
6657 	size_t	sense_data_length;
6658 };
6659 
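/*
 * Translate a PQI RAID-path error descriptor into the legacy CISS
 * status/sense layout expected by CCISS_PASSTHRU callers.
 */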
6660 static void pqi_error_info_to_ciss(struct pqi_raid_error_info *pqi_error_info,
6661 	struct ciss_error_info *ciss_error_info)
6662 {
6663 	int ciss_cmd_status;
6664 	size_t sense_data_length;
6665 
6666 	switch (pqi_error_info->data_out_result) {
6667 	case PQI_DATA_IN_OUT_GOOD:
6668 		ciss_cmd_status = CISS_CMD_STATUS_SUCCESS;
6669 		break;
6670 	case PQI_DATA_IN_OUT_UNDERFLOW:
6671 		ciss_cmd_status = CISS_CMD_STATUS_DATA_UNDERRUN;
6672 		break;
6673 	case PQI_DATA_IN_OUT_BUFFER_OVERFLOW:
6674 		ciss_cmd_status = CISS_CMD_STATUS_DATA_OVERRUN;
6675 		break;
6676 	case PQI_DATA_IN_OUT_PROTOCOL_ERROR:
6677 	case PQI_DATA_IN_OUT_BUFFER_ERROR:
6678 	case PQI_DATA_IN_OUT_BUFFER_OVERFLOW_DESCRIPTOR_AREA:
6679 	case PQI_DATA_IN_OUT_BUFFER_OVERFLOW_BRIDGE:
6680 	case PQI_DATA_IN_OUT_ERROR:
6681 		ciss_cmd_status = CISS_CMD_STATUS_PROTOCOL_ERROR;
6682 		break;
6683 	case PQI_DATA_IN_OUT_HARDWARE_ERROR:
6684 	case PQI_DATA_IN_OUT_PCIE_FABRIC_ERROR:
6685 	case PQI_DATA_IN_OUT_PCIE_COMPLETION_TIMEOUT:
6686 	case PQI_DATA_IN_OUT_PCIE_COMPLETER_ABORT_RECEIVED:
6687 	case PQI_DATA_IN_OUT_PCIE_UNSUPPORTED_REQUEST_RECEIVED:
6688 	case PQI_DATA_IN_OUT_PCIE_ECRC_CHECK_FAILED:
6689 	case PQI_DATA_IN_OUT_PCIE_UNSUPPORTED_REQUEST:
6690 	case PQI_DATA_IN_OUT_PCIE_ACS_VIOLATION:
6691 	case PQI_DATA_IN_OUT_PCIE_TLP_PREFIX_BLOCKED:
6692 	case PQI_DATA_IN_OUT_PCIE_POISONED_MEMORY_READ:
6693 		ciss_cmd_status = CISS_CMD_STATUS_HARDWARE_ERROR;
6694 		break;
6695 	case PQI_DATA_IN_OUT_UNSOLICITED_ABORT:
6696 		ciss_cmd_status = CISS_CMD_STATUS_UNSOLICITED_ABORT;
6697 		break;
6698 	case PQI_DATA_IN_OUT_ABORTED:
6699 		ciss_cmd_status = CISS_CMD_STATUS_ABORTED;
6700 		break;
6701 	case PQI_DATA_IN_OUT_TIMEOUT:
6702 		ciss_cmd_status = CISS_CMD_STATUS_TIMEOUT;
6703 		break;
6704 	default:
6705 		ciss_cmd_status = CISS_CMD_STATUS_TARGET_STATUS;
6706 		break;
6707 	}
6708 
6709 	sense_data_length =
6710 		get_unaligned_le16(&pqi_error_info->sense_data_length);
6711 	if (sense_data_length == 0)
6712 		sense_data_length =
6713 		get_unaligned_le16(&pqi_error_info->response_data_length);
6714 	if (sense_data_length)
6715 		if (sense_data_length > sizeof(pqi_error_info->data))
6716 			sense_data_length = sizeof(pqi_error_info->data);
6717 
6718 	ciss_error_info->scsi_status = pqi_error_info->status;
6719 	ciss_error_info->command_status = ciss_cmd_status;
6720 	ciss_error_info->sense_data_length = sense_data_length;
6721 }
6722 
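/*
 * CCISS_PASSTHRU (CAP_SYS_RAWIO only): validate and bounce the user
 * buffer, wrap the CDB in a RAID-path IU, submit it synchronously,
 * then copy status, sense data, and any read payload back out.
 */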
6723 static int pqi_passthru_ioctl(struct pqi_ctrl_info *ctrl_info, void __user *arg)
6724 {
6725 	int rc;
6726 	char *kernel_buffer = NULL;
6727 	u16 iu_length;
6728 	size_t sense_data_length;
6729 	IOCTL_Command_struct iocommand;
6730 	struct pqi_raid_path_request request;
6731 	struct pqi_raid_error_info pqi_error_info;
6732 	struct ciss_error_info ciss_error_info;
6733 
6734 	if (pqi_ctrl_offline(ctrl_info))
6735 		return -ENXIO;
6736 	if (pqi_ofa_in_progress(ctrl_info) && pqi_ctrl_blocked(ctrl_info))
6737 		return -EBUSY;
6738 	if (!arg)
6739 		return -EINVAL;
6740 	if (!capable(CAP_SYS_RAWIO))
6741 		return -EPERM;
6742 	if (copy_from_user(&iocommand, arg, sizeof(iocommand)))
6743 		return -EFAULT;
6744 	if (iocommand.buf_size < 1 &&
6745 		iocommand.Request.Type.Direction != XFER_NONE)
6746 		return -EINVAL;
6747 	if (iocommand.Request.CDBLen > sizeof(request.cdb))
6748 		return -EINVAL;
6749 	if (iocommand.Request.Type.Type != TYPE_CMD)
6750 		return -EINVAL;
6751 
6752 	switch (iocommand.Request.Type.Direction) {
6753 	case XFER_NONE:
6754 	case XFER_WRITE:
6755 	case XFER_READ:
6756 	case XFER_READ | XFER_WRITE:
6757 		break;
6758 	default:
6759 		return -EINVAL;
6760 	}
6761 
6762 	if (iocommand.buf_size > 0) {
6763 		kernel_buffer = kmalloc(iocommand.buf_size, GFP_KERNEL);
6764 		if (!kernel_buffer)
6765 			return -ENOMEM;
6766 		if (iocommand.Request.Type.Direction & XFER_WRITE) {
6767 			if (copy_from_user(kernel_buffer, iocommand.buf,
6768 				iocommand.buf_size)) {
6769 				rc = -EFAULT;
6770 				goto out;
6771 			}
6772 		} else {
6773 			memset(kernel_buffer, 0, iocommand.buf_size);
6774 		}
6775 	}
6776 
6777 	memset(&request, 0, sizeof(request));
6778 
6779 	request.header.iu_type = PQI_REQUEST_IU_RAID_PATH_IO;
6780 	iu_length = offsetof(struct pqi_raid_path_request, sg_descriptors) -
6781 		PQI_REQUEST_HEADER_LENGTH;
6782 	memcpy(request.lun_number, iocommand.LUN_info.LunAddrBytes,
6783 		sizeof(request.lun_number));
6784 	memcpy(request.cdb, iocommand.Request.CDB, iocommand.Request.CDBLen);
6785 	request.additional_cdb_bytes_usage = SOP_ADDITIONAL_CDB_BYTES_0;
6786 
6787 	switch (iocommand.Request.Type.Direction) {
6788 	case XFER_NONE:
6789 		request.data_direction = SOP_NO_DIRECTION_FLAG;
6790 		break;
6791 	case XFER_WRITE:
6792 		request.data_direction = SOP_WRITE_FLAG;
6793 		break;
6794 	case XFER_READ:
6795 		request.data_direction = SOP_READ_FLAG;
6796 		break;
6797 	case XFER_READ | XFER_WRITE:
6798 		request.data_direction = SOP_BIDIRECTIONAL;
6799 		break;
6800 	}
6801 
6802 	request.task_attribute = SOP_TASK_ATTRIBUTE_SIMPLE;
6803 
6804 	if (iocommand.buf_size > 0) {
6805 		put_unaligned_le32(iocommand.buf_size, &request.buffer_length);
6806 
6807 		rc = pqi_map_single(ctrl_info->pci_dev,
6808 			&request.sg_descriptors[0], kernel_buffer,
6809 			iocommand.buf_size, DMA_BIDIRECTIONAL);
6810 		if (rc)
6811 			goto out;
6812 
6813 		iu_length += sizeof(request.sg_descriptors[0]);
6814 	}
6815 
6816 	put_unaligned_le16(iu_length, &request.header.iu_length);
6817 
6818 	if (ctrl_info->raid_iu_timeout_supported)
6819 		put_unaligned_le32(iocommand.Request.Timeout, &request.timeout);
6820 
6821 	rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header,
6822 		PQI_SYNC_FLAGS_INTERRUPTABLE, &pqi_error_info);
6823 
6824 	if (iocommand.buf_size > 0)
6825 		pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1,
6826 			DMA_BIDIRECTIONAL);
6827 
6828 	memset(&iocommand.error_info, 0, sizeof(iocommand.error_info));
6829 
6830 	if (rc == 0) {
6831 		pqi_error_info_to_ciss(&pqi_error_info, &ciss_error_info);
6832 		iocommand.error_info.ScsiStatus = ciss_error_info.scsi_status;
6833 		iocommand.error_info.CommandStatus =
6834 			ciss_error_info.command_status;
6835 		sense_data_length = ciss_error_info.sense_data_length;
6836 		if (sense_data_length) {
6837 			if (sense_data_length >
6838 				sizeof(iocommand.error_info.SenseInfo))
6839 				sense_data_length =
6840 					sizeof(iocommand.error_info.SenseInfo);
6841 			memcpy(iocommand.error_info.SenseInfo,
6842 				pqi_error_info.data, sense_data_length);
6843 			iocommand.error_info.SenseLen = sense_data_length;
6844 		}
6845 	}
6846 
6847 	if (copy_to_user(arg, &iocommand, sizeof(iocommand))) {
6848 		rc = -EFAULT;
6849 		goto out;
6850 	}
6851 
6852 	if (rc == 0 && iocommand.buf_size > 0 &&
6853 		(iocommand.Request.Type.Direction & XFER_READ)) {
6854 		if (copy_to_user(iocommand.buf, kernel_buffer,
6855 			iocommand.buf_size)) {
6856 			rc = -EFAULT;
6857 		}
6858 	}
6859 
6860 out:
6861 	kfree(kernel_buffer);
6862 
6863 	return rc;
6864 }
6865 
6866 static int pqi_ioctl(struct scsi_device *sdev, unsigned int cmd,
6867 		     void __user *arg)
6868 {
6869 	int rc;
6870 	struct pqi_ctrl_info *ctrl_info;
6871 
6872 	ctrl_info = shost_to_hba(sdev->host);
6873 
6874 	switch (cmd) {
6875 	case CCISS_DEREGDISK:
6876 	case CCISS_REGNEWDISK:
6877 	case CCISS_REGNEWD:
6878 		rc = pqi_scan_scsi_devices(ctrl_info);
6879 		break;
6880 	case CCISS_GETPCIINFO:
6881 		rc = pqi_getpciinfo_ioctl(ctrl_info, arg);
6882 		break;
6883 	case CCISS_GETDRIVVER:
6884 		rc = pqi_getdrivver_ioctl(arg);
6885 		break;
6886 	case CCISS_PASSTHRU:
6887 		rc = pqi_passthru_ioctl(ctrl_info, arg);
6888 		break;
6889 	default:
6890 		rc = -EINVAL;
6891 		break;
6892 	}
6893 
6894 	return rc;
6895 }
6896 
6897 static ssize_t pqi_firmware_version_show(struct device *dev,
6898 	struct device_attribute *attr, char *buffer)
6899 {
6900 	struct Scsi_Host *shost;
6901 	struct pqi_ctrl_info *ctrl_info;
6902 
6903 	shost = class_to_shost(dev);
6904 	ctrl_info = shost_to_hba(shost);
6905 
6906 	return scnprintf(buffer, PAGE_SIZE, "%s\n", ctrl_info->firmware_version);
6907 }
6908 
6909 static ssize_t pqi_serial_number_show(struct device *dev,
6910 	struct device_attribute *attr, char *buffer)
6911 {
6912 	struct Scsi_Host *shost;
6913 	struct pqi_ctrl_info *ctrl_info;
6914 
6915 	shost = class_to_shost(dev);
6916 	ctrl_info = shost_to_hba(shost);
6917 
6918 	return scnprintf(buffer, PAGE_SIZE, "%s\n", ctrl_info->serial_number);
6919 }
6920 
6921 static ssize_t pqi_model_show(struct device *dev,
6922 	struct device_attribute *attr, char *buffer)
6923 {
6924 	struct Scsi_Host *shost;
6925 	struct pqi_ctrl_info *ctrl_info;
6926 
6927 	shost = class_to_shost(dev);
6928 	ctrl_info = shost_to_hba(shost);
6929 
6930 	return scnprintf(buffer, PAGE_SIZE, "%s\n", ctrl_info->model);
6931 }
6932 
6933 static ssize_t pqi_vendor_show(struct device *dev,
6934 	struct device_attribute *attr, char *buffer)
6935 {
6936 	struct Scsi_Host *shost;
6937 	struct pqi_ctrl_info *ctrl_info;
6938 
6939 	shost = class_to_shost(dev);
6940 	ctrl_info = shost_to_hba(shost);
6941 
6942 	return scnprintf(buffer, PAGE_SIZE, "%s\n", ctrl_info->vendor);
6943 }
6944 
6945 static ssize_t pqi_host_rescan_store(struct device *dev,
6946 	struct device_attribute *attr, const char *buffer, size_t count)
6947 {
6948 	struct Scsi_Host *shost = class_to_shost(dev);
6949 
6950 	pqi_scan_start(shost);
6951 
6952 	return count;
6953 }
6954 
6955 static ssize_t pqi_lockup_action_show(struct device *dev,
6956 	struct device_attribute *attr, char *buffer)
6957 {
6958 	int count = 0;
6959 	unsigned int i;
6960 
6961 	for (i = 0; i < ARRAY_SIZE(pqi_lockup_actions); i++) {
6962 		if (pqi_lockup_actions[i].action == pqi_lockup_action)
6963 			count += scnprintf(buffer + count, PAGE_SIZE - count,
6964 				"[%s] ", pqi_lockup_actions[i].name);
6965 		else
6966 			count += scnprintf(buffer + count, PAGE_SIZE - count,
6967 				"%s ", pqi_lockup_actions[i].name);
6968 	}
6969 
6970 	count += scnprintf(buffer + count, PAGE_SIZE - count, "\n");
6971 
6972 	return count;
6973 }
6974 
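/*
 * Select the lockup policy by name from pqi_lockup_actions (defined
 * earlier in this file), e.g., assuming a "reboot" entry and an
 * illustrative host number:
 *
 *	echo reboot > /sys/class/scsi_host/host0/lockup_action
 */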
6975 static ssize_t pqi_lockup_action_store(struct device *dev,
6976 	struct device_attribute *attr, const char *buffer, size_t count)
6977 {
6978 	unsigned int i;
6979 	char *action_name;
6980 	char action_name_buffer[32];
6981 
6982 	strscpy(action_name_buffer, buffer, sizeof(action_name_buffer));
6983 	action_name = strstrip(action_name_buffer);
6984 
6985 	for (i = 0; i < ARRAY_SIZE(pqi_lockup_actions); i++) {
6986 		if (strcmp(action_name, pqi_lockup_actions[i].name) == 0) {
6987 			pqi_lockup_action = pqi_lockup_actions[i].action;
6988 			return count;
6989 		}
6990 	}
6991 
6992 	return -EINVAL;
6993 }
6994 
6995 static ssize_t pqi_host_enable_stream_detection_show(struct device *dev,
6996 	struct device_attribute *attr, char *buffer)
6997 {
6998 	struct Scsi_Host *shost = class_to_shost(dev);
6999 	struct pqi_ctrl_info *ctrl_info = shost_to_hba(shost);
7000 
7001 	return scnprintf(buffer, 10, "%x\n",
7002 			ctrl_info->enable_stream_detection);
7003 }
7004 
7005 static ssize_t pqi_host_enable_stream_detection_store(struct device *dev,
7006 	struct device_attribute *attr, const char *buffer, size_t count)
7007 {
7008 	struct Scsi_Host *shost = class_to_shost(dev);
7009 	struct pqi_ctrl_info *ctrl_info = shost_to_hba(shost);
7010 	u8 set_stream_detection = 0;
7011 
7012 	if (kstrtou8(buffer, 0, &set_stream_detection))
7013 		return -EINVAL;
7014 
7015 	if (set_stream_detection > 0)
7016 		set_stream_detection = 1;
7017 
7018 	ctrl_info->enable_stream_detection = set_stream_detection;
7019 
7020 	return count;
7021 }
7022 
7023 static ssize_t pqi_host_enable_r5_writes_show(struct device *dev,
7024 	struct device_attribute *attr, char *buffer)
7025 {
7026 	struct Scsi_Host *shost = class_to_shost(dev);
7027 	struct pqi_ctrl_info *ctrl_info = shost_to_hba(shost);
7028 
7029 	return scnprintf(buffer, 10, "%x\n", ctrl_info->enable_r5_writes);
7030 }
7031 
7032 static ssize_t pqi_host_enable_r5_writes_store(struct device *dev,
7033 	struct device_attribute *attr, const char *buffer, size_t count)
7034 {
7035 	struct Scsi_Host *shost = class_to_shost(dev);
7036 	struct pqi_ctrl_info *ctrl_info = shost_to_hba(shost);
7037 	u8 set_r5_writes = 0;
7038 
7039 	if (kstrtou8(buffer, 0, &set_r5_writes))
7040 		return -EINVAL;
7041 
7042 	if (set_r5_writes > 0)
7043 		set_r5_writes = 1;
7044 
7045 	ctrl_info->enable_r5_writes = set_r5_writes;
7046 
7047 	return count;
7048 }
7049 
7050 static ssize_t pqi_host_enable_r6_writes_show(struct device *dev,
7051 	struct device_attribute *attr, char *buffer)
7052 {
7053 	struct Scsi_Host *shost = class_to_shost(dev);
7054 	struct pqi_ctrl_info *ctrl_info = shost_to_hba(shost);
7055 
7056 	return scnprintf(buffer, 10, "%x\n", ctrl_info->enable_r6_writes);
7057 }
7058 
7059 static ssize_t pqi_host_enable_r6_writes_store(struct device *dev,
7060 	struct device_attribute *attr, const char *buffer, size_t count)
7061 {
7062 	struct Scsi_Host *shost = class_to_shost(dev);
7063 	struct pqi_ctrl_info *ctrl_info = shost_to_hba(shost);
7064 	u8 set_r6_writes = 0;
7065 
7066 	if (kstrtou8(buffer, 0, &set_r6_writes))
7067 		return -EINVAL;
7068 
7069 	if (set_r6_writes > 0)
7070 		set_r6_writes = 1;
7071 
7072 	ctrl_info->enable_r6_writes = set_r6_writes;
7073 
7074 	return count;
7075 }
7076 
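/*
 * Host-level attributes. The *_store handlers above accept anything
 * kstrtou8() can parse and normalize nonzero values to 1, e.g. (the
 * host number is illustrative):
 *
 *	echo 1 > /sys/class/scsi_host/host0/enable_r5_writes
 */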
7077 static DEVICE_STRING_ATTR_RO(driver_version, 0444,
7078 	DRIVER_VERSION BUILD_TIMESTAMP);
7079 static DEVICE_ATTR(firmware_version, 0444, pqi_firmware_version_show, NULL);
7080 static DEVICE_ATTR(model, 0444, pqi_model_show, NULL);
7081 static DEVICE_ATTR(serial_number, 0444, pqi_serial_number_show, NULL);
7082 static DEVICE_ATTR(vendor, 0444, pqi_vendor_show, NULL);
7083 static DEVICE_ATTR(rescan, 0200, NULL, pqi_host_rescan_store);
7084 static DEVICE_ATTR(lockup_action, 0644, pqi_lockup_action_show,
7085 	pqi_lockup_action_store);
7086 static DEVICE_ATTR(enable_stream_detection, 0644,
7087 	pqi_host_enable_stream_detection_show,
7088 	pqi_host_enable_stream_detection_store);
7089 static DEVICE_ATTR(enable_r5_writes, 0644,
7090 	pqi_host_enable_r5_writes_show, pqi_host_enable_r5_writes_store);
7091 static DEVICE_ATTR(enable_r6_writes, 0644,
7092 	pqi_host_enable_r6_writes_show, pqi_host_enable_r6_writes_store);
7093 
7094 static struct attribute *pqi_shost_attrs[] = {
7095 	&dev_attr_driver_version.attr.attr,
7096 	&dev_attr_firmware_version.attr,
7097 	&dev_attr_model.attr,
7098 	&dev_attr_serial_number.attr,
7099 	&dev_attr_vendor.attr,
7100 	&dev_attr_rescan.attr,
7101 	&dev_attr_lockup_action.attr,
7102 	&dev_attr_enable_stream_detection.attr,
7103 	&dev_attr_enable_r5_writes.attr,
7104 	&dev_attr_enable_r6_writes.attr,
7105 	NULL
7106 };
7107 
7108 ATTRIBUTE_GROUPS(pqi_shost);
7109 
7110 static ssize_t pqi_unique_id_show(struct device *dev,
7111 	struct device_attribute *attr, char *buffer)
7112 {
7113 	struct pqi_ctrl_info *ctrl_info;
7114 	struct scsi_device *sdev;
7115 	struct pqi_scsi_dev *device;
7116 	unsigned long flags;
7117 	u8 unique_id[16];
7118 
7119 	sdev = to_scsi_device(dev);
7120 	ctrl_info = shost_to_hba(sdev->host);
7121 
7122 	if (pqi_ctrl_offline(ctrl_info))
7123 		return -ENODEV;
7124 
7125 	spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
7126 
7127 	device = sdev->hostdata;
7128 	if (!device) {
7129 		spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
7130 		return -ENODEV;
7131 	}
7132 
7133 	if (device->is_physical_device)
7134 		memcpy(unique_id, device->wwid, sizeof(device->wwid));
7135 	else
7136 		memcpy(unique_id, device->volume_id, sizeof(device->volume_id));
7137 
7138 	spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
7139 
7140 	return scnprintf(buffer, PAGE_SIZE,
7141 		"%02X%02X%02X%02X%02X%02X%02X%02X"
7142 		"%02X%02X%02X%02X%02X%02X%02X%02X\n",
7143 		unique_id[0], unique_id[1], unique_id[2], unique_id[3],
7144 		unique_id[4], unique_id[5], unique_id[6], unique_id[7],
7145 		unique_id[8], unique_id[9], unique_id[10], unique_id[11],
7146 		unique_id[12], unique_id[13], unique_id[14], unique_id[15]);
7147 }
7148 
7149 static ssize_t pqi_lunid_show(struct device *dev,
7150 	struct device_attribute *attr, char *buffer)
7151 {
7152 	struct pqi_ctrl_info *ctrl_info;
7153 	struct scsi_device *sdev;
7154 	struct pqi_scsi_dev *device;
7155 	unsigned long flags;
7156 	u8 lunid[8];
7157 
7158 	sdev = to_scsi_device(dev);
7159 	ctrl_info = shost_to_hba(sdev->host);
7160 
7161 	if (pqi_ctrl_offline(ctrl_info))
7162 		return -ENODEV;
7163 
7164 	spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
7165 
7166 	device = sdev->hostdata;
7167 	if (!device) {
7168 		spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
7169 		return -ENODEV;
7170 	}
7171 
7172 	memcpy(lunid, device->scsi3addr, sizeof(lunid));
7173 
7174 	spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
7175 
7176 	return scnprintf(buffer, PAGE_SIZE, "0x%8phN\n", lunid);
7177 }
7178 
7179 #define MAX_PATHS	8
7180 
7181 static ssize_t pqi_path_info_show(struct device *dev,
7182 	struct device_attribute *attr, char *buf)
7183 {
7184 	struct pqi_ctrl_info *ctrl_info;
7185 	struct scsi_device *sdev;
7186 	struct pqi_scsi_dev *device;
7187 	unsigned long flags;
7188 	int i;
7189 	int output_len = 0;
7190 	u8 box;
7191 	u8 bay;
7192 	u8 path_map_index;
7193 	char *active;
7194 	u8 phys_connector[2];
7195 
7196 	sdev = to_scsi_device(dev);
7197 	ctrl_info = shost_to_hba(sdev->host);
7198 
7199 	if (pqi_ctrl_offline(ctrl_info))
7200 		return -ENODEV;
7201 
7202 	spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
7203 
7204 	device = sdev->hostdata;
7205 	if (!device) {
7206 		spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
7207 		return -ENODEV;
7208 	}
7209 
7210 	bay = device->bay;
7211 	for (i = 0; i < MAX_PATHS; i++) {
7212 		path_map_index = 1 << i;
7213 		if (i == device->active_path_index)
7214 			active = "Active";
7215 		else if (device->path_map & path_map_index)
7216 			active = "Inactive";
7217 		else
7218 			continue;
7219 
7220 		output_len += scnprintf(buf + output_len,
7221 					PAGE_SIZE - output_len,
7222 					"[%d:%d:%d:%d] %20.20s ",
7223 					ctrl_info->scsi_host->host_no,
7224 					device->bus, device->target,
7225 					device->lun,
7226 					scsi_device_type(device->devtype));
7227 
7228 		if (device->devtype == TYPE_RAID ||
7229 			pqi_is_logical_device(device))
7230 			goto end_buffer;
7231 
7232 		memcpy(&phys_connector, &device->phys_connector[i],
7233 			sizeof(phys_connector));
7234 		if (phys_connector[0] < '0')
7235 			phys_connector[0] = '0';
7236 		if (phys_connector[1] < '0')
7237 			phys_connector[1] = '0';
7238 
7239 		output_len += scnprintf(buf + output_len,
7240 					PAGE_SIZE - output_len,
7241 					"PORT: %.2s ", phys_connector);
7242 
7243 		box = device->box[i];
7244 		if (box != 0 && box != 0xFF)
7245 			output_len += scnprintf(buf + output_len,
7246 						PAGE_SIZE - output_len,
7247 						"BOX: %hhu ", box);
7248 
7249 		if ((device->devtype == TYPE_DISK ||
7250 			device->devtype == TYPE_ZBC) &&
7251 			pqi_expose_device(device))
7252 			output_len += scnprintf(buf + output_len,
7253 						PAGE_SIZE - output_len,
7254 						"BAY: %hhu ", bay);
7255 
7256 end_buffer:
7257 		output_len += scnprintf(buf + output_len,
7258 					PAGE_SIZE - output_len,
7259 					"%s\n", active);
7260 	}
7261 
7262 	spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
7263 
7264 	return output_len;
7265 }
7266 
7267 static ssize_t pqi_sas_address_show(struct device *dev,
7268 	struct device_attribute *attr, char *buffer)
7269 {
7270 	struct pqi_ctrl_info *ctrl_info;
7271 	struct scsi_device *sdev;
7272 	struct pqi_scsi_dev *device;
7273 	unsigned long flags;
7274 	u64 sas_address;
7275 
7276 	sdev = to_scsi_device(dev);
7277 	ctrl_info = shost_to_hba(sdev->host);
7278 
7279 	if (pqi_ctrl_offline(ctrl_info))
7280 		return -ENODEV;
7281 
7282 	spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
7283 
7284 	device = sdev->hostdata;
7285 	if (!device) {
7286 		spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
7287 		return -ENODEV;
7288 	}
7289 
7290 	sas_address = device->sas_address;
7291 
7292 	spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
7293 
7294 	return scnprintf(buffer, PAGE_SIZE, "0x%016llx\n", sas_address);
7295 }
7296 
7297 static ssize_t pqi_ssd_smart_path_enabled_show(struct device *dev,
7298 	struct device_attribute *attr, char *buffer)
7299 {
7300 	struct pqi_ctrl_info *ctrl_info;
7301 	struct scsi_device *sdev;
7302 	struct pqi_scsi_dev *device;
7303 	unsigned long flags;
7304 
7305 	sdev = to_scsi_device(dev);
7306 	ctrl_info = shost_to_hba(sdev->host);
7307 
7308 	if (pqi_ctrl_offline(ctrl_info))
7309 		return -ENODEV;
7310 
7311 	spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
7312 
7313 	device = sdev->hostdata;
7314 	if (!device) {
7315 		spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
7316 		return -ENODEV;
7317 	}
7318 
7319 	buffer[0] = device->raid_bypass_enabled ? '1' : '0';
7320 	buffer[1] = '\n';
7321 	buffer[2] = '\0';
7322 
7323 	spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
7324 
7325 	return 2;
7326 }
7327 
7328 static ssize_t pqi_raid_level_show(struct device *dev,
7329 	struct device_attribute *attr, char *buffer)
7330 {
7331 	struct pqi_ctrl_info *ctrl_info;
7332 	struct scsi_device *sdev;
7333 	struct pqi_scsi_dev *device;
7334 	unsigned long flags;
7335 	char *raid_level;
7336 
7337 	sdev = to_scsi_device(dev);
7338 	ctrl_info = shost_to_hba(sdev->host);
7339 
7340 	if (pqi_ctrl_offline(ctrl_info))
7341 		return -ENODEV;
7342 
7343 	spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
7344 
7345 	device = sdev->hostdata;
7346 	if (!device) {
7347 		spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
7348 		return -ENODEV;
7349 	}
7350 
7351 	if (pqi_is_logical_device(device) && device->devtype == TYPE_DISK)
7352 		raid_level = pqi_raid_level_to_string(device->raid_level);
7353 	else
7354 		raid_level = "N/A";
7355 
7356 	spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
7357 
7358 	return scnprintf(buffer, PAGE_SIZE, "%s\n", raid_level);
7359 }
7360 
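/*
 * RAID bypass statistics are kept per-CPU on the I/O path; the show
 * handler sums the counters across the online CPUs.
 */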
7361 static ssize_t pqi_raid_bypass_cnt_show(struct device *dev,
7362 	struct device_attribute *attr, char *buffer)
7363 {
7364 	struct pqi_ctrl_info *ctrl_info;
7365 	struct scsi_device *sdev;
7366 	struct pqi_scsi_dev *device;
7367 	unsigned long flags;
7368 	u64 raid_bypass_cnt;
7369 	int cpu;
7370 
7371 	sdev = to_scsi_device(dev);
7372 	ctrl_info = shost_to_hba(sdev->host);
7373 
7374 	if (pqi_ctrl_offline(ctrl_info))
7375 		return -ENODEV;
7376 
7377 	spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
7378 
7379 	device = sdev->hostdata;
7380 	if (!device) {
7381 		spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
7382 		return -ENODEV;
7383 	}
7384 
7385 	raid_bypass_cnt = 0;
7386 
7387 	if (device->raid_io_stats) {
7388 		for_each_online_cpu(cpu) {
7389 			raid_bypass_cnt += per_cpu_ptr(device->raid_io_stats, cpu)->raid_bypass_cnt;
7390 		}
7391 	}
7392 
7393 	spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
7394 
7395 	return scnprintf(buffer, PAGE_SIZE, "0x%llx\n", raid_bypass_cnt);
7396 }
7397 
7398 static ssize_t pqi_sas_ncq_prio_enable_show(struct device *dev,
7399 		struct device_attribute *attr, char *buf)
7400 {
7401 	struct pqi_ctrl_info *ctrl_info;
7402 	struct scsi_device *sdev;
7403 	struct pqi_scsi_dev *device;
7404 	unsigned long flags;
7405 	int output_len = 0;
7406 
7407 	sdev = to_scsi_device(dev);
7408 	ctrl_info = shost_to_hba(sdev->host);
7409 
7410 	if (pqi_ctrl_offline(ctrl_info))
7411 		return -ENODEV;
7412 
7413 	spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
7414 
7415 	device = sdev->hostdata;
7416 	if (!device) {
7417 		spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
7418 		return -ENODEV;
7419 	}
7420 
7421 	output_len = snprintf(buf, PAGE_SIZE, "%d\n",
7422 				device->ncq_prio_enable);
7423 	spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
7424 
7425 	return output_len;
7426 }
7427 
7428 static ssize_t pqi_sas_ncq_prio_enable_store(struct device *dev,
7429 			struct device_attribute *attr,
7430 			const char *buf, size_t count)
7431 {
7432 	struct pqi_ctrl_info *ctrl_info;
7433 	struct scsi_device *sdev;
7434 	struct pqi_scsi_dev *device;
7435 	unsigned long flags;
7436 	u8 ncq_prio_enable = 0;
7437 
7438 	if (kstrtou8(buf, 0, &ncq_prio_enable))
7439 		return -EINVAL;
7440 
7441 	sdev = to_scsi_device(dev);
7442 	ctrl_info = shost_to_hba(sdev->host);
7443 
7444 	spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
7445 
7446 	device = sdev->hostdata;
7447 
7448 	if (!device) {
7449 		spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
7450 		return -ENODEV;
7451 	}
7452 
7453 	if (!device->ncq_prio_support) {
7454 		spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
7455 		return -EINVAL;
7456 	}
7457 
7458 	device->ncq_prio_enable = ncq_prio_enable;
7459 
7460 	spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
7461 
7462 	return strlen(buf);
7463 }
7464 
7465 static ssize_t pqi_numa_node_show(struct device *dev,
7466 	struct device_attribute *attr, char *buffer)
7467 {
7468 	struct scsi_device *sdev;
7469 	struct pqi_ctrl_info *ctrl_info;
7470 
7471 	sdev = to_scsi_device(dev);
7472 	ctrl_info = shost_to_hba(sdev->host);
7473 
7474 	return scnprintf(buffer, PAGE_SIZE, "%d\n", ctrl_info->numa_node);
7475 }
7476 
7477 static ssize_t pqi_write_stream_cnt_show(struct device *dev,
7478 	struct device_attribute *attr, char *buffer)
7479 {
7480 	struct pqi_ctrl_info *ctrl_info;
7481 	struct scsi_device *sdev;
7482 	struct pqi_scsi_dev *device;
7483 	unsigned long flags;
7484 	u64 write_stream_cnt;
7485 	int cpu;
7486 
7487 	sdev = to_scsi_device(dev);
7488 	ctrl_info = shost_to_hba(sdev->host);
7489 
7490 	if (pqi_ctrl_offline(ctrl_info))
7491 		return -ENODEV;
7492 
7493 	spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
7494 
7495 	device = sdev->hostdata;
7496 	if (!device) {
7497 		spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
7498 		return -ENODEV;
7499 	}
7500 
7501 	write_stream_cnt = 0;
7502 
7503 	if (device->raid_io_stats) {
7504 		for_each_online_cpu(cpu) {
7505 			write_stream_cnt += per_cpu_ptr(device->raid_io_stats, cpu)->write_stream_cnt;
7506 		}
7507 	}
7508 
7509 	spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
7510 
7511 	return scnprintf(buffer, PAGE_SIZE, "0x%llx\n", write_stream_cnt);
7512 }
7513 
7514 static DEVICE_ATTR(lunid, 0444, pqi_lunid_show, NULL);
7515 static DEVICE_ATTR(unique_id, 0444, pqi_unique_id_show, NULL);
7516 static DEVICE_ATTR(path_info, 0444, pqi_path_info_show, NULL);
7517 static DEVICE_ATTR(sas_address, 0444, pqi_sas_address_show, NULL);
7518 static DEVICE_ATTR(ssd_smart_path_enabled, 0444, pqi_ssd_smart_path_enabled_show, NULL);
7519 static DEVICE_ATTR(raid_level, 0444, pqi_raid_level_show, NULL);
7520 static DEVICE_ATTR(raid_bypass_cnt, 0444, pqi_raid_bypass_cnt_show, NULL);
7521 static DEVICE_ATTR(sas_ncq_prio_enable, 0644,
7522 		pqi_sas_ncq_prio_enable_show, pqi_sas_ncq_prio_enable_store);
7523 static DEVICE_ATTR(numa_node, 0444, pqi_numa_node_show, NULL);
7524 static DEVICE_ATTR(write_stream_cnt, 0444, pqi_write_stream_cnt_show, NULL);
7525 
7526 static struct attribute *pqi_sdev_attrs[] = {
7527 	&dev_attr_lunid.attr,
7528 	&dev_attr_unique_id.attr,
7529 	&dev_attr_path_info.attr,
7530 	&dev_attr_sas_address.attr,
7531 	&dev_attr_ssd_smart_path_enabled.attr,
7532 	&dev_attr_raid_level.attr,
7533 	&dev_attr_raid_bypass_cnt.attr,
7534 	&dev_attr_sas_ncq_prio_enable.attr,
7535 	&dev_attr_numa_node.attr,
7536 	&dev_attr_write_stream_cnt.attr,
7537 	NULL
7538 };
7539 
7540 ATTRIBUTE_GROUPS(pqi_sdev);
7541 
7542 static const struct scsi_host_template pqi_driver_template = {
7543 	.module = THIS_MODULE,
7544 	.name = DRIVER_NAME_SHORT,
7545 	.proc_name = DRIVER_NAME_SHORT,
7546 	.queuecommand = pqi_scsi_queue_command,
7547 	.scan_start = pqi_scan_start,
7548 	.scan_finished = pqi_scan_finished,
7549 	.this_id = -1,
7550 	.eh_device_reset_handler = pqi_eh_device_reset_handler,
7551 	.eh_abort_handler = pqi_eh_abort_handler,
7552 	.ioctl = pqi_ioctl,
7553 	.sdev_init = pqi_sdev_init,
7554 	.sdev_configure = pqi_sdev_configure,
7555 	.sdev_destroy = pqi_sdev_destroy,
7556 	.map_queues = pqi_map_queues,
7557 	.sdev_groups = pqi_sdev_groups,
7558 	.shost_groups = pqi_shost_groups,
7559 	.cmd_size = sizeof(struct pqi_cmd_priv),
7560 };
7561 
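/*
 * Allocate and register the Scsi_Host, sizing its limits from the
 * controller, and attach the SAS transport; hostdata[0] carries the
 * ctrl_info pointer.
 */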
7562 static int pqi_register_scsi(struct pqi_ctrl_info *ctrl_info)
7563 {
7564 	int rc;
7565 	struct Scsi_Host *shost;
7566 
7567 	shost = scsi_host_alloc(&pqi_driver_template, sizeof(ctrl_info));
7568 	if (!shost) {
7569 		dev_err(&ctrl_info->pci_dev->dev, "scsi_host_alloc failed\n");
7570 		return -ENOMEM;
7571 	}
7572 
7573 	shost->io_port = 0;
7574 	shost->n_io_port = 0;
7575 	shost->this_id = -1;
7576 	shost->max_channel = PQI_MAX_BUS;
7577 	shost->max_cmd_len = MAX_COMMAND_SIZE;
7578 	shost->max_lun = PQI_MAX_LUNS_PER_DEVICE;
7579 	shost->max_id = ~0;
7580 	shost->max_sectors = ctrl_info->max_sectors;
7581 	shost->can_queue = ctrl_info->scsi_ml_can_queue;
7582 	shost->cmd_per_lun = shost->can_queue;
7583 	shost->sg_tablesize = ctrl_info->sg_tablesize;
7584 	shost->transportt = pqi_sas_transport_template;
7585 	shost->irq = pci_irq_vector(ctrl_info->pci_dev, 0);
7586 	shost->unique_id = shost->irq;
7587 	shost->nr_hw_queues = ctrl_info->num_queue_groups;
7588 	shost->host_tagset = 1;
7589 	shost->hostdata[0] = (unsigned long)ctrl_info;
7590 
7591 	rc = scsi_add_host(shost, &ctrl_info->pci_dev->dev);
7592 	if (rc) {
7593 		dev_err(&ctrl_info->pci_dev->dev, "scsi_add_host failed\n");
7594 		goto free_host;
7595 	}
7596 
7597 	rc = pqi_add_sas_host(shost, ctrl_info);
7598 	if (rc) {
7599 		dev_err(&ctrl_info->pci_dev->dev, "add SAS host failed\n");
7600 		goto remove_host;
7601 	}
7602 
7603 	ctrl_info->scsi_host = shost;
7604 
7605 	return 0;
7606 
7607 remove_host:
7608 	scsi_remove_host(shost);
7609 free_host:
7610 	scsi_host_put(shost);
7611 
7612 	return rc;
7613 }
7614 
7615 static void pqi_unregister_scsi(struct pqi_ctrl_info *ctrl_info)
7616 {
7617 	struct Scsi_Host *shost;
7618 
7619 	pqi_delete_sas_host(ctrl_info);
7620 
7621 	shost = ctrl_info->scsi_host;
7622 	if (!shost)
7623 		return;
7624 
7625 	scsi_remove_host(shost);
7626 	scsi_host_put(shost);
7627 }
7628 
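/*
 * Poll the PQI device-reset register until firmware reports
 * PQI_RESET_ACTION_COMPLETED; max_reset_timeout is in 100 ms units.
 */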
7629 static int pqi_wait_for_pqi_reset_completion(struct pqi_ctrl_info *ctrl_info)
7630 {
7631 	int rc = 0;
7632 	struct pqi_device_registers __iomem *pqi_registers;
7633 	unsigned long timeout;
7634 	unsigned int timeout_msecs;
7635 	union pqi_reset_register reset_reg;
7636 
7637 	pqi_registers = ctrl_info->pqi_registers;
7638 	timeout_msecs = readw(&pqi_registers->max_reset_timeout) * 100;
7639 	timeout = msecs_to_jiffies(timeout_msecs) + jiffies;
7640 
7641 	while (1) {
7642 		msleep(PQI_RESET_POLL_INTERVAL_MSECS);
7643 		reset_reg.all_bits = readl(&pqi_registers->device_reset);
7644 		if (reset_reg.bits.reset_action == PQI_RESET_ACTION_COMPLETED)
7645 			break;
7646 		if (!sis_is_firmware_running(ctrl_info)) {
7647 			rc = -ENXIO;
7648 			break;
7649 		}
7650 		if (time_after(jiffies, timeout)) {
7651 			rc = -ETIMEDOUT;
7652 			break;
7653 		}
7654 	}
7655 
7656 	return rc;
7657 }
7658 
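/*
 * Issue a PQI hard reset, preceded by a SIS quiesce handshake when the
 * firmware supports it, and wait for completion.
 */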
7659 static int pqi_reset(struct pqi_ctrl_info *ctrl_info)
7660 {
7661 	int rc;
7662 	union pqi_reset_register reset_reg;
7663 
7664 	if (ctrl_info->pqi_reset_quiesce_supported) {
7665 		rc = sis_pqi_reset_quiesce(ctrl_info);
7666 		if (rc) {
7667 			dev_err(&ctrl_info->pci_dev->dev,
7668 				"PQI reset failed during quiesce with error %d\n", rc);
7669 			return rc;
7670 		}
7671 	}
7672 
7673 	reset_reg.all_bits = 0;
7674 	reset_reg.bits.reset_type = PQI_RESET_TYPE_HARD_RESET;
7675 	reset_reg.bits.reset_action = PQI_RESET_ACTION_RESET;
7676 
7677 	writel(reset_reg.all_bits, &ctrl_info->pqi_registers->device_reset);
7678 
7679 	rc = pqi_wait_for_pqi_reset_completion(ctrl_info);
7680 	if (rc)
7681 		dev_err(&ctrl_info->pci_dev->dev,
7682 			"PQI reset failed with error %d\n", rc);
7683 
7684 	return rc;
7685 }
7686 
7687 static int pqi_get_ctrl_serial_number(struct pqi_ctrl_info *ctrl_info)
7688 {
7689 	int rc;
7690 	struct bmic_sense_subsystem_info *sense_info;
7691 
7692 	sense_info = kzalloc(sizeof(*sense_info), GFP_KERNEL);
7693 	if (!sense_info)
7694 		return -ENOMEM;
7695 
7696 	rc = pqi_sense_subsystem_info(ctrl_info, sense_info);
7697 	if (rc)
7698 		goto out;
7699 
7700 	memcpy(ctrl_info->serial_number, sense_info->ctrl_serial_number,
7701 		sizeof(sense_info->ctrl_serial_number));
7702 	ctrl_info->serial_number[sizeof(sense_info->ctrl_serial_number)] = '\0';
7703 
7704 out:
7705 	kfree(sense_info);
7706 
7707 	return rc;
7708 }
7709 
7710 static int pqi_get_ctrl_product_details(struct pqi_ctrl_info *ctrl_info)
7711 {
7712 	int rc;
7713 	struct bmic_identify_controller *identify;
7714 
7715 	identify = kmalloc(sizeof(*identify), GFP_KERNEL);
7716 	if (!identify)
7717 		return -ENOMEM;
7718 
7719 	rc = pqi_identify_controller(ctrl_info, identify);
7720 	if (rc)
7721 		goto out;
7722 
7723 	if (get_unaligned_le32(&identify->extra_controller_flags) &
7724 		BMIC_IDENTIFY_EXTRA_FLAGS_LONG_FW_VERSION_SUPPORTED) {
7725 		memcpy(ctrl_info->firmware_version,
7726 			identify->firmware_version_long,
7727 			sizeof(identify->firmware_version_long));
7728 	} else {
7729 		memcpy(ctrl_info->firmware_version,
7730 			identify->firmware_version_short,
7731 			sizeof(identify->firmware_version_short));
7732 		ctrl_info->firmware_version
7733 			[sizeof(identify->firmware_version_short)] = '\0';
7734 		snprintf(ctrl_info->firmware_version +
7735 			strlen(ctrl_info->firmware_version),
7736 			sizeof(ctrl_info->firmware_version) -
7737 			sizeof(identify->firmware_version_short),
7738 			"-%u",
7739 			get_unaligned_le16(&identify->firmware_build_number));
7740 	}
7741 
7742 	memcpy(ctrl_info->model, identify->product_id,
7743 		sizeof(identify->product_id));
7744 	ctrl_info->model[sizeof(identify->product_id)] = '\0';
7745 
7746 	memcpy(ctrl_info->vendor, identify->vendor_id,
7747 		sizeof(identify->vendor_id));
7748 	ctrl_info->vendor[sizeof(identify->vendor_id)] = '\0';
7749 
7750 	dev_info(&ctrl_info->pci_dev->dev,
7751 		"Firmware version: %s\n", ctrl_info->firmware_version);
7752 
7753 out:
7754 	kfree(identify);
7755 
7756 	return rc;
7757 }
7758 
7759 struct pqi_config_table_section_info {
7760 	struct pqi_ctrl_info *ctrl_info;
7761 	void		*section;
7762 	u32		section_offset;
7763 	void __iomem	*section_iomem_addr;
7764 };
7765 
7766 static inline bool pqi_is_firmware_feature_supported(
7767 	struct pqi_config_table_firmware_features *firmware_features,
7768 	unsigned int bit_position)
7769 {
7770 	unsigned int byte_index;
7771 
7772 	byte_index = bit_position / BITS_PER_BYTE;
7773 
7774 	if (byte_index >= le16_to_cpu(firmware_features->num_elements))
7775 		return false;
7776 
7777 	return firmware_features->features_supported[byte_index] &
7778 		(1 << (bit_position % BITS_PER_BYTE)) ? true : false;
7779 }
7780 
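/*
 * The firmware-features section carries three num_elements-byte arrays
 * back to back: supported, requested, and enabled bits. The enabled
 * bits live in controller memory, hence the __iomem access.
 */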
7781 static inline bool pqi_is_firmware_feature_enabled(
7782 	struct pqi_config_table_firmware_features *firmware_features,
7783 	void __iomem *firmware_features_iomem_addr,
7784 	unsigned int bit_position)
7785 {
7786 	unsigned int byte_index;
7787 	u8 __iomem *features_enabled_iomem_addr;
7788 
7789 	byte_index = (bit_position / BITS_PER_BYTE) +
7790 		(le16_to_cpu(firmware_features->num_elements) * 2);
7791 
7792 	features_enabled_iomem_addr = firmware_features_iomem_addr +
7793 		offsetof(struct pqi_config_table_firmware_features,
7794 			features_supported) + byte_index;
7795 
7796 	return *((__force u8 *)features_enabled_iomem_addr) &
7797 		(1 << (bit_position % BITS_PER_BYTE)) ? true : false;
7798 }
7799 
7800 static inline void pqi_request_firmware_feature(
7801 	struct pqi_config_table_firmware_features *firmware_features,
7802 	unsigned int bit_position)
7803 {
7804 	unsigned int byte_index;
7805 
7806 	byte_index = (bit_position / BITS_PER_BYTE) +
7807 		le16_to_cpu(firmware_features->num_elements);
7808 
7809 	firmware_features->features_supported[byte_index] |=
7810 		(1 << (bit_position % BITS_PER_BYTE));
7811 }
7812 
7813 static int pqi_config_table_update(struct pqi_ctrl_info *ctrl_info,
7814 	u16 first_section, u16 last_section)
7815 {
7816 	struct pqi_vendor_general_request request;
7817 
7818 	memset(&request, 0, sizeof(request));
7819 
7820 	request.header.iu_type = PQI_REQUEST_IU_VENDOR_GENERAL;
7821 	put_unaligned_le16(sizeof(request) - PQI_REQUEST_HEADER_LENGTH,
7822 		&request.header.iu_length);
7823 	put_unaligned_le16(PQI_VENDOR_GENERAL_CONFIG_TABLE_UPDATE,
7824 		&request.function_code);
7825 	put_unaligned_le16(first_section,
7826 		&request.data.config_table_update.first_section);
7827 	put_unaligned_le16(last_section,
7828 		&request.data.config_table_update.last_section);
7829 
7830 	return pqi_submit_raid_request_synchronous(ctrl_info, &request.header, 0, NULL);
7831 }
7832 
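/*
 * Copy the host's requested-feature bytes out to the config table,
 * advertise the highest feature bit this driver knows about, and ask
 * firmware to re-evaluate the firmware-features section.
 */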
7833 static int pqi_enable_firmware_features(struct pqi_ctrl_info *ctrl_info,
7834 	struct pqi_config_table_firmware_features *firmware_features,
7835 	void __iomem *firmware_features_iomem_addr)
7836 {
7837 	void *features_requested;
7838 	void __iomem *features_requested_iomem_addr;
7839 	void __iomem *host_max_known_feature_iomem_addr;
7840 
7841 	features_requested = firmware_features->features_supported +
7842 		le16_to_cpu(firmware_features->num_elements);
7843 
7844 	features_requested_iomem_addr = firmware_features_iomem_addr +
7845 		(features_requested - (void *)firmware_features);
7846 
7847 	memcpy_toio(features_requested_iomem_addr, features_requested,
7848 		le16_to_cpu(firmware_features->num_elements));
7849 
7850 	if (pqi_is_firmware_feature_supported(firmware_features,
7851 		PQI_FIRMWARE_FEATURE_MAX_KNOWN_FEATURE)) {
7852 		host_max_known_feature_iomem_addr =
7853 			features_requested_iomem_addr +
7854 			(le16_to_cpu(firmware_features->num_elements) * 2) +
7855 			sizeof(__le16);
7856 		writeb(PQI_FIRMWARE_FEATURE_MAXIMUM & 0xFF, host_max_known_feature_iomem_addr);
7857 		writeb((PQI_FIRMWARE_FEATURE_MAXIMUM & 0xFF00) >> 8, host_max_known_feature_iomem_addr + 1);
7858 	}
7859 
7860 	return pqi_config_table_update(ctrl_info,
7861 		PQI_CONFIG_TABLE_SECTION_FIRMWARE_FEATURES,
7862 		PQI_CONFIG_TABLE_SECTION_FIRMWARE_FEATURES);
7863 }
7864 
7865 struct pqi_firmware_feature {
7866 	char		*feature_name;
7867 	unsigned int	feature_bit;
7868 	bool		supported;
7869 	bool		enabled;
7870 	void (*feature_status)(struct pqi_ctrl_info *ctrl_info,
7871 		struct pqi_firmware_feature *firmware_feature);
7872 };
7873 
7874 static void pqi_firmware_feature_status(struct pqi_ctrl_info *ctrl_info,
7875 	struct pqi_firmware_feature *firmware_feature)
7876 {
7877 	if (!firmware_feature->supported) {
7878 		dev_info(&ctrl_info->pci_dev->dev, "%s not supported by controller\n",
7879 			firmware_feature->feature_name);
7880 		return;
7881 	}
7882 
7883 	if (firmware_feature->enabled) {
7884 		dev_info(&ctrl_info->pci_dev->dev,
7885 			"%s enabled\n", firmware_feature->feature_name);
7886 		return;
7887 	}
7888 
7889 	dev_err(&ctrl_info->pci_dev->dev, "failed to enable %s\n",
7890 		firmware_feature->feature_name);
7891 }
7892 
7893 static void pqi_ctrl_update_feature_flags(struct pqi_ctrl_info *ctrl_info,
7894 	struct pqi_firmware_feature *firmware_feature)
7895 {
7896 	switch (firmware_feature->feature_bit) {
7897 	case PQI_FIRMWARE_FEATURE_RAID_1_WRITE_BYPASS:
7898 		ctrl_info->enable_r1_writes = firmware_feature->enabled;
7899 		break;
7900 	case PQI_FIRMWARE_FEATURE_RAID_5_WRITE_BYPASS:
7901 		ctrl_info->enable_r5_writes = firmware_feature->enabled;
7902 		break;
7903 	case PQI_FIRMWARE_FEATURE_RAID_6_WRITE_BYPASS:
7904 		ctrl_info->enable_r6_writes = firmware_feature->enabled;
7905 		break;
7906 	case PQI_FIRMWARE_FEATURE_SOFT_RESET_HANDSHAKE:
7907 		ctrl_info->soft_reset_handshake_supported =
7908 			firmware_feature->enabled &&
7909 			pqi_read_soft_reset_status(ctrl_info);
7910 		break;
7911 	case PQI_FIRMWARE_FEATURE_RAID_IU_TIMEOUT:
7912 		ctrl_info->raid_iu_timeout_supported = firmware_feature->enabled;
7913 		break;
7914 	case PQI_FIRMWARE_FEATURE_TMF_IU_TIMEOUT:
7915 		ctrl_info->tmf_iu_timeout_supported = firmware_feature->enabled;
7916 		break;
7917 	case PQI_FIRMWARE_FEATURE_FW_TRIAGE:
7918 		ctrl_info->firmware_triage_supported = firmware_feature->enabled;
7919 		pqi_save_fw_triage_setting(ctrl_info, firmware_feature->enabled);
7920 		break;
7921 	case PQI_FIRMWARE_FEATURE_RPL_EXTENDED_FORMAT_4_5:
7922 		ctrl_info->rpl_extended_format_4_5_supported = firmware_feature->enabled;
7923 		break;
7924 	case PQI_FIRMWARE_FEATURE_MULTI_LUN_DEVICE_SUPPORT:
7925 		ctrl_info->multi_lun_device_supported = firmware_feature->enabled;
7926 		break;
7927 	case PQI_FIRMWARE_FEATURE_CTRL_LOGGING:
7928 		ctrl_info->ctrl_logging_supported = firmware_feature->enabled;
7929 		break;
7930 	}
7931 
7932 	pqi_firmware_feature_status(ctrl_info, firmware_feature);
7933 }
7934 
7935 static inline void pqi_firmware_feature_update(struct pqi_ctrl_info *ctrl_info,
7936 	struct pqi_firmware_feature *firmware_feature)
7937 {
7938 	if (firmware_feature->feature_status)
7939 		firmware_feature->feature_status(ctrl_info, firmware_feature);
7940 }
7941 
7942 static DEFINE_MUTEX(pqi_firmware_features_mutex);
7943 
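/*
 * All firmware features known to this driver. Entries whose callback is
 * pqi_firmware_feature_status() are only logged; entries using
 * pqi_ctrl_update_feature_flags() also update driver state.
 */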
7944 static struct pqi_firmware_feature pqi_firmware_features[] = {
7945 	{
7946 		.feature_name = "Online Firmware Activation",
7947 		.feature_bit = PQI_FIRMWARE_FEATURE_OFA,
7948 		.feature_status = pqi_firmware_feature_status,
7949 	},
7950 	{
7951 		.feature_name = "Serial Management Protocol",
7952 		.feature_bit = PQI_FIRMWARE_FEATURE_SMP,
7953 		.feature_status = pqi_firmware_feature_status,
7954 	},
7955 	{
7956 		.feature_name = "Maximum Known Feature",
7957 		.feature_bit = PQI_FIRMWARE_FEATURE_MAX_KNOWN_FEATURE,
7958 		.feature_status = pqi_firmware_feature_status,
7959 	},
7960 	{
7961 		.feature_name = "RAID 0 Read Bypass",
7962 		.feature_bit = PQI_FIRMWARE_FEATURE_RAID_0_READ_BYPASS,
7963 		.feature_status = pqi_firmware_feature_status,
7964 	},
7965 	{
7966 		.feature_name = "RAID 1 Read Bypass",
7967 		.feature_bit = PQI_FIRMWARE_FEATURE_RAID_1_READ_BYPASS,
7968 		.feature_status = pqi_firmware_feature_status,
7969 	},
7970 	{
7971 		.feature_name = "RAID 5 Read Bypass",
7972 		.feature_bit = PQI_FIRMWARE_FEATURE_RAID_5_READ_BYPASS,
7973 		.feature_status = pqi_firmware_feature_status,
7974 	},
7975 	{
7976 		.feature_name = "RAID 6 Read Bypass",
7977 		.feature_bit = PQI_FIRMWARE_FEATURE_RAID_6_READ_BYPASS,
7978 		.feature_status = pqi_firmware_feature_status,
7979 	},
7980 	{
7981 		.feature_name = "RAID 0 Write Bypass",
7982 		.feature_bit = PQI_FIRMWARE_FEATURE_RAID_0_WRITE_BYPASS,
7983 		.feature_status = pqi_firmware_feature_status,
7984 	},
7985 	{
7986 		.feature_name = "RAID 1 Write Bypass",
7987 		.feature_bit = PQI_FIRMWARE_FEATURE_RAID_1_WRITE_BYPASS,
7988 		.feature_status = pqi_ctrl_update_feature_flags,
7989 	},
7990 	{
7991 		.feature_name = "RAID 5 Write Bypass",
7992 		.feature_bit = PQI_FIRMWARE_FEATURE_RAID_5_WRITE_BYPASS,
7993 		.feature_status = pqi_ctrl_update_feature_flags,
7994 	},
7995 	{
7996 		.feature_name = "RAID 6 Write Bypass",
7997 		.feature_bit = PQI_FIRMWARE_FEATURE_RAID_6_WRITE_BYPASS,
7998 		.feature_status = pqi_ctrl_update_feature_flags,
7999 	},
8000 	{
8001 		.feature_name = "New Soft Reset Handshake",
8002 		.feature_bit = PQI_FIRMWARE_FEATURE_SOFT_RESET_HANDSHAKE,
8003 		.feature_status = pqi_ctrl_update_feature_flags,
8004 	},
8005 	{
8006 		.feature_name = "RAID IU Timeout",
8007 		.feature_bit = PQI_FIRMWARE_FEATURE_RAID_IU_TIMEOUT,
8008 		.feature_status = pqi_ctrl_update_feature_flags,
8009 	},
8010 	{
8011 		.feature_name = "TMF IU Timeout",
8012 		.feature_bit = PQI_FIRMWARE_FEATURE_TMF_IU_TIMEOUT,
8013 		.feature_status = pqi_ctrl_update_feature_flags,
8014 	},
8015 	{
8016 		.feature_name = "RAID Bypass on encrypted logical volumes on NVMe",
8017 		.feature_bit = PQI_FIRMWARE_FEATURE_RAID_BYPASS_ON_ENCRYPTED_NVME,
8018 		.feature_status = pqi_firmware_feature_status,
8019 	},
8020 	{
8021 		.feature_name = "Firmware Triage",
8022 		.feature_bit = PQI_FIRMWARE_FEATURE_FW_TRIAGE,
8023 		.feature_status = pqi_ctrl_update_feature_flags,
8024 	},
8025 	{
8026 		.feature_name = "RPL Extended Formats 4 and 5",
8027 		.feature_bit = PQI_FIRMWARE_FEATURE_RPL_EXTENDED_FORMAT_4_5,
8028 		.feature_status = pqi_ctrl_update_feature_flags,
8029 	},
8030 	{
8031 		.feature_name = "Multi-LUN Target",
8032 		.feature_bit = PQI_FIRMWARE_FEATURE_MULTI_LUN_DEVICE_SUPPORT,
8033 		.feature_status = pqi_ctrl_update_feature_flags,
8034 	},
8035 	{
8036 		.feature_name = "Controller Data Logging",
8037 		.feature_bit = PQI_FIRMWARE_FEATURE_CTRL_LOGGING,
8038 		.feature_status = pqi_ctrl_update_feature_flags,
8039 	},
8040 };
8041 
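/*
 * Negotiate firmware features in three passes: mark which driver-known
 * features the firmware supports (reporting the rest as unsupported),
 * request every supported feature and ask the firmware to enable the set,
 * then read back which features actually ended up enabled and update
 * driver state accordingly.
 */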
8042 static void pqi_process_firmware_features(
8043 	struct pqi_config_table_section_info *section_info)
8044 {
8045 	int rc;
8046 	struct pqi_ctrl_info *ctrl_info;
8047 	struct pqi_config_table_firmware_features *firmware_features;
8048 	void __iomem *firmware_features_iomem_addr;
8049 	unsigned int i;
8050 	unsigned int num_features_supported;
8051 
8052 	ctrl_info = section_info->ctrl_info;
8053 	firmware_features = section_info->section;
8054 	firmware_features_iomem_addr = section_info->section_iomem_addr;
8055 
8056 	for (i = 0, num_features_supported = 0;
8057 		i < ARRAY_SIZE(pqi_firmware_features); i++) {
8058 		if (pqi_is_firmware_feature_supported(firmware_features,
8059 			pqi_firmware_features[i].feature_bit)) {
8060 			pqi_firmware_features[i].supported = true;
8061 			num_features_supported++;
8062 		} else {
8063 			pqi_firmware_feature_update(ctrl_info,
8064 				&pqi_firmware_features[i]);
8065 		}
8066 	}
8067 
8068 	if (num_features_supported == 0)
8069 		return;
8070 
8071 	for (i = 0; i < ARRAY_SIZE(pqi_firmware_features); i++) {
8072 		if (!pqi_firmware_features[i].supported)
8073 			continue;
8074 		pqi_request_firmware_feature(firmware_features,
8075 			pqi_firmware_features[i].feature_bit);
8076 	}
8077 
8078 	rc = pqi_enable_firmware_features(ctrl_info, firmware_features,
8079 		firmware_features_iomem_addr);
8080 	if (rc) {
8081 		dev_err(&ctrl_info->pci_dev->dev,
8082 			"failed to enable firmware features in PQI configuration table\n");
8083 		for (i = 0; i < ARRAY_SIZE(pqi_firmware_features); i++) {
8084 			if (!pqi_firmware_features[i].supported)
8085 				continue;
8086 			pqi_firmware_feature_update(ctrl_info,
8087 				&pqi_firmware_features[i]);
8088 		}
8089 		return;
8090 	}
8091 
8092 	for (i = 0; i < ARRAY_SIZE(pqi_firmware_features); i++) {
8093 		if (!pqi_firmware_features[i].supported)
8094 			continue;
8095 		if (pqi_is_firmware_feature_enabled(firmware_features,
8096 			firmware_features_iomem_addr,
8097 			pqi_firmware_features[i].feature_bit)) {
8098 				pqi_firmware_features[i].enabled = true;
8099 		}
8100 		pqi_firmware_feature_update(ctrl_info,
8101 			&pqi_firmware_features[i]);
8102 	}
8103 }
8104 
8105 static void pqi_init_firmware_features(void)
8106 {
8107 	unsigned int i;
8108 
8109 	for (i = 0; i < ARRAY_SIZE(pqi_firmware_features); i++) {
8110 		pqi_firmware_features[i].supported = false;
8111 		pqi_firmware_features[i].enabled = false;
8112 	}
8113 }
8114 
8115 static void pqi_process_firmware_features_section(
8116 	struct pqi_config_table_section_info *section_info)
8117 {
8118 	mutex_lock(&pqi_firmware_features_mutex);
8119 	pqi_init_firmware_features();
8120 	pqi_process_firmware_features(section_info);
8121 	mutex_unlock(&pqi_firmware_features_mutex);
8122 }
8123 
8124 /*
8125  * Reset all controller settings that can be initialized during the processing
8126  * of the PQI Configuration Table.
8127  */
8128 
8129 static void pqi_ctrl_reset_config(struct pqi_ctrl_info *ctrl_info)
8130 {
8131 	ctrl_info->heartbeat_counter = NULL;
8132 	ctrl_info->soft_reset_status = NULL;
8133 	ctrl_info->soft_reset_handshake_supported = false;
8134 	ctrl_info->enable_r1_writes = false;
8135 	ctrl_info->enable_r5_writes = false;
8136 	ctrl_info->enable_r6_writes = false;
8137 	ctrl_info->raid_iu_timeout_supported = false;
8138 	ctrl_info->tmf_iu_timeout_supported = false;
8139 	ctrl_info->firmware_triage_supported = false;
8140 	ctrl_info->rpl_extended_format_4_5_supported = false;
8141 	ctrl_info->multi_lun_device_supported = false;
8142 	ctrl_info->ctrl_logging_supported = false;
8143 }
8144 
8145 static int pqi_process_config_table(struct pqi_ctrl_info *ctrl_info)
8146 {
8147 	u32 table_length;
8148 	u32 section_offset;
8149 	bool firmware_feature_section_present;
8150 	void __iomem *table_iomem_addr;
8151 	struct pqi_config_table *config_table;
8152 	struct pqi_config_table_section_header *section;
8153 	struct pqi_config_table_section_info section_info;
8154 	struct pqi_config_table_section_info feature_section_info = {0};
8155 
8156 	table_length = ctrl_info->config_table_length;
8157 	if (table_length == 0)
8158 		return 0;
8159 
8160 	config_table = kmalloc(table_length, GFP_KERNEL);
8161 	if (!config_table) {
8162 		dev_err(&ctrl_info->pci_dev->dev,
8163 			"failed to allocate memory for PQI configuration table\n");
8164 		return -ENOMEM;
8165 	}
8166 
8167 	/*
8168 	 * Copy the config table contents from I/O memory space into the
8169 	 * temporary buffer.
8170 	 */
8171 	table_iomem_addr = ctrl_info->iomem_base + ctrl_info->config_table_offset;
8172 	memcpy_fromio(config_table, table_iomem_addr, table_length);
8173 
8174 	firmware_feature_section_present = false;
8175 	section_info.ctrl_info = ctrl_info;
8176 	section_offset = get_unaligned_le32(&config_table->first_section_offset);
8177 
8178 	while (section_offset) {
8179 		section = (void *)config_table + section_offset;
8180 
8181 		section_info.section = section;
8182 		section_info.section_offset = section_offset;
8183 		section_info.section_iomem_addr = table_iomem_addr + section_offset;
8184 
8185 		switch (get_unaligned_le16(&section->section_id)) {
8186 		case PQI_CONFIG_TABLE_SECTION_FIRMWARE_FEATURES:
8187 			firmware_feature_section_present = true;
8188 			feature_section_info = section_info;
8189 			break;
8190 		case PQI_CONFIG_TABLE_SECTION_HEARTBEAT:
8191 			if (pqi_disable_heartbeat)
8192 				dev_warn(&ctrl_info->pci_dev->dev,
8193 				"heartbeat disabled by module parameter\n");
8194 			else
8195 				ctrl_info->heartbeat_counter =
8196 					table_iomem_addr +
8197 					section_offset +
8198 					offsetof(struct pqi_config_table_heartbeat,
8199 						heartbeat_counter);
8200 			break;
8201 		case PQI_CONFIG_TABLE_SECTION_SOFT_RESET:
8202 			ctrl_info->soft_reset_status =
8203 				table_iomem_addr +
8204 				section_offset +
8205 				offsetof(struct pqi_config_table_soft_reset,
8206 					soft_reset_status);
8207 			break;
8208 		}
8209 
8210 		section_offset = get_unaligned_le16(&section->next_section_offset);
8211 	}
8212 
8213 	/*
8214 	 * We process the firmware feature section after all other sections
8215 	 * have been processed so that the feature bit callbacks can take
8216 	 * into account the settings configured by other sections.
8217 	 */
8218 	if (firmware_feature_section_present)
8219 		pqi_process_firmware_features_section(&feature_section_info);
8220 
8221 	kfree(config_table);
8222 
8223 	return 0;
8224 }
8225 
8226 /* Switches the controller from PQI mode back into SIS mode. */
8227 
8228 static int pqi_revert_to_sis_mode(struct pqi_ctrl_info *ctrl_info)
8229 {
8230 	int rc;
8231 
8232 	pqi_change_irq_mode(ctrl_info, IRQ_MODE_NONE);
8233 	rc = pqi_reset(ctrl_info);
8234 	if (rc)
8235 		return rc;
8236 	rc = sis_reenable_sis_mode(ctrl_info);
8237 	if (rc) {
8238 		dev_err(&ctrl_info->pci_dev->dev,
8239 			"re-enabling SIS mode failed with error %d\n", rc);
8240 		return rc;
8241 	}
8242 	pqi_save_ctrl_mode(ctrl_info, SIS_MODE);
8243 
8244 	return 0;
8245 }
8246 
8247 /*
8248  * If the controller isn't already in SIS mode, this function forces it into
8249  * SIS mode.
8250  */
8251 
8252 static int pqi_force_sis_mode(struct pqi_ctrl_info *ctrl_info)
8253 {
8254 	if (!sis_is_firmware_running(ctrl_info))
8255 		return -ENXIO;
8256 
8257 	if (pqi_get_ctrl_mode(ctrl_info) == SIS_MODE)
8258 		return 0;
8259 
8260 	if (sis_is_kernel_up(ctrl_info)) {
8261 		pqi_save_ctrl_mode(ctrl_info, SIS_MODE);
8262 		return 0;
8263 	}
8264 
8265 	return pqi_revert_to_sis_mode(ctrl_info);
8266 }
8267 
8268 static void pqi_perform_lockup_action(void)
8269 {
8270 	switch (pqi_lockup_action) {
8271 	case PANIC:
8272 		panic("FATAL: Smart Family Controller lockup detected");
8273 		break;
8274 	case REBOOT:
8275 		emergency_restart();
8276 		break;
8277 	case NONE:
8278 	default:
8279 		break;
8280 	}
8281 }
8282 
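/*
 * Controller data logging host buffer: request 4 MiB, but accept as
 * little as the total divided by PQI_HOST_MAX_SG_DESCRIPTORS if memory
 * is tight (see pqi_host_setup_buffer() and pqi_host_alloc_buffer()).
 */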
8283 #define PQI_CTRL_LOG_TOTAL_SIZE	(4 * 1024 * 1024)
8284 #define PQI_CTRL_LOG_MIN_SIZE	(PQI_CTRL_LOG_TOTAL_SIZE / PQI_HOST_MAX_SG_DESCRIPTORS)
8285 
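/*
 * Bring the controller from SIS mode to fully operational PQI mode:
 * reset/force SIS as needed, size I/O resources from the reported
 * capabilities, transition to PQI mode, create the admin and operational
 * queues, enable interrupts and events, register with the SCSI midlayer,
 * and kick off the initial device scan.
 */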
8286 static int pqi_ctrl_init(struct pqi_ctrl_info *ctrl_info)
8287 {
8288 	int rc;
8289 	u32 product_id;
8290 
8291 	if (reset_devices) {
8292 		if (is_kdump_kernel() && pqi_is_fw_triage_supported(ctrl_info)) {
8293 			rc = sis_wait_for_fw_triage_completion(ctrl_info);
8294 			if (rc)
8295 				return rc;
8296 		}
8297 		if (is_kdump_kernel() && sis_is_ctrl_logging_supported(ctrl_info)) {
8298 			sis_notify_kdump(ctrl_info);
8299 			rc = sis_wait_for_ctrl_logging_completion(ctrl_info);
8300 			if (rc)
8301 				return rc;
8302 		}
8303 		sis_soft_reset(ctrl_info);
8304 		ssleep(PQI_POST_RESET_DELAY_SECS);
8305 	} else {
8306 		rc = pqi_force_sis_mode(ctrl_info);
8307 		if (rc)
8308 			return rc;
8309 	}
8310 
8311 	/*
8312 	 * Wait until the controller is ready to start accepting SIS
8313 	 * commands.
8314 	 */
8315 	rc = sis_wait_for_ctrl_ready(ctrl_info);
8316 	if (rc) {
8317 		if (reset_devices) {
8318 			dev_err(&ctrl_info->pci_dev->dev,
8319 				"kdump init failed with error %d\n", rc);
8320 			pqi_lockup_action = REBOOT;
8321 			pqi_perform_lockup_action();
8322 		}
8323 		return rc;
8324 	}
8325 
8326 	/*
8327 	 * Get the controller properties.  This allows us to determine
8328 	 * whether or not it supports PQI mode.
8329 	 */
8330 	rc = sis_get_ctrl_properties(ctrl_info);
8331 	if (rc) {
8332 		dev_err(&ctrl_info->pci_dev->dev,
8333 			"error obtaining controller properties\n");
8334 		return rc;
8335 	}
8336 
8337 	rc = sis_get_pqi_capabilities(ctrl_info);
8338 	if (rc) {
8339 		dev_err(&ctrl_info->pci_dev->dev,
8340 			"error obtaining controller capabilities\n");
8341 		return rc;
8342 	}
8343 
8344 	product_id = sis_get_product_id(ctrl_info);
8345 	ctrl_info->product_id = (u8)product_id;
8346 	ctrl_info->product_revision = (u8)(product_id >> 8);
8347 
8348 	if (is_kdump_kernel()) {
8349 		if (ctrl_info->max_outstanding_requests >
8350 			PQI_MAX_OUTSTANDING_REQUESTS_KDUMP)
8351 				ctrl_info->max_outstanding_requests =
8352 					PQI_MAX_OUTSTANDING_REQUESTS_KDUMP;
8353 	} else {
8354 		if (ctrl_info->max_outstanding_requests >
8355 			PQI_MAX_OUTSTANDING_REQUESTS)
8356 				ctrl_info->max_outstanding_requests =
8357 					PQI_MAX_OUTSTANDING_REQUESTS;
8358 	}
8359 
8360 	pqi_calculate_io_resources(ctrl_info);
8361 
8362 	rc = pqi_alloc_error_buffer(ctrl_info);
8363 	if (rc) {
8364 		dev_err(&ctrl_info->pci_dev->dev,
8365 			"failed to allocate PQI error buffer\n");
8366 		return rc;
8367 	}
8368 
8369 	/*
8370 	 * If the function we are about to call succeeds, the
8371 	 * controller will transition from legacy SIS mode
8372 	 * into PQI mode.
8373 	 */
8374 	rc = sis_init_base_struct_addr(ctrl_info);
8375 	if (rc) {
8376 		dev_err(&ctrl_info->pci_dev->dev,
8377 			"error initializing PQI mode\n");
8378 		return rc;
8379 	}
8380 
8381 	/* Wait for the controller to complete the SIS -> PQI transition. */
8382 	rc = pqi_wait_for_pqi_mode_ready(ctrl_info);
8383 	if (rc) {
8384 		dev_err(&ctrl_info->pci_dev->dev,
8385 			"transition to PQI mode failed\n");
8386 		return rc;
8387 	}
8388 
8389 	/* From here on, we are running in PQI mode. */
8390 	ctrl_info->pqi_mode_enabled = true;
8391 	pqi_save_ctrl_mode(ctrl_info, PQI_MODE);
8392 
8393 	rc = pqi_alloc_admin_queues(ctrl_info);
8394 	if (rc) {
8395 		dev_err(&ctrl_info->pci_dev->dev,
8396 			"failed to allocate admin queues\n");
8397 		return rc;
8398 	}
8399 
8400 	rc = pqi_create_admin_queues(ctrl_info);
8401 	if (rc) {
8402 		dev_err(&ctrl_info->pci_dev->dev,
8403 			"error creating admin queues\n");
8404 		return rc;
8405 	}
8406 
8407 	rc = pqi_report_device_capability(ctrl_info);
8408 	if (rc) {
8409 		dev_err(&ctrl_info->pci_dev->dev,
8410 			"obtaining device capability failed\n");
8411 		return rc;
8412 	}
8413 
8414 	rc = pqi_validate_device_capability(ctrl_info);
8415 	if (rc)
8416 		return rc;
8417 
8418 	pqi_calculate_queue_resources(ctrl_info);
8419 
8420 	rc = pqi_enable_msix_interrupts(ctrl_info);
8421 	if (rc)
8422 		return rc;
8423 
8424 	if (ctrl_info->num_msix_vectors_enabled < ctrl_info->num_queue_groups) {
8425 		ctrl_info->max_msix_vectors =
8426 			ctrl_info->num_msix_vectors_enabled;
8427 		pqi_calculate_queue_resources(ctrl_info);
8428 	}
8429 
8430 	rc = pqi_alloc_io_resources(ctrl_info);
8431 	if (rc)
8432 		return rc;
8433 
8434 	rc = pqi_alloc_operational_queues(ctrl_info);
8435 	if (rc) {
8436 		dev_err(&ctrl_info->pci_dev->dev,
8437 			"failed to allocate operational queues\n");
8438 		return rc;
8439 	}
8440 
8441 	pqi_init_operational_queues(ctrl_info);
8442 
8443 	rc = pqi_create_queues(ctrl_info);
8444 	if (rc)
8445 		return rc;
8446 
8447 	rc = pqi_request_irqs(ctrl_info);
8448 	if (rc)
8449 		return rc;
8450 
8451 	pqi_change_irq_mode(ctrl_info, IRQ_MODE_MSIX);
8452 
8453 	ctrl_info->controller_online = true;
8454 
8455 	rc = pqi_process_config_table(ctrl_info);
8456 	if (rc)
8457 		return rc;
8458 
8459 	pqi_start_heartbeat_timer(ctrl_info);
8460 
8461 	if (ctrl_info->enable_r5_writes || ctrl_info->enable_r6_writes) {
8462 		rc = pqi_get_advanced_raid_bypass_config(ctrl_info);
8463 		if (rc) { /* Supported features not returned correctly. */
8464 			dev_err(&ctrl_info->pci_dev->dev,
8465 				"error obtaining advanced RAID bypass configuration\n");
8466 			return rc;
8467 		}
8468 		ctrl_info->ciss_report_log_flags |=
8469 			CISS_REPORT_LOG_FLAG_DRIVE_TYPE_MIX;
8470 	}
8471 
8472 	rc = pqi_enable_events(ctrl_info);
8473 	if (rc) {
8474 		dev_err(&ctrl_info->pci_dev->dev,
8475 			"error enabling events\n");
8476 		return rc;
8477 	}
8478 
8479 	/* Register with the SCSI subsystem. */
8480 	rc = pqi_register_scsi(ctrl_info);
8481 	if (rc)
8482 		return rc;
8483 
8484 	if (ctrl_info->ctrl_logging_supported && !is_kdump_kernel()) {
8485 		pqi_host_setup_buffer(ctrl_info, &ctrl_info->ctrl_log_memory, PQI_CTRL_LOG_TOTAL_SIZE, PQI_CTRL_LOG_MIN_SIZE);
8486 		pqi_host_memory_update(ctrl_info, &ctrl_info->ctrl_log_memory, PQI_VENDOR_GENERAL_CTRL_LOG_MEMORY_UPDATE);
8487 	}
8488 
8489 	rc = pqi_get_ctrl_product_details(ctrl_info);
8490 	if (rc) {
8491 		dev_err(&ctrl_info->pci_dev->dev,
8492 			"error obtaining product details\n");
8493 		return rc;
8494 	}
8495 
8496 	rc = pqi_get_ctrl_serial_number(ctrl_info);
8497 	if (rc) {
8498 		dev_err(&ctrl_info->pci_dev->dev,
8499 			"error obtaining ctrl serial number\n");
8500 		return rc;
8501 	}
8502 
8503 	rc = pqi_set_diag_rescan(ctrl_info);
8504 	if (rc) {
8505 		dev_err(&ctrl_info->pci_dev->dev,
8506 			"error enabling multi-lun rescan\n");
8507 		return rc;
8508 	}
8509 
8510 	rc = pqi_write_driver_version_to_host_wellness(ctrl_info);
8511 	if (rc) {
8512 		dev_err(&ctrl_info->pci_dev->dev,
8513 			"error updating host wellness\n");
8514 		return rc;
8515 	}
8516 
8517 	pqi_schedule_update_time_worker(ctrl_info);
8518 
8519 	pqi_scan_scsi_devices(ctrl_info);
8520 
8521 	return 0;
8522 }
8523 
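/*
 * Zero the software copies and hardware registers of all queue
 * producer/consumer indices so the existing queue memory can be reused
 * when the controller is reinitialized on resume.
 */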
8524 static void pqi_reinit_queues(struct pqi_ctrl_info *ctrl_info)
8525 {
8526 	unsigned int i;
8527 	struct pqi_admin_queues *admin_queues;
8528 	struct pqi_event_queue *event_queue;
8529 
8530 	admin_queues = &ctrl_info->admin_queues;
8531 	admin_queues->iq_pi_copy = 0;
8532 	admin_queues->oq_ci_copy = 0;
8533 	writel(0, admin_queues->oq_pi);
8534 
8535 	for (i = 0; i < ctrl_info->num_queue_groups; i++) {
8536 		ctrl_info->queue_groups[i].iq_pi_copy[RAID_PATH] = 0;
8537 		ctrl_info->queue_groups[i].iq_pi_copy[AIO_PATH] = 0;
8538 		ctrl_info->queue_groups[i].oq_ci_copy = 0;
8539 
8540 		writel(0, ctrl_info->queue_groups[i].iq_ci[RAID_PATH]);
8541 		writel(0, ctrl_info->queue_groups[i].iq_ci[AIO_PATH]);
8542 		writel(0, ctrl_info->queue_groups[i].oq_pi);
8543 	}
8544 
8545 	event_queue = &ctrl_info->event_queue;
8546 	writel(0, event_queue->oq_pi);
8547 	event_queue->oq_ci_copy = 0;
8548 }
8549 
8550 static int pqi_ctrl_init_resume(struct pqi_ctrl_info *ctrl_info)
8551 {
8552 	int rc;
8553 
8554 	rc = pqi_force_sis_mode(ctrl_info);
8555 	if (rc)
8556 		return rc;
8557 
8558 	/*
8559 	 * Wait until the controller is ready to start accepting SIS
8560 	 * commands.
8561 	 */
8562 	rc = sis_wait_for_ctrl_ready_resume(ctrl_info);
8563 	if (rc)
8564 		return rc;
8565 
8566 	/*
8567 	 * Get the controller properties.  This allows us to determine
8568 	 * whether or not it supports PQI mode.
8569 	 */
8570 	rc = sis_get_ctrl_properties(ctrl_info);
8571 	if (rc) {
8572 		dev_err(&ctrl_info->pci_dev->dev,
8573 			"error obtaining controller properties\n");
8574 		return rc;
8575 	}
8576 
8577 	rc = sis_get_pqi_capabilities(ctrl_info);
8578 	if (rc) {
8579 		dev_err(&ctrl_info->pci_dev->dev,
8580 			"error obtaining controller capabilities\n");
8581 		return rc;
8582 	}
8583 
8584 	/*
8585 	 * If the function we are about to call succeeds, the
8586 	 * controller will transition from legacy SIS mode
8587 	 * into PQI mode.
8588 	 */
8589 	rc = sis_init_base_struct_addr(ctrl_info);
8590 	if (rc) {
8591 		dev_err(&ctrl_info->pci_dev->dev,
8592 			"error initializing PQI mode\n");
8593 		return rc;
8594 	}
8595 
8596 	/* Wait for the controller to complete the SIS -> PQI transition. */
8597 	rc = pqi_wait_for_pqi_mode_ready(ctrl_info);
8598 	if (rc) {
8599 		dev_err(&ctrl_info->pci_dev->dev,
8600 			"transition to PQI mode failed\n");
8601 		return rc;
8602 	}
8603 
8604 	/* From here on, we are running in PQI mode. */
8605 	ctrl_info->pqi_mode_enabled = true;
8606 	pqi_save_ctrl_mode(ctrl_info, PQI_MODE);
8607 
8608 	pqi_reinit_queues(ctrl_info);
8609 
8610 	rc = pqi_create_admin_queues(ctrl_info);
8611 	if (rc) {
8612 		dev_err(&ctrl_info->pci_dev->dev,
8613 			"error creating admin queues\n");
8614 		return rc;
8615 	}
8616 
8617 	rc = pqi_create_queues(ctrl_info);
8618 	if (rc)
8619 		return rc;
8620 
8621 	pqi_change_irq_mode(ctrl_info, IRQ_MODE_MSIX);
8622 
8623 	ctrl_info->controller_online = true;
8624 	pqi_ctrl_unblock_requests(ctrl_info);
8625 
8626 	pqi_ctrl_reset_config(ctrl_info);
8627 
8628 	rc = pqi_process_config_table(ctrl_info);
8629 	if (rc)
8630 		return rc;
8631 
8632 	pqi_start_heartbeat_timer(ctrl_info);
8633 
8634 	if (ctrl_info->enable_r5_writes || ctrl_info->enable_r6_writes) {
8635 		rc = pqi_get_advanced_raid_bypass_config(ctrl_info);
8636 		if (rc) {
8637 			dev_err(&ctrl_info->pci_dev->dev,
8638 				"error obtaining advanced RAID bypass configuration\n");
8639 			return rc;
8640 		}
8641 		ctrl_info->ciss_report_log_flags |=
8642 			CISS_REPORT_LOG_FLAG_DRIVE_TYPE_MIX;
8643 	}
8644 
8645 	rc = pqi_enable_events(ctrl_info);
8646 	if (rc) {
8647 		dev_err(&ctrl_info->pci_dev->dev,
8648 			"error enabling events\n");
8649 		return rc;
8650 	}
8651 
8652 	rc = pqi_get_ctrl_product_details(ctrl_info);
8653 	if (rc) {
8654 		dev_err(&ctrl_info->pci_dev->dev,
8655 			"error obtaining product details\n");
8656 		return rc;
8657 	}
8658 
8659 	rc = pqi_set_diag_rescan(ctrl_info);
8660 	if (rc) {
8661 		dev_err(&ctrl_info->pci_dev->dev,
8662 			"error enabling multi-lun rescan\n");
8663 		return rc;
8664 	}
8665 
8666 	rc = pqi_write_driver_version_to_host_wellness(ctrl_info);
8667 	if (rc) {
8668 		dev_err(&ctrl_info->pci_dev->dev,
8669 			"error updating host wellness\n");
8670 		return rc;
8671 	}
8672 
8673 	if (pqi_ofa_in_progress(ctrl_info)) {
8674 		pqi_ctrl_unblock_scan(ctrl_info);
8675 		if (ctrl_info->ctrl_logging_supported) {
8676 			if (!ctrl_info->ctrl_log_memory.host_memory)
8677 				pqi_host_setup_buffer(ctrl_info,
8678 					&ctrl_info->ctrl_log_memory,
8679 					PQI_CTRL_LOG_TOTAL_SIZE,
8680 					PQI_CTRL_LOG_MIN_SIZE);
8681 			pqi_host_memory_update(ctrl_info,
8682 				&ctrl_info->ctrl_log_memory, PQI_VENDOR_GENERAL_CTRL_LOG_MEMORY_UPDATE);
8683 		} else {
8684 			if (ctrl_info->ctrl_log_memory.host_memory)
8685 				pqi_host_free_buffer(ctrl_info,
8686 					&ctrl_info->ctrl_log_memory);
8687 		}
8688 	}
8689 
8690 	pqi_scan_scsi_devices(ctrl_info);
8691 
8692 	return 0;
8693 }
8694 
8695 static inline int pqi_set_pcie_completion_timeout(struct pci_dev *pci_dev, u16 timeout)
8696 {
8697 	int rc;
8698 
8699 	rc = pcie_capability_clear_and_set_word(pci_dev, PCI_EXP_DEVCTL2,
8700 		PCI_EXP_DEVCTL2_COMP_TIMEOUT, timeout);
8701 
8702 	return pcibios_err_to_errno(rc);
8703 }
8704 
8705 static int pqi_pci_init(struct pqi_ctrl_info *ctrl_info)
8706 {
8707 	int rc;
8708 	u64 mask;
8709 
8710 	rc = pci_enable_device(ctrl_info->pci_dev);
8711 	if (rc) {
8712 		dev_err(&ctrl_info->pci_dev->dev,
8713 			"failed to enable PCI device\n");
8714 		return rc;
8715 	}
8716 
8717 	if (sizeof(dma_addr_t) > 4)
8718 		mask = DMA_BIT_MASK(64);
8719 	else
8720 		mask = DMA_BIT_MASK(32);
8721 
8722 	rc = dma_set_mask_and_coherent(&ctrl_info->pci_dev->dev, mask);
8723 	if (rc) {
8724 		dev_err(&ctrl_info->pci_dev->dev, "failed to set DMA mask\n");
8725 		goto disable_device;
8726 	}
8727 
8728 	rc = pci_request_regions(ctrl_info->pci_dev, DRIVER_NAME_SHORT);
8729 	if (rc) {
8730 		dev_err(&ctrl_info->pci_dev->dev,
8731 			"failed to obtain PCI resources\n");
8732 		goto disable_device;
8733 	}
8734 
8735 	ctrl_info->iomem_base = ioremap(pci_resource_start(
8736 		ctrl_info->pci_dev, 0),
8737 		pci_resource_len(ctrl_info->pci_dev, 0));
8738 	if (!ctrl_info->iomem_base) {
8739 		dev_err(&ctrl_info->pci_dev->dev,
8740 			"failed to map memory for controller registers\n");
8741 		rc = -ENOMEM;
8742 		goto release_regions;
8743 	}
8744 
8745 #define PCI_EXP_COMP_TIMEOUT_65_TO_210_MS		0x6
8746 
8747 	/* Increase the PCIe completion timeout. */
8748 	rc = pqi_set_pcie_completion_timeout(ctrl_info->pci_dev,
8749 		PCI_EXP_COMP_TIMEOUT_65_TO_210_MS);
8750 	if (rc) {
8751 		dev_err(&ctrl_info->pci_dev->dev,
8752 			"failed to set PCIe completion timeout\n");
8753 		goto release_regions;
8754 	}
8755 
8756 	/* Enable bus mastering. */
8757 	pci_set_master(ctrl_info->pci_dev);
8758 
8759 	ctrl_info->registers = ctrl_info->iomem_base;
8760 	ctrl_info->pqi_registers = &ctrl_info->registers->pqi_registers;
8761 
8762 	pci_set_drvdata(ctrl_info->pci_dev, ctrl_info);
8763 
8764 	return 0;
8765 
8766 release_regions:
8767 	pci_release_regions(ctrl_info->pci_dev);
8768 disable_device:
8769 	pci_disable_device(ctrl_info->pci_dev);
8770 
8771 	return rc;
8772 }
8773 
8774 static void pqi_cleanup_pci_init(struct pqi_ctrl_info *ctrl_info)
8775 {
8776 	iounmap(ctrl_info->iomem_base);
8777 	pci_release_regions(ctrl_info->pci_dev);
8778 	if (pci_is_enabled(ctrl_info->pci_dev))
8779 		pci_disable_device(ctrl_info->pci_dev);
8780 	pci_set_drvdata(ctrl_info->pci_dev, NULL);
8781 }
8782 
8783 static struct pqi_ctrl_info *pqi_alloc_ctrl_info(int numa_node)
8784 {
8785 	struct pqi_ctrl_info *ctrl_info;
8786 
8787 	ctrl_info = kzalloc_node(sizeof(struct pqi_ctrl_info),
8788 			GFP_KERNEL, numa_node);
8789 	if (!ctrl_info)
8790 		return NULL;
8791 
8792 	mutex_init(&ctrl_info->scan_mutex);
8793 	mutex_init(&ctrl_info->lun_reset_mutex);
8794 	mutex_init(&ctrl_info->ofa_mutex);
8795 
8796 	INIT_LIST_HEAD(&ctrl_info->scsi_device_list);
8797 	spin_lock_init(&ctrl_info->scsi_device_list_lock);
8798 
8799 	INIT_WORK(&ctrl_info->event_work, pqi_event_worker);
8800 	atomic_set(&ctrl_info->num_interrupts, 0);
8801 
8802 	INIT_DELAYED_WORK(&ctrl_info->rescan_work, pqi_rescan_worker);
8803 	INIT_DELAYED_WORK(&ctrl_info->update_time_work, pqi_update_time_worker);
8804 
8805 	timer_setup(&ctrl_info->heartbeat_timer, pqi_heartbeat_timer_handler, 0);
8806 	INIT_WORK(&ctrl_info->ctrl_offline_work, pqi_ctrl_offline_worker);
8807 
8808 	INIT_WORK(&ctrl_info->ofa_memory_alloc_work, pqi_ofa_memory_alloc_worker);
8809 	INIT_WORK(&ctrl_info->ofa_quiesce_work, pqi_ofa_quiesce_worker);
8810 
8811 	sema_init(&ctrl_info->sync_request_sem,
8812 		PQI_RESERVED_IO_SLOTS_SYNCHRONOUS_REQUESTS);
8813 	init_waitqueue_head(&ctrl_info->block_requests_wait);
8814 
8815 	ctrl_info->ctrl_id = atomic_inc_return(&pqi_controller_count) - 1;
8816 	ctrl_info->irq_mode = IRQ_MODE_NONE;
8817 	ctrl_info->max_msix_vectors = PQI_MAX_MSIX_VECTORS;
8818 
8819 	ctrl_info->ciss_report_log_flags = CISS_REPORT_LOG_FLAG_UNIQUE_LUN_ID;
8820 	ctrl_info->max_transfer_encrypted_sas_sata =
8821 		PQI_DEFAULT_MAX_TRANSFER_ENCRYPTED_SAS_SATA;
8822 	ctrl_info->max_transfer_encrypted_nvme =
8823 		PQI_DEFAULT_MAX_TRANSFER_ENCRYPTED_NVME;
8824 	ctrl_info->max_write_raid_5_6 = PQI_DEFAULT_MAX_WRITE_RAID_5_6;
8825 	ctrl_info->max_write_raid_1_10_2drive = ~0;
8826 	ctrl_info->max_write_raid_1_10_3drive = ~0;
8827 	ctrl_info->disable_managed_interrupts = pqi_disable_managed_interrupts;
8828 
8829 	return ctrl_info;
8830 }
8831 
8832 static inline void pqi_free_ctrl_info(struct pqi_ctrl_info *ctrl_info)
8833 {
8834 	kfree(ctrl_info);
8835 }
8836 
8837 static void pqi_free_interrupts(struct pqi_ctrl_info *ctrl_info)
8838 {
8839 	pqi_free_irqs(ctrl_info);
8840 	pqi_disable_msix_interrupts(ctrl_info);
8841 }
8842 
8843 static void pqi_free_ctrl_resources(struct pqi_ctrl_info *ctrl_info)
8844 {
8845 	pqi_free_interrupts(ctrl_info);
8846 	if (ctrl_info->queue_memory_base)
8847 		dma_free_coherent(&ctrl_info->pci_dev->dev,
8848 			ctrl_info->queue_memory_length,
8849 			ctrl_info->queue_memory_base,
8850 			ctrl_info->queue_memory_base_dma_handle);
8851 	if (ctrl_info->admin_queue_memory_base)
8852 		dma_free_coherent(&ctrl_info->pci_dev->dev,
8853 			ctrl_info->admin_queue_memory_length,
8854 			ctrl_info->admin_queue_memory_base,
8855 			ctrl_info->admin_queue_memory_base_dma_handle);
8856 	pqi_free_all_io_requests(ctrl_info);
8857 	if (ctrl_info->error_buffer)
8858 		dma_free_coherent(&ctrl_info->pci_dev->dev,
8859 			ctrl_info->error_buffer_length,
8860 			ctrl_info->error_buffer,
8861 			ctrl_info->error_buffer_dma_handle);
8862 	if (ctrl_info->iomem_base)
8863 		pqi_cleanup_pci_init(ctrl_info);
8864 	pqi_free_ctrl_info(ctrl_info);
8865 }
8866 
8867 static void pqi_remove_ctrl(struct pqi_ctrl_info *ctrl_info)
8868 {
8869 	ctrl_info->controller_online = false;
8870 	pqi_stop_heartbeat_timer(ctrl_info);
8871 	pqi_ctrl_block_requests(ctrl_info);
8872 	pqi_cancel_rescan_worker(ctrl_info);
8873 	pqi_cancel_update_time_worker(ctrl_info);
8874 	if (ctrl_info->ctrl_removal_state == PQI_CTRL_SURPRISE_REMOVAL) {
8875 		pqi_fail_all_outstanding_requests(ctrl_info);
8876 		ctrl_info->pqi_mode_enabled = false;
8877 	}
8878 	pqi_host_free_buffer(ctrl_info, &ctrl_info->ctrl_log_memory);
8879 	pqi_unregister_scsi(ctrl_info);
8880 	if (ctrl_info->pqi_mode_enabled)
8881 		pqi_revert_to_sis_mode(ctrl_info);
8882 	pqi_free_ctrl_resources(ctrl_info);
8883 }
8884 
8885 static void pqi_ofa_ctrl_quiesce(struct pqi_ctrl_info *ctrl_info)
8886 {
8887 	pqi_ctrl_block_scan(ctrl_info);
8888 	pqi_scsi_block_requests(ctrl_info);
8889 	pqi_ctrl_block_device_reset(ctrl_info);
8890 	pqi_ctrl_block_requests(ctrl_info);
8891 	pqi_ctrl_wait_until_quiesced(ctrl_info);
8892 	pqi_stop_heartbeat_timer(ctrl_info);
8893 }
8894 
8895 static void pqi_ofa_ctrl_unquiesce(struct pqi_ctrl_info *ctrl_info)
8896 {
8897 	pqi_start_heartbeat_timer(ctrl_info);
8898 	pqi_ctrl_unblock_requests(ctrl_info);
8899 	pqi_ctrl_unblock_device_reset(ctrl_info);
8900 	pqi_scsi_unblock_requests(ctrl_info);
8901 	pqi_ctrl_unblock_scan(ctrl_info);
8902 }
8903 
8904 static int pqi_ofa_ctrl_restart(struct pqi_ctrl_info *ctrl_info, unsigned int delay_secs)
8905 {
8906 	ssleep(delay_secs);
8907 
8908 	return pqi_ctrl_init_resume(ctrl_info);
8909 }
8910 
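/*
 * Allocate the host buffer as sg_count DMA-coherent chunks of chunk_size
 * bytes each, recording every chunk in the host_memory scatter-gather
 * descriptor list; on failure, free whatever was already allocated.
 */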
8911 static int pqi_host_alloc_mem(struct pqi_ctrl_info *ctrl_info,
8912 	struct pqi_host_memory_descriptor *host_memory_descriptor,
8913 	u32 total_size, u32 chunk_size)
8914 {
8915 	int i;
8916 	u32 sg_count;
8917 	struct device *dev;
8918 	struct pqi_host_memory *host_memory;
8919 	struct pqi_sg_descriptor *mem_descriptor;
8920 	dma_addr_t dma_handle;
8921 
8922 	sg_count = DIV_ROUND_UP(total_size, chunk_size);
8923 	if (sg_count == 0 || sg_count > PQI_HOST_MAX_SG_DESCRIPTORS)
8924 		goto out;
8925 
8926 	host_memory_descriptor->host_chunk_virt_address = kmalloc(sg_count * sizeof(void *), GFP_KERNEL);
8927 	if (!host_memory_descriptor->host_chunk_virt_address)
8928 		goto out;
8929 
8930 	dev = &ctrl_info->pci_dev->dev;
8931 	host_memory = host_memory_descriptor->host_memory;
8932 
8933 	for (i = 0; i < sg_count; i++) {
8934 		host_memory_descriptor->host_chunk_virt_address[i] = dma_alloc_coherent(dev, chunk_size, &dma_handle, GFP_KERNEL);
8935 		if (!host_memory_descriptor->host_chunk_virt_address[i])
8936 			goto out_free_chunks;
8937 		mem_descriptor = &host_memory->sg_descriptor[i];
8938 		put_unaligned_le64((u64)dma_handle, &mem_descriptor->address);
8939 		put_unaligned_le32(chunk_size, &mem_descriptor->length);
8940 	}
8941 
8942 	put_unaligned_le32(CISS_SG_LAST, &mem_descriptor->flags);
8943 	put_unaligned_le16(sg_count, &host_memory->num_memory_descriptors);
8944 	put_unaligned_le32(sg_count * chunk_size, &host_memory->bytes_allocated);
8945 
8946 	return 0;
8947 
8948 out_free_chunks:
8949 	while (--i >= 0) {
8950 		mem_descriptor = &host_memory->sg_descriptor[i];
8951 		dma_free_coherent(dev, chunk_size,
8952 			host_memory_descriptor->host_chunk_virt_address[i],
8953 			get_unaligned_le64(&mem_descriptor->address));
8954 	}
8955 	kfree(host_memory_descriptor->host_chunk_virt_address);
8956 out:
8957 	return -ENOMEM;
8958 }
8959 
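/*
 * Find an allocation that fits: starting from the full request, try
 * successively halved chunk sizes down to the minimum chunk, and if no
 * chunking works, halve the total itself and retry until it would drop
 * below min_required_size. All sizes are kept page aligned.
 */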
8960 static int pqi_host_alloc_buffer(struct pqi_ctrl_info *ctrl_info,
8961 	struct pqi_host_memory_descriptor *host_memory_descriptor,
8962 	u32 total_required_size, u32 min_required_size)
8963 {
8964 	u32 chunk_size;
8965 	u32 min_chunk_size;
8966 
8967 	if (total_required_size == 0 || min_required_size == 0)
8968 		return 0;
8969 
8970 	total_required_size = PAGE_ALIGN(total_required_size);
8971 	min_required_size = PAGE_ALIGN(min_required_size);
8972 	min_chunk_size = DIV_ROUND_UP(total_required_size, PQI_HOST_MAX_SG_DESCRIPTORS);
8973 	min_chunk_size = PAGE_ALIGN(min_chunk_size);
8974 
8975 	while (total_required_size >= min_required_size) {
8976 		for (chunk_size = total_required_size; chunk_size >= min_chunk_size;) {
8977 			if (pqi_host_alloc_mem(ctrl_info,
8978 				host_memory_descriptor, total_required_size,
8979 				chunk_size) == 0)
8980 				return 0;
8981 			chunk_size /= 2;
8982 			chunk_size = PAGE_ALIGN(chunk_size);
8983 		}
8984 		total_required_size /= 2;
8985 		total_required_size = PAGE_ALIGN(total_required_size);
8986 	}
8987 
8988 	return -ENOMEM;
8989 }
8990 
8991 static void pqi_host_setup_buffer(struct pqi_ctrl_info *ctrl_info,
8992 	struct pqi_host_memory_descriptor *host_memory_descriptor,
8993 	u32 total_size, u32 min_size)
8994 {
8995 	struct device *dev;
8996 	struct pqi_host_memory *host_memory;
8997 
8998 	dev = &ctrl_info->pci_dev->dev;
8999 
9000 	host_memory = dma_alloc_coherent(dev, sizeof(*host_memory),
9001 		&host_memory_descriptor->host_memory_dma_handle, GFP_KERNEL);
9002 	if (!host_memory)
9003 		return;
9004 
9005 	host_memory_descriptor->host_memory = host_memory;
9006 
9007 	if (pqi_host_alloc_buffer(ctrl_info, host_memory_descriptor,
9008 		total_size, min_size) < 0) {
9009 		dev_err(dev, "failed to allocate firmware usable host buffer\n");
9010 		dma_free_coherent(dev, sizeof(*host_memory), host_memory,
9011 			host_memory_descriptor->host_memory_dma_handle);
9012 		host_memory_descriptor->host_memory = NULL;
9013 		return;
9014 	}
9015 }
9016 
9017 static void pqi_host_free_buffer(struct pqi_ctrl_info *ctrl_info,
9018 	struct pqi_host_memory_descriptor *host_memory_descriptor)
9019 {
9020 	unsigned int i;
9021 	struct device *dev;
9022 	struct pqi_host_memory *host_memory;
9023 	struct pqi_sg_descriptor *mem_descriptor;
9024 	unsigned int num_memory_descriptors;
9025 
9026 	host_memory = host_memory_descriptor->host_memory;
9027 	if (!host_memory)
9028 		return;
9029 
9030 	dev = &ctrl_info->pci_dev->dev;
9031 
9032 	if (get_unaligned_le32(&host_memory->bytes_allocated) == 0)
9033 		goto out;
9034 
9035 	mem_descriptor = host_memory->sg_descriptor;
9036 	num_memory_descriptors = get_unaligned_le16(&host_memory->num_memory_descriptors);
9037 
9038 	for (i = 0; i < num_memory_descriptors; i++) {
9039 		dma_free_coherent(dev,
9040 			get_unaligned_le32(&mem_descriptor[i].length),
9041 			host_memory_descriptor->host_chunk_virt_address[i],
9042 			get_unaligned_le64(&mem_descriptor[i].address));
9043 	}
9044 	kfree(host_memory_descriptor->host_chunk_virt_address);
9045 
9046 out:
9047 	dma_free_coherent(dev, sizeof(*host_memory), host_memory,
9048 		host_memory_descriptor->host_memory_dma_handle);
9049 	host_memory_descriptor->host_memory = NULL;
9050 }
9051 
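/*
 * Tell the firmware about the host buffer (or about its removal, if no
 * buffer is set up) via a vendor-general IU. The buffer is stamped with
 * the OFA or controller-log signature and version matching the given
 * function code before the synchronous request is issued.
 */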
9052 static int pqi_host_memory_update(struct pqi_ctrl_info *ctrl_info,
9053 	struct pqi_host_memory_descriptor *host_memory_descriptor,
9054 	u16 function_code)
9055 {
9056 	u32 buffer_length;
9057 	struct pqi_vendor_general_request request;
9058 	struct pqi_host_memory *host_memory;
9059 
9060 	memset(&request, 0, sizeof(request));
9061 
9062 	request.header.iu_type = PQI_REQUEST_IU_VENDOR_GENERAL;
9063 	put_unaligned_le16(sizeof(request) - PQI_REQUEST_HEADER_LENGTH, &request.header.iu_length);
9064 	put_unaligned_le16(function_code, &request.function_code);
9065 
9066 	host_memory = host_memory_descriptor->host_memory;
9067 
9068 	if (host_memory) {
9069 		buffer_length = offsetof(struct pqi_host_memory, sg_descriptor) + get_unaligned_le16(&host_memory->num_memory_descriptors) * sizeof(struct pqi_sg_descriptor);
9070 		put_unaligned_le64((u64)host_memory_descriptor->host_memory_dma_handle, &request.data.host_memory_allocation.buffer_address);
9071 		put_unaligned_le32(buffer_length, &request.data.host_memory_allocation.buffer_length);
9072 
9073 		if (function_code == PQI_VENDOR_GENERAL_OFA_MEMORY_UPDATE) {
9074 			put_unaligned_le16(PQI_OFA_VERSION, &host_memory->version);
9075 			memcpy(&host_memory->signature, PQI_OFA_SIGNATURE, sizeof(host_memory->signature));
9076 		} else if (function_code == PQI_VENDOR_GENERAL_CTRL_LOG_MEMORY_UPDATE) {
9077 			put_unaligned_le16(PQI_CTRL_LOG_VERSION, &host_memory->version);
9078 			memcpy(&host_memory->signature, PQI_CTRL_LOG_SIGNATURE, sizeof(host_memory->signature));
9079 		}
9080 	}
9081 
9082 	return pqi_submit_raid_request_synchronous(ctrl_info, &request.header, 0, NULL);
9083 }
9084 
9085 static struct pqi_raid_error_info pqi_ctrl_offline_raid_error_info = {
9086 	.data_out_result = PQI_DATA_IN_OUT_HARDWARE_ERROR,
9087 	.status = SAM_STAT_CHECK_CONDITION,
9088 };
9089 
9090 static void pqi_fail_all_outstanding_requests(struct pqi_ctrl_info *ctrl_info)
9091 {
9092 	unsigned int i;
9093 	struct pqi_io_request *io_request;
9094 	struct scsi_cmnd *scmd;
9095 	struct scsi_device *sdev;
9096 
9097 	for (i = 0; i < ctrl_info->max_io_slots; i++) {
9098 		io_request = &ctrl_info->io_request_pool[i];
9099 		if (atomic_read(&io_request->refcount) == 0)
9100 			continue;
9101 
9102 		scmd = io_request->scmd;
9103 		if (scmd) {
9104 			sdev = scmd->device;
9105 			if (!sdev || !scsi_device_online(sdev)) {
9106 				pqi_free_io_request(io_request);
9107 				continue;
9108 			} else {
9109 				set_host_byte(scmd, DID_NO_CONNECT);
9110 			}
9111 		} else {
9112 			io_request->status = -ENXIO;
9113 			io_request->error_info =
9114 				&pqi_ctrl_offline_raid_error_info;
9115 		}
9116 
9117 		io_request->io_complete_callback(io_request,
9118 			io_request->context);
9119 	}
9120 }
9121 
9122 static void pqi_take_ctrl_offline_deferred(struct pqi_ctrl_info *ctrl_info)
9123 {
9124 	pqi_perform_lockup_action();
9125 	pqi_stop_heartbeat_timer(ctrl_info);
9126 	pqi_free_interrupts(ctrl_info);
9127 	pqi_cancel_rescan_worker(ctrl_info);
9128 	pqi_cancel_update_time_worker(ctrl_info);
9129 	pqi_ctrl_wait_until_quiesced(ctrl_info);
9130 	pqi_fail_all_outstanding_requests(ctrl_info);
9131 	pqi_ctrl_unblock_requests(ctrl_info);
9132 }
9133 
9134 static void pqi_ctrl_offline_worker(struct work_struct *work)
9135 {
9136 	struct pqi_ctrl_info *ctrl_info;
9137 
9138 	ctrl_info = container_of(work, struct pqi_ctrl_info, ctrl_offline_work);
9139 	pqi_take_ctrl_offline_deferred(ctrl_info);
9140 }
9141 
9142 static char *pqi_ctrl_shutdown_reason_to_string(enum pqi_ctrl_shutdown_reason ctrl_shutdown_reason)
9143 {
9144 	char *string;
9145 
9146 	switch (ctrl_shutdown_reason) {
9147 	case PQI_IQ_NOT_DRAINED_TIMEOUT:
9148 		string = "inbound queue not drained timeout";
9149 		break;
9150 	case PQI_LUN_RESET_TIMEOUT:
9151 		string = "LUN reset timeout";
9152 		break;
9153 	case PQI_IO_PENDING_POST_LUN_RESET_TIMEOUT:
9154 		string = "I/O pending timeout after LUN reset";
9155 		break;
9156 	case PQI_NO_HEARTBEAT:
9157 		string = "no controller heartbeat detected";
9158 		break;
9159 	case PQI_FIRMWARE_KERNEL_NOT_UP:
9160 		string = "firmware kernel not ready";
9161 		break;
9162 	case PQI_OFA_RESPONSE_TIMEOUT:
9163 		string = "OFA response timeout";
9164 		break;
9165 	case PQI_INVALID_REQ_ID:
9166 		string = "invalid request ID";
9167 		break;
9168 	case PQI_UNMATCHED_REQ_ID:
9169 		string = "unmatched request ID";
9170 		break;
9171 	case PQI_IO_PI_OUT_OF_RANGE:
9172 		string = "I/O queue producer index out of range";
9173 		break;
9174 	case PQI_EVENT_PI_OUT_OF_RANGE:
9175 		string = "event queue producer index out of range";
9176 		break;
9177 	case PQI_UNEXPECTED_IU_TYPE:
9178 		string = "unexpected IU type";
9179 		break;
9180 	default:
9181 		string = "unknown reason";
9182 		break;
9183 	}
9184 
9185 	return string;
9186 }
9187 
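/*
 * First stage of taking the controller offline: mark it offline, block
 * new requests, report the shutdown reason to the firmware (unless
 * disabled by module parameter), and defer the heavier teardown to
 * ctrl_offline_work.
 */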
9188 static void pqi_take_ctrl_offline(struct pqi_ctrl_info *ctrl_info,
9189 	enum pqi_ctrl_shutdown_reason ctrl_shutdown_reason)
9190 {
9191 	if (!ctrl_info->controller_online)
9192 		return;
9193 
9194 	ctrl_info->controller_online = false;
9195 	ctrl_info->pqi_mode_enabled = false;
9196 	pqi_ctrl_block_requests(ctrl_info);
9197 	if (!pqi_disable_ctrl_shutdown)
9198 		sis_shutdown_ctrl(ctrl_info, ctrl_shutdown_reason);
9199 	pci_disable_device(ctrl_info->pci_dev);
9200 	dev_err(&ctrl_info->pci_dev->dev,
9201 		"controller offline: reason code 0x%x (%s)\n",
9202 		ctrl_shutdown_reason, pqi_ctrl_shutdown_reason_to_string(ctrl_shutdown_reason));
9203 	schedule_work(&ctrl_info->ctrl_offline_work);
9204 }
9205 
9206 static void pqi_print_ctrl_info(struct pci_dev *pci_dev,
9207 	const struct pci_device_id *id)
9208 {
9209 	char *ctrl_description;
9210 
9211 	if (id->driver_data)
9212 		ctrl_description = (char *)id->driver_data;
9213 	else
9214 		ctrl_description = "Microchip Smart Family Controller";
9215 
9216 	dev_info(&pci_dev->dev, "%s found\n", ctrl_description);
9217 }
9218 
9219 static int pqi_pci_probe(struct pci_dev *pci_dev,
9220 	const struct pci_device_id *id)
9221 {
9222 	int rc;
9223 	int node;
9224 	struct pqi_ctrl_info *ctrl_info;
9225 
9226 	pqi_print_ctrl_info(pci_dev, id);
9227 
9228 	if (pqi_disable_device_id_wildcards &&
9229 		id->subvendor == PCI_ANY_ID &&
9230 		id->subdevice == PCI_ANY_ID) {
9231 		dev_warn(&pci_dev->dev,
9232 			"controller not probed because device ID wildcards are disabled\n");
9233 		return -ENODEV;
9234 	}
9235 
9236 	if (id->subvendor == PCI_ANY_ID || id->subdevice == PCI_ANY_ID)
9237 		dev_warn(&pci_dev->dev,
9238 			"controller device ID matched using wildcards\n");
9239 
9240 	node = dev_to_node(&pci_dev->dev);
9241 	if (node == NUMA_NO_NODE) {
9242 		node = cpu_to_node(0);
9243 		if (node == NUMA_NO_NODE)
9244 			node = 0;
9245 		set_dev_node(&pci_dev->dev, node);
9246 	}
9247 
9248 	ctrl_info = pqi_alloc_ctrl_info(node);
9249 	if (!ctrl_info) {
9250 		dev_err(&pci_dev->dev,
9251 			"failed to allocate controller info block\n");
9252 		return -ENOMEM;
9253 	}
9254 	ctrl_info->numa_node = node;
9255 
9256 	ctrl_info->pci_dev = pci_dev;
9257 
9258 	rc = pqi_pci_init(ctrl_info);
9259 	if (rc)
9260 		goto error;
9261 
9262 	rc = pqi_ctrl_init(ctrl_info);
9263 	if (rc)
9264 		goto error;
9265 
9266 	return 0;
9267 
9268 error:
9269 	pqi_remove_ctrl(ctrl_info);
9270 
9271 	return rc;
9272 }
9273 
9274 static void pqi_pci_remove(struct pci_dev *pci_dev)
9275 {
9276 	struct pqi_ctrl_info *ctrl_info;
9277 	u16 vendor_id;
9278 	int rc;
9279 
9280 	ctrl_info = pci_get_drvdata(pci_dev);
9281 	if (!ctrl_info)
9282 		return;
9283 
9284 	pci_read_config_word(ctrl_info->pci_dev, PCI_SUBSYSTEM_VENDOR_ID, &vendor_id);
9285 	if (vendor_id == 0xffff)
9286 		ctrl_info->ctrl_removal_state = PQI_CTRL_SURPRISE_REMOVAL;
9287 	else
9288 		ctrl_info->ctrl_removal_state = PQI_CTRL_GRACEFUL_REMOVAL;
9289 
9290 	if (ctrl_info->ctrl_removal_state == PQI_CTRL_GRACEFUL_REMOVAL) {
9291 		rc = pqi_flush_cache(ctrl_info, RESTART);
9292 		if (rc)
9293 			dev_err(&pci_dev->dev,
9294 				"unable to flush controller cache during remove\n");
9295 	}
9296 
9297 	pqi_remove_ctrl(ctrl_info);
9298 }
9299 
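/*
 * Warn (with a stack trace) if any request is still outstanding at
 * shutdown; the paired WARN_ONs distinguish SML-issued commands from
 * driver-initiated requests, so any busy slot trips exactly one of them.
 */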
9300 static void pqi_crash_if_pending_command(struct pqi_ctrl_info *ctrl_info)
9301 {
9302 	unsigned int i;
9303 	struct pqi_io_request *io_request;
9304 	struct scsi_cmnd *scmd;
9305 
9306 	for (i = 0; i < ctrl_info->max_io_slots; i++) {
9307 		io_request = &ctrl_info->io_request_pool[i];
9308 		if (atomic_read(&io_request->refcount) == 0)
9309 			continue;
9310 		scmd = io_request->scmd;
9311 		WARN_ON(scmd != NULL); /* IO command from SML */
9312 		WARN_ON(scmd == NULL); /* Non-IO cmd or driver initiated */
9313 	}
9314 }
9315 
9316 static void pqi_shutdown(struct pci_dev *pci_dev)
9317 {
9318 	int rc;
9319 	struct pqi_ctrl_info *ctrl_info;
9320 	enum bmic_flush_cache_shutdown_event shutdown_event;
9321 
9322 	ctrl_info = pci_get_drvdata(pci_dev);
9323 	if (!ctrl_info) {
9324 		dev_err(&pci_dev->dev,
9325 			"cache could not be flushed\n");
9326 		return;
9327 	}
9328 
9329 	pqi_wait_until_ofa_finished(ctrl_info);
9330 
9331 	pqi_scsi_block_requests(ctrl_info);
9332 	pqi_ctrl_block_device_reset(ctrl_info);
9333 	pqi_ctrl_block_requests(ctrl_info);
9334 	pqi_ctrl_wait_until_quiesced(ctrl_info);
9335 
9336 	if (system_state == SYSTEM_RESTART)
9337 		shutdown_event = RESTART;
9338 	else
9339 		shutdown_event = SHUTDOWN;
9340 
9341 	/*
9342 	 * Write all data in the controller's battery-backed cache to
9343 	 * storage.
9344 	 */
9345 	rc = pqi_flush_cache(ctrl_info, shutdown_event);
9346 	if (rc)
9347 		dev_err(&pci_dev->dev,
9348 			"unable to flush controller cache during shutdown\n");
9349 
9350 	pqi_crash_if_pending_command(ctrl_info);
9351 	pqi_reset(ctrl_info);
9352 }
9353 
9354 static void pqi_process_lockup_action_param(void)
9355 {
9356 	unsigned int i;
9357 
9358 	if (!pqi_lockup_action_param)
9359 		return;
9360 
9361 	for (i = 0; i < ARRAY_SIZE(pqi_lockup_actions); i++) {
9362 		if (strcmp(pqi_lockup_action_param,
9363 			pqi_lockup_actions[i].name) == 0) {
9364 			pqi_lockup_action = pqi_lockup_actions[i].action;
9365 			return;
9366 		}
9367 	}
9368 
9369 	pr_warn("%s: invalid lockup action setting \"%s\" - supported settings: none, reboot, panic\n",
9370 		DRIVER_NAME_SHORT, pqi_lockup_action_param);
9371 }
9372 
9373 #define PQI_CTRL_READY_TIMEOUT_PARAM_MIN_SECS		30
9374 #define PQI_CTRL_READY_TIMEOUT_PARAM_MAX_SECS		(30 * 60)
9375 
9376 static void pqi_process_ctrl_ready_timeout_param(void)
9377 {
9378 	if (pqi_ctrl_ready_timeout_secs == 0)
9379 		return;
9380 
9381 	if (pqi_ctrl_ready_timeout_secs < PQI_CTRL_READY_TIMEOUT_PARAM_MIN_SECS) {
9382 		pr_warn("%s: ctrl_ready_timeout parm of %u second(s) is less than minimum timeout of %d seconds - setting timeout to %d seconds\n",
9383 			DRIVER_NAME_SHORT, pqi_ctrl_ready_timeout_secs, PQI_CTRL_READY_TIMEOUT_PARAM_MIN_SECS, PQI_CTRL_READY_TIMEOUT_PARAM_MIN_SECS);
9384 		pqi_ctrl_ready_timeout_secs = PQI_CTRL_READY_TIMEOUT_PARAM_MIN_SECS;
9385 	} else if (pqi_ctrl_ready_timeout_secs > PQI_CTRL_READY_TIMEOUT_PARAM_MAX_SECS) {
9386 		pr_warn("%s: ctrl_ready_timeout parm of %u seconds is greater than maximum timeout of %d seconds - setting timeout to %d seconds\n",
9387 			DRIVER_NAME_SHORT, pqi_ctrl_ready_timeout_secs, PQI_CTRL_READY_TIMEOUT_PARAM_MAX_SECS, PQI_CTRL_READY_TIMEOUT_PARAM_MAX_SECS);
9388 		pqi_ctrl_ready_timeout_secs = PQI_CTRL_READY_TIMEOUT_PARAM_MAX_SECS;
9389 	}
9390 
9391 	sis_ctrl_ready_timeout_secs = pqi_ctrl_ready_timeout_secs;
9392 }
9393 
9394 static void pqi_process_module_params(void)
9395 {
9396 	pqi_process_lockup_action_param();
9397 	pqi_process_ctrl_ready_timeout_param();
9398 }
9399 
9400 #if defined(CONFIG_PM)
9401 
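/*
 * One specific Adaptec board (subsystem device 0x1304) has its cache
 * flushed with a RESTART shutdown event on suspend; every other
 * controller gets SUSPEND.
 */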
9402 static inline enum bmic_flush_cache_shutdown_event pqi_get_flush_cache_shutdown_event(struct pci_dev *pci_dev)
9403 {
9404 	if (pci_dev->subsystem_vendor == PCI_VENDOR_ID_ADAPTEC2 && pci_dev->subsystem_device == 0x1304)
9405 		return RESTART;
9406 
9407 	return SUSPEND;
9408 }
9409 
9410 static int pqi_suspend_or_freeze(struct device *dev, bool suspend)
9411 {
9412 	struct pci_dev *pci_dev;
9413 	struct pqi_ctrl_info *ctrl_info;
9414 
9415 	pci_dev = to_pci_dev(dev);
9416 	ctrl_info = pci_get_drvdata(pci_dev);
9417 
9418 	pqi_wait_until_ofa_finished(ctrl_info);
9419 
9420 	pqi_ctrl_block_scan(ctrl_info);
9421 	pqi_scsi_block_requests(ctrl_info);
9422 	pqi_ctrl_block_device_reset(ctrl_info);
9423 	pqi_ctrl_block_requests(ctrl_info);
9424 	pqi_ctrl_wait_until_quiesced(ctrl_info);
9425 
9426 	if (suspend) {
9427 		enum bmic_flush_cache_shutdown_event shutdown_event;
9428 
9429 		shutdown_event = pqi_get_flush_cache_shutdown_event(pci_dev);
9430 		pqi_flush_cache(ctrl_info, shutdown_event);
9431 	}
9432 
9433 	pqi_stop_heartbeat_timer(ctrl_info);
9434 	pqi_crash_if_pending_command(ctrl_info);
9435 	pqi_free_irqs(ctrl_info);
9436 
9437 	ctrl_info->controller_online = false;
9438 	ctrl_info->pqi_mode_enabled = false;
9439 
9440 	return 0;
9441 }
9442 
9443 static __maybe_unused int pqi_suspend(struct device *dev)
9444 {
9445 	return pqi_suspend_or_freeze(dev, true);
9446 }
9447 
9448 static int pqi_resume_or_restore(struct device *dev)
9449 {
9450 	int rc;
9451 	struct pci_dev *pci_dev;
9452 	struct pqi_ctrl_info *ctrl_info;
9453 
9454 	pci_dev = to_pci_dev(dev);
9455 	ctrl_info = pci_get_drvdata(pci_dev);
9456 
9457 	rc = pqi_request_irqs(ctrl_info);
9458 	if (rc)
9459 		return rc;
9460 
9461 	pqi_ctrl_unblock_device_reset(ctrl_info);
9462 	pqi_ctrl_unblock_requests(ctrl_info);
9463 	pqi_scsi_unblock_requests(ctrl_info);
9464 	pqi_ctrl_unblock_scan(ctrl_info);
9465 
9466 	ssleep(PQI_POST_RESET_DELAY_SECS);
9467 
9468 	return pqi_ctrl_init_resume(ctrl_info);
9469 }
9470 
9471 static int pqi_freeze(struct device *dev)
9472 {
9473 	return pqi_suspend_or_freeze(dev, false);
9474 }
9475 
9476 static int pqi_thaw(struct device *dev)
9477 {
9478 	int rc;
9479 	struct pci_dev *pci_dev;
9480 	struct pqi_ctrl_info *ctrl_info;
9481 
9482 	pci_dev = to_pci_dev(dev);
9483 	ctrl_info = pci_get_drvdata(pci_dev);
9484 
9485 	rc = pqi_request_irqs(ctrl_info);
9486 	if (rc)
9487 		return rc;
9488 
9489 	ctrl_info->controller_online = true;
9490 	ctrl_info->pqi_mode_enabled = true;
9491 
9492 	pqi_ctrl_unblock_device_reset(ctrl_info);
9493 	pqi_ctrl_unblock_requests(ctrl_info);
9494 	pqi_scsi_unblock_requests(ctrl_info);
9495 	pqi_ctrl_unblock_scan(ctrl_info);
9496 
9497 	return 0;
9498 }
9499 
9500 static int pqi_poweroff(struct device *dev)
9501 {
9502 	struct pci_dev *pci_dev;
9503 	struct pqi_ctrl_info *ctrl_info;
9504 	enum bmic_flush_cache_shutdown_event shutdown_event;
9505 
9506 	pci_dev = to_pci_dev(dev);
9507 	ctrl_info = pci_get_drvdata(pci_dev);
9508 
9509 	shutdown_event = pqi_get_flush_cache_shutdown_event(pci_dev);
9510 	pqi_flush_cache(ctrl_info, shutdown_event);
9511 
9512 	return 0;
9513 }
9514 
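/*
 * Hibernation's freeze/thaw transitions skip the cache flush
 * (pqi_suspend_or_freeze() is called with suspend == false); the cache
 * is flushed only on suspend and poweroff.
 */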
9515 static const struct dev_pm_ops pqi_pm_ops = {
9516 	.suspend = pqi_suspend,
9517 	.resume = pqi_resume_or_restore,
9518 	.freeze = pqi_freeze,
9519 	.thaw = pqi_thaw,
9520 	.poweroff = pqi_poweroff,
9521 	.restore = pqi_resume_or_restore,
9522 };
9523 
9524 #endif /* CONFIG_PM */
9525 
9526 /* Define the PCI IDs for the controllers that we support. */
9527 static const struct pci_device_id pqi_pci_id_table[] = {
9528 	{
9529 		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9530 			       0x105b, 0x1211)
9531 	},
9532 	{
9533 		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9534 			       0x105b, 0x1321)
9535 	},
9536 	{
9537 		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9538 			       0x152d, 0x8a22)
9539 	},
9540 	{
9541 		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9542 			       0x152d, 0x8a23)
9543 	},
9544 	{
9545 		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9546 			       0x152d, 0x8a24)
9547 	},
9548 	{
9549 		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9550 			       0x152d, 0x8a36)
9551 	},
9552 	{
9553 		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9554 			       0x152d, 0x8a37)
9555 	},
9556 	{
9557 		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9558 			       0x193d, 0x0462)
9559 	},
9560 	{
9561 		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9562 			       0x193d, 0x1104)
9563 	},
9564 	{
9565 		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9566 			       0x193d, 0x1105)
9567 	},
9568 	{
9569 		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9570 			       0x193d, 0x1106)
9571 	},
9572 	{
9573 		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9574 			       0x193d, 0x1107)
9575 	},
9576 	{
9577 		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9578 			       0x193d, 0x1108)
9579 	},
9580 	{
9581 		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9582 			       0x193d, 0x1109)
9583 	},
9584 	{
9585 		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9586 			       0x193d, 0x110b)
9587 	},
9588 	{
9589 		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9590 			       0x193d, 0x1110)
9591 	},
9592 	{
9593 		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9594 			       0x193d, 0x8460)
9595 	},
9596 	{
9597 		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9598 			       0x193d, 0x8461)
9599 	},
9600 	{
9601 		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9602 			       0x193d, 0x8462)
9603 	},
9604 	{
9605 		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9606 			       0x193d, 0xc460)
9607 	},
9608 	{
9609 		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9610 			       0x193d, 0xc461)
9611 	},
9612 	{
9613 		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9614 			       0x193d, 0xf460)
9615 	},
9616 	{
9617 		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9618 			       0x193d, 0xf461)
9619 	},
9620 	{
9621 		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9622 			       0x1bd4, 0x0045)
9623 	},
9624 	{
9625 		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9626 			       0x1bd4, 0x0046)
9627 	},
9628 	{
9629 		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9630 			       0x1bd4, 0x0047)
9631 	},
9632 	{
9633 		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9634 			       0x1bd4, 0x0048)
9635 	},
9636 	{
9637 		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9638 			       0x1bd4, 0x004a)
9639 	},
9640 	{
9641 		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9642 			       0x1bd4, 0x004b)
9643 	},
9644 	{
9645 		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9646 			       0x1bd4, 0x004c)
9647 	},
9648 	{
9649 		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9650 			       0x1bd4, 0x004f)
9651 	},
9652 	{
9653 		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9654 			       0x1bd4, 0x0051)
9655 	},
9656 	{
9657 		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9658 			       0x1bd4, 0x0052)
9659 	},
9660 	{
9661 		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9662 			       0x1bd4, 0x0053)
9663 	},
9664 	{
9665 		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9666 			       0x1bd4, 0x0054)
9667 	},
9668 	{
9669 		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9670 			       0x1bd4, 0x006b)
9671 	},
9672 	{
9673 		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9674 			       0x1bd4, 0x006c)
9675 	},
9676 	{
9677 		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9678 			       0x1bd4, 0x006d)
9679 	},
9680 	{
9681 		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9682 			       0x1bd4, 0x006f)
9683 	},
9684 	{
9685 		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9686 			       0x1bd4, 0x0070)
9687 	},
9688 	{
9689 		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9690 			       0x1bd4, 0x0071)
9691 	},
9692 	{
9693 		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9694 			       0x1bd4, 0x0072)
9695 	},
9696 	{
9697 		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9698 			       0x1bd4, 0x0086)
9699 	},
9700 	{
9701 		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9702 			       0x1bd4, 0x0087)
9703 	},
9704 	{
9705 		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9706 			       0x1bd4, 0x0088)
9707 	},
9708 	{
9709 		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9710 			       0x1bd4, 0x0089)
9711 	},
9712 	{
9713 		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9714 			       0x1ff9, 0x00a1)
9715 	},
9716 	{
9717 		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9718 			       0x1f3a, 0x0104)
9719 	},
9720 	{
9721 		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9722 			       0x19e5, 0xd227)
9723 	},
9724 	{
9725 		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9726 			       0x19e5, 0xd228)
9727 	},
9728 	{
9729 		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9730 			       0x19e5, 0xd229)
9731 	},
9732 	{
9733 		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9734 			       0x19e5, 0xd22a)
9735 	},
9736 	{
9737 		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9738 			       0x19e5, 0xd22b)
9739 	},
9740 	{
9741 		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9742 			       0x19e5, 0xd22c)
9743 	},
9744 	{
9745 		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9746 			       PCI_VENDOR_ID_ADAPTEC2, 0x0110)
9747 	},
9748 	{
9749 		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9750 			       PCI_VENDOR_ID_ADAPTEC2, 0x0608)
9751 	},
9752 	{
9753 		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9754 			       PCI_VENDOR_ID_ADAPTEC2, 0x0659)
9755 	},
9756 	{
9757 		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9758 			       PCI_VENDOR_ID_ADAPTEC2, 0x0800)
9759 	},
9760 	{
9761 		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9762 			       PCI_VENDOR_ID_ADAPTEC2, 0x0801)
9763 	},
9764 	{
9765 		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9766 			       PCI_VENDOR_ID_ADAPTEC2, 0x0802)
9767 	},
9768 	{
9769 		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9770 			       PCI_VENDOR_ID_ADAPTEC2, 0x0803)
9771 	},
9772 	{
9773 		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9774 			       PCI_VENDOR_ID_ADAPTEC2, 0x0804)
9775 	},
9776 	{
9777 		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9778 			       PCI_VENDOR_ID_ADAPTEC2, 0x0805)
9779 	},
9780 	{
9781 		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9782 			       PCI_VENDOR_ID_ADAPTEC2, 0x0806)
9783 	},
9784 	{
9785 		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9786 			       PCI_VENDOR_ID_ADAPTEC2, 0x0807)
9787 	},
9788 	{
9789 		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9790 			       PCI_VENDOR_ID_ADAPTEC2, 0x0808)
9791 	},
9792 	{
9793 		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9794 			       PCI_VENDOR_ID_ADAPTEC2, 0x0809)
9795 	},
9796 	{
9797 		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9798 			       PCI_VENDOR_ID_ADAPTEC2, 0x080a)
9799 	},
9800 	{
9801 		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9802 			       PCI_VENDOR_ID_ADAPTEC2, 0x0900)
9803 	},
9804 	{
9805 		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9806 			       PCI_VENDOR_ID_ADAPTEC2, 0x0901)
9807 	},
9808 	{
9809 		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9810 			       PCI_VENDOR_ID_ADAPTEC2, 0x0902)
9811 	},
9812 	{
9813 		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9814 			       PCI_VENDOR_ID_ADAPTEC2, 0x0903)
9815 	},
9816 	{
9817 		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9818 			       PCI_VENDOR_ID_ADAPTEC2, 0x0904)
9819 	},
9820 	{
9821 		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9822 			       PCI_VENDOR_ID_ADAPTEC2, 0x0905)
9823 	},
9824 	{
9825 		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9826 			       PCI_VENDOR_ID_ADAPTEC2, 0x0906)
9827 	},
9828 	{
9829 		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9830 			       PCI_VENDOR_ID_ADAPTEC2, 0x0907)
9831 	},
9832 	{
9833 		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9834 			       PCI_VENDOR_ID_ADAPTEC2, 0x0908)
9835 	},
9836 	{
9837 		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9838 			       PCI_VENDOR_ID_ADAPTEC2, 0x090a)
9839 	},
9840 	{
9841 		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9842 			       PCI_VENDOR_ID_ADAPTEC2, 0x1200)
9843 	},
9844 	{
9845 		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9846 			       PCI_VENDOR_ID_ADAPTEC2, 0x1201)
9847 	},
9848 	{
9849 		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9850 			       PCI_VENDOR_ID_ADAPTEC2, 0x1202)
9851 	},
9852 	{
9853 		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9854 			       PCI_VENDOR_ID_ADAPTEC2, 0x1280)
9855 	},
9856 	{
9857 		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9858 			       PCI_VENDOR_ID_ADAPTEC2, 0x1281)
9859 	},
9860 	{
9861 		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9862 			       PCI_VENDOR_ID_ADAPTEC2, 0x1282)
9863 	},
9864 	{
9865 		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9866 			       PCI_VENDOR_ID_ADAPTEC2, 0x1300)
9867 	},
9868 	{
9869 		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9870 			       PCI_VENDOR_ID_ADAPTEC2, 0x1301)
9871 	},
9872 	{
9873 		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9874 			       PCI_VENDOR_ID_ADAPTEC2, 0x1302)
9875 	},
9876 	{
9877 		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9878 			       PCI_VENDOR_ID_ADAPTEC2, 0x1303)
9879 	},
9880 	{
9881 		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9882 			       PCI_VENDOR_ID_ADAPTEC2, 0x1304)
9883 	},
9884 	{
9885 		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9886 			       PCI_VENDOR_ID_ADAPTEC2, 0x1380)
9887 	},
9888 	{
9889 		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9890 			       PCI_VENDOR_ID_ADAPTEC2, 0x1400)
9891 	},
9892 	{
9893 		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9894 			       PCI_VENDOR_ID_ADAPTEC2, 0x1402)
9895 	},
9896 	{
9897 		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9898 			       PCI_VENDOR_ID_ADAPTEC2, 0x1410)
9899 	},
9900 	{
9901 		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9902 			       PCI_VENDOR_ID_ADAPTEC2, 0x1411)
9903 	},
9904 	{
9905 		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9906 			       PCI_VENDOR_ID_ADAPTEC2, 0x1412)
9907 	},
9908 	{
9909 		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9910 			       PCI_VENDOR_ID_ADAPTEC2, 0x1420)
9911 	},
9912 	{
9913 		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9914 			       PCI_VENDOR_ID_ADAPTEC2, 0x1430)
9915 	},
9916 	{
9917 		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9918 			       PCI_VENDOR_ID_ADAPTEC2, 0x1440)
9919 	},
9920 	{
9921 		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9922 			       PCI_VENDOR_ID_ADAPTEC2, 0x1441)
9923 	},
9924 	{
9925 		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9926 			       PCI_VENDOR_ID_ADAPTEC2, 0x1450)
9927 	},
9928 	{
9929 		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9930 			       PCI_VENDOR_ID_ADAPTEC2, 0x1452)
9931 	},
9932 	{
9933 		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9934 			       PCI_VENDOR_ID_ADAPTEC2, 0x1460)
9935 	},
9936 	{
9937 		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9938 			       PCI_VENDOR_ID_ADAPTEC2, 0x1461)
9939 	},
9940 	{
9941 		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9942 			       PCI_VENDOR_ID_ADAPTEC2, 0x1462)
9943 	},
9944 	{
9945 		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9946 			       PCI_VENDOR_ID_ADAPTEC2, 0x1463)
9947 	},
9948 	{
9949 		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9950 			       PCI_VENDOR_ID_ADAPTEC2, 0x1470)
9951 	},
9952 	{
9953 		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9954 			       PCI_VENDOR_ID_ADAPTEC2, 0x1471)
9955 	},
9956 	{
9957 		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9958 			       PCI_VENDOR_ID_ADAPTEC2, 0x1472)
9959 	},
9960 	{
9961 		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9962 			       PCI_VENDOR_ID_ADAPTEC2, 0x1473)
9963 	},
9964 	{
9965 		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9966 			       PCI_VENDOR_ID_ADAPTEC2, 0x1474)
9967 	},
9968 	{
9969 		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9970 			       PCI_VENDOR_ID_ADAPTEC2, 0x1475)
9971 	},
9972 	{
9973 		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9974 			       PCI_VENDOR_ID_ADAPTEC2, 0x1480)
9975 	},
9976 	{
9977 		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9978 			       PCI_VENDOR_ID_ADAPTEC2, 0x1490)
9979 	},
9980 	{
9981 		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9982 			       PCI_VENDOR_ID_ADAPTEC2, 0x1491)
9983 	},
9984 	{
9985 		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9986 			       PCI_VENDOR_ID_ADAPTEC2, 0x14a0)
9987 	},
9988 	{
9989 		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9990 			       PCI_VENDOR_ID_ADAPTEC2, 0x14a1)
9991 	},
9992 	{
9993 		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9994 			       PCI_VENDOR_ID_ADAPTEC2, 0x14a2)
9995 	},
9996 	{
9997 		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9998 			       PCI_VENDOR_ID_ADAPTEC2, 0x14a4)
9999 	},
10000 	{
10001 		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
10002 			       PCI_VENDOR_ID_ADAPTEC2, 0x14a5)
10003 	},
10004 	{
10005 		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
10006 			       PCI_VENDOR_ID_ADAPTEC2, 0x14a6)
10007 	},
10008 	{
10009 		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
10010 			       PCI_VENDOR_ID_ADAPTEC2, 0x14b0)
10011 	},
10012 	{
10013 		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
10014 			       PCI_VENDOR_ID_ADAPTEC2, 0x14b1)
10015 	},
10016 	{
10017 		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
10018 			       PCI_VENDOR_ID_ADAPTEC2, 0x14c0)
10019 	},
10020 	{
10021 		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
10022 			       PCI_VENDOR_ID_ADAPTEC2, 0x14c1)
10023 	},
10024 	{
10025 		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
10026 			       PCI_VENDOR_ID_ADAPTEC2, 0x14c2)
10027 	},
10028 	{
10029 		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
10030 			       PCI_VENDOR_ID_ADAPTEC2, 0x14c3)
10031 	},
10032 	{
10033 		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
10034 			       PCI_VENDOR_ID_ADAPTEC2, 0x14c4)
10035 	},
10036 	{
10037 		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
10038 			       PCI_VENDOR_ID_ADAPTEC2, 0x14d0)
10039 	},
10040 	{
10041 		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
10042 			       PCI_VENDOR_ID_ADAPTEC2, 0x14e0)
10043 	},
10044 	{
10045 		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
10046 			       PCI_VENDOR_ID_ADAPTEC2, 0x14f0)
10047 	},
10048 	{
10049 		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
10050 			       PCI_VENDOR_ID_ADVANTECH, 0x8312)
10051 	},
10052 	{
10053 		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
10054 			       PCI_VENDOR_ID_DELL, 0x1fe0)
10055 	},
10056 	{
10057 		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
10058 			       PCI_VENDOR_ID_HP, 0x0600)
10059 	},
10060 	{
10061 		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
10062 			       PCI_VENDOR_ID_HP, 0x0601)
10063 	},
10064 	{
10065 		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
10066 			       PCI_VENDOR_ID_HP, 0x0602)
10067 	},
10068 	{
10069 		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
10070 			       PCI_VENDOR_ID_HP, 0x0603)
10071 	},
10072 	{
10073 		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
10074 			       PCI_VENDOR_ID_HP, 0x0609)
10075 	},
10076 	{
10077 		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
10078 			       PCI_VENDOR_ID_HP, 0x0650)
10079 	},
10080 	{
10081 		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
10082 			       PCI_VENDOR_ID_HP, 0x0651)
10083 	},
10084 	{
10085 		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
10086 			       PCI_VENDOR_ID_HP, 0x0652)
10087 	},
10088 	{
10089 		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
10090 			       PCI_VENDOR_ID_HP, 0x0653)
10091 	},
10092 	{
10093 		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
10094 			       PCI_VENDOR_ID_HP, 0x0654)
10095 	},
10096 	{
10097 		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
10098 			       PCI_VENDOR_ID_HP, 0x0655)
10099 	},
10100 	{
10101 		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
10102 			       PCI_VENDOR_ID_HP, 0x0700)
10103 	},
10104 	{
10105 		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
10106 			       PCI_VENDOR_ID_HP, 0x0701)
10107 	},
10108 	{
10109 		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
10110 			       PCI_VENDOR_ID_HP, 0x1001)
10111 	},
10112 	{
10113 		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
10114 			       PCI_VENDOR_ID_HP, 0x1002)
10115 	},
10116 	{
10117 		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
10118 			       PCI_VENDOR_ID_HP, 0x1100)
10119 	},
10120 	{
10121 		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
10122 			       PCI_VENDOR_ID_HP, 0x1101)
10123 	},
10124 	{
10125 		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
10126 			       0x1590, 0x0294)
10127 	},
10128 	{
10129 		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
10130 			       0x1590, 0x02db)
10131 	},
10132 	{
10133 		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
10134 			       0x1590, 0x02dc)
10135 	},
10136 	{
10137 		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
10138 			       0x1590, 0x032e)
10139 	},
10140 	{
10141 		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
10142 			       0x1590, 0x036f)
10143 	},
10144 	{
10145 		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
10146 			       0x1590, 0x0381)
10147 	},
10148 	{
10149 		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
10150 			       0x1590, 0x0382)
10151 	},
10152 	{
10153 		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
10154 			       0x1590, 0x0383)
10155 	},
10156 	{
10157 		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
10158 			       0x1d8d, 0x0800)
10159 	},
10160 	{
10161 		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
10162 			       0x1d8d, 0x0908)
10163 	},
10164 	{
10165 		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
10166 			       0x1d8d, 0x0806)
10167 	},
10168 	{
10169 		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
10170 			       0x1d8d, 0x0916)
10171 	},
10172 	{
10173 		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
10174 			       PCI_VENDOR_ID_GIGABYTE, 0x1000)
10175 	},
10176 	{
10177 		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
10178 			       0x1dfc, 0x3161)
10179 	},
10180 	{
10181 		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
10182 			       0x1f0c, 0x3161)
10183 	},
10184 	{
10185 		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
10186 			       0x1cf2, 0x0804)
10187 	},
10188 	{
10189 		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
10190 			       0x1cf2, 0x0805)
10191 	},
10192 	{
10193 		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
10194 			       0x1cf2, 0x0806)
10195 	},
10196 	{
10197 		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
10198 			       0x1cf2, 0x5445)
10199 	},
10200 	{
10201 		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
10202 			       0x1cf2, 0x5446)
10203 	},
10204 	{
10205 		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
10206 			       0x1cf2, 0x5447)
10207 	},
10208 	{
10209 		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
10210 			       0x1cf2, 0x5449)
10211 	},
10212 	{
10213 		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
10214 			       0x1cf2, 0x544a)
10215 	},
10216 	{
10217 		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
10218 			       0x1cf2, 0x544b)
10219 	},
10220 	{
10221 		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
10222 			       0x1cf2, 0x544d)
10223 	},
10224 	{
10225 		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
10226 			       0x1cf2, 0x544e)
10227 	},
10228 	{
10229 		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
10230 			       0x1cf2, 0x544f)
10231 	},
10232 	{
10233 		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
10234 			       0x1cf2, 0x54da)
10235 	},
10236 	{
10237 		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
10238 			       0x1cf2, 0x54db)
10239 	},
10240 	{
10241 		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
10242 			       0x1cf2, 0x54dc)
10243 	},
10244 	{
10245 		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
10246 			       0x1cf2, 0x0b27)
10247 	},
10248 	{
10249 		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
10250 			       0x1cf2, 0x0b29)
10251 	},
10252 	{
10253 		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
10254 			       0x1cf2, 0x0b45)
10255 	},
10256 	{
10257 		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
10258 			       0x1cc4, 0x0101)
10259 	},
10260 	{
10261 		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
10262 			       0x1cc4, 0x0201)
10263 	},
10264 	{
10265 		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
10266 			       PCI_VENDOR_ID_LENOVO, 0x0220)
10267 	},
10268 	{
10269 		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
10270 			       PCI_VENDOR_ID_LENOVO, 0x0221)
10271 	},
10272 	{
10273 		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
10274 			       PCI_VENDOR_ID_LENOVO, 0x0520)
10275 	},
10276 	{
10277 		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
10278 			       PCI_VENDOR_ID_LENOVO, 0x0522)
10279 	},
10280 	{
10281 		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
10282 			       PCI_VENDOR_ID_LENOVO, 0x0620)
10283 	},
10284 	{
10285 		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
10286 			       PCI_VENDOR_ID_LENOVO, 0x0621)
10287 	},
10288 	{
10289 		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
10290 			       PCI_VENDOR_ID_LENOVO, 0x0622)
10291 	},
10292 	{
10293 		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
10294 			       PCI_VENDOR_ID_LENOVO, 0x0623)
10295 	},
10296 	{
10297 		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
10298 			       0x1014, 0x0718)
10299 	},
10300 	{
10301 		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
10302 			       0x1137, 0x02f8)
10303 	},
10304 	{
10305 		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
10306 			       0x1137, 0x02f9)
10307 	},
10308 	{
10309 		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
10310 			       0x1137, 0x02fa)
10311 	},
10312 	{
10313 		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
10314 			       0x1137, 0x02fe)
10315 	},
10316 	{
10317 		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
10318 			       0x1137, 0x02ff)
10319 	},
10320 	{
10321 		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
10322 			       0x1137, 0x0300)
10323 	},
10324 	{
10325 		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
10326 			       0x1ff9, 0x0045)
10327 	},
10328 	{
10329 		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
10330 			       0x1ff9, 0x0046)
10331 	},
10332 	{
10333 		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
10334 			       0x1ff9, 0x0047)
10335 	},
10336 	{
10337 		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
10338 			       0x1ff9, 0x0048)
10339 	},
10340 	{
10341 		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
10342 			       0x1ff9, 0x004a)
10343 	},
10344 	{
10345 		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
10346 			       0x1ff9, 0x004b)
10347 	},
10348 	{
10349 		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
10350 			       0x1ff9, 0x004c)
10351 	},
10352 	{
10353 		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
10354 			       0x1ff9, 0x004f)
10355 	},
10356 	{
10357 		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
10358 			       0x1ff9, 0x0051)
10359 	},
10360 	{
10361 		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
10362 			       0x1ff9, 0x0052)
10363 	},
10364 	{
10365 		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
10366 			       0x1ff9, 0x0053)
10367 	},
10368 	{
10369 		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
10370 			       0x1ff9, 0x0054)
10371 	},
10372 	{
10373 		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
10374 			       0x1ff9, 0x006b)
10375 	},
10376 	{
10377 		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
10378 			       0x1ff9, 0x006c)
10379 	},
10380 	{
10381 		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
10382 			       0x1ff9, 0x006d)
10383 	},
10384 	{
10385 		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
10386 			       0x1ff9, 0x006f)
10387 	},
10388 	{
10389 		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
10390 			       0x1ff9, 0x0070)
10391 	},
10392 	{
10393 		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
10394 			       0x1ff9, 0x0071)
10395 	},
10396 	{
10397 		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
10398 			       0x1ff9, 0x0072)
10399 	},
10400 	{
10401 		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
10402 			       0x1ff9, 0x0086)
10403 	},
10404 	{
10405 		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
10406 			       0x1ff9, 0x0087)
10407 	},
10408 	{
10409 		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
10410 			       0x1ff9, 0x0088)
10411 	},
10412 	{
10413 		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
10414 			       0x1ff9, 0x0089)
10415 	},
10416 	{
10417 		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
10418 			       0x1e93, 0x1000)
10419 	},
10420 	{
10421 		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
10422 			       0x1e93, 0x1001)
10423 	},
10424 	{
10425 		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
10426 			       0x1e93, 0x1002)
10427 	},
10428 	{
10429 		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
10430 			       0x1e93, 0x1005)
10431 	},
10432 	{
10433 		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
10434 			       0x1f51, 0x1001)
10435 	},
10436 	{
10437 		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
10438 			       0x1f51, 0x1002)
10439 	},
10440 	{
10441 		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
10442 			       0x1f51, 0x1003)
10443 	},
10444 	{
10445 		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
10446 			       0x1f51, 0x1004)
10447 	},
10448 	{
10449 		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
10450 			       0x1f51, 0x1005)
10451 	},
10452 	{
10453 		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
10454 			       0x1f51, 0x1006)
10455 	},
10456 	{
10457 		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
10458 			       0x1f51, 0x1007)
10459 	},
10460 	{
10461 		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
10462 			       0x1f51, 0x1008)
10463 	},
10464 	{
10465 		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
10466 			       0x1f51, 0x1009)
10467 	},
10468 	{
10469 		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
10470 			       0x1f51, 0x100a)
10471 	},
10472 	{
10473 		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
10474 			       0x1f51, 0x100e)
10475 	},
10476 	{
10477 		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
10478 			       0x1f51, 0x100f)
10479 	},
10480 	{
10481 		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
10482 			       0x1f51, 0x1010)
10483 	},
10484 	{
10485 		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
10486 			       0x1f51, 0x1011)
10487 	},
10488 	{
10489 		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
10490 			       0x1f51, 0x1043)
10491 	},
10492 	{
10493 		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
10494 			       0x1f51, 0x1044)
10495 	},
10496 	{
10497 		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
10498 			       0x1f51, 0x1045)
10499 	},
10500 	{
10501 		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
10502 			       0x1ff9, 0x00a3)
10503 	},
10504 	{
10505 		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
10506 			       PCI_ANY_ID, PCI_ANY_ID)
10507 	},
10508 	{ 0 }
10509 };
10510 
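/*
 * The final PCI_ANY_ID/PCI_ANY_ID entry above is a catch-all for 0x028f
 * functions whose subsystem IDs are not listed explicitly, and the
 * { 0 } sentinel terminates the table. Exporting the table below lets
 * depmod/udev autoload this module when a matching device appears.
 */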
10511 MODULE_DEVICE_TABLE(pci, pqi_pci_id_table);
10512 
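/*
 * Glue for the PCI core: .probe and .remove drive controller bring-up
 * and teardown, .shutdown gives the controller a chance to flush its
 * cache on reboot, and the PM ops are wired up only when the kernel is
 * built with power-management support.
 */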
10513 static struct pci_driver pqi_pci_driver = {
10514 	.name = DRIVER_NAME_SHORT,
10515 	.id_table = pqi_pci_id_table,
10516 	.probe = pqi_pci_probe,
10517 	.remove = pqi_pci_remove,
10518 	.shutdown = pqi_shutdown,
10519 #if defined(CONFIG_PM)
10520 	.driver = {
10521 		.pm = &pqi_pm_ops
10522 	},
10523 #endif
10524 };
10525 
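/*
 * Module entry point. Order matters here: the structure-layout checks
 * (all BUILD_BUG_ON, so effectively compile-time) come first, the SAS
 * transport template must exist before any probe can attach SAS
 * devices, and a failed pci_register_driver() unwinds the transport
 * attachment so the module load fails cleanly.
 */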
10526 static int __init pqi_init(void)
10527 {
10528 	int rc;
10529 
10530 	pr_info(DRIVER_NAME "\n");
10531 	pqi_verify_structures();
10532 	sis_verify_structures();
10533 
10534 	pqi_sas_transport_template = sas_attach_transport(&pqi_sas_transport_functions);
10535 	if (!pqi_sas_transport_template)
10536 		return -ENODEV;
10537 
10538 	pqi_process_module_params();
10539 
10540 	rc = pci_register_driver(&pqi_pci_driver);
10541 	if (rc)
10542 		sas_release_transport(pqi_sas_transport_template);
10543 
10544 	return rc;
10545 }
10546 
10547 static void __exit pqi_cleanup(void)
10548 {
10549 	pci_unregister_driver(&pqi_pci_driver);
10550 	sas_release_transport(pqi_sas_transport_template);
10551 }
10552 
10553 module_init(pqi_init);
10554 module_exit(pqi_cleanup);
10555 
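/*
 * Every check in here is a BUILD_BUG_ON, so this function compiles to
 * nothing; its only job is to fail the build if a structure in
 * smartpqi.h drifts from the on-the-wire layout that the controller
 * firmware expects.
 */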
10556 static void pqi_verify_structures(void)
10557 {
10558 	BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers,
10559 		sis_host_to_ctrl_doorbell) != 0x20);
10560 	BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers,
10561 		sis_interrupt_mask) != 0x34);
10562 	BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers,
10563 		sis_ctrl_to_host_doorbell) != 0x9c);
10564 	BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers,
10565 		sis_ctrl_to_host_doorbell_clear) != 0xa0);
10566 	BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers,
10567 		sis_driver_scratch) != 0xb0);
10568 	BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers,
10569 		sis_product_identifier) != 0xb4);
10570 	BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers,
10571 		sis_firmware_status) != 0xbc);
10572 	BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers,
10573 		sis_ctrl_shutdown_reason_code) != 0xcc);
10574 	BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers,
10575 		sis_mailbox) != 0x1000);
10576 	BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers,
10577 		pqi_registers) != 0x4000);
10578 
10579 	BUILD_BUG_ON(offsetof(struct pqi_iu_header,
10580 		iu_type) != 0x0);
10581 	BUILD_BUG_ON(offsetof(struct pqi_iu_header,
10582 		iu_length) != 0x2);
10583 	BUILD_BUG_ON(offsetof(struct pqi_iu_header,
10584 		response_queue_id) != 0x4);
10585 	BUILD_BUG_ON(offsetof(struct pqi_iu_header,
10586 		driver_flags) != 0x6);
10587 	BUILD_BUG_ON(sizeof(struct pqi_iu_header) != 0x8);
10588 
10589 	BUILD_BUG_ON(offsetof(struct pqi_aio_error_info,
10590 		status) != 0x0);
10591 	BUILD_BUG_ON(offsetof(struct pqi_aio_error_info,
10592 		service_response) != 0x1);
10593 	BUILD_BUG_ON(offsetof(struct pqi_aio_error_info,
10594 		data_present) != 0x2);
10595 	BUILD_BUG_ON(offsetof(struct pqi_aio_error_info,
10596 		reserved) != 0x3);
10597 	BUILD_BUG_ON(offsetof(struct pqi_aio_error_info,
10598 		residual_count) != 0x4);
10599 	BUILD_BUG_ON(offsetof(struct pqi_aio_error_info,
10600 		data_length) != 0x8);
10601 	BUILD_BUG_ON(offsetof(struct pqi_aio_error_info,
10602 		reserved1) != 0xa);
10603 	BUILD_BUG_ON(offsetof(struct pqi_aio_error_info,
10604 		data) != 0xc);
10605 	BUILD_BUG_ON(sizeof(struct pqi_aio_error_info) != 0x10c);
10606 
10607 	BUILD_BUG_ON(offsetof(struct pqi_raid_error_info,
10608 		data_in_result) != 0x0);
10609 	BUILD_BUG_ON(offsetof(struct pqi_raid_error_info,
10610 		data_out_result) != 0x1);
10611 	BUILD_BUG_ON(offsetof(struct pqi_raid_error_info,
10612 		reserved) != 0x2);
10613 	BUILD_BUG_ON(offsetof(struct pqi_raid_error_info,
10614 		status) != 0x5);
10615 	BUILD_BUG_ON(offsetof(struct pqi_raid_error_info,
10616 		status_qualifier) != 0x6);
10617 	BUILD_BUG_ON(offsetof(struct pqi_raid_error_info,
10618 		sense_data_length) != 0x8);
10619 	BUILD_BUG_ON(offsetof(struct pqi_raid_error_info,
10620 		response_data_length) != 0xa);
10621 	BUILD_BUG_ON(offsetof(struct pqi_raid_error_info,
10622 		data_in_transferred) != 0xc);
10623 	BUILD_BUG_ON(offsetof(struct pqi_raid_error_info,
10624 		data_out_transferred) != 0x10);
10625 	BUILD_BUG_ON(offsetof(struct pqi_raid_error_info,
10626 		data) != 0x14);
10627 	BUILD_BUG_ON(sizeof(struct pqi_raid_error_info) != 0x114);
10628 
10629 	BUILD_BUG_ON(offsetof(struct pqi_device_registers,
10630 		signature) != 0x0);
10631 	BUILD_BUG_ON(offsetof(struct pqi_device_registers,
10632 		function_and_status_code) != 0x8);
10633 	BUILD_BUG_ON(offsetof(struct pqi_device_registers,
10634 		max_admin_iq_elements) != 0x10);
10635 	BUILD_BUG_ON(offsetof(struct pqi_device_registers,
10636 		max_admin_oq_elements) != 0x11);
10637 	BUILD_BUG_ON(offsetof(struct pqi_device_registers,
10638 		admin_iq_element_length) != 0x12);
10639 	BUILD_BUG_ON(offsetof(struct pqi_device_registers,
10640 		admin_oq_element_length) != 0x13);
10641 	BUILD_BUG_ON(offsetof(struct pqi_device_registers,
10642 		max_reset_timeout) != 0x14);
10643 	BUILD_BUG_ON(offsetof(struct pqi_device_registers,
10644 		legacy_intx_status) != 0x18);
10645 	BUILD_BUG_ON(offsetof(struct pqi_device_registers,
10646 		legacy_intx_mask_set) != 0x1c);
10647 	BUILD_BUG_ON(offsetof(struct pqi_device_registers,
10648 		legacy_intx_mask_clear) != 0x20);
10649 	BUILD_BUG_ON(offsetof(struct pqi_device_registers,
10650 		device_status) != 0x40);
10651 	BUILD_BUG_ON(offsetof(struct pqi_device_registers,
10652 		admin_iq_pi_offset) != 0x48);
10653 	BUILD_BUG_ON(offsetof(struct pqi_device_registers,
10654 		admin_oq_ci_offset) != 0x50);
10655 	BUILD_BUG_ON(offsetof(struct pqi_device_registers,
10656 		admin_iq_element_array_addr) != 0x58);
10657 	BUILD_BUG_ON(offsetof(struct pqi_device_registers,
10658 		admin_oq_element_array_addr) != 0x60);
10659 	BUILD_BUG_ON(offsetof(struct pqi_device_registers,
10660 		admin_iq_ci_addr) != 0x68);
10661 	BUILD_BUG_ON(offsetof(struct pqi_device_registers,
10662 		admin_oq_pi_addr) != 0x70);
10663 	BUILD_BUG_ON(offsetof(struct pqi_device_registers,
10664 		admin_iq_num_elements) != 0x78);
10665 	BUILD_BUG_ON(offsetof(struct pqi_device_registers,
10666 		admin_oq_num_elements) != 0x79);
10667 	BUILD_BUG_ON(offsetof(struct pqi_device_registers,
10668 		admin_queue_int_msg_num) != 0x7a);
10669 	BUILD_BUG_ON(offsetof(struct pqi_device_registers,
10670 		device_error) != 0x80);
10671 	BUILD_BUG_ON(offsetof(struct pqi_device_registers,
10672 		error_details) != 0x88);
10673 	BUILD_BUG_ON(offsetof(struct pqi_device_registers,
10674 		device_reset) != 0x90);
10675 	BUILD_BUG_ON(offsetof(struct pqi_device_registers,
10676 		power_action) != 0x94);
10677 	BUILD_BUG_ON(sizeof(struct pqi_device_registers) != 0x100);
10678 
10679 	BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
10680 		header.iu_type) != 0);
10681 	BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
10682 		header.iu_length) != 2);
10683 	BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
10684 		header.driver_flags) != 6);
10685 	BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
10686 		request_id) != 8);
10687 	BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
10688 		function_code) != 10);
10689 	BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
10690 		data.report_device_capability.buffer_length) != 44);
10691 	BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
10692 		data.report_device_capability.sg_descriptor) != 48);
10693 	BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
10694 		data.create_operational_iq.queue_id) != 12);
10695 	BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
10696 		data.create_operational_iq.element_array_addr) != 16);
10697 	BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
10698 		data.create_operational_iq.ci_addr) != 24);
10699 	BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
10700 		data.create_operational_iq.num_elements) != 32);
10701 	BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
10702 		data.create_operational_iq.element_length) != 34);
10703 	BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
10704 		data.create_operational_iq.queue_protocol) != 36);
10705 	BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
10706 		data.create_operational_oq.queue_id) != 12);
10707 	BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
10708 		data.create_operational_oq.element_array_addr) != 16);
10709 	BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
10710 		data.create_operational_oq.pi_addr) != 24);
10711 	BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
10712 		data.create_operational_oq.num_elements) != 32);
10713 	BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
10714 		data.create_operational_oq.element_length) != 34);
10715 	BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
10716 		data.create_operational_oq.queue_protocol) != 36);
10717 	BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
10718 		data.create_operational_oq.int_msg_num) != 40);
10719 	BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
10720 		data.create_operational_oq.coalescing_count) != 42);
10721 	BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
10722 		data.create_operational_oq.min_coalescing_time) != 44);
10723 	BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
10724 		data.create_operational_oq.max_coalescing_time) != 48);
10725 	BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
10726 		data.delete_operational_queue.queue_id) != 12);
10727 	BUILD_BUG_ON(sizeof(struct pqi_general_admin_request) != 64);
10728 	BUILD_BUG_ON(sizeof_field(struct pqi_general_admin_request,
10729 		data.create_operational_iq) != 64 - 11);
10730 	BUILD_BUG_ON(sizeof_field(struct pqi_general_admin_request,
10731 		data.create_operational_oq) != 64 - 11);
10732 	BUILD_BUG_ON(sizeof_field(struct pqi_general_admin_request,
10733 		data.delete_operational_queue) != 64 - 11);
10734 
10735 	BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
10736 		header.iu_type) != 0);
10737 	BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
10738 		header.iu_length) != 2);
10739 	BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
10740 		header.driver_flags) != 6);
10741 	BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
10742 		request_id) != 8);
10743 	BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
10744 		function_code) != 10);
10745 	BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
10746 		status) != 11);
10747 	BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
10748 		data.create_operational_iq.status_descriptor) != 12);
10749 	BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
10750 		data.create_operational_iq.iq_pi_offset) != 16);
10751 	BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
10752 		data.create_operational_oq.status_descriptor) != 12);
10753 	BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
10754 		data.create_operational_oq.oq_ci_offset) != 16);
10755 	BUILD_BUG_ON(sizeof(struct pqi_general_admin_response) != 64);
10756 
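	/*
	 * The RAID-path and AIO-path request IUs are laid out to fill
	 * exactly one operational inbound-queue element, hence the
	 * sizeof() checks against PQI_OPERATIONAL_IQ_ELEMENT_LENGTH
	 * below.
	 */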
10757 	BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
10758 		header.iu_type) != 0);
10759 	BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
10760 		header.iu_length) != 2);
10761 	BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
10762 		header.response_queue_id) != 4);
10763 	BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
10764 		header.driver_flags) != 6);
10765 	BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
10766 		request_id) != 8);
10767 	BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
10768 		nexus_id) != 10);
10769 	BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
10770 		buffer_length) != 12);
10771 	BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
10772 		lun_number) != 16);
10773 	BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
10774 		protocol_specific) != 24);
10775 	BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
10776 		error_index) != 27);
10777 	BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
10778 		cdb) != 32);
10779 	BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
10780 		timeout) != 60);
10781 	BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
10782 		sg_descriptors) != 64);
10783 	BUILD_BUG_ON(sizeof(struct pqi_raid_path_request) !=
10784 		PQI_OPERATIONAL_IQ_ELEMENT_LENGTH);
10785 
10786 	BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
10787 		header.iu_type) != 0);
10788 	BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
10789 		header.iu_length) != 2);
10790 	BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
10791 		header.response_queue_id) != 4);
10792 	BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
10793 		header.driver_flags) != 6);
10794 	BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
10795 		request_id) != 8);
10796 	BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
10797 		nexus_id) != 12);
10798 	BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
10799 		buffer_length) != 16);
10800 	BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
10801 		data_encryption_key_index) != 22);
10802 	BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
10803 		encrypt_tweak_lower) != 24);
10804 	BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
10805 		encrypt_tweak_upper) != 28);
10806 	BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
10807 		cdb) != 32);
10808 	BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
10809 		error_index) != 48);
10810 	BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
10811 		num_sg_descriptors) != 50);
10812 	BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
10813 		cdb_length) != 51);
10814 	BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
10815 		lun_number) != 52);
10816 	BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
10817 		sg_descriptors) != 64);
10818 	BUILD_BUG_ON(sizeof(struct pqi_aio_path_request) !=
10819 		PQI_OPERATIONAL_IQ_ELEMENT_LENGTH);
10820 
10821 	BUILD_BUG_ON(offsetof(struct pqi_io_response,
10822 		header.iu_type) != 0);
10823 	BUILD_BUG_ON(offsetof(struct pqi_io_response,
10824 		header.iu_length) != 2);
10825 	BUILD_BUG_ON(offsetof(struct pqi_io_response,
10826 		request_id) != 8);
10827 	BUILD_BUG_ON(offsetof(struct pqi_io_response,
10828 		error_index) != 10);
10829 
10830 	BUILD_BUG_ON(offsetof(struct pqi_general_management_request,
10831 		header.iu_type) != 0);
10832 	BUILD_BUG_ON(offsetof(struct pqi_general_management_request,
10833 		header.iu_length) != 2);
10834 	BUILD_BUG_ON(offsetof(struct pqi_general_management_request,
10835 		header.response_queue_id) != 4);
10836 	BUILD_BUG_ON(offsetof(struct pqi_general_management_request,
10837 		request_id) != 8);
10838 	BUILD_BUG_ON(offsetof(struct pqi_general_management_request,
10839 		data.report_event_configuration.buffer_length) != 12);
10840 	BUILD_BUG_ON(offsetof(struct pqi_general_management_request,
10841 		data.report_event_configuration.sg_descriptors) != 16);
10842 	BUILD_BUG_ON(offsetof(struct pqi_general_management_request,
10843 		data.set_event_configuration.global_event_oq_id) != 10);
10844 	BUILD_BUG_ON(offsetof(struct pqi_general_management_request,
10845 		data.set_event_configuration.buffer_length) != 12);
10846 	BUILD_BUG_ON(offsetof(struct pqi_general_management_request,
10847 		data.set_event_configuration.sg_descriptors) != 16);
10848 
10849 	BUILD_BUG_ON(offsetof(struct pqi_iu_layer_descriptor,
10850 		max_inbound_iu_length) != 6);
10851 	BUILD_BUG_ON(offsetof(struct pqi_iu_layer_descriptor,
10852 		max_outbound_iu_length) != 14);
10853 	BUILD_BUG_ON(sizeof(struct pqi_iu_layer_descriptor) != 16);
10854 
10855 	BUILD_BUG_ON(offsetof(struct pqi_device_capability,
10856 		data_length) != 0);
10857 	BUILD_BUG_ON(offsetof(struct pqi_device_capability,
10858 		iq_arbitration_priority_support_bitmask) != 8);
10859 	BUILD_BUG_ON(offsetof(struct pqi_device_capability,
10860 		maximum_aw_a) != 9);
10861 	BUILD_BUG_ON(offsetof(struct pqi_device_capability,
10862 		maximum_aw_b) != 10);
10863 	BUILD_BUG_ON(offsetof(struct pqi_device_capability,
10864 		maximum_aw_c) != 11);
10865 	BUILD_BUG_ON(offsetof(struct pqi_device_capability,
10866 		max_inbound_queues) != 16);
10867 	BUILD_BUG_ON(offsetof(struct pqi_device_capability,
10868 		max_elements_per_iq) != 18);
10869 	BUILD_BUG_ON(offsetof(struct pqi_device_capability,
10870 		max_iq_element_length) != 24);
10871 	BUILD_BUG_ON(offsetof(struct pqi_device_capability,
10872 		min_iq_element_length) != 26);
10873 	BUILD_BUG_ON(offsetof(struct pqi_device_capability,
10874 		max_outbound_queues) != 30);
10875 	BUILD_BUG_ON(offsetof(struct pqi_device_capability,
10876 		max_elements_per_oq) != 32);
10877 	BUILD_BUG_ON(offsetof(struct pqi_device_capability,
10878 		intr_coalescing_time_granularity) != 34);
10879 	BUILD_BUG_ON(offsetof(struct pqi_device_capability,
10880 		max_oq_element_length) != 36);
10881 	BUILD_BUG_ON(offsetof(struct pqi_device_capability,
10882 		min_oq_element_length) != 38);
10883 	BUILD_BUG_ON(offsetof(struct pqi_device_capability,
10884 		iu_layer_descriptors) != 64);
10885 	BUILD_BUG_ON(sizeof(struct pqi_device_capability) != 576);
10886 
10887 	BUILD_BUG_ON(offsetof(struct pqi_event_descriptor,
10888 		event_type) != 0);
10889 	BUILD_BUG_ON(offsetof(struct pqi_event_descriptor,
10890 		oq_id) != 2);
10891 	BUILD_BUG_ON(sizeof(struct pqi_event_descriptor) != 4);
10892 
10893 	BUILD_BUG_ON(offsetof(struct pqi_event_config,
10894 		num_event_descriptors) != 2);
10895 	BUILD_BUG_ON(offsetof(struct pqi_event_config,
10896 		descriptors) != 4);
10897 
10898 	BUILD_BUG_ON(PQI_NUM_SUPPORTED_EVENTS !=
10899 		ARRAY_SIZE(pqi_supported_event_types));
10900 
10901 	BUILD_BUG_ON(offsetof(struct pqi_event_response,
10902 		header.iu_type) != 0);
10903 	BUILD_BUG_ON(offsetof(struct pqi_event_response,
10904 		header.iu_length) != 2);
10905 	BUILD_BUG_ON(offsetof(struct pqi_event_response,
10906 		event_type) != 8);
10907 	BUILD_BUG_ON(offsetof(struct pqi_event_response,
10908 		event_id) != 10);
10909 	BUILD_BUG_ON(offsetof(struct pqi_event_response,
10910 		additional_event_id) != 12);
10911 	BUILD_BUG_ON(offsetof(struct pqi_event_response,
10912 		data) != 16);
10913 	BUILD_BUG_ON(sizeof(struct pqi_event_response) != 32);
10914 
10915 	BUILD_BUG_ON(offsetof(struct pqi_event_acknowledge_request,
10916 		header.iu_type) != 0);
10917 	BUILD_BUG_ON(offsetof(struct pqi_event_acknowledge_request,
10918 		header.iu_length) != 2);
10919 	BUILD_BUG_ON(offsetof(struct pqi_event_acknowledge_request,
10920 		event_type) != 8);
10921 	BUILD_BUG_ON(offsetof(struct pqi_event_acknowledge_request,
10922 		event_id) != 10);
10923 	BUILD_BUG_ON(offsetof(struct pqi_event_acknowledge_request,
10924 		additional_event_id) != 12);
10925 	BUILD_BUG_ON(sizeof(struct pqi_event_acknowledge_request) != 16);
10926 
10927 	BUILD_BUG_ON(offsetof(struct pqi_task_management_request,
10928 		header.iu_type) != 0);
10929 	BUILD_BUG_ON(offsetof(struct pqi_task_management_request,
10930 		header.iu_length) != 2);
10931 	BUILD_BUG_ON(offsetof(struct pqi_task_management_request,
10932 		request_id) != 8);
10933 	BUILD_BUG_ON(offsetof(struct pqi_task_management_request,
10934 		nexus_id) != 10);
10935 	BUILD_BUG_ON(offsetof(struct pqi_task_management_request,
10936 		timeout) != 14);
10937 	BUILD_BUG_ON(offsetof(struct pqi_task_management_request,
10938 		lun_number) != 16);
10939 	BUILD_BUG_ON(offsetof(struct pqi_task_management_request,
10940 		protocol_specific) != 24);
10941 	BUILD_BUG_ON(offsetof(struct pqi_task_management_request,
10942 		outbound_queue_id_to_manage) != 26);
10943 	BUILD_BUG_ON(offsetof(struct pqi_task_management_request,
10944 		request_id_to_manage) != 28);
10945 	BUILD_BUG_ON(offsetof(struct pqi_task_management_request,
10946 		task_management_function) != 30);
10947 	BUILD_BUG_ON(sizeof(struct pqi_task_management_request) != 32);
10948 
10949 	BUILD_BUG_ON(offsetof(struct pqi_task_management_response,
10950 		header.iu_type) != 0);
10951 	BUILD_BUG_ON(offsetof(struct pqi_task_management_response,
10952 		header.iu_length) != 2);
10953 	BUILD_BUG_ON(offsetof(struct pqi_task_management_response,
10954 		request_id) != 8);
10955 	BUILD_BUG_ON(offsetof(struct pqi_task_management_response,
10956 		nexus_id) != 10);
10957 	BUILD_BUG_ON(offsetof(struct pqi_task_management_response,
10958 		additional_response_info) != 12);
10959 	BUILD_BUG_ON(offsetof(struct pqi_task_management_response,
10960 		response_code) != 15);
10961 	BUILD_BUG_ON(sizeof(struct pqi_task_management_response) != 16);
10962 
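	/*
	 * The BMIC structures are firmware-defined (legacy SmartRAID
	 * pass-through commands), so only the offsets the driver
	 * actually dereferences are spot-checked, not every field.
	 */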
10963 	BUILD_BUG_ON(offsetof(struct bmic_identify_controller,
10964 		configured_logical_drive_count) != 0);
10965 	BUILD_BUG_ON(offsetof(struct bmic_identify_controller,
10966 		configuration_signature) != 1);
10967 	BUILD_BUG_ON(offsetof(struct bmic_identify_controller,
10968 		firmware_version_short) != 5);
10969 	BUILD_BUG_ON(offsetof(struct bmic_identify_controller,
10970 		extended_logical_unit_count) != 154);
10971 	BUILD_BUG_ON(offsetof(struct bmic_identify_controller,
10972 		firmware_build_number) != 190);
10973 	BUILD_BUG_ON(offsetof(struct bmic_identify_controller,
10974 		vendor_id) != 200);
10975 	BUILD_BUG_ON(offsetof(struct bmic_identify_controller,
10976 		product_id) != 208);
10977 	BUILD_BUG_ON(offsetof(struct bmic_identify_controller,
10978 		extra_controller_flags) != 286);
10979 	BUILD_BUG_ON(offsetof(struct bmic_identify_controller,
10980 		controller_mode) != 292);
10981 	BUILD_BUG_ON(offsetof(struct bmic_identify_controller,
10982 		spare_part_number) != 293);
10983 	BUILD_BUG_ON(offsetof(struct bmic_identify_controller,
10984 		firmware_version_long) != 325);
10985 
10986 	BUILD_BUG_ON(offsetof(struct bmic_identify_physical_device,
10987 		phys_bay_in_box) != 115);
10988 	BUILD_BUG_ON(offsetof(struct bmic_identify_physical_device,
10989 		device_type) != 120);
10990 	BUILD_BUG_ON(offsetof(struct bmic_identify_physical_device,
10991 		redundant_path_present_map) != 1736);
10992 	BUILD_BUG_ON(offsetof(struct bmic_identify_physical_device,
10993 		active_path_number) != 1738);
10994 	BUILD_BUG_ON(offsetof(struct bmic_identify_physical_device,
10995 		alternate_paths_phys_connector) != 1739);
10996 	BUILD_BUG_ON(offsetof(struct bmic_identify_physical_device,
10997 		alternate_paths_phys_box_on_port) != 1755);
10998 	BUILD_BUG_ON(offsetof(struct bmic_identify_physical_device,
10999 		current_queue_depth_limit) != 1796);
11000 	BUILD_BUG_ON(sizeof(struct bmic_identify_physical_device) != 2560);
11001 
11002 	BUILD_BUG_ON(sizeof(struct bmic_sense_feature_buffer_header) != 4);
11003 	BUILD_BUG_ON(offsetof(struct bmic_sense_feature_buffer_header,
11004 		page_code) != 0);
11005 	BUILD_BUG_ON(offsetof(struct bmic_sense_feature_buffer_header,
11006 		subpage_code) != 1);
11007 	BUILD_BUG_ON(offsetof(struct bmic_sense_feature_buffer_header,
11008 		buffer_length) != 2);
11009 
11010 	BUILD_BUG_ON(sizeof(struct bmic_sense_feature_page_header) != 4);
11011 	BUILD_BUG_ON(offsetof(struct bmic_sense_feature_page_header,
11012 		page_code) != 0);
11013 	BUILD_BUG_ON(offsetof(struct bmic_sense_feature_page_header,
11014 		subpage_code) != 1);
11015 	BUILD_BUG_ON(offsetof(struct bmic_sense_feature_page_header,
11016 		page_length) != 2);
11017 
11018 	BUILD_BUG_ON(sizeof(struct bmic_sense_feature_io_page_aio_subpage)
11019 		!= 18);
11020 	BUILD_BUG_ON(offsetof(struct bmic_sense_feature_io_page_aio_subpage,
11021 		header) != 0);
11022 	BUILD_BUG_ON(offsetof(struct bmic_sense_feature_io_page_aio_subpage,
11023 		firmware_read_support) != 4);
11024 	BUILD_BUG_ON(offsetof(struct bmic_sense_feature_io_page_aio_subpage,
11025 		driver_read_support) != 5);
11026 	BUILD_BUG_ON(offsetof(struct bmic_sense_feature_io_page_aio_subpage,
11027 		firmware_write_support) != 6);
11028 	BUILD_BUG_ON(offsetof(struct bmic_sense_feature_io_page_aio_subpage,
11029 		driver_write_support) != 7);
11030 	BUILD_BUG_ON(offsetof(struct bmic_sense_feature_io_page_aio_subpage,
11031 		max_transfer_encrypted_sas_sata) != 8);
11032 	BUILD_BUG_ON(offsetof(struct bmic_sense_feature_io_page_aio_subpage,
11033 		max_transfer_encrypted_nvme) != 10);
11034 	BUILD_BUG_ON(offsetof(struct bmic_sense_feature_io_page_aio_subpage,
11035 		max_write_raid_5_6) != 12);
11036 	BUILD_BUG_ON(offsetof(struct bmic_sense_feature_io_page_aio_subpage,
11037 		max_write_raid_1_10_2drive) != 14);
11038 	BUILD_BUG_ON(offsetof(struct bmic_sense_feature_io_page_aio_subpage,
11039 		max_write_raid_1_10_3drive) != 16);
11040 
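	/*
	 * Queue-sizing invariants: the admin element counts must fit
	 * the single-byte registers at 0x78/0x79 (checked above),
	 * element lengths must be multiples of the PQI alignment unit,
	 * and 1048560 is the largest length expressible in a 16-bit
	 * field of 16-byte units (0xffff * 16). The reserved internal
	 * I/O slots must leave room for at least one host request.
	 */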
11041 	BUILD_BUG_ON(PQI_ADMIN_IQ_NUM_ELEMENTS > 255);
11042 	BUILD_BUG_ON(PQI_ADMIN_OQ_NUM_ELEMENTS > 255);
11043 	BUILD_BUG_ON(PQI_ADMIN_IQ_ELEMENT_LENGTH %
11044 		PQI_QUEUE_ELEMENT_LENGTH_ALIGNMENT != 0);
11045 	BUILD_BUG_ON(PQI_ADMIN_OQ_ELEMENT_LENGTH %
11046 		PQI_QUEUE_ELEMENT_LENGTH_ALIGNMENT != 0);
11047 	BUILD_BUG_ON(PQI_OPERATIONAL_IQ_ELEMENT_LENGTH > 1048560);
11048 	BUILD_BUG_ON(PQI_OPERATIONAL_IQ_ELEMENT_LENGTH %
11049 		PQI_QUEUE_ELEMENT_LENGTH_ALIGNMENT != 0);
11050 	BUILD_BUG_ON(PQI_OPERATIONAL_OQ_ELEMENT_LENGTH > 1048560);
11051 	BUILD_BUG_ON(PQI_OPERATIONAL_OQ_ELEMENT_LENGTH %
11052 		PQI_QUEUE_ELEMENT_LENGTH_ALIGNMENT != 0);
11053 
11054 	BUILD_BUG_ON(PQI_RESERVED_IO_SLOTS >= PQI_MAX_OUTSTANDING_REQUESTS);
11055 	BUILD_BUG_ON(PQI_RESERVED_IO_SLOTS >=
11056 		PQI_MAX_OUTSTANDING_REQUESTS_KDUMP);
11057 }
11058