// SPDX-License-Identifier: GPL-2.0
/*
 *    driver for Microchip PQI-based storage controllers
 *    Copyright (c) 2019-2023 Microchip Technology Inc. and its subsidiaries
 *    Copyright (c) 2016-2018 Microsemi Corporation
 *    Copyright (c) 2016 PMC-Sierra, Inc.
 *
 *    Questions/Comments/Bugfixes to storagedev@microchip.com
 *
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <linux/rtc.h>
#include <linux/bcd.h>
#include <linux/reboot.h>
#include <linux/cciss_ioctl.h>
#include <linux/crash_dump.h>
#include <linux/string.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_eh.h>
#include <scsi/scsi_transport_sas.h>
#include <linux/unaligned.h>
#include "smartpqi.h"
#include "smartpqi_sis.h"

#if !defined(BUILD_TIMESTAMP)
#define BUILD_TIMESTAMP
#endif

#define DRIVER_VERSION		"2.1.36-026"
#define DRIVER_MAJOR		2
#define DRIVER_MINOR		1
#define DRIVER_RELEASE		36
#define DRIVER_REVISION		26

#define DRIVER_NAME		"Microchip SmartPQI Driver (v" \
				DRIVER_VERSION BUILD_TIMESTAMP ")"
#define DRIVER_NAME_SHORT	"smartpqi"

#define PQI_EXTRA_SGL_MEMORY	(12 * sizeof(struct pqi_sg_descriptor))

#define PQI_POST_RESET_DELAY_SECS			5
#define PQI_POST_OFA_RESET_DELAY_UPON_TIMEOUT_SECS	10

#define PQI_NO_COMPLETION	((void *)-1)

MODULE_AUTHOR("Microchip");
MODULE_DESCRIPTION("Driver for Microchip Smart Family Controller version "
	DRIVER_VERSION);
MODULE_VERSION(DRIVER_VERSION);
MODULE_LICENSE("GPL");

struct pqi_cmd_priv {
	int this_residual;
};

static struct pqi_cmd_priv *pqi_cmd_priv(struct scsi_cmnd *cmd)
{
	return scsi_cmd_priv(cmd);
}

static void pqi_verify_structures(void);
static void pqi_take_ctrl_offline(struct pqi_ctrl_info *ctrl_info,
	enum pqi_ctrl_shutdown_reason ctrl_shutdown_reason);
static void pqi_take_ctrl_devices_offline(struct pqi_ctrl_info *ctrl_info);
static void pqi_ctrl_offline_worker(struct work_struct *work);
static int pqi_scan_scsi_devices(struct pqi_ctrl_info *ctrl_info);
static void pqi_scan_start(struct Scsi_Host *shost);
static void pqi_start_io(struct pqi_ctrl_info *ctrl_info,
	struct pqi_queue_group *queue_group, enum pqi_io_path path,
	struct pqi_io_request *io_request);
static int pqi_submit_raid_request_synchronous(struct pqi_ctrl_info *ctrl_info,
	struct pqi_iu_header *request, unsigned int flags,
	struct pqi_raid_error_info *error_info);
static int pqi_aio_submit_io(struct pqi_ctrl_info *ctrl_info,
	struct scsi_cmnd *scmd, u32 aio_handle, u8 *cdb,
	unsigned int cdb_length, struct pqi_queue_group *queue_group,
	struct pqi_encryption_info *encryption_info, bool raid_bypass, bool io_high_prio);
static int pqi_aio_submit_r1_write_io(struct pqi_ctrl_info *ctrl_info,
	struct scsi_cmnd *scmd, struct pqi_queue_group *queue_group,
	struct pqi_encryption_info *encryption_info, struct pqi_scsi_dev *device,
	struct pqi_scsi_dev_raid_map_data *rmd);
static int pqi_aio_submit_r56_write_io(struct pqi_ctrl_info *ctrl_info,
	struct scsi_cmnd *scmd, struct pqi_queue_group *queue_group,
	struct pqi_encryption_info *encryption_info, struct pqi_scsi_dev *device,
	struct pqi_scsi_dev_raid_map_data *rmd);
static void pqi_ofa_ctrl_quiesce(struct pqi_ctrl_info *ctrl_info);
static void pqi_ofa_ctrl_unquiesce(struct pqi_ctrl_info *ctrl_info);
static int pqi_ofa_ctrl_restart(struct pqi_ctrl_info *ctrl_info, unsigned int delay_secs);
static void pqi_host_setup_buffer(struct pqi_ctrl_info *ctrl_info, struct pqi_host_memory_descriptor *host_memory_descriptor, u32 total_size, u32 min_size);
static void pqi_host_free_buffer(struct pqi_ctrl_info *ctrl_info, struct pqi_host_memory_descriptor *host_memory_descriptor);
static int pqi_host_memory_update(struct pqi_ctrl_info *ctrl_info, struct pqi_host_memory_descriptor *host_memory_descriptor, u16 function_code);
static int pqi_device_wait_for_pending_io(struct pqi_ctrl_info *ctrl_info,
	struct pqi_scsi_dev *device, u8 lun, unsigned long timeout_msecs);
static void pqi_fail_all_outstanding_requests(struct pqi_ctrl_info *ctrl_info);
static void pqi_tmf_worker(struct work_struct *work);

/* for flags argument to pqi_submit_raid_request_synchronous() */
#define PQI_SYNC_FLAGS_INTERRUPTABLE	0x1

static struct scsi_transport_template *pqi_sas_transport_template;

static atomic_t pqi_controller_count = ATOMIC_INIT(0);

enum pqi_lockup_action {
	NONE,
	REBOOT,
	PANIC
};

static enum pqi_lockup_action pqi_lockup_action = NONE;

static struct {
	enum pqi_lockup_action	action;
	char			*name;
} pqi_lockup_actions[] = {
	{
		.action = NONE,
		.name = "none",
	},
	{
		.action = REBOOT,
		.name = "reboot",
	},
	{
		.action = PANIC,
		.name = "panic",
	},
};

static unsigned int pqi_supported_event_types[] = {
	PQI_EVENT_TYPE_HOTPLUG,
	PQI_EVENT_TYPE_HARDWARE,
	PQI_EVENT_TYPE_PHYSICAL_DEVICE,
	PQI_EVENT_TYPE_LOGICAL_DEVICE,
	PQI_EVENT_TYPE_OFA,
	PQI_EVENT_TYPE_AIO_STATE_CHANGE,
	PQI_EVENT_TYPE_AIO_CONFIG_CHANGE,
};

static int pqi_disable_device_id_wildcards;
module_param_named(disable_device_id_wildcards,
	pqi_disable_device_id_wildcards, int, 0644);
MODULE_PARM_DESC(disable_device_id_wildcards,
	"Disable device ID wildcards.");

static int pqi_disable_heartbeat;
module_param_named(disable_heartbeat,
	pqi_disable_heartbeat, int, 0644);
MODULE_PARM_DESC(disable_heartbeat,
	"Disable heartbeat.");

static int pqi_disable_ctrl_shutdown;
module_param_named(disable_ctrl_shutdown,
	pqi_disable_ctrl_shutdown, int, 0644);
MODULE_PARM_DESC(disable_ctrl_shutdown,
	"Disable controller shutdown when controller locked up.");

static char *pqi_lockup_action_param;
module_param_named(lockup_action,
	pqi_lockup_action_param, charp, 0644);
MODULE_PARM_DESC(lockup_action, "Action to take when controller locked up.\n"
	"\t\tSupported: none, reboot, panic\n"
	"\t\tDefault: none");

static int pqi_expose_ld_first;
module_param_named(expose_ld_first,
	pqi_expose_ld_first, int, 0644);
MODULE_PARM_DESC(expose_ld_first, "Expose logical drives before physical drives.");

static int pqi_hide_vsep;
module_param_named(hide_vsep,
	pqi_hide_vsep, int, 0644);
MODULE_PARM_DESC(hide_vsep, "Hide the virtual SEP for direct attached drives.");

static int pqi_disable_managed_interrupts;
module_param_named(disable_managed_interrupts,
	pqi_disable_managed_interrupts, int, 0644);
MODULE_PARM_DESC(disable_managed_interrupts,
	"Disable the kernel automatically assigning SMP affinity to IRQs.");

static unsigned int pqi_ctrl_ready_timeout_secs;
module_param_named(ctrl_ready_timeout,
	pqi_ctrl_ready_timeout_secs, uint, 0644);
MODULE_PARM_DESC(ctrl_ready_timeout,
	"Timeout in seconds for driver to wait for controller ready.");

static char *raid_levels[] = {
	"RAID-0",
	"RAID-4",
	"RAID-1(1+0)",
	"RAID-5",
	"RAID-5+1",
	"RAID-6",
	"RAID-1(Triple)",
};

static char *pqi_raid_level_to_string(u8 raid_level)
{
	if (raid_level < ARRAY_SIZE(raid_levels))
		return raid_levels[raid_level];

	return "RAID UNKNOWN";
}

#define SA_RAID_0		0
#define SA_RAID_4		1
#define SA_RAID_1		2	/* also used for RAID 10 */
#define SA_RAID_5		3	/* also used for RAID 50 */
#define SA_RAID_51		4
#define SA_RAID_6		5	/* also used for RAID 60 */
#define SA_RAID_TRIPLE		6	/* also used for RAID 1+0 Triple */
#define SA_RAID_MAX		SA_RAID_TRIPLE
#define SA_RAID_UNKNOWN		0xff

static inline void pqi_scsi_done(struct scsi_cmnd *scmd)
{
	pqi_prep_for_scsi_done(scmd);
	scsi_done(scmd);
}

static inline void pqi_disable_write_same(struct scsi_device *sdev)
{
	sdev->no_write_same = 1;
}

static inline bool pqi_scsi3addr_equal(u8 *scsi3addr1, u8 *scsi3addr2)
{
	return memcmp(scsi3addr1, scsi3addr2, 8) == 0;
}

static inline bool pqi_is_logical_device(struct pqi_scsi_dev *device)
{
	return !device->is_physical_device;
}

static inline bool pqi_is_external_raid_addr(u8 *scsi3addr)
{
	return scsi3addr[2] != 0;
}

static inline bool pqi_ctrl_offline(struct pqi_ctrl_info *ctrl_info)
{
	return !ctrl_info->controller_online;
}

static inline void pqi_check_ctrl_health(struct pqi_ctrl_info *ctrl_info)
{
	if (ctrl_info->controller_online)
		if (!sis_is_firmware_running(ctrl_info))
			pqi_take_ctrl_offline(ctrl_info, PQI_FIRMWARE_KERNEL_NOT_UP);
}

static inline bool pqi_is_hba_lunid(u8 *scsi3addr)
{
	return pqi_scsi3addr_equal(scsi3addr, RAID_CTLR_LUNID);
}

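/*
 * Bits the driver keeps in the SIS driver scratch register: the current
 * controller mode (PQI vs. SIS) and whether firmware triage is supported,
 * as read back by pqi_get_ctrl_mode() and pqi_is_fw_triage_supported().
 */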
#define PQI_DRIVER_SCRATCH_PQI_MODE			0x1
#define PQI_DRIVER_SCRATCH_FW_TRIAGE_SUPPORTED		0x2

static inline enum pqi_ctrl_mode pqi_get_ctrl_mode(struct pqi_ctrl_info *ctrl_info)
{
	return sis_read_driver_scratch(ctrl_info) & PQI_DRIVER_SCRATCH_PQI_MODE ? PQI_MODE : SIS_MODE;
}

static inline void pqi_save_ctrl_mode(struct pqi_ctrl_info *ctrl_info,
	enum pqi_ctrl_mode mode)
{
	u32 driver_scratch;

	driver_scratch = sis_read_driver_scratch(ctrl_info);

	if (mode == PQI_MODE)
		driver_scratch |= PQI_DRIVER_SCRATCH_PQI_MODE;
	else
		driver_scratch &= ~PQI_DRIVER_SCRATCH_PQI_MODE;

	sis_write_driver_scratch(ctrl_info, driver_scratch);
}

static inline bool pqi_is_fw_triage_supported(struct pqi_ctrl_info *ctrl_info)
{
	return (sis_read_driver_scratch(ctrl_info) & PQI_DRIVER_SCRATCH_FW_TRIAGE_SUPPORTED) != 0;
}

static inline void pqi_save_fw_triage_setting(struct pqi_ctrl_info *ctrl_info, bool is_supported)
{
	u32 driver_scratch;

	driver_scratch = sis_read_driver_scratch(ctrl_info);

	if (is_supported)
		driver_scratch |= PQI_DRIVER_SCRATCH_FW_TRIAGE_SUPPORTED;
	else
		driver_scratch &= ~PQI_DRIVER_SCRATCH_FW_TRIAGE_SUPPORTED;

	sis_write_driver_scratch(ctrl_info, driver_scratch);
}

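/*
 * Scan blocking: pqi_ctrl_block_scan() sets the scan_blocked flag and then
 * holds scan_mutex until pqi_ctrl_unblock_scan() runs, so a rescan can
 * either check the flag or block on the mutex while scanning is disabled.
 */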
static inline void pqi_ctrl_block_scan(struct pqi_ctrl_info *ctrl_info)
{
	ctrl_info->scan_blocked = true;
	mutex_lock(&ctrl_info->scan_mutex);
}

static inline void pqi_ctrl_unblock_scan(struct pqi_ctrl_info *ctrl_info)
{
	ctrl_info->scan_blocked = false;
	mutex_unlock(&ctrl_info->scan_mutex);
}

static inline bool pqi_ctrl_scan_blocked(struct pqi_ctrl_info *ctrl_info)
{
	return ctrl_info->scan_blocked;
}

static inline void pqi_ctrl_block_device_reset(struct pqi_ctrl_info *ctrl_info)
{
	mutex_lock(&ctrl_info->lun_reset_mutex);
}

static inline void pqi_ctrl_unblock_device_reset(struct pqi_ctrl_info *ctrl_info)
{
	mutex_unlock(&ctrl_info->lun_reset_mutex);
}

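/*
 * Block new requests from the SCSI midlayer, then poll scsi_host_busy()
 * until all outstanding commands have drained. The poll interval starts at
 * 20 ms and backs off to 500 ms after ten iterations.
 */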
static inline void pqi_scsi_block_requests(struct pqi_ctrl_info *ctrl_info)
{
	struct Scsi_Host *shost;
	unsigned int num_loops;
	int msecs_sleep;

	shost = ctrl_info->scsi_host;

	scsi_block_requests(shost);

	num_loops = 0;
	msecs_sleep = 20;
	while (scsi_host_busy(shost)) {
		num_loops++;
		if (num_loops == 10)
			msecs_sleep = 500;
		msleep(msecs_sleep);
	}
}

static inline void pqi_scsi_unblock_requests(struct pqi_ctrl_info *ctrl_info)
{
	scsi_unblock_requests(ctrl_info->scsi_host);
}

static inline void pqi_ctrl_busy(struct pqi_ctrl_info *ctrl_info)
{
	atomic_inc(&ctrl_info->num_busy_threads);
}

static inline void pqi_ctrl_unbusy(struct pqi_ctrl_info *ctrl_info)
{
	atomic_dec(&ctrl_info->num_busy_threads);
}

static inline bool pqi_ctrl_blocked(struct pqi_ctrl_info *ctrl_info)
{
	return ctrl_info->block_requests;
}

static inline void pqi_ctrl_block_requests(struct pqi_ctrl_info *ctrl_info)
{
	ctrl_info->block_requests = true;
}

static inline void pqi_ctrl_unblock_requests(struct pqi_ctrl_info *ctrl_info)
{
	ctrl_info->block_requests = false;
	wake_up_all(&ctrl_info->block_requests_wait);
}

static void pqi_wait_if_ctrl_blocked(struct pqi_ctrl_info *ctrl_info)
{
	if (!pqi_ctrl_blocked(ctrl_info))
		return;

	atomic_inc(&ctrl_info->num_blocked_threads);
	wait_event(ctrl_info->block_requests_wait,
		!pqi_ctrl_blocked(ctrl_info));
	atomic_dec(&ctrl_info->num_blocked_threads);
}

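/*
 * Wait for driver activity to quiesce: active threads bump
 * num_busy_threads, and threads parked in pqi_wait_if_ctrl_blocked() bump
 * num_blocked_threads, so the driver is quiesced once the busy count no
 * longer exceeds the blocked count. A warning is logged if the wait
 * exceeds the timeout below.
 */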
#define PQI_QUIESCE_WARNING_TIMEOUT_SECS	10

static inline void pqi_ctrl_wait_until_quiesced(struct pqi_ctrl_info *ctrl_info)
{
	unsigned long start_jiffies;
	unsigned long warning_timeout;
	bool displayed_warning;

	displayed_warning = false;
	start_jiffies = jiffies;
	warning_timeout = (PQI_QUIESCE_WARNING_TIMEOUT_SECS * HZ) + start_jiffies;

	while (atomic_read(&ctrl_info->num_busy_threads) >
		atomic_read(&ctrl_info->num_blocked_threads)) {
		if (time_after(jiffies, warning_timeout)) {
			dev_warn(&ctrl_info->pci_dev->dev,
				"waiting %u seconds for driver activity to quiesce\n",
				jiffies_to_msecs(jiffies - start_jiffies) / 1000);
			displayed_warning = true;
			warning_timeout = (PQI_QUIESCE_WARNING_TIMEOUT_SECS * HZ) + jiffies;
		}
		usleep_range(1000, 2000);
	}

	if (displayed_warning)
		dev_warn(&ctrl_info->pci_dev->dev,
			"driver activity quiesced after waiting for %u seconds\n",
			jiffies_to_msecs(jiffies - start_jiffies) / 1000);
}

static inline bool pqi_device_offline(struct pqi_scsi_dev *device)
{
	return device->device_offline;
}

static inline void pqi_ctrl_ofa_start(struct pqi_ctrl_info *ctrl_info)
{
	mutex_lock(&ctrl_info->ofa_mutex);
}

static inline void pqi_ctrl_ofa_done(struct pqi_ctrl_info *ctrl_info)
{
	mutex_unlock(&ctrl_info->ofa_mutex);
}

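/*
 * Online Firmware Activation (OFA) holds ofa_mutex for the duration of the
 * operation, so acquiring and immediately releasing the mutex is enough to
 * wait for any in-progress OFA to complete.
 */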
static inline void pqi_wait_until_ofa_finished(struct pqi_ctrl_info *ctrl_info)
{
	mutex_lock(&ctrl_info->ofa_mutex);
	mutex_unlock(&ctrl_info->ofa_mutex);
}

static inline bool pqi_ofa_in_progress(struct pqi_ctrl_info *ctrl_info)
{
	return mutex_is_locked(&ctrl_info->ofa_mutex);
}

static inline void pqi_device_remove_start(struct pqi_scsi_dev *device)
{
	device->in_remove = true;
}

static inline bool pqi_device_in_remove(struct pqi_scsi_dev *device)
{
	return device->in_remove;
}

static inline void pqi_device_reset_start(struct pqi_scsi_dev *device, u8 lun)
{
	device->in_reset[lun] = true;
}

static inline void pqi_device_reset_done(struct pqi_scsi_dev *device, u8 lun)
{
	device->in_reset[lun] = false;
}

static inline bool pqi_device_in_reset(struct pqi_scsi_dev *device, u8 lun)
{
	return device->in_reset[lun];
}

static inline int pqi_event_type_to_event_index(unsigned int event_type)
{
	int index;

	for (index = 0; index < ARRAY_SIZE(pqi_supported_event_types); index++)
		if (event_type == pqi_supported_event_types[index])
			return index;

	return -1;
}

static inline bool pqi_is_supported_event(unsigned int event_type)
{
	return pqi_event_type_to_event_index(event_type) != -1;
}

static inline void pqi_schedule_rescan_worker_with_delay(struct pqi_ctrl_info *ctrl_info,
	unsigned long delay)
{
	if (pqi_ctrl_offline(ctrl_info))
		return;

	schedule_delayed_work(&ctrl_info->rescan_work, delay);
}

static inline void pqi_schedule_rescan_worker(struct pqi_ctrl_info *ctrl_info)
{
	pqi_schedule_rescan_worker_with_delay(ctrl_info, 0);
}

#define PQI_RESCAN_WORK_DELAY	(10 * HZ)

static inline void pqi_schedule_rescan_worker_delayed(struct pqi_ctrl_info *ctrl_info)
{
	pqi_schedule_rescan_worker_with_delay(ctrl_info, PQI_RESCAN_WORK_DELAY);
}

static inline void pqi_cancel_rescan_worker(struct pqi_ctrl_info *ctrl_info)
{
	cancel_delayed_work_sync(&ctrl_info->rescan_work);
}

static inline u32 pqi_read_heartbeat_counter(struct pqi_ctrl_info *ctrl_info)
{
	if (!ctrl_info->heartbeat_counter)
		return 0;

	return readl(ctrl_info->heartbeat_counter);
}

static inline u8 pqi_read_soft_reset_status(struct pqi_ctrl_info *ctrl_info)
{
	return readb(ctrl_info->soft_reset_status);
}

static inline void pqi_clear_soft_reset_status(struct pqi_ctrl_info *ctrl_info)
{
	u8 status;

	status = pqi_read_soft_reset_status(ctrl_info);
	status &= ~PQI_SOFT_RESET_ABORT;
	writeb(status, ctrl_info->soft_reset_status);
}

static inline bool pqi_is_io_high_priority(struct pqi_scsi_dev *device, struct scsi_cmnd *scmd)
{
	bool io_high_prio;
	int priority_class;

	io_high_prio = false;

	if (device->ncq_prio_enable) {
		priority_class =
			IOPRIO_PRIO_CLASS(req_get_ioprio(scsi_cmd_to_rq(scmd)));
		if (priority_class == IOPRIO_CLASS_RT) {
			/* Set NCQ priority for read/write commands. */
			switch (scmd->cmnd[0]) {
			case WRITE_16:
			case READ_16:
			case WRITE_12:
			case READ_12:
			case WRITE_10:
			case READ_10:
			case WRITE_6:
			case READ_6:
				io_high_prio = true;
				break;
			}
		}
	}

	return io_high_prio;
}

static int pqi_map_single(struct pci_dev *pci_dev,
	struct pqi_sg_descriptor *sg_descriptor, void *buffer,
	size_t buffer_length, enum dma_data_direction data_direction)
{
	dma_addr_t bus_address;

	if (!buffer || buffer_length == 0 || data_direction == DMA_NONE)
		return 0;

	bus_address = dma_map_single(&pci_dev->dev, buffer, buffer_length,
		data_direction);
	if (dma_mapping_error(&pci_dev->dev, bus_address))
		return -ENOMEM;

	put_unaligned_le64((u64)bus_address, &sg_descriptor->address);
	put_unaligned_le32(buffer_length, &sg_descriptor->length);
	put_unaligned_le32(CISS_SG_LAST, &sg_descriptor->flags);

	return 0;
}

static void pqi_pci_unmap(struct pci_dev *pci_dev,
	struct pqi_sg_descriptor *descriptors, int num_descriptors,
	enum dma_data_direction data_direction)
{
	int i;

	if (data_direction == DMA_NONE)
		return;

	for (i = 0; i < num_descriptors; i++)
		dma_unmap_single(&pci_dev->dev,
			(dma_addr_t)get_unaligned_le64(&descriptors[i].address),
			get_unaligned_le32(&descriptors[i].length),
			data_direction);
}

static int pqi_build_raid_path_request(struct pqi_ctrl_info *ctrl_info,
	struct pqi_raid_path_request *request, u8 cmd,
	u8 *scsi3addr, void *buffer, size_t buffer_length,
	u16 vpd_page, enum dma_data_direction *dir)
{
	u8 *cdb;
	size_t cdb_length = buffer_length;

	memset(request, 0, sizeof(*request));

	request->header.iu_type = PQI_REQUEST_IU_RAID_PATH_IO;
	put_unaligned_le16(offsetof(struct pqi_raid_path_request,
		sg_descriptors[1]) - PQI_REQUEST_HEADER_LENGTH,
		&request->header.iu_length);
	put_unaligned_le32(buffer_length, &request->buffer_length);
	memcpy(request->lun_number, scsi3addr, sizeof(request->lun_number));
	request->task_attribute = SOP_TASK_ATTRIBUTE_SIMPLE;
	request->additional_cdb_bytes_usage = SOP_ADDITIONAL_CDB_BYTES_0;

	cdb = request->cdb;

	switch (cmd) {
	case INQUIRY:
		request->data_direction = SOP_READ_FLAG;
		cdb[0] = INQUIRY;
		if (vpd_page & VPD_PAGE) {
			cdb[1] = 0x1;
			cdb[2] = (u8)vpd_page;
		}
		cdb[4] = (u8)cdb_length;
		break;
	case CISS_REPORT_LOG:
	case CISS_REPORT_PHYS:
		request->data_direction = SOP_READ_FLAG;
		cdb[0] = cmd;
		if (cmd == CISS_REPORT_PHYS) {
			if (ctrl_info->rpl_extended_format_4_5_supported)
				cdb[1] = CISS_REPORT_PHYS_FLAG_EXTENDED_FORMAT_4;
			else
				cdb[1] = CISS_REPORT_PHYS_FLAG_EXTENDED_FORMAT_2;
		} else {
			cdb[1] = ctrl_info->ciss_report_log_flags;
		}
		put_unaligned_be32(cdb_length, &cdb[6]);
		break;
	case CISS_GET_RAID_MAP:
		request->data_direction = SOP_READ_FLAG;
		cdb[0] = CISS_READ;
		cdb[1] = CISS_GET_RAID_MAP;
		put_unaligned_be32(cdb_length, &cdb[6]);
		break;
	case SA_FLUSH_CACHE:
		request->header.driver_flags = PQI_DRIVER_NONBLOCKABLE_REQUEST;
		request->data_direction = SOP_WRITE_FLAG;
		cdb[0] = BMIC_WRITE;
		cdb[6] = BMIC_FLUSH_CACHE;
		put_unaligned_be16(cdb_length, &cdb[7]);
		break;
	case BMIC_SENSE_DIAG_OPTIONS:
		cdb_length = 0;
		fallthrough;
	case BMIC_IDENTIFY_CONTROLLER:
	case BMIC_IDENTIFY_PHYSICAL_DEVICE:
	case BMIC_SENSE_SUBSYSTEM_INFORMATION:
	case BMIC_SENSE_FEATURE:
		request->data_direction = SOP_READ_FLAG;
		cdb[0] = BMIC_READ;
		cdb[6] = cmd;
		put_unaligned_be16(cdb_length, &cdb[7]);
		break;
	case BMIC_SET_DIAG_OPTIONS:
		cdb_length = 0;
		fallthrough;
	case BMIC_WRITE_HOST_WELLNESS:
		request->data_direction = SOP_WRITE_FLAG;
		cdb[0] = BMIC_WRITE;
		cdb[6] = cmd;
		put_unaligned_be16(cdb_length, &cdb[7]);
		break;
	case BMIC_CSMI_PASSTHRU:
		request->data_direction = SOP_BIDIRECTIONAL;
		cdb[0] = BMIC_WRITE;
		cdb[5] = CSMI_CC_SAS_SMP_PASSTHRU;
		cdb[6] = cmd;
		put_unaligned_be16(cdb_length, &cdb[7]);
		break;
	default:
		dev_err(&ctrl_info->pci_dev->dev, "unknown command 0x%x\n", cmd);
		break;
	}

	switch (request->data_direction) {
	case SOP_READ_FLAG:
		*dir = DMA_FROM_DEVICE;
		break;
	case SOP_WRITE_FLAG:
		*dir = DMA_TO_DEVICE;
		break;
	case SOP_NO_DIRECTION_FLAG:
		*dir = DMA_NONE;
		break;
	default:
		*dir = DMA_BIDIRECTIONAL;
		break;
	}

	return pqi_map_single(ctrl_info->pci_dev, &request->sg_descriptors[0],
		buffer, buffer_length, *dir);
}

static inline void pqi_reinit_io_request(struct pqi_io_request *io_request)
{
	io_request->scmd = NULL;
	io_request->status = 0;
	io_request->error_info = NULL;
	io_request->raid_bypass = false;
}

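/*
 * Allocate an I/O request slot. Requests that originate from the SCSI
 * midlayer map directly to a slot via the block layer tag; internal and
 * IOCTL requests spin over the reserved slots at the end of the pool until
 * one frees up. The refcount arbitrates ownership of a slot.
 */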
static inline struct pqi_io_request *pqi_alloc_io_request(struct pqi_ctrl_info *ctrl_info, struct scsi_cmnd *scmd)
{
	struct pqi_io_request *io_request;
	u16 i;

	if (scmd) { /* SML I/O request */
		u32 blk_tag = blk_mq_unique_tag(scsi_cmd_to_rq(scmd));

		i = blk_mq_unique_tag_to_tag(blk_tag);
		io_request = &ctrl_info->io_request_pool[i];
		if (atomic_inc_return(&io_request->refcount) > 1) {
			atomic_dec(&io_request->refcount);
			return NULL;
		}
	} else { /* IOCTL or driver internal request */
		/*
		 * benignly racy - may have to wait for an open slot.
		 * command slot range is scsi_ml_can_queue -
		 * [scsi_ml_can_queue + (PQI_RESERVED_IO_SLOTS - 1)]
		 */
		i = 0;
		while (1) {
			io_request = &ctrl_info->io_request_pool[ctrl_info->scsi_ml_can_queue + i];
			if (atomic_inc_return(&io_request->refcount) == 1)
				break;
			atomic_dec(&io_request->refcount);
			i = (i + 1) % PQI_RESERVED_IO_SLOTS;
		}
	}

	if (io_request)
		pqi_reinit_io_request(io_request);

	return io_request;
}

static void pqi_free_io_request(struct pqi_io_request *io_request)
{
	atomic_dec(&io_request->refcount);
}

static int pqi_send_scsi_raid_request(struct pqi_ctrl_info *ctrl_info, u8 cmd,
	u8 *scsi3addr, void *buffer, size_t buffer_length, u16 vpd_page,
	struct pqi_raid_error_info *error_info)
{
	int rc;
	struct pqi_raid_path_request request;
	enum dma_data_direction dir;

	rc = pqi_build_raid_path_request(ctrl_info, &request, cmd, scsi3addr,
		buffer, buffer_length, vpd_page, &dir);
	if (rc)
		return rc;

	rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header, 0, error_info);

	pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1, dir);

	return rc;
}

/* helper functions for pqi_send_scsi_raid_request */

static inline int pqi_send_ctrl_raid_request(struct pqi_ctrl_info *ctrl_info,
	u8 cmd, void *buffer, size_t buffer_length)
{
	return pqi_send_scsi_raid_request(ctrl_info, cmd, RAID_CTLR_LUNID,
		buffer, buffer_length, 0, NULL);
}

static inline int pqi_send_ctrl_raid_with_error(struct pqi_ctrl_info *ctrl_info,
	u8 cmd, void *buffer, size_t buffer_length,
	struct pqi_raid_error_info *error_info)
{
	return pqi_send_scsi_raid_request(ctrl_info, cmd, RAID_CTLR_LUNID,
		buffer, buffer_length, 0, error_info);
}

static inline int pqi_identify_controller(struct pqi_ctrl_info *ctrl_info,
	struct bmic_identify_controller *buffer)
{
	return pqi_send_ctrl_raid_request(ctrl_info, BMIC_IDENTIFY_CONTROLLER,
		buffer, sizeof(*buffer));
}

static inline int pqi_sense_subsystem_info(struct pqi_ctrl_info *ctrl_info,
	struct bmic_sense_subsystem_info *sense_info)
{
	return pqi_send_ctrl_raid_request(ctrl_info,
		BMIC_SENSE_SUBSYSTEM_INFORMATION, sense_info,
		sizeof(*sense_info));
}

static inline int pqi_scsi_inquiry(struct pqi_ctrl_info *ctrl_info,
	u8 *scsi3addr, u16 vpd_page, void *buffer, size_t buffer_length)
{
	return pqi_send_scsi_raid_request(ctrl_info, INQUIRY, scsi3addr,
		buffer, buffer_length, vpd_page, NULL);
}

static int pqi_identify_physical_device(struct pqi_ctrl_info *ctrl_info,
	struct pqi_scsi_dev *device,
	struct bmic_identify_physical_device *buffer, size_t buffer_length)
{
	int rc;
	enum dma_data_direction dir;
	u16 bmic_device_index;
	struct pqi_raid_path_request request;

	rc = pqi_build_raid_path_request(ctrl_info, &request,
		BMIC_IDENTIFY_PHYSICAL_DEVICE, RAID_CTLR_LUNID, buffer,
		buffer_length, 0, &dir);
	if (rc)
		return rc;

	bmic_device_index = CISS_GET_DRIVE_NUMBER(device->scsi3addr);
	request.cdb[2] = (u8)bmic_device_index;
	request.cdb[9] = (u8)(bmic_device_index >> 8);

	rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header, 0, NULL);

	pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1, dir);

	return rc;
}

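/*
 * AIO transfer limits are reported by firmware in 1 KiB units; a value of
 * zero appears to mean "no limit" and is normalized here to ~0.
 */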
static inline u32 pqi_aio_limit_to_bytes(__le16 *limit)
{
	u32 bytes;

	bytes = get_unaligned_le16(limit);
	if (bytes == 0)
		bytes = ~0;
	else
		bytes *= 1024;

	return bytes;
}

#pragma pack(1)

struct bmic_sense_feature_buffer {
	struct bmic_sense_feature_buffer_header header;
	struct bmic_sense_feature_io_page_aio_subpage aio_subpage;
};

#pragma pack()

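/*
 * Minimum acceptable lengths for the sense-feature AIO subpage: firmware
 * may return a shorter page than the full structure, so the response is
 * validated against the offset of the last field actually consumed
 * (max_write_raid_1_10_3drive) rather than the full structure size.
 */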
#define MINIMUM_AIO_SUBPAGE_BUFFER_LENGTH	\
	offsetofend(struct bmic_sense_feature_buffer, \
		aio_subpage.max_write_raid_1_10_3drive)

#define MINIMUM_AIO_SUBPAGE_LENGTH	\
	(offsetofend(struct bmic_sense_feature_io_page_aio_subpage, \
		max_write_raid_1_10_3drive) - \
		sizeof_field(struct bmic_sense_feature_io_page_aio_subpage, header))

static int pqi_get_advanced_raid_bypass_config(struct pqi_ctrl_info *ctrl_info)
{
	int rc;
	enum dma_data_direction dir;
	struct pqi_raid_path_request request;
	struct bmic_sense_feature_buffer *buffer;

	buffer = kmalloc(sizeof(*buffer), GFP_KERNEL);
	if (!buffer)
		return -ENOMEM;

	rc = pqi_build_raid_path_request(ctrl_info, &request, BMIC_SENSE_FEATURE, RAID_CTLR_LUNID,
		buffer, sizeof(*buffer), 0, &dir);
	if (rc)
		goto error;

	request.cdb[2] = BMIC_SENSE_FEATURE_IO_PAGE;
	request.cdb[3] = BMIC_SENSE_FEATURE_IO_PAGE_AIO_SUBPAGE;

	rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header, 0, NULL);

	pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1, dir);

	if (rc)
		goto error;

	if (buffer->header.page_code != BMIC_SENSE_FEATURE_IO_PAGE ||
		buffer->header.subpage_code !=
			BMIC_SENSE_FEATURE_IO_PAGE_AIO_SUBPAGE ||
		get_unaligned_le16(&buffer->header.buffer_length) <
			MINIMUM_AIO_SUBPAGE_BUFFER_LENGTH ||
		buffer->aio_subpage.header.page_code !=
			BMIC_SENSE_FEATURE_IO_PAGE ||
		buffer->aio_subpage.header.subpage_code !=
			BMIC_SENSE_FEATURE_IO_PAGE_AIO_SUBPAGE ||
		get_unaligned_le16(&buffer->aio_subpage.header.page_length) <
			MINIMUM_AIO_SUBPAGE_LENGTH) {
		goto error;
	}

	ctrl_info->max_transfer_encrypted_sas_sata =
		pqi_aio_limit_to_bytes(
			&buffer->aio_subpage.max_transfer_encrypted_sas_sata);

	ctrl_info->max_transfer_encrypted_nvme =
		pqi_aio_limit_to_bytes(
			&buffer->aio_subpage.max_transfer_encrypted_nvme);

	ctrl_info->max_write_raid_5_6 =
		pqi_aio_limit_to_bytes(
			&buffer->aio_subpage.max_write_raid_5_6);

	ctrl_info->max_write_raid_1_10_2drive =
		pqi_aio_limit_to_bytes(
			&buffer->aio_subpage.max_write_raid_1_10_2drive);

	ctrl_info->max_write_raid_1_10_3drive =
		pqi_aio_limit_to_bytes(
			&buffer->aio_subpage.max_write_raid_1_10_3drive);

error:
	kfree(buffer);

	return rc;
}

static int pqi_flush_cache(struct pqi_ctrl_info *ctrl_info,
	enum bmic_flush_cache_shutdown_event shutdown_event)
{
	int rc;
	struct bmic_flush_cache *flush_cache;

	flush_cache = kzalloc(sizeof(*flush_cache), GFP_KERNEL);
	if (!flush_cache)
		return -ENOMEM;

	flush_cache->shutdown_event = shutdown_event;

	rc = pqi_send_ctrl_raid_request(ctrl_info, SA_FLUSH_CACHE, flush_cache,
		sizeof(*flush_cache));

	kfree(flush_cache);

	return rc;
}

int pqi_csmi_smp_passthru(struct pqi_ctrl_info *ctrl_info,
	struct bmic_csmi_smp_passthru_buffer *buffer, size_t buffer_length,
	struct pqi_raid_error_info *error_info)
{
	return pqi_send_ctrl_raid_with_error(ctrl_info, BMIC_CSMI_PASSTHRU,
		buffer, buffer_length, error_info);
}

#define PQI_FETCH_PTRAID_DATA	(1 << 31)

static int pqi_set_diag_rescan(struct pqi_ctrl_info *ctrl_info)
{
	int rc;
	struct bmic_diag_options *diag;

	diag = kzalloc(sizeof(*diag), GFP_KERNEL);
	if (!diag)
		return -ENOMEM;

	rc = pqi_send_ctrl_raid_request(ctrl_info, BMIC_SENSE_DIAG_OPTIONS,
		diag, sizeof(*diag));
	if (rc)
		goto out;

	diag->options |= cpu_to_le32(PQI_FETCH_PTRAID_DATA);

	rc = pqi_send_ctrl_raid_request(ctrl_info, BMIC_SET_DIAG_OPTIONS, diag,
		sizeof(*diag));

out:
	kfree(diag);

	return rc;
}

static inline int pqi_write_host_wellness(struct pqi_ctrl_info *ctrl_info,
	void *buffer, size_t buffer_length)
{
	return pqi_send_ctrl_raid_request(ctrl_info, BMIC_WRITE_HOST_WELLNESS,
		buffer, buffer_length);
}

#pragma pack(1)

struct bmic_host_wellness_driver_version {
	u8	start_tag[4];
	u8	driver_version_tag[2];
	__le16	driver_version_length;
	char	driver_version[32];
	u8	dont_write_tag[2];
	u8	end_tag[2];
};

#pragma pack()

static int pqi_write_driver_version_to_host_wellness(
	struct pqi_ctrl_info *ctrl_info)
{
	int rc;
	struct bmic_host_wellness_driver_version *buffer;
	size_t buffer_length;

	buffer_length = sizeof(*buffer);

	buffer = kmalloc(buffer_length, GFP_KERNEL);
	if (!buffer)
		return -ENOMEM;

	buffer->start_tag[0] = '<';
	buffer->start_tag[1] = 'H';
	buffer->start_tag[2] = 'W';
	buffer->start_tag[3] = '>';
	buffer->driver_version_tag[0] = 'D';
	buffer->driver_version_tag[1] = 'V';
	put_unaligned_le16(sizeof(buffer->driver_version),
		&buffer->driver_version_length);
	strscpy(buffer->driver_version, "Linux " DRIVER_VERSION,
		sizeof(buffer->driver_version));
	buffer->dont_write_tag[0] = 'D';
	buffer->dont_write_tag[1] = 'W';
	buffer->end_tag[0] = 'Z';
	buffer->end_tag[1] = 'Z';

	rc = pqi_write_host_wellness(ctrl_info, buffer, buffer_length);

	kfree(buffer);

	return rc;
}

#pragma pack(1)

struct bmic_host_wellness_time {
	u8	start_tag[4];
	u8	time_tag[2];
	__le16	time_length;
	u8	time[8];
	u8	dont_write_tag[2];
	u8	end_tag[2];
};

#pragma pack()

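/*
 * Send the current host time to the controller, BCD-encoded as
 * hour/minute/second, a zero pad byte, then month/day/century/year,
 * matching the 8-byte time field in struct bmic_host_wellness_time.
 */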
static int pqi_write_current_time_to_host_wellness(
	struct pqi_ctrl_info *ctrl_info)
{
	int rc;
	struct bmic_host_wellness_time *buffer;
	size_t buffer_length;
	time64_t local_time;
	unsigned int year;
	struct tm tm;

	buffer_length = sizeof(*buffer);

	buffer = kmalloc(buffer_length, GFP_KERNEL);
	if (!buffer)
		return -ENOMEM;

	buffer->start_tag[0] = '<';
	buffer->start_tag[1] = 'H';
	buffer->start_tag[2] = 'W';
	buffer->start_tag[3] = '>';
	buffer->time_tag[0] = 'T';
	buffer->time_tag[1] = 'D';
	put_unaligned_le16(sizeof(buffer->time),
		&buffer->time_length);

	local_time = ktime_get_real_seconds();
	time64_to_tm(local_time, -sys_tz.tz_minuteswest * 60, &tm);
	year = tm.tm_year + 1900;

	buffer->time[0] = bin2bcd(tm.tm_hour);
	buffer->time[1] = bin2bcd(tm.tm_min);
	buffer->time[2] = bin2bcd(tm.tm_sec);
	buffer->time[3] = 0;
	buffer->time[4] = bin2bcd(tm.tm_mon + 1);
	buffer->time[5] = bin2bcd(tm.tm_mday);
	buffer->time[6] = bin2bcd(year / 100);
	buffer->time[7] = bin2bcd(year % 100);

	buffer->dont_write_tag[0] = 'D';
	buffer->dont_write_tag[1] = 'W';
	buffer->end_tag[0] = 'Z';
	buffer->end_tag[1] = 'Z';

	rc = pqi_write_host_wellness(ctrl_info, buffer, buffer_length);

	kfree(buffer);

	return rc;
}

#define PQI_UPDATE_TIME_WORK_INTERVAL	(24UL * 60 * 60 * HZ)

static void pqi_update_time_worker(struct work_struct *work)
{
	int rc;
	struct pqi_ctrl_info *ctrl_info;

	ctrl_info = container_of(to_delayed_work(work), struct pqi_ctrl_info,
		update_time_work);

	rc = pqi_write_current_time_to_host_wellness(ctrl_info);
	if (rc)
		dev_warn(&ctrl_info->pci_dev->dev,
			"error updating time on controller\n");

	schedule_delayed_work(&ctrl_info->update_time_work,
		PQI_UPDATE_TIME_WORK_INTERVAL);
}

static inline void pqi_schedule_update_time_worker(struct pqi_ctrl_info *ctrl_info)
{
	schedule_delayed_work(&ctrl_info->update_time_work, 0);
}

static inline void pqi_cancel_update_time_worker(struct pqi_ctrl_info *ctrl_info)
{
	cancel_delayed_work_sync(&ctrl_info->update_time_work);
}

static inline int pqi_report_luns(struct pqi_ctrl_info *ctrl_info, u8 cmd, void *buffer,
	size_t buffer_length)
{
	return pqi_send_ctrl_raid_request(ctrl_info, cmd, buffer, buffer_length);
}

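/*
 * Issue a report LUNs command twice: once with a header-sized buffer to
 * learn the list length, then again with a buffer sized to fit the whole
 * list. If the list grows between the two calls, the buffer is resized and
 * the full request retried.
 */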
static int pqi_report_phys_logical_luns(struct pqi_ctrl_info *ctrl_info, u8 cmd, void **buffer)
{
	int rc;
	size_t lun_list_length;
	size_t lun_data_length;
	size_t new_lun_list_length;
	void *lun_data = NULL;
	struct report_lun_header *report_lun_header;

	report_lun_header = kmalloc(sizeof(*report_lun_header), GFP_KERNEL);
	if (!report_lun_header) {
		rc = -ENOMEM;
		goto out;
	}

	rc = pqi_report_luns(ctrl_info, cmd, report_lun_header, sizeof(*report_lun_header));
	if (rc)
		goto out;

	lun_list_length = get_unaligned_be32(&report_lun_header->list_length);

again:
	lun_data_length = sizeof(struct report_lun_header) + lun_list_length;

	lun_data = kmalloc(lun_data_length, GFP_KERNEL);
	if (!lun_data) {
		rc = -ENOMEM;
		goto out;
	}

	if (lun_list_length == 0) {
		memcpy(lun_data, report_lun_header, sizeof(*report_lun_header));
		goto out;
	}

	rc = pqi_report_luns(ctrl_info, cmd, lun_data, lun_data_length);
	if (rc)
		goto out;

	new_lun_list_length =
		get_unaligned_be32(&((struct report_lun_header *)lun_data)->list_length);

	if (new_lun_list_length > lun_list_length) {
		lun_list_length = new_lun_list_length;
		kfree(lun_data);
		goto again;
	}

out:
	kfree(report_lun_header);

	if (rc) {
		kfree(lun_data);
		lun_data = NULL;
	}

	*buffer = lun_data;

	return rc;
}

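/*
 * Return the physical LUN list in extended format 4 (16-byte WWIDs). If the
 * controller only returns extended format 2, each 8-byte WWID entry is
 * copied into a 16-byte entry with the upper 8 bytes zeroed, so callers
 * always see a single format.
 */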
static inline int pqi_report_phys_luns(struct pqi_ctrl_info *ctrl_info, void **buffer)
{
	int rc;
	unsigned int i;
	u8 rpl_response_format;
	u32 num_physicals;
	void *rpl_list;
	struct report_lun_header *rpl_header;
	struct report_phys_lun_8byte_wwid_list *rpl_8byte_wwid_list;
	struct report_phys_lun_16byte_wwid_list *rpl_16byte_wwid_list;

	rc = pqi_report_phys_logical_luns(ctrl_info, CISS_REPORT_PHYS, &rpl_list);
	if (rc)
		return rc;

	if (ctrl_info->rpl_extended_format_4_5_supported) {
		rpl_header = rpl_list;
		rpl_response_format = rpl_header->flags & CISS_REPORT_PHYS_FLAG_EXTENDED_FORMAT_MASK;
		if (rpl_response_format == CISS_REPORT_PHYS_FLAG_EXTENDED_FORMAT_4) {
			*buffer = rpl_list;
			return 0;
		} else if (rpl_response_format != CISS_REPORT_PHYS_FLAG_EXTENDED_FORMAT_2) {
			dev_err(&ctrl_info->pci_dev->dev,
				"RPL returned unsupported data format %u\n",
				rpl_response_format);
			return -EINVAL;
		} else {
			dev_warn(&ctrl_info->pci_dev->dev,
				"RPL returned extended format 2 instead of 4\n");
		}
	}

	rpl_8byte_wwid_list = rpl_list;
	num_physicals = get_unaligned_be32(&rpl_8byte_wwid_list->header.list_length) / sizeof(rpl_8byte_wwid_list->lun_entries[0]);

	rpl_16byte_wwid_list = kmalloc(struct_size(rpl_16byte_wwid_list, lun_entries,
		num_physicals), GFP_KERNEL);
	if (!rpl_16byte_wwid_list)
		return -ENOMEM;

	put_unaligned_be32(num_physicals * sizeof(struct report_phys_lun_16byte_wwid),
		&rpl_16byte_wwid_list->header.list_length);
	rpl_16byte_wwid_list->header.flags = rpl_8byte_wwid_list->header.flags;

	for (i = 0; i < num_physicals; i++) {
		memcpy(&rpl_16byte_wwid_list->lun_entries[i].lunid, &rpl_8byte_wwid_list->lun_entries[i].lunid, sizeof(rpl_8byte_wwid_list->lun_entries[i].lunid));
		memcpy(&rpl_16byte_wwid_list->lun_entries[i].wwid[0], &rpl_8byte_wwid_list->lun_entries[i].wwid, sizeof(rpl_8byte_wwid_list->lun_entries[i].wwid));
		memset(&rpl_16byte_wwid_list->lun_entries[i].wwid[8], 0, 8);
		rpl_16byte_wwid_list->lun_entries[i].device_type = rpl_8byte_wwid_list->lun_entries[i].device_type;
		rpl_16byte_wwid_list->lun_entries[i].device_flags = rpl_8byte_wwid_list->lun_entries[i].device_flags;
		rpl_16byte_wwid_list->lun_entries[i].lun_count = rpl_8byte_wwid_list->lun_entries[i].lun_count;
		rpl_16byte_wwid_list->lun_entries[i].redundant_paths = rpl_8byte_wwid_list->lun_entries[i].redundant_paths;
		rpl_16byte_wwid_list->lun_entries[i].aio_handle = rpl_8byte_wwid_list->lun_entries[i].aio_handle;
	}

	kfree(rpl_8byte_wwid_list);
	*buffer = rpl_16byte_wwid_list;

	return 0;
}

static inline int pqi_report_logical_luns(struct pqi_ctrl_info *ctrl_info, void **buffer)
{
	return pqi_report_phys_logical_luns(ctrl_info, CISS_REPORT_LOG, buffer);
}

static int pqi_get_device_lists(struct pqi_ctrl_info *ctrl_info,
	struct report_phys_lun_16byte_wwid_list **physdev_list,
	struct report_log_lun_list **logdev_list)
{
	int rc;
	size_t logdev_list_length;
	size_t logdev_data_length;
	struct report_log_lun_list *internal_logdev_list;
	struct report_log_lun_list *logdev_data;
	struct report_lun_header report_lun_header;

	rc = pqi_report_phys_luns(ctrl_info, (void **)physdev_list);
	if (rc)
		dev_err(&ctrl_info->pci_dev->dev,
			"report physical LUNs failed\n");

	rc = pqi_report_logical_luns(ctrl_info, (void **)logdev_list);
	if (rc)
		dev_err(&ctrl_info->pci_dev->dev,
			"report logical LUNs failed\n");

	/*
	 * Tack the controller itself onto the end of the logical device list
	 * by adding a list entry that is all zeros.
	 */

	logdev_data = *logdev_list;

	if (logdev_data) {
		logdev_list_length =
			get_unaligned_be32(&logdev_data->header.list_length);
	} else {
		memset(&report_lun_header, 0, sizeof(report_lun_header));
		logdev_data =
			(struct report_log_lun_list *)&report_lun_header;
		logdev_list_length = 0;
	}

	logdev_data_length = sizeof(struct report_lun_header) +
		logdev_list_length;

	internal_logdev_list = kmalloc(logdev_data_length +
		sizeof(struct report_log_lun), GFP_KERNEL);
	if (!internal_logdev_list) {
		kfree(*logdev_list);
		*logdev_list = NULL;
		return -ENOMEM;
	}

	memcpy(internal_logdev_list, logdev_data, logdev_data_length);
	memset((u8 *)internal_logdev_list + logdev_data_length, 0,
		sizeof(struct report_log_lun));
	put_unaligned_be32(logdev_list_length +
		sizeof(struct report_log_lun),
		&internal_logdev_list->header.list_length);

	kfree(*logdev_list);
	*logdev_list = internal_logdev_list;

	return 0;
}

static inline void pqi_set_bus_target_lun(struct pqi_scsi_dev *device,
	int bus, int target, int lun)
{
	device->bus = bus;
	device->target = target;
	device->lun = lun;
}

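/*
 * Derive bus/target/lun from the 4-byte LUN ID: the controller itself sits
 * on PQI_HBA_BUS, logical volumes are decoded from the LUN ID bit fields,
 * and physical devices get placeholder values because the SAS transport
 * layer assigns their target/lun later.
 */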
static void pqi_assign_bus_target_lun(struct pqi_scsi_dev *device)
{
	u8 *scsi3addr;
	u32 lunid;
	int bus;
	int target;
	int lun;

	scsi3addr = device->scsi3addr;
	lunid = get_unaligned_le32(scsi3addr);

	if (pqi_is_hba_lunid(scsi3addr)) {
		/* The specified device is the controller. */
		pqi_set_bus_target_lun(device, PQI_HBA_BUS, 0, lunid & 0x3fff);
		device->target_lun_valid = true;
		return;
	}

	if (pqi_is_logical_device(device)) {
		if (device->is_external_raid_device) {
			bus = PQI_EXTERNAL_RAID_VOLUME_BUS;
			target = (lunid >> 16) & 0x3fff;
			lun = lunid & 0xff;
		} else {
			bus = PQI_RAID_VOLUME_BUS;
			target = 0;
			lun = lunid & 0x3fff;
		}
		pqi_set_bus_target_lun(device, bus, target, lun);
		device->target_lun_valid = true;
		return;
	}

	/*
	 * Defer target and LUN assignment for non-controller physical devices
	 * because the SAS transport layer will make these assignments later.
	 */
	pqi_set_bus_target_lun(device, PQI_PHYSICAL_DEVICE_BUS, 0, 0);
}

static void pqi_get_raid_level(struct pqi_ctrl_info *ctrl_info,
	struct pqi_scsi_dev *device)
{
	int rc;
	u8 raid_level;
	u8 *buffer;

	raid_level = SA_RAID_UNKNOWN;

	buffer = kmalloc(64, GFP_KERNEL);
	if (buffer) {
		rc = pqi_scsi_inquiry(ctrl_info, device->scsi3addr,
			VPD_PAGE | CISS_VPD_LV_DEVICE_GEOMETRY, buffer, 64);
		if (rc == 0) {
			raid_level = buffer[8];
			if (raid_level > SA_RAID_MAX)
				raid_level = SA_RAID_UNKNOWN;
		}
		kfree(buffer);
	}

	device->raid_level = raid_level;
}

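/*
 * Sanity-check a RAID map before it is used for RAID bypass: verify the
 * overall size, the expected layout map count for RAID-1 (2) and RAID-1
 * Triple (3), and a non-zero row size for RAID 50/60 layouts.
 */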
static int pqi_validate_raid_map(struct pqi_ctrl_info *ctrl_info,
	struct pqi_scsi_dev *device, struct raid_map *raid_map)
{
	char *err_msg;
	u32 raid_map_size;
	u32 r5or6_blocks_per_row;

	raid_map_size = get_unaligned_le32(&raid_map->structure_size);

	if (raid_map_size < offsetof(struct raid_map, disk_data)) {
		err_msg = "RAID map too small";
		goto bad_raid_map;
	}

	if (device->raid_level == SA_RAID_1) {
		if (get_unaligned_le16(&raid_map->layout_map_count) != 2) {
			err_msg = "invalid RAID-1 map";
			goto bad_raid_map;
		}
	} else if (device->raid_level == SA_RAID_TRIPLE) {
		if (get_unaligned_le16(&raid_map->layout_map_count) != 3) {
			err_msg = "invalid RAID-1(Triple) map";
			goto bad_raid_map;
		}
	} else if ((device->raid_level == SA_RAID_5 ||
		device->raid_level == SA_RAID_6) &&
		get_unaligned_le16(&raid_map->layout_map_count) > 1) {
		/* RAID 50/60 */
		r5or6_blocks_per_row =
			get_unaligned_le16(&raid_map->strip_size) *
			get_unaligned_le16(&raid_map->data_disks_per_row);
		if (r5or6_blocks_per_row == 0) {
			err_msg = "invalid RAID-5 or RAID-6 map";
			goto bad_raid_map;
		}
	}

	return 0;

bad_raid_map:
	dev_warn(&ctrl_info->pci_dev->dev,
		"logical device %08x%08x %s\n",
		*((u32 *)&device->scsi3addr),
		*((u32 *)&device->scsi3addr[4]), err_msg);

	return -EINVAL;
}

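/*
 * Fetch the RAID map. The map is first requested with a minimal buffer; if
 * the reported structure size is larger, the buffer is reallocated and the
 * request repeated, and the second response must report the same size.
 */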
pqi_get_raid_map(struct pqi_ctrl_info * ctrl_info,struct pqi_scsi_dev * device)1467 static int pqi_get_raid_map(struct pqi_ctrl_info *ctrl_info,
1468 struct pqi_scsi_dev *device)
1469 {
1470 int rc;
1471 u32 raid_map_size;
1472 struct raid_map *raid_map;
1473
1474 raid_map = kmalloc(sizeof(*raid_map), GFP_KERNEL);
1475 if (!raid_map)
1476 return -ENOMEM;
1477
1478 rc = pqi_send_scsi_raid_request(ctrl_info, CISS_GET_RAID_MAP,
1479 device->scsi3addr, raid_map, sizeof(*raid_map), 0, NULL);
1480 if (rc)
1481 goto error;
1482
1483 raid_map_size = get_unaligned_le32(&raid_map->structure_size);
1484
1485 if (raid_map_size > sizeof(*raid_map)) {
1486
1487 kfree(raid_map);
1488
1489 raid_map = kmalloc(raid_map_size, GFP_KERNEL);
1490 if (!raid_map)
1491 return -ENOMEM;
1492
1493 rc = pqi_send_scsi_raid_request(ctrl_info, CISS_GET_RAID_MAP,
1494 device->scsi3addr, raid_map, raid_map_size, 0, NULL);
1495 if (rc)
1496 goto error;
1497
1498 if (get_unaligned_le32(&raid_map->structure_size)
1499 != raid_map_size) {
1500 dev_warn(&ctrl_info->pci_dev->dev,
1501 "requested %u bytes, received %u bytes\n",
1502 raid_map_size,
1503 get_unaligned_le32(&raid_map->structure_size));
1504 rc = -EINVAL;
1505 goto error;
1506 }
1507 }
1508
1509 rc = pqi_validate_raid_map(ctrl_info, device, raid_map);
1510 if (rc)
1511 goto error;
1512
1513 device->raid_io_stats = alloc_percpu(struct pqi_raid_io_stats);
1514 if (!device->raid_io_stats) {
1515 rc = -ENOMEM;
1516 goto error;
1517 }
1518
1519 device->raid_map = raid_map;
1520
1521 return 0;
1522
1523 error:
1524 kfree(raid_map);
1525
1526 return rc;
1527 }
1528
pqi_set_max_transfer_encrypted(struct pqi_ctrl_info * ctrl_info,struct pqi_scsi_dev * device)1529 static void pqi_set_max_transfer_encrypted(struct pqi_ctrl_info *ctrl_info,
1530 struct pqi_scsi_dev *device)
1531 {
1532 if (!ctrl_info->lv_drive_type_mix_valid) {
1533 device->max_transfer_encrypted = ~0;
1534 return;
1535 }
1536
1537 switch (LV_GET_DRIVE_TYPE_MIX(device->scsi3addr)) {
1538 case LV_DRIVE_TYPE_MIX_SAS_HDD_ONLY:
1539 case LV_DRIVE_TYPE_MIX_SATA_HDD_ONLY:
1540 case LV_DRIVE_TYPE_MIX_SAS_OR_SATA_SSD_ONLY:
1541 case LV_DRIVE_TYPE_MIX_SAS_SSD_ONLY:
1542 case LV_DRIVE_TYPE_MIX_SATA_SSD_ONLY:
1543 case LV_DRIVE_TYPE_MIX_SAS_ONLY:
1544 case LV_DRIVE_TYPE_MIX_SATA_ONLY:
1545 device->max_transfer_encrypted =
1546 ctrl_info->max_transfer_encrypted_sas_sata;
1547 break;
1548 case LV_DRIVE_TYPE_MIX_NVME_ONLY:
1549 device->max_transfer_encrypted =
1550 ctrl_info->max_transfer_encrypted_nvme;
1551 break;
1552 case LV_DRIVE_TYPE_MIX_UNKNOWN:
1553 case LV_DRIVE_TYPE_MIX_NO_RESTRICTION:
1554 default:
1555 device->max_transfer_encrypted =
1556 min(ctrl_info->max_transfer_encrypted_sas_sata,
1557 ctrl_info->max_transfer_encrypted_nvme);
1558 break;
1559 }
1560 }
1561
pqi_get_raid_bypass_status(struct pqi_ctrl_info * ctrl_info,struct pqi_scsi_dev * device)1562 static void pqi_get_raid_bypass_status(struct pqi_ctrl_info *ctrl_info,
1563 struct pqi_scsi_dev *device)
1564 {
1565 int rc;
1566 u8 *buffer;
1567 u8 bypass_status;
1568
1569 buffer = kmalloc(64, GFP_KERNEL);
1570 if (!buffer)
1571 return;
1572
1573 rc = pqi_scsi_inquiry(ctrl_info, device->scsi3addr,
1574 VPD_PAGE | CISS_VPD_LV_BYPASS_STATUS, buffer, 64);
1575 if (rc)
1576 goto out;
1577
1578 #define RAID_BYPASS_STATUS 4
1579 #define RAID_BYPASS_CONFIGURED 0x1
1580 #define RAID_BYPASS_ENABLED 0x2
1581
1582 bypass_status = buffer[RAID_BYPASS_STATUS];
1583 device->raid_bypass_configured =
1584 (bypass_status & RAID_BYPASS_CONFIGURED) != 0;
1585 if (device->raid_bypass_configured &&
1586 (bypass_status & RAID_BYPASS_ENABLED) &&
1587 pqi_get_raid_map(ctrl_info, device) == 0) {
1588 device->raid_bypass_enabled = true;
1589 if (get_unaligned_le16(&device->raid_map->flags) &
1590 RAID_MAP_ENCRYPTION_ENABLED)
1591 pqi_set_max_transfer_encrypted(ctrl_info, device);
1592 }
1593
1594 out:
1595 kfree(buffer);
1596 }
1597
1598 /*
1599 * Use vendor-specific VPD to determine online/offline status of a volume.
1600 */
1601
pqi_get_volume_status(struct pqi_ctrl_info * ctrl_info,struct pqi_scsi_dev * device)1602 static void pqi_get_volume_status(struct pqi_ctrl_info *ctrl_info,
1603 struct pqi_scsi_dev *device)
1604 {
1605 int rc;
1606 size_t page_length;
1607 u8 volume_status = CISS_LV_STATUS_UNAVAILABLE;
1608 bool volume_offline = true;
1609 u32 volume_flags;
1610 struct ciss_vpd_logical_volume_status *vpd;
1611
1612 vpd = kmalloc(sizeof(*vpd), GFP_KERNEL);
1613 if (!vpd)
1614 goto no_buffer;
1615
1616 rc = pqi_scsi_inquiry(ctrl_info, device->scsi3addr,
1617 VPD_PAGE | CISS_VPD_LV_STATUS, vpd, sizeof(*vpd));
1618 if (rc)
1619 goto out;
1620
1621 if (vpd->page_code != CISS_VPD_LV_STATUS)
1622 goto out;
1623
1624 page_length = offsetof(struct ciss_vpd_logical_volume_status,
1625 volume_status) + vpd->page_length;
1626 if (page_length < sizeof(*vpd))
1627 goto out;
1628
1629 volume_status = vpd->volume_status;
1630 volume_flags = get_unaligned_be32(&vpd->flags);
1631 volume_offline = (volume_flags & CISS_LV_FLAGS_NO_HOST_IO) != 0;
1632
1633 out:
1634 kfree(vpd);
1635 no_buffer:
1636 device->volume_status = volume_status;
1637 device->volume_offline = volume_offline;
1638 }
1639
1640 #define PQI_DEVICE_NCQ_PRIO_SUPPORTED 0x01
1641 #define PQI_DEVICE_PHY_MAP_SUPPORTED 0x10
1642 #define PQI_DEVICE_ERASE_IN_PROGRESS 0x10
1643
pqi_get_physical_device_info(struct pqi_ctrl_info * ctrl_info,struct pqi_scsi_dev * device,struct bmic_identify_physical_device * id_phys)1644 static int pqi_get_physical_device_info(struct pqi_ctrl_info *ctrl_info,
1645 struct pqi_scsi_dev *device,
1646 struct bmic_identify_physical_device *id_phys)
1647 {
1648 int rc;
1649
1650 memset(id_phys, 0, sizeof(*id_phys));
1651
1652 rc = pqi_identify_physical_device(ctrl_info, device,
1653 id_phys, sizeof(*id_phys));
1654 if (rc) {
1655 device->queue_depth = PQI_PHYSICAL_DISK_DEFAULT_MAX_QUEUE_DEPTH;
1656 return rc;
1657 }
1658
1659 scsi_sanitize_inquiry_string(&id_phys->model[0], 8);
1660 scsi_sanitize_inquiry_string(&id_phys->model[8], 16);
1661
1662 memcpy(device->vendor, &id_phys->model[0], sizeof(device->vendor));
1663 memcpy(device->model, &id_phys->model[8], sizeof(device->model));
1664
1665 device->box_index = id_phys->box_index;
1666 device->phys_box_on_bus = id_phys->phys_box_on_bus;
1667 device->phy_connected_dev_type = id_phys->phy_connected_dev_type[0];
1668 device->queue_depth =
1669 get_unaligned_le16(&id_phys->current_queue_depth_limit);
1670 device->active_path_index = id_phys->active_path_number;
1671 device->path_map = id_phys->redundant_path_present_map;
1672 memcpy(&device->box,
1673 &id_phys->alternate_paths_phys_box_on_port,
1674 sizeof(device->box));
1675 memcpy(&device->phys_connector,
1676 &id_phys->alternate_paths_phys_connector,
1677 sizeof(device->phys_connector));
1678 device->bay = id_phys->phys_bay_in_box;
1679 device->lun_count = id_phys->multi_lun_device_lun_count;
1680 if ((id_phys->even_more_flags & PQI_DEVICE_PHY_MAP_SUPPORTED) &&
1681 id_phys->phy_count)
1682 device->phy_id =
1683 id_phys->phy_to_phy_map[device->active_path_index];
1684 else
1685 device->phy_id = 0xFF;
1686
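/* NCQ priority support is reported in the upper 16 bits of misc_drive_flags. */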
1687 device->ncq_prio_support =
1688 ((get_unaligned_le32(&id_phys->misc_drive_flags) >> 16) &
1689 PQI_DEVICE_NCQ_PRIO_SUPPORTED);
1690
1691 device->erase_in_progress = !!(get_unaligned_le16(&id_phys->extra_physical_drive_flags) & PQI_DEVICE_ERASE_IN_PROGRESS);
1692
1693 return 0;
1694 }
1695
1696 static int pqi_get_logical_device_info(struct pqi_ctrl_info *ctrl_info,
1697 struct pqi_scsi_dev *device)
1698 {
1699 int rc;
1700 u8 *buffer;
1701
1702 buffer = kmalloc(64, GFP_KERNEL);
1703 if (!buffer)
1704 return -ENOMEM;
1705
1706 /* Send an inquiry to the device to see what it is. */
1707 rc = pqi_scsi_inquiry(ctrl_info, device->scsi3addr, 0, buffer, 64);
1708 if (rc)
1709 goto out;
1710
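/*
 * Standard INQUIRY data layout: byte 0 carries the peripheral device
 * type, bytes 8-15 the vendor ID, and bytes 16-31 the model.
 */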
1711 scsi_sanitize_inquiry_string(&buffer[8], 8);
1712 scsi_sanitize_inquiry_string(&buffer[16], 16);
1713
1714 device->devtype = buffer[0] & 0x1f;
1715 memcpy(device->vendor, &buffer[8], sizeof(device->vendor));
1716 memcpy(device->model, &buffer[16], sizeof(device->model));
1717
1718 if (device->devtype == TYPE_DISK) {
1719 if (device->is_external_raid_device) {
1720 device->raid_level = SA_RAID_UNKNOWN;
1721 device->volume_status = CISS_LV_OK;
1722 device->volume_offline = false;
1723 } else {
1724 pqi_get_raid_level(ctrl_info, device);
1725 pqi_get_raid_bypass_status(ctrl_info, device);
1726 pqi_get_volume_status(ctrl_info, device);
1727 }
1728 }
1729
1730 out:
1731 kfree(buffer);
1732
1733 return rc;
1734 }
1735
1736 /*
1737 * Prevent adding a drive to the OS in some corner cases, such as a drive
1738 * undergoing a sanitize (erase) operation. Some OSes will continue to poll
1739 * the drive until the sanitize completes, which can take hours,
1740 * resulting in long bootup delays. Commands such as TUR and READ_CAP
1741 * are allowed, but READ/WRITE return a check condition, so the OS
1742 * cannot read the partition table.
1743 * Note: devices that have completed sanitize must be re-enabled
1744 * using the management utility.
1745 */
1746 static inline bool pqi_keep_device_offline(struct pqi_scsi_dev *device)
1747 {
1748 return device->erase_in_progress;
1749 }
1750
1751 static int pqi_get_device_info_phys_logical(struct pqi_ctrl_info *ctrl_info,
1752 struct pqi_scsi_dev *device,
1753 struct bmic_identify_physical_device *id_phys)
1754 {
1755 int rc;
1756
1757 if (device->is_expander_smp_device)
1758 return 0;
1759
1760 if (pqi_is_logical_device(device))
1761 rc = pqi_get_logical_device_info(ctrl_info, device);
1762 else
1763 rc = pqi_get_physical_device_info(ctrl_info, device, id_phys);
1764
1765 return rc;
1766 }
1767
1768 static int pqi_get_device_info(struct pqi_ctrl_info *ctrl_info,
1769 struct pqi_scsi_dev *device,
1770 struct bmic_identify_physical_device *id_phys)
1771 {
1772 int rc;
1773
1774 rc = pqi_get_device_info_phys_logical(ctrl_info, device, id_phys);
1775
1776 if (rc == 0 && device->lun_count == 0)
1777 device->lun_count = 1;
1778
1779 return rc;
1780 }
1781
1782 static void pqi_show_volume_status(struct pqi_ctrl_info *ctrl_info,
1783 struct pqi_scsi_dev *device)
1784 {
1785 char *status;
1786 static const char unknown_state_str[] =
1787 "Volume is in an unknown state (%u)";
1788 char unknown_state_buffer[sizeof(unknown_state_str) + 10];
1789
1790 switch (device->volume_status) {
1791 case CISS_LV_OK:
1792 status = "Volume online";
1793 break;
1794 case CISS_LV_FAILED:
1795 status = "Volume failed";
1796 break;
1797 case CISS_LV_NOT_CONFIGURED:
1798 status = "Volume not configured";
1799 break;
1800 case CISS_LV_DEGRADED:
1801 status = "Volume degraded";
1802 break;
1803 case CISS_LV_READY_FOR_RECOVERY:
1804 status = "Volume ready for recovery operation";
1805 break;
1806 case CISS_LV_UNDERGOING_RECOVERY:
1807 status = "Volume undergoing recovery";
1808 break;
1809 case CISS_LV_WRONG_PHYSICAL_DRIVE_REPLACED:
1810 status = "Wrong physical drive was replaced";
1811 break;
1812 case CISS_LV_PHYSICAL_DRIVE_CONNECTION_PROBLEM:
1813 status = "A physical drive not properly connected";
1814 break;
1815 case CISS_LV_HARDWARE_OVERHEATING:
1816 status = "Hardware is overheating";
1817 break;
1818 case CISS_LV_HARDWARE_HAS_OVERHEATED:
1819 status = "Hardware has overheated";
1820 break;
1821 case CISS_LV_UNDERGOING_EXPANSION:
1822 status = "Volume undergoing expansion";
1823 break;
1824 case CISS_LV_NOT_AVAILABLE:
1825 status = "Volume waiting for transforming volume";
1826 break;
1827 case CISS_LV_QUEUED_FOR_EXPANSION:
1828 status = "Volume queued for expansion";
1829 break;
1830 case CISS_LV_DISABLED_SCSI_ID_CONFLICT:
1831 status = "Volume disabled due to SCSI ID conflict";
1832 break;
1833 case CISS_LV_EJECTED:
1834 status = "Volume has been ejected";
1835 break;
1836 case CISS_LV_UNDERGOING_ERASE:
1837 status = "Volume undergoing background erase";
1838 break;
1839 case CISS_LV_READY_FOR_PREDICTIVE_SPARE_REBUILD:
1840 status = "Volume ready for predictive spare rebuild";
1841 break;
1842 case CISS_LV_UNDERGOING_RPI:
1843 status = "Volume undergoing rapid parity initialization";
1844 break;
1845 case CISS_LV_PENDING_RPI:
1846 status = "Volume queued for rapid parity initialization";
1847 break;
1848 case CISS_LV_ENCRYPTED_NO_KEY:
1849 status = "Encrypted volume inaccessible - key not present";
1850 break;
1851 case CISS_LV_UNDERGOING_ENCRYPTION:
1852 status = "Volume undergoing encryption process";
1853 break;
1854 case CISS_LV_UNDERGOING_ENCRYPTION_REKEYING:
1855 status = "Volume undergoing encryption re-keying process";
1856 break;
1857 case CISS_LV_ENCRYPTED_IN_NON_ENCRYPTED_CONTROLLER:
1858 status = "Volume encrypted but encryption is disabled";
1859 break;
1860 case CISS_LV_PENDING_ENCRYPTION:
1861 status = "Volume pending migration to encrypted state";
1862 break;
1863 case CISS_LV_PENDING_ENCRYPTION_REKEYING:
1864 status = "Volume pending encryption rekeying";
1865 break;
1866 case CISS_LV_NOT_SUPPORTED:
1867 status = "Volume not supported on this controller";
1868 break;
1869 case CISS_LV_STATUS_UNAVAILABLE:
1870 status = "Volume status not available";
1871 break;
1872 default:
1873 snprintf(unknown_state_buffer, sizeof(unknown_state_buffer),
1874 unknown_state_str, device->volume_status);
1875 status = unknown_state_buffer;
1876 break;
1877 }
1878
1879 dev_info(&ctrl_info->pci_dev->dev,
1880 "scsi %d:%d:%d:%d %s\n",
1881 ctrl_info->scsi_host->host_no,
1882 device->bus, device->target, device->lun, status);
1883 }
1884
1885 static void pqi_rescan_worker(struct work_struct *work)
1886 {
1887 struct pqi_ctrl_info *ctrl_info;
1888
1889 ctrl_info = container_of(to_delayed_work(work), struct pqi_ctrl_info,
1890 rescan_work);
1891
1892 pqi_scan_scsi_devices(ctrl_info);
1893 }
1894
1895 static int pqi_add_device(struct pqi_ctrl_info *ctrl_info,
1896 struct pqi_scsi_dev *device)
1897 {
1898 int rc;
1899
1900 if (pqi_is_logical_device(device))
1901 rc = scsi_add_device(ctrl_info->scsi_host, device->bus,
1902 device->target, device->lun);
1903 else
1904 rc = pqi_add_sas_device(ctrl_info->sas_host, device);
1905
1906 return rc;
1907 }
1908
1909 #define PQI_REMOVE_DEVICE_PENDING_IO_TIMEOUT_MSECS (20 * 1000)
1910
1911 static inline void pqi_remove_device(struct pqi_ctrl_info *ctrl_info, struct pqi_scsi_dev *device)
1912 {
1913 int rc;
1914 int lun;
1915
1916 for (lun = 0; lun < device->lun_count; lun++) {
1917 rc = pqi_device_wait_for_pending_io(ctrl_info, device, lun,
1918 PQI_REMOVE_DEVICE_PENDING_IO_TIMEOUT_MSECS);
1919 if (rc)
1920 dev_err(&ctrl_info->pci_dev->dev,
1921 "scsi %d:%d:%d:%d removing device with %d outstanding command(s)\n",
1922 ctrl_info->scsi_host->host_no, device->bus,
1923 device->target, lun,
1924 atomic_read(&device->scsi_cmds_outstanding[lun]));
1925 }
1926
1927 if (pqi_is_logical_device(device))
1928 scsi_remove_device(device->sdev);
1929 else
1930 pqi_remove_sas_device(device);
1931
1932 pqi_device_remove_start(device);
1933 }
1934
1935 /* Assumes the SCSI device list lock is held. */
1936
1937 static struct pqi_scsi_dev *pqi_find_scsi_dev(struct pqi_ctrl_info *ctrl_info,
1938 int bus, int target, int lun)
1939 {
1940 struct pqi_scsi_dev *device;
1941
1942 list_for_each_entry(device, &ctrl_info->scsi_device_list, scsi_device_list_entry)
1943 if (device->bus == bus && device->target == target && device->lun == lun)
1944 return device;
1945
1946 return NULL;
1947 }
1948
1949 static inline bool pqi_device_equal(struct pqi_scsi_dev *dev1, struct pqi_scsi_dev *dev2)
1950 {
1951 if (dev1->is_physical_device != dev2->is_physical_device)
1952 return false;
1953
1954 if (dev1->is_physical_device)
1955 return memcmp(dev1->wwid, dev2->wwid, sizeof(dev1->wwid)) == 0;
1956
1957 return memcmp(dev1->volume_id, dev2->volume_id, sizeof(dev1->volume_id)) == 0;
1958 }
1959
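/*
 * Result of matching a newly reported device against the existing device
 * list (see pqi_scsi_find_entry() below):
 * DEVICE_NOT_FOUND - no entry with a matching SCSI3 address exists;
 * DEVICE_CHANGED - an entry with the same address exists but refers to a
 * different device (or the reported volume is offline), so the old entry
 * must be replaced;
 * DEVICE_SAME - the entry matches the reported device.
 */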
1960 enum pqi_find_result {
1961 DEVICE_NOT_FOUND,
1962 DEVICE_CHANGED,
1963 DEVICE_SAME,
1964 };
1965
1966 static enum pqi_find_result pqi_scsi_find_entry(struct pqi_ctrl_info *ctrl_info,
1967 struct pqi_scsi_dev *device_to_find, struct pqi_scsi_dev **matching_device)
1968 {
1969 struct pqi_scsi_dev *device;
1970
1971 list_for_each_entry(device, &ctrl_info->scsi_device_list, scsi_device_list_entry) {
1972 if (pqi_scsi3addr_equal(device_to_find->scsi3addr, device->scsi3addr)) {
1973 *matching_device = device;
1974 if (pqi_device_equal(device_to_find, device)) {
1975 if (device_to_find->volume_offline)
1976 return DEVICE_CHANGED;
1977 return DEVICE_SAME;
1978 }
1979 return DEVICE_CHANGED;
1980 }
1981 }
1982
1983 return DEVICE_NOT_FOUND;
1984 }
1985
1986 static inline const char *pqi_device_type(struct pqi_scsi_dev *device)
1987 {
1988 if (device->is_expander_smp_device)
1989 return "Enclosure SMP ";
1990
1991 return scsi_device_type(device->devtype);
1992 }
1993
1994 #define PQI_DEV_INFO_BUFFER_LENGTH 128
1995
1996 static void pqi_dev_info(struct pqi_ctrl_info *ctrl_info,
1997 char *action, struct pqi_scsi_dev *device)
1998 {
1999 ssize_t count;
2000 char buffer[PQI_DEV_INFO_BUFFER_LENGTH];
2001
2002 count = scnprintf(buffer, PQI_DEV_INFO_BUFFER_LENGTH,
2003 "%d:%d:", ctrl_info->scsi_host->host_no, device->bus);
2004
2005 if (device->target_lun_valid)
2006 count += scnprintf(buffer + count,
2007 PQI_DEV_INFO_BUFFER_LENGTH - count,
2008 "%d:%d",
2009 device->target,
2010 device->lun);
2011 else
2012 count += scnprintf(buffer + count,
2013 PQI_DEV_INFO_BUFFER_LENGTH - count,
2014 "-:-");
2015
2016 if (pqi_is_logical_device(device)) {
2017 count += scnprintf(buffer + count,
2018 PQI_DEV_INFO_BUFFER_LENGTH - count,
2019 " %08x%08x",
2020 *((u32 *)&device->scsi3addr),
2021 *((u32 *)&device->scsi3addr[4]));
2022 } else if (ctrl_info->rpl_extended_format_4_5_supported) {
2023 if (device->device_type == SA_DEVICE_TYPE_NVME)
2024 count += scnprintf(buffer + count,
2025 PQI_DEV_INFO_BUFFER_LENGTH - count,
2026 " %016llx%016llx",
2027 get_unaligned_be64(&device->wwid[0]),
2028 get_unaligned_be64(&device->wwid[8]));
2029 else
2030 count += scnprintf(buffer + count,
2031 PQI_DEV_INFO_BUFFER_LENGTH - count,
2032 " %016llx",
2033 get_unaligned_be64(&device->wwid[0]));
2034 } else {
2035 count += scnprintf(buffer + count,
2036 PQI_DEV_INFO_BUFFER_LENGTH - count,
2037 " %016llx",
2038 get_unaligned_be64(&device->wwid[0]));
2039 }
2040
2041
2042 count += scnprintf(buffer + count, PQI_DEV_INFO_BUFFER_LENGTH - count,
2043 " %s %.8s %.16s ",
2044 pqi_device_type(device),
2045 device->vendor,
2046 device->model);
2047
2048 if (pqi_is_logical_device(device)) {
2049 if (device->devtype == TYPE_DISK)
2050 count += scnprintf(buffer + count,
2051 PQI_DEV_INFO_BUFFER_LENGTH - count,
2052 "SSDSmartPathCap%c En%c %-12s",
2053 device->raid_bypass_configured ? '+' : '-',
2054 device->raid_bypass_enabled ? '+' : '-',
2055 pqi_raid_level_to_string(device->raid_level));
2056 } else {
2057 count += scnprintf(buffer + count,
2058 PQI_DEV_INFO_BUFFER_LENGTH - count,
2059 "AIO%c", device->aio_enabled ? '+' : '-');
2060 if (device->devtype == TYPE_DISK ||
2061 device->devtype == TYPE_ZBC)
2062 count += scnprintf(buffer + count,
2063 PQI_DEV_INFO_BUFFER_LENGTH - count,
2064 " qd=%-6d", device->queue_depth);
2065 }
2066
2067 dev_info(&ctrl_info->pci_dev->dev, "%s %s\n", action, buffer);
2068 }
2069
2070 static bool pqi_raid_maps_equal(struct raid_map *raid_map1, struct raid_map *raid_map2)
2071 {
2072 u32 raid_map1_size;
2073 u32 raid_map2_size;
2074
2075 if (raid_map1 == NULL || raid_map2 == NULL)
2076 return raid_map1 == raid_map2;
2077
2078 raid_map1_size = get_unaligned_le32(&raid_map1->structure_size);
2079 raid_map2_size = get_unaligned_le32(&raid_map2->structure_size);
2080
2081 if (raid_map1_size != raid_map2_size)
2082 return false;
2083
2084 return memcmp(raid_map1, raid_map2, raid_map1_size) == 0;
2085 }
2086
2087 /* Assumes the SCSI device list lock is held. */
2088
2089 static void pqi_scsi_update_device(struct pqi_ctrl_info *ctrl_info,
2090 struct pqi_scsi_dev *existing_device, struct pqi_scsi_dev *new_device)
2091 {
2092 existing_device->device_type = new_device->device_type;
2093 existing_device->bus = new_device->bus;
2094 if (new_device->target_lun_valid) {
2095 existing_device->target = new_device->target;
2096 existing_device->lun = new_device->lun;
2097 existing_device->target_lun_valid = true;
2098 }
2099
2100 /* By definition, the scsi3addr and wwid fields are already the same. */
2101
2102 existing_device->is_physical_device = new_device->is_physical_device;
2103 memcpy(existing_device->vendor, new_device->vendor, sizeof(existing_device->vendor));
2104 memcpy(existing_device->model, new_device->model, sizeof(existing_device->model));
2105 existing_device->sas_address = new_device->sas_address;
2106 existing_device->queue_depth = new_device->queue_depth;
2107 existing_device->device_offline = false;
2108 existing_device->lun_count = new_device->lun_count;
2109
2110 if (pqi_is_logical_device(existing_device)) {
2111 existing_device->is_external_raid_device = new_device->is_external_raid_device;
2112
2113 if (existing_device->devtype == TYPE_DISK) {
2114 existing_device->raid_level = new_device->raid_level;
2115 existing_device->volume_status = new_device->volume_status;
2116 memset(existing_device->next_bypass_group, 0, sizeof(existing_device->next_bypass_group));
2117 if (!pqi_raid_maps_equal(existing_device->raid_map, new_device->raid_map)) {
2118 kfree(existing_device->raid_map);
2119 existing_device->raid_map = new_device->raid_map;
2120 /* To prevent this from being freed later. */
2121 new_device->raid_map = NULL;
2122 }
2123 if (new_device->raid_bypass_enabled && existing_device->raid_io_stats == NULL) {
2124 existing_device->raid_io_stats = new_device->raid_io_stats;
2125 new_device->raid_io_stats = NULL;
2126 }
2127 existing_device->raid_bypass_configured = new_device->raid_bypass_configured;
2128 existing_device->raid_bypass_enabled = new_device->raid_bypass_enabled;
2129 }
2130 } else {
2131 existing_device->aio_enabled = new_device->aio_enabled;
2132 existing_device->aio_handle = new_device->aio_handle;
2133 existing_device->is_expander_smp_device = new_device->is_expander_smp_device;
2134 existing_device->active_path_index = new_device->active_path_index;
2135 existing_device->phy_id = new_device->phy_id;
2136 existing_device->path_map = new_device->path_map;
2137 existing_device->bay = new_device->bay;
2138 existing_device->box_index = new_device->box_index;
2139 existing_device->phys_box_on_bus = new_device->phys_box_on_bus;
2140 existing_device->phy_connected_dev_type = new_device->phy_connected_dev_type;
2141 memcpy(existing_device->box, new_device->box, sizeof(existing_device->box));
2142 memcpy(existing_device->phys_connector, new_device->phys_connector, sizeof(existing_device->phys_connector));
2143 }
2144 }
2145
2146 static inline void pqi_free_device(struct pqi_scsi_dev *device)
2147 {
2148 if (device) {
2149 free_percpu(device->raid_io_stats);
2150 kfree(device->raid_map);
2151 kfree(device);
2152 }
2153 }
2154
2155 /*
2156 * Called when exposing a new device to the OS fails in order to re-adjust
2157 * our internal SCSI device list to match the SCSI ML's view.
2158 */
2159
2160 static inline void pqi_fixup_botched_add(struct pqi_ctrl_info *ctrl_info,
2161 struct pqi_scsi_dev *device)
2162 {
2163 unsigned long flags;
2164
2165 spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
2166 list_del(&device->scsi_device_list_entry);
2167 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
2168
2169 /* Allow the device structure to be freed later. */
2170 device->keep_device = false;
2171 }
2172
2173 static inline bool pqi_is_device_added(struct pqi_scsi_dev *device)
2174 {
2175 if (device->is_expander_smp_device)
2176 return device->sas_port != NULL;
2177
2178 return device->sdev != NULL;
2179 }
2180
2181 static inline void pqi_init_device_tmf_work(struct pqi_scsi_dev *device)
2182 {
2183 unsigned int lun;
2184 struct pqi_tmf_work *tmf_work;
2185
2186 for (lun = 0, tmf_work = device->tmf_work; lun < PQI_MAX_LUNS_PER_DEVICE; lun++, tmf_work++)
2187 INIT_WORK(&tmf_work->work_struct, pqi_tmf_worker);
2188 }
2189
2190 static inline bool pqi_volume_rescan_needed(struct pqi_scsi_dev *device)
2191 {
2192 if (pqi_device_in_remove(device))
2193 return false;
2194
2195 if (device->sdev == NULL)
2196 return false;
2197
2198 if (!scsi_device_online(device->sdev))
2199 return false;
2200
2201 return device->rescan;
2202 }
2203
2204 static void pqi_update_device_list(struct pqi_ctrl_info *ctrl_info,
2205 struct pqi_scsi_dev *new_device_list[], unsigned int num_new_devices)
2206 {
2207 int rc;
2208 unsigned int i;
2209 unsigned long flags;
2210 enum pqi_find_result find_result;
2211 struct pqi_scsi_dev *device;
2212 struct pqi_scsi_dev *next;
2213 struct pqi_scsi_dev *matching_device;
2214 LIST_HEAD(add_list);
2215 LIST_HEAD(delete_list);
2216
2217 /*
2218 * The idea here is to do as little work as possible while holding the
2219 * spinlock. That's why we go to great pains to defer anything other
2220 * than updating the internal device list until after we release the
2221 * spinlock.
2222 */
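/*
 * The reconciliation below is mark-and-sweep: mark every existing device
 * as gone, clear the mark on each device that is rediscovered, then sweep
 * whatever is still marked onto the delete list.
 */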
2223
2224 spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
2225
2226 /* Assume that all devices in the existing list have gone away. */
2227 list_for_each_entry(device, &ctrl_info->scsi_device_list, scsi_device_list_entry)
2228 device->device_gone = true;
2229
2230 for (i = 0; i < num_new_devices; i++) {
2231 device = new_device_list[i];
2232
2233 find_result = pqi_scsi_find_entry(ctrl_info, device,
2234 &matching_device);
2235
2236 switch (find_result) {
2237 case DEVICE_SAME:
2238 /*
2239 * The newly found device is already in the existing
2240 * device list.
2241 */
2242 device->new_device = false;
2243 matching_device->device_gone = false;
2244 pqi_scsi_update_device(ctrl_info, matching_device, device);
2245 break;
2246 case DEVICE_NOT_FOUND:
2247 /*
2248 * The newly found device is NOT in the existing device
2249 * list.
2250 */
2251 device->new_device = true;
2252 break;
2253 case DEVICE_CHANGED:
2254 /*
2255 * The original device has gone away and we need to add
2256 * the new device.
2257 */
2258 device->new_device = true;
2259 break;
2260 }
2261 }
2262
2263 /* Process all devices that have gone away. */
2264 list_for_each_entry_safe(device, next, &ctrl_info->scsi_device_list,
2265 scsi_device_list_entry) {
2266 if (device->device_gone) {
2267 list_del(&device->scsi_device_list_entry);
2268 list_add_tail(&device->delete_list_entry, &delete_list);
2269 }
2270 }
2271
2272 /* Process all new devices. */
2273 for (i = 0; i < num_new_devices; i++) {
2274 device = new_device_list[i];
2275 if (!device->new_device)
2276 continue;
2277 if (device->volume_offline)
2278 continue;
2279 list_add_tail(&device->scsi_device_list_entry,
2280 &ctrl_info->scsi_device_list);
2281 list_add_tail(&device->add_list_entry, &add_list);
2282 /* To prevent this device structure from being freed later. */
2283 device->keep_device = true;
2284 pqi_init_device_tmf_work(device);
2285 }
2286
2287 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
2288
2289 /*
2290 * If OFA is in progress and there are devices that need to be deleted,
2291 * allow any pending reset operations to continue and unblock any SCSI
2292 * requests before removal.
2293 */
2294 if (pqi_ofa_in_progress(ctrl_info)) {
2295 list_for_each_entry_safe(device, next, &delete_list, delete_list_entry)
2296 if (pqi_is_device_added(device))
2297 pqi_device_remove_start(device);
2298 pqi_ctrl_unblock_device_reset(ctrl_info);
2299 pqi_scsi_unblock_requests(ctrl_info);
2300 }
2301
2302 /* Remove all devices that have gone away. */
2303 list_for_each_entry_safe(device, next, &delete_list, delete_list_entry) {
2304 if (device->volume_offline) {
2305 pqi_dev_info(ctrl_info, "offline", device);
2306 pqi_show_volume_status(ctrl_info, device);
2307 } else {
2308 pqi_dev_info(ctrl_info, "removed", device);
2309 }
2310 if (pqi_is_device_added(device))
2311 pqi_remove_device(ctrl_info, device);
2312 list_del(&device->delete_list_entry);
2313 pqi_free_device(device);
2314 }
2315
2316 /*
2317 * Notify the SML of any existing device changes, such as
2318 * queue depth or device size.
2319 */
2320 list_for_each_entry(device, &ctrl_info->scsi_device_list, scsi_device_list_entry) {
2321 /*
2322 * Check for queue depth change.
2323 */
2324 if (device->sdev && device->queue_depth != device->advertised_queue_depth) {
2325 device->advertised_queue_depth = device->queue_depth;
2326 scsi_change_queue_depth(device->sdev, device->advertised_queue_depth);
2327 }
2328 spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
2329 /*
2330 * Check for changes in the device, such as size.
2331 */
2332 if (pqi_volume_rescan_needed(device)) {
2333 device->rescan = false;
2334 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
2335 scsi_rescan_device(device->sdev);
2336 } else {
2337 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
2338 }
2339 }
2340
2341 /* Expose any new devices. */
2342 list_for_each_entry_safe(device, next, &add_list, add_list_entry) {
2343 if (!pqi_is_device_added(device)) {
2344 rc = pqi_add_device(ctrl_info, device);
2345 if (rc == 0) {
2346 pqi_dev_info(ctrl_info, "added", device);
2347 } else {
2348 dev_warn(&ctrl_info->pci_dev->dev,
2349 "scsi %d:%d:%d:%d addition failed, device not added\n",
2350 ctrl_info->scsi_host->host_no,
2351 device->bus, device->target,
2352 device->lun);
2353 pqi_fixup_botched_add(ctrl_info, device);
2354 }
2355 }
2356 }
2357
2358 }
2359
2360 static inline bool pqi_is_supported_device(struct pqi_scsi_dev *device)
2361 {
2362 /*
2363 * Only support the HBA controller itself as a RAID
2364 * controller. If it's a RAID controller other than
2365 * the HBA itself (an external RAID controller, for
2366 * example), we don't support it.
2367 */
2368 if (device->device_type == SA_DEVICE_TYPE_CONTROLLER &&
2369 !pqi_is_hba_lunid(device->scsi3addr))
2370 return false;
2371
2372 return true;
2373 }
2374
2375 static inline bool pqi_skip_device(u8 *scsi3addr)
2376 {
2377 /* Ignore all masked devices. */
2378 if (MASKED_DEVICE(scsi3addr))
2379 return true;
2380
2381 return false;
2382 }
2383
2384 static inline void pqi_mask_device(u8 *scsi3addr)
2385 {
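/*
 * Setting the top two bits of byte 3 of the CISS LUN address marks the
 * device as masked; this is presumably the same bit pattern that
 * MASKED_DEVICE() tests for in pqi_skip_device().
 */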
2386 scsi3addr[3] |= 0xc0;
2387 }
2388
2389 static inline bool pqi_expose_device(struct pqi_scsi_dev *device)
2390 {
2391 return !device->is_physical_device || !pqi_skip_device(device->scsi3addr);
2392 }
2393
2394 static int pqi_update_scsi_devices(struct pqi_ctrl_info *ctrl_info)
2395 {
2396 int i;
2397 int rc;
2398 LIST_HEAD(new_device_list_head);
2399 struct report_phys_lun_16byte_wwid_list *physdev_list = NULL;
2400 struct report_log_lun_list *logdev_list = NULL;
2401 struct report_phys_lun_16byte_wwid *phys_lun;
2402 struct report_log_lun *log_lun;
2403 struct bmic_identify_physical_device *id_phys = NULL;
2404 u32 num_physicals;
2405 u32 num_logicals;
2406 struct pqi_scsi_dev **new_device_list = NULL;
2407 struct pqi_scsi_dev *device;
2408 struct pqi_scsi_dev *next;
2409 unsigned int num_new_devices;
2410 unsigned int num_valid_devices;
2411 bool is_physical_device;
2412 u8 *scsi3addr;
2413 unsigned int physical_index;
2414 unsigned int logical_index;
2415 static char *out_of_memory_msg =
2416 "failed to allocate memory, device discovery stopped";
2417
2418 rc = pqi_get_device_lists(ctrl_info, &physdev_list, &logdev_list);
2419 if (rc)
2420 goto out;
2421
2422 if (physdev_list)
2423 num_physicals =
2424 get_unaligned_be32(&physdev_list->header.list_length)
2425 / sizeof(physdev_list->lun_entries[0]);
2426 else
2427 num_physicals = 0;
2428
2429 if (logdev_list)
2430 num_logicals =
2431 get_unaligned_be32(&logdev_list->header.list_length)
2432 / sizeof(logdev_list->lun_entries[0]);
2433 else
2434 num_logicals = 0;
2435
2436 if (num_physicals) {
2437 /*
2438 * We need this buffer for calls to pqi_get_physical_device_info()
2439 * below. We allocate it here instead of inside
2440 * pqi_get_physical_device_info() because it's a fairly large
2441 * buffer.
2442 */
2443 id_phys = kmalloc(sizeof(*id_phys), GFP_KERNEL);
2444 if (!id_phys) {
2445 dev_warn(&ctrl_info->pci_dev->dev, "%s\n",
2446 out_of_memory_msg);
2447 rc = -ENOMEM;
2448 goto out;
2449 }
2450
2451 if (pqi_hide_vsep) {
2452 for (i = num_physicals - 1; i >= 0; i--) {
2453 phys_lun = &physdev_list->lun_entries[i];
2454 if (CISS_GET_DRIVE_NUMBER(phys_lun->lunid) == PQI_VSEP_CISS_BTL) {
2455 pqi_mask_device(phys_lun->lunid);
2456 break;
2457 }
2458 }
2459 }
2460 }
2461
2462 if (num_logicals &&
2463 (logdev_list->header.flags & CISS_REPORT_LOG_FLAG_DRIVE_TYPE_MIX))
2464 ctrl_info->lv_drive_type_mix_valid = true;
2465
2466 num_new_devices = num_physicals + num_logicals;
2467
2468 new_device_list = kmalloc_array(num_new_devices,
2469 sizeof(*new_device_list),
2470 GFP_KERNEL);
2471 if (!new_device_list) {
2472 dev_warn(&ctrl_info->pci_dev->dev, "%s\n", out_of_memory_msg);
2473 rc = -ENOMEM;
2474 goto out;
2475 }
2476
2477 for (i = 0; i < num_new_devices; i++) {
2478 device = kzalloc(sizeof(*device), GFP_KERNEL);
2479 if (!device) {
2480 dev_warn(&ctrl_info->pci_dev->dev, "%s\n",
2481 out_of_memory_msg);
2482 rc = -ENOMEM;
2483 goto out;
2484 }
2485 list_add_tail(&device->new_device_list_entry,
2486 &new_device_list_head);
2487 }
2488
2489 device = NULL;
2490 num_valid_devices = 0;
2491 physical_index = 0;
2492 logical_index = 0;
2493
2494 for (i = 0; i < num_new_devices; i++) {
2495
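/*
 * Physical LUNs are enumerated before logical LUNs by default; the
 * pqi_expose_ld_first module parameter reverses that order.
 */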
2496 if ((!pqi_expose_ld_first && i < num_physicals) ||
2497 (pqi_expose_ld_first && i >= num_logicals)) {
2498 is_physical_device = true;
2499 phys_lun = &physdev_list->lun_entries[physical_index++];
2500 log_lun = NULL;
2501 scsi3addr = phys_lun->lunid;
2502 } else {
2503 is_physical_device = false;
2504 phys_lun = NULL;
2505 log_lun = &logdev_list->lun_entries[logical_index++];
2506 scsi3addr = log_lun->lunid;
2507 }
2508
2509 if (is_physical_device && pqi_skip_device(scsi3addr))
2510 continue;
2511
2512 if (device)
2513 device = list_next_entry(device, new_device_list_entry);
2514 else
2515 device = list_first_entry(&new_device_list_head,
2516 struct pqi_scsi_dev, new_device_list_entry);
2517
2518 memcpy(device->scsi3addr, scsi3addr, sizeof(device->scsi3addr));
2519 device->is_physical_device = is_physical_device;
2520 if (is_physical_device) {
2521 device->device_type = phys_lun->device_type;
2522 if (device->device_type == SA_DEVICE_TYPE_EXPANDER_SMP)
2523 device->is_expander_smp_device = true;
2524 } else {
2525 device->is_external_raid_device =
2526 pqi_is_external_raid_addr(scsi3addr);
2527 }
2528
2529 if (!pqi_is_supported_device(device))
2530 continue;
2531
2532 /* Gather information about the device. */
2533 rc = pqi_get_device_info(ctrl_info, device, id_phys);
2534 if (rc == -ENOMEM) {
2535 dev_warn(&ctrl_info->pci_dev->dev, "%s\n",
2536 out_of_memory_msg);
2537 goto out;
2538 }
2539 if (rc) {
2540 if (device->is_physical_device)
2541 dev_warn(&ctrl_info->pci_dev->dev,
2542 "obtaining device info failed, skipping physical device %016llx%016llx\n",
2543 get_unaligned_be64(&phys_lun->wwid[0]),
2544 get_unaligned_be64(&phys_lun->wwid[8]));
2545 else
2546 dev_warn(&ctrl_info->pci_dev->dev,
2547 "obtaining device info failed, skipping logical device %08x%08x\n",
2548 *((u32 *)&device->scsi3addr),
2549 *((u32 *)&device->scsi3addr[4]));
2550 rc = 0;
2551 continue;
2552 }
2553
2554 /* Do not present disks that the OS cannot fully probe. */
2555 if (pqi_keep_device_offline(device))
2556 continue;
2557
2558 pqi_assign_bus_target_lun(device);
2559
2560 if (device->is_physical_device) {
2561 memcpy(device->wwid, phys_lun->wwid, sizeof(device->wwid));
2562 if ((phys_lun->device_flags &
2563 CISS_REPORT_PHYS_DEV_FLAG_AIO_ENABLED) &&
2564 phys_lun->aio_handle) {
2565 device->aio_enabled = true;
2566 device->aio_handle =
2567 phys_lun->aio_handle;
2568 }
2569 } else {
2570 memcpy(device->volume_id, log_lun->volume_id,
2571 sizeof(device->volume_id));
2572 }
2573
2574 device->sas_address = get_unaligned_be64(&device->wwid[0]);
2575
2576 new_device_list[num_valid_devices++] = device;
2577 }
2578
2579 pqi_update_device_list(ctrl_info, new_device_list, num_valid_devices);
2580
2581 out:
2582 list_for_each_entry_safe(device, next, &new_device_list_head,
2583 new_device_list_entry) {
2584 if (device->keep_device)
2585 continue;
2586 list_del(&device->new_device_list_entry);
2587 pqi_free_device(device);
2588 }
2589
2590 kfree(new_device_list);
2591 kfree(physdev_list);
2592 kfree(logdev_list);
2593 kfree(id_phys);
2594
2595 return rc;
2596 }
2597
2598 static int pqi_scan_scsi_devices(struct pqi_ctrl_info *ctrl_info)
2599 {
2600 int rc;
2601 int mutex_acquired;
2602
2603 if (pqi_ctrl_offline(ctrl_info))
2604 return -ENXIO;
2605
2606 mutex_acquired = mutex_trylock(&ctrl_info->scan_mutex);
2607
2608 if (!mutex_acquired) {
2609 if (pqi_ctrl_scan_blocked(ctrl_info))
2610 return -EBUSY;
2611 pqi_schedule_rescan_worker_delayed(ctrl_info);
2612 return -EINPROGRESS;
2613 }
2614
2615 rc = pqi_update_scsi_devices(ctrl_info);
2616 if (rc && !pqi_ctrl_scan_blocked(ctrl_info))
2617 pqi_schedule_rescan_worker_delayed(ctrl_info);
2618
2619 mutex_unlock(&ctrl_info->scan_mutex);
2620
2621 return rc;
2622 }
2623
2624 static void pqi_scan_start(struct Scsi_Host *shost)
2625 {
2626 struct pqi_ctrl_info *ctrl_info;
2627
2628 ctrl_info = shost_to_hba(shost);
2629
2630 pqi_scan_scsi_devices(ctrl_info);
2631 }
2632
2633 /* Returns TRUE if scan is finished. */
2634
2635 static int pqi_scan_finished(struct Scsi_Host *shost,
2636 unsigned long elapsed_time)
2637 {
2638 struct pqi_ctrl_info *ctrl_info;
2639
2640 ctrl_info = shost_priv(shost);
2641
2642 return !mutex_is_locked(&ctrl_info->scan_mutex);
2643 }
2644
2645 static inline void pqi_set_encryption_info(struct pqi_encryption_info *encryption_info,
2646 struct raid_map *raid_map, u64 first_block)
2647 {
2648 u32 volume_blk_size;
2649
2650 /*
2651 * Set the encryption tweak values based on logical block address.
2652 * If the block size is 512, the tweak value is equal to the LBA.
2653 * For other block sizes, tweak value is (LBA * block size) / 512.
2654 */
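/*
 * Worked example (illustrative values): with a 4096-byte block size, an
 * I/O starting at LBA 100 yields a tweak of (100 * 4096) / 512 = 800,
 * which is then split into its lower and upper 32-bit halves.
 */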
2655 volume_blk_size = get_unaligned_le32(&raid_map->volume_blk_size);
2656 if (volume_blk_size != 512)
2657 first_block = (first_block * volume_blk_size) / 512;
2658
2659 encryption_info->data_encryption_key_index =
2660 get_unaligned_le16(&raid_map->data_encryption_key_index);
2661 encryption_info->encrypt_tweak_lower = lower_32_bits(first_block);
2662 encryption_info->encrypt_tweak_upper = upper_32_bits(first_block);
2663 }
2664
2665 /*
2666 * Attempt to perform RAID bypass mapping for a logical volume I/O.
2667 */
2668
2669 static bool pqi_aio_raid_level_supported(struct pqi_ctrl_info *ctrl_info,
2670 struct pqi_scsi_dev_raid_map_data *rmd)
2671 {
2672 bool is_supported = true;
2673
2674 switch (rmd->raid_level) {
2675 case SA_RAID_0:
2676 break;
2677 case SA_RAID_1:
2678 if (rmd->is_write && (!ctrl_info->enable_r1_writes ||
2679 rmd->data_length > ctrl_info->max_write_raid_1_10_2drive))
2680 is_supported = false;
2681 break;
2682 case SA_RAID_TRIPLE:
2683 if (rmd->is_write && (!ctrl_info->enable_r1_writes ||
2684 rmd->data_length > ctrl_info->max_write_raid_1_10_3drive))
2685 is_supported = false;
2686 break;
2687 case SA_RAID_5:
2688 if (rmd->is_write && (!ctrl_info->enable_r5_writes ||
2689 rmd->data_length > ctrl_info->max_write_raid_5_6))
2690 is_supported = false;
2691 break;
2692 case SA_RAID_6:
2693 if (rmd->is_write && (!ctrl_info->enable_r6_writes ||
2694 rmd->data_length > ctrl_info->max_write_raid_5_6))
2695 is_supported = false;
2696 break;
2697 default:
2698 is_supported = false;
2699 break;
2700 }
2701
2702 return is_supported;
2703 }
2704
2705 #define PQI_RAID_BYPASS_INELIGIBLE 1
2706
2707 static int pqi_get_aio_lba_and_block_count(struct scsi_cmnd *scmd,
2708 struct pqi_scsi_dev_raid_map_data *rmd)
2709 {
2710 /* Check for valid opcode, get LBA and block count. */
2711 switch (scmd->cmnd[0]) {
2712 case WRITE_6:
2713 rmd->is_write = true;
2714 fallthrough;
2715 case READ_6:
2716 rmd->first_block = (u64)(((scmd->cmnd[1] & 0x1f) << 16) |
2717 (scmd->cmnd[2] << 8) | scmd->cmnd[3]);
2718 rmd->block_cnt = (u32)scmd->cmnd[4];
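/* Per the SBC spec, a READ(6)/WRITE(6) transfer length of 0 means 256 blocks. */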
2719 if (rmd->block_cnt == 0)
2720 rmd->block_cnt = 256;
2721 break;
2722 case WRITE_10:
2723 rmd->is_write = true;
2724 fallthrough;
2725 case READ_10:
2726 rmd->first_block = (u64)get_unaligned_be32(&scmd->cmnd[2]);
2727 rmd->block_cnt = (u32)get_unaligned_be16(&scmd->cmnd[7]);
2728 break;
2729 case WRITE_12:
2730 rmd->is_write = true;
2731 fallthrough;
2732 case READ_12:
2733 rmd->first_block = (u64)get_unaligned_be32(&scmd->cmnd[2]);
2734 rmd->block_cnt = get_unaligned_be32(&scmd->cmnd[6]);
2735 break;
2736 case WRITE_16:
2737 rmd->is_write = true;
2738 fallthrough;
2739 case READ_16:
2740 rmd->first_block = get_unaligned_be64(&scmd->cmnd[2]);
2741 rmd->block_cnt = get_unaligned_be32(&scmd->cmnd[10]);
2742 break;
2743 default:
2744 /* Process via normal I/O path. */
2745 return PQI_RAID_BYPASS_INELIGIBLE;
2746 }
2747
2748 put_unaligned_le32(scsi_bufflen(scmd), &rmd->data_length);
2749
2750 return 0;
2751 }
2752
2753 static int pci_get_aio_common_raid_map_values(struct pqi_ctrl_info *ctrl_info,
2754 struct pqi_scsi_dev_raid_map_data *rmd, struct raid_map *raid_map)
2755 {
2756 #if BITS_PER_LONG == 32
2757 u64 tmpdiv;
2758 #endif
2759
2760 rmd->last_block = rmd->first_block + rmd->block_cnt - 1;
2761
2762 /* Check for invalid block or wraparound. */
2763 if (rmd->last_block >=
2764 get_unaligned_le64(&raid_map->volume_blk_cnt) ||
2765 rmd->last_block < rmd->first_block)
2766 return PQI_RAID_BYPASS_INELIGIBLE;
2767
2768 rmd->data_disks_per_row =
2769 get_unaligned_le16(&raid_map->data_disks_per_row);
2770 rmd->strip_size = get_unaligned_le16(&raid_map->strip_size);
2771 rmd->layout_map_count = get_unaligned_le16(&raid_map->layout_map_count);
2772
2773 /* Calculate stripe information for the request. */
2774 rmd->blocks_per_row = rmd->data_disks_per_row * rmd->strip_size;
2775 if (rmd->blocks_per_row == 0) /* Used as a divisor in many calculations */
2776 return PQI_RAID_BYPASS_INELIGIBLE;
2777 #if BITS_PER_LONG == 32
2778 tmpdiv = rmd->first_block;
2779 do_div(tmpdiv, rmd->blocks_per_row);
2780 rmd->first_row = tmpdiv;
2781 tmpdiv = rmd->last_block;
2782 do_div(tmpdiv, rmd->blocks_per_row);
2783 rmd->last_row = tmpdiv;
2784 rmd->first_row_offset = (u32)(rmd->first_block - (rmd->first_row * rmd->blocks_per_row));
2785 rmd->last_row_offset = (u32)(rmd->last_block - (rmd->last_row * rmd->blocks_per_row));
2786 tmpdiv = rmd->first_row_offset;
2787 do_div(tmpdiv, rmd->strip_size);
2788 rmd->first_column = tmpdiv;
2789 tmpdiv = rmd->last_row_offset;
2790 do_div(tmpdiv, rmd->strip_size);
2791 rmd->last_column = tmpdiv;
2792 #else
2793 rmd->first_row = rmd->first_block / rmd->blocks_per_row;
2794 rmd->last_row = rmd->last_block / rmd->blocks_per_row;
2795 rmd->first_row_offset = (u32)(rmd->first_block -
2796 (rmd->first_row * rmd->blocks_per_row));
2797 rmd->last_row_offset = (u32)(rmd->last_block - (rmd->last_row *
2798 rmd->blocks_per_row));
2799 rmd->first_column = rmd->first_row_offset / rmd->strip_size;
2800 rmd->last_column = rmd->last_row_offset / rmd->strip_size;
2801 #endif
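/*
 * Worked example (illustrative values): with strip_size = 128 and
 * data_disks_per_row = 3, blocks_per_row = 384; an I/O at
 * first_block = 500 lands in first_row = 500 / 384 = 1 at
 * first_row_offset = 116, giving first_column = 116 / 128 = 0.
 */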
2802
2803 /* If the request spans more than one row/column, hand it to the controller. */
2804 if (rmd->first_row != rmd->last_row ||
2805 rmd->first_column != rmd->last_column)
2806 return PQI_RAID_BYPASS_INELIGIBLE;
2807
2808 /* Proceeding with driver mapping. */
2809 rmd->total_disks_per_row = rmd->data_disks_per_row +
2810 get_unaligned_le16(&raid_map->metadata_disks_per_row);
2811 rmd->map_row = ((u32)(rmd->first_row >>
2812 raid_map->parity_rotation_shift)) %
2813 get_unaligned_le16(&raid_map->row_cnt);
2814 rmd->map_index = (rmd->map_row * rmd->total_disks_per_row) +
2815 rmd->first_column;
2816
2817 return 0;
2818 }
2819
2820 static int pqi_calc_aio_r5_or_r6(struct pqi_scsi_dev_raid_map_data *rmd,
2821 struct raid_map *raid_map)
2822 {
2823 #if BITS_PER_LONG == 32
2824 u64 tmpdiv;
2825 #endif
2826
2827 if (rmd->blocks_per_row == 0) /* Used as a divisor in many calculations */
2828 return PQI_RAID_BYPASS_INELIGIBLE;
2829
2830 /* RAID 50/60 */
2831 /* Verify first and last block are in same RAID group. */
2832 rmd->stripesize = rmd->blocks_per_row * rmd->layout_map_count;
2833 #if BITS_PER_LONG == 32
2834 tmpdiv = rmd->first_block;
2835 rmd->first_group = do_div(tmpdiv, rmd->stripesize);
2836 tmpdiv = rmd->first_group;
2837 do_div(tmpdiv, rmd->blocks_per_row);
2838 rmd->first_group = tmpdiv;
2839 tmpdiv = rmd->last_block;
2840 rmd->last_group = do_div(tmpdiv, rmd->stripesize);
2841 tmpdiv = rmd->last_group;
2842 do_div(tmpdiv, rmd->blocks_per_row);
2843 rmd->last_group = tmpdiv;
2844 #else
2845 rmd->first_group = (rmd->first_block % rmd->stripesize) / rmd->blocks_per_row;
2846 rmd->last_group = (rmd->last_block % rmd->stripesize) / rmd->blocks_per_row;
2847 #endif
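/*
 * Example (illustrative values): with blocks_per_row = 384 and
 * layout_map_count = 2 (two parity groups), stripesize = 768;
 * first_block = 900 falls in group (900 % 768) / 384 = 0.
 */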
2848 if (rmd->first_group != rmd->last_group)
2849 return PQI_RAID_BYPASS_INELIGIBLE;
2850
2851 /* Verify request is in a single row of RAID 5/6. */
2852 #if BITS_PER_LONG == 32
2853 tmpdiv = rmd->first_block;
2854 do_div(tmpdiv, rmd->stripesize);
2855 rmd->first_row = tmpdiv;
2856 rmd->r5or6_first_row = tmpdiv;
2857 tmpdiv = rmd->last_block;
2858 do_div(tmpdiv, rmd->stripesize);
2859 rmd->r5or6_last_row = tmpdiv;
2860 #else
2861 rmd->first_row = rmd->r5or6_first_row =
2862 rmd->first_block / rmd->stripesize;
2863 rmd->r5or6_last_row = rmd->last_block / rmd->stripesize;
2864 #endif
2865 if (rmd->r5or6_first_row != rmd->r5or6_last_row)
2866 return PQI_RAID_BYPASS_INELIGIBLE;
2867
2868 /* Verify request is in a single column. */
2869 #if BITS_PER_LONG == 32
2870 tmpdiv = rmd->first_block;
2871 rmd->first_row_offset = do_div(tmpdiv, rmd->stripesize);
2872 tmpdiv = rmd->first_row_offset;
2873 rmd->first_row_offset = (u32)do_div(tmpdiv, rmd->blocks_per_row);
2874 rmd->r5or6_first_row_offset = rmd->first_row_offset;
2875 tmpdiv = rmd->last_block;
2876 rmd->r5or6_last_row_offset = do_div(tmpdiv, rmd->stripesize);
2877 tmpdiv = rmd->r5or6_last_row_offset;
2878 rmd->r5or6_last_row_offset = do_div(tmpdiv, rmd->blocks_per_row);
2879 tmpdiv = rmd->r5or6_first_row_offset;
2880 do_div(tmpdiv, rmd->strip_size);
2881 rmd->first_column = rmd->r5or6_first_column = tmpdiv;
2882 tmpdiv = rmd->r5or6_last_row_offset;
2883 do_div(tmpdiv, rmd->strip_size);
2884 rmd->r5or6_last_column = tmpdiv;
2885 #else
2886 rmd->first_row_offset = rmd->r5or6_first_row_offset =
2887 (u32)((rmd->first_block % rmd->stripesize) %
2888 rmd->blocks_per_row);
2889
2890 rmd->r5or6_last_row_offset =
2891 (u32)((rmd->last_block % rmd->stripesize) %
2892 rmd->blocks_per_row);
2893
2894 rmd->first_column =
2895 rmd->r5or6_first_row_offset / rmd->strip_size;
2896 rmd->r5or6_first_column = rmd->first_column;
2897 rmd->r5or6_last_column = rmd->r5or6_last_row_offset / rmd->strip_size;
2898 #endif
2899 if (rmd->r5or6_first_column != rmd->r5or6_last_column)
2900 return PQI_RAID_BYPASS_INELIGIBLE;
2901
2902 /* Request is eligible. */
2903 rmd->map_row =
2904 ((u32)(rmd->first_row >> raid_map->parity_rotation_shift)) %
2905 get_unaligned_le16(&raid_map->row_cnt);
2906
2907 rmd->map_index = (rmd->first_group *
2908 (get_unaligned_le16(&raid_map->row_cnt) *
2909 rmd->total_disks_per_row)) +
2910 (rmd->map_row * rmd->total_disks_per_row) + rmd->first_column;
2911
2912 if (rmd->is_write) {
2913 u32 index;
2914
2915 /*
2916 * p_parity_it_nexus and q_parity_it_nexus are pointers to the
2917 * parity entries inside the device's raid_map.
2918 *
2919 * A device's RAID map is bounded by the number of RAID disks squared.
2920 *
2921 * The device's RAID map size is checked during device
2922 * initialization.
2923 */
2924 index = DIV_ROUND_UP(rmd->map_index + 1, rmd->total_disks_per_row);
2925 index *= rmd->total_disks_per_row;
2926 index -= get_unaligned_le16(&raid_map->metadata_disks_per_row);
2927
2928 rmd->p_parity_it_nexus = raid_map->disk_data[index].aio_handle;
2929 if (rmd->raid_level == SA_RAID_6) {
2930 rmd->q_parity_it_nexus = raid_map->disk_data[index + 1].aio_handle;
2931 rmd->xor_mult = raid_map->disk_data[rmd->map_index].xor_mult[1];
2932 }
2933 #if BITS_PER_LONG == 32
2934 tmpdiv = rmd->first_block;
2935 do_div(tmpdiv, rmd->blocks_per_row);
2936 rmd->row = tmpdiv;
2937 #else
2938 rmd->row = rmd->first_block / rmd->blocks_per_row;
2939 #endif
2940 }
2941
2942 return 0;
2943 }
2944
2945 static void pqi_set_aio_cdb(struct pqi_scsi_dev_raid_map_data *rmd)
2946 {
2947 /* Build the new CDB for the physical disk I/O. */
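/*
 * READ(10)/WRITE(10) carry only a 32-bit LBA and a 16-bit transfer
 * length, so READ(16)/WRITE(16) is used whenever the starting block
 * does not fit in 32 bits.
 */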
2948 if (rmd->disk_block > 0xffffffff) {
2949 rmd->cdb[0] = rmd->is_write ? WRITE_16 : READ_16;
2950 rmd->cdb[1] = 0;
2951 put_unaligned_be64(rmd->disk_block, &rmd->cdb[2]);
2952 put_unaligned_be32(rmd->disk_block_cnt, &rmd->cdb[10]);
2953 rmd->cdb[14] = 0;
2954 rmd->cdb[15] = 0;
2955 rmd->cdb_length = 16;
2956 } else {
2957 rmd->cdb[0] = rmd->is_write ? WRITE_10 : READ_10;
2958 rmd->cdb[1] = 0;
2959 put_unaligned_be32((u32)rmd->disk_block, &rmd->cdb[2]);
2960 rmd->cdb[6] = 0;
2961 put_unaligned_be16((u16)rmd->disk_block_cnt, &rmd->cdb[7]);
2962 rmd->cdb[9] = 0;
2963 rmd->cdb_length = 10;
2964 }
2965 }
2966
2967 static void pqi_calc_aio_r1_nexus(struct raid_map *raid_map,
2968 struct pqi_scsi_dev_raid_map_data *rmd)
2969 {
2970 u32 index;
2971 u32 group;
2972
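/*
 * In a RAID 1/RAID TRIPLE map, the mirror copies of a data column are
 * laid out data_disks_per_row entries apart in disk_data[], one entry
 * per mirror leg.
 */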
2973 group = rmd->map_index / rmd->data_disks_per_row;
2974
2975 index = rmd->map_index - (group * rmd->data_disks_per_row);
2976 rmd->it_nexus[0] = raid_map->disk_data[index].aio_handle;
2977 index += rmd->data_disks_per_row;
2978 rmd->it_nexus[1] = raid_map->disk_data[index].aio_handle;
2979 if (rmd->layout_map_count > 2) {
2980 index += rmd->data_disks_per_row;
2981 rmd->it_nexus[2] = raid_map->disk_data[index].aio_handle;
2982 }
2983
2984 rmd->num_it_nexus_entries = rmd->layout_map_count;
2985 }
2986
2987 static int pqi_raid_bypass_submit_scsi_cmd(struct pqi_ctrl_info *ctrl_info,
2988 struct pqi_scsi_dev *device, struct scsi_cmnd *scmd,
2989 struct pqi_queue_group *queue_group)
2990 {
2991 int rc;
2992 struct raid_map *raid_map;
2993 u32 group;
2994 u32 next_bypass_group;
2995 struct pqi_encryption_info *encryption_info_ptr;
2996 struct pqi_encryption_info encryption_info;
2997 struct pqi_scsi_dev_raid_map_data rmd = { 0 };
2998
2999 rc = pqi_get_aio_lba_and_block_count(scmd, &rmd);
3000 if (rc)
3001 return PQI_RAID_BYPASS_INELIGIBLE;
3002
3003 rmd.raid_level = device->raid_level;
3004
3005 if (!pqi_aio_raid_level_supported(ctrl_info, &rmd))
3006 return PQI_RAID_BYPASS_INELIGIBLE;
3007
3008 if (unlikely(rmd.block_cnt == 0))
3009 return PQI_RAID_BYPASS_INELIGIBLE;
3010
3011 raid_map = device->raid_map;
3012
3013 rc = pci_get_aio_common_raid_map_values(ctrl_info, &rmd, raid_map);
3014 if (rc)
3015 return PQI_RAID_BYPASS_INELIGIBLE;
3016
3017 if (device->raid_level == SA_RAID_1 ||
3018 device->raid_level == SA_RAID_TRIPLE) {
3019 if (rmd.is_write) {
3020 pqi_calc_aio_r1_nexus(raid_map, &rmd);
3021 } else {
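/*
 * Reads round-robin across the mirror legs: next_bypass_group
 * cycles through 0..layout_map_count-1, tracked per map entry.
 */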
3022 group = device->next_bypass_group[rmd.map_index];
3023 next_bypass_group = group + 1;
3024 if (next_bypass_group >= rmd.layout_map_count)
3025 next_bypass_group = 0;
3026 device->next_bypass_group[rmd.map_index] = next_bypass_group;
3027 rmd.map_index += group * rmd.data_disks_per_row;
3028 }
3029 } else if ((device->raid_level == SA_RAID_5 ||
3030 device->raid_level == SA_RAID_6) &&
3031 (rmd.layout_map_count > 1 || rmd.is_write)) {
3032 rc = pqi_calc_aio_r5_or_r6(&rmd, raid_map);
3033 if (rc)
3034 return PQI_RAID_BYPASS_INELIGIBLE;
3035 }
3036
3037 if (unlikely(rmd.map_index >= RAID_MAP_MAX_ENTRIES))
3038 return PQI_RAID_BYPASS_INELIGIBLE;
3039
3040 rmd.aio_handle = raid_map->disk_data[rmd.map_index].aio_handle;
3041 rmd.disk_block = get_unaligned_le64(&raid_map->disk_starting_blk) +
3042 rmd.first_row * rmd.strip_size +
3043 (rmd.first_row_offset - rmd.first_column * rmd.strip_size);
3044 rmd.disk_block_cnt = rmd.block_cnt;
3045
3046 /* Handle differing logical/physical block sizes. */
3047 if (raid_map->phys_blk_shift) {
3048 rmd.disk_block <<= raid_map->phys_blk_shift;
3049 rmd.disk_block_cnt <<= raid_map->phys_blk_shift;
3050 }
3051
3052 if (unlikely(rmd.disk_block_cnt > 0xffff))
3053 return PQI_RAID_BYPASS_INELIGIBLE;
3054
3055 pqi_set_aio_cdb(&rmd);
3056
3057 if (get_unaligned_le16(&raid_map->flags) & RAID_MAP_ENCRYPTION_ENABLED) {
3058 if (rmd.data_length > device->max_transfer_encrypted)
3059 return PQI_RAID_BYPASS_INELIGIBLE;
3060 pqi_set_encryption_info(&encryption_info, raid_map, rmd.first_block);
3061 encryption_info_ptr = &encryption_info;
3062 } else {
3063 encryption_info_ptr = NULL;
3064 }
3065
3066 if (rmd.is_write) {
3067 switch (device->raid_level) {
3068 case SA_RAID_1:
3069 case SA_RAID_TRIPLE:
3070 return pqi_aio_submit_r1_write_io(ctrl_info, scmd, queue_group,
3071 encryption_info_ptr, device, &rmd);
3072 case SA_RAID_5:
3073 case SA_RAID_6:
3074 return pqi_aio_submit_r56_write_io(ctrl_info, scmd, queue_group,
3075 encryption_info_ptr, device, &rmd);
3076 }
3077 }
3078
3079 return pqi_aio_submit_io(ctrl_info, scmd, rmd.aio_handle,
3080 rmd.cdb, rmd.cdb_length, queue_group,
3081 encryption_info_ptr, true, false);
3082 }
3083
3084 #define PQI_STATUS_IDLE 0x0
3085
3086 #define PQI_CREATE_ADMIN_QUEUE_PAIR 1
3087 #define PQI_DELETE_ADMIN_QUEUE_PAIR 2
3088
3089 #define PQI_DEVICE_STATE_POWER_ON_AND_RESET 0x0
3090 #define PQI_DEVICE_STATE_STATUS_AVAILABLE 0x1
3091 #define PQI_DEVICE_STATE_ALL_REGISTERS_READY 0x2
3092 #define PQI_DEVICE_STATE_ADMIN_QUEUE_PAIR_READY 0x3
3093 #define PQI_DEVICE_STATE_ERROR 0x4
3094
3095 #define PQI_MODE_READY_TIMEOUT_SECS 30
3096 #define PQI_MODE_READY_POLL_INTERVAL_MSECS 1
3097
3098 static int pqi_wait_for_pqi_mode_ready(struct pqi_ctrl_info *ctrl_info)
3099 {
3100 struct pqi_device_registers __iomem *pqi_registers;
3101 unsigned long timeout;
3102 u64 signature;
3103 u8 status;
3104
3105 pqi_registers = ctrl_info->pqi_registers;
3106 timeout = (PQI_MODE_READY_TIMEOUT_SECS * HZ) + jiffies;
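/* Poll, in order: the PQI signature, then IDLE status, then all-registers-ready. */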
3107
3108 while (1) {
3109 signature = readq(&pqi_registers->signature);
3110 if (memcmp(&signature, PQI_DEVICE_SIGNATURE,
3111 sizeof(signature)) == 0)
3112 break;
3113 if (time_after(jiffies, timeout)) {
3114 dev_err(&ctrl_info->pci_dev->dev,
3115 "timed out waiting for PQI signature\n");
3116 return -ETIMEDOUT;
3117 }
3118 msleep(PQI_MODE_READY_POLL_INTERVAL_MSECS);
3119 }
3120
3121 while (1) {
3122 status = readb(&pqi_registers->function_and_status_code);
3123 if (status == PQI_STATUS_IDLE)
3124 break;
3125 if (time_after(jiffies, timeout)) {
3126 dev_err(&ctrl_info->pci_dev->dev,
3127 "timed out waiting for PQI IDLE\n");
3128 return -ETIMEDOUT;
3129 }
3130 msleep(PQI_MODE_READY_POLL_INTERVAL_MSECS);
3131 }
3132
3133 while (1) {
3134 if (readl(&pqi_registers->device_status) ==
3135 PQI_DEVICE_STATE_ALL_REGISTERS_READY)
3136 break;
3137 if (time_after(jiffies, timeout)) {
3138 dev_err(&ctrl_info->pci_dev->dev,
3139 "timed out waiting for PQI all registers ready\n");
3140 return -ETIMEDOUT;
3141 }
3142 msleep(PQI_MODE_READY_POLL_INTERVAL_MSECS);
3143 }
3144
3145 return 0;
3146 }
3147
3148 static inline void pqi_aio_path_disabled(struct pqi_io_request *io_request)
3149 {
3150 struct pqi_scsi_dev *device;
3151
3152 device = io_request->scmd->device->hostdata;
3153 device->raid_bypass_enabled = false;
3154 device->aio_enabled = false;
3155 }
3156
3157 static inline void pqi_take_device_offline(struct scsi_device *sdev, char *path)
3158 {
3159 struct pqi_ctrl_info *ctrl_info;
3160 struct pqi_scsi_dev *device;
3161
3162 device = sdev->hostdata;
3163 if (device->device_offline)
3164 return;
3165
3166 device->device_offline = true;
3167 ctrl_info = shost_to_hba(sdev->host);
3168 pqi_schedule_rescan_worker(ctrl_info);
3169 dev_err(&ctrl_info->pci_dev->dev, "re-scanning %s scsi %d:%d:%d:%d\n",
3170 path, ctrl_info->scsi_host->host_no, device->bus,
3171 device->target, device->lun);
3172 }
3173
3174 static void pqi_process_raid_io_error(struct pqi_io_request *io_request)
3175 {
3176 u8 scsi_status;
3177 u8 host_byte;
3178 struct scsi_cmnd *scmd;
3179 struct pqi_raid_error_info *error_info;
3180 size_t sense_data_length;
3181 int residual_count;
3182 int xfer_count;
3183 struct scsi_sense_hdr sshdr;
3184
3185 scmd = io_request->scmd;
3186 if (!scmd)
3187 return;
3188
3189 error_info = io_request->error_info;
3190 scsi_status = error_info->status;
3191 host_byte = DID_OK;
3192
3193 switch (error_info->data_out_result) {
3194 case PQI_DATA_IN_OUT_GOOD:
3195 break;
3196 case PQI_DATA_IN_OUT_UNDERFLOW:
3197 xfer_count =
3198 get_unaligned_le32(&error_info->data_out_transferred);
3199 residual_count = scsi_bufflen(scmd) - xfer_count;
3200 scsi_set_resid(scmd, residual_count);
3201 if (xfer_count < scmd->underflow)
3202 host_byte = DID_SOFT_ERROR;
3203 break;
3204 case PQI_DATA_IN_OUT_UNSOLICITED_ABORT:
3205 case PQI_DATA_IN_OUT_ABORTED:
3206 host_byte = DID_ABORT;
3207 break;
3208 case PQI_DATA_IN_OUT_TIMEOUT:
3209 host_byte = DID_TIME_OUT;
3210 break;
3211 case PQI_DATA_IN_OUT_BUFFER_OVERFLOW:
3212 case PQI_DATA_IN_OUT_PROTOCOL_ERROR:
3213 case PQI_DATA_IN_OUT_BUFFER_ERROR:
3214 case PQI_DATA_IN_OUT_BUFFER_OVERFLOW_DESCRIPTOR_AREA:
3215 case PQI_DATA_IN_OUT_BUFFER_OVERFLOW_BRIDGE:
3216 case PQI_DATA_IN_OUT_ERROR:
3217 case PQI_DATA_IN_OUT_HARDWARE_ERROR:
3218 case PQI_DATA_IN_OUT_PCIE_FABRIC_ERROR:
3219 case PQI_DATA_IN_OUT_PCIE_COMPLETION_TIMEOUT:
3220 case PQI_DATA_IN_OUT_PCIE_COMPLETER_ABORT_RECEIVED:
3221 case PQI_DATA_IN_OUT_PCIE_UNSUPPORTED_REQUEST_RECEIVED:
3222 case PQI_DATA_IN_OUT_PCIE_ECRC_CHECK_FAILED:
3223 case PQI_DATA_IN_OUT_PCIE_UNSUPPORTED_REQUEST:
3224 case PQI_DATA_IN_OUT_PCIE_ACS_VIOLATION:
3225 case PQI_DATA_IN_OUT_PCIE_TLP_PREFIX_BLOCKED:
3226 case PQI_DATA_IN_OUT_PCIE_POISONED_MEMORY_READ:
3227 default:
3228 host_byte = DID_ERROR;
3229 break;
3230 }
3231
3232 sense_data_length = get_unaligned_le16(&error_info->sense_data_length);
3233 if (sense_data_length == 0)
3234 sense_data_length =
3235 get_unaligned_le16(&error_info->response_data_length);
3236 if (sense_data_length) {
3237 if (sense_data_length > sizeof(error_info->data))
3238 sense_data_length = sizeof(error_info->data);
3239
3240 if (scsi_status == SAM_STAT_CHECK_CONDITION &&
3241 scsi_normalize_sense(error_info->data,
3242 sense_data_length, &sshdr) &&
3243 sshdr.sense_key == HARDWARE_ERROR &&
3244 sshdr.asc == 0x3e) {
3245 struct pqi_ctrl_info *ctrl_info = shost_to_hba(scmd->device->host);
3246 struct pqi_scsi_dev *device = scmd->device->hostdata;
3247
3248 switch (sshdr.ascq) {
3249 case 0x1: /* LOGICAL UNIT FAILURE */
3250 if (printk_ratelimit())
3251 scmd_printk(KERN_ERR, scmd, "received 'logical unit failure' from controller for scsi %d:%d:%d:%d\n",
3252 ctrl_info->scsi_host->host_no, device->bus, device->target, device->lun);
3253 pqi_take_device_offline(scmd->device, "RAID");
3254 host_byte = DID_NO_CONNECT;
3255 break;
3256
3257 default: /* See http://www.t10.org/lists/asc-num.htm#ASC_3E */
3258 if (printk_ratelimit())
3259 scmd_printk(KERN_ERR, scmd, "received unhandled error %d from controller for scsi %d:%d:%d:%d\n",
3260 sshdr.ascq, ctrl_info->scsi_host->host_no, device->bus, device->target, device->lun);
3261 break;
3262 }
3263 }
3264
3265 if (sense_data_length > SCSI_SENSE_BUFFERSIZE)
3266 sense_data_length = SCSI_SENSE_BUFFERSIZE;
3267 memcpy(scmd->sense_buffer, error_info->data,
3268 sense_data_length);
3269 }
3270
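/*
 * A check condition of ILLEGAL REQUEST/0x26/0x0 on a partially completed
 * request to a physical device is treated as the AIO path having failed
 * mid-transfer: the device is taken offline and a hardware-failure sense
 * (0x3e/0x1) is substituted.
 */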
3271 if (pqi_cmd_priv(scmd)->this_residual &&
3272 !pqi_is_logical_device(scmd->device->hostdata) &&
3273 scsi_status == SAM_STAT_CHECK_CONDITION &&
3274 host_byte == DID_OK &&
3275 sense_data_length &&
3276 scsi_normalize_sense(error_info->data, sense_data_length, &sshdr) &&
3277 sshdr.sense_key == ILLEGAL_REQUEST &&
3278 sshdr.asc == 0x26 &&
3279 sshdr.ascq == 0x0) {
3280 host_byte = DID_NO_CONNECT;
3281 pqi_take_device_offline(scmd->device, "AIO");
3282 scsi_build_sense_buffer(0, scmd->sense_buffer, HARDWARE_ERROR, 0x3e, 0x1);
3283 }
3284
3285 scmd->result = scsi_status;
3286 set_host_byte(scmd, host_byte);
3287 }
3288
3289 static void pqi_process_aio_io_error(struct pqi_io_request *io_request)
3290 {
3291 u8 scsi_status;
3292 u8 host_byte;
3293 struct scsi_cmnd *scmd;
3294 struct pqi_aio_error_info *error_info;
3295 size_t sense_data_length;
3296 int residual_count;
3297 int xfer_count;
3298 bool device_offline;
3299
3300 scmd = io_request->scmd;
3301 error_info = io_request->error_info;
3302 host_byte = DID_OK;
3303 sense_data_length = 0;
3304 device_offline = false;
3305
3306 switch (error_info->service_response) {
3307 case PQI_AIO_SERV_RESPONSE_COMPLETE:
3308 scsi_status = error_info->status;
3309 break;
3310 case PQI_AIO_SERV_RESPONSE_FAILURE:
3311 switch (error_info->status) {
3312 case PQI_AIO_STATUS_IO_ABORTED:
3313 scsi_status = SAM_STAT_TASK_ABORTED;
3314 break;
3315 case PQI_AIO_STATUS_UNDERRUN:
3316 scsi_status = SAM_STAT_GOOD;
3317 residual_count = get_unaligned_le32(
3318 &error_info->residual_count);
3319 scsi_set_resid(scmd, residual_count);
3320 xfer_count = scsi_bufflen(scmd) - residual_count;
3321 if (xfer_count < scmd->underflow)
3322 host_byte = DID_SOFT_ERROR;
3323 break;
3324 case PQI_AIO_STATUS_OVERRUN:
3325 scsi_status = SAM_STAT_GOOD;
3326 break;
3327 case PQI_AIO_STATUS_AIO_PATH_DISABLED:
3328 pqi_aio_path_disabled(io_request);
3329 scsi_status = SAM_STAT_GOOD;
3330 io_request->status = -EAGAIN;
3331 break;
3332 case PQI_AIO_STATUS_NO_PATH_TO_DEVICE:
3333 case PQI_AIO_STATUS_INVALID_DEVICE:
3334 if (!io_request->raid_bypass) {
3335 device_offline = true;
3336 pqi_take_device_offline(scmd->device, "AIO");
3337 host_byte = DID_NO_CONNECT;
3338 }
3339 scsi_status = SAM_STAT_CHECK_CONDITION;
3340 break;
3341 case PQI_AIO_STATUS_IO_ERROR:
3342 default:
3343 scsi_status = SAM_STAT_CHECK_CONDITION;
3344 break;
3345 }
3346 break;
3347 case PQI_AIO_SERV_RESPONSE_TMF_COMPLETE:
3348 case PQI_AIO_SERV_RESPONSE_TMF_SUCCEEDED:
3349 scsi_status = SAM_STAT_GOOD;
3350 break;
3351 case PQI_AIO_SERV_RESPONSE_TMF_REJECTED:
3352 case PQI_AIO_SERV_RESPONSE_TMF_INCORRECT_LUN:
3353 default:
3354 scsi_status = SAM_STAT_CHECK_CONDITION;
3355 break;
3356 }
3357
3358 if (error_info->data_present) {
3359 sense_data_length =
3360 get_unaligned_le16(&error_info->data_length);
3361 if (sense_data_length) {
3362 if (sense_data_length > sizeof(error_info->data))
3363 sense_data_length = sizeof(error_info->data);
3364 if (sense_data_length > SCSI_SENSE_BUFFERSIZE)
3365 sense_data_length = SCSI_SENSE_BUFFERSIZE;
3366 memcpy(scmd->sense_buffer, error_info->data,
3367 sense_data_length);
3368 }
3369 }
3370
3371 if (device_offline && sense_data_length == 0)
3372 scsi_build_sense(scmd, 0, HARDWARE_ERROR, 0x3e, 0x1);
3373
3374 scmd->result = scsi_status;
3375 set_host_byte(scmd, host_byte);
3376 }
3377
3378 static void pqi_process_io_error(unsigned int iu_type,
3379 struct pqi_io_request *io_request)
3380 {
3381 switch (iu_type) {
3382 case PQI_RESPONSE_IU_RAID_PATH_IO_ERROR:
3383 pqi_process_raid_io_error(io_request);
3384 break;
3385 case PQI_RESPONSE_IU_AIO_PATH_IO_ERROR:
3386 pqi_process_aio_io_error(io_request);
3387 break;
3388 }
3389 }
3390
3391 static int pqi_interpret_task_management_response(struct pqi_ctrl_info *ctrl_info,
3392 struct pqi_task_management_response *response)
3393 {
3394 int rc;
3395
3396 switch (response->response_code) {
3397 case SOP_TMF_COMPLETE:
3398 case SOP_TMF_FUNCTION_SUCCEEDED:
3399 rc = 0;
3400 break;
3401 case SOP_TMF_REJECTED:
3402 rc = -EAGAIN;
3403 break;
3404 case SOP_TMF_INCORRECT_LOGICAL_UNIT:
3405 rc = -ENODEV;
3406 break;
3407 default:
3408 rc = -EIO;
3409 break;
3410 }
3411
3412 if (rc)
3413 dev_err(&ctrl_info->pci_dev->dev,
3414 "Task Management Function error: %d (response code: %u)\n", rc, response->response_code);
3415
3416 return rc;
3417 }
3418
3419 static inline void pqi_invalid_response(struct pqi_ctrl_info *ctrl_info,
3420 enum pqi_ctrl_shutdown_reason ctrl_shutdown_reason)
3421 {
3422 pqi_take_ctrl_offline(ctrl_info, ctrl_shutdown_reason);
3423 }
3424
3425 static int pqi_process_io_intr(struct pqi_ctrl_info *ctrl_info, struct pqi_queue_group *queue_group)
3426 {
3427 int num_responses;
3428 pqi_index_t oq_pi;
3429 pqi_index_t oq_ci;
3430 struct pqi_io_request *io_request;
3431 struct pqi_io_response *response;
3432 u16 request_id;
3433
3434 num_responses = 0;
3435 oq_ci = queue_group->oq_ci_copy;
3436
3437 while (1) {
3438 oq_pi = readl(queue_group->oq_pi);
3439 if (oq_pi >= ctrl_info->num_elements_per_oq) {
3440 pqi_invalid_response(ctrl_info, PQI_IO_PI_OUT_OF_RANGE);
3441 dev_err(&ctrl_info->pci_dev->dev,
3442 "I/O interrupt: producer index (%u) out of range (0-%u): consumer index: %u\n",
3443 oq_pi, ctrl_info->num_elements_per_oq - 1, oq_ci);
3444 return -1;
3445 }
3446 if (oq_pi == oq_ci)
3447 break;
3448
3449 num_responses++;
3450 response = queue_group->oq_element_array +
3451 (oq_ci * PQI_OPERATIONAL_OQ_ELEMENT_LENGTH);
3452
3453 request_id = get_unaligned_le16(&response->request_id);
3454 if (request_id >= ctrl_info->max_io_slots) {
3455 pqi_invalid_response(ctrl_info, PQI_INVALID_REQ_ID);
3456 dev_err(&ctrl_info->pci_dev->dev,
3457 "request ID in response (%u) out of range (0-%u): producer index: %u consumer index: %u\n",
3458 request_id, ctrl_info->max_io_slots - 1, oq_pi, oq_ci);
3459 return -1;
3460 }
3461
3462 io_request = &ctrl_info->io_request_pool[request_id];
3463 if (atomic_read(&io_request->refcount) == 0) {
3464 pqi_invalid_response(ctrl_info, PQI_UNMATCHED_REQ_ID);
3465 dev_err(&ctrl_info->pci_dev->dev,
3466 "request ID in response (%u) does not match an outstanding I/O request: producer index: %u consumer index: %u\n",
3467 request_id, oq_pi, oq_ci);
3468 return -1;
3469 }
3470
3471 switch (response->header.iu_type) {
3472 case PQI_RESPONSE_IU_RAID_PATH_IO_SUCCESS:
3473 case PQI_RESPONSE_IU_AIO_PATH_IO_SUCCESS:
3474 if (io_request->scmd)
3475 io_request->scmd->result = 0;
3476 fallthrough;
3477 case PQI_RESPONSE_IU_GENERAL_MANAGEMENT:
3478 break;
3479 case PQI_RESPONSE_IU_VENDOR_GENERAL:
3480 io_request->status =
3481 get_unaligned_le16(
3482 &((struct pqi_vendor_general_response *)response)->status);
3483 break;
3484 case PQI_RESPONSE_IU_TASK_MANAGEMENT:
3485 io_request->status = pqi_interpret_task_management_response(ctrl_info,
3486 (void *)response);
3487 break;
3488 case PQI_RESPONSE_IU_AIO_PATH_DISABLED:
3489 pqi_aio_path_disabled(io_request);
3490 io_request->status = -EAGAIN;
3491 break;
3492 case PQI_RESPONSE_IU_RAID_PATH_IO_ERROR:
3493 case PQI_RESPONSE_IU_AIO_PATH_IO_ERROR:
3494 io_request->error_info = ctrl_info->error_buffer +
3495 (get_unaligned_le16(&response->error_index) *
3496 PQI_ERROR_BUFFER_ELEMENT_LENGTH);
3497 pqi_process_io_error(response->header.iu_type, io_request);
3498 break;
3499 default:
3500 pqi_invalid_response(ctrl_info, PQI_UNEXPECTED_IU_TYPE);
3501 dev_err(&ctrl_info->pci_dev->dev,
3502 "unexpected IU type: 0x%x: producer index: %u consumer index: %u\n",
3503 response->header.iu_type, oq_pi, oq_ci);
3504 return -1;
3505 }
3506
3507 io_request->io_complete_callback(io_request, io_request->context);
3508
3509 /*
3510 * Note that the I/O request structure CANNOT BE TOUCHED after
3511 * returning from the I/O completion callback!
3512 */
3513 oq_ci = (oq_ci + 1) % ctrl_info->num_elements_per_oq;
3514 }
3515
3516 if (num_responses) {
3517 queue_group->oq_ci_copy = oq_ci;
3518 writel(oq_ci, queue_group->oq_ci);
3519 }
3520
3521 return num_responses;
3522 }
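
/*
 * The loop above is one instance of the PQI single-producer/single-consumer
 * ring protocol: firmware advances the producer index (PI), the driver
 * drains elements up to PI, then publishes its consumer index (CI) back
 * through MMIO. A minimal sketch of the same shape over a plain array
 * follows; all demo_* names are illustrative, not part of this driver.
 */
#if 0	/* illustrative sketch only - never compiled into the driver */
struct demo_ring {
	unsigned int num_elements;
	unsigned int pi;	/* advanced by the producer (firmware) */
	unsigned int ci;	/* advanced by the consumer (driver) */
	int elements[16];
};

static int demo_ring_drain(struct demo_ring *ring)
{
	unsigned int pi = ring->pi;	/* real driver: readl(oq_pi) */
	int count = 0;

	if (pi >= ring->num_elements)
		return -1;	/* corrupt PI, cf. PQI_IO_PI_OUT_OF_RANGE */

	while (ring->ci != pi) {
		/* consume ring->elements[ring->ci] here */
		ring->ci = (ring->ci + 1) % ring->num_elements;
		count++;
	}

	return count;	/* real driver: writel(oq_ci) to publish CI */
}
#endif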
3523
3524 static inline unsigned int pqi_num_elements_free(unsigned int pi,
3525 unsigned int ci, unsigned int elements_in_queue)
3526 {
3527 unsigned int num_elements_used;
3528
3529 if (pi >= ci)
3530 num_elements_used = pi - ci;
3531 else
3532 num_elements_used = elements_in_queue - ci + pi;
3533
3534 return elements_in_queue - num_elements_used - 1;
3535 }
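
/*
 * pqi_num_elements_free() keeps one element in reserve so that a full ring
 * (PI one behind CI) stays distinguishable from an empty one (PI == CI).
 * A few worked values for a 16-element queue (sketch only; the numbers are
 * chosen purely for illustration):
 */
#if 0	/* illustrative sketch only - never compiled into the driver */
static void demo_num_elements_free(void)
{
	/* No wrap: pi=9, ci=5 -> 4 elements in use, 16 - 4 - 1 = 11 free. */
	unsigned int a = pqi_num_elements_free(9, 5, 16);	/* == 11 */

	/* Wrapped: pi=2, ci=14 -> 16 - 14 + 2 = 4 in use, 11 free. */
	unsigned int b = pqi_num_elements_free(2, 14, 16);	/* == 11 */

	/* Full: pi stops one behind ci, so pi never catches up to ci. */
	unsigned int c = pqi_num_elements_free(4, 5, 16);	/* == 0 */

	(void)a; (void)b; (void)c;
}
#endif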
3536
3537 static void pqi_send_event_ack(struct pqi_ctrl_info *ctrl_info,
3538 struct pqi_event_acknowledge_request *iu, size_t iu_length)
3539 {
3540 pqi_index_t iq_pi;
3541 pqi_index_t iq_ci;
3542 unsigned long flags;
3543 void *next_element;
3544 struct pqi_queue_group *queue_group;
3545
3546 queue_group = &ctrl_info->queue_groups[PQI_DEFAULT_QUEUE_GROUP];
3547 put_unaligned_le16(queue_group->oq_id, &iu->header.response_queue_id);
3548
3549 while (1) {
3550 spin_lock_irqsave(&queue_group->submit_lock[RAID_PATH], flags);
3551
3552 iq_pi = queue_group->iq_pi_copy[RAID_PATH];
3553 iq_ci = readl(queue_group->iq_ci[RAID_PATH]);
3554
3555 if (pqi_num_elements_free(iq_pi, iq_ci,
3556 ctrl_info->num_elements_per_iq))
3557 break;
3558
3559 spin_unlock_irqrestore(
3560 &queue_group->submit_lock[RAID_PATH], flags);
3561
3562 if (pqi_ctrl_offline(ctrl_info))
3563 return;
3564 }
3565
3566 next_element = queue_group->iq_element_array[RAID_PATH] +
3567 (iq_pi * PQI_OPERATIONAL_IQ_ELEMENT_LENGTH);
3568
3569 memcpy(next_element, iu, iu_length);
3570
3571 iq_pi = (iq_pi + 1) % ctrl_info->num_elements_per_iq;
3572 queue_group->iq_pi_copy[RAID_PATH] = iq_pi;
3573
3574 /*
3575 * This write notifies the controller that an IU is available to be
3576 * processed.
3577 */
3578 writel(iq_pi, queue_group->iq_pi[RAID_PATH]);
3579
3580 spin_unlock_irqrestore(&queue_group->submit_lock[RAID_PATH], flags);
3581 }
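
/*
 * pqi_send_event_ack() above uses a reserve-or-retry shape: take the submit
 * lock, check for a free slot, and on failure drop the lock (so the
 * completion path can drain the queue) before retrying, giving up only if
 * the controller has gone offline. Condensed, with a hypothetical
 * slot_available() predicate standing in for the real free-space check:
 */
#if 0	/* illustrative sketch only - never compiled into the driver */
while (1) {
	spin_lock_irqsave(&lock, flags);
	if (slot_available())			/* hypothetical predicate */
		break;			/* exit still holding the lock */
	spin_unlock_irqrestore(&lock, flags);
	if (pqi_ctrl_offline(ctrl_info))
		return;		/* nothing will ever drain the queue */
}
/* ...copy the IU into the slot, advance PI, writel() the doorbell... */
spin_unlock_irqrestore(&lock, flags);
#endif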
3582
3583 static void pqi_acknowledge_event(struct pqi_ctrl_info *ctrl_info,
3584 struct pqi_event *event)
3585 {
3586 struct pqi_event_acknowledge_request request;
3587
3588 memset(&request, 0, sizeof(request));
3589
3590 request.header.iu_type = PQI_REQUEST_IU_ACKNOWLEDGE_VENDOR_EVENT;
3591 put_unaligned_le16(sizeof(request) - PQI_REQUEST_HEADER_LENGTH,
3592 &request.header.iu_length);
3593 request.event_type = event->event_type;
3594 put_unaligned_le16(event->event_id, &request.event_id);
3595 put_unaligned_le32(event->additional_event_id, &request.additional_event_id);
3596
3597 pqi_send_event_ack(ctrl_info, &request, sizeof(request));
3598 }
3599
3600 #define PQI_SOFT_RESET_STATUS_TIMEOUT_SECS 30
3601 #define PQI_SOFT_RESET_STATUS_POLL_INTERVAL_SECS 1
3602
3603 static enum pqi_soft_reset_status pqi_poll_for_soft_reset_status(
3604 struct pqi_ctrl_info *ctrl_info)
3605 {
3606 u8 status;
3607 unsigned long timeout;
3608
3609 timeout = (PQI_SOFT_RESET_STATUS_TIMEOUT_SECS * HZ) + jiffies;
3610
3611 while (1) {
3612 status = pqi_read_soft_reset_status(ctrl_info);
3613 if (status & PQI_SOFT_RESET_INITIATE)
3614 return RESET_INITIATE_DRIVER;
3615
3616 if (status & PQI_SOFT_RESET_ABORT)
3617 return RESET_ABORT;
3618
3619 if (!sis_is_firmware_running(ctrl_info))
3620 return RESET_NORESPONSE;
3621
3622 if (time_after(jiffies, timeout)) {
3623 dev_warn(&ctrl_info->pci_dev->dev,
3624 "timed out waiting for soft reset status\n");
3625 return RESET_TIMEDOUT;
3626 }
3627
3628 ssleep(PQI_SOFT_RESET_STATUS_POLL_INTERVAL_SECS);
3629 }
3630 }
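
/*
 * The poll loop above is the standard jiffies-deadline idiom: capture the
 * deadline once, compare with the wrap-safe time_after(), and sleep between
 * samples. Reduced to its skeleton (ready() is a hypothetical predicate):
 */
#if 0	/* illustrative sketch only - never compiled into the driver */
unsigned long timeout = (30 * HZ) + jiffies;	/* 30-second deadline */

while (!ready()) {
	if (time_after(jiffies, timeout))
		return -ETIMEDOUT;	/* safe across jiffies wraparound */
	ssleep(1);			/* coarse 1-second poll interval */
}
#endif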
3631
3632 static void pqi_process_soft_reset(struct pqi_ctrl_info *ctrl_info)
3633 {
3634 int rc;
3635 unsigned int delay_secs;
3636 enum pqi_soft_reset_status reset_status;
3637
3638 if (ctrl_info->soft_reset_handshake_supported)
3639 reset_status = pqi_poll_for_soft_reset_status(ctrl_info);
3640 else
3641 reset_status = RESET_INITIATE_FIRMWARE;
3642
3643 delay_secs = PQI_POST_RESET_DELAY_SECS;
3644
3645 switch (reset_status) {
3646 case RESET_TIMEDOUT:
3647 delay_secs = PQI_POST_OFA_RESET_DELAY_UPON_TIMEOUT_SECS;
3648 fallthrough;
3649 case RESET_INITIATE_DRIVER:
3650 dev_info(&ctrl_info->pci_dev->dev,
3651 "Online Firmware Activation: resetting controller\n");
3652 sis_soft_reset(ctrl_info);
3653 fallthrough;
3654 case RESET_INITIATE_FIRMWARE:
3655 ctrl_info->pqi_mode_enabled = false;
3656 pqi_save_ctrl_mode(ctrl_info, SIS_MODE);
3657 rc = pqi_ofa_ctrl_restart(ctrl_info, delay_secs);
3658 pqi_host_free_buffer(ctrl_info, &ctrl_info->ofa_memory);
3659 pqi_ctrl_ofa_done(ctrl_info);
3660 dev_info(&ctrl_info->pci_dev->dev,
3661 "Online Firmware Activation: %s\n",
3662 rc == 0 ? "SUCCESS" : "FAILED");
3663 break;
3664 case RESET_ABORT:
3665 dev_info(&ctrl_info->pci_dev->dev,
3666 "Online Firmware Activation ABORTED\n");
3667 if (ctrl_info->soft_reset_handshake_supported)
3668 pqi_clear_soft_reset_status(ctrl_info);
3669 pqi_host_free_buffer(ctrl_info, &ctrl_info->ofa_memory);
3670 pqi_ctrl_ofa_done(ctrl_info);
3671 pqi_ofa_ctrl_unquiesce(ctrl_info);
3672 break;
3673 case RESET_NORESPONSE:
3674 fallthrough;
3675 default:
3676 dev_err(&ctrl_info->pci_dev->dev,
3677 "unexpected Online Firmware Activation reset status: 0x%x\n",
3678 reset_status);
3679 pqi_host_free_buffer(ctrl_info, &ctrl_info->ofa_memory);
3680 pqi_ctrl_ofa_done(ctrl_info);
3681 pqi_ofa_ctrl_unquiesce(ctrl_info);
3682 pqi_take_ctrl_offline(ctrl_info, PQI_OFA_RESPONSE_TIMEOUT);
3683 break;
3684 }
3685 }
3686
3687 static void pqi_ofa_memory_alloc_worker(struct work_struct *work)
3688 {
3689 struct pqi_ctrl_info *ctrl_info;
3690
3691 ctrl_info = container_of(work, struct pqi_ctrl_info, ofa_memory_alloc_work);
3692
3693 pqi_ctrl_ofa_start(ctrl_info);
3694 pqi_host_setup_buffer(ctrl_info, &ctrl_info->ofa_memory, ctrl_info->ofa_bytes_requested, ctrl_info->ofa_bytes_requested);
3695 pqi_host_memory_update(ctrl_info, &ctrl_info->ofa_memory, PQI_VENDOR_GENERAL_OFA_MEMORY_UPDATE);
3696 }
3697
3698 static void pqi_ofa_quiesce_worker(struct work_struct *work)
3699 {
3700 struct pqi_ctrl_info *ctrl_info;
3701 struct pqi_event *event;
3702
3703 ctrl_info = container_of(work, struct pqi_ctrl_info, ofa_quiesce_work);
3704
3705 event = &ctrl_info->events[pqi_event_type_to_event_index(PQI_EVENT_TYPE_OFA)];
3706
3707 pqi_ofa_ctrl_quiesce(ctrl_info);
3708 pqi_acknowledge_event(ctrl_info, event);
3709 pqi_process_soft_reset(ctrl_info);
3710 }
3711
3712 static bool pqi_ofa_process_event(struct pqi_ctrl_info *ctrl_info,
3713 struct pqi_event *event)
3714 {
3715 bool ack_event;
3716
3717 ack_event = true;
3718
3719 switch (event->event_id) {
3720 case PQI_EVENT_OFA_MEMORY_ALLOCATION:
3721 dev_info(&ctrl_info->pci_dev->dev,
3722 "received Online Firmware Activation memory allocation request\n");
3723 schedule_work(&ctrl_info->ofa_memory_alloc_work);
3724 break;
3725 case PQI_EVENT_OFA_QUIESCE:
3726 dev_info(&ctrl_info->pci_dev->dev,
3727 "received Online Firmware Activation quiesce request\n");
3728 schedule_work(&ctrl_info->ofa_quiesce_work);
3729 ack_event = false;
3730 break;
3731 case PQI_EVENT_OFA_CANCELED:
3732 dev_info(&ctrl_info->pci_dev->dev,
3733 "received Online Firmware Activation cancel request: reason: %u\n",
3734 ctrl_info->ofa_cancel_reason);
3735 pqi_host_free_buffer(ctrl_info, &ctrl_info->ofa_memory);
3736 pqi_ctrl_ofa_done(ctrl_info);
3737 break;
3738 default:
3739 dev_err(&ctrl_info->pci_dev->dev,
3740 "received unknown Online Firmware Activation request: event ID: %u\n",
3741 event->event_id);
3742 break;
3743 }
3744
3745 return ack_event;
3746 }
3747
3748 static void pqi_mark_volumes_for_rescan(struct pqi_ctrl_info *ctrl_info)
3749 {
3750 unsigned long flags;
3751 struct pqi_scsi_dev *device;
3752
3753 spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
3754
3755 list_for_each_entry(device, &ctrl_info->scsi_device_list, scsi_device_list_entry) {
3756 if (pqi_is_logical_device(device) && device->devtype == TYPE_DISK)
3757 device->rescan = true;
3758 }
3759
3760 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
3761 }
3762
3763 static void pqi_disable_raid_bypass(struct pqi_ctrl_info *ctrl_info)
3764 {
3765 unsigned long flags;
3766 struct pqi_scsi_dev *device;
3767
3768 spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
3769
3770 list_for_each_entry(device, &ctrl_info->scsi_device_list, scsi_device_list_entry)
3771 if (device->raid_bypass_enabled)
3772 device->raid_bypass_enabled = false;
3773
3774 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
3775 }
3776
3777 static void pqi_event_worker(struct work_struct *work)
3778 {
3779 unsigned int i;
3780 bool rescan_needed;
3781 struct pqi_ctrl_info *ctrl_info;
3782 struct pqi_event *event;
3783 bool ack_event;
3784
3785 ctrl_info = container_of(work, struct pqi_ctrl_info, event_work);
3786
3787 pqi_ctrl_busy(ctrl_info);
3788 pqi_wait_if_ctrl_blocked(ctrl_info);
3789 if (pqi_ctrl_offline(ctrl_info))
3790 goto out;
3791
3792 rescan_needed = false;
3793 event = ctrl_info->events;
3794 for (i = 0; i < PQI_NUM_SUPPORTED_EVENTS; i++) {
3795 if (event->pending) {
3796 event->pending = false;
3797 if (event->event_type == PQI_EVENT_TYPE_OFA) {
3798 ack_event = pqi_ofa_process_event(ctrl_info, event);
3799 } else {
3800 ack_event = true;
3801 rescan_needed = true;
3802 if (event->event_type == PQI_EVENT_TYPE_LOGICAL_DEVICE)
3803 pqi_mark_volumes_for_rescan(ctrl_info);
3804 else if (event->event_type == PQI_EVENT_TYPE_AIO_STATE_CHANGE)
3805 pqi_disable_raid_bypass(ctrl_info);
3806 }
3807 if (ack_event)
3808 pqi_acknowledge_event(ctrl_info, event);
3809 }
3810 event++;
3811 }
3812
3813 #define PQI_RESCAN_WORK_FOR_EVENT_DELAY (5 * HZ)
3814
3815 if (rescan_needed)
3816 pqi_schedule_rescan_worker_with_delay(ctrl_info,
3817 PQI_RESCAN_WORK_FOR_EVENT_DELAY);
3818
3819 out:
3820 pqi_ctrl_unbusy(ctrl_info);
3821 }
3822
3823 #define PQI_HEARTBEAT_TIMER_INTERVAL (10 * HZ)
3824
3825 static void pqi_heartbeat_timer_handler(struct timer_list *t)
3826 {
3827 int num_interrupts;
3828 u32 heartbeat_count;
3829 struct pqi_ctrl_info *ctrl_info = timer_container_of(ctrl_info, t,
3830 heartbeat_timer);
3831
3832 pqi_check_ctrl_health(ctrl_info);
3833 if (pqi_ctrl_offline(ctrl_info))
3834 return;
3835
3836 num_interrupts = atomic_read(&ctrl_info->num_interrupts);
3837 heartbeat_count = pqi_read_heartbeat_counter(ctrl_info);
3838
3839 if (num_interrupts == ctrl_info->previous_num_interrupts) {
3840 if (heartbeat_count == ctrl_info->previous_heartbeat_count) {
3841 dev_err(&ctrl_info->pci_dev->dev,
3842 "no heartbeat detected - last heartbeat count: %u\n",
3843 heartbeat_count);
3844 pqi_take_ctrl_offline(ctrl_info, PQI_NO_HEARTBEAT);
3845 return;
3846 }
3847 } else {
3848 ctrl_info->previous_num_interrupts = num_interrupts;
3849 }
3850
3851 ctrl_info->previous_heartbeat_count = heartbeat_count;
3852 mod_timer(&ctrl_info->heartbeat_timer,
3853 jiffies + PQI_HEARTBEAT_TIMER_INTERVAL);
3854 }
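
/*
 * The heartbeat handler is a self-rearming watchdog: each run samples two
 * liveness signals (interrupt count and the firmware heartbeat counter),
 * compares them with the previous sample, and calls mod_timer() to rearm
 * only while the controller still looks alive. The pattern in outline
 * (all demo_* names are hypothetical):
 */
#if 0	/* illustrative sketch only - never compiled into the driver */
static void demo_watchdog(struct timer_list *t)
{
	struct demo_ctx *ctx = timer_container_of(ctx, t, timer);

	if (!demo_still_alive(ctx))	/* hypothetical liveness check */
		return;		/* do not rearm: device declared dead */

	ctx->previous_sample = demo_current_sample(ctx);
	mod_timer(&ctx->timer, jiffies + 10 * HZ);	/* rearm */
}
#endif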
3855
3856 static void pqi_start_heartbeat_timer(struct pqi_ctrl_info *ctrl_info)
3857 {
3858 if (!ctrl_info->heartbeat_counter)
3859 return;
3860
3861 ctrl_info->previous_num_interrupts =
3862 atomic_read(&ctrl_info->num_interrupts);
3863 ctrl_info->previous_heartbeat_count =
3864 pqi_read_heartbeat_counter(ctrl_info);
3865
3866 ctrl_info->heartbeat_timer.expires =
3867 jiffies + PQI_HEARTBEAT_TIMER_INTERVAL;
3868 add_timer(&ctrl_info->heartbeat_timer);
3869 }
3870
3871 static inline void pqi_stop_heartbeat_timer(struct pqi_ctrl_info *ctrl_info)
3872 {
3873 timer_delete_sync(&ctrl_info->heartbeat_timer);
3874 }
3875
3876 static void pqi_ofa_capture_event_payload(struct pqi_ctrl_info *ctrl_info,
3877 struct pqi_event *event, struct pqi_event_response *response)
3878 {
3879 switch (event->event_id) {
3880 case PQI_EVENT_OFA_MEMORY_ALLOCATION:
3881 ctrl_info->ofa_bytes_requested =
3882 get_unaligned_le32(&response->data.ofa_memory_allocation.bytes_requested);
3883 break;
3884 case PQI_EVENT_OFA_CANCELED:
3885 ctrl_info->ofa_cancel_reason =
3886 get_unaligned_le16(&response->data.ofa_cancelled.reason);
3887 break;
3888 }
3889 }
3890
3891 static int pqi_process_event_intr(struct pqi_ctrl_info *ctrl_info)
3892 {
3893 int num_events;
3894 pqi_index_t oq_pi;
3895 pqi_index_t oq_ci;
3896 struct pqi_event_queue *event_queue;
3897 struct pqi_event_response *response;
3898 struct pqi_event *event;
3899 int event_index;
3900
3901 event_queue = &ctrl_info->event_queue;
3902 num_events = 0;
3903 oq_ci = event_queue->oq_ci_copy;
3904
3905 while (1) {
3906 oq_pi = readl(event_queue->oq_pi);
3907 if (oq_pi >= PQI_NUM_EVENT_QUEUE_ELEMENTS) {
3908 pqi_invalid_response(ctrl_info, PQI_EVENT_PI_OUT_OF_RANGE);
3909 dev_err(&ctrl_info->pci_dev->dev,
3910 "event interrupt: producer index (%u) out of range (0-%u): consumer index: %u\n",
3911 oq_pi, PQI_NUM_EVENT_QUEUE_ELEMENTS - 1, oq_ci);
3912 return -1;
3913 }
3914
3915 if (oq_pi == oq_ci)
3916 break;
3917
3918 num_events++;
3919 response = event_queue->oq_element_array + (oq_ci * PQI_EVENT_OQ_ELEMENT_LENGTH);
3920
3921 event_index = pqi_event_type_to_event_index(response->event_type);
3922
3923 if (event_index >= 0 && response->request_acknowledge) {
3924 event = &ctrl_info->events[event_index];
3925 event->pending = true;
3926 event->event_type = response->event_type;
3927 event->event_id = get_unaligned_le16(&response->event_id);
3928 event->additional_event_id =
3929 get_unaligned_le32(&response->additional_event_id);
3930 if (event->event_type == PQI_EVENT_TYPE_OFA)
3931 pqi_ofa_capture_event_payload(ctrl_info, event, response);
3932 }
3933
3934 oq_ci = (oq_ci + 1) % PQI_NUM_EVENT_QUEUE_ELEMENTS;
3935 }
3936
3937 if (num_events) {
3938 event_queue->oq_ci_copy = oq_ci;
3939 writel(oq_ci, event_queue->oq_ci);
3940 schedule_work(&ctrl_info->event_work);
3941 }
3942
3943 return num_events;
3944 }
3945
3946 #define PQI_LEGACY_INTX_MASK 0x1
3947
3948 static inline void pqi_configure_legacy_intx(struct pqi_ctrl_info *ctrl_info, bool enable_intx)
3949 {
3950 u32 intx_mask;
3951 struct pqi_device_registers __iomem *pqi_registers;
3952 volatile void __iomem *register_addr;
3953
3954 pqi_registers = ctrl_info->pqi_registers;
3955
3956 if (enable_intx)
3957 register_addr = &pqi_registers->legacy_intx_mask_clear;
3958 else
3959 register_addr = &pqi_registers->legacy_intx_mask_set;
3960
3961 intx_mask = readl(register_addr);
3962 intx_mask |= PQI_LEGACY_INTX_MASK;
3963 writel(intx_mask, register_addr);
3964 }
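
/*
 * The mask_set/mask_clear register names suggest a write-one-to-set /
 * write-one-to-clear pair, a common hardware idiom that lets software flip
 * a single mask bit without a read-modify-write race (the readl() above is
 * then redundant but harmless). The generic idiom, with hypothetical
 * registers, would look like this:
 */
#if 0	/* illustrative sketch only - never compiled into the driver */
#define DEMO_IRQ_MASK_BIT	0x1

static void demo_mask_irq(void __iomem *mask_set_reg)
{
	writel(DEMO_IRQ_MASK_BIT, mask_set_reg);	/* sets the mask bit */
}

static void demo_unmask_irq(void __iomem *mask_clear_reg)
{
	writel(DEMO_IRQ_MASK_BIT, mask_clear_reg);	/* clears the mask bit */
}
#endif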
3965
3966 static void pqi_change_irq_mode(struct pqi_ctrl_info *ctrl_info,
3967 enum pqi_irq_mode new_mode)
3968 {
3969 switch (ctrl_info->irq_mode) {
3970 case IRQ_MODE_MSIX:
3971 switch (new_mode) {
3972 case IRQ_MODE_MSIX:
3973 break;
3974 case IRQ_MODE_INTX:
3975 pqi_configure_legacy_intx(ctrl_info, true);
3976 sis_enable_intx(ctrl_info);
3977 break;
3978 case IRQ_MODE_NONE:
3979 break;
3980 }
3981 break;
3982 case IRQ_MODE_INTX:
3983 switch (new_mode) {
3984 case IRQ_MODE_MSIX:
3985 pqi_configure_legacy_intx(ctrl_info, false);
3986 sis_enable_msix(ctrl_info);
3987 break;
3988 case IRQ_MODE_INTX:
3989 break;
3990 case IRQ_MODE_NONE:
3991 pqi_configure_legacy_intx(ctrl_info, false);
3992 break;
3993 }
3994 break;
3995 case IRQ_MODE_NONE:
3996 switch (new_mode) {
3997 case IRQ_MODE_MSIX:
3998 sis_enable_msix(ctrl_info);
3999 break;
4000 case IRQ_MODE_INTX:
4001 pqi_configure_legacy_intx(ctrl_info, true);
4002 sis_enable_intx(ctrl_info);
4003 break;
4004 case IRQ_MODE_NONE:
4005 break;
4006 }
4007 break;
4008 }
4009
4010 ctrl_info->irq_mode = new_mode;
4011 }
4012
4013 #define PQI_LEGACY_INTX_PENDING 0x1
4014
4015 static inline bool pqi_is_valid_irq(struct pqi_ctrl_info *ctrl_info)
4016 {
4017 bool valid_irq;
4018 u32 intx_status;
4019
4020 switch (ctrl_info->irq_mode) {
4021 case IRQ_MODE_MSIX:
4022 valid_irq = true;
4023 break;
4024 case IRQ_MODE_INTX:
4025 intx_status = readl(&ctrl_info->pqi_registers->legacy_intx_status);
4026 if (intx_status & PQI_LEGACY_INTX_PENDING)
4027 valid_irq = true;
4028 else
4029 valid_irq = false;
4030 break;
4031 case IRQ_MODE_NONE:
4032 default:
4033 valid_irq = false;
4034 break;
4035 }
4036
4037 return valid_irq;
4038 }
4039
4040 static irqreturn_t pqi_irq_handler(int irq, void *data)
4041 {
4042 struct pqi_ctrl_info *ctrl_info;
4043 struct pqi_queue_group *queue_group;
4044 int num_io_responses_handled;
4045 int num_events_handled;
4046
4047 queue_group = data;
4048 ctrl_info = queue_group->ctrl_info;
4049
4050 if (!pqi_is_valid_irq(ctrl_info))
4051 return IRQ_NONE;
4052
4053 num_io_responses_handled = pqi_process_io_intr(ctrl_info, queue_group);
4054 if (num_io_responses_handled < 0)
4055 goto out;
4056
4057 if (irq == ctrl_info->event_irq) {
4058 num_events_handled = pqi_process_event_intr(ctrl_info);
4059 if (num_events_handled < 0)
4060 goto out;
4061 } else {
4062 num_events_handled = 0;
4063 }
4064
4065 if (num_io_responses_handled + num_events_handled > 0)
4066 atomic_inc(&ctrl_info->num_interrupts);
4067
4068 pqi_start_io(ctrl_info, queue_group, RAID_PATH, NULL);
4069 pqi_start_io(ctrl_info, queue_group, AIO_PATH, NULL);
4070
4071 out:
4072 return IRQ_HANDLED;
4073 }
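
/*
 * Because legacy INTx lines can be shared, the handler must return IRQ_NONE
 * whenever the interrupt was not raised by this device (pqi_is_valid_irq()
 * checks the pending bit), so the kernel can invoke the other handlers on
 * the line and detect spurious interrupt storms. The minimal contract, with
 * hypothetical demo_* helpers:
 */
#if 0	/* illustrative sketch only - never compiled into the driver */
static irqreturn_t demo_irq_handler(int irq, void *data)
{
	struct demo_dev *dev = data;

	if (!demo_irq_is_mine(dev))	/* hypothetical pending-bit test */
		return IRQ_NONE;	/* not ours: let other handlers run */

	demo_service_device(dev);	/* drain completions, ack hardware */
	return IRQ_HANDLED;
}
#endif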
4074
4075 static int pqi_request_irqs(struct pqi_ctrl_info *ctrl_info)
4076 {
4077 struct pci_dev *pci_dev = ctrl_info->pci_dev;
4078 int i;
4079 int rc;
4080
4081 ctrl_info->event_irq = pci_irq_vector(pci_dev, 0);
4082
4083 for (i = 0; i < ctrl_info->num_msix_vectors_enabled; i++) {
4084 rc = request_irq(pci_irq_vector(pci_dev, i), pqi_irq_handler, 0,
4085 DRIVER_NAME_SHORT, &ctrl_info->queue_groups[i]);
4086 if (rc) {
4087 dev_err(&pci_dev->dev,
4088 "irq %u init failed with error %d\n",
4089 pci_irq_vector(pci_dev, i), rc);
4090 return rc;
4091 }
4092 ctrl_info->num_msix_vectors_initialized++;
4093 }
4094
4095 return 0;
4096 }
4097
4098 static void pqi_free_irqs(struct pqi_ctrl_info *ctrl_info)
4099 {
4100 int i;
4101
4102 for (i = 0; i < ctrl_info->num_msix_vectors_initialized; i++)
4103 free_irq(pci_irq_vector(ctrl_info->pci_dev, i),
4104 &ctrl_info->queue_groups[i]);
4105
4106 ctrl_info->num_msix_vectors_initialized = 0;
4107 }
4108
4109 static int pqi_enable_msix_interrupts(struct pqi_ctrl_info *ctrl_info)
4110 {
4111 int num_vectors_enabled;
4112 unsigned int flags = PCI_IRQ_MSIX;
4113
4114 if (!pqi_disable_managed_interrupts)
4115 flags |= PCI_IRQ_AFFINITY;
4116
4117 num_vectors_enabled = pci_alloc_irq_vectors(ctrl_info->pci_dev,
4118 PQI_MIN_MSIX_VECTORS, ctrl_info->num_queue_groups,
4119 flags);
4120 if (num_vectors_enabled < 0) {
4121 dev_err(&ctrl_info->pci_dev->dev,
4122 "MSI-X init failed with error %d\n",
4123 num_vectors_enabled);
4124 return num_vectors_enabled;
4125 }
4126
4127 ctrl_info->num_msix_vectors_enabled = num_vectors_enabled;
4128 ctrl_info->irq_mode = IRQ_MODE_MSIX;
4129 return 0;
4130 }
4131
4132 static void pqi_disable_msix_interrupts(struct pqi_ctrl_info *ctrl_info)
4133 {
4134 if (ctrl_info->num_msix_vectors_enabled) {
4135 pci_free_irq_vectors(ctrl_info->pci_dev);
4136 ctrl_info->num_msix_vectors_enabled = 0;
4137 }
4138 }
4139
4140 static int pqi_alloc_operational_queues(struct pqi_ctrl_info *ctrl_info)
4141 {
4142 unsigned int i;
4143 size_t alloc_length;
4144 size_t element_array_length_per_iq;
4145 size_t element_array_length_per_oq;
4146 void *element_array;
4147 void __iomem *next_queue_index;
4148 void *aligned_pointer;
4149 unsigned int num_inbound_queues;
4150 unsigned int num_outbound_queues;
4151 unsigned int num_queue_indexes;
4152 struct pqi_queue_group *queue_group;
4153
4154 element_array_length_per_iq =
4155 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH *
4156 ctrl_info->num_elements_per_iq;
4157 element_array_length_per_oq =
4158 PQI_OPERATIONAL_OQ_ELEMENT_LENGTH *
4159 ctrl_info->num_elements_per_oq;
4160 num_inbound_queues = ctrl_info->num_queue_groups * 2;
4161 num_outbound_queues = ctrl_info->num_queue_groups;
4162 num_queue_indexes = (ctrl_info->num_queue_groups * 3) + 1;
4163
4164 aligned_pointer = NULL;
4165
4166 for (i = 0; i < num_inbound_queues; i++) {
4167 aligned_pointer = PTR_ALIGN(aligned_pointer,
4168 PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT);
4169 aligned_pointer += element_array_length_per_iq;
4170 }
4171
4172 for (i = 0; i < num_outbound_queues; i++) {
4173 aligned_pointer = PTR_ALIGN(aligned_pointer,
4174 PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT);
4175 aligned_pointer += element_array_length_per_oq;
4176 }
4177
4178 aligned_pointer = PTR_ALIGN(aligned_pointer,
4179 PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT);
4180 aligned_pointer += PQI_NUM_EVENT_QUEUE_ELEMENTS *
4181 PQI_EVENT_OQ_ELEMENT_LENGTH;
4182
4183 for (i = 0; i < num_queue_indexes; i++) {
4184 aligned_pointer = PTR_ALIGN(aligned_pointer,
4185 PQI_OPERATIONAL_INDEX_ALIGNMENT);
4186 aligned_pointer += sizeof(pqi_index_t);
4187 }
4188
4189 alloc_length = (size_t)aligned_pointer +
4190 PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT;
4191
4192 alloc_length += PQI_EXTRA_SGL_MEMORY;
4193
4194 ctrl_info->queue_memory_base =
4195 dma_alloc_coherent(&ctrl_info->pci_dev->dev, alloc_length,
4196 &ctrl_info->queue_memory_base_dma_handle,
4197 GFP_KERNEL);
4198
4199 if (!ctrl_info->queue_memory_base)
4200 return -ENOMEM;
4201
4202 ctrl_info->queue_memory_length = alloc_length;
4203
4204 element_array = PTR_ALIGN(ctrl_info->queue_memory_base,
4205 PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT);
4206
4207 for (i = 0; i < ctrl_info->num_queue_groups; i++) {
4208 queue_group = &ctrl_info->queue_groups[i];
4209 queue_group->iq_element_array[RAID_PATH] = element_array;
4210 queue_group->iq_element_array_bus_addr[RAID_PATH] =
4211 ctrl_info->queue_memory_base_dma_handle +
4212 (element_array - ctrl_info->queue_memory_base);
4213 element_array += element_array_length_per_iq;
4214 element_array = PTR_ALIGN(element_array,
4215 PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT);
4216 queue_group->iq_element_array[AIO_PATH] = element_array;
4217 queue_group->iq_element_array_bus_addr[AIO_PATH] =
4218 ctrl_info->queue_memory_base_dma_handle +
4219 (element_array - ctrl_info->queue_memory_base);
4220 element_array += element_array_length_per_iq;
4221 element_array = PTR_ALIGN(element_array,
4222 PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT);
4223 }
4224
4225 for (i = 0; i < ctrl_info->num_queue_groups; i++) {
4226 queue_group = &ctrl_info->queue_groups[i];
4227 queue_group->oq_element_array = element_array;
4228 queue_group->oq_element_array_bus_addr =
4229 ctrl_info->queue_memory_base_dma_handle +
4230 (element_array - ctrl_info->queue_memory_base);
4231 element_array += element_array_length_per_oq;
4232 element_array = PTR_ALIGN(element_array,
4233 PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT);
4234 }
4235
4236 ctrl_info->event_queue.oq_element_array = element_array;
4237 ctrl_info->event_queue.oq_element_array_bus_addr =
4238 ctrl_info->queue_memory_base_dma_handle +
4239 (element_array - ctrl_info->queue_memory_base);
4240 element_array += PQI_NUM_EVENT_QUEUE_ELEMENTS *
4241 PQI_EVENT_OQ_ELEMENT_LENGTH;
4242
4243 next_queue_index = (void __iomem *)PTR_ALIGN(element_array,
4244 PQI_OPERATIONAL_INDEX_ALIGNMENT);
4245
4246 for (i = 0; i < ctrl_info->num_queue_groups; i++) {
4247 queue_group = &ctrl_info->queue_groups[i];
4248 queue_group->iq_ci[RAID_PATH] = next_queue_index;
4249 queue_group->iq_ci_bus_addr[RAID_PATH] =
4250 ctrl_info->queue_memory_base_dma_handle +
4251 (next_queue_index -
4252 (void __iomem *)ctrl_info->queue_memory_base);
4253 next_queue_index += sizeof(pqi_index_t);
4254 next_queue_index = PTR_ALIGN(next_queue_index,
4255 PQI_OPERATIONAL_INDEX_ALIGNMENT);
4256 queue_group->iq_ci[AIO_PATH] = next_queue_index;
4257 queue_group->iq_ci_bus_addr[AIO_PATH] =
4258 ctrl_info->queue_memory_base_dma_handle +
4259 (next_queue_index -
4260 (void __iomem *)ctrl_info->queue_memory_base);
4261 next_queue_index += sizeof(pqi_index_t);
4262 next_queue_index = PTR_ALIGN(next_queue_index,
4263 PQI_OPERATIONAL_INDEX_ALIGNMENT);
4264 queue_group->oq_pi = next_queue_index;
4265 queue_group->oq_pi_bus_addr =
4266 ctrl_info->queue_memory_base_dma_handle +
4267 (next_queue_index -
4268 (void __iomem *)ctrl_info->queue_memory_base);
4269 next_queue_index += sizeof(pqi_index_t);
4270 next_queue_index = PTR_ALIGN(next_queue_index,
4271 PQI_OPERATIONAL_INDEX_ALIGNMENT);
4272 }
4273
4274 ctrl_info->event_queue.oq_pi = next_queue_index;
4275 ctrl_info->event_queue.oq_pi_bus_addr =
4276 ctrl_info->queue_memory_base_dma_handle +
4277 (next_queue_index -
4278 (void __iomem *)ctrl_info->queue_memory_base);
4279
4280 return 0;
4281 }
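
/*
 * pqi_alloc_operational_queues() sizes its slab with a dry run: it walks the
 * intended layout starting from a NULL pointer, applying PTR_ALIGN() at each
 * boundary, so the final pointer value equals the worst-case total size. The
 * same walk is then repeated over the real allocation to carve out each
 * piece. A condensed sketch of the technique (dev, dma_handle and the sizes
 * are assumed/illustrative):
 */
#if 0	/* illustrative sketch only - never compiled into the driver */
void *p = NULL;
size_t a_len = 100, b_len = 200;	/* example sub-buffer lengths */

/* Pass 1: simulate the layout from NULL to learn the total size. */
p = PTR_ALIGN(p, 64); p += a_len;
p = PTR_ALIGN(p, 64); p += b_len;
size_t total = (size_t)p + 64;	/* + slack to align the real base */

/* Pass 2: repeat the identical walk over the real buffer. */
void *base = dma_alloc_coherent(dev, total, &dma_handle, GFP_KERNEL);
void *a = PTR_ALIGN(base, 64);
void *b = PTR_ALIGN(a + a_len, 64);
/* bus address of any piece: dma_handle + (piece - base), as above */
#endif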
4282
4283 static void pqi_init_operational_queues(struct pqi_ctrl_info *ctrl_info)
4284 {
4285 unsigned int i;
4286 u16 next_iq_id = PQI_MIN_OPERATIONAL_QUEUE_ID;
4287 u16 next_oq_id = PQI_MIN_OPERATIONAL_QUEUE_ID;
4288
4289 /*
4290 * Initialize the backpointers to the controller structure in
4291 * each operational queue group structure.
4292 */
4293 for (i = 0; i < ctrl_info->num_queue_groups; i++)
4294 ctrl_info->queue_groups[i].ctrl_info = ctrl_info;
4295
4296 /*
4297 * Assign IDs to all operational queues. Note that the IDs
4298 * assigned to operational IQs are independent of the IDs
4299 * assigned to operational OQs.
4300 */
4301 ctrl_info->event_queue.oq_id = next_oq_id++;
4302 for (i = 0; i < ctrl_info->num_queue_groups; i++) {
4303 ctrl_info->queue_groups[i].iq_id[RAID_PATH] = next_iq_id++;
4304 ctrl_info->queue_groups[i].iq_id[AIO_PATH] = next_iq_id++;
4305 ctrl_info->queue_groups[i].oq_id = next_oq_id++;
4306 }
4307
4308 /*
4309 * Assign MSI-X table entry indexes to all queues. Note that the
4310 * interrupt for the event queue is shared with the first queue group.
4311 */
4312 ctrl_info->event_queue.int_msg_num = 0;
4313 for (i = 0; i < ctrl_info->num_queue_groups; i++)
4314 ctrl_info->queue_groups[i].int_msg_num = i;
4315
4316 for (i = 0; i < ctrl_info->num_queue_groups; i++) {
4317 spin_lock_init(&ctrl_info->queue_groups[i].submit_lock[0]);
4318 spin_lock_init(&ctrl_info->queue_groups[i].submit_lock[1]);
4319 INIT_LIST_HEAD(&ctrl_info->queue_groups[i].request_list[0]);
4320 INIT_LIST_HEAD(&ctrl_info->queue_groups[i].request_list[1]);
4321 }
4322 }
4323
4324 static int pqi_alloc_admin_queues(struct pqi_ctrl_info *ctrl_info)
4325 {
4326 size_t alloc_length;
4327 struct pqi_admin_queues_aligned *admin_queues_aligned;
4328 struct pqi_admin_queues *admin_queues;
4329
4330 alloc_length = sizeof(struct pqi_admin_queues_aligned) +
4331 PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT;
4332
4333 ctrl_info->admin_queue_memory_base =
4334 dma_alloc_coherent(&ctrl_info->pci_dev->dev, alloc_length,
4335 &ctrl_info->admin_queue_memory_base_dma_handle,
4336 GFP_KERNEL);
4337
4338 if (!ctrl_info->admin_queue_memory_base)
4339 return -ENOMEM;
4340
4341 ctrl_info->admin_queue_memory_length = alloc_length;
4342
4343 admin_queues = &ctrl_info->admin_queues;
4344 admin_queues_aligned = PTR_ALIGN(ctrl_info->admin_queue_memory_base,
4345 PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT);
4346 admin_queues->iq_element_array =
4347 &admin_queues_aligned->iq_element_array;
4348 admin_queues->oq_element_array =
4349 &admin_queues_aligned->oq_element_array;
4350 admin_queues->iq_ci =
4351 (pqi_index_t __iomem *)&admin_queues_aligned->iq_ci;
4352 admin_queues->oq_pi =
4353 (pqi_index_t __iomem *)&admin_queues_aligned->oq_pi;
4354
4355 admin_queues->iq_element_array_bus_addr =
4356 ctrl_info->admin_queue_memory_base_dma_handle +
4357 (admin_queues->iq_element_array -
4358 ctrl_info->admin_queue_memory_base);
4359 admin_queues->oq_element_array_bus_addr =
4360 ctrl_info->admin_queue_memory_base_dma_handle +
4361 (admin_queues->oq_element_array -
4362 ctrl_info->admin_queue_memory_base);
4363 admin_queues->iq_ci_bus_addr =
4364 ctrl_info->admin_queue_memory_base_dma_handle +
4365 ((void __iomem *)admin_queues->iq_ci -
4366 (void __iomem *)ctrl_info->admin_queue_memory_base);
4367 admin_queues->oq_pi_bus_addr =
4368 ctrl_info->admin_queue_memory_base_dma_handle +
4369 ((void __iomem *)admin_queues->oq_pi -
4370 (void __iomem *)ctrl_info->admin_queue_memory_base);
4371
4372 return 0;
4373 }
4374
4375 #define PQI_ADMIN_QUEUE_CREATE_TIMEOUT_JIFFIES HZ
4376 #define PQI_ADMIN_QUEUE_CREATE_POLL_INTERVAL_MSECS 1
4377
4378 static int pqi_create_admin_queues(struct pqi_ctrl_info *ctrl_info)
4379 {
4380 struct pqi_device_registers __iomem *pqi_registers;
4381 struct pqi_admin_queues *admin_queues;
4382 unsigned long timeout;
4383 u8 status;
4384 u32 reg;
4385
4386 pqi_registers = ctrl_info->pqi_registers;
4387 admin_queues = &ctrl_info->admin_queues;
4388
4389 writeq((u64)admin_queues->iq_element_array_bus_addr,
4390 &pqi_registers->admin_iq_element_array_addr);
4391 writeq((u64)admin_queues->oq_element_array_bus_addr,
4392 &pqi_registers->admin_oq_element_array_addr);
4393 writeq((u64)admin_queues->iq_ci_bus_addr,
4394 &pqi_registers->admin_iq_ci_addr);
4395 writeq((u64)admin_queues->oq_pi_bus_addr,
4396 &pqi_registers->admin_oq_pi_addr);
4397
4398 reg = PQI_ADMIN_IQ_NUM_ELEMENTS |
4399 (PQI_ADMIN_OQ_NUM_ELEMENTS << 8) |
4400 (admin_queues->int_msg_num << 16);
4401 writel(reg, &pqi_registers->admin_iq_num_elements);
4402
4403 writel(PQI_CREATE_ADMIN_QUEUE_PAIR,
4404 &pqi_registers->function_and_status_code);
4405
4406 timeout = PQI_ADMIN_QUEUE_CREATE_TIMEOUT_JIFFIES + jiffies;
4407 while (1) {
4408 msleep(PQI_ADMIN_QUEUE_CREATE_POLL_INTERVAL_MSECS);
4409 status = readb(&pqi_registers->function_and_status_code);
4410 if (status == PQI_STATUS_IDLE)
4411 break;
4412 if (time_after(jiffies, timeout))
4413 return -ETIMEDOUT;
4414 }
4415
4416 /*
4417 * The offset registers are not initialized to the correct
4418 * offsets until *after* the create admin queue pair command
4419 * completes successfully.
4420 */
4421 admin_queues->iq_pi = ctrl_info->iomem_base +
4422 PQI_DEVICE_REGISTERS_OFFSET +
4423 readq(&pqi_registers->admin_iq_pi_offset);
4424 admin_queues->oq_ci = ctrl_info->iomem_base +
4425 PQI_DEVICE_REGISTERS_OFFSET +
4426 readq(&pqi_registers->admin_oq_ci_offset);
4427
4428 return 0;
4429 }
4430
4431 static void pqi_submit_admin_request(struct pqi_ctrl_info *ctrl_info,
4432 struct pqi_general_admin_request *request)
4433 {
4434 struct pqi_admin_queues *admin_queues;
4435 void *next_element;
4436 pqi_index_t iq_pi;
4437
4438 admin_queues = &ctrl_info->admin_queues;
4439 iq_pi = admin_queues->iq_pi_copy;
4440
4441 next_element = admin_queues->iq_element_array +
4442 (iq_pi * PQI_ADMIN_IQ_ELEMENT_LENGTH);
4443
4444 memcpy(next_element, request, sizeof(*request));
4445
4446 iq_pi = (iq_pi + 1) % PQI_ADMIN_IQ_NUM_ELEMENTS;
4447 admin_queues->iq_pi_copy = iq_pi;
4448
4449 /*
4450 * This write notifies the controller that an IU is available to be
4451 * processed.
4452 */
4453 writel(iq_pi, admin_queues->iq_pi);
4454 }
4455
4456 #define PQI_ADMIN_REQUEST_TIMEOUT_SECS 60
4457
4458 static int pqi_poll_for_admin_response(struct pqi_ctrl_info *ctrl_info,
4459 struct pqi_general_admin_response *response)
4460 {
4461 struct pqi_admin_queues *admin_queues;
4462 pqi_index_t oq_pi;
4463 pqi_index_t oq_ci;
4464 unsigned long timeout;
4465
4466 admin_queues = &ctrl_info->admin_queues;
4467 oq_ci = admin_queues->oq_ci_copy;
4468
4469 timeout = (PQI_ADMIN_REQUEST_TIMEOUT_SECS * HZ) + jiffies;
4470
4471 while (1) {
4472 oq_pi = readl(admin_queues->oq_pi);
4473 if (oq_pi != oq_ci)
4474 break;
4475 if (time_after(jiffies, timeout)) {
4476 dev_err(&ctrl_info->pci_dev->dev,
4477 "timed out waiting for admin response\n");
4478 return -ETIMEDOUT;
4479 }
4480 if (!sis_is_firmware_running(ctrl_info))
4481 return -ENXIO;
4482 usleep_range(1000, 2000);
4483 }
4484
4485 memcpy(response, admin_queues->oq_element_array +
4486 (oq_ci * PQI_ADMIN_OQ_ELEMENT_LENGTH), sizeof(*response));
4487
4488 oq_ci = (oq_ci + 1) % PQI_ADMIN_OQ_NUM_ELEMENTS;
4489 admin_queues->oq_ci_copy = oq_ci;
4490 writel(oq_ci, admin_queues->oq_ci);
4491
4492 return 0;
4493 }
4494
4495 static void pqi_start_io(struct pqi_ctrl_info *ctrl_info,
4496 struct pqi_queue_group *queue_group, enum pqi_io_path path,
4497 struct pqi_io_request *io_request)
4498 {
4499 struct pqi_io_request *next;
4500 void *next_element;
4501 pqi_index_t iq_pi;
4502 pqi_index_t iq_ci;
4503 size_t iu_length;
4504 unsigned long flags;
4505 unsigned int num_elements_needed;
4506 unsigned int num_elements_to_end_of_queue;
4507 size_t copy_count;
4508 struct pqi_iu_header *request;
4509
4510 spin_lock_irqsave(&queue_group->submit_lock[path], flags);
4511
4512 if (io_request) {
4513 io_request->queue_group = queue_group;
4514 list_add_tail(&io_request->request_list_entry,
4515 &queue_group->request_list[path]);
4516 }
4517
4518 iq_pi = queue_group->iq_pi_copy[path];
4519
4520 list_for_each_entry_safe(io_request, next,
4521 &queue_group->request_list[path], request_list_entry) {
4522
4523 request = io_request->iu;
4524
4525 iu_length = get_unaligned_le16(&request->iu_length) +
4526 PQI_REQUEST_HEADER_LENGTH;
4527 num_elements_needed =
4528 DIV_ROUND_UP(iu_length,
4529 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH);
4530
4531 iq_ci = readl(queue_group->iq_ci[path]);
4532
4533 if (num_elements_needed > pqi_num_elements_free(iq_pi, iq_ci,
4534 ctrl_info->num_elements_per_iq))
4535 break;
4536
4537 put_unaligned_le16(queue_group->oq_id,
4538 &request->response_queue_id);
4539
4540 next_element = queue_group->iq_element_array[path] +
4541 (iq_pi * PQI_OPERATIONAL_IQ_ELEMENT_LENGTH);
4542
4543 num_elements_to_end_of_queue =
4544 ctrl_info->num_elements_per_iq - iq_pi;
4545
4546 if (num_elements_needed <= num_elements_to_end_of_queue) {
4547 memcpy(next_element, request, iu_length);
4548 } else {
4549 copy_count = num_elements_to_end_of_queue *
4550 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH;
4551 memcpy(next_element, request, copy_count);
4552 memcpy(queue_group->iq_element_array[path],
4553 (u8 *)request + copy_count,
4554 iu_length - copy_count);
4555 }
4556
4557 iq_pi = (iq_pi + num_elements_needed) %
4558 ctrl_info->num_elements_per_iq;
4559
4560 list_del(&io_request->request_list_entry);
4561 }
4562
4563 if (iq_pi != queue_group->iq_pi_copy[path]) {
4564 queue_group->iq_pi_copy[path] = iq_pi;
4565 /*
4566 * This write notifies the controller that one or more IUs are
4567 * available to be processed.
4568 */
4569 writel(iq_pi, queue_group->iq_pi[path]);
4570 }
4571
4572 spin_unlock_irqrestore(&queue_group->submit_lock[path], flags);
4573 }
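
/*
 * When a spanning IU does not fit before the end of the element array,
 * pqi_start_io() splits the copy: the first memcpy() fills the tail of the
 * ring and the second wraps to element 0. The arithmetic in isolation
 * (ring, iu and the sizes here are illustrative assumptions):
 */
#if 0	/* illustrative sketch only - never compiled into the driver */
u8 *ring;				/* assumed: base of the element array */
const void *iu;				/* assumed: the IU to be queued */
unsigned int elem_len = 128;		/* bytes per ring element */
unsigned int num_elements = 16;
unsigned int pi = 14;			/* next free element */
size_t iu_length = 300;			/* spans 3 elements */

unsigned int to_end = num_elements - pi;	/* 2 elements before wrap */
size_t copy_count = to_end * elem_len;		/* 256 bytes at the tail */

memcpy(ring + (pi * elem_len), iu, copy_count);
memcpy(ring, (const u8 *)iu + copy_count, iu_length - copy_count);

pi = (pi + DIV_ROUND_UP(iu_length, elem_len)) % num_elements;	/* -> 1 */
#endif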
4574
4575 #define PQI_WAIT_FOR_COMPLETION_IO_TIMEOUT_SECS 10
4576
4577 static int pqi_wait_for_completion_io(struct pqi_ctrl_info *ctrl_info,
4578 struct completion *wait)
4579 {
4580 int rc;
4581
4582 while (1) {
4583 if (wait_for_completion_io_timeout(wait,
4584 PQI_WAIT_FOR_COMPLETION_IO_TIMEOUT_SECS * HZ)) {
4585 rc = 0;
4586 break;
4587 }
4588
4589 pqi_check_ctrl_health(ctrl_info);
4590 if (pqi_ctrl_offline(ctrl_info)) {
4591 rc = -ENXIO;
4592 break;
4593 }
4594 }
4595
4596 return rc;
4597 }
4598
4599 static void pqi_raid_synchronous_complete(struct pqi_io_request *io_request,
4600 void *context)
4601 {
4602 struct completion *waiting = context;
4603
4604 complete(waiting);
4605 }
4606
4607 static int pqi_process_raid_io_error_synchronous(
4608 struct pqi_raid_error_info *error_info)
4609 {
4610 int rc = -EIO;
4611
4612 switch (error_info->data_out_result) {
4613 case PQI_DATA_IN_OUT_GOOD:
4614 if (error_info->status == SAM_STAT_GOOD)
4615 rc = 0;
4616 break;
4617 case PQI_DATA_IN_OUT_UNDERFLOW:
4618 if (error_info->status == SAM_STAT_GOOD ||
4619 error_info->status == SAM_STAT_CHECK_CONDITION)
4620 rc = 0;
4621 break;
4622 case PQI_DATA_IN_OUT_ABORTED:
4623 rc = PQI_CMD_STATUS_ABORTED;
4624 break;
4625 }
4626
4627 return rc;
4628 }
4629
4630 static inline bool pqi_is_blockable_request(struct pqi_iu_header *request)
4631 {
4632 return (request->driver_flags & PQI_DRIVER_NONBLOCKABLE_REQUEST) == 0;
4633 }
4634
4635 static int pqi_submit_raid_request_synchronous(struct pqi_ctrl_info *ctrl_info,
4636 struct pqi_iu_header *request, unsigned int flags,
4637 struct pqi_raid_error_info *error_info)
4638 {
4639 int rc = 0;
4640 struct pqi_io_request *io_request;
4641 size_t iu_length;
4642 DECLARE_COMPLETION_ONSTACK(wait);
4643
4644 if (flags & PQI_SYNC_FLAGS_INTERRUPTABLE) {
4645 if (down_interruptible(&ctrl_info->sync_request_sem))
4646 return -ERESTARTSYS;
4647 } else {
4648 down(&ctrl_info->sync_request_sem);
4649 }
4650
4651 pqi_ctrl_busy(ctrl_info);
4652 /*
4653 * Wait for other admin queue updates such as:
4654 * config table changes, OFA memory updates, ...
4655 */
4656 if (pqi_is_blockable_request(request))
4657 pqi_wait_if_ctrl_blocked(ctrl_info);
4658
4659 if (pqi_ctrl_offline(ctrl_info)) {
4660 rc = -ENXIO;
4661 goto out;
4662 }
4663
4664 io_request = pqi_alloc_io_request(ctrl_info, NULL);
4665
4666 put_unaligned_le16(io_request->index,
4667 &(((struct pqi_raid_path_request *)request)->request_id));
4668
4669 if (request->iu_type == PQI_REQUEST_IU_RAID_PATH_IO)
4670 ((struct pqi_raid_path_request *)request)->error_index =
4671 ((struct pqi_raid_path_request *)request)->request_id;
4672
4673 iu_length = get_unaligned_le16(&request->iu_length) +
4674 PQI_REQUEST_HEADER_LENGTH;
4675 memcpy(io_request->iu, request, iu_length);
4676
4677 io_request->io_complete_callback = pqi_raid_synchronous_complete;
4678 io_request->context = &wait;
4679
4680 pqi_start_io(ctrl_info, &ctrl_info->queue_groups[PQI_DEFAULT_QUEUE_GROUP], RAID_PATH,
4681 io_request);
4682
4683 pqi_wait_for_completion_io(ctrl_info, &wait);
4684
4685 if (error_info) {
4686 if (io_request->error_info)
4687 memcpy(error_info, io_request->error_info, sizeof(*error_info));
4688 else
4689 memset(error_info, 0, sizeof(*error_info));
4690 } else if (rc == 0 && io_request->error_info) {
4691 rc = pqi_process_raid_io_error_synchronous(io_request->error_info);
4692 }
4693
4694 pqi_free_io_request(io_request);
4695
4696 out:
4697 pqi_ctrl_unbusy(ctrl_info);
4698 up(&ctrl_info->sync_request_sem);
4699
4700 return rc;
4701 }
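
/*
 * pqi_submit_raid_request_synchronous() turns the asynchronous queue
 * interface into a blocking call with the classic completion pattern: put a
 * struct completion on the stack, hand it to the request as context, have
 * the callback complete() it, and wait. Bare-bones version of the pattern
 * (demo_submit_async() and struct demo_dev are hypothetical):
 */
#if 0	/* illustrative sketch only - never compiled into the driver */
static void demo_done(struct pqi_io_request *io_request, void *context)
{
	complete((struct completion *)context);
}

static int demo_submit_and_wait(struct demo_dev *dev, void *request)
{
	DECLARE_COMPLETION_ONSTACK(wait);

	demo_submit_async(dev, request, demo_done, &wait);	/* hypothetical */
	wait_for_completion(&wait);	/* driver adds timeout + health checks */

	return 0;
}
#endif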
4702
4703 static int pqi_validate_admin_response(
4704 struct pqi_general_admin_response *response, u8 expected_function_code)
4705 {
4706 if (response->header.iu_type != PQI_RESPONSE_IU_GENERAL_ADMIN)
4707 return -EINVAL;
4708
4709 if (get_unaligned_le16(&response->header.iu_length) !=
4710 PQI_GENERAL_ADMIN_IU_LENGTH)
4711 return -EINVAL;
4712
4713 if (response->function_code != expected_function_code)
4714 return -EINVAL;
4715
4716 if (response->status != PQI_GENERAL_ADMIN_STATUS_SUCCESS)
4717 return -EINVAL;
4718
4719 return 0;
4720 }
4721
4722 static int pqi_submit_admin_request_synchronous(
4723 struct pqi_ctrl_info *ctrl_info,
4724 struct pqi_general_admin_request *request,
4725 struct pqi_general_admin_response *response)
4726 {
4727 int rc;
4728
4729 pqi_submit_admin_request(ctrl_info, request);
4730
4731 rc = pqi_poll_for_admin_response(ctrl_info, response);
4732
4733 if (rc == 0)
4734 rc = pqi_validate_admin_response(response, request->function_code);
4735
4736 return rc;
4737 }
4738
4739 static int pqi_report_device_capability(struct pqi_ctrl_info *ctrl_info)
4740 {
4741 int rc;
4742 struct pqi_general_admin_request request;
4743 struct pqi_general_admin_response response;
4744 struct pqi_device_capability *capability;
4745 struct pqi_iu_layer_descriptor *sop_iu_layer_descriptor;
4746
4747 capability = kmalloc(sizeof(*capability), GFP_KERNEL);
4748 if (!capability)
4749 return -ENOMEM;
4750
4751 memset(&request, 0, sizeof(request));
4752
4753 request.header.iu_type = PQI_REQUEST_IU_GENERAL_ADMIN;
4754 put_unaligned_le16(PQI_GENERAL_ADMIN_IU_LENGTH,
4755 &request.header.iu_length);
4756 request.function_code =
4757 PQI_GENERAL_ADMIN_FUNCTION_REPORT_DEVICE_CAPABILITY;
4758 put_unaligned_le32(sizeof(*capability),
4759 &request.data.report_device_capability.buffer_length);
4760
4761 rc = pqi_map_single(ctrl_info->pci_dev,
4762 &request.data.report_device_capability.sg_descriptor,
4763 capability, sizeof(*capability),
4764 DMA_FROM_DEVICE);
4765 if (rc)
4766 goto out;
4767
4768 rc = pqi_submit_admin_request_synchronous(ctrl_info, &request, &response);
4769
4770 pqi_pci_unmap(ctrl_info->pci_dev,
4771 &request.data.report_device_capability.sg_descriptor, 1,
4772 DMA_FROM_DEVICE);
4773
4774 if (rc)
4775 goto out;
4776
4777 if (response.status != PQI_GENERAL_ADMIN_STATUS_SUCCESS) {
4778 rc = -EIO;
4779 goto out;
4780 }
4781
4782 ctrl_info->max_inbound_queues =
4783 get_unaligned_le16(&capability->max_inbound_queues);
4784 ctrl_info->max_elements_per_iq =
4785 get_unaligned_le16(&capability->max_elements_per_iq);
4786 ctrl_info->max_iq_element_length =
4787 get_unaligned_le16(&capability->max_iq_element_length)
4788 * 16;
4789 ctrl_info->max_outbound_queues =
4790 get_unaligned_le16(&capability->max_outbound_queues);
4791 ctrl_info->max_elements_per_oq =
4792 get_unaligned_le16(&capability->max_elements_per_oq);
4793 ctrl_info->max_oq_element_length =
4794 get_unaligned_le16(&capability->max_oq_element_length)
4795 * 16;
4796
4797 sop_iu_layer_descriptor =
4798 &capability->iu_layer_descriptors[PQI_PROTOCOL_SOP];
4799
4800 ctrl_info->max_inbound_iu_length_per_firmware =
4801 get_unaligned_le16(
4802 &sop_iu_layer_descriptor->max_inbound_iu_length);
4803 ctrl_info->inbound_spanning_supported =
4804 sop_iu_layer_descriptor->inbound_spanning_supported;
4805 ctrl_info->outbound_spanning_supported =
4806 sop_iu_layer_descriptor->outbound_spanning_supported;
4807
4808 out:
4809 kfree(capability);
4810
4811 return rc;
4812 }
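
/*
 * pqi_report_device_capability() shows the recurring shape of
 * buffer-carrying admin requests: allocate a kernel buffer, DMA-map it into
 * the request's SG descriptor, submit synchronously, and unmap before
 * parsing the data. In outline (the sg_descriptor path is simplified; in
 * the real request it lives inside the function-specific data union):
 */
#if 0	/* illustrative sketch only - never compiled into the driver */
buf = kmalloc(len, GFP_KERNEL);
if (!buf)
	return -ENOMEM;

rc = pqi_map_single(pci_dev, &request.sg_descriptor, buf, len,
	DMA_FROM_DEVICE);
if (rc)
	goto out;

rc = pqi_submit_admin_request_synchronous(ctrl_info, &request, &response);

pqi_pci_unmap(pci_dev, &request.sg_descriptor, 1, DMA_FROM_DEVICE);
/* only after unmapping is it safe to parse buf */

out:
kfree(buf);
#endif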
4813
4814 static int pqi_validate_device_capability(struct pqi_ctrl_info *ctrl_info)
4815 {
4816 if (ctrl_info->max_iq_element_length <
4817 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH) {
4818 dev_err(&ctrl_info->pci_dev->dev,
4819 "max. inbound queue element length of %d is less than the required length of %d\n",
4820 ctrl_info->max_iq_element_length,
4821 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH);
4822 return -EINVAL;
4823 }
4824
4825 if (ctrl_info->max_oq_element_length <
4826 PQI_OPERATIONAL_OQ_ELEMENT_LENGTH) {
4827 dev_err(&ctrl_info->pci_dev->dev,
4828 "max. outbound queue element length of %d is less than the required length of %d\n",
4829 ctrl_info->max_oq_element_length,
4830 PQI_OPERATIONAL_OQ_ELEMENT_LENGTH);
4831 return -EINVAL;
4832 }
4833
4834 if (ctrl_info->max_inbound_iu_length_per_firmware <
4835 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH) {
4836 dev_err(&ctrl_info->pci_dev->dev,
4837 "max. inbound IU length of %u is less than the min. required length of %d\n",
4838 ctrl_info->max_inbound_iu_length_per_firmware,
4839 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH);
4840 return -EINVAL;
4841 }
4842
4843 if (!ctrl_info->inbound_spanning_supported) {
4844 dev_err(&ctrl_info->pci_dev->dev,
4845 "the controller does not support inbound spanning\n");
4846 return -EINVAL;
4847 }
4848
4849 if (ctrl_info->outbound_spanning_supported) {
4850 dev_err(&ctrl_info->pci_dev->dev,
4851 "the controller supports outbound spanning but this driver does not\n");
4852 return -EINVAL;
4853 }
4854
4855 return 0;
4856 }
4857
4858 static int pqi_create_event_queue(struct pqi_ctrl_info *ctrl_info)
4859 {
4860 int rc;
4861 struct pqi_event_queue *event_queue;
4862 struct pqi_general_admin_request request;
4863 struct pqi_general_admin_response response;
4864
4865 event_queue = &ctrl_info->event_queue;
4866
4867 /*
4868 * Create the OQ (Outbound Queue - device to host queue) dedicated
4869 * to events.
4870 */
4871 memset(&request, 0, sizeof(request));
4872 request.header.iu_type = PQI_REQUEST_IU_GENERAL_ADMIN;
4873 put_unaligned_le16(PQI_GENERAL_ADMIN_IU_LENGTH,
4874 &request.header.iu_length);
4875 request.function_code = PQI_GENERAL_ADMIN_FUNCTION_CREATE_OQ;
4876 put_unaligned_le16(event_queue->oq_id,
4877 &request.data.create_operational_oq.queue_id);
4878 put_unaligned_le64((u64)event_queue->oq_element_array_bus_addr,
4879 &request.data.create_operational_oq.element_array_addr);
4880 put_unaligned_le64((u64)event_queue->oq_pi_bus_addr,
4881 &request.data.create_operational_oq.pi_addr);
4882 put_unaligned_le16(PQI_NUM_EVENT_QUEUE_ELEMENTS,
4883 &request.data.create_operational_oq.num_elements);
4884 put_unaligned_le16(PQI_EVENT_OQ_ELEMENT_LENGTH / 16,
4885 &request.data.create_operational_oq.element_length);
4886 request.data.create_operational_oq.queue_protocol = PQI_PROTOCOL_SOP;
4887 put_unaligned_le16(event_queue->int_msg_num,
4888 &request.data.create_operational_oq.int_msg_num);
4889
4890 rc = pqi_submit_admin_request_synchronous(ctrl_info, &request,
4891 &response);
4892 if (rc)
4893 return rc;
4894
4895 event_queue->oq_ci = ctrl_info->iomem_base +
4896 PQI_DEVICE_REGISTERS_OFFSET +
4897 get_unaligned_le64(
4898 &response.data.create_operational_oq.oq_ci_offset);
4899
4900 return 0;
4901 }
4902
4903 static int pqi_create_queue_group(struct pqi_ctrl_info *ctrl_info,
4904 unsigned int group_number)
4905 {
4906 int rc;
4907 struct pqi_queue_group *queue_group;
4908 struct pqi_general_admin_request request;
4909 struct pqi_general_admin_response response;
4910
4911 queue_group = &ctrl_info->queue_groups[group_number];
4912
4913 /*
4914 * Create IQ (Inbound Queue - host to device queue) for
4915 * RAID path.
4916 */
4917 memset(&request, 0, sizeof(request));
4918 request.header.iu_type = PQI_REQUEST_IU_GENERAL_ADMIN;
4919 put_unaligned_le16(PQI_GENERAL_ADMIN_IU_LENGTH,
4920 &request.header.iu_length);
4921 request.function_code = PQI_GENERAL_ADMIN_FUNCTION_CREATE_IQ;
4922 put_unaligned_le16(queue_group->iq_id[RAID_PATH],
4923 &request.data.create_operational_iq.queue_id);
4924 put_unaligned_le64(
4925 (u64)queue_group->iq_element_array_bus_addr[RAID_PATH],
4926 &request.data.create_operational_iq.element_array_addr);
4927 put_unaligned_le64((u64)queue_group->iq_ci_bus_addr[RAID_PATH],
4928 &request.data.create_operational_iq.ci_addr);
4929 put_unaligned_le16(ctrl_info->num_elements_per_iq,
4930 &request.data.create_operational_iq.num_elements);
4931 put_unaligned_le16(PQI_OPERATIONAL_IQ_ELEMENT_LENGTH / 16,
4932 &request.data.create_operational_iq.element_length);
4933 request.data.create_operational_iq.queue_protocol = PQI_PROTOCOL_SOP;
4934
4935 rc = pqi_submit_admin_request_synchronous(ctrl_info, &request,
4936 &response);
4937 if (rc) {
4938 dev_err(&ctrl_info->pci_dev->dev,
4939 "error creating inbound RAID queue\n");
4940 return rc;
4941 }
4942
4943 queue_group->iq_pi[RAID_PATH] = ctrl_info->iomem_base +
4944 PQI_DEVICE_REGISTERS_OFFSET +
4945 get_unaligned_le64(
4946 &response.data.create_operational_iq.iq_pi_offset);
4947
4948 /*
4949 * Create IQ (Inbound Queue - host to device queue) for
4950 * Advanced I/O (AIO) path.
4951 */
4952 memset(&request, 0, sizeof(request));
4953 request.header.iu_type = PQI_REQUEST_IU_GENERAL_ADMIN;
4954 put_unaligned_le16(PQI_GENERAL_ADMIN_IU_LENGTH,
4955 &request.header.iu_length);
4956 request.function_code = PQI_GENERAL_ADMIN_FUNCTION_CREATE_IQ;
4957 put_unaligned_le16(queue_group->iq_id[AIO_PATH],
4958 &request.data.create_operational_iq.queue_id);
4959 put_unaligned_le64((u64)queue_group->
4960 iq_element_array_bus_addr[AIO_PATH],
4961 &request.data.create_operational_iq.element_array_addr);
4962 put_unaligned_le64((u64)queue_group->iq_ci_bus_addr[AIO_PATH],
4963 &request.data.create_operational_iq.ci_addr);
4964 put_unaligned_le16(ctrl_info->num_elements_per_iq,
4965 &request.data.create_operational_iq.num_elements);
4966 put_unaligned_le16(PQI_OPERATIONAL_IQ_ELEMENT_LENGTH / 16,
4967 &request.data.create_operational_iq.element_length);
4968 request.data.create_operational_iq.queue_protocol = PQI_PROTOCOL_SOP;
4969
4970 rc = pqi_submit_admin_request_synchronous(ctrl_info, &request,
4971 &response);
4972 if (rc) {
4973 dev_err(&ctrl_info->pci_dev->dev,
4974 "error creating inbound AIO queue\n");
4975 return rc;
4976 }
4977
4978 queue_group->iq_pi[AIO_PATH] = ctrl_info->iomem_base +
4979 PQI_DEVICE_REGISTERS_OFFSET +
4980 get_unaligned_le64(
4981 &response.data.create_operational_iq.iq_pi_offset);
4982
4983 /*
4984 * Designate the 2nd IQ as the AIO path. By default, all IQs are
4985 * assumed to be for RAID path I/O unless we change the queue's
4986 * property.
4987 */
4988 memset(&request, 0, sizeof(request));
4989 request.header.iu_type = PQI_REQUEST_IU_GENERAL_ADMIN;
4990 put_unaligned_le16(PQI_GENERAL_ADMIN_IU_LENGTH,
4991 &request.header.iu_length);
4992 request.function_code = PQI_GENERAL_ADMIN_FUNCTION_CHANGE_IQ_PROPERTY;
4993 put_unaligned_le16(queue_group->iq_id[AIO_PATH],
4994 &request.data.change_operational_iq_properties.queue_id);
4995 put_unaligned_le32(PQI_IQ_PROPERTY_IS_AIO_QUEUE,
4996 &request.data.change_operational_iq_properties.vendor_specific);
4997
4998 rc = pqi_submit_admin_request_synchronous(ctrl_info, &request,
4999 &response);
5000 if (rc) {
5001 dev_err(&ctrl_info->pci_dev->dev,
5002 "error changing queue property\n");
5003 return rc;
5004 }
5005
5006 /*
5007 * Create OQ (Outbound Queue - device to host queue).
5008 */
5009 memset(&request, 0, sizeof(request));
5010 request.header.iu_type = PQI_REQUEST_IU_GENERAL_ADMIN;
5011 put_unaligned_le16(PQI_GENERAL_ADMIN_IU_LENGTH,
5012 &request.header.iu_length);
5013 request.function_code = PQI_GENERAL_ADMIN_FUNCTION_CREATE_OQ;
5014 put_unaligned_le16(queue_group->oq_id,
5015 &request.data.create_operational_oq.queue_id);
5016 put_unaligned_le64((u64)queue_group->oq_element_array_bus_addr,
5017 &request.data.create_operational_oq.element_array_addr);
5018 put_unaligned_le64((u64)queue_group->oq_pi_bus_addr,
5019 &request.data.create_operational_oq.pi_addr);
5020 put_unaligned_le16(ctrl_info->num_elements_per_oq,
5021 &request.data.create_operational_oq.num_elements);
5022 put_unaligned_le16(PQI_OPERATIONAL_OQ_ELEMENT_LENGTH / 16,
5023 &request.data.create_operational_oq.element_length);
5024 request.data.create_operational_oq.queue_protocol = PQI_PROTOCOL_SOP;
5025 put_unaligned_le16(queue_group->int_msg_num,
5026 &request.data.create_operational_oq.int_msg_num);
5027
5028 rc = pqi_submit_admin_request_synchronous(ctrl_info, &request,
5029 &response);
5030 if (rc) {
5031 dev_err(&ctrl_info->pci_dev->dev,
5032 "error creating outbound queue\n");
5033 return rc;
5034 }
5035
5036 queue_group->oq_ci = ctrl_info->iomem_base +
5037 PQI_DEVICE_REGISTERS_OFFSET +
5038 get_unaligned_le64(
5039 &response.data.create_operational_oq.oq_ci_offset);
5040
5041 return 0;
5042 }
5043
5044 static int pqi_create_queues(struct pqi_ctrl_info *ctrl_info)
5045 {
5046 int rc;
5047 unsigned int i;
5048
5049 rc = pqi_create_event_queue(ctrl_info);
5050 if (rc) {
5051 dev_err(&ctrl_info->pci_dev->dev,
5052 "error creating event queue\n");
5053 return rc;
5054 }
5055
5056 for (i = 0; i < ctrl_info->num_queue_groups; i++) {
5057 rc = pqi_create_queue_group(ctrl_info, i);
5058 if (rc) {
5059 dev_err(&ctrl_info->pci_dev->dev,
5060 "error creating queue group number %u/%u\n",
5061 i, ctrl_info->num_queue_groups);
5062 return rc;
5063 }
5064 }
5065
5066 return 0;
5067 }
5068
5069 #define PQI_REPORT_EVENT_CONFIG_BUFFER_LENGTH \
5070 struct_size_t(struct pqi_event_config, descriptors, PQI_MAX_EVENT_DESCRIPTORS)
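/*
 * struct_size_t() here works out to sizeof(struct pqi_event_config) plus
 * room for PQI_MAX_EVENT_DESCRIPTORS trailing struct pqi_event_descriptor
 * entries in the flexible array.
 */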
5071
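/*
 * Event configuration below is a read-modify-write cycle: the current
 * config is DMA'd in from the controller, each supported event descriptor
 * is pointed at our dedicated event OQ (or at OQ 0 to disable delivery),
 * and the modified buffer is DMA'd back via a SET_VENDOR_EVENT_CONFIG
 * request.
 */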
5072 static int pqi_configure_events(struct pqi_ctrl_info *ctrl_info,
5073 bool enable_events)
5074 {
5075 int rc;
5076 unsigned int i;
5077 struct pqi_event_config *event_config;
5078 struct pqi_event_descriptor *event_descriptor;
5079 struct pqi_general_management_request request;
5080
5081 event_config = kmalloc(PQI_REPORT_EVENT_CONFIG_BUFFER_LENGTH,
5082 GFP_KERNEL);
5083 if (!event_config)
5084 return -ENOMEM;
5085
5086 memset(&request, 0, sizeof(request));
5087
5088 request.header.iu_type = PQI_REQUEST_IU_REPORT_VENDOR_EVENT_CONFIG;
5089 put_unaligned_le16(offsetof(struct pqi_general_management_request,
5090 data.report_event_configuration.sg_descriptors[1]) -
5091 PQI_REQUEST_HEADER_LENGTH, &request.header.iu_length);
5092 put_unaligned_le32(PQI_REPORT_EVENT_CONFIG_BUFFER_LENGTH,
5093 &request.data.report_event_configuration.buffer_length);
5094
5095 rc = pqi_map_single(ctrl_info->pci_dev,
5096 request.data.report_event_configuration.sg_descriptors,
5097 event_config, PQI_REPORT_EVENT_CONFIG_BUFFER_LENGTH,
5098 DMA_FROM_DEVICE);
5099 if (rc)
5100 goto out;
5101
5102 rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header, 0, NULL);
5103
5104 pqi_pci_unmap(ctrl_info->pci_dev,
5105 request.data.report_event_configuration.sg_descriptors, 1,
5106 DMA_FROM_DEVICE);
5107
5108 if (rc)
5109 goto out;
5110
5111 for (i = 0; i < event_config->num_event_descriptors; i++) {
5112 event_descriptor = &event_config->descriptors[i];
5113 if (enable_events &&
5114 pqi_is_supported_event(event_descriptor->event_type))
5115 put_unaligned_le16(ctrl_info->event_queue.oq_id,
5116 &event_descriptor->oq_id);
5117 else
5118 put_unaligned_le16(0, &event_descriptor->oq_id);
5119 }
5120
5121 memset(&request, 0, sizeof(request));
5122
5123 request.header.iu_type = PQI_REQUEST_IU_SET_VENDOR_EVENT_CONFIG;
5124 put_unaligned_le16(offsetof(struct pqi_general_management_request,
5125 data.report_event_configuration.sg_descriptors[1]) -
5126 PQI_REQUEST_HEADER_LENGTH, &request.header.iu_length);
5127 put_unaligned_le32(PQI_REPORT_EVENT_CONFIG_BUFFER_LENGTH,
5128 &request.data.report_event_configuration.buffer_length);
5129
5130 rc = pqi_map_single(ctrl_info->pci_dev,
5131 request.data.report_event_configuration.sg_descriptors,
5132 event_config, PQI_REPORT_EVENT_CONFIG_BUFFER_LENGTH,
5133 DMA_TO_DEVICE);
5134 if (rc)
5135 goto out;
5136
5137 rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header, 0, NULL);
5138
5139 pqi_pci_unmap(ctrl_info->pci_dev,
5140 request.data.report_event_configuration.sg_descriptors, 1,
5141 DMA_TO_DEVICE);
5142
5143 out:
5144 kfree(event_config);
5145
5146 return rc;
5147 }
5148
5149 static inline int pqi_enable_events(struct pqi_ctrl_info *ctrl_info)
5150 {
5151 return pqi_configure_events(ctrl_info, true);
5152 }
5153
5154 static void pqi_free_all_io_requests(struct pqi_ctrl_info *ctrl_info)
5155 {
5156 unsigned int i;
5157 struct device *dev;
5158 size_t sg_chain_buffer_length;
5159 struct pqi_io_request *io_request;
5160
5161 if (!ctrl_info->io_request_pool)
5162 return;
5163
5164 dev = &ctrl_info->pci_dev->dev;
5165 sg_chain_buffer_length = ctrl_info->sg_chain_buffer_length;
5166 io_request = ctrl_info->io_request_pool;
5167
5168 for (i = 0; i < ctrl_info->max_io_slots; i++) {
5169 kfree(io_request->iu);
5170 if (!io_request->sg_chain_buffer)
5171 break;
5172 dma_free_coherent(dev, sg_chain_buffer_length,
5173 io_request->sg_chain_buffer,
5174 io_request->sg_chain_buffer_dma_handle);
5175 io_request++;
5176 }
5177
5178 kfree(ctrl_info->io_request_pool);
5179 ctrl_info->io_request_pool = NULL;
5180 }
5181
5182 static inline int pqi_alloc_error_buffer(struct pqi_ctrl_info *ctrl_info)
5183 {
5184 ctrl_info->error_buffer = dma_alloc_coherent(&ctrl_info->pci_dev->dev,
5185 ctrl_info->error_buffer_length,
5186 &ctrl_info->error_buffer_dma_handle,
5187 GFP_KERNEL);
5188 if (!ctrl_info->error_buffer)
5189 return -ENOMEM;
5190
5191 return 0;
5192 }
5193
5194 static int pqi_alloc_io_resources(struct pqi_ctrl_info *ctrl_info)
5195 {
5196 unsigned int i;
5197 void *sg_chain_buffer;
5198 size_t sg_chain_buffer_length;
5199 dma_addr_t sg_chain_buffer_dma_handle;
5200 struct device *dev;
5201 struct pqi_io_request *io_request;
5202
5203 ctrl_info->io_request_pool = kcalloc(ctrl_info->max_io_slots,
5204 sizeof(ctrl_info->io_request_pool[0]), GFP_KERNEL);
5205
5206 if (!ctrl_info->io_request_pool) {
5207 dev_err(&ctrl_info->pci_dev->dev,
5208 "failed to allocate I/O request pool\n");
5209 goto error;
5210 }
5211
5212 dev = &ctrl_info->pci_dev->dev;
5213 sg_chain_buffer_length = ctrl_info->sg_chain_buffer_length;
5214 io_request = ctrl_info->io_request_pool;
5215
5216 for (i = 0; i < ctrl_info->max_io_slots; i++) {
5217 io_request->iu = kmalloc(ctrl_info->max_inbound_iu_length, GFP_KERNEL);
5218
5219 if (!io_request->iu) {
5220 dev_err(&ctrl_info->pci_dev->dev,
5221 "failed to allocate IU buffers\n");
5222 goto error;
5223 }
5224
5225 sg_chain_buffer = dma_alloc_coherent(dev,
5226 sg_chain_buffer_length, &sg_chain_buffer_dma_handle,
5227 GFP_KERNEL);
5228
5229 if (!sg_chain_buffer) {
5230 dev_err(&ctrl_info->pci_dev->dev,
5231 "failed to allocate PQI scatter-gather chain buffers\n");
5232 goto error;
5233 }
5234
5235 io_request->index = i;
5236 io_request->sg_chain_buffer = sg_chain_buffer;
5237 io_request->sg_chain_buffer_dma_handle = sg_chain_buffer_dma_handle;
5238 io_request++;
5239 }
5240
5241 return 0;
5242
5243 error:
5244 pqi_free_all_io_requests(ctrl_info);
5245
5246 return -ENOMEM;
5247 }
5248
5249 /*
5250 * Calculate required resources that are sized based on max. outstanding
5251 * requests and max. transfer size.
5252 */
5253
5254 static void pqi_calculate_io_resources(struct pqi_ctrl_info *ctrl_info)
5255 {
5256 u32 max_transfer_size;
5257 u32 max_sg_entries;
5258
5259 ctrl_info->scsi_ml_can_queue =
5260 ctrl_info->max_outstanding_requests - PQI_RESERVED_IO_SLOTS;
5261 ctrl_info->max_io_slots = ctrl_info->max_outstanding_requests;
5262
5263 ctrl_info->error_buffer_length =
5264 ctrl_info->max_io_slots * PQI_ERROR_BUFFER_ELEMENT_LENGTH;
5265
5266 if (is_kdump_kernel())
5267 max_transfer_size = min(ctrl_info->max_transfer_size,
5268 PQI_MAX_TRANSFER_SIZE_KDUMP);
5269 else
5270 max_transfer_size = min(ctrl_info->max_transfer_size,
5271 PQI_MAX_TRANSFER_SIZE);
5272
5273 max_sg_entries = max_transfer_size / PAGE_SIZE;
5274
5275 /* +1 to cover when the buffer is not page-aligned. */
5276 max_sg_entries++;
5277
5278 max_sg_entries = min(ctrl_info->max_sg_entries, max_sg_entries);
5279
5280 max_transfer_size = (max_sg_entries - 1) * PAGE_SIZE;
5281
5282 ctrl_info->sg_chain_buffer_length =
5283 (max_sg_entries * sizeof(struct pqi_sg_descriptor)) +
5284 PQI_EXTRA_SGL_MEMORY;
5285 ctrl_info->sg_tablesize = max_sg_entries;
5286 ctrl_info->max_sectors = max_transfer_size / 512;
5287 }
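/*
 * Illustrative sizing example (4 KiB pages and a 1 MiB firmware transfer
 * limit are assumptions, not values from this source): 1 MiB / 4 KiB gives
 * 256 SG entries, plus 1 for an unaligned buffer = 257; the usable transfer
 * size is then recomputed as (257 - 1) * 4 KiB = 1 MiB, so max_sectors =
 * 1 MiB / 512 = 2048.
 */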
5288
5289 static void pqi_calculate_queue_resources(struct pqi_ctrl_info *ctrl_info)
5290 {
5291 int num_queue_groups;
5292 u16 num_elements_per_iq;
5293 u16 num_elements_per_oq;
5294
5295 if (is_kdump_kernel()) {
5296 num_queue_groups = 1;
5297 } else {
5298 int max_queue_groups;
5299
5300 max_queue_groups = min(ctrl_info->max_inbound_queues / 2,
5301 ctrl_info->max_outbound_queues - 1);
5302 max_queue_groups = min(max_queue_groups, PQI_MAX_QUEUE_GROUPS);
5303
5304 num_queue_groups =
5305 blk_mq_num_online_queues(ctrl_info->max_msix_vectors);
5306 num_queue_groups = min(num_queue_groups, max_queue_groups);
5307 }
5308
5309 ctrl_info->num_queue_groups = num_queue_groups;
5310
5311 /*
5312 * Make sure that the max. inbound IU length is an even multiple
5313 * of our inbound element length.
5314 */
5315 ctrl_info->max_inbound_iu_length =
5316 (ctrl_info->max_inbound_iu_length_per_firmware /
5317 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH) *
5318 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH;
5319
5320 num_elements_per_iq =
5321 (ctrl_info->max_inbound_iu_length /
5322 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH);
5323
5324 /* Add one because one element in each queue is unusable. */
5325 num_elements_per_iq++;
5326
5327 num_elements_per_iq = min(num_elements_per_iq,
5328 ctrl_info->max_elements_per_iq);
5329
5330 num_elements_per_oq = ((num_elements_per_iq - 1) * 2) + 1;
5331 num_elements_per_oq = min(num_elements_per_oq,
5332 ctrl_info->max_elements_per_oq);
5333
5334 ctrl_info->num_elements_per_iq = num_elements_per_iq;
5335 ctrl_info->num_elements_per_oq = num_elements_per_oq;
5336
5337 ctrl_info->max_sg_per_iu =
5338 ((ctrl_info->max_inbound_iu_length -
5339 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH) /
5340 sizeof(struct pqi_sg_descriptor)) +
5341 PQI_MAX_EMBEDDED_SG_DESCRIPTORS;
5342
5343 ctrl_info->max_sg_per_r56_iu =
5344 ((ctrl_info->max_inbound_iu_length -
5345 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH) /
5346 sizeof(struct pqi_sg_descriptor)) +
5347 PQI_MAX_EMBEDDED_R56_SG_DESCRIPTORS;
5348 }
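/*
 * Worked example with hypothetical firmware values: if the firmware reports
 * a max inbound IU length of 1068 bytes and the IQ element length is 128
 * bytes, the usable IU length rounds down to (1068 / 128) * 128 = 1024.
 * num_elements_per_iq is then 1024 / 128 + 1 = 9 (the +1 covers the one
 * permanently unusable element per queue), and num_elements_per_oq is
 * (9 - 1) * 2 + 1 = 17, both still subject to the firmware-reported caps.
 */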
5349
5350 static inline void pqi_set_sg_descriptor(struct pqi_sg_descriptor *sg_descriptor,
5351 struct scatterlist *sg)
5352 {
5353 u64 address = (u64)sg_dma_address(sg);
5354 unsigned int length = sg_dma_len(sg);
5355
5356 put_unaligned_le64(address, &sg_descriptor->address);
5357 put_unaligned_le32(length, &sg_descriptor->length);
5358 put_unaligned_le32(0, &sg_descriptor->flags);
5359 }
5360
5361 static unsigned int pqi_build_sg_list(struct pqi_sg_descriptor *sg_descriptor,
5362 struct scatterlist *sg, int sg_count, struct pqi_io_request *io_request,
5363 int max_sg_per_iu, bool *chained)
5364 {
5365 int i;
5366 unsigned int num_sg_in_iu;
5367
5368 *chained = false;
5369 i = 0;
5370 num_sg_in_iu = 0;
5371 max_sg_per_iu--; /* Subtract 1 to leave room for chain marker. */
5372
5373 while (1) {
5374 pqi_set_sg_descriptor(sg_descriptor, sg);
5375 if (!*chained)
5376 num_sg_in_iu++;
5377 i++;
5378 if (i == sg_count)
5379 break;
5380 sg_descriptor++;
5381 if (i == max_sg_per_iu) {
5382 put_unaligned_le64((u64)io_request->sg_chain_buffer_dma_handle,
5383 &sg_descriptor->address);
5384 put_unaligned_le32((sg_count - num_sg_in_iu) * sizeof(*sg_descriptor),
5385 &sg_descriptor->length);
5386 put_unaligned_le32(CISS_SG_CHAIN, &sg_descriptor->flags);
5387 *chained = true;
5388 num_sg_in_iu++;
5389 sg_descriptor = io_request->sg_chain_buffer;
5390 }
5391 sg = sg_next(sg);
5392 }
5393
5394 put_unaligned_le32(CISS_SG_LAST, &sg_descriptor->flags);
5395
5396 return num_sg_in_iu;
5397 }
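/*
 * Chained layout produced above: the first (max_sg_per_iu - 1) data
 * descriptors are embedded in the IU, the next in-IU slot holds a
 * CISS_SG_CHAIN descriptor addressing the per-request sg_chain_buffer that
 * carries the remaining descriptors, and the final descriptor (wherever it
 * lands) is flagged CISS_SG_LAST. The returned num_sg_in_iu counts only
 * the in-IU descriptors, chain marker included.
 */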
5398
5399 static int pqi_build_raid_sg_list(struct pqi_ctrl_info *ctrl_info,
5400 struct pqi_raid_path_request *request, struct scsi_cmnd *scmd,
5401 struct pqi_io_request *io_request)
5402 {
5403 u16 iu_length;
5404 int sg_count;
5405 bool chained;
5406 unsigned int num_sg_in_iu;
5407 struct scatterlist *sg;
5408 struct pqi_sg_descriptor *sg_descriptor;
5409
5410 sg_count = scsi_dma_map(scmd);
5411 if (sg_count < 0)
5412 return sg_count;
5413
5414 iu_length = offsetof(struct pqi_raid_path_request, sg_descriptors) -
5415 PQI_REQUEST_HEADER_LENGTH;
5416
5417 if (sg_count == 0)
5418 goto out;
5419
5420 sg = scsi_sglist(scmd);
5421 sg_descriptor = request->sg_descriptors;
5422
5423 num_sg_in_iu = pqi_build_sg_list(sg_descriptor, sg, sg_count, io_request,
5424 ctrl_info->max_sg_per_iu, &chained);
5425
5426 request->partial = chained;
5427 iu_length += num_sg_in_iu * sizeof(*sg_descriptor);
5428
5429 out:
5430 put_unaligned_le16(iu_length, &request->header.iu_length);
5431
5432 return 0;
5433 }
5434
5435 static int pqi_build_aio_r1_sg_list(struct pqi_ctrl_info *ctrl_info,
5436 struct pqi_aio_r1_path_request *request, struct scsi_cmnd *scmd,
5437 struct pqi_io_request *io_request)
5438 {
5439 u16 iu_length;
5440 int sg_count;
5441 bool chained;
5442 unsigned int num_sg_in_iu;
5443 struct scatterlist *sg;
5444 struct pqi_sg_descriptor *sg_descriptor;
5445
5446 sg_count = scsi_dma_map(scmd);
5447 if (sg_count < 0)
5448 return sg_count;
5449
5450 iu_length = offsetof(struct pqi_aio_r1_path_request, sg_descriptors) -
5451 PQI_REQUEST_HEADER_LENGTH;
5452 num_sg_in_iu = 0;
5453
5454 if (sg_count == 0)
5455 goto out;
5456
5457 sg = scsi_sglist(scmd);
5458 sg_descriptor = request->sg_descriptors;
5459
5460 num_sg_in_iu = pqi_build_sg_list(sg_descriptor, sg, sg_count, io_request,
5461 ctrl_info->max_sg_per_iu, &chained);
5462
5463 request->partial = chained;
5464 iu_length += num_sg_in_iu * sizeof(*sg_descriptor);
5465
5466 out:
5467 put_unaligned_le16(iu_length, &request->header.iu_length);
5468 request->num_sg_descriptors = num_sg_in_iu;
5469
5470 return 0;
5471 }
5472
5473 static int pqi_build_aio_r56_sg_list(struct pqi_ctrl_info *ctrl_info,
5474 struct pqi_aio_r56_path_request *request, struct scsi_cmnd *scmd,
5475 struct pqi_io_request *io_request)
5476 {
5477 u16 iu_length;
5478 int sg_count;
5479 bool chained;
5480 unsigned int num_sg_in_iu;
5481 struct scatterlist *sg;
5482 struct pqi_sg_descriptor *sg_descriptor;
5483
5484 sg_count = scsi_dma_map(scmd);
5485 if (sg_count < 0)
5486 return sg_count;
5487
5488 iu_length = offsetof(struct pqi_aio_r56_path_request, sg_descriptors) -
5489 PQI_REQUEST_HEADER_LENGTH;
5490 num_sg_in_iu = 0;
5491
5492 if (sg_count != 0) {
5493 sg = scsi_sglist(scmd);
5494 sg_descriptor = request->sg_descriptors;
5495
5496 num_sg_in_iu = pqi_build_sg_list(sg_descriptor, sg, sg_count, io_request,
5497 ctrl_info->max_sg_per_r56_iu, &chained);
5498
5499 request->partial = chained;
5500 iu_length += num_sg_in_iu * sizeof(*sg_descriptor);
5501 }
5502
5503 put_unaligned_le16(iu_length, &request->header.iu_length);
5504 request->num_sg_descriptors = num_sg_in_iu;
5505
5506 return 0;
5507 }
5508
5509 static int pqi_build_aio_sg_list(struct pqi_ctrl_info *ctrl_info,
5510 struct pqi_aio_path_request *request, struct scsi_cmnd *scmd,
5511 struct pqi_io_request *io_request)
5512 {
5513 u16 iu_length;
5514 int sg_count;
5515 bool chained;
5516 unsigned int num_sg_in_iu;
5517 struct scatterlist *sg;
5518 struct pqi_sg_descriptor *sg_descriptor;
5519
5520 sg_count = scsi_dma_map(scmd);
5521 if (sg_count < 0)
5522 return sg_count;
5523
5524 iu_length = offsetof(struct pqi_aio_path_request, sg_descriptors) -
5525 PQI_REQUEST_HEADER_LENGTH;
5526 num_sg_in_iu = 0;
5527
5528 if (sg_count == 0)
5529 goto out;
5530
5531 sg = scsi_sglist(scmd);
5532 sg_descriptor = request->sg_descriptors;
5533
5534 num_sg_in_iu = pqi_build_sg_list(sg_descriptor, sg, sg_count, io_request,
5535 ctrl_info->max_sg_per_iu, &chained);
5536
5537 request->partial = chained;
5538 iu_length += num_sg_in_iu * sizeof(*sg_descriptor);
5539
5540 out:
5541 put_unaligned_le16(iu_length, &request->header.iu_length);
5542 request->num_sg_descriptors = num_sg_in_iu;
5543
5544 return 0;
5545 }
5546
5547 static void pqi_raid_io_complete(struct pqi_io_request *io_request,
5548 void *context)
5549 {
5550 struct scsi_cmnd *scmd;
5551
5552 scmd = io_request->scmd;
5553 pqi_free_io_request(io_request);
5554 scsi_dma_unmap(scmd);
5555 pqi_scsi_done(scmd);
5556 }
5557
5558 /*
5559 * Adjust the timeout value sent to the firmware for physical-device
5560 * requests by subtracting 3 seconds for timeouts greater than or equal to 8 seconds.
5561 *
5562 * This provides the firmware with additional time to attempt early recovery
5563 * before the OS-level timeout occurs.
5564 */
5565 #define ADJUST_SECS_TIMEOUT_VALUE(tv) (((tv) >= 8) ? ((tv) - 3) : (tv))
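/*
 * Example: a 30-second block-layer timeout is sent to the firmware as 27
 * seconds, giving the firmware a head start on recovery; a 5-second timeout
 * is below the 8-second threshold and passes through unchanged.
 */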
5566
5567 static int pqi_raid_submit_io(struct pqi_ctrl_info *ctrl_info,
5568 struct pqi_scsi_dev *device, struct scsi_cmnd *scmd,
5569 struct pqi_queue_group *queue_group, bool io_high_prio)
5570 {
5571 int rc;
5572 u32 timeout;
5573 size_t cdb_length;
5574 struct pqi_io_request *io_request;
5575 struct pqi_raid_path_request *request;
5576 struct request *rq;
5577
5578 io_request = pqi_alloc_io_request(ctrl_info, scmd);
5579 if (!io_request)
5580 return SCSI_MLQUEUE_HOST_BUSY;
5581
5582 io_request->io_complete_callback = pqi_raid_io_complete;
5583 io_request->scmd = scmd;
5584
5585 request = io_request->iu;
5586 memset(request, 0, offsetof(struct pqi_raid_path_request, sg_descriptors));
5587
5588 request->header.iu_type = PQI_REQUEST_IU_RAID_PATH_IO;
5589 put_unaligned_le32(scsi_bufflen(scmd), &request->buffer_length);
5590 request->task_attribute = SOP_TASK_ATTRIBUTE_SIMPLE;
5591 request->command_priority = io_high_prio;
5592 put_unaligned_le16(io_request->index, &request->request_id);
5593 request->error_index = request->request_id;
5594 memcpy(request->lun_number, device->scsi3addr, sizeof(request->lun_number));
5595 request->ml_device_lun_number = (u8)scmd->device->lun;
5596
5597 cdb_length = min_t(size_t, scmd->cmd_len, sizeof(request->cdb));
5598 memcpy(request->cdb, scmd->cmnd, cdb_length);
5599
5600 switch (cdb_length) {
5601 case 6:
5602 case 10:
5603 case 12:
5604 case 16:
5605 request->additional_cdb_bytes_usage = SOP_ADDITIONAL_CDB_BYTES_0;
5606 break;
5607 case 20:
5608 request->additional_cdb_bytes_usage = SOP_ADDITIONAL_CDB_BYTES_4;
5609 break;
5610 case 24:
5611 request->additional_cdb_bytes_usage = SOP_ADDITIONAL_CDB_BYTES_8;
5612 break;
5613 case 28:
5614 request->additional_cdb_bytes_usage = SOP_ADDITIONAL_CDB_BYTES_12;
5615 break;
5616 case 32:
5617 default:
5618 request->additional_cdb_bytes_usage = SOP_ADDITIONAL_CDB_BYTES_16;
5619 break;
5620 }
5621
5622 switch (scmd->sc_data_direction) {
5623 case DMA_FROM_DEVICE:
5624 request->data_direction = SOP_READ_FLAG;
5625 break;
5626 case DMA_TO_DEVICE:
5627 request->data_direction = SOP_WRITE_FLAG;
5628 break;
5629 case DMA_NONE:
5630 request->data_direction = SOP_NO_DIRECTION_FLAG;
5631 break;
5632 case DMA_BIDIRECTIONAL:
5633 request->data_direction = SOP_BIDIRECTIONAL;
5634 break;
5635 default:
5636 dev_err(&ctrl_info->pci_dev->dev,
5637 "unknown data direction: %d\n",
5638 scmd->sc_data_direction);
5639 break;
5640 }
5641
5642 rc = pqi_build_raid_sg_list(ctrl_info, request, scmd, io_request);
5643 if (rc) {
5644 pqi_free_io_request(io_request);
5645 return SCSI_MLQUEUE_HOST_BUSY;
5646 }
5647
5648 if (device->is_physical_device) {
5649 rq = scsi_cmd_to_rq(scmd);
5650 timeout = rq->timeout / HZ;
5651 put_unaligned_le32(ADJUST_SECS_TIMEOUT_VALUE(timeout), &request->timeout);
5652 }
5653
5654 pqi_start_io(ctrl_info, queue_group, RAID_PATH, io_request);
5655
5656 return 0;
5657 }
5658
5659 static inline int pqi_raid_submit_scsi_cmd(struct pqi_ctrl_info *ctrl_info,
5660 struct pqi_scsi_dev *device, struct scsi_cmnd *scmd,
5661 struct pqi_queue_group *queue_group)
5662 {
5663 bool io_high_prio;
5664
5665 io_high_prio = pqi_is_io_high_priority(device, scmd);
5666
5667 return pqi_raid_submit_io(ctrl_info, device, scmd, queue_group, io_high_prio);
5668 }
5669
5670 static bool pqi_raid_bypass_retry_needed(struct pqi_io_request *io_request)
5671 {
5672 struct scsi_cmnd *scmd;
5673 struct pqi_scsi_dev *device;
5674 struct pqi_ctrl_info *ctrl_info;
5675
5676 if (!io_request->raid_bypass)
5677 return false;
5678
5679 scmd = io_request->scmd;
5680 if ((scmd->result & 0xff) == SAM_STAT_GOOD)
5681 return false;
5682 if (host_byte(scmd->result) == DID_NO_CONNECT)
5683 return false;
5684
5685 device = scmd->device->hostdata;
5686 if (pqi_device_offline(device) || pqi_device_in_remove(device))
5687 return false;
5688
5689 ctrl_info = shost_to_hba(scmd->device->host);
5690 if (pqi_ctrl_offline(ctrl_info))
5691 return false;
5692
5693 return true;
5694 }
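/*
 * When the function above returns true, pqi_aio_io_complete() below sets
 * DID_IMM_RETRY and bumps this_residual, so the SML resubmits the command
 * and pqi_is_bypass_eligible_request() then steers the retry down the
 * normal RAID path instead of the bypass path.
 */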
5695
5696 static void pqi_aio_io_complete(struct pqi_io_request *io_request,
5697 void *context)
5698 {
5699 struct scsi_cmnd *scmd;
5700
5701 scmd = io_request->scmd;
5702 scsi_dma_unmap(scmd);
5703 if (io_request->status == -EAGAIN || pqi_raid_bypass_retry_needed(io_request)) {
5704 set_host_byte(scmd, DID_IMM_RETRY);
5705 pqi_cmd_priv(scmd)->this_residual++;
5706 }
5707
5708 pqi_free_io_request(io_request);
5709 pqi_scsi_done(scmd);
5710 }
5711
5712 static inline int pqi_aio_submit_scsi_cmd(struct pqi_ctrl_info *ctrl_info,
5713 struct pqi_scsi_dev *device, struct scsi_cmnd *scmd,
5714 struct pqi_queue_group *queue_group)
5715 {
5716 bool io_high_prio;
5717
5718 io_high_prio = pqi_is_io_high_priority(device, scmd);
5719
5720 return pqi_aio_submit_io(ctrl_info, scmd, device->aio_handle,
5721 scmd->cmnd, scmd->cmd_len, queue_group, NULL,
5722 false, io_high_prio);
5723 }
5724
5725 static int pqi_aio_submit_io(struct pqi_ctrl_info *ctrl_info,
5726 struct scsi_cmnd *scmd, u32 aio_handle, u8 *cdb,
5727 unsigned int cdb_length, struct pqi_queue_group *queue_group,
5728 struct pqi_encryption_info *encryption_info, bool raid_bypass,
5729 bool io_high_prio)
5730 {
5731 int rc;
5732 struct pqi_io_request *io_request;
5733 struct pqi_aio_path_request *request;
5734
5735 io_request = pqi_alloc_io_request(ctrl_info, scmd);
5736 if (!io_request)
5737 return SCSI_MLQUEUE_HOST_BUSY;
5738
5739 io_request->io_complete_callback = pqi_aio_io_complete;
5740 io_request->scmd = scmd;
5741 io_request->raid_bypass = raid_bypass;
5742
5743 request = io_request->iu;
5744 memset(request, 0, offsetof(struct pqi_aio_path_request, sg_descriptors));
5745
5746 request->header.iu_type = PQI_REQUEST_IU_AIO_PATH_IO;
5747 put_unaligned_le32(aio_handle, &request->nexus_id);
5748 put_unaligned_le32(scsi_bufflen(scmd), &request->buffer_length);
5749 request->task_attribute = SOP_TASK_ATTRIBUTE_SIMPLE;
5750 request->command_priority = io_high_prio;
5751 put_unaligned_le16(io_request->index, &request->request_id);
5752 request->error_index = request->request_id;
5753 if (!raid_bypass && ctrl_info->multi_lun_device_supported)
5754 put_unaligned_le64(scmd->device->lun << 8, &request->lun_number);
5755 if (cdb_length > sizeof(request->cdb))
5756 cdb_length = sizeof(request->cdb);
5757 request->cdb_length = cdb_length;
5758 memcpy(request->cdb, cdb, cdb_length);
5759
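/*
 * Note: unlike the RAID path, the AIO path expresses the data direction
 * relative to the device, so a host write (DMA_TO_DEVICE) becomes
 * SOP_READ_FLAG (the device reads from host memory) and a host read
 * becomes SOP_WRITE_FLAG.
 */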
5760 switch (scmd->sc_data_direction) {
5761 case DMA_TO_DEVICE:
5762 request->data_direction = SOP_READ_FLAG;
5763 break;
5764 case DMA_FROM_DEVICE:
5765 request->data_direction = SOP_WRITE_FLAG;
5766 break;
5767 case DMA_NONE:
5768 request->data_direction = SOP_NO_DIRECTION_FLAG;
5769 break;
5770 case DMA_BIDIRECTIONAL:
5771 request->data_direction = SOP_BIDIRECTIONAL;
5772 break;
5773 default:
5774 dev_err(&ctrl_info->pci_dev->dev,
5775 "unknown data direction: %d\n",
5776 scmd->sc_data_direction);
5777 break;
5778 }
5779
5780 if (encryption_info) {
5781 request->encryption_enable = true;
5782 put_unaligned_le16(encryption_info->data_encryption_key_index,
5783 &request->data_encryption_key_index);
5784 put_unaligned_le32(encryption_info->encrypt_tweak_lower,
5785 &request->encrypt_tweak_lower);
5786 put_unaligned_le32(encryption_info->encrypt_tweak_upper,
5787 &request->encrypt_tweak_upper);
5788 }
5789
5790 rc = pqi_build_aio_sg_list(ctrl_info, request, scmd, io_request);
5791 if (rc) {
5792 pqi_free_io_request(io_request);
5793 return SCSI_MLQUEUE_HOST_BUSY;
5794 }
5795
5796 pqi_start_io(ctrl_info, queue_group, AIO_PATH, io_request);
5797
5798 return 0;
5799 }
5800
5801 static int pqi_aio_submit_r1_write_io(struct pqi_ctrl_info *ctrl_info,
5802 struct scsi_cmnd *scmd, struct pqi_queue_group *queue_group,
5803 struct pqi_encryption_info *encryption_info, struct pqi_scsi_dev *device,
5804 struct pqi_scsi_dev_raid_map_data *rmd)
5805 {
5806 int rc;
5807 struct pqi_io_request *io_request;
5808 struct pqi_aio_r1_path_request *r1_request;
5809
5810 io_request = pqi_alloc_io_request(ctrl_info, scmd);
5811 if (!io_request)
5812 return SCSI_MLQUEUE_HOST_BUSY;
5813
5814 io_request->io_complete_callback = pqi_aio_io_complete;
5815 io_request->scmd = scmd;
5816 io_request->raid_bypass = true;
5817
5818 r1_request = io_request->iu;
5819 memset(r1_request, 0, offsetof(struct pqi_aio_r1_path_request, sg_descriptors));
5820
5821 r1_request->header.iu_type = PQI_REQUEST_IU_AIO_PATH_RAID1_IO;
5822 put_unaligned_le16(*(u16 *)device->scsi3addr & 0x3fff, &r1_request->volume_id);
5823 r1_request->num_drives = rmd->num_it_nexus_entries;
5824 put_unaligned_le32(rmd->it_nexus[0], &r1_request->it_nexus_1);
5825 put_unaligned_le32(rmd->it_nexus[1], &r1_request->it_nexus_2);
5826 if (rmd->num_it_nexus_entries == 3)
5827 put_unaligned_le32(rmd->it_nexus[2], &r1_request->it_nexus_3);
5828
5829 put_unaligned_le32(scsi_bufflen(scmd), &r1_request->data_length);
5830 r1_request->task_attribute = SOP_TASK_ATTRIBUTE_SIMPLE;
5831 put_unaligned_le16(io_request->index, &r1_request->request_id);
5832 r1_request->error_index = r1_request->request_id;
5833 if (rmd->cdb_length > sizeof(r1_request->cdb))
5834 rmd->cdb_length = sizeof(r1_request->cdb);
5835 r1_request->cdb_length = rmd->cdb_length;
5836 memcpy(r1_request->cdb, rmd->cdb, rmd->cdb_length);
5837
5838 /* The direction is always write; device-relative, hence SOP_READ_FLAG. */
5839 r1_request->data_direction = SOP_READ_FLAG;
5840
5841 if (encryption_info) {
5842 r1_request->encryption_enable = true;
5843 put_unaligned_le16(encryption_info->data_encryption_key_index,
5844 &r1_request->data_encryption_key_index);
5845 put_unaligned_le32(encryption_info->encrypt_tweak_lower,
5846 &r1_request->encrypt_tweak_lower);
5847 put_unaligned_le32(encryption_info->encrypt_tweak_upper,
5848 &r1_request->encrypt_tweak_upper);
5849 }
5850
5851 rc = pqi_build_aio_r1_sg_list(ctrl_info, r1_request, scmd, io_request);
5852 if (rc) {
5853 pqi_free_io_request(io_request);
5854 return SCSI_MLQUEUE_HOST_BUSY;
5855 }
5856
5857 pqi_start_io(ctrl_info, queue_group, AIO_PATH, io_request);
5858
5859 return 0;
5860 }
5861
5862 static int pqi_aio_submit_r56_write_io(struct pqi_ctrl_info *ctrl_info,
5863 struct scsi_cmnd *scmd, struct pqi_queue_group *queue_group,
5864 struct pqi_encryption_info *encryption_info, struct pqi_scsi_dev *device,
5865 struct pqi_scsi_dev_raid_map_data *rmd)
5866 {
5867 int rc;
5868 struct pqi_io_request *io_request;
5869 struct pqi_aio_r56_path_request *r56_request;
5870
5871 io_request = pqi_alloc_io_request(ctrl_info, scmd);
5872 if (!io_request)
5873 return SCSI_MLQUEUE_HOST_BUSY;
5874 io_request->io_complete_callback = pqi_aio_io_complete;
5875 io_request->scmd = scmd;
5876 io_request->raid_bypass = true;
5877
5878 r56_request = io_request->iu;
5879 memset(r56_request, 0, offsetof(struct pqi_aio_r56_path_request, sg_descriptors));
5880
5881 if (device->raid_level == SA_RAID_5 || device->raid_level == SA_RAID_51)
5882 r56_request->header.iu_type = PQI_REQUEST_IU_AIO_PATH_RAID5_IO;
5883 else
5884 r56_request->header.iu_type = PQI_REQUEST_IU_AIO_PATH_RAID6_IO;
5885
5886 put_unaligned_le16(*(u16 *)device->scsi3addr & 0x3fff, &r56_request->volume_id);
5887 put_unaligned_le32(rmd->aio_handle, &r56_request->data_it_nexus);
5888 put_unaligned_le32(rmd->p_parity_it_nexus, &r56_request->p_parity_it_nexus);
5889 if (rmd->raid_level == SA_RAID_6) {
5890 put_unaligned_le32(rmd->q_parity_it_nexus, &r56_request->q_parity_it_nexus);
5891 r56_request->xor_multiplier = rmd->xor_mult;
5892 }
5893 put_unaligned_le32(scsi_bufflen(scmd), &r56_request->data_length);
5894 r56_request->task_attribute = SOP_TASK_ATTRIBUTE_SIMPLE;
5895 put_unaligned_le64(rmd->row, &r56_request->row);
5896
5897 put_unaligned_le16(io_request->index, &r56_request->request_id);
5898 r56_request->error_index = r56_request->request_id;
5899
5900 if (rmd->cdb_length > sizeof(r56_request->cdb))
5901 rmd->cdb_length = sizeof(r56_request->cdb);
5902 r56_request->cdb_length = rmd->cdb_length;
5903 memcpy(r56_request->cdb, rmd->cdb, rmd->cdb_length);
5904
5905 /* The direction is always write; device-relative, hence SOP_READ_FLAG. */
5906 r56_request->data_direction = SOP_READ_FLAG;
5907
5908 if (encryption_info) {
5909 r56_request->encryption_enable = true;
5910 put_unaligned_le16(encryption_info->data_encryption_key_index,
5911 &r56_request->data_encryption_key_index);
5912 put_unaligned_le32(encryption_info->encrypt_tweak_lower,
5913 &r56_request->encrypt_tweak_lower);
5914 put_unaligned_le32(encryption_info->encrypt_tweak_upper,
5915 &r56_request->encrypt_tweak_upper);
5916 }
5917
5918 rc = pqi_build_aio_r56_sg_list(ctrl_info, r56_request, scmd, io_request);
5919 if (rc) {
5920 pqi_free_io_request(io_request);
5921 return SCSI_MLQUEUE_HOST_BUSY;
5922 }
5923
5924 pqi_start_io(ctrl_info, queue_group, AIO_PATH, io_request);
5925
5926 return 0;
5927 }
5928
5929 static inline u16 pqi_get_hw_queue(struct pqi_ctrl_info *ctrl_info,
5930 struct scsi_cmnd *scmd)
5931 {
5932 /*
5933 * We are setting host_tagset = 1 during init.
5934 */
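/*
 * With a host-wide tag set, blk_mq_unique_tag() encodes the hardware queue
 * index in the upper 16 bits of the tag; blk_mq_unique_tag_to_hwq()
 * extracts it, and each hardware queue maps 1:1 onto a PQI queue group.
 */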
5935 return blk_mq_unique_tag_to_hwq(blk_mq_unique_tag(scsi_cmd_to_rq(scmd)));
5936 }
5937
5938 static inline bool pqi_is_bypass_eligible_request(struct scsi_cmnd *scmd)
5939 {
5940 if (blk_rq_is_passthrough(scsi_cmd_to_rq(scmd)))
5941 return false;
5942
5943 return pqi_cmd_priv(scmd)->this_residual == 0;
5944 }
5945
5946 /*
5947 * This function gets called just before we hand the completed SCSI request
5948 * back to the SML.
5949 */
5950
5951 void pqi_prep_for_scsi_done(struct scsi_cmnd *scmd)
5952 {
5953 struct pqi_scsi_dev *device;
5954 struct completion *wait;
5955
5956 if (!scmd->device) {
5957 set_host_byte(scmd, DID_NO_CONNECT);
5958 return;
5959 }
5960
5961 device = scmd->device->hostdata;
5962 if (!device) {
5963 set_host_byte(scmd, DID_NO_CONNECT);
5964 return;
5965 }
5966
5967 atomic_dec(&device->scsi_cmds_outstanding[scmd->device->lun]);
5968
5969 wait = (struct completion *)xchg(&scmd->host_scribble, NULL);
5970 if (wait != PQI_NO_COMPLETION)
5971 complete(wait);
5972 }
5973
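/*
 * Returns true when the write should be sent down the RAID path instead of
 * the AIO bypass path: either the controller cannot accept AIO RAID 5/6
 * writes at all, or the write extends one of the per-LUN sequential streams
 * tracked below, where letting the firmware coalesce the stream into
 * full-stripe writes is expected to beat individual bypass writes.
 */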
5974 static bool pqi_is_parity_write_stream(struct pqi_ctrl_info *ctrl_info,
5975 struct scsi_cmnd *scmd)
5976 {
5977 u32 oldest_jiffies;
5978 u8 lru_index;
5979 int i;
5980 int rc;
5981 struct pqi_scsi_dev *device;
5982 struct pqi_stream_data *pqi_stream_data;
5983 struct pqi_scsi_dev_raid_map_data rmd = { 0 };
5984
5985 if (!ctrl_info->enable_stream_detection)
5986 return false;
5987
5988 rc = pqi_get_aio_lba_and_block_count(scmd, &rmd);
5989 if (rc)
5990 return false;
5991
5992 /* Check writes only. */
5993 if (!rmd.is_write)
5994 return false;
5995
5996 device = scmd->device->hostdata;
5997
5998 /* Check for RAID 5/6 streams. */
5999 if (device->raid_level != SA_RAID_5 && device->raid_level != SA_RAID_6)
6000 return false;
6001
6002 /*
6003 * If the controller does not support AIO RAID{5,6} writes, the
6004 * request must be sent down the non-AIO path.
6005 */
6006 if ((device->raid_level == SA_RAID_5 && !ctrl_info->enable_r5_writes) ||
6007 (device->raid_level == SA_RAID_6 && !ctrl_info->enable_r6_writes))
6008 return true;
6009
6010 lru_index = 0;
6011 oldest_jiffies = INT_MAX;
6012 for (i = 0; i < NUM_STREAMS_PER_LUN; i++) {
6013 pqi_stream_data = &device->stream_data[i];
6014 /*
6015 * Check whether this request is adjacent to, or falls within,
6016 * the previous request.
6017 */
6018 if ((pqi_stream_data->next_lba &&
6019 rmd.first_block >= pqi_stream_data->next_lba) &&
6020 rmd.first_block <= pqi_stream_data->next_lba +
6021 rmd.block_cnt) {
6022 pqi_stream_data->next_lba = rmd.first_block +
6023 rmd.block_cnt;
6024 pqi_stream_data->last_accessed = jiffies;
6025 per_cpu_ptr(device->raid_io_stats, raw_smp_processor_id())->write_stream_cnt++;
6026 return true;
6027 }
6028
6029 /* unused entry */
6030 if (pqi_stream_data->last_accessed == 0) {
6031 lru_index = i;
6032 break;
6033 }
6034
6035 /* Find entry with oldest last accessed time. */
6036 if (pqi_stream_data->last_accessed <= oldest_jiffies) {
6037 oldest_jiffies = pqi_stream_data->last_accessed;
6038 lru_index = i;
6039 }
6040 }
6041
6042 /* Set LRU entry. */
6043 pqi_stream_data = &device->stream_data[lru_index];
6044 pqi_stream_data->last_accessed = jiffies;
6045 pqi_stream_data->next_lba = rmd.first_block + rmd.block_cnt;
6046
6047 return false;
6048 }
6049
6050 static int pqi_scsi_queue_command(struct Scsi_Host *shost, struct scsi_cmnd *scmd)
6051 {
6052 int rc;
6053 struct pqi_ctrl_info *ctrl_info;
6054 struct pqi_scsi_dev *device;
6055 u16 hw_queue;
6056 struct pqi_queue_group *queue_group;
6057 bool raid_bypassed;
6058 u8 lun;
6059
6060 scmd->host_scribble = PQI_NO_COMPLETION;
6061
6062 device = scmd->device->hostdata;
6063
6064 if (!device) {
6065 set_host_byte(scmd, DID_NO_CONNECT);
6066 pqi_scsi_done(scmd);
6067 return 0;
6068 }
6069
6070 lun = (u8)scmd->device->lun;
6071
6072 atomic_inc(&device->scsi_cmds_outstanding[lun]);
6073
6074 ctrl_info = shost_to_hba(shost);
6075
6076 if (pqi_ctrl_offline(ctrl_info) || pqi_device_offline(device) || pqi_device_in_remove(device)) {
6077 set_host_byte(scmd, DID_NO_CONNECT);
6078 pqi_scsi_done(scmd);
6079 return 0;
6080 }
6081
6082 if (pqi_ctrl_blocked(ctrl_info) || pqi_device_in_reset(device, lun)) {
6083 rc = SCSI_MLQUEUE_HOST_BUSY;
6084 goto out;
6085 }
6086
6087 /*
6088 * This is necessary because the SML doesn't zero out this field during
6089 * error recovery.
6090 */
6091 scmd->result = 0;
6092
6093 hw_queue = pqi_get_hw_queue(ctrl_info, scmd);
6094 queue_group = &ctrl_info->queue_groups[hw_queue];
6095
6096 if (pqi_is_logical_device(device)) {
6097 raid_bypassed = false;
6098 if (device->raid_bypass_enabled &&
6099 pqi_is_bypass_eligible_request(scmd) &&
6100 !pqi_is_parity_write_stream(ctrl_info, scmd)) {
6101 rc = pqi_raid_bypass_submit_scsi_cmd(ctrl_info, device, scmd, queue_group);
6102 if (rc == 0 || rc == SCSI_MLQUEUE_HOST_BUSY) {
6103 raid_bypassed = true;
6104 per_cpu_ptr(device->raid_io_stats, raw_smp_processor_id())->raid_bypass_cnt++;
6105 }
6106 }
6107 if (!raid_bypassed)
6108 rc = pqi_raid_submit_scsi_cmd(ctrl_info, device, scmd, queue_group);
6109 } else {
6110 if (device->aio_enabled)
6111 rc = pqi_aio_submit_scsi_cmd(ctrl_info, device, scmd, queue_group);
6112 else
6113 rc = pqi_raid_submit_scsi_cmd(ctrl_info, device, scmd, queue_group);
6114 }
6115
6116 out:
6117 if (rc) {
6118 scmd->host_scribble = NULL;
6119 atomic_dec(&device->scsi_cmds_outstanding[lun]);
6120 }
6121
6122 return rc;
6123 }
6124
6125 static unsigned int pqi_queued_io_count(struct pqi_ctrl_info *ctrl_info)
6126 {
6127 unsigned int i;
6128 unsigned int path;
6129 unsigned long flags;
6130 unsigned int queued_io_count;
6131 struct pqi_queue_group *queue_group;
6132 struct pqi_io_request *io_request;
6133
6134 queued_io_count = 0;
6135
6136 for (i = 0; i < ctrl_info->num_queue_groups; i++) {
6137 queue_group = &ctrl_info->queue_groups[i];
6138 for (path = 0; path < 2; path++) {
6139 spin_lock_irqsave(&queue_group->submit_lock[path], flags);
6140 list_for_each_entry(io_request, &queue_group->request_list[path], request_list_entry)
6141 queued_io_count++;
6142 spin_unlock_irqrestore(&queue_group->submit_lock[path], flags);
6143 }
6144 }
6145
6146 return queued_io_count;
6147 }
6148
6149 static unsigned int pqi_nonempty_inbound_queue_count(struct pqi_ctrl_info *ctrl_info)
6150 {
6151 unsigned int i;
6152 unsigned int path;
6153 unsigned int nonempty_inbound_queue_count;
6154 struct pqi_queue_group *queue_group;
6155 pqi_index_t iq_pi;
6156 pqi_index_t iq_ci;
6157
6158 nonempty_inbound_queue_count = 0;
6159
6160 for (i = 0; i < ctrl_info->num_queue_groups; i++) {
6161 queue_group = &ctrl_info->queue_groups[i];
6162 for (path = 0; path < 2; path++) {
6163 iq_pi = queue_group->iq_pi_copy[path];
6164 iq_ci = readl(queue_group->iq_ci[path]);
6165 if (iq_ci != iq_pi)
6166 nonempty_inbound_queue_count++;
6167 }
6168 }
6169
6170 return nonempty_inbound_queue_count;
6171 }
6172
6173 #define PQI_INBOUND_QUEUES_NONEMPTY_WARNING_TIMEOUT_SECS 10
6174
6175 static int pqi_wait_until_inbound_queues_empty(struct pqi_ctrl_info *ctrl_info)
6176 {
6177 unsigned long start_jiffies;
6178 unsigned long warning_timeout;
6179 unsigned int queued_io_count;
6180 unsigned int nonempty_inbound_queue_count;
6181 bool displayed_warning;
6182
6183 displayed_warning = false;
6184 start_jiffies = jiffies;
6185 warning_timeout = (PQI_INBOUND_QUEUES_NONEMPTY_WARNING_TIMEOUT_SECS * HZ) + start_jiffies;
6186
6187 while (1) {
6188 queued_io_count = pqi_queued_io_count(ctrl_info);
6189 nonempty_inbound_queue_count = pqi_nonempty_inbound_queue_count(ctrl_info);
6190 if (queued_io_count == 0 && nonempty_inbound_queue_count == 0)
6191 break;
6192 pqi_check_ctrl_health(ctrl_info);
6193 if (pqi_ctrl_offline(ctrl_info))
6194 return -ENXIO;
6195 if (time_after(jiffies, warning_timeout)) {
6196 dev_warn(&ctrl_info->pci_dev->dev,
6197 "waiting %u seconds for queued I/O to drain (queued I/O count: %u; non-empty inbound queue count: %u)\n",
6198 jiffies_to_msecs(jiffies - start_jiffies) / 1000, queued_io_count, nonempty_inbound_queue_count);
6199 displayed_warning = true;
6200 warning_timeout = (PQI_INBOUND_QUEUES_NONEMPTY_WARNING_TIMEOUT_SECS * HZ) + jiffies;
6201 }
6202 usleep_range(1000, 2000);
6203 }
6204
6205 if (displayed_warning)
6206 dev_warn(&ctrl_info->pci_dev->dev,
6207 "queued I/O drained after waiting for %u seconds\n",
6208 jiffies_to_msecs(jiffies - start_jiffies) / 1000);
6209
6210 return 0;
6211 }
6212
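/*
 * Drain the software submit queues: commands queued for the LUN being reset
 * complete with DID_RESET, while commands for any other device are returned
 * with DID_REQUEUE so the SML resubmits them unchanged.
 */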
6213 static void pqi_fail_io_queued_for_device(struct pqi_ctrl_info *ctrl_info,
6214 struct pqi_scsi_dev *device, u8 lun)
6215 {
6216 unsigned int i;
6217 unsigned int path;
6218 struct pqi_queue_group *queue_group;
6219 unsigned long flags;
6220 struct pqi_io_request *io_request;
6221 struct pqi_io_request *next;
6222 struct scsi_cmnd *scmd;
6223 struct pqi_scsi_dev *scsi_device;
6224
6225 for (i = 0; i < ctrl_info->num_queue_groups; i++) {
6226 queue_group = &ctrl_info->queue_groups[i];
6227
6228 for (path = 0; path < 2; path++) {
6229 spin_lock_irqsave(
6230 &queue_group->submit_lock[path], flags);
6231
6232 list_for_each_entry_safe(io_request, next,
6233 &queue_group->request_list[path],
6234 request_list_entry) {
6235
6236 scmd = io_request->scmd;
6237 if (!scmd)
6238 continue;
6239
6240 scsi_device = scmd->device->hostdata;
6241
6242 list_del(&io_request->request_list_entry);
6243 if (scsi_device == device && (u8)scmd->device->lun == lun)
6244 set_host_byte(scmd, DID_RESET);
6245 else
6246 set_host_byte(scmd, DID_REQUEUE);
6247 pqi_free_io_request(io_request);
6248 scsi_dma_unmap(scmd);
6249 pqi_scsi_done(scmd);
6250 }
6251
6252 spin_unlock_irqrestore(
6253 &queue_group->submit_lock[path], flags);
6254 }
6255 }
6256 }
6257
6258 #define PQI_PENDING_IO_WARNING_TIMEOUT_SECS 10
6259
6260 static int pqi_device_wait_for_pending_io(struct pqi_ctrl_info *ctrl_info,
6261 struct pqi_scsi_dev *device, u8 lun, unsigned long timeout_msecs)
6262 {
6263 int cmds_outstanding;
6264 unsigned long start_jiffies;
6265 unsigned long warning_timeout;
6266 unsigned long msecs_waiting;
6267
6268 start_jiffies = jiffies;
6269 warning_timeout = (PQI_PENDING_IO_WARNING_TIMEOUT_SECS * HZ) + start_jiffies;
6270
6271 while ((cmds_outstanding = atomic_read(&device->scsi_cmds_outstanding[lun])) > 0) {
6272 if (ctrl_info->ctrl_removal_state != PQI_CTRL_GRACEFUL_REMOVAL) {
6273 pqi_check_ctrl_health(ctrl_info);
6274 if (pqi_ctrl_offline(ctrl_info))
6275 return -ENXIO;
6276 }
6277 msecs_waiting = jiffies_to_msecs(jiffies - start_jiffies);
6278 if (msecs_waiting >= timeout_msecs) {
6279 dev_err(&ctrl_info->pci_dev->dev,
6280 "scsi %d:%d:%d:%d: timed out after %lu seconds waiting for %d outstanding command(s)\n",
6281 ctrl_info->scsi_host->host_no, device->bus, device->target,
6282 lun, msecs_waiting / 1000, cmds_outstanding);
6283 return -ETIMEDOUT;
6284 }
6285 if (time_after(jiffies, warning_timeout)) {
6286 dev_warn(&ctrl_info->pci_dev->dev,
6287 "scsi %d:%d:%d:%d: waiting %lu seconds for %d outstanding command(s)\n",
6288 ctrl_info->scsi_host->host_no, device->bus, device->target,
6289 lun, msecs_waiting / 1000, cmds_outstanding);
6290 warning_timeout = (PQI_PENDING_IO_WARNING_TIMEOUT_SECS * HZ) + jiffies;
6291 }
6292 usleep_range(1000, 2000);
6293 }
6294
6295 return 0;
6296 }
6297
6298 static void pqi_lun_reset_complete(struct pqi_io_request *io_request,
6299 void *context)
6300 {
6301 struct completion *waiting = context;
6302
6303 complete(waiting);
6304 }
6305
6306 #define PQI_LUN_RESET_POLL_COMPLETION_SECS 10
6307
6308 static int pqi_wait_for_lun_reset_completion(struct pqi_ctrl_info *ctrl_info,
6309 struct pqi_scsi_dev *device, u8 lun, struct completion *wait)
6310 {
6311 int rc;
6312 unsigned int wait_secs;
6313 int cmds_outstanding;
6314
6315 wait_secs = 0;
6316
6317 while (1) {
6318 if (wait_for_completion_io_timeout(wait,
6319 PQI_LUN_RESET_POLL_COMPLETION_SECS * HZ)) {
6320 rc = 0;
6321 break;
6322 }
6323
6324 pqi_check_ctrl_health(ctrl_info);
6325 if (pqi_ctrl_offline(ctrl_info)) {
6326 rc = -ENXIO;
6327 break;
6328 }
6329
6330 wait_secs += PQI_LUN_RESET_POLL_COMPLETION_SECS;
6331 cmds_outstanding = atomic_read(&device->scsi_cmds_outstanding[lun]);
6332 dev_warn(&ctrl_info->pci_dev->dev,
6333 "scsi %d:%d:%d:%d: waiting %u seconds for LUN reset to complete (%d command(s) outstanding)\n",
6334 ctrl_info->scsi_host->host_no, device->bus, device->target, lun, wait_secs, cmds_outstanding);
6335 }
6336
6337 return rc;
6338 }
6339
6340 #define PQI_LUN_RESET_FIRMWARE_TIMEOUT_SECS 30
6341
6342 static int pqi_lun_reset(struct pqi_ctrl_info *ctrl_info, struct pqi_scsi_dev *device, u8 lun)
6343 {
6344 int rc;
6345 struct pqi_io_request *io_request;
6346 DECLARE_COMPLETION_ONSTACK(wait);
6347 struct pqi_task_management_request *request;
6348
6349 io_request = pqi_alloc_io_request(ctrl_info, NULL);
6350 io_request->io_complete_callback = pqi_lun_reset_complete;
6351 io_request->context = &wait;
6352
6353 request = io_request->iu;
6354 memset(request, 0, sizeof(*request));
6355
6356 request->header.iu_type = PQI_REQUEST_IU_TASK_MANAGEMENT;
6357 put_unaligned_le16(sizeof(*request) - PQI_REQUEST_HEADER_LENGTH,
6358 &request->header.iu_length);
6359 put_unaligned_le16(io_request->index, &request->request_id);
6360 memcpy(request->lun_number, device->scsi3addr,
6361 sizeof(request->lun_number));
6362 if (!pqi_is_logical_device(device) && ctrl_info->multi_lun_device_supported)
6363 request->ml_device_lun_number = lun;
6364 request->task_management_function = SOP_TASK_MANAGEMENT_LUN_RESET;
6365 if (ctrl_info->tmf_iu_timeout_supported)
6366 put_unaligned_le16(PQI_LUN_RESET_FIRMWARE_TIMEOUT_SECS, &request->timeout);
6367
6368 pqi_start_io(ctrl_info, &ctrl_info->queue_groups[PQI_DEFAULT_QUEUE_GROUP], RAID_PATH,
6369 io_request);
6370
6371 rc = pqi_wait_for_lun_reset_completion(ctrl_info, device, lun, &wait);
6372 if (rc == 0)
6373 rc = io_request->status;
6374
6375 pqi_free_io_request(io_request);
6376
6377 return rc;
6378 }
6379
6380 #define PQI_LUN_RESET_RETRIES 3
6381 #define PQI_LUN_RESET_RETRY_INTERVAL_MSECS (10 * 1000)
6382 #define PQI_LUN_RESET_PENDING_IO_TIMEOUT_MSECS (10 * 60 * 1000)
6383 #define PQI_LUN_RESET_FAILED_PENDING_IO_TIMEOUT_MSECS (2 * 60 * 1000)
6384
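/*
 * Reset policy implemented below: the LUN reset is retried up to
 * PQI_LUN_RESET_RETRIES times at 10-second intervals (hard failures such as
 * -ENODEV/-ENXIO are not retried), after which the driver waits for pending
 * I/O to drain for up to 10 minutes following a successful reset, or 2
 * minutes following a failed one.
 */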
6385 static int pqi_lun_reset_with_retries(struct pqi_ctrl_info *ctrl_info, struct pqi_scsi_dev *device, u8 lun)
6386 {
6387 int reset_rc;
6388 int wait_rc;
6389 unsigned int retries;
6390 unsigned long timeout_msecs;
6391
6392 for (retries = 0;;) {
6393 reset_rc = pqi_lun_reset(ctrl_info, device, lun);
6394 if (reset_rc == 0 || reset_rc == -ENODEV || reset_rc == -ENXIO || ++retries > PQI_LUN_RESET_RETRIES)
6395 break;
6396 msleep(PQI_LUN_RESET_RETRY_INTERVAL_MSECS);
6397 }
6398
6399 timeout_msecs = reset_rc ? PQI_LUN_RESET_FAILED_PENDING_IO_TIMEOUT_MSECS :
6400 PQI_LUN_RESET_PENDING_IO_TIMEOUT_MSECS;
6401
6402 wait_rc = pqi_device_wait_for_pending_io(ctrl_info, device, lun, timeout_msecs);
6403 if (wait_rc && reset_rc == 0)
6404 reset_rc = wait_rc;
6405
6406 return reset_rc == 0 ? SUCCESS : FAILED;
6407 }
6408
6409 static int pqi_device_reset(struct pqi_ctrl_info *ctrl_info, struct pqi_scsi_dev *device, u8 lun)
6410 {
6411 int rc;
6412
6413 pqi_ctrl_block_requests(ctrl_info);
6414 pqi_ctrl_wait_until_quiesced(ctrl_info);
6415 pqi_fail_io_queued_for_device(ctrl_info, device, lun);
6416 rc = pqi_wait_until_inbound_queues_empty(ctrl_info);
6417 pqi_device_reset_start(device, lun);
6418 pqi_ctrl_unblock_requests(ctrl_info);
6419 if (rc)
6420 rc = FAILED;
6421 else
6422 rc = pqi_lun_reset_with_retries(ctrl_info, device, lun);
6423 pqi_device_reset_done(device, lun);
6424
6425 return rc;
6426 }
6427
6428 static int pqi_device_reset_handler(struct pqi_ctrl_info *ctrl_info, struct pqi_scsi_dev *device, u8 lun, struct scsi_cmnd *scmd, u8 scsi_opcode)
6429 {
6430 unsigned long flags;
6431 int rc;
6432
6433 mutex_lock(&ctrl_info->lun_reset_mutex);
6434
6435 spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
6436 if (pqi_find_scsi_dev(ctrl_info, device->bus, device->target, device->lun) == NULL) {
6437 dev_warn(&ctrl_info->pci_dev->dev,
6438 "skipping reset of scsi %d:%d:%d:%u, device has been removed\n",
6439 ctrl_info->scsi_host->host_no, device->bus, device->target, device->lun);
6440 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
6441 mutex_unlock(&ctrl_info->lun_reset_mutex);
6442 return 0;
6443 }
6444 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
6445
6446 dev_err(&ctrl_info->pci_dev->dev,
6447 "resetting scsi %d:%d:%d:%u SCSI cmd at %p due to cmd opcode 0x%02x\n",
6448 ctrl_info->scsi_host->host_no, device->bus, device->target, lun, scmd, scsi_opcode);
6449
6450 pqi_check_ctrl_health(ctrl_info);
6451 if (pqi_ctrl_offline(ctrl_info))
6452 rc = FAILED;
6453 else
6454 rc = pqi_device_reset(ctrl_info, device, lun);
6455
6456 dev_err(&ctrl_info->pci_dev->dev,
6457 "reset of scsi %d:%d:%d:%u: %s\n",
6458 ctrl_info->scsi_host->host_no, device->bus, device->target, lun,
6459 rc == SUCCESS ? "SUCCESS" : "FAILED");
6460
6461 mutex_unlock(&ctrl_info->lun_reset_mutex);
6462
6463 return rc;
6464 }
6465
6466 static int pqi_eh_device_reset_handler(struct scsi_cmnd *scmd)
6467 {
6468 struct Scsi_Host *shost;
6469 struct pqi_ctrl_info *ctrl_info;
6470 struct pqi_scsi_dev *device;
6471 u8 scsi_opcode;
6472
6473 shost = scmd->device->host;
6474 ctrl_info = shost_to_hba(shost);
6475 device = scmd->device->hostdata;
6476 scsi_opcode = scmd->cmd_len > 0 ? scmd->cmnd[0] : 0xff;
6477
6478 return pqi_device_reset_handler(ctrl_info, device, (u8)scmd->device->lun, scmd, scsi_opcode);
6479 }
6480
6481 static void pqi_tmf_worker(struct work_struct *work)
6482 {
6483 struct pqi_tmf_work *tmf_work;
6484 struct scsi_cmnd *scmd;
6485
6486 tmf_work = container_of(work, struct pqi_tmf_work, work_struct);
6487 scmd = (struct scsi_cmnd *)xchg(&tmf_work->scmd, NULL);
6488
6489 pqi_device_reset_handler(tmf_work->ctrl_info, tmf_work->device, tmf_work->lun, scmd, tmf_work->scsi_opcode);
6490 }
6491
6492 static int pqi_eh_abort_handler(struct scsi_cmnd *scmd)
6493 {
6494 struct Scsi_Host *shost;
6495 struct pqi_ctrl_info *ctrl_info;
6496 struct pqi_scsi_dev *device;
6497 struct pqi_tmf_work *tmf_work;
6498 DECLARE_COMPLETION_ONSTACK(wait);
6499
6500 shost = scmd->device->host;
6501 ctrl_info = shost_to_hba(shost);
6502 device = scmd->device->hostdata;
6503
6504 dev_err(&ctrl_info->pci_dev->dev,
6505 "attempting TASK ABORT on scsi %d:%d:%d:%d for SCSI cmd at %p\n",
6506 shost->host_no, device->bus, device->target, (int)scmd->device->lun, scmd);
6507
6508 if (cmpxchg(&scmd->host_scribble, PQI_NO_COMPLETION, (void *)&wait) == NULL) {
6509 dev_err(&ctrl_info->pci_dev->dev,
6510 "scsi %d:%d:%d:%d for SCSI cmd at %p already completed\n",
6511 shost->host_no, device->bus, device->target, (int)scmd->device->lun, scmd);
6512 scmd->result = DID_RESET << 16;
6513 goto out;
6514 }
6515
6516 tmf_work = &device->tmf_work[scmd->device->lun];
6517
6518 if (cmpxchg(&tmf_work->scmd, NULL, scmd) == NULL) {
6519 tmf_work->ctrl_info = ctrl_info;
6520 tmf_work->device = device;
6521 tmf_work->lun = (u8)scmd->device->lun;
6522 tmf_work->scsi_opcode = scmd->cmd_len > 0 ? scmd->cmnd[0] : 0xff;
6523 schedule_work(&tmf_work->work_struct);
6524 }
6525
6526 wait_for_completion(&wait);
6527
6528 dev_err(&ctrl_info->pci_dev->dev,
6529 "TASK ABORT on scsi %d:%d:%d:%d for SCSI cmd at %p: SUCCESS\n",
6530 shost->host_no, device->bus, device->target, (int)scmd->device->lun, scmd);
6531
6532 out:
6533
6534 return SUCCESS;
6535 }
6536
6537 static int pqi_sdev_init(struct scsi_device *sdev)
6538 {
6539 struct pqi_scsi_dev *device;
6540 unsigned long flags;
6541 struct pqi_ctrl_info *ctrl_info;
6542 struct scsi_target *starget;
6543 struct sas_rphy *rphy;
6544
6545 ctrl_info = shost_to_hba(sdev->host);
6546
6547 spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
6548
6549 if (sdev_channel(sdev) == PQI_PHYSICAL_DEVICE_BUS) {
6550 starget = scsi_target(sdev);
6551 rphy = target_to_rphy(starget);
6552 device = pqi_find_device_by_sas_rphy(ctrl_info, rphy);
6553 if (device) {
6554 if (device->target_lun_valid) {
6555 device->ignore_device = true;
6556 } else {
6557 device->target = sdev_id(sdev);
6558 device->lun = sdev->lun;
6559 device->target_lun_valid = true;
6560 }
6561 }
6562 } else {
6563 device = pqi_find_scsi_dev(ctrl_info, sdev_channel(sdev),
6564 sdev_id(sdev), sdev->lun);
6565 }
6566
6567 if (device) {
6568 sdev->hostdata = device;
6569 device->sdev = sdev;
6570 if (device->queue_depth) {
6571 device->advertised_queue_depth = device->queue_depth;
6572 scsi_change_queue_depth(sdev,
6573 device->advertised_queue_depth);
6574 }
6575 if (pqi_is_logical_device(device)) {
6576 pqi_disable_write_same(sdev);
6577 } else {
6578 sdev->allow_restart = 1;
6579 if (device->device_type == SA_DEVICE_TYPE_NVME)
6580 pqi_disable_write_same(sdev);
6581 }
6582 }
6583
6584 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
6585
6586 return 0;
6587 }
6588
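/*
 * Added note: with managed interrupts enabled (the default), hardware
 * queues are mapped to the PCI device's IRQ affinity via
 * blk_mq_map_hw_queues(); otherwise the generic blk-mq CPU-to-queue
 * mapping is used.
 */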
6589 static void pqi_map_queues(struct Scsi_Host *shost)
6590 {
6591 struct pqi_ctrl_info *ctrl_info = shost_to_hba(shost);
6592
6593 if (!ctrl_info->disable_managed_interrupts)
6594 blk_mq_map_hw_queues(&shost->tag_set.map[HCTX_TYPE_DEFAULT],
6595 &ctrl_info->pci_dev->dev, 0);
6596 else
6597 blk_mq_map_queues(&shost->tag_set.map[HCTX_TYPE_DEFAULT]);
6598 }
6599
6600 static inline bool pqi_is_tape_changer_device(struct pqi_scsi_dev *device)
6601 {
6602 return device->devtype == TYPE_TAPE || device->devtype == TYPE_MEDIUM_CHANGER;
6603 }
6604
6605 static int pqi_sdev_configure(struct scsi_device *sdev,
6606 struct queue_limits *lim)
6607 {
6608 int rc = 0;
6609 struct pqi_scsi_dev *device;
6610
6611 device = sdev->hostdata;
6612 device->devtype = sdev->type;
6613
6614 if (pqi_is_tape_changer_device(device) && device->ignore_device) {
6615 rc = -ENXIO;
6616 device->ignore_device = false;
6617 }
6618
6619 return rc;
6620 }
6621
6622 static void pqi_sdev_destroy(struct scsi_device *sdev)
6623 {
6624 struct pqi_ctrl_info *ctrl_info;
6625 struct pqi_scsi_dev *device;
6626 struct pqi_tmf_work *tmf_work;
6627 int mutex_acquired;
6628 unsigned int lun;
6629 unsigned long flags;
6630
6631 ctrl_info = shost_to_hba(sdev->host);
6632
6633 mutex_acquired = mutex_trylock(&ctrl_info->scan_mutex);
6634 if (!mutex_acquired)
6635 return;
6636
6637 device = sdev->hostdata;
6638 if (!device) {
6639 mutex_unlock(&ctrl_info->scan_mutex);
6640 return;
6641 }
6642
6643 device->lun_count--;
6644 if (device->lun_count > 0) {
6645 mutex_unlock(&ctrl_info->scan_mutex);
6646 return;
6647 }
6648
6649 spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
6650 list_del(&device->scsi_device_list_entry);
6651 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
6652
6653 mutex_unlock(&ctrl_info->scan_mutex);
6654
6655 for (lun = 0, tmf_work = device->tmf_work; lun < PQI_MAX_LUNS_PER_DEVICE; lun++, tmf_work++)
6656 cancel_work_sync(&tmf_work->work_struct);
6657
6658 mutex_lock(&ctrl_info->lun_reset_mutex);
6659 pqi_dev_info(ctrl_info, "removed", device);
6660 pqi_free_device(device);
6661 mutex_unlock(&ctrl_info->lun_reset_mutex);
6662 }
6663
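/*
 * Added note: board_id packs the PCI subsystem IDs as
 * (subsystem_device << 16) | subsystem_vendor. For example (values
 * hypothetical), subsystem_device 0x1100 with subsystem_vendor 0x9005
 * would yield board_id 0x11009005.
 */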
6664 static int pqi_getpciinfo_ioctl(struct pqi_ctrl_info *ctrl_info, void __user *arg)
6665 {
6666 struct pci_dev *pci_dev;
6667 u32 subsystem_vendor;
6668 u32 subsystem_device;
6669 cciss_pci_info_struct pci_info;
6670
6671 if (!arg)
6672 return -EINVAL;
6673
6674 pci_dev = ctrl_info->pci_dev;
6675
6676 pci_info.domain = pci_domain_nr(pci_dev->bus);
6677 pci_info.bus = pci_dev->bus->number;
6678 pci_info.dev_fn = pci_dev->devfn;
6679 subsystem_vendor = pci_dev->subsystem_vendor;
6680 subsystem_device = pci_dev->subsystem_device;
6681 pci_info.board_id = ((subsystem_device << 16) & 0xffff0000) | subsystem_vendor;
6682
6683 if (copy_to_user(arg, &pci_info, sizeof(pci_info)))
6684 return -EFAULT;
6685
6686 return 0;
6687 }
6688
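/*
 * Added note: the reported version packs the four driver version
 * fields into one 32-bit value: (major << 28) | (minor << 24) |
 * (release << 16) | revision. For DRIVER_VERSION "2.1.36-026" that is
 * (2 << 28) | (1 << 24) | (36 << 16) | 26 == 0x2124001a.
 */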
6689 static int pqi_getdrivver_ioctl(void __user *arg)
6690 {
6691 u32 version;
6692
6693 if (!arg)
6694 return -EINVAL;
6695
6696 version = (DRIVER_MAJOR << 28) | (DRIVER_MINOR << 24) |
6697 (DRIVER_RELEASE << 16) | DRIVER_REVISION;
6698
6699 if (copy_to_user(arg, &version, sizeof(version)))
6700 return -EFAULT;
6701
6702 return 0;
6703 }
6704
6705 struct ciss_error_info {
6706 u8 scsi_status;
6707 int command_status;
6708 size_t sense_data_length;
6709 };
6710
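/*
 * Added note: translates a PQI error descriptor into the legacy CISS
 * status model used by the CCISS_PASSTHRU ioctl. PCIe- and
 * hardware-level data-path errors collapse to
 * CISS_CMD_STATUS_HARDWARE_ERROR and anything unrecognized to
 * CISS_CMD_STATUS_TARGET_STATUS; the sense data length is clamped to
 * the size of the PQI error data area.
 */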
6711 static void pqi_error_info_to_ciss(struct pqi_raid_error_info *pqi_error_info,
6712 struct ciss_error_info *ciss_error_info)
6713 {
6714 int ciss_cmd_status;
6715 size_t sense_data_length;
6716
6717 switch (pqi_error_info->data_out_result) {
6718 case PQI_DATA_IN_OUT_GOOD:
6719 ciss_cmd_status = CISS_CMD_STATUS_SUCCESS;
6720 break;
6721 case PQI_DATA_IN_OUT_UNDERFLOW:
6722 ciss_cmd_status = CISS_CMD_STATUS_DATA_UNDERRUN;
6723 break;
6724 case PQI_DATA_IN_OUT_BUFFER_OVERFLOW:
6725 ciss_cmd_status = CISS_CMD_STATUS_DATA_OVERRUN;
6726 break;
6727 case PQI_DATA_IN_OUT_PROTOCOL_ERROR:
6728 case PQI_DATA_IN_OUT_BUFFER_ERROR:
6729 case PQI_DATA_IN_OUT_BUFFER_OVERFLOW_DESCRIPTOR_AREA:
6730 case PQI_DATA_IN_OUT_BUFFER_OVERFLOW_BRIDGE:
6731 case PQI_DATA_IN_OUT_ERROR:
6732 ciss_cmd_status = CISS_CMD_STATUS_PROTOCOL_ERROR;
6733 break;
6734 case PQI_DATA_IN_OUT_HARDWARE_ERROR:
6735 case PQI_DATA_IN_OUT_PCIE_FABRIC_ERROR:
6736 case PQI_DATA_IN_OUT_PCIE_COMPLETION_TIMEOUT:
6737 case PQI_DATA_IN_OUT_PCIE_COMPLETER_ABORT_RECEIVED:
6738 case PQI_DATA_IN_OUT_PCIE_UNSUPPORTED_REQUEST_RECEIVED:
6739 case PQI_DATA_IN_OUT_PCIE_ECRC_CHECK_FAILED:
6740 case PQI_DATA_IN_OUT_PCIE_UNSUPPORTED_REQUEST:
6741 case PQI_DATA_IN_OUT_PCIE_ACS_VIOLATION:
6742 case PQI_DATA_IN_OUT_PCIE_TLP_PREFIX_BLOCKED:
6743 case PQI_DATA_IN_OUT_PCIE_POISONED_MEMORY_READ:
6744 ciss_cmd_status = CISS_CMD_STATUS_HARDWARE_ERROR;
6745 break;
6746 case PQI_DATA_IN_OUT_UNSOLICITED_ABORT:
6747 ciss_cmd_status = CISS_CMD_STATUS_UNSOLICITED_ABORT;
6748 break;
6749 case PQI_DATA_IN_OUT_ABORTED:
6750 ciss_cmd_status = CISS_CMD_STATUS_ABORTED;
6751 break;
6752 case PQI_DATA_IN_OUT_TIMEOUT:
6753 ciss_cmd_status = CISS_CMD_STATUS_TIMEOUT;
6754 break;
6755 default:
6756 ciss_cmd_status = CISS_CMD_STATUS_TARGET_STATUS;
6757 break;
6758 }
6759
6760 sense_data_length =
6761 get_unaligned_le16(&pqi_error_info->sense_data_length);
6762 if (sense_data_length == 0)
6763 sense_data_length =
6764 get_unaligned_le16(&pqi_error_info->response_data_length);
6765 if (sense_data_length)
6766 if (sense_data_length > sizeof(pqi_error_info->data))
6767 sense_data_length = sizeof(pqi_error_info->data);
6768
6769 ciss_error_info->scsi_status = pqi_error_info->status;
6770 ciss_error_info->command_status = ciss_cmd_status;
6771 ciss_error_info->sense_data_length = sense_data_length;
6772 }
6773
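/*
 * Added note: CCISS_PASSTHRU flow in brief - validate the user
 * request, copy in (or zero-allocate) the data buffer, build a RAID
 * path IU with a single SG descriptor mapped DMA_BIDIRECTIONAL,
 * submit it synchronously, then copy the CISS-translated error info
 * and any read data back to user space. Requires CAP_SYS_RAWIO.
 */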
6774 static int pqi_passthru_ioctl(struct pqi_ctrl_info *ctrl_info, void __user *arg)
6775 {
6776 int rc;
6777 char *kernel_buffer = NULL;
6778 u16 iu_length;
6779 size_t sense_data_length;
6780 IOCTL_Command_struct iocommand;
6781 struct pqi_raid_path_request request;
6782 struct pqi_raid_error_info pqi_error_info;
6783 struct ciss_error_info ciss_error_info;
6784
6785 if (pqi_ctrl_offline(ctrl_info))
6786 return -ENXIO;
6787 if (pqi_ofa_in_progress(ctrl_info) && pqi_ctrl_blocked(ctrl_info))
6788 return -EBUSY;
6789 if (!arg)
6790 return -EINVAL;
6791 if (!capable(CAP_SYS_RAWIO))
6792 return -EPERM;
6793 if (copy_from_user(&iocommand, arg, sizeof(iocommand)))
6794 return -EFAULT;
6795 if (iocommand.buf_size < 1 &&
6796 iocommand.Request.Type.Direction != XFER_NONE)
6797 return -EINVAL;
6798 if (iocommand.Request.CDBLen > sizeof(request.cdb))
6799 return -EINVAL;
6800 if (iocommand.Request.Type.Type != TYPE_CMD)
6801 return -EINVAL;
6802
6803 switch (iocommand.Request.Type.Direction) {
6804 case XFER_NONE:
6805 case XFER_WRITE:
6806 case XFER_READ:
6807 case XFER_READ | XFER_WRITE:
6808 break;
6809 default:
6810 return -EINVAL;
6811 }
6812
6813 if (iocommand.buf_size > 0) {
6814 if (iocommand.Request.Type.Direction & XFER_WRITE) {
6815 kernel_buffer = memdup_user(iocommand.buf,
6816 iocommand.buf_size);
6817 if (IS_ERR(kernel_buffer))
6818 return PTR_ERR(kernel_buffer);
6819 } else {
6820 kernel_buffer = kzalloc(iocommand.buf_size, GFP_KERNEL);
6821 if (!kernel_buffer)
6822 return -ENOMEM;
6823 }
6824 }
6825
6826 memset(&request, 0, sizeof(request));
6827
6828 request.header.iu_type = PQI_REQUEST_IU_RAID_PATH_IO;
6829 iu_length = offsetof(struct pqi_raid_path_request, sg_descriptors) -
6830 PQI_REQUEST_HEADER_LENGTH;
6831 memcpy(request.lun_number, iocommand.LUN_info.LunAddrBytes,
6832 sizeof(request.lun_number));
6833 memcpy(request.cdb, iocommand.Request.CDB, iocommand.Request.CDBLen);
6834 request.additional_cdb_bytes_usage = SOP_ADDITIONAL_CDB_BYTES_0;
6835
6836 switch (iocommand.Request.Type.Direction) {
6837 case XFER_NONE:
6838 request.data_direction = SOP_NO_DIRECTION_FLAG;
6839 break;
6840 case XFER_WRITE:
6841 request.data_direction = SOP_WRITE_FLAG;
6842 break;
6843 case XFER_READ:
6844 request.data_direction = SOP_READ_FLAG;
6845 break;
6846 case XFER_READ | XFER_WRITE:
6847 request.data_direction = SOP_BIDIRECTIONAL;
6848 break;
6849 }
6850
6851 request.task_attribute = SOP_TASK_ATTRIBUTE_SIMPLE;
6852
6853 if (iocommand.buf_size > 0) {
6854 put_unaligned_le32(iocommand.buf_size, &request.buffer_length);
6855
6856 rc = pqi_map_single(ctrl_info->pci_dev,
6857 &request.sg_descriptors[0], kernel_buffer,
6858 iocommand.buf_size, DMA_BIDIRECTIONAL);
6859 if (rc)
6860 goto out;
6861
6862 iu_length += sizeof(request.sg_descriptors[0]);
6863 }
6864
6865 put_unaligned_le16(iu_length, &request.header.iu_length);
6866
6867 if (ctrl_info->raid_iu_timeout_supported)
6868 put_unaligned_le32(iocommand.Request.Timeout, &request.timeout);
6869
6870 rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header,
6871 PQI_SYNC_FLAGS_INTERRUPTABLE, &pqi_error_info);
6872
6873 if (iocommand.buf_size > 0)
6874 pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1,
6875 DMA_BIDIRECTIONAL);
6876
6877 memset(&iocommand.error_info, 0, sizeof(iocommand.error_info));
6878
6879 if (rc == 0) {
6880 pqi_error_info_to_ciss(&pqi_error_info, &ciss_error_info);
6881 iocommand.error_info.ScsiStatus = ciss_error_info.scsi_status;
6882 iocommand.error_info.CommandStatus =
6883 ciss_error_info.command_status;
6884 sense_data_length = ciss_error_info.sense_data_length;
6885 if (sense_data_length) {
6886 if (sense_data_length >
6887 sizeof(iocommand.error_info.SenseInfo))
6888 sense_data_length =
6889 sizeof(iocommand.error_info.SenseInfo);
6890 memcpy(iocommand.error_info.SenseInfo,
6891 pqi_error_info.data, sense_data_length);
6892 iocommand.error_info.SenseLen = sense_data_length;
6893 }
6894 }
6895
6896 if (copy_to_user(arg, &iocommand, sizeof(iocommand))) {
6897 rc = -EFAULT;
6898 goto out;
6899 }
6900
6901 if (rc == 0 && iocommand.buf_size > 0 &&
6902 (iocommand.Request.Type.Direction & XFER_READ)) {
6903 if (copy_to_user(iocommand.buf, kernel_buffer,
6904 iocommand.buf_size)) {
6905 rc = -EFAULT;
6906 }
6907 }
6908
6909 out:
6910 kfree(kernel_buffer);
6911
6912 return rc;
6913 }
6914
6915 static int pqi_ioctl(struct scsi_device *sdev, unsigned int cmd,
6916 void __user *arg)
6917 {
6918 int rc;
6919 struct pqi_ctrl_info *ctrl_info;
6920
6921 ctrl_info = shost_to_hba(sdev->host);
6922
6923 switch (cmd) {
6924 case CCISS_DEREGDISK:
6925 case CCISS_REGNEWDISK:
6926 case CCISS_REGNEWD:
6927 rc = pqi_scan_scsi_devices(ctrl_info);
6928 break;
6929 case CCISS_GETPCIINFO:
6930 rc = pqi_getpciinfo_ioctl(ctrl_info, arg);
6931 break;
6932 case CCISS_GETDRIVVER:
6933 rc = pqi_getdrivver_ioctl(arg);
6934 break;
6935 case CCISS_PASSTHRU:
6936 rc = pqi_passthru_ioctl(ctrl_info, arg);
6937 break;
6938 default:
6939 rc = -EINVAL;
6940 break;
6941 }
6942
6943 return rc;
6944 }
6945
6946 static ssize_t pqi_firmware_version_show(struct device *dev,
6947 struct device_attribute *attr, char *buffer)
6948 {
6949 struct Scsi_Host *shost;
6950 struct pqi_ctrl_info *ctrl_info;
6951
6952 shost = class_to_shost(dev);
6953 ctrl_info = shost_to_hba(shost);
6954
6955 return scnprintf(buffer, PAGE_SIZE, "%s\n", ctrl_info->firmware_version);
6956 }
6957
6958 static ssize_t pqi_serial_number_show(struct device *dev,
6959 struct device_attribute *attr, char *buffer)
6960 {
6961 struct Scsi_Host *shost;
6962 struct pqi_ctrl_info *ctrl_info;
6963
6964 shost = class_to_shost(dev);
6965 ctrl_info = shost_to_hba(shost);
6966
6967 return scnprintf(buffer, PAGE_SIZE, "%s\n", ctrl_info->serial_number);
6968 }
6969
6970 static ssize_t pqi_model_show(struct device *dev,
6971 struct device_attribute *attr, char *buffer)
6972 {
6973 struct Scsi_Host *shost;
6974 struct pqi_ctrl_info *ctrl_info;
6975
6976 shost = class_to_shost(dev);
6977 ctrl_info = shost_to_hba(shost);
6978
6979 return scnprintf(buffer, PAGE_SIZE, "%s\n", ctrl_info->model);
6980 }
6981
6982 static ssize_t pqi_vendor_show(struct device *dev,
6983 struct device_attribute *attr, char *buffer)
6984 {
6985 struct Scsi_Host *shost;
6986 struct pqi_ctrl_info *ctrl_info;
6987
6988 shost = class_to_shost(dev);
6989 ctrl_info = shost_to_hba(shost);
6990
6991 return scnprintf(buffer, PAGE_SIZE, "%s\n", ctrl_info->vendor);
6992 }
6993
6994 static ssize_t pqi_host_rescan_store(struct device *dev,
6995 struct device_attribute *attr, const char *buffer, size_t count)
6996 {
6997 struct Scsi_Host *shost = class_to_shost(dev);
6998
6999 pqi_scan_start(shost);
7000
7001 return count;
7002 }
7003
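/*
 * Added note: the sysfs "lockup_action" attribute lists every entry in
 * pqi_lockup_actions with the currently selected action in brackets,
 * e.g. "none [reboot] panic" (ordering and names are hypothetical
 * here; the actual strings come from that table).
 */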
7004 static ssize_t pqi_lockup_action_show(struct device *dev,
7005 struct device_attribute *attr, char *buffer)
7006 {
7007 int count = 0;
7008 unsigned int i;
7009
7010 for (i = 0; i < ARRAY_SIZE(pqi_lockup_actions); i++) {
7011 if (pqi_lockup_actions[i].action == pqi_lockup_action)
7012 count += scnprintf(buffer + count, PAGE_SIZE - count,
7013 "[%s] ", pqi_lockup_actions[i].name);
7014 else
7015 count += scnprintf(buffer + count, PAGE_SIZE - count,
7016 "%s ", pqi_lockup_actions[i].name);
7017 }
7018
7019 count += scnprintf(buffer + count, PAGE_SIZE - count, "\n");
7020
7021 return count;
7022 }
7023
7024 static ssize_t pqi_lockup_action_store(struct device *dev,
7025 struct device_attribute *attr, const char *buffer, size_t count)
7026 {
7027 unsigned int i;
7028 char *action_name;
7029 char action_name_buffer[32];
7030
7031 strscpy(action_name_buffer, buffer, sizeof(action_name_buffer));
7032 action_name = strstrip(action_name_buffer);
7033
7034 for (i = 0; i < ARRAY_SIZE(pqi_lockup_actions); i++) {
7035 if (strcmp(action_name, pqi_lockup_actions[i].name) == 0) {
7036 pqi_lockup_action = pqi_lockup_actions[i].action;
7037 return count;
7038 }
7039 }
7040
7041 return -EINVAL;
7042 }
7043
7044 static ssize_t pqi_host_enable_stream_detection_show(struct device *dev,
7045 struct device_attribute *attr, char *buffer)
7046 {
7047 struct Scsi_Host *shost = class_to_shost(dev);
7048 struct pqi_ctrl_info *ctrl_info = shost_to_hba(shost);
7049
7050 return scnprintf(buffer, 10, "%x\n",
7051 ctrl_info->enable_stream_detection);
7052 }
7053
7054 static ssize_t pqi_host_enable_stream_detection_store(struct device *dev,
7055 struct device_attribute *attr, const char *buffer, size_t count)
7056 {
7057 struct Scsi_Host *shost = class_to_shost(dev);
7058 struct pqi_ctrl_info *ctrl_info = shost_to_hba(shost);
7059 u8 set_stream_detection = 0;
7060
7061 if (kstrtou8(buffer, 0, &set_stream_detection))
7062 return -EINVAL;
7063
7064 if (set_stream_detection > 0)
7065 set_stream_detection = 1;
7066
7067 ctrl_info->enable_stream_detection = set_stream_detection;
7068
7069 return count;
7070 }
7071
7072 static ssize_t pqi_host_enable_r5_writes_show(struct device *dev,
7073 struct device_attribute *attr, char *buffer)
7074 {
7075 struct Scsi_Host *shost = class_to_shost(dev);
7076 struct pqi_ctrl_info *ctrl_info = shost_to_hba(shost);
7077
7078 return scnprintf(buffer, 10, "%x\n", ctrl_info->enable_r5_writes);
7079 }
7080
7081 static ssize_t pqi_host_enable_r5_writes_store(struct device *dev,
7082 struct device_attribute *attr, const char *buffer, size_t count)
7083 {
7084 struct Scsi_Host *shost = class_to_shost(dev);
7085 struct pqi_ctrl_info *ctrl_info = shost_to_hba(shost);
7086 u8 set_r5_writes = 0;
7087
7088 if (kstrtou8(buffer, 0, &set_r5_writes))
7089 return -EINVAL;
7090
7091 if (set_r5_writes > 0)
7092 set_r5_writes = 1;
7093
7094 ctrl_info->enable_r5_writes = set_r5_writes;
7095
7096 return count;
7097 }
7098
7099 static ssize_t pqi_host_enable_r6_writes_show(struct device *dev,
7100 struct device_attribute *attr, char *buffer)
7101 {
7102 struct Scsi_Host *shost = class_to_shost(dev);
7103 struct pqi_ctrl_info *ctrl_info = shost_to_hba(shost);
7104
7105 return scnprintf(buffer, 10, "%x\n", ctrl_info->enable_r6_writes);
7106 }
7107
7108 static ssize_t pqi_host_enable_r6_writes_store(struct device *dev,
7109 struct device_attribute *attr, const char *buffer, size_t count)
7110 {
7111 struct Scsi_Host *shost = class_to_shost(dev);
7112 struct pqi_ctrl_info *ctrl_info = shost_to_hba(shost);
7113 u8 set_r6_writes = 0;
7114
7115 if (kstrtou8(buffer, 0, &set_r6_writes))
7116 return -EINVAL;
7117
7118 if (set_r6_writes > 0)
7119 set_r6_writes = 1;
7120
7121 ctrl_info->enable_r6_writes = set_r6_writes;
7122
7123 return count;
7124 }
7125
7126 static DEVICE_STRING_ATTR_RO(driver_version, 0444,
7127 DRIVER_VERSION BUILD_TIMESTAMP);
7128 static DEVICE_ATTR(firmware_version, 0444, pqi_firmware_version_show, NULL);
7129 static DEVICE_ATTR(model, 0444, pqi_model_show, NULL);
7130 static DEVICE_ATTR(serial_number, 0444, pqi_serial_number_show, NULL);
7131 static DEVICE_ATTR(vendor, 0444, pqi_vendor_show, NULL);
7132 static DEVICE_ATTR(rescan, 0200, NULL, pqi_host_rescan_store);
7133 static DEVICE_ATTR(lockup_action, 0644, pqi_lockup_action_show,
7134 pqi_lockup_action_store);
7135 static DEVICE_ATTR(enable_stream_detection, 0644,
7136 pqi_host_enable_stream_detection_show,
7137 pqi_host_enable_stream_detection_store);
7138 static DEVICE_ATTR(enable_r5_writes, 0644,
7139 pqi_host_enable_r5_writes_show, pqi_host_enable_r5_writes_store);
7140 static DEVICE_ATTR(enable_r6_writes, 0644,
7141 pqi_host_enable_r6_writes_show, pqi_host_enable_r6_writes_store);
7142
7143 static struct attribute *pqi_shost_attrs[] = {
7144 &dev_attr_driver_version.attr.attr,
7145 &dev_attr_firmware_version.attr,
7146 &dev_attr_model.attr,
7147 &dev_attr_serial_number.attr,
7148 &dev_attr_vendor.attr,
7149 &dev_attr_rescan.attr,
7150 &dev_attr_lockup_action.attr,
7151 &dev_attr_enable_stream_detection.attr,
7152 &dev_attr_enable_r5_writes.attr,
7153 &dev_attr_enable_r6_writes.attr,
7154 NULL
7155 };
7156
7157 ATTRIBUTE_GROUPS(pqi_shost);
7158
7159 static ssize_t pqi_unique_id_show(struct device *dev,
7160 struct device_attribute *attr, char *buffer)
7161 {
7162 struct pqi_ctrl_info *ctrl_info;
7163 struct scsi_device *sdev;
7164 struct pqi_scsi_dev *device;
7165 unsigned long flags;
7166 u8 unique_id[16];
7167
7168 sdev = to_scsi_device(dev);
7169 ctrl_info = shost_to_hba(sdev->host);
7170
7171 if (pqi_ctrl_offline(ctrl_info))
7172 return -ENODEV;
7173
7174 spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
7175
7176 device = sdev->hostdata;
7177 if (!device) {
7178 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
7179 return -ENODEV;
7180 }
7181
7182 if (device->is_physical_device)
7183 memcpy(unique_id, device->wwid, sizeof(device->wwid));
7184 else
7185 memcpy(unique_id, device->volume_id, sizeof(device->volume_id));
7186
7187 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
7188
7189 return scnprintf(buffer, PAGE_SIZE,
7190 "%02X%02X%02X%02X%02X%02X%02X%02X"
7191 "%02X%02X%02X%02X%02X%02X%02X%02X\n",
7192 unique_id[0], unique_id[1], unique_id[2], unique_id[3],
7193 unique_id[4], unique_id[5], unique_id[6], unique_id[7],
7194 unique_id[8], unique_id[9], unique_id[10], unique_id[11],
7195 unique_id[12], unique_id[13], unique_id[14], unique_id[15]);
7196 }
7197
7198 static ssize_t pqi_lunid_show(struct device *dev,
7199 struct device_attribute *attr, char *buffer)
7200 {
7201 struct pqi_ctrl_info *ctrl_info;
7202 struct scsi_device *sdev;
7203 struct pqi_scsi_dev *device;
7204 unsigned long flags;
7205 u8 lunid[8];
7206
7207 sdev = to_scsi_device(dev);
7208 ctrl_info = shost_to_hba(sdev->host);
7209
7210 if (pqi_ctrl_offline(ctrl_info))
7211 return -ENODEV;
7212
7213 spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
7214
7215 device = sdev->hostdata;
7216 if (!device) {
7217 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
7218 return -ENODEV;
7219 }
7220
7221 memcpy(lunid, device->scsi3addr, sizeof(lunid));
7222
7223 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
7224
7225 return scnprintf(buffer, PAGE_SIZE, "0x%8phN\n", lunid);
7226 }
7227
7228 #define MAX_PATHS 8
7229
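/*
 * Added note: device->path_map is a bitmask of up to MAX_PATHS (8)
 * ports; bit i set means path i is mapped. The path whose index equals
 * active_path_index is reported as "Active", any other mapped path as
 * "Inactive", and connector/box/bay details are shown only for exposed
 * physical disk/ZBC devices.
 */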
7230 static ssize_t pqi_path_info_show(struct device *dev,
7231 struct device_attribute *attr, char *buf)
7232 {
7233 struct pqi_ctrl_info *ctrl_info;
7234 struct scsi_device *sdev;
7235 struct pqi_scsi_dev *device;
7236 unsigned long flags;
7237 int i;
7238 int output_len = 0;
7239 u8 box;
7240 u8 bay;
7241 u8 path_map_index;
7242 char *active;
7243 u8 phys_connector[2];
7244
7245 sdev = to_scsi_device(dev);
7246 ctrl_info = shost_to_hba(sdev->host);
7247
7248 if (pqi_ctrl_offline(ctrl_info))
7249 return -ENODEV;
7250
7251 spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
7252
7253 device = sdev->hostdata;
7254 if (!device) {
7255 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
7256 return -ENODEV;
7257 }
7258
7259 bay = device->bay;
7260 for (i = 0; i < MAX_PATHS; i++) {
7261 path_map_index = 1 << i;
7262 if (i == device->active_path_index)
7263 active = "Active";
7264 else if (device->path_map & path_map_index)
7265 active = "Inactive";
7266 else
7267 continue;
7268
7269 output_len += scnprintf(buf + output_len,
7270 PAGE_SIZE - output_len,
7271 "[%d:%d:%d:%d] %20.20s ",
7272 ctrl_info->scsi_host->host_no,
7273 device->bus, device->target,
7274 device->lun,
7275 scsi_device_type(device->devtype));
7276
7277 if (device->devtype == TYPE_RAID ||
7278 pqi_is_logical_device(device))
7279 goto end_buffer;
7280
7281 memcpy(&phys_connector, &device->phys_connector[i],
7282 sizeof(phys_connector));
7283 if (phys_connector[0] < '0')
7284 phys_connector[0] = '0';
7285 if (phys_connector[1] < '0')
7286 phys_connector[1] = '0';
7287
7288 output_len += scnprintf(buf + output_len,
7289 PAGE_SIZE - output_len,
7290 "PORT: %.2s ", phys_connector);
7291
7292 box = device->box[i];
7293 if (box != 0 && box != 0xFF)
7294 output_len += scnprintf(buf + output_len,
7295 PAGE_SIZE - output_len,
7296 "BOX: %hhu ", box);
7297
7298 if ((device->devtype == TYPE_DISK ||
7299 device->devtype == TYPE_ZBC) &&
7300 pqi_expose_device(device))
7301 output_len += scnprintf(buf + output_len,
7302 PAGE_SIZE - output_len,
7303 "BAY: %hhu ", bay);
7304
7305 end_buffer:
7306 output_len += scnprintf(buf + output_len,
7307 PAGE_SIZE - output_len,
7308 "%s\n", active);
7309 }
7310
7311 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
7312
7313 return output_len;
7314 }
7315
7316 static ssize_t pqi_sas_address_show(struct device *dev,
7317 struct device_attribute *attr, char *buffer)
7318 {
7319 struct pqi_ctrl_info *ctrl_info;
7320 struct scsi_device *sdev;
7321 struct pqi_scsi_dev *device;
7322 unsigned long flags;
7323 u64 sas_address;
7324
7325 sdev = to_scsi_device(dev);
7326 ctrl_info = shost_to_hba(sdev->host);
7327
7328 if (pqi_ctrl_offline(ctrl_info))
7329 return -ENODEV;
7330
7331 spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
7332
7333 device = sdev->hostdata;
7334 if (!device) {
7335 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
7336 return -ENODEV;
7337 }
7338
7339 sas_address = device->sas_address;
7340
7341 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
7342
7343 return scnprintf(buffer, PAGE_SIZE, "0x%016llx\n", sas_address);
7344 }
7345
7346 static ssize_t pqi_ssd_smart_path_enabled_show(struct device *dev,
7347 struct device_attribute *attr, char *buffer)
7348 {
7349 struct pqi_ctrl_info *ctrl_info;
7350 struct scsi_device *sdev;
7351 struct pqi_scsi_dev *device;
7352 unsigned long flags;
7353
7354 sdev = to_scsi_device(dev);
7355 ctrl_info = shost_to_hba(sdev->host);
7356
7357 if (pqi_ctrl_offline(ctrl_info))
7358 return -ENODEV;
7359
7360 spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
7361
7362 device = sdev->hostdata;
7363 if (!device) {
7364 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
7365 return -ENODEV;
7366 }
7367
7368 buffer[0] = device->raid_bypass_enabled ? '1' : '0';
7369 buffer[1] = '\n';
7370 buffer[2] = '\0';
7371
7372 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
7373
7374 return 2;
7375 }
7376
7377 static ssize_t pqi_raid_level_show(struct device *dev,
7378 struct device_attribute *attr, char *buffer)
7379 {
7380 struct pqi_ctrl_info *ctrl_info;
7381 struct scsi_device *sdev;
7382 struct pqi_scsi_dev *device;
7383 unsigned long flags;
7384 char *raid_level;
7385
7386 sdev = to_scsi_device(dev);
7387 ctrl_info = shost_to_hba(sdev->host);
7388
7389 if (pqi_ctrl_offline(ctrl_info))
7390 return -ENODEV;
7391
7392 spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
7393
7394 device = sdev->hostdata;
7395 if (!device) {
7396 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
7397 return -ENODEV;
7398 }
7399
7400 if (pqi_is_logical_device(device) && device->devtype == TYPE_DISK)
7401 raid_level = pqi_raid_level_to_string(device->raid_level);
7402 else
7403 raid_level = "N/A";
7404
7405 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
7406
7407 return scnprintf(buffer, PAGE_SIZE, "%s\n", raid_level);
7408 }
7409
7410 static ssize_t pqi_raid_bypass_cnt_show(struct device *dev,
7411 struct device_attribute *attr, char *buffer)
7412 {
7413 struct pqi_ctrl_info *ctrl_info;
7414 struct scsi_device *sdev;
7415 struct pqi_scsi_dev *device;
7416 unsigned long flags;
7417 u64 raid_bypass_cnt;
7418 int cpu;
7419
7420 sdev = to_scsi_device(dev);
7421 ctrl_info = shost_to_hba(sdev->host);
7422
7423 if (pqi_ctrl_offline(ctrl_info))
7424 return -ENODEV;
7425
7426 spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
7427
7428 device = sdev->hostdata;
7429 if (!device) {
7430 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
7431 return -ENODEV;
7432 }
7433
7434 raid_bypass_cnt = 0;
7435
7436 if (device->raid_io_stats) {
7437 for_each_online_cpu(cpu) {
7438 raid_bypass_cnt += per_cpu_ptr(device->raid_io_stats, cpu)->raid_bypass_cnt;
7439 }
7440 }
7441
7442 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
7443
7444 return scnprintf(buffer, PAGE_SIZE, "0x%llx\n", raid_bypass_cnt);
7445 }
7446
7447 static ssize_t pqi_sas_ncq_prio_enable_show(struct device *dev,
7448 struct device_attribute *attr, char *buf)
7449 {
7450 struct pqi_ctrl_info *ctrl_info;
7451 struct scsi_device *sdev;
7452 struct pqi_scsi_dev *device;
7453 unsigned long flags;
7454 int output_len = 0;
7455
7456 sdev = to_scsi_device(dev);
7457 ctrl_info = shost_to_hba(sdev->host);
7458
7459 if (pqi_ctrl_offline(ctrl_info))
7460 return -ENODEV;
7461
7462 spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
7463
7464 device = sdev->hostdata;
7465 if (!device) {
7466 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
7467 return -ENODEV;
7468 }
7469
7470 output_len = snprintf(buf, PAGE_SIZE, "%d\n",
7471 device->ncq_prio_enable);
7472 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
7473
7474 return output_len;
7475 }
7476
7477 static ssize_t pqi_sas_ncq_prio_enable_store(struct device *dev,
7478 struct device_attribute *attr,
7479 const char *buf, size_t count)
7480 {
7481 struct pqi_ctrl_info *ctrl_info;
7482 struct scsi_device *sdev;
7483 struct pqi_scsi_dev *device;
7484 unsigned long flags;
7485 u8 ncq_prio_enable = 0;
7486
7487 if (kstrtou8(buf, 0, &ncq_prio_enable))
7488 return -EINVAL;
7489
7490 sdev = to_scsi_device(dev);
7491 ctrl_info = shost_to_hba(sdev->host);
7492
7493 spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
7494
7495 device = sdev->hostdata;
7496
7497 if (!device) {
7498 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
7499 return -ENODEV;
7500 }
7501
7502 if (!device->ncq_prio_support) {
7503 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
7504 return -EINVAL;
7505 }
7506
7507 device->ncq_prio_enable = ncq_prio_enable;
7508
7509 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
7510
7511 return strlen(buf);
7512 }
7513
7514 static ssize_t pqi_numa_node_show(struct device *dev,
7515 struct device_attribute *attr, char *buffer)
7516 {
7517 struct scsi_device *sdev;
7518 struct pqi_ctrl_info *ctrl_info;
7519
7520 sdev = to_scsi_device(dev);
7521 ctrl_info = shost_to_hba(sdev->host);
7522
7523 return scnprintf(buffer, PAGE_SIZE, "%d\n", ctrl_info->numa_node);
7524 }
7525
7526 static ssize_t pqi_write_stream_cnt_show(struct device *dev,
7527 struct device_attribute *attr, char *buffer)
7528 {
7529 struct pqi_ctrl_info *ctrl_info;
7530 struct scsi_device *sdev;
7531 struct pqi_scsi_dev *device;
7532 unsigned long flags;
7533 u64 write_stream_cnt;
7534 int cpu;
7535
7536 sdev = to_scsi_device(dev);
7537 ctrl_info = shost_to_hba(sdev->host);
7538
7539 if (pqi_ctrl_offline(ctrl_info))
7540 return -ENODEV;
7541
7542 spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
7543
7544 device = sdev->hostdata;
7545 if (!device) {
7546 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
7547 return -ENODEV;
7548 }
7549
7550 write_stream_cnt = 0;
7551
7552 if (device->raid_io_stats) {
7553 for_each_online_cpu(cpu) {
7554 write_stream_cnt += per_cpu_ptr(device->raid_io_stats, cpu)->write_stream_cnt;
7555 }
7556 }
7557
7558 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
7559
7560 return scnprintf(buffer, PAGE_SIZE, "0x%llx\n", write_stream_cnt);
7561 }
7562
7563 static DEVICE_ATTR(lunid, 0444, pqi_lunid_show, NULL);
7564 static DEVICE_ATTR(unique_id, 0444, pqi_unique_id_show, NULL);
7565 static DEVICE_ATTR(path_info, 0444, pqi_path_info_show, NULL);
7566 static DEVICE_ATTR(sas_address, 0444, pqi_sas_address_show, NULL);
7567 static DEVICE_ATTR(ssd_smart_path_enabled, 0444, pqi_ssd_smart_path_enabled_show, NULL);
7568 static DEVICE_ATTR(raid_level, 0444, pqi_raid_level_show, NULL);
7569 static DEVICE_ATTR(raid_bypass_cnt, 0444, pqi_raid_bypass_cnt_show, NULL);
7570 static DEVICE_ATTR(sas_ncq_prio_enable, 0644,
7571 pqi_sas_ncq_prio_enable_show, pqi_sas_ncq_prio_enable_store);
7572 static DEVICE_ATTR(numa_node, 0444, pqi_numa_node_show, NULL);
7573 static DEVICE_ATTR(write_stream_cnt, 0444, pqi_write_stream_cnt_show, NULL);
7574
7575 static struct attribute *pqi_sdev_attrs[] = {
7576 &dev_attr_lunid.attr,
7577 &dev_attr_unique_id.attr,
7578 &dev_attr_path_info.attr,
7579 &dev_attr_sas_address.attr,
7580 &dev_attr_ssd_smart_path_enabled.attr,
7581 &dev_attr_raid_level.attr,
7582 &dev_attr_raid_bypass_cnt.attr,
7583 &dev_attr_sas_ncq_prio_enable.attr,
7584 &dev_attr_numa_node.attr,
7585 &dev_attr_write_stream_cnt.attr,
7586 NULL
7587 };
7588
7589 ATTRIBUTE_GROUPS(pqi_sdev);
7590
7591 static const struct scsi_host_template pqi_driver_template = {
7592 .module = THIS_MODULE,
7593 .name = DRIVER_NAME_SHORT,
7594 .proc_name = DRIVER_NAME_SHORT,
7595 .queuecommand = pqi_scsi_queue_command,
7596 .scan_start = pqi_scan_start,
7597 .scan_finished = pqi_scan_finished,
7598 .this_id = -1,
7599 .eh_device_reset_handler = pqi_eh_device_reset_handler,
7600 .eh_abort_handler = pqi_eh_abort_handler,
7601 .ioctl = pqi_ioctl,
7602 .sdev_init = pqi_sdev_init,
7603 .sdev_configure = pqi_sdev_configure,
7604 .sdev_destroy = pqi_sdev_destroy,
7605 .map_queues = pqi_map_queues,
7606 .sdev_groups = pqi_sdev_groups,
7607 .shost_groups = pqi_shost_groups,
7608 .cmd_size = sizeof(struct pqi_cmd_priv),
7609 };
7610
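/*
 * Added note: registration order is scsi_host_alloc() ->
 * scsi_add_host() -> pqi_add_sas_host(); the error path unwinds in
 * reverse. The host uses a shared tag set (host_tagset = 1) spread
 * across num_queue_groups hardware queues.
 */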
7611 static int pqi_register_scsi(struct pqi_ctrl_info *ctrl_info)
7612 {
7613 int rc;
7614 struct Scsi_Host *shost;
7615
7616 shost = scsi_host_alloc(&pqi_driver_template, sizeof(ctrl_info));
7617 if (!shost) {
7618 dev_err(&ctrl_info->pci_dev->dev, "scsi_host_alloc failed\n");
7619 return -ENOMEM;
7620 }
7621
7622 shost->io_port = 0;
7623 shost->n_io_port = 0;
7624 shost->this_id = -1;
7625 shost->max_channel = PQI_MAX_BUS;
7626 shost->max_cmd_len = MAX_COMMAND_SIZE;
7627 shost->max_lun = PQI_MAX_LUNS_PER_DEVICE;
7628 shost->max_id = ~0;
7629 shost->max_sectors = ctrl_info->max_sectors;
7630 shost->can_queue = ctrl_info->scsi_ml_can_queue;
7631 shost->cmd_per_lun = shost->can_queue;
7632 shost->sg_tablesize = ctrl_info->sg_tablesize;
7633 shost->transportt = pqi_sas_transport_template;
7634 shost->irq = pci_irq_vector(ctrl_info->pci_dev, 0);
7635 shost->unique_id = shost->irq;
7636 shost->nr_hw_queues = ctrl_info->num_queue_groups;
7637 shost->host_tagset = 1;
7638 shost->hostdata[0] = (unsigned long)ctrl_info;
7639
7640 rc = scsi_add_host(shost, &ctrl_info->pci_dev->dev);
7641 if (rc) {
7642 dev_err(&ctrl_info->pci_dev->dev, "scsi_add_host failed\n");
7643 goto free_host;
7644 }
7645
7646 rc = pqi_add_sas_host(shost, ctrl_info);
7647 if (rc) {
7648 dev_err(&ctrl_info->pci_dev->dev, "add SAS host failed\n");
7649 goto remove_host;
7650 }
7651
7652 ctrl_info->scsi_host = shost;
7653
7654 return 0;
7655
7656 remove_host:
7657 scsi_remove_host(shost);
7658 free_host:
7659 scsi_host_put(shost);
7660
7661 return rc;
7662 }
7663
7664 static void pqi_unregister_scsi(struct pqi_ctrl_info *ctrl_info)
7665 {
7666 struct Scsi_Host *shost;
7667
7668 pqi_delete_sas_host(ctrl_info);
7669
7670 shost = ctrl_info->scsi_host;
7671 if (!shost)
7672 return;
7673
7674 scsi_remove_host(shost);
7675 scsi_host_put(shost);
7676 }
7677
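/*
 * Added note: the max_reset_timeout register value is scaled by 100
 * below, so it is evidently expressed in units of 100 ms. The reset
 * register is polled every PQI_RESET_POLL_INTERVAL_MSECS until the
 * action reads back as completed, the firmware stops running, or the
 * timeout expires.
 */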
7678 static int pqi_wait_for_pqi_reset_completion(struct pqi_ctrl_info *ctrl_info)
7679 {
7680 int rc = 0;
7681 struct pqi_device_registers __iomem *pqi_registers;
7682 unsigned long timeout;
7683 unsigned int timeout_msecs;
7684 union pqi_reset_register reset_reg;
7685
7686 pqi_registers = ctrl_info->pqi_registers;
7687 timeout_msecs = readw(&pqi_registers->max_reset_timeout) * 100;
7688 timeout = msecs_to_jiffies(timeout_msecs) + jiffies;
7689
7690 while (1) {
7691 msleep(PQI_RESET_POLL_INTERVAL_MSECS);
7692 reset_reg.all_bits = readl(&pqi_registers->device_reset);
7693 if (reset_reg.bits.reset_action == PQI_RESET_ACTION_COMPLETED)
7694 break;
7695 if (!sis_is_firmware_running(ctrl_info)) {
7696 rc = -ENXIO;
7697 break;
7698 }
7699 if (time_after(jiffies, timeout)) {
7700 rc = -ETIMEDOUT;
7701 break;
7702 }
7703 }
7704
7705 return rc;
7706 }
7707
7708 static int pqi_reset(struct pqi_ctrl_info *ctrl_info)
7709 {
7710 int rc;
7711 union pqi_reset_register reset_reg;
7712
7713 if (ctrl_info->pqi_reset_quiesce_supported) {
7714 rc = sis_pqi_reset_quiesce(ctrl_info);
7715 if (rc) {
7716 dev_err(&ctrl_info->pci_dev->dev,
7717 "PQI reset failed during quiesce with error %d\n", rc);
7718 return rc;
7719 }
7720 }
7721
7722 reset_reg.all_bits = 0;
7723 reset_reg.bits.reset_type = PQI_RESET_TYPE_HARD_RESET;
7724 reset_reg.bits.reset_action = PQI_RESET_ACTION_RESET;
7725
7726 writel(reset_reg.all_bits, &ctrl_info->pqi_registers->device_reset);
7727
7728 rc = pqi_wait_for_pqi_reset_completion(ctrl_info);
7729 if (rc)
7730 dev_err(&ctrl_info->pci_dev->dev,
7731 "PQI reset failed with error %d\n", rc);
7732
7733 return rc;
7734 }
7735
7736 static int pqi_get_ctrl_serial_number(struct pqi_ctrl_info *ctrl_info)
7737 {
7738 int rc;
7739 struct bmic_sense_subsystem_info *sense_info;
7740
7741 sense_info = kzalloc(sizeof(*sense_info), GFP_KERNEL);
7742 if (!sense_info)
7743 return -ENOMEM;
7744
7745 rc = pqi_sense_subsystem_info(ctrl_info, sense_info);
7746 if (rc)
7747 goto out;
7748
7749 memcpy(ctrl_info->serial_number, sense_info->ctrl_serial_number,
7750 sizeof(sense_info->ctrl_serial_number));
7751 ctrl_info->serial_number[sizeof(sense_info->ctrl_serial_number)] = '\0';
7752
7753 out:
7754 kfree(sense_info);
7755
7756 return rc;
7757 }
7758
7759 static int pqi_get_ctrl_product_details(struct pqi_ctrl_info *ctrl_info)
7760 {
7761 int rc;
7762 struct bmic_identify_controller *identify;
7763
7764 identify = kmalloc(sizeof(*identify), GFP_KERNEL);
7765 if (!identify)
7766 return -ENOMEM;
7767
7768 rc = pqi_identify_controller(ctrl_info, identify);
7769 if (rc)
7770 goto out;
7771
7772 if (get_unaligned_le32(&identify->extra_controller_flags) &
7773 BMIC_IDENTIFY_EXTRA_FLAGS_LONG_FW_VERSION_SUPPORTED) {
7774 memcpy(ctrl_info->firmware_version,
7775 identify->firmware_version_long,
7776 sizeof(identify->firmware_version_long));
7777 } else {
7778 memcpy(ctrl_info->firmware_version,
7779 identify->firmware_version_short,
7780 sizeof(identify->firmware_version_short));
7781 ctrl_info->firmware_version
7782 [sizeof(identify->firmware_version_short)] = '\0';
7783 snprintf(ctrl_info->firmware_version +
7784 strlen(ctrl_info->firmware_version),
7785 sizeof(ctrl_info->firmware_version) -
7786 sizeof(identify->firmware_version_short),
7787 "-%u",
7788 get_unaligned_le16(&identify->firmware_build_number));
7789 }
7790
7791 memcpy(ctrl_info->model, identify->product_id,
7792 sizeof(identify->product_id));
7793 ctrl_info->model[sizeof(identify->product_id)] = '\0';
7794
7795 memcpy(ctrl_info->vendor, identify->vendor_id,
7796 sizeof(identify->vendor_id));
7797 ctrl_info->vendor[sizeof(identify->vendor_id)] = '\0';
7798
7799 dev_info(&ctrl_info->pci_dev->dev,
7800 "Firmware version: %s\n", ctrl_info->firmware_version);
7801
7802 out:
7803 kfree(identify);
7804
7805 return rc;
7806 }
7807
7808 struct pqi_config_table_section_info {
7809 struct pqi_ctrl_info *ctrl_info;
7810 void *section;
7811 u32 section_offset;
7812 void __iomem *section_iomem_addr;
7813 };
7814
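/*
 * Added layout note (inferred from the byte offsets used in the
 * helpers below): the firmware features section carries three
 * consecutive byte arrays of num_elements bytes each, starting at
 * features_supported - features supported (read-only), features
 * requested by the host (written via pqi_request_firmware_feature()
 * at offset num_elements), and features enabled (read back at offset
 * 2 * num_elements) - followed by two 16-bit "maximum known feature"
 * fields, presumably the firmware's and then the host's (the writeb
 * pair in pqi_enable_firmware_features() fills in the host one).
 */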
7815 static inline bool pqi_is_firmware_feature_supported(
7816 struct pqi_config_table_firmware_features *firmware_features,
7817 unsigned int bit_position)
7818 {
7819 unsigned int byte_index;
7820
7821 byte_index = bit_position / BITS_PER_BYTE;
7822
7823 if (byte_index >= le16_to_cpu(firmware_features->num_elements))
7824 return false;
7825
7826 return firmware_features->features_supported[byte_index] &
7827 (1 << (bit_position % BITS_PER_BYTE)) ? true : false;
7828 }
7829
7830 static inline bool pqi_is_firmware_feature_enabled(
7831 struct pqi_config_table_firmware_features *firmware_features,
7832 void __iomem *firmware_features_iomem_addr,
7833 unsigned int bit_position)
7834 {
7835 unsigned int byte_index;
7836 u8 __iomem *features_enabled_iomem_addr;
7837
7838 byte_index = (bit_position / BITS_PER_BYTE) +
7839 (le16_to_cpu(firmware_features->num_elements) * 2);
7840
7841 features_enabled_iomem_addr = firmware_features_iomem_addr +
7842 offsetof(struct pqi_config_table_firmware_features,
7843 features_supported) + byte_index;
7844
7845 return *((__force u8 *)features_enabled_iomem_addr) &
7846 (1 << (bit_position % BITS_PER_BYTE)) ? true : false;
7847 }
7848
7849 static inline void pqi_request_firmware_feature(
7850 struct pqi_config_table_firmware_features *firmware_features,
7851 unsigned int bit_position)
7852 {
7853 unsigned int byte_index;
7854
7855 byte_index = (bit_position / BITS_PER_BYTE) +
7856 le16_to_cpu(firmware_features->num_elements);
7857
7858 firmware_features->features_supported[byte_index] |=
7859 (1 << (bit_position % BITS_PER_BYTE));
7860 }
7861
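/*
 * Added note: issues a vendor-general "config table update" IU telling
 * the firmware which section range the host has modified; used below
 * to commit the requested firmware feature bits.
 */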
7862 static int pqi_config_table_update(struct pqi_ctrl_info *ctrl_info,
7863 u16 first_section, u16 last_section)
7864 {
7865 struct pqi_vendor_general_request request;
7866
7867 memset(&request, 0, sizeof(request));
7868
7869 request.header.iu_type = PQI_REQUEST_IU_VENDOR_GENERAL;
7870 put_unaligned_le16(sizeof(request) - PQI_REQUEST_HEADER_LENGTH,
7871 &request.header.iu_length);
7872 put_unaligned_le16(PQI_VENDOR_GENERAL_CONFIG_TABLE_UPDATE,
7873 &request.function_code);
7874 put_unaligned_le16(first_section,
7875 &request.data.config_table_update.first_section);
7876 put_unaligned_le16(last_section,
7877 &request.data.config_table_update.last_section);
7878
7879 return pqi_submit_raid_request_synchronous(ctrl_info, &request.header, 0, NULL);
7880 }
7881
7882 static int pqi_enable_firmware_features(struct pqi_ctrl_info *ctrl_info,
7883 struct pqi_config_table_firmware_features *firmware_features,
7884 void __iomem *firmware_features_iomem_addr)
7885 {
7886 void *features_requested;
7887 void __iomem *features_requested_iomem_addr;
7888 void __iomem *host_max_known_feature_iomem_addr;
7889
7890 features_requested = firmware_features->features_supported +
7891 le16_to_cpu(firmware_features->num_elements);
7892
7893 features_requested_iomem_addr = firmware_features_iomem_addr +
7894 (features_requested - (void *)firmware_features);
7895
7896 memcpy_toio(features_requested_iomem_addr, features_requested,
7897 le16_to_cpu(firmware_features->num_elements));
7898
7899 if (pqi_is_firmware_feature_supported(firmware_features,
7900 PQI_FIRMWARE_FEATURE_MAX_KNOWN_FEATURE)) {
7901 host_max_known_feature_iomem_addr =
7902 features_requested_iomem_addr +
7903 (le16_to_cpu(firmware_features->num_elements) * 2) +
7904 sizeof(__le16);
7905 writeb(PQI_FIRMWARE_FEATURE_MAXIMUM & 0xFF, host_max_known_feature_iomem_addr);
7906 writeb((PQI_FIRMWARE_FEATURE_MAXIMUM & 0xFF00) >> 8, host_max_known_feature_iomem_addr + 1);
7907 }
7908
7909 return pqi_config_table_update(ctrl_info,
7910 PQI_CONFIG_TABLE_SECTION_FIRMWARE_FEATURES,
7911 PQI_CONFIG_TABLE_SECTION_FIRMWARE_FEATURES);
7912 }
7913
7914 struct pqi_firmware_feature {
7915 char *feature_name;
7916 unsigned int feature_bit;
7917 bool supported;
7918 bool enabled;
7919 void (*feature_status)(struct pqi_ctrl_info *ctrl_info,
7920 struct pqi_firmware_feature *firmware_feature);
7921 };
7922
7923 static void pqi_firmware_feature_status(struct pqi_ctrl_info *ctrl_info,
7924 struct pqi_firmware_feature *firmware_feature)
7925 {
7926 if (!firmware_feature->supported) {
7927 dev_info(&ctrl_info->pci_dev->dev, "%s not supported by controller\n",
7928 firmware_feature->feature_name);
7929 return;
7930 }
7931
7932 if (firmware_feature->enabled) {
7933 dev_info(&ctrl_info->pci_dev->dev,
7934 "%s enabled\n", firmware_feature->feature_name);
7935 return;
7936 }
7937
7938 dev_err(&ctrl_info->pci_dev->dev, "failed to enable %s\n",
7939 firmware_feature->feature_name);
7940 }
7941
7942 static void pqi_ctrl_update_feature_flags(struct pqi_ctrl_info *ctrl_info,
7943 struct pqi_firmware_feature *firmware_feature)
7944 {
7945 switch (firmware_feature->feature_bit) {
7946 case PQI_FIRMWARE_FEATURE_RAID_1_WRITE_BYPASS:
7947 ctrl_info->enable_r1_writes = firmware_feature->enabled;
7948 break;
7949 case PQI_FIRMWARE_FEATURE_RAID_5_WRITE_BYPASS:
7950 ctrl_info->enable_r5_writes = firmware_feature->enabled;
7951 break;
7952 case PQI_FIRMWARE_FEATURE_RAID_6_WRITE_BYPASS:
7953 ctrl_info->enable_r6_writes = firmware_feature->enabled;
7954 break;
7955 case PQI_FIRMWARE_FEATURE_SOFT_RESET_HANDSHAKE:
7956 ctrl_info->soft_reset_handshake_supported =
7957 firmware_feature->enabled &&
7958 pqi_read_soft_reset_status(ctrl_info);
7959 break;
7960 case PQI_FIRMWARE_FEATURE_RAID_IU_TIMEOUT:
7961 ctrl_info->raid_iu_timeout_supported = firmware_feature->enabled;
7962 break;
7963 case PQI_FIRMWARE_FEATURE_TMF_IU_TIMEOUT:
7964 ctrl_info->tmf_iu_timeout_supported = firmware_feature->enabled;
7965 break;
7966 case PQI_FIRMWARE_FEATURE_FW_TRIAGE:
7967 ctrl_info->firmware_triage_supported = firmware_feature->enabled;
7968 pqi_save_fw_triage_setting(ctrl_info, firmware_feature->enabled);
7969 break;
7970 case PQI_FIRMWARE_FEATURE_RPL_EXTENDED_FORMAT_4_5:
7971 ctrl_info->rpl_extended_format_4_5_supported = firmware_feature->enabled;
7972 break;
7973 case PQI_FIRMWARE_FEATURE_MULTI_LUN_DEVICE_SUPPORT:
7974 ctrl_info->multi_lun_device_supported = firmware_feature->enabled;
7975 break;
7976 case PQI_FIRMWARE_FEATURE_CTRL_LOGGING:
7977 ctrl_info->ctrl_logging_supported = firmware_feature->enabled;
7978 break;
7979 }
7980
7981 pqi_firmware_feature_status(ctrl_info, firmware_feature);
7982 }
7983
7984 static inline void pqi_firmware_feature_update(struct pqi_ctrl_info *ctrl_info,
7985 struct pqi_firmware_feature *firmware_feature)
7986 {
7987 if (firmware_feature->feature_status)
7988 firmware_feature->feature_status(ctrl_info, firmware_feature);
7989 }
7990
7991 static DEFINE_MUTEX(pqi_firmware_features_mutex);
7992
7993 static struct pqi_firmware_feature pqi_firmware_features[] = {
7994 {
7995 .feature_name = "Online Firmware Activation",
7996 .feature_bit = PQI_FIRMWARE_FEATURE_OFA,
7997 .feature_status = pqi_firmware_feature_status,
7998 },
7999 {
8000 .feature_name = "Serial Management Protocol",
8001 .feature_bit = PQI_FIRMWARE_FEATURE_SMP,
8002 .feature_status = pqi_firmware_feature_status,
8003 },
8004 {
8005 .feature_name = "Maximum Known Feature",
8006 .feature_bit = PQI_FIRMWARE_FEATURE_MAX_KNOWN_FEATURE,
8007 .feature_status = pqi_firmware_feature_status,
8008 },
8009 {
8010 .feature_name = "RAID 0 Read Bypass",
8011 .feature_bit = PQI_FIRMWARE_FEATURE_RAID_0_READ_BYPASS,
8012 .feature_status = pqi_firmware_feature_status,
8013 },
8014 {
8015 .feature_name = "RAID 1 Read Bypass",
8016 .feature_bit = PQI_FIRMWARE_FEATURE_RAID_1_READ_BYPASS,
8017 .feature_status = pqi_firmware_feature_status,
8018 },
8019 {
8020 .feature_name = "RAID 5 Read Bypass",
8021 .feature_bit = PQI_FIRMWARE_FEATURE_RAID_5_READ_BYPASS,
8022 .feature_status = pqi_firmware_feature_status,
8023 },
8024 {
8025 .feature_name = "RAID 6 Read Bypass",
8026 .feature_bit = PQI_FIRMWARE_FEATURE_RAID_6_READ_BYPASS,
8027 .feature_status = pqi_firmware_feature_status,
8028 },
8029 {
8030 .feature_name = "RAID 0 Write Bypass",
8031 .feature_bit = PQI_FIRMWARE_FEATURE_RAID_0_WRITE_BYPASS,
8032 .feature_status = pqi_firmware_feature_status,
8033 },
8034 {
8035 .feature_name = "RAID 1 Write Bypass",
8036 .feature_bit = PQI_FIRMWARE_FEATURE_RAID_1_WRITE_BYPASS,
8037 .feature_status = pqi_ctrl_update_feature_flags,
8038 },
8039 {
8040 .feature_name = "RAID 5 Write Bypass",
8041 .feature_bit = PQI_FIRMWARE_FEATURE_RAID_5_WRITE_BYPASS,
8042 .feature_status = pqi_ctrl_update_feature_flags,
8043 },
8044 {
8045 .feature_name = "RAID 6 Write Bypass",
8046 .feature_bit = PQI_FIRMWARE_FEATURE_RAID_6_WRITE_BYPASS,
8047 .feature_status = pqi_ctrl_update_feature_flags,
8048 },
8049 {
8050 .feature_name = "New Soft Reset Handshake",
8051 .feature_bit = PQI_FIRMWARE_FEATURE_SOFT_RESET_HANDSHAKE,
8052 .feature_status = pqi_ctrl_update_feature_flags,
8053 },
8054 {
8055 .feature_name = "RAID IU Timeout",
8056 .feature_bit = PQI_FIRMWARE_FEATURE_RAID_IU_TIMEOUT,
8057 .feature_status = pqi_ctrl_update_feature_flags,
8058 },
8059 {
8060 .feature_name = "TMF IU Timeout",
8061 .feature_bit = PQI_FIRMWARE_FEATURE_TMF_IU_TIMEOUT,
8062 .feature_status = pqi_ctrl_update_feature_flags,
8063 },
8064 {
8065 .feature_name = "RAID Bypass on encrypted logical volumes on NVMe",
8066 .feature_bit = PQI_FIRMWARE_FEATURE_RAID_BYPASS_ON_ENCRYPTED_NVME,
8067 .feature_status = pqi_firmware_feature_status,
8068 },
8069 {
8070 .feature_name = "Firmware Triage",
8071 .feature_bit = PQI_FIRMWARE_FEATURE_FW_TRIAGE,
8072 .feature_status = pqi_ctrl_update_feature_flags,
8073 },
8074 {
8075 .feature_name = "RPL Extended Formats 4 and 5",
8076 .feature_bit = PQI_FIRMWARE_FEATURE_RPL_EXTENDED_FORMAT_4_5,
8077 .feature_status = pqi_ctrl_update_feature_flags,
8078 },
8079 {
8080 .feature_name = "Multi-LUN Target",
8081 .feature_bit = PQI_FIRMWARE_FEATURE_MULTI_LUN_DEVICE_SUPPORT,
8082 .feature_status = pqi_ctrl_update_feature_flags,
8083 },
8084 {
8085 .feature_name = "Controller Data Logging",
8086 .feature_bit = PQI_FIRMWARE_FEATURE_CTRL_LOGGING,
8087 .feature_status = pqi_ctrl_update_feature_flags,
8088 },
8089 };
8090
8091 static void pqi_process_firmware_features(
8092 struct pqi_config_table_section_info *section_info)
8093 {
8094 int rc;
8095 struct pqi_ctrl_info *ctrl_info;
8096 struct pqi_config_table_firmware_features *firmware_features;
8097 void __iomem *firmware_features_iomem_addr;
8098 unsigned int i;
8099 unsigned int num_features_supported;
8100
8101 ctrl_info = section_info->ctrl_info;
8102 firmware_features = section_info->section;
8103 firmware_features_iomem_addr = section_info->section_iomem_addr;
8104
8105 for (i = 0, num_features_supported = 0;
8106 i < ARRAY_SIZE(pqi_firmware_features); i++) {
8107 if (pqi_is_firmware_feature_supported(firmware_features,
8108 pqi_firmware_features[i].feature_bit)) {
8109 pqi_firmware_features[i].supported = true;
8110 num_features_supported++;
8111 } else {
8112 pqi_firmware_feature_update(ctrl_info,
8113 &pqi_firmware_features[i]);
8114 }
8115 }
8116
8117 if (num_features_supported == 0)
8118 return;
8119
8120 for (i = 0; i < ARRAY_SIZE(pqi_firmware_features); i++) {
8121 if (!pqi_firmware_features[i].supported)
8122 continue;
8123 pqi_request_firmware_feature(firmware_features,
8124 pqi_firmware_features[i].feature_bit);
8125 }
8126
8127 rc = pqi_enable_firmware_features(ctrl_info, firmware_features,
8128 firmware_features_iomem_addr);
8129 if (rc) {
8130 dev_err(&ctrl_info->pci_dev->dev,
8131 "failed to enable firmware features in PQI configuration table\n");
8132 for (i = 0; i < ARRAY_SIZE(pqi_firmware_features); i++) {
8133 if (!pqi_firmware_features[i].supported)
8134 continue;
8135 pqi_firmware_feature_update(ctrl_info,
8136 &pqi_firmware_features[i]);
8137 }
8138 return;
8139 }
8140
8141 for (i = 0; i < ARRAY_SIZE(pqi_firmware_features); i++) {
8142 if (!pqi_firmware_features[i].supported)
8143 continue;
8144 if (pqi_is_firmware_feature_enabled(firmware_features,
8145 firmware_features_iomem_addr,
8146 pqi_firmware_features[i].feature_bit)) {
8147 pqi_firmware_features[i].enabled = true;
8148 }
8149 pqi_firmware_feature_update(ctrl_info,
8150 &pqi_firmware_features[i]);
8151 }
8152 }
8153
8154 static void pqi_init_firmware_features(void)
8155 {
8156 unsigned int i;
8157
8158 for (i = 0; i < ARRAY_SIZE(pqi_firmware_features); i++) {
8159 pqi_firmware_features[i].supported = false;
8160 pqi_firmware_features[i].enabled = false;
8161 }
8162 }
8163
8164 static void pqi_process_firmware_features_section(
8165 struct pqi_config_table_section_info *section_info)
8166 {
8167 mutex_lock(&pqi_firmware_features_mutex);
8168 pqi_init_firmware_features();
8169 pqi_process_firmware_features(section_info);
8170 mutex_unlock(&pqi_firmware_features_mutex);
8171 }
8172
8173 /*
8174 * Reset all controller settings that can be initialized during the processing
8175 * of the PQI Configuration Table.
8176 */
8177
8178 static void pqi_ctrl_reset_config(struct pqi_ctrl_info *ctrl_info)
8179 {
8180 ctrl_info->heartbeat_counter = NULL;
8181 ctrl_info->soft_reset_status = NULL;
8182 ctrl_info->soft_reset_handshake_supported = false;
8183 ctrl_info->enable_r1_writes = false;
8184 ctrl_info->enable_r5_writes = false;
8185 ctrl_info->enable_r6_writes = false;
8186 ctrl_info->raid_iu_timeout_supported = false;
8187 ctrl_info->tmf_iu_timeout_supported = false;
8188 ctrl_info->firmware_triage_supported = false;
8189 ctrl_info->rpl_extended_format_4_5_supported = false;
8190 ctrl_info->multi_lun_device_supported = false;
8191 ctrl_info->ctrl_logging_supported = false;
8192 }
8193
8194 static int pqi_process_config_table(struct pqi_ctrl_info *ctrl_info)
8195 {
8196 u32 table_length;
8197 u32 section_offset;
8198 bool firmware_feature_section_present;
8199 void __iomem *table_iomem_addr;
8200 struct pqi_config_table *config_table;
8201 struct pqi_config_table_section_header *section;
8202 struct pqi_config_table_section_info section_info;
8203 struct pqi_config_table_section_info feature_section_info = {0};
8204
8205 table_length = ctrl_info->config_table_length;
8206 if (table_length == 0)
8207 return 0;
8208
8209 config_table = kmalloc(table_length, GFP_KERNEL);
8210 if (!config_table) {
8211 dev_err(&ctrl_info->pci_dev->dev,
8212 "failed to allocate memory for PQI configuration table\n");
8213 return -ENOMEM;
8214 }
8215
8216 /*
8217 * Copy the config table contents from I/O memory space into the
8218 * temporary buffer.
8219 */
8220 table_iomem_addr = ctrl_info->iomem_base + ctrl_info->config_table_offset;
8221 memcpy_fromio(config_table, table_iomem_addr, table_length);
8222
8223 firmware_feature_section_present = false;
8224 section_info.ctrl_info = ctrl_info;
8225 section_offset = get_unaligned_le32(&config_table->first_section_offset);
8226
8227 while (section_offset) {
8228 section = (void *)config_table + section_offset;
8229
8230 section_info.section = section;
8231 section_info.section_offset = section_offset;
8232 section_info.section_iomem_addr = table_iomem_addr + section_offset;
8233
8234 switch (get_unaligned_le16(&section->section_id)) {
8235 case PQI_CONFIG_TABLE_SECTION_FIRMWARE_FEATURES:
8236 firmware_feature_section_present = true;
8237 feature_section_info = section_info;
8238 break;
8239 case PQI_CONFIG_TABLE_SECTION_HEARTBEAT:
8240 if (pqi_disable_heartbeat)
8241 dev_warn(&ctrl_info->pci_dev->dev,
8242 "heartbeat disabled by module parameter\n");
8243 else
8244 ctrl_info->heartbeat_counter =
8245 table_iomem_addr +
8246 section_offset +
8247 offsetof(struct pqi_config_table_heartbeat,
8248 heartbeat_counter);
8249 break;
8250 case PQI_CONFIG_TABLE_SECTION_SOFT_RESET:
8251 ctrl_info->soft_reset_status =
8252 table_iomem_addr +
8253 section_offset +
8254 offsetof(struct pqi_config_table_soft_reset,
8255 soft_reset_status);
8256 break;
8257 }
8258
8259 section_offset = get_unaligned_le16(&section->next_section_offset);
8260 }
8261
8262 /*
8263 * We process the firmware feature section after all other sections
8264 * have been processed so that the feature bit callbacks can take
8265 * into account the settings configured by other sections.
8266 */
8267 if (firmware_feature_section_present)
8268 pqi_process_firmware_features_section(&feature_section_info);
8269
8270 kfree(config_table);
8271
8272 return 0;
8273 }
8274
8275 /* Switches the controller from PQI mode back into SIS mode. */
8276
8277 static int pqi_revert_to_sis_mode(struct pqi_ctrl_info *ctrl_info)
8278 {
8279 int rc;
8280
8281 pqi_change_irq_mode(ctrl_info, IRQ_MODE_NONE);
8282 rc = pqi_reset(ctrl_info);
8283 if (rc)
8284 return rc;
8285 rc = sis_reenable_sis_mode(ctrl_info);
8286 if (rc) {
8287 dev_err(&ctrl_info->pci_dev->dev,
8288 "re-enabling SIS mode failed with error %d\n", rc);
8289 return rc;
8290 }
8291 pqi_save_ctrl_mode(ctrl_info, SIS_MODE);
8292
8293 return 0;
8294 }
8295
8296 /*
8297 * If the controller isn't already in SIS mode, this function forces it into
8298 * SIS mode.
8299 */
8300
8301 static int pqi_force_sis_mode(struct pqi_ctrl_info *ctrl_info)
8302 {
8303 if (!sis_is_firmware_running(ctrl_info))
8304 return -ENXIO;
8305
8306 if (pqi_get_ctrl_mode(ctrl_info) == SIS_MODE)
8307 return 0;
8308
8309 if (sis_is_kernel_up(ctrl_info)) {
8310 pqi_save_ctrl_mode(ctrl_info, SIS_MODE);
8311 return 0;
8312 }
8313
8314 return pqi_revert_to_sis_mode(ctrl_info);
8315 }
8316
8317 static void pqi_perform_lockup_action(void)
8318 {
8319 switch (pqi_lockup_action) {
8320 case PANIC:
8321 panic("FATAL: Smart Family Controller lockup detected");
8322 break;
8323 case REBOOT:
8324 emergency_restart();
8325 break;
8326 case NONE:
8327 default:
8328 break;
8329 }
8330 }
8331
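/*
 * The controller log buffer is handed to the firmware as a list of SG
 * descriptors, so the smallest chunk worth requesting is the total size
 * spread across the maximum number of descriptors the host can pass.
 */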
8332 #define PQI_CTRL_LOG_TOTAL_SIZE (4 * 1024 * 1024)
8333 #define PQI_CTRL_LOG_MIN_SIZE (PQI_CTRL_LOG_TOTAL_SIZE / PQI_HOST_MAX_SG_DESCRIPTORS)
8334
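/*
 * One-time controller bring-up: force SIS mode, wait for firmware
 * readiness, transition into PQI mode, create the admin and operational
 * queues, process the configuration table, and register with the SCSI
 * midlayer.
 */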
8335 static int pqi_ctrl_init(struct pqi_ctrl_info *ctrl_info)
8336 {
8337 int rc;
8338 u32 product_id;
8339
8340 if (reset_devices) {
8341 if (is_kdump_kernel() && pqi_is_fw_triage_supported(ctrl_info)) {
8342 rc = sis_wait_for_fw_triage_completion(ctrl_info);
8343 if (rc)
8344 return rc;
8345 }
8346 if (is_kdump_kernel() && sis_is_ctrl_logging_supported(ctrl_info)) {
8347 sis_notify_kdump(ctrl_info);
8348 rc = sis_wait_for_ctrl_logging_completion(ctrl_info);
8349 if (rc)
8350 return rc;
8351 }
8352 sis_soft_reset(ctrl_info);
8353 ssleep(PQI_POST_RESET_DELAY_SECS);
8354 } else {
8355 rc = pqi_force_sis_mode(ctrl_info);
8356 if (rc)
8357 return rc;
8358 }
8359
8360 /*
8361 * Wait until the controller is ready to start accepting SIS
8362 * commands.
8363 */
8364 rc = sis_wait_for_ctrl_ready(ctrl_info);
8365 if (rc) {
8366 if (reset_devices) {
8367 dev_err(&ctrl_info->pci_dev->dev,
8368 "kdump init failed with error %d\n", rc);
8369 pqi_lockup_action = REBOOT;
8370 pqi_perform_lockup_action();
8371 }
8372 return rc;
8373 }
8374
8375 /*
8376 * Get the controller properties. This allows us to determine
8377 * whether or not it supports PQI mode.
8378 */
8379 rc = sis_get_ctrl_properties(ctrl_info);
8380 if (rc) {
8381 dev_err(&ctrl_info->pci_dev->dev,
8382 "error obtaining controller properties\n");
8383 return rc;
8384 }
8385
8386 rc = sis_get_pqi_capabilities(ctrl_info);
8387 if (rc) {
8388 dev_err(&ctrl_info->pci_dev->dev,
8389 "error obtaining controller capabilities\n");
8390 return rc;
8391 }
8392
8393 product_id = sis_get_product_id(ctrl_info);
8394 ctrl_info->product_id = (u8)product_id;
8395 ctrl_info->product_revision = (u8)(product_id >> 8);
8396
8397 if (is_kdump_kernel()) {
8398 if (ctrl_info->max_outstanding_requests >
8399 PQI_MAX_OUTSTANDING_REQUESTS_KDUMP)
8400 ctrl_info->max_outstanding_requests =
8401 PQI_MAX_OUTSTANDING_REQUESTS_KDUMP;
8402 } else {
8403 if (ctrl_info->max_outstanding_requests >
8404 PQI_MAX_OUTSTANDING_REQUESTS)
8405 ctrl_info->max_outstanding_requests =
8406 PQI_MAX_OUTSTANDING_REQUESTS;
8407 }
8408
8409 pqi_calculate_io_resources(ctrl_info);
8410
8411 rc = pqi_alloc_error_buffer(ctrl_info);
8412 if (rc) {
8413 dev_err(&ctrl_info->pci_dev->dev,
8414 "failed to allocate PQI error buffer\n");
8415 return rc;
8416 }
8417
8418 /*
8419 * If the function we are about to call succeeds, the
8420 * controller will transition from legacy SIS mode
8421 * into PQI mode.
8422 */
8423 rc = sis_init_base_struct_addr(ctrl_info);
8424 if (rc) {
8425 dev_err(&ctrl_info->pci_dev->dev,
8426 "error initializing PQI mode\n");
8427 return rc;
8428 }
8429
8430 /* Wait for the controller to complete the SIS -> PQI transition. */
8431 rc = pqi_wait_for_pqi_mode_ready(ctrl_info);
8432 if (rc) {
8433 dev_err(&ctrl_info->pci_dev->dev,
8434 "transition to PQI mode failed\n");
8435 return rc;
8436 }
8437
8438 /* From here on, we are running in PQI mode. */
8439 ctrl_info->pqi_mode_enabled = true;
8440 pqi_save_ctrl_mode(ctrl_info, PQI_MODE);
8441
8442 rc = pqi_alloc_admin_queues(ctrl_info);
8443 if (rc) {
8444 dev_err(&ctrl_info->pci_dev->dev,
8445 "failed to allocate admin queues\n");
8446 return rc;
8447 }
8448
8449 rc = pqi_create_admin_queues(ctrl_info);
8450 if (rc) {
8451 dev_err(&ctrl_info->pci_dev->dev,
8452 "error creating admin queues\n");
8453 return rc;
8454 }
8455
8456 rc = pqi_report_device_capability(ctrl_info);
8457 if (rc) {
8458 dev_err(&ctrl_info->pci_dev->dev,
8459 "obtaining device capability failed\n");
8460 return rc;
8461 }
8462
8463 rc = pqi_validate_device_capability(ctrl_info);
8464 if (rc)
8465 return rc;
8466
8467 pqi_calculate_queue_resources(ctrl_info);
8468
8469 rc = pqi_enable_msix_interrupts(ctrl_info);
8470 if (rc)
8471 return rc;
8472
8473 if (ctrl_info->num_msix_vectors_enabled < ctrl_info->num_queue_groups) {
8474 ctrl_info->max_msix_vectors =
8475 ctrl_info->num_msix_vectors_enabled;
8476 pqi_calculate_queue_resources(ctrl_info);
8477 }
8478
8479 rc = pqi_alloc_io_resources(ctrl_info);
8480 if (rc)
8481 return rc;
8482
8483 rc = pqi_alloc_operational_queues(ctrl_info);
8484 if (rc) {
8485 dev_err(&ctrl_info->pci_dev->dev,
8486 "failed to allocate operational queues\n");
8487 return rc;
8488 }
8489
8490 pqi_init_operational_queues(ctrl_info);
8491
8492 rc = pqi_create_queues(ctrl_info);
8493 if (rc)
8494 return rc;
8495
8496 rc = pqi_request_irqs(ctrl_info);
8497 if (rc)
8498 return rc;
8499
8500 pqi_change_irq_mode(ctrl_info, IRQ_MODE_MSIX);
8501
8502 ctrl_info->controller_online = true;
8503
8504 rc = pqi_process_config_table(ctrl_info);
8505 if (rc)
8506 return rc;
8507
8508 pqi_start_heartbeat_timer(ctrl_info);
8509
8510 if (ctrl_info->enable_r5_writes || ctrl_info->enable_r6_writes) {
8511 rc = pqi_get_advanced_raid_bypass_config(ctrl_info);
8512 if (rc) { /* Supported features not returned correctly. */
8513 dev_err(&ctrl_info->pci_dev->dev,
8514 "error obtaining advanced RAID bypass configuration\n");
8515 return rc;
8516 }
8517 ctrl_info->ciss_report_log_flags |=
8518 CISS_REPORT_LOG_FLAG_DRIVE_TYPE_MIX;
8519 }
8520
8521 rc = pqi_enable_events(ctrl_info);
8522 if (rc) {
8523 dev_err(&ctrl_info->pci_dev->dev,
8524 "error enabling events\n");
8525 return rc;
8526 }
8527
8528 /* Register with the SCSI subsystem. */
8529 rc = pqi_register_scsi(ctrl_info);
8530 if (rc)
8531 return rc;
8532
8533 if (ctrl_info->ctrl_logging_supported && !is_kdump_kernel()) {
8534 pqi_host_setup_buffer(ctrl_info, &ctrl_info->ctrl_log_memory, PQI_CTRL_LOG_TOTAL_SIZE, PQI_CTRL_LOG_MIN_SIZE);
8535 pqi_host_memory_update(ctrl_info, &ctrl_info->ctrl_log_memory, PQI_VENDOR_GENERAL_CTRL_LOG_MEMORY_UPDATE);
8536 }
8537
8538 rc = pqi_get_ctrl_product_details(ctrl_info);
8539 if (rc) {
8540 dev_err(&ctrl_info->pci_dev->dev,
8541 "error obtaining product details\n");
8542 return rc;
8543 }
8544
8545 rc = pqi_get_ctrl_serial_number(ctrl_info);
8546 if (rc) {
8547 dev_err(&ctrl_info->pci_dev->dev,
8548 "error obtaining ctrl serial number\n");
8549 return rc;
8550 }
8551
8552 rc = pqi_set_diag_rescan(ctrl_info);
8553 if (rc) {
8554 dev_err(&ctrl_info->pci_dev->dev,
8555 "error enabling multi-lun rescan\n");
8556 return rc;
8557 }
8558
8559 rc = pqi_write_driver_version_to_host_wellness(ctrl_info);
8560 if (rc) {
8561 dev_err(&ctrl_info->pci_dev->dev,
8562 "error updating host wellness\n");
8563 return rc;
8564 }
8565
8566 pqi_schedule_update_time_worker(ctrl_info);
8567
8568 pqi_scan_scsi_devices(ctrl_info);
8569
8570 return 0;
8571 }
8572
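/*
 * Zero the driver's cached producer/consumer indices and the corresponding
 * queue registers so the already-allocated queue memory can be reused when
 * the controller is restarted (resume or OFA).
 */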
8573 static void pqi_reinit_queues(struct pqi_ctrl_info *ctrl_info)
8574 {
8575 unsigned int i;
8576 struct pqi_admin_queues *admin_queues;
8577 struct pqi_event_queue *event_queue;
8578
8579 admin_queues = &ctrl_info->admin_queues;
8580 admin_queues->iq_pi_copy = 0;
8581 admin_queues->oq_ci_copy = 0;
8582 writel(0, admin_queues->oq_pi);
8583
8584 for (i = 0; i < ctrl_info->num_queue_groups; i++) {
8585 ctrl_info->queue_groups[i].iq_pi_copy[RAID_PATH] = 0;
8586 ctrl_info->queue_groups[i].iq_pi_copy[AIO_PATH] = 0;
8587 ctrl_info->queue_groups[i].oq_ci_copy = 0;
8588
8589 writel(0, ctrl_info->queue_groups[i].iq_ci[RAID_PATH]);
8590 writel(0, ctrl_info->queue_groups[i].iq_ci[AIO_PATH]);
8591 writel(0, ctrl_info->queue_groups[i].oq_pi);
8592 }
8593
8594 event_queue = &ctrl_info->event_queue;
8595 writel(0, event_queue->oq_pi);
8596 event_queue->oq_ci_copy = 0;
8597 }
8598
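/*
 * Lighter-weight variant of pqi_ctrl_init() used on resume and OFA restart:
 * queue memory and IRQs already exist, so only the controller mode, the
 * queues, and configuration-dependent state are re-established.
 */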
8599 static int pqi_ctrl_init_resume(struct pqi_ctrl_info *ctrl_info)
8600 {
8601 int rc;
8602
8603 rc = pqi_force_sis_mode(ctrl_info);
8604 if (rc)
8605 return rc;
8606
8607 /*
8608 * Wait until the controller is ready to start accepting SIS
8609 * commands.
8610 */
8611 rc = sis_wait_for_ctrl_ready_resume(ctrl_info);
8612 if (rc)
8613 return rc;
8614
8615 /*
8616 * Get the controller properties. This allows us to determine
8617 * whether or not it supports PQI mode.
8618 */
8619 rc = sis_get_ctrl_properties(ctrl_info);
8620 if (rc) {
8621 dev_err(&ctrl_info->pci_dev->dev,
8622 "error obtaining controller properties\n");
8623 return rc;
8624 }
8625
8626 rc = sis_get_pqi_capabilities(ctrl_info);
8627 if (rc) {
8628 dev_err(&ctrl_info->pci_dev->dev,
8629 "error obtaining controller capabilities\n");
8630 return rc;
8631 }
8632
8633 /*
8634 * If the function we are about to call succeeds, the
8635 * controller will transition from legacy SIS mode
8636 * into PQI mode.
8637 */
8638 rc = sis_init_base_struct_addr(ctrl_info);
8639 if (rc) {
8640 dev_err(&ctrl_info->pci_dev->dev,
8641 "error initializing PQI mode\n");
8642 return rc;
8643 }
8644
8645 /* Wait for the controller to complete the SIS -> PQI transition. */
8646 rc = pqi_wait_for_pqi_mode_ready(ctrl_info);
8647 if (rc) {
8648 dev_err(&ctrl_info->pci_dev->dev,
8649 "transition to PQI mode failed\n");
8650 return rc;
8651 }
8652
8653 /* From here on, we are running in PQI mode. */
8654 ctrl_info->pqi_mode_enabled = true;
8655 pqi_save_ctrl_mode(ctrl_info, PQI_MODE);
8656
8657 pqi_reinit_queues(ctrl_info);
8658
8659 rc = pqi_create_admin_queues(ctrl_info);
8660 if (rc) {
8661 dev_err(&ctrl_info->pci_dev->dev,
8662 "error creating admin queues\n");
8663 return rc;
8664 }
8665
8666 rc = pqi_create_queues(ctrl_info);
8667 if (rc)
8668 return rc;
8669
8670 pqi_change_irq_mode(ctrl_info, IRQ_MODE_MSIX);
8671
8672 ctrl_info->controller_online = true;
8673 pqi_ctrl_unblock_requests(ctrl_info);
8674
8675 pqi_ctrl_reset_config(ctrl_info);
8676
8677 rc = pqi_process_config_table(ctrl_info);
8678 if (rc)
8679 return rc;
8680
8681 pqi_start_heartbeat_timer(ctrl_info);
8682
8683 if (ctrl_info->enable_r5_writes || ctrl_info->enable_r6_writes) {
8684 rc = pqi_get_advanced_raid_bypass_config(ctrl_info);
8685 if (rc) {
8686 dev_err(&ctrl_info->pci_dev->dev,
8687 "error obtaining advanced RAID bypass configuration\n");
8688 return rc;
8689 }
8690 ctrl_info->ciss_report_log_flags |=
8691 CISS_REPORT_LOG_FLAG_DRIVE_TYPE_MIX;
8692 }
8693
8694 rc = pqi_enable_events(ctrl_info);
8695 if (rc) {
8696 dev_err(&ctrl_info->pci_dev->dev,
8697 "error enabling events\n");
8698 return rc;
8699 }
8700
8701 rc = pqi_get_ctrl_product_details(ctrl_info);
8702 if (rc) {
8703 dev_err(&ctrl_info->pci_dev->dev,
8704 "error obtaining product details\n");
8705 return rc;
8706 }
8707
8708 rc = pqi_set_diag_rescan(ctrl_info);
8709 if (rc) {
8710 dev_err(&ctrl_info->pci_dev->dev,
8711 "error enabling multi-lun rescan\n");
8712 return rc;
8713 }
8714
8715 rc = pqi_write_driver_version_to_host_wellness(ctrl_info);
8716 if (rc) {
8717 dev_err(&ctrl_info->pci_dev->dev,
8718 "error updating host wellness\n");
8719 return rc;
8720 }
8721
8722 if (pqi_ofa_in_progress(ctrl_info)) {
8723 pqi_ctrl_unblock_scan(ctrl_info);
8724 if (ctrl_info->ctrl_logging_supported) {
8725 if (!ctrl_info->ctrl_log_memory.host_memory)
8726 pqi_host_setup_buffer(ctrl_info,
8727 &ctrl_info->ctrl_log_memory,
8728 PQI_CTRL_LOG_TOTAL_SIZE,
8729 PQI_CTRL_LOG_MIN_SIZE);
8730 pqi_host_memory_update(ctrl_info,
8731 &ctrl_info->ctrl_log_memory, PQI_VENDOR_GENERAL_CTRL_LOG_MEMORY_UPDATE);
8732 } else {
8733 if (ctrl_info->ctrl_log_memory.host_memory)
8734 pqi_host_free_buffer(ctrl_info,
8735 &ctrl_info->ctrl_log_memory);
8736 }
8737 }
8738
8739 pqi_scan_scsi_devices(ctrl_info);
8740
8741 return 0;
8742 }
8743
8744 static inline int pqi_set_pcie_completion_timeout(struct pci_dev *pci_dev, u16 timeout)
8745 {
8746 int rc;
8747
8748 rc = pcie_capability_clear_and_set_word(pci_dev, PCI_EXP_DEVCTL2,
8749 PCI_EXP_DEVCTL2_COMP_TIMEOUT, timeout);
8750
8751 return pcibios_err_to_errno(rc);
8752 }
8753
8754 static int pqi_pci_init(struct pqi_ctrl_info *ctrl_info)
8755 {
8756 int rc;
8757 u64 mask;
8758
8759 rc = pci_enable_device(ctrl_info->pci_dev);
8760 if (rc) {
8761 dev_err(&ctrl_info->pci_dev->dev,
8762 "failed to enable PCI device\n");
8763 return rc;
8764 }
8765
8766 if (sizeof(dma_addr_t) > 4)
8767 mask = DMA_BIT_MASK(64);
8768 else
8769 mask = DMA_BIT_MASK(32);
8770
8771 rc = dma_set_mask_and_coherent(&ctrl_info->pci_dev->dev, mask);
8772 if (rc) {
8773 dev_err(&ctrl_info->pci_dev->dev, "failed to set DMA mask\n");
8774 goto disable_device;
8775 }
8776
8777 rc = pci_request_regions(ctrl_info->pci_dev, DRIVER_NAME_SHORT);
8778 if (rc) {
8779 dev_err(&ctrl_info->pci_dev->dev,
8780 "failed to obtain PCI resources\n");
8781 goto disable_device;
8782 }
8783
8784 ctrl_info->iomem_base = ioremap(pci_resource_start(
8785 ctrl_info->pci_dev, 0),
8786 pci_resource_len(ctrl_info->pci_dev, 0));
8787 if (!ctrl_info->iomem_base) {
8788 dev_err(&ctrl_info->pci_dev->dev,
8789 "failed to map memory for controller registers\n");
8790 rc = -ENOMEM;
8791 goto release_regions;
8792 }
8793
8794 #define PCI_EXP_COMP_TIMEOUT_65_TO_210_MS 0x6
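/* 0x6 selects the PCIe "Range B" completion timeout encoding: 65 ms to 210 ms. */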
8795
8796 /* Increase the PCIe completion timeout. */
8797 rc = pqi_set_pcie_completion_timeout(ctrl_info->pci_dev,
8798 PCI_EXP_COMP_TIMEOUT_65_TO_210_MS);
8799 if (rc) {
8800 dev_err(&ctrl_info->pci_dev->dev,
8801 "failed to set PCIe completion timeout\n");
8802 goto release_regions;
8803 }
8804
8805 /* Enable bus mastering. */
8806 pci_set_master(ctrl_info->pci_dev);
8807
8808 ctrl_info->registers = ctrl_info->iomem_base;
8809 ctrl_info->pqi_registers = &ctrl_info->registers->pqi_registers;
8810
8811 pci_set_drvdata(ctrl_info->pci_dev, ctrl_info);
8812
8813 return 0;
8814
8815 release_regions:
8816 pci_release_regions(ctrl_info->pci_dev);
8817 disable_device:
8818 pci_disable_device(ctrl_info->pci_dev);
8819
8820 return rc;
8821 }
8822
8823 static void pqi_cleanup_pci_init(struct pqi_ctrl_info *ctrl_info)
8824 {
8825 iounmap(ctrl_info->iomem_base);
8826 pci_release_regions(ctrl_info->pci_dev);
8827 if (pci_is_enabled(ctrl_info->pci_dev))
8828 pci_disable_device(ctrl_info->pci_dev);
8829 pci_set_drvdata(ctrl_info->pci_dev, NULL);
8830 }
8831
8832 static struct pqi_ctrl_info *pqi_alloc_ctrl_info(int numa_node)
8833 {
8834 struct pqi_ctrl_info *ctrl_info;
8835
8836 ctrl_info = kzalloc_node(sizeof(struct pqi_ctrl_info),
8837 GFP_KERNEL, numa_node);
8838 if (!ctrl_info)
8839 return NULL;
8840
8841 mutex_init(&ctrl_info->scan_mutex);
8842 mutex_init(&ctrl_info->lun_reset_mutex);
8843 mutex_init(&ctrl_info->ofa_mutex);
8844
8845 INIT_LIST_HEAD(&ctrl_info->scsi_device_list);
8846 spin_lock_init(&ctrl_info->scsi_device_list_lock);
8847
8848 INIT_WORK(&ctrl_info->event_work, pqi_event_worker);
8849 atomic_set(&ctrl_info->num_interrupts, 0);
8850
8851 INIT_DELAYED_WORK(&ctrl_info->rescan_work, pqi_rescan_worker);
8852 INIT_DELAYED_WORK(&ctrl_info->update_time_work, pqi_update_time_worker);
8853
8854 timer_setup(&ctrl_info->heartbeat_timer, pqi_heartbeat_timer_handler, 0);
8855 INIT_WORK(&ctrl_info->ctrl_offline_work, pqi_ctrl_offline_worker);
8856
8857 INIT_WORK(&ctrl_info->ofa_memory_alloc_work, pqi_ofa_memory_alloc_worker);
8858 INIT_WORK(&ctrl_info->ofa_quiesce_work, pqi_ofa_quiesce_worker);
8859
8860 sema_init(&ctrl_info->sync_request_sem,
8861 PQI_RESERVED_IO_SLOTS_SYNCHRONOUS_REQUESTS);
8862 init_waitqueue_head(&ctrl_info->block_requests_wait);
8863
8864 ctrl_info->ctrl_id = atomic_inc_return(&pqi_controller_count) - 1;
8865 ctrl_info->irq_mode = IRQ_MODE_NONE;
8866 ctrl_info->max_msix_vectors = PQI_MAX_MSIX_VECTORS;
8867
8868 ctrl_info->ciss_report_log_flags = CISS_REPORT_LOG_FLAG_UNIQUE_LUN_ID;
8869 ctrl_info->max_transfer_encrypted_sas_sata =
8870 PQI_DEFAULT_MAX_TRANSFER_ENCRYPTED_SAS_SATA;
8871 ctrl_info->max_transfer_encrypted_nvme =
8872 PQI_DEFAULT_MAX_TRANSFER_ENCRYPTED_NVME;
8873 ctrl_info->max_write_raid_5_6 = PQI_DEFAULT_MAX_WRITE_RAID_5_6;
8874 ctrl_info->max_write_raid_1_10_2drive = ~0;
8875 ctrl_info->max_write_raid_1_10_3drive = ~0;
8876 ctrl_info->disable_managed_interrupts = pqi_disable_managed_interrupts;
8877
8878 return ctrl_info;
8879 }
8880
8881 static inline void pqi_free_ctrl_info(struct pqi_ctrl_info *ctrl_info)
8882 {
8883 kfree(ctrl_info);
8884 }
8885
8886 static void pqi_free_interrupts(struct pqi_ctrl_info *ctrl_info)
8887 {
8888 pqi_free_irqs(ctrl_info);
8889 pqi_disable_msix_interrupts(ctrl_info);
8890 }
8891
8892 static void pqi_free_ctrl_resources(struct pqi_ctrl_info *ctrl_info)
8893 {
8894 pqi_free_interrupts(ctrl_info);
8895 if (ctrl_info->queue_memory_base)
8896 dma_free_coherent(&ctrl_info->pci_dev->dev,
8897 ctrl_info->queue_memory_length,
8898 ctrl_info->queue_memory_base,
8899 ctrl_info->queue_memory_base_dma_handle);
8900 if (ctrl_info->admin_queue_memory_base)
8901 dma_free_coherent(&ctrl_info->pci_dev->dev,
8902 ctrl_info->admin_queue_memory_length,
8903 ctrl_info->admin_queue_memory_base,
8904 ctrl_info->admin_queue_memory_base_dma_handle);
8905 pqi_free_all_io_requests(ctrl_info);
8906 if (ctrl_info->error_buffer)
8907 dma_free_coherent(&ctrl_info->pci_dev->dev,
8908 ctrl_info->error_buffer_length,
8909 ctrl_info->error_buffer,
8910 ctrl_info->error_buffer_dma_handle);
8911 if (ctrl_info->iomem_base)
8912 pqi_cleanup_pci_init(ctrl_info);
8913 pqi_free_ctrl_info(ctrl_info);
8914 }
8915
8916 static void pqi_remove_ctrl(struct pqi_ctrl_info *ctrl_info)
8917 {
8918 ctrl_info->controller_online = false;
8919 pqi_stop_heartbeat_timer(ctrl_info);
8920 pqi_ctrl_block_requests(ctrl_info);
8921 pqi_cancel_rescan_worker(ctrl_info);
8922 pqi_cancel_update_time_worker(ctrl_info);
8923 if (ctrl_info->ctrl_removal_state == PQI_CTRL_SURPRISE_REMOVAL) {
8924 pqi_fail_all_outstanding_requests(ctrl_info);
8925 ctrl_info->pqi_mode_enabled = false;
8926 }
8927 pqi_host_free_buffer(ctrl_info, &ctrl_info->ctrl_log_memory);
8928 pqi_unregister_scsi(ctrl_info);
8929 if (ctrl_info->pqi_mode_enabled)
8930 pqi_revert_to_sis_mode(ctrl_info);
8931 pqi_free_ctrl_resources(ctrl_info);
8932 }
8933
8934 static void pqi_ofa_ctrl_quiesce(struct pqi_ctrl_info *ctrl_info)
8935 {
8936 pqi_ctrl_block_scan(ctrl_info);
8937 pqi_scsi_block_requests(ctrl_info);
8938 pqi_ctrl_block_device_reset(ctrl_info);
8939 pqi_ctrl_block_requests(ctrl_info);
8940 pqi_ctrl_wait_until_quiesced(ctrl_info);
8941 pqi_stop_heartbeat_timer(ctrl_info);
8942 }
8943
8944 static void pqi_ofa_ctrl_unquiesce(struct pqi_ctrl_info *ctrl_info)
8945 {
8946 pqi_start_heartbeat_timer(ctrl_info);
8947 pqi_ctrl_unblock_requests(ctrl_info);
8948 pqi_ctrl_unblock_device_reset(ctrl_info);
8949 pqi_scsi_unblock_requests(ctrl_info);
8950 pqi_ctrl_unblock_scan(ctrl_info);
8951 }
8952
8953 static int pqi_ofa_ctrl_restart(struct pqi_ctrl_info *ctrl_info, unsigned int delay_secs)
8954 {
8955 ssleep(delay_secs);
8956
8957 return pqi_ctrl_init_resume(ctrl_info);
8958 }
8959
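/*
 * Allocate the host buffer as sg_count coherent DMA chunks, recording each
 * chunk's bus address and length in the firmware-visible SG descriptor
 * list; on failure, free any chunks already allocated.
 */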
8960 static int pqi_host_alloc_mem(struct pqi_ctrl_info *ctrl_info,
8961 struct pqi_host_memory_descriptor *host_memory_descriptor,
8962 u32 total_size, u32 chunk_size)
8963 {
8964 int i;
8965 u32 sg_count;
8966 struct device *dev;
8967 struct pqi_host_memory *host_memory;
8968 struct pqi_sg_descriptor *mem_descriptor;
8969 dma_addr_t dma_handle;
8970
8971 sg_count = DIV_ROUND_UP(total_size, chunk_size);
8972 if (sg_count == 0 || sg_count > PQI_HOST_MAX_SG_DESCRIPTORS)
8973 goto out;
8974
8975 host_memory_descriptor->host_chunk_virt_address =
8976 kmalloc_array(sg_count, sizeof(void *), GFP_KERNEL);
8977 if (!host_memory_descriptor->host_chunk_virt_address)
8978 goto out;
8979
8980 dev = &ctrl_info->pci_dev->dev;
8981 host_memory = host_memory_descriptor->host_memory;
8982
8983 for (i = 0; i < sg_count; i++) {
8984 host_memory_descriptor->host_chunk_virt_address[i] = dma_alloc_coherent(dev, chunk_size, &dma_handle, GFP_KERNEL);
8985 if (!host_memory_descriptor->host_chunk_virt_address[i])
8986 goto out_free_chunks;
8987 mem_descriptor = &host_memory->sg_descriptor[i];
8988 put_unaligned_le64((u64)dma_handle, &mem_descriptor->address);
8989 put_unaligned_le32(chunk_size, &mem_descriptor->length);
8990 }
8991
8992 put_unaligned_le32(CISS_SG_LAST, &mem_descriptor->flags);
8993 put_unaligned_le16(sg_count, &host_memory->num_memory_descriptors);
8994 put_unaligned_le32(sg_count * chunk_size, &host_memory->bytes_allocated);
8995
8996 return 0;
8997
8998 out_free_chunks:
8999 while (--i >= 0) {
9000 mem_descriptor = &host_memory->sg_descriptor[i];
9001 dma_free_coherent(dev, chunk_size,
9002 host_memory_descriptor->host_chunk_virt_address[i],
9003 get_unaligned_le64(&mem_descriptor->address));
9004 }
9005 kfree(host_memory_descriptor->host_chunk_virt_address);
9006 out:
9007 return -ENOMEM;
9008 }
9009
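/*
 * Best-effort sizing: try the full request first, halving the chunk size
 * (and ultimately the total size) until an allocation succeeds or the total
 * drops below what the firmware can use. A 4 MiB request, for example, may
 * end up as 2 x 2 MiB or 16 x 256 KiB chunks, depending on how fragmented
 * coherent memory is.
 */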
9010 static int pqi_host_alloc_buffer(struct pqi_ctrl_info *ctrl_info,
9011 struct pqi_host_memory_descriptor *host_memory_descriptor,
9012 u32 total_required_size, u32 min_required_size)
9013 {
9014 u32 chunk_size;
9015 u32 min_chunk_size;
9016
9017 if (total_required_size == 0 || min_required_size == 0)
9018 return 0;
9019
9020 total_required_size = PAGE_ALIGN(total_required_size);
9021 min_required_size = PAGE_ALIGN(min_required_size);
9022 min_chunk_size = DIV_ROUND_UP(total_required_size, PQI_HOST_MAX_SG_DESCRIPTORS);
9023 min_chunk_size = PAGE_ALIGN(min_chunk_size);
9024
9025 while (total_required_size >= min_required_size) {
9026 for (chunk_size = total_required_size; chunk_size >= min_chunk_size;) {
9027 if (pqi_host_alloc_mem(ctrl_info,
9028 host_memory_descriptor, total_required_size,
9029 chunk_size) == 0)
9030 return 0;
9031 chunk_size /= 2;
9032 chunk_size = PAGE_ALIGN(chunk_size);
9033 }
9034 total_required_size /= 2;
9035 total_required_size = PAGE_ALIGN(total_required_size);
9036 }
9037
9038 return -ENOMEM;
9039 }
9040
9041 static void pqi_host_setup_buffer(struct pqi_ctrl_info *ctrl_info,
9042 struct pqi_host_memory_descriptor *host_memory_descriptor,
9043 u32 total_size, u32 min_size)
9044 {
9045 struct device *dev;
9046 struct pqi_host_memory *host_memory;
9047
9048 dev = &ctrl_info->pci_dev->dev;
9049
9050 host_memory = dma_alloc_coherent(dev, sizeof(*host_memory),
9051 &host_memory_descriptor->host_memory_dma_handle, GFP_KERNEL);
9052 if (!host_memory)
9053 return;
9054
9055 host_memory_descriptor->host_memory = host_memory;
9056
9057 if (pqi_host_alloc_buffer(ctrl_info, host_memory_descriptor,
9058 total_size, min_size) < 0) {
9059 dev_err(dev, "failed to allocate firmware usable host buffer\n");
9060 dma_free_coherent(dev, sizeof(*host_memory), host_memory,
9061 host_memory_descriptor->host_memory_dma_handle);
9062 host_memory_descriptor->host_memory = NULL;
9063 return;
9064 }
9065 }
9066
9067 static void pqi_host_free_buffer(struct pqi_ctrl_info *ctrl_info,
9068 struct pqi_host_memory_descriptor *host_memory_descriptor)
9069 {
9070 unsigned int i;
9071 struct device *dev;
9072 struct pqi_host_memory *host_memory;
9073 struct pqi_sg_descriptor *mem_descriptor;
9074 unsigned int num_memory_descriptors;
9075
9076 host_memory = host_memory_descriptor->host_memory;
9077 if (!host_memory)
9078 return;
9079
9080 dev = &ctrl_info->pci_dev->dev;
9081
9082 if (get_unaligned_le32(&host_memory->bytes_allocated) == 0)
9083 goto out;
9084
9085 mem_descriptor = host_memory->sg_descriptor;
9086 num_memory_descriptors = get_unaligned_le16(&host_memory->num_memory_descriptors);
9087
9088 for (i = 0; i < num_memory_descriptors; i++) {
9089 dma_free_coherent(dev,
9090 get_unaligned_le32(&mem_descriptor[i].length),
9091 host_memory_descriptor->host_chunk_virt_address[i],
9092 get_unaligned_le64(&mem_descriptor[i].address));
9093 }
9094 kfree(host_memory_descriptor->host_chunk_virt_address);
9095
9096 out:
9097 dma_free_coherent(dev, sizeof(*host_memory), host_memory,
9098 host_memory_descriptor->host_memory_dma_handle);
9099 host_memory_descriptor->host_memory = NULL;
9100 }
9101
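/*
 * Hand the host buffer to the firmware via a vendor-general IU. The same
 * request services both OFA and controller-log memory; function_code picks
 * the signature/version stamped into the buffer header. A NULL buffer sends
 * a zeroed descriptor, telling the firmware the memory is going away.
 */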
9102 static int pqi_host_memory_update(struct pqi_ctrl_info *ctrl_info,
9103 struct pqi_host_memory_descriptor *host_memory_descriptor,
9104 u16 function_code)
9105 {
9106 u32 buffer_length;
9107 struct pqi_vendor_general_request request;
9108 struct pqi_host_memory *host_memory;
9109
9110 memset(&request, 0, sizeof(request));
9111
9112 request.header.iu_type = PQI_REQUEST_IU_VENDOR_GENERAL;
9113 put_unaligned_le16(sizeof(request) - PQI_REQUEST_HEADER_LENGTH, &request.header.iu_length);
9114 put_unaligned_le16(function_code, &request.function_code);
9115
9116 host_memory = host_memory_descriptor->host_memory;
9117
9118 if (host_memory) {
9119 buffer_length = offsetof(struct pqi_host_memory, sg_descriptor) + get_unaligned_le16(&host_memory->num_memory_descriptors) * sizeof(struct pqi_sg_descriptor);
9120 put_unaligned_le64((u64)host_memory_descriptor->host_memory_dma_handle, &request.data.host_memory_allocation.buffer_address);
9121 put_unaligned_le32(buffer_length, &request.data.host_memory_allocation.buffer_length);
9122
9123 if (function_code == PQI_VENDOR_GENERAL_OFA_MEMORY_UPDATE) {
9124 put_unaligned_le16(PQI_OFA_VERSION, &host_memory->version);
9125 memcpy(&host_memory->signature, PQI_OFA_SIGNATURE, sizeof(host_memory->signature));
9126 } else if (function_code == PQI_VENDOR_GENERAL_CTRL_LOG_MEMORY_UPDATE) {
9127 put_unaligned_le16(PQI_CTRL_LOG_VERSION, &host_memory->version);
9128 memcpy(&host_memory->signature, PQI_CTRL_LOG_SIGNATURE, sizeof(host_memory->signature));
9129 }
9130 }
9131
9132 return pqi_submit_raid_request_synchronous(ctrl_info, &request.header, 0, NULL);
9133 }
9134
9135 static struct pqi_raid_error_info pqi_ctrl_offline_raid_error_info = {
9136 .data_out_result = PQI_DATA_IN_OUT_HARDWARE_ERROR,
9137 .status = SAM_STAT_CHECK_CONDITION,
9138 };
9139
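/*
 * Complete every request still in flight once the controller is offline:
 * midlayer commands are failed with DID_NO_CONNECT, while internal
 * (driver-initiated) requests get -ENXIO plus a synthetic CHECK CONDITION.
 */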
9140 static void pqi_fail_all_outstanding_requests(struct pqi_ctrl_info *ctrl_info)
9141 {
9142 unsigned int i;
9143 struct pqi_io_request *io_request;
9144 struct scsi_cmnd *scmd;
9145 struct scsi_device *sdev;
9146
9147 for (i = 0; i < ctrl_info->max_io_slots; i++) {
9148 io_request = &ctrl_info->io_request_pool[i];
9149 if (atomic_read(&io_request->refcount) == 0)
9150 continue;
9151
9152 scmd = io_request->scmd;
9153 if (scmd) {
9154 sdev = scmd->device;
9155 if (!sdev || !scsi_device_online(sdev)) {
9156 pqi_free_io_request(io_request);
9157 continue;
9158 } else {
9159 set_host_byte(scmd, DID_NO_CONNECT);
9160 }
9161 } else {
9162 io_request->status = -ENXIO;
9163 io_request->error_info =
9164 &pqi_ctrl_offline_raid_error_info;
9165 }
9166
9167 io_request->io_complete_callback(io_request,
9168 io_request->context);
9169 }
9170 }
9171
9172 static void pqi_take_ctrl_offline_deferred(struct pqi_ctrl_info *ctrl_info)
9173 {
9174 pqi_perform_lockup_action();
9175 pqi_stop_heartbeat_timer(ctrl_info);
9176 pqi_free_interrupts(ctrl_info);
9177 pqi_cancel_rescan_worker(ctrl_info);
9178 pqi_cancel_update_time_worker(ctrl_info);
9179 pqi_ctrl_wait_until_quiesced(ctrl_info);
9180 pqi_fail_all_outstanding_requests(ctrl_info);
9181 pqi_ctrl_unblock_requests(ctrl_info);
9182 pqi_take_ctrl_devices_offline(ctrl_info);
9183 }
9184
9185 static void pqi_ctrl_offline_worker(struct work_struct *work)
9186 {
9187 struct pqi_ctrl_info *ctrl_info;
9188
9189 ctrl_info = container_of(work, struct pqi_ctrl_info, ctrl_offline_work);
9190 pqi_take_ctrl_offline_deferred(ctrl_info);
9191 }
9192
9193 static char *pqi_ctrl_shutdown_reason_to_string(enum pqi_ctrl_shutdown_reason ctrl_shutdown_reason)
9194 {
9195 char *string;
9196
9197 switch (ctrl_shutdown_reason) {
9198 case PQI_IQ_NOT_DRAINED_TIMEOUT:
9199 string = "inbound queue not drained timeout";
9200 break;
9201 case PQI_LUN_RESET_TIMEOUT:
9202 string = "LUN reset timeout";
9203 break;
9204 case PQI_IO_PENDING_POST_LUN_RESET_TIMEOUT:
9205 string = "I/O pending timeout after LUN reset";
9206 break;
9207 case PQI_NO_HEARTBEAT:
9208 string = "no controller heartbeat detected";
9209 break;
9210 case PQI_FIRMWARE_KERNEL_NOT_UP:
9211 string = "firmware kernel not ready";
9212 break;
9213 case PQI_OFA_RESPONSE_TIMEOUT:
9214 string = "OFA response timeout";
9215 break;
9216 case PQI_INVALID_REQ_ID:
9217 string = "invalid request ID";
9218 break;
9219 case PQI_UNMATCHED_REQ_ID:
9220 string = "unmatched request ID";
9221 break;
9222 case PQI_IO_PI_OUT_OF_RANGE:
9223 string = "I/O queue producer index out of range";
9224 break;
9225 case PQI_EVENT_PI_OUT_OF_RANGE:
9226 string = "event queue producer index out of range";
9227 break;
9228 case PQI_UNEXPECTED_IU_TYPE:
9229 string = "unexpected IU type";
9230 break;
9231 default:
9232 string = "unknown reason";
9233 break;
9234 }
9235
9236 return string;
9237 }
9238
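/*
 * Mark the controller offline and shut it down immediately; the heavier
 * cleanup (failing outstanding requests, offlining devices) is deferred to
 * pqi_ctrl_offline_worker so it can run in process context.
 */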
9239 static void pqi_take_ctrl_offline(struct pqi_ctrl_info *ctrl_info,
9240 enum pqi_ctrl_shutdown_reason ctrl_shutdown_reason)
9241 {
9242 if (!ctrl_info->controller_online)
9243 return;
9244
9245 ctrl_info->controller_online = false;
9246 ctrl_info->pqi_mode_enabled = false;
9247 pqi_ctrl_block_requests(ctrl_info);
9248 if (!pqi_disable_ctrl_shutdown)
9249 sis_shutdown_ctrl(ctrl_info, ctrl_shutdown_reason);
9250 pci_disable_device(ctrl_info->pci_dev);
9251 dev_err(&ctrl_info->pci_dev->dev,
9252 "controller offline: reason code 0x%x (%s)\n",
9253 ctrl_shutdown_reason, pqi_ctrl_shutdown_reason_to_string(ctrl_shutdown_reason));
9254 schedule_work(&ctrl_info->ctrl_offline_work);
9255 }
9256
9257 static void pqi_take_ctrl_devices_offline(struct pqi_ctrl_info *ctrl_info)
9258 {
9259 int rc;
9260 unsigned long flags;
9261 struct pqi_scsi_dev *device;
9262
9263 spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
9264 list_for_each_entry(device, &ctrl_info->scsi_device_list, scsi_device_list_entry) {
9265 rc = list_is_last(&device->scsi_device_list_entry, &ctrl_info->scsi_device_list);
9266 if (rc)
9267 continue;
9268
9269 /*
9270 * Only offline devices already exposed to the SCSI midlayer.
9271 */
9272 if (device->sdev)
9273 scsi_device_set_state(device->sdev, SDEV_OFFLINE);
9274 }
9275 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
9276 }
9277
9278 static void pqi_print_ctrl_info(struct pci_dev *pci_dev,
9279 const struct pci_device_id *id)
9280 {
9281 char *ctrl_description;
9282
9283 if (id->driver_data)
9284 ctrl_description = (char *)id->driver_data;
9285 else
9286 ctrl_description = "Microchip Smart Family Controller";
9287
9288 dev_info(&pci_dev->dev, "%s found\n", ctrl_description);
9289 }
9290
9291 static int pqi_pci_probe(struct pci_dev *pci_dev,
9292 const struct pci_device_id *id)
9293 {
9294 int rc;
9295 int node;
9296 struct pqi_ctrl_info *ctrl_info;
9297
9298 pqi_print_ctrl_info(pci_dev, id);
9299
9300 if (pqi_disable_device_id_wildcards &&
9301 id->subvendor == PCI_ANY_ID &&
9302 id->subdevice == PCI_ANY_ID) {
9303 dev_warn(&pci_dev->dev,
9304 "controller not probed because device ID wildcards are disabled\n");
9305 return -ENODEV;
9306 }
9307
9308 if (id->subvendor == PCI_ANY_ID || id->subdevice == PCI_ANY_ID)
9309 dev_warn(&pci_dev->dev,
9310 "controller device ID matched using wildcards\n");
9311
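/* Prefer the device's NUMA node; fall back to CPU 0's node and pin the choice on the device. */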
9312 node = dev_to_node(&pci_dev->dev);
9313 if (node == NUMA_NO_NODE) {
9314 node = cpu_to_node(0);
9315 if (node == NUMA_NO_NODE)
9316 node = 0;
9317 set_dev_node(&pci_dev->dev, node);
9318 }
9319
9320 ctrl_info = pqi_alloc_ctrl_info(node);
9321 if (!ctrl_info) {
9322 dev_err(&pci_dev->dev,
9323 "failed to allocate controller info block\n");
9324 return -ENOMEM;
9325 }
9326 ctrl_info->numa_node = node;
9327
9328 ctrl_info->pci_dev = pci_dev;
9329
9330 rc = pqi_pci_init(ctrl_info);
9331 if (rc)
9332 goto error;
9333
9334 rc = pqi_ctrl_init(ctrl_info);
9335 if (rc)
9336 goto error;
9337
9338 return 0;
9339
9340 error:
9341 pqi_remove_ctrl(ctrl_info);
9342
9343 return rc;
9344 }
9345
9346 static void pqi_pci_remove(struct pci_dev *pci_dev)
9347 {
9348 struct pqi_ctrl_info *ctrl_info;
9349 u16 vendor_id;
9350 int rc;
9351
9352 ctrl_info = pci_get_drvdata(pci_dev);
9353 if (!ctrl_info)
9354 return;
9355
9356 pci_read_config_word(ctrl_info->pci_dev, PCI_SUBSYSTEM_VENDOR_ID, &vendor_id);
9357 if (vendor_id == 0xffff)
9358 ctrl_info->ctrl_removal_state = PQI_CTRL_SURPRISE_REMOVAL;
9359 else
9360 ctrl_info->ctrl_removal_state = PQI_CTRL_GRACEFUL_REMOVAL;
9361
9362 if (ctrl_info->ctrl_removal_state == PQI_CTRL_GRACEFUL_REMOVAL) {
9363 rc = pqi_flush_cache(ctrl_info, RESTART);
9364 if (rc)
9365 dev_err(&pci_dev->dev,
9366 "unable to flush controller cache during remove\n");
9367 }
9368
9369 pqi_remove_ctrl(ctrl_info);
9370 }
9371
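/*
 * Shutdown/suspend sanity check: any io_request still holding a reference
 * here means a command was left pending; the paired WARN_ONs report whether
 * it came from the SCSI midlayer or was driver-initiated.
 */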
9372 static void pqi_crash_if_pending_command(struct pqi_ctrl_info *ctrl_info)
9373 {
9374 unsigned int i;
9375 struct pqi_io_request *io_request;
9376 struct scsi_cmnd *scmd;
9377
9378 for (i = 0; i < ctrl_info->max_io_slots; i++) {
9379 io_request = &ctrl_info->io_request_pool[i];
9380 if (atomic_read(&io_request->refcount) == 0)
9381 continue;
9382 scmd = io_request->scmd;
9383 WARN_ON(scmd != NULL); /* I/O command from the SCSI midlayer */
9384 WARN_ON(scmd == NULL); /* non-I/O or driver-initiated command */
9385 }
9386 }
9387
9388 static void pqi_shutdown(struct pci_dev *pci_dev)
9389 {
9390 int rc;
9391 struct pqi_ctrl_info *ctrl_info;
9392 enum bmic_flush_cache_shutdown_event shutdown_event;
9393
9394 ctrl_info = pci_get_drvdata(pci_dev);
9395 if (!ctrl_info) {
9396 dev_err(&pci_dev->dev,
9397 "cache could not be flushed\n");
9398 return;
9399 }
9400
9401 pqi_wait_until_ofa_finished(ctrl_info);
9402
9403 pqi_scsi_block_requests(ctrl_info);
9404 pqi_ctrl_block_device_reset(ctrl_info);
9405 pqi_ctrl_block_requests(ctrl_info);
9406 pqi_ctrl_wait_until_quiesced(ctrl_info);
9407
9408 if (system_state == SYSTEM_RESTART)
9409 shutdown_event = RESTART;
9410 else
9411 shutdown_event = SHUTDOWN;
9412
9413 /*
9414 * Write all data in the controller's battery-backed cache to
9415 * storage.
9416 */
9417 rc = pqi_flush_cache(ctrl_info, shutdown_event);
9418 if (rc)
9419 dev_err(&pci_dev->dev,
9420 "unable to flush controller cache during shutdown\n");
9421
9422 pqi_crash_if_pending_command(ctrl_info);
9423 pqi_reset(ctrl_info);
9424 }
9425
9426 static void pqi_process_lockup_action_param(void)
9427 {
9428 unsigned int i;
9429
9430 if (!pqi_lockup_action_param)
9431 return;
9432
9433 for (i = 0; i < ARRAY_SIZE(pqi_lockup_actions); i++) {
9434 if (strcmp(pqi_lockup_action_param,
9435 pqi_lockup_actions[i].name) == 0) {
9436 pqi_lockup_action = pqi_lockup_actions[i].action;
9437 return;
9438 }
9439 }
9440
9441 pr_warn("%s: invalid lockup action setting \"%s\" - supported settings: none, reboot, panic\n",
9442 DRIVER_NAME_SHORT, pqi_lockup_action_param);
9443 }
9444
9445 #define PQI_CTRL_READY_TIMEOUT_PARAM_MIN_SECS 30
9446 #define PQI_CTRL_READY_TIMEOUT_PARAM_MAX_SECS (30 * 60)
9447
9448 static void pqi_process_ctrl_ready_timeout_param(void)
9449 {
9450 if (pqi_ctrl_ready_timeout_secs == 0)
9451 return;
9452
9453 if (pqi_ctrl_ready_timeout_secs < PQI_CTRL_READY_TIMEOUT_PARAM_MIN_SECS) {
9454 pr_warn("%s: ctrl_ready_timeout parm of %u second(s) is less than minimum timeout of %d seconds - setting timeout to %d seconds\n",
9455 DRIVER_NAME_SHORT, pqi_ctrl_ready_timeout_secs, PQI_CTRL_READY_TIMEOUT_PARAM_MIN_SECS, PQI_CTRL_READY_TIMEOUT_PARAM_MIN_SECS);
9456 pqi_ctrl_ready_timeout_secs = PQI_CTRL_READY_TIMEOUT_PARAM_MIN_SECS;
9457 } else if (pqi_ctrl_ready_timeout_secs > PQI_CTRL_READY_TIMEOUT_PARAM_MAX_SECS) {
9458 pr_warn("%s: ctrl_ready_timeout parm of %u seconds is greater than maximum timeout of %d seconds - setting timeout to %d seconds\n",
9459 DRIVER_NAME_SHORT, pqi_ctrl_ready_timeout_secs, PQI_CTRL_READY_TIMEOUT_PARAM_MAX_SECS, PQI_CTRL_READY_TIMEOUT_PARAM_MAX_SECS);
9460 pqi_ctrl_ready_timeout_secs = PQI_CTRL_READY_TIMEOUT_PARAM_MAX_SECS;
9461 }
9462
9463 sis_ctrl_ready_timeout_secs = pqi_ctrl_ready_timeout_secs;
9464 }
9465
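/*
 * Both values are module parameters settable at load time, e.g. (parameter
 * names assumed from the module_param declarations elsewhere in this file):
 *
 *   modprobe smartpqi lockup_action=reboot ctrl_ready_timeout=120
 */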
9466 static void pqi_process_module_params(void)
9467 {
9468 pqi_process_lockup_action_param();
9469 pqi_process_ctrl_ready_timeout_param();
9470 }
9471
9472 #if defined(CONFIG_PM)
9473
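/*
 * One board (Adaptec subsystem device 0x1304) is special-cased to flush its
 * cache with the RESTART event across suspend; all others use SUSPEND.
 */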
9474 static inline enum bmic_flush_cache_shutdown_event pqi_get_flush_cache_shutdown_event(struct pci_dev *pci_dev)
9475 {
9476 if (pci_dev->subsystem_vendor == PCI_VENDOR_ID_ADAPTEC2 && pci_dev->subsystem_device == 0x1304)
9477 return RESTART;
9478
9479 return SUSPEND;
9480 }
9481
9482 static int pqi_suspend_or_freeze(struct device *dev, bool suspend)
9483 {
9484 struct pci_dev *pci_dev;
9485 struct pqi_ctrl_info *ctrl_info;
9486
9487 pci_dev = to_pci_dev(dev);
9488 ctrl_info = pci_get_drvdata(pci_dev);
9489
9490 pqi_wait_until_ofa_finished(ctrl_info);
9491
9492 pqi_ctrl_block_scan(ctrl_info);
9493 pqi_scsi_block_requests(ctrl_info);
9494 pqi_ctrl_block_device_reset(ctrl_info);
9495 pqi_ctrl_block_requests(ctrl_info);
9496 pqi_ctrl_wait_until_quiesced(ctrl_info);
9497
9498 if (suspend) {
9499 enum bmic_flush_cache_shutdown_event shutdown_event;
9500
9501 shutdown_event = pqi_get_flush_cache_shutdown_event(pci_dev);
9502 pqi_flush_cache(ctrl_info, shutdown_event);
9503 }
9504
9505 pqi_stop_heartbeat_timer(ctrl_info);
9506 pqi_crash_if_pending_command(ctrl_info);
9507 pqi_free_irqs(ctrl_info);
9508
9509 ctrl_info->controller_online = false;
9510 ctrl_info->pqi_mode_enabled = false;
9511
9512 return 0;
9513 }
9514
9515 static __maybe_unused int pqi_suspend(struct device *dev)
9516 {
9517 return pqi_suspend_or_freeze(dev, true);
9518 }
9519
9520 static int pqi_resume_or_restore(struct device *dev)
9521 {
9522 int rc;
9523 struct pci_dev *pci_dev;
9524 struct pqi_ctrl_info *ctrl_info;
9525
9526 pci_dev = to_pci_dev(dev);
9527 ctrl_info = pci_get_drvdata(pci_dev);
9528
9529 rc = pqi_request_irqs(ctrl_info);
9530 if (rc)
9531 return rc;
9532
9533 pqi_ctrl_unblock_device_reset(ctrl_info);
9534 pqi_ctrl_unblock_requests(ctrl_info);
9535 pqi_scsi_unblock_requests(ctrl_info);
9536 pqi_ctrl_unblock_scan(ctrl_info);
9537
9538 ssleep(PQI_POST_RESET_DELAY_SECS);
9539
9540 return pqi_ctrl_init_resume(ctrl_info);
9541 }
9542
9543 static int pqi_freeze(struct device *dev)
9544 {
9545 return pqi_suspend_or_freeze(dev, false);
9546 }
9547
9548 static int pqi_thaw(struct device *dev)
9549 {
9550 int rc;
9551 struct pci_dev *pci_dev;
9552 struct pqi_ctrl_info *ctrl_info;
9553
9554 pci_dev = to_pci_dev(dev);
9555 ctrl_info = pci_get_drvdata(pci_dev);
9556
9557 rc = pqi_request_irqs(ctrl_info);
9558 if (rc)
9559 return rc;
9560
9561 ctrl_info->controller_online = true;
9562 ctrl_info->pqi_mode_enabled = true;
9563
9564 pqi_ctrl_unblock_device_reset(ctrl_info);
9565 pqi_ctrl_unblock_requests(ctrl_info);
9566 pqi_scsi_unblock_requests(ctrl_info);
9567 pqi_ctrl_unblock_scan(ctrl_info);
9568
9569 return 0;
9570 }
9571
9572 static int pqi_poweroff(struct device *dev)
9573 {
9574 struct pci_dev *pci_dev;
9575 struct pqi_ctrl_info *ctrl_info;
9576 enum bmic_flush_cache_shutdown_event shutdown_event;
9577
9578 pci_dev = to_pci_dev(dev);
9579 ctrl_info = pci_get_drvdata(pci_dev);
9580
9581 shutdown_event = pqi_get_flush_cache_shutdown_event(pci_dev);
9582 pqi_flush_cache(ctrl_info, shutdown_event);
9583
9584 return 0;
9585 }
9586
9587 static const struct dev_pm_ops pqi_pm_ops = {
9588 .suspend = pqi_suspend,
9589 .resume = pqi_resume_or_restore,
9590 .freeze = pqi_freeze,
9591 .thaw = pqi_thaw,
9592 .poweroff = pqi_poweroff,
9593 .restore = pqi_resume_or_restore,
9594 };
9595
9596 #endif /* CONFIG_PM */
9597
9598 /* Define the PCI IDs for the controllers that we support. */
9599 static const struct pci_device_id pqi_pci_id_table[] = {
9600 {
9601 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9602 0x105b, 0x1211)
9603 },
9604 {
9605 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9606 0x105b, 0x1321)
9607 },
9608 {
9609 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9610 0x152d, 0x8a22)
9611 },
9612 {
9613 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9614 0x152d, 0x8a23)
9615 },
9616 {
9617 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9618 0x152d, 0x8a24)
9619 },
9620 {
9621 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9622 0x152d, 0x8a36)
9623 },
9624 {
9625 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9626 0x152d, 0x8a37)
9627 },
9628 {
9629 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9630 0x193d, 0x0462)
9631 },
9632 {
9633 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9634 0x193d, 0x1104)
9635 },
9636 {
9637 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9638 0x193d, 0x1105)
9639 },
9640 {
9641 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9642 0x193d, 0x1106)
9643 },
9644 {
9645 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9646 0x193d, 0x1107)
9647 },
9648 {
9649 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9650 0x193d, 0x1108)
9651 },
9652 {
9653 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9654 0x193d, 0x1109)
9655 },
9656 {
9657 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9658 0x193d, 0x110b)
9659 },
9660 {
9661 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9662 0x193d, 0x1110)
9663 },
9664 {
9665 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9666 0x193d, 0x8460)
9667 },
9668 {
9669 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9670 0x193d, 0x8461)
9671 },
9672 {
9673 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9674 0x193d, 0x8462)
9675 },
9676 {
9677 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9678 0x193d, 0xc460)
9679 },
9680 {
9681 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9682 0x193d, 0xc461)
9683 },
9684 {
9685 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9686 0x193d, 0xf460)
9687 },
9688 {
9689 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9690 0x193d, 0xf461)
9691 },
9692 {
9693 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9694 0x1bd4, 0x0045)
9695 },
9696 {
9697 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9698 0x1bd4, 0x0046)
9699 },
9700 {
9701 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9702 0x1bd4, 0x0047)
9703 },
9704 {
9705 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9706 0x1bd4, 0x0048)
9707 },
9708 {
9709 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9710 0x1bd4, 0x004a)
9711 },
9712 {
9713 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9714 0x1bd4, 0x004b)
9715 },
9716 {
9717 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9718 0x1bd4, 0x004c)
9719 },
9720 {
9721 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9722 0x1bd4, 0x004f)
9723 },
9724 {
9725 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9726 0x1bd4, 0x0051)
9727 },
9728 {
9729 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9730 0x1bd4, 0x0052)
9731 },
9732 {
9733 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9734 0x1bd4, 0x0053)
9735 },
9736 {
9737 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9738 0x1bd4, 0x0054)
9739 },
9740 {
9741 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9742 0x1bd4, 0x006b)
9743 },
9744 {
9745 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9746 0x1bd4, 0x006c)
9747 },
9748 {
9749 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9750 0x1bd4, 0x006d)
9751 },
9752 {
9753 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9754 0x1bd4, 0x006f)
9755 },
9756 {
9757 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9758 0x1bd4, 0x0070)
9759 },
9760 {
9761 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9762 0x1bd4, 0x0071)
9763 },
9764 {
9765 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9766 0x1bd4, 0x0072)
9767 },
9768 {
9769 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9770 0x1bd4, 0x0086)
9771 },
9772 {
9773 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9774 0x1bd4, 0x0087)
9775 },
9776 {
9777 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9778 0x1bd4, 0x0088)
9779 },
9780 {
9781 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9782 0x1bd4, 0x0089)
9783 },
9784 {
9785 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9786 0x1bd4, 0x00a3)
9787 },
9788 {
9789 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9790 0x1ff9, 0x00a1)
9791 },
9792 {
9793 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9794 0x1f3a, 0x0104)
9795 },
9796 {
9797 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9798 0x19e5, 0xd227)
9799 },
9800 {
9801 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9802 0x19e5, 0xd228)
9803 },
9804 {
9805 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9806 0x19e5, 0xd229)
9807 },
9808 {
9809 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9810 0x19e5, 0xd22a)
9811 },
9812 {
9813 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9814 0x19e5, 0xd22b)
9815 },
9816 {
9817 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9818 0x19e5, 0xd22c)
9819 },
9820 {
9821 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9822 PCI_VENDOR_ID_ADAPTEC2, 0x0110)
9823 },
9824 {
9825 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9826 PCI_VENDOR_ID_ADAPTEC2, 0x0608)
9827 },
9828 {
9829 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9830 PCI_VENDOR_ID_ADAPTEC2, 0x0659)
9831 },
9832 {
9833 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9834 PCI_VENDOR_ID_ADAPTEC2, 0x0800)
9835 },
9836 {
9837 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9838 PCI_VENDOR_ID_ADAPTEC2, 0x0801)
9839 },
9840 {
9841 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9842 PCI_VENDOR_ID_ADAPTEC2, 0x0802)
9843 },
9844 {
9845 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9846 PCI_VENDOR_ID_ADAPTEC2, 0x0803)
9847 },
9848 {
9849 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9850 PCI_VENDOR_ID_ADAPTEC2, 0x0804)
9851 },
9852 {
9853 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9854 PCI_VENDOR_ID_ADAPTEC2, 0x0805)
9855 },
9856 {
9857 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9858 PCI_VENDOR_ID_ADAPTEC2, 0x0806)
9859 },
9860 {
9861 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9862 PCI_VENDOR_ID_ADAPTEC2, 0x0807)
9863 },
9864 {
9865 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9866 PCI_VENDOR_ID_ADAPTEC2, 0x0808)
9867 },
9868 {
9869 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9870 PCI_VENDOR_ID_ADAPTEC2, 0x0809)
9871 },
9872 {
9873 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9874 PCI_VENDOR_ID_ADAPTEC2, 0x080a)
9875 },
9876 {
9877 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9878 PCI_VENDOR_ID_ADAPTEC2, 0x0900)
9879 },
9880 {
9881 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9882 PCI_VENDOR_ID_ADAPTEC2, 0x0901)
9883 },
9884 {
9885 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9886 PCI_VENDOR_ID_ADAPTEC2, 0x0902)
9887 },
9888 {
9889 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9890 PCI_VENDOR_ID_ADAPTEC2, 0x0903)
9891 },
9892 {
9893 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9894 PCI_VENDOR_ID_ADAPTEC2, 0x0904)
9895 },
9896 {
9897 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9898 PCI_VENDOR_ID_ADAPTEC2, 0x0905)
9899 },
9900 {
9901 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9902 PCI_VENDOR_ID_ADAPTEC2, 0x0906)
9903 },
9904 {
9905 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9906 PCI_VENDOR_ID_ADAPTEC2, 0x0907)
9907 },
9908 {
9909 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9910 PCI_VENDOR_ID_ADAPTEC2, 0x0908)
9911 },
9912 {
9913 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9914 PCI_VENDOR_ID_ADAPTEC2, 0x090a)
9915 },
9916 {
9917 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9918 PCI_VENDOR_ID_ADAPTEC2, 0x1200)
9919 },
9920 {
9921 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9922 PCI_VENDOR_ID_ADAPTEC2, 0x1201)
9923 },
9924 {
9925 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9926 PCI_VENDOR_ID_ADAPTEC2, 0x1202)
9927 },
9928 {
9929 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9930 PCI_VENDOR_ID_ADAPTEC2, 0x1280)
9931 },
9932 {
9933 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9934 PCI_VENDOR_ID_ADAPTEC2, 0x1281)
9935 },
9936 {
9937 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9938 PCI_VENDOR_ID_ADAPTEC2, 0x1282)
9939 },
9940 {
9941 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9942 PCI_VENDOR_ID_ADAPTEC2, 0x1300)
9943 },
9944 {
9945 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9946 PCI_VENDOR_ID_ADAPTEC2, 0x1301)
9947 },
9948 {
9949 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9950 PCI_VENDOR_ID_ADAPTEC2, 0x1302)
9951 },
9952 {
9953 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9954 PCI_VENDOR_ID_ADAPTEC2, 0x1303)
9955 },
9956 {
9957 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9958 PCI_VENDOR_ID_ADAPTEC2, 0x1304)
9959 },
9960 {
9961 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9962 PCI_VENDOR_ID_ADAPTEC2, 0x1380)
9963 },
9964 {
9965 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9966 PCI_VENDOR_ID_ADAPTEC2, 0x1400)
9967 },
9968 {
9969 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9970 PCI_VENDOR_ID_ADAPTEC2, 0x1402)
9971 },
9972 {
9973 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9974 PCI_VENDOR_ID_ADAPTEC2, 0x1410)
9975 },
9976 {
9977 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9978 PCI_VENDOR_ID_ADAPTEC2, 0x1411)
9979 },
9980 {
9981 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9982 PCI_VENDOR_ID_ADAPTEC2, 0x1412)
9983 },
9984 {
9985 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9986 PCI_VENDOR_ID_ADAPTEC2, 0x1420)
9987 },
9988 {
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			       PCI_VENDOR_ID_ADAPTEC2, 0x1430)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			       PCI_VENDOR_ID_ADAPTEC2, 0x1440)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			       PCI_VENDOR_ID_ADAPTEC2, 0x1441)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			       PCI_VENDOR_ID_ADAPTEC2, 0x1450)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			       PCI_VENDOR_ID_ADAPTEC2, 0x1452)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			       PCI_VENDOR_ID_ADAPTEC2, 0x1460)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			       PCI_VENDOR_ID_ADAPTEC2, 0x1461)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			       PCI_VENDOR_ID_ADAPTEC2, 0x1462)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			       PCI_VENDOR_ID_ADAPTEC2, 0x1463)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			       PCI_VENDOR_ID_ADAPTEC2, 0x1470)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			       PCI_VENDOR_ID_ADAPTEC2, 0x1471)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			       PCI_VENDOR_ID_ADAPTEC2, 0x1472)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			       PCI_VENDOR_ID_ADAPTEC2, 0x1473)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			       PCI_VENDOR_ID_ADAPTEC2, 0x1474)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			       PCI_VENDOR_ID_ADAPTEC2, 0x1475)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			       PCI_VENDOR_ID_ADAPTEC2, 0x1480)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			       PCI_VENDOR_ID_ADAPTEC2, 0x1490)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			       PCI_VENDOR_ID_ADAPTEC2, 0x1491)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			       PCI_VENDOR_ID_ADAPTEC2, 0x14a0)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			       PCI_VENDOR_ID_ADAPTEC2, 0x14a1)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			       PCI_VENDOR_ID_ADAPTEC2, 0x14a2)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			       PCI_VENDOR_ID_ADAPTEC2, 0x14a4)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			       PCI_VENDOR_ID_ADAPTEC2, 0x14a5)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			       PCI_VENDOR_ID_ADAPTEC2, 0x14a6)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			       PCI_VENDOR_ID_ADAPTEC2, 0x14b0)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			       PCI_VENDOR_ID_ADAPTEC2, 0x14b1)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			       PCI_VENDOR_ID_ADAPTEC2, 0x14c0)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			       PCI_VENDOR_ID_ADAPTEC2, 0x14c1)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			       PCI_VENDOR_ID_ADAPTEC2, 0x14c2)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			       PCI_VENDOR_ID_ADAPTEC2, 0x14c3)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			       PCI_VENDOR_ID_ADAPTEC2, 0x14c4)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			       PCI_VENDOR_ID_ADAPTEC2, 0x14d0)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			       PCI_VENDOR_ID_ADAPTEC2, 0x14e0)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			       PCI_VENDOR_ID_ADAPTEC2, 0x14f0)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			       0x207d, 0x4044)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			       0x207d, 0x4054)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			       0x207d, 0x4084)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			       0x207d, 0x4094)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			       0x207d, 0x4140)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			       0x207d, 0x4240)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			       0x207d, 0x4840)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			       PCI_VENDOR_ID_ADVANTECH, 0x8312)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			       PCI_VENDOR_ID_DELL, 0x1fe0)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			       PCI_VENDOR_ID_HP, 0x0600)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			       PCI_VENDOR_ID_HP, 0x0601)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			       PCI_VENDOR_ID_HP, 0x0602)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			       PCI_VENDOR_ID_HP, 0x0603)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			       PCI_VENDOR_ID_HP, 0x0609)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			       PCI_VENDOR_ID_HP, 0x0650)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			       PCI_VENDOR_ID_HP, 0x0651)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			       PCI_VENDOR_ID_HP, 0x0652)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			       PCI_VENDOR_ID_HP, 0x0653)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			       PCI_VENDOR_ID_HP, 0x0654)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			       PCI_VENDOR_ID_HP, 0x0655)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			       PCI_VENDOR_ID_HP, 0x0700)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			       PCI_VENDOR_ID_HP, 0x0701)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			       PCI_VENDOR_ID_HP, 0x1001)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			       PCI_VENDOR_ID_HP, 0x1002)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			       PCI_VENDOR_ID_HP, 0x1100)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			       PCI_VENDOR_ID_HP, 0x1101)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			       0x1590, 0x0294)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			       0x1590, 0x02db)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			       0x1590, 0x02dc)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			       0x1590, 0x032e)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			       0x1590, 0x036f)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			       0x1590, 0x0381)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			       0x1590, 0x0382)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			       0x1590, 0x0383)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			       0x1d8d, 0x0800)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			       0x1d8d, 0x0908)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			       0x1d8d, 0x0806)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			       0x1d8d, 0x0916)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			       PCI_VENDOR_ID_GIGABYTE, 0x1000)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			       0x1dfc, 0x3161)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			       0x1f0c, 0x3161)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			       0x1cf2, 0x0804)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			       0x1cf2, 0x0805)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			       0x1cf2, 0x0806)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			       0x1cf2, 0x5445)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			       0x1cf2, 0x5446)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			       0x1cf2, 0x5447)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			       0x1cf2, 0x5449)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			       0x1cf2, 0x544a)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			       0x1cf2, 0x544b)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			       0x1cf2, 0x544d)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			       0x1cf2, 0x544e)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			       0x1cf2, 0x544f)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			       0x1cf2, 0x54da)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			       0x1cf2, 0x54db)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			       0x1cf2, 0x54dc)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			       0x1cf2, 0x0b27)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			       0x1cf2, 0x0b29)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			       0x1cf2, 0x0b45)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			       0x1cc4, 0x0101)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			       0x1cc4, 0x0201)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			       0x1018, 0x8238)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			       0x1f3f, 0x0610)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			       PCI_VENDOR_ID_LENOVO, 0x0220)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			       PCI_VENDOR_ID_LENOVO, 0x0221)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			       PCI_VENDOR_ID_LENOVO, 0x0222)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			       PCI_VENDOR_ID_LENOVO, 0x0223)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			       PCI_VENDOR_ID_LENOVO, 0x0224)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			       PCI_VENDOR_ID_LENOVO, 0x0225)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			       PCI_VENDOR_ID_LENOVO, 0x0520)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			       PCI_VENDOR_ID_LENOVO, 0x0521)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			       PCI_VENDOR_ID_LENOVO, 0x0522)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			       PCI_VENDOR_ID_LENOVO, 0x0620)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			       PCI_VENDOR_ID_LENOVO, 0x0621)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			       PCI_VENDOR_ID_LENOVO, 0x0622)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			       PCI_VENDOR_ID_LENOVO, 0x0623)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			       PCI_VENDOR_ID_LENOVO, 0x0624)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			       PCI_VENDOR_ID_LENOVO, 0x0625)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			       PCI_VENDOR_ID_LENOVO, 0x0626)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			       PCI_VENDOR_ID_LENOVO, 0x0627)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			       PCI_VENDOR_ID_LENOVO, 0x0628)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			       0x1014, 0x0718)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			       0x1137, 0x02f8)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			       0x1137, 0x02f9)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			       0x1137, 0x02fa)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			       0x1137, 0x02fe)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			       0x1137, 0x02ff)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			       0x1137, 0x0300)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			       0x1ded, 0x3301)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			       0x1ff9, 0x0045)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			       0x1ff9, 0x0046)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			       0x1ff9, 0x0047)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			       0x1ff9, 0x0048)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			       0x1ff9, 0x004a)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			       0x1ff9, 0x004b)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			       0x1ff9, 0x004c)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			       0x1ff9, 0x004f)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			       0x1ff9, 0x0051)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			       0x1ff9, 0x0052)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			       0x1ff9, 0x0053)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			       0x1ff9, 0x0054)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			       0x1ff9, 0x006b)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			       0x1ff9, 0x006c)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			       0x1ff9, 0x006d)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			       0x1ff9, 0x006f)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			       0x1ff9, 0x0070)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			       0x1ff9, 0x0071)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			       0x1ff9, 0x0072)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			       0x1ff9, 0x0086)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			       0x1ff9, 0x0087)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			       0x1ff9, 0x0088)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			       0x1ff9, 0x0089)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			       0x1e93, 0x1000)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			       0x1e93, 0x1001)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			       0x1e93, 0x1002)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			       0x1e93, 0x1005)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			       0x1f51, 0x1001)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			       0x1f51, 0x1002)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			       0x1f51, 0x1003)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			       0x1f51, 0x1004)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			       0x1f51, 0x1005)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			       0x1f51, 0x1006)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			       0x1f51, 0x1007)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			       0x1f51, 0x1008)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			       0x1f51, 0x1009)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			       0x1f51, 0x100a)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			       0x1f51, 0x100b)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			       0x1f51, 0x100e)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			       0x1f51, 0x100f)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			       0x1f51, 0x1010)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			       0x1f51, 0x1011)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			       0x1f51, 0x1043)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			       0x1f51, 0x1044)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			       0x1f51, 0x1045)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			       0x1ff9, 0x00a3)
	},
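	/*
	 * Catch-all: match any other subsystem IDs on the 0x028f chip.
	 * pci_match_id() takes the first hit, so this entry must stay
	 * last, just ahead of the table terminator.
	 */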
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			       PCI_ANY_ID, PCI_ANY_ID)
	},
	{ 0 }
};

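/*
 * Export the ID table so module aliases are generated and the module
 * can be autoloaded when a matching controller is found.
 */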
MODULE_DEVICE_TABLE(pci, pqi_pci_id_table);

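/*
 * PCI driver glue: probe/remove/shutdown entry points, plus
 * power-management callbacks when the kernel is built with CONFIG_PM.
 */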
static struct pci_driver pqi_pci_driver = {
	.name = DRIVER_NAME_SHORT,
	.id_table = pqi_pci_id_table,
	.probe = pqi_pci_probe,
	.remove = pqi_pci_remove,
	.shutdown = pqi_shutdown,
#if defined(CONFIG_PM)
	.driver = {
		.pm = &pqi_pm_ops
	},
#endif
};

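/*
 * Module entry point: verify structure layouts, attach the SAS transport
 * template, apply module parameters, then register the PCI driver. The
 * transport template is released again if driver registration fails.
 */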
static int __init pqi_init(void)
{
	int rc;

	pr_info(DRIVER_NAME "\n");
	pqi_verify_structures();
	sis_verify_structures();

	pqi_sas_transport_template = sas_attach_transport(&pqi_sas_transport_functions);
	if (!pqi_sas_transport_template)
		return -ENODEV;

	pqi_process_module_params();

	rc = pci_register_driver(&pqi_pci_driver);
	if (rc)
		sas_release_transport(pqi_sas_transport_template);

	return rc;
}

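/*
 * Module exit: tear down in reverse order of pqi_init(), unregistering
 * the PCI driver first, then releasing the SAS transport template.
 */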
static void __exit pqi_cleanup(void)
{
	pci_unregister_driver(&pqi_pci_driver);
	sas_release_transport(pqi_sas_transport_template);
}

module_init(pqi_init);
module_exit(pqi_cleanup);

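/*
 * Compile-time layout checks: BUILD_BUG_ON() fails the build if any of
 * these structure offsets or sizes drifts from the on-the-wire layout
 * that the controller firmware expects.
 */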
static void pqi_verify_structures(void)
{
	BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers,
		sis_host_to_ctrl_doorbell) != 0x20);
	BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers,
		sis_interrupt_mask) != 0x34);
	BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers,
		sis_ctrl_to_host_doorbell) != 0x9c);
	BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers,
		sis_ctrl_to_host_doorbell_clear) != 0xa0);
	BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers,
		sis_driver_scratch) != 0xb0);
	BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers,
		sis_product_identifier) != 0xb4);
	BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers,
		sis_firmware_status) != 0xbc);
	BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers,
		sis_ctrl_shutdown_reason_code) != 0xcc);
	BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers,
		sis_mailbox) != 0x1000);
	BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers,
		pqi_registers) != 0x4000);

	BUILD_BUG_ON(offsetof(struct pqi_iu_header,
		iu_type) != 0x0);
	BUILD_BUG_ON(offsetof(struct pqi_iu_header,
		iu_length) != 0x2);
	BUILD_BUG_ON(offsetof(struct pqi_iu_header,
		response_queue_id) != 0x4);
	BUILD_BUG_ON(offsetof(struct pqi_iu_header,
		driver_flags) != 0x6);
	BUILD_BUG_ON(sizeof(struct pqi_iu_header) != 0x8);

	BUILD_BUG_ON(offsetof(struct pqi_aio_error_info,
		status) != 0x0);
	BUILD_BUG_ON(offsetof(struct pqi_aio_error_info,
		service_response) != 0x1);
	BUILD_BUG_ON(offsetof(struct pqi_aio_error_info,
		data_present) != 0x2);
	BUILD_BUG_ON(offsetof(struct pqi_aio_error_info,
		reserved) != 0x3);
	BUILD_BUG_ON(offsetof(struct pqi_aio_error_info,
		residual_count) != 0x4);
	BUILD_BUG_ON(offsetof(struct pqi_aio_error_info,
		data_length) != 0x8);
	BUILD_BUG_ON(offsetof(struct pqi_aio_error_info,
		reserved1) != 0xa);
	BUILD_BUG_ON(offsetof(struct pqi_aio_error_info,
		data) != 0xc);
	BUILD_BUG_ON(sizeof(struct pqi_aio_error_info) != 0x10c);

	BUILD_BUG_ON(offsetof(struct pqi_raid_error_info,
		data_in_result) != 0x0);
	BUILD_BUG_ON(offsetof(struct pqi_raid_error_info,
		data_out_result) != 0x1);
	BUILD_BUG_ON(offsetof(struct pqi_raid_error_info,
		reserved) != 0x2);
	BUILD_BUG_ON(offsetof(struct pqi_raid_error_info,
		status) != 0x5);
	BUILD_BUG_ON(offsetof(struct pqi_raid_error_info,
		status_qualifier) != 0x6);
	BUILD_BUG_ON(offsetof(struct pqi_raid_error_info,
		sense_data_length) != 0x8);
	BUILD_BUG_ON(offsetof(struct pqi_raid_error_info,
		response_data_length) != 0xa);
	BUILD_BUG_ON(offsetof(struct pqi_raid_error_info,
		data_in_transferred) != 0xc);
	BUILD_BUG_ON(offsetof(struct pqi_raid_error_info,
		data_out_transferred) != 0x10);
	BUILD_BUG_ON(offsetof(struct pqi_raid_error_info,
		data) != 0x14);
	BUILD_BUG_ON(sizeof(struct pqi_raid_error_info) != 0x114);

	BUILD_BUG_ON(offsetof(struct pqi_device_registers,
		signature) != 0x0);
	BUILD_BUG_ON(offsetof(struct pqi_device_registers,
		function_and_status_code) != 0x8);
	BUILD_BUG_ON(offsetof(struct pqi_device_registers,
		max_admin_iq_elements) != 0x10);
	BUILD_BUG_ON(offsetof(struct pqi_device_registers,
		max_admin_oq_elements) != 0x11);
	BUILD_BUG_ON(offsetof(struct pqi_device_registers,
		admin_iq_element_length) != 0x12);
	BUILD_BUG_ON(offsetof(struct pqi_device_registers,
		admin_oq_element_length) != 0x13);
	BUILD_BUG_ON(offsetof(struct pqi_device_registers,
		max_reset_timeout) != 0x14);
	BUILD_BUG_ON(offsetof(struct pqi_device_registers,
		legacy_intx_status) != 0x18);
	BUILD_BUG_ON(offsetof(struct pqi_device_registers,
		legacy_intx_mask_set) != 0x1c);
	BUILD_BUG_ON(offsetof(struct pqi_device_registers,
		legacy_intx_mask_clear) != 0x20);
	BUILD_BUG_ON(offsetof(struct pqi_device_registers,
		device_status) != 0x40);
	BUILD_BUG_ON(offsetof(struct pqi_device_registers,
		admin_iq_pi_offset) != 0x48);
	BUILD_BUG_ON(offsetof(struct pqi_device_registers,
		admin_oq_ci_offset) != 0x50);
	BUILD_BUG_ON(offsetof(struct pqi_device_registers,
		admin_iq_element_array_addr) != 0x58);
	BUILD_BUG_ON(offsetof(struct pqi_device_registers,
		admin_oq_element_array_addr) != 0x60);
	BUILD_BUG_ON(offsetof(struct pqi_device_registers,
		admin_iq_ci_addr) != 0x68);
	BUILD_BUG_ON(offsetof(struct pqi_device_registers,
		admin_oq_pi_addr) != 0x70);
	BUILD_BUG_ON(offsetof(struct pqi_device_registers,
		admin_iq_num_elements) != 0x78);
	BUILD_BUG_ON(offsetof(struct pqi_device_registers,
		admin_oq_num_elements) != 0x79);
	BUILD_BUG_ON(offsetof(struct pqi_device_registers,
		admin_queue_int_msg_num) != 0x7a);
	BUILD_BUG_ON(offsetof(struct pqi_device_registers,
		device_error) != 0x80);
	BUILD_BUG_ON(offsetof(struct pqi_device_registers,
		error_details) != 0x88);
	BUILD_BUG_ON(offsetof(struct pqi_device_registers,
		device_reset) != 0x90);
	BUILD_BUG_ON(offsetof(struct pqi_device_registers,
		power_action) != 0x94);
	BUILD_BUG_ON(sizeof(struct pqi_device_registers) != 0x100);

	BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
		header.iu_type) != 0);
	BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
		header.iu_length) != 2);
	BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
		header.driver_flags) != 6);
	BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
		request_id) != 8);
	BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
		function_code) != 10);
	BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
		data.report_device_capability.buffer_length) != 44);
	BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
		data.report_device_capability.sg_descriptor) != 48);
	BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
		data.create_operational_iq.queue_id) != 12);
	BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
		data.create_operational_iq.element_array_addr) != 16);
	BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
		data.create_operational_iq.ci_addr) != 24);
	BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
		data.create_operational_iq.num_elements) != 32);
	BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
		data.create_operational_iq.element_length) != 34);
	BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
		data.create_operational_iq.queue_protocol) != 36);
	BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
		data.create_operational_oq.queue_id) != 12);
	BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
		data.create_operational_oq.element_array_addr) != 16);
	BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
		data.create_operational_oq.pi_addr) != 24);
	BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
		data.create_operational_oq.num_elements) != 32);
	BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
		data.create_operational_oq.element_length) != 34);
	BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
		data.create_operational_oq.queue_protocol) != 36);
	BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
		data.create_operational_oq.int_msg_num) != 40);
	BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
		data.create_operational_oq.coalescing_count) != 42);
	BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
		data.create_operational_oq.min_coalescing_time) != 44);
	BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
		data.create_operational_oq.max_coalescing_time) != 48);
	BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
		data.delete_operational_queue.queue_id) != 12);
	BUILD_BUG_ON(sizeof(struct pqi_general_admin_request) != 64);
	BUILD_BUG_ON(sizeof_field(struct pqi_general_admin_request,
		data.create_operational_iq) != 64 - 11);
	BUILD_BUG_ON(sizeof_field(struct pqi_general_admin_request,
		data.create_operational_oq) != 64 - 11);
	BUILD_BUG_ON(sizeof_field(struct pqi_general_admin_request,
		data.delete_operational_queue) != 64 - 11);

	BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
		header.iu_type) != 0);
	BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
		header.iu_length) != 2);
	BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
		header.driver_flags) != 6);
	BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
		request_id) != 8);
	BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
		function_code) != 10);
	BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
		status) != 11);
	BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
		data.create_operational_iq.status_descriptor) != 12);
	BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
		data.create_operational_iq.iq_pi_offset) != 16);
	BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
		data.create_operational_oq.status_descriptor) != 12);
	BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
		data.create_operational_oq.oq_ci_offset) != 16);
	BUILD_BUG_ON(sizeof(struct pqi_general_admin_response) != 64);

	BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
		header.iu_type) != 0);
	BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
		header.iu_length) != 2);
	BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
		header.response_queue_id) != 4);
	BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
		header.driver_flags) != 6);
	BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
		request_id) != 8);
	BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
		nexus_id) != 10);
	BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
		buffer_length) != 12);
	BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
		lun_number) != 16);
	BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
		protocol_specific) != 24);
	BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
		error_index) != 27);
	BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
		cdb) != 32);
	BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
		timeout) != 60);
	BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
		sg_descriptors) != 64);
	BUILD_BUG_ON(sizeof(struct pqi_raid_path_request) !=
		PQI_OPERATIONAL_IQ_ELEMENT_LENGTH);

	BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
		header.iu_type) != 0);
	BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
		header.iu_length) != 2);
	BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
		header.response_queue_id) != 4);
	BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
		header.driver_flags) != 6);
	BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
		request_id) != 8);
	BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
		nexus_id) != 12);
	BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
		buffer_length) != 16);
	BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
		data_encryption_key_index) != 22);
	BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
		encrypt_tweak_lower) != 24);
	BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
		encrypt_tweak_upper) != 28);
	BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
		cdb) != 32);
	BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
		error_index) != 48);
	BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
		num_sg_descriptors) != 50);
	BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
		cdb_length) != 51);
	BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
		lun_number) != 52);
	BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
		sg_descriptors) != 64);
	BUILD_BUG_ON(sizeof(struct pqi_aio_path_request) !=
		PQI_OPERATIONAL_IQ_ELEMENT_LENGTH);

	BUILD_BUG_ON(offsetof(struct pqi_io_response,
		header.iu_type) != 0);
	BUILD_BUG_ON(offsetof(struct pqi_io_response,
		header.iu_length) != 2);
	BUILD_BUG_ON(offsetof(struct pqi_io_response,
		request_id) != 8);
	BUILD_BUG_ON(offsetof(struct pqi_io_response,
		error_index) != 10);

	BUILD_BUG_ON(offsetof(struct pqi_general_management_request,
		header.iu_type) != 0);
	BUILD_BUG_ON(offsetof(struct pqi_general_management_request,
		header.iu_length) != 2);
	BUILD_BUG_ON(offsetof(struct pqi_general_management_request,
		header.response_queue_id) != 4);
	BUILD_BUG_ON(offsetof(struct pqi_general_management_request,
		request_id) != 8);
	BUILD_BUG_ON(offsetof(struct pqi_general_management_request,
		data.report_event_configuration.buffer_length) != 12);
	BUILD_BUG_ON(offsetof(struct pqi_general_management_request,
		data.report_event_configuration.sg_descriptors) != 16);
	BUILD_BUG_ON(offsetof(struct pqi_general_management_request,
		data.set_event_configuration.global_event_oq_id) != 10);
	BUILD_BUG_ON(offsetof(struct pqi_general_management_request,
		data.set_event_configuration.buffer_length) != 12);
	BUILD_BUG_ON(offsetof(struct pqi_general_management_request,
		data.set_event_configuration.sg_descriptors) != 16);

	BUILD_BUG_ON(offsetof(struct pqi_iu_layer_descriptor,
		max_inbound_iu_length) != 6);
	BUILD_BUG_ON(offsetof(struct pqi_iu_layer_descriptor,
		max_outbound_iu_length) != 14);
	BUILD_BUG_ON(sizeof(struct pqi_iu_layer_descriptor) != 16);

	BUILD_BUG_ON(offsetof(struct pqi_device_capability,
		data_length) != 0);
	BUILD_BUG_ON(offsetof(struct pqi_device_capability,
		iq_arbitration_priority_support_bitmask) != 8);
	BUILD_BUG_ON(offsetof(struct pqi_device_capability,
		maximum_aw_a) != 9);
	BUILD_BUG_ON(offsetof(struct pqi_device_capability,
		maximum_aw_b) != 10);
	BUILD_BUG_ON(offsetof(struct pqi_device_capability,
		maximum_aw_c) != 11);
	BUILD_BUG_ON(offsetof(struct pqi_device_capability,
		max_inbound_queues) != 16);
	BUILD_BUG_ON(offsetof(struct pqi_device_capability,
		max_elements_per_iq) != 18);
	BUILD_BUG_ON(offsetof(struct pqi_device_capability,
		max_iq_element_length) != 24);
	BUILD_BUG_ON(offsetof(struct pqi_device_capability,
		min_iq_element_length) != 26);
	BUILD_BUG_ON(offsetof(struct pqi_device_capability,
		max_outbound_queues) != 30);
	BUILD_BUG_ON(offsetof(struct pqi_device_capability,
		max_elements_per_oq) != 32);
	BUILD_BUG_ON(offsetof(struct pqi_device_capability,
		intr_coalescing_time_granularity) != 34);
	BUILD_BUG_ON(offsetof(struct pqi_device_capability,
		max_oq_element_length) != 36);
	BUILD_BUG_ON(offsetof(struct pqi_device_capability,
		min_oq_element_length) != 38);
	BUILD_BUG_ON(offsetof(struct pqi_device_capability,
		iu_layer_descriptors) != 64);
	BUILD_BUG_ON(sizeof(struct pqi_device_capability) != 576);

	BUILD_BUG_ON(offsetof(struct pqi_event_descriptor,
		event_type) != 0);
	BUILD_BUG_ON(offsetof(struct pqi_event_descriptor,
		oq_id) != 2);
	BUILD_BUG_ON(sizeof(struct pqi_event_descriptor) != 4);

	BUILD_BUG_ON(offsetof(struct pqi_event_config,
		num_event_descriptors) != 2);
	BUILD_BUG_ON(offsetof(struct pqi_event_config,
		descriptors) != 4);

	BUILD_BUG_ON(PQI_NUM_SUPPORTED_EVENTS !=
		ARRAY_SIZE(pqi_supported_event_types));

	BUILD_BUG_ON(offsetof(struct pqi_event_response,
		header.iu_type) != 0);
	BUILD_BUG_ON(offsetof(struct pqi_event_response,
		header.iu_length) != 2);
	BUILD_BUG_ON(offsetof(struct pqi_event_response,
		event_type) != 8);
	BUILD_BUG_ON(offsetof(struct pqi_event_response,
		event_id) != 10);
	BUILD_BUG_ON(offsetof(struct pqi_event_response,
		additional_event_id) != 12);
	BUILD_BUG_ON(offsetof(struct pqi_event_response,
		data) != 16);
	BUILD_BUG_ON(sizeof(struct pqi_event_response) != 32);

	BUILD_BUG_ON(offsetof(struct pqi_event_acknowledge_request,
		header.iu_type) != 0);
	BUILD_BUG_ON(offsetof(struct pqi_event_acknowledge_request,
		header.iu_length) != 2);
	BUILD_BUG_ON(offsetof(struct pqi_event_acknowledge_request,
		event_type) != 8);
	BUILD_BUG_ON(offsetof(struct pqi_event_acknowledge_request,
		event_id) != 10);
	BUILD_BUG_ON(offsetof(struct pqi_event_acknowledge_request,
		additional_event_id) != 12);
	BUILD_BUG_ON(sizeof(struct pqi_event_acknowledge_request) != 16);

	BUILD_BUG_ON(offsetof(struct pqi_task_management_request,
		header.iu_type) != 0);
	BUILD_BUG_ON(offsetof(struct pqi_task_management_request,
		header.iu_length) != 2);
	BUILD_BUG_ON(offsetof(struct pqi_task_management_request,
		request_id) != 8);
	BUILD_BUG_ON(offsetof(struct pqi_task_management_request,
		nexus_id) != 10);
	BUILD_BUG_ON(offsetof(struct pqi_task_management_request,
		timeout) != 14);
	BUILD_BUG_ON(offsetof(struct pqi_task_management_request,
		lun_number) != 16);
	BUILD_BUG_ON(offsetof(struct pqi_task_management_request,
		protocol_specific) != 24);
	BUILD_BUG_ON(offsetof(struct pqi_task_management_request,
		outbound_queue_id_to_manage) != 26);
	BUILD_BUG_ON(offsetof(struct pqi_task_management_request,
		request_id_to_manage) != 28);
	BUILD_BUG_ON(offsetof(struct pqi_task_management_request,
		task_management_function) != 30);
	BUILD_BUG_ON(sizeof(struct pqi_task_management_request) != 32);

	BUILD_BUG_ON(offsetof(struct pqi_task_management_response,
		header.iu_type) != 0);
	BUILD_BUG_ON(offsetof(struct pqi_task_management_response,
		header.iu_length) != 2);
	BUILD_BUG_ON(offsetof(struct pqi_task_management_response,
		request_id) != 8);
	BUILD_BUG_ON(offsetof(struct pqi_task_management_response,
		nexus_id) != 10);
	BUILD_BUG_ON(offsetof(struct pqi_task_management_response,
		additional_response_info) != 12);
	BUILD_BUG_ON(offsetof(struct pqi_task_management_response,
		response_code) != 15);
	BUILD_BUG_ON(sizeof(struct pqi_task_management_response) != 16);

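	/* The checks below cover the BMIC management command structures. */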
	BUILD_BUG_ON(offsetof(struct bmic_identify_controller,
		configured_logical_drive_count) != 0);
	BUILD_BUG_ON(offsetof(struct bmic_identify_controller,
		configuration_signature) != 1);
	BUILD_BUG_ON(offsetof(struct bmic_identify_controller,
		firmware_version_short) != 5);
	BUILD_BUG_ON(offsetof(struct bmic_identify_controller,
		extended_logical_unit_count) != 154);
	BUILD_BUG_ON(offsetof(struct bmic_identify_controller,
		firmware_build_number) != 190);
	BUILD_BUG_ON(offsetof(struct bmic_identify_controller,
		vendor_id) != 200);
	BUILD_BUG_ON(offsetof(struct bmic_identify_controller,
		product_id) != 208);
	BUILD_BUG_ON(offsetof(struct bmic_identify_controller,
		extra_controller_flags) != 286);
	BUILD_BUG_ON(offsetof(struct bmic_identify_controller,
		controller_mode) != 292);
	BUILD_BUG_ON(offsetof(struct bmic_identify_controller,
		spare_part_number) != 293);
	BUILD_BUG_ON(offsetof(struct bmic_identify_controller,
		firmware_version_long) != 325);

	BUILD_BUG_ON(offsetof(struct bmic_identify_physical_device,
		phys_bay_in_box) != 115);
	BUILD_BUG_ON(offsetof(struct bmic_identify_physical_device,
		device_type) != 120);
	BUILD_BUG_ON(offsetof(struct bmic_identify_physical_device,
		redundant_path_present_map) != 1736);
	BUILD_BUG_ON(offsetof(struct bmic_identify_physical_device,
		active_path_number) != 1738);
	BUILD_BUG_ON(offsetof(struct bmic_identify_physical_device,
		alternate_paths_phys_connector) != 1739);
	BUILD_BUG_ON(offsetof(struct bmic_identify_physical_device,
		alternate_paths_phys_box_on_port) != 1755);
	BUILD_BUG_ON(offsetof(struct bmic_identify_physical_device,
		current_queue_depth_limit) != 1796);
	BUILD_BUG_ON(sizeof(struct bmic_identify_physical_device) != 2560);

	BUILD_BUG_ON(sizeof(struct bmic_sense_feature_buffer_header) != 4);
	BUILD_BUG_ON(offsetof(struct bmic_sense_feature_buffer_header,
		page_code) != 0);
	BUILD_BUG_ON(offsetof(struct bmic_sense_feature_buffer_header,
		subpage_code) != 1);
	BUILD_BUG_ON(offsetof(struct bmic_sense_feature_buffer_header,
		buffer_length) != 2);

	BUILD_BUG_ON(sizeof(struct bmic_sense_feature_page_header) != 4);
	BUILD_BUG_ON(offsetof(struct bmic_sense_feature_page_header,
		page_code) != 0);
	BUILD_BUG_ON(offsetof(struct bmic_sense_feature_page_header,
		subpage_code) != 1);
	BUILD_BUG_ON(offsetof(struct bmic_sense_feature_page_header,
		page_length) != 2);

	BUILD_BUG_ON(sizeof(struct bmic_sense_feature_io_page_aio_subpage)
		!= 18);
	BUILD_BUG_ON(offsetof(struct bmic_sense_feature_io_page_aio_subpage,
		header) != 0);
	BUILD_BUG_ON(offsetof(struct bmic_sense_feature_io_page_aio_subpage,
		firmware_read_support) != 4);
	BUILD_BUG_ON(offsetof(struct bmic_sense_feature_io_page_aio_subpage,
		driver_read_support) != 5);
	BUILD_BUG_ON(offsetof(struct bmic_sense_feature_io_page_aio_subpage,
		firmware_write_support) != 6);
	BUILD_BUG_ON(offsetof(struct bmic_sense_feature_io_page_aio_subpage,
		driver_write_support) != 7);
	BUILD_BUG_ON(offsetof(struct bmic_sense_feature_io_page_aio_subpage,
		max_transfer_encrypted_sas_sata) != 8);
	BUILD_BUG_ON(offsetof(struct bmic_sense_feature_io_page_aio_subpage,
		max_transfer_encrypted_nvme) != 10);
	BUILD_BUG_ON(offsetof(struct bmic_sense_feature_io_page_aio_subpage,
		max_write_raid_5_6) != 12);
	BUILD_BUG_ON(offsetof(struct bmic_sense_feature_io_page_aio_subpage,
		max_write_raid_1_10_2drive) != 14);
	BUILD_BUG_ON(offsetof(struct bmic_sense_feature_io_page_aio_subpage,
		max_write_raid_1_10_3drive) != 16);

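	/*
	 * Queue geometry checks: admin and operational queue element
	 * counts, lengths, and alignment must stay within PQI limits.
	 */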
	BUILD_BUG_ON(PQI_ADMIN_IQ_NUM_ELEMENTS > 255);
	BUILD_BUG_ON(PQI_ADMIN_OQ_NUM_ELEMENTS > 255);
	BUILD_BUG_ON(PQI_ADMIN_IQ_ELEMENT_LENGTH %
		PQI_QUEUE_ELEMENT_LENGTH_ALIGNMENT != 0);
	BUILD_BUG_ON(PQI_ADMIN_OQ_ELEMENT_LENGTH %
		PQI_QUEUE_ELEMENT_LENGTH_ALIGNMENT != 0);
	BUILD_BUG_ON(PQI_OPERATIONAL_IQ_ELEMENT_LENGTH > 1048560);
	BUILD_BUG_ON(PQI_OPERATIONAL_IQ_ELEMENT_LENGTH %
		PQI_QUEUE_ELEMENT_LENGTH_ALIGNMENT != 0);
	BUILD_BUG_ON(PQI_OPERATIONAL_OQ_ELEMENT_LENGTH > 1048560);
	BUILD_BUG_ON(PQI_OPERATIONAL_OQ_ELEMENT_LENGTH %
		PQI_QUEUE_ELEMENT_LENGTH_ALIGNMENT != 0);

	BUILD_BUG_ON(PQI_RESERVED_IO_SLOTS >= PQI_MAX_OUTSTANDING_REQUESTS);
	BUILD_BUG_ON(PQI_RESERVED_IO_SLOTS >=
		PQI_MAX_OUTSTANDING_REQUESTS_KDUMP);
}