// SPDX-License-Identifier: GPL-2.0
/*
 * driver for Microchip PQI-based storage controllers
 * Copyright (c) 2019-2023 Microchip Technology Inc. and its subsidiaries
 * Copyright (c) 2016-2018 Microsemi Corporation
 * Copyright (c) 2016 PMC-Sierra, Inc.
 *
 * Questions/Comments/Bugfixes to storagedev@microchip.com
 *
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <linux/rtc.h>
#include <linux/bcd.h>
#include <linux/reboot.h>
#include <linux/cciss_ioctl.h>
#include <linux/crash_dump.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_eh.h>
#include <scsi/scsi_transport_sas.h>
#include <linux/unaligned.h>
#include "smartpqi.h"
#include "smartpqi_sis.h"

#if !defined(BUILD_TIMESTAMP)
#define BUILD_TIMESTAMP
#endif

#define DRIVER_VERSION		"2.1.34-035"
#define DRIVER_MAJOR		2
#define DRIVER_MINOR		1
#define DRIVER_RELEASE		34
#define DRIVER_REVISION		35

#define DRIVER_NAME		"Microchip SmartPQI Driver (v" \
				DRIVER_VERSION BUILD_TIMESTAMP ")"
#define DRIVER_NAME_SHORT	"smartpqi"

#define PQI_EXTRA_SGL_MEMORY	(12 * sizeof(struct pqi_sg_descriptor))

#define PQI_POST_RESET_DELAY_SECS			5
#define PQI_POST_OFA_RESET_DELAY_UPON_TIMEOUT_SECS	10

#define PQI_NO_COMPLETION	((void *)-1)

MODULE_AUTHOR("Microchip");
MODULE_DESCRIPTION("Driver for Microchip Smart Family Controller version "
	DRIVER_VERSION);
MODULE_VERSION(DRIVER_VERSION);
MODULE_LICENSE("GPL");

struct pqi_cmd_priv {
	int this_residual;
};

static struct pqi_cmd_priv *pqi_cmd_priv(struct scsi_cmnd *cmd)
{
	return scsi_cmd_priv(cmd);
}

static void pqi_verify_structures(void);
static void pqi_take_ctrl_offline(struct pqi_ctrl_info *ctrl_info,
	enum pqi_ctrl_shutdown_reason ctrl_shutdown_reason);
static void pqi_take_ctrl_devices_offline(struct pqi_ctrl_info *ctrl_info);
static void pqi_ctrl_offline_worker(struct work_struct *work);
static int pqi_scan_scsi_devices(struct pqi_ctrl_info *ctrl_info);
static void pqi_scan_start(struct Scsi_Host *shost);
static void pqi_start_io(struct pqi_ctrl_info *ctrl_info,
	struct pqi_queue_group *queue_group, enum pqi_io_path path,
	struct pqi_io_request *io_request);
static int pqi_submit_raid_request_synchronous(struct pqi_ctrl_info *ctrl_info,
	struct pqi_iu_header *request, unsigned int flags,
	struct pqi_raid_error_info *error_info);
static int pqi_aio_submit_io(struct pqi_ctrl_info *ctrl_info,
	struct scsi_cmnd *scmd, u32 aio_handle, u8 *cdb,
	unsigned int cdb_length, struct pqi_queue_group *queue_group,
	struct pqi_encryption_info *encryption_info, bool raid_bypass, bool io_high_prio);
static int pqi_aio_submit_r1_write_io(struct pqi_ctrl_info *ctrl_info,
	struct scsi_cmnd *scmd, struct pqi_queue_group *queue_group,
	struct pqi_encryption_info *encryption_info, struct pqi_scsi_dev *device,
	struct pqi_scsi_dev_raid_map_data *rmd);
static int pqi_aio_submit_r56_write_io(struct pqi_ctrl_info *ctrl_info,
	struct scsi_cmnd *scmd, struct pqi_queue_group *queue_group,
	struct pqi_encryption_info *encryption_info, struct pqi_scsi_dev *device,
	struct pqi_scsi_dev_raid_map_data *rmd);
static void pqi_ofa_ctrl_quiesce(struct pqi_ctrl_info *ctrl_info);
static void pqi_ofa_ctrl_unquiesce(struct pqi_ctrl_info *ctrl_info);
static int pqi_ofa_ctrl_restart(struct pqi_ctrl_info *ctrl_info, unsigned int delay_secs);
static void pqi_host_setup_buffer(struct pqi_ctrl_info *ctrl_info, struct pqi_host_memory_descriptor *host_memory_descriptor, u32 total_size, u32 min_size);
static void pqi_host_free_buffer(struct pqi_ctrl_info *ctrl_info, struct pqi_host_memory_descriptor *host_memory_descriptor);
static int pqi_host_memory_update(struct pqi_ctrl_info *ctrl_info, struct pqi_host_memory_descriptor *host_memory_descriptor, u16 function_code);
static int pqi_device_wait_for_pending_io(struct pqi_ctrl_info *ctrl_info,
	struct pqi_scsi_dev *device, u8 lun, unsigned long timeout_msecs);
static void pqi_fail_all_outstanding_requests(struct pqi_ctrl_info *ctrl_info);
static void pqi_tmf_worker(struct work_struct *work);

/* for flags argument to pqi_submit_raid_request_synchronous() */
#define PQI_SYNC_FLAGS_INTERRUPTABLE	0x1

static struct scsi_transport_template *pqi_sas_transport_template;

static atomic_t pqi_controller_count = ATOMIC_INIT(0);

enum pqi_lockup_action {
	NONE,
	REBOOT,
	PANIC
};

static enum pqi_lockup_action pqi_lockup_action = NONE;

static struct {
	enum pqi_lockup_action	action;
	char			*name;
} pqi_lockup_actions[] = {
	{
		.action = NONE,
		.name = "none",
	},
	{
		.action = REBOOT,
		.name = "reboot",
	},
	{
		.action = PANIC,
		.name = "panic",
	},
};

static unsigned int pqi_supported_event_types[] = {
	PQI_EVENT_TYPE_HOTPLUG,
	PQI_EVENT_TYPE_HARDWARE,
	PQI_EVENT_TYPE_PHYSICAL_DEVICE,
	PQI_EVENT_TYPE_LOGICAL_DEVICE,
	PQI_EVENT_TYPE_OFA,
	PQI_EVENT_TYPE_AIO_STATE_CHANGE,
	PQI_EVENT_TYPE_AIO_CONFIG_CHANGE,
};

static int pqi_disable_device_id_wildcards;
module_param_named(disable_device_id_wildcards,
	pqi_disable_device_id_wildcards, int, 0644);
MODULE_PARM_DESC(disable_device_id_wildcards,
	"Disable device ID wildcards.");

static int pqi_disable_heartbeat;
module_param_named(disable_heartbeat,
	pqi_disable_heartbeat, int, 0644);
MODULE_PARM_DESC(disable_heartbeat,
	"Disable heartbeat.");

static int pqi_disable_ctrl_shutdown;
module_param_named(disable_ctrl_shutdown,
	pqi_disable_ctrl_shutdown, int, 0644);
MODULE_PARM_DESC(disable_ctrl_shutdown,
	"Disable controller shutdown when controller locked up.");

static char *pqi_lockup_action_param;
module_param_named(lockup_action,
	pqi_lockup_action_param, charp, 0644);
MODULE_PARM_DESC(lockup_action, "Action to take when controller locked up.\n"
	"\t\tSupported: none, reboot, panic\n"
	"\t\tDefault: none");

static int pqi_expose_ld_first;
module_param_named(expose_ld_first,
	pqi_expose_ld_first, int, 0644);
MODULE_PARM_DESC(expose_ld_first, "Expose logical drives before physical drives.");

static int pqi_hide_vsep;
module_param_named(hide_vsep,
	pqi_hide_vsep, int, 0644);
MODULE_PARM_DESC(hide_vsep, "Hide the virtual SEP for direct attached drives.");

static int pqi_disable_managed_interrupts;
module_param_named(disable_managed_interrupts,
	pqi_disable_managed_interrupts, int, 0644);
MODULE_PARM_DESC(disable_managed_interrupts,
	"Disable the kernel automatically assigning SMP affinity to IRQs.");

static unsigned int pqi_ctrl_ready_timeout_secs;
module_param_named(ctrl_ready_timeout,
	pqi_ctrl_ready_timeout_secs, uint, 0644);
MODULE_PARM_DESC(ctrl_ready_timeout,
	"Timeout in seconds for driver to wait for controller ready.");

static char *raid_levels[] = {
	"RAID-0",
	"RAID-4",
	"RAID-1(1+0)",
	"RAID-5",
	"RAID-5+1",
	"RAID-6",
	"RAID-1(Triple)",
};

static char *pqi_raid_level_to_string(u8 raid_level)
{
	if (raid_level < ARRAY_SIZE(raid_levels))
		return raid_levels[raid_level];

	return "RAID UNKNOWN";
}

#define SA_RAID_0		0
#define SA_RAID_4		1
#define SA_RAID_1		2	/* also used for RAID 10 */
#define SA_RAID_5		3	/* also used for RAID 50 */
#define SA_RAID_51		4
#define SA_RAID_6		5	/* also used for RAID 60 */
#define SA_RAID_TRIPLE		6	/* also used for RAID 1+0 Triple */
#define SA_RAID_MAX		SA_RAID_TRIPLE
#define SA_RAID_UNKNOWN		0xff

static inline void pqi_scsi_done(struct scsi_cmnd *scmd)
{
	pqi_prep_for_scsi_done(scmd);
	scsi_done(scmd);
}

static inline void pqi_disable_write_same(struct scsi_device *sdev)
{
	sdev->no_write_same = 1;
}

static inline bool pqi_scsi3addr_equal(u8 *scsi3addr1, u8 *scsi3addr2)
{
	return memcmp(scsi3addr1, scsi3addr2, 8) == 0;
}

static inline bool pqi_is_logical_device(struct pqi_scsi_dev *device)
{
	return !device->is_physical_device;
}

static inline bool pqi_is_external_raid_addr(u8 *scsi3addr)
{
	return scsi3addr[2] != 0;
}

static inline bool pqi_ctrl_offline(struct pqi_ctrl_info *ctrl_info)
{
	return !ctrl_info->controller_online;
}

static inline void pqi_check_ctrl_health(struct pqi_ctrl_info *ctrl_info)
{
	if (ctrl_info->controller_online)
		if (!sis_is_firmware_running(ctrl_info))
			pqi_take_ctrl_offline(ctrl_info, PQI_FIRMWARE_KERNEL_NOT_UP);
}

static inline bool pqi_is_hba_lunid(u8 *scsi3addr)
{
	return pqi_scsi3addr_equal(scsi3addr, RAID_CTLR_LUNID);
}

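/*
 * Bits in the SIS driver scratch register used to record the controller
 * mode and firmware triage support so they can be read back later.
 */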
#define PQI_DRIVER_SCRATCH_PQI_MODE			0x1
#define PQI_DRIVER_SCRATCH_FW_TRIAGE_SUPPORTED		0x2

static inline enum pqi_ctrl_mode pqi_get_ctrl_mode(struct pqi_ctrl_info *ctrl_info)
{
	return sis_read_driver_scratch(ctrl_info) & PQI_DRIVER_SCRATCH_PQI_MODE ? PQI_MODE : SIS_MODE;
}

static inline void pqi_save_ctrl_mode(struct pqi_ctrl_info *ctrl_info,
	enum pqi_ctrl_mode mode)
{
	u32 driver_scratch;

	driver_scratch = sis_read_driver_scratch(ctrl_info);

	if (mode == PQI_MODE)
		driver_scratch |= PQI_DRIVER_SCRATCH_PQI_MODE;
	else
		driver_scratch &= ~PQI_DRIVER_SCRATCH_PQI_MODE;

	sis_write_driver_scratch(ctrl_info, driver_scratch);
}

static inline bool pqi_is_fw_triage_supported(struct pqi_ctrl_info *ctrl_info)
{
	return (sis_read_driver_scratch(ctrl_info) & PQI_DRIVER_SCRATCH_FW_TRIAGE_SUPPORTED) != 0;
}

static inline void pqi_save_fw_triage_setting(struct pqi_ctrl_info *ctrl_info, bool is_supported)
{
	u32 driver_scratch;

	driver_scratch = sis_read_driver_scratch(ctrl_info);

	if (is_supported)
		driver_scratch |= PQI_DRIVER_SCRATCH_FW_TRIAGE_SUPPORTED;
	else
		driver_scratch &= ~PQI_DRIVER_SCRATCH_FW_TRIAGE_SUPPORTED;

	sis_write_driver_scratch(ctrl_info, driver_scratch);
}

static inline void pqi_ctrl_block_scan(struct pqi_ctrl_info *ctrl_info)
{
	ctrl_info->scan_blocked = true;
	mutex_lock(&ctrl_info->scan_mutex);
}

static inline void pqi_ctrl_unblock_scan(struct pqi_ctrl_info *ctrl_info)
{
	ctrl_info->scan_blocked = false;
	mutex_unlock(&ctrl_info->scan_mutex);
}

static inline bool pqi_ctrl_scan_blocked(struct pqi_ctrl_info *ctrl_info)
{
	return ctrl_info->scan_blocked;
}

static inline void pqi_ctrl_block_device_reset(struct pqi_ctrl_info *ctrl_info)
{
	mutex_lock(&ctrl_info->lun_reset_mutex);
}

static inline void pqi_ctrl_unblock_device_reset(struct pqi_ctrl_info *ctrl_info)
{
	mutex_unlock(&ctrl_info->lun_reset_mutex);
}

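/*
 * Block new requests from the SCSI midlayer, then poll until all commands
 * already queued to the host have drained; the poll interval backs off
 * from 20 ms to 500 ms after ten iterations.
 */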
static inline void pqi_scsi_block_requests(struct pqi_ctrl_info *ctrl_info)
{
	struct Scsi_Host *shost;
	unsigned int num_loops;
	int msecs_sleep;

	shost = ctrl_info->scsi_host;

	scsi_block_requests(shost);

	num_loops = 0;
	msecs_sleep = 20;
	while (scsi_host_busy(shost)) {
		num_loops++;
		if (num_loops == 10)
			msecs_sleep = 500;
		msleep(msecs_sleep);
	}
}

static inline void pqi_scsi_unblock_requests(struct pqi_ctrl_info *ctrl_info)
{
	scsi_unblock_requests(ctrl_info->scsi_host);
}

static inline void pqi_ctrl_busy(struct pqi_ctrl_info *ctrl_info)
{
	atomic_inc(&ctrl_info->num_busy_threads);
}

static inline void pqi_ctrl_unbusy(struct pqi_ctrl_info *ctrl_info)
{
	atomic_dec(&ctrl_info->num_busy_threads);
}

static inline bool pqi_ctrl_blocked(struct pqi_ctrl_info *ctrl_info)
{
	return ctrl_info->block_requests;
}

static inline void pqi_ctrl_block_requests(struct pqi_ctrl_info *ctrl_info)
{
	ctrl_info->block_requests = true;
}

static inline void pqi_ctrl_unblock_requests(struct pqi_ctrl_info *ctrl_info)
{
	ctrl_info->block_requests = false;
	wake_up_all(&ctrl_info->block_requests_wait);
}

static void pqi_wait_if_ctrl_blocked(struct pqi_ctrl_info *ctrl_info)
{
	if (!pqi_ctrl_blocked(ctrl_info))
		return;

	atomic_inc(&ctrl_info->num_blocked_threads);
	wait_event(ctrl_info->block_requests_wait,
		!pqi_ctrl_blocked(ctrl_info));
	atomic_dec(&ctrl_info->num_blocked_threads);
}

#define PQI_QUIESCE_WARNING_TIMEOUT_SECS	10

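/*
 * A thread blocked in pqi_wait_if_ctrl_blocked() still counts as busy,
 * so the driver is considered quiesced once every busy thread is also
 * accounted for as blocked.  Warn every 10 seconds while waiting.
 */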
static inline void pqi_ctrl_wait_until_quiesced(struct pqi_ctrl_info *ctrl_info)
{
	unsigned long start_jiffies;
	unsigned long warning_timeout;
	bool displayed_warning;

	displayed_warning = false;
	start_jiffies = jiffies;
	warning_timeout = (PQI_QUIESCE_WARNING_TIMEOUT_SECS * HZ) + start_jiffies;

	while (atomic_read(&ctrl_info->num_busy_threads) >
		atomic_read(&ctrl_info->num_blocked_threads)) {
		if (time_after(jiffies, warning_timeout)) {
			dev_warn(&ctrl_info->pci_dev->dev,
				"waiting %u seconds for driver activity to quiesce\n",
				jiffies_to_msecs(jiffies - start_jiffies) / 1000);
			displayed_warning = true;
			warning_timeout = (PQI_QUIESCE_WARNING_TIMEOUT_SECS * HZ) + jiffies;
		}
		usleep_range(1000, 2000);
	}

	if (displayed_warning)
		dev_warn(&ctrl_info->pci_dev->dev,
			"driver activity quiesced after waiting for %u seconds\n",
			jiffies_to_msecs(jiffies - start_jiffies) / 1000);
}

static inline bool pqi_device_offline(struct pqi_scsi_dev *device)
{
	return device->device_offline;
}

static inline void pqi_ctrl_ofa_start(struct pqi_ctrl_info *ctrl_info)
{
	mutex_lock(&ctrl_info->ofa_mutex);
}

static inline void pqi_ctrl_ofa_done(struct pqi_ctrl_info *ctrl_info)
{
	mutex_unlock(&ctrl_info->ofa_mutex);
}

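/*
 * Taking and immediately dropping ofa_mutex blocks the caller until any
 * OFA in progress (which holds the mutex via pqi_ctrl_ofa_start()) has
 * completed.
 */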
static inline void pqi_wait_until_ofa_finished(struct pqi_ctrl_info *ctrl_info)
{
	mutex_lock(&ctrl_info->ofa_mutex);
	mutex_unlock(&ctrl_info->ofa_mutex);
}

static inline bool pqi_ofa_in_progress(struct pqi_ctrl_info *ctrl_info)
{
	return mutex_is_locked(&ctrl_info->ofa_mutex);
}

static inline void pqi_device_remove_start(struct pqi_scsi_dev *device)
{
	device->in_remove = true;
}

static inline bool pqi_device_in_remove(struct pqi_scsi_dev *device)
{
	return device->in_remove;
}

static inline void pqi_device_reset_start(struct pqi_scsi_dev *device, u8 lun)
{
	device->in_reset[lun] = true;
}

static inline void pqi_device_reset_done(struct pqi_scsi_dev *device, u8 lun)
{
	device->in_reset[lun] = false;
}

static inline bool pqi_device_in_reset(struct pqi_scsi_dev *device, u8 lun)
{
	return device->in_reset[lun];
}

static inline int pqi_event_type_to_event_index(unsigned int event_type)
{
	int index;

	for (index = 0; index < ARRAY_SIZE(pqi_supported_event_types); index++)
		if (event_type == pqi_supported_event_types[index])
			return index;

	return -1;
}

static inline bool pqi_is_supported_event(unsigned int event_type)
{
	return pqi_event_type_to_event_index(event_type) != -1;
}

static inline void pqi_schedule_rescan_worker_with_delay(struct pqi_ctrl_info *ctrl_info,
	unsigned long delay)
{
	if (pqi_ctrl_offline(ctrl_info))
		return;

	schedule_delayed_work(&ctrl_info->rescan_work, delay);
}

static inline void pqi_schedule_rescan_worker(struct pqi_ctrl_info *ctrl_info)
{
	pqi_schedule_rescan_worker_with_delay(ctrl_info, 0);
}

#define PQI_RESCAN_WORK_DELAY	(10 * HZ)

static inline void pqi_schedule_rescan_worker_delayed(struct pqi_ctrl_info *ctrl_info)
{
	pqi_schedule_rescan_worker_with_delay(ctrl_info, PQI_RESCAN_WORK_DELAY);
}

static inline void pqi_cancel_rescan_worker(struct pqi_ctrl_info *ctrl_info)
{
	cancel_delayed_work_sync(&ctrl_info->rescan_work);
}

static inline u32 pqi_read_heartbeat_counter(struct pqi_ctrl_info *ctrl_info)
{
	if (!ctrl_info->heartbeat_counter)
		return 0;

	return readl(ctrl_info->heartbeat_counter);
}

static inline u8 pqi_read_soft_reset_status(struct pqi_ctrl_info *ctrl_info)
{
	return readb(ctrl_info->soft_reset_status);
}

static inline void pqi_clear_soft_reset_status(struct pqi_ctrl_info *ctrl_info)
{
	u8 status;

	status = pqi_read_soft_reset_status(ctrl_info);
	status &= ~PQI_SOFT_RESET_ABORT;
	writeb(status, ctrl_info->soft_reset_status);
}

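/*
 * Elevate NCQ priority only for RT I/O-priority class read/write commands
 * on devices that have NCQ priority enabled.
 */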
static inline bool pqi_is_io_high_priority(struct pqi_scsi_dev *device, struct scsi_cmnd *scmd)
{
	bool io_high_prio;
	int priority_class;

	io_high_prio = false;

	if (device->ncq_prio_enable) {
		priority_class =
			IOPRIO_PRIO_CLASS(req_get_ioprio(scsi_cmd_to_rq(scmd)));
		if (priority_class == IOPRIO_CLASS_RT) {
			/* Set NCQ priority for read/write commands. */
			switch (scmd->cmnd[0]) {
			case WRITE_16:
			case READ_16:
			case WRITE_12:
			case READ_12:
			case WRITE_10:
			case READ_10:
			case WRITE_6:
			case READ_6:
				io_high_prio = true;
				break;
			}
		}
	}

	return io_high_prio;
}

static int pqi_map_single(struct pci_dev *pci_dev,
	struct pqi_sg_descriptor *sg_descriptor, void *buffer,
	size_t buffer_length, enum dma_data_direction data_direction)
{
	dma_addr_t bus_address;

	if (!buffer || buffer_length == 0 || data_direction == DMA_NONE)
		return 0;

	bus_address = dma_map_single(&pci_dev->dev, buffer, buffer_length,
		data_direction);
	if (dma_mapping_error(&pci_dev->dev, bus_address))
		return -ENOMEM;

	put_unaligned_le64((u64)bus_address, &sg_descriptor->address);
	put_unaligned_le32(buffer_length, &sg_descriptor->length);
	put_unaligned_le32(CISS_SG_LAST, &sg_descriptor->flags);

	return 0;
}

static void pqi_pci_unmap(struct pci_dev *pci_dev,
	struct pqi_sg_descriptor *descriptors, int num_descriptors,
	enum dma_data_direction data_direction)
{
	int i;

	if (data_direction == DMA_NONE)
		return;

	for (i = 0; i < num_descriptors; i++)
		dma_unmap_single(&pci_dev->dev,
			(dma_addr_t)get_unaligned_le64(&descriptors[i].address),
			get_unaligned_le32(&descriptors[i].length),
			data_direction);
}

static int pqi_build_raid_path_request(struct pqi_ctrl_info *ctrl_info,
	struct pqi_raid_path_request *request, u8 cmd,
	u8 *scsi3addr, void *buffer, size_t buffer_length,
	u16 vpd_page, enum dma_data_direction *dir)
{
	u8 *cdb;
	size_t cdb_length = buffer_length;

	memset(request, 0, sizeof(*request));

	request->header.iu_type = PQI_REQUEST_IU_RAID_PATH_IO;
	put_unaligned_le16(offsetof(struct pqi_raid_path_request,
		sg_descriptors[1]) - PQI_REQUEST_HEADER_LENGTH,
		&request->header.iu_length);
	put_unaligned_le32(buffer_length, &request->buffer_length);
	memcpy(request->lun_number, scsi3addr, sizeof(request->lun_number));
	request->task_attribute = SOP_TASK_ATTRIBUTE_SIMPLE;
	request->additional_cdb_bytes_usage = SOP_ADDITIONAL_CDB_BYTES_0;

	cdb = request->cdb;

	switch (cmd) {
	case INQUIRY:
		request->data_direction = SOP_READ_FLAG;
		cdb[0] = INQUIRY;
		if (vpd_page & VPD_PAGE) {
			cdb[1] = 0x1;
			cdb[2] = (u8)vpd_page;
		}
		cdb[4] = (u8)cdb_length;
		break;
	case CISS_REPORT_LOG:
	case CISS_REPORT_PHYS:
		request->data_direction = SOP_READ_FLAG;
		cdb[0] = cmd;
		if (cmd == CISS_REPORT_PHYS) {
			if (ctrl_info->rpl_extended_format_4_5_supported)
				cdb[1] = CISS_REPORT_PHYS_FLAG_EXTENDED_FORMAT_4;
			else
				cdb[1] = CISS_REPORT_PHYS_FLAG_EXTENDED_FORMAT_2;
		} else {
			cdb[1] = ctrl_info->ciss_report_log_flags;
		}
		put_unaligned_be32(cdb_length, &cdb[6]);
		break;
	case CISS_GET_RAID_MAP:
		request->data_direction = SOP_READ_FLAG;
		cdb[0] = CISS_READ;
		cdb[1] = CISS_GET_RAID_MAP;
		put_unaligned_be32(cdb_length, &cdb[6]);
		break;
	case SA_FLUSH_CACHE:
		request->header.driver_flags = PQI_DRIVER_NONBLOCKABLE_REQUEST;
		request->data_direction = SOP_WRITE_FLAG;
		cdb[0] = BMIC_WRITE;
		cdb[6] = BMIC_FLUSH_CACHE;
		put_unaligned_be16(cdb_length, &cdb[7]);
		break;
	case BMIC_SENSE_DIAG_OPTIONS:
		cdb_length = 0;
		fallthrough;
	case BMIC_IDENTIFY_CONTROLLER:
	case BMIC_IDENTIFY_PHYSICAL_DEVICE:
	case BMIC_SENSE_SUBSYSTEM_INFORMATION:
	case BMIC_SENSE_FEATURE:
		request->data_direction = SOP_READ_FLAG;
		cdb[0] = BMIC_READ;
		cdb[6] = cmd;
		put_unaligned_be16(cdb_length, &cdb[7]);
		break;
	case BMIC_SET_DIAG_OPTIONS:
		cdb_length = 0;
		fallthrough;
	case BMIC_WRITE_HOST_WELLNESS:
		request->data_direction = SOP_WRITE_FLAG;
		cdb[0] = BMIC_WRITE;
		cdb[6] = cmd;
		put_unaligned_be16(cdb_length, &cdb[7]);
		break;
	case BMIC_CSMI_PASSTHRU:
		request->data_direction = SOP_BIDIRECTIONAL;
		cdb[0] = BMIC_WRITE;
		cdb[5] = CSMI_CC_SAS_SMP_PASSTHRU;
		cdb[6] = cmd;
		put_unaligned_be16(cdb_length, &cdb[7]);
		break;
	default:
		dev_err(&ctrl_info->pci_dev->dev, "unknown command 0x%x\n", cmd);
		break;
	}

	switch (request->data_direction) {
	case SOP_READ_FLAG:
		*dir = DMA_FROM_DEVICE;
		break;
	case SOP_WRITE_FLAG:
		*dir = DMA_TO_DEVICE;
		break;
	case SOP_NO_DIRECTION_FLAG:
		*dir = DMA_NONE;
		break;
	default:
		*dir = DMA_BIDIRECTIONAL;
		break;
	}

	return pqi_map_single(ctrl_info->pci_dev, &request->sg_descriptors[0],
		buffer, buffer_length, *dir);
}

static inline void pqi_reinit_io_request(struct pqi_io_request *io_request)
{
	io_request->scmd = NULL;
	io_request->status = 0;
	io_request->error_info = NULL;
	io_request->raid_bypass = false;
}

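/*
 * The request slot for a midlayer command is selected by its block-layer
 * tag; the PQI_RESERVED_IO_SLOTS slots past scsi_ml_can_queue serve IOCTL
 * and driver-internal requests.
 */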
static inline struct pqi_io_request *pqi_alloc_io_request(struct pqi_ctrl_info *ctrl_info, struct scsi_cmnd *scmd)
{
	struct pqi_io_request *io_request;
	u16 i;

	if (scmd) { /* SML I/O request */
		u32 blk_tag = blk_mq_unique_tag(scsi_cmd_to_rq(scmd));

		i = blk_mq_unique_tag_to_tag(blk_tag);
		io_request = &ctrl_info->io_request_pool[i];
		if (atomic_inc_return(&io_request->refcount) > 1) {
			atomic_dec(&io_request->refcount);
			return NULL;
		}
	} else { /* IOCTL or driver internal request */
		/*
		 * benignly racy - may have to wait for an open slot.
		 * command slot range is scsi_ml_can_queue -
		 * [scsi_ml_can_queue + (PQI_RESERVED_IO_SLOTS - 1)]
		 */
		i = 0;
		while (1) {
			io_request = &ctrl_info->io_request_pool[ctrl_info->scsi_ml_can_queue + i];
			if (atomic_inc_return(&io_request->refcount) == 1)
				break;
			atomic_dec(&io_request->refcount);
			i = (i + 1) % PQI_RESERVED_IO_SLOTS;
		}
	}

	if (io_request)
		pqi_reinit_io_request(io_request);

	return io_request;
}

static void pqi_free_io_request(struct pqi_io_request *io_request)
{
	atomic_dec(&io_request->refcount);
}

static int pqi_send_scsi_raid_request(struct pqi_ctrl_info *ctrl_info, u8 cmd,
	u8 *scsi3addr, void *buffer, size_t buffer_length, u16 vpd_page,
	struct pqi_raid_error_info *error_info)
{
	int rc;
	struct pqi_raid_path_request request;
	enum dma_data_direction dir;

	rc = pqi_build_raid_path_request(ctrl_info, &request, cmd, scsi3addr,
		buffer, buffer_length, vpd_page, &dir);
	if (rc)
		return rc;

	rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header, 0, error_info);

	pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1, dir);

	return rc;
}

/* helper functions for pqi_send_scsi_raid_request */

static inline int pqi_send_ctrl_raid_request(struct pqi_ctrl_info *ctrl_info,
	u8 cmd, void *buffer, size_t buffer_length)
{
	return pqi_send_scsi_raid_request(ctrl_info, cmd, RAID_CTLR_LUNID,
		buffer, buffer_length, 0, NULL);
}

static inline int pqi_send_ctrl_raid_with_error(struct pqi_ctrl_info *ctrl_info,
	u8 cmd, void *buffer, size_t buffer_length,
	struct pqi_raid_error_info *error_info)
{
	return pqi_send_scsi_raid_request(ctrl_info, cmd, RAID_CTLR_LUNID,
		buffer, buffer_length, 0, error_info);
}

static inline int pqi_identify_controller(struct pqi_ctrl_info *ctrl_info,
	struct bmic_identify_controller *buffer)
{
	return pqi_send_ctrl_raid_request(ctrl_info, BMIC_IDENTIFY_CONTROLLER,
		buffer, sizeof(*buffer));
}

static inline int pqi_sense_subsystem_info(struct pqi_ctrl_info *ctrl_info,
	struct bmic_sense_subsystem_info *sense_info)
{
	return pqi_send_ctrl_raid_request(ctrl_info,
		BMIC_SENSE_SUBSYSTEM_INFORMATION, sense_info,
		sizeof(*sense_info));
}

static inline int pqi_scsi_inquiry(struct pqi_ctrl_info *ctrl_info,
	u8 *scsi3addr, u16 vpd_page, void *buffer, size_t buffer_length)
{
	return pqi_send_scsi_raid_request(ctrl_info, INQUIRY, scsi3addr,
		buffer, buffer_length, vpd_page, NULL);
}

static int pqi_identify_physical_device(struct pqi_ctrl_info *ctrl_info,
	struct pqi_scsi_dev *device,
	struct bmic_identify_physical_device *buffer, size_t buffer_length)
{
	int rc;
	enum dma_data_direction dir;
	u16 bmic_device_index;
	struct pqi_raid_path_request request;

	rc = pqi_build_raid_path_request(ctrl_info, &request,
		BMIC_IDENTIFY_PHYSICAL_DEVICE, RAID_CTLR_LUNID, buffer,
		buffer_length, 0, &dir);
	if (rc)
		return rc;

	bmic_device_index = CISS_GET_DRIVE_NUMBER(device->scsi3addr);
	request.cdb[2] = (u8)bmic_device_index;
	request.cdb[9] = (u8)(bmic_device_index >> 8);

	rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header, 0, NULL);

	pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1, dir);

	return rc;
}

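/*
 * Firmware reports AIO transfer limits in KiB; a limit of 0 means no
 * limit and is mapped to ~0 bytes.
 */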
static inline u32 pqi_aio_limit_to_bytes(__le16 *limit)
{
	u32 bytes;

	bytes = get_unaligned_le16(limit);
	if (bytes == 0)
		bytes = ~0;
	else
		bytes *= 1024;

	return bytes;
}

#pragma pack(1)

struct bmic_sense_feature_buffer {
	struct bmic_sense_feature_buffer_header header;
	struct bmic_sense_feature_io_page_aio_subpage aio_subpage;
};

#pragma pack()

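/*
 * Minimum buffer and subpage lengths needed to cover every AIO subpage
 * field the driver consumes, through max_write_raid_1_10_3drive.
 */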
#define MINIMUM_AIO_SUBPAGE_BUFFER_LENGTH	\
	offsetofend(struct bmic_sense_feature_buffer, \
		aio_subpage.max_write_raid_1_10_3drive)

#define MINIMUM_AIO_SUBPAGE_LENGTH	\
	(offsetofend(struct bmic_sense_feature_io_page_aio_subpage, \
		max_write_raid_1_10_3drive) - \
		sizeof_field(struct bmic_sense_feature_io_page_aio_subpage, header))

static int pqi_get_advanced_raid_bypass_config(struct pqi_ctrl_info *ctrl_info)
{
	int rc;
	enum dma_data_direction dir;
	struct pqi_raid_path_request request;
	struct bmic_sense_feature_buffer *buffer;

	buffer = kmalloc(sizeof(*buffer), GFP_KERNEL);
	if (!buffer)
		return -ENOMEM;

	rc = pqi_build_raid_path_request(ctrl_info, &request, BMIC_SENSE_FEATURE, RAID_CTLR_LUNID,
		buffer, sizeof(*buffer), 0, &dir);
	if (rc)
		goto error;

	request.cdb[2] = BMIC_SENSE_FEATURE_IO_PAGE;
	request.cdb[3] = BMIC_SENSE_FEATURE_IO_PAGE_AIO_SUBPAGE;

	rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header, 0, NULL);

	pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1, dir);

	if (rc)
		goto error;

	if (buffer->header.page_code != BMIC_SENSE_FEATURE_IO_PAGE ||
		buffer->header.subpage_code !=
			BMIC_SENSE_FEATURE_IO_PAGE_AIO_SUBPAGE ||
		get_unaligned_le16(&buffer->header.buffer_length) <
			MINIMUM_AIO_SUBPAGE_BUFFER_LENGTH ||
		buffer->aio_subpage.header.page_code !=
			BMIC_SENSE_FEATURE_IO_PAGE ||
		buffer->aio_subpage.header.subpage_code !=
			BMIC_SENSE_FEATURE_IO_PAGE_AIO_SUBPAGE ||
		get_unaligned_le16(&buffer->aio_subpage.header.page_length) <
			MINIMUM_AIO_SUBPAGE_LENGTH) {
		goto error;
	}

	ctrl_info->max_transfer_encrypted_sas_sata =
		pqi_aio_limit_to_bytes(
			&buffer->aio_subpage.max_transfer_encrypted_sas_sata);

	ctrl_info->max_transfer_encrypted_nvme =
		pqi_aio_limit_to_bytes(
			&buffer->aio_subpage.max_transfer_encrypted_nvme);

	ctrl_info->max_write_raid_5_6 =
		pqi_aio_limit_to_bytes(
			&buffer->aio_subpage.max_write_raid_5_6);

	ctrl_info->max_write_raid_1_10_2drive =
		pqi_aio_limit_to_bytes(
			&buffer->aio_subpage.max_write_raid_1_10_2drive);

	ctrl_info->max_write_raid_1_10_3drive =
		pqi_aio_limit_to_bytes(
			&buffer->aio_subpage.max_write_raid_1_10_3drive);

error:
	kfree(buffer);

	return rc;
}

static int pqi_flush_cache(struct pqi_ctrl_info *ctrl_info,
	enum bmic_flush_cache_shutdown_event shutdown_event)
{
	int rc;
	struct bmic_flush_cache *flush_cache;

	flush_cache = kzalloc(sizeof(*flush_cache), GFP_KERNEL);
	if (!flush_cache)
		return -ENOMEM;

	flush_cache->shutdown_event = shutdown_event;

	rc = pqi_send_ctrl_raid_request(ctrl_info, SA_FLUSH_CACHE, flush_cache,
		sizeof(*flush_cache));

	kfree(flush_cache);

	return rc;
}

int pqi_csmi_smp_passthru(struct pqi_ctrl_info *ctrl_info,
	struct bmic_csmi_smp_passthru_buffer *buffer, size_t buffer_length,
	struct pqi_raid_error_info *error_info)
{
	return pqi_send_ctrl_raid_with_error(ctrl_info, BMIC_CSMI_PASSTHRU,
		buffer, buffer_length, error_info);
}

#define PQI_FETCH_PTRAID_DATA	(1 << 31)

static int pqi_set_diag_rescan(struct pqi_ctrl_info *ctrl_info)
{
	int rc;
	struct bmic_diag_options *diag;

	diag = kzalloc(sizeof(*diag), GFP_KERNEL);
	if (!diag)
		return -ENOMEM;

	rc = pqi_send_ctrl_raid_request(ctrl_info, BMIC_SENSE_DIAG_OPTIONS,
		diag, sizeof(*diag));
	if (rc)
		goto out;

	diag->options |= cpu_to_le32(PQI_FETCH_PTRAID_DATA);

	rc = pqi_send_ctrl_raid_request(ctrl_info, BMIC_SET_DIAG_OPTIONS, diag,
		sizeof(*diag));

out:
	kfree(diag);

	return rc;
}

static inline int pqi_write_host_wellness(struct pqi_ctrl_info *ctrl_info,
	void *buffer, size_t buffer_length)
{
	return pqi_send_ctrl_raid_request(ctrl_info, BMIC_WRITE_HOST_WELLNESS,
		buffer, buffer_length);
}

#pragma pack(1)

struct bmic_host_wellness_driver_version {
	u8	start_tag[4];
	u8	driver_version_tag[2];
	__le16	driver_version_length;
	char	driver_version[32];
	u8	dont_write_tag[2];
	u8	end_tag[2];
};

#pragma pack()

static int pqi_write_driver_version_to_host_wellness(
	struct pqi_ctrl_info *ctrl_info)
{
	int rc;
	struct bmic_host_wellness_driver_version *buffer;
	size_t buffer_length;

	buffer_length = sizeof(*buffer);

	buffer = kmalloc(buffer_length, GFP_KERNEL);
	if (!buffer)
		return -ENOMEM;

	buffer->start_tag[0] = '<';
	buffer->start_tag[1] = 'H';
	buffer->start_tag[2] = 'W';
	buffer->start_tag[3] = '>';
	buffer->driver_version_tag[0] = 'D';
	buffer->driver_version_tag[1] = 'V';
	put_unaligned_le16(sizeof(buffer->driver_version),
		&buffer->driver_version_length);
	strscpy(buffer->driver_version, "Linux " DRIVER_VERSION,
		sizeof(buffer->driver_version));
	buffer->dont_write_tag[0] = 'D';
	buffer->dont_write_tag[1] = 'W';
	buffer->end_tag[0] = 'Z';
	buffer->end_tag[1] = 'Z';

	rc = pqi_write_host_wellness(ctrl_info, buffer, buffer_length);

	kfree(buffer);

	return rc;
}

#pragma pack(1)

struct bmic_host_wellness_time {
	u8	start_tag[4];
	u8	time_tag[2];
	__le16	time_length;
	u8	time[8];
	u8	dont_write_tag[2];
	u8	end_tag[2];
};

#pragma pack()

static int pqi_write_current_time_to_host_wellness(
	struct pqi_ctrl_info *ctrl_info)
{
	int rc;
	struct bmic_host_wellness_time *buffer;
	size_t buffer_length;
	time64_t local_time;
	unsigned int year;
	struct tm tm;

	buffer_length = sizeof(*buffer);

	buffer = kmalloc(buffer_length, GFP_KERNEL);
	if (!buffer)
		return -ENOMEM;

	buffer->start_tag[0] = '<';
	buffer->start_tag[1] = 'H';
	buffer->start_tag[2] = 'W';
	buffer->start_tag[3] = '>';
	buffer->time_tag[0] = 'T';
	buffer->time_tag[1] = 'D';
	put_unaligned_le16(sizeof(buffer->time),
		&buffer->time_length);

	local_time = ktime_get_real_seconds();
	time64_to_tm(local_time, -sys_tz.tz_minuteswest * 60, &tm);
	year = tm.tm_year + 1900;

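	/* BCD-encoded local time: hour, minute, second, 0, month, day, century, year */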
	buffer->time[0] = bin2bcd(tm.tm_hour);
	buffer->time[1] = bin2bcd(tm.tm_min);
	buffer->time[2] = bin2bcd(tm.tm_sec);
	buffer->time[3] = 0;
	buffer->time[4] = bin2bcd(tm.tm_mon + 1);
	buffer->time[5] = bin2bcd(tm.tm_mday);
	buffer->time[6] = bin2bcd(year / 100);
	buffer->time[7] = bin2bcd(year % 100);

	buffer->dont_write_tag[0] = 'D';
	buffer->dont_write_tag[1] = 'W';
	buffer->end_tag[0] = 'Z';
	buffer->end_tag[1] = 'Z';

	rc = pqi_write_host_wellness(ctrl_info, buffer, buffer_length);

	kfree(buffer);

	return rc;
}

#define PQI_UPDATE_TIME_WORK_INTERVAL	(24UL * 60 * 60 * HZ)

static void pqi_update_time_worker(struct work_struct *work)
{
	int rc;
	struct pqi_ctrl_info *ctrl_info;

	ctrl_info = container_of(to_delayed_work(work), struct pqi_ctrl_info,
		update_time_work);

	rc = pqi_write_current_time_to_host_wellness(ctrl_info);
	if (rc)
		dev_warn(&ctrl_info->pci_dev->dev,
			"error updating time on controller\n");

	schedule_delayed_work(&ctrl_info->update_time_work,
		PQI_UPDATE_TIME_WORK_INTERVAL);
}

static inline void pqi_schedule_update_time_worker(struct pqi_ctrl_info *ctrl_info)
{
	schedule_delayed_work(&ctrl_info->update_time_work, 0);
}

static inline void pqi_cancel_update_time_worker(struct pqi_ctrl_info *ctrl_info)
{
	cancel_delayed_work_sync(&ctrl_info->update_time_work);
}

static inline int pqi_report_luns(struct pqi_ctrl_info *ctrl_info, u8 cmd, void *buffer,
	size_t buffer_length)
{
	return pqi_send_ctrl_raid_request(ctrl_info, cmd, buffer, buffer_length);
}

static int pqi_report_phys_logical_luns(struct pqi_ctrl_info *ctrl_info, u8 cmd, void **buffer)
{
	int rc;
	size_t lun_list_length;
	size_t lun_data_length;
	size_t new_lun_list_length;
	void *lun_data = NULL;
	struct report_lun_header *report_lun_header;

	report_lun_header = kmalloc(sizeof(*report_lun_header), GFP_KERNEL);
	if (!report_lun_header) {
		rc = -ENOMEM;
		goto out;
	}

	rc = pqi_report_luns(ctrl_info, cmd, report_lun_header, sizeof(*report_lun_header));
	if (rc)
		goto out;

	lun_list_length = get_unaligned_be32(&report_lun_header->list_length);

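	/*
	 * The LUN list can grow between the sizing query above and the full
	 * query below, so retry with the larger size when that happens.
	 */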
again:
	lun_data_length = sizeof(struct report_lun_header) + lun_list_length;

	lun_data = kmalloc(lun_data_length, GFP_KERNEL);
	if (!lun_data) {
		rc = -ENOMEM;
		goto out;
	}

	if (lun_list_length == 0) {
		memcpy(lun_data, report_lun_header, sizeof(*report_lun_header));
		goto out;
	}

	rc = pqi_report_luns(ctrl_info, cmd, lun_data, lun_data_length);
	if (rc)
		goto out;

	new_lun_list_length =
		get_unaligned_be32(&((struct report_lun_header *)lun_data)->list_length);

	if (new_lun_list_length > lun_list_length) {
		lun_list_length = new_lun_list_length;
		kfree(lun_data);
		goto again;
	}

out:
	kfree(report_lun_header);

	if (rc) {
		kfree(lun_data);
		lun_data = NULL;
	}

	*buffer = lun_data;

	return rc;
}

static inline int pqi_report_phys_luns(struct pqi_ctrl_info *ctrl_info, void **buffer)
{
	int rc;
	unsigned int i;
	u8 rpl_response_format;
	u32 num_physicals;
	void *rpl_list;
	struct report_lun_header *rpl_header;
	struct report_phys_lun_8byte_wwid_list *rpl_8byte_wwid_list;
	struct report_phys_lun_16byte_wwid_list *rpl_16byte_wwid_list;

	rc = pqi_report_phys_logical_luns(ctrl_info, CISS_REPORT_PHYS, &rpl_list);
	if (rc)
		return rc;

	if (ctrl_info->rpl_extended_format_4_5_supported) {
		rpl_header = rpl_list;
		rpl_response_format = rpl_header->flags & CISS_REPORT_PHYS_FLAG_EXTENDED_FORMAT_MASK;
		if (rpl_response_format == CISS_REPORT_PHYS_FLAG_EXTENDED_FORMAT_4) {
			*buffer = rpl_list;
			return 0;
		} else if (rpl_response_format != CISS_REPORT_PHYS_FLAG_EXTENDED_FORMAT_2) {
			dev_err(&ctrl_info->pci_dev->dev,
				"RPL returned unsupported data format %u\n",
				rpl_response_format);
			return -EINVAL;
		} else {
			dev_warn(&ctrl_info->pci_dev->dev,
				"RPL returned extended format 2 instead of 4\n");
		}
	}

	rpl_8byte_wwid_list = rpl_list;
	num_physicals = get_unaligned_be32(&rpl_8byte_wwid_list->header.list_length) / sizeof(rpl_8byte_wwid_list->lun_entries[0]);

	rpl_16byte_wwid_list = kmalloc(struct_size(rpl_16byte_wwid_list, lun_entries,
		num_physicals), GFP_KERNEL);
	if (!rpl_16byte_wwid_list)
		return -ENOMEM;

	put_unaligned_be32(num_physicals * sizeof(struct report_phys_lun_16byte_wwid),
		&rpl_16byte_wwid_list->header.list_length);
	rpl_16byte_wwid_list->header.flags = rpl_8byte_wwid_list->header.flags;

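	/*
	 * Convert each 8-byte WWID entry to the 16-byte WWID format,
	 * zero-filling the upper eight WWID bytes.
	 */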
	for (i = 0; i < num_physicals; i++) {
		memcpy(&rpl_16byte_wwid_list->lun_entries[i].lunid, &rpl_8byte_wwid_list->lun_entries[i].lunid, sizeof(rpl_8byte_wwid_list->lun_entries[i].lunid));
		memcpy(&rpl_16byte_wwid_list->lun_entries[i].wwid[0], &rpl_8byte_wwid_list->lun_entries[i].wwid, sizeof(rpl_8byte_wwid_list->lun_entries[i].wwid));
		memset(&rpl_16byte_wwid_list->lun_entries[i].wwid[8], 0, 8);
		rpl_16byte_wwid_list->lun_entries[i].device_type = rpl_8byte_wwid_list->lun_entries[i].device_type;
		rpl_16byte_wwid_list->lun_entries[i].device_flags = rpl_8byte_wwid_list->lun_entries[i].device_flags;
		rpl_16byte_wwid_list->lun_entries[i].lun_count = rpl_8byte_wwid_list->lun_entries[i].lun_count;
		rpl_16byte_wwid_list->lun_entries[i].redundant_paths = rpl_8byte_wwid_list->lun_entries[i].redundant_paths;
		rpl_16byte_wwid_list->lun_entries[i].aio_handle = rpl_8byte_wwid_list->lun_entries[i].aio_handle;
	}

	kfree(rpl_8byte_wwid_list);
	*buffer = rpl_16byte_wwid_list;

	return 0;
}

static inline int pqi_report_logical_luns(struct pqi_ctrl_info *ctrl_info, void **buffer)
{
	return pqi_report_phys_logical_luns(ctrl_info, CISS_REPORT_LOG, buffer);
}

static int pqi_get_device_lists(struct pqi_ctrl_info *ctrl_info,
	struct report_phys_lun_16byte_wwid_list **physdev_list,
	struct report_log_lun_list **logdev_list)
{
	int rc;
	size_t logdev_list_length;
	size_t logdev_data_length;
	struct report_log_lun_list *internal_logdev_list;
	struct report_log_lun_list *logdev_data;
	struct report_lun_header report_lun_header;

	rc = pqi_report_phys_luns(ctrl_info, (void **)physdev_list);
	if (rc)
		dev_err(&ctrl_info->pci_dev->dev,
			"report physical LUNs failed\n");

	rc = pqi_report_logical_luns(ctrl_info, (void **)logdev_list);
	if (rc)
		dev_err(&ctrl_info->pci_dev->dev,
			"report logical LUNs failed\n");

	/*
	 * Tack the controller itself onto the end of the logical device list
	 * by adding a list entry that is all zeros.
	 */

	logdev_data = *logdev_list;

	if (logdev_data) {
		logdev_list_length =
			get_unaligned_be32(&logdev_data->header.list_length);
	} else {
		memset(&report_lun_header, 0, sizeof(report_lun_header));
		logdev_data =
			(struct report_log_lun_list *)&report_lun_header;
		logdev_list_length = 0;
	}

	logdev_data_length = sizeof(struct report_lun_header) +
		logdev_list_length;

	internal_logdev_list = kmalloc(logdev_data_length +
		sizeof(struct report_log_lun), GFP_KERNEL);
	if (!internal_logdev_list) {
		kfree(*logdev_list);
		*logdev_list = NULL;
		return -ENOMEM;
	}

	memcpy(internal_logdev_list, logdev_data, logdev_data_length);
	memset((u8 *)internal_logdev_list + logdev_data_length, 0,
		sizeof(struct report_log_lun));
	put_unaligned_be32(logdev_list_length +
		sizeof(struct report_log_lun),
		&internal_logdev_list->header.list_length);

	kfree(*logdev_list);
	*logdev_list = internal_logdev_list;

	return 0;
}

static inline void pqi_set_bus_target_lun(struct pqi_scsi_dev *device,
	int bus, int target, int lun)
{
	device->bus = bus;
	device->target = target;
	device->lun = lun;
}

static void pqi_assign_bus_target_lun(struct pqi_scsi_dev *device)
{
	u8 *scsi3addr;
	u32 lunid;
	int bus;
	int target;
	int lun;

	scsi3addr = device->scsi3addr;
	lunid = get_unaligned_le32(scsi3addr);

	if (pqi_is_hba_lunid(scsi3addr)) {
		/* The specified device is the controller. */
		pqi_set_bus_target_lun(device, PQI_HBA_BUS, 0, lunid & 0x3fff);
		device->target_lun_valid = true;
		return;
	}

	if (pqi_is_logical_device(device)) {
		if (device->is_external_raid_device) {
			bus = PQI_EXTERNAL_RAID_VOLUME_BUS;
			target = (lunid >> 16) & 0x3fff;
			lun = lunid & 0xff;
		} else {
			bus = PQI_RAID_VOLUME_BUS;
			target = 0;
			lun = lunid & 0x3fff;
		}
		pqi_set_bus_target_lun(device, bus, target, lun);
		device->target_lun_valid = true;
		return;
	}

	/*
	 * Defer target and LUN assignment for non-controller physical devices
	 * because the SAS transport layer will make these assignments later.
	 */
	pqi_set_bus_target_lun(device, PQI_PHYSICAL_DEVICE_BUS, 0, 0);
}

static void pqi_get_raid_level(struct pqi_ctrl_info *ctrl_info,
	struct pqi_scsi_dev *device)
{
	int rc;
	u8 raid_level;
	u8 *buffer;

	raid_level = SA_RAID_UNKNOWN;

	buffer = kmalloc(64, GFP_KERNEL);
	if (buffer) {
		rc = pqi_scsi_inquiry(ctrl_info, device->scsi3addr,
			VPD_PAGE | CISS_VPD_LV_DEVICE_GEOMETRY, buffer, 64);
		if (rc == 0) {
			raid_level = buffer[8];
			if (raid_level > SA_RAID_MAX)
				raid_level = SA_RAID_UNKNOWN;
		}
		kfree(buffer);
	}

	device->raid_level = raid_level;
}

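/*
 * Sanity-check a RAID map: minimum structure size, the expected layout
 * map count for RAID 1 and RAID 1(Triple), and a non-zero row size for
 * RAID 50/60.
 */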
static int pqi_validate_raid_map(struct pqi_ctrl_info *ctrl_info,
	struct pqi_scsi_dev *device, struct raid_map *raid_map)
{
	char *err_msg;
	u32 raid_map_size;
	u32 r5or6_blocks_per_row;

	raid_map_size = get_unaligned_le32(&raid_map->structure_size);

	if (raid_map_size < offsetof(struct raid_map, disk_data)) {
		err_msg = "RAID map too small";
		goto bad_raid_map;
	}

	if (device->raid_level == SA_RAID_1) {
		if (get_unaligned_le16(&raid_map->layout_map_count) != 2) {
			err_msg = "invalid RAID-1 map";
			goto bad_raid_map;
		}
	} else if (device->raid_level == SA_RAID_TRIPLE) {
		if (get_unaligned_le16(&raid_map->layout_map_count) != 3) {
			err_msg = "invalid RAID-1(Triple) map";
			goto bad_raid_map;
		}
	} else if ((device->raid_level == SA_RAID_5 ||
		device->raid_level == SA_RAID_6) &&
		get_unaligned_le16(&raid_map->layout_map_count) > 1) {
		/* RAID 50/60 */
		r5or6_blocks_per_row =
			get_unaligned_le16(&raid_map->strip_size) *
			get_unaligned_le16(&raid_map->data_disks_per_row);
		if (r5or6_blocks_per_row == 0) {
			err_msg = "invalid RAID-5 or RAID-6 map";
			goto bad_raid_map;
		}
	}

	return 0;

bad_raid_map:
	dev_warn(&ctrl_info->pci_dev->dev,
		"logical device %08x%08x %s\n",
		*((u32 *)&device->scsi3addr),
		*((u32 *)&device->scsi3addr[4]), err_msg);

	return -EINVAL;
}

static int pqi_get_raid_map(struct pqi_ctrl_info *ctrl_info,
	struct pqi_scsi_dev *device)
{
	int rc;
	u32 raid_map_size;
	struct raid_map *raid_map;

	raid_map = kmalloc(sizeof(*raid_map), GFP_KERNEL);
	if (!raid_map)
		return -ENOMEM;

	rc = pqi_send_scsi_raid_request(ctrl_info, CISS_GET_RAID_MAP,
		device->scsi3addr, raid_map, sizeof(*raid_map), 0, NULL);
	if (rc)
		goto error;

	raid_map_size = get_unaligned_le32(&raid_map->structure_size);

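	/*
	 * The full RAID map is larger than the default buffer: reallocate
	 * at the size the controller reported and fetch the complete map.
	 */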
1484 if (raid_map_size > sizeof(*raid_map)) {
1485
1486 kfree(raid_map);
1487
1488 raid_map = kmalloc(raid_map_size, GFP_KERNEL);
1489 if (!raid_map)
1490 return -ENOMEM;
1491
1492 rc = pqi_send_scsi_raid_request(ctrl_info, CISS_GET_RAID_MAP,
1493 device->scsi3addr, raid_map, raid_map_size, 0, NULL);
1494 if (rc)
1495 goto error;
1496
1497 if (get_unaligned_le32(&raid_map->structure_size)
1498 != raid_map_size) {
1499 dev_warn(&ctrl_info->pci_dev->dev,
1500 "requested %u bytes, received %u bytes\n",
1501 raid_map_size,
1502 get_unaligned_le32(&raid_map->structure_size));
1503 rc = -EINVAL;
1504 goto error;
1505 }
1506 }
1507
1508 rc = pqi_validate_raid_map(ctrl_info, device, raid_map);
1509 if (rc)
1510 goto error;
1511
1512 device->raid_io_stats = alloc_percpu(struct pqi_raid_io_stats);
1513 if (!device->raid_io_stats) {
1514 rc = -ENOMEM;
1515 goto error;
1516 }
1517
1518 device->raid_map = raid_map;
1519
1520 return 0;
1521
1522 error:
1523 kfree(raid_map);
1524
1525 return rc;
1526 }
1527
pqi_set_max_transfer_encrypted(struct pqi_ctrl_info * ctrl_info,struct pqi_scsi_dev * device)1528 static void pqi_set_max_transfer_encrypted(struct pqi_ctrl_info *ctrl_info,
1529 struct pqi_scsi_dev *device)
1530 {
1531 if (!ctrl_info->lv_drive_type_mix_valid) {
1532 device->max_transfer_encrypted = ~0;
1533 return;
1534 }
1535
1536 switch (LV_GET_DRIVE_TYPE_MIX(device->scsi3addr)) {
1537 case LV_DRIVE_TYPE_MIX_SAS_HDD_ONLY:
1538 case LV_DRIVE_TYPE_MIX_SATA_HDD_ONLY:
1539 case LV_DRIVE_TYPE_MIX_SAS_OR_SATA_SSD_ONLY:
1540 case LV_DRIVE_TYPE_MIX_SAS_SSD_ONLY:
1541 case LV_DRIVE_TYPE_MIX_SATA_SSD_ONLY:
1542 case LV_DRIVE_TYPE_MIX_SAS_ONLY:
1543 case LV_DRIVE_TYPE_MIX_SATA_ONLY:
1544 device->max_transfer_encrypted =
1545 ctrl_info->max_transfer_encrypted_sas_sata;
1546 break;
1547 case LV_DRIVE_TYPE_MIX_NVME_ONLY:
1548 device->max_transfer_encrypted =
1549 ctrl_info->max_transfer_encrypted_nvme;
1550 break;
1551 case LV_DRIVE_TYPE_MIX_UNKNOWN:
1552 case LV_DRIVE_TYPE_MIX_NO_RESTRICTION:
1553 default:
1554 device->max_transfer_encrypted =
1555 min(ctrl_info->max_transfer_encrypted_sas_sata,
1556 ctrl_info->max_transfer_encrypted_nvme);
1557 break;
1558 }
1559 }
1560
pqi_get_raid_bypass_status(struct pqi_ctrl_info * ctrl_info,struct pqi_scsi_dev * device)1561 static void pqi_get_raid_bypass_status(struct pqi_ctrl_info *ctrl_info,
1562 struct pqi_scsi_dev *device)
1563 {
1564 int rc;
1565 u8 *buffer;
1566 u8 bypass_status;
1567
1568 buffer = kmalloc(64, GFP_KERNEL);
1569 if (!buffer)
1570 return;
1571
1572 rc = pqi_scsi_inquiry(ctrl_info, device->scsi3addr,
1573 VPD_PAGE | CISS_VPD_LV_BYPASS_STATUS, buffer, 64);
1574 if (rc)
1575 goto out;
1576
1577 #define RAID_BYPASS_STATUS 4
1578 #define RAID_BYPASS_CONFIGURED 0x1
1579 #define RAID_BYPASS_ENABLED 0x2
1580
1581 bypass_status = buffer[RAID_BYPASS_STATUS];
1582 device->raid_bypass_configured =
1583 (bypass_status & RAID_BYPASS_CONFIGURED) != 0;
1584 if (device->raid_bypass_configured &&
1585 (bypass_status & RAID_BYPASS_ENABLED) &&
1586 pqi_get_raid_map(ctrl_info, device) == 0) {
1587 device->raid_bypass_enabled = true;
1588 if (get_unaligned_le16(&device->raid_map->flags) &
1589 RAID_MAP_ENCRYPTION_ENABLED)
1590 pqi_set_max_transfer_encrypted(ctrl_info, device);
1591 }
1592
1593 out:
1594 kfree(buffer);
1595 }
1596
1597 /*
1598 * Use vendor-specific VPD to determine online/offline status of a volume.
1599 */
1600
pqi_get_volume_status(struct pqi_ctrl_info * ctrl_info,struct pqi_scsi_dev * device)1601 static void pqi_get_volume_status(struct pqi_ctrl_info *ctrl_info,
1602 struct pqi_scsi_dev *device)
1603 {
1604 int rc;
1605 size_t page_length;
1606 u8 volume_status = CISS_LV_STATUS_UNAVAILABLE;
1607 bool volume_offline = true;
1608 u32 volume_flags;
1609 struct ciss_vpd_logical_volume_status *vpd;
1610
1611 vpd = kmalloc(sizeof(*vpd), GFP_KERNEL);
1612 if (!vpd)
1613 goto no_buffer;
1614
1615 rc = pqi_scsi_inquiry(ctrl_info, device->scsi3addr,
1616 VPD_PAGE | CISS_VPD_LV_STATUS, vpd, sizeof(*vpd));
1617 if (rc)
1618 goto out;
1619
1620 if (vpd->page_code != CISS_VPD_LV_STATUS)
1621 goto out;
1622
1623 page_length = offsetof(struct ciss_vpd_logical_volume_status,
1624 volume_status) + vpd->page_length;
1625 if (page_length < sizeof(*vpd))
1626 goto out;
1627
1628 volume_status = vpd->volume_status;
1629 volume_flags = get_unaligned_be32(&vpd->flags);
1630 volume_offline = (volume_flags & CISS_LV_FLAGS_NO_HOST_IO) != 0;
1631
1632 out:
1633 kfree(vpd);
1634 no_buffer:
1635 device->volume_status = volume_status;
1636 device->volume_offline = volume_offline;
1637 }
1638
1639 #define PQI_DEVICE_NCQ_PRIO_SUPPORTED 0x01
1640 #define PQI_DEVICE_PHY_MAP_SUPPORTED 0x10
1641 #define PQI_DEVICE_ERASE_IN_PROGRESS 0x10
1642
pqi_get_physical_device_info(struct pqi_ctrl_info * ctrl_info,struct pqi_scsi_dev * device,struct bmic_identify_physical_device * id_phys)1643 static int pqi_get_physical_device_info(struct pqi_ctrl_info *ctrl_info,
1644 struct pqi_scsi_dev *device,
1645 struct bmic_identify_physical_device *id_phys)
1646 {
1647 int rc;
1648
1649 memset(id_phys, 0, sizeof(*id_phys));
1650
1651 rc = pqi_identify_physical_device(ctrl_info, device,
1652 id_phys, sizeof(*id_phys));
1653 if (rc) {
1654 device->queue_depth = PQI_PHYSICAL_DISK_DEFAULT_MAX_QUEUE_DEPTH;
1655 return rc;
1656 }
1657
1658 scsi_sanitize_inquiry_string(&id_phys->model[0], 8);
1659 scsi_sanitize_inquiry_string(&id_phys->model[8], 16);
1660
1661 memcpy(device->vendor, &id_phys->model[0], sizeof(device->vendor));
1662 memcpy(device->model, &id_phys->model[8], sizeof(device->model));
1663
1664 device->box_index = id_phys->box_index;
1665 device->phys_box_on_bus = id_phys->phys_box_on_bus;
1666 device->phy_connected_dev_type = id_phys->phy_connected_dev_type[0];
1667 device->queue_depth =
1668 get_unaligned_le16(&id_phys->current_queue_depth_limit);
1669 device->active_path_index = id_phys->active_path_number;
1670 device->path_map = id_phys->redundant_path_present_map;
1671 memcpy(&device->box,
1672 &id_phys->alternate_paths_phys_box_on_port,
1673 sizeof(device->box));
1674 memcpy(&device->phys_connector,
1675 &id_phys->alternate_paths_phys_connector,
1676 sizeof(device->phys_connector));
1677 device->bay = id_phys->phys_bay_in_box;
1678 device->lun_count = id_phys->multi_lun_device_lun_count;
1679 if ((id_phys->even_more_flags & PQI_DEVICE_PHY_MAP_SUPPORTED) &&
1680 id_phys->phy_count)
1681 device->phy_id =
1682 id_phys->phy_to_phy_map[device->active_path_index];
1683 else
1684 device->phy_id = 0xFF;
1685
1686 device->ncq_prio_support =
1687 ((get_unaligned_le32(&id_phys->misc_drive_flags) >> 16) &
1688 PQI_DEVICE_NCQ_PRIO_SUPPORTED);
1689
1690 device->erase_in_progress = !!(get_unaligned_le16(&id_phys->extra_physical_drive_flags) & PQI_DEVICE_ERASE_IN_PROGRESS);
1691
1692 return 0;
1693 }
1694
1695 static int pqi_get_logical_device_info(struct pqi_ctrl_info *ctrl_info,
1696 struct pqi_scsi_dev *device)
1697 {
1698 int rc;
1699 u8 *buffer;
1700
1701 buffer = kmalloc(64, GFP_KERNEL);
1702 if (!buffer)
1703 return -ENOMEM;
1704
1705 /* Send an inquiry to the device to see what it is. */
1706 rc = pqi_scsi_inquiry(ctrl_info, device->scsi3addr, 0, buffer, 64);
1707 if (rc)
1708 goto out;
1709
1710 scsi_sanitize_inquiry_string(&buffer[8], 8);
1711 scsi_sanitize_inquiry_string(&buffer[16], 16);
1712
1713 device->devtype = buffer[0] & 0x1f;
1714 memcpy(device->vendor, &buffer[8], sizeof(device->vendor));
1715 memcpy(device->model, &buffer[16], sizeof(device->model));
1716
1717 if (device->devtype == TYPE_DISK) {
1718 if (device->is_external_raid_device) {
1719 device->raid_level = SA_RAID_UNKNOWN;
1720 device->volume_status = CISS_LV_OK;
1721 device->volume_offline = false;
1722 } else {
1723 pqi_get_raid_level(ctrl_info, device);
1724 pqi_get_raid_bypass_status(ctrl_info, device);
1725 pqi_get_volume_status(ctrl_info, device);
1726 }
1727 }
1728
1729 out:
1730 kfree(buffer);
1731
1732 return rc;
1733 }
1734
1735 /*
1736 * Prevent adding a drive to the OS for some corner cases, such as a drive
1737 * undergoing a sanitize (erase) operation. Some OSes will continue to poll
1738 * the drive until the sanitize completes, which can take hours,
1739 * resulting in long bootup delays. Commands such as TUR and READ_CAP
1740 * are allowed, but READ/WRITE commands fail with a check condition,
1741 * so the OS cannot read the partition table.
1742 * Note: devices that have completed sanitize must be re-enabled
1743 * using the management utility.
1744 */
1745 static inline bool pqi_keep_device_offline(struct pqi_scsi_dev *device)
1746 {
1747 return device->erase_in_progress;
1748 }
1749
1750 static int pqi_get_device_info_phys_logical(struct pqi_ctrl_info *ctrl_info,
1751 struct pqi_scsi_dev *device,
1752 struct bmic_identify_physical_device *id_phys)
1753 {
1754 int rc;
1755
1756 if (device->is_expander_smp_device)
1757 return 0;
1758
1759 if (pqi_is_logical_device(device))
1760 rc = pqi_get_logical_device_info(ctrl_info, device);
1761 else
1762 rc = pqi_get_physical_device_info(ctrl_info, device, id_phys);
1763
1764 return rc;
1765 }
1766
1767 static int pqi_get_device_info(struct pqi_ctrl_info *ctrl_info,
1768 struct pqi_scsi_dev *device,
1769 struct bmic_identify_physical_device *id_phys)
1770 {
1771 int rc;
1772
1773 rc = pqi_get_device_info_phys_logical(ctrl_info, device, id_phys);
1774
1775 if (rc == 0 && device->lun_count == 0)
1776 device->lun_count = 1;
1777
1778 return rc;
1779 }
1780
1781 static void pqi_show_volume_status(struct pqi_ctrl_info *ctrl_info,
1782 struct pqi_scsi_dev *device)
1783 {
1784 char *status;
1785 static const char unknown_state_str[] =
1786 "Volume is in an unknown state (%u)";
1787 char unknown_state_buffer[sizeof(unknown_state_str) + 10];
1788
1789 switch (device->volume_status) {
1790 case CISS_LV_OK:
1791 status = "Volume online";
1792 break;
1793 case CISS_LV_FAILED:
1794 status = "Volume failed";
1795 break;
1796 case CISS_LV_NOT_CONFIGURED:
1797 status = "Volume not configured";
1798 break;
1799 case CISS_LV_DEGRADED:
1800 status = "Volume degraded";
1801 break;
1802 case CISS_LV_READY_FOR_RECOVERY:
1803 status = "Volume ready for recovery operation";
1804 break;
1805 case CISS_LV_UNDERGOING_RECOVERY:
1806 status = "Volume undergoing recovery";
1807 break;
1808 case CISS_LV_WRONG_PHYSICAL_DRIVE_REPLACED:
1809 status = "Wrong physical drive was replaced";
1810 break;
1811 case CISS_LV_PHYSICAL_DRIVE_CONNECTION_PROBLEM:
1812 status = "A physical drive not properly connected";
1813 break;
1814 case CISS_LV_HARDWARE_OVERHEATING:
1815 status = "Hardware is overheating";
1816 break;
1817 case CISS_LV_HARDWARE_HAS_OVERHEATED:
1818 status = "Hardware has overheated";
1819 break;
1820 case CISS_LV_UNDERGOING_EXPANSION:
1821 status = "Volume undergoing expansion";
1822 break;
1823 case CISS_LV_NOT_AVAILABLE:
1824 status = "Volume waiting for transforming volume";
1825 break;
1826 case CISS_LV_QUEUED_FOR_EXPANSION:
1827 status = "Volume queued for expansion";
1828 break;
1829 case CISS_LV_DISABLED_SCSI_ID_CONFLICT:
1830 status = "Volume disabled due to SCSI ID conflict";
1831 break;
1832 case CISS_LV_EJECTED:
1833 status = "Volume has been ejected";
1834 break;
1835 case CISS_LV_UNDERGOING_ERASE:
1836 status = "Volume undergoing background erase";
1837 break;
1838 case CISS_LV_READY_FOR_PREDICTIVE_SPARE_REBUILD:
1839 status = "Volume ready for predictive spare rebuild";
1840 break;
1841 case CISS_LV_UNDERGOING_RPI:
1842 status = "Volume undergoing rapid parity initialization";
1843 break;
1844 case CISS_LV_PENDING_RPI:
1845 status = "Volume queued for rapid parity initialization";
1846 break;
1847 case CISS_LV_ENCRYPTED_NO_KEY:
1848 status = "Encrypted volume inaccessible - key not present";
1849 break;
1850 case CISS_LV_UNDERGOING_ENCRYPTION:
1851 status = "Volume undergoing encryption process";
1852 break;
1853 case CISS_LV_UNDERGOING_ENCRYPTION_REKEYING:
1854 status = "Volume undergoing encryption re-keying process";
1855 break;
1856 case CISS_LV_ENCRYPTED_IN_NON_ENCRYPTED_CONTROLLER:
1857 status = "Volume encrypted but encryption is disabled";
1858 break;
1859 case CISS_LV_PENDING_ENCRYPTION:
1860 status = "Volume pending migration to encrypted state";
1861 break;
1862 case CISS_LV_PENDING_ENCRYPTION_REKEYING:
1863 status = "Volume pending encryption rekeying";
1864 break;
1865 case CISS_LV_NOT_SUPPORTED:
1866 status = "Volume not supported on this controller";
1867 break;
1868 case CISS_LV_STATUS_UNAVAILABLE:
1869 status = "Volume status not available";
1870 break;
1871 default:
1872 snprintf(unknown_state_buffer, sizeof(unknown_state_buffer),
1873 unknown_state_str, device->volume_status);
1874 status = unknown_state_buffer;
1875 break;
1876 }
1877
1878 dev_info(&ctrl_info->pci_dev->dev,
1879 "scsi %d:%d:%d:%d %s\n",
1880 ctrl_info->scsi_host->host_no,
1881 device->bus, device->target, device->lun, status);
1882 }
1883
1884 static void pqi_rescan_worker(struct work_struct *work)
1885 {
1886 struct pqi_ctrl_info *ctrl_info;
1887
1888 ctrl_info = container_of(to_delayed_work(work), struct pqi_ctrl_info,
1889 rescan_work);
1890
1891 pqi_scan_scsi_devices(ctrl_info);
1892 }
1893
1894 static int pqi_add_device(struct pqi_ctrl_info *ctrl_info,
1895 struct pqi_scsi_dev *device)
1896 {
1897 int rc;
1898
1899 if (pqi_is_logical_device(device))
1900 rc = scsi_add_device(ctrl_info->scsi_host, device->bus,
1901 device->target, device->lun);
1902 else
1903 rc = pqi_add_sas_device(ctrl_info->sas_host, device);
1904
1905 return rc;
1906 }
1907
1908 #define PQI_REMOVE_DEVICE_PENDING_IO_TIMEOUT_MSECS (20 * 1000)
1909
1910 static inline void pqi_remove_device(struct pqi_ctrl_info *ctrl_info, struct pqi_scsi_dev *device)
1911 {
1912 int rc;
1913 int lun;
1914
1915 for (lun = 0; lun < device->lun_count; lun++) {
1916 rc = pqi_device_wait_for_pending_io(ctrl_info, device, lun,
1917 PQI_REMOVE_DEVICE_PENDING_IO_TIMEOUT_MSECS);
1918 if (rc)
1919 dev_err(&ctrl_info->pci_dev->dev,
1920 "scsi %d:%d:%d:%d removing device with %d outstanding command(s)\n",
1921 ctrl_info->scsi_host->host_no, device->bus,
1922 device->target, lun,
1923 atomic_read(&device->scsi_cmds_outstanding[lun]));
1924 }
1925
1926 if (pqi_is_logical_device(device))
1927 scsi_remove_device(device->sdev);
1928 else
1929 pqi_remove_sas_device(device);
1930
1931 pqi_device_remove_start(device);
1932 }
1933
1934 /* Assumes the SCSI device list lock is held. */
1935
1936 static struct pqi_scsi_dev *pqi_find_scsi_dev(struct pqi_ctrl_info *ctrl_info,
1937 int bus, int target, int lun)
1938 {
1939 struct pqi_scsi_dev *device;
1940
1941 list_for_each_entry(device, &ctrl_info->scsi_device_list, scsi_device_list_entry)
1942 if (device->bus == bus && device->target == target && device->lun == lun)
1943 return device;
1944
1945 return NULL;
1946 }
1947
1948 static inline bool pqi_device_equal(struct pqi_scsi_dev *dev1, struct pqi_scsi_dev *dev2)
1949 {
1950 if (dev1->is_physical_device != dev2->is_physical_device)
1951 return false;
1952
1953 if (dev1->is_physical_device)
1954 return memcmp(dev1->wwid, dev2->wwid, sizeof(dev1->wwid)) == 0;
1955
1956 return memcmp(dev1->volume_id, dev2->volume_id, sizeof(dev1->volume_id)) == 0;
1957 }
1958
1959 enum pqi_find_result {
1960 DEVICE_NOT_FOUND,
1961 DEVICE_CHANGED,
1962 DEVICE_SAME,
1963 };
1964
1965 static enum pqi_find_result pqi_scsi_find_entry(struct pqi_ctrl_info *ctrl_info,
1966 struct pqi_scsi_dev *device_to_find, struct pqi_scsi_dev **matching_device)
1967 {
1968 struct pqi_scsi_dev *device;
1969
1970 list_for_each_entry(device, &ctrl_info->scsi_device_list, scsi_device_list_entry) {
1971 if (pqi_scsi3addr_equal(device_to_find->scsi3addr, device->scsi3addr)) {
1972 *matching_device = device;
1973 if (pqi_device_equal(device_to_find, device)) {
1974 if (device_to_find->volume_offline)
1975 return DEVICE_CHANGED;
1976 return DEVICE_SAME;
1977 }
1978 return DEVICE_CHANGED;
1979 }
1980 }
1981
1982 return DEVICE_NOT_FOUND;
1983 }
1984
1985 static inline const char *pqi_device_type(struct pqi_scsi_dev *device)
1986 {
1987 if (device->is_expander_smp_device)
1988 return "Enclosure SMP ";
1989
1990 return scsi_device_type(device->devtype);
1991 }
1992
1993 #define PQI_DEV_INFO_BUFFER_LENGTH 128
1994
1995 static void pqi_dev_info(struct pqi_ctrl_info *ctrl_info,
1996 char *action, struct pqi_scsi_dev *device)
1997 {
1998 ssize_t count;
1999 char buffer[PQI_DEV_INFO_BUFFER_LENGTH];
2000
2001 count = scnprintf(buffer, PQI_DEV_INFO_BUFFER_LENGTH,
2002 "%d:%d:", ctrl_info->scsi_host->host_no, device->bus);
2003
2004 if (device->target_lun_valid)
2005 count += scnprintf(buffer + count,
2006 PQI_DEV_INFO_BUFFER_LENGTH - count,
2007 "%d:%d",
2008 device->target,
2009 device->lun);
2010 else
2011 count += scnprintf(buffer + count,
2012 PQI_DEV_INFO_BUFFER_LENGTH - count,
2013 "-:-");
2014
2015 if (pqi_is_logical_device(device)) {
2016 count += scnprintf(buffer + count,
2017 PQI_DEV_INFO_BUFFER_LENGTH - count,
2018 " %08x%08x",
2019 *((u32 *)&device->scsi3addr),
2020 *((u32 *)&device->scsi3addr[4]));
2021 } else if (ctrl_info->rpl_extended_format_4_5_supported) {
2022 if (device->device_type == SA_DEVICE_TYPE_NVME)
2023 count += scnprintf(buffer + count,
2024 PQI_DEV_INFO_BUFFER_LENGTH - count,
2025 " %016llx%016llx",
2026 get_unaligned_be64(&device->wwid[0]),
2027 get_unaligned_be64(&device->wwid[8]));
2028 else
2029 count += scnprintf(buffer + count,
2030 PQI_DEV_INFO_BUFFER_LENGTH - count,
2031 " %016llx",
2032 get_unaligned_be64(&device->wwid[0]));
2033 } else {
2034 count += scnprintf(buffer + count,
2035 PQI_DEV_INFO_BUFFER_LENGTH - count,
2036 " %016llx",
2037 get_unaligned_be64(&device->wwid[0]));
2038 }
2039
2041 count += scnprintf(buffer + count, PQI_DEV_INFO_BUFFER_LENGTH - count,
2042 " %s %.8s %.16s ",
2043 pqi_device_type(device),
2044 device->vendor,
2045 device->model);
2046
2047 if (pqi_is_logical_device(device)) {
2048 if (device->devtype == TYPE_DISK)
2049 count += scnprintf(buffer + count,
2050 PQI_DEV_INFO_BUFFER_LENGTH - count,
2051 "SSDSmartPathCap%c En%c %-12s",
2052 device->raid_bypass_configured ? '+' : '-',
2053 device->raid_bypass_enabled ? '+' : '-',
2054 pqi_raid_level_to_string(device->raid_level));
2055 } else {
2056 count += scnprintf(buffer + count,
2057 PQI_DEV_INFO_BUFFER_LENGTH - count,
2058 "AIO%c", device->aio_enabled ? '+' : '-');
2059 if (device->devtype == TYPE_DISK ||
2060 device->devtype == TYPE_ZBC)
2061 count += scnprintf(buffer + count,
2062 PQI_DEV_INFO_BUFFER_LENGTH - count,
2063 " qd=%-6d", device->queue_depth);
2064 }
2065
2066 dev_info(&ctrl_info->pci_dev->dev, "%s %s\n", action, buffer);
2067 }
2068
2069 static bool pqi_raid_maps_equal(struct raid_map *raid_map1, struct raid_map *raid_map2)
2070 {
2071 u32 raid_map1_size;
2072 u32 raid_map2_size;
2073
2074 if (raid_map1 == NULL || raid_map2 == NULL)
2075 return raid_map1 == raid_map2;
2076
2077 raid_map1_size = get_unaligned_le32(&raid_map1->structure_size);
2078 raid_map2_size = get_unaligned_le32(&raid_map2->structure_size);
2079
2080 if (raid_map1_size != raid_map2_size)
2081 return false;
2082
2083 return memcmp(raid_map1, raid_map2, raid_map1_size) == 0;
2084 }
2085
2086 /* Assumes the SCSI device list lock is held. */
2087
2088 static void pqi_scsi_update_device(struct pqi_ctrl_info *ctrl_info,
2089 struct pqi_scsi_dev *existing_device, struct pqi_scsi_dev *new_device)
2090 {
2091 existing_device->device_type = new_device->device_type;
2092 existing_device->bus = new_device->bus;
2093 if (new_device->target_lun_valid) {
2094 existing_device->target = new_device->target;
2095 existing_device->lun = new_device->lun;
2096 existing_device->target_lun_valid = true;
2097 }
2098
2099 /* By definition, the scsi3addr and wwid fields are already the same. */
2100
2101 existing_device->is_physical_device = new_device->is_physical_device;
2102 memcpy(existing_device->vendor, new_device->vendor, sizeof(existing_device->vendor));
2103 memcpy(existing_device->model, new_device->model, sizeof(existing_device->model));
2104 existing_device->sas_address = new_device->sas_address;
2105 existing_device->queue_depth = new_device->queue_depth;
2106 existing_device->device_offline = false;
2107 existing_device->lun_count = new_device->lun_count;
2108
2109 if (pqi_is_logical_device(existing_device)) {
2110 existing_device->is_external_raid_device = new_device->is_external_raid_device;
2111
2112 if (existing_device->devtype == TYPE_DISK) {
2113 existing_device->raid_level = new_device->raid_level;
2114 existing_device->volume_status = new_device->volume_status;
2115 memset(existing_device->next_bypass_group, 0, sizeof(existing_device->next_bypass_group));
2116 if (!pqi_raid_maps_equal(existing_device->raid_map, new_device->raid_map)) {
2117 kfree(existing_device->raid_map);
2118 existing_device->raid_map = new_device->raid_map;
2119 /* To prevent this from being freed later. */
2120 new_device->raid_map = NULL;
2121 }
2122 if (new_device->raid_bypass_enabled && existing_device->raid_io_stats == NULL) {
2123 existing_device->raid_io_stats = new_device->raid_io_stats;
2124 new_device->raid_io_stats = NULL;
2125 }
2126 existing_device->raid_bypass_configured = new_device->raid_bypass_configured;
2127 existing_device->raid_bypass_enabled = new_device->raid_bypass_enabled;
2128 }
2129 } else {
2130 existing_device->aio_enabled = new_device->aio_enabled;
2131 existing_device->aio_handle = new_device->aio_handle;
2132 existing_device->is_expander_smp_device = new_device->is_expander_smp_device;
2133 existing_device->active_path_index = new_device->active_path_index;
2134 existing_device->phy_id = new_device->phy_id;
2135 existing_device->path_map = new_device->path_map;
2136 existing_device->bay = new_device->bay;
2137 existing_device->box_index = new_device->box_index;
2138 existing_device->phys_box_on_bus = new_device->phys_box_on_bus;
2139 existing_device->phy_connected_dev_type = new_device->phy_connected_dev_type;
2140 memcpy(existing_device->box, new_device->box, sizeof(existing_device->box));
2141 memcpy(existing_device->phys_connector, new_device->phys_connector, sizeof(existing_device->phys_connector));
2142 }
2143 }
2144
2145 static inline void pqi_free_device(struct pqi_scsi_dev *device)
2146 {
2147 if (device) {
2148 free_percpu(device->raid_io_stats);
2149 kfree(device->raid_map);
2150 kfree(device);
2151 }
2152 }
2153
2154 /*
2155 * Called when exposing a new device to the OS fails in order to re-adjust
2156 * our internal SCSI device list to match the SCSI ML's view.
2157 */
2158
2159 static inline void pqi_fixup_botched_add(struct pqi_ctrl_info *ctrl_info,
2160 struct pqi_scsi_dev *device)
2161 {
2162 unsigned long flags;
2163
2164 spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
2165 list_del(&device->scsi_device_list_entry);
2166 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
2167
2168 /* Allow the device structure to be freed later. */
2169 device->keep_device = false;
2170 }
2171
2172 static inline bool pqi_is_device_added(struct pqi_scsi_dev *device)
2173 {
2174 if (device->is_expander_smp_device)
2175 return device->sas_port != NULL;
2176
2177 return device->sdev != NULL;
2178 }
2179
2180 static inline void pqi_init_device_tmf_work(struct pqi_scsi_dev *device)
2181 {
2182 unsigned int lun;
2183 struct pqi_tmf_work *tmf_work;
2184
2185 for (lun = 0, tmf_work = device->tmf_work; lun < PQI_MAX_LUNS_PER_DEVICE; lun++, tmf_work++)
2186 INIT_WORK(&tmf_work->work_struct, pqi_tmf_worker);
2187 }
2188
2189 static inline bool pqi_volume_rescan_needed(struct pqi_scsi_dev *device)
2190 {
2191 if (pqi_device_in_remove(device))
2192 return false;
2193
2194 if (device->sdev == NULL)
2195 return false;
2196
2197 if (!scsi_device_online(device->sdev))
2198 return false;
2199
2200 return device->rescan;
2201 }
2202
2203 static void pqi_update_device_list(struct pqi_ctrl_info *ctrl_info,
2204 struct pqi_scsi_dev *new_device_list[], unsigned int num_new_devices)
2205 {
2206 int rc;
2207 unsigned int i;
2208 unsigned long flags;
2209 enum pqi_find_result find_result;
2210 struct pqi_scsi_dev *device;
2211 struct pqi_scsi_dev *next;
2212 struct pqi_scsi_dev *matching_device;
2213 LIST_HEAD(add_list);
2214 LIST_HEAD(delete_list);
2215
2216 /*
2217 * The idea here is to do as little work as possible while holding the
2218 * spinlock. That's why we go to great pains to defer anything other
2219 * than updating the internal device list until after we release the
2220 * spinlock.
2221 */
2222
2223 spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
2224
2225 /* Assume that all devices in the existing list have gone away. */
2226 list_for_each_entry(device, &ctrl_info->scsi_device_list, scsi_device_list_entry)
2227 device->device_gone = true;
2228
2229 for (i = 0; i < num_new_devices; i++) {
2230 device = new_device_list[i];
2231
2232 find_result = pqi_scsi_find_entry(ctrl_info, device,
2233 &matching_device);
2234
2235 switch (find_result) {
2236 case DEVICE_SAME:
2237 /*
2238 * The newly found device is already in the existing
2239 * device list.
2240 */
2241 device->new_device = false;
2242 matching_device->device_gone = false;
2243 pqi_scsi_update_device(ctrl_info, matching_device, device);
2244 break;
2245 case DEVICE_NOT_FOUND:
2246 /*
2247 * The newly found device is NOT in the existing device
2248 * list.
2249 */
2250 device->new_device = true;
2251 break;
2252 case DEVICE_CHANGED:
2253 /*
2254 * The original device has gone away and we need to add
2255 * the new device.
2256 */
2257 device->new_device = true;
2258 break;
2259 }
2260 }
2261
2262 /* Process all devices that have gone away. */
2263 list_for_each_entry_safe(device, next, &ctrl_info->scsi_device_list,
2264 scsi_device_list_entry) {
2265 if (device->device_gone) {
2266 list_del(&device->scsi_device_list_entry);
2267 list_add_tail(&device->delete_list_entry, &delete_list);
2268 }
2269 }
2270
2271 /* Process all new devices. */
2272 for (i = 0; i < num_new_devices; i++) {
2273 device = new_device_list[i];
2274 if (!device->new_device)
2275 continue;
2276 if (device->volume_offline)
2277 continue;
2278 list_add_tail(&device->scsi_device_list_entry,
2279 &ctrl_info->scsi_device_list);
2280 list_add_tail(&device->add_list_entry, &add_list);
2281 /* To prevent this device structure from being freed later. */
2282 device->keep_device = true;
2283 pqi_init_device_tmf_work(device);
2284 }
2285
2286 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
2287
2288 /*
2289 * If OFA is in progress and there are devices that need to be deleted,
2290 * allow any pending reset operations to continue and unblock any SCSI
2291 * requests before removal.
2292 */
2293 if (pqi_ofa_in_progress(ctrl_info)) {
2294 list_for_each_entry_safe(device, next, &delete_list, delete_list_entry)
2295 if (pqi_is_device_added(device))
2296 pqi_device_remove_start(device);
2297 pqi_ctrl_unblock_device_reset(ctrl_info);
2298 pqi_scsi_unblock_requests(ctrl_info);
2299 }
2300
2301 /* Remove all devices that have gone away. */
2302 list_for_each_entry_safe(device, next, &delete_list, delete_list_entry) {
2303 if (device->volume_offline) {
2304 pqi_dev_info(ctrl_info, "offline", device);
2305 pqi_show_volume_status(ctrl_info, device);
2306 } else {
2307 pqi_dev_info(ctrl_info, "removed", device);
2308 }
2309 if (pqi_is_device_added(device))
2310 pqi_remove_device(ctrl_info, device);
2311 list_del(&device->delete_list_entry);
2312 pqi_free_device(device);
2313 }
2314
2315 /*
2316 * Notify the SML of any existing device changes, such as
2317 * queue depth and device size.
2318 */
2319 list_for_each_entry(device, &ctrl_info->scsi_device_list, scsi_device_list_entry) {
2320 /*
2321 * Check for queue depth change.
2322 */
2323 if (device->sdev && device->queue_depth != device->advertised_queue_depth) {
2324 device->advertised_queue_depth = device->queue_depth;
2325 scsi_change_queue_depth(device->sdev, device->advertised_queue_depth);
2326 }
2327 spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
2328 /*
2329 * Check for changes in the device, such as size.
2330 */
2331 if (pqi_volume_rescan_needed(device)) {
2332 device->rescan = false;
2333 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
2334 scsi_rescan_device(device->sdev);
2335 } else {
2336 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
2337 }
2338 }
2339
2340 /* Expose any new devices. */
2341 list_for_each_entry_safe(device, next, &add_list, add_list_entry) {
2342 if (!pqi_is_device_added(device)) {
2343 rc = pqi_add_device(ctrl_info, device);
2344 if (rc == 0) {
2345 pqi_dev_info(ctrl_info, "added", device);
2346 } else {
2347 dev_warn(&ctrl_info->pci_dev->dev,
2348 "scsi %d:%d:%d:%d addition failed, device not added\n",
2349 ctrl_info->scsi_host->host_no,
2350 device->bus, device->target,
2351 device->lun);
2352 pqi_fixup_botched_add(ctrl_info, device);
2353 }
2354 }
2355 }
2357 }
2358
2359 static inline bool pqi_is_supported_device(struct pqi_scsi_dev *device)
2360 {
2361 /*
2362 * Only support the HBA controller itself as a RAID
2363 * controller. If it's a RAID controller other than
2364 * the HBA itself (an external RAID controller, for
2365 * example), we don't support it.
2366 */
2367 if (device->device_type == SA_DEVICE_TYPE_CONTROLLER &&
2368 !pqi_is_hba_lunid(device->scsi3addr))
2369 return false;
2370
2371 return true;
2372 }
2373
2374 static inline bool pqi_skip_device(u8 *scsi3addr)
2375 {
2376 /* Ignore all masked devices. */
2377 if (MASKED_DEVICE(scsi3addr))
2378 return true;
2379
2380 return false;
2381 }
2382
2383 static inline void pqi_mask_device(u8 *scsi3addr)
2384 {
2385 scsi3addr[3] |= 0xc0;
2386 }
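
/*
 * Illustrative note (not in the original source): pqi_mask_device() above
 * OR's 0xc0 into byte 3 of the CISS LUN address, so e.g. scsi3addr[3]
 * 0x05 becomes 0xc5, which the MASKED_DEVICE() check then treats as a
 * masked device and pqi_skip_device() hides it from the OS.
 */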
2387
2388 static inline bool pqi_expose_device(struct pqi_scsi_dev *device)
2389 {
2390 return !device->is_physical_device || !pqi_skip_device(device->scsi3addr);
2391 }
2392
2393 static int pqi_update_scsi_devices(struct pqi_ctrl_info *ctrl_info)
2394 {
2395 int i;
2396 int rc;
2397 LIST_HEAD(new_device_list_head);
2398 struct report_phys_lun_16byte_wwid_list *physdev_list = NULL;
2399 struct report_log_lun_list *logdev_list = NULL;
2400 struct report_phys_lun_16byte_wwid *phys_lun;
2401 struct report_log_lun *log_lun;
2402 struct bmic_identify_physical_device *id_phys = NULL;
2403 u32 num_physicals;
2404 u32 num_logicals;
2405 struct pqi_scsi_dev **new_device_list = NULL;
2406 struct pqi_scsi_dev *device;
2407 struct pqi_scsi_dev *next;
2408 unsigned int num_new_devices;
2409 unsigned int num_valid_devices;
2410 bool is_physical_device;
2411 u8 *scsi3addr;
2412 unsigned int physical_index;
2413 unsigned int logical_index;
2414 static char *out_of_memory_msg =
2415 "failed to allocate memory, device discovery stopped";
2416
2417 rc = pqi_get_device_lists(ctrl_info, &physdev_list, &logdev_list);
2418 if (rc)
2419 goto out;
2420
2421 if (physdev_list)
2422 num_physicals =
2423 get_unaligned_be32(&physdev_list->header.list_length)
2424 / sizeof(physdev_list->lun_entries[0]);
2425 else
2426 num_physicals = 0;
2427
2428 if (logdev_list)
2429 num_logicals =
2430 get_unaligned_be32(&logdev_list->header.list_length)
2431 / sizeof(logdev_list->lun_entries[0]);
2432 else
2433 num_logicals = 0;
2434
2435 if (num_physicals) {
2436 /*
2437 * We need this buffer for calls to pqi_get_physical_device_info()
2438 * below. We allocate it here instead of inside
2439 * pqi_get_physical_device_info() because it's a fairly large
2440 * buffer.
2441 */
2442 id_phys = kmalloc(sizeof(*id_phys), GFP_KERNEL);
2443 if (!id_phys) {
2444 dev_warn(&ctrl_info->pci_dev->dev, "%s\n",
2445 out_of_memory_msg);
2446 rc = -ENOMEM;
2447 goto out;
2448 }
2449
2450 if (pqi_hide_vsep) {
2451 for (i = num_physicals - 1; i >= 0; i--) {
2452 phys_lun = &physdev_list->lun_entries[i];
2453 if (CISS_GET_DRIVE_NUMBER(phys_lun->lunid) == PQI_VSEP_CISS_BTL) {
2454 pqi_mask_device(phys_lun->lunid);
2455 break;
2456 }
2457 }
2458 }
2459 }
2460
2461 if (num_logicals &&
2462 (logdev_list->header.flags & CISS_REPORT_LOG_FLAG_DRIVE_TYPE_MIX))
2463 ctrl_info->lv_drive_type_mix_valid = true;
2464
2465 num_new_devices = num_physicals + num_logicals;
2466
2467 new_device_list = kmalloc_array(num_new_devices,
2468 sizeof(*new_device_list),
2469 GFP_KERNEL);
2470 if (!new_device_list) {
2471 dev_warn(&ctrl_info->pci_dev->dev, "%s\n", out_of_memory_msg);
2472 rc = -ENOMEM;
2473 goto out;
2474 }
2475
2476 for (i = 0; i < num_new_devices; i++) {
2477 device = kzalloc(sizeof(*device), GFP_KERNEL);
2478 if (!device) {
2479 dev_warn(&ctrl_info->pci_dev->dev, "%s\n",
2480 out_of_memory_msg);
2481 rc = -ENOMEM;
2482 goto out;
2483 }
2484 list_add_tail(&device->new_device_list_entry,
2485 &new_device_list_head);
2486 }
2487
2488 device = NULL;
2489 num_valid_devices = 0;
2490 physical_index = 0;
2491 logical_index = 0;
2492
2493 for (i = 0; i < num_new_devices; i++) {
2494
2495 if ((!pqi_expose_ld_first && i < num_physicals) ||
2496 (pqi_expose_ld_first && i >= num_logicals)) {
2497 is_physical_device = true;
2498 phys_lun = &physdev_list->lun_entries[physical_index++];
2499 log_lun = NULL;
2500 scsi3addr = phys_lun->lunid;
2501 } else {
2502 is_physical_device = false;
2503 phys_lun = NULL;
2504 log_lun = &logdev_list->lun_entries[logical_index++];
2505 scsi3addr = log_lun->lunid;
2506 }
2507
2508 if (is_physical_device && pqi_skip_device(scsi3addr))
2509 continue;
2510
2511 if (device)
2512 device = list_next_entry(device, new_device_list_entry);
2513 else
2514 device = list_first_entry(&new_device_list_head,
2515 struct pqi_scsi_dev, new_device_list_entry);
2516
2517 memcpy(device->scsi3addr, scsi3addr, sizeof(device->scsi3addr));
2518 device->is_physical_device = is_physical_device;
2519 if (is_physical_device) {
2520 device->device_type = phys_lun->device_type;
2521 if (device->device_type == SA_DEVICE_TYPE_EXPANDER_SMP)
2522 device->is_expander_smp_device = true;
2523 } else {
2524 device->is_external_raid_device =
2525 pqi_is_external_raid_addr(scsi3addr);
2526 }
2527
2528 if (!pqi_is_supported_device(device))
2529 continue;
2530
2531 /* Gather information about the device. */
2532 rc = pqi_get_device_info(ctrl_info, device, id_phys);
2533 if (rc == -ENOMEM) {
2534 dev_warn(&ctrl_info->pci_dev->dev, "%s\n",
2535 out_of_memory_msg);
2536 goto out;
2537 }
2538 if (rc) {
2539 if (device->is_physical_device)
2540 dev_warn(&ctrl_info->pci_dev->dev,
2541 "obtaining device info failed, skipping physical device %016llx%016llx\n",
2542 get_unaligned_be64(&phys_lun->wwid[0]),
2543 get_unaligned_be64(&phys_lun->wwid[8]));
2544 else
2545 dev_warn(&ctrl_info->pci_dev->dev,
2546 "obtaining device info failed, skipping logical device %08x%08x\n",
2547 *((u32 *)&device->scsi3addr),
2548 *((u32 *)&device->scsi3addr[4]));
2549 rc = 0;
2550 continue;
2551 }
2552
2553 /* Do not present disks that the OS cannot fully probe. */
2554 if (pqi_keep_device_offline(device))
2555 continue;
2556
2557 pqi_assign_bus_target_lun(device);
2558
2559 if (device->is_physical_device) {
2560 memcpy(device->wwid, phys_lun->wwid, sizeof(device->wwid));
2561 if ((phys_lun->device_flags &
2562 CISS_REPORT_PHYS_DEV_FLAG_AIO_ENABLED) &&
2563 phys_lun->aio_handle) {
2564 device->aio_enabled = true;
2565 device->aio_handle =
2566 phys_lun->aio_handle;
2567 }
2568 } else {
2569 memcpy(device->volume_id, log_lun->volume_id,
2570 sizeof(device->volume_id));
2571 }
2572
2573 device->sas_address = get_unaligned_be64(&device->wwid[0]);
2574
2575 new_device_list[num_valid_devices++] = device;
2576 }
2577
2578 pqi_update_device_list(ctrl_info, new_device_list, num_valid_devices);
2579
2580 out:
2581 list_for_each_entry_safe(device, next, &new_device_list_head,
2582 new_device_list_entry) {
2583 if (device->keep_device)
2584 continue;
2585 list_del(&device->new_device_list_entry);
2586 pqi_free_device(device);
2587 }
2588
2589 kfree(new_device_list);
2590 kfree(physdev_list);
2591 kfree(logdev_list);
2592 kfree(id_phys);
2593
2594 return rc;
2595 }
2596
2597 static int pqi_scan_scsi_devices(struct pqi_ctrl_info *ctrl_info)
2598 {
2599 int rc;
2600 int mutex_acquired;
2601
2602 if (pqi_ctrl_offline(ctrl_info))
2603 return -ENXIO;
2604
2605 mutex_acquired = mutex_trylock(&ctrl_info->scan_mutex);
2606
2607 if (!mutex_acquired) {
2608 if (pqi_ctrl_scan_blocked(ctrl_info))
2609 return -EBUSY;
2610 pqi_schedule_rescan_worker_delayed(ctrl_info);
2611 return -EINPROGRESS;
2612 }
2613
2614 rc = pqi_update_scsi_devices(ctrl_info);
2615 if (rc && !pqi_ctrl_scan_blocked(ctrl_info))
2616 pqi_schedule_rescan_worker_delayed(ctrl_info);
2617
2618 mutex_unlock(&ctrl_info->scan_mutex);
2619
2620 return rc;
2621 }
2622
2623 static void pqi_scan_start(struct Scsi_Host *shost)
2624 {
2625 struct pqi_ctrl_info *ctrl_info;
2626
2627 ctrl_info = shost_to_hba(shost);
2628
2629 pqi_scan_scsi_devices(ctrl_info);
2630 }
2631
2632 /* Returns TRUE if scan is finished. */
2633
2634 static int pqi_scan_finished(struct Scsi_Host *shost,
2635 unsigned long elapsed_time)
2636 {
2637 struct pqi_ctrl_info *ctrl_info;
2638
2639 ctrl_info = shost_priv(shost);
2640
2641 return !mutex_is_locked(&ctrl_info->scan_mutex);
2642 }
2643
2644 static inline void pqi_set_encryption_info(struct pqi_encryption_info *encryption_info,
2645 struct raid_map *raid_map, u64 first_block)
2646 {
2647 u32 volume_blk_size;
2648
2649 /*
2650 * Set the encryption tweak values based on the logical block address.
2651 * If the block size is 512, the tweak value is equal to the LBA.
2652 * For other block sizes, the tweak value is (LBA * block size) / 512.
2653 */
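/*
 * Worked example (illustrative only): for a 4096-byte volume block size,
 * first_block 100 yields a tweak of (100 * 4096) / 512 = 800; the tweak
 * always counts 512-byte units regardless of the volume block size.
 */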
2654 volume_blk_size = get_unaligned_le32(&raid_map->volume_blk_size);
2655 if (volume_blk_size != 512)
2656 first_block = (first_block * volume_blk_size) / 512;
2657
2658 encryption_info->data_encryption_key_index =
2659 get_unaligned_le16(&raid_map->data_encryption_key_index);
2660 encryption_info->encrypt_tweak_lower = lower_32_bits(first_block);
2661 encryption_info->encrypt_tweak_upper = upper_32_bits(first_block);
2662 }
2663
2664 /*
2665 * Attempt to perform RAID bypass mapping for a logical volume I/O.
2666 */
2667
2668 static bool pqi_aio_raid_level_supported(struct pqi_ctrl_info *ctrl_info,
2669 struct pqi_scsi_dev_raid_map_data *rmd)
2670 {
2671 bool is_supported = true;
2672
2673 switch (rmd->raid_level) {
2674 case SA_RAID_0:
2675 break;
2676 case SA_RAID_1:
2677 if (rmd->is_write && (!ctrl_info->enable_r1_writes ||
2678 rmd->data_length > ctrl_info->max_write_raid_1_10_2drive))
2679 is_supported = false;
2680 break;
2681 case SA_RAID_TRIPLE:
2682 if (rmd->is_write && (!ctrl_info->enable_r1_writes ||
2683 rmd->data_length > ctrl_info->max_write_raid_1_10_3drive))
2684 is_supported = false;
2685 break;
2686 case SA_RAID_5:
2687 if (rmd->is_write && (!ctrl_info->enable_r5_writes ||
2688 rmd->data_length > ctrl_info->max_write_raid_5_6))
2689 is_supported = false;
2690 break;
2691 case SA_RAID_6:
2692 if (rmd->is_write && (!ctrl_info->enable_r6_writes ||
2693 rmd->data_length > ctrl_info->max_write_raid_5_6))
2694 is_supported = false;
2695 break;
2696 default:
2697 is_supported = false;
2698 break;
2699 }
2700
2701 return is_supported;
2702 }
2703
2704 #define PQI_RAID_BYPASS_INELIGIBLE 1
2705
2706 static int pqi_get_aio_lba_and_block_count(struct scsi_cmnd *scmd,
2707 struct pqi_scsi_dev_raid_map_data *rmd)
2708 {
2709 /* Check for valid opcode, get LBA and block count. */
2710 switch (scmd->cmnd[0]) {
2711 case WRITE_6:
2712 rmd->is_write = true;
2713 fallthrough;
2714 case READ_6:
2715 rmd->first_block = (u64)(((scmd->cmnd[1] & 0x1f) << 16) |
2716 (scmd->cmnd[2] << 8) | scmd->cmnd[3]);
2717 rmd->block_cnt = (u32)scmd->cmnd[4];
2718 if (rmd->block_cnt == 0)
2719 rmd->block_cnt = 256;
2720 break;
2721 case WRITE_10:
2722 rmd->is_write = true;
2723 fallthrough;
2724 case READ_10:
2725 rmd->first_block = (u64)get_unaligned_be32(&scmd->cmnd[2]);
2726 rmd->block_cnt = (u32)get_unaligned_be16(&scmd->cmnd[7]);
2727 break;
2728 case WRITE_12:
2729 rmd->is_write = true;
2730 fallthrough;
2731 case READ_12:
2732 rmd->first_block = (u64)get_unaligned_be32(&scmd->cmnd[2]);
2733 rmd->block_cnt = get_unaligned_be32(&scmd->cmnd[6]);
2734 break;
2735 case WRITE_16:
2736 rmd->is_write = true;
2737 fallthrough;
2738 case READ_16:
2739 rmd->first_block = get_unaligned_be64(&scmd->cmnd[2]);
2740 rmd->block_cnt = get_unaligned_be32(&scmd->cmnd[10]);
2741 break;
2742 default:
2743 /* Process via normal I/O path. */
2744 return PQI_RAID_BYPASS_INELIGIBLE;
2745 }
2746
2747 put_unaligned_le32(scsi_bufflen(scmd), &rmd->data_length);
2748
2749 return 0;
2750 }
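
/*
 * Worked example for pqi_get_aio_lba_and_block_count() above (illustrative
 * only): a READ_10 CDB of 28 00 00 00 08 00 00 00 10 00 parses to
 * first_block 0x800 (2048) and block_cnt 0x10 (16). A READ_6 transfer
 * length byte of 0 means 256 blocks, hence the block_cnt fixup above.
 */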
2751
2752 static int pci_get_aio_common_raid_map_values(struct pqi_ctrl_info *ctrl_info,
2753 struct pqi_scsi_dev_raid_map_data *rmd, struct raid_map *raid_map)
2754 {
2755 #if BITS_PER_LONG == 32
2756 u64 tmpdiv;
2757 #endif
2758
2759 rmd->last_block = rmd->first_block + rmd->block_cnt - 1;
2760
2761 /* Check for invalid block or wraparound. */
2762 if (rmd->last_block >=
2763 get_unaligned_le64(&raid_map->volume_blk_cnt) ||
2764 rmd->last_block < rmd->first_block)
2765 return PQI_RAID_BYPASS_INELIGIBLE;
2766
2767 rmd->data_disks_per_row =
2768 get_unaligned_le16(&raid_map->data_disks_per_row);
2769 rmd->strip_size = get_unaligned_le16(&raid_map->strip_size);
2770 rmd->layout_map_count = get_unaligned_le16(&raid_map->layout_map_count);
2771
2772 /* Calculate stripe information for the request. */
2773 rmd->blocks_per_row = rmd->data_disks_per_row * rmd->strip_size;
2774 if (rmd->blocks_per_row == 0) /* Used as a divisor in many calculations */
2775 return PQI_RAID_BYPASS_INELIGIBLE;
2776 #if BITS_PER_LONG == 32
2777 tmpdiv = rmd->first_block;
2778 do_div(tmpdiv, rmd->blocks_per_row);
2779 rmd->first_row = tmpdiv;
2780 tmpdiv = rmd->last_block;
2781 do_div(tmpdiv, rmd->blocks_per_row);
2782 rmd->last_row = tmpdiv;
2783 rmd->first_row_offset = (u32)(rmd->first_block - (rmd->first_row * rmd->blocks_per_row));
2784 rmd->last_row_offset = (u32)(rmd->last_block - (rmd->last_row * rmd->blocks_per_row));
2785 tmpdiv = rmd->first_row_offset;
2786 do_div(tmpdiv, rmd->strip_size);
2787 rmd->first_column = tmpdiv;
2788 tmpdiv = rmd->last_row_offset;
2789 do_div(tmpdiv, rmd->strip_size);
2790 rmd->last_column = tmpdiv;
2791 #else
2792 rmd->first_row = rmd->first_block / rmd->blocks_per_row;
2793 rmd->last_row = rmd->last_block / rmd->blocks_per_row;
2794 rmd->first_row_offset = (u32)(rmd->first_block -
2795 (rmd->first_row * rmd->blocks_per_row));
2796 rmd->last_row_offset = (u32)(rmd->last_block - (rmd->last_row *
2797 rmd->blocks_per_row));
2798 rmd->first_column = rmd->first_row_offset / rmd->strip_size;
2799 rmd->last_column = rmd->last_row_offset / rmd->strip_size;
2800 #endif
2801
2802 /* If this isn't a single row/column request, give it to the controller. */
2803 if (rmd->first_row != rmd->last_row ||
2804 rmd->first_column != rmd->last_column)
2805 return PQI_RAID_BYPASS_INELIGIBLE;
2806
2807 /* Proceeding with driver mapping. */
2808 rmd->total_disks_per_row = rmd->data_disks_per_row +
2809 get_unaligned_le16(&raid_map->metadata_disks_per_row);
2810 rmd->map_row = ((u32)(rmd->first_row >>
2811 raid_map->parity_rotation_shift)) %
2812 get_unaligned_le16(&raid_map->row_cnt);
2813 rmd->map_index = (rmd->map_row * rmd->total_disks_per_row) +
2814 rmd->first_column;
2815
2816 return 0;
2817 }
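
/*
 * Worked example for the stripe math above (illustrative only): with
 * strip_size 128 and data_disks_per_row 4, blocks_per_row is 512. A
 * request for blocks 1000-1015 lands in row 1 (1000 / 512) at row offset
 * 488 and column 3 (488 / 128); blocks 1000 and 1015 share that row and
 * column, so the request remains bypass-eligible.
 */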
2818
2819 static int pqi_calc_aio_r5_or_r6(struct pqi_scsi_dev_raid_map_data *rmd,
2820 struct raid_map *raid_map)
2821 {
2822 #if BITS_PER_LONG == 32
2823 u64 tmpdiv;
2824 #endif
2825
2826 if (rmd->blocks_per_row == 0) /* Used as a divisor in many calculations */
2827 return PQI_RAID_BYPASS_INELIGIBLE;
2828
2829 /* RAID 50/60 */
2830 /* Verify the first and last blocks are in the same RAID group. */
2831 rmd->stripesize = rmd->blocks_per_row * rmd->layout_map_count;
2832 #if BITS_PER_LONG == 32
2833 tmpdiv = rmd->first_block;
2834 rmd->first_group = do_div(tmpdiv, rmd->stripesize);
2835 tmpdiv = rmd->first_group;
2836 do_div(tmpdiv, rmd->blocks_per_row);
2837 rmd->first_group = tmpdiv;
2838 tmpdiv = rmd->last_block;
2839 rmd->last_group = do_div(tmpdiv, rmd->stripesize);
2840 tmpdiv = rmd->last_group;
2841 do_div(tmpdiv, rmd->blocks_per_row);
2842 rmd->last_group = tmpdiv;
2843 #else
2844 rmd->first_group = (rmd->first_block % rmd->stripesize) / rmd->blocks_per_row;
2845 rmd->last_group = (rmd->last_block % rmd->stripesize) / rmd->blocks_per_row;
2846 #endif
2847 if (rmd->first_group != rmd->last_group)
2848 return PQI_RAID_BYPASS_INELIGIBLE;
2849
2850 /* Verify request is in a single row of RAID 5/6. */
2851 #if BITS_PER_LONG == 32
2852 tmpdiv = rmd->first_block;
2853 do_div(tmpdiv, rmd->stripesize);
2854 rmd->first_row = tmpdiv;
2855 rmd->r5or6_first_row = tmpdiv;
2856 tmpdiv = rmd->last_block;
2857 do_div(tmpdiv, rmd->stripesize);
2858 rmd->r5or6_last_row = tmpdiv;
2859 #else
2860 rmd->first_row = rmd->r5or6_first_row =
2861 rmd->first_block / rmd->stripesize;
2862 rmd->r5or6_last_row = rmd->last_block / rmd->stripesize;
2863 #endif
2864 if (rmd->r5or6_first_row != rmd->r5or6_last_row)
2865 return PQI_RAID_BYPASS_INELIGIBLE;
2866
2867 /* Verify request is in a single column. */
2868 #if BITS_PER_LONG == 32
2869 tmpdiv = rmd->first_block;
2870 rmd->first_row_offset = do_div(tmpdiv, rmd->stripesize);
2871 tmpdiv = rmd->first_row_offset;
2872 rmd->first_row_offset = (u32)do_div(tmpdiv, rmd->blocks_per_row);
2873 rmd->r5or6_first_row_offset = rmd->first_row_offset;
2874 tmpdiv = rmd->last_block;
2875 rmd->r5or6_last_row_offset = do_div(tmpdiv, rmd->stripesize);
2876 tmpdiv = rmd->r5or6_last_row_offset;
2877 rmd->r5or6_last_row_offset = do_div(tmpdiv, rmd->blocks_per_row);
2878 tmpdiv = rmd->r5or6_first_row_offset;
2879 do_div(tmpdiv, rmd->strip_size);
2880 rmd->first_column = rmd->r5or6_first_column = tmpdiv;
2881 tmpdiv = rmd->r5or6_last_row_offset;
2882 do_div(tmpdiv, rmd->strip_size);
2883 rmd->r5or6_last_column = tmpdiv;
2884 #else
2885 rmd->first_row_offset = rmd->r5or6_first_row_offset =
2886 (u32)((rmd->first_block % rmd->stripesize) %
2887 rmd->blocks_per_row);
2888
2889 rmd->r5or6_last_row_offset =
2890 (u32)((rmd->last_block % rmd->stripesize) %
2891 rmd->blocks_per_row);
2892
2893 rmd->first_column =
2894 rmd->r5or6_first_row_offset / rmd->strip_size;
2895 rmd->r5or6_first_column = rmd->first_column;
2896 rmd->r5or6_last_column = rmd->r5or6_last_row_offset / rmd->strip_size;
2897 #endif
2898 if (rmd->r5or6_first_column != rmd->r5or6_last_column)
2899 return PQI_RAID_BYPASS_INELIGIBLE;
2900
2901 /* Request is eligible. */
2902 rmd->map_row =
2903 ((u32)(rmd->first_row >> raid_map->parity_rotation_shift)) %
2904 get_unaligned_le16(&raid_map->row_cnt);
2905
2906 rmd->map_index = (rmd->first_group *
2907 (get_unaligned_le16(&raid_map->row_cnt) *
2908 rmd->total_disks_per_row)) +
2909 (rmd->map_row * rmd->total_disks_per_row) + rmd->first_column;
2910
2911 if (rmd->is_write) {
2912 u32 index;
2913
2914 /*
2915 * p_parity_it_nexus and q_parity_it_nexus are pointers to the
2916 * parity entries inside the device's raid_map.
2917 *
2918 * A device's RAID map is bounded by the number of RAID disks squared.
2919 *
2920 * The device's RAID map size is checked during device
2921 * initialization.
2922 */
2923 index = DIV_ROUND_UP(rmd->map_index + 1, rmd->total_disks_per_row);
2924 index *= rmd->total_disks_per_row;
2925 index -= get_unaligned_le16(&raid_map->metadata_disks_per_row);
2926
2927 rmd->p_parity_it_nexus = raid_map->disk_data[index].aio_handle;
2928 if (rmd->raid_level == SA_RAID_6) {
2929 rmd->q_parity_it_nexus = raid_map->disk_data[index + 1].aio_handle;
2930 rmd->xor_mult = raid_map->disk_data[rmd->map_index].xor_mult[1];
2931 }
2932 #if BITS_PER_LONG == 32
2933 tmpdiv = rmd->first_block;
2934 do_div(tmpdiv, rmd->blocks_per_row);
2935 rmd->row = tmpdiv;
2936 #else
2937 rmd->row = rmd->first_block / rmd->blocks_per_row;
2938 #endif
2939 }
2940
2941 return 0;
2942 }
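
/*
 * Worked example for the write-path parity lookup above (illustrative
 * only): with 4 data disks and 2 metadata disks per row (RAID 6,
 * total_disks_per_row 6), map_index 9 gives
 * DIV_ROUND_UP(10, 6) * 6 - 2 = 10, so the P parity handle is
 * disk_data[10] and the Q parity handle is disk_data[11], the last two
 * entries of the map row containing map_index 9.
 */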
2943
2944 static void pqi_set_aio_cdb(struct pqi_scsi_dev_raid_map_data *rmd)
2945 {
2946 /* Build the new CDB for the physical disk I/O. */
2947 if (rmd->disk_block > 0xffffffff) {
2948 rmd->cdb[0] = rmd->is_write ? WRITE_16 : READ_16;
2949 rmd->cdb[1] = 0;
2950 put_unaligned_be64(rmd->disk_block, &rmd->cdb[2]);
2951 put_unaligned_be32(rmd->disk_block_cnt, &rmd->cdb[10]);
2952 rmd->cdb[14] = 0;
2953 rmd->cdb[15] = 0;
2954 rmd->cdb_length = 16;
2955 } else {
2956 rmd->cdb[0] = rmd->is_write ? WRITE_10 : READ_10;
2957 rmd->cdb[1] = 0;
2958 put_unaligned_be32((u32)rmd->disk_block, &rmd->cdb[2]);
2959 rmd->cdb[6] = 0;
2960 put_unaligned_be16((u16)rmd->disk_block_cnt, &rmd->cdb[7]);
2961 rmd->cdb[9] = 0;
2962 rmd->cdb_length = 10;
2963 }
2964 }
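
/*
 * Illustrative example for pqi_set_aio_cdb() above: disk_block 2048 with
 * disk_block_cnt 16 fits in 32 bits, so the 10-byte form is built
 * (28 00 00 00 08 00 00 00 10 00 for a read); starting blocks beyond
 * 0xffffffff force the 16-byte READ_16/WRITE_16 form instead.
 */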
2965
2966 static void pqi_calc_aio_r1_nexus(struct raid_map *raid_map,
2967 struct pqi_scsi_dev_raid_map_data *rmd)
2968 {
2969 u32 index;
2970 u32 group;
2971
2972 group = rmd->map_index / rmd->data_disks_per_row;
2973
2974 index = rmd->map_index - (group * rmd->data_disks_per_row);
2975 rmd->it_nexus[0] = raid_map->disk_data[index].aio_handle;
2976 index += rmd->data_disks_per_row;
2977 rmd->it_nexus[1] = raid_map->disk_data[index].aio_handle;
2978 if (rmd->layout_map_count > 2) {
2979 index += rmd->data_disks_per_row;
2980 rmd->it_nexus[2] = raid_map->disk_data[index].aio_handle;
2981 }
2982
2983 rmd->num_it_nexus_entries = rmd->layout_map_count;
2984 }
2985
2986 static int pqi_raid_bypass_submit_scsi_cmd(struct pqi_ctrl_info *ctrl_info,
2987 struct pqi_scsi_dev *device, struct scsi_cmnd *scmd,
2988 struct pqi_queue_group *queue_group)
2989 {
2990 int rc;
2991 struct raid_map *raid_map;
2992 u32 group;
2993 u32 next_bypass_group;
2994 struct pqi_encryption_info *encryption_info_ptr;
2995 struct pqi_encryption_info encryption_info;
2996 struct pqi_scsi_dev_raid_map_data rmd = { 0 };
2997
2998 rc = pqi_get_aio_lba_and_block_count(scmd, &rmd);
2999 if (rc)
3000 return PQI_RAID_BYPASS_INELIGIBLE;
3001
3002 rmd.raid_level = device->raid_level;
3003
3004 if (!pqi_aio_raid_level_supported(ctrl_info, &rmd))
3005 return PQI_RAID_BYPASS_INELIGIBLE;
3006
3007 if (unlikely(rmd.block_cnt == 0))
3008 return PQI_RAID_BYPASS_INELIGIBLE;
3009
3010 raid_map = device->raid_map;
3011
3012 rc = pci_get_aio_common_raid_map_values(ctrl_info, &rmd, raid_map);
3013 if (rc)
3014 return PQI_RAID_BYPASS_INELIGIBLE;
3015
3016 if (device->raid_level == SA_RAID_1 ||
3017 device->raid_level == SA_RAID_TRIPLE) {
3018 if (rmd.is_write) {
3019 pqi_calc_aio_r1_nexus(raid_map, &rmd);
3020 } else {
3021 group = device->next_bypass_group[rmd.map_index];
3022 next_bypass_group = group + 1;
3023 if (next_bypass_group >= rmd.layout_map_count)
3024 next_bypass_group = 0;
3025 device->next_bypass_group[rmd.map_index] = next_bypass_group;
3026 rmd.map_index += group * rmd.data_disks_per_row;
3027 }
3028 } else if ((device->raid_level == SA_RAID_5 ||
3029 device->raid_level == SA_RAID_6) &&
3030 (rmd.layout_map_count > 1 || rmd.is_write)) {
3031 rc = pqi_calc_aio_r5_or_r6(&rmd, raid_map);
3032 if (rc)
3033 return PQI_RAID_BYPASS_INELIGIBLE;
3034 }
3035
3036 if (unlikely(rmd.map_index >= RAID_MAP_MAX_ENTRIES))
3037 return PQI_RAID_BYPASS_INELIGIBLE;
3038
3039 rmd.aio_handle = raid_map->disk_data[rmd.map_index].aio_handle;
3040 rmd.disk_block = get_unaligned_le64(&raid_map->disk_starting_blk) +
3041 rmd.first_row * rmd.strip_size +
3042 (rmd.first_row_offset - rmd.first_column * rmd.strip_size);
3043 rmd.disk_block_cnt = rmd.block_cnt;
3044
3045 /* Handle differing logical/physical block sizes. */
3046 if (raid_map->phys_blk_shift) {
3047 rmd.disk_block <<= raid_map->phys_blk_shift;
3048 rmd.disk_block_cnt <<= raid_map->phys_blk_shift;
3049 }
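
/*
 * Illustrative example: a phys_blk_shift of 3 (e.g. 4096-byte volume
 * blocks on 512-byte physical disk blocks) scales both the starting
 * block and the block count by 8 before pqi_set_aio_cdb() builds the
 * CDB below.
 */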
3050
3051 if (unlikely(rmd.disk_block_cnt > 0xffff))
3052 return PQI_RAID_BYPASS_INELIGIBLE;
3053
3054 pqi_set_aio_cdb(&rmd);
3055
3056 if (get_unaligned_le16(&raid_map->flags) & RAID_MAP_ENCRYPTION_ENABLED) {
3057 if (rmd.data_length > device->max_transfer_encrypted)
3058 return PQI_RAID_BYPASS_INELIGIBLE;
3059 pqi_set_encryption_info(&encryption_info, raid_map, rmd.first_block);
3060 encryption_info_ptr = &encryption_info;
3061 } else {
3062 encryption_info_ptr = NULL;
3063 }
3064
3065 if (rmd.is_write) {
3066 switch (device->raid_level) {
3067 case SA_RAID_1:
3068 case SA_RAID_TRIPLE:
3069 return pqi_aio_submit_r1_write_io(ctrl_info, scmd, queue_group,
3070 encryption_info_ptr, device, &rmd);
3071 case SA_RAID_5:
3072 case SA_RAID_6:
3073 return pqi_aio_submit_r56_write_io(ctrl_info, scmd, queue_group,
3074 encryption_info_ptr, device, &rmd);
3075 }
3076 }
3077
3078 return pqi_aio_submit_io(ctrl_info, scmd, rmd.aio_handle,
3079 rmd.cdb, rmd.cdb_length, queue_group,
3080 encryption_info_ptr, true, false);
3081 }
3082
3083 #define PQI_STATUS_IDLE 0x0
3084
3085 #define PQI_CREATE_ADMIN_QUEUE_PAIR 1
3086 #define PQI_DELETE_ADMIN_QUEUE_PAIR 2
3087
3088 #define PQI_DEVICE_STATE_POWER_ON_AND_RESET 0x0
3089 #define PQI_DEVICE_STATE_STATUS_AVAILABLE 0x1
3090 #define PQI_DEVICE_STATE_ALL_REGISTERS_READY 0x2
3091 #define PQI_DEVICE_STATE_ADMIN_QUEUE_PAIR_READY 0x3
3092 #define PQI_DEVICE_STATE_ERROR 0x4
3093
3094 #define PQI_MODE_READY_TIMEOUT_SECS 30
3095 #define PQI_MODE_READY_POLL_INTERVAL_MSECS 1
3096
3097 static int pqi_wait_for_pqi_mode_ready(struct pqi_ctrl_info *ctrl_info)
3098 {
3099 struct pqi_device_registers __iomem *pqi_registers;
3100 unsigned long timeout;
3101 u64 signature;
3102 u8 status;
3103
3104 pqi_registers = ctrl_info->pqi_registers;
3105 timeout = (PQI_MODE_READY_TIMEOUT_SECS * HZ) + jiffies;
3106
3107 while (1) {
3108 signature = readq(&pqi_registers->signature);
3109 if (memcmp(&signature, PQI_DEVICE_SIGNATURE,
3110 sizeof(signature)) == 0)
3111 break;
3112 if (time_after(jiffies, timeout)) {
3113 dev_err(&ctrl_info->pci_dev->dev,
3114 "timed out waiting for PQI signature\n");
3115 return -ETIMEDOUT;
3116 }
3117 msleep(PQI_MODE_READY_POLL_INTERVAL_MSECS);
3118 }
3119
3120 while (1) {
3121 status = readb(&pqi_registers->function_and_status_code);
3122 if (status == PQI_STATUS_IDLE)
3123 break;
3124 if (time_after(jiffies, timeout)) {
3125 dev_err(&ctrl_info->pci_dev->dev,
3126 "timed out waiting for PQI IDLE\n");
3127 return -ETIMEDOUT;
3128 }
3129 msleep(PQI_MODE_READY_POLL_INTERVAL_MSECS);
3130 }
3131
3132 while (1) {
3133 if (readl(&pqi_registers->device_status) ==
3134 PQI_DEVICE_STATE_ALL_REGISTERS_READY)
3135 break;
3136 if (time_after(jiffies, timeout)) {
3137 dev_err(&ctrl_info->pci_dev->dev,
3138 "timed out waiting for PQI all registers ready\n");
3139 return -ETIMEDOUT;
3140 }
3141 msleep(PQI_MODE_READY_POLL_INTERVAL_MSECS);
3142 }
3143
3144 return 0;
3145 }
3146
3147 static inline void pqi_aio_path_disabled(struct pqi_io_request *io_request)
3148 {
3149 struct pqi_scsi_dev *device;
3150
3151 device = io_request->scmd->device->hostdata;
3152 device->raid_bypass_enabled = false;
3153 device->aio_enabled = false;
3154 }
3155
3156 static inline void pqi_take_device_offline(struct scsi_device *sdev, char *path)
3157 {
3158 struct pqi_ctrl_info *ctrl_info;
3159 struct pqi_scsi_dev *device;
3160
3161 device = sdev->hostdata;
3162 if (device->device_offline)
3163 return;
3164
3165 device->device_offline = true;
3166 ctrl_info = shost_to_hba(sdev->host);
3167 pqi_schedule_rescan_worker(ctrl_info);
3168 dev_err(&ctrl_info->pci_dev->dev, "re-scanning %s scsi %d:%d:%d:%d\n",
3169 path, ctrl_info->scsi_host->host_no, device->bus,
3170 device->target, device->lun);
3171 }
3172
3173 static void pqi_process_raid_io_error(struct pqi_io_request *io_request)
3174 {
3175 u8 scsi_status;
3176 u8 host_byte;
3177 struct scsi_cmnd *scmd;
3178 struct pqi_raid_error_info *error_info;
3179 size_t sense_data_length;
3180 int residual_count;
3181 int xfer_count;
3182 struct scsi_sense_hdr sshdr;
3183
3184 scmd = io_request->scmd;
3185 if (!scmd)
3186 return;
3187
3188 error_info = io_request->error_info;
3189 scsi_status = error_info->status;
3190 host_byte = DID_OK;
3191
3192 switch (error_info->data_out_result) {
3193 case PQI_DATA_IN_OUT_GOOD:
3194 break;
3195 case PQI_DATA_IN_OUT_UNDERFLOW:
3196 xfer_count =
3197 get_unaligned_le32(&error_info->data_out_transferred);
3198 residual_count = scsi_bufflen(scmd) - xfer_count;
3199 scsi_set_resid(scmd, residual_count);
3200 if (xfer_count < scmd->underflow)
3201 host_byte = DID_SOFT_ERROR;
3202 break;
3203 case PQI_DATA_IN_OUT_UNSOLICITED_ABORT:
3204 case PQI_DATA_IN_OUT_ABORTED:
3205 host_byte = DID_ABORT;
3206 break;
3207 case PQI_DATA_IN_OUT_TIMEOUT:
3208 host_byte = DID_TIME_OUT;
3209 break;
3210 case PQI_DATA_IN_OUT_BUFFER_OVERFLOW:
3211 case PQI_DATA_IN_OUT_PROTOCOL_ERROR:
3212 case PQI_DATA_IN_OUT_BUFFER_ERROR:
3213 case PQI_DATA_IN_OUT_BUFFER_OVERFLOW_DESCRIPTOR_AREA:
3214 case PQI_DATA_IN_OUT_BUFFER_OVERFLOW_BRIDGE:
3215 case PQI_DATA_IN_OUT_ERROR:
3216 case PQI_DATA_IN_OUT_HARDWARE_ERROR:
3217 case PQI_DATA_IN_OUT_PCIE_FABRIC_ERROR:
3218 case PQI_DATA_IN_OUT_PCIE_COMPLETION_TIMEOUT:
3219 case PQI_DATA_IN_OUT_PCIE_COMPLETER_ABORT_RECEIVED:
3220 case PQI_DATA_IN_OUT_PCIE_UNSUPPORTED_REQUEST_RECEIVED:
3221 case PQI_DATA_IN_OUT_PCIE_ECRC_CHECK_FAILED:
3222 case PQI_DATA_IN_OUT_PCIE_UNSUPPORTED_REQUEST:
3223 case PQI_DATA_IN_OUT_PCIE_ACS_VIOLATION:
3224 case PQI_DATA_IN_OUT_PCIE_TLP_PREFIX_BLOCKED:
3225 case PQI_DATA_IN_OUT_PCIE_POISONED_MEMORY_READ:
3226 default:
3227 host_byte = DID_ERROR;
3228 break;
3229 }
3230
3231 sense_data_length = get_unaligned_le16(&error_info->sense_data_length);
3232 if (sense_data_length == 0)
3233 sense_data_length =
3234 get_unaligned_le16(&error_info->response_data_length);
3235 if (sense_data_length) {
3236 if (sense_data_length > sizeof(error_info->data))
3237 sense_data_length = sizeof(error_info->data);
3238
3239 if (scsi_status == SAM_STAT_CHECK_CONDITION &&
3240 scsi_normalize_sense(error_info->data,
3241 sense_data_length, &sshdr) &&
3242 sshdr.sense_key == HARDWARE_ERROR &&
3243 sshdr.asc == 0x3e) {
3244 struct pqi_ctrl_info *ctrl_info = shost_to_hba(scmd->device->host);
3245 struct pqi_scsi_dev *device = scmd->device->hostdata;
3246
3247 switch (sshdr.ascq) {
3248 case 0x1: /* LOGICAL UNIT FAILURE */
3249 if (printk_ratelimit())
3250 scmd_printk(KERN_ERR, scmd, "received 'logical unit failure' from controller for scsi %d:%d:%d:%d\n",
3251 ctrl_info->scsi_host->host_no, device->bus, device->target, device->lun);
3252 pqi_take_device_offline(scmd->device, "RAID");
3253 host_byte = DID_NO_CONNECT;
3254 break;
3255
3256 default: /* See http://www.t10.org/lists/asc-num.htm#ASC_3E */
3257 if (printk_ratelimit())
3258 scmd_printk(KERN_ERR, scmd, "received unhandled error %d from controller for scsi %d:%d:%d:%d\n",
3259 sshdr.ascq, ctrl_info->scsi_host->host_no, device->bus, device->target, device->lun);
3260 break;
3261 }
3262 }
3263
3264 if (sense_data_length > SCSI_SENSE_BUFFERSIZE)
3265 sense_data_length = SCSI_SENSE_BUFFERSIZE;
3266 memcpy(scmd->sense_buffer, error_info->data,
3267 sense_data_length);
3268 }
3269
3270 if (pqi_cmd_priv(scmd)->this_residual &&
3271 !pqi_is_logical_device(scmd->device->hostdata) &&
3272 scsi_status == SAM_STAT_CHECK_CONDITION &&
3273 host_byte == DID_OK &&
3274 sense_data_length &&
3275 scsi_normalize_sense(error_info->data, sense_data_length, &sshdr) &&
3276 sshdr.sense_key == ILLEGAL_REQUEST &&
3277 sshdr.asc == 0x26 &&
3278 sshdr.ascq == 0x0) {
3279 host_byte = DID_NO_CONNECT;
3280 pqi_take_device_offline(scmd->device, "AIO");
3281 scsi_build_sense_buffer(0, scmd->sense_buffer, HARDWARE_ERROR, 0x3e, 0x1);
3282 }
3283
3284 scmd->result = scsi_status;
3285 set_host_byte(scmd, host_byte);
3286 }
3287
3288 static void pqi_process_aio_io_error(struct pqi_io_request *io_request)
3289 {
3290 u8 scsi_status;
3291 u8 host_byte;
3292 struct scsi_cmnd *scmd;
3293 struct pqi_aio_error_info *error_info;
3294 size_t sense_data_length;
3295 int residual_count;
3296 int xfer_count;
3297 bool device_offline;
3298
3299 scmd = io_request->scmd;
3300 error_info = io_request->error_info;
3301 host_byte = DID_OK;
3302 sense_data_length = 0;
3303 device_offline = false;
3304
3305 switch (error_info->service_response) {
3306 case PQI_AIO_SERV_RESPONSE_COMPLETE:
3307 scsi_status = error_info->status;
3308 break;
3309 case PQI_AIO_SERV_RESPONSE_FAILURE:
3310 switch (error_info->status) {
3311 case PQI_AIO_STATUS_IO_ABORTED:
3312 scsi_status = SAM_STAT_TASK_ABORTED;
3313 break;
3314 case PQI_AIO_STATUS_UNDERRUN:
3315 scsi_status = SAM_STAT_GOOD;
3316 residual_count = get_unaligned_le32(
3317 &error_info->residual_count);
3318 scsi_set_resid(scmd, residual_count);
3319 xfer_count = scsi_bufflen(scmd) - residual_count;
3320 if (xfer_count < scmd->underflow)
3321 host_byte = DID_SOFT_ERROR;
3322 break;
3323 case PQI_AIO_STATUS_OVERRUN:
3324 scsi_status = SAM_STAT_GOOD;
3325 break;
3326 case PQI_AIO_STATUS_AIO_PATH_DISABLED:
3327 pqi_aio_path_disabled(io_request);
3328 scsi_status = SAM_STAT_GOOD;
3329 io_request->status = -EAGAIN;
3330 break;
3331 case PQI_AIO_STATUS_NO_PATH_TO_DEVICE:
3332 case PQI_AIO_STATUS_INVALID_DEVICE:
3333 if (!io_request->raid_bypass) {
3334 device_offline = true;
3335 pqi_take_device_offline(scmd->device, "AIO");
3336 host_byte = DID_NO_CONNECT;
3337 }
3338 scsi_status = SAM_STAT_CHECK_CONDITION;
3339 break;
3340 case PQI_AIO_STATUS_IO_ERROR:
3341 default:
3342 scsi_status = SAM_STAT_CHECK_CONDITION;
3343 break;
3344 }
3345 break;
3346 case PQI_AIO_SERV_RESPONSE_TMF_COMPLETE:
3347 case PQI_AIO_SERV_RESPONSE_TMF_SUCCEEDED:
3348 scsi_status = SAM_STAT_GOOD;
3349 break;
3350 case PQI_AIO_SERV_RESPONSE_TMF_REJECTED:
3351 case PQI_AIO_SERV_RESPONSE_TMF_INCORRECT_LUN:
3352 default:
3353 scsi_status = SAM_STAT_CHECK_CONDITION;
3354 break;
3355 }
3356
3357 if (error_info->data_present) {
3358 sense_data_length =
3359 get_unaligned_le16(&error_info->data_length);
3360 if (sense_data_length) {
3361 if (sense_data_length > sizeof(error_info->data))
3362 sense_data_length = sizeof(error_info->data);
3363 if (sense_data_length > SCSI_SENSE_BUFFERSIZE)
3364 sense_data_length = SCSI_SENSE_BUFFERSIZE;
3365 memcpy(scmd->sense_buffer, error_info->data,
3366 sense_data_length);
3367 }
3368 }
3369
3370 if (device_offline && sense_data_length == 0)
3371 scsi_build_sense(scmd, 0, HARDWARE_ERROR, 0x3e, 0x1);
3372
3373 scmd->result = scsi_status;
3374 set_host_byte(scmd, host_byte);
3375 }
3376
3377 static void pqi_process_io_error(unsigned int iu_type,
3378 struct pqi_io_request *io_request)
3379 {
3380 switch (iu_type) {
3381 case PQI_RESPONSE_IU_RAID_PATH_IO_ERROR:
3382 pqi_process_raid_io_error(io_request);
3383 break;
3384 case PQI_RESPONSE_IU_AIO_PATH_IO_ERROR:
3385 pqi_process_aio_io_error(io_request);
3386 break;
3387 }
3388 }
3389
3390 static int pqi_interpret_task_management_response(struct pqi_ctrl_info *ctrl_info,
3391 struct pqi_task_management_response *response)
3392 {
3393 int rc;
3394
3395 switch (response->response_code) {
3396 case SOP_TMF_COMPLETE:
3397 case SOP_TMF_FUNCTION_SUCCEEDED:
3398 rc = 0;
3399 break;
3400 case SOP_TMF_REJECTED:
3401 rc = -EAGAIN;
3402 break;
3403 case SOP_TMF_INCORRECT_LOGICAL_UNIT:
3404 rc = -ENODEV;
3405 break;
3406 default:
3407 rc = -EIO;
3408 break;
3409 }
3410
3411 if (rc)
3412 dev_err(&ctrl_info->pci_dev->dev,
3413 "Task Management Function error: %d (response code: %u)\n", rc, response->response_code);
3414
3415 return rc;
3416 }
3417
3418 static inline void pqi_invalid_response(struct pqi_ctrl_info *ctrl_info,
3419 enum pqi_ctrl_shutdown_reason ctrl_shutdown_reason)
3420 {
3421 pqi_take_ctrl_offline(ctrl_info, ctrl_shutdown_reason);
3422 }
3423
3424 static int pqi_process_io_intr(struct pqi_ctrl_info *ctrl_info, struct pqi_queue_group *queue_group)
3425 {
3426 int num_responses;
3427 pqi_index_t oq_pi;
3428 pqi_index_t oq_ci;
3429 struct pqi_io_request *io_request;
3430 struct pqi_io_response *response;
3431 u16 request_id;
3432
3433 num_responses = 0;
3434 oq_ci = queue_group->oq_ci_copy;
3435
3436 while (1) {
3437 oq_pi = readl(queue_group->oq_pi);
3438 if (oq_pi >= ctrl_info->num_elements_per_oq) {
3439 pqi_invalid_response(ctrl_info, PQI_IO_PI_OUT_OF_RANGE);
3440 dev_err(&ctrl_info->pci_dev->dev,
3441 "I/O interrupt: producer index (%u) out of range (0-%u): consumer index: %u\n",
3442 oq_pi, ctrl_info->num_elements_per_oq - 1, oq_ci);
3443 return -1;
3444 }
3445 if (oq_pi == oq_ci)
3446 break;
3447
3448 num_responses++;
3449 response = queue_group->oq_element_array +
3450 (oq_ci * PQI_OPERATIONAL_OQ_ELEMENT_LENGTH);
3451
3452 request_id = get_unaligned_le16(&response->request_id);
3453 if (request_id >= ctrl_info->max_io_slots) {
3454 pqi_invalid_response(ctrl_info, PQI_INVALID_REQ_ID);
3455 dev_err(&ctrl_info->pci_dev->dev,
3456 "request ID in response (%u) out of range (0-%u): producer index: %u consumer index: %u\n",
3457 request_id, ctrl_info->max_io_slots - 1, oq_pi, oq_ci);
3458 return -1;
3459 }
3460
3461 io_request = &ctrl_info->io_request_pool[request_id];
3462 if (atomic_read(&io_request->refcount) == 0) {
3463 pqi_invalid_response(ctrl_info, PQI_UNMATCHED_REQ_ID);
3464 dev_err(&ctrl_info->pci_dev->dev,
3465 "request ID in response (%u) does not match an outstanding I/O request: producer index: %u consumer index: %u\n",
3466 request_id, oq_pi, oq_ci);
3467 return -1;
3468 }
3469
3470 switch (response->header.iu_type) {
3471 case PQI_RESPONSE_IU_RAID_PATH_IO_SUCCESS:
3472 case PQI_RESPONSE_IU_AIO_PATH_IO_SUCCESS:
3473 if (io_request->scmd)
3474 io_request->scmd->result = 0;
3475 fallthrough;
3476 case PQI_RESPONSE_IU_GENERAL_MANAGEMENT:
3477 break;
3478 case PQI_RESPONSE_IU_VENDOR_GENERAL:
3479 io_request->status =
3480 get_unaligned_le16(
3481 &((struct pqi_vendor_general_response *)response)->status);
3482 break;
3483 case PQI_RESPONSE_IU_TASK_MANAGEMENT:
3484 io_request->status = pqi_interpret_task_management_response(ctrl_info,
3485 (void *)response);
3486 break;
3487 case PQI_RESPONSE_IU_AIO_PATH_DISABLED:
3488 pqi_aio_path_disabled(io_request);
3489 io_request->status = -EAGAIN;
3490 break;
3491 case PQI_RESPONSE_IU_RAID_PATH_IO_ERROR:
3492 case PQI_RESPONSE_IU_AIO_PATH_IO_ERROR:
3493 io_request->error_info = ctrl_info->error_buffer +
3494 (get_unaligned_le16(&response->error_index) *
3495 PQI_ERROR_BUFFER_ELEMENT_LENGTH);
3496 pqi_process_io_error(response->header.iu_type, io_request);
3497 break;
3498 default:
3499 pqi_invalid_response(ctrl_info, PQI_UNEXPECTED_IU_TYPE);
3500 dev_err(&ctrl_info->pci_dev->dev,
3501 "unexpected IU type: 0x%x: producer index: %u consumer index: %u\n",
3502 response->header.iu_type, oq_pi, oq_ci);
3503 return -1;
3504 }
3505
3506 io_request->io_complete_callback(io_request, io_request->context);
3507
3508 /*
3509 * Note that the I/O request structure CANNOT BE TOUCHED after
3510 * returning from the I/O completion callback!
3511 */
3512 oq_ci = (oq_ci + 1) % ctrl_info->num_elements_per_oq;
3513 }
3514
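	/*
	 * Write the updated consumer index back to the controller only
	 * once per interrupt, after the entire batch of responses has
	 * been consumed, rather than once per response.
	 */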
3515 if (num_responses) {
3516 queue_group->oq_ci_copy = oq_ci;
3517 writel(oq_ci, queue_group->oq_ci);
3518 }
3519
3520 return num_responses;
3521 }
3522
3523 static inline unsigned int pqi_num_elements_free(unsigned int pi,
3524 unsigned int ci, unsigned int elements_in_queue)
3525 {
3526 unsigned int num_elements_used;
3527
3528 if (pi >= ci)
3529 num_elements_used = pi - ci;
3530 else
3531 num_elements_used = elements_in_queue - ci + pi;
3532
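	/*
	 * Example with hypothetical values: pi = 2, ci = 5 and
	 * elements_in_queue = 8 gives num_elements_used = 8 - 5 + 2 = 5
	 * and 8 - 5 - 1 = 2 free elements. One element is always kept in
	 * reserve so that pi == ci unambiguously means "queue empty"
	 * rather than "queue full".
	 */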
3533 return elements_in_queue - num_elements_used - 1;
3534 }
3535
3536 static void pqi_send_event_ack(struct pqi_ctrl_info *ctrl_info,
3537 struct pqi_event_acknowledge_request *iu, size_t iu_length)
3538 {
3539 pqi_index_t iq_pi;
3540 pqi_index_t iq_ci;
3541 unsigned long flags;
3542 void *next_element;
3543 struct pqi_queue_group *queue_group;
3544
3545 queue_group = &ctrl_info->queue_groups[PQI_DEFAULT_QUEUE_GROUP];
3546 put_unaligned_le16(queue_group->oq_id, &iu->header.response_queue_id);
3547
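	/*
	 * Spin until the RAID path inbound queue has a free element,
	 * dropping the submit lock between polls; bail out if the
	 * controller goes offline while we wait.
	 */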
3548 while (1) {
3549 spin_lock_irqsave(&queue_group->submit_lock[RAID_PATH], flags);
3550
3551 iq_pi = queue_group->iq_pi_copy[RAID_PATH];
3552 iq_ci = readl(queue_group->iq_ci[RAID_PATH]);
3553
3554 if (pqi_num_elements_free(iq_pi, iq_ci,
3555 ctrl_info->num_elements_per_iq))
3556 break;
3557
3558 spin_unlock_irqrestore(
3559 &queue_group->submit_lock[RAID_PATH], flags);
3560
3561 if (pqi_ctrl_offline(ctrl_info))
3562 return;
3563 }
3564
3565 next_element = queue_group->iq_element_array[RAID_PATH] +
3566 (iq_pi * PQI_OPERATIONAL_IQ_ELEMENT_LENGTH);
3567
3568 memcpy(next_element, iu, iu_length);
3569
3570 iq_pi = (iq_pi + 1) % ctrl_info->num_elements_per_iq;
3571 queue_group->iq_pi_copy[RAID_PATH] = iq_pi;
3572
3573 /*
3574 * This write notifies the controller that an IU is available to be
3575 * processed.
3576 */
3577 writel(iq_pi, queue_group->iq_pi[RAID_PATH]);
3578
3579 spin_unlock_irqrestore(&queue_group->submit_lock[RAID_PATH], flags);
3580 }
3581
3582 static void pqi_acknowledge_event(struct pqi_ctrl_info *ctrl_info,
3583 struct pqi_event *event)
3584 {
3585 struct pqi_event_acknowledge_request request;
3586
3587 memset(&request, 0, sizeof(request));
3588
3589 request.header.iu_type = PQI_REQUEST_IU_ACKNOWLEDGE_VENDOR_EVENT;
3590 put_unaligned_le16(sizeof(request) - PQI_REQUEST_HEADER_LENGTH,
3591 &request.header.iu_length);
3592 request.event_type = event->event_type;
3593 put_unaligned_le16(event->event_id, &request.event_id);
3594 put_unaligned_le32(event->additional_event_id, &request.additional_event_id);
3595
3596 pqi_send_event_ack(ctrl_info, &request, sizeof(request));
3597 }
3598
3599 #define PQI_SOFT_RESET_STATUS_TIMEOUT_SECS 30
3600 #define PQI_SOFT_RESET_STATUS_POLL_INTERVAL_SECS 1
3601
3602 static enum pqi_soft_reset_status pqi_poll_for_soft_reset_status(
3603 struct pqi_ctrl_info *ctrl_info)
3604 {
3605 u8 status;
3606 unsigned long timeout;
3607
3608 timeout = (PQI_SOFT_RESET_STATUS_TIMEOUT_SECS * HZ) + jiffies;
3609
3610 while (1) {
3611 status = pqi_read_soft_reset_status(ctrl_info);
3612 if (status & PQI_SOFT_RESET_INITIATE)
3613 return RESET_INITIATE_DRIVER;
3614
3615 if (status & PQI_SOFT_RESET_ABORT)
3616 return RESET_ABORT;
3617
3618 if (!sis_is_firmware_running(ctrl_info))
3619 return RESET_NORESPONSE;
3620
3621 if (time_after(jiffies, timeout)) {
3622 dev_warn(&ctrl_info->pci_dev->dev,
3623 "timed out waiting for soft reset status\n");
3624 return RESET_TIMEDOUT;
3625 }
3626
3627 ssleep(PQI_SOFT_RESET_STATUS_POLL_INTERVAL_SECS);
3628 }
3629 }
3630
3631 static void pqi_process_soft_reset(struct pqi_ctrl_info *ctrl_info)
3632 {
3633 int rc;
3634 unsigned int delay_secs;
3635 enum pqi_soft_reset_status reset_status;
3636
3637 if (ctrl_info->soft_reset_handshake_supported)
3638 reset_status = pqi_poll_for_soft_reset_status(ctrl_info);
3639 else
3640 reset_status = RESET_INITIATE_FIRMWARE;
3641
3642 delay_secs = PQI_POST_RESET_DELAY_SECS;
3643
3644 switch (reset_status) {
3645 case RESET_TIMEDOUT:
3646 delay_secs = PQI_POST_OFA_RESET_DELAY_UPON_TIMEOUT_SECS;
3647 fallthrough;
3648 case RESET_INITIATE_DRIVER:
3649 dev_info(&ctrl_info->pci_dev->dev,
3650 "Online Firmware Activation: resetting controller\n");
3651 sis_soft_reset(ctrl_info);
3652 fallthrough;
3653 case RESET_INITIATE_FIRMWARE:
3654 ctrl_info->pqi_mode_enabled = false;
3655 pqi_save_ctrl_mode(ctrl_info, SIS_MODE);
3656 rc = pqi_ofa_ctrl_restart(ctrl_info, delay_secs);
3657 pqi_host_free_buffer(ctrl_info, &ctrl_info->ofa_memory);
3658 pqi_ctrl_ofa_done(ctrl_info);
3659 dev_info(&ctrl_info->pci_dev->dev,
3660 "Online Firmware Activation: %s\n",
3661 rc == 0 ? "SUCCESS" : "FAILED");
3662 break;
3663 case RESET_ABORT:
3664 dev_info(&ctrl_info->pci_dev->dev,
3665 "Online Firmware Activation ABORTED\n");
3666 if (ctrl_info->soft_reset_handshake_supported)
3667 pqi_clear_soft_reset_status(ctrl_info);
3668 pqi_host_free_buffer(ctrl_info, &ctrl_info->ofa_memory);
3669 pqi_ctrl_ofa_done(ctrl_info);
3670 pqi_ofa_ctrl_unquiesce(ctrl_info);
3671 break;
3672 case RESET_NORESPONSE:
3673 fallthrough;
3674 default:
3675 dev_err(&ctrl_info->pci_dev->dev,
3676 "unexpected Online Firmware Activation reset status: 0x%x\n",
3677 reset_status);
3678 pqi_host_free_buffer(ctrl_info, &ctrl_info->ofa_memory);
3679 pqi_ctrl_ofa_done(ctrl_info);
3680 pqi_ofa_ctrl_unquiesce(ctrl_info);
3681 pqi_take_ctrl_offline(ctrl_info, PQI_OFA_RESPONSE_TIMEOUT);
3682 break;
3683 }
3684 }
3685
3686 static void pqi_ofa_memory_alloc_worker(struct work_struct *work)
3687 {
3688 struct pqi_ctrl_info *ctrl_info;
3689
3690 ctrl_info = container_of(work, struct pqi_ctrl_info, ofa_memory_alloc_work);
3691
3692 pqi_ctrl_ofa_start(ctrl_info);
3693 pqi_host_setup_buffer(ctrl_info, &ctrl_info->ofa_memory, ctrl_info->ofa_bytes_requested, ctrl_info->ofa_bytes_requested);
3694 pqi_host_memory_update(ctrl_info, &ctrl_info->ofa_memory, PQI_VENDOR_GENERAL_OFA_MEMORY_UPDATE);
3695 }
3696
3697 static void pqi_ofa_quiesce_worker(struct work_struct *work)
3698 {
3699 struct pqi_ctrl_info *ctrl_info;
3700 struct pqi_event *event;
3701
3702 ctrl_info = container_of(work, struct pqi_ctrl_info, ofa_quiesce_work);
3703
3704 event = &ctrl_info->events[pqi_event_type_to_event_index(PQI_EVENT_TYPE_OFA)];
3705
3706 pqi_ofa_ctrl_quiesce(ctrl_info);
3707 pqi_acknowledge_event(ctrl_info, event);
3708 pqi_process_soft_reset(ctrl_info);
3709 }
3710
3711 static bool pqi_ofa_process_event(struct pqi_ctrl_info *ctrl_info,
3712 struct pqi_event *event)
3713 {
3714 bool ack_event;
3715
3716 ack_event = true;
3717
3718 switch (event->event_id) {
3719 case PQI_EVENT_OFA_MEMORY_ALLOCATION:
3720 dev_info(&ctrl_info->pci_dev->dev,
3721 "received Online Firmware Activation memory allocation request\n");
3722 schedule_work(&ctrl_info->ofa_memory_alloc_work);
3723 break;
3724 case PQI_EVENT_OFA_QUIESCE:
3725 dev_info(&ctrl_info->pci_dev->dev,
3726 "received Online Firmware Activation quiesce request\n");
3727 schedule_work(&ctrl_info->ofa_quiesce_work);
3728 ack_event = false;
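		/*
		 * The quiesce worker acknowledges this event itself once the
		 * controller has been quiesced (see pqi_ofa_quiesce_worker).
		 */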
3729 break;
3730 case PQI_EVENT_OFA_CANCELED:
3731 dev_info(&ctrl_info->pci_dev->dev,
3732 "received Online Firmware Activation cancel request: reason: %u\n",
3733 ctrl_info->ofa_cancel_reason);
3734 pqi_host_free_buffer(ctrl_info, &ctrl_info->ofa_memory);
3735 pqi_ctrl_ofa_done(ctrl_info);
3736 break;
3737 default:
3738 dev_err(&ctrl_info->pci_dev->dev,
3739 "received unknown Online Firmware Activation request: event ID: %u\n",
3740 event->event_id);
3741 break;
3742 }
3743
3744 return ack_event;
3745 }
3746
3747 static void pqi_mark_volumes_for_rescan(struct pqi_ctrl_info *ctrl_info)
3748 {
3749 unsigned long flags;
3750 struct pqi_scsi_dev *device;
3751
3752 spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
3753
3754 list_for_each_entry(device, &ctrl_info->scsi_device_list, scsi_device_list_entry) {
3755 if (pqi_is_logical_device(device) && device->devtype == TYPE_DISK)
3756 device->rescan = true;
3757 }
3758
3759 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
3760 }
3761
3762 static void pqi_disable_raid_bypass(struct pqi_ctrl_info *ctrl_info)
3763 {
3764 unsigned long flags;
3765 struct pqi_scsi_dev *device;
3766
3767 spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
3768
3769 list_for_each_entry(device, &ctrl_info->scsi_device_list, scsi_device_list_entry)
3770 if (device->raid_bypass_enabled)
3771 device->raid_bypass_enabled = false;
3772
3773 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
3774 }
3775
3776 static void pqi_event_worker(struct work_struct *work)
3777 {
3778 unsigned int i;
3779 bool rescan_needed;
3780 struct pqi_ctrl_info *ctrl_info;
3781 struct pqi_event *event;
3782 bool ack_event;
3783
3784 ctrl_info = container_of(work, struct pqi_ctrl_info, event_work);
3785
3786 pqi_ctrl_busy(ctrl_info);
3787 pqi_wait_if_ctrl_blocked(ctrl_info);
3788 if (pqi_ctrl_offline(ctrl_info))
3789 goto out;
3790
3791 rescan_needed = false;
3792 event = ctrl_info->events;
3793 for (i = 0; i < PQI_NUM_SUPPORTED_EVENTS; i++) {
3794 if (event->pending) {
3795 event->pending = false;
3796 if (event->event_type == PQI_EVENT_TYPE_OFA) {
3797 ack_event = pqi_ofa_process_event(ctrl_info, event);
3798 } else {
3799 ack_event = true;
3800 rescan_needed = true;
3801 if (event->event_type == PQI_EVENT_TYPE_LOGICAL_DEVICE)
3802 pqi_mark_volumes_for_rescan(ctrl_info);
3803 else if (event->event_type == PQI_EVENT_TYPE_AIO_STATE_CHANGE)
3804 pqi_disable_raid_bypass(ctrl_info);
3805 }
3806 if (ack_event)
3807 pqi_acknowledge_event(ctrl_info, event);
3808 }
3809 event++;
3810 }
3811
3812 #define PQI_RESCAN_WORK_FOR_EVENT_DELAY (5 * HZ)
3813
3814 if (rescan_needed)
3815 pqi_schedule_rescan_worker_with_delay(ctrl_info,
3816 PQI_RESCAN_WORK_FOR_EVENT_DELAY);
3817
3818 out:
3819 pqi_ctrl_unbusy(ctrl_info);
3820 }
3821
3822 #define PQI_HEARTBEAT_TIMER_INTERVAL (10 * HZ)
3823
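/*
 * The controller is deemed hung only if both the driver's interrupt count
 * and the firmware's heartbeat counter have stopped advancing since the
 * previous timer tick.
 */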
3824 static void pqi_heartbeat_timer_handler(struct timer_list *t)
3825 {
3826 int num_interrupts;
3827 u32 heartbeat_count;
3828 struct pqi_ctrl_info *ctrl_info = timer_container_of(ctrl_info, t,
3829 heartbeat_timer);
3830
3831 pqi_check_ctrl_health(ctrl_info);
3832 if (pqi_ctrl_offline(ctrl_info))
3833 return;
3834
3835 num_interrupts = atomic_read(&ctrl_info->num_interrupts);
3836 heartbeat_count = pqi_read_heartbeat_counter(ctrl_info);
3837
3838 if (num_interrupts == ctrl_info->previous_num_interrupts) {
3839 if (heartbeat_count == ctrl_info->previous_heartbeat_count) {
3840 dev_err(&ctrl_info->pci_dev->dev,
3841 "no heartbeat detected - last heartbeat count: %u\n",
3842 heartbeat_count);
3843 pqi_take_ctrl_offline(ctrl_info, PQI_NO_HEARTBEAT);
3844 return;
3845 }
3846 } else {
3847 ctrl_info->previous_num_interrupts = num_interrupts;
3848 }
3849
3850 ctrl_info->previous_heartbeat_count = heartbeat_count;
3851 mod_timer(&ctrl_info->heartbeat_timer,
3852 jiffies + PQI_HEARTBEAT_TIMER_INTERVAL);
3853 }
3854
3855 static void pqi_start_heartbeat_timer(struct pqi_ctrl_info *ctrl_info)
3856 {
3857 if (!ctrl_info->heartbeat_counter)
3858 return;
3859
3860 ctrl_info->previous_num_interrupts =
3861 atomic_read(&ctrl_info->num_interrupts);
3862 ctrl_info->previous_heartbeat_count =
3863 pqi_read_heartbeat_counter(ctrl_info);
3864
3865 ctrl_info->heartbeat_timer.expires =
3866 jiffies + PQI_HEARTBEAT_TIMER_INTERVAL;
3867 add_timer(&ctrl_info->heartbeat_timer);
3868 }
3869
3870 static inline void pqi_stop_heartbeat_timer(struct pqi_ctrl_info *ctrl_info)
3871 {
3872 timer_delete_sync(&ctrl_info->heartbeat_timer);
3873 }
3874
3875 static void pqi_ofa_capture_event_payload(struct pqi_ctrl_info *ctrl_info,
3876 struct pqi_event *event, struct pqi_event_response *response)
3877 {
3878 switch (event->event_id) {
3879 case PQI_EVENT_OFA_MEMORY_ALLOCATION:
3880 ctrl_info->ofa_bytes_requested =
3881 get_unaligned_le32(&response->data.ofa_memory_allocation.bytes_requested);
3882 break;
3883 case PQI_EVENT_OFA_CANCELED:
3884 ctrl_info->ofa_cancel_reason =
3885 get_unaligned_le16(&response->data.ofa_cancelled.reason);
3886 break;
3887 }
3888 }
3889
3890 static int pqi_process_event_intr(struct pqi_ctrl_info *ctrl_info)
3891 {
3892 int num_events;
3893 pqi_index_t oq_pi;
3894 pqi_index_t oq_ci;
3895 struct pqi_event_queue *event_queue;
3896 struct pqi_event_response *response;
3897 struct pqi_event *event;
3898 int event_index;
3899
3900 event_queue = &ctrl_info->event_queue;
3901 num_events = 0;
3902 oq_ci = event_queue->oq_ci_copy;
3903
3904 while (1) {
3905 oq_pi = readl(event_queue->oq_pi);
3906 if (oq_pi >= PQI_NUM_EVENT_QUEUE_ELEMENTS) {
3907 pqi_invalid_response(ctrl_info, PQI_EVENT_PI_OUT_OF_RANGE);
3908 dev_err(&ctrl_info->pci_dev->dev,
3909 "event interrupt: producer index (%u) out of range (0-%u): consumer index: %u\n",
3910 oq_pi, PQI_NUM_EVENT_QUEUE_ELEMENTS - 1, oq_ci);
3911 return -1;
3912 }
3913
3914 if (oq_pi == oq_ci)
3915 break;
3916
3917 num_events++;
3918 response = event_queue->oq_element_array + (oq_ci * PQI_EVENT_OQ_ELEMENT_LENGTH);
3919
3920 event_index = pqi_event_type_to_event_index(response->event_type);
3921
3922 if (event_index >= 0 && response->request_acknowledge) {
3923 event = &ctrl_info->events[event_index];
3924 event->pending = true;
3925 event->event_type = response->event_type;
3926 event->event_id = get_unaligned_le16(&response->event_id);
3927 event->additional_event_id =
3928 get_unaligned_le32(&response->additional_event_id);
3929 if (event->event_type == PQI_EVENT_TYPE_OFA)
3930 pqi_ofa_capture_event_payload(ctrl_info, event, response);
3931 }
3932
3933 oq_ci = (oq_ci + 1) % PQI_NUM_EVENT_QUEUE_ELEMENTS;
3934 }
3935
3936 if (num_events) {
3937 event_queue->oq_ci_copy = oq_ci;
3938 writel(oq_ci, event_queue->oq_ci);
3939 schedule_work(&ctrl_info->event_work);
3940 }
3941
3942 return num_events;
3943 }
3944
3945 #define PQI_LEGACY_INTX_MASK 0x1
3946
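/*
 * The legacy INTx mask is controlled via a set/clear register pair:
 * writing the mask bit to legacy_intx_mask_clear unmasks (enables) the
 * interrupt, while writing it to legacy_intx_mask_set masks it.
 */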
3947 static inline void pqi_configure_legacy_intx(struct pqi_ctrl_info *ctrl_info, bool enable_intx)
3948 {
3949 u32 intx_mask;
3950 struct pqi_device_registers __iomem *pqi_registers;
3951 volatile void __iomem *register_addr;
3952
3953 pqi_registers = ctrl_info->pqi_registers;
3954
3955 if (enable_intx)
3956 register_addr = &pqi_registers->legacy_intx_mask_clear;
3957 else
3958 register_addr = &pqi_registers->legacy_intx_mask_set;
3959
3960 intx_mask = readl(register_addr);
3961 intx_mask |= PQI_LEGACY_INTX_MASK;
3962 writel(intx_mask, register_addr);
3963 }
3964
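/*
 * Transition the controller between MSI-X, legacy INTx, and no-interrupt
 * modes. Each case below performs only the register updates required for
 * that particular old-mode/new-mode pair; same-mode transitions are no-ops.
 */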
3965 static void pqi_change_irq_mode(struct pqi_ctrl_info *ctrl_info,
3966 enum pqi_irq_mode new_mode)
3967 {
3968 switch (ctrl_info->irq_mode) {
3969 case IRQ_MODE_MSIX:
3970 switch (new_mode) {
3971 case IRQ_MODE_MSIX:
3972 break;
3973 case IRQ_MODE_INTX:
3974 pqi_configure_legacy_intx(ctrl_info, true);
3975 sis_enable_intx(ctrl_info);
3976 break;
3977 case IRQ_MODE_NONE:
3978 break;
3979 }
3980 break;
3981 case IRQ_MODE_INTX:
3982 switch (new_mode) {
3983 case IRQ_MODE_MSIX:
3984 pqi_configure_legacy_intx(ctrl_info, false);
3985 sis_enable_msix(ctrl_info);
3986 break;
3987 case IRQ_MODE_INTX:
3988 break;
3989 case IRQ_MODE_NONE:
3990 pqi_configure_legacy_intx(ctrl_info, false);
3991 break;
3992 }
3993 break;
3994 case IRQ_MODE_NONE:
3995 switch (new_mode) {
3996 case IRQ_MODE_MSIX:
3997 sis_enable_msix(ctrl_info);
3998 break;
3999 case IRQ_MODE_INTX:
4000 pqi_configure_legacy_intx(ctrl_info, true);
4001 sis_enable_intx(ctrl_info);
4002 break;
4003 case IRQ_MODE_NONE:
4004 break;
4005 }
4006 break;
4007 }
4008
4009 ctrl_info->irq_mode = new_mode;
4010 }
4011
4012 #define PQI_LEGACY_INTX_PENDING 0x1
4013
4014 static inline bool pqi_is_valid_irq(struct pqi_ctrl_info *ctrl_info)
4015 {
4016 bool valid_irq;
4017 u32 intx_status;
4018
4019 switch (ctrl_info->irq_mode) {
4020 case IRQ_MODE_MSIX:
4021 valid_irq = true;
4022 break;
4023 case IRQ_MODE_INTX:
4024 intx_status = readl(&ctrl_info->pqi_registers->legacy_intx_status);
4025 if (intx_status & PQI_LEGACY_INTX_PENDING)
4026 valid_irq = true;
4027 else
4028 valid_irq = false;
4029 break;
4030 case IRQ_MODE_NONE:
4031 default:
4032 valid_irq = false;
4033 break;
4034 }
4035
4036 return valid_irq;
4037 }
4038
4039 static irqreturn_t pqi_irq_handler(int irq, void *data)
4040 {
4041 struct pqi_ctrl_info *ctrl_info;
4042 struct pqi_queue_group *queue_group;
4043 int num_io_responses_handled;
4044 int num_events_handled;
4045
4046 queue_group = data;
4047 ctrl_info = queue_group->ctrl_info;
4048
4049 if (!pqi_is_valid_irq(ctrl_info))
4050 return IRQ_NONE;
4051
4052 num_io_responses_handled = pqi_process_io_intr(ctrl_info, queue_group);
4053 if (num_io_responses_handled < 0)
4054 goto out;
4055
4056 if (irq == ctrl_info->event_irq) {
4057 num_events_handled = pqi_process_event_intr(ctrl_info);
4058 if (num_events_handled < 0)
4059 goto out;
4060 } else {
4061 num_events_handled = 0;
4062 }
4063
4064 if (num_io_responses_handled + num_events_handled > 0)
4065 atomic_inc(&ctrl_info->num_interrupts);
4066
4067 pqi_start_io(ctrl_info, queue_group, RAID_PATH, NULL);
4068 pqi_start_io(ctrl_info, queue_group, AIO_PATH, NULL);
4069
4070 out:
4071 return IRQ_HANDLED;
4072 }
4073
4074 static int pqi_request_irqs(struct pqi_ctrl_info *ctrl_info)
4075 {
4076 struct pci_dev *pci_dev = ctrl_info->pci_dev;
4077 int i;
4078 int rc;
4079
4080 ctrl_info->event_irq = pci_irq_vector(pci_dev, 0);
4081
4082 for (i = 0; i < ctrl_info->num_msix_vectors_enabled; i++) {
4083 rc = request_irq(pci_irq_vector(pci_dev, i), pqi_irq_handler, 0,
4084 DRIVER_NAME_SHORT, &ctrl_info->queue_groups[i]);
4085 if (rc) {
4086 dev_err(&pci_dev->dev,
4087 "irq %u init failed with error %d\n",
4088 pci_irq_vector(pci_dev, i), rc);
4089 return rc;
4090 }
4091 ctrl_info->num_msix_vectors_initialized++;
4092 }
4093
4094 return 0;
4095 }
4096
4097 static void pqi_free_irqs(struct pqi_ctrl_info *ctrl_info)
4098 {
4099 int i;
4100
4101 for (i = 0; i < ctrl_info->num_msix_vectors_initialized; i++)
4102 free_irq(pci_irq_vector(ctrl_info->pci_dev, i),
4103 &ctrl_info->queue_groups[i]);
4104
4105 ctrl_info->num_msix_vectors_initialized = 0;
4106 }
4107
4108 static int pqi_enable_msix_interrupts(struct pqi_ctrl_info *ctrl_info)
4109 {
4110 int num_vectors_enabled;
4111 unsigned int flags = PCI_IRQ_MSIX;
4112
4113 if (!pqi_disable_managed_interrupts)
4114 flags |= PCI_IRQ_AFFINITY;
4115
4116 num_vectors_enabled = pci_alloc_irq_vectors(ctrl_info->pci_dev,
4117 PQI_MIN_MSIX_VECTORS, ctrl_info->num_queue_groups,
4118 flags);
4119 if (num_vectors_enabled < 0) {
4120 dev_err(&ctrl_info->pci_dev->dev,
4121 "MSI-X init failed with error %d\n",
4122 num_vectors_enabled);
4123 return num_vectors_enabled;
4124 }
4125
4126 ctrl_info->num_msix_vectors_enabled = num_vectors_enabled;
4127 ctrl_info->irq_mode = IRQ_MODE_MSIX;
4128 return 0;
4129 }
4130
4131 static void pqi_disable_msix_interrupts(struct pqi_ctrl_info *ctrl_info)
4132 {
4133 if (ctrl_info->num_msix_vectors_enabled) {
4134 pci_free_irq_vectors(ctrl_info->pci_dev);
4135 ctrl_info->num_msix_vectors_enabled = 0;
4136 }
4137 }
4138
4139 static int pqi_alloc_operational_queues(struct pqi_ctrl_info *ctrl_info)
4140 {
4141 unsigned int i;
4142 size_t alloc_length;
4143 size_t element_array_length_per_iq;
4144 size_t element_array_length_per_oq;
4145 void *element_array;
4146 void __iomem *next_queue_index;
4147 void *aligned_pointer;
4148 unsigned int num_inbound_queues;
4149 unsigned int num_outbound_queues;
4150 unsigned int num_queue_indexes;
4151 struct pqi_queue_group *queue_group;
4152
4153 element_array_length_per_iq =
4154 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH *
4155 ctrl_info->num_elements_per_iq;
4156 element_array_length_per_oq =
4157 PQI_OPERATIONAL_OQ_ELEMENT_LENGTH *
4158 ctrl_info->num_elements_per_oq;
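	/*
	 * Each queue group needs two inbound queues (RAID and AIO paths)
	 * and one outbound queue, i.e. three queue indexes (two iq_ci
	 * plus one oq_pi); the extra "+ 1" index is the event queue's
	 * oq_pi.
	 */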
4159 num_inbound_queues = ctrl_info->num_queue_groups * 2;
4160 num_outbound_queues = ctrl_info->num_queue_groups;
4161 num_queue_indexes = (ctrl_info->num_queue_groups * 3) + 1;
4162
4163 aligned_pointer = NULL;
4164
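	/*
	 * First pass: run the alignment arithmetic below against a NULL
	 * base pointer so that the final pointer value equals the total
	 * number of bytes the allocation requires.
	 */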
4165 for (i = 0; i < num_inbound_queues; i++) {
4166 aligned_pointer = PTR_ALIGN(aligned_pointer,
4167 PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT);
4168 aligned_pointer += element_array_length_per_iq;
4169 }
4170
4171 for (i = 0; i < num_outbound_queues; i++) {
4172 aligned_pointer = PTR_ALIGN(aligned_pointer,
4173 PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT);
4174 aligned_pointer += element_array_length_per_oq;
4175 }
4176
4177 aligned_pointer = PTR_ALIGN(aligned_pointer,
4178 PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT);
4179 aligned_pointer += PQI_NUM_EVENT_QUEUE_ELEMENTS *
4180 PQI_EVENT_OQ_ELEMENT_LENGTH;
4181
4182 for (i = 0; i < num_queue_indexes; i++) {
4183 aligned_pointer = PTR_ALIGN(aligned_pointer,
4184 PQI_OPERATIONAL_INDEX_ALIGNMENT);
4185 aligned_pointer += sizeof(pqi_index_t);
4186 }
4187
4188 alloc_length = (size_t)aligned_pointer +
4189 PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT;
4190
4191 alloc_length += PQI_EXTRA_SGL_MEMORY;
4192
4193 ctrl_info->queue_memory_base =
4194 dma_alloc_coherent(&ctrl_info->pci_dev->dev, alloc_length,
4195 &ctrl_info->queue_memory_base_dma_handle,
4196 GFP_KERNEL);
4197
4198 if (!ctrl_info->queue_memory_base)
4199 return -ENOMEM;
4200
4201 ctrl_info->queue_memory_length = alloc_length;
4202
4203 element_array = PTR_ALIGN(ctrl_info->queue_memory_base,
4204 PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT);
4205
4206 for (i = 0; i < ctrl_info->num_queue_groups; i++) {
4207 queue_group = &ctrl_info->queue_groups[i];
4208 queue_group->iq_element_array[RAID_PATH] = element_array;
4209 queue_group->iq_element_array_bus_addr[RAID_PATH] =
4210 ctrl_info->queue_memory_base_dma_handle +
4211 (element_array - ctrl_info->queue_memory_base);
4212 element_array += element_array_length_per_iq;
4213 element_array = PTR_ALIGN(element_array,
4214 PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT);
4215 queue_group->iq_element_array[AIO_PATH] = element_array;
4216 queue_group->iq_element_array_bus_addr[AIO_PATH] =
4217 ctrl_info->queue_memory_base_dma_handle +
4218 (element_array - ctrl_info->queue_memory_base);
4219 element_array += element_array_length_per_iq;
4220 element_array = PTR_ALIGN(element_array,
4221 PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT);
4222 }
4223
4224 for (i = 0; i < ctrl_info->num_queue_groups; i++) {
4225 queue_group = &ctrl_info->queue_groups[i];
4226 queue_group->oq_element_array = element_array;
4227 queue_group->oq_element_array_bus_addr =
4228 ctrl_info->queue_memory_base_dma_handle +
4229 (element_array - ctrl_info->queue_memory_base);
4230 element_array += element_array_length_per_oq;
4231 element_array = PTR_ALIGN(element_array,
4232 PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT);
4233 }
4234
4235 ctrl_info->event_queue.oq_element_array = element_array;
4236 ctrl_info->event_queue.oq_element_array_bus_addr =
4237 ctrl_info->queue_memory_base_dma_handle +
4238 (element_array - ctrl_info->queue_memory_base);
4239 element_array += PQI_NUM_EVENT_QUEUE_ELEMENTS *
4240 PQI_EVENT_OQ_ELEMENT_LENGTH;
4241
4242 next_queue_index = (void __iomem *)PTR_ALIGN(element_array,
4243 PQI_OPERATIONAL_INDEX_ALIGNMENT);
4244
4245 for (i = 0; i < ctrl_info->num_queue_groups; i++) {
4246 queue_group = &ctrl_info->queue_groups[i];
4247 queue_group->iq_ci[RAID_PATH] = next_queue_index;
4248 queue_group->iq_ci_bus_addr[RAID_PATH] =
4249 ctrl_info->queue_memory_base_dma_handle +
4250 (next_queue_index -
4251 (void __iomem *)ctrl_info->queue_memory_base);
4252 next_queue_index += sizeof(pqi_index_t);
4253 next_queue_index = PTR_ALIGN(next_queue_index,
4254 PQI_OPERATIONAL_INDEX_ALIGNMENT);
4255 queue_group->iq_ci[AIO_PATH] = next_queue_index;
4256 queue_group->iq_ci_bus_addr[AIO_PATH] =
4257 ctrl_info->queue_memory_base_dma_handle +
4258 (next_queue_index -
4259 (void __iomem *)ctrl_info->queue_memory_base);
4260 next_queue_index += sizeof(pqi_index_t);
4261 next_queue_index = PTR_ALIGN(next_queue_index,
4262 PQI_OPERATIONAL_INDEX_ALIGNMENT);
4263 queue_group->oq_pi = next_queue_index;
4264 queue_group->oq_pi_bus_addr =
4265 ctrl_info->queue_memory_base_dma_handle +
4266 (next_queue_index -
4267 (void __iomem *)ctrl_info->queue_memory_base);
4268 next_queue_index += sizeof(pqi_index_t);
4269 next_queue_index = PTR_ALIGN(next_queue_index,
4270 PQI_OPERATIONAL_INDEX_ALIGNMENT);
4271 }
4272
4273 ctrl_info->event_queue.oq_pi = next_queue_index;
4274 ctrl_info->event_queue.oq_pi_bus_addr =
4275 ctrl_info->queue_memory_base_dma_handle +
4276 (next_queue_index -
4277 (void __iomem *)ctrl_info->queue_memory_base);
4278
4279 return 0;
4280 }
4281
4282 static void pqi_init_operational_queues(struct pqi_ctrl_info *ctrl_info)
4283 {
4284 unsigned int i;
4285 u16 next_iq_id = PQI_MIN_OPERATIONAL_QUEUE_ID;
4286 u16 next_oq_id = PQI_MIN_OPERATIONAL_QUEUE_ID;
4287
4288 /*
4289 * Initialize the backpointers to the controller structure in
4290 * each operational queue group structure.
4291 */
4292 for (i = 0; i < ctrl_info->num_queue_groups; i++)
4293 ctrl_info->queue_groups[i].ctrl_info = ctrl_info;
4294
4295 /*
4296 * Assign IDs to all operational queues. Note that the IDs
4297 * assigned to operational IQs are independent of the IDs
4298 * assigned to operational OQs.
4299 */
4300 ctrl_info->event_queue.oq_id = next_oq_id++;
4301 for (i = 0; i < ctrl_info->num_queue_groups; i++) {
4302 ctrl_info->queue_groups[i].iq_id[RAID_PATH] = next_iq_id++;
4303 ctrl_info->queue_groups[i].iq_id[AIO_PATH] = next_iq_id++;
4304 ctrl_info->queue_groups[i].oq_id = next_oq_id++;
4305 }
4306
4307 /*
4308 * Assign MSI-X table entry indexes to all queues. Note that the
4309 * interrupt for the event queue is shared with the first queue group.
4310 */
4311 ctrl_info->event_queue.int_msg_num = 0;
4312 for (i = 0; i < ctrl_info->num_queue_groups; i++)
4313 ctrl_info->queue_groups[i].int_msg_num = i;
4314
4315 for (i = 0; i < ctrl_info->num_queue_groups; i++) {
4316 spin_lock_init(&ctrl_info->queue_groups[i].submit_lock[0]);
4317 spin_lock_init(&ctrl_info->queue_groups[i].submit_lock[1]);
4318 INIT_LIST_HEAD(&ctrl_info->queue_groups[i].request_list[0]);
4319 INIT_LIST_HEAD(&ctrl_info->queue_groups[i].request_list[1]);
4320 }
4321 }
4322
4323 static int pqi_alloc_admin_queues(struct pqi_ctrl_info *ctrl_info)
4324 {
4325 size_t alloc_length;
4326 struct pqi_admin_queues_aligned *admin_queues_aligned;
4327 struct pqi_admin_queues *admin_queues;
4328
4329 alloc_length = sizeof(struct pqi_admin_queues_aligned) +
4330 PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT;
4331
4332 ctrl_info->admin_queue_memory_base =
4333 dma_alloc_coherent(&ctrl_info->pci_dev->dev, alloc_length,
4334 &ctrl_info->admin_queue_memory_base_dma_handle,
4335 GFP_KERNEL);
4336
4337 if (!ctrl_info->admin_queue_memory_base)
4338 return -ENOMEM;
4339
4340 ctrl_info->admin_queue_memory_length = alloc_length;
4341
4342 admin_queues = &ctrl_info->admin_queues;
4343 admin_queues_aligned = PTR_ALIGN(ctrl_info->admin_queue_memory_base,
4344 PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT);
4345 admin_queues->iq_element_array =
4346 &admin_queues_aligned->iq_element_array;
4347 admin_queues->oq_element_array =
4348 &admin_queues_aligned->oq_element_array;
4349 admin_queues->iq_ci =
4350 (pqi_index_t __iomem *)&admin_queues_aligned->iq_ci;
4351 admin_queues->oq_pi =
4352 (pqi_index_t __iomem *)&admin_queues_aligned->oq_pi;
4353
4354 admin_queues->iq_element_array_bus_addr =
4355 ctrl_info->admin_queue_memory_base_dma_handle +
4356 (admin_queues->iq_element_array -
4357 ctrl_info->admin_queue_memory_base);
4358 admin_queues->oq_element_array_bus_addr =
4359 ctrl_info->admin_queue_memory_base_dma_handle +
4360 (admin_queues->oq_element_array -
4361 ctrl_info->admin_queue_memory_base);
4362 admin_queues->iq_ci_bus_addr =
4363 ctrl_info->admin_queue_memory_base_dma_handle +
4364 ((void __iomem *)admin_queues->iq_ci -
4365 (void __iomem *)ctrl_info->admin_queue_memory_base);
4366 admin_queues->oq_pi_bus_addr =
4367 ctrl_info->admin_queue_memory_base_dma_handle +
4368 ((void __iomem *)admin_queues->oq_pi -
4369 (void __iomem *)ctrl_info->admin_queue_memory_base);
4370
4371 return 0;
4372 }
4373
4374 #define PQI_ADMIN_QUEUE_CREATE_TIMEOUT_JIFFIES HZ
4375 #define PQI_ADMIN_QUEUE_CREATE_POLL_INTERVAL_MSECS 1
4376
4377 static int pqi_create_admin_queues(struct pqi_ctrl_info *ctrl_info)
4378 {
4379 struct pqi_device_registers __iomem *pqi_registers;
4380 struct pqi_admin_queues *admin_queues;
4381 unsigned long timeout;
4382 u8 status;
4383 u32 reg;
4384
4385 pqi_registers = ctrl_info->pqi_registers;
4386 admin_queues = &ctrl_info->admin_queues;
4387
4388 writeq((u64)admin_queues->iq_element_array_bus_addr,
4389 &pqi_registers->admin_iq_element_array_addr);
4390 writeq((u64)admin_queues->oq_element_array_bus_addr,
4391 &pqi_registers->admin_oq_element_array_addr);
4392 writeq((u64)admin_queues->iq_ci_bus_addr,
4393 &pqi_registers->admin_iq_ci_addr);
4394 writeq((u64)admin_queues->oq_pi_bus_addr,
4395 &pqi_registers->admin_oq_pi_addr);
4396
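	/*
	 * admin_iq_num_elements packs three fields into one register:
	 * the IQ element count in bits 0-7, the OQ element count in
	 * bits 8-15, and the MSI-X message number in bits 16 and up.
	 */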
4397 reg = PQI_ADMIN_IQ_NUM_ELEMENTS |
4398 (PQI_ADMIN_OQ_NUM_ELEMENTS << 8) |
4399 (admin_queues->int_msg_num << 16);
4400 writel(reg, &pqi_registers->admin_iq_num_elements);
4401
4402 writel(PQI_CREATE_ADMIN_QUEUE_PAIR,
4403 &pqi_registers->function_and_status_code);
4404
4405 timeout = PQI_ADMIN_QUEUE_CREATE_TIMEOUT_JIFFIES + jiffies;
4406 while (1) {
4407 msleep(PQI_ADMIN_QUEUE_CREATE_POLL_INTERVAL_MSECS);
4408 status = readb(&pqi_registers->function_and_status_code);
4409 if (status == PQI_STATUS_IDLE)
4410 break;
4411 if (time_after(jiffies, timeout))
4412 return -ETIMEDOUT;
4413 }
4414
4415 /*
4416 * The offset registers are not initialized to the correct
4417 * offsets until *after* the create admin queue pair command
4418 * completes successfully.
4419 */
4420 admin_queues->iq_pi = ctrl_info->iomem_base +
4421 PQI_DEVICE_REGISTERS_OFFSET +
4422 readq(&pqi_registers->admin_iq_pi_offset);
4423 admin_queues->oq_ci = ctrl_info->iomem_base +
4424 PQI_DEVICE_REGISTERS_OFFSET +
4425 readq(&pqi_registers->admin_oq_ci_offset);
4426
4427 return 0;
4428 }
4429
4430 static void pqi_submit_admin_request(struct pqi_ctrl_info *ctrl_info,
4431 struct pqi_general_admin_request *request)
4432 {
4433 struct pqi_admin_queues *admin_queues;
4434 void *next_element;
4435 pqi_index_t iq_pi;
4436
4437 admin_queues = &ctrl_info->admin_queues;
4438 iq_pi = admin_queues->iq_pi_copy;
4439
4440 next_element = admin_queues->iq_element_array +
4441 (iq_pi * PQI_ADMIN_IQ_ELEMENT_LENGTH);
4442
4443 memcpy(next_element, request, sizeof(*request));
4444
4445 iq_pi = (iq_pi + 1) % PQI_ADMIN_IQ_NUM_ELEMENTS;
4446 admin_queues->iq_pi_copy = iq_pi;
4447
4448 /*
4449 * This write notifies the controller that an IU is available to be
4450 * processed.
4451 */
4452 writel(iq_pi, admin_queues->iq_pi);
4453 }
4454
4455 #define PQI_ADMIN_REQUEST_TIMEOUT_SECS 60
4456
4457 static int pqi_poll_for_admin_response(struct pqi_ctrl_info *ctrl_info,
4458 struct pqi_general_admin_response *response)
4459 {
4460 struct pqi_admin_queues *admin_queues;
4461 pqi_index_t oq_pi;
4462 pqi_index_t oq_ci;
4463 unsigned long timeout;
4464
4465 admin_queues = &ctrl_info->admin_queues;
4466 oq_ci = admin_queues->oq_ci_copy;
4467
4468 timeout = (PQI_ADMIN_REQUEST_TIMEOUT_SECS * HZ) + jiffies;
4469
4470 while (1) {
4471 oq_pi = readl(admin_queues->oq_pi);
4472 if (oq_pi != oq_ci)
4473 break;
4474 if (time_after(jiffies, timeout)) {
4475 dev_err(&ctrl_info->pci_dev->dev,
4476 "timed out waiting for admin response\n");
4477 return -ETIMEDOUT;
4478 }
4479 if (!sis_is_firmware_running(ctrl_info))
4480 return -ENXIO;
4481 usleep_range(1000, 2000);
4482 }
4483
4484 memcpy(response, admin_queues->oq_element_array +
4485 (oq_ci * PQI_ADMIN_OQ_ELEMENT_LENGTH), sizeof(*response));
4486
4487 oq_ci = (oq_ci + 1) % PQI_ADMIN_OQ_NUM_ELEMENTS;
4488 admin_queues->oq_ci_copy = oq_ci;
4489 writel(oq_ci, admin_queues->oq_ci);
4490
4491 return 0;
4492 }
4493
4494 static void pqi_start_io(struct pqi_ctrl_info *ctrl_info,
4495 struct pqi_queue_group *queue_group, enum pqi_io_path path,
4496 struct pqi_io_request *io_request)
4497 {
4498 struct pqi_io_request *next;
4499 void *next_element;
4500 pqi_index_t iq_pi;
4501 pqi_index_t iq_ci;
4502 size_t iu_length;
4503 unsigned long flags;
4504 unsigned int num_elements_needed;
4505 unsigned int num_elements_to_end_of_queue;
4506 size_t copy_count;
4507 struct pqi_iu_header *request;
4508
4509 spin_lock_irqsave(&queue_group->submit_lock[path], flags);
4510
4511 if (io_request) {
4512 io_request->queue_group = queue_group;
4513 list_add_tail(&io_request->request_list_entry,
4514 &queue_group->request_list[path]);
4515 }
4516
4517 iq_pi = queue_group->iq_pi_copy[path];
4518
4519 list_for_each_entry_safe(io_request, next,
4520 &queue_group->request_list[path], request_list_entry) {
4521
4522 request = io_request->iu;
4523
4524 iu_length = get_unaligned_le16(&request->iu_length) +
4525 PQI_REQUEST_HEADER_LENGTH;
4526 num_elements_needed =
4527 DIV_ROUND_UP(iu_length,
4528 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH);
4529
4530 iq_ci = readl(queue_group->iq_ci[path]);
4531
4532 if (num_elements_needed > pqi_num_elements_free(iq_pi, iq_ci,
4533 ctrl_info->num_elements_per_iq))
4534 break;
4535
4536 put_unaligned_le16(queue_group->oq_id,
4537 &request->response_queue_id);
4538
4539 next_element = queue_group->iq_element_array[path] +
4540 (iq_pi * PQI_OPERATIONAL_IQ_ELEMENT_LENGTH);
4541
4542 num_elements_to_end_of_queue =
4543 ctrl_info->num_elements_per_iq - iq_pi;
4544
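		/*
		 * If the IU spans past the end of the circular queue, split
		 * the copy so that the tail of the IU wraps around to the
		 * start of the element array.
		 */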
4545 if (num_elements_needed <= num_elements_to_end_of_queue) {
4546 memcpy(next_element, request, iu_length);
4547 } else {
4548 copy_count = num_elements_to_end_of_queue *
4549 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH;
4550 memcpy(next_element, request, copy_count);
4551 memcpy(queue_group->iq_element_array[path],
4552 (u8 *)request + copy_count,
4553 iu_length - copy_count);
4554 }
4555
4556 iq_pi = (iq_pi + num_elements_needed) %
4557 ctrl_info->num_elements_per_iq;
4558
4559 list_del(&io_request->request_list_entry);
4560 }
4561
4562 if (iq_pi != queue_group->iq_pi_copy[path]) {
4563 queue_group->iq_pi_copy[path] = iq_pi;
4564 /*
4565 * This write notifies the controller that one or more IUs are
4566 * available to be processed.
4567 */
4568 writel(iq_pi, queue_group->iq_pi[path]);
4569 }
4570
4571 spin_unlock_irqrestore(&queue_group->submit_lock[path], flags);
4572 }
4573
4574 #define PQI_WAIT_FOR_COMPLETION_IO_TIMEOUT_SECS 10
4575
4576 static int pqi_wait_for_completion_io(struct pqi_ctrl_info *ctrl_info,
4577 struct completion *wait)
4578 {
4579 int rc;
4580
4581 while (1) {
4582 if (wait_for_completion_io_timeout(wait,
4583 PQI_WAIT_FOR_COMPLETION_IO_TIMEOUT_SECS * HZ)) {
4584 rc = 0;
4585 break;
4586 }
4587
4588 pqi_check_ctrl_health(ctrl_info);
4589 if (pqi_ctrl_offline(ctrl_info)) {
4590 rc = -ENXIO;
4591 break;
4592 }
4593 }
4594
4595 return rc;
4596 }
4597
4598 static void pqi_raid_synchronous_complete(struct pqi_io_request *io_request,
4599 void *context)
4600 {
4601 struct completion *waiting = context;
4602
4603 complete(waiting);
4604 }
4605
4606 static int pqi_process_raid_io_error_synchronous(
4607 struct pqi_raid_error_info *error_info)
4608 {
4609 int rc = -EIO;
4610
4611 switch (error_info->data_out_result) {
4612 case PQI_DATA_IN_OUT_GOOD:
4613 if (error_info->status == SAM_STAT_GOOD)
4614 rc = 0;
4615 break;
4616 case PQI_DATA_IN_OUT_UNDERFLOW:
4617 if (error_info->status == SAM_STAT_GOOD ||
4618 error_info->status == SAM_STAT_CHECK_CONDITION)
4619 rc = 0;
4620 break;
4621 case PQI_DATA_IN_OUT_ABORTED:
4622 rc = PQI_CMD_STATUS_ABORTED;
4623 break;
4624 }
4625
4626 return rc;
4627 }
4628
4629 static inline bool pqi_is_blockable_request(struct pqi_iu_header *request)
4630 {
4631 return (request->driver_flags & PQI_DRIVER_NONBLOCKABLE_REQUEST) == 0;
4632 }
4633
4634 static int pqi_submit_raid_request_synchronous(struct pqi_ctrl_info *ctrl_info,
4635 struct pqi_iu_header *request, unsigned int flags,
4636 struct pqi_raid_error_info *error_info)
4637 {
4638 int rc = 0;
4639 struct pqi_io_request *io_request;
4640 size_t iu_length;
4641 DECLARE_COMPLETION_ONSTACK(wait);
4642
4643 if (flags & PQI_SYNC_FLAGS_INTERRUPTABLE) {
4644 if (down_interruptible(&ctrl_info->sync_request_sem))
4645 return -ERESTARTSYS;
4646 } else {
4647 down(&ctrl_info->sync_request_sem);
4648 }
4649
4650 pqi_ctrl_busy(ctrl_info);
4651 /*
4652 * Wait for other admin queue updates such as:
4653 * config table changes, OFA memory updates, ...
4654 */
4655 if (pqi_is_blockable_request(request))
4656 pqi_wait_if_ctrl_blocked(ctrl_info);
4657
4658 if (pqi_ctrl_offline(ctrl_info)) {
4659 rc = -ENXIO;
4660 goto out;
4661 }
4662
4663 io_request = pqi_alloc_io_request(ctrl_info, NULL);
4664
4665 put_unaligned_le16(io_request->index,
4666 &(((struct pqi_raid_path_request *)request)->request_id));
4667
4668 if (request->iu_type == PQI_REQUEST_IU_RAID_PATH_IO)
4669 ((struct pqi_raid_path_request *)request)->error_index =
4670 ((struct pqi_raid_path_request *)request)->request_id;
4671
4672 iu_length = get_unaligned_le16(&request->iu_length) +
4673 PQI_REQUEST_HEADER_LENGTH;
4674 memcpy(io_request->iu, request, iu_length);
4675
4676 io_request->io_complete_callback = pqi_raid_synchronous_complete;
4677 io_request->context = &wait;
4678
4679 pqi_start_io(ctrl_info, &ctrl_info->queue_groups[PQI_DEFAULT_QUEUE_GROUP], RAID_PATH,
4680 io_request);
4681
4682 pqi_wait_for_completion_io(ctrl_info, &wait);
4683
4684 if (error_info) {
4685 if (io_request->error_info)
4686 memcpy(error_info, io_request->error_info, sizeof(*error_info));
4687 else
4688 memset(error_info, 0, sizeof(*error_info));
4689 } else if (rc == 0 && io_request->error_info) {
4690 rc = pqi_process_raid_io_error_synchronous(io_request->error_info);
4691 }
4692
4693 pqi_free_io_request(io_request);
4694
4695 out:
4696 pqi_ctrl_unbusy(ctrl_info);
4697 up(&ctrl_info->sync_request_sem);
4698
4699 return rc;
4700 }
4701
4702 static int pqi_validate_admin_response(
4703 struct pqi_general_admin_response *response, u8 expected_function_code)
4704 {
4705 if (response->header.iu_type != PQI_RESPONSE_IU_GENERAL_ADMIN)
4706 return -EINVAL;
4707
4708 if (get_unaligned_le16(&response->header.iu_length) !=
4709 PQI_GENERAL_ADMIN_IU_LENGTH)
4710 return -EINVAL;
4711
4712 if (response->function_code != expected_function_code)
4713 return -EINVAL;
4714
4715 if (response->status != PQI_GENERAL_ADMIN_STATUS_SUCCESS)
4716 return -EINVAL;
4717
4718 return 0;
4719 }
4720
4721 static int pqi_submit_admin_request_synchronous(
4722 struct pqi_ctrl_info *ctrl_info,
4723 struct pqi_general_admin_request *request,
4724 struct pqi_general_admin_response *response)
4725 {
4726 int rc;
4727
4728 pqi_submit_admin_request(ctrl_info, request);
4729
4730 rc = pqi_poll_for_admin_response(ctrl_info, response);
4731
4732 if (rc == 0)
4733 rc = pqi_validate_admin_response(response, request->function_code);
4734
4735 return rc;
4736 }
4737
4738 static int pqi_report_device_capability(struct pqi_ctrl_info *ctrl_info)
4739 {
4740 int rc;
4741 struct pqi_general_admin_request request;
4742 struct pqi_general_admin_response response;
4743 struct pqi_device_capability *capability;
4744 struct pqi_iu_layer_descriptor *sop_iu_layer_descriptor;
4745
4746 capability = kmalloc(sizeof(*capability), GFP_KERNEL);
4747 if (!capability)
4748 return -ENOMEM;
4749
4750 memset(&request, 0, sizeof(request));
4751
4752 request.header.iu_type = PQI_REQUEST_IU_GENERAL_ADMIN;
4753 put_unaligned_le16(PQI_GENERAL_ADMIN_IU_LENGTH,
4754 &request.header.iu_length);
4755 request.function_code =
4756 PQI_GENERAL_ADMIN_FUNCTION_REPORT_DEVICE_CAPABILITY;
4757 put_unaligned_le32(sizeof(*capability),
4758 &request.data.report_device_capability.buffer_length);
4759
4760 rc = pqi_map_single(ctrl_info->pci_dev,
4761 &request.data.report_device_capability.sg_descriptor,
4762 capability, sizeof(*capability),
4763 DMA_FROM_DEVICE);
4764 if (rc)
4765 goto out;
4766
4767 rc = pqi_submit_admin_request_synchronous(ctrl_info, &request, &response);
4768
4769 pqi_pci_unmap(ctrl_info->pci_dev,
4770 &request.data.report_device_capability.sg_descriptor, 1,
4771 DMA_FROM_DEVICE);
4772
4773 if (rc)
4774 goto out;
4775
4776 if (response.status != PQI_GENERAL_ADMIN_STATUS_SUCCESS) {
4777 rc = -EIO;
4778 goto out;
4779 }
4780
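	/*
	 * Cache the reported limits; the firmware expresses queue element
	 * lengths in 16-byte units, hence the multiplications by 16 below.
	 */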
4781 ctrl_info->max_inbound_queues =
4782 get_unaligned_le16(&capability->max_inbound_queues);
4783 ctrl_info->max_elements_per_iq =
4784 get_unaligned_le16(&capability->max_elements_per_iq);
4785 ctrl_info->max_iq_element_length =
4786 get_unaligned_le16(&capability->max_iq_element_length)
4787 * 16;
4788 ctrl_info->max_outbound_queues =
4789 get_unaligned_le16(&capability->max_outbound_queues);
4790 ctrl_info->max_elements_per_oq =
4791 get_unaligned_le16(&capability->max_elements_per_oq);
4792 ctrl_info->max_oq_element_length =
4793 get_unaligned_le16(&capability->max_oq_element_length)
4794 * 16;
4795
4796 sop_iu_layer_descriptor =
4797 &capability->iu_layer_descriptors[PQI_PROTOCOL_SOP];
4798
4799 ctrl_info->max_inbound_iu_length_per_firmware =
4800 get_unaligned_le16(
4801 &sop_iu_layer_descriptor->max_inbound_iu_length);
4802 ctrl_info->inbound_spanning_supported =
4803 sop_iu_layer_descriptor->inbound_spanning_supported;
4804 ctrl_info->outbound_spanning_supported =
4805 sop_iu_layer_descriptor->outbound_spanning_supported;
4806
4807 out:
4808 kfree(capability);
4809
4810 return rc;
4811 }
4812
4813 static int pqi_validate_device_capability(struct pqi_ctrl_info *ctrl_info)
4814 {
4815 if (ctrl_info->max_iq_element_length <
4816 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH) {
4817 dev_err(&ctrl_info->pci_dev->dev,
4818 "max. inbound queue element length of %d is less than the required length of %d\n",
4819 ctrl_info->max_iq_element_length,
4820 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH);
4821 return -EINVAL;
4822 }
4823
4824 if (ctrl_info->max_oq_element_length <
4825 PQI_OPERATIONAL_OQ_ELEMENT_LENGTH) {
4826 dev_err(&ctrl_info->pci_dev->dev,
4827 "max. outbound queue element length of %d is less than the required length of %d\n",
4828 ctrl_info->max_oq_element_length,
4829 PQI_OPERATIONAL_OQ_ELEMENT_LENGTH);
4830 return -EINVAL;
4831 }
4832
4833 if (ctrl_info->max_inbound_iu_length_per_firmware <
4834 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH) {
4835 dev_err(&ctrl_info->pci_dev->dev,
4836 "max. inbound IU length of %u is less than the min. required length of %d\n",
4837 ctrl_info->max_inbound_iu_length_per_firmware,
4838 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH);
4839 return -EINVAL;
4840 }
4841
4842 if (!ctrl_info->inbound_spanning_supported) {
4843 dev_err(&ctrl_info->pci_dev->dev,
4844 "the controller does not support inbound spanning\n");
4845 return -EINVAL;
4846 }
4847
4848 if (ctrl_info->outbound_spanning_supported) {
4849 dev_err(&ctrl_info->pci_dev->dev,
4850 "the controller supports outbound spanning but this driver does not\n");
4851 return -EINVAL;
4852 }
4853
4854 return 0;
4855 }
4856
4857 static int pqi_create_event_queue(struct pqi_ctrl_info *ctrl_info)
4858 {
4859 int rc;
4860 struct pqi_event_queue *event_queue;
4861 struct pqi_general_admin_request request;
4862 struct pqi_general_admin_response response;
4863
4864 event_queue = &ctrl_info->event_queue;
4865
4866 /*
4867 * Create OQ (Outbound Queue - device to host queue) dedicated
4868 * to events.
4869 */
4870 memset(&request, 0, sizeof(request));
4871 request.header.iu_type = PQI_REQUEST_IU_GENERAL_ADMIN;
4872 put_unaligned_le16(PQI_GENERAL_ADMIN_IU_LENGTH,
4873 &request.header.iu_length);
4874 request.function_code = PQI_GENERAL_ADMIN_FUNCTION_CREATE_OQ;
4875 put_unaligned_le16(event_queue->oq_id,
4876 &request.data.create_operational_oq.queue_id);
4877 put_unaligned_le64((u64)event_queue->oq_element_array_bus_addr,
4878 &request.data.create_operational_oq.element_array_addr);
4879 put_unaligned_le64((u64)event_queue->oq_pi_bus_addr,
4880 &request.data.create_operational_oq.pi_addr);
4881 put_unaligned_le16(PQI_NUM_EVENT_QUEUE_ELEMENTS,
4882 &request.data.create_operational_oq.num_elements);
4883 put_unaligned_le16(PQI_EVENT_OQ_ELEMENT_LENGTH / 16,
4884 &request.data.create_operational_oq.element_length);
4885 request.data.create_operational_oq.queue_protocol = PQI_PROTOCOL_SOP;
4886 put_unaligned_le16(event_queue->int_msg_num,
4887 &request.data.create_operational_oq.int_msg_num);
4888
4889 rc = pqi_submit_admin_request_synchronous(ctrl_info, &request,
4890 &response);
4891 if (rc)
4892 return rc;
4893
4894 event_queue->oq_ci = ctrl_info->iomem_base +
4895 PQI_DEVICE_REGISTERS_OFFSET +
4896 get_unaligned_le64(
4897 &response.data.create_operational_oq.oq_ci_offset);
4898
4899 return 0;
4900 }
4901
4902 static int pqi_create_queue_group(struct pqi_ctrl_info *ctrl_info,
4903 unsigned int group_number)
4904 {
4905 int rc;
4906 struct pqi_queue_group *queue_group;
4907 struct pqi_general_admin_request request;
4908 struct pqi_general_admin_response response;
4909
4910 queue_group = &ctrl_info->queue_groups[group_number];
4911
4912 /*
4913 * Create IQ (Inbound Queue - host to device queue) for
4914 * RAID path.
4915 */
4916 memset(&request, 0, sizeof(request));
4917 request.header.iu_type = PQI_REQUEST_IU_GENERAL_ADMIN;
4918 put_unaligned_le16(PQI_GENERAL_ADMIN_IU_LENGTH,
4919 &request.header.iu_length);
4920 request.function_code = PQI_GENERAL_ADMIN_FUNCTION_CREATE_IQ;
4921 put_unaligned_le16(queue_group->iq_id[RAID_PATH],
4922 &request.data.create_operational_iq.queue_id);
4923 put_unaligned_le64(
4924 (u64)queue_group->iq_element_array_bus_addr[RAID_PATH],
4925 &request.data.create_operational_iq.element_array_addr);
4926 put_unaligned_le64((u64)queue_group->iq_ci_bus_addr[RAID_PATH],
4927 &request.data.create_operational_iq.ci_addr);
4928 put_unaligned_le16(ctrl_info->num_elements_per_iq,
4929 &request.data.create_operational_iq.num_elements);
4930 put_unaligned_le16(PQI_OPERATIONAL_IQ_ELEMENT_LENGTH / 16,
4931 &request.data.create_operational_iq.element_length);
4932 request.data.create_operational_iq.queue_protocol = PQI_PROTOCOL_SOP;
4933
4934 rc = pqi_submit_admin_request_synchronous(ctrl_info, &request,
4935 &response);
4936 if (rc) {
4937 dev_err(&ctrl_info->pci_dev->dev,
4938 "error creating inbound RAID queue\n");
4939 return rc;
4940 }
4941
4942 queue_group->iq_pi[RAID_PATH] = ctrl_info->iomem_base +
4943 PQI_DEVICE_REGISTERS_OFFSET +
4944 get_unaligned_le64(
4945 &response.data.create_operational_iq.iq_pi_offset);
4946
4947 /*
4948 * Create IQ (Inbound Queue - host to device queue) for
4949 * Advanced I/O (AIO) path.
4950 */
4951 memset(&request, 0, sizeof(request));
4952 request.header.iu_type = PQI_REQUEST_IU_GENERAL_ADMIN;
4953 put_unaligned_le16(PQI_GENERAL_ADMIN_IU_LENGTH,
4954 &request.header.iu_length);
4955 request.function_code = PQI_GENERAL_ADMIN_FUNCTION_CREATE_IQ;
4956 put_unaligned_le16(queue_group->iq_id[AIO_PATH],
4957 &request.data.create_operational_iq.queue_id);
4958 put_unaligned_le64((u64)queue_group->
4959 iq_element_array_bus_addr[AIO_PATH],
4960 &request.data.create_operational_iq.element_array_addr);
4961 put_unaligned_le64((u64)queue_group->iq_ci_bus_addr[AIO_PATH],
4962 &request.data.create_operational_iq.ci_addr);
4963 put_unaligned_le16(ctrl_info->num_elements_per_iq,
4964 &request.data.create_operational_iq.num_elements);
4965 put_unaligned_le16(PQI_OPERATIONAL_IQ_ELEMENT_LENGTH / 16,
4966 &request.data.create_operational_iq.element_length);
4967 request.data.create_operational_iq.queue_protocol = PQI_PROTOCOL_SOP;
4968
4969 rc = pqi_submit_admin_request_synchronous(ctrl_info, &request,
4970 &response);
4971 if (rc) {
4972 dev_err(&ctrl_info->pci_dev->dev,
4973 "error creating inbound AIO queue\n");
4974 return rc;
4975 }
4976
4977 queue_group->iq_pi[AIO_PATH] = ctrl_info->iomem_base +
4978 PQI_DEVICE_REGISTERS_OFFSET +
4979 get_unaligned_le64(
4980 &response.data.create_operational_iq.iq_pi_offset);
4981
4982 /*
4983 * Designate the 2nd IQ as the AIO path. By default, all IQs are
4984 * assumed to be for RAID path I/O unless we change the queue's
4985 * property.
4986 */
4987 memset(&request, 0, sizeof(request));
4988 request.header.iu_type = PQI_REQUEST_IU_GENERAL_ADMIN;
4989 put_unaligned_le16(PQI_GENERAL_ADMIN_IU_LENGTH,
4990 &request.header.iu_length);
4991 request.function_code = PQI_GENERAL_ADMIN_FUNCTION_CHANGE_IQ_PROPERTY;
4992 put_unaligned_le16(queue_group->iq_id[AIO_PATH],
4993 &request.data.change_operational_iq_properties.queue_id);
4994 put_unaligned_le32(PQI_IQ_PROPERTY_IS_AIO_QUEUE,
4995 &request.data.change_operational_iq_properties.vendor_specific);
4996
4997 rc = pqi_submit_admin_request_synchronous(ctrl_info, &request,
4998 &response);
4999 if (rc) {
5000 dev_err(&ctrl_info->pci_dev->dev,
5001 "error changing queue property\n");
5002 return rc;
5003 }
5004
5005 /*
5006 * Create OQ (Outbound Queue - device to host queue).
5007 */
5008 memset(&request, 0, sizeof(request));
5009 request.header.iu_type = PQI_REQUEST_IU_GENERAL_ADMIN;
5010 put_unaligned_le16(PQI_GENERAL_ADMIN_IU_LENGTH,
5011 &request.header.iu_length);
5012 request.function_code = PQI_GENERAL_ADMIN_FUNCTION_CREATE_OQ;
5013 put_unaligned_le16(queue_group->oq_id,
5014 &request.data.create_operational_oq.queue_id);
5015 put_unaligned_le64((u64)queue_group->oq_element_array_bus_addr,
5016 &request.data.create_operational_oq.element_array_addr);
5017 put_unaligned_le64((u64)queue_group->oq_pi_bus_addr,
5018 &request.data.create_operational_oq.pi_addr);
5019 put_unaligned_le16(ctrl_info->num_elements_per_oq,
5020 &request.data.create_operational_oq.num_elements);
5021 put_unaligned_le16(PQI_OPERATIONAL_OQ_ELEMENT_LENGTH / 16,
5022 &request.data.create_operational_oq.element_length);
5023 request.data.create_operational_oq.queue_protocol = PQI_PROTOCOL_SOP;
5024 put_unaligned_le16(queue_group->int_msg_num,
5025 &request.data.create_operational_oq.int_msg_num);
5026
5027 rc = pqi_submit_admin_request_synchronous(ctrl_info, &request,
5028 &response);
5029 if (rc) {
5030 dev_err(&ctrl_info->pci_dev->dev,
5031 "error creating outbound queue\n");
5032 return rc;
5033 }
5034
5035 queue_group->oq_ci = ctrl_info->iomem_base +
5036 PQI_DEVICE_REGISTERS_OFFSET +
5037 get_unaligned_le64(
5038 &response.data.create_operational_oq.oq_ci_offset);
5039
5040 return 0;
5041 }
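/*
 * A sketch of one queue group after the three admin requests above
 * succeed (illustrative, not a definitive layout):
 *
 *	IQ[RAID_PATH] --\
 *	                 >-- one shared OQ --> MSI-X vector int_msg_num
 *	IQ[AIO_PATH]  --/
 *
 * Each CREATE response carries a register offset; iq_pi[path] and oq_ci
 * end up pointing at the matching doorbell registers within the mapped
 * device registers region (iomem_base + PQI_DEVICE_REGISTERS_OFFSET).
 */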
5042
5043 static int pqi_create_queues(struct pqi_ctrl_info *ctrl_info)
5044 {
5045 int rc;
5046 unsigned int i;
5047
5048 rc = pqi_create_event_queue(ctrl_info);
5049 if (rc) {
5050 dev_err(&ctrl_info->pci_dev->dev,
5051 "error creating event queue\n");
5052 return rc;
5053 }
5054
5055 for (i = 0; i < ctrl_info->num_queue_groups; i++) {
5056 rc = pqi_create_queue_group(ctrl_info, i);
5057 if (rc) {
5058 dev_err(&ctrl_info->pci_dev->dev,
5059 "error creating queue group number %u/%u\n",
5060 i, ctrl_info->num_queue_groups);
5061 return rc;
5062 }
5063 }
5064
5065 return 0;
5066 }
5067
5068 #define PQI_REPORT_EVENT_CONFIG_BUFFER_LENGTH \
5069 struct_size_t(struct pqi_event_config, descriptors, PQI_MAX_EVENT_DESCRIPTORS)
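/*
 * Illustrative expansion of the macro above (a sketch; the actual sizes
 * come from the structure definitions in smartpqi.h): struct_size_t()
 * yields the size of a struct pqi_event_config whose trailing flexible
 * array holds PQI_MAX_EVENT_DESCRIPTORS entries, i.e. roughly
 *
 *	sizeof(struct pqi_event_config) +
 *		PQI_MAX_EVENT_DESCRIPTORS * sizeof(struct pqi_event_descriptor)
 */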
5070
5071 static int pqi_configure_events(struct pqi_ctrl_info *ctrl_info,
5072 bool enable_events)
5073 {
5074 int rc;
5075 unsigned int i;
5076 struct pqi_event_config *event_config;
5077 struct pqi_event_descriptor *event_descriptor;
5078 struct pqi_general_management_request request;
5079
5080 event_config = kmalloc(PQI_REPORT_EVENT_CONFIG_BUFFER_LENGTH,
5081 GFP_KERNEL);
5082 if (!event_config)
5083 return -ENOMEM;
5084
5085 memset(&request, 0, sizeof(request));
5086
5087 request.header.iu_type = PQI_REQUEST_IU_REPORT_VENDOR_EVENT_CONFIG;
5088 put_unaligned_le16(offsetof(struct pqi_general_management_request,
5089 data.report_event_configuration.sg_descriptors[1]) -
5090 PQI_REQUEST_HEADER_LENGTH, &request.header.iu_length);
5091 put_unaligned_le32(PQI_REPORT_EVENT_CONFIG_BUFFER_LENGTH,
5092 &request.data.report_event_configuration.buffer_length);
5093
5094 rc = pqi_map_single(ctrl_info->pci_dev,
5095 request.data.report_event_configuration.sg_descriptors,
5096 event_config, PQI_REPORT_EVENT_CONFIG_BUFFER_LENGTH,
5097 DMA_FROM_DEVICE);
5098 if (rc)
5099 goto out;
5100
5101 rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header, 0, NULL);
5102
5103 pqi_pci_unmap(ctrl_info->pci_dev,
5104 request.data.report_event_configuration.sg_descriptors, 1,
5105 DMA_FROM_DEVICE);
5106
5107 if (rc)
5108 goto out;
5109
5110 for (i = 0; i < event_config->num_event_descriptors; i++) {
5111 event_descriptor = &event_config->descriptors[i];
5112 if (enable_events &&
5113 pqi_is_supported_event(event_descriptor->event_type))
5114 put_unaligned_le16(ctrl_info->event_queue.oq_id,
5115 &event_descriptor->oq_id);
5116 else
5117 put_unaligned_le16(0, &event_descriptor->oq_id);
5118 }
5119
5120 memset(&request, 0, sizeof(request));
5121
5122 request.header.iu_type = PQI_REQUEST_IU_SET_VENDOR_EVENT_CONFIG;
5123 put_unaligned_le16(offsetof(struct pqi_general_management_request,
5124 data.report_event_configuration.sg_descriptors[1]) -
5125 PQI_REQUEST_HEADER_LENGTH, &request.header.iu_length);
5126 put_unaligned_le32(PQI_REPORT_EVENT_CONFIG_BUFFER_LENGTH,
5127 &request.data.report_event_configuration.buffer_length);
5128
5129 rc = pqi_map_single(ctrl_info->pci_dev,
5130 request.data.report_event_configuration.sg_descriptors,
5131 event_config, PQI_REPORT_EVENT_CONFIG_BUFFER_LENGTH,
5132 DMA_TO_DEVICE);
5133 if (rc)
5134 goto out;
5135
5136 rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header, 0, NULL);
5137
5138 pqi_pci_unmap(ctrl_info->pci_dev,
5139 request.data.report_event_configuration.sg_descriptors, 1,
5140 DMA_TO_DEVICE);
5141
5142 out:
5143 kfree(event_config);
5144
5145 return rc;
5146 }
5147
5148 static inline int pqi_enable_events(struct pqi_ctrl_info *ctrl_info)
5149 {
5150 return pqi_configure_events(ctrl_info, true);
5151 }
5152
5153 static void pqi_free_all_io_requests(struct pqi_ctrl_info *ctrl_info)
5154 {
5155 unsigned int i;
5156 struct device *dev;
5157 size_t sg_chain_buffer_length;
5158 struct pqi_io_request *io_request;
5159
5160 if (!ctrl_info->io_request_pool)
5161 return;
5162
5163 dev = &ctrl_info->pci_dev->dev;
5164 sg_chain_buffer_length = ctrl_info->sg_chain_buffer_length;
5165 io_request = ctrl_info->io_request_pool;
5166
5167 for (i = 0; i < ctrl_info->max_io_slots; i++) {
5168 kfree(io_request->iu);
5169 if (!io_request->sg_chain_buffer)
5170 break;
5171 dma_free_coherent(dev, sg_chain_buffer_length,
5172 io_request->sg_chain_buffer,
5173 io_request->sg_chain_buffer_dma_handle);
5174 io_request++;
5175 }
5176
5177 kfree(ctrl_info->io_request_pool);
5178 ctrl_info->io_request_pool = NULL;
5179 }
5180
5181 static inline int pqi_alloc_error_buffer(struct pqi_ctrl_info *ctrl_info)
5182 {
5183 ctrl_info->error_buffer = dma_alloc_coherent(&ctrl_info->pci_dev->dev,
5184 ctrl_info->error_buffer_length,
5185 &ctrl_info->error_buffer_dma_handle,
5186 GFP_KERNEL);
5187 if (!ctrl_info->error_buffer)
5188 return -ENOMEM;
5189
5190 return 0;
5191 }
5192
5193 static int pqi_alloc_io_resources(struct pqi_ctrl_info *ctrl_info)
5194 {
5195 unsigned int i;
5196 void *sg_chain_buffer;
5197 size_t sg_chain_buffer_length;
5198 dma_addr_t sg_chain_buffer_dma_handle;
5199 struct device *dev;
5200 struct pqi_io_request *io_request;
5201
5202 ctrl_info->io_request_pool = kcalloc(ctrl_info->max_io_slots,
5203 sizeof(ctrl_info->io_request_pool[0]), GFP_KERNEL);
5204
5205 if (!ctrl_info->io_request_pool) {
5206 dev_err(&ctrl_info->pci_dev->dev,
5207 "failed to allocate I/O request pool\n");
5208 goto error;
5209 }
5210
5211 dev = &ctrl_info->pci_dev->dev;
5212 sg_chain_buffer_length = ctrl_info->sg_chain_buffer_length;
5213 io_request = ctrl_info->io_request_pool;
5214
5215 for (i = 0; i < ctrl_info->max_io_slots; i++) {
5216 io_request->iu = kmalloc(ctrl_info->max_inbound_iu_length, GFP_KERNEL);
5217
5218 if (!io_request->iu) {
5219 dev_err(&ctrl_info->pci_dev->dev,
5220 "failed to allocate IU buffers\n");
5221 goto error;
5222 }
5223
5224 sg_chain_buffer = dma_alloc_coherent(dev,
5225 sg_chain_buffer_length, &sg_chain_buffer_dma_handle,
5226 GFP_KERNEL);
5227
5228 if (!sg_chain_buffer) {
5229 dev_err(&ctrl_info->pci_dev->dev,
5230 "failed to allocate PQI scatter-gather chain buffers\n");
5231 goto error;
5232 }
5233
5234 io_request->index = i;
5235 io_request->sg_chain_buffer = sg_chain_buffer;
5236 io_request->sg_chain_buffer_dma_handle = sg_chain_buffer_dma_handle;
5237 io_request++;
5238 }
5239
5240 return 0;
5241
5242 error:
5243 pqi_free_all_io_requests(ctrl_info);
5244
5245 return -ENOMEM;
5246 }
5247
5248 /*
5249 * Calculate required resources that are sized based on max. outstanding
5250 * requests and max. transfer size.
5251 */
5252
5253 static void pqi_calculate_io_resources(struct pqi_ctrl_info *ctrl_info)
5254 {
5255 u32 max_transfer_size;
5256 u32 max_sg_entries;
5257
5258 ctrl_info->scsi_ml_can_queue =
5259 ctrl_info->max_outstanding_requests - PQI_RESERVED_IO_SLOTS;
5260 ctrl_info->max_io_slots = ctrl_info->max_outstanding_requests;
5261
5262 ctrl_info->error_buffer_length =
5263 ctrl_info->max_io_slots * PQI_ERROR_BUFFER_ELEMENT_LENGTH;
5264
5265 if (is_kdump_kernel())
5266 max_transfer_size = min(ctrl_info->max_transfer_size,
5267 PQI_MAX_TRANSFER_SIZE_KDUMP);
5268 else
5269 max_transfer_size = min(ctrl_info->max_transfer_size,
5270 PQI_MAX_TRANSFER_SIZE);
5271
5272 max_sg_entries = max_transfer_size / PAGE_SIZE;
5273
5274 /* +1 to cover when the buffer is not page-aligned. */
5275 max_sg_entries++;
5276
5277 max_sg_entries = min(ctrl_info->max_sg_entries, max_sg_entries);
5278
5279 max_transfer_size = (max_sg_entries - 1) * PAGE_SIZE;
5280
5281 ctrl_info->sg_chain_buffer_length =
5282 (max_sg_entries * sizeof(struct pqi_sg_descriptor)) +
5283 PQI_EXTRA_SGL_MEMORY;
5284 ctrl_info->sg_tablesize = max_sg_entries;
5285 ctrl_info->max_sectors = max_transfer_size / 512;
5286 }
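/*
 * Worked example of the sizing above, assuming PAGE_SIZE == 4 KiB and a
 * 1 MiB transfer limit (illustrative values only):
 *
 *	max_sg_entries    = (1 MiB / 4 KiB) + 1 = 257
 *	max_transfer_size = (257 - 1) * 4 KiB   = 1 MiB
 *	max_sectors       = 1 MiB / 512         = 2048
 *
 * The extra SG entry covers a buffer that straddles one more page when it
 * is not page-aligned; the transfer size is then recomputed from the
 * (possibly clamped) SG entry count so the two limits stay consistent.
 */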
5287
5288 static void pqi_calculate_queue_resources(struct pqi_ctrl_info *ctrl_info)
5289 {
5290 int num_queue_groups;
5291 u16 num_elements_per_iq;
5292 u16 num_elements_per_oq;
5293
5294 if (is_kdump_kernel()) {
5295 num_queue_groups = 1;
5296 } else {
5297 int max_queue_groups;
5298
5299 max_queue_groups = min(ctrl_info->max_inbound_queues / 2,
5300 ctrl_info->max_outbound_queues - 1);
5301 max_queue_groups = min(max_queue_groups, PQI_MAX_QUEUE_GROUPS);
5302
5303 num_queue_groups =
5304 blk_mq_num_online_queues(ctrl_info->max_msix_vectors);
5305 num_queue_groups = min(num_queue_groups, max_queue_groups);
5306 }
5307
5308 ctrl_info->num_queue_groups = num_queue_groups;
5309
5310 /*
5311 * Make sure that the max. inbound IU length is an even multiple
5312 * of our inbound element length.
5313 */
5314 ctrl_info->max_inbound_iu_length =
5315 (ctrl_info->max_inbound_iu_length_per_firmware /
5316 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH) *
5317 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH;
5318
5319 num_elements_per_iq =
5320 (ctrl_info->max_inbound_iu_length /
5321 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH);
5322
5323 /* Add one because one element in each queue is unusable. */
5324 num_elements_per_iq++;
5325
5326 num_elements_per_iq = min(num_elements_per_iq,
5327 ctrl_info->max_elements_per_iq);
5328
5329 num_elements_per_oq = ((num_elements_per_iq - 1) * 2) + 1;
5330 num_elements_per_oq = min(num_elements_per_oq,
5331 ctrl_info->max_elements_per_oq);
5332
5333 ctrl_info->num_elements_per_iq = num_elements_per_iq;
5334 ctrl_info->num_elements_per_oq = num_elements_per_oq;
5335
5336 ctrl_info->max_sg_per_iu =
5337 ((ctrl_info->max_inbound_iu_length -
5338 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH) /
5339 sizeof(struct pqi_sg_descriptor)) +
5340 PQI_MAX_EMBEDDED_SG_DESCRIPTORS;
5341
5342 ctrl_info->max_sg_per_r56_iu =
5343 ((ctrl_info->max_inbound_iu_length -
5344 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH) /
5345 sizeof(struct pqi_sg_descriptor)) +
5346 PQI_MAX_EMBEDDED_R56_SG_DESCRIPTORS;
5347 }
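/*
 * Worked example of the element math above, assuming the firmware reports
 * max_inbound_iu_length_per_firmware == 1064 and
 * PQI_OPERATIONAL_IQ_ELEMENT_LENGTH == 128 (assumed values):
 *
 *	max_inbound_iu_length = (1064 / 128) * 128 = 1024
 *	num_elements_per_iq   = (1024 / 128) + 1   = 9
 *	num_elements_per_oq   = ((9 - 1) * 2) + 1  = 17
 *
 * both subject to the controller's per-queue element limits.
 */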
5348
5349 static inline void pqi_set_sg_descriptor(struct pqi_sg_descriptor *sg_descriptor,
5350 struct scatterlist *sg)
5351 {
5352 u64 address = (u64)sg_dma_address(sg);
5353 unsigned int length = sg_dma_len(sg);
5354
5355 put_unaligned_le64(address, &sg_descriptor->address);
5356 put_unaligned_le32(length, &sg_descriptor->length);
5357 put_unaligned_le32(0, &sg_descriptor->flags);
5358 }
5359
5360 static unsigned int pqi_build_sg_list(struct pqi_sg_descriptor *sg_descriptor,
5361 struct scatterlist *sg, int sg_count, struct pqi_io_request *io_request,
5362 int max_sg_per_iu, bool *chained)
5363 {
5364 int i;
5365 unsigned int num_sg_in_iu;
5366
5367 *chained = false;
5368 i = 0;
5369 num_sg_in_iu = 0;
5370 max_sg_per_iu--; /* Subtract 1 to leave room for chain marker. */
5371
5372 while (1) {
5373 pqi_set_sg_descriptor(sg_descriptor, sg);
5374 if (!*chained)
5375 num_sg_in_iu++;
5376 i++;
5377 if (i == sg_count)
5378 break;
5379 sg_descriptor++;
5380 if (i == max_sg_per_iu) {
5381 put_unaligned_le64((u64)io_request->sg_chain_buffer_dma_handle,
5382 &sg_descriptor->address);
5383 put_unaligned_le32((sg_count - num_sg_in_iu) * sizeof(*sg_descriptor),
5384 &sg_descriptor->length);
5385 put_unaligned_le32(CISS_SG_CHAIN, &sg_descriptor->flags);
5386 *chained = true;
5387 num_sg_in_iu++;
5388 sg_descriptor = io_request->sg_chain_buffer;
5389 }
5390 sg = sg_next(sg);
5391 }
5392
5393 put_unaligned_le32(CISS_SG_LAST, &sg_descriptor->flags);
5394
5395 return num_sg_in_iu;
5396 }
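/*
 * Sketch of the descriptor layout produced above when chaining occurs,
 * assuming max_sg_per_iu == 4 (3 usable in-IU slots after reserving one
 * for the chain marker) and sg_count == 5 (illustrative values):
 *
 *	in-IU:        [SG0] [SG1] [SG2] [CHAIN -> sg_chain_buffer]
 *	chain buffer: [SG3] [SG4 + CISS_SG_LAST]
 *
 * num_sg_in_iu is 4 here (the three data descriptors plus the chain
 * marker); the chain descriptor's length covers the two descriptors that
 * spilled into the chain buffer.
 */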
5397
5398 static int pqi_build_raid_sg_list(struct pqi_ctrl_info *ctrl_info,
5399 struct pqi_raid_path_request *request, struct scsi_cmnd *scmd,
5400 struct pqi_io_request *io_request)
5401 {
5402 u16 iu_length;
5403 int sg_count;
5404 bool chained;
5405 unsigned int num_sg_in_iu;
5406 struct scatterlist *sg;
5407 struct pqi_sg_descriptor *sg_descriptor;
5408
5409 sg_count = scsi_dma_map(scmd);
5410 if (sg_count < 0)
5411 return sg_count;
5412
5413 iu_length = offsetof(struct pqi_raid_path_request, sg_descriptors) -
5414 PQI_REQUEST_HEADER_LENGTH;
5415
5416 if (sg_count == 0)
5417 goto out;
5418
5419 sg = scsi_sglist(scmd);
5420 sg_descriptor = request->sg_descriptors;
5421
5422 num_sg_in_iu = pqi_build_sg_list(sg_descriptor, sg, sg_count, io_request,
5423 ctrl_info->max_sg_per_iu, &chained);
5424
5425 request->partial = chained;
5426 iu_length += num_sg_in_iu * sizeof(*sg_descriptor);
5427
5428 out:
5429 put_unaligned_le16(iu_length, &request->header.iu_length);
5430
5431 return 0;
5432 }
5433
5434 static int pqi_build_aio_r1_sg_list(struct pqi_ctrl_info *ctrl_info,
5435 struct pqi_aio_r1_path_request *request, struct scsi_cmnd *scmd,
5436 struct pqi_io_request *io_request)
5437 {
5438 u16 iu_length;
5439 int sg_count;
5440 bool chained;
5441 unsigned int num_sg_in_iu;
5442 struct scatterlist *sg;
5443 struct pqi_sg_descriptor *sg_descriptor;
5444
5445 sg_count = scsi_dma_map(scmd);
5446 if (sg_count < 0)
5447 return sg_count;
5448
5449 iu_length = offsetof(struct pqi_aio_r1_path_request, sg_descriptors) -
5450 PQI_REQUEST_HEADER_LENGTH;
5451 num_sg_in_iu = 0;
5452
5453 if (sg_count == 0)
5454 goto out;
5455
5456 sg = scsi_sglist(scmd);
5457 sg_descriptor = request->sg_descriptors;
5458
5459 num_sg_in_iu = pqi_build_sg_list(sg_descriptor, sg, sg_count, io_request,
5460 ctrl_info->max_sg_per_iu, &chained);
5461
5462 request->partial = chained;
5463 iu_length += num_sg_in_iu * sizeof(*sg_descriptor);
5464
5465 out:
5466 put_unaligned_le16(iu_length, &request->header.iu_length);
5467 request->num_sg_descriptors = num_sg_in_iu;
5468
5469 return 0;
5470 }
5471
5472 static int pqi_build_aio_r56_sg_list(struct pqi_ctrl_info *ctrl_info,
5473 struct pqi_aio_r56_path_request *request, struct scsi_cmnd *scmd,
5474 struct pqi_io_request *io_request)
5475 {
5476 u16 iu_length;
5477 int sg_count;
5478 bool chained;
5479 unsigned int num_sg_in_iu;
5480 struct scatterlist *sg;
5481 struct pqi_sg_descriptor *sg_descriptor;
5482
5483 sg_count = scsi_dma_map(scmd);
5484 if (sg_count < 0)
5485 return sg_count;
5486
5487 iu_length = offsetof(struct pqi_aio_r56_path_request, sg_descriptors) -
5488 PQI_REQUEST_HEADER_LENGTH;
5489 num_sg_in_iu = 0;
5490
5491 if (sg_count != 0) {
5492 sg = scsi_sglist(scmd);
5493 sg_descriptor = request->sg_descriptors;
5494
5495 num_sg_in_iu = pqi_build_sg_list(sg_descriptor, sg, sg_count, io_request,
5496 ctrl_info->max_sg_per_r56_iu, &chained);
5497
5498 request->partial = chained;
5499 iu_length += num_sg_in_iu * sizeof(*sg_descriptor);
5500 }
5501
5502 put_unaligned_le16(iu_length, &request->header.iu_length);
5503 request->num_sg_descriptors = num_sg_in_iu;
5504
5505 return 0;
5506 }
5507
5508 static int pqi_build_aio_sg_list(struct pqi_ctrl_info *ctrl_info,
5509 struct pqi_aio_path_request *request, struct scsi_cmnd *scmd,
5510 struct pqi_io_request *io_request)
5511 {
5512 u16 iu_length;
5513 int sg_count;
5514 bool chained;
5515 unsigned int num_sg_in_iu;
5516 struct scatterlist *sg;
5517 struct pqi_sg_descriptor *sg_descriptor;
5518
5519 sg_count = scsi_dma_map(scmd);
5520 if (sg_count < 0)
5521 return sg_count;
5522
5523 iu_length = offsetof(struct pqi_aio_path_request, sg_descriptors) -
5524 PQI_REQUEST_HEADER_LENGTH;
5525 num_sg_in_iu = 0;
5526
5527 if (sg_count == 0)
5528 goto out;
5529
5530 sg = scsi_sglist(scmd);
5531 sg_descriptor = request->sg_descriptors;
5532
5533 num_sg_in_iu = pqi_build_sg_list(sg_descriptor, sg, sg_count, io_request,
5534 ctrl_info->max_sg_per_iu, &chained);
5535
5536 request->partial = chained;
5537 iu_length += num_sg_in_iu * sizeof(*sg_descriptor);
5538
5539 out:
5540 put_unaligned_le16(iu_length, &request->header.iu_length);
5541 request->num_sg_descriptors = num_sg_in_iu;
5542
5543 return 0;
5544 }
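/*
 * Note: the four *_sg_list() builders above follow the same pattern and
 * differ only in the request layout, the per-IU SG limit used
 * (max_sg_per_iu vs. max_sg_per_r56_iu) and whether num_sg_descriptors
 * is recorded in the request (the RAID path request does not carry it).
 */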
5545
5546 static void pqi_raid_io_complete(struct pqi_io_request *io_request,
5547 void *context)
5548 {
5549 struct scsi_cmnd *scmd;
5550
5551 scmd = io_request->scmd;
5552 pqi_free_io_request(io_request);
5553 scsi_dma_unmap(scmd);
5554 pqi_scsi_done(scmd);
5555 }
5556
5557 static int pqi_raid_submit_io(struct pqi_ctrl_info *ctrl_info,
5558 struct pqi_scsi_dev *device, struct scsi_cmnd *scmd,
5559 struct pqi_queue_group *queue_group, bool io_high_prio)
5560 {
5561 int rc;
5562 size_t cdb_length;
5563 struct pqi_io_request *io_request;
5564 struct pqi_raid_path_request *request;
5565
5566 io_request = pqi_alloc_io_request(ctrl_info, scmd);
5567 if (!io_request)
5568 return SCSI_MLQUEUE_HOST_BUSY;
5569
5570 io_request->io_complete_callback = pqi_raid_io_complete;
5571 io_request->scmd = scmd;
5572
5573 request = io_request->iu;
5574 memset(request, 0, offsetof(struct pqi_raid_path_request, sg_descriptors));
5575
5576 request->header.iu_type = PQI_REQUEST_IU_RAID_PATH_IO;
5577 put_unaligned_le32(scsi_bufflen(scmd), &request->buffer_length);
5578 request->task_attribute = SOP_TASK_ATTRIBUTE_SIMPLE;
5579 request->command_priority = io_high_prio;
5580 put_unaligned_le16(io_request->index, &request->request_id);
5581 request->error_index = request->request_id;
5582 memcpy(request->lun_number, device->scsi3addr, sizeof(request->lun_number));
5583 request->ml_device_lun_number = (u8)scmd->device->lun;
5584
5585 cdb_length = min_t(size_t, scmd->cmd_len, sizeof(request->cdb));
5586 memcpy(request->cdb, scmd->cmnd, cdb_length);
5587
5588 switch (cdb_length) {
5589 case 6:
5590 case 10:
5591 case 12:
5592 case 16:
5593 request->additional_cdb_bytes_usage = SOP_ADDITIONAL_CDB_BYTES_0;
5594 break;
5595 case 20:
5596 request->additional_cdb_bytes_usage = SOP_ADDITIONAL_CDB_BYTES_4;
5597 break;
5598 case 24:
5599 request->additional_cdb_bytes_usage = SOP_ADDITIONAL_CDB_BYTES_8;
5600 break;
5601 case 28:
5602 request->additional_cdb_bytes_usage = SOP_ADDITIONAL_CDB_BYTES_12;
5603 break;
5604 case 32:
5605 default:
5606 request->additional_cdb_bytes_usage = SOP_ADDITIONAL_CDB_BYTES_16;
5607 break;
5608 }
5609
5610 switch (scmd->sc_data_direction) {
5611 case DMA_FROM_DEVICE:
5612 request->data_direction = SOP_READ_FLAG;
5613 break;
5614 case DMA_TO_DEVICE:
5615 request->data_direction = SOP_WRITE_FLAG;
5616 break;
5617 case DMA_NONE:
5618 request->data_direction = SOP_NO_DIRECTION_FLAG;
5619 break;
5620 case DMA_BIDIRECTIONAL:
5621 request->data_direction = SOP_BIDIRECTIONAL;
5622 break;
5623 default:
5624 dev_err(&ctrl_info->pci_dev->dev,
5625 "unknown data direction: %d\n",
5626 scmd->sc_data_direction);
5627 break;
5628 }
5629
5630 rc = pqi_build_raid_sg_list(ctrl_info, request, scmd, io_request);
5631 if (rc) {
5632 pqi_free_io_request(io_request);
5633 return SCSI_MLQUEUE_HOST_BUSY;
5634 }
5635
5636 pqi_start_io(ctrl_info, queue_group, RAID_PATH, io_request);
5637
5638 return 0;
5639 }
5640
5641 static inline int pqi_raid_submit_scsi_cmd(struct pqi_ctrl_info *ctrl_info,
5642 struct pqi_scsi_dev *device, struct scsi_cmnd *scmd,
5643 struct pqi_queue_group *queue_group)
5644 {
5645 bool io_high_prio;
5646
5647 io_high_prio = pqi_is_io_high_priority(device, scmd);
5648
5649 return pqi_raid_submit_io(ctrl_info, device, scmd, queue_group, io_high_prio);
5650 }
5651
5652 static bool pqi_raid_bypass_retry_needed(struct pqi_io_request *io_request)
5653 {
5654 struct scsi_cmnd *scmd;
5655 struct pqi_scsi_dev *device;
5656 struct pqi_ctrl_info *ctrl_info;
5657
5658 if (!io_request->raid_bypass)
5659 return false;
5660
5661 scmd = io_request->scmd;
5662 if ((scmd->result & 0xff) == SAM_STAT_GOOD)
5663 return false;
5664 if (host_byte(scmd->result) == DID_NO_CONNECT)
5665 return false;
5666
5667 device = scmd->device->hostdata;
5668 if (pqi_device_offline(device) || pqi_device_in_remove(device))
5669 return false;
5670
5671 ctrl_info = shost_to_hba(scmd->device->host);
5672 if (pqi_ctrl_offline(ctrl_info))
5673 return false;
5674
5675 return true;
5676 }
5677
5678 static void pqi_aio_io_complete(struct pqi_io_request *io_request,
5679 void *context)
5680 {
5681 struct scsi_cmnd *scmd;
5682
5683 scmd = io_request->scmd;
5684 scsi_dma_unmap(scmd);
5685 if (io_request->status == -EAGAIN || pqi_raid_bypass_retry_needed(io_request)) {
5686 set_host_byte(scmd, DID_IMM_RETRY);
5687 pqi_cmd_priv(scmd)->this_residual++;
5688 }
5689
5690 pqi_free_io_request(io_request);
5691 pqi_scsi_done(scmd);
5692 }
5693
5694 static inline int pqi_aio_submit_scsi_cmd(struct pqi_ctrl_info *ctrl_info,
5695 struct pqi_scsi_dev *device, struct scsi_cmnd *scmd,
5696 struct pqi_queue_group *queue_group)
5697 {
5698 bool io_high_prio;
5699
5700 io_high_prio = pqi_is_io_high_priority(device, scmd);
5701
5702 return pqi_aio_submit_io(ctrl_info, scmd, device->aio_handle,
5703 scmd->cmnd, scmd->cmd_len, queue_group, NULL,
5704 false, io_high_prio);
5705 }
5706
5707 static int pqi_aio_submit_io(struct pqi_ctrl_info *ctrl_info,
5708 struct scsi_cmnd *scmd, u32 aio_handle, u8 *cdb,
5709 unsigned int cdb_length, struct pqi_queue_group *queue_group,
5710 struct pqi_encryption_info *encryption_info, bool raid_bypass,
5711 bool io_high_prio)
5712 {
5713 int rc;
5714 struct pqi_io_request *io_request;
5715 struct pqi_aio_path_request *request;
5716
5717 io_request = pqi_alloc_io_request(ctrl_info, scmd);
5718 if (!io_request)
5719 return SCSI_MLQUEUE_HOST_BUSY;
5720
5721 io_request->io_complete_callback = pqi_aio_io_complete;
5722 io_request->scmd = scmd;
5723 io_request->raid_bypass = raid_bypass;
5724
5725 request = io_request->iu;
5726 memset(request, 0, offsetof(struct pqi_aio_path_request, sg_descriptors));
5727
5728 request->header.iu_type = PQI_REQUEST_IU_AIO_PATH_IO;
5729 put_unaligned_le32(aio_handle, &request->nexus_id);
5730 put_unaligned_le32(scsi_bufflen(scmd), &request->buffer_length);
5731 request->task_attribute = SOP_TASK_ATTRIBUTE_SIMPLE;
5732 request->command_priority = io_high_prio;
5733 put_unaligned_le16(io_request->index, &request->request_id);
5734 request->error_index = request->request_id;
5735 if (!raid_bypass && ctrl_info->multi_lun_device_supported)
5736 put_unaligned_le64(scmd->device->lun << 8, &request->lun_number);
5737 if (cdb_length > sizeof(request->cdb))
5738 cdb_length = sizeof(request->cdb);
5739 request->cdb_length = cdb_length;
5740 memcpy(request->cdb, cdb, cdb_length);
5741
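	/*
	 * Note: this mapping is intentionally reversed relative to
	 * pqi_raid_submit_io(). A plausible reading (an assumption, not
	 * stated in this file): AIO path direction flags describe the
	 * transfer from the controller's side, so a host write
	 * (DMA_TO_DEVICE) is a controller read from host memory, and
	 * vice versa.
	 */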
5742 switch (scmd->sc_data_direction) {
5743 case DMA_TO_DEVICE:
5744 request->data_direction = SOP_READ_FLAG;
5745 break;
5746 case DMA_FROM_DEVICE:
5747 request->data_direction = SOP_WRITE_FLAG;
5748 break;
5749 case DMA_NONE:
5750 request->data_direction = SOP_NO_DIRECTION_FLAG;
5751 break;
5752 case DMA_BIDIRECTIONAL:
5753 request->data_direction = SOP_BIDIRECTIONAL;
5754 break;
5755 default:
5756 dev_err(&ctrl_info->pci_dev->dev,
5757 "unknown data direction: %d\n",
5758 scmd->sc_data_direction);
5759 break;
5760 }
5761
5762 if (encryption_info) {
5763 request->encryption_enable = true;
5764 put_unaligned_le16(encryption_info->data_encryption_key_index,
5765 &request->data_encryption_key_index);
5766 put_unaligned_le32(encryption_info->encrypt_tweak_lower,
5767 &request->encrypt_tweak_lower);
5768 put_unaligned_le32(encryption_info->encrypt_tweak_upper,
5769 &request->encrypt_tweak_upper);
5770 }
5771
5772 rc = pqi_build_aio_sg_list(ctrl_info, request, scmd, io_request);
5773 if (rc) {
5774 pqi_free_io_request(io_request);
5775 return SCSI_MLQUEUE_HOST_BUSY;
5776 }
5777
5778 pqi_start_io(ctrl_info, queue_group, AIO_PATH, io_request);
5779
5780 return 0;
5781 }
5782
5783 static int pqi_aio_submit_r1_write_io(struct pqi_ctrl_info *ctrl_info,
5784 struct scsi_cmnd *scmd, struct pqi_queue_group *queue_group,
5785 struct pqi_encryption_info *encryption_info, struct pqi_scsi_dev *device,
5786 struct pqi_scsi_dev_raid_map_data *rmd)
5787 {
5788 int rc;
5789 struct pqi_io_request *io_request;
5790 struct pqi_aio_r1_path_request *r1_request;
5791
5792 io_request = pqi_alloc_io_request(ctrl_info, scmd);
5793 if (!io_request)
5794 return SCSI_MLQUEUE_HOST_BUSY;
5795
5796 io_request->io_complete_callback = pqi_aio_io_complete;
5797 io_request->scmd = scmd;
5798 io_request->raid_bypass = true;
5799
5800 r1_request = io_request->iu;
5801 memset(r1_request, 0, offsetof(struct pqi_aio_r1_path_request, sg_descriptors));
5802
5803 r1_request->header.iu_type = PQI_REQUEST_IU_AIO_PATH_RAID1_IO;
5804 put_unaligned_le16(*(u16 *)device->scsi3addr & 0x3fff, &r1_request->volume_id);
5805 r1_request->num_drives = rmd->num_it_nexus_entries;
5806 put_unaligned_le32(rmd->it_nexus[0], &r1_request->it_nexus_1);
5807 put_unaligned_le32(rmd->it_nexus[1], &r1_request->it_nexus_2);
5808 if (rmd->num_it_nexus_entries == 3)
5809 put_unaligned_le32(rmd->it_nexus[2], &r1_request->it_nexus_3);
5810
5811 put_unaligned_le32(scsi_bufflen(scmd), &r1_request->data_length);
5812 r1_request->task_attribute = SOP_TASK_ATTRIBUTE_SIMPLE;
5813 put_unaligned_le16(io_request->index, &r1_request->request_id);
5814 r1_request->error_index = r1_request->request_id;
5815 if (rmd->cdb_length > sizeof(r1_request->cdb))
5816 rmd->cdb_length = sizeof(r1_request->cdb);
5817 r1_request->cdb_length = rmd->cdb_length;
5818 memcpy(r1_request->cdb, rmd->cdb, rmd->cdb_length);
5819
5820 /* The direction is always write; SOP_READ_FLAG is used because the controller reads the data from host memory. */
5821 r1_request->data_direction = SOP_READ_FLAG;
5822
5823 if (encryption_info) {
5824 r1_request->encryption_enable = true;
5825 put_unaligned_le16(encryption_info->data_encryption_key_index,
5826 &r1_request->data_encryption_key_index);
5827 put_unaligned_le32(encryption_info->encrypt_tweak_lower,
5828 &r1_request->encrypt_tweak_lower);
5829 put_unaligned_le32(encryption_info->encrypt_tweak_upper,
5830 &r1_request->encrypt_tweak_upper);
5831 }
5832
5833 rc = pqi_build_aio_r1_sg_list(ctrl_info, r1_request, scmd, io_request);
5834 if (rc) {
5835 pqi_free_io_request(io_request);
5836 return SCSI_MLQUEUE_HOST_BUSY;
5837 }
5838
5839 pqi_start_io(ctrl_info, queue_group, AIO_PATH, io_request);
5840
5841 return 0;
5842 }
5843
5844 static int pqi_aio_submit_r56_write_io(struct pqi_ctrl_info *ctrl_info,
5845 struct scsi_cmnd *scmd, struct pqi_queue_group *queue_group,
5846 struct pqi_encryption_info *encryption_info, struct pqi_scsi_dev *device,
5847 struct pqi_scsi_dev_raid_map_data *rmd)
5848 {
5849 int rc;
5850 struct pqi_io_request *io_request;
5851 struct pqi_aio_r56_path_request *r56_request;
5852
5853 io_request = pqi_alloc_io_request(ctrl_info, scmd);
5854 if (!io_request)
5855 return SCSI_MLQUEUE_HOST_BUSY;
5856 io_request->io_complete_callback = pqi_aio_io_complete;
5857 io_request->scmd = scmd;
5858 io_request->raid_bypass = true;
5859
5860 r56_request = io_request->iu;
5861 memset(r56_request, 0, offsetof(struct pqi_aio_r56_path_request, sg_descriptors));
5862
5863 if (device->raid_level == SA_RAID_5 || device->raid_level == SA_RAID_51)
5864 r56_request->header.iu_type = PQI_REQUEST_IU_AIO_PATH_RAID5_IO;
5865 else
5866 r56_request->header.iu_type = PQI_REQUEST_IU_AIO_PATH_RAID6_IO;
5867
5868 put_unaligned_le16(*(u16 *)device->scsi3addr & 0x3fff, &r56_request->volume_id);
5869 put_unaligned_le32(rmd->aio_handle, &r56_request->data_it_nexus);
5870 put_unaligned_le32(rmd->p_parity_it_nexus, &r56_request->p_parity_it_nexus);
5871 if (rmd->raid_level == SA_RAID_6) {
5872 put_unaligned_le32(rmd->q_parity_it_nexus, &r56_request->q_parity_it_nexus);
5873 r56_request->xor_multiplier = rmd->xor_mult;
5874 }
5875 put_unaligned_le32(scsi_bufflen(scmd), &r56_request->data_length);
5876 r56_request->task_attribute = SOP_TASK_ATTRIBUTE_SIMPLE;
5877 put_unaligned_le64(rmd->row, &r56_request->row);
5878
5879 put_unaligned_le16(io_request->index, &r56_request->request_id);
5880 r56_request->error_index = r56_request->request_id;
5881
5882 if (rmd->cdb_length > sizeof(r56_request->cdb))
5883 rmd->cdb_length = sizeof(r56_request->cdb);
5884 r56_request->cdb_length = rmd->cdb_length;
5885 memcpy(r56_request->cdb, rmd->cdb, rmd->cdb_length);
5886
5887 /* The direction is always write; SOP_READ_FLAG is used because the controller reads the data from host memory. */
5888 r56_request->data_direction = SOP_READ_FLAG;
5889
5890 if (encryption_info) {
5891 r56_request->encryption_enable = true;
5892 put_unaligned_le16(encryption_info->data_encryption_key_index,
5893 &r56_request->data_encryption_key_index);
5894 put_unaligned_le32(encryption_info->encrypt_tweak_lower,
5895 &r56_request->encrypt_tweak_lower);
5896 put_unaligned_le32(encryption_info->encrypt_tweak_upper,
5897 &r56_request->encrypt_tweak_upper);
5898 }
5899
5900 rc = pqi_build_aio_r56_sg_list(ctrl_info, r56_request, scmd, io_request);
5901 if (rc) {
5902 pqi_free_io_request(io_request);
5903 return SCSI_MLQUEUE_HOST_BUSY;
5904 }
5905
5906 pqi_start_io(ctrl_info, queue_group, AIO_PATH, io_request);
5907
5908 return 0;
5909 }
5910
5911 static inline u16 pqi_get_hw_queue(struct pqi_ctrl_info *ctrl_info,
5912 struct scsi_cmnd *scmd)
5913 {
5914 /*
5915 * We set host_tagset = 1 during init; the hardware queue index is recovered from the command's unique block layer tag.
5916 */
5917 return blk_mq_unique_tag_to_hwq(blk_mq_unique_tag(scsi_cmd_to_rq(scmd)));
5918 }
5919
5920 static inline bool pqi_is_bypass_eligible_request(struct scsi_cmnd *scmd)
5921 {
5922 if (blk_rq_is_passthrough(scsi_cmd_to_rq(scmd)))
5923 return false;
5924
5925 return pqi_cmd_priv(scmd)->this_residual == 0;
5926 }
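/*
 * Note (inferred from usage in this file): this_residual serves here as a
 * bypass-retry marker rather than a data residual. pqi_aio_io_complete()
 * increments it when a bypassed request must be retried, which makes the
 * retried command ineligible for another bypass attempt.
 */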
5927
5928 /*
5929 * This function gets called just before we hand the completed SCSI request
5930 * back to the SML.
5931 */
5932
5933 void pqi_prep_for_scsi_done(struct scsi_cmnd *scmd)
5934 {
5935 struct pqi_scsi_dev *device;
5936 struct completion *wait;
5937
5938 if (!scmd->device) {
5939 set_host_byte(scmd, DID_NO_CONNECT);
5940 return;
5941 }
5942
5943 device = scmd->device->hostdata;
5944 if (!device) {
5945 set_host_byte(scmd, DID_NO_CONNECT);
5946 return;
5947 }
5948
5949 atomic_dec(&device->scsi_cmds_outstanding[scmd->device->lun]);
5950
5951 wait = (struct completion *)xchg(&scmd->host_scribble, NULL);
5952 if (wait != PQI_NO_COMPLETION)
5953 complete(wait);
5954 }
5955
5956 static bool pqi_is_parity_write_stream(struct pqi_ctrl_info *ctrl_info,
5957 struct scsi_cmnd *scmd)
5958 {
5959 u32 oldest_jiffies;
5960 u8 lru_index;
5961 int i;
5962 int rc;
5963 struct pqi_scsi_dev *device;
5964 struct pqi_stream_data *pqi_stream_data;
5965 struct pqi_scsi_dev_raid_map_data rmd = { 0 };
5966
5967 if (!ctrl_info->enable_stream_detection)
5968 return false;
5969
5970 rc = pqi_get_aio_lba_and_block_count(scmd, &rmd);
5971 if (rc)
5972 return false;
5973
5974 /* Check writes only. */
5975 if (!rmd.is_write)
5976 return false;
5977
5978 device = scmd->device->hostdata;
5979
5980 /* Check for RAID 5/6 streams. */
5981 if (device->raid_level != SA_RAID_5 && device->raid_level != SA_RAID_6)
5982 return false;
5983
5984 /*
5985 * If the controller does not support AIO RAID{5,6} writes, these
5986 * requests must be sent down the non-AIO path.
5987 */
5988 if ((device->raid_level == SA_RAID_5 && !ctrl_info->enable_r5_writes) ||
5989 (device->raid_level == SA_RAID_6 && !ctrl_info->enable_r6_writes))
5990 return true;
5991
5992 lru_index = 0;
5993 oldest_jiffies = INT_MAX;
5994 for (i = 0; i < NUM_STREAMS_PER_LUN; i++) {
5995 pqi_stream_data = &device->stream_data[i];
5996 /*
5997 * Check whether this request is adjacent to the previous
5998 * request or falls within the range it covered.
5999 */
6000 if ((pqi_stream_data->next_lba &&
6001 rmd.first_block >= pqi_stream_data->next_lba) &&
6002 rmd.first_block <= pqi_stream_data->next_lba +
6003 rmd.block_cnt) {
6004 pqi_stream_data->next_lba = rmd.first_block +
6005 rmd.block_cnt;
6006 pqi_stream_data->last_accessed = jiffies;
6007 per_cpu_ptr(device->raid_io_stats, raw_smp_processor_id())->write_stream_cnt++;
6008 return true;
6009 }
6010
6011 /* unused entry */
6012 if (pqi_stream_data->last_accessed == 0) {
6013 lru_index = i;
6014 break;
6015 }
6016
6017 /* Find entry with oldest last accessed time. */
6018 if (pqi_stream_data->last_accessed <= oldest_jiffies) {
6019 oldest_jiffies = pqi_stream_data->last_accessed;
6020 lru_index = i;
6021 }
6022 }
6023
6024 /* Set LRU entry. */
6025 pqi_stream_data = &device->stream_data[lru_index];
6026 pqi_stream_data->last_accessed = jiffies;
6027 pqi_stream_data->next_lba = rmd.first_block + rmd.block_cnt;
6028
6029 return false;
6030 }
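/*
 * Illustrative walk-through of the detection above (assumed values): a
 * sequential stream of 8-block writes at LBAs 0, 8, 16, ... first misses
 * (next_lba is still 0) and seeds an LRU slot with next_lba = 8; the
 * write at LBA 8 then matches (8 >= next_lba && 8 <= next_lba +
 * block_cnt), next_lba advances to 16, and the request is routed down
 * the RAID path so the controller can coalesce full-stripe writes.
 */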
6031
6032 static int pqi_scsi_queue_command(struct Scsi_Host *shost, struct scsi_cmnd *scmd)
6033 {
6034 int rc;
6035 struct pqi_ctrl_info *ctrl_info;
6036 struct pqi_scsi_dev *device;
6037 u16 hw_queue;
6038 struct pqi_queue_group *queue_group;
6039 bool raid_bypassed;
6040 u8 lun;
6041
6042 scmd->host_scribble = PQI_NO_COMPLETION;
6043
6044 device = scmd->device->hostdata;
6045
6046 if (!device) {
6047 set_host_byte(scmd, DID_NO_CONNECT);
6048 pqi_scsi_done(scmd);
6049 return 0;
6050 }
6051
6052 lun = (u8)scmd->device->lun;
6053
6054 atomic_inc(&device->scsi_cmds_outstanding[lun]);
6055
6056 ctrl_info = shost_to_hba(shost);
6057
6058 if (pqi_ctrl_offline(ctrl_info) || pqi_device_offline(device) || pqi_device_in_remove(device)) {
6059 set_host_byte(scmd, DID_NO_CONNECT);
6060 pqi_scsi_done(scmd);
6061 return 0;
6062 }
6063
6064 if (pqi_ctrl_blocked(ctrl_info) || pqi_device_in_reset(device, lun)) {
6065 rc = SCSI_MLQUEUE_HOST_BUSY;
6066 goto out;
6067 }
6068
6069 /*
6070 * This is necessary because the SML doesn't zero out this field during
6071 * error recovery.
6072 */
6073 scmd->result = 0;
6074
6075 hw_queue = pqi_get_hw_queue(ctrl_info, scmd);
6076 queue_group = &ctrl_info->queue_groups[hw_queue];
6077
6078 if (pqi_is_logical_device(device)) {
6079 raid_bypassed = false;
6080 if (device->raid_bypass_enabled &&
6081 pqi_is_bypass_eligible_request(scmd) &&
6082 !pqi_is_parity_write_stream(ctrl_info, scmd)) {
6083 rc = pqi_raid_bypass_submit_scsi_cmd(ctrl_info, device, scmd, queue_group);
6084 if (rc == 0 || rc == SCSI_MLQUEUE_HOST_BUSY) {
6085 raid_bypassed = true;
6086 per_cpu_ptr(device->raid_io_stats, raw_smp_processor_id())->raid_bypass_cnt++;
6087 }
6088 }
6089 if (!raid_bypassed)
6090 rc = pqi_raid_submit_scsi_cmd(ctrl_info, device, scmd, queue_group);
6091 } else {
6092 if (device->aio_enabled)
6093 rc = pqi_aio_submit_scsi_cmd(ctrl_info, device, scmd, queue_group);
6094 else
6095 rc = pqi_raid_submit_scsi_cmd(ctrl_info, device, scmd, queue_group);
6096 }
6097
6098 out:
6099 if (rc) {
6100 scmd->host_scribble = NULL;
6101 atomic_dec(&device->scsi_cmds_outstanding[lun]);
6102 }
6103
6104 return rc;
6105 }
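/*
 * Dispatch summary for pqi_scsi_queue_command() (a sketch): logical
 * volumes first try the RAID bypass (AIO) path when the command is
 * eligible and not part of a detected parity write stream, falling back
 * to the RAID path otherwise; physical devices use the AIO path whenever
 * aio_enabled is set, and the RAID path if not.
 */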
6106
6107 static unsigned int pqi_queued_io_count(struct pqi_ctrl_info *ctrl_info)
6108 {
6109 unsigned int i;
6110 unsigned int path;
6111 unsigned long flags;
6112 unsigned int queued_io_count;
6113 struct pqi_queue_group *queue_group;
6114 struct pqi_io_request *io_request;
6115
6116 queued_io_count = 0;
6117
6118 for (i = 0; i < ctrl_info->num_queue_groups; i++) {
6119 queue_group = &ctrl_info->queue_groups[i];
6120 for (path = 0; path < 2; path++) {
6121 spin_lock_irqsave(&queue_group->submit_lock[path], flags);
6122 list_for_each_entry(io_request, &queue_group->request_list[path], request_list_entry)
6123 queued_io_count++;
6124 spin_unlock_irqrestore(&queue_group->submit_lock[path], flags);
6125 }
6126 }
6127
6128 return queued_io_count;
6129 }
6130
6131 static unsigned int pqi_nonempty_inbound_queue_count(struct pqi_ctrl_info *ctrl_info)
6132 {
6133 unsigned int i;
6134 unsigned int path;
6135 unsigned int nonempty_inbound_queue_count;
6136 struct pqi_queue_group *queue_group;
6137 pqi_index_t iq_pi;
6138 pqi_index_t iq_ci;
6139
6140 nonempty_inbound_queue_count = 0;
6141
6142 for (i = 0; i < ctrl_info->num_queue_groups; i++) {
6143 queue_group = &ctrl_info->queue_groups[i];
6144 for (path = 0; path < 2; path++) {
6145 iq_pi = queue_group->iq_pi_copy[path];
6146 iq_ci = readl(queue_group->iq_ci[path]);
6147 if (iq_ci != iq_pi)
6148 nonempty_inbound_queue_count++;
6149 }
6150 }
6151
6152 return nonempty_inbound_queue_count;
6153 }
6154
6155 #define PQI_INBOUND_QUEUES_NONEMPTY_WARNING_TIMEOUT_SECS 10
6156
6157 static int pqi_wait_until_inbound_queues_empty(struct pqi_ctrl_info *ctrl_info)
6158 {
6159 unsigned long start_jiffies;
6160 unsigned long warning_timeout;
6161 unsigned int queued_io_count;
6162 unsigned int nonempty_inbound_queue_count;
6163 bool displayed_warning;
6164
6165 displayed_warning = false;
6166 start_jiffies = jiffies;
6167 warning_timeout = (PQI_INBOUND_QUEUES_NONEMPTY_WARNING_TIMEOUT_SECS * HZ) + start_jiffies;
6168
6169 while (1) {
6170 queued_io_count = pqi_queued_io_count(ctrl_info);
6171 nonempty_inbound_queue_count = pqi_nonempty_inbound_queue_count(ctrl_info);
6172 if (queued_io_count == 0 && nonempty_inbound_queue_count == 0)
6173 break;
6174 pqi_check_ctrl_health(ctrl_info);
6175 if (pqi_ctrl_offline(ctrl_info))
6176 return -ENXIO;
6177 if (time_after(jiffies, warning_timeout)) {
6178 dev_warn(&ctrl_info->pci_dev->dev,
6179 "waiting %u seconds for queued I/O to drain (queued I/O count: %u; non-empty inbound queue count: %u)\n",
6180 jiffies_to_msecs(jiffies - start_jiffies) / 1000, queued_io_count, nonempty_inbound_queue_count);
6181 displayed_warning = true;
6182 warning_timeout = (PQI_INBOUND_QUEUES_NONEMPTY_WARNING_TIMEOUT_SECS * HZ) + jiffies;
6183 }
6184 usleep_range(1000, 2000);
6185 }
6186
6187 if (displayed_warning)
6188 dev_warn(&ctrl_info->pci_dev->dev,
6189 "queued I/O drained after waiting for %u seconds\n",
6190 jiffies_to_msecs(jiffies - start_jiffies) / 1000);
6191
6192 return 0;
6193 }
6194
6195 static void pqi_fail_io_queued_for_device(struct pqi_ctrl_info *ctrl_info,
6196 struct pqi_scsi_dev *device, u8 lun)
6197 {
6198 unsigned int i;
6199 unsigned int path;
6200 struct pqi_queue_group *queue_group;
6201 unsigned long flags;
6202 struct pqi_io_request *io_request;
6203 struct pqi_io_request *next;
6204 struct scsi_cmnd *scmd;
6205 struct pqi_scsi_dev *scsi_device;
6206
6207 for (i = 0; i < ctrl_info->num_queue_groups; i++) {
6208 queue_group = &ctrl_info->queue_groups[i];
6209
6210 for (path = 0; path < 2; path++) {
6211 spin_lock_irqsave(
6212 &queue_group->submit_lock[path], flags);
6213
6214 list_for_each_entry_safe(io_request, next,
6215 &queue_group->request_list[path],
6216 request_list_entry) {
6217
6218 scmd = io_request->scmd;
6219 if (!scmd)
6220 continue;
6221
6222 scsi_device = scmd->device->hostdata;
6223
6224 list_del(&io_request->request_list_entry);
6225 if (scsi_device == device && (u8)scmd->device->lun == lun)
6226 set_host_byte(scmd, DID_RESET);
6227 else
6228 set_host_byte(scmd, DID_REQUEUE);
6229 pqi_free_io_request(io_request);
6230 scsi_dma_unmap(scmd);
6231 pqi_scsi_done(scmd);
6232 }
6233
6234 spin_unlock_irqrestore(
6235 &queue_group->submit_lock[path], flags);
6236 }
6237 }
6238 }
6239
6240 #define PQI_PENDING_IO_WARNING_TIMEOUT_SECS 10
6241
6242 static int pqi_device_wait_for_pending_io(struct pqi_ctrl_info *ctrl_info,
6243 struct pqi_scsi_dev *device, u8 lun, unsigned long timeout_msecs)
6244 {
6245 int cmds_outstanding;
6246 unsigned long start_jiffies;
6247 unsigned long warning_timeout;
6248 unsigned long msecs_waiting;
6249
6250 start_jiffies = jiffies;
6251 warning_timeout = (PQI_PENDING_IO_WARNING_TIMEOUT_SECS * HZ) + start_jiffies;
6252
6253 while ((cmds_outstanding = atomic_read(&device->scsi_cmds_outstanding[lun])) > 0) {
6254 if (ctrl_info->ctrl_removal_state != PQI_CTRL_GRACEFUL_REMOVAL) {
6255 pqi_check_ctrl_health(ctrl_info);
6256 if (pqi_ctrl_offline(ctrl_info))
6257 return -ENXIO;
6258 }
6259 msecs_waiting = jiffies_to_msecs(jiffies - start_jiffies);
6260 if (msecs_waiting >= timeout_msecs) {
6261 dev_err(&ctrl_info->pci_dev->dev,
6262 "scsi %d:%d:%d:%d: timed out after %lu seconds waiting for %d outstanding command(s)\n",
6263 ctrl_info->scsi_host->host_no, device->bus, device->target,
6264 lun, msecs_waiting / 1000, cmds_outstanding);
6265 return -ETIMEDOUT;
6266 }
6267 if (time_after(jiffies, warning_timeout)) {
6268 dev_warn(&ctrl_info->pci_dev->dev,
6269 "scsi %d:%d:%d:%d: waiting %lu seconds for %d outstanding command(s)\n",
6270 ctrl_info->scsi_host->host_no, device->bus, device->target,
6271 lun, msecs_waiting / 1000, cmds_outstanding);
6272 warning_timeout = (PQI_PENDING_IO_WARNING_TIMEOUT_SECS * HZ) + jiffies;
6273 }
6274 usleep_range(1000, 2000);
6275 }
6276
6277 return 0;
6278 }
6279
6280 static void pqi_lun_reset_complete(struct pqi_io_request *io_request,
6281 void *context)
6282 {
6283 struct completion *waiting = context;
6284
6285 complete(waiting);
6286 }
6287
6288 #define PQI_LUN_RESET_POLL_COMPLETION_SECS 10
6289
6290 static int pqi_wait_for_lun_reset_completion(struct pqi_ctrl_info *ctrl_info,
6291 struct pqi_scsi_dev *device, u8 lun, struct completion *wait)
6292 {
6293 int rc;
6294 unsigned int wait_secs;
6295 int cmds_outstanding;
6296
6297 wait_secs = 0;
6298
6299 while (1) {
6300 if (wait_for_completion_io_timeout(wait,
6301 PQI_LUN_RESET_POLL_COMPLETION_SECS * HZ)) {
6302 rc = 0;
6303 break;
6304 }
6305
6306 pqi_check_ctrl_health(ctrl_info);
6307 if (pqi_ctrl_offline(ctrl_info)) {
6308 rc = -ENXIO;
6309 break;
6310 }
6311
6312 wait_secs += PQI_LUN_RESET_POLL_COMPLETION_SECS;
6313 cmds_outstanding = atomic_read(&device->scsi_cmds_outstanding[lun]);
6314 dev_warn(&ctrl_info->pci_dev->dev,
6315 "scsi %d:%d:%d:%d: waiting %u seconds for LUN reset to complete (%d command(s) outstanding)\n",
6316 ctrl_info->scsi_host->host_no, device->bus, device->target, lun, wait_secs, cmds_outstanding);
6317 }
6318
6319 return rc;
6320 }
6321
6322 #define PQI_LUN_RESET_FIRMWARE_TIMEOUT_SECS 30
6323
6324 static int pqi_lun_reset(struct pqi_ctrl_info *ctrl_info, struct pqi_scsi_dev *device, u8 lun)
6325 {
6326 int rc;
6327 struct pqi_io_request *io_request;
6328 DECLARE_COMPLETION_ONSTACK(wait);
6329 struct pqi_task_management_request *request;
6330
6331 io_request = pqi_alloc_io_request(ctrl_info, NULL);
6332 io_request->io_complete_callback = pqi_lun_reset_complete;
6333 io_request->context = &wait;
6334
6335 request = io_request->iu;
6336 memset(request, 0, sizeof(*request));
6337
6338 request->header.iu_type = PQI_REQUEST_IU_TASK_MANAGEMENT;
6339 put_unaligned_le16(sizeof(*request) - PQI_REQUEST_HEADER_LENGTH,
6340 &request->header.iu_length);
6341 put_unaligned_le16(io_request->index, &request->request_id);
6342 memcpy(request->lun_number, device->scsi3addr,
6343 sizeof(request->lun_number));
6344 if (!pqi_is_logical_device(device) && ctrl_info->multi_lun_device_supported)
6345 request->ml_device_lun_number = lun;
6346 request->task_management_function = SOP_TASK_MANAGEMENT_LUN_RESET;
6347 if (ctrl_info->tmf_iu_timeout_supported)
6348 put_unaligned_le16(PQI_LUN_RESET_FIRMWARE_TIMEOUT_SECS, &request->timeout);
6349
6350 pqi_start_io(ctrl_info, &ctrl_info->queue_groups[PQI_DEFAULT_QUEUE_GROUP], RAID_PATH,
6351 io_request);
6352
6353 rc = pqi_wait_for_lun_reset_completion(ctrl_info, device, lun, &wait);
6354 if (rc == 0)
6355 rc = io_request->status;
6356
6357 pqi_free_io_request(io_request);
6358
6359 return rc;
6360 }
6361
6362 #define PQI_LUN_RESET_RETRIES 3
6363 #define PQI_LUN_RESET_RETRY_INTERVAL_MSECS (10 * 1000)
6364 #define PQI_LUN_RESET_PENDING_IO_TIMEOUT_MSECS (10 * 60 * 1000)
6365 #define PQI_LUN_RESET_FAILED_PENDING_IO_TIMEOUT_MSECS (2 * 60 * 1000)
6366
6367 static int pqi_lun_reset_with_retries(struct pqi_ctrl_info *ctrl_info, struct pqi_scsi_dev *device, u8 lun)
6368 {
6369 int reset_rc;
6370 int wait_rc;
6371 unsigned int retries;
6372 unsigned long timeout_msecs;
6373
6374 for (retries = 0;;) {
6375 reset_rc = pqi_lun_reset(ctrl_info, device, lun);
6376 if (reset_rc == 0 || reset_rc == -ENODEV || reset_rc == -ENXIO || ++retries > PQI_LUN_RESET_RETRIES)
6377 break;
6378 msleep(PQI_LUN_RESET_RETRY_INTERVAL_MSECS);
6379 }
6380
6381 timeout_msecs = reset_rc ? PQI_LUN_RESET_FAILED_PENDING_IO_TIMEOUT_MSECS :
6382 PQI_LUN_RESET_PENDING_IO_TIMEOUT_MSECS;
6383
6384 wait_rc = pqi_device_wait_for_pending_io(ctrl_info, device, lun, timeout_msecs);
6385 if (wait_rc && reset_rc == 0)
6386 reset_rc = wait_rc;
6387
6388 return reset_rc == 0 ? SUCCESS : FAILED;
6389 }
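/*
 * Worst-case timing sketch, derived from the constants above: up to
 * 3 retries with a 10-second pause between attempts, then up to
 * 10 minutes waiting for pending I/O after a successful reset (or
 * 2 minutes after a failed one), in addition to the per-attempt
 * PQI_LUN_RESET_FIRMWARE_TIMEOUT_SECS firmware timeout.
 */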
6390
6391 static int pqi_device_reset(struct pqi_ctrl_info *ctrl_info, struct pqi_scsi_dev *device, u8 lun)
6392 {
6393 int rc;
6394
6395 pqi_ctrl_block_requests(ctrl_info);
6396 pqi_ctrl_wait_until_quiesced(ctrl_info);
6397 pqi_fail_io_queued_for_device(ctrl_info, device, lun);
6398 rc = pqi_wait_until_inbound_queues_empty(ctrl_info);
6399 pqi_device_reset_start(device, lun);
6400 pqi_ctrl_unblock_requests(ctrl_info);
6401 if (rc)
6402 rc = FAILED;
6403 else
6404 rc = pqi_lun_reset_with_retries(ctrl_info, device, lun);
6405 pqi_device_reset_done(device, lun);
6406
6407 return rc;
6408 }
6409
6410 static int pqi_device_reset_handler(struct pqi_ctrl_info *ctrl_info, struct pqi_scsi_dev *device, u8 lun, struct scsi_cmnd *scmd, u8 scsi_opcode)
6411 {
6412 int rc;
6413
6414 mutex_lock(&ctrl_info->lun_reset_mutex);
6415
6416 dev_err(&ctrl_info->pci_dev->dev,
6417 "resetting scsi %d:%d:%d:%u SCSI cmd at %p due to cmd opcode 0x%02x\n",
6418 ctrl_info->scsi_host->host_no, device->bus, device->target, lun, scmd, scsi_opcode);
6419
6420 pqi_check_ctrl_health(ctrl_info);
6421 if (pqi_ctrl_offline(ctrl_info))
6422 rc = FAILED;
6423 else
6424 rc = pqi_device_reset(ctrl_info, device, lun);
6425
6426 dev_err(&ctrl_info->pci_dev->dev,
6427 "reset of scsi %d:%d:%d:%u: %s\n",
6428 ctrl_info->scsi_host->host_no, device->bus, device->target, lun,
6429 rc == SUCCESS ? "SUCCESS" : "FAILED");
6430
6431 mutex_unlock(&ctrl_info->lun_reset_mutex);
6432
6433 return rc;
6434 }
6435
6436 static int pqi_eh_device_reset_handler(struct scsi_cmnd *scmd)
6437 {
6438 struct Scsi_Host *shost;
6439 struct pqi_ctrl_info *ctrl_info;
6440 struct pqi_scsi_dev *device;
6441 u8 scsi_opcode;
6442
6443 shost = scmd->device->host;
6444 ctrl_info = shost_to_hba(shost);
6445 device = scmd->device->hostdata;
6446 scsi_opcode = scmd->cmd_len > 0 ? scmd->cmnd[0] : 0xff;
6447
6448 return pqi_device_reset_handler(ctrl_info, device, (u8)scmd->device->lun, scmd, scsi_opcode);
6449 }
6450
6451 static void pqi_tmf_worker(struct work_struct *work)
6452 {
6453 struct pqi_tmf_work *tmf_work;
6454 struct scsi_cmnd *scmd;
6455
6456 tmf_work = container_of(work, struct pqi_tmf_work, work_struct);
6457 scmd = (struct scsi_cmnd *)xchg(&tmf_work->scmd, NULL);
6458
6459 pqi_device_reset_handler(tmf_work->ctrl_info, tmf_work->device, tmf_work->lun, scmd, tmf_work->scsi_opcode);
6460 }
6461
6462 static int pqi_eh_abort_handler(struct scsi_cmnd *scmd)
6463 {
6464 struct Scsi_Host *shost;
6465 struct pqi_ctrl_info *ctrl_info;
6466 struct pqi_scsi_dev *device;
6467 struct pqi_tmf_work *tmf_work;
6468 DECLARE_COMPLETION_ONSTACK(wait);
6469
6470 shost = scmd->device->host;
6471 ctrl_info = shost_to_hba(shost);
6472 device = scmd->device->hostdata;
6473
6474 dev_err(&ctrl_info->pci_dev->dev,
6475 "attempting TASK ABORT on scsi %d:%d:%d:%d for SCSI cmd at %p\n",
6476 shost->host_no, device->bus, device->target, (int)scmd->device->lun, scmd);
6477
6478 if (cmpxchg(&scmd->host_scribble, PQI_NO_COMPLETION, (void *)&wait) == NULL) {
6479 dev_err(&ctrl_info->pci_dev->dev,
6480 "scsi %d:%d:%d:%d for SCSI cmd at %p already completed\n",
6481 shost->host_no, device->bus, device->target, (int)scmd->device->lun, scmd);
6482 scmd->result = DID_RESET << 16;
6483 goto out;
6484 }
6485
6486 tmf_work = &device->tmf_work[scmd->device->lun];
6487
6488 if (cmpxchg(&tmf_work->scmd, NULL, scmd) == NULL) {
6489 tmf_work->ctrl_info = ctrl_info;
6490 tmf_work->device = device;
6491 tmf_work->lun = (u8)scmd->device->lun;
6492 tmf_work->scsi_opcode = scmd->cmd_len > 0 ? scmd->cmnd[0] : 0xff;
6493 schedule_work(&tmf_work->work_struct);
6494 }
6495
6496 wait_for_completion(&wait);
6497
6498 dev_err(&ctrl_info->pci_dev->dev,
6499 "TASK ABORT on scsi %d:%d:%d:%d for SCSI cmd at %p: SUCCESS\n",
6500 shost->host_no, device->bus, device->target, (int)scmd->device->lun, scmd);
6501
6502 out:
6503
6504 return SUCCESS;
6505 }
6506
6507 static int pqi_sdev_init(struct scsi_device *sdev)
6508 {
6509 struct pqi_scsi_dev *device;
6510 unsigned long flags;
6511 struct pqi_ctrl_info *ctrl_info;
6512 struct scsi_target *starget;
6513 struct sas_rphy *rphy;
6514
6515 ctrl_info = shost_to_hba(sdev->host);
6516
6517 spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
6518
6519 if (sdev_channel(sdev) == PQI_PHYSICAL_DEVICE_BUS) {
6520 starget = scsi_target(sdev);
6521 rphy = target_to_rphy(starget);
6522 device = pqi_find_device_by_sas_rphy(ctrl_info, rphy);
6523 if (device) {
6524 if (device->target_lun_valid) {
6525 device->ignore_device = true;
6526 } else {
6527 device->target = sdev_id(sdev);
6528 device->lun = sdev->lun;
6529 device->target_lun_valid = true;
6530 }
6531 }
6532 } else {
6533 device = pqi_find_scsi_dev(ctrl_info, sdev_channel(sdev),
6534 sdev_id(sdev), sdev->lun);
6535 }
6536
6537 if (device) {
6538 sdev->hostdata = device;
6539 device->sdev = sdev;
6540 if (device->queue_depth) {
6541 device->advertised_queue_depth = device->queue_depth;
6542 scsi_change_queue_depth(sdev,
6543 device->advertised_queue_depth);
6544 }
6545 if (pqi_is_logical_device(device)) {
6546 pqi_disable_write_same(sdev);
6547 } else {
6548 sdev->allow_restart = 1;
6549 if (device->device_type == SA_DEVICE_TYPE_NVME)
6550 pqi_disable_write_same(sdev);
6551 }
6552 }
6553
6554 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
6555
6556 return 0;
6557 }
6558
6559 static void pqi_map_queues(struct Scsi_Host *shost)
6560 {
6561 struct pqi_ctrl_info *ctrl_info = shost_to_hba(shost);
6562
6563 if (!ctrl_info->disable_managed_interrupts)
6564 blk_mq_map_hw_queues(&shost->tag_set.map[HCTX_TYPE_DEFAULT],
6565 &ctrl_info->pci_dev->dev, 0);
6566 else
6567 blk_mq_map_queues(&shost->tag_set.map[HCTX_TYPE_DEFAULT]);
6568 }
6569
6570 static inline bool pqi_is_tape_changer_device(struct pqi_scsi_dev *device)
6571 {
6572 return device->devtype == TYPE_TAPE || device->devtype == TYPE_MEDIUM_CHANGER;
6573 }
6574
6575 static int pqi_sdev_configure(struct scsi_device *sdev,
6576 struct queue_limits *lim)
6577 {
6578 int rc = 0;
6579 struct pqi_scsi_dev *device;
6580
6581 device = sdev->hostdata;
6582 device->devtype = sdev->type;
6583
6584 if (pqi_is_tape_changer_device(device) && device->ignore_device) {
6585 rc = -ENXIO;
6586 device->ignore_device = false;
6587 }
6588
6589 return rc;
6590 }
6591
6592 static void pqi_sdev_destroy(struct scsi_device *sdev)
6593 {
6594 struct pqi_ctrl_info *ctrl_info;
6595 struct pqi_scsi_dev *device;
6596 int mutex_acquired;
6597 unsigned long flags;
6598
6599 ctrl_info = shost_to_hba(sdev->host);
6600
6601 mutex_acquired = mutex_trylock(&ctrl_info->scan_mutex);
6602 if (!mutex_acquired)
6603 return;
6604
6605 device = sdev->hostdata;
6606 if (!device) {
6607 mutex_unlock(&ctrl_info->scan_mutex);
6608 return;
6609 }
6610
6611 device->lun_count--;
6612 if (device->lun_count > 0) {
6613 mutex_unlock(&ctrl_info->scan_mutex);
6614 return;
6615 }
6616
6617 spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
6618 list_del(&device->scsi_device_list_entry);
6619 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
6620
6621 mutex_unlock(&ctrl_info->scan_mutex);
6622
6623 pqi_dev_info(ctrl_info, "removed", device);
6624 pqi_free_device(device);
6625 }
6626
6627 static int pqi_getpciinfo_ioctl(struct pqi_ctrl_info *ctrl_info, void __user *arg)
6628 {
6629 struct pci_dev *pci_dev;
6630 u32 subsystem_vendor;
6631 u32 subsystem_device;
6632 cciss_pci_info_struct pci_info;
6633
6634 if (!arg)
6635 return -EINVAL;
6636
6637 pci_dev = ctrl_info->pci_dev;
6638
6639 pci_info.domain = pci_domain_nr(pci_dev->bus);
6640 pci_info.bus = pci_dev->bus->number;
6641 pci_info.dev_fn = pci_dev->devfn;
6642 subsystem_vendor = pci_dev->subsystem_vendor;
6643 subsystem_device = pci_dev->subsystem_device;
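	/*
	 * Legacy cciss layout: subsystem device ID in the upper 16 bits,
	 * subsystem vendor ID in the lower 16.
	 */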
6644 pci_info.board_id = ((subsystem_device << 16) & 0xffff0000) | subsystem_vendor;
6645
6646 if (copy_to_user(arg, &pci_info, sizeof(pci_info)))
6647 return -EFAULT;
6648
6649 return 0;
6650 }
6651
6652 static int pqi_getdrivver_ioctl(void __user *arg)
6653 {
6654 u32 version;
6655
6656 if (!arg)
6657 return -EINVAL;
6658
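	/*
	 * Pack the version as major:minor:release:revision in bits 31-28,
	 * 27-24, 23-16 and 15-0 respectively; e.g. driver 2.1.34-035
	 * yields 0x21220023.
	 */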
6659 version = (DRIVER_MAJOR << 28) | (DRIVER_MINOR << 24) |
6660 (DRIVER_RELEASE << 16) | DRIVER_REVISION;
6661
6662 if (copy_to_user(arg, &version, sizeof(version)))
6663 return -EFAULT;
6664
6665 return 0;
6666 }
6667
6668 struct ciss_error_info {
6669 u8 scsi_status;
6670 int command_status;
6671 size_t sense_data_length;
6672 };
6673
6674 static void pqi_error_info_to_ciss(struct pqi_raid_error_info *pqi_error_info,
6675 struct ciss_error_info *ciss_error_info)
6676 {
6677 int ciss_cmd_status;
6678 size_t sense_data_length;
6679
6680 switch (pqi_error_info->data_out_result) {
6681 case PQI_DATA_IN_OUT_GOOD:
6682 ciss_cmd_status = CISS_CMD_STATUS_SUCCESS;
6683 break;
6684 case PQI_DATA_IN_OUT_UNDERFLOW:
6685 ciss_cmd_status = CISS_CMD_STATUS_DATA_UNDERRUN;
6686 break;
6687 case PQI_DATA_IN_OUT_BUFFER_OVERFLOW:
6688 ciss_cmd_status = CISS_CMD_STATUS_DATA_OVERRUN;
6689 break;
6690 case PQI_DATA_IN_OUT_PROTOCOL_ERROR:
6691 case PQI_DATA_IN_OUT_BUFFER_ERROR:
6692 case PQI_DATA_IN_OUT_BUFFER_OVERFLOW_DESCRIPTOR_AREA:
6693 case PQI_DATA_IN_OUT_BUFFER_OVERFLOW_BRIDGE:
6694 case PQI_DATA_IN_OUT_ERROR:
6695 ciss_cmd_status = CISS_CMD_STATUS_PROTOCOL_ERROR;
6696 break;
6697 case PQI_DATA_IN_OUT_HARDWARE_ERROR:
6698 case PQI_DATA_IN_OUT_PCIE_FABRIC_ERROR:
6699 case PQI_DATA_IN_OUT_PCIE_COMPLETION_TIMEOUT:
6700 case PQI_DATA_IN_OUT_PCIE_COMPLETER_ABORT_RECEIVED:
6701 case PQI_DATA_IN_OUT_PCIE_UNSUPPORTED_REQUEST_RECEIVED:
6702 case PQI_DATA_IN_OUT_PCIE_ECRC_CHECK_FAILED:
6703 case PQI_DATA_IN_OUT_PCIE_UNSUPPORTED_REQUEST:
6704 case PQI_DATA_IN_OUT_PCIE_ACS_VIOLATION:
6705 case PQI_DATA_IN_OUT_PCIE_TLP_PREFIX_BLOCKED:
6706 case PQI_DATA_IN_OUT_PCIE_POISONED_MEMORY_READ:
6707 ciss_cmd_status = CISS_CMD_STATUS_HARDWARE_ERROR;
6708 break;
6709 case PQI_DATA_IN_OUT_UNSOLICITED_ABORT:
6710 ciss_cmd_status = CISS_CMD_STATUS_UNSOLICITED_ABORT;
6711 break;
6712 case PQI_DATA_IN_OUT_ABORTED:
6713 ciss_cmd_status = CISS_CMD_STATUS_ABORTED;
6714 break;
6715 case PQI_DATA_IN_OUT_TIMEOUT:
6716 ciss_cmd_status = CISS_CMD_STATUS_TIMEOUT;
6717 break;
6718 default:
6719 ciss_cmd_status = CISS_CMD_STATUS_TARGET_STATUS;
6720 break;
6721 }
6722
6723 sense_data_length =
6724 get_unaligned_le16(&pqi_error_info->sense_data_length);
6725 if (sense_data_length == 0)
6726 sense_data_length =
6727 get_unaligned_le16(&pqi_error_info->response_data_length);
6728 if (sense_data_length)
6729 if (sense_data_length > sizeof(pqi_error_info->data))
6730 sense_data_length = sizeof(pqi_error_info->data);
6731
6732 ciss_error_info->scsi_status = pqi_error_info->status;
6733 ciss_error_info->command_status = ciss_cmd_status;
6734 ciss_error_info->sense_data_length = sense_data_length;
6735 }
6736
6737 static int pqi_passthru_ioctl(struct pqi_ctrl_info *ctrl_info, void __user *arg)
6738 {
6739 int rc;
6740 char *kernel_buffer = NULL;
6741 u16 iu_length;
6742 size_t sense_data_length;
6743 IOCTL_Command_struct iocommand;
6744 struct pqi_raid_path_request request;
6745 struct pqi_raid_error_info pqi_error_info;
6746 struct ciss_error_info ciss_error_info;
6747
6748 if (pqi_ctrl_offline(ctrl_info))
6749 return -ENXIO;
6750 if (pqi_ofa_in_progress(ctrl_info) && pqi_ctrl_blocked(ctrl_info))
6751 return -EBUSY;
6752 if (!arg)
6753 return -EINVAL;
6754 if (!capable(CAP_SYS_RAWIO))
6755 return -EPERM;
6756 if (copy_from_user(&iocommand, arg, sizeof(iocommand)))
6757 return -EFAULT;
6758 if (iocommand.buf_size < 1 &&
6759 iocommand.Request.Type.Direction != XFER_NONE)
6760 return -EINVAL;
6761 if (iocommand.Request.CDBLen > sizeof(request.cdb))
6762 return -EINVAL;
6763 if (iocommand.Request.Type.Type != TYPE_CMD)
6764 return -EINVAL;
6765
6766 switch (iocommand.Request.Type.Direction) {
6767 case XFER_NONE:
6768 case XFER_WRITE:
6769 case XFER_READ:
6770 case XFER_READ | XFER_WRITE:
6771 break;
6772 default:
6773 return -EINVAL;
6774 }
6775
6776 if (iocommand.buf_size > 0) {
6777 kernel_buffer = kmalloc(iocommand.buf_size, GFP_KERNEL);
6778 if (!kernel_buffer)
6779 return -ENOMEM;
6780 if (iocommand.Request.Type.Direction & XFER_WRITE) {
6781 if (copy_from_user(kernel_buffer, iocommand.buf,
6782 iocommand.buf_size)) {
6783 rc = -EFAULT;
6784 goto out;
6785 }
6786 } else {
6787 memset(kernel_buffer, 0, iocommand.buf_size);
6788 }
6789 }
6790
6791 memset(&request, 0, sizeof(request));
6792
6793 request.header.iu_type = PQI_REQUEST_IU_RAID_PATH_IO;
6794 iu_length = offsetof(struct pqi_raid_path_request, sg_descriptors) -
6795 PQI_REQUEST_HEADER_LENGTH;
6796 memcpy(request.lun_number, iocommand.LUN_info.LunAddrBytes,
6797 sizeof(request.lun_number));
6798 memcpy(request.cdb, iocommand.Request.CDB, iocommand.Request.CDBLen);
6799 request.additional_cdb_bytes_usage = SOP_ADDITIONAL_CDB_BYTES_0;
6800
6801 switch (iocommand.Request.Type.Direction) {
6802 case XFER_NONE:
6803 request.data_direction = SOP_NO_DIRECTION_FLAG;
6804 break;
6805 case XFER_WRITE:
6806 request.data_direction = SOP_WRITE_FLAG;
6807 break;
6808 case XFER_READ:
6809 request.data_direction = SOP_READ_FLAG;
6810 break;
6811 case XFER_READ | XFER_WRITE:
6812 request.data_direction = SOP_BIDIRECTIONAL;
6813 break;
6814 }
6815
6816 request.task_attribute = SOP_TASK_ATTRIBUTE_SIMPLE;
6817
6818 if (iocommand.buf_size > 0) {
6819 put_unaligned_le32(iocommand.buf_size, &request.buffer_length);
6820
6821 rc = pqi_map_single(ctrl_info->pci_dev,
6822 &request.sg_descriptors[0], kernel_buffer,
6823 iocommand.buf_size, DMA_BIDIRECTIONAL);
6824 if (rc)
6825 goto out;
6826
6827 iu_length += sizeof(request.sg_descriptors[0]);
6828 }
6829
6830 put_unaligned_le16(iu_length, &request.header.iu_length);
6831
6832 if (ctrl_info->raid_iu_timeout_supported)
6833 put_unaligned_le32(iocommand.Request.Timeout, &request.timeout);
6834
6835 rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header,
6836 PQI_SYNC_FLAGS_INTERRUPTABLE, &pqi_error_info);
6837
6838 if (iocommand.buf_size > 0)
6839 pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1,
6840 DMA_BIDIRECTIONAL);
6841
6842 memset(&iocommand.error_info, 0, sizeof(iocommand.error_info));
6843
6844 if (rc == 0) {
6845 pqi_error_info_to_ciss(&pqi_error_info, &ciss_error_info);
6846 iocommand.error_info.ScsiStatus = ciss_error_info.scsi_status;
6847 iocommand.error_info.CommandStatus =
6848 ciss_error_info.command_status;
6849 sense_data_length = ciss_error_info.sense_data_length;
6850 if (sense_data_length) {
6851 if (sense_data_length >
6852 sizeof(iocommand.error_info.SenseInfo))
6853 sense_data_length =
6854 sizeof(iocommand.error_info.SenseInfo);
6855 memcpy(iocommand.error_info.SenseInfo,
6856 pqi_error_info.data, sense_data_length);
6857 iocommand.error_info.SenseLen = sense_data_length;
6858 }
6859 }
6860
6861 if (copy_to_user(arg, &iocommand, sizeof(iocommand))) {
6862 rc = -EFAULT;
6863 goto out;
6864 }
6865
6866 if (rc == 0 && iocommand.buf_size > 0 &&
6867 (iocommand.Request.Type.Direction & XFER_READ)) {
6868 if (copy_to_user(iocommand.buf, kernel_buffer,
6869 iocommand.buf_size)) {
6870 rc = -EFAULT;
6871 }
6872 }
6873
6874 out:
6875 kfree(kernel_buffer);
6876
6877 return rc;
6878 }
6879
6880 static int pqi_ioctl(struct scsi_device *sdev, unsigned int cmd,
6881 void __user *arg)
6882 {
6883 int rc;
6884 struct pqi_ctrl_info *ctrl_info;
6885
6886 ctrl_info = shost_to_hba(sdev->host);
6887
6888 switch (cmd) {
6889 case CCISS_DEREGDISK:
6890 case CCISS_REGNEWDISK:
6891 case CCISS_REGNEWD:
6892 rc = pqi_scan_scsi_devices(ctrl_info);
6893 break;
6894 case CCISS_GETPCIINFO:
6895 rc = pqi_getpciinfo_ioctl(ctrl_info, arg);
6896 break;
6897 case CCISS_GETDRIVVER:
6898 rc = pqi_getdrivver_ioctl(arg);
6899 break;
6900 case CCISS_PASSTHRU:
6901 rc = pqi_passthru_ioctl(ctrl_info, arg);
6902 break;
6903 default:
6904 rc = -EINVAL;
6905 break;
6906 }
6907
6908 return rc;
6909 }
6910
6911 static ssize_t pqi_firmware_version_show(struct device *dev,
6912 struct device_attribute *attr, char *buffer)
6913 {
6914 struct Scsi_Host *shost;
6915 struct pqi_ctrl_info *ctrl_info;
6916
6917 shost = class_to_shost(dev);
6918 ctrl_info = shost_to_hba(shost);
6919
6920 return scnprintf(buffer, PAGE_SIZE, "%s\n", ctrl_info->firmware_version);
6921 }
6922
6923 static ssize_t pqi_serial_number_show(struct device *dev,
6924 struct device_attribute *attr, char *buffer)
6925 {
6926 struct Scsi_Host *shost;
6927 struct pqi_ctrl_info *ctrl_info;
6928
6929 shost = class_to_shost(dev);
6930 ctrl_info = shost_to_hba(shost);
6931
6932 return scnprintf(buffer, PAGE_SIZE, "%s\n", ctrl_info->serial_number);
6933 }
6934
6935 static ssize_t pqi_model_show(struct device *dev,
6936 struct device_attribute *attr, char *buffer)
6937 {
6938 struct Scsi_Host *shost;
6939 struct pqi_ctrl_info *ctrl_info;
6940
6941 shost = class_to_shost(dev);
6942 ctrl_info = shost_to_hba(shost);
6943
6944 return scnprintf(buffer, PAGE_SIZE, "%s\n", ctrl_info->model);
6945 }
6946
6947 static ssize_t pqi_vendor_show(struct device *dev,
6948 struct device_attribute *attr, char *buffer)
6949 {
6950 struct Scsi_Host *shost;
6951 struct pqi_ctrl_info *ctrl_info;
6952
6953 shost = class_to_shost(dev);
6954 ctrl_info = shost_to_hba(shost);
6955
6956 return scnprintf(buffer, PAGE_SIZE, "%s\n", ctrl_info->vendor);
6957 }
6958
6959 static ssize_t pqi_host_rescan_store(struct device *dev,
6960 struct device_attribute *attr, const char *buffer, size_t count)
6961 {
6962 struct Scsi_Host *shost = class_to_shost(dev);
6963
6964 pqi_scan_start(shost);
6965
6966 return count;
6967 }
6968
6969 static ssize_t pqi_lockup_action_show(struct device *dev,
6970 struct device_attribute *attr, char *buffer)
6971 {
6972 int count = 0;
6973 unsigned int i;
6974
6975 for (i = 0; i < ARRAY_SIZE(pqi_lockup_actions); i++) {
6976 if (pqi_lockup_actions[i].action == pqi_lockup_action)
6977 count += scnprintf(buffer + count, PAGE_SIZE - count,
6978 "[%s] ", pqi_lockup_actions[i].name);
6979 else
6980 count += scnprintf(buffer + count, PAGE_SIZE - count,
6981 "%s ", pqi_lockup_actions[i].name);
6982 }
6983
6984 count += scnprintf(buffer + count, PAGE_SIZE - count, "\n");
6985
6986 return count;
6987 }
6988
6989 static ssize_t pqi_lockup_action_store(struct device *dev,
6990 struct device_attribute *attr, const char *buffer, size_t count)
6991 {
6992 unsigned int i;
6993 char *action_name;
6994 char action_name_buffer[32];
6995
6996 strscpy(action_name_buffer, buffer, sizeof(action_name_buffer));
6997 action_name = strstrip(action_name_buffer);
6998
6999 for (i = 0; i < ARRAY_SIZE(pqi_lockup_actions); i++) {
7000 if (strcmp(action_name, pqi_lockup_actions[i].name) == 0) {
7001 pqi_lockup_action = pqi_lockup_actions[i].action;
7002 return count;
7003 }
7004 }
7005
7006 return -EINVAL;
7007 }
7008
7009 static ssize_t pqi_host_enable_stream_detection_show(struct device *dev,
7010 struct device_attribute *attr, char *buffer)
7011 {
7012 struct Scsi_Host *shost = class_to_shost(dev);
7013 struct pqi_ctrl_info *ctrl_info = shost_to_hba(shost);
7014
7015 return scnprintf(buffer, 10, "%x\n",
7016 ctrl_info->enable_stream_detection);
7017 }
7018
7019 static ssize_t pqi_host_enable_stream_detection_store(struct device *dev,
7020 struct device_attribute *attr, const char *buffer, size_t count)
7021 {
7022 struct Scsi_Host *shost = class_to_shost(dev);
7023 struct pqi_ctrl_info *ctrl_info = shost_to_hba(shost);
7024 u8 set_stream_detection = 0;
7025
7026 if (kstrtou8(buffer, 0, &set_stream_detection))
7027 return -EINVAL;
7028
7029 if (set_stream_detection > 0)
7030 set_stream_detection = 1;
7031
7032 ctrl_info->enable_stream_detection = set_stream_detection;
7033
7034 return count;
7035 }
7036
7037 static ssize_t pqi_host_enable_r5_writes_show(struct device *dev,
7038 struct device_attribute *attr, char *buffer)
7039 {
7040 struct Scsi_Host *shost = class_to_shost(dev);
7041 struct pqi_ctrl_info *ctrl_info = shost_to_hba(shost);
7042
7043 return scnprintf(buffer, 10, "%x\n", ctrl_info->enable_r5_writes);
7044 }
7045
7046 static ssize_t pqi_host_enable_r5_writes_store(struct device *dev,
7047 struct device_attribute *attr, const char *buffer, size_t count)
7048 {
7049 struct Scsi_Host *shost = class_to_shost(dev);
7050 struct pqi_ctrl_info *ctrl_info = shost_to_hba(shost);
7051 u8 set_r5_writes = 0;
7052
7053 if (kstrtou8(buffer, 0, &set_r5_writes))
7054 return -EINVAL;
7055
7056 if (set_r5_writes > 0)
7057 set_r5_writes = 1;
7058
7059 ctrl_info->enable_r5_writes = set_r5_writes;
7060
7061 return count;
7062 }
7063
7064 static ssize_t pqi_host_enable_r6_writes_show(struct device *dev,
7065 struct device_attribute *attr, char *buffer)
7066 {
7067 struct Scsi_Host *shost = class_to_shost(dev);
7068 struct pqi_ctrl_info *ctrl_info = shost_to_hba(shost);
7069
7070 return scnprintf(buffer, 10, "%x\n", ctrl_info->enable_r6_writes);
7071 }
7072
7073 static ssize_t pqi_host_enable_r6_writes_store(struct device *dev,
7074 struct device_attribute *attr, const char *buffer, size_t count)
7075 {
7076 struct Scsi_Host *shost = class_to_shost(dev);
7077 struct pqi_ctrl_info *ctrl_info = shost_to_hba(shost);
7078 u8 set_r6_writes = 0;
7079
7080 if (kstrtou8(buffer, 0, &set_r6_writes))
7081 return -EINVAL;
7082
7083 if (set_r6_writes > 0)
7084 set_r6_writes = 1;
7085
7086 ctrl_info->enable_r6_writes = set_r6_writes;
7087
7088 return count;
7089 }
7090
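/*
 * The controller-level attributes below are exposed through the Scsi_Host
 * class device, e.g. (paths illustrative):
 *   cat /sys/class/scsi_host/host0/firmware_version
 *   echo 1 > /sys/class/scsi_host/host0/enable_r5_writes
 */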
7091 static DEVICE_STRING_ATTR_RO(driver_version, 0444,
7092 DRIVER_VERSION BUILD_TIMESTAMP);
7093 static DEVICE_ATTR(firmware_version, 0444, pqi_firmware_version_show, NULL);
7094 static DEVICE_ATTR(model, 0444, pqi_model_show, NULL);
7095 static DEVICE_ATTR(serial_number, 0444, pqi_serial_number_show, NULL);
7096 static DEVICE_ATTR(vendor, 0444, pqi_vendor_show, NULL);
7097 static DEVICE_ATTR(rescan, 0200, NULL, pqi_host_rescan_store);
7098 static DEVICE_ATTR(lockup_action, 0644, pqi_lockup_action_show,
7099 pqi_lockup_action_store);
7100 static DEVICE_ATTR(enable_stream_detection, 0644,
7101 pqi_host_enable_stream_detection_show,
7102 pqi_host_enable_stream_detection_store);
7103 static DEVICE_ATTR(enable_r5_writes, 0644,
7104 pqi_host_enable_r5_writes_show, pqi_host_enable_r5_writes_store);
7105 static DEVICE_ATTR(enable_r6_writes, 0644,
7106 pqi_host_enable_r6_writes_show, pqi_host_enable_r6_writes_store);
7107
7108 static struct attribute *pqi_shost_attrs[] = {
7109 &dev_attr_driver_version.attr.attr,
7110 &dev_attr_firmware_version.attr,
7111 &dev_attr_model.attr,
7112 &dev_attr_serial_number.attr,
7113 &dev_attr_vendor.attr,
7114 &dev_attr_rescan.attr,
7115 &dev_attr_lockup_action.attr,
7116 &dev_attr_enable_stream_detection.attr,
7117 &dev_attr_enable_r5_writes.attr,
7118 &dev_attr_enable_r6_writes.attr,
7119 NULL
7120 };
7121
7122 ATTRIBUTE_GROUPS(pqi_shost);
7123
7124 static ssize_t pqi_unique_id_show(struct device *dev,
7125 struct device_attribute *attr, char *buffer)
7126 {
7127 struct pqi_ctrl_info *ctrl_info;
7128 struct scsi_device *sdev;
7129 struct pqi_scsi_dev *device;
7130 unsigned long flags;
7131 u8 unique_id[16];
7132
7133 sdev = to_scsi_device(dev);
7134 ctrl_info = shost_to_hba(sdev->host);
7135
7136 if (pqi_ctrl_offline(ctrl_info))
7137 return -ENODEV;
7138
7139 spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
7140
7141 device = sdev->hostdata;
7142 if (!device) {
7143 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
7144 return -ENODEV;
7145 }
7146
7147 if (device->is_physical_device)
7148 memcpy(unique_id, device->wwid, sizeof(device->wwid));
7149 else
7150 memcpy(unique_id, device->volume_id, sizeof(device->volume_id));
7151
7152 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
7153
7154 return scnprintf(buffer, PAGE_SIZE,
7155 "%02X%02X%02X%02X%02X%02X%02X%02X"
7156 "%02X%02X%02X%02X%02X%02X%02X%02X\n",
7157 unique_id[0], unique_id[1], unique_id[2], unique_id[3],
7158 unique_id[4], unique_id[5], unique_id[6], unique_id[7],
7159 unique_id[8], unique_id[9], unique_id[10], unique_id[11],
7160 unique_id[12], unique_id[13], unique_id[14], unique_id[15]);
7161 }
7162
7163 static ssize_t pqi_lunid_show(struct device *dev,
7164 struct device_attribute *attr, char *buffer)
7165 {
7166 struct pqi_ctrl_info *ctrl_info;
7167 struct scsi_device *sdev;
7168 struct pqi_scsi_dev *device;
7169 unsigned long flags;
7170 u8 lunid[8];
7171
7172 sdev = to_scsi_device(dev);
7173 ctrl_info = shost_to_hba(sdev->host);
7174
7175 if (pqi_ctrl_offline(ctrl_info))
7176 return -ENODEV;
7177
7178 spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
7179
7180 device = sdev->hostdata;
7181 if (!device) {
7182 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
7183 return -ENODEV;
7184 }
7185
7186 memcpy(lunid, device->scsi3addr, sizeof(lunid));
7187
7188 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
7189
7190 return scnprintf(buffer, PAGE_SIZE, "0x%8phN\n", lunid);
7191 }
7192
7193 #define MAX_PATHS 8
7194
7195 static ssize_t pqi_path_info_show(struct device *dev,
7196 struct device_attribute *attr, char *buf)
7197 {
7198 struct pqi_ctrl_info *ctrl_info;
7199 struct scsi_device *sdev;
7200 struct pqi_scsi_dev *device;
7201 unsigned long flags;
7202 int i;
7203 int output_len = 0;
7204 u8 box;
7205 u8 bay;
7206 u8 path_map_index;
7207 char *active;
7208 u8 phys_connector[2];
7209
7210 sdev = to_scsi_device(dev);
7211 ctrl_info = shost_to_hba(sdev->host);
7212
7213 if (pqi_ctrl_offline(ctrl_info))
7214 return -ENODEV;
7215
7216 spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
7217
7218 device = sdev->hostdata;
7219 if (!device) {
7220 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
7221 return -ENODEV;
7222 }
7223
7224 bay = device->bay;
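	/*
	 * path_map is a bitmask of usable paths; report each set bit,
	 * marking the one selected by active_path_index as "Active".
	 */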
7225 for (i = 0; i < MAX_PATHS; i++) {
7226 path_map_index = 1 << i;
7227 if (i == device->active_path_index)
7228 active = "Active";
7229 else if (device->path_map & path_map_index)
7230 active = "Inactive";
7231 else
7232 continue;
7233
7234 output_len += scnprintf(buf + output_len,
7235 PAGE_SIZE - output_len,
7236 "[%d:%d:%d:%d] %20.20s ",
7237 ctrl_info->scsi_host->host_no,
7238 device->bus, device->target,
7239 device->lun,
7240 scsi_device_type(device->devtype));
7241
7242 if (device->devtype == TYPE_RAID ||
7243 pqi_is_logical_device(device))
7244 goto end_buffer;
7245
7246 memcpy(&phys_connector, &device->phys_connector[i],
7247 sizeof(phys_connector));
7248 if (phys_connector[0] < '0')
7249 phys_connector[0] = '0';
7250 if (phys_connector[1] < '0')
7251 phys_connector[1] = '0';
7252
7253 output_len += scnprintf(buf + output_len,
7254 PAGE_SIZE - output_len,
7255 "PORT: %.2s ", phys_connector);
7256
7257 box = device->box[i];
7258 if (box != 0 && box != 0xFF)
7259 output_len += scnprintf(buf + output_len,
7260 PAGE_SIZE - output_len,
7261 "BOX: %hhu ", box);
7262
7263 if ((device->devtype == TYPE_DISK ||
7264 device->devtype == TYPE_ZBC) &&
7265 pqi_expose_device(device))
7266 output_len += scnprintf(buf + output_len,
7267 PAGE_SIZE - output_len,
7268 "BAY: %hhu ", bay);
7269
7270 end_buffer:
7271 output_len += scnprintf(buf + output_len,
7272 PAGE_SIZE - output_len,
7273 "%s\n", active);
7274 }
7275
7276 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
7277
7278 return output_len;
7279 }
7280
7281 static ssize_t pqi_sas_address_show(struct device *dev,
7282 struct device_attribute *attr, char *buffer)
7283 {
7284 struct pqi_ctrl_info *ctrl_info;
7285 struct scsi_device *sdev;
7286 struct pqi_scsi_dev *device;
7287 unsigned long flags;
7288 u64 sas_address;
7289
7290 sdev = to_scsi_device(dev);
7291 ctrl_info = shost_to_hba(sdev->host);
7292
7293 if (pqi_ctrl_offline(ctrl_info))
7294 return -ENODEV;
7295
7296 spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
7297
7298 device = sdev->hostdata;
7299 if (!device) {
7300 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
7301 return -ENODEV;
7302 }
7303
7304 sas_address = device->sas_address;
7305
7306 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
7307
7308 return scnprintf(buffer, PAGE_SIZE, "0x%016llx\n", sas_address);
7309 }
7310
7311 static ssize_t pqi_ssd_smart_path_enabled_show(struct device *dev,
7312 struct device_attribute *attr, char *buffer)
7313 {
7314 struct pqi_ctrl_info *ctrl_info;
7315 struct scsi_device *sdev;
7316 struct pqi_scsi_dev *device;
7317 unsigned long flags;
7318
7319 sdev = to_scsi_device(dev);
7320 ctrl_info = shost_to_hba(sdev->host);
7321
7322 if (pqi_ctrl_offline(ctrl_info))
7323 return -ENODEV;
7324
7325 spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
7326
7327 device = sdev->hostdata;
7328 if (!device) {
7329 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
7330 return -ENODEV;
7331 }
7332
7333 buffer[0] = device->raid_bypass_enabled ? '1' : '0';
7334 buffer[1] = '\n';
7335 buffer[2] = '\0';
7336
7337 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
7338
7339 return 2;
7340 }
7341
7342 static ssize_t pqi_raid_level_show(struct device *dev,
7343 struct device_attribute *attr, char *buffer)
7344 {
7345 struct pqi_ctrl_info *ctrl_info;
7346 struct scsi_device *sdev;
7347 struct pqi_scsi_dev *device;
7348 unsigned long flags;
7349 char *raid_level;
7350
7351 sdev = to_scsi_device(dev);
7352 ctrl_info = shost_to_hba(sdev->host);
7353
7354 if (pqi_ctrl_offline(ctrl_info))
7355 return -ENODEV;
7356
7357 spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
7358
7359 device = sdev->hostdata;
7360 if (!device) {
7361 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
7362 return -ENODEV;
7363 }
7364
7365 if (pqi_is_logical_device(device) && device->devtype == TYPE_DISK)
7366 raid_level = pqi_raid_level_to_string(device->raid_level);
7367 else
7368 raid_level = "N/A";
7369
7370 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
7371
7372 return scnprintf(buffer, PAGE_SIZE, "%s\n", raid_level);
7373 }
7374
7375 static ssize_t pqi_raid_bypass_cnt_show(struct device *dev,
7376 struct device_attribute *attr, char *buffer)
7377 {
7378 struct pqi_ctrl_info *ctrl_info;
7379 struct scsi_device *sdev;
7380 struct pqi_scsi_dev *device;
7381 unsigned long flags;
7382 u64 raid_bypass_cnt;
7383 int cpu;
7384
7385 sdev = to_scsi_device(dev);
7386 ctrl_info = shost_to_hba(sdev->host);
7387
7388 if (pqi_ctrl_offline(ctrl_info))
7389 return -ENODEV;
7390
7391 spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
7392
7393 device = sdev->hostdata;
7394 if (!device) {
7395 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
7396 return -ENODEV;
7397 }
7398
7399 raid_bypass_cnt = 0;
7400
7401 if (device->raid_io_stats) {
7402 for_each_online_cpu(cpu) {
7403 raid_bypass_cnt += per_cpu_ptr(device->raid_io_stats, cpu)->raid_bypass_cnt;
7404 }
7405 }
7406
7407 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
7408
7409 return scnprintf(buffer, PAGE_SIZE, "0x%llx\n", raid_bypass_cnt);
7410 }
7411
7412 static ssize_t pqi_sas_ncq_prio_enable_show(struct device *dev,
7413 struct device_attribute *attr, char *buf)
7414 {
7415 struct pqi_ctrl_info *ctrl_info;
7416 struct scsi_device *sdev;
7417 struct pqi_scsi_dev *device;
7418 unsigned long flags;
7419 int output_len = 0;
7420
7421 sdev = to_scsi_device(dev);
7422 ctrl_info = shost_to_hba(sdev->host);
7423
7424 if (pqi_ctrl_offline(ctrl_info))
7425 return -ENODEV;
7426
7427 spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
7428
7429 device = sdev->hostdata;
7430 if (!device) {
7431 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
7432 return -ENODEV;
7433 }
7434
7435 output_len = snprintf(buf, PAGE_SIZE, "%d\n",
7436 device->ncq_prio_enable);
7437 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
7438
7439 return output_len;
7440 }
7441
7442 static ssize_t pqi_sas_ncq_prio_enable_store(struct device *dev,
7443 struct device_attribute *attr,
7444 const char *buf, size_t count)
7445 {
7446 struct pqi_ctrl_info *ctrl_info;
7447 struct scsi_device *sdev;
7448 struct pqi_scsi_dev *device;
7449 unsigned long flags;
7450 u8 ncq_prio_enable = 0;
7451
7452 if (kstrtou8(buf, 0, &ncq_prio_enable))
7453 return -EINVAL;
7454
7455 sdev = to_scsi_device(dev);
7456 ctrl_info = shost_to_hba(sdev->host);
7457
7458 spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
7459
7460 device = sdev->hostdata;
7461
7462 if (!device) {
7463 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
7464 return -ENODEV;
7465 }
7466
7467 if (!device->ncq_prio_support) {
7468 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
7469 return -EINVAL;
7470 }
7471
7472 device->ncq_prio_enable = ncq_prio_enable;
7473
7474 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
7475
7476 return strlen(buf);
7477 }
7478
7479 static ssize_t pqi_numa_node_show(struct device *dev,
7480 struct device_attribute *attr, char *buffer)
7481 {
7482 struct scsi_device *sdev;
7483 struct pqi_ctrl_info *ctrl_info;
7484
7485 sdev = to_scsi_device(dev);
7486 ctrl_info = shost_to_hba(sdev->host);
7487
7488 return scnprintf(buffer, PAGE_SIZE, "%d\n", ctrl_info->numa_node);
7489 }
7490
7491 static ssize_t pqi_write_stream_cnt_show(struct device *dev,
7492 struct device_attribute *attr, char *buffer)
7493 {
7494 struct pqi_ctrl_info *ctrl_info;
7495 struct scsi_device *sdev;
7496 struct pqi_scsi_dev *device;
7497 unsigned long flags;
7498 u64 write_stream_cnt;
7499 int cpu;
7500
7501 sdev = to_scsi_device(dev);
7502 ctrl_info = shost_to_hba(sdev->host);
7503
7504 if (pqi_ctrl_offline(ctrl_info))
7505 return -ENODEV;
7506
7507 spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
7508
7509 device = sdev->hostdata;
7510 if (!device) {
7511 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
7512 return -ENODEV;
7513 }
7514
7515 write_stream_cnt = 0;
7516
7517 if (device->raid_io_stats) {
7518 for_each_online_cpu(cpu) {
7519 write_stream_cnt += per_cpu_ptr(device->raid_io_stats, cpu)->write_stream_cnt;
7520 }
7521 }
7522
7523 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
7524
7525 return scnprintf(buffer, PAGE_SIZE, "0x%llx\n", write_stream_cnt);
7526 }
7527
7528 static DEVICE_ATTR(lunid, 0444, pqi_lunid_show, NULL);
7529 static DEVICE_ATTR(unique_id, 0444, pqi_unique_id_show, NULL);
7530 static DEVICE_ATTR(path_info, 0444, pqi_path_info_show, NULL);
7531 static DEVICE_ATTR(sas_address, 0444, pqi_sas_address_show, NULL);
7532 static DEVICE_ATTR(ssd_smart_path_enabled, 0444, pqi_ssd_smart_path_enabled_show, NULL);
7533 static DEVICE_ATTR(raid_level, 0444, pqi_raid_level_show, NULL);
7534 static DEVICE_ATTR(raid_bypass_cnt, 0444, pqi_raid_bypass_cnt_show, NULL);
7535 static DEVICE_ATTR(sas_ncq_prio_enable, 0644,
7536 pqi_sas_ncq_prio_enable_show, pqi_sas_ncq_prio_enable_store);
7537 static DEVICE_ATTR(numa_node, 0444, pqi_numa_node_show, NULL);
7538 static DEVICE_ATTR(write_stream_cnt, 0444, pqi_write_stream_cnt_show, NULL);
7539
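/*
 * The per-device attributes below appear under each SCSI device's sysfs
 * directory, e.g. (path illustrative):
 *   cat /sys/class/scsi_device/0:0:1:0/device/raid_level
 */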
7540 static struct attribute *pqi_sdev_attrs[] = {
7541 &dev_attr_lunid.attr,
7542 &dev_attr_unique_id.attr,
7543 &dev_attr_path_info.attr,
7544 &dev_attr_sas_address.attr,
7545 &dev_attr_ssd_smart_path_enabled.attr,
7546 &dev_attr_raid_level.attr,
7547 &dev_attr_raid_bypass_cnt.attr,
7548 &dev_attr_sas_ncq_prio_enable.attr,
7549 &dev_attr_numa_node.attr,
7550 &dev_attr_write_stream_cnt.attr,
7551 NULL
7552 };
7553
7554 ATTRIBUTE_GROUPS(pqi_sdev);
7555
7556 static const struct scsi_host_template pqi_driver_template = {
7557 .module = THIS_MODULE,
7558 .name = DRIVER_NAME_SHORT,
7559 .proc_name = DRIVER_NAME_SHORT,
7560 .queuecommand = pqi_scsi_queue_command,
7561 .scan_start = pqi_scan_start,
7562 .scan_finished = pqi_scan_finished,
7563 .this_id = -1,
7564 .eh_device_reset_handler = pqi_eh_device_reset_handler,
7565 .eh_abort_handler = pqi_eh_abort_handler,
7566 .ioctl = pqi_ioctl,
7567 .sdev_init = pqi_sdev_init,
7568 .sdev_configure = pqi_sdev_configure,
7569 .sdev_destroy = pqi_sdev_destroy,
7570 .map_queues = pqi_map_queues,
7571 .sdev_groups = pqi_sdev_groups,
7572 .shost_groups = pqi_shost_groups,
7573 .cmd_size = sizeof(struct pqi_cmd_priv),
7574 };
7575
7576 static int pqi_register_scsi(struct pqi_ctrl_info *ctrl_info)
7577 {
7578 int rc;
7579 struct Scsi_Host *shost;
7580
7581 shost = scsi_host_alloc(&pqi_driver_template, sizeof(ctrl_info));
7582 if (!shost) {
7583 dev_err(&ctrl_info->pci_dev->dev, "scsi_host_alloc failed\n");
7584 return -ENOMEM;
7585 }
7586
7587 shost->io_port = 0;
7588 shost->n_io_port = 0;
7589 shost->this_id = -1;
7590 shost->max_channel = PQI_MAX_BUS;
7591 shost->max_cmd_len = MAX_COMMAND_SIZE;
7592 shost->max_lun = PQI_MAX_LUNS_PER_DEVICE;
7593 shost->max_id = ~0;
7594 shost->max_sectors = ctrl_info->max_sectors;
7595 shost->can_queue = ctrl_info->scsi_ml_can_queue;
7596 shost->cmd_per_lun = shost->can_queue;
7597 shost->sg_tablesize = ctrl_info->sg_tablesize;
7598 shost->transportt = pqi_sas_transport_template;
7599 shost->irq = pci_irq_vector(ctrl_info->pci_dev, 0);
7600 shost->unique_id = shost->irq;
7601 shost->nr_hw_queues = ctrl_info->num_queue_groups;
7602 shost->host_tagset = 1;
7603 shost->hostdata[0] = (unsigned long)ctrl_info;
7604
7605 rc = scsi_add_host(shost, &ctrl_info->pci_dev->dev);
7606 if (rc) {
7607 dev_err(&ctrl_info->pci_dev->dev, "scsi_add_host failed\n");
7608 goto free_host;
7609 }
7610
7611 rc = pqi_add_sas_host(shost, ctrl_info);
7612 if (rc) {
7613 dev_err(&ctrl_info->pci_dev->dev, "add SAS host failed\n");
7614 goto remove_host;
7615 }
7616
7617 ctrl_info->scsi_host = shost;
7618
7619 return 0;
7620
7621 remove_host:
7622 scsi_remove_host(shost);
7623 free_host:
7624 scsi_host_put(shost);
7625
7626 return rc;
7627 }
7628
7629 static void pqi_unregister_scsi(struct pqi_ctrl_info *ctrl_info)
7630 {
7631 struct Scsi_Host *shost;
7632
7633 pqi_delete_sas_host(ctrl_info);
7634
7635 shost = ctrl_info->scsi_host;
7636 if (!shost)
7637 return;
7638
7639 scsi_remove_host(shost);
7640 scsi_host_put(shost);
7641 }
7642
7643 static int pqi_wait_for_pqi_reset_completion(struct pqi_ctrl_info *ctrl_info)
7644 {
7645 int rc = 0;
7646 struct pqi_device_registers __iomem *pqi_registers;
7647 unsigned long timeout;
7648 unsigned int timeout_msecs;
7649 union pqi_reset_register reset_reg;
7650
7651 pqi_registers = ctrl_info->pqi_registers;
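	/*
	 * The max_reset_timeout register counts 100 ms units, hence the
	 * conversion to milliseconds below.
	 */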
7652 timeout_msecs = readw(&pqi_registers->max_reset_timeout) * 100;
7653 timeout = msecs_to_jiffies(timeout_msecs) + jiffies;
7654
7655 while (1) {
7656 msleep(PQI_RESET_POLL_INTERVAL_MSECS);
7657 reset_reg.all_bits = readl(&pqi_registers->device_reset);
7658 if (reset_reg.bits.reset_action == PQI_RESET_ACTION_COMPLETED)
7659 break;
7660 if (!sis_is_firmware_running(ctrl_info)) {
7661 rc = -ENXIO;
7662 break;
7663 }
7664 if (time_after(jiffies, timeout)) {
7665 rc = -ETIMEDOUT;
7666 break;
7667 }
7668 }
7669
7670 return rc;
7671 }
7672
7673 static int pqi_reset(struct pqi_ctrl_info *ctrl_info)
7674 {
7675 int rc;
7676 union pqi_reset_register reset_reg;
7677
7678 if (ctrl_info->pqi_reset_quiesce_supported) {
7679 rc = sis_pqi_reset_quiesce(ctrl_info);
7680 if (rc) {
7681 dev_err(&ctrl_info->pci_dev->dev,
7682 "PQI reset failed during quiesce with error %d\n", rc);
7683 return rc;
7684 }
7685 }
7686
7687 reset_reg.all_bits = 0;
7688 reset_reg.bits.reset_type = PQI_RESET_TYPE_HARD_RESET;
7689 reset_reg.bits.reset_action = PQI_RESET_ACTION_RESET;
7690
7691 writel(reset_reg.all_bits, &ctrl_info->pqi_registers->device_reset);
7692
7693 rc = pqi_wait_for_pqi_reset_completion(ctrl_info);
7694 if (rc)
7695 dev_err(&ctrl_info->pci_dev->dev,
7696 "PQI reset failed with error %d\n", rc);
7697
7698 return rc;
7699 }
7700
7701 static int pqi_get_ctrl_serial_number(struct pqi_ctrl_info *ctrl_info)
7702 {
7703 int rc;
7704 struct bmic_sense_subsystem_info *sense_info;
7705
7706 sense_info = kzalloc(sizeof(*sense_info), GFP_KERNEL);
7707 if (!sense_info)
7708 return -ENOMEM;
7709
7710 rc = pqi_sense_subsystem_info(ctrl_info, sense_info);
7711 if (rc)
7712 goto out;
7713
7714 memcpy(ctrl_info->serial_number, sense_info->ctrl_serial_number,
7715 sizeof(sense_info->ctrl_serial_number));
7716 ctrl_info->serial_number[sizeof(sense_info->ctrl_serial_number)] = '\0';
7717
7718 out:
7719 kfree(sense_info);
7720
7721 return rc;
7722 }
7723
7724 static int pqi_get_ctrl_product_details(struct pqi_ctrl_info *ctrl_info)
7725 {
7726 int rc;
7727 struct bmic_identify_controller *identify;
7728
7729 identify = kmalloc(sizeof(*identify), GFP_KERNEL);
7730 if (!identify)
7731 return -ENOMEM;
7732
7733 rc = pqi_identify_controller(ctrl_info, identify);
7734 if (rc)
7735 goto out;
7736
7737 if (get_unaligned_le32(&identify->extra_controller_flags) &
7738 BMIC_IDENTIFY_EXTRA_FLAGS_LONG_FW_VERSION_SUPPORTED) {
7739 memcpy(ctrl_info->firmware_version,
7740 identify->firmware_version_long,
7741 sizeof(identify->firmware_version_long));
7742 } else {
7743 memcpy(ctrl_info->firmware_version,
7744 identify->firmware_version_short,
7745 sizeof(identify->firmware_version_short));
7746 ctrl_info->firmware_version
7747 [sizeof(identify->firmware_version_short)] = '\0';
7748 snprintf(ctrl_info->firmware_version +
7749 strlen(ctrl_info->firmware_version),
7750 sizeof(ctrl_info->firmware_version) -
7751 sizeof(identify->firmware_version_short),
7752 "-%u",
7753 get_unaligned_le16(&identify->firmware_build_number));
7754 }
7755
7756 memcpy(ctrl_info->model, identify->product_id,
7757 sizeof(identify->product_id));
7758 ctrl_info->model[sizeof(identify->product_id)] = '\0';
7759
7760 memcpy(ctrl_info->vendor, identify->vendor_id,
7761 sizeof(identify->vendor_id));
7762 ctrl_info->vendor[sizeof(identify->vendor_id)] = '\0';
7763
7764 dev_info(&ctrl_info->pci_dev->dev,
7765 "Firmware version: %s\n", ctrl_info->firmware_version);
7766
7767 out:
7768 kfree(identify);
7769
7770 return rc;
7771 }
7772
7773 struct pqi_config_table_section_info {
7774 struct pqi_ctrl_info *ctrl_info;
7775 void *section;
7776 u32 section_offset;
7777 void __iomem *section_iomem_addr;
7778 };
7779
7780 static inline bool pqi_is_firmware_feature_supported(
7781 struct pqi_config_table_firmware_features *firmware_features,
7782 unsigned int bit_position)
7783 {
7784 unsigned int byte_index;
7785
7786 byte_index = bit_position / BITS_PER_BYTE;
7787
7788 if (byte_index >= le16_to_cpu(firmware_features->num_elements))
7789 return false;
7790
7791 return firmware_features->features_supported[byte_index] &
7792 (1 << (bit_position % BITS_PER_BYTE)) ? true : false;
7793 }
7794
7795 static inline bool pqi_is_firmware_feature_enabled(
7796 struct pqi_config_table_firmware_features *firmware_features,
7797 void __iomem *firmware_features_iomem_addr,
7798 unsigned int bit_position)
7799 {
7800 unsigned int byte_index;
7801 u8 __iomem *features_enabled_iomem_addr;
7802
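	/*
	 * The firmware features section carries three byte arrays of
	 * num_elements bytes each, back to back: supported, requested
	 * (host-written), and enabled. Skip the first two to index into
	 * the enabled bitmap.
	 */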
7803 byte_index = (bit_position / BITS_PER_BYTE) +
7804 (le16_to_cpu(firmware_features->num_elements) * 2);
7805
7806 features_enabled_iomem_addr = firmware_features_iomem_addr +
7807 offsetof(struct pqi_config_table_firmware_features,
7808 features_supported) + byte_index;
7809
7810 return *((__force u8 *)features_enabled_iomem_addr) &
7811 (1 << (bit_position % BITS_PER_BYTE)) ? true : false;
7812 }
7813
7814 static inline void pqi_request_firmware_feature(
7815 struct pqi_config_table_firmware_features *firmware_features,
7816 unsigned int bit_position)
7817 {
7818 unsigned int byte_index;
7819
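	/* The host-requested bitmap immediately follows the supported bitmap. */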
7820 byte_index = (bit_position / BITS_PER_BYTE) +
7821 le16_to_cpu(firmware_features->num_elements);
7822
7823 firmware_features->features_supported[byte_index] |=
7824 (1 << (bit_position % BITS_PER_BYTE));
7825 }
7826
7827 static int pqi_config_table_update(struct pqi_ctrl_info *ctrl_info,
7828 u16 first_section, u16 last_section)
7829 {
7830 struct pqi_vendor_general_request request;
7831
7832 memset(&request, 0, sizeof(request));
7833
7834 request.header.iu_type = PQI_REQUEST_IU_VENDOR_GENERAL;
7835 put_unaligned_le16(sizeof(request) - PQI_REQUEST_HEADER_LENGTH,
7836 &request.header.iu_length);
7837 put_unaligned_le16(PQI_VENDOR_GENERAL_CONFIG_TABLE_UPDATE,
7838 &request.function_code);
7839 put_unaligned_le16(first_section,
7840 &request.data.config_table_update.first_section);
7841 put_unaligned_le16(last_section,
7842 &request.data.config_table_update.last_section);
7843
7844 return pqi_submit_raid_request_synchronous(ctrl_info, &request.header, 0, NULL);
7845 }
7846
7847 static int pqi_enable_firmware_features(struct pqi_ctrl_info *ctrl_info,
7848 struct pqi_config_table_firmware_features *firmware_features,
7849 void __iomem *firmware_features_iomem_addr)
7850 {
7851 void *features_requested;
7852 void __iomem *features_requested_iomem_addr;
7853 void __iomem *host_max_known_feature_iomem_addr;
7854
7855 features_requested = firmware_features->features_supported +
7856 le16_to_cpu(firmware_features->num_elements);
7857
7858 features_requested_iomem_addr = firmware_features_iomem_addr +
7859 (features_requested - (void *)firmware_features);
7860
7861 memcpy_toio(features_requested_iomem_addr, features_requested,
7862 le16_to_cpu(firmware_features->num_elements));
7863
7864 if (pqi_is_firmware_feature_supported(firmware_features,
7865 PQI_FIRMWARE_FEATURE_MAX_KNOWN_FEATURE)) {
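		/*
		 * The 16-bit "host max known feature" field follows the
		 * requested bitmap; it is written a byte at a time, low
		 * byte first, which keeps the MMIO store little-endian
		 * regardless of the field's alignment.
		 */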
7866 host_max_known_feature_iomem_addr =
7867 features_requested_iomem_addr +
7868 (le16_to_cpu(firmware_features->num_elements) * 2) +
7869 sizeof(__le16);
7870 writeb(PQI_FIRMWARE_FEATURE_MAXIMUM & 0xFF, host_max_known_feature_iomem_addr);
7871 writeb((PQI_FIRMWARE_FEATURE_MAXIMUM & 0xFF00) >> 8, host_max_known_feature_iomem_addr + 1);
7872 }
7873
7874 return pqi_config_table_update(ctrl_info,
7875 PQI_CONFIG_TABLE_SECTION_FIRMWARE_FEATURES,
7876 PQI_CONFIG_TABLE_SECTION_FIRMWARE_FEATURES);
7877 }
7878
7879 struct pqi_firmware_feature {
7880 char *feature_name;
7881 unsigned int feature_bit;
7882 bool supported;
7883 bool enabled;
7884 void (*feature_status)(struct pqi_ctrl_info *ctrl_info,
7885 struct pqi_firmware_feature *firmware_feature);
7886 };
7887
7888 static void pqi_firmware_feature_status(struct pqi_ctrl_info *ctrl_info,
7889 struct pqi_firmware_feature *firmware_feature)
7890 {
7891 if (!firmware_feature->supported) {
7892 dev_info(&ctrl_info->pci_dev->dev, "%s not supported by controller\n",
7893 firmware_feature->feature_name);
7894 return;
7895 }
7896
7897 if (firmware_feature->enabled) {
7898 dev_info(&ctrl_info->pci_dev->dev,
7899 "%s enabled\n", firmware_feature->feature_name);
7900 return;
7901 }
7902
7903 dev_err(&ctrl_info->pci_dev->dev, "failed to enable %s\n",
7904 firmware_feature->feature_name);
7905 }
7906
7907 static void pqi_ctrl_update_feature_flags(struct pqi_ctrl_info *ctrl_info,
7908 struct pqi_firmware_feature *firmware_feature)
7909 {
7910 switch (firmware_feature->feature_bit) {
7911 case PQI_FIRMWARE_FEATURE_RAID_1_WRITE_BYPASS:
7912 ctrl_info->enable_r1_writes = firmware_feature->enabled;
7913 break;
7914 case PQI_FIRMWARE_FEATURE_RAID_5_WRITE_BYPASS:
7915 ctrl_info->enable_r5_writes = firmware_feature->enabled;
7916 break;
7917 case PQI_FIRMWARE_FEATURE_RAID_6_WRITE_BYPASS:
7918 ctrl_info->enable_r6_writes = firmware_feature->enabled;
7919 break;
7920 case PQI_FIRMWARE_FEATURE_SOFT_RESET_HANDSHAKE:
7921 ctrl_info->soft_reset_handshake_supported =
7922 firmware_feature->enabled &&
7923 pqi_read_soft_reset_status(ctrl_info);
7924 break;
7925 case PQI_FIRMWARE_FEATURE_RAID_IU_TIMEOUT:
7926 ctrl_info->raid_iu_timeout_supported = firmware_feature->enabled;
7927 break;
7928 case PQI_FIRMWARE_FEATURE_TMF_IU_TIMEOUT:
7929 ctrl_info->tmf_iu_timeout_supported = firmware_feature->enabled;
7930 break;
7931 case PQI_FIRMWARE_FEATURE_FW_TRIAGE:
7932 ctrl_info->firmware_triage_supported = firmware_feature->enabled;
7933 pqi_save_fw_triage_setting(ctrl_info, firmware_feature->enabled);
7934 break;
7935 case PQI_FIRMWARE_FEATURE_RPL_EXTENDED_FORMAT_4_5:
7936 ctrl_info->rpl_extended_format_4_5_supported = firmware_feature->enabled;
7937 break;
7938 case PQI_FIRMWARE_FEATURE_MULTI_LUN_DEVICE_SUPPORT:
7939 ctrl_info->multi_lun_device_supported = firmware_feature->enabled;
7940 break;
7941 case PQI_FIRMWARE_FEATURE_CTRL_LOGGING:
7942 ctrl_info->ctrl_logging_supported = firmware_feature->enabled;
7943 break;
7944 }
7945
7946 pqi_firmware_feature_status(ctrl_info, firmware_feature);
7947 }
7948
7949 static inline void pqi_firmware_feature_update(struct pqi_ctrl_info *ctrl_info,
7950 struct pqi_firmware_feature *firmware_feature)
7951 {
7952 if (firmware_feature->feature_status)
7953 firmware_feature->feature_status(ctrl_info, firmware_feature);
7954 }
7955
7956 static DEFINE_MUTEX(pqi_firmware_features_mutex);
7957
7958 static struct pqi_firmware_feature pqi_firmware_features[] = {
7959 {
7960 .feature_name = "Online Firmware Activation",
7961 .feature_bit = PQI_FIRMWARE_FEATURE_OFA,
7962 .feature_status = pqi_firmware_feature_status,
7963 },
7964 {
7965 .feature_name = "Serial Management Protocol",
7966 .feature_bit = PQI_FIRMWARE_FEATURE_SMP,
7967 .feature_status = pqi_firmware_feature_status,
7968 },
7969 {
7970 .feature_name = "Maximum Known Feature",
7971 .feature_bit = PQI_FIRMWARE_FEATURE_MAX_KNOWN_FEATURE,
7972 .feature_status = pqi_firmware_feature_status,
7973 },
7974 {
7975 .feature_name = "RAID 0 Read Bypass",
7976 .feature_bit = PQI_FIRMWARE_FEATURE_RAID_0_READ_BYPASS,
7977 .feature_status = pqi_firmware_feature_status,
7978 },
7979 {
7980 .feature_name = "RAID 1 Read Bypass",
7981 .feature_bit = PQI_FIRMWARE_FEATURE_RAID_1_READ_BYPASS,
7982 .feature_status = pqi_firmware_feature_status,
7983 },
7984 {
7985 .feature_name = "RAID 5 Read Bypass",
7986 .feature_bit = PQI_FIRMWARE_FEATURE_RAID_5_READ_BYPASS,
7987 .feature_status = pqi_firmware_feature_status,
7988 },
7989 {
7990 .feature_name = "RAID 6 Read Bypass",
7991 .feature_bit = PQI_FIRMWARE_FEATURE_RAID_6_READ_BYPASS,
7992 .feature_status = pqi_firmware_feature_status,
7993 },
7994 {
7995 .feature_name = "RAID 0 Write Bypass",
7996 .feature_bit = PQI_FIRMWARE_FEATURE_RAID_0_WRITE_BYPASS,
7997 .feature_status = pqi_firmware_feature_status,
7998 },
7999 {
8000 .feature_name = "RAID 1 Write Bypass",
8001 .feature_bit = PQI_FIRMWARE_FEATURE_RAID_1_WRITE_BYPASS,
8002 .feature_status = pqi_ctrl_update_feature_flags,
8003 },
8004 {
8005 .feature_name = "RAID 5 Write Bypass",
8006 .feature_bit = PQI_FIRMWARE_FEATURE_RAID_5_WRITE_BYPASS,
8007 .feature_status = pqi_ctrl_update_feature_flags,
8008 },
8009 {
8010 .feature_name = "RAID 6 Write Bypass",
8011 .feature_bit = PQI_FIRMWARE_FEATURE_RAID_6_WRITE_BYPASS,
8012 .feature_status = pqi_ctrl_update_feature_flags,
8013 },
8014 {
8015 .feature_name = "New Soft Reset Handshake",
8016 .feature_bit = PQI_FIRMWARE_FEATURE_SOFT_RESET_HANDSHAKE,
8017 .feature_status = pqi_ctrl_update_feature_flags,
8018 },
8019 {
8020 .feature_name = "RAID IU Timeout",
8021 .feature_bit = PQI_FIRMWARE_FEATURE_RAID_IU_TIMEOUT,
8022 .feature_status = pqi_ctrl_update_feature_flags,
8023 },
8024 {
8025 .feature_name = "TMF IU Timeout",
8026 .feature_bit = PQI_FIRMWARE_FEATURE_TMF_IU_TIMEOUT,
8027 .feature_status = pqi_ctrl_update_feature_flags,
8028 },
8029 {
8030 .feature_name = "RAID Bypass on encrypted logical volumes on NVMe",
8031 .feature_bit = PQI_FIRMWARE_FEATURE_RAID_BYPASS_ON_ENCRYPTED_NVME,
8032 .feature_status = pqi_firmware_feature_status,
8033 },
8034 {
8035 .feature_name = "Firmware Triage",
8036 .feature_bit = PQI_FIRMWARE_FEATURE_FW_TRIAGE,
8037 .feature_status = pqi_ctrl_update_feature_flags,
8038 },
8039 {
8040 .feature_name = "RPL Extended Formats 4 and 5",
8041 .feature_bit = PQI_FIRMWARE_FEATURE_RPL_EXTENDED_FORMAT_4_5,
8042 .feature_status = pqi_ctrl_update_feature_flags,
8043 },
8044 {
8045 .feature_name = "Multi-LUN Target",
8046 .feature_bit = PQI_FIRMWARE_FEATURE_MULTI_LUN_DEVICE_SUPPORT,
8047 .feature_status = pqi_ctrl_update_feature_flags,
8048 },
8049 {
8050 .feature_name = "Controller Data Logging",
8051 .feature_bit = PQI_FIRMWARE_FEATURE_CTRL_LOGGING,
8052 .feature_status = pqi_ctrl_update_feature_flags,
8053 },
8054 };
8055
8056 static void pqi_process_firmware_features(
8057 struct pqi_config_table_section_info *section_info)
8058 {
8059 int rc;
8060 struct pqi_ctrl_info *ctrl_info;
8061 struct pqi_config_table_firmware_features *firmware_features;
8062 void __iomem *firmware_features_iomem_addr;
8063 unsigned int i;
8064 unsigned int num_features_supported;
8065
8066 ctrl_info = section_info->ctrl_info;
8067 firmware_features = section_info->section;
8068 firmware_features_iomem_addr = section_info->section_iomem_addr;
8069
8070 for (i = 0, num_features_supported = 0;
8071 i < ARRAY_SIZE(pqi_firmware_features); i++) {
8072 if (pqi_is_firmware_feature_supported(firmware_features,
8073 pqi_firmware_features[i].feature_bit)) {
8074 pqi_firmware_features[i].supported = true;
8075 num_features_supported++;
8076 } else {
8077 pqi_firmware_feature_update(ctrl_info,
8078 &pqi_firmware_features[i]);
8079 }
8080 }
8081
8082 if (num_features_supported == 0)
8083 return;
8084
8085 for (i = 0; i < ARRAY_SIZE(pqi_firmware_features); i++) {
8086 if (!pqi_firmware_features[i].supported)
8087 continue;
8088 pqi_request_firmware_feature(firmware_features,
8089 pqi_firmware_features[i].feature_bit);
8090 }
8091
8092 rc = pqi_enable_firmware_features(ctrl_info, firmware_features,
8093 firmware_features_iomem_addr);
8094 if (rc) {
8095 dev_err(&ctrl_info->pci_dev->dev,
8096 "failed to enable firmware features in PQI configuration table\n");
8097 for (i = 0; i < ARRAY_SIZE(pqi_firmware_features); i++) {
8098 if (!pqi_firmware_features[i].supported)
8099 continue;
8100 pqi_firmware_feature_update(ctrl_info,
8101 &pqi_firmware_features[i]);
8102 }
8103 return;
8104 }
8105
8106 for (i = 0; i < ARRAY_SIZE(pqi_firmware_features); i++) {
8107 if (!pqi_firmware_features[i].supported)
8108 continue;
8109 if (pqi_is_firmware_feature_enabled(firmware_features,
8110 firmware_features_iomem_addr,
8111 pqi_firmware_features[i].feature_bit)) {
8112 pqi_firmware_features[i].enabled = true;
8113 }
8114 pqi_firmware_feature_update(ctrl_info,
8115 &pqi_firmware_features[i]);
8116 }
8117 }
8118
8119 static void pqi_init_firmware_features(void)
8120 {
8121 unsigned int i;
8122
8123 for (i = 0; i < ARRAY_SIZE(pqi_firmware_features); i++) {
8124 pqi_firmware_features[i].supported = false;
8125 pqi_firmware_features[i].enabled = false;
8126 }
8127 }
8128
8129 static void pqi_process_firmware_features_section(
8130 struct pqi_config_table_section_info *section_info)
8131 {
8132 mutex_lock(&pqi_firmware_features_mutex);
8133 pqi_init_firmware_features();
8134 pqi_process_firmware_features(section_info);
8135 mutex_unlock(&pqi_firmware_features_mutex);
8136 }
8137
8138 /*
8139 * Reset all controller settings that can be initialized during the processing
8140 * of the PQI Configuration Table.
8141 */
8142
8143 static void pqi_ctrl_reset_config(struct pqi_ctrl_info *ctrl_info)
8144 {
8145 ctrl_info->heartbeat_counter = NULL;
8146 ctrl_info->soft_reset_status = NULL;
8147 ctrl_info->soft_reset_handshake_supported = false;
8148 ctrl_info->enable_r1_writes = false;
8149 ctrl_info->enable_r5_writes = false;
8150 ctrl_info->enable_r6_writes = false;
8151 ctrl_info->raid_iu_timeout_supported = false;
8152 ctrl_info->tmf_iu_timeout_supported = false;
8153 ctrl_info->firmware_triage_supported = false;
8154 ctrl_info->rpl_extended_format_4_5_supported = false;
8155 ctrl_info->multi_lun_device_supported = false;
8156 ctrl_info->ctrl_logging_supported = false;
8157 }
8158
8159 static int pqi_process_config_table(struct pqi_ctrl_info *ctrl_info)
8160 {
8161 u32 table_length;
8162 u32 section_offset;
8163 bool firmware_feature_section_present;
8164 void __iomem *table_iomem_addr;
8165 struct pqi_config_table *config_table;
8166 struct pqi_config_table_section_header *section;
8167 struct pqi_config_table_section_info section_info;
8168 struct pqi_config_table_section_info feature_section_info = {0};
8169
8170 table_length = ctrl_info->config_table_length;
8171 if (table_length == 0)
8172 return 0;
8173
8174 config_table = kmalloc(table_length, GFP_KERNEL);
8175 if (!config_table) {
8176 dev_err(&ctrl_info->pci_dev->dev,
8177 "failed to allocate memory for PQI configuration table\n");
8178 return -ENOMEM;
8179 }
8180
8181 /*
8182 * Copy the config table contents from I/O memory space into the
8183 * temporary buffer.
8184 */
8185 table_iomem_addr = ctrl_info->iomem_base + ctrl_info->config_table_offset;
8186 memcpy_fromio(config_table, table_iomem_addr, table_length);
8187
8188 firmware_feature_section_present = false;
8189 section_info.ctrl_info = ctrl_info;
8190 section_offset = get_unaligned_le32(&config_table->first_section_offset);
8191
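	/*
	 * Sections form a singly linked list of offsets relative to the
	 * start of the config table; an offset of zero terminates it.
	 */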
8192 while (section_offset) {
8193 section = (void *)config_table + section_offset;
8194
8195 section_info.section = section;
8196 section_info.section_offset = section_offset;
8197 section_info.section_iomem_addr = table_iomem_addr + section_offset;
8198
8199 switch (get_unaligned_le16(&section->section_id)) {
8200 case PQI_CONFIG_TABLE_SECTION_FIRMWARE_FEATURES:
8201 firmware_feature_section_present = true;
8202 feature_section_info = section_info;
8203 break;
8204 case PQI_CONFIG_TABLE_SECTION_HEARTBEAT:
8205 if (pqi_disable_heartbeat)
8206 dev_warn(&ctrl_info->pci_dev->dev,
8207 "heartbeat disabled by module parameter\n");
8208 else
8209 ctrl_info->heartbeat_counter =
8210 table_iomem_addr +
8211 section_offset +
8212 offsetof(struct pqi_config_table_heartbeat,
8213 heartbeat_counter);
8214 break;
8215 case PQI_CONFIG_TABLE_SECTION_SOFT_RESET:
8216 ctrl_info->soft_reset_status =
8217 table_iomem_addr +
8218 section_offset +
8219 offsetof(struct pqi_config_table_soft_reset,
8220 soft_reset_status);
8221 break;
8222 }
8223
8224 section_offset = get_unaligned_le16(&section->next_section_offset);
8225 }
8226
8227 /*
8228 * We process the firmware feature section after all other sections
8229 * have been processed so that the feature bit callbacks can take
8230 * into account the settings configured by other sections.
8231 */
8232 if (firmware_feature_section_present)
8233 pqi_process_firmware_features_section(&feature_section_info);
8234
8235 kfree(config_table);
8236
8237 return 0;
8238 }
8239
8240 /* Switches the controller from PQI mode back into SIS mode. */
8241
8242 static int pqi_revert_to_sis_mode(struct pqi_ctrl_info *ctrl_info)
8243 {
8244 int rc;
8245
8246 pqi_change_irq_mode(ctrl_info, IRQ_MODE_NONE);
8247 rc = pqi_reset(ctrl_info);
8248 if (rc)
8249 return rc;
8250 rc = sis_reenable_sis_mode(ctrl_info);
8251 if (rc) {
8252 dev_err(&ctrl_info->pci_dev->dev,
8253 "re-enabling SIS mode failed with error %d\n", rc);
8254 return rc;
8255 }
8256 pqi_save_ctrl_mode(ctrl_info, SIS_MODE);
8257
8258 return 0;
8259 }
8260
8261 /*
8262 * If the controller isn't already in SIS mode, this function forces it into
8263 * SIS mode.
8264 */
8265
8266 static int pqi_force_sis_mode(struct pqi_ctrl_info *ctrl_info)
8267 {
8268 if (!sis_is_firmware_running(ctrl_info))
8269 return -ENXIO;
8270
8271 if (pqi_get_ctrl_mode(ctrl_info) == SIS_MODE)
8272 return 0;
8273
8274 if (sis_is_kernel_up(ctrl_info)) {
8275 pqi_save_ctrl_mode(ctrl_info, SIS_MODE);
8276 return 0;
8277 }
8278
8279 return pqi_revert_to_sis_mode(ctrl_info);
8280 }
8281
8282 static void pqi_perform_lockup_action(void)
8283 {
8284 switch (pqi_lockup_action) {
8285 case PANIC:
8286 panic("FATAL: Smart Family Controller lockup detected");
8287 break;
8288 case REBOOT:
8289 emergency_restart();
8290 break;
8291 case NONE:
8292 default:
8293 break;
8294 }
8295 }
8296
8297 #define PQI_CTRL_LOG_TOTAL_SIZE (4 * 1024 * 1024)
8298 #define PQI_CTRL_LOG_MIN_SIZE (PQI_CTRL_LOG_TOTAL_SIZE / PQI_HOST_MAX_SG_DESCRIPTORS)
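/*
 * Sizing sketch (illustrative numbers): with a 4 MiB total and, say,
 * 64 host SG descriptors, the smallest acceptable chunk comes out to
 * 4 MiB / 64 = 64 KiB per descriptor.
 */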
8299
8300 static int pqi_ctrl_init(struct pqi_ctrl_info *ctrl_info)
8301 {
8302 int rc;
8303 u32 product_id;
8304
8305 if (reset_devices) {
8306 if (is_kdump_kernel() && pqi_is_fw_triage_supported(ctrl_info)) {
8307 rc = sis_wait_for_fw_triage_completion(ctrl_info);
8308 if (rc)
8309 return rc;
8310 }
8311 if (is_kdump_kernel() && sis_is_ctrl_logging_supported(ctrl_info)) {
8312 sis_notify_kdump(ctrl_info);
8313 rc = sis_wait_for_ctrl_logging_completion(ctrl_info);
8314 if (rc)
8315 return rc;
8316 }
8317 sis_soft_reset(ctrl_info);
8318 ssleep(PQI_POST_RESET_DELAY_SECS);
8319 } else {
8320 rc = pqi_force_sis_mode(ctrl_info);
8321 if (rc)
8322 return rc;
8323 }
8324
8325 /*
8326 * Wait until the controller is ready to start accepting SIS
8327 * commands.
8328 */
8329 rc = sis_wait_for_ctrl_ready(ctrl_info);
8330 if (rc) {
8331 if (reset_devices) {
8332 dev_err(&ctrl_info->pci_dev->dev,
8333 "kdump init failed with error %d\n", rc);
8334 pqi_lockup_action = REBOOT;
8335 pqi_perform_lockup_action();
8336 }
8337 return rc;
8338 }
8339
8340 /*
8341 * Get the controller properties. This allows us to determine
8342 * whether or not it supports PQI mode.
8343 */
8344 rc = sis_get_ctrl_properties(ctrl_info);
8345 if (rc) {
8346 dev_err(&ctrl_info->pci_dev->dev,
8347 "error obtaining controller properties\n");
8348 return rc;
8349 }
8350
8351 rc = sis_get_pqi_capabilities(ctrl_info);
8352 if (rc) {
8353 dev_err(&ctrl_info->pci_dev->dev,
8354 "error obtaining controller capabilities\n");
8355 return rc;
8356 }
8357
8358 product_id = sis_get_product_id(ctrl_info);
8359 ctrl_info->product_id = (u8)product_id;
8360 ctrl_info->product_revision = (u8)(product_id >> 8);
8361
8362 if (is_kdump_kernel()) {
8363 if (ctrl_info->max_outstanding_requests >
8364 PQI_MAX_OUTSTANDING_REQUESTS_KDUMP)
8365 ctrl_info->max_outstanding_requests =
8366 PQI_MAX_OUTSTANDING_REQUESTS_KDUMP;
8367 } else {
8368 if (ctrl_info->max_outstanding_requests >
8369 PQI_MAX_OUTSTANDING_REQUESTS)
8370 ctrl_info->max_outstanding_requests =
8371 PQI_MAX_OUTSTANDING_REQUESTS;
8372 }
8373
8374 pqi_calculate_io_resources(ctrl_info);
8375
8376 rc = pqi_alloc_error_buffer(ctrl_info);
8377 if (rc) {
8378 dev_err(&ctrl_info->pci_dev->dev,
8379 "failed to allocate PQI error buffer\n");
8380 return rc;
8381 }
8382
8383 /*
8384 * If the function we are about to call succeeds, the
8385 * controller will transition from legacy SIS mode
8386 * into PQI mode.
8387 */
8388 rc = sis_init_base_struct_addr(ctrl_info);
8389 if (rc) {
8390 dev_err(&ctrl_info->pci_dev->dev,
8391 "error initializing PQI mode\n");
8392 return rc;
8393 }
8394
8395 /* Wait for the controller to complete the SIS -> PQI transition. */
8396 rc = pqi_wait_for_pqi_mode_ready(ctrl_info);
8397 if (rc) {
8398 dev_err(&ctrl_info->pci_dev->dev,
8399 "transition to PQI mode failed\n");
8400 return rc;
8401 }
8402
8403 /* From here on, we are running in PQI mode. */
8404 ctrl_info->pqi_mode_enabled = true;
8405 pqi_save_ctrl_mode(ctrl_info, PQI_MODE);
8406
8407 rc = pqi_alloc_admin_queues(ctrl_info);
8408 if (rc) {
8409 dev_err(&ctrl_info->pci_dev->dev,
8410 "failed to allocate admin queues\n");
8411 return rc;
8412 }
8413
8414 rc = pqi_create_admin_queues(ctrl_info);
8415 if (rc) {
8416 dev_err(&ctrl_info->pci_dev->dev,
8417 "error creating admin queues\n");
8418 return rc;
8419 }
8420
8421 rc = pqi_report_device_capability(ctrl_info);
8422 if (rc) {
8423 dev_err(&ctrl_info->pci_dev->dev,
8424 "obtaining device capability failed\n");
8425 return rc;
8426 }
8427
8428 rc = pqi_validate_device_capability(ctrl_info);
8429 if (rc)
8430 return rc;
8431
8432 pqi_calculate_queue_resources(ctrl_info);
8433
8434 rc = pqi_enable_msix_interrupts(ctrl_info);
8435 if (rc)
8436 return rc;
8437
8438 if (ctrl_info->num_msix_vectors_enabled < ctrl_info->num_queue_groups) {
8439 ctrl_info->max_msix_vectors =
8440 ctrl_info->num_msix_vectors_enabled;
8441 pqi_calculate_queue_resources(ctrl_info);
8442 }
8443
8444 rc = pqi_alloc_io_resources(ctrl_info);
8445 if (rc)
8446 return rc;
8447
8448 rc = pqi_alloc_operational_queues(ctrl_info);
8449 if (rc) {
8450 dev_err(&ctrl_info->pci_dev->dev,
8451 "failed to allocate operational queues\n");
8452 return rc;
8453 }
8454
8455 pqi_init_operational_queues(ctrl_info);
8456
8457 rc = pqi_create_queues(ctrl_info);
8458 if (rc)
8459 return rc;
8460
8461 rc = pqi_request_irqs(ctrl_info);
8462 if (rc)
8463 return rc;
8464
8465 pqi_change_irq_mode(ctrl_info, IRQ_MODE_MSIX);
8466
8467 ctrl_info->controller_online = true;
8468
8469 rc = pqi_process_config_table(ctrl_info);
8470 if (rc)
8471 return rc;
8472
8473 pqi_start_heartbeat_timer(ctrl_info);
8474
8475 if (ctrl_info->enable_r5_writes || ctrl_info->enable_r6_writes) {
8476 rc = pqi_get_advanced_raid_bypass_config(ctrl_info);
8477 if (rc) { /* Supported features not returned correctly. */
8478 dev_err(&ctrl_info->pci_dev->dev,
8479 "error obtaining advanced RAID bypass configuration\n");
8480 return rc;
8481 }
8482 ctrl_info->ciss_report_log_flags |=
8483 CISS_REPORT_LOG_FLAG_DRIVE_TYPE_MIX;
8484 }
8485
8486 rc = pqi_enable_events(ctrl_info);
8487 if (rc) {
8488 dev_err(&ctrl_info->pci_dev->dev,
8489 "error enabling events\n");
8490 return rc;
8491 }
8492
8493 /* Register with the SCSI subsystem. */
8494 rc = pqi_register_scsi(ctrl_info);
8495 if (rc)
8496 return rc;
8497
8498 if (ctrl_info->ctrl_logging_supported && !is_kdump_kernel()) {
8499 pqi_host_setup_buffer(ctrl_info, &ctrl_info->ctrl_log_memory, PQI_CTRL_LOG_TOTAL_SIZE, PQI_CTRL_LOG_MIN_SIZE);
8500 pqi_host_memory_update(ctrl_info, &ctrl_info->ctrl_log_memory, PQI_VENDOR_GENERAL_CTRL_LOG_MEMORY_UPDATE);
8501 }
8502
8503 rc = pqi_get_ctrl_product_details(ctrl_info);
8504 if (rc) {
8505 dev_err(&ctrl_info->pci_dev->dev,
8506 "error obtaining product details\n");
8507 return rc;
8508 }
8509
8510 rc = pqi_get_ctrl_serial_number(ctrl_info);
8511 if (rc) {
8512 dev_err(&ctrl_info->pci_dev->dev,
8513 "error obtaining ctrl serial number\n");
8514 return rc;
8515 }
8516
8517 rc = pqi_set_diag_rescan(ctrl_info);
8518 if (rc) {
8519 dev_err(&ctrl_info->pci_dev->dev,
8520 "error enabling multi-lun rescan\n");
8521 return rc;
8522 }
8523
8524 rc = pqi_write_driver_version_to_host_wellness(ctrl_info);
8525 if (rc) {
8526 dev_err(&ctrl_info->pci_dev->dev,
8527 "error updating host wellness\n");
8528 return rc;
8529 }
8530
8531 pqi_schedule_update_time_worker(ctrl_info);
8532
8533 pqi_scan_scsi_devices(ctrl_info);
8534
8535 return 0;
8536 }
8537
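/*
 * Reset the driver's cached producer/consumer indices and zero the
 * shared queue index locations for the admin, I/O, and event queues
 * so they can be re-created cleanly on resume.
 */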
8538 static void pqi_reinit_queues(struct pqi_ctrl_info *ctrl_info)
8539 {
8540 unsigned int i;
8541 struct pqi_admin_queues *admin_queues;
8542 struct pqi_event_queue *event_queue;
8543
8544 admin_queues = &ctrl_info->admin_queues;
8545 admin_queues->iq_pi_copy = 0;
8546 admin_queues->oq_ci_copy = 0;
8547 writel(0, admin_queues->oq_pi);
8548
8549 for (i = 0; i < ctrl_info->num_queue_groups; i++) {
8550 ctrl_info->queue_groups[i].iq_pi_copy[RAID_PATH] = 0;
8551 ctrl_info->queue_groups[i].iq_pi_copy[AIO_PATH] = 0;
8552 ctrl_info->queue_groups[i].oq_ci_copy = 0;
8553
8554 writel(0, ctrl_info->queue_groups[i].iq_ci[RAID_PATH]);
8555 writel(0, ctrl_info->queue_groups[i].iq_ci[AIO_PATH]);
8556 writel(0, ctrl_info->queue_groups[i].oq_pi);
8557 }
8558
8559 event_queue = &ctrl_info->event_queue;
8560 writel(0, event_queue->oq_pi);
8561 event_queue->oq_ci_copy = 0;
8562 }
8563
8564 static int pqi_ctrl_init_resume(struct pqi_ctrl_info *ctrl_info)
8565 {
8566 int rc;
8567
8568 rc = pqi_force_sis_mode(ctrl_info);
8569 if (rc)
8570 return rc;
8571
8572 /*
8573 * Wait until the controller is ready to start accepting SIS
8574 * commands.
8575 */
8576 rc = sis_wait_for_ctrl_ready_resume(ctrl_info);
8577 if (rc)
8578 return rc;
8579
8580 /*
8581 * Get the controller properties. This allows us to determine
8582 * whether or not it supports PQI mode.
8583 */
8584 rc = sis_get_ctrl_properties(ctrl_info);
8585 if (rc) {
8586 dev_err(&ctrl_info->pci_dev->dev,
8587 "error obtaining controller properties\n");
8588 return rc;
8589 }
8590
8591 rc = sis_get_pqi_capabilities(ctrl_info);
8592 if (rc) {
8593 dev_err(&ctrl_info->pci_dev->dev,
8594 "error obtaining controller capabilities\n");
8595 return rc;
8596 }
8597
8598 /*
8599 * If the function we are about to call succeeds, the
8600 * controller will transition from legacy SIS mode
8601 * into PQI mode.
8602 */
8603 rc = sis_init_base_struct_addr(ctrl_info);
8604 if (rc) {
8605 dev_err(&ctrl_info->pci_dev->dev,
8606 "error initializing PQI mode\n");
8607 return rc;
8608 }
8609
8610 /* Wait for the controller to complete the SIS -> PQI transition. */
8611 rc = pqi_wait_for_pqi_mode_ready(ctrl_info);
8612 if (rc) {
8613 dev_err(&ctrl_info->pci_dev->dev,
8614 "transition to PQI mode failed\n");
8615 return rc;
8616 }
8617
8618 /* From here on, we are running in PQI mode. */
8619 ctrl_info->pqi_mode_enabled = true;
8620 pqi_save_ctrl_mode(ctrl_info, PQI_MODE);
8621
8622 pqi_reinit_queues(ctrl_info);
8623
8624 rc = pqi_create_admin_queues(ctrl_info);
8625 if (rc) {
8626 dev_err(&ctrl_info->pci_dev->dev,
8627 "error creating admin queues\n");
8628 return rc;
8629 }
8630
8631 rc = pqi_create_queues(ctrl_info);
8632 if (rc)
8633 return rc;
8634
8635 pqi_change_irq_mode(ctrl_info, IRQ_MODE_MSIX);
8636
8637 ctrl_info->controller_online = true;
8638 pqi_ctrl_unblock_requests(ctrl_info);
8639
8640 pqi_ctrl_reset_config(ctrl_info);
8641
8642 rc = pqi_process_config_table(ctrl_info);
8643 if (rc)
8644 return rc;
8645
8646 pqi_start_heartbeat_timer(ctrl_info);
8647
8648 if (ctrl_info->enable_r5_writes || ctrl_info->enable_r6_writes) {
8649 rc = pqi_get_advanced_raid_bypass_config(ctrl_info);
8650 if (rc) {
8651 dev_err(&ctrl_info->pci_dev->dev,
8652 "error obtaining advanced RAID bypass configuration\n");
8653 return rc;
8654 }
8655 ctrl_info->ciss_report_log_flags |=
8656 CISS_REPORT_LOG_FLAG_DRIVE_TYPE_MIX;
8657 }
8658
8659 rc = pqi_enable_events(ctrl_info);
8660 if (rc) {
8661 dev_err(&ctrl_info->pci_dev->dev,
8662 "error enabling events\n");
8663 return rc;
8664 }
8665
8666 rc = pqi_get_ctrl_product_details(ctrl_info);
8667 if (rc) {
8668 dev_err(&ctrl_info->pci_dev->dev,
8669 "error obtaining product details\n");
8670 return rc;
8671 }
8672
8673 rc = pqi_set_diag_rescan(ctrl_info);
8674 if (rc) {
8675 dev_err(&ctrl_info->pci_dev->dev,
8676 "error enabling multi-lun rescan\n");
8677 return rc;
8678 }
8679
8680 rc = pqi_write_driver_version_to_host_wellness(ctrl_info);
8681 if (rc) {
8682 dev_err(&ctrl_info->pci_dev->dev,
8683 "error updating host wellness\n");
8684 return rc;
8685 }
8686
8687 if (pqi_ofa_in_progress(ctrl_info)) {
8688 pqi_ctrl_unblock_scan(ctrl_info);
8689 if (ctrl_info->ctrl_logging_supported) {
8690 if (!ctrl_info->ctrl_log_memory.host_memory)
8691 pqi_host_setup_buffer(ctrl_info,
8692 &ctrl_info->ctrl_log_memory,
8693 PQI_CTRL_LOG_TOTAL_SIZE,
8694 PQI_CTRL_LOG_MIN_SIZE);
8695 pqi_host_memory_update(ctrl_info,
8696 &ctrl_info->ctrl_log_memory, PQI_VENDOR_GENERAL_CTRL_LOG_MEMORY_UPDATE);
8697 } else {
8698 if (ctrl_info->ctrl_log_memory.host_memory)
8699 pqi_host_free_buffer(ctrl_info,
8700 &ctrl_info->ctrl_log_memory);
8701 }
8702 }
8703
8704 pqi_scan_scsi_devices(ctrl_info);
8705
8706 return 0;
8707 }
8708
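/*
 * Program the PCIe completion timeout range in the Device Control 2
 * register. pcie_capability_clear_and_set_word() returns a PCIBIOS_*
 * code, so it is translated to a standard errno for the caller.
 */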
8709 static inline int pqi_set_pcie_completion_timeout(struct pci_dev *pci_dev, u16 timeout)
8710 {
8711 int rc;
8712
8713 rc = pcie_capability_clear_and_set_word(pci_dev, PCI_EXP_DEVCTL2,
8714 PCI_EXP_DEVCTL2_COMP_TIMEOUT, timeout);
8715
8716 return pcibios_err_to_errno(rc);
8717 }
8718
8719 static int pqi_pci_init(struct pqi_ctrl_info *ctrl_info)
8720 {
8721 int rc;
8722 u64 mask;
8723
8724 rc = pci_enable_device(ctrl_info->pci_dev);
8725 if (rc) {
8726 dev_err(&ctrl_info->pci_dev->dev,
8727 "failed to enable PCI device\n");
8728 return rc;
8729 }
8730
8731 if (sizeof(dma_addr_t) > 4)
8732 mask = DMA_BIT_MASK(64);
8733 else
8734 mask = DMA_BIT_MASK(32);
8735
8736 rc = dma_set_mask_and_coherent(&ctrl_info->pci_dev->dev, mask);
8737 if (rc) {
8738 dev_err(&ctrl_info->pci_dev->dev, "failed to set DMA mask\n");
8739 goto disable_device;
8740 }
8741
8742 rc = pci_request_regions(ctrl_info->pci_dev, DRIVER_NAME_SHORT);
8743 if (rc) {
8744 dev_err(&ctrl_info->pci_dev->dev,
8745 "failed to obtain PCI resources\n");
8746 goto disable_device;
8747 }
8748
8749 ctrl_info->iomem_base = ioremap(pci_resource_start(
8750 ctrl_info->pci_dev, 0),
8751 pci_resource_len(ctrl_info->pci_dev, 0));
8752 if (!ctrl_info->iomem_base) {
8753 dev_err(&ctrl_info->pci_dev->dev,
8754 "failed to map memory for controller registers\n");
8755 rc = -ENOMEM;
8756 goto release_regions;
8757 }
8758
8759 #define PCI_EXP_COMP_TIMEOUT_65_TO_210_MS 0x6
8760
8761 /* Increase the PCIe completion timeout. */
8762 rc = pqi_set_pcie_completion_timeout(ctrl_info->pci_dev,
8763 PCI_EXP_COMP_TIMEOUT_65_TO_210_MS);
8764 if (rc) {
8765 dev_err(&ctrl_info->pci_dev->dev,
8766 "failed to set PCIe completion timeout\n");
8767 goto release_regions;
8768 }
8769
8770 /* Enable bus mastering. */
8771 pci_set_master(ctrl_info->pci_dev);
8772
8773 ctrl_info->registers = ctrl_info->iomem_base;
8774 ctrl_info->pqi_registers = &ctrl_info->registers->pqi_registers;
8775
8776 pci_set_drvdata(ctrl_info->pci_dev, ctrl_info);
8777
8778 return 0;
8779
8780 release_regions:
8781 pci_release_regions(ctrl_info->pci_dev);
8782 disable_device:
8783 pci_disable_device(ctrl_info->pci_dev);
8784
8785 return rc;
8786 }
8787
8788 static void pqi_cleanup_pci_init(struct pqi_ctrl_info *ctrl_info)
8789 {
8790 iounmap(ctrl_info->iomem_base);
8791 pci_release_regions(ctrl_info->pci_dev);
8792 if (pci_is_enabled(ctrl_info->pci_dev))
8793 pci_disable_device(ctrl_info->pci_dev);
8794 pci_set_drvdata(ctrl_info->pci_dev, NULL);
8795 }
8796
8797 static struct pqi_ctrl_info *pqi_alloc_ctrl_info(int numa_node)
8798 {
8799 struct pqi_ctrl_info *ctrl_info;
8800
8801 ctrl_info = kzalloc_node(sizeof(struct pqi_ctrl_info),
8802 GFP_KERNEL, numa_node);
8803 if (!ctrl_info)
8804 return NULL;
8805
8806 mutex_init(&ctrl_info->scan_mutex);
8807 mutex_init(&ctrl_info->lun_reset_mutex);
8808 mutex_init(&ctrl_info->ofa_mutex);
8809
8810 INIT_LIST_HEAD(&ctrl_info->scsi_device_list);
8811 spin_lock_init(&ctrl_info->scsi_device_list_lock);
8812
8813 INIT_WORK(&ctrl_info->event_work, pqi_event_worker);
8814 atomic_set(&ctrl_info->num_interrupts, 0);
8815
8816 INIT_DELAYED_WORK(&ctrl_info->rescan_work, pqi_rescan_worker);
8817 INIT_DELAYED_WORK(&ctrl_info->update_time_work, pqi_update_time_worker);
8818
8819 timer_setup(&ctrl_info->heartbeat_timer, pqi_heartbeat_timer_handler, 0);
8820 INIT_WORK(&ctrl_info->ctrl_offline_work, pqi_ctrl_offline_worker);
8821
8822 INIT_WORK(&ctrl_info->ofa_memory_alloc_work, pqi_ofa_memory_alloc_worker);
8823 INIT_WORK(&ctrl_info->ofa_quiesce_work, pqi_ofa_quiesce_worker);
8824
8825 sema_init(&ctrl_info->sync_request_sem,
8826 PQI_RESERVED_IO_SLOTS_SYNCHRONOUS_REQUESTS);
8827 init_waitqueue_head(&ctrl_info->block_requests_wait);
8828
8829 ctrl_info->ctrl_id = atomic_inc_return(&pqi_controller_count) - 1;
8830 ctrl_info->irq_mode = IRQ_MODE_NONE;
8831 ctrl_info->max_msix_vectors = PQI_MAX_MSIX_VECTORS;
8832
8833 ctrl_info->ciss_report_log_flags = CISS_REPORT_LOG_FLAG_UNIQUE_LUN_ID;
8834 ctrl_info->max_transfer_encrypted_sas_sata =
8835 PQI_DEFAULT_MAX_TRANSFER_ENCRYPTED_SAS_SATA;
8836 ctrl_info->max_transfer_encrypted_nvme =
8837 PQI_DEFAULT_MAX_TRANSFER_ENCRYPTED_NVME;
8838 ctrl_info->max_write_raid_5_6 = PQI_DEFAULT_MAX_WRITE_RAID_5_6;
8839 ctrl_info->max_write_raid_1_10_2drive = ~0;
8840 ctrl_info->max_write_raid_1_10_3drive = ~0;
8841 ctrl_info->disable_managed_interrupts = pqi_disable_managed_interrupts;
8842
8843 return ctrl_info;
8844 }
8845
8846 static inline void pqi_free_ctrl_info(struct pqi_ctrl_info *ctrl_info)
8847 {
8848 kfree(ctrl_info);
8849 }
8850
8851 static void pqi_free_interrupts(struct pqi_ctrl_info *ctrl_info)
8852 {
8853 pqi_free_irqs(ctrl_info);
8854 pqi_disable_msix_interrupts(ctrl_info);
8855 }
8856
8857 static void pqi_free_ctrl_resources(struct pqi_ctrl_info *ctrl_info)
8858 {
8859 pqi_free_interrupts(ctrl_info);
8860 if (ctrl_info->queue_memory_base)
8861 dma_free_coherent(&ctrl_info->pci_dev->dev,
8862 ctrl_info->queue_memory_length,
8863 ctrl_info->queue_memory_base,
8864 ctrl_info->queue_memory_base_dma_handle);
8865 if (ctrl_info->admin_queue_memory_base)
8866 dma_free_coherent(&ctrl_info->pci_dev->dev,
8867 ctrl_info->admin_queue_memory_length,
8868 ctrl_info->admin_queue_memory_base,
8869 ctrl_info->admin_queue_memory_base_dma_handle);
8870 pqi_free_all_io_requests(ctrl_info);
8871 if (ctrl_info->error_buffer)
8872 dma_free_coherent(&ctrl_info->pci_dev->dev,
8873 ctrl_info->error_buffer_length,
8874 ctrl_info->error_buffer,
8875 ctrl_info->error_buffer_dma_handle);
8876 if (ctrl_info->iomem_base)
8877 pqi_cleanup_pci_init(ctrl_info);
8878 pqi_free_ctrl_info(ctrl_info);
8879 }
8880
8881 static void pqi_remove_ctrl(struct pqi_ctrl_info *ctrl_info)
8882 {
8883 ctrl_info->controller_online = false;
8884 pqi_stop_heartbeat_timer(ctrl_info);
8885 pqi_ctrl_block_requests(ctrl_info);
8886 pqi_cancel_rescan_worker(ctrl_info);
8887 pqi_cancel_update_time_worker(ctrl_info);
8888 if (ctrl_info->ctrl_removal_state == PQI_CTRL_SURPRISE_REMOVAL) {
8889 pqi_fail_all_outstanding_requests(ctrl_info);
8890 ctrl_info->pqi_mode_enabled = false;
8891 }
8892 pqi_host_free_buffer(ctrl_info, &ctrl_info->ctrl_log_memory);
8893 pqi_unregister_scsi(ctrl_info);
8894 if (ctrl_info->pqi_mode_enabled)
8895 pqi_revert_to_sis_mode(ctrl_info);
8896 pqi_free_ctrl_resources(ctrl_info);
8897 }
8898
8899 static void pqi_ofa_ctrl_quiesce(struct pqi_ctrl_info *ctrl_info)
8900 {
8901 pqi_ctrl_block_scan(ctrl_info);
8902 pqi_scsi_block_requests(ctrl_info);
8903 pqi_ctrl_block_device_reset(ctrl_info);
8904 pqi_ctrl_block_requests(ctrl_info);
8905 pqi_ctrl_wait_until_quiesced(ctrl_info);
8906 pqi_stop_heartbeat_timer(ctrl_info);
8907 }
8908
8909 static void pqi_ofa_ctrl_unquiesce(struct pqi_ctrl_info *ctrl_info)
8910 {
8911 pqi_start_heartbeat_timer(ctrl_info);
8912 pqi_ctrl_unblock_requests(ctrl_info);
8913 pqi_ctrl_unblock_device_reset(ctrl_info);
8914 pqi_scsi_unblock_requests(ctrl_info);
8915 pqi_ctrl_unblock_scan(ctrl_info);
8916 }
8917
8918 static int pqi_ofa_ctrl_restart(struct pqi_ctrl_info *ctrl_info, unsigned int delay_secs)
8919 {
8920 ssleep(delay_secs);
8921
8922 return pqi_ctrl_init_resume(ctrl_info);
8923 }
8924
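/*
 * Allocate the firmware-usable host buffer as sg_count coherent DMA
 * chunks of chunk_size bytes each and record them in the host memory
 * descriptor's SG list; on failure, free any chunks already allocated.
 */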
8925 static int pqi_host_alloc_mem(struct pqi_ctrl_info *ctrl_info,
8926 struct pqi_host_memory_descriptor *host_memory_descriptor,
8927 u32 total_size, u32 chunk_size)
8928 {
8929 int i;
8930 u32 sg_count;
8931 struct device *dev;
8932 struct pqi_host_memory *host_memory;
8933 struct pqi_sg_descriptor *mem_descriptor;
8934 dma_addr_t dma_handle;
8935
8936 sg_count = DIV_ROUND_UP(total_size, chunk_size);
8937 if (sg_count == 0 || sg_count > PQI_HOST_MAX_SG_DESCRIPTORS)
8938 goto out;
8939
8940 host_memory_descriptor->host_chunk_virt_address = kmalloc(sg_count * sizeof(void *), GFP_KERNEL);
8941 if (!host_memory_descriptor->host_chunk_virt_address)
8942 goto out;
8943
8944 dev = &ctrl_info->pci_dev->dev;
8945 host_memory = host_memory_descriptor->host_memory;
8946
8947 for (i = 0; i < sg_count; i++) {
8948 host_memory_descriptor->host_chunk_virt_address[i] = dma_alloc_coherent(dev, chunk_size, &dma_handle, GFP_KERNEL);
8949 if (!host_memory_descriptor->host_chunk_virt_address[i])
8950 goto out_free_chunks;
8951 mem_descriptor = &host_memory->sg_descriptor[i];
8952 put_unaligned_le64((u64)dma_handle, &mem_descriptor->address);
8953 put_unaligned_le32(chunk_size, &mem_descriptor->length);
8954 }
8955
8956 put_unaligned_le32(CISS_SG_LAST, &mem_descriptor->flags);
8957 put_unaligned_le16(sg_count, &host_memory->num_memory_descriptors);
8958 put_unaligned_le32(sg_count * chunk_size, &host_memory->bytes_allocated);
8959
8960 return 0;
8961
8962 out_free_chunks:
8963 while (--i >= 0) {
8964 mem_descriptor = &host_memory->sg_descriptor[i];
8965 dma_free_coherent(dev, chunk_size,
8966 host_memory_descriptor->host_chunk_virt_address[i],
8967 get_unaligned_le64(&mem_descriptor->address));
8968 }
8969 kfree(host_memory_descriptor->host_chunk_virt_address);
8970 out:
8971 return -ENOMEM;
8972 }
8973
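/*
 * Back-off allocation strategy: start with one chunk covering the whole
 * request, halve the chunk size down to the per-descriptor minimum, and,
 * if that still fails, halve the total size and retry until the total
 * drops below min_required_size. For example (illustrative numbers), a
 * 4 MiB request might be attempted as 1 x 4 MiB, then 2 x 2 MiB, and so
 * on, before falling back to a 2 MiB total.
 */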
8974 static int pqi_host_alloc_buffer(struct pqi_ctrl_info *ctrl_info,
8975 struct pqi_host_memory_descriptor *host_memory_descriptor,
8976 u32 total_required_size, u32 min_required_size)
8977 {
8978 u32 chunk_size;
8979 u32 min_chunk_size;
8980
8981 if (total_required_size == 0 || min_required_size == 0)
8982 return 0;
8983
8984 total_required_size = PAGE_ALIGN(total_required_size);
8985 min_required_size = PAGE_ALIGN(min_required_size);
8986 min_chunk_size = DIV_ROUND_UP(total_required_size, PQI_HOST_MAX_SG_DESCRIPTORS);
8987 min_chunk_size = PAGE_ALIGN(min_chunk_size);
8988
8989 while (total_required_size >= min_required_size) {
8990 for (chunk_size = total_required_size; chunk_size >= min_chunk_size;) {
8991 if (pqi_host_alloc_mem(ctrl_info,
8992 host_memory_descriptor, total_required_size,
8993 chunk_size) == 0)
8994 return 0;
8995 chunk_size /= 2;
8996 chunk_size = PAGE_ALIGN(chunk_size);
8997 }
8998 total_required_size /= 2;
8999 total_required_size = PAGE_ALIGN(total_required_size);
9000 }
9001
9002 return -ENOMEM;
9003 }
9004
9005 static void pqi_host_setup_buffer(struct pqi_ctrl_info *ctrl_info,
9006 struct pqi_host_memory_descriptor *host_memory_descriptor,
9007 u32 total_size, u32 min_size)
9008 {
9009 struct device *dev;
9010 struct pqi_host_memory *host_memory;
9011
9012 dev = &ctrl_info->pci_dev->dev;
9013
9014 host_memory = dma_alloc_coherent(dev, sizeof(*host_memory),
9015 &host_memory_descriptor->host_memory_dma_handle, GFP_KERNEL);
9016 if (!host_memory)
9017 return;
9018
9019 host_memory_descriptor->host_memory = host_memory;
9020
9021 if (pqi_host_alloc_buffer(ctrl_info, host_memory_descriptor,
9022 total_size, min_size) < 0) {
9023 dev_err(dev, "failed to allocate firmware usable host buffer\n");
9024 dma_free_coherent(dev, sizeof(*host_memory), host_memory,
9025 host_memory_descriptor->host_memory_dma_handle);
9026 host_memory_descriptor->host_memory = NULL;
9027 return;
9028 }
9029 }
9030
9031 static void pqi_host_free_buffer(struct pqi_ctrl_info *ctrl_info,
9032 struct pqi_host_memory_descriptor *host_memory_descriptor)
9033 {
9034 unsigned int i;
9035 struct device *dev;
9036 struct pqi_host_memory *host_memory;
9037 struct pqi_sg_descriptor *mem_descriptor;
9038 unsigned int num_memory_descriptors;
9039
9040 host_memory = host_memory_descriptor->host_memory;
9041 if (!host_memory)
9042 return;
9043
9044 dev = &ctrl_info->pci_dev->dev;
9045
9046 if (get_unaligned_le32(&host_memory->bytes_allocated) == 0)
9047 goto out;
9048
9049 mem_descriptor = host_memory->sg_descriptor;
9050 num_memory_descriptors = get_unaligned_le16(&host_memory->num_memory_descriptors);
9051
9052 for (i = 0; i < num_memory_descriptors; i++) {
9053 dma_free_coherent(dev,
9054 get_unaligned_le32(&mem_descriptor[i].length),
9055 host_memory_descriptor->host_chunk_virt_address[i],
9056 get_unaligned_le64(&mem_descriptor[i].address));
9057 }
9058 kfree(host_memory_descriptor->host_chunk_virt_address);
9059
9060 out:
9061 dma_free_coherent(dev, sizeof(*host_memory), host_memory,
9062 host_memory_descriptor->host_memory_dma_handle);
9063 host_memory_descriptor->host_memory = NULL;
9064 }
9065
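/*
 * Notify the controller of the host buffer via a vendor-general request.
 * If no buffer was allocated (host_memory is NULL), the request goes out
 * with a zero buffer address and length, leaving the firmware without a
 * host buffer.
 */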
9066 static int pqi_host_memory_update(struct pqi_ctrl_info *ctrl_info,
9067 struct pqi_host_memory_descriptor *host_memory_descriptor,
9068 u16 function_code)
9069 {
9070 u32 buffer_length;
9071 struct pqi_vendor_general_request request;
9072 struct pqi_host_memory *host_memory;
9073
9074 memset(&request, 0, sizeof(request));
9075
9076 request.header.iu_type = PQI_REQUEST_IU_VENDOR_GENERAL;
9077 put_unaligned_le16(sizeof(request) - PQI_REQUEST_HEADER_LENGTH, &request.header.iu_length);
9078 put_unaligned_le16(function_code, &request.function_code);
9079
9080 host_memory = host_memory_descriptor->host_memory;
9081
9082 if (host_memory) {
9083 buffer_length = offsetof(struct pqi_host_memory, sg_descriptor) + get_unaligned_le16(&host_memory->num_memory_descriptors) * sizeof(struct pqi_sg_descriptor);
9084 put_unaligned_le64((u64)host_memory_descriptor->host_memory_dma_handle, &request.data.host_memory_allocation.buffer_address);
9085 put_unaligned_le32(buffer_length, &request.data.host_memory_allocation.buffer_length);
9086
9087 if (function_code == PQI_VENDOR_GENERAL_OFA_MEMORY_UPDATE) {
9088 put_unaligned_le16(PQI_OFA_VERSION, &host_memory->version);
9089 memcpy(&host_memory->signature, PQI_OFA_SIGNATURE, sizeof(host_memory->signature));
9090 } else if (function_code == PQI_VENDOR_GENERAL_CTRL_LOG_MEMORY_UPDATE) {
9091 put_unaligned_le16(PQI_CTRL_LOG_VERSION, &host_memory->version);
9092 memcpy(&host_memory->signature, PQI_CTRL_LOG_SIGNATURE, sizeof(host_memory->signature));
9093 }
9094 }
9095
9096 return pqi_submit_raid_request_synchronous(ctrl_info, &request.header, 0, NULL);
9097 }
9098
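/*
 * Canned error info used to complete driver-initiated requests when the
 * controller is taken offline.
 */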
9099 static struct pqi_raid_error_info pqi_ctrl_offline_raid_error_info = {
9100 .data_out_result = PQI_DATA_IN_OUT_HARDWARE_ERROR,
9101 .status = SAM_STAT_CHECK_CONDITION,
9102 };
9103
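/*
 * Complete every outstanding request: SCSI commands are failed with
 * DID_NO_CONNECT (or simply freed if their device is already gone),
 * while driver-initiated requests get -ENXIO and the canned offline
 * error info above.
 */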
9104 static void pqi_fail_all_outstanding_requests(struct pqi_ctrl_info *ctrl_info)
9105 {
9106 unsigned int i;
9107 struct pqi_io_request *io_request;
9108 struct scsi_cmnd *scmd;
9109 struct scsi_device *sdev;
9110
9111 for (i = 0; i < ctrl_info->max_io_slots; i++) {
9112 io_request = &ctrl_info->io_request_pool[i];
9113 if (atomic_read(&io_request->refcount) == 0)
9114 continue;
9115
9116 scmd = io_request->scmd;
9117 if (scmd) {
9118 sdev = scmd->device;
9119 if (!sdev || !scsi_device_online(sdev)) {
9120 pqi_free_io_request(io_request);
9121 continue;
9122 } else {
9123 set_host_byte(scmd, DID_NO_CONNECT);
9124 }
9125 } else {
9126 io_request->status = -ENXIO;
9127 io_request->error_info =
9128 &pqi_ctrl_offline_raid_error_info;
9129 }
9130
9131 io_request->io_complete_callback(io_request,
9132 io_request->context);
9133 }
9134 }
9135
9136 static void pqi_take_ctrl_offline_deferred(struct pqi_ctrl_info *ctrl_info)
9137 {
9138 pqi_perform_lockup_action();
9139 pqi_stop_heartbeat_timer(ctrl_info);
9140 pqi_free_interrupts(ctrl_info);
9141 pqi_cancel_rescan_worker(ctrl_info);
9142 pqi_cancel_update_time_worker(ctrl_info);
9143 pqi_ctrl_wait_until_quiesced(ctrl_info);
9144 pqi_fail_all_outstanding_requests(ctrl_info);
9145 pqi_ctrl_unblock_requests(ctrl_info);
9146 pqi_take_ctrl_devices_offline(ctrl_info);
9147 }
9148
9149 static void pqi_ctrl_offline_worker(struct work_struct *work)
9150 {
9151 struct pqi_ctrl_info *ctrl_info;
9152
9153 ctrl_info = container_of(work, struct pqi_ctrl_info, ctrl_offline_work);
9154 pqi_take_ctrl_offline_deferred(ctrl_info);
9155 }
9156
9157 static char *pqi_ctrl_shutdown_reason_to_string(enum pqi_ctrl_shutdown_reason ctrl_shutdown_reason)
9158 {
9159 char *string;
9160
9161 switch (ctrl_shutdown_reason) {
9162 case PQI_IQ_NOT_DRAINED_TIMEOUT:
9163 string = "inbound queue not drained timeout";
9164 break;
9165 case PQI_LUN_RESET_TIMEOUT:
9166 string = "LUN reset timeout";
9167 break;
9168 case PQI_IO_PENDING_POST_LUN_RESET_TIMEOUT:
9169 string = "I/O pending timeout after LUN reset";
9170 break;
9171 case PQI_NO_HEARTBEAT:
9172 string = "no controller heartbeat detected";
9173 break;
9174 case PQI_FIRMWARE_KERNEL_NOT_UP:
9175 string = "firmware kernel not ready";
9176 break;
9177 case PQI_OFA_RESPONSE_TIMEOUT:
9178 string = "OFA response timeout";
9179 break;
9180 case PQI_INVALID_REQ_ID:
9181 string = "invalid request ID";
9182 break;
9183 case PQI_UNMATCHED_REQ_ID:
9184 string = "unmatched request ID";
9185 break;
9186 case PQI_IO_PI_OUT_OF_RANGE:
9187 string = "I/O queue producer index out of range";
9188 break;
9189 case PQI_EVENT_PI_OUT_OF_RANGE:
9190 string = "event queue producer index out of range";
9191 break;
9192 case PQI_UNEXPECTED_IU_TYPE:
9193 string = "unexpected IU type";
9194 break;
9195 default:
9196 string = "unknown reason";
9197 break;
9198 }
9199
9200 return string;
9201 }
9202
9203 static void pqi_take_ctrl_offline(struct pqi_ctrl_info *ctrl_info,
9204 enum pqi_ctrl_shutdown_reason ctrl_shutdown_reason)
9205 {
9206 if (!ctrl_info->controller_online)
9207 return;
9208
9209 ctrl_info->controller_online = false;
9210 ctrl_info->pqi_mode_enabled = false;
9211 pqi_ctrl_block_requests(ctrl_info);
9212 if (!pqi_disable_ctrl_shutdown)
9213 sis_shutdown_ctrl(ctrl_info, ctrl_shutdown_reason);
9214 pci_disable_device(ctrl_info->pci_dev);
9215 dev_err(&ctrl_info->pci_dev->dev,
9216 "controller offline: reason code 0x%x (%s)\n",
9217 ctrl_shutdown_reason, pqi_ctrl_shutdown_reason_to_string(ctrl_shutdown_reason));
9218 schedule_work(&ctrl_info->ctrl_offline_work);
9219 }
9220
9221 static void pqi_take_ctrl_devices_offline(struct pqi_ctrl_info *ctrl_info)
9222 {
9223 int rc;
9224 unsigned long flags;
9225 struct pqi_scsi_dev *device;
9226
9227 spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
9228 list_for_each_entry(device, &ctrl_info->scsi_device_list, scsi_device_list_entry) {
9229 rc = list_is_last(&device->scsi_device_list_entry, &ctrl_info->scsi_device_list);
9230 if (rc)
9231 continue;
9232
9233 /*
9234 * Only devices that still have an attached sdev can be marked offline.
9235 */
9236 if (device->sdev)
9237 scsi_device_set_state(device->sdev, SDEV_OFFLINE);
9238 }
9239 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
9240 }
9241
9242 static void pqi_print_ctrl_info(struct pci_dev *pci_dev,
9243 const struct pci_device_id *id)
9244 {
9245 char *ctrl_description;
9246
9247 if (id->driver_data)
9248 ctrl_description = (char *)id->driver_data;
9249 else
9250 ctrl_description = "Microchip Smart Family Controller";
9251
9252 dev_info(&pci_dev->dev, "%s found\n", ctrl_description);
9253 }
9254
9255 static int pqi_pci_probe(struct pci_dev *pci_dev,
9256 const struct pci_device_id *id)
9257 {
9258 int rc;
9259 int node;
9260 struct pqi_ctrl_info *ctrl_info;
9261
9262 pqi_print_ctrl_info(pci_dev, id);
9263
9264 if (pqi_disable_device_id_wildcards &&
9265 id->subvendor == PCI_ANY_ID &&
9266 id->subdevice == PCI_ANY_ID) {
9267 dev_warn(&pci_dev->dev,
9268 "controller not probed because device ID wildcards are disabled\n");
9269 return -ENODEV;
9270 }
9271
9272 if (id->subvendor == PCI_ANY_ID || id->subdevice == PCI_ANY_ID)
9273 dev_warn(&pci_dev->dev,
9274 "controller device ID matched using wildcards\n");
9275
9276 node = dev_to_node(&pci_dev->dev);
9277 if (node == NUMA_NO_NODE) {
9278 node = cpu_to_node(0);
9279 if (node == NUMA_NO_NODE)
9280 node = 0;
9281 set_dev_node(&pci_dev->dev, node);
9282 }
9283
9284 ctrl_info = pqi_alloc_ctrl_info(node);
9285 if (!ctrl_info) {
9286 dev_err(&pci_dev->dev,
9287 "failed to allocate controller info block\n");
9288 return -ENOMEM;
9289 }
9290 ctrl_info->numa_node = node;
9291
9292 ctrl_info->pci_dev = pci_dev;
9293
9294 rc = pqi_pci_init(ctrl_info);
9295 if (rc)
9296 goto error;
9297
9298 rc = pqi_ctrl_init(ctrl_info);
9299 if (rc)
9300 goto error;
9301
9302 return 0;
9303
9304 error:
9305 pqi_remove_ctrl(ctrl_info);
9306
9307 return rc;
9308 }
9309
9310 static void pqi_pci_remove(struct pci_dev *pci_dev)
9311 {
9312 struct pqi_ctrl_info *ctrl_info;
9313 u16 vendor_id;
9314 int rc;
9315
9316 ctrl_info = pci_get_drvdata(pci_dev);
9317 if (!ctrl_info)
9318 return;
9319
9320 pci_read_config_word(ctrl_info->pci_dev, PCI_SUBSYSTEM_VENDOR_ID, &vendor_id);
9321 if (vendor_id == 0xffff)
9322 ctrl_info->ctrl_removal_state = PQI_CTRL_SURPRISE_REMOVAL;
9323 else
9324 ctrl_info->ctrl_removal_state = PQI_CTRL_GRACEFUL_REMOVAL;
9325
9326 if (ctrl_info->ctrl_removal_state == PQI_CTRL_GRACEFUL_REMOVAL) {
9327 rc = pqi_flush_cache(ctrl_info, RESTART);
9328 if (rc)
9329 dev_err(&pci_dev->dev,
9330 "unable to flush controller cache during remove\n");
9331 }
9332
9333 pqi_remove_ctrl(ctrl_info);
9334 }
9335
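/*
 * Any request still outstanding at this point trips exactly one of the
 * two WARN_ONs below: the first for an I/O command from the SCSI
 * midlayer, the second for a non-I/O or driver-initiated command.
 */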
9336 static void pqi_crash_if_pending_command(struct pqi_ctrl_info *ctrl_info)
9337 {
9338 unsigned int i;
9339 struct pqi_io_request *io_request;
9340 struct scsi_cmnd *scmd;
9341
9342 for (i = 0; i < ctrl_info->max_io_slots; i++) {
9343 io_request = &ctrl_info->io_request_pool[i];
9344 if (atomic_read(&io_request->refcount) == 0)
9345 continue;
9346 scmd = io_request->scmd;
9347 WARN_ON(scmd != NULL); /* IO command from SML */
9348 WARN_ON(scmd == NULL); /* Non-IO cmd or driver initiated */
9349 }
9350 }
9351
9352 static void pqi_shutdown(struct pci_dev *pci_dev)
9353 {
9354 int rc;
9355 struct pqi_ctrl_info *ctrl_info;
9356 enum bmic_flush_cache_shutdown_event shutdown_event;
9357
9358 ctrl_info = pci_get_drvdata(pci_dev);
9359 if (!ctrl_info) {
9360 dev_err(&pci_dev->dev,
9361 "cache could not be flushed\n");
9362 return;
9363 }
9364
9365 pqi_wait_until_ofa_finished(ctrl_info);
9366
9367 pqi_scsi_block_requests(ctrl_info);
9368 pqi_ctrl_block_device_reset(ctrl_info);
9369 pqi_ctrl_block_requests(ctrl_info);
9370 pqi_ctrl_wait_until_quiesced(ctrl_info);
9371
9372 if (system_state == SYSTEM_RESTART)
9373 shutdown_event = RESTART;
9374 else
9375 shutdown_event = SHUTDOWN;
9376
9377 /*
9378 * Write all data in the controller's battery-backed cache to
9379 * storage.
9380 */
9381 rc = pqi_flush_cache(ctrl_info, shutdown_event);
9382 if (rc)
9383 dev_err(&pci_dev->dev,
9384 "unable to flush controller cache during shutdown\n");
9385
9386 pqi_crash_if_pending_command(ctrl_info);
9387 pqi_reset(ctrl_info);
9388 }
9389
9390 static void pqi_process_lockup_action_param(void)
9391 {
9392 unsigned int i;
9393
9394 if (!pqi_lockup_action_param)
9395 return;
9396
9397 for (i = 0; i < ARRAY_SIZE(pqi_lockup_actions); i++) {
9398 if (strcmp(pqi_lockup_action_param,
9399 pqi_lockup_actions[i].name) == 0) {
9400 pqi_lockup_action = pqi_lockup_actions[i].action;
9401 return;
9402 }
9403 }
9404
9405 pr_warn("%s: invalid lockup action setting \"%s\" - supported settings: none, reboot, panic\n",
9406 DRIVER_NAME_SHORT, pqi_lockup_action_param);
9407 }
9408
9409 #define PQI_CTRL_READY_TIMEOUT_PARAM_MIN_SECS 30
9410 #define PQI_CTRL_READY_TIMEOUT_PARAM_MAX_SECS (30 * 60)
9411
9412 static void pqi_process_ctrl_ready_timeout_param(void)
9413 {
9414 if (pqi_ctrl_ready_timeout_secs == 0)
9415 return;
9416
9417 if (pqi_ctrl_ready_timeout_secs < PQI_CTRL_READY_TIMEOUT_PARAM_MIN_SECS) {
9418 pr_warn("%s: ctrl_ready_timeout parm of %u second(s) is less than minimum timeout of %d seconds - setting timeout to %d seconds\n",
9419 DRIVER_NAME_SHORT, pqi_ctrl_ready_timeout_secs, PQI_CTRL_READY_TIMEOUT_PARAM_MIN_SECS, PQI_CTRL_READY_TIMEOUT_PARAM_MIN_SECS);
9420 pqi_ctrl_ready_timeout_secs = PQI_CTRL_READY_TIMEOUT_PARAM_MIN_SECS;
9421 } else if (pqi_ctrl_ready_timeout_secs > PQI_CTRL_READY_TIMEOUT_PARAM_MAX_SECS) {
9422 pr_warn("%s: ctrl_ready_timeout parm of %u seconds is greater than maximum timeout of %d seconds - setting timeout to %d seconds\n",
9423 DRIVER_NAME_SHORT, pqi_ctrl_ready_timeout_secs, PQI_CTRL_READY_TIMEOUT_PARAM_MAX_SECS, PQI_CTRL_READY_TIMEOUT_PARAM_MAX_SECS);
9424 pqi_ctrl_ready_timeout_secs = PQI_CTRL_READY_TIMEOUT_PARAM_MAX_SECS;
9425 }
9426
9427 sis_ctrl_ready_timeout_secs = pqi_ctrl_ready_timeout_secs;
9428 }
9429
9430 static void pqi_process_module_params(void)
9431 {
9432 pqi_process_lockup_action_param();
9433 pqi_process_ctrl_ready_timeout_param();
9434 }
9435
9436 #if defined(CONFIG_PM)
9437
9438 static inline enum bmic_flush_cache_shutdown_event pqi_get_flush_cache_shutdown_event(struct pci_dev *pci_dev)
9439 {
9440 if (pci_dev->subsystem_vendor == PCI_VENDOR_ID_ADAPTEC2 && pci_dev->subsystem_device == 0x1304)
9441 return RESTART;
9442
9443 return SUSPEND;
9444 }
9445
9446 static int pqi_suspend_or_freeze(struct device *dev, bool suspend)
9447 {
9448 struct pci_dev *pci_dev;
9449 struct pqi_ctrl_info *ctrl_info;
9450
9451 pci_dev = to_pci_dev(dev);
9452 ctrl_info = pci_get_drvdata(pci_dev);
9453
9454 pqi_wait_until_ofa_finished(ctrl_info);
9455
9456 pqi_ctrl_block_scan(ctrl_info);
9457 pqi_scsi_block_requests(ctrl_info);
9458 pqi_ctrl_block_device_reset(ctrl_info);
9459 pqi_ctrl_block_requests(ctrl_info);
9460 pqi_ctrl_wait_until_quiesced(ctrl_info);
9461
9462 if (suspend) {
9463 enum bmic_flush_cache_shutdown_event shutdown_event;
9464
9465 shutdown_event = pqi_get_flush_cache_shutdown_event(pci_dev);
9466 pqi_flush_cache(ctrl_info, shutdown_event);
9467 }
9468
9469 pqi_stop_heartbeat_timer(ctrl_info);
9470 pqi_crash_if_pending_command(ctrl_info);
9471 pqi_free_irqs(ctrl_info);
9472
9473 ctrl_info->controller_online = false;
9474 ctrl_info->pqi_mode_enabled = false;
9475
9476 return 0;
9477 }
9478
9479 static __maybe_unused int pqi_suspend(struct device *dev)
9480 {
9481 return pqi_suspend_or_freeze(dev, true);
9482 }
9483
9484 static int pqi_resume_or_restore(struct device *dev)
9485 {
9486 int rc;
9487 struct pci_dev *pci_dev;
9488 struct pqi_ctrl_info *ctrl_info;
9489
9490 pci_dev = to_pci_dev(dev);
9491 ctrl_info = pci_get_drvdata(pci_dev);
9492
9493 rc = pqi_request_irqs(ctrl_info);
9494 if (rc)
9495 return rc;
9496
9497 pqi_ctrl_unblock_device_reset(ctrl_info);
9498 pqi_ctrl_unblock_requests(ctrl_info);
9499 pqi_scsi_unblock_requests(ctrl_info);
9500 pqi_ctrl_unblock_scan(ctrl_info);
9501
9502 ssleep(PQI_POST_RESET_DELAY_SECS);
9503
9504 return pqi_ctrl_init_resume(ctrl_info);
9505 }
9506
9507 static int pqi_freeze(struct device *dev)
9508 {
9509 return pqi_suspend_or_freeze(dev, false);
9510 }
9511
9512 static int pqi_thaw(struct device *dev)
9513 {
9514 int rc;
9515 struct pci_dev *pci_dev;
9516 struct pqi_ctrl_info *ctrl_info;
9517
9518 pci_dev = to_pci_dev(dev);
9519 ctrl_info = pci_get_drvdata(pci_dev);
9520
9521 rc = pqi_request_irqs(ctrl_info);
9522 if (rc)
9523 return rc;
9524
9525 ctrl_info->controller_online = true;
9526 ctrl_info->pqi_mode_enabled = true;
9527
9528 pqi_ctrl_unblock_device_reset(ctrl_info);
9529 pqi_ctrl_unblock_requests(ctrl_info);
9530 pqi_scsi_unblock_requests(ctrl_info);
9531 pqi_ctrl_unblock_scan(ctrl_info);
9532
9533 return 0;
9534 }
9535
9536 static int pqi_poweroff(struct device *dev)
9537 {
9538 struct pci_dev *pci_dev;
9539 struct pqi_ctrl_info *ctrl_info;
9540 enum bmic_flush_cache_shutdown_event shutdown_event;
9541
9542 pci_dev = to_pci_dev(dev);
9543 ctrl_info = pci_get_drvdata(pci_dev);
9544
9545 shutdown_event = pqi_get_flush_cache_shutdown_event(pci_dev);
9546 pqi_flush_cache(ctrl_info, shutdown_event);
9547
9548 return 0;
9549 }
9550
9551 static const struct dev_pm_ops pqi_pm_ops = {
9552 .suspend = pqi_suspend,
9553 .resume = pqi_resume_or_restore,
9554 .freeze = pqi_freeze,
9555 .thaw = pqi_thaw,
9556 .poweroff = pqi_poweroff,
9557 .restore = pqi_resume_or_restore,
9558 };
9559
9560 #endif /* CONFIG_PM */
9561
9562 /* Define the PCI IDs for the controllers that we support. */
9563 static const struct pci_device_id pqi_pci_id_table[] = {
9564 {
9565 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9566 0x105b, 0x1211)
9567 },
9568 {
9569 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9570 0x105b, 0x1321)
9571 },
9572 {
9573 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9574 0x152d, 0x8a22)
9575 },
9576 {
9577 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9578 0x152d, 0x8a23)
9579 },
9580 {
9581 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9582 0x152d, 0x8a24)
9583 },
9584 {
9585 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9586 0x152d, 0x8a36)
9587 },
9588 {
9589 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9590 0x152d, 0x8a37)
9591 },
9592 {
9593 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9594 0x193d, 0x0462)
9595 },
9596 {
9597 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9598 0x193d, 0x1104)
9599 },
9600 {
9601 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9602 0x193d, 0x1105)
9603 },
9604 {
9605 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9606 0x193d, 0x1106)
9607 },
9608 {
9609 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9610 0x193d, 0x1107)
9611 },
9612 {
9613 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9614 0x193d, 0x1108)
9615 },
9616 {
9617 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9618 0x193d, 0x1109)
9619 },
9620 {
9621 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9622 0x193d, 0x110b)
9623 },
9624 {
9625 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9626 0x193d, 0x1110)
9627 },
9628 {
9629 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9630 0x193d, 0x8460)
9631 },
9632 {
9633 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9634 0x193d, 0x8461)
9635 },
9636 {
9637 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9638 0x193d, 0x8462)
9639 },
9640 {
9641 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9642 0x193d, 0xc460)
9643 },
9644 {
9645 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9646 0x193d, 0xc461)
9647 },
9648 {
9649 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9650 0x193d, 0xf460)
9651 },
9652 {
9653 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9654 0x193d, 0xf461)
9655 },
9656 {
9657 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9658 0x1bd4, 0x0045)
9659 },
9660 {
9661 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9662 0x1bd4, 0x0046)
9663 },
9664 {
9665 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9666 0x1bd4, 0x0047)
9667 },
9668 {
9669 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9670 0x1bd4, 0x0048)
9671 },
9672 {
9673 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9674 0x1bd4, 0x004a)
9675 },
9676 {
9677 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9678 0x1bd4, 0x004b)
9679 },
9680 {
9681 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9682 0x1bd4, 0x004c)
9683 },
9684 {
9685 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9686 0x1bd4, 0x004f)
9687 },
9688 {
9689 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9690 0x1bd4, 0x0051)
9691 },
9692 {
9693 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9694 0x1bd4, 0x0052)
9695 },
9696 {
9697 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9698 0x1bd4, 0x0053)
9699 },
9700 {
9701 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9702 0x1bd4, 0x0054)
9703 },
9704 {
9705 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9706 0x1bd4, 0x006b)
9707 },
9708 {
9709 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9710 0x1bd4, 0x006c)
9711 },
9712 {
9713 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9714 0x1bd4, 0x006d)
9715 },
9716 {
9717 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9718 0x1bd4, 0x006f)
9719 },
9720 {
9721 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9722 0x1bd4, 0x0070)
9723 },
9724 {
9725 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9726 0x1bd4, 0x0071)
9727 },
9728 {
9729 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9730 0x1bd4, 0x0072)
9731 },
9732 {
9733 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9734 0x1bd4, 0x0086)
9735 },
9736 {
9737 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9738 0x1bd4, 0x0087)
9739 },
9740 {
9741 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9742 0x1bd4, 0x0088)
9743 },
9744 {
9745 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9746 0x1bd4, 0x0089)
9747 },
9748 {
9749 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9750 0x1bd4, 0x00a3)
9751 },
9752 {
9753 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9754 0x1ff9, 0x00a1)
9755 },
9756 {
9757 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9758 0x1f3a, 0x0104)
9759 },
9760 {
9761 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9762 0x19e5, 0xd227)
9763 },
9764 {
9765 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9766 0x19e5, 0xd228)
9767 },
9768 {
9769 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9770 0x19e5, 0xd229)
9771 },
9772 {
9773 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9774 0x19e5, 0xd22a)
9775 },
9776 {
9777 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9778 0x19e5, 0xd22b)
9779 },
9780 {
9781 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9782 0x19e5, 0xd22c)
9783 },
9784 {
9785 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9786 PCI_VENDOR_ID_ADAPTEC2, 0x0110)
9787 },
9788 {
9789 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9790 PCI_VENDOR_ID_ADAPTEC2, 0x0608)
9791 },
9792 {
9793 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9794 PCI_VENDOR_ID_ADAPTEC2, 0x0659)
9795 },
9796 {
9797 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9798 PCI_VENDOR_ID_ADAPTEC2, 0x0800)
9799 },
9800 {
9801 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9802 PCI_VENDOR_ID_ADAPTEC2, 0x0801)
9803 },
9804 {
9805 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9806 PCI_VENDOR_ID_ADAPTEC2, 0x0802)
9807 },
9808 {
9809 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9810 PCI_VENDOR_ID_ADAPTEC2, 0x0803)
9811 },
9812 {
9813 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9814 PCI_VENDOR_ID_ADAPTEC2, 0x0804)
9815 },
9816 {
9817 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9818 PCI_VENDOR_ID_ADAPTEC2, 0x0805)
9819 },
9820 {
9821 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9822 PCI_VENDOR_ID_ADAPTEC2, 0x0806)
9823 },
9824 {
9825 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9826 PCI_VENDOR_ID_ADAPTEC2, 0x0807)
9827 },
9828 {
9829 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9830 PCI_VENDOR_ID_ADAPTEC2, 0x0808)
9831 },
9832 {
9833 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9834 PCI_VENDOR_ID_ADAPTEC2, 0x0809)
9835 },
9836 {
9837 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9838 PCI_VENDOR_ID_ADAPTEC2, 0x080a)
9839 },
9840 {
9841 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9842 PCI_VENDOR_ID_ADAPTEC2, 0x0900)
9843 },
9844 {
9845 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9846 PCI_VENDOR_ID_ADAPTEC2, 0x0901)
9847 },
9848 {
9849 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9850 PCI_VENDOR_ID_ADAPTEC2, 0x0902)
9851 },
9852 {
9853 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9854 PCI_VENDOR_ID_ADAPTEC2, 0x0903)
9855 },
9856 {
9857 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9858 PCI_VENDOR_ID_ADAPTEC2, 0x0904)
9859 },
9860 {
9861 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9862 PCI_VENDOR_ID_ADAPTEC2, 0x0905)
9863 },
9864 {
9865 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9866 PCI_VENDOR_ID_ADAPTEC2, 0x0906)
9867 },
9868 {
9869 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9870 PCI_VENDOR_ID_ADAPTEC2, 0x0907)
9871 },
9872 {
9873 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9874 PCI_VENDOR_ID_ADAPTEC2, 0x0908)
9875 },
9876 {
9877 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9878 PCI_VENDOR_ID_ADAPTEC2, 0x090a)
9879 },
9880 {
9881 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9882 PCI_VENDOR_ID_ADAPTEC2, 0x1200)
9883 },
9884 {
9885 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9886 PCI_VENDOR_ID_ADAPTEC2, 0x1201)
9887 },
9888 {
9889 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9890 PCI_VENDOR_ID_ADAPTEC2, 0x1202)
9891 },
9892 {
9893 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9894 PCI_VENDOR_ID_ADAPTEC2, 0x1280)
9895 },
9896 {
9897 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9898 PCI_VENDOR_ID_ADAPTEC2, 0x1281)
9899 },
9900 {
9901 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9902 PCI_VENDOR_ID_ADAPTEC2, 0x1282)
9903 },
9904 {
9905 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9906 PCI_VENDOR_ID_ADAPTEC2, 0x1300)
9907 },
9908 {
9909 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9910 PCI_VENDOR_ID_ADAPTEC2, 0x1301)
9911 },
9912 {
9913 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9914 PCI_VENDOR_ID_ADAPTEC2, 0x1302)
9915 },
9916 {
9917 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9918 PCI_VENDOR_ID_ADAPTEC2, 0x1303)
9919 },
9920 {
9921 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9922 PCI_VENDOR_ID_ADAPTEC2, 0x1304)
9923 },
9924 {
9925 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9926 PCI_VENDOR_ID_ADAPTEC2, 0x1380)
9927 },
9928 {
9929 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9930 PCI_VENDOR_ID_ADAPTEC2, 0x1400)
9931 },
9932 {
9933 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9934 PCI_VENDOR_ID_ADAPTEC2, 0x1402)
9935 },
9936 {
9937 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9938 PCI_VENDOR_ID_ADAPTEC2, 0x1410)
9939 },
9940 {
9941 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9942 PCI_VENDOR_ID_ADAPTEC2, 0x1411)
9943 },
9944 {
9945 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9946 PCI_VENDOR_ID_ADAPTEC2, 0x1412)
9947 },
9948 {
9949 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9950 PCI_VENDOR_ID_ADAPTEC2, 0x1420)
9951 },
9952 {
9953 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9954 PCI_VENDOR_ID_ADAPTEC2, 0x1430)
9955 },
9956 {
9957 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9958 PCI_VENDOR_ID_ADAPTEC2, 0x1440)
9959 },
9960 {
9961 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9962 PCI_VENDOR_ID_ADAPTEC2, 0x1441)
9963 },
9964 {
9965 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9966 PCI_VENDOR_ID_ADAPTEC2, 0x1450)
9967 },
9968 {
9969 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9970 PCI_VENDOR_ID_ADAPTEC2, 0x1452)
9971 },
9972 {
9973 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9974 PCI_VENDOR_ID_ADAPTEC2, 0x1460)
9975 },
9976 {
9977 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9978 PCI_VENDOR_ID_ADAPTEC2, 0x1461)
9979 },
9980 {
9981 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9982 PCI_VENDOR_ID_ADAPTEC2, 0x1462)
9983 },
9984 {
9985 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9986 PCI_VENDOR_ID_ADAPTEC2, 0x1463)
9987 },
9988 {
9989 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9990 PCI_VENDOR_ID_ADAPTEC2, 0x1470)
9991 },
9992 {
9993 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9994 PCI_VENDOR_ID_ADAPTEC2, 0x1471)
9995 },
9996 {
9997 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9998 PCI_VENDOR_ID_ADAPTEC2, 0x1472)
9999 },
10000 {
10001 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
10002 PCI_VENDOR_ID_ADAPTEC2, 0x1473)
10003 },
10004 {
10005 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
10006 PCI_VENDOR_ID_ADAPTEC2, 0x1474)
10007 },
10008 {
10009 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			       PCI_VENDOR_ID_ADAPTEC2, 0x1475)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			       PCI_VENDOR_ID_ADAPTEC2, 0x1480)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			       PCI_VENDOR_ID_ADAPTEC2, 0x1490)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			       PCI_VENDOR_ID_ADAPTEC2, 0x1491)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			       PCI_VENDOR_ID_ADAPTEC2, 0x14a0)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			       PCI_VENDOR_ID_ADAPTEC2, 0x14a1)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			       PCI_VENDOR_ID_ADAPTEC2, 0x14a2)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			       PCI_VENDOR_ID_ADAPTEC2, 0x14a4)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			       PCI_VENDOR_ID_ADAPTEC2, 0x14a5)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			       PCI_VENDOR_ID_ADAPTEC2, 0x14a6)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			       PCI_VENDOR_ID_ADAPTEC2, 0x14b0)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			       PCI_VENDOR_ID_ADAPTEC2, 0x14b1)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			       PCI_VENDOR_ID_ADAPTEC2, 0x14c0)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			       PCI_VENDOR_ID_ADAPTEC2, 0x14c1)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			       PCI_VENDOR_ID_ADAPTEC2, 0x14c2)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			       PCI_VENDOR_ID_ADAPTEC2, 0x14c3)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			       PCI_VENDOR_ID_ADAPTEC2, 0x14c4)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			       PCI_VENDOR_ID_ADAPTEC2, 0x14d0)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			       PCI_VENDOR_ID_ADAPTEC2, 0x14e0)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			       PCI_VENDOR_ID_ADAPTEC2, 0x14f0)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			       0x207d, 0x4044)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			       0x207d, 0x4054)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			       0x207d, 0x4084)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			       0x207d, 0x4094)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			       0x207d, 0x4140)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			       0x207d, 0x4240)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			       PCI_VENDOR_ID_ADVANTECH, 0x8312)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			       PCI_VENDOR_ID_DELL, 0x1fe0)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			       PCI_VENDOR_ID_HP, 0x0600)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			       PCI_VENDOR_ID_HP, 0x0601)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			       PCI_VENDOR_ID_HP, 0x0602)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			       PCI_VENDOR_ID_HP, 0x0603)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			       PCI_VENDOR_ID_HP, 0x0609)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			       PCI_VENDOR_ID_HP, 0x0650)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			       PCI_VENDOR_ID_HP, 0x0651)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			       PCI_VENDOR_ID_HP, 0x0652)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			       PCI_VENDOR_ID_HP, 0x0653)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			       PCI_VENDOR_ID_HP, 0x0654)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			       PCI_VENDOR_ID_HP, 0x0655)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			       PCI_VENDOR_ID_HP, 0x0700)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			       PCI_VENDOR_ID_HP, 0x0701)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			       PCI_VENDOR_ID_HP, 0x1001)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			       PCI_VENDOR_ID_HP, 0x1002)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			       PCI_VENDOR_ID_HP, 0x1100)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			       PCI_VENDOR_ID_HP, 0x1101)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			       0x1590, 0x0294)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			       0x1590, 0x02db)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			       0x1590, 0x02dc)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			       0x1590, 0x032e)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			       0x1590, 0x036f)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			       0x1590, 0x0381)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			       0x1590, 0x0382)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			       0x1590, 0x0383)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			       0x1d8d, 0x0800)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			       0x1d8d, 0x0908)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			       0x1d8d, 0x0806)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			       0x1d8d, 0x0916)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			       PCI_VENDOR_ID_GIGABYTE, 0x1000)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			       0x1dfc, 0x3161)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			       0x1f0c, 0x3161)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			       0x1cf2, 0x0804)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			       0x1cf2, 0x0805)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			       0x1cf2, 0x0806)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			       0x1cf2, 0x5445)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			       0x1cf2, 0x5446)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			       0x1cf2, 0x5447)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			       0x1cf2, 0x5449)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			       0x1cf2, 0x544a)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			       0x1cf2, 0x544b)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			       0x1cf2, 0x544d)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			       0x1cf2, 0x544e)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			       0x1cf2, 0x544f)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			       0x1cf2, 0x54da)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			       0x1cf2, 0x54db)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			       0x1cf2, 0x54dc)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			       0x1cf2, 0x0b27)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			       0x1cf2, 0x0b29)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			       0x1cf2, 0x0b45)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			       0x1cc4, 0x0101)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			       0x1cc4, 0x0201)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			       0x1018, 0x8238)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			       0x1f3f, 0x0610)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			       PCI_VENDOR_ID_LENOVO, 0x0220)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			       PCI_VENDOR_ID_LENOVO, 0x0221)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			       PCI_VENDOR_ID_LENOVO, 0x0222)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			       PCI_VENDOR_ID_LENOVO, 0x0223)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			       PCI_VENDOR_ID_LENOVO, 0x0224)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			       PCI_VENDOR_ID_LENOVO, 0x0225)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			       PCI_VENDOR_ID_LENOVO, 0x0520)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			       PCI_VENDOR_ID_LENOVO, 0x0521)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			       PCI_VENDOR_ID_LENOVO, 0x0522)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			       PCI_VENDOR_ID_LENOVO, 0x0620)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			       PCI_VENDOR_ID_LENOVO, 0x0621)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			       PCI_VENDOR_ID_LENOVO, 0x0622)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			       PCI_VENDOR_ID_LENOVO, 0x0623)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			       PCI_VENDOR_ID_LENOVO, 0x0624)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			       PCI_VENDOR_ID_LENOVO, 0x0625)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			       PCI_VENDOR_ID_LENOVO, 0x0626)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			       PCI_VENDOR_ID_LENOVO, 0x0627)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			       PCI_VENDOR_ID_LENOVO, 0x0628)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			       0x1014, 0x0718)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			       0x1137, 0x02f8)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			       0x1137, 0x02f9)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			       0x1137, 0x02fa)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			       0x1137, 0x02fe)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			       0x1137, 0x02ff)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			       0x1137, 0x0300)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			       0x1ded, 0x3301)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			       0x1ff9, 0x0045)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			       0x1ff9, 0x0046)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			       0x1ff9, 0x0047)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			       0x1ff9, 0x0048)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			       0x1ff9, 0x004a)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			       0x1ff9, 0x004b)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			       0x1ff9, 0x004c)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			       0x1ff9, 0x004f)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			       0x1ff9, 0x0051)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			       0x1ff9, 0x0052)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			       0x1ff9, 0x0053)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			       0x1ff9, 0x0054)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			       0x1ff9, 0x006b)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			       0x1ff9, 0x006c)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			       0x1ff9, 0x006d)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			       0x1ff9, 0x006f)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			       0x1ff9, 0x0070)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			       0x1ff9, 0x0071)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			       0x1ff9, 0x0072)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			       0x1ff9, 0x0086)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			       0x1ff9, 0x0087)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			       0x1ff9, 0x0088)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			       0x1ff9, 0x0089)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			       0x1e93, 0x1000)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			       0x1e93, 0x1001)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			       0x1e93, 0x1002)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			       0x1e93, 0x1005)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			       0x1f51, 0x1001)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			       0x1f51, 0x1002)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			       0x1f51, 0x1003)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			       0x1f51, 0x1004)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			       0x1f51, 0x1005)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			       0x1f51, 0x1006)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			       0x1f51, 0x1007)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			       0x1f51, 0x1008)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			       0x1f51, 0x1009)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			       0x1f51, 0x100a)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			       0x1f51, 0x100b)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			       0x1f51, 0x100e)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			       0x1f51, 0x100f)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			       0x1f51, 0x1010)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			       0x1f51, 0x1011)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			       0x1f51, 0x1043)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			       0x1f51, 0x1044)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			       0x1f51, 0x1045)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			       0x1ff9, 0x00a3)
	},
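	/*
	 * Catch-all: match any PCI function with the Adaptec PQI
	 * vendor/device ID (0x028f) regardless of its subsystem IDs, so
	 * OEM boards not listed explicitly above are still claimed.
	 */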
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			       PCI_ANY_ID, PCI_ANY_ID)
	},
	{ 0 }
};

MODULE_DEVICE_TABLE(pci, pqi_pci_id_table);

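/*
 * PCI driver glue: probe/remove handle controller bring-up and tear-down,
 * shutdown runs at system reboot/power-off, and the PM ops (compiled in
 * only when CONFIG_PM is set) handle suspend/resume.
 */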
static struct pci_driver pqi_pci_driver = {
	.name = DRIVER_NAME_SHORT,
	.id_table = pqi_pci_id_table,
	.probe = pqi_pci_probe,
	.remove = pqi_pci_remove,
	.shutdown = pqi_shutdown,
#if defined(CONFIG_PM)
	.driver = {
		.pm = &pqi_pm_ops
	},
#endif
};

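/*
 * Module entry point: check compile-time structure layouts, attach the
 * SAS transport template, apply module parameters, then register the PCI
 * driver. The transport template is released again if registration fails.
 */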
static int __init pqi_init(void)
{
	int rc;

	pr_info(DRIVER_NAME "\n");
	pqi_verify_structures();
	sis_verify_structures();

	pqi_sas_transport_template = sas_attach_transport(&pqi_sas_transport_functions);
	if (!pqi_sas_transport_template)
		return -ENODEV;

	pqi_process_module_params();

	rc = pci_register_driver(&pqi_pci_driver);
	if (rc)
		sas_release_transport(pqi_sas_transport_template);

	return rc;
}

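/* Module exit: unregister the PCI driver, then release the SAS transport. */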
static void __exit pqi_cleanup(void)
{
	pci_unregister_driver(&pqi_pci_driver);
	sas_release_transport(pqi_sas_transport_template);
}

module_init(pqi_init);
module_exit(pqi_cleanup);

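/*
 * Compile-time layout checks: BUILD_BUG_ON() breaks the build if any
 * on-the-wire structure offset, structure size, or queue parameter drifts
 * from the layout the PQI/BMIC firmware interface expects.
 */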
static void pqi_verify_structures(void)
{
	BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers,
		sis_host_to_ctrl_doorbell) != 0x20);
	BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers,
		sis_interrupt_mask) != 0x34);
	BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers,
		sis_ctrl_to_host_doorbell) != 0x9c);
	BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers,
		sis_ctrl_to_host_doorbell_clear) != 0xa0);
	BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers,
		sis_driver_scratch) != 0xb0);
	BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers,
		sis_product_identifier) != 0xb4);
	BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers,
		sis_firmware_status) != 0xbc);
	BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers,
		sis_ctrl_shutdown_reason_code) != 0xcc);
	BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers,
		sis_mailbox) != 0x1000);
	BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers,
		pqi_registers) != 0x4000);

	BUILD_BUG_ON(offsetof(struct pqi_iu_header,
		iu_type) != 0x0);
	BUILD_BUG_ON(offsetof(struct pqi_iu_header,
		iu_length) != 0x2);
	BUILD_BUG_ON(offsetof(struct pqi_iu_header,
		response_queue_id) != 0x4);
	BUILD_BUG_ON(offsetof(struct pqi_iu_header,
		driver_flags) != 0x6);
	BUILD_BUG_ON(sizeof(struct pqi_iu_header) != 0x8);

	BUILD_BUG_ON(offsetof(struct pqi_aio_error_info,
		status) != 0x0);
	BUILD_BUG_ON(offsetof(struct pqi_aio_error_info,
		service_response) != 0x1);
	BUILD_BUG_ON(offsetof(struct pqi_aio_error_info,
		data_present) != 0x2);
	BUILD_BUG_ON(offsetof(struct pqi_aio_error_info,
		reserved) != 0x3);
	BUILD_BUG_ON(offsetof(struct pqi_aio_error_info,
		residual_count) != 0x4);
	BUILD_BUG_ON(offsetof(struct pqi_aio_error_info,
		data_length) != 0x8);
	BUILD_BUG_ON(offsetof(struct pqi_aio_error_info,
		reserved1) != 0xa);
	BUILD_BUG_ON(offsetof(struct pqi_aio_error_info,
		data) != 0xc);
	BUILD_BUG_ON(sizeof(struct pqi_aio_error_info) != 0x10c);

	BUILD_BUG_ON(offsetof(struct pqi_raid_error_info,
		data_in_result) != 0x0);
	BUILD_BUG_ON(offsetof(struct pqi_raid_error_info,
		data_out_result) != 0x1);
	BUILD_BUG_ON(offsetof(struct pqi_raid_error_info,
		reserved) != 0x2);
	BUILD_BUG_ON(offsetof(struct pqi_raid_error_info,
		status) != 0x5);
	BUILD_BUG_ON(offsetof(struct pqi_raid_error_info,
		status_qualifier) != 0x6);
	BUILD_BUG_ON(offsetof(struct pqi_raid_error_info,
		sense_data_length) != 0x8);
	BUILD_BUG_ON(offsetof(struct pqi_raid_error_info,
		response_data_length) != 0xa);
	BUILD_BUG_ON(offsetof(struct pqi_raid_error_info,
		data_in_transferred) != 0xc);
	BUILD_BUG_ON(offsetof(struct pqi_raid_error_info,
		data_out_transferred) != 0x10);
	BUILD_BUG_ON(offsetof(struct pqi_raid_error_info,
		data) != 0x14);
	BUILD_BUG_ON(sizeof(struct pqi_raid_error_info) != 0x114);

	BUILD_BUG_ON(offsetof(struct pqi_device_registers,
		signature) != 0x0);
	BUILD_BUG_ON(offsetof(struct pqi_device_registers,
		function_and_status_code) != 0x8);
	BUILD_BUG_ON(offsetof(struct pqi_device_registers,
		max_admin_iq_elements) != 0x10);
	BUILD_BUG_ON(offsetof(struct pqi_device_registers,
		max_admin_oq_elements) != 0x11);
	BUILD_BUG_ON(offsetof(struct pqi_device_registers,
		admin_iq_element_length) != 0x12);
	BUILD_BUG_ON(offsetof(struct pqi_device_registers,
		admin_oq_element_length) != 0x13);
	BUILD_BUG_ON(offsetof(struct pqi_device_registers,
		max_reset_timeout) != 0x14);
	BUILD_BUG_ON(offsetof(struct pqi_device_registers,
		legacy_intx_status) != 0x18);
	BUILD_BUG_ON(offsetof(struct pqi_device_registers,
		legacy_intx_mask_set) != 0x1c);
	BUILD_BUG_ON(offsetof(struct pqi_device_registers,
		legacy_intx_mask_clear) != 0x20);
	BUILD_BUG_ON(offsetof(struct pqi_device_registers,
		device_status) != 0x40);
	BUILD_BUG_ON(offsetof(struct pqi_device_registers,
		admin_iq_pi_offset) != 0x48);
	BUILD_BUG_ON(offsetof(struct pqi_device_registers,
		admin_oq_ci_offset) != 0x50);
	BUILD_BUG_ON(offsetof(struct pqi_device_registers,
		admin_iq_element_array_addr) != 0x58);
	BUILD_BUG_ON(offsetof(struct pqi_device_registers,
		admin_oq_element_array_addr) != 0x60);
	BUILD_BUG_ON(offsetof(struct pqi_device_registers,
		admin_iq_ci_addr) != 0x68);
	BUILD_BUG_ON(offsetof(struct pqi_device_registers,
		admin_oq_pi_addr) != 0x70);
	BUILD_BUG_ON(offsetof(struct pqi_device_registers,
		admin_iq_num_elements) != 0x78);
	BUILD_BUG_ON(offsetof(struct pqi_device_registers,
		admin_oq_num_elements) != 0x79);
	BUILD_BUG_ON(offsetof(struct pqi_device_registers,
		admin_queue_int_msg_num) != 0x7a);
	BUILD_BUG_ON(offsetof(struct pqi_device_registers,
		device_error) != 0x80);
	BUILD_BUG_ON(offsetof(struct pqi_device_registers,
		error_details) != 0x88);
	BUILD_BUG_ON(offsetof(struct pqi_device_registers,
		device_reset) != 0x90);
	BUILD_BUG_ON(offsetof(struct pqi_device_registers,
		power_action) != 0x94);
	BUILD_BUG_ON(sizeof(struct pqi_device_registers) != 0x100);

	BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
		header.iu_type) != 0);
	BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
		header.iu_length) != 2);
	BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
		header.driver_flags) != 6);
	BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
		request_id) != 8);
	BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
		function_code) != 10);
	BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
		data.report_device_capability.buffer_length) != 44);
	BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
		data.report_device_capability.sg_descriptor) != 48);
	BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
		data.create_operational_iq.queue_id) != 12);
	BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
		data.create_operational_iq.element_array_addr) != 16);
	BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
		data.create_operational_iq.ci_addr) != 24);
	BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
		data.create_operational_iq.num_elements) != 32);
	BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
		data.create_operational_iq.element_length) != 34);
	BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
		data.create_operational_iq.queue_protocol) != 36);
	BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
		data.create_operational_oq.queue_id) != 12);
	BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
		data.create_operational_oq.element_array_addr) != 16);
	BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
		data.create_operational_oq.pi_addr) != 24);
	BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
		data.create_operational_oq.num_elements) != 32);
	BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
		data.create_operational_oq.element_length) != 34);
	BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
		data.create_operational_oq.queue_protocol) != 36);
	BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
		data.create_operational_oq.int_msg_num) != 40);
	BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
		data.create_operational_oq.coalescing_count) != 42);
	BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
		data.create_operational_oq.min_coalescing_time) != 44);
	BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
		data.create_operational_oq.max_coalescing_time) != 48);
	BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
		data.delete_operational_queue.queue_id) != 12);
	BUILD_BUG_ON(sizeof(struct pqi_general_admin_request) != 64);
	BUILD_BUG_ON(sizeof_field(struct pqi_general_admin_request,
		data.create_operational_iq) != 64 - 11);
	BUILD_BUG_ON(sizeof_field(struct pqi_general_admin_request,
		data.create_operational_oq) != 64 - 11);
	BUILD_BUG_ON(sizeof_field(struct pqi_general_admin_request,
		data.delete_operational_queue) != 64 - 11);

	BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
		header.iu_type) != 0);
	BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
		header.iu_length) != 2);
	BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
		header.driver_flags) != 6);
	BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
		request_id) != 8);
	BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
		function_code) != 10);
	BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
		status) != 11);
	BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
		data.create_operational_iq.status_descriptor) != 12);
	BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
		data.create_operational_iq.iq_pi_offset) != 16);
	BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
		data.create_operational_oq.status_descriptor) != 12);
	BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
		data.create_operational_oq.oq_ci_offset) != 16);
	BUILD_BUG_ON(sizeof(struct pqi_general_admin_response) != 64);

	BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
		header.iu_type) != 0);
	BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
		header.iu_length) != 2);
	BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
		header.response_queue_id) != 4);
	BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
		header.driver_flags) != 6);
	BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
		request_id) != 8);
	BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
		nexus_id) != 10);
	BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
		buffer_length) != 12);
	BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
		lun_number) != 16);
	BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
		protocol_specific) != 24);
	BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
		error_index) != 27);
	BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
		cdb) != 32);
	BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
		timeout) != 60);
	BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
		sg_descriptors) != 64);
	BUILD_BUG_ON(sizeof(struct pqi_raid_path_request) !=
		PQI_OPERATIONAL_IQ_ELEMENT_LENGTH);

	BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
		header.iu_type) != 0);
	BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
		header.iu_length) != 2);
	BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
		header.response_queue_id) != 4);
	BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
		header.driver_flags) != 6);
	BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
		request_id) != 8);
	BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
		nexus_id) != 12);
	BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
		buffer_length) != 16);
	BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
		data_encryption_key_index) != 22);
	BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
		encrypt_tweak_lower) != 24);
	BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
		encrypt_tweak_upper) != 28);
	BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
		cdb) != 32);
	BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
		error_index) != 48);
	BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
		num_sg_descriptors) != 50);
	BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
		cdb_length) != 51);
	BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
		lun_number) != 52);
	BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
		sg_descriptors) != 64);
	BUILD_BUG_ON(sizeof(struct pqi_aio_path_request) !=
		PQI_OPERATIONAL_IQ_ELEMENT_LENGTH);

	BUILD_BUG_ON(offsetof(struct pqi_io_response,
		header.iu_type) != 0);
	BUILD_BUG_ON(offsetof(struct pqi_io_response,
		header.iu_length) != 2);
	BUILD_BUG_ON(offsetof(struct pqi_io_response,
		request_id) != 8);
	BUILD_BUG_ON(offsetof(struct pqi_io_response,
		error_index) != 10);

	BUILD_BUG_ON(offsetof(struct pqi_general_management_request,
		header.iu_type) != 0);
	BUILD_BUG_ON(offsetof(struct pqi_general_management_request,
		header.iu_length) != 2);
	BUILD_BUG_ON(offsetof(struct pqi_general_management_request,
		header.response_queue_id) != 4);
	BUILD_BUG_ON(offsetof(struct pqi_general_management_request,
		request_id) != 8);
	BUILD_BUG_ON(offsetof(struct pqi_general_management_request,
		data.report_event_configuration.buffer_length) != 12);
	BUILD_BUG_ON(offsetof(struct pqi_general_management_request,
		data.report_event_configuration.sg_descriptors) != 16);
	BUILD_BUG_ON(offsetof(struct pqi_general_management_request,
		data.set_event_configuration.global_event_oq_id) != 10);
	BUILD_BUG_ON(offsetof(struct pqi_general_management_request,
		data.set_event_configuration.buffer_length) != 12);
	BUILD_BUG_ON(offsetof(struct pqi_general_management_request,
		data.set_event_configuration.sg_descriptors) != 16);

	BUILD_BUG_ON(offsetof(struct pqi_iu_layer_descriptor,
		max_inbound_iu_length) != 6);
	BUILD_BUG_ON(offsetof(struct pqi_iu_layer_descriptor,
		max_outbound_iu_length) != 14);
	BUILD_BUG_ON(sizeof(struct pqi_iu_layer_descriptor) != 16);

	BUILD_BUG_ON(offsetof(struct pqi_device_capability,
		data_length) != 0);
	BUILD_BUG_ON(offsetof(struct pqi_device_capability,
		iq_arbitration_priority_support_bitmask) != 8);
	BUILD_BUG_ON(offsetof(struct pqi_device_capability,
		maximum_aw_a) != 9);
	BUILD_BUG_ON(offsetof(struct pqi_device_capability,
		maximum_aw_b) != 10);
	BUILD_BUG_ON(offsetof(struct pqi_device_capability,
		maximum_aw_c) != 11);
	BUILD_BUG_ON(offsetof(struct pqi_device_capability,
		max_inbound_queues) != 16);
	BUILD_BUG_ON(offsetof(struct pqi_device_capability,
		max_elements_per_iq) != 18);
	BUILD_BUG_ON(offsetof(struct pqi_device_capability,
		max_iq_element_length) != 24);
	BUILD_BUG_ON(offsetof(struct pqi_device_capability,
		min_iq_element_length) != 26);
	BUILD_BUG_ON(offsetof(struct pqi_device_capability,
		max_outbound_queues) != 30);
	BUILD_BUG_ON(offsetof(struct pqi_device_capability,
		max_elements_per_oq) != 32);
	BUILD_BUG_ON(offsetof(struct pqi_device_capability,
		intr_coalescing_time_granularity) != 34);
	BUILD_BUG_ON(offsetof(struct pqi_device_capability,
		max_oq_element_length) != 36);
	BUILD_BUG_ON(offsetof(struct pqi_device_capability,
		min_oq_element_length) != 38);
	BUILD_BUG_ON(offsetof(struct pqi_device_capability,
		iu_layer_descriptors) != 64);
	BUILD_BUG_ON(sizeof(struct pqi_device_capability) != 576);

	BUILD_BUG_ON(offsetof(struct pqi_event_descriptor,
		event_type) != 0);
	BUILD_BUG_ON(offsetof(struct pqi_event_descriptor,
		oq_id) != 2);
	BUILD_BUG_ON(sizeof(struct pqi_event_descriptor) != 4);

	BUILD_BUG_ON(offsetof(struct pqi_event_config,
		num_event_descriptors) != 2);
	BUILD_BUG_ON(offsetof(struct pqi_event_config,
		descriptors) != 4);

	BUILD_BUG_ON(PQI_NUM_SUPPORTED_EVENTS !=
		ARRAY_SIZE(pqi_supported_event_types));

	BUILD_BUG_ON(offsetof(struct pqi_event_response,
		header.iu_type) != 0);
	BUILD_BUG_ON(offsetof(struct pqi_event_response,
		header.iu_length) != 2);
	BUILD_BUG_ON(offsetof(struct pqi_event_response,
		event_type) != 8);
	BUILD_BUG_ON(offsetof(struct pqi_event_response,
		event_id) != 10);
	BUILD_BUG_ON(offsetof(struct pqi_event_response,
		additional_event_id) != 12);
	BUILD_BUG_ON(offsetof(struct pqi_event_response,
		data) != 16);
	BUILD_BUG_ON(sizeof(struct pqi_event_response) != 32);

	BUILD_BUG_ON(offsetof(struct pqi_event_acknowledge_request,
		header.iu_type) != 0);
	BUILD_BUG_ON(offsetof(struct pqi_event_acknowledge_request,
		header.iu_length) != 2);
	BUILD_BUG_ON(offsetof(struct pqi_event_acknowledge_request,
		event_type) != 8);
	BUILD_BUG_ON(offsetof(struct pqi_event_acknowledge_request,
		event_id) != 10);
	BUILD_BUG_ON(offsetof(struct pqi_event_acknowledge_request,
		additional_event_id) != 12);
	BUILD_BUG_ON(sizeof(struct pqi_event_acknowledge_request) != 16);

	BUILD_BUG_ON(offsetof(struct pqi_task_management_request,
		header.iu_type) != 0);
	BUILD_BUG_ON(offsetof(struct pqi_task_management_request,
		header.iu_length) != 2);
	BUILD_BUG_ON(offsetof(struct pqi_task_management_request,
		request_id) != 8);
	BUILD_BUG_ON(offsetof(struct pqi_task_management_request,
		nexus_id) != 10);
	BUILD_BUG_ON(offsetof(struct pqi_task_management_request,
		timeout) != 14);
	BUILD_BUG_ON(offsetof(struct pqi_task_management_request,
		lun_number) != 16);
	BUILD_BUG_ON(offsetof(struct pqi_task_management_request,
		protocol_specific) != 24);
	BUILD_BUG_ON(offsetof(struct pqi_task_management_request,
		outbound_queue_id_to_manage) != 26);
	BUILD_BUG_ON(offsetof(struct pqi_task_management_request,
		request_id_to_manage) != 28);
	BUILD_BUG_ON(offsetof(struct pqi_task_management_request,
		task_management_function) != 30);
	BUILD_BUG_ON(sizeof(struct pqi_task_management_request) != 32);

	BUILD_BUG_ON(offsetof(struct pqi_task_management_response,
		header.iu_type) != 0);
	BUILD_BUG_ON(offsetof(struct pqi_task_management_response,
		header.iu_length) != 2);
	BUILD_BUG_ON(offsetof(struct pqi_task_management_response,
		request_id) != 8);
	BUILD_BUG_ON(offsetof(struct pqi_task_management_response,
		nexus_id) != 10);
	BUILD_BUG_ON(offsetof(struct pqi_task_management_response,
		additional_response_info) != 12);
	BUILD_BUG_ON(offsetof(struct pqi_task_management_response,
		response_code) != 15);
	BUILD_BUG_ON(sizeof(struct pqi_task_management_response) != 16);

	BUILD_BUG_ON(offsetof(struct bmic_identify_controller,
		configured_logical_drive_count) != 0);
	BUILD_BUG_ON(offsetof(struct bmic_identify_controller,
		configuration_signature) != 1);
	BUILD_BUG_ON(offsetof(struct bmic_identify_controller,
		firmware_version_short) != 5);
	BUILD_BUG_ON(offsetof(struct bmic_identify_controller,
		extended_logical_unit_count) != 154);
	BUILD_BUG_ON(offsetof(struct bmic_identify_controller,
		firmware_build_number) != 190);
	BUILD_BUG_ON(offsetof(struct bmic_identify_controller,
		vendor_id) != 200);
	BUILD_BUG_ON(offsetof(struct bmic_identify_controller,
		product_id) != 208);
	BUILD_BUG_ON(offsetof(struct bmic_identify_controller,
		extra_controller_flags) != 286);
	BUILD_BUG_ON(offsetof(struct bmic_identify_controller,
		controller_mode) != 292);
	BUILD_BUG_ON(offsetof(struct bmic_identify_controller,
		spare_part_number) != 293);
	BUILD_BUG_ON(offsetof(struct bmic_identify_controller,
		firmware_version_long) != 325);

	BUILD_BUG_ON(offsetof(struct bmic_identify_physical_device,
		phys_bay_in_box) != 115);
	BUILD_BUG_ON(offsetof(struct bmic_identify_physical_device,
		device_type) != 120);
	BUILD_BUG_ON(offsetof(struct bmic_identify_physical_device,
		redundant_path_present_map) != 1736);
	BUILD_BUG_ON(offsetof(struct bmic_identify_physical_device,
		active_path_number) != 1738);
	BUILD_BUG_ON(offsetof(struct bmic_identify_physical_device,
		alternate_paths_phys_connector) != 1739);
	BUILD_BUG_ON(offsetof(struct bmic_identify_physical_device,
		alternate_paths_phys_box_on_port) != 1755);
	BUILD_BUG_ON(offsetof(struct bmic_identify_physical_device,
		current_queue_depth_limit) != 1796);
	BUILD_BUG_ON(sizeof(struct bmic_identify_physical_device) != 2560);

	BUILD_BUG_ON(sizeof(struct bmic_sense_feature_buffer_header) != 4);
	BUILD_BUG_ON(offsetof(struct bmic_sense_feature_buffer_header,
		page_code) != 0);
	BUILD_BUG_ON(offsetof(struct bmic_sense_feature_buffer_header,
		subpage_code) != 1);
	BUILD_BUG_ON(offsetof(struct bmic_sense_feature_buffer_header,
		buffer_length) != 2);

	BUILD_BUG_ON(sizeof(struct bmic_sense_feature_page_header) != 4);
	BUILD_BUG_ON(offsetof(struct bmic_sense_feature_page_header,
		page_code) != 0);
	BUILD_BUG_ON(offsetof(struct bmic_sense_feature_page_header,
		subpage_code) != 1);
	BUILD_BUG_ON(offsetof(struct bmic_sense_feature_page_header,
		page_length) != 2);

	BUILD_BUG_ON(sizeof(struct bmic_sense_feature_io_page_aio_subpage)
		!= 18);
	BUILD_BUG_ON(offsetof(struct bmic_sense_feature_io_page_aio_subpage,
		header) != 0);
	BUILD_BUG_ON(offsetof(struct bmic_sense_feature_io_page_aio_subpage,
		firmware_read_support) != 4);
	BUILD_BUG_ON(offsetof(struct bmic_sense_feature_io_page_aio_subpage,
		driver_read_support) != 5);
	BUILD_BUG_ON(offsetof(struct bmic_sense_feature_io_page_aio_subpage,
		firmware_write_support) != 6);
	BUILD_BUG_ON(offsetof(struct bmic_sense_feature_io_page_aio_subpage,
		driver_write_support) != 7);
	BUILD_BUG_ON(offsetof(struct bmic_sense_feature_io_page_aio_subpage,
		max_transfer_encrypted_sas_sata) != 8);
	BUILD_BUG_ON(offsetof(struct bmic_sense_feature_io_page_aio_subpage,
		max_transfer_encrypted_nvme) != 10);
	BUILD_BUG_ON(offsetof(struct bmic_sense_feature_io_page_aio_subpage,
		max_write_raid_5_6) != 12);
	BUILD_BUG_ON(offsetof(struct bmic_sense_feature_io_page_aio_subpage,
		max_write_raid_1_10_2drive) != 14);
	BUILD_BUG_ON(offsetof(struct bmic_sense_feature_io_page_aio_subpage,
		max_write_raid_1_10_3drive) != 16);

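	/*
	 * Queue geometry limits: admin queue depths are advertised in
	 * single-byte registers (see the 0x78/0x79 offsets above), and
	 * every element length must be a multiple of the PQI queue-element
	 * alignment.
	 */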
	BUILD_BUG_ON(PQI_ADMIN_IQ_NUM_ELEMENTS > 255);
	BUILD_BUG_ON(PQI_ADMIN_OQ_NUM_ELEMENTS > 255);
	BUILD_BUG_ON(PQI_ADMIN_IQ_ELEMENT_LENGTH %
		PQI_QUEUE_ELEMENT_LENGTH_ALIGNMENT != 0);
	BUILD_BUG_ON(PQI_ADMIN_OQ_ELEMENT_LENGTH %
		PQI_QUEUE_ELEMENT_LENGTH_ALIGNMENT != 0);
	BUILD_BUG_ON(PQI_OPERATIONAL_IQ_ELEMENT_LENGTH > 1048560);
	BUILD_BUG_ON(PQI_OPERATIONAL_IQ_ELEMENT_LENGTH %
		PQI_QUEUE_ELEMENT_LENGTH_ALIGNMENT != 0);
	BUILD_BUG_ON(PQI_OPERATIONAL_OQ_ELEMENT_LENGTH > 1048560);
	BUILD_BUG_ON(PQI_OPERATIONAL_OQ_ELEMENT_LENGTH %
		PQI_QUEUE_ELEMENT_LENGTH_ALIGNMENT != 0);

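	/*
	 * The slots reserved for internal requests must be strictly fewer
	 * than the outstanding-request limit, in both the normal and the
	 * reduced kdump configuration.
	 */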
	BUILD_BUG_ON(PQI_RESERVED_IO_SLOTS >= PQI_MAX_OUTSTANDING_REQUESTS);
	BUILD_BUG_ON(PQI_RESERVED_IO_SLOTS >=
		PQI_MAX_OUTSTANDING_REQUESTS_KDUMP);
}
