/*
 *    Disk Array driver for HP Smart Array SAS controllers
 *    Copyright (c) 2019-2020 Microchip Technology Inc. and its subsidiaries
 *    Copyright 2016 Microsemi Corporation
 *    Copyright 2014-2015 PMC-Sierra, Inc.
 *    Copyright 2000,2009-2015 Hewlett-Packard Development Company, L.P.
 *
 *    This program is free software; you can redistribute it and/or modify
 *    it under the terms of the GNU General Public License as published by
 *    the Free Software Foundation; version 2 of the License.
 *
 *    This program is distributed in the hope that it will be useful,
 *    but WITHOUT ANY WARRANTY; without even the implied warranty of
 *    MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 *    NON INFRINGEMENT.  See the GNU General Public License for more details.
 *
 *    Questions/Comments/Bugfixes to esc.storagedev@microsemi.com
 *
 */

#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/fs.h>
#include <linux/timer.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/compat.h>
#include <linux/blktrace_api.h>
#include <linux/uaccess.h>
#include <linux/io.h>
#include <linux/dma-mapping.h>
#include <linux/completion.h>
#include <linux/moduleparam.h>
#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_tcq.h>
#include <scsi/scsi_eh.h>
#include <scsi/scsi_transport_sas.h>
#include <scsi/scsi_dbg.h>
#include <linux/cciss_ioctl.h>
#include <linux/string.h>
#include <linux/bitmap.h>
#include <linux/atomic.h>
#include <linux/jiffies.h>
#include <linux/percpu-defs.h>
#include <linux/percpu.h>
#include <linux/unaligned.h>
#include <asm/div64.h>
#include "hpsa_cmd.h"
#include "hpsa.h"

/*
 * HPSA_DRIVER_VERSION must be 3 byte values (0-255) separated by '.'
 * with an optional trailing '-' followed by a byte value (0-255).
 */
#define HPSA_DRIVER_VERSION "3.4.20-200"
#define DRIVER_NAME "HP HPSA Driver (v " HPSA_DRIVER_VERSION ")"
#define HPSA "hpsa"

/* How long to wait for CISS doorbell communication */
#define CLEAR_EVENT_WAIT_INTERVAL 20	/* ms for each msleep() call */
#define MODE_CHANGE_WAIT_INTERVAL 10	/* ms for each msleep() call */
#define MAX_CLEAR_EVENT_WAIT 30000	/* times 20 ms = 600 s */
#define MAX_MODE_CHANGE_WAIT 2000	/* times 10 ms = 20 s */
#define MAX_IOCTL_CONFIG_WAIT 1000

/* define how many times we will try a command because of bus resets */
#define MAX_CMD_RETRIES 3
/* How long to wait before giving up on a command */
#define HPSA_EH_PTRAID_TIMEOUT (240 * HZ)

/* Embedded module documentation macros - see modules.h */
MODULE_AUTHOR("Hewlett-Packard Company");
MODULE_DESCRIPTION("Driver for HP Smart Array Controller version " \
	HPSA_DRIVER_VERSION);
MODULE_VERSION(HPSA_DRIVER_VERSION);
MODULE_LICENSE("GPL");
MODULE_ALIAS("cciss");

static int hpsa_simple_mode;
module_param(hpsa_simple_mode, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(hpsa_simple_mode,
	"Use 'simple mode' rather than 'performant mode'");

/* define the PCI info for the cards we can control */
static const struct pci_device_id hpsa_pci_device_id[] = {
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3241},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3243},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3245},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3247},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3249},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x324A},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x324B},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3233},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x3350},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x3351},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x3352},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x3353},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x3354},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x3355},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x3356},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSH, 0x103c, 0x1920},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSH, 0x103C, 0x1921},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSH, 0x103C, 0x1922},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSH, 0x103C, 0x1923},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSH, 0x103C, 0x1924},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSH, 0x103c, 0x1925},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSH, 0x103C, 0x1926},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSH, 0x103C, 0x1928},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSH, 0x103C, 0x1929},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21BD},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21BE},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21BF},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C0},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C1},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C2},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C3},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C4},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C5},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C6},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C7},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C8},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C9},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21CA},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21CB},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21CC},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21CD},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21CE},
	{PCI_VENDOR_ID_ADAPTEC2, 0x0290, 0x9005, 0x0580},
	{PCI_VENDOR_ID_ADAPTEC2, 0x0290, 0x9005, 0x0581},
	{PCI_VENDOR_ID_ADAPTEC2, 0x0290, 0x9005, 0x0582},
	{PCI_VENDOR_ID_ADAPTEC2, 0x0290, 0x9005, 0x0583},
	{PCI_VENDOR_ID_ADAPTEC2, 0x0290, 0x9005, 0x0584},
	{PCI_VENDOR_ID_ADAPTEC2, 0x0290, 0x9005, 0x0585},
	{PCI_VENDOR_ID_HP_3PAR, 0x0075, 0x1590, 0x0076},
	{PCI_VENDOR_ID_HP_3PAR, 0x0075, 0x1590, 0x0087},
	{PCI_VENDOR_ID_HP_3PAR, 0x0075, 0x1590, 0x007D},
	{PCI_VENDOR_ID_HP_3PAR, 0x0075, 0x1590, 0x0088},
	{PCI_VENDOR_ID_HP, 0x333f, 0x103c, 0x333f},
	{PCI_VENDOR_ID_HP, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID,
		PCI_CLASS_STORAGE_RAID << 8, 0xffff << 8, 0},
	{PCI_VENDOR_ID_COMPAQ, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID,
		PCI_CLASS_STORAGE_RAID << 8, 0xffff << 8, 0},
	{0,}
};

MODULE_DEVICE_TABLE(pci, hpsa_pci_device_id);

/* board_id = Subsystem Device ID & Vendor ID
 * product = Marketing Name for the board
 * access = Address of the struct of function pointers
 */
static struct board_type products[] = {
	{0x40700E11, "Smart Array 5300", &SA5A_access},
	{0x40800E11, "Smart Array 5i", &SA5B_access},
	{0x40820E11, "Smart Array 532", &SA5B_access},
	{0x40830E11, "Smart Array 5312", &SA5B_access},
	{0x409A0E11, "Smart Array 641", &SA5A_access},
	{0x409B0E11, "Smart Array 642", &SA5A_access},
	{0x409C0E11, "Smart Array 6400", &SA5A_access},
	{0x409D0E11, "Smart Array 6400 EM", &SA5A_access},
	{0x40910E11, "Smart Array 6i", &SA5A_access},
	{0x3225103C, "Smart Array P600", &SA5A_access},
	{0x3223103C, "Smart Array P800", &SA5A_access},
	{0x3234103C, "Smart Array P400", &SA5A_access},
	{0x3235103C, "Smart Array P400i", &SA5A_access},
	{0x3211103C, "Smart Array E200i", &SA5A_access},
	{0x3212103C, "Smart Array E200", &SA5A_access},
	{0x3213103C, "Smart Array E200i", &SA5A_access},
	{0x3214103C, "Smart Array E200i", &SA5A_access},
	{0x3215103C, "Smart Array E200i", &SA5A_access},
	{0x3237103C, "Smart Array E500", &SA5A_access},
	{0x323D103C, "Smart Array P700m", &SA5A_access},
	{0x3241103C, "Smart Array P212", &SA5_access},
	{0x3243103C, "Smart Array P410", &SA5_access},
	{0x3245103C, "Smart Array P410i", &SA5_access},
	{0x3247103C, "Smart Array P411", &SA5_access},
	{0x3249103C, "Smart Array P812", &SA5_access},
	{0x324A103C, "Smart Array P712m", &SA5_access},
	{0x324B103C, "Smart Array P711m", &SA5_access},
	{0x3233103C, "HP StorageWorks 1210m", &SA5_access}, /* alias of 333f */
	{0x3350103C, "Smart Array P222", &SA5_access},
	{0x3351103C, "Smart Array P420", &SA5_access},
	{0x3352103C, "Smart Array P421", &SA5_access},
	{0x3353103C, "Smart Array P822", &SA5_access},
	{0x3354103C, "Smart Array P420i", &SA5_access},
	{0x3355103C, "Smart Array P220i", &SA5_access},
	{0x3356103C, "Smart Array P721m", &SA5_access},
	{0x1920103C, "Smart Array P430i", &SA5_access},
	{0x1921103C, "Smart Array P830i", &SA5_access},
	{0x1922103C, "Smart Array P430", &SA5_access},
	{0x1923103C, "Smart Array P431", &SA5_access},
	{0x1924103C, "Smart Array P830", &SA5_access},
	{0x1925103C, "Smart Array P831", &SA5_access},
	{0x1926103C, "Smart Array P731m", &SA5_access},
	{0x1928103C, "Smart Array P230i", &SA5_access},
	{0x1929103C, "Smart Array P530", &SA5_access},
	{0x21BD103C, "Smart Array P244br", &SA5_access},
	{0x21BE103C, "Smart Array P741m", &SA5_access},
	{0x21BF103C, "Smart HBA H240ar", &SA5_access},
	{0x21C0103C, "Smart Array P440ar", &SA5_access},
	{0x21C1103C, "Smart Array P840ar", &SA5_access},
	{0x21C2103C, "Smart Array P440", &SA5_access},
	{0x21C3103C, "Smart Array P441", &SA5_access},
	{0x21C4103C, "Smart Array", &SA5_access},
	{0x21C5103C, "Smart Array P841", &SA5_access},
	{0x21C6103C, "Smart HBA H244br", &SA5_access},
	{0x21C7103C, "Smart HBA H240", &SA5_access},
	{0x21C8103C, "Smart HBA H241", &SA5_access},
	{0x21C9103C, "Smart Array", &SA5_access},
	{0x21CA103C, "Smart Array P246br", &SA5_access},
	{0x21CB103C, "Smart Array P840", &SA5_access},
	{0x21CC103C, "Smart Array", &SA5_access},
	{0x21CD103C, "Smart Array", &SA5_access},
	{0x21CE103C, "Smart HBA", &SA5_access},
	{0x05809005, "SmartHBA-SA", &SA5_access},
	{0x05819005, "SmartHBA-SA 8i", &SA5_access},
	{0x05829005, "SmartHBA-SA 8i8e", &SA5_access},
	{0x05839005, "SmartHBA-SA 8e", &SA5_access},
	{0x05849005, "SmartHBA-SA 16i", &SA5_access},
	{0x05859005, "SmartHBA-SA 4i4e", &SA5_access},
	{0x00761590, "HP Storage P1224 Array Controller", &SA5_access},
	{0x00871590, "HP Storage P1224e Array Controller", &SA5_access},
	{0x007D1590, "HP Storage P1228 Array Controller", &SA5_access},
	{0x00881590, "HP Storage P1228e Array Controller", &SA5_access},
	{0x333f103c, "HP StorageWorks 1210m Array Controller", &SA5_access},
	{0xFFFF103C, "Unknown Smart Array", &SA5_access},
};

static struct scsi_transport_template *hpsa_sas_transport_template;
static int hpsa_add_sas_host(struct ctlr_info *h);
static void hpsa_delete_sas_host(struct ctlr_info *h);
static int hpsa_add_sas_device(struct hpsa_sas_node *hpsa_sas_node,
			struct hpsa_scsi_dev_t *device);
static void hpsa_remove_sas_device(struct hpsa_scsi_dev_t *device);
static struct hpsa_scsi_dev_t
	*hpsa_find_device_by_sas_rphy(struct ctlr_info *h,
		struct sas_rphy *rphy);

#define SCSI_CMD_BUSY ((struct scsi_cmnd *)&hpsa_cmd_busy)
static const struct scsi_cmnd hpsa_cmd_busy;
#define SCSI_CMD_IDLE ((struct scsi_cmnd *)&hpsa_cmd_idle)
static const struct scsi_cmnd hpsa_cmd_idle;
static int number_of_controllers;

static irqreturn_t do_hpsa_intr_intx(int irq, void *dev_id);
static irqreturn_t do_hpsa_intr_msi(int irq, void *dev_id);
static int hpsa_ioctl(struct scsi_device *dev, unsigned int cmd,
		      void __user *arg);
static int hpsa_passthru_ioctl(struct ctlr_info *h,
			       IOCTL_Command_struct *iocommand);
static int hpsa_big_passthru_ioctl(struct ctlr_info *h,
				   BIG_IOCTL_Command_struct *ioc);

#ifdef CONFIG_COMPAT
static int hpsa_compat_ioctl(struct scsi_device *dev, unsigned int cmd,
	void __user *arg);
#endif

static void cmd_free(struct ctlr_info *h, struct CommandList *c);
static struct CommandList *cmd_alloc(struct ctlr_info *h);
static void cmd_tagged_free(struct ctlr_info *h, struct CommandList *c);
static struct CommandList *cmd_tagged_alloc(struct ctlr_info *h,
					    struct scsi_cmnd *scmd);
static int fill_cmd(struct CommandList *c, u8 cmd, struct ctlr_info *h,
	void *buff, size_t size, u16 page_code, unsigned char *scsi3addr,
	int cmd_type);
static void hpsa_free_cmd_pool(struct ctlr_info *h);
#define VPD_PAGE (1 << 8)
#define HPSA_SIMPLE_ERROR_BITS 0x03

static int hpsa_scsi_queue_command(struct Scsi_Host *h, struct scsi_cmnd *cmd);
static void hpsa_scan_start(struct Scsi_Host *);
static int hpsa_scan_finished(struct Scsi_Host *sh,
	unsigned long elapsed_time);
static int hpsa_change_queue_depth(struct scsi_device *sdev, int qdepth);

static int hpsa_eh_device_reset_handler(struct scsi_cmnd *scsicmd);
static int hpsa_sdev_init(struct scsi_device *sdev);
static int hpsa_sdev_configure(struct scsi_device *sdev,
				struct queue_limits *lim);
static void hpsa_sdev_destroy(struct scsi_device *sdev);

static void hpsa_update_scsi_devices(struct ctlr_info *h);
static int check_for_unit_attention(struct ctlr_info *h,
	struct CommandList *c);
static void check_ioctl_unit_attention(struct ctlr_info *h,
	struct CommandList *c);
/* performant mode helper functions */
static void calc_bucket_map(int *bucket, int num_buckets,
	int nsgs, int min_blocks, u32 *bucket_map);
static void hpsa_free_performant_mode(struct ctlr_info *h);
static int hpsa_put_ctlr_into_performant_mode(struct ctlr_info *h);
static inline u32 next_command(struct ctlr_info *h, u8 q);
static int hpsa_find_cfg_addrs(struct pci_dev *pdev, void __iomem *vaddr,
	u32 *cfg_base_addr, u64 *cfg_base_addr_index,
	u64 *cfg_offset);
static int hpsa_pci_find_memory_BAR(struct pci_dev *pdev,
	unsigned long *memory_bar);
static int hpsa_lookup_board_id(struct pci_dev *pdev, u32 *board_id,
	bool *legacy_board);
static int wait_for_device_to_become_ready(struct ctlr_info *h,
	unsigned char lunaddr[],
	int reply_queue);
static int hpsa_wait_for_board_state(struct pci_dev *pdev, void __iomem *vaddr,
	int wait_for_ready);
static inline void finish_cmd(struct CommandList *c);
static int hpsa_wait_for_mode_change_ack(struct ctlr_info *h);
#define BOARD_NOT_READY 0
#define BOARD_READY 1
static void hpsa_drain_accel_commands(struct ctlr_info *h);
static void hpsa_flush_cache(struct ctlr_info *h);
static int hpsa_scsi_ioaccel_queue_command(struct ctlr_info *h,
	struct CommandList *c, u32 ioaccel_handle, u8 *cdb, int cdb_len,
	u8 *scsi3addr, struct hpsa_scsi_dev_t *phys_disk);
static void hpsa_command_resubmit_worker(struct work_struct *work);
static u32 lockup_detected(struct ctlr_info *h);
static int detect_controller_lockup(struct ctlr_info *h);
static void hpsa_disable_rld_caching(struct ctlr_info *h);
static inline int hpsa_scsi_do_report_phys_luns(struct ctlr_info *h,
	struct ReportExtendedLUNdata *buf, int bufsize);
static bool hpsa_vpd_page_supported(struct ctlr_info *h,
	unsigned char scsi3addr[], u8 page);
static int hpsa_luns_changed(struct ctlr_info *h);
static bool hpsa_cmd_dev_match(struct ctlr_info *h, struct CommandList *c,
			       struct hpsa_scsi_dev_t *dev,
			       unsigned char *scsi3addr);

static inline struct ctlr_info *sdev_to_hba(struct scsi_device *sdev)
{
	unsigned long *priv = shost_priv(sdev->host);
	return (struct ctlr_info *) *priv;
}

static inline struct ctlr_info *shost_to_hba(struct Scsi_Host *sh)
{
	unsigned long *priv = shost_priv(sh);
	return (struct ctlr_info *) *priv;
}

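/*
 * Note: c->scsi_cmd normally points at the SCSI command that owns a
 * CommandList slot.  Slots that are not servicing a midlayer command carry
 * one of the sentinel pointers defined above (SCSI_CMD_IDLE for a free slot,
 * SCSI_CMD_BUSY for a slot claimed for driver-internal use; see cmd_alloc()
 * later in the file).  The helper below is simply a pointer comparison
 * against the idle sentinel.
 */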
static inline bool hpsa_is_cmd_idle(struct CommandList *c)
{
	return c->scsi_cmd == SCSI_CMD_IDLE;
}

/* extract sense key, asc, and ascq from sense data.  -1 means invalid. */
static void decode_sense_data(const u8 *sense_data, int sense_data_len,
			u8 *sense_key, u8 *asc, u8 *ascq)
{
	struct scsi_sense_hdr sshdr;
	bool rc;

	*sense_key = -1;
	*asc = -1;
	*ascq = -1;

	if (sense_data_len < 1)
		return;

	rc = scsi_normalize_sense(sense_data, sense_data_len, &sshdr);
	if (rc) {
		*sense_key = sshdr.sense_key;
		*asc = sshdr.asc;
		*ascq = sshdr.ascq;
	}
}

static int check_for_unit_attention(struct ctlr_info *h,
	struct CommandList *c)
{
	u8 sense_key, asc, ascq;
	int sense_len;

	if (c->err_info->SenseLen > sizeof(c->err_info->SenseInfo))
		sense_len = sizeof(c->err_info->SenseInfo);
	else
		sense_len = c->err_info->SenseLen;

	decode_sense_data(c->err_info->SenseInfo, sense_len,
				&sense_key, &asc, &ascq);
	if (sense_key != UNIT_ATTENTION || asc == 0xff)
		return 0;

	switch (asc) {
	case STATE_CHANGED:
		dev_warn(&h->pdev->dev,
			"%s: a state change detected, command retried\n",
			h->devname);
		break;
	case LUN_FAILED:
		dev_warn(&h->pdev->dev,
			"%s: LUN failure detected\n", h->devname);
		break;
	case REPORT_LUNS_CHANGED:
		dev_warn(&h->pdev->dev,
			"%s: report LUN data changed\n", h->devname);
	/*
	 * Note: this REPORT_LUNS_CHANGED condition only occurs on the external
	 * target (array) devices.
	 */
		break;
	case POWER_OR_RESET:
		dev_warn(&h->pdev->dev,
			"%s: a power on or device reset detected\n",
			h->devname);
		break;
	case UNIT_ATTENTION_CLEARED:
		dev_warn(&h->pdev->dev,
			"%s: unit attention cleared by another initiator\n",
			h->devname);
		break;
	default:
		dev_warn(&h->pdev->dev,
			"%s: unknown unit attention detected\n",
			h->devname);
		break;
	}
	return 1;
}

static int check_for_busy(struct ctlr_info *h, struct CommandList *c)
{
	if (c->err_info->CommandStatus != CMD_TARGET_STATUS ||
		(c->err_info->ScsiStatus != SAM_STAT_BUSY &&
		 c->err_info->ScsiStatus != SAM_STAT_TASK_SET_FULL))
		return 0;
	dev_warn(&h->pdev->dev, HPSA "device busy");
	return 1;
}

static u32 lockup_detected(struct ctlr_info *h);
static ssize_t host_show_lockup_detected(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	int ld;
	struct ctlr_info *h;
	struct Scsi_Host *shost = class_to_shost(dev);

	h = shost_to_hba(shost);
	ld = lockup_detected(h);

	return sprintf(buf, "ld=%d\n", ld);
}

static ssize_t host_store_hp_ssd_smart_path_status(struct device *dev,
					 struct device_attribute *attr,
					 const char *buf, size_t count)
{
	int status, len;
	struct ctlr_info *h;
	struct Scsi_Host *shost = class_to_shost(dev);
	char tmpbuf[10];

	if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO))
		return -EACCES;
	len = count > sizeof(tmpbuf) - 1 ? sizeof(tmpbuf) - 1 : count;
	strncpy(tmpbuf, buf, len);
	tmpbuf[len] = '\0';
	if (sscanf(tmpbuf, "%d", &status) != 1)
		return -EINVAL;
	h = shost_to_hba(shost);
	h->acciopath_status = !!status;
	dev_warn(&h->pdev->dev,
		"hpsa: HP SSD Smart Path %s via sysfs update.\n",
		h->acciopath_status ? "enabled" : "disabled");
	return count;
}

static ssize_t host_store_raid_offload_debug(struct device *dev,
					 struct device_attribute *attr,
					 const char *buf, size_t count)
{
	int debug_level, len;
	struct ctlr_info *h;
	struct Scsi_Host *shost = class_to_shost(dev);
	char tmpbuf[10];

	if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO))
		return -EACCES;
	len = count > sizeof(tmpbuf) - 1 ? sizeof(tmpbuf) - 1 : count;
	strncpy(tmpbuf, buf, len);
	tmpbuf[len] = '\0';
	if (sscanf(tmpbuf, "%d", &debug_level) != 1)
		return -EINVAL;
	if (debug_level < 0)
		debug_level = 0;
	h = shost_to_hba(shost);
	h->raid_offload_debug = debug_level;
	dev_warn(&h->pdev->dev, "hpsa: Set raid_offload_debug level = %d\n",
		h->raid_offload_debug);
	return count;
}

static ssize_t host_store_rescan(struct device *dev,
				 struct device_attribute *attr,
				 const char *buf, size_t count)
{
	struct ctlr_info *h;
	struct Scsi_Host *shost = class_to_shost(dev);
	h = shost_to_hba(shost);
	hpsa_scan_start(h->scsi_host);
	return count;
}

static void hpsa_turn_off_ioaccel_for_device(struct hpsa_scsi_dev_t *device)
{
	device->offload_enabled = 0;
	device->offload_to_be_enabled = 0;
}

static ssize_t host_show_firmware_revision(struct device *dev,
	struct device_attribute *attr, char *buf)
{
	struct ctlr_info *h;
	struct Scsi_Host *shost = class_to_shost(dev);
	unsigned char *fwrev;

	h = shost_to_hba(shost);
	if (!h->hba_inquiry_data)
		return 0;
	fwrev = &h->hba_inquiry_data[32];
	return snprintf(buf, 20, "%c%c%c%c\n",
		fwrev[0], fwrev[1], fwrev[2], fwrev[3]);
}

static ssize_t host_show_commands_outstanding(struct device *dev,
	     struct device_attribute *attr, char *buf)
{
	struct Scsi_Host *shost = class_to_shost(dev);
	struct ctlr_info *h = shost_to_hba(shost);

	return snprintf(buf, 20, "%d\n",
			atomic_read(&h->commands_outstanding));
}

static ssize_t host_show_transport_mode(struct device *dev,
	struct device_attribute *attr, char *buf)
{
	struct ctlr_info *h;
	struct Scsi_Host *shost = class_to_shost(dev);

	h = shost_to_hba(shost);
	return snprintf(buf, 20, "%s\n",
		h->transMethod & CFGTBL_Trans_Performant ?
			"performant" : "simple");
}

static ssize_t host_show_hp_ssd_smart_path_status(struct device *dev,
	struct device_attribute *attr, char *buf)
{
	struct ctlr_info *h;
	struct Scsi_Host *shost = class_to_shost(dev);

	h = shost_to_hba(shost);
	return snprintf(buf, 30, "HP SSD Smart Path %s\n",
		(h->acciopath_status == 1) ? "enabled" : "disabled");
}

/* List of controllers which cannot be hard reset on kexec with reset_devices */
static u32 unresettable_controller[] = {
	0x324a103C, /* Smart Array P712m */
	0x324b103C, /* Smart Array P711m */
	0x3223103C, /* Smart Array P800 */
	0x3234103C, /* Smart Array P400 */
	0x3235103C, /* Smart Array P400i */
	0x3211103C, /* Smart Array E200i */
	0x3212103C, /* Smart Array E200 */
	0x3213103C, /* Smart Array E200i */
	0x3214103C, /* Smart Array E200i */
	0x3215103C, /* Smart Array E200i */
	0x3237103C, /* Smart Array E500 */
	0x323D103C, /* Smart Array P700m */
	0x40800E11, /* Smart Array 5i */
	0x409C0E11, /* Smart Array 6400 */
	0x409D0E11, /* Smart Array 6400 EM */
	0x40700E11, /* Smart Array 5300 */
	0x40820E11, /* Smart Array 532 */
	0x40830E11, /* Smart Array 5312 */
	0x409A0E11, /* Smart Array 641 */
	0x409B0E11, /* Smart Array 642 */
	0x40910E11, /* Smart Array 6i */
};

/* List of controllers which cannot even be soft reset */
static u32 soft_unresettable_controller[] = {
	0x40800E11, /* Smart Array 5i */
	0x40700E11, /* Smart Array 5300 */
	0x40820E11, /* Smart Array 532 */
	0x40830E11, /* Smart Array 5312 */
	0x409A0E11, /* Smart Array 641 */
	0x409B0E11, /* Smart Array 642 */
	0x40910E11, /* Smart Array 6i */
	/* Exclude 640x boards.  These are two pci devices in one slot
	 * which share a battery backed cache module.  One controls the
	 * cache, the other accesses the cache through the one that controls
	 * it.  If we reset the one controlling the cache, the other will
	 * likely not be happy.  Just forbid resetting this conjoined mess.
	 * The 640x isn't really supported by hpsa anyway.
	 */
	0x409C0E11, /* Smart Array 6400 */
	0x409D0E11, /* Smart Array 6400 EM */
};

static int board_id_in_array(u32 a[], int nelems, u32 board_id)
{
	int i;

	for (i = 0; i < nelems; i++)
		if (a[i] == board_id)
			return 1;
	return 0;
}

static int ctlr_is_hard_resettable(u32 board_id)
{
	return !board_id_in_array(unresettable_controller,
			ARRAY_SIZE(unresettable_controller), board_id);
}

static int ctlr_is_soft_resettable(u32 board_id)
{
	return !board_id_in_array(soft_unresettable_controller,
			ARRAY_SIZE(soft_unresettable_controller), board_id);
}

static int ctlr_is_resettable(u32 board_id)
{
	return ctlr_is_hard_resettable(board_id) ||
		ctlr_is_soft_resettable(board_id);
}

static ssize_t host_show_resettable(struct device *dev,
	struct device_attribute *attr, char *buf)
{
	struct ctlr_info *h;
	struct Scsi_Host *shost = class_to_shost(dev);

	h = shost_to_hba(shost);
	return snprintf(buf, 20, "%d\n", ctlr_is_resettable(h->board_id));
}

static inline int is_logical_dev_addr_mode(unsigned char scsi3addr[])
{
	return (scsi3addr[3] & 0xC0) == 0x40;
}

static const char * const raid_label[] = { "0", "4", "1(+0)", "5", "5+1", "6",
	"1(+0)ADM", "UNKNOWN", "PHYS DRV"
};
#define HPSA_RAID_0	0
#define HPSA_RAID_4	1
#define HPSA_RAID_1	2	/* also used for RAID 10 */
#define HPSA_RAID_5	3	/* also used for RAID 50 */
#define HPSA_RAID_51	4
#define HPSA_RAID_6	5	/* also used for RAID 60 */
#define HPSA_RAID_ADM	6	/* also used for RAID 1+0 ADM */
#define RAID_UNKNOWN (ARRAY_SIZE(raid_label) - 2)
#define PHYSICAL_DRIVE (ARRAY_SIZE(raid_label) - 1)

static inline bool is_logical_device(struct hpsa_scsi_dev_t *device)
{
	return !device->physical_device;
}

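/*
 * Illustrative example of the raid_label[] table and HPSA_RAID_* defines
 * above: a logical volume reporting raid_level == HPSA_RAID_1 (2) is shown
 * by raid_level_show() below as "RAID 1(+0)"; any value greater than
 * RAID_UNKNOWN is clamped to RAID_UNKNOWN ("UNKNOWN") before indexing the
 * table.
 */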
static ssize_t raid_level_show(struct device *dev,
	     struct device_attribute *attr, char *buf)
{
	ssize_t l = 0;
	unsigned char rlevel;
	struct ctlr_info *h;
	struct scsi_device *sdev;
	struct hpsa_scsi_dev_t *hdev;
	unsigned long flags;

	sdev = to_scsi_device(dev);
	h = sdev_to_hba(sdev);
	spin_lock_irqsave(&h->lock, flags);
	hdev = sdev->hostdata;
	if (!hdev) {
		spin_unlock_irqrestore(&h->lock, flags);
		return -ENODEV;
	}

	/* Is this even a logical drive? */
	if (!is_logical_device(hdev)) {
		spin_unlock_irqrestore(&h->lock, flags);
		l = snprintf(buf, PAGE_SIZE, "N/A\n");
		return l;
	}

	rlevel = hdev->raid_level;
	spin_unlock_irqrestore(&h->lock, flags);
	if (rlevel > RAID_UNKNOWN)
		rlevel = RAID_UNKNOWN;
	l = snprintf(buf, PAGE_SIZE, "RAID %s\n", raid_label[rlevel]);
	return l;
}

static ssize_t lunid_show(struct device *dev,
	     struct device_attribute *attr, char *buf)
{
	struct ctlr_info *h;
	struct scsi_device *sdev;
	struct hpsa_scsi_dev_t *hdev;
	unsigned long flags;
	unsigned char lunid[8];

	sdev = to_scsi_device(dev);
	h = sdev_to_hba(sdev);
	spin_lock_irqsave(&h->lock, flags);
	hdev = sdev->hostdata;
	if (!hdev) {
		spin_unlock_irqrestore(&h->lock, flags);
		return -ENODEV;
	}
	memcpy(lunid, hdev->scsi3addr, sizeof(lunid));
	spin_unlock_irqrestore(&h->lock, flags);
	return snprintf(buf, 20, "0x%8phN\n", lunid);
}

static ssize_t unique_id_show(struct device *dev,
	     struct device_attribute *attr, char *buf)
{
	struct ctlr_info *h;
	struct scsi_device *sdev;
	struct hpsa_scsi_dev_t *hdev;
	unsigned long flags;
	unsigned char sn[16];

	sdev = to_scsi_device(dev);
	h = sdev_to_hba(sdev);
	spin_lock_irqsave(&h->lock, flags);
	hdev = sdev->hostdata;
	if (!hdev) {
		spin_unlock_irqrestore(&h->lock, flags);
		return -ENODEV;
	}
	memcpy(sn, hdev->device_id, sizeof(sn));
	spin_unlock_irqrestore(&h->lock, flags);
	return snprintf(buf, 16 * 2 + 2,
			"%02X%02X%02X%02X%02X%02X%02X%02X"
			"%02X%02X%02X%02X%02X%02X%02X%02X\n",
			sn[0], sn[1], sn[2], sn[3],
			sn[4], sn[5], sn[6], sn[7],
			sn[8], sn[9], sn[10], sn[11],
			sn[12], sn[13], sn[14], sn[15]);
}

static ssize_t sas_address_show(struct device *dev,
	      struct device_attribute *attr, char *buf)
{
	struct ctlr_info *h;
	struct scsi_device *sdev;
	struct hpsa_scsi_dev_t *hdev;
	unsigned long flags;
	u64 sas_address;

	sdev = to_scsi_device(dev);
	h = sdev_to_hba(sdev);
	spin_lock_irqsave(&h->lock, flags);
	hdev = sdev->hostdata;
	if (!hdev || is_logical_device(hdev) || !hdev->expose_device) {
		spin_unlock_irqrestore(&h->lock, flags);
		return -ENODEV;
	}
	sas_address = hdev->sas_address;
	spin_unlock_irqrestore(&h->lock, flags);

	return snprintf(buf, PAGE_SIZE, "0x%016llx\n", sas_address);
}

static ssize_t host_show_hp_ssd_smart_path_enabled(struct device *dev,
	     struct device_attribute *attr, char *buf)
{
	struct ctlr_info *h;
	struct scsi_device *sdev;
	struct hpsa_scsi_dev_t *hdev;
	unsigned long flags;
	int offload_enabled;

	sdev = to_scsi_device(dev);
	h = sdev_to_hba(sdev);
	spin_lock_irqsave(&h->lock, flags);
	hdev = sdev->hostdata;
	if (!hdev) {
		spin_unlock_irqrestore(&h->lock, flags);
		return -ENODEV;
	}
	offload_enabled = hdev->offload_enabled;
	spin_unlock_irqrestore(&h->lock, flags);

	if (hdev->devtype == TYPE_DISK || hdev->devtype == TYPE_ZBC)
		return snprintf(buf, 20, "%d\n", offload_enabled);
	else
		return snprintf(buf, 40, "%s\n",
				"Not applicable for a controller");
}

#define MAX_PATHS 8
static ssize_t path_info_show(struct device *dev,
	     struct device_attribute *attr, char *buf)
{
	struct ctlr_info *h;
	struct scsi_device *sdev;
	struct hpsa_scsi_dev_t *hdev;
	unsigned long flags;
	int i;
	int output_len = 0;
	u8 box;
	u8 bay;
	u8 path_map_index = 0;
	char *active;
	unsigned char phys_connector[2];

	sdev = to_scsi_device(dev);
	h = sdev_to_hba(sdev);
	spin_lock_irqsave(&h->devlock, flags);
	hdev = sdev->hostdata;
	if (!hdev) {
		spin_unlock_irqrestore(&h->devlock, flags);
		return -ENODEV;
	}

	bay = hdev->bay;
	for (i = 0; i < MAX_PATHS; i++) {
		path_map_index = 1<<i;
		if (i == hdev->active_path_index)
			active = "Active";
		else if (hdev->path_map & path_map_index)
			active = "Inactive";
		else
			continue;

		output_len += scnprintf(buf + output_len,
				PAGE_SIZE - output_len,
				"[%d:%d:%d:%d] %20.20s ",
				h->scsi_host->host_no,
				hdev->bus, hdev->target, hdev->lun,
				scsi_device_type(hdev->devtype));

		if (hdev->devtype == TYPE_RAID || is_logical_device(hdev)) {
			output_len += scnprintf(buf + output_len,
						PAGE_SIZE - output_len,
						"%s\n", active);
			continue;
		}

		box = hdev->box[i];
		memcpy(&phys_connector, &hdev->phys_connector[i],
			sizeof(phys_connector));
		if (phys_connector[0] < '0')
			phys_connector[0] = '0';
		if (phys_connector[1] < '0')
			phys_connector[1] = '0';
		output_len += scnprintf(buf + output_len,
				PAGE_SIZE - output_len,
				"PORT: %.2s ",
				phys_connector);
		if ((hdev->devtype == TYPE_DISK || hdev->devtype == TYPE_ZBC) &&
			hdev->expose_device) {
			if (box == 0 || box == 0xFF) {
				output_len += scnprintf(buf + output_len,
					PAGE_SIZE - output_len,
					"BAY: %hhu %s\n",
					bay, active);
			} else {
				output_len += scnprintf(buf + output_len,
					PAGE_SIZE - output_len,
					"BOX: %hhu BAY: %hhu %s\n",
					box, bay, active);
			}
		} else if (box != 0 && box != 0xFF) {
			output_len += scnprintf(buf + output_len,
				PAGE_SIZE - output_len, "BOX: %hhu %s\n",
				box, active);
		} else
			output_len += scnprintf(buf + output_len,
				PAGE_SIZE - output_len, "%s\n", active);
	}

	spin_unlock_irqrestore(&h->devlock, flags);
	return output_len;
}

static ssize_t host_show_ctlr_num(struct device *dev,
	struct device_attribute *attr, char *buf)
{
	struct ctlr_info *h;
	struct Scsi_Host *shost = class_to_shost(dev);

	h = shost_to_hba(shost);
	return snprintf(buf, 20, "%d\n", h->ctlr);
}

static ssize_t host_show_legacy_board(struct device *dev,
	struct device_attribute *attr, char *buf)
{
	struct ctlr_info *h;
	struct Scsi_Host *shost = class_to_shost(dev);

	h = shost_to_hba(shost);
	return snprintf(buf, 20, "%d\n", h->legacy_board ? 1 : 0);
}

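/*
 * The _show/_store helpers above back the sysfs attributes declared below
 * via DEVICE_ATTR().  Host attributes appear under
 * /sys/class/scsi_host/hostN/; for example, ioaccel can be toggled with
 *
 *	echo 1 > /sys/class/scsi_host/hostN/hp_ssd_smart_path_status
 *
 * which host_store_hp_ssd_smart_path_status() maps to h->acciopath_status.
 */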
static DEVICE_ATTR_RO(raid_level);
static DEVICE_ATTR_RO(lunid);
static DEVICE_ATTR_RO(unique_id);
static DEVICE_ATTR(rescan, S_IWUSR, NULL, host_store_rescan);
static DEVICE_ATTR_RO(sas_address);
static DEVICE_ATTR(hp_ssd_smart_path_enabled, S_IRUGO,
			host_show_hp_ssd_smart_path_enabled, NULL);
static DEVICE_ATTR_RO(path_info);
static DEVICE_ATTR(hp_ssd_smart_path_status, S_IWUSR|S_IRUGO|S_IROTH,
		host_show_hp_ssd_smart_path_status,
		host_store_hp_ssd_smart_path_status);
static DEVICE_ATTR(raid_offload_debug, S_IWUSR, NULL,
		host_store_raid_offload_debug);
static DEVICE_ATTR(firmware_revision, S_IRUGO,
	host_show_firmware_revision, NULL);
static DEVICE_ATTR(commands_outstanding, S_IRUGO,
	host_show_commands_outstanding, NULL);
static DEVICE_ATTR(transport_mode, S_IRUGO,
	host_show_transport_mode, NULL);
static DEVICE_ATTR(resettable, S_IRUGO,
	host_show_resettable, NULL);
static DEVICE_ATTR(lockup_detected, S_IRUGO,
	host_show_lockup_detected, NULL);
static DEVICE_ATTR(ctlr_num, S_IRUGO,
	host_show_ctlr_num, NULL);
static DEVICE_ATTR(legacy_board, S_IRUGO,
	host_show_legacy_board, NULL);

static struct attribute *hpsa_sdev_attrs[] = {
	&dev_attr_raid_level.attr,
	&dev_attr_lunid.attr,
	&dev_attr_unique_id.attr,
	&dev_attr_hp_ssd_smart_path_enabled.attr,
	&dev_attr_path_info.attr,
	&dev_attr_sas_address.attr,
	NULL,
};

ATTRIBUTE_GROUPS(hpsa_sdev);

static struct attribute *hpsa_shost_attrs[] = {
	&dev_attr_rescan.attr,
	&dev_attr_firmware_revision.attr,
	&dev_attr_commands_outstanding.attr,
	&dev_attr_transport_mode.attr,
	&dev_attr_resettable.attr,
	&dev_attr_hp_ssd_smart_path_status.attr,
	&dev_attr_raid_offload_debug.attr,
	&dev_attr_lockup_detected.attr,
	&dev_attr_ctlr_num.attr,
	&dev_attr_legacy_board.attr,
	NULL,
};

ATTRIBUTE_GROUPS(hpsa_shost);

#define HPSA_NRESERVED_CMDS	(HPSA_CMDS_RESERVED_FOR_DRIVER +\
				 HPSA_MAX_CONCURRENT_PASSTHRUS)

static const struct scsi_host_template hpsa_driver_template = {
	.module			= THIS_MODULE,
	.name			= HPSA,
	.proc_name		= HPSA,
	.queuecommand		= hpsa_scsi_queue_command,
	.scan_start		= hpsa_scan_start,
	.scan_finished		= hpsa_scan_finished,
	.change_queue_depth	= hpsa_change_queue_depth,
	.this_id		= -1,
	.eh_device_reset_handler = hpsa_eh_device_reset_handler,
	.ioctl			= hpsa_ioctl,
	.sdev_init		= hpsa_sdev_init,
	.sdev_configure		= hpsa_sdev_configure,
	.sdev_destroy		= hpsa_sdev_destroy,
#ifdef CONFIG_COMPAT
	.compat_ioctl		= hpsa_compat_ioctl,
#endif
	.sdev_groups		= hpsa_sdev_groups,
	.shost_groups		= hpsa_shost_groups,
	.max_sectors		= 2048,
	.no_write_same		= 1,
};

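/*
 * Reply queue note: in performant mode each entry in rq->head[] carries a
 * producer toggle in bit 0.  next_command() below treats an entry as new
 * only while that bit matches rq->wraparound, and flips rq->wraparound each
 * time current_entry wraps past h->max_commands, so entries left over from
 * the previous pass around the ring are ignored.
 */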
static inline u32 next_command(struct ctlr_info *h, u8 q)
{
	u32 a;
	struct reply_queue_buffer *rq = &h->reply_queue[q];

	if (h->transMethod & CFGTBL_Trans_io_accel1)
		return h->access.command_completed(h, q);

	if (unlikely(!(h->transMethod & CFGTBL_Trans_Performant)))
		return h->access.command_completed(h, q);

	if ((rq->head[rq->current_entry] & 1) == rq->wraparound) {
		a = rq->head[rq->current_entry];
		rq->current_entry++;
		atomic_dec(&h->commands_outstanding);
	} else {
		a = FIFO_EMPTY;
	}
	/* Check for wraparound */
	if (rq->current_entry == h->max_commands) {
		rq->current_entry = 0;
		rq->wraparound ^= 1;
	}
	return a;
}

/*
 * There are some special bits in the bus address of the
 * command that we have to set for the controller to know
 * how to process the command:
 *
 * Normal performant mode:
 * bit 0: 1 means performant mode, 0 means simple mode.
 * bits 1-3 = block fetch table entry
 * bits 4-6 = command type (== 0)
 *
 * ioaccel1 mode:
 * bit 0 = "performant mode" bit.
 * bits 1-3 = block fetch table entry
 * bits 4-6 = command type (== 110)
 * (command type is needed because ioaccel1 mode
 * commands are submitted through the same register as normal
 * mode commands, so this is how the controller knows whether
 * the command is normal mode or ioaccel1 mode.)
 *
 * ioaccel2 mode:
 * bit 0 = "performant mode" bit.
 * bits 1-4 = block fetch table entry (note extra bit)
 * bits 4-6 = not needed, because ioaccel2 mode has
 * a separate special register for submitting commands.
 */

/*
 * set_performant_mode: Modify the tag for cciss performant
 * set bit 0 for pull model, bits 3-1 for block fetch
 * register number
 */
#define DEFAULT_REPLY_QUEUE (-1)
static void set_performant_mode(struct ctlr_info *h, struct CommandList *c,
					int reply_queue)
{
	if (likely(h->transMethod & CFGTBL_Trans_Performant)) {
		c->busaddr |= 1 | (h->blockFetchTable[c->Header.SGList] << 1);
		if (unlikely(!h->msix_vectors))
			return;
		c->Header.ReplyQueue = reply_queue;
	}
}

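/*
 * Worked example of the tag encoding described above: in normal performant
 * mode, a command whose header lists 3 SG entries gets
 * c->busaddr |= 1 | (h->blockFetchTable[3] << 1), i.e. bit 0 selects
 * performant mode, bits 1-3 carry the block fetch table entry, and the
 * command type bits (4-6) stay 0.
 */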
static void set_ioaccel1_performant_mode(struct ctlr_info *h,
					struct CommandList *c,
					int reply_queue)
{
	struct io_accel1_cmd *cp = &h->ioaccel_cmd_pool[c->cmdindex];

	/*
	 * Tell the controller to post the reply to the queue for this
	 * processor.  This seems to give the best I/O throughput.
	 */
	cp->ReplyQueue = reply_queue;
	/*
	 * Set the bits in the address sent down to include:
	 *  - performant mode bit (bit 0)
	 *  - pull count (bits 1-3)
	 *  - command type (bits 4-6)
	 */
	c->busaddr |= 1 | (h->ioaccel1_blockFetchTable[c->Header.SGList] << 1) |
					IOACCEL1_BUSADDR_CMDTYPE;
}

static void set_ioaccel2_tmf_performant_mode(struct ctlr_info *h,
						struct CommandList *c,
						int reply_queue)
{
	struct hpsa_tmf_struct *cp = (struct hpsa_tmf_struct *)
		&h->ioaccel2_cmd_pool[c->cmdindex];

	/* Tell the controller to post the reply to the queue for this
	 * processor.  This seems to give the best I/O throughput.
	 */
	cp->reply_queue = reply_queue;
	/* Set the bits in the address sent down to include:
	 *  - performant mode bit not used in ioaccel mode 2
	 *  - pull count (bits 0-3)
	 *  - command type isn't needed for ioaccel2
	 */
	c->busaddr |= h->ioaccel2_blockFetchTable[0];
}

static void set_ioaccel2_performant_mode(struct ctlr_info *h,
						struct CommandList *c,
						int reply_queue)
{
	struct io_accel2_cmd *cp = &h->ioaccel2_cmd_pool[c->cmdindex];

	/*
	 * Tell the controller to post the reply to the queue for this
	 * processor.  This seems to give the best I/O throughput.
	 */
	cp->reply_queue = reply_queue;
	/*
	 * Set the bits in the address sent down to include:
	 *  - performant mode bit not used in ioaccel mode 2
	 *  - pull count (bits 0-3)
	 *  - command type isn't needed for ioaccel2
	 */
	c->busaddr |= (h->ioaccel2_blockFetchTable[cp->sg_count]);
}

static int is_firmware_flash_cmd(u8 *cdb)
{
	return cdb[0] == BMIC_WRITE && cdb[6] == BMIC_FLASH_FIRMWARE;
}

/*
 * During firmware flash, the heartbeat register may not update as frequently
 * as it should.  So we dial down lockup detection during firmware flash, and
 * dial it back up when firmware flash completes.
 */
#define HEARTBEAT_SAMPLE_INTERVAL_DURING_FLASH (240 * HZ)
#define HEARTBEAT_SAMPLE_INTERVAL (30 * HZ)
#define HPSA_EVENT_MONITOR_INTERVAL (15 * HZ)
static void dial_down_lockup_detection_during_fw_flash(struct ctlr_info *h,
		struct CommandList *c)
{
	if (!is_firmware_flash_cmd(c->Request.CDB))
		return;
	atomic_inc(&h->firmware_flash_in_progress);
	h->heartbeat_sample_interval = HEARTBEAT_SAMPLE_INTERVAL_DURING_FLASH;
}

static void dial_up_lockup_detection_on_fw_flash_complete(struct ctlr_info *h,
		struct CommandList *c)
{
	if (is_firmware_flash_cmd(c->Request.CDB) &&
		atomic_dec_and_test(&h->firmware_flash_in_progress))
		h->heartbeat_sample_interval = HEARTBEAT_SAMPLE_INTERVAL;
}

static void __enqueue_cmd_and_start_io(struct ctlr_info *h,
	struct CommandList *c, int reply_queue)
{
	dial_down_lockup_detection_during_fw_flash(h, c);
	atomic_inc(&h->commands_outstanding);
	/*
	 * Check to see if the command is being retried.
	 */
	if (c->device && !c->retry_pending)
		atomic_inc(&c->device->commands_outstanding);

	reply_queue = h->reply_map[raw_smp_processor_id()];
	switch (c->cmd_type) {
	case CMD_IOACCEL1:
		set_ioaccel1_performant_mode(h, c, reply_queue);
		writel(c->busaddr, h->vaddr + SA5_REQUEST_PORT_OFFSET);
		break;
	case CMD_IOACCEL2:
		set_ioaccel2_performant_mode(h, c, reply_queue);
		writel(c->busaddr, h->vaddr + IOACCEL2_INBOUND_POSTQ_32);
		break;
	case IOACCEL2_TMF:
		set_ioaccel2_tmf_performant_mode(h, c, reply_queue);
		writel(c->busaddr, h->vaddr + IOACCEL2_INBOUND_POSTQ_32);
		break;
	default:
		set_performant_mode(h, c, reply_queue);
		h->access.submit_command(h, c);
	}
}

static void enqueue_cmd_and_start_io(struct ctlr_info *h, struct CommandList *c)
{
	__enqueue_cmd_and_start_io(h, c, DEFAULT_REPLY_QUEUE);
}

static inline int is_hba_lunid(unsigned char scsi3addr[])
{
	return memcmp(scsi3addr, RAID_CTLR_LUNID, 8) == 0;
}

static inline int is_scsi_rev_5(struct ctlr_info *h)
{
	if (!h->hba_inquiry_data)
		return 0;
	if ((h->hba_inquiry_data[2] & 0x07) == 5)
		return 1;
	return 0;
}

static int hpsa_find_target_lun(struct ctlr_info *h,
	unsigned char scsi3addr[], int bus, int *target, int *lun)
{
	/* finds an unused bus, target, lun for a new physical device
	 * assumes h->devlock is held
	 */
	int i, found = 0;
	DECLARE_BITMAP(lun_taken, HPSA_MAX_DEVICES);

	bitmap_zero(lun_taken, HPSA_MAX_DEVICES);

	for (i = 0; i < h->ndevices; i++) {
		if (h->dev[i]->bus == bus && h->dev[i]->target != -1)
			__set_bit(h->dev[i]->target, lun_taken);
	}

	i = find_first_zero_bit(lun_taken, HPSA_MAX_DEVICES);
	if (i < HPSA_MAX_DEVICES) {
		/* *bus = 1; */
		*target = i;
		*lun = 0;
		found = 1;
	}
	return !found;
}

static void hpsa_show_dev_msg(const char *level, struct ctlr_info *h,
	struct hpsa_scsi_dev_t *dev, char *description)
{
#define LABEL_SIZE 25
	char label[LABEL_SIZE];

	if (h == NULL || h->pdev == NULL || h->scsi_host == NULL)
		return;

	switch (dev->devtype) {
	case TYPE_RAID:
		snprintf(label, LABEL_SIZE, "controller");
		break;
	case TYPE_ENCLOSURE:
		snprintf(label, LABEL_SIZE, "enclosure");
		break;
	case TYPE_DISK:
	case TYPE_ZBC:
		if (dev->external)
			snprintf(label, LABEL_SIZE, "external");
		else if (!is_logical_dev_addr_mode(dev->scsi3addr))
			snprintf(label, LABEL_SIZE, "%s",
				raid_label[PHYSICAL_DRIVE]);
		else
			snprintf(label, LABEL_SIZE, "RAID-%s",
				dev->raid_level > RAID_UNKNOWN ? "?" :
				raid_label[dev->raid_level]);
		break;
	case TYPE_ROM:
		snprintf(label, LABEL_SIZE, "rom");
		break;
	case TYPE_TAPE:
		snprintf(label, LABEL_SIZE, "tape");
		break;
	case TYPE_MEDIUM_CHANGER:
		snprintf(label, LABEL_SIZE, "changer");
		break;
	default:
		snprintf(label, LABEL_SIZE, "UNKNOWN");
		break;
	}

	dev_printk(level, &h->pdev->dev,
			"scsi %d:%d:%d:%d: %s %s %.8s %.16s %s SSDSmartPathCap%c En%c Exp=%d\n",
			h->scsi_host->host_no, dev->bus, dev->target, dev->lun,
			description,
			scsi_device_type(dev->devtype),
			dev->vendor,
			dev->model,
			label,
			dev->offload_config ? '+' : '-',
			dev->offload_to_be_enabled ? '+' : '-',
			dev->expose_device);
}

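/*
 * Worked example for the multi-LUN handling in hpsa_scsi_add_entry() below:
 * a physical device reporting a non-zero LUN carries that logical unit
 * number in byte 4 of its 8-byte address.  The add path zeroes bytes 4 and
 * 5 of both addresses before comparing, so the new LUN inherits bus/target
 * from an already-known LUN of the same device and keeps the firmware's
 * LUN number from byte 4.
 */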
/* Add an entry into h->dev[] array. */
static int hpsa_scsi_add_entry(struct ctlr_info *h,
		struct hpsa_scsi_dev_t *device,
		struct hpsa_scsi_dev_t *added[], int *nadded)
{
	/* assumes h->devlock is held */
	int n = h->ndevices;
	int i;
	unsigned char addr1[8], addr2[8];
	struct hpsa_scsi_dev_t *sd;

	if (n >= HPSA_MAX_DEVICES) {
		dev_err(&h->pdev->dev, "too many devices, some will be "
			"inaccessible.\n");
		return -1;
	}

	/* physical devices do not have lun or target assigned until now. */
	if (device->lun != -1)
		/* Logical device, lun is already assigned. */
		goto lun_assigned;

	/* If this device is a non-zero lun of a multi-lun device
	 * byte 4 of the 8-byte LUN addr will contain the logical
	 * unit no, zero otherwise.
	 */
	if (device->scsi3addr[4] == 0) {
		/* This is not a non-zero lun of a multi-lun device */
		if (hpsa_find_target_lun(h, device->scsi3addr,
			device->bus, &device->target, &device->lun) != 0)
			return -1;
		goto lun_assigned;
	}

	/* This is a non-zero lun of a multi-lun device.
	 * Search through our list and find the device which
	 * has the same 8 byte LUN address, excepting byte 4 and 5.
	 * Assign the same bus and target for this new LUN.
	 * Use the logical unit number from the firmware.
	 */
	memcpy(addr1, device->scsi3addr, 8);
	addr1[4] = 0;
	addr1[5] = 0;
	for (i = 0; i < n; i++) {
		sd = h->dev[i];
		memcpy(addr2, sd->scsi3addr, 8);
		addr2[4] = 0;
		addr2[5] = 0;
		/* differ only in byte 4 and 5? */
		if (memcmp(addr1, addr2, 8) == 0) {
			device->bus = sd->bus;
			device->target = sd->target;
			device->lun = device->scsi3addr[4];
			break;
		}
	}
	if (device->lun == -1) {
		dev_warn(&h->pdev->dev, "physical device with no LUN=0,"
			" suspect firmware bug or unsupported hardware "
			"configuration.\n");
		return -1;
	}

lun_assigned:

	h->dev[n] = device;
	h->ndevices++;
	added[*nadded] = device;
	(*nadded)++;
	hpsa_show_dev_msg(KERN_INFO, h, device,
		device->expose_device ? "added" : "masked");
	return 0;
}

/*
 * Called during a scan operation.
 *
 * Update an entry in h->dev[] array.
 */
static void hpsa_scsi_update_entry(struct ctlr_info *h,
	int entry, struct hpsa_scsi_dev_t *new_entry)
{
	/* assumes h->devlock is held */
	BUG_ON(entry < 0 || entry >= HPSA_MAX_DEVICES);

	/* Raid level changed. */
	h->dev[entry]->raid_level = new_entry->raid_level;

	/*
	 * ioaccel_handle may have changed for a dual domain disk
	 */
	h->dev[entry]->ioaccel_handle = new_entry->ioaccel_handle;

	/* Raid offload parameters changed.  Careful about the ordering. */
	if (new_entry->offload_config && new_entry->offload_to_be_enabled) {
		/*
		 * if drive is newly offload_enabled, we want to copy the
		 * raid map data first.  If previously offload_enabled and
		 * offload_config were set, raid map data had better be
		 * the same as it was before. If raid map data has changed
		 * then it had better be the case that
		 * h->dev[entry]->offload_enabled is currently 0.
		 */
		h->dev[entry]->raid_map = new_entry->raid_map;
		h->dev[entry]->ioaccel_handle = new_entry->ioaccel_handle;
	}
	if (new_entry->offload_to_be_enabled) {
		h->dev[entry]->ioaccel_handle = new_entry->ioaccel_handle;
		wmb(); /* set ioaccel_handle *before* hba_ioaccel_enabled */
	}
	h->dev[entry]->hba_ioaccel_enabled = new_entry->hba_ioaccel_enabled;
	h->dev[entry]->offload_config = new_entry->offload_config;
	h->dev[entry]->offload_to_mirror = new_entry->offload_to_mirror;
	h->dev[entry]->queue_depth = new_entry->queue_depth;

	/*
	 * We can turn off ioaccel offload now, but need to delay turning
	 * ioaccel on until we can update h->dev[entry]->phys_disk[], but we
	 * can't do that until all the devices are updated.
	 */
	h->dev[entry]->offload_to_be_enabled = new_entry->offload_to_be_enabled;

	/*
	 * turn ioaccel off immediately if told to do so.
	 */
	if (!new_entry->offload_to_be_enabled)
		h->dev[entry]->offload_enabled = 0;

	hpsa_show_dev_msg(KERN_INFO, h, h->dev[entry], "updated");
}

/* Replace an entry from h->dev[] array. */
static void hpsa_scsi_replace_entry(struct ctlr_info *h,
	int entry, struct hpsa_scsi_dev_t *new_entry,
	struct hpsa_scsi_dev_t *added[], int *nadded,
	struct hpsa_scsi_dev_t *removed[], int *nremoved)
{
	/* assumes h->devlock is held */
	BUG_ON(entry < 0 || entry >= HPSA_MAX_DEVICES);
	removed[*nremoved] = h->dev[entry];
	(*nremoved)++;

	/*
	 * New physical devices won't have target/lun assigned yet
	 * so we need to preserve the values in the slot we are replacing.
	 */
	if (new_entry->target == -1) {
		new_entry->target = h->dev[entry]->target;
		new_entry->lun = h->dev[entry]->lun;
	}

	h->dev[entry] = new_entry;
	added[*nadded] = new_entry;
	(*nadded)++;

	hpsa_show_dev_msg(KERN_INFO, h, new_entry, "replaced");
}

/* Remove an entry from h->dev[] array. */
static void hpsa_scsi_remove_entry(struct ctlr_info *h, int entry,
	struct hpsa_scsi_dev_t *removed[], int *nremoved)
{
	/* assumes h->devlock is held */
	int i;
	struct hpsa_scsi_dev_t *sd;

	BUG_ON(entry < 0 || entry >= HPSA_MAX_DEVICES);

	sd = h->dev[entry];
	removed[*nremoved] = h->dev[entry];
	(*nremoved)++;

	for (i = entry; i < h->ndevices-1; i++)
		h->dev[i] = h->dev[i+1];
	h->ndevices--;
	hpsa_show_dev_msg(KERN_INFO, h, sd, "removed");
}

#define SCSI3ADDR_EQ(a, b) ( \
	(a)[7] == (b)[7] && \
	(a)[6] == (b)[6] && \
	(a)[5] == (b)[5] && \
	(a)[4] == (b)[4] && \
	(a)[3] == (b)[3] && \
	(a)[2] == (b)[2] && \
	(a)[1] == (b)[1] && \
	(a)[0] == (b)[0])

static void fixup_botched_add(struct ctlr_info *h,
	struct hpsa_scsi_dev_t *added)
{
	/* called when scsi_add_device fails in order to re-adjust
	 * h->dev[] to match the mid layer's view.
	 */
	unsigned long flags;
	int i, j;

	spin_lock_irqsave(&h->lock, flags);
	for (i = 0; i < h->ndevices; i++) {
		if (h->dev[i] == added) {
			for (j = i; j < h->ndevices-1; j++)
				h->dev[j] = h->dev[j+1];
			h->ndevices--;
			break;
		}
	}
	spin_unlock_irqrestore(&h->lock, flags);
	kfree(added);
}

static inline int device_is_the_same(struct hpsa_scsi_dev_t *dev1,
	struct hpsa_scsi_dev_t *dev2)
{
	/* we compare everything except lun and target as these
	 * are not yet assigned.  Compare parts likely
	 * to differ first
	 */
	if (memcmp(dev1->scsi3addr, dev2->scsi3addr,
		sizeof(dev1->scsi3addr)) != 0)
		return 0;
	if (memcmp(dev1->device_id, dev2->device_id,
		sizeof(dev1->device_id)) != 0)
		return 0;
	if (memcmp(dev1->model, dev2->model, sizeof(dev1->model)) != 0)
		return 0;
	if (memcmp(dev1->vendor, dev2->vendor, sizeof(dev1->vendor)) != 0)
		return 0;
	if (dev1->devtype != dev2->devtype)
		return 0;
	if (dev1->bus != dev2->bus)
		return 0;
	return 1;
}

static inline int device_updated(struct hpsa_scsi_dev_t *dev1,
	struct hpsa_scsi_dev_t *dev2)
{
	/* Device attributes that can change, but don't mean
	 * that the device is a different device, nor that the OS
	 * needs to be told anything about the change.
	 */
	if (dev1->raid_level != dev2->raid_level)
		return 1;
	if (dev1->offload_config != dev2->offload_config)
		return 1;
	if (dev1->offload_to_be_enabled != dev2->offload_to_be_enabled)
		return 1;
	if (!is_logical_dev_addr_mode(dev1->scsi3addr))
		if (dev1->queue_depth != dev2->queue_depth)
			return 1;
	/*
	 * This can happen for dual domain devices. An active
	 * path change causes the ioaccel handle to change
	 *
	 * for example note the handle differences between p0 and p1
	 * Device                    WWN               ,WWN hash,Handle
	 * D016 p0|0x3 [02]P2E:01:01,0x5000C5005FC4DACA,0x9B5616,0x01030003
	 *      p1                   0x5000C5005FC4DAC9,0x6798C0,0x00040004
	 */
	if (dev1->ioaccel_handle != dev2->ioaccel_handle)
		return 1;
	return 0;
}

/* Find needle in haystack.  If exact match found, return DEVICE_SAME,
 * and return needle location in *index.  If scsi3addr matches, but not
 * vendor, model, serial num, etc. return DEVICE_CHANGED, and return needle
 * location in *index.
 * In the case of a minor device attribute change, such as RAID level, just
 * return DEVICE_UPDATED, along with the updated device's location in index.
 * If needle not found, return DEVICE_NOT_FOUND.
 */
static int hpsa_scsi_find_entry(struct hpsa_scsi_dev_t *needle,
	struct hpsa_scsi_dev_t *haystack[], int haystack_size,
	int *index)
{
	int i;
#define DEVICE_NOT_FOUND 0
#define DEVICE_CHANGED 1
#define DEVICE_SAME 2
#define DEVICE_UPDATED 3
	if (needle == NULL)
		return DEVICE_NOT_FOUND;

	for (i = 0; i < haystack_size; i++) {
		if (haystack[i] == NULL) /* previously removed. */
			continue;
		if (SCSI3ADDR_EQ(needle->scsi3addr, haystack[i]->scsi3addr)) {
			*index = i;
			if (device_is_the_same(needle, haystack[i])) {
				if (device_updated(needle, haystack[i]))
					return DEVICE_UPDATED;
				return DEVICE_SAME;
			} else {
				/* Keep offline devices offline */
				if (needle->volume_offline)
					return DEVICE_NOT_FOUND;
				return DEVICE_CHANGED;
			}
		}
	}
	*index = -1;
	return DEVICE_NOT_FOUND;
}

static void hpsa_monitor_offline_device(struct ctlr_info *h,
	unsigned char scsi3addr[])
{
	struct offline_device_entry *device;
	unsigned long flags;

	/* Check to see if device is already on the list */
	spin_lock_irqsave(&h->offline_device_lock, flags);
	list_for_each_entry(device, &h->offline_device_list, offline_list) {
		if (memcmp(device->scsi3addr, scsi3addr,
			sizeof(device->scsi3addr)) == 0) {
			spin_unlock_irqrestore(&h->offline_device_lock, flags);
			return;
		}
	}
	spin_unlock_irqrestore(&h->offline_device_lock, flags);

	/* Device is not on the list, add it. */
	device = kmalloc(sizeof(*device), GFP_KERNEL);
	if (!device)
		return;

	memcpy(device->scsi3addr, scsi3addr, sizeof(device->scsi3addr));
	spin_lock_irqsave(&h->offline_device_lock, flags);
	list_add_tail(&device->offline_list, &h->offline_device_list);
	spin_unlock_irqrestore(&h->offline_device_lock, flags);
}

/* Print a message explaining various offline volume states */
static void hpsa_show_volume_status(struct ctlr_info *h,
	struct hpsa_scsi_dev_t *sd)
{
	if (sd->volume_offline == HPSA_VPD_LV_STATUS_UNSUPPORTED)
		dev_info(&h->pdev->dev,
			"C%d:B%d:T%d:L%d Volume status is not available through vital product data pages.\n",
			h->scsi_host->host_no,
			sd->bus, sd->target, sd->lun);
	switch (sd->volume_offline) {
	case HPSA_LV_OK:
		break;
	case HPSA_LV_UNDERGOING_ERASE:
		dev_info(&h->pdev->dev,
			"C%d:B%d:T%d:L%d Volume is undergoing background erase process.\n",
			h->scsi_host->host_no,
			sd->bus, sd->target, sd->lun);
		break;
	case HPSA_LV_NOT_AVAILABLE:
		dev_info(&h->pdev->dev,
			"C%d:B%d:T%d:L%d Volume is waiting for transforming volume.\n",
			h->scsi_host->host_no,
			sd->bus, sd->target, sd->lun);
		break;
	case HPSA_LV_UNDERGOING_RPI:
		dev_info(&h->pdev->dev,
			"C%d:B%d:T%d:L%d Volume is undergoing rapid parity init.\n",
			h->scsi_host->host_no,
			sd->bus, sd->target, sd->lun);
		break;
	case HPSA_LV_PENDING_RPI:
		dev_info(&h->pdev->dev,
			"C%d:B%d:T%d:L%d Volume is queued for rapid parity initialization process.\n",
			h->scsi_host->host_no,
			sd->bus, sd->target, sd->lun);
		break;
	case HPSA_LV_ENCRYPTED_NO_KEY:
		dev_info(&h->pdev->dev,
			"C%d:B%d:T%d:L%d Volume is encrypted and cannot be accessed because key is not present.\n",
			h->scsi_host->host_no,
			sd->bus, sd->target, sd->lun);
		break;
	case HPSA_LV_PLAINTEXT_IN_ENCRYPT_ONLY_CONTROLLER:
		dev_info(&h->pdev->dev,
			"C%d:B%d:T%d:L%d Volume is not encrypted and cannot be accessed because controller is in encryption-only mode.\n",
			h->scsi_host->host_no,
			sd->bus, sd->target, sd->lun);
		break;
	case HPSA_LV_UNDERGOING_ENCRYPTION:
		dev_info(&h->pdev->dev,
			"C%d:B%d:T%d:L%d Volume is undergoing encryption process.\n",
			h->scsi_host->host_no,
			sd->bus, sd->target, sd->lun);
		break;
	case HPSA_LV_UNDERGOING_ENCRYPTION_REKEYING:
		dev_info(&h->pdev->dev,
			"C%d:B%d:T%d:L%d Volume is undergoing encryption re-keying process.\n",
			h->scsi_host->host_no,
			sd->bus, sd->target, sd->lun);
		break;
	case HPSA_LV_ENCRYPTED_IN_NON_ENCRYPTED_CONTROLLER:
		dev_info(&h->pdev->dev,
			"C%d:B%d:T%d:L%d Volume is encrypted and cannot be accessed because controller does not have encryption enabled.\n",
			h->scsi_host->host_no,
			sd->bus, sd->target, sd->lun);
		break;
	case HPSA_LV_PENDING_ENCRYPTION:
		dev_info(&h->pdev->dev,
			"C%d:B%d:T%d:L%d Volume is pending migration to encrypted state, but process has not started.\n",
			h->scsi_host->host_no,
			sd->bus, sd->target, sd->lun);
		break;
	case HPSA_LV_PENDING_ENCRYPTION_REKEYING:
		dev_info(&h->pdev->dev,
			"C%d:B%d:T%d:L%d Volume is encrypted and is pending encryption rekeying.\n",
			h->scsi_host->host_no,
			sd->bus, sd->target, sd->lun);
		break;
	}
}

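/*
 * Queue depth example for hpsa_figure_phys_disk_ptrs() below (illustrative
 * numbers): an offloaded volume mapped onto 4 physical disks, each with
 * queue_depth 32, ends up with logical_drive->queue_depth =
 * min(h->nr_cmds, 4 * 32) = 128 when h->nr_cmds is larger; as the code
 * notes, this is about right for reads and too high for stripe writes.
 */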
1701 */ 1702 static void hpsa_figure_phys_disk_ptrs(struct ctlr_info *h, 1703 struct hpsa_scsi_dev_t *dev[], int ndevices, 1704 struct hpsa_scsi_dev_t *logical_drive) 1705 { 1706 struct raid_map_data *map = &logical_drive->raid_map; 1707 struct raid_map_disk_data *dd = &map->data[0]; 1708 int i, j; 1709 int total_disks_per_row = le16_to_cpu(map->data_disks_per_row) + 1710 le16_to_cpu(map->metadata_disks_per_row); 1711 int nraid_map_entries = le16_to_cpu(map->row_cnt) * 1712 le16_to_cpu(map->layout_map_count) * 1713 total_disks_per_row; 1714 int nphys_disk = le16_to_cpu(map->layout_map_count) * 1715 total_disks_per_row; 1716 int qdepth; 1717 1718 if (nraid_map_entries > RAID_MAP_MAX_ENTRIES) 1719 nraid_map_entries = RAID_MAP_MAX_ENTRIES; 1720 1721 logical_drive->nphysical_disks = nraid_map_entries; 1722 1723 qdepth = 0; 1724 for (i = 0; i < nraid_map_entries; i++) { 1725 logical_drive->phys_disk[i] = NULL; 1726 if (!logical_drive->offload_config) 1727 continue; 1728 for (j = 0; j < ndevices; j++) { 1729 if (dev[j] == NULL) 1730 continue; 1731 if (dev[j]->devtype != TYPE_DISK && 1732 dev[j]->devtype != TYPE_ZBC) 1733 continue; 1734 if (is_logical_device(dev[j])) 1735 continue; 1736 if (dev[j]->ioaccel_handle != dd[i].ioaccel_handle) 1737 continue; 1738 1739 logical_drive->phys_disk[i] = dev[j]; 1740 if (i < nphys_disk) 1741 qdepth = min(h->nr_cmds, qdepth + 1742 logical_drive->phys_disk[i]->queue_depth); 1743 break; 1744 } 1745 1746 /* 1747 * This can happen if a physical drive is removed and 1748 * the logical drive is degraded. In that case, the RAID 1749 * map data will refer to a physical disk which isn't actually 1750 * present. And in that case offload_enabled should already 1751 * be 0, but we'll turn it off here just in case 1752 */ 1753 if (!logical_drive->phys_disk[i]) { 1754 dev_warn(&h->pdev->dev, 1755 "%s: [%d:%d:%d:%d] A phys disk component of LV is missing, turning off offload_enabled for LV.\n", 1756 __func__, 1757 h->scsi_host->host_no, logical_drive->bus, 1758 logical_drive->target, logical_drive->lun); 1759 hpsa_turn_off_ioaccel_for_device(logical_drive); 1760 logical_drive->queue_depth = 8; 1761 } 1762 } 1763 if (nraid_map_entries) 1764 /* 1765 * This is correct for reads, too high for full stripe writes, 1766 * way too high for partial stripe writes 1767 */ 1768 logical_drive->queue_depth = qdepth; 1769 else { 1770 if (logical_drive->external) 1771 logical_drive->queue_depth = EXTERNAL_QD; 1772 else 1773 logical_drive->queue_depth = h->nr_cmds; 1774 } 1775 } 1776 1777 static void hpsa_update_log_drive_phys_drive_ptrs(struct ctlr_info *h, 1778 struct hpsa_scsi_dev_t *dev[], int ndevices) 1779 { 1780 int i; 1781 1782 for (i = 0; i < ndevices; i++) { 1783 if (dev[i] == NULL) 1784 continue; 1785 if (dev[i]->devtype != TYPE_DISK && 1786 dev[i]->devtype != TYPE_ZBC) 1787 continue; 1788 if (!is_logical_device(dev[i])) 1789 continue; 1790 1791 /* 1792 * If offload is currently enabled, the RAID map and 1793 * phys_disk[] assignment *better* not be changing 1794 * because we would be changing ioaccel phys_disk[] pointers 1795 * on an ioaccel volume processing I/O requests. 1796 * 1797 * If an ioaccel volume status changed, initially because it was 1798 * re-configured and thus underwent a transformation, or 1799 * a drive failed, we would have received a state change 1800 * request and ioaccel should have been turned off. When the 1801 * transformation completes, we get another state change 1802 * request to turn ioaccel back on.
In this case, we need 1803 * to update the ioaccel information. 1804 * 1805 * Thus: If it is not currently enabled, but will be after 1806 * the scan completes, make sure the ioaccel pointers 1807 * are up to date. 1808 */ 1809 1810 if (!dev[i]->offload_enabled && dev[i]->offload_to_be_enabled) 1811 hpsa_figure_phys_disk_ptrs(h, dev, ndevices, dev[i]); 1812 } 1813 } 1814 1815 static int hpsa_add_device(struct ctlr_info *h, struct hpsa_scsi_dev_t *device) 1816 { 1817 int rc = 0; 1818 1819 if (!h->scsi_host) 1820 return 1; 1821 1822 if (is_logical_device(device)) /* RAID */ 1823 rc = scsi_add_device(h->scsi_host, device->bus, 1824 device->target, device->lun); 1825 else /* HBA */ 1826 rc = hpsa_add_sas_device(h->sas_host, device); 1827 1828 return rc; 1829 } 1830 1831 static int hpsa_find_outstanding_commands_for_dev(struct ctlr_info *h, 1832 struct hpsa_scsi_dev_t *dev) 1833 { 1834 int i; 1835 int count = 0; 1836 1837 for (i = 0; i < h->nr_cmds; i++) { 1838 struct CommandList *c = h->cmd_pool + i; 1839 int refcount = atomic_inc_return(&c->refcount); 1840 1841 if (refcount > 1 && hpsa_cmd_dev_match(h, c, dev, 1842 dev->scsi3addr)) { 1843 unsigned long flags; 1844 1845 spin_lock_irqsave(&h->lock, flags); /* Implied MB */ 1846 if (!hpsa_is_cmd_idle(c)) 1847 ++count; 1848 spin_unlock_irqrestore(&h->lock, flags); 1849 } 1850 1851 cmd_free(h, c); 1852 } 1853 1854 return count; 1855 } 1856 1857 #define NUM_WAIT 20 1858 static void hpsa_wait_for_outstanding_commands_for_dev(struct ctlr_info *h, 1859 struct hpsa_scsi_dev_t *device) 1860 { 1861 int cmds = 0; 1862 int waits = 0; 1863 int num_wait = NUM_WAIT; 1864 1865 if (device->external) 1866 num_wait = HPSA_EH_PTRAID_TIMEOUT; 1867 1868 while (1) { 1869 cmds = hpsa_find_outstanding_commands_for_dev(h, device); 1870 if (cmds == 0) 1871 break; 1872 if (++waits > num_wait) 1873 break; 1874 msleep(1000); 1875 } 1876 1877 if (waits > num_wait) { 1878 dev_warn(&h->pdev->dev, 1879 "%s: removing device [%d:%d:%d:%d] with %d outstanding commands!\n", 1880 __func__, 1881 h->scsi_host->host_no, 1882 device->bus, device->target, device->lun, cmds); 1883 } 1884 } 1885 1886 static void hpsa_remove_device(struct ctlr_info *h, 1887 struct hpsa_scsi_dev_t *device) 1888 { 1889 struct scsi_device *sdev = NULL; 1890 1891 if (!h->scsi_host) 1892 return; 1893 1894 /* 1895 * Allow for commands to drain 1896 */ 1897 device->removed = 1; 1898 hpsa_wait_for_outstanding_commands_for_dev(h, device); 1899 1900 if (is_logical_device(device)) { /* RAID */ 1901 sdev = scsi_device_lookup(h->scsi_host, device->bus, 1902 device->target, device->lun); 1903 if (sdev) { 1904 scsi_remove_device(sdev); 1905 scsi_device_put(sdev); 1906 } else { 1907 /* 1908 * We don't expect to get here. Future commands 1909 * to this device will get a selection timeout as 1910 * if the device were gone. 1911 */ 1912 hpsa_show_dev_msg(KERN_WARNING, h, device, 1913 "didn't find device for removal."); 1914 } 1915 } else { /* HBA */ 1916 1917 hpsa_remove_sas_device(device); 1918 } 1919 } 1920 1921 static void adjust_hpsa_scsi_table(struct ctlr_info *h, 1922 struct hpsa_scsi_dev_t *sd[], int nsds) 1923 { 1924 /* sd contains scsi3 addresses and devtypes, and inquiry 1925 * data. This function takes what's in sd to be the current 1926 * reality and updates h->dev[] to reflect that reality. 
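 * Two passes are made while holding h->devlock: the first walks h->dev[]
 * and removes, replaces or updates-in-place entries according to what is
 * (or is not) in sd[]; the second walks sd[] and adds anything not already
 * known.  The SCSI midlayer is only told about removals and additions after
 * the lock has been dropped, since scsi_remove_device()/scsi_add_device()
 * may sleep.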
1927 */ 1928 int i, entry, device_change, changes = 0; 1929 struct hpsa_scsi_dev_t *csd; 1930 unsigned long flags; 1931 struct hpsa_scsi_dev_t **added, **removed; 1932 int nadded, nremoved; 1933 1934 /* 1935 * A reset can cause a device status to change 1936 * re-schedule the scan to see what happened. 1937 */ 1938 spin_lock_irqsave(&h->reset_lock, flags); 1939 if (h->reset_in_progress) { 1940 h->drv_req_rescan = 1; 1941 spin_unlock_irqrestore(&h->reset_lock, flags); 1942 return; 1943 } 1944 spin_unlock_irqrestore(&h->reset_lock, flags); 1945 1946 added = kcalloc(HPSA_MAX_DEVICES, sizeof(*added), GFP_KERNEL); 1947 removed = kcalloc(HPSA_MAX_DEVICES, sizeof(*removed), GFP_KERNEL); 1948 1949 if (!added || !removed) { 1950 dev_warn(&h->pdev->dev, "out of memory in " 1951 "adjust_hpsa_scsi_table\n"); 1952 goto free_and_out; 1953 } 1954 1955 spin_lock_irqsave(&h->devlock, flags); 1956 1957 /* find any devices in h->dev[] that are not in 1958 * sd[] and remove them from h->dev[], and for any 1959 * devices which have changed, remove the old device 1960 * info and add the new device info. 1961 * If minor device attributes change, just update 1962 * the existing device structure. 1963 */ 1964 i = 0; 1965 nremoved = 0; 1966 nadded = 0; 1967 while (i < h->ndevices) { 1968 csd = h->dev[i]; 1969 device_change = hpsa_scsi_find_entry(csd, sd, nsds, &entry); 1970 if (device_change == DEVICE_NOT_FOUND) { 1971 changes++; 1972 hpsa_scsi_remove_entry(h, i, removed, &nremoved); 1973 continue; /* remove ^^^, hence i not incremented */ 1974 } else if (device_change == DEVICE_CHANGED) { 1975 changes++; 1976 hpsa_scsi_replace_entry(h, i, sd[entry], 1977 added, &nadded, removed, &nremoved); 1978 /* Set it to NULL to prevent it from being freed 1979 * at the bottom of hpsa_update_scsi_devices() 1980 */ 1981 sd[entry] = NULL; 1982 } else if (device_change == DEVICE_UPDATED) { 1983 hpsa_scsi_update_entry(h, i, sd[entry]); 1984 } 1985 i++; 1986 } 1987 1988 /* Now, make sure every device listed in sd[] is also 1989 * listed in h->dev[], adding them if they aren't found 1990 */ 1991 1992 for (i = 0; i < nsds; i++) { 1993 if (!sd[i]) /* if already added above. */ 1994 continue; 1995 1996 /* Don't add devices which are NOT READY, FORMAT IN PROGRESS 1997 * as the SCSI mid-layer does not handle such devices well. 1998 * It relentlessly loops sending TUR at 3Hz, then READ(10) 1999 * at 160Hz, and prevents the system from coming up. 2000 */ 2001 if (sd[i]->volume_offline) { 2002 hpsa_show_volume_status(h, sd[i]); 2003 hpsa_show_dev_msg(KERN_INFO, h, sd[i], "offline"); 2004 continue; 2005 } 2006 2007 device_change = hpsa_scsi_find_entry(sd[i], h->dev, 2008 h->ndevices, &entry); 2009 if (device_change == DEVICE_NOT_FOUND) { 2010 changes++; 2011 if (hpsa_scsi_add_entry(h, sd[i], added, &nadded) != 0) 2012 break; 2013 sd[i] = NULL; /* prevent from being freed later. */ 2014 } else if (device_change == DEVICE_CHANGED) { 2015 /* should never happen... */ 2016 changes++; 2017 dev_warn(&h->pdev->dev, 2018 "device unexpectedly changed.\n"); 2019 /* but if it does happen, we just ignore that device */ 2020 } 2021 } 2022 hpsa_update_log_drive_phys_drive_ptrs(h, h->dev, h->ndevices); 2023 2024 /* 2025 * Now that h->dev[]->phys_disk[] is coherent, we can enable 2026 * any logical drives that need it enabled. 2027 * 2028 * The raid map should be current by now. 2029 * 2030 * We are updating the device list used for I/O requests. 
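 * This is also the point where the offload_to_be_enabled decision made
 * during the scan (see hpsa_get_ioaccel_status()) actually takes effect
 * for the device list used by the I/O path.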
2031 */ 2032 for (i = 0; i < h->ndevices; i++) { 2033 if (h->dev[i] == NULL) 2034 continue; 2035 h->dev[i]->offload_enabled = h->dev[i]->offload_to_be_enabled; 2036 } 2037 2038 spin_unlock_irqrestore(&h->devlock, flags); 2039 2040 /* Monitor devices which are in one of several NOT READY states to be 2041 * brought online later. This must be done without holding h->devlock, 2042 * so don't touch h->dev[] 2043 */ 2044 for (i = 0; i < nsds; i++) { 2045 if (!sd[i]) /* if already added above. */ 2046 continue; 2047 if (sd[i]->volume_offline) 2048 hpsa_monitor_offline_device(h, sd[i]->scsi3addr); 2049 } 2050 2051 /* Don't notify scsi mid layer of any changes the first time through 2052 * (or if there are no changes) scsi_scan_host will do it later the 2053 * first time through. 2054 */ 2055 if (!changes) 2056 goto free_and_out; 2057 2058 /* Notify scsi mid layer of any removed devices */ 2059 for (i = 0; i < nremoved; i++) { 2060 if (removed[i] == NULL) 2061 continue; 2062 if (removed[i]->expose_device) 2063 hpsa_remove_device(h, removed[i]); 2064 kfree(removed[i]); 2065 removed[i] = NULL; 2066 } 2067 2068 /* Notify scsi mid layer of any added devices */ 2069 for (i = 0; i < nadded; i++) { 2070 int rc = 0; 2071 2072 if (added[i] == NULL) 2073 continue; 2074 if (!(added[i]->expose_device)) 2075 continue; 2076 rc = hpsa_add_device(h, added[i]); 2077 if (!rc) 2078 continue; 2079 dev_warn(&h->pdev->dev, 2080 "addition failed %d, device not added.", rc); 2081 /* now we have to remove it from h->dev, 2082 * since it didn't get added to scsi mid layer 2083 */ 2084 fixup_botched_add(h, added[i]); 2085 h->drv_req_rescan = 1; 2086 } 2087 2088 free_and_out: 2089 kfree(added); 2090 kfree(removed); 2091 } 2092 2093 /* 2094 * Lookup bus/target/lun and return corresponding struct hpsa_scsi_dev_t * 2095 * Assume's h->devlock is held. 2096 */ 2097 static struct hpsa_scsi_dev_t *lookup_hpsa_scsi_dev(struct ctlr_info *h, 2098 int bus, int target, int lun) 2099 { 2100 int i; 2101 struct hpsa_scsi_dev_t *sd; 2102 2103 for (i = 0; i < h->ndevices; i++) { 2104 sd = h->dev[i]; 2105 if (sd->bus == bus && sd->target == target && sd->lun == lun) 2106 return sd; 2107 } 2108 return NULL; 2109 } 2110 2111 static int hpsa_sdev_init(struct scsi_device *sdev) 2112 { 2113 struct hpsa_scsi_dev_t *sd = NULL; 2114 unsigned long flags; 2115 struct ctlr_info *h; 2116 2117 h = sdev_to_hba(sdev); 2118 spin_lock_irqsave(&h->devlock, flags); 2119 if (sdev_channel(sdev) == HPSA_PHYSICAL_DEVICE_BUS) { 2120 struct scsi_target *starget; 2121 struct sas_rphy *rphy; 2122 2123 starget = scsi_target(sdev); 2124 rphy = target_to_rphy(starget); 2125 sd = hpsa_find_device_by_sas_rphy(h, rphy); 2126 if (sd) { 2127 sd->target = sdev_id(sdev); 2128 sd->lun = sdev->lun; 2129 } 2130 } 2131 if (!sd) 2132 sd = lookup_hpsa_scsi_dev(h, sdev_channel(sdev), 2133 sdev_id(sdev), sdev->lun); 2134 2135 if (sd && sd->expose_device) { 2136 atomic_set(&sd->ioaccel_cmds_out, 0); 2137 sdev->hostdata = sd; 2138 } else 2139 sdev->hostdata = NULL; 2140 spin_unlock_irqrestore(&h->devlock, flags); 2141 return 0; 2142 } 2143 2144 /* configure scsi device based on internal per-device structure */ 2145 #define CTLR_TIMEOUT (120 * HZ) 2146 static int hpsa_sdev_configure(struct scsi_device *sdev, 2147 struct queue_limits *lim) 2148 { 2149 struct hpsa_scsi_dev_t *sd; 2150 int queue_depth; 2151 2152 sd = sdev->hostdata; 2153 sdev->no_uld_attach = !sd || !sd->expose_device; 2154 2155 if (sd) { 2156 sd->was_removed = 0; 2157 queue_depth = sd->queue_depth != 0 ? 
2158 sd->queue_depth : sdev->host->can_queue; 2159 if (sd->external) { 2160 queue_depth = EXTERNAL_QD; 2161 sdev->eh_timeout = HPSA_EH_PTRAID_TIMEOUT; 2162 blk_queue_rq_timeout(sdev->request_queue, 2163 HPSA_EH_PTRAID_TIMEOUT); 2164 } 2165 if (is_hba_lunid(sd->scsi3addr)) { 2166 sdev->eh_timeout = CTLR_TIMEOUT; 2167 blk_queue_rq_timeout(sdev->request_queue, CTLR_TIMEOUT); 2168 } 2169 } else { 2170 queue_depth = sdev->host->can_queue; 2171 } 2172 2173 scsi_change_queue_depth(sdev, queue_depth); 2174 2175 return 0; 2176 } 2177 2178 static void hpsa_sdev_destroy(struct scsi_device *sdev) 2179 { 2180 struct hpsa_scsi_dev_t *hdev = NULL; 2181 2182 hdev = sdev->hostdata; 2183 2184 if (hdev) 2185 hdev->was_removed = 1; 2186 } 2187 2188 static void hpsa_free_ioaccel2_sg_chain_blocks(struct ctlr_info *h) 2189 { 2190 int i; 2191 2192 if (!h->ioaccel2_cmd_sg_list) 2193 return; 2194 for (i = 0; i < h->nr_cmds; i++) { 2195 kfree(h->ioaccel2_cmd_sg_list[i]); 2196 h->ioaccel2_cmd_sg_list[i] = NULL; 2197 } 2198 kfree(h->ioaccel2_cmd_sg_list); 2199 h->ioaccel2_cmd_sg_list = NULL; 2200 } 2201 2202 static int hpsa_allocate_ioaccel2_sg_chain_blocks(struct ctlr_info *h) 2203 { 2204 int i; 2205 2206 if (h->chainsize <= 0) 2207 return 0; 2208 2209 h->ioaccel2_cmd_sg_list = 2210 kcalloc(h->nr_cmds, sizeof(*h->ioaccel2_cmd_sg_list), 2211 GFP_KERNEL); 2212 if (!h->ioaccel2_cmd_sg_list) 2213 return -ENOMEM; 2214 for (i = 0; i < h->nr_cmds; i++) { 2215 h->ioaccel2_cmd_sg_list[i] = 2216 kmalloc_array(h->maxsgentries, 2217 sizeof(*h->ioaccel2_cmd_sg_list[i]), 2218 GFP_KERNEL); 2219 if (!h->ioaccel2_cmd_sg_list[i]) 2220 goto clean; 2221 } 2222 return 0; 2223 2224 clean: 2225 hpsa_free_ioaccel2_sg_chain_blocks(h); 2226 return -ENOMEM; 2227 } 2228 2229 static void hpsa_free_sg_chain_blocks(struct ctlr_info *h) 2230 { 2231 int i; 2232 2233 if (!h->cmd_sg_list) 2234 return; 2235 for (i = 0; i < h->nr_cmds; i++) { 2236 kfree(h->cmd_sg_list[i]); 2237 h->cmd_sg_list[i] = NULL; 2238 } 2239 kfree(h->cmd_sg_list); 2240 h->cmd_sg_list = NULL; 2241 } 2242 2243 static int hpsa_alloc_sg_chain_blocks(struct ctlr_info *h) 2244 { 2245 int i; 2246 2247 if (h->chainsize <= 0) 2248 return 0; 2249 2250 h->cmd_sg_list = kcalloc(h->nr_cmds, sizeof(*h->cmd_sg_list), 2251 GFP_KERNEL); 2252 if (!h->cmd_sg_list) 2253 return -ENOMEM; 2254 2255 for (i = 0; i < h->nr_cmds; i++) { 2256 h->cmd_sg_list[i] = kmalloc_array(h->chainsize, 2257 sizeof(*h->cmd_sg_list[i]), 2258 GFP_KERNEL); 2259 if (!h->cmd_sg_list[i]) 2260 goto clean; 2261 2262 } 2263 return 0; 2264 2265 clean: 2266 hpsa_free_sg_chain_blocks(h); 2267 return -ENOMEM; 2268 } 2269 2270 static int hpsa_map_ioaccel2_sg_chain_block(struct ctlr_info *h, 2271 struct io_accel2_cmd *cp, struct CommandList *c) 2272 { 2273 struct ioaccel2_sg_element *chain_block; 2274 u64 temp64; 2275 u32 chain_size; 2276 2277 chain_block = h->ioaccel2_cmd_sg_list[c->cmdindex]; 2278 chain_size = le32_to_cpu(cp->sg[0].length); 2279 temp64 = dma_map_single(&h->pdev->dev, chain_block, chain_size, 2280 DMA_TO_DEVICE); 2281 if (dma_mapping_error(&h->pdev->dev, temp64)) { 2282 /* prevent subsequent unmapping */ 2283 cp->sg->address = 0; 2284 return -1; 2285 } 2286 cp->sg->address = cpu_to_le64(temp64); 2287 return 0; 2288 } 2289 2290 static void hpsa_unmap_ioaccel2_sg_chain_block(struct ctlr_info *h, 2291 struct io_accel2_cmd *cp) 2292 { 2293 struct ioaccel2_sg_element *chain_sg; 2294 u64 temp64; 2295 u32 chain_size; 2296 2297 chain_sg = cp->sg; 2298 temp64 = le64_to_cpu(chain_sg->address); 2299 chain_size = 
le32_to_cpu(cp->sg[0].length); 2300 dma_unmap_single(&h->pdev->dev, temp64, chain_size, DMA_TO_DEVICE); 2301 } 2302 2303 static int hpsa_map_sg_chain_block(struct ctlr_info *h, 2304 struct CommandList *c) 2305 { 2306 struct SGDescriptor *chain_sg, *chain_block; 2307 u64 temp64; 2308 u32 chain_len; 2309 2310 chain_sg = &c->SG[h->max_cmd_sg_entries - 1]; 2311 chain_block = h->cmd_sg_list[c->cmdindex]; 2312 chain_sg->Ext = cpu_to_le32(HPSA_SG_CHAIN); 2313 chain_len = sizeof(*chain_sg) * 2314 (le16_to_cpu(c->Header.SGTotal) - h->max_cmd_sg_entries); 2315 chain_sg->Len = cpu_to_le32(chain_len); 2316 temp64 = dma_map_single(&h->pdev->dev, chain_block, chain_len, 2317 DMA_TO_DEVICE); 2318 if (dma_mapping_error(&h->pdev->dev, temp64)) { 2319 /* prevent subsequent unmapping */ 2320 chain_sg->Addr = cpu_to_le64(0); 2321 return -1; 2322 } 2323 chain_sg->Addr = cpu_to_le64(temp64); 2324 return 0; 2325 } 2326 2327 static void hpsa_unmap_sg_chain_block(struct ctlr_info *h, 2328 struct CommandList *c) 2329 { 2330 struct SGDescriptor *chain_sg; 2331 2332 if (le16_to_cpu(c->Header.SGTotal) <= h->max_cmd_sg_entries) 2333 return; 2334 2335 chain_sg = &c->SG[h->max_cmd_sg_entries - 1]; 2336 dma_unmap_single(&h->pdev->dev, le64_to_cpu(chain_sg->Addr), 2337 le32_to_cpu(chain_sg->Len), DMA_TO_DEVICE); 2338 } 2339 2340 2341 /* Decode the various types of errors on ioaccel2 path. 2342 * Return 1 for any error that should generate a RAID path retry. 2343 * Return 0 for errors that don't require a RAID path retry. 2344 */ 2345 static int handle_ioaccel_mode2_error(struct ctlr_info *h, 2346 struct CommandList *c, 2347 struct scsi_cmnd *cmd, 2348 struct io_accel2_cmd *c2, 2349 struct hpsa_scsi_dev_t *dev) 2350 { 2351 int data_len; 2352 int retry = 0; 2353 u32 ioaccel2_resid = 0; 2354 2355 switch (c2->error_data.serv_response) { 2356 case IOACCEL2_SERV_RESPONSE_COMPLETE: 2357 switch (c2->error_data.status) { 2358 case IOACCEL2_STATUS_SR_TASK_COMP_GOOD: 2359 if (cmd) 2360 cmd->result = 0; 2361 break; 2362 case IOACCEL2_STATUS_SR_TASK_COMP_CHK_COND: 2363 cmd->result |= SAM_STAT_CHECK_CONDITION; 2364 if (c2->error_data.data_present != 2365 IOACCEL2_SENSE_DATA_PRESENT) { 2366 memset(cmd->sense_buffer, 0, 2367 SCSI_SENSE_BUFFERSIZE); 2368 break; 2369 } 2370 /* copy the sense data */ 2371 data_len = c2->error_data.sense_data_len; 2372 if (data_len > SCSI_SENSE_BUFFERSIZE) 2373 data_len = SCSI_SENSE_BUFFERSIZE; 2374 if (data_len > sizeof(c2->error_data.sense_data_buff)) 2375 data_len = 2376 sizeof(c2->error_data.sense_data_buff); 2377 memcpy(cmd->sense_buffer, 2378 c2->error_data.sense_data_buff, data_len); 2379 retry = 1; 2380 break; 2381 case IOACCEL2_STATUS_SR_TASK_COMP_BUSY: 2382 retry = 1; 2383 break; 2384 case IOACCEL2_STATUS_SR_TASK_COMP_RES_CON: 2385 retry = 1; 2386 break; 2387 case IOACCEL2_STATUS_SR_TASK_COMP_SET_FULL: 2388 retry = 1; 2389 break; 2390 case IOACCEL2_STATUS_SR_TASK_COMP_ABORTED: 2391 retry = 1; 2392 break; 2393 default: 2394 retry = 1; 2395 break; 2396 } 2397 break; 2398 case IOACCEL2_SERV_RESPONSE_FAILURE: 2399 switch (c2->error_data.status) { 2400 case IOACCEL2_STATUS_SR_IO_ERROR: 2401 case IOACCEL2_STATUS_SR_IO_ABORTED: 2402 case IOACCEL2_STATUS_SR_OVERRUN: 2403 retry = 1; 2404 break; 2405 case IOACCEL2_STATUS_SR_UNDERRUN: 2406 cmd->result = (DID_OK << 16); /* host byte */ 2407 ioaccel2_resid = get_unaligned_le32( 2408 &c2->error_data.resid_cnt[0]); 2409 scsi_set_resid(cmd, ioaccel2_resid); 2410 break; 2411 case IOACCEL2_STATUS_SR_NO_PATH_TO_DEVICE: 2412 case IOACCEL2_STATUS_SR_INVALID_DEVICE: 2413 
case IOACCEL2_STATUS_SR_IOACCEL_DISABLED: 2414 /* 2415 * Did an HBA disk disappear? We will eventually 2416 * get a state change event from the controller but 2417 * in the meantime, we need to tell the OS that the 2418 * HBA disk is no longer there and stop I/O 2419 * from going down. This allows the potential re-insert 2420 * of the disk to get the same device node. 2421 */ 2422 if (dev->physical_device && dev->expose_device) { 2423 cmd->result = DID_NO_CONNECT << 16; 2424 dev->removed = 1; 2425 h->drv_req_rescan = 1; 2426 dev_warn(&h->pdev->dev, 2427 "%s: device is gone!\n", __func__); 2428 } else 2429 /* 2430 * Retry by sending down the RAID path. 2431 * We will get an event from ctlr to 2432 * trigger rescan regardless. 2433 */ 2434 retry = 1; 2435 break; 2436 default: 2437 retry = 1; 2438 } 2439 break; 2440 case IOACCEL2_SERV_RESPONSE_TMF_COMPLETE: 2441 break; 2442 case IOACCEL2_SERV_RESPONSE_TMF_SUCCESS: 2443 break; 2444 case IOACCEL2_SERV_RESPONSE_TMF_REJECTED: 2445 retry = 1; 2446 break; 2447 case IOACCEL2_SERV_RESPONSE_TMF_WRONG_LUN: 2448 break; 2449 default: 2450 retry = 1; 2451 break; 2452 } 2453 2454 if (dev->in_reset) 2455 retry = 0; 2456 2457 return retry; /* retry on raid path? */ 2458 } 2459 2460 static void hpsa_cmd_resolve_events(struct ctlr_info *h, 2461 struct CommandList *c) 2462 { 2463 struct hpsa_scsi_dev_t *dev = c->device; 2464 2465 /* 2466 * Reset c->scsi_cmd here so that the reset handler will know 2467 * this command has completed. Then, check to see if the handler is 2468 * waiting for this command, and, if so, wake it. 2469 */ 2470 c->scsi_cmd = SCSI_CMD_IDLE; 2471 mb(); /* Declare command idle before checking for pending events. */ 2472 if (dev) { 2473 atomic_dec(&dev->commands_outstanding); 2474 if (dev->in_reset && 2475 atomic_read(&dev->commands_outstanding) <= 0) 2476 wake_up_all(&h->event_sync_wait_queue); 2477 } 2478 } 2479 2480 static void hpsa_cmd_resolve_and_free(struct ctlr_info *h, 2481 struct CommandList *c) 2482 { 2483 hpsa_cmd_resolve_events(h, c); 2484 cmd_tagged_free(h, c); 2485 } 2486 2487 static void hpsa_cmd_free_and_done(struct ctlr_info *h, 2488 struct CommandList *c, struct scsi_cmnd *cmd) 2489 { 2490 hpsa_cmd_resolve_and_free(h, c); 2491 if (cmd) 2492 scsi_done(cmd); 2493 } 2494 2495 static void hpsa_retry_cmd(struct ctlr_info *h, struct CommandList *c) 2496 { 2497 INIT_WORK(&c->work, hpsa_command_resubmit_worker); 2498 queue_work_on(raw_smp_processor_id(), h->resubmit_wq, &c->work); 2499 } 2500 2501 static void process_ioaccel2_completion(struct ctlr_info *h, 2502 struct CommandList *c, struct scsi_cmnd *cmd, 2503 struct hpsa_scsi_dev_t *dev) 2504 { 2505 struct io_accel2_cmd *c2 = &h->ioaccel2_cmd_pool[c->cmdindex]; 2506 2507 /* check for good status */ 2508 if (likely(c2->error_data.serv_response == 0 && 2509 c2->error_data.status == 0)) { 2510 cmd->result = 0; 2511 return hpsa_cmd_free_and_done(h, c, cmd); 2512 } 2513 2514 /* 2515 * Any RAID offload error results in retry which will use 2516 * the normal I/O path so the controller can handle whatever is 2517 * wrong. 
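 * If the failure status was IOACCEL2_STATUS_SR_IOACCEL_DISABLED, ioaccel
 * is also switched off for this device so that later commands are sent
 * down the RAID path directly rather than bouncing through this retry.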
2518 */ 2519 if (is_logical_device(dev) && 2520 c2->error_data.serv_response == 2521 IOACCEL2_SERV_RESPONSE_FAILURE) { 2522 if (c2->error_data.status == 2523 IOACCEL2_STATUS_SR_IOACCEL_DISABLED) { 2524 hpsa_turn_off_ioaccel_for_device(dev); 2525 } 2526 2527 if (dev->in_reset) { 2528 cmd->result = DID_RESET << 16; 2529 return hpsa_cmd_free_and_done(h, c, cmd); 2530 } 2531 2532 return hpsa_retry_cmd(h, c); 2533 } 2534 2535 if (handle_ioaccel_mode2_error(h, c, cmd, c2, dev)) 2536 return hpsa_retry_cmd(h, c); 2537 2538 return hpsa_cmd_free_and_done(h, c, cmd); 2539 } 2540 2541 /* Returns 0 on success, < 0 otherwise. */ 2542 static int hpsa_evaluate_tmf_status(struct ctlr_info *h, 2543 struct CommandList *cp) 2544 { 2545 u8 tmf_status = cp->err_info->ScsiStatus; 2546 2547 switch (tmf_status) { 2548 case CISS_TMF_COMPLETE: 2549 /* 2550 * CISS_TMF_COMPLETE never happens; instead, 2551 * ei->CommandStatus == 0 for this case. 2552 */ 2553 case CISS_TMF_SUCCESS: 2554 return 0; 2555 case CISS_TMF_INVALID_FRAME: 2556 case CISS_TMF_NOT_SUPPORTED: 2557 case CISS_TMF_FAILED: 2558 case CISS_TMF_WRONG_LUN: 2559 case CISS_TMF_OVERLAPPED_TAG: 2560 break; 2561 default: 2562 dev_warn(&h->pdev->dev, "Unknown TMF status: 0x%02x\n", 2563 tmf_status); 2564 break; 2565 } 2566 return -tmf_status; 2567 } 2568 2569 static void complete_scsi_command(struct CommandList *cp) 2570 { 2571 struct scsi_cmnd *cmd; 2572 struct ctlr_info *h; 2573 struct ErrorInfo *ei; 2574 struct hpsa_scsi_dev_t *dev; 2575 struct io_accel2_cmd *c2; 2576 2577 u8 sense_key; 2578 u8 asc; /* additional sense code */ 2579 u8 ascq; /* additional sense code qualifier */ 2580 unsigned long sense_data_size; 2581 2582 ei = cp->err_info; 2583 cmd = cp->scsi_cmd; 2584 h = cp->h; 2585 2586 if (!cmd->device) { 2587 cmd->result = DID_NO_CONNECT << 16; 2588 return hpsa_cmd_free_and_done(h, cp, cmd); 2589 } 2590 2591 dev = cmd->device->hostdata; 2592 if (!dev) { 2593 cmd->result = DID_NO_CONNECT << 16; 2594 return hpsa_cmd_free_and_done(h, cp, cmd); 2595 } 2596 c2 = &h->ioaccel2_cmd_pool[cp->cmdindex]; 2597 2598 scsi_dma_unmap(cmd); /* undo the DMA mappings */ 2599 if ((cp->cmd_type == CMD_SCSI) && 2600 (le16_to_cpu(cp->Header.SGTotal) > h->max_cmd_sg_entries)) 2601 hpsa_unmap_sg_chain_block(h, cp); 2602 2603 if ((cp->cmd_type == CMD_IOACCEL2) && 2604 (c2->sg[0].chain_indicator == IOACCEL2_CHAIN)) 2605 hpsa_unmap_ioaccel2_sg_chain_block(h, c2); 2606 2607 cmd->result = (DID_OK << 16); /* host byte */ 2608 2609 /* SCSI command has already been cleaned up in SML */ 2610 if (dev->was_removed) { 2611 hpsa_cmd_resolve_and_free(h, cp); 2612 return; 2613 } 2614 2615 if (cp->cmd_type == CMD_IOACCEL2 || cp->cmd_type == CMD_IOACCEL1) { 2616 if (dev->physical_device && dev->expose_device && 2617 dev->removed) { 2618 cmd->result = DID_NO_CONNECT << 16; 2619 return hpsa_cmd_free_and_done(h, cp, cmd); 2620 } 2621 if (likely(cp->phys_disk != NULL)) 2622 atomic_dec(&cp->phys_disk->ioaccel_cmds_out); 2623 } 2624 2625 /* 2626 * We check for lockup status here as it may be set for 2627 * CMD_SCSI, CMD_IOACCEL1 and CMD_IOACCEL2 commands by 2628 * fail_all_outstanding_cmds() 2629 */ 2630 if (unlikely(ei->CommandStatus == CMD_CTLR_LOCKUP)) { 2631 /* DID_NO_CONNECT will prevent a retry */ 2632 cmd->result = DID_NO_CONNECT << 16; 2633 return hpsa_cmd_free_and_done(h, cp, cmd); 2634 } 2635 2636 if (cp->cmd_type == CMD_IOACCEL2) 2637 return process_ioaccel2_completion(h, cp, cmd, dev); 2638 2639 scsi_set_resid(cmd, ei->ResidualCnt); 2640 if (ei->CommandStatus == 0) 2641 return
hpsa_cmd_free_and_done(h, cp, cmd); 2642 2643 /* For I/O accelerator commands, copy over some fields to the normal 2644 * CISS header used below for error handling. 2645 */ 2646 if (cp->cmd_type == CMD_IOACCEL1) { 2647 struct io_accel1_cmd *c = &h->ioaccel_cmd_pool[cp->cmdindex]; 2648 cp->Header.SGList = scsi_sg_count(cmd); 2649 cp->Header.SGTotal = cpu_to_le16(cp->Header.SGList); 2650 cp->Request.CDBLen = le16_to_cpu(c->io_flags) & 2651 IOACCEL1_IOFLAGS_CDBLEN_MASK; 2652 cp->Header.tag = c->tag; 2653 memcpy(cp->Header.LUN.LunAddrBytes, c->CISS_LUN, 8); 2654 memcpy(cp->Request.CDB, c->CDB, cp->Request.CDBLen); 2655 2656 /* Any RAID offload error results in retry which will use 2657 * the normal I/O path so the controller can handle whatever's 2658 * wrong. 2659 */ 2660 if (is_logical_device(dev)) { 2661 if (ei->CommandStatus == CMD_IOACCEL_DISABLED) 2662 dev->offload_enabled = 0; 2663 return hpsa_retry_cmd(h, cp); 2664 } 2665 } 2666 2667 /* an error has occurred */ 2668 switch (ei->CommandStatus) { 2669 2670 case CMD_TARGET_STATUS: 2671 cmd->result |= ei->ScsiStatus; 2672 /* copy the sense data */ 2673 if (SCSI_SENSE_BUFFERSIZE < sizeof(ei->SenseInfo)) 2674 sense_data_size = SCSI_SENSE_BUFFERSIZE; 2675 else 2676 sense_data_size = sizeof(ei->SenseInfo); 2677 if (ei->SenseLen < sense_data_size) 2678 sense_data_size = ei->SenseLen; 2679 memcpy(cmd->sense_buffer, ei->SenseInfo, sense_data_size); 2680 if (ei->ScsiStatus) 2681 decode_sense_data(ei->SenseInfo, sense_data_size, 2682 &sense_key, &asc, &ascq); 2683 if (ei->ScsiStatus == SAM_STAT_CHECK_CONDITION) { 2684 switch (sense_key) { 2685 case ABORTED_COMMAND: 2686 cmd->result |= DID_SOFT_ERROR << 16; 2687 break; 2688 case UNIT_ATTENTION: 2689 if (asc == 0x3F && ascq == 0x0E) 2690 h->drv_req_rescan = 1; 2691 break; 2692 case ILLEGAL_REQUEST: 2693 if (asc == 0x25 && ascq == 0x00) { 2694 dev->removed = 1; 2695 cmd->result = DID_NO_CONNECT << 16; 2696 } 2697 break; 2698 } 2699 break; 2700 } 2701 /* Problem was not a check condition 2702 * Pass it up to the upper layers... 2703 */ 2704 if (ei->ScsiStatus) { 2705 dev_warn(&h->pdev->dev, "cp %p has status 0x%x " 2706 "Sense: 0x%x, ASC: 0x%x, ASCQ: 0x%x, " 2707 "Returning result: 0x%x\n", 2708 cp, ei->ScsiStatus, 2709 sense_key, asc, ascq, 2710 cmd->result); 2711 } else { /* scsi status is zero??? How??? */ 2712 dev_warn(&h->pdev->dev, "cp %p SCSI status was 0. " 2713 "Returning no connection.\n", cp), 2714 2715 /* Ordinarily, this case should never happen, 2716 * but there is a bug in some released firmware 2717 * revisions that allows it to happen if, for 2718 * example, a 4100 backplane loses power and 2719 * the tape drive is in it. We assume that 2720 * it's a fatal error of some kind because we 2721 * can't show that it wasn't. We will make it 2722 * look like selection timeout since that is 2723 * the most common reason for this to occur, 2724 * and it's severe enough. 2725 */ 2726 2727 cmd->result = DID_NO_CONNECT << 16; 2728 } 2729 break; 2730 2731 case CMD_DATA_UNDERRUN: /* let mid layer handle it. */ 2732 break; 2733 case CMD_DATA_OVERRUN: 2734 dev_warn(&h->pdev->dev, 2735 "CDB %16phN data overrun\n", cp->Request.CDB); 2736 break; 2737 case CMD_INVALID: { 2738 /* print_bytes(cp, sizeof(*cp), 1, 0); 2739 print_cmd(cp); */ 2740 /* We get CMD_INVALID if you address a non-existent device 2741 * instead of a selection timeout (no response). You will 2742 * see this if you yank out a drive, then try to access it. 
2743 * This is kind of a shame because it means that any other 2744 * CMD_INVALID (e.g. driver bug) will get interpreted as a 2745 * missing target. */ 2746 cmd->result = DID_NO_CONNECT << 16; 2747 } 2748 break; 2749 case CMD_PROTOCOL_ERR: 2750 cmd->result = DID_ERROR << 16; 2751 dev_warn(&h->pdev->dev, "CDB %16phN : protocol error\n", 2752 cp->Request.CDB); 2753 break; 2754 case CMD_HARDWARE_ERR: 2755 cmd->result = DID_ERROR << 16; 2756 dev_warn(&h->pdev->dev, "CDB %16phN : hardware error\n", 2757 cp->Request.CDB); 2758 break; 2759 case CMD_CONNECTION_LOST: 2760 cmd->result = DID_ERROR << 16; 2761 dev_warn(&h->pdev->dev, "CDB %16phN : connection lost\n", 2762 cp->Request.CDB); 2763 break; 2764 case CMD_ABORTED: 2765 cmd->result = DID_ABORT << 16; 2766 break; 2767 case CMD_ABORT_FAILED: 2768 cmd->result = DID_ERROR << 16; 2769 dev_warn(&h->pdev->dev, "CDB %16phN : abort failed\n", 2770 cp->Request.CDB); 2771 break; 2772 case CMD_UNSOLICITED_ABORT: 2773 cmd->result = DID_SOFT_ERROR << 16; /* retry the command */ 2774 dev_warn(&h->pdev->dev, "CDB %16phN : unsolicited abort\n", 2775 cp->Request.CDB); 2776 break; 2777 case CMD_TIMEOUT: 2778 cmd->result = DID_TIME_OUT << 16; 2779 dev_warn(&h->pdev->dev, "CDB %16phN timed out\n", 2780 cp->Request.CDB); 2781 break; 2782 case CMD_UNABORTABLE: 2783 cmd->result = DID_ERROR << 16; 2784 dev_warn(&h->pdev->dev, "Command unabortable\n"); 2785 break; 2786 case CMD_TMF_STATUS: 2787 if (hpsa_evaluate_tmf_status(h, cp)) /* TMF failed? */ 2788 cmd->result = DID_ERROR << 16; 2789 break; 2790 case CMD_IOACCEL_DISABLED: 2791 /* This only handles the direct pass-through case since RAID 2792 * offload is handled above. Just attempt a retry. 2793 */ 2794 cmd->result = DID_SOFT_ERROR << 16; 2795 dev_warn(&h->pdev->dev, 2796 "cp %p had HP SSD Smart Path error\n", cp); 2797 break; 2798 default: 2799 cmd->result = DID_ERROR << 16; 2800 dev_warn(&h->pdev->dev, "cp %p returned unknown status %x\n", 2801 cp, ei->CommandStatus); 2802 } 2803 2804 return hpsa_cmd_free_and_done(h, cp, cmd); 2805 } 2806 2807 static void hpsa_pci_unmap(struct pci_dev *pdev, struct CommandList *c, 2808 int sg_used, enum dma_data_direction data_direction) 2809 { 2810 int i; 2811 2812 for (i = 0; i < sg_used; i++) 2813 dma_unmap_single(&pdev->dev, le64_to_cpu(c->SG[i].Addr), 2814 le32_to_cpu(c->SG[i].Len), 2815 data_direction); 2816 } 2817 2818 static int hpsa_map_one(struct pci_dev *pdev, 2819 struct CommandList *cp, 2820 unsigned char *buf, 2821 size_t buflen, 2822 enum dma_data_direction data_direction) 2823 { 2824 u64 addr64; 2825 2826 if (buflen == 0 || data_direction == DMA_NONE) { 2827 cp->Header.SGList = 0; 2828 cp->Header.SGTotal = cpu_to_le16(0); 2829 return 0; 2830 } 2831 2832 addr64 = dma_map_single(&pdev->dev, buf, buflen, data_direction); 2833 if (dma_mapping_error(&pdev->dev, addr64)) { 2834 /* Prevent subsequent unmap of something never mapped */ 2835 cp->Header.SGList = 0; 2836 cp->Header.SGTotal = cpu_to_le16(0); 2837 return -1; 2838 } 2839 cp->SG[0].Addr = cpu_to_le64(addr64); 2840 cp->SG[0].Len = cpu_to_le32(buflen); 2841 cp->SG[0].Ext = cpu_to_le32(HPSA_SG_LAST); /* we are not chaining */ 2842 cp->Header.SGList = 1; /* no. 
SGs contig in this cmd */ 2843 cp->Header.SGTotal = cpu_to_le16(1); /* total sgs in cmd list */ 2844 return 0; 2845 } 2846 2847 #define NO_TIMEOUT ((unsigned long) -1) 2848 #define DEFAULT_TIMEOUT 30000 /* milliseconds */ 2849 static int hpsa_scsi_do_simple_cmd_core(struct ctlr_info *h, 2850 struct CommandList *c, int reply_queue, unsigned long timeout_msecs) 2851 { 2852 DECLARE_COMPLETION_ONSTACK(wait); 2853 2854 c->waiting = &wait; 2855 __enqueue_cmd_and_start_io(h, c, reply_queue); 2856 if (timeout_msecs == NO_TIMEOUT) { 2857 /* TODO: get rid of this no-timeout thing */ 2858 wait_for_completion_io(&wait); 2859 return IO_OK; 2860 } 2861 if (!wait_for_completion_io_timeout(&wait, 2862 msecs_to_jiffies(timeout_msecs))) { 2863 dev_warn(&h->pdev->dev, "Command timed out.\n"); 2864 return -ETIMEDOUT; 2865 } 2866 return IO_OK; 2867 } 2868 2869 static int hpsa_scsi_do_simple_cmd(struct ctlr_info *h, struct CommandList *c, 2870 int reply_queue, unsigned long timeout_msecs) 2871 { 2872 if (unlikely(lockup_detected(h))) { 2873 c->err_info->CommandStatus = CMD_CTLR_LOCKUP; 2874 return IO_OK; 2875 } 2876 return hpsa_scsi_do_simple_cmd_core(h, c, reply_queue, timeout_msecs); 2877 } 2878 2879 static u32 lockup_detected(struct ctlr_info *h) 2880 { 2881 int cpu; 2882 u32 rc, *lockup_detected; 2883 2884 cpu = get_cpu(); 2885 lockup_detected = per_cpu_ptr(h->lockup_detected, cpu); 2886 rc = *lockup_detected; 2887 put_cpu(); 2888 return rc; 2889 } 2890 2891 #define MAX_DRIVER_CMD_RETRIES 25 2892 static int hpsa_scsi_do_simple_cmd_with_retry(struct ctlr_info *h, 2893 struct CommandList *c, enum dma_data_direction data_direction, 2894 unsigned long timeout_msecs) 2895 { 2896 int backoff_time = 10, retry_count = 0; 2897 int rc; 2898 2899 do { 2900 memset(c->err_info, 0, sizeof(*c->err_info)); 2901 rc = hpsa_scsi_do_simple_cmd(h, c, DEFAULT_REPLY_QUEUE, 2902 timeout_msecs); 2903 if (rc) 2904 break; 2905 retry_count++; 2906 if (retry_count > 3) { 2907 msleep(backoff_time); 2908 if (backoff_time < 1000) 2909 backoff_time *= 2; 2910 } 2911 } while ((check_for_unit_attention(h, c) || 2912 check_for_busy(h, c)) && 2913 retry_count <= MAX_DRIVER_CMD_RETRIES); 2914 hpsa_pci_unmap(h->pdev, c, 1, data_direction); 2915 if (retry_count > MAX_DRIVER_CMD_RETRIES) 2916 rc = -EIO; 2917 return rc; 2918 } 2919 2920 static void hpsa_print_cmd(struct ctlr_info *h, char *txt, 2921 struct CommandList *c) 2922 { 2923 const u8 *cdb = c->Request.CDB; 2924 const u8 *lun = c->Header.LUN.LunAddrBytes; 2925 2926 dev_warn(&h->pdev->dev, "%s: LUN:%8phN CDB:%16phN\n", 2927 txt, lun, cdb); 2928 } 2929 2930 static void hpsa_scsi_interpret_error(struct ctlr_info *h, 2931 struct CommandList *cp) 2932 { 2933 const struct ErrorInfo *ei = cp->err_info; 2934 struct device *d = &cp->h->pdev->dev; 2935 u8 sense_key, asc, ascq; 2936 int sense_len; 2937 2938 switch (ei->CommandStatus) { 2939 case CMD_TARGET_STATUS: 2940 if (ei->SenseLen > sizeof(ei->SenseInfo)) 2941 sense_len = sizeof(ei->SenseInfo); 2942 else 2943 sense_len = ei->SenseLen; 2944 decode_sense_data(ei->SenseInfo, sense_len, 2945 &sense_key, &asc, &ascq); 2946 hpsa_print_cmd(h, "SCSI status", cp); 2947 if (ei->ScsiStatus == SAM_STAT_CHECK_CONDITION) 2948 dev_warn(d, "SCSI Status = 02, Sense key = 0x%02x, ASC = 0x%02x, ASCQ = 0x%02x\n", 2949 sense_key, asc, ascq); 2950 else 2951 dev_warn(d, "SCSI Status = 0x%02x\n", ei->ScsiStatus); 2952 if (ei->ScsiStatus == 0) 2953 dev_warn(d, "SCSI status is abnormally zero. 
" 2954 "(probably indicates selection timeout " 2955 "reported incorrectly due to a known " 2956 "firmware bug, circa July, 2001.)\n"); 2957 break; 2958 case CMD_DATA_UNDERRUN: /* let mid layer handle it. */ 2959 break; 2960 case CMD_DATA_OVERRUN: 2961 hpsa_print_cmd(h, "overrun condition", cp); 2962 break; 2963 case CMD_INVALID: { 2964 /* controller unfortunately reports SCSI passthru's 2965 * to non-existent targets as invalid commands. 2966 */ 2967 hpsa_print_cmd(h, "invalid command", cp); 2968 dev_warn(d, "probably means device no longer present\n"); 2969 } 2970 break; 2971 case CMD_PROTOCOL_ERR: 2972 hpsa_print_cmd(h, "protocol error", cp); 2973 break; 2974 case CMD_HARDWARE_ERR: 2975 hpsa_print_cmd(h, "hardware error", cp); 2976 break; 2977 case CMD_CONNECTION_LOST: 2978 hpsa_print_cmd(h, "connection lost", cp); 2979 break; 2980 case CMD_ABORTED: 2981 hpsa_print_cmd(h, "aborted", cp); 2982 break; 2983 case CMD_ABORT_FAILED: 2984 hpsa_print_cmd(h, "abort failed", cp); 2985 break; 2986 case CMD_UNSOLICITED_ABORT: 2987 hpsa_print_cmd(h, "unsolicited abort", cp); 2988 break; 2989 case CMD_TIMEOUT: 2990 hpsa_print_cmd(h, "timed out", cp); 2991 break; 2992 case CMD_UNABORTABLE: 2993 hpsa_print_cmd(h, "unabortable", cp); 2994 break; 2995 case CMD_CTLR_LOCKUP: 2996 hpsa_print_cmd(h, "controller lockup detected", cp); 2997 break; 2998 default: 2999 hpsa_print_cmd(h, "unknown status", cp); 3000 dev_warn(d, "Unknown command status %x\n", 3001 ei->CommandStatus); 3002 } 3003 } 3004 3005 static int hpsa_do_receive_diagnostic(struct ctlr_info *h, u8 *scsi3addr, 3006 u8 page, u8 *buf, size_t bufsize) 3007 { 3008 int rc = IO_OK; 3009 struct CommandList *c; 3010 struct ErrorInfo *ei; 3011 3012 c = cmd_alloc(h); 3013 if (fill_cmd(c, RECEIVE_DIAGNOSTIC, h, buf, bufsize, 3014 page, scsi3addr, TYPE_CMD)) { 3015 rc = -1; 3016 goto out; 3017 } 3018 rc = hpsa_scsi_do_simple_cmd_with_retry(h, c, DMA_FROM_DEVICE, 3019 NO_TIMEOUT); 3020 if (rc) 3021 goto out; 3022 ei = c->err_info; 3023 if (ei->CommandStatus != 0 && ei->CommandStatus != CMD_DATA_UNDERRUN) { 3024 hpsa_scsi_interpret_error(h, c); 3025 rc = -1; 3026 } 3027 out: 3028 cmd_free(h, c); 3029 return rc; 3030 } 3031 3032 static u64 hpsa_get_enclosure_logical_identifier(struct ctlr_info *h, 3033 u8 *scsi3addr) 3034 { 3035 u8 *buf; 3036 u64 sa = 0; 3037 int rc = 0; 3038 3039 buf = kzalloc(1024, GFP_KERNEL); 3040 if (!buf) 3041 return 0; 3042 3043 rc = hpsa_do_receive_diagnostic(h, scsi3addr, RECEIVE_DIAGNOSTIC, 3044 buf, 1024); 3045 3046 if (rc) 3047 goto out; 3048 3049 sa = get_unaligned_be64(buf+12); 3050 3051 out: 3052 kfree(buf); 3053 return sa; 3054 } 3055 3056 static int hpsa_scsi_do_inquiry(struct ctlr_info *h, unsigned char *scsi3addr, 3057 u16 page, unsigned char *buf, 3058 unsigned char bufsize) 3059 { 3060 int rc = IO_OK; 3061 struct CommandList *c; 3062 struct ErrorInfo *ei; 3063 3064 c = cmd_alloc(h); 3065 3066 if (fill_cmd(c, HPSA_INQUIRY, h, buf, bufsize, 3067 page, scsi3addr, TYPE_CMD)) { 3068 rc = -1; 3069 goto out; 3070 } 3071 rc = hpsa_scsi_do_simple_cmd_with_retry(h, c, DMA_FROM_DEVICE, 3072 NO_TIMEOUT); 3073 if (rc) 3074 goto out; 3075 ei = c->err_info; 3076 if (ei->CommandStatus != 0 && ei->CommandStatus != CMD_DATA_UNDERRUN) { 3077 hpsa_scsi_interpret_error(h, c); 3078 rc = -1; 3079 } 3080 out: 3081 cmd_free(h, c); 3082 return rc; 3083 } 3084 3085 static int hpsa_send_reset(struct ctlr_info *h, struct hpsa_scsi_dev_t *dev, 3086 u8 reset_type, int reply_queue) 3087 { 3088 int rc = IO_OK; 3089 struct CommandList *c; 3090 struct 
ErrorInfo *ei; 3091 3092 c = cmd_alloc(h); 3093 c->device = dev; 3094 3095 /* fill_cmd can't fail here, no data buffer to map. */ 3096 (void) fill_cmd(c, reset_type, h, NULL, 0, 0, dev->scsi3addr, TYPE_MSG); 3097 rc = hpsa_scsi_do_simple_cmd(h, c, reply_queue, NO_TIMEOUT); 3098 if (rc) { 3099 dev_warn(&h->pdev->dev, "Failed to send reset command\n"); 3100 goto out; 3101 } 3102 /* no unmap needed here because no data xfer. */ 3103 3104 ei = c->err_info; 3105 if (ei->CommandStatus != 0) { 3106 hpsa_scsi_interpret_error(h, c); 3107 rc = -1; 3108 } 3109 out: 3110 cmd_free(h, c); 3111 return rc; 3112 } 3113 3114 static bool hpsa_cmd_dev_match(struct ctlr_info *h, struct CommandList *c, 3115 struct hpsa_scsi_dev_t *dev, 3116 unsigned char *scsi3addr) 3117 { 3118 int i; 3119 bool match = false; 3120 struct io_accel2_cmd *c2 = &h->ioaccel2_cmd_pool[c->cmdindex]; 3121 struct hpsa_tmf_struct *ac = (struct hpsa_tmf_struct *) c2; 3122 3123 if (hpsa_is_cmd_idle(c)) 3124 return false; 3125 3126 switch (c->cmd_type) { 3127 case CMD_SCSI: 3128 case CMD_IOCTL_PEND: 3129 match = !memcmp(scsi3addr, &c->Header.LUN.LunAddrBytes, 3130 sizeof(c->Header.LUN.LunAddrBytes)); 3131 break; 3132 3133 case CMD_IOACCEL1: 3134 case CMD_IOACCEL2: 3135 if (c->phys_disk == dev) { 3136 /* HBA mode match */ 3137 match = true; 3138 } else { 3139 /* Possible RAID mode -- check each phys dev. */ 3140 /* FIXME: Do we need to take out a lock here? If 3141 * so, we could just call hpsa_get_pdisk_of_ioaccel2() 3142 * instead. */ 3143 for (i = 0; i < dev->nphysical_disks && !match; i++) { 3144 /* FIXME: an alternate test might be 3145 * 3146 * match = dev->phys_disk[i]->ioaccel_handle 3147 * == c2->scsi_nexus; */ 3148 match = dev->phys_disk[i] == c->phys_disk; 3149 } 3150 } 3151 break; 3152 3153 case IOACCEL2_TMF: 3154 for (i = 0; i < dev->nphysical_disks && !match; i++) { 3155 match = dev->phys_disk[i]->ioaccel_handle == 3156 le32_to_cpu(ac->it_nexus); 3157 } 3158 break; 3159 3160 case 0: /* The command is in the middle of being initialized. 
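 * Its cmd_type has not been assigned yet, so it cannot be matched against
 * any device.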
*/ 3161 match = false; 3162 break; 3163 3164 default: 3165 dev_err(&h->pdev->dev, "unexpected cmd_type: %d\n", 3166 c->cmd_type); 3167 BUG(); 3168 } 3169 3170 return match; 3171 } 3172 3173 static int hpsa_do_reset(struct ctlr_info *h, struct hpsa_scsi_dev_t *dev, 3174 u8 reset_type, int reply_queue) 3175 { 3176 int rc = 0; 3177 3178 /* We can really only handle one reset at a time */ 3179 if (mutex_lock_interruptible(&h->reset_mutex) == -EINTR) { 3180 dev_warn(&h->pdev->dev, "concurrent reset wait interrupted.\n"); 3181 return -EINTR; 3182 } 3183 3184 rc = hpsa_send_reset(h, dev, reset_type, reply_queue); 3185 if (!rc) { 3186 /* incremented by sending the reset request */ 3187 atomic_dec(&dev->commands_outstanding); 3188 wait_event(h->event_sync_wait_queue, 3189 atomic_read(&dev->commands_outstanding) <= 0 || 3190 lockup_detected(h)); 3191 } 3192 3193 if (unlikely(lockup_detected(h))) { 3194 dev_warn(&h->pdev->dev, 3195 "Controller lockup detected during reset wait\n"); 3196 rc = -ENODEV; 3197 } 3198 3199 if (!rc) 3200 rc = wait_for_device_to_become_ready(h, dev->scsi3addr, 0); 3201 3202 mutex_unlock(&h->reset_mutex); 3203 return rc; 3204 } 3205 3206 static void hpsa_get_raid_level(struct ctlr_info *h, 3207 unsigned char *scsi3addr, unsigned char *raid_level) 3208 { 3209 int rc; 3210 unsigned char *buf; 3211 3212 *raid_level = RAID_UNKNOWN; 3213 buf = kzalloc(64, GFP_KERNEL); 3214 if (!buf) 3215 return; 3216 3217 if (!hpsa_vpd_page_supported(h, scsi3addr, 3218 HPSA_VPD_LV_DEVICE_GEOMETRY)) 3219 goto exit; 3220 3221 rc = hpsa_scsi_do_inquiry(h, scsi3addr, VPD_PAGE | 3222 HPSA_VPD_LV_DEVICE_GEOMETRY, buf, 64); 3223 3224 if (rc == 0) 3225 *raid_level = buf[8]; 3226 if (*raid_level > RAID_UNKNOWN) 3227 *raid_level = RAID_UNKNOWN; 3228 exit: 3229 kfree(buf); 3230 return; 3231 } 3232 3233 #define HPSA_MAP_DEBUG 3234 #ifdef HPSA_MAP_DEBUG 3235 static void hpsa_debug_map_buff(struct ctlr_info *h, int rc, 3236 struct raid_map_data *map_buff) 3237 { 3238 struct raid_map_disk_data *dd = &map_buff->data[0]; 3239 int map, row, col; 3240 u16 map_cnt, row_cnt, disks_per_row; 3241 3242 if (rc != 0) 3243 return; 3244 3245 /* Show details only if debugging has been activated. 
*/ 3246 if (h->raid_offload_debug < 2) 3247 return; 3248 3249 dev_info(&h->pdev->dev, "structure_size = %u\n", 3250 le32_to_cpu(map_buff->structure_size)); 3251 dev_info(&h->pdev->dev, "volume_blk_size = %u\n", 3252 le32_to_cpu(map_buff->volume_blk_size)); 3253 dev_info(&h->pdev->dev, "volume_blk_cnt = 0x%llx\n", 3254 le64_to_cpu(map_buff->volume_blk_cnt)); 3255 dev_info(&h->pdev->dev, "physicalBlockShift = %u\n", 3256 map_buff->phys_blk_shift); 3257 dev_info(&h->pdev->dev, "parity_rotation_shift = %u\n", 3258 map_buff->parity_rotation_shift); 3259 dev_info(&h->pdev->dev, "strip_size = %u\n", 3260 le16_to_cpu(map_buff->strip_size)); 3261 dev_info(&h->pdev->dev, "disk_starting_blk = 0x%llx\n", 3262 le64_to_cpu(map_buff->disk_starting_blk)); 3263 dev_info(&h->pdev->dev, "disk_blk_cnt = 0x%llx\n", 3264 le64_to_cpu(map_buff->disk_blk_cnt)); 3265 dev_info(&h->pdev->dev, "data_disks_per_row = %u\n", 3266 le16_to_cpu(map_buff->data_disks_per_row)); 3267 dev_info(&h->pdev->dev, "metadata_disks_per_row = %u\n", 3268 le16_to_cpu(map_buff->metadata_disks_per_row)); 3269 dev_info(&h->pdev->dev, "row_cnt = %u\n", 3270 le16_to_cpu(map_buff->row_cnt)); 3271 dev_info(&h->pdev->dev, "layout_map_count = %u\n", 3272 le16_to_cpu(map_buff->layout_map_count)); 3273 dev_info(&h->pdev->dev, "flags = 0x%x\n", 3274 le16_to_cpu(map_buff->flags)); 3275 dev_info(&h->pdev->dev, "encryption = %s\n", 3276 le16_to_cpu(map_buff->flags) & 3277 RAID_MAP_FLAG_ENCRYPT_ON ? "ON" : "OFF"); 3278 dev_info(&h->pdev->dev, "dekindex = %u\n", 3279 le16_to_cpu(map_buff->dekindex)); 3280 map_cnt = le16_to_cpu(map_buff->layout_map_count); 3281 for (map = 0; map < map_cnt; map++) { 3282 dev_info(&h->pdev->dev, "Map%u:\n", map); 3283 row_cnt = le16_to_cpu(map_buff->row_cnt); 3284 for (row = 0; row < row_cnt; row++) { 3285 dev_info(&h->pdev->dev, " Row%u:\n", row); 3286 disks_per_row = 3287 le16_to_cpu(map_buff->data_disks_per_row); 3288 for (col = 0; col < disks_per_row; col++, dd++) 3289 dev_info(&h->pdev->dev, 3290 " D%02u: h=0x%04x xor=%u,%u\n", 3291 col, dd->ioaccel_handle, 3292 dd->xor_mult[0], dd->xor_mult[1]); 3293 disks_per_row = 3294 le16_to_cpu(map_buff->metadata_disks_per_row); 3295 for (col = 0; col < disks_per_row; col++, dd++) 3296 dev_info(&h->pdev->dev, 3297 " M%02u: h=0x%04x xor=%u,%u\n", 3298 col, dd->ioaccel_handle, 3299 dd->xor_mult[0], dd->xor_mult[1]); 3300 } 3301 } 3302 } 3303 #else 3304 static void hpsa_debug_map_buff(__attribute__((unused)) struct ctlr_info *h, 3305 __attribute__((unused)) int rc, 3306 __attribute__((unused)) struct raid_map_data *map_buff) 3307 { 3308 } 3309 #endif 3310 3311 static int hpsa_get_raid_map(struct ctlr_info *h, 3312 unsigned char *scsi3addr, struct hpsa_scsi_dev_t *this_device) 3313 { 3314 int rc = 0; 3315 struct CommandList *c; 3316 struct ErrorInfo *ei; 3317 3318 c = cmd_alloc(h); 3319 3320 if (fill_cmd(c, HPSA_GET_RAID_MAP, h, &this_device->raid_map, 3321 sizeof(this_device->raid_map), 0, 3322 scsi3addr, TYPE_CMD)) { 3323 dev_warn(&h->pdev->dev, "hpsa_get_raid_map fill_cmd failed\n"); 3324 cmd_free(h, c); 3325 return -1; 3326 } 3327 rc = hpsa_scsi_do_simple_cmd_with_retry(h, c, DMA_FROM_DEVICE, 3328 NO_TIMEOUT); 3329 if (rc) 3330 goto out; 3331 ei = c->err_info; 3332 if (ei->CommandStatus != 0 && ei->CommandStatus != CMD_DATA_UNDERRUN) { 3333 hpsa_scsi_interpret_error(h, c); 3334 rc = -1; 3335 goto out; 3336 } 3337 cmd_free(h, c); 3338 3339 /* @todo in the future, dynamically allocate RAID map memory */ 3340 if (le32_to_cpu(this_device->raid_map.structure_size) > 3341 
sizeof(this_device->raid_map)) { 3342 dev_warn(&h->pdev->dev, "RAID map size is too large!\n"); 3343 rc = -1; 3344 } 3345 hpsa_debug_map_buff(h, rc, &this_device->raid_map); 3346 return rc; 3347 out: 3348 cmd_free(h, c); 3349 return rc; 3350 } 3351 3352 static int hpsa_bmic_sense_subsystem_information(struct ctlr_info *h, 3353 unsigned char scsi3addr[], u16 bmic_device_index, 3354 struct bmic_sense_subsystem_info *buf, size_t bufsize) 3355 { 3356 int rc = IO_OK; 3357 struct CommandList *c; 3358 struct ErrorInfo *ei; 3359 3360 c = cmd_alloc(h); 3361 3362 rc = fill_cmd(c, BMIC_SENSE_SUBSYSTEM_INFORMATION, h, buf, bufsize, 3363 0, RAID_CTLR_LUNID, TYPE_CMD); 3364 if (rc) 3365 goto out; 3366 3367 c->Request.CDB[2] = bmic_device_index & 0xff; 3368 c->Request.CDB[9] = (bmic_device_index >> 8) & 0xff; 3369 3370 rc = hpsa_scsi_do_simple_cmd_with_retry(h, c, DMA_FROM_DEVICE, 3371 NO_TIMEOUT); 3372 if (rc) 3373 goto out; 3374 ei = c->err_info; 3375 if (ei->CommandStatus != 0 && ei->CommandStatus != CMD_DATA_UNDERRUN) { 3376 hpsa_scsi_interpret_error(h, c); 3377 rc = -1; 3378 } 3379 out: 3380 cmd_free(h, c); 3381 return rc; 3382 } 3383 3384 static int hpsa_bmic_id_controller(struct ctlr_info *h, 3385 struct bmic_identify_controller *buf, size_t bufsize) 3386 { 3387 int rc = IO_OK; 3388 struct CommandList *c; 3389 struct ErrorInfo *ei; 3390 3391 c = cmd_alloc(h); 3392 3393 rc = fill_cmd(c, BMIC_IDENTIFY_CONTROLLER, h, buf, bufsize, 3394 0, RAID_CTLR_LUNID, TYPE_CMD); 3395 if (rc) 3396 goto out; 3397 3398 rc = hpsa_scsi_do_simple_cmd_with_retry(h, c, DMA_FROM_DEVICE, 3399 NO_TIMEOUT); 3400 if (rc) 3401 goto out; 3402 ei = c->err_info; 3403 if (ei->CommandStatus != 0 && ei->CommandStatus != CMD_DATA_UNDERRUN) { 3404 hpsa_scsi_interpret_error(h, c); 3405 rc = -1; 3406 } 3407 out: 3408 cmd_free(h, c); 3409 return rc; 3410 } 3411 3412 static int hpsa_bmic_id_physical_device(struct ctlr_info *h, 3413 unsigned char scsi3addr[], u16 bmic_device_index, 3414 struct bmic_identify_physical_device *buf, size_t bufsize) 3415 { 3416 int rc = IO_OK; 3417 struct CommandList *c; 3418 struct ErrorInfo *ei; 3419 3420 c = cmd_alloc(h); 3421 rc = fill_cmd(c, BMIC_IDENTIFY_PHYSICAL_DEVICE, h, buf, bufsize, 3422 0, RAID_CTLR_LUNID, TYPE_CMD); 3423 if (rc) 3424 goto out; 3425 3426 c->Request.CDB[2] = bmic_device_index & 0xff; 3427 c->Request.CDB[9] = (bmic_device_index >> 8) & 0xff; 3428 3429 hpsa_scsi_do_simple_cmd_with_retry(h, c, DMA_FROM_DEVICE, 3430 NO_TIMEOUT); 3431 ei = c->err_info; 3432 if (ei->CommandStatus != 0 && ei->CommandStatus != CMD_DATA_UNDERRUN) { 3433 hpsa_scsi_interpret_error(h, c); 3434 rc = -1; 3435 } 3436 out: 3437 cmd_free(h, c); 3438 3439 return rc; 3440 } 3441 3442 /* 3443 * get enclosure information 3444 * struct ReportExtendedLUNdata *rlep - Used for BMIC drive number 3445 * struct hpsa_scsi_dev_t *encl_dev - device entry for enclosure 3446 * Uses id_physical_device to determine the box_index. 
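 * On success, encl_dev->eli is taken from a RECEIVE DIAGNOSTIC response and
 * encl_dev->box[]/phys_connector[] for the active path are filled in from
 * the BMIC sense-storage-box-params data; failures are logged but otherwise
 * ignored.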
3447 */ 3448 static void hpsa_get_enclosure_info(struct ctlr_info *h, 3449 unsigned char *scsi3addr, 3450 struct ReportExtendedLUNdata *rlep, int rle_index, 3451 struct hpsa_scsi_dev_t *encl_dev) 3452 { 3453 int rc = -1; 3454 struct CommandList *c = NULL; 3455 struct ErrorInfo *ei = NULL; 3456 struct bmic_sense_storage_box_params *bssbp = NULL; 3457 struct bmic_identify_physical_device *id_phys = NULL; 3458 struct ext_report_lun_entry *rle; 3459 u16 bmic_device_index = 0; 3460 3461 if (rle_index < 0 || rle_index >= HPSA_MAX_PHYS_LUN) 3462 return; 3463 3464 rle = &rlep->LUN[rle_index]; 3465 3466 encl_dev->eli = 3467 hpsa_get_enclosure_logical_identifier(h, scsi3addr); 3468 3469 bmic_device_index = GET_BMIC_DRIVE_NUMBER(&rle->lunid[0]); 3470 3471 if (encl_dev->target == -1 || encl_dev->lun == -1) { 3472 rc = IO_OK; 3473 goto out; 3474 } 3475 3476 if (bmic_device_index == 0xFF00 || MASKED_DEVICE(&rle->lunid[0])) { 3477 rc = IO_OK; 3478 goto out; 3479 } 3480 3481 bssbp = kzalloc(sizeof(*bssbp), GFP_KERNEL); 3482 if (!bssbp) 3483 goto out; 3484 3485 id_phys = kzalloc(sizeof(*id_phys), GFP_KERNEL); 3486 if (!id_phys) 3487 goto out; 3488 3489 rc = hpsa_bmic_id_physical_device(h, scsi3addr, bmic_device_index, 3490 id_phys, sizeof(*id_phys)); 3491 if (rc) { 3492 dev_warn(&h->pdev->dev, "%s: id_phys failed %d bdi[0x%x]\n", 3493 __func__, encl_dev->external, bmic_device_index); 3494 goto out; 3495 } 3496 3497 c = cmd_alloc(h); 3498 3499 rc = fill_cmd(c, BMIC_SENSE_STORAGE_BOX_PARAMS, h, bssbp, 3500 sizeof(*bssbp), 0, RAID_CTLR_LUNID, TYPE_CMD); 3501 3502 if (rc) 3503 goto out; 3504 3505 if (id_phys->phys_connector[1] == 'E') 3506 c->Request.CDB[5] = id_phys->box_index; 3507 else 3508 c->Request.CDB[5] = 0; 3509 3510 rc = hpsa_scsi_do_simple_cmd_with_retry(h, c, DMA_FROM_DEVICE, 3511 NO_TIMEOUT); 3512 if (rc) 3513 goto out; 3514 3515 ei = c->err_info; 3516 if (ei->CommandStatus != 0 && ei->CommandStatus != CMD_DATA_UNDERRUN) { 3517 rc = -1; 3518 goto out; 3519 } 3520 3521 encl_dev->box[id_phys->active_path_number] = bssbp->phys_box_on_port; 3522 memcpy(&encl_dev->phys_connector[id_phys->active_path_number], 3523 bssbp->phys_connector, sizeof(bssbp->phys_connector)); 3524 3525 rc = IO_OK; 3526 out: 3527 kfree(bssbp); 3528 kfree(id_phys); 3529 3530 if (c) 3531 cmd_free(h, c); 3532 3533 if (rc != IO_OK) 3534 hpsa_show_dev_msg(KERN_INFO, h, encl_dev, 3535 "Error, could not get enclosure information"); 3536 } 3537 3538 static u64 hpsa_get_sas_address_from_report_physical(struct ctlr_info *h, 3539 unsigned char *scsi3addr) 3540 { 3541 struct ReportExtendedLUNdata *physdev; 3542 u32 nphysicals; 3543 u64 sa = 0; 3544 int i; 3545 3546 physdev = kzalloc(sizeof(*physdev), GFP_KERNEL); 3547 if (!physdev) 3548 return 0; 3549 3550 if (hpsa_scsi_do_report_phys_luns(h, physdev, sizeof(*physdev))) { 3551 dev_err(&h->pdev->dev, "report physical LUNs failed.\n"); 3552 kfree(physdev); 3553 return 0; 3554 } 3555 nphysicals = get_unaligned_be32(physdev->LUNListLength) / 24; 3556 3557 for (i = 0; i < nphysicals; i++) 3558 if (!memcmp(&physdev->LUN[i].lunid[0], scsi3addr, 8)) { 3559 sa = get_unaligned_be64(&physdev->LUN[i].wwid[0]); 3560 break; 3561 } 3562 3563 kfree(physdev); 3564 3565 return sa; 3566 } 3567 3568 static void hpsa_get_sas_address(struct ctlr_info *h, unsigned char *scsi3addr, 3569 struct hpsa_scsi_dev_t *dev) 3570 { 3571 int rc; 3572 u64 sa = 0; 3573 3574 if (is_hba_lunid(scsi3addr)) { 3575 struct bmic_sense_subsystem_info *ssi; 3576 3577 ssi = kzalloc(sizeof(*ssi), GFP_KERNEL); 3578 if (!ssi) 3579 return; 
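/*
 * The controller LUN's SAS address is its primary world wide ID as
 * reported by BMIC sense-subsystem-information; it is cached in
 * h->sas_address as well as in dev->sas_address.
 */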
3580 3581 rc = hpsa_bmic_sense_subsystem_information(h, 3582 scsi3addr, 0, ssi, sizeof(*ssi)); 3583 if (rc == 0) { 3584 sa = get_unaligned_be64(ssi->primary_world_wide_id); 3585 h->sas_address = sa; 3586 } 3587 3588 kfree(ssi); 3589 } else 3590 sa = hpsa_get_sas_address_from_report_physical(h, scsi3addr); 3591 3592 dev->sas_address = sa; 3593 } 3594 3595 static void hpsa_ext_ctrl_present(struct ctlr_info *h, 3596 struct ReportExtendedLUNdata *physdev) 3597 { 3598 u32 nphysicals; 3599 int i; 3600 3601 if (h->discovery_polling) 3602 return; 3603 3604 nphysicals = (get_unaligned_be32(physdev->LUNListLength) / 24) + 1; 3605 3606 for (i = 0; i < nphysicals; i++) { 3607 if (physdev->LUN[i].device_type == 3608 BMIC_DEVICE_TYPE_CONTROLLER 3609 && !is_hba_lunid(physdev->LUN[i].lunid)) { 3610 dev_info(&h->pdev->dev, 3611 "External controller present, activate discovery polling and disable rld caching\n"); 3612 hpsa_disable_rld_caching(h); 3613 h->discovery_polling = 1; 3614 break; 3615 } 3616 } 3617 } 3618 3619 /* Get a device id from inquiry page 0x83 */ 3620 static bool hpsa_vpd_page_supported(struct ctlr_info *h, 3621 unsigned char scsi3addr[], u8 page) 3622 { 3623 int rc; 3624 int i; 3625 int pages; 3626 unsigned char *buf, bufsize; 3627 3628 buf = kzalloc(256, GFP_KERNEL); 3629 if (!buf) 3630 return false; 3631 3632 /* Get the size of the page list first */ 3633 rc = hpsa_scsi_do_inquiry(h, scsi3addr, 3634 VPD_PAGE | HPSA_VPD_SUPPORTED_PAGES, 3635 buf, HPSA_VPD_HEADER_SZ); 3636 if (rc != 0) 3637 goto exit_unsupported; 3638 pages = buf[3]; 3639 if ((pages + HPSA_VPD_HEADER_SZ) <= 255) 3640 bufsize = pages + HPSA_VPD_HEADER_SZ; 3641 else 3642 bufsize = 255; 3643 3644 /* Get the whole VPD page list */ 3645 rc = hpsa_scsi_do_inquiry(h, scsi3addr, 3646 VPD_PAGE | HPSA_VPD_SUPPORTED_PAGES, 3647 buf, bufsize); 3648 if (rc != 0) 3649 goto exit_unsupported; 3650 3651 pages = buf[3]; 3652 for (i = 1; i <= pages; i++) 3653 if (buf[3 + i] == page) 3654 goto exit_supported; 3655 exit_unsupported: 3656 kfree(buf); 3657 return false; 3658 exit_supported: 3659 kfree(buf); 3660 return true; 3661 } 3662 3663 /* 3664 * Called during a scan operation. 3665 * Sets ioaccel status on the new device list, not the existing device list 3666 * 3667 * The device list used during I/O will be updated later in 3668 * adjust_hpsa_scsi_table. 3669 */ 3670 static void hpsa_get_ioaccel_status(struct ctlr_info *h, 3671 unsigned char *scsi3addr, struct hpsa_scsi_dev_t *this_device) 3672 { 3673 int rc; 3674 unsigned char *buf; 3675 u8 ioaccel_status; 3676 3677 this_device->offload_config = 0; 3678 this_device->offload_enabled = 0; 3679 this_device->offload_to_be_enabled = 0; 3680 3681 buf = kzalloc(64, GFP_KERNEL); 3682 if (!buf) 3683 return; 3684 if (!hpsa_vpd_page_supported(h, scsi3addr, HPSA_VPD_LV_IOACCEL_STATUS)) 3685 goto out; 3686 rc = hpsa_scsi_do_inquiry(h, scsi3addr, 3687 VPD_PAGE | HPSA_VPD_LV_IOACCEL_STATUS, buf, 64); 3688 if (rc != 0) 3689 goto out; 3690 3691 #define IOACCEL_STATUS_BYTE 4 3692 #define OFFLOAD_CONFIGURED_BIT 0x01 3693 #define OFFLOAD_ENABLED_BIT 0x02 3694 ioaccel_status = buf[IOACCEL_STATUS_BYTE]; 3695 this_device->offload_config = 3696 !!(ioaccel_status & OFFLOAD_CONFIGURED_BIT); 3697 if (this_device->offload_config) { 3698 bool offload_enabled = 3699 !!(ioaccel_status & OFFLOAD_ENABLED_BIT); 3700 /* 3701 * Check to see if offload can be enabled. 
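 * Offload is only marked to-be-enabled once the RAID map has been read
 * successfully; the flag is committed to offload_enabled later, in
 * adjust_hpsa_scsi_table(), after phys_disk[] has been rebuilt.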
3702 */ 3703 if (offload_enabled) { 3704 rc = hpsa_get_raid_map(h, scsi3addr, this_device); 3705 if (rc) /* could not load raid_map */ 3706 goto out; 3707 this_device->offload_to_be_enabled = 1; 3708 } 3709 } 3710 3711 out: 3712 kfree(buf); 3713 return; 3714 } 3715 3716 /* Get the device id from inquiry page 0x83 */ 3717 static int hpsa_get_device_id(struct ctlr_info *h, unsigned char *scsi3addr, 3718 unsigned char *device_id, int index, int buflen) 3719 { 3720 int rc; 3721 unsigned char *buf; 3722 3723 /* Does controller have VPD for device id? */ 3724 if (!hpsa_vpd_page_supported(h, scsi3addr, HPSA_VPD_LV_DEVICE_ID)) 3725 return 1; /* not supported */ 3726 3727 buf = kzalloc(64, GFP_KERNEL); 3728 if (!buf) 3729 return -ENOMEM; 3730 3731 rc = hpsa_scsi_do_inquiry(h, scsi3addr, VPD_PAGE | 3732 HPSA_VPD_LV_DEVICE_ID, buf, 64); 3733 if (rc == 0) { 3734 if (buflen > 16) 3735 buflen = 16; 3736 memcpy(device_id, &buf[8], buflen); 3737 } 3738 3739 kfree(buf); 3740 3741 return rc; /*0 - got id, otherwise, didn't */ 3742 } 3743 3744 static int hpsa_scsi_do_report_luns(struct ctlr_info *h, int logical, 3745 void *buf, int bufsize, 3746 int extended_response) 3747 { 3748 int rc = IO_OK; 3749 struct CommandList *c; 3750 unsigned char scsi3addr[8]; 3751 struct ErrorInfo *ei; 3752 3753 c = cmd_alloc(h); 3754 3755 /* address the controller */ 3756 memset(scsi3addr, 0, sizeof(scsi3addr)); 3757 if (fill_cmd(c, logical ? HPSA_REPORT_LOG : HPSA_REPORT_PHYS, h, 3758 buf, bufsize, 0, scsi3addr, TYPE_CMD)) { 3759 rc = -EAGAIN; 3760 goto out; 3761 } 3762 if (extended_response) 3763 c->Request.CDB[1] = extended_response; 3764 rc = hpsa_scsi_do_simple_cmd_with_retry(h, c, DMA_FROM_DEVICE, 3765 NO_TIMEOUT); 3766 if (rc) 3767 goto out; 3768 ei = c->err_info; 3769 if (ei->CommandStatus != 0 && 3770 ei->CommandStatus != CMD_DATA_UNDERRUN) { 3771 hpsa_scsi_interpret_error(h, c); 3772 rc = -EIO; 3773 } else { 3774 struct ReportLUNdata *rld = buf; 3775 3776 if (rld->extended_response_flag != extended_response) { 3777 if (!h->legacy_board) { 3778 dev_err(&h->pdev->dev, 3779 "report luns requested format %u, got %u\n", 3780 extended_response, 3781 rld->extended_response_flag); 3782 rc = -EINVAL; 3783 } else 3784 rc = -EOPNOTSUPP; 3785 } 3786 } 3787 out: 3788 cmd_free(h, c); 3789 return rc; 3790 } 3791 3792 static inline int hpsa_scsi_do_report_phys_luns(struct ctlr_info *h, 3793 struct ReportExtendedLUNdata *buf, int bufsize) 3794 { 3795 int rc; 3796 struct ReportLUNdata *lbuf; 3797 3798 rc = hpsa_scsi_do_report_luns(h, 0, buf, bufsize, 3799 HPSA_REPORT_PHYS_EXTENDED); 3800 if (!rc || rc != -EOPNOTSUPP) 3801 return rc; 3802 3803 /* REPORT PHYS EXTENDED is not supported */ 3804 lbuf = kzalloc(sizeof(*lbuf), GFP_KERNEL); 3805 if (!lbuf) 3806 return -ENOMEM; 3807 3808 rc = hpsa_scsi_do_report_luns(h, 0, lbuf, sizeof(*lbuf), 0); 3809 if (!rc) { 3810 int i; 3811 u32 nphys; 3812 3813 /* Copy ReportLUNdata header */ 3814 memcpy(buf, lbuf, 8); 3815 nphys = be32_to_cpu(*((__be32 *)lbuf->LUNListLength)) / 8; 3816 for (i = 0; i < nphys; i++) 3817 memcpy(buf->LUN[i].lunid, lbuf->LUN[i], 8); 3818 } 3819 kfree(lbuf); 3820 return rc; 3821 } 3822 3823 static inline int hpsa_scsi_do_report_log_luns(struct ctlr_info *h, 3824 struct ReportLUNdata *buf, int bufsize) 3825 { 3826 return hpsa_scsi_do_report_luns(h, 1, buf, bufsize, 0); 3827 } 3828 3829 static inline void hpsa_set_bus_target_lun(struct hpsa_scsi_dev_t *device, 3830 int bus, int target, int lun) 3831 { 3832 device->bus = bus; 3833 device->target = target; 3834 device->lun = lun; 
3835 } 3836 3837 /* Use VPD inquiry to get details of volume status */ 3838 static int hpsa_get_volume_status(struct ctlr_info *h, 3839 unsigned char scsi3addr[]) 3840 { 3841 int rc; 3842 int status; 3843 int size; 3844 unsigned char *buf; 3845 3846 buf = kzalloc(64, GFP_KERNEL); 3847 if (!buf) 3848 return HPSA_VPD_LV_STATUS_UNSUPPORTED; 3849 3850 /* Does controller have VPD for logical volume status? */ 3851 if (!hpsa_vpd_page_supported(h, scsi3addr, HPSA_VPD_LV_STATUS)) 3852 goto exit_failed; 3853 3854 /* Get the size of the VPD return buffer */ 3855 rc = hpsa_scsi_do_inquiry(h, scsi3addr, VPD_PAGE | HPSA_VPD_LV_STATUS, 3856 buf, HPSA_VPD_HEADER_SZ); 3857 if (rc != 0) 3858 goto exit_failed; 3859 size = buf[3]; 3860 3861 /* Now get the whole VPD buffer */ 3862 rc = hpsa_scsi_do_inquiry(h, scsi3addr, VPD_PAGE | HPSA_VPD_LV_STATUS, 3863 buf, size + HPSA_VPD_HEADER_SZ); 3864 if (rc != 0) 3865 goto exit_failed; 3866 status = buf[4]; /* status byte */ 3867 3868 kfree(buf); 3869 return status; 3870 exit_failed: 3871 kfree(buf); 3872 return HPSA_VPD_LV_STATUS_UNSUPPORTED; 3873 } 3874 3875 /* Determine offline status of a volume. 3876 * Return either: 3877 * 0 (not offline) 3878 * 0xff (offline for unknown reasons) 3879 * # (integer code indicating one of several NOT READY states 3880 * describing why a volume is to be kept offline) 3881 */ 3882 static unsigned char hpsa_volume_offline(struct ctlr_info *h, 3883 unsigned char scsi3addr[]) 3884 { 3885 struct CommandList *c; 3886 unsigned char *sense; 3887 u8 sense_key, asc, ascq; 3888 int sense_len; 3889 int rc, ldstat = 0; 3890 #define ASC_LUN_NOT_READY 0x04 3891 #define ASCQ_LUN_NOT_READY_FORMAT_IN_PROGRESS 0x04 3892 #define ASCQ_LUN_NOT_READY_INITIALIZING_CMD_REQ 0x02 3893 3894 c = cmd_alloc(h); 3895 3896 (void) fill_cmd(c, TEST_UNIT_READY, h, NULL, 0, 0, scsi3addr, TYPE_CMD); 3897 rc = hpsa_scsi_do_simple_cmd(h, c, DEFAULT_REPLY_QUEUE, 3898 NO_TIMEOUT); 3899 if (rc) { 3900 cmd_free(h, c); 3901 return HPSA_VPD_LV_STATUS_UNSUPPORTED; 3902 } 3903 sense = c->err_info->SenseInfo; 3904 if (c->err_info->SenseLen > sizeof(c->err_info->SenseInfo)) 3905 sense_len = sizeof(c->err_info->SenseInfo); 3906 else 3907 sense_len = c->err_info->SenseLen; 3908 decode_sense_data(sense, sense_len, &sense_key, &asc, &ascq); 3909 cmd_free(h, c); 3910 3911 /* Determine the reason for not ready state */ 3912 ldstat = hpsa_get_volume_status(h, scsi3addr); 3913 3914 /* Keep volume offline in certain cases: */ 3915 switch (ldstat) { 3916 case HPSA_LV_FAILED: 3917 case HPSA_LV_UNDERGOING_ERASE: 3918 case HPSA_LV_NOT_AVAILABLE: 3919 case HPSA_LV_UNDERGOING_RPI: 3920 case HPSA_LV_PENDING_RPI: 3921 case HPSA_LV_ENCRYPTED_NO_KEY: 3922 case HPSA_LV_PLAINTEXT_IN_ENCRYPT_ONLY_CONTROLLER: 3923 case HPSA_LV_UNDERGOING_ENCRYPTION: 3924 case HPSA_LV_UNDERGOING_ENCRYPTION_REKEYING: 3925 case HPSA_LV_ENCRYPTED_IN_NON_ENCRYPTED_CONTROLLER: 3926 return ldstat; 3927 case HPSA_VPD_LV_STATUS_UNSUPPORTED: 3928 /* If VPD status page isn't available, 3929 * use ASC/ASCQ to determine state 3930 */ 3931 if ((ascq == ASCQ_LUN_NOT_READY_FORMAT_IN_PROGRESS) || 3932 (ascq == ASCQ_LUN_NOT_READY_INITIALIZING_CMD_REQ)) 3933 return ldstat; 3934 break; 3935 default: 3936 break; 3937 } 3938 return HPSA_LV_OK; 3939 } 3940 3941 static int hpsa_update_device_info(struct ctlr_info *h, 3942 unsigned char scsi3addr[], struct hpsa_scsi_dev_t *this_device, 3943 unsigned char *is_OBDR_device) 3944 { 3945 3946 #define OBDR_SIG_OFFSET 43 3947 #define OBDR_TAPE_SIG "$DR-10" 3948 #define OBDR_SIG_LEN 
(sizeof(OBDR_TAPE_SIG) - 1) 3949 #define OBDR_TAPE_INQ_SIZE (OBDR_SIG_OFFSET + OBDR_SIG_LEN) 3950 3951 unsigned char *inq_buff; 3952 unsigned char *obdr_sig; 3953 int rc = 0; 3954 3955 inq_buff = kzalloc(OBDR_TAPE_INQ_SIZE, GFP_KERNEL); 3956 if (!inq_buff) { 3957 rc = -ENOMEM; 3958 goto bail_out; 3959 } 3960 3961 /* Do an inquiry to the device to see what it is. */ 3962 if (hpsa_scsi_do_inquiry(h, scsi3addr, 0, inq_buff, 3963 (unsigned char) OBDR_TAPE_INQ_SIZE) != 0) { 3964 dev_err(&h->pdev->dev, 3965 "%s: inquiry failed, device will be skipped.\n", 3966 __func__); 3967 rc = HPSA_INQUIRY_FAILED; 3968 goto bail_out; 3969 } 3970 3971 scsi_sanitize_inquiry_string(&inq_buff[8], 8); 3972 scsi_sanitize_inquiry_string(&inq_buff[16], 16); 3973 3974 this_device->devtype = (inq_buff[0] & 0x1f); 3975 memcpy(this_device->scsi3addr, scsi3addr, 8); 3976 memcpy(this_device->vendor, &inq_buff[8], 3977 sizeof(this_device->vendor)); 3978 memcpy(this_device->model, &inq_buff[16], 3979 sizeof(this_device->model)); 3980 this_device->rev = inq_buff[2]; 3981 memset(this_device->device_id, 0, 3982 sizeof(this_device->device_id)); 3983 if (hpsa_get_device_id(h, scsi3addr, this_device->device_id, 8, 3984 sizeof(this_device->device_id)) < 0) { 3985 dev_err(&h->pdev->dev, 3986 "hpsa%d: %s: can't get device id for [%d:%d:%d:%d]\t%s\t%.16s\n", 3987 h->ctlr, __func__, 3988 h->scsi_host->host_no, 3989 this_device->bus, this_device->target, 3990 this_device->lun, 3991 scsi_device_type(this_device->devtype), 3992 this_device->model); 3993 rc = HPSA_LV_FAILED; 3994 goto bail_out; 3995 } 3996 3997 if ((this_device->devtype == TYPE_DISK || 3998 this_device->devtype == TYPE_ZBC) && 3999 is_logical_dev_addr_mode(scsi3addr)) { 4000 unsigned char volume_offline; 4001 4002 hpsa_get_raid_level(h, scsi3addr, &this_device->raid_level); 4003 if (h->fw_support & MISC_FW_RAID_OFFLOAD_BASIC) 4004 hpsa_get_ioaccel_status(h, scsi3addr, this_device); 4005 volume_offline = hpsa_volume_offline(h, scsi3addr); 4006 if (volume_offline == HPSA_VPD_LV_STATUS_UNSUPPORTED && 4007 h->legacy_board) { 4008 /* 4009 * Legacy boards might not support volume status 4010 */ 4011 dev_info(&h->pdev->dev, 4012 "C0:T%d:L%d Volume status not available, assuming online.\n", 4013 this_device->target, this_device->lun); 4014 volume_offline = 0; 4015 } 4016 this_device->volume_offline = volume_offline; 4017 if (volume_offline == HPSA_LV_FAILED) { 4018 rc = HPSA_LV_FAILED; 4019 dev_err(&h->pdev->dev, 4020 "%s: LV failed, device will be skipped.\n", 4021 __func__); 4022 goto bail_out; 4023 } 4024 } else { 4025 this_device->raid_level = RAID_UNKNOWN; 4026 this_device->offload_config = 0; 4027 hpsa_turn_off_ioaccel_for_device(this_device); 4028 this_device->hba_ioaccel_enabled = 0; 4029 this_device->volume_offline = 0; 4030 this_device->queue_depth = h->nr_cmds; 4031 } 4032 4033 if (this_device->external) 4034 this_device->queue_depth = EXTERNAL_QD; 4035 4036 if (is_OBDR_device) { 4037 /* See if this is a One-Button-Disaster-Recovery device 4038 * by looking for "$DR-10" at offset 43 in inquiry data. 4039 */ 4040 obdr_sig = &inq_buff[OBDR_SIG_OFFSET]; 4041 *is_OBDR_device = (this_device->devtype == TYPE_ROM && 4042 strncmp(obdr_sig, OBDR_TAPE_SIG, 4043 OBDR_SIG_LEN) == 0); 4044 } 4045 kfree(inq_buff); 4046 return 0; 4047 4048 bail_out: 4049 kfree(inq_buff); 4050 return rc; 4051 } 4052 4053 /* 4054 * Helper function to assign bus, target, lun mapping of devices. 
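 * The bus is chosen from the address type: HBA, physical device,
 * external RAID volume, or local RAID volume.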
4055 * Logical drive target and lun are assigned at this time, but 4056 * physical device lun and target assignment are deferred (assigned 4057 * in hpsa_find_target_lun, called by hpsa_scsi_add_entry.) 4058 */ 4059 static void figure_bus_target_lun(struct ctlr_info *h, 4060 u8 *lunaddrbytes, struct hpsa_scsi_dev_t *device) 4061 { 4062 u32 lunid = get_unaligned_le32(lunaddrbytes); 4063 4064 if (!is_logical_dev_addr_mode(lunaddrbytes)) { 4065 /* physical device, target and lun filled in later */ 4066 if (is_hba_lunid(lunaddrbytes)) { 4067 int bus = HPSA_HBA_BUS; 4068 4069 if (!device->rev) 4070 bus = HPSA_LEGACY_HBA_BUS; 4071 hpsa_set_bus_target_lun(device, 4072 bus, 0, lunid & 0x3fff); 4073 } else 4074 /* defer target, lun assignment for physical devices */ 4075 hpsa_set_bus_target_lun(device, 4076 HPSA_PHYSICAL_DEVICE_BUS, -1, -1); 4077 return; 4078 } 4079 /* It's a logical device */ 4080 if (device->external) { 4081 hpsa_set_bus_target_lun(device, 4082 HPSA_EXTERNAL_RAID_VOLUME_BUS, (lunid >> 16) & 0x3fff, 4083 lunid & 0x00ff); 4084 return; 4085 } 4086 hpsa_set_bus_target_lun(device, HPSA_RAID_VOLUME_BUS, 4087 0, lunid & 0x3fff); 4088 } 4089 4090 static int figure_external_status(struct ctlr_info *h, int raid_ctlr_position, 4091 int i, int nphysicals, int nlocal_logicals) 4092 { 4093 /* In report logicals, local logicals are listed first, 4094 * then any externals. 4095 */ 4096 int logicals_start = nphysicals + (raid_ctlr_position == 0); 4097 4098 if (i == raid_ctlr_position) 4099 return 0; 4100 4101 if (i < logicals_start) 4102 return 0; 4103 4104 /* i is in logicals range, but still within local logicals */ 4105 if ((i - nphysicals - (raid_ctlr_position == 0)) < nlocal_logicals) 4106 return 0; 4107 4108 return 1; /* it's an external lun */ 4109 } 4110 4111 /* 4112 * Do CISS_REPORT_PHYS and CISS_REPORT_LOG. Data is returned in physdev, 4113 * logdev. The number of luns in physdev and logdev are returned in 4114 * *nphysicals and *nlogicals, respectively. 4115 * Returns 0 on success, -1 otherwise. 4116 */ 4117 static int hpsa_gather_lun_info(struct ctlr_info *h, 4118 struct ReportExtendedLUNdata *physdev, u32 *nphysicals, 4119 struct ReportLUNdata *logdev, u32 *nlogicals) 4120 { 4121 if (hpsa_scsi_do_report_phys_luns(h, physdev, sizeof(*physdev))) { 4122 dev_err(&h->pdev->dev, "report physical LUNs failed.\n"); 4123 return -1; 4124 } 4125 *nphysicals = be32_to_cpu(*((__be32 *)physdev->LUNListLength)) / 24; 4126 if (*nphysicals > HPSA_MAX_PHYS_LUN) { 4127 dev_warn(&h->pdev->dev, "maximum physical LUNs (%d) exceeded. %d LUNs ignored.\n", 4128 HPSA_MAX_PHYS_LUN, *nphysicals - HPSA_MAX_PHYS_LUN); 4129 *nphysicals = HPSA_MAX_PHYS_LUN; 4130 } 4131 if (hpsa_scsi_do_report_log_luns(h, logdev, sizeof(*logdev))) { 4132 dev_err(&h->pdev->dev, "report logical LUNs failed.\n"); 4133 return -1; 4134 } 4135 *nlogicals = be32_to_cpu(*((__be32 *) logdev->LUNListLength)) / 8; 4136 /* Reject Logicals in excess of our max capability. */ 4137 if (*nlogicals > HPSA_MAX_LUN) { 4138 dev_warn(&h->pdev->dev, 4139 "maximum logical LUNs (%d) exceeded. " 4140 "%d LUNs ignored.\n", HPSA_MAX_LUN, 4141 *nlogicals - HPSA_MAX_LUN); 4142 *nlogicals = HPSA_MAX_LUN; 4143 } 4144 if (*nlogicals + *nphysicals > HPSA_MAX_PHYS_LUN) { 4145 dev_warn(&h->pdev->dev, 4146 "maximum logical + physical LUNs (%d) exceeded. 
" 4147 "%d LUNs ignored.\n", HPSA_MAX_PHYS_LUN, 4148 *nphysicals + *nlogicals - HPSA_MAX_PHYS_LUN); 4149 *nlogicals = HPSA_MAX_PHYS_LUN - *nphysicals; 4150 } 4151 return 0; 4152 } 4153 4154 static u8 *figure_lunaddrbytes(struct ctlr_info *h, int raid_ctlr_position, 4155 int i, int nphysicals, int nlogicals, 4156 struct ReportExtendedLUNdata *physdev_list, 4157 struct ReportLUNdata *logdev_list) 4158 { 4159 /* Helper function, figure out where the LUN ID info is coming from 4160 * given index i, lists of physical and logical devices, where in 4161 * the list the raid controller is supposed to appear (first or last) 4162 */ 4163 4164 int logicals_start = nphysicals + (raid_ctlr_position == 0); 4165 int last_device = nphysicals + nlogicals + (raid_ctlr_position == 0); 4166 4167 if (i == raid_ctlr_position) 4168 return RAID_CTLR_LUNID; 4169 4170 if (i < logicals_start) 4171 return &physdev_list->LUN[i - 4172 (raid_ctlr_position == 0)].lunid[0]; 4173 4174 if (i < last_device) 4175 return &logdev_list->LUN[i - nphysicals - 4176 (raid_ctlr_position == 0)][0]; 4177 BUG(); 4178 return NULL; 4179 } 4180 4181 /* get physical drive ioaccel handle and queue depth */ 4182 static void hpsa_get_ioaccel_drive_info(struct ctlr_info *h, 4183 struct hpsa_scsi_dev_t *dev, 4184 struct ReportExtendedLUNdata *rlep, int rle_index, 4185 struct bmic_identify_physical_device *id_phys) 4186 { 4187 int rc; 4188 struct ext_report_lun_entry *rle; 4189 4190 if (rle_index < 0 || rle_index >= HPSA_MAX_PHYS_LUN) 4191 return; 4192 4193 rle = &rlep->LUN[rle_index]; 4194 4195 dev->ioaccel_handle = rle->ioaccel_handle; 4196 if ((rle->device_flags & 0x08) && dev->ioaccel_handle) 4197 dev->hba_ioaccel_enabled = 1; 4198 memset(id_phys, 0, sizeof(*id_phys)); 4199 rc = hpsa_bmic_id_physical_device(h, &rle->lunid[0], 4200 GET_BMIC_DRIVE_NUMBER(&rle->lunid[0]), id_phys, 4201 sizeof(*id_phys)); 4202 if (!rc) 4203 /* Reserve space for FW operations */ 4204 #define DRIVE_CMDS_RESERVED_FOR_FW 2 4205 #define DRIVE_QUEUE_DEPTH 7 4206 dev->queue_depth = 4207 le16_to_cpu(id_phys->current_queue_depth_limit) - 4208 DRIVE_CMDS_RESERVED_FOR_FW; 4209 else 4210 dev->queue_depth = DRIVE_QUEUE_DEPTH; /* conservative */ 4211 } 4212 4213 static void hpsa_get_path_info(struct hpsa_scsi_dev_t *this_device, 4214 struct ReportExtendedLUNdata *rlep, int rle_index, 4215 struct bmic_identify_physical_device *id_phys) 4216 { 4217 struct ext_report_lun_entry *rle; 4218 4219 if (rle_index < 0 || rle_index >= HPSA_MAX_PHYS_LUN) 4220 return; 4221 4222 rle = &rlep->LUN[rle_index]; 4223 4224 if ((rle->device_flags & 0x08) && this_device->ioaccel_handle) 4225 this_device->hba_ioaccel_enabled = 1; 4226 4227 memcpy(&this_device->active_path_index, 4228 &id_phys->active_path_number, 4229 sizeof(this_device->active_path_index)); 4230 memcpy(&this_device->path_map, 4231 &id_phys->redundant_path_present_map, 4232 sizeof(this_device->path_map)); 4233 memcpy(&this_device->box, 4234 &id_phys->alternate_paths_phys_box_on_port, 4235 sizeof(this_device->box)); 4236 memcpy(&this_device->phys_connector, 4237 &id_phys->alternate_paths_phys_connector, 4238 sizeof(this_device->phys_connector)); 4239 memcpy(&this_device->bay, 4240 &id_phys->phys_bay_in_box, 4241 sizeof(this_device->bay)); 4242 } 4243 4244 /* get number of local logical disks. 
*/ 4245 static int hpsa_set_local_logical_count(struct ctlr_info *h, 4246 struct bmic_identify_controller *id_ctlr, 4247 u32 *nlocals) 4248 { 4249 int rc; 4250 4251 if (!id_ctlr) { 4252 dev_warn(&h->pdev->dev, "%s: id_ctlr buffer is NULL.\n", 4253 __func__); 4254 return -ENOMEM; 4255 } 4256 memset(id_ctlr, 0, sizeof(*id_ctlr)); 4257 rc = hpsa_bmic_id_controller(h, id_ctlr, sizeof(*id_ctlr)); 4258 if (!rc) 4259 if (id_ctlr->configured_logical_drive_count < 255) 4260 *nlocals = id_ctlr->configured_logical_drive_count; 4261 else 4262 *nlocals = le16_to_cpu( 4263 id_ctlr->extended_logical_unit_count); 4264 else 4265 *nlocals = -1; 4266 return rc; 4267 } 4268 4269 static bool hpsa_is_disk_spare(struct ctlr_info *h, u8 *lunaddrbytes) 4270 { 4271 struct bmic_identify_physical_device *id_phys; 4272 bool is_spare = false; 4273 int rc; 4274 4275 id_phys = kzalloc(sizeof(*id_phys), GFP_KERNEL); 4276 if (!id_phys) 4277 return false; 4278 4279 rc = hpsa_bmic_id_physical_device(h, 4280 lunaddrbytes, 4281 GET_BMIC_DRIVE_NUMBER(lunaddrbytes), 4282 id_phys, sizeof(*id_phys)); 4283 if (rc == 0) 4284 is_spare = (id_phys->more_flags >> 6) & 0x01; 4285 4286 kfree(id_phys); 4287 return is_spare; 4288 } 4289 4290 #define RPL_DEV_FLAG_NON_DISK 0x1 4291 #define RPL_DEV_FLAG_UNCONFIG_DISK_REPORTING_SUPPORTED 0x2 4292 #define RPL_DEV_FLAG_UNCONFIG_DISK 0x4 4293 4294 #define BMIC_DEVICE_TYPE_ENCLOSURE 6 4295 4296 static bool hpsa_skip_device(struct ctlr_info *h, u8 *lunaddrbytes, 4297 struct ext_report_lun_entry *rle) 4298 { 4299 u8 device_flags; 4300 u8 device_type; 4301 4302 if (!MASKED_DEVICE(lunaddrbytes)) 4303 return false; 4304 4305 device_flags = rle->device_flags; 4306 device_type = rle->device_type; 4307 4308 if (device_flags & RPL_DEV_FLAG_NON_DISK) { 4309 if (device_type == BMIC_DEVICE_TYPE_ENCLOSURE) 4310 return false; 4311 return true; 4312 } 4313 4314 if (!(device_flags & RPL_DEV_FLAG_UNCONFIG_DISK_REPORTING_SUPPORTED)) 4315 return false; 4316 4317 if (device_flags & RPL_DEV_FLAG_UNCONFIG_DISK) 4318 return false; 4319 4320 /* 4321 * Spares may be spun down, we do not want to 4322 * do an Inquiry to a RAID set spare drive as 4323 * that would have them spun up, that is a 4324 * performance hit because I/O to the RAID device 4325 * stops while the spin up occurs which can take 4326 * over 50 seconds. 4327 */ 4328 if (hpsa_is_disk_spare(h, lunaddrbytes)) 4329 return true; 4330 4331 return false; 4332 } 4333 4334 static void hpsa_update_scsi_devices(struct ctlr_info *h) 4335 { 4336 /* the idea here is we could get notified 4337 * that some devices have changed, so we do a report 4338 * physical luns and report logical luns cmd, and adjust 4339 * our list of devices accordingly. 4340 * 4341 * The scsi3addr's of devices won't change so long as the 4342 * adapter is not reset. That means we can rescan and 4343 * tell which devices we already know about, vs. new 4344 * devices, vs. disappearing devices. 
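 *
 * The rebuilt list is handed to adjust_hpsa_scsi_table(), which
 * reconciles it with the devices the SCSI midlayer already knows about.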
4345 */ 4346 struct ReportExtendedLUNdata *physdev_list = NULL; 4347 struct ReportLUNdata *logdev_list = NULL; 4348 struct bmic_identify_physical_device *id_phys = NULL; 4349 struct bmic_identify_controller *id_ctlr = NULL; 4350 u32 nphysicals = 0; 4351 u32 nlogicals = 0; 4352 u32 nlocal_logicals = 0; 4353 u32 ndev_allocated = 0; 4354 struct hpsa_scsi_dev_t **currentsd, *this_device, *tmpdevice; 4355 int ncurrent = 0; 4356 int i, ndevs_to_allocate; 4357 int raid_ctlr_position; 4358 bool physical_device; 4359 4360 currentsd = kcalloc(HPSA_MAX_DEVICES, sizeof(*currentsd), GFP_KERNEL); 4361 physdev_list = kzalloc(sizeof(*physdev_list), GFP_KERNEL); 4362 logdev_list = kzalloc(sizeof(*logdev_list), GFP_KERNEL); 4363 tmpdevice = kzalloc(sizeof(*tmpdevice), GFP_KERNEL); 4364 id_phys = kzalloc(sizeof(*id_phys), GFP_KERNEL); 4365 id_ctlr = kzalloc(sizeof(*id_ctlr), GFP_KERNEL); 4366 4367 if (!currentsd || !physdev_list || !logdev_list || 4368 !tmpdevice || !id_phys || !id_ctlr) { 4369 dev_err(&h->pdev->dev, "out of memory\n"); 4370 goto out; 4371 } 4372 4373 h->drv_req_rescan = 0; /* cancel scheduled rescan - we're doing it. */ 4374 4375 if (hpsa_gather_lun_info(h, physdev_list, &nphysicals, 4376 logdev_list, &nlogicals)) { 4377 h->drv_req_rescan = 1; 4378 goto out; 4379 } 4380 4381 /* Set number of local logicals (non PTRAID) */ 4382 if (hpsa_set_local_logical_count(h, id_ctlr, &nlocal_logicals)) { 4383 dev_warn(&h->pdev->dev, 4384 "%s: Can't determine number of local logical devices.\n", 4385 __func__); 4386 } 4387 4388 /* We might see up to the maximum number of logical and physical disks 4389 * plus external target devices, and a device for the local RAID 4390 * controller. 4391 */ 4392 ndevs_to_allocate = nphysicals + nlogicals + MAX_EXT_TARGETS + 1; 4393 4394 hpsa_ext_ctrl_present(h, physdev_list); 4395 4396 /* Allocate the per device structures */ 4397 for (i = 0; i < ndevs_to_allocate; i++) { 4398 if (i >= HPSA_MAX_DEVICES) { 4399 dev_warn(&h->pdev->dev, "maximum devices (%d) exceeded." 4400 " %d devices ignored.\n", HPSA_MAX_DEVICES, 4401 ndevs_to_allocate - HPSA_MAX_DEVICES); 4402 break; 4403 } 4404 4405 currentsd[i] = kzalloc(sizeof(*currentsd[i]), GFP_KERNEL); 4406 if (!currentsd[i]) { 4407 h->drv_req_rescan = 1; 4408 goto out; 4409 } 4410 ndev_allocated++; 4411 } 4412 4413 if (is_scsi_rev_5(h)) 4414 raid_ctlr_position = 0; 4415 else 4416 raid_ctlr_position = nphysicals + nlogicals; 4417 4418 /* adjust our table of devices */ 4419 for (i = 0; i < nphysicals + nlogicals + 1; i++) { 4420 u8 *lunaddrbytes, is_OBDR = 0; 4421 int rc = 0; 4422 int phys_dev_index = i - (raid_ctlr_position == 0); 4423 bool skip_device = false; 4424 4425 memset(tmpdevice, 0, sizeof(*tmpdevice)); 4426 4427 physical_device = i < nphysicals + (raid_ctlr_position == 0); 4428 4429 /* Figure out where the LUN ID info is coming from */ 4430 lunaddrbytes = figure_lunaddrbytes(h, raid_ctlr_position, 4431 i, nphysicals, nlogicals, physdev_list, logdev_list); 4432 4433 /* Determine if this is a lun from an external target array */ 4434 tmpdevice->external = 4435 figure_external_status(h, raid_ctlr_position, i, 4436 nphysicals, nlocal_logicals); 4437 4438 /* 4439 * Skip over some devices such as a spare. 
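 * (hpsa_skip_device() filters out masked non-disk devices, other than
 * enclosures, and RAID spares, so that spun-down spares are not spun
 * up by an Inquiry.)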
4440 */ 4441 if (phys_dev_index >= 0 && !tmpdevice->external && 4442 physical_device) { 4443 skip_device = hpsa_skip_device(h, lunaddrbytes, 4444 &physdev_list->LUN[phys_dev_index]); 4445 if (skip_device) 4446 continue; 4447 } 4448 4449 /* Get device type, vendor, model, device id, raid_map */ 4450 rc = hpsa_update_device_info(h, lunaddrbytes, tmpdevice, 4451 &is_OBDR); 4452 if (rc == -ENOMEM) { 4453 dev_warn(&h->pdev->dev, 4454 "Out of memory, rescan deferred.\n"); 4455 h->drv_req_rescan = 1; 4456 goto out; 4457 } 4458 if (rc) { 4459 h->drv_req_rescan = 1; 4460 continue; 4461 } 4462 4463 figure_bus_target_lun(h, lunaddrbytes, tmpdevice); 4464 this_device = currentsd[ncurrent]; 4465 4466 *this_device = *tmpdevice; 4467 this_device->physical_device = physical_device; 4468 4469 /* 4470 * Expose all devices except for physical devices that 4471 * are masked. 4472 */ 4473 if (MASKED_DEVICE(lunaddrbytes) && this_device->physical_device) 4474 this_device->expose_device = 0; 4475 else 4476 this_device->expose_device = 1; 4477 4478 4479 /* 4480 * Get the SAS address for physical devices that are exposed. 4481 */ 4482 if (this_device->physical_device && this_device->expose_device) 4483 hpsa_get_sas_address(h, lunaddrbytes, this_device); 4484 4485 switch (this_device->devtype) { 4486 case TYPE_ROM: 4487 /* We don't *really* support actual CD-ROM devices, 4488 * just "One Button Disaster Recovery" tape drive 4489 * which temporarily pretends to be a CD-ROM drive. 4490 * So we check that the device is really an OBDR tape 4491 * device by checking for "$DR-10" in bytes 43-48 of 4492 * the inquiry data. 4493 */ 4494 if (is_OBDR) 4495 ncurrent++; 4496 break; 4497 case TYPE_DISK: 4498 case TYPE_ZBC: 4499 if (this_device->physical_device) { 4500 /* The disk is in HBA mode. */ 4501 /* Never use RAID mapper in HBA mode. */ 4502 this_device->offload_enabled = 0; 4503 hpsa_get_ioaccel_drive_info(h, this_device, 4504 physdev_list, phys_dev_index, id_phys); 4505 hpsa_get_path_info(this_device, 4506 physdev_list, phys_dev_index, id_phys); 4507 } 4508 ncurrent++; 4509 break; 4510 case TYPE_TAPE: 4511 case TYPE_MEDIUM_CHANGER: 4512 ncurrent++; 4513 break; 4514 case TYPE_ENCLOSURE: 4515 if (!this_device->external) 4516 hpsa_get_enclosure_info(h, lunaddrbytes, 4517 physdev_list, phys_dev_index, 4518 this_device); 4519 ncurrent++; 4520 break; 4521 case TYPE_RAID: 4522 /* Only present the Smartarray HBA as a RAID controller. 4523 * If it's a RAID controller other than the HBA itself 4524 * (an external RAID controller, MSA500 or similar) 4525 * don't present it. 
4526 */ 4527 if (!is_hba_lunid(lunaddrbytes)) 4528 break; 4529 ncurrent++; 4530 break; 4531 default: 4532 break; 4533 } 4534 if (ncurrent >= HPSA_MAX_DEVICES) 4535 break; 4536 } 4537 4538 if (h->sas_host == NULL) { 4539 int rc = 0; 4540 4541 rc = hpsa_add_sas_host(h); 4542 if (rc) { 4543 dev_warn(&h->pdev->dev, 4544 "Could not add sas host %d\n", rc); 4545 goto out; 4546 } 4547 } 4548 4549 adjust_hpsa_scsi_table(h, currentsd, ncurrent); 4550 out: 4551 kfree(tmpdevice); 4552 for (i = 0; i < ndev_allocated; i++) 4553 kfree(currentsd[i]); 4554 kfree(currentsd); 4555 kfree(physdev_list); 4556 kfree(logdev_list); 4557 kfree(id_ctlr); 4558 kfree(id_phys); 4559 } 4560 4561 static void hpsa_set_sg_descriptor(struct SGDescriptor *desc, 4562 struct scatterlist *sg) 4563 { 4564 u64 addr64 = (u64) sg_dma_address(sg); 4565 unsigned int len = sg_dma_len(sg); 4566 4567 desc->Addr = cpu_to_le64(addr64); 4568 desc->Len = cpu_to_le32(len); 4569 desc->Ext = 0; 4570 } 4571 4572 /* 4573 * hpsa_scatter_gather takes a struct scsi_cmnd, (cmd), and does the pci 4574 * dma mapping and fills in the scatter gather entries of the 4575 * hpsa command, cp. 4576 */ 4577 static int hpsa_scatter_gather(struct ctlr_info *h, 4578 struct CommandList *cp, 4579 struct scsi_cmnd *cmd) 4580 { 4581 struct scatterlist *sg; 4582 int use_sg, i, sg_limit, chained; 4583 struct SGDescriptor *curr_sg; 4584 4585 BUG_ON(scsi_sg_count(cmd) > h->maxsgentries); 4586 4587 use_sg = scsi_dma_map(cmd); 4588 if (use_sg < 0) 4589 return use_sg; 4590 4591 if (!use_sg) 4592 goto sglist_finished; 4593 4594 /* 4595 * If the number of entries is greater than the max for a single list, 4596 * then we have a chained list; we will set up all but one entry in the 4597 * first list (the last entry is saved for link information); 4598 * otherwise, we don't have a chained list and we'll set up at each of 4599 * the entries in the one list. 4600 */ 4601 curr_sg = cp->SG; 4602 chained = use_sg > h->max_cmd_sg_entries; 4603 sg_limit = chained ? h->max_cmd_sg_entries - 1 : use_sg; 4604 scsi_for_each_sg(cmd, sg, sg_limit, i) { 4605 hpsa_set_sg_descriptor(curr_sg, sg); 4606 curr_sg++; 4607 } 4608 4609 if (chained) { 4610 /* 4611 * Continue with the chained list. Set curr_sg to the chained 4612 * list. Modify the limit to the total count less the entries 4613 * we've already set up. Resume the scan at the list entry 4614 * where the previous loop left off. 4615 */ 4616 curr_sg = h->cmd_sg_list[cp->cmdindex]; 4617 sg_limit = use_sg - sg_limit; 4618 for_each_sg(sg, sg, sg_limit, i) { 4619 hpsa_set_sg_descriptor(curr_sg, sg); 4620 curr_sg++; 4621 } 4622 } 4623 4624 /* Back the pointer up to the last entry and mark it as "last". */ 4625 (curr_sg - 1)->Ext = cpu_to_le32(HPSA_SG_LAST); 4626 4627 if (use_sg + chained > h->maxSG) 4628 h->maxSG = use_sg + chained; 4629 4630 if (chained) { 4631 cp->Header.SGList = h->max_cmd_sg_entries; 4632 cp->Header.SGTotal = cpu_to_le16(use_sg + 1); 4633 if (hpsa_map_sg_chain_block(h, cp)) { 4634 scsi_dma_unmap(cmd); 4635 return -1; 4636 } 4637 return 0; 4638 } 4639 4640 sglist_finished: 4641 4642 cp->Header.SGList = (u8) use_sg; /* no. 
SGs contig in this cmd */ 4643 cp->Header.SGTotal = cpu_to_le16(use_sg); /* total sgs in cmd list */ 4644 return 0; 4645 } 4646 4647 static inline void warn_zero_length_transfer(struct ctlr_info *h, 4648 u8 *cdb, int cdb_len, 4649 const char *func) 4650 { 4651 dev_warn(&h->pdev->dev, 4652 "%s: Blocking zero-length request: CDB:%*phN\n", 4653 func, cdb_len, cdb); 4654 } 4655 4656 #define IO_ACCEL_INELIGIBLE 1 4657 /* zero-length transfers trigger hardware errors. */ 4658 static bool is_zero_length_transfer(u8 *cdb) 4659 { 4660 u32 block_cnt; 4661 4662 /* Block zero-length transfer sizes on certain commands. */ 4663 switch (cdb[0]) { 4664 case READ_10: 4665 case WRITE_10: 4666 case VERIFY: /* 0x2F */ 4667 case WRITE_VERIFY: /* 0x2E */ 4668 block_cnt = get_unaligned_be16(&cdb[7]); 4669 break; 4670 case READ_12: 4671 case WRITE_12: 4672 case VERIFY_12: /* 0xAF */ 4673 case WRITE_VERIFY_12: /* 0xAE */ 4674 block_cnt = get_unaligned_be32(&cdb[6]); 4675 break; 4676 case READ_16: 4677 case WRITE_16: 4678 case VERIFY_16: /* 0x8F */ 4679 block_cnt = get_unaligned_be32(&cdb[10]); 4680 break; 4681 default: 4682 return false; 4683 } 4684 4685 return block_cnt == 0; 4686 } 4687 4688 static int fixup_ioaccel_cdb(u8 *cdb, int *cdb_len) 4689 { 4690 int is_write = 0; 4691 u32 block; 4692 u32 block_cnt; 4693 4694 /* Perform some CDB fixups if needed using 10 byte reads/writes only */ 4695 switch (cdb[0]) { 4696 case WRITE_6: 4697 case WRITE_12: 4698 is_write = 1; 4699 fallthrough; 4700 case READ_6: 4701 case READ_12: 4702 if (*cdb_len == 6) { 4703 block = (((cdb[1] & 0x1F) << 16) | 4704 (cdb[2] << 8) | 4705 cdb[3]); 4706 block_cnt = cdb[4]; 4707 if (block_cnt == 0) 4708 block_cnt = 256; 4709 } else { 4710 BUG_ON(*cdb_len != 12); 4711 block = get_unaligned_be32(&cdb[2]); 4712 block_cnt = get_unaligned_be32(&cdb[6]); 4713 } 4714 if (block_cnt > 0xffff) 4715 return IO_ACCEL_INELIGIBLE; 4716 4717 cdb[0] = is_write ? 
WRITE_10 : READ_10; 4718 cdb[1] = 0; 4719 cdb[2] = (u8) (block >> 24); 4720 cdb[3] = (u8) (block >> 16); 4721 cdb[4] = (u8) (block >> 8); 4722 cdb[5] = (u8) (block); 4723 cdb[6] = 0; 4724 cdb[7] = (u8) (block_cnt >> 8); 4725 cdb[8] = (u8) (block_cnt); 4726 cdb[9] = 0; 4727 *cdb_len = 10; 4728 break; 4729 } 4730 return 0; 4731 } 4732 4733 static int hpsa_scsi_ioaccel1_queue_command(struct ctlr_info *h, 4734 struct CommandList *c, u32 ioaccel_handle, u8 *cdb, int cdb_len, 4735 u8 *scsi3addr, struct hpsa_scsi_dev_t *phys_disk) 4736 { 4737 struct scsi_cmnd *cmd = c->scsi_cmd; 4738 struct io_accel1_cmd *cp = &h->ioaccel_cmd_pool[c->cmdindex]; 4739 unsigned int len; 4740 unsigned int total_len = 0; 4741 struct scatterlist *sg; 4742 u64 addr64; 4743 int use_sg, i; 4744 struct SGDescriptor *curr_sg; 4745 u32 control = IOACCEL1_CONTROL_SIMPLEQUEUE; 4746 4747 /* TODO: implement chaining support */ 4748 if (scsi_sg_count(cmd) > h->ioaccel_maxsg) { 4749 atomic_dec(&phys_disk->ioaccel_cmds_out); 4750 return IO_ACCEL_INELIGIBLE; 4751 } 4752 4753 BUG_ON(cmd->cmd_len > IOACCEL1_IOFLAGS_CDBLEN_MAX); 4754 4755 if (is_zero_length_transfer(cdb)) { 4756 warn_zero_length_transfer(h, cdb, cdb_len, __func__); 4757 atomic_dec(&phys_disk->ioaccel_cmds_out); 4758 return IO_ACCEL_INELIGIBLE; 4759 } 4760 4761 if (fixup_ioaccel_cdb(cdb, &cdb_len)) { 4762 atomic_dec(&phys_disk->ioaccel_cmds_out); 4763 return IO_ACCEL_INELIGIBLE; 4764 } 4765 4766 c->cmd_type = CMD_IOACCEL1; 4767 4768 /* Adjust the DMA address to point to the accelerated command buffer */ 4769 c->busaddr = (u32) h->ioaccel_cmd_pool_dhandle + 4770 (c->cmdindex * sizeof(*cp)); 4771 BUG_ON(c->busaddr & 0x0000007F); 4772 4773 use_sg = scsi_dma_map(cmd); 4774 if (use_sg < 0) { 4775 atomic_dec(&phys_disk->ioaccel_cmds_out); 4776 return use_sg; 4777 } 4778 4779 if (use_sg) { 4780 curr_sg = cp->SG; 4781 scsi_for_each_sg(cmd, sg, use_sg, i) { 4782 addr64 = (u64) sg_dma_address(sg); 4783 len = sg_dma_len(sg); 4784 total_len += len; 4785 curr_sg->Addr = cpu_to_le64(addr64); 4786 curr_sg->Len = cpu_to_le32(len); 4787 curr_sg->Ext = cpu_to_le32(0); 4788 curr_sg++; 4789 } 4790 (--curr_sg)->Ext = cpu_to_le32(HPSA_SG_LAST); 4791 4792 switch (cmd->sc_data_direction) { 4793 case DMA_TO_DEVICE: 4794 control |= IOACCEL1_CONTROL_DATA_OUT; 4795 break; 4796 case DMA_FROM_DEVICE: 4797 control |= IOACCEL1_CONTROL_DATA_IN; 4798 break; 4799 case DMA_NONE: 4800 control |= IOACCEL1_CONTROL_NODATAXFER; 4801 break; 4802 default: 4803 dev_err(&h->pdev->dev, "unknown data direction: %d\n", 4804 cmd->sc_data_direction); 4805 BUG(); 4806 break; 4807 } 4808 } else { 4809 control |= IOACCEL1_CONTROL_NODATAXFER; 4810 } 4811 4812 c->Header.SGList = use_sg; 4813 /* Fill out the command structure to submit */ 4814 cp->dev_handle = cpu_to_le16(ioaccel_handle & 0xFFFF); 4815 cp->transfer_len = cpu_to_le32(total_len); 4816 cp->io_flags = cpu_to_le16(IOACCEL1_IOFLAGS_IO_REQ | 4817 (cdb_len & IOACCEL1_IOFLAGS_CDBLEN_MASK)); 4818 cp->control = cpu_to_le32(control); 4819 memcpy(cp->CDB, cdb, cdb_len); 4820 memcpy(cp->CISS_LUN, scsi3addr, 8); 4821 /* Tag was already set at init time. */ 4822 enqueue_cmd_and_start_io(h, c); 4823 return 0; 4824 } 4825 4826 /* 4827 * Queue a command directly to a device behind the controller using the 4828 * I/O accelerator path. 
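 * Used for physical disks in HBA mode; the command goes straight to the
 * device's ioaccel handle without any RAID mapping.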
4829 */ 4830 static int hpsa_scsi_ioaccel_direct_map(struct ctlr_info *h, 4831 struct CommandList *c) 4832 { 4833 struct scsi_cmnd *cmd = c->scsi_cmd; 4834 struct hpsa_scsi_dev_t *dev = cmd->device->hostdata; 4835 4836 if (!dev) 4837 return -1; 4838 4839 c->phys_disk = dev; 4840 4841 if (dev->in_reset) 4842 return -1; 4843 4844 return hpsa_scsi_ioaccel_queue_command(h, c, dev->ioaccel_handle, 4845 cmd->cmnd, cmd->cmd_len, dev->scsi3addr, dev); 4846 } 4847 4848 /* 4849 * Set encryption parameters for the ioaccel2 request 4850 */ 4851 static void set_encrypt_ioaccel2(struct ctlr_info *h, 4852 struct CommandList *c, struct io_accel2_cmd *cp) 4853 { 4854 struct scsi_cmnd *cmd = c->scsi_cmd; 4855 struct hpsa_scsi_dev_t *dev = cmd->device->hostdata; 4856 struct raid_map_data *map = &dev->raid_map; 4857 u64 first_block; 4858 4859 /* Are we doing encryption on this device */ 4860 if (!(le16_to_cpu(map->flags) & RAID_MAP_FLAG_ENCRYPT_ON)) 4861 return; 4862 /* Set the data encryption key index. */ 4863 cp->dekindex = map->dekindex; 4864 4865 /* Set the encryption enable flag, encoded into direction field. */ 4866 cp->direction |= IOACCEL2_DIRECTION_ENCRYPT_MASK; 4867 4868 /* Set encryption tweak values based on logical block address 4869 * If block size is 512, tweak value is LBA. 4870 * For other block sizes, tweak is (LBA * block size)/ 512) 4871 */ 4872 switch (cmd->cmnd[0]) { 4873 /* Required? 6-byte cdbs eliminated by fixup_ioaccel_cdb */ 4874 case READ_6: 4875 case WRITE_6: 4876 first_block = (((cmd->cmnd[1] & 0x1F) << 16) | 4877 (cmd->cmnd[2] << 8) | 4878 cmd->cmnd[3]); 4879 break; 4880 case WRITE_10: 4881 case READ_10: 4882 /* Required? 12-byte cdbs eliminated by fixup_ioaccel_cdb */ 4883 case WRITE_12: 4884 case READ_12: 4885 first_block = get_unaligned_be32(&cmd->cmnd[2]); 4886 break; 4887 case WRITE_16: 4888 case READ_16: 4889 first_block = get_unaligned_be64(&cmd->cmnd[2]); 4890 break; 4891 default: 4892 dev_err(&h->pdev->dev, 4893 "ERROR: %s: size (0x%x) not supported for encryption\n", 4894 __func__, cmd->cmnd[0]); 4895 BUG(); 4896 break; 4897 } 4898 4899 if (le32_to_cpu(map->volume_blk_size) != 512) 4900 first_block = first_block * 4901 le32_to_cpu(map->volume_blk_size)/512; 4902 4903 cp->tweak_lower = cpu_to_le32(first_block); 4904 cp->tweak_upper = cpu_to_le32(first_block >> 32); 4905 } 4906 4907 static int hpsa_scsi_ioaccel2_queue_command(struct ctlr_info *h, 4908 struct CommandList *c, u32 ioaccel_handle, u8 *cdb, int cdb_len, 4909 u8 *scsi3addr, struct hpsa_scsi_dev_t *phys_disk) 4910 { 4911 struct scsi_cmnd *cmd = c->scsi_cmd; 4912 struct io_accel2_cmd *cp = &h->ioaccel2_cmd_pool[c->cmdindex]; 4913 struct ioaccel2_sg_element *curr_sg; 4914 int use_sg, i; 4915 struct scatterlist *sg; 4916 u64 addr64; 4917 u32 len; 4918 u32 total_len = 0; 4919 4920 if (!cmd->device) 4921 return -1; 4922 4923 if (!cmd->device->hostdata) 4924 return -1; 4925 4926 BUG_ON(scsi_sg_count(cmd) > h->maxsgentries); 4927 4928 if (is_zero_length_transfer(cdb)) { 4929 warn_zero_length_transfer(h, cdb, cdb_len, __func__); 4930 atomic_dec(&phys_disk->ioaccel_cmds_out); 4931 return IO_ACCEL_INELIGIBLE; 4932 } 4933 4934 if (fixup_ioaccel_cdb(cdb, &cdb_len)) { 4935 atomic_dec(&phys_disk->ioaccel_cmds_out); 4936 return IO_ACCEL_INELIGIBLE; 4937 } 4938 4939 c->cmd_type = CMD_IOACCEL2; 4940 /* Adjust the DMA address to point to the accelerated command buffer */ 4941 c->busaddr = (u32) h->ioaccel2_cmd_pool_dhandle + 4942 (c->cmdindex * sizeof(*cp)); 4943 BUG_ON(c->busaddr & 0x0000007F); 4944 4945 memset(cp, 0, 
sizeof(*cp)); 4946 cp->IU_type = IOACCEL2_IU_TYPE; 4947 4948 use_sg = scsi_dma_map(cmd); 4949 if (use_sg < 0) { 4950 atomic_dec(&phys_disk->ioaccel_cmds_out); 4951 return use_sg; 4952 } 4953 4954 if (use_sg) { 4955 curr_sg = cp->sg; 4956 if (use_sg > h->ioaccel_maxsg) { 4957 addr64 = le64_to_cpu( 4958 h->ioaccel2_cmd_sg_list[c->cmdindex]->address); 4959 curr_sg->address = cpu_to_le64(addr64); 4960 curr_sg->length = 0; 4961 curr_sg->reserved[0] = 0; 4962 curr_sg->reserved[1] = 0; 4963 curr_sg->reserved[2] = 0; 4964 curr_sg->chain_indicator = IOACCEL2_CHAIN; 4965 4966 curr_sg = h->ioaccel2_cmd_sg_list[c->cmdindex]; 4967 } 4968 scsi_for_each_sg(cmd, sg, use_sg, i) { 4969 addr64 = (u64) sg_dma_address(sg); 4970 len = sg_dma_len(sg); 4971 total_len += len; 4972 curr_sg->address = cpu_to_le64(addr64); 4973 curr_sg->length = cpu_to_le32(len); 4974 curr_sg->reserved[0] = 0; 4975 curr_sg->reserved[1] = 0; 4976 curr_sg->reserved[2] = 0; 4977 curr_sg->chain_indicator = 0; 4978 curr_sg++; 4979 } 4980 4981 /* 4982 * Set the last s/g element bit 4983 */ 4984 (curr_sg - 1)->chain_indicator = IOACCEL2_LAST_SG; 4985 4986 switch (cmd->sc_data_direction) { 4987 case DMA_TO_DEVICE: 4988 cp->direction &= ~IOACCEL2_DIRECTION_MASK; 4989 cp->direction |= IOACCEL2_DIR_DATA_OUT; 4990 break; 4991 case DMA_FROM_DEVICE: 4992 cp->direction &= ~IOACCEL2_DIRECTION_MASK; 4993 cp->direction |= IOACCEL2_DIR_DATA_IN; 4994 break; 4995 case DMA_NONE: 4996 cp->direction &= ~IOACCEL2_DIRECTION_MASK; 4997 cp->direction |= IOACCEL2_DIR_NO_DATA; 4998 break; 4999 default: 5000 dev_err(&h->pdev->dev, "unknown data direction: %d\n", 5001 cmd->sc_data_direction); 5002 BUG(); 5003 break; 5004 } 5005 } else { 5006 cp->direction &= ~IOACCEL2_DIRECTION_MASK; 5007 cp->direction |= IOACCEL2_DIR_NO_DATA; 5008 } 5009 5010 /* Set encryption parameters, if necessary */ 5011 set_encrypt_ioaccel2(h, c, cp); 5012 5013 cp->scsi_nexus = cpu_to_le32(ioaccel_handle); 5014 cp->Tag = cpu_to_le32(c->cmdindex << DIRECT_LOOKUP_SHIFT); 5015 memcpy(cp->cdb, cdb, sizeof(cp->cdb)); 5016 5017 cp->data_len = cpu_to_le32(total_len); 5018 cp->err_ptr = cpu_to_le64(c->busaddr + 5019 offsetof(struct io_accel2_cmd, error_data)); 5020 cp->err_len = cpu_to_le32(sizeof(cp->error_data)); 5021 5022 /* fill in sg elements */ 5023 if (use_sg > h->ioaccel_maxsg) { 5024 cp->sg_count = 1; 5025 cp->sg[0].length = cpu_to_le32(use_sg * sizeof(cp->sg[0])); 5026 if (hpsa_map_ioaccel2_sg_chain_block(h, cp, c)) { 5027 atomic_dec(&phys_disk->ioaccel_cmds_out); 5028 scsi_dma_unmap(cmd); 5029 return -1; 5030 } 5031 } else 5032 cp->sg_count = (u8) use_sg; 5033 5034 if (phys_disk->in_reset) { 5035 cmd->result = DID_RESET << 16; 5036 return -1; 5037 } 5038 5039 enqueue_cmd_and_start_io(h, c); 5040 return 0; 5041 } 5042 5043 /* 5044 * Queue a command to the correct I/O accelerator path. 
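 * Enforces the physical disk's queue depth, then dispatches to the
 * ioaccel1 or ioaccel2 routine depending on the controller's
 * negotiated transport method.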
5045 */ 5046 static int hpsa_scsi_ioaccel_queue_command(struct ctlr_info *h, 5047 struct CommandList *c, u32 ioaccel_handle, u8 *cdb, int cdb_len, 5048 u8 *scsi3addr, struct hpsa_scsi_dev_t *phys_disk) 5049 { 5050 if (!c->scsi_cmd->device) 5051 return -1; 5052 5053 if (!c->scsi_cmd->device->hostdata) 5054 return -1; 5055 5056 if (phys_disk->in_reset) 5057 return -1; 5058 5059 /* Try to honor the device's queue depth */ 5060 if (atomic_inc_return(&phys_disk->ioaccel_cmds_out) > 5061 phys_disk->queue_depth) { 5062 atomic_dec(&phys_disk->ioaccel_cmds_out); 5063 return IO_ACCEL_INELIGIBLE; 5064 } 5065 if (h->transMethod & CFGTBL_Trans_io_accel1) 5066 return hpsa_scsi_ioaccel1_queue_command(h, c, ioaccel_handle, 5067 cdb, cdb_len, scsi3addr, 5068 phys_disk); 5069 else 5070 return hpsa_scsi_ioaccel2_queue_command(h, c, ioaccel_handle, 5071 cdb, cdb_len, scsi3addr, 5072 phys_disk); 5073 } 5074 5075 static void raid_map_helper(struct raid_map_data *map, 5076 int offload_to_mirror, u32 *map_index, u32 *current_group) 5077 { 5078 if (offload_to_mirror == 0) { 5079 /* use physical disk in the first mirrored group. */ 5080 *map_index %= le16_to_cpu(map->data_disks_per_row); 5081 return; 5082 } 5083 do { 5084 /* determine mirror group that *map_index indicates */ 5085 *current_group = *map_index / 5086 le16_to_cpu(map->data_disks_per_row); 5087 if (offload_to_mirror == *current_group) 5088 continue; 5089 if (*current_group < le16_to_cpu(map->layout_map_count) - 1) { 5090 /* select map index from next group */ 5091 *map_index += le16_to_cpu(map->data_disks_per_row); 5092 (*current_group)++; 5093 } else { 5094 /* select map index from first group */ 5095 *map_index %= le16_to_cpu(map->data_disks_per_row); 5096 *current_group = 0; 5097 } 5098 } while (offload_to_mirror != *current_group); 5099 } 5100 5101 /* 5102 * Attempt to perform offload RAID mapping for a logical volume I/O. 
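 * Uses the raid map to translate the volume-relative LBA range into a
 * physical disk handle and block address, then submits on the ioaccel
 * path.  Returns IO_ACCEL_INELIGIBLE if the request cannot be mapped
 * (e.g. it spans rows or columns, or is a write to a non-RAID-0
 * volume), in which case the caller falls back to the normal RAID path.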
5103 */ 5104 static int hpsa_scsi_ioaccel_raid_map(struct ctlr_info *h, 5105 struct CommandList *c) 5106 { 5107 struct scsi_cmnd *cmd = c->scsi_cmd; 5108 struct hpsa_scsi_dev_t *dev = cmd->device->hostdata; 5109 struct raid_map_data *map = &dev->raid_map; 5110 struct raid_map_disk_data *dd = &map->data[0]; 5111 int is_write = 0; 5112 u32 map_index; 5113 u64 first_block, last_block; 5114 u32 block_cnt; 5115 u32 blocks_per_row; 5116 u64 first_row, last_row; 5117 u32 first_row_offset, last_row_offset; 5118 u32 first_column, last_column; 5119 u64 r0_first_row, r0_last_row; 5120 u32 r5or6_blocks_per_row; 5121 u64 r5or6_first_row, r5or6_last_row; 5122 u32 r5or6_first_row_offset, r5or6_last_row_offset; 5123 u32 r5or6_first_column, r5or6_last_column; 5124 u32 total_disks_per_row; 5125 u32 stripesize; 5126 u32 first_group, last_group, current_group; 5127 u32 map_row; 5128 u32 disk_handle; 5129 u64 disk_block; 5130 u32 disk_block_cnt; 5131 u8 cdb[16]; 5132 u8 cdb_len; 5133 u16 strip_size; 5134 #if BITS_PER_LONG == 32 5135 u64 tmpdiv; 5136 #endif 5137 int offload_to_mirror; 5138 5139 if (!dev) 5140 return -1; 5141 5142 if (dev->in_reset) 5143 return -1; 5144 5145 /* check for valid opcode, get LBA and block count */ 5146 switch (cmd->cmnd[0]) { 5147 case WRITE_6: 5148 is_write = 1; 5149 fallthrough; 5150 case READ_6: 5151 first_block = (((cmd->cmnd[1] & 0x1F) << 16) | 5152 (cmd->cmnd[2] << 8) | 5153 cmd->cmnd[3]); 5154 block_cnt = cmd->cmnd[4]; 5155 if (block_cnt == 0) 5156 block_cnt = 256; 5157 break; 5158 case WRITE_10: 5159 is_write = 1; 5160 fallthrough; 5161 case READ_10: 5162 first_block = 5163 (((u64) cmd->cmnd[2]) << 24) | 5164 (((u64) cmd->cmnd[3]) << 16) | 5165 (((u64) cmd->cmnd[4]) << 8) | 5166 cmd->cmnd[5]; 5167 block_cnt = 5168 (((u32) cmd->cmnd[7]) << 8) | 5169 cmd->cmnd[8]; 5170 break; 5171 case WRITE_12: 5172 is_write = 1; 5173 fallthrough; 5174 case READ_12: 5175 first_block = 5176 (((u64) cmd->cmnd[2]) << 24) | 5177 (((u64) cmd->cmnd[3]) << 16) | 5178 (((u64) cmd->cmnd[4]) << 8) | 5179 cmd->cmnd[5]; 5180 block_cnt = 5181 (((u32) cmd->cmnd[6]) << 24) | 5182 (((u32) cmd->cmnd[7]) << 16) | 5183 (((u32) cmd->cmnd[8]) << 8) | 5184 cmd->cmnd[9]; 5185 break; 5186 case WRITE_16: 5187 is_write = 1; 5188 fallthrough; 5189 case READ_16: 5190 first_block = 5191 (((u64) cmd->cmnd[2]) << 56) | 5192 (((u64) cmd->cmnd[3]) << 48) | 5193 (((u64) cmd->cmnd[4]) << 40) | 5194 (((u64) cmd->cmnd[5]) << 32) | 5195 (((u64) cmd->cmnd[6]) << 24) | 5196 (((u64) cmd->cmnd[7]) << 16) | 5197 (((u64) cmd->cmnd[8]) << 8) | 5198 cmd->cmnd[9]; 5199 block_cnt = 5200 (((u32) cmd->cmnd[10]) << 24) | 5201 (((u32) cmd->cmnd[11]) << 16) | 5202 (((u32) cmd->cmnd[12]) << 8) | 5203 cmd->cmnd[13]; 5204 break; 5205 default: 5206 return IO_ACCEL_INELIGIBLE; /* process via normal I/O path */ 5207 } 5208 last_block = first_block + block_cnt - 1; 5209 5210 /* check for write to non-RAID-0 */ 5211 if (is_write && dev->raid_level != 0) 5212 return IO_ACCEL_INELIGIBLE; 5213 5214 /* check for invalid block or wraparound */ 5215 if (last_block >= le64_to_cpu(map->volume_blk_cnt) || 5216 last_block < first_block) 5217 return IO_ACCEL_INELIGIBLE; 5218 5219 /* calculate stripe information for the request */ 5220 blocks_per_row = le16_to_cpu(map->data_disks_per_row) * 5221 le16_to_cpu(map->strip_size); 5222 strip_size = le16_to_cpu(map->strip_size); 5223 #if BITS_PER_LONG == 32 5224 tmpdiv = first_block; 5225 (void) do_div(tmpdiv, blocks_per_row); 5226 first_row = tmpdiv; 5227 tmpdiv = last_block; 5228 (void) do_div(tmpdiv, blocks_per_row); 
5229 last_row = tmpdiv; 5230 first_row_offset = (u32) (first_block - (first_row * blocks_per_row)); 5231 last_row_offset = (u32) (last_block - (last_row * blocks_per_row)); 5232 tmpdiv = first_row_offset; 5233 (void) do_div(tmpdiv, strip_size); 5234 first_column = tmpdiv; 5235 tmpdiv = last_row_offset; 5236 (void) do_div(tmpdiv, strip_size); 5237 last_column = tmpdiv; 5238 #else 5239 first_row = first_block / blocks_per_row; 5240 last_row = last_block / blocks_per_row; 5241 first_row_offset = (u32) (first_block - (first_row * blocks_per_row)); 5242 last_row_offset = (u32) (last_block - (last_row * blocks_per_row)); 5243 first_column = first_row_offset / strip_size; 5244 last_column = last_row_offset / strip_size; 5245 #endif 5246 5247 /* if this isn't a single row/column then give to the controller */ 5248 if ((first_row != last_row) || (first_column != last_column)) 5249 return IO_ACCEL_INELIGIBLE; 5250 5251 /* proceeding with driver mapping */ 5252 total_disks_per_row = le16_to_cpu(map->data_disks_per_row) + 5253 le16_to_cpu(map->metadata_disks_per_row); 5254 map_row = ((u32)(first_row >> map->parity_rotation_shift)) % 5255 le16_to_cpu(map->row_cnt); 5256 map_index = (map_row * total_disks_per_row) + first_column; 5257 5258 switch (dev->raid_level) { 5259 case HPSA_RAID_0: 5260 break; /* nothing special to do */ 5261 case HPSA_RAID_1: 5262 /* Handles load balance across RAID 1 members. 5263 * (2-drive R1 and R10 with even # of drives.) 5264 * Appropriate for SSDs, not optimal for HDDs 5265 * Ensure we have the correct raid_map. 5266 */ 5267 if (le16_to_cpu(map->layout_map_count) != 2) { 5268 hpsa_turn_off_ioaccel_for_device(dev); 5269 return IO_ACCEL_INELIGIBLE; 5270 } 5271 if (dev->offload_to_mirror) 5272 map_index += le16_to_cpu(map->data_disks_per_row); 5273 dev->offload_to_mirror = !dev->offload_to_mirror; 5274 break; 5275 case HPSA_RAID_ADM: 5276 /* Handles N-way mirrors (R1-ADM) 5277 * and R10 with # of drives divisible by 3.) 5278 * Ensure we have the correct raid_map. 5279 */ 5280 if (le16_to_cpu(map->layout_map_count) != 3) { 5281 hpsa_turn_off_ioaccel_for_device(dev); 5282 return IO_ACCEL_INELIGIBLE; 5283 } 5284 5285 offload_to_mirror = dev->offload_to_mirror; 5286 raid_map_helper(map, offload_to_mirror, 5287 &map_index, ¤t_group); 5288 /* set mirror group to use next time */ 5289 offload_to_mirror = 5290 (offload_to_mirror >= 5291 le16_to_cpu(map->layout_map_count) - 1) 5292 ? 0 : offload_to_mirror + 1; 5293 dev->offload_to_mirror = offload_to_mirror; 5294 /* Avoid direct use of dev->offload_to_mirror within this 5295 * function since multiple threads might simultaneously 5296 * increment it beyond the range of dev->layout_map_count -1. 
5297 */ 5298 break; 5299 case HPSA_RAID_5: 5300 case HPSA_RAID_6: 5301 if (le16_to_cpu(map->layout_map_count) <= 1) 5302 break; 5303 5304 /* Verify first and last block are in same RAID group */ 5305 r5or6_blocks_per_row = 5306 le16_to_cpu(map->strip_size) * 5307 le16_to_cpu(map->data_disks_per_row); 5308 if (r5or6_blocks_per_row == 0) { 5309 hpsa_turn_off_ioaccel_for_device(dev); 5310 return IO_ACCEL_INELIGIBLE; 5311 } 5312 stripesize = r5or6_blocks_per_row * 5313 le16_to_cpu(map->layout_map_count); 5314 #if BITS_PER_LONG == 32 5315 tmpdiv = first_block; 5316 first_group = do_div(tmpdiv, stripesize); 5317 tmpdiv = first_group; 5318 (void) do_div(tmpdiv, r5or6_blocks_per_row); 5319 first_group = tmpdiv; 5320 tmpdiv = last_block; 5321 last_group = do_div(tmpdiv, stripesize); 5322 tmpdiv = last_group; 5323 (void) do_div(tmpdiv, r5or6_blocks_per_row); 5324 last_group = tmpdiv; 5325 #else 5326 first_group = (first_block % stripesize) / r5or6_blocks_per_row; 5327 last_group = (last_block % stripesize) / r5or6_blocks_per_row; 5328 #endif 5329 if (first_group != last_group) 5330 return IO_ACCEL_INELIGIBLE; 5331 5332 /* Verify request is in a single row of RAID 5/6 */ 5333 #if BITS_PER_LONG == 32 5334 tmpdiv = first_block; 5335 (void) do_div(tmpdiv, stripesize); 5336 first_row = r5or6_first_row = r0_first_row = tmpdiv; 5337 tmpdiv = last_block; 5338 (void) do_div(tmpdiv, stripesize); 5339 r5or6_last_row = r0_last_row = tmpdiv; 5340 #else 5341 first_row = r5or6_first_row = r0_first_row = 5342 first_block / stripesize; 5343 r5or6_last_row = r0_last_row = last_block / stripesize; 5344 #endif 5345 if (r5or6_first_row != r5or6_last_row) 5346 return IO_ACCEL_INELIGIBLE; 5347 5348 5349 /* Verify request is in a single column */ 5350 #if BITS_PER_LONG == 32 5351 tmpdiv = first_block; 5352 first_row_offset = do_div(tmpdiv, stripesize); 5353 tmpdiv = first_row_offset; 5354 first_row_offset = (u32) do_div(tmpdiv, r5or6_blocks_per_row); 5355 r5or6_first_row_offset = first_row_offset; 5356 tmpdiv = last_block; 5357 r5or6_last_row_offset = do_div(tmpdiv, stripesize); 5358 tmpdiv = r5or6_last_row_offset; 5359 r5or6_last_row_offset = do_div(tmpdiv, r5or6_blocks_per_row); 5360 tmpdiv = r5or6_first_row_offset; 5361 (void) do_div(tmpdiv, map->strip_size); 5362 first_column = r5or6_first_column = tmpdiv; 5363 tmpdiv = r5or6_last_row_offset; 5364 (void) do_div(tmpdiv, map->strip_size); 5365 r5or6_last_column = tmpdiv; 5366 #else 5367 first_row_offset = r5or6_first_row_offset = 5368 (u32)((first_block % stripesize) % 5369 r5or6_blocks_per_row); 5370 5371 r5or6_last_row_offset = 5372 (u32)((last_block % stripesize) % 5373 r5or6_blocks_per_row); 5374 5375 first_column = r5or6_first_column = 5376 r5or6_first_row_offset / le16_to_cpu(map->strip_size); 5377 r5or6_last_column = 5378 r5or6_last_row_offset / le16_to_cpu(map->strip_size); 5379 #endif 5380 if (r5or6_first_column != r5or6_last_column) 5381 return IO_ACCEL_INELIGIBLE; 5382 5383 /* Request is eligible */ 5384 map_row = ((u32)(first_row >> map->parity_rotation_shift)) % 5385 le16_to_cpu(map->row_cnt); 5386 5387 map_index = (first_group * 5388 (le16_to_cpu(map->row_cnt) * total_disks_per_row)) + 5389 (map_row * total_disks_per_row) + first_column; 5390 break; 5391 default: 5392 return IO_ACCEL_INELIGIBLE; 5393 } 5394 5395 if (unlikely(map_index >= RAID_MAP_MAX_ENTRIES)) 5396 return IO_ACCEL_INELIGIBLE; 5397 5398 c->phys_disk = dev->phys_disk[map_index]; 5399 if (!c->phys_disk) 5400 return IO_ACCEL_INELIGIBLE; 5401 5402 disk_handle = dd[map_index].ioaccel_handle; 5403 
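/*
 * Starting block on the selected physical disk: the volume's starting
 * offset on that disk, plus the block offset of the row, plus the
 * offset within the strip.
 */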
disk_block = le64_to_cpu(map->disk_starting_blk) + 5404 first_row * le16_to_cpu(map->strip_size) + 5405 (first_row_offset - first_column * 5406 le16_to_cpu(map->strip_size)); 5407 disk_block_cnt = block_cnt; 5408 5409 /* handle differing logical/physical block sizes */ 5410 if (map->phys_blk_shift) { 5411 disk_block <<= map->phys_blk_shift; 5412 disk_block_cnt <<= map->phys_blk_shift; 5413 } 5414 BUG_ON(disk_block_cnt > 0xffff); 5415 5416 /* build the new CDB for the physical disk I/O */ 5417 if (disk_block > 0xffffffff) { 5418 cdb[0] = is_write ? WRITE_16 : READ_16; 5419 cdb[1] = 0; 5420 cdb[2] = (u8) (disk_block >> 56); 5421 cdb[3] = (u8) (disk_block >> 48); 5422 cdb[4] = (u8) (disk_block >> 40); 5423 cdb[5] = (u8) (disk_block >> 32); 5424 cdb[6] = (u8) (disk_block >> 24); 5425 cdb[7] = (u8) (disk_block >> 16); 5426 cdb[8] = (u8) (disk_block >> 8); 5427 cdb[9] = (u8) (disk_block); 5428 cdb[10] = (u8) (disk_block_cnt >> 24); 5429 cdb[11] = (u8) (disk_block_cnt >> 16); 5430 cdb[12] = (u8) (disk_block_cnt >> 8); 5431 cdb[13] = (u8) (disk_block_cnt); 5432 cdb[14] = 0; 5433 cdb[15] = 0; 5434 cdb_len = 16; 5435 } else { 5436 cdb[0] = is_write ? WRITE_10 : READ_10; 5437 cdb[1] = 0; 5438 cdb[2] = (u8) (disk_block >> 24); 5439 cdb[3] = (u8) (disk_block >> 16); 5440 cdb[4] = (u8) (disk_block >> 8); 5441 cdb[5] = (u8) (disk_block); 5442 cdb[6] = 0; 5443 cdb[7] = (u8) (disk_block_cnt >> 8); 5444 cdb[8] = (u8) (disk_block_cnt); 5445 cdb[9] = 0; 5446 cdb_len = 10; 5447 } 5448 return hpsa_scsi_ioaccel_queue_command(h, c, disk_handle, cdb, cdb_len, 5449 dev->scsi3addr, 5450 dev->phys_disk[map_index]); 5451 } 5452 5453 /* 5454 * Submit commands down the "normal" RAID stack path 5455 * All callers to hpsa_ciss_submit must check lockup_detected 5456 * beforehand, before (opt.) and after calling cmd_alloc 5457 */ 5458 static int hpsa_ciss_submit(struct ctlr_info *h, 5459 struct CommandList *c, struct scsi_cmnd *cmd, 5460 struct hpsa_scsi_dev_t *dev) 5461 { 5462 cmd->host_scribble = (unsigned char *) c; 5463 c->cmd_type = CMD_SCSI; 5464 c->scsi_cmd = cmd; 5465 c->Header.ReplyQueue = 0; /* unused in simple mode */ 5466 memcpy(&c->Header.LUN.LunAddrBytes[0], &dev->scsi3addr[0], 8); 5467 c->Header.tag = cpu_to_le64((c->cmdindex << DIRECT_LOOKUP_SHIFT)); 5468 5469 /* Fill in the request block... */ 5470 5471 c->Request.Timeout = 0; 5472 BUG_ON(cmd->cmd_len > sizeof(c->Request.CDB)); 5473 c->Request.CDBLen = cmd->cmd_len; 5474 memcpy(c->Request.CDB, cmd->cmnd, cmd->cmd_len); 5475 switch (cmd->sc_data_direction) { 5476 case DMA_TO_DEVICE: 5477 c->Request.type_attr_dir = 5478 TYPE_ATTR_DIR(TYPE_CMD, ATTR_SIMPLE, XFER_WRITE); 5479 break; 5480 case DMA_FROM_DEVICE: 5481 c->Request.type_attr_dir = 5482 TYPE_ATTR_DIR(TYPE_CMD, ATTR_SIMPLE, XFER_READ); 5483 break; 5484 case DMA_NONE: 5485 c->Request.type_attr_dir = 5486 TYPE_ATTR_DIR(TYPE_CMD, ATTR_SIMPLE, XFER_NONE); 5487 break; 5488 case DMA_BIDIRECTIONAL: 5489 /* This can happen if a buggy application does a scsi passthru 5490 * and sets both inlen and outlen to non-zero. ( see 5491 * ../scsi/scsi_ioctl.c:scsi_ioctl_send_command() ) 5492 */ 5493 5494 c->Request.type_attr_dir = 5495 TYPE_ATTR_DIR(TYPE_CMD, ATTR_SIMPLE, XFER_RSVD); 5496 /* This is technically wrong, and hpsa controllers should 5497 * reject it with CMD_INVALID, which is the most correct 5498 * response, but non-fibre backends appear to let it 5499 * slide by, and give the same results as if this field 5500 * were set correctly. Either way is acceptable for 5501 * our purposes here. 
5502 */ 5503 5504 break; 5505 5506 default: 5507 dev_err(&h->pdev->dev, "unknown data direction: %d\n", 5508 cmd->sc_data_direction); 5509 BUG(); 5510 break; 5511 } 5512 5513 if (hpsa_scatter_gather(h, c, cmd) < 0) { /* Fill SG list */ 5514 hpsa_cmd_resolve_and_free(h, c); 5515 return SCSI_MLQUEUE_HOST_BUSY; 5516 } 5517 5518 if (dev->in_reset) { 5519 hpsa_cmd_resolve_and_free(h, c); 5520 return SCSI_MLQUEUE_HOST_BUSY; 5521 } 5522 5523 c->device = dev; 5524 5525 enqueue_cmd_and_start_io(h, c); 5526 /* the cmd'll come back via intr handler in complete_scsi_command() */ 5527 return 0; 5528 } 5529 5530 static void hpsa_cmd_init(struct ctlr_info *h, int index, 5531 struct CommandList *c) 5532 { 5533 dma_addr_t cmd_dma_handle, err_dma_handle; 5534 5535 /* Zero out all of commandlist except the last field, refcount */ 5536 memset(c, 0, offsetof(struct CommandList, refcount)); 5537 c->Header.tag = cpu_to_le64((u64) (index << DIRECT_LOOKUP_SHIFT)); 5538 cmd_dma_handle = h->cmd_pool_dhandle + index * sizeof(*c); 5539 c->err_info = h->errinfo_pool + index; 5540 memset(c->err_info, 0, sizeof(*c->err_info)); 5541 err_dma_handle = h->errinfo_pool_dhandle 5542 + index * sizeof(*c->err_info); 5543 c->cmdindex = index; 5544 c->busaddr = (u32) cmd_dma_handle; 5545 c->ErrDesc.Addr = cpu_to_le64((u64) err_dma_handle); 5546 c->ErrDesc.Len = cpu_to_le32((u32) sizeof(*c->err_info)); 5547 c->h = h; 5548 c->scsi_cmd = SCSI_CMD_IDLE; 5549 } 5550 5551 static void hpsa_preinitialize_commands(struct ctlr_info *h) 5552 { 5553 int i; 5554 5555 for (i = 0; i < h->nr_cmds; i++) { 5556 struct CommandList *c = h->cmd_pool + i; 5557 5558 hpsa_cmd_init(h, i, c); 5559 atomic_set(&c->refcount, 0); 5560 } 5561 } 5562 5563 static inline void hpsa_cmd_partial_init(struct ctlr_info *h, int index, 5564 struct CommandList *c) 5565 { 5566 dma_addr_t cmd_dma_handle = h->cmd_pool_dhandle + index * sizeof(*c); 5567 5568 BUG_ON(c->cmdindex != index); 5569 5570 memset(c->Request.CDB, 0, sizeof(c->Request.CDB)); 5571 memset(c->err_info, 0, sizeof(*c->err_info)); 5572 c->busaddr = (u32) cmd_dma_handle; 5573 } 5574 5575 static int hpsa_ioaccel_submit(struct ctlr_info *h, 5576 struct CommandList *c, struct scsi_cmnd *cmd, 5577 bool retry) 5578 { 5579 struct hpsa_scsi_dev_t *dev = cmd->device->hostdata; 5580 int rc = IO_ACCEL_INELIGIBLE; 5581 5582 if (!dev) 5583 return SCSI_MLQUEUE_HOST_BUSY; 5584 5585 if (dev->in_reset) 5586 return SCSI_MLQUEUE_HOST_BUSY; 5587 5588 if (hpsa_simple_mode) 5589 return IO_ACCEL_INELIGIBLE; 5590 5591 cmd->host_scribble = (unsigned char *) c; 5592 5593 if (dev->offload_enabled) { 5594 hpsa_cmd_init(h, c->cmdindex, c); /* Zeroes out all fields */ 5595 c->cmd_type = CMD_SCSI; 5596 c->scsi_cmd = cmd; 5597 c->device = dev; 5598 if (retry) /* Resubmit but do not increment device->commands_outstanding. */ 5599 c->retry_pending = true; 5600 rc = hpsa_scsi_ioaccel_raid_map(h, c); 5601 if (rc < 0) /* scsi_dma_map failed. */ 5602 rc = SCSI_MLQUEUE_HOST_BUSY; 5603 } else if (dev->hba_ioaccel_enabled) { 5604 hpsa_cmd_init(h, c->cmdindex, c); /* Zeroes out all fields */ 5605 c->cmd_type = CMD_SCSI; 5606 c->scsi_cmd = cmd; 5607 c->device = dev; 5608 if (retry) /* Resubmit but do not increment device->commands_outstanding. */ 5609 c->retry_pending = true; 5610 rc = hpsa_scsi_ioaccel_direct_map(h, c); 5611 if (rc < 0) /* scsi_dma_map failed. 
*/ 5612 rc = SCSI_MLQUEUE_HOST_BUSY; 5613 } 5614 return rc; 5615 } 5616 5617 static void hpsa_command_resubmit_worker(struct work_struct *work) 5618 { 5619 struct scsi_cmnd *cmd; 5620 struct hpsa_scsi_dev_t *dev; 5621 struct CommandList *c = container_of(work, struct CommandList, work); 5622 5623 cmd = c->scsi_cmd; 5624 dev = cmd->device->hostdata; 5625 if (!dev) { 5626 cmd->result = DID_NO_CONNECT << 16; 5627 return hpsa_cmd_free_and_done(c->h, c, cmd); 5628 } 5629 5630 if (dev->in_reset) { 5631 cmd->result = DID_RESET << 16; 5632 return hpsa_cmd_free_and_done(c->h, c, cmd); 5633 } 5634 5635 if (c->cmd_type == CMD_IOACCEL2) { 5636 struct ctlr_info *h = c->h; 5637 struct io_accel2_cmd *c2 = &h->ioaccel2_cmd_pool[c->cmdindex]; 5638 int rc; 5639 5640 if (c2->error_data.serv_response == 5641 IOACCEL2_STATUS_SR_TASK_COMP_SET_FULL) { 5642 /* Resubmit with the retry_pending flag set. */ 5643 rc = hpsa_ioaccel_submit(h, c, cmd, true); 5644 if (rc == 0) 5645 return; 5646 if (rc == SCSI_MLQUEUE_HOST_BUSY) { 5647 /* 5648 * If we get here, it means dma mapping failed. 5649 * Try again via scsi mid layer, which will 5650 * then get SCSI_MLQUEUE_HOST_BUSY. 5651 */ 5652 cmd->result = DID_IMM_RETRY << 16; 5653 return hpsa_cmd_free_and_done(h, c, cmd); 5654 } 5655 /* else, fall thru and resubmit down CISS path */ 5656 } 5657 } 5658 hpsa_cmd_partial_init(c->h, c->cmdindex, c); 5659 /* 5660 * Here we have not come in though queue_command, so we 5661 * can set the retry_pending flag to true for a driver initiated 5662 * retry attempt (I.E. not a SML retry). 5663 * I.E. We are submitting a driver initiated retry. 5664 * Note: hpsa_ciss_submit does not zero out the command fields like 5665 * ioaccel submit does. 5666 */ 5667 c->retry_pending = true; 5668 if (hpsa_ciss_submit(c->h, c, cmd, dev)) { 5669 /* 5670 * If we get here, it means dma mapping failed. Try 5671 * again via scsi mid layer, which will then get 5672 * SCSI_MLQUEUE_HOST_BUSY. 5673 * 5674 * hpsa_ciss_submit will have already freed c 5675 * if it encountered a dma mapping failure. 5676 */ 5677 cmd->result = DID_IMM_RETRY << 16; 5678 scsi_done(cmd); 5679 } 5680 } 5681 5682 /* Running in struct Scsi_Host->host_lock less mode */ 5683 static int hpsa_scsi_queue_command(struct Scsi_Host *sh, struct scsi_cmnd *cmd) 5684 { 5685 struct ctlr_info *h; 5686 struct hpsa_scsi_dev_t *dev; 5687 struct CommandList *c; 5688 int rc = 0; 5689 5690 /* Get the ptr to our adapter structure out of cmd->host. */ 5691 h = sdev_to_hba(cmd->device); 5692 5693 BUG_ON(scsi_cmd_to_rq(cmd)->tag < 0); 5694 5695 dev = cmd->device->hostdata; 5696 if (!dev) { 5697 cmd->result = DID_NO_CONNECT << 16; 5698 scsi_done(cmd); 5699 return 0; 5700 } 5701 5702 if (dev->removed) { 5703 cmd->result = DID_NO_CONNECT << 16; 5704 scsi_done(cmd); 5705 return 0; 5706 } 5707 5708 if (unlikely(lockup_detected(h))) { 5709 cmd->result = DID_NO_CONNECT << 16; 5710 scsi_done(cmd); 5711 return 0; 5712 } 5713 5714 if (dev->in_reset) 5715 return SCSI_MLQUEUE_DEVICE_BUSY; 5716 5717 c = cmd_tagged_alloc(h, cmd); 5718 if (c == NULL) 5719 return SCSI_MLQUEUE_DEVICE_BUSY; 5720 5721 /* 5722 * This is necessary because the SML doesn't zero out this field during 5723 * error recovery. 5724 */ 5725 cmd->result = 0; 5726 5727 /* 5728 * Call alternate submit routine for I/O accelerated commands. 5729 * Retries always go down the normal I/O path. 5730 * Note: If cmd->retries is non-zero, then this is a SML 5731 * initiated retry and not a driver initiated retry. 
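 * Such SML retries (and passthrough requests) skip the ioaccel attempt
 * below and are sent straight down the RAID path via hpsa_ciss_submit().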
5732 * This command has been obtained from cmd_tagged_alloc 5733 * and is therefore a brand-new command. 5734 */ 5735 if (likely(cmd->retries == 0 && 5736 !blk_rq_is_passthrough(scsi_cmd_to_rq(cmd)) && 5737 h->acciopath_status)) { 5738 /* Submit with the retry_pending flag unset. */ 5739 rc = hpsa_ioaccel_submit(h, c, cmd, false); 5740 if (rc == 0) 5741 return 0; 5742 if (rc == SCSI_MLQUEUE_HOST_BUSY) { 5743 hpsa_cmd_resolve_and_free(h, c); 5744 return SCSI_MLQUEUE_HOST_BUSY; 5745 } 5746 } 5747 return hpsa_ciss_submit(h, c, cmd, dev); 5748 } 5749 5750 static void hpsa_scan_complete(struct ctlr_info *h) 5751 { 5752 unsigned long flags; 5753 5754 spin_lock_irqsave(&h->scan_lock, flags); 5755 h->scan_finished = 1; 5756 wake_up(&h->scan_wait_queue); 5757 spin_unlock_irqrestore(&h->scan_lock, flags); 5758 } 5759 5760 static void hpsa_scan_start(struct Scsi_Host *sh) 5761 { 5762 struct ctlr_info *h = shost_to_hba(sh); 5763 unsigned long flags; 5764 5765 /* 5766 * Don't let rescans be initiated on a controller known to be locked 5767 * up. If the controller locks up *during* a rescan, that thread is 5768 * probably hosed, but at least we can prevent new rescan threads from 5769 * piling up on a locked up controller. 5770 */ 5771 if (unlikely(lockup_detected(h))) 5772 return hpsa_scan_complete(h); 5773 5774 /* 5775 * If a scan is already waiting to run, no need to add another 5776 */ 5777 spin_lock_irqsave(&h->scan_lock, flags); 5778 if (h->scan_waiting) { 5779 spin_unlock_irqrestore(&h->scan_lock, flags); 5780 return; 5781 } 5782 5783 spin_unlock_irqrestore(&h->scan_lock, flags); 5784 5785 /* wait until any scan already in progress is finished. */ 5786 while (1) { 5787 spin_lock_irqsave(&h->scan_lock, flags); 5788 if (h->scan_finished) 5789 break; 5790 h->scan_waiting = 1; 5791 spin_unlock_irqrestore(&h->scan_lock, flags); 5792 wait_event(h->scan_wait_queue, h->scan_finished); 5793 /* Note: We don't need to worry about a race between this 5794 * thread and driver unload because the midlayer will 5795 * have incremented the reference count, so unload won't 5796 * happen if we're in here. 
5797 */ 5798 } 5799 h->scan_finished = 0; /* mark scan as in progress */ 5800 h->scan_waiting = 0; 5801 spin_unlock_irqrestore(&h->scan_lock, flags); 5802 5803 if (unlikely(lockup_detected(h))) 5804 return hpsa_scan_complete(h); 5805 5806 /* 5807 * Do the scan after a reset completion 5808 */ 5809 spin_lock_irqsave(&h->reset_lock, flags); 5810 if (h->reset_in_progress) { 5811 h->drv_req_rescan = 1; 5812 spin_unlock_irqrestore(&h->reset_lock, flags); 5813 hpsa_scan_complete(h); 5814 return; 5815 } 5816 spin_unlock_irqrestore(&h->reset_lock, flags); 5817 5818 hpsa_update_scsi_devices(h); 5819 5820 hpsa_scan_complete(h); 5821 } 5822 5823 static int hpsa_change_queue_depth(struct scsi_device *sdev, int qdepth) 5824 { 5825 struct hpsa_scsi_dev_t *logical_drive = sdev->hostdata; 5826 5827 if (!logical_drive) 5828 return -ENODEV; 5829 5830 if (qdepth < 1) 5831 qdepth = 1; 5832 else if (qdepth > logical_drive->queue_depth) 5833 qdepth = logical_drive->queue_depth; 5834 5835 return scsi_change_queue_depth(sdev, qdepth); 5836 } 5837 5838 static int hpsa_scan_finished(struct Scsi_Host *sh, 5839 unsigned long elapsed_time) 5840 { 5841 struct ctlr_info *h = shost_to_hba(sh); 5842 unsigned long flags; 5843 int finished; 5844 5845 spin_lock_irqsave(&h->scan_lock, flags); 5846 finished = h->scan_finished; 5847 spin_unlock_irqrestore(&h->scan_lock, flags); 5848 return finished; 5849 } 5850 5851 static int hpsa_scsi_host_alloc(struct ctlr_info *h) 5852 { 5853 struct Scsi_Host *sh; 5854 5855 sh = scsi_host_alloc(&hpsa_driver_template, sizeof(struct ctlr_info *)); 5856 if (sh == NULL) { 5857 dev_err(&h->pdev->dev, "scsi_host_alloc failed\n"); 5858 return -ENOMEM; 5859 } 5860 5861 sh->io_port = 0; 5862 sh->n_io_port = 0; 5863 sh->this_id = -1; 5864 sh->max_channel = 3; 5865 sh->max_cmd_len = MAX_COMMAND_SIZE; 5866 sh->max_lun = HPSA_MAX_LUN; 5867 sh->max_id = HPSA_MAX_LUN; 5868 sh->can_queue = h->nr_cmds - HPSA_NRESERVED_CMDS; 5869 sh->cmd_per_lun = sh->can_queue; 5870 sh->sg_tablesize = h->maxsgentries; 5871 sh->transportt = hpsa_sas_transport_template; 5872 sh->hostdata[0] = (unsigned long) h; 5873 sh->irq = pci_irq_vector(h->pdev, 0); 5874 sh->unique_id = sh->irq; 5875 5876 h->scsi_host = sh; 5877 return 0; 5878 } 5879 5880 static int hpsa_scsi_add_host(struct ctlr_info *h) 5881 { 5882 int rv; 5883 5884 rv = scsi_add_host(h->scsi_host, &h->pdev->dev); 5885 if (rv) { 5886 dev_err(&h->pdev->dev, "scsi_add_host failed\n"); 5887 return rv; 5888 } 5889 scsi_scan_host(h->scsi_host); 5890 return 0; 5891 } 5892 5893 /* 5894 * The block layer has already gone to the trouble of picking out a unique, 5895 * small-integer tag for this request. We use an offset from that value as 5896 * an index to select our command block. (The offset allows us to reserve the 5897 * low-numbered entries for our own uses.) 5898 */ 5899 static int hpsa_get_cmd_index(struct scsi_cmnd *scmd) 5900 { 5901 int idx = scsi_cmd_to_rq(scmd)->tag; 5902 5903 if (idx < 0) 5904 return idx; 5905 5906 /* Offset to leave space for internal cmds. */ 5907 return idx += HPSA_NRESERVED_CMDS; 5908 } 5909 5910 /* 5911 * Send a TEST_UNIT_READY command to the specified LUN using the specified 5912 * reply queue; returns zero if the unit is ready, and non-zero otherwise. 
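 * A check condition with a NO_SENSE or UNIT_ATTENTION sense key (the
 * expected response right after a reset) is also treated as ready.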
5913 */ 5914 static int hpsa_send_test_unit_ready(struct ctlr_info *h, 5915 struct CommandList *c, unsigned char lunaddr[], 5916 int reply_queue) 5917 { 5918 int rc; 5919 5920 /* Send the Test Unit Ready, fill_cmd can't fail, no mapping */ 5921 (void) fill_cmd(c, TEST_UNIT_READY, h, 5922 NULL, 0, 0, lunaddr, TYPE_CMD); 5923 rc = hpsa_scsi_do_simple_cmd(h, c, reply_queue, NO_TIMEOUT); 5924 if (rc) 5925 return rc; 5926 /* no unmap needed here because no data xfer. */ 5927 5928 /* Check if the unit is already ready. */ 5929 if (c->err_info->CommandStatus == CMD_SUCCESS) 5930 return 0; 5931 5932 /* 5933 * The first command sent after reset will receive "unit attention" to 5934 * indicate that the LUN has been reset...this is actually what we're 5935 * looking for (but, success is good too). 5936 */ 5937 if (c->err_info->CommandStatus == CMD_TARGET_STATUS && 5938 c->err_info->ScsiStatus == SAM_STAT_CHECK_CONDITION && 5939 (c->err_info->SenseInfo[2] == NO_SENSE || 5940 c->err_info->SenseInfo[2] == UNIT_ATTENTION)) 5941 return 0; 5942 5943 return 1; 5944 } 5945 5946 /* 5947 * Wait for a TEST_UNIT_READY command to complete, retrying as necessary; 5948 * returns zero when the unit is ready, and non-zero when giving up. 5949 */ 5950 static int hpsa_wait_for_test_unit_ready(struct ctlr_info *h, 5951 struct CommandList *c, 5952 unsigned char lunaddr[], int reply_queue) 5953 { 5954 int rc; 5955 int count = 0; 5956 int waittime = 1; /* seconds */ 5957 5958 /* Send test unit ready until device ready, or give up. */ 5959 for (count = 0; count < HPSA_TUR_RETRY_LIMIT; count++) { 5960 5961 /* 5962 * Wait for a bit. do this first, because if we send 5963 * the TUR right away, the reset will just abort it. 5964 */ 5965 msleep(1000 * waittime); 5966 5967 rc = hpsa_send_test_unit_ready(h, c, lunaddr, reply_queue); 5968 if (!rc) 5969 break; 5970 5971 /* Increase wait time with each try, up to a point. */ 5972 if (waittime < HPSA_MAX_WAIT_INTERVAL_SECS) 5973 waittime *= 2; 5974 5975 dev_warn(&h->pdev->dev, 5976 "waiting %d secs for device to become ready.\n", 5977 waittime); 5978 } 5979 5980 return rc; 5981 } 5982 5983 static int wait_for_device_to_become_ready(struct ctlr_info *h, 5984 unsigned char lunaddr[], 5985 int reply_queue) 5986 { 5987 int first_queue; 5988 int last_queue; 5989 int rq; 5990 int rc = 0; 5991 struct CommandList *c; 5992 5993 c = cmd_alloc(h); 5994 5995 /* 5996 * If no specific reply queue was requested, then send the TUR 5997 * repeatedly, requesting a reply on each reply queue; otherwise execute 5998 * the loop exactly once using only the specified queue. 5999 */ 6000 if (reply_queue == DEFAULT_REPLY_QUEUE) { 6001 first_queue = 0; 6002 last_queue = h->nreply_queues - 1; 6003 } else { 6004 first_queue = reply_queue; 6005 last_queue = reply_queue; 6006 } 6007 6008 for (rq = first_queue; rq <= last_queue; rq++) { 6009 rc = hpsa_wait_for_test_unit_ready(h, c, lunaddr, rq); 6010 if (rc) 6011 break; 6012 } 6013 6014 if (rc) 6015 dev_warn(&h->pdev->dev, "giving up on device.\n"); 6016 else 6017 dev_warn(&h->pdev->dev, "device is ready.\n"); 6018 6019 cmd_free(h, c); 6020 return rc; 6021 } 6022 6023 /* Need at least one of these error handlers to keep ../scsi/hosts.c from 6024 * complaining. Doing a host- or bus-reset can't do anything good here. 
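 * The handler below waits up to ten seconds for outstanding commands on
 * the device to drain, then sends either a logical-unit or a
 * physical-target reset depending on the device's address mode.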
6025 */ 6026 static int hpsa_eh_device_reset_handler(struct scsi_cmnd *scsicmd) 6027 { 6028 int rc = SUCCESS; 6029 int i; 6030 struct ctlr_info *h; 6031 struct hpsa_scsi_dev_t *dev = NULL; 6032 u8 reset_type; 6033 char msg[48]; 6034 unsigned long flags; 6035 6036 /* find the controller to which the command to be aborted was sent */ 6037 h = sdev_to_hba(scsicmd->device); 6038 if (h == NULL) /* paranoia */ 6039 return FAILED; 6040 6041 spin_lock_irqsave(&h->reset_lock, flags); 6042 h->reset_in_progress = 1; 6043 spin_unlock_irqrestore(&h->reset_lock, flags); 6044 6045 if (lockup_detected(h)) { 6046 rc = FAILED; 6047 goto return_reset_status; 6048 } 6049 6050 dev = scsicmd->device->hostdata; 6051 if (!dev) { 6052 dev_err(&h->pdev->dev, "%s: device lookup failed\n", __func__); 6053 rc = FAILED; 6054 goto return_reset_status; 6055 } 6056 6057 if (dev->devtype == TYPE_ENCLOSURE) { 6058 rc = SUCCESS; 6059 goto return_reset_status; 6060 } 6061 6062 /* if controller locked up, we can guarantee command won't complete */ 6063 if (lockup_detected(h)) { 6064 snprintf(msg, sizeof(msg), 6065 "cmd %d RESET FAILED, lockup detected", 6066 hpsa_get_cmd_index(scsicmd)); 6067 hpsa_show_dev_msg(KERN_WARNING, h, dev, msg); 6068 rc = FAILED; 6069 goto return_reset_status; 6070 } 6071 6072 /* this reset request might be the result of a lockup; check */ 6073 if (detect_controller_lockup(h)) { 6074 snprintf(msg, sizeof(msg), 6075 "cmd %d RESET FAILED, new lockup detected", 6076 hpsa_get_cmd_index(scsicmd)); 6077 hpsa_show_dev_msg(KERN_WARNING, h, dev, msg); 6078 rc = FAILED; 6079 goto return_reset_status; 6080 } 6081 6082 /* Do not attempt on controller */ 6083 if (is_hba_lunid(dev->scsi3addr)) { 6084 rc = SUCCESS; 6085 goto return_reset_status; 6086 } 6087 6088 if (is_logical_dev_addr_mode(dev->scsi3addr)) 6089 reset_type = HPSA_DEVICE_RESET_MSG; 6090 else 6091 reset_type = HPSA_PHYS_TARGET_RESET; 6092 6093 sprintf(msg, "resetting %s", 6094 reset_type == HPSA_DEVICE_RESET_MSG ? "logical " : "physical "); 6095 hpsa_show_dev_msg(KERN_WARNING, h, dev, msg); 6096 6097 /* 6098 * wait to see if any commands will complete before sending reset 6099 */ 6100 dev->in_reset = true; /* block any new cmds from OS for this device */ 6101 for (i = 0; i < 10; i++) { 6102 if (atomic_read(&dev->commands_outstanding) > 0) 6103 msleep(1000); 6104 else 6105 break; 6106 } 6107 6108 /* send a reset to the SCSI LUN which the command was sent to */ 6109 rc = hpsa_do_reset(h, dev, reset_type, DEFAULT_REPLY_QUEUE); 6110 if (rc == 0) 6111 rc = SUCCESS; 6112 else 6113 rc = FAILED; 6114 6115 sprintf(msg, "reset %s %s", 6116 reset_type == HPSA_DEVICE_RESET_MSG ? "logical " : "physical ", 6117 rc == SUCCESS ? "completed successfully" : "failed"); 6118 hpsa_show_dev_msg(KERN_WARNING, h, dev, msg); 6119 6120 return_reset_status: 6121 spin_lock_irqsave(&h->reset_lock, flags); 6122 h->reset_in_progress = 0; 6123 if (dev) 6124 dev->in_reset = false; 6125 spin_unlock_irqrestore(&h->reset_lock, flags); 6126 return rc; 6127 } 6128 6129 /* 6130 * For operations with an associated SCSI command, a command block is allocated 6131 * at init, and managed by cmd_tagged_alloc() and cmd_tagged_free() using the 6132 * block request tag as an index into a table of entries. cmd_tagged_free() is 6133 * the complement, although cmd_free() may be called instead. 6134 * This function is only called for new requests from queue_command. 
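 * The index is the block layer tag offset by HPSA_NRESERVED_CMDS (see
 * hpsa_get_cmd_index()); e.g. block tag 0 maps to command index
 * HPSA_NRESERVED_CMDS, so valid indexes fall in [HPSA_NRESERVED_CMDS,
 * h->nr_cmds).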
6135 */ 6136 static struct CommandList *cmd_tagged_alloc(struct ctlr_info *h, 6137 struct scsi_cmnd *scmd) 6138 { 6139 int idx = hpsa_get_cmd_index(scmd); 6140 struct CommandList *c = h->cmd_pool + idx; 6141 6142 if (idx < HPSA_NRESERVED_CMDS || idx >= h->nr_cmds) { 6143 dev_err(&h->pdev->dev, "Bad block tag: %d not in [%d..%d]\n", 6144 idx, HPSA_NRESERVED_CMDS, h->nr_cmds - 1); 6145 /* The index value comes from the block layer, so if it's out of 6146 * bounds, it's probably not our bug. 6147 */ 6148 BUG(); 6149 } 6150 6151 if (unlikely(!hpsa_is_cmd_idle(c))) { 6152 /* 6153 * We expect that the SCSI layer will hand us a unique tag 6154 * value. Thus, there should never be a collision here between 6155 * two requests...because if the selected command isn't idle 6156 * then someone is going to be very disappointed. 6157 */ 6158 if (idx != h->last_collision_tag) { /* Print once per tag */ 6159 dev_warn(&h->pdev->dev, 6160 "%s: tag collision (tag=%d)\n", __func__, idx); 6161 if (scmd) 6162 scsi_print_command(scmd); 6163 h->last_collision_tag = idx; 6164 } 6165 return NULL; 6166 } 6167 6168 atomic_inc(&c->refcount); 6169 hpsa_cmd_partial_init(h, idx, c); 6170 6171 /* 6172 * This is a new command obtained from queue_command so 6173 * there have not been any driver initiated retry attempts. 6174 */ 6175 c->retry_pending = false; 6176 6177 return c; 6178 } 6179 6180 static void cmd_tagged_free(struct ctlr_info *h, struct CommandList *c) 6181 { 6182 /* 6183 * Release our reference to the block. We don't need to do anything 6184 * else to free it, because it is accessed by index. 6185 */ 6186 (void)atomic_dec(&c->refcount); 6187 } 6188 6189 /* 6190 * For operations that cannot sleep, a command block is allocated at init, 6191 * and managed by cmd_alloc() and cmd_free() using a simple bitmap to track 6192 * which ones are free or in use. Lock must be held when calling this. 6193 * cmd_free() is the complement. 6194 * This function never gives up and returns NULL. If it hangs, 6195 * another thread must call cmd_free() to free some tags. 6196 */ 6197 6198 static struct CommandList *cmd_alloc(struct ctlr_info *h) 6199 { 6200 struct CommandList *c; 6201 int refcount, i; 6202 int offset = 0; 6203 6204 /* 6205 * There is some *extremely* small but non-zero chance that that 6206 * multiple threads could get in here, and one thread could 6207 * be scanning through the list of bits looking for a free 6208 * one, but the free ones are always behind him, and other 6209 * threads sneak in behind him and eat them before he can 6210 * get to them, so that while there is always a free one, a 6211 * very unlucky thread might be starved anyway, never able to 6212 * beat the other threads. In reality, this happens so 6213 * infrequently as to be indistinguishable from never. 6214 * 6215 * Note that we start allocating commands before the SCSI host structure 6216 * is initialized. Since the search starts at bit zero, this 6217 * all works, since we have at least one command structure available; 6218 * however, it means that the structures with the low indexes have to be 6219 * reserved for driver-initiated requests, while requests from the block 6220 * layer will use the higher indexes. 
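 * Note that only the first HPSA_NRESERVED_CMDS bits of cmd_pool_bits are
 * scanned here; the higher, tag-indexed entries belong to
 * cmd_tagged_alloc().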
6221 */ 6222 6223 for (;;) { 6224 i = find_next_zero_bit(h->cmd_pool_bits, 6225 HPSA_NRESERVED_CMDS, 6226 offset); 6227 if (unlikely(i >= HPSA_NRESERVED_CMDS)) { 6228 offset = 0; 6229 continue; 6230 } 6231 c = h->cmd_pool + i; 6232 refcount = atomic_inc_return(&c->refcount); 6233 if (unlikely(refcount > 1)) { 6234 cmd_free(h, c); /* already in use */ 6235 offset = (i + 1) % HPSA_NRESERVED_CMDS; 6236 continue; 6237 } 6238 set_bit(i, h->cmd_pool_bits); 6239 break; /* it's ours now. */ 6240 } 6241 hpsa_cmd_partial_init(h, i, c); 6242 c->device = NULL; 6243 6244 /* 6245 * cmd_alloc is for "internal" commands and they are never 6246 * retried. 6247 */ 6248 c->retry_pending = false; 6249 6250 return c; 6251 } 6252 6253 /* 6254 * This is the complementary operation to cmd_alloc(). Note, however, in some 6255 * corner cases it may also be used to free blocks allocated by 6256 * cmd_tagged_alloc() in which case the ref-count decrement does the trick and 6257 * the clear-bit is harmless. 6258 */ 6259 static void cmd_free(struct ctlr_info *h, struct CommandList *c) 6260 { 6261 if (atomic_dec_and_test(&c->refcount)) { 6262 int i; 6263 6264 i = c - h->cmd_pool; 6265 clear_bit(i, h->cmd_pool_bits); 6266 } 6267 } 6268 6269 #ifdef CONFIG_COMPAT 6270 6271 static int hpsa_ioctl32_passthru(struct scsi_device *dev, unsigned int cmd, 6272 void __user *arg) 6273 { 6274 struct ctlr_info *h = sdev_to_hba(dev); 6275 IOCTL32_Command_struct __user *arg32 = arg; 6276 IOCTL_Command_struct arg64; 6277 int err; 6278 u32 cp; 6279 6280 if (!arg) 6281 return -EINVAL; 6282 6283 memset(&arg64, 0, sizeof(arg64)); 6284 if (copy_from_user(&arg64, arg32, offsetof(IOCTL_Command_struct, buf))) 6285 return -EFAULT; 6286 if (get_user(cp, &arg32->buf)) 6287 return -EFAULT; 6288 arg64.buf = compat_ptr(cp); 6289 6290 if (atomic_dec_if_positive(&h->passthru_cmds_avail) < 0) 6291 return -EAGAIN; 6292 err = hpsa_passthru_ioctl(h, &arg64); 6293 atomic_inc(&h->passthru_cmds_avail); 6294 if (err) 6295 return err; 6296 if (copy_to_user(&arg32->error_info, &arg64.error_info, 6297 sizeof(arg32->error_info))) 6298 return -EFAULT; 6299 return 0; 6300 } 6301 6302 static int hpsa_ioctl32_big_passthru(struct scsi_device *dev, 6303 unsigned int cmd, void __user *arg) 6304 { 6305 struct ctlr_info *h = sdev_to_hba(dev); 6306 BIG_IOCTL32_Command_struct __user *arg32 = arg; 6307 BIG_IOCTL_Command_struct arg64; 6308 int err; 6309 u32 cp; 6310 6311 if (!arg) 6312 return -EINVAL; 6313 memset(&arg64, 0, sizeof(arg64)); 6314 if (copy_from_user(&arg64, arg32, 6315 offsetof(BIG_IOCTL32_Command_struct, buf))) 6316 return -EFAULT; 6317 if (get_user(cp, &arg32->buf)) 6318 return -EFAULT; 6319 arg64.buf = compat_ptr(cp); 6320 6321 if (atomic_dec_if_positive(&h->passthru_cmds_avail) < 0) 6322 return -EAGAIN; 6323 err = hpsa_big_passthru_ioctl(h, &arg64); 6324 atomic_inc(&h->passthru_cmds_avail); 6325 if (err) 6326 return err; 6327 if (copy_to_user(&arg32->error_info, &arg64.error_info, 6328 sizeof(arg32->error_info))) 6329 return -EFAULT; 6330 return 0; 6331 } 6332 6333 static int hpsa_compat_ioctl(struct scsi_device *dev, unsigned int cmd, 6334 void __user *arg) 6335 { 6336 switch (cmd) { 6337 case CCISS_GETPCIINFO: 6338 case CCISS_GETINTINFO: 6339 case CCISS_SETINTINFO: 6340 case CCISS_GETNODENAME: 6341 case CCISS_SETNODENAME: 6342 case CCISS_GETHEARTBEAT: 6343 case CCISS_GETBUSTYPES: 6344 case CCISS_GETFIRMVER: 6345 case CCISS_GETDRIVVER: 6346 case CCISS_REVALIDVOLS: 6347 case CCISS_DEREGDISK: 6348 case CCISS_REGNEWDISK: 6349 case CCISS_REGNEWD: 6350 case 
CCISS_RESCANDISK: 6351 case CCISS_GETLUNINFO: 6352 return hpsa_ioctl(dev, cmd, arg); 6353 6354 case CCISS_PASSTHRU32: 6355 return hpsa_ioctl32_passthru(dev, cmd, arg); 6356 case CCISS_BIG_PASSTHRU32: 6357 return hpsa_ioctl32_big_passthru(dev, cmd, arg); 6358 6359 default: 6360 return -ENOIOCTLCMD; 6361 } 6362 } 6363 #endif 6364 6365 static int hpsa_getpciinfo_ioctl(struct ctlr_info *h, void __user *argp) 6366 { 6367 struct hpsa_pci_info pciinfo; 6368 6369 if (!argp) 6370 return -EINVAL; 6371 pciinfo.domain = pci_domain_nr(h->pdev->bus); 6372 pciinfo.bus = h->pdev->bus->number; 6373 pciinfo.dev_fn = h->pdev->devfn; 6374 pciinfo.board_id = h->board_id; 6375 if (copy_to_user(argp, &pciinfo, sizeof(pciinfo))) 6376 return -EFAULT; 6377 return 0; 6378 } 6379 6380 static int hpsa_getdrivver_ioctl(struct ctlr_info *h, void __user *argp) 6381 { 6382 DriverVer_type DriverVer; 6383 unsigned char vmaj, vmin, vsubmin; 6384 int rc; 6385 6386 rc = sscanf(HPSA_DRIVER_VERSION, "%hhu.%hhu.%hhu", 6387 &vmaj, &vmin, &vsubmin); 6388 if (rc != 3) { 6389 dev_info(&h->pdev->dev, "driver version string '%s' " 6390 "unrecognized.", HPSA_DRIVER_VERSION); 6391 vmaj = 0; 6392 vmin = 0; 6393 vsubmin = 0; 6394 } 6395 DriverVer = (vmaj << 16) | (vmin << 8) | vsubmin; 6396 if (!argp) 6397 return -EINVAL; 6398 if (copy_to_user(argp, &DriverVer, sizeof(DriverVer_type))) 6399 return -EFAULT; 6400 return 0; 6401 } 6402 6403 static int hpsa_passthru_ioctl(struct ctlr_info *h, 6404 IOCTL_Command_struct *iocommand) 6405 { 6406 struct CommandList *c; 6407 char *buff = NULL; 6408 u64 temp64; 6409 int rc = 0; 6410 6411 if (!capable(CAP_SYS_RAWIO)) 6412 return -EPERM; 6413 if ((iocommand->buf_size < 1) && 6414 (iocommand->Request.Type.Direction != XFER_NONE)) { 6415 return -EINVAL; 6416 } 6417 if (iocommand->buf_size > 0) { 6418 buff = kmalloc(iocommand->buf_size, GFP_KERNEL); 6419 if (buff == NULL) 6420 return -ENOMEM; 6421 if (iocommand->Request.Type.Direction & XFER_WRITE) { 6422 /* Copy the data into the buffer we created */ 6423 if (copy_from_user(buff, iocommand->buf, 6424 iocommand->buf_size)) { 6425 rc = -EFAULT; 6426 goto out_kfree; 6427 } 6428 } else { 6429 memset(buff, 0, iocommand->buf_size); 6430 } 6431 } 6432 c = cmd_alloc(h); 6433 6434 /* Fill in the command type */ 6435 c->cmd_type = CMD_IOCTL_PEND; 6436 c->scsi_cmd = SCSI_CMD_BUSY; 6437 /* Fill in Command Header */ 6438 c->Header.ReplyQueue = 0; /* unused in simple mode */ 6439 if (iocommand->buf_size > 0) { /* buffer to fill */ 6440 c->Header.SGList = 1; 6441 c->Header.SGTotal = cpu_to_le16(1); 6442 } else { /* no buffers to fill */ 6443 c->Header.SGList = 0; 6444 c->Header.SGTotal = cpu_to_le16(0); 6445 } 6446 memcpy(&c->Header.LUN, &iocommand->LUN_info, sizeof(c->Header.LUN)); 6447 6448 /* Fill in Request block */ 6449 memcpy(&c->Request, &iocommand->Request, 6450 sizeof(c->Request)); 6451 6452 /* Fill in the scatter gather information */ 6453 if (iocommand->buf_size > 0) { 6454 temp64 = dma_map_single(&h->pdev->dev, buff, 6455 iocommand->buf_size, DMA_BIDIRECTIONAL); 6456 if (dma_mapping_error(&h->pdev->dev, (dma_addr_t) temp64)) { 6457 c->SG[0].Addr = cpu_to_le64(0); 6458 c->SG[0].Len = cpu_to_le32(0); 6459 rc = -ENOMEM; 6460 goto out; 6461 } 6462 c->SG[0].Addr = cpu_to_le64(temp64); 6463 c->SG[0].Len = cpu_to_le32(iocommand->buf_size); 6464 c->SG[0].Ext = cpu_to_le32(HPSA_SG_LAST); /* not chaining */ 6465 } 6466 rc = hpsa_scsi_do_simple_cmd(h, c, DEFAULT_REPLY_QUEUE, 6467 NO_TIMEOUT); 6468 if (iocommand->buf_size > 0) 6469 hpsa_pci_unmap(h->pdev, c, 1, 
DMA_BIDIRECTIONAL); 6470 check_ioctl_unit_attention(h, c); 6471 if (rc) { 6472 rc = -EIO; 6473 goto out; 6474 } 6475 6476 /* Copy the error information out */ 6477 memcpy(&iocommand->error_info, c->err_info, 6478 sizeof(iocommand->error_info)); 6479 if ((iocommand->Request.Type.Direction & XFER_READ) && 6480 iocommand->buf_size > 0) { 6481 /* Copy the data out of the buffer we created */ 6482 if (copy_to_user(iocommand->buf, buff, iocommand->buf_size)) { 6483 rc = -EFAULT; 6484 goto out; 6485 } 6486 } 6487 out: 6488 cmd_free(h, c); 6489 out_kfree: 6490 kfree(buff); 6491 return rc; 6492 } 6493 6494 static int hpsa_big_passthru_ioctl(struct ctlr_info *h, 6495 BIG_IOCTL_Command_struct *ioc) 6496 { 6497 struct CommandList *c; 6498 unsigned char **buff = NULL; 6499 int *buff_size = NULL; 6500 u64 temp64; 6501 BYTE sg_used = 0; 6502 int status = 0; 6503 u32 left; 6504 u32 sz; 6505 BYTE __user *data_ptr; 6506 6507 if (!capable(CAP_SYS_RAWIO)) 6508 return -EPERM; 6509 6510 if ((ioc->buf_size < 1) && 6511 (ioc->Request.Type.Direction != XFER_NONE)) 6512 return -EINVAL; 6513 /* Check kmalloc limits using all SGs */ 6514 if (ioc->malloc_size > MAX_KMALLOC_SIZE) 6515 return -EINVAL; 6516 if (ioc->buf_size > ioc->malloc_size * SG_ENTRIES_IN_CMD) 6517 return -EINVAL; 6518 buff = kcalloc(SG_ENTRIES_IN_CMD, sizeof(char *), GFP_KERNEL); 6519 if (!buff) { 6520 status = -ENOMEM; 6521 goto cleanup1; 6522 } 6523 buff_size = kmalloc_array(SG_ENTRIES_IN_CMD, sizeof(int), GFP_KERNEL); 6524 if (!buff_size) { 6525 status = -ENOMEM; 6526 goto cleanup1; 6527 } 6528 left = ioc->buf_size; 6529 data_ptr = ioc->buf; 6530 while (left) { 6531 sz = (left > ioc->malloc_size) ? ioc->malloc_size : left; 6532 buff_size[sg_used] = sz; 6533 buff[sg_used] = kmalloc(sz, GFP_KERNEL); 6534 if (buff[sg_used] == NULL) { 6535 status = -ENOMEM; 6536 goto cleanup1; 6537 } 6538 if (ioc->Request.Type.Direction & XFER_WRITE) { 6539 if (copy_from_user(buff[sg_used], data_ptr, sz)) { 6540 status = -EFAULT; 6541 goto cleanup1; 6542 } 6543 } else 6544 memset(buff[sg_used], 0, sz); 6545 left -= sz; 6546 data_ptr += sz; 6547 sg_used++; 6548 } 6549 c = cmd_alloc(h); 6550 6551 c->cmd_type = CMD_IOCTL_PEND; 6552 c->scsi_cmd = SCSI_CMD_BUSY; 6553 c->Header.ReplyQueue = 0; 6554 c->Header.SGList = (u8) sg_used; 6555 c->Header.SGTotal = cpu_to_le16(sg_used); 6556 memcpy(&c->Header.LUN, &ioc->LUN_info, sizeof(c->Header.LUN)); 6557 memcpy(&c->Request, &ioc->Request, sizeof(c->Request)); 6558 if (ioc->buf_size > 0) { 6559 int i; 6560 for (i = 0; i < sg_used; i++) { 6561 temp64 = dma_map_single(&h->pdev->dev, buff[i], 6562 buff_size[i], DMA_BIDIRECTIONAL); 6563 if (dma_mapping_error(&h->pdev->dev, 6564 (dma_addr_t) temp64)) { 6565 c->SG[i].Addr = cpu_to_le64(0); 6566 c->SG[i].Len = cpu_to_le32(0); 6567 hpsa_pci_unmap(h->pdev, c, i, 6568 DMA_BIDIRECTIONAL); 6569 status = -ENOMEM; 6570 goto cleanup0; 6571 } 6572 c->SG[i].Addr = cpu_to_le64(temp64); 6573 c->SG[i].Len = cpu_to_le32(buff_size[i]); 6574 c->SG[i].Ext = cpu_to_le32(0); 6575 } 6576 c->SG[--i].Ext = cpu_to_le32(HPSA_SG_LAST); 6577 } 6578 status = hpsa_scsi_do_simple_cmd(h, c, DEFAULT_REPLY_QUEUE, 6579 NO_TIMEOUT); 6580 if (sg_used) 6581 hpsa_pci_unmap(h->pdev, c, sg_used, DMA_BIDIRECTIONAL); 6582 check_ioctl_unit_attention(h, c); 6583 if (status) { 6584 status = -EIO; 6585 goto cleanup0; 6586 } 6587 6588 /* Copy the error information out */ 6589 memcpy(&ioc->error_info, c->err_info, sizeof(ioc->error_info)); 6590 if ((ioc->Request.Type.Direction & XFER_READ) && ioc->buf_size > 0) { 6591 int i; 6592 
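/* The user buffer was split above into chunks of at most ioc->malloc_size
 * bytes, one per S/G entry, so it is copied back out chunk by chunk. */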
6593 /* Copy the data out of the buffer we created */ 6594 BYTE __user *ptr = ioc->buf; 6595 for (i = 0; i < sg_used; i++) { 6596 if (copy_to_user(ptr, buff[i], buff_size[i])) { 6597 status = -EFAULT; 6598 goto cleanup0; 6599 } 6600 ptr += buff_size[i]; 6601 } 6602 } 6603 status = 0; 6604 cleanup0: 6605 cmd_free(h, c); 6606 cleanup1: 6607 if (buff) { 6608 int i; 6609 6610 for (i = 0; i < sg_used; i++) 6611 kfree(buff[i]); 6612 kfree(buff); 6613 } 6614 kfree(buff_size); 6615 return status; 6616 } 6617 6618 static void check_ioctl_unit_attention(struct ctlr_info *h, 6619 struct CommandList *c) 6620 { 6621 if (c->err_info->CommandStatus == CMD_TARGET_STATUS && 6622 c->err_info->ScsiStatus != SAM_STAT_CHECK_CONDITION) 6623 (void) check_for_unit_attention(h, c); 6624 } 6625 6626 /* 6627 * ioctl 6628 */ 6629 static int hpsa_ioctl(struct scsi_device *dev, unsigned int cmd, 6630 void __user *argp) 6631 { 6632 struct ctlr_info *h = sdev_to_hba(dev); 6633 int rc; 6634 6635 switch (cmd) { 6636 case CCISS_DEREGDISK: 6637 case CCISS_REGNEWDISK: 6638 case CCISS_REGNEWD: 6639 hpsa_scan_start(h->scsi_host); 6640 return 0; 6641 case CCISS_GETPCIINFO: 6642 return hpsa_getpciinfo_ioctl(h, argp); 6643 case CCISS_GETDRIVVER: 6644 return hpsa_getdrivver_ioctl(h, argp); 6645 case CCISS_PASSTHRU: { 6646 IOCTL_Command_struct iocommand; 6647 6648 if (!argp) 6649 return -EINVAL; 6650 if (copy_from_user(&iocommand, argp, sizeof(iocommand))) 6651 return -EFAULT; 6652 if (atomic_dec_if_positive(&h->passthru_cmds_avail) < 0) 6653 return -EAGAIN; 6654 rc = hpsa_passthru_ioctl(h, &iocommand); 6655 atomic_inc(&h->passthru_cmds_avail); 6656 if (!rc && copy_to_user(argp, &iocommand, sizeof(iocommand))) 6657 rc = -EFAULT; 6658 return rc; 6659 } 6660 case CCISS_BIG_PASSTHRU: { 6661 BIG_IOCTL_Command_struct ioc; 6662 if (!argp) 6663 return -EINVAL; 6664 if (copy_from_user(&ioc, argp, sizeof(ioc))) 6665 return -EFAULT; 6666 if (atomic_dec_if_positive(&h->passthru_cmds_avail) < 0) 6667 return -EAGAIN; 6668 rc = hpsa_big_passthru_ioctl(h, &ioc); 6669 atomic_inc(&h->passthru_cmds_avail); 6670 if (!rc && copy_to_user(argp, &ioc, sizeof(ioc))) 6671 rc = -EFAULT; 6672 return rc; 6673 } 6674 default: 6675 return -ENOTTY; 6676 } 6677 } 6678 6679 static void hpsa_send_host_reset(struct ctlr_info *h, u8 reset_type) 6680 { 6681 struct CommandList *c; 6682 6683 c = cmd_alloc(h); 6684 6685 /* fill_cmd can't fail here, no data buffer to map */ 6686 (void) fill_cmd(c, HPSA_DEVICE_RESET_MSG, h, NULL, 0, 0, 6687 RAID_CTLR_LUNID, TYPE_MSG); 6688 c->Request.CDB[1] = reset_type; /* fill_cmd defaults to target reset */ 6689 c->waiting = NULL; 6690 enqueue_cmd_and_start_io(h, c); 6691 /* Don't wait for completion, the reset won't complete. Don't free 6692 * the command either. This is the last command we will send before 6693 * re-initializing everything, so it doesn't matter and won't leak. 
6694 */ 6695 return; 6696 } 6697 6698 static int fill_cmd(struct CommandList *c, u8 cmd, struct ctlr_info *h, 6699 void *buff, size_t size, u16 page_code, unsigned char *scsi3addr, 6700 int cmd_type) 6701 { 6702 enum dma_data_direction dir = DMA_NONE; 6703 6704 c->cmd_type = CMD_IOCTL_PEND; 6705 c->scsi_cmd = SCSI_CMD_BUSY; 6706 c->Header.ReplyQueue = 0; 6707 if (buff != NULL && size > 0) { 6708 c->Header.SGList = 1; 6709 c->Header.SGTotal = cpu_to_le16(1); 6710 } else { 6711 c->Header.SGList = 0; 6712 c->Header.SGTotal = cpu_to_le16(0); 6713 } 6714 memcpy(c->Header.LUN.LunAddrBytes, scsi3addr, 8); 6715 6716 if (cmd_type == TYPE_CMD) { 6717 switch (cmd) { 6718 case HPSA_INQUIRY: 6719 /* are we trying to read a vital product page */ 6720 if (page_code & VPD_PAGE) { 6721 c->Request.CDB[1] = 0x01; 6722 c->Request.CDB[2] = (page_code & 0xff); 6723 } 6724 c->Request.CDBLen = 6; 6725 c->Request.type_attr_dir = 6726 TYPE_ATTR_DIR(cmd_type, ATTR_SIMPLE, XFER_READ); 6727 c->Request.Timeout = 0; 6728 c->Request.CDB[0] = HPSA_INQUIRY; 6729 c->Request.CDB[4] = size & 0xFF; 6730 break; 6731 case RECEIVE_DIAGNOSTIC: 6732 c->Request.CDBLen = 6; 6733 c->Request.type_attr_dir = 6734 TYPE_ATTR_DIR(cmd_type, ATTR_SIMPLE, XFER_READ); 6735 c->Request.Timeout = 0; 6736 c->Request.CDB[0] = cmd; 6737 c->Request.CDB[1] = 1; 6738 c->Request.CDB[2] = 1; 6739 c->Request.CDB[3] = (size >> 8) & 0xFF; 6740 c->Request.CDB[4] = size & 0xFF; 6741 break; 6742 case HPSA_REPORT_LOG: 6743 case HPSA_REPORT_PHYS: 6744 /* Talking to controller so It's a physical command 6745 mode = 00 target = 0. Nothing to write. 6746 */ 6747 c->Request.CDBLen = 12; 6748 c->Request.type_attr_dir = 6749 TYPE_ATTR_DIR(cmd_type, ATTR_SIMPLE, XFER_READ); 6750 c->Request.Timeout = 0; 6751 c->Request.CDB[0] = cmd; 6752 c->Request.CDB[6] = (size >> 24) & 0xFF; /* MSB */ 6753 c->Request.CDB[7] = (size >> 16) & 0xFF; 6754 c->Request.CDB[8] = (size >> 8) & 0xFF; 6755 c->Request.CDB[9] = size & 0xFF; 6756 break; 6757 case BMIC_SENSE_DIAG_OPTIONS: 6758 c->Request.CDBLen = 16; 6759 c->Request.type_attr_dir = 6760 TYPE_ATTR_DIR(cmd_type, ATTR_SIMPLE, XFER_READ); 6761 c->Request.Timeout = 0; 6762 /* Spec says this should be BMIC_WRITE */ 6763 c->Request.CDB[0] = BMIC_READ; 6764 c->Request.CDB[6] = BMIC_SENSE_DIAG_OPTIONS; 6765 break; 6766 case BMIC_SET_DIAG_OPTIONS: 6767 c->Request.CDBLen = 16; 6768 c->Request.type_attr_dir = 6769 TYPE_ATTR_DIR(cmd_type, 6770 ATTR_SIMPLE, XFER_WRITE); 6771 c->Request.Timeout = 0; 6772 c->Request.CDB[0] = BMIC_WRITE; 6773 c->Request.CDB[6] = BMIC_SET_DIAG_OPTIONS; 6774 break; 6775 case HPSA_CACHE_FLUSH: 6776 c->Request.CDBLen = 12; 6777 c->Request.type_attr_dir = 6778 TYPE_ATTR_DIR(cmd_type, 6779 ATTR_SIMPLE, XFER_WRITE); 6780 c->Request.Timeout = 0; 6781 c->Request.CDB[0] = BMIC_WRITE; 6782 c->Request.CDB[6] = BMIC_CACHE_FLUSH; 6783 c->Request.CDB[7] = (size >> 8) & 0xFF; 6784 c->Request.CDB[8] = size & 0xFF; 6785 break; 6786 case TEST_UNIT_READY: 6787 c->Request.CDBLen = 6; 6788 c->Request.type_attr_dir = 6789 TYPE_ATTR_DIR(cmd_type, ATTR_SIMPLE, XFER_NONE); 6790 c->Request.Timeout = 0; 6791 break; 6792 case HPSA_GET_RAID_MAP: 6793 c->Request.CDBLen = 12; 6794 c->Request.type_attr_dir = 6795 TYPE_ATTR_DIR(cmd_type, ATTR_SIMPLE, XFER_READ); 6796 c->Request.Timeout = 0; 6797 c->Request.CDB[0] = HPSA_CISS_READ; 6798 c->Request.CDB[1] = cmd; 6799 c->Request.CDB[6] = (size >> 24) & 0xFF; /* MSB */ 6800 c->Request.CDB[7] = (size >> 16) & 0xFF; 6801 c->Request.CDB[8] = (size >> 8) & 0xFF; 6802 c->Request.CDB[9] = size & 0xFF; 6803 
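/* CDB[6..9] carry the transfer length, MSB first, just as for the
 * report-LUNs commands above. */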
break; 6804 case BMIC_SENSE_CONTROLLER_PARAMETERS: 6805 c->Request.CDBLen = 10; 6806 c->Request.type_attr_dir = 6807 TYPE_ATTR_DIR(cmd_type, ATTR_SIMPLE, XFER_READ); 6808 c->Request.Timeout = 0; 6809 c->Request.CDB[0] = BMIC_READ; 6810 c->Request.CDB[6] = BMIC_SENSE_CONTROLLER_PARAMETERS; 6811 c->Request.CDB[7] = (size >> 16) & 0xFF; 6812 c->Request.CDB[8] = (size >> 8) & 0xFF; 6813 break; 6814 case BMIC_IDENTIFY_PHYSICAL_DEVICE: 6815 c->Request.CDBLen = 10; 6816 c->Request.type_attr_dir = 6817 TYPE_ATTR_DIR(cmd_type, ATTR_SIMPLE, XFER_READ); 6818 c->Request.Timeout = 0; 6819 c->Request.CDB[0] = BMIC_READ; 6820 c->Request.CDB[6] = BMIC_IDENTIFY_PHYSICAL_DEVICE; 6821 c->Request.CDB[7] = (size >> 16) & 0xFF; 6822 c->Request.CDB[8] = (size >> 8) & 0XFF; 6823 break; 6824 case BMIC_SENSE_SUBSYSTEM_INFORMATION: 6825 c->Request.CDBLen = 10; 6826 c->Request.type_attr_dir = 6827 TYPE_ATTR_DIR(cmd_type, ATTR_SIMPLE, XFER_READ); 6828 c->Request.Timeout = 0; 6829 c->Request.CDB[0] = BMIC_READ; 6830 c->Request.CDB[6] = BMIC_SENSE_SUBSYSTEM_INFORMATION; 6831 c->Request.CDB[7] = (size >> 16) & 0xFF; 6832 c->Request.CDB[8] = (size >> 8) & 0XFF; 6833 break; 6834 case BMIC_SENSE_STORAGE_BOX_PARAMS: 6835 c->Request.CDBLen = 10; 6836 c->Request.type_attr_dir = 6837 TYPE_ATTR_DIR(cmd_type, ATTR_SIMPLE, XFER_READ); 6838 c->Request.Timeout = 0; 6839 c->Request.CDB[0] = BMIC_READ; 6840 c->Request.CDB[6] = BMIC_SENSE_STORAGE_BOX_PARAMS; 6841 c->Request.CDB[7] = (size >> 16) & 0xFF; 6842 c->Request.CDB[8] = (size >> 8) & 0XFF; 6843 break; 6844 case BMIC_IDENTIFY_CONTROLLER: 6845 c->Request.CDBLen = 10; 6846 c->Request.type_attr_dir = 6847 TYPE_ATTR_DIR(cmd_type, ATTR_SIMPLE, XFER_READ); 6848 c->Request.Timeout = 0; 6849 c->Request.CDB[0] = BMIC_READ; 6850 c->Request.CDB[1] = 0; 6851 c->Request.CDB[2] = 0; 6852 c->Request.CDB[3] = 0; 6853 c->Request.CDB[4] = 0; 6854 c->Request.CDB[5] = 0; 6855 c->Request.CDB[6] = BMIC_IDENTIFY_CONTROLLER; 6856 c->Request.CDB[7] = (size >> 16) & 0xFF; 6857 c->Request.CDB[8] = (size >> 8) & 0XFF; 6858 c->Request.CDB[9] = 0; 6859 break; 6860 default: 6861 dev_warn(&h->pdev->dev, "unknown command 0x%c\n", cmd); 6862 BUG(); 6863 } 6864 } else if (cmd_type == TYPE_MSG) { 6865 switch (cmd) { 6866 6867 case HPSA_PHYS_TARGET_RESET: 6868 c->Request.CDBLen = 16; 6869 c->Request.type_attr_dir = 6870 TYPE_ATTR_DIR(cmd_type, ATTR_SIMPLE, XFER_NONE); 6871 c->Request.Timeout = 0; /* Don't time out */ 6872 memset(&c->Request.CDB[0], 0, sizeof(c->Request.CDB)); 6873 c->Request.CDB[0] = HPSA_RESET; 6874 c->Request.CDB[1] = HPSA_TARGET_RESET_TYPE; 6875 /* Physical target reset needs no control bytes 4-7*/ 6876 c->Request.CDB[4] = 0x00; 6877 c->Request.CDB[5] = 0x00; 6878 c->Request.CDB[6] = 0x00; 6879 c->Request.CDB[7] = 0x00; 6880 break; 6881 case HPSA_DEVICE_RESET_MSG: 6882 c->Request.CDBLen = 16; 6883 c->Request.type_attr_dir = 6884 TYPE_ATTR_DIR(cmd_type, ATTR_SIMPLE, XFER_NONE); 6885 c->Request.Timeout = 0; /* Don't time out */ 6886 memset(&c->Request.CDB[0], 0, sizeof(c->Request.CDB)); 6887 c->Request.CDB[0] = cmd; 6888 c->Request.CDB[1] = HPSA_RESET_TYPE_LUN; 6889 /* If bytes 4-7 are zero, it means reset the */ 6890 /* LunID device */ 6891 c->Request.CDB[4] = 0x00; 6892 c->Request.CDB[5] = 0x00; 6893 c->Request.CDB[6] = 0x00; 6894 c->Request.CDB[7] = 0x00; 6895 break; 6896 default: 6897 dev_warn(&h->pdev->dev, "unknown message type %d\n", 6898 cmd); 6899 BUG(); 6900 } 6901 } else { 6902 dev_warn(&h->pdev->dev, "unknown command type %d\n", cmd_type); 6903 BUG(); 6904 } 6905 6906 switch 
(GET_DIR(c->Request.type_attr_dir)) { 6907 case XFER_READ: 6908 dir = DMA_FROM_DEVICE; 6909 break; 6910 case XFER_WRITE: 6911 dir = DMA_TO_DEVICE; 6912 break; 6913 case XFER_NONE: 6914 dir = DMA_NONE; 6915 break; 6916 default: 6917 dir = DMA_BIDIRECTIONAL; 6918 } 6919 if (hpsa_map_one(h->pdev, c, buff, size, dir)) 6920 return -1; 6921 return 0; 6922 } 6923 6924 /* 6925 * Map (physical) PCI mem into (virtual) kernel space 6926 */ 6927 static void __iomem *remap_pci_mem(ulong base, ulong size) 6928 { 6929 ulong page_base = ((ulong) base) & PAGE_MASK; 6930 ulong page_offs = ((ulong) base) - page_base; 6931 void __iomem *page_remapped = ioremap(page_base, 6932 page_offs + size); 6933 6934 return page_remapped ? (page_remapped + page_offs) : NULL; 6935 } 6936 6937 static inline unsigned long get_next_completion(struct ctlr_info *h, u8 q) 6938 { 6939 return h->access.command_completed(h, q); 6940 } 6941 6942 static inline bool interrupt_pending(struct ctlr_info *h) 6943 { 6944 return h->access.intr_pending(h); 6945 } 6946 6947 static inline long interrupt_not_for_us(struct ctlr_info *h) 6948 { 6949 return (h->access.intr_pending(h) == 0) || 6950 (h->interrupts_enabled == 0); 6951 } 6952 6953 static inline int bad_tag(struct ctlr_info *h, u32 tag_index, 6954 u32 raw_tag) 6955 { 6956 if (unlikely(tag_index >= h->nr_cmds)) { 6957 dev_warn(&h->pdev->dev, "bad tag 0x%08x ignored.\n", raw_tag); 6958 return 1; 6959 } 6960 return 0; 6961 } 6962 6963 static inline void finish_cmd(struct CommandList *c) 6964 { 6965 dial_up_lockup_detection_on_fw_flash_complete(c->h, c); 6966 if (likely(c->cmd_type == CMD_IOACCEL1 || c->cmd_type == CMD_SCSI 6967 || c->cmd_type == CMD_IOACCEL2)) 6968 complete_scsi_command(c); 6969 else if (c->cmd_type == CMD_IOCTL_PEND || c->cmd_type == IOACCEL2_TMF) 6970 complete(c->waiting); 6971 } 6972 6973 /* process completion of an indexed ("direct lookup") command */ 6974 static inline void process_indexed_cmd(struct ctlr_info *h, 6975 u32 raw_tag) 6976 { 6977 u32 tag_index; 6978 struct CommandList *c; 6979 6980 tag_index = raw_tag >> DIRECT_LOOKUP_SHIFT; 6981 if (!bad_tag(h, tag_index, raw_tag)) { 6982 c = h->cmd_pool + tag_index; 6983 finish_cmd(c); 6984 } 6985 } 6986 6987 /* Some controllers, like p400, will give us one interrupt 6988 * after a soft reset, even if we turned interrupts off. 6989 * Only need to check for this in the hpsa_xxx_discard_completions 6990 * functions. 6991 */ 6992 static int ignore_bogus_interrupt(struct ctlr_info *h) 6993 { 6994 if (likely(!reset_devices)) 6995 return 0; 6996 6997 if (likely(h->interrupts_enabled)) 6998 return 0; 6999 7000 dev_info(&h->pdev->dev, "Received interrupt while interrupts disabled " 7001 "(known firmware bug.) Ignoring.\n"); 7002 7003 return 1; 7004 } 7005 7006 /* 7007 * Convert &h->q[x] (passed to interrupt handlers) back to h. 7008 * Relies on (h-q[x] == x) being true for x such that 7009 * 0 <= x < MAX_REPLY_QUEUES. 
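 * That is, h->q[x] stores the value x; e.g. for a pointer to h->q[2],
 * *queue == 2 and (queue - *queue) points back at h->q[0], letting
 * container_of() recover the enclosing ctlr_info.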
7010 */ 7011 static struct ctlr_info *queue_to_hba(u8 *queue) 7012 { 7013 return container_of((queue - *queue), struct ctlr_info, q[0]); 7014 } 7015 7016 static irqreturn_t hpsa_intx_discard_completions(int irq, void *queue) 7017 { 7018 struct ctlr_info *h = queue_to_hba(queue); 7019 u8 q = *(u8 *) queue; 7020 u32 raw_tag; 7021 7022 if (ignore_bogus_interrupt(h)) 7023 return IRQ_NONE; 7024 7025 if (interrupt_not_for_us(h)) 7026 return IRQ_NONE; 7027 h->last_intr_timestamp = get_jiffies_64(); 7028 while (interrupt_pending(h)) { 7029 raw_tag = get_next_completion(h, q); 7030 while (raw_tag != FIFO_EMPTY) 7031 raw_tag = next_command(h, q); 7032 } 7033 return IRQ_HANDLED; 7034 } 7035 7036 static irqreturn_t hpsa_msix_discard_completions(int irq, void *queue) 7037 { 7038 struct ctlr_info *h = queue_to_hba(queue); 7039 u32 raw_tag; 7040 u8 q = *(u8 *) queue; 7041 7042 if (ignore_bogus_interrupt(h)) 7043 return IRQ_NONE; 7044 7045 h->last_intr_timestamp = get_jiffies_64(); 7046 raw_tag = get_next_completion(h, q); 7047 while (raw_tag != FIFO_EMPTY) 7048 raw_tag = next_command(h, q); 7049 return IRQ_HANDLED; 7050 } 7051 7052 static irqreturn_t do_hpsa_intr_intx(int irq, void *queue) 7053 { 7054 struct ctlr_info *h = queue_to_hba((u8 *) queue); 7055 u32 raw_tag; 7056 u8 q = *(u8 *) queue; 7057 7058 if (interrupt_not_for_us(h)) 7059 return IRQ_NONE; 7060 h->last_intr_timestamp = get_jiffies_64(); 7061 while (interrupt_pending(h)) { 7062 raw_tag = get_next_completion(h, q); 7063 while (raw_tag != FIFO_EMPTY) { 7064 process_indexed_cmd(h, raw_tag); 7065 raw_tag = next_command(h, q); 7066 } 7067 } 7068 return IRQ_HANDLED; 7069 } 7070 7071 static irqreturn_t do_hpsa_intr_msi(int irq, void *queue) 7072 { 7073 struct ctlr_info *h = queue_to_hba(queue); 7074 u32 raw_tag; 7075 u8 q = *(u8 *) queue; 7076 7077 h->last_intr_timestamp = get_jiffies_64(); 7078 raw_tag = get_next_completion(h, q); 7079 while (raw_tag != FIFO_EMPTY) { 7080 process_indexed_cmd(h, raw_tag); 7081 raw_tag = next_command(h, q); 7082 } 7083 return IRQ_HANDLED; 7084 } 7085 7086 /* Send a message CDB to the firmware. Careful, this only works 7087 * in simple mode, not performant mode due to the tag lookup. 7088 * We only ever use this immediately after a controller reset. 7089 */ 7090 static int hpsa_message(struct pci_dev *pdev, unsigned char opcode, 7091 unsigned char type) 7092 { 7093 struct Command { 7094 struct CommandListHeader CommandHeader; 7095 struct RequestBlock Request; 7096 struct ErrDescriptor ErrorDescriptor; 7097 }; 7098 struct Command *cmd; 7099 static const size_t cmd_sz = sizeof(*cmd) + 7100 sizeof(cmd->ErrorDescriptor); 7101 dma_addr_t paddr64; 7102 __le32 paddr32; 7103 u32 tag; 7104 void __iomem *vaddr; 7105 int i, err; 7106 7107 vaddr = pci_ioremap_bar(pdev, 0); 7108 if (vaddr == NULL) 7109 return -ENOMEM; 7110 7111 /* The Inbound Post Queue only accepts 32-bit physical addresses for the 7112 * CCISS commands, so they must be allocated from the lower 4GiB of 7113 * memory. 7114 */ 7115 err = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32)); 7116 if (err) { 7117 iounmap(vaddr); 7118 return err; 7119 } 7120 7121 cmd = dma_alloc_coherent(&pdev->dev, cmd_sz, &paddr64, GFP_KERNEL); 7122 if (cmd == NULL) { 7123 iounmap(vaddr); 7124 return -ENOMEM; 7125 } 7126 7127 /* This must fit, because of the 32-bit consistent DMA mask. Also, 7128 * although there's no guarantee, we assume that the address is at 7129 * least 4-byte aligned (most likely, it's page-aligned). 
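 * The command's tag is set to its own bus address below, which is how the
 * completion is matched in the polling loop that follows.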
7130 */ 7131 paddr32 = cpu_to_le32(paddr64); 7132 7133 cmd->CommandHeader.ReplyQueue = 0; 7134 cmd->CommandHeader.SGList = 0; 7135 cmd->CommandHeader.SGTotal = cpu_to_le16(0); 7136 cmd->CommandHeader.tag = cpu_to_le64(paddr64); 7137 memset(&cmd->CommandHeader.LUN.LunAddrBytes, 0, 8); 7138 7139 cmd->Request.CDBLen = 16; 7140 cmd->Request.type_attr_dir = 7141 TYPE_ATTR_DIR(TYPE_MSG, ATTR_HEADOFQUEUE, XFER_NONE); 7142 cmd->Request.Timeout = 0; /* Don't time out */ 7143 cmd->Request.CDB[0] = opcode; 7144 cmd->Request.CDB[1] = type; 7145 memset(&cmd->Request.CDB[2], 0, 14); /* rest of the CDB is reserved */ 7146 cmd->ErrorDescriptor.Addr = 7147 cpu_to_le64((le32_to_cpu(paddr32) + sizeof(*cmd))); 7148 cmd->ErrorDescriptor.Len = cpu_to_le32(sizeof(struct ErrorInfo)); 7149 7150 writel(le32_to_cpu(paddr32), vaddr + SA5_REQUEST_PORT_OFFSET); 7151 7152 for (i = 0; i < HPSA_MSG_SEND_RETRY_LIMIT; i++) { 7153 tag = readl(vaddr + SA5_REPLY_PORT_OFFSET); 7154 if ((tag & ~HPSA_SIMPLE_ERROR_BITS) == paddr64) 7155 break; 7156 msleep(HPSA_MSG_SEND_RETRY_INTERVAL_MSECS); 7157 } 7158 7159 iounmap(vaddr); 7160 7161 /* we leak the DMA buffer here ... no choice since the controller could 7162 * still complete the command. 7163 */ 7164 if (i == HPSA_MSG_SEND_RETRY_LIMIT) { 7165 dev_err(&pdev->dev, "controller message %02x:%02x timed out\n", 7166 opcode, type); 7167 return -ETIMEDOUT; 7168 } 7169 7170 dma_free_coherent(&pdev->dev, cmd_sz, cmd, paddr64); 7171 7172 if (tag & HPSA_ERROR_BIT) { 7173 dev_err(&pdev->dev, "controller message %02x:%02x failed\n", 7174 opcode, type); 7175 return -EIO; 7176 } 7177 7178 dev_info(&pdev->dev, "controller message %02x:%02x succeeded\n", 7179 opcode, type); 7180 return 0; 7181 } 7182 7183 #define hpsa_noop(p) hpsa_message(p, 3, 0) 7184 7185 static int hpsa_controller_hard_reset(struct pci_dev *pdev, 7186 void __iomem *vaddr, u32 use_doorbell) 7187 { 7188 7189 if (use_doorbell) { 7190 /* For everything after the P600, the PCI power state method 7191 * of resetting the controller doesn't work, so we have this 7192 * other way using the doorbell register. 7193 */ 7194 dev_info(&pdev->dev, "using doorbell to reset controller\n"); 7195 writel(use_doorbell, vaddr + SA5_DOORBELL); 7196 7197 /* PMC hardware guys tell us we need a 10 second delay after 7198 * doorbell reset and before any attempt to talk to the board 7199 * at all to ensure that this actually works and doesn't fall 7200 * over in some weird corner cases. 7201 */ 7202 msleep(10000); 7203 } else { /* Try to do it the PCI power state way */ 7204 7205 /* Quoting from the Open CISS Specification: "The Power 7206 * Management Control/Status Register (CSR) controls the power 7207 * state of the device. The normal operating state is D0, 7208 * CSR=00h. The software off state is D3, CSR=03h. To reset 7209 * the controller, place the interface device in D3 then to D0, 7210 * this causes a secondary PCI reset which will reset the 7211 * controller." */ 7212 7213 int rc = 0; 7214 7215 dev_info(&pdev->dev, "using PCI PM to reset controller\n"); 7216 7217 /* enter the D3hot power management state */ 7218 rc = pci_set_power_state(pdev, PCI_D3hot); 7219 if (rc) 7220 return rc; 7221 7222 msleep(500); 7223 7224 /* enter the D0 power management state */ 7225 rc = pci_set_power_state(pdev, PCI_D0); 7226 if (rc) 7227 return rc; 7228 7229 /* 7230 * The P600 requires a small delay when changing states. 7231 * Otherwise we may think the board did not reset and we bail. 7232 * This for kdump only and is particular to the P600. 
7233 */ 7234 msleep(500); 7235 } 7236 return 0; 7237 } 7238 7239 static void init_driver_version(char *driver_version, int len) 7240 { 7241 memset(driver_version, 0, len); 7242 strncpy(driver_version, HPSA " " HPSA_DRIVER_VERSION, len - 1); 7243 } 7244 7245 static int write_driver_ver_to_cfgtable(struct CfgTable __iomem *cfgtable) 7246 { 7247 char *driver_version; 7248 int i, size = sizeof(cfgtable->driver_version); 7249 7250 driver_version = kmalloc(size, GFP_KERNEL); 7251 if (!driver_version) 7252 return -ENOMEM; 7253 7254 init_driver_version(driver_version, size); 7255 for (i = 0; i < size; i++) 7256 writeb(driver_version[i], &cfgtable->driver_version[i]); 7257 kfree(driver_version); 7258 return 0; 7259 } 7260 7261 static void read_driver_ver_from_cfgtable(struct CfgTable __iomem *cfgtable, 7262 unsigned char *driver_ver) 7263 { 7264 int i; 7265 7266 for (i = 0; i < sizeof(cfgtable->driver_version); i++) 7267 driver_ver[i] = readb(&cfgtable->driver_version[i]); 7268 } 7269 7270 static int controller_reset_failed(struct CfgTable __iomem *cfgtable) 7271 { 7272 7273 char *driver_ver, *old_driver_ver; 7274 int rc, size = sizeof(cfgtable->driver_version); 7275 7276 old_driver_ver = kmalloc_array(2, size, GFP_KERNEL); 7277 if (!old_driver_ver) 7278 return -ENOMEM; 7279 driver_ver = old_driver_ver + size; 7280 7281 /* After a reset, the 32 bytes of "driver version" in the cfgtable 7282 * should have been changed, otherwise we know the reset failed. 7283 */ 7284 init_driver_version(old_driver_ver, size); 7285 read_driver_ver_from_cfgtable(cfgtable, driver_ver); 7286 rc = !memcmp(driver_ver, old_driver_ver, size); 7287 kfree(old_driver_ver); 7288 return rc; 7289 } 7290 /* This does a hard reset of the controller using PCI power management 7291 * states or the using the doorbell register. 7292 */ 7293 static int hpsa_kdump_hard_reset_controller(struct pci_dev *pdev, u32 board_id) 7294 { 7295 u64 cfg_offset; 7296 u32 cfg_base_addr; 7297 u64 cfg_base_addr_index; 7298 void __iomem *vaddr; 7299 unsigned long paddr; 7300 u32 misc_fw_support; 7301 int rc; 7302 struct CfgTable __iomem *cfgtable; 7303 u32 use_doorbell; 7304 u16 command_register; 7305 7306 /* For controllers as old as the P600, this is very nearly 7307 * the same thing as 7308 * 7309 * pci_save_state(pci_dev); 7310 * pci_set_power_state(pci_dev, PCI_D3hot); 7311 * pci_set_power_state(pci_dev, PCI_D0); 7312 * pci_restore_state(pci_dev); 7313 * 7314 * For controllers newer than the P600, the pci power state 7315 * method of resetting doesn't work so we have another way 7316 * using the doorbell register. 7317 */ 7318 7319 if (!ctlr_is_resettable(board_id)) { 7320 dev_warn(&pdev->dev, "Controller not resettable\n"); 7321 return -ENODEV; 7322 } 7323 7324 /* if controller is soft- but not hard resettable... */ 7325 if (!ctlr_is_hard_resettable(board_id)) 7326 return -ENOTSUPP; /* try soft reset later. 
*/ 7327 7328 /* Save the PCI command register */ 7329 pci_read_config_word(pdev, 4, &command_register); 7330 pci_save_state(pdev); 7331 7332 /* find the first memory BAR, so we can find the cfg table */ 7333 rc = hpsa_pci_find_memory_BAR(pdev, &paddr); 7334 if (rc) 7335 return rc; 7336 vaddr = remap_pci_mem(paddr, 0x250); 7337 if (!vaddr) 7338 return -ENOMEM; 7339 7340 /* find cfgtable in order to check if reset via doorbell is supported */ 7341 rc = hpsa_find_cfg_addrs(pdev, vaddr, &cfg_base_addr, 7342 &cfg_base_addr_index, &cfg_offset); 7343 if (rc) 7344 goto unmap_vaddr; 7345 cfgtable = remap_pci_mem(pci_resource_start(pdev, 7346 cfg_base_addr_index) + cfg_offset, sizeof(*cfgtable)); 7347 if (!cfgtable) { 7348 rc = -ENOMEM; 7349 goto unmap_vaddr; 7350 } 7351 rc = write_driver_ver_to_cfgtable(cfgtable); 7352 if (rc) 7353 goto unmap_cfgtable; 7354 7355 /* If reset via doorbell register is supported, use that. 7356 * There are two such methods. Favor the newest method. 7357 */ 7358 misc_fw_support = readl(&cfgtable->misc_fw_support); 7359 use_doorbell = misc_fw_support & MISC_FW_DOORBELL_RESET2; 7360 if (use_doorbell) { 7361 use_doorbell = DOORBELL_CTLR_RESET2; 7362 } else { 7363 use_doorbell = misc_fw_support & MISC_FW_DOORBELL_RESET; 7364 if (use_doorbell) { 7365 dev_warn(&pdev->dev, 7366 "Soft reset not supported. Firmware update is required.\n"); 7367 rc = -ENOTSUPP; /* try soft reset */ 7368 goto unmap_cfgtable; 7369 } 7370 } 7371 7372 rc = hpsa_controller_hard_reset(pdev, vaddr, use_doorbell); 7373 if (rc) 7374 goto unmap_cfgtable; 7375 7376 pci_restore_state(pdev); 7377 pci_write_config_word(pdev, 4, command_register); 7378 7379 /* Some devices (notably the HP Smart Array 5i Controller) 7380 need a little pause here */ 7381 msleep(HPSA_POST_RESET_PAUSE_MSECS); 7382 7383 rc = hpsa_wait_for_board_state(pdev, vaddr, BOARD_READY); 7384 if (rc) { 7385 dev_warn(&pdev->dev, 7386 "Failed waiting for board to become ready after hard reset\n"); 7387 goto unmap_cfgtable; 7388 } 7389 7390 rc = controller_reset_failed(vaddr); 7391 if (rc < 0) 7392 goto unmap_cfgtable; 7393 if (rc) { 7394 dev_warn(&pdev->dev, "Unable to successfully reset " 7395 "controller. Will try soft reset.\n"); 7396 rc = -ENOTSUPP; 7397 } else { 7398 dev_info(&pdev->dev, "board ready after hard reset.\n"); 7399 } 7400 7401 unmap_cfgtable: 7402 iounmap(cfgtable); 7403 7404 unmap_vaddr: 7405 iounmap(vaddr); 7406 return rc; 7407 } 7408 7409 /* 7410 * We cannot read the structure directly, for portability we must use 7411 * the io functions. 7412 * This is for debug only. 
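 * (The body below is compiled only when HPSA_DEBUG is defined; otherwise
 * this function is an empty stub.)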
7413 */ 7414 static void print_cfg_table(struct device *dev, struct CfgTable __iomem *tb) 7415 { 7416 #ifdef HPSA_DEBUG 7417 int i; 7418 char temp_name[17]; 7419 7420 dev_info(dev, "Controller Configuration information\n"); 7421 dev_info(dev, "------------------------------------\n"); 7422 for (i = 0; i < 4; i++) 7423 temp_name[i] = readb(&(tb->Signature[i])); 7424 temp_name[4] = '\0'; 7425 dev_info(dev, " Signature = %s\n", temp_name); 7426 dev_info(dev, " Spec Number = %d\n", readl(&(tb->SpecValence))); 7427 dev_info(dev, " Transport methods supported = 0x%x\n", 7428 readl(&(tb->TransportSupport))); 7429 dev_info(dev, " Transport methods active = 0x%x\n", 7430 readl(&(tb->TransportActive))); 7431 dev_info(dev, " Requested transport Method = 0x%x\n", 7432 readl(&(tb->HostWrite.TransportRequest))); 7433 dev_info(dev, " Coalesce Interrupt Delay = 0x%x\n", 7434 readl(&(tb->HostWrite.CoalIntDelay))); 7435 dev_info(dev, " Coalesce Interrupt Count = 0x%x\n", 7436 readl(&(tb->HostWrite.CoalIntCount))); 7437 dev_info(dev, " Max outstanding commands = %d\n", 7438 readl(&(tb->CmdsOutMax))); 7439 dev_info(dev, " Bus Types = 0x%x\n", readl(&(tb->BusTypes))); 7440 for (i = 0; i < 16; i++) 7441 temp_name[i] = readb(&(tb->ServerName[i])); 7442 temp_name[16] = '\0'; 7443 dev_info(dev, " Server Name = %s\n", temp_name); 7444 dev_info(dev, " Heartbeat Counter = 0x%x\n\n\n", 7445 readl(&(tb->HeartBeat))); 7446 #endif /* HPSA_DEBUG */ 7447 } 7448 7449 static int find_PCI_BAR_index(struct pci_dev *pdev, unsigned long pci_bar_addr) 7450 { 7451 int i, offset, mem_type, bar_type; 7452 7453 if (pci_bar_addr == PCI_BASE_ADDRESS_0) /* looking for BAR zero? */ 7454 return 0; 7455 offset = 0; 7456 for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) { 7457 bar_type = pci_resource_flags(pdev, i) & PCI_BASE_ADDRESS_SPACE; 7458 if (bar_type == PCI_BASE_ADDRESS_SPACE_IO) 7459 offset += 4; 7460 else { 7461 mem_type = pci_resource_flags(pdev, i) & 7462 PCI_BASE_ADDRESS_MEM_TYPE_MASK; 7463 switch (mem_type) { 7464 case PCI_BASE_ADDRESS_MEM_TYPE_32: 7465 case PCI_BASE_ADDRESS_MEM_TYPE_1M: 7466 offset += 4; /* 32 bit */ 7467 break; 7468 case PCI_BASE_ADDRESS_MEM_TYPE_64: 7469 offset += 8; 7470 break; 7471 default: /* reserved in PCI 2.2 */ 7472 dev_warn(&pdev->dev, 7473 "base address is invalid\n"); 7474 return -1; 7475 } 7476 } 7477 if (offset == pci_bar_addr - PCI_BASE_ADDRESS_0) 7478 return i + 1; 7479 } 7480 return -1; 7481 } 7482 7483 static void hpsa_disable_interrupt_mode(struct ctlr_info *h) 7484 { 7485 pci_free_irq_vectors(h->pdev); 7486 h->msix_vectors = 0; 7487 } 7488 7489 static void hpsa_setup_reply_map(struct ctlr_info *h) 7490 { 7491 const struct cpumask *mask; 7492 unsigned int queue, cpu; 7493 7494 for (queue = 0; queue < h->msix_vectors; queue++) { 7495 mask = pci_irq_get_affinity(h->pdev, queue); 7496 if (!mask) 7497 goto fallback; 7498 7499 for_each_cpu(cpu, mask) 7500 h->reply_map[cpu] = queue; 7501 } 7502 return; 7503 7504 fallback: 7505 for_each_possible_cpu(cpu) 7506 h->reply_map[cpu] = 0; 7507 } 7508 7509 /* If MSI/MSI-X is supported by the kernel we will try to enable it on 7510 * controllers that are capable. If not, we use legacy INTx mode. 
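 * MSI-X with interrupt affinity is tried first (up to MAX_REPLY_QUEUES
 * vectors); on failure we fall back to a single MSI or INTx vector, and
 * boards known to advertise broken MSI go straight to INTx.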
7511 */ 7512 static int hpsa_interrupt_mode(struct ctlr_info *h) 7513 { 7514 unsigned int flags = PCI_IRQ_INTX; 7515 int ret; 7516 7517 /* Some boards advertise MSI but don't really support it */ 7518 switch (h->board_id) { 7519 case 0x40700E11: 7520 case 0x40800E11: 7521 case 0x40820E11: 7522 case 0x40830E11: 7523 break; 7524 default: 7525 ret = pci_alloc_irq_vectors(h->pdev, 1, MAX_REPLY_QUEUES, 7526 PCI_IRQ_MSIX | PCI_IRQ_AFFINITY); 7527 if (ret > 0) { 7528 h->msix_vectors = ret; 7529 return 0; 7530 } 7531 7532 flags |= PCI_IRQ_MSI; 7533 break; 7534 } 7535 7536 ret = pci_alloc_irq_vectors(h->pdev, 1, 1, flags); 7537 if (ret < 0) 7538 return ret; 7539 return 0; 7540 } 7541 7542 static int hpsa_lookup_board_id(struct pci_dev *pdev, u32 *board_id, 7543 bool *legacy_board) 7544 { 7545 int i; 7546 u32 subsystem_vendor_id, subsystem_device_id; 7547 7548 subsystem_vendor_id = pdev->subsystem_vendor; 7549 subsystem_device_id = pdev->subsystem_device; 7550 *board_id = ((subsystem_device_id << 16) & 0xffff0000) | 7551 subsystem_vendor_id; 7552 7553 if (legacy_board) 7554 *legacy_board = false; 7555 for (i = 0; i < ARRAY_SIZE(products); i++) 7556 if (*board_id == products[i].board_id) { 7557 if (products[i].access != &SA5A_access && 7558 products[i].access != &SA5B_access) 7559 return i; 7560 dev_warn(&pdev->dev, 7561 "legacy board ID: 0x%08x\n", 7562 *board_id); 7563 if (legacy_board) 7564 *legacy_board = true; 7565 return i; 7566 } 7567 7568 dev_warn(&pdev->dev, "unrecognized board ID: 0x%08x\n", *board_id); 7569 if (legacy_board) 7570 *legacy_board = true; 7571 return ARRAY_SIZE(products) - 1; /* generic unknown smart array */ 7572 } 7573 7574 static int hpsa_pci_find_memory_BAR(struct pci_dev *pdev, 7575 unsigned long *memory_bar) 7576 { 7577 int i; 7578 7579 for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) 7580 if (pci_resource_flags(pdev, i) & IORESOURCE_MEM) { 7581 /* addressing mode bits already removed */ 7582 *memory_bar = pci_resource_start(pdev, i); 7583 dev_dbg(&pdev->dev, "memory BAR = %lx\n", 7584 *memory_bar); 7585 return 0; 7586 } 7587 dev_warn(&pdev->dev, "no memory BAR found\n"); 7588 return -ENODEV; 7589 } 7590 7591 static int hpsa_wait_for_board_state(struct pci_dev *pdev, void __iomem *vaddr, 7592 int wait_for_ready) 7593 { 7594 int i, iterations; 7595 u32 scratchpad; 7596 if (wait_for_ready) 7597 iterations = HPSA_BOARD_READY_ITERATIONS; 7598 else 7599 iterations = HPSA_BOARD_NOT_READY_ITERATIONS; 7600 7601 for (i = 0; i < iterations; i++) { 7602 scratchpad = readl(vaddr + SA5_SCRATCHPAD_OFFSET); 7603 if (wait_for_ready) { 7604 if (scratchpad == HPSA_FIRMWARE_READY) 7605 return 0; 7606 } else { 7607 if (scratchpad != HPSA_FIRMWARE_READY) 7608 return 0; 7609 } 7610 msleep(HPSA_BOARD_READY_POLL_INTERVAL_MSECS); 7611 } 7612 dev_warn(&pdev->dev, "board not ready, timed out.\n"); 7613 return -ENODEV; 7614 } 7615 7616 static int hpsa_find_cfg_addrs(struct pci_dev *pdev, void __iomem *vaddr, 7617 u32 *cfg_base_addr, u64 *cfg_base_addr_index, 7618 u64 *cfg_offset) 7619 { 7620 *cfg_base_addr = readl(vaddr + SA5_CTCFG_OFFSET); 7621 *cfg_offset = readl(vaddr + SA5_CTMEM_OFFSET); 7622 *cfg_base_addr &= (u32) 0x0000ffff; 7623 *cfg_base_addr_index = find_PCI_BAR_index(pdev, *cfg_base_addr); 7624 if (*cfg_base_addr_index == -1) { 7625 dev_warn(&pdev->dev, "cannot find cfg_base_addr_index\n"); 7626 return -ENODEV; 7627 } 7628 return 0; 7629 } 7630 7631 static void hpsa_free_cfgtables(struct ctlr_info *h) 7632 { 7633 if (h->transtable) { 7634 iounmap(h->transtable); 7635 h->transtable = NULL; 
7636 } 7637 if (h->cfgtable) { 7638 iounmap(h->cfgtable); 7639 h->cfgtable = NULL; 7640 } 7641 } 7642 7643 /* Find and map CISS config table and transfer table. 7644 * Several items must be unmapped (freed) later. 7645 */ 7646 static int hpsa_find_cfgtables(struct ctlr_info *h) 7647 { 7648 u64 cfg_offset; 7649 u32 cfg_base_addr; 7650 u64 cfg_base_addr_index; 7651 u32 trans_offset; 7652 int rc; 7653 7654 rc = hpsa_find_cfg_addrs(h->pdev, h->vaddr, &cfg_base_addr, 7655 &cfg_base_addr_index, &cfg_offset); 7656 if (rc) 7657 return rc; 7658 h->cfgtable = remap_pci_mem(pci_resource_start(h->pdev, 7659 cfg_base_addr_index) + cfg_offset, sizeof(*h->cfgtable)); 7660 if (!h->cfgtable) { 7661 dev_err(&h->pdev->dev, "Failed mapping cfgtable\n"); 7662 return -ENOMEM; 7663 } 7664 rc = write_driver_ver_to_cfgtable(h->cfgtable); 7665 if (rc) 7666 return rc; 7667 /* Find performant mode table. */ 7668 trans_offset = readl(&h->cfgtable->TransMethodOffset); 7669 h->transtable = remap_pci_mem(pci_resource_start(h->pdev, 7670 cfg_base_addr_index)+cfg_offset+trans_offset, 7671 sizeof(*h->transtable)); 7672 if (!h->transtable) { 7673 dev_err(&h->pdev->dev, "Failed mapping transfer table\n"); 7674 hpsa_free_cfgtables(h); 7675 return -ENOMEM; 7676 } 7677 return 0; 7678 } 7679 7680 static void hpsa_get_max_perf_mode_cmds(struct ctlr_info *h) 7681 { 7682 #define MIN_MAX_COMMANDS 16 7683 BUILD_BUG_ON(MIN_MAX_COMMANDS <= HPSA_NRESERVED_CMDS); 7684 7685 h->max_commands = readl(&h->cfgtable->MaxPerformantModeCommands); 7686 7687 /* Limit commands in memory limited kdump scenario. */ 7688 if (reset_devices && h->max_commands > 32) 7689 h->max_commands = 32; 7690 7691 if (h->max_commands < MIN_MAX_COMMANDS) { 7692 dev_warn(&h->pdev->dev, 7693 "Controller reports max supported commands of %d. Using %d instead. Ensure that firmware is up to date.\n", 7694 h->max_commands, 7695 MIN_MAX_COMMANDS); 7696 h->max_commands = MIN_MAX_COMMANDS; 7697 } 7698 } 7699 7700 /* If the controller reports that the total max sg entries is greater than 512, 7701 * then we know that chained SG blocks work. (Original smart arrays did not 7702 * support chained SG blocks and would return zero for max sg entries.) 7703 */ 7704 static int hpsa_supports_chained_sg_blocks(struct ctlr_info *h) 7705 { 7706 return h->maxsgentries > 512; 7707 } 7708 7709 /* Interrogate the hardware for some limits: 7710 * max commands, max SG elements without chaining, and with chaining, 7711 * SG chain block size, etc. 7712 */ 7713 static void hpsa_find_board_params(struct ctlr_info *h) 7714 { 7715 hpsa_get_max_perf_mode_cmds(h); 7716 h->nr_cmds = h->max_commands; 7717 h->maxsgentries = readl(&(h->cfgtable->MaxScatterGatherElements)); 7718 h->fw_support = readl(&(h->cfgtable->misc_fw_support)); 7719 if (hpsa_supports_chained_sg_blocks(h)) { 7720 /* Limit in-command s/g elements to 32 to save dma'able memory.
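 * (entries beyond that are described by a chained SG block of h->chainsize
 * elements; one embedded entry is reserved below for the chain pointer)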
*/ 7721 h->max_cmd_sg_entries = 32; 7722 h->chainsize = h->maxsgentries - h->max_cmd_sg_entries; 7723 h->maxsgentries--; /* save one for chain pointer */ 7724 } else { 7725 /* 7726 * Original smart arrays supported at most 31 s/g entries 7727 * embedded inline in the command (trying to use more 7728 * would lock up the controller) 7729 */ 7730 h->max_cmd_sg_entries = 31; 7731 h->maxsgentries = 31; /* default to traditional values */ 7732 h->chainsize = 0; 7733 } 7734 7735 /* Find out what task management functions are supported and cache */ 7736 h->TMFSupportFlags = readl(&(h->cfgtable->TMFSupportFlags)); 7737 if (!(HPSATMF_PHYS_TASK_ABORT & h->TMFSupportFlags)) 7738 dev_warn(&h->pdev->dev, "Physical aborts not supported\n"); 7739 if (!(HPSATMF_LOG_TASK_ABORT & h->TMFSupportFlags)) 7740 dev_warn(&h->pdev->dev, "Logical aborts not supported\n"); 7741 if (!(HPSATMF_IOACCEL_ENABLED & h->TMFSupportFlags)) 7742 dev_warn(&h->pdev->dev, "HP SSD Smart Path aborts not supported\n"); 7743 } 7744 7745 static inline bool hpsa_CISS_signature_present(struct ctlr_info *h) 7746 { 7747 if (!check_signature(h->cfgtable->Signature, "CISS", 4)) { 7748 dev_err(&h->pdev->dev, "not a valid CISS config table\n"); 7749 return false; 7750 } 7751 return true; 7752 } 7753 7754 static inline void hpsa_set_driver_support_bits(struct ctlr_info *h) 7755 { 7756 u32 driver_support; 7757 7758 driver_support = readl(&(h->cfgtable->driver_support)); 7759 /* Need to enable prefetch in the SCSI core for 6400 in x86 */ 7760 #ifdef CONFIG_X86 7761 driver_support |= ENABLE_SCSI_PREFETCH; 7762 #endif 7763 driver_support |= ENABLE_UNIT_ATTN; 7764 writel(driver_support, &(h->cfgtable->driver_support)); 7765 } 7766 7767 /* Disable DMA prefetch for the P600. Otherwise an ASIC bug may result 7768 * in a prefetch beyond physical memory. 7769 */ 7770 static inline void hpsa_p600_dma_prefetch_quirk(struct ctlr_info *h) 7771 { 7772 u32 dma_prefetch; 7773 7774 if (h->board_id != 0x3225103C) 7775 return; 7776 dma_prefetch = readl(h->vaddr + I2O_DMA1_CFG); 7777 dma_prefetch |= 0x8000; 7778 writel(dma_prefetch, h->vaddr + I2O_DMA1_CFG); 7779 } 7780 7781 static int hpsa_wait_for_clear_event_notify_ack(struct ctlr_info *h) 7782 { 7783 int i; 7784 u32 doorbell_value; 7785 unsigned long flags; 7786 /* wait until the clear_event_notify bit 6 is cleared by controller. */ 7787 for (i = 0; i < MAX_CLEAR_EVENT_WAIT; i++) { 7788 spin_lock_irqsave(&h->lock, flags); 7789 doorbell_value = readl(h->vaddr + SA5_DOORBELL); 7790 spin_unlock_irqrestore(&h->lock, flags); 7791 if (!(doorbell_value & DOORBELL_CLEAR_EVENTS)) 7792 goto done; 7793 /* delay and try again */ 7794 msleep(CLEAR_EVENT_WAIT_INTERVAL); 7795 } 7796 return -ENODEV; 7797 done: 7798 return 0; 7799 } 7800 7801 static int hpsa_wait_for_mode_change_ack(struct ctlr_info *h) 7802 { 7803 int i; 7804 u32 doorbell_value; 7805 unsigned long flags; 7806 7807 /* under certain very rare conditions, this can take awhile. 7808 * (e.g.: hot replace a failed 144GB drive in a RAID 5 set right 7809 * as we enter this code.) 
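 * The loop below polls the SA5 doorbell every MODE_CHANGE_WAIT_INTERVAL ms,
 * for at most MAX_MODE_CHANGE_WAIT iterations, and bails out early if a
 * controller remove is already in progress.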
7810 */ 7811 for (i = 0; i < MAX_MODE_CHANGE_WAIT; i++) { 7812 if (h->remove_in_progress) 7813 goto done; 7814 spin_lock_irqsave(&h->lock, flags); 7815 doorbell_value = readl(h->vaddr + SA5_DOORBELL); 7816 spin_unlock_irqrestore(&h->lock, flags); 7817 if (!(doorbell_value & CFGTBL_ChangeReq)) 7818 goto done; 7819 /* delay and try again */ 7820 msleep(MODE_CHANGE_WAIT_INTERVAL); 7821 } 7822 return -ENODEV; 7823 done: 7824 return 0; 7825 } 7826 7827 /* return -ENODEV or other reason on error, 0 on success */ 7828 static int hpsa_enter_simple_mode(struct ctlr_info *h) 7829 { 7830 u32 trans_support; 7831 7832 trans_support = readl(&(h->cfgtable->TransportSupport)); 7833 if (!(trans_support & SIMPLE_MODE)) 7834 return -ENOTSUPP; 7835 7836 h->max_commands = readl(&(h->cfgtable->CmdsOutMax)); 7837 7838 /* Update the field, and then ring the doorbell */ 7839 writel(CFGTBL_Trans_Simple, &(h->cfgtable->HostWrite.TransportRequest)); 7840 writel(0, &h->cfgtable->HostWrite.command_pool_addr_hi); 7841 writel(CFGTBL_ChangeReq, h->vaddr + SA5_DOORBELL); 7842 if (hpsa_wait_for_mode_change_ack(h)) 7843 goto error; 7844 print_cfg_table(&h->pdev->dev, h->cfgtable); 7845 if (!(readl(&(h->cfgtable->TransportActive)) & CFGTBL_Trans_Simple)) 7846 goto error; 7847 h->transMethod = CFGTBL_Trans_Simple; 7848 return 0; 7849 error: 7850 dev_err(&h->pdev->dev, "failed to enter simple mode\n"); 7851 return -ENODEV; 7852 } 7853 7854 /* free items allocated or mapped by hpsa_pci_init */ 7855 static void hpsa_free_pci_init(struct ctlr_info *h) 7856 { 7857 hpsa_free_cfgtables(h); /* pci_init 4 */ 7858 iounmap(h->vaddr); /* pci_init 3 */ 7859 h->vaddr = NULL; 7860 hpsa_disable_interrupt_mode(h); /* pci_init 2 */ 7861 /* 7862 * call pci_disable_device before pci_release_regions per 7863 * Documentation/driver-api/pci/pci.rst 7864 */ 7865 pci_disable_device(h->pdev); /* pci_init 1 */ 7866 pci_release_regions(h->pdev); /* pci_init 2 */ 7867 } 7868 7869 /* several items must be freed later */ 7870 static int hpsa_pci_init(struct ctlr_info *h) 7871 { 7872 int prod_index, err; 7873 bool legacy_board; 7874 7875 prod_index = hpsa_lookup_board_id(h->pdev, &h->board_id, &legacy_board); 7876 if (prod_index < 0) 7877 return prod_index; 7878 h->product_name = products[prod_index].product_name; 7879 h->access = *(products[prod_index].access); 7880 h->legacy_board = legacy_board; 7881 pci_disable_link_state(h->pdev, PCIE_LINK_STATE_L0S | 7882 PCIE_LINK_STATE_L1 | PCIE_LINK_STATE_CLKPM); 7883 7884 err = pci_enable_device(h->pdev); 7885 if (err) { 7886 dev_err(&h->pdev->dev, "failed to enable PCI device\n"); 7887 pci_disable_device(h->pdev); 7888 return err; 7889 } 7890 7891 err = pci_request_regions(h->pdev, HPSA); 7892 if (err) { 7893 dev_err(&h->pdev->dev, 7894 "failed to obtain PCI resources\n"); 7895 pci_disable_device(h->pdev); 7896 return err; 7897 } 7898 7899 pci_set_master(h->pdev); 7900 7901 err = hpsa_interrupt_mode(h); 7902 if (err) 7903 goto clean1; 7904 7905 /* setup mapping between CPU and reply queue */ 7906 hpsa_setup_reply_map(h); 7907 7908 err = hpsa_pci_find_memory_BAR(h->pdev, &h->paddr); 7909 if (err) 7910 goto clean2; /* intmode+region, pci */ 7911 h->vaddr = remap_pci_mem(h->paddr, 0x250); 7912 if (!h->vaddr) { 7913 dev_err(&h->pdev->dev, "failed to remap PCI mem\n"); 7914 err = -ENOMEM; 7915 goto clean2; /* intmode+region, pci */ 7916 } 7917 err = hpsa_wait_for_board_state(h->pdev, h->vaddr, BOARD_READY); 7918 if (err) 7919 goto clean3; /* vaddr, intmode+region, pci */ 7920 err = hpsa_find_cfgtables(h); 7921 if (err) 
7922 goto clean3; /* vaddr, intmode+region, pci */ 7923 hpsa_find_board_params(h); 7924 7925 if (!hpsa_CISS_signature_present(h)) { 7926 err = -ENODEV; 7927 goto clean4; /* cfgtables, vaddr, intmode+region, pci */ 7928 } 7929 hpsa_set_driver_support_bits(h); 7930 hpsa_p600_dma_prefetch_quirk(h); 7931 err = hpsa_enter_simple_mode(h); 7932 if (err) 7933 goto clean4; /* cfgtables, vaddr, intmode+region, pci */ 7934 return 0; 7935 7936 clean4: /* cfgtables, vaddr, intmode+region, pci */ 7937 hpsa_free_cfgtables(h); 7938 clean3: /* vaddr, intmode+region, pci */ 7939 iounmap(h->vaddr); 7940 h->vaddr = NULL; 7941 clean2: /* intmode+region, pci */ 7942 hpsa_disable_interrupt_mode(h); 7943 clean1: 7944 /* 7945 * call pci_disable_device before pci_release_regions per 7946 * Documentation/driver-api/pci/pci.rst 7947 */ 7948 pci_disable_device(h->pdev); 7949 pci_release_regions(h->pdev); 7950 return err; 7951 } 7952 7953 static void hpsa_hba_inquiry(struct ctlr_info *h) 7954 { 7955 int rc; 7956 7957 #define HBA_INQUIRY_BYTE_COUNT 64 7958 h->hba_inquiry_data = kmalloc(HBA_INQUIRY_BYTE_COUNT, GFP_KERNEL); 7959 if (!h->hba_inquiry_data) 7960 return; 7961 rc = hpsa_scsi_do_inquiry(h, RAID_CTLR_LUNID, 0, 7962 h->hba_inquiry_data, HBA_INQUIRY_BYTE_COUNT); 7963 if (rc != 0) { 7964 kfree(h->hba_inquiry_data); 7965 h->hba_inquiry_data = NULL; 7966 } 7967 } 7968 7969 static int hpsa_init_reset_devices(struct pci_dev *pdev, u32 board_id) 7970 { 7971 int rc, i; 7972 void __iomem *vaddr; 7973 7974 if (!reset_devices) 7975 return 0; 7976 7977 /* kdump kernel is loading, we don't know in which state is 7978 * the pci interface. The dev->enable_cnt is equal zero 7979 * so we call enable+disable, wait a while and switch it on. 7980 */ 7981 rc = pci_enable_device(pdev); 7982 if (rc) { 7983 dev_warn(&pdev->dev, "Failed to enable PCI device\n"); 7984 return -ENODEV; 7985 } 7986 pci_disable_device(pdev); 7987 msleep(260); /* a randomly chosen number */ 7988 rc = pci_enable_device(pdev); 7989 if (rc) { 7990 dev_warn(&pdev->dev, "failed to enable device.\n"); 7991 return -ENODEV; 7992 } 7993 7994 pci_set_master(pdev); 7995 7996 vaddr = pci_ioremap_bar(pdev, 0); 7997 if (vaddr == NULL) { 7998 rc = -ENOMEM; 7999 goto out_disable; 8000 } 8001 writel(SA5_INTR_OFF, vaddr + SA5_REPLY_INTR_MASK_OFFSET); 8002 iounmap(vaddr); 8003 8004 /* Reset the controller with a PCI power-cycle or via doorbell */ 8005 rc = hpsa_kdump_hard_reset_controller(pdev, board_id); 8006 8007 /* -ENOTSUPP here means we cannot reset the controller 8008 * but it's already (and still) up and running in 8009 * "performant mode". Or, it might be 640x, which can't reset 8010 * due to concerns about shared bbwc between 6402/6404 pair. 8011 */ 8012 if (rc) 8013 goto out_disable; 8014 8015 /* Now try to get the controller to respond to a no-op */ 8016 dev_info(&pdev->dev, "Waiting for controller to respond to no-op\n"); 8017 for (i = 0; i < HPSA_POST_RESET_NOOP_RETRIES; i++) { 8018 if (hpsa_noop(pdev) == 0) 8019 break; 8020 else 8021 dev_warn(&pdev->dev, "no-op failed%s\n", 8022 (i < 11 ? 
"; re-trying" : "")); 8023 } 8024 8025 out_disable: 8026 8027 pci_disable_device(pdev); 8028 return rc; 8029 } 8030 8031 static void hpsa_free_cmd_pool(struct ctlr_info *h) 8032 { 8033 bitmap_free(h->cmd_pool_bits); 8034 h->cmd_pool_bits = NULL; 8035 if (h->cmd_pool) { 8036 dma_free_coherent(&h->pdev->dev, 8037 h->nr_cmds * sizeof(struct CommandList), 8038 h->cmd_pool, 8039 h->cmd_pool_dhandle); 8040 h->cmd_pool = NULL; 8041 h->cmd_pool_dhandle = 0; 8042 } 8043 if (h->errinfo_pool) { 8044 dma_free_coherent(&h->pdev->dev, 8045 h->nr_cmds * sizeof(struct ErrorInfo), 8046 h->errinfo_pool, 8047 h->errinfo_pool_dhandle); 8048 h->errinfo_pool = NULL; 8049 h->errinfo_pool_dhandle = 0; 8050 } 8051 } 8052 8053 static int hpsa_alloc_cmd_pool(struct ctlr_info *h) 8054 { 8055 h->cmd_pool_bits = bitmap_zalloc(h->nr_cmds, GFP_KERNEL); 8056 h->cmd_pool = dma_alloc_coherent(&h->pdev->dev, 8057 h->nr_cmds * sizeof(*h->cmd_pool), 8058 &h->cmd_pool_dhandle, GFP_KERNEL); 8059 h->errinfo_pool = dma_alloc_coherent(&h->pdev->dev, 8060 h->nr_cmds * sizeof(*h->errinfo_pool), 8061 &h->errinfo_pool_dhandle, GFP_KERNEL); 8062 if ((h->cmd_pool_bits == NULL) 8063 || (h->cmd_pool == NULL) 8064 || (h->errinfo_pool == NULL)) { 8065 dev_err(&h->pdev->dev, "out of memory in %s", __func__); 8066 goto clean_up; 8067 } 8068 hpsa_preinitialize_commands(h); 8069 return 0; 8070 clean_up: 8071 hpsa_free_cmd_pool(h); 8072 return -ENOMEM; 8073 } 8074 8075 /* clear affinity hints and free MSI-X, MSI, or legacy INTx vectors */ 8076 static void hpsa_free_irqs(struct ctlr_info *h) 8077 { 8078 int i; 8079 int irq_vector = 0; 8080 8081 if (hpsa_simple_mode) 8082 irq_vector = h->intr_mode; 8083 8084 if (!h->msix_vectors || h->intr_mode != PERF_MODE_INT) { 8085 /* Single reply queue, only one irq to free */ 8086 free_irq(pci_irq_vector(h->pdev, irq_vector), 8087 &h->q[h->intr_mode]); 8088 h->q[h->intr_mode] = 0; 8089 return; 8090 } 8091 8092 for (i = 0; i < h->msix_vectors; i++) { 8093 free_irq(pci_irq_vector(h->pdev, i), &h->q[i]); 8094 h->q[i] = 0; 8095 } 8096 for (; i < MAX_REPLY_QUEUES; i++) 8097 h->q[i] = 0; 8098 } 8099 8100 /* returns 0 on success; cleans up and returns -Enn on error */ 8101 static int hpsa_request_irqs(struct ctlr_info *h, 8102 irqreturn_t (*msixhandler)(int, void *), 8103 irqreturn_t (*intxhandler)(int, void *)) 8104 { 8105 int rc, i; 8106 int irq_vector = 0; 8107 8108 if (hpsa_simple_mode) 8109 irq_vector = h->intr_mode; 8110 8111 /* 8112 * initialize h->q[x] = x so that interrupt handlers know which 8113 * queue to process. 8114 */ 8115 for (i = 0; i < MAX_REPLY_QUEUES; i++) 8116 h->q[i] = (u8) i; 8117 8118 if (h->intr_mode == PERF_MODE_INT && h->msix_vectors > 0) { 8119 /* If performant mode and MSI-X, use multiple reply queues */ 8120 for (i = 0; i < h->msix_vectors; i++) { 8121 sprintf(h->intrname[i], "%s-msix%d", h->devname, i); 8122 rc = request_irq(pci_irq_vector(h->pdev, i), msixhandler, 8123 0, h->intrname[i], 8124 &h->q[i]); 8125 if (rc) { 8126 int j; 8127 8128 dev_err(&h->pdev->dev, 8129 "failed to get irq %d for %s\n", 8130 pci_irq_vector(h->pdev, i), h->devname); 8131 for (j = 0; j < i; j++) { 8132 free_irq(pci_irq_vector(h->pdev, j), &h->q[j]); 8133 h->q[j] = 0; 8134 } 8135 for (; j < MAX_REPLY_QUEUES; j++) 8136 h->q[j] = 0; 8137 return rc; 8138 } 8139 } 8140 } else { 8141 /* Use single reply pool */ 8142 if (h->msix_vectors > 0 || h->pdev->msi_enabled) { 8143 sprintf(h->intrname[0], "%s-msi%s", h->devname, 8144 h->msix_vectors ? 
"x" : ""); 8145 rc = request_irq(pci_irq_vector(h->pdev, irq_vector), 8146 msixhandler, 0, 8147 h->intrname[0], 8148 &h->q[h->intr_mode]); 8149 } else { 8150 sprintf(h->intrname[h->intr_mode], 8151 "%s-intx", h->devname); 8152 rc = request_irq(pci_irq_vector(h->pdev, irq_vector), 8153 intxhandler, IRQF_SHARED, 8154 h->intrname[0], 8155 &h->q[h->intr_mode]); 8156 } 8157 } 8158 if (rc) { 8159 dev_err(&h->pdev->dev, "failed to get irq %d for %s\n", 8160 pci_irq_vector(h->pdev, irq_vector), h->devname); 8161 hpsa_free_irqs(h); 8162 return -ENODEV; 8163 } 8164 return 0; 8165 } 8166 8167 static int hpsa_kdump_soft_reset(struct ctlr_info *h) 8168 { 8169 int rc; 8170 hpsa_send_host_reset(h, HPSA_RESET_TYPE_CONTROLLER); 8171 8172 dev_info(&h->pdev->dev, "Waiting for board to soft reset.\n"); 8173 rc = hpsa_wait_for_board_state(h->pdev, h->vaddr, BOARD_NOT_READY); 8174 if (rc) { 8175 dev_warn(&h->pdev->dev, "Soft reset had no effect.\n"); 8176 return rc; 8177 } 8178 8179 dev_info(&h->pdev->dev, "Board reset, awaiting READY status.\n"); 8180 rc = hpsa_wait_for_board_state(h->pdev, h->vaddr, BOARD_READY); 8181 if (rc) { 8182 dev_warn(&h->pdev->dev, "Board failed to become ready " 8183 "after soft reset.\n"); 8184 return rc; 8185 } 8186 8187 return 0; 8188 } 8189 8190 static void hpsa_free_reply_queues(struct ctlr_info *h) 8191 { 8192 int i; 8193 8194 for (i = 0; i < h->nreply_queues; i++) { 8195 if (!h->reply_queue[i].head) 8196 continue; 8197 dma_free_coherent(&h->pdev->dev, 8198 h->reply_queue_size, 8199 h->reply_queue[i].head, 8200 h->reply_queue[i].busaddr); 8201 h->reply_queue[i].head = NULL; 8202 h->reply_queue[i].busaddr = 0; 8203 } 8204 h->reply_queue_size = 0; 8205 } 8206 8207 static void hpsa_undo_allocations_after_kdump_soft_reset(struct ctlr_info *h) 8208 { 8209 hpsa_free_performant_mode(h); /* init_one 7 */ 8210 hpsa_free_sg_chain_blocks(h); /* init_one 6 */ 8211 hpsa_free_cmd_pool(h); /* init_one 5 */ 8212 hpsa_free_irqs(h); /* init_one 4 */ 8213 scsi_host_put(h->scsi_host); /* init_one 3 */ 8214 h->scsi_host = NULL; /* init_one 3 */ 8215 hpsa_free_pci_init(h); /* init_one 2_5 */ 8216 free_percpu(h->lockup_detected); /* init_one 2 */ 8217 h->lockup_detected = NULL; /* init_one 2 */ 8218 if (h->resubmit_wq) { 8219 destroy_workqueue(h->resubmit_wq); /* init_one 1 */ 8220 h->resubmit_wq = NULL; 8221 } 8222 if (h->rescan_ctlr_wq) { 8223 destroy_workqueue(h->rescan_ctlr_wq); 8224 h->rescan_ctlr_wq = NULL; 8225 } 8226 if (h->monitor_ctlr_wq) { 8227 destroy_workqueue(h->monitor_ctlr_wq); 8228 h->monitor_ctlr_wq = NULL; 8229 } 8230 8231 kfree(h); /* init_one 1 */ 8232 } 8233 8234 /* Called when controller lockup detected. 
*/ 8235 static void fail_all_outstanding_cmds(struct ctlr_info *h) 8236 { 8237 int i, refcount; 8238 struct CommandList *c; 8239 int failcount = 0; 8240 8241 flush_workqueue(h->resubmit_wq); /* ensure all cmds are fully built */ 8242 for (i = 0; i < h->nr_cmds; i++) { 8243 c = h->cmd_pool + i; 8244 refcount = atomic_inc_return(&c->refcount); 8245 if (refcount > 1) { 8246 c->err_info->CommandStatus = CMD_CTLR_LOCKUP; 8247 finish_cmd(c); 8248 atomic_dec(&h->commands_outstanding); 8249 failcount++; 8250 } 8251 cmd_free(h, c); 8252 } 8253 dev_warn(&h->pdev->dev, 8254 "failed %d commands in fail_all\n", failcount); 8255 } 8256 8257 static void set_lockup_detected_for_all_cpus(struct ctlr_info *h, u32 value) 8258 { 8259 int cpu; 8260 8261 for_each_online_cpu(cpu) { 8262 u32 *lockup_detected; 8263 lockup_detected = per_cpu_ptr(h->lockup_detected, cpu); 8264 *lockup_detected = value; 8265 } 8266 wmb(); /* be sure the per-cpu variables are out to memory */ 8267 } 8268 8269 static void controller_lockup_detected(struct ctlr_info *h) 8270 { 8271 unsigned long flags; 8272 u32 lockup_detected; 8273 8274 h->access.set_intr_mask(h, HPSA_INTR_OFF); 8275 spin_lock_irqsave(&h->lock, flags); 8276 lockup_detected = readl(h->vaddr + SA5_SCRATCHPAD_OFFSET); 8277 if (!lockup_detected) { 8278 /* no heartbeat, but controller gave us a zero. */ 8279 dev_warn(&h->pdev->dev, 8280 "lockup detected after %d but scratchpad register is zero\n", 8281 h->heartbeat_sample_interval / HZ); 8282 lockup_detected = 0xffffffff; 8283 } 8284 set_lockup_detected_for_all_cpus(h, lockup_detected); 8285 spin_unlock_irqrestore(&h->lock, flags); 8286 dev_warn(&h->pdev->dev, "Controller lockup detected: 0x%08x after %d\n", 8287 lockup_detected, h->heartbeat_sample_interval / HZ); 8288 if (lockup_detected == 0xffff0000) { 8289 dev_warn(&h->pdev->dev, "Telling controller to do a CHKPT\n"); 8290 writel(DOORBELL_GENERATE_CHKPT, h->vaddr + SA5_DOORBELL); 8291 } 8292 pci_disable_device(h->pdev); 8293 fail_all_outstanding_cmds(h); 8294 } 8295 8296 static int detect_controller_lockup(struct ctlr_info *h) 8297 { 8298 u64 now; 8299 u32 heartbeat; 8300 unsigned long flags; 8301 8302 now = get_jiffies_64(); 8303 /* If we've received an interrupt recently, we're ok. */ 8304 if (time_after64(h->last_intr_timestamp + 8305 (h->heartbeat_sample_interval), now)) 8306 return false; 8307 8308 /* 8309 * If we've already checked the heartbeat recently, we're ok. 8310 * This could happen if someone sends us a signal. We 8311 * otherwise don't care about signals in this thread. 8312 */ 8313 if (time_after64(h->last_heartbeat_timestamp + 8314 (h->heartbeat_sample_interval), now)) 8315 return false; 8316 8317 /* If heartbeat has not changed since we last looked, we're not ok. */ 8318 spin_lock_irqsave(&h->lock, flags); 8319 heartbeat = readl(&h->cfgtable->HeartBeat); 8320 spin_unlock_irqrestore(&h->lock, flags); 8321 if (h->last_heartbeat == heartbeat) { 8322 controller_lockup_detected(h); 8323 return true; 8324 } 8325 8326 /* We're ok. */ 8327 h->last_heartbeat = heartbeat; 8328 h->last_heartbeat_timestamp = now; 8329 return false; 8330 } 8331 8332 /* 8333 * Set ioaccel status for all ioaccel volumes. 8334 * 8335 * Called from monitor controller worker (hpsa_event_monitor_worker) 8336 * 8337 * A Volume (or Volumes that comprise an Array set) may be undergoing a 8338 * transformation, so we will be turning off ioaccel for all volumes that 8339 * make up the Array. 
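 * The decision is driven by the ioaccel status VPD page
 * (HPSA_VPD_LV_IOACCEL_STATUS) read below for each volume in the
 * current device list.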
8340 */ 8341 static void hpsa_set_ioaccel_status(struct ctlr_info *h) 8342 { 8343 int rc; 8344 int i; 8345 u8 ioaccel_status; 8346 unsigned char *buf; 8347 struct hpsa_scsi_dev_t *device; 8348 8349 if (!h) 8350 return; 8351 8352 buf = kmalloc(64, GFP_KERNEL); 8353 if (!buf) 8354 return; 8355 8356 /* 8357 * Run through current device list used during I/O requests. 8358 */ 8359 for (i = 0; i < h->ndevices; i++) { 8360 int offload_to_be_enabled = 0; 8361 int offload_config = 0; 8362 8363 device = h->dev[i]; 8364 8365 if (!device) 8366 continue; 8367 if (!hpsa_vpd_page_supported(h, device->scsi3addr, 8368 HPSA_VPD_LV_IOACCEL_STATUS)) 8369 continue; 8370 8371 memset(buf, 0, 64); 8372 8373 rc = hpsa_scsi_do_inquiry(h, device->scsi3addr, 8374 VPD_PAGE | HPSA_VPD_LV_IOACCEL_STATUS, 8375 buf, 64); 8376 if (rc != 0) 8377 continue; 8378 8379 ioaccel_status = buf[IOACCEL_STATUS_BYTE]; 8380 8381 /* 8382 * Check if offload is still configured on 8383 */ 8384 offload_config = 8385 !!(ioaccel_status & OFFLOAD_CONFIGURED_BIT); 8386 /* 8387 * If offload is configured on, check to see if ioaccel 8388 * needs to be enabled. 8389 */ 8390 if (offload_config) 8391 offload_to_be_enabled = 8392 !!(ioaccel_status & OFFLOAD_ENABLED_BIT); 8393 8394 /* 8395 * If ioaccel is to be re-enabled, re-enable later during the 8396 * scan operation so the driver can get a fresh raidmap 8397 * before turning ioaccel back on. 8398 */ 8399 if (offload_to_be_enabled) 8400 continue; 8401 8402 /* 8403 * Immediately turn off ioaccel for any volume the 8404 * controller tells us to. Some of the reasons could be: 8405 * transformation - change to the LVs of an Array. 8406 * degraded volume - component failure 8407 */ 8408 hpsa_turn_off_ioaccel_for_device(device); 8409 } 8410 8411 kfree(buf); 8412 } 8413 8414 static void hpsa_ack_ctlr_events(struct ctlr_info *h) 8415 { 8416 char *event_type; 8417 8418 if (!(h->fw_support & MISC_FW_EVENT_NOTIFY)) 8419 return; 8420 8421 /* Ask the controller to clear the events we're handling. */ 8422 if ((h->transMethod & (CFGTBL_Trans_io_accel1 8423 | CFGTBL_Trans_io_accel2)) && 8424 (h->events & HPSA_EVENT_NOTIFY_ACCEL_IO_PATH_STATE_CHANGE || 8425 h->events & HPSA_EVENT_NOTIFY_ACCEL_IO_PATH_CONFIG_CHANGE)) { 8426 8427 if (h->events & HPSA_EVENT_NOTIFY_ACCEL_IO_PATH_STATE_CHANGE) 8428 event_type = "state change"; 8429 if (h->events & HPSA_EVENT_NOTIFY_ACCEL_IO_PATH_CONFIG_CHANGE) 8430 event_type = "configuration change"; 8431 /* Stop sending new RAID offload reqs via the IO accelerator */ 8432 scsi_block_requests(h->scsi_host); 8433 hpsa_set_ioaccel_status(h); 8434 hpsa_drain_accel_commands(h); 8435 /* Set 'accelerator path config change' bit */ 8436 dev_warn(&h->pdev->dev, 8437 "Acknowledging event: 0x%08x (HP SSD Smart Path %s)\n", 8438 h->events, event_type); 8439 writel(h->events, &(h->cfgtable->clear_event_notify)); 8440 /* Set the "clear event notify field update" bit 6 */ 8441 writel(DOORBELL_CLEAR_EVENTS, h->vaddr + SA5_DOORBELL); 8442 /* Wait until ctlr clears 'clear event notify field', bit 6 */ 8443 hpsa_wait_for_clear_event_notify_ack(h); 8444 scsi_unblock_requests(h->scsi_host); 8445 } else { 8446 /* Acknowledge controller notification events. */ 8447 writel(h->events, &(h->cfgtable->clear_event_notify)); 8448 writel(DOORBELL_CLEAR_EVENTS, h->vaddr + SA5_DOORBELL); 8449 hpsa_wait_for_clear_event_notify_ack(h); 8450 } 8451 return; 8452 } 8453 8454 /* Check a register on the controller to see if there are configuration 8455 * changes (added/changed/removed logical drives, etc.) 
which mean that 8456 * we should rescan the controller for devices. 8457 * Also check flag for driver-initiated rescan. 8458 */ 8459 static int hpsa_ctlr_needs_rescan(struct ctlr_info *h) 8460 { 8461 if (h->drv_req_rescan) { 8462 h->drv_req_rescan = 0; 8463 return 1; 8464 } 8465 8466 if (!(h->fw_support & MISC_FW_EVENT_NOTIFY)) 8467 return 0; 8468 8469 h->events = readl(&(h->cfgtable->event_notify)); 8470 return h->events & RESCAN_REQUIRED_EVENT_BITS; 8471 } 8472 8473 /* 8474 * Check if any of the offline devices have become ready 8475 */ 8476 static int hpsa_offline_devices_ready(struct ctlr_info *h) 8477 { 8478 unsigned long flags; 8479 struct offline_device_entry *d; 8480 struct list_head *this, *tmp; 8481 8482 spin_lock_irqsave(&h->offline_device_lock, flags); 8483 list_for_each_safe(this, tmp, &h->offline_device_list) { 8484 d = list_entry(this, struct offline_device_entry, 8485 offline_list); 8486 spin_unlock_irqrestore(&h->offline_device_lock, flags); 8487 if (!hpsa_volume_offline(h, d->scsi3addr)) { 8488 spin_lock_irqsave(&h->offline_device_lock, flags); 8489 list_del(&d->offline_list); 8490 spin_unlock_irqrestore(&h->offline_device_lock, flags); 8491 return 1; 8492 } 8493 spin_lock_irqsave(&h->offline_device_lock, flags); 8494 } 8495 spin_unlock_irqrestore(&h->offline_device_lock, flags); 8496 return 0; 8497 } 8498 8499 static int hpsa_luns_changed(struct ctlr_info *h) 8500 { 8501 int rc = 1; /* assume there are changes */ 8502 struct ReportLUNdata *logdev = NULL; 8503 8504 /* if we can't find out if lun data has changed, 8505 * assume that it has. 8506 */ 8507 8508 if (!h->lastlogicals) 8509 return rc; 8510 8511 logdev = kzalloc(sizeof(*logdev), GFP_KERNEL); 8512 if (!logdev) 8513 return rc; 8514 8515 if (hpsa_scsi_do_report_luns(h, 1, logdev, sizeof(*logdev), 0)) { 8516 dev_warn(&h->pdev->dev, 8517 "report luns failed, can't track lun changes.\n"); 8518 goto out; 8519 } 8520 if (memcmp(logdev, h->lastlogicals, sizeof(*logdev))) { 8521 dev_info(&h->pdev->dev, 8522 "Lun changes detected.\n"); 8523 memcpy(h->lastlogicals, logdev, sizeof(*logdev)); 8524 goto out; 8525 } else 8526 rc = 0; /* no changes detected. 
*/ 8527 out: 8528 kfree(logdev); 8529 return rc; 8530 } 8531 8532 static void hpsa_perform_rescan(struct ctlr_info *h) 8533 { 8534 struct Scsi_Host *sh = NULL; 8535 unsigned long flags; 8536 8537 /* 8538 * Do the scan after the reset 8539 */ 8540 spin_lock_irqsave(&h->reset_lock, flags); 8541 if (h->reset_in_progress) { 8542 h->drv_req_rescan = 1; 8543 spin_unlock_irqrestore(&h->reset_lock, flags); 8544 return; 8545 } 8546 spin_unlock_irqrestore(&h->reset_lock, flags); 8547 8548 sh = scsi_host_get(h->scsi_host); 8549 if (sh != NULL) { 8550 hpsa_scan_start(sh); 8551 scsi_host_put(sh); 8552 h->drv_req_rescan = 0; 8553 } 8554 } 8555 8556 /* 8557 * watch for controller events 8558 */ 8559 static void hpsa_event_monitor_worker(struct work_struct *work) 8560 { 8561 struct ctlr_info *h = container_of(to_delayed_work(work), 8562 struct ctlr_info, event_monitor_work); 8563 unsigned long flags; 8564 8565 spin_lock_irqsave(&h->lock, flags); 8566 if (h->remove_in_progress) { 8567 spin_unlock_irqrestore(&h->lock, flags); 8568 return; 8569 } 8570 spin_unlock_irqrestore(&h->lock, flags); 8571 8572 if (hpsa_ctlr_needs_rescan(h)) { 8573 hpsa_ack_ctlr_events(h); 8574 hpsa_perform_rescan(h); 8575 } 8576 8577 spin_lock_irqsave(&h->lock, flags); 8578 if (!h->remove_in_progress) 8579 queue_delayed_work(h->monitor_ctlr_wq, &h->event_monitor_work, 8580 HPSA_EVENT_MONITOR_INTERVAL); 8581 spin_unlock_irqrestore(&h->lock, flags); 8582 } 8583 8584 static void hpsa_rescan_ctlr_worker(struct work_struct *work) 8585 { 8586 unsigned long flags; 8587 struct ctlr_info *h = container_of(to_delayed_work(work), 8588 struct ctlr_info, rescan_ctlr_work); 8589 8590 spin_lock_irqsave(&h->lock, flags); 8591 if (h->remove_in_progress) { 8592 spin_unlock_irqrestore(&h->lock, flags); 8593 return; 8594 } 8595 spin_unlock_irqrestore(&h->lock, flags); 8596 8597 if (h->drv_req_rescan || hpsa_offline_devices_ready(h)) { 8598 hpsa_perform_rescan(h); 8599 } else if (h->discovery_polling) { 8600 if (hpsa_luns_changed(h)) { 8601 dev_info(&h->pdev->dev, 8602 "driver discovery polling rescan.\n"); 8603 hpsa_perform_rescan(h); 8604 } 8605 } 8606 spin_lock_irqsave(&h->lock, flags); 8607 if (!h->remove_in_progress) 8608 queue_delayed_work(h->rescan_ctlr_wq, &h->rescan_ctlr_work, 8609 h->heartbeat_sample_interval); 8610 spin_unlock_irqrestore(&h->lock, flags); 8611 } 8612 8613 static void hpsa_monitor_ctlr_worker(struct work_struct *work) 8614 { 8615 unsigned long flags; 8616 struct ctlr_info *h = container_of(to_delayed_work(work), 8617 struct ctlr_info, monitor_ctlr_work); 8618 8619 detect_controller_lockup(h); 8620 if (lockup_detected(h)) 8621 return; 8622 8623 spin_lock_irqsave(&h->lock, flags); 8624 if (!h->remove_in_progress) 8625 queue_delayed_work(h->monitor_ctlr_wq, &h->monitor_ctlr_work, 8626 h->heartbeat_sample_interval); 8627 spin_unlock_irqrestore(&h->lock, flags); 8628 } 8629 8630 static struct workqueue_struct *hpsa_create_controller_wq(struct ctlr_info *h, 8631 char *name) 8632 { 8633 struct workqueue_struct *wq = NULL; 8634 8635 wq = alloc_ordered_workqueue("%s_%d_hpsa", 0, name, h->ctlr); 8636 if (!wq) 8637 dev_err(&h->pdev->dev, "failed to create %s workqueue\n", name); 8638 8639 return wq; 8640 } 8641 8642 static void hpda_free_ctlr_info(struct ctlr_info *h) 8643 { 8644 kfree(h->reply_map); 8645 kfree(h); 8646 } 8647 8648 static struct ctlr_info *hpda_alloc_ctlr_info(void) 8649 { 8650 struct ctlr_info *h; 8651 8652 h = kzalloc(sizeof(*h), GFP_KERNEL); 8653 if (!h) 8654 return NULL; 8655 8656 h->reply_map = kcalloc(nr_cpu_ids, 
sizeof(*h->reply_map), GFP_KERNEL); 8657 if (!h->reply_map) { 8658 kfree(h); 8659 return NULL; 8660 } 8661 return h; 8662 } 8663 8664 static int hpsa_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) 8665 { 8666 int rc; 8667 struct ctlr_info *h; 8668 int try_soft_reset = 0; 8669 unsigned long flags; 8670 u32 board_id; 8671 8672 if (number_of_controllers == 0) 8673 printk(KERN_INFO DRIVER_NAME "\n"); 8674 8675 rc = hpsa_lookup_board_id(pdev, &board_id, NULL); 8676 if (rc < 0) { 8677 dev_warn(&pdev->dev, "Board ID not found\n"); 8678 return rc; 8679 } 8680 8681 rc = hpsa_init_reset_devices(pdev, board_id); 8682 if (rc) { 8683 if (rc != -ENOTSUPP) 8684 return rc; 8685 /* If the reset fails in a particular way (it has no way to do 8686 * a proper hard reset, so returns -ENOTSUPP) we can try to do 8687 * a soft reset once we get the controller configured up to the 8688 * point that it can accept a command. 8689 */ 8690 try_soft_reset = 1; 8691 rc = 0; 8692 } 8693 8694 reinit_after_soft_reset: 8695 8696 /* Command structures must be aligned on a 32-byte boundary because 8697 * the 5 lower bits of the address are used by the hardware. and by 8698 * the driver. See comments in hpsa.h for more info. 8699 */ 8700 BUILD_BUG_ON(sizeof(struct CommandList) % COMMANDLIST_ALIGNMENT); 8701 h = hpda_alloc_ctlr_info(); 8702 if (!h) { 8703 dev_err(&pdev->dev, "Failed to allocate controller head\n"); 8704 return -ENOMEM; 8705 } 8706 8707 h->pdev = pdev; 8708 8709 h->intr_mode = hpsa_simple_mode ? SIMPLE_MODE_INT : PERF_MODE_INT; 8710 INIT_LIST_HEAD(&h->offline_device_list); 8711 spin_lock_init(&h->lock); 8712 spin_lock_init(&h->offline_device_lock); 8713 spin_lock_init(&h->scan_lock); 8714 spin_lock_init(&h->reset_lock); 8715 atomic_set(&h->passthru_cmds_avail, HPSA_MAX_CONCURRENT_PASSTHRUS); 8716 8717 /* Allocate and clear per-cpu variable lockup_detected */ 8718 h->lockup_detected = alloc_percpu(u32); 8719 if (!h->lockup_detected) { 8720 dev_err(&h->pdev->dev, "Failed to allocate lockup detector\n"); 8721 rc = -ENOMEM; 8722 goto clean1; /* aer/h */ 8723 } 8724 set_lockup_detected_for_all_cpus(h, 0); 8725 8726 rc = hpsa_pci_init(h); 8727 if (rc) 8728 goto clean2; /* lu, aer/h */ 8729 8730 /* relies on h-> settings made by hpsa_pci_init, including 8731 * interrupt_mode h->intr */ 8732 rc = hpsa_scsi_host_alloc(h); 8733 if (rc) 8734 goto clean2_5; /* pci, lu, aer/h */ 8735 8736 sprintf(h->devname, HPSA "%d", h->scsi_host->host_no); 8737 h->ctlr = number_of_controllers; 8738 number_of_controllers++; 8739 8740 /* configure PCI DMA stuff */ 8741 rc = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)); 8742 if (rc != 0) { 8743 rc = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)); 8744 if (rc != 0) { 8745 dev_err(&pdev->dev, "no suitable DMA available\n"); 8746 goto clean3; /* shost, pci, lu, aer/h */ 8747 } 8748 } 8749 8750 /* make sure the board interrupts are off */ 8751 h->access.set_intr_mask(h, HPSA_INTR_OFF); 8752 8753 rc = hpsa_request_irqs(h, do_hpsa_intr_msi, do_hpsa_intr_intx); 8754 if (rc) 8755 goto clean3; /* shost, pci, lu, aer/h */ 8756 rc = hpsa_alloc_cmd_pool(h); 8757 if (rc) 8758 goto clean4; /* irq, shost, pci, lu, aer/h */ 8759 rc = hpsa_alloc_sg_chain_blocks(h); 8760 if (rc) 8761 goto clean5; /* cmd, irq, shost, pci, lu, aer/h */ 8762 init_waitqueue_head(&h->scan_wait_queue); 8763 init_waitqueue_head(&h->event_sync_wait_queue); 8764 mutex_init(&h->reset_mutex); 8765 h->scan_finished = 1; /* no scan currently in progress */ 8766 h->scan_waiting = 0; 8767 8768 pci_set_drvdata(pdev, h); 8769 
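/* The drvdata set above is read back with pci_get_drvdata() in the shutdown and remove paths. */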
h->ndevices = 0; 8770 8771 spin_lock_init(&h->devlock); 8772 rc = hpsa_put_ctlr_into_performant_mode(h); 8773 if (rc) 8774 goto clean6; /* sg, cmd, irq, shost, pci, lu, aer/h */ 8775 8776 /* create the resubmit workqueue */ 8777 h->rescan_ctlr_wq = hpsa_create_controller_wq(h, "rescan"); 8778 if (!h->rescan_ctlr_wq) { 8779 rc = -ENOMEM; 8780 goto clean7; 8781 } 8782 8783 h->resubmit_wq = hpsa_create_controller_wq(h, "resubmit"); 8784 if (!h->resubmit_wq) { 8785 rc = -ENOMEM; 8786 goto clean7; /* aer/h */ 8787 } 8788 8789 h->monitor_ctlr_wq = hpsa_create_controller_wq(h, "monitor"); 8790 if (!h->monitor_ctlr_wq) { 8791 rc = -ENOMEM; 8792 goto clean7; 8793 } 8794 8795 /* 8796 * At this point, the controller is ready to take commands. 8797 * Now, if reset_devices and the hard reset didn't work, try 8798 * the soft reset and see if that works. 8799 */ 8800 if (try_soft_reset) { 8801 8802 /* This is kind of gross. We may or may not get a completion 8803 * from the soft reset command, and if we do, then the value 8804 * from the fifo may or may not be valid. So, we wait 10 secs 8805 * after the reset throwing away any completions we get during 8806 * that time. Unregister the interrupt handler and register 8807 * fake ones to scoop up any residual completions. 8808 */ 8809 spin_lock_irqsave(&h->lock, flags); 8810 h->access.set_intr_mask(h, HPSA_INTR_OFF); 8811 spin_unlock_irqrestore(&h->lock, flags); 8812 hpsa_free_irqs(h); 8813 rc = hpsa_request_irqs(h, hpsa_msix_discard_completions, 8814 hpsa_intx_discard_completions); 8815 if (rc) { 8816 dev_warn(&h->pdev->dev, 8817 "Failed to request_irq after soft reset.\n"); 8818 /* 8819 * cannot goto clean7 or free_irqs will be called 8820 * again. Instead, do its work 8821 */ 8822 hpsa_free_performant_mode(h); /* clean7 */ 8823 hpsa_free_sg_chain_blocks(h); /* clean6 */ 8824 hpsa_free_cmd_pool(h); /* clean5 */ 8825 /* 8826 * skip hpsa_free_irqs(h) clean4 since that 8827 * was just called before request_irqs failed 8828 */ 8829 goto clean3; 8830 } 8831 8832 rc = hpsa_kdump_soft_reset(h); 8833 if (rc) 8834 /* Neither hard nor soft reset worked, we're hosed. */ 8835 goto clean7; 8836 8837 dev_info(&h->pdev->dev, "Board READY.\n"); 8838 dev_info(&h->pdev->dev, 8839 "Waiting for stale completions to drain.\n"); 8840 h->access.set_intr_mask(h, HPSA_INTR_ON); 8841 msleep(10000); 8842 h->access.set_intr_mask(h, HPSA_INTR_OFF); 8843 8844 rc = controller_reset_failed(h->cfgtable); 8845 if (rc) 8846 dev_info(&h->pdev->dev, 8847 "Soft reset appears to have failed.\n"); 8848 8849 /* since the controller's reset, we have to go back and re-init 8850 * everything. Easiest to just forget what we've done and do it 8851 * all over again. 
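 * hpsa_undo_allocations_after_kdump_soft_reset() below releases everything
 * allocated so far, and on success we jump back to reinit_after_soft_reset.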
8852 */ 8853 hpsa_undo_allocations_after_kdump_soft_reset(h); 8854 try_soft_reset = 0; 8855 if (rc) 8856 /* don't goto clean, we already unallocated */ 8857 return -ENODEV; 8858 8859 goto reinit_after_soft_reset; 8860 } 8861 8862 /* Enable Accelerated IO path at driver layer */ 8863 h->acciopath_status = 1; 8864 /* Disable discovery polling.*/ 8865 h->discovery_polling = 0; 8866 8867 8868 /* Turn the interrupts on so we can service requests */ 8869 h->access.set_intr_mask(h, HPSA_INTR_ON); 8870 8871 hpsa_hba_inquiry(h); 8872 8873 h->lastlogicals = kzalloc(sizeof(*(h->lastlogicals)), GFP_KERNEL); 8874 if (!h->lastlogicals) 8875 dev_info(&h->pdev->dev, 8876 "Can't track change to report lun data\n"); 8877 8878 /* hook into SCSI subsystem */ 8879 rc = hpsa_scsi_add_host(h); 8880 if (rc) 8881 goto clean8; /* lastlogicals, perf, sg, cmd, irq, shost, pci, lu, aer/h */ 8882 8883 /* Monitor the controller for firmware lockups */ 8884 h->heartbeat_sample_interval = HEARTBEAT_SAMPLE_INTERVAL; 8885 INIT_DELAYED_WORK(&h->monitor_ctlr_work, hpsa_monitor_ctlr_worker); 8886 schedule_delayed_work(&h->monitor_ctlr_work, 8887 h->heartbeat_sample_interval); 8888 INIT_DELAYED_WORK(&h->rescan_ctlr_work, hpsa_rescan_ctlr_worker); 8889 queue_delayed_work(h->rescan_ctlr_wq, &h->rescan_ctlr_work, 8890 h->heartbeat_sample_interval); 8891 INIT_DELAYED_WORK(&h->event_monitor_work, hpsa_event_monitor_worker); 8892 schedule_delayed_work(&h->event_monitor_work, 8893 HPSA_EVENT_MONITOR_INTERVAL); 8894 return 0; 8895 8896 clean8: /* lastlogicals, perf, sg, cmd, irq, shost, pci, lu, aer/h */ 8897 kfree(h->lastlogicals); 8898 clean7: /* perf, sg, cmd, irq, shost, pci, lu, aer/h */ 8899 hpsa_free_performant_mode(h); 8900 h->access.set_intr_mask(h, HPSA_INTR_OFF); 8901 clean6: /* sg, cmd, irq, pci, lockup, wq/aer/h */ 8902 hpsa_free_sg_chain_blocks(h); 8903 clean5: /* cmd, irq, shost, pci, lu, aer/h */ 8904 hpsa_free_cmd_pool(h); 8905 clean4: /* irq, shost, pci, lu, aer/h */ 8906 hpsa_free_irqs(h); 8907 clean3: /* shost, pci, lu, aer/h */ 8908 scsi_host_put(h->scsi_host); 8909 h->scsi_host = NULL; 8910 clean2_5: /* pci, lu, aer/h */ 8911 hpsa_free_pci_init(h); 8912 clean2: /* lu, aer/h */ 8913 if (h->lockup_detected) { 8914 free_percpu(h->lockup_detected); 8915 h->lockup_detected = NULL; 8916 } 8917 clean1: /* wq/aer/h */ 8918 if (h->resubmit_wq) { 8919 destroy_workqueue(h->resubmit_wq); 8920 h->resubmit_wq = NULL; 8921 } 8922 if (h->rescan_ctlr_wq) { 8923 destroy_workqueue(h->rescan_ctlr_wq); 8924 h->rescan_ctlr_wq = NULL; 8925 } 8926 if (h->monitor_ctlr_wq) { 8927 destroy_workqueue(h->monitor_ctlr_wq); 8928 h->monitor_ctlr_wq = NULL; 8929 } 8930 hpda_free_ctlr_info(h); 8931 return rc; 8932 } 8933 8934 static void hpsa_flush_cache(struct ctlr_info *h) 8935 { 8936 char *flush_buf; 8937 struct CommandList *c; 8938 int rc; 8939 8940 if (unlikely(lockup_detected(h))) 8941 return; 8942 flush_buf = kzalloc(4, GFP_KERNEL); 8943 if (!flush_buf) 8944 return; 8945 8946 c = cmd_alloc(h); 8947 8948 if (fill_cmd(c, HPSA_CACHE_FLUSH, h, flush_buf, 4, 0, 8949 RAID_CTLR_LUNID, TYPE_CMD)) { 8950 goto out; 8951 } 8952 rc = hpsa_scsi_do_simple_cmd_with_retry(h, c, DMA_TO_DEVICE, 8953 DEFAULT_TIMEOUT); 8954 if (rc) 8955 goto out; 8956 if (c->err_info->CommandStatus != 0) 8957 out: 8958 dev_warn(&h->pdev->dev, 8959 "error flushing cache on controller\n"); 8960 cmd_free(h, c); 8961 kfree(flush_buf); 8962 } 8963 8964 /* Make controller gather fresh report lun data each time we 8965 * send down a report luns request 8966 */ 8967 static void 
hpsa_disable_rld_caching(struct ctlr_info *h) 8968 { 8969 u32 *options; 8970 struct CommandList *c; 8971 int rc; 8972 8973 /* Don't bother trying to set diag options if locked up */ 8974 if (unlikely(h->lockup_detected)) 8975 return; 8976 8977 options = kzalloc(sizeof(*options), GFP_KERNEL); 8978 if (!options) 8979 return; 8980 8981 c = cmd_alloc(h); 8982 8983 /* first, get the current diag options settings */ 8984 if (fill_cmd(c, BMIC_SENSE_DIAG_OPTIONS, h, options, 4, 0, 8985 RAID_CTLR_LUNID, TYPE_CMD)) 8986 goto errout; 8987 8988 rc = hpsa_scsi_do_simple_cmd_with_retry(h, c, DMA_FROM_DEVICE, 8989 NO_TIMEOUT); 8990 if ((rc != 0) || (c->err_info->CommandStatus != 0)) 8991 goto errout; 8992 8993 /* Now, set the bit for disabling the RLD caching */ 8994 *options |= HPSA_DIAG_OPTS_DISABLE_RLD_CACHING; 8995 8996 if (fill_cmd(c, BMIC_SET_DIAG_OPTIONS, h, options, 4, 0, 8997 RAID_CTLR_LUNID, TYPE_CMD)) 8998 goto errout; 8999 9000 rc = hpsa_scsi_do_simple_cmd_with_retry(h, c, DMA_TO_DEVICE, 9001 NO_TIMEOUT); 9002 if ((rc != 0) || (c->err_info->CommandStatus != 0)) 9003 goto errout; 9004 9005 /* Now verify that it got set: */ 9006 if (fill_cmd(c, BMIC_SENSE_DIAG_OPTIONS, h, options, 4, 0, 9007 RAID_CTLR_LUNID, TYPE_CMD)) 9008 goto errout; 9009 9010 rc = hpsa_scsi_do_simple_cmd_with_retry(h, c, DMA_FROM_DEVICE, 9011 NO_TIMEOUT); 9012 if ((rc != 0) || (c->err_info->CommandStatus != 0)) 9013 goto errout; 9014 9015 if (*options & HPSA_DIAG_OPTS_DISABLE_RLD_CACHING) 9016 goto out; 9017 9018 errout: 9019 dev_err(&h->pdev->dev, 9020 "Error: failed to disable report lun data caching.\n"); 9021 out: 9022 cmd_free(h, c); 9023 kfree(options); 9024 } 9025 9026 static void __hpsa_shutdown(struct pci_dev *pdev) 9027 { 9028 struct ctlr_info *h; 9029 9030 h = pci_get_drvdata(pdev); 9031 /* Turn board interrupts off and send the flush cache command 9032 * sendcmd will turn off interrupt, and send the flush... 9033 * To write all data in the battery backed cache to disks 9034 */ 9035 hpsa_flush_cache(h); 9036 h->access.set_intr_mask(h, HPSA_INTR_OFF); 9037 hpsa_free_irqs(h); /* init_one 4 */ 9038 hpsa_disable_interrupt_mode(h); /* pci_init 2 */ 9039 } 9040 9041 static void hpsa_shutdown(struct pci_dev *pdev) 9042 { 9043 __hpsa_shutdown(pdev); 9044 pci_disable_device(pdev); 9045 } 9046 9047 static void hpsa_free_device_info(struct ctlr_info *h) 9048 { 9049 int i; 9050 9051 for (i = 0; i < h->ndevices; i++) { 9052 kfree(h->dev[i]); 9053 h->dev[i] = NULL; 9054 } 9055 } 9056 9057 static void hpsa_remove_one(struct pci_dev *pdev) 9058 { 9059 struct ctlr_info *h; 9060 unsigned long flags; 9061 9062 if (pci_get_drvdata(pdev) == NULL) { 9063 dev_err(&pdev->dev, "unable to remove device\n"); 9064 return; 9065 } 9066 h = pci_get_drvdata(pdev); 9067 9068 /* Get rid of any controller monitoring work items */ 9069 spin_lock_irqsave(&h->lock, flags); 9070 h->remove_in_progress = 1; 9071 spin_unlock_irqrestore(&h->lock, flags); 9072 cancel_delayed_work_sync(&h->monitor_ctlr_work); 9073 cancel_delayed_work_sync(&h->rescan_ctlr_work); 9074 cancel_delayed_work_sync(&h->event_monitor_work); 9075 destroy_workqueue(h->rescan_ctlr_wq); 9076 destroy_workqueue(h->resubmit_wq); 9077 destroy_workqueue(h->monitor_ctlr_wq); 9078 9079 hpsa_delete_sas_host(h); 9080 9081 /* 9082 * Call before disabling interrupts. 9083 * scsi_remove_host can trigger I/O operations especially 9084 * when multipath is enabled. There can be SYNCHRONIZE CACHE 9085 * operations which cannot complete and will hang the system. 
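 * Hence scsi_remove_host() runs below before __hpsa_shutdown() masks
 * interrupts and frees the IRQs.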
9086 */ 9087 if (h->scsi_host) 9088 scsi_remove_host(h->scsi_host); /* init_one 8 */ 9089 /* includes hpsa_free_irqs - init_one 4 */ 9090 /* includes hpsa_disable_interrupt_mode - pci_init 2 */ 9091 __hpsa_shutdown(pdev); 9092 9093 hpsa_free_device_info(h); /* scan */ 9094 9095 kfree(h->hba_inquiry_data); /* init_one 10 */ 9096 h->hba_inquiry_data = NULL; /* init_one 10 */ 9097 hpsa_free_ioaccel2_sg_chain_blocks(h); 9098 hpsa_free_performant_mode(h); /* init_one 7 */ 9099 hpsa_free_sg_chain_blocks(h); /* init_one 6 */ 9100 hpsa_free_cmd_pool(h); /* init_one 5 */ 9101 kfree(h->lastlogicals); 9102 9103 /* hpsa_free_irqs already called via hpsa_shutdown init_one 4 */ 9104 9105 scsi_host_put(h->scsi_host); /* init_one 3 */ 9106 h->scsi_host = NULL; /* init_one 3 */ 9107 9108 /* includes hpsa_disable_interrupt_mode - pci_init 2 */ 9109 hpsa_free_pci_init(h); /* init_one 2.5 */ 9110 9111 free_percpu(h->lockup_detected); /* init_one 2 */ 9112 h->lockup_detected = NULL; /* init_one 2 */ 9113 9114 hpda_free_ctlr_info(h); /* init_one 1 */ 9115 } 9116 9117 static int __maybe_unused hpsa_suspend( 9118 __attribute__((unused)) struct device *dev) 9119 { 9120 return -ENOSYS; 9121 } 9122 9123 static int __maybe_unused hpsa_resume 9124 (__attribute__((unused)) struct device *dev) 9125 { 9126 return -ENOSYS; 9127 } 9128 9129 static SIMPLE_DEV_PM_OPS(hpsa_pm_ops, hpsa_suspend, hpsa_resume); 9130 9131 static struct pci_driver hpsa_pci_driver = { 9132 .name = HPSA, 9133 .probe = hpsa_init_one, 9134 .remove = hpsa_remove_one, 9135 .id_table = hpsa_pci_device_id, /* id_table */ 9136 .shutdown = hpsa_shutdown, 9137 .driver.pm = &hpsa_pm_ops, 9138 }; 9139 9140 /* Fill in bucket_map[], given nsgs (the max number of 9141 * scatter gather elements supported) and bucket[], 9142 * which is an array of 8 integers. The bucket[] array 9143 * contains 8 different DMA transfer sizes (in 16 9144 * byte increments) which the controller uses to fetch 9145 * commands. This function fills in bucket_map[], which 9146 * maps a given number of scatter gather elements to one of 9147 * the 8 DMA transfer sizes. The point of it is to allow the 9148 * controller to only do as much DMA as needed to fetch the 9149 * command, with the DMA transfer size encoded in the lower 9150 * bits of the command address. 9151 */ 9152 static void calc_bucket_map(int bucket[], int num_buckets, 9153 int nsgs, int min_blocks, u32 *bucket_map) 9154 { 9155 int i, j, b, size; 9156 9157 /* Note, bucket_map must have nsgs+1 entries. */ 9158 for (i = 0; i <= nsgs; i++) { 9159 /* Compute size of a command with i SG entries */ 9160 size = i + min_blocks; 9161 b = num_buckets; /* Assume the biggest bucket */ 9162 /* Find the bucket that is just big enough */ 9163 for (j = 0; j < num_buckets; j++) { 9164 if (bucket[j] >= size) { 9165 b = j; 9166 break; 9167 } 9168 } 9169 /* for a command with i SG entries, use bucket b. */ 9170 bucket_map[i] = b; 9171 } 9172 } 9173 9174 /* 9175 * return -ENODEV on err, 0 on success (or no action) 9176 * allocates numerous items that must be freed later 9177 */ 9178 static int hpsa_enter_performant_mode(struct ctlr_info *h, u32 trans_support) 9179 { 9180 int i; 9181 unsigned long register_value; 9182 unsigned long transMethod = CFGTBL_Trans_Performant | 9183 (trans_support & CFGTBL_Trans_use_short_tags) | 9184 CFGTBL_Trans_enable_directed_msix | 9185 (trans_support & (CFGTBL_Trans_io_accel1 | 9186 CFGTBL_Trans_io_accel2)); 9187 struct access_method access = SA5_performant_access; 9188 9189 /* This is a bit complicated. 
There are 8 registers on 9190 * the controller which we write to to tell it 8 different 9191 * sizes of commands which there may be. It's a way of 9192 * reducing the DMA done to fetch each command. Encoded into 9193 * each command's tag are 3 bits which communicate to the controller 9194 * which of the eight sizes that command fits within. The size of 9195 * each command depends on how many scatter gather entries there are. 9196 * Each SG entry requires 16 bytes. The eight registers are programmed 9197 * with the number of 16-byte blocks a command of that size requires. 9198 * The smallest command possible requires 5 such 16 byte blocks. 9199 * the largest command possible requires SG_ENTRIES_IN_CMD + 4 16-byte 9200 * blocks. Note, this only extends to the SG entries contained 9201 * within the command block, and does not extend to chained blocks 9202 * of SG elements. bft[] contains the eight values we write to 9203 * the registers. They are not evenly distributed, but have more 9204 * sizes for small commands, and fewer sizes for larger commands. 9205 */ 9206 int bft[8] = {5, 6, 8, 10, 12, 20, 28, SG_ENTRIES_IN_CMD + 4}; 9207 #define MIN_IOACCEL2_BFT_ENTRY 5 9208 #define HPSA_IOACCEL2_HEADER_SZ 4 9209 int bft2[16] = {MIN_IOACCEL2_BFT_ENTRY, 6, 7, 8, 9, 10, 11, 12, 9210 13, 14, 15, 16, 17, 18, 19, 9211 HPSA_IOACCEL2_HEADER_SZ + IOACCEL2_MAXSGENTRIES}; 9212 BUILD_BUG_ON(ARRAY_SIZE(bft2) != 16); 9213 BUILD_BUG_ON(ARRAY_SIZE(bft) != 8); 9214 BUILD_BUG_ON(offsetof(struct io_accel2_cmd, sg) > 9215 16 * MIN_IOACCEL2_BFT_ENTRY); 9216 BUILD_BUG_ON(sizeof(struct ioaccel2_sg_element) != 16); 9217 BUILD_BUG_ON(28 > SG_ENTRIES_IN_CMD + 4); 9218 /* 5 = 1 s/g entry or 4k 9219 * 6 = 2 s/g entry or 8k 9220 * 8 = 4 s/g entry or 16k 9221 * 10 = 6 s/g entry or 24k 9222 */ 9223 9224 /* If the controller supports either ioaccel method then 9225 * we can also use the RAID stack submit path that does not 9226 * perform the superfluous readl() after each command submission. 9227 */ 9228 if (trans_support & (CFGTBL_Trans_io_accel1 | CFGTBL_Trans_io_accel2)) 9229 access = SA5_performant_access_no_read; 9230 9231 /* Controller spec: zero out this buffer. 
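 * (each of the h->nreply_queues ring buffers is h->reply_queue_size bytes,
 * allocated in hpsa_put_ctlr_into_performant_mode())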
*/ 9232 for (i = 0; i < h->nreply_queues; i++) 9233 memset(h->reply_queue[i].head, 0, h->reply_queue_size); 9234 9235 bft[7] = SG_ENTRIES_IN_CMD + 4; 9236 calc_bucket_map(bft, ARRAY_SIZE(bft), 9237 SG_ENTRIES_IN_CMD, 4, h->blockFetchTable); 9238 for (i = 0; i < 8; i++) 9239 writel(bft[i], &h->transtable->BlockFetch[i]); 9240 9241 /* size of controller ring buffer */ 9242 writel(h->max_commands, &h->transtable->RepQSize); 9243 writel(h->nreply_queues, &h->transtable->RepQCount); 9244 writel(0, &h->transtable->RepQCtrAddrLow32); 9245 writel(0, &h->transtable->RepQCtrAddrHigh32); 9246 9247 for (i = 0; i < h->nreply_queues; i++) { 9248 writel(0, &h->transtable->RepQAddr[i].upper); 9249 writel(h->reply_queue[i].busaddr, 9250 &h->transtable->RepQAddr[i].lower); 9251 } 9252 9253 writel(0, &h->cfgtable->HostWrite.command_pool_addr_hi); 9254 writel(transMethod, &(h->cfgtable->HostWrite.TransportRequest)); 9255 /* 9256 * enable outbound interrupt coalescing in accelerator mode; 9257 */ 9258 if (trans_support & CFGTBL_Trans_io_accel1) { 9259 access = SA5_ioaccel_mode1_access; 9260 writel(10, &h->cfgtable->HostWrite.CoalIntDelay); 9261 writel(4, &h->cfgtable->HostWrite.CoalIntCount); 9262 } else 9263 if (trans_support & CFGTBL_Trans_io_accel2) 9264 access = SA5_ioaccel_mode2_access; 9265 writel(CFGTBL_ChangeReq, h->vaddr + SA5_DOORBELL); 9266 if (hpsa_wait_for_mode_change_ack(h)) { 9267 dev_err(&h->pdev->dev, 9268 "performant mode problem - doorbell timeout\n"); 9269 return -ENODEV; 9270 } 9271 register_value = readl(&(h->cfgtable->TransportActive)); 9272 if (!(register_value & CFGTBL_Trans_Performant)) { 9273 dev_err(&h->pdev->dev, 9274 "performant mode problem - transport not active\n"); 9275 return -ENODEV; 9276 } 9277 /* Change the access methods to the performant access methods */ 9278 h->access = access; 9279 h->transMethod = transMethod; 9280 9281 if (!((trans_support & CFGTBL_Trans_io_accel1) || 9282 (trans_support & CFGTBL_Trans_io_accel2))) 9283 return 0; 9284 9285 if (trans_support & CFGTBL_Trans_io_accel1) { 9286 /* Set up I/O accelerator mode */ 9287 for (i = 0; i < h->nreply_queues; i++) { 9288 writel(i, h->vaddr + IOACCEL_MODE1_REPLY_QUEUE_INDEX); 9289 h->reply_queue[i].current_entry = 9290 readl(h->vaddr + IOACCEL_MODE1_PRODUCER_INDEX); 9291 } 9292 bft[7] = h->ioaccel_maxsg + 8; 9293 calc_bucket_map(bft, ARRAY_SIZE(bft), h->ioaccel_maxsg, 8, 9294 h->ioaccel1_blockFetchTable); 9295 9296 /* initialize all reply queue entries to unused */ 9297 for (i = 0; i < h->nreply_queues; i++) 9298 memset(h->reply_queue[i].head, 9299 (u8) IOACCEL_MODE1_REPLY_UNUSED, 9300 h->reply_queue_size); 9301 9302 /* set all the constant fields in the accelerator command 9303 * frames once at init time to save CPU cycles later. 
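 * (e.g. the per-slot err_info address, tag and host_addr depend only on the
 * pool index i, so they are computed once here instead of on every submission)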
9304 */ 9305 for (i = 0; i < h->nr_cmds; i++) { 9306 struct io_accel1_cmd *cp = &h->ioaccel_cmd_pool[i]; 9307 9308 cp->function = IOACCEL1_FUNCTION_SCSIIO; 9309 cp->err_info = (u32) (h->errinfo_pool_dhandle + 9310 (i * sizeof(struct ErrorInfo))); 9311 cp->err_info_len = sizeof(struct ErrorInfo); 9312 cp->sgl_offset = IOACCEL1_SGLOFFSET; 9313 cp->host_context_flags = 9314 cpu_to_le16(IOACCEL1_HCFLAGS_CISS_FORMAT); 9315 cp->timeout_sec = 0; 9316 cp->ReplyQueue = 0; 9317 cp->tag = 9318 cpu_to_le64((i << DIRECT_LOOKUP_SHIFT)); 9319 cp->host_addr = 9320 cpu_to_le64(h->ioaccel_cmd_pool_dhandle + 9321 (i * sizeof(struct io_accel1_cmd))); 9322 } 9323 } else if (trans_support & CFGTBL_Trans_io_accel2) { 9324 u64 cfg_offset, cfg_base_addr_index; 9325 u32 bft2_offset, cfg_base_addr; 9326 9327 hpsa_find_cfg_addrs(h->pdev, h->vaddr, &cfg_base_addr, 9328 &cfg_base_addr_index, &cfg_offset); 9329 BUILD_BUG_ON(offsetof(struct io_accel2_cmd, sg) != 64); 9330 bft2[15] = h->ioaccel_maxsg + HPSA_IOACCEL2_HEADER_SZ; 9331 calc_bucket_map(bft2, ARRAY_SIZE(bft2), h->ioaccel_maxsg, 9332 4, h->ioaccel2_blockFetchTable); 9333 bft2_offset = readl(&h->cfgtable->io_accel_request_size_offset); 9334 BUILD_BUG_ON(offsetof(struct CfgTable, 9335 io_accel_request_size_offset) != 0xb8); 9336 h->ioaccel2_bft2_regs = 9337 remap_pci_mem(pci_resource_start(h->pdev, 9338 cfg_base_addr_index) + 9339 cfg_offset + bft2_offset, 9340 ARRAY_SIZE(bft2) * 9341 sizeof(*h->ioaccel2_bft2_regs)); 9342 for (i = 0; i < ARRAY_SIZE(bft2); i++) 9343 writel(bft2[i], &h->ioaccel2_bft2_regs[i]); 9344 } 9345 writel(CFGTBL_ChangeReq, h->vaddr + SA5_DOORBELL); 9346 if (hpsa_wait_for_mode_change_ack(h)) { 9347 dev_err(&h->pdev->dev, 9348 "performant mode problem - enabling ioaccel mode\n"); 9349 return -ENODEV; 9350 } 9351 return 0; 9352 } 9353 9354 /* Free ioaccel1 mode command blocks and block fetch table */ 9355 static void hpsa_free_ioaccel1_cmd_and_bft(struct ctlr_info *h) 9356 { 9357 if (h->ioaccel_cmd_pool) { 9358 dma_free_coherent(&h->pdev->dev, 9359 h->nr_cmds * sizeof(*h->ioaccel_cmd_pool), 9360 h->ioaccel_cmd_pool, 9361 h->ioaccel_cmd_pool_dhandle); 9362 h->ioaccel_cmd_pool = NULL; 9363 h->ioaccel_cmd_pool_dhandle = 0; 9364 } 9365 kfree(h->ioaccel1_blockFetchTable); 9366 h->ioaccel1_blockFetchTable = NULL; 9367 } 9368 9369 /* Allocate ioaccel1 mode command blocks and block fetch table */ 9370 static int hpsa_alloc_ioaccel1_cmd_and_bft(struct ctlr_info *h) 9371 { 9372 h->ioaccel_maxsg = 9373 readl(&(h->cfgtable->io_accel_max_embedded_sg_count)); 9374 if (h->ioaccel_maxsg > IOACCEL1_MAXSGENTRIES) 9375 h->ioaccel_maxsg = IOACCEL1_MAXSGENTRIES; 9376 9377 /* Command structures must be aligned on a 128-byte boundary 9378 * because the 7 lower bits of the address are used by the 9379 * hardware. 
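 * The BUILD_BUG_ON below enforces that at compile time; the pool returned by
 * dma_alloc_coherent() is at least page aligned, so keeping the command size
 * a multiple of IOACCEL1_COMMANDLIST_ALIGNMENT keeps every slot aligned.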
9380 */ 9381 BUILD_BUG_ON(sizeof(struct io_accel1_cmd) % 9382 IOACCEL1_COMMANDLIST_ALIGNMENT); 9383 h->ioaccel_cmd_pool = 9384 dma_alloc_coherent(&h->pdev->dev, 9385 h->nr_cmds * sizeof(*h->ioaccel_cmd_pool), 9386 &h->ioaccel_cmd_pool_dhandle, GFP_KERNEL); 9387 9388 h->ioaccel1_blockFetchTable = 9389 kmalloc(((h->ioaccel_maxsg + 1) * 9390 sizeof(u32)), GFP_KERNEL); 9391 9392 if ((h->ioaccel_cmd_pool == NULL) || 9393 (h->ioaccel1_blockFetchTable == NULL)) 9394 goto clean_up; 9395 9396 memset(h->ioaccel_cmd_pool, 0, 9397 h->nr_cmds * sizeof(*h->ioaccel_cmd_pool)); 9398 return 0; 9399 9400 clean_up: 9401 hpsa_free_ioaccel1_cmd_and_bft(h); 9402 return -ENOMEM; 9403 } 9404 9405 /* Free ioaccel2 mode command blocks and block fetch table */ 9406 static void hpsa_free_ioaccel2_cmd_and_bft(struct ctlr_info *h) 9407 { 9408 hpsa_free_ioaccel2_sg_chain_blocks(h); 9409 9410 if (h->ioaccel2_cmd_pool) { 9411 dma_free_coherent(&h->pdev->dev, 9412 h->nr_cmds * sizeof(*h->ioaccel2_cmd_pool), 9413 h->ioaccel2_cmd_pool, 9414 h->ioaccel2_cmd_pool_dhandle); 9415 h->ioaccel2_cmd_pool = NULL; 9416 h->ioaccel2_cmd_pool_dhandle = 0; 9417 } 9418 kfree(h->ioaccel2_blockFetchTable); 9419 h->ioaccel2_blockFetchTable = NULL; 9420 } 9421 9422 /* Allocate ioaccel2 mode command blocks and block fetch table */ 9423 static int hpsa_alloc_ioaccel2_cmd_and_bft(struct ctlr_info *h) 9424 { 9425 int rc; 9426 9427 /* Allocate ioaccel2 mode command blocks and block fetch table */ 9428 9429 h->ioaccel_maxsg = 9430 readl(&(h->cfgtable->io_accel_max_embedded_sg_count)); 9431 if (h->ioaccel_maxsg > IOACCEL2_MAXSGENTRIES) 9432 h->ioaccel_maxsg = IOACCEL2_MAXSGENTRIES; 9433 9434 BUILD_BUG_ON(sizeof(struct io_accel2_cmd) % 9435 IOACCEL2_COMMANDLIST_ALIGNMENT); 9436 h->ioaccel2_cmd_pool = 9437 dma_alloc_coherent(&h->pdev->dev, 9438 h->nr_cmds * sizeof(*h->ioaccel2_cmd_pool), 9439 &h->ioaccel2_cmd_pool_dhandle, GFP_KERNEL); 9440 9441 h->ioaccel2_blockFetchTable = 9442 kmalloc(((h->ioaccel_maxsg + 1) * 9443 sizeof(u32)), GFP_KERNEL); 9444 9445 if ((h->ioaccel2_cmd_pool == NULL) || 9446 (h->ioaccel2_blockFetchTable == NULL)) { 9447 rc = -ENOMEM; 9448 goto clean_up; 9449 } 9450 9451 rc = hpsa_allocate_ioaccel2_sg_chain_blocks(h); 9452 if (rc) 9453 goto clean_up; 9454 9455 memset(h->ioaccel2_cmd_pool, 0, 9456 h->nr_cmds * sizeof(*h->ioaccel2_cmd_pool)); 9457 return 0; 9458 9459 clean_up: 9460 hpsa_free_ioaccel2_cmd_and_bft(h); 9461 return rc; 9462 } 9463 9464 /* Free items allocated by hpsa_put_ctlr_into_performant_mode */ 9465 static void hpsa_free_performant_mode(struct ctlr_info *h) 9466 { 9467 kfree(h->blockFetchTable); 9468 h->blockFetchTable = NULL; 9469 hpsa_free_reply_queues(h); 9470 hpsa_free_ioaccel1_cmd_and_bft(h); 9471 hpsa_free_ioaccel2_cmd_and_bft(h); 9472 } 9473 9474 /* return -ENODEV on error, 0 on success (or no action) 9475 * allocates numerous items that must be freed later 9476 */ 9477 static int hpsa_put_ctlr_into_performant_mode(struct ctlr_info *h) 9478 { 9479 u32 trans_support; 9480 int i, rc; 9481 9482 if (hpsa_simple_mode) 9483 return 0; 9484 9485 trans_support = readl(&(h->cfgtable->TransportSupport)); 9486 if (!(trans_support & PERFORMANT_MODE)) 9487 return 0; 9488 9489 /* Check for I/O accelerator mode support */ 9490 if (trans_support & CFGTBL_Trans_io_accel1) { 9491 rc = hpsa_alloc_ioaccel1_cmd_and_bft(h); 9492 if (rc) 9493 return rc; 9494 } else if (trans_support & CFGTBL_Trans_io_accel2) { 9495 rc = hpsa_alloc_ioaccel2_cmd_and_bft(h); 9496 if (rc) 9497 return rc; 9498 } 9499 9500 h->nreply_queues = 
	h->nreply_queues = h->msix_vectors > 0 ? h->msix_vectors : 1;
	hpsa_get_max_perf_mode_cmds(h);
	/* Performant mode ring buffer and supporting data structures */
	h->reply_queue_size = h->max_commands * sizeof(u64);

	for (i = 0; i < h->nreply_queues; i++) {
		h->reply_queue[i].head = dma_alloc_coherent(&h->pdev->dev,
						h->reply_queue_size,
						&h->reply_queue[i].busaddr,
						GFP_KERNEL);
		if (!h->reply_queue[i].head) {
			rc = -ENOMEM;
			goto clean1;	/* rq, ioaccel */
		}
		h->reply_queue[i].size = h->max_commands;
		h->reply_queue[i].wraparound = 1;  /* spec: init to 1 */
		h->reply_queue[i].current_entry = 0;
	}

	/* Need a block fetch table for performant mode */
	h->blockFetchTable = kmalloc(((SG_ENTRIES_IN_CMD + 1) *
				sizeof(u32)), GFP_KERNEL);
	if (!h->blockFetchTable) {
		rc = -ENOMEM;
		goto clean1;	/* rq, ioaccel */
	}

	rc = hpsa_enter_performant_mode(h, trans_support);
	if (rc)
		goto clean2;	/* bft, rq, ioaccel */
	return 0;

clean2:	/* bft, rq, ioaccel */
	kfree(h->blockFetchTable);
	h->blockFetchTable = NULL;
clean1:	/* rq, ioaccel */
	hpsa_free_reply_queues(h);
	hpsa_free_ioaccel1_cmd_and_bft(h);
	hpsa_free_ioaccel2_cmd_and_bft(h);
	return rc;
}

static int is_accelerated_cmd(struct CommandList *c)
{
	return c->cmd_type == CMD_IOACCEL1 || c->cmd_type == CMD_IOACCEL2;
}

static void hpsa_drain_accel_commands(struct ctlr_info *h)
{
	struct CommandList *c = NULL;
	int i, accel_cmds_out;
	int refcount;

	do { /* wait for all outstanding ioaccel commands to drain out */
		accel_cmds_out = 0;
		for (i = 0; i < h->nr_cmds; i++) {
			c = h->cmd_pool + i;
			refcount = atomic_inc_return(&c->refcount);
			if (refcount > 1) /* Command is allocated */
				accel_cmds_out += is_accelerated_cmd(c);
			cmd_free(h, c);
		}
		if (accel_cmds_out <= 0)
			break;
		msleep(100);
	} while (1);
}

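/*
 * The hpsa_sas_* helpers below build the objects expected by the SAS
 * transport class (scsi_transport_sas): the controller is exported as a
 * single node with one port/phy pair, and each exposed physical device
 * gets its own port with an end-device rphy attached.
 */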
static struct hpsa_sas_phy *hpsa_alloc_sas_phy(
	struct hpsa_sas_port *hpsa_sas_port)
{
	struct hpsa_sas_phy *hpsa_sas_phy;
	struct sas_phy *phy;

	hpsa_sas_phy = kzalloc(sizeof(*hpsa_sas_phy), GFP_KERNEL);
	if (!hpsa_sas_phy)
		return NULL;

	phy = sas_phy_alloc(hpsa_sas_port->parent_node->parent_dev,
		hpsa_sas_port->next_phy_index);
	if (!phy) {
		kfree(hpsa_sas_phy);
		return NULL;
	}

	hpsa_sas_port->next_phy_index++;
	hpsa_sas_phy->phy = phy;
	hpsa_sas_phy->parent_port = hpsa_sas_port;

	return hpsa_sas_phy;
}

static void hpsa_free_sas_phy(struct hpsa_sas_phy *hpsa_sas_phy)
{
	struct sas_phy *phy = hpsa_sas_phy->phy;

	sas_port_delete_phy(hpsa_sas_phy->parent_port->port, phy);
	if (hpsa_sas_phy->added_to_port)
		list_del(&hpsa_sas_phy->phy_list_entry);
	sas_phy_delete(phy);
	kfree(hpsa_sas_phy);
}

static int hpsa_sas_port_add_phy(struct hpsa_sas_phy *hpsa_sas_phy)
{
	int rc;
	struct hpsa_sas_port *hpsa_sas_port;
	struct sas_phy *phy;
	struct sas_identify *identify;

	hpsa_sas_port = hpsa_sas_phy->parent_port;
	phy = hpsa_sas_phy->phy;

	identify = &phy->identify;
	memset(identify, 0, sizeof(*identify));
	identify->sas_address = hpsa_sas_port->sas_address;
	identify->device_type = SAS_END_DEVICE;
	identify->initiator_port_protocols = SAS_PROTOCOL_STP;
	identify->target_port_protocols = SAS_PROTOCOL_STP;
	phy->minimum_linkrate_hw = SAS_LINK_RATE_UNKNOWN;
	phy->maximum_linkrate_hw = SAS_LINK_RATE_UNKNOWN;
	phy->minimum_linkrate = SAS_LINK_RATE_UNKNOWN;
	phy->maximum_linkrate = SAS_LINK_RATE_UNKNOWN;
	phy->negotiated_linkrate = SAS_LINK_RATE_UNKNOWN;

	rc = sas_phy_add(hpsa_sas_phy->phy);
	if (rc)
		return rc;

	sas_port_add_phy(hpsa_sas_port->port, hpsa_sas_phy->phy);
	list_add_tail(&hpsa_sas_phy->phy_list_entry,
			&hpsa_sas_port->phy_list_head);
	hpsa_sas_phy->added_to_port = true;

	return 0;
}

static int
hpsa_sas_port_add_rphy(struct hpsa_sas_port *hpsa_sas_port,
				struct sas_rphy *rphy)
{
	struct sas_identify *identify;

	identify = &rphy->identify;
	identify->sas_address = hpsa_sas_port->sas_address;
	identify->initiator_port_protocols = SAS_PROTOCOL_STP;
	identify->target_port_protocols = SAS_PROTOCOL_STP;

	return sas_rphy_add(rphy);
}

static struct hpsa_sas_port
	*hpsa_alloc_sas_port(struct hpsa_sas_node *hpsa_sas_node,
				u64 sas_address)
{
	int rc;
	struct hpsa_sas_port *hpsa_sas_port;
	struct sas_port *port;

	hpsa_sas_port = kzalloc(sizeof(*hpsa_sas_port), GFP_KERNEL);
	if (!hpsa_sas_port)
		return NULL;

	INIT_LIST_HEAD(&hpsa_sas_port->phy_list_head);
	hpsa_sas_port->parent_node = hpsa_sas_node;

	port = sas_port_alloc_num(hpsa_sas_node->parent_dev);
	if (!port)
		goto free_hpsa_port;

	rc = sas_port_add(port);
	if (rc)
		goto free_sas_port;

	hpsa_sas_port->port = port;
	hpsa_sas_port->sas_address = sas_address;
	list_add_tail(&hpsa_sas_port->port_list_entry,
			&hpsa_sas_node->port_list_head);

	return hpsa_sas_port;

free_sas_port:
	sas_port_free(port);
free_hpsa_port:
	kfree(hpsa_sas_port);

	return NULL;
}

static void hpsa_free_sas_port(struct hpsa_sas_port *hpsa_sas_port)
{
	struct hpsa_sas_phy *hpsa_sas_phy;
	struct hpsa_sas_phy *next;

	list_for_each_entry_safe(hpsa_sas_phy, next,
			&hpsa_sas_port->phy_list_head, phy_list_entry)
		hpsa_free_sas_phy(hpsa_sas_phy);

	sas_port_delete(hpsa_sas_port->port);
	list_del(&hpsa_sas_port->port_list_entry);
	kfree(hpsa_sas_port);
}

static struct hpsa_sas_node *hpsa_alloc_sas_node(struct device *parent_dev)
{
	struct hpsa_sas_node *hpsa_sas_node;

	hpsa_sas_node = kzalloc(sizeof(*hpsa_sas_node), GFP_KERNEL);
	if (hpsa_sas_node) {
		hpsa_sas_node->parent_dev = parent_dev;
		INIT_LIST_HEAD(&hpsa_sas_node->port_list_head);
	}

	return hpsa_sas_node;
}

static void hpsa_free_sas_node(struct hpsa_sas_node *hpsa_sas_node)
{
	struct hpsa_sas_port *hpsa_sas_port;
	struct hpsa_sas_port *next;

	if (!hpsa_sas_node)
		return;

	list_for_each_entry_safe(hpsa_sas_port, next,
			&hpsa_sas_node->port_list_head, port_list_entry)
		hpsa_free_sas_port(hpsa_sas_port);

	kfree(hpsa_sas_node);
}

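/*
 * Map a SAS transport rphy back to this controller's device table by
 * comparing it against each device's port rphy; returns NULL when the
 * rphy does not belong to any known device.
 */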
static struct hpsa_scsi_dev_t
	*hpsa_find_device_by_sas_rphy(struct ctlr_info *h,
					struct sas_rphy *rphy)
{
	int i;
	struct hpsa_scsi_dev_t *device;

	for (i = 0; i < h->ndevices; i++) {
		device = h->dev[i];
		if (!device->sas_port)
			continue;
		if (device->sas_port->rphy == rphy)
			return device;
	}

	return NULL;
}

static int hpsa_add_sas_host(struct ctlr_info *h)
{
	int rc;
	struct device *parent_dev;
	struct hpsa_sas_node *hpsa_sas_node;
	struct hpsa_sas_port *hpsa_sas_port;
	struct hpsa_sas_phy *hpsa_sas_phy;

	parent_dev = &h->scsi_host->shost_dev;

	hpsa_sas_node = hpsa_alloc_sas_node(parent_dev);
	if (!hpsa_sas_node)
		return -ENOMEM;

	hpsa_sas_port = hpsa_alloc_sas_port(hpsa_sas_node, h->sas_address);
	if (!hpsa_sas_port) {
		rc = -ENODEV;
		goto free_sas_node;
	}

	hpsa_sas_phy = hpsa_alloc_sas_phy(hpsa_sas_port);
	if (!hpsa_sas_phy) {
		rc = -ENODEV;
		goto free_sas_port;
	}

	rc = hpsa_sas_port_add_phy(hpsa_sas_phy);
	if (rc)
		goto free_sas_phy;

	h->sas_host = hpsa_sas_node;

	return 0;

free_sas_phy:
	sas_phy_free(hpsa_sas_phy->phy);
	kfree(hpsa_sas_phy);
free_sas_port:
	hpsa_free_sas_port(hpsa_sas_port);
free_sas_node:
	hpsa_free_sas_node(hpsa_sas_node);

	return rc;
}

static void hpsa_delete_sas_host(struct ctlr_info *h)
{
	hpsa_free_sas_node(h->sas_host);
}

static int hpsa_add_sas_device(struct hpsa_sas_node *hpsa_sas_node,
				struct hpsa_scsi_dev_t *device)
{
	int rc;
	struct hpsa_sas_port *hpsa_sas_port;
	struct sas_rphy *rphy;

	hpsa_sas_port = hpsa_alloc_sas_port(hpsa_sas_node, device->sas_address);
	if (!hpsa_sas_port)
		return -ENOMEM;

	rphy = sas_end_device_alloc(hpsa_sas_port->port);
	if (!rphy) {
		rc = -ENODEV;
		goto free_sas_port;
	}

	hpsa_sas_port->rphy = rphy;
	device->sas_port = hpsa_sas_port;

	rc = hpsa_sas_port_add_rphy(hpsa_sas_port, rphy);
	if (rc)
		goto free_sas_rphy;

	return 0;

free_sas_rphy:
	sas_rphy_free(rphy);
free_sas_port:
	hpsa_free_sas_port(hpsa_sas_port);
	device->sas_port = NULL;

	return rc;
}

static void hpsa_remove_sas_device(struct hpsa_scsi_dev_t *device)
{
	if (device->sas_port) {
		hpsa_free_sas_port(device->sas_port);
		device->sas_port = NULL;
	}
}

static int
hpsa_sas_get_linkerrors(struct sas_phy *phy)
{
	return 0;
}

static int
hpsa_sas_get_enclosure_identifier(struct sas_rphy *rphy, u64 *identifier)
{
	struct Scsi_Host *shost = phy_to_shost(rphy);
	struct ctlr_info *h;
	struct hpsa_scsi_dev_t *sd;

	if (!shost)
		return -ENXIO;

	h = shost_to_hba(shost);

	if (!h)
		return -ENXIO;

	sd = hpsa_find_device_by_sas_rphy(h, rphy);
	if (!sd)
		return -ENXIO;

	*identifier = sd->eli;

	return 0;
}

static int
hpsa_sas_get_bay_identifier(struct sas_rphy *rphy)
{
	return -ENXIO;
}

static int
hpsa_sas_phy_reset(struct sas_phy *phy, int hard_reset)
{
	return 0;
}

static int
hpsa_sas_phy_enable(struct sas_phy *phy, int enable)
{
	return 0;
}

static int
hpsa_sas_phy_setup(struct sas_phy *phy)
{
	return 0;
}

static void
hpsa_sas_phy_release(struct sas_phy *phy)
{
}

static int
hpsa_sas_phy_speed(struct sas_phy *phy, struct sas_phy_linkrates *rates)
{
	return -EINVAL;
}

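/*
 * SAS transport callbacks.  Only hpsa_sas_get_enclosure_identifier
 * returns device-specific data; the remaining handlers are stubs that
 * simply return 0, -ENXIO or -EINVAL.
 */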
static struct sas_function_template hpsa_sas_transport_functions = {
	.get_linkerrors = hpsa_sas_get_linkerrors,
	.get_enclosure_identifier = hpsa_sas_get_enclosure_identifier,
	.get_bay_identifier = hpsa_sas_get_bay_identifier,
	.phy_reset = hpsa_sas_phy_reset,
	.phy_enable = hpsa_sas_phy_enable,
	.phy_setup = hpsa_sas_phy_setup,
	.phy_release = hpsa_sas_phy_release,
	.set_phy_speed = hpsa_sas_phy_speed,
};

/*
 * This is it.  Register the PCI driver information for the cards we
 * control; the OS will call our registered routines when it finds one of
 * our cards.
 */
static int __init hpsa_init(void)
{
	int rc;

	hpsa_sas_transport_template =
		sas_attach_transport(&hpsa_sas_transport_functions);
	if (!hpsa_sas_transport_template)
		return -ENODEV;

	rc = pci_register_driver(&hpsa_pci_driver);

	if (rc)
		sas_release_transport(hpsa_sas_transport_template);

	return rc;
}

static void __exit hpsa_cleanup(void)
{
	pci_unregister_driver(&hpsa_pci_driver);
	sas_release_transport(hpsa_sas_transport_template);
}

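/*
 * verify_offsets() is never called; it exists only so that the
 * BUILD_BUG_ON() checks behind VERIFY_OFFSET() break the build if the
 * layouts of raid_map_data, io_accel2_cmd or io_accel1_cmd ever drift
 * from the fixed offsets assumed by the controller interface.
 */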
static void __attribute__((unused)) verify_offsets(void)
{
#define VERIFY_OFFSET(member, offset) \
	BUILD_BUG_ON(offsetof(struct raid_map_data, member) != offset)

	VERIFY_OFFSET(structure_size, 0);
	VERIFY_OFFSET(volume_blk_size, 4);
	VERIFY_OFFSET(volume_blk_cnt, 8);
	VERIFY_OFFSET(phys_blk_shift, 16);
	VERIFY_OFFSET(parity_rotation_shift, 17);
	VERIFY_OFFSET(strip_size, 18);
	VERIFY_OFFSET(disk_starting_blk, 20);
	VERIFY_OFFSET(disk_blk_cnt, 28);
	VERIFY_OFFSET(data_disks_per_row, 36);
	VERIFY_OFFSET(metadata_disks_per_row, 38);
	VERIFY_OFFSET(row_cnt, 40);
	VERIFY_OFFSET(layout_map_count, 42);
	VERIFY_OFFSET(flags, 44);
	VERIFY_OFFSET(dekindex, 46);
	/* VERIFY_OFFSET(reserved, 48 */
	VERIFY_OFFSET(data, 64);

#undef VERIFY_OFFSET

#define VERIFY_OFFSET(member, offset) \
	BUILD_BUG_ON(offsetof(struct io_accel2_cmd, member) != offset)

	VERIFY_OFFSET(IU_type, 0);
	VERIFY_OFFSET(direction, 1);
	VERIFY_OFFSET(reply_queue, 2);
	/* VERIFY_OFFSET(reserved1, 3); */
	VERIFY_OFFSET(scsi_nexus, 4);
	VERIFY_OFFSET(Tag, 8);
	VERIFY_OFFSET(cdb, 16);
	VERIFY_OFFSET(cciss_lun, 32);
	VERIFY_OFFSET(data_len, 40);
	VERIFY_OFFSET(cmd_priority_task_attr, 44);
	VERIFY_OFFSET(sg_count, 45);
	/* VERIFY_OFFSET(reserved3 */
	VERIFY_OFFSET(err_ptr, 48);
	VERIFY_OFFSET(err_len, 56);
	/* VERIFY_OFFSET(reserved4 */
	VERIFY_OFFSET(sg, 64);

#undef VERIFY_OFFSET

#define VERIFY_OFFSET(member, offset) \
	BUILD_BUG_ON(offsetof(struct io_accel1_cmd, member) != offset)

	VERIFY_OFFSET(dev_handle, 0x00);
	VERIFY_OFFSET(reserved1, 0x02);
	VERIFY_OFFSET(function, 0x03);
	VERIFY_OFFSET(reserved2, 0x04);
	VERIFY_OFFSET(err_info, 0x0C);
	VERIFY_OFFSET(reserved3, 0x10);
	VERIFY_OFFSET(err_info_len, 0x12);
	VERIFY_OFFSET(reserved4, 0x13);
	VERIFY_OFFSET(sgl_offset, 0x14);
	VERIFY_OFFSET(reserved5, 0x15);
	VERIFY_OFFSET(transfer_len, 0x1C);
	VERIFY_OFFSET(reserved6, 0x20);
	VERIFY_OFFSET(io_flags, 0x24);
	VERIFY_OFFSET(reserved7, 0x26);
	VERIFY_OFFSET(LUN, 0x34);
	VERIFY_OFFSET(control, 0x3C);
	VERIFY_OFFSET(CDB, 0x40);
	VERIFY_OFFSET(reserved8, 0x50);
	VERIFY_OFFSET(host_context_flags, 0x60);
	VERIFY_OFFSET(timeout_sec, 0x62);
	VERIFY_OFFSET(ReplyQueue, 0x64);
	VERIFY_OFFSET(reserved9, 0x65);
	VERIFY_OFFSET(tag, 0x68);
	VERIFY_OFFSET(host_addr, 0x70);
	VERIFY_OFFSET(CISS_LUN, 0x78);
	VERIFY_OFFSET(SG, 0x78 + 8);
#undef VERIFY_OFFSET
}

module_init(hpsa_init);
module_exit(hpsa_cleanup);