/*
 *    Disk Array driver for HP Smart Array SAS controllers
 *    Copyright 2000, 2009 Hewlett-Packard Development Company, L.P.
 *
 *    This program is free software; you can redistribute it and/or modify
 *    it under the terms of the GNU General Public License as published by
 *    the Free Software Foundation; version 2 of the License.
 *
 *    This program is distributed in the hope that it will be useful,
 *    but WITHOUT ANY WARRANTY; without even the implied warranty of
 *    MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 *    NON INFRINGEMENT.  See the GNU General Public License for more details.
 *
 *    You should have received a copy of the GNU General Public License
 *    along with this program; if not, write to the Free Software
 *    Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 *
 *    Questions/Comments/Bugfixes to iss_storagedev@hp.com
 *
 */

#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/pci-aspm.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/fs.h>
#include <linux/timer.h>
#include <linux/seq_file.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/compat.h>
#include <linux/blktrace_api.h>
#include <linux/uaccess.h>
#include <linux/io.h>
#include <linux/dma-mapping.h>
#include <linux/completion.h>
#include <linux/moduleparam.h>
#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_tcq.h>
#include <linux/cciss_ioctl.h>
#include <linux/string.h>
#include <linux/bitmap.h>
#include <linux/atomic.h>
#include <linux/kthread.h>
#include <linux/jiffies.h>
#include "hpsa_cmd.h"
#include "hpsa.h"

/* HPSA_DRIVER_VERSION must be 3 byte values (0-255) separated by '.' */
#define HPSA_DRIVER_VERSION "2.0.2-1"
#define DRIVER_NAME "HP HPSA Driver (v " HPSA_DRIVER_VERSION ")"

/* How long to wait (in milliseconds) for board to go into simple mode */
#define MAX_CONFIG_WAIT 30000
#define MAX_IOCTL_CONFIG_WAIT 1000

/* define how many times we will try a command because of bus resets */
#define MAX_CMD_RETRIES 3

/* Embedded module documentation macros - see modules.h */
MODULE_AUTHOR("Hewlett-Packard Company");
MODULE_DESCRIPTION("Driver for HP Smart Array Controller version " \
        HPSA_DRIVER_VERSION);
MODULE_SUPPORTED_DEVICE("HP Smart Array Controllers");
MODULE_VERSION(HPSA_DRIVER_VERSION);
MODULE_LICENSE("GPL");

static int hpsa_allow_any;
module_param(hpsa_allow_any, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(hpsa_allow_any,
        "Allow hpsa driver to access unknown HP Smart Array hardware");
static int hpsa_simple_mode;
module_param(hpsa_simple_mode, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(hpsa_simple_mode,
        "Use 'simple mode' rather than 'performant mode'");

/* define the PCI info for the cards we can control */
static const struct pci_device_id hpsa_pci_device_id[] = {
        {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3241},
        {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3243},
        {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3245},
        {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3247},
        {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3249},
        {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x324a},
        {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x324b},
        {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3233},
        {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x3350},
        {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x3351},
        {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x3352},
        {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x3353},
        {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x3354},
        {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x3355},
        {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x3356},
        {PCI_VENDOR_ID_HP, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID,
                PCI_CLASS_STORAGE_RAID << 8, 0xffff << 8, 0},
        {0,}
};

MODULE_DEVICE_TABLE(pci, hpsa_pci_device_id);

/*  board_id = Subsystem Device ID & Vendor ID
 *  product = Marketing Name for the board
 *  access = Address of the struct of function pointers
 */
static struct board_type products[] = {
        {0x3241103C, "Smart Array P212", &SA5_access},
        {0x3243103C, "Smart Array P410", &SA5_access},
        {0x3245103C, "Smart Array P410i", &SA5_access},
        {0x3247103C, "Smart Array P411", &SA5_access},
        {0x3249103C, "Smart Array P812", &SA5_access},
        {0x324a103C, "Smart Array P712m", &SA5_access},
        {0x324b103C, "Smart Array P711m", &SA5_access},
        {0x3350103C, "Smart Array", &SA5_access},
        {0x3351103C, "Smart Array", &SA5_access},
        {0x3352103C, "Smart Array", &SA5_access},
        {0x3353103C, "Smart Array", &SA5_access},
        {0x3354103C, "Smart Array", &SA5_access},
        {0x3355103C, "Smart Array", &SA5_access},
        {0x3356103C, "Smart Array", &SA5_access},
        {0xFFFF103C, "Unknown Smart Array", &SA5_access},
};

static int number_of_controllers;

static struct list_head hpsa_ctlr_list = LIST_HEAD_INIT(hpsa_ctlr_list);
static spinlock_t lockup_detector_lock;
static struct task_struct *hpsa_lockup_detector;

static irqreturn_t do_hpsa_intr_intx(int irq, void *dev_id);
static irqreturn_t do_hpsa_intr_msi(int irq, void *dev_id);
static int hpsa_ioctl(struct scsi_device *dev, int cmd, void *arg);
static void start_io(struct ctlr_info *h);

#ifdef CONFIG_COMPAT
static int hpsa_compat_ioctl(struct scsi_device *dev, int cmd, void *arg);
#endif

static void cmd_free(struct ctlr_info *h, struct CommandList *c);
static void cmd_special_free(struct ctlr_info *h, struct CommandList *c);
static struct CommandList *cmd_alloc(struct ctlr_info *h);
static struct CommandList *cmd_special_alloc(struct ctlr_info *h);
static void fill_cmd(struct CommandList *c, u8 cmd, struct ctlr_info *h,
        void *buff, size_t size, u8 page_code, unsigned char *scsi3addr,
        int cmd_type);

static int hpsa_scsi_queue_command(struct Scsi_Host *h, struct scsi_cmnd *cmd);
static void hpsa_scan_start(struct Scsi_Host *);
static int hpsa_scan_finished(struct Scsi_Host *sh,
        unsigned long elapsed_time);
static int hpsa_change_queue_depth(struct scsi_device *sdev,
        int qdepth, int reason);

static int hpsa_eh_device_reset_handler(struct scsi_cmnd *scsicmd);
static int hpsa_slave_alloc(struct scsi_device *sdev);
static void hpsa_slave_destroy(struct scsi_device *sdev);

static void hpsa_update_scsi_devices(struct ctlr_info *h, int hostno);
static int check_for_unit_attention(struct ctlr_info *h,
        struct CommandList *c);
static void check_ioctl_unit_attention(struct ctlr_info *h,
        struct CommandList *c);
/* performant mode helper functions */
static void calc_bucket_map(int *bucket, int num_buckets,
        int nsgs, int *bucket_map);
static __devinit void hpsa_put_ctlr_into_performant_mode(struct ctlr_info *h);
static inline u32 next_command(struct ctlr_info *h);
static int __devinit hpsa_find_cfg_addrs(struct pci_dev *pdev,
        void __iomem *vaddr, u32 *cfg_base_addr, u64 *cfg_base_addr_index,
        u64 *cfg_offset);
static int __devinit hpsa_pci_find_memory_BAR(struct pci_dev *pdev,
        unsigned long *memory_bar);
static int __devinit hpsa_lookup_board_id(struct pci_dev *pdev, u32 *board_id);
static int __devinit hpsa_wait_for_board_state(struct pci_dev *pdev,
        void __iomem *vaddr, int wait_for_ready);
#define BOARD_NOT_READY 0
#define BOARD_READY 1

static inline struct ctlr_info *sdev_to_hba(struct scsi_device *sdev)
{
        unsigned long *priv = shost_priv(sdev->host);
        return (struct ctlr_info *) *priv;
}

static inline struct ctlr_info *shost_to_hba(struct Scsi_Host *sh)
{
        unsigned long *priv = shost_priv(sh);
        return (struct ctlr_info *) *priv;
}

static int check_for_unit_attention(struct ctlr_info *h,
        struct CommandList *c)
{
        if (c->err_info->SenseInfo[2] != UNIT_ATTENTION)
                return 0;

        switch (c->err_info->SenseInfo[12]) {
        case STATE_CHANGED:
                dev_warn(&h->pdev->dev, "hpsa%d: a state change "
                        "detected, command retried\n", h->ctlr);
                break;
        case LUN_FAILED:
                dev_warn(&h->pdev->dev, "hpsa%d: LUN failure "
                        "detected, action required\n", h->ctlr);
                break;
        case REPORT_LUNS_CHANGED:
                dev_warn(&h->pdev->dev, "hpsa%d: report LUN data "
                        "changed, action required\n", h->ctlr);
                /*
                 * Note: this REPORT_LUNS_CHANGED condition only occurs on
                 * the MSA2012.
                 */
                break;
        case POWER_OR_RESET:
                dev_warn(&h->pdev->dev, "hpsa%d: a power on "
                        "or device reset detected\n", h->ctlr);
                break;
        case UNIT_ATTENTION_CLEARED:
                dev_warn(&h->pdev->dev, "hpsa%d: unit attention "
                        "cleared by another initiator\n", h->ctlr);
                break;
        default:
                dev_warn(&h->pdev->dev, "hpsa%d: unknown "
                        "unit attention detected\n", h->ctlr);
                break;
        }
        return 1;
}

static ssize_t host_store_rescan(struct device *dev,
        struct device_attribute *attr,
        const char *buf, size_t count)
{
        struct ctlr_info *h;
        struct Scsi_Host *shost = class_to_shost(dev);
        h = shost_to_hba(shost);
        hpsa_scan_start(h->scsi_host);
        return count;
}

static ssize_t host_show_firmware_revision(struct device *dev,
        struct device_attribute *attr, char *buf)
{
        struct ctlr_info *h;
        struct Scsi_Host *shost = class_to_shost(dev);
        unsigned char *fwrev;

        h = shost_to_hba(shost);
        if (!h->hba_inquiry_data)
                return 0;
        fwrev = &h->hba_inquiry_data[32];
        return snprintf(buf, 20, "%c%c%c%c\n",
                fwrev[0], fwrev[1], fwrev[2], fwrev[3]);
}

static ssize_t host_show_commands_outstanding(struct device *dev,
        struct device_attribute *attr, char *buf)
{
        struct Scsi_Host *shost = class_to_shost(dev);
        struct ctlr_info *h = shost_to_hba(shost);

        return snprintf(buf, 20, "%d\n", h->commands_outstanding);
}

static ssize_t host_show_transport_mode(struct device *dev,
        struct device_attribute *attr, char *buf)
{
        struct ctlr_info *h;
        struct Scsi_Host *shost = class_to_shost(dev);

        h = shost_to_hba(shost);
        return snprintf(buf, 20, "%s\n",
                h->transMethod & CFGTBL_Trans_Performant ?
                        "performant" : "simple");
}

/* List of controllers which cannot be hard reset on kexec with reset_devices */
static u32 unresettable_controller[] = {
        0x324a103C, /* Smart Array P712m */
        0x324b103C, /* Smart Array P711m */
        0x3223103C, /* Smart Array P800 */
        0x3234103C, /* Smart Array P400 */
        0x3235103C, /* Smart Array P400i */
        0x3211103C, /* Smart Array E200i */
        0x3212103C, /* Smart Array E200 */
        0x3213103C, /* Smart Array E200i */
        0x3214103C, /* Smart Array E200i */
        0x3215103C, /* Smart Array E200i */
        0x3237103C, /* Smart Array E500 */
        0x323D103C, /* Smart Array P700m */
        0x409C0E11, /* Smart Array 6400 */
        0x409D0E11, /* Smart Array 6400 EM */
};
/* List of controllers which cannot even be soft reset */
static u32 soft_unresettable_controller[] = {
        /* Exclude 640x boards.  These are two pci devices in one slot
         * which share a battery backed cache module.  One controls the
         * cache, the other accesses the cache through the one that controls
         * it.  If we reset the one controlling the cache, the other will
         * likely not be happy.  Just forbid resetting this conjoined mess.
         * The 640x isn't really supported by hpsa anyway.
         */
        0x409C0E11, /* Smart Array 6400 */
        0x409D0E11, /* Smart Array 6400 EM */
};

static int ctlr_is_hard_resettable(u32 board_id)
{
        int i;

        for (i = 0; i < ARRAY_SIZE(unresettable_controller); i++)
                if (unresettable_controller[i] == board_id)
                        return 0;
        return 1;
}

static int ctlr_is_soft_resettable(u32 board_id)
{
        int i;

        for (i = 0; i < ARRAY_SIZE(soft_unresettable_controller); i++)
                if (soft_unresettable_controller[i] == board_id)
                        return 0;
        return 1;
}

static int ctlr_is_resettable(u32 board_id)
{
        return ctlr_is_hard_resettable(board_id) ||
                ctlr_is_soft_resettable(board_id);
}

static ssize_t host_show_resettable(struct device *dev,
        struct device_attribute *attr, char *buf)
{
        struct ctlr_info *h;
        struct Scsi_Host *shost = class_to_shost(dev);

        h = shost_to_hba(shost);
        return snprintf(buf, 20, "%d\n", ctlr_is_resettable(h->board_id));
}

static inline int is_logical_dev_addr_mode(unsigned char scsi3addr[])
{
        return (scsi3addr[3] & 0xC0) == 0x40;
}
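/*
 * Illustration (not part of the driver logic): byte 3 of the 8-byte
 * CISS LUN address carries the addressing mode in its two high bits.
 * (scsi3addr[3] & 0xC0) == 0x40 marks logical volume addressing, so
 * e.g. scsi3addr[3] == 0x41 denotes a logical volume, while 0x00
 * (peripheral) and 0xC0 (masked physical, skipped during rescan in
 * hpsa_update_scsi_devices() below) do not.
 */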
static const char *raid_label[] = { "0", "4", "1(1+0)", "5", "5+1", "ADG",
        "UNKNOWN"
};
#define RAID_UNKNOWN (ARRAY_SIZE(raid_label) - 1)

static ssize_t raid_level_show(struct device *dev,
        struct device_attribute *attr, char *buf)
{
        ssize_t l = 0;
        unsigned char rlevel;
        struct ctlr_info *h;
        struct scsi_device *sdev;
        struct hpsa_scsi_dev_t *hdev;
        unsigned long flags;

        sdev = to_scsi_device(dev);
        h = sdev_to_hba(sdev);
        spin_lock_irqsave(&h->lock, flags);
        hdev = sdev->hostdata;
        if (!hdev) {
                spin_unlock_irqrestore(&h->lock, flags);
                return -ENODEV;
        }

        /* Is this even a logical drive? */
        if (!is_logical_dev_addr_mode(hdev->scsi3addr)) {
                spin_unlock_irqrestore(&h->lock, flags);
                l = snprintf(buf, PAGE_SIZE, "N/A\n");
                return l;
        }

        rlevel = hdev->raid_level;
        spin_unlock_irqrestore(&h->lock, flags);
        if (rlevel > RAID_UNKNOWN)
                rlevel = RAID_UNKNOWN;
        l = snprintf(buf, PAGE_SIZE, "RAID %s\n", raid_label[rlevel]);
        return l;
}

static ssize_t lunid_show(struct device *dev,
        struct device_attribute *attr, char *buf)
{
        struct ctlr_info *h;
        struct scsi_device *sdev;
        struct hpsa_scsi_dev_t *hdev;
        unsigned long flags;
        unsigned char lunid[8];

        sdev = to_scsi_device(dev);
        h = sdev_to_hba(sdev);
        spin_lock_irqsave(&h->lock, flags);
        hdev = sdev->hostdata;
        if (!hdev) {
                spin_unlock_irqrestore(&h->lock, flags);
                return -ENODEV;
        }
        memcpy(lunid, hdev->scsi3addr, sizeof(lunid));
        spin_unlock_irqrestore(&h->lock, flags);
        return snprintf(buf, 20, "0x%02x%02x%02x%02x%02x%02x%02x%02x\n",
                lunid[0], lunid[1], lunid[2], lunid[3],
                lunid[4], lunid[5], lunid[6], lunid[7]);
}

static ssize_t unique_id_show(struct device *dev,
        struct device_attribute *attr, char *buf)
{
        struct ctlr_info *h;
        struct scsi_device *sdev;
        struct hpsa_scsi_dev_t *hdev;
        unsigned long flags;
        unsigned char sn[16];

        sdev = to_scsi_device(dev);
        h = sdev_to_hba(sdev);
        spin_lock_irqsave(&h->lock, flags);
        hdev = sdev->hostdata;
        if (!hdev) {
                spin_unlock_irqrestore(&h->lock, flags);
                return -ENODEV;
        }
        memcpy(sn, hdev->device_id, sizeof(sn));
        spin_unlock_irqrestore(&h->lock, flags);
        return snprintf(buf, 16 * 2 + 2,
                "%02X%02X%02X%02X%02X%02X%02X%02X"
                "%02X%02X%02X%02X%02X%02X%02X%02X\n",
                sn[0], sn[1], sn[2], sn[3],
                sn[4], sn[5], sn[6], sn[7],
                sn[8], sn[9], sn[10], sn[11],
                sn[12], sn[13], sn[14], sn[15]);
}

static DEVICE_ATTR(raid_level, S_IRUGO, raid_level_show, NULL);
static DEVICE_ATTR(lunid, S_IRUGO, lunid_show, NULL);
static DEVICE_ATTR(unique_id, S_IRUGO, unique_id_show, NULL);
static DEVICE_ATTR(rescan, S_IWUSR, NULL, host_store_rescan);
static DEVICE_ATTR(firmware_revision, S_IRUGO,
        host_show_firmware_revision, NULL);
static DEVICE_ATTR(commands_outstanding, S_IRUGO,
        host_show_commands_outstanding, NULL);
static DEVICE_ATTR(transport_mode, S_IRUGO,
        host_show_transport_mode, NULL);
static DEVICE_ATTR(resettable, S_IRUGO,
        host_show_resettable, NULL);

static struct device_attribute *hpsa_sdev_attrs[] = {
        &dev_attr_raid_level,
        &dev_attr_lunid,
        &dev_attr_unique_id,
        NULL,
};

static struct device_attribute *hpsa_shost_attrs[] = {
        &dev_attr_rescan,
        &dev_attr_firmware_revision,
        &dev_attr_commands_outstanding,
        &dev_attr_transport_mode,
        &dev_attr_resettable,
        NULL,
};

static struct scsi_host_template hpsa_driver_template = {
        .module                  = THIS_MODULE,
        .name                    = "hpsa",
        .proc_name               = "hpsa",
        .queuecommand            = hpsa_scsi_queue_command,
        .scan_start              = hpsa_scan_start,
        .scan_finished           = hpsa_scan_finished,
        .change_queue_depth      = hpsa_change_queue_depth,
        .this_id                 = -1,
        .use_clustering          = ENABLE_CLUSTERING,
        .eh_device_reset_handler = hpsa_eh_device_reset_handler,
        .ioctl                   = hpsa_ioctl,
        .slave_alloc             = hpsa_slave_alloc,
        .slave_destroy           = hpsa_slave_destroy,
#ifdef CONFIG_COMPAT
        .compat_ioctl            = hpsa_compat_ioctl,
#endif
        .sdev_attrs              = hpsa_sdev_attrs,
        .shost_attrs             = hpsa_shost_attrs,
        .max_sectors             = 8192,
};


/* Enqueuing and dequeuing functions for cmdlists. */
static inline void addQ(struct list_head *list, struct CommandList *c)
{
        list_add_tail(&c->list, list);
}

static inline u32 next_command(struct ctlr_info *h)
{
        u32 a;

        if (unlikely(!(h->transMethod & CFGTBL_Trans_Performant)))
                return h->access.command_completed(h);

        if ((*(h->reply_pool_head) & 1) == (h->reply_pool_wraparound)) {
                a = *(h->reply_pool_head); /* Next cmd in ring buffer */
                (h->reply_pool_head)++;
                h->commands_outstanding--;
        } else {
                a = FIFO_EMPTY;
        }
        /* Check for wraparound */
        if (h->reply_pool_head == (h->reply_pool + h->max_commands)) {
                h->reply_pool_head = h->reply_pool;
                h->reply_pool_wraparound ^= 1;
        }
        return a;
}

/* set_performant_mode: Modify the tag for cciss performant
 * set bit 0 for pull model, bits 3-1 for block fetch
 * register number
 */
static void set_performant_mode(struct ctlr_info *h, struct CommandList *c)
{
        if (likely(h->transMethod & CFGTBL_Trans_Performant))
                c->busaddr |= 1 | (h->blockFetchTable[c->Header.SGList] << 1);
}
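/*
 * Worked example (illustrative): a command whose Header.SGList is 4,
 * with h->blockFetchTable[4] == 2, gets its tag modified as
 *
 *      c->busaddr |= 1 | (2 << 1);
 *
 * Bit 0 selects the pull model; bits 3-1 carry the block fetch
 * register number, which tells the controller how much of the command
 * to fetch in one go.
 */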
static void enqueue_cmd_and_start_io(struct ctlr_info *h,
        struct CommandList *c)
{
        unsigned long flags;

        set_performant_mode(h, c);
        spin_lock_irqsave(&h->lock, flags);
        addQ(&h->reqQ, c);
        h->Qdepth++;
        start_io(h);
        spin_unlock_irqrestore(&h->lock, flags);
}

static inline void removeQ(struct CommandList *c)
{
        if (WARN_ON(list_empty(&c->list)))
                return;
        list_del_init(&c->list);
}

static inline int is_hba_lunid(unsigned char scsi3addr[])
{
        return memcmp(scsi3addr, RAID_CTLR_LUNID, 8) == 0;
}

static inline int is_scsi_rev_5(struct ctlr_info *h)
{
        if (!h->hba_inquiry_data)
                return 0;
        if ((h->hba_inquiry_data[2] & 0x07) == 5)
                return 1;
        return 0;
}

static int hpsa_find_target_lun(struct ctlr_info *h,
        unsigned char scsi3addr[], int bus, int *target, int *lun)
{
        /* finds an unused bus, target, lun for a new physical device
         * assumes h->devlock is held
         */
        int i, found = 0;
        DECLARE_BITMAP(lun_taken, HPSA_MAX_DEVICES);

        memset(&lun_taken[0], 0, HPSA_MAX_DEVICES >> 3);

        for (i = 0; i < h->ndevices; i++) {
                if (h->dev[i]->bus == bus && h->dev[i]->target != -1)
                        set_bit(h->dev[i]->target, lun_taken);
        }

        for (i = 0; i < HPSA_MAX_DEVICES; i++) {
                if (!test_bit(i, lun_taken)) {
                        /* *bus = 1; */
                        *target = i;
                        *lun = 0;
                        found = 1;
                        break;
                }
        }
        return !found;
}

/* Add an entry into h->dev[] array. */
static int hpsa_scsi_add_entry(struct ctlr_info *h, int hostno,
        struct hpsa_scsi_dev_t *device,
        struct hpsa_scsi_dev_t *added[], int *nadded)
{
        /* assumes h->devlock is held */
        int n = h->ndevices;
        int i;
        unsigned char addr1[8], addr2[8];
        struct hpsa_scsi_dev_t *sd;

        if (n >= HPSA_MAX_DEVICES) {
                dev_err(&h->pdev->dev, "too many devices, some will be "
                        "inaccessible.\n");
                return -1;
        }

        /* physical devices do not have lun or target assigned until now. */
        if (device->lun != -1)
                /* Logical device, lun is already assigned. */
                goto lun_assigned;

        /* If this device is a non-zero lun of a multi-lun device,
         * byte 4 of the 8-byte LUN addr will contain the logical
         * unit no, zero otherwise.
         */
        if (device->scsi3addr[4] == 0) {
                /* This is not a non-zero lun of a multi-lun device */
                if (hpsa_find_target_lun(h, device->scsi3addr,
                        device->bus, &device->target, &device->lun) != 0)
                        return -1;
                goto lun_assigned;
        }

        /* This is a non-zero lun of a multi-lun device.
         * Search through our list and find the device which
         * has the same 8 byte LUN address, excepting byte 4.
         * Assign the same bus and target for this new LUN.
         * Use the logical unit number from the firmware.
         */
        memcpy(addr1, device->scsi3addr, 8);
        addr1[4] = 0;
        for (i = 0; i < n; i++) {
                sd = h->dev[i];
                memcpy(addr2, sd->scsi3addr, 8);
                addr2[4] = 0;
                /* differ only in byte 4? */
                if (memcmp(addr1, addr2, 8) == 0) {
                        device->bus = sd->bus;
                        device->target = sd->target;
                        device->lun = device->scsi3addr[4];
                        break;
                }
        }
        if (device->lun == -1) {
                dev_warn(&h->pdev->dev, "physical device with no LUN=0,"
                        " suspect firmware bug or unsupported hardware "
                        "configuration.\n");
                return -1;
        }

lun_assigned:

        h->dev[n] = device;
        h->ndevices++;
        added[*nadded] = device;
        (*nadded)++;

        /* initially, (before registering with scsi layer) we don't
         * know our hostno and we don't want to print anything first
         * time anyway (the scsi layer's inquiries will show that info)
         */
        /* if (hostno != -1) */
        dev_info(&h->pdev->dev, "%s device c%db%dt%dl%d added.\n",
                scsi_device_type(device->devtype), hostno,
                device->bus, device->target, device->lun);
        return 0;
}

/* Replace an entry from h->dev[] array. */
static void hpsa_scsi_replace_entry(struct ctlr_info *h, int hostno,
        int entry, struct hpsa_scsi_dev_t *new_entry,
        struct hpsa_scsi_dev_t *added[], int *nadded,
        struct hpsa_scsi_dev_t *removed[], int *nremoved)
{
        /* assumes h->devlock is held */
        BUG_ON(entry < 0 || entry >= HPSA_MAX_DEVICES);
        removed[*nremoved] = h->dev[entry];
        (*nremoved)++;

        /*
         * New physical devices won't have target/lun assigned yet
         * so we need to preserve the values in the slot we are replacing.
         */
        if (new_entry->target == -1) {
                new_entry->target = h->dev[entry]->target;
                new_entry->lun = h->dev[entry]->lun;
        }

        h->dev[entry] = new_entry;
        added[*nadded] = new_entry;
        (*nadded)++;
        dev_info(&h->pdev->dev, "%s device c%db%dt%dl%d changed.\n",
                scsi_device_type(new_entry->devtype), hostno, new_entry->bus,
                new_entry->target, new_entry->lun);
}
/* Remove an entry from h->dev[] array. */
static void hpsa_scsi_remove_entry(struct ctlr_info *h, int hostno, int entry,
        struct hpsa_scsi_dev_t *removed[], int *nremoved)
{
        /* assumes h->devlock is held */
        int i;
        struct hpsa_scsi_dev_t *sd;

        BUG_ON(entry < 0 || entry >= HPSA_MAX_DEVICES);

        sd = h->dev[entry];
        removed[*nremoved] = h->dev[entry];
        (*nremoved)++;

        for (i = entry; i < h->ndevices-1; i++)
                h->dev[i] = h->dev[i+1];
        h->ndevices--;
        dev_info(&h->pdev->dev, "%s device c%db%dt%dl%d removed.\n",
                scsi_device_type(sd->devtype), hostno, sd->bus, sd->target,
                sd->lun);
}

#define SCSI3ADDR_EQ(a, b) ( \
        (a)[7] == (b)[7] && \
        (a)[6] == (b)[6] && \
        (a)[5] == (b)[5] && \
        (a)[4] == (b)[4] && \
        (a)[3] == (b)[3] && \
        (a)[2] == (b)[2] && \
        (a)[1] == (b)[1] && \
        (a)[0] == (b)[0])

static void fixup_botched_add(struct ctlr_info *h,
        struct hpsa_scsi_dev_t *added)
{
        /* called when scsi_add_device fails in order to re-adjust
         * h->dev[] to match the mid layer's view.
         */
        unsigned long flags;
        int i, j;

        spin_lock_irqsave(&h->lock, flags);
        for (i = 0; i < h->ndevices; i++) {
                if (h->dev[i] == added) {
                        for (j = i; j < h->ndevices-1; j++)
                                h->dev[j] = h->dev[j+1];
                        h->ndevices--;
                        break;
                }
        }
        spin_unlock_irqrestore(&h->lock, flags);
        kfree(added);
}

static inline int device_is_the_same(struct hpsa_scsi_dev_t *dev1,
        struct hpsa_scsi_dev_t *dev2)
{
        /* we compare everything except lun and target as these
         * are not yet assigned.  Compare parts likely
         * to differ first
         */
        if (memcmp(dev1->scsi3addr, dev2->scsi3addr,
                sizeof(dev1->scsi3addr)) != 0)
                return 0;
        if (memcmp(dev1->device_id, dev2->device_id,
                sizeof(dev1->device_id)) != 0)
                return 0;
        if (memcmp(dev1->model, dev2->model, sizeof(dev1->model)) != 0)
                return 0;
        if (memcmp(dev1->vendor, dev2->vendor, sizeof(dev1->vendor)) != 0)
                return 0;
        if (dev1->devtype != dev2->devtype)
                return 0;
        if (dev1->bus != dev2->bus)
                return 0;
        return 1;
}

/* Find needle in haystack.  If exact match found, return DEVICE_SAME,
 * and return needle location in *index.  If scsi3addr matches, but not
 * vendor, model, serial num, etc. return DEVICE_CHANGED, and return needle
 * location in *index.  If needle not found, return DEVICE_NOT_FOUND.
 */
static int hpsa_scsi_find_entry(struct hpsa_scsi_dev_t *needle,
        struct hpsa_scsi_dev_t *haystack[], int haystack_size,
        int *index)
{
        int i;
#define DEVICE_NOT_FOUND 0
#define DEVICE_CHANGED 1
#define DEVICE_SAME 2
        for (i = 0; i < haystack_size; i++) {
                if (haystack[i] == NULL) /* previously removed. */
                        continue;
                if (SCSI3ADDR_EQ(needle->scsi3addr, haystack[i]->scsi3addr)) {
                        *index = i;
                        if (device_is_the_same(needle, haystack[i]))
                                return DEVICE_SAME;
                        else
                                return DEVICE_CHANGED;
                }
        }
        *index = -1;
        return DEVICE_NOT_FOUND;
}
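/*
 * Example (illustrative) of how the table is reconciled below: if
 * h->dev[] holds {A, B, C} and a rescan yields sd[] = {A, B', D},
 * where B' has B's scsi3addr but different inquiry data, then
 * adjust_hpsa_scsi_table() removes C, replaces B with B', adds D, and
 * afterwards notifies the SCSI midlayer of the removal and the adds.
 */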
static void adjust_hpsa_scsi_table(struct ctlr_info *h, int hostno,
        struct hpsa_scsi_dev_t *sd[], int nsds)
{
        /* sd contains scsi3 addresses and devtypes, and inquiry
         * data.  This function takes what's in sd to be the current
         * reality and updates h->dev[] to reflect that reality.
         */
        int i, entry, device_change, changes = 0;
        struct hpsa_scsi_dev_t *csd;
        unsigned long flags;
        struct hpsa_scsi_dev_t **added, **removed;
        int nadded, nremoved;
        struct Scsi_Host *sh = NULL;

        added = kzalloc(sizeof(*added) * HPSA_MAX_DEVICES, GFP_KERNEL);
        removed = kzalloc(sizeof(*removed) * HPSA_MAX_DEVICES, GFP_KERNEL);

        if (!added || !removed) {
                dev_warn(&h->pdev->dev, "out of memory in "
                        "adjust_hpsa_scsi_table\n");
                goto free_and_out;
        }

        spin_lock_irqsave(&h->devlock, flags);

        /* find any devices in h->dev[] that are not in
         * sd[] and remove them from h->dev[], and for any
         * devices which have changed, remove the old device
         * info and add the new device info.
         */
        i = 0;
        nremoved = 0;
        nadded = 0;
        while (i < h->ndevices) {
                csd = h->dev[i];
                device_change = hpsa_scsi_find_entry(csd, sd, nsds, &entry);
                if (device_change == DEVICE_NOT_FOUND) {
                        changes++;
                        hpsa_scsi_remove_entry(h, hostno, i,
                                removed, &nremoved);
                        continue; /* remove ^^^, hence i not incremented */
                } else if (device_change == DEVICE_CHANGED) {
                        changes++;
                        hpsa_scsi_replace_entry(h, hostno, i, sd[entry],
                                added, &nadded, removed, &nremoved);
                        /* Set it to NULL to prevent it from being freed
                         * at the bottom of hpsa_update_scsi_devices()
                         */
                        sd[entry] = NULL;
                }
                i++;
        }

        /* Now, make sure every device listed in sd[] is also
         * listed in h->dev[], adding them if they aren't found
         */

        for (i = 0; i < nsds; i++) {
                if (!sd[i]) /* if already added above. */
                        continue;
                device_change = hpsa_scsi_find_entry(sd[i], h->dev,
                        h->ndevices, &entry);
                if (device_change == DEVICE_NOT_FOUND) {
                        changes++;
                        if (hpsa_scsi_add_entry(h, hostno, sd[i],
                                added, &nadded) != 0)
                                break;
                        sd[i] = NULL; /* prevent from being freed later. */
                } else if (device_change == DEVICE_CHANGED) {
                        /* should never happen... */
                        changes++;
                        dev_warn(&h->pdev->dev,
                                "device unexpectedly changed.\n");
                        /* but if it does happen, we just ignore that device */
                }
        }
        spin_unlock_irqrestore(&h->devlock, flags);

        /* Don't notify scsi mid layer of any changes the first time through
         * (or if there are no changes) scsi_scan_host will do it later the
         * first time through.
         */
        if (hostno == -1 || !changes)
                goto free_and_out;

        sh = h->scsi_host;
        /* Notify scsi mid layer of any removed devices */
        for (i = 0; i < nremoved; i++) {
                struct scsi_device *sdev =
                        scsi_device_lookup(sh, removed[i]->bus,
                                removed[i]->target, removed[i]->lun);
                if (sdev != NULL) {
                        scsi_remove_device(sdev);
                        scsi_device_put(sdev);
                } else {
                        /* We don't expect to get here.
                         * future cmds to this device will get selection
                         * timeout as if the device was gone.
                         */
                        dev_warn(&h->pdev->dev, "didn't find c%db%dt%dl%d "
                                " for removal.", hostno, removed[i]->bus,
                                removed[i]->target, removed[i]->lun);
                }
                kfree(removed[i]);
                removed[i] = NULL;
        }

        /* Notify scsi mid layer of any added devices */
        for (i = 0; i < nadded; i++) {
                if (scsi_add_device(sh, added[i]->bus,
                        added[i]->target, added[i]->lun) == 0)
                        continue;
                dev_warn(&h->pdev->dev, "scsi_add_device c%db%dt%dl%d failed, "
                        "device not added.\n", hostno, added[i]->bus,
                        added[i]->target, added[i]->lun);
                /* now we have to remove it from h->dev,
                 * since it didn't get added to scsi mid layer
                 */
                fixup_botched_add(h, added[i]);
        }

free_and_out:
        kfree(added);
        kfree(removed);
}

/*
 * Lookup bus/target/lun and return corresponding struct hpsa_scsi_dev_t *
 * Assumes h->devlock is held.
 */
static struct hpsa_scsi_dev_t *lookup_hpsa_scsi_dev(struct ctlr_info *h,
        int bus, int target, int lun)
{
        int i;
        struct hpsa_scsi_dev_t *sd;

        for (i = 0; i < h->ndevices; i++) {
                sd = h->dev[i];
                if (sd->bus == bus && sd->target == target && sd->lun == lun)
                        return sd;
        }
        return NULL;
}

/* link sdev->hostdata to our per-device structure. */
static int hpsa_slave_alloc(struct scsi_device *sdev)
{
        struct hpsa_scsi_dev_t *sd;
        unsigned long flags;
        struct ctlr_info *h;

        h = sdev_to_hba(sdev);
        spin_lock_irqsave(&h->devlock, flags);
        sd = lookup_hpsa_scsi_dev(h, sdev_channel(sdev),
                sdev_id(sdev), sdev->lun);
        if (sd != NULL)
                sdev->hostdata = sd;
        spin_unlock_irqrestore(&h->devlock, flags);
        return 0;
}

static void hpsa_slave_destroy(struct scsi_device *sdev)
{
        /* nothing to do. */
}
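/*
 * Scatter-gather chaining: commands needing more SG descriptors than
 * fit in the command itself (h->max_cmd_sg_entries) spill the excess
 * into a per-command chain block allocated below; the last in-command
 * descriptor is then rewritten to point at that block.
 */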
static void hpsa_free_sg_chain_blocks(struct ctlr_info *h)
{
        int i;

        if (!h->cmd_sg_list)
                return;
        for (i = 0; i < h->nr_cmds; i++) {
                kfree(h->cmd_sg_list[i]);
                h->cmd_sg_list[i] = NULL;
        }
        kfree(h->cmd_sg_list);
        h->cmd_sg_list = NULL;
}

static int hpsa_allocate_sg_chain_blocks(struct ctlr_info *h)
{
        int i;

        if (h->chainsize <= 0)
                return 0;

        h->cmd_sg_list = kzalloc(sizeof(*h->cmd_sg_list) * h->nr_cmds,
                GFP_KERNEL);
        if (!h->cmd_sg_list)
                return -ENOMEM;
        for (i = 0; i < h->nr_cmds; i++) {
                h->cmd_sg_list[i] = kmalloc(sizeof(*h->cmd_sg_list[i]) *
                        h->chainsize, GFP_KERNEL);
                if (!h->cmd_sg_list[i])
                        goto clean;
        }
        return 0;

clean:
        hpsa_free_sg_chain_blocks(h);
        return -ENOMEM;
}

static void hpsa_map_sg_chain_block(struct ctlr_info *h,
        struct CommandList *c)
{
        struct SGDescriptor *chain_sg, *chain_block;
        u64 temp64;

        chain_sg = &c->SG[h->max_cmd_sg_entries - 1];
        chain_block = h->cmd_sg_list[c->cmdindex];
        chain_sg->Ext = HPSA_SG_CHAIN;
        chain_sg->Len = sizeof(*chain_sg) *
                (c->Header.SGTotal - h->max_cmd_sg_entries);
        temp64 = pci_map_single(h->pdev, chain_block, chain_sg->Len,
                PCI_DMA_TODEVICE);
        chain_sg->Addr.lower = (u32) (temp64 & 0x0FFFFFFFFULL);
        chain_sg->Addr.upper = (u32) ((temp64 >> 32) & 0x0FFFFFFFFULL);
}

static void hpsa_unmap_sg_chain_block(struct ctlr_info *h,
        struct CommandList *c)
{
        struct SGDescriptor *chain_sg;
        union u64bit temp64;

        if (c->Header.SGTotal <= h->max_cmd_sg_entries)
                return;

        chain_sg = &c->SG[h->max_cmd_sg_entries - 1];
        temp64.val32.lower = chain_sg->Addr.lower;
        temp64.val32.upper = chain_sg->Addr.upper;
        pci_unmap_single(h->pdev, temp64.val, chain_sg->Len, PCI_DMA_TODEVICE);
}
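/*
 * complete_scsi_command() below runs at command completion: it undoes
 * the DMA mappings, copies back sense data, and translates the
 * controller's CommandStatus into a SCSI midlayer result code (e.g.
 * CMD_UNSOLICITED_ABORT becomes DID_SOFT_ERROR so the command is
 * retried).
 */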
static void complete_scsi_command(struct CommandList *cp)
{
        struct scsi_cmnd *cmd;
        struct ctlr_info *h;
        struct ErrorInfo *ei;

        unsigned char sense_key;
        unsigned char asc;      /* additional sense code */
        unsigned char ascq;     /* additional sense code qualifier */
        unsigned long sense_data_size;

        ei = cp->err_info;
        cmd = (struct scsi_cmnd *) cp->scsi_cmd;
        h = cp->h;

        scsi_dma_unmap(cmd); /* undo the DMA mappings */
        if (cp->Header.SGTotal > h->max_cmd_sg_entries)
                hpsa_unmap_sg_chain_block(h, cp);

        cmd->result = (DID_OK << 16);           /* host byte */
        cmd->result |= (COMMAND_COMPLETE << 8); /* msg byte */
        cmd->result |= ei->ScsiStatus;

        /* copy the sense data whether we need to or not. */
        if (SCSI_SENSE_BUFFERSIZE < sizeof(ei->SenseInfo))
                sense_data_size = SCSI_SENSE_BUFFERSIZE;
        else
                sense_data_size = sizeof(ei->SenseInfo);
        if (ei->SenseLen < sense_data_size)
                sense_data_size = ei->SenseLen;

        memcpy(cmd->sense_buffer, ei->SenseInfo, sense_data_size);
        scsi_set_resid(cmd, ei->ResidualCnt);

        if (ei->CommandStatus == 0) {
                cmd->scsi_done(cmd);
                cmd_free(h, cp);
                return;
        }

        /* an error has occurred */
        switch (ei->CommandStatus) {

        case CMD_TARGET_STATUS:
                if (ei->ScsiStatus) {
                        /* Get sense key */
                        sense_key = 0xf & ei->SenseInfo[2];
                        /* Get additional sense code */
                        asc = ei->SenseInfo[12];
                        /* Get additional sense code qualifier */
                        ascq = ei->SenseInfo[13];
                }

                if (ei->ScsiStatus == SAM_STAT_CHECK_CONDITION) {
                        if (check_for_unit_attention(h, cp)) {
                                cmd->result = DID_SOFT_ERROR << 16;
                                break;
                        }
                        if (sense_key == ILLEGAL_REQUEST) {
                                /*
                                 * SCSI REPORT_LUNS is commonly unsupported on
                                 * Smart Array.  Suppress noisy complaint.
                                 */
                                if (cp->Request.CDB[0] == REPORT_LUNS)
                                        break;

                                /* If ASC/ASCQ indicate Logical Unit
                                 * Not Supported condition,
                                 */
                                if ((asc == 0x25) && (ascq == 0x0)) {
                                        dev_warn(&h->pdev->dev, "cp %p "
                                                "has check condition\n", cp);
                                        break;
                                }
                        }

                        if (sense_key == NOT_READY) {
                                /* If Sense is Not Ready, Logical Unit
                                 * Not ready, Manual Intervention
                                 * required
                                 */
                                if ((asc == 0x04) && (ascq == 0x03)) {
                                        dev_warn(&h->pdev->dev, "cp %p "
                                                "has check condition: unit "
                                                "not ready, manual "
                                                "intervention required\n", cp);
                                        break;
                                }
                        }
                        if (sense_key == ABORTED_COMMAND) {
                                /* Aborted command is retryable */
                                dev_warn(&h->pdev->dev, "cp %p "
                                        "has check condition: aborted command: "
                                        "ASC: 0x%x, ASCQ: 0x%x\n",
                                        cp, asc, ascq);
                                cmd->result = DID_SOFT_ERROR << 16;
                                break;
                        }
                        /* Must be some other type of check condition */
                        dev_warn(&h->pdev->dev, "cp %p has check condition: "
                                        "unknown type: "
                                        "Sense: 0x%x, ASC: 0x%x, ASCQ: 0x%x, "
                                        "Returning result: 0x%x, "
                                        "cmd=[%02x %02x %02x %02x %02x "
                                        "%02x %02x %02x %02x %02x %02x "
                                        "%02x %02x %02x %02x %02x]\n",
                                        cp, sense_key, asc, ascq,
                                        cmd->result,
                                        cmd->cmnd[0], cmd->cmnd[1],
                                        cmd->cmnd[2], cmd->cmnd[3],
                                        cmd->cmnd[4], cmd->cmnd[5],
                                        cmd->cmnd[6], cmd->cmnd[7],
                                        cmd->cmnd[8], cmd->cmnd[9],
                                        cmd->cmnd[10], cmd->cmnd[11],
                                        cmd->cmnd[12], cmd->cmnd[13],
                                        cmd->cmnd[14], cmd->cmnd[15]);
                        break;
                }


                /* Problem was not a check condition
                 * Pass it up to the upper layers...
                 */
                if (ei->ScsiStatus) {
                        dev_warn(&h->pdev->dev, "cp %p has status 0x%x "
                                "Sense: 0x%x, ASC: 0x%x, ASCQ: 0x%x, "
                                "Returning result: 0x%x\n",
                                cp, ei->ScsiStatus,
                                sense_key, asc, ascq,
                                cmd->result);
                } else {  /* scsi status is zero??? How??? */
                        dev_warn(&h->pdev->dev, "cp %p SCSI status was 0. "
                                "Returning no connection.\n", cp);

                        /* Ordinarily, this case should never happen,
                         * but there is a bug in some released firmware
                         * revisions that allows it to happen if, for
                         * example, a 4100 backplane loses power and
                         * the tape drive is in it.  We assume that
                         * it's a fatal error of some kind because we
                         * can't show that it wasn't.  We will make it
                         * look like selection timeout since that is
                         * the most common reason for this to occur,
                         * and it's severe enough.
                         */
                        cmd->result = DID_NO_CONNECT << 16;
                }
                break;

        case CMD_DATA_UNDERRUN: /* let mid layer handle it. */
                break;
        case CMD_DATA_OVERRUN:
                dev_warn(&h->pdev->dev, "cp %p has"
                        " completed with data overrun "
                        "reported\n", cp);
                break;
        case CMD_INVALID: {
                /* print_bytes(cp, sizeof(*cp), 1, 0);
                print_cmd(cp); */
                /* We get CMD_INVALID if you address a non-existent device
                 * instead of a selection timeout (no response).  You will
                 * see this if you yank out a drive, then try to access it.
                 * This is kind of a shame because it means that any other
                 * CMD_INVALID (e.g. driver bug) will get interpreted as a
                 * missing target. */
                cmd->result = DID_NO_CONNECT << 16;
                }
                break;
        case CMD_PROTOCOL_ERR:
                dev_warn(&h->pdev->dev, "cp %p has "
                        "protocol error\n", cp);
                break;
        case CMD_HARDWARE_ERR:
                cmd->result = DID_ERROR << 16;
                dev_warn(&h->pdev->dev, "cp %p had hardware error\n", cp);
                break;
        case CMD_CONNECTION_LOST:
                cmd->result = DID_ERROR << 16;
                dev_warn(&h->pdev->dev, "cp %p had connection lost\n", cp);
                break;
        case CMD_ABORTED:
                cmd->result = DID_ABORT << 16;
                dev_warn(&h->pdev->dev, "cp %p was aborted with status 0x%x\n",
                        cp, ei->ScsiStatus);
                break;
        case CMD_ABORT_FAILED:
                cmd->result = DID_ERROR << 16;
                dev_warn(&h->pdev->dev, "cp %p reports abort failed\n", cp);
                break;
        case CMD_UNSOLICITED_ABORT:
                cmd->result = DID_SOFT_ERROR << 16; /* retry the command */
                dev_warn(&h->pdev->dev, "cp %p aborted due to an unsolicited "
                        "abort\n", cp);
                break;
        case CMD_TIMEOUT:
                cmd->result = DID_TIME_OUT << 16;
                dev_warn(&h->pdev->dev, "cp %p timed out\n", cp);
                break;
        case CMD_UNABORTABLE:
                cmd->result = DID_ERROR << 16;
                dev_warn(&h->pdev->dev, "Command unabortable\n");
                break;
        default:
                cmd->result = DID_ERROR << 16;
                dev_warn(&h->pdev->dev, "cp %p returned unknown status %x\n",
                        cp, ei->CommandStatus);
        }
        cmd->scsi_done(cmd);
        cmd_free(h, cp);
}

static int hpsa_scsi_detect(struct ctlr_info *h)
{
        struct Scsi_Host *sh;
        int error;

        sh = scsi_host_alloc(&hpsa_driver_template, sizeof(h));
        if (sh == NULL)
                goto fail;

        sh->io_port = 0;
        sh->n_io_port = 0;
        sh->this_id = -1;
        sh->max_channel = 3;
        sh->max_cmd_len = MAX_COMMAND_SIZE;
        sh->max_lun = HPSA_MAX_LUN;
        sh->max_id = HPSA_MAX_LUN;
        sh->can_queue = h->nr_cmds;
        sh->cmd_per_lun = h->nr_cmds;
        sh->sg_tablesize = h->maxsgentries;
        h->scsi_host = sh;
        sh->hostdata[0] = (unsigned long) h;
        sh->irq = h->intr[h->intr_mode];
        sh->unique_id = sh->irq;
        error = scsi_add_host(sh, &h->pdev->dev);
        if (error)
                goto fail_host_put;
        scsi_scan_host(sh);
        return 0;

fail_host_put:
        dev_err(&h->pdev->dev, "hpsa_scsi_detect: scsi_add_host"
                " failed for controller %d\n", h->ctlr);
        scsi_host_put(sh);
        return error;
fail:
        dev_err(&h->pdev->dev, "hpsa_scsi_detect: scsi_host_alloc"
                " failed for controller %d\n", h->ctlr);
        return -ENOMEM;
}
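/*
 * Single-buffer DMA helpers: hpsa_map_one() maps one kernel buffer and
 * describes it in SG[0]; hpsa_pci_unmap() walks SG[] and unmaps
 * sg_used entries.  The 64-bit bus address is split across
 * Addr.lower/Addr.upper because the controller's SG descriptor stores
 * it as two 32-bit words.
 */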
static void hpsa_pci_unmap(struct pci_dev *pdev,
        struct CommandList *c, int sg_used, int data_direction)
{
        int i;
        union u64bit addr64;

        for (i = 0; i < sg_used; i++) {
                addr64.val32.lower = c->SG[i].Addr.lower;
                addr64.val32.upper = c->SG[i].Addr.upper;
                pci_unmap_single(pdev, (dma_addr_t) addr64.val, c->SG[i].Len,
                        data_direction);
        }
}

static void hpsa_map_one(struct pci_dev *pdev,
        struct CommandList *cp,
        unsigned char *buf,
        size_t buflen,
        int data_direction)
{
        u64 addr64;

        if (buflen == 0 || data_direction == PCI_DMA_NONE) {
                cp->Header.SGList = 0;
                cp->Header.SGTotal = 0;
                return;
        }

        addr64 = (u64) pci_map_single(pdev, buf, buflen, data_direction);
        cp->SG[0].Addr.lower =
                (u32) (addr64 & (u64) 0x00000000FFFFFFFF);
        cp->SG[0].Addr.upper =
                (u32) ((addr64 >> 32) & (u64) 0x00000000FFFFFFFF);
        cp->SG[0].Len = buflen;
        cp->Header.SGList = (u8) 1;   /* no. SGs contig in this cmd */
        cp->Header.SGTotal = (u16) 1; /* total sgs in this cmd list */
}

static inline void hpsa_scsi_do_simple_cmd_core(struct ctlr_info *h,
        struct CommandList *c)
{
        DECLARE_COMPLETION_ONSTACK(wait);

        c->waiting = &wait;
        enqueue_cmd_and_start_io(h, c);
        wait_for_completion(&wait);
}

static void hpsa_scsi_do_simple_cmd_core_if_no_lockup(struct ctlr_info *h,
        struct CommandList *c)
{
        unsigned long flags;

        /* If controller lockup detected, fake a hardware error. */
        spin_lock_irqsave(&h->lock, flags);
        if (unlikely(h->lockup_detected)) {
                spin_unlock_irqrestore(&h->lock, flags);
                c->err_info->CommandStatus = CMD_HARDWARE_ERR;
        } else {
                spin_unlock_irqrestore(&h->lock, flags);
                hpsa_scsi_do_simple_cmd_core(h, c);
        }
}

static void hpsa_scsi_do_simple_cmd_with_retry(struct ctlr_info *h,
        struct CommandList *c, int data_direction)
{
        int retry_count = 0;

        do {
                memset(c->err_info, 0, sizeof(*c->err_info));
                hpsa_scsi_do_simple_cmd_core(h, c);
                retry_count++;
        } while (check_for_unit_attention(h, c) && retry_count <= 3);
        hpsa_pci_unmap(h->pdev, c, 1, data_direction);
}
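/*
 * Example (illustrative): if a command issued through
 * hpsa_scsi_do_simple_cmd_with_retry() completes with a unit attention
 * (say POWER_OR_RESET after a controller reset), it is reissued up to
 * three more times before the caller inspects c->err_info.
 */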
static void hpsa_scsi_interpret_error(struct CommandList *cp)
{
        struct ErrorInfo *ei;
        struct device *d = &cp->h->pdev->dev;

        ei = cp->err_info;
        switch (ei->CommandStatus) {
        case CMD_TARGET_STATUS:
                dev_warn(d, "cmd %p has completed with errors\n", cp);
                dev_warn(d, "cmd %p has SCSI Status = %x\n", cp,
                        ei->ScsiStatus);
                if (ei->ScsiStatus == 0)
                        dev_warn(d, "SCSI status is abnormally zero.  "
                                "(probably indicates selection timeout "
                                "reported incorrectly due to a known "
                                "firmware bug, circa July, 2001.)\n");
                break;
        case CMD_DATA_UNDERRUN: /* let mid layer handle it. */
                dev_info(d, "UNDERRUN\n");
                break;
        case CMD_DATA_OVERRUN:
                dev_warn(d, "cp %p has completed with data overrun\n", cp);
                break;
        case CMD_INVALID: {
                /* controller unfortunately reports SCSI passthru's
                 * to non-existent targets as invalid commands.
                 */
                dev_warn(d, "cp %p is reported invalid (probably means "
                        "target device no longer present)\n", cp);
                /* print_bytes((unsigned char *) cp, sizeof(*cp), 1, 0);
                print_cmd(cp);  */
                }
                break;
        case CMD_PROTOCOL_ERR:
                dev_warn(d, "cp %p has protocol error\n", cp);
                break;
        case CMD_HARDWARE_ERR:
                /* cmd->result = DID_ERROR << 16; */
                dev_warn(d, "cp %p had hardware error\n", cp);
                break;
        case CMD_CONNECTION_LOST:
                dev_warn(d, "cp %p had connection lost\n", cp);
                break;
        case CMD_ABORTED:
                dev_warn(d, "cp %p was aborted\n", cp);
                break;
        case CMD_ABORT_FAILED:
                dev_warn(d, "cp %p reports abort failed\n", cp);
                break;
        case CMD_UNSOLICITED_ABORT:
                dev_warn(d, "cp %p aborted due to an unsolicited abort\n", cp);
                break;
        case CMD_TIMEOUT:
                dev_warn(d, "cp %p timed out\n", cp);
                break;
        case CMD_UNABORTABLE:
                dev_warn(d, "Command unabortable\n");
                break;
        default:
                dev_warn(d, "cp %p returned unknown status %x\n", cp,
                        ei->CommandStatus);
        }
}

static int hpsa_scsi_do_inquiry(struct ctlr_info *h, unsigned char *scsi3addr,
        unsigned char page, unsigned char *buf,
        unsigned char bufsize)
{
        int rc = IO_OK;
        struct CommandList *c;
        struct ErrorInfo *ei;

        c = cmd_special_alloc(h);

        if (c == NULL) {                        /* trouble... */
                dev_warn(&h->pdev->dev, "cmd_special_alloc returned NULL!\n");
                return -ENOMEM;
        }

        fill_cmd(c, HPSA_INQUIRY, h, buf, bufsize, page, scsi3addr, TYPE_CMD);
        hpsa_scsi_do_simple_cmd_with_retry(h, c, PCI_DMA_FROMDEVICE);
        ei = c->err_info;
        if (ei->CommandStatus != 0 && ei->CommandStatus != CMD_DATA_UNDERRUN) {
                hpsa_scsi_interpret_error(c);
                rc = -1;
        }
        cmd_special_free(h, c);
        return rc;
}
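/*
 * Example usage (illustrative): hpsa_get_raid_level() below reads 64
 * bytes of vendor-specific VPD page 0xC1 and takes the RAID level from
 * byte 8 of the response; hpsa_get_device_id() reads standard VPD page
 * 0x83 for the device identifier.
 */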
static int hpsa_send_reset(struct ctlr_info *h, unsigned char *scsi3addr)
{
        int rc = IO_OK;
        struct CommandList *c;
        struct ErrorInfo *ei;

        c = cmd_special_alloc(h);

        if (c == NULL) {                        /* trouble... */
                dev_warn(&h->pdev->dev, "cmd_special_alloc returned NULL!\n");
                return -ENOMEM;
        }

        fill_cmd(c, HPSA_DEVICE_RESET_MSG, h, NULL, 0, 0, scsi3addr, TYPE_MSG);
        hpsa_scsi_do_simple_cmd_core(h, c);
        /* no unmap needed here because no data xfer. */

        ei = c->err_info;
        if (ei->CommandStatus != 0) {
                hpsa_scsi_interpret_error(c);
                rc = -1;
        }
        cmd_special_free(h, c);
        return rc;
}

static void hpsa_get_raid_level(struct ctlr_info *h,
        unsigned char *scsi3addr, unsigned char *raid_level)
{
        int rc;
        unsigned char *buf;

        *raid_level = RAID_UNKNOWN;
        buf = kzalloc(64, GFP_KERNEL);
        if (!buf)
                return;
        rc = hpsa_scsi_do_inquiry(h, scsi3addr, 0xC1, buf, 64);
        if (rc == 0)
                *raid_level = buf[8];
        if (*raid_level > RAID_UNKNOWN)
                *raid_level = RAID_UNKNOWN;
        kfree(buf);
        return;
}

/* Get the device id from inquiry page 0x83 */
static int hpsa_get_device_id(struct ctlr_info *h, unsigned char *scsi3addr,
        unsigned char *device_id, int buflen)
{
        int rc;
        unsigned char *buf;

        if (buflen > 16)
                buflen = 16;
        buf = kzalloc(64, GFP_KERNEL);
        if (!buf)
                return -1;
        rc = hpsa_scsi_do_inquiry(h, scsi3addr, 0x83, buf, 64);
        if (rc == 0)
                memcpy(device_id, &buf[8], buflen);
        kfree(buf);
        return rc != 0;
}

static int hpsa_scsi_do_report_luns(struct ctlr_info *h, int logical,
        struct ReportLUNdata *buf, int bufsize,
        int extended_response)
{
        int rc = IO_OK;
        struct CommandList *c;
        unsigned char scsi3addr[8];
        struct ErrorInfo *ei;

        c = cmd_special_alloc(h);
        if (c == NULL) {                        /* trouble... */
                dev_err(&h->pdev->dev, "cmd_special_alloc returned NULL!\n");
                return -1;
        }
        /* address the controller */
        memset(scsi3addr, 0, sizeof(scsi3addr));
        fill_cmd(c, logical ? HPSA_REPORT_LOG : HPSA_REPORT_PHYS, h,
                buf, bufsize, 0, scsi3addr, TYPE_CMD);
        if (extended_response)
                c->Request.CDB[1] = extended_response;
        hpsa_scsi_do_simple_cmd_with_retry(h, c, PCI_DMA_FROMDEVICE);
        ei = c->err_info;
        if (ei->CommandStatus != 0 &&
            ei->CommandStatus != CMD_DATA_UNDERRUN) {
                hpsa_scsi_interpret_error(c);
                rc = -1;
        }
        cmd_special_free(h, c);
        return rc;
}

static inline int hpsa_scsi_do_report_phys_luns(struct ctlr_info *h,
        struct ReportLUNdata *buf,
        int bufsize, int extended_response)
{
        return hpsa_scsi_do_report_luns(h, 0, buf, bufsize, extended_response);
}

static inline int hpsa_scsi_do_report_log_luns(struct ctlr_info *h,
        struct ReportLUNdata *buf, int bufsize)
{
        return hpsa_scsi_do_report_luns(h, 1, buf, bufsize, 0);
}
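/*
 * Note: ReportLUNdata.LUNListLength is big-endian, so callers convert
 * it with be32_to_cpu() and divide by 8 (each LUN entry is 8 bytes) to
 * get a LUN count -- see hpsa_gather_lun_info() below.
 */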
static inline void hpsa_set_bus_target_lun(struct hpsa_scsi_dev_t *device,
        int bus, int target, int lun)
{
        device->bus = bus;
        device->target = target;
        device->lun = lun;
}

static int hpsa_update_device_info(struct ctlr_info *h,
        unsigned char scsi3addr[], struct hpsa_scsi_dev_t *this_device,
        unsigned char *is_OBDR_device)
{

#define OBDR_SIG_OFFSET 43
#define OBDR_TAPE_SIG "$DR-10"
#define OBDR_SIG_LEN (sizeof(OBDR_TAPE_SIG) - 1)
#define OBDR_TAPE_INQ_SIZE (OBDR_SIG_OFFSET + OBDR_SIG_LEN)

        unsigned char *inq_buff;
        unsigned char *obdr_sig;

        inq_buff = kzalloc(OBDR_TAPE_INQ_SIZE, GFP_KERNEL);
        if (!inq_buff)
                goto bail_out;

        /* Do an inquiry to the device to see what it is. */
        if (hpsa_scsi_do_inquiry(h, scsi3addr, 0, inq_buff,
                (unsigned char) OBDR_TAPE_INQ_SIZE) != 0) {
                /* Inquiry failed (msg printed already) */
                dev_err(&h->pdev->dev,
                        "hpsa_update_device_info: inquiry failed\n");
                goto bail_out;
        }

        this_device->devtype = (inq_buff[0] & 0x1f);
        memcpy(this_device->scsi3addr, scsi3addr, 8);
        memcpy(this_device->vendor, &inq_buff[8],
                sizeof(this_device->vendor));
        memcpy(this_device->model, &inq_buff[16],
                sizeof(this_device->model));
        memset(this_device->device_id, 0,
                sizeof(this_device->device_id));
        hpsa_get_device_id(h, scsi3addr, this_device->device_id,
                sizeof(this_device->device_id));

        if (this_device->devtype == TYPE_DISK &&
                is_logical_dev_addr_mode(scsi3addr))
                hpsa_get_raid_level(h, scsi3addr, &this_device->raid_level);
        else
                this_device->raid_level = RAID_UNKNOWN;

        if (is_OBDR_device) {
                /* See if this is a One-Button-Disaster-Recovery device
                 * by looking for "$DR-10" at offset 43 in inquiry data.
                 */
                obdr_sig = &inq_buff[OBDR_SIG_OFFSET];
                *is_OBDR_device = (this_device->devtype == TYPE_ROM &&
                                        strncmp(obdr_sig, OBDR_TAPE_SIG,
                                                OBDR_SIG_LEN) == 0);
        }

        kfree(inq_buff);
        return 0;

bail_out:
        kfree(inq_buff);
        return 1;
}

static unsigned char *msa2xxx_model[] = {
        "MSA2012",
        "MSA2024",
        "MSA2312",
        "MSA2324",
        "P2000 G3 SAS",
        NULL,
};

static int is_msa2xxx(struct ctlr_info *h, struct hpsa_scsi_dev_t *device)
{
        int i;

        for (i = 0; msa2xxx_model[i]; i++)
                if (strncmp(device->model, msa2xxx_model[i],
                        strlen(msa2xxx_model[i])) == 0)
                        return 1;
        return 0;
}

/* Helper function to assign bus, target, lun mapping of devices.
 * Puts non-msa2xxx logical volumes on bus 0, msa2xxx logical
 * volumes on bus 1, physical devices on bus 2, and the hba on bus 3.
 * Logical drive target and lun are assigned at this time, but
 * physical device lun and target assignment are deferred (assigned
 * in hpsa_find_target_lun, called by hpsa_scsi_add_entry.)
 */
static void figure_bus_target_lun(struct ctlr_info *h,
        u8 *lunaddrbytes, int *bus, int *target, int *lun,
        struct hpsa_scsi_dev_t *device)
{
        u32 lunid;

        if (is_logical_dev_addr_mode(lunaddrbytes)) {
                /* logical device */
                if (unlikely(is_scsi_rev_5(h))) {
                        /* p1210m, logical drives lun assignments
                         * match SCSI REPORT LUNS data.
                         */
                        lunid = le32_to_cpu(*((__le32 *) lunaddrbytes));
                        *bus = 0;
                        *target = 0;
                        *lun = (lunid & 0x3fff) + 1;
                } else {
                        /* not p1210m... */
                        lunid = le32_to_cpu(*((__le32 *) lunaddrbytes));
                        if (is_msa2xxx(h, device)) {
                                /* msa2xxx way, put logicals on bus 1
                                 * and match target/lun numbers box
                                 * reports.
                                 */
                                *bus = 1;
                                *target = (lunid >> 16) & 0x3fff;
                                *lun = lunid & 0x00ff;
                        } else {
                                /* Traditional smart array way. */
                                *bus = 0;
                                *lun = 0;
                                *target = lunid & 0x3fff;
                        }
                }
        } else {
                /* physical device */
                if (is_hba_lunid(lunaddrbytes))
                        if (unlikely(is_scsi_rev_5(h))) {
                                *bus = 0; /* put p1210m ctlr at 0,0,0 */
                                *target = 0;
                                *lun = 0;
                                return;
                        } else
                                *bus = 3; /* traditional smartarray */
                else
                        *bus = 2; /* physical disk */
                *target = -1;
                *lun = -1; /* we will fill these in later. */
        }
}
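/*
 * Worked example (illustrative): a logical volume on a traditional
 * (non-MSA2xxx) Smart Array with lunid 0x00004005 maps to bus 0,
 * target 0x0005 (lunid & 0x3fff), lun 0.  The same lunid behind an
 * MSA2xxx maps to bus 1, target (lunid >> 16) & 0x3fff, and
 * lun = lunid & 0x00ff instead.
 */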
/*
 * If there is no lun 0 on a target, linux won't find any devices.
 * For the MSA2xxx boxes, we have to manually detect the enclosure
 * which is at lun zero, as CCISS_REPORT_PHYSICAL_LUNS doesn't report
 * it for some reason.  *tmpdevice is the target we're adding,
 * this_device is a pointer into the current element of currentsd[]
 * that we're building up in update_scsi_devices(), below.
 * lunzerobits is a bitmap that tracks which targets already have a
 * lun 0 assigned.
 * Returns 1 if an enclosure was added, 0 if not.
 */
static int add_msa2xxx_enclosure_device(struct ctlr_info *h,
        struct hpsa_scsi_dev_t *tmpdevice,
        struct hpsa_scsi_dev_t *this_device, u8 *lunaddrbytes,
        int bus, int target, int lun, unsigned long lunzerobits[],
        int *nmsa2xxx_enclosures)
{
        unsigned char scsi3addr[8];

        if (test_bit(target, lunzerobits))
                return 0; /* There is already a lun 0 on this target. */

        if (!is_logical_dev_addr_mode(lunaddrbytes))
                return 0; /* It's the logical targets that may lack lun 0. */

        if (!is_msa2xxx(h, tmpdevice))
                return 0; /* It's only the MSA2xxx that have this problem. */

        if (lun == 0) /* if lun is 0, then obviously we have a lun 0. */
                return 0;

        memset(scsi3addr, 0, 8);
        scsi3addr[3] = target;
        if (is_hba_lunid(scsi3addr))
                return 0; /* Don't add the RAID controller here. */

        if (is_scsi_rev_5(h))
                return 0; /* p1210m doesn't need to do this. */

        if (*nmsa2xxx_enclosures >= MAX_MSA2XXX_ENCLOSURES) {
                dev_warn(&h->pdev->dev, "Maximum number of MSA2XXX "
                        "enclosures exceeded.  Check your hardware "
                        "configuration.");
                return 0;
        }

        if (hpsa_update_device_info(h, scsi3addr, this_device, NULL))
                return 0;
        (*nmsa2xxx_enclosures)++;
        hpsa_set_bus_target_lun(this_device, bus, target, 0);
        set_bit(target, lunzerobits);
        return 1;
}
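/*
 * hpsa_gather_lun_info() below clamps what the controller reports:
 * nphysicals to HPSA_MAX_PHYS_LUN, nlogicals to HPSA_MAX_LUN, and the
 * combined total to HPSA_MAX_PHYS_LUN, warning about any LUNs ignored.
 */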
" 1805 "%d LUNs ignored.\n", HPSA_MAX_LUN, 1806 *nlogicals - HPSA_MAX_LUN); 1807 *nlogicals = HPSA_MAX_LUN; 1808 } 1809 if (*nlogicals + *nphysicals > HPSA_MAX_PHYS_LUN) { 1810 dev_warn(&h->pdev->dev, 1811 "maximum logical + physical LUNs (%d) exceeded. " 1812 "%d LUNs ignored.\n", HPSA_MAX_PHYS_LUN, 1813 *nphysicals + *nlogicals - HPSA_MAX_PHYS_LUN); 1814 *nlogicals = HPSA_MAX_PHYS_LUN - *nphysicals; 1815 } 1816 return 0; 1817 } 1818 1819 u8 *figure_lunaddrbytes(struct ctlr_info *h, int raid_ctlr_position, int i, 1820 int nphysicals, int nlogicals, struct ReportLUNdata *physdev_list, 1821 struct ReportLUNdata *logdev_list) 1822 { 1823 /* Helper function, figure out where the LUN ID info is coming from 1824 * given index i, lists of physical and logical devices, where in 1825 * the list the raid controller is supposed to appear (first or last) 1826 */ 1827 1828 int logicals_start = nphysicals + (raid_ctlr_position == 0); 1829 int last_device = nphysicals + nlogicals + (raid_ctlr_position == 0); 1830 1831 if (i == raid_ctlr_position) 1832 return RAID_CTLR_LUNID; 1833 1834 if (i < logicals_start) 1835 return &physdev_list->LUN[i - (raid_ctlr_position == 0)][0]; 1836 1837 if (i < last_device) 1838 return &logdev_list->LUN[i - nphysicals - 1839 (raid_ctlr_position == 0)][0]; 1840 BUG(); 1841 return NULL; 1842 } 1843 1844 static void hpsa_update_scsi_devices(struct ctlr_info *h, int hostno) 1845 { 1846 /* the idea here is we could get notified 1847 * that some devices have changed, so we do a report 1848 * physical luns and report logical luns cmd, and adjust 1849 * our list of devices accordingly. 1850 * 1851 * The scsi3addr's of devices won't change so long as the 1852 * adapter is not reset. That means we can rescan and 1853 * tell which devices we already know about, vs. new 1854 * devices, vs. disappearing devices. 1855 */ 1856 struct ReportLUNdata *physdev_list = NULL; 1857 struct ReportLUNdata *logdev_list = NULL; 1858 u32 nphysicals = 0; 1859 u32 nlogicals = 0; 1860 u32 ndev_allocated = 0; 1861 struct hpsa_scsi_dev_t **currentsd, *this_device, *tmpdevice; 1862 int ncurrent = 0; 1863 int reportlunsize = sizeof(*physdev_list) + HPSA_MAX_PHYS_LUN * 8; 1864 int i, nmsa2xxx_enclosures, ndevs_to_allocate; 1865 int bus, target, lun; 1866 int raid_ctlr_position; 1867 DECLARE_BITMAP(lunzerobits, HPSA_MAX_TARGETS_PER_CTLR); 1868 1869 currentsd = kzalloc(sizeof(*currentsd) * HPSA_MAX_DEVICES, GFP_KERNEL); 1870 physdev_list = kzalloc(reportlunsize, GFP_KERNEL); 1871 logdev_list = kzalloc(reportlunsize, GFP_KERNEL); 1872 tmpdevice = kzalloc(sizeof(*tmpdevice), GFP_KERNEL); 1873 1874 if (!currentsd || !physdev_list || !logdev_list || !tmpdevice) { 1875 dev_err(&h->pdev->dev, "out of memory\n"); 1876 goto out; 1877 } 1878 memset(lunzerobits, 0, sizeof(lunzerobits)); 1879 1880 if (hpsa_gather_lun_info(h, reportlunsize, physdev_list, &nphysicals, 1881 logdev_list, &nlogicals)) 1882 goto out; 1883 1884 /* We might see up to 32 MSA2xxx enclosures, actually 8 of them 1885 * but each of them 4 times through different paths. The plus 1 1886 * is for the RAID controller. 1887 */ 1888 ndevs_to_allocate = nphysicals + nlogicals + MAX_MSA2XXX_ENCLOSURES + 1; 1889 1890 /* Allocate the per device structures */ 1891 for (i = 0; i < ndevs_to_allocate; i++) { 1892 if (i >= HPSA_MAX_DEVICES) { 1893 dev_warn(&h->pdev->dev, "maximum devices (%d) exceeded." 
1894 " %d devices ignored.\n", HPSA_MAX_DEVICES, 1895 ndevs_to_allocate - HPSA_MAX_DEVICES); 1896 break; 1897 } 1898 1899 currentsd[i] = kzalloc(sizeof(*currentsd[i]), GFP_KERNEL); 1900 if (!currentsd[i]) { 1901 dev_warn(&h->pdev->dev, "out of memory at %s:%d\n", 1902 __FILE__, __LINE__); 1903 goto out; 1904 } 1905 ndev_allocated++; 1906 } 1907 1908 if (unlikely(is_scsi_rev_5(h))) 1909 raid_ctlr_position = 0; 1910 else 1911 raid_ctlr_position = nphysicals + nlogicals; 1912 1913 /* adjust our table of devices */ 1914 nmsa2xxx_enclosures = 0; 1915 for (i = 0; i < nphysicals + nlogicals + 1; i++) { 1916 u8 *lunaddrbytes, is_OBDR = 0; 1917 1918 /* Figure out where the LUN ID info is coming from */ 1919 lunaddrbytes = figure_lunaddrbytes(h, raid_ctlr_position, 1920 i, nphysicals, nlogicals, physdev_list, logdev_list); 1921 /* skip masked physical devices. */ 1922 if (lunaddrbytes[3] & 0xC0 && 1923 i < nphysicals + (raid_ctlr_position == 0)) 1924 continue; 1925 1926 /* Get device type, vendor, model, device id */ 1927 if (hpsa_update_device_info(h, lunaddrbytes, tmpdevice, 1928 &is_OBDR)) 1929 continue; /* skip it if we can't talk to it. */ 1930 figure_bus_target_lun(h, lunaddrbytes, &bus, &target, &lun, 1931 tmpdevice); 1932 this_device = currentsd[ncurrent]; 1933 1934 /* 1935 * For the msa2xxx boxes, we have to insert a LUN 0 which 1936 * doesn't show up in CCISS_REPORT_PHYSICAL data, but there 1937 * is nonetheless an enclosure device there. We have to 1938 * present that otherwise linux won't find anything if 1939 * there is no lun 0. 1940 */ 1941 if (add_msa2xxx_enclosure_device(h, tmpdevice, this_device, 1942 lunaddrbytes, bus, target, lun, lunzerobits, 1943 &nmsa2xxx_enclosures)) { 1944 ncurrent++; 1945 this_device = currentsd[ncurrent]; 1946 } 1947 1948 *this_device = *tmpdevice; 1949 hpsa_set_bus_target_lun(this_device, bus, target, lun); 1950 1951 switch (this_device->devtype) { 1952 case TYPE_ROM: 1953 /* We don't *really* support actual CD-ROM devices, 1954 * just "One Button Disaster Recovery" tape drive 1955 * which temporarily pretends to be a CD-ROM drive. 1956 * So we check that the device is really an OBDR tape 1957 * device by checking for "$DR-10" in bytes 43-48 of 1958 * the inquiry data. 1959 */ 1960 if (is_OBDR) 1961 ncurrent++; 1962 break; 1963 case TYPE_DISK: 1964 if (i < nphysicals) 1965 break; 1966 ncurrent++; 1967 break; 1968 case TYPE_TAPE: 1969 case TYPE_MEDIUM_CHANGER: 1970 ncurrent++; 1971 break; 1972 case TYPE_RAID: 1973 /* Only present the Smartarray HBA as a RAID controller. 1974 * If it's a RAID controller other than the HBA itself 1975 * (an external RAID controller, MSA500 or similar) 1976 * don't present it. 1977 */ 1978 if (!is_hba_lunid(lunaddrbytes)) 1979 break; 1980 ncurrent++; 1981 break; 1982 default: 1983 break; 1984 } 1985 if (ncurrent >= HPSA_MAX_DEVICES) 1986 break; 1987 } 1988 adjust_hpsa_scsi_table(h, hostno, currentsd, ncurrent); 1989 out: 1990 kfree(tmpdevice); 1991 for (i = 0; i < ndev_allocated; i++) 1992 kfree(currentsd[i]); 1993 kfree(currentsd); 1994 kfree(physdev_list); 1995 kfree(logdev_list); 1996 } 1997 1998 /* hpsa_scatter_gather takes a struct scsi_cmnd, (cmd), and does the pci 1999 * dma mapping and fills in the scatter gather entries of the 2000 * hpsa command, cp. 
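 * If the request needs more descriptors than will fit in the command
 * itself, the tail of the list spills into a separate chain block
 * (see the chained/sg_index handling below).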
2001 */ 2002 static int hpsa_scatter_gather(struct ctlr_info *h, 2003 struct CommandList *cp, 2004 struct scsi_cmnd *cmd) 2005 { 2006 unsigned int len; 2007 struct scatterlist *sg; 2008 u64 addr64; 2009 int use_sg, i, sg_index, chained; 2010 struct SGDescriptor *curr_sg; 2011 2012 BUG_ON(scsi_sg_count(cmd) > h->maxsgentries); 2013 2014 use_sg = scsi_dma_map(cmd); 2015 if (use_sg < 0) 2016 return use_sg; 2017 2018 if (!use_sg) 2019 goto sglist_finished; 2020 2021 curr_sg = cp->SG; 2022 chained = 0; 2023 sg_index = 0; 2024 scsi_for_each_sg(cmd, sg, use_sg, i) { 2025 if (i == h->max_cmd_sg_entries - 1 && 2026 use_sg > h->max_cmd_sg_entries) { 2027 chained = 1; 2028 curr_sg = h->cmd_sg_list[cp->cmdindex]; 2029 sg_index = 0; 2030 } 2031 addr64 = (u64) sg_dma_address(sg); 2032 len = sg_dma_len(sg); 2033 curr_sg->Addr.lower = (u32) (addr64 & 0x0FFFFFFFFULL); 2034 curr_sg->Addr.upper = (u32) ((addr64 >> 32) & 0x0FFFFFFFFULL); 2035 curr_sg->Len = len; 2036 curr_sg->Ext = 0; /* we are not chaining */ 2037 curr_sg++; 2038 } 2039 2040 if (use_sg + chained > h->maxSG) 2041 h->maxSG = use_sg + chained; 2042 2043 if (chained) { 2044 cp->Header.SGList = h->max_cmd_sg_entries; 2045 cp->Header.SGTotal = (u16) (use_sg + 1); 2046 hpsa_map_sg_chain_block(h, cp); 2047 return 0; 2048 } 2049 2050 sglist_finished: 2051 2052 cp->Header.SGList = (u8) use_sg; /* no. SGs contig in this cmd */ 2053 cp->Header.SGTotal = (u16) use_sg; /* total sgs in this cmd list */ 2054 return 0; 2055 } 2056 2057 2058 static int hpsa_scsi_queue_command_lck(struct scsi_cmnd *cmd, 2059 void (*done)(struct scsi_cmnd *)) 2060 { 2061 struct ctlr_info *h; 2062 struct hpsa_scsi_dev_t *dev; 2063 unsigned char scsi3addr[8]; 2064 struct CommandList *c; 2065 unsigned long flags; 2066 2067 /* Get the ptr to our adapter structure out of cmd->host. */ 2068 h = sdev_to_hba(cmd->device); 2069 dev = cmd->device->hostdata; 2070 if (!dev) { 2071 cmd->result = DID_NO_CONNECT << 16; 2072 done(cmd); 2073 return 0; 2074 } 2075 memcpy(scsi3addr, dev->scsi3addr, sizeof(scsi3addr)); 2076 2077 spin_lock_irqsave(&h->lock, flags); 2078 if (unlikely(h->lockup_detected)) { 2079 spin_unlock_irqrestore(&h->lock, flags); 2080 cmd->result = DID_ERROR << 16; 2081 done(cmd); 2082 return 0; 2083 } 2084 /* Need a lock as this is being allocated from the pool */ 2085 c = cmd_alloc(h); 2086 spin_unlock_irqrestore(&h->lock, flags); 2087 if (c == NULL) { /* trouble... */ 2088 dev_err(&h->pdev->dev, "cmd_alloc returned NULL!\n"); 2089 return SCSI_MLQUEUE_HOST_BUSY; 2090 } 2091 2092 /* Fill in the command list header */ 2093 2094 cmd->scsi_done = done; /* save this for use by completion code */ 2095 2096 /* save c in case we have to abort it */ 2097 cmd->host_scribble = (unsigned char *) c; 2098 2099 c->cmd_type = CMD_SCSI; 2100 c->scsi_cmd = cmd; 2101 c->Header.ReplyQueue = 0; /* unused in simple mode */ 2102 memcpy(&c->Header.LUN.LunAddrBytes[0], &scsi3addr[0], 8); 2103 c->Header.Tag.lower = (c->cmdindex << DIRECT_LOOKUP_SHIFT); 2104 c->Header.Tag.lower |= DIRECT_LOOKUP_BIT; 2105 2106 /* Fill in the request block... 
*/ 2107 2108 c->Request.Timeout = 0; 2109 memset(c->Request.CDB, 0, sizeof(c->Request.CDB)); 2110 BUG_ON(cmd->cmd_len > sizeof(c->Request.CDB)); 2111 c->Request.CDBLen = cmd->cmd_len; 2112 memcpy(c->Request.CDB, cmd->cmnd, cmd->cmd_len); 2113 c->Request.Type.Type = TYPE_CMD; 2114 c->Request.Type.Attribute = ATTR_SIMPLE; 2115 switch (cmd->sc_data_direction) { 2116 case DMA_TO_DEVICE: 2117 c->Request.Type.Direction = XFER_WRITE; 2118 break; 2119 case DMA_FROM_DEVICE: 2120 c->Request.Type.Direction = XFER_READ; 2121 break; 2122 case DMA_NONE: 2123 c->Request.Type.Direction = XFER_NONE; 2124 break; 2125 case DMA_BIDIRECTIONAL: 2126 /* This can happen if a buggy application does a scsi passthru 2127 * and sets both inlen and outlen to non-zero. ( see 2128 * ../scsi/scsi_ioctl.c:scsi_ioctl_send_command() ) 2129 */ 2130 2131 c->Request.Type.Direction = XFER_RSVD; 2132 /* This is technically wrong, and hpsa controllers should 2133 * reject it with CMD_INVALID, which is the most correct 2134 * response, but non-fibre backends appear to let it 2135 * slide by, and give the same results as if this field 2136 * were set correctly. Either way is acceptable for 2137 * our purposes here. 2138 */ 2139 2140 break; 2141 2142 default: 2143 dev_err(&h->pdev->dev, "unknown data direction: %d\n", 2144 cmd->sc_data_direction); 2145 BUG(); 2146 break; 2147 } 2148 2149 if (hpsa_scatter_gather(h, c, cmd) < 0) { /* Fill SG list */ 2150 cmd_free(h, c); 2151 return SCSI_MLQUEUE_HOST_BUSY; 2152 } 2153 enqueue_cmd_and_start_io(h, c); 2154 /* the cmd'll come back via intr handler in complete_scsi_command() */ 2155 return 0; 2156 } 2157 2158 static DEF_SCSI_QCMD(hpsa_scsi_queue_command) 2159 2160 static void hpsa_scan_start(struct Scsi_Host *sh) 2161 { 2162 struct ctlr_info *h = shost_to_hba(sh); 2163 unsigned long flags; 2164 2165 /* wait until any scan already in progress is finished. */ 2166 while (1) { 2167 spin_lock_irqsave(&h->scan_lock, flags); 2168 if (h->scan_finished) 2169 break; 2170 spin_unlock_irqrestore(&h->scan_lock, flags); 2171 wait_event(h->scan_wait_queue, h->scan_finished); 2172 /* Note: We don't need to worry about a race between this 2173 * thread and driver unload because the midlayer will 2174 * have incremented the reference count, so unload won't 2175 * happen if we're in here. 2176 */ 2177 } 2178 h->scan_finished = 0; /* mark scan as in progress */ 2179 spin_unlock_irqrestore(&h->scan_lock, flags); 2180 2181 hpsa_update_scsi_devices(h, h->scsi_host->host_no); 2182 2183 spin_lock_irqsave(&h->scan_lock, flags); 2184 h->scan_finished = 1; /* mark scan as finished. 
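	 * and wake up anyone sleeping in the wait loop above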
*/ 2185 wake_up_all(&h->scan_wait_queue); 2186 spin_unlock_irqrestore(&h->scan_lock, flags); 2187 } 2188 2189 static int hpsa_scan_finished(struct Scsi_Host *sh, 2190 unsigned long elapsed_time) 2191 { 2192 struct ctlr_info *h = shost_to_hba(sh); 2193 unsigned long flags; 2194 int finished; 2195 2196 spin_lock_irqsave(&h->scan_lock, flags); 2197 finished = h->scan_finished; 2198 spin_unlock_irqrestore(&h->scan_lock, flags); 2199 return finished; 2200 } 2201 2202 static int hpsa_change_queue_depth(struct scsi_device *sdev, 2203 int qdepth, int reason) 2204 { 2205 struct ctlr_info *h = sdev_to_hba(sdev); 2206 2207 if (reason != SCSI_QDEPTH_DEFAULT) 2208 return -ENOTSUPP; 2209 2210 if (qdepth < 1) 2211 qdepth = 1; 2212 else 2213 if (qdepth > h->nr_cmds) 2214 qdepth = h->nr_cmds; 2215 scsi_adjust_queue_depth(sdev, scsi_get_tag_type(sdev), qdepth); 2216 return sdev->queue_depth; 2217 } 2218 2219 static void hpsa_unregister_scsi(struct ctlr_info *h) 2220 { 2221 /* we are being forcibly unloaded, and may not refuse. */ 2222 scsi_remove_host(h->scsi_host); 2223 scsi_host_put(h->scsi_host); 2224 h->scsi_host = NULL; 2225 } 2226 2227 static int hpsa_register_scsi(struct ctlr_info *h) 2228 { 2229 int rc; 2230 2231 rc = hpsa_scsi_detect(h); 2232 if (rc != 0) 2233 dev_err(&h->pdev->dev, "hpsa_register_scsi: failed" 2234 " hpsa_scsi_detect(), rc is %d\n", rc); 2235 return rc; 2236 } 2237 2238 static int wait_for_device_to_become_ready(struct ctlr_info *h, 2239 unsigned char lunaddr[]) 2240 { 2241 int rc = 0; 2242 int count = 0; 2243 int waittime = 1; /* seconds */ 2244 struct CommandList *c; 2245 2246 c = cmd_special_alloc(h); 2247 if (!c) { 2248 dev_warn(&h->pdev->dev, "out of memory in " 2249 "wait_for_device_to_become_ready.\n"); 2250 return IO_ERROR; 2251 } 2252 2253 /* Send test unit ready until device ready, or give up. */ 2254 while (count < HPSA_TUR_RETRY_LIMIT) { 2255 2256 /* Wait for a bit. do this first, because if we send 2257 * the TUR right away, the reset will just abort it. 2258 */ 2259 msleep(1000 * waittime); 2260 count++; 2261 2262 /* Increase wait time with each try, up to a point. */ 2263 if (waittime < HPSA_MAX_WAIT_INTERVAL_SECS) 2264 waittime = waittime * 2; 2265 2266 /* Send the Test Unit Ready */ 2267 fill_cmd(c, TEST_UNIT_READY, h, NULL, 0, 0, lunaddr, TYPE_CMD); 2268 hpsa_scsi_do_simple_cmd_core(h, c); 2269 /* no unmap needed here because no data xfer. */ 2270 2271 if (c->err_info->CommandStatus == CMD_SUCCESS) 2272 break; 2273 2274 if (c->err_info->CommandStatus == CMD_TARGET_STATUS && 2275 c->err_info->ScsiStatus == SAM_STAT_CHECK_CONDITION && 2276 (c->err_info->SenseInfo[2] == NO_SENSE || 2277 c->err_info->SenseInfo[2] == UNIT_ATTENTION)) 2278 break; 2279 2280 dev_warn(&h->pdev->dev, "waiting %d secs " 2281 "for device to become ready.\n", waittime); 2282 rc = 1; /* device not ready. */ 2283 } 2284 2285 if (rc) 2286 dev_warn(&h->pdev->dev, "giving up on device.\n"); 2287 else 2288 dev_warn(&h->pdev->dev, "device is ready.\n"); 2289 2290 cmd_special_free(h, c); 2291 return rc; 2292 } 2293 2294 /* Need at least one of these error handlers to keep ../scsi/hosts.c from 2295 * complaining. Doing a host- or bus-reset can't do anything good here. 
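 * Instead, the device reset handler below resets a single LUN and then
 * polls it with TEST UNIT READY until it responds again.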
2296 */ 2297 static int hpsa_eh_device_reset_handler(struct scsi_cmnd *scsicmd) 2298 { 2299 int rc; 2300 struct ctlr_info *h; 2301 struct hpsa_scsi_dev_t *dev; 2302 2303 /* find the controller to which the command to be aborted was sent */ 2304 h = sdev_to_hba(scsicmd->device); 2305 if (h == NULL) /* paranoia */ 2306 return FAILED; 2307 dev = scsicmd->device->hostdata; 2308 if (!dev) { 2309 dev_err(&h->pdev->dev, "hpsa_eh_device_reset_handler: " 2310 "device lookup failed.\n"); 2311 return FAILED; 2312 } 2313 dev_warn(&h->pdev->dev, "resetting device %d:%d:%d:%d\n", 2314 h->scsi_host->host_no, dev->bus, dev->target, dev->lun); 2315 /* send a reset to the SCSI LUN which the command was sent to */ 2316 rc = hpsa_send_reset(h, dev->scsi3addr); 2317 if (rc == 0 && wait_for_device_to_become_ready(h, dev->scsi3addr) == 0) 2318 return SUCCESS; 2319 2320 dev_warn(&h->pdev->dev, "resetting device failed.\n"); 2321 return FAILED; 2322 } 2323 2324 /* 2325 * For operations that cannot sleep, a command block is allocated at init, 2326 * and managed by cmd_alloc() and cmd_free() using a simple bitmap to track 2327 * which ones are free or in use. Lock must be held when calling this. 2328 * cmd_free() is the complement. 2329 */ 2330 static struct CommandList *cmd_alloc(struct ctlr_info *h) 2331 { 2332 struct CommandList *c; 2333 int i; 2334 union u64bit temp64; 2335 dma_addr_t cmd_dma_handle, err_dma_handle; 2336 2337 do { 2338 i = find_first_zero_bit(h->cmd_pool_bits, h->nr_cmds); 2339 if (i == h->nr_cmds) 2340 return NULL; 2341 } while (test_and_set_bit 2342 (i & (BITS_PER_LONG - 1), 2343 h->cmd_pool_bits + (i / BITS_PER_LONG)) != 0); 2344 c = h->cmd_pool + i; 2345 memset(c, 0, sizeof(*c)); 2346 cmd_dma_handle = h->cmd_pool_dhandle 2347 + i * sizeof(*c); 2348 c->err_info = h->errinfo_pool + i; 2349 memset(c->err_info, 0, sizeof(*c->err_info)); 2350 err_dma_handle = h->errinfo_pool_dhandle 2351 + i * sizeof(*c->err_info); 2352 h->nr_allocs++; 2353 2354 c->cmdindex = i; 2355 2356 INIT_LIST_HEAD(&c->list); 2357 c->busaddr = (u32) cmd_dma_handle; 2358 temp64.val = (u64) err_dma_handle; 2359 c->ErrDesc.Addr.lower = temp64.val32.lower; 2360 c->ErrDesc.Addr.upper = temp64.val32.upper; 2361 c->ErrDesc.Len = sizeof(*c->err_info); 2362 2363 c->h = h; 2364 return c; 2365 } 2366 2367 /* For operations that can wait for kmalloc to possibly sleep, 2368 * this routine can be called. Lock need not be held to call 2369 * cmd_special_alloc. cmd_special_free() is the complement. 
2370 */ 2371 static struct CommandList *cmd_special_alloc(struct ctlr_info *h) 2372 { 2373 struct CommandList *c; 2374 union u64bit temp64; 2375 dma_addr_t cmd_dma_handle, err_dma_handle; 2376 2377 c = pci_alloc_consistent(h->pdev, sizeof(*c), &cmd_dma_handle); 2378 if (c == NULL) 2379 return NULL; 2380 memset(c, 0, sizeof(*c)); 2381 2382 c->cmdindex = -1; 2383 2384 c->err_info = pci_alloc_consistent(h->pdev, sizeof(*c->err_info), 2385 &err_dma_handle); 2386 2387 if (c->err_info == NULL) { 2388 pci_free_consistent(h->pdev, 2389 sizeof(*c), c, cmd_dma_handle); 2390 return NULL; 2391 } 2392 memset(c->err_info, 0, sizeof(*c->err_info)); 2393 2394 INIT_LIST_HEAD(&c->list); 2395 c->busaddr = (u32) cmd_dma_handle; 2396 temp64.val = (u64) err_dma_handle; 2397 c->ErrDesc.Addr.lower = temp64.val32.lower; 2398 c->ErrDesc.Addr.upper = temp64.val32.upper; 2399 c->ErrDesc.Len = sizeof(*c->err_info); 2400 2401 c->h = h; 2402 return c; 2403 } 2404 2405 static void cmd_free(struct ctlr_info *h, struct CommandList *c) 2406 { 2407 int i; 2408 2409 i = c - h->cmd_pool; 2410 clear_bit(i & (BITS_PER_LONG - 1), 2411 h->cmd_pool_bits + (i / BITS_PER_LONG)); 2412 h->nr_frees++; 2413 } 2414 2415 static void cmd_special_free(struct ctlr_info *h, struct CommandList *c) 2416 { 2417 union u64bit temp64; 2418 2419 temp64.val32.lower = c->ErrDesc.Addr.lower; 2420 temp64.val32.upper = c->ErrDesc.Addr.upper; 2421 pci_free_consistent(h->pdev, sizeof(*c->err_info), 2422 c->err_info, (dma_addr_t) temp64.val); 2423 pci_free_consistent(h->pdev, sizeof(*c), 2424 c, (dma_addr_t) (c->busaddr & DIRECT_LOOKUP_MASK)); 2425 } 2426 2427 #ifdef CONFIG_COMPAT 2428 2429 static int hpsa_ioctl32_passthru(struct scsi_device *dev, int cmd, void *arg) 2430 { 2431 IOCTL32_Command_struct __user *arg32 = 2432 (IOCTL32_Command_struct __user *) arg; 2433 IOCTL_Command_struct arg64; 2434 IOCTL_Command_struct __user *p = compat_alloc_user_space(sizeof(arg64)); 2435 int err; 2436 u32 cp; 2437 2438 memset(&arg64, 0, sizeof(arg64)); 2439 err = 0; 2440 err |= copy_from_user(&arg64.LUN_info, &arg32->LUN_info, 2441 sizeof(arg64.LUN_info)); 2442 err |= copy_from_user(&arg64.Request, &arg32->Request, 2443 sizeof(arg64.Request)); 2444 err |= copy_from_user(&arg64.error_info, &arg32->error_info, 2445 sizeof(arg64.error_info)); 2446 err |= get_user(arg64.buf_size, &arg32->buf_size); 2447 err |= get_user(cp, &arg32->buf); 2448 arg64.buf = compat_ptr(cp); 2449 err |= copy_to_user(p, &arg64, sizeof(arg64)); 2450 2451 if (err) 2452 return -EFAULT; 2453 2454 err = hpsa_ioctl(dev, CCISS_PASSTHRU, (void *)p); 2455 if (err) 2456 return err; 2457 err |= copy_in_user(&arg32->error_info, &p->error_info, 2458 sizeof(arg32->error_info)); 2459 if (err) 2460 return -EFAULT; 2461 return err; 2462 } 2463 2464 static int hpsa_ioctl32_big_passthru(struct scsi_device *dev, 2465 int cmd, void *arg) 2466 { 2467 BIG_IOCTL32_Command_struct __user *arg32 = 2468 (BIG_IOCTL32_Command_struct __user *) arg; 2469 BIG_IOCTL_Command_struct arg64; 2470 BIG_IOCTL_Command_struct __user *p = 2471 compat_alloc_user_space(sizeof(arg64)); 2472 int err; 2473 u32 cp; 2474 2475 memset(&arg64, 0, sizeof(arg64)); 2476 err = 0; 2477 err |= copy_from_user(&arg64.LUN_info, &arg32->LUN_info, 2478 sizeof(arg64.LUN_info)); 2479 err |= copy_from_user(&arg64.Request, &arg32->Request, 2480 sizeof(arg64.Request)); 2481 err |= copy_from_user(&arg64.error_info, &arg32->error_info, 2482 sizeof(arg64.error_info)); 2483 err |= get_user(arg64.buf_size, &arg32->buf_size); 2484 err |= get_user(arg64.malloc_size, 
&arg32->malloc_size); 2485 err |= get_user(cp, &arg32->buf); 2486 arg64.buf = compat_ptr(cp); 2487 err |= copy_to_user(p, &arg64, sizeof(arg64)); 2488 2489 if (err) 2490 return -EFAULT; 2491 2492 err = hpsa_ioctl(dev, CCISS_BIG_PASSTHRU, (void *)p); 2493 if (err) 2494 return err; 2495 err |= copy_in_user(&arg32->error_info, &p->error_info, 2496 sizeof(arg32->error_info)); 2497 if (err) 2498 return -EFAULT; 2499 return err; 2500 } 2501 2502 static int hpsa_compat_ioctl(struct scsi_device *dev, int cmd, void *arg) 2503 { 2504 switch (cmd) { 2505 case CCISS_GETPCIINFO: 2506 case CCISS_GETINTINFO: 2507 case CCISS_SETINTINFO: 2508 case CCISS_GETNODENAME: 2509 case CCISS_SETNODENAME: 2510 case CCISS_GETHEARTBEAT: 2511 case CCISS_GETBUSTYPES: 2512 case CCISS_GETFIRMVER: 2513 case CCISS_GETDRIVVER: 2514 case CCISS_REVALIDVOLS: 2515 case CCISS_DEREGDISK: 2516 case CCISS_REGNEWDISK: 2517 case CCISS_REGNEWD: 2518 case CCISS_RESCANDISK: 2519 case CCISS_GETLUNINFO: 2520 return hpsa_ioctl(dev, cmd, arg); 2521 2522 case CCISS_PASSTHRU32: 2523 return hpsa_ioctl32_passthru(dev, cmd, arg); 2524 case CCISS_BIG_PASSTHRU32: 2525 return hpsa_ioctl32_big_passthru(dev, cmd, arg); 2526 2527 default: 2528 return -ENOIOCTLCMD; 2529 } 2530 } 2531 #endif 2532 2533 static int hpsa_getpciinfo_ioctl(struct ctlr_info *h, void __user *argp) 2534 { 2535 struct hpsa_pci_info pciinfo; 2536 2537 if (!argp) 2538 return -EINVAL; 2539 pciinfo.domain = pci_domain_nr(h->pdev->bus); 2540 pciinfo.bus = h->pdev->bus->number; 2541 pciinfo.dev_fn = h->pdev->devfn; 2542 pciinfo.board_id = h->board_id; 2543 if (copy_to_user(argp, &pciinfo, sizeof(pciinfo))) 2544 return -EFAULT; 2545 return 0; 2546 } 2547 2548 static int hpsa_getdrivver_ioctl(struct ctlr_info *h, void __user *argp) 2549 { 2550 DriverVer_type DriverVer; 2551 unsigned char vmaj, vmin, vsubmin; 2552 int rc; 2553 2554 rc = sscanf(HPSA_DRIVER_VERSION, "%hhu.%hhu.%hhu", 2555 &vmaj, &vmin, &vsubmin); 2556 if (rc != 3) { 2557 dev_info(&h->pdev->dev, "driver version string '%s' " 2558 "unrecognized.", HPSA_DRIVER_VERSION); 2559 vmaj = 0; 2560 vmin = 0; 2561 vsubmin = 0; 2562 } 2563 DriverVer = (vmaj << 16) | (vmin << 8) | vsubmin; 2564 if (!argp) 2565 return -EINVAL; 2566 if (copy_to_user(argp, &DriverVer, sizeof(DriverVer_type))) 2567 return -EFAULT; 2568 return 0; 2569 } 2570 2571 static int hpsa_passthru_ioctl(struct ctlr_info *h, void __user *argp) 2572 { 2573 IOCTL_Command_struct iocommand; 2574 struct CommandList *c; 2575 char *buff = NULL; 2576 union u64bit temp64; 2577 2578 if (!argp) 2579 return -EINVAL; 2580 if (!capable(CAP_SYS_RAWIO)) 2581 return -EPERM; 2582 if (copy_from_user(&iocommand, argp, sizeof(iocommand))) 2583 return -EFAULT; 2584 if ((iocommand.buf_size < 1) && 2585 (iocommand.Request.Type.Direction != XFER_NONE)) { 2586 return -EINVAL; 2587 } 2588 if (iocommand.buf_size > 0) { 2589 buff = kmalloc(iocommand.buf_size, GFP_KERNEL); 2590 if (buff == NULL) 2591 return -EFAULT; 2592 if (iocommand.Request.Type.Direction == XFER_WRITE) { 2593 /* Copy the data into the buffer we created */ 2594 if (copy_from_user(buff, iocommand.buf, 2595 iocommand.buf_size)) { 2596 kfree(buff); 2597 return -EFAULT; 2598 } 2599 } else { 2600 memset(buff, 0, iocommand.buf_size); 2601 } 2602 } 2603 c = cmd_special_alloc(h); 2604 if (c == NULL) { 2605 kfree(buff); 2606 return -ENOMEM; 2607 } 2608 /* Fill in the command type */ 2609 c->cmd_type = CMD_IOCTL_PEND; 2610 /* Fill in Command Header */ 2611 c->Header.ReplyQueue = 0; /* unused in simple mode */ 2612 if (iocommand.buf_size > 0) { 
/* buffer to fill */ 2613 c->Header.SGList = 1; 2614 c->Header.SGTotal = 1; 2615 } else { /* no buffers to fill */ 2616 c->Header.SGList = 0; 2617 c->Header.SGTotal = 0; 2618 } 2619 memcpy(&c->Header.LUN, &iocommand.LUN_info, sizeof(c->Header.LUN)); 2620 /* use the kernel address the cmd block for tag */ 2621 c->Header.Tag.lower = c->busaddr; 2622 2623 /* Fill in Request block */ 2624 memcpy(&c->Request, &iocommand.Request, 2625 sizeof(c->Request)); 2626 2627 /* Fill in the scatter gather information */ 2628 if (iocommand.buf_size > 0) { 2629 temp64.val = pci_map_single(h->pdev, buff, 2630 iocommand.buf_size, PCI_DMA_BIDIRECTIONAL); 2631 c->SG[0].Addr.lower = temp64.val32.lower; 2632 c->SG[0].Addr.upper = temp64.val32.upper; 2633 c->SG[0].Len = iocommand.buf_size; 2634 c->SG[0].Ext = 0; /* we are not chaining*/ 2635 } 2636 hpsa_scsi_do_simple_cmd_core_if_no_lockup(h, c); 2637 if (iocommand.buf_size > 0) 2638 hpsa_pci_unmap(h->pdev, c, 1, PCI_DMA_BIDIRECTIONAL); 2639 check_ioctl_unit_attention(h, c); 2640 2641 /* Copy the error information out */ 2642 memcpy(&iocommand.error_info, c->err_info, 2643 sizeof(iocommand.error_info)); 2644 if (copy_to_user(argp, &iocommand, sizeof(iocommand))) { 2645 kfree(buff); 2646 cmd_special_free(h, c); 2647 return -EFAULT; 2648 } 2649 if (iocommand.Request.Type.Direction == XFER_READ && 2650 iocommand.buf_size > 0) { 2651 /* Copy the data out of the buffer we created */ 2652 if (copy_to_user(iocommand.buf, buff, iocommand.buf_size)) { 2653 kfree(buff); 2654 cmd_special_free(h, c); 2655 return -EFAULT; 2656 } 2657 } 2658 kfree(buff); 2659 cmd_special_free(h, c); 2660 return 0; 2661 } 2662 2663 static int hpsa_big_passthru_ioctl(struct ctlr_info *h, void __user *argp) 2664 { 2665 BIG_IOCTL_Command_struct *ioc; 2666 struct CommandList *c; 2667 unsigned char **buff = NULL; 2668 int *buff_size = NULL; 2669 union u64bit temp64; 2670 BYTE sg_used = 0; 2671 int status = 0; 2672 int i; 2673 u32 left; 2674 u32 sz; 2675 BYTE __user *data_ptr; 2676 2677 if (!argp) 2678 return -EINVAL; 2679 if (!capable(CAP_SYS_RAWIO)) 2680 return -EPERM; 2681 ioc = (BIG_IOCTL_Command_struct *) 2682 kmalloc(sizeof(*ioc), GFP_KERNEL); 2683 if (!ioc) { 2684 status = -ENOMEM; 2685 goto cleanup1; 2686 } 2687 if (copy_from_user(ioc, argp, sizeof(*ioc))) { 2688 status = -EFAULT; 2689 goto cleanup1; 2690 } 2691 if ((ioc->buf_size < 1) && 2692 (ioc->Request.Type.Direction != XFER_NONE)) { 2693 status = -EINVAL; 2694 goto cleanup1; 2695 } 2696 /* Check kmalloc limits using all SGs */ 2697 if (ioc->malloc_size > MAX_KMALLOC_SIZE) { 2698 status = -EINVAL; 2699 goto cleanup1; 2700 } 2701 if (ioc->buf_size > ioc->malloc_size * MAXSGENTRIES) { 2702 status = -EINVAL; 2703 goto cleanup1; 2704 } 2705 buff = kzalloc(MAXSGENTRIES * sizeof(char *), GFP_KERNEL); 2706 if (!buff) { 2707 status = -ENOMEM; 2708 goto cleanup1; 2709 } 2710 buff_size = kmalloc(MAXSGENTRIES * sizeof(int), GFP_KERNEL); 2711 if (!buff_size) { 2712 status = -ENOMEM; 2713 goto cleanup1; 2714 } 2715 left = ioc->buf_size; 2716 data_ptr = ioc->buf; 2717 while (left) { 2718 sz = (left > ioc->malloc_size) ? 
ioc->malloc_size : left;
		buff_size[sg_used] = sz;
		buff[sg_used] = kmalloc(sz, GFP_KERNEL);
		if (buff[sg_used] == NULL) {
			status = -ENOMEM;
			goto cleanup1;
		}
		if (ioc->Request.Type.Direction == XFER_WRITE) {
			if (copy_from_user(buff[sg_used], data_ptr, sz)) {
				status = -EFAULT;
				goto cleanup1;
			}
		} else
			memset(buff[sg_used], 0, sz);
		left -= sz;
		data_ptr += sz;
		sg_used++;
	}
	c = cmd_special_alloc(h);
	if (c == NULL) {
		status = -ENOMEM;
		goto cleanup1;
	}
	c->cmd_type = CMD_IOCTL_PEND;
	c->Header.ReplyQueue = 0;
	c->Header.SGList = c->Header.SGTotal = sg_used;
	memcpy(&c->Header.LUN, &ioc->LUN_info, sizeof(c->Header.LUN));
	c->Header.Tag.lower = c->busaddr;
	memcpy(&c->Request, &ioc->Request, sizeof(c->Request));
	if (ioc->buf_size > 0) {
		int i;
		for (i = 0; i < sg_used; i++) {
			temp64.val = pci_map_single(h->pdev, buff[i],
				buff_size[i], PCI_DMA_BIDIRECTIONAL);
			c->SG[i].Addr.lower = temp64.val32.lower;
			c->SG[i].Addr.upper = temp64.val32.upper;
			c->SG[i].Len = buff_size[i];
			/* we are not chaining */
			c->SG[i].Ext = 0;
		}
	}
	hpsa_scsi_do_simple_cmd_core_if_no_lockup(h, c);
	if (sg_used)
		hpsa_pci_unmap(h->pdev, c, sg_used, PCI_DMA_BIDIRECTIONAL);
	check_ioctl_unit_attention(h, c);
	/* Copy the error information out */
	memcpy(&ioc->error_info, c->err_info, sizeof(ioc->error_info));
	if (copy_to_user(argp, ioc, sizeof(*ioc))) {
		cmd_special_free(h, c);
		status = -EFAULT;
		goto cleanup1;
	}
	if (ioc->Request.Type.Direction == XFER_READ && ioc->buf_size > 0) {
		/* Copy the data out of the buffer we created */
		BYTE __user *ptr = ioc->buf;
		for (i = 0; i < sg_used; i++) {
			if (copy_to_user(ptr, buff[i], buff_size[i])) {
				cmd_special_free(h, c);
				status = -EFAULT;
				goto cleanup1;
			}
			ptr += buff_size[i];
		}
	}
	cmd_special_free(h, c);
	status = 0;
cleanup1:
	if (buff) {
		for (i = 0; i < sg_used; i++)
			kfree(buff[i]);
		kfree(buff);
	}
	kfree(buff_size);
	kfree(ioc);
	return status;
}

static void check_ioctl_unit_attention(struct ctlr_info *h,
	struct CommandList *c)
{
	if (c->err_info->CommandStatus == CMD_TARGET_STATUS &&
		c->err_info->ScsiStatus != SAM_STAT_CHECK_CONDITION)
		(void) check_for_unit_attention(h, c);
}

/*
 * ioctl
 */
static int hpsa_ioctl(struct scsi_device *dev, int cmd, void *arg)
{
	struct ctlr_info *h;
	void __user *argp = (void __user *)arg;

	h = sdev_to_hba(dev);

	switch (cmd) {
	case CCISS_DEREGDISK:
	case CCISS_REGNEWDISK:
	case CCISS_REGNEWD:
		hpsa_scan_start(h->scsi_host);
		return 0;
	case CCISS_GETPCIINFO:
		return hpsa_getpciinfo_ioctl(h, argp);
	case CCISS_GETDRIVVER:
		return hpsa_getdrivver_ioctl(h, argp);
	case CCISS_PASSTHRU:
		return hpsa_passthru_ioctl(h, argp);
	case CCISS_BIG_PASSTHRU:
		return hpsa_big_passthru_ioctl(h, argp);
	default:
		return -ENOTTY;
	}
}

static int __devinit hpsa_send_host_reset(struct ctlr_info *h,
	unsigned char *scsi3addr, u8 reset_type)
{
	struct CommandList *c;

	c = cmd_alloc(h);
	if (!c)
		return -ENOMEM;
	fill_cmd(c, HPSA_DEVICE_RESET_MSG, h, NULL, 0, 0,
		RAID_CTLR_LUNID, TYPE_MSG);
	c->Request.CDB[1] = reset_type; /* fill_cmd defaults to target reset */
	c->waiting = NULL;
	enqueue_cmd_and_start_io(h, c);
	/* Don't wait for completion, the reset won't complete.  Don't free
	 * the command either.  This is the last command we will send before
	 * re-initializing everything, so it doesn't matter and won't leak.
	 */
	return 0;
}

static void fill_cmd(struct CommandList *c, u8 cmd, struct ctlr_info *h,
	void *buff, size_t size, u8 page_code, unsigned char *scsi3addr,
	int cmd_type)
{
	int pci_dir = XFER_NONE;

	c->cmd_type = CMD_IOCTL_PEND;
	c->Header.ReplyQueue = 0;
	if (buff != NULL && size > 0) {
		c->Header.SGList = 1;
		c->Header.SGTotal = 1;
	} else {
		c->Header.SGList = 0;
		c->Header.SGTotal = 0;
	}
	c->Header.Tag.lower = c->busaddr;
	memcpy(c->Header.LUN.LunAddrBytes, scsi3addr, 8);

	c->Request.Type.Type = cmd_type;
	if (cmd_type == TYPE_CMD) {
		switch (cmd) {
		case HPSA_INQUIRY:
			/* are we trying to read a vital product page */
			if (page_code != 0) {
				c->Request.CDB[1] = 0x01;
				c->Request.CDB[2] = page_code;
			}
			c->Request.CDBLen = 6;
			c->Request.Type.Attribute = ATTR_SIMPLE;
			c->Request.Type.Direction = XFER_READ;
			c->Request.Timeout = 0;
			c->Request.CDB[0] = HPSA_INQUIRY;
			c->Request.CDB[4] = size & 0xFF;
			break;
		case HPSA_REPORT_LOG:
		case HPSA_REPORT_PHYS:
			/* Talking to the controller, so it's a physical
			 * command: mode = 00, target = 0.  Nothing to write.
			 */
			c->Request.CDBLen = 12;
			c->Request.Type.Attribute = ATTR_SIMPLE;
			c->Request.Type.Direction = XFER_READ;
			c->Request.Timeout = 0;
			c->Request.CDB[0] = cmd;
			c->Request.CDB[6] = (size >> 24) & 0xFF; /* MSB */
			c->Request.CDB[7] = (size >> 16) & 0xFF;
			c->Request.CDB[8] = (size >> 8) & 0xFF;
			c->Request.CDB[9] = size & 0xFF;
			break;
		case HPSA_CACHE_FLUSH:
			c->Request.CDBLen = 12;
			c->Request.Type.Attribute = ATTR_SIMPLE;
			c->Request.Type.Direction = XFER_WRITE;
			c->Request.Timeout = 0;
			c->Request.CDB[0] = BMIC_WRITE;
			c->Request.CDB[6] = BMIC_CACHE_FLUSH;
			c->Request.CDB[7] = (size >> 8) & 0xFF;
			c->Request.CDB[8] = size & 0xFF;
			break;
		case TEST_UNIT_READY:
			c->Request.CDBLen = 6;
			c->Request.Type.Attribute = ATTR_SIMPLE;
			c->Request.Type.Direction = XFER_NONE;
			c->Request.Timeout = 0;
			break;
		default:
			dev_warn(&h->pdev->dev, "unknown command 0x%02x\n",
				cmd);
			BUG();
			return;
		}
	} else if (cmd_type == TYPE_MSG) {
		switch (cmd) {

		case  HPSA_DEVICE_RESET_MSG:
			c->Request.CDBLen = 16;
			c->Request.Type.Type = 1; /* It is a MSG not a CMD */
			c->Request.Type.Attribute = ATTR_SIMPLE;
			c->Request.Type.Direction = XFER_NONE;
			c->Request.Timeout = 0; /* Don't time out */
			memset(&c->Request.CDB[0], 0, sizeof(c->Request.CDB));
			c->Request.CDB[0] = cmd;
			c->Request.CDB[1] = 0x03;  /* Reset target above */
			/* If bytes 4-7 are zero, it means reset the LunID
			 * device */
			c->Request.CDB[4] = 0x00;
			c->Request.CDB[5] = 0x00;
			c->Request.CDB[6] = 0x00;
			c->Request.CDB[7] = 0x00;
			break;

		default:
			dev_warn(&h->pdev->dev, "unknown message type %d\n",
				cmd);
			BUG();
		}
	} else {
		dev_warn(&h->pdev->dev, "unknown command type %d\n", cmd_type);
		BUG();
	}

	switch (c->Request.Type.Direction) {
	case XFER_READ:
pci_dir = PCI_DMA_FROMDEVICE; 2954 break; 2955 case XFER_WRITE: 2956 pci_dir = PCI_DMA_TODEVICE; 2957 break; 2958 case XFER_NONE: 2959 pci_dir = PCI_DMA_NONE; 2960 break; 2961 default: 2962 pci_dir = PCI_DMA_BIDIRECTIONAL; 2963 } 2964 2965 hpsa_map_one(h->pdev, c, buff, size, pci_dir); 2966 2967 return; 2968 } 2969 2970 /* 2971 * Map (physical) PCI mem into (virtual) kernel space 2972 */ 2973 static void __iomem *remap_pci_mem(ulong base, ulong size) 2974 { 2975 ulong page_base = ((ulong) base) & PAGE_MASK; 2976 ulong page_offs = ((ulong) base) - page_base; 2977 void __iomem *page_remapped = ioremap(page_base, page_offs + size); 2978 2979 return page_remapped ? (page_remapped + page_offs) : NULL; 2980 } 2981 2982 /* Takes cmds off the submission queue and sends them to the hardware, 2983 * then puts them on the queue of cmds waiting for completion. 2984 */ 2985 static void start_io(struct ctlr_info *h) 2986 { 2987 struct CommandList *c; 2988 2989 while (!list_empty(&h->reqQ)) { 2990 c = list_entry(h->reqQ.next, struct CommandList, list); 2991 /* can't do anything if fifo is full */ 2992 if ((h->access.fifo_full(h))) { 2993 dev_warn(&h->pdev->dev, "fifo full\n"); 2994 break; 2995 } 2996 2997 /* Get the first entry from the Request Q */ 2998 removeQ(c); 2999 h->Qdepth--; 3000 3001 /* Tell the controller execute command */ 3002 h->access.submit_command(h, c); 3003 3004 /* Put job onto the completed Q */ 3005 addQ(&h->cmpQ, c); 3006 } 3007 } 3008 3009 static inline unsigned long get_next_completion(struct ctlr_info *h) 3010 { 3011 return h->access.command_completed(h); 3012 } 3013 3014 static inline bool interrupt_pending(struct ctlr_info *h) 3015 { 3016 return h->access.intr_pending(h); 3017 } 3018 3019 static inline long interrupt_not_for_us(struct ctlr_info *h) 3020 { 3021 return (h->access.intr_pending(h) == 0) || 3022 (h->interrupts_enabled == 0); 3023 } 3024 3025 static inline int bad_tag(struct ctlr_info *h, u32 tag_index, 3026 u32 raw_tag) 3027 { 3028 if (unlikely(tag_index >= h->nr_cmds)) { 3029 dev_warn(&h->pdev->dev, "bad tag 0x%08x ignored.\n", raw_tag); 3030 return 1; 3031 } 3032 return 0; 3033 } 3034 3035 static inline void finish_cmd(struct CommandList *c, u32 raw_tag) 3036 { 3037 removeQ(c); 3038 if (likely(c->cmd_type == CMD_SCSI)) 3039 complete_scsi_command(c); 3040 else if (c->cmd_type == CMD_IOCTL_PEND) 3041 complete(c->waiting); 3042 } 3043 3044 static inline u32 hpsa_tag_contains_index(u32 tag) 3045 { 3046 return tag & DIRECT_LOOKUP_BIT; 3047 } 3048 3049 static inline u32 hpsa_tag_to_index(u32 tag) 3050 { 3051 return tag >> DIRECT_LOOKUP_SHIFT; 3052 } 3053 3054 3055 static inline u32 hpsa_tag_discard_error_bits(struct ctlr_info *h, u32 tag) 3056 { 3057 #define HPSA_PERF_ERROR_BITS ((1 << DIRECT_LOOKUP_SHIFT) - 1) 3058 #define HPSA_SIMPLE_ERROR_BITS 0x03 3059 if (unlikely(!(h->transMethod & CFGTBL_Trans_Performant))) 3060 return tag & ~HPSA_SIMPLE_ERROR_BITS; 3061 return tag & ~HPSA_PERF_ERROR_BITS; 3062 } 3063 3064 /* process completion of an indexed ("direct lookup") command */ 3065 static inline u32 process_indexed_cmd(struct ctlr_info *h, 3066 u32 raw_tag) 3067 { 3068 u32 tag_index; 3069 struct CommandList *c; 3070 3071 tag_index = hpsa_tag_to_index(raw_tag); 3072 if (bad_tag(h, tag_index, raw_tag)) 3073 return next_command(h); 3074 c = h->cmd_pool + tag_index; 3075 finish_cmd(c, raw_tag); 3076 return next_command(h); 3077 } 3078 3079 /* process completion of a non-indexed command */ 3080 static inline u32 process_nonindexed_cmd(struct ctlr_info *h, 3081 u32 raw_tag) 
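/* (Non-indexed tags can't use the cmd_pool index shortcut, so we walk
 * h->cmpQ comparing the tag against each command's bus address.)
 */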
3082 { 3083 u32 tag; 3084 struct CommandList *c = NULL; 3085 3086 tag = hpsa_tag_discard_error_bits(h, raw_tag); 3087 list_for_each_entry(c, &h->cmpQ, list) { 3088 if ((c->busaddr & 0xFFFFFFE0) == (tag & 0xFFFFFFE0)) { 3089 finish_cmd(c, raw_tag); 3090 return next_command(h); 3091 } 3092 } 3093 bad_tag(h, h->nr_cmds + 1, raw_tag); 3094 return next_command(h); 3095 } 3096 3097 /* Some controllers, like p400, will give us one interrupt 3098 * after a soft reset, even if we turned interrupts off. 3099 * Only need to check for this in the hpsa_xxx_discard_completions 3100 * functions. 3101 */ 3102 static int ignore_bogus_interrupt(struct ctlr_info *h) 3103 { 3104 if (likely(!reset_devices)) 3105 return 0; 3106 3107 if (likely(h->interrupts_enabled)) 3108 return 0; 3109 3110 dev_info(&h->pdev->dev, "Received interrupt while interrupts disabled " 3111 "(known firmware bug.) Ignoring.\n"); 3112 3113 return 1; 3114 } 3115 3116 static irqreturn_t hpsa_intx_discard_completions(int irq, void *dev_id) 3117 { 3118 struct ctlr_info *h = dev_id; 3119 unsigned long flags; 3120 u32 raw_tag; 3121 3122 if (ignore_bogus_interrupt(h)) 3123 return IRQ_NONE; 3124 3125 if (interrupt_not_for_us(h)) 3126 return IRQ_NONE; 3127 spin_lock_irqsave(&h->lock, flags); 3128 h->last_intr_timestamp = get_jiffies_64(); 3129 while (interrupt_pending(h)) { 3130 raw_tag = get_next_completion(h); 3131 while (raw_tag != FIFO_EMPTY) 3132 raw_tag = next_command(h); 3133 } 3134 spin_unlock_irqrestore(&h->lock, flags); 3135 return IRQ_HANDLED; 3136 } 3137 3138 static irqreturn_t hpsa_msix_discard_completions(int irq, void *dev_id) 3139 { 3140 struct ctlr_info *h = dev_id; 3141 unsigned long flags; 3142 u32 raw_tag; 3143 3144 if (ignore_bogus_interrupt(h)) 3145 return IRQ_NONE; 3146 3147 spin_lock_irqsave(&h->lock, flags); 3148 h->last_intr_timestamp = get_jiffies_64(); 3149 raw_tag = get_next_completion(h); 3150 while (raw_tag != FIFO_EMPTY) 3151 raw_tag = next_command(h); 3152 spin_unlock_irqrestore(&h->lock, flags); 3153 return IRQ_HANDLED; 3154 } 3155 3156 static irqreturn_t do_hpsa_intr_intx(int irq, void *dev_id) 3157 { 3158 struct ctlr_info *h = dev_id; 3159 unsigned long flags; 3160 u32 raw_tag; 3161 3162 if (interrupt_not_for_us(h)) 3163 return IRQ_NONE; 3164 spin_lock_irqsave(&h->lock, flags); 3165 h->last_intr_timestamp = get_jiffies_64(); 3166 while (interrupt_pending(h)) { 3167 raw_tag = get_next_completion(h); 3168 while (raw_tag != FIFO_EMPTY) { 3169 if (hpsa_tag_contains_index(raw_tag)) 3170 raw_tag = process_indexed_cmd(h, raw_tag); 3171 else 3172 raw_tag = process_nonindexed_cmd(h, raw_tag); 3173 } 3174 } 3175 spin_unlock_irqrestore(&h->lock, flags); 3176 return IRQ_HANDLED; 3177 } 3178 3179 static irqreturn_t do_hpsa_intr_msi(int irq, void *dev_id) 3180 { 3181 struct ctlr_info *h = dev_id; 3182 unsigned long flags; 3183 u32 raw_tag; 3184 3185 spin_lock_irqsave(&h->lock, flags); 3186 h->last_intr_timestamp = get_jiffies_64(); 3187 raw_tag = get_next_completion(h); 3188 while (raw_tag != FIFO_EMPTY) { 3189 if (hpsa_tag_contains_index(raw_tag)) 3190 raw_tag = process_indexed_cmd(h, raw_tag); 3191 else 3192 raw_tag = process_nonindexed_cmd(h, raw_tag); 3193 } 3194 spin_unlock_irqrestore(&h->lock, flags); 3195 return IRQ_HANDLED; 3196 } 3197 3198 /* Send a message CDB to the firmware. Careful, this only works 3199 * in simple mode, not performant mode due to the tag lookup. 3200 * We only ever use this immediately after a controller reset. 
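 * The command is built by hand in coherent memory and completion is
 * detected by polling the reply port for a tag that matches the
 * command's own physical address.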
3201 */ 3202 static __devinit int hpsa_message(struct pci_dev *pdev, unsigned char opcode, 3203 unsigned char type) 3204 { 3205 struct Command { 3206 struct CommandListHeader CommandHeader; 3207 struct RequestBlock Request; 3208 struct ErrDescriptor ErrorDescriptor; 3209 }; 3210 struct Command *cmd; 3211 static const size_t cmd_sz = sizeof(*cmd) + 3212 sizeof(cmd->ErrorDescriptor); 3213 dma_addr_t paddr64; 3214 uint32_t paddr32, tag; 3215 void __iomem *vaddr; 3216 int i, err; 3217 3218 vaddr = pci_ioremap_bar(pdev, 0); 3219 if (vaddr == NULL) 3220 return -ENOMEM; 3221 3222 /* The Inbound Post Queue only accepts 32-bit physical addresses for the 3223 * CCISS commands, so they must be allocated from the lower 4GiB of 3224 * memory. 3225 */ 3226 err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)); 3227 if (err) { 3228 iounmap(vaddr); 3229 return -ENOMEM; 3230 } 3231 3232 cmd = pci_alloc_consistent(pdev, cmd_sz, &paddr64); 3233 if (cmd == NULL) { 3234 iounmap(vaddr); 3235 return -ENOMEM; 3236 } 3237 3238 /* This must fit, because of the 32-bit consistent DMA mask. Also, 3239 * although there's no guarantee, we assume that the address is at 3240 * least 4-byte aligned (most likely, it's page-aligned). 3241 */ 3242 paddr32 = paddr64; 3243 3244 cmd->CommandHeader.ReplyQueue = 0; 3245 cmd->CommandHeader.SGList = 0; 3246 cmd->CommandHeader.SGTotal = 0; 3247 cmd->CommandHeader.Tag.lower = paddr32; 3248 cmd->CommandHeader.Tag.upper = 0; 3249 memset(&cmd->CommandHeader.LUN.LunAddrBytes, 0, 8); 3250 3251 cmd->Request.CDBLen = 16; 3252 cmd->Request.Type.Type = TYPE_MSG; 3253 cmd->Request.Type.Attribute = ATTR_HEADOFQUEUE; 3254 cmd->Request.Type.Direction = XFER_NONE; 3255 cmd->Request.Timeout = 0; /* Don't time out */ 3256 cmd->Request.CDB[0] = opcode; 3257 cmd->Request.CDB[1] = type; 3258 memset(&cmd->Request.CDB[2], 0, 14); /* rest of the CDB is reserved */ 3259 cmd->ErrorDescriptor.Addr.lower = paddr32 + sizeof(*cmd); 3260 cmd->ErrorDescriptor.Addr.upper = 0; 3261 cmd->ErrorDescriptor.Len = sizeof(struct ErrorInfo); 3262 3263 writel(paddr32, vaddr + SA5_REQUEST_PORT_OFFSET); 3264 3265 for (i = 0; i < HPSA_MSG_SEND_RETRY_LIMIT; i++) { 3266 tag = readl(vaddr + SA5_REPLY_PORT_OFFSET); 3267 if ((tag & ~HPSA_SIMPLE_ERROR_BITS) == paddr32) 3268 break; 3269 msleep(HPSA_MSG_SEND_RETRY_INTERVAL_MSECS); 3270 } 3271 3272 iounmap(vaddr); 3273 3274 /* we leak the DMA buffer here ... no choice since the controller could 3275 * still complete the command. 3276 */ 3277 if (i == HPSA_MSG_SEND_RETRY_LIMIT) { 3278 dev_err(&pdev->dev, "controller message %02x:%02x timed out\n", 3279 opcode, type); 3280 return -ETIMEDOUT; 3281 } 3282 3283 pci_free_consistent(pdev, cmd_sz, cmd, paddr64); 3284 3285 if (tag & HPSA_ERROR_BIT) { 3286 dev_err(&pdev->dev, "controller message %02x:%02x failed\n", 3287 opcode, type); 3288 return -EIO; 3289 } 3290 3291 dev_info(&pdev->dev, "controller message %02x:%02x succeeded\n", 3292 opcode, type); 3293 return 0; 3294 } 3295 3296 #define hpsa_noop(p) hpsa_message(p, 3, 0) 3297 3298 static int hpsa_controller_hard_reset(struct pci_dev *pdev, 3299 void * __iomem vaddr, u32 use_doorbell) 3300 { 3301 u16 pmcsr; 3302 int pos; 3303 3304 if (use_doorbell) { 3305 /* For everything after the P600, the PCI power state method 3306 * of resetting the controller doesn't work, so we have this 3307 * other way using the doorbell register. 
3308 */ 3309 dev_info(&pdev->dev, "using doorbell to reset controller\n"); 3310 writel(use_doorbell, vaddr + SA5_DOORBELL); 3311 } else { /* Try to do it the PCI power state way */ 3312 3313 /* Quoting from the Open CISS Specification: "The Power 3314 * Management Control/Status Register (CSR) controls the power 3315 * state of the device. The normal operating state is D0, 3316 * CSR=00h. The software off state is D3, CSR=03h. To reset 3317 * the controller, place the interface device in D3 then to D0, 3318 * this causes a secondary PCI reset which will reset the 3319 * controller." */ 3320 3321 pos = pci_find_capability(pdev, PCI_CAP_ID_PM); 3322 if (pos == 0) { 3323 dev_err(&pdev->dev, 3324 "hpsa_reset_controller: " 3325 "PCI PM not supported\n"); 3326 return -ENODEV; 3327 } 3328 dev_info(&pdev->dev, "using PCI PM to reset controller\n"); 3329 /* enter the D3hot power management state */ 3330 pci_read_config_word(pdev, pos + PCI_PM_CTRL, &pmcsr); 3331 pmcsr &= ~PCI_PM_CTRL_STATE_MASK; 3332 pmcsr |= PCI_D3hot; 3333 pci_write_config_word(pdev, pos + PCI_PM_CTRL, pmcsr); 3334 3335 msleep(500); 3336 3337 /* enter the D0 power management state */ 3338 pmcsr &= ~PCI_PM_CTRL_STATE_MASK; 3339 pmcsr |= PCI_D0; 3340 pci_write_config_word(pdev, pos + PCI_PM_CTRL, pmcsr); 3341 3342 /* 3343 * The P600 requires a small delay when changing states. 3344 * Otherwise we may think the board did not reset and we bail. 3345 * This for kdump only and is particular to the P600. 3346 */ 3347 msleep(500); 3348 } 3349 return 0; 3350 } 3351 3352 static __devinit void init_driver_version(char *driver_version, int len) 3353 { 3354 memset(driver_version, 0, len); 3355 strncpy(driver_version, "hpsa " HPSA_DRIVER_VERSION, len - 1); 3356 } 3357 3358 static __devinit int write_driver_ver_to_cfgtable( 3359 struct CfgTable __iomem *cfgtable) 3360 { 3361 char *driver_version; 3362 int i, size = sizeof(cfgtable->driver_version); 3363 3364 driver_version = kmalloc(size, GFP_KERNEL); 3365 if (!driver_version) 3366 return -ENOMEM; 3367 3368 init_driver_version(driver_version, size); 3369 for (i = 0; i < size; i++) 3370 writeb(driver_version[i], &cfgtable->driver_version[i]); 3371 kfree(driver_version); 3372 return 0; 3373 } 3374 3375 static __devinit void read_driver_ver_from_cfgtable( 3376 struct CfgTable __iomem *cfgtable, unsigned char *driver_ver) 3377 { 3378 int i; 3379 3380 for (i = 0; i < sizeof(cfgtable->driver_version); i++) 3381 driver_ver[i] = readb(&cfgtable->driver_version[i]); 3382 } 3383 3384 static __devinit int controller_reset_failed( 3385 struct CfgTable __iomem *cfgtable) 3386 { 3387 3388 char *driver_ver, *old_driver_ver; 3389 int rc, size = sizeof(cfgtable->driver_version); 3390 3391 old_driver_ver = kmalloc(2 * size, GFP_KERNEL); 3392 if (!old_driver_ver) 3393 return -ENOMEM; 3394 driver_ver = old_driver_ver + size; 3395 3396 /* After a reset, the 32 bytes of "driver version" in the cfgtable 3397 * should have been changed, otherwise we know the reset failed. 3398 */ 3399 init_driver_version(old_driver_ver, size); 3400 read_driver_ver_from_cfgtable(cfgtable, driver_ver); 3401 rc = !memcmp(driver_ver, old_driver_ver, size); 3402 kfree(old_driver_ver); 3403 return rc; 3404 } 3405 /* This does a hard reset of the controller using PCI power management 3406 * states or the using the doorbell register. 
3407 */ 3408 static __devinit int hpsa_kdump_hard_reset_controller(struct pci_dev *pdev) 3409 { 3410 u64 cfg_offset; 3411 u32 cfg_base_addr; 3412 u64 cfg_base_addr_index; 3413 void __iomem *vaddr; 3414 unsigned long paddr; 3415 u32 misc_fw_support; 3416 int rc; 3417 struct CfgTable __iomem *cfgtable; 3418 u32 use_doorbell; 3419 u32 board_id; 3420 u16 command_register; 3421 3422 /* For controllers as old as the P600, this is very nearly 3423 * the same thing as 3424 * 3425 * pci_save_state(pci_dev); 3426 * pci_set_power_state(pci_dev, PCI_D3hot); 3427 * pci_set_power_state(pci_dev, PCI_D0); 3428 * pci_restore_state(pci_dev); 3429 * 3430 * For controllers newer than the P600, the pci power state 3431 * method of resetting doesn't work so we have another way 3432 * using the doorbell register. 3433 */ 3434 3435 rc = hpsa_lookup_board_id(pdev, &board_id); 3436 if (rc < 0 || !ctlr_is_resettable(board_id)) { 3437 dev_warn(&pdev->dev, "Not resetting device.\n"); 3438 return -ENODEV; 3439 } 3440 3441 /* if controller is soft- but not hard resettable... */ 3442 if (!ctlr_is_hard_resettable(board_id)) 3443 return -ENOTSUPP; /* try soft reset later. */ 3444 3445 /* Save the PCI command register */ 3446 pci_read_config_word(pdev, 4, &command_register); 3447 /* Turn the board off. This is so that later pci_restore_state() 3448 * won't turn the board on before the rest of config space is ready. 3449 */ 3450 pci_disable_device(pdev); 3451 pci_save_state(pdev); 3452 3453 /* find the first memory BAR, so we can find the cfg table */ 3454 rc = hpsa_pci_find_memory_BAR(pdev, &paddr); 3455 if (rc) 3456 return rc; 3457 vaddr = remap_pci_mem(paddr, 0x250); 3458 if (!vaddr) 3459 return -ENOMEM; 3460 3461 /* find cfgtable in order to check if reset via doorbell is supported */ 3462 rc = hpsa_find_cfg_addrs(pdev, vaddr, &cfg_base_addr, 3463 &cfg_base_addr_index, &cfg_offset); 3464 if (rc) 3465 goto unmap_vaddr; 3466 cfgtable = remap_pci_mem(pci_resource_start(pdev, 3467 cfg_base_addr_index) + cfg_offset, sizeof(*cfgtable)); 3468 if (!cfgtable) { 3469 rc = -ENOMEM; 3470 goto unmap_vaddr; 3471 } 3472 rc = write_driver_ver_to_cfgtable(cfgtable); 3473 if (rc) 3474 goto unmap_vaddr; 3475 3476 /* If reset via doorbell register is supported, use that. 3477 * There are two such methods. Favor the newest method. 3478 */ 3479 misc_fw_support = readl(&cfgtable->misc_fw_support); 3480 use_doorbell = misc_fw_support & MISC_FW_DOORBELL_RESET2; 3481 if (use_doorbell) { 3482 use_doorbell = DOORBELL_CTLR_RESET2; 3483 } else { 3484 use_doorbell = misc_fw_support & MISC_FW_DOORBELL_RESET; 3485 if (use_doorbell) { 3486 dev_warn(&pdev->dev, "Soft reset not supported. " 3487 "Firmware update is required.\n"); 3488 rc = -ENOTSUPP; /* try soft reset */ 3489 goto unmap_cfgtable; 3490 } 3491 } 3492 3493 rc = hpsa_controller_hard_reset(pdev, vaddr, use_doorbell); 3494 if (rc) 3495 goto unmap_cfgtable; 3496 3497 pci_restore_state(pdev); 3498 rc = pci_enable_device(pdev); 3499 if (rc) { 3500 dev_warn(&pdev->dev, "failed to enable device.\n"); 3501 goto unmap_cfgtable; 3502 } 3503 pci_write_config_word(pdev, 4, command_register); 3504 3505 /* Some devices (notably the HP Smart Array 5i Controller) 3506 need a little pause here */ 3507 msleep(HPSA_POST_RESET_PAUSE_MSECS); 3508 3509 /* Wait for board to become not ready, then ready. */ 3510 dev_info(&pdev->dev, "Waiting for board to reset.\n"); 3511 rc = hpsa_wait_for_board_state(pdev, vaddr, BOARD_NOT_READY); 3512 if (rc) { 3513 dev_warn(&pdev->dev, 3514 "failed waiting for board to reset." 
3515 " Will try soft reset.\n"); 3516 rc = -ENOTSUPP; /* Not expected, but try soft reset later */ 3517 goto unmap_cfgtable; 3518 } 3519 rc = hpsa_wait_for_board_state(pdev, vaddr, BOARD_READY); 3520 if (rc) { 3521 dev_warn(&pdev->dev, 3522 "failed waiting for board to become ready " 3523 "after hard reset\n"); 3524 goto unmap_cfgtable; 3525 } 3526 3527 rc = controller_reset_failed(vaddr); 3528 if (rc < 0) 3529 goto unmap_cfgtable; 3530 if (rc) { 3531 dev_warn(&pdev->dev, "Unable to successfully reset " 3532 "controller. Will try soft reset.\n"); 3533 rc = -ENOTSUPP; 3534 } else { 3535 dev_info(&pdev->dev, "board ready after hard reset.\n"); 3536 } 3537 3538 unmap_cfgtable: 3539 iounmap(cfgtable); 3540 3541 unmap_vaddr: 3542 iounmap(vaddr); 3543 return rc; 3544 } 3545 3546 /* 3547 * We cannot read the structure directly, for portability we must use 3548 * the io functions. 3549 * This is for debug only. 3550 */ 3551 static void print_cfg_table(struct device *dev, struct CfgTable *tb) 3552 { 3553 #ifdef HPSA_DEBUG 3554 int i; 3555 char temp_name[17]; 3556 3557 dev_info(dev, "Controller Configuration information\n"); 3558 dev_info(dev, "------------------------------------\n"); 3559 for (i = 0; i < 4; i++) 3560 temp_name[i] = readb(&(tb->Signature[i])); 3561 temp_name[4] = '\0'; 3562 dev_info(dev, " Signature = %s\n", temp_name); 3563 dev_info(dev, " Spec Number = %d\n", readl(&(tb->SpecValence))); 3564 dev_info(dev, " Transport methods supported = 0x%x\n", 3565 readl(&(tb->TransportSupport))); 3566 dev_info(dev, " Transport methods active = 0x%x\n", 3567 readl(&(tb->TransportActive))); 3568 dev_info(dev, " Requested transport Method = 0x%x\n", 3569 readl(&(tb->HostWrite.TransportRequest))); 3570 dev_info(dev, " Coalesce Interrupt Delay = 0x%x\n", 3571 readl(&(tb->HostWrite.CoalIntDelay))); 3572 dev_info(dev, " Coalesce Interrupt Count = 0x%x\n", 3573 readl(&(tb->HostWrite.CoalIntCount))); 3574 dev_info(dev, " Max outstanding commands = 0x%d\n", 3575 readl(&(tb->CmdsOutMax))); 3576 dev_info(dev, " Bus Types = 0x%x\n", readl(&(tb->BusTypes))); 3577 for (i = 0; i < 16; i++) 3578 temp_name[i] = readb(&(tb->ServerName[i])); 3579 temp_name[16] = '\0'; 3580 dev_info(dev, " Server Name = %s\n", temp_name); 3581 dev_info(dev, " Heartbeat Counter = 0x%x\n\n\n", 3582 readl(&(tb->HeartBeat))); 3583 #endif /* HPSA_DEBUG */ 3584 } 3585 3586 static int find_PCI_BAR_index(struct pci_dev *pdev, unsigned long pci_bar_addr) 3587 { 3588 int i, offset, mem_type, bar_type; 3589 3590 if (pci_bar_addr == PCI_BASE_ADDRESS_0) /* looking for BAR zero? */ 3591 return 0; 3592 offset = 0; 3593 for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) { 3594 bar_type = pci_resource_flags(pdev, i) & PCI_BASE_ADDRESS_SPACE; 3595 if (bar_type == PCI_BASE_ADDRESS_SPACE_IO) 3596 offset += 4; 3597 else { 3598 mem_type = pci_resource_flags(pdev, i) & 3599 PCI_BASE_ADDRESS_MEM_TYPE_MASK; 3600 switch (mem_type) { 3601 case PCI_BASE_ADDRESS_MEM_TYPE_32: 3602 case PCI_BASE_ADDRESS_MEM_TYPE_1M: 3603 offset += 4; /* 32 bit */ 3604 break; 3605 case PCI_BASE_ADDRESS_MEM_TYPE_64: 3606 offset += 8; 3607 break; 3608 default: /* reserved in PCI 2.2 */ 3609 dev_warn(&pdev->dev, 3610 "base address is invalid\n"); 3611 return -1; 3612 break; 3613 } 3614 } 3615 if (offset == pci_bar_addr - PCI_BASE_ADDRESS_0) 3616 return i + 1; 3617 } 3618 return -1; 3619 } 3620 3621 /* If MSI/MSI-X is supported by the kernel we will try to enable it on 3622 * controllers that are capable. If not, we use IO-APIC mode. 
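 * We ask for four MSI-X vectors first, then fall back to a single MSI
 * vector, and finally to the legacy pin-based interrupt in h->pdev->irq.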
3623 */ 3624 3625 static void __devinit hpsa_interrupt_mode(struct ctlr_info *h) 3626 { 3627 #ifdef CONFIG_PCI_MSI 3628 int err; 3629 struct msix_entry hpsa_msix_entries[4] = { {0, 0}, {0, 1}, 3630 {0, 2}, {0, 3} 3631 }; 3632 3633 /* Some boards advertise MSI but don't really support it */ 3634 if ((h->board_id == 0x40700E11) || (h->board_id == 0x40800E11) || 3635 (h->board_id == 0x40820E11) || (h->board_id == 0x40830E11)) 3636 goto default_int_mode; 3637 if (pci_find_capability(h->pdev, PCI_CAP_ID_MSIX)) { 3638 dev_info(&h->pdev->dev, "MSIX\n"); 3639 err = pci_enable_msix(h->pdev, hpsa_msix_entries, 4); 3640 if (!err) { 3641 h->intr[0] = hpsa_msix_entries[0].vector; 3642 h->intr[1] = hpsa_msix_entries[1].vector; 3643 h->intr[2] = hpsa_msix_entries[2].vector; 3644 h->intr[3] = hpsa_msix_entries[3].vector; 3645 h->msix_vector = 1; 3646 return; 3647 } 3648 if (err > 0) { 3649 dev_warn(&h->pdev->dev, "only %d MSI-X vectors " 3650 "available\n", err); 3651 goto default_int_mode; 3652 } else { 3653 dev_warn(&h->pdev->dev, "MSI-X init failed %d\n", 3654 err); 3655 goto default_int_mode; 3656 } 3657 } 3658 if (pci_find_capability(h->pdev, PCI_CAP_ID_MSI)) { 3659 dev_info(&h->pdev->dev, "MSI\n"); 3660 if (!pci_enable_msi(h->pdev)) 3661 h->msi_vector = 1; 3662 else 3663 dev_warn(&h->pdev->dev, "MSI init failed\n"); 3664 } 3665 default_int_mode: 3666 #endif /* CONFIG_PCI_MSI */ 3667 /* if we get here we're going to use the default interrupt mode */ 3668 h->intr[h->intr_mode] = h->pdev->irq; 3669 } 3670 3671 static int __devinit hpsa_lookup_board_id(struct pci_dev *pdev, u32 *board_id) 3672 { 3673 int i; 3674 u32 subsystem_vendor_id, subsystem_device_id; 3675 3676 subsystem_vendor_id = pdev->subsystem_vendor; 3677 subsystem_device_id = pdev->subsystem_device; 3678 *board_id = ((subsystem_device_id << 16) & 0xffff0000) | 3679 subsystem_vendor_id; 3680 3681 for (i = 0; i < ARRAY_SIZE(products); i++) 3682 if (*board_id == products[i].board_id) 3683 return i; 3684 3685 if ((subsystem_vendor_id != PCI_VENDOR_ID_HP && 3686 subsystem_vendor_id != PCI_VENDOR_ID_COMPAQ) || 3687 !hpsa_allow_any) { 3688 dev_warn(&pdev->dev, "unrecognized board ID: " 3689 "0x%08x, ignoring.\n", *board_id); 3690 return -ENODEV; 3691 } 3692 return ARRAY_SIZE(products) - 1; /* generic unknown smart array */ 3693 } 3694 3695 static inline bool hpsa_board_disabled(struct pci_dev *pdev) 3696 { 3697 u16 command; 3698 3699 (void) pci_read_config_word(pdev, PCI_COMMAND, &command); 3700 return ((command & PCI_COMMAND_MEMORY) == 0); 3701 } 3702 3703 static int __devinit hpsa_pci_find_memory_BAR(struct pci_dev *pdev, 3704 unsigned long *memory_bar) 3705 { 3706 int i; 3707 3708 for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) 3709 if (pci_resource_flags(pdev, i) & IORESOURCE_MEM) { 3710 /* addressing mode bits already removed */ 3711 *memory_bar = pci_resource_start(pdev, i); 3712 dev_dbg(&pdev->dev, "memory BAR = %lx\n", 3713 *memory_bar); 3714 return 0; 3715 } 3716 dev_warn(&pdev->dev, "no memory BAR found\n"); 3717 return -ENODEV; 3718 } 3719 3720 static int __devinit hpsa_wait_for_board_state(struct pci_dev *pdev, 3721 void __iomem *vaddr, int wait_for_ready) 3722 { 3723 int i, iterations; 3724 u32 scratchpad; 3725 if (wait_for_ready) 3726 iterations = HPSA_BOARD_READY_ITERATIONS; 3727 else 3728 iterations = HPSA_BOARD_NOT_READY_ITERATIONS; 3729 3730 for (i = 0; i < iterations; i++) { 3731 scratchpad = readl(vaddr + SA5_SCRATCHPAD_OFFSET); 3732 if (wait_for_ready) { 3733 if (scratchpad == HPSA_FIRMWARE_READY) 3734 return 0; 3735 } else { 3736 

static inline bool hpsa_board_disabled(struct pci_dev *pdev)
{
	u16 command;

	(void) pci_read_config_word(pdev, PCI_COMMAND, &command);
	return ((command & PCI_COMMAND_MEMORY) == 0);
}

static int __devinit hpsa_pci_find_memory_BAR(struct pci_dev *pdev,
	unsigned long *memory_bar)
{
	int i;

	for (i = 0; i < DEVICE_COUNT_RESOURCE; i++)
		if (pci_resource_flags(pdev, i) & IORESOURCE_MEM) {
			/* addressing mode bits already removed */
			*memory_bar = pci_resource_start(pdev, i);
			dev_dbg(&pdev->dev, "memory BAR = %lx\n",
				*memory_bar);
			return 0;
		}
	dev_warn(&pdev->dev, "no memory BAR found\n");
	return -ENODEV;
}

static int __devinit hpsa_wait_for_board_state(struct pci_dev *pdev,
	void __iomem *vaddr, int wait_for_ready)
{
	int i, iterations;
	u32 scratchpad;

	if (wait_for_ready)
		iterations = HPSA_BOARD_READY_ITERATIONS;
	else
		iterations = HPSA_BOARD_NOT_READY_ITERATIONS;

	for (i = 0; i < iterations; i++) {
		scratchpad = readl(vaddr + SA5_SCRATCHPAD_OFFSET);
		if (wait_for_ready) {
			if (scratchpad == HPSA_FIRMWARE_READY)
				return 0;
		} else {
			if (scratchpad != HPSA_FIRMWARE_READY)
				return 0;
		}
		msleep(HPSA_BOARD_READY_POLL_INTERVAL_MSECS);
	}
	dev_warn(&pdev->dev, "board not ready, timed out.\n");
	return -ENODEV;
}

static int __devinit hpsa_find_cfg_addrs(struct pci_dev *pdev,
	void __iomem *vaddr, u32 *cfg_base_addr, u64 *cfg_base_addr_index,
	u64 *cfg_offset)
{
	*cfg_base_addr = readl(vaddr + SA5_CTCFG_OFFSET);
	*cfg_offset = readl(vaddr + SA5_CTMEM_OFFSET);
	*cfg_base_addr &= (u32) 0x0000ffff;
	*cfg_base_addr_index = find_PCI_BAR_index(pdev, *cfg_base_addr);
	if (*cfg_base_addr_index == -1) {
		dev_warn(&pdev->dev, "cannot find cfg_base_addr_index\n");
		return -ENODEV;
	}
	return 0;
}

static int __devinit hpsa_find_cfgtables(struct ctlr_info *h)
{
	u64 cfg_offset;
	u32 cfg_base_addr;
	u64 cfg_base_addr_index;
	u32 trans_offset;
	int rc;

	rc = hpsa_find_cfg_addrs(h->pdev, h->vaddr, &cfg_base_addr,
		&cfg_base_addr_index, &cfg_offset);
	if (rc)
		return rc;
	h->cfgtable = remap_pci_mem(pci_resource_start(h->pdev,
		cfg_base_addr_index) + cfg_offset, sizeof(*h->cfgtable));
	if (!h->cfgtable)
		return -ENOMEM;
	rc = write_driver_ver_to_cfgtable(h->cfgtable);
	if (rc)
		return rc;
	/* Find performant mode table. */
	trans_offset = readl(&h->cfgtable->TransMethodOffset);
	h->transtable = remap_pci_mem(pci_resource_start(h->pdev,
		cfg_base_addr_index) + cfg_offset + trans_offset,
		sizeof(*h->transtable));
	if (!h->transtable)
		return -ENOMEM;
	return 0;
}

static void __devinit hpsa_get_max_perf_mode_cmds(struct ctlr_info *h)
{
	h->max_commands = readl(&(h->cfgtable->MaxPerformantModeCommands));

	/* Limit commands in memory limited kdump scenario. */
	if (reset_devices && h->max_commands > 32)
		h->max_commands = 32;

	if (h->max_commands < 16) {
		dev_warn(&h->pdev->dev, "Controller reports "
			"max supported commands of %d, an obvious lie. "
			"Using 16.  Ensure that firmware is up to date.\n",
			h->max_commands);
		h->max_commands = 16;
	}
}

/* Interrogate the hardware for some limits:
 * max commands, max SG elements without chaining, and with chaining,
 * SG chain block size, etc.
 */
static void __devinit hpsa_find_board_params(struct ctlr_info *h)
{
	hpsa_get_max_perf_mode_cmds(h);
	h->nr_cmds = h->max_commands - 4; /* Allow room for some ioctls */
	h->maxsgentries = readl(&(h->cfgtable->MaxScatterGatherElements));
	/*
	 * Limit in-command s/g elements to 32 to save DMA'able memory.
	 * However, the spec says if 0, use 31.
	 */
	h->max_cmd_sg_entries = 31;
	if (h->maxsgentries > 512) {
		h->max_cmd_sg_entries = 32;
		h->chainsize = h->maxsgentries - h->max_cmd_sg_entries + 1;
		h->maxsgentries--; /* save one for chain pointer */
	} else {
		h->maxsgentries = 31; /* default to traditional values */
		h->chainsize = 0;
	}
}
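
/*
 * Worked example (hypothetical firmware value): if the config table
 * reports MaxScatterGatherElements = 1024 (> 512), then
 *
 *	max_cmd_sg_entries = 32			(in-command SG slots)
 *	chainsize = 1024 - 32 + 1 = 993		(SG entries in the
 *						 chained block)
 *	maxsgentries = 1023			(one slot reserved for
 *						 the chain pointer)
 *
 * A value of 512 or less falls back to the traditional 31 entries
 * with no chaining.
 */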

static inline bool hpsa_CISS_signature_present(struct ctlr_info *h)
{
	if ((readb(&h->cfgtable->Signature[0]) != 'C') ||
	    (readb(&h->cfgtable->Signature[1]) != 'I') ||
	    (readb(&h->cfgtable->Signature[2]) != 'S') ||
	    (readb(&h->cfgtable->Signature[3]) != 'S')) {
		dev_warn(&h->pdev->dev, "not a valid CISS config table\n");
		return false;
	}
	return true;
}

/* Need to enable prefetch in the SCSI core for 6400 in x86 */
static inline void hpsa_enable_scsi_prefetch(struct ctlr_info *h)
{
#ifdef CONFIG_X86
	u32 prefetch;

	prefetch = readl(&(h->cfgtable->SCSI_Prefetch));
	prefetch |= 0x100;
	writel(prefetch, &(h->cfgtable->SCSI_Prefetch));
#endif
}

/* Disable DMA prefetch for the P600.  Otherwise an ASIC bug may result
 * in a prefetch beyond physical memory.
 */
static inline void hpsa_p600_dma_prefetch_quirk(struct ctlr_info *h)
{
	u32 dma_prefetch;

	if (h->board_id != 0x3225103C)
		return;
	dma_prefetch = readl(h->vaddr + I2O_DMA1_CFG);
	dma_prefetch |= 0x8000;
	writel(dma_prefetch, h->vaddr + I2O_DMA1_CFG);
}

static void __devinit hpsa_wait_for_mode_change_ack(struct ctlr_info *h)
{
	int i;
	u32 doorbell_value;
	unsigned long flags;

	/* under certain very rare conditions, this can take a while.
	 * (e.g.: hot replace a failed 144GB drive in a RAID 5 set right
	 * as we enter this code.)
	 */
	for (i = 0; i < MAX_CONFIG_WAIT; i++) {
		spin_lock_irqsave(&h->lock, flags);
		doorbell_value = readl(h->vaddr + SA5_DOORBELL);
		spin_unlock_irqrestore(&h->lock, flags);
		if (!(doorbell_value & CFGTBL_ChangeReq))
			break;
		/* delay and try again */
		usleep_range(10000, 20000);
	}
}
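
/*
 * The wait above is bounded: at most MAX_CONFIG_WAIT iterations of a
 * 10-20 ms sleep.  A controller that never clears CFGTBL_ChangeReq
 * therefore stalls the caller for a long but finite time; the mode
 * change routines below re-read TransportActive afterwards to catch
 * that case.
 */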
3877 */ 3878 for (i = 0; i < MAX_CONFIG_WAIT; i++) { 3879 spin_lock_irqsave(&h->lock, flags); 3880 doorbell_value = readl(h->vaddr + SA5_DOORBELL); 3881 spin_unlock_irqrestore(&h->lock, flags); 3882 if (!(doorbell_value & CFGTBL_ChangeReq)) 3883 break; 3884 /* delay and try again */ 3885 usleep_range(10000, 20000); 3886 } 3887 } 3888 3889 static int __devinit hpsa_enter_simple_mode(struct ctlr_info *h) 3890 { 3891 u32 trans_support; 3892 3893 trans_support = readl(&(h->cfgtable->TransportSupport)); 3894 if (!(trans_support & SIMPLE_MODE)) 3895 return -ENOTSUPP; 3896 3897 h->max_commands = readl(&(h->cfgtable->CmdsOutMax)); 3898 /* Update the field, and then ring the doorbell */ 3899 writel(CFGTBL_Trans_Simple, &(h->cfgtable->HostWrite.TransportRequest)); 3900 writel(CFGTBL_ChangeReq, h->vaddr + SA5_DOORBELL); 3901 hpsa_wait_for_mode_change_ack(h); 3902 print_cfg_table(&h->pdev->dev, h->cfgtable); 3903 if (!(readl(&(h->cfgtable->TransportActive)) & CFGTBL_Trans_Simple)) { 3904 dev_warn(&h->pdev->dev, 3905 "unable to get board into simple mode\n"); 3906 return -ENODEV; 3907 } 3908 h->transMethod = CFGTBL_Trans_Simple; 3909 return 0; 3910 } 3911 3912 static int __devinit hpsa_pci_init(struct ctlr_info *h) 3913 { 3914 int prod_index, err; 3915 3916 prod_index = hpsa_lookup_board_id(h->pdev, &h->board_id); 3917 if (prod_index < 0) 3918 return -ENODEV; 3919 h->product_name = products[prod_index].product_name; 3920 h->access = *(products[prod_index].access); 3921 3922 if (hpsa_board_disabled(h->pdev)) { 3923 dev_warn(&h->pdev->dev, "controller appears to be disabled\n"); 3924 return -ENODEV; 3925 } 3926 3927 pci_disable_link_state(h->pdev, PCIE_LINK_STATE_L0S | 3928 PCIE_LINK_STATE_L1 | PCIE_LINK_STATE_CLKPM); 3929 3930 err = pci_enable_device(h->pdev); 3931 if (err) { 3932 dev_warn(&h->pdev->dev, "unable to enable PCI device\n"); 3933 return err; 3934 } 3935 3936 err = pci_request_regions(h->pdev, "hpsa"); 3937 if (err) { 3938 dev_err(&h->pdev->dev, 3939 "cannot obtain PCI resources, aborting\n"); 3940 return err; 3941 } 3942 hpsa_interrupt_mode(h); 3943 err = hpsa_pci_find_memory_BAR(h->pdev, &h->paddr); 3944 if (err) 3945 goto err_out_free_res; 3946 h->vaddr = remap_pci_mem(h->paddr, 0x250); 3947 if (!h->vaddr) { 3948 err = -ENOMEM; 3949 goto err_out_free_res; 3950 } 3951 err = hpsa_wait_for_board_state(h->pdev, h->vaddr, BOARD_READY); 3952 if (err) 3953 goto err_out_free_res; 3954 err = hpsa_find_cfgtables(h); 3955 if (err) 3956 goto err_out_free_res; 3957 hpsa_find_board_params(h); 3958 3959 if (!hpsa_CISS_signature_present(h)) { 3960 err = -ENODEV; 3961 goto err_out_free_res; 3962 } 3963 hpsa_enable_scsi_prefetch(h); 3964 hpsa_p600_dma_prefetch_quirk(h); 3965 err = hpsa_enter_simple_mode(h); 3966 if (err) 3967 goto err_out_free_res; 3968 return 0; 3969 3970 err_out_free_res: 3971 if (h->transtable) 3972 iounmap(h->transtable); 3973 if (h->cfgtable) 3974 iounmap(h->cfgtable); 3975 if (h->vaddr) 3976 iounmap(h->vaddr); 3977 /* 3978 * Deliberately omit pci_disable_device(): it does something nasty to 3979 * Smart Array controllers that pci_enable_device does not undo 3980 */ 3981 pci_release_regions(h->pdev); 3982 return err; 3983 } 3984 3985 static void __devinit hpsa_hba_inquiry(struct ctlr_info *h) 3986 { 3987 int rc; 3988 3989 #define HBA_INQUIRY_BYTE_COUNT 64 3990 h->hba_inquiry_data = kmalloc(HBA_INQUIRY_BYTE_COUNT, GFP_KERNEL); 3991 if (!h->hba_inquiry_data) 3992 return; 3993 rc = hpsa_scsi_do_inquiry(h, RAID_CTLR_LUNID, 0, 3994 h->hba_inquiry_data, HBA_INQUIRY_BYTE_COUNT); 3995 if (rc 

static void __devinit hpsa_hba_inquiry(struct ctlr_info *h)
{
	int rc;

#define HBA_INQUIRY_BYTE_COUNT 64
	h->hba_inquiry_data = kmalloc(HBA_INQUIRY_BYTE_COUNT, GFP_KERNEL);
	if (!h->hba_inquiry_data)
		return;
	rc = hpsa_scsi_do_inquiry(h, RAID_CTLR_LUNID, 0,
		h->hba_inquiry_data, HBA_INQUIRY_BYTE_COUNT);
	if (rc != 0) {
		kfree(h->hba_inquiry_data);
		h->hba_inquiry_data = NULL;
	}
}

static __devinit int hpsa_init_reset_devices(struct pci_dev *pdev)
{
	int rc, i;

	if (!reset_devices)
		return 0;

	/* Reset the controller with a PCI power-cycle or via doorbell */
	rc = hpsa_kdump_hard_reset_controller(pdev);

	/* -ENOTSUPP here means we cannot reset the controller
	 * but it's already (and still) up and running in
	 * "performant mode".  Or, it might be 640x, which can't reset
	 * due to concerns about shared bbwc between 6402/6404 pair.
	 */
	if (rc == -ENOTSUPP)
		return rc; /* just try to do the kdump anyhow. */
	if (rc)
		return -ENODEV;

	/* Now try to get the controller to respond to a no-op */
	dev_warn(&pdev->dev, "Waiting for controller to respond to no-op\n");
	for (i = 0; i < HPSA_POST_RESET_NOOP_RETRIES; i++) {
		if (hpsa_noop(pdev) == 0)
			break;
		else
			dev_warn(&pdev->dev, "no-op failed%s\n",
					(i < 11 ? "; re-trying" : ""));
	}
	return 0;
}

static __devinit int hpsa_allocate_cmd_pool(struct ctlr_info *h)
{
	h->cmd_pool_bits = kzalloc(
		DIV_ROUND_UP(h->nr_cmds, BITS_PER_LONG) *
		sizeof(unsigned long), GFP_KERNEL);
	h->cmd_pool = pci_alloc_consistent(h->pdev,
		    h->nr_cmds * sizeof(*h->cmd_pool),
		    &(h->cmd_pool_dhandle));
	h->errinfo_pool = pci_alloc_consistent(h->pdev,
		    h->nr_cmds * sizeof(*h->errinfo_pool),
		    &(h->errinfo_pool_dhandle));
	if ((h->cmd_pool_bits == NULL)
	    || (h->cmd_pool == NULL)
	    || (h->errinfo_pool == NULL)) {
		dev_err(&h->pdev->dev, "out of memory in %s", __func__);
		return -ENOMEM;
	}
	return 0;
}
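
/*
 * Sizing illustration (hypothetical numbers, not read from hardware):
 * with nr_cmds = 124 and BITS_PER_LONG = 64, cmd_pool_bits occupies
 * DIV_ROUND_UP(124, 64) = 2 unsigned longs (16 bytes), one bit per
 * command slot, while cmd_pool and errinfo_pool each hold 124
 * fixed-size entries of DMA-coherent memory.
 */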

static void hpsa_free_cmd_pool(struct ctlr_info *h)
{
	kfree(h->cmd_pool_bits);
	if (h->cmd_pool)
		pci_free_consistent(h->pdev,
			    h->nr_cmds * sizeof(struct CommandList),
			    h->cmd_pool, h->cmd_pool_dhandle);
	if (h->errinfo_pool)
		pci_free_consistent(h->pdev,
			    h->nr_cmds * sizeof(struct ErrorInfo),
			    h->errinfo_pool,
			    h->errinfo_pool_dhandle);
}

static int hpsa_request_irq(struct ctlr_info *h,
	irqreturn_t (*msixhandler)(int, void *),
	irqreturn_t (*intxhandler)(int, void *))
{
	int rc;

	if (h->msix_vector || h->msi_vector)
		rc = request_irq(h->intr[h->intr_mode], msixhandler,
				IRQF_DISABLED, h->devname, h);
	else
		rc = request_irq(h->intr[h->intr_mode], intxhandler,
				IRQF_DISABLED, h->devname, h);
	if (rc) {
		dev_err(&h->pdev->dev, "unable to get irq %d for %s\n",
		       h->intr[h->intr_mode], h->devname);
		return -ENODEV;
	}
	return 0;
}

static int __devinit hpsa_kdump_soft_reset(struct ctlr_info *h)
{
	if (hpsa_send_host_reset(h, RAID_CTLR_LUNID,
		HPSA_RESET_TYPE_CONTROLLER)) {
		dev_warn(&h->pdev->dev, "Resetting array controller failed.\n");
		return -EIO;
	}

	dev_info(&h->pdev->dev, "Waiting for board to soft reset.\n");
	if (hpsa_wait_for_board_state(h->pdev, h->vaddr, BOARD_NOT_READY)) {
		dev_warn(&h->pdev->dev, "Soft reset had no effect.\n");
		return -1;
	}

	dev_info(&h->pdev->dev, "Board reset, awaiting READY status.\n");
	if (hpsa_wait_for_board_state(h->pdev, h->vaddr, BOARD_READY)) {
		dev_warn(&h->pdev->dev, "Board failed to become ready "
			"after soft reset.\n");
		return -1;
	}

	return 0;
}

static void hpsa_undo_allocations_after_kdump_soft_reset(struct ctlr_info *h)
{
	free_irq(h->intr[h->intr_mode], h);
#ifdef CONFIG_PCI_MSI
	if (h->msix_vector)
		pci_disable_msix(h->pdev);
	else if (h->msi_vector)
		pci_disable_msi(h->pdev);
#endif /* CONFIG_PCI_MSI */
	hpsa_free_sg_chain_blocks(h);
	hpsa_free_cmd_pool(h);
	kfree(h->blockFetchTable);
	pci_free_consistent(h->pdev, h->reply_pool_size,
		h->reply_pool, h->reply_pool_dhandle);
	if (h->vaddr)
		iounmap(h->vaddr);
	if (h->transtable)
		iounmap(h->transtable);
	if (h->cfgtable)
		iounmap(h->cfgtable);
	pci_release_regions(h->pdev);
	kfree(h);
}

static void remove_ctlr_from_lockup_detector_list(struct ctlr_info *h)
{
	assert_spin_locked(&lockup_detector_lock);
	if (!hpsa_lockup_detector)
		return;
	if (h->lockup_detected)
		return; /* already stopped the lockup detector */
	list_del(&h->lockup_list);
}

/* Called when controller lockup detected. */
static void fail_all_cmds_on_list(struct ctlr_info *h, struct list_head *list)
{
	struct CommandList *c = NULL;

	assert_spin_locked(&h->lock);
	/* Mark all outstanding commands as failed and complete them. */
	while (!list_empty(list)) {
		c = list_entry(list->next, struct CommandList, list);
		c->err_info->CommandStatus = CMD_HARDWARE_ERR;
		finish_cmd(c, c->Header.Tag.lower);
	}
}

static void controller_lockup_detected(struct ctlr_info *h)
{
	unsigned long flags;

	assert_spin_locked(&lockup_detector_lock);
	remove_ctlr_from_lockup_detector_list(h);
	h->access.set_intr_mask(h, HPSA_INTR_OFF);
	spin_lock_irqsave(&h->lock, flags);
	h->lockup_detected = readl(h->vaddr + SA5_SCRATCHPAD_OFFSET);
	spin_unlock_irqrestore(&h->lock, flags);
	dev_warn(&h->pdev->dev, "Controller lockup detected: 0x%08x\n",
			h->lockup_detected);
	pci_disable_device(h->pdev);
	spin_lock_irqsave(&h->lock, flags);
	fail_all_cmds_on_list(h, &h->cmpQ);
	fail_all_cmds_on_list(h, &h->reqQ);
	spin_unlock_irqrestore(&h->lock, flags);
}

#define HEARTBEAT_SAMPLE_INTERVAL (10 * HZ)
#define HEARTBEAT_CHECK_MINIMUM_INTERVAL (HEARTBEAT_SAMPLE_INTERVAL / 2)

static void detect_controller_lockup(struct ctlr_info *h)
{
	u64 now;
	u32 heartbeat;
	unsigned long flags;

	assert_spin_locked(&lockup_detector_lock);
	now = get_jiffies_64();
	/* If we've received an interrupt recently, we're ok. */
	if (time_after64(h->last_intr_timestamp +
				(HEARTBEAT_CHECK_MINIMUM_INTERVAL), now))
		return;

	/*
	 * If we've already checked the heartbeat recently, we're ok.
	 * This could happen if someone sends us a signal.  We
	 * otherwise don't care about signals in this thread.
	 */
	if (time_after64(h->last_heartbeat_timestamp +
				(HEARTBEAT_CHECK_MINIMUM_INTERVAL), now))
		return;

	/* If heartbeat has not changed since we last looked, we're not ok. */
	spin_lock_irqsave(&h->lock, flags);
	heartbeat = readl(&h->cfgtable->HeartBeat);
	spin_unlock_irqrestore(&h->lock, flags);
	if (h->last_heartbeat == heartbeat) {
		controller_lockup_detected(h);
		return;
	}

	/* We're ok. */
	h->last_heartbeat = heartbeat;
	h->last_heartbeat_timestamp = now;
}
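
/*
 * Illustrative timeline: the detector thread samples each controller
 * every HEARTBEAT_SAMPLE_INTERVAL (10 s).  Suppose HeartBeat reads 42
 * at t = 0 and is recorded.  At t = 10 s, if no interrupt has arrived
 * within the last HEARTBEAT_CHECK_MINIMUM_INTERVAL (5 s) and HeartBeat
 * still reads 42, the controller is declared locked up: interrupts
 * are masked, the PCI device is disabled, and everything on reqQ and
 * cmpQ is failed with CMD_HARDWARE_ERR.
 */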

static int detect_controller_lockup_thread(void *notused)
{
	struct ctlr_info *h;
	unsigned long flags;

	while (1) {
		struct list_head *this, *tmp;

		schedule_timeout_interruptible(HEARTBEAT_SAMPLE_INTERVAL);
		if (kthread_should_stop())
			break;
		spin_lock_irqsave(&lockup_detector_lock, flags);
		list_for_each_safe(this, tmp, &hpsa_ctlr_list) {
			h = list_entry(this, struct ctlr_info, lockup_list);
			detect_controller_lockup(h);
		}
		spin_unlock_irqrestore(&lockup_detector_lock, flags);
	}
	return 0;
}

static void add_ctlr_to_lockup_detector_list(struct ctlr_info *h)
{
	unsigned long flags;

	spin_lock_irqsave(&lockup_detector_lock, flags);
	list_add_tail(&h->lockup_list, &hpsa_ctlr_list);
	spin_unlock_irqrestore(&lockup_detector_lock, flags);
}

static void start_controller_lockup_detector(struct ctlr_info *h)
{
	/* Start the lockup detector thread if not already started */
	if (!hpsa_lockup_detector) {
		spin_lock_init(&lockup_detector_lock);
		hpsa_lockup_detector =
			kthread_run(detect_controller_lockup_thread,
						NULL, "hpsa");
	}
	if (!hpsa_lockup_detector) {
		dev_warn(&h->pdev->dev,
			"Could not start lockup detector thread\n");
		return;
	}
	add_ctlr_to_lockup_detector_list(h);
}

static void stop_controller_lockup_detector(struct ctlr_info *h)
{
	unsigned long flags;

	spin_lock_irqsave(&lockup_detector_lock, flags);
	remove_ctlr_from_lockup_detector_list(h);
	/* If the list of ctlr's to monitor is empty, stop the thread */
	if (list_empty(&hpsa_ctlr_list)) {
		kthread_stop(hpsa_lockup_detector);
		hpsa_lockup_detector = NULL;
	}
	spin_unlock_irqrestore(&lockup_detector_lock, flags);
}

static int __devinit hpsa_init_one(struct pci_dev *pdev,
				    const struct pci_device_id *ent)
{
	int dac, rc;
	struct ctlr_info *h;
	int try_soft_reset = 0;
	unsigned long flags;

	if (number_of_controllers == 0)
		printk(KERN_INFO DRIVER_NAME "\n");

	rc = hpsa_init_reset_devices(pdev);
	if (rc) {
		if (rc != -ENOTSUPP)
			return rc;
		/* If the reset fails in a particular way (it has no way to do
		 * a proper hard reset, so returns -ENOTSUPP) we can try to do
		 * a soft reset once we get the controller configured up to the
		 * point that it can accept a command.
		 */
		try_soft_reset = 1;
		rc = 0;
	}

reinit_after_soft_reset:

	/* Command structures must be aligned on a 32-byte boundary because
	 * the 5 lower bits of the address are used by the hardware and by
	 * the driver.  See comments in hpsa.h for more info.
	 */
#define COMMANDLIST_ALIGNMENT 32
	BUILD_BUG_ON(sizeof(struct CommandList) % COMMANDLIST_ALIGNMENT);
	h = kzalloc(sizeof(*h), GFP_KERNEL);
	if (!h)
		return -ENOMEM;

	h->pdev = pdev;
	h->intr_mode = hpsa_simple_mode ? SIMPLE_MODE_INT : PERF_MODE_INT;
	INIT_LIST_HEAD(&h->cmpQ);
	INIT_LIST_HEAD(&h->reqQ);
	spin_lock_init(&h->lock);
	spin_lock_init(&h->scan_lock);
	rc = hpsa_pci_init(h);
	if (rc != 0)
		goto clean1;

	sprintf(h->devname, "hpsa%d", number_of_controllers);
	h->ctlr = number_of_controllers;
	number_of_controllers++;

	/* configure PCI DMA stuff */
	rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
	if (rc == 0) {
		dac = 1;
	} else {
		rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
		if (rc == 0) {
			dac = 0;
		} else {
			dev_err(&pdev->dev, "no suitable DMA available\n");
			goto clean1;
		}
	}

	/* make sure the board interrupts are off */
	h->access.set_intr_mask(h, HPSA_INTR_OFF);

	if (hpsa_request_irq(h, do_hpsa_intr_msi, do_hpsa_intr_intx))
		goto clean2;
	dev_info(&pdev->dev, "%s: <0x%x> at IRQ %d%s using DAC\n",
	       h->devname, pdev->device,
	       h->intr[h->intr_mode], dac ? "" : " not");
	if (hpsa_allocate_cmd_pool(h))
		goto clean4;
	if (hpsa_allocate_sg_chain_blocks(h))
		goto clean4;
	init_waitqueue_head(&h->scan_wait_queue);
	h->scan_finished = 1; /* no scan currently in progress */

	pci_set_drvdata(pdev, h);
	h->ndevices = 0;
	h->scsi_host = NULL;
	spin_lock_init(&h->devlock);
	hpsa_put_ctlr_into_performant_mode(h);

	/* At this point, the controller is ready to take commands.
	 * Now, if reset_devices and the hard reset didn't work, try
	 * the soft reset and see if that works.
	 */
	if (try_soft_reset) {

		/* This is kind of gross.  We may or may not get a completion
		 * from the soft reset command, and if we do, then the value
		 * from the fifo may or may not be valid.  So, we wait 10 secs
		 * after the reset throwing away any completions we get during
		 * that time.  Unregister the interrupt handler and register
		 * fake ones to scoop up any residual completions.
		 */
		spin_lock_irqsave(&h->lock, flags);
		h->access.set_intr_mask(h, HPSA_INTR_OFF);
		spin_unlock_irqrestore(&h->lock, flags);
		free_irq(h->intr[h->intr_mode], h);
		rc = hpsa_request_irq(h, hpsa_msix_discard_completions,
					hpsa_intx_discard_completions);
		if (rc) {
			dev_warn(&h->pdev->dev, "Failed to request_irq after "
				"soft reset.\n");
			goto clean4;
		}

		rc = hpsa_kdump_soft_reset(h);
		if (rc)
			/* Neither hard nor soft reset worked, we're hosed. */
			goto clean4;

		dev_info(&h->pdev->dev, "Board READY.\n");
		dev_info(&h->pdev->dev,
			"Waiting for stale completions to drain.\n");
		h->access.set_intr_mask(h, HPSA_INTR_ON);
		msleep(10000);
		h->access.set_intr_mask(h, HPSA_INTR_OFF);

		rc = controller_reset_failed(h->cfgtable);
		if (rc)
			dev_info(&h->pdev->dev,
				"Soft reset appears to have failed.\n");

		/* since the controller's reset, we have to go back and re-init
		 * everything.  Easiest to just forget what we've done and do
		 * it all over again.
		 */
		hpsa_undo_allocations_after_kdump_soft_reset(h);
		try_soft_reset = 0;
		if (rc)
			/* don't go to clean4, we already unallocated */
			return -ENODEV;

		goto reinit_after_soft_reset;
	}

	/* Turn the interrupts on so we can service requests */
	h->access.set_intr_mask(h, HPSA_INTR_ON);

	hpsa_hba_inquiry(h);
	hpsa_register_scsi(h);	/* hook ourselves into SCSI subsystem */
	start_controller_lockup_detector(h);
	return 0;	/* probe functions must return 0 on success */

clean4:
	hpsa_free_sg_chain_blocks(h);
	hpsa_free_cmd_pool(h);
	free_irq(h->intr[h->intr_mode], h);
clean2:
clean1:
	kfree(h);
	return rc;
}
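
/*
 * Condensed view of the probe flow when reset_devices is set (kdump)
 * and the hard reset is unsupported (-ENOTSUPP):
 *
 *	try_soft_reset = 1
 *	-> bring the controller up normally (interrupts still off)
 *	-> swap in the "discard completions" interrupt handlers
 *	-> hpsa_kdump_soft_reset() through the doorbell
 *	-> let stale completions drain for 10 seconds
 *	-> hpsa_undo_allocations_after_kdump_soft_reset()
 *	-> goto reinit_after_soft_reset and start over cleanly
 */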
4408 */ 4409 hpsa_undo_allocations_after_kdump_soft_reset(h); 4410 try_soft_reset = 0; 4411 if (rc) 4412 /* don't go to clean4, we already unallocated */ 4413 return -ENODEV; 4414 4415 goto reinit_after_soft_reset; 4416 } 4417 4418 /* Turn the interrupts on so we can service requests */ 4419 h->access.set_intr_mask(h, HPSA_INTR_ON); 4420 4421 hpsa_hba_inquiry(h); 4422 hpsa_register_scsi(h); /* hook ourselves into SCSI subsystem */ 4423 start_controller_lockup_detector(h); 4424 return 1; 4425 4426 clean4: 4427 hpsa_free_sg_chain_blocks(h); 4428 hpsa_free_cmd_pool(h); 4429 free_irq(h->intr[h->intr_mode], h); 4430 clean2: 4431 clean1: 4432 kfree(h); 4433 return rc; 4434 } 4435 4436 static void hpsa_flush_cache(struct ctlr_info *h) 4437 { 4438 char *flush_buf; 4439 struct CommandList *c; 4440 4441 flush_buf = kzalloc(4, GFP_KERNEL); 4442 if (!flush_buf) 4443 return; 4444 4445 c = cmd_special_alloc(h); 4446 if (!c) { 4447 dev_warn(&h->pdev->dev, "cmd_special_alloc returned NULL!\n"); 4448 goto out_of_memory; 4449 } 4450 fill_cmd(c, HPSA_CACHE_FLUSH, h, flush_buf, 4, 0, 4451 RAID_CTLR_LUNID, TYPE_CMD); 4452 hpsa_scsi_do_simple_cmd_with_retry(h, c, PCI_DMA_TODEVICE); 4453 if (c->err_info->CommandStatus != 0) 4454 dev_warn(&h->pdev->dev, 4455 "error flushing cache on controller\n"); 4456 cmd_special_free(h, c); 4457 out_of_memory: 4458 kfree(flush_buf); 4459 } 4460 4461 static void hpsa_shutdown(struct pci_dev *pdev) 4462 { 4463 struct ctlr_info *h; 4464 4465 h = pci_get_drvdata(pdev); 4466 /* Turn board interrupts off and send the flush cache command 4467 * sendcmd will turn off interrupt, and send the flush... 4468 * To write all data in the battery backed cache to disks 4469 */ 4470 hpsa_flush_cache(h); 4471 h->access.set_intr_mask(h, HPSA_INTR_OFF); 4472 free_irq(h->intr[h->intr_mode], h); 4473 #ifdef CONFIG_PCI_MSI 4474 if (h->msix_vector) 4475 pci_disable_msix(h->pdev); 4476 else if (h->msi_vector) 4477 pci_disable_msi(h->pdev); 4478 #endif /* CONFIG_PCI_MSI */ 4479 } 4480 4481 static void __devexit hpsa_remove_one(struct pci_dev *pdev) 4482 { 4483 struct ctlr_info *h; 4484 4485 if (pci_get_drvdata(pdev) == NULL) { 4486 dev_err(&pdev->dev, "unable to remove device\n"); 4487 return; 4488 } 4489 h = pci_get_drvdata(pdev); 4490 stop_controller_lockup_detector(h); 4491 hpsa_unregister_scsi(h); /* unhook from SCSI subsystem */ 4492 hpsa_shutdown(pdev); 4493 iounmap(h->vaddr); 4494 iounmap(h->transtable); 4495 iounmap(h->cfgtable); 4496 hpsa_free_sg_chain_blocks(h); 4497 pci_free_consistent(h->pdev, 4498 h->nr_cmds * sizeof(struct CommandList), 4499 h->cmd_pool, h->cmd_pool_dhandle); 4500 pci_free_consistent(h->pdev, 4501 h->nr_cmds * sizeof(struct ErrorInfo), 4502 h->errinfo_pool, h->errinfo_pool_dhandle); 4503 pci_free_consistent(h->pdev, h->reply_pool_size, 4504 h->reply_pool, h->reply_pool_dhandle); 4505 kfree(h->cmd_pool_bits); 4506 kfree(h->blockFetchTable); 4507 kfree(h->hba_inquiry_data); 4508 /* 4509 * Deliberately omit pci_disable_device(): it does something nasty to 4510 * Smart Array controllers that pci_enable_device does not undo 4511 */ 4512 pci_release_regions(pdev); 4513 pci_set_drvdata(pdev, NULL); 4514 kfree(h); 4515 } 4516 4517 static int hpsa_suspend(__attribute__((unused)) struct pci_dev *pdev, 4518 __attribute__((unused)) pm_message_t state) 4519 { 4520 return -ENOSYS; 4521 } 4522 4523 static int hpsa_resume(__attribute__((unused)) struct pci_dev *pdev) 4524 { 4525 return -ENOSYS; 4526 } 4527 4528 static struct pci_driver hpsa_pci_driver = { 4529 .name = "hpsa", 4530 .probe 

/* Fill in bucket_map[], given nsgs (the max number of
 * scatter gather elements supported) and bucket[],
 * which is an array of 8 integers.  The bucket[] array
 * contains 8 different DMA transfer sizes (in 16
 * byte increments) which the controller uses to fetch
 * commands.  This function fills in bucket_map[], which
 * maps a given number of scatter gather elements to one of
 * the 8 DMA transfer sizes.  The point of it is to allow the
 * controller to only do as much DMA as needed to fetch the
 * command, with the DMA transfer size encoded in the lower
 * bits of the command address.
 */
static void calc_bucket_map(int bucket[], int num_buckets,
	int nsgs, int *bucket_map)
{
	int i, j, b, size;

	/* even a command with 0 SGs requires 4 blocks */
#define MINIMUM_TRANSFER_BLOCKS 4
#define NUM_BUCKETS 8
	/* Note, bucket_map must have nsgs+1 entries. */
	for (i = 0; i <= nsgs; i++) {
		/* Compute size of a command with i SG entries */
		size = i + MINIMUM_TRANSFER_BLOCKS;
		b = num_buckets; /* Assume the biggest bucket */
		/* Find the bucket that is just big enough */
		for (j = 0; j < 8; j++) {
			if (bucket[j] >= size) {
				b = j;
				break;
			}
		}
		/* for a command with i SG entries, use bucket b. */
		bucket_map[i] = b;
	}
}
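
/*
 * Worked example: with bucket[] = {5, 6, 8, 10, 12, 20, 28, 36}
 * (the bft[] table below, assuming bft[7] = h->max_sg_entries + 4 = 36)
 * and nsgs = 32:
 *
 *	i = 0:  size = 0 + 4 = 4    ->  bucket_map[0]  = 0  (5 blocks)
 *	i = 3:  size = 3 + 4 = 7    ->  bucket_map[3]  = 2  (8 blocks)
 *	i = 8:  size = 8 + 4 = 12   ->  bucket_map[8]  = 4  (12 blocks)
 *	i = 32: size = 32 + 4 = 36  ->  bucket_map[32] = 7  (36 blocks)
 *
 * The 3-bit bucket index is later encoded in each command's tag so
 * the controller knows how many 16-byte blocks to fetch.
 */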

static __devinit void hpsa_enter_performant_mode(struct ctlr_info *h,
	u32 use_short_tags)
{
	int i;
	unsigned long register_value;

	/* This is a bit complicated.  There are 8 registers on
	 * the controller which we write to, to tell it the 8 different
	 * sizes of command which there may be.  It's a way of
	 * reducing the DMA done to fetch each command.  Encoded into
	 * each command's tag are 3 bits which communicate to the controller
	 * which of the eight sizes that command fits within.  The size of
	 * each command depends on how many scatter gather entries there are.
	 * Each SG entry requires 16 bytes.  The eight registers are programmed
	 * with the number of 16-byte blocks a command of that size requires.
	 * The smallest command possible requires 5 such 16-byte blocks.
	 * The largest command possible requires MAXSGENTRIES + 4 16-byte
	 * blocks.  Note, this only extends to the SG entries contained
	 * within the command block, and does not extend to chained blocks
	 * of SG elements.  bft[] contains the eight values we write to
	 * the registers.  They are not evenly distributed, but have more
	 * sizes for small commands, and fewer sizes for larger commands.
	 */
	int bft[8] = {5, 6, 8, 10, 12, 20, 28, MAXSGENTRIES + 4};
	BUILD_BUG_ON(28 > MAXSGENTRIES + 4);
	/*  5 = 1 s/g entry or 4k
	 *  6 = 2 s/g entry or 8k
	 *  8 = 4 s/g entry or 16k
	 * 10 = 6 s/g entry or 24k
	 */

	h->reply_pool_wraparound = 1; /* spec: init to 1 */

	/* Controller spec: zero out this buffer. */
	memset(h->reply_pool, 0, h->reply_pool_size);
	h->reply_pool_head = h->reply_pool;

	bft[7] = h->max_sg_entries + 4;
	calc_bucket_map(bft, ARRAY_SIZE(bft), 32, h->blockFetchTable);
	for (i = 0; i < 8; i++)
		writel(bft[i], &h->transtable->BlockFetch[i]);

	/* size of controller ring buffer */
	writel(h->max_commands, &h->transtable->RepQSize);
	writel(1, &h->transtable->RepQCount);
	writel(0, &h->transtable->RepQCtrAddrLow32);
	writel(0, &h->transtable->RepQCtrAddrHigh32);
	writel(h->reply_pool_dhandle, &h->transtable->RepQAddr0Low32);
	writel(0, &h->transtable->RepQAddr0High32);
	writel(CFGTBL_Trans_Performant | use_short_tags,
		&(h->cfgtable->HostWrite.TransportRequest));
	writel(CFGTBL_ChangeReq, h->vaddr + SA5_DOORBELL);
	hpsa_wait_for_mode_change_ack(h);
	register_value = readl(&(h->cfgtable->TransportActive));
	if (!(register_value & CFGTBL_Trans_Performant)) {
		dev_warn(&h->pdev->dev, "unable to get board into"
					" performant mode\n");
		return;
	}
	/* Change the access methods to the performant access methods */
	h->access = SA5_performant_access;
	h->transMethod = CFGTBL_Trans_Performant;
}

static __devinit void hpsa_put_ctlr_into_performant_mode(struct ctlr_info *h)
{
	u32 trans_support;

	if (hpsa_simple_mode)
		return;

	trans_support = readl(&(h->cfgtable->TransportSupport));
	if (!(trans_support & PERFORMANT_MODE))
		return;

	hpsa_get_max_perf_mode_cmds(h);
	h->max_sg_entries = 32;
	/* Performant mode ring buffer and supporting data structures */
	h->reply_pool_size = h->max_commands * sizeof(u64);
	h->reply_pool = pci_alloc_consistent(h->pdev, h->reply_pool_size,
				&(h->reply_pool_dhandle));

	/* Need a block fetch table for performant mode */
	h->blockFetchTable = kmalloc(((h->max_sg_entries+1) *
				sizeof(u32)), GFP_KERNEL);

	if ((h->reply_pool == NULL)
		|| (h->blockFetchTable == NULL))
		goto clean_up;

	hpsa_enter_performant_mode(h,
		trans_support & CFGTBL_Trans_use_short_tags);

	return;

clean_up:
	if (h->reply_pool)
		pci_free_consistent(h->pdev, h->reply_pool_size,
			h->reply_pool, h->reply_pool_dhandle);
	kfree(h->blockFetchTable);
}

/*
 * This is it.  Register the PCI driver information for the cards we
 * control; the OS will call our registered routines when it finds one
 * of our cards.
 */
static int __init hpsa_init(void)
{
	return pci_register_driver(&hpsa_pci_driver);
}

static void __exit hpsa_cleanup(void)
{
	pci_unregister_driver(&hpsa_pci_driver);
}

module_init(hpsa_init);
module_exit(hpsa_cleanup);
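
/*
 * Example usage of this driver's module parameters (illustrative
 * command lines, not part of the driver itself):
 *
 *	modprobe hpsa                       # default: performant mode
 *	modprobe hpsa hpsa_simple_mode=1    # force simple mode
 *	modprobe hpsa hpsa_allow_any=1      # claim unrecognized HP boards
 */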