1 // SPDX-License-Identifier: GPL-2.0 2 /* 3 * Linux Driver for Mylex DAC960/AcceleRAID/eXtremeRAID PCI RAID Controllers 4 * 5 * This driver supports the newer, SCSI-based firmware interface only. 6 * 7 * Copyright 2017 Hannes Reinecke, SUSE Linux GmbH <hare@suse.com> 8 * 9 * Based on the original DAC960 driver, which has 10 * Copyright 1998-2001 by Leonard N. Zubkoff <lnz@dandelion.com> 11 * Portions Copyright 2002 by Mylex (An IBM Business Unit) 12 */ 13 14 #include <linux/module.h> 15 #include <linux/types.h> 16 #include <linux/delay.h> 17 #include <linux/interrupt.h> 18 #include <linux/pci.h> 19 #include <linux/raid_class.h> 20 #include <asm/unaligned.h> 21 #include <scsi/scsi.h> 22 #include <scsi/scsi_host.h> 23 #include <scsi/scsi_device.h> 24 #include <scsi/scsi_cmnd.h> 25 #include <scsi/scsi_tcq.h> 26 #include "myrs.h" 27 28 static struct raid_template *myrs_raid_template; 29 30 static struct myrs_devstate_name_entry { 31 enum myrs_devstate state; 32 char *name; 33 } myrs_devstate_name_list[] = { 34 { MYRS_DEVICE_UNCONFIGURED, "Unconfigured" }, 35 { MYRS_DEVICE_ONLINE, "Online" }, 36 { MYRS_DEVICE_REBUILD, "Rebuild" }, 37 { MYRS_DEVICE_MISSING, "Missing" }, 38 { MYRS_DEVICE_SUSPECTED_CRITICAL, "SuspectedCritical" }, 39 { MYRS_DEVICE_OFFLINE, "Offline" }, 40 { MYRS_DEVICE_CRITICAL, "Critical" }, 41 { MYRS_DEVICE_SUSPECTED_DEAD, "SuspectedDead" }, 42 { MYRS_DEVICE_COMMANDED_OFFLINE, "CommandedOffline" }, 43 { MYRS_DEVICE_STANDBY, "Standby" }, 44 { MYRS_DEVICE_INVALID_STATE, "Invalid" }, 45 }; 46 47 static char *myrs_devstate_name(enum myrs_devstate state) 48 { 49 struct myrs_devstate_name_entry *entry = myrs_devstate_name_list; 50 int i; 51 52 for (i = 0; i < ARRAY_SIZE(myrs_devstate_name_list); i++) { 53 if (entry[i].state == state) 54 return entry[i].name; 55 } 56 return NULL; 57 } 58 59 static struct myrs_raid_level_name_entry { 60 enum myrs_raid_level level; 61 char *name; 62 } myrs_raid_level_name_list[] = { 63 { MYRS_RAID_LEVEL0, "RAID0" }, 64 { MYRS_RAID_LEVEL1, "RAID1" }, 65 { MYRS_RAID_LEVEL3, "RAID3 right asymmetric parity" }, 66 { MYRS_RAID_LEVEL5, "RAID5 right asymmetric parity" }, 67 { MYRS_RAID_LEVEL6, "RAID6" }, 68 { MYRS_RAID_JBOD, "JBOD" }, 69 { MYRS_RAID_NEWSPAN, "New Mylex SPAN" }, 70 { MYRS_RAID_LEVEL3F, "RAID3 fixed parity" }, 71 { MYRS_RAID_LEVEL3L, "RAID3 left symmetric parity" }, 72 { MYRS_RAID_SPAN, "Mylex SPAN" }, 73 { MYRS_RAID_LEVEL5L, "RAID5 left symmetric parity" }, 74 { MYRS_RAID_LEVELE, "RAIDE (concatenation)" }, 75 { MYRS_RAID_PHYSICAL, "Physical device" }, 76 }; 77 78 static char *myrs_raid_level_name(enum myrs_raid_level level) 79 { 80 struct myrs_raid_level_name_entry *entry = myrs_raid_level_name_list; 81 int i; 82 83 for (i = 0; i < ARRAY_SIZE(myrs_raid_level_name_list); i++) { 84 if (entry[i].level == level) 85 return entry[i].name; 86 } 87 return NULL; 88 } 89 90 /* 91 * myrs_reset_cmd - clears critical fields in struct myrs_cmdblk 92 */ 93 static inline void myrs_reset_cmd(struct myrs_cmdblk *cmd_blk) 94 { 95 union myrs_cmd_mbox *mbox = &cmd_blk->mbox; 96 97 memset(mbox, 0, sizeof(union myrs_cmd_mbox)); 98 cmd_blk->status = 0; 99 } 100 101 /* 102 * myrs_qcmd - queues Command for DAC960 V2 Series Controllers. 103 */ 104 static void myrs_qcmd(struct myrs_hba *cs, struct myrs_cmdblk *cmd_blk) 105 { 106 void __iomem *base = cs->io_base; 107 union myrs_cmd_mbox *mbox = &cmd_blk->mbox; 108 union myrs_cmd_mbox *next_mbox = cs->next_cmd_mbox; 109 110 cs->write_cmd_mbox(next_mbox, mbox); 111 112 if (cs->prev_cmd_mbox1->words[0] == 0 || 113 cs->prev_cmd_mbox2->words[0] == 0) 114 cs->get_cmd_mbox(base); 115 116 cs->prev_cmd_mbox2 = cs->prev_cmd_mbox1; 117 cs->prev_cmd_mbox1 = next_mbox; 118 119 if (++next_mbox > cs->last_cmd_mbox) 120 next_mbox = cs->first_cmd_mbox; 121 122 cs->next_cmd_mbox = next_mbox; 123 } 124 125 /* 126 * myrs_exec_cmd - executes V2 Command and waits for completion. 127 */ 128 static void myrs_exec_cmd(struct myrs_hba *cs, 129 struct myrs_cmdblk *cmd_blk) 130 { 131 DECLARE_COMPLETION_ONSTACK(complete); 132 unsigned long flags; 133 134 cmd_blk->complete = &complete; 135 spin_lock_irqsave(&cs->queue_lock, flags); 136 myrs_qcmd(cs, cmd_blk); 137 spin_unlock_irqrestore(&cs->queue_lock, flags); 138 139 wait_for_completion(&complete); 140 } 141 142 /* 143 * myrs_report_progress - prints progress message 144 */ 145 static void myrs_report_progress(struct myrs_hba *cs, unsigned short ldev_num, 146 unsigned char *msg, unsigned long blocks, 147 unsigned long size) 148 { 149 shost_printk(KERN_INFO, cs->host, 150 "Logical Drive %d: %s in Progress: %d%% completed\n", 151 ldev_num, msg, 152 (100 * (int)(blocks >> 7)) / (int)(size >> 7)); 153 } 154 155 /* 156 * myrs_get_ctlr_info - executes a Controller Information IOCTL Command 157 */ 158 static unsigned char myrs_get_ctlr_info(struct myrs_hba *cs) 159 { 160 struct myrs_cmdblk *cmd_blk = &cs->dcmd_blk; 161 union myrs_cmd_mbox *mbox = &cmd_blk->mbox; 162 dma_addr_t ctlr_info_addr; 163 union myrs_sgl *sgl; 164 unsigned char status; 165 unsigned short ldev_present, ldev_critical, ldev_offline; 166 167 ldev_present = cs->ctlr_info->ldev_present; 168 ldev_critical = cs->ctlr_info->ldev_critical; 169 ldev_offline = cs->ctlr_info->ldev_offline; 170 171 ctlr_info_addr = dma_map_single(&cs->pdev->dev, cs->ctlr_info, 172 sizeof(struct myrs_ctlr_info), 173 DMA_FROM_DEVICE); 174 if (dma_mapping_error(&cs->pdev->dev, ctlr_info_addr)) 175 return MYRS_STATUS_FAILED; 176 177 mutex_lock(&cs->dcmd_mutex); 178 myrs_reset_cmd(cmd_blk); 179 mbox->ctlr_info.id = MYRS_DCMD_TAG; 180 mbox->ctlr_info.opcode = MYRS_CMD_OP_IOCTL; 181 mbox->ctlr_info.control.dma_ctrl_to_host = true; 182 mbox->ctlr_info.control.no_autosense = true; 183 mbox->ctlr_info.dma_size = sizeof(struct myrs_ctlr_info); 184 mbox->ctlr_info.ctlr_num = 0; 185 mbox->ctlr_info.ioctl_opcode = MYRS_IOCTL_GET_CTLR_INFO; 186 sgl = &mbox->ctlr_info.dma_addr; 187 sgl->sge[0].sge_addr = ctlr_info_addr; 188 sgl->sge[0].sge_count = mbox->ctlr_info.dma_size; 189 dev_dbg(&cs->host->shost_gendev, "Sending GetControllerInfo\n"); 190 myrs_exec_cmd(cs, cmd_blk); 191 status = cmd_blk->status; 192 mutex_unlock(&cs->dcmd_mutex); 193 dma_unmap_single(&cs->pdev->dev, ctlr_info_addr, 194 sizeof(struct myrs_ctlr_info), DMA_FROM_DEVICE); 195 if (status == MYRS_STATUS_SUCCESS) { 196 if (cs->ctlr_info->bg_init_active + 197 cs->ctlr_info->ldev_init_active + 198 cs->ctlr_info->pdev_init_active + 199 cs->ctlr_info->cc_active + 200 cs->ctlr_info->rbld_active + 201 cs->ctlr_info->exp_active != 0) 202 cs->needs_update = true; 203 if (cs->ctlr_info->ldev_present != ldev_present || 204 cs->ctlr_info->ldev_critical != ldev_critical || 205 cs->ctlr_info->ldev_offline != ldev_offline) 206 shost_printk(KERN_INFO, cs->host, 207 "Logical drive count changes (%d/%d/%d)\n", 208 cs->ctlr_info->ldev_critical, 209 cs->ctlr_info->ldev_offline, 210 cs->ctlr_info->ldev_present); 211 } 212 213 return status; 214 } 215 216 /* 217 * myrs_get_ldev_info - executes a Logical Device Information IOCTL Command 218 */ 219 static unsigned char myrs_get_ldev_info(struct myrs_hba *cs, 220 unsigned short ldev_num, struct myrs_ldev_info *ldev_info) 221 { 222 struct myrs_cmdblk *cmd_blk = &cs->dcmd_blk; 223 union myrs_cmd_mbox *mbox = &cmd_blk->mbox; 224 dma_addr_t ldev_info_addr; 225 struct myrs_ldev_info ldev_info_orig; 226 union myrs_sgl *sgl; 227 unsigned char status; 228 229 memcpy(&ldev_info_orig, ldev_info, sizeof(struct myrs_ldev_info)); 230 ldev_info_addr = dma_map_single(&cs->pdev->dev, ldev_info, 231 sizeof(struct myrs_ldev_info), 232 DMA_FROM_DEVICE); 233 if (dma_mapping_error(&cs->pdev->dev, ldev_info_addr)) 234 return MYRS_STATUS_FAILED; 235 236 mutex_lock(&cs->dcmd_mutex); 237 myrs_reset_cmd(cmd_blk); 238 mbox->ldev_info.id = MYRS_DCMD_TAG; 239 mbox->ldev_info.opcode = MYRS_CMD_OP_IOCTL; 240 mbox->ldev_info.control.dma_ctrl_to_host = true; 241 mbox->ldev_info.control.no_autosense = true; 242 mbox->ldev_info.dma_size = sizeof(struct myrs_ldev_info); 243 mbox->ldev_info.ldev.ldev_num = ldev_num; 244 mbox->ldev_info.ioctl_opcode = MYRS_IOCTL_GET_LDEV_INFO_VALID; 245 sgl = &mbox->ldev_info.dma_addr; 246 sgl->sge[0].sge_addr = ldev_info_addr; 247 sgl->sge[0].sge_count = mbox->ldev_info.dma_size; 248 dev_dbg(&cs->host->shost_gendev, 249 "Sending GetLogicalDeviceInfoValid for ldev %d\n", ldev_num); 250 myrs_exec_cmd(cs, cmd_blk); 251 status = cmd_blk->status; 252 mutex_unlock(&cs->dcmd_mutex); 253 dma_unmap_single(&cs->pdev->dev, ldev_info_addr, 254 sizeof(struct myrs_ldev_info), DMA_FROM_DEVICE); 255 if (status == MYRS_STATUS_SUCCESS) { 256 unsigned short ldev_num = ldev_info->ldev_num; 257 struct myrs_ldev_info *new = ldev_info; 258 struct myrs_ldev_info *old = &ldev_info_orig; 259 unsigned long ldev_size = new->cfg_devsize; 260 261 if (new->dev_state != old->dev_state) { 262 const char *name; 263 264 name = myrs_devstate_name(new->dev_state); 265 shost_printk(KERN_INFO, cs->host, 266 "Logical Drive %d is now %s\n", 267 ldev_num, name ? name : "Invalid"); 268 } 269 if ((new->soft_errs != old->soft_errs) || 270 (new->cmds_failed != old->cmds_failed) || 271 (new->deferred_write_errs != old->deferred_write_errs)) 272 shost_printk(KERN_INFO, cs->host, 273 "Logical Drive %d Errors: Soft = %d, Failed = %d, Deferred Write = %d\n", 274 ldev_num, new->soft_errs, 275 new->cmds_failed, 276 new->deferred_write_errs); 277 if (new->bg_init_active) 278 myrs_report_progress(cs, ldev_num, 279 "Background Initialization", 280 new->bg_init_lba, ldev_size); 281 else if (new->fg_init_active) 282 myrs_report_progress(cs, ldev_num, 283 "Foreground Initialization", 284 new->fg_init_lba, ldev_size); 285 else if (new->migration_active) 286 myrs_report_progress(cs, ldev_num, 287 "Data Migration", 288 new->migration_lba, ldev_size); 289 else if (new->patrol_active) 290 myrs_report_progress(cs, ldev_num, 291 "Patrol Operation", 292 new->patrol_lba, ldev_size); 293 if (old->bg_init_active && !new->bg_init_active) 294 shost_printk(KERN_INFO, cs->host, 295 "Logical Drive %d: Background Initialization %s\n", 296 ldev_num, 297 (new->ldev_control.ldev_init_done ? 298 "Completed" : "Failed")); 299 } 300 return status; 301 } 302 303 /* 304 * myrs_get_pdev_info - executes a "Read Physical Device Information" Command 305 */ 306 static unsigned char myrs_get_pdev_info(struct myrs_hba *cs, 307 unsigned char channel, unsigned char target, unsigned char lun, 308 struct myrs_pdev_info *pdev_info) 309 { 310 struct myrs_cmdblk *cmd_blk = &cs->dcmd_blk; 311 union myrs_cmd_mbox *mbox = &cmd_blk->mbox; 312 dma_addr_t pdev_info_addr; 313 union myrs_sgl *sgl; 314 unsigned char status; 315 316 pdev_info_addr = dma_map_single(&cs->pdev->dev, pdev_info, 317 sizeof(struct myrs_pdev_info), 318 DMA_FROM_DEVICE); 319 if (dma_mapping_error(&cs->pdev->dev, pdev_info_addr)) 320 return MYRS_STATUS_FAILED; 321 322 mutex_lock(&cs->dcmd_mutex); 323 myrs_reset_cmd(cmd_blk); 324 mbox->pdev_info.opcode = MYRS_CMD_OP_IOCTL; 325 mbox->pdev_info.id = MYRS_DCMD_TAG; 326 mbox->pdev_info.control.dma_ctrl_to_host = true; 327 mbox->pdev_info.control.no_autosense = true; 328 mbox->pdev_info.dma_size = sizeof(struct myrs_pdev_info); 329 mbox->pdev_info.pdev.lun = lun; 330 mbox->pdev_info.pdev.target = target; 331 mbox->pdev_info.pdev.channel = channel; 332 mbox->pdev_info.ioctl_opcode = MYRS_IOCTL_GET_PDEV_INFO_VALID; 333 sgl = &mbox->pdev_info.dma_addr; 334 sgl->sge[0].sge_addr = pdev_info_addr; 335 sgl->sge[0].sge_count = mbox->pdev_info.dma_size; 336 dev_dbg(&cs->host->shost_gendev, 337 "Sending GetPhysicalDeviceInfoValid for pdev %d:%d:%d\n", 338 channel, target, lun); 339 myrs_exec_cmd(cs, cmd_blk); 340 status = cmd_blk->status; 341 mutex_unlock(&cs->dcmd_mutex); 342 dma_unmap_single(&cs->pdev->dev, pdev_info_addr, 343 sizeof(struct myrs_pdev_info), DMA_FROM_DEVICE); 344 return status; 345 } 346 347 /* 348 * myrs_dev_op - executes a "Device Operation" Command 349 */ 350 static unsigned char myrs_dev_op(struct myrs_hba *cs, 351 enum myrs_ioctl_opcode opcode, enum myrs_opdev opdev) 352 { 353 struct myrs_cmdblk *cmd_blk = &cs->dcmd_blk; 354 union myrs_cmd_mbox *mbox = &cmd_blk->mbox; 355 unsigned char status; 356 357 mutex_lock(&cs->dcmd_mutex); 358 myrs_reset_cmd(cmd_blk); 359 mbox->dev_op.opcode = MYRS_CMD_OP_IOCTL; 360 mbox->dev_op.id = MYRS_DCMD_TAG; 361 mbox->dev_op.control.dma_ctrl_to_host = true; 362 mbox->dev_op.control.no_autosense = true; 363 mbox->dev_op.ioctl_opcode = opcode; 364 mbox->dev_op.opdev = opdev; 365 myrs_exec_cmd(cs, cmd_blk); 366 status = cmd_blk->status; 367 mutex_unlock(&cs->dcmd_mutex); 368 return status; 369 } 370 371 /* 372 * myrs_translate_pdev - translates a Physical Device Channel and 373 * TargetID into a Logical Device. 374 */ 375 static unsigned char myrs_translate_pdev(struct myrs_hba *cs, 376 unsigned char channel, unsigned char target, unsigned char lun, 377 struct myrs_devmap *devmap) 378 { 379 struct pci_dev *pdev = cs->pdev; 380 dma_addr_t devmap_addr; 381 struct myrs_cmdblk *cmd_blk; 382 union myrs_cmd_mbox *mbox; 383 union myrs_sgl *sgl; 384 unsigned char status; 385 386 memset(devmap, 0x0, sizeof(struct myrs_devmap)); 387 devmap_addr = dma_map_single(&pdev->dev, devmap, 388 sizeof(struct myrs_devmap), 389 DMA_FROM_DEVICE); 390 if (dma_mapping_error(&pdev->dev, devmap_addr)) 391 return MYRS_STATUS_FAILED; 392 393 mutex_lock(&cs->dcmd_mutex); 394 cmd_blk = &cs->dcmd_blk; 395 mbox = &cmd_blk->mbox; 396 mbox->pdev_info.opcode = MYRS_CMD_OP_IOCTL; 397 mbox->pdev_info.control.dma_ctrl_to_host = true; 398 mbox->pdev_info.control.no_autosense = true; 399 mbox->pdev_info.dma_size = sizeof(struct myrs_devmap); 400 mbox->pdev_info.pdev.target = target; 401 mbox->pdev_info.pdev.channel = channel; 402 mbox->pdev_info.pdev.lun = lun; 403 mbox->pdev_info.ioctl_opcode = MYRS_IOCTL_XLATE_PDEV_TO_LDEV; 404 sgl = &mbox->pdev_info.dma_addr; 405 sgl->sge[0].sge_addr = devmap_addr; 406 sgl->sge[0].sge_count = mbox->pdev_info.dma_size; 407 408 myrs_exec_cmd(cs, cmd_blk); 409 status = cmd_blk->status; 410 mutex_unlock(&cs->dcmd_mutex); 411 dma_unmap_single(&pdev->dev, devmap_addr, 412 sizeof(struct myrs_devmap), DMA_FROM_DEVICE); 413 return status; 414 } 415 416 /* 417 * myrs_get_event - executes a Get Event Command 418 */ 419 static unsigned char myrs_get_event(struct myrs_hba *cs, 420 unsigned int event_num, struct myrs_event *event_buf) 421 { 422 struct pci_dev *pdev = cs->pdev; 423 dma_addr_t event_addr; 424 struct myrs_cmdblk *cmd_blk = &cs->mcmd_blk; 425 union myrs_cmd_mbox *mbox = &cmd_blk->mbox; 426 union myrs_sgl *sgl; 427 unsigned char status; 428 429 event_addr = dma_map_single(&pdev->dev, event_buf, 430 sizeof(struct myrs_event), DMA_FROM_DEVICE); 431 if (dma_mapping_error(&pdev->dev, event_addr)) 432 return MYRS_STATUS_FAILED; 433 434 mbox->get_event.opcode = MYRS_CMD_OP_IOCTL; 435 mbox->get_event.dma_size = sizeof(struct myrs_event); 436 mbox->get_event.evnum_upper = event_num >> 16; 437 mbox->get_event.ctlr_num = 0; 438 mbox->get_event.ioctl_opcode = MYRS_IOCTL_GET_EVENT; 439 mbox->get_event.evnum_lower = event_num & 0xFFFF; 440 sgl = &mbox->get_event.dma_addr; 441 sgl->sge[0].sge_addr = event_addr; 442 sgl->sge[0].sge_count = mbox->get_event.dma_size; 443 myrs_exec_cmd(cs, cmd_blk); 444 status = cmd_blk->status; 445 dma_unmap_single(&pdev->dev, event_addr, 446 sizeof(struct myrs_event), DMA_FROM_DEVICE); 447 448 return status; 449 } 450 451 /* 452 * myrs_get_fwstatus - executes a Get Health Status Command 453 */ 454 static unsigned char myrs_get_fwstatus(struct myrs_hba *cs) 455 { 456 struct myrs_cmdblk *cmd_blk = &cs->mcmd_blk; 457 union myrs_cmd_mbox *mbox = &cmd_blk->mbox; 458 union myrs_sgl *sgl; 459 unsigned char status = cmd_blk->status; 460 461 myrs_reset_cmd(cmd_blk); 462 mbox->common.opcode = MYRS_CMD_OP_IOCTL; 463 mbox->common.id = MYRS_MCMD_TAG; 464 mbox->common.control.dma_ctrl_to_host = true; 465 mbox->common.control.no_autosense = true; 466 mbox->common.dma_size = sizeof(struct myrs_fwstat); 467 mbox->common.ioctl_opcode = MYRS_IOCTL_GET_HEALTH_STATUS; 468 sgl = &mbox->common.dma_addr; 469 sgl->sge[0].sge_addr = cs->fwstat_addr; 470 sgl->sge[0].sge_count = mbox->ctlr_info.dma_size; 471 dev_dbg(&cs->host->shost_gendev, "Sending GetHealthStatus\n"); 472 myrs_exec_cmd(cs, cmd_blk); 473 status = cmd_blk->status; 474 475 return status; 476 } 477 478 /* 479 * myrs_enable_mmio_mbox - enables the Memory Mailbox Interface 480 */ 481 static bool myrs_enable_mmio_mbox(struct myrs_hba *cs, 482 enable_mbox_t enable_mbox_fn) 483 { 484 void __iomem *base = cs->io_base; 485 struct pci_dev *pdev = cs->pdev; 486 union myrs_cmd_mbox *cmd_mbox; 487 struct myrs_stat_mbox *stat_mbox; 488 union myrs_cmd_mbox *mbox; 489 dma_addr_t mbox_addr; 490 unsigned char status = MYRS_STATUS_FAILED; 491 492 if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(64))) 493 if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(32))) { 494 dev_err(&pdev->dev, "DMA mask out of range\n"); 495 return false; 496 } 497 498 /* Temporary dma mapping, used only in the scope of this function */ 499 mbox = dma_alloc_coherent(&pdev->dev, sizeof(union myrs_cmd_mbox), 500 &mbox_addr, GFP_KERNEL); 501 if (dma_mapping_error(&pdev->dev, mbox_addr)) 502 return false; 503 504 /* These are the base addresses for the command memory mailbox array */ 505 cs->cmd_mbox_size = MYRS_MAX_CMD_MBOX * sizeof(union myrs_cmd_mbox); 506 cmd_mbox = dma_alloc_coherent(&pdev->dev, cs->cmd_mbox_size, 507 &cs->cmd_mbox_addr, GFP_KERNEL); 508 if (dma_mapping_error(&pdev->dev, cs->cmd_mbox_addr)) { 509 dev_err(&pdev->dev, "Failed to map command mailbox\n"); 510 goto out_free; 511 } 512 cs->first_cmd_mbox = cmd_mbox; 513 cmd_mbox += MYRS_MAX_CMD_MBOX - 1; 514 cs->last_cmd_mbox = cmd_mbox; 515 cs->next_cmd_mbox = cs->first_cmd_mbox; 516 cs->prev_cmd_mbox1 = cs->last_cmd_mbox; 517 cs->prev_cmd_mbox2 = cs->last_cmd_mbox - 1; 518 519 /* These are the base addresses for the status memory mailbox array */ 520 cs->stat_mbox_size = MYRS_MAX_STAT_MBOX * sizeof(struct myrs_stat_mbox); 521 stat_mbox = dma_alloc_coherent(&pdev->dev, cs->stat_mbox_size, 522 &cs->stat_mbox_addr, GFP_KERNEL); 523 if (dma_mapping_error(&pdev->dev, cs->stat_mbox_addr)) { 524 dev_err(&pdev->dev, "Failed to map status mailbox\n"); 525 goto out_free; 526 } 527 528 cs->first_stat_mbox = stat_mbox; 529 stat_mbox += MYRS_MAX_STAT_MBOX - 1; 530 cs->last_stat_mbox = stat_mbox; 531 cs->next_stat_mbox = cs->first_stat_mbox; 532 533 cs->fwstat_buf = dma_alloc_coherent(&pdev->dev, 534 sizeof(struct myrs_fwstat), 535 &cs->fwstat_addr, GFP_KERNEL); 536 if (dma_mapping_error(&pdev->dev, cs->fwstat_addr)) { 537 dev_err(&pdev->dev, "Failed to map firmware health buffer\n"); 538 cs->fwstat_buf = NULL; 539 goto out_free; 540 } 541 cs->ctlr_info = kzalloc(sizeof(struct myrs_ctlr_info), 542 GFP_KERNEL | GFP_DMA); 543 if (!cs->ctlr_info) 544 goto out_free; 545 546 cs->event_buf = kzalloc(sizeof(struct myrs_event), 547 GFP_KERNEL | GFP_DMA); 548 if (!cs->event_buf) 549 goto out_free; 550 551 /* Enable the Memory Mailbox Interface. */ 552 memset(mbox, 0, sizeof(union myrs_cmd_mbox)); 553 mbox->set_mbox.id = 1; 554 mbox->set_mbox.opcode = MYRS_CMD_OP_IOCTL; 555 mbox->set_mbox.control.no_autosense = true; 556 mbox->set_mbox.first_cmd_mbox_size_kb = 557 (MYRS_MAX_CMD_MBOX * sizeof(union myrs_cmd_mbox)) >> 10; 558 mbox->set_mbox.first_stat_mbox_size_kb = 559 (MYRS_MAX_STAT_MBOX * sizeof(struct myrs_stat_mbox)) >> 10; 560 mbox->set_mbox.second_cmd_mbox_size_kb = 0; 561 mbox->set_mbox.second_stat_mbox_size_kb = 0; 562 mbox->set_mbox.sense_len = 0; 563 mbox->set_mbox.ioctl_opcode = MYRS_IOCTL_SET_MEM_MBOX; 564 mbox->set_mbox.fwstat_buf_size_kb = 1; 565 mbox->set_mbox.fwstat_buf_addr = cs->fwstat_addr; 566 mbox->set_mbox.first_cmd_mbox_addr = cs->cmd_mbox_addr; 567 mbox->set_mbox.first_stat_mbox_addr = cs->stat_mbox_addr; 568 status = enable_mbox_fn(base, mbox_addr); 569 570 out_free: 571 dma_free_coherent(&pdev->dev, sizeof(union myrs_cmd_mbox), 572 mbox, mbox_addr); 573 if (status != MYRS_STATUS_SUCCESS) 574 dev_err(&pdev->dev, "Failed to enable mailbox, status %X\n", 575 status); 576 return (status == MYRS_STATUS_SUCCESS); 577 } 578 579 /* 580 * myrs_get_config - reads the Configuration Information 581 */ 582 static int myrs_get_config(struct myrs_hba *cs) 583 { 584 struct myrs_ctlr_info *info = cs->ctlr_info; 585 struct Scsi_Host *shost = cs->host; 586 unsigned char status; 587 unsigned char model[20]; 588 unsigned char fw_version[12]; 589 int i, model_len; 590 591 /* Get data into dma-able area, then copy into permanent location */ 592 mutex_lock(&cs->cinfo_mutex); 593 status = myrs_get_ctlr_info(cs); 594 mutex_unlock(&cs->cinfo_mutex); 595 if (status != MYRS_STATUS_SUCCESS) { 596 shost_printk(KERN_ERR, shost, 597 "Failed to get controller information\n"); 598 return -ENODEV; 599 } 600 601 /* Initialize the Controller Model Name and Full Model Name fields. */ 602 model_len = sizeof(info->ctlr_name); 603 if (model_len > sizeof(model)-1) 604 model_len = sizeof(model)-1; 605 memcpy(model, info->ctlr_name, model_len); 606 model_len--; 607 while (model[model_len] == ' ' || model[model_len] == '\0') 608 model_len--; 609 model[++model_len] = '\0'; 610 strcpy(cs->model_name, "DAC960 "); 611 strcat(cs->model_name, model); 612 /* Initialize the Controller Firmware Version field. */ 613 sprintf(fw_version, "%d.%02d-%02d", 614 info->fw_major_version, info->fw_minor_version, 615 info->fw_turn_number); 616 if (info->fw_major_version == 6 && 617 info->fw_minor_version == 0 && 618 info->fw_turn_number < 1) { 619 shost_printk(KERN_WARNING, shost, 620 "FIRMWARE VERSION %s DOES NOT PROVIDE THE CONTROLLER\n" 621 "STATUS MONITORING FUNCTIONALITY NEEDED BY THIS DRIVER.\n" 622 "PLEASE UPGRADE TO VERSION 6.00-01 OR ABOVE.\n", 623 fw_version); 624 return -ENODEV; 625 } 626 /* Initialize the Controller Channels and Targets. */ 627 shost->max_channel = info->physchan_present + info->virtchan_present; 628 shost->max_id = info->max_targets[0]; 629 for (i = 1; i < 16; i++) { 630 if (!info->max_targets[i]) 631 continue; 632 if (shost->max_id < info->max_targets[i]) 633 shost->max_id = info->max_targets[i]; 634 } 635 636 /* 637 * Initialize the Controller Queue Depth, Driver Queue Depth, 638 * Logical Drive Count, Maximum Blocks per Command, Controller 639 * Scatter/Gather Limit, and Driver Scatter/Gather Limit. 640 * The Driver Queue Depth must be at most three less than 641 * the Controller Queue Depth; tag '1' is reserved for 642 * direct commands, and tag '2' for monitoring commands. 643 */ 644 shost->can_queue = info->max_tcq - 3; 645 if (shost->can_queue > MYRS_MAX_CMD_MBOX - 3) 646 shost->can_queue = MYRS_MAX_CMD_MBOX - 3; 647 shost->max_sectors = info->max_transfer_size; 648 shost->sg_tablesize = info->max_sge; 649 if (shost->sg_tablesize > MYRS_SG_LIMIT) 650 shost->sg_tablesize = MYRS_SG_LIMIT; 651 652 shost_printk(KERN_INFO, shost, 653 "Configuring %s PCI RAID Controller\n", model); 654 shost_printk(KERN_INFO, shost, 655 " Firmware Version: %s, Channels: %d, Memory Size: %dMB\n", 656 fw_version, info->physchan_present, info->mem_size_mb); 657 658 shost_printk(KERN_INFO, shost, 659 " Controller Queue Depth: %d, Maximum Blocks per Command: %d\n", 660 shost->can_queue, shost->max_sectors); 661 662 shost_printk(KERN_INFO, shost, 663 " Driver Queue Depth: %d, Scatter/Gather Limit: %d of %d Segments\n", 664 shost->can_queue, shost->sg_tablesize, MYRS_SG_LIMIT); 665 for (i = 0; i < info->physchan_max; i++) { 666 if (!info->max_targets[i]) 667 continue; 668 shost_printk(KERN_INFO, shost, 669 " Device Channel %d: max %d devices\n", 670 i, info->max_targets[i]); 671 } 672 shost_printk(KERN_INFO, shost, 673 " Physical: %d/%d channels, %d disks, %d devices\n", 674 info->physchan_present, info->physchan_max, 675 info->pdisk_present, info->pdev_present); 676 677 shost_printk(KERN_INFO, shost, 678 " Logical: %d/%d channels, %d disks\n", 679 info->virtchan_present, info->virtchan_max, 680 info->ldev_present); 681 return 0; 682 } 683 684 /* 685 * myrs_log_event - prints a Controller Event message 686 */ 687 static struct { 688 int ev_code; 689 unsigned char *ev_msg; 690 } myrs_ev_list[] = { 691 /* Physical Device Events (0x0000 - 0x007F) */ 692 { 0x0001, "P Online" }, 693 { 0x0002, "P Standby" }, 694 { 0x0005, "P Automatic Rebuild Started" }, 695 { 0x0006, "P Manual Rebuild Started" }, 696 { 0x0007, "P Rebuild Completed" }, 697 { 0x0008, "P Rebuild Cancelled" }, 698 { 0x0009, "P Rebuild Failed for Unknown Reasons" }, 699 { 0x000A, "P Rebuild Failed due to New Physical Device" }, 700 { 0x000B, "P Rebuild Failed due to Logical Drive Failure" }, 701 { 0x000C, "S Offline" }, 702 { 0x000D, "P Found" }, 703 { 0x000E, "P Removed" }, 704 { 0x000F, "P Unconfigured" }, 705 { 0x0010, "P Expand Capacity Started" }, 706 { 0x0011, "P Expand Capacity Completed" }, 707 { 0x0012, "P Expand Capacity Failed" }, 708 { 0x0013, "P Command Timed Out" }, 709 { 0x0014, "P Command Aborted" }, 710 { 0x0015, "P Command Retried" }, 711 { 0x0016, "P Parity Error" }, 712 { 0x0017, "P Soft Error" }, 713 { 0x0018, "P Miscellaneous Error" }, 714 { 0x0019, "P Reset" }, 715 { 0x001A, "P Active Spare Found" }, 716 { 0x001B, "P Warm Spare Found" }, 717 { 0x001C, "S Sense Data Received" }, 718 { 0x001D, "P Initialization Started" }, 719 { 0x001E, "P Initialization Completed" }, 720 { 0x001F, "P Initialization Failed" }, 721 { 0x0020, "P Initialization Cancelled" }, 722 { 0x0021, "P Failed because Write Recovery Failed" }, 723 { 0x0022, "P Failed because SCSI Bus Reset Failed" }, 724 { 0x0023, "P Failed because of Double Check Condition" }, 725 { 0x0024, "P Failed because Device Cannot Be Accessed" }, 726 { 0x0025, "P Failed because of Gross Error on SCSI Processor" }, 727 { 0x0026, "P Failed because of Bad Tag from Device" }, 728 { 0x0027, "P Failed because of Command Timeout" }, 729 { 0x0028, "P Failed because of System Reset" }, 730 { 0x0029, "P Failed because of Busy Status or Parity Error" }, 731 { 0x002A, "P Failed because Host Set Device to Failed State" }, 732 { 0x002B, "P Failed because of Selection Timeout" }, 733 { 0x002C, "P Failed because of SCSI Bus Phase Error" }, 734 { 0x002D, "P Failed because Device Returned Unknown Status" }, 735 { 0x002E, "P Failed because Device Not Ready" }, 736 { 0x002F, "P Failed because Device Not Found at Startup" }, 737 { 0x0030, "P Failed because COD Write Operation Failed" }, 738 { 0x0031, "P Failed because BDT Write Operation Failed" }, 739 { 0x0039, "P Missing at Startup" }, 740 { 0x003A, "P Start Rebuild Failed due to Physical Drive Too Small" }, 741 { 0x003C, "P Temporarily Offline Device Automatically Made Online" }, 742 { 0x003D, "P Standby Rebuild Started" }, 743 /* Logical Device Events (0x0080 - 0x00FF) */ 744 { 0x0080, "M Consistency Check Started" }, 745 { 0x0081, "M Consistency Check Completed" }, 746 { 0x0082, "M Consistency Check Cancelled" }, 747 { 0x0083, "M Consistency Check Completed With Errors" }, 748 { 0x0084, "M Consistency Check Failed due to Logical Drive Failure" }, 749 { 0x0085, "M Consistency Check Failed due to Physical Device Failure" }, 750 { 0x0086, "L Offline" }, 751 { 0x0087, "L Critical" }, 752 { 0x0088, "L Online" }, 753 { 0x0089, "M Automatic Rebuild Started" }, 754 { 0x008A, "M Manual Rebuild Started" }, 755 { 0x008B, "M Rebuild Completed" }, 756 { 0x008C, "M Rebuild Cancelled" }, 757 { 0x008D, "M Rebuild Failed for Unknown Reasons" }, 758 { 0x008E, "M Rebuild Failed due to New Physical Device" }, 759 { 0x008F, "M Rebuild Failed due to Logical Drive Failure" }, 760 { 0x0090, "M Initialization Started" }, 761 { 0x0091, "M Initialization Completed" }, 762 { 0x0092, "M Initialization Cancelled" }, 763 { 0x0093, "M Initialization Failed" }, 764 { 0x0094, "L Found" }, 765 { 0x0095, "L Deleted" }, 766 { 0x0096, "M Expand Capacity Started" }, 767 { 0x0097, "M Expand Capacity Completed" }, 768 { 0x0098, "M Expand Capacity Failed" }, 769 { 0x0099, "L Bad Block Found" }, 770 { 0x009A, "L Size Changed" }, 771 { 0x009B, "L Type Changed" }, 772 { 0x009C, "L Bad Data Block Found" }, 773 { 0x009E, "L Read of Data Block in BDT" }, 774 { 0x009F, "L Write Back Data for Disk Block Lost" }, 775 { 0x00A0, "L Temporarily Offline RAID-5/3 Drive Made Online" }, 776 { 0x00A1, "L Temporarily Offline RAID-6/1/0/7 Drive Made Online" }, 777 { 0x00A2, "L Standby Rebuild Started" }, 778 /* Fault Management Events (0x0100 - 0x017F) */ 779 { 0x0140, "E Fan %d Failed" }, 780 { 0x0141, "E Fan %d OK" }, 781 { 0x0142, "E Fan %d Not Present" }, 782 { 0x0143, "E Power Supply %d Failed" }, 783 { 0x0144, "E Power Supply %d OK" }, 784 { 0x0145, "E Power Supply %d Not Present" }, 785 { 0x0146, "E Temperature Sensor %d Temperature Exceeds Safe Limit" }, 786 { 0x0147, "E Temperature Sensor %d Temperature Exceeds Working Limit" }, 787 { 0x0148, "E Temperature Sensor %d Temperature Normal" }, 788 { 0x0149, "E Temperature Sensor %d Not Present" }, 789 { 0x014A, "E Enclosure Management Unit %d Access Critical" }, 790 { 0x014B, "E Enclosure Management Unit %d Access OK" }, 791 { 0x014C, "E Enclosure Management Unit %d Access Offline" }, 792 /* Controller Events (0x0180 - 0x01FF) */ 793 { 0x0181, "C Cache Write Back Error" }, 794 { 0x0188, "C Battery Backup Unit Found" }, 795 { 0x0189, "C Battery Backup Unit Charge Level Low" }, 796 { 0x018A, "C Battery Backup Unit Charge Level OK" }, 797 { 0x0193, "C Installation Aborted" }, 798 { 0x0195, "C Battery Backup Unit Physically Removed" }, 799 { 0x0196, "C Memory Error During Warm Boot" }, 800 { 0x019E, "C Memory Soft ECC Error Corrected" }, 801 { 0x019F, "C Memory Hard ECC Error Corrected" }, 802 { 0x01A2, "C Battery Backup Unit Failed" }, 803 { 0x01AB, "C Mirror Race Recovery Failed" }, 804 { 0x01AC, "C Mirror Race on Critical Drive" }, 805 /* Controller Internal Processor Events */ 806 { 0x0380, "C Internal Controller Hung" }, 807 { 0x0381, "C Internal Controller Firmware Breakpoint" }, 808 { 0x0390, "C Internal Controller i960 Processor Specific Error" }, 809 { 0x03A0, "C Internal Controller StrongARM Processor Specific Error" }, 810 { 0, "" } 811 }; 812 813 static void myrs_log_event(struct myrs_hba *cs, struct myrs_event *ev) 814 { 815 unsigned char msg_buf[MYRS_LINE_BUFFER_SIZE]; 816 int ev_idx = 0, ev_code; 817 unsigned char ev_type, *ev_msg; 818 struct Scsi_Host *shost = cs->host; 819 struct scsi_device *sdev; 820 struct scsi_sense_hdr sshdr = {0}; 821 unsigned char sense_info[4]; 822 unsigned char cmd_specific[4]; 823 824 if (ev->ev_code == 0x1C) { 825 if (!scsi_normalize_sense(ev->sense_data, 40, &sshdr)) { 826 memset(&sshdr, 0x0, sizeof(sshdr)); 827 memset(sense_info, 0x0, sizeof(sense_info)); 828 memset(cmd_specific, 0x0, sizeof(cmd_specific)); 829 } else { 830 memcpy(sense_info, &ev->sense_data[3], 4); 831 memcpy(cmd_specific, &ev->sense_data[7], 4); 832 } 833 } 834 if (sshdr.sense_key == VENDOR_SPECIFIC && 835 (sshdr.asc == 0x80 || sshdr.asc == 0x81)) 836 ev->ev_code = ((sshdr.asc - 0x80) << 8 | sshdr.ascq); 837 while (true) { 838 ev_code = myrs_ev_list[ev_idx].ev_code; 839 if (ev_code == ev->ev_code || ev_code == 0) 840 break; 841 ev_idx++; 842 } 843 ev_type = myrs_ev_list[ev_idx].ev_msg[0]; 844 ev_msg = &myrs_ev_list[ev_idx].ev_msg[2]; 845 if (ev_code == 0) { 846 shost_printk(KERN_WARNING, shost, 847 "Unknown Controller Event Code %04X\n", 848 ev->ev_code); 849 return; 850 } 851 switch (ev_type) { 852 case 'P': 853 sdev = scsi_device_lookup(shost, ev->channel, 854 ev->target, 0); 855 sdev_printk(KERN_INFO, sdev, "event %d: Physical Device %s\n", 856 ev->ev_seq, ev_msg); 857 if (sdev && sdev->hostdata && 858 sdev->channel < cs->ctlr_info->physchan_present) { 859 struct myrs_pdev_info *pdev_info = sdev->hostdata; 860 861 switch (ev->ev_code) { 862 case 0x0001: 863 case 0x0007: 864 pdev_info->dev_state = MYRS_DEVICE_ONLINE; 865 break; 866 case 0x0002: 867 pdev_info->dev_state = MYRS_DEVICE_STANDBY; 868 break; 869 case 0x000C: 870 pdev_info->dev_state = MYRS_DEVICE_OFFLINE; 871 break; 872 case 0x000E: 873 pdev_info->dev_state = MYRS_DEVICE_MISSING; 874 break; 875 case 0x000F: 876 pdev_info->dev_state = MYRS_DEVICE_UNCONFIGURED; 877 break; 878 } 879 } 880 break; 881 case 'L': 882 shost_printk(KERN_INFO, shost, 883 "event %d: Logical Drive %d %s\n", 884 ev->ev_seq, ev->lun, ev_msg); 885 cs->needs_update = true; 886 break; 887 case 'M': 888 shost_printk(KERN_INFO, shost, 889 "event %d: Logical Drive %d %s\n", 890 ev->ev_seq, ev->lun, ev_msg); 891 cs->needs_update = true; 892 break; 893 case 'S': 894 if (sshdr.sense_key == NO_SENSE || 895 (sshdr.sense_key == NOT_READY && 896 sshdr.asc == 0x04 && (sshdr.ascq == 0x01 || 897 sshdr.ascq == 0x02))) 898 break; 899 shost_printk(KERN_INFO, shost, 900 "event %d: Physical Device %d:%d %s\n", 901 ev->ev_seq, ev->channel, ev->target, ev_msg); 902 shost_printk(KERN_INFO, shost, 903 "Physical Device %d:%d Sense Key = %X, ASC = %02X, ASCQ = %02X\n", 904 ev->channel, ev->target, 905 sshdr.sense_key, sshdr.asc, sshdr.ascq); 906 shost_printk(KERN_INFO, shost, 907 "Physical Device %d:%d Sense Information = %02X%02X%02X%02X %02X%02X%02X%02X\n", 908 ev->channel, ev->target, 909 sense_info[0], sense_info[1], 910 sense_info[2], sense_info[3], 911 cmd_specific[0], cmd_specific[1], 912 cmd_specific[2], cmd_specific[3]); 913 break; 914 case 'E': 915 if (cs->disable_enc_msg) 916 break; 917 sprintf(msg_buf, ev_msg, ev->lun); 918 shost_printk(KERN_INFO, shost, "event %d: Enclosure %d %s\n", 919 ev->ev_seq, ev->target, msg_buf); 920 break; 921 case 'C': 922 shost_printk(KERN_INFO, shost, "event %d: Controller %s\n", 923 ev->ev_seq, ev_msg); 924 break; 925 default: 926 shost_printk(KERN_INFO, shost, 927 "event %d: Unknown Event Code %04X\n", 928 ev->ev_seq, ev->ev_code); 929 break; 930 } 931 } 932 933 /* 934 * SCSI sysfs interface functions 935 */ 936 static ssize_t raid_state_show(struct device *dev, 937 struct device_attribute *attr, char *buf) 938 { 939 struct scsi_device *sdev = to_scsi_device(dev); 940 struct myrs_hba *cs = shost_priv(sdev->host); 941 int ret; 942 943 if (!sdev->hostdata) 944 return snprintf(buf, 16, "Unknown\n"); 945 946 if (sdev->channel >= cs->ctlr_info->physchan_present) { 947 struct myrs_ldev_info *ldev_info = sdev->hostdata; 948 const char *name; 949 950 name = myrs_devstate_name(ldev_info->dev_state); 951 if (name) 952 ret = snprintf(buf, 32, "%s\n", name); 953 else 954 ret = snprintf(buf, 32, "Invalid (%02X)\n", 955 ldev_info->dev_state); 956 } else { 957 struct myrs_pdev_info *pdev_info; 958 const char *name; 959 960 pdev_info = sdev->hostdata; 961 name = myrs_devstate_name(pdev_info->dev_state); 962 if (name) 963 ret = snprintf(buf, 32, "%s\n", name); 964 else 965 ret = snprintf(buf, 32, "Invalid (%02X)\n", 966 pdev_info->dev_state); 967 } 968 return ret; 969 } 970 971 static ssize_t raid_state_store(struct device *dev, 972 struct device_attribute *attr, const char *buf, size_t count) 973 { 974 struct scsi_device *sdev = to_scsi_device(dev); 975 struct myrs_hba *cs = shost_priv(sdev->host); 976 struct myrs_cmdblk *cmd_blk; 977 union myrs_cmd_mbox *mbox; 978 enum myrs_devstate new_state; 979 unsigned short ldev_num; 980 unsigned char status; 981 982 if (!strncmp(buf, "offline", 7) || 983 !strncmp(buf, "kill", 4)) 984 new_state = MYRS_DEVICE_OFFLINE; 985 else if (!strncmp(buf, "online", 6)) 986 new_state = MYRS_DEVICE_ONLINE; 987 else if (!strncmp(buf, "standby", 7)) 988 new_state = MYRS_DEVICE_STANDBY; 989 else 990 return -EINVAL; 991 992 if (sdev->channel < cs->ctlr_info->physchan_present) { 993 struct myrs_pdev_info *pdev_info = sdev->hostdata; 994 struct myrs_devmap *pdev_devmap = 995 (struct myrs_devmap *)&pdev_info->rsvd13; 996 997 if (pdev_info->dev_state == new_state) { 998 sdev_printk(KERN_INFO, sdev, 999 "Device already in %s\n", 1000 myrs_devstate_name(new_state)); 1001 return count; 1002 } 1003 status = myrs_translate_pdev(cs, sdev->channel, sdev->id, 1004 sdev->lun, pdev_devmap); 1005 if (status != MYRS_STATUS_SUCCESS) 1006 return -ENXIO; 1007 ldev_num = pdev_devmap->ldev_num; 1008 } else { 1009 struct myrs_ldev_info *ldev_info = sdev->hostdata; 1010 1011 if (ldev_info->dev_state == new_state) { 1012 sdev_printk(KERN_INFO, sdev, 1013 "Device already in %s\n", 1014 myrs_devstate_name(new_state)); 1015 return count; 1016 } 1017 ldev_num = ldev_info->ldev_num; 1018 } 1019 mutex_lock(&cs->dcmd_mutex); 1020 cmd_blk = &cs->dcmd_blk; 1021 myrs_reset_cmd(cmd_blk); 1022 mbox = &cmd_blk->mbox; 1023 mbox->common.opcode = MYRS_CMD_OP_IOCTL; 1024 mbox->common.id = MYRS_DCMD_TAG; 1025 mbox->common.control.dma_ctrl_to_host = true; 1026 mbox->common.control.no_autosense = true; 1027 mbox->set_devstate.ioctl_opcode = MYRS_IOCTL_SET_DEVICE_STATE; 1028 mbox->set_devstate.state = new_state; 1029 mbox->set_devstate.ldev.ldev_num = ldev_num; 1030 myrs_exec_cmd(cs, cmd_blk); 1031 status = cmd_blk->status; 1032 mutex_unlock(&cs->dcmd_mutex); 1033 if (status == MYRS_STATUS_SUCCESS) { 1034 if (sdev->channel < cs->ctlr_info->physchan_present) { 1035 struct myrs_pdev_info *pdev_info = sdev->hostdata; 1036 1037 pdev_info->dev_state = new_state; 1038 } else { 1039 struct myrs_ldev_info *ldev_info = sdev->hostdata; 1040 1041 ldev_info->dev_state = new_state; 1042 } 1043 sdev_printk(KERN_INFO, sdev, 1044 "Set device state to %s\n", 1045 myrs_devstate_name(new_state)); 1046 return count; 1047 } 1048 sdev_printk(KERN_INFO, sdev, 1049 "Failed to set device state to %s, status 0x%02x\n", 1050 myrs_devstate_name(new_state), status); 1051 return -EINVAL; 1052 } 1053 static DEVICE_ATTR_RW(raid_state); 1054 1055 static ssize_t raid_level_show(struct device *dev, 1056 struct device_attribute *attr, char *buf) 1057 { 1058 struct scsi_device *sdev = to_scsi_device(dev); 1059 struct myrs_hba *cs = shost_priv(sdev->host); 1060 const char *name = NULL; 1061 1062 if (!sdev->hostdata) 1063 return snprintf(buf, 16, "Unknown\n"); 1064 1065 if (sdev->channel >= cs->ctlr_info->physchan_present) { 1066 struct myrs_ldev_info *ldev_info; 1067 1068 ldev_info = sdev->hostdata; 1069 name = myrs_raid_level_name(ldev_info->raid_level); 1070 if (!name) 1071 return snprintf(buf, 32, "Invalid (%02X)\n", 1072 ldev_info->dev_state); 1073 1074 } else 1075 name = myrs_raid_level_name(MYRS_RAID_PHYSICAL); 1076 1077 return snprintf(buf, 32, "%s\n", name); 1078 } 1079 static DEVICE_ATTR_RO(raid_level); 1080 1081 static ssize_t rebuild_show(struct device *dev, 1082 struct device_attribute *attr, char *buf) 1083 { 1084 struct scsi_device *sdev = to_scsi_device(dev); 1085 struct myrs_hba *cs = shost_priv(sdev->host); 1086 struct myrs_ldev_info *ldev_info; 1087 unsigned short ldev_num; 1088 unsigned char status; 1089 1090 if (sdev->channel < cs->ctlr_info->physchan_present) 1091 return snprintf(buf, 32, "physical device - not rebuilding\n"); 1092 1093 ldev_info = sdev->hostdata; 1094 ldev_num = ldev_info->ldev_num; 1095 status = myrs_get_ldev_info(cs, ldev_num, ldev_info); 1096 if (status != MYRS_STATUS_SUCCESS) { 1097 sdev_printk(KERN_INFO, sdev, 1098 "Failed to get device information, status 0x%02x\n", 1099 status); 1100 return -EIO; 1101 } 1102 if (ldev_info->rbld_active) { 1103 return snprintf(buf, 32, "rebuilding block %zu of %zu\n", 1104 (size_t)ldev_info->rbld_lba, 1105 (size_t)ldev_info->cfg_devsize); 1106 } else 1107 return snprintf(buf, 32, "not rebuilding\n"); 1108 } 1109 1110 static ssize_t rebuild_store(struct device *dev, 1111 struct device_attribute *attr, const char *buf, size_t count) 1112 { 1113 struct scsi_device *sdev = to_scsi_device(dev); 1114 struct myrs_hba *cs = shost_priv(sdev->host); 1115 struct myrs_ldev_info *ldev_info; 1116 struct myrs_cmdblk *cmd_blk; 1117 union myrs_cmd_mbox *mbox; 1118 unsigned short ldev_num; 1119 unsigned char status; 1120 int rebuild, ret; 1121 1122 if (sdev->channel < cs->ctlr_info->physchan_present) 1123 return -EINVAL; 1124 1125 ldev_info = sdev->hostdata; 1126 if (!ldev_info) 1127 return -ENXIO; 1128 ldev_num = ldev_info->ldev_num; 1129 1130 ret = kstrtoint(buf, 0, &rebuild); 1131 if (ret) 1132 return ret; 1133 1134 status = myrs_get_ldev_info(cs, ldev_num, ldev_info); 1135 if (status != MYRS_STATUS_SUCCESS) { 1136 sdev_printk(KERN_INFO, sdev, 1137 "Failed to get device information, status 0x%02x\n", 1138 status); 1139 return -EIO; 1140 } 1141 1142 if (rebuild && ldev_info->rbld_active) { 1143 sdev_printk(KERN_INFO, sdev, 1144 "Rebuild Not Initiated; already in progress\n"); 1145 return -EALREADY; 1146 } 1147 if (!rebuild && !ldev_info->rbld_active) { 1148 sdev_printk(KERN_INFO, sdev, 1149 "Rebuild Not Cancelled; no rebuild in progress\n"); 1150 return count; 1151 } 1152 1153 mutex_lock(&cs->dcmd_mutex); 1154 cmd_blk = &cs->dcmd_blk; 1155 myrs_reset_cmd(cmd_blk); 1156 mbox = &cmd_blk->mbox; 1157 mbox->common.opcode = MYRS_CMD_OP_IOCTL; 1158 mbox->common.id = MYRS_DCMD_TAG; 1159 mbox->common.control.dma_ctrl_to_host = true; 1160 mbox->common.control.no_autosense = true; 1161 if (rebuild) { 1162 mbox->ldev_info.ldev.ldev_num = ldev_num; 1163 mbox->ldev_info.ioctl_opcode = MYRS_IOCTL_RBLD_DEVICE_START; 1164 } else { 1165 mbox->ldev_info.ldev.ldev_num = ldev_num; 1166 mbox->ldev_info.ioctl_opcode = MYRS_IOCTL_RBLD_DEVICE_STOP; 1167 } 1168 myrs_exec_cmd(cs, cmd_blk); 1169 status = cmd_blk->status; 1170 mutex_unlock(&cs->dcmd_mutex); 1171 if (status) { 1172 sdev_printk(KERN_INFO, sdev, 1173 "Rebuild Not %s, status 0x%02x\n", 1174 rebuild ? "Initiated" : "Cancelled", status); 1175 ret = -EIO; 1176 } else { 1177 sdev_printk(KERN_INFO, sdev, "Rebuild %s\n", 1178 rebuild ? "Initiated" : "Cancelled"); 1179 ret = count; 1180 } 1181 1182 return ret; 1183 } 1184 static DEVICE_ATTR_RW(rebuild); 1185 1186 static ssize_t consistency_check_show(struct device *dev, 1187 struct device_attribute *attr, char *buf) 1188 { 1189 struct scsi_device *sdev = to_scsi_device(dev); 1190 struct myrs_hba *cs = shost_priv(sdev->host); 1191 struct myrs_ldev_info *ldev_info; 1192 unsigned short ldev_num; 1193 unsigned char status; 1194 1195 if (sdev->channel < cs->ctlr_info->physchan_present) 1196 return snprintf(buf, 32, "physical device - not checking\n"); 1197 1198 ldev_info = sdev->hostdata; 1199 if (!ldev_info) 1200 return -ENXIO; 1201 ldev_num = ldev_info->ldev_num; 1202 status = myrs_get_ldev_info(cs, ldev_num, ldev_info); 1203 if (ldev_info->cc_active) 1204 return snprintf(buf, 32, "checking block %zu of %zu\n", 1205 (size_t)ldev_info->cc_lba, 1206 (size_t)ldev_info->cfg_devsize); 1207 else 1208 return snprintf(buf, 32, "not checking\n"); 1209 } 1210 1211 static ssize_t consistency_check_store(struct device *dev, 1212 struct device_attribute *attr, const char *buf, size_t count) 1213 { 1214 struct scsi_device *sdev = to_scsi_device(dev); 1215 struct myrs_hba *cs = shost_priv(sdev->host); 1216 struct myrs_ldev_info *ldev_info; 1217 struct myrs_cmdblk *cmd_blk; 1218 union myrs_cmd_mbox *mbox; 1219 unsigned short ldev_num; 1220 unsigned char status; 1221 int check, ret; 1222 1223 if (sdev->channel < cs->ctlr_info->physchan_present) 1224 return -EINVAL; 1225 1226 ldev_info = sdev->hostdata; 1227 if (!ldev_info) 1228 return -ENXIO; 1229 ldev_num = ldev_info->ldev_num; 1230 1231 ret = kstrtoint(buf, 0, &check); 1232 if (ret) 1233 return ret; 1234 1235 status = myrs_get_ldev_info(cs, ldev_num, ldev_info); 1236 if (status != MYRS_STATUS_SUCCESS) { 1237 sdev_printk(KERN_INFO, sdev, 1238 "Failed to get device information, status 0x%02x\n", 1239 status); 1240 return -EIO; 1241 } 1242 if (check && ldev_info->cc_active) { 1243 sdev_printk(KERN_INFO, sdev, 1244 "Consistency Check Not Initiated; " 1245 "already in progress\n"); 1246 return -EALREADY; 1247 } 1248 if (!check && !ldev_info->cc_active) { 1249 sdev_printk(KERN_INFO, sdev, 1250 "Consistency Check Not Cancelled; " 1251 "check not in progress\n"); 1252 return count; 1253 } 1254 1255 mutex_lock(&cs->dcmd_mutex); 1256 cmd_blk = &cs->dcmd_blk; 1257 myrs_reset_cmd(cmd_blk); 1258 mbox = &cmd_blk->mbox; 1259 mbox->common.opcode = MYRS_CMD_OP_IOCTL; 1260 mbox->common.id = MYRS_DCMD_TAG; 1261 mbox->common.control.dma_ctrl_to_host = true; 1262 mbox->common.control.no_autosense = true; 1263 if (check) { 1264 mbox->cc.ldev.ldev_num = ldev_num; 1265 mbox->cc.ioctl_opcode = MYRS_IOCTL_CC_START; 1266 mbox->cc.restore_consistency = true; 1267 mbox->cc.initialized_area_only = false; 1268 } else { 1269 mbox->cc.ldev.ldev_num = ldev_num; 1270 mbox->cc.ioctl_opcode = MYRS_IOCTL_CC_STOP; 1271 } 1272 myrs_exec_cmd(cs, cmd_blk); 1273 status = cmd_blk->status; 1274 mutex_unlock(&cs->dcmd_mutex); 1275 if (status != MYRS_STATUS_SUCCESS) { 1276 sdev_printk(KERN_INFO, sdev, 1277 "Consistency Check Not %s, status 0x%02x\n", 1278 check ? "Initiated" : "Cancelled", status); 1279 ret = -EIO; 1280 } else { 1281 sdev_printk(KERN_INFO, sdev, "Consistency Check %s\n", 1282 check ? "Initiated" : "Cancelled"); 1283 ret = count; 1284 } 1285 1286 return ret; 1287 } 1288 static DEVICE_ATTR_RW(consistency_check); 1289 1290 static struct device_attribute *myrs_sdev_attrs[] = { 1291 &dev_attr_consistency_check, 1292 &dev_attr_rebuild, 1293 &dev_attr_raid_state, 1294 &dev_attr_raid_level, 1295 NULL, 1296 }; 1297 1298 static ssize_t serial_show(struct device *dev, 1299 struct device_attribute *attr, char *buf) 1300 { 1301 struct Scsi_Host *shost = class_to_shost(dev); 1302 struct myrs_hba *cs = shost_priv(shost); 1303 char serial[17]; 1304 1305 memcpy(serial, cs->ctlr_info->serial_number, 16); 1306 serial[16] = '\0'; 1307 return snprintf(buf, 16, "%s\n", serial); 1308 } 1309 static DEVICE_ATTR_RO(serial); 1310 1311 static ssize_t ctlr_num_show(struct device *dev, 1312 struct device_attribute *attr, char *buf) 1313 { 1314 struct Scsi_Host *shost = class_to_shost(dev); 1315 struct myrs_hba *cs = shost_priv(shost); 1316 1317 return snprintf(buf, 20, "%d\n", cs->host->host_no); 1318 } 1319 static DEVICE_ATTR_RO(ctlr_num); 1320 1321 static struct myrs_cpu_type_tbl { 1322 enum myrs_cpu_type type; 1323 char *name; 1324 } myrs_cpu_type_names[] = { 1325 { MYRS_CPUTYPE_i960CA, "i960CA" }, 1326 { MYRS_CPUTYPE_i960RD, "i960RD" }, 1327 { MYRS_CPUTYPE_i960RN, "i960RN" }, 1328 { MYRS_CPUTYPE_i960RP, "i960RP" }, 1329 { MYRS_CPUTYPE_NorthBay, "NorthBay" }, 1330 { MYRS_CPUTYPE_StrongArm, "StrongARM" }, 1331 { MYRS_CPUTYPE_i960RM, "i960RM" }, 1332 }; 1333 1334 static ssize_t processor_show(struct device *dev, 1335 struct device_attribute *attr, char *buf) 1336 { 1337 struct Scsi_Host *shost = class_to_shost(dev); 1338 struct myrs_hba *cs = shost_priv(shost); 1339 struct myrs_cpu_type_tbl *tbl; 1340 const char *first_processor = NULL; 1341 const char *second_processor = NULL; 1342 struct myrs_ctlr_info *info = cs->ctlr_info; 1343 ssize_t ret; 1344 int i; 1345 1346 if (info->cpu[0].cpu_count) { 1347 tbl = myrs_cpu_type_names; 1348 for (i = 0; i < ARRAY_SIZE(myrs_cpu_type_names); i++) { 1349 if (tbl[i].type == info->cpu[0].cpu_type) { 1350 first_processor = tbl[i].name; 1351 break; 1352 } 1353 } 1354 } 1355 if (info->cpu[1].cpu_count) { 1356 tbl = myrs_cpu_type_names; 1357 for (i = 0; i < ARRAY_SIZE(myrs_cpu_type_names); i++) { 1358 if (tbl[i].type == info->cpu[1].cpu_type) { 1359 second_processor = tbl[i].name; 1360 break; 1361 } 1362 } 1363 } 1364 if (first_processor && second_processor) 1365 ret = snprintf(buf, 64, "1: %s (%s, %d cpus)\n" 1366 "2: %s (%s, %d cpus)\n", 1367 info->cpu[0].cpu_name, 1368 first_processor, info->cpu[0].cpu_count, 1369 info->cpu[1].cpu_name, 1370 second_processor, info->cpu[1].cpu_count); 1371 else if (first_processor && !second_processor) 1372 ret = snprintf(buf, 64, "1: %s (%s, %d cpus)\n2: absent\n", 1373 info->cpu[0].cpu_name, 1374 first_processor, info->cpu[0].cpu_count); 1375 else if (!first_processor && second_processor) 1376 ret = snprintf(buf, 64, "1: absent\n2: %s (%s, %d cpus)\n", 1377 info->cpu[1].cpu_name, 1378 second_processor, info->cpu[1].cpu_count); 1379 else 1380 ret = snprintf(buf, 64, "1: absent\n2: absent\n"); 1381 1382 return ret; 1383 } 1384 static DEVICE_ATTR_RO(processor); 1385 1386 static ssize_t model_show(struct device *dev, 1387 struct device_attribute *attr, char *buf) 1388 { 1389 struct Scsi_Host *shost = class_to_shost(dev); 1390 struct myrs_hba *cs = shost_priv(shost); 1391 1392 return snprintf(buf, 28, "%s\n", cs->model_name); 1393 } 1394 static DEVICE_ATTR_RO(model); 1395 1396 static ssize_t ctlr_type_show(struct device *dev, 1397 struct device_attribute *attr, char *buf) 1398 { 1399 struct Scsi_Host *shost = class_to_shost(dev); 1400 struct myrs_hba *cs = shost_priv(shost); 1401 1402 return snprintf(buf, 4, "%d\n", cs->ctlr_info->ctlr_type); 1403 } 1404 static DEVICE_ATTR_RO(ctlr_type); 1405 1406 static ssize_t cache_size_show(struct device *dev, 1407 struct device_attribute *attr, char *buf) 1408 { 1409 struct Scsi_Host *shost = class_to_shost(dev); 1410 struct myrs_hba *cs = shost_priv(shost); 1411 1412 return snprintf(buf, 8, "%d MB\n", cs->ctlr_info->cache_size_mb); 1413 } 1414 static DEVICE_ATTR_RO(cache_size); 1415 1416 static ssize_t firmware_show(struct device *dev, 1417 struct device_attribute *attr, char *buf) 1418 { 1419 struct Scsi_Host *shost = class_to_shost(dev); 1420 struct myrs_hba *cs = shost_priv(shost); 1421 1422 return snprintf(buf, 16, "%d.%02d-%02d\n", 1423 cs->ctlr_info->fw_major_version, 1424 cs->ctlr_info->fw_minor_version, 1425 cs->ctlr_info->fw_turn_number); 1426 } 1427 static DEVICE_ATTR_RO(firmware); 1428 1429 static ssize_t discovery_store(struct device *dev, 1430 struct device_attribute *attr, const char *buf, size_t count) 1431 { 1432 struct Scsi_Host *shost = class_to_shost(dev); 1433 struct myrs_hba *cs = shost_priv(shost); 1434 struct myrs_cmdblk *cmd_blk; 1435 union myrs_cmd_mbox *mbox; 1436 unsigned char status; 1437 1438 mutex_lock(&cs->dcmd_mutex); 1439 cmd_blk = &cs->dcmd_blk; 1440 myrs_reset_cmd(cmd_blk); 1441 mbox = &cmd_blk->mbox; 1442 mbox->common.opcode = MYRS_CMD_OP_IOCTL; 1443 mbox->common.id = MYRS_DCMD_TAG; 1444 mbox->common.control.dma_ctrl_to_host = true; 1445 mbox->common.control.no_autosense = true; 1446 mbox->common.ioctl_opcode = MYRS_IOCTL_START_DISCOVERY; 1447 myrs_exec_cmd(cs, cmd_blk); 1448 status = cmd_blk->status; 1449 mutex_unlock(&cs->dcmd_mutex); 1450 if (status != MYRS_STATUS_SUCCESS) { 1451 shost_printk(KERN_INFO, shost, 1452 "Discovery Not Initiated, status %02X\n", 1453 status); 1454 return -EINVAL; 1455 } 1456 shost_printk(KERN_INFO, shost, "Discovery Initiated\n"); 1457 cs->next_evseq = 0; 1458 cs->needs_update = true; 1459 queue_delayed_work(cs->work_q, &cs->monitor_work, 1); 1460 flush_delayed_work(&cs->monitor_work); 1461 shost_printk(KERN_INFO, shost, "Discovery Completed\n"); 1462 1463 return count; 1464 } 1465 static DEVICE_ATTR_WO(discovery); 1466 1467 static ssize_t flush_cache_store(struct device *dev, 1468 struct device_attribute *attr, const char *buf, size_t count) 1469 { 1470 struct Scsi_Host *shost = class_to_shost(dev); 1471 struct myrs_hba *cs = shost_priv(shost); 1472 unsigned char status; 1473 1474 status = myrs_dev_op(cs, MYRS_IOCTL_FLUSH_DEVICE_DATA, 1475 MYRS_RAID_CONTROLLER); 1476 if (status == MYRS_STATUS_SUCCESS) { 1477 shost_printk(KERN_INFO, shost, "Cache Flush Completed\n"); 1478 return count; 1479 } 1480 shost_printk(KERN_INFO, shost, 1481 "Cache Flush failed, status 0x%02x\n", status); 1482 return -EIO; 1483 } 1484 static DEVICE_ATTR_WO(flush_cache); 1485 1486 static ssize_t disable_enclosure_messages_show(struct device *dev, 1487 struct device_attribute *attr, char *buf) 1488 { 1489 struct Scsi_Host *shost = class_to_shost(dev); 1490 struct myrs_hba *cs = shost_priv(shost); 1491 1492 return snprintf(buf, 3, "%d\n", cs->disable_enc_msg); 1493 } 1494 1495 static ssize_t disable_enclosure_messages_store(struct device *dev, 1496 struct device_attribute *attr, const char *buf, size_t count) 1497 { 1498 struct scsi_device *sdev = to_scsi_device(dev); 1499 struct myrs_hba *cs = shost_priv(sdev->host); 1500 int value, ret; 1501 1502 ret = kstrtoint(buf, 0, &value); 1503 if (ret) 1504 return ret; 1505 1506 if (value > 2) 1507 return -EINVAL; 1508 1509 cs->disable_enc_msg = value; 1510 return count; 1511 } 1512 static DEVICE_ATTR_RW(disable_enclosure_messages); 1513 1514 static struct device_attribute *myrs_shost_attrs[] = { 1515 &dev_attr_serial, 1516 &dev_attr_ctlr_num, 1517 &dev_attr_processor, 1518 &dev_attr_model, 1519 &dev_attr_ctlr_type, 1520 &dev_attr_cache_size, 1521 &dev_attr_firmware, 1522 &dev_attr_discovery, 1523 &dev_attr_flush_cache, 1524 &dev_attr_disable_enclosure_messages, 1525 NULL, 1526 }; 1527 1528 /* 1529 * SCSI midlayer interface 1530 */ 1531 static int myrs_host_reset(struct scsi_cmnd *scmd) 1532 { 1533 struct Scsi_Host *shost = scmd->device->host; 1534 struct myrs_hba *cs = shost_priv(shost); 1535 1536 cs->reset(cs->io_base); 1537 return SUCCESS; 1538 } 1539 1540 static void myrs_mode_sense(struct myrs_hba *cs, struct scsi_cmnd *scmd, 1541 struct myrs_ldev_info *ldev_info) 1542 { 1543 unsigned char modes[32], *mode_pg; 1544 bool dbd; 1545 size_t mode_len; 1546 1547 dbd = (scmd->cmnd[1] & 0x08) == 0x08; 1548 if (dbd) { 1549 mode_len = 24; 1550 mode_pg = &modes[4]; 1551 } else { 1552 mode_len = 32; 1553 mode_pg = &modes[12]; 1554 } 1555 memset(modes, 0, sizeof(modes)); 1556 modes[0] = mode_len - 1; 1557 modes[2] = 0x10; /* Enable FUA */ 1558 if (ldev_info->ldev_control.wce == MYRS_LOGICALDEVICE_RO) 1559 modes[2] |= 0x80; 1560 if (!dbd) { 1561 unsigned char *block_desc = &modes[4]; 1562 1563 modes[3] = 8; 1564 put_unaligned_be32(ldev_info->cfg_devsize, &block_desc[0]); 1565 put_unaligned_be32(ldev_info->devsize_bytes, &block_desc[5]); 1566 } 1567 mode_pg[0] = 0x08; 1568 mode_pg[1] = 0x12; 1569 if (ldev_info->ldev_control.rce == MYRS_READCACHE_DISABLED) 1570 mode_pg[2] |= 0x01; 1571 if (ldev_info->ldev_control.wce == MYRS_WRITECACHE_ENABLED || 1572 ldev_info->ldev_control.wce == MYRS_INTELLIGENT_WRITECACHE_ENABLED) 1573 mode_pg[2] |= 0x04; 1574 if (ldev_info->cacheline_size) { 1575 mode_pg[2] |= 0x08; 1576 put_unaligned_be16(1 << ldev_info->cacheline_size, 1577 &mode_pg[14]); 1578 } 1579 1580 scsi_sg_copy_from_buffer(scmd, modes, mode_len); 1581 } 1582 1583 static int myrs_queuecommand(struct Scsi_Host *shost, 1584 struct scsi_cmnd *scmd) 1585 { 1586 struct myrs_hba *cs = shost_priv(shost); 1587 struct myrs_cmdblk *cmd_blk = scsi_cmd_priv(scmd); 1588 union myrs_cmd_mbox *mbox = &cmd_blk->mbox; 1589 struct scsi_device *sdev = scmd->device; 1590 union myrs_sgl *hw_sge; 1591 dma_addr_t sense_addr; 1592 struct scatterlist *sgl; 1593 unsigned long flags, timeout; 1594 int nsge; 1595 1596 if (!scmd->device->hostdata) { 1597 scmd->result = (DID_NO_CONNECT << 16); 1598 scmd->scsi_done(scmd); 1599 return 0; 1600 } 1601 1602 switch (scmd->cmnd[0]) { 1603 case REPORT_LUNS: 1604 scsi_build_sense_buffer(0, scmd->sense_buffer, ILLEGAL_REQUEST, 1605 0x20, 0x0); 1606 scmd->result = (DRIVER_SENSE << 24) | SAM_STAT_CHECK_CONDITION; 1607 scmd->scsi_done(scmd); 1608 return 0; 1609 case MODE_SENSE: 1610 if (scmd->device->channel >= cs->ctlr_info->physchan_present) { 1611 struct myrs_ldev_info *ldev_info = sdev->hostdata; 1612 1613 if ((scmd->cmnd[2] & 0x3F) != 0x3F && 1614 (scmd->cmnd[2] & 0x3F) != 0x08) { 1615 /* Illegal request, invalid field in CDB */ 1616 scsi_build_sense_buffer(0, scmd->sense_buffer, 1617 ILLEGAL_REQUEST, 0x24, 0); 1618 scmd->result = (DRIVER_SENSE << 24) | 1619 SAM_STAT_CHECK_CONDITION; 1620 } else { 1621 myrs_mode_sense(cs, scmd, ldev_info); 1622 scmd->result = (DID_OK << 16); 1623 } 1624 scmd->scsi_done(scmd); 1625 return 0; 1626 } 1627 break; 1628 } 1629 1630 myrs_reset_cmd(cmd_blk); 1631 cmd_blk->sense = dma_pool_alloc(cs->sense_pool, GFP_ATOMIC, 1632 &sense_addr); 1633 if (!cmd_blk->sense) 1634 return SCSI_MLQUEUE_HOST_BUSY; 1635 cmd_blk->sense_addr = sense_addr; 1636 1637 timeout = scmd->request->timeout; 1638 if (scmd->cmd_len <= 10) { 1639 if (scmd->device->channel >= cs->ctlr_info->physchan_present) { 1640 struct myrs_ldev_info *ldev_info = sdev->hostdata; 1641 1642 mbox->SCSI_10.opcode = MYRS_CMD_OP_SCSI_10; 1643 mbox->SCSI_10.pdev.lun = ldev_info->lun; 1644 mbox->SCSI_10.pdev.target = ldev_info->target; 1645 mbox->SCSI_10.pdev.channel = ldev_info->channel; 1646 mbox->SCSI_10.pdev.ctlr = 0; 1647 } else { 1648 mbox->SCSI_10.opcode = MYRS_CMD_OP_SCSI_10_PASSTHRU; 1649 mbox->SCSI_10.pdev.lun = sdev->lun; 1650 mbox->SCSI_10.pdev.target = sdev->id; 1651 mbox->SCSI_10.pdev.channel = sdev->channel; 1652 } 1653 mbox->SCSI_10.id = scmd->request->tag + 3; 1654 mbox->SCSI_10.control.dma_ctrl_to_host = 1655 (scmd->sc_data_direction == DMA_FROM_DEVICE); 1656 if (scmd->request->cmd_flags & REQ_FUA) 1657 mbox->SCSI_10.control.fua = true; 1658 mbox->SCSI_10.dma_size = scsi_bufflen(scmd); 1659 mbox->SCSI_10.sense_addr = cmd_blk->sense_addr; 1660 mbox->SCSI_10.sense_len = MYRS_SENSE_SIZE; 1661 mbox->SCSI_10.cdb_len = scmd->cmd_len; 1662 if (timeout > 60) { 1663 mbox->SCSI_10.tmo.tmo_scale = MYRS_TMO_SCALE_MINUTES; 1664 mbox->SCSI_10.tmo.tmo_val = timeout / 60; 1665 } else { 1666 mbox->SCSI_10.tmo.tmo_scale = MYRS_TMO_SCALE_SECONDS; 1667 mbox->SCSI_10.tmo.tmo_val = timeout; 1668 } 1669 memcpy(&mbox->SCSI_10.cdb, scmd->cmnd, scmd->cmd_len); 1670 hw_sge = &mbox->SCSI_10.dma_addr; 1671 cmd_blk->dcdb = NULL; 1672 } else { 1673 dma_addr_t dcdb_dma; 1674 1675 cmd_blk->dcdb = dma_pool_alloc(cs->dcdb_pool, GFP_ATOMIC, 1676 &dcdb_dma); 1677 if (!cmd_blk->dcdb) { 1678 dma_pool_free(cs->sense_pool, cmd_blk->sense, 1679 cmd_blk->sense_addr); 1680 cmd_blk->sense = NULL; 1681 cmd_blk->sense_addr = 0; 1682 return SCSI_MLQUEUE_HOST_BUSY; 1683 } 1684 cmd_blk->dcdb_dma = dcdb_dma; 1685 if (scmd->device->channel >= cs->ctlr_info->physchan_present) { 1686 struct myrs_ldev_info *ldev_info = sdev->hostdata; 1687 1688 mbox->SCSI_255.opcode = MYRS_CMD_OP_SCSI_256; 1689 mbox->SCSI_255.pdev.lun = ldev_info->lun; 1690 mbox->SCSI_255.pdev.target = ldev_info->target; 1691 mbox->SCSI_255.pdev.channel = ldev_info->channel; 1692 mbox->SCSI_255.pdev.ctlr = 0; 1693 } else { 1694 mbox->SCSI_255.opcode = MYRS_CMD_OP_SCSI_255_PASSTHRU; 1695 mbox->SCSI_255.pdev.lun = sdev->lun; 1696 mbox->SCSI_255.pdev.target = sdev->id; 1697 mbox->SCSI_255.pdev.channel = sdev->channel; 1698 } 1699 mbox->SCSI_255.id = scmd->request->tag + 3; 1700 mbox->SCSI_255.control.dma_ctrl_to_host = 1701 (scmd->sc_data_direction == DMA_FROM_DEVICE); 1702 if (scmd->request->cmd_flags & REQ_FUA) 1703 mbox->SCSI_255.control.fua = true; 1704 mbox->SCSI_255.dma_size = scsi_bufflen(scmd); 1705 mbox->SCSI_255.sense_addr = cmd_blk->sense_addr; 1706 mbox->SCSI_255.sense_len = MYRS_SENSE_SIZE; 1707 mbox->SCSI_255.cdb_len = scmd->cmd_len; 1708 mbox->SCSI_255.cdb_addr = cmd_blk->dcdb_dma; 1709 if (timeout > 60) { 1710 mbox->SCSI_255.tmo.tmo_scale = MYRS_TMO_SCALE_MINUTES; 1711 mbox->SCSI_255.tmo.tmo_val = timeout / 60; 1712 } else { 1713 mbox->SCSI_255.tmo.tmo_scale = MYRS_TMO_SCALE_SECONDS; 1714 mbox->SCSI_255.tmo.tmo_val = timeout; 1715 } 1716 memcpy(cmd_blk->dcdb, scmd->cmnd, scmd->cmd_len); 1717 hw_sge = &mbox->SCSI_255.dma_addr; 1718 } 1719 if (scmd->sc_data_direction == DMA_NONE) 1720 goto submit; 1721 nsge = scsi_dma_map(scmd); 1722 if (nsge == 1) { 1723 sgl = scsi_sglist(scmd); 1724 hw_sge->sge[0].sge_addr = (u64)sg_dma_address(sgl); 1725 hw_sge->sge[0].sge_count = (u64)sg_dma_len(sgl); 1726 } else { 1727 struct myrs_sge *hw_sgl; 1728 dma_addr_t hw_sgl_addr; 1729 int i; 1730 1731 if (nsge > 2) { 1732 hw_sgl = dma_pool_alloc(cs->sg_pool, GFP_ATOMIC, 1733 &hw_sgl_addr); 1734 if (WARN_ON(!hw_sgl)) { 1735 if (cmd_blk->dcdb) { 1736 dma_pool_free(cs->dcdb_pool, 1737 cmd_blk->dcdb, 1738 cmd_blk->dcdb_dma); 1739 cmd_blk->dcdb = NULL; 1740 cmd_blk->dcdb_dma = 0; 1741 } 1742 dma_pool_free(cs->sense_pool, 1743 cmd_blk->sense, 1744 cmd_blk->sense_addr); 1745 cmd_blk->sense = NULL; 1746 cmd_blk->sense_addr = 0; 1747 return SCSI_MLQUEUE_HOST_BUSY; 1748 } 1749 cmd_blk->sgl = hw_sgl; 1750 cmd_blk->sgl_addr = hw_sgl_addr; 1751 if (scmd->cmd_len <= 10) 1752 mbox->SCSI_10.control.add_sge_mem = true; 1753 else 1754 mbox->SCSI_255.control.add_sge_mem = true; 1755 hw_sge->ext.sge0_len = nsge; 1756 hw_sge->ext.sge0_addr = cmd_blk->sgl_addr; 1757 } else 1758 hw_sgl = hw_sge->sge; 1759 1760 scsi_for_each_sg(scmd, sgl, nsge, i) { 1761 if (WARN_ON(!hw_sgl)) { 1762 scsi_dma_unmap(scmd); 1763 scmd->result = (DID_ERROR << 16); 1764 scmd->scsi_done(scmd); 1765 return 0; 1766 } 1767 hw_sgl->sge_addr = (u64)sg_dma_address(sgl); 1768 hw_sgl->sge_count = (u64)sg_dma_len(sgl); 1769 hw_sgl++; 1770 } 1771 } 1772 submit: 1773 spin_lock_irqsave(&cs->queue_lock, flags); 1774 myrs_qcmd(cs, cmd_blk); 1775 spin_unlock_irqrestore(&cs->queue_lock, flags); 1776 1777 return 0; 1778 } 1779 1780 static unsigned short myrs_translate_ldev(struct myrs_hba *cs, 1781 struct scsi_device *sdev) 1782 { 1783 unsigned short ldev_num; 1784 unsigned int chan_offset = 1785 sdev->channel - cs->ctlr_info->physchan_present; 1786 1787 ldev_num = sdev->id + chan_offset * sdev->host->max_id; 1788 1789 return ldev_num; 1790 } 1791 1792 static int myrs_slave_alloc(struct scsi_device *sdev) 1793 { 1794 struct myrs_hba *cs = shost_priv(sdev->host); 1795 unsigned char status; 1796 1797 if (sdev->channel > sdev->host->max_channel) 1798 return 0; 1799 1800 if (sdev->channel >= cs->ctlr_info->physchan_present) { 1801 struct myrs_ldev_info *ldev_info; 1802 unsigned short ldev_num; 1803 1804 if (sdev->lun > 0) 1805 return -ENXIO; 1806 1807 ldev_num = myrs_translate_ldev(cs, sdev); 1808 1809 ldev_info = kzalloc(sizeof(*ldev_info), GFP_KERNEL|GFP_DMA); 1810 if (!ldev_info) 1811 return -ENOMEM; 1812 1813 status = myrs_get_ldev_info(cs, ldev_num, ldev_info); 1814 if (status != MYRS_STATUS_SUCCESS) { 1815 sdev->hostdata = NULL; 1816 kfree(ldev_info); 1817 } else { 1818 enum raid_level level; 1819 1820 dev_dbg(&sdev->sdev_gendev, 1821 "Logical device mapping %d:%d:%d -> %d\n", 1822 ldev_info->channel, ldev_info->target, 1823 ldev_info->lun, ldev_info->ldev_num); 1824 1825 sdev->hostdata = ldev_info; 1826 switch (ldev_info->raid_level) { 1827 case MYRS_RAID_LEVEL0: 1828 level = RAID_LEVEL_LINEAR; 1829 break; 1830 case MYRS_RAID_LEVEL1: 1831 level = RAID_LEVEL_1; 1832 break; 1833 case MYRS_RAID_LEVEL3: 1834 case MYRS_RAID_LEVEL3F: 1835 case MYRS_RAID_LEVEL3L: 1836 level = RAID_LEVEL_3; 1837 break; 1838 case MYRS_RAID_LEVEL5: 1839 case MYRS_RAID_LEVEL5L: 1840 level = RAID_LEVEL_5; 1841 break; 1842 case MYRS_RAID_LEVEL6: 1843 level = RAID_LEVEL_6; 1844 break; 1845 case MYRS_RAID_LEVELE: 1846 case MYRS_RAID_NEWSPAN: 1847 case MYRS_RAID_SPAN: 1848 level = RAID_LEVEL_LINEAR; 1849 break; 1850 case MYRS_RAID_JBOD: 1851 level = RAID_LEVEL_JBOD; 1852 break; 1853 default: 1854 level = RAID_LEVEL_UNKNOWN; 1855 break; 1856 } 1857 raid_set_level(myrs_raid_template, 1858 &sdev->sdev_gendev, level); 1859 if (ldev_info->dev_state != MYRS_DEVICE_ONLINE) { 1860 const char *name; 1861 1862 name = myrs_devstate_name(ldev_info->dev_state); 1863 sdev_printk(KERN_DEBUG, sdev, 1864 "logical device in state %s\n", 1865 name ? name : "Invalid"); 1866 } 1867 } 1868 } else { 1869 struct myrs_pdev_info *pdev_info; 1870 1871 pdev_info = kzalloc(sizeof(*pdev_info), GFP_KERNEL|GFP_DMA); 1872 if (!pdev_info) 1873 return -ENOMEM; 1874 1875 status = myrs_get_pdev_info(cs, sdev->channel, 1876 sdev->id, sdev->lun, 1877 pdev_info); 1878 if (status != MYRS_STATUS_SUCCESS) { 1879 sdev->hostdata = NULL; 1880 kfree(pdev_info); 1881 return -ENXIO; 1882 } 1883 sdev->hostdata = pdev_info; 1884 } 1885 return 0; 1886 } 1887 1888 static int myrs_slave_configure(struct scsi_device *sdev) 1889 { 1890 struct myrs_hba *cs = shost_priv(sdev->host); 1891 struct myrs_ldev_info *ldev_info; 1892 1893 if (sdev->channel > sdev->host->max_channel) 1894 return -ENXIO; 1895 1896 if (sdev->channel < cs->ctlr_info->physchan_present) { 1897 /* Skip HBA device */ 1898 if (sdev->type == TYPE_RAID) 1899 return -ENXIO; 1900 sdev->no_uld_attach = 1; 1901 return 0; 1902 } 1903 if (sdev->lun != 0) 1904 return -ENXIO; 1905 1906 ldev_info = sdev->hostdata; 1907 if (!ldev_info) 1908 return -ENXIO; 1909 if (ldev_info->ldev_control.wce == MYRS_WRITECACHE_ENABLED || 1910 ldev_info->ldev_control.wce == MYRS_INTELLIGENT_WRITECACHE_ENABLED) 1911 sdev->wce_default_on = 1; 1912 sdev->tagged_supported = 1; 1913 return 0; 1914 } 1915 1916 static void myrs_slave_destroy(struct scsi_device *sdev) 1917 { 1918 kfree(sdev->hostdata); 1919 } 1920 1921 static struct scsi_host_template myrs_template = { 1922 .module = THIS_MODULE, 1923 .name = "DAC960", 1924 .proc_name = "myrs", 1925 .queuecommand = myrs_queuecommand, 1926 .eh_host_reset_handler = myrs_host_reset, 1927 .slave_alloc = myrs_slave_alloc, 1928 .slave_configure = myrs_slave_configure, 1929 .slave_destroy = myrs_slave_destroy, 1930 .cmd_size = sizeof(struct myrs_cmdblk), 1931 .shost_attrs = myrs_shost_attrs, 1932 .sdev_attrs = myrs_sdev_attrs, 1933 .this_id = -1, 1934 }; 1935 1936 static struct myrs_hba *myrs_alloc_host(struct pci_dev *pdev, 1937 const struct pci_device_id *entry) 1938 { 1939 struct Scsi_Host *shost; 1940 struct myrs_hba *cs; 1941 1942 shost = scsi_host_alloc(&myrs_template, sizeof(struct myrs_hba)); 1943 if (!shost) 1944 return NULL; 1945 1946 shost->max_cmd_len = 16; 1947 shost->max_lun = 256; 1948 cs = shost_priv(shost); 1949 mutex_init(&cs->dcmd_mutex); 1950 mutex_init(&cs->cinfo_mutex); 1951 cs->host = shost; 1952 1953 return cs; 1954 } 1955 1956 /* 1957 * RAID template functions 1958 */ 1959 1960 /** 1961 * myrs_is_raid - return boolean indicating device is raid volume 1962 * @dev the device struct object 1963 */ 1964 static int 1965 myrs_is_raid(struct device *dev) 1966 { 1967 struct scsi_device *sdev = to_scsi_device(dev); 1968 struct myrs_hba *cs = shost_priv(sdev->host); 1969 1970 return (sdev->channel >= cs->ctlr_info->physchan_present) ? 1 : 0; 1971 } 1972 1973 /** 1974 * myrs_get_resync - get raid volume resync percent complete 1975 * @dev the device struct object 1976 */ 1977 static void 1978 myrs_get_resync(struct device *dev) 1979 { 1980 struct scsi_device *sdev = to_scsi_device(dev); 1981 struct myrs_hba *cs = shost_priv(sdev->host); 1982 struct myrs_ldev_info *ldev_info = sdev->hostdata; 1983 u64 percent_complete = 0; 1984 u8 status; 1985 1986 if (sdev->channel < cs->ctlr_info->physchan_present || !ldev_info) 1987 return; 1988 if (ldev_info->rbld_active) { 1989 unsigned short ldev_num = ldev_info->ldev_num; 1990 1991 status = myrs_get_ldev_info(cs, ldev_num, ldev_info); 1992 percent_complete = ldev_info->rbld_lba * 100; 1993 do_div(percent_complete, ldev_info->cfg_devsize); 1994 } 1995 raid_set_resync(myrs_raid_template, dev, percent_complete); 1996 } 1997 1998 /** 1999 * myrs_get_state - get raid volume status 2000 * @dev the device struct object 2001 */ 2002 static void 2003 myrs_get_state(struct device *dev) 2004 { 2005 struct scsi_device *sdev = to_scsi_device(dev); 2006 struct myrs_hba *cs = shost_priv(sdev->host); 2007 struct myrs_ldev_info *ldev_info = sdev->hostdata; 2008 enum raid_state state = RAID_STATE_UNKNOWN; 2009 2010 if (sdev->channel < cs->ctlr_info->physchan_present || !ldev_info) 2011 state = RAID_STATE_UNKNOWN; 2012 else { 2013 switch (ldev_info->dev_state) { 2014 case MYRS_DEVICE_ONLINE: 2015 state = RAID_STATE_ACTIVE; 2016 break; 2017 case MYRS_DEVICE_SUSPECTED_CRITICAL: 2018 case MYRS_DEVICE_CRITICAL: 2019 state = RAID_STATE_DEGRADED; 2020 break; 2021 case MYRS_DEVICE_REBUILD: 2022 state = RAID_STATE_RESYNCING; 2023 break; 2024 case MYRS_DEVICE_UNCONFIGURED: 2025 case MYRS_DEVICE_INVALID_STATE: 2026 state = RAID_STATE_UNKNOWN; 2027 break; 2028 default: 2029 state = RAID_STATE_OFFLINE; 2030 } 2031 } 2032 raid_set_state(myrs_raid_template, dev, state); 2033 } 2034 2035 static struct raid_function_template myrs_raid_functions = { 2036 .cookie = &myrs_template, 2037 .is_raid = myrs_is_raid, 2038 .get_resync = myrs_get_resync, 2039 .get_state = myrs_get_state, 2040 }; 2041 2042 /* 2043 * PCI interface functions 2044 */ 2045 static void myrs_flush_cache(struct myrs_hba *cs) 2046 { 2047 myrs_dev_op(cs, MYRS_IOCTL_FLUSH_DEVICE_DATA, MYRS_RAID_CONTROLLER); 2048 } 2049 2050 static void myrs_handle_scsi(struct myrs_hba *cs, struct myrs_cmdblk *cmd_blk, 2051 struct scsi_cmnd *scmd) 2052 { 2053 unsigned char status; 2054 2055 if (!cmd_blk) 2056 return; 2057 2058 scsi_dma_unmap(scmd); 2059 status = cmd_blk->status; 2060 if (cmd_blk->sense) { 2061 if (status == MYRS_STATUS_FAILED && cmd_blk->sense_len) { 2062 unsigned int sense_len = SCSI_SENSE_BUFFERSIZE; 2063 2064 if (sense_len > cmd_blk->sense_len) 2065 sense_len = cmd_blk->sense_len; 2066 memcpy(scmd->sense_buffer, cmd_blk->sense, sense_len); 2067 } 2068 dma_pool_free(cs->sense_pool, cmd_blk->sense, 2069 cmd_blk->sense_addr); 2070 cmd_blk->sense = NULL; 2071 cmd_blk->sense_addr = 0; 2072 } 2073 if (cmd_blk->dcdb) { 2074 dma_pool_free(cs->dcdb_pool, cmd_blk->dcdb, 2075 cmd_blk->dcdb_dma); 2076 cmd_blk->dcdb = NULL; 2077 cmd_blk->dcdb_dma = 0; 2078 } 2079 if (cmd_blk->sgl) { 2080 dma_pool_free(cs->sg_pool, cmd_blk->sgl, 2081 cmd_blk->sgl_addr); 2082 cmd_blk->sgl = NULL; 2083 cmd_blk->sgl_addr = 0; 2084 } 2085 if (cmd_blk->residual) 2086 scsi_set_resid(scmd, cmd_blk->residual); 2087 if (status == MYRS_STATUS_DEVICE_NON_RESPONSIVE || 2088 status == MYRS_STATUS_DEVICE_NON_RESPONSIVE2) 2089 scmd->result = (DID_BAD_TARGET << 16); 2090 else 2091 scmd->result = (DID_OK << 16) | status; 2092 scmd->scsi_done(scmd); 2093 } 2094 2095 static void myrs_handle_cmdblk(struct myrs_hba *cs, struct myrs_cmdblk *cmd_blk) 2096 { 2097 if (!cmd_blk) 2098 return; 2099 2100 if (cmd_blk->complete) { 2101 complete(cmd_blk->complete); 2102 cmd_blk->complete = NULL; 2103 } 2104 } 2105 2106 static void myrs_monitor(struct work_struct *work) 2107 { 2108 struct myrs_hba *cs = container_of(work, struct myrs_hba, 2109 monitor_work.work); 2110 struct Scsi_Host *shost = cs->host; 2111 struct myrs_ctlr_info *info = cs->ctlr_info; 2112 unsigned int epoch = cs->fwstat_buf->epoch; 2113 unsigned long interval = MYRS_PRIMARY_MONITOR_INTERVAL; 2114 unsigned char status; 2115 2116 dev_dbg(&shost->shost_gendev, "monitor tick\n"); 2117 2118 status = myrs_get_fwstatus(cs); 2119 2120 if (cs->needs_update) { 2121 cs->needs_update = false; 2122 mutex_lock(&cs->cinfo_mutex); 2123 status = myrs_get_ctlr_info(cs); 2124 mutex_unlock(&cs->cinfo_mutex); 2125 } 2126 if (cs->fwstat_buf->next_evseq - cs->next_evseq > 0) { 2127 status = myrs_get_event(cs, cs->next_evseq, 2128 cs->event_buf); 2129 if (status == MYRS_STATUS_SUCCESS) { 2130 myrs_log_event(cs, cs->event_buf); 2131 cs->next_evseq++; 2132 interval = 1; 2133 } 2134 } 2135 2136 if (time_after(jiffies, cs->secondary_monitor_time 2137 + MYRS_SECONDARY_MONITOR_INTERVAL)) 2138 cs->secondary_monitor_time = jiffies; 2139 2140 if (info->bg_init_active + 2141 info->ldev_init_active + 2142 info->pdev_init_active + 2143 info->cc_active + 2144 info->rbld_active + 2145 info->exp_active != 0) { 2146 struct scsi_device *sdev; 2147 2148 shost_for_each_device(sdev, shost) { 2149 struct myrs_ldev_info *ldev_info; 2150 int ldev_num; 2151 2152 if (sdev->channel < info->physchan_present) 2153 continue; 2154 ldev_info = sdev->hostdata; 2155 if (!ldev_info) 2156 continue; 2157 ldev_num = ldev_info->ldev_num; 2158 myrs_get_ldev_info(cs, ldev_num, ldev_info); 2159 } 2160 cs->needs_update = true; 2161 } 2162 if (epoch == cs->epoch && 2163 cs->fwstat_buf->next_evseq == cs->next_evseq && 2164 (cs->needs_update == false || 2165 time_before(jiffies, cs->primary_monitor_time 2166 + MYRS_PRIMARY_MONITOR_INTERVAL))) { 2167 interval = MYRS_SECONDARY_MONITOR_INTERVAL; 2168 } 2169 2170 if (interval > 1) 2171 cs->primary_monitor_time = jiffies; 2172 queue_delayed_work(cs->work_q, &cs->monitor_work, interval); 2173 } 2174 2175 static bool myrs_create_mempools(struct pci_dev *pdev, struct myrs_hba *cs) 2176 { 2177 struct Scsi_Host *shost = cs->host; 2178 size_t elem_size, elem_align; 2179 2180 elem_align = sizeof(struct myrs_sge); 2181 elem_size = shost->sg_tablesize * elem_align; 2182 cs->sg_pool = dma_pool_create("myrs_sg", &pdev->dev, 2183 elem_size, elem_align, 0); 2184 if (cs->sg_pool == NULL) { 2185 shost_printk(KERN_ERR, shost, 2186 "Failed to allocate SG pool\n"); 2187 return false; 2188 } 2189 2190 cs->sense_pool = dma_pool_create("myrs_sense", &pdev->dev, 2191 MYRS_SENSE_SIZE, sizeof(int), 0); 2192 if (cs->sense_pool == NULL) { 2193 dma_pool_destroy(cs->sg_pool); 2194 cs->sg_pool = NULL; 2195 shost_printk(KERN_ERR, shost, 2196 "Failed to allocate sense data pool\n"); 2197 return false; 2198 } 2199 2200 cs->dcdb_pool = dma_pool_create("myrs_dcdb", &pdev->dev, 2201 MYRS_DCDB_SIZE, 2202 sizeof(unsigned char), 0); 2203 if (!cs->dcdb_pool) { 2204 dma_pool_destroy(cs->sg_pool); 2205 cs->sg_pool = NULL; 2206 dma_pool_destroy(cs->sense_pool); 2207 cs->sense_pool = NULL; 2208 shost_printk(KERN_ERR, shost, 2209 "Failed to allocate DCDB pool\n"); 2210 return false; 2211 } 2212 2213 snprintf(cs->work_q_name, sizeof(cs->work_q_name), 2214 "myrs_wq_%d", shost->host_no); 2215 cs->work_q = create_singlethread_workqueue(cs->work_q_name); 2216 if (!cs->work_q) { 2217 dma_pool_destroy(cs->dcdb_pool); 2218 cs->dcdb_pool = NULL; 2219 dma_pool_destroy(cs->sg_pool); 2220 cs->sg_pool = NULL; 2221 dma_pool_destroy(cs->sense_pool); 2222 cs->sense_pool = NULL; 2223 shost_printk(KERN_ERR, shost, 2224 "Failed to create workqueue\n"); 2225 return false; 2226 } 2227 2228 /* Initialize the Monitoring Timer. */ 2229 INIT_DELAYED_WORK(&cs->monitor_work, myrs_monitor); 2230 queue_delayed_work(cs->work_q, &cs->monitor_work, 1); 2231 2232 return true; 2233 } 2234 2235 static void myrs_destroy_mempools(struct myrs_hba *cs) 2236 { 2237 cancel_delayed_work_sync(&cs->monitor_work); 2238 destroy_workqueue(cs->work_q); 2239 2240 dma_pool_destroy(cs->sg_pool); 2241 dma_pool_destroy(cs->dcdb_pool); 2242 dma_pool_destroy(cs->sense_pool); 2243 } 2244 2245 static void myrs_unmap(struct myrs_hba *cs) 2246 { 2247 kfree(cs->event_buf); 2248 kfree(cs->ctlr_info); 2249 if (cs->fwstat_buf) { 2250 dma_free_coherent(&cs->pdev->dev, sizeof(struct myrs_fwstat), 2251 cs->fwstat_buf, cs->fwstat_addr); 2252 cs->fwstat_buf = NULL; 2253 } 2254 if (cs->first_stat_mbox) { 2255 dma_free_coherent(&cs->pdev->dev, cs->stat_mbox_size, 2256 cs->first_stat_mbox, cs->stat_mbox_addr); 2257 cs->first_stat_mbox = NULL; 2258 } 2259 if (cs->first_cmd_mbox) { 2260 dma_free_coherent(&cs->pdev->dev, cs->cmd_mbox_size, 2261 cs->first_cmd_mbox, cs->cmd_mbox_addr); 2262 cs->first_cmd_mbox = NULL; 2263 } 2264 } 2265 2266 static void myrs_cleanup(struct myrs_hba *cs) 2267 { 2268 struct pci_dev *pdev = cs->pdev; 2269 2270 /* Free the memory mailbox, status, and related structures */ 2271 myrs_unmap(cs); 2272 2273 if (cs->mmio_base) { 2274 cs->disable_intr(cs); 2275 iounmap(cs->mmio_base); 2276 } 2277 if (cs->irq) 2278 free_irq(cs->irq, cs); 2279 if (cs->io_addr) 2280 release_region(cs->io_addr, 0x80); 2281 iounmap(cs->mmio_base); 2282 pci_set_drvdata(pdev, NULL); 2283 pci_disable_device(pdev); 2284 scsi_host_put(cs->host); 2285 } 2286 2287 static struct myrs_hba *myrs_detect(struct pci_dev *pdev, 2288 const struct pci_device_id *entry) 2289 { 2290 struct myrs_privdata *privdata = 2291 (struct myrs_privdata *)entry->driver_data; 2292 irq_handler_t irq_handler = privdata->irq_handler; 2293 unsigned int mmio_size = privdata->mmio_size; 2294 struct myrs_hba *cs = NULL; 2295 2296 cs = myrs_alloc_host(pdev, entry); 2297 if (!cs) { 2298 dev_err(&pdev->dev, "Unable to allocate Controller\n"); 2299 return NULL; 2300 } 2301 cs->pdev = pdev; 2302 2303 if (pci_enable_device(pdev)) 2304 goto Failure; 2305 2306 cs->pci_addr = pci_resource_start(pdev, 0); 2307 2308 pci_set_drvdata(pdev, cs); 2309 spin_lock_init(&cs->queue_lock); 2310 /* Map the Controller Register Window. */ 2311 if (mmio_size < PAGE_SIZE) 2312 mmio_size = PAGE_SIZE; 2313 cs->mmio_base = ioremap(cs->pci_addr & PAGE_MASK, mmio_size); 2314 if (cs->mmio_base == NULL) { 2315 dev_err(&pdev->dev, 2316 "Unable to map Controller Register Window\n"); 2317 goto Failure; 2318 } 2319 2320 cs->io_base = cs->mmio_base + (cs->pci_addr & ~PAGE_MASK); 2321 if (privdata->hw_init(pdev, cs, cs->io_base)) 2322 goto Failure; 2323 2324 /* Acquire shared access to the IRQ Channel. */ 2325 if (request_irq(pdev->irq, irq_handler, IRQF_SHARED, "myrs", cs) < 0) { 2326 dev_err(&pdev->dev, 2327 "Unable to acquire IRQ Channel %d\n", pdev->irq); 2328 goto Failure; 2329 } 2330 cs->irq = pdev->irq; 2331 return cs; 2332 2333 Failure: 2334 dev_err(&pdev->dev, 2335 "Failed to initialize Controller\n"); 2336 myrs_cleanup(cs); 2337 return NULL; 2338 } 2339 2340 /* 2341 * myrs_err_status reports Controller BIOS Messages passed through 2342 * the Error Status Register when the driver performs the BIOS handshaking. 2343 * It returns true for fatal errors and false otherwise. 2344 */ 2345 2346 static bool myrs_err_status(struct myrs_hba *cs, unsigned char status, 2347 unsigned char parm0, unsigned char parm1) 2348 { 2349 struct pci_dev *pdev = cs->pdev; 2350 2351 switch (status) { 2352 case 0x00: 2353 dev_info(&pdev->dev, 2354 "Physical Device %d:%d Not Responding\n", 2355 parm1, parm0); 2356 break; 2357 case 0x08: 2358 dev_notice(&pdev->dev, "Spinning Up Drives\n"); 2359 break; 2360 case 0x30: 2361 dev_notice(&pdev->dev, "Configuration Checksum Error\n"); 2362 break; 2363 case 0x60: 2364 dev_notice(&pdev->dev, "Mirror Race Recovery Failed\n"); 2365 break; 2366 case 0x70: 2367 dev_notice(&pdev->dev, "Mirror Race Recovery In Progress\n"); 2368 break; 2369 case 0x90: 2370 dev_notice(&pdev->dev, "Physical Device %d:%d COD Mismatch\n", 2371 parm1, parm0); 2372 break; 2373 case 0xA0: 2374 dev_notice(&pdev->dev, "Logical Drive Installation Aborted\n"); 2375 break; 2376 case 0xB0: 2377 dev_notice(&pdev->dev, "Mirror Race On A Critical Logical Drive\n"); 2378 break; 2379 case 0xD0: 2380 dev_notice(&pdev->dev, "New Controller Configuration Found\n"); 2381 break; 2382 case 0xF0: 2383 dev_err(&pdev->dev, "Fatal Memory Parity Error\n"); 2384 return true; 2385 default: 2386 dev_err(&pdev->dev, "Unknown Initialization Error %02X\n", 2387 status); 2388 return true; 2389 } 2390 return false; 2391 } 2392 2393 /* 2394 * Hardware-specific functions 2395 */ 2396 2397 /* 2398 * DAC960 GEM Series Controllers. 2399 */ 2400 2401 static inline void DAC960_GEM_hw_mbox_new_cmd(void __iomem *base) 2402 { 2403 __le32 val = cpu_to_le32(DAC960_GEM_IDB_HWMBOX_NEW_CMD << 24); 2404 2405 writel(val, base + DAC960_GEM_IDB_READ_OFFSET); 2406 } 2407 2408 static inline void DAC960_GEM_ack_hw_mbox_status(void __iomem *base) 2409 { 2410 __le32 val = cpu_to_le32(DAC960_GEM_IDB_HWMBOX_ACK_STS << 24); 2411 2412 writel(val, base + DAC960_GEM_IDB_CLEAR_OFFSET); 2413 } 2414 2415 static inline void DAC960_GEM_gen_intr(void __iomem *base) 2416 { 2417 __le32 val = cpu_to_le32(DAC960_GEM_IDB_GEN_IRQ << 24); 2418 2419 writel(val, base + DAC960_GEM_IDB_READ_OFFSET); 2420 } 2421 2422 static inline void DAC960_GEM_reset_ctrl(void __iomem *base) 2423 { 2424 __le32 val = cpu_to_le32(DAC960_GEM_IDB_CTRL_RESET << 24); 2425 2426 writel(val, base + DAC960_GEM_IDB_READ_OFFSET); 2427 } 2428 2429 static inline void DAC960_GEM_mem_mbox_new_cmd(void __iomem *base) 2430 { 2431 __le32 val = cpu_to_le32(DAC960_GEM_IDB_HWMBOX_NEW_CMD << 24); 2432 2433 writel(val, base + DAC960_GEM_IDB_READ_OFFSET); 2434 } 2435 2436 static inline bool DAC960_GEM_hw_mbox_is_full(void __iomem *base) 2437 { 2438 __le32 val; 2439 2440 val = readl(base + DAC960_GEM_IDB_READ_OFFSET); 2441 return (le32_to_cpu(val) >> 24) & DAC960_GEM_IDB_HWMBOX_FULL; 2442 } 2443 2444 static inline bool DAC960_GEM_init_in_progress(void __iomem *base) 2445 { 2446 __le32 val; 2447 2448 val = readl(base + DAC960_GEM_IDB_READ_OFFSET); 2449 return (le32_to_cpu(val) >> 24) & DAC960_GEM_IDB_INIT_IN_PROGRESS; 2450 } 2451 2452 static inline void DAC960_GEM_ack_hw_mbox_intr(void __iomem *base) 2453 { 2454 __le32 val = cpu_to_le32(DAC960_GEM_ODB_HWMBOX_ACK_IRQ << 24); 2455 2456 writel(val, base + DAC960_GEM_ODB_CLEAR_OFFSET); 2457 } 2458 2459 static inline void DAC960_GEM_ack_mem_mbox_intr(void __iomem *base) 2460 { 2461 __le32 val = cpu_to_le32(DAC960_GEM_ODB_MMBOX_ACK_IRQ << 24); 2462 2463 writel(val, base + DAC960_GEM_ODB_CLEAR_OFFSET); 2464 } 2465 2466 static inline void DAC960_GEM_ack_intr(void __iomem *base) 2467 { 2468 __le32 val = cpu_to_le32((DAC960_GEM_ODB_HWMBOX_ACK_IRQ | 2469 DAC960_GEM_ODB_MMBOX_ACK_IRQ) << 24); 2470 2471 writel(val, base + DAC960_GEM_ODB_CLEAR_OFFSET); 2472 } 2473 2474 static inline bool DAC960_GEM_hw_mbox_status_available(void __iomem *base) 2475 { 2476 __le32 val; 2477 2478 val = readl(base + DAC960_GEM_ODB_READ_OFFSET); 2479 return (le32_to_cpu(val) >> 24) & DAC960_GEM_ODB_HWMBOX_STS_AVAIL; 2480 } 2481 2482 static inline bool DAC960_GEM_mem_mbox_status_available(void __iomem *base) 2483 { 2484 __le32 val; 2485 2486 val = readl(base + DAC960_GEM_ODB_READ_OFFSET); 2487 return (le32_to_cpu(val) >> 24) & DAC960_GEM_ODB_MMBOX_STS_AVAIL; 2488 } 2489 2490 static inline void DAC960_GEM_enable_intr(void __iomem *base) 2491 { 2492 __le32 val = cpu_to_le32((DAC960_GEM_IRQMASK_HWMBOX_IRQ | 2493 DAC960_GEM_IRQMASK_MMBOX_IRQ) << 24); 2494 writel(val, base + DAC960_GEM_IRQMASK_CLEAR_OFFSET); 2495 } 2496 2497 static inline void DAC960_GEM_disable_intr(void __iomem *base) 2498 { 2499 __le32 val = 0; 2500 2501 writel(val, base + DAC960_GEM_IRQMASK_READ_OFFSET); 2502 } 2503 2504 static inline bool DAC960_GEM_intr_enabled(void __iomem *base) 2505 { 2506 __le32 val; 2507 2508 val = readl(base + DAC960_GEM_IRQMASK_READ_OFFSET); 2509 return !((le32_to_cpu(val) >> 24) & 2510 (DAC960_GEM_IRQMASK_HWMBOX_IRQ | 2511 DAC960_GEM_IRQMASK_MMBOX_IRQ)); 2512 } 2513 2514 static inline void DAC960_GEM_write_cmd_mbox(union myrs_cmd_mbox *mem_mbox, 2515 union myrs_cmd_mbox *mbox) 2516 { 2517 memcpy(&mem_mbox->words[1], &mbox->words[1], 2518 sizeof(union myrs_cmd_mbox) - sizeof(unsigned int)); 2519 /* Barrier to avoid reordering */ 2520 wmb(); 2521 mem_mbox->words[0] = mbox->words[0]; 2522 /* Barrier to force PCI access */ 2523 mb(); 2524 } 2525 2526 static inline void DAC960_GEM_write_hw_mbox(void __iomem *base, 2527 dma_addr_t cmd_mbox_addr) 2528 { 2529 dma_addr_writeql(cmd_mbox_addr, base + DAC960_GEM_CMDMBX_OFFSET); 2530 } 2531 2532 static inline unsigned short DAC960_GEM_read_cmd_ident(void __iomem *base) 2533 { 2534 return readw(base + DAC960_GEM_CMDSTS_OFFSET); 2535 } 2536 2537 static inline unsigned char DAC960_GEM_read_cmd_status(void __iomem *base) 2538 { 2539 return readw(base + DAC960_GEM_CMDSTS_OFFSET + 2); 2540 } 2541 2542 static inline bool 2543 DAC960_GEM_read_error_status(void __iomem *base, unsigned char *error, 2544 unsigned char *param0, unsigned char *param1) 2545 { 2546 __le32 val; 2547 2548 val = readl(base + DAC960_GEM_ERRSTS_READ_OFFSET); 2549 if (!((le32_to_cpu(val) >> 24) & DAC960_GEM_ERRSTS_PENDING)) 2550 return false; 2551 *error = val & ~(DAC960_GEM_ERRSTS_PENDING << 24); 2552 *param0 = readb(base + DAC960_GEM_CMDMBX_OFFSET + 0); 2553 *param1 = readb(base + DAC960_GEM_CMDMBX_OFFSET + 1); 2554 writel(0x03000000, base + DAC960_GEM_ERRSTS_CLEAR_OFFSET); 2555 return true; 2556 } 2557 2558 static inline unsigned char 2559 DAC960_GEM_mbox_init(void __iomem *base, dma_addr_t mbox_addr) 2560 { 2561 unsigned char status; 2562 2563 while (DAC960_GEM_hw_mbox_is_full(base)) 2564 udelay(1); 2565 DAC960_GEM_write_hw_mbox(base, mbox_addr); 2566 DAC960_GEM_hw_mbox_new_cmd(base); 2567 while (!DAC960_GEM_hw_mbox_status_available(base)) 2568 udelay(1); 2569 status = DAC960_GEM_read_cmd_status(base); 2570 DAC960_GEM_ack_hw_mbox_intr(base); 2571 DAC960_GEM_ack_hw_mbox_status(base); 2572 2573 return status; 2574 } 2575 2576 static int DAC960_GEM_hw_init(struct pci_dev *pdev, 2577 struct myrs_hba *cs, void __iomem *base) 2578 { 2579 int timeout = 0; 2580 unsigned char status, parm0, parm1; 2581 2582 DAC960_GEM_disable_intr(base); 2583 DAC960_GEM_ack_hw_mbox_status(base); 2584 udelay(1000); 2585 while (DAC960_GEM_init_in_progress(base) && 2586 timeout < MYRS_MAILBOX_TIMEOUT) { 2587 if (DAC960_GEM_read_error_status(base, &status, 2588 &parm0, &parm1) && 2589 myrs_err_status(cs, status, parm0, parm1)) 2590 return -EIO; 2591 udelay(10); 2592 timeout++; 2593 } 2594 if (timeout == MYRS_MAILBOX_TIMEOUT) { 2595 dev_err(&pdev->dev, 2596 "Timeout waiting for Controller Initialisation\n"); 2597 return -ETIMEDOUT; 2598 } 2599 if (!myrs_enable_mmio_mbox(cs, DAC960_GEM_mbox_init)) { 2600 dev_err(&pdev->dev, 2601 "Unable to Enable Memory Mailbox Interface\n"); 2602 DAC960_GEM_reset_ctrl(base); 2603 return -EAGAIN; 2604 } 2605 DAC960_GEM_enable_intr(base); 2606 cs->write_cmd_mbox = DAC960_GEM_write_cmd_mbox; 2607 cs->get_cmd_mbox = DAC960_GEM_mem_mbox_new_cmd; 2608 cs->disable_intr = DAC960_GEM_disable_intr; 2609 cs->reset = DAC960_GEM_reset_ctrl; 2610 return 0; 2611 } 2612 2613 static irqreturn_t DAC960_GEM_intr_handler(int irq, void *arg) 2614 { 2615 struct myrs_hba *cs = arg; 2616 void __iomem *base = cs->io_base; 2617 struct myrs_stat_mbox *next_stat_mbox; 2618 unsigned long flags; 2619 2620 spin_lock_irqsave(&cs->queue_lock, flags); 2621 DAC960_GEM_ack_intr(base); 2622 next_stat_mbox = cs->next_stat_mbox; 2623 while (next_stat_mbox->id > 0) { 2624 unsigned short id = next_stat_mbox->id; 2625 struct scsi_cmnd *scmd = NULL; 2626 struct myrs_cmdblk *cmd_blk = NULL; 2627 2628 if (id == MYRS_DCMD_TAG) 2629 cmd_blk = &cs->dcmd_blk; 2630 else if (id == MYRS_MCMD_TAG) 2631 cmd_blk = &cs->mcmd_blk; 2632 else { 2633 scmd = scsi_host_find_tag(cs->host, id - 3); 2634 if (scmd) 2635 cmd_blk = scsi_cmd_priv(scmd); 2636 } 2637 if (cmd_blk) { 2638 cmd_blk->status = next_stat_mbox->status; 2639 cmd_blk->sense_len = next_stat_mbox->sense_len; 2640 cmd_blk->residual = next_stat_mbox->residual; 2641 } else 2642 dev_err(&cs->pdev->dev, 2643 "Unhandled command completion %d\n", id); 2644 2645 memset(next_stat_mbox, 0, sizeof(struct myrs_stat_mbox)); 2646 if (++next_stat_mbox > cs->last_stat_mbox) 2647 next_stat_mbox = cs->first_stat_mbox; 2648 2649 if (cmd_blk) { 2650 if (id < 3) 2651 myrs_handle_cmdblk(cs, cmd_blk); 2652 else 2653 myrs_handle_scsi(cs, cmd_blk, scmd); 2654 } 2655 } 2656 cs->next_stat_mbox = next_stat_mbox; 2657 spin_unlock_irqrestore(&cs->queue_lock, flags); 2658 return IRQ_HANDLED; 2659 } 2660 2661 struct myrs_privdata DAC960_GEM_privdata = { 2662 .hw_init = DAC960_GEM_hw_init, 2663 .irq_handler = DAC960_GEM_intr_handler, 2664 .mmio_size = DAC960_GEM_mmio_size, 2665 }; 2666 2667 /* 2668 * DAC960 BA Series Controllers. 2669 */ 2670 2671 static inline void DAC960_BA_hw_mbox_new_cmd(void __iomem *base) 2672 { 2673 writeb(DAC960_BA_IDB_HWMBOX_NEW_CMD, base + DAC960_BA_IDB_OFFSET); 2674 } 2675 2676 static inline void DAC960_BA_ack_hw_mbox_status(void __iomem *base) 2677 { 2678 writeb(DAC960_BA_IDB_HWMBOX_ACK_STS, base + DAC960_BA_IDB_OFFSET); 2679 } 2680 2681 static inline void DAC960_BA_gen_intr(void __iomem *base) 2682 { 2683 writeb(DAC960_BA_IDB_GEN_IRQ, base + DAC960_BA_IDB_OFFSET); 2684 } 2685 2686 static inline void DAC960_BA_reset_ctrl(void __iomem *base) 2687 { 2688 writeb(DAC960_BA_IDB_CTRL_RESET, base + DAC960_BA_IDB_OFFSET); 2689 } 2690 2691 static inline void DAC960_BA_mem_mbox_new_cmd(void __iomem *base) 2692 { 2693 writeb(DAC960_BA_IDB_MMBOX_NEW_CMD, base + DAC960_BA_IDB_OFFSET); 2694 } 2695 2696 static inline bool DAC960_BA_hw_mbox_is_full(void __iomem *base) 2697 { 2698 u8 val; 2699 2700 val = readb(base + DAC960_BA_IDB_OFFSET); 2701 return !(val & DAC960_BA_IDB_HWMBOX_EMPTY); 2702 } 2703 2704 static inline bool DAC960_BA_init_in_progress(void __iomem *base) 2705 { 2706 u8 val; 2707 2708 val = readb(base + DAC960_BA_IDB_OFFSET); 2709 return !(val & DAC960_BA_IDB_INIT_DONE); 2710 } 2711 2712 static inline void DAC960_BA_ack_hw_mbox_intr(void __iomem *base) 2713 { 2714 writeb(DAC960_BA_ODB_HWMBOX_ACK_IRQ, base + DAC960_BA_ODB_OFFSET); 2715 } 2716 2717 static inline void DAC960_BA_ack_mem_mbox_intr(void __iomem *base) 2718 { 2719 writeb(DAC960_BA_ODB_MMBOX_ACK_IRQ, base + DAC960_BA_ODB_OFFSET); 2720 } 2721 2722 static inline void DAC960_BA_ack_intr(void __iomem *base) 2723 { 2724 writeb(DAC960_BA_ODB_HWMBOX_ACK_IRQ | DAC960_BA_ODB_MMBOX_ACK_IRQ, 2725 base + DAC960_BA_ODB_OFFSET); 2726 } 2727 2728 static inline bool DAC960_BA_hw_mbox_status_available(void __iomem *base) 2729 { 2730 u8 val; 2731 2732 val = readb(base + DAC960_BA_ODB_OFFSET); 2733 return val & DAC960_BA_ODB_HWMBOX_STS_AVAIL; 2734 } 2735 2736 static inline bool DAC960_BA_mem_mbox_status_available(void __iomem *base) 2737 { 2738 u8 val; 2739 2740 val = readb(base + DAC960_BA_ODB_OFFSET); 2741 return val & DAC960_BA_ODB_MMBOX_STS_AVAIL; 2742 } 2743 2744 static inline void DAC960_BA_enable_intr(void __iomem *base) 2745 { 2746 writeb(~DAC960_BA_IRQMASK_DISABLE_IRQ, base + DAC960_BA_IRQMASK_OFFSET); 2747 } 2748 2749 static inline void DAC960_BA_disable_intr(void __iomem *base) 2750 { 2751 writeb(0xFF, base + DAC960_BA_IRQMASK_OFFSET); 2752 } 2753 2754 static inline bool DAC960_BA_intr_enabled(void __iomem *base) 2755 { 2756 u8 val; 2757 2758 val = readb(base + DAC960_BA_IRQMASK_OFFSET); 2759 return !(val & DAC960_BA_IRQMASK_DISABLE_IRQ); 2760 } 2761 2762 static inline void DAC960_BA_write_cmd_mbox(union myrs_cmd_mbox *mem_mbox, 2763 union myrs_cmd_mbox *mbox) 2764 { 2765 memcpy(&mem_mbox->words[1], &mbox->words[1], 2766 sizeof(union myrs_cmd_mbox) - sizeof(unsigned int)); 2767 /* Barrier to avoid reordering */ 2768 wmb(); 2769 mem_mbox->words[0] = mbox->words[0]; 2770 /* Barrier to force PCI access */ 2771 mb(); 2772 } 2773 2774 2775 static inline void DAC960_BA_write_hw_mbox(void __iomem *base, 2776 dma_addr_t cmd_mbox_addr) 2777 { 2778 dma_addr_writeql(cmd_mbox_addr, base + DAC960_BA_CMDMBX_OFFSET); 2779 } 2780 2781 static inline unsigned short DAC960_BA_read_cmd_ident(void __iomem *base) 2782 { 2783 return readw(base + DAC960_BA_CMDSTS_OFFSET); 2784 } 2785 2786 static inline unsigned char DAC960_BA_read_cmd_status(void __iomem *base) 2787 { 2788 return readw(base + DAC960_BA_CMDSTS_OFFSET + 2); 2789 } 2790 2791 static inline bool 2792 DAC960_BA_read_error_status(void __iomem *base, unsigned char *error, 2793 unsigned char *param0, unsigned char *param1) 2794 { 2795 u8 val; 2796 2797 val = readb(base + DAC960_BA_ERRSTS_OFFSET); 2798 if (!(val & DAC960_BA_ERRSTS_PENDING)) 2799 return false; 2800 val &= ~DAC960_BA_ERRSTS_PENDING; 2801 *error = val; 2802 *param0 = readb(base + DAC960_BA_CMDMBX_OFFSET + 0); 2803 *param1 = readb(base + DAC960_BA_CMDMBX_OFFSET + 1); 2804 writeb(0xFF, base + DAC960_BA_ERRSTS_OFFSET); 2805 return true; 2806 } 2807 2808 static inline unsigned char 2809 DAC960_BA_mbox_init(void __iomem *base, dma_addr_t mbox_addr) 2810 { 2811 unsigned char status; 2812 2813 while (DAC960_BA_hw_mbox_is_full(base)) 2814 udelay(1); 2815 DAC960_BA_write_hw_mbox(base, mbox_addr); 2816 DAC960_BA_hw_mbox_new_cmd(base); 2817 while (!DAC960_BA_hw_mbox_status_available(base)) 2818 udelay(1); 2819 status = DAC960_BA_read_cmd_status(base); 2820 DAC960_BA_ack_hw_mbox_intr(base); 2821 DAC960_BA_ack_hw_mbox_status(base); 2822 2823 return status; 2824 } 2825 2826 static int DAC960_BA_hw_init(struct pci_dev *pdev, 2827 struct myrs_hba *cs, void __iomem *base) 2828 { 2829 int timeout = 0; 2830 unsigned char status, parm0, parm1; 2831 2832 DAC960_BA_disable_intr(base); 2833 DAC960_BA_ack_hw_mbox_status(base); 2834 udelay(1000); 2835 while (DAC960_BA_init_in_progress(base) && 2836 timeout < MYRS_MAILBOX_TIMEOUT) { 2837 if (DAC960_BA_read_error_status(base, &status, 2838 &parm0, &parm1) && 2839 myrs_err_status(cs, status, parm0, parm1)) 2840 return -EIO; 2841 udelay(10); 2842 timeout++; 2843 } 2844 if (timeout == MYRS_MAILBOX_TIMEOUT) { 2845 dev_err(&pdev->dev, 2846 "Timeout waiting for Controller Initialisation\n"); 2847 return -ETIMEDOUT; 2848 } 2849 if (!myrs_enable_mmio_mbox(cs, DAC960_BA_mbox_init)) { 2850 dev_err(&pdev->dev, 2851 "Unable to Enable Memory Mailbox Interface\n"); 2852 DAC960_BA_reset_ctrl(base); 2853 return -EAGAIN; 2854 } 2855 DAC960_BA_enable_intr(base); 2856 cs->write_cmd_mbox = DAC960_BA_write_cmd_mbox; 2857 cs->get_cmd_mbox = DAC960_BA_mem_mbox_new_cmd; 2858 cs->disable_intr = DAC960_BA_disable_intr; 2859 cs->reset = DAC960_BA_reset_ctrl; 2860 return 0; 2861 } 2862 2863 static irqreturn_t DAC960_BA_intr_handler(int irq, void *arg) 2864 { 2865 struct myrs_hba *cs = arg; 2866 void __iomem *base = cs->io_base; 2867 struct myrs_stat_mbox *next_stat_mbox; 2868 unsigned long flags; 2869 2870 spin_lock_irqsave(&cs->queue_lock, flags); 2871 DAC960_BA_ack_intr(base); 2872 next_stat_mbox = cs->next_stat_mbox; 2873 while (next_stat_mbox->id > 0) { 2874 unsigned short id = next_stat_mbox->id; 2875 struct scsi_cmnd *scmd = NULL; 2876 struct myrs_cmdblk *cmd_blk = NULL; 2877 2878 if (id == MYRS_DCMD_TAG) 2879 cmd_blk = &cs->dcmd_blk; 2880 else if (id == MYRS_MCMD_TAG) 2881 cmd_blk = &cs->mcmd_blk; 2882 else { 2883 scmd = scsi_host_find_tag(cs->host, id - 3); 2884 if (scmd) 2885 cmd_blk = scsi_cmd_priv(scmd); 2886 } 2887 if (cmd_blk) { 2888 cmd_blk->status = next_stat_mbox->status; 2889 cmd_blk->sense_len = next_stat_mbox->sense_len; 2890 cmd_blk->residual = next_stat_mbox->residual; 2891 } else 2892 dev_err(&cs->pdev->dev, 2893 "Unhandled command completion %d\n", id); 2894 2895 memset(next_stat_mbox, 0, sizeof(struct myrs_stat_mbox)); 2896 if (++next_stat_mbox > cs->last_stat_mbox) 2897 next_stat_mbox = cs->first_stat_mbox; 2898 2899 if (cmd_blk) { 2900 if (id < 3) 2901 myrs_handle_cmdblk(cs, cmd_blk); 2902 else 2903 myrs_handle_scsi(cs, cmd_blk, scmd); 2904 } 2905 } 2906 cs->next_stat_mbox = next_stat_mbox; 2907 spin_unlock_irqrestore(&cs->queue_lock, flags); 2908 return IRQ_HANDLED; 2909 } 2910 2911 struct myrs_privdata DAC960_BA_privdata = { 2912 .hw_init = DAC960_BA_hw_init, 2913 .irq_handler = DAC960_BA_intr_handler, 2914 .mmio_size = DAC960_BA_mmio_size, 2915 }; 2916 2917 /* 2918 * DAC960 LP Series Controllers. 2919 */ 2920 2921 static inline void DAC960_LP_hw_mbox_new_cmd(void __iomem *base) 2922 { 2923 writeb(DAC960_LP_IDB_HWMBOX_NEW_CMD, base + DAC960_LP_IDB_OFFSET); 2924 } 2925 2926 static inline void DAC960_LP_ack_hw_mbox_status(void __iomem *base) 2927 { 2928 writeb(DAC960_LP_IDB_HWMBOX_ACK_STS, base + DAC960_LP_IDB_OFFSET); 2929 } 2930 2931 static inline void DAC960_LP_gen_intr(void __iomem *base) 2932 { 2933 writeb(DAC960_LP_IDB_GEN_IRQ, base + DAC960_LP_IDB_OFFSET); 2934 } 2935 2936 static inline void DAC960_LP_reset_ctrl(void __iomem *base) 2937 { 2938 writeb(DAC960_LP_IDB_CTRL_RESET, base + DAC960_LP_IDB_OFFSET); 2939 } 2940 2941 static inline void DAC960_LP_mem_mbox_new_cmd(void __iomem *base) 2942 { 2943 writeb(DAC960_LP_IDB_MMBOX_NEW_CMD, base + DAC960_LP_IDB_OFFSET); 2944 } 2945 2946 static inline bool DAC960_LP_hw_mbox_is_full(void __iomem *base) 2947 { 2948 u8 val; 2949 2950 val = readb(base + DAC960_LP_IDB_OFFSET); 2951 return val & DAC960_LP_IDB_HWMBOX_FULL; 2952 } 2953 2954 static inline bool DAC960_LP_init_in_progress(void __iomem *base) 2955 { 2956 u8 val; 2957 2958 val = readb(base + DAC960_LP_IDB_OFFSET); 2959 return val & DAC960_LP_IDB_INIT_IN_PROGRESS; 2960 } 2961 2962 static inline void DAC960_LP_ack_hw_mbox_intr(void __iomem *base) 2963 { 2964 writeb(DAC960_LP_ODB_HWMBOX_ACK_IRQ, base + DAC960_LP_ODB_OFFSET); 2965 } 2966 2967 static inline void DAC960_LP_ack_mem_mbox_intr(void __iomem *base) 2968 { 2969 writeb(DAC960_LP_ODB_MMBOX_ACK_IRQ, base + DAC960_LP_ODB_OFFSET); 2970 } 2971 2972 static inline void DAC960_LP_ack_intr(void __iomem *base) 2973 { 2974 writeb(DAC960_LP_ODB_HWMBOX_ACK_IRQ | DAC960_LP_ODB_MMBOX_ACK_IRQ, 2975 base + DAC960_LP_ODB_OFFSET); 2976 } 2977 2978 static inline bool DAC960_LP_hw_mbox_status_available(void __iomem *base) 2979 { 2980 u8 val; 2981 2982 val = readb(base + DAC960_LP_ODB_OFFSET); 2983 return val & DAC960_LP_ODB_HWMBOX_STS_AVAIL; 2984 } 2985 2986 static inline bool DAC960_LP_mem_mbox_status_available(void __iomem *base) 2987 { 2988 u8 val; 2989 2990 val = readb(base + DAC960_LP_ODB_OFFSET); 2991 return val & DAC960_LP_ODB_MMBOX_STS_AVAIL; 2992 } 2993 2994 static inline void DAC960_LP_enable_intr(void __iomem *base) 2995 { 2996 writeb(~DAC960_LP_IRQMASK_DISABLE_IRQ, base + DAC960_LP_IRQMASK_OFFSET); 2997 } 2998 2999 static inline void DAC960_LP_disable_intr(void __iomem *base) 3000 { 3001 writeb(0xFF, base + DAC960_LP_IRQMASK_OFFSET); 3002 } 3003 3004 static inline bool DAC960_LP_intr_enabled(void __iomem *base) 3005 { 3006 u8 val; 3007 3008 val = readb(base + DAC960_LP_IRQMASK_OFFSET); 3009 return !(val & DAC960_LP_IRQMASK_DISABLE_IRQ); 3010 } 3011 3012 static inline void DAC960_LP_write_cmd_mbox(union myrs_cmd_mbox *mem_mbox, 3013 union myrs_cmd_mbox *mbox) 3014 { 3015 memcpy(&mem_mbox->words[1], &mbox->words[1], 3016 sizeof(union myrs_cmd_mbox) - sizeof(unsigned int)); 3017 /* Barrier to avoid reordering */ 3018 wmb(); 3019 mem_mbox->words[0] = mbox->words[0]; 3020 /* Barrier to force PCI access */ 3021 mb(); 3022 } 3023 3024 static inline void DAC960_LP_write_hw_mbox(void __iomem *base, 3025 dma_addr_t cmd_mbox_addr) 3026 { 3027 dma_addr_writeql(cmd_mbox_addr, base + DAC960_LP_CMDMBX_OFFSET); 3028 } 3029 3030 static inline unsigned short DAC960_LP_read_cmd_ident(void __iomem *base) 3031 { 3032 return readw(base + DAC960_LP_CMDSTS_OFFSET); 3033 } 3034 3035 static inline unsigned char DAC960_LP_read_cmd_status(void __iomem *base) 3036 { 3037 return readw(base + DAC960_LP_CMDSTS_OFFSET + 2); 3038 } 3039 3040 static inline bool 3041 DAC960_LP_read_error_status(void __iomem *base, unsigned char *error, 3042 unsigned char *param0, unsigned char *param1) 3043 { 3044 u8 val; 3045 3046 val = readb(base + DAC960_LP_ERRSTS_OFFSET); 3047 if (!(val & DAC960_LP_ERRSTS_PENDING)) 3048 return false; 3049 val &= ~DAC960_LP_ERRSTS_PENDING; 3050 *error = val; 3051 *param0 = readb(base + DAC960_LP_CMDMBX_OFFSET + 0); 3052 *param1 = readb(base + DAC960_LP_CMDMBX_OFFSET + 1); 3053 writeb(0xFF, base + DAC960_LP_ERRSTS_OFFSET); 3054 return true; 3055 } 3056 3057 static inline unsigned char 3058 DAC960_LP_mbox_init(void __iomem *base, dma_addr_t mbox_addr) 3059 { 3060 unsigned char status; 3061 3062 while (DAC960_LP_hw_mbox_is_full(base)) 3063 udelay(1); 3064 DAC960_LP_write_hw_mbox(base, mbox_addr); 3065 DAC960_LP_hw_mbox_new_cmd(base); 3066 while (!DAC960_LP_hw_mbox_status_available(base)) 3067 udelay(1); 3068 status = DAC960_LP_read_cmd_status(base); 3069 DAC960_LP_ack_hw_mbox_intr(base); 3070 DAC960_LP_ack_hw_mbox_status(base); 3071 3072 return status; 3073 } 3074 3075 static int DAC960_LP_hw_init(struct pci_dev *pdev, 3076 struct myrs_hba *cs, void __iomem *base) 3077 { 3078 int timeout = 0; 3079 unsigned char status, parm0, parm1; 3080 3081 DAC960_LP_disable_intr(base); 3082 DAC960_LP_ack_hw_mbox_status(base); 3083 udelay(1000); 3084 while (DAC960_LP_init_in_progress(base) && 3085 timeout < MYRS_MAILBOX_TIMEOUT) { 3086 if (DAC960_LP_read_error_status(base, &status, 3087 &parm0, &parm1) && 3088 myrs_err_status(cs, status, parm0, parm1)) 3089 return -EIO; 3090 udelay(10); 3091 timeout++; 3092 } 3093 if (timeout == MYRS_MAILBOX_TIMEOUT) { 3094 dev_err(&pdev->dev, 3095 "Timeout waiting for Controller Initialisation\n"); 3096 return -ETIMEDOUT; 3097 } 3098 if (!myrs_enable_mmio_mbox(cs, DAC960_LP_mbox_init)) { 3099 dev_err(&pdev->dev, 3100 "Unable to Enable Memory Mailbox Interface\n"); 3101 DAC960_LP_reset_ctrl(base); 3102 return -ENODEV; 3103 } 3104 DAC960_LP_enable_intr(base); 3105 cs->write_cmd_mbox = DAC960_LP_write_cmd_mbox; 3106 cs->get_cmd_mbox = DAC960_LP_mem_mbox_new_cmd; 3107 cs->disable_intr = DAC960_LP_disable_intr; 3108 cs->reset = DAC960_LP_reset_ctrl; 3109 3110 return 0; 3111 } 3112 3113 static irqreturn_t DAC960_LP_intr_handler(int irq, void *arg) 3114 { 3115 struct myrs_hba *cs = arg; 3116 void __iomem *base = cs->io_base; 3117 struct myrs_stat_mbox *next_stat_mbox; 3118 unsigned long flags; 3119 3120 spin_lock_irqsave(&cs->queue_lock, flags); 3121 DAC960_LP_ack_intr(base); 3122 next_stat_mbox = cs->next_stat_mbox; 3123 while (next_stat_mbox->id > 0) { 3124 unsigned short id = next_stat_mbox->id; 3125 struct scsi_cmnd *scmd = NULL; 3126 struct myrs_cmdblk *cmd_blk = NULL; 3127 3128 if (id == MYRS_DCMD_TAG) 3129 cmd_blk = &cs->dcmd_blk; 3130 else if (id == MYRS_MCMD_TAG) 3131 cmd_blk = &cs->mcmd_blk; 3132 else { 3133 scmd = scsi_host_find_tag(cs->host, id - 3); 3134 if (scmd) 3135 cmd_blk = scsi_cmd_priv(scmd); 3136 } 3137 if (cmd_blk) { 3138 cmd_blk->status = next_stat_mbox->status; 3139 cmd_blk->sense_len = next_stat_mbox->sense_len; 3140 cmd_blk->residual = next_stat_mbox->residual; 3141 } else 3142 dev_err(&cs->pdev->dev, 3143 "Unhandled command completion %d\n", id); 3144 3145 memset(next_stat_mbox, 0, sizeof(struct myrs_stat_mbox)); 3146 if (++next_stat_mbox > cs->last_stat_mbox) 3147 next_stat_mbox = cs->first_stat_mbox; 3148 3149 if (cmd_blk) { 3150 if (id < 3) 3151 myrs_handle_cmdblk(cs, cmd_blk); 3152 else 3153 myrs_handle_scsi(cs, cmd_blk, scmd); 3154 } 3155 } 3156 cs->next_stat_mbox = next_stat_mbox; 3157 spin_unlock_irqrestore(&cs->queue_lock, flags); 3158 return IRQ_HANDLED; 3159 } 3160 3161 struct myrs_privdata DAC960_LP_privdata = { 3162 .hw_init = DAC960_LP_hw_init, 3163 .irq_handler = DAC960_LP_intr_handler, 3164 .mmio_size = DAC960_LP_mmio_size, 3165 }; 3166 3167 /* 3168 * Module functions 3169 */ 3170 static int 3171 myrs_probe(struct pci_dev *dev, const struct pci_device_id *entry) 3172 { 3173 struct myrs_hba *cs; 3174 int ret; 3175 3176 cs = myrs_detect(dev, entry); 3177 if (!cs) 3178 return -ENODEV; 3179 3180 ret = myrs_get_config(cs); 3181 if (ret < 0) { 3182 myrs_cleanup(cs); 3183 return ret; 3184 } 3185 3186 if (!myrs_create_mempools(dev, cs)) { 3187 ret = -ENOMEM; 3188 goto failed; 3189 } 3190 3191 ret = scsi_add_host(cs->host, &dev->dev); 3192 if (ret) { 3193 dev_err(&dev->dev, "scsi_add_host failed with %d\n", ret); 3194 myrs_destroy_mempools(cs); 3195 goto failed; 3196 } 3197 scsi_scan_host(cs->host); 3198 return 0; 3199 failed: 3200 myrs_cleanup(cs); 3201 return ret; 3202 } 3203 3204 3205 static void myrs_remove(struct pci_dev *pdev) 3206 { 3207 struct myrs_hba *cs = pci_get_drvdata(pdev); 3208 3209 if (cs == NULL) 3210 return; 3211 3212 shost_printk(KERN_NOTICE, cs->host, "Flushing Cache..."); 3213 myrs_flush_cache(cs); 3214 myrs_destroy_mempools(cs); 3215 myrs_cleanup(cs); 3216 } 3217 3218 3219 static const struct pci_device_id myrs_id_table[] = { 3220 { 3221 PCI_DEVICE_SUB(PCI_VENDOR_ID_MYLEX, 3222 PCI_DEVICE_ID_MYLEX_DAC960_GEM, 3223 PCI_VENDOR_ID_MYLEX, PCI_ANY_ID), 3224 .driver_data = (unsigned long) &DAC960_GEM_privdata, 3225 }, 3226 { 3227 PCI_DEVICE_DATA(MYLEX, DAC960_BA, &DAC960_BA_privdata), 3228 }, 3229 { 3230 PCI_DEVICE_DATA(MYLEX, DAC960_LP, &DAC960_LP_privdata), 3231 }, 3232 {0, }, 3233 }; 3234 3235 MODULE_DEVICE_TABLE(pci, myrs_id_table); 3236 3237 static struct pci_driver myrs_pci_driver = { 3238 .name = "myrs", 3239 .id_table = myrs_id_table, 3240 .probe = myrs_probe, 3241 .remove = myrs_remove, 3242 }; 3243 3244 static int __init myrs_init_module(void) 3245 { 3246 int ret; 3247 3248 myrs_raid_template = raid_class_attach(&myrs_raid_functions); 3249 if (!myrs_raid_template) 3250 return -ENODEV; 3251 3252 ret = pci_register_driver(&myrs_pci_driver); 3253 if (ret) 3254 raid_class_release(myrs_raid_template); 3255 3256 return ret; 3257 } 3258 3259 static void __exit myrs_cleanup_module(void) 3260 { 3261 pci_unregister_driver(&myrs_pci_driver); 3262 raid_class_release(myrs_raid_template); 3263 } 3264 3265 module_init(myrs_init_module); 3266 module_exit(myrs_cleanup_module); 3267 3268 MODULE_DESCRIPTION("Mylex DAC960/AcceleRAID/eXtremeRAID driver (SCSI Interface)"); 3269 MODULE_AUTHOR("Hannes Reinecke <hare@suse.com>"); 3270 MODULE_LICENSE("GPL"); 3271