1 /* 2 ******************************************************************************* 3 ** O.S : Linux 4 ** FILE NAME : arcmsr_hba.c 5 ** BY : Erich Chen 6 ** Description: SCSI RAID Device Driver for 7 ** ARECA RAID Host adapter 8 ******************************************************************************* 9 ** Copyright (C) 2002 - 2005, Areca Technology Corporation All rights reserved 10 ** 11 ** Web site: www.areca.com.tw 12 ** E-mail: support@areca.com.tw 13 ** 14 ** This program is free software; you can redistribute it and/or modify 15 ** it under the terms of the GNU General Public License version 2 as 16 ** published by the Free Software Foundation. 17 ** This program is distributed in the hope that it will be useful, 18 ** but WITHOUT ANY WARRANTY; without even the implied warranty of 19 ** MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 20 ** GNU General Public License for more details. 21 ******************************************************************************* 22 ** Redistribution and use in source and binary forms, with or without 23 ** modification, are permitted provided that the following conditions 24 ** are met: 25 ** 1. Redistributions of source code must retain the above copyright 26 ** notice, this list of conditions and the following disclaimer. 27 ** 2. Redistributions in binary form must reproduce the above copyright 28 ** notice, this list of conditions and the following disclaimer in the 29 ** documentation and/or other materials provided with the distribution. 30 ** 3. The name of the author may not be used to endorse or promote products 31 ** derived from this software without specific prior written permission. 32 ** 33 ** THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR 34 ** IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES 35 ** OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
36 ** IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, 37 ** INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES(INCLUDING,BUT 38 ** NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 39 ** DATA, OR PROFITS; OR BUSINESS INTERRUPTION)HOWEVER CAUSED AND ON ANY 40 ** THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 41 ** (INCLUDING NEGLIGENCE OR OTHERWISE)ARISING IN ANY WAY OUT OF THE USE OF 42 ** THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 43 ******************************************************************************* 44 ** For history of changes, see Documentation/scsi/ChangeLog.arcmsr 45 ** Firmware Specification, see Documentation/scsi/arcmsr_spec.txt 46 ******************************************************************************* 47 */ 48 #include <linux/module.h> 49 #include <linux/reboot.h> 50 #include <linux/spinlock.h> 51 #include <linux/pci_ids.h> 52 #include <linux/interrupt.h> 53 #include <linux/moduleparam.h> 54 #include <linux/errno.h> 55 #include <linux/types.h> 56 #include <linux/delay.h> 57 #include <linux/dma-mapping.h> 58 #include <linux/timer.h> 59 #include <linux/pci.h> 60 #include <linux/aer.h> 61 #include <asm/dma.h> 62 #include <asm/io.h> 63 #include <asm/system.h> 64 #include <asm/uaccess.h> 65 #include <scsi/scsi_host.h> 66 #include <scsi/scsi.h> 67 #include <scsi/scsi_cmnd.h> 68 #include <scsi/scsi_tcq.h> 69 #include <scsi/scsi_device.h> 70 #include <scsi/scsi_transport.h> 71 #include <scsi/scsicam.h> 72 #include "arcmsr.h" 73 74 MODULE_AUTHOR("Erich Chen <support@areca.com.tw>"); 75 MODULE_DESCRIPTION("ARECA (ARC11xx/12xx/13xx/16xx) SATA/SAS RAID HOST Adapter"); 76 MODULE_LICENSE("Dual BSD/GPL"); 77 MODULE_VERSION(ARCMSR_DRIVER_VERSION); 78 79 static int arcmsr_iop_message_xfer(struct AdapterControlBlock *acb, 80 struct scsi_cmnd *cmd); 81 static int arcmsr_iop_confirm(struct AdapterControlBlock *acb); 82 static int arcmsr_abort(struct scsi_cmnd *); 83 
/* forward declarations for the SCSI host template and PCI driver hooks */
static int arcmsr_bus_reset(struct scsi_cmnd *);
static int arcmsr_bios_param(struct scsi_device *sdev,
		struct block_device *bdev, sector_t capacity, int *info);
static int arcmsr_queue_command(struct scsi_cmnd *cmd,
					void (*done) (struct scsi_cmnd *));
static int arcmsr_probe(struct pci_dev *pdev,
				const struct pci_device_id *id);
static void arcmsr_remove(struct pci_dev *pdev);
static void arcmsr_shutdown(struct pci_dev *pdev);
static void arcmsr_iop_init(struct AdapterControlBlock *acb);
static void arcmsr_free_ccb_pool(struct AdapterControlBlock *acb);
static u32 arcmsr_disable_outbound_ints(struct AdapterControlBlock *acb);
static void arcmsr_stop_adapter_bgrb(struct AdapterControlBlock *acb);
static void arcmsr_flush_hba_cache(struct AdapterControlBlock *acb);
static void arcmsr_flush_hbb_cache(struct AdapterControlBlock *acb);
static const char *arcmsr_info(struct Scsi_Host *);
static irqreturn_t arcmsr_interrupt(struct AdapterControlBlock *acb);

/*
 * change_queue_depth hook: clamp the requested per-LUN queue depth to
 * the driver limit and apply it with ordered tagging.
 */
static int arcmsr_adjust_disk_queue_depth(struct scsi_device *sdev,
							int queue_depth)
{
	if (queue_depth > ARCMSR_MAX_CMD_PERLUN)
		queue_depth = ARCMSR_MAX_CMD_PERLUN;
	scsi_adjust_queue_depth(sdev, MSG_ORDERED_TAG, queue_depth);
	return queue_depth;
}

/* SCSI mid-layer host template: capacities come from arcmsr.h limits */
static struct scsi_host_template arcmsr_scsi_host_template = {
	.module			= THIS_MODULE,
	.name			= "ARCMSR ARECA SATA/SAS RAID HOST Adapter"
							ARCMSR_DRIVER_VERSION,
	.info			= arcmsr_info,
	.queuecommand		= arcmsr_queue_command,
	.eh_abort_handler	= arcmsr_abort,
	.eh_bus_reset_handler	= arcmsr_bus_reset,
	.bios_param		= arcmsr_bios_param,
	.change_queue_depth	= arcmsr_adjust_disk_queue_depth,
	.can_queue		= ARCMSR_MAX_OUTSTANDING_CMD,
	.this_id		= ARCMSR_SCSI_INITIATOR_ID,
	.sg_tablesize		= ARCMSR_MAX_SG_ENTRIES,
	.max_sectors		= ARCMSR_MAX_XFER_SECTORS,
	.cmd_per_lun		= ARCMSR_MAX_CMD_PERLUN,
	.use_clustering		= ENABLE_CLUSTERING,
	.shost_attrs		= arcmsr_host_attrs,
};
#ifdef CONFIG_SCSI_ARCMSR_AER
static pci_ers_result_t arcmsr_pci_slot_reset(struct pci_dev *pdev);
static pci_ers_result_t arcmsr_pci_error_detected(struct pci_dev *pdev,
						pci_channel_state_t state);

/* PCIe Advanced Error Reporting callbacks (optional feature) */
static struct pci_error_handlers arcmsr_pci_error_handlers = {
	.error_detected		= arcmsr_pci_error_detected,
	.slot_reset		= arcmsr_pci_slot_reset,
};
#endif
/* every supported Areca controller; 0x1201 is the only type B unit */
static struct pci_device_id arcmsr_device_id_table[] = {
	{PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1110)},
	{PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1120)},
	{PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1130)},
	{PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1160)},
	{PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1170)},
	{PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1200)},
	{PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1201)},
	{PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1202)},
	{PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1210)},
	{PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1220)},
	{PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1230)},
	{PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1260)},
	{PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1270)},
	{PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1280)},
	{PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1380)},
	{PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1381)},
	{PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1680)},
	{PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1681)},
	{0, 0}, /* Terminating entry */
};
MODULE_DEVICE_TABLE(pci, arcmsr_device_id_table);
static struct pci_driver arcmsr_pci_driver = {
	.name			= "arcmsr",
	.id_table		= arcmsr_device_id_table,
	.probe			= arcmsr_probe,
	.remove			= arcmsr_remove,
	.shutdown		= arcmsr_shutdown,
#ifdef CONFIG_SCSI_ARCMSR_AER
.err_handler = &arcmsr_pci_error_handlers, 167 #endif 168 }; 169 170 static irqreturn_t arcmsr_do_interrupt(int irq, void *dev_id) 171 { 172 irqreturn_t handle_state; 173 struct AdapterControlBlock *acb = dev_id; 174 175 spin_lock(acb->host->host_lock); 176 handle_state = arcmsr_interrupt(acb); 177 spin_unlock(acb->host->host_lock); 178 179 return handle_state; 180 } 181 182 static int arcmsr_bios_param(struct scsi_device *sdev, 183 struct block_device *bdev, sector_t capacity, int *geom) 184 { 185 int ret, heads, sectors, cylinders, total_capacity; 186 unsigned char *buffer;/* return copy of block device's partition table */ 187 188 buffer = scsi_bios_ptable(bdev); 189 if (buffer) { 190 ret = scsi_partsize(buffer, capacity, &geom[2], &geom[0], &geom[1]); 191 kfree(buffer); 192 if (ret != -1) 193 return ret; 194 } 195 total_capacity = capacity; 196 heads = 64; 197 sectors = 32; 198 cylinders = total_capacity / (heads * sectors); 199 if (cylinders > 1024) { 200 heads = 255; 201 sectors = 63; 202 cylinders = total_capacity / (heads * sectors); 203 } 204 geom[0] = heads; 205 geom[1] = sectors; 206 geom[2] = cylinders; 207 return 0; 208 } 209 210 static void arcmsr_define_adapter_type(struct AdapterControlBlock *acb) 211 { 212 struct pci_dev *pdev = acb->pdev; 213 u16 dev_id; 214 pci_read_config_word(pdev, PCI_DEVICE_ID, &dev_id); 215 switch (dev_id) { 216 case 0x1201 : { 217 acb->adapter_type = ACB_ADAPTER_TYPE_B; 218 } 219 break; 220 221 default : acb->adapter_type = ACB_ADAPTER_TYPE_A; 222 } 223 } 224 225 static int arcmsr_alloc_ccb_pool(struct AdapterControlBlock *acb) 226 { 227 228 switch (acb->adapter_type) { 229 230 case ACB_ADAPTER_TYPE_A: { 231 struct pci_dev *pdev = acb->pdev; 232 void *dma_coherent; 233 dma_addr_t dma_coherent_handle, dma_addr; 234 struct CommandControlBlock *ccb_tmp; 235 uint32_t intmask_org; 236 int i, j; 237 238 acb->pmu = ioremap(pci_resource_start(pdev, 0), pci_resource_len(pdev, 0)); 239 if (!acb->pmu) { 240 printk(KERN_NOTICE 
"arcmsr%d: memory mapping region fail \n", 241 acb->host->host_no); 242 } 243 244 dma_coherent = dma_alloc_coherent(&pdev->dev, 245 ARCMSR_MAX_FREECCB_NUM * 246 sizeof (struct CommandControlBlock) + 0x20, 247 &dma_coherent_handle, GFP_KERNEL); 248 if (!dma_coherent) 249 return -ENOMEM; 250 251 acb->dma_coherent = dma_coherent; 252 acb->dma_coherent_handle = dma_coherent_handle; 253 254 if (((unsigned long)dma_coherent & 0x1F)) { 255 dma_coherent = dma_coherent + 256 (0x20 - ((unsigned long)dma_coherent & 0x1F)); 257 dma_coherent_handle = dma_coherent_handle + 258 (0x20 - ((unsigned long)dma_coherent_handle & 0x1F)); 259 } 260 261 dma_addr = dma_coherent_handle; 262 ccb_tmp = (struct CommandControlBlock *)dma_coherent; 263 for (i = 0; i < ARCMSR_MAX_FREECCB_NUM; i++) { 264 ccb_tmp->cdb_shifted_phyaddr = dma_addr >> 5; 265 ccb_tmp->acb = acb; 266 acb->pccb_pool[i] = ccb_tmp; 267 list_add_tail(&ccb_tmp->list, &acb->ccb_free_list); 268 dma_addr = dma_addr + sizeof(struct CommandControlBlock); 269 ccb_tmp++; 270 } 271 272 acb->vir2phy_offset = (unsigned long)ccb_tmp -(unsigned long)dma_addr; 273 for (i = 0; i < ARCMSR_MAX_TARGETID; i++) 274 for (j = 0; j < ARCMSR_MAX_TARGETLUN; j++) 275 acb->devstate[i][j] = ARECA_RAID_GONE; 276 277 /* 278 ** here we need to tell iop 331 our ccb_tmp.HighPart 279 ** if ccb_tmp.HighPart is not zero 280 */ 281 intmask_org = arcmsr_disable_outbound_ints(acb); 282 } 283 break; 284 285 case ACB_ADAPTER_TYPE_B: { 286 287 struct pci_dev *pdev = acb->pdev; 288 struct MessageUnit_B *reg; 289 void *mem_base0, *mem_base1; 290 void *dma_coherent; 291 dma_addr_t dma_coherent_handle, dma_addr; 292 uint32_t intmask_org; 293 struct CommandControlBlock *ccb_tmp; 294 int i, j; 295 296 dma_coherent = dma_alloc_coherent(&pdev->dev, 297 ((ARCMSR_MAX_FREECCB_NUM * 298 sizeof(struct CommandControlBlock) + 0x20) + 299 sizeof(struct MessageUnit_B)), 300 &dma_coherent_handle, GFP_KERNEL); 301 if (!dma_coherent) 302 return -ENOMEM; 303 304 acb->dma_coherent = 
dma_coherent; 305 acb->dma_coherent_handle = dma_coherent_handle; 306 307 if (((unsigned long)dma_coherent & 0x1F)) { 308 dma_coherent = dma_coherent + 309 (0x20 - ((unsigned long)dma_coherent & 0x1F)); 310 dma_coherent_handle = dma_coherent_handle + 311 (0x20 - ((unsigned long)dma_coherent_handle & 0x1F)); 312 } 313 314 reg = (struct MessageUnit_B *)(dma_coherent + 315 ARCMSR_MAX_FREECCB_NUM * sizeof(struct CommandControlBlock)); 316 317 dma_addr = dma_coherent_handle; 318 ccb_tmp = (struct CommandControlBlock *)dma_coherent; 319 for (i = 0; i < ARCMSR_MAX_FREECCB_NUM; i++) { 320 ccb_tmp->cdb_shifted_phyaddr = dma_addr >> 5; 321 ccb_tmp->acb = acb; 322 acb->pccb_pool[i] = ccb_tmp; 323 list_add_tail(&ccb_tmp->list, &acb->ccb_free_list); 324 dma_addr = dma_addr + sizeof(struct CommandControlBlock); 325 ccb_tmp++; 326 } 327 328 reg = (struct MessageUnit_B *)(dma_coherent + 329 ARCMSR_MAX_FREECCB_NUM * sizeof(struct CommandControlBlock)); 330 acb->pmu = (struct MessageUnit *)reg; 331 mem_base0 = ioremap(pci_resource_start(pdev, 0), 332 pci_resource_len(pdev, 0)); 333 mem_base1 = ioremap(pci_resource_start(pdev, 2), 334 pci_resource_len(pdev, 2)); 335 reg->drv2iop_doorbell_reg = (uint32_t *)((char *)mem_base0 + 336 ARCMSR_DRV2IOP_DOORBELL); 337 reg->drv2iop_doorbell_mask_reg = (uint32_t *)((char *)mem_base0 + 338 ARCMSR_DRV2IOP_DOORBELL_MASK); 339 reg->iop2drv_doorbell_reg = (uint32_t *)((char *)mem_base0 + 340 ARCMSR_IOP2DRV_DOORBELL); 341 reg->iop2drv_doorbell_mask_reg = (uint32_t *)((char *)mem_base0 + 342 ARCMSR_IOP2DRV_DOORBELL_MASK); 343 reg->ioctl_wbuffer_reg = (uint32_t *)((char *)mem_base1 + 344 ARCMSR_IOCTL_WBUFFER); 345 reg->ioctl_rbuffer_reg = (uint32_t *)((char *)mem_base1 + 346 ARCMSR_IOCTL_RBUFFER); 347 reg->msgcode_rwbuffer_reg = (uint32_t *)((char *)mem_base1 + 348 ARCMSR_MSGCODE_RWBUFFER); 349 350 acb->vir2phy_offset = (unsigned long)ccb_tmp -(unsigned long)dma_addr; 351 for (i = 0; i < ARCMSR_MAX_TARGETID; i++) 352 for (j = 0; j < 
ARCMSR_MAX_TARGETLUN; j++) 353 acb->devstate[i][j] = ARECA_RAID_GOOD; 354 355 /* 356 ** here we need to tell iop 331 our ccb_tmp.HighPart 357 ** if ccb_tmp.HighPart is not zero 358 */ 359 intmask_org = arcmsr_disable_outbound_ints(acb); 360 } 361 break; 362 } 363 return 0; 364 } 365 366 static int arcmsr_probe(struct pci_dev *pdev, 367 const struct pci_device_id *id) 368 { 369 struct Scsi_Host *host; 370 struct AdapterControlBlock *acb; 371 uint8_t bus, dev_fun; 372 int error; 373 374 error = pci_enable_device(pdev); 375 if (error) 376 goto out; 377 pci_set_master(pdev); 378 379 host = scsi_host_alloc(&arcmsr_scsi_host_template, 380 sizeof(struct AdapterControlBlock)); 381 if (!host) { 382 error = -ENOMEM; 383 goto out_disable_device; 384 } 385 acb = (struct AdapterControlBlock *)host->hostdata; 386 memset(acb, 0, sizeof (struct AdapterControlBlock)); 387 388 error = pci_set_dma_mask(pdev, DMA_64BIT_MASK); 389 if (error) { 390 error = pci_set_dma_mask(pdev, DMA_32BIT_MASK); 391 if (error) { 392 printk(KERN_WARNING 393 "scsi%d: No suitable DMA mask available\n", 394 host->host_no); 395 goto out_host_put; 396 } 397 } 398 bus = pdev->bus->number; 399 dev_fun = pdev->devfn; 400 acb->host = host; 401 acb->pdev = pdev; 402 host->max_sectors = ARCMSR_MAX_XFER_SECTORS; 403 host->max_lun = ARCMSR_MAX_TARGETLUN; 404 host->max_id = ARCMSR_MAX_TARGETID;/*16:8*/ 405 host->max_cmd_len = 16; /*this is issue of 64bit LBA, over 2T byte*/ 406 host->sg_tablesize = ARCMSR_MAX_SG_ENTRIES; 407 host->can_queue = ARCMSR_MAX_FREECCB_NUM; /* max simultaneous cmds */ 408 host->cmd_per_lun = ARCMSR_MAX_CMD_PERLUN; 409 host->this_id = ARCMSR_SCSI_INITIATOR_ID; 410 host->unique_id = (bus << 8) | dev_fun; 411 host->irq = pdev->irq; 412 error = pci_request_regions(pdev, "arcmsr"); 413 if (error) { 414 goto out_host_put; 415 } 416 arcmsr_define_adapter_type(acb); 417 418 acb->acb_flags |= (ACB_F_MESSAGE_WQBUFFER_CLEARED | 419 ACB_F_MESSAGE_RQBUFFER_CLEARED | 420 ACB_F_MESSAGE_WQBUFFER_READED); 421 
acb->acb_flags &= ~ACB_F_SCSISTOPADAPTER; 422 INIT_LIST_HEAD(&acb->ccb_free_list); 423 424 error = arcmsr_alloc_ccb_pool(acb); 425 if (error) 426 goto out_release_regions; 427 428 error = request_irq(pdev->irq, arcmsr_do_interrupt, 429 IRQF_SHARED, "arcmsr", acb); 430 if (error) 431 goto out_free_ccb_pool; 432 433 arcmsr_iop_init(acb); 434 pci_set_drvdata(pdev, host); 435 if (strncmp(acb->firm_version, "V1.42", 5) >= 0) 436 host->max_sectors= ARCMSR_MAX_XFER_SECTORS_B; 437 438 error = scsi_add_host(host, &pdev->dev); 439 if (error) 440 goto out_free_irq; 441 442 error = arcmsr_alloc_sysfs_attr(acb); 443 if (error) 444 goto out_free_sysfs; 445 446 scsi_scan_host(host); 447 #ifdef CONFIG_SCSI_ARCMSR_AER 448 pci_enable_pcie_error_reporting(pdev); 449 #endif 450 return 0; 451 out_free_sysfs: 452 out_free_irq: 453 free_irq(pdev->irq, acb); 454 out_free_ccb_pool: 455 arcmsr_free_ccb_pool(acb); 456 iounmap(acb->pmu); 457 out_release_regions: 458 pci_release_regions(pdev); 459 out_host_put: 460 scsi_host_put(host); 461 out_disable_device: 462 pci_disable_device(pdev); 463 out: 464 return error; 465 } 466 467 static uint8_t arcmsr_hba_wait_msgint_ready(struct AdapterControlBlock *acb) 468 { 469 struct MessageUnit_A __iomem *reg = (struct MessageUnit_A *)acb->pmu; 470 uint32_t Index; 471 uint8_t Retries = 0x00; 472 473 do { 474 for (Index = 0; Index < 100; Index++) { 475 if (readl(®->outbound_intstatus) & 476 ARCMSR_MU_OUTBOUND_MESSAGE0_INT) { 477 writel(ARCMSR_MU_OUTBOUND_MESSAGE0_INT, 478 ®->outbound_intstatus); 479 return 0x00; 480 } 481 msleep(10); 482 }/*max 1 seconds*/ 483 484 } while (Retries++ < 20);/*max 20 sec*/ 485 return 0xff; 486 } 487 488 static uint8_t arcmsr_hbb_wait_msgint_ready(struct AdapterControlBlock *acb) 489 { 490 struct MessageUnit_B *reg = (struct MessageUnit_B *)acb->pmu; 491 uint32_t Index; 492 uint8_t Retries = 0x00; 493 494 do { 495 for (Index = 0; Index < 100; Index++) { 496 if (readl(reg->iop2drv_doorbell_reg) 497 & 
ARCMSR_IOP2DRV_MESSAGE_CMD_DONE) { 498 writel(ARCMSR_MESSAGE_INT_CLEAR_PATTERN 499 , reg->iop2drv_doorbell_reg); 500 return 0x00; 501 } 502 msleep(10); 503 }/*max 1 seconds*/ 504 505 } while (Retries++ < 20);/*max 20 sec*/ 506 return 0xff; 507 } 508 509 static void arcmsr_abort_hba_allcmd(struct AdapterControlBlock *acb) 510 { 511 struct MessageUnit_A __iomem *reg = (struct MessageUnit_A *)acb->pmu; 512 513 writel(ARCMSR_INBOUND_MESG0_ABORT_CMD, ®->inbound_msgaddr0); 514 if (arcmsr_hba_wait_msgint_ready(acb)) 515 printk(KERN_NOTICE 516 "arcmsr%d: wait 'abort all outstanding command' timeout \n" 517 , acb->host->host_no); 518 } 519 520 static void arcmsr_abort_hbb_allcmd(struct AdapterControlBlock *acb) 521 { 522 struct MessageUnit_B *reg = (struct MessageUnit_B *)acb->pmu; 523 524 writel(ARCMSR_MESSAGE_ABORT_CMD, reg->drv2iop_doorbell_reg); 525 if (arcmsr_hbb_wait_msgint_ready(acb)) 526 printk(KERN_NOTICE 527 "arcmsr%d: wait 'abort all outstanding command' timeout \n" 528 , acb->host->host_no); 529 } 530 531 static void arcmsr_abort_allcmd(struct AdapterControlBlock *acb) 532 { 533 switch (acb->adapter_type) { 534 case ACB_ADAPTER_TYPE_A: { 535 arcmsr_abort_hba_allcmd(acb); 536 } 537 break; 538 539 case ACB_ADAPTER_TYPE_B: { 540 arcmsr_abort_hbb_allcmd(acb); 541 } 542 } 543 } 544 545 static void arcmsr_pci_unmap_dma(struct CommandControlBlock *ccb) 546 { 547 struct scsi_cmnd *pcmd = ccb->pcmd; 548 549 scsi_dma_unmap(pcmd); 550 } 551 552 static void arcmsr_ccb_complete(struct CommandControlBlock *ccb, int stand_flag) 553 { 554 struct AdapterControlBlock *acb = ccb->acb; 555 struct scsi_cmnd *pcmd = ccb->pcmd; 556 557 arcmsr_pci_unmap_dma(ccb); 558 if (stand_flag == 1) 559 atomic_dec(&acb->ccboutstandingcount); 560 ccb->startdone = ARCMSR_CCB_DONE; 561 ccb->ccb_flags = 0; 562 list_add_tail(&ccb->list, &acb->ccb_free_list); 563 pcmd->scsi_done(pcmd); 564 } 565 566 static void arcmsr_flush_hba_cache(struct AdapterControlBlock *acb) 567 { 568 struct MessageUnit_A 
__iomem *reg = (struct MessageUnit_A *)acb->pmu; 569 int retry_count = 30; 570 571 writel(ARCMSR_INBOUND_MESG0_FLUSH_CACHE, ®->inbound_msgaddr0); 572 do { 573 if (!arcmsr_hba_wait_msgint_ready(acb)) 574 break; 575 else { 576 retry_count--; 577 printk(KERN_NOTICE "arcmsr%d: wait 'flush adapter cache' \ 578 timeout, retry count down = %d \n", acb->host->host_no, retry_count); 579 } 580 } while (retry_count != 0); 581 } 582 583 static void arcmsr_flush_hbb_cache(struct AdapterControlBlock *acb) 584 { 585 struct MessageUnit_B *reg = (struct MessageUnit_B *)acb->pmu; 586 int retry_count = 30; 587 588 writel(ARCMSR_MESSAGE_FLUSH_CACHE, reg->drv2iop_doorbell_reg); 589 do { 590 if (!arcmsr_hbb_wait_msgint_ready(acb)) 591 break; 592 else { 593 retry_count--; 594 printk(KERN_NOTICE "arcmsr%d: wait 'flush adapter cache' \ 595 timeout,retry count down = %d \n", acb->host->host_no, retry_count); 596 } 597 } while (retry_count != 0); 598 } 599 600 static void arcmsr_flush_adapter_cache(struct AdapterControlBlock *acb) 601 { 602 switch (acb->adapter_type) { 603 604 case ACB_ADAPTER_TYPE_A: { 605 arcmsr_flush_hba_cache(acb); 606 } 607 break; 608 609 case ACB_ADAPTER_TYPE_B: { 610 arcmsr_flush_hbb_cache(acb); 611 } 612 } 613 } 614 615 static void arcmsr_report_sense_info(struct CommandControlBlock *ccb) 616 { 617 618 struct scsi_cmnd *pcmd = ccb->pcmd; 619 struct SENSE_DATA *sensebuffer = (struct SENSE_DATA *)pcmd->sense_buffer; 620 621 pcmd->result = DID_OK << 16; 622 if (sensebuffer) { 623 int sense_data_length = 624 sizeof(struct SENSE_DATA) < sizeof(pcmd->sense_buffer) 625 ? 
sizeof(struct SENSE_DATA) : sizeof(pcmd->sense_buffer); 626 memset(sensebuffer, 0, sizeof(pcmd->sense_buffer)); 627 memcpy(sensebuffer, ccb->arcmsr_cdb.SenseData, sense_data_length); 628 sensebuffer->ErrorCode = SCSI_SENSE_CURRENT_ERRORS; 629 sensebuffer->Valid = 1; 630 } 631 } 632 633 static u32 arcmsr_disable_outbound_ints(struct AdapterControlBlock *acb) 634 { 635 u32 orig_mask = 0; 636 switch (acb->adapter_type) { 637 638 case ACB_ADAPTER_TYPE_A : { 639 struct MessageUnit_A __iomem *reg = (struct MessageUnit_A *)acb->pmu; 640 orig_mask = readl(®->outbound_intmask)|\ 641 ARCMSR_MU_OUTBOUND_MESSAGE0_INTMASKENABLE; 642 writel(orig_mask|ARCMSR_MU_OUTBOUND_ALL_INTMASKENABLE, \ 643 ®->outbound_intmask); 644 } 645 break; 646 647 case ACB_ADAPTER_TYPE_B : { 648 struct MessageUnit_B *reg = (struct MessageUnit_B *)acb->pmu; 649 orig_mask = readl(reg->iop2drv_doorbell_mask_reg) & \ 650 (~ARCMSR_IOP2DRV_MESSAGE_CMD_DONE); 651 writel(0, reg->iop2drv_doorbell_mask_reg); 652 } 653 break; 654 } 655 return orig_mask; 656 } 657 658 static void arcmsr_report_ccb_state(struct AdapterControlBlock *acb, \ 659 struct CommandControlBlock *ccb, uint32_t flag_ccb) 660 { 661 662 uint8_t id, lun; 663 id = ccb->pcmd->device->id; 664 lun = ccb->pcmd->device->lun; 665 if (!(flag_ccb & ARCMSR_CCBREPLY_FLAG_ERROR)) { 666 if (acb->devstate[id][lun] == ARECA_RAID_GONE) 667 acb->devstate[id][lun] = ARECA_RAID_GOOD; 668 ccb->pcmd->result = DID_OK << 16; 669 arcmsr_ccb_complete(ccb, 1); 670 } else { 671 switch (ccb->arcmsr_cdb.DeviceStatus) { 672 case ARCMSR_DEV_SELECT_TIMEOUT: { 673 acb->devstate[id][lun] = ARECA_RAID_GONE; 674 ccb->pcmd->result = DID_NO_CONNECT << 16; 675 arcmsr_ccb_complete(ccb, 1); 676 } 677 break; 678 679 case ARCMSR_DEV_ABORTED: 680 681 case ARCMSR_DEV_INIT_FAIL: { 682 acb->devstate[id][lun] = ARECA_RAID_GONE; 683 ccb->pcmd->result = DID_BAD_TARGET << 16; 684 arcmsr_ccb_complete(ccb, 1); 685 } 686 break; 687 688 case ARCMSR_DEV_CHECK_CONDITION: { 689 acb->devstate[id][lun] = 
			ARECA_RAID_GOOD;
			arcmsr_report_sense_info(ccb);
			arcmsr_ccb_complete(ccb, 1);
			}
			break;

		default:
			/* unexpected DeviceStatus: treat the volume as gone */
			printk(KERN_NOTICE
				"arcmsr%d: scsi id = %d lun = %d"
				" isr get command error done, "
				"but got unknown DeviceStatus = 0x%x \n"
				, acb->host->host_no
				, id
				, lun
				, ccb->arcmsr_cdb.DeviceStatus);
			acb->devstate[id][lun] = ARECA_RAID_GONE;
			ccb->pcmd->result = DID_NO_CONNECT << 16;
			arcmsr_ccb_complete(ccb, 1);
			break;
		}
	}
}

/*
 * Translate one completion-queue entry (flag_ccb) back into its CCB
 * via vir2phy_offset (the IOP reports physical address >> 5), sanity
 * check it, and report it.  Mismatched or already-done CCBs are
 * logged; aborted ones are completed with DID_ABORT.
 */
static void arcmsr_drain_donequeue(struct AdapterControlBlock *acb, uint32_t flag_ccb)

{
	struct CommandControlBlock *ccb;

	ccb = (struct CommandControlBlock *)(acb->vir2phy_offset + (flag_ccb << 5));
	if ((ccb->acb != acb) || (ccb->startdone != ARCMSR_CCB_START)) {
		if (ccb->startdone == ARCMSR_CCB_ABORTED) {
			struct scsi_cmnd *abortcmd = ccb->pcmd;
			if (abortcmd) {
				abortcmd->result |= DID_ABORT << 16;
				arcmsr_ccb_complete(ccb, 1);
				printk(KERN_NOTICE "arcmsr%d: ccb ='0x%p' \
isr got aborted command \n", acb->host->host_no, ccb);
			}
		}
		printk(KERN_NOTICE "arcmsr%d: isr get an illegal ccb command \
done acb = '0x%p'"
			"ccb = '0x%p' ccbacb = '0x%p' startdone = 0x%x"
			" ccboutstandingcount = %d \n"
			, acb->host->host_no
			, acb
			, ccb
			, ccb->acb
			, ccb->startdone
			, atomic_read(&acb->ccboutstandingcount));
	}
	arcmsr_report_ccb_state(acb, ccb, flag_ccb);
}

/*
 * Drain and discard everything still sitting on the outbound
 * (completion) queue after an abort-all, so no stale completions
 * fire later.  Type B also clears the inbound post queue slots.
 */
static void arcmsr_done4abort_postqueue(struct AdapterControlBlock *acb)
{
	int i = 0;
	uint32_t flag_ccb;

	switch (acb->adapter_type) {

	case ACB_ADAPTER_TYPE_A: {
		struct MessageUnit_A __iomem *reg =
			(struct MessageUnit_A *)acb->pmu;
		uint32_t outbound_intstatus;
		outbound_intstatus = readl(&reg->outbound_intstatus) &
			acb->outbound_int_enable;
		/*clear and abort all outbound posted Q*/
		writel(outbound_intstatus, &reg->outbound_intstatus);/*clear interrupt*/
		/* 0xFFFFFFFF marks an empty outbound queue port */
		while (((flag_ccb = readl(&reg->outbound_queueport)) != 0xFFFFFFFF)
				&& (i++ < ARCMSR_MAX_OUTSTANDING_CMD)) {
			arcmsr_drain_donequeue(acb, flag_ccb);
		}
		}
		break;

	case ACB_ADAPTER_TYPE_B: {
		struct MessageUnit_B *reg = (struct MessageUnit_B *)acb->pmu;
		/*clear all outbound posted Q*/
		for (i = 0; i < ARCMSR_MAX_HBB_POSTQUEUE; i++) {
			if ((flag_ccb = readl(&reg->done_qbuffer[i])) != 0) {
				writel(0, &reg->done_qbuffer[i]);
				arcmsr_drain_donequeue(acb, flag_ccb);
			}
			writel(0, &reg->post_qbuffer[i]);
		}
		reg->doneq_index = 0;
		reg->postq_index = 0;
		}
		break;
	}
}
/*
 * PCI remove: quiesce the adapter (stop background rebuild, flush the
 * cache, mask interrupts), drain or force-complete any outstanding
 * commands, then release IRQ, mappings, CCB pool and PCI resources.
 */
static void arcmsr_remove(struct pci_dev *pdev)
{
	struct Scsi_Host *host = pci_get_drvdata(pdev);
	struct AdapterControlBlock *acb =
		(struct AdapterControlBlock *) host->hostdata;
	int poll_count = 0;

	arcmsr_free_sysfs_attr(acb);
	scsi_remove_host(host);
	arcmsr_stop_adapter_bgrb(acb);
	arcmsr_flush_adapter_cache(acb);
	arcmsr_disable_outbound_ints(acb);
	acb->acb_flags |= ACB_F_SCSISTOPADAPTER;
	acb->acb_flags &= ~ACB_F_IOP_INITED;

	/* give in-flight commands up to ~25 ms each to drain normally */
	for (poll_count = 0; poll_count < ARCMSR_MAX_OUTSTANDING_CMD; poll_count++) {
		if (!atomic_read(&acb->ccboutstandingcount))
			break;
		arcmsr_interrupt(acb);/* FIXME: need spinlock */
		msleep(25);
	}

	/* anything still outstanding is aborted and completed DID_ABORT */
	if (atomic_read(&acb->ccboutstandingcount)) {
		int i;

		arcmsr_abort_allcmd(acb);
		arcmsr_done4abort_postqueue(acb);
		for (i = 0; i < ARCMSR_MAX_FREECCB_NUM; i++) {
			struct CommandControlBlock *ccb = acb->pccb_pool[i];
			if (ccb->startdone == ARCMSR_CCB_START) {
				ccb->startdone = ARCMSR_CCB_ABORTED;
				ccb->pcmd->result = DID_ABORT << 16;
				arcmsr_ccb_complete(ccb, 1);
			}
		}
	}

	free_irq(pdev->irq, acb);
	/* NOTE(review): for type B adapters pmu points into the DMA
	   allocation, not an ioremap -- confirm iounmap is safe here */
	iounmap(acb->pmu);
	arcmsr_free_ccb_pool(acb);
	pci_release_regions(pdev);

	scsi_host_put(host);

	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
}

/* shutdown hook: stop background rebuild and flush the write cache */
static void arcmsr_shutdown(struct pci_dev *pdev)
{
	struct Scsi_Host *host = pci_get_drvdata(pdev);
	struct AdapterControlBlock *acb =
		(struct AdapterControlBlock *)host->hostdata;

	arcmsr_stop_adapter_bgrb(acb);
	arcmsr_flush_adapter_cache(acb);
}

/* module entry point: register the PCI driver */
static int arcmsr_module_init(void)
{
	int error = 0;

	error = pci_register_driver(&arcmsr_pci_driver);
	return error;
}

/* module exit point: unregister the PCI driver */
static void arcmsr_module_exit(void)
{
	pci_unregister_driver(&arcmsr_pci_driver);
}
module_init(arcmsr_module_init);
module_exit(arcmsr_module_exit);

/*
 * Re-enable the outbound interrupts that matter for normal operation,
 * starting from the mask saved by arcmsr_disable_outbound_ints().
 */
static void arcmsr_enable_outbound_ints(struct AdapterControlBlock *acb,
		u32 intmask_org)
{
	u32 mask;

	switch (acb->adapter_type) {

	case ACB_ADAPTER_TYPE_A: {
		struct MessageUnit_A __iomem *reg = (struct MessageUnit_A *)acb->pmu;
		mask = intmask_org & ~(ARCMSR_MU_OUTBOUND_POSTQUEUE_INTMASKENABLE |
			ARCMSR_MU_OUTBOUND_DOORBELL_INTMASKENABLE);
		writel(mask, &reg->outbound_intmask);
		acb->outbound_int_enable = ~(intmask_org & mask) & 0x000000ff;
		}
		break;

	case ACB_ADAPTER_TYPE_B: {
		struct MessageUnit_B *reg = (struct MessageUnit_B *)acb->pmu;
		mask = intmask_org | (ARCMSR_IOP2DRV_DATA_WRITE_OK |
			ARCMSR_IOP2DRV_DATA_READ_OK | ARCMSR_IOP2DRV_CDB_DONE);
		writel(mask, reg->iop2drv_doorbell_mask_reg);
		acb->outbound_int_enable = (intmask_org | mask) & 0x0000000f;
		}
	}
}

/*
 * Fill in the firmware CDB (ARCMSR_CDB) for a mid-layer command:
 * addressing, the SCSI CDB itself, then the scatter/gather list.
 */
static void arcmsr_build_ccb(struct AdapterControlBlock *acb,
	struct CommandControlBlock *ccb, struct scsi_cmnd *pcmd)
{
	struct ARCMSR_CDB *arcmsr_cdb = (struct ARCMSR_CDB *)&ccb->arcmsr_cdb;
	int8_t *psge = (int8_t *)&arcmsr_cdb->u;
	uint32_t address_lo, address_hi;
	int arccdbsize = 0x30;
	int nseg;

	ccb->pcmd = pcmd;
	memset(arcmsr_cdb, 0, sizeof(struct ARCMSR_CDB));
	arcmsr_cdb->Bus = 0;
	arcmsr_cdb->TargetID = pcmd->device->id;
	arcmsr_cdb->LUN = pcmd->device->lun;
	arcmsr_cdb->Function = 1;
	arcmsr_cdb->CdbLength = (uint8_t)pcmd->cmd_len;
	arcmsr_cdb->Context = (unsigned long)arcmsr_cdb;
	memcpy(arcmsr_cdb->Cdb, pcmd->cmnd, pcmd->cmd_len);

	nseg = scsi_dma_map(pcmd);
	BUG_ON(nseg < 0);

	if (nseg) {
		int length, i, cdb_sgcount = 0;
		struct scatterlist *sg;

		/* map stor port SG list to our iop SG List. */
		scsi_for_each_sg(pcmd, sg, nseg, i) {
			/* Get the physical address of the current data pointer */
			length = cpu_to_le32(sg_dma_len(sg));
			address_lo = cpu_to_le32(dma_addr_lo32(sg_dma_address(sg)));
			address_hi = cpu_to_le32(dma_addr_hi32(sg_dma_address(sg)));
			if (address_hi == 0) {
				/* 32-bit address: compact SG entry */
				struct SG32ENTRY *pdma_sg = (struct SG32ENTRY *)psge;

				pdma_sg->address = address_lo;
				pdma_sg->length = length;
				psge += sizeof (struct SG32ENTRY);
				arccdbsize += sizeof (struct SG32ENTRY);
			} else {
				/* 64-bit address: wide SG entry, flagged
				   via IS_SG64_ADDR in the length field */
				struct SG64ENTRY *pdma_sg = (struct SG64ENTRY *)psge;

				pdma_sg->addresshigh = address_hi;
				pdma_sg->address = address_lo;
				pdma_sg->length = length|IS_SG64_ADDR;
				psge += sizeof (struct SG64ENTRY);
				arccdbsize += sizeof (struct SG64ENTRY);
			}
			cdb_sgcount++;
		}
		arcmsr_cdb->sgcount = (uint8_t)cdb_sgcount;
		arcmsr_cdb->DataLength = scsi_bufflen(pcmd);
		/* CDBs larger than 256 bytes need the big-SGL flag */
		if ( arccdbsize > 256)
			arcmsr_cdb->Flags |= ARCMSR_CDB_FLAG_SGL_BSIZE;
	}
	if (pcmd->sc_data_direction == DMA_TO_DEVICE ) {
		arcmsr_cdb->Flags |= ARCMSR_CDB_FLAG_WRITE;
		ccb->ccb_flags |= CCB_FLAG_WRITE;
	}
}

/*
 * Hand a built CCB to the IOP.  Type A posts (phys >> 5) to the
 * inbound queue port; type B writes it into the circular post queue
 * and rings the drv2iop doorbell.
 */
static void arcmsr_post_ccb(struct AdapterControlBlock *acb, struct CommandControlBlock *ccb)
{
	uint32_t cdb_shifted_phyaddr = ccb->cdb_shifted_phyaddr;
	struct ARCMSR_CDB *arcmsr_cdb = (struct ARCMSR_CDB *)&ccb->arcmsr_cdb;
	atomic_inc(&acb->ccboutstandingcount);
	ccb->startdone = ARCMSR_CCB_START;

	switch (acb->adapter_type) {
	case ACB_ADAPTER_TYPE_A: {
		struct MessageUnit_A *reg = (struct MessageUnit_A *)acb->pmu;

		if (arcmsr_cdb->Flags & ARCMSR_CDB_FLAG_SGL_BSIZE)
			writel(cdb_shifted_phyaddr | ARCMSR_CCBPOST_FLAG_SGL_BSIZE,
					&reg->inbound_queueport);
		else {
			writel(cdb_shifted_phyaddr, &reg->inbound_queueport);
		}
		}
		break;

	case ACB_ADAPTER_TYPE_B: {
		struct MessageUnit_B *reg = (struct MessageUnit_B *)acb->pmu;
		uint32_t ending_index, index = reg->postq_index;

		/* zero the slot after ours so the IOP sees the queue end */
		ending_index = ((index + 1) % ARCMSR_MAX_HBB_POSTQUEUE);
		writel(0, &reg->post_qbuffer[ending_index]);
		if (arcmsr_cdb->Flags & ARCMSR_CDB_FLAG_SGL_BSIZE) {
			writel(cdb_shifted_phyaddr | ARCMSR_CCBPOST_FLAG_SGL_BSIZE,
				&reg->post_qbuffer[index]);
		}
		else {
			writel(cdb_shifted_phyaddr, &reg->post_qbuffer[index]);
		}
		index++;
		index %= ARCMSR_MAX_HBB_POSTQUEUE;/*if last index number set it to 0 */
		reg->postq_index = index;
		writel(ARCMSR_DRV2IOP_CDB_POSTED, reg->drv2iop_doorbell_reg);
		}
		break;
	}
}

/* stop background rebuild on a type A adapter */
static void arcmsr_stop_hba_bgrb(struct AdapterControlBlock *acb)
{
	struct MessageUnit_A __iomem *reg = (struct MessageUnit_A *)acb->pmu;
	acb->acb_flags &= ~ACB_F_MSG_START_BGRB;
	writel(ARCMSR_INBOUND_MESG0_STOP_BGRB, &reg->inbound_msgaddr0);

	if (arcmsr_hba_wait_msgint_ready(acb)) {
		printk(KERN_NOTICE
			"arcmsr%d: wait 'stop adapter background rebulid' timeout \n"
			, acb->host->host_no);
	}
}

/* stop background rebuild on a type B adapter */
static void arcmsr_stop_hbb_bgrb(struct AdapterControlBlock *acb)
{
	struct MessageUnit_B *reg = (struct MessageUnit_B *)acb->pmu;
	acb->acb_flags &= ~ACB_F_MSG_START_BGRB;
	writel(ARCMSR_MESSAGE_STOP_BGRB, reg->drv2iop_doorbell_reg);

	if (arcmsr_hbb_wait_msgint_ready(acb)) {
		printk(KERN_NOTICE
			"arcmsr%d: wait 'stop adapter background rebulid' timeout \n"
			, acb->host->host_no);
	}
}

/* dispatch stop-background-rebuild to the adapter-type helper */
static void arcmsr_stop_adapter_bgrb(struct AdapterControlBlock *acb)
{
	switch (acb->adapter_type) {
	case ACB_ADAPTER_TYPE_A: {
arcmsr_stop_hba_bgrb(acb); 1013 } 1014 break; 1015 1016 case ACB_ADAPTER_TYPE_B: { 1017 arcmsr_stop_hbb_bgrb(acb); 1018 } 1019 break; 1020 } 1021 } 1022 1023 static void arcmsr_free_ccb_pool(struct AdapterControlBlock *acb) 1024 { 1025 dma_free_coherent(&acb->pdev->dev, 1026 ARCMSR_MAX_FREECCB_NUM * sizeof (struct CommandControlBlock) + 0x20, 1027 acb->dma_coherent, 1028 acb->dma_coherent_handle); 1029 } 1030 1031 void arcmsr_iop_message_read(struct AdapterControlBlock *acb) 1032 { 1033 switch (acb->adapter_type) { 1034 case ACB_ADAPTER_TYPE_A: { 1035 struct MessageUnit_A __iomem *reg = (struct MessageUnit_A *)acb->pmu; 1036 writel(ARCMSR_INBOUND_DRIVER_DATA_READ_OK, ®->inbound_doorbell); 1037 } 1038 break; 1039 1040 case ACB_ADAPTER_TYPE_B: { 1041 struct MessageUnit_B *reg = (struct MessageUnit_B *)acb->pmu; 1042 writel(ARCMSR_DRV2IOP_DATA_READ_OK, reg->drv2iop_doorbell_reg); 1043 } 1044 break; 1045 } 1046 } 1047 1048 static void arcmsr_iop_message_wrote(struct AdapterControlBlock *acb) 1049 { 1050 switch (acb->adapter_type) { 1051 case ACB_ADAPTER_TYPE_A: { 1052 struct MessageUnit_A __iomem *reg = (struct MessageUnit_A *)acb->pmu; 1053 /* 1054 ** push inbound doorbell tell iop, driver data write ok 1055 ** and wait reply on next hwinterrupt for next Qbuffer post 1056 */ 1057 writel(ARCMSR_INBOUND_DRIVER_DATA_WRITE_OK, ®->inbound_doorbell); 1058 } 1059 break; 1060 1061 case ACB_ADAPTER_TYPE_B: { 1062 struct MessageUnit_B *reg = (struct MessageUnit_B *)acb->pmu; 1063 /* 1064 ** push inbound doorbell tell iop, driver data write ok 1065 ** and wait reply on next hwinterrupt for next Qbuffer post 1066 */ 1067 writel(ARCMSR_DRV2IOP_DATA_WRITE_OK, reg->drv2iop_doorbell_reg); 1068 } 1069 break; 1070 } 1071 } 1072 1073 struct QBUFFER *arcmsr_get_iop_rqbuffer(struct AdapterControlBlock *acb) 1074 { 1075 static struct QBUFFER *qbuffer; 1076 1077 switch (acb->adapter_type) { 1078 1079 case ACB_ADAPTER_TYPE_A: { 1080 struct MessageUnit_A __iomem *reg = (struct MessageUnit_A 
*)acb->pmu; 1081 qbuffer = (struct QBUFFER __iomem *) ®->message_rbuffer; 1082 } 1083 break; 1084 1085 case ACB_ADAPTER_TYPE_B: { 1086 struct MessageUnit_B *reg = (struct MessageUnit_B *)acb->pmu; 1087 qbuffer = (struct QBUFFER __iomem *) reg->ioctl_rbuffer_reg; 1088 } 1089 break; 1090 } 1091 return qbuffer; 1092 } 1093 1094 static struct QBUFFER *arcmsr_get_iop_wqbuffer(struct AdapterControlBlock *acb) 1095 { 1096 static struct QBUFFER *pqbuffer; 1097 1098 switch (acb->adapter_type) { 1099 1100 case ACB_ADAPTER_TYPE_A: { 1101 struct MessageUnit_A __iomem *reg = (struct MessageUnit_A *)acb->pmu; 1102 pqbuffer = (struct QBUFFER *) ®->message_wbuffer; 1103 } 1104 break; 1105 1106 case ACB_ADAPTER_TYPE_B: { 1107 struct MessageUnit_B *reg = (struct MessageUnit_B *)acb->pmu; 1108 pqbuffer = (struct QBUFFER __iomem *)reg->ioctl_wbuffer_reg; 1109 } 1110 break; 1111 } 1112 return pqbuffer; 1113 } 1114 1115 static void arcmsr_iop2drv_data_wrote_handle(struct AdapterControlBlock *acb) 1116 { 1117 struct QBUFFER *prbuffer; 1118 struct QBUFFER *pQbuffer; 1119 uint8_t *iop_data; 1120 int32_t my_empty_len, iop_len, rqbuf_firstindex, rqbuf_lastindex; 1121 1122 rqbuf_lastindex = acb->rqbuf_lastindex; 1123 rqbuf_firstindex = acb->rqbuf_firstindex; 1124 prbuffer = arcmsr_get_iop_rqbuffer(acb); 1125 iop_data = (uint8_t *)prbuffer->data; 1126 iop_len = prbuffer->data_len; 1127 my_empty_len = (rqbuf_firstindex - rqbuf_lastindex -1)&(ARCMSR_MAX_QBUFFER -1); 1128 1129 if (my_empty_len >= iop_len) 1130 { 1131 while (iop_len > 0) { 1132 pQbuffer = (struct QBUFFER *)&acb->rqbuffer[rqbuf_lastindex]; 1133 memcpy(pQbuffer, iop_data,1); 1134 rqbuf_lastindex++; 1135 rqbuf_lastindex %= ARCMSR_MAX_QBUFFER; 1136 iop_data++; 1137 iop_len--; 1138 } 1139 acb->rqbuf_lastindex = rqbuf_lastindex; 1140 arcmsr_iop_message_read(acb); 1141 } 1142 1143 else { 1144 acb->acb_flags |= ACB_F_IOPDATA_OVERFLOW; 1145 } 1146 } 1147 1148 static void arcmsr_iop2drv_data_read_handle(struct AdapterControlBlock *acb) 1149 
{ 1150 acb->acb_flags |= ACB_F_MESSAGE_WQBUFFER_READED; 1151 if (acb->wqbuf_firstindex != acb->wqbuf_lastindex) { 1152 uint8_t *pQbuffer; 1153 struct QBUFFER *pwbuffer; 1154 uint8_t *iop_data; 1155 int32_t allxfer_len = 0; 1156 1157 acb->acb_flags &= (~ACB_F_MESSAGE_WQBUFFER_READED); 1158 pwbuffer = arcmsr_get_iop_wqbuffer(acb); 1159 iop_data = (uint8_t __iomem *)pwbuffer->data; 1160 1161 while ((acb->wqbuf_firstindex != acb->wqbuf_lastindex) && \ 1162 (allxfer_len < 124)) { 1163 pQbuffer = &acb->wqbuffer[acb->wqbuf_firstindex]; 1164 memcpy(iop_data, pQbuffer, 1); 1165 acb->wqbuf_firstindex++; 1166 acb->wqbuf_firstindex %= ARCMSR_MAX_QBUFFER; 1167 iop_data++; 1168 allxfer_len++; 1169 } 1170 pwbuffer->data_len = allxfer_len; 1171 1172 arcmsr_iop_message_wrote(acb); 1173 } 1174 1175 if (acb->wqbuf_firstindex == acb->wqbuf_lastindex) { 1176 acb->acb_flags |= ACB_F_MESSAGE_WQBUFFER_CLEARED; 1177 } 1178 } 1179 1180 static void arcmsr_hba_doorbell_isr(struct AdapterControlBlock *acb) 1181 { 1182 uint32_t outbound_doorbell; 1183 struct MessageUnit_A __iomem *reg = (struct MessageUnit_A *)acb->pmu; 1184 1185 outbound_doorbell = readl(®->outbound_doorbell); 1186 writel(outbound_doorbell, ®->outbound_doorbell); 1187 if (outbound_doorbell & ARCMSR_OUTBOUND_IOP331_DATA_WRITE_OK) { 1188 arcmsr_iop2drv_data_wrote_handle(acb); 1189 } 1190 1191 if (outbound_doorbell & ARCMSR_OUTBOUND_IOP331_DATA_READ_OK) { 1192 arcmsr_iop2drv_data_read_handle(acb); 1193 } 1194 } 1195 1196 static void arcmsr_hba_postqueue_isr(struct AdapterControlBlock *acb) 1197 { 1198 uint32_t flag_ccb; 1199 struct MessageUnit_A __iomem *reg = (struct MessageUnit_A *)acb->pmu; 1200 1201 while ((flag_ccb = readl(®->outbound_queueport)) != 0xFFFFFFFF) { 1202 arcmsr_drain_donequeue(acb, flag_ccb); 1203 } 1204 } 1205 1206 static void arcmsr_hbb_postqueue_isr(struct AdapterControlBlock *acb) 1207 { 1208 uint32_t index; 1209 uint32_t flag_ccb; 1210 struct MessageUnit_B *reg = (struct MessageUnit_B *)acb->pmu; 1211 1212 
index = reg->doneq_index; 1213 1214 while ((flag_ccb = readl(®->done_qbuffer[index])) != 0) { 1215 writel(0, ®->done_qbuffer[index]); 1216 arcmsr_drain_donequeue(acb, flag_ccb); 1217 index++; 1218 index %= ARCMSR_MAX_HBB_POSTQUEUE; 1219 reg->doneq_index = index; 1220 } 1221 } 1222 1223 static int arcmsr_handle_hba_isr(struct AdapterControlBlock *acb) 1224 { 1225 uint32_t outbound_intstatus; 1226 struct MessageUnit_A __iomem *reg = (struct MessageUnit_A *)acb->pmu; 1227 1228 outbound_intstatus = readl(®->outbound_intstatus) & \ 1229 acb->outbound_int_enable; 1230 if (!(outbound_intstatus & ARCMSR_MU_OUTBOUND_HANDLE_INT)) { 1231 return 1; 1232 } 1233 writel(outbound_intstatus, ®->outbound_intstatus); 1234 if (outbound_intstatus & ARCMSR_MU_OUTBOUND_DOORBELL_INT) { 1235 arcmsr_hba_doorbell_isr(acb); 1236 } 1237 if (outbound_intstatus & ARCMSR_MU_OUTBOUND_POSTQUEUE_INT) { 1238 arcmsr_hba_postqueue_isr(acb); 1239 } 1240 return 0; 1241 } 1242 1243 static int arcmsr_handle_hbb_isr(struct AdapterControlBlock *acb) 1244 { 1245 uint32_t outbound_doorbell; 1246 struct MessageUnit_B *reg = (struct MessageUnit_B *)acb->pmu; 1247 1248 outbound_doorbell = readl(reg->iop2drv_doorbell_reg) & \ 1249 acb->outbound_int_enable; 1250 if (!outbound_doorbell) 1251 return 1; 1252 1253 writel(~outbound_doorbell, reg->iop2drv_doorbell_reg); 1254 1255 if (outbound_doorbell & ARCMSR_IOP2DRV_DATA_WRITE_OK) { 1256 arcmsr_iop2drv_data_wrote_handle(acb); 1257 } 1258 if (outbound_doorbell & ARCMSR_IOP2DRV_DATA_READ_OK) { 1259 arcmsr_iop2drv_data_read_handle(acb); 1260 } 1261 if (outbound_doorbell & ARCMSR_IOP2DRV_CDB_DONE) { 1262 arcmsr_hbb_postqueue_isr(acb); 1263 } 1264 1265 return 0; 1266 } 1267 1268 static irqreturn_t arcmsr_interrupt(struct AdapterControlBlock *acb) 1269 { 1270 switch (acb->adapter_type) { 1271 case ACB_ADAPTER_TYPE_A: { 1272 if (arcmsr_handle_hba_isr(acb)) { 1273 return IRQ_NONE; 1274 } 1275 } 1276 break; 1277 1278 case ACB_ADAPTER_TYPE_B: { 1279 if 
(arcmsr_handle_hbb_isr(acb)) { 1280 return IRQ_NONE; 1281 } 1282 } 1283 break; 1284 } 1285 return IRQ_HANDLED; 1286 } 1287 1288 static void arcmsr_iop_parking(struct AdapterControlBlock *acb) 1289 { 1290 if (acb) { 1291 /* stop adapter background rebuild */ 1292 if (acb->acb_flags & ACB_F_MSG_START_BGRB) { 1293 uint32_t intmask_org; 1294 acb->acb_flags &= ~ACB_F_MSG_START_BGRB; 1295 intmask_org = arcmsr_disable_outbound_ints(acb); 1296 arcmsr_stop_adapter_bgrb(acb); 1297 arcmsr_flush_adapter_cache(acb); 1298 arcmsr_enable_outbound_ints(acb, intmask_org); 1299 } 1300 } 1301 } 1302 1303 void arcmsr_post_ioctldata2iop(struct AdapterControlBlock *acb) 1304 { 1305 int32_t wqbuf_firstindex, wqbuf_lastindex; 1306 uint8_t *pQbuffer; 1307 struct QBUFFER *pwbuffer; 1308 uint8_t *iop_data; 1309 int32_t allxfer_len = 0; 1310 1311 pwbuffer = arcmsr_get_iop_wqbuffer(acb); 1312 iop_data = (uint8_t __iomem *)pwbuffer->data; 1313 if (acb->acb_flags & ACB_F_MESSAGE_WQBUFFER_READED) { 1314 acb->acb_flags &= (~ACB_F_MESSAGE_WQBUFFER_READED); 1315 wqbuf_firstindex = acb->wqbuf_firstindex; 1316 wqbuf_lastindex = acb->wqbuf_lastindex; 1317 while ((wqbuf_firstindex != wqbuf_lastindex) && (allxfer_len < 124)) { 1318 pQbuffer = &acb->wqbuffer[wqbuf_firstindex]; 1319 memcpy(iop_data, pQbuffer, 1); 1320 wqbuf_firstindex++; 1321 wqbuf_firstindex %= ARCMSR_MAX_QBUFFER; 1322 iop_data++; 1323 allxfer_len++; 1324 } 1325 acb->wqbuf_firstindex = wqbuf_firstindex; 1326 pwbuffer->data_len = allxfer_len; 1327 arcmsr_iop_message_wrote(acb); 1328 } 1329 } 1330 1331 static int arcmsr_iop_message_xfer(struct AdapterControlBlock *acb, \ 1332 struct scsi_cmnd *cmd) 1333 { 1334 struct CMD_MESSAGE_FIELD *pcmdmessagefld; 1335 int retvalue = 0, transfer_len = 0; 1336 char *buffer; 1337 struct scatterlist *sg; 1338 uint32_t controlcode = (uint32_t ) cmd->cmnd[5] << 24 | 1339 (uint32_t ) cmd->cmnd[6] << 16 | 1340 (uint32_t ) cmd->cmnd[7] << 8 | 1341 (uint32_t ) cmd->cmnd[8]; 1342 /* 4 bytes: Areca io control code 
*/ 1343 1344 sg = scsi_sglist(cmd); 1345 buffer = kmap_atomic(sg->page, KM_IRQ0) + sg->offset; 1346 if (scsi_sg_count(cmd) > 1) { 1347 retvalue = ARCMSR_MESSAGE_FAIL; 1348 goto message_out; 1349 } 1350 transfer_len += sg->length; 1351 1352 if (transfer_len > sizeof(struct CMD_MESSAGE_FIELD)) { 1353 retvalue = ARCMSR_MESSAGE_FAIL; 1354 goto message_out; 1355 } 1356 pcmdmessagefld = (struct CMD_MESSAGE_FIELD *) buffer; 1357 switch(controlcode) { 1358 1359 case ARCMSR_MESSAGE_READ_RQBUFFER: { 1360 unsigned long *ver_addr; 1361 dma_addr_t buf_handle; 1362 uint8_t *pQbuffer, *ptmpQbuffer; 1363 int32_t allxfer_len = 0; 1364 1365 ver_addr = pci_alloc_consistent(acb->pdev, 1032, &buf_handle); 1366 if (!ver_addr) { 1367 retvalue = ARCMSR_MESSAGE_FAIL; 1368 goto message_out; 1369 } 1370 ptmpQbuffer = (uint8_t *) ver_addr; 1371 while ((acb->rqbuf_firstindex != acb->rqbuf_lastindex) 1372 && (allxfer_len < 1031)) { 1373 pQbuffer = &acb->rqbuffer[acb->rqbuf_firstindex]; 1374 memcpy(ptmpQbuffer, pQbuffer, 1); 1375 acb->rqbuf_firstindex++; 1376 acb->rqbuf_firstindex %= ARCMSR_MAX_QBUFFER; 1377 ptmpQbuffer++; 1378 allxfer_len++; 1379 } 1380 if (acb->acb_flags & ACB_F_IOPDATA_OVERFLOW) { 1381 1382 struct QBUFFER *prbuffer; 1383 uint8_t *iop_data; 1384 int32_t iop_len; 1385 1386 acb->acb_flags &= ~ACB_F_IOPDATA_OVERFLOW; 1387 prbuffer = arcmsr_get_iop_rqbuffer(acb); 1388 iop_data = (uint8_t *)prbuffer->data; 1389 iop_len = readl(&prbuffer->data_len); 1390 while (iop_len > 0) { 1391 acb->rqbuffer[acb->rqbuf_lastindex] = readb(iop_data); 1392 acb->rqbuf_lastindex++; 1393 acb->rqbuf_lastindex %= ARCMSR_MAX_QBUFFER; 1394 iop_data++; 1395 iop_len--; 1396 } 1397 arcmsr_iop_message_read(acb); 1398 } 1399 memcpy(pcmdmessagefld->messagedatabuffer, (uint8_t *)ver_addr, allxfer_len); 1400 pcmdmessagefld->cmdmessage.Length = allxfer_len; 1401 pcmdmessagefld->cmdmessage.ReturnCode = ARCMSR_MESSAGE_RETURNCODE_OK; 1402 pci_free_consistent(acb->pdev, 1032, ver_addr, buf_handle); 1403 } 1404 break; 
1405 1406 case ARCMSR_MESSAGE_WRITE_WQBUFFER: { 1407 unsigned long *ver_addr; 1408 dma_addr_t buf_handle; 1409 int32_t my_empty_len, user_len, wqbuf_firstindex, wqbuf_lastindex; 1410 uint8_t *pQbuffer, *ptmpuserbuffer; 1411 1412 ver_addr = pci_alloc_consistent(acb->pdev, 1032, &buf_handle); 1413 if (!ver_addr) { 1414 retvalue = ARCMSR_MESSAGE_FAIL; 1415 goto message_out; 1416 } 1417 ptmpuserbuffer = (uint8_t *)ver_addr; 1418 user_len = pcmdmessagefld->cmdmessage.Length; 1419 memcpy(ptmpuserbuffer, pcmdmessagefld->messagedatabuffer, user_len); 1420 wqbuf_lastindex = acb->wqbuf_lastindex; 1421 wqbuf_firstindex = acb->wqbuf_firstindex; 1422 if (wqbuf_lastindex != wqbuf_firstindex) { 1423 struct SENSE_DATA *sensebuffer = 1424 (struct SENSE_DATA *)cmd->sense_buffer; 1425 arcmsr_post_ioctldata2iop(acb); 1426 /* has error report sensedata */ 1427 sensebuffer->ErrorCode = 0x70; 1428 sensebuffer->SenseKey = ILLEGAL_REQUEST; 1429 sensebuffer->AdditionalSenseLength = 0x0A; 1430 sensebuffer->AdditionalSenseCode = 0x20; 1431 sensebuffer->Valid = 1; 1432 retvalue = ARCMSR_MESSAGE_FAIL; 1433 } else { 1434 my_empty_len = (wqbuf_firstindex-wqbuf_lastindex - 1) 1435 &(ARCMSR_MAX_QBUFFER - 1); 1436 if (my_empty_len >= user_len) { 1437 while (user_len > 0) { 1438 pQbuffer = 1439 &acb->wqbuffer[acb->wqbuf_lastindex]; 1440 memcpy(pQbuffer, ptmpuserbuffer, 1); 1441 acb->wqbuf_lastindex++; 1442 acb->wqbuf_lastindex %= ARCMSR_MAX_QBUFFER; 1443 ptmpuserbuffer++; 1444 user_len--; 1445 } 1446 if (acb->acb_flags & ACB_F_MESSAGE_WQBUFFER_CLEARED) { 1447 acb->acb_flags &= 1448 ~ACB_F_MESSAGE_WQBUFFER_CLEARED; 1449 arcmsr_post_ioctldata2iop(acb); 1450 } 1451 } else { 1452 /* has error report sensedata */ 1453 struct SENSE_DATA *sensebuffer = 1454 (struct SENSE_DATA *)cmd->sense_buffer; 1455 sensebuffer->ErrorCode = 0x70; 1456 sensebuffer->SenseKey = ILLEGAL_REQUEST; 1457 sensebuffer->AdditionalSenseLength = 0x0A; 1458 sensebuffer->AdditionalSenseCode = 0x20; 1459 sensebuffer->Valid = 1; 1460 
retvalue = ARCMSR_MESSAGE_FAIL; 1461 } 1462 } 1463 pci_free_consistent(acb->pdev, 1032, ver_addr, buf_handle); 1464 } 1465 break; 1466 1467 case ARCMSR_MESSAGE_CLEAR_RQBUFFER: { 1468 uint8_t *pQbuffer = acb->rqbuffer; 1469 1470 if (acb->acb_flags & ACB_F_IOPDATA_OVERFLOW) { 1471 acb->acb_flags &= ~ACB_F_IOPDATA_OVERFLOW; 1472 arcmsr_iop_message_read(acb); 1473 } 1474 acb->acb_flags |= ACB_F_MESSAGE_RQBUFFER_CLEARED; 1475 acb->rqbuf_firstindex = 0; 1476 acb->rqbuf_lastindex = 0; 1477 memset(pQbuffer, 0, ARCMSR_MAX_QBUFFER); 1478 pcmdmessagefld->cmdmessage.ReturnCode = ARCMSR_MESSAGE_RETURNCODE_OK; 1479 } 1480 break; 1481 1482 case ARCMSR_MESSAGE_CLEAR_WQBUFFER: { 1483 uint8_t *pQbuffer = acb->wqbuffer; 1484 1485 if (acb->acb_flags & ACB_F_IOPDATA_OVERFLOW) { 1486 acb->acb_flags &= ~ACB_F_IOPDATA_OVERFLOW; 1487 arcmsr_iop_message_read(acb); 1488 } 1489 acb->acb_flags |= 1490 (ACB_F_MESSAGE_WQBUFFER_CLEARED | 1491 ACB_F_MESSAGE_WQBUFFER_READED); 1492 acb->wqbuf_firstindex = 0; 1493 acb->wqbuf_lastindex = 0; 1494 memset(pQbuffer, 0, ARCMSR_MAX_QBUFFER); 1495 pcmdmessagefld->cmdmessage.ReturnCode = 1496 ARCMSR_MESSAGE_RETURNCODE_OK; 1497 } 1498 break; 1499 1500 case ARCMSR_MESSAGE_CLEAR_ALLQBUFFER: { 1501 uint8_t *pQbuffer; 1502 1503 if (acb->acb_flags & ACB_F_IOPDATA_OVERFLOW) { 1504 acb->acb_flags &= ~ACB_F_IOPDATA_OVERFLOW; 1505 arcmsr_iop_message_read(acb); 1506 } 1507 acb->acb_flags |= 1508 (ACB_F_MESSAGE_WQBUFFER_CLEARED 1509 | ACB_F_MESSAGE_RQBUFFER_CLEARED 1510 | ACB_F_MESSAGE_WQBUFFER_READED); 1511 acb->rqbuf_firstindex = 0; 1512 acb->rqbuf_lastindex = 0; 1513 acb->wqbuf_firstindex = 0; 1514 acb->wqbuf_lastindex = 0; 1515 pQbuffer = acb->rqbuffer; 1516 memset(pQbuffer, 0, sizeof(struct QBUFFER)); 1517 pQbuffer = acb->wqbuffer; 1518 memset(pQbuffer, 0, sizeof(struct QBUFFER)); 1519 pcmdmessagefld->cmdmessage.ReturnCode = ARCMSR_MESSAGE_RETURNCODE_OK; 1520 } 1521 break; 1522 1523 case ARCMSR_MESSAGE_RETURN_CODE_3F: { 1524 pcmdmessagefld->cmdmessage.ReturnCode = 
ARCMSR_MESSAGE_RETURNCODE_3F; 1525 } 1526 break; 1527 1528 case ARCMSR_MESSAGE_SAY_HELLO: { 1529 int8_t *hello_string = "Hello! I am ARCMSR"; 1530 1531 memcpy(pcmdmessagefld->messagedatabuffer, hello_string 1532 , (int16_t)strlen(hello_string)); 1533 pcmdmessagefld->cmdmessage.ReturnCode = ARCMSR_MESSAGE_RETURNCODE_OK; 1534 } 1535 break; 1536 1537 case ARCMSR_MESSAGE_SAY_GOODBYE: 1538 arcmsr_iop_parking(acb); 1539 break; 1540 1541 case ARCMSR_MESSAGE_FLUSH_ADAPTER_CACHE: 1542 arcmsr_flush_adapter_cache(acb); 1543 break; 1544 1545 default: 1546 retvalue = ARCMSR_MESSAGE_FAIL; 1547 } 1548 message_out: 1549 sg = scsi_sglist(cmd); 1550 kunmap_atomic(buffer - sg->offset, KM_IRQ0); 1551 return retvalue; 1552 } 1553 1554 static struct CommandControlBlock *arcmsr_get_freeccb(struct AdapterControlBlock *acb) 1555 { 1556 struct list_head *head = &acb->ccb_free_list; 1557 struct CommandControlBlock *ccb = NULL; 1558 1559 if (!list_empty(head)) { 1560 ccb = list_entry(head->next, struct CommandControlBlock, list); 1561 list_del(head->next); 1562 } 1563 return ccb; 1564 } 1565 1566 static void arcmsr_handle_virtual_command(struct AdapterControlBlock *acb, 1567 struct scsi_cmnd *cmd) 1568 { 1569 switch (cmd->cmnd[0]) { 1570 case INQUIRY: { 1571 unsigned char inqdata[36]; 1572 char *buffer; 1573 struct scatterlist *sg; 1574 1575 if (cmd->device->lun) { 1576 cmd->result = (DID_TIME_OUT << 16); 1577 cmd->scsi_done(cmd); 1578 return; 1579 } 1580 inqdata[0] = TYPE_PROCESSOR; 1581 /* Periph Qualifier & Periph Dev Type */ 1582 inqdata[1] = 0; 1583 /* rem media bit & Dev Type Modifier */ 1584 inqdata[2] = 0; 1585 /* ISO, ECMA, & ANSI versions */ 1586 inqdata[4] = 31; 1587 /* length of additional data */ 1588 strncpy(&inqdata[8], "Areca ", 8); 1589 /* Vendor Identification */ 1590 strncpy(&inqdata[16], "RAID controller ", 16); 1591 /* Product Identification */ 1592 strncpy(&inqdata[32], "R001", 4); /* Product Revision */ 1593 1594 sg = scsi_sglist(cmd); 1595 buffer = 
kmap_atomic(sg->page, KM_IRQ0) + sg->offset; 1596 1597 memcpy(buffer, inqdata, sizeof(inqdata)); 1598 sg = scsi_sglist(cmd); 1599 kunmap_atomic(buffer - sg->offset, KM_IRQ0); 1600 1601 cmd->scsi_done(cmd); 1602 } 1603 break; 1604 case WRITE_BUFFER: 1605 case READ_BUFFER: { 1606 if (arcmsr_iop_message_xfer(acb, cmd)) 1607 cmd->result = (DID_ERROR << 16); 1608 cmd->scsi_done(cmd); 1609 } 1610 break; 1611 default: 1612 cmd->scsi_done(cmd); 1613 } 1614 } 1615 1616 static int arcmsr_queue_command(struct scsi_cmnd *cmd, 1617 void (* done)(struct scsi_cmnd *)) 1618 { 1619 struct Scsi_Host *host = cmd->device->host; 1620 struct AdapterControlBlock *acb = (struct AdapterControlBlock *) host->hostdata; 1621 struct CommandControlBlock *ccb; 1622 int target = cmd->device->id; 1623 int lun = cmd->device->lun; 1624 1625 cmd->scsi_done = done; 1626 cmd->host_scribble = NULL; 1627 cmd->result = 0; 1628 if (acb->acb_flags & ACB_F_BUS_RESET) { 1629 printk(KERN_NOTICE "arcmsr%d: bus reset" 1630 " and return busy \n" 1631 , acb->host->host_no); 1632 return SCSI_MLQUEUE_HOST_BUSY; 1633 } 1634 if (target == 16) { 1635 /* virtual device for iop message transfer */ 1636 arcmsr_handle_virtual_command(acb, cmd); 1637 return 0; 1638 } 1639 if (acb->devstate[target][lun] == ARECA_RAID_GONE) { 1640 uint8_t block_cmd; 1641 1642 block_cmd = cmd->cmnd[0] & 0x0f; 1643 if (block_cmd == 0x08 || block_cmd == 0x0a) { 1644 printk(KERN_NOTICE 1645 "arcmsr%d: block 'read/write'" 1646 "command with gone raid volume" 1647 " Cmd = %2x, TargetId = %d, Lun = %d \n" 1648 , acb->host->host_no 1649 , cmd->cmnd[0] 1650 , target, lun); 1651 cmd->result = (DID_NO_CONNECT << 16); 1652 cmd->scsi_done(cmd); 1653 return 0; 1654 } 1655 } 1656 if (atomic_read(&acb->ccboutstandingcount) >= 1657 ARCMSR_MAX_OUTSTANDING_CMD) 1658 return SCSI_MLQUEUE_HOST_BUSY; 1659 1660 ccb = arcmsr_get_freeccb(acb); 1661 if (!ccb) 1662 return SCSI_MLQUEUE_HOST_BUSY; 1663 1664 arcmsr_build_ccb(acb, ccb, cmd); 1665 arcmsr_post_ccb(acb, ccb); 
1666 return 0; 1667 } 1668 1669 static void arcmsr_get_hba_config(struct AdapterControlBlock *acb) 1670 { 1671 struct MessageUnit_A __iomem *reg = (struct MessageUnit_A *)acb->pmu; 1672 char *acb_firm_model = acb->firm_model; 1673 char *acb_firm_version = acb->firm_version; 1674 char *iop_firm_model = (char *) (®->message_rwbuffer[15]); 1675 char *iop_firm_version = (char *) (®->message_rwbuffer[17]); 1676 int count; 1677 1678 writel(ARCMSR_INBOUND_MESG0_GET_CONFIG, ®->inbound_msgaddr0); 1679 if (arcmsr_hba_wait_msgint_ready(acb)) { 1680 printk(KERN_NOTICE "arcmsr%d: wait 'get adapter firmware \ 1681 miscellaneous data' timeout \n", acb->host->host_no); 1682 } 1683 1684 count = 8; 1685 while (count) { 1686 *acb_firm_model = readb(iop_firm_model); 1687 acb_firm_model++; 1688 iop_firm_model++; 1689 count--; 1690 } 1691 1692 count = 16; 1693 while (count) { 1694 *acb_firm_version = readb(iop_firm_version); 1695 acb_firm_version++; 1696 iop_firm_version++; 1697 count--; 1698 } 1699 1700 printk(KERN_INFO "ARECA RAID ADAPTER%d: FIRMWARE VERSION %s \n" 1701 , acb->host->host_no 1702 , acb->firm_version); 1703 1704 acb->firm_request_len = readl(®->message_rwbuffer[1]); 1705 acb->firm_numbers_queue = readl(®->message_rwbuffer[2]); 1706 acb->firm_sdram_size = readl(®->message_rwbuffer[3]); 1707 acb->firm_hd_channels = readl(®->message_rwbuffer[4]); 1708 } 1709 1710 static void arcmsr_get_hbb_config(struct AdapterControlBlock *acb) 1711 { 1712 struct MessageUnit_B *reg = (struct MessageUnit_B *)acb->pmu; 1713 uint32_t *lrwbuffer = reg->msgcode_rwbuffer_reg; 1714 char *acb_firm_model = acb->firm_model; 1715 char *acb_firm_version = acb->firm_version; 1716 char *iop_firm_model = (char *) (&lrwbuffer[15]); 1717 /*firm_model,15,60-67*/ 1718 char *iop_firm_version = (char *) (&lrwbuffer[17]); 1719 /*firm_version,17,68-83*/ 1720 int count; 1721 1722 writel(ARCMSR_MESSAGE_GET_CONFIG, reg->drv2iop_doorbell_reg); 1723 if (arcmsr_hbb_wait_msgint_ready(acb)) { 1724 printk(KERN_NOTICE 
"arcmsr%d: wait 'get adapter firmware \ 1725 miscellaneous data' timeout \n", acb->host->host_no); 1726 } 1727 1728 count = 8; 1729 while (count) 1730 { 1731 *acb_firm_model = readb(iop_firm_model); 1732 acb_firm_model++; 1733 iop_firm_model++; 1734 count--; 1735 } 1736 1737 count = 16; 1738 while (count) 1739 { 1740 *acb_firm_version = readb(iop_firm_version); 1741 acb_firm_version++; 1742 iop_firm_version++; 1743 count--; 1744 } 1745 1746 printk(KERN_INFO "ARECA RAID ADAPTER%d: FIRMWARE VERSION %s \n", 1747 acb->host->host_no, 1748 acb->firm_version); 1749 1750 lrwbuffer++; 1751 acb->firm_request_len = readl(lrwbuffer++); 1752 /*firm_request_len,1,04-07*/ 1753 acb->firm_numbers_queue = readl(lrwbuffer++); 1754 /*firm_numbers_queue,2,08-11*/ 1755 acb->firm_sdram_size = readl(lrwbuffer++); 1756 /*firm_sdram_size,3,12-15*/ 1757 acb->firm_hd_channels = readl(lrwbuffer); 1758 /*firm_ide_channels,4,16-19*/ 1759 } 1760 1761 static void arcmsr_get_firmware_spec(struct AdapterControlBlock *acb) 1762 { 1763 switch (acb->adapter_type) { 1764 case ACB_ADAPTER_TYPE_A: { 1765 arcmsr_get_hba_config(acb); 1766 } 1767 break; 1768 1769 case ACB_ADAPTER_TYPE_B: { 1770 arcmsr_get_hbb_config(acb); 1771 } 1772 break; 1773 } 1774 } 1775 1776 static void arcmsr_polling_hba_ccbdone(struct AdapterControlBlock *acb, 1777 struct CommandControlBlock *poll_ccb) 1778 { 1779 struct MessageUnit_A __iomem *reg = (struct MessageUnit_A *)acb->pmu; 1780 struct CommandControlBlock *ccb; 1781 uint32_t flag_ccb, outbound_intstatus, poll_ccb_done = 0, poll_count = 0; 1782 1783 polling_hba_ccb_retry: 1784 poll_count++; 1785 outbound_intstatus = readl(®->outbound_intstatus) & acb->outbound_int_enable; 1786 writel(outbound_intstatus, ®->outbound_intstatus);/*clear interrupt*/ 1787 while (1) { 1788 if ((flag_ccb = readl(®->outbound_queueport)) == 0xFFFFFFFF) { 1789 if (poll_ccb_done) 1790 break; 1791 else { 1792 msleep(25); 1793 if (poll_count > 100) 1794 break; 1795 goto polling_hba_ccb_retry; 1796 } 1797 
} 1798 ccb = (struct CommandControlBlock *)(acb->vir2phy_offset + (flag_ccb << 5)); 1799 poll_ccb_done = (ccb == poll_ccb) ? 1:0; 1800 if ((ccb->acb != acb) || (ccb->startdone != ARCMSR_CCB_START)) { 1801 if ((ccb->startdone == ARCMSR_CCB_ABORTED) || (ccb == poll_ccb)) { 1802 printk(KERN_NOTICE "arcmsr%d: scsi id = %d lun = %d ccb = '0x%p'" 1803 " poll command abort successfully \n" 1804 , acb->host->host_no 1805 , ccb->pcmd->device->id 1806 , ccb->pcmd->device->lun 1807 , ccb); 1808 ccb->pcmd->result = DID_ABORT << 16; 1809 arcmsr_ccb_complete(ccb, 1); 1810 poll_ccb_done = 1; 1811 continue; 1812 } 1813 printk(KERN_NOTICE "arcmsr%d: polling get an illegal ccb" 1814 " command done ccb = '0x%p'" 1815 "ccboutstandingcount = %d \n" 1816 , acb->host->host_no 1817 , ccb 1818 , atomic_read(&acb->ccboutstandingcount)); 1819 continue; 1820 } 1821 arcmsr_report_ccb_state(acb, ccb, flag_ccb); 1822 } 1823 } 1824 1825 static void arcmsr_polling_hbb_ccbdone(struct AdapterControlBlock *acb, \ 1826 struct CommandControlBlock *poll_ccb) 1827 { 1828 struct MessageUnit_B *reg = (struct MessageUnit_B *)acb->pmu; 1829 struct CommandControlBlock *ccb; 1830 uint32_t flag_ccb, poll_ccb_done = 0, poll_count = 0; 1831 int index; 1832 1833 polling_hbb_ccb_retry: 1834 poll_count++; 1835 /* clear doorbell interrupt */ 1836 writel(ARCMSR_DOORBELL_INT_CLEAR_PATTERN, reg->iop2drv_doorbell_reg); 1837 while (1) { 1838 index = reg->doneq_index; 1839 if ((flag_ccb = readl(®->done_qbuffer[index])) == 0) { 1840 if (poll_ccb_done) 1841 break; 1842 else { 1843 msleep(25); 1844 if (poll_count > 100) 1845 break; 1846 goto polling_hbb_ccb_retry; 1847 } 1848 } 1849 writel(0, ®->done_qbuffer[index]); 1850 index++; 1851 /*if last index number set it to 0 */ 1852 index %= ARCMSR_MAX_HBB_POSTQUEUE; 1853 reg->doneq_index = index; 1854 /* check ifcommand done with no error*/ 1855 ccb = (struct CommandControlBlock *)\ 1856 (acb->vir2phy_offset + (flag_ccb << 5));/*frame must be 32 bytes aligned*/ 1857 poll_ccb_done 
= (ccb == poll_ccb) ? 1:0; 1858 if ((ccb->acb != acb) || (ccb->startdone != ARCMSR_CCB_START)) { 1859 if (ccb->startdone == ARCMSR_CCB_ABORTED) { 1860 printk(KERN_NOTICE "arcmsr%d: \ 1861 scsi id = %d lun = %d ccb = '0x%p' poll command abort successfully \n" 1862 ,acb->host->host_no 1863 ,ccb->pcmd->device->id 1864 ,ccb->pcmd->device->lun 1865 ,ccb); 1866 ccb->pcmd->result = DID_ABORT << 16; 1867 arcmsr_ccb_complete(ccb, 1); 1868 continue; 1869 } 1870 printk(KERN_NOTICE "arcmsr%d: polling get an illegal ccb" 1871 " command done ccb = '0x%p'" 1872 "ccboutstandingcount = %d \n" 1873 , acb->host->host_no 1874 , ccb 1875 , atomic_read(&acb->ccboutstandingcount)); 1876 continue; 1877 } 1878 arcmsr_report_ccb_state(acb, ccb, flag_ccb); 1879 } /*drain reply FIFO*/ 1880 } 1881 1882 static void arcmsr_polling_ccbdone(struct AdapterControlBlock *acb, \ 1883 struct CommandControlBlock *poll_ccb) 1884 { 1885 switch (acb->adapter_type) { 1886 1887 case ACB_ADAPTER_TYPE_A: { 1888 arcmsr_polling_hba_ccbdone(acb,poll_ccb); 1889 } 1890 break; 1891 1892 case ACB_ADAPTER_TYPE_B: { 1893 arcmsr_polling_hbb_ccbdone(acb,poll_ccb); 1894 } 1895 } 1896 } 1897 1898 static int arcmsr_iop_confirm(struct AdapterControlBlock *acb) 1899 { 1900 uint32_t cdb_phyaddr, ccb_phyaddr_hi32; 1901 dma_addr_t dma_coherent_handle; 1902 /* 1903 ******************************************************************** 1904 ** here we need to tell iop 331 our freeccb.HighPart 1905 ** if freeccb.HighPart is not zero 1906 ******************************************************************** 1907 */ 1908 dma_coherent_handle = acb->dma_coherent_handle; 1909 cdb_phyaddr = (uint32_t)(dma_coherent_handle); 1910 ccb_phyaddr_hi32 = (uint32_t)((cdb_phyaddr >> 16) >> 16); 1911 /* 1912 *********************************************************************** 1913 ** if adapter type B, set window of "post command Q" 1914 *********************************************************************** 1915 */ 1916 switch (acb->adapter_type) 
{ 1917 1918 case ACB_ADAPTER_TYPE_A: { 1919 if (ccb_phyaddr_hi32 != 0) { 1920 struct MessageUnit_A __iomem *reg = \ 1921 (struct MessageUnit_A *)acb->pmu; 1922 uint32_t intmask_org; 1923 intmask_org = arcmsr_disable_outbound_ints(acb); 1924 writel(ARCMSR_SIGNATURE_SET_CONFIG, \ 1925 ®->message_rwbuffer[0]); 1926 writel(ccb_phyaddr_hi32, ®->message_rwbuffer[1]); 1927 writel(ARCMSR_INBOUND_MESG0_SET_CONFIG, \ 1928 ®->inbound_msgaddr0); 1929 if (arcmsr_hba_wait_msgint_ready(acb)) { 1930 printk(KERN_NOTICE "arcmsr%d: ""set ccb high \ 1931 part physical address timeout\n", 1932 acb->host->host_no); 1933 return 1; 1934 } 1935 arcmsr_enable_outbound_ints(acb, intmask_org); 1936 } 1937 } 1938 break; 1939 1940 case ACB_ADAPTER_TYPE_B: { 1941 unsigned long post_queue_phyaddr; 1942 uint32_t *rwbuffer; 1943 1944 struct MessageUnit_B *reg = (struct MessageUnit_B *)acb->pmu; 1945 uint32_t intmask_org; 1946 intmask_org = arcmsr_disable_outbound_ints(acb); 1947 reg->postq_index = 0; 1948 reg->doneq_index = 0; 1949 writel(ARCMSR_MESSAGE_SET_POST_WINDOW, reg->drv2iop_doorbell_reg); 1950 if (arcmsr_hbb_wait_msgint_ready(acb)) { 1951 printk(KERN_NOTICE "arcmsr%d:can not set diver mode\n", \ 1952 acb->host->host_no); 1953 return 1; 1954 } 1955 post_queue_phyaddr = cdb_phyaddr + ARCMSR_MAX_FREECCB_NUM * \ 1956 sizeof(struct CommandControlBlock) + offsetof(struct MessageUnit_B, post_qbuffer) ; 1957 rwbuffer = reg->msgcode_rwbuffer_reg; 1958 /* driver "set config" signature */ 1959 writel(ARCMSR_SIGNATURE_SET_CONFIG, rwbuffer++); 1960 /* normal should be zero */ 1961 writel(ccb_phyaddr_hi32, rwbuffer++); 1962 /* postQ size (256 + 8)*4 */ 1963 writel(post_queue_phyaddr, rwbuffer++); 1964 /* doneQ size (256 + 8)*4 */ 1965 writel(post_queue_phyaddr + 1056, rwbuffer++); 1966 /* ccb maxQ size must be --> [(256 + 8)*4]*/ 1967 writel(1056, rwbuffer); 1968 1969 writel(ARCMSR_MESSAGE_SET_CONFIG, reg->drv2iop_doorbell_reg); 1970 if (arcmsr_hbb_wait_msgint_ready(acb)) { 1971 printk(KERN_NOTICE 
"arcmsr%d: 'set command Q window' \ 1972 timeout \n",acb->host->host_no); 1973 return 1; 1974 } 1975 1976 writel(ARCMSR_MESSAGE_START_DRIVER_MODE, reg->drv2iop_doorbell_reg); 1977 if (arcmsr_hbb_wait_msgint_ready(acb)) { 1978 printk(KERN_NOTICE "arcmsr%d: 'can not set diver mode \n"\ 1979 ,acb->host->host_no); 1980 return 1; 1981 } 1982 arcmsr_enable_outbound_ints(acb, intmask_org); 1983 } 1984 break; 1985 } 1986 return 0; 1987 } 1988 1989 static void arcmsr_wait_firmware_ready(struct AdapterControlBlock *acb) 1990 { 1991 uint32_t firmware_state = 0; 1992 1993 switch (acb->adapter_type) { 1994 1995 case ACB_ADAPTER_TYPE_A: { 1996 struct MessageUnit_A __iomem *reg = (struct MessageUnit_A *)acb->pmu; 1997 do { 1998 firmware_state = readl(®->outbound_msgaddr1); 1999 } while ((firmware_state & ARCMSR_OUTBOUND_MESG1_FIRMWARE_OK) == 0); 2000 } 2001 break; 2002 2003 case ACB_ADAPTER_TYPE_B: { 2004 struct MessageUnit_B *reg = (struct MessageUnit_B *)acb->pmu; 2005 do { 2006 firmware_state = readl(reg->iop2drv_doorbell_reg); 2007 } while ((firmware_state & ARCMSR_MESSAGE_FIRMWARE_OK) == 0); 2008 } 2009 break; 2010 } 2011 } 2012 2013 static void arcmsr_start_hba_bgrb(struct AdapterControlBlock *acb) 2014 { 2015 struct MessageUnit_A __iomem *reg = (struct MessageUnit_A *)acb->pmu; 2016 acb->acb_flags |= ACB_F_MSG_START_BGRB; 2017 writel(ARCMSR_INBOUND_MESG0_START_BGRB, ®->inbound_msgaddr0); 2018 if (arcmsr_hba_wait_msgint_ready(acb)) { 2019 printk(KERN_NOTICE "arcmsr%d: wait 'start adapter background \ 2020 rebulid' timeout \n", acb->host->host_no); 2021 } 2022 } 2023 2024 static void arcmsr_start_hbb_bgrb(struct AdapterControlBlock *acb) 2025 { 2026 struct MessageUnit_B *reg = (struct MessageUnit_B *)acb->pmu; 2027 acb->acb_flags |= ACB_F_MSG_START_BGRB; 2028 writel(ARCMSR_MESSAGE_START_BGRB, reg->drv2iop_doorbell_reg); 2029 if (arcmsr_hbb_wait_msgint_ready(acb)) { 2030 printk(KERN_NOTICE "arcmsr%d: wait 'start adapter background \ 2031 rebulid' timeout 
\n",acb->host->host_no); 2032 } 2033 } 2034 2035 static void arcmsr_start_adapter_bgrb(struct AdapterControlBlock *acb) 2036 { 2037 switch (acb->adapter_type) { 2038 case ACB_ADAPTER_TYPE_A: 2039 arcmsr_start_hba_bgrb(acb); 2040 break; 2041 case ACB_ADAPTER_TYPE_B: 2042 arcmsr_start_hbb_bgrb(acb); 2043 break; 2044 } 2045 } 2046 2047 static void arcmsr_clear_doorbell_queue_buffer(struct AdapterControlBlock *acb) 2048 { 2049 switch (acb->adapter_type) { 2050 case ACB_ADAPTER_TYPE_A: { 2051 struct MessageUnit_A *reg = (struct MessageUnit_A *)acb->pmu; 2052 uint32_t outbound_doorbell; 2053 /* empty doorbell Qbuffer if door bell ringed */ 2054 outbound_doorbell = readl(®->outbound_doorbell); 2055 /*clear doorbell interrupt */ 2056 writel(outbound_doorbell, ®->outbound_doorbell); 2057 writel(ARCMSR_INBOUND_DRIVER_DATA_READ_OK, ®->inbound_doorbell); 2058 } 2059 break; 2060 2061 case ACB_ADAPTER_TYPE_B: { 2062 struct MessageUnit_B *reg = (struct MessageUnit_B *)acb->pmu; 2063 /*clear interrupt and message state*/ 2064 writel(ARCMSR_MESSAGE_INT_CLEAR_PATTERN, reg->iop2drv_doorbell_reg); 2065 writel(ARCMSR_DRV2IOP_DATA_READ_OK, reg->drv2iop_doorbell_reg); 2066 /* let IOP know data has been read */ 2067 } 2068 break; 2069 } 2070 } 2071 2072 static void arcmsr_iop_init(struct AdapterControlBlock *acb) 2073 { 2074 uint32_t intmask_org; 2075 2076 arcmsr_wait_firmware_ready(acb); 2077 arcmsr_iop_confirm(acb); 2078 /* disable all outbound interrupt */ 2079 intmask_org = arcmsr_disable_outbound_ints(acb); 2080 arcmsr_get_firmware_spec(acb); 2081 /*start background rebuild*/ 2082 arcmsr_start_adapter_bgrb(acb); 2083 /* empty doorbell Qbuffer if door bell ringed */ 2084 arcmsr_clear_doorbell_queue_buffer(acb); 2085 /* enable outbound Post Queue,outbound doorbell Interrupt */ 2086 arcmsr_enable_outbound_ints(acb, intmask_org); 2087 acb->acb_flags |= ACB_F_IOP_INITED; 2088 } 2089 2090 static void arcmsr_iop_reset(struct AdapterControlBlock *acb) 2091 { 2092 struct CommandControlBlock 
*ccb; 2093 uint32_t intmask_org; 2094 int i = 0; 2095 2096 if (atomic_read(&acb->ccboutstandingcount) != 0) { 2097 /* talk to iop 331 outstanding command aborted */ 2098 arcmsr_abort_allcmd(acb); 2099 2100 /* wait for 3 sec for all command aborted*/ 2101 ssleep(3); 2102 2103 /* disable all outbound interrupt */ 2104 intmask_org = arcmsr_disable_outbound_ints(acb); 2105 /* clear all outbound posted Q */ 2106 arcmsr_done4abort_postqueue(acb); 2107 for (i = 0; i < ARCMSR_MAX_FREECCB_NUM; i++) { 2108 ccb = acb->pccb_pool[i]; 2109 if (ccb->startdone == ARCMSR_CCB_START) { 2110 ccb->startdone = ARCMSR_CCB_ABORTED; 2111 arcmsr_ccb_complete(ccb, 1); 2112 } 2113 } 2114 /* enable all outbound interrupt */ 2115 arcmsr_enable_outbound_ints(acb, intmask_org); 2116 } 2117 } 2118 2119 static int arcmsr_bus_reset(struct scsi_cmnd *cmd) 2120 { 2121 struct AdapterControlBlock *acb = 2122 (struct AdapterControlBlock *)cmd->device->host->hostdata; 2123 int i; 2124 2125 acb->num_resets++; 2126 acb->acb_flags |= ACB_F_BUS_RESET; 2127 for (i = 0; i < 400; i++) { 2128 if (!atomic_read(&acb->ccboutstandingcount)) 2129 break; 2130 arcmsr_interrupt(acb);/* FIXME: need spinlock */ 2131 msleep(25); 2132 } 2133 arcmsr_iop_reset(acb); 2134 acb->acb_flags &= ~ACB_F_BUS_RESET; 2135 return SUCCESS; 2136 } 2137 2138 static void arcmsr_abort_one_cmd(struct AdapterControlBlock *acb, 2139 struct CommandControlBlock *ccb) 2140 { 2141 u32 intmask; 2142 2143 ccb->startdone = ARCMSR_CCB_ABORTED; 2144 2145 /* 2146 ** Wait for 3 sec for all command done. 
2147 */ 2148 ssleep(3); 2149 2150 intmask = arcmsr_disable_outbound_ints(acb); 2151 arcmsr_polling_ccbdone(acb, ccb); 2152 arcmsr_enable_outbound_ints(acb, intmask); 2153 } 2154 2155 static int arcmsr_abort(struct scsi_cmnd *cmd) 2156 { 2157 struct AdapterControlBlock *acb = 2158 (struct AdapterControlBlock *)cmd->device->host->hostdata; 2159 int i = 0; 2160 2161 printk(KERN_NOTICE 2162 "arcmsr%d: abort device command of scsi id = %d lun = %d \n", 2163 acb->host->host_no, cmd->device->id, cmd->device->lun); 2164 acb->num_aborts++; 2165 /* 2166 ************************************************ 2167 ** the all interrupt service routine is locked 2168 ** we need to handle it as soon as possible and exit 2169 ************************************************ 2170 */ 2171 if (!atomic_read(&acb->ccboutstandingcount)) 2172 return SUCCESS; 2173 2174 for (i = 0; i < ARCMSR_MAX_FREECCB_NUM; i++) { 2175 struct CommandControlBlock *ccb = acb->pccb_pool[i]; 2176 if (ccb->startdone == ARCMSR_CCB_START && ccb->pcmd == cmd) { 2177 arcmsr_abort_one_cmd(acb, ccb); 2178 break; 2179 } 2180 } 2181 2182 return SUCCESS; 2183 } 2184 2185 static const char *arcmsr_info(struct Scsi_Host *host) 2186 { 2187 struct AdapterControlBlock *acb = 2188 (struct AdapterControlBlock *) host->hostdata; 2189 static char buf[256]; 2190 char *type; 2191 int raid6 = 1; 2192 2193 switch (acb->pdev->device) { 2194 case PCI_DEVICE_ID_ARECA_1110: 2195 case PCI_DEVICE_ID_ARECA_1200: 2196 case PCI_DEVICE_ID_ARECA_1202: 2197 case PCI_DEVICE_ID_ARECA_1210: 2198 raid6 = 0; 2199 /*FALLTHRU*/ 2200 case PCI_DEVICE_ID_ARECA_1120: 2201 case PCI_DEVICE_ID_ARECA_1130: 2202 case PCI_DEVICE_ID_ARECA_1160: 2203 case PCI_DEVICE_ID_ARECA_1170: 2204 case PCI_DEVICE_ID_ARECA_1201: 2205 case PCI_DEVICE_ID_ARECA_1220: 2206 case PCI_DEVICE_ID_ARECA_1230: 2207 case PCI_DEVICE_ID_ARECA_1260: 2208 case PCI_DEVICE_ID_ARECA_1270: 2209 case PCI_DEVICE_ID_ARECA_1280: 2210 type = "SATA"; 2211 break; 2212 case PCI_DEVICE_ID_ARECA_1380: 2213 
case PCI_DEVICE_ID_ARECA_1381: 2214 case PCI_DEVICE_ID_ARECA_1680: 2215 case PCI_DEVICE_ID_ARECA_1681: 2216 type = "SAS"; 2217 break; 2218 default: 2219 type = "X-TYPE"; 2220 break; 2221 } 2222 sprintf(buf, "Areca %s Host Adapter RAID Controller%s\n %s", 2223 type, raid6 ? "( RAID6 capable)" : "", 2224 ARCMSR_DRIVER_VERSION); 2225 return buf; 2226 } 2227 #ifdef CONFIG_SCSI_ARCMSR_AER 2228 static pci_ers_result_t arcmsr_pci_slot_reset(struct pci_dev *pdev) 2229 { 2230 struct Scsi_Host *host = pci_get_drvdata(pdev); 2231 struct AdapterControlBlock *acb = 2232 (struct AdapterControlBlock *) host->hostdata; 2233 uint32_t intmask_org; 2234 int i, j; 2235 2236 if (pci_enable_device(pdev)) { 2237 return PCI_ERS_RESULT_DISCONNECT; 2238 } 2239 pci_set_master(pdev); 2240 intmask_org = arcmsr_disable_outbound_ints(acb); 2241 acb->acb_flags |= (ACB_F_MESSAGE_WQBUFFER_CLEARED | 2242 ACB_F_MESSAGE_RQBUFFER_CLEARED | 2243 ACB_F_MESSAGE_WQBUFFER_READED); 2244 acb->acb_flags &= ~ACB_F_SCSISTOPADAPTER; 2245 for (i = 0; i < ARCMSR_MAX_TARGETID; i++) 2246 for (j = 0; j < ARCMSR_MAX_TARGETLUN; j++) 2247 acb->devstate[i][j] = ARECA_RAID_GONE; 2248 2249 arcmsr_wait_firmware_ready(acb); 2250 arcmsr_iop_confirm(acb); 2251 /* disable all outbound interrupt */ 2252 arcmsr_get_firmware_spec(acb); 2253 /*start background rebuild*/ 2254 arcmsr_start_adapter_bgrb(acb); 2255 /* empty doorbell Qbuffer if door bell ringed */ 2256 arcmsr_clear_doorbell_queue_buffer(acb); 2257 /* enable outbound Post Queue,outbound doorbell Interrupt */ 2258 arcmsr_enable_outbound_ints(acb, intmask_org); 2259 acb->acb_flags |= ACB_F_IOP_INITED; 2260 2261 pci_enable_pcie_error_reporting(pdev); 2262 return PCI_ERS_RESULT_RECOVERED; 2263 } 2264 2265 static void arcmsr_pci_ers_need_reset_forepart(struct pci_dev *pdev) 2266 { 2267 struct Scsi_Host *host = pci_get_drvdata(pdev); 2268 struct AdapterControlBlock *acb = (struct AdapterControlBlock *)host->hostdata; 2269 struct CommandControlBlock *ccb; 2270 uint32_t 
intmask_org; 2271 int i = 0; 2272 2273 if (atomic_read(&acb->ccboutstandingcount) != 0) { 2274 /* talk to iop 331 outstanding command aborted */ 2275 arcmsr_abort_allcmd(acb); 2276 /* wait for 3 sec for all command aborted*/ 2277 ssleep(3); 2278 /* disable all outbound interrupt */ 2279 intmask_org = arcmsr_disable_outbound_ints(acb); 2280 /* clear all outbound posted Q */ 2281 arcmsr_done4abort_postqueue(acb); 2282 for (i = 0; i < ARCMSR_MAX_FREECCB_NUM; i++) { 2283 ccb = acb->pccb_pool[i]; 2284 if (ccb->startdone == ARCMSR_CCB_START) { 2285 ccb->startdone = ARCMSR_CCB_ABORTED; 2286 arcmsr_ccb_complete(ccb, 1); 2287 } 2288 } 2289 /* enable all outbound interrupt */ 2290 arcmsr_enable_outbound_ints(acb, intmask_org); 2291 } 2292 pci_disable_device(pdev); 2293 } 2294 2295 static void arcmsr_pci_ers_disconnect_forepart(struct pci_dev *pdev) 2296 { 2297 struct Scsi_Host *host = pci_get_drvdata(pdev); 2298 struct AdapterControlBlock *acb = \ 2299 (struct AdapterControlBlock *)host->hostdata; 2300 2301 arcmsr_stop_adapter_bgrb(acb); 2302 arcmsr_flush_adapter_cache(acb); 2303 } 2304 2305 static pci_ers_result_t arcmsr_pci_error_detected(struct pci_dev *pdev, 2306 pci_channel_state_t state) 2307 { 2308 switch (state) { 2309 case pci_channel_io_frozen: 2310 arcmsr_pci_ers_need_reset_forepart(pdev); 2311 return PCI_ERS_RESULT_NEED_RESET; 2312 case pci_channel_io_perm_failure: 2313 arcmsr_pci_ers_disconnect_forepart(pdev); 2314 return PCI_ERS_RESULT_DISCONNECT; 2315 break; 2316 default: 2317 return PCI_ERS_RESULT_NEED_RESET; 2318 } 2319 } 2320 #endif 2321