1 /* 2 ******************************************************************************* 3 ** O.S : Linux 4 ** FILE NAME : arcmsr_hba.c 5 ** BY : Nick Cheng, C.L. Huang 6 ** Description: SCSI RAID Device Driver for Areca RAID Controller 7 ******************************************************************************* 8 ** Copyright (C) 2002 - 2014, Areca Technology Corporation All rights reserved 9 ** 10 ** Web site: www.areca.com.tw 11 ** E-mail: support@areca.com.tw 12 ** 13 ** This program is free software; you can redistribute it and/or modify 14 ** it under the terms of the GNU General Public License version 2 as 15 ** published by the Free Software Foundation. 16 ** This program is distributed in the hope that it will be useful, 17 ** but WITHOUT ANY WARRANTY; without even the implied warranty of 18 ** MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 19 ** GNU General Public License for more details. 20 ******************************************************************************* 21 ** Redistribution and use in source and binary forms, with or without 22 ** modification, are permitted provided that the following conditions 23 ** are met: 24 ** 1. Redistributions of source code must retain the above copyright 25 ** notice, this list of conditions and the following disclaimer. 26 ** 2. Redistributions in binary form must reproduce the above copyright 27 ** notice, this list of conditions and the following disclaimer in the 28 ** documentation and/or other materials provided with the distribution. 29 ** 3. The name of the author may not be used to endorse or promote products 30 ** derived from this software without specific prior written permission. 31 ** 32 ** THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR 33 ** IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES 34 ** OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
35 ** IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, 36 ** INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES(INCLUDING,BUT 37 ** NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 38 ** DATA, OR PROFITS; OR BUSINESS INTERRUPTION)HOWEVER CAUSED AND ON ANY 39 ** THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 40 ** (INCLUDING NEGLIGENCE OR OTHERWISE)ARISING IN ANY WAY OUT OF THE USE OF 41 ** THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 42 ******************************************************************************* 43 ** For history of changes, see Documentation/scsi/ChangeLog.arcmsr 44 ** Firmware Specification, see Documentation/scsi/arcmsr_spec.rst 45 ******************************************************************************* 46 */ 47 #include <linux/module.h> 48 #include <linux/reboot.h> 49 #include <linux/spinlock.h> 50 #include <linux/pci_ids.h> 51 #include <linux/interrupt.h> 52 #include <linux/moduleparam.h> 53 #include <linux/errno.h> 54 #include <linux/types.h> 55 #include <linux/delay.h> 56 #include <linux/dma-mapping.h> 57 #include <linux/timer.h> 58 #include <linux/slab.h> 59 #include <linux/pci.h> 60 #include <linux/circ_buf.h> 61 #include <asm/dma.h> 62 #include <asm/io.h> 63 #include <linux/uaccess.h> 64 #include <scsi/scsi_host.h> 65 #include <scsi/scsi.h> 66 #include <scsi/scsi_cmnd.h> 67 #include <scsi/scsi_tcq.h> 68 #include <scsi/scsi_device.h> 69 #include <scsi/scsi_transport.h> 70 #include <scsi/scsicam.h> 71 #include "arcmsr.h" 72 MODULE_AUTHOR("Nick Cheng, C.L. 
Huang <support@areca.com.tw>"); 73 MODULE_DESCRIPTION("Areca ARC11xx/12xx/16xx/188x SAS/SATA RAID Controller Driver"); 74 MODULE_LICENSE("Dual BSD/GPL"); 75 MODULE_VERSION(ARCMSR_DRIVER_VERSION); 76 77 static int msix_enable = 1; 78 module_param(msix_enable, int, S_IRUGO); 79 MODULE_PARM_DESC(msix_enable, "Enable MSI-X interrupt(0 ~ 1), msix_enable=1(enable), =0(disable)"); 80 81 static int msi_enable = 1; 82 module_param(msi_enable, int, S_IRUGO); 83 MODULE_PARM_DESC(msi_enable, "Enable MSI interrupt(0 ~ 1), msi_enable=1(enable), =0(disable)"); 84 85 static int host_can_queue = ARCMSR_DEFAULT_OUTSTANDING_CMD; 86 module_param(host_can_queue, int, S_IRUGO); 87 MODULE_PARM_DESC(host_can_queue, " adapter queue depth(32 ~ 1024), default is 128"); 88 89 static int cmd_per_lun = ARCMSR_DEFAULT_CMD_PERLUN; 90 module_param(cmd_per_lun, int, S_IRUGO); 91 MODULE_PARM_DESC(cmd_per_lun, " device queue depth(1 ~ 128), default is 32"); 92 93 static int dma_mask_64 = 0; 94 module_param(dma_mask_64, int, S_IRUGO); 95 MODULE_PARM_DESC(dma_mask_64, " set DMA mask to 64 bits(0 ~ 1), dma_mask_64=1(64 bits), =0(32 bits)"); 96 97 static int set_date_time = 0; 98 module_param(set_date_time, int, S_IRUGO); 99 MODULE_PARM_DESC(set_date_time, " send date, time to iop(0 ~ 1), set_date_time=1(enable), default(=0) is disable"); 100 101 static int cmd_timeout = ARCMSR_DEFAULT_TIMEOUT; 102 module_param(cmd_timeout, int, S_IRUGO); 103 MODULE_PARM_DESC(cmd_timeout, " scsi cmd timeout(0 ~ 120 sec.), default is 90"); 104 105 #define ARCMSR_SLEEPTIME 10 106 #define ARCMSR_RETRYCOUNT 12 107 108 static wait_queue_head_t wait_q; 109 static int arcmsr_iop_message_xfer(struct AdapterControlBlock *acb, 110 struct scsi_cmnd *cmd); 111 static int arcmsr_iop_confirm(struct AdapterControlBlock *acb); 112 static int arcmsr_abort(struct scsi_cmnd *); 113 static int arcmsr_bus_reset(struct scsi_cmnd *); 114 static int arcmsr_bios_param(struct scsi_device *sdev, 115 struct block_device *bdev, sector_t capacity, 
int *info); 116 static int arcmsr_queue_command(struct Scsi_Host *h, struct scsi_cmnd *cmd); 117 static int arcmsr_probe(struct pci_dev *pdev, 118 const struct pci_device_id *id); 119 static int __maybe_unused arcmsr_suspend(struct device *dev); 120 static int __maybe_unused arcmsr_resume(struct device *dev); 121 static void arcmsr_remove(struct pci_dev *pdev); 122 static void arcmsr_shutdown(struct pci_dev *pdev); 123 static void arcmsr_iop_init(struct AdapterControlBlock *acb); 124 static void arcmsr_free_ccb_pool(struct AdapterControlBlock *acb); 125 static u32 arcmsr_disable_outbound_ints(struct AdapterControlBlock *acb); 126 static void arcmsr_enable_outbound_ints(struct AdapterControlBlock *acb, 127 u32 intmask_org); 128 static void arcmsr_stop_adapter_bgrb(struct AdapterControlBlock *acb); 129 static void arcmsr_hbaA_flush_cache(struct AdapterControlBlock *acb); 130 static void arcmsr_hbaB_flush_cache(struct AdapterControlBlock *acb); 131 static void arcmsr_request_device_map(struct timer_list *t); 132 static void arcmsr_message_isr_bh_fn(struct work_struct *work); 133 static bool arcmsr_get_firmware_spec(struct AdapterControlBlock *acb); 134 static void arcmsr_start_adapter_bgrb(struct AdapterControlBlock *acb); 135 static void arcmsr_hbaC_message_isr(struct AdapterControlBlock *pACB); 136 static void arcmsr_hbaD_message_isr(struct AdapterControlBlock *acb); 137 static void arcmsr_hbaE_message_isr(struct AdapterControlBlock *acb); 138 static void arcmsr_hbaE_postqueue_isr(struct AdapterControlBlock *acb); 139 static void arcmsr_hbaF_postqueue_isr(struct AdapterControlBlock *acb); 140 static void arcmsr_hardware_reset(struct AdapterControlBlock *acb); 141 static const char *arcmsr_info(struct Scsi_Host *); 142 static irqreturn_t arcmsr_interrupt(struct AdapterControlBlock *acb); 143 static void arcmsr_free_irq(struct pci_dev *, struct AdapterControlBlock *); 144 static void arcmsr_wait_firmware_ready(struct AdapterControlBlock *acb); 145 static void 
arcmsr_set_iop_datetime(struct timer_list *); 146 static int arcmsr_slave_config(struct scsi_device *sdev); 147 static int arcmsr_adjust_disk_queue_depth(struct scsi_device *sdev, int queue_depth) 148 { 149 if (queue_depth > ARCMSR_MAX_CMD_PERLUN) 150 queue_depth = ARCMSR_MAX_CMD_PERLUN; 151 return scsi_change_queue_depth(sdev, queue_depth); 152 } 153 154 static const struct scsi_host_template arcmsr_scsi_host_template = { 155 .module = THIS_MODULE, 156 .proc_name = ARCMSR_NAME, 157 .name = "Areca SAS/SATA RAID driver", 158 .info = arcmsr_info, 159 .queuecommand = arcmsr_queue_command, 160 .eh_abort_handler = arcmsr_abort, 161 .eh_bus_reset_handler = arcmsr_bus_reset, 162 .bios_param = arcmsr_bios_param, 163 .slave_configure = arcmsr_slave_config, 164 .change_queue_depth = arcmsr_adjust_disk_queue_depth, 165 .can_queue = ARCMSR_DEFAULT_OUTSTANDING_CMD, 166 .this_id = ARCMSR_SCSI_INITIATOR_ID, 167 .sg_tablesize = ARCMSR_DEFAULT_SG_ENTRIES, 168 .max_sectors = ARCMSR_MAX_XFER_SECTORS_C, 169 .cmd_per_lun = ARCMSR_DEFAULT_CMD_PERLUN, 170 .shost_groups = arcmsr_host_groups, 171 .no_write_same = 1, 172 }; 173 174 static struct pci_device_id arcmsr_device_id_table[] = { 175 {PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1110), 176 .driver_data = ACB_ADAPTER_TYPE_A}, 177 {PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1120), 178 .driver_data = ACB_ADAPTER_TYPE_A}, 179 {PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1130), 180 .driver_data = ACB_ADAPTER_TYPE_A}, 181 {PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1160), 182 .driver_data = ACB_ADAPTER_TYPE_A}, 183 {PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1170), 184 .driver_data = ACB_ADAPTER_TYPE_A}, 185 {PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1200), 186 .driver_data = ACB_ADAPTER_TYPE_B}, 187 {PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1201), 188 .driver_data = ACB_ADAPTER_TYPE_B}, 189 {PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1202), 190 .driver_data = 
ACB_ADAPTER_TYPE_B}, 191 {PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1203), 192 .driver_data = ACB_ADAPTER_TYPE_B}, 193 {PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1210), 194 .driver_data = ACB_ADAPTER_TYPE_A}, 195 {PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1214), 196 .driver_data = ACB_ADAPTER_TYPE_D}, 197 {PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1220), 198 .driver_data = ACB_ADAPTER_TYPE_A}, 199 {PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1230), 200 .driver_data = ACB_ADAPTER_TYPE_A}, 201 {PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1260), 202 .driver_data = ACB_ADAPTER_TYPE_A}, 203 {PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1270), 204 .driver_data = ACB_ADAPTER_TYPE_A}, 205 {PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1280), 206 .driver_data = ACB_ADAPTER_TYPE_A}, 207 {PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1380), 208 .driver_data = ACB_ADAPTER_TYPE_A}, 209 {PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1381), 210 .driver_data = ACB_ADAPTER_TYPE_A}, 211 {PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1680), 212 .driver_data = ACB_ADAPTER_TYPE_A}, 213 {PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1681), 214 .driver_data = ACB_ADAPTER_TYPE_A}, 215 {PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1880), 216 .driver_data = ACB_ADAPTER_TYPE_C}, 217 {PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1884), 218 .driver_data = ACB_ADAPTER_TYPE_E}, 219 {PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1886), 220 .driver_data = ACB_ADAPTER_TYPE_F}, 221 {0, 0}, /* Terminating entry */ 222 }; 223 MODULE_DEVICE_TABLE(pci, arcmsr_device_id_table); 224 225 static SIMPLE_DEV_PM_OPS(arcmsr_pm_ops, arcmsr_suspend, arcmsr_resume); 226 227 static struct pci_driver arcmsr_pci_driver = { 228 .name = "arcmsr", 229 .id_table = arcmsr_device_id_table, 230 .probe = arcmsr_probe, 231 .remove = arcmsr_remove, 232 .driver.pm = &arcmsr_pm_ops, 233 .shutdown = arcmsr_shutdown, 
234 }; 235 /* 236 **************************************************************************** 237 **************************************************************************** 238 */ 239 240 static void arcmsr_free_io_queue(struct AdapterControlBlock *acb) 241 { 242 switch (acb->adapter_type) { 243 case ACB_ADAPTER_TYPE_B: 244 case ACB_ADAPTER_TYPE_D: 245 case ACB_ADAPTER_TYPE_E: 246 case ACB_ADAPTER_TYPE_F: 247 dma_free_coherent(&acb->pdev->dev, acb->ioqueue_size, 248 acb->dma_coherent2, acb->dma_coherent_handle2); 249 break; 250 } 251 } 252 253 static bool arcmsr_remap_pciregion(struct AdapterControlBlock *acb) 254 { 255 struct pci_dev *pdev = acb->pdev; 256 switch (acb->adapter_type){ 257 case ACB_ADAPTER_TYPE_A:{ 258 acb->pmuA = ioremap(pci_resource_start(pdev,0), pci_resource_len(pdev,0)); 259 if (!acb->pmuA) { 260 printk(KERN_NOTICE "arcmsr%d: memory mapping region fail \n", acb->host->host_no); 261 return false; 262 } 263 break; 264 } 265 case ACB_ADAPTER_TYPE_B:{ 266 void __iomem *mem_base0, *mem_base1; 267 mem_base0 = ioremap(pci_resource_start(pdev, 0), pci_resource_len(pdev, 0)); 268 if (!mem_base0) { 269 printk(KERN_NOTICE "arcmsr%d: memory mapping region fail \n", acb->host->host_no); 270 return false; 271 } 272 mem_base1 = ioremap(pci_resource_start(pdev, 2), pci_resource_len(pdev, 2)); 273 if (!mem_base1) { 274 iounmap(mem_base0); 275 printk(KERN_NOTICE "arcmsr%d: memory mapping region fail \n", acb->host->host_no); 276 return false; 277 } 278 acb->mem_base0 = mem_base0; 279 acb->mem_base1 = mem_base1; 280 break; 281 } 282 case ACB_ADAPTER_TYPE_C:{ 283 acb->pmuC = ioremap(pci_resource_start(pdev, 1), pci_resource_len(pdev, 1)); 284 if (!acb->pmuC) { 285 printk(KERN_NOTICE "arcmsr%d: memory mapping region fail \n", acb->host->host_no); 286 return false; 287 } 288 if (readl(&acb->pmuC->outbound_doorbell) & ARCMSR_HBCMU_IOP2DRV_MESSAGE_CMD_DONE) { 289 writel(ARCMSR_HBCMU_IOP2DRV_MESSAGE_CMD_DONE_DOORBELL_CLEAR, 
&acb->pmuC->outbound_doorbell_clear);/*clear interrupt*/ 290 return true; 291 } 292 break; 293 } 294 case ACB_ADAPTER_TYPE_D: { 295 void __iomem *mem_base0; 296 unsigned long addr, range; 297 298 addr = (unsigned long)pci_resource_start(pdev, 0); 299 range = pci_resource_len(pdev, 0); 300 mem_base0 = ioremap(addr, range); 301 if (!mem_base0) { 302 pr_notice("arcmsr%d: memory mapping region fail\n", 303 acb->host->host_no); 304 return false; 305 } 306 acb->mem_base0 = mem_base0; 307 break; 308 } 309 case ACB_ADAPTER_TYPE_E: { 310 acb->pmuE = ioremap(pci_resource_start(pdev, 1), 311 pci_resource_len(pdev, 1)); 312 if (!acb->pmuE) { 313 pr_notice("arcmsr%d: memory mapping region fail \n", 314 acb->host->host_no); 315 return false; 316 } 317 writel(0, &acb->pmuE->host_int_status); /*clear interrupt*/ 318 writel(ARCMSR_HBEMU_DOORBELL_SYNC, &acb->pmuE->iobound_doorbell); /* synchronize doorbell to 0 */ 319 acb->in_doorbell = 0; 320 acb->out_doorbell = 0; 321 break; 322 } 323 case ACB_ADAPTER_TYPE_F: { 324 acb->pmuF = ioremap(pci_resource_start(pdev, 0), pci_resource_len(pdev, 0)); 325 if (!acb->pmuF) { 326 pr_notice("arcmsr%d: memory mapping region fail\n", 327 acb->host->host_no); 328 return false; 329 } 330 writel(0, &acb->pmuF->host_int_status); /* clear interrupt */ 331 writel(ARCMSR_HBFMU_DOORBELL_SYNC, &acb->pmuF->iobound_doorbell); 332 acb->in_doorbell = 0; 333 acb->out_doorbell = 0; 334 break; 335 } 336 } 337 return true; 338 } 339 340 static void arcmsr_unmap_pciregion(struct AdapterControlBlock *acb) 341 { 342 switch (acb->adapter_type) { 343 case ACB_ADAPTER_TYPE_A: 344 iounmap(acb->pmuA); 345 break; 346 case ACB_ADAPTER_TYPE_B: 347 iounmap(acb->mem_base0); 348 iounmap(acb->mem_base1); 349 break; 350 case ACB_ADAPTER_TYPE_C: 351 iounmap(acb->pmuC); 352 break; 353 case ACB_ADAPTER_TYPE_D: 354 iounmap(acb->mem_base0); 355 break; 356 case ACB_ADAPTER_TYPE_E: 357 iounmap(acb->pmuE); 358 break; 359 case ACB_ADAPTER_TYPE_F: 360 iounmap(acb->pmuF); 361 break; 362 } 
363 } 364 365 static irqreturn_t arcmsr_do_interrupt(int irq, void *dev_id) 366 { 367 irqreturn_t handle_state; 368 struct AdapterControlBlock *acb = dev_id; 369 370 handle_state = arcmsr_interrupt(acb); 371 return handle_state; 372 } 373 374 static int arcmsr_bios_param(struct scsi_device *sdev, 375 struct block_device *bdev, sector_t capacity, int *geom) 376 { 377 int heads, sectors, cylinders, total_capacity; 378 379 if (scsi_partsize(bdev, capacity, geom)) 380 return 0; 381 382 total_capacity = capacity; 383 heads = 64; 384 sectors = 32; 385 cylinders = total_capacity / (heads * sectors); 386 if (cylinders > 1024) { 387 heads = 255; 388 sectors = 63; 389 cylinders = total_capacity / (heads * sectors); 390 } 391 geom[0] = heads; 392 geom[1] = sectors; 393 geom[2] = cylinders; 394 return 0; 395 } 396 397 static uint8_t arcmsr_hbaA_wait_msgint_ready(struct AdapterControlBlock *acb) 398 { 399 struct MessageUnit_A __iomem *reg = acb->pmuA; 400 int i; 401 402 for (i = 0; i < 2000; i++) { 403 if (readl(®->outbound_intstatus) & 404 ARCMSR_MU_OUTBOUND_MESSAGE0_INT) { 405 writel(ARCMSR_MU_OUTBOUND_MESSAGE0_INT, 406 ®->outbound_intstatus); 407 return true; 408 } 409 msleep(10); 410 } /* max 20 seconds */ 411 412 return false; 413 } 414 415 static uint8_t arcmsr_hbaB_wait_msgint_ready(struct AdapterControlBlock *acb) 416 { 417 struct MessageUnit_B *reg = acb->pmuB; 418 int i; 419 420 for (i = 0; i < 2000; i++) { 421 if (readl(reg->iop2drv_doorbell) 422 & ARCMSR_IOP2DRV_MESSAGE_CMD_DONE) { 423 writel(ARCMSR_MESSAGE_INT_CLEAR_PATTERN, 424 reg->iop2drv_doorbell); 425 writel(ARCMSR_DRV2IOP_END_OF_INTERRUPT, 426 reg->drv2iop_doorbell); 427 return true; 428 } 429 msleep(10); 430 } /* max 20 seconds */ 431 432 return false; 433 } 434 435 static uint8_t arcmsr_hbaC_wait_msgint_ready(struct AdapterControlBlock *pACB) 436 { 437 struct MessageUnit_C __iomem *phbcmu = pACB->pmuC; 438 int i; 439 440 for (i = 0; i < 2000; i++) { 441 if (readl(&phbcmu->outbound_doorbell) 442 & 
ARCMSR_HBCMU_IOP2DRV_MESSAGE_CMD_DONE) { 443 writel(ARCMSR_HBCMU_IOP2DRV_MESSAGE_CMD_DONE_DOORBELL_CLEAR, 444 &phbcmu->outbound_doorbell_clear); /*clear interrupt*/ 445 return true; 446 } 447 msleep(10); 448 } /* max 20 seconds */ 449 450 return false; 451 } 452 453 static bool arcmsr_hbaD_wait_msgint_ready(struct AdapterControlBlock *pACB) 454 { 455 struct MessageUnit_D *reg = pACB->pmuD; 456 int i; 457 458 for (i = 0; i < 2000; i++) { 459 if (readl(reg->outbound_doorbell) 460 & ARCMSR_ARC1214_IOP2DRV_MESSAGE_CMD_DONE) { 461 writel(ARCMSR_ARC1214_IOP2DRV_MESSAGE_CMD_DONE, 462 reg->outbound_doorbell); 463 return true; 464 } 465 msleep(10); 466 } /* max 20 seconds */ 467 return false; 468 } 469 470 static bool arcmsr_hbaE_wait_msgint_ready(struct AdapterControlBlock *pACB) 471 { 472 int i; 473 uint32_t read_doorbell; 474 struct MessageUnit_E __iomem *phbcmu = pACB->pmuE; 475 476 for (i = 0; i < 2000; i++) { 477 read_doorbell = readl(&phbcmu->iobound_doorbell); 478 if ((read_doorbell ^ pACB->in_doorbell) & ARCMSR_HBEMU_IOP2DRV_MESSAGE_CMD_DONE) { 479 writel(0, &phbcmu->host_int_status); /*clear interrupt*/ 480 pACB->in_doorbell = read_doorbell; 481 return true; 482 } 483 msleep(10); 484 } /* max 20 seconds */ 485 return false; 486 } 487 488 static void arcmsr_hbaA_flush_cache(struct AdapterControlBlock *acb) 489 { 490 struct MessageUnit_A __iomem *reg = acb->pmuA; 491 int retry_count = 30; 492 writel(ARCMSR_INBOUND_MESG0_FLUSH_CACHE, ®->inbound_msgaddr0); 493 do { 494 if (arcmsr_hbaA_wait_msgint_ready(acb)) 495 break; 496 else { 497 retry_count--; 498 printk(KERN_NOTICE "arcmsr%d: wait 'flush adapter cache' \ 499 timeout, retry count down = %d \n", acb->host->host_no, retry_count); 500 } 501 } while (retry_count != 0); 502 } 503 504 static void arcmsr_hbaB_flush_cache(struct AdapterControlBlock *acb) 505 { 506 struct MessageUnit_B *reg = acb->pmuB; 507 int retry_count = 30; 508 writel(ARCMSR_MESSAGE_FLUSH_CACHE, reg->drv2iop_doorbell); 509 do { 510 if 
(arcmsr_hbaB_wait_msgint_ready(acb)) 511 break; 512 else { 513 retry_count--; 514 printk(KERN_NOTICE "arcmsr%d: wait 'flush adapter cache' \ 515 timeout,retry count down = %d \n", acb->host->host_no, retry_count); 516 } 517 } while (retry_count != 0); 518 } 519 520 static void arcmsr_hbaC_flush_cache(struct AdapterControlBlock *pACB) 521 { 522 struct MessageUnit_C __iomem *reg = pACB->pmuC; 523 int retry_count = 30;/* enlarge wait flush adapter cache time: 10 minute */ 524 writel(ARCMSR_INBOUND_MESG0_FLUSH_CACHE, ®->inbound_msgaddr0); 525 writel(ARCMSR_HBCMU_DRV2IOP_MESSAGE_CMD_DONE, ®->inbound_doorbell); 526 do { 527 if (arcmsr_hbaC_wait_msgint_ready(pACB)) { 528 break; 529 } else { 530 retry_count--; 531 printk(KERN_NOTICE "arcmsr%d: wait 'flush adapter cache' \ 532 timeout,retry count down = %d \n", pACB->host->host_no, retry_count); 533 } 534 } while (retry_count != 0); 535 return; 536 } 537 538 static void arcmsr_hbaD_flush_cache(struct AdapterControlBlock *pACB) 539 { 540 int retry_count = 15; 541 struct MessageUnit_D *reg = pACB->pmuD; 542 543 writel(ARCMSR_INBOUND_MESG0_FLUSH_CACHE, reg->inbound_msgaddr0); 544 do { 545 if (arcmsr_hbaD_wait_msgint_ready(pACB)) 546 break; 547 548 retry_count--; 549 pr_notice("arcmsr%d: wait 'flush adapter " 550 "cache' timeout, retry count down = %d\n", 551 pACB->host->host_no, retry_count); 552 } while (retry_count != 0); 553 } 554 555 static void arcmsr_hbaE_flush_cache(struct AdapterControlBlock *pACB) 556 { 557 int retry_count = 30; 558 struct MessageUnit_E __iomem *reg = pACB->pmuE; 559 560 writel(ARCMSR_INBOUND_MESG0_FLUSH_CACHE, ®->inbound_msgaddr0); 561 pACB->out_doorbell ^= ARCMSR_HBEMU_DRV2IOP_MESSAGE_CMD_DONE; 562 writel(pACB->out_doorbell, ®->iobound_doorbell); 563 do { 564 if (arcmsr_hbaE_wait_msgint_ready(pACB)) 565 break; 566 retry_count--; 567 pr_notice("arcmsr%d: wait 'flush adapter " 568 "cache' timeout, retry count down = %d\n", 569 pACB->host->host_no, retry_count); 570 } while (retry_count != 0); 571 } 
572 573 static void arcmsr_flush_adapter_cache(struct AdapterControlBlock *acb) 574 { 575 switch (acb->adapter_type) { 576 577 case ACB_ADAPTER_TYPE_A: 578 arcmsr_hbaA_flush_cache(acb); 579 break; 580 case ACB_ADAPTER_TYPE_B: 581 arcmsr_hbaB_flush_cache(acb); 582 break; 583 case ACB_ADAPTER_TYPE_C: 584 arcmsr_hbaC_flush_cache(acb); 585 break; 586 case ACB_ADAPTER_TYPE_D: 587 arcmsr_hbaD_flush_cache(acb); 588 break; 589 case ACB_ADAPTER_TYPE_E: 590 case ACB_ADAPTER_TYPE_F: 591 arcmsr_hbaE_flush_cache(acb); 592 break; 593 } 594 } 595 596 static void arcmsr_hbaB_assign_regAddr(struct AdapterControlBlock *acb) 597 { 598 struct MessageUnit_B *reg = acb->pmuB; 599 600 if (acb->pdev->device == PCI_DEVICE_ID_ARECA_1203) { 601 reg->drv2iop_doorbell = MEM_BASE0(ARCMSR_DRV2IOP_DOORBELL_1203); 602 reg->drv2iop_doorbell_mask = MEM_BASE0(ARCMSR_DRV2IOP_DOORBELL_MASK_1203); 603 reg->iop2drv_doorbell = MEM_BASE0(ARCMSR_IOP2DRV_DOORBELL_1203); 604 reg->iop2drv_doorbell_mask = MEM_BASE0(ARCMSR_IOP2DRV_DOORBELL_MASK_1203); 605 } else { 606 reg->drv2iop_doorbell= MEM_BASE0(ARCMSR_DRV2IOP_DOORBELL); 607 reg->drv2iop_doorbell_mask = MEM_BASE0(ARCMSR_DRV2IOP_DOORBELL_MASK); 608 reg->iop2drv_doorbell = MEM_BASE0(ARCMSR_IOP2DRV_DOORBELL); 609 reg->iop2drv_doorbell_mask = MEM_BASE0(ARCMSR_IOP2DRV_DOORBELL_MASK); 610 } 611 reg->message_wbuffer = MEM_BASE1(ARCMSR_MESSAGE_WBUFFER); 612 reg->message_rbuffer = MEM_BASE1(ARCMSR_MESSAGE_RBUFFER); 613 reg->message_rwbuffer = MEM_BASE1(ARCMSR_MESSAGE_RWBUFFER); 614 } 615 616 static void arcmsr_hbaD_assign_regAddr(struct AdapterControlBlock *acb) 617 { 618 struct MessageUnit_D *reg = acb->pmuD; 619 620 reg->chip_id = MEM_BASE0(ARCMSR_ARC1214_CHIP_ID); 621 reg->cpu_mem_config = MEM_BASE0(ARCMSR_ARC1214_CPU_MEMORY_CONFIGURATION); 622 reg->i2o_host_interrupt_mask = MEM_BASE0(ARCMSR_ARC1214_I2_HOST_INTERRUPT_MASK); 623 reg->sample_at_reset = MEM_BASE0(ARCMSR_ARC1214_SAMPLE_RESET); 624 reg->reset_request = MEM_BASE0(ARCMSR_ARC1214_RESET_REQUEST); 625 
reg->host_int_status = MEM_BASE0(ARCMSR_ARC1214_MAIN_INTERRUPT_STATUS); 626 reg->pcief0_int_enable = MEM_BASE0(ARCMSR_ARC1214_PCIE_F0_INTERRUPT_ENABLE); 627 reg->inbound_msgaddr0 = MEM_BASE0(ARCMSR_ARC1214_INBOUND_MESSAGE0); 628 reg->inbound_msgaddr1 = MEM_BASE0(ARCMSR_ARC1214_INBOUND_MESSAGE1); 629 reg->outbound_msgaddr0 = MEM_BASE0(ARCMSR_ARC1214_OUTBOUND_MESSAGE0); 630 reg->outbound_msgaddr1 = MEM_BASE0(ARCMSR_ARC1214_OUTBOUND_MESSAGE1); 631 reg->inbound_doorbell = MEM_BASE0(ARCMSR_ARC1214_INBOUND_DOORBELL); 632 reg->outbound_doorbell = MEM_BASE0(ARCMSR_ARC1214_OUTBOUND_DOORBELL); 633 reg->outbound_doorbell_enable = MEM_BASE0(ARCMSR_ARC1214_OUTBOUND_DOORBELL_ENABLE); 634 reg->inboundlist_base_low = MEM_BASE0(ARCMSR_ARC1214_INBOUND_LIST_BASE_LOW); 635 reg->inboundlist_base_high = MEM_BASE0(ARCMSR_ARC1214_INBOUND_LIST_BASE_HIGH); 636 reg->inboundlist_write_pointer = MEM_BASE0(ARCMSR_ARC1214_INBOUND_LIST_WRITE_POINTER); 637 reg->outboundlist_base_low = MEM_BASE0(ARCMSR_ARC1214_OUTBOUND_LIST_BASE_LOW); 638 reg->outboundlist_base_high = MEM_BASE0(ARCMSR_ARC1214_OUTBOUND_LIST_BASE_HIGH); 639 reg->outboundlist_copy_pointer = MEM_BASE0(ARCMSR_ARC1214_OUTBOUND_LIST_COPY_POINTER); 640 reg->outboundlist_read_pointer = MEM_BASE0(ARCMSR_ARC1214_OUTBOUND_LIST_READ_POINTER); 641 reg->outboundlist_interrupt_cause = MEM_BASE0(ARCMSR_ARC1214_OUTBOUND_INTERRUPT_CAUSE); 642 reg->outboundlist_interrupt_enable = MEM_BASE0(ARCMSR_ARC1214_OUTBOUND_INTERRUPT_ENABLE); 643 reg->message_wbuffer = MEM_BASE0(ARCMSR_ARC1214_MESSAGE_WBUFFER); 644 reg->message_rbuffer = MEM_BASE0(ARCMSR_ARC1214_MESSAGE_RBUFFER); 645 reg->msgcode_rwbuffer = MEM_BASE0(ARCMSR_ARC1214_MESSAGE_RWBUFFER); 646 } 647 648 static void arcmsr_hbaF_assign_regAddr(struct AdapterControlBlock *acb) 649 { 650 dma_addr_t host_buffer_dma; 651 struct MessageUnit_F __iomem *pmuF; 652 653 memset(acb->dma_coherent2, 0xff, acb->completeQ_size); 654 acb->message_wbuffer = (uint32_t *)round_up((unsigned long)acb->dma_coherent2 + 655 
acb->completeQ_size, 4); 656 acb->message_rbuffer = ((void *)acb->message_wbuffer) + 0x100; 657 acb->msgcode_rwbuffer = ((void *)acb->message_wbuffer) + 0x200; 658 memset((void *)acb->message_wbuffer, 0, MESG_RW_BUFFER_SIZE); 659 host_buffer_dma = round_up(acb->dma_coherent_handle2 + acb->completeQ_size, 4); 660 pmuF = acb->pmuF; 661 /* host buffer low address, bit0:1 all buffer active */ 662 writel(lower_32_bits(host_buffer_dma | 1), &pmuF->inbound_msgaddr0); 663 /* host buffer high address */ 664 writel(upper_32_bits(host_buffer_dma), &pmuF->inbound_msgaddr1); 665 /* set host buffer physical address */ 666 writel(ARCMSR_HBFMU_DOORBELL_SYNC1, &pmuF->iobound_doorbell); 667 } 668 669 static bool arcmsr_alloc_io_queue(struct AdapterControlBlock *acb) 670 { 671 bool rtn = true; 672 void *dma_coherent; 673 dma_addr_t dma_coherent_handle; 674 struct pci_dev *pdev = acb->pdev; 675 676 switch (acb->adapter_type) { 677 case ACB_ADAPTER_TYPE_B: { 678 acb->ioqueue_size = roundup(sizeof(struct MessageUnit_B), 32); 679 dma_coherent = dma_alloc_coherent(&pdev->dev, acb->ioqueue_size, 680 &dma_coherent_handle, GFP_KERNEL); 681 if (!dma_coherent) { 682 pr_notice("arcmsr%d: DMA allocation failed\n", acb->host->host_no); 683 return false; 684 } 685 acb->dma_coherent_handle2 = dma_coherent_handle; 686 acb->dma_coherent2 = dma_coherent; 687 acb->pmuB = (struct MessageUnit_B *)dma_coherent; 688 arcmsr_hbaB_assign_regAddr(acb); 689 } 690 break; 691 case ACB_ADAPTER_TYPE_D: { 692 acb->ioqueue_size = roundup(sizeof(struct MessageUnit_D), 32); 693 dma_coherent = dma_alloc_coherent(&pdev->dev, acb->ioqueue_size, 694 &dma_coherent_handle, GFP_KERNEL); 695 if (!dma_coherent) { 696 pr_notice("arcmsr%d: DMA allocation failed\n", acb->host->host_no); 697 return false; 698 } 699 acb->dma_coherent_handle2 = dma_coherent_handle; 700 acb->dma_coherent2 = dma_coherent; 701 acb->pmuD = (struct MessageUnit_D *)dma_coherent; 702 arcmsr_hbaD_assign_regAddr(acb); 703 } 704 break; 705 case 
ACB_ADAPTER_TYPE_E: { 706 uint32_t completeQ_size; 707 completeQ_size = sizeof(struct deliver_completeQ) * ARCMSR_MAX_HBE_DONEQUEUE + 128; 708 acb->ioqueue_size = roundup(completeQ_size, 32); 709 dma_coherent = dma_alloc_coherent(&pdev->dev, acb->ioqueue_size, 710 &dma_coherent_handle, GFP_KERNEL); 711 if (!dma_coherent){ 712 pr_notice("arcmsr%d: DMA allocation failed\n", acb->host->host_no); 713 return false; 714 } 715 acb->dma_coherent_handle2 = dma_coherent_handle; 716 acb->dma_coherent2 = dma_coherent; 717 acb->pCompletionQ = dma_coherent; 718 acb->completionQ_entry = acb->ioqueue_size / sizeof(struct deliver_completeQ); 719 acb->doneq_index = 0; 720 } 721 break; 722 case ACB_ADAPTER_TYPE_F: { 723 uint32_t QueueDepth; 724 uint32_t depthTbl[] = {256, 512, 1024, 128, 64, 32}; 725 726 arcmsr_wait_firmware_ready(acb); 727 QueueDepth = depthTbl[readl(&acb->pmuF->outbound_msgaddr1) & 7]; 728 acb->completeQ_size = sizeof(struct deliver_completeQ) * QueueDepth + 128; 729 acb->ioqueue_size = roundup(acb->completeQ_size + MESG_RW_BUFFER_SIZE, 32); 730 dma_coherent = dma_alloc_coherent(&pdev->dev, acb->ioqueue_size, 731 &dma_coherent_handle, GFP_KERNEL); 732 if (!dma_coherent) { 733 pr_notice("arcmsr%d: DMA allocation failed\n", acb->host->host_no); 734 return false; 735 } 736 acb->dma_coherent_handle2 = dma_coherent_handle; 737 acb->dma_coherent2 = dma_coherent; 738 acb->pCompletionQ = dma_coherent; 739 acb->completionQ_entry = acb->completeQ_size / sizeof(struct deliver_completeQ); 740 acb->doneq_index = 0; 741 arcmsr_hbaF_assign_regAddr(acb); 742 } 743 break; 744 default: 745 break; 746 } 747 return rtn; 748 } 749 750 static int arcmsr_alloc_ccb_pool(struct AdapterControlBlock *acb) 751 { 752 struct pci_dev *pdev = acb->pdev; 753 void *dma_coherent; 754 dma_addr_t dma_coherent_handle; 755 struct CommandControlBlock *ccb_tmp; 756 int i = 0, j = 0; 757 unsigned long cdb_phyaddr, next_ccb_phy; 758 unsigned long roundup_ccbsize; 759 unsigned long max_xfer_len; 760 unsigned 
long max_sg_entrys; 761 uint32_t firm_config_version, curr_phy_upper32; 762 763 for (i = 0; i < ARCMSR_MAX_TARGETID; i++) 764 for (j = 0; j < ARCMSR_MAX_TARGETLUN; j++) 765 acb->devstate[i][j] = ARECA_RAID_GONE; 766 767 max_xfer_len = ARCMSR_MAX_XFER_LEN; 768 max_sg_entrys = ARCMSR_DEFAULT_SG_ENTRIES; 769 firm_config_version = acb->firm_cfg_version; 770 if((firm_config_version & 0xFF) >= 3){ 771 max_xfer_len = (ARCMSR_CDB_SG_PAGE_LENGTH << ((firm_config_version >> 8) & 0xFF)) * 1024;/* max 4M byte */ 772 max_sg_entrys = (max_xfer_len/4096); 773 } 774 acb->host->max_sectors = max_xfer_len/512; 775 acb->host->sg_tablesize = max_sg_entrys; 776 roundup_ccbsize = roundup(sizeof(struct CommandControlBlock) + (max_sg_entrys - 1) * sizeof(struct SG64ENTRY), 32); 777 acb->uncache_size = roundup_ccbsize * acb->maxFreeCCB; 778 if (acb->adapter_type != ACB_ADAPTER_TYPE_F) 779 acb->uncache_size += acb->ioqueue_size; 780 dma_coherent = dma_alloc_coherent(&pdev->dev, acb->uncache_size, &dma_coherent_handle, GFP_KERNEL); 781 if(!dma_coherent){ 782 printk(KERN_NOTICE "arcmsr%d: dma_alloc_coherent got error\n", acb->host->host_no); 783 return -ENOMEM; 784 } 785 acb->dma_coherent = dma_coherent; 786 acb->dma_coherent_handle = dma_coherent_handle; 787 memset(dma_coherent, 0, acb->uncache_size); 788 acb->ccbsize = roundup_ccbsize; 789 ccb_tmp = dma_coherent; 790 curr_phy_upper32 = upper_32_bits(dma_coherent_handle); 791 acb->vir2phy_offset = (unsigned long)dma_coherent - (unsigned long)dma_coherent_handle; 792 for(i = 0; i < acb->maxFreeCCB; i++){ 793 cdb_phyaddr = (unsigned long)dma_coherent_handle + offsetof(struct CommandControlBlock, arcmsr_cdb); 794 switch (acb->adapter_type) { 795 case ACB_ADAPTER_TYPE_A: 796 case ACB_ADAPTER_TYPE_B: 797 ccb_tmp->cdb_phyaddr = cdb_phyaddr >> 5; 798 break; 799 case ACB_ADAPTER_TYPE_C: 800 case ACB_ADAPTER_TYPE_D: 801 case ACB_ADAPTER_TYPE_E: 802 case ACB_ADAPTER_TYPE_F: 803 ccb_tmp->cdb_phyaddr = cdb_phyaddr; 804 break; 805 } 806 acb->pccb_pool[i] 
		= ccb_tmp;
		ccb_tmp->acb = acb;
		ccb_tmp->smid = (u32)i << 16;
		INIT_LIST_HEAD(&ccb_tmp->list);
		next_ccb_phy = dma_coherent_handle + roundup_ccbsize;
		/* All CCBs must share one upper-32-bit DMA page; if the pool
		 * would cross a 4G boundary, truncate it here and shrink the
		 * host queue depth to match. */
		if (upper_32_bits(next_ccb_phy) != curr_phy_upper32) {
			acb->maxFreeCCB = i;
			acb->host->can_queue = i;
			break;
		}
		else
			list_add_tail(&ccb_tmp->list, &acb->ccb_free_list);
		ccb_tmp = (struct CommandControlBlock *)((unsigned long)ccb_tmp + roundup_ccbsize);
		dma_coherent_handle = next_ccb_phy;
	}
	/* For non-type-F adapters the I/O queue area follows the CCB pool
	 * in the same coherent allocation. */
	if (acb->adapter_type != ACB_ADAPTER_TYPE_F) {
		acb->dma_coherent_handle2 = dma_coherent_handle;
		acb->dma_coherent2 = ccb_tmp;
	}
	switch (acb->adapter_type) {
	case ACB_ADAPTER_TYPE_B:
		acb->pmuB = (struct MessageUnit_B *)acb->dma_coherent2;
		arcmsr_hbaB_assign_regAddr(acb);
		break;
	case ACB_ADAPTER_TYPE_D:
		acb->pmuD = (struct MessageUnit_D *)acb->dma_coherent2;
		arcmsr_hbaD_assign_regAddr(acb);
		break;
	case ACB_ADAPTER_TYPE_E:
		acb->pCompletionQ = acb->dma_coherent2;
		acb->completionQ_entry = acb->ioqueue_size / sizeof(struct deliver_completeQ);
		acb->doneq_index = 0;
		break;
	}
	return 0;
}

/*
 * Bottom-half work handler for the "get config" message interrupt:
 * reads the firmware's device bitmap out of the message unit and
 * hot-adds/removes SCSI devices whose presence bits changed.
 */
static void arcmsr_message_isr_bh_fn(struct work_struct *work)
{
	struct AdapterControlBlock *acb = container_of(work,
		struct AdapterControlBlock, arcmsr_do_message_isr_bh);
	char *acb_dev_map = (char *)acb->device_map;
	uint32_t __iomem *signature = NULL;
	char __iomem *devicemap = NULL;
	int target, lun;
	struct scsi_device *psdev;
	char diff, temp;

	/* Locate the signature word and device map inside the per-adapter
	 * message buffer; offsets 0 and 21 respectively on every family. */
	switch (acb->adapter_type) {
	case ACB_ADAPTER_TYPE_A: {
		struct MessageUnit_A __iomem *reg = acb->pmuA;

		signature = (uint32_t __iomem *)(&reg->message_rwbuffer[0]);
		devicemap = (char __iomem *)(&reg->message_rwbuffer[21]);
		break;
	}
	case ACB_ADAPTER_TYPE_B: {
		struct MessageUnit_B *reg = acb->pmuB;

		signature = (uint32_t __iomem *)(&reg->message_rwbuffer[0]);
		devicemap = (char __iomem *)(&reg->message_rwbuffer[21]);
		break;
	}
	case ACB_ADAPTER_TYPE_C: {
		struct MessageUnit_C __iomem *reg = acb->pmuC;

		signature = (uint32_t __iomem *)(&reg->msgcode_rwbuffer[0]);
		devicemap = (char __iomem *)(&reg->msgcode_rwbuffer[21]);
		break;
	}
	case ACB_ADAPTER_TYPE_D: {
		struct MessageUnit_D *reg = acb->pmuD;

		signature = (uint32_t __iomem *)(&reg->msgcode_rwbuffer[0]);
		devicemap = (char __iomem *)(&reg->msgcode_rwbuffer[21]);
		break;
	}
	case ACB_ADAPTER_TYPE_E: {
		struct MessageUnit_E __iomem *reg = acb->pmuE;

		signature = (uint32_t __iomem *)(&reg->msgcode_rwbuffer[0]);
		devicemap = (char __iomem *)(&reg->msgcode_rwbuffer[21]);
		break;
	}
	case ACB_ADAPTER_TYPE_F: {
		signature = (uint32_t __iomem *)(&acb->msgcode_rwbuffer[0]);
		devicemap = (char __iomem *)(&acb->msgcode_rwbuffer[21]);
		break;
	}
	}
	/* Only act on a reply to our GET_CONFIG request. */
	if (readl(signature) != ARCMSR_SIGNATURE_GET_CONFIG)
		return;
	for (target = 0; target < ARCMSR_MAX_TARGETID - 1;
		target++) {
		temp = readb(devicemap);
		/* XOR old vs. new byte: each set bit is a LUN whose
		 * presence changed since the last scan. */
		diff = (*acb_dev_map) ^ temp;
		if (diff != 0) {
			*acb_dev_map = temp;
			for (lun = 0; lun < ARCMSR_MAX_TARGETLUN;
				lun++) {
				if ((diff & 0x01) == 1 &&
					(temp & 0x01) == 1) {
					/* bit changed and now set: new device */
					scsi_add_device(acb->host,
						0, target, lun);
				} else if ((diff & 0x01) == 1
					&& (temp & 0x01) == 0) {
					/* bit changed and now clear: device gone */
					psdev = scsi_device_lookup(acb->host,
						0, target, lun);
					if (psdev != NULL) {
						scsi_remove_device(psdev);
						scsi_device_put(psdev);
					}
				}
				temp >>= 1;
				diff >>= 1;
			}
		}
		devicemap++;
		acb_dev_map++;
	}
	acb->acb_flags &= ~ACB_F_MSG_GET_CONFIG;
}

/*
 * Allocate and attach interrupt vectors, preferring MSI-X, then MSI,
 * then legacy INTx (module parameters msix_enable/msi_enable gate the
 * first two).  Returns SUCCESS or FAILED (SCSI-style status).
 */
static int
arcmsr_request_irq(struct pci_dev *pdev, struct AdapterControlBlock *acb)
{
	unsigned long flags;
	int nvec, i;

	if (msix_enable == 0)
		goto msi_int0;
	nvec = pci_alloc_irq_vectors(pdev, 1, ARCMST_NUM_MSIX_VECTORS,
			PCI_IRQ_MSIX);
	if (nvec > 0) {
		pr_info("arcmsr%d: msi-x enabled\n", acb->host->host_no);
		flags = 0;
	} else {
msi_int0:
		if (msi_enable == 1) {
			nvec = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_MSI);
			if (nvec == 1) {
				dev_info(&pdev->dev, "msi enabled\n");
				goto msi_int1;
			}
		}
		nvec = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_LEGACY);
		if (nvec < 1)
			return FAILED;
msi_int1:
		/* Legacy/MSI lines may be shared with other devices. */
		flags = IRQF_SHARED;
	}

	acb->vector_count = nvec;
	for (i = 0; i < nvec; i++) {
		if (request_irq(pci_irq_vector(pdev, i), arcmsr_do_interrupt,
				flags, "arcmsr", acb)) {
			pr_warn("arcmsr%d: request_irq =%d failed!\n",
				acb->host->host_no, pci_irq_vector(pdev, i));
			goto out_free_irq;
		}
	}

	return SUCCESS;
out_free_irq:
	/* Unwind only the vectors that were successfully requested. */
	while (--i >= 0)
		free_irq(pci_irq_vector(pdev, i), acb);
	pci_free_irq_vectors(pdev);
	return FAILED;
}

/* Arm the periodic device-map poll (work item + 6 s repeating timer). */
static void arcmsr_init_get_devmap_timer(struct AdapterControlBlock *pacb)
{
	INIT_WORK(&pacb->arcmsr_do_message_isr_bh, arcmsr_message_isr_bh_fn);
	pacb->fw_flag = FW_NORMAL;
	timer_setup(&pacb->eternal_timer, arcmsr_request_device_map, 0);
	pacb->eternal_timer.expires = jiffies + msecs_to_jiffies(6 * HZ);
	add_timer(&pacb->eternal_timer);
}

/* Arm the 60 s timer that pushes the host date/time to the IOP. */
static void arcmsr_init_set_datetime_timer(struct AdapterControlBlock *pacb)
{
	timer_setup(&pacb->refresh_timer, arcmsr_set_iop_datetime, 0);
	pacb->refresh_timer.expires = jiffies + msecs_to_jiffies(60 * 1000);
	add_timer(&pacb->refresh_timer);
}

/*
 * Configure streaming/coherent DMA masks: 64-bit where the platform and
 * adapter allow it (type A additionally gated by the dma_mask_64 module
 * parameter), otherwise fall back to 32-bit.  Returns 0 or -ENXIO.
 */
static int arcmsr_set_dma_mask(struct AdapterControlBlock *acb)
{
	struct pci_dev *pcidev = acb->pdev;

	if (IS_DMA64) {
		if (((acb->adapter_type == ACB_ADAPTER_TYPE_A) && !dma_mask_64) ||
		    dma_set_mask(&pcidev->dev, DMA_BIT_MASK(64)))
			goto	dma32;
		/* Types A/B keep a 32-bit coherent mask; only newer
		 * families get 64-bit coherent DMA. */
		if (acb->adapter_type <= ACB_ADAPTER_TYPE_B)
			return 0;
		if (dma_set_coherent_mask(&pcidev->dev, DMA_BIT_MASK(64)) ||
			dma_set_mask_and_coherent(&pcidev->dev, DMA_BIT_MASK(64))) {
			printk("arcmsr: set DMA 64 mask failed\n");
			return -ENXIO;
		}
	} else {
dma32:
		if (dma_set_mask(&pcidev->dev, DMA_BIT_MASK(32)) ||
			dma_set_coherent_mask(&pcidev->dev, DMA_BIT_MASK(32)) ||
			dma_set_mask_and_coherent(&pcidev->dev, DMA_BIT_MASK(32))) {
			printk("arcmsr: set DMA 32-bit mask failed\n");
			return -ENXIO;
		}
	}
	return 0;
}

/*
 * PCI probe: enable the device, allocate the Scsi_Host, set DMA masks,
 * map BARs, allocate the message-unit I/O queue and CCB pool, register
 * with the SCSI midlayer, hook interrupts, start the firmware and the
 * periodic timers, then scan the bus.  Error paths unwind in strict
 * reverse order via the goto ladder at the bottom.
 */
static int arcmsr_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	struct Scsi_Host *host;
	struct AdapterControlBlock *acb;
	uint8_t bus,dev_fun;
	int error;
	error = pci_enable_device(pdev);
	if(error){
		return -ENODEV;
	}
	host = scsi_host_alloc(&arcmsr_scsi_host_template, sizeof(struct AdapterControlBlock));
	if(!host){
		goto pci_disable_dev;
	}
	init_waitqueue_head(&wait_q);
	bus = pdev->bus->number;
	dev_fun = pdev->devfn;
	/* Per-adapter state lives in the Scsi_Host's hostdata area. */
	acb = (struct AdapterControlBlock *) host->hostdata;
	memset(acb,0,sizeof(struct AdapterControlBlock));
	acb->pdev = pdev;
	/* driver_data in the PCI id table encodes the adapter family. */
	acb->adapter_type = id->driver_data;
	if (arcmsr_set_dma_mask(acb))
		goto scsi_host_release;
	acb->host = host;
	host->max_lun = ARCMSR_MAX_TARGETLUN;
	host->max_id = ARCMSR_MAX_TARGETID;		/*16:8*/
	host->max_cmd_len = 16;	 /*this is issue of 64bit LBA ,over 2T byte*/
	/* Clamp the module parameters into their supported ranges. */
	if ((host_can_queue < ARCMSR_MIN_OUTSTANDING_CMD) || (host_can_queue > ARCMSR_MAX_OUTSTANDING_CMD))
		host_can_queue = ARCMSR_DEFAULT_OUTSTANDING_CMD;
	host->can_queue = host_can_queue;	/* max simultaneous cmds */
	if ((cmd_per_lun < ARCMSR_MIN_CMD_PERLUN) || (cmd_per_lun > ARCMSR_MAX_CMD_PERLUN))
		cmd_per_lun = ARCMSR_DEFAULT_CMD_PERLUN;
	host->cmd_per_lun = cmd_per_lun;
	host->this_id = ARCMSR_SCSI_INITIATOR_ID;
	host->unique_id = (bus << 8) | dev_fun;
	pci_set_drvdata(pdev, host);
	pci_set_master(pdev);
	error = pci_request_regions(pdev, "arcmsr");
	if(error){
		goto scsi_host_release;
	}
	spin_lock_init(&acb->eh_lock);
	spin_lock_init(&acb->ccblist_lock);
	spin_lock_init(&acb->postq_lock);
	spin_lock_init(&acb->doneq_lock);
	spin_lock_init(&acb->rqbuffer_lock);
	spin_lock_init(&acb->wqbuffer_lock);
	acb->acb_flags |= (ACB_F_MESSAGE_WQBUFFER_CLEARED |
			ACB_F_MESSAGE_RQBUFFER_CLEARED |
			ACB_F_MESSAGE_WQBUFFER_READED);
	acb->acb_flags &= ~ACB_F_SCSISTOPADAPTER;
	INIT_LIST_HEAD(&acb->ccb_free_list);
	error = arcmsr_remap_pciregion(acb);
	if(!error){
		goto pci_release_regs;
	}
	error = arcmsr_alloc_io_queue(acb);
	if (!error)
		goto unmap_pci_region;
	error = arcmsr_get_firmware_spec(acb);
	if(!error){
		goto free_hbb_mu;
	}
	/* Non-type-F: the temporary io queue is re-allocated as part of
	 * the CCB pool below, so release it first. */
	if (acb->adapter_type != ACB_ADAPTER_TYPE_F)
		arcmsr_free_io_queue(acb);
	error = arcmsr_alloc_ccb_pool(acb);
	if(error){
		goto unmap_pci_region;
	}
	error = scsi_add_host(host, &pdev->dev);
	if(error){
		goto free_ccb_pool;
	}
	if (arcmsr_request_irq(pdev, acb) == FAILED)
		goto scsi_host_remove;
	arcmsr_iop_init(acb);
	arcmsr_init_get_devmap_timer(acb);
	if (set_date_time)
		arcmsr_init_set_datetime_timer(acb);
	if(arcmsr_alloc_sysfs_attr(acb))
		goto out_free_sysfs;
	scsi_scan_host(host);
	return 0;
out_free_sysfs:
	if (set_date_time)
		del_timer_sync(&acb->refresh_timer);
	del_timer_sync(&acb->eternal_timer);
	flush_work(&acb->arcmsr_do_message_isr_bh);
	arcmsr_stop_adapter_bgrb(acb);
	arcmsr_flush_adapter_cache(acb);
	arcmsr_free_irq(pdev, acb);
scsi_host_remove:
	scsi_remove_host(host);
free_ccb_pool:
	arcmsr_free_ccb_pool(acb);
	goto unmap_pci_region;
free_hbb_mu:
	arcmsr_free_io_queue(acb);
unmap_pci_region:
	arcmsr_unmap_pciregion(acb);
pci_release_regs:
	pci_release_regions(pdev);
scsi_host_release:
	scsi_host_put(host);
pci_disable_dev:
	pci_disable_device(pdev);
	return -ENODEV;
}

/* Release every requested IRQ vector and the vector allocation. */
static void arcmsr_free_irq(struct pci_dev *pdev,
		struct AdapterControlBlock *acb)
{
	int i;

	for (i = 0; i < acb->vector_count; i++)
		free_irq(pci_irq_vector(pdev, i), acb);
	pci_free_irq_vectors(pdev);
}

/*
 * PM suspend: mask adapter interrupts, drop IRQs, stop the periodic
 * timers and pending work, then quiesce the firmware (stop background
 * rebuild, flush the adapter cache).
 */
static int __maybe_unused arcmsr_suspend(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct Scsi_Host *host = pci_get_drvdata(pdev);
	struct AdapterControlBlock *acb =
		(struct AdapterControlBlock *)host->hostdata;

	arcmsr_disable_outbound_ints(acb);
	arcmsr_free_irq(pdev, acb);
	del_timer_sync(&acb->eternal_timer);
	if (set_date_time)
		del_timer_sync(&acb->refresh_timer);
	flush_work(&acb->arcmsr_do_message_isr_bh);
	arcmsr_stop_adapter_bgrb(acb);
	arcmsr_flush_adapter_cache(acb);
	return 0;
}

/*
 * PM resume: re-establish DMA masks and IRQs, reset the per-family
 * post/done queue state, then re-initialize the IOP and restart the
 * timers.  On failure the host is torn down and -ENODEV returned.
 */
static int __maybe_unused arcmsr_resume(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct Scsi_Host *host = pci_get_drvdata(pdev);
	struct AdapterControlBlock *acb =
		(struct AdapterControlBlock *)host->hostdata;

	if (arcmsr_set_dma_mask(acb))
		goto controller_unregister;
	if (arcmsr_request_irq(pdev, acb) == FAILED)
		goto controller_stop;
	switch (acb->adapter_type) {
	case ACB_ADAPTER_TYPE_B: {
		struct MessageUnit_B *reg = acb->pmuB;
		uint32_t i;
		/* Clear stale entries and indices left from before suspend. */
		for (i = 0; i < ARCMSR_MAX_HBB_POSTQUEUE; i++) {
			reg->post_qbuffer[i] = 0;
			reg->done_qbuffer[i] = 0;
		}
		reg->postq_index = 0;
		reg->doneq_index = 0;
		break;
	}
	case ACB_ADAPTER_TYPE_E:
		writel(0, &acb->pmuE->host_int_status);
		writel(ARCMSR_HBEMU_DOORBELL_SYNC, &acb->pmuE->iobound_doorbell);
		acb->in_doorbell = 0;
		acb->out_doorbell = 0;
		acb->doneq_index = 0;
		break;
	case ACB_ADAPTER_TYPE_F:
		writel(0, &acb->pmuF->host_int_status);
		writel(ARCMSR_HBFMU_DOORBELL_SYNC,
			&acb->pmuF->iobound_doorbell);
		acb->in_doorbell = 0;
		acb->out_doorbell = 0;
		acb->doneq_index = 0;
		arcmsr_hbaF_assign_regAddr(acb);
		break;
	}
	arcmsr_iop_init(acb);
	arcmsr_init_get_devmap_timer(acb);
	if (set_date_time)
		arcmsr_init_set_datetime_timer(acb);
	return 0;
controller_stop:
	arcmsr_stop_adapter_bgrb(acb);
	arcmsr_flush_adapter_cache(acb);
controller_unregister:
	scsi_remove_host(host);
	arcmsr_free_ccb_pool(acb);
	if (acb->adapter_type == ACB_ADAPTER_TYPE_F)
		arcmsr_free_io_queue(acb);
	arcmsr_unmap_pciregion(acb);
	scsi_host_put(host);
	return -ENODEV;
}

/*
 * Ask a type-A IOP to abort all outstanding commands via message
 * register 0; returns true on firmware acknowledge, false on timeout.
 */
static uint8_t arcmsr_hbaA_abort_allcmd(struct AdapterControlBlock *acb)
{
	struct MessageUnit_A __iomem *reg = acb->pmuA;
	writel(ARCMSR_INBOUND_MESG0_ABORT_CMD, &reg->inbound_msgaddr0);
	if (!arcmsr_hbaA_wait_msgint_ready(acb)) {
		printk(KERN_NOTICE
			"arcmsr%d: wait 'abort all outstanding command' timeout\n"
			, acb->host->host_no);
		return false;
	}
	return true;
}

/* Type-B variant: abort request goes through the drv2iop doorbell. */
static uint8_t arcmsr_hbaB_abort_allcmd(struct AdapterControlBlock *acb)
{
	struct MessageUnit_B *reg = acb->pmuB;

	writel(ARCMSR_MESSAGE_ABORT_CMD, reg->drv2iop_doorbell);
	if (!arcmsr_hbaB_wait_msgint_ready(acb)) {
		printk(KERN_NOTICE
			"arcmsr%d: wait 'abort all outstanding command' timeout\n"
			, acb->host->host_no);
		return false;
	}
	return true;
}
/* Type-C variant: message register plus a doorbell kick to notify the IOP. */
static uint8_t arcmsr_hbaC_abort_allcmd(struct AdapterControlBlock *pACB)
{
	struct MessageUnit_C __iomem *reg = pACB->pmuC;
	writel(ARCMSR_INBOUND_MESG0_ABORT_CMD, &reg->inbound_msgaddr0);
	writel(ARCMSR_HBCMU_DRV2IOP_MESSAGE_CMD_DONE, &reg->inbound_doorbell);
	if (!arcmsr_hbaC_wait_msgint_ready(pACB)) {
		printk(KERN_NOTICE
			"arcmsr%d: wait 'abort all outstanding command' timeout\n"
			, pACB->host->host_no);
		return false;
	}
	return true;
}

/* Type-D variant of the abort-all-commands message handshake. */
static uint8_t arcmsr_hbaD_abort_allcmd(struct AdapterControlBlock *pACB)
{
	struct MessageUnit_D *reg = pACB->pmuD;

	writel(ARCMSR_INBOUND_MESG0_ABORT_CMD, reg->inbound_msgaddr0);
	if (!arcmsr_hbaD_wait_msgint_ready(pACB)) {
		pr_notice("arcmsr%d: wait 'abort all outstanding "
			"command' timeout\n", pACB->host->host_no);
		return false;
	}
	return true;
}

/* Type-E/F variant: doorbell bit is toggled, not simply set. */
static uint8_t arcmsr_hbaE_abort_allcmd(struct AdapterControlBlock *pACB)
{
	struct MessageUnit_E __iomem *reg = pACB->pmuE;

	writel(ARCMSR_INBOUND_MESG0_ABORT_CMD, &reg->inbound_msgaddr0);
	pACB->out_doorbell ^= ARCMSR_HBEMU_DRV2IOP_MESSAGE_CMD_DONE;
	writel(pACB->out_doorbell, &reg->iobound_doorbell);
	if (!arcmsr_hbaE_wait_msgint_ready(pACB)) {
		pr_notice("arcmsr%d: wait 'abort all outstanding "
			"command' timeout\n", pACB->host->host_no);
		return false;
	}
	return true;
}

/* Dispatch "abort all" to the per-family implementation. */
static uint8_t arcmsr_abort_allcmd(struct AdapterControlBlock *acb)
{
	uint8_t rtnval = 0;
	switch (acb->adapter_type) {
	case ACB_ADAPTER_TYPE_A:
		rtnval = arcmsr_hbaA_abort_allcmd(acb);
		break;
	case ACB_ADAPTER_TYPE_B:
		rtnval = arcmsr_hbaB_abort_allcmd(acb);
		break;
	case ACB_ADAPTER_TYPE_C:
		rtnval = arcmsr_hbaC_abort_allcmd(acb);
		break;
	case ACB_ADAPTER_TYPE_D:
		rtnval = arcmsr_hbaD_abort_allcmd(acb);
		break;
	case ACB_ADAPTER_TYPE_E:
	case ACB_ADAPTER_TYPE_F:
		rtnval = arcmsr_hbaE_abort_allcmd(acb);
		break;
	}
	return rtnval;
}

/*
 * Finish a CCB: unmap its DMA, return it to the free list under
 * ccblist_lock, and complete the SCSI command to the midlayer.
 */
static void arcmsr_ccb_complete(struct CommandControlBlock *ccb)
{
	struct AdapterControlBlock *acb = ccb->acb;
	struct scsi_cmnd *pcmd = ccb->pcmd;
	unsigned long flags;
	atomic_dec(&acb->ccboutstandingcount);
	scsi_dma_unmap(ccb->pcmd);
	ccb->startdone = ARCMSR_CCB_DONE;
	spin_lock_irqsave(&acb->ccblist_lock, flags);
	list_add_tail(&ccb->list, &acb->ccb_free_list);
	spin_unlock_irqrestore(&acb->ccblist_lock, flags);
	scsi_done(pcmd);
}

/*
 * Copy firmware sense data into the command's sense buffer and flag a
 * CHECK CONDITION result.
 */
static void arcmsr_report_sense_info(struct CommandControlBlock *ccb)
{
	struct scsi_cmnd *pcmd = ccb->pcmd;

	pcmd->result = (DID_OK << 16) | SAM_STAT_CHECK_CONDITION;
	if (pcmd->sense_buffer) {
		struct SENSE_DATA *sensebuffer;

		memcpy_and_pad(pcmd->sense_buffer,
			       SCSI_SENSE_BUFFERSIZE,
			       ccb->arcmsr_cdb.SenseData,
			       sizeof(ccb->arcmsr_cdb.SenseData),
			       0);

		sensebuffer = (struct SENSE_DATA *)pcmd->sense_buffer;
		sensebuffer->ErrorCode = SCSI_SENSE_CURRENT_ERRORS;
		sensebuffer->Valid = 1;
	}
}

/*
 * Mask all outbound (IOP -> driver) interrupts and return the previous
 * mask so arcmsr_enable_outbound_ints() can restore it later.  Note:
 * type D returns 0 — its register is write-only here.
 */
static u32 arcmsr_disable_outbound_ints(struct AdapterControlBlock *acb)
{
	u32 orig_mask = 0;
	switch (acb->adapter_type) {
	case ACB_ADAPTER_TYPE_A : {
		struct MessageUnit_A __iomem *reg = acb->pmuA;
		orig_mask = readl(&reg->outbound_intmask);
		writel(orig_mask|ARCMSR_MU_OUTBOUND_ALL_INTMASKENABLE, \
			&reg->outbound_intmask);
		}
		break;
	case ACB_ADAPTER_TYPE_B : {
		struct MessageUnit_B *reg = acb->pmuB;
		orig_mask = readl(reg->iop2drv_doorbell_mask);
		writel(0, reg->iop2drv_doorbell_mask);
		}
		break;
	case ACB_ADAPTER_TYPE_C:{
		struct MessageUnit_C __iomem *reg = acb->pmuC;
		/* disable all outbound interrupt */
		orig_mask = readl(&reg->host_int_mask); /* disable outbound message0 int */
		writel(orig_mask|ARCMSR_HBCMU_ALL_INTMASKENABLE, &reg->host_int_mask);
		}
		break;
	case ACB_ADAPTER_TYPE_D: {
		struct MessageUnit_D *reg = acb->pmuD;
		/* disable all outbound interrupt */
		writel(ARCMSR_ARC1214_ALL_INT_DISABLE, reg->pcief0_int_enable);
		}
		break;
	case ACB_ADAPTER_TYPE_E:
	case ACB_ADAPTER_TYPE_F: {
		struct MessageUnit_E __iomem *reg = acb->pmuE;
		orig_mask = readl(&reg->host_int_mask);
		writel(orig_mask | ARCMSR_HBEMU_OUTBOUND_DOORBELL_ISR | ARCMSR_HBEMU_OUTBOUND_POSTQUEUE_ISR, &reg->host_int_mask);
		readl(&reg->host_int_mask); /* Dummy readl to force pci flush */
		}
		break;
	}
	return orig_mask;
}

/*
 * Translate the firmware completion status of a CCB into a SCSI result
 * and complete it.  Also tracks per-target/LUN RAID presence state in
 * acb->devstate.
 */
static void arcmsr_report_ccb_state(struct AdapterControlBlock *acb,
		struct CommandControlBlock *ccb, bool error)
{
	uint8_t id, lun;
	id = ccb->pcmd->device->id;
	lun = ccb->pcmd->device->lun;
	if (!error) {
		if (acb->devstate[id][lun] == ARECA_RAID_GONE)
			acb->devstate[id][lun] = ARECA_RAID_GOOD;
		ccb->pcmd->result = DID_OK << 16;
		arcmsr_ccb_complete(ccb);
	}else{
		switch (ccb->arcmsr_cdb.DeviceStatus) {
		case ARCMSR_DEV_SELECT_TIMEOUT: {
			acb->devstate[id][lun] = ARECA_RAID_GONE;
			ccb->pcmd->result = DID_NO_CONNECT << 16;
			arcmsr_ccb_complete(ccb);
			}
			break;

		case ARCMSR_DEV_ABORTED:

		case ARCMSR_DEV_INIT_FAIL: {
			acb->devstate[id][lun] = ARECA_RAID_GONE;
			ccb->pcmd->result = DID_BAD_TARGET << 16;
			arcmsr_ccb_complete(ccb);
			}
			break;

		case ARCMSR_DEV_CHECK_CONDITION: {
			acb->devstate[id][lun] = ARECA_RAID_GOOD;
			arcmsr_report_sense_info(ccb);
			arcmsr_ccb_complete(ccb);
			}
			break;

		default:
			printk(KERN_NOTICE
				"arcmsr%d: scsi id = %d lun = %d isr get command error done, \
				but got unknown DeviceStatus = 0x%x \n"
				, acb->host->host_no
				, id
				, lun
				, ccb->arcmsr_cdb.DeviceStatus);
			acb->devstate[id][lun] = ARECA_RAID_GONE;
			ccb->pcmd->result = DID_NO_CONNECT << 16;
			arcmsr_ccb_complete(ccb);
			break;
		}
	}
}

/*
 * Validate a CCB pulled off a done queue before completing it: a CCB
 * that no longer belongs to this adapter, or was not in-flight, is
 * either a late-arriving aborted command (completed with DID_ABORT) or
 * an illegal completion that is only logged.
 */
static void arcmsr_drain_donequeue(struct AdapterControlBlock *acb, struct CommandControlBlock *pCCB, bool error)
{
	if ((pCCB->acb != acb) || (pCCB->startdone != ARCMSR_CCB_START)) {
		if (pCCB->startdone == ARCMSR_CCB_ABORTED) {
			struct scsi_cmnd *abortcmd = pCCB->pcmd;
			if (abortcmd) {
				abortcmd->result |= DID_ABORT << 16;
				arcmsr_ccb_complete(pCCB);
				printk(KERN_NOTICE "arcmsr%d: pCCB ='0x%p' isr got aborted command \n",
					acb->host->host_no, pCCB);
			}
			return;
		}
		printk(KERN_NOTICE "arcmsr%d: isr get an illegal ccb command \
				done acb = '0x%p'"
				"ccb = '0x%p' ccbacb = '0x%p' startdone = 0x%x"
				" ccboutstandingcount = %d \n"
				, acb->host->host_no
				, acb
				, pCCB
				, pCCB->acb
				, pCCB->startdone
				, atomic_read(&acb->ccboutstandingcount));
		return;
	}
	arcmsr_report_ccb_state(acb, pCCB, error);
}

/*
 * Drain every completion the IOP has already posted, used while
 * aborting all outstanding commands.  Each family's done-queue entry
 * carries the CDB's DMA address, which is converted back to the CCB via
 * vir2phy_offset (plus cdb_phyadd_hipart for >4G pools) and completed
 * through arcmsr_drain_donequeue().
 */
static void arcmsr_done4abort_postqueue(struct AdapterControlBlock *acb)
{
	int i = 0;
	uint32_t flag_ccb;
	struct ARCMSR_CDB *pARCMSR_CDB;
	bool error;
	struct CommandControlBlock *pCCB;
	unsigned long ccb_cdb_phy;

	switch (acb->adapter_type) {

	case ACB_ADAPTER_TYPE_A: {
		struct MessageUnit_A __iomem *reg = acb->pmuA;
		uint32_t outbound_intstatus;
		outbound_intstatus = readl(&reg->outbound_intstatus) &
			acb->outbound_int_enable;
		/*clear and abort all outbound posted Q*/
		writel(outbound_intstatus, &reg->outbound_intstatus);/*clear interrupt*/
		/* 0xFFFFFFFF marks an empty queue port; i guards against
		 * spinning forever on a wedged adapter. */
		while(((flag_ccb = readl(&reg->outbound_queueport)) != 0xFFFFFFFF)
				&& (i++ < acb->maxOutstanding)) {
			/* type A stores the CDB address >> 5 in the queue word */
			ccb_cdb_phy = (flag_ccb << 5) & 0xffffffff;
			if (acb->cdb_phyadd_hipart)
				ccb_cdb_phy = ccb_cdb_phy | acb->cdb_phyadd_hipart;
			pARCMSR_CDB = (struct ARCMSR_CDB *)(acb->vir2phy_offset + ccb_cdb_phy);
			pCCB = container_of(pARCMSR_CDB, struct CommandControlBlock, arcmsr_cdb);
			error = (flag_ccb & ARCMSR_CCBREPLY_FLAG_ERROR_MODE0) ? true : false;
			arcmsr_drain_donequeue(acb, pCCB, error);
		}
		}
		break;

	case ACB_ADAPTER_TYPE_B: {
		struct MessageUnit_B *reg = acb->pmuB;
		/*clear all outbound posted Q*/
		writel(ARCMSR_DOORBELL_INT_CLEAR_PATTERN, reg->iop2drv_doorbell); /* clear doorbell interrupt */
		for (i = 0; i < ARCMSR_MAX_HBB_POSTQUEUE; i++) {
			flag_ccb = reg->done_qbuffer[i];
			if (flag_ccb != 0) {
				reg->done_qbuffer[i] = 0;
				ccb_cdb_phy = (flag_ccb << 5) & 0xffffffff;
				if (acb->cdb_phyadd_hipart)
					ccb_cdb_phy = ccb_cdb_phy | acb->cdb_phyadd_hipart;
				pARCMSR_CDB = (struct ARCMSR_CDB *)(acb->vir2phy_offset + ccb_cdb_phy);
				pCCB = container_of(pARCMSR_CDB, struct CommandControlBlock, arcmsr_cdb);
				error = (flag_ccb & ARCMSR_CCBREPLY_FLAG_ERROR_MODE0) ? true : false;
				arcmsr_drain_donequeue(acb, pCCB, error);
			}
			reg->post_qbuffer[i] = 0;
		}
		reg->doneq_index = 0;
		reg->postq_index = 0;
		}
		break;
	case ACB_ADAPTER_TYPE_C: {
		struct MessageUnit_C __iomem *reg = acb->pmuC;
		while ((readl(&reg->host_int_status) & ARCMSR_HBCMU_OUTBOUND_POSTQUEUE_ISR) && (i++ < acb->maxOutstanding)) {
			/*need to do*/
			flag_ccb = readl(&reg->outbound_queueport_low);
			/* low 4 bits of the queue word are status flags */
			ccb_cdb_phy = (flag_ccb & 0xFFFFFFF0);
			if (acb->cdb_phyadd_hipart)
				ccb_cdb_phy = ccb_cdb_phy | acb->cdb_phyadd_hipart;
			pARCMSR_CDB = (struct ARCMSR_CDB *)(acb->vir2phy_offset + ccb_cdb_phy);
			pCCB = container_of(pARCMSR_CDB, struct CommandControlBlock, arcmsr_cdb);
			error = (flag_ccb & ARCMSR_CCBREPLY_FLAG_ERROR_MODE1) ? true : false;
			arcmsr_drain_donequeue(acb, pCCB, error);
		}
		}
		break;
	case ACB_ADAPTER_TYPE_D: {
		struct MessageUnit_D *pmu = acb->pmuD;
		uint32_t outbound_write_pointer;
		uint32_t doneq_index, index_stripped, addressLow, residual, toggle;
		unsigned long flags;

		residual = atomic_read(&acb->ccboutstandingcount);
		for (i = 0; i < residual; i++) {
			spin_lock_irqsave(&acb->doneq_lock, flags);
			outbound_write_pointer =
				pmu->done_qbuffer[0].addressLow + 1;
			doneq_index = pmu->doneq_index;
			if ((doneq_index & 0xFFF) !=
				(outbound_write_pointer & 0xFFF)) {
				/* bit 14 of doneq_index is a wrap toggle;
				 * low 12 bits are the ring position */
				toggle = doneq_index & 0x4000;
				index_stripped = (doneq_index & 0xFFF) + 1;
				index_stripped %= ARCMSR_MAX_ARC1214_DONEQUEUE;
				pmu->doneq_index = index_stripped ? (index_stripped | toggle) :
					((toggle ^ 0x4000) + 1);
				doneq_index = pmu->doneq_index;
				spin_unlock_irqrestore(&acb->doneq_lock, flags);
				addressLow = pmu->done_qbuffer[doneq_index &
					0xFFF].addressLow;
				ccb_cdb_phy = (addressLow & 0xFFFFFFF0);
				if (acb->cdb_phyadd_hipart)
					ccb_cdb_phy = ccb_cdb_phy | acb->cdb_phyadd_hipart;
				pARCMSR_CDB = (struct ARCMSR_CDB *)
					(acb->vir2phy_offset + ccb_cdb_phy);
				pCCB = container_of(pARCMSR_CDB,
					struct CommandControlBlock, arcmsr_cdb);
				error = (addressLow &
					ARCMSR_CCBREPLY_FLAG_ERROR_MODE1) ?
					true : false;
				arcmsr_drain_donequeue(acb, pCCB, error);
				writel(doneq_index,
					pmu->outboundlist_read_pointer);
			} else {
				/* queue empty: give the IOP time to post more */
				spin_unlock_irqrestore(&acb->doneq_lock, flags);
				mdelay(10);
			}
		}
		pmu->postq_index = 0;
		pmu->doneq_index = 0x40FF;
		}
		break;
	case ACB_ADAPTER_TYPE_E:
		arcmsr_hbaE_postqueue_isr(acb);
		break;
	case ACB_ADAPTER_TYPE_F:
		arcmsr_hbaF_postqueue_isr(acb);
		break;
	}
}

/*
 * Fail all in-flight commands with DID_NO_CONNECT and remove every SCSI
 * device recorded in acb->device_map.  Used when the adapter has been
 * surprise-removed.
 */
static void arcmsr_remove_scsi_devices(struct AdapterControlBlock *acb)
{
	char *acb_dev_map = (char *)acb->device_map;
	int target, lun, i;
	struct scsi_device *psdev;
	struct CommandControlBlock *ccb;
	char temp;

	for (i = 0; i < acb->maxFreeCCB; i++) {
		ccb = acb->pccb_pool[i];
		if (ccb->startdone == ARCMSR_CCB_START) {
			ccb->pcmd->result = DID_NO_CONNECT << 16;
			scsi_dma_unmap(ccb->pcmd);
			scsi_done(ccb->pcmd);
		}
	}
	for (target = 0; target < ARCMSR_MAX_TARGETID; target++) {
		temp = *acb_dev_map;
		if (temp) {
			for (lun = 0; lun < ARCMSR_MAX_TARGETLUN; lun++) {
				if (temp & 1) {
					psdev = scsi_device_lookup(acb->host,
						0, target, lun);
					if (psdev != NULL) {
						scsi_remove_device(psdev);
						scsi_device_put(psdev);
					}
				}
				temp >>= 1;
			}
			*acb_dev_map = 0;
		}
		acb_dev_map++;
	}
}

/*
 * Tear down all host-side resources without talking to the (gone)
 * hardware: sysfs, SCSI host, work/timers, IRQs, CCB pool, mappings.
 */
static void arcmsr_free_pcidev(struct AdapterControlBlock *acb)
{
	struct pci_dev *pdev;
	struct Scsi_Host *host;

	host = acb->host;
	arcmsr_free_sysfs_attr(acb);
	scsi_remove_host(host);
	flush_work(&acb->arcmsr_do_message_isr_bh);
	del_timer_sync(&acb->eternal_timer);
	if (set_date_time)
		del_timer_sync(&acb->refresh_timer);
	pdev = acb->pdev;
	arcmsr_free_irq(pdev, acb);
	arcmsr_free_ccb_pool(acb);
	if (acb->adapter_type == ACB_ADAPTER_TYPE_F)
		arcmsr_free_io_queue(acb);
	arcmsr_unmap_pciregion(acb);
	pci_release_regions(pdev);
	scsi_host_put(host);
	pci_disable_device(pdev);
}

/*
 * PCI remove: quiesce the firmware, poll for outstanding commands to
 * drain, force-abort any stragglers, then release all resources.  A
 * config-space read of 0xffff means the device is physically gone, in
 * which case only host-side cleanup is performed.
 */
static void arcmsr_remove(struct pci_dev *pdev)
{
	struct Scsi_Host *host = pci_get_drvdata(pdev);
	struct AdapterControlBlock *acb =
		(struct AdapterControlBlock *) host->hostdata;
	int poll_count = 0;
	uint16_t dev_id;

	pci_read_config_word(pdev, PCI_DEVICE_ID, &dev_id);
	if (dev_id == 0xffff) {
		/* surprise removal: do not touch the hardware */
		acb->acb_flags &= ~ACB_F_IOP_INITED;
		acb->acb_flags |= ACB_F_ADAPTER_REMOVED;
		arcmsr_remove_scsi_devices(acb);
		arcmsr_free_pcidev(acb);
		return;
	}
	arcmsr_free_sysfs_attr(acb);
	scsi_remove_host(host);
	flush_work(&acb->arcmsr_do_message_isr_bh);
	del_timer_sync(&acb->eternal_timer);
	if (set_date_time)
		del_timer_sync(&acb->refresh_timer);
	arcmsr_disable_outbound_ints(acb);
	arcmsr_stop_adapter_bgrb(acb);
	arcmsr_flush_adapter_cache(acb);
	acb->acb_flags |= ACB_F_SCSISTOPADAPTER;
	acb->acb_flags &= ~ACB_F_IOP_INITED;

	/* Poll completions until the outstanding count drains or we give up. */
	for (poll_count = 0; poll_count < acb->maxOutstanding; poll_count++){
		if (!atomic_read(&acb->ccboutstandingcount))
			break;
		arcmsr_interrupt(acb);/* FIXME: need spinlock */
		msleep(25);
	}

	if (atomic_read(&acb->ccboutstandingcount)) {
		int i;

		/* Still stuck: tell the firmware to abort everything and
		 * complete whatever it already posted. */
		arcmsr_abort_allcmd(acb);
		arcmsr_done4abort_postqueue(acb);
		for (i = 0; i < acb->maxFreeCCB; i++) {
			struct CommandControlBlock *ccb = acb->pccb_pool[i];
			if (ccb->startdone == ARCMSR_CCB_START) {
				ccb->startdone = ARCMSR_CCB_ABORTED;
				ccb->pcmd->result = DID_ABORT << 16;
				arcmsr_ccb_complete(ccb);
			}
		}
	}
	arcmsr_free_irq(pdev, acb);
	arcmsr_free_ccb_pool(acb);
	if (acb->adapter_type == ACB_ADAPTER_TYPE_F)
		arcmsr_free_io_queue(acb);
	arcmsr_unmap_pciregion(acb);
	pci_release_regions(pdev);
	scsi_host_put(host);
	pci_disable_device(pdev);
}

/*
 * PCI shutdown hook: stop timers and IRQs, then quiesce the firmware so
 * a reboot does not race adapter background activity.  No-op if the
 * adapter was already surprise-removed.
 */
static void arcmsr_shutdown(struct pci_dev *pdev)
{
	struct Scsi_Host *host = pci_get_drvdata(pdev);
	struct AdapterControlBlock *acb =
		(struct AdapterControlBlock *)host->hostdata;
	if (acb->acb_flags & ACB_F_ADAPTER_REMOVED)
		return;
	del_timer_sync(&acb->eternal_timer);
	if (set_date_time)
		del_timer_sync(&acb->refresh_timer);
	arcmsr_disable_outbound_ints(acb);
	arcmsr_free_irq(pdev, acb);
	flush_work(&acb->arcmsr_do_message_isr_bh);
	arcmsr_stop_adapter_bgrb(acb);
	arcmsr_flush_adapter_cache(acb);
}

/* Module entry point: register the PCI driver. */
static int __init arcmsr_module_init(void)
{
	int error = 0;
	error = pci_register_driver(&arcmsr_pci_driver);
	return error;
}

/* Module exit point: unregister the PCI driver. */
static void __exit arcmsr_module_exit(void)
{
	pci_unregister_driver(&arcmsr_pci_driver);
}
module_init(arcmsr_module_init);
module_exit(arcmsr_module_exit);

/*
 * Re-enable the outbound interrupts of interest for each adapter
 * family, starting from the mask previously saved by
 * arcmsr_disable_outbound_ints().
 */
static void arcmsr_enable_outbound_ints(struct AdapterControlBlock *acb,
	u32 intmask_org)
{
	u32 mask;
	switch (acb->adapter_type) {

	case ACB_ADAPTER_TYPE_A: {
		struct MessageUnit_A __iomem *reg = acb->pmuA;
		/* clear the mask bits for postqueue/doorbell/message0 */
		mask = intmask_org & ~(ARCMSR_MU_OUTBOUND_POSTQUEUE_INTMASKENABLE |
			ARCMSR_MU_OUTBOUND_DOORBELL_INTMASKENABLE|
			ARCMSR_MU_OUTBOUND_MESSAGE0_INTMASKENABLE);
		writel(mask, &reg->outbound_intmask);
		acb->outbound_int_enable = ~(intmask_org & mask) & 0x000000ff;
		}
		break;

	case ACB_ADAPTER_TYPE_B: {
		struct MessageUnit_B *reg = acb->pmuB;
		/* type B enables by setting doorbell-mask bits */
		mask = intmask_org | (ARCMSR_IOP2DRV_DATA_WRITE_OK |
			ARCMSR_IOP2DRV_DATA_READ_OK |
			ARCMSR_IOP2DRV_CDB_DONE |
			ARCMSR_IOP2DRV_MESSAGE_CMD_DONE);
		writel(mask, reg->iop2drv_doorbell_mask);
		acb->outbound_int_enable = (intmask_org | mask) & 0x0000000f;
		}
		break;
	case ACB_ADAPTER_TYPE_C: {
		struct MessageUnit_C __iomem *reg = acb->pmuC;
		mask = ~(ARCMSR_HBCMU_UTILITY_A_ISR_MASK | ARCMSR_HBCMU_OUTBOUND_DOORBELL_ISR_MASK|ARCMSR_HBCMU_OUTBOUND_POSTQUEUE_ISR_MASK);
		writel(intmask_org & mask, &reg->host_int_mask);
		acb->outbound_int_enable = ~(intmask_org & mask) & 0x0000000f;
		}
		break;
	case ACB_ADAPTER_TYPE_D: {
		struct MessageUnit_D *reg = acb->pmuD;

		mask = ARCMSR_ARC1214_ALL_INT_ENABLE;
		writel(intmask_org | mask, reg->pcief0_int_enable);
		break;
		}
	case ACB_ADAPTER_TYPE_E:
	case ACB_ADAPTER_TYPE_F: {
		struct MessageUnit_E __iomem *reg = acb->pmuE;

		mask = ~(ARCMSR_HBEMU_OUTBOUND_DOORBELL_ISR | ARCMSR_HBEMU_OUTBOUND_POSTQUEUE_ISR);
		writel(intmask_org & mask, &reg->host_int_mask);
		break;
		}
	}
}

/*
 * Translate a SCSI command into the Areca CDB inside the CCB: target
 * addressing, CDB bytes, and the DMA scatter/gather list (32-bit or
 * 64-bit entries depending on each segment's address).  Returns SUCCESS
 * or FAILED (DMA mapping error / too many segments).
 */
static int arcmsr_build_ccb(struct AdapterControlBlock *acb,
	struct CommandControlBlock *ccb, struct scsi_cmnd *pcmd)
{
	struct ARCMSR_CDB *arcmsr_cdb = (struct ARCMSR_CDB *)&ccb->arcmsr_cdb;
	int8_t *psge = (int8_t *)&arcmsr_cdb->u;
	__le32 address_lo, address_hi;
	int arccdbsize = 0x30;	/* fixed CDB header size before the SG list */
	__le32 length = 0;
	int i;
	struct scatterlist *sg;
	int nseg;
	ccb->pcmd = pcmd;
	memset(arcmsr_cdb, 0, sizeof(struct ARCMSR_CDB));
	arcmsr_cdb->TargetID = pcmd->device->id;
	arcmsr_cdb->LUN = pcmd->device->lun;
	arcmsr_cdb->Function = 1;
	arcmsr_cdb->msgContext = 0;
	memcpy(arcmsr_cdb->Cdb, pcmd->cmnd, pcmd->cmd_len);

	nseg = scsi_dma_map(pcmd);
	if (unlikely(nseg > acb->host->sg_tablesize || nseg < 0))
		return FAILED;
	scsi_for_each_sg(pcmd, sg, nseg, i) {
		/* Get the physical address of the current data pointer */
		length = cpu_to_le32(sg_dma_len(sg));
		address_lo = cpu_to_le32(dma_addr_lo32(sg_dma_address(sg)));
		address_hi = cpu_to_le32(dma_addr_hi32(sg_dma_address(sg)));
		if (address_hi == 0) {
			/* segment below 4G: compact 32-bit SG entry */
			struct SG32ENTRY *pdma_sg = (struct SG32ENTRY *)psge;

			pdma_sg->address = address_lo;
			pdma_sg->length = length;
			psge += sizeof (struct SG32ENTRY);
			arccdbsize += sizeof (struct SG32ENTRY);
		} else {
			struct SG64ENTRY *pdma_sg = (struct SG64ENTRY *)psge;

			pdma_sg->addresshigh = address_hi;
			pdma_sg->address = address_lo;
			pdma_sg->length = length|cpu_to_le32(IS_SG64_ADDR);
			psge += sizeof (struct SG64ENTRY);
			arccdbsize += sizeof (struct SG64ENTRY);
		}
	}
	arcmsr_cdb->sgcount = (uint8_t)nseg;
	arcmsr_cdb->DataLength = scsi_bufflen(pcmd);
	/* number of 256-byte pages the CDB occupies, rounded up */
	arcmsr_cdb->msgPages = arccdbsize/0x100 + (arccdbsize % 0x100 ? 1 : 0);
	if ( arccdbsize > 256)
		arcmsr_cdb->Flags |= ARCMSR_CDB_FLAG_SGL_BSIZE;
	if (pcmd->sc_data_direction == DMA_TO_DEVICE)
		arcmsr_cdb->Flags |= ARCMSR_CDB_FLAG_WRITE;
	ccb->arc_cdb_size = arccdbsize;
	return SUCCESS;
}

/*
 * Post a built CCB to the adapter's inbound queue; the queue-word
 * encoding differs per adapter family (shifted address, SMID, size
 * field packing).
 */
static void arcmsr_post_ccb(struct AdapterControlBlock *acb, struct CommandControlBlock *ccb)
{
	uint32_t cdb_phyaddr = ccb->cdb_phyaddr;
	struct ARCMSR_CDB *arcmsr_cdb = (struct ARCMSR_CDB *)&ccb->arcmsr_cdb;
	atomic_inc(&acb->ccboutstandingcount);
	ccb->startdone = ARCMSR_CCB_START;
	switch (acb->adapter_type) {
	case ACB_ADAPTER_TYPE_A: {
		struct MessageUnit_A __iomem *reg = acb->pmuA;

		if (arcmsr_cdb->Flags & ARCMSR_CDB_FLAG_SGL_BSIZE)
			writel(cdb_phyaddr | ARCMSR_CCBPOST_FLAG_SGL_BSIZE,
				&reg->inbound_queueport);
		else
			writel(cdb_phyaddr, &reg->inbound_queueport);
		break;
	}

	case ACB_ADAPTER_TYPE_B: {
		struct MessageUnit_B *reg = acb->pmuB;
		uint32_t ending_index, index = reg->postq_index;

		/* pre-clear the slot after ours so the IOP sees a terminator */
		ending_index = ((index + 1) % ARCMSR_MAX_HBB_POSTQUEUE);
		reg->post_qbuffer[ending_index] = 0;
		if (arcmsr_cdb->Flags & ARCMSR_CDB_FLAG_SGL_BSIZE) {
			reg->post_qbuffer[index] =
				cdb_phyaddr | ARCMSR_CCBPOST_FLAG_SGL_BSIZE;
		} else {
			reg->post_qbuffer[index] = cdb_phyaddr;
		}
		index++;
		index %= ARCMSR_MAX_HBB_POSTQUEUE;/*if last index number set it to 0 */
reg->postq_index = index; 1871 writel(ARCMSR_DRV2IOP_CDB_POSTED, reg->drv2iop_doorbell); 1872 } 1873 break; 1874 case ACB_ADAPTER_TYPE_C: { 1875 struct MessageUnit_C __iomem *phbcmu = acb->pmuC; 1876 uint32_t ccb_post_stamp, arc_cdb_size; 1877 1878 arc_cdb_size = (ccb->arc_cdb_size > 0x300) ? 0x300 : ccb->arc_cdb_size; 1879 ccb_post_stamp = (cdb_phyaddr | ((arc_cdb_size - 1) >> 6) | 1); 1880 writel(upper_32_bits(ccb->cdb_phyaddr), &phbcmu->inbound_queueport_high); 1881 writel(ccb_post_stamp, &phbcmu->inbound_queueport_low); 1882 } 1883 break; 1884 case ACB_ADAPTER_TYPE_D: { 1885 struct MessageUnit_D *pmu = acb->pmuD; 1886 u16 index_stripped; 1887 u16 postq_index, toggle; 1888 unsigned long flags; 1889 struct InBound_SRB *pinbound_srb; 1890 1891 spin_lock_irqsave(&acb->postq_lock, flags); 1892 postq_index = pmu->postq_index; 1893 pinbound_srb = (struct InBound_SRB *)&(pmu->post_qbuffer[postq_index & 0xFF]); 1894 pinbound_srb->addressHigh = upper_32_bits(ccb->cdb_phyaddr); 1895 pinbound_srb->addressLow = cdb_phyaddr; 1896 pinbound_srb->length = ccb->arc_cdb_size >> 2; 1897 arcmsr_cdb->msgContext = dma_addr_lo32(cdb_phyaddr); 1898 toggle = postq_index & 0x4000; 1899 index_stripped = postq_index + 1; 1900 index_stripped &= (ARCMSR_MAX_ARC1214_POSTQUEUE - 1); 1901 pmu->postq_index = index_stripped ? (index_stripped | toggle) : 1902 (toggle ^ 0x4000); 1903 writel(postq_index, pmu->inboundlist_write_pointer); 1904 spin_unlock_irqrestore(&acb->postq_lock, flags); 1905 break; 1906 } 1907 case ACB_ADAPTER_TYPE_E: { 1908 struct MessageUnit_E __iomem *pmu = acb->pmuE; 1909 u32 ccb_post_stamp, arc_cdb_size; 1910 1911 arc_cdb_size = (ccb->arc_cdb_size > 0x300) ? 
0x300 : ccb->arc_cdb_size; 1912 ccb_post_stamp = (ccb->smid | ((arc_cdb_size - 1) >> 6)); 1913 writel(0, &pmu->inbound_queueport_high); 1914 writel(ccb_post_stamp, &pmu->inbound_queueport_low); 1915 break; 1916 } 1917 case ACB_ADAPTER_TYPE_F: { 1918 struct MessageUnit_F __iomem *pmu = acb->pmuF; 1919 u32 ccb_post_stamp, arc_cdb_size; 1920 1921 if (ccb->arc_cdb_size <= 0x300) 1922 arc_cdb_size = (ccb->arc_cdb_size - 1) >> 6 | 1; 1923 else { 1924 arc_cdb_size = ((ccb->arc_cdb_size + 0xff) >> 8) + 2; 1925 if (arc_cdb_size > 0xF) 1926 arc_cdb_size = 0xF; 1927 arc_cdb_size = (arc_cdb_size << 1) | 1; 1928 } 1929 ccb_post_stamp = (ccb->smid | arc_cdb_size); 1930 writel(0, &pmu->inbound_queueport_high); 1931 writel(ccb_post_stamp, &pmu->inbound_queueport_low); 1932 break; 1933 } 1934 } 1935 } 1936 1937 static void arcmsr_hbaA_stop_bgrb(struct AdapterControlBlock *acb) 1938 { 1939 struct MessageUnit_A __iomem *reg = acb->pmuA; 1940 acb->acb_flags &= ~ACB_F_MSG_START_BGRB; 1941 writel(ARCMSR_INBOUND_MESG0_STOP_BGRB, ®->inbound_msgaddr0); 1942 if (!arcmsr_hbaA_wait_msgint_ready(acb)) { 1943 printk(KERN_NOTICE 1944 "arcmsr%d: wait 'stop adapter background rebuild' timeout\n" 1945 , acb->host->host_no); 1946 } 1947 } 1948 1949 static void arcmsr_hbaB_stop_bgrb(struct AdapterControlBlock *acb) 1950 { 1951 struct MessageUnit_B *reg = acb->pmuB; 1952 acb->acb_flags &= ~ACB_F_MSG_START_BGRB; 1953 writel(ARCMSR_MESSAGE_STOP_BGRB, reg->drv2iop_doorbell); 1954 1955 if (!arcmsr_hbaB_wait_msgint_ready(acb)) { 1956 printk(KERN_NOTICE 1957 "arcmsr%d: wait 'stop adapter background rebuild' timeout\n" 1958 , acb->host->host_no); 1959 } 1960 } 1961 1962 static void arcmsr_hbaC_stop_bgrb(struct AdapterControlBlock *pACB) 1963 { 1964 struct MessageUnit_C __iomem *reg = pACB->pmuC; 1965 pACB->acb_flags &= ~ACB_F_MSG_START_BGRB; 1966 writel(ARCMSR_INBOUND_MESG0_STOP_BGRB, ®->inbound_msgaddr0); 1967 writel(ARCMSR_HBCMU_DRV2IOP_MESSAGE_CMD_DONE, ®->inbound_doorbell); 1968 if 
(!arcmsr_hbaC_wait_msgint_ready(pACB)) { 1969 printk(KERN_NOTICE 1970 "arcmsr%d: wait 'stop adapter background rebuild' timeout\n" 1971 , pACB->host->host_no); 1972 } 1973 return; 1974 } 1975 1976 static void arcmsr_hbaD_stop_bgrb(struct AdapterControlBlock *pACB) 1977 { 1978 struct MessageUnit_D *reg = pACB->pmuD; 1979 1980 pACB->acb_flags &= ~ACB_F_MSG_START_BGRB; 1981 writel(ARCMSR_INBOUND_MESG0_STOP_BGRB, reg->inbound_msgaddr0); 1982 if (!arcmsr_hbaD_wait_msgint_ready(pACB)) 1983 pr_notice("arcmsr%d: wait 'stop adapter background rebuild' " 1984 "timeout\n", pACB->host->host_no); 1985 } 1986 1987 static void arcmsr_hbaE_stop_bgrb(struct AdapterControlBlock *pACB) 1988 { 1989 struct MessageUnit_E __iomem *reg = pACB->pmuE; 1990 1991 pACB->acb_flags &= ~ACB_F_MSG_START_BGRB; 1992 writel(ARCMSR_INBOUND_MESG0_STOP_BGRB, ®->inbound_msgaddr0); 1993 pACB->out_doorbell ^= ARCMSR_HBEMU_DRV2IOP_MESSAGE_CMD_DONE; 1994 writel(pACB->out_doorbell, ®->iobound_doorbell); 1995 if (!arcmsr_hbaE_wait_msgint_ready(pACB)) { 1996 pr_notice("arcmsr%d: wait 'stop adapter background rebuild' " 1997 "timeout\n", pACB->host->host_no); 1998 } 1999 } 2000 2001 static void arcmsr_stop_adapter_bgrb(struct AdapterControlBlock *acb) 2002 { 2003 switch (acb->adapter_type) { 2004 case ACB_ADAPTER_TYPE_A: 2005 arcmsr_hbaA_stop_bgrb(acb); 2006 break; 2007 case ACB_ADAPTER_TYPE_B: 2008 arcmsr_hbaB_stop_bgrb(acb); 2009 break; 2010 case ACB_ADAPTER_TYPE_C: 2011 arcmsr_hbaC_stop_bgrb(acb); 2012 break; 2013 case ACB_ADAPTER_TYPE_D: 2014 arcmsr_hbaD_stop_bgrb(acb); 2015 break; 2016 case ACB_ADAPTER_TYPE_E: 2017 case ACB_ADAPTER_TYPE_F: 2018 arcmsr_hbaE_stop_bgrb(acb); 2019 break; 2020 } 2021 } 2022 2023 static void arcmsr_free_ccb_pool(struct AdapterControlBlock *acb) 2024 { 2025 dma_free_coherent(&acb->pdev->dev, acb->uncache_size, acb->dma_coherent, acb->dma_coherent_handle); 2026 } 2027 2028 static void arcmsr_iop_message_read(struct AdapterControlBlock *acb) 2029 { 2030 switch (acb->adapter_type) { 
2031 case ACB_ADAPTER_TYPE_A: { 2032 struct MessageUnit_A __iomem *reg = acb->pmuA; 2033 writel(ARCMSR_INBOUND_DRIVER_DATA_READ_OK, ®->inbound_doorbell); 2034 } 2035 break; 2036 case ACB_ADAPTER_TYPE_B: { 2037 struct MessageUnit_B *reg = acb->pmuB; 2038 writel(ARCMSR_DRV2IOP_DATA_READ_OK, reg->drv2iop_doorbell); 2039 } 2040 break; 2041 case ACB_ADAPTER_TYPE_C: { 2042 struct MessageUnit_C __iomem *reg = acb->pmuC; 2043 2044 writel(ARCMSR_HBCMU_DRV2IOP_DATA_READ_OK, ®->inbound_doorbell); 2045 } 2046 break; 2047 case ACB_ADAPTER_TYPE_D: { 2048 struct MessageUnit_D *reg = acb->pmuD; 2049 writel(ARCMSR_ARC1214_DRV2IOP_DATA_OUT_READ, 2050 reg->inbound_doorbell); 2051 } 2052 break; 2053 case ACB_ADAPTER_TYPE_E: 2054 case ACB_ADAPTER_TYPE_F: { 2055 struct MessageUnit_E __iomem *reg = acb->pmuE; 2056 acb->out_doorbell ^= ARCMSR_HBEMU_DRV2IOP_DATA_READ_OK; 2057 writel(acb->out_doorbell, ®->iobound_doorbell); 2058 } 2059 break; 2060 } 2061 } 2062 2063 static void arcmsr_iop_message_wrote(struct AdapterControlBlock *acb) 2064 { 2065 switch (acb->adapter_type) { 2066 case ACB_ADAPTER_TYPE_A: { 2067 struct MessageUnit_A __iomem *reg = acb->pmuA; 2068 /* 2069 ** push inbound doorbell tell iop, driver data write ok 2070 ** and wait reply on next hwinterrupt for next Qbuffer post 2071 */ 2072 writel(ARCMSR_INBOUND_DRIVER_DATA_WRITE_OK, ®->inbound_doorbell); 2073 } 2074 break; 2075 2076 case ACB_ADAPTER_TYPE_B: { 2077 struct MessageUnit_B *reg = acb->pmuB; 2078 /* 2079 ** push inbound doorbell tell iop, driver data write ok 2080 ** and wait reply on next hwinterrupt for next Qbuffer post 2081 */ 2082 writel(ARCMSR_DRV2IOP_DATA_WRITE_OK, reg->drv2iop_doorbell); 2083 } 2084 break; 2085 case ACB_ADAPTER_TYPE_C: { 2086 struct MessageUnit_C __iomem *reg = acb->pmuC; 2087 /* 2088 ** push inbound doorbell tell iop, driver data write ok 2089 ** and wait reply on next hwinterrupt for next Qbuffer post 2090 */ 2091 writel(ARCMSR_HBCMU_DRV2IOP_DATA_WRITE_OK, ®->inbound_doorbell); 2092 } 2093 
break; 2094 case ACB_ADAPTER_TYPE_D: { 2095 struct MessageUnit_D *reg = acb->pmuD; 2096 writel(ARCMSR_ARC1214_DRV2IOP_DATA_IN_READY, 2097 reg->inbound_doorbell); 2098 } 2099 break; 2100 case ACB_ADAPTER_TYPE_E: 2101 case ACB_ADAPTER_TYPE_F: { 2102 struct MessageUnit_E __iomem *reg = acb->pmuE; 2103 acb->out_doorbell ^= ARCMSR_HBEMU_DRV2IOP_DATA_WRITE_OK; 2104 writel(acb->out_doorbell, ®->iobound_doorbell); 2105 } 2106 break; 2107 } 2108 } 2109 2110 struct QBUFFER __iomem *arcmsr_get_iop_rqbuffer(struct AdapterControlBlock *acb) 2111 { 2112 struct QBUFFER __iomem *qbuffer = NULL; 2113 switch (acb->adapter_type) { 2114 2115 case ACB_ADAPTER_TYPE_A: { 2116 struct MessageUnit_A __iomem *reg = acb->pmuA; 2117 qbuffer = (struct QBUFFER __iomem *)®->message_rbuffer; 2118 } 2119 break; 2120 case ACB_ADAPTER_TYPE_B: { 2121 struct MessageUnit_B *reg = acb->pmuB; 2122 qbuffer = (struct QBUFFER __iomem *)reg->message_rbuffer; 2123 } 2124 break; 2125 case ACB_ADAPTER_TYPE_C: { 2126 struct MessageUnit_C __iomem *phbcmu = acb->pmuC; 2127 qbuffer = (struct QBUFFER __iomem *)&phbcmu->message_rbuffer; 2128 } 2129 break; 2130 case ACB_ADAPTER_TYPE_D: { 2131 struct MessageUnit_D *reg = acb->pmuD; 2132 qbuffer = (struct QBUFFER __iomem *)reg->message_rbuffer; 2133 } 2134 break; 2135 case ACB_ADAPTER_TYPE_E: { 2136 struct MessageUnit_E __iomem *reg = acb->pmuE; 2137 qbuffer = (struct QBUFFER __iomem *)®->message_rbuffer; 2138 } 2139 break; 2140 case ACB_ADAPTER_TYPE_F: { 2141 qbuffer = (struct QBUFFER __iomem *)acb->message_rbuffer; 2142 } 2143 break; 2144 } 2145 return qbuffer; 2146 } 2147 2148 static struct QBUFFER __iomem *arcmsr_get_iop_wqbuffer(struct AdapterControlBlock *acb) 2149 { 2150 struct QBUFFER __iomem *pqbuffer = NULL; 2151 switch (acb->adapter_type) { 2152 2153 case ACB_ADAPTER_TYPE_A: { 2154 struct MessageUnit_A __iomem *reg = acb->pmuA; 2155 pqbuffer = (struct QBUFFER __iomem *) ®->message_wbuffer; 2156 } 2157 break; 2158 case ACB_ADAPTER_TYPE_B: { 2159 struct 
MessageUnit_B *reg = acb->pmuB; 2160 pqbuffer = (struct QBUFFER __iomem *)reg->message_wbuffer; 2161 } 2162 break; 2163 case ACB_ADAPTER_TYPE_C: { 2164 struct MessageUnit_C __iomem *reg = acb->pmuC; 2165 pqbuffer = (struct QBUFFER __iomem *)®->message_wbuffer; 2166 } 2167 break; 2168 case ACB_ADAPTER_TYPE_D: { 2169 struct MessageUnit_D *reg = acb->pmuD; 2170 pqbuffer = (struct QBUFFER __iomem *)reg->message_wbuffer; 2171 } 2172 break; 2173 case ACB_ADAPTER_TYPE_E: { 2174 struct MessageUnit_E __iomem *reg = acb->pmuE; 2175 pqbuffer = (struct QBUFFER __iomem *)®->message_wbuffer; 2176 } 2177 break; 2178 case ACB_ADAPTER_TYPE_F: 2179 pqbuffer = (struct QBUFFER __iomem *)acb->message_wbuffer; 2180 break; 2181 } 2182 return pqbuffer; 2183 } 2184 2185 static uint32_t 2186 arcmsr_Read_iop_rqbuffer_in_DWORD(struct AdapterControlBlock *acb, 2187 struct QBUFFER __iomem *prbuffer) 2188 { 2189 uint8_t *pQbuffer; 2190 uint8_t *buf1 = NULL; 2191 uint32_t __iomem *iop_data; 2192 uint32_t iop_len, data_len, *buf2 = NULL; 2193 2194 iop_data = (uint32_t __iomem *)prbuffer->data; 2195 iop_len = readl(&prbuffer->data_len); 2196 if (iop_len > 0) { 2197 buf1 = kmalloc(128, GFP_ATOMIC); 2198 buf2 = (uint32_t *)buf1; 2199 if (buf1 == NULL) 2200 return 0; 2201 data_len = iop_len; 2202 while (data_len >= 4) { 2203 *buf2++ = readl(iop_data); 2204 iop_data++; 2205 data_len -= 4; 2206 } 2207 if (data_len) 2208 *buf2 = readl(iop_data); 2209 buf2 = (uint32_t *)buf1; 2210 } 2211 while (iop_len > 0) { 2212 pQbuffer = &acb->rqbuffer[acb->rqbuf_putIndex]; 2213 *pQbuffer = *buf1; 2214 acb->rqbuf_putIndex++; 2215 /* if last, index number set it to 0 */ 2216 acb->rqbuf_putIndex %= ARCMSR_MAX_QBUFFER; 2217 buf1++; 2218 iop_len--; 2219 } 2220 kfree(buf2); 2221 /* let IOP know data has been read */ 2222 arcmsr_iop_message_read(acb); 2223 return 1; 2224 } 2225 2226 uint32_t 2227 arcmsr_Read_iop_rqbuffer_data(struct AdapterControlBlock *acb, 2228 struct QBUFFER __iomem *prbuffer) { 2229 2230 uint8_t 
*pQbuffer; 2231 uint8_t __iomem *iop_data; 2232 uint32_t iop_len; 2233 2234 if (acb->adapter_type > ACB_ADAPTER_TYPE_B) 2235 return arcmsr_Read_iop_rqbuffer_in_DWORD(acb, prbuffer); 2236 iop_data = (uint8_t __iomem *)prbuffer->data; 2237 iop_len = readl(&prbuffer->data_len); 2238 while (iop_len > 0) { 2239 pQbuffer = &acb->rqbuffer[acb->rqbuf_putIndex]; 2240 *pQbuffer = readb(iop_data); 2241 acb->rqbuf_putIndex++; 2242 acb->rqbuf_putIndex %= ARCMSR_MAX_QBUFFER; 2243 iop_data++; 2244 iop_len--; 2245 } 2246 arcmsr_iop_message_read(acb); 2247 return 1; 2248 } 2249 2250 static void arcmsr_iop2drv_data_wrote_handle(struct AdapterControlBlock *acb) 2251 { 2252 unsigned long flags; 2253 struct QBUFFER __iomem *prbuffer; 2254 int32_t buf_empty_len; 2255 2256 spin_lock_irqsave(&acb->rqbuffer_lock, flags); 2257 prbuffer = arcmsr_get_iop_rqbuffer(acb); 2258 if (acb->rqbuf_putIndex >= acb->rqbuf_getIndex) { 2259 buf_empty_len = (ARCMSR_MAX_QBUFFER - 1) - 2260 (acb->rqbuf_putIndex - acb->rqbuf_getIndex); 2261 } else 2262 buf_empty_len = acb->rqbuf_getIndex - acb->rqbuf_putIndex - 1; 2263 if (buf_empty_len >= readl(&prbuffer->data_len)) { 2264 if (arcmsr_Read_iop_rqbuffer_data(acb, prbuffer) == 0) 2265 acb->acb_flags |= ACB_F_IOPDATA_OVERFLOW; 2266 } else 2267 acb->acb_flags |= ACB_F_IOPDATA_OVERFLOW; 2268 spin_unlock_irqrestore(&acb->rqbuffer_lock, flags); 2269 } 2270 2271 static void arcmsr_write_ioctldata2iop_in_DWORD(struct AdapterControlBlock *acb) 2272 { 2273 uint8_t *pQbuffer; 2274 struct QBUFFER __iomem *pwbuffer; 2275 uint8_t *buf1 = NULL; 2276 uint32_t __iomem *iop_data; 2277 uint32_t allxfer_len = 0, data_len, *buf2 = NULL, data; 2278 2279 if (acb->acb_flags & ACB_F_MESSAGE_WQBUFFER_READED) { 2280 buf1 = kmalloc(128, GFP_ATOMIC); 2281 buf2 = (uint32_t *)buf1; 2282 if (buf1 == NULL) 2283 return; 2284 2285 acb->acb_flags &= (~ACB_F_MESSAGE_WQBUFFER_READED); 2286 pwbuffer = arcmsr_get_iop_wqbuffer(acb); 2287 iop_data = (uint32_t __iomem *)pwbuffer->data; 2288 while 
((acb->wqbuf_getIndex != acb->wqbuf_putIndex) 2289 && (allxfer_len < 124)) { 2290 pQbuffer = &acb->wqbuffer[acb->wqbuf_getIndex]; 2291 *buf1 = *pQbuffer; 2292 acb->wqbuf_getIndex++; 2293 acb->wqbuf_getIndex %= ARCMSR_MAX_QBUFFER; 2294 buf1++; 2295 allxfer_len++; 2296 } 2297 data_len = allxfer_len; 2298 buf1 = (uint8_t *)buf2; 2299 while (data_len >= 4) { 2300 data = *buf2++; 2301 writel(data, iop_data); 2302 iop_data++; 2303 data_len -= 4; 2304 } 2305 if (data_len) { 2306 data = *buf2; 2307 writel(data, iop_data); 2308 } 2309 writel(allxfer_len, &pwbuffer->data_len); 2310 kfree(buf1); 2311 arcmsr_iop_message_wrote(acb); 2312 } 2313 } 2314 2315 void 2316 arcmsr_write_ioctldata2iop(struct AdapterControlBlock *acb) 2317 { 2318 uint8_t *pQbuffer; 2319 struct QBUFFER __iomem *pwbuffer; 2320 uint8_t __iomem *iop_data; 2321 int32_t allxfer_len = 0; 2322 2323 if (acb->adapter_type > ACB_ADAPTER_TYPE_B) { 2324 arcmsr_write_ioctldata2iop_in_DWORD(acb); 2325 return; 2326 } 2327 if (acb->acb_flags & ACB_F_MESSAGE_WQBUFFER_READED) { 2328 acb->acb_flags &= (~ACB_F_MESSAGE_WQBUFFER_READED); 2329 pwbuffer = arcmsr_get_iop_wqbuffer(acb); 2330 iop_data = (uint8_t __iomem *)pwbuffer->data; 2331 while ((acb->wqbuf_getIndex != acb->wqbuf_putIndex) 2332 && (allxfer_len < 124)) { 2333 pQbuffer = &acb->wqbuffer[acb->wqbuf_getIndex]; 2334 writeb(*pQbuffer, iop_data); 2335 acb->wqbuf_getIndex++; 2336 acb->wqbuf_getIndex %= ARCMSR_MAX_QBUFFER; 2337 iop_data++; 2338 allxfer_len++; 2339 } 2340 writel(allxfer_len, &pwbuffer->data_len); 2341 arcmsr_iop_message_wrote(acb); 2342 } 2343 } 2344 2345 static void arcmsr_iop2drv_data_read_handle(struct AdapterControlBlock *acb) 2346 { 2347 unsigned long flags; 2348 2349 spin_lock_irqsave(&acb->wqbuffer_lock, flags); 2350 acb->acb_flags |= ACB_F_MESSAGE_WQBUFFER_READED; 2351 if (acb->wqbuf_getIndex != acb->wqbuf_putIndex) 2352 arcmsr_write_ioctldata2iop(acb); 2353 if (acb->wqbuf_getIndex == acb->wqbuf_putIndex) 2354 acb->acb_flags |= 
ACB_F_MESSAGE_WQBUFFER_CLEARED; 2355 spin_unlock_irqrestore(&acb->wqbuffer_lock, flags); 2356 } 2357 2358 static void arcmsr_hbaA_doorbell_isr(struct AdapterControlBlock *acb) 2359 { 2360 uint32_t outbound_doorbell; 2361 struct MessageUnit_A __iomem *reg = acb->pmuA; 2362 outbound_doorbell = readl(®->outbound_doorbell); 2363 do { 2364 writel(outbound_doorbell, ®->outbound_doorbell); 2365 if (outbound_doorbell & ARCMSR_OUTBOUND_IOP331_DATA_WRITE_OK) 2366 arcmsr_iop2drv_data_wrote_handle(acb); 2367 if (outbound_doorbell & ARCMSR_OUTBOUND_IOP331_DATA_READ_OK) 2368 arcmsr_iop2drv_data_read_handle(acb); 2369 outbound_doorbell = readl(®->outbound_doorbell); 2370 } while (outbound_doorbell & (ARCMSR_OUTBOUND_IOP331_DATA_WRITE_OK 2371 | ARCMSR_OUTBOUND_IOP331_DATA_READ_OK)); 2372 } 2373 static void arcmsr_hbaC_doorbell_isr(struct AdapterControlBlock *pACB) 2374 { 2375 uint32_t outbound_doorbell; 2376 struct MessageUnit_C __iomem *reg = pACB->pmuC; 2377 /* 2378 ******************************************************************* 2379 ** Maybe here we need to check wrqbuffer_lock is lock or not 2380 ** DOORBELL: din! don! 
2381 ** check if there are any mail need to pack from firmware 2382 ******************************************************************* 2383 */ 2384 outbound_doorbell = readl(®->outbound_doorbell); 2385 do { 2386 writel(outbound_doorbell, ®->outbound_doorbell_clear); 2387 readl(®->outbound_doorbell_clear); 2388 if (outbound_doorbell & ARCMSR_HBCMU_IOP2DRV_DATA_WRITE_OK) 2389 arcmsr_iop2drv_data_wrote_handle(pACB); 2390 if (outbound_doorbell & ARCMSR_HBCMU_IOP2DRV_DATA_READ_OK) 2391 arcmsr_iop2drv_data_read_handle(pACB); 2392 if (outbound_doorbell & ARCMSR_HBCMU_IOP2DRV_MESSAGE_CMD_DONE) 2393 arcmsr_hbaC_message_isr(pACB); 2394 outbound_doorbell = readl(®->outbound_doorbell); 2395 } while (outbound_doorbell & (ARCMSR_HBCMU_IOP2DRV_DATA_WRITE_OK 2396 | ARCMSR_HBCMU_IOP2DRV_DATA_READ_OK 2397 | ARCMSR_HBCMU_IOP2DRV_MESSAGE_CMD_DONE)); 2398 } 2399 2400 static void arcmsr_hbaD_doorbell_isr(struct AdapterControlBlock *pACB) 2401 { 2402 uint32_t outbound_doorbell; 2403 struct MessageUnit_D *pmu = pACB->pmuD; 2404 2405 outbound_doorbell = readl(pmu->outbound_doorbell); 2406 do { 2407 writel(outbound_doorbell, pmu->outbound_doorbell); 2408 if (outbound_doorbell & ARCMSR_ARC1214_IOP2DRV_MESSAGE_CMD_DONE) 2409 arcmsr_hbaD_message_isr(pACB); 2410 if (outbound_doorbell & ARCMSR_ARC1214_IOP2DRV_DATA_WRITE_OK) 2411 arcmsr_iop2drv_data_wrote_handle(pACB); 2412 if (outbound_doorbell & ARCMSR_ARC1214_IOP2DRV_DATA_READ_OK) 2413 arcmsr_iop2drv_data_read_handle(pACB); 2414 outbound_doorbell = readl(pmu->outbound_doorbell); 2415 } while (outbound_doorbell & (ARCMSR_ARC1214_IOP2DRV_DATA_WRITE_OK 2416 | ARCMSR_ARC1214_IOP2DRV_DATA_READ_OK 2417 | ARCMSR_ARC1214_IOP2DRV_MESSAGE_CMD_DONE)); 2418 } 2419 2420 static void arcmsr_hbaE_doorbell_isr(struct AdapterControlBlock *pACB) 2421 { 2422 uint32_t outbound_doorbell, in_doorbell, tmp, i; 2423 struct MessageUnit_E __iomem *reg = pACB->pmuE; 2424 2425 if (pACB->adapter_type == ACB_ADAPTER_TYPE_F) { 2426 for (i = 0; i < 5; i++) { 2427 in_doorbell 
= readl(®->iobound_doorbell); 2428 if (in_doorbell != 0) 2429 break; 2430 } 2431 } else 2432 in_doorbell = readl(®->iobound_doorbell); 2433 outbound_doorbell = in_doorbell ^ pACB->in_doorbell; 2434 do { 2435 writel(0, ®->host_int_status); /* clear interrupt */ 2436 if (outbound_doorbell & ARCMSR_HBEMU_IOP2DRV_DATA_WRITE_OK) { 2437 arcmsr_iop2drv_data_wrote_handle(pACB); 2438 } 2439 if (outbound_doorbell & ARCMSR_HBEMU_IOP2DRV_DATA_READ_OK) { 2440 arcmsr_iop2drv_data_read_handle(pACB); 2441 } 2442 if (outbound_doorbell & ARCMSR_HBEMU_IOP2DRV_MESSAGE_CMD_DONE) { 2443 arcmsr_hbaE_message_isr(pACB); 2444 } 2445 tmp = in_doorbell; 2446 in_doorbell = readl(®->iobound_doorbell); 2447 outbound_doorbell = tmp ^ in_doorbell; 2448 } while (outbound_doorbell & (ARCMSR_HBEMU_IOP2DRV_DATA_WRITE_OK 2449 | ARCMSR_HBEMU_IOP2DRV_DATA_READ_OK 2450 | ARCMSR_HBEMU_IOP2DRV_MESSAGE_CMD_DONE)); 2451 pACB->in_doorbell = in_doorbell; 2452 } 2453 2454 static void arcmsr_hbaA_postqueue_isr(struct AdapterControlBlock *acb) 2455 { 2456 uint32_t flag_ccb; 2457 struct MessageUnit_A __iomem *reg = acb->pmuA; 2458 struct ARCMSR_CDB *pARCMSR_CDB; 2459 struct CommandControlBlock *pCCB; 2460 bool error; 2461 unsigned long cdb_phy_addr; 2462 2463 while ((flag_ccb = readl(®->outbound_queueport)) != 0xFFFFFFFF) { 2464 cdb_phy_addr = (flag_ccb << 5) & 0xffffffff; 2465 if (acb->cdb_phyadd_hipart) 2466 cdb_phy_addr = cdb_phy_addr | acb->cdb_phyadd_hipart; 2467 pARCMSR_CDB = (struct ARCMSR_CDB *)(acb->vir2phy_offset + cdb_phy_addr); 2468 pCCB = container_of(pARCMSR_CDB, struct CommandControlBlock, arcmsr_cdb); 2469 error = (flag_ccb & ARCMSR_CCBREPLY_FLAG_ERROR_MODE0) ? 
true : false; 2470 arcmsr_drain_donequeue(acb, pCCB, error); 2471 } 2472 } 2473 static void arcmsr_hbaB_postqueue_isr(struct AdapterControlBlock *acb) 2474 { 2475 uint32_t index; 2476 uint32_t flag_ccb; 2477 struct MessageUnit_B *reg = acb->pmuB; 2478 struct ARCMSR_CDB *pARCMSR_CDB; 2479 struct CommandControlBlock *pCCB; 2480 bool error; 2481 unsigned long cdb_phy_addr; 2482 2483 index = reg->doneq_index; 2484 while ((flag_ccb = reg->done_qbuffer[index]) != 0) { 2485 cdb_phy_addr = (flag_ccb << 5) & 0xffffffff; 2486 if (acb->cdb_phyadd_hipart) 2487 cdb_phy_addr = cdb_phy_addr | acb->cdb_phyadd_hipart; 2488 pARCMSR_CDB = (struct ARCMSR_CDB *)(acb->vir2phy_offset + cdb_phy_addr); 2489 pCCB = container_of(pARCMSR_CDB, struct CommandControlBlock, arcmsr_cdb); 2490 error = (flag_ccb & ARCMSR_CCBREPLY_FLAG_ERROR_MODE0) ? true : false; 2491 arcmsr_drain_donequeue(acb, pCCB, error); 2492 reg->done_qbuffer[index] = 0; 2493 index++; 2494 index %= ARCMSR_MAX_HBB_POSTQUEUE; 2495 reg->doneq_index = index; 2496 } 2497 } 2498 2499 static void arcmsr_hbaC_postqueue_isr(struct AdapterControlBlock *acb) 2500 { 2501 struct MessageUnit_C __iomem *phbcmu; 2502 struct ARCMSR_CDB *arcmsr_cdb; 2503 struct CommandControlBlock *ccb; 2504 uint32_t flag_ccb, throttling = 0; 2505 unsigned long ccb_cdb_phy; 2506 int error; 2507 2508 phbcmu = acb->pmuC; 2509 /* areca cdb command done */ 2510 /* Use correct offset and size for syncing */ 2511 2512 while ((flag_ccb = readl(&phbcmu->outbound_queueport_low)) != 2513 0xFFFFFFFF) { 2514 ccb_cdb_phy = (flag_ccb & 0xFFFFFFF0); 2515 if (acb->cdb_phyadd_hipart) 2516 ccb_cdb_phy = ccb_cdb_phy | acb->cdb_phyadd_hipart; 2517 arcmsr_cdb = (struct ARCMSR_CDB *)(acb->vir2phy_offset 2518 + ccb_cdb_phy); 2519 ccb = container_of(arcmsr_cdb, struct CommandControlBlock, 2520 arcmsr_cdb); 2521 error = (flag_ccb & ARCMSR_CCBREPLY_FLAG_ERROR_MODE1) 2522 ? 
true : false; 2523 /* check if command done with no error */ 2524 arcmsr_drain_donequeue(acb, ccb, error); 2525 throttling++; 2526 if (throttling == ARCMSR_HBC_ISR_THROTTLING_LEVEL) { 2527 writel(ARCMSR_HBCMU_DRV2IOP_POSTQUEUE_THROTTLING, 2528 &phbcmu->inbound_doorbell); 2529 throttling = 0; 2530 } 2531 } 2532 } 2533 2534 static void arcmsr_hbaD_postqueue_isr(struct AdapterControlBlock *acb) 2535 { 2536 u32 outbound_write_pointer, doneq_index, index_stripped, toggle; 2537 uint32_t addressLow; 2538 int error; 2539 struct MessageUnit_D *pmu; 2540 struct ARCMSR_CDB *arcmsr_cdb; 2541 struct CommandControlBlock *ccb; 2542 unsigned long flags, ccb_cdb_phy; 2543 2544 spin_lock_irqsave(&acb->doneq_lock, flags); 2545 pmu = acb->pmuD; 2546 outbound_write_pointer = pmu->done_qbuffer[0].addressLow + 1; 2547 doneq_index = pmu->doneq_index; 2548 if ((doneq_index & 0xFFF) != (outbound_write_pointer & 0xFFF)) { 2549 do { 2550 toggle = doneq_index & 0x4000; 2551 index_stripped = (doneq_index & 0xFFF) + 1; 2552 index_stripped %= ARCMSR_MAX_ARC1214_DONEQUEUE; 2553 pmu->doneq_index = index_stripped ? (index_stripped | toggle) : 2554 ((toggle ^ 0x4000) + 1); 2555 doneq_index = pmu->doneq_index; 2556 addressLow = pmu->done_qbuffer[doneq_index & 2557 0xFFF].addressLow; 2558 ccb_cdb_phy = (addressLow & 0xFFFFFFF0); 2559 if (acb->cdb_phyadd_hipart) 2560 ccb_cdb_phy = ccb_cdb_phy | acb->cdb_phyadd_hipart; 2561 arcmsr_cdb = (struct ARCMSR_CDB *)(acb->vir2phy_offset 2562 + ccb_cdb_phy); 2563 ccb = container_of(arcmsr_cdb, 2564 struct CommandControlBlock, arcmsr_cdb); 2565 error = (addressLow & ARCMSR_CCBREPLY_FLAG_ERROR_MODE1) 2566 ? 
true : false; 2567 arcmsr_drain_donequeue(acb, ccb, error); 2568 writel(doneq_index, pmu->outboundlist_read_pointer); 2569 } while ((doneq_index & 0xFFF) != 2570 (outbound_write_pointer & 0xFFF)); 2571 } 2572 writel(ARCMSR_ARC1214_OUTBOUND_LIST_INTERRUPT_CLEAR, 2573 pmu->outboundlist_interrupt_cause); 2574 readl(pmu->outboundlist_interrupt_cause); 2575 spin_unlock_irqrestore(&acb->doneq_lock, flags); 2576 } 2577 2578 static void arcmsr_hbaE_postqueue_isr(struct AdapterControlBlock *acb) 2579 { 2580 uint32_t doneq_index; 2581 uint16_t cmdSMID; 2582 int error; 2583 struct MessageUnit_E __iomem *pmu; 2584 struct CommandControlBlock *ccb; 2585 unsigned long flags; 2586 2587 spin_lock_irqsave(&acb->doneq_lock, flags); 2588 doneq_index = acb->doneq_index; 2589 pmu = acb->pmuE; 2590 while ((readl(&pmu->reply_post_producer_index) & 0xFFFF) != doneq_index) { 2591 cmdSMID = acb->pCompletionQ[doneq_index].cmdSMID; 2592 ccb = acb->pccb_pool[cmdSMID]; 2593 error = (acb->pCompletionQ[doneq_index].cmdFlag 2594 & ARCMSR_CCBREPLY_FLAG_ERROR_MODE1) ? true : false; 2595 arcmsr_drain_donequeue(acb, ccb, error); 2596 doneq_index++; 2597 if (doneq_index >= acb->completionQ_entry) 2598 doneq_index = 0; 2599 } 2600 acb->doneq_index = doneq_index; 2601 writel(doneq_index, &pmu->reply_post_consumer_index); 2602 spin_unlock_irqrestore(&acb->doneq_lock, flags); 2603 } 2604 2605 static void arcmsr_hbaF_postqueue_isr(struct AdapterControlBlock *acb) 2606 { 2607 uint32_t doneq_index; 2608 uint16_t cmdSMID; 2609 int error; 2610 struct MessageUnit_F __iomem *phbcmu; 2611 struct CommandControlBlock *ccb; 2612 unsigned long flags; 2613 2614 spin_lock_irqsave(&acb->doneq_lock, flags); 2615 doneq_index = acb->doneq_index; 2616 phbcmu = acb->pmuF; 2617 while (1) { 2618 cmdSMID = acb->pCompletionQ[doneq_index].cmdSMID; 2619 if (cmdSMID == 0xffff) 2620 break; 2621 ccb = acb->pccb_pool[cmdSMID]; 2622 error = (acb->pCompletionQ[doneq_index].cmdFlag & 2623 ARCMSR_CCBREPLY_FLAG_ERROR_MODE1) ? 
true : false; 2624 arcmsr_drain_donequeue(acb, ccb, error); 2625 acb->pCompletionQ[doneq_index].cmdSMID = 0xffff; 2626 doneq_index++; 2627 if (doneq_index >= acb->completionQ_entry) 2628 doneq_index = 0; 2629 } 2630 acb->doneq_index = doneq_index; 2631 writel(doneq_index, &phbcmu->reply_post_consumer_index); 2632 spin_unlock_irqrestore(&acb->doneq_lock, flags); 2633 } 2634 2635 /* 2636 ********************************************************************************** 2637 ** Handle a message interrupt 2638 ** 2639 ** The only message interrupt we expect is in response to a query for the current adapter config. 2640 ** We want this in order to compare the drivemap so that we can detect newly-attached drives. 2641 ********************************************************************************** 2642 */ 2643 static void arcmsr_hbaA_message_isr(struct AdapterControlBlock *acb) 2644 { 2645 struct MessageUnit_A __iomem *reg = acb->pmuA; 2646 /*clear interrupt and message state*/ 2647 writel(ARCMSR_MU_OUTBOUND_MESSAGE0_INT, ®->outbound_intstatus); 2648 if (acb->acb_flags & ACB_F_MSG_GET_CONFIG) 2649 schedule_work(&acb->arcmsr_do_message_isr_bh); 2650 } 2651 static void arcmsr_hbaB_message_isr(struct AdapterControlBlock *acb) 2652 { 2653 struct MessageUnit_B *reg = acb->pmuB; 2654 2655 /*clear interrupt and message state*/ 2656 writel(ARCMSR_MESSAGE_INT_CLEAR_PATTERN, reg->iop2drv_doorbell); 2657 if (acb->acb_flags & ACB_F_MSG_GET_CONFIG) 2658 schedule_work(&acb->arcmsr_do_message_isr_bh); 2659 } 2660 /* 2661 ********************************************************************************** 2662 ** Handle a message interrupt 2663 ** 2664 ** The only message interrupt we expect is in response to a query for the 2665 ** current adapter config. 2666 ** We want this in order to compare the drivemap so that we can detect newly-attached drives. 
2667 ********************************************************************************** 2668 */ 2669 static void arcmsr_hbaC_message_isr(struct AdapterControlBlock *acb) 2670 { 2671 struct MessageUnit_C __iomem *reg = acb->pmuC; 2672 /*clear interrupt and message state*/ 2673 writel(ARCMSR_HBCMU_IOP2DRV_MESSAGE_CMD_DONE_DOORBELL_CLEAR, ®->outbound_doorbell_clear); 2674 if (acb->acb_flags & ACB_F_MSG_GET_CONFIG) 2675 schedule_work(&acb->arcmsr_do_message_isr_bh); 2676 } 2677 2678 static void arcmsr_hbaD_message_isr(struct AdapterControlBlock *acb) 2679 { 2680 struct MessageUnit_D *reg = acb->pmuD; 2681 2682 writel(ARCMSR_ARC1214_IOP2DRV_MESSAGE_CMD_DONE, reg->outbound_doorbell); 2683 readl(reg->outbound_doorbell); 2684 if (acb->acb_flags & ACB_F_MSG_GET_CONFIG) 2685 schedule_work(&acb->arcmsr_do_message_isr_bh); 2686 } 2687 2688 static void arcmsr_hbaE_message_isr(struct AdapterControlBlock *acb) 2689 { 2690 struct MessageUnit_E __iomem *reg = acb->pmuE; 2691 2692 writel(0, ®->host_int_status); 2693 if (acb->acb_flags & ACB_F_MSG_GET_CONFIG) 2694 schedule_work(&acb->arcmsr_do_message_isr_bh); 2695 } 2696 2697 static int arcmsr_hbaA_handle_isr(struct AdapterControlBlock *acb) 2698 { 2699 uint32_t outbound_intstatus; 2700 struct MessageUnit_A __iomem *reg = acb->pmuA; 2701 outbound_intstatus = readl(®->outbound_intstatus) & 2702 acb->outbound_int_enable; 2703 if (!(outbound_intstatus & ARCMSR_MU_OUTBOUND_HANDLE_INT)) 2704 return IRQ_NONE; 2705 do { 2706 writel(outbound_intstatus, ®->outbound_intstatus); 2707 if (outbound_intstatus & ARCMSR_MU_OUTBOUND_DOORBELL_INT) 2708 arcmsr_hbaA_doorbell_isr(acb); 2709 if (outbound_intstatus & ARCMSR_MU_OUTBOUND_POSTQUEUE_INT) 2710 arcmsr_hbaA_postqueue_isr(acb); 2711 if (outbound_intstatus & ARCMSR_MU_OUTBOUND_MESSAGE0_INT) 2712 arcmsr_hbaA_message_isr(acb); 2713 outbound_intstatus = readl(®->outbound_intstatus) & 2714 acb->outbound_int_enable; 2715 } while (outbound_intstatus & (ARCMSR_MU_OUTBOUND_DOORBELL_INT 2716 | 
ARCMSR_MU_OUTBOUND_POSTQUEUE_INT
		| ARCMSR_MU_OUTBOUND_MESSAGE0_INT));
	return IRQ_HANDLED;
}

/*
** Interrupt service for Type B (HBB) adapters: ack the iop2drv doorbell,
** signal end-of-interrupt back to the IOP, then dispatch to the data
** write/read, post-queue and message handlers; loop while any of the
** handled doorbell bits remain asserted.
** NOTE(review): returns int while the Type D/E/F ISRs return irqreturn_t;
** the IRQ_* values are compatible, but the type is inconsistent.
*/
static int arcmsr_hbaB_handle_isr(struct AdapterControlBlock *acb)
{
	uint32_t outbound_doorbell;
	struct MessageUnit_B *reg = acb->pmuB;

	outbound_doorbell = readl(reg->iop2drv_doorbell) &
				acb->outbound_int_enable;
	if (!outbound_doorbell)
		return IRQ_NONE;
	do {
		/* ack exactly the bits we saw, then tell the IOP we are done */
		writel(~outbound_doorbell, reg->iop2drv_doorbell);
		writel(ARCMSR_DRV2IOP_END_OF_INTERRUPT, reg->drv2iop_doorbell);
		if (outbound_doorbell & ARCMSR_IOP2DRV_DATA_WRITE_OK)
			arcmsr_iop2drv_data_wrote_handle(acb);
		if (outbound_doorbell & ARCMSR_IOP2DRV_DATA_READ_OK)
			arcmsr_iop2drv_data_read_handle(acb);
		if (outbound_doorbell & ARCMSR_IOP2DRV_CDB_DONE)
			arcmsr_hbaB_postqueue_isr(acb);
		if (outbound_doorbell & ARCMSR_IOP2DRV_MESSAGE_CMD_DONE)
			arcmsr_hbaB_message_isr(acb);
		outbound_doorbell = readl(reg->iop2drv_doorbell) &
			acb->outbound_int_enable;
	} while (outbound_doorbell & (ARCMSR_IOP2DRV_DATA_WRITE_OK
		| ARCMSR_IOP2DRV_DATA_READ_OK
		| ARCMSR_IOP2DRV_CDB_DONE
		| ARCMSR_IOP2DRV_MESSAGE_CMD_DONE));
	return IRQ_HANDLED;
}

/*
** Interrupt service for Type C (HBC) adapters: poll host_int_status and
** dispatch doorbell/post-queue work until both ISR bits clear.
*/
static int arcmsr_hbaC_handle_isr(struct AdapterControlBlock *pACB)
{
	uint32_t host_interrupt_status;
	struct MessageUnit_C __iomem *phbcmu = pACB->pmuC;
	/*
	*********************************************
	**   check outbound intstatus
	*********************************************
	*/
	host_interrupt_status = readl(&phbcmu->host_int_status) &
		(ARCMSR_HBCMU_OUTBOUND_POSTQUEUE_ISR |
		ARCMSR_HBCMU_OUTBOUND_DOORBELL_ISR);
	if (!host_interrupt_status)
		return IRQ_NONE;
	do {
		if (host_interrupt_status & ARCMSR_HBCMU_OUTBOUND_DOORBELL_ISR)
			arcmsr_hbaC_doorbell_isr(pACB);
		/* MU post queue interrupts*/
		if (host_interrupt_status & ARCMSR_HBCMU_OUTBOUND_POSTQUEUE_ISR)
			arcmsr_hbaC_postqueue_isr(pACB);
		host_interrupt_status = readl(&phbcmu->host_int_status);
	} while (host_interrupt_status & (ARCMSR_HBCMU_OUTBOUND_POSTQUEUE_ISR |
		ARCMSR_HBCMU_OUTBOUND_DOORBELL_ISR));
	return IRQ_HANDLED;
}

/*
** Interrupt service for Type D (ARC1214) adapters: same poll-and-dispatch
** pattern as Type C, with post-queue work handled before the doorbell.
*/
static irqreturn_t arcmsr_hbaD_handle_isr(struct AdapterControlBlock *pACB)
{
	u32 host_interrupt_status;
	struct MessageUnit_D *pmu = pACB->pmuD;

	host_interrupt_status = readl(pmu->host_int_status) &
		(ARCMSR_ARC1214_OUTBOUND_POSTQUEUE_ISR |
		ARCMSR_ARC1214_OUTBOUND_DOORBELL_ISR);
	if (!host_interrupt_status)
		return IRQ_NONE;
	do {
		/* MU post queue interrupts*/
		if (host_interrupt_status &
			ARCMSR_ARC1214_OUTBOUND_POSTQUEUE_ISR)
			arcmsr_hbaD_postqueue_isr(pACB);
		if (host_interrupt_status &
			ARCMSR_ARC1214_OUTBOUND_DOORBELL_ISR)
			arcmsr_hbaD_doorbell_isr(pACB);
		host_interrupt_status = readl(pmu->host_int_status);
	} while (host_interrupt_status &
		(ARCMSR_ARC1214_OUTBOUND_POSTQUEUE_ISR |
		ARCMSR_ARC1214_OUTBOUND_DOORBELL_ISR));
	return IRQ_HANDLED;
}

/*
** Interrupt service for Type E (HBE) adapters.
*/
static irqreturn_t arcmsr_hbaE_handle_isr(struct AdapterControlBlock *pACB)
{
	uint32_t host_interrupt_status;
	struct MessageUnit_E __iomem *pmu = pACB->pmuE;

	host_interrupt_status = readl(&pmu->host_int_status) &
		(ARCMSR_HBEMU_OUTBOUND_POSTQUEUE_ISR |
		ARCMSR_HBEMU_OUTBOUND_DOORBELL_ISR);
	if (!host_interrupt_status)
		return IRQ_NONE;
	do {
		/* MU ioctl transfer doorbell interrupts*/
		if (host_interrupt_status & ARCMSR_HBEMU_OUTBOUND_DOORBELL_ISR) {
			arcmsr_hbaE_doorbell_isr(pACB);
		}
		/* MU post queue interrupts*/
		if (host_interrupt_status & ARCMSR_HBEMU_OUTBOUND_POSTQUEUE_ISR) {
			arcmsr_hbaE_postqueue_isr(pACB);
		}
		host_interrupt_status = readl(&pmu->host_int_status);
	} while (host_interrupt_status & (ARCMSR_HBEMU_OUTBOUND_POSTQUEUE_ISR |
		ARCMSR_HBEMU_OUTBOUND_DOORBELL_ISR));
	return IRQ_HANDLED;
}

/*
** Interrupt service for Type F (HBF) adapters. Type F reuses the Type E
** interrupt mask bits and doorbell handler; only the post-queue handler
** is F-specific.
*/
static irqreturn_t arcmsr_hbaF_handle_isr(struct AdapterControlBlock *pACB)
{
	uint32_t host_interrupt_status;
	struct MessageUnit_F __iomem *phbcmu = pACB->pmuF;

	host_interrupt_status = readl(&phbcmu->host_int_status) &
		(ARCMSR_HBEMU_OUTBOUND_POSTQUEUE_ISR |
		ARCMSR_HBEMU_OUTBOUND_DOORBELL_ISR);
	if (!host_interrupt_status)
		return IRQ_NONE;
	do {
		/* MU post queue interrupts*/
		if (host_interrupt_status & ARCMSR_HBEMU_OUTBOUND_POSTQUEUE_ISR)
			arcmsr_hbaF_postqueue_isr(pACB);

		/* MU ioctl transfer doorbell interrupts*/
		if (host_interrupt_status & ARCMSR_HBEMU_OUTBOUND_DOORBELL_ISR)
			arcmsr_hbaE_doorbell_isr(pACB);

		host_interrupt_status = readl(&phbcmu->host_int_status);
	} while (host_interrupt_status & (ARCMSR_HBEMU_OUTBOUND_POSTQUEUE_ISR |
		ARCMSR_HBEMU_OUTBOUND_DOORBELL_ISR));
	return IRQ_HANDLED;
}

/*
** Top-level interrupt demultiplexer: route to the per-adapter-type ISR.
*/
static irqreturn_t arcmsr_interrupt(struct AdapterControlBlock *acb)
{
	switch (acb->adapter_type) {
	case ACB_ADAPTER_TYPE_A:
		return arcmsr_hbaA_handle_isr(acb);
	case ACB_ADAPTER_TYPE_B:
		return arcmsr_hbaB_handle_isr(acb);
	case ACB_ADAPTER_TYPE_C:
		return arcmsr_hbaC_handle_isr(acb);
	case ACB_ADAPTER_TYPE_D:
		return arcmsr_hbaD_handle_isr(acb);
	case ACB_ADAPTER_TYPE_E:
		return arcmsr_hbaE_handle_isr(acb);
	case ACB_ADAPTER_TYPE_F:
		return arcmsr_hbaF_handle_isr(acb);
	default:
		return IRQ_NONE;
	}
}

/*
** Park the IOP: if background rebuild was started, stop it and flush the
** adapter cache, with outbound interrupts masked for the duration.
*/
static void arcmsr_iop_parking(struct AdapterControlBlock *acb)
{
	if (acb) {
		/* stop adapter background rebuild */
		if (acb->acb_flags & ACB_F_MSG_START_BGRB) {
			uint32_t intmask_org;
			acb->acb_flags &= ~ACB_F_MSG_START_BGRB;
			intmask_org = arcmsr_disable_outbound_ints(acb);
			arcmsr_stop_adapter_bgrb(acb);
			arcmsr_flush_adapter_cache(acb);
			arcmsr_enable_outbound_ints(acb, intmask_org);
		}
	}
}


/*
** Drain the IOP-to-driver request-queue buffer. While the overflow flag
** is set, reset the circular-buffer indices and re-arm the IOP read, up
** to 15 iterations with a 30 ms settle delay each; stop early once the
** buffer is empty and no overflow is pending.
*/
void arcmsr_clear_iop2drv_rqueue_buffer(struct AdapterControlBlock *acb)
{
	uint32_t i;

	if (acb->acb_flags & ACB_F_IOPDATA_OVERFLOW) {
		for (i = 0; i < 15; i++) {
			if (acb->acb_flags & ACB_F_IOPDATA_OVERFLOW) {
				acb->acb_flags &= ~ACB_F_IOPDATA_OVERFLOW;
				acb->rqbuf_getIndex = 0;
				acb->rqbuf_putIndex = 0;
				arcmsr_iop_message_read(acb);
				mdelay(30);
			} else if (acb->rqbuf_getIndex !=
				   acb->rqbuf_putIndex) {
				acb->rqbuf_getIndex = 0;
				acb->rqbuf_putIndex = 0;
				mdelay(30);
			} else
				break;
		}
	}
}

/*
** Service a pass-through message command (WRITE_BUFFER/READ_BUFFER to the
** virtual device). The control code is decoded from CDB bytes 5..8; the
** single scatterlist segment is mapped atomically and interpreted as a
** struct CMD_MESSAGE_FIELD. Multi-segment requests and transfers larger
** than CMD_MESSAGE_FIELD are rejected. Returns 0 on success or
** ARCMSR_MESSAGE_FAIL.
** NOTE(review): the page is kmapped before the use_sg > 1 check; the
** kunmap at message_out is guarded by use_sg, so a use_sg == 0 request
** would leave the mapping held — confirm whether use_sg == 0 can reach
** here in practice.
*/
static int arcmsr_iop_message_xfer(struct AdapterControlBlock *acb,
		struct scsi_cmnd *cmd)
{
	char *buffer;
	unsigned short use_sg;
	int retvalue = 0, transfer_len = 0;
	unsigned long flags;
	struct CMD_MESSAGE_FIELD *pcmdmessagefld;
	/* control code lives in CDB bytes 5..8, big-endian */
	uint32_t controlcode = (uint32_t)cmd->cmnd[5] << 24 |
		(uint32_t)cmd->cmnd[6] << 16 |
		(uint32_t)cmd->cmnd[7] << 8 |
		(uint32_t)cmd->cmnd[8];
	struct scatterlist *sg;

	use_sg = scsi_sg_count(cmd);
	sg = scsi_sglist(cmd);
	buffer = kmap_atomic(sg_page(sg)) + sg->offset;
	if (use_sg > 1) {
		retvalue = ARCMSR_MESSAGE_FAIL;
		goto message_out;
	}
	transfer_len += sg->length;
	if (transfer_len > sizeof(struct CMD_MESSAGE_FIELD)) {
		retvalue = ARCMSR_MESSAGE_FAIL;
		pr_info("%s: ARCMSR_MESSAGE_FAIL!\n", __func__);
		goto message_out;
	}
	pcmdmessagefld = (struct CMD_MESSAGE_FIELD *)buffer;
	switch (controlcode) {
	case ARCMSR_MESSAGE_READ_RQBUFFER: {
		/* copy pending IOP->driver data out of the circular rqbuffer */
		unsigned char *ver_addr;
		uint8_t *ptmpQbuffer;
		uint32_t allxfer_len = 0;
		ver_addr = kmalloc(ARCMSR_API_DATA_BUFLEN, GFP_ATOMIC);
		if (!ver_addr) {
			retvalue = ARCMSR_MESSAGE_FAIL;
			pr_info("%s: memory not enough!\n", __func__);
			goto message_out;
		}
		ptmpQbuffer = ver_addr;
		spin_lock_irqsave(&acb->rqbuffer_lock, flags);
		if (acb->rqbuf_getIndex != acb->rqbuf_putIndex) {
			unsigned int tail = acb->rqbuf_getIndex;
			unsigned int head = acb->rqbuf_putIndex;
			unsigned int cnt_to_end = CIRC_CNT_TO_END(head, tail, ARCMSR_MAX_QBUFFER);

			allxfer_len = CIRC_CNT(head, tail, ARCMSR_MAX_QBUFFER);
			if (allxfer_len > ARCMSR_API_DATA_BUFLEN)
				allxfer_len = ARCMSR_API_DATA_BUFLEN;

			/* copy in one or two chunks depending on wrap-around */
			if (allxfer_len <= cnt_to_end)
				memcpy(ptmpQbuffer, acb->rqbuffer + tail, allxfer_len);
			else {
				memcpy(ptmpQbuffer, acb->rqbuffer + tail, cnt_to_end);
				memcpy(ptmpQbuffer + cnt_to_end, acb->rqbuffer, allxfer_len - cnt_to_end);
			}
			acb->rqbuf_getIndex = (acb->rqbuf_getIndex + allxfer_len) % ARCMSR_MAX_QBUFFER;
		}
		memcpy(pcmdmessagefld->messagedatabuffer, ver_addr,
			allxfer_len);
		if (acb->acb_flags & ACB_F_IOPDATA_OVERFLOW) {
			/* room was freed: try to pull more data from the IOP */
			struct QBUFFER __iomem *prbuffer;
			acb->acb_flags &= ~ACB_F_IOPDATA_OVERFLOW;
			prbuffer = arcmsr_get_iop_rqbuffer(acb);
			if (arcmsr_Read_iop_rqbuffer_data(acb, prbuffer) == 0)
				acb->acb_flags |= ACB_F_IOPDATA_OVERFLOW;
		}
		spin_unlock_irqrestore(&acb->rqbuffer_lock, flags);
		kfree(ver_addr);
		pcmdmessagefld->cmdmessage.Length = allxfer_len;
		if (acb->fw_flag == FW_DEADLOCK)
			pcmdmessagefld->cmdmessage.ReturnCode =
				ARCMSR_MESSAGE_RETURNCODE_BUS_HANG_ON;
		else
			pcmdmessagefld->cmdmessage.ReturnCode =
				ARCMSR_MESSAGE_RETURNCODE_OK;
		break;
	}
	case ARCMSR_MESSAGE_WRITE_WQBUFFER: {
		/* queue user data into the driver->IOP circular wqbuffer */
		unsigned char *ver_addr;
		uint32_t user_len;
		int32_t cnt2end;
		uint8_t *pQbuffer, *ptmpuserbuffer;

		user_len = pcmdmessagefld->cmdmessage.Length;
		if (user_len > ARCMSR_API_DATA_BUFLEN) {
			retvalue = ARCMSR_MESSAGE_FAIL;
			goto message_out;
		}

		ver_addr = kmalloc(ARCMSR_API_DATA_BUFLEN, GFP_ATOMIC);
		if (!ver_addr) {
			retvalue = ARCMSR_MESSAGE_FAIL;
			goto message_out;
		}
		ptmpuserbuffer = ver_addr;

		memcpy(ptmpuserbuffer,
			pcmdmessagefld->messagedatabuffer, user_len);
		spin_lock_irqsave(&acb->wqbuffer_lock, flags);
		if (acb->wqbuf_putIndex != acb->wqbuf_getIndex) {
			/* previous write still pending: kick it out and
			 * report ILLEGAL_REQUEST sense to the caller */
			struct SENSE_DATA *sensebuffer =
				(struct SENSE_DATA *)cmd->sense_buffer;
			arcmsr_write_ioctldata2iop(acb);
			/* has error report sensedata */
			sensebuffer->ErrorCode = SCSI_SENSE_CURRENT_ERRORS;
			sensebuffer->SenseKey = ILLEGAL_REQUEST;
			sensebuffer->AdditionalSenseLength = 0x0A;
			sensebuffer->AdditionalSenseCode = 0x20;
			sensebuffer->Valid = 1;
			retvalue = ARCMSR_MESSAGE_FAIL;
		} else {
			pQbuffer = &acb->wqbuffer[acb->wqbuf_putIndex];
			cnt2end = ARCMSR_MAX_QBUFFER - acb->wqbuf_putIndex;
			if (user_len > cnt2end) {
				/* wrap: fill to end of ring, continue at 0 */
				memcpy(pQbuffer, ptmpuserbuffer, cnt2end);
				ptmpuserbuffer += cnt2end;
				user_len -= cnt2end;
				acb->wqbuf_putIndex = 0;
				pQbuffer = acb->wqbuffer;
			}
			memcpy(pQbuffer, ptmpuserbuffer, user_len);
			acb->wqbuf_putIndex += user_len;
			acb->wqbuf_putIndex %= ARCMSR_MAX_QBUFFER;
			if (acb->acb_flags & ACB_F_MESSAGE_WQBUFFER_CLEARED) {
				acb->acb_flags &=
					~ACB_F_MESSAGE_WQBUFFER_CLEARED;
				arcmsr_write_ioctldata2iop(acb);
			}
		}
		spin_unlock_irqrestore(&acb->wqbuffer_lock, flags);
		kfree(ver_addr);
		if (acb->fw_flag == FW_DEADLOCK)
			pcmdmessagefld->cmdmessage.ReturnCode =
				ARCMSR_MESSAGE_RETURNCODE_BUS_HANG_ON;
		else
			pcmdmessagefld->cmdmessage.ReturnCode =
				ARCMSR_MESSAGE_RETURNCODE_OK;
		break;
	}
	case ARCMSR_MESSAGE_CLEAR_RQBUFFER: {
		/* reset the IOP->driver ring to empty */
		uint8_t *pQbuffer = acb->rqbuffer;

		arcmsr_clear_iop2drv_rqueue_buffer(acb);
		spin_lock_irqsave(&acb->rqbuffer_lock, flags);
		acb->acb_flags |= ACB_F_MESSAGE_RQBUFFER_CLEARED;
		acb->rqbuf_getIndex = 0;
		acb->rqbuf_putIndex = 0;
		memset(pQbuffer, 0, ARCMSR_MAX_QBUFFER);
		spin_unlock_irqrestore(&acb->rqbuffer_lock, flags);
		if (acb->fw_flag == FW_DEADLOCK)
			pcmdmessagefld->cmdmessage.ReturnCode =
				ARCMSR_MESSAGE_RETURNCODE_BUS_HANG_ON;
		else
			pcmdmessagefld->cmdmessage.ReturnCode =
				ARCMSR_MESSAGE_RETURNCODE_OK;
		break;
	}
	case ARCMSR_MESSAGE_CLEAR_WQBUFFER: {
		/* reset the driver->IOP ring to empty */
		uint8_t *pQbuffer = acb->wqbuffer;

		spin_lock_irqsave(&acb->wqbuffer_lock, flags);
		acb->acb_flags |= (ACB_F_MESSAGE_WQBUFFER_CLEARED |
			ACB_F_MESSAGE_WQBUFFER_READED);
		acb->wqbuf_getIndex = 0;
		acb->wqbuf_putIndex = 0;
		memset(pQbuffer, 0, ARCMSR_MAX_QBUFFER);
		spin_unlock_irqrestore(&acb->wqbuffer_lock, flags);
		if (acb->fw_flag == FW_DEADLOCK)
			pcmdmessagefld->cmdmessage.ReturnCode =
				ARCMSR_MESSAGE_RETURNCODE_BUS_HANG_ON;
		else
			pcmdmessagefld->cmdmessage.ReturnCode =
				ARCMSR_MESSAGE_RETURNCODE_OK;
		break;
	}
	case ARCMSR_MESSAGE_CLEAR_ALLQBUFFER: {
		/* reset both rings */
		uint8_t *pQbuffer;

		arcmsr_clear_iop2drv_rqueue_buffer(acb);
		spin_lock_irqsave(&acb->rqbuffer_lock, flags);
		acb->acb_flags |= ACB_F_MESSAGE_RQBUFFER_CLEARED;
		acb->rqbuf_getIndex = 0;
		acb->rqbuf_putIndex = 0;
		pQbuffer = acb->rqbuffer;
		memset(pQbuffer, 0, sizeof(struct QBUFFER));
		spin_unlock_irqrestore(&acb->rqbuffer_lock, flags);
		spin_lock_irqsave(&acb->wqbuffer_lock, flags);
		acb->acb_flags |= (ACB_F_MESSAGE_WQBUFFER_CLEARED |
			ACB_F_MESSAGE_WQBUFFER_READED);
		acb->wqbuf_getIndex = 0;
		acb->wqbuf_putIndex = 0;
		pQbuffer = acb->wqbuffer;
		memset(pQbuffer, 0, sizeof(struct QBUFFER));
		spin_unlock_irqrestore(&acb->wqbuffer_lock, flags);
		if (acb->fw_flag == FW_DEADLOCK)
			pcmdmessagefld->cmdmessage.ReturnCode =
				ARCMSR_MESSAGE_RETURNCODE_BUS_HANG_ON;
		else
			pcmdmessagefld->cmdmessage.ReturnCode =
				ARCMSR_MESSAGE_RETURNCODE_OK;
		break;
	}
	case ARCMSR_MESSAGE_RETURN_CODE_3F: {
		if (acb->fw_flag == FW_DEADLOCK)
			pcmdmessagefld->cmdmessage.ReturnCode =
				ARCMSR_MESSAGE_RETURNCODE_BUS_HANG_ON;
		else
			pcmdmessagefld->cmdmessage.ReturnCode =
				ARCMSR_MESSAGE_RETURNCODE_3F;
		break;
	}
	case ARCMSR_MESSAGE_SAY_HELLO: {
		int8_t *hello_string = "Hello! I am ARCMSR";

		if (acb->fw_flag == FW_DEADLOCK)
			pcmdmessagefld->cmdmessage.ReturnCode =
				ARCMSR_MESSAGE_RETURNCODE_BUS_HANG_ON;
		else
			pcmdmessagefld->cmdmessage.ReturnCode =
				ARCMSR_MESSAGE_RETURNCODE_OK;
		memcpy(pcmdmessagefld->messagedatabuffer,
			hello_string, (int16_t)strlen(hello_string));
		break;
	}
	case ARCMSR_MESSAGE_SAY_GOODBYE: {
		if (acb->fw_flag == FW_DEADLOCK)
			pcmdmessagefld->cmdmessage.ReturnCode =
				ARCMSR_MESSAGE_RETURNCODE_BUS_HANG_ON;
		else
			pcmdmessagefld->cmdmessage.ReturnCode =
				ARCMSR_MESSAGE_RETURNCODE_OK;
		arcmsr_iop_parking(acb);
		break;
	}
	case ARCMSR_MESSAGE_FLUSH_ADAPTER_CACHE: {
		if (acb->fw_flag == FW_DEADLOCK)
			pcmdmessagefld->cmdmessage.ReturnCode =
				ARCMSR_MESSAGE_RETURNCODE_BUS_HANG_ON;
		else
			pcmdmessagefld->cmdmessage.ReturnCode =
				ARCMSR_MESSAGE_RETURNCODE_OK;
		arcmsr_flush_adapter_cache(acb);
		break;
	}
	default:
		retvalue = ARCMSR_MESSAGE_FAIL;
		pr_info("%s: unknown controlcode!\n", __func__);
	}
message_out:
	if (use_sg) {
		struct scatterlist *sg = scsi_sglist(cmd);

		kunmap_atomic(buffer - sg->offset);
	}
	return retvalue;
}

/*
** Pop a free CCB from the free list under ccblist_lock; NULL if exhausted.
*/
static struct CommandControlBlock *arcmsr_get_freeccb(struct AdapterControlBlock *acb)
{
	struct list_head *head;
	struct CommandControlBlock *ccb = NULL;
	unsigned long flags;

	spin_lock_irqsave(&acb->ccblist_lock, flags);
	head = &acb->ccb_free_list;
	if (!list_empty(head)) {
		ccb = list_entry(head->next, struct CommandControlBlock, list);
		list_del_init(&ccb->list);
	} else {
		spin_unlock_irqrestore(&acb->ccblist_lock, flags);
		return NULL;
	}
	spin_unlock_irqrestore(&acb->ccblist_lock, flags);
	return ccb;
}

/*
** Emulate the virtual device (target 16) used for IOP message transfer:
** answer INQUIRY locally, route WRITE_BUFFER/READ_BUFFER to
** arcmsr_iop_message_xfer(), and complete everything else untouched.
*/
static void arcmsr_handle_virtual_command(struct AdapterControlBlock *acb,
		struct scsi_cmnd *cmd)
{
	switch (cmd->cmnd[0]) {
	case INQUIRY: {
		unsigned char inqdata[36];
		char *buffer;
		struct scatterlist *sg;

		if (cmd->device->lun) {
			cmd->result = (DID_TIME_OUT << 16);
			scsi_done(cmd);
			return;
		}
		/* NOTE(review): inqdata bytes 3 and 5..7 are never written
		 * before the copy below — uninitialized stack data is
		 * returned in those positions; confirm intended. */
		inqdata[0] = TYPE_PROCESSOR;
		/* Periph Qualifier & Periph Dev Type */
		inqdata[1] = 0;
		/* rem media bit & Dev Type Modifier */
		inqdata[2] = 0;
		/* ISO, ECMA, & ANSI versions */
		inqdata[4] = 31;
		/* length of additional data */
		memcpy(&inqdata[8], "Areca   ", 8);
		/* Vendor Identification */
		memcpy(&inqdata[16], "RAID controller ", 16);
		/* Product Identification */
		memcpy(&inqdata[32], "R001", 4); /* Product Revision */

		sg = scsi_sglist(cmd);
		buffer = kmap_atomic(sg_page(sg)) + sg->offset;

		memcpy(buffer, inqdata, sizeof(inqdata));
		sg = scsi_sglist(cmd);
		kunmap_atomic(buffer - sg->offset);

		scsi_done(cmd);
	}
	break;
	case WRITE_BUFFER:
	case READ_BUFFER: {
		if (arcmsr_iop_message_xfer(acb, cmd))
			cmd->result = (DID_ERROR << 16);
		scsi_done(cmd);
	}
	break;
	default:
		scsi_done(cmd);
	}
}

/*
** SCSI midlayer queuecommand entry (locked variant, wrapped by
** DEF_SCSI_QCMD below): reject commands on a removed adapter, divert
** target 16 to the virtual message device, otherwise build a CCB and
** post it to the IOP. Returns SCSI_MLQUEUE_HOST_BUSY when no free CCB.
*/
static int arcmsr_queue_command_lck(struct scsi_cmnd *cmd)
{
	struct Scsi_Host *host = cmd->device->host;
	struct AdapterControlBlock *acb = (struct AdapterControlBlock *) host->hostdata;
	struct CommandControlBlock *ccb;
	int target = cmd->device->id;

	if (acb->acb_flags & ACB_F_ADAPTER_REMOVED) {
		cmd->result = (DID_NO_CONNECT << 16);
		scsi_done(cmd);
		return 0;
	}
	cmd->host_scribble = NULL;
	cmd->result = 0;
	if (target == 16) {
		/* virtual device for iop message transfer */
		arcmsr_handle_virtual_command(acb, cmd);
		return 0;
	}
	ccb = arcmsr_get_freeccb(acb);
	if (!ccb)
		return SCSI_MLQUEUE_HOST_BUSY;
	if (arcmsr_build_ccb( acb, ccb, cmd ) == FAILED) {
		cmd->result = (DID_ERROR << 16) | SAM_STAT_RESERVATION_CONFLICT;
		scsi_done(cmd);
		return 0;
	}
	arcmsr_post_ccb(acb, ccb);
	return 0;
}

static DEF_SCSI_QCMD(arcmsr_queue_command)

/*
** Per-device setup: stretch the request timeout to the module's
** cmd_timeout (seconds) when it exceeds the block layer's current value.
*/
static int arcmsr_slave_config(struct scsi_device *sdev)
{
	unsigned int dev_timeout;

	dev_timeout = sdev->request_queue->rq_timeout;
	if ((cmd_timeout > 0) && ((cmd_timeout * HZ) > dev_timeout))
		blk_queue_rq_timeout(sdev->request_queue, cmd_timeout * HZ);
	return 0;
}

/*
** Copy the firmware's "get config" reply out of the message rwbuffer:
** model (2 words at [15]), version (4 words at [17]), device map
** (4 words at [21]) plus the scalar config fields, then log the model
** and firmware version. rwbuffer is iomem, hence the readl() accessors.
*/
static void arcmsr_get_adapter_config(struct AdapterControlBlock *pACB, uint32_t *rwbuffer)
{
	int count;
	uint32_t *acb_firm_model = (uint32_t *)pACB->firm_model;
	uint32_t *acb_firm_version = (uint32_t *)pACB->firm_version;
	uint32_t *acb_device_map = (uint32_t *)pACB->device_map;
	uint32_t *firm_model = &rwbuffer[15];
	uint32_t *firm_version = &rwbuffer[17];
	uint32_t *device_map = &rwbuffer[21];

	count = 2;
	while (count) {
		*acb_firm_model = readl(firm_model);
		acb_firm_model++;
		firm_model++;
		count--;
	}
	count = 4;
	while (count) {
		*acb_firm_version = readl(firm_version);
		acb_firm_version++;
		firm_version++;
		count--;
	}
	count = 4;
	while (count) {
		*acb_device_map = readl(device_map);
		acb_device_map++;
		device_map++;
		count--;
	}
	pACB->signature = readl(&rwbuffer[0]);
	pACB->firm_request_len = readl(&rwbuffer[1]);
	pACB->firm_numbers_queue = readl(&rwbuffer[2]);
	pACB->firm_sdram_size = readl(&rwbuffer[3]);
	pACB->firm_hd_channels = readl(&rwbuffer[4]);
	pACB->firm_cfg_version = readl(&rwbuffer[25]);
	pr_notice("Areca RAID Controller%d: Model %s, F/W %s\n",
		pACB->host->host_no,
		pACB->firm_model,
		pACB->firm_version);
}

/*
** Type A: wait for firmware, post GET_CONFIG via inbound_msgaddr0 and
** harvest the reply.
*/
static bool arcmsr_hbaA_get_config(struct AdapterControlBlock *acb)
{
	struct MessageUnit_A __iomem *reg = acb->pmuA;

	arcmsr_wait_firmware_ready(acb);
	writel(ARCMSR_INBOUND_MESG0_GET_CONFIG, &reg->inbound_msgaddr0);
	if (!arcmsr_hbaA_wait_msgint_ready(acb)) {
		printk(KERN_NOTICE "arcmsr%d: wait 'get adapter firmware \
			miscellaneous data' timeout \n", acb->host->host_no);
		return false;
	}
	arcmsr_get_adapter_config(acb, reg->message_rwbuffer);
	return true;
}
/*
** Type B: enter driver mode first, then issue GET_CONFIG through the
** drv2iop doorbell.
*/
static bool arcmsr_hbaB_get_config(struct AdapterControlBlock *acb)
{
	struct MessageUnit_B *reg = acb->pmuB;

	arcmsr_wait_firmware_ready(acb);
	writel(ARCMSR_MESSAGE_START_DRIVER_MODE, reg->drv2iop_doorbell);
	if (!arcmsr_hbaB_wait_msgint_ready(acb)) {
		printk(KERN_ERR "arcmsr%d: can't set driver mode.\n", acb->host->host_no);
		return false;
	}
	writel(ARCMSR_MESSAGE_GET_CONFIG, reg->drv2iop_doorbell);
	if (!arcmsr_hbaB_wait_msgint_ready(acb)) {
		printk(KERN_NOTICE "arcmsr%d: wait 'get adapter firmware \
			miscellaneous data' timeout \n", acb->host->host_no);
		return false;
	}
	arcmsr_get_adapter_config(acb, reg->message_rwbuffer);
	return true;
}

/*
** Type C: mask outbound interrupts, post GET_CONFIG and ring the inbound
** doorbell, then harvest the reply from msgcode_rwbuffer.
*/
static bool arcmsr_hbaC_get_config(struct AdapterControlBlock *pACB)
{
	uint32_t intmask_org;
	struct MessageUnit_C __iomem *reg = pACB->pmuC;

	/* disable all outbound interrupt */
	intmask_org = readl(&reg->host_int_mask); /* disable outbound message0 int */
	writel(intmask_org|ARCMSR_HBCMU_ALL_INTMASKENABLE, &reg->host_int_mask);
	/* wait firmware ready */
	arcmsr_wait_firmware_ready(pACB);
	/* post "get config" instruction */
	writel(ARCMSR_INBOUND_MESG0_GET_CONFIG, &reg->inbound_msgaddr0);
	writel(ARCMSR_HBCMU_DRV2IOP_MESSAGE_CMD_DONE, &reg->inbound_doorbell);
	/* wait message ready */
	if (!arcmsr_hbaC_wait_msgint_ready(pACB)) {
		printk(KERN_NOTICE "arcmsr%d: wait 'get adapter firmware \
			miscellaneous data' timeout \n", pACB->host->host_no);
		return false;
	}
	arcmsr_get_adapter_config(pACB, reg->msgcode_rwbuffer);
	return true;
}

/*
** Type D: clear a stale message-done doorbell, then post GET_CONFIG.
*/
static bool arcmsr_hbaD_get_config(struct AdapterControlBlock *acb)
{
	struct MessageUnit_D *reg = acb->pmuD;

	if (readl(acb->pmuD->outbound_doorbell) &
		ARCMSR_ARC1214_IOP2DRV_MESSAGE_CMD_DONE) {
		writel(ARCMSR_ARC1214_IOP2DRV_MESSAGE_CMD_DONE,
			acb->pmuD->outbound_doorbell);/*clear interrupt*/
	}
	arcmsr_wait_firmware_ready(acb);
	/* post "get config" instruction */
	writel(ARCMSR_INBOUND_MESG0_GET_CONFIG, reg->inbound_msgaddr0);
	/* wait message ready */
	if (!arcmsr_hbaD_wait_msgint_ready(acb)) {
		pr_notice("arcmsr%d: wait get adapter firmware "
			"miscellaneous data timeout\n", acb->host->host_no);
		return false;
	}
	arcmsr_get_adapter_config(acb, reg->msgcode_rwbuffer);
	return true;
}

/*
** Type E: mask interrupts, post GET_CONFIG, toggle the driver-side
** doorbell tracking bit in out_doorbell, then harvest the reply.
*/
static bool arcmsr_hbaE_get_config(struct AdapterControlBlock *pACB)
{
	struct MessageUnit_E __iomem *reg = pACB->pmuE;
	uint32_t intmask_org;

	/* disable all outbound interrupt */
	intmask_org = readl(&reg->host_int_mask); /* disable outbound message0 int */
	writel(intmask_org | ARCMSR_HBEMU_ALL_INTMASKENABLE, &reg->host_int_mask);
	/* wait firmware ready */
	arcmsr_wait_firmware_ready(pACB);
	mdelay(20);
	/* post "get config" instruction */
	writel(ARCMSR_INBOUND_MESG0_GET_CONFIG, &reg->inbound_msgaddr0);

	pACB->out_doorbell ^= ARCMSR_HBEMU_DRV2IOP_MESSAGE_CMD_DONE;
	writel(pACB->out_doorbell, &reg->iobound_doorbell);
	/* wait message ready */
	if (!arcmsr_hbaE_wait_msgint_ready(pACB)) {
		pr_notice("arcmsr%d: wait get adapter firmware "
			"miscellaneous data timeout\n", pACB->host->host_no);
		return false;
	}
	arcmsr_get_adapter_config(pACB, reg->msgcode_rwbuffer);
	return true;
}

/*
** Type F: same handshake as Type E (shared HBEMU doorbell bits and wait
** helper), but the reply lives in the host-memory msgcode_rwbuffer.
*/
static bool arcmsr_hbaF_get_config(struct AdapterControlBlock *pACB)
{
	struct MessageUnit_F __iomem *reg = pACB->pmuF;
	uint32_t intmask_org;

	/* disable all outbound interrupt */
	intmask_org = readl(&reg->host_int_mask); /* disable outbound message0 int */
	writel(intmask_org | ARCMSR_HBEMU_ALL_INTMASKENABLE, &reg->host_int_mask);
	/* wait firmware ready */
	arcmsr_wait_firmware_ready(pACB);
	/* post "get config" instruction */
	writel(ARCMSR_INBOUND_MESG0_GET_CONFIG, &reg->inbound_msgaddr0);

	pACB->out_doorbell ^= ARCMSR_HBEMU_DRV2IOP_MESSAGE_CMD_DONE;
	writel(pACB->out_doorbell, &reg->iobound_doorbell);
	/* wait message ready */
	if (!arcmsr_hbaE_wait_msgint_ready(pACB)) {
		pr_notice("arcmsr%d: wait get adapter firmware miscellaneous data timeout\n",
			pACB->host->host_no);
		return false;
	}
	arcmsr_get_adapter_config(pACB, pACB->msgcode_rwbuffer);
	return true;
}

/*
** Query firmware configuration for the adapter type, then clamp the host
** queue depth and free-CCB pool size to what the firmware reports.
*/
static bool arcmsr_get_firmware_spec(struct AdapterControlBlock *acb)
{
	bool rtn = false;

	switch (acb->adapter_type) {
	case ACB_ADAPTER_TYPE_A:
		rtn = arcmsr_hbaA_get_config(acb);
		break;
	case ACB_ADAPTER_TYPE_B:
		rtn = arcmsr_hbaB_get_config(acb);
		break;
	case ACB_ADAPTER_TYPE_C:
		rtn = arcmsr_hbaC_get_config(acb);
		break;
	case ACB_ADAPTER_TYPE_D:
		rtn = arcmsr_hbaD_get_config(acb);
		break;
	case ACB_ADAPTER_TYPE_E:
		rtn = arcmsr_hbaE_get_config(acb);
		break;
	case ACB_ADAPTER_TYPE_F:
		rtn = arcmsr_hbaF_get_config(acb);
		break;
	default:
		break;
	}
	acb->maxOutstanding = acb->firm_numbers_queue - 1;
	if (acb->host->can_queue >= acb->firm_numbers_queue)
		acb->host->can_queue = acb->maxOutstanding;
	else
		acb->maxOutstanding = acb->host->can_queue;
	acb->maxFreeCCB = acb->host->can_queue;
	if (acb->maxFreeCCB < ARCMSR_MAX_FREECCB_NUM)
		acb->maxFreeCCB += 64;
	return rtn;
}

/*
** Type A: poll the outbound completion queue until poll_ccb is seen.
** Retries every 25 ms, giving up (FAILED) after 100 polls; aborted or
** foreign CCBs encountered on the way are completed/logged and skipped.
*/
static int arcmsr_hbaA_polling_ccbdone(struct AdapterControlBlock *acb,
		struct CommandControlBlock *poll_ccb)
{
	struct MessageUnit_A __iomem *reg = acb->pmuA;
	struct CommandControlBlock *ccb;
	struct ARCMSR_CDB *arcmsr_cdb;
	uint32_t flag_ccb, outbound_intstatus, poll_ccb_done = 0, poll_count = 0;
	int rtn;
	bool error;
	unsigned long ccb_cdb_phy;

polling_hba_ccb_retry:
	poll_count++;
	outbound_intstatus = readl(&reg->outbound_intstatus) & acb->outbound_int_enable;
	writel(outbound_intstatus, &reg->outbound_intstatus);/*clear interrupt*/
	while (1) {
		if ((flag_ccb = readl(&reg->outbound_queueport)) == 0xFFFFFFFF) {
			if (poll_ccb_done){
				rtn = SUCCESS;
				break;
			}else {
				msleep(25);
				if (poll_count > 100){
					rtn = FAILED;
					break;
				}
				goto polling_hba_ccb_retry;
			}
		}
		/* flag_ccb encodes the CDB physical address >> 5 */
		ccb_cdb_phy = (flag_ccb << 5) & 0xffffffff;
		if (acb->cdb_phyadd_hipart)
			ccb_cdb_phy = ccb_cdb_phy | acb->cdb_phyadd_hipart;
		arcmsr_cdb = (struct ARCMSR_CDB *)(acb->vir2phy_offset + ccb_cdb_phy);
		ccb = container_of(arcmsr_cdb, struct CommandControlBlock, arcmsr_cdb);
		poll_ccb_done |= (ccb == poll_ccb) ? 1 : 0;
		if ((ccb->acb != acb) || (ccb->startdone != ARCMSR_CCB_START)) {
			if ((ccb->startdone == ARCMSR_CCB_ABORTED) || (ccb == poll_ccb)) {
				printk(KERN_NOTICE "arcmsr%d: scsi id = %d lun = %d ccb = '0x%p'"
					" poll command abort successfully \n"
					, acb->host->host_no
					, ccb->pcmd->device->id
					, (u32)ccb->pcmd->device->lun
					, ccb);
				ccb->pcmd->result = DID_ABORT << 16;
				arcmsr_ccb_complete(ccb);
				continue;
			}
			printk(KERN_NOTICE "arcmsr%d: polling get an illegal ccb"
				" command done ccb = '0x%p'"
				"ccboutstandingcount = %d \n"
				, acb->host->host_no
				, ccb
				, atomic_read(&acb->ccboutstandingcount));
			continue;
		}
		error = (flag_ccb & ARCMSR_CCBREPLY_FLAG_ERROR_MODE0) ? true : false;
		arcmsr_report_ccb_state(acb, ccb, error);
	}
	return rtn;
}

/*
** Type B: same polling loop over the host-memory done_qbuffer ring.
*/
static int arcmsr_hbaB_polling_ccbdone(struct AdapterControlBlock *acb,
		struct CommandControlBlock *poll_ccb)
{
	struct MessageUnit_B *reg = acb->pmuB;
	struct ARCMSR_CDB *arcmsr_cdb;
	struct CommandControlBlock *ccb;
	uint32_t flag_ccb, poll_ccb_done = 0, poll_count = 0;
	int index, rtn;
	bool error;
	unsigned long ccb_cdb_phy;

polling_hbb_ccb_retry:
	poll_count++;
	/* clear doorbell interrupt */
	writel(ARCMSR_DOORBELL_INT_CLEAR_PATTERN, reg->iop2drv_doorbell);
	while(1){
		index = reg->doneq_index;
		flag_ccb = reg->done_qbuffer[index];
		if (flag_ccb == 0) {
			if (poll_ccb_done){
				rtn = SUCCESS;
				break;
			}else {
				msleep(25);
				if (poll_count > 100){
					rtn = FAILED;
					break;
				}
				goto polling_hbb_ccb_retry;
			}
		}
		reg->done_qbuffer[index] = 0;
		index++;
		/*if last index number set it to 0 */
		index %= ARCMSR_MAX_HBB_POSTQUEUE;
		reg->doneq_index = index;
		/* check if command done with no error*/
		ccb_cdb_phy = (flag_ccb << 5) & 0xffffffff;
		if (acb->cdb_phyadd_hipart)
			ccb_cdb_phy = ccb_cdb_phy | acb->cdb_phyadd_hipart;
		arcmsr_cdb = (struct ARCMSR_CDB *)(acb->vir2phy_offset + ccb_cdb_phy);
		ccb = container_of(arcmsr_cdb, struct CommandControlBlock, arcmsr_cdb);
		poll_ccb_done |= (ccb == poll_ccb) ? 1 : 0;
		if ((ccb->acb != acb) || (ccb->startdone != ARCMSR_CCB_START)) {
			if ((ccb->startdone == ARCMSR_CCB_ABORTED) || (ccb == poll_ccb)) {
				printk(KERN_NOTICE "arcmsr%d: scsi id = %d lun = %d ccb = '0x%p'"
					" poll command abort successfully \n"
					,acb->host->host_no
					,ccb->pcmd->device->id
					,(u32)ccb->pcmd->device->lun
					,ccb);
				ccb->pcmd->result = DID_ABORT << 16;
				arcmsr_ccb_complete(ccb);
				continue;
			}
			printk(KERN_NOTICE "arcmsr%d: polling get an illegal ccb"
				" command done ccb = '0x%p'"
				"ccboutstandingcount = %d \n"
				, acb->host->host_no
				, ccb
				, atomic_read(&acb->ccboutstandingcount));
			continue;
		}
		error = (flag_ccb & ARCMSR_CCBREPLY_FLAG_ERROR_MODE0) ? true : false;
		arcmsr_report_ccb_state(acb, ccb, error);
	}
	return rtn;
}

/*
** Type C: poll host_int_status / outbound_queueport_low; the low word
** carries the CDB physical address (low 28 bits, 16-byte aligned).
*/
static int arcmsr_hbaC_polling_ccbdone(struct AdapterControlBlock *acb,
		struct CommandControlBlock *poll_ccb)
{
	struct MessageUnit_C __iomem *reg = acb->pmuC;
	uint32_t flag_ccb;
	struct ARCMSR_CDB *arcmsr_cdb;
	bool error;
	struct CommandControlBlock *pCCB;
	uint32_t poll_ccb_done = 0, poll_count = 0;
	int rtn;
	unsigned long ccb_cdb_phy;

polling_hbc_ccb_retry:
	poll_count++;
	while (1) {
		if ((readl(&reg->host_int_status) & ARCMSR_HBCMU_OUTBOUND_POSTQUEUE_ISR) == 0) {
			if (poll_ccb_done) {
				rtn = SUCCESS;
				break;
			} else {
				msleep(25);
				if (poll_count > 100) {
					rtn = FAILED;
					break;
				}
				goto polling_hbc_ccb_retry;
			}
		}
		flag_ccb = readl(&reg->outbound_queueport_low);
		ccb_cdb_phy = (flag_ccb & 0xFFFFFFF0);
		if (acb->cdb_phyadd_hipart)
			ccb_cdb_phy = ccb_cdb_phy | acb->cdb_phyadd_hipart;
		arcmsr_cdb = (struct ARCMSR_CDB *)(acb->vir2phy_offset + ccb_cdb_phy);
		pCCB = container_of(arcmsr_cdb, struct CommandControlBlock, arcmsr_cdb);
		poll_ccb_done |= (pCCB == poll_ccb) ? 1 : 0;
		/* check ifcommand done with no error*/
		if ((pCCB->acb != acb) || (pCCB->startdone != ARCMSR_CCB_START)) {
			if (pCCB->startdone == ARCMSR_CCB_ABORTED) {
				printk(KERN_NOTICE "arcmsr%d: scsi id = %d lun = %d ccb = '0x%p'"
					" poll command abort successfully \n"
					, acb->host->host_no
					, pCCB->pcmd->device->id
					, (u32)pCCB->pcmd->device->lun
					, pCCB);
				pCCB->pcmd->result = DID_ABORT << 16;
				arcmsr_ccb_complete(pCCB);
				continue;
			}
			printk(KERN_NOTICE "arcmsr%d: polling get an illegal ccb"
				" command done ccb = '0x%p'"
				"ccboutstandingcount = %d \n"
				, acb->host->host_no
				, pCCB
				, atomic_read(&acb->ccboutstandingcount));
			continue;
		}
		error = (flag_ccb & ARCMSR_CCBREPLY_FLAG_ERROR_MODE1) ? true : false;
		arcmsr_report_ccb_state(acb, pCCB, error);
	}
	return rtn;
}

/*
** Type D: poll the ARC1214 done queue under doneq_lock. The queue index
** carries a toggle bit (0x4000) on top of the 12-bit position; entry 0's
** addressLow holds the producer pointer. Gives up after 40 polls.
*/
static int arcmsr_hbaD_polling_ccbdone(struct AdapterControlBlock *acb,
		struct CommandControlBlock *poll_ccb)
{
	bool error;
	uint32_t poll_ccb_done = 0, poll_count = 0, flag_ccb;
	int rtn, doneq_index, index_stripped, outbound_write_pointer, toggle;
	unsigned long flags, ccb_cdb_phy;
	struct ARCMSR_CDB *arcmsr_cdb;
	struct CommandControlBlock *pCCB;
	struct MessageUnit_D *pmu = acb->pmuD;

polling_hbaD_ccb_retry:
	poll_count++;
	while (1) {
		spin_lock_irqsave(&acb->doneq_lock, flags);
		outbound_write_pointer = pmu->done_qbuffer[0].addressLow + 1;
		doneq_index = pmu->doneq_index;
		if ((outbound_write_pointer & 0xFFF) == (doneq_index & 0xFFF)) {
			spin_unlock_irqrestore(&acb->doneq_lock, flags);
			if (poll_ccb_done) {
				rtn = SUCCESS;
				break;
			} else {
				msleep(25);
				if (poll_count > 40) {
					rtn = FAILED;
					break;
				}
				goto polling_hbaD_ccb_retry;
			}
		}
		/* advance consumer index, flipping the toggle bit on wrap */
		toggle = doneq_index & 0x4000;
		index_stripped = (doneq_index & 0xFFF) + 1;
		index_stripped %= ARCMSR_MAX_ARC1214_DONEQUEUE;
		pmu->doneq_index = index_stripped ? (index_stripped | toggle) :
			((toggle ^ 0x4000) + 1);
		doneq_index = pmu->doneq_index;
		spin_unlock_irqrestore(&acb->doneq_lock, flags);
		flag_ccb = pmu->done_qbuffer[doneq_index & 0xFFF].addressLow;
		ccb_cdb_phy = (flag_ccb & 0xFFFFFFF0);
		if (acb->cdb_phyadd_hipart)
			ccb_cdb_phy = ccb_cdb_phy | acb->cdb_phyadd_hipart;
		arcmsr_cdb = (struct ARCMSR_CDB *)(acb->vir2phy_offset +
			ccb_cdb_phy);
		pCCB = container_of(arcmsr_cdb, struct CommandControlBlock,
			arcmsr_cdb);
		poll_ccb_done |= (pCCB == poll_ccb) ? 1 : 0;
		if ((pCCB->acb != acb) ||
			(pCCB->startdone != ARCMSR_CCB_START)) {
			if (pCCB->startdone == ARCMSR_CCB_ABORTED) {
				pr_notice("arcmsr%d: scsi id = %d "
					"lun = %d ccb = '0x%p' poll command "
					"abort successfully\n"
					, acb->host->host_no
					, pCCB->pcmd->device->id
					, (u32)pCCB->pcmd->device->lun
					, pCCB);
				pCCB->pcmd->result = DID_ABORT << 16;
				arcmsr_ccb_complete(pCCB);
				continue;
			}
			pr_notice("arcmsr%d: polling an illegal "
				"ccb command done ccb = '0x%p' "
				"ccboutstandingcount = %d\n"
				, acb->host->host_no
				, pCCB
				, atomic_read(&acb->ccboutstandingcount));
			continue;
		}
		error = (flag_ccb & ARCMSR_CCBREPLY_FLAG_ERROR_MODE1)
			? true : false;
		arcmsr_report_ccb_state(acb, pCCB, error);
	}
	return rtn;
}

/*
** Type E (also used for Type F): poll the completion queue indexed by
** SMID; the consumer index is written back to the adapter at the end.
** NOTE(review): cmdFlag is read from pCompletionQ at the already-
** incremented doneq_index, one slot past where cmdSMID was fetched —
** verify against the firmware's completion-entry layout.
*/
static int arcmsr_hbaE_polling_ccbdone(struct AdapterControlBlock *acb,
		struct CommandControlBlock *poll_ccb)
{
	bool error;
	uint32_t poll_ccb_done = 0, poll_count = 0, doneq_index;
	uint16_t cmdSMID;
	unsigned long flags;
	int rtn;
	struct CommandControlBlock *pCCB;
	struct MessageUnit_E __iomem *reg = acb->pmuE;

polling_hbaC_ccb_retry:
	poll_count++;
	while (1) {
		spin_lock_irqsave(&acb->doneq_lock, flags);
		doneq_index = acb->doneq_index;
		if ((readl(&reg->reply_post_producer_index) & 0xFFFF) ==
				doneq_index) {
			spin_unlock_irqrestore(&acb->doneq_lock, flags);
			if (poll_ccb_done) {
				rtn = SUCCESS;
				break;
			} else {
				msleep(25);
				if (poll_count > 40) {
					rtn = FAILED;
					break;
				}
				goto polling_hbaC_ccb_retry;
			}
		}
		cmdSMID = acb->pCompletionQ[doneq_index].cmdSMID;
		doneq_index++;
		if (doneq_index >= acb->completionQ_entry)
			doneq_index = 0;
		acb->doneq_index = doneq_index;
		spin_unlock_irqrestore(&acb->doneq_lock, flags);
		pCCB = acb->pccb_pool[cmdSMID];
		poll_ccb_done |= (pCCB == poll_ccb) ? 1 : 0;
		/* check if command done with no error*/
		if ((pCCB->acb != acb) || (pCCB->startdone != ARCMSR_CCB_START)) {
			if (pCCB->startdone == ARCMSR_CCB_ABORTED) {
				pr_notice("arcmsr%d: scsi id = %d "
					"lun = %d ccb = '0x%p' poll command "
					"abort successfully\n"
					, acb->host->host_no
					, pCCB->pcmd->device->id
					, (u32)pCCB->pcmd->device->lun
					, pCCB);
				pCCB->pcmd->result = DID_ABORT << 16;
				arcmsr_ccb_complete(pCCB);
				continue;
			}
			pr_notice("arcmsr%d: polling an illegal "
				"ccb command done ccb = '0x%p' "
				"ccboutstandingcount = %d\n"
				, acb->host->host_no
				, pCCB
				, atomic_read(&acb->ccboutstandingcount));
			continue;
		}
		error = (acb->pCompletionQ[doneq_index].cmdFlag &
			ARCMSR_CCBREPLY_FLAG_ERROR_MODE1) ? true : false;
		arcmsr_report_ccb_state(acb, pCCB, error);
	}
	writel(doneq_index, &reg->reply_post_consumer_index);
	return rtn;
}

/*
** Dispatch completion polling to the per-adapter-type implementation
** (Type F shares the Type E poller).
*/
static int arcmsr_polling_ccbdone(struct AdapterControlBlock *acb,
		struct CommandControlBlock *poll_ccb)
{
	int rtn = 0;

	switch (acb->adapter_type) {

	case ACB_ADAPTER_TYPE_A:
		rtn = arcmsr_hbaA_polling_ccbdone(acb, poll_ccb);
		break;
	case ACB_ADAPTER_TYPE_B:
		rtn = arcmsr_hbaB_polling_ccbdone(acb, poll_ccb);
		break;
	case ACB_ADAPTER_TYPE_C:
		rtn = arcmsr_hbaC_polling_ccbdone(acb, poll_ccb);
		break;
	case ACB_ADAPTER_TYPE_D:
		rtn = arcmsr_hbaD_polling_ccbdone(acb, poll_ccb);
		break;
	case ACB_ADAPTER_TYPE_E:
	case ACB_ADAPTER_TYPE_F:
		rtn = arcmsr_hbaE_polling_ccbdone(acb, poll_ccb);
		break;
	}
	return rtn;
}

static void arcmsr_set_iop_datetime(struct timer_list *t)
{
	struct AdapterControlBlock *pacb = from_timer(pacb, t, refresh_timer);
	unsigned int next_time;
	struct tm tm;

	union {
		struct {
		uint16_t	signature;
		uint8_t		year;
		uint8_t		month;
		uint8_t
date; 3860 uint8_t hour; 3861 uint8_t minute; 3862 uint8_t second; 3863 } a; 3864 struct { 3865 uint32_t msg_time[2]; 3866 } b; 3867 } datetime; 3868 3869 time64_to_tm(ktime_get_real_seconds(), -sys_tz.tz_minuteswest * 60, &tm); 3870 3871 datetime.a.signature = 0x55AA; 3872 datetime.a.year = tm.tm_year - 100; /* base 2000 instead of 1900 */ 3873 datetime.a.month = tm.tm_mon; 3874 datetime.a.date = tm.tm_mday; 3875 datetime.a.hour = tm.tm_hour; 3876 datetime.a.minute = tm.tm_min; 3877 datetime.a.second = tm.tm_sec; 3878 3879 switch (pacb->adapter_type) { 3880 case ACB_ADAPTER_TYPE_A: { 3881 struct MessageUnit_A __iomem *reg = pacb->pmuA; 3882 writel(datetime.b.msg_time[0], ®->message_rwbuffer[0]); 3883 writel(datetime.b.msg_time[1], ®->message_rwbuffer[1]); 3884 writel(ARCMSR_INBOUND_MESG0_SYNC_TIMER, ®->inbound_msgaddr0); 3885 break; 3886 } 3887 case ACB_ADAPTER_TYPE_B: { 3888 uint32_t __iomem *rwbuffer; 3889 struct MessageUnit_B *reg = pacb->pmuB; 3890 rwbuffer = reg->message_rwbuffer; 3891 writel(datetime.b.msg_time[0], rwbuffer++); 3892 writel(datetime.b.msg_time[1], rwbuffer++); 3893 writel(ARCMSR_MESSAGE_SYNC_TIMER, reg->drv2iop_doorbell); 3894 break; 3895 } 3896 case ACB_ADAPTER_TYPE_C: { 3897 struct MessageUnit_C __iomem *reg = pacb->pmuC; 3898 writel(datetime.b.msg_time[0], ®->msgcode_rwbuffer[0]); 3899 writel(datetime.b.msg_time[1], ®->msgcode_rwbuffer[1]); 3900 writel(ARCMSR_INBOUND_MESG0_SYNC_TIMER, ®->inbound_msgaddr0); 3901 writel(ARCMSR_HBCMU_DRV2IOP_MESSAGE_CMD_DONE, ®->inbound_doorbell); 3902 break; 3903 } 3904 case ACB_ADAPTER_TYPE_D: { 3905 uint32_t __iomem *rwbuffer; 3906 struct MessageUnit_D *reg = pacb->pmuD; 3907 rwbuffer = reg->msgcode_rwbuffer; 3908 writel(datetime.b.msg_time[0], rwbuffer++); 3909 writel(datetime.b.msg_time[1], rwbuffer++); 3910 writel(ARCMSR_INBOUND_MESG0_SYNC_TIMER, reg->inbound_msgaddr0); 3911 break; 3912 } 3913 case ACB_ADAPTER_TYPE_E: { 3914 struct MessageUnit_E __iomem *reg = pacb->pmuE; 3915 
writel(datetime.b.msg_time[0], ®->msgcode_rwbuffer[0]); 3916 writel(datetime.b.msg_time[1], ®->msgcode_rwbuffer[1]); 3917 writel(ARCMSR_INBOUND_MESG0_SYNC_TIMER, ®->inbound_msgaddr0); 3918 pacb->out_doorbell ^= ARCMSR_HBEMU_DRV2IOP_MESSAGE_CMD_DONE; 3919 writel(pacb->out_doorbell, ®->iobound_doorbell); 3920 break; 3921 } 3922 case ACB_ADAPTER_TYPE_F: { 3923 struct MessageUnit_F __iomem *reg = pacb->pmuF; 3924 3925 pacb->msgcode_rwbuffer[0] = datetime.b.msg_time[0]; 3926 pacb->msgcode_rwbuffer[1] = datetime.b.msg_time[1]; 3927 writel(ARCMSR_INBOUND_MESG0_SYNC_TIMER, ®->inbound_msgaddr0); 3928 pacb->out_doorbell ^= ARCMSR_HBEMU_DRV2IOP_MESSAGE_CMD_DONE; 3929 writel(pacb->out_doorbell, ®->iobound_doorbell); 3930 break; 3931 } 3932 } 3933 if (sys_tz.tz_minuteswest) 3934 next_time = ARCMSR_HOURS; 3935 else 3936 next_time = ARCMSR_MINUTES; 3937 mod_timer(&pacb->refresh_timer, jiffies + msecs_to_jiffies(next_time)); 3938 } 3939 3940 static int arcmsr_iop_confirm(struct AdapterControlBlock *acb) 3941 { 3942 uint32_t cdb_phyaddr, cdb_phyaddr_hi32; 3943 dma_addr_t dma_coherent_handle; 3944 3945 /* 3946 ******************************************************************** 3947 ** here we need to tell iop 331 our freeccb.HighPart 3948 ** if freeccb.HighPart is not zero 3949 ******************************************************************** 3950 */ 3951 switch (acb->adapter_type) { 3952 case ACB_ADAPTER_TYPE_B: 3953 case ACB_ADAPTER_TYPE_D: 3954 dma_coherent_handle = acb->dma_coherent_handle2; 3955 break; 3956 case ACB_ADAPTER_TYPE_E: 3957 case ACB_ADAPTER_TYPE_F: 3958 dma_coherent_handle = acb->dma_coherent_handle + 3959 offsetof(struct CommandControlBlock, arcmsr_cdb); 3960 break; 3961 default: 3962 dma_coherent_handle = acb->dma_coherent_handle; 3963 break; 3964 } 3965 cdb_phyaddr = lower_32_bits(dma_coherent_handle); 3966 cdb_phyaddr_hi32 = upper_32_bits(dma_coherent_handle); 3967 acb->cdb_phyaddr_hi32 = cdb_phyaddr_hi32; 3968 acb->cdb_phyadd_hipart = 
((uint64_t)cdb_phyaddr_hi32) << 32; 3969 /* 3970 *********************************************************************** 3971 ** if adapter type B, set window of "post command Q" 3972 *********************************************************************** 3973 */ 3974 switch (acb->adapter_type) { 3975 3976 case ACB_ADAPTER_TYPE_A: { 3977 if (cdb_phyaddr_hi32 != 0) { 3978 struct MessageUnit_A __iomem *reg = acb->pmuA; 3979 writel(ARCMSR_SIGNATURE_SET_CONFIG, \ 3980 ®->message_rwbuffer[0]); 3981 writel(cdb_phyaddr_hi32, ®->message_rwbuffer[1]); 3982 writel(ARCMSR_INBOUND_MESG0_SET_CONFIG, \ 3983 ®->inbound_msgaddr0); 3984 if (!arcmsr_hbaA_wait_msgint_ready(acb)) { 3985 printk(KERN_NOTICE "arcmsr%d: ""set ccb high \ 3986 part physical address timeout\n", 3987 acb->host->host_no); 3988 return 1; 3989 } 3990 } 3991 } 3992 break; 3993 3994 case ACB_ADAPTER_TYPE_B: { 3995 uint32_t __iomem *rwbuffer; 3996 3997 struct MessageUnit_B *reg = acb->pmuB; 3998 reg->postq_index = 0; 3999 reg->doneq_index = 0; 4000 writel(ARCMSR_MESSAGE_SET_POST_WINDOW, reg->drv2iop_doorbell); 4001 if (!arcmsr_hbaB_wait_msgint_ready(acb)) { 4002 printk(KERN_NOTICE "arcmsr%d: cannot set driver mode\n", \ 4003 acb->host->host_no); 4004 return 1; 4005 } 4006 rwbuffer = reg->message_rwbuffer; 4007 /* driver "set config" signature */ 4008 writel(ARCMSR_SIGNATURE_SET_CONFIG, rwbuffer++); 4009 /* normal should be zero */ 4010 writel(cdb_phyaddr_hi32, rwbuffer++); 4011 /* postQ size (256 + 8)*4 */ 4012 writel(cdb_phyaddr, rwbuffer++); 4013 /* doneQ size (256 + 8)*4 */ 4014 writel(cdb_phyaddr + 1056, rwbuffer++); 4015 /* ccb maxQ size must be --> [(256 + 8)*4]*/ 4016 writel(1056, rwbuffer); 4017 4018 writel(ARCMSR_MESSAGE_SET_CONFIG, reg->drv2iop_doorbell); 4019 if (!arcmsr_hbaB_wait_msgint_ready(acb)) { 4020 printk(KERN_NOTICE "arcmsr%d: 'set command Q window' \ 4021 timeout \n",acb->host->host_no); 4022 return 1; 4023 } 4024 writel(ARCMSR_MESSAGE_START_DRIVER_MODE, reg->drv2iop_doorbell); 4025 if 
(!arcmsr_hbaB_wait_msgint_ready(acb)) { 4026 pr_err("arcmsr%d: can't set driver mode.\n", 4027 acb->host->host_no); 4028 return 1; 4029 } 4030 } 4031 break; 4032 case ACB_ADAPTER_TYPE_C: { 4033 struct MessageUnit_C __iomem *reg = acb->pmuC; 4034 4035 printk(KERN_NOTICE "arcmsr%d: cdb_phyaddr_hi32=0x%x\n", 4036 acb->adapter_index, cdb_phyaddr_hi32); 4037 writel(ARCMSR_SIGNATURE_SET_CONFIG, ®->msgcode_rwbuffer[0]); 4038 writel(cdb_phyaddr_hi32, ®->msgcode_rwbuffer[1]); 4039 writel(ARCMSR_INBOUND_MESG0_SET_CONFIG, ®->inbound_msgaddr0); 4040 writel(ARCMSR_HBCMU_DRV2IOP_MESSAGE_CMD_DONE, ®->inbound_doorbell); 4041 if (!arcmsr_hbaC_wait_msgint_ready(acb)) { 4042 printk(KERN_NOTICE "arcmsr%d: 'set command Q window' \ 4043 timeout \n", acb->host->host_no); 4044 return 1; 4045 } 4046 } 4047 break; 4048 case ACB_ADAPTER_TYPE_D: { 4049 uint32_t __iomem *rwbuffer; 4050 struct MessageUnit_D *reg = acb->pmuD; 4051 reg->postq_index = 0; 4052 reg->doneq_index = 0; 4053 rwbuffer = reg->msgcode_rwbuffer; 4054 writel(ARCMSR_SIGNATURE_SET_CONFIG, rwbuffer++); 4055 writel(cdb_phyaddr_hi32, rwbuffer++); 4056 writel(cdb_phyaddr, rwbuffer++); 4057 writel(cdb_phyaddr + (ARCMSR_MAX_ARC1214_POSTQUEUE * 4058 sizeof(struct InBound_SRB)), rwbuffer++); 4059 writel(0x100, rwbuffer); 4060 writel(ARCMSR_INBOUND_MESG0_SET_CONFIG, reg->inbound_msgaddr0); 4061 if (!arcmsr_hbaD_wait_msgint_ready(acb)) { 4062 pr_notice("arcmsr%d: 'set command Q window' timeout\n", 4063 acb->host->host_no); 4064 return 1; 4065 } 4066 } 4067 break; 4068 case ACB_ADAPTER_TYPE_E: { 4069 struct MessageUnit_E __iomem *reg = acb->pmuE; 4070 writel(ARCMSR_SIGNATURE_SET_CONFIG, ®->msgcode_rwbuffer[0]); 4071 writel(ARCMSR_SIGNATURE_1884, ®->msgcode_rwbuffer[1]); 4072 writel(cdb_phyaddr, ®->msgcode_rwbuffer[2]); 4073 writel(cdb_phyaddr_hi32, ®->msgcode_rwbuffer[3]); 4074 writel(acb->ccbsize, ®->msgcode_rwbuffer[4]); 4075 writel(lower_32_bits(acb->dma_coherent_handle2), ®->msgcode_rwbuffer[5]); 4076 
writel(upper_32_bits(acb->dma_coherent_handle2), ®->msgcode_rwbuffer[6]); 4077 writel(acb->ioqueue_size, ®->msgcode_rwbuffer[7]); 4078 writel(ARCMSR_INBOUND_MESG0_SET_CONFIG, ®->inbound_msgaddr0); 4079 acb->out_doorbell ^= ARCMSR_HBEMU_DRV2IOP_MESSAGE_CMD_DONE; 4080 writel(acb->out_doorbell, ®->iobound_doorbell); 4081 if (!arcmsr_hbaE_wait_msgint_ready(acb)) { 4082 pr_notice("arcmsr%d: 'set command Q window' timeout \n", 4083 acb->host->host_no); 4084 return 1; 4085 } 4086 } 4087 break; 4088 case ACB_ADAPTER_TYPE_F: { 4089 struct MessageUnit_F __iomem *reg = acb->pmuF; 4090 4091 acb->msgcode_rwbuffer[0] = ARCMSR_SIGNATURE_SET_CONFIG; 4092 acb->msgcode_rwbuffer[1] = ARCMSR_SIGNATURE_1886; 4093 acb->msgcode_rwbuffer[2] = cdb_phyaddr; 4094 acb->msgcode_rwbuffer[3] = cdb_phyaddr_hi32; 4095 acb->msgcode_rwbuffer[4] = acb->ccbsize; 4096 acb->msgcode_rwbuffer[5] = lower_32_bits(acb->dma_coherent_handle2); 4097 acb->msgcode_rwbuffer[6] = upper_32_bits(acb->dma_coherent_handle2); 4098 acb->msgcode_rwbuffer[7] = acb->completeQ_size; 4099 writel(ARCMSR_INBOUND_MESG0_SET_CONFIG, ®->inbound_msgaddr0); 4100 acb->out_doorbell ^= ARCMSR_HBEMU_DRV2IOP_MESSAGE_CMD_DONE; 4101 writel(acb->out_doorbell, ®->iobound_doorbell); 4102 if (!arcmsr_hbaE_wait_msgint_ready(acb)) { 4103 pr_notice("arcmsr%d: 'set command Q window' timeout\n", 4104 acb->host->host_no); 4105 return 1; 4106 } 4107 } 4108 break; 4109 } 4110 return 0; 4111 } 4112 4113 static void arcmsr_wait_firmware_ready(struct AdapterControlBlock *acb) 4114 { 4115 uint32_t firmware_state = 0; 4116 switch (acb->adapter_type) { 4117 4118 case ACB_ADAPTER_TYPE_A: { 4119 struct MessageUnit_A __iomem *reg = acb->pmuA; 4120 do { 4121 if (!(acb->acb_flags & ACB_F_IOP_INITED)) 4122 msleep(20); 4123 firmware_state = readl(®->outbound_msgaddr1); 4124 } while ((firmware_state & ARCMSR_OUTBOUND_MESG1_FIRMWARE_OK) == 0); 4125 } 4126 break; 4127 4128 case ACB_ADAPTER_TYPE_B: { 4129 struct MessageUnit_B *reg = acb->pmuB; 4130 do { 4131 if 
(!(acb->acb_flags & ACB_F_IOP_INITED)) 4132 msleep(20); 4133 firmware_state = readl(reg->iop2drv_doorbell); 4134 } while ((firmware_state & ARCMSR_MESSAGE_FIRMWARE_OK) == 0); 4135 writel(ARCMSR_DRV2IOP_END_OF_INTERRUPT, reg->drv2iop_doorbell); 4136 } 4137 break; 4138 case ACB_ADAPTER_TYPE_C: { 4139 struct MessageUnit_C __iomem *reg = acb->pmuC; 4140 do { 4141 if (!(acb->acb_flags & ACB_F_IOP_INITED)) 4142 msleep(20); 4143 firmware_state = readl(®->outbound_msgaddr1); 4144 } while ((firmware_state & ARCMSR_HBCMU_MESSAGE_FIRMWARE_OK) == 0); 4145 } 4146 break; 4147 case ACB_ADAPTER_TYPE_D: { 4148 struct MessageUnit_D *reg = acb->pmuD; 4149 do { 4150 if (!(acb->acb_flags & ACB_F_IOP_INITED)) 4151 msleep(20); 4152 firmware_state = readl(reg->outbound_msgaddr1); 4153 } while ((firmware_state & 4154 ARCMSR_ARC1214_MESSAGE_FIRMWARE_OK) == 0); 4155 } 4156 break; 4157 case ACB_ADAPTER_TYPE_E: 4158 case ACB_ADAPTER_TYPE_F: { 4159 struct MessageUnit_E __iomem *reg = acb->pmuE; 4160 do { 4161 if (!(acb->acb_flags & ACB_F_IOP_INITED)) 4162 msleep(20); 4163 firmware_state = readl(®->outbound_msgaddr1); 4164 } while ((firmware_state & ARCMSR_HBEMU_MESSAGE_FIRMWARE_OK) == 0); 4165 } 4166 break; 4167 } 4168 } 4169 4170 static void arcmsr_request_device_map(struct timer_list *t) 4171 { 4172 struct AdapterControlBlock *acb = from_timer(acb, t, eternal_timer); 4173 if (acb->acb_flags & (ACB_F_MSG_GET_CONFIG | ACB_F_BUS_RESET | ACB_F_ABORT)) { 4174 mod_timer(&acb->eternal_timer, jiffies + msecs_to_jiffies(6 * HZ)); 4175 } else { 4176 acb->fw_flag = FW_NORMAL; 4177 switch (acb->adapter_type) { 4178 case ACB_ADAPTER_TYPE_A: { 4179 struct MessageUnit_A __iomem *reg = acb->pmuA; 4180 writel(ARCMSR_INBOUND_MESG0_GET_CONFIG, ®->inbound_msgaddr0); 4181 break; 4182 } 4183 case ACB_ADAPTER_TYPE_B: { 4184 struct MessageUnit_B *reg = acb->pmuB; 4185 writel(ARCMSR_MESSAGE_GET_CONFIG, reg->drv2iop_doorbell); 4186 break; 4187 } 4188 case ACB_ADAPTER_TYPE_C: { 4189 struct MessageUnit_C __iomem *reg = 
acb->pmuC; 4190 writel(ARCMSR_INBOUND_MESG0_GET_CONFIG, ®->inbound_msgaddr0); 4191 writel(ARCMSR_HBCMU_DRV2IOP_MESSAGE_CMD_DONE, ®->inbound_doorbell); 4192 break; 4193 } 4194 case ACB_ADAPTER_TYPE_D: { 4195 struct MessageUnit_D *reg = acb->pmuD; 4196 writel(ARCMSR_INBOUND_MESG0_GET_CONFIG, reg->inbound_msgaddr0); 4197 break; 4198 } 4199 case ACB_ADAPTER_TYPE_E: { 4200 struct MessageUnit_E __iomem *reg = acb->pmuE; 4201 writel(ARCMSR_INBOUND_MESG0_GET_CONFIG, ®->inbound_msgaddr0); 4202 acb->out_doorbell ^= ARCMSR_HBEMU_DRV2IOP_MESSAGE_CMD_DONE; 4203 writel(acb->out_doorbell, ®->iobound_doorbell); 4204 break; 4205 } 4206 case ACB_ADAPTER_TYPE_F: { 4207 struct MessageUnit_F __iomem *reg = acb->pmuF; 4208 uint32_t outMsg1 = readl(®->outbound_msgaddr1); 4209 4210 if (!(outMsg1 & ARCMSR_HBFMU_MESSAGE_FIRMWARE_OK) || 4211 (outMsg1 & ARCMSR_HBFMU_MESSAGE_NO_VOLUME_CHANGE)) 4212 goto nxt6s; 4213 writel(ARCMSR_INBOUND_MESG0_GET_CONFIG, ®->inbound_msgaddr0); 4214 acb->out_doorbell ^= ARCMSR_HBEMU_DRV2IOP_MESSAGE_CMD_DONE; 4215 writel(acb->out_doorbell, ®->iobound_doorbell); 4216 break; 4217 } 4218 default: 4219 return; 4220 } 4221 acb->acb_flags |= ACB_F_MSG_GET_CONFIG; 4222 nxt6s: 4223 mod_timer(&acb->eternal_timer, jiffies + msecs_to_jiffies(6 * HZ)); 4224 } 4225 } 4226 4227 static void arcmsr_hbaA_start_bgrb(struct AdapterControlBlock *acb) 4228 { 4229 struct MessageUnit_A __iomem *reg = acb->pmuA; 4230 acb->acb_flags |= ACB_F_MSG_START_BGRB; 4231 writel(ARCMSR_INBOUND_MESG0_START_BGRB, ®->inbound_msgaddr0); 4232 if (!arcmsr_hbaA_wait_msgint_ready(acb)) { 4233 printk(KERN_NOTICE "arcmsr%d: wait 'start adapter background \ 4234 rebuild' timeout \n", acb->host->host_no); 4235 } 4236 } 4237 4238 static void arcmsr_hbaB_start_bgrb(struct AdapterControlBlock *acb) 4239 { 4240 struct MessageUnit_B *reg = acb->pmuB; 4241 acb->acb_flags |= ACB_F_MSG_START_BGRB; 4242 writel(ARCMSR_MESSAGE_START_BGRB, reg->drv2iop_doorbell); 4243 if (!arcmsr_hbaB_wait_msgint_ready(acb)) { 4244 
printk(KERN_NOTICE "arcmsr%d: wait 'start adapter background \ 4245 rebuild' timeout \n",acb->host->host_no); 4246 } 4247 } 4248 4249 static void arcmsr_hbaC_start_bgrb(struct AdapterControlBlock *pACB) 4250 { 4251 struct MessageUnit_C __iomem *phbcmu = pACB->pmuC; 4252 pACB->acb_flags |= ACB_F_MSG_START_BGRB; 4253 writel(ARCMSR_INBOUND_MESG0_START_BGRB, &phbcmu->inbound_msgaddr0); 4254 writel(ARCMSR_HBCMU_DRV2IOP_MESSAGE_CMD_DONE, &phbcmu->inbound_doorbell); 4255 if (!arcmsr_hbaC_wait_msgint_ready(pACB)) { 4256 printk(KERN_NOTICE "arcmsr%d: wait 'start adapter background \ 4257 rebuild' timeout \n", pACB->host->host_no); 4258 } 4259 return; 4260 } 4261 4262 static void arcmsr_hbaD_start_bgrb(struct AdapterControlBlock *pACB) 4263 { 4264 struct MessageUnit_D *pmu = pACB->pmuD; 4265 4266 pACB->acb_flags |= ACB_F_MSG_START_BGRB; 4267 writel(ARCMSR_INBOUND_MESG0_START_BGRB, pmu->inbound_msgaddr0); 4268 if (!arcmsr_hbaD_wait_msgint_ready(pACB)) { 4269 pr_notice("arcmsr%d: wait 'start adapter " 4270 "background rebuild' timeout\n", pACB->host->host_no); 4271 } 4272 } 4273 4274 static void arcmsr_hbaE_start_bgrb(struct AdapterControlBlock *pACB) 4275 { 4276 struct MessageUnit_E __iomem *pmu = pACB->pmuE; 4277 4278 pACB->acb_flags |= ACB_F_MSG_START_BGRB; 4279 writel(ARCMSR_INBOUND_MESG0_START_BGRB, &pmu->inbound_msgaddr0); 4280 pACB->out_doorbell ^= ARCMSR_HBEMU_DRV2IOP_MESSAGE_CMD_DONE; 4281 writel(pACB->out_doorbell, &pmu->iobound_doorbell); 4282 if (!arcmsr_hbaE_wait_msgint_ready(pACB)) { 4283 pr_notice("arcmsr%d: wait 'start adapter " 4284 "background rebuild' timeout \n", pACB->host->host_no); 4285 } 4286 } 4287 4288 static void arcmsr_start_adapter_bgrb(struct AdapterControlBlock *acb) 4289 { 4290 switch (acb->adapter_type) { 4291 case ACB_ADAPTER_TYPE_A: 4292 arcmsr_hbaA_start_bgrb(acb); 4293 break; 4294 case ACB_ADAPTER_TYPE_B: 4295 arcmsr_hbaB_start_bgrb(acb); 4296 break; 4297 case ACB_ADAPTER_TYPE_C: 4298 arcmsr_hbaC_start_bgrb(acb); 4299 break; 4300 case 
ACB_ADAPTER_TYPE_D: 4301 arcmsr_hbaD_start_bgrb(acb); 4302 break; 4303 case ACB_ADAPTER_TYPE_E: 4304 case ACB_ADAPTER_TYPE_F: 4305 arcmsr_hbaE_start_bgrb(acb); 4306 break; 4307 } 4308 } 4309 4310 static void arcmsr_clear_doorbell_queue_buffer(struct AdapterControlBlock *acb) 4311 { 4312 switch (acb->adapter_type) { 4313 case ACB_ADAPTER_TYPE_A: { 4314 struct MessageUnit_A __iomem *reg = acb->pmuA; 4315 uint32_t outbound_doorbell; 4316 /* empty doorbell Qbuffer if door bell ringed */ 4317 outbound_doorbell = readl(®->outbound_doorbell); 4318 /*clear doorbell interrupt */ 4319 writel(outbound_doorbell, ®->outbound_doorbell); 4320 writel(ARCMSR_INBOUND_DRIVER_DATA_READ_OK, ®->inbound_doorbell); 4321 } 4322 break; 4323 4324 case ACB_ADAPTER_TYPE_B: { 4325 struct MessageUnit_B *reg = acb->pmuB; 4326 uint32_t outbound_doorbell, i; 4327 writel(ARCMSR_DOORBELL_INT_CLEAR_PATTERN, reg->iop2drv_doorbell); 4328 writel(ARCMSR_DRV2IOP_DATA_READ_OK, reg->drv2iop_doorbell); 4329 /* let IOP know data has been read */ 4330 for(i=0; i < 200; i++) { 4331 msleep(20); 4332 outbound_doorbell = readl(reg->iop2drv_doorbell); 4333 if( outbound_doorbell & ARCMSR_IOP2DRV_DATA_WRITE_OK) { 4334 writel(ARCMSR_DOORBELL_INT_CLEAR_PATTERN, reg->iop2drv_doorbell); 4335 writel(ARCMSR_DRV2IOP_DATA_READ_OK, reg->drv2iop_doorbell); 4336 } else 4337 break; 4338 } 4339 } 4340 break; 4341 case ACB_ADAPTER_TYPE_C: { 4342 struct MessageUnit_C __iomem *reg = acb->pmuC; 4343 uint32_t outbound_doorbell, i; 4344 /* empty doorbell Qbuffer if door bell ringed */ 4345 outbound_doorbell = readl(®->outbound_doorbell); 4346 writel(outbound_doorbell, ®->outbound_doorbell_clear); 4347 writel(ARCMSR_HBCMU_DRV2IOP_DATA_READ_OK, ®->inbound_doorbell); 4348 for (i = 0; i < 200; i++) { 4349 msleep(20); 4350 outbound_doorbell = readl(®->outbound_doorbell); 4351 if (outbound_doorbell & 4352 ARCMSR_HBCMU_IOP2DRV_DATA_WRITE_OK) { 4353 writel(outbound_doorbell, 4354 ®->outbound_doorbell_clear); 4355 
writel(ARCMSR_HBCMU_DRV2IOP_DATA_READ_OK, 4356 ®->inbound_doorbell); 4357 } else 4358 break; 4359 } 4360 } 4361 break; 4362 case ACB_ADAPTER_TYPE_D: { 4363 struct MessageUnit_D *reg = acb->pmuD; 4364 uint32_t outbound_doorbell, i; 4365 /* empty doorbell Qbuffer if door bell ringed */ 4366 outbound_doorbell = readl(reg->outbound_doorbell); 4367 writel(outbound_doorbell, reg->outbound_doorbell); 4368 writel(ARCMSR_ARC1214_DRV2IOP_DATA_OUT_READ, 4369 reg->inbound_doorbell); 4370 for (i = 0; i < 200; i++) { 4371 msleep(20); 4372 outbound_doorbell = readl(reg->outbound_doorbell); 4373 if (outbound_doorbell & 4374 ARCMSR_ARC1214_IOP2DRV_DATA_WRITE_OK) { 4375 writel(outbound_doorbell, 4376 reg->outbound_doorbell); 4377 writel(ARCMSR_ARC1214_DRV2IOP_DATA_OUT_READ, 4378 reg->inbound_doorbell); 4379 } else 4380 break; 4381 } 4382 } 4383 break; 4384 case ACB_ADAPTER_TYPE_E: 4385 case ACB_ADAPTER_TYPE_F: { 4386 struct MessageUnit_E __iomem *reg = acb->pmuE; 4387 uint32_t i, tmp; 4388 4389 acb->in_doorbell = readl(®->iobound_doorbell); 4390 writel(0, ®->host_int_status); /*clear interrupt*/ 4391 acb->out_doorbell ^= ARCMSR_HBEMU_DRV2IOP_DATA_READ_OK; 4392 writel(acb->out_doorbell, ®->iobound_doorbell); 4393 for(i=0; i < 200; i++) { 4394 msleep(20); 4395 tmp = acb->in_doorbell; 4396 acb->in_doorbell = readl(®->iobound_doorbell); 4397 if((tmp ^ acb->in_doorbell) & ARCMSR_HBEMU_IOP2DRV_DATA_WRITE_OK) { 4398 writel(0, ®->host_int_status); /*clear interrupt*/ 4399 acb->out_doorbell ^= ARCMSR_HBEMU_DRV2IOP_DATA_READ_OK; 4400 writel(acb->out_doorbell, ®->iobound_doorbell); 4401 } else 4402 break; 4403 } 4404 } 4405 break; 4406 } 4407 } 4408 4409 static void arcmsr_enable_eoi_mode(struct AdapterControlBlock *acb) 4410 { 4411 switch (acb->adapter_type) { 4412 case ACB_ADAPTER_TYPE_A: 4413 return; 4414 case ACB_ADAPTER_TYPE_B: 4415 { 4416 struct MessageUnit_B *reg = acb->pmuB; 4417 writel(ARCMSR_MESSAGE_ACTIVE_EOI_MODE, reg->drv2iop_doorbell); 4418 if 
(!arcmsr_hbaB_wait_msgint_ready(acb)) { 4419 printk(KERN_NOTICE "ARCMSR IOP enables EOI_MODE TIMEOUT"); 4420 return; 4421 } 4422 } 4423 break; 4424 case ACB_ADAPTER_TYPE_C: 4425 return; 4426 } 4427 return; 4428 } 4429 4430 static void arcmsr_hardware_reset(struct AdapterControlBlock *acb) 4431 { 4432 uint8_t value[64]; 4433 int i, count = 0; 4434 struct MessageUnit_A __iomem *pmuA = acb->pmuA; 4435 struct MessageUnit_C __iomem *pmuC = acb->pmuC; 4436 struct MessageUnit_D *pmuD = acb->pmuD; 4437 4438 /* backup pci config data */ 4439 printk(KERN_NOTICE "arcmsr%d: executing hw bus reset .....\n", acb->host->host_no); 4440 for (i = 0; i < 64; i++) { 4441 pci_read_config_byte(acb->pdev, i, &value[i]); 4442 } 4443 /* hardware reset signal */ 4444 if (acb->dev_id == 0x1680) { 4445 writel(ARCMSR_ARC1680_BUS_RESET, &pmuA->reserved1[0]); 4446 } else if (acb->dev_id == 0x1880) { 4447 do { 4448 count++; 4449 writel(0xF, &pmuC->write_sequence); 4450 writel(0x4, &pmuC->write_sequence); 4451 writel(0xB, &pmuC->write_sequence); 4452 writel(0x2, &pmuC->write_sequence); 4453 writel(0x7, &pmuC->write_sequence); 4454 writel(0xD, &pmuC->write_sequence); 4455 } while (((readl(&pmuC->host_diagnostic) & ARCMSR_ARC1880_DiagWrite_ENABLE) == 0) && (count < 5)); 4456 writel(ARCMSR_ARC1880_RESET_ADAPTER, &pmuC->host_diagnostic); 4457 } else if (acb->dev_id == 0x1884) { 4458 struct MessageUnit_E __iomem *pmuE = acb->pmuE; 4459 do { 4460 count++; 4461 writel(0x4, &pmuE->write_sequence_3xxx); 4462 writel(0xB, &pmuE->write_sequence_3xxx); 4463 writel(0x2, &pmuE->write_sequence_3xxx); 4464 writel(0x7, &pmuE->write_sequence_3xxx); 4465 writel(0xD, &pmuE->write_sequence_3xxx); 4466 mdelay(10); 4467 } while (((readl(&pmuE->host_diagnostic_3xxx) & 4468 ARCMSR_ARC1884_DiagWrite_ENABLE) == 0) && (count < 5)); 4469 writel(ARCMSR_ARC188X_RESET_ADAPTER, &pmuE->host_diagnostic_3xxx); 4470 } else if (acb->dev_id == 0x1214) { 4471 writel(0x20, pmuD->reset_request); 4472 } else { 4473 
pci_write_config_byte(acb->pdev, 0x84, 0x20); 4474 } 4475 msleep(2000); 4476 /* write back pci config data */ 4477 for (i = 0; i < 64; i++) { 4478 pci_write_config_byte(acb->pdev, i, value[i]); 4479 } 4480 msleep(1000); 4481 return; 4482 } 4483 4484 static bool arcmsr_reset_in_progress(struct AdapterControlBlock *acb) 4485 { 4486 bool rtn = true; 4487 4488 switch(acb->adapter_type) { 4489 case ACB_ADAPTER_TYPE_A:{ 4490 struct MessageUnit_A __iomem *reg = acb->pmuA; 4491 rtn = ((readl(®->outbound_msgaddr1) & 4492 ARCMSR_OUTBOUND_MESG1_FIRMWARE_OK) == 0) ? true : false; 4493 } 4494 break; 4495 case ACB_ADAPTER_TYPE_B:{ 4496 struct MessageUnit_B *reg = acb->pmuB; 4497 rtn = ((readl(reg->iop2drv_doorbell) & 4498 ARCMSR_MESSAGE_FIRMWARE_OK) == 0) ? true : false; 4499 } 4500 break; 4501 case ACB_ADAPTER_TYPE_C:{ 4502 struct MessageUnit_C __iomem *reg = acb->pmuC; 4503 rtn = (readl(®->host_diagnostic) & 0x04) ? true : false; 4504 } 4505 break; 4506 case ACB_ADAPTER_TYPE_D:{ 4507 struct MessageUnit_D *reg = acb->pmuD; 4508 rtn = ((readl(reg->sample_at_reset) & 0x80) == 0) ? 4509 true : false; 4510 } 4511 break; 4512 case ACB_ADAPTER_TYPE_E: 4513 case ACB_ADAPTER_TYPE_F:{ 4514 struct MessageUnit_E __iomem *reg = acb->pmuE; 4515 rtn = (readl(®->host_diagnostic_3xxx) & 4516 ARCMSR_ARC188X_RESET_ADAPTER) ? 
true : false; 4517 } 4518 break; 4519 } 4520 return rtn; 4521 } 4522 4523 static void arcmsr_iop_init(struct AdapterControlBlock *acb) 4524 { 4525 uint32_t intmask_org; 4526 /* disable all outbound interrupt */ 4527 intmask_org = arcmsr_disable_outbound_ints(acb); 4528 arcmsr_wait_firmware_ready(acb); 4529 arcmsr_iop_confirm(acb); 4530 /*start background rebuild*/ 4531 arcmsr_start_adapter_bgrb(acb); 4532 /* empty doorbell Qbuffer if door bell ringed */ 4533 arcmsr_clear_doorbell_queue_buffer(acb); 4534 arcmsr_enable_eoi_mode(acb); 4535 /* enable outbound Post Queue,outbound doorbell Interrupt */ 4536 arcmsr_enable_outbound_ints(acb, intmask_org); 4537 acb->acb_flags |= ACB_F_IOP_INITED; 4538 } 4539 4540 static uint8_t arcmsr_iop_reset(struct AdapterControlBlock *acb) 4541 { 4542 struct CommandControlBlock *ccb; 4543 uint32_t intmask_org; 4544 uint8_t rtnval = 0x00; 4545 int i = 0; 4546 unsigned long flags; 4547 4548 if (atomic_read(&acb->ccboutstandingcount) != 0) { 4549 /* disable all outbound interrupt */ 4550 intmask_org = arcmsr_disable_outbound_ints(acb); 4551 /* talk to iop 331 outstanding command aborted */ 4552 rtnval = arcmsr_abort_allcmd(acb); 4553 /* clear all outbound posted Q */ 4554 arcmsr_done4abort_postqueue(acb); 4555 for (i = 0; i < acb->maxFreeCCB; i++) { 4556 ccb = acb->pccb_pool[i]; 4557 if (ccb->startdone == ARCMSR_CCB_START) { 4558 scsi_dma_unmap(ccb->pcmd); 4559 ccb->startdone = ARCMSR_CCB_DONE; 4560 ccb->ccb_flags = 0; 4561 spin_lock_irqsave(&acb->ccblist_lock, flags); 4562 list_add_tail(&ccb->list, &acb->ccb_free_list); 4563 spin_unlock_irqrestore(&acb->ccblist_lock, flags); 4564 } 4565 } 4566 atomic_set(&acb->ccboutstandingcount, 0); 4567 /* enable all outbound interrupt */ 4568 arcmsr_enable_outbound_ints(acb, intmask_org); 4569 return rtnval; 4570 } 4571 return rtnval; 4572 } 4573 4574 static int arcmsr_bus_reset(struct scsi_cmnd *cmd) 4575 { 4576 struct AdapterControlBlock *acb; 4577 int retry_count = 0; 4578 int rtn = FAILED; 4579 
acb = (struct AdapterControlBlock *) cmd->device->host->hostdata; 4580 if (acb->acb_flags & ACB_F_ADAPTER_REMOVED) 4581 return SUCCESS; 4582 pr_notice("arcmsr: executing bus reset eh.....num_resets = %d," 4583 " num_aborts = %d \n", acb->num_resets, acb->num_aborts); 4584 acb->num_resets++; 4585 4586 if (acb->acb_flags & ACB_F_BUS_RESET) { 4587 long timeout; 4588 pr_notice("arcmsr: there is a bus reset eh proceeding...\n"); 4589 timeout = wait_event_timeout(wait_q, (acb->acb_flags 4590 & ACB_F_BUS_RESET) == 0, 220 * HZ); 4591 if (timeout) 4592 return SUCCESS; 4593 } 4594 acb->acb_flags |= ACB_F_BUS_RESET; 4595 if (!arcmsr_iop_reset(acb)) { 4596 arcmsr_hardware_reset(acb); 4597 acb->acb_flags &= ~ACB_F_IOP_INITED; 4598 wait_reset_done: 4599 ssleep(ARCMSR_SLEEPTIME); 4600 if (arcmsr_reset_in_progress(acb)) { 4601 if (retry_count > ARCMSR_RETRYCOUNT) { 4602 acb->fw_flag = FW_DEADLOCK; 4603 pr_notice("arcmsr%d: waiting for hw bus reset" 4604 " return, RETRY TERMINATED!!\n", 4605 acb->host->host_no); 4606 return FAILED; 4607 } 4608 retry_count++; 4609 goto wait_reset_done; 4610 } 4611 arcmsr_iop_init(acb); 4612 acb->fw_flag = FW_NORMAL; 4613 mod_timer(&acb->eternal_timer, jiffies + 4614 msecs_to_jiffies(6 * HZ)); 4615 acb->acb_flags &= ~ACB_F_BUS_RESET; 4616 rtn = SUCCESS; 4617 pr_notice("arcmsr: scsi bus reset eh returns with success\n"); 4618 } else { 4619 acb->acb_flags &= ~ACB_F_BUS_RESET; 4620 acb->fw_flag = FW_NORMAL; 4621 mod_timer(&acb->eternal_timer, jiffies + 4622 msecs_to_jiffies(6 * HZ)); 4623 rtn = SUCCESS; 4624 } 4625 return rtn; 4626 } 4627 4628 static int arcmsr_abort_one_cmd(struct AdapterControlBlock *acb, 4629 struct CommandControlBlock *ccb) 4630 { 4631 int rtn; 4632 rtn = arcmsr_polling_ccbdone(acb, ccb); 4633 return rtn; 4634 } 4635 4636 static int arcmsr_abort(struct scsi_cmnd *cmd) 4637 { 4638 struct AdapterControlBlock *acb = 4639 (struct AdapterControlBlock *)cmd->device->host->hostdata; 4640 int i = 0; 4641 int rtn = FAILED; 4642 uint32_t 
intmask_org; 4643 4644 if (acb->acb_flags & ACB_F_ADAPTER_REMOVED) 4645 return SUCCESS; 4646 printk(KERN_NOTICE 4647 "arcmsr%d: abort device command of scsi id = %d lun = %d\n", 4648 acb->host->host_no, cmd->device->id, (u32)cmd->device->lun); 4649 acb->acb_flags |= ACB_F_ABORT; 4650 acb->num_aborts++; 4651 /* 4652 ************************************************ 4653 ** the all interrupt service routine is locked 4654 ** we need to handle it as soon as possible and exit 4655 ************************************************ 4656 */ 4657 if (!atomic_read(&acb->ccboutstandingcount)) { 4658 acb->acb_flags &= ~ACB_F_ABORT; 4659 return rtn; 4660 } 4661 4662 intmask_org = arcmsr_disable_outbound_ints(acb); 4663 for (i = 0; i < acb->maxFreeCCB; i++) { 4664 struct CommandControlBlock *ccb = acb->pccb_pool[i]; 4665 if (ccb->startdone == ARCMSR_CCB_START && ccb->pcmd == cmd) { 4666 ccb->startdone = ARCMSR_CCB_ABORTED; 4667 rtn = arcmsr_abort_one_cmd(acb, ccb); 4668 break; 4669 } 4670 } 4671 acb->acb_flags &= ~ACB_F_ABORT; 4672 arcmsr_enable_outbound_ints(acb, intmask_org); 4673 return rtn; 4674 } 4675 4676 static const char *arcmsr_info(struct Scsi_Host *host) 4677 { 4678 struct AdapterControlBlock *acb = 4679 (struct AdapterControlBlock *) host->hostdata; 4680 static char buf[256]; 4681 char *type; 4682 int raid6 = 1; 4683 switch (acb->pdev->device) { 4684 case PCI_DEVICE_ID_ARECA_1110: 4685 case PCI_DEVICE_ID_ARECA_1200: 4686 case PCI_DEVICE_ID_ARECA_1202: 4687 case PCI_DEVICE_ID_ARECA_1210: 4688 raid6 = 0; 4689 fallthrough; 4690 case PCI_DEVICE_ID_ARECA_1120: 4691 case PCI_DEVICE_ID_ARECA_1130: 4692 case PCI_DEVICE_ID_ARECA_1160: 4693 case PCI_DEVICE_ID_ARECA_1170: 4694 case PCI_DEVICE_ID_ARECA_1201: 4695 case PCI_DEVICE_ID_ARECA_1203: 4696 case PCI_DEVICE_ID_ARECA_1220: 4697 case PCI_DEVICE_ID_ARECA_1230: 4698 case PCI_DEVICE_ID_ARECA_1260: 4699 case PCI_DEVICE_ID_ARECA_1270: 4700 case PCI_DEVICE_ID_ARECA_1280: 4701 type = "SATA"; 4702 break; 4703 case 
PCI_DEVICE_ID_ARECA_1214: 4704 case PCI_DEVICE_ID_ARECA_1380: 4705 case PCI_DEVICE_ID_ARECA_1381: 4706 case PCI_DEVICE_ID_ARECA_1680: 4707 case PCI_DEVICE_ID_ARECA_1681: 4708 case PCI_DEVICE_ID_ARECA_1880: 4709 case PCI_DEVICE_ID_ARECA_1884: 4710 type = "SAS/SATA"; 4711 break; 4712 case PCI_DEVICE_ID_ARECA_1886: 4713 type = "NVMe/SAS/SATA"; 4714 break; 4715 default: 4716 type = "unknown"; 4717 raid6 = 0; 4718 break; 4719 } 4720 sprintf(buf, "Areca %s RAID Controller %s\narcmsr version %s\n", 4721 type, raid6 ? "(RAID6 capable)" : "", ARCMSR_DRIVER_VERSION); 4722 return buf; 4723 } 4724