xref: /linux/drivers/scsi/arcmsr/arcmsr_hba.c (revision c159dfbdd4fc62fa08f6715d9d6c34d39cf40446)
1 /*
2 *******************************************************************************
3 **        O.S   : Linux
4 **   FILE NAME  : arcmsr_hba.c
5 **        BY    : Nick Cheng, C.L. Huang
6 **   Description: SCSI RAID Device Driver for Areca RAID Controller
7 *******************************************************************************
8 ** Copyright (C) 2002 - 2014, Areca Technology Corporation All rights reserved
9 **
10 **     Web site: www.areca.com.tw
11 **       E-mail: support@areca.com.tw
12 **
13 ** This program is free software; you can redistribute it and/or modify
14 ** it under the terms of the GNU General Public License version 2 as
15 ** published by the Free Software Foundation.
16 ** This program is distributed in the hope that it will be useful,
17 ** but WITHOUT ANY WARRANTY; without even the implied warranty of
18 ** MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
19 ** GNU General Public License for more details.
20 *******************************************************************************
21 ** Redistribution and use in source and binary forms, with or without
22 ** modification, are permitted provided that the following conditions
23 ** are met:
24 ** 1. Redistributions of source code must retain the above copyright
25 **    notice, this list of conditions and the following disclaimer.
26 ** 2. Redistributions in binary form must reproduce the above copyright
27 **    notice, this list of conditions and the following disclaimer in the
28 **    documentation and/or other materials provided with the distribution.
29 ** 3. The name of the author may not be used to endorse or promote products
30 **    derived from this software without specific prior written permission.
31 **
32 ** THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
33 ** IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
34 ** OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
35 ** IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
36 ** INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES(INCLUDING,BUT
37 ** NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
38 ** DATA, OR PROFITS; OR BUSINESS INTERRUPTION)HOWEVER CAUSED AND ON ANY
39 ** THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
40 ** (INCLUDING NEGLIGENCE OR OTHERWISE)ARISING IN ANY WAY OUT OF THE USE OF
41 ** THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
42 *******************************************************************************
43 ** For history of changes, see Documentation/scsi/ChangeLog.arcmsr
44 **     Firmware Specification, see Documentation/scsi/arcmsr_spec.rst
45 *******************************************************************************
46 */
#include <linux/module.h>
#include <linux/reboot.h>
#include <linux/spinlock.h>
#include <linux/pci_ids.h>
#include <linux/interrupt.h>
#include <linux/moduleparam.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/timer.h>
#include <linux/slab.h>
#include <linux/pci.h>
#include <linux/blkdev.h>	/* sector_div() */
#include <linux/circ_buf.h>
#include <asm/dma.h>
#include <asm/io.h>
#include <linux/uaccess.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_tcq.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_transport.h>
#include <scsi/scsicam.h>
#include "arcmsr.h"
72 MODULE_AUTHOR("Nick Cheng, C.L. Huang <support@areca.com.tw>");
73 MODULE_DESCRIPTION("Areca ARC11xx/12xx/16xx/188x SAS/SATA RAID Controller Driver");
74 MODULE_LICENSE("Dual BSD/GPL");
75 MODULE_VERSION(ARCMSR_DRIVER_VERSION);
76 
/*
 * Module parameters.  All are read-only through sysfs (S_IRUGO); they are
 * sampled at probe time, so changes require a driver reload to take effect.
 */

/* Use MSI-X interrupts when the device/platform supports them. */
static int msix_enable = 1;
module_param(msix_enable, int, S_IRUGO);
MODULE_PARM_DESC(msix_enable, "Enable MSI-X interrupt(0 ~ 1), msix_enable=1(enable), =0(disable)");

/* Fall back to (or prefer) plain MSI interrupts. */
static int msi_enable = 1;
module_param(msi_enable, int, S_IRUGO);
MODULE_PARM_DESC(msi_enable, "Enable MSI interrupt(0 ~ 1), msi_enable=1(enable), =0(disable)");

/* Per-adapter outstanding command limit exported via shost->can_queue. */
static int host_can_queue = ARCMSR_DEFAULT_OUTSTANDING_CMD;
module_param(host_can_queue, int, S_IRUGO);
MODULE_PARM_DESC(host_can_queue, " adapter queue depth(32 ~ 1024), default is 128");

/* Default per-LUN queue depth. */
static int cmd_per_lun = ARCMSR_DEFAULT_CMD_PERLUN;
module_param(cmd_per_lun, int, S_IRUGO);
MODULE_PARM_DESC(cmd_per_lun, " device queue depth(1 ~ 128), default is 32");

/* Request a 64-bit DMA mask instead of the default 32-bit one. */
static int dma_mask_64 = 0;
module_param(dma_mask_64, int, S_IRUGO);
MODULE_PARM_DESC(dma_mask_64, " set DMA mask to 64 bits(0 ~ 1), dma_mask_64=1(64 bits), =0(32 bits)");

/* Periodically push the host's date/time down to the IOP firmware. */
static int set_date_time = 0;
module_param(set_date_time, int, S_IRUGO);
MODULE_PARM_DESC(set_date_time, " send date, time to iop(0 ~ 1), set_date_time=1(enable), default(=0) is disable");

/* SCSI command timeout, in seconds. */
static int cmd_timeout = ARCMSR_DEFAULT_TIMEOUT;
module_param(cmd_timeout, int, S_IRUGO);
MODULE_PARM_DESC(cmd_timeout, " scsi cmd timeout(0 ~ 120 sec.), default is 90");

/* Sleep interval (seconds) and retry count used by the error-recovery paths. */
#define	ARCMSR_SLEEPTIME	10
#define	ARCMSR_RETRYCOUNT	12
107 
108 static wait_queue_head_t wait_q;
109 static int arcmsr_iop_message_xfer(struct AdapterControlBlock *acb,
110 					struct scsi_cmnd *cmd);
111 static int arcmsr_iop_confirm(struct AdapterControlBlock *acb);
112 static int arcmsr_abort(struct scsi_cmnd *);
113 static int arcmsr_bus_reset(struct scsi_cmnd *);
114 static int arcmsr_bios_param(struct scsi_device *sdev,
115 		struct block_device *bdev, sector_t capacity, int *info);
116 static int arcmsr_queue_command(struct Scsi_Host *h, struct scsi_cmnd *cmd);
117 static int arcmsr_probe(struct pci_dev *pdev,
118 				const struct pci_device_id *id);
119 static int __maybe_unused arcmsr_suspend(struct device *dev);
120 static int __maybe_unused arcmsr_resume(struct device *dev);
121 static void arcmsr_remove(struct pci_dev *pdev);
122 static void arcmsr_shutdown(struct pci_dev *pdev);
123 static void arcmsr_iop_init(struct AdapterControlBlock *acb);
124 static void arcmsr_free_ccb_pool(struct AdapterControlBlock *acb);
125 static u32 arcmsr_disable_outbound_ints(struct AdapterControlBlock *acb);
126 static void arcmsr_enable_outbound_ints(struct AdapterControlBlock *acb,
127 	u32 intmask_org);
128 static void arcmsr_stop_adapter_bgrb(struct AdapterControlBlock *acb);
129 static void arcmsr_hbaA_flush_cache(struct AdapterControlBlock *acb);
130 static void arcmsr_hbaB_flush_cache(struct AdapterControlBlock *acb);
131 static void arcmsr_request_device_map(struct timer_list *t);
132 static void arcmsr_message_isr_bh_fn(struct work_struct *work);
133 static bool arcmsr_get_firmware_spec(struct AdapterControlBlock *acb);
134 static void arcmsr_start_adapter_bgrb(struct AdapterControlBlock *acb);
135 static void arcmsr_hbaC_message_isr(struct AdapterControlBlock *pACB);
136 static void arcmsr_hbaD_message_isr(struct AdapterControlBlock *acb);
137 static void arcmsr_hbaE_message_isr(struct AdapterControlBlock *acb);
138 static void arcmsr_hbaE_postqueue_isr(struct AdapterControlBlock *acb);
139 static void arcmsr_hbaF_postqueue_isr(struct AdapterControlBlock *acb);
140 static void arcmsr_hardware_reset(struct AdapterControlBlock *acb);
141 static const char *arcmsr_info(struct Scsi_Host *);
142 static irqreturn_t arcmsr_interrupt(struct AdapterControlBlock *acb);
143 static void arcmsr_free_irq(struct pci_dev *, struct AdapterControlBlock *);
144 static void arcmsr_wait_firmware_ready(struct AdapterControlBlock *acb);
145 static void arcmsr_set_iop_datetime(struct timer_list *);
146 static int arcmsr_sdev_configure(struct scsi_device *sdev,
147 				 struct queue_limits *lim);
148 static int arcmsr_adjust_disk_queue_depth(struct scsi_device *sdev, int queue_depth)
149 {
150 	if (queue_depth > ARCMSR_MAX_CMD_PERLUN)
151 		queue_depth = ARCMSR_MAX_CMD_PERLUN;
152 	return scsi_change_queue_depth(sdev, queue_depth);
153 }
154 
/*
 * SCSI mid-layer host template.  can_queue, sg_tablesize, max_sectors and
 * cmd_per_lun are defaults only: the probe path may override them from the
 * module parameters and the firmware-reported limits.
 */
static const struct scsi_host_template arcmsr_scsi_host_template = {
	.module			= THIS_MODULE,
	.proc_name		= ARCMSR_NAME,
	.name			= "Areca SAS/SATA RAID driver",
	.info			= arcmsr_info,
	.queuecommand		= arcmsr_queue_command,
	.eh_abort_handler	= arcmsr_abort,
	.eh_bus_reset_handler	= arcmsr_bus_reset,
	.bios_param		= arcmsr_bios_param,
	.sdev_configure		= arcmsr_sdev_configure,
	.change_queue_depth	= arcmsr_adjust_disk_queue_depth,
	.can_queue		= ARCMSR_DEFAULT_OUTSTANDING_CMD,
	.this_id		= ARCMSR_SCSI_INITIATOR_ID,
	.sg_tablesize	        = ARCMSR_DEFAULT_SG_ENTRIES,
	.max_sectors		= ARCMSR_MAX_XFER_SECTORS_C,
	.cmd_per_lun		= ARCMSR_DEFAULT_CMD_PERLUN,
	.shost_groups		= arcmsr_host_groups,
	.no_write_same		= 1,
};
174 
/*
 * PCI device table.  driver_data carries the message-unit flavour
 * (ACB_ADAPTER_TYPE_A..F) that selects the register layout and I/O-queue
 * scheme used throughout the driver.
 */
static const struct pci_device_id arcmsr_device_id_table[] = {
	/* Type A: ARC11xx/12xx/16xx PCI-X/PCIe generation */
	{PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1110),
		.driver_data = ACB_ADAPTER_TYPE_A},
	{PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1120),
		.driver_data = ACB_ADAPTER_TYPE_A},
	{PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1130),
		.driver_data = ACB_ADAPTER_TYPE_A},
	{PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1160),
		.driver_data = ACB_ADAPTER_TYPE_A},
	{PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1170),
		.driver_data = ACB_ADAPTER_TYPE_A},
	/* Type B: ARC120x entry-level boards (doorbell message unit) */
	{PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1200),
		.driver_data = ACB_ADAPTER_TYPE_B},
	{PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1201),
		.driver_data = ACB_ADAPTER_TYPE_B},
	{PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1202),
		.driver_data = ACB_ADAPTER_TYPE_B},
	{PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1203),
		.driver_data = ACB_ADAPTER_TYPE_B},
	{PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1210),
		.driver_data = ACB_ADAPTER_TYPE_A},
	/* Type D: ARC-1214 */
	{PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1214),
		.driver_data = ACB_ADAPTER_TYPE_D},
	{PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1220),
		.driver_data = ACB_ADAPTER_TYPE_A},
	{PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1230),
		.driver_data = ACB_ADAPTER_TYPE_A},
	{PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1260),
		.driver_data = ACB_ADAPTER_TYPE_A},
	{PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1270),
		.driver_data = ACB_ADAPTER_TYPE_A},
	{PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1280),
		.driver_data = ACB_ADAPTER_TYPE_A},
	{PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1380),
		.driver_data = ACB_ADAPTER_TYPE_A},
	{PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1381),
		.driver_data = ACB_ADAPTER_TYPE_A},
	{PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1680),
		.driver_data = ACB_ADAPTER_TYPE_A},
	{PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1681),
		.driver_data = ACB_ADAPTER_TYPE_A},
	/* Type C: ARC188x SAS generation */
	{PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1880),
		.driver_data = ACB_ADAPTER_TYPE_C},
	{PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1883),
		.driver_data = ACB_ADAPTER_TYPE_C},
	/* Type E: ARC-1884 */
	{PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1884),
		.driver_data = ACB_ADAPTER_TYPE_E},
	/* Type F: ARC-1886 family */
	{PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1886_0),
		.driver_data = ACB_ADAPTER_TYPE_F},
	{PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1886),
		.driver_data = ACB_ADAPTER_TYPE_F},
	{0, 0}, /* Terminating entry */
};
MODULE_DEVICE_TABLE(pci, arcmsr_device_id_table);
229 
/* System suspend/resume hooks (no runtime-PM callbacks). */
static SIMPLE_DEV_PM_OPS(arcmsr_pm_ops, arcmsr_suspend, arcmsr_resume);

/* PCI driver registration record tying probe/remove/shutdown together. */
static struct pci_driver arcmsr_pci_driver = {
	.name			= "arcmsr",
	.id_table		= arcmsr_device_id_table,
	.probe			= arcmsr_probe,
	.remove			= arcmsr_remove,
	.driver.pm		= &arcmsr_pm_ops,
	.shutdown		= arcmsr_shutdown,
};
240 /*
241 ****************************************************************************
242 ****************************************************************************
243 */
244 
245 static void arcmsr_free_io_queue(struct AdapterControlBlock *acb)
246 {
247 	switch (acb->adapter_type) {
248 	case ACB_ADAPTER_TYPE_B:
249 	case ACB_ADAPTER_TYPE_D:
250 	case ACB_ADAPTER_TYPE_E:
251 	case ACB_ADAPTER_TYPE_F:
252 		dma_free_coherent(&acb->pdev->dev, acb->ioqueue_size,
253 			acb->dma_coherent2, acb->dma_coherent_handle2);
254 		break;
255 	}
256 }
257 
/*
 * arcmsr_remap_pciregion - ioremap() the controller register window(s)
 * for this adapter type and perform the type-specific initial register
 * pokes.  Which BAR holds the message unit differs per generation.
 *
 * Returns true on success; false if any mapping failed (any mapping
 * made earlier in the same call is unwound before returning false).
 */
static bool arcmsr_remap_pciregion(struct AdapterControlBlock *acb)
{
	struct pci_dev *pdev = acb->pdev;
	switch (acb->adapter_type){
	case ACB_ADAPTER_TYPE_A:{
		/* Type A: single register window in BAR 0. */
		acb->pmuA = ioremap(pci_resource_start(pdev,0), pci_resource_len(pdev,0));
		if (!acb->pmuA) {
			printk(KERN_NOTICE "arcmsr%d: memory mapping region fail \n", acb->host->host_no);
			return false;
		}
		break;
	}
	case ACB_ADAPTER_TYPE_B:{
		/* Type B: two windows — BAR 0 (doorbells) and BAR 2 (buffers). */
		void __iomem *mem_base0, *mem_base1;
		mem_base0 = ioremap(pci_resource_start(pdev, 0), pci_resource_len(pdev, 0));
		if (!mem_base0) {
			printk(KERN_NOTICE "arcmsr%d: memory mapping region fail \n", acb->host->host_no);
			return false;
		}
		mem_base1 = ioremap(pci_resource_start(pdev, 2), pci_resource_len(pdev, 2));
		if (!mem_base1) {
			/* Unwind the first mapping before failing. */
			iounmap(mem_base0);
			printk(KERN_NOTICE "arcmsr%d: memory mapping region fail \n", acb->host->host_no);
			return false;
		}
		acb->mem_base0 = mem_base0;
		acb->mem_base1 = mem_base1;
		break;
	}
	case ACB_ADAPTER_TYPE_C:{
		/* Type C: register window in BAR 1. */
		acb->pmuC = ioremap(pci_resource_start(pdev, 1), pci_resource_len(pdev, 1));
		if (!acb->pmuC) {
			printk(KERN_NOTICE "arcmsr%d: memory mapping region fail \n", acb->host->host_no);
			return false;
		}
		/* Ack any message interrupt left pending from before the remap;
		 * if one was pending, return immediately (mapping is done). */
		if (readl(&acb->pmuC->outbound_doorbell) & ARCMSR_HBCMU_IOP2DRV_MESSAGE_CMD_DONE) {
			writel(ARCMSR_HBCMU_IOP2DRV_MESSAGE_CMD_DONE_DOORBELL_CLEAR, &acb->pmuC->outbound_doorbell_clear);/*clear interrupt*/
			return true;
		}
		break;
	}
	case ACB_ADAPTER_TYPE_D: {
		/* Type D: single window in BAR 0; register pointers are
		 * derived later by arcmsr_hbaD_assign_regAddr(). */
		void __iomem *mem_base0;
		unsigned long addr, range;

		addr = (unsigned long)pci_resource_start(pdev, 0);
		range = pci_resource_len(pdev, 0);
		mem_base0 = ioremap(addr, range);
		if (!mem_base0) {
			pr_notice("arcmsr%d: memory mapping region fail\n",
				acb->host->host_no);
			return false;
		}
		acb->mem_base0 = mem_base0;
		break;
		}
	case ACB_ADAPTER_TYPE_E: {
		/* Type E: window in BAR 1; uses toggling doorbell bits, so
		 * both sides must start from a known doorbell state. */
		acb->pmuE = ioremap(pci_resource_start(pdev, 1),
			pci_resource_len(pdev, 1));
		if (!acb->pmuE) {
			pr_notice("arcmsr%d: memory mapping region fail \n",
				acb->host->host_no);
			return false;
		}
		writel(0, &acb->pmuE->host_int_status); /*clear interrupt*/
		writel(ARCMSR_HBEMU_DOORBELL_SYNC, &acb->pmuE->iobound_doorbell);	/* synchronize doorbell to 0 */
		acb->in_doorbell = 0;
		acb->out_doorbell = 0;
		break;
		}
	case ACB_ADAPTER_TYPE_F: {
		/* Type F: window in BAR 0; same doorbell-sync dance as type E. */
		acb->pmuF = ioremap(pci_resource_start(pdev, 0), pci_resource_len(pdev, 0));
		if (!acb->pmuF) {
			pr_notice("arcmsr%d: memory mapping region fail\n",
				acb->host->host_no);
			return false;
		}
		writel(0, &acb->pmuF->host_int_status); /* clear interrupt */
		writel(ARCMSR_HBFMU_DOORBELL_SYNC, &acb->pmuF->iobound_doorbell);
		acb->in_doorbell = 0;
		acb->out_doorbell = 0;
		break;
		}
	}
	return true;
}
344 
345 static void arcmsr_unmap_pciregion(struct AdapterControlBlock *acb)
346 {
347 	switch (acb->adapter_type) {
348 	case ACB_ADAPTER_TYPE_A:
349 		iounmap(acb->pmuA);
350 		break;
351 	case ACB_ADAPTER_TYPE_B:
352 		iounmap(acb->mem_base0);
353 		iounmap(acb->mem_base1);
354 		break;
355 	case ACB_ADAPTER_TYPE_C:
356 		iounmap(acb->pmuC);
357 		break;
358 	case ACB_ADAPTER_TYPE_D:
359 		iounmap(acb->mem_base0);
360 		break;
361 	case ACB_ADAPTER_TYPE_E:
362 		iounmap(acb->pmuE);
363 		break;
364 	case ACB_ADAPTER_TYPE_F:
365 		iounmap(acb->pmuF);
366 		break;
367 	}
368 }
369 
370 static irqreturn_t arcmsr_do_interrupt(int irq, void *dev_id)
371 {
372 	irqreturn_t handle_state;
373 	struct AdapterControlBlock *acb = dev_id;
374 
375 	handle_state = arcmsr_interrupt(acb);
376 	return handle_state;
377 }
378 
379 static int arcmsr_bios_param(struct scsi_device *sdev,
380 		struct block_device *bdev, sector_t capacity, int *geom)
381 {
382 	int heads, sectors, cylinders, total_capacity;
383 
384 	if (scsi_partsize(bdev, capacity, geom))
385 		return 0;
386 
387 	total_capacity = capacity;
388 	heads = 64;
389 	sectors = 32;
390 	cylinders = total_capacity / (heads * sectors);
391 	if (cylinders > 1024) {
392 		heads = 255;
393 		sectors = 63;
394 		cylinders = total_capacity / (heads * sectors);
395 	}
396 	geom[0] = heads;
397 	geom[1] = sectors;
398 	geom[2] = cylinders;
399 	return 0;
400 }
401 
402 static uint8_t arcmsr_hbaA_wait_msgint_ready(struct AdapterControlBlock *acb)
403 {
404 	struct MessageUnit_A __iomem *reg = acb->pmuA;
405 	int i;
406 
407 	for (i = 0; i < 2000; i++) {
408 		if (readl(&reg->outbound_intstatus) &
409 				ARCMSR_MU_OUTBOUND_MESSAGE0_INT) {
410 			writel(ARCMSR_MU_OUTBOUND_MESSAGE0_INT,
411 				&reg->outbound_intstatus);
412 			return true;
413 		}
414 		msleep(10);
415 	} /* max 20 seconds */
416 
417 	return false;
418 }
419 
420 static uint8_t arcmsr_hbaB_wait_msgint_ready(struct AdapterControlBlock *acb)
421 {
422 	struct MessageUnit_B *reg = acb->pmuB;
423 	int i;
424 
425 	for (i = 0; i < 2000; i++) {
426 		if (readl(reg->iop2drv_doorbell)
427 			& ARCMSR_IOP2DRV_MESSAGE_CMD_DONE) {
428 			writel(ARCMSR_MESSAGE_INT_CLEAR_PATTERN,
429 					reg->iop2drv_doorbell);
430 			writel(ARCMSR_DRV2IOP_END_OF_INTERRUPT,
431 					reg->drv2iop_doorbell);
432 			return true;
433 		}
434 		msleep(10);
435 	} /* max 20 seconds */
436 
437 	return false;
438 }
439 
440 static uint8_t arcmsr_hbaC_wait_msgint_ready(struct AdapterControlBlock *pACB)
441 {
442 	struct MessageUnit_C __iomem *phbcmu = pACB->pmuC;
443 	int i;
444 
445 	for (i = 0; i < 2000; i++) {
446 		if (readl(&phbcmu->outbound_doorbell)
447 				& ARCMSR_HBCMU_IOP2DRV_MESSAGE_CMD_DONE) {
448 			writel(ARCMSR_HBCMU_IOP2DRV_MESSAGE_CMD_DONE_DOORBELL_CLEAR,
449 				&phbcmu->outbound_doorbell_clear); /*clear interrupt*/
450 			return true;
451 		}
452 		msleep(10);
453 	} /* max 20 seconds */
454 
455 	return false;
456 }
457 
458 static bool arcmsr_hbaD_wait_msgint_ready(struct AdapterControlBlock *pACB)
459 {
460 	struct MessageUnit_D *reg = pACB->pmuD;
461 	int i;
462 
463 	for (i = 0; i < 2000; i++) {
464 		if (readl(reg->outbound_doorbell)
465 			& ARCMSR_ARC1214_IOP2DRV_MESSAGE_CMD_DONE) {
466 			writel(ARCMSR_ARC1214_IOP2DRV_MESSAGE_CMD_DONE,
467 				reg->outbound_doorbell);
468 			return true;
469 		}
470 		msleep(10);
471 	} /* max 20 seconds */
472 	return false;
473 }
474 
475 static bool arcmsr_hbaE_wait_msgint_ready(struct AdapterControlBlock *pACB)
476 {
477 	int i;
478 	uint32_t read_doorbell;
479 	struct MessageUnit_E __iomem *phbcmu = pACB->pmuE;
480 
481 	for (i = 0; i < 2000; i++) {
482 		read_doorbell = readl(&phbcmu->iobound_doorbell);
483 		if ((read_doorbell ^ pACB->in_doorbell) & ARCMSR_HBEMU_IOP2DRV_MESSAGE_CMD_DONE) {
484 			writel(0, &phbcmu->host_int_status); /*clear interrupt*/
485 			pACB->in_doorbell = read_doorbell;
486 			return true;
487 		}
488 		msleep(10);
489 	} /* max 20 seconds */
490 	return false;
491 }
492 
493 static void arcmsr_hbaA_flush_cache(struct AdapterControlBlock *acb)
494 {
495 	struct MessageUnit_A __iomem *reg = acb->pmuA;
496 	int retry_count = 30;
497 	writel(ARCMSR_INBOUND_MESG0_FLUSH_CACHE, &reg->inbound_msgaddr0);
498 	do {
499 		if (arcmsr_hbaA_wait_msgint_ready(acb))
500 			break;
501 		else {
502 			retry_count--;
503 			printk(KERN_NOTICE "arcmsr%d: wait 'flush adapter cache' \
504 			timeout, retry count down = %d \n", acb->host->host_no, retry_count);
505 		}
506 	} while (retry_count != 0);
507 }
508 
509 static void arcmsr_hbaB_flush_cache(struct AdapterControlBlock *acb)
510 {
511 	struct MessageUnit_B *reg = acb->pmuB;
512 	int retry_count = 30;
513 	writel(ARCMSR_MESSAGE_FLUSH_CACHE, reg->drv2iop_doorbell);
514 	do {
515 		if (arcmsr_hbaB_wait_msgint_ready(acb))
516 			break;
517 		else {
518 			retry_count--;
519 			printk(KERN_NOTICE "arcmsr%d: wait 'flush adapter cache' \
520 			timeout,retry count down = %d \n", acb->host->host_no, retry_count);
521 		}
522 	} while (retry_count != 0);
523 }
524 
525 static void arcmsr_hbaC_flush_cache(struct AdapterControlBlock *pACB)
526 {
527 	struct MessageUnit_C __iomem *reg = pACB->pmuC;
528 	int retry_count = 30;/* enlarge wait flush adapter cache time: 10 minute */
529 	writel(ARCMSR_INBOUND_MESG0_FLUSH_CACHE, &reg->inbound_msgaddr0);
530 	writel(ARCMSR_HBCMU_DRV2IOP_MESSAGE_CMD_DONE, &reg->inbound_doorbell);
531 	do {
532 		if (arcmsr_hbaC_wait_msgint_ready(pACB)) {
533 			break;
534 		} else {
535 			retry_count--;
536 			printk(KERN_NOTICE "arcmsr%d: wait 'flush adapter cache' \
537 			timeout,retry count down = %d \n", pACB->host->host_no, retry_count);
538 		}
539 	} while (retry_count != 0);
540 	return;
541 }
542 
543 static void arcmsr_hbaD_flush_cache(struct AdapterControlBlock *pACB)
544 {
545 	int retry_count = 15;
546 	struct MessageUnit_D *reg = pACB->pmuD;
547 
548 	writel(ARCMSR_INBOUND_MESG0_FLUSH_CACHE, reg->inbound_msgaddr0);
549 	do {
550 		if (arcmsr_hbaD_wait_msgint_ready(pACB))
551 			break;
552 
553 		retry_count--;
554 		pr_notice("arcmsr%d: wait 'flush adapter "
555 			"cache' timeout, retry count down = %d\n",
556 			pACB->host->host_no, retry_count);
557 	} while (retry_count != 0);
558 }
559 
560 static void arcmsr_hbaE_flush_cache(struct AdapterControlBlock *pACB)
561 {
562 	int retry_count = 30;
563 	struct MessageUnit_E __iomem *reg = pACB->pmuE;
564 
565 	writel(ARCMSR_INBOUND_MESG0_FLUSH_CACHE, &reg->inbound_msgaddr0);
566 	pACB->out_doorbell ^= ARCMSR_HBEMU_DRV2IOP_MESSAGE_CMD_DONE;
567 	writel(pACB->out_doorbell, &reg->iobound_doorbell);
568 	do {
569 		if (arcmsr_hbaE_wait_msgint_ready(pACB))
570 			break;
571 		retry_count--;
572 		pr_notice("arcmsr%d: wait 'flush adapter "
573 			"cache' timeout, retry count down = %d\n",
574 			pACB->host->host_no, retry_count);
575 	} while (retry_count != 0);
576 }
577 
/*
 * Dispatch a write-cache flush to the type-specific handler.  Types E
 * and F share the type E message-unit protocol for this operation.
 */
static void arcmsr_flush_adapter_cache(struct AdapterControlBlock *acb)
{
	switch (acb->adapter_type) {

	case ACB_ADAPTER_TYPE_A:
		arcmsr_hbaA_flush_cache(acb);
		break;
	case ACB_ADAPTER_TYPE_B:
		arcmsr_hbaB_flush_cache(acb);
		break;
	case ACB_ADAPTER_TYPE_C:
		arcmsr_hbaC_flush_cache(acb);
		break;
	case ACB_ADAPTER_TYPE_D:
		arcmsr_hbaD_flush_cache(acb);
		break;
	case ACB_ADAPTER_TYPE_E:
	case ACB_ADAPTER_TYPE_F:
		arcmsr_hbaE_flush_cache(acb);
		break;
	}
}
600 
601 static void arcmsr_hbaB_assign_regAddr(struct AdapterControlBlock *acb)
602 {
603 	struct MessageUnit_B *reg = acb->pmuB;
604 
605 	if (acb->pdev->device == PCI_DEVICE_ID_ARECA_1203) {
606 		reg->drv2iop_doorbell = MEM_BASE0(ARCMSR_DRV2IOP_DOORBELL_1203);
607 		reg->drv2iop_doorbell_mask = MEM_BASE0(ARCMSR_DRV2IOP_DOORBELL_MASK_1203);
608 		reg->iop2drv_doorbell = MEM_BASE0(ARCMSR_IOP2DRV_DOORBELL_1203);
609 		reg->iop2drv_doorbell_mask = MEM_BASE0(ARCMSR_IOP2DRV_DOORBELL_MASK_1203);
610 	} else {
611 		reg->drv2iop_doorbell= MEM_BASE0(ARCMSR_DRV2IOP_DOORBELL);
612 		reg->drv2iop_doorbell_mask = MEM_BASE0(ARCMSR_DRV2IOP_DOORBELL_MASK);
613 		reg->iop2drv_doorbell = MEM_BASE0(ARCMSR_IOP2DRV_DOORBELL);
614 		reg->iop2drv_doorbell_mask = MEM_BASE0(ARCMSR_IOP2DRV_DOORBELL_MASK);
615 	}
616 	reg->message_wbuffer = MEM_BASE1(ARCMSR_MESSAGE_WBUFFER);
617 	reg->message_rbuffer =  MEM_BASE1(ARCMSR_MESSAGE_RBUFFER);
618 	reg->message_rwbuffer = MEM_BASE1(ARCMSR_MESSAGE_RWBUFFER);
619 }
620 
/*
 * Fill in the ARC-1214 (type D) message-unit register pointers.  Every
 * register is an offset into the single mapped window (MEM_BASE0); the
 * offsets come from the ARC-1214 register map in arcmsr.h.
 */
static void arcmsr_hbaD_assign_regAddr(struct AdapterControlBlock *acb)
{
	struct MessageUnit_D *reg = acb->pmuD;

	reg->chip_id = MEM_BASE0(ARCMSR_ARC1214_CHIP_ID);
	reg->cpu_mem_config = MEM_BASE0(ARCMSR_ARC1214_CPU_MEMORY_CONFIGURATION);
	reg->i2o_host_interrupt_mask = MEM_BASE0(ARCMSR_ARC1214_I2_HOST_INTERRUPT_MASK);
	reg->sample_at_reset = MEM_BASE0(ARCMSR_ARC1214_SAMPLE_RESET);
	reg->reset_request = MEM_BASE0(ARCMSR_ARC1214_RESET_REQUEST);
	reg->host_int_status = MEM_BASE0(ARCMSR_ARC1214_MAIN_INTERRUPT_STATUS);
	reg->pcief0_int_enable = MEM_BASE0(ARCMSR_ARC1214_PCIE_F0_INTERRUPT_ENABLE);
	reg->inbound_msgaddr0 = MEM_BASE0(ARCMSR_ARC1214_INBOUND_MESSAGE0);
	reg->inbound_msgaddr1 = MEM_BASE0(ARCMSR_ARC1214_INBOUND_MESSAGE1);
	reg->outbound_msgaddr0 = MEM_BASE0(ARCMSR_ARC1214_OUTBOUND_MESSAGE0);
	reg->outbound_msgaddr1 = MEM_BASE0(ARCMSR_ARC1214_OUTBOUND_MESSAGE1);
	reg->inbound_doorbell = MEM_BASE0(ARCMSR_ARC1214_INBOUND_DOORBELL);
	reg->outbound_doorbell = MEM_BASE0(ARCMSR_ARC1214_OUTBOUND_DOORBELL);
	reg->outbound_doorbell_enable = MEM_BASE0(ARCMSR_ARC1214_OUTBOUND_DOORBELL_ENABLE);
	/* inbound (request) and outbound (completion) list ring registers */
	reg->inboundlist_base_low = MEM_BASE0(ARCMSR_ARC1214_INBOUND_LIST_BASE_LOW);
	reg->inboundlist_base_high = MEM_BASE0(ARCMSR_ARC1214_INBOUND_LIST_BASE_HIGH);
	reg->inboundlist_write_pointer = MEM_BASE0(ARCMSR_ARC1214_INBOUND_LIST_WRITE_POINTER);
	reg->outboundlist_base_low = MEM_BASE0(ARCMSR_ARC1214_OUTBOUND_LIST_BASE_LOW);
	reg->outboundlist_base_high = MEM_BASE0(ARCMSR_ARC1214_OUTBOUND_LIST_BASE_HIGH);
	reg->outboundlist_copy_pointer = MEM_BASE0(ARCMSR_ARC1214_OUTBOUND_LIST_COPY_POINTER);
	reg->outboundlist_read_pointer = MEM_BASE0(ARCMSR_ARC1214_OUTBOUND_LIST_READ_POINTER);
	reg->outboundlist_interrupt_cause = MEM_BASE0(ARCMSR_ARC1214_OUTBOUND_INTERRUPT_CAUSE);
	reg->outboundlist_interrupt_enable = MEM_BASE0(ARCMSR_ARC1214_OUTBOUND_INTERRUPT_ENABLE);
	/* message passthrough buffers */
	reg->message_wbuffer = MEM_BASE0(ARCMSR_ARC1214_MESSAGE_WBUFFER);
	reg->message_rbuffer = MEM_BASE0(ARCMSR_ARC1214_MESSAGE_RBUFFER);
	reg->msgcode_rwbuffer = MEM_BASE0(ARCMSR_ARC1214_MESSAGE_RWBUFFER);
}
652 
/*
 * Set up the type F host-side message buffers.  Unlike older types, the
 * message buffers live in host coherent memory: they are carved out of
 * the tail of the dma_coherent2 allocation, just past the completion
 * queue, and their bus address is then programmed into the IOP.
 *
 * Buffer layout (offsets from the 4-byte-aligned end of the completion
 * queue): wbuffer at +0, rbuffer at +0x100, msgcode rwbuffer at +0x200.
 */
static void arcmsr_hbaF_assign_regAddr(struct AdapterControlBlock *acb)
{
	dma_addr_t host_buffer_dma;
	struct MessageUnit_F __iomem *pmuF;

	/* 0xff-fill the completion queue so stale entries are recognizable */
	memset(acb->dma_coherent2, 0xff, acb->completeQ_size);
	acb->message_wbuffer = (uint32_t *)round_up((unsigned long)acb->dma_coherent2 +
		acb->completeQ_size, 4);
	acb->message_rbuffer = ((void *)acb->message_wbuffer) + 0x100;
	acb->msgcode_rwbuffer = ((void *)acb->message_wbuffer) + 0x200;
	memset((void *)acb->message_wbuffer, 0, MESG_RW_BUFFER_SIZE);
	/* bus address of the same region, with identical rounding */
	host_buffer_dma = round_up(acb->dma_coherent_handle2 + acb->completeQ_size, 4);
	pmuF = acb->pmuF;
	/* host buffer low address, bit0:1 all buffer active */
	writel(lower_32_bits(host_buffer_dma | 1), &pmuF->inbound_msgaddr0);
	/* host buffer high address */
	writel(upper_32_bits(host_buffer_dma), &pmuF->inbound_msgaddr1);
	/* set host buffer physical address */
	writel(ARCMSR_HBFMU_DOORBELL_SYNC1, &pmuF->iobound_doorbell);
}
673 
674 static bool arcmsr_alloc_io_queue(struct AdapterControlBlock *acb)
675 {
676 	bool rtn = true;
677 	void *dma_coherent;
678 	dma_addr_t dma_coherent_handle;
679 	struct pci_dev *pdev = acb->pdev;
680 
681 	switch (acb->adapter_type) {
682 	case ACB_ADAPTER_TYPE_B: {
683 		acb->ioqueue_size = roundup(sizeof(struct MessageUnit_B), 32);
684 		dma_coherent = dma_alloc_coherent(&pdev->dev, acb->ioqueue_size,
685 			&dma_coherent_handle, GFP_KERNEL);
686 		if (!dma_coherent) {
687 			pr_notice("arcmsr%d: DMA allocation failed\n", acb->host->host_no);
688 			return false;
689 		}
690 		acb->dma_coherent_handle2 = dma_coherent_handle;
691 		acb->dma_coherent2 = dma_coherent;
692 		acb->pmuB = (struct MessageUnit_B *)dma_coherent;
693 		arcmsr_hbaB_assign_regAddr(acb);
694 		}
695 		break;
696 	case ACB_ADAPTER_TYPE_D: {
697 		acb->ioqueue_size = roundup(sizeof(struct MessageUnit_D), 32);
698 		dma_coherent = dma_alloc_coherent(&pdev->dev, acb->ioqueue_size,
699 			&dma_coherent_handle, GFP_KERNEL);
700 		if (!dma_coherent) {
701 			pr_notice("arcmsr%d: DMA allocation failed\n", acb->host->host_no);
702 			return false;
703 		}
704 		acb->dma_coherent_handle2 = dma_coherent_handle;
705 		acb->dma_coherent2 = dma_coherent;
706 		acb->pmuD = (struct MessageUnit_D *)dma_coherent;
707 		arcmsr_hbaD_assign_regAddr(acb);
708 		}
709 		break;
710 	case ACB_ADAPTER_TYPE_E: {
711 		uint32_t completeQ_size;
712 		completeQ_size = sizeof(struct deliver_completeQ) * ARCMSR_MAX_HBE_DONEQUEUE + 128;
713 		acb->ioqueue_size = roundup(completeQ_size, 32);
714 		dma_coherent = dma_alloc_coherent(&pdev->dev, acb->ioqueue_size,
715 			&dma_coherent_handle, GFP_KERNEL);
716 		if (!dma_coherent){
717 			pr_notice("arcmsr%d: DMA allocation failed\n", acb->host->host_no);
718 			return false;
719 		}
720 		acb->dma_coherent_handle2 = dma_coherent_handle;
721 		acb->dma_coherent2 = dma_coherent;
722 		acb->pCompletionQ = dma_coherent;
723 		acb->completionQ_entry = acb->ioqueue_size / sizeof(struct deliver_completeQ);
724 		acb->doneq_index = 0;
725 		}
726 		break;
727 	case ACB_ADAPTER_TYPE_F: {
728 		uint32_t QueueDepth;
729 		uint32_t depthTbl[] = {256, 512, 1024, 128, 64, 32};
730 
731 		arcmsr_wait_firmware_ready(acb);
732 		QueueDepth = depthTbl[readl(&acb->pmuF->outbound_msgaddr1) & 7];
733 		acb->completeQ_size = sizeof(struct deliver_completeQ) * QueueDepth + 128;
734 		acb->ioqueue_size = roundup(acb->completeQ_size + MESG_RW_BUFFER_SIZE, 32);
735 		dma_coherent = dma_alloc_coherent(&pdev->dev, acb->ioqueue_size,
736 			&dma_coherent_handle, GFP_KERNEL);
737 		if (!dma_coherent) {
738 			pr_notice("arcmsr%d: DMA allocation failed\n", acb->host->host_no);
739 			return false;
740 		}
741 		acb->dma_coherent_handle2 = dma_coherent_handle;
742 		acb->dma_coherent2 = dma_coherent;
743 		acb->pCompletionQ = dma_coherent;
744 		acb->completionQ_entry = acb->completeQ_size / sizeof(struct deliver_completeQ);
745 		acb->doneq_index = 0;
746 		arcmsr_hbaF_assign_regAddr(acb);
747 		}
748 		break;
749 	default:
750 		break;
751 	}
752 	return rtn;
753 }
754 
755 static int arcmsr_alloc_xor_buffer(struct AdapterControlBlock *acb)
756 {
757 	int rc = 0;
758 	struct pci_dev *pdev = acb->pdev;
759 	void *dma_coherent;
760 	dma_addr_t dma_coherent_handle;
761 	int i, xor_ram;
762 	struct Xor_sg *pXorPhys;
763 	void **pXorVirt;
764 	struct HostRamBuf *pRamBuf;
765 
766 	// allocate 1 MB * N physically continuous memory for XOR engine.
767 	xor_ram = (acb->firm_PicStatus >> 24) & 0x0f;
768 	acb->xor_mega = (xor_ram - 1) * 32 + 128 + 3;
769 	acb->init2cfg_size = sizeof(struct HostRamBuf) +
770 		(sizeof(struct XorHandle) * acb->xor_mega);
771 	dma_coherent = dma_alloc_coherent(&pdev->dev, acb->init2cfg_size,
772 		&dma_coherent_handle, GFP_KERNEL);
773 	acb->xorVirt = dma_coherent;
774 	acb->xorPhys = dma_coherent_handle;
775 	pXorPhys = (struct Xor_sg *)((unsigned long)dma_coherent +
776 		sizeof(struct HostRamBuf));
777 	acb->xorVirtOffset = sizeof(struct HostRamBuf) +
778 		(sizeof(struct Xor_sg) * acb->xor_mega);
779 	pXorVirt = (void **)((unsigned long)dma_coherent +
780 		(unsigned long)acb->xorVirtOffset);
781 	for (i = 0; i < acb->xor_mega; i++) {
782 		dma_coherent = dma_alloc_coherent(&pdev->dev,
783 			ARCMSR_XOR_SEG_SIZE,
784 			&dma_coherent_handle, GFP_KERNEL);
785 		if (dma_coherent) {
786 			pXorPhys->xorPhys = dma_coherent_handle;
787 			pXorPhys->xorBufLen = ARCMSR_XOR_SEG_SIZE;
788 			*pXorVirt = dma_coherent;
789 			pXorPhys++;
790 			pXorVirt++;
791 		} else {
792 			pr_info("arcmsr%d: alloc max XOR buffer = 0x%x MB\n",
793 				acb->host->host_no, i);
794 			rc = -ENOMEM;
795 			break;
796 		}
797 	}
798 	pRamBuf = (struct HostRamBuf *)acb->xorVirt;
799 	pRamBuf->hrbSignature = 0x53425248;	//HRBS
800 	pRamBuf->hrbSize = i * ARCMSR_XOR_SEG_SIZE;
801 	pRamBuf->hrbRes[0] = 0;
802 	pRamBuf->hrbRes[1] = 0;
803 	return rc;
804 }
805 
static int arcmsr_alloc_ccb_pool(struct AdapterControlBlock *acb)
{
	/*
	 * Allocate one DMA-coherent region that holds every
	 * CommandControlBlock (CCB) and, for adapter types other than F,
	 * the ioqueue/message area that follows the pool.
	 * Returns 0 on success, -ENOMEM on allocation failure.
	 */
	struct pci_dev *pdev = acb->pdev;
	void *dma_coherent;
	dma_addr_t dma_coherent_handle;
	struct CommandControlBlock *ccb_tmp;
	int i = 0, j = 0;
	unsigned long cdb_phyaddr, next_ccb_phy;
	unsigned long roundup_ccbsize;
	unsigned long max_xfer_len;
	unsigned long max_sg_entrys;
	uint32_t  firm_config_version, curr_phy_upper32;

	/* Mark every target/LUN absent until the firmware reports it. */
	for (i = 0; i < ARCMSR_MAX_TARGETID; i++)
		for (j = 0; j < ARCMSR_MAX_TARGETLUN; j++)
			acb->devstate[i][j] = ARECA_RAID_GONE;

	max_xfer_len = ARCMSR_MAX_XFER_LEN;
	max_sg_entrys = ARCMSR_DEFAULT_SG_ENTRIES;
	firm_config_version = acb->firm_cfg_version;
	/* Firmware config version >= 3 encodes a larger transfer limit. */
	if((firm_config_version & 0xFF) >= 3){
		max_xfer_len = (ARCMSR_CDB_SG_PAGE_LENGTH << ((firm_config_version >> 8) & 0xFF)) * 1024;/* max 4M byte */
		max_sg_entrys = (max_xfer_len/4096);
	}
	acb->host->max_sectors = max_xfer_len/512;
	acb->host->sg_tablesize = max_sg_entrys;
	/*
	 * One CCB = fixed part + (max_sg_entrys - 1) extra SG slots,
	 * rounded up to a 32-byte multiple (types A/B post addresses in
	 * 32-byte units, see the >> 5 below).
	 */
	roundup_ccbsize = roundup(sizeof(struct CommandControlBlock) + (max_sg_entrys - 1) * sizeof(struct SG64ENTRY), 32);
	acb->uncache_size = roundup_ccbsize * acb->maxFreeCCB;
	if (acb->adapter_type != ACB_ADAPTER_TYPE_F)
		acb->uncache_size += acb->ioqueue_size;
	dma_coherent = dma_alloc_coherent(&pdev->dev, acb->uncache_size, &dma_coherent_handle, GFP_KERNEL);
	if(!dma_coherent){
		printk(KERN_NOTICE "arcmsr%d: dma_alloc_coherent got error\n", acb->host->host_no);
		return -ENOMEM;
	}
	acb->dma_coherent = dma_coherent;
	acb->dma_coherent_handle = dma_coherent_handle;
	/* Defensive clear; modern dma_alloc_coherent() already zeroes. */
	memset(dma_coherent, 0, acb->uncache_size);
	acb->ccbsize = roundup_ccbsize;
	ccb_tmp = dma_coherent;
	curr_phy_upper32 = upper_32_bits(dma_coherent_handle);
	/* Offset converting a CDB bus address back to a kernel address. */
	acb->vir2phy_offset = (unsigned long)dma_coherent - (unsigned long)dma_coherent_handle;
	for(i = 0; i < acb->maxFreeCCB; i++){
		cdb_phyaddr = (unsigned long)dma_coherent_handle + offsetof(struct CommandControlBlock, arcmsr_cdb);
		switch (acb->adapter_type) {
		case ACB_ADAPTER_TYPE_A:
		case ACB_ADAPTER_TYPE_B:
			/* Types A/B post the CDB address pre-shifted by 5. */
			ccb_tmp->cdb_phyaddr = cdb_phyaddr >> 5;
			break;
		case ACB_ADAPTER_TYPE_C:
		case ACB_ADAPTER_TYPE_D:
		case ACB_ADAPTER_TYPE_E:
		case ACB_ADAPTER_TYPE_F:
			ccb_tmp->cdb_phyaddr = cdb_phyaddr;
			break;
		}
		acb->pccb_pool[i] = ccb_tmp;
		ccb_tmp->acb = acb;
		ccb_tmp->smid = (u32)i << 16;
		INIT_LIST_HEAD(&ccb_tmp->list);
		next_ccb_phy = dma_coherent_handle + roundup_ccbsize;
		/*
		 * All CCBs must share the same upper 32 bits of bus address;
		 * if the next CCB would cross a 4GB boundary, shrink the
		 * pool to the CCBs placed so far.
		 */
		if (upper_32_bits(next_ccb_phy) != curr_phy_upper32) {
			acb->maxFreeCCB = i;
			acb->host->can_queue = i;
			break;
		}
		else
			list_add_tail(&ccb_tmp->list, &acb->ccb_free_list);
		ccb_tmp = (struct CommandControlBlock *)((unsigned long)ccb_tmp + roundup_ccbsize);
		dma_coherent_handle = next_ccb_phy;
	}
	/* The area after the CCB pool becomes the message/completion area. */
	if (acb->adapter_type != ACB_ADAPTER_TYPE_F) {
		acb->dma_coherent_handle2 = dma_coherent_handle;
		acb->dma_coherent2 = ccb_tmp;
	}
	switch (acb->adapter_type) {
	case ACB_ADAPTER_TYPE_B:
		acb->pmuB = (struct MessageUnit_B *)acb->dma_coherent2;
		arcmsr_hbaB_assign_regAddr(acb);
		break;
	case ACB_ADAPTER_TYPE_D:
		acb->pmuD = (struct MessageUnit_D *)acb->dma_coherent2;
		arcmsr_hbaD_assign_regAddr(acb);
		break;
	case ACB_ADAPTER_TYPE_E:
		acb->pCompletionQ = acb->dma_coherent2;
		acb->completionQ_entry = acb->ioqueue_size / sizeof(struct deliver_completeQ);
		acb->doneq_index = 0;
		break;
	}
	/* Non-zero PicStatus[27:24] means the firmware wants XOR buffers. */
	if ((acb->firm_PicStatus >> 24) & 0x0f) {
		if (arcmsr_alloc_xor_buffer(acb))
			return -ENOMEM;
	}
	return 0;
}
902 
static void arcmsr_message_isr_bh_fn(struct work_struct *work)
{
	/*
	 * Bottom-half worker for the "get config" message interrupt: read
	 * the firmware's device map from the per-type mailbox and hot-add /
	 * hot-remove SCSI devices that changed relative to the cached copy
	 * in acb->device_map.
	 */
	struct AdapterControlBlock *acb = container_of(work,
		struct AdapterControlBlock, arcmsr_do_message_isr_bh);
	char *acb_dev_map = (char *)acb->device_map;
	uint32_t __iomem *signature = NULL;
	char __iomem *devicemap = NULL;
	int target, lun;
	struct scsi_device *psdev;
	char diff, temp;

	/* Locate the signature word and device map for this adapter type. */
	switch (acb->adapter_type) {
	case ACB_ADAPTER_TYPE_A: {
		struct MessageUnit_A __iomem *reg  = acb->pmuA;

		signature = (uint32_t __iomem *)(&reg->message_rwbuffer[0]);
		devicemap = (char __iomem *)(&reg->message_rwbuffer[21]);
		break;
	}
	case ACB_ADAPTER_TYPE_B: {
		struct MessageUnit_B *reg  = acb->pmuB;

		signature = (uint32_t __iomem *)(&reg->message_rwbuffer[0]);
		devicemap = (char __iomem *)(&reg->message_rwbuffer[21]);
		break;
	}
	case ACB_ADAPTER_TYPE_C: {
		struct MessageUnit_C __iomem *reg  = acb->pmuC;

		signature = (uint32_t __iomem *)(&reg->msgcode_rwbuffer[0]);
		devicemap = (char __iomem *)(&reg->msgcode_rwbuffer[21]);
		break;
	}
	case ACB_ADAPTER_TYPE_D: {
		struct MessageUnit_D *reg  = acb->pmuD;

		signature = (uint32_t __iomem *)(&reg->msgcode_rwbuffer[0]);
		devicemap = (char __iomem *)(&reg->msgcode_rwbuffer[21]);
		break;
	}
	case ACB_ADAPTER_TYPE_E: {
		struct MessageUnit_E __iomem *reg  = acb->pmuE;

		signature = (uint32_t __iomem *)(&reg->msgcode_rwbuffer[0]);
		devicemap = (char __iomem *)(&reg->msgcode_rwbuffer[21]);
		break;
		}
	case ACB_ADAPTER_TYPE_F: {
		signature = (uint32_t __iomem *)(&acb->msgcode_rwbuffer[0]);
		devicemap = (char __iomem *)(&acb->msgcode_rwbuffer[21]);
		break;
		}
	}
	/* Only act on a completed GET_CONFIG reply. */
	if (readl(signature) != ARCMSR_SIGNATURE_GET_CONFIG)
		return;
	/* One map byte per target; each bit is one LUN's presence flag. */
	for (target = 0; target < ARCMSR_MAX_TARGETID - 1;
		target++) {
		temp = readb(devicemap);
		diff = (*acb_dev_map) ^ temp;
		if (diff != 0) {
			*acb_dev_map = temp;
			for (lun = 0; lun < ARCMSR_MAX_TARGETLUN;
				lun++) {
				/* Bit flipped 0 -> 1: a device appeared. */
				if ((diff & 0x01) == 1 &&
					(temp & 0x01) == 1) {
					scsi_add_device(acb->host,
						0, target, lun);
				/* Bit flipped 1 -> 0: a device went away. */
				} else if ((diff & 0x01) == 1
					&& (temp & 0x01) == 0) {
					psdev = scsi_device_lookup(acb->host,
						0, target, lun);
					if (psdev != NULL) {
						scsi_remove_device(psdev);
						scsi_device_put(psdev);
					}
				}
				temp >>= 1;
				diff >>= 1;
			}
		}
		devicemap++;
		acb_dev_map++;
	}
	acb->acb_flags &= ~ACB_F_MSG_GET_CONFIG;
}
988 
static int
arcmsr_request_irq(struct pci_dev *pdev, struct AdapterControlBlock *acb)
{
	/*
	 * Allocate interrupt vectors (MSI-X, then MSI, then legacy INTx,
	 * honouring the msix_enable/msi_enable module parameters) and
	 * request_irq() each one with arcmsr_do_interrupt.  Returns the
	 * SCSI-style SUCCESS/FAILED codes checked by the callers.
	 */
	unsigned long flags;
	int nvec, i;

	if (msix_enable == 0)
		goto msi_int0;
	nvec = pci_alloc_irq_vectors(pdev, 1, ARCMST_NUM_MSIX_VECTORS,
			PCI_IRQ_MSIX);
	if (nvec > 0) {
		pr_info("arcmsr%d: msi-x enabled\n", acb->host->host_no);
		flags = 0;
	} else {
msi_int0:
		if (msi_enable == 1) {
			nvec = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_MSI);
			if (nvec == 1) {
				dev_info(&pdev->dev, "msi enabled\n");
				goto msi_int1;
			}
		}
		nvec = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_INTX);
		if (nvec < 1)
			return FAILED;
msi_int1:
		/* MSI/INTx lines may be shared; MSI-X vectors are not. */
		flags = IRQF_SHARED;
	}

	acb->vector_count = nvec;
	for (i = 0; i < nvec; i++) {
		if (request_irq(pci_irq_vector(pdev, i), arcmsr_do_interrupt,
				flags, "arcmsr", acb)) {
			pr_warn("arcmsr%d: request_irq =%d failed!\n",
				acb->host->host_no, pci_irq_vector(pdev, i));
			goto out_free_irq;
		}
	}

	return SUCCESS;
out_free_irq:
	/* Undo the vectors already wired up before reporting failure. */
	while (--i >= 0)
		free_irq(pci_irq_vector(pdev, i), acb);
	pci_free_irq_vectors(pdev);
	return FAILED;
}
1035 
1036 static void arcmsr_init_get_devmap_timer(struct AdapterControlBlock *pacb)
1037 {
1038 	INIT_WORK(&pacb->arcmsr_do_message_isr_bh, arcmsr_message_isr_bh_fn);
1039 	pacb->fw_flag = FW_NORMAL;
1040 	timer_setup(&pacb->eternal_timer, arcmsr_request_device_map, 0);
1041 	pacb->eternal_timer.expires = jiffies + msecs_to_jiffies(6 * HZ);
1042 	add_timer(&pacb->eternal_timer);
1043 }
1044 
1045 static void arcmsr_init_set_datetime_timer(struct AdapterControlBlock *pacb)
1046 {
1047 	timer_setup(&pacb->refresh_timer, arcmsr_set_iop_datetime, 0);
1048 	pacb->refresh_timer.expires = jiffies + secs_to_jiffies(60);
1049 	add_timer(&pacb->refresh_timer);
1050 }
1051 
1052 static int arcmsr_set_dma_mask(struct AdapterControlBlock *acb)
1053 {
1054 	struct pci_dev *pcidev = acb->pdev;
1055 
1056 	if (IS_DMA64) {
1057 		if (((acb->adapter_type == ACB_ADAPTER_TYPE_A) && !dma_mask_64) ||
1058 		    dma_set_mask(&pcidev->dev, DMA_BIT_MASK(64)))
1059 			goto	dma32;
1060 		if (acb->adapter_type <= ACB_ADAPTER_TYPE_B)
1061 			return 0;
1062 		if (dma_set_coherent_mask(&pcidev->dev, DMA_BIT_MASK(64)) ||
1063 		    dma_set_mask_and_coherent(&pcidev->dev, DMA_BIT_MASK(64))) {
1064 			printk("arcmsr: set DMA 64 mask failed\n");
1065 			return -ENXIO;
1066 		}
1067 	} else {
1068 dma32:
1069 		if (dma_set_mask(&pcidev->dev, DMA_BIT_MASK(32)) ||
1070 		    dma_set_coherent_mask(&pcidev->dev, DMA_BIT_MASK(32)) ||
1071 		    dma_set_mask_and_coherent(&pcidev->dev, DMA_BIT_MASK(32))) {
1072 			printk("arcmsr: set DMA 32-bit mask failed\n");
1073 			return -ENXIO;
1074 		}
1075 	}
1076 	return 0;
1077 }
1078 
static int arcmsr_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	/*
	 * PCI probe: enable the device, allocate a Scsi_Host with the
	 * embedded AdapterControlBlock, map registers, size and allocate
	 * the CCB pool, hook interrupts, bring up the IOP and register
	 * with the SCSI midlayer.  Returns 0 on success, -ENODEV on error.
	 *
	 * NOTE(review): arcmsr_remap_pciregion(), arcmsr_alloc_io_queue()
	 * and arcmsr_get_firmware_spec() return nonzero on SUCCESS, which
	 * is why their error checks below are inverted (if (!error) -> fail).
	 */
	struct Scsi_Host *host;
	struct AdapterControlBlock *acb;
	uint8_t bus,dev_fun;
	int error;
	error = pci_enable_device(pdev);
	if(error){
		return -ENODEV;
	}
	host = scsi_host_alloc(&arcmsr_scsi_host_template, sizeof(struct AdapterControlBlock));
	if(!host){
    		goto pci_disable_dev;
	}
	init_waitqueue_head(&wait_q);
	bus = pdev->bus->number;
	dev_fun = pdev->devfn;
	acb = (struct AdapterControlBlock *) host->hostdata;
	memset(acb,0,sizeof(struct AdapterControlBlock));
	acb->pdev = pdev;
	/* The PCI id table encodes the adapter family in driver_data. */
	acb->adapter_type = id->driver_data;
	if (arcmsr_set_dma_mask(acb))
		goto scsi_host_release;
	acb->host = host;
	host->max_lun = ARCMSR_MAX_TARGETLUN;
	host->max_id = ARCMSR_MAX_TARGETID;		/*16:8*/
	host->max_cmd_len = 16;	 			/*this is issue of 64bit LBA ,over 2T byte*/
	/* Clamp module-parameter overrides into their supported ranges. */
	if ((host_can_queue < ARCMSR_MIN_OUTSTANDING_CMD) || (host_can_queue > ARCMSR_MAX_OUTSTANDING_CMD))
		host_can_queue = ARCMSR_DEFAULT_OUTSTANDING_CMD;
	host->can_queue = host_can_queue;	/* max simultaneous cmds */
	if ((cmd_per_lun < ARCMSR_MIN_CMD_PERLUN) || (cmd_per_lun > ARCMSR_MAX_CMD_PERLUN))
		cmd_per_lun = ARCMSR_DEFAULT_CMD_PERLUN;
	host->cmd_per_lun = cmd_per_lun;
	host->this_id = ARCMSR_SCSI_INITIATOR_ID;
	host->unique_id = (bus << 8) | dev_fun;
	pci_set_drvdata(pdev, host);
	pci_set_master(pdev);
	error = pci_request_regions(pdev, "arcmsr");
	if(error){
		goto scsi_host_release;
	}
	spin_lock_init(&acb->eh_lock);
	spin_lock_init(&acb->ccblist_lock);
	spin_lock_init(&acb->postq_lock);
	spin_lock_init(&acb->doneq_lock);
	spin_lock_init(&acb->rqbuffer_lock);
	spin_lock_init(&acb->wqbuffer_lock);
	acb->acb_flags |= (ACB_F_MESSAGE_WQBUFFER_CLEARED |
			ACB_F_MESSAGE_RQBUFFER_CLEARED |
			ACB_F_MESSAGE_WQBUFFER_READED);
	acb->acb_flags &= ~ACB_F_SCSISTOPADAPTER;
	INIT_LIST_HEAD(&acb->ccb_free_list);
	error = arcmsr_remap_pciregion(acb);
	if(!error){
		goto pci_release_regs;
	}
	error = arcmsr_alloc_io_queue(acb);
	if (!error)
		goto unmap_pci_region;
	error = arcmsr_get_firmware_spec(acb);
	if(!error){
		goto free_hbb_mu;
	}
	/*
	 * Type F keeps the io queue allocated here; other types re-create
	 * it inside arcmsr_alloc_ccb_pool() from the shared DMA region.
	 */
	if (acb->adapter_type != ACB_ADAPTER_TYPE_F)
		arcmsr_free_io_queue(acb);
	error = arcmsr_alloc_ccb_pool(acb);
	if(error){
		goto unmap_pci_region;
	}
	error = scsi_add_host(host, &pdev->dev);
	if(error){
		goto free_ccb_pool;
	}
	if (arcmsr_request_irq(pdev, acb) == FAILED)
		goto scsi_host_remove;
	arcmsr_iop_init(acb);
	arcmsr_init_get_devmap_timer(acb);
	if (set_date_time)
		arcmsr_init_set_datetime_timer(acb);
	if(arcmsr_alloc_sysfs_attr(acb))
		goto out_free_sysfs;
	scsi_scan_host(host);
	return 0;
out_free_sysfs:
	/* Unwind in reverse order of the setup above. */
	if (set_date_time)
		del_timer_sync(&acb->refresh_timer);
	del_timer_sync(&acb->eternal_timer);
	flush_work(&acb->arcmsr_do_message_isr_bh);
	arcmsr_stop_adapter_bgrb(acb);
	arcmsr_flush_adapter_cache(acb);
	arcmsr_free_irq(pdev, acb);
scsi_host_remove:
	scsi_remove_host(host);
free_ccb_pool:
	arcmsr_free_ccb_pool(acb);
	goto unmap_pci_region;
free_hbb_mu:
	arcmsr_free_io_queue(acb);
unmap_pci_region:
	arcmsr_unmap_pciregion(acb);
pci_release_regs:
	pci_release_regions(pdev);
scsi_host_release:
	scsi_host_put(host);
pci_disable_dev:
	pci_disable_device(pdev);
	return -ENODEV;
}
1187 
1188 static void arcmsr_free_irq(struct pci_dev *pdev,
1189 		struct AdapterControlBlock *acb)
1190 {
1191 	int i;
1192 
1193 	for (i = 0; i < acb->vector_count; i++)
1194 		free_irq(pci_irq_vector(pdev, i), acb);
1195 	pci_free_irq_vectors(pdev);
1196 }
1197 
static int __maybe_unused arcmsr_suspend(struct device *dev)
{
	/*
	 * PM suspend: mask adapter interrupts, release IRQ handlers, stop
	 * the periodic timers and pending bottom-half work, then ask the
	 * adapter to stop background rebuild and flush its cache so no DMA
	 * or callbacks occur while the system sleeps.
	 */
	struct pci_dev *pdev = to_pci_dev(dev);
	struct Scsi_Host *host = pci_get_drvdata(pdev);
	struct AdapterControlBlock *acb =
		(struct AdapterControlBlock *)host->hostdata;

	arcmsr_disable_outbound_ints(acb);
	arcmsr_free_irq(pdev, acb);
	del_timer_sync(&acb->eternal_timer);
	if (set_date_time)
		del_timer_sync(&acb->refresh_timer);
	flush_work(&acb->arcmsr_do_message_isr_bh);
	arcmsr_stop_adapter_bgrb(acb);
	arcmsr_flush_adapter_cache(acb);
	return 0;
}
1215 
static int __maybe_unused arcmsr_resume(struct device *dev)
{
	/*
	 * PM resume: restore DMA masks, re-request IRQ vectors, reset the
	 * per-type queue/doorbell state, then reinitialise the IOP and
	 * restart the periodic timers.  On failure the host is torn down
	 * and -ENODEV returned.
	 */
	struct pci_dev *pdev = to_pci_dev(dev);
	struct Scsi_Host *host = pci_get_drvdata(pdev);
	struct AdapterControlBlock *acb =
		(struct AdapterControlBlock *)host->hostdata;

	if (arcmsr_set_dma_mask(acb))
		goto controller_unregister;
	if (arcmsr_request_irq(pdev, acb) == FAILED)
		goto controller_stop;
	switch (acb->adapter_type) {
	case ACB_ADAPTER_TYPE_B: {
		/* Type B rings live in host RAM; clear both queues. */
		struct MessageUnit_B *reg = acb->pmuB;
		uint32_t i;
		for (i = 0; i < ARCMSR_MAX_HBB_POSTQUEUE; i++) {
			reg->post_qbuffer[i] = 0;
			reg->done_qbuffer[i] = 0;
		}
		reg->postq_index = 0;
		reg->doneq_index = 0;
		break;
		}
	case ACB_ADAPTER_TYPE_E:
		/* Resynchronise the doorbell handshake with the IOP. */
		writel(0, &acb->pmuE->host_int_status);
		writel(ARCMSR_HBEMU_DOORBELL_SYNC, &acb->pmuE->iobound_doorbell);
		acb->in_doorbell = 0;
		acb->out_doorbell = 0;
		acb->doneq_index = 0;
		break;
	case ACB_ADAPTER_TYPE_F:
		writel(0, &acb->pmuF->host_int_status);
		writel(ARCMSR_HBFMU_DOORBELL_SYNC, &acb->pmuF->iobound_doorbell);
		acb->in_doorbell = 0;
		acb->out_doorbell = 0;
		acb->doneq_index = 0;
		arcmsr_hbaF_assign_regAddr(acb);
		break;
	}
	arcmsr_iop_init(acb);
	arcmsr_init_get_devmap_timer(acb);
	if (set_date_time)
		arcmsr_init_set_datetime_timer(acb);
	return 0;
controller_stop:
	arcmsr_stop_adapter_bgrb(acb);
	arcmsr_flush_adapter_cache(acb);
controller_unregister:
	/* Unrecoverable resume failure: tear the host down completely. */
	scsi_remove_host(host);
	arcmsr_free_ccb_pool(acb);
	if (acb->adapter_type == ACB_ADAPTER_TYPE_F)
		arcmsr_free_io_queue(acb);
	arcmsr_unmap_pciregion(acb);
	scsi_host_put(host);
	return -ENODEV;
}
1272 
1273 static uint8_t arcmsr_hbaA_abort_allcmd(struct AdapterControlBlock *acb)
1274 {
1275 	struct MessageUnit_A __iomem *reg = acb->pmuA;
1276 	writel(ARCMSR_INBOUND_MESG0_ABORT_CMD, &reg->inbound_msgaddr0);
1277 	if (!arcmsr_hbaA_wait_msgint_ready(acb)) {
1278 		printk(KERN_NOTICE
1279 			"arcmsr%d: wait 'abort all outstanding command' timeout\n"
1280 			, acb->host->host_no);
1281 		return false;
1282 	}
1283 	return true;
1284 }
1285 
1286 static uint8_t arcmsr_hbaB_abort_allcmd(struct AdapterControlBlock *acb)
1287 {
1288 	struct MessageUnit_B *reg = acb->pmuB;
1289 
1290 	writel(ARCMSR_MESSAGE_ABORT_CMD, reg->drv2iop_doorbell);
1291 	if (!arcmsr_hbaB_wait_msgint_ready(acb)) {
1292 		printk(KERN_NOTICE
1293 			"arcmsr%d: wait 'abort all outstanding command' timeout\n"
1294 			, acb->host->host_no);
1295 		return false;
1296 	}
1297 	return true;
1298 }
1299 static uint8_t arcmsr_hbaC_abort_allcmd(struct AdapterControlBlock *pACB)
1300 {
1301 	struct MessageUnit_C __iomem *reg = pACB->pmuC;
1302 	writel(ARCMSR_INBOUND_MESG0_ABORT_CMD, &reg->inbound_msgaddr0);
1303 	writel(ARCMSR_HBCMU_DRV2IOP_MESSAGE_CMD_DONE, &reg->inbound_doorbell);
1304 	if (!arcmsr_hbaC_wait_msgint_ready(pACB)) {
1305 		printk(KERN_NOTICE
1306 			"arcmsr%d: wait 'abort all outstanding command' timeout\n"
1307 			, pACB->host->host_no);
1308 		return false;
1309 	}
1310 	return true;
1311 }
1312 
1313 static uint8_t arcmsr_hbaD_abort_allcmd(struct AdapterControlBlock *pACB)
1314 {
1315 	struct MessageUnit_D *reg = pACB->pmuD;
1316 
1317 	writel(ARCMSR_INBOUND_MESG0_ABORT_CMD, reg->inbound_msgaddr0);
1318 	if (!arcmsr_hbaD_wait_msgint_ready(pACB)) {
1319 		pr_notice("arcmsr%d: wait 'abort all outstanding "
1320 			"command' timeout\n", pACB->host->host_no);
1321 		return false;
1322 	}
1323 	return true;
1324 }
1325 
1326 static uint8_t arcmsr_hbaE_abort_allcmd(struct AdapterControlBlock *pACB)
1327 {
1328 	struct MessageUnit_E __iomem *reg = pACB->pmuE;
1329 
1330 	writel(ARCMSR_INBOUND_MESG0_ABORT_CMD, &reg->inbound_msgaddr0);
1331 	pACB->out_doorbell ^= ARCMSR_HBEMU_DRV2IOP_MESSAGE_CMD_DONE;
1332 	writel(pACB->out_doorbell, &reg->iobound_doorbell);
1333 	if (!arcmsr_hbaE_wait_msgint_ready(pACB)) {
1334 		pr_notice("arcmsr%d: wait 'abort all outstanding "
1335 			"command' timeout\n", pACB->host->host_no);
1336 		return false;
1337 	}
1338 	return true;
1339 }
1340 
1341 static uint8_t arcmsr_abort_allcmd(struct AdapterControlBlock *acb)
1342 {
1343 	uint8_t rtnval = 0;
1344 	switch (acb->adapter_type) {
1345 	case ACB_ADAPTER_TYPE_A:
1346 		rtnval = arcmsr_hbaA_abort_allcmd(acb);
1347 		break;
1348 	case ACB_ADAPTER_TYPE_B:
1349 		rtnval = arcmsr_hbaB_abort_allcmd(acb);
1350 		break;
1351 	case ACB_ADAPTER_TYPE_C:
1352 		rtnval = arcmsr_hbaC_abort_allcmd(acb);
1353 		break;
1354 	case ACB_ADAPTER_TYPE_D:
1355 		rtnval = arcmsr_hbaD_abort_allcmd(acb);
1356 		break;
1357 	case ACB_ADAPTER_TYPE_E:
1358 	case ACB_ADAPTER_TYPE_F:
1359 		rtnval = arcmsr_hbaE_abort_allcmd(acb);
1360 		break;
1361 	}
1362 	return rtnval;
1363 }
1364 
static void arcmsr_ccb_complete(struct CommandControlBlock *ccb)
{
	/*
	 * Finish a CCB: drop the outstanding count, unmap its DMA, return
	 * it to the free list under ccblist_lock, and complete the SCSI
	 * midlayer command.
	 */
	struct AdapterControlBlock *acb = ccb->acb;
	struct scsi_cmnd *pcmd = ccb->pcmd;
	unsigned long flags;
	atomic_dec(&acb->ccboutstandingcount);
	scsi_dma_unmap(ccb->pcmd);
	ccb->startdone = ARCMSR_CCB_DONE;
	spin_lock_irqsave(&acb->ccblist_lock, flags);
	list_add_tail(&ccb->list, &acb->ccb_free_list);
	spin_unlock_irqrestore(&acb->ccblist_lock, flags);
	scsi_done(pcmd);
}
1378 
1379 static void arcmsr_report_sense_info(struct CommandControlBlock *ccb)
1380 {
1381 	struct scsi_cmnd *pcmd = ccb->pcmd;
1382 
1383 	pcmd->result = (DID_OK << 16) | SAM_STAT_CHECK_CONDITION;
1384 	if (pcmd->sense_buffer) {
1385 		struct SENSE_DATA *sensebuffer;
1386 
1387 		memcpy_and_pad(pcmd->sense_buffer,
1388 			       SCSI_SENSE_BUFFERSIZE,
1389 			       ccb->arcmsr_cdb.SenseData,
1390 			       sizeof(ccb->arcmsr_cdb.SenseData),
1391 			       0);
1392 
1393 		sensebuffer = (struct SENSE_DATA *)pcmd->sense_buffer;
1394 		sensebuffer->ErrorCode = SCSI_SENSE_CURRENT_ERRORS;
1395 		sensebuffer->Valid = 1;
1396 	}
1397 }
1398 
static u32 arcmsr_disable_outbound_ints(struct AdapterControlBlock *acb)
{
	/*
	 * Mask all adapter-to-host interrupts and return the previous mask
	 * so callers can restore it later; type D has no readable previous
	 * mask here, so 0 is returned for it.
	 */
	u32 orig_mask = 0;
	switch (acb->adapter_type) {
	case ACB_ADAPTER_TYPE_A : {
		struct MessageUnit_A __iomem *reg = acb->pmuA;
		orig_mask = readl(&reg->outbound_intmask);
		writel(orig_mask|ARCMSR_MU_OUTBOUND_ALL_INTMASKENABLE, \
						&reg->outbound_intmask);
		}
		break;
	case ACB_ADAPTER_TYPE_B : {
		struct MessageUnit_B *reg = acb->pmuB;
		/* Type B uses a doorbell mask: writing 0 disables all. */
		orig_mask = readl(reg->iop2drv_doorbell_mask);
		writel(0, reg->iop2drv_doorbell_mask);
		}
		break;
	case ACB_ADAPTER_TYPE_C:{
		struct MessageUnit_C __iomem *reg = acb->pmuC;
		/* disable all outbound interrupt */
		orig_mask = readl(&reg->host_int_mask); /* disable outbound message0 int */
		writel(orig_mask|ARCMSR_HBCMU_ALL_INTMASKENABLE, &reg->host_int_mask);
		}
		break;
	case ACB_ADAPTER_TYPE_D: {
		struct MessageUnit_D *reg = acb->pmuD;
		/* disable all outbound interrupt */
		writel(ARCMSR_ARC1214_ALL_INT_DISABLE, reg->pcief0_int_enable);
		}
		break;
	case ACB_ADAPTER_TYPE_E:
	case ACB_ADAPTER_TYPE_F: {
		struct MessageUnit_E __iomem *reg = acb->pmuE;
		orig_mask = readl(&reg->host_int_mask);
		writel(orig_mask | ARCMSR_HBEMU_OUTBOUND_DOORBELL_ISR | ARCMSR_HBEMU_OUTBOUND_POSTQUEUE_ISR, &reg->host_int_mask);
		readl(&reg->host_int_mask); /* Dummy readl to force pci flush */
		}
		break;
	}
	return orig_mask;
}
1440 
static void arcmsr_report_ccb_state(struct AdapterControlBlock *acb,
			struct CommandControlBlock *ccb, bool error)
{
	/*
	 * Translate the adapter's per-CCB completion status into a SCSI
	 * midlayer result code, update the cached devstate table, and
	 * complete the command.
	 */
	uint8_t id, lun;
	id = ccb->pcmd->device->id;
	lun = ccb->pcmd->device->lun;
	if (!error) {
		/* A successful I/O proves the device is (back) online. */
		if (acb->devstate[id][lun] == ARECA_RAID_GONE)
			acb->devstate[id][lun] = ARECA_RAID_GOOD;
		ccb->pcmd->result = DID_OK << 16;
		arcmsr_ccb_complete(ccb);
	}else{
		switch (ccb->arcmsr_cdb.DeviceStatus) {
		case ARCMSR_DEV_SELECT_TIMEOUT: {
			acb->devstate[id][lun] = ARECA_RAID_GONE;
			ccb->pcmd->result = DID_NO_CONNECT << 16;
			arcmsr_ccb_complete(ccb);
			}
			break;

		case ARCMSR_DEV_ABORTED:

		case ARCMSR_DEV_INIT_FAIL: {
			acb->devstate[id][lun] = ARECA_RAID_GONE;
			ccb->pcmd->result = DID_BAD_TARGET << 16;
			arcmsr_ccb_complete(ccb);
			}
			break;

		case ARCMSR_DEV_CHECK_CONDITION: {
			/* Device-level error: pass sense data up the stack. */
			acb->devstate[id][lun] = ARECA_RAID_GOOD;
			arcmsr_report_sense_info(ccb);
			arcmsr_ccb_complete(ccb);
			}
			break;

		default:
			printk(KERN_NOTICE
				"arcmsr%d: scsi id = %d lun = %d isr get command error done, \
				but got unknown DeviceStatus = 0x%x \n"
				, acb->host->host_no
				, id
				, lun
				, ccb->arcmsr_cdb.DeviceStatus);
				acb->devstate[id][lun] = ARECA_RAID_GONE;
				ccb->pcmd->result = DID_NO_CONNECT << 16;
				arcmsr_ccb_complete(ccb);
			break;
		}
	}
}
1492 
static void arcmsr_drain_donequeue(struct AdapterControlBlock *acb, struct CommandControlBlock *pCCB, bool error)
{
	/*
	 * Validate a CCB popped from the done queue and route it to
	 * completion.  CCBs already aborted by the error handler complete
	 * with DID_ABORT; CCBs that don't belong to this adapter or were
	 * never started are only logged, never completed.
	 */
	if ((pCCB->acb != acb) || (pCCB->startdone != ARCMSR_CCB_START)) {
		if (pCCB->startdone == ARCMSR_CCB_ABORTED) {
			struct scsi_cmnd *abortcmd = pCCB->pcmd;
			if (abortcmd) {
				abortcmd->result |= DID_ABORT << 16;
				arcmsr_ccb_complete(pCCB);
				printk(KERN_NOTICE "arcmsr%d: pCCB ='0x%p' isr got aborted command \n",
				acb->host->host_no, pCCB);
			}
			return;
		}
		printk(KERN_NOTICE "arcmsr%d: isr get an illegal ccb command \
				done acb = '0x%p'"
				"ccb = '0x%p' ccbacb = '0x%p' startdone = 0x%x"
				" ccboutstandingcount = %d \n"
				, acb->host->host_no
				, acb
				, pCCB
				, pCCB->acb
				, pCCB->startdone
				, atomic_read(&acb->ccboutstandingcount));
		return;
	}
	arcmsr_report_ccb_state(acb, pCCB, error);
}
1520 
static void arcmsr_done4abort_postqueue(struct AdapterControlBlock *acb)
{
	/*
	 * Drain and complete everything still sitting on the adapter's
	 * outbound (done) queue after an abort, using the per-type queue
	 * layout.  Each popped entry is converted from a bus address back
	 * to its CCB via acb->vir2phy_offset and handed to
	 * arcmsr_drain_donequeue().
	 */
	int i = 0;
	uint32_t flag_ccb;
	struct ARCMSR_CDB *pARCMSR_CDB;
	bool error;
	struct CommandControlBlock *pCCB;
	unsigned long ccb_cdb_phy;

	switch (acb->adapter_type) {

	case ACB_ADAPTER_TYPE_A: {
		struct MessageUnit_A __iomem *reg = acb->pmuA;
		uint32_t outbound_intstatus;
		outbound_intstatus = readl(&reg->outbound_intstatus) &
					acb->outbound_int_enable;
		/*clear and abort all outbound posted Q*/
		writel(outbound_intstatus, &reg->outbound_intstatus);/*clear interrupt*/
		/* 0xFFFFFFFF marks an empty queue; cap at maxOutstanding. */
		while(((flag_ccb = readl(&reg->outbound_queueport)) != 0xFFFFFFFF)
				&& (i++ < acb->maxOutstanding)) {
			/* Entry is the CDB bus address in 32-byte units. */
			ccb_cdb_phy = (flag_ccb << 5) & 0xffffffff;
			if (acb->cdb_phyadd_hipart)
				ccb_cdb_phy = ccb_cdb_phy | acb->cdb_phyadd_hipart;
			pARCMSR_CDB = (struct ARCMSR_CDB *)(acb->vir2phy_offset + ccb_cdb_phy);
			pCCB = container_of(pARCMSR_CDB, struct CommandControlBlock, arcmsr_cdb);
			error = (flag_ccb & ARCMSR_CCBREPLY_FLAG_ERROR_MODE0) ? true : false;
			arcmsr_drain_donequeue(acb, pCCB, error);
		}
		}
		break;

	case ACB_ADAPTER_TYPE_B: {
		struct MessageUnit_B *reg = acb->pmuB;
		/*clear all outbound posted Q*/
		writel(ARCMSR_DOORBELL_INT_CLEAR_PATTERN, reg->iop2drv_doorbell); /* clear doorbell interrupt */
		for (i = 0; i < ARCMSR_MAX_HBB_POSTQUEUE; i++) {
			flag_ccb = reg->done_qbuffer[i];
			if (flag_ccb != 0) {
				reg->done_qbuffer[i] = 0;
				ccb_cdb_phy = (flag_ccb << 5) & 0xffffffff;
				if (acb->cdb_phyadd_hipart)
					ccb_cdb_phy = ccb_cdb_phy | acb->cdb_phyadd_hipart;
				pARCMSR_CDB = (struct ARCMSR_CDB *)(acb->vir2phy_offset + ccb_cdb_phy);
				pCCB = container_of(pARCMSR_CDB, struct CommandControlBlock, arcmsr_cdb);
				error = (flag_ccb & ARCMSR_CCBREPLY_FLAG_ERROR_MODE0) ? true : false;
				arcmsr_drain_donequeue(acb, pCCB, error);
			}
			reg->post_qbuffer[i] = 0;
		}
		reg->doneq_index = 0;
		reg->postq_index = 0;
		}
		break;
	case ACB_ADAPTER_TYPE_C: {
		struct MessageUnit_C __iomem *reg = acb->pmuC;
		while ((readl(&reg->host_int_status) & ARCMSR_HBCMU_OUTBOUND_POSTQUEUE_ISR) && (i++ < acb->maxOutstanding)) {
			/*need to do*/
			flag_ccb = readl(&reg->outbound_queueport_low);
			/* Low nibble carries status; mask it off the address. */
			ccb_cdb_phy = (flag_ccb & 0xFFFFFFF0);
			if (acb->cdb_phyadd_hipart)
				ccb_cdb_phy = ccb_cdb_phy | acb->cdb_phyadd_hipart;
			pARCMSR_CDB = (struct  ARCMSR_CDB *)(acb->vir2phy_offset + ccb_cdb_phy);
			pCCB = container_of(pARCMSR_CDB, struct CommandControlBlock, arcmsr_cdb);
			error = (flag_ccb & ARCMSR_CCBREPLY_FLAG_ERROR_MODE1) ? true : false;
			arcmsr_drain_donequeue(acb, pCCB, error);
		}
		}
		break;
	case ACB_ADAPTER_TYPE_D: {
		struct MessageUnit_D  *pmu = acb->pmuD;
		uint32_t outbound_write_pointer;
		uint32_t doneq_index, index_stripped, addressLow, residual, toggle;
		unsigned long flags;

		residual = atomic_read(&acb->ccboutstandingcount);
		for (i = 0; i < residual; i++) {
			spin_lock_irqsave(&acb->doneq_lock, flags);
			outbound_write_pointer =
				pmu->done_qbuffer[0].addressLow + 1;
			doneq_index = pmu->doneq_index;
			if ((doneq_index & 0xFFF) !=
				(outbound_write_pointer & 0xFFF)) {
				/*
				 * Advance the ring index; bit 14 toggles on
				 * each wrap of the 0xFFF-entry ring.
				 */
				toggle = doneq_index & 0x4000;
				index_stripped = (doneq_index & 0xFFF) + 1;
				index_stripped %= ARCMSR_MAX_ARC1214_DONEQUEUE;
				pmu->doneq_index = index_stripped ? (index_stripped | toggle) :
					((toggle ^ 0x4000) + 1);
				doneq_index = pmu->doneq_index;
				spin_unlock_irqrestore(&acb->doneq_lock, flags);
				addressLow = pmu->done_qbuffer[doneq_index &
					0xFFF].addressLow;
				ccb_cdb_phy = (addressLow & 0xFFFFFFF0);
				if (acb->cdb_phyadd_hipart)
					ccb_cdb_phy = ccb_cdb_phy | acb->cdb_phyadd_hipart;
				pARCMSR_CDB = (struct  ARCMSR_CDB *)
					(acb->vir2phy_offset + ccb_cdb_phy);
				pCCB = container_of(pARCMSR_CDB,
					struct CommandControlBlock, arcmsr_cdb);
				error = (addressLow &
					ARCMSR_CCBREPLY_FLAG_ERROR_MODE1) ?
					true : false;
				arcmsr_drain_donequeue(acb, pCCB, error);
				/* Tell the IOP how far we have consumed. */
				writel(doneq_index,
					pmu->outboundlist_read_pointer);
			} else {
				/* Queue momentarily empty: give the IOP time. */
				spin_unlock_irqrestore(&acb->doneq_lock, flags);
				mdelay(10);
			}
		}
		pmu->postq_index = 0;
		pmu->doneq_index = 0x40FF;
		}
		break;
	case ACB_ADAPTER_TYPE_E:
		arcmsr_hbaE_postqueue_isr(acb);
		break;
	case ACB_ADAPTER_TYPE_F:
		arcmsr_hbaF_postqueue_isr(acb);
		break;
	}
}
1642 
1643 static void arcmsr_remove_scsi_devices(struct AdapterControlBlock *acb)
1644 {
1645 	char *acb_dev_map = (char *)acb->device_map;
1646 	int target, lun, i;
1647 	struct scsi_device *psdev;
1648 	struct CommandControlBlock *ccb;
1649 	char temp;
1650 
1651 	for (i = 0; i < acb->maxFreeCCB; i++) {
1652 		ccb = acb->pccb_pool[i];
1653 		if (ccb->startdone == ARCMSR_CCB_START) {
1654 			ccb->pcmd->result = DID_NO_CONNECT << 16;
1655 			scsi_dma_unmap(ccb->pcmd);
1656 			scsi_done(ccb->pcmd);
1657 		}
1658 	}
1659 	for (target = 0; target < ARCMSR_MAX_TARGETID; target++) {
1660 		temp = *acb_dev_map;
1661 		if (temp) {
1662 			for (lun = 0; lun < ARCMSR_MAX_TARGETLUN; lun++) {
1663 				if (temp & 1) {
1664 					psdev = scsi_device_lookup(acb->host,
1665 						0, target, lun);
1666 					if (psdev != NULL) {
1667 						scsi_remove_device(psdev);
1668 						scsi_device_put(psdev);
1669 					}
1670 				}
1671 				temp >>= 1;
1672 			}
1673 			*acb_dev_map = 0;
1674 		}
1675 		acb_dev_map++;
1676 	}
1677 }
1678 
static void arcmsr_free_pcidev(struct AdapterControlBlock *acb)
{
	/*
	 * Tear down a controller whose PCI device has vanished (surprise
	 * removal): unregister from the midlayer, stop work/timers, release
	 * IRQs and all driver memory without touching the gone hardware.
	 */
	struct pci_dev *pdev;
	struct Scsi_Host *host;

	host = acb->host;
	arcmsr_free_sysfs_attr(acb);
	scsi_remove_host(host);
	flush_work(&acb->arcmsr_do_message_isr_bh);
	del_timer_sync(&acb->eternal_timer);
	if (set_date_time)
		del_timer_sync(&acb->refresh_timer);
	pdev = acb->pdev;
	arcmsr_free_irq(pdev, acb);
	arcmsr_free_ccb_pool(acb);
	/* Type F owns a separately allocated io queue; others free it
	 * together with the CCB pool. */
	if (acb->adapter_type == ACB_ADAPTER_TYPE_F)
		arcmsr_free_io_queue(acb);
	arcmsr_unmap_pciregion(acb);
	pci_release_regions(pdev);
	scsi_host_put(host);
	pci_disable_device(pdev);
}
1701 
static void arcmsr_remove(struct pci_dev *pdev)
{
	/*
	 * PCI remove: quiesce the adapter, drain or abort outstanding
	 * commands, then release all resources.  If a config-space read
	 * returns all-ones the device is already gone (surprise removal)
	 * and only software teardown is performed.
	 */
	struct Scsi_Host *host = pci_get_drvdata(pdev);
	struct AdapterControlBlock *acb =
		(struct AdapterControlBlock *) host->hostdata;
	int poll_count = 0;
	uint16_t dev_id;

	pci_read_config_word(pdev, PCI_DEVICE_ID, &dev_id);
	if (dev_id == 0xffff) {
		/* Device unplugged: skip all hardware accesses. */
		acb->acb_flags &= ~ACB_F_IOP_INITED;
		acb->acb_flags |= ACB_F_ADAPTER_REMOVED;
		arcmsr_remove_scsi_devices(acb);
		arcmsr_free_pcidev(acb);
		return;
	}
	arcmsr_free_sysfs_attr(acb);
	scsi_remove_host(host);
	flush_work(&acb->arcmsr_do_message_isr_bh);
	del_timer_sync(&acb->eternal_timer);
	if (set_date_time)
		del_timer_sync(&acb->refresh_timer);
	arcmsr_disable_outbound_ints(acb);
	arcmsr_stop_adapter_bgrb(acb);
	arcmsr_flush_adapter_cache(acb);
	acb->acb_flags |= ACB_F_SCSISTOPADAPTER;
	acb->acb_flags &= ~ACB_F_IOP_INITED;

	/* Poll the ISR by hand until outstanding commands drain. */
	for (poll_count = 0; poll_count < acb->maxOutstanding; poll_count++){
		if (!atomic_read(&acb->ccboutstandingcount))
			break;
		arcmsr_interrupt(acb);/* FIXME: need spinlock */
		msleep(25);
	}

	if (atomic_read(&acb->ccboutstandingcount)) {
		int i;

		/* Commands refused to drain: abort them on the adapter and
		 * complete every started CCB with DID_ABORT. */
		arcmsr_abort_allcmd(acb);
		arcmsr_done4abort_postqueue(acb);
		for (i = 0; i < acb->maxFreeCCB; i++) {
			struct CommandControlBlock *ccb = acb->pccb_pool[i];
			if (ccb->startdone == ARCMSR_CCB_START) {
				ccb->startdone = ARCMSR_CCB_ABORTED;
				ccb->pcmd->result = DID_ABORT << 16;
				arcmsr_ccb_complete(ccb);
			}
		}
	}
	arcmsr_free_irq(pdev, acb);
	arcmsr_free_ccb_pool(acb);
	if (acb->adapter_type == ACB_ADAPTER_TYPE_F)
		arcmsr_free_io_queue(acb);
	arcmsr_unmap_pciregion(acb);
	pci_release_regions(pdev);
	scsi_host_put(host);
	pci_disable_device(pdev);
}
1760 
/*
 * PCI .shutdown callback: stop timers, interrupts and background
 * rebuild, and flush the adapter cache before power-off/reboot.
 * Does nothing if the adapter was already surprise-removed.
 */
static void arcmsr_shutdown(struct pci_dev *pdev)
{
	struct Scsi_Host *host = pci_get_drvdata(pdev);
	struct AdapterControlBlock *acb =
		(struct AdapterControlBlock *)host->hostdata;
	if (acb->acb_flags & ACB_F_ADAPTER_REMOVED)
		return;
	del_timer_sync(&acb->eternal_timer);
	if (set_date_time)
		del_timer_sync(&acb->refresh_timer);
	arcmsr_disable_outbound_ints(acb);
	arcmsr_free_irq(pdev, acb);
	flush_work(&acb->arcmsr_do_message_isr_bh);
	arcmsr_stop_adapter_bgrb(acb);
	arcmsr_flush_adapter_cache(acb);
}
1777 
1778 static int __init arcmsr_module_init(void)
1779 {
1780 	int error = 0;
1781 	error = pci_register_driver(&arcmsr_pci_driver);
1782 	return error;
1783 }
1784 
/* Module exit point: unregister the PCI driver from the core. */
static void __exit arcmsr_module_exit(void)
{
	pci_unregister_driver(&arcmsr_pci_driver);
}
module_init(arcmsr_module_init);
module_exit(arcmsr_module_exit);
1791 
/*
 * Re-enable the outbound (IOP-to-driver) interrupt sources recorded in
 * intmask_org, using the mask-register layout of each adapter
 * generation.  Counterpart of arcmsr_disable_outbound_ints().
 */
static void arcmsr_enable_outbound_ints(struct AdapterControlBlock *acb,
						u32 intmask_org)
{
	u32 mask;
	switch (acb->adapter_type) {

	case ACB_ADAPTER_TYPE_A: {
		struct MessageUnit_A __iomem *reg = acb->pmuA;
		/* Set bits in outbound_intmask mask interrupts; clear to enable. */
		mask = intmask_org & ~(ARCMSR_MU_OUTBOUND_POSTQUEUE_INTMASKENABLE |
			     ARCMSR_MU_OUTBOUND_DOORBELL_INTMASKENABLE|
			     ARCMSR_MU_OUTBOUND_MESSAGE0_INTMASKENABLE);
		writel(mask, &reg->outbound_intmask);
		acb->outbound_int_enable = ~(intmask_org & mask) & 0x000000ff;
		}
		break;

	case ACB_ADAPTER_TYPE_B: {
		struct MessageUnit_B *reg = acb->pmuB;
		/* Type B doorbell mask: set bits to enable the events. */
		mask = intmask_org | (ARCMSR_IOP2DRV_DATA_WRITE_OK |
			ARCMSR_IOP2DRV_DATA_READ_OK |
			ARCMSR_IOP2DRV_CDB_DONE |
			ARCMSR_IOP2DRV_MESSAGE_CMD_DONE);
		writel(mask, reg->iop2drv_doorbell_mask);
		acb->outbound_int_enable = (intmask_org | mask) & 0x0000000f;
		}
		break;
	case ACB_ADAPTER_TYPE_C: {
		struct MessageUnit_C __iomem *reg = acb->pmuC;
		/* host_int_mask: clear the utility/doorbell/postqueue mask bits. */
		mask = ~(ARCMSR_HBCMU_UTILITY_A_ISR_MASK | ARCMSR_HBCMU_OUTBOUND_DOORBELL_ISR_MASK|ARCMSR_HBCMU_OUTBOUND_POSTQUEUE_ISR_MASK);
		writel(intmask_org & mask, &reg->host_int_mask);
		acb->outbound_int_enable = ~(intmask_org & mask) & 0x0000000f;
		}
		break;
	case ACB_ADAPTER_TYPE_D: {
		struct MessageUnit_D *reg = acb->pmuD;

		/* Type D has a single all-interrupts enable mask. */
		mask = ARCMSR_ARC1214_ALL_INT_ENABLE;
		writel(intmask_org | mask, reg->pcief0_int_enable);
		break;
		}
	case ACB_ADAPTER_TYPE_E:
	case ACB_ADAPTER_TYPE_F: {
		struct MessageUnit_E __iomem *reg = acb->pmuE;

		/* E/F share the MessageUnit_E layout for host_int_mask. */
		mask = ~(ARCMSR_HBEMU_OUTBOUND_DOORBELL_ISR | ARCMSR_HBEMU_OUTBOUND_POSTQUEUE_ISR);
		writel(intmask_org & mask, &reg->host_int_mask);
		break;
		}
	}
}
1842 
/*
 * Translate a SCSI command into the adapter's ARCMSR_CDB inside @ccb:
 * copy the CDB bytes, DMA-map the data buffer and build the scatter/
 * gather list.  Returns SUCCESS, or FAILED if the mapping fails or
 * produces more segments than the host supports.
 */
static int arcmsr_build_ccb(struct AdapterControlBlock *acb,
	struct CommandControlBlock *ccb, struct scsi_cmnd *pcmd)
{
	struct ARCMSR_CDB *arcmsr_cdb = (struct ARCMSR_CDB *)&ccb->arcmsr_cdb;
	int8_t *psge = (int8_t *)&arcmsr_cdb->u;
	__le32 address_lo, address_hi;
	int arccdbsize = 0x30;	/* fixed header size before the SG list */
	__le32 length = 0;
	int i;
	struct scatterlist *sg;
	int nseg;
	ccb->pcmd = pcmd;
	memset(arcmsr_cdb, 0, sizeof(struct ARCMSR_CDB));
	arcmsr_cdb->TargetID = pcmd->device->id;
	arcmsr_cdb->LUN = pcmd->device->lun;
	arcmsr_cdb->Function = 1;
	arcmsr_cdb->msgContext = 0;
	memcpy(arcmsr_cdb->Cdb, pcmd->cmnd, pcmd->cmd_len);

	nseg = scsi_dma_map(pcmd);
	/*
	 * NOTE(review): when nseg > sg_tablesize the mapping succeeded but
	 * is not unmapped before returning FAILED — confirm the caller's
	 * error path performs scsi_dma_unmap().
	 */
	if (unlikely(nseg > acb->host->sg_tablesize || nseg < 0))
		return FAILED;
	scsi_for_each_sg(pcmd, sg, nseg, i) {
		/* Get the physical address of the current data pointer */
		length = cpu_to_le32(sg_dma_len(sg));
		address_lo = cpu_to_le32(dma_addr_lo32(sg_dma_address(sg)));
		address_hi = cpu_to_le32(dma_addr_hi32(sg_dma_address(sg)));
		/* Use a compact 32-bit SG entry when the high dword is zero. */
		if (address_hi == 0) {
			struct SG32ENTRY *pdma_sg = (struct SG32ENTRY *)psge;

			pdma_sg->address = address_lo;
			pdma_sg->length = length;
			psge += sizeof (struct SG32ENTRY);
			arccdbsize += sizeof (struct SG32ENTRY);
		} else {
			struct SG64ENTRY *pdma_sg = (struct SG64ENTRY *)psge;

			pdma_sg->addresshigh = address_hi;
			pdma_sg->address = address_lo;
			/* IS_SG64_ADDR flags this entry as 64-bit to the firmware. */
			pdma_sg->length = length|cpu_to_le32(IS_SG64_ADDR);
			psge += sizeof (struct SG64ENTRY);
			arccdbsize += sizeof (struct SG64ENTRY);
		}
	}
	arcmsr_cdb->sgcount = (uint8_t)nseg;
	arcmsr_cdb->DataLength = scsi_bufflen(pcmd);
	/* Number of 256-byte pages occupied by the whole CDB, rounded up. */
	arcmsr_cdb->msgPages = arccdbsize/0x100 + (arccdbsize % 0x100 ? 1 : 0);
	if ( arccdbsize > 256)
		arcmsr_cdb->Flags |= ARCMSR_CDB_FLAG_SGL_BSIZE;
	if (pcmd->sc_data_direction == DMA_TO_DEVICE)
		arcmsr_cdb->Flags |= ARCMSR_CDB_FLAG_WRITE;
	ccb->arc_cdb_size = arccdbsize;
	return SUCCESS;
}
1897 
/*
 * Hand a built CCB to the adapter using the inbound-queue mechanism of
 * the specific hardware generation, and account it as outstanding.
 */
static void arcmsr_post_ccb(struct AdapterControlBlock *acb, struct CommandControlBlock *ccb)
{
	uint32_t cdb_phyaddr = ccb->cdb_phyaddr;
	struct ARCMSR_CDB *arcmsr_cdb = (struct ARCMSR_CDB *)&ccb->arcmsr_cdb;
	atomic_inc(&acb->ccboutstandingcount);
	ccb->startdone = ARCMSR_CCB_START;
	switch (acb->adapter_type) {
	case ACB_ADAPTER_TYPE_A: {
		struct MessageUnit_A __iomem *reg = acb->pmuA;

		/* Large SG lists are flagged to the firmware in the low bits. */
		if (arcmsr_cdb->Flags & ARCMSR_CDB_FLAG_SGL_BSIZE)
			writel(cdb_phyaddr | ARCMSR_CCBPOST_FLAG_SGL_BSIZE,
			&reg->inbound_queueport);
		else
			writel(cdb_phyaddr, &reg->inbound_queueport);
		break;
	}

	case ACB_ADAPTER_TYPE_B: {
		struct MessageUnit_B *reg = acb->pmuB;
		uint32_t ending_index, index = reg->postq_index;

		/* Zero the next slot so the consumer sees a terminator. */
		ending_index = ((index + 1) % ARCMSR_MAX_HBB_POSTQUEUE);
		reg->post_qbuffer[ending_index] = 0;
		if (arcmsr_cdb->Flags & ARCMSR_CDB_FLAG_SGL_BSIZE) {
			reg->post_qbuffer[index] =
				cdb_phyaddr | ARCMSR_CCBPOST_FLAG_SGL_BSIZE;
		} else {
			reg->post_qbuffer[index] = cdb_phyaddr;
		}
		index++;
		index %= ARCMSR_MAX_HBB_POSTQUEUE;/*if last index number set it to 0 */
		reg->postq_index = index;
		/* Ring the doorbell to tell the IOP a CDB was posted. */
		writel(ARCMSR_DRV2IOP_CDB_POSTED, reg->drv2iop_doorbell);
		}
		break;
	case ACB_ADAPTER_TYPE_C: {
		struct MessageUnit_C __iomem *phbcmu = acb->pmuC;
		uint32_t ccb_post_stamp, arc_cdb_size;

		/* Encode (size-1)/64 plus a valid bit into the low address bits. */
		arc_cdb_size = (ccb->arc_cdb_size > 0x300) ? 0x300 : ccb->arc_cdb_size;
		ccb_post_stamp = (cdb_phyaddr | ((arc_cdb_size - 1) >> 6) | 1);
		/* High dword must be written first, then low triggers the post. */
		writel(upper_32_bits(ccb->cdb_phyaddr), &phbcmu->inbound_queueport_high);
		writel(ccb_post_stamp, &phbcmu->inbound_queueport_low);
		}
		break;
	case ACB_ADAPTER_TYPE_D: {
		struct MessageUnit_D  *pmu = acb->pmuD;
		u16 index_stripped;
		u16 postq_index, toggle;
		unsigned long flags;
		struct InBound_SRB *pinbound_srb;

		/* The in-memory post list is shared; serialize producers. */
		spin_lock_irqsave(&acb->postq_lock, flags);
		postq_index = pmu->postq_index;
		pinbound_srb = (struct InBound_SRB *)&(pmu->post_qbuffer[postq_index & 0xFF]);
		pinbound_srb->addressHigh = upper_32_bits(ccb->cdb_phyaddr);
		pinbound_srb->addressLow = cdb_phyaddr;
		pinbound_srb->length = ccb->arc_cdb_size >> 2;
		arcmsr_cdb->msgContext = dma_addr_lo32(cdb_phyaddr);
		/* Bit 0x4000 toggles on each wrap to distinguish generations. */
		toggle = postq_index & 0x4000;
		index_stripped = postq_index + 1;
		index_stripped &= (ARCMSR_MAX_ARC1214_POSTQUEUE - 1);
		pmu->postq_index = index_stripped ? (index_stripped | toggle) :
			(toggle ^ 0x4000);
		writel(postq_index, pmu->inboundlist_write_pointer);
		spin_unlock_irqrestore(&acb->postq_lock, flags);
		break;
		}
	case ACB_ADAPTER_TYPE_E: {
		struct MessageUnit_E __iomem *pmu = acb->pmuE;
		u32 ccb_post_stamp, arc_cdb_size;

		/* Type E posts by SMID rather than physical address. */
		arc_cdb_size = (ccb->arc_cdb_size > 0x300) ? 0x300 : ccb->arc_cdb_size;
		ccb_post_stamp = (ccb->smid | ((arc_cdb_size - 1) >> 6));
		writel(0, &pmu->inbound_queueport_high);
		writel(ccb_post_stamp, &pmu->inbound_queueport_low);
		break;
		}
	case ACB_ADAPTER_TYPE_F: {
		struct MessageUnit_F __iomem *pmu = acb->pmuF;
		u32 ccb_post_stamp, arc_cdb_size;

		/* Type F uses a different size encoding above 0x300 bytes. */
		if (ccb->arc_cdb_size <= 0x300)
			arc_cdb_size = (ccb->arc_cdb_size - 1) >> 6 | 1;
		else {
			arc_cdb_size = ((ccb->arc_cdb_size + 0xff) >> 8) + 2;
			if (arc_cdb_size > 0xF)
				arc_cdb_size = 0xF;
			arc_cdb_size = (arc_cdb_size << 1) | 1;
		}
		ccb_post_stamp = (ccb->smid | arc_cdb_size);
		writel(0, &pmu->inbound_queueport_high);
		writel(ccb_post_stamp, &pmu->inbound_queueport_low);
		break;
		}
	}
}
1996 
1997 static void arcmsr_hbaA_stop_bgrb(struct AdapterControlBlock *acb)
1998 {
1999 	struct MessageUnit_A __iomem *reg = acb->pmuA;
2000 	acb->acb_flags &= ~ACB_F_MSG_START_BGRB;
2001 	writel(ARCMSR_INBOUND_MESG0_STOP_BGRB, &reg->inbound_msgaddr0);
2002 	if (!arcmsr_hbaA_wait_msgint_ready(acb)) {
2003 		printk(KERN_NOTICE
2004 			"arcmsr%d: wait 'stop adapter background rebuild' timeout\n"
2005 			, acb->host->host_no);
2006 	}
2007 }
2008 
2009 static void arcmsr_hbaB_stop_bgrb(struct AdapterControlBlock *acb)
2010 {
2011 	struct MessageUnit_B *reg = acb->pmuB;
2012 	acb->acb_flags &= ~ACB_F_MSG_START_BGRB;
2013 	writel(ARCMSR_MESSAGE_STOP_BGRB, reg->drv2iop_doorbell);
2014 
2015 	if (!arcmsr_hbaB_wait_msgint_ready(acb)) {
2016 		printk(KERN_NOTICE
2017 			"arcmsr%d: wait 'stop adapter background rebuild' timeout\n"
2018 			, acb->host->host_no);
2019 	}
2020 }
2021 
2022 static void arcmsr_hbaC_stop_bgrb(struct AdapterControlBlock *pACB)
2023 {
2024 	struct MessageUnit_C __iomem *reg = pACB->pmuC;
2025 	pACB->acb_flags &= ~ACB_F_MSG_START_BGRB;
2026 	writel(ARCMSR_INBOUND_MESG0_STOP_BGRB, &reg->inbound_msgaddr0);
2027 	writel(ARCMSR_HBCMU_DRV2IOP_MESSAGE_CMD_DONE, &reg->inbound_doorbell);
2028 	if (!arcmsr_hbaC_wait_msgint_ready(pACB)) {
2029 		printk(KERN_NOTICE
2030 			"arcmsr%d: wait 'stop adapter background rebuild' timeout\n"
2031 			, pACB->host->host_no);
2032 	}
2033 	return;
2034 }
2035 
/* Ask a type-D adapter to stop background rebuild; warn on timeout. */
static void arcmsr_hbaD_stop_bgrb(struct AdapterControlBlock *pACB)
{
	struct MessageUnit_D *reg = pACB->pmuD;

	pACB->acb_flags &= ~ACB_F_MSG_START_BGRB;
	writel(ARCMSR_INBOUND_MESG0_STOP_BGRB, reg->inbound_msgaddr0);
	if (!arcmsr_hbaD_wait_msgint_ready(pACB))
		pr_notice("arcmsr%d: wait 'stop adapter background rebuild' "
			"timeout\n", pACB->host->host_no);
}
2046 
/*
 * Ask a type-E/F adapter to stop background rebuild; warn on timeout.
 * E/F signal the IOP by toggling a bit in the shadowed out_doorbell
 * value and writing the whole word back.
 */
static void arcmsr_hbaE_stop_bgrb(struct AdapterControlBlock *pACB)
{
	struct MessageUnit_E __iomem *reg = pACB->pmuE;

	pACB->acb_flags &= ~ACB_F_MSG_START_BGRB;
	writel(ARCMSR_INBOUND_MESG0_STOP_BGRB, &reg->inbound_msgaddr0);
	pACB->out_doorbell ^= ARCMSR_HBEMU_DRV2IOP_MESSAGE_CMD_DONE;
	writel(pACB->out_doorbell, &reg->iobound_doorbell);
	if (!arcmsr_hbaE_wait_msgint_ready(pACB)) {
		pr_notice("arcmsr%d: wait 'stop adapter background rebuild' "
			"timeout\n", pACB->host->host_no);
	}
}
2060 
2061 static void arcmsr_stop_adapter_bgrb(struct AdapterControlBlock *acb)
2062 {
2063 	switch (acb->adapter_type) {
2064 	case ACB_ADAPTER_TYPE_A:
2065 		arcmsr_hbaA_stop_bgrb(acb);
2066 		break;
2067 	case ACB_ADAPTER_TYPE_B:
2068 		arcmsr_hbaB_stop_bgrb(acb);
2069 		break;
2070 	case ACB_ADAPTER_TYPE_C:
2071 		arcmsr_hbaC_stop_bgrb(acb);
2072 		break;
2073 	case ACB_ADAPTER_TYPE_D:
2074 		arcmsr_hbaD_stop_bgrb(acb);
2075 		break;
2076 	case ACB_ADAPTER_TYPE_E:
2077 	case ACB_ADAPTER_TYPE_F:
2078 		arcmsr_hbaE_stop_bgrb(acb);
2079 		break;
2080 	}
2081 }
2082 
/*
 * Release the CCB pool's DMA memory.  When XOR offload areas were set
 * up (xor_mega != 0), first free each per-segment coherent buffer
 * tracked by the Xor_sg/virtual-pointer tables inside xorVirt, then
 * the xorVirt config area itself, and finally the main uncached pool.
 */
static void arcmsr_free_ccb_pool(struct AdapterControlBlock *acb)
{
	if (acb->xor_mega) {
		struct Xor_sg *pXorPhys;
		void **pXorVirt;
		int i;

		/* Physical SG table starts right after the HostRamBuf header. */
		pXorPhys = (struct Xor_sg *)(acb->xorVirt +
			sizeof(struct HostRamBuf));
		/* Parallel table of kernel virtual addresses at xorVirtOffset. */
		pXorVirt = (void **)((unsigned long)acb->xorVirt +
			(unsigned long)acb->xorVirtOffset);
		for (i = 0; i < acb->xor_mega; i++) {
			if (pXorPhys->xorPhys) {
				dma_free_coherent(&acb->pdev->dev,
					ARCMSR_XOR_SEG_SIZE,
					*pXorVirt, pXorPhys->xorPhys);
				/* Clear both entries to avoid a stale double free. */
				pXorPhys->xorPhys = 0;
				*pXorVirt = NULL;
			}
			pXorPhys++;
			pXorVirt++;
		}
		dma_free_coherent(&acb->pdev->dev, acb->init2cfg_size,
			acb->xorVirt, acb->xorPhys);
	}
	dma_free_coherent(&acb->pdev->dev, acb->uncache_size, acb->dma_coherent, acb->dma_coherent_handle);
}
2110 
/*
 * Acknowledge to the IOP that the driver has consumed its message
 * data, using the generation-specific doorbell mechanism.
 */
static void arcmsr_iop_message_read(struct AdapterControlBlock *acb)
{
	switch (acb->adapter_type) {
	case ACB_ADAPTER_TYPE_A: {
		struct MessageUnit_A __iomem *reg = acb->pmuA;
		writel(ARCMSR_INBOUND_DRIVER_DATA_READ_OK, &reg->inbound_doorbell);
		}
		break;
	case ACB_ADAPTER_TYPE_B: {
		struct MessageUnit_B *reg = acb->pmuB;
		writel(ARCMSR_DRV2IOP_DATA_READ_OK, reg->drv2iop_doorbell);
		}
		break;
	case ACB_ADAPTER_TYPE_C: {
		struct MessageUnit_C __iomem *reg = acb->pmuC;

		writel(ARCMSR_HBCMU_DRV2IOP_DATA_READ_OK, &reg->inbound_doorbell);
		}
		break;
	case ACB_ADAPTER_TYPE_D: {
		struct MessageUnit_D *reg = acb->pmuD;
		writel(ARCMSR_ARC1214_DRV2IOP_DATA_OUT_READ,
			reg->inbound_doorbell);
		}
		break;
	case ACB_ADAPTER_TYPE_E:
	case ACB_ADAPTER_TYPE_F: {
		struct MessageUnit_E __iomem *reg = acb->pmuE;
		/* E/F toggle a bit in the shadow doorbell rather than pulsing. */
		acb->out_doorbell ^= ARCMSR_HBEMU_DRV2IOP_DATA_READ_OK;
		writel(acb->out_doorbell, &reg->iobound_doorbell);
		}
		break;
	}
}
2145 
/*
 * Notify the IOP that the driver has placed data into its write
 * buffer, using the generation-specific doorbell mechanism.
 */
static void arcmsr_iop_message_wrote(struct AdapterControlBlock *acb)
{
	switch (acb->adapter_type) {
	case ACB_ADAPTER_TYPE_A: {
		struct MessageUnit_A __iomem *reg = acb->pmuA;
		/*
		** push inbound doorbell tell iop, driver data write ok
		** and wait reply on next hwinterrupt for next Qbuffer post
		*/
		writel(ARCMSR_INBOUND_DRIVER_DATA_WRITE_OK, &reg->inbound_doorbell);
		}
		break;

	case ACB_ADAPTER_TYPE_B: {
		struct MessageUnit_B *reg = acb->pmuB;
		/*
		** push inbound doorbell tell iop, driver data write ok
		** and wait reply on next hwinterrupt for next Qbuffer post
		*/
		writel(ARCMSR_DRV2IOP_DATA_WRITE_OK, reg->drv2iop_doorbell);
		}
		break;
	case ACB_ADAPTER_TYPE_C: {
		struct MessageUnit_C __iomem *reg = acb->pmuC;
		/*
		** push inbound doorbell tell iop, driver data write ok
		** and wait reply on next hwinterrupt for next Qbuffer post
		*/
		writel(ARCMSR_HBCMU_DRV2IOP_DATA_WRITE_OK, &reg->inbound_doorbell);
		}
		break;
	case ACB_ADAPTER_TYPE_D: {
		struct MessageUnit_D *reg = acb->pmuD;
		writel(ARCMSR_ARC1214_DRV2IOP_DATA_IN_READY,
			reg->inbound_doorbell);
		}
		break;
	case ACB_ADAPTER_TYPE_E:
	case ACB_ADAPTER_TYPE_F: {
		struct MessageUnit_E __iomem *reg = acb->pmuE;
		/* E/F toggle a bit in the shadow doorbell rather than pulsing. */
		acb->out_doorbell ^= ARCMSR_HBEMU_DRV2IOP_DATA_WRITE_OK;
		writel(acb->out_doorbell, &reg->iobound_doorbell);
		}
		break;
	}
}
2192 
2193 struct QBUFFER __iomem *arcmsr_get_iop_rqbuffer(struct AdapterControlBlock *acb)
2194 {
2195 	struct QBUFFER __iomem *qbuffer = NULL;
2196 	switch (acb->adapter_type) {
2197 
2198 	case ACB_ADAPTER_TYPE_A: {
2199 		struct MessageUnit_A __iomem *reg = acb->pmuA;
2200 		qbuffer = (struct QBUFFER __iomem *)&reg->message_rbuffer;
2201 		}
2202 		break;
2203 	case ACB_ADAPTER_TYPE_B: {
2204 		struct MessageUnit_B *reg = acb->pmuB;
2205 		qbuffer = (struct QBUFFER __iomem *)reg->message_rbuffer;
2206 		}
2207 		break;
2208 	case ACB_ADAPTER_TYPE_C: {
2209 		struct MessageUnit_C __iomem *phbcmu = acb->pmuC;
2210 		qbuffer = (struct QBUFFER __iomem *)&phbcmu->message_rbuffer;
2211 		}
2212 		break;
2213 	case ACB_ADAPTER_TYPE_D: {
2214 		struct MessageUnit_D *reg = acb->pmuD;
2215 		qbuffer = (struct QBUFFER __iomem *)reg->message_rbuffer;
2216 		}
2217 		break;
2218 	case ACB_ADAPTER_TYPE_E: {
2219 		struct MessageUnit_E __iomem *reg = acb->pmuE;
2220 		qbuffer = (struct QBUFFER __iomem *)&reg->message_rbuffer;
2221 		}
2222 		break;
2223 	case ACB_ADAPTER_TYPE_F: {
2224 		qbuffer = (struct QBUFFER __iomem *)acb->message_rbuffer;
2225 		}
2226 		break;
2227 	}
2228 	return qbuffer;
2229 }
2230 
2231 static struct QBUFFER __iomem *arcmsr_get_iop_wqbuffer(struct AdapterControlBlock *acb)
2232 {
2233 	struct QBUFFER __iomem *pqbuffer = NULL;
2234 	switch (acb->adapter_type) {
2235 
2236 	case ACB_ADAPTER_TYPE_A: {
2237 		struct MessageUnit_A __iomem *reg = acb->pmuA;
2238 		pqbuffer = (struct QBUFFER __iomem *) &reg->message_wbuffer;
2239 		}
2240 		break;
2241 	case ACB_ADAPTER_TYPE_B: {
2242 		struct MessageUnit_B  *reg = acb->pmuB;
2243 		pqbuffer = (struct QBUFFER __iomem *)reg->message_wbuffer;
2244 		}
2245 		break;
2246 	case ACB_ADAPTER_TYPE_C: {
2247 		struct MessageUnit_C __iomem *reg = acb->pmuC;
2248 		pqbuffer = (struct QBUFFER __iomem *)&reg->message_wbuffer;
2249 		}
2250 		break;
2251 	case ACB_ADAPTER_TYPE_D: {
2252 		struct MessageUnit_D *reg = acb->pmuD;
2253 		pqbuffer = (struct QBUFFER __iomem *)reg->message_wbuffer;
2254 		}
2255 		break;
2256 	case ACB_ADAPTER_TYPE_E: {
2257 		struct MessageUnit_E __iomem *reg = acb->pmuE;
2258 		pqbuffer = (struct QBUFFER __iomem *)&reg->message_wbuffer;
2259 		}
2260 		break;
2261 	case ACB_ADAPTER_TYPE_F:
2262 		pqbuffer = (struct QBUFFER __iomem *)acb->message_wbuffer;
2263 		break;
2264 	}
2265 	return pqbuffer;
2266 }
2267 
2268 static uint32_t
2269 arcmsr_Read_iop_rqbuffer_in_DWORD(struct AdapterControlBlock *acb,
2270 		struct QBUFFER __iomem *prbuffer)
2271 {
2272 	uint8_t *pQbuffer;
2273 	uint8_t *buf1 = NULL;
2274 	uint32_t __iomem *iop_data;
2275 	uint32_t iop_len, data_len, *buf2 = NULL;
2276 
2277 	iop_data = (uint32_t __iomem *)prbuffer->data;
2278 	iop_len = readl(&prbuffer->data_len);
2279 	if (iop_len > 0) {
2280 		buf1 = kmalloc(128, GFP_ATOMIC);
2281 		buf2 = (uint32_t *)buf1;
2282 		if (buf1 == NULL)
2283 			return 0;
2284 		data_len = iop_len;
2285 		while (data_len >= 4) {
2286 			*buf2++ = readl(iop_data);
2287 			iop_data++;
2288 			data_len -= 4;
2289 		}
2290 		if (data_len)
2291 			*buf2 = readl(iop_data);
2292 		buf2 = (uint32_t *)buf1;
2293 	}
2294 	while (iop_len > 0) {
2295 		pQbuffer = &acb->rqbuffer[acb->rqbuf_putIndex];
2296 		*pQbuffer = *buf1;
2297 		acb->rqbuf_putIndex++;
2298 		/* if last, index number set it to 0 */
2299 		acb->rqbuf_putIndex %= ARCMSR_MAX_QBUFFER;
2300 		buf1++;
2301 		iop_len--;
2302 	}
2303 	kfree(buf2);
2304 	/* let IOP know data has been read */
2305 	arcmsr_iop_message_read(acb);
2306 	return 1;
2307 }
2308 
/*
 * Copy an incoming IOP message into the driver's circular rqbuffer.
 * Adapters newer than type B are read in DWORDs via the helper above;
 * A/B message buffers are read one byte at a time with readb().
 * Returns 1 on success, 0 if the DWORD path failed to allocate.
 */
uint32_t
arcmsr_Read_iop_rqbuffer_data(struct AdapterControlBlock *acb,
	struct QBUFFER __iomem *prbuffer) {

	uint8_t *pQbuffer;
	uint8_t __iomem *iop_data;
	uint32_t iop_len;

	if (acb->adapter_type > ACB_ADAPTER_TYPE_B)
		return arcmsr_Read_iop_rqbuffer_in_DWORD(acb, prbuffer);
	iop_data = (uint8_t __iomem *)prbuffer->data;
	iop_len = readl(&prbuffer->data_len);
	while (iop_len > 0) {
		pQbuffer = &acb->rqbuffer[acb->rqbuf_putIndex];
		*pQbuffer = readb(iop_data);
		acb->rqbuf_putIndex++;
		acb->rqbuf_putIndex %= ARCMSR_MAX_QBUFFER;
		iop_data++;
		iop_len--;
	}
	/* Acknowledge consumption so the IOP can post the next buffer. */
	arcmsr_iop_message_read(acb);
	return 1;
}
2332 
/*
 * Doorbell handler for "IOP wrote data": if the circular rqbuffer has
 * room for the pending message, pull it in; otherwise (or if the read
 * fails) mark ACB_F_IOPDATA_OVERFLOW so it is retried later.
 */
static void arcmsr_iop2drv_data_wrote_handle(struct AdapterControlBlock *acb)
{
	unsigned long flags;
	struct QBUFFER __iomem  *prbuffer;
	int32_t buf_empty_len;

	spin_lock_irqsave(&acb->rqbuffer_lock, flags);
	prbuffer = arcmsr_get_iop_rqbuffer(acb);
	/* Free space in the ring, keeping one slot to distinguish full/empty. */
	if (acb->rqbuf_putIndex >= acb->rqbuf_getIndex) {
		buf_empty_len = (ARCMSR_MAX_QBUFFER - 1) -
		(acb->rqbuf_putIndex - acb->rqbuf_getIndex);
	} else
		buf_empty_len = acb->rqbuf_getIndex - acb->rqbuf_putIndex - 1;
	if (buf_empty_len >= readl(&prbuffer->data_len)) {
		if (arcmsr_Read_iop_rqbuffer_data(acb, prbuffer) == 0)
			acb->acb_flags |= ACB_F_IOPDATA_OVERFLOW;
	} else
		acb->acb_flags |= ACB_F_IOPDATA_OVERFLOW;
	spin_unlock_irqrestore(&acb->rqbuffer_lock, flags);
}
2353 
/*
 * Push up to 124 bytes of queued ioctl data from the circular wqbuffer
 * to the IOP's write buffer using 32-bit MMIO writes, staged through a
 * kmalloc bounce buffer.  Only runs when the IOP has acknowledged the
 * previous buffer (ACB_F_MESSAGE_WQBUFFER_READED set).
 */
static void arcmsr_write_ioctldata2iop_in_DWORD(struct AdapterControlBlock *acb)
{
	uint8_t *pQbuffer;
	struct QBUFFER __iomem *pwbuffer;
	uint8_t *buf1 = NULL;
	uint32_t __iomem *iop_data;
	uint32_t allxfer_len = 0, data_len, *buf2 = NULL, data;

	if (acb->acb_flags & ACB_F_MESSAGE_WQBUFFER_READED) {
		/* GFP_ATOMIC: may run in interrupt context. */
		buf1 = kmalloc(128, GFP_ATOMIC);
		buf2 = (uint32_t *)buf1;
		if (buf1 == NULL)
			return;

		acb->acb_flags &= (~ACB_F_MESSAGE_WQBUFFER_READED);
		pwbuffer = arcmsr_get_iop_wqbuffer(acb);
		iop_data = (uint32_t __iomem *)pwbuffer->data;
		/* Drain the ring into the bounce buffer, at most 124 bytes. */
		while ((acb->wqbuf_getIndex != acb->wqbuf_putIndex)
			&& (allxfer_len < 124)) {
			pQbuffer = &acb->wqbuffer[acb->wqbuf_getIndex];
			*buf1 = *pQbuffer;
			acb->wqbuf_getIndex++;
			acb->wqbuf_getIndex %= ARCMSR_MAX_QBUFFER;
			buf1++;
			allxfer_len++;
		}
		/* Replay the bounce buffer into MMIO space in DWORDs. */
		data_len = allxfer_len;
		buf1 = (uint8_t *)buf2;	/* rewind to buffer start */
		while (data_len >= 4) {
			data = *buf2++;
			writel(data, iop_data);
			iop_data++;
			data_len -= 4;
		}
		if (data_len) {
			data = *buf2;
			writel(data, iop_data);
		}
		writel(allxfer_len, &pwbuffer->data_len);
		kfree(buf1);
		/* Doorbell the IOP that new write data is available. */
		arcmsr_iop_message_wrote(acb);
	}
}
2397 
/*
 * Push up to 124 bytes of queued ioctl data from the circular wqbuffer
 * to the IOP's write buffer.  Adapters newer than type B take the
 * DWORD path above; A/B write byte-by-byte with writeb().  Only runs
 * when the IOP acknowledged the previous buffer.
 */
void
arcmsr_write_ioctldata2iop(struct AdapterControlBlock *acb)
{
	uint8_t *pQbuffer;
	struct QBUFFER __iomem *pwbuffer;
	uint8_t __iomem *iop_data;
	int32_t allxfer_len = 0;

	if (acb->adapter_type > ACB_ADAPTER_TYPE_B) {
		arcmsr_write_ioctldata2iop_in_DWORD(acb);
		return;
	}
	if (acb->acb_flags & ACB_F_MESSAGE_WQBUFFER_READED) {
		acb->acb_flags &= (~ACB_F_MESSAGE_WQBUFFER_READED);
		pwbuffer = arcmsr_get_iop_wqbuffer(acb);
		iop_data = (uint8_t __iomem *)pwbuffer->data;
		while ((acb->wqbuf_getIndex != acb->wqbuf_putIndex)
			&& (allxfer_len < 124)) {
			pQbuffer = &acb->wqbuffer[acb->wqbuf_getIndex];
			writeb(*pQbuffer, iop_data);
			acb->wqbuf_getIndex++;
			acb->wqbuf_getIndex %= ARCMSR_MAX_QBUFFER;
			iop_data++;
			allxfer_len++;
		}
		writel(allxfer_len, &pwbuffer->data_len);
		/* Doorbell the IOP that new write data is available. */
		arcmsr_iop_message_wrote(acb);
	}
}
2427 
/*
 * Doorbell handler for "IOP read data": the IOP consumed the previous
 * write buffer, so flush any remaining queued data.  The two separate
 * ifs are intentional: the second check observes the get index AFTER
 * the write above may have drained the ring.
 */
static void arcmsr_iop2drv_data_read_handle(struct AdapterControlBlock *acb)
{
	unsigned long flags;

	spin_lock_irqsave(&acb->wqbuffer_lock, flags);
	acb->acb_flags |= ACB_F_MESSAGE_WQBUFFER_READED;
	if (acb->wqbuf_getIndex != acb->wqbuf_putIndex)
		arcmsr_write_ioctldata2iop(acb);
	if (acb->wqbuf_getIndex == acb->wqbuf_putIndex)
		acb->acb_flags |= ACB_F_MESSAGE_WQBUFFER_CLEARED;
	spin_unlock_irqrestore(&acb->wqbuffer_lock, flags);
}
2440 
/*
 * Type-A doorbell ISR: acknowledge the outbound doorbell, service the
 * data-wrote / data-read events, and loop until the register stops
 * reporting either event so nothing raised mid-handling is lost.
 */
static void arcmsr_hbaA_doorbell_isr(struct AdapterControlBlock *acb)
{
	uint32_t outbound_doorbell;
	struct MessageUnit_A __iomem *reg = acb->pmuA;
	outbound_doorbell = readl(&reg->outbound_doorbell);
	do {
		/* Write back the bits we saw to clear them. */
		writel(outbound_doorbell, &reg->outbound_doorbell);
		if (outbound_doorbell & ARCMSR_OUTBOUND_IOP331_DATA_WRITE_OK)
			arcmsr_iop2drv_data_wrote_handle(acb);
		if (outbound_doorbell & ARCMSR_OUTBOUND_IOP331_DATA_READ_OK)
			arcmsr_iop2drv_data_read_handle(acb);
		outbound_doorbell = readl(&reg->outbound_doorbell);
	} while (outbound_doorbell & (ARCMSR_OUTBOUND_IOP331_DATA_WRITE_OK
		| ARCMSR_OUTBOUND_IOP331_DATA_READ_OK));
}
/*
 * Type-C doorbell ISR: clear and service data-wrote / data-read /
 * message-done events, looping until no further events are pending.
 */
static void arcmsr_hbaC_doorbell_isr(struct AdapterControlBlock *pACB)
{
	uint32_t outbound_doorbell;
	struct MessageUnit_C __iomem *reg = pACB->pmuC;
	/*
	*******************************************************************
	**  Maybe here we need to check wrqbuffer_lock is lock or not
	**  DOORBELL: din! don!
	**  check if there are any mail need to pack from firmware
	*******************************************************************
	*/
	outbound_doorbell = readl(&reg->outbound_doorbell);
	do {
		writel(outbound_doorbell, &reg->outbound_doorbell_clear);
		/* Read back to flush the clear write before handling. */
		readl(&reg->outbound_doorbell_clear);
		if (outbound_doorbell & ARCMSR_HBCMU_IOP2DRV_DATA_WRITE_OK)
			arcmsr_iop2drv_data_wrote_handle(pACB);
		if (outbound_doorbell & ARCMSR_HBCMU_IOP2DRV_DATA_READ_OK)
			arcmsr_iop2drv_data_read_handle(pACB);
		if (outbound_doorbell & ARCMSR_HBCMU_IOP2DRV_MESSAGE_CMD_DONE)
			arcmsr_hbaC_message_isr(pACB);
		outbound_doorbell = readl(&reg->outbound_doorbell);
	} while (outbound_doorbell & (ARCMSR_HBCMU_IOP2DRV_DATA_WRITE_OK
		| ARCMSR_HBCMU_IOP2DRV_DATA_READ_OK
		| ARCMSR_HBCMU_IOP2DRV_MESSAGE_CMD_DONE));
}
2482 
/*
 * Type-D doorbell ISR: acknowledge and service message-done /
 * data-wrote / data-read events, looping until none remain pending.
 */
static void arcmsr_hbaD_doorbell_isr(struct AdapterControlBlock *pACB)
{
	uint32_t outbound_doorbell;
	struct MessageUnit_D  *pmu = pACB->pmuD;

	outbound_doorbell = readl(pmu->outbound_doorbell);
	do {
		/* Write back the bits we saw to clear them. */
		writel(outbound_doorbell, pmu->outbound_doorbell);
		if (outbound_doorbell & ARCMSR_ARC1214_IOP2DRV_MESSAGE_CMD_DONE)
			arcmsr_hbaD_message_isr(pACB);
		if (outbound_doorbell & ARCMSR_ARC1214_IOP2DRV_DATA_WRITE_OK)
			arcmsr_iop2drv_data_wrote_handle(pACB);
		if (outbound_doorbell & ARCMSR_ARC1214_IOP2DRV_DATA_READ_OK)
			arcmsr_iop2drv_data_read_handle(pACB);
		outbound_doorbell = readl(pmu->outbound_doorbell);
	} while (outbound_doorbell & (ARCMSR_ARC1214_IOP2DRV_DATA_WRITE_OK
		| ARCMSR_ARC1214_IOP2DRV_DATA_READ_OK
		| ARCMSR_ARC1214_IOP2DRV_MESSAGE_CMD_DONE));
}
2502 
/*
 * Type-E/F doorbell ISR.  E/F doorbells are level bits toggled by the
 * IOP, so events are detected by XOR-ing the current register value
 * against the cached copy (pACB->in_doorbell); the cache is updated at
 * the end so the next ISR sees only new toggles.
 */
static void arcmsr_hbaE_doorbell_isr(struct AdapterControlBlock *pACB)
{
	uint32_t outbound_doorbell, in_doorbell, tmp, i;
	struct MessageUnit_E __iomem *reg = pACB->pmuE;

	/*
	 * On type F the register is re-read up to 5 times until non-zero —
	 * presumably a hardware read quirk; confirm against Areca docs.
	 */
	if (pACB->adapter_type == ACB_ADAPTER_TYPE_F) {
		for (i = 0; i < 5; i++) {
			in_doorbell = readl(&reg->iobound_doorbell);
			if (in_doorbell != 0)
				break;
		}
	} else
		in_doorbell = readl(&reg->iobound_doorbell);
	outbound_doorbell = in_doorbell ^ pACB->in_doorbell;
	do {
		writel(0, &reg->host_int_status); /* clear interrupt */
		if (outbound_doorbell & ARCMSR_HBEMU_IOP2DRV_DATA_WRITE_OK) {
			arcmsr_iop2drv_data_wrote_handle(pACB);
		}
		if (outbound_doorbell & ARCMSR_HBEMU_IOP2DRV_DATA_READ_OK) {
			arcmsr_iop2drv_data_read_handle(pACB);
		}
		if (outbound_doorbell & ARCMSR_HBEMU_IOP2DRV_MESSAGE_CMD_DONE) {
			arcmsr_hbaE_message_isr(pACB);
		}
		/* Re-sample and diff again to catch toggles during handling. */
		tmp = in_doorbell;
		in_doorbell = readl(&reg->iobound_doorbell);
		outbound_doorbell = tmp ^ in_doorbell;
	} while (outbound_doorbell & (ARCMSR_HBEMU_IOP2DRV_DATA_WRITE_OK
		| ARCMSR_HBEMU_IOP2DRV_DATA_READ_OK
		| ARCMSR_HBEMU_IOP2DRV_MESSAGE_CMD_DONE));
	pACB->in_doorbell = in_doorbell;
}
2536 
/*
 * Type-A completion ISR: drain the outbound queue port.  Each entry
 * encodes the CCB physical address >> 5 plus status flags; 0xFFFFFFFF
 * means the queue is empty.
 */
static void arcmsr_hbaA_postqueue_isr(struct AdapterControlBlock *acb)
{
	uint32_t flag_ccb;
	struct MessageUnit_A __iomem *reg = acb->pmuA;
	struct ARCMSR_CDB *pARCMSR_CDB;
	struct CommandControlBlock *pCCB;
	bool error;
	unsigned long cdb_phy_addr;

	while ((flag_ccb = readl(&reg->outbound_queueport)) != 0xFFFFFFFF) {
		/* Recover the low 32 bits of the CDB physical address. */
		cdb_phy_addr = (flag_ccb << 5) & 0xffffffff;
		if (acb->cdb_phyadd_hipart)
			cdb_phy_addr = cdb_phy_addr | acb->cdb_phyadd_hipart;
		/* Translate physical back to virtual via the cached offset. */
		pARCMSR_CDB = (struct ARCMSR_CDB *)(acb->vir2phy_offset + cdb_phy_addr);
		pCCB = container_of(pARCMSR_CDB, struct CommandControlBlock, arcmsr_cdb);
		error = (flag_ccb & ARCMSR_CCBREPLY_FLAG_ERROR_MODE0) ? true : false;
		arcmsr_drain_donequeue(acb, pCCB, error);
	}
}
/*
 * Type-B completion ISR: walk the in-memory done queue from the saved
 * index, consuming entries until a zero terminator; each consumed slot
 * is cleared so it can be reused.
 */
static void arcmsr_hbaB_postqueue_isr(struct AdapterControlBlock *acb)
{
	uint32_t index;
	uint32_t flag_ccb;
	struct MessageUnit_B *reg = acb->pmuB;
	struct ARCMSR_CDB *pARCMSR_CDB;
	struct CommandControlBlock *pCCB;
	bool error;
	unsigned long cdb_phy_addr;

	index = reg->doneq_index;
	while ((flag_ccb = reg->done_qbuffer[index]) != 0) {
		/* Entry encodes the CDB physical address >> 5 plus flags. */
		cdb_phy_addr = (flag_ccb << 5) & 0xffffffff;
		if (acb->cdb_phyadd_hipart)
			cdb_phy_addr = cdb_phy_addr | acb->cdb_phyadd_hipart;
		pARCMSR_CDB = (struct ARCMSR_CDB *)(acb->vir2phy_offset + cdb_phy_addr);
		pCCB = container_of(pARCMSR_CDB, struct CommandControlBlock, arcmsr_cdb);
		error = (flag_ccb & ARCMSR_CCBREPLY_FLAG_ERROR_MODE0) ? true : false;
		arcmsr_drain_donequeue(acb, pCCB, error);
		reg->done_qbuffer[index] = 0;
		index++;
		index %= ARCMSR_MAX_HBB_POSTQUEUE;
		reg->doneq_index = index;
	}
}
2581 
/*
 * Type-C completion ISR: drain the outbound queue port (0xFFFFFFFF
 * means empty).  Entries carry the CDB physical address in the upper
 * bits; every ARCMSR_HBC_ISR_THROTTLING_LEVEL completions the driver
 * pokes the throttling doorbell to keep the firmware posting.
 */
static void arcmsr_hbaC_postqueue_isr(struct AdapterControlBlock *acb)
{
	struct MessageUnit_C __iomem *phbcmu;
	struct ARCMSR_CDB *arcmsr_cdb;
	struct CommandControlBlock *ccb;
	uint32_t flag_ccb, throttling = 0;
	unsigned long ccb_cdb_phy;
	int error;

	phbcmu = acb->pmuC;
	/* areca cdb command done */
	/* Use correct offset and size for syncing */

	while ((flag_ccb = readl(&phbcmu->outbound_queueport_low)) !=
			0xFFFFFFFF) {
		/* Low 4 bits are status flags; the rest is the address. */
		ccb_cdb_phy = (flag_ccb & 0xFFFFFFF0);
		if (acb->cdb_phyadd_hipart)
			ccb_cdb_phy = ccb_cdb_phy | acb->cdb_phyadd_hipart;
		arcmsr_cdb = (struct ARCMSR_CDB *)(acb->vir2phy_offset
			+ ccb_cdb_phy);
		ccb = container_of(arcmsr_cdb, struct CommandControlBlock,
			arcmsr_cdb);
		error = (flag_ccb & ARCMSR_CCBREPLY_FLAG_ERROR_MODE1)
			? true : false;
		/* check if command done with no error */
		arcmsr_drain_donequeue(acb, ccb, error);
		throttling++;
		if (throttling == ARCMSR_HBC_ISR_THROTTLING_LEVEL) {
			writel(ARCMSR_HBCMU_DRV2IOP_POSTQUEUE_THROTTLING,
				&phbcmu->inbound_doorbell);
			throttling = 0;
		}
	}
}
2616 
/*
 * Type-D completion ISR: consume the in-memory done queue under
 * doneq_lock until the driver's index catches up with the firmware's
 * write pointer (slot 0 of the queue).  Index bit 0x4000 is a wrap
 * toggle distinguishing queue generations.
 */
static void arcmsr_hbaD_postqueue_isr(struct AdapterControlBlock *acb)
{
	u32 outbound_write_pointer, doneq_index, index_stripped, toggle;
	uint32_t addressLow;
	int error;
	struct MessageUnit_D  *pmu;
	struct ARCMSR_CDB *arcmsr_cdb;
	struct CommandControlBlock *ccb;
	unsigned long flags, ccb_cdb_phy;

	spin_lock_irqsave(&acb->doneq_lock, flags);
	pmu = acb->pmuD;
	outbound_write_pointer = pmu->done_qbuffer[0].addressLow + 1;
	doneq_index = pmu->doneq_index;
	if ((doneq_index & 0xFFF) != (outbound_write_pointer & 0xFFF)) {
		do {
			/* Advance the index, flipping the 0x4000 bit on wrap. */
			toggle = doneq_index & 0x4000;
			index_stripped = (doneq_index & 0xFFF) + 1;
			index_stripped %= ARCMSR_MAX_ARC1214_DONEQUEUE;
			pmu->doneq_index = index_stripped ? (index_stripped | toggle) :
				((toggle ^ 0x4000) + 1);
			doneq_index = pmu->doneq_index;
			addressLow = pmu->done_qbuffer[doneq_index &
				0xFFF].addressLow;
			/* Low 4 bits are status flags; the rest is the address. */
			ccb_cdb_phy = (addressLow & 0xFFFFFFF0);
			if (acb->cdb_phyadd_hipart)
				ccb_cdb_phy = ccb_cdb_phy | acb->cdb_phyadd_hipart;
			arcmsr_cdb = (struct ARCMSR_CDB *)(acb->vir2phy_offset
				+ ccb_cdb_phy);
			ccb = container_of(arcmsr_cdb,
				struct CommandControlBlock, arcmsr_cdb);
			error = (addressLow & ARCMSR_CCBREPLY_FLAG_ERROR_MODE1)
				? true : false;
			arcmsr_drain_donequeue(acb, ccb, error);
			/* Tell the firmware how far we have consumed. */
			writel(doneq_index, pmu->outboundlist_read_pointer);
		} while ((doneq_index & 0xFFF) !=
			(outbound_write_pointer & 0xFFF));
	}
	writel(ARCMSR_ARC1214_OUTBOUND_LIST_INTERRUPT_CLEAR,
		pmu->outboundlist_interrupt_cause);
	/* Read back to flush the interrupt-clear write. */
	readl(pmu->outboundlist_interrupt_cause);
	spin_unlock_irqrestore(&acb->doneq_lock, flags);
}
2660 
/*
** Drain the type-E completion queue under doneq_lock.  Entries in
** pCompletionQ identify the finished command by its SMID, which indexes
** straight into pccb_pool.  Consumption stops when our consumer index
** catches up with the hardware's producer index.
*/
static void arcmsr_hbaE_postqueue_isr(struct AdapterControlBlock *acb)
{
	uint32_t doneq_index;
	uint16_t cmdSMID;
	int error;
	struct MessageUnit_E __iomem *pmu;
	struct CommandControlBlock *ccb;
	unsigned long flags;

	spin_lock_irqsave(&acb->doneq_lock, flags);
	doneq_index = acb->doneq_index;
	pmu = acb->pmuE;
	while ((readl(&pmu->reply_post_producer_index) & 0xFFFF) != doneq_index) {
		cmdSMID = acb->pCompletionQ[doneq_index].cmdSMID;
		ccb = acb->pccb_pool[cmdSMID];
		error = (acb->pCompletionQ[doneq_index].cmdFlag
			& ARCMSR_CCBREPLY_FLAG_ERROR_MODE1) ? true : false;
		arcmsr_drain_donequeue(acb, ccb, error);
		doneq_index++;
		/* circular completion queue */
		if (doneq_index >= acb->completionQ_entry)
			doneq_index = 0;
	}
	acb->doneq_index = doneq_index;
	/* publish the new consumer index to the controller */
	writel(doneq_index, &pmu->reply_post_consumer_index);
	spin_unlock_irqrestore(&acb->doneq_lock, flags);
}
2687 
/*
** Drain the type-F completion queue under doneq_lock.  Unlike type E
** there is no producer register to poll: a slot whose cmdSMID is 0xffff
** is empty, so we consume entries until we hit one and re-mark each
** consumed slot with 0xffff for the next pass.
*/
static void arcmsr_hbaF_postqueue_isr(struct AdapterControlBlock *acb)
{
	uint32_t doneq_index;
	uint16_t cmdSMID;
	int error;
	struct MessageUnit_F __iomem *phbcmu;
	struct CommandControlBlock *ccb;
	unsigned long flags;

	spin_lock_irqsave(&acb->doneq_lock, flags);
	doneq_index = acb->doneq_index;
	phbcmu = acb->pmuF;
	while (1) {
		cmdSMID = acb->pCompletionQ[doneq_index].cmdSMID;
		if (cmdSMID == 0xffff)
			break;	/* empty slot: queue drained */
		ccb = acb->pccb_pool[cmdSMID];
		error = (acb->pCompletionQ[doneq_index].cmdFlag &
			ARCMSR_CCBREPLY_FLAG_ERROR_MODE1) ? true : false;
		arcmsr_drain_donequeue(acb, ccb, error);
		/* mark the slot free so a future scan stops here */
		acb->pCompletionQ[doneq_index].cmdSMID = 0xffff;
		doneq_index++;
		if (doneq_index >= acb->completionQ_entry)
			doneq_index = 0;
	}
	acb->doneq_index = doneq_index;
	/* publish the new consumer index to the controller */
	writel(doneq_index, &phbcmu->reply_post_consumer_index);
	spin_unlock_irqrestore(&acb->doneq_lock, flags);
}
2717 
2718 /*
2719 **********************************************************************************
2720 ** Handle a message interrupt
2721 **
2722 ** The only message interrupt we expect is in response to a query for the current adapter config.
2723 ** We want this in order to compare the drivemap so that we can detect newly-attached drives.
2724 **********************************************************************************
2725 */
2726 static void arcmsr_hbaA_message_isr(struct AdapterControlBlock *acb)
2727 {
2728 	struct MessageUnit_A __iomem *reg  = acb->pmuA;
2729 	/*clear interrupt and message state*/
2730 	writel(ARCMSR_MU_OUTBOUND_MESSAGE0_INT, &reg->outbound_intstatus);
2731 	if (acb->acb_flags & ACB_F_MSG_GET_CONFIG)
2732 		schedule_work(&acb->arcmsr_do_message_isr_bh);
2733 }
2734 static void arcmsr_hbaB_message_isr(struct AdapterControlBlock *acb)
2735 {
2736 	struct MessageUnit_B *reg  = acb->pmuB;
2737 
2738 	/*clear interrupt and message state*/
2739 	writel(ARCMSR_MESSAGE_INT_CLEAR_PATTERN, reg->iop2drv_doorbell);
2740 	if (acb->acb_flags & ACB_F_MSG_GET_CONFIG)
2741 		schedule_work(&acb->arcmsr_do_message_isr_bh);
2742 }
2743 /*
2744 **********************************************************************************
2745 ** Handle a message interrupt
2746 **
2747 ** The only message interrupt we expect is in response to a query for the
2748 ** current adapter config.
2749 ** We want this in order to compare the drivemap so that we can detect newly-attached drives.
2750 **********************************************************************************
2751 */
static void arcmsr_hbaC_message_isr(struct AdapterControlBlock *acb)
{
	struct MessageUnit_C __iomem *reg  = acb->pmuC;
	/*clear interrupt and message state*/
	/* type C uses a dedicated write-1-to-clear doorbell-clear register */
	writel(ARCMSR_HBCMU_IOP2DRV_MESSAGE_CMD_DONE_DOORBELL_CLEAR, &reg->outbound_doorbell_clear);
	/* defer "get config" reply processing to the workqueue handler */
	if (acb->acb_flags & ACB_F_MSG_GET_CONFIG)
		schedule_work(&acb->arcmsr_do_message_isr_bh);
}
2760 
/* Ack the ARC1214 message-done doorbell and kick the config workqueue. */
static void arcmsr_hbaD_message_isr(struct AdapterControlBlock *acb)
{
	struct MessageUnit_D *reg  = acb->pmuD;

	writel(ARCMSR_ARC1214_IOP2DRV_MESSAGE_CMD_DONE, reg->outbound_doorbell);
	/* read back to flush the posted write before returning from the ISR */
	readl(reg->outbound_doorbell);
	if (acb->acb_flags & ACB_F_MSG_GET_CONFIG)
		schedule_work(&acb->arcmsr_do_message_isr_bh);
}
2770 
2771 static void arcmsr_hbaE_message_isr(struct AdapterControlBlock *acb)
2772 {
2773 	struct MessageUnit_E __iomem *reg  = acb->pmuE;
2774 
2775 	writel(0, &reg->host_int_status);
2776 	if (acb->acb_flags & ACB_F_MSG_GET_CONFIG)
2777 		schedule_work(&acb->arcmsr_do_message_isr_bh);
2778 }
2779 
/*
** Top-level IRQ handler for type-A adapters: dispatch doorbell,
** post-queue and message interrupts, looping until the (masked)
** outbound status register reports no further work.
** Returns IRQ_HANDLED if anything was serviced, IRQ_NONE otherwise.
*/
static int arcmsr_hbaA_handle_isr(struct AdapterControlBlock *acb)
{
	uint32_t outbound_intstatus;
	struct MessageUnit_A __iomem *reg = acb->pmuA;
	/* only consider sources we have enabled */
	outbound_intstatus = readl(&reg->outbound_intstatus) &
		acb->outbound_int_enable;
	if (!(outbound_intstatus & ARCMSR_MU_OUTBOUND_HANDLE_INT))
		return IRQ_NONE;
	do {
		/* ack the sources we are about to service */
		writel(outbound_intstatus, &reg->outbound_intstatus);
		if (outbound_intstatus & ARCMSR_MU_OUTBOUND_DOORBELL_INT)
			arcmsr_hbaA_doorbell_isr(acb);
		if (outbound_intstatus & ARCMSR_MU_OUTBOUND_POSTQUEUE_INT)
			arcmsr_hbaA_postqueue_isr(acb);
		if (outbound_intstatus & ARCMSR_MU_OUTBOUND_MESSAGE0_INT)
			arcmsr_hbaA_message_isr(acb);
		/* re-read in case more work arrived while servicing */
		outbound_intstatus = readl(&reg->outbound_intstatus) &
			acb->outbound_int_enable;
	} while (outbound_intstatus & (ARCMSR_MU_OUTBOUND_DOORBELL_INT
		| ARCMSR_MU_OUTBOUND_POSTQUEUE_INT
		| ARCMSR_MU_OUTBOUND_MESSAGE0_INT));
	return IRQ_HANDLED;
}
2803 
/*
** Top-level IRQ handler for type-B adapters: dispatch ioctl-data,
** post-queue and message doorbells, looping until the (masked)
** iop2drv doorbell reports no further work.
** Returns IRQ_HANDLED if anything was serviced, IRQ_NONE otherwise.
*/
static int arcmsr_hbaB_handle_isr(struct AdapterControlBlock *acb)
{
	uint32_t outbound_doorbell;
	struct MessageUnit_B *reg = acb->pmuB;
	/* only consider sources we have enabled */
	outbound_doorbell = readl(reg->iop2drv_doorbell) &
				acb->outbound_int_enable;
	if (!outbound_doorbell)
		return IRQ_NONE;
	do {
		/* ack the serviced bits, then signal end-of-interrupt */
		writel(~outbound_doorbell, reg->iop2drv_doorbell);
		writel(ARCMSR_DRV2IOP_END_OF_INTERRUPT, reg->drv2iop_doorbell);
		if (outbound_doorbell & ARCMSR_IOP2DRV_DATA_WRITE_OK)
			arcmsr_iop2drv_data_wrote_handle(acb);
		if (outbound_doorbell & ARCMSR_IOP2DRV_DATA_READ_OK)
			arcmsr_iop2drv_data_read_handle(acb);
		if (outbound_doorbell & ARCMSR_IOP2DRV_CDB_DONE)
			arcmsr_hbaB_postqueue_isr(acb);
		if (outbound_doorbell & ARCMSR_IOP2DRV_MESSAGE_CMD_DONE)
			arcmsr_hbaB_message_isr(acb);
		/* re-read in case more work arrived while servicing */
		outbound_doorbell = readl(reg->iop2drv_doorbell) &
			acb->outbound_int_enable;
	} while (outbound_doorbell & (ARCMSR_IOP2DRV_DATA_WRITE_OK
		| ARCMSR_IOP2DRV_DATA_READ_OK
		| ARCMSR_IOP2DRV_CDB_DONE
		| ARCMSR_IOP2DRV_MESSAGE_CMD_DONE));
	return IRQ_HANDLED;
}
2831 
/*
** Top-level IRQ handler for type-C adapters: dispatch doorbell and
** post-queue interrupts until host_int_status shows neither pending.
** Returns IRQ_HANDLED if anything was serviced, IRQ_NONE otherwise.
*/
static int arcmsr_hbaC_handle_isr(struct AdapterControlBlock *pACB)
{
	uint32_t host_interrupt_status;
	struct MessageUnit_C __iomem *phbcmu = pACB->pmuC;
	/*
	*********************************************
	**   check outbound intstatus
	*********************************************
	*/
	host_interrupt_status = readl(&phbcmu->host_int_status) &
		(ARCMSR_HBCMU_OUTBOUND_POSTQUEUE_ISR |
		ARCMSR_HBCMU_OUTBOUND_DOORBELL_ISR);
	if (!host_interrupt_status)
		return IRQ_NONE;
	do {
		if (host_interrupt_status & ARCMSR_HBCMU_OUTBOUND_DOORBELL_ISR)
			arcmsr_hbaC_doorbell_isr(pACB);
		/* MU post queue interrupts*/
		if (host_interrupt_status & ARCMSR_HBCMU_OUTBOUND_POSTQUEUE_ISR)
			arcmsr_hbaC_postqueue_isr(pACB);
		/* re-read (unmasked) to catch work that arrived meanwhile */
		host_interrupt_status = readl(&phbcmu->host_int_status);
	} while (host_interrupt_status & (ARCMSR_HBCMU_OUTBOUND_POSTQUEUE_ISR |
		ARCMSR_HBCMU_OUTBOUND_DOORBELL_ISR));
	return IRQ_HANDLED;
}
2857 
/*
** Top-level IRQ handler for type-D (ARC1214) adapters: dispatch
** post-queue and doorbell interrupts until host_int_status is clear.
** Returns IRQ_HANDLED if anything was serviced, IRQ_NONE otherwise.
*/
static irqreturn_t arcmsr_hbaD_handle_isr(struct AdapterControlBlock *pACB)
{
	u32 host_interrupt_status;
	struct MessageUnit_D  *pmu = pACB->pmuD;

	host_interrupt_status = readl(pmu->host_int_status) &
		(ARCMSR_ARC1214_OUTBOUND_POSTQUEUE_ISR |
		ARCMSR_ARC1214_OUTBOUND_DOORBELL_ISR);
	if (!host_interrupt_status)
		return IRQ_NONE;
	do {
		/* MU post queue interrupts*/
		if (host_interrupt_status &
			ARCMSR_ARC1214_OUTBOUND_POSTQUEUE_ISR)
			arcmsr_hbaD_postqueue_isr(pACB);
		if (host_interrupt_status &
			ARCMSR_ARC1214_OUTBOUND_DOORBELL_ISR)
			arcmsr_hbaD_doorbell_isr(pACB);
		/* re-read to catch work that arrived while servicing */
		host_interrupt_status = readl(pmu->host_int_status);
	} while (host_interrupt_status &
		(ARCMSR_ARC1214_OUTBOUND_POSTQUEUE_ISR |
		ARCMSR_ARC1214_OUTBOUND_DOORBELL_ISR));
	return IRQ_HANDLED;
}
2882 
/*
** Top-level IRQ handler for type-E adapters: dispatch doorbell and
** post-queue interrupts until host_int_status is clear.
** Returns IRQ_HANDLED if anything was serviced, IRQ_NONE otherwise.
*/
static irqreturn_t arcmsr_hbaE_handle_isr(struct AdapterControlBlock *pACB)
{
	uint32_t host_interrupt_status;
	struct MessageUnit_E __iomem *pmu = pACB->pmuE;

	host_interrupt_status = readl(&pmu->host_int_status) &
		(ARCMSR_HBEMU_OUTBOUND_POSTQUEUE_ISR |
		ARCMSR_HBEMU_OUTBOUND_DOORBELL_ISR);
	if (!host_interrupt_status)
		return IRQ_NONE;
	do {
		/* MU ioctl transfer doorbell interrupts*/
		if (host_interrupt_status & ARCMSR_HBEMU_OUTBOUND_DOORBELL_ISR) {
			arcmsr_hbaE_doorbell_isr(pACB);
		}
		/* MU post queue interrupts*/
		if (host_interrupt_status & ARCMSR_HBEMU_OUTBOUND_POSTQUEUE_ISR) {
			arcmsr_hbaE_postqueue_isr(pACB);
		}
		/* re-read to catch work that arrived while servicing */
		host_interrupt_status = readl(&pmu->host_int_status);
	} while (host_interrupt_status & (ARCMSR_HBEMU_OUTBOUND_POSTQUEUE_ISR |
		ARCMSR_HBEMU_OUTBOUND_DOORBELL_ISR));
	return IRQ_HANDLED;
}
2907 
/*
** Top-level IRQ handler for type-F adapters.  Type F reuses the type-E
** interrupt bit definitions and doorbell handler; only the post-queue
** drain differs.  Returns IRQ_HANDLED if anything was serviced,
** IRQ_NONE otherwise.
*/
static irqreturn_t arcmsr_hbaF_handle_isr(struct AdapterControlBlock *pACB)
{
	uint32_t host_interrupt_status;
	struct MessageUnit_F __iomem *phbcmu = pACB->pmuF;

	host_interrupt_status = readl(&phbcmu->host_int_status) &
		(ARCMSR_HBEMU_OUTBOUND_POSTQUEUE_ISR |
		ARCMSR_HBEMU_OUTBOUND_DOORBELL_ISR);
	if (!host_interrupt_status)
		return IRQ_NONE;
	do {
		/* MU post queue interrupts*/
		if (host_interrupt_status & ARCMSR_HBEMU_OUTBOUND_POSTQUEUE_ISR)
			arcmsr_hbaF_postqueue_isr(pACB);

		/* MU ioctl transfer doorbell interrupts*/
		if (host_interrupt_status & ARCMSR_HBEMU_OUTBOUND_DOORBELL_ISR)
			arcmsr_hbaE_doorbell_isr(pACB);

		/* re-read to catch work that arrived while servicing */
		host_interrupt_status = readl(&phbcmu->host_int_status);
	} while (host_interrupt_status & (ARCMSR_HBEMU_OUTBOUND_POSTQUEUE_ISR |
		ARCMSR_HBEMU_OUTBOUND_DOORBELL_ISR));
	return IRQ_HANDLED;
}
2932 
2933 static irqreturn_t arcmsr_interrupt(struct AdapterControlBlock *acb)
2934 {
2935 	switch (acb->adapter_type) {
2936 	case ACB_ADAPTER_TYPE_A:
2937 		return arcmsr_hbaA_handle_isr(acb);
2938 	case ACB_ADAPTER_TYPE_B:
2939 		return arcmsr_hbaB_handle_isr(acb);
2940 	case ACB_ADAPTER_TYPE_C:
2941 		return arcmsr_hbaC_handle_isr(acb);
2942 	case ACB_ADAPTER_TYPE_D:
2943 		return arcmsr_hbaD_handle_isr(acb);
2944 	case ACB_ADAPTER_TYPE_E:
2945 		return arcmsr_hbaE_handle_isr(acb);
2946 	case ACB_ADAPTER_TYPE_F:
2947 		return arcmsr_hbaF_handle_isr(acb);
2948 	default:
2949 		return IRQ_NONE;
2950 	}
2951 }
2952 
2953 static void arcmsr_iop_parking(struct AdapterControlBlock *acb)
2954 {
2955 	if (acb) {
2956 		/* stop adapter background rebuild */
2957 		if (acb->acb_flags & ACB_F_MSG_START_BGRB) {
2958 			uint32_t intmask_org;
2959 			acb->acb_flags &= ~ACB_F_MSG_START_BGRB;
2960 			intmask_org = arcmsr_disable_outbound_ints(acb);
2961 			arcmsr_stop_adapter_bgrb(acb);
2962 			arcmsr_flush_adapter_cache(acb);
2963 			arcmsr_enable_outbound_ints(acb, intmask_org);
2964 		}
2965 	}
2966 }
2967 
2968 
2969 void arcmsr_clear_iop2drv_rqueue_buffer(struct AdapterControlBlock *acb)
2970 {
2971 	uint32_t	i;
2972 
2973 	if (acb->acb_flags & ACB_F_IOPDATA_OVERFLOW) {
2974 		for (i = 0; i < 15; i++) {
2975 			if (acb->acb_flags & ACB_F_IOPDATA_OVERFLOW) {
2976 				acb->acb_flags &= ~ACB_F_IOPDATA_OVERFLOW;
2977 				acb->rqbuf_getIndex = 0;
2978 				acb->rqbuf_putIndex = 0;
2979 				arcmsr_iop_message_read(acb);
2980 				mdelay(30);
2981 			} else if (acb->rqbuf_getIndex !=
2982 				   acb->rqbuf_putIndex) {
2983 				acb->rqbuf_getIndex = 0;
2984 				acb->rqbuf_putIndex = 0;
2985 				mdelay(30);
2986 			} else
2987 				break;
2988 		}
2989 	}
2990 }
2991 
/*
** Handle the WRITE_BUFFER/READ_BUFFER pass-through used by Areca's
** management API.  CDB bytes 5-8 encode a control code and the single
** scatterlist segment carries a CMD_MESSAGE_FIELD buffer for both
** input and output.  Returns 0 on success or ARCMSR_MESSAGE_FAIL.
*/
static int arcmsr_iop_message_xfer(struct AdapterControlBlock *acb,
		struct scsi_cmnd *cmd)
{
	char *buffer;
	unsigned short use_sg;
	int retvalue = 0, transfer_len = 0;
	unsigned long flags;
	struct CMD_MESSAGE_FIELD *pcmdmessagefld;
	uint32_t controlcode = (uint32_t)cmd->cmnd[5] << 24 |
		(uint32_t)cmd->cmnd[6] << 16 |
		(uint32_t)cmd->cmnd[7] << 8 |
		(uint32_t)cmd->cmnd[8];
	struct scatterlist *sg;

	/* map the first (and only supported) scatterlist segment */
	use_sg = scsi_sg_count(cmd);
	sg = scsi_sglist(cmd);
	buffer = kmap_atomic(sg_page(sg)) + sg->offset;
	if (use_sg > 1) {
		/* multi-segment buffers are not supported by this path */
		retvalue = ARCMSR_MESSAGE_FAIL;
		goto message_out;
	}
	transfer_len += sg->length;
	if (transfer_len > sizeof(struct CMD_MESSAGE_FIELD)) {
		retvalue = ARCMSR_MESSAGE_FAIL;
		pr_info("%s: ARCMSR_MESSAGE_FAIL!\n", __func__);
		goto message_out;
	}
	pcmdmessagefld = (struct CMD_MESSAGE_FIELD *)buffer;
	switch (controlcode) {
	case ARCMSR_MESSAGE_READ_RQBUFFER: {
		/* copy queued IOP-to-driver data out of the circular rqbuffer */
		unsigned char *ver_addr;
		uint8_t *ptmpQbuffer;
		uint32_t allxfer_len = 0;
		ver_addr = kmalloc(ARCMSR_API_DATA_BUFLEN, GFP_ATOMIC);
		if (!ver_addr) {
			retvalue = ARCMSR_MESSAGE_FAIL;
			pr_info("%s: memory not enough!\n", __func__);
			goto message_out;
		}
		ptmpQbuffer = ver_addr;
		spin_lock_irqsave(&acb->rqbuffer_lock, flags);
		if (acb->rqbuf_getIndex != acb->rqbuf_putIndex) {
			unsigned int tail = acb->rqbuf_getIndex;
			unsigned int head = acb->rqbuf_putIndex;
			unsigned int cnt_to_end = CIRC_CNT_TO_END(head, tail, ARCMSR_MAX_QBUFFER);

			allxfer_len = CIRC_CNT(head, tail, ARCMSR_MAX_QBUFFER);
			if (allxfer_len > ARCMSR_API_DATA_BUFLEN)
				allxfer_len = ARCMSR_API_DATA_BUFLEN;

			/* copy in one or two chunks depending on ring wrap */
			if (allxfer_len <= cnt_to_end)
				memcpy(ptmpQbuffer, acb->rqbuffer + tail, allxfer_len);
			else {
				memcpy(ptmpQbuffer, acb->rqbuffer + tail, cnt_to_end);
				memcpy(ptmpQbuffer + cnt_to_end, acb->rqbuffer, allxfer_len - cnt_to_end);
			}
			acb->rqbuf_getIndex = (acb->rqbuf_getIndex + allxfer_len) % ARCMSR_MAX_QBUFFER;
		}
		memcpy(pcmdmessagefld->messagedatabuffer, ver_addr,
			allxfer_len);
		if (acb->acb_flags & ACB_F_IOPDATA_OVERFLOW) {
			/* ring drained: try to pull more data from the IOP */
			struct QBUFFER __iomem *prbuffer;
			acb->acb_flags &= ~ACB_F_IOPDATA_OVERFLOW;
			prbuffer = arcmsr_get_iop_rqbuffer(acb);
			if (arcmsr_Read_iop_rqbuffer_data(acb, prbuffer) == 0)
				acb->acb_flags |= ACB_F_IOPDATA_OVERFLOW;
		}
		spin_unlock_irqrestore(&acb->rqbuffer_lock, flags);
		kfree(ver_addr);
		pcmdmessagefld->cmdmessage.Length = allxfer_len;
		if (acb->fw_flag == FW_DEADLOCK)
			pcmdmessagefld->cmdmessage.ReturnCode =
				ARCMSR_MESSAGE_RETURNCODE_BUS_HANG_ON;
		else
			pcmdmessagefld->cmdmessage.ReturnCode =
				ARCMSR_MESSAGE_RETURNCODE_OK;
		break;
	}
	case ARCMSR_MESSAGE_WRITE_WQBUFFER: {
		/* queue user data into the circular wqbuffer for the IOP */
		unsigned char *ver_addr;
		uint32_t user_len;
		int32_t cnt2end;
		uint8_t *pQbuffer, *ptmpuserbuffer;

		user_len = pcmdmessagefld->cmdmessage.Length;
		if (user_len > ARCMSR_API_DATA_BUFLEN) {
			retvalue = ARCMSR_MESSAGE_FAIL;
			goto message_out;
		}

		ver_addr = kmalloc(ARCMSR_API_DATA_BUFLEN, GFP_ATOMIC);
		if (!ver_addr) {
			retvalue = ARCMSR_MESSAGE_FAIL;
			goto message_out;
		}
		ptmpuserbuffer = ver_addr;

		memcpy(ptmpuserbuffer,
			pcmdmessagefld->messagedatabuffer, user_len);
		spin_lock_irqsave(&acb->wqbuffer_lock, flags);
		if (acb->wqbuf_putIndex != acb->wqbuf_getIndex) {
			/* previous write still pending: flush it and fail
			 * this one with ILLEGAL_REQUEST sense data */
			struct SENSE_DATA *sensebuffer =
				(struct SENSE_DATA *)cmd->sense_buffer;
			arcmsr_write_ioctldata2iop(acb);
			/* has error report sensedata */
			sensebuffer->ErrorCode = SCSI_SENSE_CURRENT_ERRORS;
			sensebuffer->SenseKey = ILLEGAL_REQUEST;
			sensebuffer->AdditionalSenseLength = 0x0A;
			sensebuffer->AdditionalSenseCode = 0x20;
			sensebuffer->Valid = 1;
			retvalue = ARCMSR_MESSAGE_FAIL;
		} else {
			/* copy into the ring, wrapping at the end if needed */
			pQbuffer = &acb->wqbuffer[acb->wqbuf_putIndex];
			cnt2end = ARCMSR_MAX_QBUFFER - acb->wqbuf_putIndex;
			if (user_len > cnt2end) {
				memcpy(pQbuffer, ptmpuserbuffer, cnt2end);
				ptmpuserbuffer += cnt2end;
				user_len -= cnt2end;
				acb->wqbuf_putIndex = 0;
				pQbuffer = acb->wqbuffer;
			}
			memcpy(pQbuffer, ptmpuserbuffer, user_len);
			acb->wqbuf_putIndex += user_len;
			acb->wqbuf_putIndex %= ARCMSR_MAX_QBUFFER;
			if (acb->acb_flags & ACB_F_MESSAGE_WQBUFFER_CLEARED) {
				acb->acb_flags &=
						~ACB_F_MESSAGE_WQBUFFER_CLEARED;
				arcmsr_write_ioctldata2iop(acb);
			}
		}
		spin_unlock_irqrestore(&acb->wqbuffer_lock, flags);
		kfree(ver_addr);
		if (acb->fw_flag == FW_DEADLOCK)
			pcmdmessagefld->cmdmessage.ReturnCode =
				ARCMSR_MESSAGE_RETURNCODE_BUS_HANG_ON;
		else
			pcmdmessagefld->cmdmessage.ReturnCode =
				ARCMSR_MESSAGE_RETURNCODE_OK;
		break;
	}
	case ARCMSR_MESSAGE_CLEAR_RQBUFFER: {
		/* reset and zero the IOP-to-driver read ring */
		uint8_t *pQbuffer = acb->rqbuffer;

		arcmsr_clear_iop2drv_rqueue_buffer(acb);
		spin_lock_irqsave(&acb->rqbuffer_lock, flags);
		acb->acb_flags |= ACB_F_MESSAGE_RQBUFFER_CLEARED;
		acb->rqbuf_getIndex = 0;
		acb->rqbuf_putIndex = 0;
		memset(pQbuffer, 0, ARCMSR_MAX_QBUFFER);
		spin_unlock_irqrestore(&acb->rqbuffer_lock, flags);
		if (acb->fw_flag == FW_DEADLOCK)
			pcmdmessagefld->cmdmessage.ReturnCode =
				ARCMSR_MESSAGE_RETURNCODE_BUS_HANG_ON;
		else
			pcmdmessagefld->cmdmessage.ReturnCode =
				ARCMSR_MESSAGE_RETURNCODE_OK;
		break;
	}
	case ARCMSR_MESSAGE_CLEAR_WQBUFFER: {
		/* reset and zero the driver-to-IOP write ring */
		uint8_t *pQbuffer = acb->wqbuffer;
		spin_lock_irqsave(&acb->wqbuffer_lock, flags);
		acb->acb_flags |= (ACB_F_MESSAGE_WQBUFFER_CLEARED |
			ACB_F_MESSAGE_WQBUFFER_READED);
		acb->wqbuf_getIndex = 0;
		acb->wqbuf_putIndex = 0;
		memset(pQbuffer, 0, ARCMSR_MAX_QBUFFER);
		spin_unlock_irqrestore(&acb->wqbuffer_lock, flags);
		if (acb->fw_flag == FW_DEADLOCK)
			pcmdmessagefld->cmdmessage.ReturnCode =
				ARCMSR_MESSAGE_RETURNCODE_BUS_HANG_ON;
		else
			pcmdmessagefld->cmdmessage.ReturnCode =
				ARCMSR_MESSAGE_RETURNCODE_OK;
		break;
	}
	case ARCMSR_MESSAGE_CLEAR_ALLQBUFFER: {
		/* reset and zero both rings */
		uint8_t *pQbuffer;
		arcmsr_clear_iop2drv_rqueue_buffer(acb);
		spin_lock_irqsave(&acb->rqbuffer_lock, flags);
		acb->acb_flags |= ACB_F_MESSAGE_RQBUFFER_CLEARED;
		acb->rqbuf_getIndex = 0;
		acb->rqbuf_putIndex = 0;
		pQbuffer = acb->rqbuffer;
		memset(pQbuffer, 0, sizeof(struct QBUFFER));
		spin_unlock_irqrestore(&acb->rqbuffer_lock, flags);
		spin_lock_irqsave(&acb->wqbuffer_lock, flags);
		acb->acb_flags |= (ACB_F_MESSAGE_WQBUFFER_CLEARED |
			ACB_F_MESSAGE_WQBUFFER_READED);
		acb->wqbuf_getIndex = 0;
		acb->wqbuf_putIndex = 0;
		pQbuffer = acb->wqbuffer;
		memset(pQbuffer, 0, sizeof(struct QBUFFER));
		spin_unlock_irqrestore(&acb->wqbuffer_lock, flags);
		if (acb->fw_flag == FW_DEADLOCK)
			pcmdmessagefld->cmdmessage.ReturnCode =
				ARCMSR_MESSAGE_RETURNCODE_BUS_HANG_ON;
		else
			pcmdmessagefld->cmdmessage.ReturnCode =
				ARCMSR_MESSAGE_RETURNCODE_OK;
		break;
	}
	case ARCMSR_MESSAGE_RETURN_CODE_3F: {
		if (acb->fw_flag == FW_DEADLOCK)
			pcmdmessagefld->cmdmessage.ReturnCode =
				ARCMSR_MESSAGE_RETURNCODE_BUS_HANG_ON;
		else
			pcmdmessagefld->cmdmessage.ReturnCode =
				ARCMSR_MESSAGE_RETURNCODE_3F;
		break;
	}
	case ARCMSR_MESSAGE_SAY_HELLO: {
		int8_t *hello_string = "Hello! I am ARCMSR";
		if (acb->fw_flag == FW_DEADLOCK)
			pcmdmessagefld->cmdmessage.ReturnCode =
				ARCMSR_MESSAGE_RETURNCODE_BUS_HANG_ON;
		else
			pcmdmessagefld->cmdmessage.ReturnCode =
				ARCMSR_MESSAGE_RETURNCODE_OK;
		memcpy(pcmdmessagefld->messagedatabuffer,
			hello_string, (int16_t)strlen(hello_string));
		break;
	}
	case ARCMSR_MESSAGE_SAY_GOODBYE: {
		if (acb->fw_flag == FW_DEADLOCK)
			pcmdmessagefld->cmdmessage.ReturnCode =
				ARCMSR_MESSAGE_RETURNCODE_BUS_HANG_ON;
		else
			pcmdmessagefld->cmdmessage.ReturnCode =
				ARCMSR_MESSAGE_RETURNCODE_OK;
		arcmsr_iop_parking(acb);
		break;
	}
	case ARCMSR_MESSAGE_FLUSH_ADAPTER_CACHE: {
		if (acb->fw_flag == FW_DEADLOCK)
			pcmdmessagefld->cmdmessage.ReturnCode =
				ARCMSR_MESSAGE_RETURNCODE_BUS_HANG_ON;
		else
			pcmdmessagefld->cmdmessage.ReturnCode =
				ARCMSR_MESSAGE_RETURNCODE_OK;
		arcmsr_flush_adapter_cache(acb);
		break;
	}
	default:
		retvalue = ARCMSR_MESSAGE_FAIL;
		pr_info("%s: unknown controlcode!\n", __func__);
	}
message_out:
	if (use_sg) {
		struct scatterlist *sg = scsi_sglist(cmd);
		kunmap_atomic(buffer - sg->offset);
	}
	return retvalue;
}
3245 
3246 static struct CommandControlBlock *arcmsr_get_freeccb(struct AdapterControlBlock *acb)
3247 {
3248 	struct list_head *head;
3249 	struct CommandControlBlock *ccb = NULL;
3250 	unsigned long flags;
3251 
3252 	spin_lock_irqsave(&acb->ccblist_lock, flags);
3253 	head = &acb->ccb_free_list;
3254 	if (!list_empty(head)) {
3255 		ccb = list_entry(head->next, struct CommandControlBlock, list);
3256 		list_del_init(&ccb->list);
3257 	}else{
3258 		spin_unlock_irqrestore(&acb->ccblist_lock, flags);
3259 		return NULL;
3260 	}
3261 	spin_unlock_irqrestore(&acb->ccblist_lock, flags);
3262 	return ccb;
3263 }
3264 
3265 static void arcmsr_handle_virtual_command(struct AdapterControlBlock *acb,
3266 		struct scsi_cmnd *cmd)
3267 {
3268 	switch (cmd->cmnd[0]) {
3269 	case INQUIRY: {
3270 		unsigned char inqdata[36];
3271 		char *buffer;
3272 		struct scatterlist *sg;
3273 
3274 		if (cmd->device->lun) {
3275 			cmd->result = (DID_TIME_OUT << 16);
3276 			scsi_done(cmd);
3277 			return;
3278 		}
3279 		inqdata[0] = TYPE_PROCESSOR;
3280 		/* Periph Qualifier & Periph Dev Type */
3281 		inqdata[1] = 0;
3282 		/* rem media bit & Dev Type Modifier */
3283 		inqdata[2] = 0;
3284 		/* ISO, ECMA, & ANSI versions */
3285 		inqdata[4] = 31;
3286 		/* length of additional data */
3287 		memcpy(&inqdata[8], "Areca   ", 8);
3288 		/* Vendor Identification */
3289 		memcpy(&inqdata[16], "RAID controller ", 16);
3290 		/* Product Identification */
3291 		memcpy(&inqdata[32], "R001", 4); /* Product Revision */
3292 
3293 		sg = scsi_sglist(cmd);
3294 		buffer = kmap_atomic(sg_page(sg)) + sg->offset;
3295 
3296 		memcpy(buffer, inqdata, sizeof(inqdata));
3297 		sg = scsi_sglist(cmd);
3298 		kunmap_atomic(buffer - sg->offset);
3299 
3300 		scsi_done(cmd);
3301 	}
3302 	break;
3303 	case WRITE_BUFFER:
3304 	case READ_BUFFER: {
3305 		if (arcmsr_iop_message_xfer(acb, cmd))
3306 			cmd->result = (DID_ERROR << 16);
3307 		scsi_done(cmd);
3308 	}
3309 	break;
3310 	default:
3311 		scsi_done(cmd);
3312 	}
3313 }
3314 
/*
** SCSI mid-layer queuecommand entry (wrapped by DEF_SCSI_QCMD below).
** Rejects commands once the adapter is gone, redirects target ID 16 to
** the virtual message device, otherwise builds a CCB and posts it to
** the IOP.  Returns 0 or SCSI_MLQUEUE_HOST_BUSY when no CCB is free.
*/
static int arcmsr_queue_command_lck(struct scsi_cmnd *cmd)
{
	struct Scsi_Host *host = cmd->device->host;
	struct AdapterControlBlock *acb = (struct AdapterControlBlock *) host->hostdata;
	struct CommandControlBlock *ccb;
	int target = cmd->device->id;

	/* adapter hot-removed: fail everything immediately */
	if (acb->acb_flags & ACB_F_ADAPTER_REMOVED) {
		cmd->result = (DID_NO_CONNECT << 16);
		scsi_done(cmd);
		return 0;
	}
	cmd->host_scribble = NULL;
	cmd->result = 0;
	if (target == 16) {
		/* virtual device for iop message transfer */
		arcmsr_handle_virtual_command(acb, cmd);
		return 0;
	}
	ccb = arcmsr_get_freeccb(acb);
	if (!ccb)
		return SCSI_MLQUEUE_HOST_BUSY;
	if (arcmsr_build_ccb( acb, ccb, cmd ) == FAILED) {
		cmd->result = (DID_ERROR << 16) | SAM_STAT_RESERVATION_CONFLICT;
		scsi_done(cmd);
		return 0;
	}
	arcmsr_post_ccb(acb, ccb);
	return 0;
}
3345 
3346 static DEF_SCSI_QCMD(arcmsr_queue_command)
3347 
3348 static int arcmsr_sdev_configure(struct scsi_device *sdev,
3349 				 struct queue_limits *lim)
3350 {
3351 	unsigned int	dev_timeout;
3352 
3353 	dev_timeout = sdev->request_queue->rq_timeout;
3354 	if ((cmd_timeout > 0) && ((cmd_timeout * HZ) > dev_timeout))
3355 		blk_queue_rq_timeout(sdev->request_queue, cmd_timeout * HZ);
3356 	return 0;
3357 }
3358 
3359 static void arcmsr_get_adapter_config(struct AdapterControlBlock *pACB, uint32_t *rwbuffer)
3360 {
3361 	int count;
3362 	uint32_t *acb_firm_model = (uint32_t *)pACB->firm_model;
3363 	uint32_t *acb_firm_version = (uint32_t *)pACB->firm_version;
3364 	uint32_t *acb_device_map = (uint32_t *)pACB->device_map;
3365 	uint32_t *firm_model = &rwbuffer[15];
3366 	uint32_t *firm_version = &rwbuffer[17];
3367 	uint32_t *device_map = &rwbuffer[21];
3368 
3369 	count = 2;
3370 	while (count) {
3371 		*acb_firm_model = readl(firm_model);
3372 		acb_firm_model++;
3373 		firm_model++;
3374 		count--;
3375 	}
3376 	count = 4;
3377 	while (count) {
3378 		*acb_firm_version = readl(firm_version);
3379 		acb_firm_version++;
3380 		firm_version++;
3381 		count--;
3382 	}
3383 	count = 4;
3384 	while (count) {
3385 		*acb_device_map = readl(device_map);
3386 		acb_device_map++;
3387 		device_map++;
3388 		count--;
3389 	}
3390 	pACB->signature = readl(&rwbuffer[0]);
3391 	pACB->firm_request_len = readl(&rwbuffer[1]);
3392 	pACB->firm_numbers_queue = readl(&rwbuffer[2]);
3393 	pACB->firm_sdram_size = readl(&rwbuffer[3]);
3394 	pACB->firm_hd_channels = readl(&rwbuffer[4]);
3395 	pACB->firm_cfg_version = readl(&rwbuffer[25]);
3396 	if (pACB->adapter_type == ACB_ADAPTER_TYPE_F)
3397 		pACB->firm_PicStatus = readl(&rwbuffer[30]);
3398 	else
3399 		pACB->firm_PicStatus = 0;
3400 	pr_notice("Areca RAID Controller%d: Model %s, F/W %s\n",
3401 		pACB->host->host_no,
3402 		pACB->firm_model,
3403 		pACB->firm_version);
3404 }
3405 
/*
** Fetch the firmware configuration from a type-A adapter: wait for
** firmware, post GET_CONFIG, then parse the reply buffer.
** Returns false if the message interrupt never arrives.
*/
static bool arcmsr_hbaA_get_config(struct AdapterControlBlock *acb)
{
	struct MessageUnit_A __iomem *reg = acb->pmuA;

	arcmsr_wait_firmware_ready(acb);
	writel(ARCMSR_INBOUND_MESG0_GET_CONFIG, &reg->inbound_msgaddr0);
	if (!arcmsr_hbaA_wait_msgint_ready(acb)) {
		printk(KERN_NOTICE "arcmsr%d: wait 'get adapter firmware \
			miscellaneous data' timeout \n", acb->host->host_no);
		return false;
	}
	arcmsr_get_adapter_config(acb, reg->message_rwbuffer);
	return true;
}
/*
** Fetch the firmware configuration from a type-B adapter: enter driver
** mode first, then post GET_CONFIG and parse the reply buffer.
** Returns false if either handshake times out.
*/
static bool arcmsr_hbaB_get_config(struct AdapterControlBlock *acb)
{
	struct MessageUnit_B *reg = acb->pmuB;

	arcmsr_wait_firmware_ready(acb);
	writel(ARCMSR_MESSAGE_START_DRIVER_MODE, reg->drv2iop_doorbell);
	if (!arcmsr_hbaB_wait_msgint_ready(acb)) {
		printk(KERN_ERR "arcmsr%d: can't set driver mode.\n", acb->host->host_no);
		return false;
	}
	writel(ARCMSR_MESSAGE_GET_CONFIG, reg->drv2iop_doorbell);
	if (!arcmsr_hbaB_wait_msgint_ready(acb)) {
		printk(KERN_NOTICE "arcmsr%d: wait 'get adapter firmware \
			miscellaneous data' timeout \n", acb->host->host_no);
		return false;
	}
	arcmsr_get_adapter_config(acb, reg->message_rwbuffer);
	return true;
}
3439 
/*
** Fetch the firmware configuration from a type-C adapter.  Outbound
** interrupts are masked first (this runs before the IRQ path is set
** up), then GET_CONFIG is posted with a doorbell kick.
** Returns false if the message interrupt never arrives.
*/
static bool arcmsr_hbaC_get_config(struct AdapterControlBlock *pACB)
{
	uint32_t intmask_org;
	struct MessageUnit_C __iomem *reg = pACB->pmuC;

	/* disable all outbound interrupt */
	intmask_org = readl(&reg->host_int_mask); /* disable outbound message0 int */
	writel(intmask_org|ARCMSR_HBCMU_ALL_INTMASKENABLE, &reg->host_int_mask);
	/* wait firmware ready */
	arcmsr_wait_firmware_ready(pACB);
	/* post "get config" instruction */
	writel(ARCMSR_INBOUND_MESG0_GET_CONFIG, &reg->inbound_msgaddr0);
	writel(ARCMSR_HBCMU_DRV2IOP_MESSAGE_CMD_DONE, &reg->inbound_doorbell);
	/* wait message ready */
	if (!arcmsr_hbaC_wait_msgint_ready(pACB)) {
		printk(KERN_NOTICE "arcmsr%d: wait 'get adapter firmware \
			miscellaneous data' timeout \n", pACB->host->host_no);
		return false;
	}
	arcmsr_get_adapter_config(pACB, reg->msgcode_rwbuffer);
	return true;
}
3462 
/*
** Fetch the firmware configuration from a type-D (ARC1214) adapter:
** clear any stale message-done doorbell, post GET_CONFIG and parse the
** reply buffer.  Returns false on handshake timeout.
*/
static bool arcmsr_hbaD_get_config(struct AdapterControlBlock *acb)
{
	struct MessageUnit_D *reg = acb->pmuD;

	if (readl(acb->pmuD->outbound_doorbell) &
		ARCMSR_ARC1214_IOP2DRV_MESSAGE_CMD_DONE) {
		writel(ARCMSR_ARC1214_IOP2DRV_MESSAGE_CMD_DONE,
			acb->pmuD->outbound_doorbell);/*clear interrupt*/
	}
	arcmsr_wait_firmware_ready(acb);
	/* post "get config" instruction */
	writel(ARCMSR_INBOUND_MESG0_GET_CONFIG, reg->inbound_msgaddr0);
	/* wait message ready */
	if (!arcmsr_hbaD_wait_msgint_ready(acb)) {
		pr_notice("arcmsr%d: wait get adapter firmware "
			"miscellaneous data timeout\n", acb->host->host_no);
		return false;
	}
	arcmsr_get_adapter_config(acb, reg->msgcode_rwbuffer);
	return true;
}
3484 
/*
** Fetch the firmware configuration from a type-E adapter.  Outbound
** interrupts are masked first; the doorbell protocol toggles the
** MESSAGE_CMD_DONE bit in the cached out_doorbell value on each post.
** Returns false if the message interrupt never arrives.
*/
static bool arcmsr_hbaE_get_config(struct AdapterControlBlock *pACB)
{
	struct MessageUnit_E __iomem *reg = pACB->pmuE;
	uint32_t intmask_org;

	/* disable all outbound interrupt */
	intmask_org = readl(&reg->host_int_mask); /* disable outbound message0 int */
	writel(intmask_org | ARCMSR_HBEMU_ALL_INTMASKENABLE, &reg->host_int_mask);
	/* wait firmware ready */
	arcmsr_wait_firmware_ready(pACB);
	mdelay(20);
	/* post "get config" instruction */
	writel(ARCMSR_INBOUND_MESG0_GET_CONFIG, &reg->inbound_msgaddr0);

	pACB->out_doorbell ^= ARCMSR_HBEMU_DRV2IOP_MESSAGE_CMD_DONE;
	writel(pACB->out_doorbell, &reg->iobound_doorbell);
	/* wait message ready */
	if (!arcmsr_hbaE_wait_msgint_ready(pACB)) {
		pr_notice("arcmsr%d: wait get adapter firmware "
			"miscellaneous data timeout\n", pACB->host->host_no);
		return false;
	}
	arcmsr_get_adapter_config(pACB, reg->msgcode_rwbuffer);
	return true;
}
3510 
/*
 * arcmsr_hbaF_get_config - fetch firmware configuration from a type F
 * (ARC-1886) adapter.
 *
 * Same handshake as type E (it reuses the HBEMU mask/doorbell constants
 * and arcmsr_hbaE_wait_msgint_ready), but the reply is read from the
 * host-resident pACB->msgcode_rwbuffer DMA buffer rather than from
 * mapped registers.
 *
 * Returns true on success, false on message timeout.
 */
static bool arcmsr_hbaF_get_config(struct AdapterControlBlock *pACB)
{
	struct MessageUnit_F __iomem *reg = pACB->pmuF;
	uint32_t intmask_org;

	/* disable all outbound interrupt */
	intmask_org = readl(&reg->host_int_mask); /* disable outbound message0 int */
	writel(intmask_org | ARCMSR_HBEMU_ALL_INTMASKENABLE, &reg->host_int_mask);
	/* wait firmware ready */
	arcmsr_wait_firmware_ready(pACB);
	/* post "get config" instruction */
	writel(ARCMSR_INBOUND_MESG0_GET_CONFIG, &reg->inbound_msgaddr0);

	/* type E/F doorbells are toggle driven: flip the bit, write the word */
	pACB->out_doorbell ^= ARCMSR_HBEMU_DRV2IOP_MESSAGE_CMD_DONE;
	writel(pACB->out_doorbell, &reg->iobound_doorbell);
	/* wait message ready */
	if (!arcmsr_hbaE_wait_msgint_ready(pACB)) {
		pr_notice("arcmsr%d: wait get adapter firmware miscellaneous data timeout\n",
			  pACB->host->host_no);
		return false;
	}
	/* reply lands in the host DMA buffer, not in the register window */
	arcmsr_get_adapter_config(pACB, pACB->msgcode_rwbuffer);
	return true;
}
3535 
3536 static bool arcmsr_get_firmware_spec(struct AdapterControlBlock *acb)
3537 {
3538 	bool rtn = false;
3539 
3540 	switch (acb->adapter_type) {
3541 	case ACB_ADAPTER_TYPE_A:
3542 		rtn = arcmsr_hbaA_get_config(acb);
3543 		break;
3544 	case ACB_ADAPTER_TYPE_B:
3545 		rtn = arcmsr_hbaB_get_config(acb);
3546 		break;
3547 	case ACB_ADAPTER_TYPE_C:
3548 		rtn = arcmsr_hbaC_get_config(acb);
3549 		break;
3550 	case ACB_ADAPTER_TYPE_D:
3551 		rtn = arcmsr_hbaD_get_config(acb);
3552 		break;
3553 	case ACB_ADAPTER_TYPE_E:
3554 		rtn = arcmsr_hbaE_get_config(acb);
3555 		break;
3556 	case ACB_ADAPTER_TYPE_F:
3557 		rtn = arcmsr_hbaF_get_config(acb);
3558 		break;
3559 	default:
3560 		break;
3561 	}
3562 	acb->maxOutstanding = acb->firm_numbers_queue - 1;
3563 	if (acb->host->can_queue >= acb->firm_numbers_queue)
3564 		acb->host->can_queue = acb->maxOutstanding;
3565 	else
3566 		acb->maxOutstanding = acb->host->can_queue;
3567 	acb->maxFreeCCB = acb->host->can_queue;
3568 	if (acb->maxFreeCCB < ARCMSR_MAX_FREECCB_NUM)
3569 		acb->maxFreeCCB += 64;
3570 	return rtn;
3571 }
3572 
/*
 * arcmsr_hbaA_polling_ccbdone - poll the type A outbound queue until
 * poll_ccb has completed (used on the abort/error-handling path).
 *
 * Drains completed CCBs from the outbound queue port, completing each
 * one as it is found.  The queue reports 0xFFFFFFFF when empty; on an
 * empty queue the poll sleeps 25ms and retries, giving up (FAILED)
 * after poll_count exceeds 100.  Returns SUCCESS once poll_ccb has been
 * seen, FAILED on timeout.
 */
static int arcmsr_hbaA_polling_ccbdone(struct AdapterControlBlock *acb,
	struct CommandControlBlock *poll_ccb)
{
	struct MessageUnit_A __iomem *reg = acb->pmuA;
	struct CommandControlBlock *ccb;
	struct ARCMSR_CDB *arcmsr_cdb;
	uint32_t flag_ccb, outbound_intstatus, poll_ccb_done = 0, poll_count = 0;
	int rtn;
	bool error;
	unsigned long ccb_cdb_phy;

polling_hba_ccb_retry:
	poll_count++;
	outbound_intstatus = readl(&reg->outbound_intstatus) & acb->outbound_int_enable;
	writel(outbound_intstatus, &reg->outbound_intstatus);/*clear interrupt*/
	while (1) {
		/* 0xFFFFFFFF == outbound queue empty */
		if ((flag_ccb = readl(&reg->outbound_queueport)) == 0xFFFFFFFF) {
			if (poll_ccb_done){
				rtn = SUCCESS;
				break;
			}else {
				msleep(25);
				if (poll_count > 100){
					rtn = FAILED;
					break;
				}
				goto polling_hba_ccb_retry;
			}
		}
		/* low 27 bits of flag_ccb hold the CDB bus address >> 5 */
		ccb_cdb_phy = (flag_ccb << 5) & 0xffffffff;
		if (acb->cdb_phyadd_hipart)
			ccb_cdb_phy = ccb_cdb_phy | acb->cdb_phyadd_hipart;
		/* translate bus address back to the kernel virtual CCB */
		arcmsr_cdb = (struct ARCMSR_CDB *)(acb->vir2phy_offset + ccb_cdb_phy);
		ccb = container_of(arcmsr_cdb, struct CommandControlBlock, arcmsr_cdb);
		poll_ccb_done |= (ccb == poll_ccb) ? 1 : 0;
		if ((ccb->acb != acb) || (ccb->startdone != ARCMSR_CCB_START)) {
			/* aborted command came back: complete it as aborted */
			if ((ccb->startdone == ARCMSR_CCB_ABORTED) || (ccb == poll_ccb)) {
				printk(KERN_NOTICE "arcmsr%d: scsi id = %d lun = %d ccb = '0x%p'"
					" poll command abort successfully \n"
					, acb->host->host_no
					, ccb->pcmd->device->id
					, (u32)ccb->pcmd->device->lun
					, ccb);
				ccb->pcmd->result = DID_ABORT << 16;
				arcmsr_ccb_complete(ccb);
				continue;
			}
			printk(KERN_NOTICE "arcmsr%d: polling get an illegal ccb"
				" command done ccb = '0x%p'"
				"ccboutstandingcount = %d \n"
				, acb->host->host_no
				, ccb
				, atomic_read(&acb->ccboutstandingcount));
			continue;
		}
		error = (flag_ccb & ARCMSR_CCBREPLY_FLAG_ERROR_MODE0) ? true : false;
		arcmsr_report_ccb_state(acb, ccb, error);
	}
	return rtn;
}
3633 
/*
 * arcmsr_hbaB_polling_ccbdone - poll the type B done queue until
 * poll_ccb has completed (abort/error-handling path).
 *
 * Type B keeps its done queue in host memory (reg->done_qbuffer) with a
 * software doneq_index.  A zero entry means the queue is empty; in that
 * case the poll sleeps 25ms and retries, giving up (FAILED) after
 * poll_count exceeds 100.  Returns SUCCESS once poll_ccb has been seen.
 */
static int arcmsr_hbaB_polling_ccbdone(struct AdapterControlBlock *acb,
					struct CommandControlBlock *poll_ccb)
{
	struct MessageUnit_B *reg = acb->pmuB;
	struct ARCMSR_CDB *arcmsr_cdb;
	struct CommandControlBlock *ccb;
	uint32_t flag_ccb, poll_ccb_done = 0, poll_count = 0;
	int index, rtn;
	bool error;
	unsigned long ccb_cdb_phy;

polling_hbb_ccb_retry:
	poll_count++;
	/* clear doorbell interrupt */
	writel(ARCMSR_DOORBELL_INT_CLEAR_PATTERN, reg->iop2drv_doorbell);
	while(1){
		index = reg->doneq_index;
		flag_ccb = reg->done_qbuffer[index];
		/* zero entry == done queue empty */
		if (flag_ccb == 0) {
			if (poll_ccb_done){
				rtn = SUCCESS;
				break;
			}else {
				msleep(25);
				if (poll_count > 100){
					rtn = FAILED;
					break;
				}
				goto polling_hbb_ccb_retry;
			}
		}
		/* consume the entry and advance the ring index */
		reg->done_qbuffer[index] = 0;
		index++;
		/*if last index number set it to 0 */
		index %= ARCMSR_MAX_HBB_POSTQUEUE;
		reg->doneq_index = index;
		/* check if command done with no error*/
		/* low 27 bits of flag_ccb hold the CDB bus address >> 5 */
		ccb_cdb_phy = (flag_ccb << 5) & 0xffffffff;
		if (acb->cdb_phyadd_hipart)
			ccb_cdb_phy = ccb_cdb_phy | acb->cdb_phyadd_hipart;
		/* translate bus address back to the kernel virtual CCB */
		arcmsr_cdb = (struct ARCMSR_CDB *)(acb->vir2phy_offset + ccb_cdb_phy);
		ccb = container_of(arcmsr_cdb, struct CommandControlBlock, arcmsr_cdb);
		poll_ccb_done |= (ccb == poll_ccb) ? 1 : 0;
		if ((ccb->acb != acb) || (ccb->startdone != ARCMSR_CCB_START)) {
			/* aborted command came back: complete it as aborted */
			if ((ccb->startdone == ARCMSR_CCB_ABORTED) || (ccb == poll_ccb)) {
				printk(KERN_NOTICE "arcmsr%d: scsi id = %d lun = %d ccb = '0x%p'"
					" poll command abort successfully \n"
					,acb->host->host_no
					,ccb->pcmd->device->id
					,(u32)ccb->pcmd->device->lun
					,ccb);
				ccb->pcmd->result = DID_ABORT << 16;
				arcmsr_ccb_complete(ccb);
				continue;
			}
			printk(KERN_NOTICE "arcmsr%d: polling get an illegal ccb"
				" command done ccb = '0x%p'"
				"ccboutstandingcount = %d \n"
				, acb->host->host_no
				, ccb
				, atomic_read(&acb->ccboutstandingcount));
			continue;
		}
		error = (flag_ccb & ARCMSR_CCBREPLY_FLAG_ERROR_MODE0) ? true : false;
		arcmsr_report_ccb_state(acb, ccb, error);
	}
	return rtn;
}
3702 
/*
 * arcmsr_hbaC_polling_ccbdone - poll the type C outbound post queue
 * until poll_ccb has completed (abort/error-handling path).
 *
 * An empty queue is detected via the OUTBOUND_POSTQUEUE_ISR bit of
 * host_int_status; the poll then sleeps 25ms and retries, giving up
 * (FAILED) after poll_count exceeds 100.  Type C reply words carry the
 * CDB bus address in the upper 28 bits (flag_ccb & 0xFFFFFFF0) and the
 * error flag in MODE1.  Returns SUCCESS once poll_ccb has been seen.
 */
static int arcmsr_hbaC_polling_ccbdone(struct AdapterControlBlock *acb,
		struct CommandControlBlock *poll_ccb)
{
	struct MessageUnit_C __iomem *reg = acb->pmuC;
	uint32_t flag_ccb;
	struct ARCMSR_CDB *arcmsr_cdb;
	bool error;
	struct CommandControlBlock *pCCB;
	uint32_t poll_ccb_done = 0, poll_count = 0;
	int rtn;
	unsigned long ccb_cdb_phy;

polling_hbc_ccb_retry:
	poll_count++;
	while (1) {
		/* no post-queue interrupt pending == queue empty */
		if ((readl(&reg->host_int_status) & ARCMSR_HBCMU_OUTBOUND_POSTQUEUE_ISR) == 0) {
			if (poll_ccb_done) {
				rtn = SUCCESS;
				break;
			} else {
				msleep(25);
				if (poll_count > 100) {
					rtn = FAILED;
					break;
				}
				goto polling_hbc_ccb_retry;
			}
		}
		flag_ccb = readl(&reg->outbound_queueport_low);
		/* low nibble carries status bits; the rest is the CDB address */
		ccb_cdb_phy = (flag_ccb & 0xFFFFFFF0);
		if (acb->cdb_phyadd_hipart)
			ccb_cdb_phy = ccb_cdb_phy | acb->cdb_phyadd_hipart;
		/* translate bus address back to the kernel virtual CCB */
		arcmsr_cdb = (struct ARCMSR_CDB *)(acb->vir2phy_offset + ccb_cdb_phy);
		pCCB = container_of(arcmsr_cdb, struct CommandControlBlock, arcmsr_cdb);
		poll_ccb_done |= (pCCB == poll_ccb) ? 1 : 0;
		/* check ifcommand done with no error*/
		if ((pCCB->acb != acb) || (pCCB->startdone != ARCMSR_CCB_START)) {
			/* aborted command came back: complete it as aborted */
			if (pCCB->startdone == ARCMSR_CCB_ABORTED) {
				printk(KERN_NOTICE "arcmsr%d: scsi id = %d lun = %d ccb = '0x%p'"
					" poll command abort successfully \n"
					, acb->host->host_no
					, pCCB->pcmd->device->id
					, (u32)pCCB->pcmd->device->lun
					, pCCB);
				pCCB->pcmd->result = DID_ABORT << 16;
				arcmsr_ccb_complete(pCCB);
				continue;
			}
			printk(KERN_NOTICE "arcmsr%d: polling get an illegal ccb"
				" command done ccb = '0x%p'"
				"ccboutstandingcount = %d \n"
				, acb->host->host_no
				, pCCB
				, atomic_read(&acb->ccboutstandingcount));
			continue;
		}
		error = (flag_ccb & ARCMSR_CCBREPLY_FLAG_ERROR_MODE1) ? true : false;
		arcmsr_report_ccb_state(acb, pCCB, error);
	}
	return rtn;
}
3764 
/*
 * arcmsr_hbaD_polling_ccbdone - poll the type D (ARC-1214) done queue
 * until poll_ccb has completed (abort/error-handling path).
 *
 * The done queue lives in host memory; entry 0's addressLow acts as the
 * firmware's write pointer and doneq_index carries a 12-bit position
 * plus a 0x4000 wrap-toggle bit.  Index manipulation is serialized with
 * the interrupt path via acb->doneq_lock.  An empty queue sleeps 25ms
 * and retries, giving up (FAILED) after poll_count exceeds 40.
 * Returns SUCCESS once poll_ccb has been seen.
 */
static int arcmsr_hbaD_polling_ccbdone(struct AdapterControlBlock *acb,
				struct CommandControlBlock *poll_ccb)
{
	bool error;
	uint32_t poll_ccb_done = 0, poll_count = 0, flag_ccb;
	int rtn, doneq_index, index_stripped, outbound_write_pointer, toggle;
	unsigned long flags, ccb_cdb_phy;
	struct ARCMSR_CDB *arcmsr_cdb;
	struct CommandControlBlock *pCCB;
	struct MessageUnit_D *pmu = acb->pmuD;

polling_hbaD_ccb_retry:
	poll_count++;
	while (1) {
		spin_lock_irqsave(&acb->doneq_lock, flags);
		/* slot 0 doubles as the firmware's producer pointer */
		outbound_write_pointer = pmu->done_qbuffer[0].addressLow + 1;
		doneq_index = pmu->doneq_index;
		/* producer == consumer (12-bit compare) => queue empty */
		if ((outbound_write_pointer & 0xFFF) == (doneq_index & 0xFFF)) {
			spin_unlock_irqrestore(&acb->doneq_lock, flags);
			if (poll_ccb_done) {
				rtn = SUCCESS;
				break;
			} else {
				msleep(25);
				if (poll_count > 40) {
					rtn = FAILED;
					break;
				}
				goto polling_hbaD_ccb_retry;
			}
		}
		/* advance consumer index, flipping the 0x4000 bit on wrap */
		toggle = doneq_index & 0x4000;
		index_stripped = (doneq_index & 0xFFF) + 1;
		index_stripped %= ARCMSR_MAX_ARC1214_DONEQUEUE;
		pmu->doneq_index = index_stripped ? (index_stripped | toggle) :
				((toggle ^ 0x4000) + 1);
		doneq_index = pmu->doneq_index;
		spin_unlock_irqrestore(&acb->doneq_lock, flags);
		flag_ccb = pmu->done_qbuffer[doneq_index & 0xFFF].addressLow;
		/* low nibble carries status bits; the rest is the CDB address */
		ccb_cdb_phy = (flag_ccb & 0xFFFFFFF0);
		if (acb->cdb_phyadd_hipart)
			ccb_cdb_phy = ccb_cdb_phy | acb->cdb_phyadd_hipart;
		/* translate bus address back to the kernel virtual CCB */
		arcmsr_cdb = (struct ARCMSR_CDB *)(acb->vir2phy_offset +
			ccb_cdb_phy);
		pCCB = container_of(arcmsr_cdb, struct CommandControlBlock,
			arcmsr_cdb);
		poll_ccb_done |= (pCCB == poll_ccb) ? 1 : 0;
		if ((pCCB->acb != acb) ||
			(pCCB->startdone != ARCMSR_CCB_START)) {
			/* aborted command came back: complete it as aborted */
			if (pCCB->startdone == ARCMSR_CCB_ABORTED) {
				pr_notice("arcmsr%d: scsi id = %d "
					"lun = %d ccb = '0x%p' poll command "
					"abort successfully\n"
					, acb->host->host_no
					, pCCB->pcmd->device->id
					, (u32)pCCB->pcmd->device->lun
					, pCCB);
				pCCB->pcmd->result = DID_ABORT << 16;
				arcmsr_ccb_complete(pCCB);
				continue;
			}
			pr_notice("arcmsr%d: polling an illegal "
				"ccb command done ccb = '0x%p' "
				"ccboutstandingcount = %d\n"
				, acb->host->host_no
				, pCCB
				, atomic_read(&acb->ccboutstandingcount));
			continue;
		}
		error = (flag_ccb & ARCMSR_CCBREPLY_FLAG_ERROR_MODE1)
			? true : false;
		arcmsr_report_ccb_state(acb, pCCB, error);
	}
	return rtn;
}
3840 
/*
 * arcmsr_hbaE_polling_ccbdone - poll the type E/F completion queue
 * until poll_ccb has completed (abort/error-handling path).
 *
 * Completions are host-memory entries in acb->pCompletionQ, each
 * carrying a cmdSMID that indexes acb->pccb_pool directly (no bus
 * address translation).  Queue-empty is detected by comparing the
 * firmware's reply_post_producer_index with the driver's doneq_index;
 * an empty queue sleeps 25ms and retries, giving up (FAILED) after
 * poll_count exceeds 40.  The final doneq_index is published to
 * reply_post_consumer_index on exit.
 */
static int arcmsr_hbaE_polling_ccbdone(struct AdapterControlBlock *acb,
				struct CommandControlBlock *poll_ccb)
{
	bool error;
	uint32_t poll_ccb_done = 0, poll_count = 0, doneq_index;
	uint16_t cmdSMID;
	unsigned long flags;
	int rtn;
	struct CommandControlBlock *pCCB;
	struct MessageUnit_E __iomem *reg = acb->pmuE;

	polling_hbaC_ccb_retry:
	poll_count++;
	while (1) {
		spin_lock_irqsave(&acb->doneq_lock, flags);
		doneq_index = acb->doneq_index;
		/* producer == consumer => completion queue empty */
		if ((readl(&reg->reply_post_producer_index) & 0xFFFF) ==
				doneq_index) {
			spin_unlock_irqrestore(&acb->doneq_lock, flags);
			if (poll_ccb_done) {
				rtn = SUCCESS;
				break;
			} else {
				msleep(25);
				if (poll_count > 40) {
					rtn = FAILED;
					break;
				}
				goto polling_hbaC_ccb_retry;
			}
		}
		cmdSMID = acb->pCompletionQ[doneq_index].cmdSMID;
		doneq_index++;
		if (doneq_index >= acb->completionQ_entry)
			doneq_index = 0;
		acb->doneq_index = doneq_index;
		spin_unlock_irqrestore(&acb->doneq_lock, flags);
		pCCB = acb->pccb_pool[cmdSMID];
		poll_ccb_done |= (pCCB == poll_ccb) ? 1 : 0;
		/* check if command done with no error*/
		if ((pCCB->acb != acb) || (pCCB->startdone != ARCMSR_CCB_START)) {
			/* aborted command came back: complete it as aborted */
			if (pCCB->startdone == ARCMSR_CCB_ABORTED) {
				pr_notice("arcmsr%d: scsi id = %d "
					"lun = %d ccb = '0x%p' poll command "
					"abort successfully\n"
					, acb->host->host_no
					, pCCB->pcmd->device->id
					, (u32)pCCB->pcmd->device->lun
					, pCCB);
				pCCB->pcmd->result = DID_ABORT << 16;
				arcmsr_ccb_complete(pCCB);
				continue;
			}
			pr_notice("arcmsr%d: polling an illegal "
				"ccb command done ccb = '0x%p' "
				"ccboutstandingcount = %d\n"
				, acb->host->host_no
				, pCCB
				, atomic_read(&acb->ccboutstandingcount));
			continue;
		}
		/* NOTE(review): cmdFlag is read at the already-advanced
		 * doneq_index, i.e. from the entry after the one cmdSMID was
		 * taken from - confirm against the interrupt-path handler
		 * whether this is intended. */
		error = (acb->pCompletionQ[doneq_index].cmdFlag &
			ARCMSR_CCBREPLY_FLAG_ERROR_MODE1) ? true : false;
		arcmsr_report_ccb_state(acb, pCCB, error);
	}
	/* publish our consumer index back to the firmware */
	writel(doneq_index, &reg->reply_post_consumer_index);
	return rtn;
}
3909 
3910 static int arcmsr_polling_ccbdone(struct AdapterControlBlock *acb,
3911 					struct CommandControlBlock *poll_ccb)
3912 {
3913 	int rtn = 0;
3914 	switch (acb->adapter_type) {
3915 
3916 	case ACB_ADAPTER_TYPE_A:
3917 		rtn = arcmsr_hbaA_polling_ccbdone(acb, poll_ccb);
3918 		break;
3919 	case ACB_ADAPTER_TYPE_B:
3920 		rtn = arcmsr_hbaB_polling_ccbdone(acb, poll_ccb);
3921 		break;
3922 	case ACB_ADAPTER_TYPE_C:
3923 		rtn = arcmsr_hbaC_polling_ccbdone(acb, poll_ccb);
3924 		break;
3925 	case ACB_ADAPTER_TYPE_D:
3926 		rtn = arcmsr_hbaD_polling_ccbdone(acb, poll_ccb);
3927 		break;
3928 	case ACB_ADAPTER_TYPE_E:
3929 	case ACB_ADAPTER_TYPE_F:
3930 		rtn = arcmsr_hbaE_polling_ccbdone(acb, poll_ccb);
3931 		break;
3932 	}
3933 	return rtn;
3934 }
3935 
/*
 * arcmsr_set_iop_datetime - periodic timer callback that pushes the
 * host's local wall-clock time to the adapter firmware.
 *
 * Packs the current local time into an 8-byte record (signature 0x55AA
 * plus year/month/date/hour/minute/second), writes it to the adapter's
 * message buffer as two 32-bit words and posts a SYNC_TIMER message via
 * the family-specific doorbell path.  Re-arms itself hourly when a
 * timezone offset is configured, otherwise every minute.
 */
static void arcmsr_set_iop_datetime(struct timer_list *t)
{
	struct AdapterControlBlock *pacb = from_timer(pacb, t, refresh_timer);
	unsigned int next_time;
	struct tm tm;

	/* union lets the byte-packed record be written as two u32 words */
	union {
		struct	{
		uint16_t	signature;
		uint8_t		year;
		uint8_t		month;
		uint8_t		date;
		uint8_t		hour;
		uint8_t		minute;
		uint8_t		second;
		} a;
		struct	{
		uint32_t	msg_time[2];
		} b;
	} datetime;

	/* local time: apply the system timezone offset ourselves */
	time64_to_tm(ktime_get_real_seconds(), -sys_tz.tz_minuteswest * 60, &tm);

	datetime.a.signature = 0x55AA;
	datetime.a.year = tm.tm_year - 100; /* base 2000 instead of 1900 */
	/* tm_mon is 0..11; presumably the firmware expects that - confirm */
	datetime.a.month = tm.tm_mon;
	datetime.a.date = tm.tm_mday;
	datetime.a.hour = tm.tm_hour;
	datetime.a.minute = tm.tm_min;
	datetime.a.second = tm.tm_sec;

	switch (pacb->adapter_type) {
		case ACB_ADAPTER_TYPE_A: {
			struct MessageUnit_A __iomem *reg = pacb->pmuA;
			writel(datetime.b.msg_time[0], &reg->message_rwbuffer[0]);
			writel(datetime.b.msg_time[1], &reg->message_rwbuffer[1]);
			writel(ARCMSR_INBOUND_MESG0_SYNC_TIMER, &reg->inbound_msgaddr0);
			break;
		}
		case ACB_ADAPTER_TYPE_B: {
			uint32_t __iomem *rwbuffer;
			struct MessageUnit_B *reg = pacb->pmuB;
			rwbuffer = reg->message_rwbuffer;
			writel(datetime.b.msg_time[0], rwbuffer++);
			writel(datetime.b.msg_time[1], rwbuffer++);
			writel(ARCMSR_MESSAGE_SYNC_TIMER, reg->drv2iop_doorbell);
			break;
		}
		case ACB_ADAPTER_TYPE_C: {
			struct MessageUnit_C __iomem *reg = pacb->pmuC;
			writel(datetime.b.msg_time[0], &reg->msgcode_rwbuffer[0]);
			writel(datetime.b.msg_time[1], &reg->msgcode_rwbuffer[1]);
			writel(ARCMSR_INBOUND_MESG0_SYNC_TIMER, &reg->inbound_msgaddr0);
			writel(ARCMSR_HBCMU_DRV2IOP_MESSAGE_CMD_DONE, &reg->inbound_doorbell);
			break;
		}
		case ACB_ADAPTER_TYPE_D: {
			uint32_t __iomem *rwbuffer;
			struct MessageUnit_D *reg = pacb->pmuD;
			rwbuffer = reg->msgcode_rwbuffer;
			writel(datetime.b.msg_time[0], rwbuffer++);
			writel(datetime.b.msg_time[1], rwbuffer++);
			writel(ARCMSR_INBOUND_MESG0_SYNC_TIMER, reg->inbound_msgaddr0);
			break;
		}
		case ACB_ADAPTER_TYPE_E: {
			struct MessageUnit_E __iomem *reg = pacb->pmuE;
			writel(datetime.b.msg_time[0], &reg->msgcode_rwbuffer[0]);
			writel(datetime.b.msg_time[1], &reg->msgcode_rwbuffer[1]);
			writel(ARCMSR_INBOUND_MESG0_SYNC_TIMER, &reg->inbound_msgaddr0);
			/* type E/F doorbells are toggle driven */
			pacb->out_doorbell ^= ARCMSR_HBEMU_DRV2IOP_MESSAGE_CMD_DONE;
			writel(pacb->out_doorbell, &reg->iobound_doorbell);
			break;
		}
		case ACB_ADAPTER_TYPE_F: {
			struct MessageUnit_F __iomem *reg = pacb->pmuF;

			/* type F message buffer lives in host DMA memory */
			pacb->msgcode_rwbuffer[0] = datetime.b.msg_time[0];
			pacb->msgcode_rwbuffer[1] = datetime.b.msg_time[1];
			writel(ARCMSR_INBOUND_MESG0_SYNC_TIMER, &reg->inbound_msgaddr0);
			pacb->out_doorbell ^= ARCMSR_HBEMU_DRV2IOP_MESSAGE_CMD_DONE;
			writel(pacb->out_doorbell, &reg->iobound_doorbell);
			break;
		}
	}
	/* resync hourly when a TZ offset exists, else every minute */
	if (sys_tz.tz_minuteswest)
		next_time = ARCMSR_HOURS;
	else
		next_time = ARCMSR_MINUTES;
	mod_timer(&pacb->refresh_timer, jiffies + msecs_to_jiffies(next_time));
}
4027 
/*
 * arcmsr_iop_confirm - tell the IOP where the driver's CCB/queue DMA
 * memory lives and switch it into driver mode.
 *
 * Picks the DMA base appropriate for the adapter family, splits it into
 * low/high 32-bit halves, caches the high part for later bus-address
 * reconstruction, and then runs the family-specific SET_CONFIG message
 * handshake.
 *
 * Returns 0 on success, 1 if any message handshake times out.
 */
static int arcmsr_iop_confirm(struct AdapterControlBlock *acb)
{
	uint32_t cdb_phyaddr, cdb_phyaddr_hi32;
	dma_addr_t dma_coherent_handle;

	/*
	********************************************************************
	** here we need to tell iop 331 our freeccb.HighPart
	** if freeccb.HighPart is not zero
	********************************************************************
	*/
	switch (acb->adapter_type) {
	case ACB_ADAPTER_TYPE_B:
	case ACB_ADAPTER_TYPE_D:
		/* B/D post queues live in the second coherent area */
		dma_coherent_handle = acb->dma_coherent_handle2;
		break;
	case ACB_ADAPTER_TYPE_E:
	case ACB_ADAPTER_TYPE_F:
		/* E/F want the address of the first embedded CDB */
		dma_coherent_handle = acb->dma_coherent_handle +
			offsetof(struct CommandControlBlock, arcmsr_cdb);
		break;
	default:
		dma_coherent_handle = acb->dma_coherent_handle;
		break;
	}
	cdb_phyaddr = lower_32_bits(dma_coherent_handle);
	cdb_phyaddr_hi32 = upper_32_bits(dma_coherent_handle);
	acb->cdb_phyaddr_hi32 = cdb_phyaddr_hi32;
	/* cached so reply handlers can rebuild full 64-bit bus addresses */
	acb->cdb_phyadd_hipart = ((uint64_t)cdb_phyaddr_hi32) << 32;
	/*
	***********************************************************************
	**    if adapter type B, set window of "post command Q"
	***********************************************************************
	*/
	switch (acb->adapter_type) {

	case ACB_ADAPTER_TYPE_A: {
		/* type A only needs telling when the high half is non-zero */
		if (cdb_phyaddr_hi32 != 0) {
			struct MessageUnit_A __iomem *reg = acb->pmuA;
			writel(ARCMSR_SIGNATURE_SET_CONFIG, \
						&reg->message_rwbuffer[0]);
			writel(cdb_phyaddr_hi32, &reg->message_rwbuffer[1]);
			writel(ARCMSR_INBOUND_MESG0_SET_CONFIG, \
							&reg->inbound_msgaddr0);
			if (!arcmsr_hbaA_wait_msgint_ready(acb)) {
				printk(KERN_NOTICE "arcmsr%d: ""set ccb high \
				part physical address timeout\n",
				acb->host->host_no);
				return 1;
			}
		}
		}
		break;

	case ACB_ADAPTER_TYPE_B: {
		uint32_t __iomem *rwbuffer;

		struct MessageUnit_B *reg = acb->pmuB;
		reg->postq_index = 0;
		reg->doneq_index = 0;
		writel(ARCMSR_MESSAGE_SET_POST_WINDOW, reg->drv2iop_doorbell);
		if (!arcmsr_hbaB_wait_msgint_ready(acb)) {
			printk(KERN_NOTICE "arcmsr%d: cannot set driver mode\n", \
				acb->host->host_no);
			return 1;
		}
		rwbuffer = reg->message_rwbuffer;
		/* driver "set config" signature */
		writel(ARCMSR_SIGNATURE_SET_CONFIG, rwbuffer++);
		/* normal should be zero */
		writel(cdb_phyaddr_hi32, rwbuffer++);
		/* postQ size (256 + 8)*4	 */
		writel(cdb_phyaddr, rwbuffer++);
		/* doneQ size (256 + 8)*4	 */
		writel(cdb_phyaddr + 1056, rwbuffer++);
		/* ccb maxQ size must be --> [(256 + 8)*4]*/
		writel(1056, rwbuffer);

		writel(ARCMSR_MESSAGE_SET_CONFIG, reg->drv2iop_doorbell);
		if (!arcmsr_hbaB_wait_msgint_ready(acb)) {
			printk(KERN_NOTICE "arcmsr%d: 'set command Q window' \
			timeout \n",acb->host->host_no);
			return 1;
		}
		/* finally flip the IOP into driver (queue) mode */
		writel(ARCMSR_MESSAGE_START_DRIVER_MODE, reg->drv2iop_doorbell);
		if (!arcmsr_hbaB_wait_msgint_ready(acb)) {
			pr_err("arcmsr%d: can't set driver mode.\n",
				acb->host->host_no);
			return 1;
		}
		}
		break;
	case ACB_ADAPTER_TYPE_C: {
			struct MessageUnit_C __iomem *reg = acb->pmuC;

			printk(KERN_NOTICE "arcmsr%d: cdb_phyaddr_hi32=0x%x\n",
					acb->adapter_index, cdb_phyaddr_hi32);
			writel(ARCMSR_SIGNATURE_SET_CONFIG, &reg->msgcode_rwbuffer[0]);
			writel(cdb_phyaddr_hi32, &reg->msgcode_rwbuffer[1]);
			writel(ARCMSR_INBOUND_MESG0_SET_CONFIG, &reg->inbound_msgaddr0);
			writel(ARCMSR_HBCMU_DRV2IOP_MESSAGE_CMD_DONE, &reg->inbound_doorbell);
			if (!arcmsr_hbaC_wait_msgint_ready(acb)) {
				printk(KERN_NOTICE "arcmsr%d: 'set command Q window' \
				timeout \n", acb->host->host_no);
				return 1;
			}
		}
		break;
	case ACB_ADAPTER_TYPE_D: {
		uint32_t __iomem *rwbuffer;
		struct MessageUnit_D *reg = acb->pmuD;
		reg->postq_index = 0;
		reg->doneq_index = 0;
		rwbuffer = reg->msgcode_rwbuffer;
		/* signature, hi32, postQ base, doneQ base, queue depth */
		writel(ARCMSR_SIGNATURE_SET_CONFIG, rwbuffer++);
		writel(cdb_phyaddr_hi32, rwbuffer++);
		writel(cdb_phyaddr, rwbuffer++);
		writel(cdb_phyaddr + (ARCMSR_MAX_ARC1214_POSTQUEUE *
			sizeof(struct InBound_SRB)), rwbuffer++);
		writel(0x100, rwbuffer);
		writel(ARCMSR_INBOUND_MESG0_SET_CONFIG, reg->inbound_msgaddr0);
		if (!arcmsr_hbaD_wait_msgint_ready(acb)) {
			pr_notice("arcmsr%d: 'set command Q window' timeout\n",
				acb->host->host_no);
			return 1;
		}
		}
		break;
	case ACB_ADAPTER_TYPE_E: {
		struct MessageUnit_E __iomem *reg = acb->pmuE;
		/* signature, model tag, CCB base/size, completion Q base/size */
		writel(ARCMSR_SIGNATURE_SET_CONFIG, &reg->msgcode_rwbuffer[0]);
		writel(ARCMSR_SIGNATURE_1884, &reg->msgcode_rwbuffer[1]);
		writel(cdb_phyaddr, &reg->msgcode_rwbuffer[2]);
		writel(cdb_phyaddr_hi32, &reg->msgcode_rwbuffer[3]);
		writel(acb->ccbsize, &reg->msgcode_rwbuffer[4]);
		writel(lower_32_bits(acb->dma_coherent_handle2), &reg->msgcode_rwbuffer[5]);
		writel(upper_32_bits(acb->dma_coherent_handle2), &reg->msgcode_rwbuffer[6]);
		writel(acb->ioqueue_size, &reg->msgcode_rwbuffer[7]);
		writel(ARCMSR_INBOUND_MESG0_SET_CONFIG, &reg->inbound_msgaddr0);
		/* type E/F doorbells are toggle driven */
		acb->out_doorbell ^= ARCMSR_HBEMU_DRV2IOP_MESSAGE_CMD_DONE;
		writel(acb->out_doorbell, &reg->iobound_doorbell);
		if (!arcmsr_hbaE_wait_msgint_ready(acb)) {
			pr_notice("arcmsr%d: 'set command Q window' timeout \n",
				acb->host->host_no);
			return 1;
		}
		}
		break;
	case ACB_ADAPTER_TYPE_F: {
		struct MessageUnit_F __iomem *reg = acb->pmuF;

		/* type F message buffer lives in host DMA memory */
		acb->msgcode_rwbuffer[0] = ARCMSR_SIGNATURE_SET_CONFIG;
		acb->msgcode_rwbuffer[1] = ARCMSR_SIGNATURE_1886;
		acb->msgcode_rwbuffer[2] = cdb_phyaddr;
		acb->msgcode_rwbuffer[3] = cdb_phyaddr_hi32;
		acb->msgcode_rwbuffer[4] = acb->ccbsize;
		acb->msgcode_rwbuffer[5] = lower_32_bits(acb->dma_coherent_handle2);
		acb->msgcode_rwbuffer[6] = upper_32_bits(acb->dma_coherent_handle2);
		acb->msgcode_rwbuffer[7] = acb->completeQ_size;
		if (acb->xor_mega) {
			acb->msgcode_rwbuffer[8] = 0x455AA;	//Linux init 2
			acb->msgcode_rwbuffer[9] = 0;
			acb->msgcode_rwbuffer[10] = lower_32_bits(acb->xorPhys);
			acb->msgcode_rwbuffer[11] = upper_32_bits(acb->xorPhys);
		}
		writel(ARCMSR_INBOUND_MESG0_SET_CONFIG, &reg->inbound_msgaddr0);
		acb->out_doorbell ^= ARCMSR_HBEMU_DRV2IOP_MESSAGE_CMD_DONE;
		writel(acb->out_doorbell, &reg->iobound_doorbell);
		if (!arcmsr_hbaE_wait_msgint_ready(acb)) {
			pr_notice("arcmsr%d: 'set command Q window' timeout\n",
				acb->host->host_no);
			return 1;
		}
		}
		break;
	}
	return 0;
}
4206 
/*
 * arcmsr_wait_firmware_ready - spin until the adapter firmware reports
 * its FIRMWARE_OK flag in the family-specific status register.
 *
 * Sleeps 20ms between polls unless ACB_F_IOP_INITED is already set, in
 * which case it busy-reads.  There is no timeout: this relies on the
 * firmware eventually coming up.  Type B additionally acks with an
 * END_OF_INTERRUPT doorbell once ready; types E and F share the HBE
 * register layout.
 */
static void arcmsr_wait_firmware_ready(struct AdapterControlBlock *acb)
{
	uint32_t firmware_state = 0;
	switch (acb->adapter_type) {

	case ACB_ADAPTER_TYPE_A: {
		struct MessageUnit_A __iomem *reg = acb->pmuA;
		do {
			if (!(acb->acb_flags & ACB_F_IOP_INITED))
				msleep(20);
			firmware_state = readl(&reg->outbound_msgaddr1);
		} while ((firmware_state & ARCMSR_OUTBOUND_MESG1_FIRMWARE_OK) == 0);
		}
		break;

	case ACB_ADAPTER_TYPE_B: {
		struct MessageUnit_B *reg = acb->pmuB;
		do {
			if (!(acb->acb_flags & ACB_F_IOP_INITED))
				msleep(20);
			firmware_state = readl(reg->iop2drv_doorbell);
		} while ((firmware_state & ARCMSR_MESSAGE_FIRMWARE_OK) == 0);
		/* ack so the IOP knows the ready doorbell was consumed */
		writel(ARCMSR_DRV2IOP_END_OF_INTERRUPT, reg->drv2iop_doorbell);
		}
		break;
	case ACB_ADAPTER_TYPE_C: {
		struct MessageUnit_C __iomem *reg = acb->pmuC;
		do {
			if (!(acb->acb_flags & ACB_F_IOP_INITED))
				msleep(20);
			firmware_state = readl(&reg->outbound_msgaddr1);
		} while ((firmware_state & ARCMSR_HBCMU_MESSAGE_FIRMWARE_OK) == 0);
		}
		break;
	case ACB_ADAPTER_TYPE_D: {
		struct MessageUnit_D *reg = acb->pmuD;
		do {
			if (!(acb->acb_flags & ACB_F_IOP_INITED))
				msleep(20);
			firmware_state = readl(reg->outbound_msgaddr1);
		} while ((firmware_state &
			ARCMSR_ARC1214_MESSAGE_FIRMWARE_OK) == 0);
		}
		break;
	case ACB_ADAPTER_TYPE_E:
	case ACB_ADAPTER_TYPE_F: {
		struct MessageUnit_E __iomem *reg = acb->pmuE;
		do {
			if (!(acb->acb_flags & ACB_F_IOP_INITED))
				msleep(20);
			firmware_state = readl(&reg->outbound_msgaddr1);
		} while ((firmware_state & ARCMSR_HBEMU_MESSAGE_FIRMWARE_OK) == 0);
		}
		break;
	}
}
4263 
/*
 * arcmsr_request_device_map - periodic timer callback that asks the
 * firmware for an updated device map via a GET_CONFIG message.
 *
 * Skips a cycle (just re-arms the 6s timer) when a previous GET_CONFIG
 * is still outstanding or a bus reset/abort is in progress.  Otherwise
 * posts GET_CONFIG through the family-specific doorbell path and sets
 * ACB_F_MSG_GET_CONFIG; the interrupt handler clears that flag when the
 * reply arrives.  Type F first checks outbound_msgaddr1 and skips the
 * request (without setting the flag) when the firmware is not ready or
 * reports no volume change.
 */
static void arcmsr_request_device_map(struct timer_list *t)
{
	struct AdapterControlBlock *acb = from_timer(acb, t, eternal_timer);
	if (acb->acb_flags & (ACB_F_MSG_GET_CONFIG | ACB_F_BUS_RESET | ACB_F_ABORT)) {
		mod_timer(&acb->eternal_timer, jiffies + msecs_to_jiffies(6 * HZ));
	} else {
		acb->fw_flag = FW_NORMAL;
		switch (acb->adapter_type) {
		case ACB_ADAPTER_TYPE_A: {
			struct MessageUnit_A __iomem *reg = acb->pmuA;
			writel(ARCMSR_INBOUND_MESG0_GET_CONFIG, &reg->inbound_msgaddr0);
			break;
			}
		case ACB_ADAPTER_TYPE_B: {
			struct MessageUnit_B *reg = acb->pmuB;
			writel(ARCMSR_MESSAGE_GET_CONFIG, reg->drv2iop_doorbell);
			break;
			}
		case ACB_ADAPTER_TYPE_C: {
			struct MessageUnit_C __iomem *reg = acb->pmuC;
			writel(ARCMSR_INBOUND_MESG0_GET_CONFIG, &reg->inbound_msgaddr0);
			writel(ARCMSR_HBCMU_DRV2IOP_MESSAGE_CMD_DONE, &reg->inbound_doorbell);
			break;
			}
		case ACB_ADAPTER_TYPE_D: {
			struct MessageUnit_D *reg = acb->pmuD;
			writel(ARCMSR_INBOUND_MESG0_GET_CONFIG, reg->inbound_msgaddr0);
			break;
			}
		case ACB_ADAPTER_TYPE_E: {
			struct MessageUnit_E __iomem *reg = acb->pmuE;
			writel(ARCMSR_INBOUND_MESG0_GET_CONFIG, &reg->inbound_msgaddr0);
			/* type E/F doorbells are toggle driven */
			acb->out_doorbell ^= ARCMSR_HBEMU_DRV2IOP_MESSAGE_CMD_DONE;
			writel(acb->out_doorbell, &reg->iobound_doorbell);
			break;
			}
		case ACB_ADAPTER_TYPE_F: {
			struct MessageUnit_F __iomem *reg = acb->pmuF;
			uint32_t outMsg1 = readl(&reg->outbound_msgaddr1);

			/* firmware busy or nothing changed: skip this cycle
			   WITHOUT setting ACB_F_MSG_GET_CONFIG */
			if (!(outMsg1 & ARCMSR_HBFMU_MESSAGE_FIRMWARE_OK) ||
				(outMsg1 & ARCMSR_HBFMU_MESSAGE_NO_VOLUME_CHANGE))
				goto nxt6s;
			writel(ARCMSR_INBOUND_MESG0_GET_CONFIG, &reg->inbound_msgaddr0);
			acb->out_doorbell ^= ARCMSR_HBEMU_DRV2IOP_MESSAGE_CMD_DONE;
			writel(acb->out_doorbell, &reg->iobound_doorbell);
			break;
			}
		default:
			return;
		}
		acb->acb_flags |= ACB_F_MSG_GET_CONFIG;
nxt6s:
		mod_timer(&acb->eternal_timer, jiffies + msecs_to_jiffies(6 * HZ));
	}
}
4320 
/*
 * arcmsr_hbaA_start_bgrb - ask a type A adapter's firmware to start
 * background rebuild; logs a notice if the message times out.
 */
static void arcmsr_hbaA_start_bgrb(struct AdapterControlBlock *acb)
{
	struct MessageUnit_A __iomem *reg = acb->pmuA;
	acb->acb_flags |= ACB_F_MSG_START_BGRB;
	writel(ARCMSR_INBOUND_MESG0_START_BGRB, &reg->inbound_msgaddr0);
	if (!arcmsr_hbaA_wait_msgint_ready(acb)) {
		printk(KERN_NOTICE "arcmsr%d: wait 'start adapter background \
				rebuild' timeout \n", acb->host->host_no);
	}
}
4331 
/*
 * arcmsr_hbaB_start_bgrb - ask a type B adapter's firmware to start
 * background rebuild via the drv2iop doorbell; logs on timeout.
 */
static void arcmsr_hbaB_start_bgrb(struct AdapterControlBlock *acb)
{
	struct MessageUnit_B *reg = acb->pmuB;
	acb->acb_flags |= ACB_F_MSG_START_BGRB;
	writel(ARCMSR_MESSAGE_START_BGRB, reg->drv2iop_doorbell);
	if (!arcmsr_hbaB_wait_msgint_ready(acb)) {
		printk(KERN_NOTICE "arcmsr%d: wait 'start adapter background \
				rebuild' timeout \n",acb->host->host_no);
	}
}
4342 
4343 static void arcmsr_hbaC_start_bgrb(struct AdapterControlBlock *pACB)
4344 {
4345 	struct MessageUnit_C __iomem *phbcmu = pACB->pmuC;
4346 	pACB->acb_flags |= ACB_F_MSG_START_BGRB;
4347 	writel(ARCMSR_INBOUND_MESG0_START_BGRB, &phbcmu->inbound_msgaddr0);
4348 	writel(ARCMSR_HBCMU_DRV2IOP_MESSAGE_CMD_DONE, &phbcmu->inbound_doorbell);
4349 	if (!arcmsr_hbaC_wait_msgint_ready(pACB)) {
4350 		printk(KERN_NOTICE "arcmsr%d: wait 'start adapter background \
4351 				rebuild' timeout \n", pACB->host->host_no);
4352 	}
4353 	return;
4354 }
4355 
/*
 * arcmsr_hbaD_start_bgrb - ask a type D adapter's firmware to start
 * background rebuild; logs a notice if the message times out.
 */
static void arcmsr_hbaD_start_bgrb(struct AdapterControlBlock *pACB)
{
	struct MessageUnit_D *pmu = pACB->pmuD;

	pACB->acb_flags |= ACB_F_MSG_START_BGRB;
	writel(ARCMSR_INBOUND_MESG0_START_BGRB, pmu->inbound_msgaddr0);
	if (!arcmsr_hbaD_wait_msgint_ready(pACB)) {
		pr_notice("arcmsr%d: wait 'start adapter "
			"background rebuild' timeout\n", pACB->host->host_no);
	}
}
4367 
/*
 * arcmsr_hbaE_start_bgrb - ask a type E/F adapter's firmware to start
 * background rebuild.  The notification uses the toggle-driven
 * out_doorbell word shared by the E/F families; logs on timeout.
 */
static void arcmsr_hbaE_start_bgrb(struct AdapterControlBlock *pACB)
{
	struct MessageUnit_E __iomem *pmu = pACB->pmuE;

	pACB->acb_flags |= ACB_F_MSG_START_BGRB;
	writel(ARCMSR_INBOUND_MESG0_START_BGRB, &pmu->inbound_msgaddr0);
	pACB->out_doorbell ^= ARCMSR_HBEMU_DRV2IOP_MESSAGE_CMD_DONE;
	writel(pACB->out_doorbell, &pmu->iobound_doorbell);
	if (!arcmsr_hbaE_wait_msgint_ready(pACB)) {
		pr_notice("arcmsr%d: wait 'start adapter "
			"background rebuild' timeout \n", pACB->host->host_no);
	}
}
4381 
4382 static void arcmsr_start_adapter_bgrb(struct AdapterControlBlock *acb)
4383 {
4384 	switch (acb->adapter_type) {
4385 	case ACB_ADAPTER_TYPE_A:
4386 		arcmsr_hbaA_start_bgrb(acb);
4387 		break;
4388 	case ACB_ADAPTER_TYPE_B:
4389 		arcmsr_hbaB_start_bgrb(acb);
4390 		break;
4391 	case ACB_ADAPTER_TYPE_C:
4392 		arcmsr_hbaC_start_bgrb(acb);
4393 		break;
4394 	case ACB_ADAPTER_TYPE_D:
4395 		arcmsr_hbaD_start_bgrb(acb);
4396 		break;
4397 	case ACB_ADAPTER_TYPE_E:
4398 	case ACB_ADAPTER_TYPE_F:
4399 		arcmsr_hbaE_start_bgrb(acb);
4400 		break;
4401 	}
4402 }
4403 
/*
 * arcmsr_clear_doorbell_queue_buffer - drain any pending doorbell Qbuffer
 * data left over from before (re)initialization.
 * @acb: adapter control block
 *
 * For each adapter type: acknowledge any rung doorbell, tell the IOP the
 * driver has read its data, then (types B/C/D/E) poll up to 200 x 20 ms
 * for the IOP to stop asserting DATA_WRITE_OK, re-acknowledging each time.
 * The loop exits early as soon as no further write indication is seen.
 */
static void arcmsr_clear_doorbell_queue_buffer(struct AdapterControlBlock *acb)
{
	switch (acb->adapter_type) {
	case ACB_ADAPTER_TYPE_A: {
		struct MessageUnit_A __iomem *reg = acb->pmuA;
		uint32_t outbound_doorbell;
		/* empty doorbell Qbuffer if door bell ringed */
		outbound_doorbell = readl(&reg->outbound_doorbell);
		/*clear doorbell interrupt */
		writel(outbound_doorbell, &reg->outbound_doorbell);
		/* tell the IOP its outbound data has been consumed */
		writel(ARCMSR_INBOUND_DRIVER_DATA_READ_OK, &reg->inbound_doorbell);
		}
		break;

	case ACB_ADAPTER_TYPE_B: {
		struct MessageUnit_B *reg = acb->pmuB;
		uint32_t outbound_doorbell, i;
		/* clear any pending IOP-to-driver doorbell interrupt */
		writel(ARCMSR_DOORBELL_INT_CLEAR_PATTERN, reg->iop2drv_doorbell);
		writel(ARCMSR_DRV2IOP_DATA_READ_OK, reg->drv2iop_doorbell);
		/* let IOP know data has been read */
		for(i=0; i < 200; i++) {
			msleep(20);
			outbound_doorbell = readl(reg->iop2drv_doorbell);
			if( outbound_doorbell & ARCMSR_IOP2DRV_DATA_WRITE_OK) {
				/* IOP wrote more data: clear and ack again */
				writel(ARCMSR_DOORBELL_INT_CLEAR_PATTERN, reg->iop2drv_doorbell);
				writel(ARCMSR_DRV2IOP_DATA_READ_OK, reg->drv2iop_doorbell);
			} else
				break;
		}
		}
		break;
	case ACB_ADAPTER_TYPE_C: {
		struct MessageUnit_C __iomem *reg = acb->pmuC;
		uint32_t outbound_doorbell, i;
		/* empty doorbell Qbuffer if door bell ringed */
		outbound_doorbell = readl(&reg->outbound_doorbell);
		/* type C has a dedicated write-1-to-clear register */
		writel(outbound_doorbell, &reg->outbound_doorbell_clear);
		writel(ARCMSR_HBCMU_DRV2IOP_DATA_READ_OK, &reg->inbound_doorbell);
		/* poll until the IOP stops indicating fresh data */
		for (i = 0; i < 200; i++) {
			msleep(20);
			outbound_doorbell = readl(&reg->outbound_doorbell);
			if (outbound_doorbell &
				ARCMSR_HBCMU_IOP2DRV_DATA_WRITE_OK) {
				writel(outbound_doorbell,
					&reg->outbound_doorbell_clear);
				writel(ARCMSR_HBCMU_DRV2IOP_DATA_READ_OK,
					&reg->inbound_doorbell);
			} else
				break;
		}
		}
		break;
	case ACB_ADAPTER_TYPE_D: {
		struct MessageUnit_D *reg = acb->pmuD;
		uint32_t outbound_doorbell, i;
		/* empty doorbell Qbuffer if door bell ringed */
		outbound_doorbell = readl(reg->outbound_doorbell);
		/* writing the value back clears the asserted bits */
		writel(outbound_doorbell, reg->outbound_doorbell);
		writel(ARCMSR_ARC1214_DRV2IOP_DATA_OUT_READ,
			reg->inbound_doorbell);
		/* poll until the IOP stops indicating fresh data */
		for (i = 0; i < 200; i++) {
			msleep(20);
			outbound_doorbell = readl(reg->outbound_doorbell);
			if (outbound_doorbell &
				ARCMSR_ARC1214_IOP2DRV_DATA_WRITE_OK) {
				writel(outbound_doorbell,
					reg->outbound_doorbell);
				writel(ARCMSR_ARC1214_DRV2IOP_DATA_OUT_READ,
					reg->inbound_doorbell);
			} else
				break;
		}
		}
		break;
	case ACB_ADAPTER_TYPE_E:
	case ACB_ADAPTER_TYPE_F: {
		struct MessageUnit_E __iomem *reg = acb->pmuE;
		uint32_t i, tmp;

		/* type E uses toggle-bit semantics: cache the current
		 * doorbell value and detect changes by XOR */
		acb->in_doorbell = readl(&reg->iobound_doorbell);
		writel(0, &reg->host_int_status); /*clear interrupt*/
		acb->out_doorbell ^= ARCMSR_HBEMU_DRV2IOP_DATA_READ_OK;
		writel(acb->out_doorbell, &reg->iobound_doorbell);
		for(i=0; i < 200; i++) {
			msleep(20);
			tmp = acb->in_doorbell;
			acb->in_doorbell = readl(&reg->iobound_doorbell);
			/* a flipped WRITE_OK bit means the IOP posted data */
			if((tmp ^ acb->in_doorbell) & ARCMSR_HBEMU_IOP2DRV_DATA_WRITE_OK) {
				writel(0, &reg->host_int_status); /*clear interrupt*/
				acb->out_doorbell ^= ARCMSR_HBEMU_DRV2IOP_DATA_READ_OK;
				writel(acb->out_doorbell, &reg->iobound_doorbell);
			} else
				break;
		}
		}
		break;
	}
}
4502 
4503 static void arcmsr_enable_eoi_mode(struct AdapterControlBlock *acb)
4504 {
4505 	switch (acb->adapter_type) {
4506 	case ACB_ADAPTER_TYPE_A:
4507 		return;
4508 	case ACB_ADAPTER_TYPE_B:
4509 		{
4510 			struct MessageUnit_B *reg = acb->pmuB;
4511 			writel(ARCMSR_MESSAGE_ACTIVE_EOI_MODE, reg->drv2iop_doorbell);
4512 			if (!arcmsr_hbaB_wait_msgint_ready(acb)) {
4513 				printk(KERN_NOTICE "ARCMSR IOP enables EOI_MODE TIMEOUT");
4514 				return;
4515 			}
4516 		}
4517 		break;
4518 	case ACB_ADAPTER_TYPE_C:
4519 		return;
4520 	}
4521 	return;
4522 }
4523 
/*
 * arcmsr_hardware_reset - issue a hard bus reset to the adapter.
 * @acb: adapter control block
 *
 * Saves the first 64 bytes of PCI config space, triggers the
 * device-specific reset mechanism, waits 2 s for the chip to reset,
 * restores the config space, then waits another 1 s before returning.
 * The 0x1880/0x1884 paths must first unlock the diagnostic register by
 * writing a magic byte sequence (retried up to 5 times).
 */
static void arcmsr_hardware_reset(struct AdapterControlBlock *acb)
{
	uint8_t value[64];
	int i, count = 0;
	struct MessageUnit_A __iomem *pmuA = acb->pmuA;
	struct MessageUnit_C __iomem *pmuC = acb->pmuC;
	struct MessageUnit_D *pmuD = acb->pmuD;

	/* backup pci config data */
	printk(KERN_NOTICE "arcmsr%d: executing hw bus reset .....\n", acb->host->host_no);
	for (i = 0; i < 64; i++) {
		pci_read_config_byte(acb->pdev, i, &value[i]);
	}
	/* hardware reset signal */
	if (acb->dev_id == 0x1680) {
		/* ARC1680: reset via a reserved message-unit register */
		writel(ARCMSR_ARC1680_BUS_RESET, &pmuA->reserved1[0]);
	} else if (acb->dev_id == 0x1880) {
		/* ARC1880: unlock the diagnostic register with the magic
		 * F-4-B-2-7-D sequence, then request the reset */
		do {
			count++;
			writel(0xF, &pmuC->write_sequence);
			writel(0x4, &pmuC->write_sequence);
			writel(0xB, &pmuC->write_sequence);
			writel(0x2, &pmuC->write_sequence);
			writel(0x7, &pmuC->write_sequence);
			writel(0xD, &pmuC->write_sequence);
		} while (((readl(&pmuC->host_diagnostic) & ARCMSR_ARC1880_DiagWrite_ENABLE) == 0) && (count < 5));
		writel(ARCMSR_ARC1880_RESET_ADAPTER, &pmuC->host_diagnostic);
	} else if (acb->dev_id == 0x1884) {
		/* ARC1884: same idea with the 3xxx register set and a
		 * shorter 4-B-2-7-D unlock sequence */
		struct MessageUnit_E __iomem *pmuE = acb->pmuE;
		do {
			count++;
			writel(0x4, &pmuE->write_sequence_3xxx);
			writel(0xB, &pmuE->write_sequence_3xxx);
			writel(0x2, &pmuE->write_sequence_3xxx);
			writel(0x7, &pmuE->write_sequence_3xxx);
			writel(0xD, &pmuE->write_sequence_3xxx);
			mdelay(10);
		} while (((readl(&pmuE->host_diagnostic_3xxx) &
			ARCMSR_ARC1884_DiagWrite_ENABLE) == 0) && (count < 5));
		writel(ARCMSR_ARC188X_RESET_ADAPTER, &pmuE->host_diagnostic_3xxx);
	} else if (acb->dev_id == 0x1214) {
		/* ARC1214: dedicated reset-request register */
		writel(0x20, pmuD->reset_request);
	} else {
		/* other models: reset via PCI config register 0x84 */
		pci_write_config_byte(acb->pdev, 0x84, 0x20);
	}
	msleep(2000);
	/* write back pci config data */
	for (i = 0; i < 64; i++) {
		pci_write_config_byte(acb->pdev, i, value[i]);
	}
	msleep(1000);
	return;
}
4577 
4578 static bool arcmsr_reset_in_progress(struct AdapterControlBlock *acb)
4579 {
4580 	bool rtn = true;
4581 
4582 	switch(acb->adapter_type) {
4583 	case ACB_ADAPTER_TYPE_A:{
4584 		struct MessageUnit_A __iomem *reg = acb->pmuA;
4585 		rtn = ((readl(&reg->outbound_msgaddr1) &
4586 			ARCMSR_OUTBOUND_MESG1_FIRMWARE_OK) == 0) ? true : false;
4587 		}
4588 		break;
4589 	case ACB_ADAPTER_TYPE_B:{
4590 		struct MessageUnit_B *reg = acb->pmuB;
4591 		rtn = ((readl(reg->iop2drv_doorbell) &
4592 			ARCMSR_MESSAGE_FIRMWARE_OK) == 0) ? true : false;
4593 		}
4594 		break;
4595 	case ACB_ADAPTER_TYPE_C:{
4596 		struct MessageUnit_C __iomem *reg = acb->pmuC;
4597 		rtn = (readl(&reg->host_diagnostic) & 0x04) ? true : false;
4598 		}
4599 		break;
4600 	case ACB_ADAPTER_TYPE_D:{
4601 		struct MessageUnit_D *reg = acb->pmuD;
4602 		rtn = ((readl(reg->sample_at_reset) & 0x80) == 0) ?
4603 			true : false;
4604 		}
4605 		break;
4606 	case ACB_ADAPTER_TYPE_E:
4607 	case ACB_ADAPTER_TYPE_F:{
4608 		struct MessageUnit_E __iomem *reg = acb->pmuE;
4609 		rtn = (readl(&reg->host_diagnostic_3xxx) &
4610 			ARCMSR_ARC188X_RESET_ADAPTER) ? true : false;
4611 		}
4612 		break;
4613 	}
4614 	return rtn;
4615 }
4616 
/*
 * arcmsr_iop_init - bring the IOP to an operational state.
 * @acb: adapter control block
 *
 * Runs the init sequence with outbound interrupts masked: wait for the
 * firmware to come up, confirm the CCB queue setup with the IOP, start
 * background rebuild, drain stale doorbell data, enable EOI mode (type B
 * only), then unmask interrupts and mark the adapter initialized.
 * The ordering of these steps matters and must not be changed.
 */
static void arcmsr_iop_init(struct AdapterControlBlock *acb)
{
	uint32_t intmask_org;
	/* disable all outbound interrupt */
	intmask_org = arcmsr_disable_outbound_ints(acb);
	arcmsr_wait_firmware_ready(acb);
	arcmsr_iop_confirm(acb);
	/*start background rebuild*/
	arcmsr_start_adapter_bgrb(acb);
	/* empty doorbell Qbuffer if door bell ringed */
	arcmsr_clear_doorbell_queue_buffer(acb);
	arcmsr_enable_eoi_mode(acb);
	/* enable outbound Post Queue,outbound doorbell Interrupt */
	arcmsr_enable_outbound_ints(acb, intmask_org);
	acb->acb_flags |= ACB_F_IOP_INITED;
}
4633 
4634 static uint8_t arcmsr_iop_reset(struct AdapterControlBlock *acb)
4635 {
4636 	struct CommandControlBlock *ccb;
4637 	uint32_t intmask_org;
4638 	uint8_t rtnval = 0x00;
4639 	int i = 0;
4640 	unsigned long flags;
4641 
4642 	if (atomic_read(&acb->ccboutstandingcount) != 0) {
4643 		/* disable all outbound interrupt */
4644 		intmask_org = arcmsr_disable_outbound_ints(acb);
4645 		/* talk to iop 331 outstanding command aborted */
4646 		rtnval = arcmsr_abort_allcmd(acb);
4647 		/* clear all outbound posted Q */
4648 		arcmsr_done4abort_postqueue(acb);
4649 		for (i = 0; i < acb->maxFreeCCB; i++) {
4650 			ccb = acb->pccb_pool[i];
4651 			if (ccb->startdone == ARCMSR_CCB_START) {
4652 				scsi_dma_unmap(ccb->pcmd);
4653 				ccb->startdone = ARCMSR_CCB_DONE;
4654 				ccb->ccb_flags = 0;
4655 				spin_lock_irqsave(&acb->ccblist_lock, flags);
4656 				list_add_tail(&ccb->list, &acb->ccb_free_list);
4657 				spin_unlock_irqrestore(&acb->ccblist_lock, flags);
4658 			}
4659 		}
4660 		atomic_set(&acb->ccboutstandingcount, 0);
4661 		/* enable all outbound interrupt */
4662 		arcmsr_enable_outbound_ints(acb, intmask_org);
4663 		return rtnval;
4664 	}
4665 	return rtnval;
4666 }
4667 
/*
 * arcmsr_bus_reset - SCSI error-handler bus reset entry point.
 * @cmd: the command that triggered error handling
 *
 * If another reset is already in flight, waits (up to 220 s) for it and
 * reports success. Otherwise tries a soft IOP reset; when that fails a
 * hardware reset is issued and polled (sleep + retry via the
 * wait_reset_done label, up to ARCMSR_RETRYCOUNT attempts) before
 * re-running the IOP init sequence.
 *
 * Returns SUCCESS or FAILED per the SCSI EH contract.
 */
static int arcmsr_bus_reset(struct scsi_cmnd *cmd)
{
	struct AdapterControlBlock *acb;
	int retry_count = 0;
	int rtn = FAILED;
	acb = (struct AdapterControlBlock *) cmd->device->host->hostdata;
	/* a hot-removed adapter cannot be reset; pretend success */
	if (acb->acb_flags & ACB_F_ADAPTER_REMOVED)
		return SUCCESS;
	pr_notice("arcmsr: executing bus reset eh.....num_resets = %d,"
		" num_aborts = %d \n", acb->num_resets, acb->num_aborts);
	acb->num_resets++;

	if (acb->acb_flags & ACB_F_BUS_RESET) {
		long timeout;
		pr_notice("arcmsr: there is a bus reset eh proceeding...\n");
		/* wait for the in-progress reset to clear the flag */
		timeout = wait_event_timeout(wait_q, (acb->acb_flags
			& ACB_F_BUS_RESET) == 0, 220 * HZ);
		if (timeout)
			return SUCCESS;
	}
	acb->acb_flags |= ACB_F_BUS_RESET;
	if (!arcmsr_iop_reset(acb)) {
		/* soft reset failed: fall back to a full hardware reset */
		arcmsr_hardware_reset(acb);
		acb->acb_flags &= ~ACB_F_IOP_INITED;
wait_reset_done:
		ssleep(ARCMSR_SLEEPTIME);
		if (arcmsr_reset_in_progress(acb)) {
			if (retry_count > ARCMSR_RETRYCOUNT) {
				/* give up; mark the firmware dead */
				acb->fw_flag = FW_DEADLOCK;
				pr_notice("arcmsr%d: waiting for hw bus reset"
					" return, RETRY TERMINATED!!\n",
					acb->host->host_no);
				return FAILED;
			}
			retry_count++;
			goto wait_reset_done;
		}
		arcmsr_iop_init(acb);
		acb->fw_flag = FW_NORMAL;
		/* restart the periodic device-check timer */
		mod_timer(&acb->eternal_timer, jiffies +
			msecs_to_jiffies(6 * HZ));
		acb->acb_flags &= ~ACB_F_BUS_RESET;
		rtn = SUCCESS;
		pr_notice("arcmsr: scsi bus reset eh returns with success\n");
	} else {
		/* soft IOP reset succeeded; no hardware reset needed */
		acb->acb_flags &= ~ACB_F_BUS_RESET;
		acb->fw_flag = FW_NORMAL;
		mod_timer(&acb->eternal_timer, jiffies +
			msecs_to_jiffies(6 * HZ));
		rtn = SUCCESS;
	}
	return rtn;
}
4721 
/*
 * arcmsr_abort_one_cmd - wait for one aborted CCB to complete.
 * @acb: adapter control block
 * @ccb: the command control block being aborted
 *
 * Thin wrapper around arcmsr_polling_ccbdone(); returns its result.
 */
static int arcmsr_abort_one_cmd(struct AdapterControlBlock *acb,
		struct CommandControlBlock *ccb)
{
	return arcmsr_polling_ccbdone(acb, ccb);
}
4729 
/*
 * arcmsr_abort - SCSI error-handler command abort entry point.
 * @cmd: the command to abort
 *
 * With outbound interrupts masked, scans the CCB pool for the CCB that
 * carries @cmd, marks it aborted and polls for its completion.
 * Returns SUCCESS when the adapter was removed or the abort completed;
 * FAILED when nothing was outstanding or the CCB was not found.
 */
static int arcmsr_abort(struct scsi_cmnd *cmd)
{
	struct AdapterControlBlock *acb =
		(struct AdapterControlBlock *)cmd->device->host->hostdata;
	int i = 0;
	int rtn = FAILED;
	uint32_t intmask_org;

	if (acb->acb_flags & ACB_F_ADAPTER_REMOVED)
		return SUCCESS;
	printk(KERN_NOTICE
		"arcmsr%d: abort device command of scsi id = %d lun = %d\n",
		acb->host->host_no, cmd->device->id, (u32)cmd->device->lun);
	acb->acb_flags |= ACB_F_ABORT;
	acb->num_aborts++;
	/*
	************************************************
	** the all interrupt service routine is locked
	** we need to handle it as soon as possible and exit
	************************************************
	*/
	if (!atomic_read(&acb->ccboutstandingcount)) {
		/* nothing in flight: the command already completed */
		acb->acb_flags &= ~ACB_F_ABORT;
		return rtn;
	}

	intmask_org = arcmsr_disable_outbound_ints(acb);
	/* find the in-flight CCB that owns this scsi_cmnd */
	for (i = 0; i < acb->maxFreeCCB; i++) {
		struct CommandControlBlock *ccb = acb->pccb_pool[i];
		if (ccb->startdone == ARCMSR_CCB_START && ccb->pcmd == cmd) {
			ccb->startdone = ARCMSR_CCB_ABORTED;
			rtn = arcmsr_abort_one_cmd(acb, ccb);
			break;
		}
	}
	acb->acb_flags &= ~ACB_F_ABORT;
	arcmsr_enable_outbound_ints(acb, intmask_org);
	return rtn;
}
4769 
4770 static const char *arcmsr_info(struct Scsi_Host *host)
4771 {
4772 	struct AdapterControlBlock *acb =
4773 		(struct AdapterControlBlock *) host->hostdata;
4774 	static char buf[256];
4775 	char *type;
4776 	int raid6 = 1;
4777 	switch (acb->pdev->device) {
4778 	case PCI_DEVICE_ID_ARECA_1110:
4779 	case PCI_DEVICE_ID_ARECA_1200:
4780 	case PCI_DEVICE_ID_ARECA_1202:
4781 	case PCI_DEVICE_ID_ARECA_1210:
4782 		raid6 = 0;
4783 		fallthrough;
4784 	case PCI_DEVICE_ID_ARECA_1120:
4785 	case PCI_DEVICE_ID_ARECA_1130:
4786 	case PCI_DEVICE_ID_ARECA_1160:
4787 	case PCI_DEVICE_ID_ARECA_1170:
4788 	case PCI_DEVICE_ID_ARECA_1201:
4789 	case PCI_DEVICE_ID_ARECA_1203:
4790 	case PCI_DEVICE_ID_ARECA_1220:
4791 	case PCI_DEVICE_ID_ARECA_1230:
4792 	case PCI_DEVICE_ID_ARECA_1260:
4793 	case PCI_DEVICE_ID_ARECA_1270:
4794 	case PCI_DEVICE_ID_ARECA_1280:
4795 		type = "SATA";
4796 		break;
4797 	case PCI_DEVICE_ID_ARECA_1214:
4798 	case PCI_DEVICE_ID_ARECA_1380:
4799 	case PCI_DEVICE_ID_ARECA_1381:
4800 	case PCI_DEVICE_ID_ARECA_1680:
4801 	case PCI_DEVICE_ID_ARECA_1681:
4802 	case PCI_DEVICE_ID_ARECA_1880:
4803 	case PCI_DEVICE_ID_ARECA_1883:
4804 	case PCI_DEVICE_ID_ARECA_1884:
4805 		type = "SAS/SATA";
4806 		break;
4807 	case PCI_DEVICE_ID_ARECA_1886_0:
4808 	case PCI_DEVICE_ID_ARECA_1886:
4809 		type = "NVMe/SAS/SATA";
4810 		break;
4811 	default:
4812 		type = "unknown";
4813 		raid6 =	0;
4814 		break;
4815 	}
4816 	sprintf(buf, "Areca %s RAID Controller %s\narcmsr version %s\n",
4817 		type, raid6 ? "(RAID6 capable)" : "", ARCMSR_DRIVER_VERSION);
4818 	return buf;
4819 }
4820