xref: /linux/drivers/scsi/aacraid/linit.c (revision 0d456bad36d42d16022be045c8a53ddbb59ee478)
1 /*
2  *	Adaptec AAC series RAID controller driver
3  *	(c) Copyright 2001 Red Hat Inc.
4  *
5  * based on the old aacraid driver, that is, the
6  * Adaptec aacraid device driver for Linux.
7  *
8  * Copyright (c) 2000-2010 Adaptec, Inc.
9  *               2010 PMC-Sierra, Inc. (aacraid@pmc-sierra.com)
10  *
11  * This program is free software; you can redistribute it and/or modify
12  * it under the terms of the GNU General Public License as published by
13  * the Free Software Foundation; either version 2, or (at your option)
14  * any later version.
15  *
16  * This program is distributed in the hope that it will be useful,
17  * but WITHOUT ANY WARRANTY; without even the implied warranty of
18  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
19  * GNU General Public License for more details.
20  *
21  * You should have received a copy of the GNU General Public License
22  * along with this program; see the file COPYING.  If not, write to
23  * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
24  *
25  * Module Name:
26  *   linit.c
27  *
28  * Abstract: Linux Driver entry module for Adaptec RAID Array Controller
29  */
30 
31 
32 #include <linux/compat.h>
33 #include <linux/blkdev.h>
34 #include <linux/completion.h>
35 #include <linux/init.h>
36 #include <linux/interrupt.h>
37 #include <linux/kernel.h>
38 #include <linux/module.h>
39 #include <linux/moduleparam.h>
40 #include <linux/pci.h>
41 #include <linux/pci-aspm.h>
42 #include <linux/slab.h>
43 #include <linux/mutex.h>
44 #include <linux/spinlock.h>
45 #include <linux/syscalls.h>
46 #include <linux/delay.h>
47 #include <linux/kthread.h>
48 
49 #include <scsi/scsi.h>
50 #include <scsi/scsi_cmnd.h>
51 #include <scsi/scsi_device.h>
52 #include <scsi/scsi_host.h>
53 #include <scsi/scsi_tcq.h>
54 #include <scsi/scsicam.h>
55 #include <scsi/scsi_eh.h>
56 
57 #include "aacraid.h"
58 
59 #define AAC_DRIVER_VERSION		"1.2-0"
60 #ifndef AAC_DRIVER_BRANCH
61 #define AAC_DRIVER_BRANCH		""
62 #endif
63 #define AAC_DRIVERNAME			"aacraid"
64 
65 #ifdef AAC_DRIVER_BUILD
66 #define _str(x) #x
67 #define str(x) _str(x)
68 #define AAC_DRIVER_FULL_VERSION	AAC_DRIVER_VERSION "[" str(AAC_DRIVER_BUILD) "]" AAC_DRIVER_BRANCH
69 #else
70 #define AAC_DRIVER_FULL_VERSION	AAC_DRIVER_VERSION AAC_DRIVER_BRANCH
71 #endif
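/*
 * Illustrative note (editorial, not from the original source): with a build
 * define such as -DAAC_DRIVER_BUILD=30300 and an empty AAC_DRIVER_BRANCH, the
 * stringify helpers above make AAC_DRIVER_FULL_VERSION expand to
 * "1.2-0[30300]"; without AAC_DRIVER_BUILD it is simply "1.2-0".
 */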
72 
73 MODULE_AUTHOR("Red Hat Inc and Adaptec");
74 MODULE_DESCRIPTION("Dell PERC2, 2/Si, 3/Si, 3/Di, "
75 		   "Adaptec Advanced Raid Products, "
76 		   "HP NetRAID-4M, IBM ServeRAID & ICP SCSI driver");
77 MODULE_LICENSE("GPL");
78 MODULE_VERSION(AAC_DRIVER_FULL_VERSION);
79 
80 static DEFINE_MUTEX(aac_mutex);
81 static LIST_HEAD(aac_devices);
82 static int aac_cfg_major = -1;
83 char aac_driver_version[] = AAC_DRIVER_FULL_VERSION;
84 
85 /*
86  * Because of the way Linux names scsi devices, the order in this table has
87  * become important.  Check for on-board Raid first, add-in cards second.
88  *
89  * Note: The last field is used to index into aac_drivers below.
90  */
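/*
 * For reference: aac_probe_one() below recovers this last field as
 * "unsigned index = id->driver_data;" and then indexes aac_drivers[index]
 * with it, so the two tables must stay in the same order.
 */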
91 #ifdef DECLARE_PCI_DEVICE_TABLE
92 static DECLARE_PCI_DEVICE_TABLE(aac_pci_tbl) = {
93 #elif defined(__devinitconst)
94 static const struct pci_device_id aac_pci_tbl[] __devinitconst = {
95 #else
96 static const struct pci_device_id aac_pci_tbl[] __devinitdata = {
97 #endif
98 	{ 0x1028, 0x0001, 0x1028, 0x0001, 0, 0, 0 }, /* PERC 2/Si (Iguana/PERC2Si) */
99 	{ 0x1028, 0x0002, 0x1028, 0x0002, 0, 0, 1 }, /* PERC 3/Di (Opal/PERC3Di) */
100 	{ 0x1028, 0x0003, 0x1028, 0x0003, 0, 0, 2 }, /* PERC 3/Si (SlimFast/PERC3Si) */
101 	{ 0x1028, 0x0004, 0x1028, 0x00d0, 0, 0, 3 }, /* PERC 3/Di (Iguana FlipChip/PERC3DiF) */
102 	{ 0x1028, 0x0002, 0x1028, 0x00d1, 0, 0, 4 }, /* PERC 3/Di (Viper/PERC3DiV) */
103 	{ 0x1028, 0x0002, 0x1028, 0x00d9, 0, 0, 5 }, /* PERC 3/Di (Lexus/PERC3DiL) */
104 	{ 0x1028, 0x000a, 0x1028, 0x0106, 0, 0, 6 }, /* PERC 3/Di (Jaguar/PERC3DiJ) */
105 	{ 0x1028, 0x000a, 0x1028, 0x011b, 0, 0, 7 }, /* PERC 3/Di (Dagger/PERC3DiD) */
106 	{ 0x1028, 0x000a, 0x1028, 0x0121, 0, 0, 8 }, /* PERC 3/Di (Boxster/PERC3DiB) */
107 	{ 0x9005, 0x0283, 0x9005, 0x0283, 0, 0, 9 }, /* catapult */
108 	{ 0x9005, 0x0284, 0x9005, 0x0284, 0, 0, 10 }, /* tomcat */
109 	{ 0x9005, 0x0285, 0x9005, 0x0286, 0, 0, 11 }, /* Adaptec 2120S (Crusader) */
110 	{ 0x9005, 0x0285, 0x9005, 0x0285, 0, 0, 12 }, /* Adaptec 2200S (Vulcan) */
111 	{ 0x9005, 0x0285, 0x9005, 0x0287, 0, 0, 13 }, /* Adaptec 2200S (Vulcan-2m) */
112 	{ 0x9005, 0x0285, 0x17aa, 0x0286, 0, 0, 14 }, /* Legend S220 (Legend Crusader) */
113 	{ 0x9005, 0x0285, 0x17aa, 0x0287, 0, 0, 15 }, /* Legend S230 (Legend Vulcan) */
114 
115 	{ 0x9005, 0x0285, 0x9005, 0x0288, 0, 0, 16 }, /* Adaptec 3230S (Harrier) */
116 	{ 0x9005, 0x0285, 0x9005, 0x0289, 0, 0, 17 }, /* Adaptec 3240S (Tornado) */
117 	{ 0x9005, 0x0285, 0x9005, 0x028a, 0, 0, 18 }, /* ASR-2020ZCR SCSI PCI-X ZCR (Skyhawk) */
118 	{ 0x9005, 0x0285, 0x9005, 0x028b, 0, 0, 19 }, /* ASR-2025ZCR SCSI SO-DIMM PCI-X ZCR (Terminator) */
119 	{ 0x9005, 0x0286, 0x9005, 0x028c, 0, 0, 20 }, /* ASR-2230S + ASR-2230SLP PCI-X (Lancer) */
120 	{ 0x9005, 0x0286, 0x9005, 0x028d, 0, 0, 21 }, /* ASR-2130S (Lancer) */
121 	{ 0x9005, 0x0286, 0x9005, 0x029b, 0, 0, 22 }, /* AAR-2820SA (Intruder) */
122 	{ 0x9005, 0x0286, 0x9005, 0x029c, 0, 0, 23 }, /* AAR-2620SA (Intruder) */
123 	{ 0x9005, 0x0286, 0x9005, 0x029d, 0, 0, 24 }, /* AAR-2420SA (Intruder) */
124 	{ 0x9005, 0x0286, 0x9005, 0x029e, 0, 0, 25 }, /* ICP9024RO (Lancer) */
125 	{ 0x9005, 0x0286, 0x9005, 0x029f, 0, 0, 26 }, /* ICP9014RO (Lancer) */
126 	{ 0x9005, 0x0286, 0x9005, 0x02a0, 0, 0, 27 }, /* ICP9047MA (Lancer) */
127 	{ 0x9005, 0x0286, 0x9005, 0x02a1, 0, 0, 28 }, /* ICP9087MA (Lancer) */
128 	{ 0x9005, 0x0286, 0x9005, 0x02a3, 0, 0, 29 }, /* ICP5445AU (Hurricane44) */
129 	{ 0x9005, 0x0285, 0x9005, 0x02a4, 0, 0, 30 }, /* ICP9085LI (Marauder-X) */
130 	{ 0x9005, 0x0285, 0x9005, 0x02a5, 0, 0, 31 }, /* ICP5085BR (Marauder-E) */
131 	{ 0x9005, 0x0286, 0x9005, 0x02a6, 0, 0, 32 }, /* ICP9067MA (Intruder-6) */
132 	{ 0x9005, 0x0287, 0x9005, 0x0800, 0, 0, 33 }, /* Themisto Jupiter Platform */
133 	{ 0x9005, 0x0200, 0x9005, 0x0200, 0, 0, 33 }, /* Themisto Jupiter Platform */
134 	{ 0x9005, 0x0286, 0x9005, 0x0800, 0, 0, 34 }, /* Callisto Jupiter Platform */
135 	{ 0x9005, 0x0285, 0x9005, 0x028e, 0, 0, 35 }, /* ASR-2020SA SATA PCI-X ZCR (Skyhawk) */
136 	{ 0x9005, 0x0285, 0x9005, 0x028f, 0, 0, 36 }, /* ASR-2025SA SATA SO-DIMM PCI-X ZCR (Terminator) */
137 	{ 0x9005, 0x0285, 0x9005, 0x0290, 0, 0, 37 }, /* AAR-2410SA PCI SATA 4ch (Jaguar II) */
138 	{ 0x9005, 0x0285, 0x1028, 0x0291, 0, 0, 38 }, /* CERC SATA RAID 2 PCI SATA 6ch (DellCorsair) */
139 	{ 0x9005, 0x0285, 0x9005, 0x0292, 0, 0, 39 }, /* AAR-2810SA PCI SATA 8ch (Corsair-8) */
140 	{ 0x9005, 0x0285, 0x9005, 0x0293, 0, 0, 40 }, /* AAR-21610SA PCI SATA 16ch (Corsair-16) */
141 	{ 0x9005, 0x0285, 0x9005, 0x0294, 0, 0, 41 }, /* ESD SO-DIMM PCI-X SATA ZCR (Prowler) */
142 	{ 0x9005, 0x0285, 0x103C, 0x3227, 0, 0, 42 }, /* AAR-2610SA PCI SATA 6ch */
143 	{ 0x9005, 0x0285, 0x9005, 0x0296, 0, 0, 43 }, /* ASR-2240S (SabreExpress) */
144 	{ 0x9005, 0x0285, 0x9005, 0x0297, 0, 0, 44 }, /* ASR-4005 */
145 	{ 0x9005, 0x0285, 0x1014, 0x02F2, 0, 0, 45 }, /* IBM 8i (AvonPark) */
146 	{ 0x9005, 0x0285, 0x1014, 0x0312, 0, 0, 45 }, /* IBM 8i (AvonPark Lite) */
147 	{ 0x9005, 0x0286, 0x1014, 0x9580, 0, 0, 46 }, /* IBM 8k/8k-l8 (Aurora) */
148 	{ 0x9005, 0x0286, 0x1014, 0x9540, 0, 0, 47 }, /* IBM 8k/8k-l4 (Aurora Lite) */
149 	{ 0x9005, 0x0285, 0x9005, 0x0298, 0, 0, 48 }, /* ASR-4000 (BlackBird) */
150 	{ 0x9005, 0x0285, 0x9005, 0x0299, 0, 0, 49 }, /* ASR-4800SAS (Marauder-X) */
151 	{ 0x9005, 0x0285, 0x9005, 0x029a, 0, 0, 50 }, /* ASR-4805SAS (Marauder-E) */
152 	{ 0x9005, 0x0286, 0x9005, 0x02a2, 0, 0, 51 }, /* ASR-3800 (Hurricane44) */
153 
154 	{ 0x9005, 0x0285, 0x1028, 0x0287, 0, 0, 52 }, /* Perc 320/DC*/
155 	{ 0x1011, 0x0046, 0x9005, 0x0365, 0, 0, 53 }, /* Adaptec 5400S (Mustang)*/
156 	{ 0x1011, 0x0046, 0x9005, 0x0364, 0, 0, 54 }, /* Adaptec 5400S (Mustang)*/
157 	{ 0x1011, 0x0046, 0x9005, 0x1364, 0, 0, 55 }, /* Dell PERC2/QC */
158 	{ 0x1011, 0x0046, 0x103c, 0x10c2, 0, 0, 56 }, /* HP NetRAID-4M */
159 
160 	{ 0x9005, 0x0285, 0x1028, PCI_ANY_ID, 0, 0, 57 }, /* Dell Catchall */
161 	{ 0x9005, 0x0285, 0x17aa, PCI_ANY_ID, 0, 0, 58 }, /* Legend Catchall */
162 	{ 0x9005, 0x0285, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 59 }, /* Adaptec Catch All */
163 	{ 0x9005, 0x0286, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 60 }, /* Adaptec Rocket Catch All */
164 	{ 0x9005, 0x0288, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 61 }, /* Adaptec NEMER/ARK Catch All */
165 	{ 0x9005, 0x028b, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 62 }, /* Adaptec PMC Series 6 (Tupelo) */
166 	{ 0x9005, 0x028c, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 63 }, /* Adaptec PMC Series 7 (Denali) */
167 	{ 0x9005, 0x028d, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 64 }, /* Adaptec PMC Series 8 */
168 	{ 0x9005, 0x028f, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 65 }, /* Adaptec PMC Series 9 */
169 	{ 0,}
170 };
171 MODULE_DEVICE_TABLE(pci, aac_pci_tbl);
172 
173 /*
174  * dmb - For now we add the number of channels to this structure.
175  * In the future we should add a fib that reports the number of channels
176  * for the card.  At that time we can remove the channels from here
177  */
178 static struct aac_driver_ident aac_drivers[] = {
179 	{ aac_rx_init, "percraid", "DELL    ", "PERCRAID        ", 2, AAC_QUIRK_31BIT | AAC_QUIRK_34SG | AAC_QUIRK_SCSI_32 }, /* PERC 2/Si (Iguana/PERC2Si) */
180 	{ aac_rx_init, "percraid", "DELL    ", "PERCRAID        ", 2, AAC_QUIRK_31BIT | AAC_QUIRK_34SG | AAC_QUIRK_SCSI_32 }, /* PERC 3/Di (Opal/PERC3Di) */
181 	{ aac_rx_init, "percraid", "DELL    ", "PERCRAID        ", 2, AAC_QUIRK_31BIT | AAC_QUIRK_34SG | AAC_QUIRK_SCSI_32 }, /* PERC 3/Si (SlimFast/PERC3Si) */
182 	{ aac_rx_init, "percraid", "DELL    ", "PERCRAID        ", 2, AAC_QUIRK_31BIT | AAC_QUIRK_34SG | AAC_QUIRK_SCSI_32 }, /* PERC 3/Di (Iguana FlipChip/PERC3DiF) */
183 	{ aac_rx_init, "percraid", "DELL    ", "PERCRAID        ", 2, AAC_QUIRK_31BIT | AAC_QUIRK_34SG | AAC_QUIRK_SCSI_32 }, /* PERC 3/Di (Viper/PERC3DiV) */
184 	{ aac_rx_init, "percraid", "DELL    ", "PERCRAID        ", 2, AAC_QUIRK_31BIT | AAC_QUIRK_34SG | AAC_QUIRK_SCSI_32 }, /* PERC 3/Di (Lexus/PERC3DiL) */
185 	{ aac_rx_init, "percraid", "DELL    ", "PERCRAID        ", 1, AAC_QUIRK_31BIT | AAC_QUIRK_34SG | AAC_QUIRK_SCSI_32 }, /* PERC 3/Di (Jaguar/PERC3DiJ) */
186 	{ aac_rx_init, "percraid", "DELL    ", "PERCRAID        ", 2, AAC_QUIRK_31BIT | AAC_QUIRK_34SG | AAC_QUIRK_SCSI_32 }, /* PERC 3/Di (Dagger/PERC3DiD) */
187 	{ aac_rx_init, "percraid", "DELL    ", "PERCRAID        ", 2, AAC_QUIRK_31BIT | AAC_QUIRK_34SG | AAC_QUIRK_SCSI_32 }, /* PERC 3/Di (Boxster/PERC3DiB) */
188 	{ aac_rx_init, "aacraid",  "ADAPTEC ", "catapult        ", 2, AAC_QUIRK_31BIT | AAC_QUIRK_34SG | AAC_QUIRK_SCSI_32 }, /* catapult */
189 	{ aac_rx_init, "aacraid",  "ADAPTEC ", "tomcat          ", 2, AAC_QUIRK_31BIT | AAC_QUIRK_34SG | AAC_QUIRK_SCSI_32 }, /* tomcat */
190 	{ aac_rx_init, "aacraid",  "ADAPTEC ", "Adaptec 2120S   ", 1, AAC_QUIRK_31BIT | AAC_QUIRK_34SG },		      /* Adaptec 2120S (Crusader) */
191 	{ aac_rx_init, "aacraid",  "ADAPTEC ", "Adaptec 2200S   ", 2, AAC_QUIRK_31BIT | AAC_QUIRK_34SG },		      /* Adaptec 2200S (Vulcan) */
192 	{ aac_rx_init, "aacraid",  "ADAPTEC ", "Adaptec 2200S   ", 2, AAC_QUIRK_31BIT | AAC_QUIRK_34SG | AAC_QUIRK_SCSI_32 }, /* Adaptec 2200S (Vulcan-2m) */
193 	{ aac_rx_init, "aacraid",  "Legend  ", "Legend S220     ", 1, AAC_QUIRK_31BIT | AAC_QUIRK_34SG | AAC_QUIRK_SCSI_32 }, /* Legend S220 (Legend Crusader) */
194 	{ aac_rx_init, "aacraid",  "Legend  ", "Legend S230     ", 2, AAC_QUIRK_31BIT | AAC_QUIRK_34SG | AAC_QUIRK_SCSI_32 }, /* Legend S230 (Legend Vulcan) */
195 
196 	{ aac_rx_init, "aacraid",  "ADAPTEC ", "Adaptec 3230S   ", 2 }, /* Adaptec 3230S (Harrier) */
197 	{ aac_rx_init, "aacraid",  "ADAPTEC ", "Adaptec 3240S   ", 2 }, /* Adaptec 3240S (Tornado) */
198 	{ aac_rx_init, "aacraid",  "ADAPTEC ", "ASR-2020ZCR     ", 2 }, /* ASR-2020ZCR SCSI PCI-X ZCR (Skyhawk) */
199 	{ aac_rx_init, "aacraid",  "ADAPTEC ", "ASR-2025ZCR     ", 2 }, /* ASR-2025ZCR SCSI SO-DIMM PCI-X ZCR (Terminator) */
200 	{ aac_rkt_init, "aacraid",  "ADAPTEC ", "ASR-2230S PCI-X ", 2 }, /* ASR-2230S + ASR-2230SLP PCI-X (Lancer) */
201 	{ aac_rkt_init, "aacraid",  "ADAPTEC ", "ASR-2130S PCI-X ", 1 }, /* ASR-2130S (Lancer) */
202 	{ aac_rkt_init, "aacraid",  "ADAPTEC ", "AAR-2820SA      ", 1 }, /* AAR-2820SA (Intruder) */
203 	{ aac_rkt_init, "aacraid",  "ADAPTEC ", "AAR-2620SA      ", 1 }, /* AAR-2620SA (Intruder) */
204 	{ aac_rkt_init, "aacraid",  "ADAPTEC ", "AAR-2420SA      ", 1 }, /* AAR-2420SA (Intruder) */
205 	{ aac_rkt_init, "aacraid",  "ICP     ", "ICP9024RO       ", 2 }, /* ICP9024RO (Lancer) */
206 	{ aac_rkt_init, "aacraid",  "ICP     ", "ICP9014RO       ", 1 }, /* ICP9014RO (Lancer) */
207 	{ aac_rkt_init, "aacraid",  "ICP     ", "ICP9047MA       ", 1 }, /* ICP9047MA (Lancer) */
208 	{ aac_rkt_init, "aacraid",  "ICP     ", "ICP9087MA       ", 1 }, /* ICP9087MA (Lancer) */
209 	{ aac_rkt_init, "aacraid",  "ICP     ", "ICP5445AU       ", 1 }, /* ICP5445AU (Hurricane44) */
210 	{ aac_rx_init, "aacraid",  "ICP     ", "ICP9085LI       ", 1 }, /* ICP9085LI (Marauder-X) */
211 	{ aac_rx_init, "aacraid",  "ICP     ", "ICP5085BR       ", 1 }, /* ICP5085BR (Marauder-E) */
212 	{ aac_rkt_init, "aacraid",  "ICP     ", "ICP9067MA       ", 1 }, /* ICP9067MA (Intruder-6) */
213 	{ NULL        , "aacraid",  "ADAPTEC ", "Themisto        ", 0, AAC_QUIRK_SLAVE }, /* Jupiter Platform */
214 	{ aac_rkt_init, "aacraid",  "ADAPTEC ", "Callisto        ", 2, AAC_QUIRK_MASTER }, /* Jupiter Platform */
215 	{ aac_rx_init, "aacraid",  "ADAPTEC ", "ASR-2020SA       ", 1 }, /* ASR-2020SA SATA PCI-X ZCR (Skyhawk) */
216 	{ aac_rx_init, "aacraid",  "ADAPTEC ", "ASR-2025SA       ", 1 }, /* ASR-2025SA SATA SO-DIMM PCI-X ZCR (Terminator) */
217 	{ aac_rx_init, "aacraid",  "ADAPTEC ", "AAR-2410SA SATA ", 1, AAC_QUIRK_17SG }, /* AAR-2410SA PCI SATA 4ch (Jaguar II) */
218 	{ aac_rx_init, "aacraid",  "DELL    ", "CERC SR2        ", 1, AAC_QUIRK_17SG }, /* CERC SATA RAID 2 PCI SATA 6ch (DellCorsair) */
219 	{ aac_rx_init, "aacraid",  "ADAPTEC ", "AAR-2810SA SATA ", 1, AAC_QUIRK_17SG }, /* AAR-2810SA PCI SATA 8ch (Corsair-8) */
220 	{ aac_rx_init, "aacraid",  "ADAPTEC ", "AAR-21610SA SATA", 1, AAC_QUIRK_17SG }, /* AAR-21610SA PCI SATA 16ch (Corsair-16) */
221 	{ aac_rx_init, "aacraid",  "ADAPTEC ", "ASR-2026ZCR     ", 1 }, /* ESD SO-DIMM PCI-X SATA ZCR (Prowler) */
222 	{ aac_rx_init, "aacraid",  "ADAPTEC ", "AAR-2610SA      ", 1 }, /* SATA 6Ch (Bearcat) */
223 	{ aac_rx_init, "aacraid",  "ADAPTEC ", "ASR-2240S       ", 1 }, /* ASR-2240S (SabreExpress) */
224 	{ aac_rx_init, "aacraid",  "ADAPTEC ", "ASR-4005        ", 1 }, /* ASR-4005 */
225 	{ aac_rx_init, "ServeRAID","IBM     ", "ServeRAID 8i    ", 1 }, /* IBM 8i (AvonPark) */
226 	{ aac_rkt_init, "ServeRAID","IBM     ", "ServeRAID 8k-l8 ", 1 }, /* IBM 8k/8k-l8 (Aurora) */
227 	{ aac_rkt_init, "ServeRAID","IBM     ", "ServeRAID 8k-l4 ", 1 }, /* IBM 8k/8k-l4 (Aurora Lite) */
228 	{ aac_rx_init, "aacraid",  "ADAPTEC ", "ASR-4000        ", 1 }, /* ASR-4000 (BlackBird & AvonPark) */
229 	{ aac_rx_init, "aacraid",  "ADAPTEC ", "ASR-4800SAS     ", 1 }, /* ASR-4800SAS (Marauder-X) */
230 	{ aac_rx_init, "aacraid",  "ADAPTEC ", "ASR-4805SAS     ", 1 }, /* ASR-4805SAS (Marauder-E) */
231 	{ aac_rkt_init, "aacraid",  "ADAPTEC ", "ASR-3800        ", 1 }, /* ASR-3800 (Hurricane44) */
232 
233 	{ aac_rx_init, "percraid", "DELL    ", "PERC 320/DC     ", 2, AAC_QUIRK_31BIT | AAC_QUIRK_34SG }, /* Perc 320/DC*/
234 	{ aac_sa_init, "aacraid",  "ADAPTEC ", "Adaptec 5400S   ", 4, AAC_QUIRK_34SG }, /* Adaptec 5400S (Mustang)*/
235 	{ aac_sa_init, "aacraid",  "ADAPTEC ", "AAC-364         ", 4, AAC_QUIRK_34SG }, /* Adaptec 5400S (Mustang)*/
236 	{ aac_sa_init, "percraid", "DELL    ", "PERCRAID        ", 4, AAC_QUIRK_34SG }, /* Dell PERC2/QC */
237 	{ aac_sa_init, "hpnraid",  "HP      ", "NetRAID         ", 4, AAC_QUIRK_34SG }, /* HP NetRAID-4M */
238 
239 	{ aac_rx_init, "aacraid",  "DELL    ", "RAID            ", 2, AAC_QUIRK_31BIT | AAC_QUIRK_34SG | AAC_QUIRK_SCSI_32 }, /* Dell Catchall */
240 	{ aac_rx_init, "aacraid",  "Legend  ", "RAID            ", 2, AAC_QUIRK_31BIT | AAC_QUIRK_34SG | AAC_QUIRK_SCSI_32 }, /* Legend Catchall */
241 	{ aac_rx_init, "aacraid",  "ADAPTEC ", "RAID            ", 2 }, /* Adaptec Catch All */
242 	{ aac_rkt_init, "aacraid", "ADAPTEC ", "RAID            ", 2 }, /* Adaptec Rocket Catch All */
243 	{ aac_nark_init, "aacraid", "ADAPTEC ", "RAID           ", 2 }, /* Adaptec NEMER/ARK Catch All */
244 	{ aac_src_init, "aacraid", "ADAPTEC ", "RAID            ", 2 }, /* Adaptec PMC Series 6 (Tupelo) */
245 	{ aac_srcv_init, "aacraid", "ADAPTEC ", "RAID            ", 2 }, /* Adaptec PMC Series 7 (Denali) */
246 	{ aac_srcv_init, "aacraid", "ADAPTEC ", "RAID            ", 2 }, /* Adaptec PMC Series 8 */
247 	{ aac_srcv_init, "aacraid", "ADAPTEC ", "RAID            ", 2 } /* Adaptec PMC Series 9 */
248 };
249 
250 /**
251  *	aac_queuecommand	-	queue a SCSI command
252  *	@cmd:		SCSI command to queue
253  *	@done:		Function to call on command completion
254  *
255  *	Queues a command for execution by the associated Host Adapter.
256  *
257  *	TODO: unify with aac_scsi_cmd().
258  */
259 
260 static int aac_queuecommand_lck(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *))
261 {
262 	struct Scsi_Host *host = cmd->device->host;
263 	struct aac_dev *dev = (struct aac_dev *)host->hostdata;
264 	u32 count = 0;
265 	cmd->scsi_done = done;
266 	for (; count < (host->can_queue + AAC_NUM_MGT_FIB); ++count) {
267 		struct fib * fib = &dev->fibs[count];
268 		struct scsi_cmnd * command;
269 		if (fib->hw_fib_va->header.XferState &&
270 		    ((command = fib->callback_data)) &&
271 		    (command == cmd) &&
272 		    (cmd->SCp.phase == AAC_OWNER_FIRMWARE))
273 			return 0; /* Already owned by Adapter */
274 	}
275 	cmd->SCp.phase = AAC_OWNER_LOWLEVEL;
276 	return (aac_scsi_cmd(cmd) ? FAILED : 0);
277 }
278 
279 static DEF_SCSI_QCMD(aac_queuecommand)
280 
281 /**
282  *	aac_info		-	Returns the host adapter name
283  *	@shost:		Scsi host to report on
284  *
285  *	Returns a static string describing the device in question
286  */
287 
288 static const char *aac_info(struct Scsi_Host *shost)
289 {
290 	struct aac_dev *dev = (struct aac_dev *)shost->hostdata;
291 	return aac_drivers[dev->cardtype].name;
292 }
293 
294 /**
295  *	aac_get_driver_ident
296  *	@devtype: index into lookup table
297  *
298  *	Returns a pointer to the entry in the driver lookup table.
299  */
300 
301 struct aac_driver_ident* aac_get_driver_ident(int devtype)
302 {
303 	return &aac_drivers[devtype];
304 }
305 
306 /**
307  *	aac_biosparm	-	return BIOS parameters for disk
308  *	@sdev: The scsi device corresponding to the disk
309  *	@bdev: the block device corresponding to the disk
310  *	@capacity: the sector capacity of the disk
311  *	@geom: geometry block to fill in
312  *
313  *	Return the Heads/Sectors/Cylinders BIOS Disk Parameters for Disk.
314  *	The default disk geometry is 64 heads, 32 sectors, and the appropriate
315  *	number of cylinders so as not to exceed drive capacity.  In order for
316  *	disks equal to or larger than 1 GB to be addressable by the BIOS
317  *	without exceeding the BIOS limitation of 1024 cylinders, Extended
318  *	Translation should be enabled.   With Extended Translation enabled,
319  *	drives between 1 GB inclusive and 2 GB exclusive are given a disk
320  *	geometry of 128 heads and 32 sectors, and drives above 2 GB inclusive
321  *	are given a disk geometry of 255 heads and 63 sectors.  However, if
322  *	the BIOS detects that the Extended Translation setting does not match
323  *	the geometry in the partition table, then the translation inferred
324  *	from the partition table will be used by the BIOS, and a warning may
325  *	be displayed.
326  */
327 
328 static int aac_biosparm(struct scsi_device *sdev, struct block_device *bdev,
329 			sector_t capacity, int *geom)
330 {
331 	struct diskparm *param = (struct diskparm *)geom;
332 	unsigned char *buf;
333 
334 	dprintk((KERN_DEBUG "aac_biosparm.\n"));
335 
336 	/*
337 	 *	Assuming extended translation is enabled - #REVISIT#
338 	 */
339 	if (capacity >= 2 * 1024 * 1024) { /* 1 GB in 512 byte sectors */
340 		if(capacity >= 4 * 1024 * 1024) { /* 2 GB in 512 byte sectors */
341 			param->heads = 255;
342 			param->sectors = 63;
343 		} else {
344 			param->heads = 128;
345 			param->sectors = 32;
346 		}
347 	} else {
348 		param->heads = 64;
349 		param->sectors = 32;
350 	}
351 
352 	param->cylinders = cap_to_cyls(capacity, param->heads * param->sectors);
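	/*
	 * Worked example (illustrative only, assuming 512-byte sectors): a
	 * disk of 9216000 sectors is >= 4*1024*1024, so it gets the 255/63
	 * translation above, and cap_to_cyls() yields
	 * 9216000 / (255 * 63) ~= 573 cylinders.
	 */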
353 
354 	/*
355 	 *	Read the first 1024 bytes from the disk device, if the boot
356 	 *	sector partition table is valid, search for a partition table
357 	 *	entry whose end_head matches one of the standard geometry
358 	 *	translations ( 64/32, 128/32, 255/63 ).
359 	 */
360 	buf = scsi_bios_ptable(bdev);
361 	if (!buf)
362 		return 0;
363 	if(*(__le16 *)(buf + 0x40) == cpu_to_le16(0xaa55)) {
364 		struct partition *first = (struct partition * )buf;
365 		struct partition *entry = first;
366 		int saved_cylinders = param->cylinders;
367 		int num;
368 		unsigned char end_head, end_sec;
369 
370 		for(num = 0; num < 4; num++) {
371 			end_head = entry->end_head;
372 			end_sec = entry->end_sector & 0x3f;
373 
374 			if(end_head == 63) {
375 				param->heads = 64;
376 				param->sectors = 32;
377 				break;
378 			} else if(end_head == 127) {
379 				param->heads = 128;
380 				param->sectors = 32;
381 				break;
382 			} else if(end_head == 254) {
383 				param->heads = 255;
384 				param->sectors = 63;
385 				break;
386 			}
387 			entry++;
388 		}
389 
390 		if (num == 4) {
391 			end_head = first->end_head;
392 			end_sec = first->end_sector & 0x3f;
393 		}
394 
395 		param->cylinders = cap_to_cyls(capacity, param->heads * param->sectors);
396 		if (num < 4 && end_sec == param->sectors) {
397 			if (param->cylinders != saved_cylinders)
398 				dprintk((KERN_DEBUG "Adopting geometry: heads=%d, sectors=%d from partition table %d.\n",
399 					param->heads, param->sectors, num));
400 		} else if (end_head > 0 || end_sec > 0) {
401 			dprintk((KERN_DEBUG "Strange geometry: heads=%d, sectors=%d in partition table %d.\n",
402 				end_head + 1, end_sec, num));
403 			dprintk((KERN_DEBUG "Using geometry: heads=%d, sectors=%d.\n",
404 					param->heads, param->sectors));
405 		}
406 	}
407 	kfree(buf);
408 	return 0;
409 }
410 
411 /**
412  *	aac_slave_configure		-	compute queue depths
413  *	@sdev:	SCSI device we are considering
414  *
415  *	Selects queue depths for each target device based on the host adapter's
416  *	total capacity and the queue depth supported by the target device.
417  *	A queue depth of one automatically disables tagged queueing.
418  */
419 
420 static int aac_slave_configure(struct scsi_device *sdev)
421 {
422 	struct aac_dev *aac = (struct aac_dev *)sdev->host->hostdata;
423 	if (aac->jbod && (sdev->type == TYPE_DISK))
424 		sdev->removable = 1;
425 	if ((sdev->type == TYPE_DISK) &&
426 			(sdev_channel(sdev) != CONTAINER_CHANNEL) &&
427 			(!aac->jbod || sdev->inq_periph_qual) &&
428 			(!aac->raid_scsi_mode || (sdev_channel(sdev) != 2))) {
429 		if (expose_physicals == 0)
430 			return -ENXIO;
431 		if (expose_physicals < 0)
432 			sdev->no_uld_attach = 1;
433 	}
434 	if (sdev->tagged_supported && (sdev->type == TYPE_DISK) &&
435 			(!aac->raid_scsi_mode || (sdev_channel(sdev) != 2)) &&
436 			!sdev->no_uld_attach) {
437 		struct scsi_device * dev;
438 		struct Scsi_Host *host = sdev->host;
439 		unsigned num_lsu = 0;
440 		unsigned num_one = 0;
441 		unsigned depth;
442 		unsigned cid;
443 
444 		/*
445 		 * Firmware has an individual device recovery time typically
446 		 * of 35 seconds, give us a margin.
447 		 */
448 		if (sdev->request_queue->rq_timeout < (45 * HZ))
449 			blk_queue_rq_timeout(sdev->request_queue, 45*HZ);
450 		for (cid = 0; cid < aac->maximum_num_containers; ++cid)
451 			if (aac->fsa_dev[cid].valid)
452 				++num_lsu;
453 		__shost_for_each_device(dev, host) {
454 			if (dev->tagged_supported && (dev->type == TYPE_DISK) &&
455 					(!aac->raid_scsi_mode ||
456 						(sdev_channel(sdev) != 2)) &&
457 					!dev->no_uld_attach) {
458 				if ((sdev_channel(dev) != CONTAINER_CHANNEL)
459 				 || !aac->fsa_dev[sdev_id(dev)].valid)
460 					++num_lsu;
461 			} else
462 				++num_one;
463 		}
464 		if (num_lsu == 0)
465 			++num_lsu;
466 		depth = (host->can_queue - num_one) / num_lsu;
467 		if (depth > 256)
468 			depth = 256;
469 		else if (depth < 2)
470 			depth = 2;
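		/*
		 * Example (illustrative numbers): with host->can_queue = 512,
		 * two untagged devices (num_one = 2) and five logical storage
		 * units (num_lsu = 5), each LSU gets a depth of
		 * (512 - 2) / 5 = 102, clamped to the 2..256 range.
		 */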
471 		scsi_adjust_queue_depth(sdev, MSG_ORDERED_TAG, depth);
472 	} else
473 		scsi_adjust_queue_depth(sdev, 0, 1);
474 
475 	return 0;
476 }
477 
478 /**
479  *	aac_change_queue_depth		-	alter queue depths
480  *	@sdev:	SCSI device we are considering
481  *	@depth:	desired queue depth
482  *
483  *	Alters queue depths for target device based on the host adapter's
484  *	total capacity and the queue depth supported by the target device.
485  */
486 
487 static int aac_change_queue_depth(struct scsi_device *sdev, int depth,
488 				  int reason)
489 {
490 	if (reason != SCSI_QDEPTH_DEFAULT)
491 		return -EOPNOTSUPP;
492 
493 	if (sdev->tagged_supported && (sdev->type == TYPE_DISK) &&
494 	    (sdev_channel(sdev) == CONTAINER_CHANNEL)) {
495 		struct scsi_device * dev;
496 		struct Scsi_Host *host = sdev->host;
497 		unsigned num = 0;
498 
499 		__shost_for_each_device(dev, host) {
500 			if (dev->tagged_supported && (dev->type == TYPE_DISK) &&
501 			    (sdev_channel(dev) == CONTAINER_CHANNEL))
502 				++num;
503 			++num;
504 		}
505 		if (num >= host->can_queue)
506 			num = host->can_queue - 1;
507 		if (depth > (host->can_queue - num))
508 			depth = host->can_queue - num;
509 		if (depth > 256)
510 			depth = 256;
511 		else if (depth < 2)
512 			depth = 2;
513 		scsi_adjust_queue_depth(sdev, MSG_ORDERED_TAG, depth);
514 	} else
515 		scsi_adjust_queue_depth(sdev, 0, 1);
516 	return sdev->queue_depth;
517 }
518 
519 static ssize_t aac_show_raid_level(struct device *dev, struct device_attribute *attr, char *buf)
520 {
521 	struct scsi_device *sdev = to_scsi_device(dev);
522 	struct aac_dev *aac = (struct aac_dev *)(sdev->host->hostdata);
523 	if (sdev_channel(sdev) != CONTAINER_CHANNEL)
524 		return snprintf(buf, PAGE_SIZE, sdev->no_uld_attach
525 		  ? "Hidden\n" :
526 		  ((aac->jbod && (sdev->type == TYPE_DISK)) ? "JBOD\n" : ""));
527 	return snprintf(buf, PAGE_SIZE, "%s\n",
528 	  get_container_type(aac->fsa_dev[sdev_id(sdev)].type));
529 }
530 
531 static struct device_attribute aac_raid_level_attr = {
532 	.attr = {
533 		.name = "level",
534 		.mode = S_IRUGO,
535 	},
536 	.show = aac_show_raid_level
537 };
538 
539 static struct device_attribute *aac_dev_attrs[] = {
540 	&aac_raid_level_attr,
541 	NULL,
542 };
543 
544 static int aac_ioctl(struct scsi_device *sdev, int cmd, void __user * arg)
545 {
546 	struct aac_dev *dev = (struct aac_dev *)sdev->host->hostdata;
547 	if (!capable(CAP_SYS_RAWIO))
548 		return -EPERM;
549 	return aac_do_ioctl(dev, cmd, arg);
550 }
551 
552 static int aac_eh_abort(struct scsi_cmnd* cmd)
553 {
554 	struct scsi_device * dev = cmd->device;
555 	struct Scsi_Host * host = dev->host;
556 	struct aac_dev * aac = (struct aac_dev *)host->hostdata;
557 	int count;
558 	int ret = FAILED;
559 
560 	printk(KERN_ERR "%s: Host adapter abort request (%d,%d,%d,%d)\n",
561 		AAC_DRIVERNAME,
562 		host->host_no, sdev_channel(dev), sdev_id(dev), dev->lun);
563 	switch (cmd->cmnd[0]) {
564 	case SERVICE_ACTION_IN:
565 		if (!(aac->raw_io_interface) ||
566 		    !(aac->raw_io_64) ||
567 		    ((cmd->cmnd[1] & 0x1f) != SAI_READ_CAPACITY_16))
568 			break;
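		/* else fall through to the INQUIRY/READ_CAPACITY handling */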
569 	case INQUIRY:
570 	case READ_CAPACITY:
571 		/* Mark associated FIB to not complete, eh handler does this */
572 		for (count = 0; count < (host->can_queue + AAC_NUM_MGT_FIB); ++count) {
573 			struct fib * fib = &aac->fibs[count];
574 			if (fib->hw_fib_va->header.XferState &&
575 			  (fib->flags & FIB_CONTEXT_FLAG) &&
576 			  (fib->callback_data == cmd)) {
577 				fib->flags |= FIB_CONTEXT_FLAG_TIMED_OUT;
578 				cmd->SCp.phase = AAC_OWNER_ERROR_HANDLER;
579 				ret = SUCCESS;
580 			}
581 		}
582 		break;
583 	case TEST_UNIT_READY:
584 		/* Mark associated FIB to not complete, eh handler does this */
585 		for (count = 0; count < (host->can_queue + AAC_NUM_MGT_FIB); ++count) {
586 			struct scsi_cmnd * command;
587 			struct fib * fib = &aac->fibs[count];
588 			if ((fib->hw_fib_va->header.XferState & cpu_to_le32(Async | NoResponseExpected)) &&
589 			  (fib->flags & FIB_CONTEXT_FLAG) &&
590 			  ((command = fib->callback_data)) &&
591 			  (command->device == cmd->device)) {
592 				fib->flags |= FIB_CONTEXT_FLAG_TIMED_OUT;
593 				command->SCp.phase = AAC_OWNER_ERROR_HANDLER;
594 				if (command == cmd)
595 					ret = SUCCESS;
596 			}
597 		}
598 	}
599 	return ret;
600 }
601 
602 /*
603  *	aac_eh_reset	- Reset command handling
604  *	@cmd:	SCSI command block causing the reset
605  *
606  */
607 static int aac_eh_reset(struct scsi_cmnd* cmd)
608 {
609 	struct scsi_device * dev = cmd->device;
610 	struct Scsi_Host * host = dev->host;
611 	struct scsi_cmnd * command;
612 	int count;
613 	struct aac_dev * aac = (struct aac_dev *)host->hostdata;
614 	unsigned long flags;
615 
616 	/* Mark the associated FIB to not complete, eh handler does this */
617 	for (count = 0; count < (host->can_queue + AAC_NUM_MGT_FIB); ++count) {
618 		struct fib * fib = &aac->fibs[count];
619 		if (fib->hw_fib_va->header.XferState &&
620 		  (fib->flags & FIB_CONTEXT_FLAG) &&
621 		  (fib->callback_data == cmd)) {
622 			fib->flags |= FIB_CONTEXT_FLAG_TIMED_OUT;
623 			cmd->SCp.phase = AAC_OWNER_ERROR_HANDLER;
624 		}
625 	}
626 	printk(KERN_ERR "%s: Host adapter reset request. SCSI hang ?\n",
627 					AAC_DRIVERNAME);
628 
629 	if ((count = aac_check_health(aac)))
630 		return count;
631 	/*
632 	 * Wait for all commands to complete to this specific
633 	 * target (block maximum 60 seconds).
634 	 */
635 	for (count = 60; count; --count) {
636 		int active = aac->in_reset;
637 
638 		if (active == 0)
639 		__shost_for_each_device(dev, host) {
640 			spin_lock_irqsave(&dev->list_lock, flags);
641 			list_for_each_entry(command, &dev->cmd_list, list) {
642 				if ((command != cmd) &&
643 				    (command->SCp.phase == AAC_OWNER_FIRMWARE)) {
644 					active++;
645 					break;
646 				}
647 			}
648 			spin_unlock_irqrestore(&dev->list_lock, flags);
649 			if (active)
650 				break;
651 
652 		}
653 		/*
654 		 * We can exit if all the commands are complete.
655 		 */
656 		if (active == 0)
657 			return SUCCESS;
658 		ssleep(1);
659 	}
660 	printk(KERN_ERR "%s: SCSI bus appears hung\n", AAC_DRIVERNAME);
661 	/*
662 	 * This adapter needs a blind reset; only do so for adapters that
663 	 * support a register-based, instead of a commanded, reset.
664 	 */
665 	if (((aac->supplement_adapter_info.SupportedOptions2 &
666 	  AAC_OPTION_MU_RESET) ||
667 	  (aac->supplement_adapter_info.SupportedOptions2 &
668 	  AAC_OPTION_DOORBELL_RESET)) &&
669 	  aac_check_reset &&
670 	  ((aac_check_reset != 1) ||
671 	   !(aac->supplement_adapter_info.SupportedOptions2 &
672 	    AAC_OPTION_IGNORE_RESET)))
673 		aac_reset_adapter(aac, 2); /* Bypass wait for command quiesce */
674 	return SUCCESS; /* Cause an immediate retry of the command with a ten second delay after successful tur */
675 }
676 
677 /**
678  *	aac_cfg_open		-	open a configuration file
679  *	@inode: inode being opened
680  *	@file: file handle attached
681  *
682  *	Called when the configuration device is opened. Does the needed
683  *	setup on the handle and then returns.
684  *
685  *	Bugs: This needs extending to check a given adapter is present
686  *	so we can support hot plugging, and to ref count adapters.
687  */
688 
689 static int aac_cfg_open(struct inode *inode, struct file *file)
690 {
691 	struct aac_dev *aac;
692 	unsigned minor_number = iminor(inode);
693 	int err = -ENODEV;
694 
695 	mutex_lock(&aac_mutex);  /* BKL pushdown: nothing else protects this list */
696 	list_for_each_entry(aac, &aac_devices, entry) {
697 		if (aac->id == minor_number) {
698 			file->private_data = aac;
699 			err = 0;
700 			break;
701 		}
702 	}
703 	mutex_unlock(&aac_mutex);
704 
705 	return err;
706 }
707 
708 /**
709  *	aac_cfg_ioctl		-	AAC configuration request
710  *	@inode: inode of device
711  *	@file: file handle
712  *	@cmd: ioctl command code
713  *	@arg: argument
714  *
715  *	Handles a configuration ioctl. Currently this involves wrapping it
716  *	up and feeding it into the nasty Windows-like glue layer.
717  *
718  *	Bugs: Needs locking against parallel ioctls lower down
719  *	Bugs: Needs to handle hot plugging
720  */
721 
722 static long aac_cfg_ioctl(struct file *file,
723 		unsigned int cmd, unsigned long arg)
724 {
725 	int ret;
726 	if (!capable(CAP_SYS_RAWIO))
727 		return -EPERM;
728 	mutex_lock(&aac_mutex);
729 	ret = aac_do_ioctl(file->private_data, cmd, (void __user *)arg);
730 	mutex_unlock(&aac_mutex);
731 
732 	return ret;
733 }
734 
735 #ifdef CONFIG_COMPAT
736 static long aac_compat_do_ioctl(struct aac_dev *dev, unsigned cmd, unsigned long arg)
737 {
738 	long ret;
739 	mutex_lock(&aac_mutex);
740 	switch (cmd) {
741 	case FSACTL_MINIPORT_REV_CHECK:
742 	case FSACTL_SENDFIB:
743 	case FSACTL_OPEN_GET_ADAPTER_FIB:
744 	case FSACTL_CLOSE_GET_ADAPTER_FIB:
745 	case FSACTL_SEND_RAW_SRB:
746 	case FSACTL_GET_PCI_INFO:
747 	case FSACTL_QUERY_DISK:
748 	case FSACTL_DELETE_DISK:
749 	case FSACTL_FORCE_DELETE_DISK:
750 	case FSACTL_GET_CONTAINERS:
751 	case FSACTL_SEND_LARGE_FIB:
752 		ret = aac_do_ioctl(dev, cmd, (void __user *)arg);
753 		break;
754 
755 	case FSACTL_GET_NEXT_ADAPTER_FIB: {
756 		struct fib_ioctl __user *f;
757 
758 		f = compat_alloc_user_space(sizeof(*f));
759 		ret = 0;
760 		if (clear_user(f, sizeof(*f)))
761 			ret = -EFAULT;
762 		if (copy_in_user(f, (void __user *)arg, sizeof(struct fib_ioctl) - sizeof(u32)))
763 			ret = -EFAULT;
764 		if (!ret)
765 			ret = aac_do_ioctl(dev, cmd, f);
766 		break;
767 	}
768 
769 	default:
770 		ret = -ENOIOCTLCMD;
771 		break;
772 	}
773 	mutex_unlock(&aac_mutex);
774 	return ret;
775 }
776 
777 static int aac_compat_ioctl(struct scsi_device *sdev, int cmd, void __user *arg)
778 {
779 	struct aac_dev *dev = (struct aac_dev *)sdev->host->hostdata;
780 	return aac_compat_do_ioctl(dev, cmd, (unsigned long)arg);
781 }
782 
783 static long aac_compat_cfg_ioctl(struct file *file, unsigned cmd, unsigned long arg)
784 {
785 	if (!capable(CAP_SYS_RAWIO))
786 		return -EPERM;
787 	return aac_compat_do_ioctl(file->private_data, cmd, arg);
788 }
789 #endif
790 
791 static ssize_t aac_show_model(struct device *device,
792 			      struct device_attribute *attr, char *buf)
793 {
794 	struct aac_dev *dev = (struct aac_dev*)class_to_shost(device)->hostdata;
795 	int len;
796 
797 	if (dev->supplement_adapter_info.AdapterTypeText[0]) {
798 		char * cp = dev->supplement_adapter_info.AdapterTypeText;
799 		while (*cp && *cp != ' ')
800 			++cp;
801 		while (*cp == ' ')
802 			++cp;
803 		len = snprintf(buf, PAGE_SIZE, "%s\n", cp);
804 	} else
805 		len = snprintf(buf, PAGE_SIZE, "%s\n",
806 		  aac_drivers[dev->cardtype].model);
807 	return len;
808 }
809 
810 static ssize_t aac_show_vendor(struct device *device,
811 			       struct device_attribute *attr, char *buf)
812 {
813 	struct aac_dev *dev = (struct aac_dev*)class_to_shost(device)->hostdata;
814 	int len;
815 
816 	if (dev->supplement_adapter_info.AdapterTypeText[0]) {
817 		char * cp = dev->supplement_adapter_info.AdapterTypeText;
818 		while (*cp && *cp != ' ')
819 			++cp;
820 		len = snprintf(buf, PAGE_SIZE, "%.*s\n",
821 		  (int)(cp - (char *)dev->supplement_adapter_info.AdapterTypeText),
822 		  dev->supplement_adapter_info.AdapterTypeText);
823 	} else
824 		len = snprintf(buf, PAGE_SIZE, "%s\n",
825 		  aac_drivers[dev->cardtype].vname);
826 	return len;
827 }
828 
829 static ssize_t aac_show_flags(struct device *cdev,
830 			      struct device_attribute *attr, char *buf)
831 {
832 	int len = 0;
833 	struct aac_dev *dev = (struct aac_dev*)class_to_shost(cdev)->hostdata;
834 
835 	if (nblank(dprintk(x)))
836 		len = snprintf(buf, PAGE_SIZE, "dprintk\n");
837 #ifdef AAC_DETAILED_STATUS_INFO
838 	len += snprintf(buf + len, PAGE_SIZE - len,
839 			"AAC_DETAILED_STATUS_INFO\n");
840 #endif
841 	if (dev->raw_io_interface && dev->raw_io_64)
842 		len += snprintf(buf + len, PAGE_SIZE - len,
843 				"SAI_READ_CAPACITY_16\n");
844 	if (dev->jbod)
845 		len += snprintf(buf + len, PAGE_SIZE - len, "SUPPORTED_JBOD\n");
846 	if (dev->supplement_adapter_info.SupportedOptions2 &
847 		AAC_OPTION_POWER_MANAGEMENT)
848 		len += snprintf(buf + len, PAGE_SIZE - len,
849 				"SUPPORTED_POWER_MANAGEMENT\n");
850 	if (dev->msi)
851 		len += snprintf(buf + len, PAGE_SIZE - len, "PCI_HAS_MSI\n");
852 	return len;
853 }
854 
855 static ssize_t aac_show_kernel_version(struct device *device,
856 				       struct device_attribute *attr,
857 				       char *buf)
858 {
859 	struct aac_dev *dev = (struct aac_dev*)class_to_shost(device)->hostdata;
860 	int len, tmp;
861 
862 	tmp = le32_to_cpu(dev->adapter_info.kernelrev);
863 	len = snprintf(buf, PAGE_SIZE, "%d.%d-%d[%d]\n",
864 	  tmp >> 24, (tmp >> 16) & 0xff, tmp & 0xff,
865 	  le32_to_cpu(dev->adapter_info.kernelbuild));
866 	return len;
867 }
868 
869 static ssize_t aac_show_monitor_version(struct device *device,
870 					struct device_attribute *attr,
871 					char *buf)
872 {
873 	struct aac_dev *dev = (struct aac_dev*)class_to_shost(device)->hostdata;
874 	int len, tmp;
875 
876 	tmp = le32_to_cpu(dev->adapter_info.monitorrev);
877 	len = snprintf(buf, PAGE_SIZE, "%d.%d-%d[%d]\n",
878 	  tmp >> 24, (tmp >> 16) & 0xff, tmp & 0xff,
879 	  le32_to_cpu(dev->adapter_info.monitorbuild));
880 	return len;
881 }
882 
883 static ssize_t aac_show_bios_version(struct device *device,
884 				     struct device_attribute *attr,
885 				     char *buf)
886 {
887 	struct aac_dev *dev = (struct aac_dev*)class_to_shost(device)->hostdata;
888 	int len, tmp;
889 
890 	tmp = le32_to_cpu(dev->adapter_info.biosrev);
891 	len = snprintf(buf, PAGE_SIZE, "%d.%d-%d[%d]\n",
892 	  tmp >> 24, (tmp >> 16) & 0xff, tmp & 0xff,
893 	  le32_to_cpu(dev->adapter_info.biosbuild));
894 	return len;
895 }
896 
897 static ssize_t aac_show_serial_number(struct device *device,
898 			       struct device_attribute *attr, char *buf)
899 {
900 	struct aac_dev *dev = (struct aac_dev*)class_to_shost(device)->hostdata;
901 	int len = 0;
902 
903 	if (le32_to_cpu(dev->adapter_info.serial[0]) != 0xBAD0)
904 		len = snprintf(buf, 16, "%06X\n",
905 		  le32_to_cpu(dev->adapter_info.serial[0]));
906 	if (len &&
907 	  !memcmp(&dev->supplement_adapter_info.MfgPcbaSerialNo[
908 	    sizeof(dev->supplement_adapter_info.MfgPcbaSerialNo)-len],
909 	  buf, len-1))
910 		len = snprintf(buf, 16, "%.*s\n",
911 		  (int)sizeof(dev->supplement_adapter_info.MfgPcbaSerialNo),
912 		  dev->supplement_adapter_info.MfgPcbaSerialNo);
913 
914 	return min(len, 16);
915 }
916 
917 static ssize_t aac_show_max_channel(struct device *device,
918 				    struct device_attribute *attr, char *buf)
919 {
920 	return snprintf(buf, PAGE_SIZE, "%d\n",
921 	  class_to_shost(device)->max_channel);
922 }
923 
924 static ssize_t aac_show_max_id(struct device *device,
925 			       struct device_attribute *attr, char *buf)
926 {
927 	return snprintf(buf, PAGE_SIZE, "%d\n",
928 	  class_to_shost(device)->max_id);
929 }
930 
931 static ssize_t aac_store_reset_adapter(struct device *device,
932 				       struct device_attribute *attr,
933 				       const char *buf, size_t count)
934 {
935 	int retval = -EACCES;
936 
937 	if (!capable(CAP_SYS_ADMIN))
938 		return retval;
939 	retval = aac_reset_adapter((struct aac_dev*)class_to_shost(device)->hostdata, buf[0] == '!');
940 	if (retval >= 0)
941 		retval = count;
942 	return retval;
943 }
944 
945 static ssize_t aac_show_reset_adapter(struct device *device,
946 				      struct device_attribute *attr,
947 				      char *buf)
948 {
949 	struct aac_dev *dev = (struct aac_dev*)class_to_shost(device)->hostdata;
950 	int len, tmp;
951 
952 	tmp = aac_adapter_check_health(dev);
953 	if ((tmp == 0) && dev->in_reset)
954 		tmp = -EBUSY;
955 	len = snprintf(buf, PAGE_SIZE, "0x%x\n", tmp);
956 	return len;
957 }
958 
959 static struct device_attribute aac_model = {
960 	.attr = {
961 		.name = "model",
962 		.mode = S_IRUGO,
963 	},
964 	.show = aac_show_model,
965 };
966 static struct device_attribute aac_vendor = {
967 	.attr = {
968 		.name = "vendor",
969 		.mode = S_IRUGO,
970 	},
971 	.show = aac_show_vendor,
972 };
973 static struct device_attribute aac_flags = {
974 	.attr = {
975 		.name = "flags",
976 		.mode = S_IRUGO,
977 	},
978 	.show = aac_show_flags,
979 };
980 static struct device_attribute aac_kernel_version = {
981 	.attr = {
982 		.name = "hba_kernel_version",
983 		.mode = S_IRUGO,
984 	},
985 	.show = aac_show_kernel_version,
986 };
987 static struct device_attribute aac_monitor_version = {
988 	.attr = {
989 		.name = "hba_monitor_version",
990 		.mode = S_IRUGO,
991 	},
992 	.show = aac_show_monitor_version,
993 };
994 static struct device_attribute aac_bios_version = {
995 	.attr = {
996 		.name = "hba_bios_version",
997 		.mode = S_IRUGO,
998 	},
999 	.show = aac_show_bios_version,
1000 };
1001 static struct device_attribute aac_serial_number = {
1002 	.attr = {
1003 		.name = "serial_number",
1004 		.mode = S_IRUGO,
1005 	},
1006 	.show = aac_show_serial_number,
1007 };
1008 static struct device_attribute aac_max_channel = {
1009 	.attr = {
1010 		.name = "max_channel",
1011 		.mode = S_IRUGO,
1012 	},
1013 	.show = aac_show_max_channel,
1014 };
1015 static struct device_attribute aac_max_id = {
1016 	.attr = {
1017 		.name = "max_id",
1018 		.mode = S_IRUGO,
1019 	},
1020 	.show = aac_show_max_id,
1021 };
1022 static struct device_attribute aac_reset = {
1023 	.attr = {
1024 		.name = "reset_host",
1025 		.mode = S_IWUSR|S_IRUGO,
1026 	},
1027 	.store = aac_store_reset_adapter,
1028 	.show = aac_show_reset_adapter,
1029 };
1030 
1031 static struct device_attribute *aac_attrs[] = {
1032 	&aac_model,
1033 	&aac_vendor,
1034 	&aac_flags,
1035 	&aac_kernel_version,
1036 	&aac_monitor_version,
1037 	&aac_bios_version,
1038 	&aac_serial_number,
1039 	&aac_max_channel,
1040 	&aac_max_id,
1041 	&aac_reset,
1042 	NULL
1043 };
1044 
1045 ssize_t aac_get_serial_number(struct device *device, char *buf)
1046 {
1047 	return aac_show_serial_number(device, &aac_serial_number, buf);
1048 }
1049 
1050 static const struct file_operations aac_cfg_fops = {
1051 	.owner		= THIS_MODULE,
1052 	.unlocked_ioctl	= aac_cfg_ioctl,
1053 #ifdef CONFIG_COMPAT
1054 	.compat_ioctl   = aac_compat_cfg_ioctl,
1055 #endif
1056 	.open		= aac_cfg_open,
1057 	.llseek		= noop_llseek,
1058 };
1059 
1060 static struct scsi_host_template aac_driver_template = {
1061 	.module				= THIS_MODULE,
1062 	.name				= "AAC",
1063 	.proc_name			= AAC_DRIVERNAME,
1064 	.info				= aac_info,
1065 	.ioctl				= aac_ioctl,
1066 #ifdef CONFIG_COMPAT
1067 	.compat_ioctl			= aac_compat_ioctl,
1068 #endif
1069 	.queuecommand			= aac_queuecommand,
1070 	.bios_param			= aac_biosparm,
1071 	.shost_attrs			= aac_attrs,
1072 	.slave_configure		= aac_slave_configure,
1073 	.change_queue_depth		= aac_change_queue_depth,
1074 	.sdev_attrs			= aac_dev_attrs,
1075 	.eh_abort_handler		= aac_eh_abort,
1076 	.eh_host_reset_handler		= aac_eh_reset,
1077 	.can_queue			= AAC_NUM_IO_FIB,
1078 	.this_id			= MAXIMUM_NUM_CONTAINERS,
1079 	.sg_tablesize			= 16,
1080 	.max_sectors			= 128,
1081 #if (AAC_NUM_IO_FIB > 256)
1082 	.cmd_per_lun			= 256,
1083 #else
1084 	.cmd_per_lun			= AAC_NUM_IO_FIB,
1085 #endif
1086 	.use_clustering			= ENABLE_CLUSTERING,
1087 	.emulated			= 1,
1088 };
1089 
1090 static void __aac_shutdown(struct aac_dev * aac)
1091 {
1092 	if (aac->aif_thread) {
1093 		int i;
1094 		/* Clear out events first */
1095 		for (i = 0; i < (aac->scsi_host_ptr->can_queue + AAC_NUM_MGT_FIB); i++) {
1096 			struct fib *fib = &aac->fibs[i];
1097 			if (!(fib->hw_fib_va->header.XferState & cpu_to_le32(NoResponseExpected | Async)) &&
1098 			    (fib->hw_fib_va->header.XferState & cpu_to_le32(ResponseExpected)))
1099 				up(&fib->event_wait);
1100 		}
1101 		kthread_stop(aac->thread);
1102 	}
1103 	aac_send_shutdown(aac);
1104 	aac_adapter_disable_int(aac);
1105 	free_irq(aac->pdev->irq, aac);
1106 	if (aac->msi)
1107 		pci_disable_msi(aac->pdev);
1108 }
1109 
1110 static int __devinit aac_probe_one(struct pci_dev *pdev,
1111 		const struct pci_device_id *id)
1112 {
1113 	unsigned index = id->driver_data;
1114 	struct Scsi_Host *shost;
1115 	struct aac_dev *aac;
1116 	struct list_head *insert = &aac_devices;
1117 	int error = -ENODEV;
1118 	int unique_id = 0;
1119 	u64 dmamask;
1120 	extern int aac_sync_mode;
1121 
1122 	list_for_each_entry(aac, &aac_devices, entry) {
1123 		if (aac->id > unique_id)
1124 			break;
1125 		insert = &aac->entry;
1126 		unique_id++;
1127 	}
1128 
1129 	pci_disable_link_state(pdev, PCIE_LINK_STATE_L0S | PCIE_LINK_STATE_L1 |
1130 			       PCIE_LINK_STATE_CLKPM);
1131 
1132 	error = pci_enable_device(pdev);
1133 	if (error)
1134 		goto out;
1135 	error = -ENODEV;
1136 
1137 	/*
1138 	 * If the quirk31 bit is set, the adapter needs its adapter-to-driver
1139 	 * communication memory to be allocated below the 2 GB boundary.
1140 	 */
1141 	if (aac_drivers[index].quirks & AAC_QUIRK_31BIT)
1142 		dmamask = DMA_BIT_MASK(31);
1143 	else
1144 		dmamask = DMA_BIT_MASK(32);
1145 
1146 	if (pci_set_dma_mask(pdev, dmamask) ||
1147 			pci_set_consistent_dma_mask(pdev, dmamask))
1148 		goto out_disable_pdev;
1149 
1150 	pci_set_master(pdev);
1151 
1152 	shost = scsi_host_alloc(&aac_driver_template, sizeof(struct aac_dev));
1153 	if (!shost)
1154 		goto out_disable_pdev;
1155 
1156 	shost->irq = pdev->irq;
1157 	shost->unique_id = unique_id;
1158 	shost->max_cmd_len = 16;
1159 
1160 	aac = (struct aac_dev *)shost->hostdata;
1161 	aac->base_start = pci_resource_start(pdev, 0);
1162 	aac->scsi_host_ptr = shost;
1163 	aac->pdev = pdev;
1164 	aac->name = aac_driver_template.name;
1165 	aac->id = shost->unique_id;
1166 	aac->cardtype = index;
1167 	INIT_LIST_HEAD(&aac->entry);
1168 
1169 	aac->fibs = kzalloc(sizeof(struct fib) * (shost->can_queue + AAC_NUM_MGT_FIB), GFP_KERNEL);
1170 	if (!aac->fibs)
1171 		goto out_free_host;
1172 	spin_lock_init(&aac->fib_lock);
1173 
1174 	/*
1175 	 *	Map in the registers from the adapter.
1176 	 */
1177 	aac->base_size = AAC_MIN_FOOTPRINT_SIZE;
1178 	if ((*aac_drivers[index].init)(aac))
1179 		goto out_unmap;
1180 
1181 	if (aac->sync_mode) {
1182 		if (aac_sync_mode)
1183 			printk(KERN_INFO "%s%d: Sync. mode enforced "
1184 				"by driver parameter. This will cause "
1185 				"a significant performance decrease!\n",
1186 				aac->name,
1187 				aac->id);
1188 		else
1189 			printk(KERN_INFO "%s%d: Async. mode not supported "
1190 				"by current driver, sync. mode enforced."
1191 				"\nPlease update driver to get full performance.\n",
1192 				aac->name,
1193 				aac->id);
1194 	}
1195 
1196 	/*
1197 	 *	Start any kernel threads needed
1198 	 */
1199 	aac->thread = kthread_run(aac_command_thread, aac, AAC_DRIVERNAME);
1200 	if (IS_ERR(aac->thread)) {
1201 		printk(KERN_ERR "aacraid: Unable to create command thread.\n");
1202 		error = PTR_ERR(aac->thread);
1203 		aac->thread = NULL;
1204 		goto out_deinit;
1205 	}
1206 
1207 	/*
1208 	 * If we had set a smaller DMA mask earlier, widen it to the full
1209 	 * 32-bit (4 GB) mask now, since the adapter can DMA data to at
1210 	 * least a 4 GB address space.
1211 	 */
1212 	if (aac_drivers[index].quirks & AAC_QUIRK_31BIT)
1213 		if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)))
1214 			goto out_deinit;
1215 
1216 	aac->maximum_num_channels = aac_drivers[index].channels;
1217 	error = aac_get_adapter_info(aac);
1218 	if (error < 0)
1219 		goto out_deinit;
1220 
1221 	/*
1222 	 * Let's override negotiations and drop the maximum SG limit to 34.
1223 	 */
1224 	if ((aac_drivers[index].quirks & AAC_QUIRK_34SG) &&
1225 			(shost->sg_tablesize > 34)) {
1226 		shost->sg_tablesize = 34;
1227 		shost->max_sectors = (shost->sg_tablesize * 8) + 112;
1228 	}
1229 
1230 	if ((aac_drivers[index].quirks & AAC_QUIRK_17SG) &&
1231 			(shost->sg_tablesize > 17)) {
1232 		shost->sg_tablesize = 17;
1233 		shost->max_sectors = (shost->sg_tablesize * 8) + 112;
1234 	}
1235 
1236 	error = pci_set_dma_max_seg_size(pdev,
1237 		(aac->adapter_info.options & AAC_OPT_NEW_COMM) ?
1238 			(shost->max_sectors << 9) : 65536);
1239 	if (error)
1240 		goto out_deinit;
1241 
1242 	/*
1243 	 * Firmware printf works only with older firmware.
1244 	 */
1245 	if (aac_drivers[index].quirks & AAC_QUIRK_34SG)
1246 		aac->printf_enabled = 1;
1247 	else
1248 		aac->printf_enabled = 0;
1249 
1250 	/*
1251 	 * max_channel will be the number of physical channels plus one virtual
1252 	 * channel; all containers are on virtual channel 0 (CONTAINER_CHANNEL),
1253 	 * and physical channels are addressed by their actual physical number + 1.
1254 	 */
1255 	if (aac->nondasd_support || expose_physicals || aac->jbod)
1256 		shost->max_channel = aac->maximum_num_channels;
1257 	else
1258 		shost->max_channel = 0;
1259 
1260 	aac_get_config_status(aac, 0);
1261 	aac_get_containers(aac);
1262 	list_add(&aac->entry, insert);
1263 
1264 	shost->max_id = aac->maximum_num_containers;
1265 	if (shost->max_id < aac->maximum_num_physicals)
1266 		shost->max_id = aac->maximum_num_physicals;
1267 	if (shost->max_id < MAXIMUM_NUM_CONTAINERS)
1268 		shost->max_id = MAXIMUM_NUM_CONTAINERS;
1269 	else
1270 		shost->this_id = shost->max_id;
1271 
1272 	/*
1273 	 * dmb - we may need to move the setting of these parms somewhere else once
1274 	 * we get a fib that can report the actual numbers
1275 	 */
1276 	shost->max_lun = AAC_MAX_LUN;
1277 
1278 	pci_set_drvdata(pdev, shost);
1279 
1280 	error = scsi_add_host(shost, &pdev->dev);
1281 	if (error)
1282 		goto out_deinit;
1283 	scsi_scan_host(shost);
1284 
1285 	return 0;
1286 
1287  out_deinit:
1288 	__aac_shutdown(aac);
1289  out_unmap:
1290 	aac_fib_map_free(aac);
1291 	if (aac->comm_addr)
1292 		pci_free_consistent(aac->pdev, aac->comm_size, aac->comm_addr,
1293 		  aac->comm_phys);
1294 	kfree(aac->queues);
1295 	aac_adapter_ioremap(aac, 0);
1296 	kfree(aac->fibs);
1297 	kfree(aac->fsa_dev);
1298  out_free_host:
1299 	scsi_host_put(shost);
1300  out_disable_pdev:
1301 	pci_disable_device(pdev);
1302  out:
1303 	return error;
1304 }
1305 
1306 static void aac_shutdown(struct pci_dev *dev)
1307 {
1308 	struct Scsi_Host *shost = pci_get_drvdata(dev);
1309 	scsi_block_requests(shost);
1310 	__aac_shutdown((struct aac_dev *)shost->hostdata);
1311 }
1312 
1313 static void __devexit aac_remove_one(struct pci_dev *pdev)
1314 {
1315 	struct Scsi_Host *shost = pci_get_drvdata(pdev);
1316 	struct aac_dev *aac = (struct aac_dev *)shost->hostdata;
1317 
1318 	scsi_remove_host(shost);
1319 
1320 	__aac_shutdown(aac);
1321 	aac_fib_map_free(aac);
1322 	pci_free_consistent(aac->pdev, aac->comm_size, aac->comm_addr,
1323 			aac->comm_phys);
1324 	kfree(aac->queues);
1325 
1326 	aac_adapter_ioremap(aac, 0);
1327 
1328 	kfree(aac->fibs);
1329 	kfree(aac->fsa_dev);
1330 
1331 	list_del(&aac->entry);
1332 	scsi_host_put(shost);
1333 	pci_disable_device(pdev);
1334 	if (list_empty(&aac_devices)) {
1335 		unregister_chrdev(aac_cfg_major, "aac");
1336 		aac_cfg_major = -1;
1337 	}
1338 }
1339 
1340 static struct pci_driver aac_pci_driver = {
1341 	.name		= AAC_DRIVERNAME,
1342 	.id_table	= aac_pci_tbl,
1343 	.probe		= aac_probe_one,
1344 	.remove		= __devexit_p(aac_remove_one),
1345 	.shutdown	= aac_shutdown,
1346 };
1347 
1348 static int __init aac_init(void)
1349 {
1350 	int error;
1351 
1352 	printk(KERN_INFO "Adaptec %s driver %s\n",
1353 	  AAC_DRIVERNAME, aac_driver_version);
1354 
1355 	error = pci_register_driver(&aac_pci_driver);
1356 	if (error < 0)
1357 		return error;
1358 
1359 	aac_cfg_major = register_chrdev( 0, "aac", &aac_cfg_fops);
1360 	if (aac_cfg_major < 0) {
1361 		printk(KERN_WARNING
1362 			"aacraid: unable to register \"aac\" device.\n");
1363 	}
1364 
1365 	return 0;
1366 }
1367 
1368 static void __exit aac_exit(void)
1369 {
1370 	if (aac_cfg_major > -1)
1371 		unregister_chrdev(aac_cfg_major, "aac");
1372 	pci_unregister_driver(&aac_pci_driver);
1373 }
1374 
1375 module_init(aac_init);
1376 module_exit(aac_exit);
1377