xref: /linux/drivers/scsi/ipr.c (revision d39d0ed196aa1685bb24771e92f78633c66ac9cb)
1 /*
2  * ipr.c -- driver for IBM Power Linux RAID adapters
3  *
4  * Written By: Brian King <brking@us.ibm.com>, IBM Corporation
5  *
6  * Copyright (C) 2003, 2004 IBM Corporation
7  *
8  * This program is free software; you can redistribute it and/or modify
9  * it under the terms of the GNU General Public License as published by
10  * the Free Software Foundation; either version 2 of the License, or
11  * (at your option) any later version.
12  *
13  * This program is distributed in the hope that it will be useful,
14  * but WITHOUT ANY WARRANTY; without even the implied warranty of
15  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
16  * GNU General Public License for more details.
17  *
18  * You should have received a copy of the GNU General Public License
19  * along with this program; if not, write to the Free Software
20  * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
21  *
22  */
23 
24 /*
25  * Notes:
26  *
27  * This driver is used to control the following SCSI adapters:
28  *
29  * IBM iSeries: 5702, 5703, 2780, 5709, 570A, 570B
30  *
31  * IBM pSeries: PCI-X Dual Channel Ultra 320 SCSI RAID Adapter
32  *              PCI-X Dual Channel Ultra 320 SCSI Adapter
33  *              PCI-X Dual Channel Ultra 320 SCSI RAID Enablement Card
34  *              Embedded SCSI adapter on p615 and p655 systems
35  *
36  * Supported Hardware Features:
37  *	- Ultra 320 SCSI controller
38  *	- PCI-X host interface
39  *	- Embedded PowerPC RISC Processor and Hardware XOR DMA Engine
40  *	- Non-Volatile Write Cache
41  *	- Supports attachment of non-RAID disks, tape, and optical devices
42  *	- RAID Levels 0, 5, 10
43  *	- Hot spare
44  *	- Background Parity Checking
45  *	- Background Data Scrubbing
46  *	- Ability to increase the capacity of an existing RAID 5 disk array
47  *		by adding disks
48  *
49  * Driver Features:
50  *	- Tagged command queuing
51  *	- Adapter microcode download
52  *	- PCI hot plug
53  *	- SCSI device hot plug
54  *
55  */
56 
57 #include <linux/fs.h>
58 #include <linux/init.h>
59 #include <linux/types.h>
60 #include <linux/errno.h>
61 #include <linux/kernel.h>
62 #include <linux/slab.h>
63 #include <linux/ioport.h>
64 #include <linux/delay.h>
65 #include <linux/pci.h>
66 #include <linux/wait.h>
67 #include <linux/spinlock.h>
68 #include <linux/sched.h>
69 #include <linux/interrupt.h>
70 #include <linux/blkdev.h>
71 #include <linux/firmware.h>
72 #include <linux/module.h>
73 #include <linux/moduleparam.h>
74 #include <linux/libata.h>
75 #include <linux/hdreg.h>
76 #include <linux/reboot.h>
77 #include <linux/stringify.h>
78 #include <asm/io.h>
79 #include <asm/irq.h>
80 #include <asm/processor.h>
81 #include <scsi/scsi.h>
82 #include <scsi/scsi_host.h>
83 #include <scsi/scsi_tcq.h>
84 #include <scsi/scsi_eh.h>
85 #include <scsi/scsi_cmnd.h>
86 #include "ipr.h"
87 
88 /*
89  *   Global Data
90  */
91 static LIST_HEAD(ipr_ioa_head);
92 static unsigned int ipr_log_level = IPR_DEFAULT_LOG_LEVEL;
93 static unsigned int ipr_max_speed = 1;
94 static int ipr_testmode = 0;
95 static unsigned int ipr_fastfail = 0;
96 static unsigned int ipr_transop_timeout = 0;
97 static unsigned int ipr_debug = 0;
98 static unsigned int ipr_max_devs = IPR_DEFAULT_SIS64_DEVS;
99 static unsigned int ipr_dual_ioa_raid = 1;
100 static DEFINE_SPINLOCK(ipr_driver_lock);
101 
102 /* This table describes the differences between DMA controller chips */
103 static const struct ipr_chip_cfg_t ipr_chip_cfg[] = {
104 	{ /* Gemstone, Citrine, Obsidian, and Obsidian-E */
105 		.mailbox = 0x0042C,
106 		.cache_line_size = 0x20,
107 		{
108 			.set_interrupt_mask_reg = 0x0022C,
109 			.clr_interrupt_mask_reg = 0x00230,
110 			.clr_interrupt_mask_reg32 = 0x00230,
111 			.sense_interrupt_mask_reg = 0x0022C,
112 			.sense_interrupt_mask_reg32 = 0x0022C,
113 			.clr_interrupt_reg = 0x00228,
114 			.clr_interrupt_reg32 = 0x00228,
115 			.sense_interrupt_reg = 0x00224,
116 			.sense_interrupt_reg32 = 0x00224,
117 			.ioarrin_reg = 0x00404,
118 			.sense_uproc_interrupt_reg = 0x00214,
119 			.sense_uproc_interrupt_reg32 = 0x00214,
120 			.set_uproc_interrupt_reg = 0x00214,
121 			.set_uproc_interrupt_reg32 = 0x00214,
122 			.clr_uproc_interrupt_reg = 0x00218,
123 			.clr_uproc_interrupt_reg32 = 0x00218
124 		}
125 	},
126 	{ /* Snipe and Scamp */
127 		.mailbox = 0x0052C,
128 		.cache_line_size = 0x20,
129 		{
130 			.set_interrupt_mask_reg = 0x00288,
131 			.clr_interrupt_mask_reg = 0x0028C,
132 			.clr_interrupt_mask_reg32 = 0x0028C,
133 			.sense_interrupt_mask_reg = 0x00288,
134 			.sense_interrupt_mask_reg32 = 0x00288,
135 			.clr_interrupt_reg = 0x00284,
136 			.clr_interrupt_reg32 = 0x00284,
137 			.sense_interrupt_reg = 0x00280,
138 			.sense_interrupt_reg32 = 0x00280,
139 			.ioarrin_reg = 0x00504,
140 			.sense_uproc_interrupt_reg = 0x00290,
141 			.sense_uproc_interrupt_reg32 = 0x00290,
142 			.set_uproc_interrupt_reg = 0x00290,
143 			.set_uproc_interrupt_reg32 = 0x00290,
144 			.clr_uproc_interrupt_reg = 0x00294,
145 			.clr_uproc_interrupt_reg32 = 0x00294
146 		}
147 	},
148 	{ /* CRoC */
149 		.mailbox = 0x00040,
150 		.cache_line_size = 0x20,
151 		{
152 			.set_interrupt_mask_reg = 0x00010,
153 			.clr_interrupt_mask_reg = 0x00018,
154 			.clr_interrupt_mask_reg32 = 0x0001C,
155 			.sense_interrupt_mask_reg = 0x00010,
156 			.sense_interrupt_mask_reg32 = 0x00014,
157 			.clr_interrupt_reg = 0x00008,
158 			.clr_interrupt_reg32 = 0x0000C,
159 			.sense_interrupt_reg = 0x00000,
160 			.sense_interrupt_reg32 = 0x00004,
161 			.ioarrin_reg = 0x00070,
162 			.sense_uproc_interrupt_reg = 0x00020,
163 			.sense_uproc_interrupt_reg32 = 0x00024,
164 			.set_uproc_interrupt_reg = 0x00020,
165 			.set_uproc_interrupt_reg32 = 0x00024,
166 			.clr_uproc_interrupt_reg = 0x00028,
167 			.clr_uproc_interrupt_reg32 = 0x0002C,
168 			.init_feedback_reg = 0x0005C,
169 			.dump_addr_reg = 0x00064,
170 			.dump_data_reg = 0x00068,
171 			.endian_swap_reg = 0x00084
172 		}
173 	},
174 };
175 
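/*
 * At probe time the adapter's PCI vendor/device ID is matched against this
 * table to pick the interrupt mode (LSI vs. MSI), the SIS interface level
 * (32- vs. 64-bit), and the register layout from ipr_chip_cfg[] above.
 */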
176 static const struct ipr_chip_t ipr_chip[] = {
177 	{ PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE, IPR_USE_LSI, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[0] },
178 	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE, IPR_USE_LSI, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[0] },
179 	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN, IPR_USE_LSI, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[0] },
180 	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN, IPR_USE_LSI, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[0] },
181 	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E, IPR_USE_MSI, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[0] },
182 	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_SNIPE, IPR_USE_LSI, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[1] },
183 	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP, IPR_USE_LSI, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[1] },
184 	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2, IPR_USE_MSI, IPR_SIS64, IPR_MMIO, &ipr_chip_cfg[2] },
185 	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_ASIC_E2, IPR_USE_MSI, IPR_SIS64, IPR_MMIO, &ipr_chip_cfg[2] }
186 };
187 
188 static int ipr_max_bus_speeds[] = {
189 	IPR_80MBs_SCSI_RATE, IPR_U160_SCSI_RATE, IPR_U320_SCSI_RATE
190 };
191 
192 MODULE_AUTHOR("Brian King <brking@us.ibm.com>");
193 MODULE_DESCRIPTION("IBM Power RAID SCSI Adapter Driver");
194 module_param_named(max_speed, ipr_max_speed, uint, 0);
195 MODULE_PARM_DESC(max_speed, "Maximum bus speed (0-2). Default: 1=U160. Speeds: 0=80 MB/s, 1=U160, 2=U320");
196 module_param_named(log_level, ipr_log_level, uint, 0);
197 MODULE_PARM_DESC(log_level, "Set to 0 - 4 for increasing verbosity of device driver");
198 module_param_named(testmode, ipr_testmode, int, 0);
199 MODULE_PARM_DESC(testmode, "DANGEROUS!!! Allows unsupported configurations");
200 module_param_named(fastfail, ipr_fastfail, int, S_IRUGO | S_IWUSR);
201 MODULE_PARM_DESC(fastfail, "Reduce timeouts and retries");
202 module_param_named(transop_timeout, ipr_transop_timeout, int, 0);
203 MODULE_PARM_DESC(transop_timeout, "Time in seconds to wait for adapter to come operational (default: 300)");
204 module_param_named(debug, ipr_debug, int, S_IRUGO | S_IWUSR);
205 MODULE_PARM_DESC(debug, "Enable device driver debug logging. Set to 1 to enable. (default: 0)");
206 module_param_named(dual_ioa_raid, ipr_dual_ioa_raid, int, 0);
207 MODULE_PARM_DESC(dual_ioa_raid, "Enable dual adapter RAID support. Set to 1 to enable. (default: 1)");
208 module_param_named(max_devs, ipr_max_devs, int, 0);
209 MODULE_PARM_DESC(max_devs, "Specify the maximum number of physical devices. "
210 		 "[Default=" __stringify(IPR_DEFAULT_SIS64_DEVS) "]");
211 MODULE_LICENSE("GPL");
212 MODULE_VERSION(IPR_DRIVER_VERSION);
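/*
 * Example module load (hypothetical values):
 *
 *   modprobe ipr max_speed=2 log_level=2 dual_ioa_raid=1
 *
 * When the driver is built in, the same parameters can be passed on the
 * kernel command line as ipr.max_speed=2, etc. Parameters registered with
 * S_IRUGO | S_IWUSR (fastfail, debug) can also be changed at runtime through
 * /sys/module/ipr/parameters/.
 */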
213 
214 /*  A constant array of IOASCs/URCs/Error Messages */
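/*
 * Each entry gives the full 32-bit IOASC, a flag consulted when deciding
 * whether to dump the raw IOASA for the error, and the log level at which
 * the message is printed (compared against the log_level module parameter).
 * IOASCs with no matching entry fall back to the first entry,
 * "An unknown error was received".
 */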
215 static const
216 struct ipr_error_table_t ipr_error_table[] = {
217 	{0x00000000, 1, IPR_DEFAULT_LOG_LEVEL,
218 	"8155: An unknown error was received"},
219 	{0x00330000, 0, 0,
220 	"Soft underlength error"},
221 	{0x005A0000, 0, 0,
222 	"Command to be cancelled not found"},
223 	{0x00808000, 0, 0,
224 	"Qualified success"},
225 	{0x01080000, 1, IPR_DEFAULT_LOG_LEVEL,
226 	"FFFE: Soft device bus error recovered by the IOA"},
227 	{0x01088100, 0, IPR_DEFAULT_LOG_LEVEL,
228 	"4101: Soft device bus fabric error"},
229 	{0x01100100, 0, IPR_DEFAULT_LOG_LEVEL,
230 	"FFFC: Logical block guard error recovered by the device"},
231 	{0x01100300, 0, IPR_DEFAULT_LOG_LEVEL,
232 	"FFFC: Logical block reference tag error recovered by the device"},
233 	{0x01108300, 0, IPR_DEFAULT_LOG_LEVEL,
234 	"4171: Recovered scatter list tag / sequence number error"},
235 	{0x01109000, 0, IPR_DEFAULT_LOG_LEVEL,
236 	"FF3D: Recovered logical block CRC error on IOA to Host transfer"},
237 	{0x01109200, 0, IPR_DEFAULT_LOG_LEVEL,
238 	"4171: Recovered logical block sequence number error on IOA to Host transfer"},
239 	{0x0110A000, 0, IPR_DEFAULT_LOG_LEVEL,
240 	"FFFD: Recovered logical block reference tag error detected by the IOA"},
241 	{0x0110A100, 0, IPR_DEFAULT_LOG_LEVEL,
242 	"FFFD: Logical block guard error recovered by the IOA"},
243 	{0x01170600, 0, IPR_DEFAULT_LOG_LEVEL,
244 	"FFF9: Device sector reassign successful"},
245 	{0x01170900, 0, IPR_DEFAULT_LOG_LEVEL,
246 	"FFF7: Media error recovered by device rewrite procedures"},
247 	{0x01180200, 0, IPR_DEFAULT_LOG_LEVEL,
248 	"7001: IOA sector reassignment successful"},
249 	{0x01180500, 0, IPR_DEFAULT_LOG_LEVEL,
250 	"FFF9: Soft media error. Sector reassignment recommended"},
251 	{0x01180600, 0, IPR_DEFAULT_LOG_LEVEL,
252 	"FFF7: Media error recovered by IOA rewrite procedures"},
253 	{0x01418000, 0, IPR_DEFAULT_LOG_LEVEL,
254 	"FF3D: Soft PCI bus error recovered by the IOA"},
255 	{0x01440000, 1, IPR_DEFAULT_LOG_LEVEL,
256 	"FFF6: Device hardware error recovered by the IOA"},
257 	{0x01448100, 0, IPR_DEFAULT_LOG_LEVEL,
258 	"FFF6: Device hardware error recovered by the device"},
259 	{0x01448200, 1, IPR_DEFAULT_LOG_LEVEL,
260 	"FF3D: Soft IOA error recovered by the IOA"},
261 	{0x01448300, 0, IPR_DEFAULT_LOG_LEVEL,
262 	"FFFA: Undefined device response recovered by the IOA"},
263 	{0x014A0000, 1, IPR_DEFAULT_LOG_LEVEL,
264 	"FFF6: Device bus error, message or command phase"},
265 	{0x014A8000, 0, IPR_DEFAULT_LOG_LEVEL,
266 	"FFFE: Task Management Function failed"},
267 	{0x015D0000, 0, IPR_DEFAULT_LOG_LEVEL,
268 	"FFF6: Failure prediction threshold exceeded"},
269 	{0x015D9200, 0, IPR_DEFAULT_LOG_LEVEL,
270 	"8009: Impending cache battery pack failure"},
271 	{0x02040400, 0, 0,
272 	"34FF: Disk device format in progress"},
273 	{0x02048000, 0, IPR_DEFAULT_LOG_LEVEL,
274 	"9070: IOA requested reset"},
275 	{0x023F0000, 0, 0,
276 	"Synchronization required"},
277 	{0x024E0000, 0, 0,
278 	"Not ready, IOA shutdown"},
279 	{0x025A0000, 0, 0,
280 	"Not ready, IOA has been shutdown"},
281 	{0x02670100, 0, IPR_DEFAULT_LOG_LEVEL,
282 	"3020: Storage subsystem configuration error"},
283 	{0x03110B00, 0, 0,
284 	"FFF5: Medium error, data unreadable, recommend reassign"},
285 	{0x03110C00, 0, 0,
286 	"7000: Medium error, data unreadable, do not reassign"},
287 	{0x03310000, 0, IPR_DEFAULT_LOG_LEVEL,
288 	"FFF3: Disk media format bad"},
289 	{0x04050000, 0, IPR_DEFAULT_LOG_LEVEL,
290 	"3002: Addressed device failed to respond to selection"},
291 	{0x04080000, 1, IPR_DEFAULT_LOG_LEVEL,
292 	"3100: Device bus error"},
293 	{0x04080100, 0, IPR_DEFAULT_LOG_LEVEL,
294 	"3109: IOA timed out a device command"},
295 	{0x04088000, 0, 0,
296 	"3120: SCSI bus is not operational"},
297 	{0x04088100, 0, IPR_DEFAULT_LOG_LEVEL,
298 	"4100: Hard device bus fabric error"},
299 	{0x04100100, 0, IPR_DEFAULT_LOG_LEVEL,
300 	"310C: Logical block guard error detected by the device"},
301 	{0x04100300, 0, IPR_DEFAULT_LOG_LEVEL,
302 	"310C: Logical block reference tag error detected by the device"},
303 	{0x04108300, 1, IPR_DEFAULT_LOG_LEVEL,
304 	"4170: Scatter list tag / sequence number error"},
305 	{0x04109000, 1, IPR_DEFAULT_LOG_LEVEL,
306 	"8150: Logical block CRC error on IOA to Host transfer"},
307 	{0x04109200, 1, IPR_DEFAULT_LOG_LEVEL,
308 	"4170: Logical block sequence number error on IOA to Host transfer"},
309 	{0x0410A000, 0, IPR_DEFAULT_LOG_LEVEL,
310 	"310D: Logical block reference tag error detected by the IOA"},
311 	{0x0410A100, 0, IPR_DEFAULT_LOG_LEVEL,
312 	"310D: Logical block guard error detected by the IOA"},
313 	{0x04118000, 0, IPR_DEFAULT_LOG_LEVEL,
314 	"9000: IOA reserved area data check"},
315 	{0x04118100, 0, IPR_DEFAULT_LOG_LEVEL,
316 	"9001: IOA reserved area invalid data pattern"},
317 	{0x04118200, 0, IPR_DEFAULT_LOG_LEVEL,
318 	"9002: IOA reserved area LRC error"},
319 	{0x04118300, 1, IPR_DEFAULT_LOG_LEVEL,
320 	"Hardware Error, IOA metadata access error"},
321 	{0x04320000, 0, IPR_DEFAULT_LOG_LEVEL,
322 	"102E: Out of alternate sectors for disk storage"},
323 	{0x04330000, 1, IPR_DEFAULT_LOG_LEVEL,
324 	"FFF4: Data transfer underlength error"},
325 	{0x04338000, 1, IPR_DEFAULT_LOG_LEVEL,
326 	"FFF4: Data transfer overlength error"},
327 	{0x043E0100, 0, IPR_DEFAULT_LOG_LEVEL,
328 	"3400: Logical unit failure"},
329 	{0x04408500, 0, IPR_DEFAULT_LOG_LEVEL,
330 	"FFF4: Device microcode is corrupt"},
331 	{0x04418000, 1, IPR_DEFAULT_LOG_LEVEL,
332 	"8150: PCI bus error"},
333 	{0x04430000, 1, 0,
334 	"Unsupported device bus message received"},
335 	{0x04440000, 1, IPR_DEFAULT_LOG_LEVEL,
336 	"FFF4: Disk device problem"},
337 	{0x04448200, 1, IPR_DEFAULT_LOG_LEVEL,
338 	"8150: Permanent IOA failure"},
339 	{0x04448300, 0, IPR_DEFAULT_LOG_LEVEL,
340 	"3010: Disk device returned wrong response to IOA"},
341 	{0x04448400, 0, IPR_DEFAULT_LOG_LEVEL,
342 	"8151: IOA microcode error"},
343 	{0x04448500, 0, 0,
344 	"Device bus status error"},
345 	{0x04448600, 0, IPR_DEFAULT_LOG_LEVEL,
346 	"8157: IOA error requiring IOA reset to recover"},
347 	{0x04448700, 0, 0,
348 	"ATA device status error"},
349 	{0x04490000, 0, 0,
350 	"Message reject received from the device"},
351 	{0x04449200, 0, IPR_DEFAULT_LOG_LEVEL,
352 	"8008: A permanent cache battery pack failure occurred"},
353 	{0x0444A000, 0, IPR_DEFAULT_LOG_LEVEL,
354 	"9090: Disk unit has been modified after the last known status"},
355 	{0x0444A200, 0, IPR_DEFAULT_LOG_LEVEL,
356 	"9081: IOA detected device error"},
357 	{0x0444A300, 0, IPR_DEFAULT_LOG_LEVEL,
358 	"9082: IOA detected device error"},
359 	{0x044A0000, 1, IPR_DEFAULT_LOG_LEVEL,
360 	"3110: Device bus error, message or command phase"},
361 	{0x044A8000, 1, IPR_DEFAULT_LOG_LEVEL,
362 	"3110: SAS Command / Task Management Function failed"},
363 	{0x04670400, 0, IPR_DEFAULT_LOG_LEVEL,
364 	"9091: Incorrect hardware configuration change has been detected"},
365 	{0x04678000, 0, IPR_DEFAULT_LOG_LEVEL,
366 	"9073: Invalid multi-adapter configuration"},
367 	{0x04678100, 0, IPR_DEFAULT_LOG_LEVEL,
368 	"4010: Incorrect connection between cascaded expanders"},
369 	{0x04678200, 0, IPR_DEFAULT_LOG_LEVEL,
370 	"4020: Connections exceed IOA design limits"},
371 	{0x04678300, 0, IPR_DEFAULT_LOG_LEVEL,
372 	"4030: Incorrect multipath connection"},
373 	{0x04679000, 0, IPR_DEFAULT_LOG_LEVEL,
374 	"4110: Unsupported enclosure function"},
375 	{0x046E0000, 0, IPR_DEFAULT_LOG_LEVEL,
376 	"FFF4: Command to logical unit failed"},
377 	{0x05240000, 1, 0,
378 	"Illegal request, invalid request type or request packet"},
379 	{0x05250000, 0, 0,
380 	"Illegal request, invalid resource handle"},
381 	{0x05258000, 0, 0,
382 	"Illegal request, commands not allowed to this device"},
383 	{0x05258100, 0, 0,
384 	"Illegal request, command not allowed to a secondary adapter"},
385 	{0x05258200, 0, 0,
386 	"Illegal request, command not allowed to a non-optimized resource"},
387 	{0x05260000, 0, 0,
388 	"Illegal request, invalid field in parameter list"},
389 	{0x05260100, 0, 0,
390 	"Illegal request, parameter not supported"},
391 	{0x05260200, 0, 0,
392 	"Illegal request, parameter value invalid"},
393 	{0x052C0000, 0, 0,
394 	"Illegal request, command sequence error"},
395 	{0x052C8000, 1, 0,
396 	"Illegal request, dual adapter support not enabled"},
397 	{0x06040500, 0, IPR_DEFAULT_LOG_LEVEL,
398 	"9031: Array protection temporarily suspended, protection resuming"},
399 	{0x06040600, 0, IPR_DEFAULT_LOG_LEVEL,
400 	"9040: Array protection temporarily suspended, protection resuming"},
401 	{0x06288000, 0, IPR_DEFAULT_LOG_LEVEL,
402 	"3140: Device bus not ready to ready transition"},
403 	{0x06290000, 0, IPR_DEFAULT_LOG_LEVEL,
404 	"FFFB: SCSI bus was reset"},
405 	{0x06290500, 0, 0,
406 	"FFFE: SCSI bus transition to single ended"},
407 	{0x06290600, 0, 0,
408 	"FFFE: SCSI bus transition to LVD"},
409 	{0x06298000, 0, IPR_DEFAULT_LOG_LEVEL,
410 	"FFFB: SCSI bus was reset by another initiator"},
411 	{0x063F0300, 0, IPR_DEFAULT_LOG_LEVEL,
412 	"3029: A device replacement has occurred"},
413 	{0x064C8000, 0, IPR_DEFAULT_LOG_LEVEL,
414 	"9051: IOA cache data exists for a missing or failed device"},
415 	{0x064C8100, 0, IPR_DEFAULT_LOG_LEVEL,
416 	"9055: Auxiliary cache IOA contains cache data needed by the primary IOA"},
417 	{0x06670100, 0, IPR_DEFAULT_LOG_LEVEL,
418 	"9025: Disk unit is not supported at its physical location"},
419 	{0x06670600, 0, IPR_DEFAULT_LOG_LEVEL,
420 	"3020: IOA detected a SCSI bus configuration error"},
421 	{0x06678000, 0, IPR_DEFAULT_LOG_LEVEL,
422 	"3150: SCSI bus configuration error"},
423 	{0x06678100, 0, IPR_DEFAULT_LOG_LEVEL,
424 	"9074: Asymmetric advanced function disk configuration"},
425 	{0x06678300, 0, IPR_DEFAULT_LOG_LEVEL,
426 	"4040: Incomplete multipath connection between IOA and enclosure"},
427 	{0x06678400, 0, IPR_DEFAULT_LOG_LEVEL,
428 	"4041: Incomplete multipath connection between enclosure and device"},
429 	{0x06678500, 0, IPR_DEFAULT_LOG_LEVEL,
430 	"9075: Incomplete multipath connection between IOA and remote IOA"},
431 	{0x06678600, 0, IPR_DEFAULT_LOG_LEVEL,
432 	"9076: Configuration error, missing remote IOA"},
433 	{0x06679100, 0, IPR_DEFAULT_LOG_LEVEL,
434 	"4050: Enclosure does not support a required multipath function"},
435 	{0x06690000, 0, IPR_DEFAULT_LOG_LEVEL,
436 	"4070: Logically bad block written on device"},
437 	{0x06690200, 0, IPR_DEFAULT_LOG_LEVEL,
438 	"9041: Array protection temporarily suspended"},
439 	{0x06698200, 0, IPR_DEFAULT_LOG_LEVEL,
440 	"9042: Corrupt array parity detected on specified device"},
441 	{0x066B0200, 0, IPR_DEFAULT_LOG_LEVEL,
442 	"9030: Array no longer protected due to missing or failed disk unit"},
443 	{0x066B8000, 0, IPR_DEFAULT_LOG_LEVEL,
444 	"9071: Link operational transition"},
445 	{0x066B8100, 0, IPR_DEFAULT_LOG_LEVEL,
446 	"9072: Link not operational transition"},
447 	{0x066B8200, 0, IPR_DEFAULT_LOG_LEVEL,
448 	"9032: Array exposed but still protected"},
449 	{0x066B8300, 0, IPR_DEFAULT_LOG_LEVEL + 1,
450 	"70DD: Device forced failed by disrupt device command"},
451 	{0x066B9100, 0, IPR_DEFAULT_LOG_LEVEL,
452 	"4061: Multipath redundancy level got better"},
453 	{0x066B9200, 0, IPR_DEFAULT_LOG_LEVEL,
454 	"4060: Multipath redundancy level got worse"},
455 	{0x07270000, 0, 0,
456 	"Failure due to other device"},
457 	{0x07278000, 0, IPR_DEFAULT_LOG_LEVEL,
458 	"9008: IOA does not support functions expected by devices"},
459 	{0x07278100, 0, IPR_DEFAULT_LOG_LEVEL,
460 	"9010: Cache data associated with attached devices cannot be found"},
461 	{0x07278200, 0, IPR_DEFAULT_LOG_LEVEL,
462 	"9011: Cache data belongs to devices other than those attached"},
463 	{0x07278400, 0, IPR_DEFAULT_LOG_LEVEL,
464 	"9020: Array missing 2 or more devices with only 1 device present"},
465 	{0x07278500, 0, IPR_DEFAULT_LOG_LEVEL,
466 	"9021: Array missing 2 or more devices with 2 or more devices present"},
467 	{0x07278600, 0, IPR_DEFAULT_LOG_LEVEL,
468 	"9022: Exposed array is missing a required device"},
469 	{0x07278700, 0, IPR_DEFAULT_LOG_LEVEL,
470 	"9023: Array member(s) not at required physical locations"},
471 	{0x07278800, 0, IPR_DEFAULT_LOG_LEVEL,
472 	"9024: Array not functional due to present hardware configuration"},
473 	{0x07278900, 0, IPR_DEFAULT_LOG_LEVEL,
474 	"9026: Array not functional due to present hardware configuration"},
475 	{0x07278A00, 0, IPR_DEFAULT_LOG_LEVEL,
476 	"9027: Array is missing a device and parity is out of sync"},
477 	{0x07278B00, 0, IPR_DEFAULT_LOG_LEVEL,
478 	"9028: Maximum number of arrays already exist"},
479 	{0x07278C00, 0, IPR_DEFAULT_LOG_LEVEL,
480 	"9050: Required cache data cannot be located for a disk unit"},
481 	{0x07278D00, 0, IPR_DEFAULT_LOG_LEVEL,
482 	"9052: Cache data exists for a device that has been modified"},
483 	{0x07278F00, 0, IPR_DEFAULT_LOG_LEVEL,
484 	"9054: IOA resources not available due to previous problems"},
485 	{0x07279100, 0, IPR_DEFAULT_LOG_LEVEL,
486 	"9092: Disk unit requires initialization before use"},
487 	{0x07279200, 0, IPR_DEFAULT_LOG_LEVEL,
488 	"9029: Incorrect hardware configuration change has been detected"},
489 	{0x07279600, 0, IPR_DEFAULT_LOG_LEVEL,
490 	"9060: One or more disk pairs are missing from an array"},
491 	{0x07279700, 0, IPR_DEFAULT_LOG_LEVEL,
492 	"9061: One or more disks are missing from an array"},
493 	{0x07279800, 0, IPR_DEFAULT_LOG_LEVEL,
494 	"9062: One or more disks are missing from an array"},
495 	{0x07279900, 0, IPR_DEFAULT_LOG_LEVEL,
496 	"9063: Maximum number of functional arrays has been exceeded"},
497 	{0x0B260000, 0, 0,
498 	"Aborted command, invalid descriptor"},
499 	{0x0B5A0000, 0, 0,
500 	"Command terminated by host"}
501 };
502 
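/*
 * Enclosure services (SES) table: the first column is the SCSI product ID to
 * match, the second is a per-byte compare map ('X' means the byte must match,
 * any other character is a don't-care), and the third is the maximum SCSI bus
 * speed, in MB/s, allowed for devices behind that enclosure.
 */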
503 static const struct ipr_ses_table_entry ipr_ses_table[] = {
504 	{ "2104-DL1        ", "XXXXXXXXXXXXXXXX", 80 },
505 	{ "2104-TL1        ", "XXXXXXXXXXXXXXXX", 80 },
506 	{ "HSBP07M P U2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* Hidive 7 slot */
507 	{ "HSBP05M P U2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* Hidive 5 slot */
508 	{ "HSBP05M S U2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* Bowtie */
509 	{ "HSBP06E ASU2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* MartinFenning */
510 	{ "2104-DU3        ", "XXXXXXXXXXXXXXXX", 160 },
511 	{ "2104-TU3        ", "XXXXXXXXXXXXXXXX", 160 },
512 	{ "HSBP04C RSU2SCSI", "XXXXXXX*XXXXXXXX", 160 },
513 	{ "HSBP06E RSU2SCSI", "XXXXXXX*XXXXXXXX", 160 },
514 	{ "St  V1S2        ", "XXXXXXXXXXXXXXXX", 160 },
515 	{ "HSBPD4M  PU3SCSI", "XXXXXXX*XXXXXXXX", 160 },
516 	{ "VSBPD1H   U3SCSI", "XXXXXXX*XXXXXXXX", 160 }
517 };
518 
519 /*
520  *  Function Prototypes
521  */
522 static int ipr_reset_alert(struct ipr_cmnd *);
523 static void ipr_process_ccn(struct ipr_cmnd *);
524 static void ipr_process_error(struct ipr_cmnd *);
525 static void ipr_reset_ioa_job(struct ipr_cmnd *);
526 static void ipr_initiate_ioa_reset(struct ipr_ioa_cfg *,
527 				   enum ipr_shutdown_type);
528 
529 #ifdef CONFIG_SCSI_IPR_TRACE
530 /**
531  * ipr_trc_hook - Add a trace entry to the driver trace
532  * @ipr_cmd:	ipr command struct
533  * @type:		trace type
534  * @add_data:	additional data
535  *
536  * Return value:
537  * 	none
538  **/
539 static void ipr_trc_hook(struct ipr_cmnd *ipr_cmd,
540 			 u8 type, u32 add_data)
541 {
542 	struct ipr_trace_entry *trace_entry;
543 	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
544 
545 	trace_entry = &ioa_cfg->trace[ioa_cfg->trace_index++];
546 	trace_entry->time = jiffies;
547 	trace_entry->op_code = ipr_cmd->ioarcb.cmd_pkt.cdb[0];
548 	trace_entry->type = type;
549 	if (ipr_cmd->ioa_cfg->sis64)
550 		trace_entry->ata_op_code = ipr_cmd->i.ata_ioadl.regs.command;
551 	else
552 		trace_entry->ata_op_code = ipr_cmd->ioarcb.u.add_data.u.regs.command;
553 	trace_entry->cmd_index = ipr_cmd->cmd_index & 0xff;
554 	trace_entry->res_handle = ipr_cmd->ioarcb.res_handle;
555 	trace_entry->u.add_data = add_data;
556 }
557 #else
558 #define ipr_trc_hook(ipr_cmd, type, add_data) do { } while(0)
559 #endif
560 
561 /**
562  * ipr_reinit_ipr_cmnd - Re-initialize an IPR Cmnd block for reuse
563  * @ipr_cmd:	ipr command struct
564  *
565  * Return value:
566  * 	none
567  **/
568 static void ipr_reinit_ipr_cmnd(struct ipr_cmnd *ipr_cmd)
569 {
570 	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
571 	struct ipr_ioasa *ioasa = &ipr_cmd->s.ioasa;
572 	struct ipr_ioasa64 *ioasa64 = &ipr_cmd->s.ioasa64;
573 	dma_addr_t dma_addr = ipr_cmd->dma_addr;
574 
575 	memset(&ioarcb->cmd_pkt, 0, sizeof(struct ipr_cmd_pkt));
576 	ioarcb->data_transfer_length = 0;
577 	ioarcb->read_data_transfer_length = 0;
578 	ioarcb->ioadl_len = 0;
579 	ioarcb->read_ioadl_len = 0;
580 
581 	if (ipr_cmd->ioa_cfg->sis64) {
582 		ioarcb->u.sis64_addr_data.data_ioadl_addr =
583 			cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, i.ioadl64));
584 		ioasa64->u.gata.status = 0;
585 	} else {
586 		ioarcb->write_ioadl_addr =
587 			cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, i.ioadl));
588 		ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr;
589 		ioasa->u.gata.status = 0;
590 	}
591 
592 	ioasa->hdr.ioasc = 0;
593 	ioasa->hdr.residual_data_len = 0;
594 	ipr_cmd->scsi_cmd = NULL;
595 	ipr_cmd->qc = NULL;
596 	ipr_cmd->sense_buffer[0] = 0;
597 	ipr_cmd->dma_use_sg = 0;
598 }
599 
600 /**
601  * ipr_init_ipr_cmnd - Initialize an IPR Cmnd block
602  * @ipr_cmd:	ipr command struct
603  *
604  * Return value:
605  * 	none
606  **/
607 static void ipr_init_ipr_cmnd(struct ipr_cmnd *ipr_cmd)
608 {
609 	ipr_reinit_ipr_cmnd(ipr_cmd);
610 	ipr_cmd->u.scratch = 0;
611 	ipr_cmd->sibling = NULL;
612 	init_timer(&ipr_cmd->timer);
613 }
614 
615 /**
616  * ipr_get_free_ipr_cmnd - Get a free IPR Cmnd block
617  * @ioa_cfg:	ioa config struct
618  *
619  * Return value:
620  * 	pointer to ipr command struct
621  **/
622 static
623 struct ipr_cmnd *ipr_get_free_ipr_cmnd(struct ipr_ioa_cfg *ioa_cfg)
624 {
625 	struct ipr_cmnd *ipr_cmd;
626 
627 	ipr_cmd = list_entry(ioa_cfg->free_q.next, struct ipr_cmnd, queue);
628 	list_del(&ipr_cmd->queue);
629 	ipr_init_ipr_cmnd(ipr_cmd);
630 
631 	return ipr_cmd;
632 }
633 
634 /**
635  * ipr_mask_and_clear_interrupts - Mask all and clear specified interrupts
636  * @ioa_cfg:	ioa config struct
637  * @clr_ints:     interrupts to clear
638  *
639  * This function masks all interrupts on the adapter, then clears the
640  * interrupts specified in the mask
641  *
642  * Return value:
643  * 	none
644  **/
645 static void ipr_mask_and_clear_interrupts(struct ipr_ioa_cfg *ioa_cfg,
646 					  u32 clr_ints)
647 {
648 	volatile u32 int_reg;
649 
650 	/* Stop new interrupts */
651 	ioa_cfg->allow_interrupts = 0;
652 
653 	/* Set interrupt mask to stop all new interrupts */
654 	if (ioa_cfg->sis64)
655 		writeq(~0, ioa_cfg->regs.set_interrupt_mask_reg);
656 	else
657 		writel(~0, ioa_cfg->regs.set_interrupt_mask_reg);
658 
659 	/* Clear any pending interrupts */
660 	if (ioa_cfg->sis64)
661 		writel(~0, ioa_cfg->regs.clr_interrupt_reg);
662 	writel(clr_ints, ioa_cfg->regs.clr_interrupt_reg32);
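	/* Read the sense register back to flush the mask/clear writes */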
663 	int_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
664 }
665 
666 /**
667  * ipr_save_pcix_cmd_reg - Save PCI-X command register
668  * @ioa_cfg:	ioa config struct
669  *
670  * Return value:
671  * 	0 on success / -EIO on failure
672  **/
673 static int ipr_save_pcix_cmd_reg(struct ipr_ioa_cfg *ioa_cfg)
674 {
675 	int pcix_cmd_reg = pci_find_capability(ioa_cfg->pdev, PCI_CAP_ID_PCIX);
676 
677 	if (pcix_cmd_reg == 0)
678 		return 0;
679 
680 	if (pci_read_config_word(ioa_cfg->pdev, pcix_cmd_reg + PCI_X_CMD,
681 				 &ioa_cfg->saved_pcix_cmd_reg) != PCIBIOS_SUCCESSFUL) {
682 		dev_err(&ioa_cfg->pdev->dev, "Failed to save PCI-X command register\n");
683 		return -EIO;
684 	}
685 
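	/*
	 * The saved value is written back by ipr_set_pcix_cmd_reg(); OR in data
	 * parity error recovery and relaxed ordering so both are enabled when
	 * the register is restored after a reset.
	 */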
686 	ioa_cfg->saved_pcix_cmd_reg |= PCI_X_CMD_DPERR_E | PCI_X_CMD_ERO;
687 	return 0;
688 }
689 
690 /**
691  * ipr_set_pcix_cmd_reg - Setup PCI-X command register
692  * @ioa_cfg:	ioa config struct
693  *
694  * Return value:
695  * 	0 on success / -EIO on failure
696  **/
697 static int ipr_set_pcix_cmd_reg(struct ipr_ioa_cfg *ioa_cfg)
698 {
699 	int pcix_cmd_reg = pci_find_capability(ioa_cfg->pdev, PCI_CAP_ID_PCIX);
700 
701 	if (pcix_cmd_reg) {
702 		if (pci_write_config_word(ioa_cfg->pdev, pcix_cmd_reg + PCI_X_CMD,
703 					  ioa_cfg->saved_pcix_cmd_reg) != PCIBIOS_SUCCESSFUL) {
704 			dev_err(&ioa_cfg->pdev->dev, "Failed to setup PCI-X command register\n");
705 			return -EIO;
706 		}
707 	}
708 
709 	return 0;
710 }
711 
712 /**
713  * ipr_sata_eh_done - done function for aborted SATA commands
714  * @ipr_cmd:	ipr command struct
715  *
716  * This function is invoked for ops generated to SATA
717  * devices which are being aborted.
718  *
719  * Return value:
720  * 	none
721  **/
722 static void ipr_sata_eh_done(struct ipr_cmnd *ipr_cmd)
723 {
724 	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
725 	struct ata_queued_cmd *qc = ipr_cmd->qc;
726 	struct ipr_sata_port *sata_port = qc->ap->private_data;
727 
728 	qc->err_mask |= AC_ERR_OTHER;
729 	sata_port->ioasa.status |= ATA_BUSY;
730 	list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
731 	ata_qc_complete(qc);
732 }
733 
734 /**
735  * ipr_scsi_eh_done - mid-layer done function for aborted ops
736  * @ipr_cmd:	ipr command struct
737  *
738  * This function is invoked by the interrupt handler for
739  * ops generated by the SCSI mid-layer which are being aborted.
740  *
741  * Return value:
742  * 	none
743  **/
744 static void ipr_scsi_eh_done(struct ipr_cmnd *ipr_cmd)
745 {
746 	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
747 	struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
748 
749 	scsi_cmd->result |= (DID_ERROR << 16);
750 
751 	scsi_dma_unmap(ipr_cmd->scsi_cmd);
752 	scsi_cmd->scsi_done(scsi_cmd);
753 	list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
754 }
755 
756 /**
757  * ipr_fail_all_ops - Fails all outstanding ops.
758  * @ioa_cfg:	ioa config struct
759  *
760  * This function fails all outstanding ops.
761  *
762  * Return value:
763  * 	none
764  **/
765 static void ipr_fail_all_ops(struct ipr_ioa_cfg *ioa_cfg)
766 {
767 	struct ipr_cmnd *ipr_cmd, *temp;
768 
769 	ENTER;
770 	list_for_each_entry_safe(ipr_cmd, temp, &ioa_cfg->pending_q, queue) {
771 		list_del(&ipr_cmd->queue);
772 
773 		ipr_cmd->s.ioasa.hdr.ioasc = cpu_to_be32(IPR_IOASC_IOA_WAS_RESET);
774 		ipr_cmd->s.ioasa.hdr.ilid = cpu_to_be32(IPR_DRIVER_ILID);
775 
776 		if (ipr_cmd->scsi_cmd)
777 			ipr_cmd->done = ipr_scsi_eh_done;
778 		else if (ipr_cmd->qc)
779 			ipr_cmd->done = ipr_sata_eh_done;
780 
781 		ipr_trc_hook(ipr_cmd, IPR_TRACE_FINISH, IPR_IOASC_IOA_WAS_RESET);
782 		del_timer(&ipr_cmd->timer);
783 		ipr_cmd->done(ipr_cmd);
784 	}
785 
786 	LEAVE;
787 }
788 
789 /**
790  * ipr_send_command -  Send driver initiated requests.
791  * @ipr_cmd:		ipr command struct
792  *
793  * This function sends a command to the adapter using the correct write call.
794  * For sis64 adapters, the required IOARCB size is calculated and the matching
795  * size bits are ORed into the address written to the IOARRIN register.
796  *
797  * Return value:
798  * 	none
799  **/
800 static void ipr_send_command(struct ipr_cmnd *ipr_cmd)
801 {
802 	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
803 	dma_addr_t send_dma_addr = ipr_cmd->dma_addr;
804 
805 	if (ioa_cfg->sis64) {
806 		/* The default size is 256 bytes */
807 		send_dma_addr |= 0x1;
808 
809 		/* If the number of ioadls * size of ioadl > 128 bytes,
810 		   then use a 512 byte ioarcb */
811 		if (ipr_cmd->dma_use_sg * sizeof(struct ipr_ioadl64_desc) > 128)
812 			send_dma_addr |= 0x4;
813 		writeq(send_dma_addr, ioa_cfg->regs.ioarrin_reg);
814 	} else
815 		writel(send_dma_addr, ioa_cfg->regs.ioarrin_reg);
816 }
817 
818 /**
819  * ipr_do_req -  Send driver initiated requests.
820  * @ipr_cmd:		ipr command struct
821  * @done:			done function
822  * @timeout_func:	timeout function
823  * @timeout:		timeout value
824  *
825  * This function sends the specified command to the adapter with the
826  * timeout given. The done function is invoked on command completion.
827  *
828  * Return value:
829  * 	none
830  **/
831 static void ipr_do_req(struct ipr_cmnd *ipr_cmd,
832 		       void (*done) (struct ipr_cmnd *),
833 		       void (*timeout_func) (struct ipr_cmnd *), u32 timeout)
834 {
835 	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
836 
837 	list_add_tail(&ipr_cmd->queue, &ioa_cfg->pending_q);
838 
839 	ipr_cmd->done = done;
840 
841 	ipr_cmd->timer.data = (unsigned long) ipr_cmd;
842 	ipr_cmd->timer.expires = jiffies + timeout;
843 	ipr_cmd->timer.function = (void (*)(unsigned long))timeout_func;
844 
845 	add_timer(&ipr_cmd->timer);
846 
847 	ipr_trc_hook(ipr_cmd, IPR_TRACE_START, 0);
848 
849 	mb();
850 
851 	ipr_send_command(ipr_cmd);
852 }
853 
854 /**
855  * ipr_internal_cmd_done - Op done function for an internally generated op.
856  * @ipr_cmd:	ipr command struct
857  *
858  * This function is the op done function for an internally generated,
859  * blocking op. It simply wakes the sleeping thread.
860  *
861  * Return value:
862  * 	none
863  **/
864 static void ipr_internal_cmd_done(struct ipr_cmnd *ipr_cmd)
865 {
866 	if (ipr_cmd->sibling)
867 		ipr_cmd->sibling = NULL;
868 	else
869 		complete(&ipr_cmd->completion);
870 }
871 
872 /**
873  * ipr_init_ioadl - initialize the ioadl for the correct SIS type
874  * @ipr_cmd:	ipr command struct
875  * @dma_addr:	dma address
876  * @len:	transfer length
877  * @flags:	ioadl flag value
878  *
879  * This function initializes an ioadl in the case where there is only a single
880  * descriptor.
881  *
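 * A typical caller is ipr_send_hcam() below, which maps the hostrcb buffer
 * with a single IPR_IOADL_FLAGS_READ_LAST descriptor.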
882  * Return value:
883  * 	nothing
884  **/
885 static void ipr_init_ioadl(struct ipr_cmnd *ipr_cmd, dma_addr_t dma_addr,
886 			   u32 len, int flags)
887 {
888 	struct ipr_ioadl_desc *ioadl = ipr_cmd->i.ioadl;
889 	struct ipr_ioadl64_desc *ioadl64 = ipr_cmd->i.ioadl64;
890 
891 	ipr_cmd->dma_use_sg = 1;
892 
893 	if (ipr_cmd->ioa_cfg->sis64) {
894 		ioadl64->flags = cpu_to_be32(flags);
895 		ioadl64->data_len = cpu_to_be32(len);
896 		ioadl64->address = cpu_to_be64(dma_addr);
897 
898 		ipr_cmd->ioarcb.ioadl_len =
899 		       	cpu_to_be32(sizeof(struct ipr_ioadl64_desc));
900 		ipr_cmd->ioarcb.data_transfer_length = cpu_to_be32(len);
901 	} else {
902 		ioadl->flags_and_data_len = cpu_to_be32(flags | len);
903 		ioadl->address = cpu_to_be32(dma_addr);
904 
905 		if (flags == IPR_IOADL_FLAGS_READ_LAST) {
906 			ipr_cmd->ioarcb.read_ioadl_len =
907 				cpu_to_be32(sizeof(struct ipr_ioadl_desc));
908 			ipr_cmd->ioarcb.read_data_transfer_length = cpu_to_be32(len);
909 		} else {
910 			ipr_cmd->ioarcb.ioadl_len =
911 			       	cpu_to_be32(sizeof(struct ipr_ioadl_desc));
912 			ipr_cmd->ioarcb.data_transfer_length = cpu_to_be32(len);
913 		}
914 	}
915 }
916 
917 /**
918  * ipr_send_blocking_cmd - Send command and sleep on its completion.
919  * @ipr_cmd:	ipr command struct
920  * @timeout_func:	function to invoke if command times out
921  * @timeout:	timeout
922  *
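 * Callers must hold the host lock; it is dropped while sleeping on the
 * completion and reacquired before returning.
 *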
923  * Return value:
924  * 	none
925  **/
926 static void ipr_send_blocking_cmd(struct ipr_cmnd *ipr_cmd,
927 				  void (*timeout_func) (struct ipr_cmnd *ipr_cmd),
928 				  u32 timeout)
929 {
930 	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
931 
932 	init_completion(&ipr_cmd->completion);
933 	ipr_do_req(ipr_cmd, ipr_internal_cmd_done, timeout_func, timeout);
934 
935 	spin_unlock_irq(ioa_cfg->host->host_lock);
936 	wait_for_completion(&ipr_cmd->completion);
937 	spin_lock_irq(ioa_cfg->host->host_lock);
938 }
939 
940 /**
941  * ipr_send_hcam - Send an HCAM to the adapter.
942  * @ioa_cfg:	ioa config struct
943  * @type:		HCAM type
944  * @hostrcb:	hostrcb struct
945  *
946  * This function will send a Host Controlled Async command to the adapter.
947  * If HCAMs are currently not allowed to be issued to the adapter, it will
948  * place the hostrcb on the free queue.
949  *
950  * Return value:
951  * 	none
952  **/
953 static void ipr_send_hcam(struct ipr_ioa_cfg *ioa_cfg, u8 type,
954 			  struct ipr_hostrcb *hostrcb)
955 {
956 	struct ipr_cmnd *ipr_cmd;
957 	struct ipr_ioarcb *ioarcb;
958 
959 	if (ioa_cfg->allow_cmds) {
960 		ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
961 		list_add_tail(&ipr_cmd->queue, &ioa_cfg->pending_q);
962 		list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_pending_q);
963 
964 		ipr_cmd->u.hostrcb = hostrcb;
965 		ioarcb = &ipr_cmd->ioarcb;
966 
967 		ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
968 		ioarcb->cmd_pkt.request_type = IPR_RQTYPE_HCAM;
969 		ioarcb->cmd_pkt.cdb[0] = IPR_HOST_CONTROLLED_ASYNC;
970 		ioarcb->cmd_pkt.cdb[1] = type;
971 		ioarcb->cmd_pkt.cdb[7] = (sizeof(hostrcb->hcam) >> 8) & 0xff;
972 		ioarcb->cmd_pkt.cdb[8] = sizeof(hostrcb->hcam) & 0xff;
973 
974 		ipr_init_ioadl(ipr_cmd, hostrcb->hostrcb_dma,
975 			       sizeof(hostrcb->hcam), IPR_IOADL_FLAGS_READ_LAST);
976 
977 		if (type == IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE)
978 			ipr_cmd->done = ipr_process_ccn;
979 		else
980 			ipr_cmd->done = ipr_process_error;
981 
982 		ipr_trc_hook(ipr_cmd, IPR_TRACE_START, IPR_IOA_RES_ADDR);
983 
984 		mb();
985 
986 		ipr_send_command(ipr_cmd);
987 	} else {
988 		list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_free_q);
989 	}
990 }
991 
992 /**
993  * ipr_update_ata_class - Update the ata class in the resource entry
994  * @res:	resource entry struct
995  * @proto:	cfgte device bus protocol value
996  *
997  * Return value:
998  * 	none
999  **/
1000 static void ipr_update_ata_class(struct ipr_resource_entry *res, unsigned int proto)
1001 {
1002 	switch (proto) {
1003 	case IPR_PROTO_SATA:
1004 	case IPR_PROTO_SAS_STP:
1005 		res->ata_class = ATA_DEV_ATA;
1006 		break;
1007 	case IPR_PROTO_SATA_ATAPI:
1008 	case IPR_PROTO_SAS_STP_ATAPI:
1009 		res->ata_class = ATA_DEV_ATAPI;
1010 		break;
1011 	default:
1012 		res->ata_class = ATA_DEV_UNKNOWN;
1013 		break;
1014 	}
1015 }
1016 
1017 /**
1018  * ipr_init_res_entry - Initialize a resource entry struct.
1019  * @res:	resource entry struct
1020  * @cfgtew:	config table entry wrapper struct
1021  *
1022  * Return value:
1023  * 	none
1024  **/
1025 static void ipr_init_res_entry(struct ipr_resource_entry *res,
1026 			       struct ipr_config_table_entry_wrapper *cfgtew)
1027 {
1028 	int found = 0;
1029 	unsigned int proto;
1030 	struct ipr_ioa_cfg *ioa_cfg = res->ioa_cfg;
1031 	struct ipr_resource_entry *gscsi_res = NULL;
1032 
1033 	res->needs_sync_complete = 0;
1034 	res->in_erp = 0;
1035 	res->add_to_ml = 0;
1036 	res->del_from_ml = 0;
1037 	res->resetting_device = 0;
1038 	res->sdev = NULL;
1039 	res->sata_port = NULL;
1040 
1041 	if (ioa_cfg->sis64) {
1042 		proto = cfgtew->u.cfgte64->proto;
1043 		res->res_flags = cfgtew->u.cfgte64->res_flags;
1044 		res->qmodel = IPR_QUEUEING_MODEL64(res);
1045 		res->type = cfgtew->u.cfgte64->res_type;
1046 
1047 		memcpy(res->res_path, &cfgtew->u.cfgte64->res_path,
1048 			sizeof(res->res_path));
1049 
1050 		res->bus = 0;
1051 		res->lun = scsilun_to_int(&res->dev_lun);
1052 
1053 		if (res->type == IPR_RES_TYPE_GENERIC_SCSI) {
1054 			list_for_each_entry(gscsi_res, &ioa_cfg->used_res_q, queue) {
1055 				if (gscsi_res->dev_id == cfgtew->u.cfgte64->dev_id) {
1056 					found = 1;
1057 					res->target = gscsi_res->target;
1058 					break;
1059 				}
1060 			}
1061 			if (!found) {
1062 				res->target = find_first_zero_bit(ioa_cfg->target_ids,
1063 								  ioa_cfg->max_devs_supported);
1064 				set_bit(res->target, ioa_cfg->target_ids);
1065 			}
1066 
1067 			memcpy(&res->dev_lun.scsi_lun, &cfgtew->u.cfgte64->lun,
1068 				sizeof(res->dev_lun.scsi_lun));
1069 		} else if (res->type == IPR_RES_TYPE_IOAFP) {
1070 			res->bus = IPR_IOAFP_VIRTUAL_BUS;
1071 			res->target = 0;
1072 		} else if (res->type == IPR_RES_TYPE_ARRAY) {
1073 			res->bus = IPR_ARRAY_VIRTUAL_BUS;
1074 			res->target = find_first_zero_bit(ioa_cfg->array_ids,
1075 							  ioa_cfg->max_devs_supported);
1076 			set_bit(res->target, ioa_cfg->array_ids);
1077 		} else if (res->type == IPR_RES_TYPE_VOLUME_SET) {
1078 			res->bus = IPR_VSET_VIRTUAL_BUS;
1079 			res->target = find_first_zero_bit(ioa_cfg->vset_ids,
1080 							  ioa_cfg->max_devs_supported);
1081 			set_bit(res->target, ioa_cfg->vset_ids);
1082 		} else {
1083 			res->target = find_first_zero_bit(ioa_cfg->target_ids,
1084 							  ioa_cfg->max_devs_supported);
1085 			set_bit(res->target, ioa_cfg->target_ids);
1086 		}
1087 	} else {
1088 		proto = cfgtew->u.cfgte->proto;
1089 		res->qmodel = IPR_QUEUEING_MODEL(res);
1090 		res->flags = cfgtew->u.cfgte->flags;
1091 		if (res->flags & IPR_IS_IOA_RESOURCE)
1092 			res->type = IPR_RES_TYPE_IOAFP;
1093 		else
1094 			res->type = cfgtew->u.cfgte->rsvd_subtype & 0x0f;
1095 
1096 		res->bus = cfgtew->u.cfgte->res_addr.bus;
1097 		res->target = cfgtew->u.cfgte->res_addr.target;
1098 		res->lun = cfgtew->u.cfgte->res_addr.lun;
1099 	}
1100 
1101 	ipr_update_ata_class(res, proto);
1102 }
1103 
1104 /**
1105  * ipr_is_same_device - Determine if two devices are the same.
1106  * @res:	resource entry struct
1107  * @cfgtew:	config table entry wrapper struct
1108  *
1109  * Return value:
1110  * 	1 if the devices are the same / 0 otherwise
1111  **/
1112 static int ipr_is_same_device(struct ipr_resource_entry *res,
1113 			      struct ipr_config_table_entry_wrapper *cfgtew)
1114 {
1115 	if (res->ioa_cfg->sis64) {
1116 		if (!memcmp(&res->dev_id, &cfgtew->u.cfgte64->dev_id,
1117 					sizeof(cfgtew->u.cfgte64->dev_id)) &&
1118 			!memcmp(&res->lun, &cfgtew->u.cfgte64->lun,
1119 					sizeof(cfgtew->u.cfgte64->lun))) {
1120 			return 1;
1121 		}
1122 	} else {
1123 		if (res->bus == cfgtew->u.cfgte->res_addr.bus &&
1124 		    res->target == cfgtew->u.cfgte->res_addr.target &&
1125 		    res->lun == cfgtew->u.cfgte->res_addr.lun)
1126 			return 1;
1127 	}
1128 
1129 	return 0;
1130 }
1131 
1132 /**
1133  * ipr_format_res_path - Format the resource path for printing.
1134  * @res_path:	resource path
1135  * @buffer:	buffer in which to format the resource path
 * @len:	length of the buffer
1136  *
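 * The formatted output is a dash-separated hex string such as "00-0A-01"
 * (example values), ending at the 0xff terminator or when the buffer fills.
 *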
1137  * Return value:
1138  * 	pointer to buffer
1139  **/
1140 static char *ipr_format_res_path(u8 *res_path, char *buffer, int len)
1141 {
1142 	int i;
1143 	char *p = buffer;
1144 
1145 	*p = '\0';
1146 	p += snprintf(p, buffer + len - p, "%02X", res_path[0]);
1147 	for (i = 1; res_path[i] != 0xff && ((i * 3) < len); i++)
1148 		p += snprintf(p, buffer + len - p, "-%02X", res_path[i]);
1149 
1150 	return buffer;
1151 }
1152 
1153 /**
1154  * ipr_update_res_entry - Update the resource entry.
1155  * @res:	resource entry struct
1156  * @cfgtew:	config table entry wrapper struct
1157  *
1158  * Return value:
1159  *      none
1160  **/
1161 static void ipr_update_res_entry(struct ipr_resource_entry *res,
1162 				 struct ipr_config_table_entry_wrapper *cfgtew)
1163 {
1164 	char buffer[IPR_MAX_RES_PATH_LENGTH];
1165 	unsigned int proto;
1166 	int new_path = 0;
1167 
1168 	if (res->ioa_cfg->sis64) {
1169 		res->flags = cfgtew->u.cfgte64->flags;
1170 		res->res_flags = cfgtew->u.cfgte64->res_flags;
1171 		res->type = cfgtew->u.cfgte64->res_type;
1172 
1173 		memcpy(&res->std_inq_data, &cfgtew->u.cfgte64->std_inq_data,
1174 			sizeof(struct ipr_std_inq_data));
1175 
1176 		res->qmodel = IPR_QUEUEING_MODEL64(res);
1177 		proto = cfgtew->u.cfgte64->proto;
1178 		res->res_handle = cfgtew->u.cfgte64->res_handle;
1179 		res->dev_id = cfgtew->u.cfgte64->dev_id;
1180 
1181 		memcpy(&res->dev_lun.scsi_lun, &cfgtew->u.cfgte64->lun,
1182 			sizeof(res->dev_lun.scsi_lun));
1183 
1184 		if (memcmp(res->res_path, &cfgtew->u.cfgte64->res_path,
1185 					sizeof(res->res_path))) {
1186 			memcpy(res->res_path, &cfgtew->u.cfgte64->res_path,
1187 				sizeof(res->res_path));
1188 			new_path = 1;
1189 		}
1190 
1191 		if (res->sdev && new_path)
1192 			sdev_printk(KERN_INFO, res->sdev, "Resource path: %s\n",
1193 				    ipr_format_res_path(res->res_path, buffer,
1194 							sizeof(buffer)));
1195 	} else {
1196 		res->flags = cfgtew->u.cfgte->flags;
1197 		if (res->flags & IPR_IS_IOA_RESOURCE)
1198 			res->type = IPR_RES_TYPE_IOAFP;
1199 		else
1200 			res->type = cfgtew->u.cfgte->rsvd_subtype & 0x0f;
1201 
1202 		memcpy(&res->std_inq_data, &cfgtew->u.cfgte->std_inq_data,
1203 			sizeof(struct ipr_std_inq_data));
1204 
1205 		res->qmodel = IPR_QUEUEING_MODEL(res);
1206 		proto = cfgtew->u.cfgte->proto;
1207 		res->res_handle = cfgtew->u.cfgte->res_handle;
1208 	}
1209 
1210 	ipr_update_ata_class(res, proto);
1211 }
1212 
1213 /**
1214  * ipr_clear_res_target - Clear the bit in the bit map representing the target
1215  * 			  for the resource.
1216  * @res:	resource entry struct
1218  *
1219  * Return value:
1220  *      none
1221  **/
1222 static void ipr_clear_res_target(struct ipr_resource_entry *res)
1223 {
1224 	struct ipr_resource_entry *gscsi_res = NULL;
1225 	struct ipr_ioa_cfg *ioa_cfg = res->ioa_cfg;
1226 
1227 	if (!ioa_cfg->sis64)
1228 		return;
1229 
1230 	if (res->bus == IPR_ARRAY_VIRTUAL_BUS)
1231 		clear_bit(res->target, ioa_cfg->array_ids);
1232 	else if (res->bus == IPR_VSET_VIRTUAL_BUS)
1233 		clear_bit(res->target, ioa_cfg->vset_ids);
1234 	else if (res->bus == 0 && res->type == IPR_RES_TYPE_GENERIC_SCSI) {
1235 		list_for_each_entry(gscsi_res, &ioa_cfg->used_res_q, queue)
1236 			if (gscsi_res->dev_id == res->dev_id && gscsi_res != res)
1237 				return;
1238 		clear_bit(res->target, ioa_cfg->target_ids);
1239 
1240 	} else if (res->bus == 0)
1241 		clear_bit(res->target, ioa_cfg->target_ids);
1242 }
1243 
1244 /**
1245  * ipr_handle_config_change - Handle a config change from the adapter
1246  * @ioa_cfg:	ioa config struct
1247  * @hostrcb:	hostrcb
1248  *
1249  * Return value:
1250  * 	none
1251  **/
1252 static void ipr_handle_config_change(struct ipr_ioa_cfg *ioa_cfg,
1253 				     struct ipr_hostrcb *hostrcb)
1254 {
1255 	struct ipr_resource_entry *res = NULL;
1256 	struct ipr_config_table_entry_wrapper cfgtew;
1257 	__be32 cc_res_handle;
1258 
1259 	u32 is_ndn = 1;
1260 
1261 	if (ioa_cfg->sis64) {
1262 		cfgtew.u.cfgte64 = &hostrcb->hcam.u.ccn.u.cfgte64;
1263 		cc_res_handle = cfgtew.u.cfgte64->res_handle;
1264 	} else {
1265 		cfgtew.u.cfgte = &hostrcb->hcam.u.ccn.u.cfgte;
1266 		cc_res_handle = cfgtew.u.cfgte->res_handle;
1267 	}
1268 
1269 	list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
1270 		if (res->res_handle == cc_res_handle) {
1271 			is_ndn = 0;
1272 			break;
1273 		}
1274 	}
1275 
1276 	if (is_ndn) {
1277 		if (list_empty(&ioa_cfg->free_res_q)) {
1278 			ipr_send_hcam(ioa_cfg,
1279 				      IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE,
1280 				      hostrcb);
1281 			return;
1282 		}
1283 
1284 		res = list_entry(ioa_cfg->free_res_q.next,
1285 				 struct ipr_resource_entry, queue);
1286 
1287 		list_del(&res->queue);
1288 		ipr_init_res_entry(res, &cfgtew);
1289 		list_add_tail(&res->queue, &ioa_cfg->used_res_q);
1290 	}
1291 
1292 	ipr_update_res_entry(res, &cfgtew);
1293 
1294 	if (hostrcb->hcam.notify_type == IPR_HOST_RCB_NOTIF_TYPE_REM_ENTRY) {
1295 		if (res->sdev) {
1296 			res->del_from_ml = 1;
1297 			res->res_handle = IPR_INVALID_RES_HANDLE;
1298 			if (ioa_cfg->allow_ml_add_del)
1299 				schedule_work(&ioa_cfg->work_q);
1300 		} else {
1301 			ipr_clear_res_target(res);
1302 			list_move_tail(&res->queue, &ioa_cfg->free_res_q);
1303 		}
1304 	} else if (!res->sdev) {
1305 		res->add_to_ml = 1;
1306 		if (ioa_cfg->allow_ml_add_del)
1307 			schedule_work(&ioa_cfg->work_q);
1308 	}
1309 
1310 	ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE, hostrcb);
1311 }
1312 
1313 /**
1314  * ipr_process_ccn - Op done function for a CCN.
1315  * @ipr_cmd:	ipr command struct
1316  *
1317  * This function is the op done function for a configuration
1318  * change notification host controlled async from the adapter.
1319  *
1320  * Return value:
1321  * 	none
1322  **/
1323 static void ipr_process_ccn(struct ipr_cmnd *ipr_cmd)
1324 {
1325 	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
1326 	struct ipr_hostrcb *hostrcb = ipr_cmd->u.hostrcb;
1327 	u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
1328 
1329 	list_del(&hostrcb->queue);
1330 	list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
1331 
1332 	if (ioasc) {
1333 		if (ioasc != IPR_IOASC_IOA_WAS_RESET)
1334 			dev_err(&ioa_cfg->pdev->dev,
1335 				"Host RCB failed with IOASC: 0x%08X\n", ioasc);
1336 
1337 		ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE, hostrcb);
1338 	} else {
1339 		ipr_handle_config_change(ioa_cfg, hostrcb);
1340 	}
1341 }
1342 
1343 /**
1344  * strip_and_pad_whitespace - Strip and pad trailing whitespace.
1345  * @i:		index into buffer
1346  * @buf:		string to modify
1347  *
1348  * This function will strip all trailing whitespace, pad the end
1349  * of the string with a single space, and NULL terminate the string.
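 *
 * For example, given buf = "IBM     " and i = 7 (the vendor ID case, assuming
 * IPR_VENDOR_ID_LEN is 8), the trailing blanks are stripped and a space plus
 * NUL appended, leaving "IBM " and returning 4, the offset at which the
 * caller copies the next field.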
1350  *
1351  * Return value:
1352  * 	new length of string
1353  **/
1354 static int strip_and_pad_whitespace(int i, char *buf)
1355 {
1356 	while (i && buf[i] == ' ')
1357 		i--;
1358 	buf[i+1] = ' ';
1359 	buf[i+2] = '\0';
1360 	return i + 2;
1361 }
1362 
1363 /**
1364  * ipr_log_vpd_compact - Log the passed extended VPD compactly.
1365  * @prefix:		string to print at start of printk
1366  * @hostrcb:	hostrcb pointer
1367  * @vpd:		vendor/product id/sn struct
1368  *
1369  * Return value:
1370  * 	none
1371  **/
1372 static void ipr_log_vpd_compact(char *prefix, struct ipr_hostrcb *hostrcb,
1373 				struct ipr_vpd *vpd)
1374 {
1375 	char buffer[IPR_VENDOR_ID_LEN + IPR_PROD_ID_LEN + IPR_SERIAL_NUM_LEN + 3];
1376 	int i = 0;
1377 
1378 	memcpy(buffer, vpd->vpids.vendor_id, IPR_VENDOR_ID_LEN);
1379 	i = strip_and_pad_whitespace(IPR_VENDOR_ID_LEN - 1, buffer);
1380 
1381 	memcpy(&buffer[i], vpd->vpids.product_id, IPR_PROD_ID_LEN);
1382 	i = strip_and_pad_whitespace(i + IPR_PROD_ID_LEN - 1, buffer);
1383 
1384 	memcpy(&buffer[i], vpd->sn, IPR_SERIAL_NUM_LEN);
1385 	buffer[IPR_SERIAL_NUM_LEN + i] = '\0';
1386 
1387 	ipr_hcam_err(hostrcb, "%s VPID/SN: %s\n", prefix, buffer);
1388 }
1389 
1390 /**
1391  * ipr_log_vpd - Log the passed VPD to the error log.
1392  * @vpd:		vendor/product id/sn struct
1393  *
1394  * Return value:
1395  * 	none
1396  **/
1397 static void ipr_log_vpd(struct ipr_vpd *vpd)
1398 {
1399 	char buffer[IPR_VENDOR_ID_LEN + IPR_PROD_ID_LEN
1400 		    + IPR_SERIAL_NUM_LEN];
1401 
1402 	memcpy(buffer, vpd->vpids.vendor_id, IPR_VENDOR_ID_LEN);
1403 	memcpy(buffer + IPR_VENDOR_ID_LEN, vpd->vpids.product_id,
1404 	       IPR_PROD_ID_LEN);
1405 	buffer[IPR_VENDOR_ID_LEN + IPR_PROD_ID_LEN] = '\0';
1406 	ipr_err("Vendor/Product ID: %s\n", buffer);
1407 
1408 	memcpy(buffer, vpd->sn, IPR_SERIAL_NUM_LEN);
1409 	buffer[IPR_SERIAL_NUM_LEN] = '\0';
1410 	ipr_err("    Serial Number: %s\n", buffer);
1411 }
1412 
1413 /**
1414  * ipr_log_ext_vpd_compact - Log the passed extended VPD compactly.
1415  * @prefix:		string to print at start of printk
1416  * @hostrcb:	hostrcb pointer
1417  * @vpd:		vendor/product id/sn/wwn struct
1418  *
1419  * Return value:
1420  * 	none
1421  **/
1422 static void ipr_log_ext_vpd_compact(char *prefix, struct ipr_hostrcb *hostrcb,
1423 				    struct ipr_ext_vpd *vpd)
1424 {
1425 	ipr_log_vpd_compact(prefix, hostrcb, &vpd->vpd);
1426 	ipr_hcam_err(hostrcb, "%s WWN: %08X%08X\n", prefix,
1427 		     be32_to_cpu(vpd->wwid[0]), be32_to_cpu(vpd->wwid[1]));
1428 }
1429 
1430 /**
1431  * ipr_log_ext_vpd - Log the passed extended VPD to the error log.
1432  * @vpd:		vendor/product id/sn/wwn struct
1433  *
1434  * Return value:
1435  * 	none
1436  **/
1437 static void ipr_log_ext_vpd(struct ipr_ext_vpd *vpd)
1438 {
1439 	ipr_log_vpd(&vpd->vpd);
1440 	ipr_err("    WWN: %08X%08X\n", be32_to_cpu(vpd->wwid[0]),
1441 		be32_to_cpu(vpd->wwid[1]));
1442 }
1443 
1444 /**
1445  * ipr_log_enhanced_cache_error - Log a cache error.
1446  * @ioa_cfg:	ioa config struct
1447  * @hostrcb:	hostrcb struct
1448  *
1449  * Return value:
1450  * 	none
1451  **/
1452 static void ipr_log_enhanced_cache_error(struct ipr_ioa_cfg *ioa_cfg,
1453 					 struct ipr_hostrcb *hostrcb)
1454 {
1455 	struct ipr_hostrcb_type_12_error *error;
1456 
1457 	if (ioa_cfg->sis64)
1458 		error = &hostrcb->hcam.u.error64.u.type_12_error;
1459 	else
1460 		error = &hostrcb->hcam.u.error.u.type_12_error;
1461 
1462 	ipr_err("-----Current Configuration-----\n");
1463 	ipr_err("Cache Directory Card Information:\n");
1464 	ipr_log_ext_vpd(&error->ioa_vpd);
1465 	ipr_err("Adapter Card Information:\n");
1466 	ipr_log_ext_vpd(&error->cfc_vpd);
1467 
1468 	ipr_err("-----Expected Configuration-----\n");
1469 	ipr_err("Cache Directory Card Information:\n");
1470 	ipr_log_ext_vpd(&error->ioa_last_attached_to_cfc_vpd);
1471 	ipr_err("Adapter Card Information:\n");
1472 	ipr_log_ext_vpd(&error->cfc_last_attached_to_ioa_vpd);
1473 
1474 	ipr_err("Additional IOA Data: %08X %08X %08X\n",
1475 		     be32_to_cpu(error->ioa_data[0]),
1476 		     be32_to_cpu(error->ioa_data[1]),
1477 		     be32_to_cpu(error->ioa_data[2]));
1478 }
1479 
1480 /**
1481  * ipr_log_cache_error - Log a cache error.
1482  * @ioa_cfg:	ioa config struct
1483  * @hostrcb:	hostrcb struct
1484  *
1485  * Return value:
1486  * 	none
1487  **/
1488 static void ipr_log_cache_error(struct ipr_ioa_cfg *ioa_cfg,
1489 				struct ipr_hostrcb *hostrcb)
1490 {
1491 	struct ipr_hostrcb_type_02_error *error =
1492 		&hostrcb->hcam.u.error.u.type_02_error;
1493 
1494 	ipr_err("-----Current Configuration-----\n");
1495 	ipr_err("Cache Directory Card Information:\n");
1496 	ipr_log_vpd(&error->ioa_vpd);
1497 	ipr_err("Adapter Card Information:\n");
1498 	ipr_log_vpd(&error->cfc_vpd);
1499 
1500 	ipr_err("-----Expected Configuration-----\n");
1501 	ipr_err("Cache Directory Card Information:\n");
1502 	ipr_log_vpd(&error->ioa_last_attached_to_cfc_vpd);
1503 	ipr_err("Adapter Card Information:\n");
1504 	ipr_log_vpd(&error->cfc_last_attached_to_ioa_vpd);
1505 
1506 	ipr_err("Additional IOA Data: %08X %08X %08X\n",
1507 		     be32_to_cpu(error->ioa_data[0]),
1508 		     be32_to_cpu(error->ioa_data[1]),
1509 		     be32_to_cpu(error->ioa_data[2]));
1510 }
1511 
1512 /**
1513  * ipr_log_enhanced_config_error - Log a configuration error.
1514  * @ioa_cfg:	ioa config struct
1515  * @hostrcb:	hostrcb struct
1516  *
1517  * Return value:
1518  * 	none
1519  **/
1520 static void ipr_log_enhanced_config_error(struct ipr_ioa_cfg *ioa_cfg,
1521 					  struct ipr_hostrcb *hostrcb)
1522 {
1523 	int errors_logged, i;
1524 	struct ipr_hostrcb_device_data_entry_enhanced *dev_entry;
1525 	struct ipr_hostrcb_type_13_error *error;
1526 
1527 	error = &hostrcb->hcam.u.error.u.type_13_error;
1528 	errors_logged = be32_to_cpu(error->errors_logged);
1529 
1530 	ipr_err("Device Errors Detected/Logged: %d/%d\n",
1531 		be32_to_cpu(error->errors_detected), errors_logged);
1532 
1533 	dev_entry = error->dev;
1534 
1535 	for (i = 0; i < errors_logged; i++, dev_entry++) {
1536 		ipr_err_separator;
1537 
1538 		ipr_phys_res_err(ioa_cfg, dev_entry->dev_res_addr, "Device %d", i + 1);
1539 		ipr_log_ext_vpd(&dev_entry->vpd);
1540 
1541 		ipr_err("-----New Device Information-----\n");
1542 		ipr_log_ext_vpd(&dev_entry->new_vpd);
1543 
1544 		ipr_err("Cache Directory Card Information:\n");
1545 		ipr_log_ext_vpd(&dev_entry->ioa_last_with_dev_vpd);
1546 
1547 		ipr_err("Adapter Card Information:\n");
1548 		ipr_log_ext_vpd(&dev_entry->cfc_last_with_dev_vpd);
1549 	}
1550 }
1551 
1552 /**
1553  * ipr_log_sis64_config_error - Log a device error.
1554  * @ioa_cfg:	ioa config struct
1555  * @hostrcb:	hostrcb struct
1556  *
1557  * Return value:
1558  * 	none
1559  **/
1560 static void ipr_log_sis64_config_error(struct ipr_ioa_cfg *ioa_cfg,
1561 				       struct ipr_hostrcb *hostrcb)
1562 {
1563 	int errors_logged, i;
1564 	struct ipr_hostrcb64_device_data_entry_enhanced *dev_entry;
1565 	struct ipr_hostrcb_type_23_error *error;
1566 	char buffer[IPR_MAX_RES_PATH_LENGTH];
1567 
1568 	error = &hostrcb->hcam.u.error64.u.type_23_error;
1569 	errors_logged = be32_to_cpu(error->errors_logged);
1570 
1571 	ipr_err("Device Errors Detected/Logged: %d/%d\n",
1572 		be32_to_cpu(error->errors_detected), errors_logged);
1573 
1574 	dev_entry = error->dev;
1575 
1576 	for (i = 0; i < errors_logged; i++, dev_entry++) {
1577 		ipr_err_separator;
1578 
1579 		ipr_err("Device %d : %s", i + 1,
1580 			 ipr_format_res_path(dev_entry->res_path, buffer,
1581 					     sizeof(buffer)));
1582 		ipr_log_ext_vpd(&dev_entry->vpd);
1583 
1584 		ipr_err("-----New Device Information-----\n");
1585 		ipr_log_ext_vpd(&dev_entry->new_vpd);
1586 
1587 		ipr_err("Cache Directory Card Information:\n");
1588 		ipr_log_ext_vpd(&dev_entry->ioa_last_with_dev_vpd);
1589 
1590 		ipr_err("Adapter Card Information:\n");
1591 		ipr_log_ext_vpd(&dev_entry->cfc_last_with_dev_vpd);
1592 	}
1593 }
1594 
1595 /**
1596  * ipr_log_config_error - Log a configuration error.
1597  * @ioa_cfg:	ioa config struct
1598  * @hostrcb:	hostrcb struct
1599  *
1600  * Return value:
1601  * 	none
1602  **/
1603 static void ipr_log_config_error(struct ipr_ioa_cfg *ioa_cfg,
1604 				 struct ipr_hostrcb *hostrcb)
1605 {
1606 	int errors_logged, i;
1607 	struct ipr_hostrcb_device_data_entry *dev_entry;
1608 	struct ipr_hostrcb_type_03_error *error;
1609 
1610 	error = &hostrcb->hcam.u.error.u.type_03_error;
1611 	errors_logged = be32_to_cpu(error->errors_logged);
1612 
1613 	ipr_err("Device Errors Detected/Logged: %d/%d\n",
1614 		be32_to_cpu(error->errors_detected), errors_logged);
1615 
1616 	dev_entry = error->dev;
1617 
1618 	for (i = 0; i < errors_logged; i++, dev_entry++) {
1619 		ipr_err_separator;
1620 
1621 		ipr_phys_res_err(ioa_cfg, dev_entry->dev_res_addr, "Device %d", i + 1);
1622 		ipr_log_vpd(&dev_entry->vpd);
1623 
1624 		ipr_err("-----New Device Information-----\n");
1625 		ipr_log_vpd(&dev_entry->new_vpd);
1626 
1627 		ipr_err("Cache Directory Card Information:\n");
1628 		ipr_log_vpd(&dev_entry->ioa_last_with_dev_vpd);
1629 
1630 		ipr_err("Adapter Card Information:\n");
1631 		ipr_log_vpd(&dev_entry->cfc_last_with_dev_vpd);
1632 
1633 		ipr_err("Additional IOA Data: %08X %08X %08X %08X %08X\n",
1634 			be32_to_cpu(dev_entry->ioa_data[0]),
1635 			be32_to_cpu(dev_entry->ioa_data[1]),
1636 			be32_to_cpu(dev_entry->ioa_data[2]),
1637 			be32_to_cpu(dev_entry->ioa_data[3]),
1638 			be32_to_cpu(dev_entry->ioa_data[4]));
1639 	}
1640 }
1641 
1642 /**
1643  * ipr_log_enhanced_array_error - Log an array configuration error.
1644  * @ioa_cfg:	ioa config struct
1645  * @hostrcb:	hostrcb struct
1646  *
1647  * Return value:
1648  * 	none
1649  **/
1650 static void ipr_log_enhanced_array_error(struct ipr_ioa_cfg *ioa_cfg,
1651 					 struct ipr_hostrcb *hostrcb)
1652 {
1653 	int i, num_entries;
1654 	struct ipr_hostrcb_type_14_error *error;
1655 	struct ipr_hostrcb_array_data_entry_enhanced *array_entry;
1656 	const u8 zero_sn[IPR_SERIAL_NUM_LEN] = { [0 ... IPR_SERIAL_NUM_LEN-1] = '0' };
1657 
1658 	error = &hostrcb->hcam.u.error.u.type_14_error;
1659 
1660 	ipr_err_separator;
1661 
1662 	ipr_err("RAID %s Array Configuration: %d:%d:%d:%d\n",
1663 		error->protection_level,
1664 		ioa_cfg->host->host_no,
1665 		error->last_func_vset_res_addr.bus,
1666 		error->last_func_vset_res_addr.target,
1667 		error->last_func_vset_res_addr.lun);
1668 
1669 	ipr_err_separator;
1670 
1671 	array_entry = error->array_member;
1672 	num_entries = min_t(u32, be32_to_cpu(error->num_entries),
1673 			    ARRAY_SIZE(error->array_member));
1674 
1675 	for (i = 0; i < num_entries; i++, array_entry++) {
1676 		if (!memcmp(array_entry->vpd.vpd.sn, zero_sn, IPR_SERIAL_NUM_LEN))
1677 			continue;
1678 
1679 		if (be32_to_cpu(error->exposed_mode_adn) == i)
1680 			ipr_err("Exposed Array Member %d:\n", i);
1681 		else
1682 			ipr_err("Array Member %d:\n", i);
1683 
1684 		ipr_log_ext_vpd(&array_entry->vpd);
1685 		ipr_phys_res_err(ioa_cfg, array_entry->dev_res_addr, "Current Location");
1686 		ipr_phys_res_err(ioa_cfg, array_entry->expected_dev_res_addr,
1687 				 "Expected Location");
1688 
1689 		ipr_err_separator;
1690 	}
1691 }
1692 
1693 /**
1694  * ipr_log_array_error - Log an array configuration error.
1695  * @ioa_cfg:	ioa config struct
1696  * @hostrcb:	hostrcb struct
1697  *
1698  * Return value:
1699  * 	none
1700  **/
1701 static void ipr_log_array_error(struct ipr_ioa_cfg *ioa_cfg,
1702 				struct ipr_hostrcb *hostrcb)
1703 {
1704 	int i;
1705 	struct ipr_hostrcb_type_04_error *error;
1706 	struct ipr_hostrcb_array_data_entry *array_entry;
1707 	const u8 zero_sn[IPR_SERIAL_NUM_LEN] = { [0 ... IPR_SERIAL_NUM_LEN-1] = '0' };
1708 
1709 	error = &hostrcb->hcam.u.error.u.type_04_error;
1710 
1711 	ipr_err_separator;
1712 
1713 	ipr_err("RAID %s Array Configuration: %d:%d:%d:%d\n",
1714 		error->protection_level,
1715 		ioa_cfg->host->host_no,
1716 		error->last_func_vset_res_addr.bus,
1717 		error->last_func_vset_res_addr.target,
1718 		error->last_func_vset_res_addr.lun);
1719 
1720 	ipr_err_separator;
1721 
1722 	array_entry = error->array_member;
1723 
1724 	for (i = 0; i < 18; i++) {
1725 		if (!memcmp(array_entry->vpd.sn, zero_sn, IPR_SERIAL_NUM_LEN))
1726 			continue;
1727 
1728 		if (be32_to_cpu(error->exposed_mode_adn) == i)
1729 			ipr_err("Exposed Array Member %d:\n", i);
1730 		else
1731 			ipr_err("Array Member %d:\n", i);
1732 
1733 		ipr_log_vpd(&array_entry->vpd);
1734 
1735 		ipr_phys_res_err(ioa_cfg, array_entry->dev_res_addr, "Current Location");
1736 		ipr_phys_res_err(ioa_cfg, array_entry->expected_dev_res_addr,
1737 				 "Expected Location");
1738 
1739 		ipr_err_separator;
1740 
1741 		if (i == 9)
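		/* Members 0-9 are stored in array_member[]; members 10-17 in array_member2[] */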
1742 			array_entry = error->array_member2;
1743 		else
1744 			array_entry++;
1745 	}
1746 }
1747 
1748 /**
1749  * ipr_log_hex_data - Log additional hex IOA error data.
1750  * @ioa_cfg:	ioa config struct
1751  * @data:		IOA error data
1752  * @len:		data length
1753  *
1754  * Return value:
1755  * 	none
1756  **/
1757 static void ipr_log_hex_data(struct ipr_ioa_cfg *ioa_cfg, u32 *data, int len)
1758 {
1759 	int i;
1760 
1761 	if (len == 0)
1762 		return;
1763 
1764 	if (ioa_cfg->log_level <= IPR_DEFAULT_LOG_LEVEL)
1765 		len = min_t(int, len, IPR_DEFAULT_MAX_ERROR_DUMP);
1766 
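	/* len is in bytes; dump four 32-bit words per line, prefixed with the byte offset */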
1767 	for (i = 0; i < len / 4; i += 4) {
1768 		ipr_err("%08X: %08X %08X %08X %08X\n", i*4,
1769 			be32_to_cpu(data[i]),
1770 			be32_to_cpu(data[i+1]),
1771 			be32_to_cpu(data[i+2]),
1772 			be32_to_cpu(data[i+3]));
1773 	}
1774 }
1775 
1776 /**
1777  * ipr_log_enhanced_dual_ioa_error - Log an enhanced dual adapter error.
1778  * @ioa_cfg:	ioa config struct
1779  * @hostrcb:	hostrcb struct
1780  *
1781  * Return value:
1782  * 	none
1783  **/
1784 static void ipr_log_enhanced_dual_ioa_error(struct ipr_ioa_cfg *ioa_cfg,
1785 					    struct ipr_hostrcb *hostrcb)
1786 {
1787 	struct ipr_hostrcb_type_17_error *error;
1788 
1789 	if (ioa_cfg->sis64)
1790 		error = &hostrcb->hcam.u.error64.u.type_17_error;
1791 	else
1792 		error = &hostrcb->hcam.u.error.u.type_17_error;
1793 
1794 	error->failure_reason[sizeof(error->failure_reason) - 1] = '\0';
1795 	strim(error->failure_reason);
1796 
1797 	ipr_hcam_err(hostrcb, "%s [PRC: %08X]\n", error->failure_reason,
1798 		     be32_to_cpu(hostrcb->hcam.u.error.prc));
1799 	ipr_log_ext_vpd_compact("Remote IOA", hostrcb, &error->vpd);
1800 	ipr_log_hex_data(ioa_cfg, error->data,
1801 			 be32_to_cpu(hostrcb->hcam.length) -
1802 			 (offsetof(struct ipr_hostrcb_error, u) +
1803 			  offsetof(struct ipr_hostrcb_type_17_error, data)));
1804 }
1805 
1806 /**
1807  * ipr_log_dual_ioa_error - Log a dual adapter error.
1808  * @ioa_cfg:	ioa config struct
1809  * @hostrcb:	hostrcb struct
1810  *
1811  * Return value:
1812  * 	none
1813  **/
1814 static void ipr_log_dual_ioa_error(struct ipr_ioa_cfg *ioa_cfg,
1815 				   struct ipr_hostrcb *hostrcb)
1816 {
1817 	struct ipr_hostrcb_type_07_error *error;
1818 
1819 	error = &hostrcb->hcam.u.error.u.type_07_error;
1820 	error->failure_reason[sizeof(error->failure_reason) - 1] = '\0';
1821 	strim(error->failure_reason);
1822 
1823 	ipr_hcam_err(hostrcb, "%s [PRC: %08X]\n", error->failure_reason,
1824 		     be32_to_cpu(hostrcb->hcam.u.error.prc));
1825 	ipr_log_vpd_compact("Remote IOA", hostrcb, &error->vpd);
1826 	ipr_log_hex_data(ioa_cfg, error->data,
1827 			 be32_to_cpu(hostrcb->hcam.length) -
1828 			 (offsetof(struct ipr_hostrcb_error, u) +
1829 			  offsetof(struct ipr_hostrcb_type_07_error, data)));
1830 }
1831 
1832 static const struct {
1833 	u8 active;
1834 	char *desc;
1835 } path_active_desc[] = {
1836 	{ IPR_PATH_NO_INFO, "Path" },
1837 	{ IPR_PATH_ACTIVE, "Active path" },
1838 	{ IPR_PATH_NOT_ACTIVE, "Inactive path" }
1839 };
1840 
1841 static const struct {
1842 	u8 state;
1843 	char *desc;
1844 } path_state_desc[] = {
1845 	{ IPR_PATH_STATE_NO_INFO, "has no path state information available" },
1846 	{ IPR_PATH_HEALTHY, "is healthy" },
1847 	{ IPR_PATH_DEGRADED, "is degraded" },
1848 	{ IPR_PATH_FAILED, "is failed" }
1849 };
1850 
1851 /**
1852  * ipr_log_fabric_path - Log a fabric path error
1853  * @hostrcb:	hostrcb struct
1854  * @fabric:		fabric descriptor
1855  *
1856  * Return value:
1857  * 	none
1858  **/
1859 static void ipr_log_fabric_path(struct ipr_hostrcb *hostrcb,
1860 				struct ipr_hostrcb_fabric_desc *fabric)
1861 {
1862 	int i, j;
1863 	u8 path_state = fabric->path_state;
1864 	u8 active = path_state & IPR_PATH_ACTIVE_MASK;
1865 	u8 state = path_state & IPR_PATH_STATE_MASK;
1866 
1867 	for (i = 0; i < ARRAY_SIZE(path_active_desc); i++) {
1868 		if (path_active_desc[i].active != active)
1869 			continue;
1870 
1871 		for (j = 0; j < ARRAY_SIZE(path_state_desc); j++) {
1872 			if (path_state_desc[j].state != state)
1873 				continue;
1874 
1875 			if (fabric->cascaded_expander == 0xff && fabric->phy == 0xff) {
1876 				ipr_hcam_err(hostrcb, "%s %s: IOA Port=%d\n",
1877 					     path_active_desc[i].desc, path_state_desc[j].desc,
1878 					     fabric->ioa_port);
1879 			} else if (fabric->cascaded_expander == 0xff) {
1880 				ipr_hcam_err(hostrcb, "%s %s: IOA Port=%d, Phy=%d\n",
1881 					     path_active_desc[i].desc, path_state_desc[j].desc,
1882 					     fabric->ioa_port, fabric->phy);
1883 			} else if (fabric->phy == 0xff) {
1884 				ipr_hcam_err(hostrcb, "%s %s: IOA Port=%d, Cascade=%d\n",
1885 					     path_active_desc[i].desc, path_state_desc[j].desc,
1886 					     fabric->ioa_port, fabric->cascaded_expander);
1887 			} else {
1888 				ipr_hcam_err(hostrcb, "%s %s: IOA Port=%d, Cascade=%d, Phy=%d\n",
1889 					     path_active_desc[i].desc, path_state_desc[j].desc,
1890 					     fabric->ioa_port, fabric->cascaded_expander, fabric->phy);
1891 			}
1892 			return;
1893 		}
1894 	}
1895 
1896 	ipr_err("Path state=%02X IOA Port=%d Cascade=%d Phy=%d\n", path_state,
1897 		fabric->ioa_port, fabric->cascaded_expander, fabric->phy);
1898 }
1899 
1900 /**
1901  * ipr_log64_fabric_path - Log a fabric path error
1902  * @hostrcb:	hostrcb struct
1903  * @fabric:		fabric descriptor
1904  *
1905  * Return value:
1906  * 	none
1907  **/
1908 static void ipr_log64_fabric_path(struct ipr_hostrcb *hostrcb,
1909 				  struct ipr_hostrcb64_fabric_desc *fabric)
1910 {
1911 	int i, j;
1912 	u8 path_state = fabric->path_state;
1913 	u8 active = path_state & IPR_PATH_ACTIVE_MASK;
1914 	u8 state = path_state & IPR_PATH_STATE_MASK;
1915 	char buffer[IPR_MAX_RES_PATH_LENGTH];
1916 
1917 	for (i = 0; i < ARRAY_SIZE(path_active_desc); i++) {
1918 		if (path_active_desc[i].active != active)
1919 			continue;
1920 
1921 		for (j = 0; j < ARRAY_SIZE(path_state_desc); j++) {
1922 			if (path_state_desc[j].state != state)
1923 				continue;
1924 
1925 			ipr_hcam_err(hostrcb, "%s %s: Resource Path=%s\n",
1926 				     path_active_desc[i].desc, path_state_desc[j].desc,
1927 				     ipr_format_res_path(fabric->res_path, buffer,
1928 							 sizeof(buffer)));
1929 			return;
1930 		}
1931 	}
1932 
1933 	ipr_err("Path state=%02X Resource Path=%s\n", path_state,
1934 		ipr_format_res_path(fabric->res_path, buffer, sizeof(buffer)));
1935 }
1936 
1937 static const struct {
1938 	u8 type;
1939 	char *desc;
1940 } path_type_desc[] = {
1941 	{ IPR_PATH_CFG_IOA_PORT, "IOA port" },
1942 	{ IPR_PATH_CFG_EXP_PORT, "Expander port" },
1943 	{ IPR_PATH_CFG_DEVICE_PORT, "Device port" },
1944 	{ IPR_PATH_CFG_DEVICE_LUN, "Device LUN" }
1945 };
1946 
1947 static const struct {
1948 	u8 status;
1949 	char *desc;
1950 } path_status_desc[] = {
1951 	{ IPR_PATH_CFG_NO_PROB, "Functional" },
1952 	{ IPR_PATH_CFG_DEGRADED, "Degraded" },
1953 	{ IPR_PATH_CFG_FAILED, "Failed" },
1954 	{ IPR_PATH_CFG_SUSPECT, "Suspect" },
1955 	{ IPR_PATH_NOT_DETECTED, "Missing" },
1956 	{ IPR_PATH_INCORRECT_CONN, "Incorrectly connected" }
1957 };
1958 
1959 static const char *link_rate[] = {
1960 	"unknown",
1961 	"disabled",
1962 	"phy reset problem",
1963 	"spinup hold",
1964 	"port selector",
1965 	"unknown",
1966 	"unknown",
1967 	"unknown",
1968 	"1.5Gbps",
1969 	"3.0Gbps",
1970 	"unknown",
1971 	"unknown",
1972 	"unknown",
1973 	"unknown",
1974 	"unknown",
1975 	"unknown"
1976 };
1977 
1978 /**
1979  * ipr_log_path_elem - Log a fabric path element.
1980  * @hostrcb:	hostrcb struct
1981  * @cfg:		fabric path element struct
1982  *
1983  * Return value:
1984  * 	none
1985  **/
1986 static void ipr_log_path_elem(struct ipr_hostrcb *hostrcb,
1987 			      struct ipr_hostrcb_config_element *cfg)
1988 {
1989 	int i, j;
1990 	u8 type = cfg->type_status & IPR_PATH_CFG_TYPE_MASK;
1991 	u8 status = cfg->type_status & IPR_PATH_CFG_STATUS_MASK;
1992 
1993 	if (type == IPR_PATH_CFG_NOT_EXIST)
1994 		return;
1995 
1996 	for (i = 0; i < ARRAY_SIZE(path_type_desc); i++) {
1997 		if (path_type_desc[i].type != type)
1998 			continue;
1999 
2000 		for (j = 0; j < ARRAY_SIZE(path_status_desc); j++) {
2001 			if (path_status_desc[j].status != status)
2002 				continue;
2003 
2004 			if (type == IPR_PATH_CFG_IOA_PORT) {
2005 				ipr_hcam_err(hostrcb, "%s %s: Phy=%d, Link rate=%s, WWN=%08X%08X\n",
2006 					     path_status_desc[j].desc, path_type_desc[i].desc,
2007 					     cfg->phy, link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2008 					     be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
2009 			} else {
2010 				if (cfg->cascaded_expander == 0xff && cfg->phy == 0xff) {
2011 					ipr_hcam_err(hostrcb, "%s %s: Link rate=%s, WWN=%08X%08X\n",
2012 						     path_status_desc[j].desc, path_type_desc[i].desc,
2013 						     link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2014 						     be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
2015 				} else if (cfg->cascaded_expander == 0xff) {
2016 					ipr_hcam_err(hostrcb, "%s %s: Phy=%d, Link rate=%s, "
2017 						     "WWN=%08X%08X\n", path_status_desc[j].desc,
2018 						     path_type_desc[i].desc, cfg->phy,
2019 						     link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2020 						     be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
2021 				} else if (cfg->phy == 0xff) {
2022 					ipr_hcam_err(hostrcb, "%s %s: Cascade=%d, Link rate=%s, "
2023 						     "WWN=%08X%08X\n", path_status_desc[j].desc,
2024 						     path_type_desc[i].desc, cfg->cascaded_expander,
2025 						     link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2026 						     be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
2027 				} else {
2028 					ipr_hcam_err(hostrcb, "%s %s: Cascade=%d, Phy=%d, Link rate=%s "
2029 						     "WWN=%08X%08X\n", path_status_desc[j].desc,
2030 						     path_type_desc[i].desc, cfg->cascaded_expander, cfg->phy,
2031 						     link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2032 						     be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
2033 				}
2034 			}
2035 			return;
2036 		}
2037 	}
2038 
2039 	ipr_hcam_err(hostrcb, "Path element=%02X: Cascade=%d Phy=%d Link rate=%s "
2040 		     "WWN=%08X%08X\n", cfg->type_status, cfg->cascaded_expander, cfg->phy,
2041 		     link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2042 		     be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
2043 }
2044 
2045 /**
2046  * ipr_log64_path_elem - Log a fabric path element.
2047  * @hostrcb:	hostrcb struct
2048  * @cfg:		fabric path element struct
2049  *
2050  * Return value:
2051  * 	none
2052  **/
2053 static void ipr_log64_path_elem(struct ipr_hostrcb *hostrcb,
2054 				struct ipr_hostrcb64_config_element *cfg)
2055 {
2056 	int i, j;
2057 	u8 desc_id = cfg->descriptor_id & IPR_DESCRIPTOR_MASK;
2058 	u8 type = cfg->type_status & IPR_PATH_CFG_TYPE_MASK;
2059 	u8 status = cfg->type_status & IPR_PATH_CFG_STATUS_MASK;
2060 	char buffer[IPR_MAX_RES_PATH_LENGTH];
2061 
2062 	if (type == IPR_PATH_CFG_NOT_EXIST || desc_id != IPR_DESCRIPTOR_SIS64)
2063 		return;
2064 
2065 	for (i = 0; i < ARRAY_SIZE(path_type_desc); i++) {
2066 		if (path_type_desc[i].type != type)
2067 			continue;
2068 
2069 		for (j = 0; j < ARRAY_SIZE(path_status_desc); j++) {
2070 			if (path_status_desc[j].status != status)
2071 				continue;
2072 
2073 			ipr_hcam_err(hostrcb, "%s %s: Resource Path=%s, Link rate=%s, WWN=%08X%08X\n",
2074 				     path_status_desc[j].desc, path_type_desc[i].desc,
2075 				     ipr_format_res_path(cfg->res_path, buffer,
2076 							 sizeof(buffer)),
2077 				     link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2078 				     be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
2079 			return;
2080 		}
2081 	}
2082 	ipr_hcam_err(hostrcb, "Path element=%02X: Resource Path=%s, Link rate=%s "
2083 		     "WWN=%08X%08X\n", cfg->type_status,
2084 		     ipr_format_res_path(cfg->res_path, buffer, sizeof(buffer)),
2085 		     link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2086 		     be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
2087 }
2088 
2089 /**
2090  * ipr_log_fabric_error - Log a fabric error.
2091  * @ioa_cfg:	ioa config struct
2092  * @hostrcb:	hostrcb struct
2093  *
2094  * Return value:
2095  * 	none
2096  **/
2097 static void ipr_log_fabric_error(struct ipr_ioa_cfg *ioa_cfg,
2098 				 struct ipr_hostrcb *hostrcb)
2099 {
2100 	struct ipr_hostrcb_type_20_error *error;
2101 	struct ipr_hostrcb_fabric_desc *fabric;
2102 	struct ipr_hostrcb_config_element *cfg;
2103 	int i, add_len;
2104 
2105 	error = &hostrcb->hcam.u.error.u.type_20_error;
2106 	error->failure_reason[sizeof(error->failure_reason) - 1] = '\0';
2107 	ipr_hcam_err(hostrcb, "%s\n", error->failure_reason);
2108 
2109 	add_len = be32_to_cpu(hostrcb->hcam.length) -
2110 		(offsetof(struct ipr_hostrcb_error, u) +
2111 		 offsetof(struct ipr_hostrcb_type_20_error, desc));
2112 
2113 	for (i = 0, fabric = error->desc; i < error->num_entries; i++) {
2114 		ipr_log_fabric_path(hostrcb, fabric);
2115 		for_each_fabric_cfg(fabric, cfg)
2116 			ipr_log_path_elem(hostrcb, cfg);
2117 
2118 		add_len -= be16_to_cpu(fabric->length);
2119 		fabric = (struct ipr_hostrcb_fabric_desc *)
2120 			((unsigned long)fabric + be16_to_cpu(fabric->length));
2121 	}
2122 
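	/* fabric now points past the last descriptor; dump any trailing bytes as hex */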
2123 	ipr_log_hex_data(ioa_cfg, (u32 *)fabric, add_len);
2124 }
2125 
2126 /**
2127  * ipr_log_sis64_array_error - Log a sis64 array error.
2128  * @ioa_cfg:	ioa config struct
2129  * @hostrcb:	hostrcb struct
2130  *
2131  * Return value:
2132  * 	none
2133  **/
2134 static void ipr_log_sis64_array_error(struct ipr_ioa_cfg *ioa_cfg,
2135 				      struct ipr_hostrcb *hostrcb)
2136 {
2137 	int i, num_entries;
2138 	struct ipr_hostrcb_type_24_error *error;
2139 	struct ipr_hostrcb64_array_data_entry *array_entry;
2140 	char buffer[IPR_MAX_RES_PATH_LENGTH];
2141 	const u8 zero_sn[IPR_SERIAL_NUM_LEN] = { [0 ... IPR_SERIAL_NUM_LEN-1] = '0' };
2142 
2143 	error = &hostrcb->hcam.u.error64.u.type_24_error;
2144 
2145 	ipr_err_separator;
2146 
2147 	ipr_err("RAID %s Array Configuration: %s\n",
2148 		error->protection_level,
2149 		ipr_format_res_path(error->last_res_path, buffer, sizeof(buffer)));
2150 
2151 	ipr_err_separator;
2152 
2153 	array_entry = error->array_member;
2154 	num_entries = min_t(u32, be32_to_cpu(error->num_entries),
2155 			    ARRAY_SIZE(error->array_member));
2156 
2157 	for (i = 0; i < num_entries; i++, array_entry++) {
2158 
2159 		if (!memcmp(array_entry->vpd.vpd.sn, zero_sn, IPR_SERIAL_NUM_LEN))
2160 			continue;
2161 
2162 		if (error->exposed_mode_adn == i)
2163 			ipr_err("Exposed Array Member %d:\n", i);
2164 		else
2165 			ipr_err("Array Member %d:\n", i);
2166 
2168 		ipr_log_ext_vpd(&array_entry->vpd);
2169 		ipr_err("Current Location: %s\n",
2170 			 ipr_format_res_path(array_entry->res_path, buffer,
2171 					     sizeof(buffer)));
2172 		ipr_err("Expected Location: %s\n",
2173 			 ipr_format_res_path(array_entry->expected_res_path,
2174 					     buffer, sizeof(buffer)));
2175 
2176 		ipr_err_separator;
2177 	}
2178 }
2179 
2180 /**
2181  * ipr_log_sis64_fabric_error - Log a sis64 fabric error.
2182  * @ioa_cfg:	ioa config struct
2183  * @hostrcb:	hostrcb struct
2184  *
2185  * Return value:
2186  * 	none
2187  **/
2188 static void ipr_log_sis64_fabric_error(struct ipr_ioa_cfg *ioa_cfg,
2189 				       struct ipr_hostrcb *hostrcb)
2190 {
2191 	struct ipr_hostrcb_type_30_error *error;
2192 	struct ipr_hostrcb64_fabric_desc *fabric;
2193 	struct ipr_hostrcb64_config_element *cfg;
2194 	int i, add_len;
2195 
2196 	error = &hostrcb->hcam.u.error64.u.type_30_error;
2197 
2198 	error->failure_reason[sizeof(error->failure_reason) - 1] = '\0';
2199 	ipr_hcam_err(hostrcb, "%s\n", error->failure_reason);
2200 
2201 	add_len = be32_to_cpu(hostrcb->hcam.length) -
2202 		(offsetof(struct ipr_hostrcb64_error, u) +
2203 		 offsetof(struct ipr_hostrcb_type_30_error, desc));
2204 
2205 	for (i = 0, fabric = error->desc; i < error->num_entries; i++) {
2206 		ipr_log64_fabric_path(hostrcb, fabric);
2207 		for_each_fabric_cfg(fabric, cfg)
2208 			ipr_log64_path_elem(hostrcb, cfg);
2209 
2210 		add_len -= be16_to_cpu(fabric->length);
2211 		fabric = (struct ipr_hostrcb64_fabric_desc *)
2212 			((unsigned long)fabric + be16_to_cpu(fabric->length));
2213 	}
2214 
2215 	ipr_log_hex_data(ioa_cfg, (u32 *)fabric, add_len);
2216 }
2217 
2218 /**
2219  * ipr_log_generic_error - Log an adapter error.
2220  * @ioa_cfg:	ioa config struct
2221  * @hostrcb:	hostrcb struct
2222  *
2223  * Return value:
2224  * 	none
2225  **/
2226 static void ipr_log_generic_error(struct ipr_ioa_cfg *ioa_cfg,
2227 				  struct ipr_hostrcb *hostrcb)
2228 {
2229 	ipr_log_hex_data(ioa_cfg, hostrcb->hcam.u.raw.data,
2230 			 be32_to_cpu(hostrcb->hcam.length));
2231 }
2232 
2233 /**
2234  * ipr_get_error - Find the specified IOASC in the ipr_error_table.
2235  * @ioasc:	IOASC
2236  *
2237  * This function will return the index into the ipr_error_table
2238  * for the specified IOASC. If the IOASC is not in the table,
2239  * 0 will be returned, which points to the entry used for unknown errors.
2240  *
2241  * Return value:
2242  * 	index into the ipr_error_table
2243  **/
2244 static u32 ipr_get_error(u32 ioasc)
2245 {
2246 	int i;
2247 
2248 	for (i = 0; i < ARRAY_SIZE(ipr_error_table); i++)
2249 		if (ipr_error_table[i].ioasc == (ioasc & IPR_IOASC_IOASC_MASK))
2250 			return i;
2251 
2252 	return 0;
2253 }
2254 
2255 /**
2256  * ipr_handle_log_data - Log an adapter error.
2257  * @ioa_cfg:	ioa config struct
2258  * @hostrcb:	hostrcb struct
2259  *
2260  * This function logs an adapter error to the system.
2261  *
2262  * Return value:
2263  * 	none
2264  **/
2265 static void ipr_handle_log_data(struct ipr_ioa_cfg *ioa_cfg,
2266 				struct ipr_hostrcb *hostrcb)
2267 {
2268 	u32 ioasc;
2269 	int error_index;
2270 
2271 	if (hostrcb->hcam.notify_type != IPR_HOST_RCB_NOTIF_TYPE_ERROR_LOG_ENTRY)
2272 		return;
2273 
2274 	if (hostrcb->hcam.notifications_lost == IPR_HOST_RCB_NOTIFICATIONS_LOST)
2275 		dev_err(&ioa_cfg->pdev->dev, "Error notifications lost\n");
2276 
2277 	if (ioa_cfg->sis64)
2278 		ioasc = be32_to_cpu(hostrcb->hcam.u.error64.fd_ioasc);
2279 	else
2280 		ioasc = be32_to_cpu(hostrcb->hcam.u.error.fd_ioasc);
2281 
2282 	if (!ioa_cfg->sis64 && (ioasc == IPR_IOASC_BUS_WAS_RESET ||
2283 	    ioasc == IPR_IOASC_BUS_WAS_RESET_BY_OTHER)) {
2284 		/* Tell the midlayer we had a bus reset so it will handle the UA properly */
2285 		scsi_report_bus_reset(ioa_cfg->host,
2286 				      hostrcb->hcam.u.error.fd_res_addr.bus);
2287 	}
2288 
2289 	error_index = ipr_get_error(ioasc);
2290 
2291 	if (!ipr_error_table[error_index].log_hcam)
2292 		return;
2293 
2294 	ipr_hcam_err(hostrcb, "%s\n", ipr_error_table[error_index].error);
2295 
2296 	/* Set indication we have logged an error */
2297 	ioa_cfg->errors_logged++;
2298 
2299 	if (ioa_cfg->log_level < ipr_error_table[error_index].log_hcam)
2300 		return;
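	/* Clamp the reported HCAM length so the overlay loggers cannot read past the buffer */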
2301 	if (be32_to_cpu(hostrcb->hcam.length) > sizeof(hostrcb->hcam.u.raw))
2302 		hostrcb->hcam.length = cpu_to_be32(sizeof(hostrcb->hcam.u.raw));
2303 
2304 	switch (hostrcb->hcam.overlay_id) {
2305 	case IPR_HOST_RCB_OVERLAY_ID_2:
2306 		ipr_log_cache_error(ioa_cfg, hostrcb);
2307 		break;
2308 	case IPR_HOST_RCB_OVERLAY_ID_3:
2309 		ipr_log_config_error(ioa_cfg, hostrcb);
2310 		break;
2311 	case IPR_HOST_RCB_OVERLAY_ID_4:
2312 	case IPR_HOST_RCB_OVERLAY_ID_6:
2313 		ipr_log_array_error(ioa_cfg, hostrcb);
2314 		break;
2315 	case IPR_HOST_RCB_OVERLAY_ID_7:
2316 		ipr_log_dual_ioa_error(ioa_cfg, hostrcb);
2317 		break;
2318 	case IPR_HOST_RCB_OVERLAY_ID_12:
2319 		ipr_log_enhanced_cache_error(ioa_cfg, hostrcb);
2320 		break;
2321 	case IPR_HOST_RCB_OVERLAY_ID_13:
2322 		ipr_log_enhanced_config_error(ioa_cfg, hostrcb);
2323 		break;
2324 	case IPR_HOST_RCB_OVERLAY_ID_14:
2325 	case IPR_HOST_RCB_OVERLAY_ID_16:
2326 		ipr_log_enhanced_array_error(ioa_cfg, hostrcb);
2327 		break;
2328 	case IPR_HOST_RCB_OVERLAY_ID_17:
2329 		ipr_log_enhanced_dual_ioa_error(ioa_cfg, hostrcb);
2330 		break;
2331 	case IPR_HOST_RCB_OVERLAY_ID_20:
2332 		ipr_log_fabric_error(ioa_cfg, hostrcb);
2333 		break;
2334 	case IPR_HOST_RCB_OVERLAY_ID_23:
2335 		ipr_log_sis64_config_error(ioa_cfg, hostrcb);
2336 		break;
2337 	case IPR_HOST_RCB_OVERLAY_ID_24:
2338 	case IPR_HOST_RCB_OVERLAY_ID_26:
2339 		ipr_log_sis64_array_error(ioa_cfg, hostrcb);
2340 		break;
2341 	case IPR_HOST_RCB_OVERLAY_ID_30:
2342 		ipr_log_sis64_fabric_error(ioa_cfg, hostrcb);
2343 		break;
2344 	case IPR_HOST_RCB_OVERLAY_ID_1:
2345 	case IPR_HOST_RCB_OVERLAY_ID_DEFAULT:
2346 	default:
2347 		ipr_log_generic_error(ioa_cfg, hostrcb);
2348 		break;
2349 	}
2350 }
2351 
2352 /**
2353  * ipr_process_error - Op done function for an adapter error log.
2354  * @ipr_cmd:	ipr command struct
2355  *
2356  * This function is the op done function for an error log host
2357  * controlled async from the adapter. It will log the error and
2358  * send the HCAM back to the adapter.
2359  *
2360  * Return value:
2361  * 	none
2362  **/
2363 static void ipr_process_error(struct ipr_cmnd *ipr_cmd)
2364 {
2365 	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
2366 	struct ipr_hostrcb *hostrcb = ipr_cmd->u.hostrcb;
2367 	u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
2368 	u32 fd_ioasc;
2369 
2370 	if (ioa_cfg->sis64)
2371 		fd_ioasc = be32_to_cpu(hostrcb->hcam.u.error64.fd_ioasc);
2372 	else
2373 		fd_ioasc = be32_to_cpu(hostrcb->hcam.u.error.fd_ioasc);
2374 
2375 	list_del(&hostrcb->queue);
2376 	list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
2377 
2378 	if (!ioasc) {
2379 		ipr_handle_log_data(ioa_cfg, hostrcb);
2380 		if (fd_ioasc == IPR_IOASC_NR_IOA_RESET_REQUIRED)
2381 			ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_ABBREV);
2382 	} else if (ioasc != IPR_IOASC_IOA_WAS_RESET) {
2383 		dev_err(&ioa_cfg->pdev->dev,
2384 			"Host RCB failed with IOASC: 0x%08X\n", ioasc);
2385 	}
2386 
2387 	ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_LOG_DATA, hostrcb);
2388 }
2389 
2390 /**
2391  * ipr_timeout -  An internally generated op has timed out.
2392  * @ipr_cmd:	ipr command struct
2393  *
2394  * This function blocks host requests and initiates an
2395  * adapter reset.
2396  *
2397  * Return value:
2398  * 	none
2399  **/
2400 static void ipr_timeout(struct ipr_cmnd *ipr_cmd)
2401 {
2402 	unsigned long lock_flags = 0;
2403 	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
2404 
2405 	ENTER;
2406 	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2407 
2408 	ioa_cfg->errors_logged++;
2409 	dev_err(&ioa_cfg->pdev->dev,
2410 		"Adapter being reset due to command timeout.\n");
2411 
2412 	if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
2413 		ioa_cfg->sdt_state = GET_DUMP;
2414 
2415 	if (!ioa_cfg->in_reset_reload || ioa_cfg->reset_cmd == ipr_cmd)
2416 		ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
2417 
2418 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2419 	LEAVE;
2420 }
2421 
2422 /**
2423  * ipr_oper_timeout -  Adapter timed out transitioning to operational
2424  * @ipr_cmd:	ipr command struct
2425  *
2426  * This function blocks host requests and initiates an
2427  * adapter reset.
2428  *
2429  * Return value:
2430  * 	none
2431  **/
2432 static void ipr_oper_timeout(struct ipr_cmnd *ipr_cmd)
2433 {
2434 	unsigned long lock_flags = 0;
2435 	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
2436 
2437 	ENTER;
2438 	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2439 
2440 	ioa_cfg->errors_logged++;
2441 	dev_err(&ioa_cfg->pdev->dev,
2442 		"Adapter timed out transitioning to operational.\n");
2443 
2444 	if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
2445 		ioa_cfg->sdt_state = GET_DUMP;
2446 
2447 	if (!ioa_cfg->in_reset_reload || ioa_cfg->reset_cmd == ipr_cmd) {
2448 		if (ipr_fastfail)
2449 			ioa_cfg->reset_retries += IPR_NUM_RESET_RELOAD_RETRIES;
2450 		ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
2451 	}
2452 
2453 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2454 	LEAVE;
2455 }
2456 
2457 /**
2458  * ipr_reset_reload - Reset/Reload the IOA
2459  * @ioa_cfg:		ioa config struct
2460  * @shutdown_type:	shutdown type
2461  *
2462  * This function resets the adapter and re-initializes it.
2463  * This function assumes that all new host commands have been stopped.
2464  * Return value:
2465  * 	SUCCESS / FAILED
2466  **/
2467 static int ipr_reset_reload(struct ipr_ioa_cfg *ioa_cfg,
2468 			    enum ipr_shutdown_type shutdown_type)
2469 {
2470 	if (!ioa_cfg->in_reset_reload)
2471 		ipr_initiate_ioa_reset(ioa_cfg, shutdown_type);
2472 
2473 	spin_unlock_irq(ioa_cfg->host->host_lock);
2474 	wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
2475 	spin_lock_irq(ioa_cfg->host->host_lock);
2476 
2477 	/* If a host reset arrived while we were already resetting the
2478 	 adapter for some reason and that reset failed, the adapter is dead. */
2479 	if (ioa_cfg->ioa_is_dead) {
2480 		ipr_trace;
2481 		return FAILED;
2482 	}
2483 
2484 	return SUCCESS;
2485 }
2486 
2487 /**
2488  * ipr_find_ses_entry - Find matching SES in SES table
2489  * @res:	resource entry struct of SES
2490  *
2491  * Return value:
2492  * 	pointer to SES table entry / NULL on failure
2493  **/
2494 static const struct ipr_ses_table_entry *
2495 ipr_find_ses_entry(struct ipr_resource_entry *res)
2496 {
2497 	int i, j, matches;
2498 	struct ipr_std_inq_vpids *vpids;
2499 	const struct ipr_ses_table_entry *ste = ipr_ses_table;
2500 
2501 	for (i = 0; i < ARRAY_SIZE(ipr_ses_table); i++, ste++) {
2502 		for (j = 0, matches = 0; j < IPR_PROD_ID_LEN; j++) {
2503 			if (ste->compare_product_id_byte[j] == 'X') {
2504 				vpids = &res->std_inq_data.vpids;
2505 				if (vpids->product_id[j] == ste->product_id[j])
2506 					matches++;
2507 				else
2508 					break;
2509 			} else
2510 				matches++;
2511 		}
2512 
2513 		if (matches == IPR_PROD_ID_LEN)
2514 			return ste;
2515 	}
2516 
2517 	return NULL;
2518 }
2519 
2520 /**
2521  * ipr_get_max_scsi_speed - Determine max SCSI speed for a given bus
2522  * @ioa_cfg:	ioa config struct
2523  * @bus:		SCSI bus
2524  * @bus_width:	bus width
2525  *
2526  * Return value:
2527  *	SCSI bus speed in units of 100KHz (e.g. 1600 = 160 MHz).
2528  *	For a 2-byte (wide) SCSI bus, the throughput in MB/sec is twice
2529  *	the transfer rate, so a wide bus running at 160 MHz peaks at
2530  *	320 MB/sec.
2531  **/
2532 static u32 ipr_get_max_scsi_speed(struct ipr_ioa_cfg *ioa_cfg, u8 bus, u8 bus_width)
2533 {
2534 	struct ipr_resource_entry *res;
2535 	const struct ipr_ses_table_entry *ste;
2536 	u32 max_xfer_rate = IPR_MAX_SCSI_RATE(bus_width);
2537 
2538 	/* Loop through each config table entry in the config table buffer */
2539 	list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
2540 		if (!(IPR_IS_SES_DEVICE(res->std_inq_data)))
2541 			continue;
2542 
2543 		if (bus != res->bus)
2544 			continue;
2545 
2546 		if (!(ste = ipr_find_ses_entry(res)))
2547 			continue;
2548 
2549 		max_xfer_rate = (ste->max_bus_speed_limit * 10) / (bus_width / 8);
2550 	}
2551 
2552 	return max_xfer_rate;
2553 }
2554 
2555 /**
2556  * ipr_wait_iodbg_ack - Wait for an IODEBUG ACK from the IOA
2557  * @ioa_cfg:		ioa config struct
2558  * @max_delay:		max delay in micro-seconds to wait
2559  *
2560  * Waits for an IODEBUG ACK from the IOA, doing busy looping.
2561  *
2562  * Return value:
2563  * 	0 on success / other on failure
2564  **/
2565 static int ipr_wait_iodbg_ack(struct ipr_ioa_cfg *ioa_cfg, int max_delay)
2566 {
2567 	volatile u32 pcii_reg;
2568 	int delay = 1;
2569 
2570 	/* Read interrupt reg until IOA signals IO Debug Acknowledge */
2571 	while (delay < max_delay) {
2572 		pcii_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
2573 
2574 		if (pcii_reg & IPR_PCII_IO_DEBUG_ACKNOWLEDGE)
2575 			return 0;
2576 
2577 		/* udelay cannot be used if delay is more than a few milliseconds */
2578 		if ((delay / 1000) > MAX_UDELAY_MS)
2579 			mdelay(delay / 1000);
2580 		else
2581 			udelay(delay);
2582 
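		/* Exponential backoff: double the delay each pass until max_delay is reached */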
2583 		delay += delay;
2584 	}
2585 	return -EIO;
2586 }
2587 
2588 /**
2589  * ipr_get_sis64_dump_data_section - Dump IOA memory
2590  * @ioa_cfg:			ioa config struct
2591  * @start_addr:			adapter address to dump
2592  * @dest:			destination kernel buffer
2593  * @length_in_words:		length to dump in 4 byte words
2594  *
2595  * Return value:
2596  * 	0 on success
2597  **/
2598 static int ipr_get_sis64_dump_data_section(struct ipr_ioa_cfg *ioa_cfg,
2599 					   u32 start_addr,
2600 					   __be32 *dest, u32 length_in_words)
2601 {
2602 	int i;
2603 
2604 	for (i = 0; i < length_in_words; i++) {
2605 		writel(start_addr+(i*4), ioa_cfg->regs.dump_addr_reg);
2606 		*dest = cpu_to_be32(readl(ioa_cfg->regs.dump_data_reg));
2607 		dest++;
2608 	}
2609 
2610 	return 0;
2611 }
2612 
2613 /**
2614  * ipr_get_ldump_data_section - Dump IOA memory
2615  * @ioa_cfg:			ioa config struct
2616  * @start_addr:			adapter address to dump
2617  * @dest:				destination kernel buffer
2618  * @length_in_words:	length to dump in 4 byte words
2619  *
2620  * Return value:
2621  * 	0 on success / -EIO on failure
2622  **/
2623 static int ipr_get_ldump_data_section(struct ipr_ioa_cfg *ioa_cfg,
2624 				      u32 start_addr,
2625 				      __be32 *dest, u32 length_in_words)
2626 {
2627 	volatile u32 temp_pcii_reg;
2628 	int i, delay = 0;
2629 
2630 	if (ioa_cfg->sis64)
2631 		return ipr_get_sis64_dump_data_section(ioa_cfg, start_addr,
2632 						       dest, length_in_words);
2633 
2634 	/* Write IOA interrupt reg starting LDUMP state  */
2635 	writel((IPR_UPROCI_RESET_ALERT | IPR_UPROCI_IO_DEBUG_ALERT),
2636 	       ioa_cfg->regs.set_uproc_interrupt_reg32);
2637 
2638 	/* Wait for IO debug acknowledge */
2639 	if (ipr_wait_iodbg_ack(ioa_cfg,
2640 			       IPR_LDUMP_MAX_LONG_ACK_DELAY_IN_USEC)) {
2641 		dev_err(&ioa_cfg->pdev->dev,
2642 			"IOA dump long data transfer timeout\n");
2643 		return -EIO;
2644 	}
2645 
2646 	/* Signal LDUMP interlocked - clear IO debug ack */
2647 	writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE,
2648 	       ioa_cfg->regs.clr_interrupt_reg);
2649 
2650 	/* Write Mailbox with starting address */
2651 	writel(start_addr, ioa_cfg->ioa_mailbox);
2652 
2653 	/* Signal address valid - clear IOA Reset alert */
2654 	writel(IPR_UPROCI_RESET_ALERT,
2655 	       ioa_cfg->regs.clr_uproc_interrupt_reg32);
2656 
2657 	for (i = 0; i < length_in_words; i++) {
2658 		/* Wait for IO debug acknowledge */
2659 		if (ipr_wait_iodbg_ack(ioa_cfg,
2660 				       IPR_LDUMP_MAX_SHORT_ACK_DELAY_IN_USEC)) {
2661 			dev_err(&ioa_cfg->pdev->dev,
2662 				"IOA dump short data transfer timeout\n");
2663 			return -EIO;
2664 		}
2665 
2666 		/* Read data from mailbox and increment destination pointer */
2667 		*dest = cpu_to_be32(readl(ioa_cfg->ioa_mailbox));
2668 		dest++;
2669 
2670 		/* For all but the last word of data, signal data received */
2671 		if (i < (length_in_words - 1)) {
2672 			/* Signal dump data received - Clear IO debug Ack */
2673 			writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE,
2674 			       ioa_cfg->regs.clr_interrupt_reg);
2675 		}
2676 	}
2677 
2678 	/* Signal end of block transfer. Set reset alert then clear IO debug ack */
2679 	writel(IPR_UPROCI_RESET_ALERT,
2680 	       ioa_cfg->regs.set_uproc_interrupt_reg32);
2681 
2682 	writel(IPR_UPROCI_IO_DEBUG_ALERT,
2683 	       ioa_cfg->regs.clr_uproc_interrupt_reg32);
2684 
2685 	/* Signal dump data received - Clear IO debug Ack */
2686 	writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE,
2687 	       ioa_cfg->regs.clr_interrupt_reg);
2688 
2689 	/* Wait for IOA to signal LDUMP exit - IOA reset alert will be cleared */
2690 	while (delay < IPR_LDUMP_MAX_SHORT_ACK_DELAY_IN_USEC) {
2691 		temp_pcii_reg =
2692 		    readl(ioa_cfg->regs.sense_uproc_interrupt_reg32);
2693 
2694 		if (!(temp_pcii_reg & IPR_UPROCI_RESET_ALERT))
2695 			return 0;
2696 
2697 		udelay(10);
2698 		delay += 10;
2699 	}
2700 
2701 	return 0;
2702 }
2703 
2704 #ifdef CONFIG_SCSI_IPR_DUMP
2705 /**
2706  * ipr_sdt_copy - Copy Smart Dump Table to kernel buffer
2707  * @ioa_cfg:		ioa config struct
2708  * @pci_address:	adapter address
2709  * @length:			length of data to copy
2710  *
2711  * Copy data from PCI adapter to kernel buffer.
2712  * Note: length MUST be a 4 byte multiple
2713  * Return value:
2714  * 	number of bytes copied
2715  **/
2716 static int ipr_sdt_copy(struct ipr_ioa_cfg *ioa_cfg,
2717 			unsigned long pci_address, u32 length)
2718 {
2719 	int bytes_copied = 0;
2720 	int cur_len, rc, rem_len, rem_page_len;
2721 	__be32 *page;
2722 	unsigned long lock_flags = 0;
2723 	struct ipr_ioa_dump *ioa_dump = &ioa_cfg->dump->ioa_dump;
2724 
2725 	while (bytes_copied < length &&
2726 	       (ioa_dump->hdr.len + bytes_copied) < IPR_MAX_IOA_DUMP_SIZE) {
2727 		if (ioa_dump->page_offset >= PAGE_SIZE ||
2728 		    ioa_dump->page_offset == 0) {
2729 			page = (__be32 *)__get_free_page(GFP_ATOMIC);
2730 
2731 			if (!page) {
2732 				ipr_trace;
2733 				return bytes_copied;
2734 			}
2735 
2736 			ioa_dump->page_offset = 0;
2737 			ioa_dump->ioa_data[ioa_dump->next_page_index] = page;
2738 			ioa_dump->next_page_index++;
2739 		} else
2740 			page = ioa_dump->ioa_data[ioa_dump->next_page_index - 1];
2741 
2742 		rem_len = length - bytes_copied;
2743 		rem_page_len = PAGE_SIZE - ioa_dump->page_offset;
2744 		cur_len = min(rem_len, rem_page_len);
2745 
2746 		spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2747 		if (ioa_cfg->sdt_state == ABORT_DUMP) {
2748 			rc = -EIO;
2749 		} else {
2750 			rc = ipr_get_ldump_data_section(ioa_cfg,
2751 							pci_address + bytes_copied,
2752 							&page[ioa_dump->page_offset / 4],
2753 							(cur_len / sizeof(u32)));
2754 		}
2755 		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2756 
2757 		if (!rc) {
2758 			ioa_dump->page_offset += cur_len;
2759 			bytes_copied += cur_len;
2760 		} else {
2761 			ipr_trace;
2762 			break;
2763 		}
2764 		schedule();
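		/* Yield the CPU between chunks; a full IOA dump can take a while */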
2765 	}
2766 
2767 	return bytes_copied;
2768 }
2769 
2770 /**
2771  * ipr_init_dump_entry_hdr - Initialize a dump entry header.
2772  * @hdr:	dump entry header struct
2773  *
2774  * Return value:
2775  * 	nothing
2776  **/
2777 static void ipr_init_dump_entry_hdr(struct ipr_dump_entry_header *hdr)
2778 {
2779 	hdr->eye_catcher = IPR_DUMP_EYE_CATCHER;
2780 	hdr->num_elems = 1;
2781 	hdr->offset = sizeof(*hdr);
2782 	hdr->status = IPR_DUMP_STATUS_SUCCESS;
2783 }
2784 
2785 /**
2786  * ipr_dump_ioa_type_data - Fill in the adapter type in the dump.
2787  * @ioa_cfg:	ioa config struct
2788  * @driver_dump:	driver dump struct
2789  *
2790  * Return value:
2791  * 	nothing
2792  **/
2793 static void ipr_dump_ioa_type_data(struct ipr_ioa_cfg *ioa_cfg,
2794 				   struct ipr_driver_dump *driver_dump)
2795 {
2796 	struct ipr_inquiry_page3 *ucode_vpd = &ioa_cfg->vpd_cbs->page3_data;
2797 
2798 	ipr_init_dump_entry_hdr(&driver_dump->ioa_type_entry.hdr);
2799 	driver_dump->ioa_type_entry.hdr.len =
2800 		sizeof(struct ipr_dump_ioa_type_entry) -
2801 		sizeof(struct ipr_dump_entry_header);
2802 	driver_dump->ioa_type_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_BINARY;
2803 	driver_dump->ioa_type_entry.hdr.id = IPR_DUMP_DRIVER_TYPE_ID;
2804 	driver_dump->ioa_type_entry.type = ioa_cfg->type;
2805 	driver_dump->ioa_type_entry.fw_version = (ucode_vpd->major_release << 24) |
2806 		(ucode_vpd->card_type << 16) | (ucode_vpd->minor_release[0] << 8) |
2807 		ucode_vpd->minor_release[1];
2808 	driver_dump->hdr.num_entries++;
2809 }
2810 
2811 /**
2812  * ipr_dump_version_data - Fill in the driver version in the dump.
2813  * @ioa_cfg:	ioa config struct
2814  * @driver_dump:	driver dump struct
2815  *
2816  * Return value:
2817  * 	nothing
2818  **/
2819 static void ipr_dump_version_data(struct ipr_ioa_cfg *ioa_cfg,
2820 				  struct ipr_driver_dump *driver_dump)
2821 {
2822 	ipr_init_dump_entry_hdr(&driver_dump->version_entry.hdr);
2823 	driver_dump->version_entry.hdr.len =
2824 		sizeof(struct ipr_dump_version_entry) -
2825 		sizeof(struct ipr_dump_entry_header);
2826 	driver_dump->version_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_ASCII;
2827 	driver_dump->version_entry.hdr.id = IPR_DUMP_DRIVER_VERSION_ID;
2828 	strcpy(driver_dump->version_entry.version, IPR_DRIVER_VERSION);
2829 	driver_dump->hdr.num_entries++;
2830 }
2831 
2832 /**
2833  * ipr_dump_trace_data - Fill in the IOA trace in the dump.
2834  * @ioa_cfg:	ioa config struct
2835  * @driver_dump:	driver dump struct
2836  *
2837  * Return value:
2838  * 	nothing
2839  **/
2840 static void ipr_dump_trace_data(struct ipr_ioa_cfg *ioa_cfg,
2841 				   struct ipr_driver_dump *driver_dump)
2842 {
2843 	ipr_init_dump_entry_hdr(&driver_dump->trace_entry.hdr);
2844 	driver_dump->trace_entry.hdr.len =
2845 		sizeof(struct ipr_dump_trace_entry) -
2846 		sizeof(struct ipr_dump_entry_header);
2847 	driver_dump->trace_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_BINARY;
2848 	driver_dump->trace_entry.hdr.id = IPR_DUMP_TRACE_ID;
2849 	memcpy(driver_dump->trace_entry.trace, ioa_cfg->trace, IPR_TRACE_SIZE);
2850 	driver_dump->hdr.num_entries++;
2851 }
2852 
2853 /**
2854  * ipr_dump_location_data - Fill in the IOA location in the dump.
2855  * @ioa_cfg:	ioa config struct
2856  * @driver_dump:	driver dump struct
2857  *
2858  * Return value:
2859  * 	nothing
2860  **/
2861 static void ipr_dump_location_data(struct ipr_ioa_cfg *ioa_cfg,
2862 				   struct ipr_driver_dump *driver_dump)
2863 {
2864 	ipr_init_dump_entry_hdr(&driver_dump->location_entry.hdr);
2865 	driver_dump->location_entry.hdr.len =
2866 		sizeof(struct ipr_dump_location_entry) -
2867 		sizeof(struct ipr_dump_entry_header);
2868 	driver_dump->location_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_ASCII;
2869 	driver_dump->location_entry.hdr.id = IPR_DUMP_LOCATION_ID;
2870 	strcpy(driver_dump->location_entry.location, dev_name(&ioa_cfg->pdev->dev));
2871 	driver_dump->hdr.num_entries++;
2872 }
2873 
2874 /**
2875  * ipr_get_ioa_dump - Perform a dump of the driver and adapter.
2876  * @ioa_cfg:	ioa config struct
2877  * @dump:		dump struct
2878  *
2879  * Return value:
2880  * 	nothing
2881  **/
2882 static void ipr_get_ioa_dump(struct ipr_ioa_cfg *ioa_cfg, struct ipr_dump *dump)
2883 {
2884 	unsigned long start_addr, sdt_word;
2885 	unsigned long lock_flags = 0;
2886 	struct ipr_driver_dump *driver_dump = &dump->driver_dump;
2887 	struct ipr_ioa_dump *ioa_dump = &dump->ioa_dump;
2888 	u32 num_entries, start_off, end_off;
2889 	u32 bytes_to_copy, bytes_copied, rc;
2890 	struct ipr_sdt *sdt;
2891 	int valid = 1;
2892 	int i;
2893 
2894 	ENTER;
2895 
2896 	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2897 
2898 	if (ioa_cfg->sdt_state != GET_DUMP) {
2899 		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2900 		return;
2901 	}
2902 
2903 	start_addr = readl(ioa_cfg->ioa_mailbox);
2904 
2905 	if (!ioa_cfg->sis64 && !ipr_sdt_is_fmt2(start_addr)) {
2906 		dev_err(&ioa_cfg->pdev->dev,
2907 			"Invalid dump table format: %lx\n", start_addr);
2908 		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2909 		return;
2910 	}
2911 
2912 	dev_err(&ioa_cfg->pdev->dev, "Dump of IOA initiated\n");
2913 
2914 	driver_dump->hdr.eye_catcher = IPR_DUMP_EYE_CATCHER;
2915 
2916 	/* Initialize the overall dump header */
2917 	driver_dump->hdr.len = sizeof(struct ipr_driver_dump);
2918 	driver_dump->hdr.num_entries = 1;
2919 	driver_dump->hdr.first_entry_offset = sizeof(struct ipr_dump_header);
2920 	driver_dump->hdr.status = IPR_DUMP_STATUS_SUCCESS;
2921 	driver_dump->hdr.os = IPR_DUMP_OS_LINUX;
2922 	driver_dump->hdr.driver_name = IPR_DUMP_DRIVER_NAME;
2923 
2924 	ipr_dump_version_data(ioa_cfg, driver_dump);
2925 	ipr_dump_location_data(ioa_cfg, driver_dump);
2926 	ipr_dump_ioa_type_data(ioa_cfg, driver_dump);
2927 	ipr_dump_trace_data(ioa_cfg, driver_dump);
2928 
2929 	/* Update dump_header */
2930 	driver_dump->hdr.len += sizeof(struct ipr_dump_entry_header);
2931 
2932 	/* IOA Dump entry */
2933 	ipr_init_dump_entry_hdr(&ioa_dump->hdr);
2934 	ioa_dump->hdr.len = 0;
2935 	ioa_dump->hdr.data_type = IPR_DUMP_DATA_TYPE_BINARY;
2936 	ioa_dump->hdr.id = IPR_DUMP_IOA_DUMP_ID;
2937 
2938 	/* The first entries in the sdt are actually a list of dump addresses
2939 	 and lengths used to gather the real dump data.  sdt points to the
2940 	 IOA-generated dump table.  Dump data will be extracted based on the
2941 	 entries in this table. */
2942 	sdt = &ioa_dump->sdt;
2943 
2944 	rc = ipr_get_ldump_data_section(ioa_cfg, start_addr, (__be32 *)sdt,
2945 					sizeof(struct ipr_sdt) / sizeof(__be32));
2946 
2947 	/* Smart Dump table is ready to use and the first entry is valid */
2948 	if (rc || ((be32_to_cpu(sdt->hdr.state) != IPR_FMT3_SDT_READY_TO_USE) &&
2949 	    (be32_to_cpu(sdt->hdr.state) != IPR_FMT2_SDT_READY_TO_USE))) {
2950 		dev_err(&ioa_cfg->pdev->dev,
2951 			"Dump of IOA failed. Dump table not valid: %d, %X.\n",
2952 			rc, be32_to_cpu(sdt->hdr.state));
2953 		driver_dump->hdr.status = IPR_DUMP_STATUS_FAILED;
2954 		ioa_cfg->sdt_state = DUMP_OBTAINED;
2955 		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2956 		return;
2957 	}
2958 
2959 	num_entries = be32_to_cpu(sdt->hdr.num_entries_used);
2960 
2961 	if (num_entries > IPR_NUM_SDT_ENTRIES)
2962 		num_entries = IPR_NUM_SDT_ENTRIES;
2963 
2964 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2965 
2966 	for (i = 0; i < num_entries; i++) {
2967 		if (ioa_dump->hdr.len > IPR_MAX_IOA_DUMP_SIZE) {
2968 			driver_dump->hdr.status = IPR_DUMP_STATUS_QUAL_SUCCESS;
2969 			break;
2970 		}
2971 
2972 		if (sdt->entry[i].flags & IPR_SDT_VALID_ENTRY) {
2973 			sdt_word = be32_to_cpu(sdt->entry[i].start_token);
2974 			if (ioa_cfg->sis64)
2975 				bytes_to_copy = be32_to_cpu(sdt->entry[i].end_token);
2976 			else {
2977 				start_off = sdt_word & IPR_FMT2_MBX_ADDR_MASK;
2978 				end_off = be32_to_cpu(sdt->entry[i].end_token);
2979 
2980 				if (ipr_sdt_is_fmt2(sdt_word) && sdt_word)
2981 					bytes_to_copy = end_off - start_off;
2982 				else
2983 					valid = 0;
2984 			}
2985 			if (valid) {
2986 				if (bytes_to_copy > IPR_MAX_IOA_DUMP_SIZE) {
2987 					sdt->entry[i].flags &= ~IPR_SDT_VALID_ENTRY;
2988 					continue;
2989 				}
2990 
2991 				/* Copy data from adapter to driver buffers */
2992 				bytes_copied = ipr_sdt_copy(ioa_cfg, sdt_word,
2993 							    bytes_to_copy);
2994 
2995 				ioa_dump->hdr.len += bytes_copied;
2996 
2997 				if (bytes_copied != bytes_to_copy) {
2998 					driver_dump->hdr.status = IPR_DUMP_STATUS_QUAL_SUCCESS;
2999 					break;
3000 				}
3001 			}
3002 		}
3003 	}
3004 
3005 	dev_err(&ioa_cfg->pdev->dev, "Dump of IOA completed.\n");
3006 
3007 	/* Update dump_header */
3008 	driver_dump->hdr.len += ioa_dump->hdr.len;
3009 	wmb();
3010 	ioa_cfg->sdt_state = DUMP_OBTAINED;
3011 	LEAVE;
3012 }
3013 
3014 #else
3015 #define ipr_get_ioa_dump(ioa_cfg, dump) do { } while(0)
3016 #endif
3017 
3018 /**
3019  * ipr_release_dump - Free adapter dump memory
3020  * @kref:	kref struct
3021  *
3022  * Return value:
3023  *	nothing
3024  **/
3025 static void ipr_release_dump(struct kref *kref)
3026 {
3027 	struct ipr_dump *dump = container_of(kref,struct ipr_dump,kref);
3028 	struct ipr_ioa_cfg *ioa_cfg = dump->ioa_cfg;
3029 	unsigned long lock_flags = 0;
3030 	int i;
3031 
3032 	ENTER;
3033 	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3034 	ioa_cfg->dump = NULL;
3035 	ioa_cfg->sdt_state = INACTIVE;
3036 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3037 
3038 	for (i = 0; i < dump->ioa_dump.next_page_index; i++)
3039 		free_page((unsigned long) dump->ioa_dump.ioa_data[i]);
3040 
3041 	kfree(dump);
3042 	LEAVE;
3043 }
3044 
3045 /**
3046  * ipr_worker_thread - Worker thread
3047  * @work:		ioa config struct
3048  *
3049  * Called at task level from a work thread. This function takes care
3050  * of adding and removing device from the mid-layer as configuration
3051  * changes are detected by the adapter.
3052  *
3053  * Return value:
3054  * 	nothing
3055  **/
3056 static void ipr_worker_thread(struct work_struct *work)
3057 {
3058 	unsigned long lock_flags;
3059 	struct ipr_resource_entry *res;
3060 	struct scsi_device *sdev;
3061 	struct ipr_dump *dump;
3062 	struct ipr_ioa_cfg *ioa_cfg =
3063 		container_of(work, struct ipr_ioa_cfg, work_q);
3064 	u8 bus, target, lun;
3065 	int did_work;
3066 
3067 	ENTER;
3068 	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3069 
3070 	if (ioa_cfg->sdt_state == GET_DUMP) {
3071 		dump = ioa_cfg->dump;
3072 		if (!dump) {
3073 			spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3074 			return;
3075 		}
3076 		kref_get(&dump->kref);
3077 		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3078 		ipr_get_ioa_dump(ioa_cfg, dump);
3079 		kref_put(&dump->kref, ipr_release_dump);
3080 
3081 		spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3082 		if (ioa_cfg->sdt_state == DUMP_OBTAINED)
3083 			ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
3084 		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3085 		return;
3086 	}
3087 
3088 restart:
3089 	do {
3090 		did_work = 0;
3091 		if (!ioa_cfg->allow_cmds || !ioa_cfg->allow_ml_add_del) {
3092 			spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3093 			return;
3094 		}
3095 
3096 		list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
3097 			if (res->del_from_ml && res->sdev) {
3098 				did_work = 1;
3099 				sdev = res->sdev;
3100 				if (!scsi_device_get(sdev)) {
3101 					list_move_tail(&res->queue, &ioa_cfg->free_res_q);
3102 					spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3103 					scsi_remove_device(sdev);
3104 					scsi_device_put(sdev);
3105 					spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3106 				}
3107 				break;
3108 			}
3109 		}
3110 	} while (did_work);
3111 
3112 	list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
3113 		if (res->add_to_ml) {
3114 			bus = res->bus;
3115 			target = res->target;
3116 			lun = res->lun;
3117 			res->add_to_ml = 0;
3118 			spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3119 			scsi_add_device(ioa_cfg->host, bus, target, lun);
3120 			spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
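			/* The host lock was dropped for scsi_add_device(), so rescan the resource list */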
3121 			goto restart;
3122 		}
3123 	}
3124 
3125 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3126 	kobject_uevent(&ioa_cfg->host->shost_dev.kobj, KOBJ_CHANGE);
3127 	LEAVE;
3128 }
3129 
3130 #ifdef CONFIG_SCSI_IPR_TRACE
3131 /**
3132  * ipr_read_trace - Dump the adapter trace
3133  * @filp:		open sysfs file
3134  * @kobj:		kobject struct
3135  * @bin_attr:		bin_attribute struct
3136  * @buf:		buffer
3137  * @off:		offset
3138  * @count:		buffer size
3139  *
3140  * Return value:
3141  *	number of bytes printed to buffer
3142  **/
3143 static ssize_t ipr_read_trace(struct file *filp, struct kobject *kobj,
3144 			      struct bin_attribute *bin_attr,
3145 			      char *buf, loff_t off, size_t count)
3146 {
3147 	struct device *dev = container_of(kobj, struct device, kobj);
3148 	struct Scsi_Host *shost = class_to_shost(dev);
3149 	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3150 	unsigned long lock_flags = 0;
3151 	ssize_t ret;
3152 
3153 	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3154 	ret = memory_read_from_buffer(buf, count, &off, ioa_cfg->trace,
3155 				IPR_TRACE_SIZE);
3156 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3157 
3158 	return ret;
3159 }
3160 
3161 static struct bin_attribute ipr_trace_attr = {
3162 	.attr =	{
3163 		.name = "trace",
3164 		.mode = S_IRUGO,
3165 	},
3166 	.size = 0,
3167 	.read = ipr_read_trace,
3168 };
3169 #endif
3170 
3171 /**
3172  * ipr_show_fw_version - Show the firmware version
3173  * @dev:	class device struct
3174  * @buf:	buffer
3175  *
3176  * Return value:
3177  *	number of bytes printed to buffer
3178  **/
3179 static ssize_t ipr_show_fw_version(struct device *dev,
3180 				   struct device_attribute *attr, char *buf)
3181 {
3182 	struct Scsi_Host *shost = class_to_shost(dev);
3183 	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3184 	struct ipr_inquiry_page3 *ucode_vpd = &ioa_cfg->vpd_cbs->page3_data;
3185 	unsigned long lock_flags = 0;
3186 	int len;
3187 
3188 	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3189 	len = snprintf(buf, PAGE_SIZE, "%02X%02X%02X%02X\n",
3190 		       ucode_vpd->major_release, ucode_vpd->card_type,
3191 		       ucode_vpd->minor_release[0],
3192 		       ucode_vpd->minor_release[1]);
3193 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3194 	return len;
3195 }
3196 
3197 static struct device_attribute ipr_fw_version_attr = {
3198 	.attr = {
3199 		.name =		"fw_version",
3200 		.mode =		S_IRUGO,
3201 	},
3202 	.show = ipr_show_fw_version,
3203 };
3204 
3205 /**
3206  * ipr_show_log_level - Show the adapter's error logging level
3207  * @dev:	class device struct
3208  * @buf:	buffer
3209  *
3210  * Return value:
3211  * 	number of bytes printed to buffer
3212  **/
3213 static ssize_t ipr_show_log_level(struct device *dev,
3214 				   struct device_attribute *attr, char *buf)
3215 {
3216 	struct Scsi_Host *shost = class_to_shost(dev);
3217 	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3218 	unsigned long lock_flags = 0;
3219 	int len;
3220 
3221 	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3222 	len = snprintf(buf, PAGE_SIZE, "%d\n", ioa_cfg->log_level);
3223 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3224 	return len;
3225 }
3226 
3227 /**
3228  * ipr_store_log_level - Change the adapter's error logging level
3229  * @dev:	class device struct
3230  * @buf:	buffer
3231  *
3232  * Return value:
3233  * 	number of bytes consumed from buffer
3234  **/
3235 static ssize_t ipr_store_log_level(struct device *dev,
3236 			           struct device_attribute *attr,
3237 				   const char *buf, size_t count)
3238 {
3239 	struct Scsi_Host *shost = class_to_shost(dev);
3240 	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3241 	unsigned long lock_flags = 0;
3242 
3243 	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3244 	ioa_cfg->log_level = simple_strtoul(buf, NULL, 10);
3245 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3246 	return strlen(buf);
3247 }
3248 
3249 static struct device_attribute ipr_log_level_attr = {
3250 	.attr = {
3251 		.name =		"log_level",
3252 		.mode =		S_IRUGO | S_IWUSR,
3253 	},
3254 	.show = ipr_show_log_level,
3255 	.store = ipr_store_log_level
3256 };
3257 
3258 /**
3259  * ipr_store_diagnostics - IOA Diagnostics interface
3260  * @dev:	device struct
3261  * @buf:	buffer
3262  * @count:	buffer size
3263  *
3264  * This function will reset the adapter and wait a reasonable
3265  * amount of time for any errors that the adapter might log.
3266  *
3267  * Return value:
3268  * 	count on success / other on failure
3269  **/
3270 static ssize_t ipr_store_diagnostics(struct device *dev,
3271 				     struct device_attribute *attr,
3272 				     const char *buf, size_t count)
3273 {
3274 	struct Scsi_Host *shost = class_to_shost(dev);
3275 	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3276 	unsigned long lock_flags = 0;
3277 	int rc = count;
3278 
3279 	if (!capable(CAP_SYS_ADMIN))
3280 		return -EACCES;
3281 
3282 	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3283 	while (ioa_cfg->in_reset_reload) {
3284 		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3285 		wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
3286 		spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3287 	}
3288 
3289 	ioa_cfg->errors_logged = 0;
3290 	ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NORMAL);
3291 
3292 	if (ioa_cfg->in_reset_reload) {
3293 		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3294 		wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
3295 
3296 		/* Wait for a second for any errors to be logged */
3297 		msleep(1000);
3298 	} else {
3299 		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3300 		return -EIO;
3301 	}
3302 
3303 	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3304 	if (ioa_cfg->in_reset_reload || ioa_cfg->errors_logged)
3305 		rc = -EIO;
3306 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3307 
3308 	return rc;
3309 }
3310 
3311 static struct device_attribute ipr_diagnostics_attr = {
3312 	.attr = {
3313 		.name =		"run_diagnostics",
3314 		.mode =		S_IWUSR,
3315 	},
3316 	.store = ipr_store_diagnostics
3317 };
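/*
 * Illustrative use of the write-only run_diagnostics attribute (host
 * number is hypothetical):
 *
 *	# echo 1 > /sys/class/scsi_host/host0/run_diagnostics
 *
 * The written value itself is ignored; the store routine simply resets
 * the adapter, waits roughly a second for any errors to be logged, and
 * fails the write with -EIO if the reset did not complete cleanly or if
 * errors were logged during it.
 */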
3318 
3319 /**
3320  * ipr_show_adapter_state - Show the adapter's state
3321  * @dev:	device struct
3322  * @buf:	buffer
3323  *
3324  * Return value:
3325  * 	number of bytes printed to buffer
3326  **/
3327 static ssize_t ipr_show_adapter_state(struct device *dev,
3328 				      struct device_attribute *attr, char *buf)
3329 {
3330 	struct Scsi_Host *shost = class_to_shost(dev);
3331 	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3332 	unsigned long lock_flags = 0;
3333 	int len;
3334 
3335 	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3336 	if (ioa_cfg->ioa_is_dead)
3337 		len = snprintf(buf, PAGE_SIZE, "offline\n");
3338 	else
3339 		len = snprintf(buf, PAGE_SIZE, "online\n");
3340 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3341 	return len;
3342 }
3343 
3344 /**
3345  * ipr_store_adapter_state - Change adapter state
3346  * @dev:	device struct
3347  * @buf:	buffer
3348  * @count:	buffer size
3349  *
3350  * This function will change the adapter's state.
3351  *
3352  * Return value:
3353  * 	count on success / other on failure
3354  **/
3355 static ssize_t ipr_store_adapter_state(struct device *dev,
3356 				       struct device_attribute *attr,
3357 				       const char *buf, size_t count)
3358 {
3359 	struct Scsi_Host *shost = class_to_shost(dev);
3360 	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3361 	unsigned long lock_flags;
3362 	int result = count;
3363 
3364 	if (!capable(CAP_SYS_ADMIN))
3365 		return -EACCES;
3366 
3367 	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3368 	if (ioa_cfg->ioa_is_dead && !strncmp(buf, "online", 6)) {
3369 		ioa_cfg->ioa_is_dead = 0;
3370 		ioa_cfg->reset_retries = 0;
3371 		ioa_cfg->in_ioa_bringdown = 0;
3372 		ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
3373 	}
3374 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3375 	wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
3376 
3377 	return result;
3378 }
3379 
3380 static struct device_attribute ipr_ioa_state_attr = {
3381 	.attr = {
3382 		.name =		"online_state",
3383 		.mode =		S_IRUGO | S_IWUSR,
3384 	},
3385 	.show = ipr_show_adapter_state,
3386 	.store = ipr_store_adapter_state
3387 };
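/*
 * Example of reviving an adapter that the driver has marked dead (host
 * number is hypothetical):
 *
 *	# cat /sys/class/scsi_host/host0/online_state
 *	offline
 *	# echo online > /sys/class/scsi_host/host0/online_state
 *
 * Writing "online" only has an effect while ioa_is_dead is set; it
 * clears the bringdown/retry state, kicks off a fresh adapter reset,
 * and the write blocks until that reset/reload finishes.
 */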
3388 
3389 /**
3390  * ipr_store_reset_adapter - Reset the adapter
3391  * @dev:	device struct
3392  * @buf:	buffer
3393  * @count:	buffer size
3394  *
3395  * This function will reset the adapter.
3396  *
3397  * Return value:
3398  * 	count on success / other on failure
3399  **/
3400 static ssize_t ipr_store_reset_adapter(struct device *dev,
3401 				       struct device_attribute *attr,
3402 				       const char *buf, size_t count)
3403 {
3404 	struct Scsi_Host *shost = class_to_shost(dev);
3405 	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3406 	unsigned long lock_flags;
3407 	int result = count;
3408 
3409 	if (!capable(CAP_SYS_ADMIN))
3410 		return -EACCES;
3411 
3412 	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3413 	if (!ioa_cfg->in_reset_reload)
3414 		ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NORMAL);
3415 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3416 	wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
3417 
3418 	return result;
3419 }
3420 
3421 static struct device_attribute ipr_ioa_reset_attr = {
3422 	.attr = {
3423 		.name =		"reset_host",
3424 		.mode =		S_IWUSR,
3425 	},
3426 	.store = ipr_store_reset_adapter
3427 };
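/*
 * Example (hypothetical host number):
 *
 *	# echo 1 > /sys/class/scsi_host/host0/reset_host
 *
 * Any write triggers a normal-shutdown adapter reset unless one is
 * already in progress, and the caller sleeps until the reset/reload
 * completes.
 */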
3428 
3429 /**
3430  * ipr_alloc_ucode_buffer - Allocates a microcode download buffer
3431  * @buf_len:		buffer length
3432  *
3433  * Allocates a DMA'able buffer in chunks and assembles a scatter/gather
3434  * list to use for microcode download
3435  *
3436  * Return value:
3437  * 	pointer to sglist / NULL on failure
3438  **/
3439 static struct ipr_sglist *ipr_alloc_ucode_buffer(int buf_len)
3440 {
3441 	int sg_size, order, bsize_elem, num_elem, i, j;
3442 	struct ipr_sglist *sglist;
3443 	struct scatterlist *scatterlist;
3444 	struct page *page;
3445 
3446 	/* Get the minimum size per scatter/gather element */
3447 	sg_size = buf_len / (IPR_MAX_SGLIST - 1);
3448 
3449 	/* Get the actual size per element */
3450 	order = get_order(sg_size);
3451 
3452 	/* Determine the actual number of bytes per element */
3453 	bsize_elem = PAGE_SIZE * (1 << order);
3454 
3455 	/* Determine the actual number of sg entries needed */
3456 	if (buf_len % bsize_elem)
3457 		num_elem = (buf_len / bsize_elem) + 1;
3458 	else
3459 		num_elem = buf_len / bsize_elem;
3460 
3461 	/* Allocate a scatter/gather list for the DMA */
3462 	sglist = kzalloc(sizeof(struct ipr_sglist) +
3463 			 (sizeof(struct scatterlist) * (num_elem - 1)),
3464 			 GFP_KERNEL);
3465 
3466 	if (sglist == NULL) {
3467 		ipr_trace;
3468 		return NULL;
3469 	}
3470 
3471 	scatterlist = sglist->scatterlist;
3472 	sg_init_table(scatterlist, num_elem);
3473 
3474 	sglist->order = order;
3475 	sglist->num_sg = num_elem;
3476 
3477 	/* Allocate a bunch of sg elements */
3478 	for (i = 0; i < num_elem; i++) {
3479 		page = alloc_pages(GFP_KERNEL, order);
3480 		if (!page) {
3481 			ipr_trace;
3482 
3483 			/* Free up what we already allocated */
3484 			for (j = i - 1; j >= 0; j--)
3485 				__free_pages(sg_page(&scatterlist[j]), order);
3486 			kfree(sglist);
3487 			return NULL;
3488 		}
3489 
3490 		sg_set_page(&scatterlist[i], page, 0, 0);
3491 	}
3492 
3493 	return sglist;
3494 }
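/*
 * Worked example of the sizing above, assuming PAGE_SIZE is 4K and,
 * purely for illustration, a 1 MB microcode image spread over
 * IPR_MAX_SGLIST - 1 = 63 usable entries: sg_size is roughly 16.6 KB,
 * get_order() rounds that up to order 3 (32 KB per element), so the
 * loop allocates 32 elements of 32 KB each.  The order is recorded in
 * the sglist so ipr_free_ucode_buffer() can free the pages later.
 */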
3495 
3496 /**
3497  * ipr_free_ucode_buffer - Frees a microcode download buffer
3498  * @sglist:		scatter/gather list pointer
3499  *
3500  * Free a DMA'able ucode download buffer previously allocated with
3501  * ipr_alloc_ucode_buffer
3502  *
3503  * Return value:
3504  * 	nothing
3505  **/
3506 static void ipr_free_ucode_buffer(struct ipr_sglist *sglist)
3507 {
3508 	int i;
3509 
3510 	for (i = 0; i < sglist->num_sg; i++)
3511 		__free_pages(sg_page(&sglist->scatterlist[i]), sglist->order);
3512 
3513 	kfree(sglist);
3514 }
3515 
3516 /**
3517  * ipr_copy_ucode_buffer - Copy user buffer to kernel buffer
3518  * @sglist:		scatter/gather list pointer
3519  * @buffer:		buffer pointer
3520  * @len:		buffer length
3521  *
3522  * Copy a microcode image from a user buffer into a buffer allocated by
3523  * ipr_alloc_ucode_buffer
3524  *
3525  * Return value:
3526  * 	0 on success / other on failure
3527  **/
3528 static int ipr_copy_ucode_buffer(struct ipr_sglist *sglist,
3529 				 u8 *buffer, u32 len)
3530 {
3531 	int bsize_elem, i, result = 0;
3532 	struct scatterlist *scatterlist;
3533 	void *kaddr;
3534 
3535 	/* Determine the actual number of bytes per element */
3536 	bsize_elem = PAGE_SIZE * (1 << sglist->order);
3537 
3538 	scatterlist = sglist->scatterlist;
3539 
3540 	for (i = 0; i < (len / bsize_elem); i++, buffer += bsize_elem) {
3541 		struct page *page = sg_page(&scatterlist[i]);
3542 
3543 		kaddr = kmap(page);
3544 		memcpy(kaddr, buffer, bsize_elem);
3545 		kunmap(page);
3546 
3547 		scatterlist[i].length = bsize_elem;
3548 
3549 		if (result != 0) {
3550 			ipr_trace;
3551 			return result;
3552 		}
3553 	}
3554 
3555 	if (len % bsize_elem) {
3556 		struct page *page = sg_page(&scatterlist[i]);
3557 
3558 		kaddr = kmap(page);
3559 		memcpy(kaddr, buffer, len % bsize_elem);
3560 		kunmap(page);
3561 
3562 		scatterlist[i].length = len % bsize_elem;
3563 	}
3564 
3565 	sglist->buffer_len = len;
3566 	return result;
3567 }
3568 
3569 /**
3570  * ipr_build_ucode_ioadl64 - Build a microcode download IOADL
3571  * @ipr_cmd:		ipr command struct
3572  * @sglist:		scatter/gather list
3573  *
3574  * Builds a microcode download IOA data list (IOADL).
3575  *
3576  **/
3577 static void ipr_build_ucode_ioadl64(struct ipr_cmnd *ipr_cmd,
3578 				    struct ipr_sglist *sglist)
3579 {
3580 	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
3581 	struct ipr_ioadl64_desc *ioadl64 = ipr_cmd->i.ioadl64;
3582 	struct scatterlist *scatterlist = sglist->scatterlist;
3583 	int i;
3584 
3585 	ipr_cmd->dma_use_sg = sglist->num_dma_sg;
3586 	ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
3587 	ioarcb->data_transfer_length = cpu_to_be32(sglist->buffer_len);
3588 
3589 	ioarcb->ioadl_len =
3590 		cpu_to_be32(sizeof(struct ipr_ioadl64_desc) * ipr_cmd->dma_use_sg);
3591 	for (i = 0; i < ipr_cmd->dma_use_sg; i++) {
3592 		ioadl64[i].flags = cpu_to_be32(IPR_IOADL_FLAGS_WRITE);
3593 		ioadl64[i].data_len = cpu_to_be32(sg_dma_len(&scatterlist[i]));
3594 		ioadl64[i].address = cpu_to_be64(sg_dma_address(&scatterlist[i]));
3595 	}
3596 
3597 	ioadl64[i-1].flags |= cpu_to_be32(IPR_IOADL_FLAGS_LAST);
3598 }
3599 
3600 /**
3601  * ipr_build_ucode_ioadl - Build a microcode download IOADL
3602  * @ipr_cmd:	ipr command struct
3603  * @sglist:		scatter/gather list
3604  *
3605  * Builds a microcode download IOA data list (IOADL).
3606  *
3607  **/
3608 static void ipr_build_ucode_ioadl(struct ipr_cmnd *ipr_cmd,
3609 				  struct ipr_sglist *sglist)
3610 {
3611 	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
3612 	struct ipr_ioadl_desc *ioadl = ipr_cmd->i.ioadl;
3613 	struct scatterlist *scatterlist = sglist->scatterlist;
3614 	int i;
3615 
3616 	ipr_cmd->dma_use_sg = sglist->num_dma_sg;
3617 	ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
3618 	ioarcb->data_transfer_length = cpu_to_be32(sglist->buffer_len);
3619 
3620 	ioarcb->ioadl_len =
3621 		cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
3622 
3623 	for (i = 0; i < ipr_cmd->dma_use_sg; i++) {
3624 		ioadl[i].flags_and_data_len =
3625 			cpu_to_be32(IPR_IOADL_FLAGS_WRITE | sg_dma_len(&scatterlist[i]));
3626 		ioadl[i].address =
3627 			cpu_to_be32(sg_dma_address(&scatterlist[i]));
3628 	}
3629 
3630 	ioadl[i-1].flags_and_data_len |=
3631 		cpu_to_be32(IPR_IOADL_FLAGS_LAST);
3632 }
3633 
3634 /**
3635  * ipr_update_ioa_ucode - Update IOA's microcode
3636  * @ioa_cfg:	ioa config struct
3637  * @sglist:		scatter/gather list
3638  *
3639  * Initiate an adapter reset to update the IOA's microcode
3640  *
3641  * Return value:
3642  * 	0 on success / -EIO on failure
3643  **/
3644 static int ipr_update_ioa_ucode(struct ipr_ioa_cfg *ioa_cfg,
3645 				struct ipr_sglist *sglist)
3646 {
3647 	unsigned long lock_flags;
3648 
3649 	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3650 	while (ioa_cfg->in_reset_reload) {
3651 		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3652 		wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
3653 		spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3654 	}
3655 
3656 	if (ioa_cfg->ucode_sglist) {
3657 		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3658 		dev_err(&ioa_cfg->pdev->dev,
3659 			"Microcode download already in progress\n");
3660 		return -EIO;
3661 	}
3662 
3663 	sglist->num_dma_sg = pci_map_sg(ioa_cfg->pdev, sglist->scatterlist,
3664 					sglist->num_sg, DMA_TO_DEVICE);
3665 
3666 	if (!sglist->num_dma_sg) {
3667 		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3668 		dev_err(&ioa_cfg->pdev->dev,
3669 			"Failed to map microcode download buffer!\n");
3670 		return -EIO;
3671 	}
3672 
3673 	ioa_cfg->ucode_sglist = sglist;
3674 	ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NORMAL);
3675 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3676 	wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
3677 
3678 	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3679 	ioa_cfg->ucode_sglist = NULL;
3680 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3681 	return 0;
3682 }
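/*
 * Note on the flow above: the mapped scatter/gather list is parked in
 * ioa_cfg->ucode_sglist and the function then just starts a normal
 * shutdown/reset.  The actual transfer of the image to the adapter is
 * driven elsewhere by the reset job, which picks up ucode_sglist while
 * bringing the IOA back up; once the reset/reload finishes the pointer
 * is cleared again under the host lock.
 */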
3683 
3684 /**
3685  * ipr_store_update_fw - Update the firmware on the adapter
3686  * @dev:	device struct
3687  * @buf:	buffer
3688  * @count:	buffer size
3689  *
3690  * This function will update the firmware on the adapter.
3691  *
3692  * Return value:
3693  * 	count on success / other on failure
3694  **/
3695 static ssize_t ipr_store_update_fw(struct device *dev,
3696 				   struct device_attribute *attr,
3697 				   const char *buf, size_t count)
3698 {
3699 	struct Scsi_Host *shost = class_to_shost(dev);
3700 	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3701 	struct ipr_ucode_image_header *image_hdr;
3702 	const struct firmware *fw_entry;
3703 	struct ipr_sglist *sglist;
3704 	char fname[100];
3705 	char *src;
3706 	int len, result, dnld_size;
3707 
3708 	if (!capable(CAP_SYS_ADMIN))
3709 		return -EACCES;
3710 
3711 	len = scnprintf(fname, sizeof(fname), "%s", buf);
3712 	if (len && fname[len - 1] == '\n')
		fname[len - 1] = '\0';
3713 
3714 	if (request_firmware(&fw_entry, fname, &ioa_cfg->pdev->dev)) {
3715 		dev_err(&ioa_cfg->pdev->dev, "Firmware file %s not found\n", fname);
3716 		return -EIO;
3717 	}
3718 
3719 	image_hdr = (struct ipr_ucode_image_header *)fw_entry->data;
3720 
3721 	if (be32_to_cpu(image_hdr->header_length) > fw_entry->size ||
3722 	    (ioa_cfg->vpd_cbs->page3_data.card_type &&
3723 	     ioa_cfg->vpd_cbs->page3_data.card_type != image_hdr->card_type)) {
3724 		dev_err(&ioa_cfg->pdev->dev, "Invalid microcode buffer\n");
3725 		release_firmware(fw_entry);
3726 		return -EINVAL;
3727 	}
3728 
3729 	src = (u8 *)image_hdr + be32_to_cpu(image_hdr->header_length);
3730 	dnld_size = fw_entry->size - be32_to_cpu(image_hdr->header_length);
3731 	sglist = ipr_alloc_ucode_buffer(dnld_size);
3732 
3733 	if (!sglist) {
3734 		dev_err(&ioa_cfg->pdev->dev, "Microcode buffer allocation failed\n");
3735 		release_firmware(fw_entry);
3736 		return -ENOMEM;
3737 	}
3738 
3739 	result = ipr_copy_ucode_buffer(sglist, src, dnld_size);
3740 
3741 	if (result) {
3742 		dev_err(&ioa_cfg->pdev->dev,
3743 			"Microcode buffer copy to DMA buffer failed\n");
3744 		goto out;
3745 	}
3746 
3747 	result = ipr_update_ioa_ucode(ioa_cfg, sglist);
3748 
3749 	if (!result)
3750 		result = count;
3751 out:
3752 	ipr_free_ucode_buffer(sglist);
3753 	release_firmware(fw_entry);
3754 	return result;
3755 }
3756 
3757 static struct device_attribute ipr_update_fw_attr = {
3758 	.attr = {
3759 		.name =		"update_fw",
3760 		.mode =		S_IWUSR,
3761 	},
3762 	.store = ipr_store_update_fw
3763 };
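/*
 * Hypothetical firmware update sequence (host number and file name are
 * examples only):
 *
 *	# cp new-ipr-ucode /lib/firmware/
 *	# echo new-ipr-ucode > /sys/class/scsi_host/host0/update_fw
 *
 * The image is fetched through request_firmware(), so it must live in
 * the firmware loader's search path.  The write blocks across the
 * adapter reset that activates the new microcode and returns the byte
 * count on success.
 */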
3764 
3765 /**
3766  * ipr_show_fw_type - Show the adapter's firmware type.
3767  * @dev:	class device struct
3768  * @buf:	buffer
3769  *
3770  * Return value:
3771  *	number of bytes printed to buffer
3772  **/
3773 static ssize_t ipr_show_fw_type(struct device *dev,
3774 				struct device_attribute *attr, char *buf)
3775 {
3776 	struct Scsi_Host *shost = class_to_shost(dev);
3777 	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3778 	unsigned long lock_flags = 0;
3779 	int len;
3780 
3781 	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3782 	len = snprintf(buf, PAGE_SIZE, "%d\n", ioa_cfg->sis64);
3783 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3784 	return len;
3785 }
3786 
3787 static struct device_attribute ipr_ioa_fw_type_attr = {
3788 	.attr = {
3789 		.name =		"fw_type",
3790 		.mode =		S_IRUGO,
3791 	},
3792 	.show = ipr_show_fw_type
3793 };
3794 
3795 static struct device_attribute *ipr_ioa_attrs[] = {
3796 	&ipr_fw_version_attr,
3797 	&ipr_log_level_attr,
3798 	&ipr_diagnostics_attr,
3799 	&ipr_ioa_state_attr,
3800 	&ipr_ioa_reset_attr,
3801 	&ipr_update_fw_attr,
3802 	&ipr_ioa_fw_type_attr,
3803 	NULL,
3804 };
3805 
3806 #ifdef CONFIG_SCSI_IPR_DUMP
3807 /**
3808  * ipr_read_dump - Dump the adapter
3809  * @filp:		open sysfs file
3810  * @kobj:		kobject struct
3811  * @bin_attr:		bin_attribute struct
3812  * @buf:		buffer
3813  * @off:		offset
3814  * @count:		buffer size
3815  *
3816  * Return value:
3817  *	number of bytes read / other on failure
3818  **/
3819 static ssize_t ipr_read_dump(struct file *filp, struct kobject *kobj,
3820 			     struct bin_attribute *bin_attr,
3821 			     char *buf, loff_t off, size_t count)
3822 {
3823 	struct device *cdev = container_of(kobj, struct device, kobj);
3824 	struct Scsi_Host *shost = class_to_shost(cdev);
3825 	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3826 	struct ipr_dump *dump;
3827 	unsigned long lock_flags = 0;
3828 	char *src;
3829 	int len;
3830 	size_t rc = count;
3831 
3832 	if (!capable(CAP_SYS_ADMIN))
3833 		return -EACCES;
3834 
3835 	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3836 	dump = ioa_cfg->dump;
3837 
3838 	if (ioa_cfg->sdt_state != DUMP_OBTAINED || !dump) {
3839 		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3840 		return 0;
3841 	}
3842 	kref_get(&dump->kref);
3843 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3844 
3845 	if (off > dump->driver_dump.hdr.len) {
3846 		kref_put(&dump->kref, ipr_release_dump);
3847 		return 0;
3848 	}
3849 
3850 	if (off + count > dump->driver_dump.hdr.len) {
3851 		count = dump->driver_dump.hdr.len - off;
3852 		rc = count;
3853 	}
3854 
3855 	if (count && off < sizeof(dump->driver_dump)) {
3856 		if (off + count > sizeof(dump->driver_dump))
3857 			len = sizeof(dump->driver_dump) - off;
3858 		else
3859 			len = count;
3860 		src = (u8 *)&dump->driver_dump + off;
3861 		memcpy(buf, src, len);
3862 		buf += len;
3863 		off += len;
3864 		count -= len;
3865 	}
3866 
3867 	off -= sizeof(dump->driver_dump);
3868 
3869 	if (count && off < offsetof(struct ipr_ioa_dump, ioa_data)) {
3870 		if (off + count > offsetof(struct ipr_ioa_dump, ioa_data))
3871 			len = offsetof(struct ipr_ioa_dump, ioa_data) - off;
3872 		else
3873 			len = count;
3874 		src = (u8 *)&dump->ioa_dump + off;
3875 		memcpy(buf, src, len);
3876 		buf += len;
3877 		off += len;
3878 		count -= len;
3879 	}
3880 
3881 	off -= offsetof(struct ipr_ioa_dump, ioa_data);
3882 
3883 	while (count) {
3884 		if ((off & PAGE_MASK) != ((off + count) & PAGE_MASK))
3885 			len = PAGE_ALIGN(off) - off;
3886 		else
3887 			len = count;
3888 		src = (u8 *)dump->ioa_dump.ioa_data[(off & PAGE_MASK) >> PAGE_SHIFT];
3889 		src += off & ~PAGE_MASK;
3890 		memcpy(buf, src, len);
3891 		buf += len;
3892 		off += len;
3893 		count -= len;
3894 	}
3895 
3896 	kref_put(&dump->kref, ipr_release_dump);
3897 	return rc;
3898 }
3899 
3900 /**
3901  * ipr_alloc_dump - Prepare for adapter dump
3902  * @ioa_cfg:	ioa config struct
3903  *
3904  * Return value:
3905  *	0 on success / other on failure
3906  **/
3907 static int ipr_alloc_dump(struct ipr_ioa_cfg *ioa_cfg)
3908 {
3909 	struct ipr_dump *dump;
3910 	unsigned long lock_flags = 0;
3911 
3912 	dump = kzalloc(sizeof(struct ipr_dump), GFP_KERNEL);
3913 
3914 	if (!dump) {
3915 		ipr_err("Dump memory allocation failed\n");
3916 		return -ENOMEM;
3917 	}
3918 
3919 	kref_init(&dump->kref);
3920 	dump->ioa_cfg = ioa_cfg;
3921 
3922 	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3923 
3924 	if (INACTIVE != ioa_cfg->sdt_state) {
3925 		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3926 		kfree(dump);
3927 		return 0;
3928 	}
3929 
3930 	ioa_cfg->dump = dump;
3931 	ioa_cfg->sdt_state = WAIT_FOR_DUMP;
3932 	if (ioa_cfg->ioa_is_dead && !ioa_cfg->dump_taken) {
3933 		ioa_cfg->dump_taken = 1;
3934 		schedule_work(&ioa_cfg->work_q);
3935 	}
3936 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3937 
3938 	return 0;
3939 }
3940 
3941 /**
3942  * ipr_free_dump - Free adapter dump memory
3943  * @ioa_cfg:	ioa config struct
3944  *
3945  * Return value:
3946  *	0 on success / other on failure
3947  **/
3948 static int ipr_free_dump(struct ipr_ioa_cfg *ioa_cfg)
3949 {
3950 	struct ipr_dump *dump;
3951 	unsigned long lock_flags = 0;
3952 
3953 	ENTER;
3954 
3955 	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3956 	dump = ioa_cfg->dump;
3957 	if (!dump) {
3958 		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3959 		return 0;
3960 	}
3961 
3962 	ioa_cfg->dump = NULL;
3963 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3964 
3965 	kref_put(&dump->kref, ipr_release_dump);
3966 
3967 	LEAVE;
3968 	return 0;
3969 }
3970 
3971 /**
3972  * ipr_write_dump - Setup dump state of adapter
3973  * @filp:		open sysfs file
3974  * @kobj:		kobject struct
3975  * @bin_attr:		bin_attribute struct
3976  * @buf:		buffer
3977  * @off:		offset
3978  * @count:		buffer size
3979  *
3980  * Return value:
3981  *	count on success / other on failure
3982  **/
3983 static ssize_t ipr_write_dump(struct file *filp, struct kobject *kobj,
3984 			      struct bin_attribute *bin_attr,
3985 			      char *buf, loff_t off, size_t count)
3986 {
3987 	struct device *cdev = container_of(kobj, struct device, kobj);
3988 	struct Scsi_Host *shost = class_to_shost(cdev);
3989 	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3990 	int rc;
3991 
3992 	if (!capable(CAP_SYS_ADMIN))
3993 		return -EACCES;
3994 
3995 	if (buf[0] == '1')
3996 		rc = ipr_alloc_dump(ioa_cfg);
3997 	else if (buf[0] == '0')
3998 		rc = ipr_free_dump(ioa_cfg);
3999 	else
4000 		return -EINVAL;
4001 
4002 	if (rc)
4003 		return rc;
4004 	else
4005 		return count;
4006 }
4007 
4008 static struct bin_attribute ipr_dump_attr = {
4009 	.attr =	{
4010 		.name = "dump",
4011 		.mode = S_IRUSR | S_IWUSR,
4012 	},
4013 	.size = 0,
4014 	.read = ipr_read_dump,
4015 	.write = ipr_write_dump
4016 };
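/*
 * Typical interaction with the binary "dump" attribute (paths and host
 * number are illustrative):
 *
 *	# echo 1 > /sys/class/scsi_host/host0/dump		(prepare a dump)
 *	# cat /sys/class/scsi_host/host0/dump > ioa.dump	(read it out)
 *	# echo 0 > /sys/class/scsi_host/host0/dump		(free the memory)
 *
 * Reads return 0 bytes until the adapter dump has actually been
 * obtained (sdt_state == DUMP_OBTAINED), and all accesses require
 * CAP_SYS_ADMIN.
 */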
4017 #else
4018 static int ipr_free_dump(struct ipr_ioa_cfg *ioa_cfg) { return 0; }
4019 #endif
4020 
4021 /**
4022  * ipr_change_queue_depth - Change the device's queue depth
4023  * @sdev:	scsi device struct
4024  * @qdepth:	depth to set
4025  * @reason:	calling context
4026  *
4027  * Return value:
4028  * 	actual depth set
4029  **/
4030 static int ipr_change_queue_depth(struct scsi_device *sdev, int qdepth,
4031 				  int reason)
4032 {
4033 	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
4034 	struct ipr_resource_entry *res;
4035 	unsigned long lock_flags = 0;
4036 
4037 	if (reason != SCSI_QDEPTH_DEFAULT)
4038 		return -EOPNOTSUPP;
4039 
4040 	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4041 	res = (struct ipr_resource_entry *)sdev->hostdata;
4042 
4043 	if (res && ipr_is_gata(res) && qdepth > IPR_MAX_CMD_PER_ATA_LUN)
4044 		qdepth = IPR_MAX_CMD_PER_ATA_LUN;
4045 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4046 
4047 	scsi_adjust_queue_depth(sdev, scsi_get_tag_type(sdev), qdepth);
4048 	return sdev->queue_depth;
4049 }
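/*
 * Queue depth changes arrive here via the midlayer, e.g. when the
 * generic queue_depth sysfs attribute of a scsi_device is written
 * (path is illustrative):
 *
 *	# echo 16 > /sys/bus/scsi/devices/0:0:1:0/queue_depth
 *
 * For SATA (GATA) resources the requested depth is capped at
 * IPR_MAX_CMD_PER_ATA_LUN before being applied.
 */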
4050 
4051 /**
4052  * ipr_change_queue_type - Change the device's queue type
4053  * @sdev:		scsi device struct
4054  * @tag_type:	type of tags to use
4055  *
4056  * Return value:
4057  * 	actual queue type set
4058  **/
4059 static int ipr_change_queue_type(struct scsi_device *sdev, int tag_type)
4060 {
4061 	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
4062 	struct ipr_resource_entry *res;
4063 	unsigned long lock_flags = 0;
4064 
4065 	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4066 	res = (struct ipr_resource_entry *)sdev->hostdata;
4067 
4068 	if (res) {
4069 		if (ipr_is_gscsi(res) && sdev->tagged_supported) {
4070 			/*
4071 			 * We don't bother quiescing the device here since the
4072 			 * adapter firmware does it for us.
4073 			 */
4074 			scsi_set_tag_type(sdev, tag_type);
4075 
4076 			if (tag_type)
4077 				scsi_activate_tcq(sdev, sdev->queue_depth);
4078 			else
4079 				scsi_deactivate_tcq(sdev, sdev->queue_depth);
4080 		} else
4081 			tag_type = 0;
4082 	} else
4083 		tag_type = 0;
4084 
4085 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4086 	return tag_type;
4087 }
4088 
4089 /**
4090  * ipr_show_adapter_handle - Show the adapter's resource handle for this device
4091  * @dev:	device struct
4092  * @buf:	buffer
4093  *
4094  * Return value:
4095  * 	number of bytes printed to buffer
4096  **/
4097 static ssize_t ipr_show_adapter_handle(struct device *dev, struct device_attribute *attr, char *buf)
4098 {
4099 	struct scsi_device *sdev = to_scsi_device(dev);
4100 	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
4101 	struct ipr_resource_entry *res;
4102 	unsigned long lock_flags = 0;
4103 	ssize_t len = -ENXIO;
4104 
4105 	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4106 	res = (struct ipr_resource_entry *)sdev->hostdata;
4107 	if (res)
4108 		len = snprintf(buf, PAGE_SIZE, "%08X\n", res->res_handle);
4109 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4110 	return len;
4111 }
4112 
4113 static struct device_attribute ipr_adapter_handle_attr = {
4114 	.attr = {
4115 		.name = 	"adapter_handle",
4116 		.mode =		S_IRUSR,
4117 	},
4118 	.show = ipr_show_adapter_handle
4119 };
4120 
4121 /**
4122  * ipr_show_resource_path - Show the resource path or the resource address for
4123  *			    this device.
4124  * @dev:	device struct
4125  * @buf:	buffer
4126  *
4127  * Return value:
4128  * 	number of bytes printed to buffer
4129  **/
4130 static ssize_t ipr_show_resource_path(struct device *dev, struct device_attribute *attr, char *buf)
4131 {
4132 	struct scsi_device *sdev = to_scsi_device(dev);
4133 	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
4134 	struct ipr_resource_entry *res;
4135 	unsigned long lock_flags = 0;
4136 	ssize_t len = -ENXIO;
4137 	char buffer[IPR_MAX_RES_PATH_LENGTH];
4138 
4139 	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4140 	res = (struct ipr_resource_entry *)sdev->hostdata;
4141 	if (res && ioa_cfg->sis64)
4142 		len = snprintf(buf, PAGE_SIZE, "%s\n",
4143 			       ipr_format_res_path(res->res_path, buffer,
4144 						   sizeof(buffer)));
4145 	else if (res)
4146 		len = snprintf(buf, PAGE_SIZE, "%d:%d:%d:%d\n", ioa_cfg->host->host_no,
4147 			       res->bus, res->target, res->lun);
4148 
4149 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4150 	return len;
4151 }
4152 
4153 static struct device_attribute ipr_resource_path_attr = {
4154 	.attr = {
4155 		.name = 	"resource_path",
4156 		.mode =		S_IRUGO,
4157 	},
4158 	.show = ipr_show_resource_path
4159 };
4160 
4161 /**
4162  * ipr_show_resource_type - Show the resource type for this device.
4163  * @dev:	device struct
4164  * @buf:	buffer
4165  *
4166  * Return value:
4167  *	number of bytes printed to buffer
4168  **/
4169 static ssize_t ipr_show_resource_type(struct device *dev, struct device_attribute *attr, char *buf)
4170 {
4171 	struct scsi_device *sdev = to_scsi_device(dev);
4172 	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
4173 	struct ipr_resource_entry *res;
4174 	unsigned long lock_flags = 0;
4175 	ssize_t len = -ENXIO;
4176 
4177 	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4178 	res = (struct ipr_resource_entry *)sdev->hostdata;
4179 
4180 	if (res)
4181 		len = snprintf(buf, PAGE_SIZE, "%x\n", res->type);
4182 
4183 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4184 	return len;
4185 }
4186 
4187 static struct device_attribute ipr_resource_type_attr = {
4188 	.attr = {
4189 		.name =		"resource_type",
4190 		.mode =		S_IRUGO,
4191 	},
4192 	.show = ipr_show_resource_type
4193 };
4194 
4195 static struct device_attribute *ipr_dev_attrs[] = {
4196 	&ipr_adapter_handle_attr,
4197 	&ipr_resource_path_attr,
4198 	&ipr_resource_type_attr,
4199 	NULL,
4200 };
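/*
 * These per-device attributes show up alongside the standard
 * scsi_device attributes in sysfs, e.g. (path is illustrative):
 *
 *	# cat /sys/bus/scsi/devices/0:0:1:0/adapter_handle
 *	# cat /sys/bus/scsi/devices/0:0:1:0/resource_path
 *
 * resource_path prints the SIS-64 style path on new adapters and a
 * host:bus:target:lun tuple on older ones; resource_type prints the
 * raw resource type value in hex.
 */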
4201 
4202 /**
4203  * ipr_biosparam - Return the HSC mapping
4204  * @sdev:			scsi device struct
4205  * @block_device:	block device pointer
4206  * @capacity:		capacity of the device
4207  * @parm:			Array containing returned HSC values.
4208  *
4209  * This function generates the HSC parms that fdisk uses.
4210  * We want to make sure we return something that places partitions
4211  * on 4k boundaries for best performance with the IOA.
4212  *
4213  * Return value:
4214  * 	0 on success
4215  **/
4216 static int ipr_biosparam(struct scsi_device *sdev,
4217 			 struct block_device *block_device,
4218 			 sector_t capacity, int *parm)
4219 {
4220 	int heads, sectors;
4221 	sector_t cylinders;
4222 
4223 	heads = 128;
4224 	sectors = 32;
4225 
4226 	cylinders = capacity;
4227 	sector_div(cylinders, (128 * 32));
4228 
4229 	/* return result */
4230 	parm[0] = heads;
4231 	parm[1] = sectors;
4232 	parm[2] = cylinders;
4233 
4234 	return 0;
4235 }
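/*
 * With the fixed 128 head / 32 sector geometry above, one "cylinder"
 * is 128 * 32 = 4096 sectors, i.e. 2 MB with 512-byte sectors.
 * Partitioning tools that align partitions to cylinder boundaries
 * therefore place them on 4k-aligned (in fact 2 MB-aligned) offsets,
 * which is the alignment the IOA prefers.
 */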
4236 
4237 /**
4238  * ipr_find_starget - Find target based on bus/target.
4239  * @starget:	scsi target struct
4240  *
4241  * Return value:
4242  * 	resource entry pointer if found / NULL if not found
4243  **/
4244 static struct ipr_resource_entry *ipr_find_starget(struct scsi_target *starget)
4245 {
4246 	struct Scsi_Host *shost = dev_to_shost(&starget->dev);
4247 	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) shost->hostdata;
4248 	struct ipr_resource_entry *res;
4249 
4250 	list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
4251 		if ((res->bus == starget->channel) &&
4252 		    (res->target == starget->id) &&
4253 		    (res->lun == 0)) {
4254 			return res;
4255 		}
4256 	}
4257 
4258 	return NULL;
4259 }
4260 
4261 static struct ata_port_info sata_port_info;
4262 
4263 /**
4264  * ipr_target_alloc - Prepare for commands to a SCSI target
4265  * @starget:	scsi target struct
4266  *
4267  * If the device is a SATA device, this function allocates an
4268  * ATA port with libata, else it does nothing.
4269  *
4270  * Return value:
4271  * 	0 on success / non-0 on failure
4272  **/
4273 static int ipr_target_alloc(struct scsi_target *starget)
4274 {
4275 	struct Scsi_Host *shost = dev_to_shost(&starget->dev);
4276 	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) shost->hostdata;
4277 	struct ipr_sata_port *sata_port;
4278 	struct ata_port *ap;
4279 	struct ipr_resource_entry *res;
4280 	unsigned long lock_flags;
4281 
4282 	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4283 	res = ipr_find_starget(starget);
4284 	starget->hostdata = NULL;
4285 
4286 	if (res && ipr_is_gata(res)) {
4287 		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4288 		sata_port = kzalloc(sizeof(*sata_port), GFP_KERNEL);
4289 		if (!sata_port)
4290 			return -ENOMEM;
4291 
4292 		ap = ata_sas_port_alloc(&ioa_cfg->ata_host, &sata_port_info, shost);
4293 		if (ap) {
4294 			spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4295 			sata_port->ioa_cfg = ioa_cfg;
4296 			sata_port->ap = ap;
4297 			sata_port->res = res;
4298 
4299 			res->sata_port = sata_port;
4300 			ap->private_data = sata_port;
4301 			starget->hostdata = sata_port;
4302 		} else {
4303 			kfree(sata_port);
4304 			return -ENOMEM;
4305 		}
4306 	}
4307 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4308 
4309 	return 0;
4310 }
4311 
4312 /**
4313  * ipr_target_destroy - Destroy a SCSI target
4314  * @starget:	scsi target struct
4315  *
4316  * If the device was a SATA device, this function frees the libata
4317  * ATA port, else it does nothing.
4318  *
4319  **/
4320 static void ipr_target_destroy(struct scsi_target *starget)
4321 {
4322 	struct ipr_sata_port *sata_port = starget->hostdata;
4323 	struct Scsi_Host *shost = dev_to_shost(&starget->dev);
4324 	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) shost->hostdata;
4325 
4326 	if (ioa_cfg->sis64) {
4327 		if (starget->channel == IPR_ARRAY_VIRTUAL_BUS)
4328 			clear_bit(starget->id, ioa_cfg->array_ids);
4329 		else if (starget->channel == IPR_VSET_VIRTUAL_BUS)
4330 			clear_bit(starget->id, ioa_cfg->vset_ids);
4331 		else if (starget->channel == 0)
4332 			clear_bit(starget->id, ioa_cfg->target_ids);
4333 	}
4334 
4335 	if (sata_port) {
4336 		starget->hostdata = NULL;
4337 		ata_sas_port_destroy(sata_port->ap);
4338 		kfree(sata_port);
4339 	}
4340 }
4341 
4342 /**
4343  * ipr_find_sdev - Find device based on bus/target/lun.
4344  * @sdev:	scsi device struct
4345  *
4346  * Return value:
4347  * 	resource entry pointer if found / NULL if not found
4348  **/
4349 static struct ipr_resource_entry *ipr_find_sdev(struct scsi_device *sdev)
4350 {
4351 	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;
4352 	struct ipr_resource_entry *res;
4353 
4354 	list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
4355 		if ((res->bus == sdev->channel) &&
4356 		    (res->target == sdev->id) &&
4357 		    (res->lun == sdev->lun))
4358 			return res;
4359 	}
4360 
4361 	return NULL;
4362 }
4363 
4364 /**
4365  * ipr_slave_destroy - Unconfigure a SCSI device
4366  * @sdev:	scsi device struct
4367  *
4368  * Return value:
4369  * 	nothing
4370  **/
4371 static void ipr_slave_destroy(struct scsi_device *sdev)
4372 {
4373 	struct ipr_resource_entry *res;
4374 	struct ipr_ioa_cfg *ioa_cfg;
4375 	unsigned long lock_flags = 0;
4376 
4377 	ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;
4378 
4379 	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4380 	res = (struct ipr_resource_entry *) sdev->hostdata;
4381 	if (res) {
4382 		if (res->sata_port)
4383 			res->sata_port->ap->link.device[0].class = ATA_DEV_NONE;
4384 		sdev->hostdata = NULL;
4385 		res->sdev = NULL;
4386 		res->sata_port = NULL;
4387 	}
4388 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4389 }
4390 
4391 /**
4392  * ipr_slave_configure - Configure a SCSI device
4393  * @sdev:	scsi device struct
4394  *
4395  * This function configures the specified scsi device.
4396  *
4397  * Return value:
4398  * 	0 on success
4399  **/
4400 static int ipr_slave_configure(struct scsi_device *sdev)
4401 {
4402 	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;
4403 	struct ipr_resource_entry *res;
4404 	struct ata_port *ap = NULL;
4405 	unsigned long lock_flags = 0;
4406 	char buffer[IPR_MAX_RES_PATH_LENGTH];
4407 
4408 	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4409 	res = sdev->hostdata;
4410 	if (res) {
4411 		if (ipr_is_af_dasd_device(res))
4412 			sdev->type = TYPE_RAID;
4413 		if (ipr_is_af_dasd_device(res) || ipr_is_ioa_resource(res)) {
4414 			sdev->scsi_level = 4;
4415 			sdev->no_uld_attach = 1;
4416 		}
4417 		if (ipr_is_vset_device(res)) {
4418 			blk_queue_rq_timeout(sdev->request_queue,
4419 					     IPR_VSET_RW_TIMEOUT);
4420 			blk_queue_max_hw_sectors(sdev->request_queue, IPR_VSET_MAX_SECTORS);
4421 		}
4422 		if (ipr_is_gata(res) && res->sata_port)
4423 			ap = res->sata_port->ap;
4424 		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4425 
4426 		if (ap) {
4427 			scsi_adjust_queue_depth(sdev, 0, IPR_MAX_CMD_PER_ATA_LUN);
4428 			ata_sas_slave_configure(sdev, ap);
4429 		} else
4430 			scsi_adjust_queue_depth(sdev, 0, sdev->host->cmd_per_lun);
4431 		if (ioa_cfg->sis64)
4432 			sdev_printk(KERN_INFO, sdev, "Resource path: %s\n",
4433 				    ipr_format_res_path(res->res_path, buffer,
4434 							sizeof(buffer)));
4435 		return 0;
4436 	}
4437 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4438 	return 0;
4439 }
4440 
4441 /**
4442  * ipr_ata_slave_alloc - Prepare for commands to a SATA device
4443  * @sdev:	scsi device struct
4444  *
4445  * This function initializes an ATA port so that future commands
4446  * sent through queuecommand will work.
4447  *
4448  * Return value:
4449  * 	0 on success
4450  **/
4451 static int ipr_ata_slave_alloc(struct scsi_device *sdev)
4452 {
4453 	struct ipr_sata_port *sata_port = NULL;
4454 	int rc = -ENXIO;
4455 
4456 	ENTER;
4457 	if (sdev->sdev_target)
4458 		sata_port = sdev->sdev_target->hostdata;
4459 	if (sata_port)
4460 		rc = ata_sas_port_init(sata_port->ap);
4461 	if (rc)
4462 		ipr_slave_destroy(sdev);
4463 
4464 	LEAVE;
4465 	return rc;
4466 }
4467 
4468 /**
4469  * ipr_slave_alloc - Prepare for commands to a device.
4470  * @sdev:	scsi device struct
4471  *
4472  * This function saves a pointer to the resource entry
4473  * in the scsi device struct if the device exists. We
4474  * can then use this pointer in ipr_queuecommand when
4475  * handling new commands.
4476  *
4477  * Return value:
4478  * 	0 on success / -ENXIO if device does not exist
4479  **/
4480 static int ipr_slave_alloc(struct scsi_device *sdev)
4481 {
4482 	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;
4483 	struct ipr_resource_entry *res;
4484 	unsigned long lock_flags;
4485 	int rc = -ENXIO;
4486 
4487 	sdev->hostdata = NULL;
4488 
4489 	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4490 
4491 	res = ipr_find_sdev(sdev);
4492 	if (res) {
4493 		res->sdev = sdev;
4494 		res->add_to_ml = 0;
4495 		res->in_erp = 0;
4496 		sdev->hostdata = res;
4497 		if (!ipr_is_naca_model(res))
4498 			res->needs_sync_complete = 1;
4499 		rc = 0;
4500 		if (ipr_is_gata(res)) {
4501 			spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4502 			return ipr_ata_slave_alloc(sdev);
4503 		}
4504 	}
4505 
4506 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4507 
4508 	return rc;
4509 }
4510 
4511 /**
4512  * __ipr_eh_host_reset - Reset the host adapter
4513  * @scsi_cmd:	scsi command struct
4514  *
4515  * Return value:
4516  * 	SUCCESS / FAILED
4517  **/
4518 static int __ipr_eh_host_reset(struct scsi_cmnd *scsi_cmd)
4519 {
4520 	struct ipr_ioa_cfg *ioa_cfg;
4521 	int rc;
4522 
4523 	ENTER;
4524 	ioa_cfg = (struct ipr_ioa_cfg *) scsi_cmd->device->host->hostdata;
4525 
4526 	dev_err(&ioa_cfg->pdev->dev,
4527 		"Adapter being reset as a result of error recovery.\n");
4528 
4529 	if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
4530 		ioa_cfg->sdt_state = GET_DUMP;
4531 
4532 	rc = ipr_reset_reload(ioa_cfg, IPR_SHUTDOWN_ABBREV);
4533 
4534 	LEAVE;
4535 	return rc;
4536 }
4537 
4538 static int ipr_eh_host_reset(struct scsi_cmnd *cmd)
4539 {
4540 	int rc;
4541 
4542 	spin_lock_irq(cmd->device->host->host_lock);
4543 	rc = __ipr_eh_host_reset(cmd);
4544 	spin_unlock_irq(cmd->device->host->host_lock);
4545 
4546 	return rc;
4547 }
4548 
4549 /**
4550  * ipr_device_reset - Reset the device
4551  * @ioa_cfg:	ioa config struct
4552  * @res:		resource entry struct
4553  *
4554  * This function issues a device reset to the affected device.
4555  * If the device is a SCSI device, a LUN reset will be sent
4556  * to the device first. If that does not work, a target reset
4557  * will be sent. If the device is a SATA device, a PHY reset will
4558  * be sent.
4559  *
4560  * Return value:
4561  *	0 on success / non-zero on failure
4562  **/
4563 static int ipr_device_reset(struct ipr_ioa_cfg *ioa_cfg,
4564 			    struct ipr_resource_entry *res)
4565 {
4566 	struct ipr_cmnd *ipr_cmd;
4567 	struct ipr_ioarcb *ioarcb;
4568 	struct ipr_cmd_pkt *cmd_pkt;
4569 	struct ipr_ioarcb_ata_regs *regs;
4570 	u32 ioasc;
4571 
4572 	ENTER;
4573 	ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
4574 	ioarcb = &ipr_cmd->ioarcb;
4575 	cmd_pkt = &ioarcb->cmd_pkt;
4576 
4577 	if (ipr_cmd->ioa_cfg->sis64) {
4578 		regs = &ipr_cmd->i.ata_ioadl.regs;
4579 		ioarcb->add_cmd_parms_offset = cpu_to_be16(sizeof(*ioarcb));
4580 	} else
4581 		regs = &ioarcb->u.add_data.u.regs;
4582 
4583 	ioarcb->res_handle = res->res_handle;
4584 	cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
4585 	cmd_pkt->cdb[0] = IPR_RESET_DEVICE;
4586 	if (ipr_is_gata(res)) {
4587 		cmd_pkt->cdb[2] = IPR_ATA_PHY_RESET;
4588 		ioarcb->add_cmd_parms_len = cpu_to_be16(sizeof(regs->flags));
4589 		regs->flags |= IPR_ATA_FLAG_STATUS_ON_GOOD_COMPLETION;
4590 	}
4591 
4592 	ipr_send_blocking_cmd(ipr_cmd, ipr_timeout, IPR_DEVICE_RESET_TIMEOUT);
4593 	ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
4594 	list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
4595 	if (ipr_is_gata(res) && res->sata_port && ioasc != IPR_IOASC_IOA_WAS_RESET) {
4596 		if (ipr_cmd->ioa_cfg->sis64)
4597 			memcpy(&res->sata_port->ioasa, &ipr_cmd->s.ioasa64.u.gata,
4598 			       sizeof(struct ipr_ioasa_gata));
4599 		else
4600 			memcpy(&res->sata_port->ioasa, &ipr_cmd->s.ioasa.u.gata,
4601 			       sizeof(struct ipr_ioasa_gata));
4602 	}
4603 
4604 	LEAVE;
4605 	return (IPR_IOASC_SENSE_KEY(ioasc) ? -EIO : 0);
4606 }
4607 
4608 /**
4609  * ipr_sata_reset - Reset the SATA port
4610  * @link:	SATA link to reset
4611  * @classes:	class of the attached device
4612  *
4613  * This function issues a SATA phy reset to the affected ATA link.
4614  *
4615  * Return value:
4616  *	0 on success / non-zero on failure
4617  **/
4618 static int ipr_sata_reset(struct ata_link *link, unsigned int *classes,
4619 				unsigned long deadline)
4620 {
4621 	struct ipr_sata_port *sata_port = link->ap->private_data;
4622 	struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg;
4623 	struct ipr_resource_entry *res;
4624 	unsigned long lock_flags = 0;
4625 	int rc = -ENXIO;
4626 
4627 	ENTER;
4628 	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4629 	while (ioa_cfg->in_reset_reload) {
4630 		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4631 		wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
4632 		spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4633 	}
4634 
4635 	res = sata_port->res;
4636 	if (res) {
4637 		rc = ipr_device_reset(ioa_cfg, res);
4638 		*classes = res->ata_class;
4639 	}
4640 
4641 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4642 	LEAVE;
4643 	return rc;
4644 }
4645 
4646 /**
4647  * __ipr_eh_dev_reset - Reset the device
4648  * @scsi_cmd:	scsi command struct
4649  *
4650  * This function issues a device reset to the affected device.
4651  * A LUN reset will be sent to the device first. If that does
4652  * not work, a target reset will be sent.
4653  *
4654  * Return value:
4655  *	SUCCESS / FAILED
4656  **/
4657 static int __ipr_eh_dev_reset(struct scsi_cmnd *scsi_cmd)
4658 {
4659 	struct ipr_cmnd *ipr_cmd;
4660 	struct ipr_ioa_cfg *ioa_cfg;
4661 	struct ipr_resource_entry *res;
4662 	struct ata_port *ap;
4663 	int rc = 0;
4664 
4665 	ENTER;
4666 	ioa_cfg = (struct ipr_ioa_cfg *) scsi_cmd->device->host->hostdata;
4667 	res = scsi_cmd->device->hostdata;
4668 
4669 	if (!res)
4670 		return FAILED;
4671 
4672 	/*
4673 	 * If we are currently going through reset/reload, return failed. This will force the
4674 	 * mid-layer to call ipr_eh_host_reset, which will then go to sleep and wait for the
4675 	 * reset to complete
4676 	 */
4677 	if (ioa_cfg->in_reset_reload)
4678 		return FAILED;
4679 	if (ioa_cfg->ioa_is_dead)
4680 		return FAILED;
4681 
4682 	list_for_each_entry(ipr_cmd, &ioa_cfg->pending_q, queue) {
4683 		if (ipr_cmd->ioarcb.res_handle == res->res_handle) {
4684 			if (ipr_cmd->scsi_cmd)
4685 				ipr_cmd->done = ipr_scsi_eh_done;
4686 			if (ipr_cmd->qc)
4687 				ipr_cmd->done = ipr_sata_eh_done;
4688 			if (ipr_cmd->qc && !(ipr_cmd->qc->flags & ATA_QCFLAG_FAILED)) {
4689 				ipr_cmd->qc->err_mask |= AC_ERR_TIMEOUT;
4690 				ipr_cmd->qc->flags |= ATA_QCFLAG_FAILED;
4691 			}
4692 		}
4693 	}
4694 
4695 	res->resetting_device = 1;
4696 	scmd_printk(KERN_ERR, scsi_cmd, "Resetting device\n");
4697 
4698 	if (ipr_is_gata(res) && res->sata_port) {
4699 		ap = res->sata_port->ap;
4700 		spin_unlock_irq(scsi_cmd->device->host->host_lock);
4701 		ata_std_error_handler(ap);
4702 		spin_lock_irq(scsi_cmd->device->host->host_lock);
4703 
4704 		list_for_each_entry(ipr_cmd, &ioa_cfg->pending_q, queue) {
4705 			if (ipr_cmd->ioarcb.res_handle == res->res_handle) {
4706 				rc = -EIO;
4707 				break;
4708 			}
4709 		}
4710 	} else
4711 		rc = ipr_device_reset(ioa_cfg, res);
4712 	res->resetting_device = 0;
4713 
4714 	LEAVE;
4715 	return (rc ? FAILED : SUCCESS);
4716 }
4717 
4718 static int ipr_eh_dev_reset(struct scsi_cmnd *cmd)
4719 {
4720 	int rc;
4721 
4722 	spin_lock_irq(cmd->device->host->host_lock);
4723 	rc = __ipr_eh_dev_reset(cmd);
4724 	spin_unlock_irq(cmd->device->host->host_lock);
4725 
4726 	return rc;
4727 }
4728 
4729 /**
4730  * ipr_bus_reset_done - Op done function for bus reset.
4731  * @ipr_cmd:	ipr command struct
4732  *
4733  * This function is the op done function for a bus reset
4734  *
4735  * Return value:
4736  * 	none
4737  **/
4738 static void ipr_bus_reset_done(struct ipr_cmnd *ipr_cmd)
4739 {
4740 	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
4741 	struct ipr_resource_entry *res;
4742 
4743 	ENTER;
4744 	if (!ioa_cfg->sis64)
4745 		list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
4746 			if (res->res_handle == ipr_cmd->ioarcb.res_handle) {
4747 				scsi_report_bus_reset(ioa_cfg->host, res->bus);
4748 				break;
4749 			}
4750 		}
4751 
4752 	/*
4753 	 * If abort has not completed, indicate the reset has, else call the
4754 	 * abort's done function to wake the sleeping eh thread
4755 	 */
4756 	if (ipr_cmd->sibling->sibling)
4757 		ipr_cmd->sibling->sibling = NULL;
4758 	else
4759 		ipr_cmd->sibling->done(ipr_cmd->sibling);
4760 
4761 	list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
4762 	LEAVE;
4763 }
4764 
4765 /**
4766  * ipr_abort_timeout - An abort task has timed out
4767  * @ipr_cmd:	ipr command struct
4768  *
4769  * This function handles when an abort task times out. If this
4770  * happens we issue a bus reset since we have resources tied
4771  * up that must be freed before returning to the midlayer.
4772  *
4773  * Return value:
4774  *	none
4775  **/
4776 static void ipr_abort_timeout(struct ipr_cmnd *ipr_cmd)
4777 {
4778 	struct ipr_cmnd *reset_cmd;
4779 	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
4780 	struct ipr_cmd_pkt *cmd_pkt;
4781 	unsigned long lock_flags = 0;
4782 
4783 	ENTER;
4784 	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4785 	if (ipr_cmd->completion.done || ioa_cfg->in_reset_reload) {
4786 		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4787 		return;
4788 	}
4789 
4790 	sdev_printk(KERN_ERR, ipr_cmd->u.sdev, "Abort timed out. Resetting bus.\n");
4791 	reset_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
4792 	ipr_cmd->sibling = reset_cmd;
4793 	reset_cmd->sibling = ipr_cmd;
4794 	reset_cmd->ioarcb.res_handle = ipr_cmd->ioarcb.res_handle;
4795 	cmd_pkt = &reset_cmd->ioarcb.cmd_pkt;
4796 	cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
4797 	cmd_pkt->cdb[0] = IPR_RESET_DEVICE;
4798 	cmd_pkt->cdb[2] = IPR_RESET_TYPE_SELECT | IPR_BUS_RESET;
4799 
4800 	ipr_do_req(reset_cmd, ipr_bus_reset_done, ipr_timeout, IPR_DEVICE_RESET_TIMEOUT);
4801 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4802 	LEAVE;
4803 }
4804 
4805 /**
4806  * ipr_cancel_op - Cancel specified op
4807  * @scsi_cmd:	scsi command struct
4808  *
4809  * This function cancels specified op.
4810  *
4811  * Return value:
4812  *	SUCCESS / FAILED
4813  **/
4814 static int ipr_cancel_op(struct scsi_cmnd *scsi_cmd)
4815 {
4816 	struct ipr_cmnd *ipr_cmd;
4817 	struct ipr_ioa_cfg *ioa_cfg;
4818 	struct ipr_resource_entry *res;
4819 	struct ipr_cmd_pkt *cmd_pkt;
4820 	u32 ioasc;
4821 	int op_found = 0;
4822 
4823 	ENTER;
4824 	ioa_cfg = (struct ipr_ioa_cfg *)scsi_cmd->device->host->hostdata;
4825 	res = scsi_cmd->device->hostdata;
4826 
4827 	/* If we are currently going through reset/reload, return failed.
4828 	 * This will force the mid-layer to call ipr_eh_host_reset,
4829 	 * which will then go to sleep and wait for the reset to complete
4830 	 */
4831 	if (ioa_cfg->in_reset_reload || ioa_cfg->ioa_is_dead)
4832 		return FAILED;
4833 	if (!res || !ipr_is_gscsi(res))
4834 		return FAILED;
4835 
4836 	list_for_each_entry(ipr_cmd, &ioa_cfg->pending_q, queue) {
4837 		if (ipr_cmd->scsi_cmd == scsi_cmd) {
4838 			ipr_cmd->done = ipr_scsi_eh_done;
4839 			op_found = 1;
4840 			break;
4841 		}
4842 	}
4843 
4844 	if (!op_found)
4845 		return SUCCESS;
4846 
4847 	ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
4848 	ipr_cmd->ioarcb.res_handle = res->res_handle;
4849 	cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt;
4850 	cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
4851 	cmd_pkt->cdb[0] = IPR_CANCEL_ALL_REQUESTS;
4852 	ipr_cmd->u.sdev = scsi_cmd->device;
4853 
4854 	scmd_printk(KERN_ERR, scsi_cmd, "Aborting command: %02X\n",
4855 		    scsi_cmd->cmnd[0]);
4856 	ipr_send_blocking_cmd(ipr_cmd, ipr_abort_timeout, IPR_CANCEL_ALL_TIMEOUT);
4857 	ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
4858 
4859 	/*
4860 	 * If the abort task timed out and we sent a bus reset, we will get
4861 	 * one of the following responses to the abort
4862 	 */
4863 	if (ioasc == IPR_IOASC_BUS_WAS_RESET || ioasc == IPR_IOASC_SYNC_REQUIRED) {
4864 		ioasc = 0;
4865 		ipr_trace;
4866 	}
4867 
4868 	list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
4869 	if (!ipr_is_naca_model(res))
4870 		res->needs_sync_complete = 1;
4871 
4872 	LEAVE;
4873 	return (IPR_IOASC_SENSE_KEY(ioasc) ? FAILED : SUCCESS);
4874 }
4875 
4876 /**
4877  * ipr_eh_abort - Abort a single op
4878  * @scsi_cmd:	scsi command struct
4879  *
4880  * Return value:
4881  * 	SUCCESS / FAILED
4882  **/
4883 static int ipr_eh_abort(struct scsi_cmnd *scsi_cmd)
4884 {
4885 	unsigned long flags;
4886 	int rc;
4887 
4888 	ENTER;
4889 
4890 	spin_lock_irqsave(scsi_cmd->device->host->host_lock, flags);
4891 	rc = ipr_cancel_op(scsi_cmd);
4892 	spin_unlock_irqrestore(scsi_cmd->device->host->host_lock, flags);
4893 
4894 	LEAVE;
4895 	return rc;
4896 }
4897 
4898 /**
4899  * ipr_handle_other_interrupt - Handle "other" interrupts
4900  * @ioa_cfg:	ioa config struct
4901  *
4902  * Return value:
4903  * 	IRQ_NONE / IRQ_HANDLED
4904  **/
4905 static irqreturn_t ipr_handle_other_interrupt(struct ipr_ioa_cfg *ioa_cfg)
4906 {
4907 	irqreturn_t rc = IRQ_HANDLED;
4908 	volatile u32 int_reg, int_mask_reg;
4909 
4910 	int_mask_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg32);
4911 	int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32) & ~int_mask_reg;
4912 
4913 	/* If an interrupt on the adapter did not occur, ignore it.
4914 	 * Or in the case of SIS 64, check for a stage change interrupt.
4915 	 */
4916 	if ((int_reg & IPR_PCII_OPER_INTERRUPTS) == 0) {
4917 		if (ioa_cfg->sis64) {
4918 			int_mask_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
4919 			int_reg = readl(ioa_cfg->regs.sense_interrupt_reg) & ~int_mask_reg;
4920 			if (int_reg & IPR_PCII_IPL_STAGE_CHANGE) {
4921 
4922 				/* clear stage change */
4923 				writel(IPR_PCII_IPL_STAGE_CHANGE, ioa_cfg->regs.clr_interrupt_reg);
4924 				int_reg = readl(ioa_cfg->regs.sense_interrupt_reg) & ~int_mask_reg;
4925 				list_del(&ioa_cfg->reset_cmd->queue);
4926 				del_timer(&ioa_cfg->reset_cmd->timer);
4927 				ipr_reset_ioa_job(ioa_cfg->reset_cmd);
4928 				return IRQ_HANDLED;
4929 			}
4930 		}
4931 
4932 		return IRQ_NONE;
4933 	}
4934 
4935 	if (int_reg & IPR_PCII_IOA_TRANS_TO_OPER) {
4936 		/* Mask the interrupt */
4937 		writel(IPR_PCII_IOA_TRANS_TO_OPER, ioa_cfg->regs.set_interrupt_mask_reg);
4938 
4939 		/* Clear the interrupt */
4940 		writel(IPR_PCII_IOA_TRANS_TO_OPER, ioa_cfg->regs.clr_interrupt_reg);
4941 		int_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
4942 
4943 		list_del(&ioa_cfg->reset_cmd->queue);
4944 		del_timer(&ioa_cfg->reset_cmd->timer);
4945 		ipr_reset_ioa_job(ioa_cfg->reset_cmd);
4946 	} else {
4947 		if (int_reg & IPR_PCII_IOA_UNIT_CHECKED)
4948 			ioa_cfg->ioa_unit_checked = 1;
4949 		else
4950 			dev_err(&ioa_cfg->pdev->dev,
4951 				"Permanent IOA failure. 0x%08X\n", int_reg);
4952 
4953 		if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
4954 			ioa_cfg->sdt_state = GET_DUMP;
4955 
4956 		ipr_mask_and_clear_interrupts(ioa_cfg, ~0);
4957 		ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
4958 	}
4959 
4960 	return rc;
4961 }
4962 
4963 /**
4964  * ipr_isr_eh - Interrupt service routine error handler
4965  * @ioa_cfg:	ioa config struct
4966  * @msg:	message to log
4967  *
4968  * Return value:
4969  * 	none
4970  **/
4971 static void ipr_isr_eh(struct ipr_ioa_cfg *ioa_cfg, char *msg)
4972 {
4973 	ioa_cfg->errors_logged++;
4974 	dev_err(&ioa_cfg->pdev->dev, "%s\n", msg);
4975 
4976 	if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
4977 		ioa_cfg->sdt_state = GET_DUMP;
4978 
4979 	ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
4980 }
4981 
4982 /**
4983  * ipr_isr - Interrupt service routine
4984  * @irq:	irq number
4985  * @devp:	pointer to ioa config struct
4986  *
4987  * Return value:
4988  * 	IRQ_NONE / IRQ_HANDLED
4989  **/
4990 static irqreturn_t ipr_isr(int irq, void *devp)
4991 {
4992 	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)devp;
4993 	unsigned long lock_flags = 0;
4994 	volatile u32 int_reg;
4995 	u32 ioasc;
4996 	u16 cmd_index;
4997 	int num_hrrq = 0;
4998 	struct ipr_cmnd *ipr_cmd;
4999 	irqreturn_t rc = IRQ_NONE;
5000 
5001 	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
5002 
5003 	/* If interrupts are disabled, ignore the interrupt */
5004 	if (!ioa_cfg->allow_interrupts) {
5005 		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
5006 		return IRQ_NONE;
5007 	}
5008 
5009 	while (1) {
5010 		ipr_cmd = NULL;
5011 
5012 		while ((be32_to_cpu(*ioa_cfg->hrrq_curr) & IPR_HRRQ_TOGGLE_BIT) ==
5013 		       ioa_cfg->toggle_bit) {
5014 
5015 			cmd_index = (be32_to_cpu(*ioa_cfg->hrrq_curr) &
5016 				     IPR_HRRQ_REQ_RESP_HANDLE_MASK) >> IPR_HRRQ_REQ_RESP_HANDLE_SHIFT;
5017 
5018 			if (unlikely(cmd_index >= IPR_NUM_CMD_BLKS)) {
5019 				ipr_isr_eh(ioa_cfg, "Invalid response handle from IOA");
5020 				spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
5021 				return IRQ_HANDLED;
5022 			}
5023 
5024 			ipr_cmd = ioa_cfg->ipr_cmnd_list[cmd_index];
5025 
5026 			ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
5027 
5028 			ipr_trc_hook(ipr_cmd, IPR_TRACE_FINISH, ioasc);
5029 
5030 			list_del(&ipr_cmd->queue);
5031 			del_timer(&ipr_cmd->timer);
5032 			ipr_cmd->done(ipr_cmd);
5033 
5034 			rc = IRQ_HANDLED;
5035 
5036 			if (ioa_cfg->hrrq_curr < ioa_cfg->hrrq_end) {
5037 				ioa_cfg->hrrq_curr++;
5038 			} else {
5039 				ioa_cfg->hrrq_curr = ioa_cfg->hrrq_start;
5040 				ioa_cfg->toggle_bit ^= 1u;
5041 			}
5042 		}
5043 
5044 		if (ipr_cmd != NULL) {
5045 			/* Clear the PCI interrupt */
5046 			do {
5047 				writel(IPR_PCII_HRRQ_UPDATED, ioa_cfg->regs.clr_interrupt_reg32);
5048 				int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32);
5049 			} while (int_reg & IPR_PCII_HRRQ_UPDATED &&
5050 					num_hrrq++ < IPR_MAX_HRRQ_RETRIES);
5051 
5052 			if (int_reg & IPR_PCII_HRRQ_UPDATED) {
5053 				ipr_isr_eh(ioa_cfg, "Error clearing HRRQ");
5054 				spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
5055 				return IRQ_HANDLED;
5056 			}
5057 
5058 		} else
5059 			break;
5060 	}
5061 
5062 	if (unlikely(rc == IRQ_NONE))
5063 		rc = ipr_handle_other_interrupt(ioa_cfg);
5064 
5065 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
5066 	return rc;
5067 }
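/*
 * Note on the HRRQ handling in ipr_isr() above: the adapter posts
 * response handles into a circular host RRQ.  Each entry carries a
 * toggle bit, and an entry belongs to the driver only while that bit
 * matches ioa_cfg->toggle_bit.  When the consumer pointer wraps from
 * hrrq_end back to hrrq_start the driver flips toggle_bit, so entries
 * left over from the previous pass around the ring are never processed
 * twice.
 */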
5068 
5069 /**
5070  * ipr_build_ioadl64 - Build a scatter/gather list and map the buffer
5071  * @ioa_cfg:	ioa config struct
5072  * @ipr_cmd:	ipr command struct
5073  *
5074  * Return value:
5075  * 	0 on success / -1 on failure
5076  **/
5077 static int ipr_build_ioadl64(struct ipr_ioa_cfg *ioa_cfg,
5078 			     struct ipr_cmnd *ipr_cmd)
5079 {
5080 	int i, nseg;
5081 	struct scatterlist *sg;
5082 	u32 length;
5083 	u32 ioadl_flags = 0;
5084 	struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
5085 	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
5086 	struct ipr_ioadl64_desc *ioadl64 = ipr_cmd->i.ioadl64;
5087 
5088 	length = scsi_bufflen(scsi_cmd);
5089 	if (!length)
5090 		return 0;
5091 
5092 	nseg = scsi_dma_map(scsi_cmd);
5093 	if (nseg < 0) {
5094 		dev_err(&ioa_cfg->pdev->dev, "pci_map_sg failed!\n");
5095 		return -1;
5096 	}
5097 
5098 	ipr_cmd->dma_use_sg = nseg;
5099 
5100 	ioarcb->data_transfer_length = cpu_to_be32(length);
5101 	ioarcb->ioadl_len =
5102 		cpu_to_be32(sizeof(struct ipr_ioadl64_desc) * ipr_cmd->dma_use_sg);
5103 
5104 	if (scsi_cmd->sc_data_direction == DMA_TO_DEVICE) {
5105 		ioadl_flags = IPR_IOADL_FLAGS_WRITE;
5106 		ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
5107 	} else if (scsi_cmd->sc_data_direction == DMA_FROM_DEVICE)
5108 		ioadl_flags = IPR_IOADL_FLAGS_READ;
5109 
5110 	scsi_for_each_sg(scsi_cmd, sg, ipr_cmd->dma_use_sg, i) {
5111 		ioadl64[i].flags = cpu_to_be32(ioadl_flags);
5112 		ioadl64[i].data_len = cpu_to_be32(sg_dma_len(sg));
5113 		ioadl64[i].address = cpu_to_be64(sg_dma_address(sg));
5114 	}
5115 
5116 	ioadl64[i-1].flags |= cpu_to_be32(IPR_IOADL_FLAGS_LAST);
5117 	return 0;
5118 }
5119 
5120 /**
5121  * ipr_build_ioadl - Build a scatter/gather list and map the buffer
5122  * @ioa_cfg:	ioa config struct
5123  * @ipr_cmd:	ipr command struct
5124  *
5125  * Return value:
5126  * 	0 on success / -1 on failure
5127  **/
5128 static int ipr_build_ioadl(struct ipr_ioa_cfg *ioa_cfg,
5129 			   struct ipr_cmnd *ipr_cmd)
5130 {
5131 	int i, nseg;
5132 	struct scatterlist *sg;
5133 	u32 length;
5134 	u32 ioadl_flags = 0;
5135 	struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
5136 	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
5137 	struct ipr_ioadl_desc *ioadl = ipr_cmd->i.ioadl;
5138 
5139 	length = scsi_bufflen(scsi_cmd);
5140 	if (!length)
5141 		return 0;
5142 
5143 	nseg = scsi_dma_map(scsi_cmd);
5144 	if (nseg < 0) {
5145 		dev_err(&ioa_cfg->pdev->dev, "scsi_dma_map failed!\n");
5146 		return -1;
5147 	}
5148 
5149 	ipr_cmd->dma_use_sg = nseg;
5150 
5151 	if (scsi_cmd->sc_data_direction == DMA_TO_DEVICE) {
5152 		ioadl_flags = IPR_IOADL_FLAGS_WRITE;
5153 		ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
5154 		ioarcb->data_transfer_length = cpu_to_be32(length);
5155 		ioarcb->ioadl_len =
5156 			cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
5157 	} else if (scsi_cmd->sc_data_direction == DMA_FROM_DEVICE) {
5158 		ioadl_flags = IPR_IOADL_FLAGS_READ;
5159 		ioarcb->read_data_transfer_length = cpu_to_be32(length);
5160 		ioarcb->read_ioadl_len =
5161 			cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
5162 	}
5163 
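	/*
	 * Small S/G lists fit in the IOADL area embedded in the IOARCB, so
	 * point the read/write IOADL addresses at that in-line area instead
	 * of the external descriptor list.
	 */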
5164 	if (ipr_cmd->dma_use_sg <= ARRAY_SIZE(ioarcb->u.add_data.u.ioadl)) {
5165 		ioadl = ioarcb->u.add_data.u.ioadl;
5166 		ioarcb->write_ioadl_addr = cpu_to_be32((ipr_cmd->dma_addr) +
5167 				    offsetof(struct ipr_ioarcb, u.add_data));
5168 		ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr;
5169 	}
5170 
5171 	scsi_for_each_sg(scsi_cmd, sg, ipr_cmd->dma_use_sg, i) {
5172 		ioadl[i].flags_and_data_len =
5173 			cpu_to_be32(ioadl_flags | sg_dma_len(sg));
5174 		ioadl[i].address = cpu_to_be32(sg_dma_address(sg));
5175 	}
5176 
5177 	ioadl[i-1].flags_and_data_len |= cpu_to_be32(IPR_IOADL_FLAGS_LAST);
5178 	return 0;
5179 }
5180 
5181 /**
5182  * ipr_get_task_attributes - Translate SPI Q-Tag to task attributes
5183  * @scsi_cmd:	scsi command struct
5184  *
5185  * Return value:
5186  * 	task attributes
5187  **/
5188 static u8 ipr_get_task_attributes(struct scsi_cmnd *scsi_cmd)
5189 {
5190 	u8 tag[2];
5191 	u8 rc = IPR_FLAGS_LO_UNTAGGED_TASK;
5192 
5193 	if (scsi_populate_tag_msg(scsi_cmd, tag)) {
5194 		switch (tag[0]) {
5195 		case MSG_SIMPLE_TAG:
5196 			rc = IPR_FLAGS_LO_SIMPLE_TASK;
5197 			break;
5198 		case MSG_HEAD_TAG:
5199 			rc = IPR_FLAGS_LO_HEAD_OF_Q_TASK;
5200 			break;
5201 		case MSG_ORDERED_TAG:
5202 			rc = IPR_FLAGS_LO_ORDERED_TASK;
5203 			break;
5204 		}
5205 	}
5206 
5207 	return rc;
5208 }
5209 
5210 /**
5211  * ipr_erp_done - Process completion of ERP for a device
5212  * @ipr_cmd:		ipr command struct
5213  *
5214  * This function copies the sense buffer into the scsi_cmd
5215  * struct and pushes the scsi_done function.
5216  *
5217  * Return value:
5218  * 	nothing
5219  **/
5220 static void ipr_erp_done(struct ipr_cmnd *ipr_cmd)
5221 {
5222 	struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
5223 	struct ipr_resource_entry *res = scsi_cmd->device->hostdata;
5224 	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5225 	u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
5226 
5227 	if (IPR_IOASC_SENSE_KEY(ioasc) > 0) {
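	/*
	 * A non-zero sense key in the IOASC means the REQUEST SENSE issued
	 * during ERP itself failed; otherwise copy the sense data we
	 * collected into the mid-layer's sense buffer.
	 */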
5228 		scsi_cmd->result |= (DID_ERROR << 16);
5229 		scmd_printk(KERN_ERR, scsi_cmd,
5230 			    "Request Sense failed with IOASC: 0x%08X\n", ioasc);
5231 	} else {
5232 		memcpy(scsi_cmd->sense_buffer, ipr_cmd->sense_buffer,
5233 		       SCSI_SENSE_BUFFERSIZE);
5234 	}
5235 
5236 	if (res) {
5237 		if (!ipr_is_naca_model(res))
5238 			res->needs_sync_complete = 1;
5239 		res->in_erp = 0;
5240 	}
5241 	scsi_dma_unmap(ipr_cmd->scsi_cmd);
5242 	list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
5243 	scsi_cmd->scsi_done(scsi_cmd);
5244 }
5245 
5246 /**
5247  * ipr_reinit_ipr_cmnd_for_erp - Re-initialize a cmnd block to be used for ERP
5248  * @ipr_cmd:	ipr command struct
5249  *
5250  * Return value:
5251  * 	none
5252  **/
5253 static void ipr_reinit_ipr_cmnd_for_erp(struct ipr_cmnd *ipr_cmd)
5254 {
5255 	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
5256 	struct ipr_ioasa *ioasa = &ipr_cmd->s.ioasa;
5257 	dma_addr_t dma_addr = ipr_cmd->dma_addr;
5258 
5259 	memset(&ioarcb->cmd_pkt, 0, sizeof(struct ipr_cmd_pkt));
5260 	ioarcb->data_transfer_length = 0;
5261 	ioarcb->read_data_transfer_length = 0;
5262 	ioarcb->ioadl_len = 0;
5263 	ioarcb->read_ioadl_len = 0;
5264 	ioasa->hdr.ioasc = 0;
5265 	ioasa->hdr.residual_data_len = 0;
5266 
5267 	if (ipr_cmd->ioa_cfg->sis64)
5268 		ioarcb->u.sis64_addr_data.data_ioadl_addr =
5269 			cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, i.ioadl64));
5270 	else {
5271 		ioarcb->write_ioadl_addr =
5272 			cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, i.ioadl));
5273 		ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr;
5274 	}
5275 }
5276 
5277 /**
5278  * ipr_erp_request_sense - Send request sense to a device
5279  * @ipr_cmd:	ipr command struct
5280  *
5281  * This function sends a request sense to a device as a result
5282  * of a check condition.
5283  *
5284  * Return value:
5285  * 	nothing
5286  **/
5287 static void ipr_erp_request_sense(struct ipr_cmnd *ipr_cmd)
5288 {
5289 	struct ipr_cmd_pkt *cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt;
5290 	u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
5291 
5292 	if (IPR_IOASC_SENSE_KEY(ioasc) > 0) {
5293 		ipr_erp_done(ipr_cmd);
5294 		return;
5295 	}
5296 
5297 	ipr_reinit_ipr_cmnd_for_erp(ipr_cmd);
5298 
5299 	cmd_pkt->request_type = IPR_RQTYPE_SCSICDB;
5300 	cmd_pkt->cdb[0] = REQUEST_SENSE;
5301 	cmd_pkt->cdb[4] = SCSI_SENSE_BUFFERSIZE;
5302 	cmd_pkt->flags_hi |= IPR_FLAGS_HI_SYNC_OVERRIDE;
5303 	cmd_pkt->flags_hi |= IPR_FLAGS_HI_NO_ULEN_CHK;
5304 	cmd_pkt->timeout = cpu_to_be16(IPR_REQUEST_SENSE_TIMEOUT / HZ);
5305 
5306 	ipr_init_ioadl(ipr_cmd, ipr_cmd->sense_buffer_dma,
5307 		       SCSI_SENSE_BUFFERSIZE, IPR_IOADL_FLAGS_READ_LAST);
5308 
5309 	ipr_do_req(ipr_cmd, ipr_erp_done, ipr_timeout,
5310 		   IPR_REQUEST_SENSE_TIMEOUT * 2);
5311 }
5312 
5313 /**
5314  * ipr_erp_cancel_all - Send cancel all to a device
5315  * @ipr_cmd:	ipr command struct
5316  *
5317  * This function sends a cancel all to a device to clear the
5318  * queue. If we are running TCQ on the device, QERR is set to 1,
5319  * which means all outstanding ops have been dropped on the floor.
5320  * Cancel all will return them to us.
5321  *
5322  * Return value:
5323  * 	nothing
5324  **/
5325 static void ipr_erp_cancel_all(struct ipr_cmnd *ipr_cmd)
5326 {
5327 	struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
5328 	struct ipr_resource_entry *res = scsi_cmd->device->hostdata;
5329 	struct ipr_cmd_pkt *cmd_pkt;
5330 
5331 	res->in_erp = 1;
5332 
5333 	ipr_reinit_ipr_cmnd_for_erp(ipr_cmd);
5334 
5335 	if (!scsi_get_tag_type(scsi_cmd->device)) {
5336 		ipr_erp_request_sense(ipr_cmd);
5337 		return;
5338 	}
5339 
5340 	cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt;
5341 	cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
5342 	cmd_pkt->cdb[0] = IPR_CANCEL_ALL_REQUESTS;
5343 
5344 	ipr_do_req(ipr_cmd, ipr_erp_request_sense, ipr_timeout,
5345 		   IPR_CANCEL_ALL_TIMEOUT);
5346 }
5347 
5348 /**
5349  * ipr_dump_ioasa - Dump contents of IOASA
5350  * @ioa_cfg:	ioa config struct
5351  * @ipr_cmd:	ipr command struct
5352  * @res:		resource entry struct
5353  *
5354  * This function is invoked by the interrupt handler when ops
5355  * fail. It will log the IOASA if appropriate. Only called
5356  * for GPDD ops.
5357  *
5358  * Return value:
5359  * 	none
5360  **/
5361 static void ipr_dump_ioasa(struct ipr_ioa_cfg *ioa_cfg,
5362 			   struct ipr_cmnd *ipr_cmd, struct ipr_resource_entry *res)
5363 {
5364 	int i;
5365 	u16 data_len;
5366 	u32 ioasc, fd_ioasc;
5367 	struct ipr_ioasa *ioasa = &ipr_cmd->s.ioasa;
5368 	__be32 *ioasa_data = (__be32 *)ioasa;
5369 	int error_index;
5370 
5371 	ioasc = be32_to_cpu(ioasa->hdr.ioasc) & IPR_IOASC_IOASC_MASK;
5372 	fd_ioasc = be32_to_cpu(ioasa->hdr.fd_ioasc) & IPR_IOASC_IOASC_MASK;
5373 
5374 	if (0 == ioasc)
5375 		return;
5376 
5377 	if (ioa_cfg->log_level < IPR_DEFAULT_LOG_LEVEL)
5378 		return;
5379 
5380 	if (ioasc == IPR_IOASC_BUS_WAS_RESET && fd_ioasc)
5381 		error_index = ipr_get_error(fd_ioasc);
5382 	else
5383 		error_index = ipr_get_error(ioasc);
5384 
5385 	if (ioa_cfg->log_level < IPR_MAX_LOG_LEVEL) {
5386 		/* Don't log an error if the IOA already logged one */
5387 		if (ioasa->hdr.ilid != 0)
5388 			return;
5389 
5390 		if (!ipr_is_gscsi(res))
5391 			return;
5392 
5393 		if (ipr_error_table[error_index].log_ioasa == 0)
5394 			return;
5395 	}
5396 
5397 	ipr_res_err(ioa_cfg, res, "%s\n", ipr_error_table[error_index].error);
5398 
5399 	data_len = be16_to_cpu(ioasa->hdr.ret_stat_len);
5400 	if (ioa_cfg->sis64 && sizeof(struct ipr_ioasa64) < data_len)
5401 		data_len = sizeof(struct ipr_ioasa64);
5402 	else if (!ioa_cfg->sis64 && sizeof(struct ipr_ioasa) < data_len)
5403 		data_len = sizeof(struct ipr_ioasa);
5404 
5405 	ipr_err("IOASA Dump:\n");
5406 
5407 	for (i = 0; i < data_len / 4; i += 4) {
5408 		ipr_err("%08X: %08X %08X %08X %08X\n", i*4,
5409 			be32_to_cpu(ioasa_data[i]),
5410 			be32_to_cpu(ioasa_data[i+1]),
5411 			be32_to_cpu(ioasa_data[i+2]),
5412 			be32_to_cpu(ioasa_data[i+3]));
5413 	}
5414 }
5415 
5416 /**
5417  * ipr_gen_sense - Generate SCSI sense data from an IOASA
5418  * @ipr_cmd:	ipr command struct
5420  *
5421  * Return value:
5422  * 	none
5423  **/
5424 static void ipr_gen_sense(struct ipr_cmnd *ipr_cmd)
5425 {
5426 	u32 failing_lba;
5427 	u8 *sense_buf = ipr_cmd->scsi_cmd->sense_buffer;
5428 	struct ipr_resource_entry *res = ipr_cmd->scsi_cmd->device->hostdata;
5429 	struct ipr_ioasa *ioasa = &ipr_cmd->s.ioasa;
5430 	u32 ioasc = be32_to_cpu(ioasa->hdr.ioasc);
5431 
5432 	memset(sense_buf, 0, SCSI_SENSE_BUFFERSIZE);
5433 
5434 	if (ioasc >= IPR_FIRST_DRIVER_IOASC)
5435 		return;
5436 
5437 	ipr_cmd->scsi_cmd->result = SAM_STAT_CHECK_CONDITION;
5438 
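	/*
	 * Use descriptor format sense data (response code 0x72) when a
	 * volume set's failing LBA will not fit in 32 bits; everything
	 * else gets fixed format (response code 0x70) sense data below.
	 */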
5439 	if (ipr_is_vset_device(res) &&
5440 	    ioasc == IPR_IOASC_MED_DO_NOT_REALLOC &&
5441 	    ioasa->u.vset.failing_lba_hi != 0) {
5442 		sense_buf[0] = 0x72;
5443 		sense_buf[1] = IPR_IOASC_SENSE_KEY(ioasc);
5444 		sense_buf[2] = IPR_IOASC_SENSE_CODE(ioasc);
5445 		sense_buf[3] = IPR_IOASC_SENSE_QUAL(ioasc);
5446 
5447 		sense_buf[7] = 12;
5448 		sense_buf[8] = 0;
5449 		sense_buf[9] = 0x0A;
5450 		sense_buf[10] = 0x80;
5451 
5452 		failing_lba = be32_to_cpu(ioasa->u.vset.failing_lba_hi);
5453 
5454 		sense_buf[12] = (failing_lba & 0xff000000) >> 24;
5455 		sense_buf[13] = (failing_lba & 0x00ff0000) >> 16;
5456 		sense_buf[14] = (failing_lba & 0x0000ff00) >> 8;
5457 		sense_buf[15] = failing_lba & 0x000000ff;
5458 
5459 		failing_lba = be32_to_cpu(ioasa->u.vset.failing_lba_lo);
5460 
5461 		sense_buf[16] = (failing_lba & 0xff000000) >> 24;
5462 		sense_buf[17] = (failing_lba & 0x00ff0000) >> 16;
5463 		sense_buf[18] = (failing_lba & 0x0000ff00) >> 8;
5464 		sense_buf[19] = failing_lba & 0x000000ff;
5465 	} else {
5466 		sense_buf[0] = 0x70;
5467 		sense_buf[2] = IPR_IOASC_SENSE_KEY(ioasc);
5468 		sense_buf[12] = IPR_IOASC_SENSE_CODE(ioasc);
5469 		sense_buf[13] = IPR_IOASC_SENSE_QUAL(ioasc);
5470 
5471 		/* Illegal request */
5472 		if ((IPR_IOASC_SENSE_KEY(ioasc) == 0x05) &&
5473 		    (be32_to_cpu(ioasa->hdr.ioasc_specific) & IPR_FIELD_POINTER_VALID)) {
5474 			sense_buf[7] = 10;	/* additional length */
5475 
5476 			/* IOARCB was in error */
5477 			if (IPR_IOASC_SENSE_CODE(ioasc) == 0x24)
5478 				sense_buf[15] = 0xC0;
5479 			else	/* Parameter data was invalid */
5480 				sense_buf[15] = 0x80;
5481 
5482 			sense_buf[16] =
5483 			    ((IPR_FIELD_POINTER_MASK &
5484 			      be32_to_cpu(ioasa->hdr.ioasc_specific)) >> 8) & 0xff;
5485 			sense_buf[17] =
5486 			    (IPR_FIELD_POINTER_MASK &
5487 			     be32_to_cpu(ioasa->hdr.ioasc_specific)) & 0xff;
5488 		} else {
5489 			if (ioasc == IPR_IOASC_MED_DO_NOT_REALLOC) {
5490 				if (ipr_is_vset_device(res))
5491 					failing_lba = be32_to_cpu(ioasa->u.vset.failing_lba_lo);
5492 				else
5493 					failing_lba = be32_to_cpu(ioasa->u.dasd.failing_lba);
5494 
5495 				sense_buf[0] |= 0x80;	/* Or in the Valid bit */
5496 				sense_buf[3] = (failing_lba & 0xff000000) >> 24;
5497 				sense_buf[4] = (failing_lba & 0x00ff0000) >> 16;
5498 				sense_buf[5] = (failing_lba & 0x0000ff00) >> 8;
5499 				sense_buf[6] = failing_lba & 0x000000ff;
5500 			}
5501 
5502 			sense_buf[7] = 6;	/* additional length */
5503 		}
5504 	}
5505 }
5506 
5507 /**
5508  * ipr_get_autosense - Copy autosense data to sense buffer
5509  * @ipr_cmd:	ipr command struct
5510  *
5511  * This function copies the autosense buffer to the buffer
5512  * in the scsi_cmd, if there is autosense available.
5513  *
5514  * Return value:
5515  *	1 if autosense was available / 0 if not
5516  **/
5517 static int ipr_get_autosense(struct ipr_cmnd *ipr_cmd)
5518 {
5519 	struct ipr_ioasa *ioasa = &ipr_cmd->s.ioasa;
5520 	struct ipr_ioasa64 *ioasa64 = &ipr_cmd->s.ioasa64;
5521 
5522 	if ((be32_to_cpu(ioasa->hdr.ioasc_specific) & IPR_AUTOSENSE_VALID) == 0)
5523 		return 0;
5524 
5525 	if (ipr_cmd->ioa_cfg->sis64)
5526 		memcpy(ipr_cmd->scsi_cmd->sense_buffer, ioasa64->auto_sense.data,
5527 		       min_t(u16, be16_to_cpu(ioasa64->auto_sense.auto_sense_len),
5528 			   SCSI_SENSE_BUFFERSIZE));
5529 	else
5530 		memcpy(ipr_cmd->scsi_cmd->sense_buffer, ioasa->auto_sense.data,
5531 		       min_t(u16, be16_to_cpu(ioasa->auto_sense.auto_sense_len),
5532 			   SCSI_SENSE_BUFFERSIZE));
5533 	return 1;
5534 }
5535 
5536 /**
5537  * ipr_erp_start - Process an error response for a SCSI op
5538  * @ioa_cfg:	ioa config struct
5539  * @ipr_cmd:	ipr command struct
5540  *
5541  * This function determines whether or not to initiate ERP
5542  * on the affected device.
5543  *
5544  * Return value:
5545  * 	nothing
5546  **/
5547 static void ipr_erp_start(struct ipr_ioa_cfg *ioa_cfg,
5548 			      struct ipr_cmnd *ipr_cmd)
5549 {
5550 	struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
5551 	struct ipr_resource_entry *res = scsi_cmd->device->hostdata;
5552 	u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
5553 	u32 masked_ioasc = ioasc & IPR_IOASC_IOASC_MASK;
5554 
5555 	if (!res) {
5556 		ipr_scsi_eh_done(ipr_cmd);
5557 		return;
5558 	}
5559 
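	/*
	 * For non-GSCSI resources, generate SCSI sense data from the IOASC
	 * unless the IOASC simply carries pass-through device bus status.
	 */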
5560 	if (!ipr_is_gscsi(res) && masked_ioasc != IPR_IOASC_HW_DEV_BUS_STATUS)
5561 		ipr_gen_sense(ipr_cmd);
5562 
5563 	ipr_dump_ioasa(ioa_cfg, ipr_cmd, res);
5564 
5565 	switch (masked_ioasc) {
5566 	case IPR_IOASC_ABORTED_CMD_TERM_BY_HOST:
5567 		if (ipr_is_naca_model(res))
5568 			scsi_cmd->result |= (DID_ABORT << 16);
5569 		else
5570 			scsi_cmd->result |= (DID_IMM_RETRY << 16);
5571 		break;
5572 	case IPR_IOASC_IR_RESOURCE_HANDLE:
5573 	case IPR_IOASC_IR_NO_CMDS_TO_2ND_IOA:
5574 		scsi_cmd->result |= (DID_NO_CONNECT << 16);
5575 		break;
5576 	case IPR_IOASC_HW_SEL_TIMEOUT:
5577 		scsi_cmd->result |= (DID_NO_CONNECT << 16);
5578 		if (!ipr_is_naca_model(res))
5579 			res->needs_sync_complete = 1;
5580 		break;
5581 	case IPR_IOASC_SYNC_REQUIRED:
5582 		if (!res->in_erp)
5583 			res->needs_sync_complete = 1;
5584 		scsi_cmd->result |= (DID_IMM_RETRY << 16);
5585 		break;
5586 	case IPR_IOASC_MED_DO_NOT_REALLOC: /* prevent retries */
5587 	case IPR_IOASA_IR_DUAL_IOA_DISABLED:
5588 		scsi_cmd->result |= (DID_PASSTHROUGH << 16);
5589 		break;
5590 	case IPR_IOASC_BUS_WAS_RESET:
5591 	case IPR_IOASC_BUS_WAS_RESET_BY_OTHER:
5592 		/*
5593 		 * Report the bus reset and ask for a retry. The device
5594 		 * will give CC/UA the next command.
5595 		 */
5596 		if (!res->resetting_device)
5597 			scsi_report_bus_reset(ioa_cfg->host, scsi_cmd->device->channel);
5598 		scsi_cmd->result |= (DID_ERROR << 16);
5599 		if (!ipr_is_naca_model(res))
5600 			res->needs_sync_complete = 1;
5601 		break;
5602 	case IPR_IOASC_HW_DEV_BUS_STATUS:
5603 		scsi_cmd->result |= IPR_IOASC_SENSE_STATUS(ioasc);
5604 		if (IPR_IOASC_SENSE_STATUS(ioasc) == SAM_STAT_CHECK_CONDITION) {
5605 			if (!ipr_get_autosense(ipr_cmd)) {
5606 				if (!ipr_is_naca_model(res)) {
5607 					ipr_erp_cancel_all(ipr_cmd);
5608 					return;
5609 				}
5610 			}
5611 		}
5612 		if (!ipr_is_naca_model(res))
5613 			res->needs_sync_complete = 1;
5614 		break;
5615 	case IPR_IOASC_NR_INIT_CMD_REQUIRED:
5616 		break;
5617 	default:
5618 		if (IPR_IOASC_SENSE_KEY(ioasc) > RECOVERED_ERROR)
5619 			scsi_cmd->result |= (DID_ERROR << 16);
5620 		if (!ipr_is_vset_device(res) && !ipr_is_naca_model(res))
5621 			res->needs_sync_complete = 1;
5622 		break;
5623 	}
5624 
5625 	scsi_dma_unmap(ipr_cmd->scsi_cmd);
5626 	list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
5627 	scsi_cmd->scsi_done(scsi_cmd);
5628 }
5629 
5630 /**
5631  * ipr_scsi_done - mid-layer done function
5632  * @ipr_cmd:	ipr command struct
5633  *
5634  * This function is invoked by the interrupt handler for
5635  * ops generated by the SCSI mid-layer
5636  *
5637  * Return value:
5638  * 	none
5639  **/
5640 static void ipr_scsi_done(struct ipr_cmnd *ipr_cmd)
5641 {
5642 	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5643 	struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
5644 	u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
5645 
5646 	scsi_set_resid(scsi_cmd, be32_to_cpu(ipr_cmd->s.ioasa.hdr.residual_data_len));
5647 
5648 	if (likely(IPR_IOASC_SENSE_KEY(ioasc) == 0)) {
5649 		scsi_dma_unmap(ipr_cmd->scsi_cmd);
5650 		list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
5651 		scsi_cmd->scsi_done(scsi_cmd);
5652 	} else
5653 		ipr_erp_start(ioa_cfg, ipr_cmd);
5654 }
5655 
5656 /**
5657  * ipr_queuecommand - Queue a mid-layer request
5658  * @scsi_cmd:	scsi command struct
5659  * @done:		done function
5660  *
5661  * This function queues a request generated by the mid-layer.
5662  *
5663  * Return value:
5664  *	0 on success
5665  *	SCSI_MLQUEUE_DEVICE_BUSY if device is busy
5666  *	SCSI_MLQUEUE_HOST_BUSY if host is busy
5667  **/
5668 static int ipr_queuecommand(struct scsi_cmnd *scsi_cmd,
5669 			    void (*done) (struct scsi_cmnd *))
5670 {
5671 	struct ipr_ioa_cfg *ioa_cfg;
5672 	struct ipr_resource_entry *res;
5673 	struct ipr_ioarcb *ioarcb;
5674 	struct ipr_cmnd *ipr_cmd;
5675 	int rc = 0;
5676 
5677 	scsi_cmd->scsi_done = done;
5678 	ioa_cfg = (struct ipr_ioa_cfg *)scsi_cmd->device->host->hostdata;
5679 	res = scsi_cmd->device->hostdata;
5680 	scsi_cmd->result = (DID_OK << 16);
5681 
5682 	/*
5683 	 * We are currently blocking all devices due to a host reset
5684 	 * We have told the host to stop giving us new requests, but
5685 	 * ERP ops don't count. FIXME
5686 	 */
5687 	if (unlikely(!ioa_cfg->allow_cmds && !ioa_cfg->ioa_is_dead))
5688 		return SCSI_MLQUEUE_HOST_BUSY;
5689 
5690 	/*
5691 	 * FIXME - Create scsi_set_host_offline interface
5692 	 *  and the ioa_is_dead check can be removed
5693 	 */
5694 	if (unlikely(ioa_cfg->ioa_is_dead || !res)) {
5695 		memset(scsi_cmd->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
5696 		scsi_cmd->result = (DID_NO_CONNECT << 16);
5697 		scsi_cmd->scsi_done(scsi_cmd);
5698 		return 0;
5699 	}
5700 
5701 	if (ipr_is_gata(res) && res->sata_port)
5702 		return ata_sas_queuecmd(scsi_cmd, done, res->sata_port->ap);
5703 
5704 	ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
5705 	ioarcb = &ipr_cmd->ioarcb;
5706 	list_add_tail(&ipr_cmd->queue, &ioa_cfg->pending_q);
5707 
5708 	memcpy(ioarcb->cmd_pkt.cdb, scsi_cmd->cmnd, scsi_cmd->cmd_len);
5709 	ipr_cmd->scsi_cmd = scsi_cmd;
5710 	ioarcb->res_handle = res->res_handle;
5711 	ipr_cmd->done = ipr_scsi_done;
5712 	ipr_trc_hook(ipr_cmd, IPR_TRACE_START, IPR_GET_RES_PHYS_LOC(res));
5713 
5714 	if (ipr_is_gscsi(res) || ipr_is_vset_device(res)) {
5715 		if (scsi_cmd->underflow == 0)
5716 			ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_ULEN_CHK;
5717 
5718 		if (res->needs_sync_complete) {
5719 			ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_SYNC_COMPLETE;
5720 			res->needs_sync_complete = 0;
5721 		}
5722 
5723 		ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_LINK_DESC;
5724 		ioarcb->cmd_pkt.flags_lo |= IPR_FLAGS_LO_DELAY_AFTER_RST;
5725 		ioarcb->cmd_pkt.flags_lo |= IPR_FLAGS_LO_ALIGNED_BFR;
5726 		ioarcb->cmd_pkt.flags_lo |= ipr_get_task_attributes(scsi_cmd);
5727 	}
5728 
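	/*
	 * CDB opcodes 0xC0 and above are vendor specific; route them to the
	 * IOA itself unless the target is a generic SCSI device (Query
	 * Resource State is treated as an IOA command even then).
	 */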
5729 	if (scsi_cmd->cmnd[0] >= 0xC0 &&
5730 	    (!ipr_is_gscsi(res) || scsi_cmd->cmnd[0] == IPR_QUERY_RSRC_STATE))
5731 		ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
5732 
5733 	if (likely(rc == 0)) {
5734 		if (ioa_cfg->sis64)
5735 			rc = ipr_build_ioadl64(ioa_cfg, ipr_cmd);
5736 		else
5737 			rc = ipr_build_ioadl(ioa_cfg, ipr_cmd);
5738 	}
5739 
5740 	if (likely(rc == 0)) {
5741 		mb();
5742 		ipr_send_command(ipr_cmd);
5743 	} else {
5744 		list_move_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
5745 		return SCSI_MLQUEUE_HOST_BUSY;
5746 	}
5747 
5748 	return 0;
5749 }
5750 
5751 /**
5752  * ipr_ioctl - IOCTL handler
5753  * @sdev:	scsi device struct
5754  * @cmd:	IOCTL cmd
5755  * @arg:	IOCTL arg
5756  *
5757  * Return value:
5758  * 	0 on success / other on failure
5759  **/
5760 static int ipr_ioctl(struct scsi_device *sdev, int cmd, void __user *arg)
5761 {
5762 	struct ipr_resource_entry *res;
5763 
5764 	res = (struct ipr_resource_entry *)sdev->hostdata;
5765 	if (res && ipr_is_gata(res)) {
5766 		if (cmd == HDIO_GET_IDENTITY)
5767 			return -ENOTTY;
5768 		return ata_sas_scsi_ioctl(res->sata_port->ap, sdev, cmd, arg);
5769 	}
5770 
5771 	return -EINVAL;
5772 }
5773 
5774 /**
5775  * ipr_info - Get information about the card/driver
5776  * @scsi_host:	scsi host struct
5777  *
5778  * Return value:
5779  * 	pointer to buffer with description string
5780  **/
5781 static const char * ipr_ioa_info(struct Scsi_Host *host)
5782 {
5783 	static char buffer[512];
5784 	struct ipr_ioa_cfg *ioa_cfg;
5785 	unsigned long lock_flags = 0;
5786 
5787 	ioa_cfg = (struct ipr_ioa_cfg *) host->hostdata;
5788 
5789 	spin_lock_irqsave(host->host_lock, lock_flags);
5790 	sprintf(buffer, "IBM %X Storage Adapter", ioa_cfg->type);
5791 	spin_unlock_irqrestore(host->host_lock, lock_flags);
5792 
5793 	return buffer;
5794 }
5795 
5796 static struct scsi_host_template driver_template = {
5797 	.module = THIS_MODULE,
5798 	.name = "IPR",
5799 	.info = ipr_ioa_info,
5800 	.ioctl = ipr_ioctl,
5801 	.queuecommand = ipr_queuecommand,
5802 	.eh_abort_handler = ipr_eh_abort,
5803 	.eh_device_reset_handler = ipr_eh_dev_reset,
5804 	.eh_host_reset_handler = ipr_eh_host_reset,
5805 	.slave_alloc = ipr_slave_alloc,
5806 	.slave_configure = ipr_slave_configure,
5807 	.slave_destroy = ipr_slave_destroy,
5808 	.target_alloc = ipr_target_alloc,
5809 	.target_destroy = ipr_target_destroy,
5810 	.change_queue_depth = ipr_change_queue_depth,
5811 	.change_queue_type = ipr_change_queue_type,
5812 	.bios_param = ipr_biosparam,
5813 	.can_queue = IPR_MAX_COMMANDS,
5814 	.this_id = -1,
5815 	.sg_tablesize = IPR_MAX_SGLIST,
5816 	.max_sectors = IPR_IOA_MAX_SECTORS,
5817 	.cmd_per_lun = IPR_MAX_CMD_PER_LUN,
5818 	.use_clustering = ENABLE_CLUSTERING,
5819 	.shost_attrs = ipr_ioa_attrs,
5820 	.sdev_attrs = ipr_dev_attrs,
5821 	.proc_name = IPR_NAME
5822 };
5823 
5824 /**
5825  * ipr_ata_phy_reset - libata phy_reset handler
5826  * @ap:		ata port to reset
5827  *
5828  **/
5829 static void ipr_ata_phy_reset(struct ata_port *ap)
5830 {
5831 	unsigned long flags;
5832 	struct ipr_sata_port *sata_port = ap->private_data;
5833 	struct ipr_resource_entry *res = sata_port->res;
5834 	struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg;
5835 	int rc;
5836 
5837 	ENTER;
5838 	spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
5839 	while (ioa_cfg->in_reset_reload) {
5840 		spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
5841 		wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
5842 		spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
5843 	}
5844 
5845 	if (!ioa_cfg->allow_cmds)
5846 		goto out_unlock;
5847 
5848 	rc = ipr_device_reset(ioa_cfg, res);
5849 
5850 	if (rc) {
5851 		ap->link.device[0].class = ATA_DEV_NONE;
5852 		goto out_unlock;
5853 	}
5854 
5855 	ap->link.device[0].class = res->ata_class;
5856 	if (ap->link.device[0].class == ATA_DEV_UNKNOWN)
5857 		ap->link.device[0].class = ATA_DEV_NONE;
5858 
5859 out_unlock:
5860 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
5861 	LEAVE;
5862 }
5863 
5864 /**
5865  * ipr_ata_post_internal - Cleanup after an internal command
5866  * @qc:	ATA queued command
5867  *
5868  * Return value:
5869  * 	none
5870  **/
5871 static void ipr_ata_post_internal(struct ata_queued_cmd *qc)
5872 {
5873 	struct ipr_sata_port *sata_port = qc->ap->private_data;
5874 	struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg;
5875 	struct ipr_cmnd *ipr_cmd;
5876 	unsigned long flags;
5877 
5878 	spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
5879 	while (ioa_cfg->in_reset_reload) {
5880 		spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
5881 		wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
5882 		spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
5883 	}
5884 
5885 	list_for_each_entry(ipr_cmd, &ioa_cfg->pending_q, queue) {
5886 		if (ipr_cmd->qc == qc) {
5887 			ipr_device_reset(ioa_cfg, sata_port->res);
5888 			break;
5889 		}
5890 	}
5891 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
5892 }
5893 
5894 /**
5895  * ipr_copy_sata_tf - Copy a SATA taskfile to an IOA data structure
5896  * @regs:	destination
5897  * @tf:	source ATA taskfile
5898  *
5899  * Return value:
5900  * 	none
5901  **/
5902 static void ipr_copy_sata_tf(struct ipr_ioarcb_ata_regs *regs,
5903 			     struct ata_taskfile *tf)
5904 {
5905 	regs->feature = tf->feature;
5906 	regs->nsect = tf->nsect;
5907 	regs->lbal = tf->lbal;
5908 	regs->lbam = tf->lbam;
5909 	regs->lbah = tf->lbah;
5910 	regs->device = tf->device;
5911 	regs->command = tf->command;
5912 	regs->hob_feature = tf->hob_feature;
5913 	regs->hob_nsect = tf->hob_nsect;
5914 	regs->hob_lbal = tf->hob_lbal;
5915 	regs->hob_lbam = tf->hob_lbam;
5916 	regs->hob_lbah = tf->hob_lbah;
5917 	regs->ctl = tf->ctl;
5918 }
5919 
5920 /**
5921  * ipr_sata_done - done function for SATA commands
5922  * @ipr_cmd:	ipr command struct
5923  *
5924  * This function is invoked by the interrupt handler for
5925  * ops generated by the SCSI mid-layer to SATA devices
5926  *
5927  * Return value:
5928  * 	none
5929  **/
5930 static void ipr_sata_done(struct ipr_cmnd *ipr_cmd)
5931 {
5932 	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5933 	struct ata_queued_cmd *qc = ipr_cmd->qc;
5934 	struct ipr_sata_port *sata_port = qc->ap->private_data;
5935 	struct ipr_resource_entry *res = sata_port->res;
5936 	u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
5937 
5938 	if (ipr_cmd->ioa_cfg->sis64)
5939 		memcpy(&sata_port->ioasa, &ipr_cmd->s.ioasa64.u.gata,
5940 		       sizeof(struct ipr_ioasa_gata));
5941 	else
5942 		memcpy(&sata_port->ioasa, &ipr_cmd->s.ioasa.u.gata,
5943 		       sizeof(struct ipr_ioasa_gata));
5944 	ipr_dump_ioasa(ioa_cfg, ipr_cmd, res);
5945 
5946 	if (be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc_specific) & IPR_ATA_DEVICE_WAS_RESET)
5947 		scsi_report_device_reset(ioa_cfg->host, res->bus, res->target);
5948 
5949 	if (IPR_IOASC_SENSE_KEY(ioasc) > RECOVERED_ERROR)
5950 		qc->err_mask |= __ac_err_mask(sata_port->ioasa.status);
5951 	else
5952 		qc->err_mask |= ac_err_mask(sata_port->ioasa.status);
5953 	list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
5954 	ata_qc_complete(qc);
5955 }
5956 
5957 /**
5958  * ipr_build_ata_ioadl64 - Build an ATA scatter/gather list
5959  * @ipr_cmd:	ipr command struct
5960  * @qc:		ATA queued command
5961  *
5962  **/
5963 static void ipr_build_ata_ioadl64(struct ipr_cmnd *ipr_cmd,
5964 				  struct ata_queued_cmd *qc)
5965 {
5966 	u32 ioadl_flags = 0;
5967 	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
5968 	struct ipr_ioadl64_desc *ioadl64 = ipr_cmd->i.ioadl64;
5969 	struct ipr_ioadl64_desc *last_ioadl64 = NULL;
5970 	int len = qc->nbytes;
5971 	struct scatterlist *sg;
5972 	unsigned int si;
5973 	dma_addr_t dma_addr = ipr_cmd->dma_addr;
5974 
5975 	if (len == 0)
5976 		return;
5977 
5978 	if (qc->dma_dir == DMA_TO_DEVICE) {
5979 		ioadl_flags = IPR_IOADL_FLAGS_WRITE;
5980 		ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
5981 	} else if (qc->dma_dir == DMA_FROM_DEVICE)
5982 		ioadl_flags = IPR_IOADL_FLAGS_READ;
5983 
5984 	ioarcb->data_transfer_length = cpu_to_be32(len);
5985 	ioarcb->ioadl_len =
5986 		cpu_to_be32(sizeof(struct ipr_ioadl64_desc) * ipr_cmd->dma_use_sg);
5987 	ioarcb->u.sis64_addr_data.data_ioadl_addr =
5988 		cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, i.ata_ioadl));
5989 
5990 	for_each_sg(qc->sg, sg, qc->n_elem, si) {
5991 		ioadl64->flags = cpu_to_be32(ioadl_flags);
5992 		ioadl64->data_len = cpu_to_be32(sg_dma_len(sg));
5993 		ioadl64->address = cpu_to_be64(sg_dma_address(sg));
5994 
5995 		last_ioadl64 = ioadl64;
5996 		ioadl64++;
5997 	}
5998 
5999 	if (likely(last_ioadl64))
6000 		last_ioadl64->flags |= cpu_to_be32(IPR_IOADL_FLAGS_LAST);
6001 }
6002 
6003 /**
6004  * ipr_build_ata_ioadl - Build an ATA scatter/gather list
6005  * @ipr_cmd:	ipr command struct
6006  * @qc:		ATA queued command
6007  *
6008  **/
6009 static void ipr_build_ata_ioadl(struct ipr_cmnd *ipr_cmd,
6010 				struct ata_queued_cmd *qc)
6011 {
6012 	u32 ioadl_flags = 0;
6013 	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
6014 	struct ipr_ioadl_desc *ioadl = ipr_cmd->i.ioadl;
6015 	struct ipr_ioadl_desc *last_ioadl = NULL;
6016 	int len = qc->nbytes;
6017 	struct scatterlist *sg;
6018 	unsigned int si;
6019 
6020 	if (len == 0)
6021 		return;
6022 
6023 	if (qc->dma_dir == DMA_TO_DEVICE) {
6024 		ioadl_flags = IPR_IOADL_FLAGS_WRITE;
6025 		ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
6026 		ioarcb->data_transfer_length = cpu_to_be32(len);
6027 		ioarcb->ioadl_len =
6028 			cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
6029 	} else if (qc->dma_dir == DMA_FROM_DEVICE) {
6030 		ioadl_flags = IPR_IOADL_FLAGS_READ;
6031 		ioarcb->read_data_transfer_length = cpu_to_be32(len);
6032 		ioarcb->read_ioadl_len =
6033 			cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
6034 	}
6035 
6036 	for_each_sg(qc->sg, sg, qc->n_elem, si) {
6037 		ioadl->flags_and_data_len = cpu_to_be32(ioadl_flags | sg_dma_len(sg));
6038 		ioadl->address = cpu_to_be32(sg_dma_address(sg));
6039 
6040 		last_ioadl = ioadl;
6041 		ioadl++;
6042 	}
6043 
6044 	if (likely(last_ioadl))
6045 		last_ioadl->flags_and_data_len |= cpu_to_be32(IPR_IOADL_FLAGS_LAST);
6046 }
6047 
6048 /**
6049  * ipr_qc_issue - Issue a SATA qc to a device
6050  * @qc:	queued command
6051  *
6052  * Return value:
6053  * 	0 if success
6054  **/
6055 static unsigned int ipr_qc_issue(struct ata_queued_cmd *qc)
6056 {
6057 	struct ata_port *ap = qc->ap;
6058 	struct ipr_sata_port *sata_port = ap->private_data;
6059 	struct ipr_resource_entry *res = sata_port->res;
6060 	struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg;
6061 	struct ipr_cmnd *ipr_cmd;
6062 	struct ipr_ioarcb *ioarcb;
6063 	struct ipr_ioarcb_ata_regs *regs;
6064 
6065 	if (unlikely(!ioa_cfg->allow_cmds || ioa_cfg->ioa_is_dead))
6066 		return AC_ERR_SYSTEM;
6067 
6068 	ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
6069 	ioarcb = &ipr_cmd->ioarcb;
6070 
6071 	if (ioa_cfg->sis64) {
6072 		regs = &ipr_cmd->i.ata_ioadl.regs;
6073 		ioarcb->add_cmd_parms_offset = cpu_to_be16(sizeof(*ioarcb));
6074 	} else
6075 		regs = &ioarcb->u.add_data.u.regs;
6076 
6077 	memset(regs, 0, sizeof(*regs));
6078 	ioarcb->add_cmd_parms_len = cpu_to_be16(sizeof(*regs));
6079 
6080 	list_add_tail(&ipr_cmd->queue, &ioa_cfg->pending_q);
6081 	ipr_cmd->qc = qc;
6082 	ipr_cmd->done = ipr_sata_done;
6083 	ipr_cmd->ioarcb.res_handle = res->res_handle;
6084 	ioarcb->cmd_pkt.request_type = IPR_RQTYPE_ATA_PASSTHRU;
6085 	ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_LINK_DESC;
6086 	ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_ULEN_CHK;
6087 	ipr_cmd->dma_use_sg = qc->n_elem;
6088 
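	/*
	 * The queued command's S/G list has already been DMA-mapped by the
	 * libata core, so just record the element count and build the
	 * matching IOADL.
	 */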
6089 	if (ioa_cfg->sis64)
6090 		ipr_build_ata_ioadl64(ipr_cmd, qc);
6091 	else
6092 		ipr_build_ata_ioadl(ipr_cmd, qc);
6093 
6094 	regs->flags |= IPR_ATA_FLAG_STATUS_ON_GOOD_COMPLETION;
6095 	ipr_copy_sata_tf(regs, &qc->tf);
6096 	memcpy(ioarcb->cmd_pkt.cdb, qc->cdb, IPR_MAX_CDB_LEN);
6097 	ipr_trc_hook(ipr_cmd, IPR_TRACE_START, IPR_GET_RES_PHYS_LOC(res));
6098 
6099 	switch (qc->tf.protocol) {
6100 	case ATA_PROT_NODATA:
6101 	case ATA_PROT_PIO:
6102 		break;
6103 
6104 	case ATA_PROT_DMA:
6105 		regs->flags |= IPR_ATA_FLAG_XFER_TYPE_DMA;
6106 		break;
6107 
6108 	case ATAPI_PROT_PIO:
6109 	case ATAPI_PROT_NODATA:
6110 		regs->flags |= IPR_ATA_FLAG_PACKET_CMD;
6111 		break;
6112 
6113 	case ATAPI_PROT_DMA:
6114 		regs->flags |= IPR_ATA_FLAG_PACKET_CMD;
6115 		regs->flags |= IPR_ATA_FLAG_XFER_TYPE_DMA;
6116 		break;
6117 
6118 	default:
6119 		WARN_ON(1);
6120 		return AC_ERR_INVALID;
6121 	}
6122 
6123 	mb();
6124 
6125 	ipr_send_command(ipr_cmd);
6126 
6127 	return 0;
6128 }
6129 
6130 /**
6131  * ipr_qc_fill_rtf - Read result TF
6132  * @qc: ATA queued command
6133  *
6134  * Return value:
6135  * 	true
6136  **/
6137 static bool ipr_qc_fill_rtf(struct ata_queued_cmd *qc)
6138 {
6139 	struct ipr_sata_port *sata_port = qc->ap->private_data;
6140 	struct ipr_ioasa_gata *g = &sata_port->ioasa;
6141 	struct ata_taskfile *tf = &qc->result_tf;
6142 
6143 	tf->feature = g->error;
6144 	tf->nsect = g->nsect;
6145 	tf->lbal = g->lbal;
6146 	tf->lbam = g->lbam;
6147 	tf->lbah = g->lbah;
6148 	tf->device = g->device;
6149 	tf->command = g->status;
6150 	tf->hob_nsect = g->hob_nsect;
6151 	tf->hob_lbal = g->hob_lbal;
6152 	tf->hob_lbam = g->hob_lbam;
6153 	tf->hob_lbah = g->hob_lbah;
6154 	tf->ctl = g->alt_status;
6155 
6156 	return true;
6157 }
6158 
6159 static struct ata_port_operations ipr_sata_ops = {
6160 	.phy_reset = ipr_ata_phy_reset,
6161 	.hardreset = ipr_sata_reset,
6162 	.post_internal_cmd = ipr_ata_post_internal,
6163 	.qc_prep = ata_noop_qc_prep,
6164 	.qc_issue = ipr_qc_issue,
6165 	.qc_fill_rtf = ipr_qc_fill_rtf,
6166 	.port_start = ata_sas_port_start,
6167 	.port_stop = ata_sas_port_stop
6168 };
6169 
6170 static struct ata_port_info sata_port_info = {
6171 	.flags	= ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY | ATA_FLAG_SATA_RESET |
6172 	ATA_FLAG_MMIO | ATA_FLAG_PIO_DMA,
6173 	.pio_mask	= 0x10, /* pio4 */
6174 	.mwdma_mask = 0x07,
6175 	.udma_mask	= 0x7f, /* udma0-6 */
6176 	.port_ops	= &ipr_sata_ops
6177 };
6178 
6179 #ifdef CONFIG_PPC_PSERIES
6180 static const u16 ipr_blocked_processors[] = {
6181 	PV_NORTHSTAR,
6182 	PV_PULSAR,
6183 	PV_POWER4,
6184 	PV_ICESTAR,
6185 	PV_SSTAR,
6186 	PV_POWER4p,
6187 	PV_630,
6188 	PV_630p
6189 };
6190 
6191 /**
6192  * ipr_invalid_adapter - Determine if this adapter is supported on this hardware
6193  * @ioa_cfg:	ioa cfg struct
6194  *
6195  * Adapters that use Gemstone revision < 3.1 do not work reliably on
6196  * certain pSeries hardware. This function determines if the given
6197  * adapter is in one of these configurations or not.
6198  *
6199  * Return value:
6200  * 	1 if adapter is not supported / 0 if adapter is supported
6201  **/
6202 static int ipr_invalid_adapter(struct ipr_ioa_cfg *ioa_cfg)
6203 {
6204 	int i;
6205 
6206 	if ((ioa_cfg->type == 0x5702) && (ioa_cfg->pdev->revision < 4)) {
6207 		for (i = 0; i < ARRAY_SIZE(ipr_blocked_processors); i++) {
6208 			if (__is_processor(ipr_blocked_processors[i]))
6209 				return 1;
6210 		}
6211 	}
6212 	return 0;
6213 }
6214 #else
6215 #define ipr_invalid_adapter(ioa_cfg) 0
6216 #endif
6217 
6218 /**
6219  * ipr_ioa_bringdown_done - IOA bring down completion.
6220  * @ipr_cmd:	ipr command struct
6221  *
6222  * This function processes the completion of an adapter bring down.
6223  * It wakes any reset sleepers.
6224  *
6225  * Return value:
6226  * 	IPR_RC_JOB_RETURN
6227  **/
6228 static int ipr_ioa_bringdown_done(struct ipr_cmnd *ipr_cmd)
6229 {
6230 	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6231 
6232 	ENTER;
6233 	ioa_cfg->in_reset_reload = 0;
6234 	ioa_cfg->reset_retries = 0;
6235 	list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
6236 	wake_up_all(&ioa_cfg->reset_wait_q);
6237 
6238 	spin_unlock_irq(ioa_cfg->host->host_lock);
6239 	scsi_unblock_requests(ioa_cfg->host);
6240 	spin_lock_irq(ioa_cfg->host->host_lock);
6241 	LEAVE;
6242 
6243 	return IPR_RC_JOB_RETURN;
6244 }
6245 
6246 /**
6247  * ipr_ioa_reset_done - IOA reset completion.
6248  * @ipr_cmd:	ipr command struct
6249  *
6250  * This function processes the completion of an adapter reset.
6251  * It schedules any necessary mid-layer add/removes and
6252  * wakes any reset sleepers.
6253  *
6254  * Return value:
6255  * 	IPR_RC_JOB_RETURN
6256  **/
6257 static int ipr_ioa_reset_done(struct ipr_cmnd *ipr_cmd)
6258 {
6259 	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6260 	struct ipr_resource_entry *res;
6261 	struct ipr_hostrcb *hostrcb, *temp;
6262 	int i = 0;
6263 
6264 	ENTER;
6265 	ioa_cfg->in_reset_reload = 0;
6266 	ioa_cfg->allow_cmds = 1;
6267 	ioa_cfg->reset_cmd = NULL;
6268 	ioa_cfg->doorbell |= IPR_RUNTIME_RESET;
6269 
6270 	list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
6271 		if (ioa_cfg->allow_ml_add_del && (res->add_to_ml || res->del_from_ml)) {
6272 			ipr_trace;
6273 			break;
6274 		}
6275 	}
6276 	schedule_work(&ioa_cfg->work_q);
6277 
6278 	list_for_each_entry_safe(hostrcb, temp, &ioa_cfg->hostrcb_free_q, queue) {
6279 		list_del(&hostrcb->queue);
6280 		if (i++ < IPR_NUM_LOG_HCAMS)
6281 			ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_LOG_DATA, hostrcb);
6282 		else
6283 			ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE, hostrcb);
6284 	}
6285 
6286 	scsi_report_bus_reset(ioa_cfg->host, IPR_VSET_BUS);
6287 	dev_info(&ioa_cfg->pdev->dev, "IOA initialized.\n");
6288 
6289 	ioa_cfg->reset_retries = 0;
6290 	list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
6291 	wake_up_all(&ioa_cfg->reset_wait_q);
6292 
6293 	spin_unlock(ioa_cfg->host->host_lock);
6294 	scsi_unblock_requests(ioa_cfg->host);
6295 	spin_lock(ioa_cfg->host->host_lock);
6296 
6297 	if (!ioa_cfg->allow_cmds)
6298 		scsi_block_requests(ioa_cfg->host);
6299 
6300 	LEAVE;
6301 	return IPR_RC_JOB_RETURN;
6302 }
6303 
6304 /**
6305  * ipr_set_sup_dev_dflt - Initialize a Set Supported Device buffer
6306  * @supported_dev:	supported device struct
6307  * @vpids:			vendor product id struct
6308  *
6309  * Return value:
6310  * 	none
6311  **/
6312 static void ipr_set_sup_dev_dflt(struct ipr_supported_device *supported_dev,
6313 				 struct ipr_std_inq_vpids *vpids)
6314 {
6315 	memset(supported_dev, 0, sizeof(struct ipr_supported_device));
6316 	memcpy(&supported_dev->vpids, vpids, sizeof(struct ipr_std_inq_vpids));
6317 	supported_dev->num_records = 1;
6318 	supported_dev->data_length =
6319 		cpu_to_be16(sizeof(struct ipr_supported_device));
6320 	supported_dev->reserved = 0;
6321 }
6322 
6323 /**
6324  * ipr_set_supported_devs - Send Set Supported Devices for a device
6325  * @ipr_cmd:	ipr command struct
6326  *
6327  * This function sends a Set Supported Devices to the adapter
6328  *
6329  * Return value:
6330  * 	IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
6331  **/
6332 static int ipr_set_supported_devs(struct ipr_cmnd *ipr_cmd)
6333 {
6334 	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6335 	struct ipr_supported_device *supp_dev = &ioa_cfg->vpd_cbs->supp_dev;
6336 	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
6337 	struct ipr_resource_entry *res = ipr_cmd->u.res;
6338 
6339 	ipr_cmd->job_step = ipr_ioa_reset_done;
6340 
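	/*
	 * Issue Set Supported Devices one SCSI disk at a time, picking up
	 * where the previous pass left off; on non-SIS64 adapters job_step
	 * is reset so this function is re-entered for the remaining disks.
	 */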
6341 	list_for_each_entry_continue(res, &ioa_cfg->used_res_q, queue) {
6342 		if (!ipr_is_scsi_disk(res))
6343 			continue;
6344 
6345 		ipr_cmd->u.res = res;
6346 		ipr_set_sup_dev_dflt(supp_dev, &res->std_inq_data.vpids);
6347 
6348 		ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
6349 		ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
6350 		ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
6351 
6352 		ioarcb->cmd_pkt.cdb[0] = IPR_SET_SUPPORTED_DEVICES;
6353 		ioarcb->cmd_pkt.cdb[1] = IPR_SET_ALL_SUPPORTED_DEVICES;
6354 		ioarcb->cmd_pkt.cdb[7] = (sizeof(struct ipr_supported_device) >> 8) & 0xff;
6355 		ioarcb->cmd_pkt.cdb[8] = sizeof(struct ipr_supported_device) & 0xff;
6356 
6357 		ipr_init_ioadl(ipr_cmd,
6358 			       ioa_cfg->vpd_cbs_dma +
6359 				 offsetof(struct ipr_misc_cbs, supp_dev),
6360 			       sizeof(struct ipr_supported_device),
6361 			       IPR_IOADL_FLAGS_WRITE_LAST);
6362 
6363 		ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout,
6364 			   IPR_SET_SUP_DEVICE_TIMEOUT);
6365 
6366 		if (!ioa_cfg->sis64)
6367 			ipr_cmd->job_step = ipr_set_supported_devs;
6368 		return IPR_RC_JOB_RETURN;
6369 	}
6370 
6371 	return IPR_RC_JOB_CONTINUE;
6372 }
6373 
6374 /**
6375  * ipr_get_mode_page - Locate specified mode page
6376  * @mode_pages:	mode page buffer
6377  * @page_code:	page code to find
6378  * @len:		minimum required length for mode page
6379  *
6380  * Return value:
6381  * 	pointer to mode page / NULL on failure
6382  **/
6383 static void *ipr_get_mode_page(struct ipr_mode_pages *mode_pages,
6384 			       u32 page_code, u32 len)
6385 {
6386 	struct ipr_mode_page_hdr *mode_hdr;
6387 	u32 page_length;
6388 	u32 length;
6389 
6390 	if (!mode_pages || (mode_pages->hdr.length == 0))
6391 		return NULL;
6392 
6393 	length = (mode_pages->hdr.length + 1) - 4 - mode_pages->hdr.block_desc_len;
6394 	mode_hdr = (struct ipr_mode_page_hdr *)
6395 		(mode_pages->data + mode_pages->hdr.block_desc_len);
6396 
6397 	while (length) {
6398 		if (IPR_GET_MODE_PAGE_CODE(mode_hdr) == page_code) {
6399 			if (mode_hdr->page_length >= (len - sizeof(struct ipr_mode_page_hdr)))
6400 				return mode_hdr;
6401 			break;
6402 		} else {
6403 			page_length = (sizeof(struct ipr_mode_page_hdr) +
6404 				       mode_hdr->page_length);
6405 			length -= page_length;
6406 			mode_hdr = (struct ipr_mode_page_hdr *)
6407 				((unsigned long)mode_hdr + page_length);
6408 		}
6409 	}
6410 	return NULL;
6411 }
6412 
6413 /**
6414  * ipr_check_term_power - Check for term power errors
6415  * @ioa_cfg:	ioa config struct
6416  * @mode_pages:	IOAFP mode pages buffer
6417  *
6418  * Check the IOAFP's mode page 28 for term power errors
6419  *
6420  * Return value:
6421  * 	nothing
6422  **/
6423 static void ipr_check_term_power(struct ipr_ioa_cfg *ioa_cfg,
6424 				 struct ipr_mode_pages *mode_pages)
6425 {
6426 	int i;
6427 	int entry_length;
6428 	struct ipr_dev_bus_entry *bus;
6429 	struct ipr_mode_page28 *mode_page;
6430 
6431 	mode_page = ipr_get_mode_page(mode_pages, 0x28,
6432 				      sizeof(struct ipr_mode_page28));
6433 
6434 	entry_length = mode_page->entry_length;
6435 
6436 	bus = mode_page->bus;
6437 
6438 	for (i = 0; i < mode_page->num_entries; i++) {
6439 		if (bus->flags & IPR_SCSI_ATTR_NO_TERM_PWR) {
6440 			dev_err(&ioa_cfg->pdev->dev,
6441 				"Term power is absent on scsi bus %d\n",
6442 				bus->res_addr.bus);
6443 		}
6444 
6445 		bus = (struct ipr_dev_bus_entry *)((char *)bus + entry_length);
6446 	}
6447 }
6448 
6449 /**
6450  * ipr_scsi_bus_speed_limit - Limit the SCSI speed based on SES table
6451  * @ioa_cfg:	ioa config struct
6452  *
6453  * Looks through the config table checking for SES devices. If
6454  * the SES device is in the SES table indicating a maximum SCSI
6455  * bus speed, the speed is limited for the bus.
6456  *
6457  * Return value:
6458  * 	none
6459  **/
6460 static void ipr_scsi_bus_speed_limit(struct ipr_ioa_cfg *ioa_cfg)
6461 {
6462 	u32 max_xfer_rate;
6463 	int i;
6464 
6465 	for (i = 0; i < IPR_MAX_NUM_BUSES; i++) {
6466 		max_xfer_rate = ipr_get_max_scsi_speed(ioa_cfg, i,
6467 						       ioa_cfg->bus_attr[i].bus_width);
6468 
6469 		if (max_xfer_rate < ioa_cfg->bus_attr[i].max_xfer_rate)
6470 			ioa_cfg->bus_attr[i].max_xfer_rate = max_xfer_rate;
6471 	}
6472 }
6473 
6474 /**
6475  * ipr_modify_ioafp_mode_page_28 - Modify IOAFP Mode Page 28
6476  * @ioa_cfg:	ioa config struct
6477  * @mode_pages:	mode page 28 buffer
6478  *
6479  * Updates mode page 28 based on driver configuration
6480  *
6481  * Return value:
6482  * 	none
6483  **/
6484 static void ipr_modify_ioafp_mode_page_28(struct ipr_ioa_cfg *ioa_cfg,
6485 					  	struct ipr_mode_pages *mode_pages)
6486 {
6487 	int i, entry_length;
6488 	struct ipr_dev_bus_entry *bus;
6489 	struct ipr_bus_attributes *bus_attr;
6490 	struct ipr_mode_page28 *mode_page;
6491 
6492 	mode_page = ipr_get_mode_page(mode_pages, 0x28,
6493 				      sizeof(struct ipr_mode_page28));
6494 
6495 	entry_length = mode_page->entry_length;
6496 
6497 	/* Loop for each device bus entry */
6498 	for (i = 0, bus = mode_page->bus;
6499 	     i < mode_page->num_entries;
6500 	     i++, bus = (struct ipr_dev_bus_entry *)((u8 *)bus + entry_length)) {
6501 		if (bus->res_addr.bus > IPR_MAX_NUM_BUSES) {
6502 			dev_err(&ioa_cfg->pdev->dev,
6503 				"Invalid resource address reported: 0x%08X\n",
6504 				IPR_GET_PHYS_LOC(bus->res_addr));
6505 			continue;
6506 		}
6507 
6508 		bus_attr = &ioa_cfg->bus_attr[i];
6509 		bus->extended_reset_delay = IPR_EXTENDED_RESET_DELAY;
6510 		bus->bus_width = bus_attr->bus_width;
6511 		bus->max_xfer_rate = cpu_to_be32(bus_attr->max_xfer_rate);
6512 		bus->flags &= ~IPR_SCSI_ATTR_QAS_MASK;
6513 		if (bus_attr->qas_enabled)
6514 			bus->flags |= IPR_SCSI_ATTR_ENABLE_QAS;
6515 		else
6516 			bus->flags |= IPR_SCSI_ATTR_DISABLE_QAS;
6517 	}
6518 }
6519 
6520 /**
6521  * ipr_build_mode_select - Build a mode select command
6522  * @ipr_cmd:	ipr command struct
6523  * @res_handle:	resource handle to send command to
6524  * @parm:		Byte 2 of Mode Sense command
6525  * @dma_addr:	DMA buffer address
6526  * @xfer_len:	data transfer length
6527  *
6528  * Return value:
6529  * 	none
6530  **/
6531 static void ipr_build_mode_select(struct ipr_cmnd *ipr_cmd,
6532 				  __be32 res_handle, u8 parm,
6533 				  dma_addr_t dma_addr, u8 xfer_len)
6534 {
6535 	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
6536 
6537 	ioarcb->res_handle = res_handle;
6538 	ioarcb->cmd_pkt.request_type = IPR_RQTYPE_SCSICDB;
6539 	ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
6540 	ioarcb->cmd_pkt.cdb[0] = MODE_SELECT;
6541 	ioarcb->cmd_pkt.cdb[1] = parm;
6542 	ioarcb->cmd_pkt.cdb[4] = xfer_len;
6543 
6544 	ipr_init_ioadl(ipr_cmd, dma_addr, xfer_len, IPR_IOADL_FLAGS_WRITE_LAST);
6545 }
6546 
6547 /**
6548  * ipr_ioafp_mode_select_page28 - Issue Mode Select Page 28 to IOA
6549  * @ipr_cmd:	ipr command struct
6550  *
6551  * This function sets up the SCSI bus attributes and sends
6552  * a Mode Select for Page 28 to activate them.
6553  *
6554  * Return value:
6555  * 	IPR_RC_JOB_RETURN
6556  **/
6557 static int ipr_ioafp_mode_select_page28(struct ipr_cmnd *ipr_cmd)
6558 {
6559 	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6560 	struct ipr_mode_pages *mode_pages = &ioa_cfg->vpd_cbs->mode_pages;
6561 	int length;
6562 
6563 	ENTER;
6564 	ipr_scsi_bus_speed_limit(ioa_cfg);
6565 	ipr_check_term_power(ioa_cfg, mode_pages);
6566 	ipr_modify_ioafp_mode_page_28(ioa_cfg, mode_pages);
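	/*
	 * The mode data length field does not include itself, so add one to
	 * get the full transfer length; the field is then zeroed since it
	 * is reserved for MODE SELECT.
	 */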
6567 	length = mode_pages->hdr.length + 1;
6568 	mode_pages->hdr.length = 0;
6569 
6570 	ipr_build_mode_select(ipr_cmd, cpu_to_be32(IPR_IOA_RES_HANDLE), 0x11,
6571 			      ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, mode_pages),
6572 			      length);
6573 
6574 	ipr_cmd->job_step = ipr_set_supported_devs;
6575 	ipr_cmd->u.res = list_entry(ioa_cfg->used_res_q.next,
6576 				    struct ipr_resource_entry, queue);
6577 	ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
6578 
6579 	LEAVE;
6580 	return IPR_RC_JOB_RETURN;
6581 }
6582 
6583 /**
6584  * ipr_build_mode_sense - Builds a mode sense command
6585  * @ipr_cmd:	ipr command struct
6586  * @res_handle:	resource handle to send command to
6587  * @parm:		Byte 2 of mode sense command
6588  * @dma_addr:	DMA address of mode sense buffer
6589  * @xfer_len:	Size of DMA buffer
6590  *
6591  * Return value:
6592  * 	none
6593  **/
6594 static void ipr_build_mode_sense(struct ipr_cmnd *ipr_cmd,
6595 				 __be32 res_handle,
6596 				 u8 parm, dma_addr_t dma_addr, u8 xfer_len)
6597 {
6598 	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
6599 
6600 	ioarcb->res_handle = res_handle;
6601 	ioarcb->cmd_pkt.cdb[0] = MODE_SENSE;
6602 	ioarcb->cmd_pkt.cdb[2] = parm;
6603 	ioarcb->cmd_pkt.cdb[4] = xfer_len;
6604 	ioarcb->cmd_pkt.request_type = IPR_RQTYPE_SCSICDB;
6605 
6606 	ipr_init_ioadl(ipr_cmd, dma_addr, xfer_len, IPR_IOADL_FLAGS_READ_LAST);
6607 }
6608 
6609 /**
6610  * ipr_reset_cmd_failed - Handle failure of IOA reset command
6611  * @ipr_cmd:	ipr command struct
6612  *
6613  * This function handles the failure of an IOA bringup command.
6614  *
6615  * Return value:
6616  * 	IPR_RC_JOB_RETURN
6617  **/
6618 static int ipr_reset_cmd_failed(struct ipr_cmnd *ipr_cmd)
6619 {
6620 	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6621 	u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
6622 
6623 	dev_err(&ioa_cfg->pdev->dev,
6624 		"0x%02X failed with IOASC: 0x%08X\n",
6625 		ipr_cmd->ioarcb.cmd_pkt.cdb[0], ioasc);
6626 
6627 	ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
6628 	list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
6629 	return IPR_RC_JOB_RETURN;
6630 }
6631 
6632 /**
6633  * ipr_reset_mode_sense_failed - Handle failure of IOAFP mode sense
6634  * @ipr_cmd:	ipr command struct
6635  *
6636  * This function handles the failure of a Mode Sense to the IOAFP.
6637  * Some adapters do not handle all mode pages.
6638  *
6639  * Return value:
6640  * 	IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
6641  **/
6642 static int ipr_reset_mode_sense_failed(struct ipr_cmnd *ipr_cmd)
6643 {
6644 	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6645 	u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
6646 
6647 	if (ioasc == IPR_IOASC_IR_INVALID_REQ_TYPE_OR_PKT) {
6648 		ipr_cmd->job_step = ipr_set_supported_devs;
6649 		ipr_cmd->u.res = list_entry(ioa_cfg->used_res_q.next,
6650 					    struct ipr_resource_entry, queue);
6651 		return IPR_RC_JOB_CONTINUE;
6652 	}
6653 
6654 	return ipr_reset_cmd_failed(ipr_cmd);
6655 }
6656 
6657 /**
6658  * ipr_ioafp_mode_sense_page28 - Issue Mode Sense Page 28 to IOA
6659  * @ipr_cmd:	ipr command struct
6660  *
6661  * This function sends a Page 28 mode sense to the IOA to
6662  * retrieve SCSI bus attributes.
6663  *
6664  * Return value:
6665  * 	IPR_RC_JOB_RETURN
6666  **/
6667 static int ipr_ioafp_mode_sense_page28(struct ipr_cmnd *ipr_cmd)
6668 {
6669 	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6670 
6671 	ENTER;
6672 	ipr_build_mode_sense(ipr_cmd, cpu_to_be32(IPR_IOA_RES_HANDLE),
6673 			     0x28, ioa_cfg->vpd_cbs_dma +
6674 			     offsetof(struct ipr_misc_cbs, mode_pages),
6675 			     sizeof(struct ipr_mode_pages));
6676 
6677 	ipr_cmd->job_step = ipr_ioafp_mode_select_page28;
6678 	ipr_cmd->job_step_failed = ipr_reset_mode_sense_failed;
6679 
6680 	ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
6681 
6682 	LEAVE;
6683 	return IPR_RC_JOB_RETURN;
6684 }
6685 
6686 /**
6687  * ipr_ioafp_mode_select_page24 - Issue Mode Select to IOA
6688  * @ipr_cmd:	ipr command struct
6689  *
6690  * This function enables dual IOA RAID support if possible.
6691  *
6692  * Return value:
6693  * 	IPR_RC_JOB_RETURN
6694  **/
6695 static int ipr_ioafp_mode_select_page24(struct ipr_cmnd *ipr_cmd)
6696 {
6697 	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6698 	struct ipr_mode_pages *mode_pages = &ioa_cfg->vpd_cbs->mode_pages;
6699 	struct ipr_mode_page24 *mode_page;
6700 	int length;
6701 
6702 	ENTER;
6703 	mode_page = ipr_get_mode_page(mode_pages, 0x24,
6704 				      sizeof(struct ipr_mode_page24));
6705 
6706 	if (mode_page)
6707 		mode_page->flags |= IPR_ENABLE_DUAL_IOA_AF;
6708 
6709 	length = mode_pages->hdr.length + 1;
6710 	mode_pages->hdr.length = 0;
6711 
6712 	ipr_build_mode_select(ipr_cmd, cpu_to_be32(IPR_IOA_RES_HANDLE), 0x11,
6713 			      ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, mode_pages),
6714 			      length);
6715 
6716 	ipr_cmd->job_step = ipr_ioafp_mode_sense_page28;
6717 	ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
6718 
6719 	LEAVE;
6720 	return IPR_RC_JOB_RETURN;
6721 }
6722 
6723 /**
6724  * ipr_reset_mode_sense_page24_failed - Handle failure of IOAFP mode sense
6725  * @ipr_cmd:	ipr command struct
6726  *
6727  * This function handles the failure of a Mode Sense to the IOAFP.
6728  * Some adapters do not handle all mode pages.
6729  *
6730  * Return value:
6731  * 	IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
6732  **/
6733 static int ipr_reset_mode_sense_page24_failed(struct ipr_cmnd *ipr_cmd)
6734 {
6735 	u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
6736 
6737 	if (ioasc == IPR_IOASC_IR_INVALID_REQ_TYPE_OR_PKT) {
6738 		ipr_cmd->job_step = ipr_ioafp_mode_sense_page28;
6739 		return IPR_RC_JOB_CONTINUE;
6740 	}
6741 
6742 	return ipr_reset_cmd_failed(ipr_cmd);
6743 }
6744 
6745 /**
6746  * ipr_ioafp_mode_sense_page24 - Issue Page 24 Mode Sense to IOA
6747  * @ipr_cmd:	ipr command struct
6748  *
6749  * This function sends a mode sense to the IOA to retrieve
6750  * the IOA Advanced Function Control mode page.
6751  *
6752  * Return value:
6753  * 	IPR_RC_JOB_RETURN
6754  **/
6755 static int ipr_ioafp_mode_sense_page24(struct ipr_cmnd *ipr_cmd)
6756 {
6757 	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6758 
6759 	ENTER;
6760 	ipr_build_mode_sense(ipr_cmd, cpu_to_be32(IPR_IOA_RES_HANDLE),
6761 			     0x24, ioa_cfg->vpd_cbs_dma +
6762 			     offsetof(struct ipr_misc_cbs, mode_pages),
6763 			     sizeof(struct ipr_mode_pages));
6764 
6765 	ipr_cmd->job_step = ipr_ioafp_mode_select_page24;
6766 	ipr_cmd->job_step_failed = ipr_reset_mode_sense_page24_failed;
6767 
6768 	ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
6769 
6770 	LEAVE;
6771 	return IPR_RC_JOB_RETURN;
6772 }
6773 
6774 /**
6775  * ipr_init_res_table - Initialize the resource table
6776  * @ipr_cmd:	ipr command struct
6777  *
6778  * This function looks through the existing resource table, comparing
6779  * it with the config table. It takes care of old/new devices
6780  * and schedules adding/removing them from the mid-layer
6781  * as appropriate.
6782  *
6783  * Return value:
6784  * 	IPR_RC_JOB_CONTINUE
6785  **/
6786 static int ipr_init_res_table(struct ipr_cmnd *ipr_cmd)
6787 {
6788 	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6789 	struct ipr_resource_entry *res, *temp;
6790 	struct ipr_config_table_entry_wrapper cfgtew;
6791 	int entries, found, flag, i;
6792 	LIST_HEAD(old_res);
6793 
6794 	ENTER;
6795 	if (ioa_cfg->sis64)
6796 		flag = ioa_cfg->u.cfg_table64->hdr64.flags;
6797 	else
6798 		flag = ioa_cfg->u.cfg_table->hdr.flags;
6799 
6800 	if (flag & IPR_UCODE_DOWNLOAD_REQ)
6801 		dev_err(&ioa_cfg->pdev->dev, "Microcode download required\n");
6802 
6803 	list_for_each_entry_safe(res, temp, &ioa_cfg->used_res_q, queue)
6804 		list_move_tail(&res->queue, &old_res);
6805 
6806 	if (ioa_cfg->sis64)
6807 		entries = be16_to_cpu(ioa_cfg->u.cfg_table64->hdr64.num_entries);
6808 	else
6809 		entries = ioa_cfg->u.cfg_table->hdr.num_entries;
6810 
6811 	for (i = 0; i < entries; i++) {
6812 		if (ioa_cfg->sis64)
6813 			cfgtew.u.cfgte64 = &ioa_cfg->u.cfg_table64->dev[i];
6814 		else
6815 			cfgtew.u.cfgte = &ioa_cfg->u.cfg_table->dev[i];
6816 		found = 0;
6817 
6818 		list_for_each_entry_safe(res, temp, &old_res, queue) {
6819 			if (ipr_is_same_device(res, &cfgtew)) {
6820 				list_move_tail(&res->queue, &ioa_cfg->used_res_q);
6821 				found = 1;
6822 				break;
6823 			}
6824 		}
6825 
6826 		if (!found) {
6827 			if (list_empty(&ioa_cfg->free_res_q)) {
6828 				dev_err(&ioa_cfg->pdev->dev, "Too many devices attached\n");
6829 				break;
6830 			}
6831 
6832 			found = 1;
6833 			res = list_entry(ioa_cfg->free_res_q.next,
6834 					 struct ipr_resource_entry, queue);
6835 			list_move_tail(&res->queue, &ioa_cfg->used_res_q);
6836 			ipr_init_res_entry(res, &cfgtew);
6837 			res->add_to_ml = 1;
6838 		} else if (res->sdev && (ipr_is_vset_device(res) || ipr_is_scsi_disk(res)))
6839 			res->sdev->allow_restart = 1;
6840 
6841 		if (found)
6842 			ipr_update_res_entry(res, &cfgtew);
6843 	}
6844 
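	/*
	 * Anything still on old_res no longer appears in the config table:
	 * resources with an attached sdev are flagged for removal from the
	 * mid-layer, the rest go straight back to the free queue.
	 */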
6845 	list_for_each_entry_safe(res, temp, &old_res, queue) {
6846 		if (res->sdev) {
6847 			res->del_from_ml = 1;
6848 			res->res_handle = IPR_INVALID_RES_HANDLE;
6849 			list_move_tail(&res->queue, &ioa_cfg->used_res_q);
6850 		}
6851 	}
6852 
6853 	list_for_each_entry_safe(res, temp, &old_res, queue) {
6854 		ipr_clear_res_target(res);
6855 		list_move_tail(&res->queue, &ioa_cfg->free_res_q);
6856 	}
6857 
6858 	if (ioa_cfg->dual_raid && ipr_dual_ioa_raid)
6859 		ipr_cmd->job_step = ipr_ioafp_mode_sense_page24;
6860 	else
6861 		ipr_cmd->job_step = ipr_ioafp_mode_sense_page28;
6862 
6863 	LEAVE;
6864 	return IPR_RC_JOB_CONTINUE;
6865 }
6866 
6867 /**
6868  * ipr_ioafp_query_ioa_cfg - Send a Query IOA Config to the adapter.
6869  * @ipr_cmd:	ipr command struct
6870  *
6871  * This function sends a Query IOA Configuration command
6872  * to the adapter to retrieve the IOA configuration table.
6873  *
6874  * Return value:
6875  * 	IPR_RC_JOB_RETURN
6876  **/
6877 static int ipr_ioafp_query_ioa_cfg(struct ipr_cmnd *ipr_cmd)
6878 {
6879 	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6880 	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
6881 	struct ipr_inquiry_page3 *ucode_vpd = &ioa_cfg->vpd_cbs->page3_data;
6882 	struct ipr_inquiry_cap *cap = &ioa_cfg->vpd_cbs->cap;
6883 
6884 	ENTER;
6885 	if (cap->cap & IPR_CAP_DUAL_IOA_RAID)
6886 		ioa_cfg->dual_raid = 1;
6887 	dev_info(&ioa_cfg->pdev->dev, "Adapter firmware version: %02X%02X%02X%02X\n",
6888 		 ucode_vpd->major_release, ucode_vpd->card_type,
6889 		 ucode_vpd->minor_release[0], ucode_vpd->minor_release[1]);
6890 	ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
6891 	ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
6892 
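	/*
	 * Bytes 6-8 of the CDB carry the allocation length (the size of the
	 * config table buffer) as a 24-bit value, most significant byte first.
	 */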
6893 	ioarcb->cmd_pkt.cdb[0] = IPR_QUERY_IOA_CONFIG;
6894 	ioarcb->cmd_pkt.cdb[6] = (ioa_cfg->cfg_table_size >> 16) & 0xff;
6895 	ioarcb->cmd_pkt.cdb[7] = (ioa_cfg->cfg_table_size >> 8) & 0xff;
6896 	ioarcb->cmd_pkt.cdb[8] = ioa_cfg->cfg_table_size & 0xff;
6897 
6898 	ipr_init_ioadl(ipr_cmd, ioa_cfg->cfg_table_dma, ioa_cfg->cfg_table_size,
6899 		       IPR_IOADL_FLAGS_READ_LAST);
6900 
6901 	ipr_cmd->job_step = ipr_init_res_table;
6902 
6903 	ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
6904 
6905 	LEAVE;
6906 	return IPR_RC_JOB_RETURN;
6907 }
6908 
6909 /**
6910  * ipr_ioafp_inquiry - Send an Inquiry to the adapter.
6911  * @ipr_cmd:	ipr command struct
 * @flags:	inquiry flags (e.g. the EVPD bit in CDB byte 1)
 * @page:	inquiry page code
 * @dma_addr:	DMA address of the inquiry response buffer
 * @xfer_len:	size of the response buffer in bytes
6912  *
6913  * This utility function sends an inquiry to the adapter.
6914  *
6915  * Return value:
6916  * 	none
6917  **/
6918 static void ipr_ioafp_inquiry(struct ipr_cmnd *ipr_cmd, u8 flags, u8 page,
6919 			      dma_addr_t dma_addr, u8 xfer_len)
6920 {
6921 	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
6922 
6923 	ENTER;
6924 	ioarcb->cmd_pkt.request_type = IPR_RQTYPE_SCSICDB;
6925 	ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
6926 
6927 	ioarcb->cmd_pkt.cdb[0] = INQUIRY;
6928 	ioarcb->cmd_pkt.cdb[1] = flags;
6929 	ioarcb->cmd_pkt.cdb[2] = page;
6930 	ioarcb->cmd_pkt.cdb[4] = xfer_len;
6931 
6932 	ipr_init_ioadl(ipr_cmd, dma_addr, xfer_len, IPR_IOADL_FLAGS_READ_LAST);
6933 
6934 	ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
6935 	LEAVE;
6936 }
6937 
6938 /**
6939  * ipr_inquiry_page_supported - Is the given inquiry page supported
6940  * @page0:		inquiry page 0 buffer
6941  * @page:		page code.
6942  *
6943  * This function determines if the specified inquiry page is supported.
6944  *
6945  * Return value:
6946  *	1 if page is supported / 0 if not
6947  **/
6948 static int ipr_inquiry_page_supported(struct ipr_inquiry_page0 *page0, u8 page)
6949 {
6950 	int i;
6951 
6952 	for (i = 0; i < min_t(u8, page0->len, IPR_INQUIRY_PAGE0_ENTRIES); i++)
6953 		if (page0->page[i] == page)
6954 			return 1;
6955 
6956 	return 0;
6957 }
6958 
6959 /**
6960  * ipr_ioafp_cap_inquiry - Send a Page 0xD0 Inquiry to the adapter.
6961  * @ipr_cmd:	ipr command struct
6962  *
6963  * This function sends a Page 0xD0 inquiry to the adapter
6964  * to retrieve adapter capabilities.
6965  *
6966  * Return value:
6967  * 	IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
6968  **/
6969 static int ipr_ioafp_cap_inquiry(struct ipr_cmnd *ipr_cmd)
6970 {
6971 	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6972 	struct ipr_inquiry_page0 *page0 = &ioa_cfg->vpd_cbs->page0_data;
6973 	struct ipr_inquiry_cap *cap = &ioa_cfg->vpd_cbs->cap;
6974 
6975 	ENTER;
6976 	ipr_cmd->job_step = ipr_ioafp_query_ioa_cfg;
6977 	memset(cap, 0, sizeof(*cap));
6978 
6979 	if (ipr_inquiry_page_supported(page0, 0xD0)) {
6980 		ipr_ioafp_inquiry(ipr_cmd, 1, 0xD0,
6981 				  ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, cap),
6982 				  sizeof(struct ipr_inquiry_cap));
6983 		return IPR_RC_JOB_RETURN;
6984 	}
6985 
6986 	LEAVE;
6987 	return IPR_RC_JOB_CONTINUE;
6988 }
6989 
6990 /**
6991  * ipr_ioafp_page3_inquiry - Send a Page 3 Inquiry to the adapter.
6992  * @ipr_cmd:	ipr command struct
6993  *
6994  * This function sends a Page 3 inquiry to the adapter
6995  * to retrieve software VPD information.
6996  *
6997  * Return value:
6998  * 	IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
6999  **/
7000 static int ipr_ioafp_page3_inquiry(struct ipr_cmnd *ipr_cmd)
7001 {
7002 	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7003 
7004 	ENTER;
7005 
7006 	ipr_cmd->job_step = ipr_ioafp_cap_inquiry;
7007 
7008 	ipr_ioafp_inquiry(ipr_cmd, 1, 3,
7009 			  ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, page3_data),
7010 			  sizeof(struct ipr_inquiry_page3));
7011 
7012 	LEAVE;
7013 	return IPR_RC_JOB_RETURN;
7014 }
7015 
7016 /**
7017  * ipr_ioafp_page0_inquiry - Send a Page 0 Inquiry to the adapter.
7018  * @ipr_cmd:	ipr command struct
7019  *
7020  * This function sends a Page 0 inquiry to the adapter
7021  * to retrieve supported inquiry pages.
7022  *
7023  * Return value:
7024  * 	IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
7025  **/
7026 static int ipr_ioafp_page0_inquiry(struct ipr_cmnd *ipr_cmd)
7027 {
7028 	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7029 	char type[5];
7030 
7031 	ENTER;
7032 
7033 	/* Grab the type out of the VPD and store it away */
7034 	memcpy(type, ioa_cfg->vpd_cbs->ioa_vpd.std_inq_data.vpids.product_id, 4);
7035 	type[4] = '\0';
7036 	ioa_cfg->type = simple_strtoul((char *)type, NULL, 16);
7037 
7038 	ipr_cmd->job_step = ipr_ioafp_page3_inquiry;
7039 
7040 	ipr_ioafp_inquiry(ipr_cmd, 1, 0,
7041 			  ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, page0_data),
7042 			  sizeof(struct ipr_inquiry_page0));
7043 
7044 	LEAVE;
7045 	return IPR_RC_JOB_RETURN;
7046 }
7047 
7048 /**
7049  * ipr_ioafp_std_inquiry - Send a Standard Inquiry to the adapter.
7050  * @ipr_cmd:	ipr command struct
7051  *
7052  * This function sends a standard inquiry to the adapter.
7053  *
7054  * Return value:
7055  * 	IPR_RC_JOB_RETURN
7056  **/
7057 static int ipr_ioafp_std_inquiry(struct ipr_cmnd *ipr_cmd)
7058 {
7059 	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7060 
7061 	ENTER;
7062 	ipr_cmd->job_step = ipr_ioafp_page0_inquiry;
7063 
7064 	ipr_ioafp_inquiry(ipr_cmd, 0, 0,
7065 			  ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, ioa_vpd),
7066 			  sizeof(struct ipr_ioa_vpd));
7067 
7068 	LEAVE;
7069 	return IPR_RC_JOB_RETURN;
7070 }
7071 
7072 /**
7073  * ipr_ioafp_identify_hrrq - Send Identify Host RRQ.
7074  * @ipr_cmd:	ipr command struct
7075  *
7076  * This function sends an Identify Host Request Response Queue
7077  * command to establish the HRRQ with the adapter.
7078  *
7079  * Return value:
7080  * 	IPR_RC_JOB_RETURN
7081  **/
7082 static int ipr_ioafp_identify_hrrq(struct ipr_cmnd *ipr_cmd)
7083 {
7084 	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7085 	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
7086 
7087 	ENTER;
7088 	dev_info(&ioa_cfg->pdev->dev, "Starting IOA initialization sequence.\n");
7089 
7090 	ioarcb->cmd_pkt.cdb[0] = IPR_ID_HOST_RR_Q;
7091 	ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
7092 
7093 	ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
7094 	if (ioa_cfg->sis64)
7095 		ioarcb->cmd_pkt.cdb[1] = 0x1;
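	/*
	 * cdb[2..5] hold the low 32 bits of the host RRQ DMA address and
	 * cdb[7..8] hold the queue length in bytes; on SIS-64 adapters the
	 * upper 32 bits of the address follow in cdb[10..13] below.
	 */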
7096 	ioarcb->cmd_pkt.cdb[2] =
7097 		((u64) ioa_cfg->host_rrq_dma >> 24) & 0xff;
7098 	ioarcb->cmd_pkt.cdb[3] =
7099 		((u64) ioa_cfg->host_rrq_dma >> 16) & 0xff;
7100 	ioarcb->cmd_pkt.cdb[4] =
7101 		((u64) ioa_cfg->host_rrq_dma >> 8) & 0xff;
7102 	ioarcb->cmd_pkt.cdb[5] =
7103 		((u64) ioa_cfg->host_rrq_dma) & 0xff;
7104 	ioarcb->cmd_pkt.cdb[7] =
7105 		((sizeof(u32) * IPR_NUM_CMD_BLKS) >> 8) & 0xff;
7106 	ioarcb->cmd_pkt.cdb[8] =
7107 		(sizeof(u32) * IPR_NUM_CMD_BLKS) & 0xff;
7108 
7109 	if (ioa_cfg->sis64) {
7110 		ioarcb->cmd_pkt.cdb[10] =
7111 			((u64) ioa_cfg->host_rrq_dma >> 56) & 0xff;
7112 		ioarcb->cmd_pkt.cdb[11] =
7113 			((u64) ioa_cfg->host_rrq_dma >> 48) & 0xff;
7114 		ioarcb->cmd_pkt.cdb[12] =
7115 			((u64) ioa_cfg->host_rrq_dma >> 40) & 0xff;
7116 		ioarcb->cmd_pkt.cdb[13] =
7117 			((u64) ioa_cfg->host_rrq_dma >> 32) & 0xff;
7118 	}
7119 
7120 	ipr_cmd->job_step = ipr_ioafp_std_inquiry;
7121 
7122 	ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
7123 
7124 	LEAVE;
7125 	return IPR_RC_JOB_RETURN;
7126 }
7127 
7128 /**
7129  * ipr_reset_timer_done - Adapter reset timer function
7130  * @ipr_cmd:	ipr command struct
7131  *
7132  * Description: This function is used in adapter reset processing
7133  * for timing events. If the reset_cmd pointer in the IOA
7134  * config struct does not point to this command, we are doing
7135  * nested resets and fail_all_ops will take care of freeing the
7136  * command block.
7137  *
7138  * Return value:
7139  * 	none
7140  **/
7141 static void ipr_reset_timer_done(struct ipr_cmnd *ipr_cmd)
7142 {
7143 	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7144 	unsigned long lock_flags = 0;
7145 
7146 	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
7147 
7148 	if (ioa_cfg->reset_cmd == ipr_cmd) {
7149 		list_del(&ipr_cmd->queue);
7150 		ipr_cmd->done(ipr_cmd);
7151 	}
7152 
7153 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
7154 }
7155 
7156 /**
7157  * ipr_reset_start_timer - Start a timer for adapter reset job
7158  * @ipr_cmd:	ipr command struct
7159  * @timeout:	timeout value
7160  *
7161  * Description: This function is used in adapter reset processing
7162  * for timing events. If the reset_cmd pointer in the IOA
7163  * config struct does not point to this command, we are doing
7164  * nested resets and fail_all_ops will take care of freeing the
7165  * command block.
7166  *
7167  * Return value:
7168  * 	none
7169  **/
7170 static void ipr_reset_start_timer(struct ipr_cmnd *ipr_cmd,
7171 				  unsigned long timeout)
7172 {
7173 	list_add_tail(&ipr_cmd->queue, &ipr_cmd->ioa_cfg->pending_q);
7174 	ipr_cmd->done = ipr_reset_ioa_job;
7175 
7176 	ipr_cmd->timer.data = (unsigned long) ipr_cmd;
7177 	ipr_cmd->timer.expires = jiffies + timeout;
7178 	ipr_cmd->timer.function = (void (*)(unsigned long))ipr_reset_timer_done;
7179 	add_timer(&ipr_cmd->timer);
7180 }
7181 
7182 /**
7183  * ipr_init_ioa_mem - Initialize ioa_cfg control block
7184  * @ioa_cfg:	ioa cfg struct
7185  *
7186  * Return value:
7187  * 	nothing
7188  **/
7189 static void ipr_init_ioa_mem(struct ipr_ioa_cfg *ioa_cfg)
7190 {
7191 	memset(ioa_cfg->host_rrq, 0, sizeof(u32) * IPR_NUM_CMD_BLKS);
7192 
7193 	/* Initialize Host RRQ pointers */
7194 	ioa_cfg->hrrq_start = ioa_cfg->host_rrq;
7195 	ioa_cfg->hrrq_end = &ioa_cfg->host_rrq[IPR_NUM_CMD_BLKS - 1];
7196 	ioa_cfg->hrrq_curr = ioa_cfg->hrrq_start;
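	/*
	 * The toggle bit starts at 1; the interrupt handler only consumes
	 * HRRQ entries whose toggle bit matches this value and flips it
	 * each time the queue wraps.
	 */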
7197 	ioa_cfg->toggle_bit = 1;
7198 
7199 	/* Zero out config table */
7200 	memset(ioa_cfg->u.cfg_table, 0, ioa_cfg->cfg_table_size);
7201 }
7202 
7203 /**
7204  * ipr_reset_next_stage - Process IPL stage change based on feedback register.
7205  * @ipr_cmd:	ipr command struct
7206  *
7207  * Return value:
7208  * 	IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
7209  **/
7210 static int ipr_reset_next_stage(struct ipr_cmnd *ipr_cmd)
7211 {
7212 	unsigned long stage, stage_time;
7213 	u32 feedback;
7214 	volatile u32 int_reg;
7215 	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7216 	u64 maskval = 0;
7217 
7218 	feedback = readl(ioa_cfg->regs.init_feedback_reg);
7219 	stage = feedback & IPR_IPL_INIT_STAGE_MASK;
7220 	stage_time = feedback & IPR_IPL_INIT_STAGE_TIME_MASK;
7221 
7222 	ipr_dbg("IPL stage = 0x%lx, IPL stage time = %ld\n", stage, stage_time);
7223 
7224 	/* sanity check the stage_time value */
7225 	if (stage_time == 0)
7226 		stage_time = IPR_IPL_INIT_DEFAULT_STAGE_TIME;
7227 	else if (stage_time < IPR_IPL_INIT_MIN_STAGE_TIME)
7228 		stage_time = IPR_IPL_INIT_MIN_STAGE_TIME;
7229 	else if (stage_time > IPR_LONG_OPERATIONAL_TIMEOUT)
7230 		stage_time = IPR_LONG_OPERATIONAL_TIMEOUT;
7231 
7232 	if (stage == IPR_IPL_INIT_STAGE_UNKNOWN) {
7233 		writel(IPR_PCII_IPL_STAGE_CHANGE, ioa_cfg->regs.set_interrupt_mask_reg);
7234 		int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
7235 		stage_time = ioa_cfg->transop_timeout;
7236 		ipr_cmd->job_step = ipr_ioafp_identify_hrrq;
7237 	} else if (stage == IPR_IPL_INIT_STAGE_TRANSOP) {
7238 		int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32);
7239 		if (int_reg & IPR_PCII_IOA_TRANS_TO_OPER) {
7240 			ipr_cmd->job_step = ipr_ioafp_identify_hrrq;
7241 			maskval = IPR_PCII_IPL_STAGE_CHANGE;
7242 			maskval = (maskval << 32) | IPR_PCII_IOA_TRANS_TO_OPER;
7243 			writeq(maskval, ioa_cfg->regs.set_interrupt_mask_reg);
7244 			int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
7245 			return IPR_RC_JOB_CONTINUE;
7246 		}
7247 	}
7248 
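	/*
	 * Arm the stage timer; if the adapter does not advance to the next
	 * IPL stage within stage_time seconds, ipr_oper_timeout fires and
	 * the bring-up is treated as having timed out.
	 */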
7249 	ipr_cmd->timer.data = (unsigned long) ipr_cmd;
7250 	ipr_cmd->timer.expires = jiffies + stage_time * HZ;
7251 	ipr_cmd->timer.function = (void (*)(unsigned long))ipr_oper_timeout;
7252 	ipr_cmd->done = ipr_reset_ioa_job;
7253 	add_timer(&ipr_cmd->timer);
7254 	list_add_tail(&ipr_cmd->queue, &ioa_cfg->pending_q);
7255 
7256 	return IPR_RC_JOB_RETURN;
7257 }
7258 
7259 /**
7260  * ipr_reset_enable_ioa - Enable the IOA following a reset.
7261  * @ipr_cmd:	ipr command struct
7262  *
7263  * This function reinitializes some control blocks and
7264  * enables destructive diagnostics on the adapter.
7265  *
7266  * Return value:
7267  * 	IPR_RC_JOB_RETURN
7268  **/
7269 static int ipr_reset_enable_ioa(struct ipr_cmnd *ipr_cmd)
7270 {
7271 	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7272 	volatile u32 int_reg;
7273 	volatile u64 maskval;
7274 
7275 	ENTER;
7276 	ipr_cmd->job_step = ipr_ioafp_identify_hrrq;
7277 	ipr_init_ioa_mem(ioa_cfg);
7278 
7279 	ioa_cfg->allow_interrupts = 1;
7280 	if (ioa_cfg->sis64) {
7281 		/* Set the adapter to the correct endian mode. */
7282 		writel(IPR_ENDIAN_SWAP_KEY, ioa_cfg->regs.endian_swap_reg);
7283 		int_reg = readl(ioa_cfg->regs.endian_swap_reg);
7284 	}
7285 
7286 	int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32);
7287 
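	/*
	 * If the adapter has already transitioned to operational, skip the
	 * doorbell and the stage/timer wait; just clear the relevant mask
	 * bits and continue straight to Identify HRRQ.
	 */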
7288 	if (int_reg & IPR_PCII_IOA_TRANS_TO_OPER) {
7289 		writel((IPR_PCII_ERROR_INTERRUPTS | IPR_PCII_HRRQ_UPDATED),
7290 		       ioa_cfg->regs.clr_interrupt_mask_reg32);
7291 		int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
7292 		return IPR_RC_JOB_CONTINUE;
7293 	}
7294 
7295 	/* Enable destructive diagnostics on IOA */
7296 	writel(ioa_cfg->doorbell, ioa_cfg->regs.set_uproc_interrupt_reg32);
7297 
7298 	if (ioa_cfg->sis64) {
7299 		maskval = IPR_PCII_IPL_STAGE_CHANGE;
7300 		maskval = (maskval << 32) | IPR_PCII_OPER_INTERRUPTS;
7301 		writeq(maskval, ioa_cfg->regs.clr_interrupt_mask_reg);
7302 	} else
7303 		writel(IPR_PCII_OPER_INTERRUPTS, ioa_cfg->regs.clr_interrupt_mask_reg32);
7304 
7305 	int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
7306 
7307 	dev_info(&ioa_cfg->pdev->dev, "Initializing IOA.\n");
7308 
7309 	if (ioa_cfg->sis64) {
7310 		ipr_cmd->job_step = ipr_reset_next_stage;
7311 		return IPR_RC_JOB_CONTINUE;
7312 	}
7313 
7314 	ipr_cmd->timer.data = (unsigned long) ipr_cmd;
7315 	ipr_cmd->timer.expires = jiffies + (ioa_cfg->transop_timeout * HZ);
7316 	ipr_cmd->timer.function = (void (*)(unsigned long))ipr_oper_timeout;
7317 	ipr_cmd->done = ipr_reset_ioa_job;
7318 	add_timer(&ipr_cmd->timer);
7319 	list_add_tail(&ipr_cmd->queue, &ioa_cfg->pending_q);
7320 
7321 	LEAVE;
7322 	return IPR_RC_JOB_RETURN;
7323 }
7324 
7325 /**
7326  * ipr_reset_wait_for_dump - Wait for a dump to timeout.
7327  * @ipr_cmd:	ipr command struct
7328  *
7329  * This function is invoked when an adapter dump has run out
7330  * of processing time.
7331  *
7332  * Return value:
7333  * 	IPR_RC_JOB_CONTINUE
7334  **/
7335 static int ipr_reset_wait_for_dump(struct ipr_cmnd *ipr_cmd)
7336 {
7337 	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7338 
7339 	if (ioa_cfg->sdt_state == GET_DUMP)
7340 		ioa_cfg->sdt_state = ABORT_DUMP;
7341 
7342 	ipr_cmd->job_step = ipr_reset_alert;
7343 
7344 	return IPR_RC_JOB_CONTINUE;
7345 }
7346 
7347 /**
7348  * ipr_unit_check_no_data - Log a unit check/no data error log
7349  * @ioa_cfg:		ioa config struct
7350  *
7351  * Logs an error indicating the adapter unit checked, but for some
7352  * reason, we were unable to fetch the unit check buffer.
7353  *
7354  * Return value:
7355  * 	nothing
7356  **/
7357 static void ipr_unit_check_no_data(struct ipr_ioa_cfg *ioa_cfg)
7358 {
7359 	ioa_cfg->errors_logged++;
7360 	dev_err(&ioa_cfg->pdev->dev, "IOA unit check with no data\n");
7361 }
7362 
7363 /**
7364  * ipr_get_unit_check_buffer - Get the unit check buffer from the IOA
7365  * @ioa_cfg:		ioa config struct
7366  *
7367  * Fetches the unit check buffer from the adapter by clocking the data
7368  * through the mailbox register.
7369  *
7370  * Return value:
7371  * 	nothing
7372  **/
7373 static void ipr_get_unit_check_buffer(struct ipr_ioa_cfg *ioa_cfg)
7374 {
7375 	unsigned long mailbox;
7376 	struct ipr_hostrcb *hostrcb;
7377 	struct ipr_uc_sdt sdt;
7378 	int rc, length;
7379 	u32 ioasc;
7380 
7381 	mailbox = readl(ioa_cfg->ioa_mailbox);
7382 
7383 	if (!ioa_cfg->sis64 && !ipr_sdt_is_fmt2(mailbox)) {
7384 		ipr_unit_check_no_data(ioa_cfg);
7385 		return;
7386 	}
7387 
7388 	memset(&sdt, 0, sizeof(struct ipr_uc_sdt));
7389 	rc = ipr_get_ldump_data_section(ioa_cfg, mailbox, (__be32 *) &sdt,
7390 					(sizeof(struct ipr_uc_sdt)) / sizeof(__be32));
7391 
7392 	if (rc || !(sdt.entry[0].flags & IPR_SDT_VALID_ENTRY) ||
7393 	    ((be32_to_cpu(sdt.hdr.state) != IPR_FMT3_SDT_READY_TO_USE) &&
7394 	    (be32_to_cpu(sdt.hdr.state) != IPR_FMT2_SDT_READY_TO_USE))) {
7395 		ipr_unit_check_no_data(ioa_cfg);
7396 		return;
7397 	}
7398 
7399 	/*
	 * Find the length of the first SDT entry (the unit check buffer).
	 * A FMT3 SDT reports the length directly in end_token; a FMT2 SDT
	 * carries start/end addresses, so take the masked difference.
	 */
7400 	if (be32_to_cpu(sdt.hdr.state) == IPR_FMT3_SDT_READY_TO_USE)
7401 		length = be32_to_cpu(sdt.entry[0].end_token);
7402 	else
7403 		length = (be32_to_cpu(sdt.entry[0].end_token) -
7404 			  be32_to_cpu(sdt.entry[0].start_token)) &
7405 			  IPR_FMT2_MBX_ADDR_MASK;
7406 
7407 	hostrcb = list_entry(ioa_cfg->hostrcb_free_q.next,
7408 			     struct ipr_hostrcb, queue);
7409 	list_del(&hostrcb->queue);
7410 	memset(&hostrcb->hcam, 0, sizeof(hostrcb->hcam));
7411 
7412 	rc = ipr_get_ldump_data_section(ioa_cfg,
7413 					be32_to_cpu(sdt.entry[0].start_token),
7414 					(__be32 *)&hostrcb->hcam,
7415 					min(length, (int)sizeof(hostrcb->hcam)) / sizeof(__be32));
7416 
7417 	if (!rc) {
7418 		ipr_handle_log_data(ioa_cfg, hostrcb);
7419 		ioasc = be32_to_cpu(hostrcb->hcam.u.error.fd_ioasc);
7420 		if (ioasc == IPR_IOASC_NR_IOA_RESET_REQUIRED &&
7421 		    ioa_cfg->sdt_state == GET_DUMP)
7422 			ioa_cfg->sdt_state = WAIT_FOR_DUMP;
7423 	} else
7424 		ipr_unit_check_no_data(ioa_cfg);
7425 
7426 	list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_free_q);
7427 }
7428 
7429 /**
7430  * ipr_reset_restore_cfg_space - Restore PCI config space.
7431  * @ipr_cmd:	ipr command struct
7432  *
7433  * Description: This function restores the saved PCI config space of
7434  * the adapter, fails all outstanding ops back to the callers, and
7435  * fetches the dump/unit check if applicable to this reset.
7436  *
7437  * Return value:
7438  * 	IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
7439  **/
7440 static int ipr_reset_restore_cfg_space(struct ipr_cmnd *ipr_cmd)
7441 {
7442 	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7443 	volatile u32 int_reg;
7444 	int rc;
7445 
7446 	ENTER;
7447 	ioa_cfg->pdev->state_saved = true;
7448 	rc = pci_restore_state(ioa_cfg->pdev);
7449 
7450 	if (rc != PCIBIOS_SUCCESSFUL) {
7451 		ipr_cmd->s.ioasa.hdr.ioasc = cpu_to_be32(IPR_IOASC_PCI_ACCESS_ERROR);
7452 		return IPR_RC_JOB_CONTINUE;
7453 	}
7454 
7455 	if (ipr_set_pcix_cmd_reg(ioa_cfg)) {
7456 		ipr_cmd->s.ioasa.hdr.ioasc = cpu_to_be32(IPR_IOASC_PCI_ACCESS_ERROR);
7457 		return IPR_RC_JOB_CONTINUE;
7458 	}
7459 
7460 	ipr_fail_all_ops(ioa_cfg);
7461 
7462 	if (ioa_cfg->sis64) {
7463 		/* Set the adapter to the correct endian mode. */
7464 		writel(IPR_ENDIAN_SWAP_KEY, ioa_cfg->regs.endian_swap_reg);
7465 		int_reg = readl(ioa_cfg->regs.endian_swap_reg);
7466 	}
7467 
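	/*
	 * If the adapter took a unit check, fetch the unit check buffer now
	 * and go back through reset alert so the IOA restarts cleanly.
	 */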
7468 	if (ioa_cfg->ioa_unit_checked) {
7469 		ioa_cfg->ioa_unit_checked = 0;
7470 		ipr_get_unit_check_buffer(ioa_cfg);
7471 		ipr_cmd->job_step = ipr_reset_alert;
7472 		ipr_reset_start_timer(ipr_cmd, 0);
7473 		return IPR_RC_JOB_RETURN;
7474 	}
7475 
7476 	if (ioa_cfg->in_ioa_bringdown) {
7477 		ipr_cmd->job_step = ipr_ioa_bringdown_done;
7478 	} else {
7479 		ipr_cmd->job_step = ipr_reset_enable_ioa;
7480 
7481 		if (GET_DUMP == ioa_cfg->sdt_state) {
7482 			ipr_reset_start_timer(ipr_cmd, IPR_DUMP_TIMEOUT);
7483 			ipr_cmd->job_step = ipr_reset_wait_for_dump;
7484 			schedule_work(&ioa_cfg->work_q);
7485 			return IPR_RC_JOB_RETURN;
7486 		}
7487 	}
7488 
7489 	LEAVE;
7490 	return IPR_RC_JOB_CONTINUE;
7491 }
7492 
7493 /**
7494  * ipr_reset_bist_done - BIST has completed on the adapter.
7495  * @ipr_cmd:	ipr command struct
7496  *
7497  * Description: Unblock config space and resume the reset process.
7498  *
7499  * Return value:
7500  * 	IPR_RC_JOB_CONTINUE
7501  **/
7502 static int ipr_reset_bist_done(struct ipr_cmnd *ipr_cmd)
7503 {
7504 	ENTER;
7505 	pci_unblock_user_cfg_access(ipr_cmd->ioa_cfg->pdev);
7506 	ipr_cmd->job_step = ipr_reset_restore_cfg_space;
7507 	LEAVE;
7508 	return IPR_RC_JOB_CONTINUE;
7509 }
7510 
7511 /**
7512  * ipr_reset_start_bist - Run BIST on the adapter.
7513  * @ipr_cmd:	ipr command struct
7514  *
7515  * Description: This function runs BIST on the adapter, then delays 2 seconds.
7516  *
7517  * Return value:
7518  * 	IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
7519  **/
7520 static int ipr_reset_start_bist(struct ipr_cmnd *ipr_cmd)
7521 {
7522 	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7523 	int rc = PCIBIOS_SUCCESSFUL;
7524 
7525 	ENTER;
7526 	pci_block_user_cfg_access(ioa_cfg->pdev);
7527 
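	/*
	 * Chips that declare the IPR_MMIO method (SIS-64) start BIST by
	 * setting a bit in the uproc interrupt register; older chips use
	 * the standard PCI BIST register in config space.
	 */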
7528 	if (ioa_cfg->ipr_chip->bist_method == IPR_MMIO)
7529 		writel(IPR_UPROCI_SIS64_START_BIST,
7530 		       ioa_cfg->regs.set_uproc_interrupt_reg32);
7531 	else
7532 		rc = pci_write_config_byte(ioa_cfg->pdev, PCI_BIST, PCI_BIST_START);
7533 
7534 	if (rc == PCIBIOS_SUCCESSFUL) {
7535 		ipr_cmd->job_step = ipr_reset_bist_done;
7536 		ipr_reset_start_timer(ipr_cmd, IPR_WAIT_FOR_BIST_TIMEOUT);
7537 		rc = IPR_RC_JOB_RETURN;
7538 	} else {
7539 		pci_unblock_user_cfg_access(ipr_cmd->ioa_cfg->pdev);
7540 		ipr_cmd->s.ioasa.hdr.ioasc = cpu_to_be32(IPR_IOASC_PCI_ACCESS_ERROR);
7541 		rc = IPR_RC_JOB_CONTINUE;
7542 	}
7543 
7544 	LEAVE;
7545 	return rc;
7546 }
7547 
7548 /**
7549  * ipr_reset_slot_reset_done - Clear PCI reset to the adapter
7550  * @ipr_cmd:	ipr command struct
7551  *
7552  * Description: This clears PCI reset to the adapter and delays two seconds.
7553  *
7554  * Return value:
7555  * 	IPR_RC_JOB_RETURN
7556  **/
7557 static int ipr_reset_slot_reset_done(struct ipr_cmnd *ipr_cmd)
7558 {
7559 	ENTER;
7560 	pci_set_pcie_reset_state(ipr_cmd->ioa_cfg->pdev, pcie_deassert_reset);
7561 	ipr_cmd->job_step = ipr_reset_bist_done;
7562 	ipr_reset_start_timer(ipr_cmd, IPR_WAIT_FOR_BIST_TIMEOUT);
7563 	LEAVE;
7564 	return IPR_RC_JOB_RETURN;
7565 }
7566 
7567 /**
7568  * ipr_reset_slot_reset - Reset the PCI slot of the adapter.
7569  * @ipr_cmd:	ipr command struct
7570  *
7571  * Description: This asserts PCI reset to the adapter.
7572  *
7573  * Return value:
7574  * 	IPR_RC_JOB_RETURN
7575  **/
7576 static int ipr_reset_slot_reset(struct ipr_cmnd *ipr_cmd)
7577 {
7578 	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7579 	struct pci_dev *pdev = ioa_cfg->pdev;
7580 
7581 	ENTER;
7582 	pci_block_user_cfg_access(pdev);
7583 	pci_set_pcie_reset_state(pdev, pcie_warm_reset);
7584 	ipr_cmd->job_step = ipr_reset_slot_reset_done;
7585 	ipr_reset_start_timer(ipr_cmd, IPR_PCI_RESET_TIMEOUT);
7586 	LEAVE;
7587 	return IPR_RC_JOB_RETURN;
7588 }
7589 
7590 /**
7591  * ipr_reset_allowed - Query whether or not IOA can be reset
7592  * @ioa_cfg:	ioa config struct
7593  *
7594  * Return value:
7595  * 	0 if reset not allowed / non-zero if reset is allowed
7596  **/
7597 static int ipr_reset_allowed(struct ipr_ioa_cfg *ioa_cfg)
7598 {
7599 	volatile u32 temp_reg;
7600 
7601 	temp_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
7602 	return ((temp_reg & IPR_PCII_CRITICAL_OPERATION) == 0);
7603 }
7604 
7605 /**
7606  * ipr_reset_wait_to_start_bist - Wait for permission to reset IOA.
7607  * @ipr_cmd:	ipr command struct
7608  *
7609  * Description: This function waits for adapter permission to run BIST,
7610  * then runs BIST. If the adapter does not give permission after a
7611  * reasonable time, we will reset the adapter anyway. Resetting the
7612  * adapter without warning it risks losing the adapter's persistent
7613  * error log; if the adapter is reset while it is writing to its
7614  * flash, the flash segment being written will have bad ECC and
7615  * be zeroed.
7616  *
7617  * Return value:
7618  * 	IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
7619  **/
7620 static int ipr_reset_wait_to_start_bist(struct ipr_cmnd *ipr_cmd)
7621 {
7622 	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7623 	int rc = IPR_RC_JOB_RETURN;
7624 
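	/*
	 * Poll every IPR_CHECK_FOR_RESET_TIMEOUT until the adapter leaves
	 * critical operation or the wait budget in u.time_left runs out,
	 * then proceed with the reset regardless.
	 */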
7625 	if (!ipr_reset_allowed(ioa_cfg) && ipr_cmd->u.time_left) {
7626 		ipr_cmd->u.time_left -= IPR_CHECK_FOR_RESET_TIMEOUT;
7627 		ipr_reset_start_timer(ipr_cmd, IPR_CHECK_FOR_RESET_TIMEOUT);
7628 	} else {
7629 		ipr_cmd->job_step = ioa_cfg->reset;
7630 		rc = IPR_RC_JOB_CONTINUE;
7631 	}
7632 
7633 	return rc;
7634 }
7635 
7636 /**
7637  * ipr_reset_alert - Alert the adapter of a pending reset
7638  * @ipr_cmd:	ipr command struct
7639  *
7640  * Description: This function alerts the adapter that it will be reset.
7641  * If memory space is not currently enabled, proceed directly
7642  * to running BIST on the adapter. The timer must always be started
7643  * so we guarantee we do not run BIST from ipr_isr.
7644  *
7645  * Return value:
7646  * 	IPR_RC_JOB_RETURN
7647  **/
7648 static int ipr_reset_alert(struct ipr_cmnd *ipr_cmd)
7649 {
7650 	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7651 	u16 cmd_reg;
7652 	int rc;
7653 
7654 	ENTER;
7655 	rc = pci_read_config_word(ioa_cfg->pdev, PCI_COMMAND, &cmd_reg);
7656 
7657 	if ((rc == PCIBIOS_SUCCESSFUL) && (cmd_reg & PCI_COMMAND_MEMORY)) {
7658 		ipr_mask_and_clear_interrupts(ioa_cfg, ~0);
7659 		writel(IPR_UPROCI_RESET_ALERT, ioa_cfg->regs.set_uproc_interrupt_reg32);
7660 		ipr_cmd->job_step = ipr_reset_wait_to_start_bist;
7661 	} else {
7662 		ipr_cmd->job_step = ioa_cfg->reset;
7663 	}
7664 
7665 	ipr_cmd->u.time_left = IPR_WAIT_FOR_RESET_TIMEOUT;
7666 	ipr_reset_start_timer(ipr_cmd, IPR_CHECK_FOR_RESET_TIMEOUT);
7667 
7668 	LEAVE;
7669 	return IPR_RC_JOB_RETURN;
7670 }
7671 
7672 /**
7673  * ipr_reset_ucode_download_done - Microcode download completion
7674  * @ipr_cmd:	ipr command struct
7675  *
7676  * Description: This function unmaps the microcode download buffer.
7677  *
7678  * Return value:
7679  * 	IPR_RC_JOB_CONTINUE
7680  **/
7681 static int ipr_reset_ucode_download_done(struct ipr_cmnd *ipr_cmd)
7682 {
7683 	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7684 	struct ipr_sglist *sglist = ioa_cfg->ucode_sglist;
7685 
7686 	pci_unmap_sg(ioa_cfg->pdev, sglist->scatterlist,
7687 		     sglist->num_sg, DMA_TO_DEVICE);
7688 
7689 	ipr_cmd->job_step = ipr_reset_alert;
7690 	return IPR_RC_JOB_CONTINUE;
7691 }
7692 
7693 /**
7694  * ipr_reset_ucode_download - Download microcode to the adapter
7695  * @ipr_cmd:	ipr command struct
7696  *
7697  * Description: This function checks to see if there is microcode
7698  * to download to the adapter. If there is, a download is performed.
7699  *
7700  * Return value:
7701  * 	IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
7702  **/
7703 static int ipr_reset_ucode_download(struct ipr_cmnd *ipr_cmd)
7704 {
7705 	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7706 	struct ipr_sglist *sglist = ioa_cfg->ucode_sglist;
7707 
7708 	ENTER;
7709 	ipr_cmd->job_step = ipr_reset_alert;
7710 
7711 	if (!sglist)
7712 		return IPR_RC_JOB_CONTINUE;
7713 
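	/*
	 * Build a WRITE BUFFER (download microcode and save) command;
	 * cdb[6..8] carry the 24-bit image length in bytes.
	 */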
7714 	ipr_cmd->ioarcb.res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
7715 	ipr_cmd->ioarcb.cmd_pkt.request_type = IPR_RQTYPE_SCSICDB;
7716 	ipr_cmd->ioarcb.cmd_pkt.cdb[0] = WRITE_BUFFER;
7717 	ipr_cmd->ioarcb.cmd_pkt.cdb[1] = IPR_WR_BUF_DOWNLOAD_AND_SAVE;
7718 	ipr_cmd->ioarcb.cmd_pkt.cdb[6] = (sglist->buffer_len & 0xff0000) >> 16;
7719 	ipr_cmd->ioarcb.cmd_pkt.cdb[7] = (sglist->buffer_len & 0x00ff00) >> 8;
7720 	ipr_cmd->ioarcb.cmd_pkt.cdb[8] = sglist->buffer_len & 0x0000ff;
7721 
7722 	if (ioa_cfg->sis64)
7723 		ipr_build_ucode_ioadl64(ipr_cmd, sglist);
7724 	else
7725 		ipr_build_ucode_ioadl(ipr_cmd, sglist);
7726 	ipr_cmd->job_step = ipr_reset_ucode_download_done;
7727 
7728 	ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout,
7729 		   IPR_WRITE_BUFFER_TIMEOUT);
7730 
7731 	LEAVE;
7732 	return IPR_RC_JOB_RETURN;
7733 }
7734 
7735 /**
7736  * ipr_reset_shutdown_ioa - Shutdown the adapter
7737  * @ipr_cmd:	ipr command struct
7738  *
7739  * Description: This function issues an adapter shutdown of the
7740  * specified type to the specified adapter as part of the
7741  * adapter reset job.
7742  *
7743  * Return value:
7744  * 	IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
7745  **/
7746 static int ipr_reset_shutdown_ioa(struct ipr_cmnd *ipr_cmd)
7747 {
7748 	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7749 	enum ipr_shutdown_type shutdown_type = ipr_cmd->u.shutdown_type;
7750 	unsigned long timeout;
7751 	int rc = IPR_RC_JOB_CONTINUE;
7752 
7753 	ENTER;
7754 	if (shutdown_type != IPR_SHUTDOWN_NONE && !ioa_cfg->ioa_is_dead) {
7755 		ipr_cmd->ioarcb.res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
7756 		ipr_cmd->ioarcb.cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
7757 		ipr_cmd->ioarcb.cmd_pkt.cdb[0] = IPR_IOA_SHUTDOWN;
7758 		ipr_cmd->ioarcb.cmd_pkt.cdb[1] = shutdown_type;
7759 
7760 		if (shutdown_type == IPR_SHUTDOWN_NORMAL)
7761 			timeout = IPR_SHUTDOWN_TIMEOUT;
7762 		else if (shutdown_type == IPR_SHUTDOWN_PREPARE_FOR_NORMAL)
7763 			timeout = IPR_INTERNAL_TIMEOUT;
7764 		else if (ioa_cfg->dual_raid && ipr_dual_ioa_raid)
7765 			timeout = IPR_DUAL_IOA_ABBR_SHUTDOWN_TO;
7766 		else
7767 			timeout = IPR_ABBREV_SHUTDOWN_TIMEOUT;
7768 
7769 		ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, timeout);
7770 
7771 		rc = IPR_RC_JOB_RETURN;
7772 		ipr_cmd->job_step = ipr_reset_ucode_download;
7773 	} else
7774 		ipr_cmd->job_step = ipr_reset_alert;
7775 
7776 	LEAVE;
7777 	return rc;
7778 }
7779 
7780 /**
7781  * ipr_reset_ioa_job - Adapter reset job
7782  * @ipr_cmd:	ipr command struct
7783  *
7784  * Description: This function is the job router for the adapter reset job.
7785  *
7786  * Return value:
7787  * 	none
7788  **/
7789 static void ipr_reset_ioa_job(struct ipr_cmnd *ipr_cmd)
7790 {
7791 	u32 rc, ioasc;
7792 	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7793 
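	/*
	 * Job steps returning IPR_RC_JOB_CONTINUE are executed back to back
	 * in this loop; IPR_RC_JOB_RETURN means the step has queued
	 * asynchronous work and the job resumes when that work completes.
	 */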
7794 	do {
7795 		ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
7796 
7797 		if (ioa_cfg->reset_cmd != ipr_cmd) {
7798 			/*
7799 			 * We are doing nested adapter resets and this is
7800 			 * not the current reset job.
7801 			 */
7802 			list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
7803 			return;
7804 		}
7805 
7806 		if (IPR_IOASC_SENSE_KEY(ioasc)) {
7807 			rc = ipr_cmd->job_step_failed(ipr_cmd);
7808 			if (rc == IPR_RC_JOB_RETURN)
7809 				return;
7810 		}
7811 
7812 		ipr_reinit_ipr_cmnd(ipr_cmd);
7813 		ipr_cmd->job_step_failed = ipr_reset_cmd_failed;
7814 		rc = ipr_cmd->job_step(ipr_cmd);
7815 	} while(rc == IPR_RC_JOB_CONTINUE);
7816 }
7817 
7818 /**
7819  * _ipr_initiate_ioa_reset - Initiate an adapter reset
7820  * @ioa_cfg:		ioa config struct
7821  * @job_step:		first job step of reset job
7822  * @shutdown_type:	shutdown type
7823  *
7824  * Description: This function will initiate the reset of the given adapter
7825  * starting at the selected job step.
7826  * If the caller needs to wait on the completion of the reset,
7827  * the caller must sleep on the reset_wait_q.
7828  *
7829  * Return value:
7830  * 	none
7831  **/
7832 static void _ipr_initiate_ioa_reset(struct ipr_ioa_cfg *ioa_cfg,
7833 				    int (*job_step) (struct ipr_cmnd *),
7834 				    enum ipr_shutdown_type shutdown_type)
7835 {
7836 	struct ipr_cmnd *ipr_cmd;
7837 
7838 	ioa_cfg->in_reset_reload = 1;
7839 	ioa_cfg->allow_cmds = 0;
7840 	scsi_block_requests(ioa_cfg->host);
7841 
7842 	ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
7843 	ioa_cfg->reset_cmd = ipr_cmd;
7844 	ipr_cmd->job_step = job_step;
7845 	ipr_cmd->u.shutdown_type = shutdown_type;
7846 
7847 	ipr_reset_ioa_job(ipr_cmd);
7848 }
7849 
7850 /**
7851  * ipr_initiate_ioa_reset - Initiate an adapter reset
7852  * @ioa_cfg:		ioa config struct
7853  * @shutdown_type:	shutdown type
7854  *
7855  * Description: This function will initiate the reset of the given adapter.
7856  * If the caller needs to wait on the completion of the reset,
7857  * the caller must sleep on the reset_wait_q.
7858  *
7859  * Return value:
7860  * 	none
7861  **/
7862 static void ipr_initiate_ioa_reset(struct ipr_ioa_cfg *ioa_cfg,
7863 				   enum ipr_shutdown_type shutdown_type)
7864 {
7865 	if (ioa_cfg->ioa_is_dead)
7866 		return;
7867 
7868 	if (ioa_cfg->in_reset_reload && ioa_cfg->sdt_state == GET_DUMP)
7869 		ioa_cfg->sdt_state = ABORT_DUMP;
7870 
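	/*
	 * After too many consecutive failed reset/reload attempts the
	 * adapter is declared dead; a final bringdown pass is still run so
	 * that any outstanding ops are failed back to their callers.
	 */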
7871 	if (ioa_cfg->reset_retries++ >= IPR_NUM_RESET_RELOAD_RETRIES) {
7872 		dev_err(&ioa_cfg->pdev->dev,
7873 			"IOA taken offline - error recovery failed\n");
7874 
7875 		ioa_cfg->reset_retries = 0;
7876 		ioa_cfg->ioa_is_dead = 1;
7877 
7878 		if (ioa_cfg->in_ioa_bringdown) {
7879 			ioa_cfg->reset_cmd = NULL;
7880 			ioa_cfg->in_reset_reload = 0;
7881 			ipr_fail_all_ops(ioa_cfg);
7882 			wake_up_all(&ioa_cfg->reset_wait_q);
7883 
7884 			spin_unlock_irq(ioa_cfg->host->host_lock);
7885 			scsi_unblock_requests(ioa_cfg->host);
7886 			spin_lock_irq(ioa_cfg->host->host_lock);
7887 			return;
7888 		} else {
7889 			ioa_cfg->in_ioa_bringdown = 1;
7890 			shutdown_type = IPR_SHUTDOWN_NONE;
7891 		}
7892 	}
7893 
7894 	_ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_shutdown_ioa,
7895 				shutdown_type);
7896 }
7897 
7898 /**
7899  * ipr_reset_freeze - Hold off all I/O activity
7900  * @ipr_cmd:	ipr command struct
7901  *
7902  * Description: If the PCI slot is frozen, hold off all I/O
7903  * activity; then, as soon as the slot is available again,
7904  * initiate an adapter reset.
7905  *
 * Return value:
 * 	IPR_RC_JOB_RETURN
 **/
7906 static int ipr_reset_freeze(struct ipr_cmnd *ipr_cmd)
7907 {
7908 	/* Disallow new interrupts, avoid loop */
7909 	ipr_cmd->ioa_cfg->allow_interrupts = 0;
7910 	list_add_tail(&ipr_cmd->queue, &ipr_cmd->ioa_cfg->pending_q);
7911 	ipr_cmd->done = ipr_reset_ioa_job;
7912 	return IPR_RC_JOB_RETURN;
7913 }
7914 
7915 /**
7916  * ipr_pci_frozen - Called when slot has experienced a PCI bus error.
7917  * @pdev:	PCI device struct
7918  *
7919  * Description: This routine is called to tell us that the PCI bus
7920  * is down. Can't do anything here, except put the device driver
7921  * into a holding pattern, waiting for the PCI bus to come back.
7922  */
7923 static void ipr_pci_frozen(struct pci_dev *pdev)
7924 {
7925 	unsigned long flags = 0;
7926 	struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
7927 
7928 	spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
7929 	_ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_freeze, IPR_SHUTDOWN_NONE);
7930 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
7931 }
7932 
7933 /**
7934  * ipr_pci_slot_reset - Called when PCI slot has been reset.
7935  * @pdev:	PCI device struct
7936  *
7937  * Description: This routine is called by the pci error recovery
7938  * code after the PCI slot has been reset, just before we
7939  * should resume normal operations.
7940  */
7941 static pci_ers_result_t ipr_pci_slot_reset(struct pci_dev *pdev)
7942 {
7943 	unsigned long flags = 0;
7944 	struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
7945 
7946 	spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
7947 	if (ioa_cfg->needs_warm_reset)
7948 		ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
7949 	else
7950 		_ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_restore_cfg_space,
7951 					IPR_SHUTDOWN_NONE);
7952 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
7953 	return PCI_ERS_RESULT_RECOVERED;
7954 }
7955 
7956 /**
7957  * ipr_pci_perm_failure - Called when PCI slot is dead for good.
7958  * @pdev:	PCI device struct
7959  *
7960  * Description: This routine is called when the PCI bus has
7961  * permanently failed.
7962  */
7963 static void ipr_pci_perm_failure(struct pci_dev *pdev)
7964 {
7965 	unsigned long flags = 0;
7966 	struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
7967 
7968 	spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
7969 	if (ioa_cfg->sdt_state == WAIT_FOR_DUMP)
7970 		ioa_cfg->sdt_state = ABORT_DUMP;
7971 	ioa_cfg->reset_retries = IPR_NUM_RESET_RELOAD_RETRIES;
7972 	ioa_cfg->in_ioa_bringdown = 1;
7973 	ioa_cfg->allow_cmds = 0;
7974 	ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
7975 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
7976 }
7977 
7978 /**
7979  * ipr_pci_error_detected - Called when a PCI error is detected.
7980  * @pdev:	PCI device struct
7981  * @state:	PCI channel state
7982  *
7983  * Description: Called when a PCI error is detected.
7984  *
7985  * Return value:
7986  * 	PCI_ERS_RESULT_NEED_RESET or PCI_ERS_RESULT_DISCONNECT
7987  */
7988 static pci_ers_result_t ipr_pci_error_detected(struct pci_dev *pdev,
7989 					       pci_channel_state_t state)
7990 {
7991 	switch (state) {
7992 	case pci_channel_io_frozen:
7993 		ipr_pci_frozen(pdev);
7994 		return PCI_ERS_RESULT_NEED_RESET;
7995 	case pci_channel_io_perm_failure:
7996 		ipr_pci_perm_failure(pdev);
7997 		return PCI_ERS_RESULT_DISCONNECT;
7999 	default:
8000 		break;
8001 	}
8002 	return PCI_ERS_RESULT_NEED_RESET;
8003 }
8004 
8005 /**
8006  * ipr_probe_ioa_part2 - Initializes IOAs found in ipr_probe_ioa(..)
8007  * @ioa_cfg:	ioa cfg struct
8008  *
8009  * Description: This is the second phase of adapter initialization.
8010  * This function takes care of initializing the adapter to the point
8011  * where it can accept new commands.
8012  *
8013  * Return value:
8014  * 	0 on success / -EIO on failure
8015  **/
8016 static int __devinit ipr_probe_ioa_part2(struct ipr_ioa_cfg *ioa_cfg)
8017 {
8018 	int rc = 0;
8019 	unsigned long host_lock_flags = 0;
8020 
8021 	ENTER;
8022 	spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
8023 	dev_dbg(&ioa_cfg->pdev->dev, "ioa_cfg adx: 0x%p\n", ioa_cfg);
8024 	if (ioa_cfg->needs_hard_reset) {
8025 		ioa_cfg->needs_hard_reset = 0;
8026 		ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
8027 	} else
8028 		_ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_enable_ioa,
8029 					IPR_SHUTDOWN_NONE);
8030 
8031 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
8032 	wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
8033 	spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
8034 
8035 	if (ioa_cfg->ioa_is_dead) {
8036 		rc = -EIO;
8037 	} else if (ipr_invalid_adapter(ioa_cfg)) {
8038 		if (!ipr_testmode)
8039 			rc = -EIO;
8040 
8041 		dev_err(&ioa_cfg->pdev->dev,
8042 			"Adapter not supported in this hardware configuration.\n");
8043 	}
8044 
8045 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
8046 
8047 	LEAVE;
8048 	return rc;
8049 }
8050 
8051 /**
8052  * ipr_free_cmd_blks - Frees command blocks allocated for an adapter
8053  * @ioa_cfg:	ioa config struct
8054  *
8055  * Return value:
8056  * 	none
8057  **/
8058 static void ipr_free_cmd_blks(struct ipr_ioa_cfg *ioa_cfg)
8059 {
8060 	int i;
8061 
8062 	for (i = 0; i < IPR_NUM_CMD_BLKS; i++) {
8063 		if (ioa_cfg->ipr_cmnd_list[i])
8064 			pci_pool_free(ioa_cfg->ipr_cmd_pool,
8065 				      ioa_cfg->ipr_cmnd_list[i],
8066 				      ioa_cfg->ipr_cmnd_list_dma[i]);
8067 
8068 		ioa_cfg->ipr_cmnd_list[i] = NULL;
8069 	}
8070 
8071 	if (ioa_cfg->ipr_cmd_pool)
8072 		pci_pool_destroy (ioa_cfg->ipr_cmd_pool);
8073 
8074 	ioa_cfg->ipr_cmd_pool = NULL;
8075 }
8076 
8077 /**
8078  * ipr_free_mem - Frees memory allocated for an adapter
8079  * @ioa_cfg:	ioa cfg struct
8080  *
8081  * Return value:
8082  * 	nothing
8083  **/
8084 static void ipr_free_mem(struct ipr_ioa_cfg *ioa_cfg)
8085 {
8086 	int i;
8087 
8088 	kfree(ioa_cfg->res_entries);
8089 	pci_free_consistent(ioa_cfg->pdev, sizeof(struct ipr_misc_cbs),
8090 			    ioa_cfg->vpd_cbs, ioa_cfg->vpd_cbs_dma);
8091 	ipr_free_cmd_blks(ioa_cfg);
8092 	pci_free_consistent(ioa_cfg->pdev, sizeof(u32) * IPR_NUM_CMD_BLKS,
8093 			    ioa_cfg->host_rrq, ioa_cfg->host_rrq_dma);
8094 	pci_free_consistent(ioa_cfg->pdev, ioa_cfg->cfg_table_size,
8095 			    ioa_cfg->u.cfg_table,
8096 			    ioa_cfg->cfg_table_dma);
8097 
8098 	for (i = 0; i < IPR_NUM_HCAMS; i++) {
8099 		pci_free_consistent(ioa_cfg->pdev,
8100 				    sizeof(struct ipr_hostrcb),
8101 				    ioa_cfg->hostrcb[i],
8102 				    ioa_cfg->hostrcb_dma[i]);
8103 	}
8104 
8105 	ipr_free_dump(ioa_cfg);
8106 	kfree(ioa_cfg->trace);
8107 }
8108 
8109 /**
8110  * ipr_free_all_resources - Free all allocated resources for an adapter.
8111  * @ioa_cfg:	ioa config struct
8112  *
8113  * This function frees all allocated resources for the
8114  * specified adapter.
8115  *
8116  * Return value:
8117  * 	none
8118  **/
8119 static void ipr_free_all_resources(struct ipr_ioa_cfg *ioa_cfg)
8120 {
8121 	struct pci_dev *pdev = ioa_cfg->pdev;
8122 
8123 	ENTER;
8124 	free_irq(pdev->irq, ioa_cfg);
8125 	pci_disable_msi(pdev);
8126 	iounmap(ioa_cfg->hdw_dma_regs);
8127 	pci_release_regions(pdev);
8128 	ipr_free_mem(ioa_cfg);
8129 	scsi_host_put(ioa_cfg->host);
8130 	pci_disable_device(pdev);
8131 	LEAVE;
8132 }
8133 
8134 /**
8135  * ipr_alloc_cmd_blks - Allocate command blocks for an adapter
8136  * @ioa_cfg:	ioa config struct
8137  *
8138  * Return value:
8139  * 	0 on success / -ENOMEM on allocation failure
8140  **/
8141 static int __devinit ipr_alloc_cmd_blks(struct ipr_ioa_cfg *ioa_cfg)
8142 {
8143 	struct ipr_cmnd *ipr_cmd;
8144 	struct ipr_ioarcb *ioarcb;
8145 	dma_addr_t dma_addr;
8146 	int i;
8147 
8148 	ioa_cfg->ipr_cmd_pool = pci_pool_create (IPR_NAME, ioa_cfg->pdev,
8149 						 sizeof(struct ipr_cmnd), 16, 0);
8150 
8151 	if (!ioa_cfg->ipr_cmd_pool)
8152 		return -ENOMEM;
8153 
8154 	for (i = 0; i < IPR_NUM_CMD_BLKS; i++) {
8155 		ipr_cmd = pci_pool_alloc (ioa_cfg->ipr_cmd_pool, GFP_KERNEL, &dma_addr);
8156 
8157 		if (!ipr_cmd) {
8158 			ipr_free_cmd_blks(ioa_cfg);
8159 			return -ENOMEM;
8160 		}
8161 
8162 		memset(ipr_cmd, 0, sizeof(*ipr_cmd));
8163 		ioa_cfg->ipr_cmnd_list[i] = ipr_cmd;
8164 		ioa_cfg->ipr_cmnd_list_dma[i] = dma_addr;
8165 
8166 		ioarcb = &ipr_cmd->ioarcb;
8167 		ipr_cmd->dma_addr = dma_addr;
8168 		if (ioa_cfg->sis64)
8169 			ioarcb->a.ioarcb_host_pci_addr64 = cpu_to_be64(dma_addr);
8170 		else
8171 			ioarcb->a.ioarcb_host_pci_addr = cpu_to_be32(dma_addr);
8172 
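		/*
		 * The command index is stored shifted left by two; the low
		 * two bits of a posted host RRQ entry are used by the
		 * adapter as flag bits.
		 */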
8173 		ioarcb->host_response_handle = cpu_to_be32(i << 2);
8174 		if (ioa_cfg->sis64) {
8175 			ioarcb->u.sis64_addr_data.data_ioadl_addr =
8176 				cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, i.ioadl64));
8177 			ioarcb->u.sis64_addr_data.ioasa_host_pci_addr =
8178 				cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, s.ioasa64));
8179 		} else {
8180 			ioarcb->write_ioadl_addr =
8181 				cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, i.ioadl));
8182 			ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr;
8183 			ioarcb->ioasa_host_pci_addr =
8184 				cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, s.ioasa));
8185 		}
8186 		ioarcb->ioasa_len = cpu_to_be16(sizeof(struct ipr_ioasa));
8187 		ipr_cmd->cmd_index = i;
8188 		ipr_cmd->ioa_cfg = ioa_cfg;
8189 		ipr_cmd->sense_buffer_dma = dma_addr +
8190 			offsetof(struct ipr_cmnd, sense_buffer);
8191 
8192 		list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
8193 	}
8194 
8195 	return 0;
8196 }
8197 
8198 /**
8199  * ipr_alloc_mem - Allocate memory for an adapter
8200  * @ioa_cfg:	ioa config struct
8201  *
8202  * Return value:
8203  * 	0 on success / non-zero for error
8204  **/
8205 static int __devinit ipr_alloc_mem(struct ipr_ioa_cfg *ioa_cfg)
8206 {
8207 	struct pci_dev *pdev = ioa_cfg->pdev;
8208 	int i, rc = -ENOMEM;
8209 
8210 	ENTER;
8211 	ioa_cfg->res_entries = kzalloc(sizeof(struct ipr_resource_entry) *
8212 				       ioa_cfg->max_devs_supported, GFP_KERNEL);
8213 
8214 	if (!ioa_cfg->res_entries)
8215 		goto out;
8216 
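	/*
	 * On SIS-64 adapters the driver assigns SCSI target/array/vset IDs
	 * itself; these bitmaps track which IDs are in use.
	 */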
8217 	if (ioa_cfg->sis64) {
8218 		ioa_cfg->target_ids = kzalloc(sizeof(unsigned long) *
8219 					      BITS_TO_LONGS(ioa_cfg->max_devs_supported), GFP_KERNEL);
8220 		ioa_cfg->array_ids = kzalloc(sizeof(unsigned long) *
8221 					     BITS_TO_LONGS(ioa_cfg->max_devs_supported), GFP_KERNEL);
8222 		ioa_cfg->vset_ids = kzalloc(sizeof(unsigned long) *
8223 					    BITS_TO_LONGS(ioa_cfg->max_devs_supported), GFP_KERNEL);
8224 	}
8225 
8226 	for (i = 0; i < ioa_cfg->max_devs_supported; i++) {
8227 		list_add_tail(&ioa_cfg->res_entries[i].queue, &ioa_cfg->free_res_q);
8228 		ioa_cfg->res_entries[i].ioa_cfg = ioa_cfg;
8229 	}
8230 
8231 	ioa_cfg->vpd_cbs = pci_alloc_consistent(ioa_cfg->pdev,
8232 						sizeof(struct ipr_misc_cbs),
8233 						&ioa_cfg->vpd_cbs_dma);
8234 
8235 	if (!ioa_cfg->vpd_cbs)
8236 		goto out_free_res_entries;
8237 
8238 	if (ipr_alloc_cmd_blks(ioa_cfg))
8239 		goto out_free_vpd_cbs;
8240 
8241 	ioa_cfg->host_rrq = pci_alloc_consistent(ioa_cfg->pdev,
8242 						 sizeof(u32) * IPR_NUM_CMD_BLKS,
8243 						 &ioa_cfg->host_rrq_dma);
8244 
8245 	if (!ioa_cfg->host_rrq)
8246 		goto out_ipr_free_cmd_blocks;
8247 
8248 	ioa_cfg->u.cfg_table = pci_alloc_consistent(ioa_cfg->pdev,
8249 						    ioa_cfg->cfg_table_size,
8250 						    &ioa_cfg->cfg_table_dma);
8251 
8252 	if (!ioa_cfg->u.cfg_table)
8253 		goto out_free_host_rrq;
8254 
8255 	for (i = 0; i < IPR_NUM_HCAMS; i++) {
8256 		ioa_cfg->hostrcb[i] = pci_alloc_consistent(ioa_cfg->pdev,
8257 							   sizeof(struct ipr_hostrcb),
8258 							   &ioa_cfg->hostrcb_dma[i]);
8259 
8260 		if (!ioa_cfg->hostrcb[i])
8261 			goto out_free_hostrcb_dma;
8262 
8263 		ioa_cfg->hostrcb[i]->hostrcb_dma =
8264 			ioa_cfg->hostrcb_dma[i] + offsetof(struct ipr_hostrcb, hcam);
8265 		ioa_cfg->hostrcb[i]->ioa_cfg = ioa_cfg;
8266 		list_add_tail(&ioa_cfg->hostrcb[i]->queue, &ioa_cfg->hostrcb_free_q);
8267 	}
8268 
8269 	ioa_cfg->trace = kzalloc(sizeof(struct ipr_trace_entry) *
8270 				 IPR_NUM_TRACE_ENTRIES, GFP_KERNEL);
8271 
8272 	if (!ioa_cfg->trace)
8273 		goto out_free_hostrcb_dma;
8274 
8275 	rc = 0;
8276 out:
8277 	LEAVE;
8278 	return rc;
8279 
8280 out_free_hostrcb_dma:
8281 	while (i-- > 0) {
8282 		pci_free_consistent(pdev, sizeof(struct ipr_hostrcb),
8283 				    ioa_cfg->hostrcb[i],
8284 				    ioa_cfg->hostrcb_dma[i]);
8285 	}
8286 	pci_free_consistent(pdev, ioa_cfg->cfg_table_size,
8287 			    ioa_cfg->u.cfg_table,
8288 			    ioa_cfg->cfg_table_dma);
8289 out_free_host_rrq:
8290 	pci_free_consistent(pdev, sizeof(u32) * IPR_NUM_CMD_BLKS,
8291 			    ioa_cfg->host_rrq, ioa_cfg->host_rrq_dma);
8292 out_ipr_free_cmd_blocks:
8293 	ipr_free_cmd_blks(ioa_cfg);
8294 out_free_vpd_cbs:
8295 	pci_free_consistent(pdev, sizeof(struct ipr_misc_cbs),
8296 			    ioa_cfg->vpd_cbs, ioa_cfg->vpd_cbs_dma);
8297 out_free_res_entries:
8298 	kfree(ioa_cfg->res_entries);
8299 	goto out;
8300 }
8301 
8302 /**
8303  * ipr_initialize_bus_attr - Initialize SCSI bus attributes to default values
8304  * @ioa_cfg:	ioa config struct
8305  *
8306  * Return value:
8307  * 	none
8308  **/
8309 static void __devinit ipr_initialize_bus_attr(struct ipr_ioa_cfg *ioa_cfg)
8310 {
8311 	int i;
8312 
8313 	for (i = 0; i < IPR_MAX_NUM_BUSES; i++) {
8314 		ioa_cfg->bus_attr[i].bus = i;
8315 		ioa_cfg->bus_attr[i].qas_enabled = 0;
8316 		ioa_cfg->bus_attr[i].bus_width = IPR_DEFAULT_BUS_WIDTH;
8317 		if (ipr_max_speed < ARRAY_SIZE(ipr_max_bus_speeds))
8318 			ioa_cfg->bus_attr[i].max_xfer_rate = ipr_max_bus_speeds[ipr_max_speed];
8319 		else
8320 			ioa_cfg->bus_attr[i].max_xfer_rate = IPR_U160_SCSI_RATE;
8321 	}
8322 }
8323 
8324 /**
8325  * ipr_init_ioa_cfg - Initialize IOA config struct
8326  * @ioa_cfg:	ioa config struct
8327  * @host:		scsi host struct
8328  * @pdev:		PCI dev struct
8329  *
8330  * Return value:
8331  * 	none
8332  **/
8333 static void __devinit ipr_init_ioa_cfg(struct ipr_ioa_cfg *ioa_cfg,
8334 				       struct Scsi_Host *host, struct pci_dev *pdev)
8335 {
8336 	const struct ipr_interrupt_offsets *p;
8337 	struct ipr_interrupts *t;
8338 	void __iomem *base;
8339 
8340 	ioa_cfg->host = host;
8341 	ioa_cfg->pdev = pdev;
8342 	ioa_cfg->log_level = ipr_log_level;
8343 	ioa_cfg->doorbell = IPR_DOORBELL;
8344 	sprintf(ioa_cfg->eye_catcher, IPR_EYECATCHER);
8345 	sprintf(ioa_cfg->trace_start, IPR_TRACE_START_LABEL);
8346 	sprintf(ioa_cfg->ipr_free_label, IPR_FREEQ_LABEL);
8347 	sprintf(ioa_cfg->ipr_pending_label, IPR_PENDQ_LABEL);
8348 	sprintf(ioa_cfg->cfg_table_start, IPR_CFG_TBL_START);
8349 	sprintf(ioa_cfg->resource_table_label, IPR_RES_TABLE_LABEL);
8350 	sprintf(ioa_cfg->ipr_hcam_label, IPR_HCAM_LABEL);
8351 	sprintf(ioa_cfg->ipr_cmd_label, IPR_CMD_LABEL);
8352 
8353 	INIT_LIST_HEAD(&ioa_cfg->free_q);
8354 	INIT_LIST_HEAD(&ioa_cfg->pending_q);
8355 	INIT_LIST_HEAD(&ioa_cfg->hostrcb_free_q);
8356 	INIT_LIST_HEAD(&ioa_cfg->hostrcb_pending_q);
8357 	INIT_LIST_HEAD(&ioa_cfg->free_res_q);
8358 	INIT_LIST_HEAD(&ioa_cfg->used_res_q);
8359 	INIT_WORK(&ioa_cfg->work_q, ipr_worker_thread);
8360 	init_waitqueue_head(&ioa_cfg->reset_wait_q);
8361 	init_waitqueue_head(&ioa_cfg->msi_wait_q);
8362 	ioa_cfg->sdt_state = INACTIVE;
8363 
8364 	ipr_initialize_bus_attr(ioa_cfg);
8365 	ioa_cfg->max_devs_supported = ipr_max_devs;
8366 
8367 	if (ioa_cfg->sis64) {
8368 		host->max_id = IPR_MAX_SIS64_TARGETS_PER_BUS;
8369 		host->max_lun = IPR_MAX_SIS64_LUNS_PER_TARGET;
8370 		if (ipr_max_devs > IPR_MAX_SIS64_DEVS)
8371 			ioa_cfg->max_devs_supported = IPR_MAX_SIS64_DEVS;
8372 	} else {
8373 		host->max_id = IPR_MAX_NUM_TARGETS_PER_BUS;
8374 		host->max_lun = IPR_MAX_NUM_LUNS_PER_TARGET;
8375 		if (ipr_max_devs > IPR_MAX_PHYSICAL_DEVS)
8376 			ioa_cfg->max_devs_supported = IPR_MAX_PHYSICAL_DEVS;
8377 	}
8378 	host->max_channel = IPR_MAX_BUS_TO_SCAN;
8379 	host->unique_id = host->host_no;
8380 	host->max_cmd_len = IPR_MAX_CDB_LEN;
8381 	pci_set_drvdata(pdev, ioa_cfg);
8382 
8383 	p = &ioa_cfg->chip_cfg->regs;
8384 	t = &ioa_cfg->regs;
8385 	base = ioa_cfg->hdw_dma_regs;
8386 
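	/* Convert the per-chip register offsets into absolute MMIO addresses. */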
8387 	t->set_interrupt_mask_reg = base + p->set_interrupt_mask_reg;
8388 	t->clr_interrupt_mask_reg = base + p->clr_interrupt_mask_reg;
8389 	t->clr_interrupt_mask_reg32 = base + p->clr_interrupt_mask_reg32;
8390 	t->sense_interrupt_mask_reg = base + p->sense_interrupt_mask_reg;
8391 	t->sense_interrupt_mask_reg32 = base + p->sense_interrupt_mask_reg32;
8392 	t->clr_interrupt_reg = base + p->clr_interrupt_reg;
8393 	t->clr_interrupt_reg32 = base + p->clr_interrupt_reg32;
8394 	t->sense_interrupt_reg = base + p->sense_interrupt_reg;
8395 	t->sense_interrupt_reg32 = base + p->sense_interrupt_reg32;
8396 	t->ioarrin_reg = base + p->ioarrin_reg;
8397 	t->sense_uproc_interrupt_reg = base + p->sense_uproc_interrupt_reg;
8398 	t->sense_uproc_interrupt_reg32 = base + p->sense_uproc_interrupt_reg32;
8399 	t->set_uproc_interrupt_reg = base + p->set_uproc_interrupt_reg;
8400 	t->set_uproc_interrupt_reg32 = base + p->set_uproc_interrupt_reg32;
8401 	t->clr_uproc_interrupt_reg = base + p->clr_uproc_interrupt_reg;
8402 	t->clr_uproc_interrupt_reg32 = base + p->clr_uproc_interrupt_reg32;
8403 
8404 	if (ioa_cfg->sis64) {
8405 		t->init_feedback_reg = base + p->init_feedback_reg;
8406 		t->dump_addr_reg = base + p->dump_addr_reg;
8407 		t->dump_data_reg = base + p->dump_data_reg;
8408 		t->endian_swap_reg = base + p->endian_swap_reg;
8409 	}
8410 }
8411 
8412 /**
8413  * ipr_get_chip_info - Find adapter chip information
8414  * @dev_id:		PCI device id struct
8415  *
8416  * Return value:
8417  * 	ptr to chip information on success / NULL on failure
8418  **/
8419 static const struct ipr_chip_t * __devinit
8420 ipr_get_chip_info(const struct pci_device_id *dev_id)
8421 {
8422 	int i;
8423 
8424 	for (i = 0; i < ARRAY_SIZE(ipr_chip); i++)
8425 		if (ipr_chip[i].vendor == dev_id->vendor &&
8426 		    ipr_chip[i].device == dev_id->device)
8427 			return &ipr_chip[i];
8428 	return NULL;
8429 }
8430 
8431 /**
8432  * ipr_test_intr - Handle the interrupt generated in ipr_test_msi().
8433  * @irq:		interrupt request number
 * @devp:		pointer to the ioa config struct
8434  *
8435  * Description: Simply set the msi_received flag to 1 indicating that
8436  * Message Signaled Interrupts are supported.
8437  *
8438  * Return value:
8439  * 	IRQ_HANDLED
8440  **/
8441 static irqreturn_t __devinit ipr_test_intr(int irq, void *devp)
8442 {
8443 	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)devp;
8444 	unsigned long lock_flags = 0;
8445 	irqreturn_t rc = IRQ_HANDLED;
8446 
8447 	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
8448 
8449 	ioa_cfg->msi_received = 1;
8450 	wake_up(&ioa_cfg->msi_wait_q);
8451 
8452 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
8453 	return rc;
8454 }
8455 
8456 /**
8457  * ipr_test_msi - Test for Message Signaled Interrupt (MSI) support.
8458  * @ioa_cfg:	ioa config struct
 * @pdev:		PCI device struct
8459  *
8460  * Description: The return value from pci_enable_msi() cannot always be
8461  * trusted.  This routine sets up and initiates a test interrupt to determine
8462  * if the interrupt is received via the ipr_test_intr() service routine.
8463  * If the test fails, the driver will fall back to LSI.
8464  *
8465  * Return value:
8466  * 	0 on success / non-zero on failure
8467  **/
8468 static int __devinit ipr_test_msi(struct ipr_ioa_cfg *ioa_cfg,
8469 				  struct pci_dev *pdev)
8470 {
8471 	int rc;
8472 	volatile u32 int_reg;
8473 	unsigned long lock_flags = 0;
8474 
8475 	ENTER;
8476 
8477 	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
8478 	init_waitqueue_head(&ioa_cfg->msi_wait_q);
8479 	ioa_cfg->msi_received = 0;
8480 	ipr_mask_and_clear_interrupts(ioa_cfg, ~IPR_PCII_IOA_TRANS_TO_OPER);
8481 	writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE, ioa_cfg->regs.clr_interrupt_mask_reg32);
8482 	int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
8483 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
8484 
8485 	rc = request_irq(pdev->irq, ipr_test_intr, 0, IPR_NAME, ioa_cfg);
8486 	if (rc) {
8487 		dev_err(&pdev->dev, "Can not assign irq %d\n", pdev->irq);
8488 		return rc;
8489 	} else if (ipr_debug)
8490 		dev_info(&pdev->dev, "IRQ assigned: %d\n", pdev->irq);
8491 
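	/*
	 * Generate an IO debug acknowledge interrupt and wait up to one
	 * second for ipr_test_intr() to set msi_received.
	 */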
8492 	writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE, ioa_cfg->regs.sense_interrupt_reg32);
8493 	int_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
8494 	wait_event_timeout(ioa_cfg->msi_wait_q, ioa_cfg->msi_received, HZ);
8495 	ipr_mask_and_clear_interrupts(ioa_cfg, ~IPR_PCII_IOA_TRANS_TO_OPER);
8496 
8497 	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
8498 	if (!ioa_cfg->msi_received) {
8499 		/* MSI test failed */
8500 		dev_info(&pdev->dev, "MSI test failed.  Falling back to LSI.\n");
8501 		rc = -EOPNOTSUPP;
8502 	} else if (ipr_debug)
8503 		dev_info(&pdev->dev, "MSI test succeeded.\n");
8504 
8505 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
8506 
8507 	free_irq(pdev->irq, ioa_cfg);
8508 
8509 	LEAVE;
8510 
8511 	return rc;
8512 }
8513 
8514 /**
8515  * ipr_probe_ioa - Allocates memory and does first stage of initialization
8516  * @pdev:		PCI device struct
8517  * @dev_id:		PCI device id struct
8518  *
8519  * Return value:
8520  * 	0 on success / non-zero on failure
8521  **/
8522 static int __devinit ipr_probe_ioa(struct pci_dev *pdev,
8523 				   const struct pci_device_id *dev_id)
8524 {
8525 	struct ipr_ioa_cfg *ioa_cfg;
8526 	struct Scsi_Host *host;
8527 	unsigned long ipr_regs_pci;
8528 	void __iomem *ipr_regs;
8529 	int rc = PCIBIOS_SUCCESSFUL;
8530 	volatile u32 mask, uproc, interrupts;
8531 
8532 	ENTER;
8533 
8534 	if ((rc = pci_enable_device(pdev))) {
8535 		dev_err(&pdev->dev, "Cannot enable adapter\n");
8536 		goto out;
8537 	}
8538 
8539 	dev_info(&pdev->dev, "Found IOA with IRQ: %d\n", pdev->irq);
8540 
8541 	host = scsi_host_alloc(&driver_template, sizeof(*ioa_cfg));
8542 
8543 	if (!host) {
8544 		dev_err(&pdev->dev, "call to scsi_host_alloc failed!\n");
8545 		rc = -ENOMEM;
8546 		goto out_disable;
8547 	}
8548 
8549 	ioa_cfg = (struct ipr_ioa_cfg *)host->hostdata;
8550 	memset(ioa_cfg, 0, sizeof(struct ipr_ioa_cfg));
8551 	ata_host_init(&ioa_cfg->ata_host, &pdev->dev,
8552 		      sata_port_info.flags, &ipr_sata_ops);
8553 
8554 	ioa_cfg->ipr_chip = ipr_get_chip_info(dev_id);
8555 
8556 	if (!ioa_cfg->ipr_chip) {
8557 		dev_err(&pdev->dev, "Unknown adapter chipset 0x%04X 0x%04X\n",
8558 			dev_id->vendor, dev_id->device);
8559 		goto out_scsi_host_put;
8560 	}
8561 
8562 	/* set SIS 32 or SIS 64 */
8563 	ioa_cfg->sis64 = ioa_cfg->ipr_chip->sis_type == IPR_SIS64 ? 1 : 0;
8564 	ioa_cfg->chip_cfg = ioa_cfg->ipr_chip->cfg;
8565 
8566 	if (ipr_transop_timeout)
8567 		ioa_cfg->transop_timeout = ipr_transop_timeout;
8568 	else if (dev_id->driver_data & IPR_USE_LONG_TRANSOP_TIMEOUT)
8569 		ioa_cfg->transop_timeout = IPR_LONG_OPERATIONAL_TIMEOUT;
8570 	else
8571 		ioa_cfg->transop_timeout = IPR_OPERATIONAL_TIMEOUT;
8572 
8573 	ioa_cfg->revid = pdev->revision;
8574 
8575 	ipr_regs_pci = pci_resource_start(pdev, 0);
8576 
8577 	rc = pci_request_regions(pdev, IPR_NAME);
8578 	if (rc < 0) {
8579 		dev_err(&pdev->dev,
8580 			"Couldn't register memory range of registers\n");
8581 		goto out_scsi_host_put;
8582 	}
8583 
8584 	ipr_regs = pci_ioremap_bar(pdev, 0);
8585 
8586 	if (!ipr_regs) {
8587 		dev_err(&pdev->dev,
8588 			"Couldn't map memory range of registers\n");
8589 		rc = -ENOMEM;
8590 		goto out_release_regions;
8591 	}
8592 
8593 	ioa_cfg->hdw_dma_regs = ipr_regs;
8594 	ioa_cfg->hdw_dma_regs_pci = ipr_regs_pci;
8595 	ioa_cfg->ioa_mailbox = ioa_cfg->chip_cfg->mailbox + ipr_regs;
8596 
8597 	ipr_init_ioa_cfg(ioa_cfg, host, pdev);
8598 
8599 	pci_set_master(pdev);
8600 
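	/*
	 * SIS-64 adapters are tried with a 64 bit DMA mask first, falling
	 * back to a 32 bit mask if that cannot be set; SIS-32 adapters use
	 * a 32 bit DMA mask.
	 */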
8601 	if (ioa_cfg->sis64) {
8602 		rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
8603 		if (rc < 0) {
8604 			dev_dbg(&pdev->dev, "Failed to set 64 bit PCI DMA mask\n");
8605 			rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
8606 		}
8607 
8608 	} else
8609 		rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
8610 
8611 	if (rc < 0) {
8612 		dev_err(&pdev->dev, "Failed to set PCI DMA mask\n");
8613 		goto cleanup_nomem;
8614 	}
8615 
8616 	rc = pci_write_config_byte(pdev, PCI_CACHE_LINE_SIZE,
8617 				   ioa_cfg->chip_cfg->cache_line_size);
8618 
8619 	if (rc != PCIBIOS_SUCCESSFUL) {
8620 		dev_err(&pdev->dev, "Write of cache line size failed\n");
8621 		rc = -EIO;
8622 		goto cleanup_nomem;
8623 	}
8624 
8625 	/* Enable MSI style interrupts if they are supported. */
8626 	if (ioa_cfg->ipr_chip->intr_type == IPR_USE_MSI && !pci_enable_msi(pdev)) {
8627 		rc = ipr_test_msi(ioa_cfg, pdev);
8628 		if (rc == -EOPNOTSUPP)
8629 			pci_disable_msi(pdev);
8630 		else if (rc)
8631 			goto cleanup_nomem;
8632 		else
8633 			dev_info(&pdev->dev, "MSI enabled with IRQ: %d\n", pdev->irq);
8634 	} else if (ipr_debug)
8635 		dev_info(&pdev->dev, "Cannot enable MSI.\n");
8636 
8637 	/* Save away PCI config space for use following IOA reset */
8638 	rc = pci_save_state(pdev);
8639 
8640 	if (rc != PCIBIOS_SUCCESSFUL) {
8641 		dev_err(&pdev->dev, "Failed to save PCI config space\n");
8642 		rc = -EIO;
8643 		goto cleanup_nomem;
8644 	}
8645 
8646 	if ((rc = ipr_save_pcix_cmd_reg(ioa_cfg)))
8647 		goto cleanup_nomem;
8648 
8649 	if ((rc = ipr_set_pcix_cmd_reg(ioa_cfg)))
8650 		goto cleanup_nomem;
8651 
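	/*
	 * The config table is a fixed header followed by one entry per
	 * supported device; the header and entry formats differ between
	 * SIS-32 and SIS-64.
	 */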
8652 	if (ioa_cfg->sis64)
8653 		ioa_cfg->cfg_table_size = (sizeof(struct ipr_config_table_hdr64)
8654 				+ ((sizeof(struct ipr_config_table_entry64)
8655 				* ioa_cfg->max_devs_supported)));
8656 	else
8657 		ioa_cfg->cfg_table_size = (sizeof(struct ipr_config_table_hdr)
8658 				+ ((sizeof(struct ipr_config_table_entry)
8659 				* ioa_cfg->max_devs_supported)));
8660 
8661 	rc = ipr_alloc_mem(ioa_cfg);
8662 	if (rc < 0) {
8663 		dev_err(&pdev->dev,
8664 			"Couldn't allocate enough memory for device driver!\n");
8665 		goto cleanup_nomem;
8666 	}
8667 
8668 	/*
8669 	 * If HRRQ updated interrupt is not masked, or reset alert is set,
8670 	 * the card is in an unknown state and needs a hard reset
8671 	 */
8672 	mask = readl(ioa_cfg->regs.sense_interrupt_mask_reg32);
8673 	interrupts = readl(ioa_cfg->regs.sense_interrupt_reg32);
8674 	uproc = readl(ioa_cfg->regs.sense_uproc_interrupt_reg32);
8675 	if ((mask & IPR_PCII_HRRQ_UPDATED) == 0 || (uproc & IPR_UPROCI_RESET_ALERT))
8676 		ioa_cfg->needs_hard_reset = 1;
8677 	if (interrupts & IPR_PCII_ERROR_INTERRUPTS)
8678 		ioa_cfg->needs_hard_reset = 1;
8679 	if (interrupts & IPR_PCII_IOA_UNIT_CHECKED)
8680 		ioa_cfg->ioa_unit_checked = 1;
8681 
8682 	ipr_mask_and_clear_interrupts(ioa_cfg, ~IPR_PCII_IOA_TRANS_TO_OPER);
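	/*
	 * If the MSI test succeeded the vector is exclusive to this adapter;
	 * otherwise register the handler with IRQF_SHARED for legacy (LSI)
	 * interrupts.
	 */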
8683 	rc = request_irq(pdev->irq, ipr_isr,
8684 			 ioa_cfg->msi_received ? 0 : IRQF_SHARED,
8685 			 IPR_NAME, ioa_cfg);
8686 
8687 	if (rc) {
8688 		dev_err(&pdev->dev, "Couldn't register IRQ %d! rc=%d\n",
8689 			pdev->irq, rc);
8690 		goto cleanup_nolog;
8691 	}
8692 
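	/*
	 * Adapters flagged for PCI warm reset, and revision 0 Obsidian-E
	 * adapters, are reset via a PCI slot reset rather than by starting
	 * BIST.
	 */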
8693 	if ((dev_id->driver_data & IPR_USE_PCI_WARM_RESET) ||
8694 	    (dev_id->device == PCI_DEVICE_ID_IBM_OBSIDIAN_E && !ioa_cfg->revid)) {
8695 		ioa_cfg->needs_warm_reset = 1;
8696 		ioa_cfg->reset = ipr_reset_slot_reset;
8697 	} else
8698 		ioa_cfg->reset = ipr_reset_start_bist;
8699 
8700 	spin_lock(&ipr_driver_lock);
8701 	list_add_tail(&ioa_cfg->queue, &ipr_ioa_head);
8702 	spin_unlock(&ipr_driver_lock);
8703 
8704 	LEAVE;
8705 out:
8706 	return rc;
8707 
8708 cleanup_nolog:
8709 	ipr_free_mem(ioa_cfg);
8710 cleanup_nomem:
8711 	iounmap(ipr_regs);
8713 	pci_disable_msi(pdev);
8714 out_release_regions:
8715 	pci_release_regions(pdev);
8716 out_scsi_host_put:
8717 	scsi_host_put(host);
8718 out_disable:
8719 	pci_disable_device(pdev);
8720 	goto out;
8721 }
8722 
8723 /**
8724  * ipr_scan_vsets - Scans for VSET devices
8725  * @ioa_cfg:	ioa config struct
8726  *
8727  * Description: Since the VSET resources do not follow SAM in that we can have
8728  * sparse LUNs with no LUN 0, we have to scan for these ourselves.
8729  *
8730  * Return value:
8731  * 	none
8732  **/
8733 static void ipr_scan_vsets(struct ipr_ioa_cfg *ioa_cfg)
8734 {
8735 	int target, lun;
8736 
8737 	for (target = 0; target < IPR_MAX_NUM_TARGETS_PER_BUS; target++)
8738 		for (lun = 0; lun < IPR_MAX_NUM_VSET_LUNS_PER_TARGET; lun++ )
8739 		for (lun = 0; lun < IPR_MAX_NUM_VSET_LUNS_PER_TARGET; lun++)
8740 }
8741 
8742 /**
8743  * ipr_initiate_ioa_bringdown - Bring down an adapter
8744  * @ioa_cfg:		ioa config struct
8745  * @shutdown_type:	shutdown type
8746  *
8747  * Description: This function will initiate bringing down the adapter.
8748  * This consists of issuing an IOA shutdown to the adapter
8749  * to flush the cache, and running BIST.
8750  * If the caller needs to wait on the completion of the reset,
8751  * the caller must sleep on the reset_wait_q.
8752  *
8753  * Return value:
8754  * 	none
8755  **/
8756 static void ipr_initiate_ioa_bringdown(struct ipr_ioa_cfg *ioa_cfg,
8757 				       enum ipr_shutdown_type shutdown_type)
8758 {
8759 	ENTER;
8760 	if (ioa_cfg->sdt_state == WAIT_FOR_DUMP)
8761 		ioa_cfg->sdt_state = ABORT_DUMP;
8762 	ioa_cfg->reset_retries = 0;
8763 	ioa_cfg->in_ioa_bringdown = 1;
8764 	ipr_initiate_ioa_reset(ioa_cfg, shutdown_type);
8765 	LEAVE;
8766 }
8767 
8768 /**
8769  * __ipr_remove - Remove a single adapter
8770  * @pdev:	pci device struct
8771  *
8772  * Adapter hot plug remove entry point.
8773  *
8774  * Return value:
8775  * 	none
8776  **/
8777 static void __ipr_remove(struct pci_dev *pdev)
8778 {
8779 	unsigned long host_lock_flags = 0;
8780 	struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
8781 	ENTER;
8782 
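	/*
	 * Wait for any reset/reload already in progress to complete before
	 * initiating the bringdown.
	 */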
8783 	spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
8784 	while(ioa_cfg->in_reset_reload) {
8785 	while (ioa_cfg->in_reset_reload) {
8786 		wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
8787 		spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
8788 	}
8789 
8790 	ipr_initiate_ioa_bringdown(ioa_cfg, IPR_SHUTDOWN_NORMAL);
8791 
8792 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
8793 	wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
8794 	flush_scheduled_work();
8795 	spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
8796 
8797 	spin_lock(&ipr_driver_lock);
8798 	list_del(&ioa_cfg->queue);
8799 	spin_unlock(&ipr_driver_lock);
8800 
8801 	if (ioa_cfg->sdt_state == ABORT_DUMP)
8802 		ioa_cfg->sdt_state = WAIT_FOR_DUMP;
8803 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
8804 
8805 	ipr_free_all_resources(ioa_cfg);
8806 
8807 	LEAVE;
8808 }
8809 
8810 /**
8811  * ipr_remove - IOA hot plug remove entry point
8812  * @pdev:	pci device struct
8813  *
8814  * Adapter hot plug remove entry point.
8815  *
8816  * Return value:
8817  * 	none
8818  **/
8819 static void __devexit ipr_remove(struct pci_dev *pdev)
8820 {
8821 	struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
8822 
8823 	ENTER;
8824 
8825 	ipr_remove_trace_file(&ioa_cfg->host->shost_dev.kobj,
8826 			      &ipr_trace_attr);
8827 	ipr_remove_dump_file(&ioa_cfg->host->shost_dev.kobj,
8828 			     &ipr_dump_attr);
8829 	scsi_remove_host(ioa_cfg->host);
8830 
8831 	__ipr_remove(pdev);
8832 
8833 	LEAVE;
8834 }
8835 
8836 /**
8837  * ipr_probe - Adapter hot plug add entry point
8838  *
8839  * Return value:
8840  * 	0 on success / non-zero on failure
8841  **/
8842 static int __devinit ipr_probe(struct pci_dev *pdev,
8843 			       const struct pci_device_id *dev_id)
8844 {
8845 	struct ipr_ioa_cfg *ioa_cfg;
8846 	int rc;
8847 
8848 	rc = ipr_probe_ioa(pdev, dev_id);
8849 
8850 	if (rc)
8851 		return rc;
8852 
8853 	ioa_cfg = pci_get_drvdata(pdev);
8854 	rc = ipr_probe_ioa_part2(ioa_cfg);
8855 
8856 	if (rc) {
8857 		__ipr_remove(pdev);
8858 		return rc;
8859 	}
8860 
8861 	rc = scsi_add_host(ioa_cfg->host, &pdev->dev);
8862 
8863 	if (rc) {
8864 		__ipr_remove(pdev);
8865 		return rc;
8866 	}
8867 
8868 	rc = ipr_create_trace_file(&ioa_cfg->host->shost_dev.kobj,
8869 				   &ipr_trace_attr);
8870 
8871 	if (rc) {
8872 		scsi_remove_host(ioa_cfg->host);
8873 		__ipr_remove(pdev);
8874 		return rc;
8875 	}
8876 
8877 	rc = ipr_create_dump_file(&ioa_cfg->host->shost_dev.kobj,
8878 				   &ipr_dump_attr);
8879 
8880 	if (rc) {
8881 		ipr_remove_trace_file(&ioa_cfg->host->shost_dev.kobj,
8882 				      &ipr_trace_attr);
8883 		scsi_remove_host(ioa_cfg->host);
8884 		__ipr_remove(pdev);
8885 		return rc;
8886 	}
8887 
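	/*
	 * Kick off device discovery: have the midlayer scan the bus, scan
	 * for sparse VSET LUNs, expose the IOA itself as a device, then
	 * allow the worker thread to add and remove midlayer devices.
	 */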
8888 	scsi_scan_host(ioa_cfg->host);
8889 	ipr_scan_vsets(ioa_cfg);
8890 	scsi_add_device(ioa_cfg->host, IPR_IOA_BUS, IPR_IOA_TARGET, IPR_IOA_LUN);
8891 	ioa_cfg->allow_ml_add_del = 1;
8892 	ioa_cfg->host->max_channel = IPR_VSET_BUS;
8893 	schedule_work(&ioa_cfg->work_q);
8894 	return 0;
8895 }
8896 
8897 /**
8898  * ipr_shutdown - Shutdown handler.
8899  * @pdev:	pci device struct
8900  *
8901  * This function is invoked upon system shutdown/reboot. It will issue
8902  * an adapter shutdown to the adapter to flush the write cache.
8903  *
8904  * Return value:
8905  * 	none
8906  **/
8907 static void ipr_shutdown(struct pci_dev *pdev)
8908 {
8909 	struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
8910 	unsigned long lock_flags = 0;
8911 
8912 	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
8913 	while(ioa_cfg->in_reset_reload) {
8914 	while (ioa_cfg->in_reset_reload) {
8915 		wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
8916 		spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
8917 	}
8918 
8919 	ipr_initiate_ioa_bringdown(ioa_cfg, IPR_SHUTDOWN_NORMAL);
8920 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
8921 	wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
8922 }
8923 
8924 static struct pci_device_id ipr_pci_table[] __devinitdata = {
8925 	{ PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
8926 		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_5702, 0, 0, 0 },
8927 	{ PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
8928 		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_5703, 0, 0, 0 },
8929 	{ PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
8930 		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_573D, 0, 0, 0 },
8931 	{ PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
8932 		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_573E, 0, 0, 0 },
8933 	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE,
8934 		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571B, 0, 0, 0 },
8935 	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE,
8936 		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572E, 0, 0, 0 },
8937 	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE,
8938 		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571A, 0, 0, 0 },
8939 	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE,
8940 		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_575B, 0, 0,
8941 		IPR_USE_LONG_TRANSOP_TIMEOUT },
8942 	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN,
8943 	      PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572A, 0, 0, 0 },
8944 	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN,
8945 	      PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572B, 0, 0,
8946 	      IPR_USE_LONG_TRANSOP_TIMEOUT },
8947 	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN,
8948 	      PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_575C, 0, 0,
8949 	      IPR_USE_LONG_TRANSOP_TIMEOUT },
8950 	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN,
8951 	      PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572A, 0, 0, 0 },
8952 	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN,
8953 	      PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572B, 0, 0,
8954 	      IPR_USE_LONG_TRANSOP_TIMEOUT},
8955 	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN,
8956 	      PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_575C, 0, 0,
8957 	      IPR_USE_LONG_TRANSOP_TIMEOUT },
8958 	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E,
8959 	      PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_574E, 0, 0,
8960 	      IPR_USE_LONG_TRANSOP_TIMEOUT },
8961 	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E,
8962 	      PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B3, 0, 0, 0 },
8963 	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E,
8964 	      PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57CC, 0, 0, 0 },
8965 	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E,
8966 	      PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B7, 0, 0,
8967 	      IPR_USE_LONG_TRANSOP_TIMEOUT | IPR_USE_PCI_WARM_RESET },
8968 	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_SNIPE,
8969 		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_2780, 0, 0, 0 },
8970 	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP,
8971 		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571E, 0, 0, 0 },
8972 	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP,
8973 		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571F, 0, 0,
8974 		IPR_USE_LONG_TRANSOP_TIMEOUT },
8975 	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP,
8976 		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572F, 0, 0,
8977 		IPR_USE_LONG_TRANSOP_TIMEOUT },
8978 	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
8979 		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B5, 0, 0, 0 },
8980 	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
8981 		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_574D, 0, 0, 0 },
8982 	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
8983 		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B2, 0, 0, 0 },
8984 	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_ASIC_E2,
8985 		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B4, 0, 0, 0 },
8986 	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_ASIC_E2,
8987 		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B1, 0, 0, 0 },
8988 	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_ASIC_E2,
8989 		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57C6, 0, 0, 0 },
8990 	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_ASIC_E2,
8991 		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_575D, 0, 0, 0 },
8992 	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_ASIC_E2,
8993 		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57CE, 0, 0, 0 },
8994 	{ }
8995 };
8996 MODULE_DEVICE_TABLE(pci, ipr_pci_table);
8997 
8998 static struct pci_error_handlers ipr_err_handler = {
8999 	.error_detected = ipr_pci_error_detected,
9000 	.slot_reset = ipr_pci_slot_reset,
9001 };
9002 
9003 static struct pci_driver ipr_driver = {
9004 	.name = IPR_NAME,
9005 	.id_table = ipr_pci_table,
9006 	.probe = ipr_probe,
9007 	.remove = __devexit_p(ipr_remove),
9008 	.shutdown = ipr_shutdown,
9009 	.err_handler = &ipr_err_handler,
9010 };
9011 
9012 /**
9013  * ipr_halt_done - Shutdown prepare completion
 * @ipr_cmd:	ipr command struct
9014  *
9015  * Return value:
9016  * 	none
9017  **/
9018 static void ipr_halt_done(struct ipr_cmnd *ipr_cmd)
9019 {
9020 	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
9021 
9022 	list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
9023 }
9024 
9025 /**
9026  * ipr_halt - Issue shutdown prepare to all adapters
 * @nb:		notifier block
 * @event:	reboot notifier event
 * @buf:	notifier data (unused)
9027  *
9028  * Return value:
9029  * 	NOTIFY_OK for shutdown events / NOTIFY_DONE otherwise
9030  **/
9031 static int ipr_halt(struct notifier_block *nb, ulong event, void *buf)
9032 {
9033 	struct ipr_cmnd *ipr_cmd;
9034 	struct ipr_ioa_cfg *ioa_cfg;
9035 	unsigned long flags = 0;
9036 
9037 	if (event != SYS_RESTART && event != SYS_HALT && event != SYS_POWER_OFF)
9038 		return NOTIFY_DONE;
9039 
9040 	spin_lock(&ipr_driver_lock);
9041 
9042 	list_for_each_entry(ioa_cfg, &ipr_ioa_head, queue) {
9043 		spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
9044 		if (!ioa_cfg->allow_cmds) {
9045 			spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
9046 			continue;
9047 		}
9048 
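		/*
		 * Issue an IOA shutdown prepare (normal) command to the
		 * adapter before the system halts or reboots.
		 */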
9049 		ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
9050 		ipr_cmd->ioarcb.res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
9051 		ipr_cmd->ioarcb.cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
9052 		ipr_cmd->ioarcb.cmd_pkt.cdb[0] = IPR_IOA_SHUTDOWN;
9053 		ipr_cmd->ioarcb.cmd_pkt.cdb[1] = IPR_SHUTDOWN_PREPARE_FOR_NORMAL;
9054 
9055 		ipr_do_req(ipr_cmd, ipr_halt_done, ipr_timeout, IPR_DEVICE_RESET_TIMEOUT);
9056 		spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
9057 	}
9058 	spin_unlock(&ipr_driver_lock);
9059 
9060 	return NOTIFY_OK;
9061 }
9062 
9063 static struct notifier_block ipr_notifier = {
9064 	.notifier_call = ipr_halt,
9065 };
9066 
9067 /**
9068  * ipr_init - Module entry point
9069  *
9070  * Return value:
9071  * 	0 on success / negative value on failure
9072  **/
9073 static int __init ipr_init(void)
9074 {
9075 	ipr_info("IBM Power RAID SCSI Device Driver version: %s %s\n",
9076 		 IPR_DRIVER_VERSION, IPR_DRIVER_DATE);
9077 
9078 	register_reboot_notifier(&ipr_notifier);
9079 	return pci_register_driver(&ipr_driver);
9080 }
9081 
9082 /**
9083  * ipr_exit - Module unload
9084  *
9085  * Module unload entry point.
9086  *
9087  * Return value:
9088  * 	none
9089  **/
9090 static void __exit ipr_exit(void)
9091 {
9092 	unregister_reboot_notifier(&ipr_notifier);
9093 	pci_unregister_driver(&ipr_driver);
9094 }
9095 
9096 module_init(ipr_init);
9097 module_exit(ipr_exit);
9098