1 /*
2  * ipr.c -- driver for IBM Power Linux RAID adapters
3  *
4  * Written By: Brian King <brking@us.ibm.com>, IBM Corporation
5  *
6  * Copyright (C) 2003, 2004 IBM Corporation
7  *
8  * This program is free software; you can redistribute it and/or modify
9  * it under the terms of the GNU General Public License as published by
10  * the Free Software Foundation; either version 2 of the License, or
11  * (at your option) any later version.
12  *
13  * This program is distributed in the hope that it will be useful,
14  * but WITHOUT ANY WARRANTY; without even the implied warranty of
15  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
16  * GNU General Public License for more details.
17  *
18  * You should have received a copy of the GNU General Public License
19  * along with this program; if not, write to the Free Software
20  * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
21  *
22  */
23 
24 /*
25  * Notes:
26  *
27  * This driver is used to control the following SCSI adapters:
28  *
29  * IBM iSeries: 5702, 5703, 2780, 5709, 570A, 570B
30  *
31  * IBM pSeries: PCI-X Dual Channel Ultra 320 SCSI RAID Adapter
32  *              PCI-X Dual Channel Ultra 320 SCSI Adapter
33  *              PCI-X Dual Channel Ultra 320 SCSI RAID Enablement Card
34  *              Embedded SCSI adapter on p615 and p655 systems
35  *
36  * Supported Hardware Features:
37  *	- Ultra 320 SCSI controller
38  *	- PCI-X host interface
39  *	- Embedded PowerPC RISC Processor and Hardware XOR DMA Engine
40  *	- Non-Volatile Write Cache
41  *	- Supports attachment of non-RAID disks, tape, and optical devices
42  *	- RAID Levels 0, 5, 10
43  *	- Hot spare
44  *	- Background Parity Checking
45  *	- Background Data Scrubbing
46  *	- Ability to increase the capacity of an existing RAID 5 disk array
47  *		by adding disks
48  *
49  * Driver Features:
50  *	- Tagged command queuing
51  *	- Adapter microcode download
52  *	- PCI hot plug
53  *	- SCSI device hot plug
54  *
55  */
56 
57 #include <linux/fs.h>
58 #include <linux/init.h>
59 #include <linux/types.h>
60 #include <linux/errno.h>
61 #include <linux/kernel.h>
62 #include <linux/ioport.h>
63 #include <linux/delay.h>
64 #include <linux/pci.h>
65 #include <linux/wait.h>
66 #include <linux/spinlock.h>
67 #include <linux/sched.h>
68 #include <linux/interrupt.h>
69 #include <linux/blkdev.h>
70 #include <linux/firmware.h>
71 #include <linux/module.h>
72 #include <linux/moduleparam.h>
73 #include <linux/libata.h>
74 #include <asm/io.h>
75 #include <asm/irq.h>
76 #include <asm/processor.h>
77 #include <scsi/scsi.h>
78 #include <scsi/scsi_host.h>
79 #include <scsi/scsi_tcq.h>
80 #include <scsi/scsi_eh.h>
81 #include <scsi/scsi_cmnd.h>
82 #include "ipr.h"
83 
84 /*
85  *   Global Data
86  */
87 static struct list_head ipr_ioa_head = LIST_HEAD_INIT(ipr_ioa_head);
88 static unsigned int ipr_log_level = IPR_DEFAULT_LOG_LEVEL;
89 static unsigned int ipr_max_speed = 1;
90 static int ipr_testmode = 0;
91 static unsigned int ipr_fastfail = 0;
92 static unsigned int ipr_transop_timeout = IPR_OPERATIONAL_TIMEOUT;
93 static unsigned int ipr_enable_cache = 1;
94 static unsigned int ipr_debug = 0;
95 static int ipr_auto_create = 1;
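/* ipr_driver_lock serializes updates to the global ipr_ioa_head list above */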
96 static DEFINE_SPINLOCK(ipr_driver_lock);
97 
98 /* This table describes the differences between DMA controller chips */
99 static const struct ipr_chip_cfg_t ipr_chip_cfg[] = {
100 	{ /* Gemstone, Citrine, Obsidian, and Obsidian-E */
101 		.mailbox = 0x0042C,
102 		.cache_line_size = 0x20,
103 		{
104 			.set_interrupt_mask_reg = 0x0022C,
105 			.clr_interrupt_mask_reg = 0x00230,
106 			.sense_interrupt_mask_reg = 0x0022C,
107 			.clr_interrupt_reg = 0x00228,
108 			.sense_interrupt_reg = 0x00224,
109 			.ioarrin_reg = 0x00404,
110 			.sense_uproc_interrupt_reg = 0x00214,
111 			.set_uproc_interrupt_reg = 0x00214,
112 			.clr_uproc_interrupt_reg = 0x00218
113 		}
114 	},
115 	{ /* Snipe and Scamp */
116 		.mailbox = 0x0052C,
117 		.cache_line_size = 0x20,
118 		{
119 			.set_interrupt_mask_reg = 0x00288,
120 			.clr_interrupt_mask_reg = 0x0028C,
121 			.sense_interrupt_mask_reg = 0x00288,
122 			.clr_interrupt_reg = 0x00284,
123 			.sense_interrupt_reg = 0x00280,
124 			.ioarrin_reg = 0x00504,
125 			.sense_uproc_interrupt_reg = 0x00290,
126 			.set_uproc_interrupt_reg = 0x00290,
127 			.clr_uproc_interrupt_reg = 0x00294
128 		}
129 	},
130 };
131 
132 static const struct ipr_chip_t ipr_chip[] = {
133 	{ PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE, &ipr_chip_cfg[0] },
134 	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE, &ipr_chip_cfg[0] },
135 	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN, &ipr_chip_cfg[0] },
136 	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN, &ipr_chip_cfg[0] },
137 	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E, &ipr_chip_cfg[0] },
138 	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_SNIPE, &ipr_chip_cfg[1] },
139 	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP, &ipr_chip_cfg[1] }
140 };
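
/*
 * At probe time the driver selects a register layout by matching the PCI
 * IDs of the adapter being initialized against the table above; a minimal
 * sketch (assuming the vendor/device/cfg field names declared for
 * ipr_chip_t in ipr.h):
 *
 *	for (i = 0; i < ARRAY_SIZE(ipr_chip); i++)
 *		if (ipr_chip[i].vendor == dev_id->vendor &&
 *		    ipr_chip[i].device == dev_id->device)
 *			return ipr_chip[i].cfg;
 */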
141 
142 static int ipr_max_bus_speeds[] = {
143 	IPR_80MBs_SCSI_RATE, IPR_U160_SCSI_RATE, IPR_U320_SCSI_RATE
144 };
145 
146 MODULE_AUTHOR("Brian King <brking@us.ibm.com>");
147 MODULE_DESCRIPTION("IBM Power RAID SCSI Adapter Driver");
148 module_param_named(max_speed, ipr_max_speed, uint, 0);
149 MODULE_PARM_DESC(max_speed, "Maximum bus speed (0-2). Default: 1=U160. Speeds: 0=80 MB/s, 1=U160, 2=U320");
150 module_param_named(log_level, ipr_log_level, uint, 0);
151 MODULE_PARM_DESC(log_level, "Set to 0 - 4 for increasing verbosity of the device driver");
152 module_param_named(testmode, ipr_testmode, int, 0);
153 MODULE_PARM_DESC(testmode, "DANGEROUS!!! Allows unsupported configurations");
154 module_param_named(fastfail, ipr_fastfail, int, 0);
155 MODULE_PARM_DESC(fastfail, "Reduce timeouts and retries");
156 module_param_named(transop_timeout, ipr_transop_timeout, int, 0);
157 MODULE_PARM_DESC(transop_timeout, "Time in seconds to wait for the adapter to become operational (default: 300)");
158 module_param_named(enable_cache, ipr_enable_cache, int, 0);
159 MODULE_PARM_DESC(enable_cache, "Enable adapter's non-volatile write cache (default: 1)");
160 module_param_named(debug, ipr_debug, int, 0);
161 MODULE_PARM_DESC(debug, "Enable device driver debug logging. Set to 1 to enable. (default: 0)");
162 module_param_named(auto_create, ipr_auto_create, int, 0);
163 MODULE_PARM_DESC(auto_create, "Auto-create single device RAID 0 arrays when initialized (default: 1)");
164 MODULE_LICENSE("GPL");
165 MODULE_VERSION(IPR_DRIVER_VERSION);
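
/*
 * The module parameters above are set at load time, e.g. (hypothetical
 * values):
 *
 *	modprobe ipr max_speed=2 log_level=2 fastfail=1
 *
 * All are registered with permission 0, so they are not exposed under
 * /sys/module/ipr/parameters once the module is loaded.
 */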
166 
167 /*  A constant array of IOASCs/URCs/Error Messages */
168 static const
169 struct ipr_error_table_t ipr_error_table[] = {
170 	{0x00000000, 1, 1,
171 	"8155: An unknown error was received"},
172 	{0x00330000, 0, 0,
173 	"Soft underlength error"},
174 	{0x005A0000, 0, 0,
175 	"Command to be cancelled not found"},
176 	{0x00808000, 0, 0,
177 	"Qualified success"},
178 	{0x01080000, 1, 1,
179 	"FFFE: Soft device bus error recovered by the IOA"},
180 	{0x01088100, 0, 1,
181 	"4101: Soft device bus fabric error"},
182 	{0x01170600, 0, 1,
183 	"FFF9: Device sector reassign successful"},
184 	{0x01170900, 0, 1,
185 	"FFF7: Media error recovered by device rewrite procedures"},
186 	{0x01180200, 0, 1,
187 	"7001: IOA sector reassignment successful"},
188 	{0x01180500, 0, 1,
189 	"FFF9: Soft media error. Sector reassignment recommended"},
190 	{0x01180600, 0, 1,
191 	"FFF7: Media error recovered by IOA rewrite procedures"},
192 	{0x01418000, 0, 1,
193 	"FF3D: Soft PCI bus error recovered by the IOA"},
194 	{0x01440000, 1, 1,
195 	"FFF6: Device hardware error recovered by the IOA"},
196 	{0x01448100, 0, 1,
197 	"FFF6: Device hardware error recovered by the device"},
198 	{0x01448200, 1, 1,
199 	"FF3D: Soft IOA error recovered by the IOA"},
200 	{0x01448300, 0, 1,
201 	"FFFA: Undefined device response recovered by the IOA"},
202 	{0x014A0000, 1, 1,
203 	"FFF6: Device bus error, message or command phase"},
204 	{0x014A8000, 0, 1,
205 	"FFFE: Task Management Function failed"},
206 	{0x015D0000, 0, 1,
207 	"FFF6: Failure prediction threshold exceeded"},
208 	{0x015D9200, 0, 1,
209 	"8009: Impending cache battery pack failure"},
210 	{0x02040400, 0, 0,
211 	"34FF: Disk device format in progress"},
212 	{0x023F0000, 0, 0,
213 	"Synchronization required"},
214 	{0x024E0000, 0, 0,
215 	"No ready, IOA shutdown"},
216 	{0x025A0000, 0, 0,
217 	"Not ready, IOA has been shutdown"},
218 	{0x02670100, 0, 1,
219 	"3020: Storage subsystem configuration error"},
220 	{0x03110B00, 0, 0,
221 	"FFF5: Medium error, data unreadable, recommend reassign"},
222 	{0x03110C00, 0, 0,
223 	"7000: Medium error, data unreadable, do not reassign"},
224 	{0x03310000, 0, 1,
225 	"FFF3: Disk media format bad"},
226 	{0x04050000, 0, 1,
227 	"3002: Addressed device failed to respond to selection"},
228 	{0x04080000, 1, 1,
229 	"3100: Device bus error"},
230 	{0x04080100, 0, 1,
231 	"3109: IOA timed out a device command"},
232 	{0x04088000, 0, 0,
233 	"3120: SCSI bus is not operational"},
234 	{0x04088100, 0, 1,
235 	"4100: Hard device bus fabric error"},
236 	{0x04118000, 0, 1,
237 	"9000: IOA reserved area data check"},
238 	{0x04118100, 0, 1,
239 	"9001: IOA reserved area invalid data pattern"},
240 	{0x04118200, 0, 1,
241 	"9002: IOA reserved area LRC error"},
242 	{0x04320000, 0, 1,
243 	"102E: Out of alternate sectors for disk storage"},
244 	{0x04330000, 1, 1,
245 	"FFF4: Data transfer underlength error"},
246 	{0x04338000, 1, 1,
247 	"FFF4: Data transfer overlength error"},
248 	{0x043E0100, 0, 1,
249 	"3400: Logical unit failure"},
250 	{0x04408500, 0, 1,
251 	"FFF4: Device microcode is corrupt"},
252 	{0x04418000, 1, 1,
253 	"8150: PCI bus error"},
254 	{0x04430000, 1, 0,
255 	"Unsupported device bus message received"},
256 	{0x04440000, 1, 1,
257 	"FFF4: Disk device problem"},
258 	{0x04448200, 1, 1,
259 	"8150: Permanent IOA failure"},
260 	{0x04448300, 0, 1,
261 	"3010: Disk device returned wrong response to IOA"},
262 	{0x04448400, 0, 1,
263 	"8151: IOA microcode error"},
264 	{0x04448500, 0, 0,
265 	"Device bus status error"},
266 	{0x04448600, 0, 1,
267 	"8157: IOA error requiring IOA reset to recover"},
268 	{0x04448700, 0, 0,
269 	"ATA device status error"},
270 	{0x04490000, 0, 0,
271 	"Message reject received from the device"},
272 	{0x04449200, 0, 1,
273 	"8008: A permanent cache battery pack failure occurred"},
274 	{0x0444A000, 0, 1,
275 	"9090: Disk unit has been modified after the last known status"},
276 	{0x0444A200, 0, 1,
277 	"9081: IOA detected device error"},
278 	{0x0444A300, 0, 1,
279 	"9082: IOA detected device error"},
280 	{0x044A0000, 1, 1,
281 	"3110: Device bus error, message or command phase"},
282 	{0x044A8000, 1, 1,
283 	"3110: SAS Command / Task Management Function failed"},
284 	{0x04670400, 0, 1,
285 	"9091: Incorrect hardware configuration change has been detected"},
286 	{0x04678000, 0, 1,
287 	"9073: Invalid multi-adapter configuration"},
288 	{0x04678100, 0, 1,
289 	"4010: Incorrect connection between cascaded expanders"},
290 	{0x04678200, 0, 1,
291 	"4020: Connections exceed IOA design limits"},
292 	{0x04678300, 0, 1,
293 	"4030: Incorrect multipath connection"},
294 	{0x04679000, 0, 1,
295 	"4110: Unsupported enclosure function"},
296 	{0x046E0000, 0, 1,
297 	"FFF4: Command to logical unit failed"},
298 	{0x05240000, 1, 0,
299 	"Illegal request, invalid request type or request packet"},
300 	{0x05250000, 0, 0,
301 	"Illegal request, invalid resource handle"},
302 	{0x05258000, 0, 0,
303 	"Illegal request, commands not allowed to this device"},
304 	{0x05258100, 0, 0,
305 	"Illegal request, command not allowed to a secondary adapter"},
306 	{0x05260000, 0, 0,
307 	"Illegal request, invalid field in parameter list"},
308 	{0x05260100, 0, 0,
309 	"Illegal request, parameter not supported"},
310 	{0x05260200, 0, 0,
311 	"Illegal request, parameter value invalid"},
312 	{0x052C0000, 0, 0,
313 	"Illegal request, command sequence error"},
314 	{0x052C8000, 1, 0,
315 	"Illegal request, dual adapter support not enabled"},
316 	{0x06040500, 0, 1,
317 	"9031: Array protection temporarily suspended, protection resuming"},
318 	{0x06040600, 0, 1,
319 	"9040: Array protection temporarily suspended, protection resuming"},
320 	{0x06288000, 0, 1,
321 	"3140: Device bus not ready to ready transition"},
322 	{0x06290000, 0, 1,
323 	"FFFB: SCSI bus was reset"},
324 	{0x06290500, 0, 0,
325 	"FFFE: SCSI bus transition to single ended"},
326 	{0x06290600, 0, 0,
327 	"FFFE: SCSI bus transition to LVD"},
328 	{0x06298000, 0, 1,
329 	"FFFB: SCSI bus was reset by another initiator"},
330 	{0x063F0300, 0, 1,
331 	"3029: A device replacement has occurred"},
332 	{0x064C8000, 0, 1,
333 	"9051: IOA cache data exists for a missing or failed device"},
334 	{0x064C8100, 0, 1,
335 	"9055: Auxiliary cache IOA contains cache data needed by the primary IOA"},
336 	{0x06670100, 0, 1,
337 	"9025: Disk unit is not supported at its physical location"},
338 	{0x06670600, 0, 1,
339 	"3020: IOA detected a SCSI bus configuration error"},
340 	{0x06678000, 0, 1,
341 	"3150: SCSI bus configuration error"},
342 	{0x06678100, 0, 1,
343 	"9074: Asymmetric advanced function disk configuration"},
344 	{0x06678300, 0, 1,
345 	"4040: Incomplete multipath connection between IOA and enclosure"},
346 	{0x06678400, 0, 1,
347 	"4041: Incomplete multipath connection between enclosure and device"},
348 	{0x06678500, 0, 1,
349 	"9075: Incomplete multipath connection between IOA and remote IOA"},
350 	{0x06678600, 0, 1,
351 	"9076: Configuration error, missing remote IOA"},
352 	{0x06679100, 0, 1,
353 	"4050: Enclosure does not support a required multipath function"},
354 	{0x06690200, 0, 1,
355 	"9041: Array protection temporarily suspended"},
356 	{0x06698200, 0, 1,
357 	"9042: Corrupt array parity detected on specified device"},
358 	{0x066B0200, 0, 1,
359 	"9030: Array no longer protected due to missing or failed disk unit"},
360 	{0x066B8000, 0, 1,
361 	"9071: Link operational transition"},
362 	{0x066B8100, 0, 1,
363 	"9072: Link not operational transition"},
364 	{0x066B8200, 0, 1,
365 	"9032: Array exposed but still protected"},
366 	{0x066B9100, 0, 1,
367 	"4061: Multipath redundancy level got better"},
368 	{0x066B9200, 0, 1,
369 	"4060: Multipath redundancy level got worse"},
370 	{0x07270000, 0, 0,
371 	"Failure due to other device"},
372 	{0x07278000, 0, 1,
373 	"9008: IOA does not support functions expected by devices"},
374 	{0x07278100, 0, 1,
375 	"9010: Cache data associated with attached devices cannot be found"},
376 	{0x07278200, 0, 1,
377 	"9011: Cache data belongs to devices other than those attached"},
378 	{0x07278400, 0, 1,
379 	"9020: Array missing 2 or more devices with only 1 device present"},
380 	{0x07278500, 0, 1,
381 	"9021: Array missing 2 or more devices with 2 or more devices present"},
382 	{0x07278600, 0, 1,
383 	"9022: Exposed array is missing a required device"},
384 	{0x07278700, 0, 1,
385 	"9023: Array member(s) not at required physical locations"},
386 	{0x07278800, 0, 1,
387 	"9024: Array not functional due to present hardware configuration"},
388 	{0x07278900, 0, 1,
389 	"9026: Array not functional due to present hardware configuration"},
390 	{0x07278A00, 0, 1,
391 	"9027: Array is missing a device and parity is out of sync"},
392 	{0x07278B00, 0, 1,
393 	"9028: Maximum number of arrays already exist"},
394 	{0x07278C00, 0, 1,
395 	"9050: Required cache data cannot be located for a disk unit"},
396 	{0x07278D00, 0, 1,
397 	"9052: Cache data exists for a device that has been modified"},
398 	{0x07278F00, 0, 1,
399 	"9054: IOA resources not available due to previous problems"},
400 	{0x07279100, 0, 1,
401 	"9092: Disk unit requires initialization before use"},
402 	{0x07279200, 0, 1,
403 	"9029: Incorrect hardware configuration change has been detected"},
404 	{0x07279600, 0, 1,
405 	"9060: One or more disk pairs are missing from an array"},
406 	{0x07279700, 0, 1,
407 	"9061: One or more disks are missing from an array"},
408 	{0x07279800, 0, 1,
409 	"9062: One or more disks are missing from an array"},
410 	{0x07279900, 0, 1,
411 	"9063: Maximum number of functional arrays has been exceeded"},
412 	{0x0B260000, 0, 0,
413 	"Aborted command, invalid descriptor"},
414 	{0x0B5A0000, 0, 0,
415 	"Command terminated by host"}
416 };
417 
418 static const struct ipr_ses_table_entry ipr_ses_table[] = {
419 	{ "2104-DL1        ", "XXXXXXXXXXXXXXXX", 80 },
420 	{ "2104-TL1        ", "XXXXXXXXXXXXXXXX", 80 },
421 	{ "HSBP07M P U2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* Hidive 7 slot */
422 	{ "HSBP05M P U2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* Hidive 5 slot */
423 	{ "HSBP05M S U2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* Bowtie */
424 	{ "HSBP06E ASU2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* MartinFenning */
425 	{ "2104-DU3        ", "XXXXXXXXXXXXXXXX", 160 },
426 	{ "2104-TU3        ", "XXXXXXXXXXXXXXXX", 160 },
427 	{ "HSBP04C RSU2SCSI", "XXXXXXX*XXXXXXXX", 160 },
428 	{ "HSBP06E RSU2SCSI", "XXXXXXX*XXXXXXXX", 160 },
429 	{ "St  V1S2        ", "XXXXXXXXXXXXXXXX", 160 },
430 	{ "HSBPD4M  PU3SCSI", "XXXXXXX*XXXXXXXX", 160 },
431 	{ "VSBPD1H   U3SCSI", "XXXXXXX*XXXXXXXX", 160 }
432 };
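
/*
 * Each entry above pairs a SES product ID with a compare mask and the
 * maximum bus speed (in MB/s) the enclosure supports. In the mask, 'X'
 * means the corresponding product ID byte must match; any other character
 * (such as '*') marks a don't-care position.
 */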
433 
434 /*
435  *  Function Prototypes
436  */
437 static int ipr_reset_alert(struct ipr_cmnd *);
438 static void ipr_process_ccn(struct ipr_cmnd *);
439 static void ipr_process_error(struct ipr_cmnd *);
440 static void ipr_reset_ioa_job(struct ipr_cmnd *);
441 static void ipr_initiate_ioa_reset(struct ipr_ioa_cfg *,
442 				   enum ipr_shutdown_type);
443 
444 #ifdef CONFIG_SCSI_IPR_TRACE
445 /**
446  * ipr_trc_hook - Add a trace entry to the driver trace
447  * @ipr_cmd:	ipr command struct
448  * @type:		trace type
449  * @add_data:	additional data
450  *
451  * Return value:
452  * 	none
453  **/
454 static void ipr_trc_hook(struct ipr_cmnd *ipr_cmd,
455 			 u8 type, u32 add_data)
456 {
457 	struct ipr_trace_entry *trace_entry;
458 	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
459 
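	/* The driver trace is a fixed-size ring; trace_index is assumed to
	 * wrap through its narrow bit-field declaration in ipr.h. */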
460 	trace_entry = &ioa_cfg->trace[ioa_cfg->trace_index++];
461 	trace_entry->time = jiffies;
462 	trace_entry->op_code = ipr_cmd->ioarcb.cmd_pkt.cdb[0];
463 	trace_entry->type = type;
464 	trace_entry->ata_op_code = ipr_cmd->ioarcb.add_data.u.regs.command;
465 	trace_entry->cmd_index = ipr_cmd->cmd_index & 0xff;
466 	trace_entry->res_handle = ipr_cmd->ioarcb.res_handle;
467 	trace_entry->u.add_data = add_data;
468 }
469 #else
470 #define ipr_trc_hook(ipr_cmd, type, add_data) do { } while (0)
471 #endif
472 
473 /**
474  * ipr_reinit_ipr_cmnd - Re-initialize an IPR Cmnd block for reuse
475  * @ipr_cmd:	ipr command struct
476  *
477  * Return value:
478  * 	none
479  **/
480 static void ipr_reinit_ipr_cmnd(struct ipr_cmnd *ipr_cmd)
481 {
482 	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
483 	struct ipr_ioasa *ioasa = &ipr_cmd->ioasa;
484 
485 	memset(&ioarcb->cmd_pkt, 0, sizeof(struct ipr_cmd_pkt));
486 	ioarcb->write_data_transfer_length = 0;
487 	ioarcb->read_data_transfer_length = 0;
488 	ioarcb->write_ioadl_len = 0;
489 	ioarcb->read_ioadl_len = 0;
490 	ioasa->ioasc = 0;
491 	ioasa->residual_data_len = 0;
492 	ioasa->u.gata.status = 0;
493 
494 	ipr_cmd->scsi_cmd = NULL;
495 	ipr_cmd->qc = NULL;
496 	ipr_cmd->sense_buffer[0] = 0;
497 	ipr_cmd->dma_use_sg = 0;
498 }
499 
500 /**
501  * ipr_init_ipr_cmnd - Initialize an IPR Cmnd block
502  * @ipr_cmd:	ipr command struct
503  *
504  * Return value:
505  * 	none
506  **/
507 static void ipr_init_ipr_cmnd(struct ipr_cmnd *ipr_cmd)
508 {
509 	ipr_reinit_ipr_cmnd(ipr_cmd);
510 	ipr_cmd->u.scratch = 0;
511 	ipr_cmd->sibling = NULL;
512 	init_timer(&ipr_cmd->timer);
513 }
514 
515 /**
516  * ipr_get_free_ipr_cmnd - Get a free IPR Cmnd block
517  * @ioa_cfg:	ioa config struct
518  *
519  * Return value:
520  * 	pointer to ipr command struct
521  **/
522 static
523 struct ipr_cmnd *ipr_get_free_ipr_cmnd(struct ipr_ioa_cfg *ioa_cfg)
524 {
525 	struct ipr_cmnd *ipr_cmd;
526 
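	/* The caller holds the host lock; the free queue is assumed to be
	 * non-empty whenever this is called. */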
527 	ipr_cmd = list_entry(ioa_cfg->free_q.next, struct ipr_cmnd, queue);
528 	list_del(&ipr_cmd->queue);
529 	ipr_init_ipr_cmnd(ipr_cmd);
530 
531 	return ipr_cmd;
532 }
533 
534 /**
535  * ipr_unmap_sglist - Unmap scatterlist if mapped
536  * @ioa_cfg:	ioa config struct
537  * @ipr_cmd:	ipr command struct
538  *
539  * Return value:
540  * 	none
541  **/
542 static void ipr_unmap_sglist(struct ipr_ioa_cfg *ioa_cfg,
543 			     struct ipr_cmnd *ipr_cmd)
544 {
545 	struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
546 
547 	if (ipr_cmd->dma_use_sg) {
548 		if (scsi_cmd->use_sg > 0) {
549 			pci_unmap_sg(ioa_cfg->pdev, scsi_cmd->request_buffer,
550 				     scsi_cmd->use_sg,
551 				     scsi_cmd->sc_data_direction);
552 		} else {
553 			pci_unmap_single(ioa_cfg->pdev, ipr_cmd->dma_handle,
554 					 scsi_cmd->request_bufflen,
555 					 scsi_cmd->sc_data_direction);
556 		}
557 	}
558 }
559 
560 /**
561  * ipr_mask_and_clear_interrupts - Mask all and clear specified interrupts
562  * @ioa_cfg:	ioa config struct
563  * @clr_ints:     interrupts to clear
564  *
565  * This function masks all interrupts on the adapter, then clears the
566  * interrupts specified in the mask.
567  *
568  * Return value:
569  * 	none
570  **/
571 static void ipr_mask_and_clear_interrupts(struct ipr_ioa_cfg *ioa_cfg,
572 					  u32 clr_ints)
573 {
574 	volatile u32 int_reg;
575 
576 	/* Stop new interrupts */
577 	ioa_cfg->allow_interrupts = 0;
578 
579 	/* Set interrupt mask to stop all new interrupts */
580 	writel(~0, ioa_cfg->regs.set_interrupt_mask_reg);
581 
582 	/* Clear any pending interrupts */
583 	writel(clr_ints, ioa_cfg->regs.clr_interrupt_reg);
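	/* Readback flushes the posted MMIO writes to the adapter */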
584 	int_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
585 }
586 
587 /**
588  * ipr_save_pcix_cmd_reg - Save PCI-X command register
589  * @ioa_cfg:	ioa config struct
590  *
591  * Return value:
592  * 	0 on success / -EIO on failure
593  **/
594 static int ipr_save_pcix_cmd_reg(struct ipr_ioa_cfg *ioa_cfg)
595 {
596 	int pcix_cmd_reg = pci_find_capability(ioa_cfg->pdev, PCI_CAP_ID_PCIX);
597 
598 	if (pcix_cmd_reg == 0)
599 		return 0;
600 
601 	if (pci_read_config_word(ioa_cfg->pdev, pcix_cmd_reg + PCI_X_CMD,
602 				 &ioa_cfg->saved_pcix_cmd_reg) != PCIBIOS_SUCCESSFUL) {
603 		dev_err(&ioa_cfg->pdev->dev, "Failed to save PCI-X command register\n");
604 		return -EIO;
605 	}
606 
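	/* Keep Data Parity Error Recovery and Relaxed Ordering enabled
	 * whenever this saved value is restored after a reset */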
607 	ioa_cfg->saved_pcix_cmd_reg |= PCI_X_CMD_DPERR_E | PCI_X_CMD_ERO;
608 	return 0;
609 }
610 
611 /**
612  * ipr_set_pcix_cmd_reg - Setup PCI-X command register
613  * @ioa_cfg:	ioa config struct
614  *
615  * Return value:
616  * 	0 on success / -EIO on failure
617  **/
618 static int ipr_set_pcix_cmd_reg(struct ipr_ioa_cfg *ioa_cfg)
619 {
620 	int pcix_cmd_reg = pci_find_capability(ioa_cfg->pdev, PCI_CAP_ID_PCIX);
621 
622 	if (pcix_cmd_reg) {
623 		if (pci_write_config_word(ioa_cfg->pdev, pcix_cmd_reg + PCI_X_CMD,
624 					  ioa_cfg->saved_pcix_cmd_reg) != PCIBIOS_SUCCESSFUL) {
625 			dev_err(&ioa_cfg->pdev->dev, "Failed to setup PCI-X command register\n");
626 			return -EIO;
627 		}
628 	}
629 
630 	return 0;
631 }
632 
633 /**
634  * ipr_sata_eh_done - done function for aborted SATA commands
635  * @ipr_cmd:	ipr command struct
636  *
637  * This function is invoked for ops issued to SATA
638  * devices that are being aborted.
639  *
640  * Return value:
641  * 	none
642  **/
643 static void ipr_sata_eh_done(struct ipr_cmnd *ipr_cmd)
644 {
645 	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
646 	struct ata_queued_cmd *qc = ipr_cmd->qc;
647 	struct ipr_sata_port *sata_port = qc->ap->private_data;
648 
649 	qc->err_mask |= AC_ERR_OTHER;
650 	sata_port->ioasa.status |= ATA_BUSY;
651 	list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
652 	ata_qc_complete(qc);
653 }
654 
655 /**
656  * ipr_scsi_eh_done - mid-layer done function for aborted ops
657  * @ipr_cmd:	ipr command struct
658  *
659  * This function is invoked by the interrupt handler for
660  * ops generated by the SCSI mid-layer that are being aborted.
661  *
662  * Return value:
663  * 	none
664  **/
665 static void ipr_scsi_eh_done(struct ipr_cmnd *ipr_cmd)
666 {
667 	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
668 	struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
669 
670 	scsi_cmd->result |= (DID_ERROR << 16);
671 
672 	ipr_unmap_sglist(ioa_cfg, ipr_cmd);
673 	scsi_cmd->scsi_done(scsi_cmd);
674 	list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
675 }
676 
677 /**
678  * ipr_fail_all_ops - Fails all outstanding ops.
679  * @ioa_cfg:	ioa config struct
680  *
681  * This function fails all outstanding ops.
682  *
683  * Return value:
684  * 	none
685  **/
686 static void ipr_fail_all_ops(struct ipr_ioa_cfg *ioa_cfg)
687 {
688 	struct ipr_cmnd *ipr_cmd, *temp;
689 
690 	ENTER;
691 	list_for_each_entry_safe(ipr_cmd, temp, &ioa_cfg->pending_q, queue) {
692 		list_del(&ipr_cmd->queue);
693 
694 		ipr_cmd->ioasa.ioasc = cpu_to_be32(IPR_IOASC_IOA_WAS_RESET);
695 		ipr_cmd->ioasa.ilid = cpu_to_be32(IPR_DRIVER_ILID);
696 
697 		if (ipr_cmd->scsi_cmd)
698 			ipr_cmd->done = ipr_scsi_eh_done;
699 		else if (ipr_cmd->qc)
700 			ipr_cmd->done = ipr_sata_eh_done;
701 
702 		ipr_trc_hook(ipr_cmd, IPR_TRACE_FINISH, IPR_IOASC_IOA_WAS_RESET);
703 		del_timer(&ipr_cmd->timer);
704 		ipr_cmd->done(ipr_cmd);
705 	}
706 
707 	LEAVE;
708 }
709 
710 /**
711  * ipr_do_req -  Send driver initiated requests.
712  * @ipr_cmd:		ipr command struct
713  * @done:			done function
714  * @timeout_func:	timeout function
715  * @timeout:		timeout value
716  *
717  * This function sends the specified command to the adapter with the
718  * timeout given. The done function is invoked on command completion.
719  *
720  * Return value:
721  * 	none
722  **/
723 static void ipr_do_req(struct ipr_cmnd *ipr_cmd,
724 		       void (*done) (struct ipr_cmnd *),
725 		       void (*timeout_func) (struct ipr_cmnd *), u32 timeout)
726 {
727 	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
728 
729 	list_add_tail(&ipr_cmd->queue, &ioa_cfg->pending_q);
730 
731 	ipr_cmd->done = done;
732 
733 	ipr_cmd->timer.data = (unsigned long) ipr_cmd;
734 	ipr_cmd->timer.expires = jiffies + timeout;
735 	ipr_cmd->timer.function = (void (*)(unsigned long))timeout_func;
736 
737 	add_timer(&ipr_cmd->timer);
738 
739 	ipr_trc_hook(ipr_cmd, IPR_TRACE_START, 0);
740 
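	/* Make the IOARCB updates above visible before telling the adapter
	 * to fetch the command */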
741 	mb();
742 	writel(be32_to_cpu(ipr_cmd->ioarcb.ioarcb_host_pci_addr),
743 	       ioa_cfg->regs.ioarrin_reg);
744 }
745 
746 /**
747  * ipr_internal_cmd_done - Op done function for an internally generated op.
748  * @ipr_cmd:	ipr command struct
749  *
750  * This function is the op done function for an internally generated,
751  * blocking op. It simply wakes the sleeping thread.
752  *
753  * Return value:
754  * 	none
755  **/
756 static void ipr_internal_cmd_done(struct ipr_cmnd *ipr_cmd)
757 {
758 	if (ipr_cmd->sibling)
759 		ipr_cmd->sibling = NULL;
760 	else
761 		complete(&ipr_cmd->completion);
762 }
763 
764 /**
765  * ipr_send_blocking_cmd - Send command and sleep on its completion.
766  * @ipr_cmd:	ipr command struct
767  * @timeout_func:	function to invoke if command times out
768  * @timeout:	timeout
769  *
770  * Return value:
771  * 	none
772  **/
773 static void ipr_send_blocking_cmd(struct ipr_cmnd *ipr_cmd,
774 				  void (*timeout_func) (struct ipr_cmnd *ipr_cmd),
775 				  u32 timeout)
776 {
777 	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
778 
779 	init_completion(&ipr_cmd->completion);
780 	ipr_do_req(ipr_cmd, ipr_internal_cmd_done, timeout_func, timeout);
781 
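	/* Drop the host lock while sleeping so the interrupt handler can
	 * complete the command */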
782 	spin_unlock_irq(ioa_cfg->host->host_lock);
783 	wait_for_completion(&ipr_cmd->completion);
784 	spin_lock_irq(ioa_cfg->host->host_lock);
785 }
786 
787 /**
788  * ipr_send_hcam - Send an HCAM to the adapter.
789  * @ioa_cfg:	ioa config struct
790  * @type:		HCAM type
791  * @hostrcb:	hostrcb struct
792  *
793  * This function will send a Host Controlled Async command to the adapter.
794  * If HCAMs are currently not allowed to be issued to the adapter, it will
795  * place the hostrcb on the free queue.
796  *
797  * Return value:
798  * 	none
799  **/
800 static void ipr_send_hcam(struct ipr_ioa_cfg *ioa_cfg, u8 type,
801 			  struct ipr_hostrcb *hostrcb)
802 {
803 	struct ipr_cmnd *ipr_cmd;
804 	struct ipr_ioarcb *ioarcb;
805 
806 	if (ioa_cfg->allow_cmds) {
807 		ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
808 		list_add_tail(&ipr_cmd->queue, &ioa_cfg->pending_q);
809 		list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_pending_q);
810 
811 		ipr_cmd->u.hostrcb = hostrcb;
812 		ioarcb = &ipr_cmd->ioarcb;
813 
814 		ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
815 		ioarcb->cmd_pkt.request_type = IPR_RQTYPE_HCAM;
816 		ioarcb->cmd_pkt.cdb[0] = IPR_HOST_CONTROLLED_ASYNC;
817 		ioarcb->cmd_pkt.cdb[1] = type;
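		/* CDB bytes 7-8 carry the HCAM buffer length, MSB first */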
818 		ioarcb->cmd_pkt.cdb[7] = (sizeof(hostrcb->hcam) >> 8) & 0xff;
819 		ioarcb->cmd_pkt.cdb[8] = sizeof(hostrcb->hcam) & 0xff;
820 
821 		ioarcb->read_data_transfer_length = cpu_to_be32(sizeof(hostrcb->hcam));
822 		ioarcb->read_ioadl_len = cpu_to_be32(sizeof(struct ipr_ioadl_desc));
823 		ipr_cmd->ioadl[0].flags_and_data_len =
824 			cpu_to_be32(IPR_IOADL_FLAGS_READ_LAST | sizeof(hostrcb->hcam));
825 		ipr_cmd->ioadl[0].address = cpu_to_be32(hostrcb->hostrcb_dma);
826 
827 		if (type == IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE)
828 			ipr_cmd->done = ipr_process_ccn;
829 		else
830 			ipr_cmd->done = ipr_process_error;
831 
832 		ipr_trc_hook(ipr_cmd, IPR_TRACE_START, IPR_IOA_RES_ADDR);
833 
834 		mb();
835 		writel(be32_to_cpu(ipr_cmd->ioarcb.ioarcb_host_pci_addr),
836 		       ioa_cfg->regs.ioarrin_reg);
837 	} else {
838 		list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_free_q);
839 	}
840 }
841 
842 /**
843  * ipr_init_res_entry - Initialize a resource entry struct.
844  * @res:	resource entry struct
845  *
846  * Return value:
847  * 	none
848  **/
849 static void ipr_init_res_entry(struct ipr_resource_entry *res)
850 {
851 	res->needs_sync_complete = 0;
852 	res->in_erp = 0;
853 	res->add_to_ml = 0;
854 	res->del_from_ml = 0;
855 	res->resetting_device = 0;
856 	res->sdev = NULL;
857 	res->sata_port = NULL;
858 }
859 
860 /**
861  * ipr_handle_config_change - Handle a config change from the adapter
862  * @ioa_cfg:	ioa config struct
863  * @hostrcb:	hostrcb
864  *
865  * Return value:
866  * 	none
867  **/
868 static void ipr_handle_config_change(struct ipr_ioa_cfg *ioa_cfg,
869 			      struct ipr_hostrcb *hostrcb)
870 {
871 	struct ipr_resource_entry *res = NULL;
872 	struct ipr_config_table_entry *cfgte;
873 	u32 is_ndn = 1;
874 
875 	cfgte = &hostrcb->hcam.u.ccn.cfgte;
876 
877 	list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
878 		if (!memcmp(&res->cfgte.res_addr, &cfgte->res_addr,
879 			    sizeof(cfgte->res_addr))) {
880 			is_ndn = 0;
881 			break;
882 		}
883 	}
884 
885 	if (is_ndn) {
886 		if (list_empty(&ioa_cfg->free_res_q)) {
887 			ipr_send_hcam(ioa_cfg,
888 				      IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE,
889 				      hostrcb);
890 			return;
891 		}
892 
893 		res = list_entry(ioa_cfg->free_res_q.next,
894 				 struct ipr_resource_entry, queue);
895 
896 		list_del(&res->queue);
897 		ipr_init_res_entry(res);
898 		list_add_tail(&res->queue, &ioa_cfg->used_res_q);
899 	}
900 
901 	memcpy(&res->cfgte, cfgte, sizeof(struct ipr_config_table_entry));
902 
903 	if (hostrcb->hcam.notify_type == IPR_HOST_RCB_NOTIF_TYPE_REM_ENTRY) {
904 		if (res->sdev) {
905 			res->del_from_ml = 1;
906 			res->cfgte.res_handle = IPR_INVALID_RES_HANDLE;
907 			if (ioa_cfg->allow_ml_add_del)
908 				schedule_work(&ioa_cfg->work_q);
909 		} else
910 			list_move_tail(&res->queue, &ioa_cfg->free_res_q);
911 	} else if (!res->sdev) {
912 		res->add_to_ml = 1;
913 		if (ioa_cfg->allow_ml_add_del)
914 			schedule_work(&ioa_cfg->work_q);
915 	}
916 
917 	ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE, hostrcb);
918 }
919 
920 /**
921  * ipr_process_ccn - Op done function for a CCN.
922  * @ipr_cmd:	ipr command struct
923  *
924  * This function is the op done function for a configuration
925  * change notification host controlled async from the adapter.
926  *
927  * Return value:
928  * 	none
929  **/
930 static void ipr_process_ccn(struct ipr_cmnd *ipr_cmd)
931 {
932 	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
933 	struct ipr_hostrcb *hostrcb = ipr_cmd->u.hostrcb;
934 	u32 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
935 
936 	list_del(&hostrcb->queue);
937 	list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
938 
939 	if (ioasc) {
940 		if (ioasc != IPR_IOASC_IOA_WAS_RESET)
941 			dev_err(&ioa_cfg->pdev->dev,
942 				"Host RCB failed with IOASC: 0x%08X\n", ioasc);
943 
944 		ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE, hostrcb);
945 	} else {
946 		ipr_handle_config_change(ioa_cfg, hostrcb);
947 	}
948 }
949 
950 /**
951  * ipr_log_vpd - Log the passed VPD to the error log.
952  * @vpd:		vendor/product id/sn struct
953  *
954  * Return value:
955  * 	none
956  **/
957 static void ipr_log_vpd(struct ipr_vpd *vpd)
958 {
959 	char buffer[IPR_VENDOR_ID_LEN + IPR_PROD_ID_LEN
960 		    + IPR_SERIAL_NUM_LEN];
961 
962 	memcpy(buffer, vpd->vpids.vendor_id, IPR_VENDOR_ID_LEN);
963 	memcpy(buffer + IPR_VENDOR_ID_LEN, vpd->vpids.product_id,
964 	       IPR_PROD_ID_LEN);
965 	buffer[IPR_VENDOR_ID_LEN + IPR_PROD_ID_LEN] = '\0';
966 	ipr_err("Vendor/Product ID: %s\n", buffer);
967 
968 	memcpy(buffer, vpd->sn, IPR_SERIAL_NUM_LEN);
969 	buffer[IPR_SERIAL_NUM_LEN] = '\0';
970 	ipr_err("    Serial Number: %s\n", buffer);
971 }
972 
973 /**
974  * ipr_log_ext_vpd - Log the passed extended VPD to the error log.
975  * @vpd:		vendor/product id/sn/wwn struct
976  *
977  * Return value:
978  * 	none
979  **/
980 static void ipr_log_ext_vpd(struct ipr_ext_vpd *vpd)
981 {
982 	ipr_log_vpd(&vpd->vpd);
983 	ipr_err("    WWN: %08X%08X\n", be32_to_cpu(vpd->wwid[0]),
984 		be32_to_cpu(vpd->wwid[1]));
985 }
986 
987 /**
988  * ipr_log_enhanced_cache_error - Log an enhanced-format cache error.
989  * @ioa_cfg:	ioa config struct
990  * @hostrcb:	hostrcb struct
991  *
992  * Return value:
993  * 	none
994  **/
995 static void ipr_log_enhanced_cache_error(struct ipr_ioa_cfg *ioa_cfg,
996 					 struct ipr_hostrcb *hostrcb)
997 {
998 	struct ipr_hostrcb_type_12_error *error =
999 		&hostrcb->hcam.u.error.u.type_12_error;
1000 
1001 	ipr_err("-----Current Configuration-----\n");
1002 	ipr_err("Cache Directory Card Information:\n");
1003 	ipr_log_ext_vpd(&error->ioa_vpd);
1004 	ipr_err("Adapter Card Information:\n");
1005 	ipr_log_ext_vpd(&error->cfc_vpd);
1006 
1007 	ipr_err("-----Expected Configuration-----\n");
1008 	ipr_err("Cache Directory Card Information:\n");
1009 	ipr_log_ext_vpd(&error->ioa_last_attached_to_cfc_vpd);
1010 	ipr_err("Adapter Card Information:\n");
1011 	ipr_log_ext_vpd(&error->cfc_last_attached_to_ioa_vpd);
1012 
1013 	ipr_err("Additional IOA Data: %08X %08X %08X\n",
1014 		     be32_to_cpu(error->ioa_data[0]),
1015 		     be32_to_cpu(error->ioa_data[1]),
1016 		     be32_to_cpu(error->ioa_data[2]));
1017 }
1018 
1019 /**
1020  * ipr_log_cache_error - Log a cache error.
1021  * @ioa_cfg:	ioa config struct
1022  * @hostrcb:	hostrcb struct
1023  *
1024  * Return value:
1025  * 	none
1026  **/
1027 static void ipr_log_cache_error(struct ipr_ioa_cfg *ioa_cfg,
1028 				struct ipr_hostrcb *hostrcb)
1029 {
1030 	struct ipr_hostrcb_type_02_error *error =
1031 		&hostrcb->hcam.u.error.u.type_02_error;
1032 
1033 	ipr_err("-----Current Configuration-----\n");
1034 	ipr_err("Cache Directory Card Information:\n");
1035 	ipr_log_vpd(&error->ioa_vpd);
1036 	ipr_err("Adapter Card Information:\n");
1037 	ipr_log_vpd(&error->cfc_vpd);
1038 
1039 	ipr_err("-----Expected Configuration-----\n");
1040 	ipr_err("Cache Directory Card Information:\n");
1041 	ipr_log_vpd(&error->ioa_last_attached_to_cfc_vpd);
1042 	ipr_err("Adapter Card Information:\n");
1043 	ipr_log_vpd(&error->cfc_last_attached_to_ioa_vpd);
1044 
1045 	ipr_err("Additional IOA Data: %08X %08X %08X\n",
1046 		     be32_to_cpu(error->ioa_data[0]),
1047 		     be32_to_cpu(error->ioa_data[1]),
1048 		     be32_to_cpu(error->ioa_data[2]));
1049 }
1050 
1051 /**
1052  * ipr_log_enhanced_config_error - Log an enhanced-format configuration error.
1053  * @ioa_cfg:	ioa config struct
1054  * @hostrcb:	hostrcb struct
1055  *
1056  * Return value:
1057  * 	none
1058  **/
1059 static void ipr_log_enhanced_config_error(struct ipr_ioa_cfg *ioa_cfg,
1060 					  struct ipr_hostrcb *hostrcb)
1061 {
1062 	int errors_logged, i;
1063 	struct ipr_hostrcb_device_data_entry_enhanced *dev_entry;
1064 	struct ipr_hostrcb_type_13_error *error;
1065 
1066 	error = &hostrcb->hcam.u.error.u.type_13_error;
1067 	errors_logged = be32_to_cpu(error->errors_logged);
1068 
1069 	ipr_err("Device Errors Detected/Logged: %d/%d\n",
1070 		be32_to_cpu(error->errors_detected), errors_logged);
1071 
1072 	dev_entry = error->dev;
1073 
1074 	for (i = 0; i < errors_logged; i++, dev_entry++) {
1075 		ipr_err_separator;
1076 
1077 		ipr_phys_res_err(ioa_cfg, dev_entry->dev_res_addr, "Device %d", i + 1);
1078 		ipr_log_ext_vpd(&dev_entry->vpd);
1079 
1080 		ipr_err("-----New Device Information-----\n");
1081 		ipr_log_ext_vpd(&dev_entry->new_vpd);
1082 
1083 		ipr_err("Cache Directory Card Information:\n");
1084 		ipr_log_ext_vpd(&dev_entry->ioa_last_with_dev_vpd);
1085 
1086 		ipr_err("Adapter Card Information:\n");
1087 		ipr_log_ext_vpd(&dev_entry->cfc_last_with_dev_vpd);
1088 	}
1089 }
1090 
1091 /**
1092  * ipr_log_config_error - Log a configuration error.
1093  * @ioa_cfg:	ioa config struct
1094  * @hostrcb:	hostrcb struct
1095  *
1096  * Return value:
1097  * 	none
1098  **/
1099 static void ipr_log_config_error(struct ipr_ioa_cfg *ioa_cfg,
1100 				 struct ipr_hostrcb *hostrcb)
1101 {
1102 	int errors_logged, i;
1103 	struct ipr_hostrcb_device_data_entry *dev_entry;
1104 	struct ipr_hostrcb_type_03_error *error;
1105 
1106 	error = &hostrcb->hcam.u.error.u.type_03_error;
1107 	errors_logged = be32_to_cpu(error->errors_logged);
1108 
1109 	ipr_err("Device Errors Detected/Logged: %d/%d\n",
1110 		be32_to_cpu(error->errors_detected), errors_logged);
1111 
1112 	dev_entry = error->dev;
1113 
1114 	for (i = 0; i < errors_logged; i++, dev_entry++) {
1115 		ipr_err_separator;
1116 
1117 		ipr_phys_res_err(ioa_cfg, dev_entry->dev_res_addr, "Device %d", i + 1);
1118 		ipr_log_vpd(&dev_entry->vpd);
1119 
1120 		ipr_err("-----New Device Information-----\n");
1121 		ipr_log_vpd(&dev_entry->new_vpd);
1122 
1123 		ipr_err("Cache Directory Card Information:\n");
1124 		ipr_log_vpd(&dev_entry->ioa_last_with_dev_vpd);
1125 
1126 		ipr_err("Adapter Card Information:\n");
1127 		ipr_log_vpd(&dev_entry->cfc_last_with_dev_vpd);
1128 
1129 		ipr_err("Additional IOA Data: %08X %08X %08X %08X %08X\n",
1130 			be32_to_cpu(dev_entry->ioa_data[0]),
1131 			be32_to_cpu(dev_entry->ioa_data[1]),
1132 			be32_to_cpu(dev_entry->ioa_data[2]),
1133 			be32_to_cpu(dev_entry->ioa_data[3]),
1134 			be32_to_cpu(dev_entry->ioa_data[4]));
1135 	}
1136 }
1137 
1138 /**
1139  * ipr_log_enhanced_array_error - Log an enhanced-format array configuration error.
1140  * @ioa_cfg:	ioa config struct
1141  * @hostrcb:	hostrcb struct
1142  *
1143  * Return value:
1144  * 	none
1145  **/
1146 static void ipr_log_enhanced_array_error(struct ipr_ioa_cfg *ioa_cfg,
1147 					 struct ipr_hostrcb *hostrcb)
1148 {
1149 	int i, num_entries;
1150 	struct ipr_hostrcb_type_14_error *error;
1151 	struct ipr_hostrcb_array_data_entry_enhanced *array_entry;
1152 	const u8 zero_sn[IPR_SERIAL_NUM_LEN] = { [0 ... IPR_SERIAL_NUM_LEN-1] = '0' };
1153 
1154 	error = &hostrcb->hcam.u.error.u.type_14_error;
1155 
1156 	ipr_err_separator;
1157 
1158 	ipr_err("RAID %s Array Configuration: %d:%d:%d:%d\n",
1159 		error->protection_level,
1160 		ioa_cfg->host->host_no,
1161 		error->last_func_vset_res_addr.bus,
1162 		error->last_func_vset_res_addr.target,
1163 		error->last_func_vset_res_addr.lun);
1164 
1165 	ipr_err_separator;
1166 
1167 	array_entry = error->array_member;
1168 	num_entries = min_t(u32, be32_to_cpu(error->num_entries),
1169 			    sizeof(error->array_member));
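	/* Note: this clamps num_entries to the buffer size in bytes; the
	 * element count would be a tighter bound */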
1170 
1171 	for (i = 0; i < num_entries; i++, array_entry++) {
1172 		if (!memcmp(array_entry->vpd.vpd.sn, zero_sn, IPR_SERIAL_NUM_LEN))
1173 			continue;
1174 
1175 		if (be32_to_cpu(error->exposed_mode_adn) == i)
1176 			ipr_err("Exposed Array Member %d:\n", i);
1177 		else
1178 			ipr_err("Array Member %d:\n", i);
1179 
1180 		ipr_log_ext_vpd(&array_entry->vpd);
1181 		ipr_phys_res_err(ioa_cfg, array_entry->dev_res_addr, "Current Location");
1182 		ipr_phys_res_err(ioa_cfg, array_entry->expected_dev_res_addr,
1183 				 "Expected Location");
1184 
1185 		ipr_err_separator;
1186 	}
1187 }
1188 
1189 /**
1190  * ipr_log_array_error - Log an array configuration error.
1191  * @ioa_cfg:	ioa config struct
1192  * @hostrcb:	hostrcb struct
1193  *
1194  * Return value:
1195  * 	none
1196  **/
1197 static void ipr_log_array_error(struct ipr_ioa_cfg *ioa_cfg,
1198 				struct ipr_hostrcb *hostrcb)
1199 {
1200 	int i;
1201 	struct ipr_hostrcb_type_04_error *error;
1202 	struct ipr_hostrcb_array_data_entry *array_entry;
1203 	const u8 zero_sn[IPR_SERIAL_NUM_LEN] = { [0 ... IPR_SERIAL_NUM_LEN-1] = '0' };
1204 
1205 	error = &hostrcb->hcam.u.error.u.type_04_error;
1206 
1207 	ipr_err_separator;
1208 
1209 	ipr_err("RAID %s Array Configuration: %d:%d:%d:%d\n",
1210 		error->protection_level,
1211 		ioa_cfg->host->host_no,
1212 		error->last_func_vset_res_addr.bus,
1213 		error->last_func_vset_res_addr.target,
1214 		error->last_func_vset_res_addr.lun);
1215 
1216 	ipr_err_separator;
1217 
1218 	array_entry = error->array_member;
1219 
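	/* Up to 18 members total: indices 0-9 come from array_member and
	 * 10-17 from array_member2 (see the i == 9 step below) */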
1220 	for (i = 0; i < 18; i++) {
1221 		if (!memcmp(array_entry->vpd.sn, zero_sn, IPR_SERIAL_NUM_LEN))
1222 			continue;
1223 
1224 		if (be32_to_cpu(error->exposed_mode_adn) == i)
1225 			ipr_err("Exposed Array Member %d:\n", i);
1226 		else
1227 			ipr_err("Array Member %d:\n", i);
1228 
1229 		ipr_log_vpd(&array_entry->vpd);
1230 
1231 		ipr_phys_res_err(ioa_cfg, array_entry->dev_res_addr, "Current Location");
1232 		ipr_phys_res_err(ioa_cfg, array_entry->expected_dev_res_addr,
1233 				 "Expected Location");
1234 
1235 		ipr_err_separator;
1236 
1237 		if (i == 9)
1238 			array_entry = error->array_member2;
1239 		else
1240 			array_entry++;
1241 	}
1242 }
1243 
1244 /**
1245  * ipr_log_hex_data - Log additional hex IOA error data.
1246  * @ioa_cfg:	ioa config struct
1247  * @data:		IOA error data
1248  * @len:		data length
1249  *
1250  * Return value:
1251  * 	none
1252  **/
1253 static void ipr_log_hex_data(struct ipr_ioa_cfg *ioa_cfg, u32 *data, int len)
1254 {
1255 	int i;
1256 
1257 	if (len == 0)
1258 		return;
1259 
1260 	if (ioa_cfg->log_level <= IPR_DEFAULT_LOG_LEVEL)
1261 		len = min_t(int, len, IPR_DEFAULT_MAX_ERROR_DUMP);
1262 
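	/* Dump 16 bytes (four big-endian words) per line, prefixed with the
	 * byte offset */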
1263 	for (i = 0; i < len / 4; i += 4) {
1264 		ipr_err("%08X: %08X %08X %08X %08X\n", i*4,
1265 			be32_to_cpu(data[i]),
1266 			be32_to_cpu(data[i+1]),
1267 			be32_to_cpu(data[i+2]),
1268 			be32_to_cpu(data[i+3]));
1269 	}
1270 }
1271 
1272 /**
1273  * ipr_log_enhanced_dual_ioa_error - Log an enhanced dual adapter error.
1274  * @ioa_cfg:	ioa config struct
1275  * @hostrcb:	hostrcb struct
1276  *
1277  * Return value:
1278  * 	none
1279  **/
1280 static void ipr_log_enhanced_dual_ioa_error(struct ipr_ioa_cfg *ioa_cfg,
1281 					    struct ipr_hostrcb *hostrcb)
1282 {
1283 	struct ipr_hostrcb_type_17_error *error;
1284 
1285 	error = &hostrcb->hcam.u.error.u.type_17_error;
1286 	error->failure_reason[sizeof(error->failure_reason) - 1] = '\0';
1287 
1288 	ipr_err("%s\n", error->failure_reason);
1289 	ipr_err("Remote Adapter VPD:\n");
1290 	ipr_log_ext_vpd(&error->vpd);
1291 	ipr_log_hex_data(ioa_cfg, error->data,
1292 			 be32_to_cpu(hostrcb->hcam.length) -
1293 			 (offsetof(struct ipr_hostrcb_error, u) +
1294 			  offsetof(struct ipr_hostrcb_type_17_error, data)));
1295 }
1296 
1297 /**
1298  * ipr_log_dual_ioa_error - Log a dual adapter error.
1299  * @ioa_cfg:	ioa config struct
1300  * @hostrcb:	hostrcb struct
1301  *
1302  * Return value:
1303  * 	none
1304  **/
1305 static void ipr_log_dual_ioa_error(struct ipr_ioa_cfg *ioa_cfg,
1306 				   struct ipr_hostrcb *hostrcb)
1307 {
1308 	struct ipr_hostrcb_type_07_error *error;
1309 
1310 	error = &hostrcb->hcam.u.error.u.type_07_error;
1311 	error->failure_reason[sizeof(error->failure_reason) - 1] = '\0';
1312 
1313 	ipr_err("%s\n", error->failure_reason);
1314 	ipr_err("Remote Adapter VPD:\n");
1315 	ipr_log_vpd(&error->vpd);
1316 	ipr_log_hex_data(ioa_cfg, error->data,
1317 			 be32_to_cpu(hostrcb->hcam.length) -
1318 			 (offsetof(struct ipr_hostrcb_error, u) +
1319 			  offsetof(struct ipr_hostrcb_type_07_error, data)));
1320 }
1321 
1322 static const struct {
1323 	u8 active;
1324 	char *desc;
1325 } path_active_desc[] = {
1326 	{ IPR_PATH_NO_INFO, "Path" },
1327 	{ IPR_PATH_ACTIVE, "Active path" },
1328 	{ IPR_PATH_NOT_ACTIVE, "Inactive path" }
1329 };
1330 
1331 static const struct {
1332 	u8 state;
1333 	char *desc;
1334 } path_state_desc[] = {
1335 	{ IPR_PATH_STATE_NO_INFO, "has no path state information available" },
1336 	{ IPR_PATH_HEALTHY, "is healthy" },
1337 	{ IPR_PATH_DEGRADED, "is degraded" },
1338 	{ IPR_PATH_FAILED, "is failed" }
1339 };
1340 
1341 /**
1342  * ipr_log_fabric_path - Log a fabric path error
1343  * @hostrcb:	hostrcb struct
1344  * @fabric:		fabric descriptor
1345  *
1346  * Return value:
1347  * 	none
1348  **/
1349 static void ipr_log_fabric_path(struct ipr_hostrcb *hostrcb,
1350 				struct ipr_hostrcb_fabric_desc *fabric)
1351 {
1352 	int i, j;
1353 	u8 path_state = fabric->path_state;
1354 	u8 active = path_state & IPR_PATH_ACTIVE_MASK;
1355 	u8 state = path_state & IPR_PATH_STATE_MASK;
1356 
1357 	for (i = 0; i < ARRAY_SIZE(path_active_desc); i++) {
1358 		if (path_active_desc[i].active != active)
1359 			continue;
1360 
1361 		for (j = 0; j < ARRAY_SIZE(path_state_desc); j++) {
1362 			if (path_state_desc[j].state != state)
1363 				continue;
1364 
1365 			if (fabric->cascaded_expander == 0xff && fabric->phy == 0xff) {
1366 				ipr_hcam_err(hostrcb, "%s %s: IOA Port=%d\n",
1367 					     path_active_desc[i].desc, path_state_desc[j].desc,
1368 					     fabric->ioa_port);
1369 			} else if (fabric->cascaded_expander == 0xff) {
1370 				ipr_hcam_err(hostrcb, "%s %s: IOA Port=%d, Phy=%d\n",
1371 					     path_active_desc[i].desc, path_state_desc[j].desc,
1372 					     fabric->ioa_port, fabric->phy);
1373 			} else if (fabric->phy == 0xff) {
1374 				ipr_hcam_err(hostrcb, "%s %s: IOA Port=%d, Cascade=%d\n",
1375 					     path_active_desc[i].desc, path_state_desc[j].desc,
1376 					     fabric->ioa_port, fabric->cascaded_expander);
1377 			} else {
1378 				ipr_hcam_err(hostrcb, "%s %s: IOA Port=%d, Cascade=%d, Phy=%d\n",
1379 					     path_active_desc[i].desc, path_state_desc[j].desc,
1380 					     fabric->ioa_port, fabric->cascaded_expander, fabric->phy);
1381 			}
1382 			return;
1383 		}
1384 	}
1385 
1386 	ipr_err("Path state=%02X IOA Port=%d Cascade=%d Phy=%d\n", path_state,
1387 		fabric->ioa_port, fabric->cascaded_expander, fabric->phy);
1388 }
1389 
1390 static const struct {
1391 	u8 type;
1392 	char *desc;
1393 } path_type_desc[] = {
1394 	{ IPR_PATH_CFG_IOA_PORT, "IOA port" },
1395 	{ IPR_PATH_CFG_EXP_PORT, "Expander port" },
1396 	{ IPR_PATH_CFG_DEVICE_PORT, "Device port" },
1397 	{ IPR_PATH_CFG_DEVICE_LUN, "Device LUN" }
1398 };
1399 
1400 static const struct {
1401 	u8 status;
1402 	char *desc;
1403 } path_status_desc[] = {
1404 	{ IPR_PATH_CFG_NO_PROB, "Functional" },
1405 	{ IPR_PATH_CFG_DEGRADED, "Degraded" },
1406 	{ IPR_PATH_CFG_FAILED, "Failed" },
1407 	{ IPR_PATH_CFG_SUSPECT, "Suspect" },
1408 	{ IPR_PATH_NOT_DETECTED, "Missing" },
1409 	{ IPR_PATH_INCORRECT_CONN, "Incorrectly connected" }
1410 };
1411 
1412 static const char *link_rate[] = {
1413 	"unknown",
1414 	"disabled",
1415 	"phy reset problem",
1416 	"spinup hold",
1417 	"port selector",
1418 	"unknown",
1419 	"unknown",
1420 	"unknown",
1421 	"1.5Gbps",
1422 	"3.0Gbps",
1423 	"unknown",
1424 	"unknown",
1425 	"unknown",
1426 	"unknown",
1427 	"unknown",
1428 	"unknown"
1429 };
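
/*
 * The table above is indexed by the 4-bit link rate field masked with
 * IPR_PHY_LINK_RATE_MASK; values 0x8 and 0x9 correspond to the SAS
 * 1.5 Gbps and 3.0 Gbps negotiated rates.
 */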
1430 
1431 /**
1432  * ipr_log_path_elem - Log a fabric path element.
1433  * @hostrcb:	hostrcb struct
1434  * @cfg:		fabric path element struct
1435  *
1436  * Return value:
1437  * 	none
1438  **/
1439 static void ipr_log_path_elem(struct ipr_hostrcb *hostrcb,
1440 			      struct ipr_hostrcb_config_element *cfg)
1441 {
1442 	int i, j;
1443 	u8 type = cfg->type_status & IPR_PATH_CFG_TYPE_MASK;
1444 	u8 status = cfg->type_status & IPR_PATH_CFG_STATUS_MASK;
1445 
1446 	if (type == IPR_PATH_CFG_NOT_EXIST)
1447 		return;
1448 
1449 	for (i = 0; i < ARRAY_SIZE(path_type_desc); i++) {
1450 		if (path_type_desc[i].type != type)
1451 			continue;
1452 
1453 		for (j = 0; j < ARRAY_SIZE(path_status_desc); j++) {
1454 			if (path_status_desc[j].status != status)
1455 				continue;
1456 
1457 			if (type == IPR_PATH_CFG_IOA_PORT) {
1458 				ipr_hcam_err(hostrcb, "%s %s: Phy=%d, Link rate=%s, WWN=%08X%08X\n",
1459 					     path_status_desc[j].desc, path_type_desc[i].desc,
1460 					     cfg->phy, link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
1461 					     be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
1462 			} else {
1463 				if (cfg->cascaded_expander == 0xff && cfg->phy == 0xff) {
1464 					ipr_hcam_err(hostrcb, "%s %s: Link rate=%s, WWN=%08X%08X\n",
1465 						     path_status_desc[j].desc, path_type_desc[i].desc,
1466 						     link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
1467 						     be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
1468 				} else if (cfg->cascaded_expander == 0xff) {
1469 					ipr_hcam_err(hostrcb, "%s %s: Phy=%d, Link rate=%s, "
1470 						     "WWN=%08X%08X\n", path_status_desc[j].desc,
1471 						     path_type_desc[i].desc, cfg->phy,
1472 						     link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
1473 						     be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
1474 				} else if (cfg->phy == 0xff) {
1475 					ipr_hcam_err(hostrcb, "%s %s: Cascade=%d, Link rate=%s, "
1476 						     "WWN=%08X%08X\n", path_status_desc[j].desc,
1477 						     path_type_desc[i].desc, cfg->cascaded_expander,
1478 						     link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
1479 						     be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
1480 				} else {
1481 					ipr_hcam_err(hostrcb, "%s %s: Cascade=%d, Phy=%d, Link rate=%s "
1482 						     "WWN=%08X%08X\n", path_status_desc[j].desc,
1483 						     path_type_desc[i].desc, cfg->cascaded_expander, cfg->phy,
1484 						     link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
1485 						     be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
1486 				}
1487 			}
1488 			return;
1489 		}
1490 	}
1491 
1492 	ipr_hcam_err(hostrcb, "Path element=%02X: Cascade=%d Phy=%d Link rate=%s "
1493 		     "WWN=%08X%08X\n", cfg->type_status, cfg->cascaded_expander, cfg->phy,
1494 		     link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
1495 		     be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
1496 }
1497 
1498 /**
1499  * ipr_log_fabric_error - Log a fabric error.
1500  * @ioa_cfg:	ioa config struct
1501  * @hostrcb:	hostrcb struct
1502  *
1503  * Return value:
1504  * 	none
1505  **/
1506 static void ipr_log_fabric_error(struct ipr_ioa_cfg *ioa_cfg,
1507 				 struct ipr_hostrcb *hostrcb)
1508 {
1509 	struct ipr_hostrcb_type_20_error *error;
1510 	struct ipr_hostrcb_fabric_desc *fabric;
1511 	struct ipr_hostrcb_config_element *cfg;
1512 	int i, add_len;
1513 
1514 	error = &hostrcb->hcam.u.error.u.type_20_error;
1515 	error->failure_reason[sizeof(error->failure_reason) - 1] = '\0';
1516 	ipr_hcam_err(hostrcb, "%s\n", error->failure_reason);
1517 
1518 	add_len = be32_to_cpu(hostrcb->hcam.length) -
1519 		(offsetof(struct ipr_hostrcb_error, u) +
1520 		 offsetof(struct ipr_hostrcb_type_20_error, desc));
1521 
1522 	for (i = 0, fabric = error->desc; i < error->num_entries; i++) {
1523 		ipr_log_fabric_path(hostrcb, fabric);
1524 		for_each_fabric_cfg(fabric, cfg)
1525 			ipr_log_path_elem(hostrcb, cfg);
1526 
1527 		add_len -= be16_to_cpu(fabric->length);
1528 		fabric = (struct ipr_hostrcb_fabric_desc *)
1529 			((unsigned long)fabric + be16_to_cpu(fabric->length));
1530 	}
1531 
1532 	ipr_log_hex_data(ioa_cfg, (u32 *)fabric, add_len);
1533 }
1534 
1535 /**
1536  * ipr_log_generic_error - Log an adapter error.
1537  * @ioa_cfg:	ioa config struct
1538  * @hostrcb:	hostrcb struct
1539  *
1540  * Return value:
1541  * 	none
1542  **/
1543 static void ipr_log_generic_error(struct ipr_ioa_cfg *ioa_cfg,
1544 				  struct ipr_hostrcb *hostrcb)
1545 {
1546 	ipr_log_hex_data(ioa_cfg, hostrcb->hcam.u.raw.data,
1547 			 be32_to_cpu(hostrcb->hcam.length));
1548 }
1549 
1550 /**
1551  * ipr_get_error - Find the specified IOASC in the ipr_error_table.
1552  * @ioasc:	IOASC
1553  *
1554  * This function returns the index into the ipr_error_table
1555  * for the specified IOASC. If the IOASC is not in the table,
1556  * 0 is returned, which points to the entry used for unknown errors.
1557  *
1558  * Return value:
1559  * 	index into the ipr_error_table
1560  **/
1561 static u32 ipr_get_error(u32 ioasc)
1562 {
1563 	int i;
1564 
1565 	for (i = 0; i < ARRAY_SIZE(ipr_error_table); i++)
1566 		if (ipr_error_table[i].ioasc == (ioasc & IPR_IOASC_IOASC_MASK))
1567 			return i;
1568 
1569 	return 0;
1570 }
1571 
1572 /**
1573  * ipr_handle_log_data - Log an adapter error.
1574  * @ioa_cfg:	ioa config struct
1575  * @hostrcb:	hostrcb struct
1576  *
1577  * This function logs an adapter error to the system.
1578  *
1579  * Return value:
1580  * 	none
1581  **/
1582 static void ipr_handle_log_data(struct ipr_ioa_cfg *ioa_cfg,
1583 				struct ipr_hostrcb *hostrcb)
1584 {
1585 	u32 ioasc;
1586 	int error_index;
1587 
1588 	if (hostrcb->hcam.notify_type != IPR_HOST_RCB_NOTIF_TYPE_ERROR_LOG_ENTRY)
1589 		return;
1590 
1591 	if (hostrcb->hcam.notifications_lost == IPR_HOST_RCB_NOTIFICATIONS_LOST)
1592 		dev_err(&ioa_cfg->pdev->dev, "Error notifications lost\n");
1593 
1594 	ioasc = be32_to_cpu(hostrcb->hcam.u.error.failing_dev_ioasc);
1595 
1596 	if (ioasc == IPR_IOASC_BUS_WAS_RESET ||
1597 	    ioasc == IPR_IOASC_BUS_WAS_RESET_BY_OTHER) {
1598 		/* Tell the midlayer we had a bus reset so it will handle the UA properly */
1599 		scsi_report_bus_reset(ioa_cfg->host,
1600 				      hostrcb->hcam.u.error.failing_dev_res_addr.bus);
1601 	}
1602 
1603 	error_index = ipr_get_error(ioasc);
1604 
1605 	if (!ipr_error_table[error_index].log_hcam)
1606 		return;
1607 
1608 	ipr_hcam_err(hostrcb, "%s\n", ipr_error_table[error_index].error);
1609 
1610 	/* Set indication we have logged an error */
1611 	ioa_cfg->errors_logged++;
1612 
1613 	if (ioa_cfg->log_level < IPR_DEFAULT_LOG_LEVEL)
1614 		return;
1615 	if (be32_to_cpu(hostrcb->hcam.length) > sizeof(hostrcb->hcam.u.raw))
1616 		hostrcb->hcam.length = cpu_to_be32(sizeof(hostrcb->hcam.u.raw));
1617 
1618 	switch (hostrcb->hcam.overlay_id) {
1619 	case IPR_HOST_RCB_OVERLAY_ID_2:
1620 		ipr_log_cache_error(ioa_cfg, hostrcb);
1621 		break;
1622 	case IPR_HOST_RCB_OVERLAY_ID_3:
1623 		ipr_log_config_error(ioa_cfg, hostrcb);
1624 		break;
1625 	case IPR_HOST_RCB_OVERLAY_ID_4:
1626 	case IPR_HOST_RCB_OVERLAY_ID_6:
1627 		ipr_log_array_error(ioa_cfg, hostrcb);
1628 		break;
1629 	case IPR_HOST_RCB_OVERLAY_ID_7:
1630 		ipr_log_dual_ioa_error(ioa_cfg, hostrcb);
1631 		break;
1632 	case IPR_HOST_RCB_OVERLAY_ID_12:
1633 		ipr_log_enhanced_cache_error(ioa_cfg, hostrcb);
1634 		break;
1635 	case IPR_HOST_RCB_OVERLAY_ID_13:
1636 		ipr_log_enhanced_config_error(ioa_cfg, hostrcb);
1637 		break;
1638 	case IPR_HOST_RCB_OVERLAY_ID_14:
1639 	case IPR_HOST_RCB_OVERLAY_ID_16:
1640 		ipr_log_enhanced_array_error(ioa_cfg, hostrcb);
1641 		break;
1642 	case IPR_HOST_RCB_OVERLAY_ID_17:
1643 		ipr_log_enhanced_dual_ioa_error(ioa_cfg, hostrcb);
1644 		break;
1645 	case IPR_HOST_RCB_OVERLAY_ID_20:
1646 		ipr_log_fabric_error(ioa_cfg, hostrcb);
1647 		break;
1648 	case IPR_HOST_RCB_OVERLAY_ID_1:
1649 	case IPR_HOST_RCB_OVERLAY_ID_DEFAULT:
1650 	default:
1651 		ipr_log_generic_error(ioa_cfg, hostrcb);
1652 		break;
1653 	}
1654 }
1655 
1656 /**
1657  * ipr_process_error - Op done function for an adapter error log.
1658  * @ipr_cmd:	ipr command struct
1659  *
1660  * This function is the op done function for an error log host
1661  * controlled async from the adapter. It will log the error and
1662  * send the HCAM back to the adapter.
1663  *
1664  * Return value:
1665  * 	none
1666  **/
1667 static void ipr_process_error(struct ipr_cmnd *ipr_cmd)
1668 {
1669 	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
1670 	struct ipr_hostrcb *hostrcb = ipr_cmd->u.hostrcb;
1671 	u32 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
1672 
1673 	list_del(&hostrcb->queue);
1674 	list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
1675 
1676 	if (!ioasc) {
1677 		ipr_handle_log_data(ioa_cfg, hostrcb);
1678 	} else if (ioasc != IPR_IOASC_IOA_WAS_RESET) {
1679 		dev_err(&ioa_cfg->pdev->dev,
1680 			"Host RCB failed with IOASC: 0x%08X\n", ioasc);
1681 	}
1682 
1683 	ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_LOG_DATA, hostrcb);
1684 }
1685 
1686 /**
 * ipr_timeout - An internally generated op has timed out.
1688  * @ipr_cmd:	ipr command struct
1689  *
1690  * This function blocks host requests and initiates an
1691  * adapter reset.
1692  *
1693  * Return value:
1694  * 	none
1695  **/
1696 static void ipr_timeout(struct ipr_cmnd *ipr_cmd)
1697 {
1698 	unsigned long lock_flags = 0;
1699 	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
1700 
1701 	ENTER;
1702 	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
1703 
1704 	ioa_cfg->errors_logged++;
1705 	dev_err(&ioa_cfg->pdev->dev,
1706 		"Adapter being reset due to command timeout.\n");
1707 
1708 	if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
1709 		ioa_cfg->sdt_state = GET_DUMP;
1710 
1711 	if (!ioa_cfg->in_reset_reload || ioa_cfg->reset_cmd == ipr_cmd)
1712 		ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
1713 
1714 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
1715 	LEAVE;
1716 }
1717 
1718 /**
 * ipr_oper_timeout - Adapter timed out transitioning to operational
1720  * @ipr_cmd:	ipr command struct
1721  *
1722  * This function blocks host requests and initiates an
1723  * adapter reset.
1724  *
1725  * Return value:
1726  * 	none
1727  **/
1728 static void ipr_oper_timeout(struct ipr_cmnd *ipr_cmd)
1729 {
1730 	unsigned long lock_flags = 0;
1731 	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
1732 
1733 	ENTER;
1734 	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
1735 
1736 	ioa_cfg->errors_logged++;
1737 	dev_err(&ioa_cfg->pdev->dev,
1738 		"Adapter timed out transitioning to operational.\n");
1739 
1740 	if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
1741 		ioa_cfg->sdt_state = GET_DUMP;
1742 
1743 	if (!ioa_cfg->in_reset_reload || ioa_cfg->reset_cmd == ipr_cmd) {
1744 		if (ipr_fastfail)
1745 			ioa_cfg->reset_retries += IPR_NUM_RESET_RELOAD_RETRIES;
1746 		ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
1747 	}
1748 
1749 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
1750 	LEAVE;
1751 }
1752 
1753 /**
1754  * ipr_reset_reload - Reset/Reload the IOA
1755  * @ioa_cfg:		ioa config struct
1756  * @shutdown_type:	shutdown type
1757  *
1758  * This function resets the adapter and re-initializes it.
1759  * This function assumes that all new host commands have been stopped.
 *
 * Return value:
1761  * 	SUCCESS / FAILED
1762  **/
1763 static int ipr_reset_reload(struct ipr_ioa_cfg *ioa_cfg,
1764 			    enum ipr_shutdown_type shutdown_type)
1765 {
1766 	if (!ioa_cfg->in_reset_reload)
1767 		ipr_initiate_ioa_reset(ioa_cfg, shutdown_type);
1768 
1769 	spin_unlock_irq(ioa_cfg->host->host_lock);
1770 	wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
1771 	spin_lock_irq(ioa_cfg->host->host_lock);
1772 
	/* If a host reset occurred while we were already resetting
	   the adapter and that reset failed, the adapter is now dead. */
1775 	if (ioa_cfg->ioa_is_dead) {
1776 		ipr_trace;
1777 		return FAILED;
1778 	}
1779 
1780 	return SUCCESS;
1781 }
1782 
1783 /**
1784  * ipr_find_ses_entry - Find matching SES in SES table
1785  * @res:	resource entry struct of SES
1786  *
1787  * Return value:
1788  * 	pointer to SES table entry / NULL on failure
1789  **/
1790 static const struct ipr_ses_table_entry *
1791 ipr_find_ses_entry(struct ipr_resource_entry *res)
1792 {
1793 	int i, j, matches;
1794 	const struct ipr_ses_table_entry *ste = ipr_ses_table;
1795 
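	/* Bytes flagged 'X' in compare_product_id_byte must match the
	   device's product ID; all other byte positions are wildcards. */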
1796 	for (i = 0; i < ARRAY_SIZE(ipr_ses_table); i++, ste++) {
1797 		for (j = 0, matches = 0; j < IPR_PROD_ID_LEN; j++) {
1798 			if (ste->compare_product_id_byte[j] == 'X') {
1799 				if (res->cfgte.std_inq_data.vpids.product_id[j] == ste->product_id[j])
1800 					matches++;
1801 				else
1802 					break;
1803 			} else
1804 				matches++;
1805 		}
1806 
1807 		if (matches == IPR_PROD_ID_LEN)
1808 			return ste;
1809 	}
1810 
1811 	return NULL;
1812 }
1813 
1814 /**
1815  * ipr_get_max_scsi_speed - Determine max SCSI speed for a given bus
1816  * @ioa_cfg:	ioa config struct
1817  * @bus:		SCSI bus
1818  * @bus_width:	bus width
1819  *
1820  * Return value:
 *	SCSI bus speed in units of 100KHz (e.g. 1600 is 160 MHz)
 *	For a 2-byte wide SCSI bus, the maximum transfer rate in
 *	MB/sec is twice the clock rate (e.g. a wide-enabled bus
 *	running at 160 MHz can move at most 320 MB/sec).
1825  **/
1826 static u32 ipr_get_max_scsi_speed(struct ipr_ioa_cfg *ioa_cfg, u8 bus, u8 bus_width)
1827 {
1828 	struct ipr_resource_entry *res;
1829 	const struct ipr_ses_table_entry *ste;
1830 	u32 max_xfer_rate = IPR_MAX_SCSI_RATE(bus_width);
1831 
1832 	/* Loop through each config table entry in the config table buffer */
1833 	list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
1834 		if (!(IPR_IS_SES_DEVICE(res->cfgte.std_inq_data)))
1835 			continue;
1836 
1837 		if (bus != res->cfgte.res_addr.bus)
1838 			continue;
1839 
1840 		if (!(ste = ipr_find_ses_entry(res)))
1841 			continue;
1842 
1843 		max_xfer_rate = (ste->max_bus_speed_limit * 10) / (bus_width / 8);
1844 	}
1845 
1846 	return max_xfer_rate;
1847 }
1848 
1849 /**
1850  * ipr_wait_iodbg_ack - Wait for an IODEBUG ACK from the IOA
1851  * @ioa_cfg:		ioa config struct
1852  * @max_delay:		max delay in micro-seconds to wait
1853  *
1854  * Waits for an IODEBUG ACK from the IOA, doing busy looping.
1855  *
1856  * Return value:
1857  * 	0 on success / other on failure
1858  **/
1859 static int ipr_wait_iodbg_ack(struct ipr_ioa_cfg *ioa_cfg, int max_delay)
1860 {
1861 	volatile u32 pcii_reg;
1862 	int delay = 1;
1863 
1864 	/* Read interrupt reg until IOA signals IO Debug Acknowledge */
1865 	while (delay < max_delay) {
1866 		pcii_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
1867 
1868 		if (pcii_reg & IPR_PCII_IO_DEBUG_ACKNOWLEDGE)
1869 			return 0;
1870 
1871 		/* udelay cannot be used if delay is more than a few milliseconds */
1872 		if ((delay / 1000) > MAX_UDELAY_MS)
1873 			mdelay(delay / 1000);
1874 		else
1875 			udelay(delay);
1876 
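		/* Double the delay each iteration (simple exponential backoff) */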
1877 		delay += delay;
1878 	}
1879 	return -EIO;
1880 }
1881 
1882 /**
1883  * ipr_get_ldump_data_section - Dump IOA memory
1884  * @ioa_cfg:			ioa config struct
1885  * @start_addr:			adapter address to dump
1886  * @dest:				destination kernel buffer
1887  * @length_in_words:	length to dump in 4 byte words
1888  *
1889  * Return value:
1890  * 	0 on success / -EIO on failure
1891  **/
1892 static int ipr_get_ldump_data_section(struct ipr_ioa_cfg *ioa_cfg,
1893 				      u32 start_addr,
1894 				      __be32 *dest, u32 length_in_words)
1895 {
1896 	volatile u32 temp_pcii_reg;
1897 	int i, delay = 0;
1898 
1899 	/* Write IOA interrupt reg starting LDUMP state  */
1900 	writel((IPR_UPROCI_RESET_ALERT | IPR_UPROCI_IO_DEBUG_ALERT),
1901 	       ioa_cfg->regs.set_uproc_interrupt_reg);
1902 
1903 	/* Wait for IO debug acknowledge */
1904 	if (ipr_wait_iodbg_ack(ioa_cfg,
1905 			       IPR_LDUMP_MAX_LONG_ACK_DELAY_IN_USEC)) {
1906 		dev_err(&ioa_cfg->pdev->dev,
1907 			"IOA dump long data transfer timeout\n");
1908 		return -EIO;
1909 	}
1910 
1911 	/* Signal LDUMP interlocked - clear IO debug ack */
1912 	writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE,
1913 	       ioa_cfg->regs.clr_interrupt_reg);
1914 
1915 	/* Write Mailbox with starting address */
1916 	writel(start_addr, ioa_cfg->ioa_mailbox);
1917 
1918 	/* Signal address valid - clear IOA Reset alert */
1919 	writel(IPR_UPROCI_RESET_ALERT,
1920 	       ioa_cfg->regs.clr_uproc_interrupt_reg);
1921 
1922 	for (i = 0; i < length_in_words; i++) {
1923 		/* Wait for IO debug acknowledge */
1924 		if (ipr_wait_iodbg_ack(ioa_cfg,
1925 				       IPR_LDUMP_MAX_SHORT_ACK_DELAY_IN_USEC)) {
1926 			dev_err(&ioa_cfg->pdev->dev,
1927 				"IOA dump short data transfer timeout\n");
1928 			return -EIO;
1929 		}
1930 
1931 		/* Read data from mailbox and increment destination pointer */
1932 		*dest = cpu_to_be32(readl(ioa_cfg->ioa_mailbox));
1933 		dest++;
1934 
1935 		/* For all but the last word of data, signal data received */
1936 		if (i < (length_in_words - 1)) {
1937 			/* Signal dump data received - Clear IO debug Ack */
1938 			writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE,
1939 			       ioa_cfg->regs.clr_interrupt_reg);
1940 		}
1941 	}
1942 
1943 	/* Signal end of block transfer. Set reset alert then clear IO debug ack */
1944 	writel(IPR_UPROCI_RESET_ALERT,
1945 	       ioa_cfg->regs.set_uproc_interrupt_reg);
1946 
1947 	writel(IPR_UPROCI_IO_DEBUG_ALERT,
1948 	       ioa_cfg->regs.clr_uproc_interrupt_reg);
1949 
1950 	/* Signal dump data received - Clear IO debug Ack */
1951 	writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE,
1952 	       ioa_cfg->regs.clr_interrupt_reg);
1953 
1954 	/* Wait for IOA to signal LDUMP exit - IOA reset alert will be cleared */
1955 	while (delay < IPR_LDUMP_MAX_SHORT_ACK_DELAY_IN_USEC) {
1956 		temp_pcii_reg =
1957 		    readl(ioa_cfg->regs.sense_uproc_interrupt_reg);
1958 
1959 		if (!(temp_pcii_reg & IPR_UPROCI_RESET_ALERT))
1960 			return 0;
1961 
1962 		udelay(10);
1963 		delay += 10;
1964 	}
1965 
1966 	return 0;
1967 }
1968 
1969 #ifdef CONFIG_SCSI_IPR_DUMP
1970 /**
1971  * ipr_sdt_copy - Copy Smart Dump Table to kernel buffer
1972  * @ioa_cfg:		ioa config struct
1973  * @pci_address:	adapter address
1974  * @length:			length of data to copy
1975  *
1976  * Copy data from PCI adapter to kernel buffer.
1977  * Note: length MUST be a 4 byte multiple
1978  * Return value:
1979  * 	0 on success / other on failure
1980  **/
1981 static int ipr_sdt_copy(struct ipr_ioa_cfg *ioa_cfg,
1982 			unsigned long pci_address, u32 length)
1983 {
1984 	int bytes_copied = 0;
1985 	int cur_len, rc, rem_len, rem_page_len;
1986 	__be32 *page;
1987 	unsigned long lock_flags = 0;
1988 	struct ipr_ioa_dump *ioa_dump = &ioa_cfg->dump->ioa_dump;
1989 
1990 	while (bytes_copied < length &&
1991 	       (ioa_dump->hdr.len + bytes_copied) < IPR_MAX_IOA_DUMP_SIZE) {
1992 		if (ioa_dump->page_offset >= PAGE_SIZE ||
1993 		    ioa_dump->page_offset == 0) {
1994 			page = (__be32 *)__get_free_page(GFP_ATOMIC);
1995 
1996 			if (!page) {
1997 				ipr_trace;
1998 				return bytes_copied;
1999 			}
2000 
2001 			ioa_dump->page_offset = 0;
2002 			ioa_dump->ioa_data[ioa_dump->next_page_index] = page;
2003 			ioa_dump->next_page_index++;
2004 		} else
2005 			page = ioa_dump->ioa_data[ioa_dump->next_page_index - 1];
2006 
2007 		rem_len = length - bytes_copied;
2008 		rem_page_len = PAGE_SIZE - ioa_dump->page_offset;
2009 		cur_len = min(rem_len, rem_page_len);
2010 
2011 		spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2012 		if (ioa_cfg->sdt_state == ABORT_DUMP) {
2013 			rc = -EIO;
2014 		} else {
2015 			rc = ipr_get_ldump_data_section(ioa_cfg,
2016 							pci_address + bytes_copied,
2017 							&page[ioa_dump->page_offset / 4],
2018 							(cur_len / sizeof(u32)));
2019 		}
2020 		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2021 
2022 		if (!rc) {
2023 			ioa_dump->page_offset += cur_len;
2024 			bytes_copied += cur_len;
2025 		} else {
2026 			ipr_trace;
2027 			break;
2028 		}
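		/* Yield the CPU between sections; a full dump can take a while */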
2029 		schedule();
2030 	}
2031 
2032 	return bytes_copied;
2033 }
2034 
2035 /**
2036  * ipr_init_dump_entry_hdr - Initialize a dump entry header.
2037  * @hdr:	dump entry header struct
2038  *
2039  * Return value:
2040  * 	nothing
2041  **/
2042 static void ipr_init_dump_entry_hdr(struct ipr_dump_entry_header *hdr)
2043 {
2044 	hdr->eye_catcher = IPR_DUMP_EYE_CATCHER;
2045 	hdr->num_elems = 1;
2046 	hdr->offset = sizeof(*hdr);
2047 	hdr->status = IPR_DUMP_STATUS_SUCCESS;
2048 }
2049 
2050 /**
2051  * ipr_dump_ioa_type_data - Fill in the adapter type in the dump.
2052  * @ioa_cfg:	ioa config struct
2053  * @driver_dump:	driver dump struct
2054  *
2055  * Return value:
2056  * 	nothing
2057  **/
2058 static void ipr_dump_ioa_type_data(struct ipr_ioa_cfg *ioa_cfg,
2059 				   struct ipr_driver_dump *driver_dump)
2060 {
2061 	struct ipr_inquiry_page3 *ucode_vpd = &ioa_cfg->vpd_cbs->page3_data;
2062 
2063 	ipr_init_dump_entry_hdr(&driver_dump->ioa_type_entry.hdr);
2064 	driver_dump->ioa_type_entry.hdr.len =
2065 		sizeof(struct ipr_dump_ioa_type_entry) -
2066 		sizeof(struct ipr_dump_entry_header);
2067 	driver_dump->ioa_type_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_BINARY;
2068 	driver_dump->ioa_type_entry.hdr.id = IPR_DUMP_DRIVER_TYPE_ID;
2069 	driver_dump->ioa_type_entry.type = ioa_cfg->type;
2070 	driver_dump->ioa_type_entry.fw_version = (ucode_vpd->major_release << 24) |
2071 		(ucode_vpd->card_type << 16) | (ucode_vpd->minor_release[0] << 8) |
2072 		ucode_vpd->minor_release[1];
2073 	driver_dump->hdr.num_entries++;
2074 }
2075 
2076 /**
2077  * ipr_dump_version_data - Fill in the driver version in the dump.
2078  * @ioa_cfg:	ioa config struct
2079  * @driver_dump:	driver dump struct
2080  *
2081  * Return value:
2082  * 	nothing
2083  **/
2084 static void ipr_dump_version_data(struct ipr_ioa_cfg *ioa_cfg,
2085 				  struct ipr_driver_dump *driver_dump)
2086 {
2087 	ipr_init_dump_entry_hdr(&driver_dump->version_entry.hdr);
2088 	driver_dump->version_entry.hdr.len =
2089 		sizeof(struct ipr_dump_version_entry) -
2090 		sizeof(struct ipr_dump_entry_header);
2091 	driver_dump->version_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_ASCII;
2092 	driver_dump->version_entry.hdr.id = IPR_DUMP_DRIVER_VERSION_ID;
2093 	strcpy(driver_dump->version_entry.version, IPR_DRIVER_VERSION);
2094 	driver_dump->hdr.num_entries++;
2095 }
2096 
2097 /**
2098  * ipr_dump_trace_data - Fill in the IOA trace in the dump.
2099  * @ioa_cfg:	ioa config struct
2100  * @driver_dump:	driver dump struct
2101  *
2102  * Return value:
2103  * 	nothing
2104  **/
2105 static void ipr_dump_trace_data(struct ipr_ioa_cfg *ioa_cfg,
2106 				   struct ipr_driver_dump *driver_dump)
2107 {
2108 	ipr_init_dump_entry_hdr(&driver_dump->trace_entry.hdr);
2109 	driver_dump->trace_entry.hdr.len =
2110 		sizeof(struct ipr_dump_trace_entry) -
2111 		sizeof(struct ipr_dump_entry_header);
2112 	driver_dump->trace_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_BINARY;
2113 	driver_dump->trace_entry.hdr.id = IPR_DUMP_TRACE_ID;
2114 	memcpy(driver_dump->trace_entry.trace, ioa_cfg->trace, IPR_TRACE_SIZE);
2115 	driver_dump->hdr.num_entries++;
2116 }
2117 
2118 /**
2119  * ipr_dump_location_data - Fill in the IOA location in the dump.
2120  * @ioa_cfg:	ioa config struct
2121  * @driver_dump:	driver dump struct
2122  *
2123  * Return value:
2124  * 	nothing
2125  **/
2126 static void ipr_dump_location_data(struct ipr_ioa_cfg *ioa_cfg,
2127 				   struct ipr_driver_dump *driver_dump)
2128 {
2129 	ipr_init_dump_entry_hdr(&driver_dump->location_entry.hdr);
2130 	driver_dump->location_entry.hdr.len =
2131 		sizeof(struct ipr_dump_location_entry) -
2132 		sizeof(struct ipr_dump_entry_header);
2133 	driver_dump->location_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_ASCII;
2134 	driver_dump->location_entry.hdr.id = IPR_DUMP_LOCATION_ID;
2135 	strcpy(driver_dump->location_entry.location, ioa_cfg->pdev->dev.bus_id);
2136 	driver_dump->hdr.num_entries++;
2137 }
2138 
2139 /**
2140  * ipr_get_ioa_dump - Perform a dump of the driver and adapter.
2141  * @ioa_cfg:	ioa config struct
2142  * @dump:		dump struct
2143  *
2144  * Return value:
2145  * 	nothing
2146  **/
2147 static void ipr_get_ioa_dump(struct ipr_ioa_cfg *ioa_cfg, struct ipr_dump *dump)
2148 {
2149 	unsigned long start_addr, sdt_word;
2150 	unsigned long lock_flags = 0;
2151 	struct ipr_driver_dump *driver_dump = &dump->driver_dump;
2152 	struct ipr_ioa_dump *ioa_dump = &dump->ioa_dump;
2153 	u32 num_entries, start_off, end_off;
2154 	u32 bytes_to_copy, bytes_copied, rc;
2155 	struct ipr_sdt *sdt;
2156 	int i;
2157 
2158 	ENTER;
2159 
2160 	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2161 
2162 	if (ioa_cfg->sdt_state != GET_DUMP) {
2163 		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2164 		return;
2165 	}
2166 
2167 	start_addr = readl(ioa_cfg->ioa_mailbox);
2168 
2169 	if (!ipr_sdt_is_fmt2(start_addr)) {
2170 		dev_err(&ioa_cfg->pdev->dev,
2171 			"Invalid dump table format: %lx\n", start_addr);
2172 		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2173 		return;
2174 	}
2175 
2176 	dev_err(&ioa_cfg->pdev->dev, "Dump of IOA initiated\n");
2177 
2178 	driver_dump->hdr.eye_catcher = IPR_DUMP_EYE_CATCHER;
2179 
2180 	/* Initialize the overall dump header */
2181 	driver_dump->hdr.len = sizeof(struct ipr_driver_dump);
2182 	driver_dump->hdr.num_entries = 1;
2183 	driver_dump->hdr.first_entry_offset = sizeof(struct ipr_dump_header);
2184 	driver_dump->hdr.status = IPR_DUMP_STATUS_SUCCESS;
2185 	driver_dump->hdr.os = IPR_DUMP_OS_LINUX;
2186 	driver_dump->hdr.driver_name = IPR_DUMP_DRIVER_NAME;
2187 
2188 	ipr_dump_version_data(ioa_cfg, driver_dump);
2189 	ipr_dump_location_data(ioa_cfg, driver_dump);
2190 	ipr_dump_ioa_type_data(ioa_cfg, driver_dump);
2191 	ipr_dump_trace_data(ioa_cfg, driver_dump);
2192 
2193 	/* Update dump_header */
2194 	driver_dump->hdr.len += sizeof(struct ipr_dump_entry_header);
2195 
2196 	/* IOA Dump entry */
2197 	ipr_init_dump_entry_hdr(&ioa_dump->hdr);
2198 	ioa_dump->format = IPR_SDT_FMT2;
2199 	ioa_dump->hdr.len = 0;
2200 	ioa_dump->hdr.data_type = IPR_DUMP_DATA_TYPE_BINARY;
2201 	ioa_dump->hdr.id = IPR_DUMP_IOA_DUMP_ID;
2202 
	/* The first entries in the sdt are a list of dump addresses and
	   lengths used to gather the real dump data. sdt points to the
	   IOA-generated dump table; dump data is extracted based on the
	   entries in this table. */
2207 	sdt = &ioa_dump->sdt;
2208 
2209 	rc = ipr_get_ldump_data_section(ioa_cfg, start_addr, (__be32 *)sdt,
2210 					sizeof(struct ipr_sdt) / sizeof(__be32));
2211 
	/* Bail out if the Smart Dump Table could not be read or is not ready to use */
2213 	if (rc || (be32_to_cpu(sdt->hdr.state) != IPR_FMT2_SDT_READY_TO_USE)) {
2214 		dev_err(&ioa_cfg->pdev->dev,
2215 			"Dump of IOA failed. Dump table not valid: %d, %X.\n",
2216 			rc, be32_to_cpu(sdt->hdr.state));
2217 		driver_dump->hdr.status = IPR_DUMP_STATUS_FAILED;
2218 		ioa_cfg->sdt_state = DUMP_OBTAINED;
2219 		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2220 		return;
2221 	}
2222 
2223 	num_entries = be32_to_cpu(sdt->hdr.num_entries_used);
2224 
2225 	if (num_entries > IPR_NUM_SDT_ENTRIES)
2226 		num_entries = IPR_NUM_SDT_ENTRIES;
2227 
2228 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2229 
2230 	for (i = 0; i < num_entries; i++) {
2231 		if (ioa_dump->hdr.len > IPR_MAX_IOA_DUMP_SIZE) {
2232 			driver_dump->hdr.status = IPR_DUMP_STATUS_QUAL_SUCCESS;
2233 			break;
2234 		}
2235 
2236 		if (sdt->entry[i].flags & IPR_SDT_VALID_ENTRY) {
2237 			sdt_word = be32_to_cpu(sdt->entry[i].bar_str_offset);
2238 			start_off = sdt_word & IPR_FMT2_MBX_ADDR_MASK;
2239 			end_off = be32_to_cpu(sdt->entry[i].end_offset);
2240 
2241 			if (ipr_sdt_is_fmt2(sdt_word) && sdt_word) {
2242 				bytes_to_copy = end_off - start_off;
2243 				if (bytes_to_copy > IPR_MAX_IOA_DUMP_SIZE) {
2244 					sdt->entry[i].flags &= ~IPR_SDT_VALID_ENTRY;
2245 					continue;
2246 				}
2247 
2248 				/* Copy data from adapter to driver buffers */
2249 				bytes_copied = ipr_sdt_copy(ioa_cfg, sdt_word,
2250 							    bytes_to_copy);
2251 
2252 				ioa_dump->hdr.len += bytes_copied;
2253 
2254 				if (bytes_copied != bytes_to_copy) {
2255 					driver_dump->hdr.status = IPR_DUMP_STATUS_QUAL_SUCCESS;
2256 					break;
2257 				}
2258 			}
2259 		}
2260 	}
2261 
2262 	dev_err(&ioa_cfg->pdev->dev, "Dump of IOA completed.\n");
2263 
2264 	/* Update dump_header */
2265 	driver_dump->hdr.len += ioa_dump->hdr.len;
2266 	wmb();
2267 	ioa_cfg->sdt_state = DUMP_OBTAINED;
2268 	LEAVE;
2269 }
2270 
2271 #else
#define ipr_get_ioa_dump(ioa_cfg, dump) do { } while (0)
2273 #endif
2274 
2275 /**
2276  * ipr_release_dump - Free adapter dump memory
2277  * @kref:	kref struct
2278  *
2279  * Return value:
2280  *	nothing
2281  **/
2282 static void ipr_release_dump(struct kref *kref)
2283 {
2284 	struct ipr_dump *dump = container_of(kref,struct ipr_dump,kref);
2285 	struct ipr_ioa_cfg *ioa_cfg = dump->ioa_cfg;
2286 	unsigned long lock_flags = 0;
2287 	int i;
2288 
2289 	ENTER;
2290 	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2291 	ioa_cfg->dump = NULL;
2292 	ioa_cfg->sdt_state = INACTIVE;
2293 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2294 
2295 	for (i = 0; i < dump->ioa_dump.next_page_index; i++)
2296 		free_page((unsigned long) dump->ioa_dump.ioa_data[i]);
2297 
2298 	kfree(dump);
2299 	LEAVE;
2300 }
2301 
2302 /**
2303  * ipr_worker_thread - Worker thread
2304  * @work:		ioa config struct
2305  *
2306  * Called at task level from a work thread. This function takes care
 * of adding and removing devices from the mid-layer as configuration
2308  * changes are detected by the adapter.
2309  *
2310  * Return value:
2311  * 	nothing
2312  **/
2313 static void ipr_worker_thread(struct work_struct *work)
2314 {
2315 	unsigned long lock_flags;
2316 	struct ipr_resource_entry *res;
2317 	struct scsi_device *sdev;
2318 	struct ipr_dump *dump;
2319 	struct ipr_ioa_cfg *ioa_cfg =
2320 		container_of(work, struct ipr_ioa_cfg, work_q);
2321 	u8 bus, target, lun;
2322 	int did_work;
2323 
2324 	ENTER;
2325 	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2326 
2327 	if (ioa_cfg->sdt_state == GET_DUMP) {
2328 		dump = ioa_cfg->dump;
2329 		if (!dump) {
2330 			spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2331 			return;
2332 		}
2333 		kref_get(&dump->kref);
2334 		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2335 		ipr_get_ioa_dump(ioa_cfg, dump);
2336 		kref_put(&dump->kref, ipr_release_dump);
2337 
2338 		spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2339 		if (ioa_cfg->sdt_state == DUMP_OBTAINED)
2340 			ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
2341 		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2342 		return;
2343 	}
2344 
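	/*
	 * Adding/removing devices requires dropping the host lock to
	 * call into the midlayer, so the resource list walk restarts
	 * whenever the lock is reacquired.
	 */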
2345 restart:
2346 	do {
2347 		did_work = 0;
2348 		if (!ioa_cfg->allow_cmds || !ioa_cfg->allow_ml_add_del) {
2349 			spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2350 			return;
2351 		}
2352 
2353 		list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
2354 			if (res->del_from_ml && res->sdev) {
2355 				did_work = 1;
2356 				sdev = res->sdev;
2357 				if (!scsi_device_get(sdev)) {
2358 					list_move_tail(&res->queue, &ioa_cfg->free_res_q);
2359 					spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2360 					scsi_remove_device(sdev);
2361 					scsi_device_put(sdev);
2362 					spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2363 				}
2364 				break;
2365 			}
2366 		}
2367 	} while(did_work);
2368 
2369 	list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
2370 		if (res->add_to_ml) {
2371 			bus = res->cfgte.res_addr.bus;
2372 			target = res->cfgte.res_addr.target;
2373 			lun = res->cfgte.res_addr.lun;
2374 			res->add_to_ml = 0;
2375 			spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2376 			scsi_add_device(ioa_cfg->host, bus, target, lun);
2377 			spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2378 			goto restart;
2379 		}
2380 	}
2381 
2382 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2383 	kobject_uevent(&ioa_cfg->host->shost_classdev.kobj, KOBJ_CHANGE);
2384 	LEAVE;
2385 }
2386 
2387 #ifdef CONFIG_SCSI_IPR_TRACE
2388 /**
2389  * ipr_read_trace - Dump the adapter trace
2390  * @kobj:		kobject struct
2391  * @buf:		buffer
2392  * @off:		offset
2393  * @count:		buffer size
2394  *
2395  * Return value:
 *	number of bytes copied to buffer
2397  **/
2398 static ssize_t ipr_read_trace(struct kobject *kobj, char *buf,
2399 			      loff_t off, size_t count)
2400 {
2401 	struct class_device *cdev = container_of(kobj,struct class_device,kobj);
2402 	struct Scsi_Host *shost = class_to_shost(cdev);
2403 	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
2404 	unsigned long lock_flags = 0;
2405 	int size = IPR_TRACE_SIZE;
2406 	char *src = (char *)ioa_cfg->trace;
2407 
2408 	if (off > size)
2409 		return 0;
2410 	if (off + count > size) {
2411 		size -= off;
2412 		count = size;
2413 	}
2414 
2415 	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2416 	memcpy(buf, &src[off], count);
2417 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2418 	return count;
2419 }
2420 
2421 static struct bin_attribute ipr_trace_attr = {
2422 	.attr =	{
2423 		.name = "trace",
2424 		.mode = S_IRUGO,
2425 	},
2426 	.size = 0,
2427 	.read = ipr_read_trace,
2428 };
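
/*
 * Example use (sysfs path is illustrative and kernel-version dependent):
 *	dd if=/sys/class/scsi_host/host0/trace of=/tmp/ipr_trace
 */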
2429 #endif
2430 
2431 static const struct {
2432 	enum ipr_cache_state state;
2433 	char *name;
2434 } cache_state [] = {
2435 	{ CACHE_NONE, "none" },
2436 	{ CACHE_DISABLED, "disabled" },
2437 	{ CACHE_ENABLED, "enabled" }
2438 };
2439 
2440 /**
2441  * ipr_show_write_caching - Show the write caching attribute
2442  * @class_dev:	class device struct
2443  * @buf:		buffer
2444  *
2445  * Return value:
2446  *	number of bytes printed to buffer
2447  **/
2448 static ssize_t ipr_show_write_caching(struct class_device *class_dev, char *buf)
2449 {
2450 	struct Scsi_Host *shost = class_to_shost(class_dev);
2451 	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
2452 	unsigned long lock_flags = 0;
2453 	int i, len = 0;
2454 
2455 	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2456 	for (i = 0; i < ARRAY_SIZE(cache_state); i++) {
2457 		if (cache_state[i].state == ioa_cfg->cache_state) {
2458 			len = snprintf(buf, PAGE_SIZE, "%s\n", cache_state[i].name);
2459 			break;
2460 		}
2461 	}
2462 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2463 	return len;
2464 }
2465 
2466 
2467 /**
2468  * ipr_store_write_caching - Enable/disable adapter write cache
2469  * @class_dev:	class_device struct
2470  * @buf:		buffer
2471  * @count:		buffer size
2472  *
2473  * This function will enable/disable adapter write cache.
2474  *
2475  * Return value:
2476  * 	count on success / other on failure
2477  **/
2478 static ssize_t ipr_store_write_caching(struct class_device *class_dev,
2479 					const char *buf, size_t count)
2480 {
2481 	struct Scsi_Host *shost = class_to_shost(class_dev);
2482 	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
2483 	unsigned long lock_flags = 0;
2484 	enum ipr_cache_state new_state = CACHE_INVALID;
2485 	int i;
2486 
2487 	if (!capable(CAP_SYS_ADMIN))
2488 		return -EACCES;
2489 	if (ioa_cfg->cache_state == CACHE_NONE)
2490 		return -EINVAL;
2491 
2492 	for (i = 0; i < ARRAY_SIZE(cache_state); i++) {
2493 		if (!strncmp(cache_state[i].name, buf, strlen(cache_state[i].name))) {
2494 			new_state = cache_state[i].state;
2495 			break;
2496 		}
2497 	}
2498 
2499 	if (new_state != CACHE_DISABLED && new_state != CACHE_ENABLED)
2500 		return -EINVAL;
2501 
2502 	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2503 	if (ioa_cfg->cache_state == new_state) {
2504 		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2505 		return count;
2506 	}
2507 
2508 	ioa_cfg->cache_state = new_state;
2509 	dev_info(&ioa_cfg->pdev->dev, "%s adapter write cache.\n",
2510 		 new_state == CACHE_ENABLED ? "Enabling" : "Disabling");
2511 	if (!ioa_cfg->in_reset_reload)
2512 		ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NORMAL);
2513 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2514 	wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
2515 
2516 	return count;
2517 }
2518 
2519 static struct class_device_attribute ipr_ioa_cache_attr = {
2520 	.attr = {
2521 		.name =		"write_cache",
2522 		.mode =		S_IRUGO | S_IWUSR,
2523 	},
2524 	.show = ipr_show_write_caching,
2525 	.store = ipr_store_write_caching
2526 };
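
/*
 * Example use (sysfs path is illustrative and kernel-version dependent):
 *	echo disabled > /sys/class/scsi_host/host0/write_cache
 * Changing the setting triggers a normal-shutdown adapter reset.
 */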
2527 
2528 /**
2529  * ipr_show_fw_version - Show the firmware version
2530  * @class_dev:	class device struct
2531  * @buf:		buffer
2532  *
2533  * Return value:
2534  *	number of bytes printed to buffer
2535  **/
2536 static ssize_t ipr_show_fw_version(struct class_device *class_dev, char *buf)
2537 {
2538 	struct Scsi_Host *shost = class_to_shost(class_dev);
2539 	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
2540 	struct ipr_inquiry_page3 *ucode_vpd = &ioa_cfg->vpd_cbs->page3_data;
2541 	unsigned long lock_flags = 0;
2542 	int len;
2543 
2544 	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2545 	len = snprintf(buf, PAGE_SIZE, "%02X%02X%02X%02X\n",
2546 		       ucode_vpd->major_release, ucode_vpd->card_type,
2547 		       ucode_vpd->minor_release[0],
2548 		       ucode_vpd->minor_release[1]);
2549 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2550 	return len;
2551 }
2552 
2553 static struct class_device_attribute ipr_fw_version_attr = {
2554 	.attr = {
2555 		.name =		"fw_version",
2556 		.mode =		S_IRUGO,
2557 	},
2558 	.show = ipr_show_fw_version,
2559 };
2560 
2561 /**
2562  * ipr_show_log_level - Show the adapter's error logging level
2563  * @class_dev:	class device struct
2564  * @buf:		buffer
2565  *
2566  * Return value:
2567  * 	number of bytes printed to buffer
2568  **/
2569 static ssize_t ipr_show_log_level(struct class_device *class_dev, char *buf)
2570 {
2571 	struct Scsi_Host *shost = class_to_shost(class_dev);
2572 	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
2573 	unsigned long lock_flags = 0;
2574 	int len;
2575 
2576 	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2577 	len = snprintf(buf, PAGE_SIZE, "%d\n", ioa_cfg->log_level);
2578 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2579 	return len;
2580 }
2581 
2582 /**
2583  * ipr_store_log_level - Change the adapter's error logging level
2584  * @class_dev:	class device struct
 * @buf:		buffer
 * @count:		buffer size
 *
 * Return value:
 * 	number of bytes consumed on success
2589  **/
2590 static ssize_t ipr_store_log_level(struct class_device *class_dev,
2591 				   const char *buf, size_t count)
2592 {
2593 	struct Scsi_Host *shost = class_to_shost(class_dev);
2594 	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
2595 	unsigned long lock_flags = 0;
2596 
2597 	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2598 	ioa_cfg->log_level = simple_strtoul(buf, NULL, 10);
2599 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2600 	return strlen(buf);
2601 }
2602 
2603 static struct class_device_attribute ipr_log_level_attr = {
2604 	.attr = {
2605 		.name =		"log_level",
2606 		.mode =		S_IRUGO | S_IWUSR,
2607 	},
2608 	.show = ipr_show_log_level,
2609 	.store = ipr_store_log_level
2610 };
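
/*
 * Example use (sysfs path is illustrative):
 *	echo 4 > /sys/class/scsi_host/host0/log_level
 * Levels below IPR_DEFAULT_LOG_LEVEL suppress the detailed error
 * overlay dumps (see ipr_handle_log_data).
 */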
2611 
2612 /**
2613  * ipr_store_diagnostics - IOA Diagnostics interface
2614  * @class_dev:	class_device struct
2615  * @buf:		buffer
2616  * @count:		buffer size
2617  *
2618  * This function will reset the adapter and wait a reasonable
2619  * amount of time for any errors that the adapter might log.
2620  *
2621  * Return value:
2622  * 	count on success / other on failure
2623  **/
2624 static ssize_t ipr_store_diagnostics(struct class_device *class_dev,
2625 				     const char *buf, size_t count)
2626 {
2627 	struct Scsi_Host *shost = class_to_shost(class_dev);
2628 	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
2629 	unsigned long lock_flags = 0;
2630 	int rc = count;
2631 
2632 	if (!capable(CAP_SYS_ADMIN))
2633 		return -EACCES;
2634 
2635 	wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
2636 	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2637 	ioa_cfg->errors_logged = 0;
2638 	ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NORMAL);
2639 
2640 	if (ioa_cfg->in_reset_reload) {
2641 		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2642 		wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
2643 
2644 		/* Wait for a second for any errors to be logged */
2645 		msleep(1000);
2646 	} else {
2647 		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2648 		return -EIO;
2649 	}
2650 
2651 	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2652 	if (ioa_cfg->in_reset_reload || ioa_cfg->errors_logged)
2653 		rc = -EIO;
2654 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2655 
2656 	return rc;
2657 }
2658 
2659 static struct class_device_attribute ipr_diagnostics_attr = {
2660 	.attr = {
2661 		.name =		"run_diagnostics",
2662 		.mode =		S_IWUSR,
2663 	},
2664 	.store = ipr_store_diagnostics
2665 };
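
/*
 * Example use (sysfs path is illustrative; the value written is
 * ignored, any write runs diagnostics):
 *	echo 1 > /sys/class/scsi_host/host0/run_diagnostics
 */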
2666 
2667 /**
2668  * ipr_show_adapter_state - Show the adapter's state
2669  * @class_dev:	class device struct
2670  * @buf:		buffer
2671  *
2672  * Return value:
2673  * 	number of bytes printed to buffer
2674  **/
2675 static ssize_t ipr_show_adapter_state(struct class_device *class_dev, char *buf)
2676 {
2677 	struct Scsi_Host *shost = class_to_shost(class_dev);
2678 	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
2679 	unsigned long lock_flags = 0;
2680 	int len;
2681 
2682 	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2683 	if (ioa_cfg->ioa_is_dead)
2684 		len = snprintf(buf, PAGE_SIZE, "offline\n");
2685 	else
2686 		len = snprintf(buf, PAGE_SIZE, "online\n");
2687 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2688 	return len;
2689 }
2690 
2691 /**
2692  * ipr_store_adapter_state - Change adapter state
2693  * @class_dev:	class_device struct
2694  * @buf:		buffer
2695  * @count:		buffer size
2696  *
2697  * This function will change the adapter's state.
2698  *
2699  * Return value:
2700  * 	count on success / other on failure
2701  **/
2702 static ssize_t ipr_store_adapter_state(struct class_device *class_dev,
2703 				       const char *buf, size_t count)
2704 {
2705 	struct Scsi_Host *shost = class_to_shost(class_dev);
2706 	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
2707 	unsigned long lock_flags;
2708 	int result = count;
2709 
2710 	if (!capable(CAP_SYS_ADMIN))
2711 		return -EACCES;
2712 
2713 	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2714 	if (ioa_cfg->ioa_is_dead && !strncmp(buf, "online", 6)) {
2715 		ioa_cfg->ioa_is_dead = 0;
2716 		ioa_cfg->reset_retries = 0;
2717 		ioa_cfg->in_ioa_bringdown = 0;
2718 		ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
2719 	}
2720 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2721 	wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
2722 
2723 	return result;
2724 }
2725 
2726 static struct class_device_attribute ipr_ioa_state_attr = {
2727 	.attr = {
2728 		.name =		"state",
2729 		.mode =		S_IRUGO | S_IWUSR,
2730 	},
2731 	.show = ipr_show_adapter_state,
2732 	.store = ipr_store_adapter_state
2733 };
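
/*
 * Example use (sysfs path is illustrative): bring a failed adapter
 * back online with:
 *	echo online > /sys/class/scsi_host/host0/state
 */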
2734 
2735 /**
2736  * ipr_store_reset_adapter - Reset the adapter
2737  * @class_dev:	class_device struct
2738  * @buf:		buffer
2739  * @count:		buffer size
2740  *
2741  * This function will reset the adapter.
2742  *
2743  * Return value:
2744  * 	count on success / other on failure
2745  **/
2746 static ssize_t ipr_store_reset_adapter(struct class_device *class_dev,
2747 				       const char *buf, size_t count)
2748 {
2749 	struct Scsi_Host *shost = class_to_shost(class_dev);
2750 	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
2751 	unsigned long lock_flags;
2752 	int result = count;
2753 
2754 	if (!capable(CAP_SYS_ADMIN))
2755 		return -EACCES;
2756 
2757 	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2758 	if (!ioa_cfg->in_reset_reload)
2759 		ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NORMAL);
2760 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2761 	wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
2762 
2763 	return result;
2764 }
2765 
2766 static struct class_device_attribute ipr_ioa_reset_attr = {
2767 	.attr = {
2768 		.name =		"reset_host",
2769 		.mode =		S_IWUSR,
2770 	},
2771 	.store = ipr_store_reset_adapter
2772 };
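
/*
 * Example use (sysfs path is illustrative; the value written is
 * ignored, any write initiates a normal reset):
 *	echo 1 > /sys/class/scsi_host/host0/reset_host
 */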
2773 
2774 /**
2775  * ipr_alloc_ucode_buffer - Allocates a microcode download buffer
2776  * @buf_len:		buffer length
2777  *
2778  * Allocates a DMA'able buffer in chunks and assembles a scatter/gather
2779  * list to use for microcode download
2780  *
2781  * Return value:
2782  * 	pointer to sglist / NULL on failure
2783  **/
2784 static struct ipr_sglist *ipr_alloc_ucode_buffer(int buf_len)
2785 {
2786 	int sg_size, order, bsize_elem, num_elem, i, j;
2787 	struct ipr_sglist *sglist;
2788 	struct scatterlist *scatterlist;
2789 	struct page *page;
2790 
2791 	/* Get the minimum size per scatter/gather element */
2792 	sg_size = buf_len / (IPR_MAX_SGLIST - 1);
2793 
2794 	/* Get the actual size per element */
2795 	order = get_order(sg_size);
2796 
2797 	/* Determine the actual number of bytes per element */
2798 	bsize_elem = PAGE_SIZE * (1 << order);
2799 
2800 	/* Determine the actual number of sg entries needed */
2801 	if (buf_len % bsize_elem)
2802 		num_elem = (buf_len / bsize_elem) + 1;
2803 	else
2804 		num_elem = buf_len / bsize_elem;
2805 
2806 	/* Allocate a scatter/gather list for the DMA */
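	/* num_elem - 1 below assumes struct ipr_sglist already embeds one scatterlist entry */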
2807 	sglist = kzalloc(sizeof(struct ipr_sglist) +
2808 			 (sizeof(struct scatterlist) * (num_elem - 1)),
2809 			 GFP_KERNEL);
2810 
2811 	if (sglist == NULL) {
2812 		ipr_trace;
2813 		return NULL;
2814 	}
2815 
2816 	scatterlist = sglist->scatterlist;
2817 
2818 	sglist->order = order;
2819 	sglist->num_sg = num_elem;
2820 
2821 	/* Allocate a bunch of sg elements */
2822 	for (i = 0; i < num_elem; i++) {
2823 		page = alloc_pages(GFP_KERNEL, order);
2824 		if (!page) {
2825 			ipr_trace;
2826 
2827 			/* Free up what we already allocated */
2828 			for (j = i - 1; j >= 0; j--)
2829 				__free_pages(scatterlist[j].page, order);
2830 			kfree(sglist);
2831 			return NULL;
2832 		}
2833 
2834 		scatterlist[i].page = page;
2835 	}
2836 
2837 	return sglist;
2838 }
2839 
2840 /**
2841  * ipr_free_ucode_buffer - Frees a microcode download buffer
 * @sglist:		scatter/gather list pointer
2843  *
2844  * Free a DMA'able ucode download buffer previously allocated with
2845  * ipr_alloc_ucode_buffer
2846  *
2847  * Return value:
2848  * 	nothing
2849  **/
2850 static void ipr_free_ucode_buffer(struct ipr_sglist *sglist)
2851 {
2852 	int i;
2853 
2854 	for (i = 0; i < sglist->num_sg; i++)
2855 		__free_pages(sglist->scatterlist[i].page, sglist->order);
2856 
2857 	kfree(sglist);
2858 }
2859 
2860 /**
2861  * ipr_copy_ucode_buffer - Copy user buffer to kernel buffer
2862  * @sglist:		scatter/gather list pointer
2863  * @buffer:		buffer pointer
2864  * @len:		buffer length
2865  *
2866  * Copy a microcode image from a user buffer into a buffer allocated by
2867  * ipr_alloc_ucode_buffer
2868  *
2869  * Return value:
2870  * 	0 on success / other on failure
2871  **/
2872 static int ipr_copy_ucode_buffer(struct ipr_sglist *sglist,
2873 				 u8 *buffer, u32 len)
2874 {
2875 	int bsize_elem, i, result = 0;
2876 	struct scatterlist *scatterlist;
2877 	void *kaddr;
2878 
2879 	/* Determine the actual number of bytes per element */
2880 	bsize_elem = PAGE_SIZE * (1 << sglist->order);
2881 
2882 	scatterlist = sglist->scatterlist;
2883 
2884 	for (i = 0; i < (len / bsize_elem); i++, buffer += bsize_elem) {
2885 		kaddr = kmap(scatterlist[i].page);
2886 		memcpy(kaddr, buffer, bsize_elem);
2887 		kunmap(scatterlist[i].page);
2888 
		scatterlist[i].length = bsize_elem;
2895 	}
2896 
2897 	if (len % bsize_elem) {
2898 		kaddr = kmap(scatterlist[i].page);
2899 		memcpy(kaddr, buffer, len % bsize_elem);
2900 		kunmap(scatterlist[i].page);
2901 
2902 		scatterlist[i].length = len % bsize_elem;
2903 	}
2904 
2905 	sglist->buffer_len = len;
2906 	return result;
2907 }
2908 
2909 /**
2910  * ipr_build_ucode_ioadl - Build a microcode download IOADL
2911  * @ipr_cmd:	ipr command struct
2912  * @sglist:		scatter/gather list
2913  *
 *
 * Return value:
 * 	nothing
 **/
2916  **/
2917 static void ipr_build_ucode_ioadl(struct ipr_cmnd *ipr_cmd,
2918 				  struct ipr_sglist *sglist)
2919 {
2920 	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
2921 	struct ipr_ioadl_desc *ioadl = ipr_cmd->ioadl;
2922 	struct scatterlist *scatterlist = sglist->scatterlist;
2923 	int i;
2924 
2925 	ipr_cmd->dma_use_sg = sglist->num_dma_sg;
2926 	ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
2927 	ioarcb->write_data_transfer_length = cpu_to_be32(sglist->buffer_len);
2928 	ioarcb->write_ioadl_len =
2929 		cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
2930 
2931 	for (i = 0; i < ipr_cmd->dma_use_sg; i++) {
2932 		ioadl[i].flags_and_data_len =
2933 			cpu_to_be32(IPR_IOADL_FLAGS_WRITE | sg_dma_len(&scatterlist[i]));
2934 		ioadl[i].address =
2935 			cpu_to_be32(sg_dma_address(&scatterlist[i]));
2936 	}
2937 
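	/* Flag the final descriptor so the IOA knows where the list ends */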
2938 	ioadl[i-1].flags_and_data_len |=
2939 		cpu_to_be32(IPR_IOADL_FLAGS_LAST);
2940 }
2941 
2942 /**
2943  * ipr_update_ioa_ucode - Update IOA's microcode
2944  * @ioa_cfg:	ioa config struct
2945  * @sglist:		scatter/gather list
2946  *
2947  * Initiate an adapter reset to update the IOA's microcode
2948  *
2949  * Return value:
2950  * 	0 on success / -EIO on failure
2951  **/
2952 static int ipr_update_ioa_ucode(struct ipr_ioa_cfg *ioa_cfg,
2953 				struct ipr_sglist *sglist)
2954 {
2955 	unsigned long lock_flags;
2956 
2957 	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2958 
2959 	if (ioa_cfg->ucode_sglist) {
2960 		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2961 		dev_err(&ioa_cfg->pdev->dev,
2962 			"Microcode download already in progress\n");
2963 		return -EIO;
2964 	}
2965 
2966 	sglist->num_dma_sg = pci_map_sg(ioa_cfg->pdev, sglist->scatterlist,
2967 					sglist->num_sg, DMA_TO_DEVICE);
2968 
2969 	if (!sglist->num_dma_sg) {
2970 		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2971 		dev_err(&ioa_cfg->pdev->dev,
2972 			"Failed to map microcode download buffer!\n");
2973 		return -EIO;
2974 	}
2975 
2976 	ioa_cfg->ucode_sglist = sglist;
2977 	ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NORMAL);
2978 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2979 	wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
2980 
2981 	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2982 	ioa_cfg->ucode_sglist = NULL;
2983 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2984 	return 0;
2985 }
2986 
2987 /**
2988  * ipr_store_update_fw - Update the firmware on the adapter
2989  * @class_dev:	class_device struct
2990  * @buf:		buffer
2991  * @count:		buffer size
2992  *
2993  * This function will update the firmware on the adapter.
2994  *
2995  * Return value:
2996  * 	count on success / other on failure
2997  **/
2998 static ssize_t ipr_store_update_fw(struct class_device *class_dev,
2999 				       const char *buf, size_t count)
3000 {
3001 	struct Scsi_Host *shost = class_to_shost(class_dev);
3002 	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3003 	struct ipr_ucode_image_header *image_hdr;
3004 	const struct firmware *fw_entry;
3005 	struct ipr_sglist *sglist;
3006 	char fname[100];
3007 	char *src;
3008 	int len, result, dnld_size;
3009 
3010 	if (!capable(CAP_SYS_ADMIN))
3011 		return -EACCES;
3012 
	snprintf(fname, sizeof(fname), "%s", buf);
	len = strlen(fname);
	if (len && fname[len - 1] == '\n')
		fname[len - 1] = '\0';
3015 
	if (request_firmware(&fw_entry, fname, &ioa_cfg->pdev->dev)) {
3017 		dev_err(&ioa_cfg->pdev->dev, "Firmware file %s not found\n", fname);
3018 		return -EIO;
3019 	}
3020 
3021 	image_hdr = (struct ipr_ucode_image_header *)fw_entry->data;
3022 
3023 	if (be32_to_cpu(image_hdr->header_length) > fw_entry->size ||
3024 	    (ioa_cfg->vpd_cbs->page3_data.card_type &&
3025 	     ioa_cfg->vpd_cbs->page3_data.card_type != image_hdr->card_type)) {
3026 		dev_err(&ioa_cfg->pdev->dev, "Invalid microcode buffer\n");
3027 		release_firmware(fw_entry);
3028 		return -EINVAL;
3029 	}
3030 
3031 	src = (u8 *)image_hdr + be32_to_cpu(image_hdr->header_length);
3032 	dnld_size = fw_entry->size - be32_to_cpu(image_hdr->header_length);
3033 	sglist = ipr_alloc_ucode_buffer(dnld_size);
3034 
3035 	if (!sglist) {
3036 		dev_err(&ioa_cfg->pdev->dev, "Microcode buffer allocation failed\n");
3037 		release_firmware(fw_entry);
3038 		return -ENOMEM;
3039 	}
3040 
3041 	result = ipr_copy_ucode_buffer(sglist, src, dnld_size);
3042 
3043 	if (result) {
3044 		dev_err(&ioa_cfg->pdev->dev,
3045 			"Microcode buffer copy to DMA buffer failed\n");
3046 		goto out;
3047 	}
3048 
3049 	result = ipr_update_ioa_ucode(ioa_cfg, sglist);
3050 
3051 	if (!result)
3052 		result = count;
3053 out:
3054 	ipr_free_ucode_buffer(sglist);
3055 	release_firmware(fw_entry);
3056 	return result;
3057 }
3058 
3059 static struct class_device_attribute ipr_update_fw_attr = {
3060 	.attr = {
3061 		.name =		"update_fw",
3062 		.mode =		S_IWUSR,
3063 	},
3064 	.store = ipr_store_update_fw
3065 };
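
/*
 * Example use (sysfs path and image name are illustrative; the image
 * must be reachable by the firmware loader, typically under
 * /lib/firmware):
 *	echo ibm-adapter-ucode.img > /sys/class/scsi_host/host0/update_fw
 */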
3066 
3067 static struct class_device_attribute *ipr_ioa_attrs[] = {
3068 	&ipr_fw_version_attr,
3069 	&ipr_log_level_attr,
3070 	&ipr_diagnostics_attr,
3071 	&ipr_ioa_state_attr,
3072 	&ipr_ioa_reset_attr,
3073 	&ipr_update_fw_attr,
3074 	&ipr_ioa_cache_attr,
3075 	NULL,
3076 };
3077 
3078 #ifdef CONFIG_SCSI_IPR_DUMP
3079 /**
3080  * ipr_read_dump - Dump the adapter
3081  * @kobj:		kobject struct
3082  * @buf:		buffer
3083  * @off:		offset
3084  * @count:		buffer size
3085  *
3086  * Return value:
 *	number of bytes copied to buffer
3088  **/
3089 static ssize_t ipr_read_dump(struct kobject *kobj, char *buf,
3090 			      loff_t off, size_t count)
3091 {
3092 	struct class_device *cdev = container_of(kobj,struct class_device,kobj);
3093 	struct Scsi_Host *shost = class_to_shost(cdev);
3094 	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3095 	struct ipr_dump *dump;
3096 	unsigned long lock_flags = 0;
3097 	char *src;
3098 	int len;
3099 	size_t rc = count;
3100 
3101 	if (!capable(CAP_SYS_ADMIN))
3102 		return -EACCES;
3103 
3104 	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3105 	dump = ioa_cfg->dump;
3106 
3107 	if (ioa_cfg->sdt_state != DUMP_OBTAINED || !dump) {
3108 		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3109 		return 0;
3110 	}
3111 	kref_get(&dump->kref);
3112 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3113 
3114 	if (off > dump->driver_dump.hdr.len) {
3115 		kref_put(&dump->kref, ipr_release_dump);
3116 		return 0;
3117 	}
3118 
3119 	if (off + count > dump->driver_dump.hdr.len) {
3120 		count = dump->driver_dump.hdr.len - off;
3121 		rc = count;
3122 	}
3123 
3124 	if (count && off < sizeof(dump->driver_dump)) {
3125 		if (off + count > sizeof(dump->driver_dump))
3126 			len = sizeof(dump->driver_dump) - off;
3127 		else
3128 			len = count;
3129 		src = (u8 *)&dump->driver_dump + off;
3130 		memcpy(buf, src, len);
3131 		buf += len;
3132 		off += len;
3133 		count -= len;
3134 	}
3135 
3136 	off -= sizeof(dump->driver_dump);
3137 
3138 	if (count && off < offsetof(struct ipr_ioa_dump, ioa_data)) {
3139 		if (off + count > offsetof(struct ipr_ioa_dump, ioa_data))
3140 			len = offsetof(struct ipr_ioa_dump, ioa_data) - off;
3141 		else
3142 			len = count;
3143 		src = (u8 *)&dump->ioa_dump + off;
3144 		memcpy(buf, src, len);
3145 		buf += len;
3146 		off += len;
3147 		count -= len;
3148 	}
3149 
3150 	off -= offsetof(struct ipr_ioa_dump, ioa_data);
3151 
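	/* Copy the IOA dump data itself, one page of the page list at a time */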
3152 	while (count) {
3153 		if ((off & PAGE_MASK) != ((off + count) & PAGE_MASK))
3154 			len = PAGE_ALIGN(off) - off;
3155 		else
3156 			len = count;
3157 		src = (u8 *)dump->ioa_dump.ioa_data[(off & PAGE_MASK) >> PAGE_SHIFT];
3158 		src += off & ~PAGE_MASK;
3159 		memcpy(buf, src, len);
3160 		buf += len;
3161 		off += len;
3162 		count -= len;
3163 	}
3164 
3165 	kref_put(&dump->kref, ipr_release_dump);
3166 	return rc;
3167 }
3168 
3169 /**
3170  * ipr_alloc_dump - Prepare for adapter dump
3171  * @ioa_cfg:	ioa config struct
3172  *
3173  * Return value:
3174  *	0 on success / other on failure
3175  **/
3176 static int ipr_alloc_dump(struct ipr_ioa_cfg *ioa_cfg)
3177 {
3178 	struct ipr_dump *dump;
3179 	unsigned long lock_flags = 0;
3180 
3181 	dump = kzalloc(sizeof(struct ipr_dump), GFP_KERNEL);
3182 
3183 	if (!dump) {
3184 		ipr_err("Dump memory allocation failed\n");
3185 		return -ENOMEM;
3186 	}
3187 
3188 	kref_init(&dump->kref);
3189 	dump->ioa_cfg = ioa_cfg;
3190 
3191 	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3192 
3193 	if (INACTIVE != ioa_cfg->sdt_state) {
3194 		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3195 		kfree(dump);
3196 		return 0;
3197 	}
3198 
3199 	ioa_cfg->dump = dump;
3200 	ioa_cfg->sdt_state = WAIT_FOR_DUMP;
3201 	if (ioa_cfg->ioa_is_dead && !ioa_cfg->dump_taken) {
3202 		ioa_cfg->dump_taken = 1;
3203 		schedule_work(&ioa_cfg->work_q);
3204 	}
3205 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3206 
3207 	return 0;
3208 }
3209 
3210 /**
3211  * ipr_free_dump - Free adapter dump memory
3212  * @ioa_cfg:	ioa config struct
3213  *
3214  * Return value:
3215  *	0 on success / other on failure
3216  **/
3217 static int ipr_free_dump(struct ipr_ioa_cfg *ioa_cfg)
3218 {
3219 	struct ipr_dump *dump;
3220 	unsigned long lock_flags = 0;
3221 
3222 	ENTER;
3223 
3224 	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3225 	dump = ioa_cfg->dump;
3226 	if (!dump) {
3227 		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3228 		return 0;
3229 	}
3230 
3231 	ioa_cfg->dump = NULL;
3232 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3233 
3234 	kref_put(&dump->kref, ipr_release_dump);
3235 
3236 	LEAVE;
3237 	return 0;
3238 }
3239 
3240 /**
3241  * ipr_write_dump - Setup dump state of adapter
3242  * @kobj:		kobject struct
3243  * @buf:		buffer
3244  * @off:		offset
3245  * @count:		buffer size
3246  *
3247  * Return value:
 *	count on success / other on failure
3249  **/
3250 static ssize_t ipr_write_dump(struct kobject *kobj, char *buf,
3251 			      loff_t off, size_t count)
3252 {
3253 	struct class_device *cdev = container_of(kobj,struct class_device,kobj);
3254 	struct Scsi_Host *shost = class_to_shost(cdev);
3255 	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3256 	int rc;
3257 
3258 	if (!capable(CAP_SYS_ADMIN))
3259 		return -EACCES;
3260 
3261 	if (buf[0] == '1')
3262 		rc = ipr_alloc_dump(ioa_cfg);
3263 	else if (buf[0] == '0')
3264 		rc = ipr_free_dump(ioa_cfg);
3265 	else
3266 		return -EINVAL;
3267 
3268 	if (rc)
3269 		return rc;
3270 	else
3271 		return count;
3272 }
3273 
3274 static struct bin_attribute ipr_dump_attr = {
3275 	.attr =	{
3276 		.name = "dump",
3277 		.mode = S_IRUSR | S_IWUSR,
3278 	},
3279 	.size = 0,
3280 	.read = ipr_read_dump,
3281 	.write = ipr_write_dump
3282 };
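
/*
 * Example use (sysfs path is illustrative):
 *	echo 1 > /sys/class/scsi_host/host0/dump	# set up dump buffers
 *	cat /sys/class/scsi_host/host0/dump > ipr.dump	# read the dump
 *	echo 0 > /sys/class/scsi_host/host0/dump	# free dump memory
 */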
3283 #else
static int ipr_free_dump(struct ipr_ioa_cfg *ioa_cfg) { return 0; }
3285 #endif
3286 
3287 /**
3288  * ipr_change_queue_depth - Change the device's queue depth
3289  * @sdev:	scsi device struct
3290  * @qdepth:	depth to set
3291  *
3292  * Return value:
3293  * 	actual depth set
3294  **/
3295 static int ipr_change_queue_depth(struct scsi_device *sdev, int qdepth)
3296 {
3297 	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
3298 	struct ipr_resource_entry *res;
3299 	unsigned long lock_flags = 0;
3300 
3301 	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3302 	res = (struct ipr_resource_entry *)sdev->hostdata;
3303 
3304 	if (res && ipr_is_gata(res) && qdepth > IPR_MAX_CMD_PER_ATA_LUN)
3305 		qdepth = IPR_MAX_CMD_PER_ATA_LUN;
3306 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3307 
3308 	scsi_adjust_queue_depth(sdev, scsi_get_tag_type(sdev), qdepth);
3309 	return sdev->queue_depth;
3310 }
3311 
3312 /**
3313  * ipr_change_queue_type - Change the device's queue type
 * @sdev:		scsi device struct
3315  * @tag_type:	type of tags to use
3316  *
3317  * Return value:
3318  * 	actual queue type set
3319  **/
3320 static int ipr_change_queue_type(struct scsi_device *sdev, int tag_type)
3321 {
3322 	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
3323 	struct ipr_resource_entry *res;
3324 	unsigned long lock_flags = 0;
3325 
3326 	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3327 	res = (struct ipr_resource_entry *)sdev->hostdata;
3328 
3329 	if (res) {
3330 		if (ipr_is_gscsi(res) && sdev->tagged_supported) {
3331 			/*
3332 			 * We don't bother quiescing the device here since the
3333 			 * adapter firmware does it for us.
3334 			 */
3335 			scsi_set_tag_type(sdev, tag_type);
3336 
3337 			if (tag_type)
3338 				scsi_activate_tcq(sdev, sdev->queue_depth);
3339 			else
3340 				scsi_deactivate_tcq(sdev, sdev->queue_depth);
3341 		} else
3342 			tag_type = 0;
3343 	} else
3344 		tag_type = 0;
3345 
3346 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3347 	return tag_type;
3348 }
3349 
3350 /**
3351  * ipr_show_adapter_handle - Show the adapter's resource handle for this device
3352  * @dev:	device struct
3353  * @buf:	buffer
3354  *
3355  * Return value:
3356  * 	number of bytes printed to buffer
3357  **/
3358 static ssize_t ipr_show_adapter_handle(struct device *dev, struct device_attribute *attr, char *buf)
3359 {
3360 	struct scsi_device *sdev = to_scsi_device(dev);
3361 	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
3362 	struct ipr_resource_entry *res;
3363 	unsigned long lock_flags = 0;
3364 	ssize_t len = -ENXIO;
3365 
3366 	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3367 	res = (struct ipr_resource_entry *)sdev->hostdata;
3368 	if (res)
3369 		len = snprintf(buf, PAGE_SIZE, "%08X\n", res->cfgte.res_handle);
3370 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3371 	return len;
3372 }
3373 
3374 static struct device_attribute ipr_adapter_handle_attr = {
3375 	.attr = {
3376 		.name = 	"adapter_handle",
3377 		.mode =		S_IRUSR,
3378 	},
3379 	.show = ipr_show_adapter_handle
3380 };
3381 
3382 static struct device_attribute *ipr_dev_attrs[] = {
3383 	&ipr_adapter_handle_attr,
3384 	NULL,
3385 };
3386 
3387 /**
3388  * ipr_biosparam - Return the HSC mapping
3389  * @sdev:			scsi device struct
3390  * @block_device:	block device pointer
3391  * @capacity:		capacity of the device
3392  * @parm:			Array containing returned HSC values.
3393  *
 * This function generates the heads/sectors/cylinders (HSC) mapping that fdisk uses.
3395  * We want to make sure we return something that places partitions
3396  * on 4k boundaries for best performance with the IOA.
3397  *
3398  * Return value:
3399  * 	0 on success
3400  **/
3401 static int ipr_biosparam(struct scsi_device *sdev,
3402 			 struct block_device *block_device,
3403 			 sector_t capacity, int *parm)
3404 {
3405 	int heads, sectors;
3406 	sector_t cylinders;
3407 
3408 	heads = 128;
3409 	sectors = 32;
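	/* 128 heads * 32 sectors keeps cylinder boundaries at 2MB (4k aligned) with 512-byte blocks */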
3410 
3411 	cylinders = capacity;
3412 	sector_div(cylinders, (128 * 32));
3413 
3414 	/* return result */
3415 	parm[0] = heads;
3416 	parm[1] = sectors;
3417 	parm[2] = cylinders;
3418 
3419 	return 0;
3420 }
3421 
3422 /**
3423  * ipr_find_starget - Find target based on bus/target.
3424  * @starget:	scsi target struct
3425  *
3426  * Return value:
3427  * 	resource entry pointer if found / NULL if not found
3428  **/
3429 static struct ipr_resource_entry *ipr_find_starget(struct scsi_target *starget)
3430 {
3431 	struct Scsi_Host *shost = dev_to_shost(&starget->dev);
3432 	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) shost->hostdata;
3433 	struct ipr_resource_entry *res;
3434 
3435 	list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
3436 		if ((res->cfgte.res_addr.bus == starget->channel) &&
3437 		    (res->cfgte.res_addr.target == starget->id) &&
3438 		    (res->cfgte.res_addr.lun == 0)) {
3439 			return res;
3440 		}
3441 	}
3442 
3443 	return NULL;
3444 }
3445 
3446 static struct ata_port_info sata_port_info;
3447 
3448 /**
3449  * ipr_target_alloc - Prepare for commands to a SCSI target
3450  * @starget:	scsi target struct
3451  *
3452  * If the device is a SATA device, this function allocates an
3453  * ATA port with libata, else it does nothing.
3454  *
3455  * Return value:
3456  * 	0 on success / non-0 on failure
3457  **/
3458 static int ipr_target_alloc(struct scsi_target *starget)
3459 {
3460 	struct Scsi_Host *shost = dev_to_shost(&starget->dev);
3461 	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) shost->hostdata;
3462 	struct ipr_sata_port *sata_port;
3463 	struct ata_port *ap;
3464 	struct ipr_resource_entry *res;
3465 	unsigned long lock_flags;
3466 
3467 	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3468 	res = ipr_find_starget(starget);
3469 	starget->hostdata = NULL;
3470 
3471 	if (res && ipr_is_gata(res)) {
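		/* Drop the host lock: kzalloc(GFP_KERNEL) below may sleep */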
3472 		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3473 		sata_port = kzalloc(sizeof(*sata_port), GFP_KERNEL);
3474 		if (!sata_port)
3475 			return -ENOMEM;
3476 
3477 		ap = ata_sas_port_alloc(&ioa_cfg->ata_host, &sata_port_info, shost);
3478 		if (ap) {
3479 			spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3480 			sata_port->ioa_cfg = ioa_cfg;
3481 			sata_port->ap = ap;
3482 			sata_port->res = res;
3483 
3484 			res->sata_port = sata_port;
3485 			ap->private_data = sata_port;
3486 			starget->hostdata = sata_port;
3487 		} else {
3488 			kfree(sata_port);
3489 			return -ENOMEM;
3490 		}
3491 	}
3492 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3493 
3494 	return 0;
3495 }
3496 
3497 /**
3498  * ipr_target_destroy - Destroy a SCSI target
3499  * @starget:	scsi target struct
3500  *
3501  * If the device was a SATA device, this function frees the libata
3502  * ATA port, else it does nothing.
3503  *
3504  **/
3505 static void ipr_target_destroy(struct scsi_target *starget)
3506 {
3507 	struct ipr_sata_port *sata_port = starget->hostdata;
3508 
3509 	if (sata_port) {
3510 		starget->hostdata = NULL;
3511 		ata_sas_port_destroy(sata_port->ap);
3512 		kfree(sata_port);
3513 	}
3514 }
3515 
3516 /**
3517  * ipr_find_sdev - Find device based on bus/target/lun.
3518  * @sdev:	scsi device struct
3519  *
3520  * Return value:
3521  * 	resource entry pointer if found / NULL if not found
3522  **/
3523 static struct ipr_resource_entry *ipr_find_sdev(struct scsi_device *sdev)
3524 {
3525 	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;
3526 	struct ipr_resource_entry *res;
3527 
3528 	list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
3529 		if ((res->cfgte.res_addr.bus == sdev->channel) &&
3530 		    (res->cfgte.res_addr.target == sdev->id) &&
3531 		    (res->cfgte.res_addr.lun == sdev->lun))
3532 			return res;
3533 	}
3534 
3535 	return NULL;
3536 }
3537 
3538 /**
3539  * ipr_slave_destroy - Unconfigure a SCSI device
3540  * @sdev:	scsi device struct
3541  *
3542  * Return value:
3543  * 	nothing
3544  **/
3545 static void ipr_slave_destroy(struct scsi_device *sdev)
3546 {
3547 	struct ipr_resource_entry *res;
3548 	struct ipr_ioa_cfg *ioa_cfg;
3549 	unsigned long lock_flags = 0;
3550 
3551 	ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;
3552 
3553 	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3554 	res = (struct ipr_resource_entry *) sdev->hostdata;
3555 	if (res) {
3556 		if (res->sata_port)
3557 			ata_port_disable(res->sata_port->ap);
3558 		sdev->hostdata = NULL;
3559 		res->sdev = NULL;
3560 		res->sata_port = NULL;
3561 	}
3562 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3563 }
3564 
3565 /**
3566  * ipr_slave_configure - Configure a SCSI device
3567  * @sdev:	scsi device struct
3568  *
3569  * This function configures the specified scsi device.
3570  *
3571  * Return value:
3572  * 	0 on success
3573  **/
3574 static int ipr_slave_configure(struct scsi_device *sdev)
3575 {
3576 	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;
3577 	struct ipr_resource_entry *res;
3578 	unsigned long lock_flags = 0;
3579 
3580 	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3581 	res = sdev->hostdata;
3582 	if (res) {
3583 		if (ipr_is_af_dasd_device(res))
3584 			sdev->type = TYPE_RAID;
3585 		if (ipr_is_af_dasd_device(res) || ipr_is_ioa_resource(res)) {
3586 			sdev->scsi_level = 4;
3587 			sdev->no_uld_attach = 1;
3588 		}
3589 		if (ipr_is_vset_device(res)) {
3590 			sdev->timeout = IPR_VSET_RW_TIMEOUT;
3591 			blk_queue_max_sectors(sdev->request_queue, IPR_VSET_MAX_SECTORS);
3592 		}
3593 		if (ipr_is_vset_device(res) || ipr_is_scsi_disk(res))
3594 			sdev->allow_restart = 1;
3595 		if (ipr_is_gata(res) && res->sata_port) {
3596 			scsi_adjust_queue_depth(sdev, 0, IPR_MAX_CMD_PER_ATA_LUN);
3597 			ata_sas_slave_configure(sdev, res->sata_port->ap);
3598 		} else {
3599 			scsi_adjust_queue_depth(sdev, 0, sdev->host->cmd_per_lun);
3600 		}
3601 	}
3602 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3603 	return 0;
3604 }
3605 
3606 /**
3607  * ipr_ata_slave_alloc - Prepare for commands to a SATA device
3608  * @sdev:	scsi device struct
3609  *
3610  * This function initializes an ATA port so that future commands
3611  * sent through queuecommand will work.
3612  *
3613  * Return value:
3614  * 	0 on success
3615  **/
3616 static int ipr_ata_slave_alloc(struct scsi_device *sdev)
3617 {
3618 	struct ipr_sata_port *sata_port = NULL;
3619 	int rc = -ENXIO;
3620 
3621 	ENTER;
3622 	if (sdev->sdev_target)
3623 		sata_port = sdev->sdev_target->hostdata;
3624 	if (sata_port)
3625 		rc = ata_sas_port_init(sata_port->ap);
3626 	if (rc)
3627 		ipr_slave_destroy(sdev);
3628 
3629 	LEAVE;
3630 	return rc;
3631 }
3632 
3633 /**
3634  * ipr_slave_alloc - Prepare for commands to a device.
3635  * @sdev:	scsi device struct
3636  *
3637  * This function saves a pointer to the resource entry
3638  * in the scsi device struct if the device exists. We
3639  * can then use this pointer in ipr_queuecommand when
3640  * handling new commands.
3641  *
3642  * Return value:
3643  * 	0 on success / -ENXIO if device does not exist
3644  **/
3645 static int ipr_slave_alloc(struct scsi_device *sdev)
3646 {
3647 	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;
3648 	struct ipr_resource_entry *res;
3649 	unsigned long lock_flags;
3650 	int rc = -ENXIO;
3651 
3652 	sdev->hostdata = NULL;
3653 
3654 	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3655 
3656 	res = ipr_find_sdev(sdev);
3657 	if (res) {
3658 		res->sdev = sdev;
3659 		res->add_to_ml = 0;
3660 		res->in_erp = 0;
3661 		sdev->hostdata = res;
3662 		if (!ipr_is_naca_model(res))
3663 			res->needs_sync_complete = 1;
3664 		rc = 0;
3665 		if (ipr_is_gata(res)) {
3666 			spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3667 			return ipr_ata_slave_alloc(sdev);
3668 		}
3669 	}
3670 
3671 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3672 
3673 	return rc;
3674 }
3675 
3676 /**
3677  * __ipr_eh_host_reset - Reset the host adapter
3678  * @scsi_cmd:	scsi command struct
3679  *
3680  * Return value:
3681  * 	SUCCESS / FAILED
3682  **/
3683 static int __ipr_eh_host_reset(struct scsi_cmnd *scsi_cmd)
3684 {
3685 	struct ipr_ioa_cfg *ioa_cfg;
3686 	int rc;
3687 
3688 	ENTER;
3689 	ioa_cfg = (struct ipr_ioa_cfg *) scsi_cmd->device->host->hostdata;
3690 
3691 	dev_err(&ioa_cfg->pdev->dev,
3692 		"Adapter being reset as a result of error recovery.\n");
3693 
3694 	if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
3695 		ioa_cfg->sdt_state = GET_DUMP;
3696 
3697 	rc = ipr_reset_reload(ioa_cfg, IPR_SHUTDOWN_ABBREV);
3698 
3699 	LEAVE;
3700 	return rc;
3701 }
3702 
3703 static int ipr_eh_host_reset(struct scsi_cmnd *cmd)
3704 {
3705 	int rc;
3706 
3707 	spin_lock_irq(cmd->device->host->host_lock);
3708 	rc = __ipr_eh_host_reset(cmd);
3709 	spin_unlock_irq(cmd->device->host->host_lock);
3710 
3711 	return rc;
3712 }
3713 
3714 /**
3715  * ipr_device_reset - Reset the device
3716  * @ioa_cfg:	ioa config struct
3717  * @res:		resource entry struct
3718  *
3719  * This function issues a device reset to the affected device.
3720  * If the device is a SCSI device, a LUN reset will be sent
3721  * to the device first. If that does not work, a target reset
3722  * will be sent. If the device is a SATA device, a PHY reset will
3723  * be sent.
3724  *
3725  * Return value:
3726  *	0 on success / non-zero on failure
3727  **/
3728 static int ipr_device_reset(struct ipr_ioa_cfg *ioa_cfg,
3729 			    struct ipr_resource_entry *res)
3730 {
3731 	struct ipr_cmnd *ipr_cmd;
3732 	struct ipr_ioarcb *ioarcb;
3733 	struct ipr_cmd_pkt *cmd_pkt;
3734 	struct ipr_ioarcb_ata_regs *regs;
3735 	u32 ioasc;
3736 
3737 	ENTER;
3738 	ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
3739 	ioarcb = &ipr_cmd->ioarcb;
3740 	cmd_pkt = &ioarcb->cmd_pkt;
3741 	regs = &ioarcb->add_data.u.regs;
3742 
3743 	ioarcb->res_handle = res->cfgte.res_handle;
3744 	cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
3745 	cmd_pkt->cdb[0] = IPR_RESET_DEVICE;
3746 	if (ipr_is_gata(res)) {
3747 		cmd_pkt->cdb[2] = IPR_ATA_PHY_RESET;
3748 		ioarcb->add_cmd_parms_len = cpu_to_be32(sizeof(regs->flags));
3749 		regs->flags |= IPR_ATA_FLAG_STATUS_ON_GOOD_COMPLETION;
3750 	}
3751 
3752 	ipr_send_blocking_cmd(ipr_cmd, ipr_timeout, IPR_DEVICE_RESET_TIMEOUT);
3753 	ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
3754 	list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
3755 	if (ipr_is_gata(res) && res->sata_port && ioasc != IPR_IOASC_IOA_WAS_RESET)
3756 		memcpy(&res->sata_port->ioasa, &ipr_cmd->ioasa.u.gata,
3757 		       sizeof(struct ipr_ioasa_gata));
3758 
3759 	LEAVE;
3760 	return (IPR_IOASC_SENSE_KEY(ioasc) ? -EIO : 0);
3761 }
3762 
3763 /**
3764  * ipr_sata_reset - Reset the SATA port
3765  * @ap:		SATA port to reset
3766  * @classes:	class of the attached device
3767  *
3768  * This function issues a SATA phy reset to the affected ATA port.
3769  *
3770  * Return value:
3771  *	0 on success / non-zero on failure
3772  **/
3773 static int ipr_sata_reset(struct ata_port *ap, unsigned int *classes)
3774 {
3775 	struct ipr_sata_port *sata_port = ap->private_data;
3776 	struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg;
3777 	struct ipr_resource_entry *res;
3778 	unsigned long lock_flags = 0;
3779 	int rc = -ENXIO;
3780 
3781 	ENTER;
3782 	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
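	/* An adapter reset may be in progress; sleep until it completes.
	 * The host lock must be dropped around wait_event() and retaken. */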
3783 	while (ioa_cfg->in_reset_reload) {
3784 		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3785 		wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
3786 		spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3787 	}
3788 
3789 	res = sata_port->res;
3790 	if (res) {
3791 		rc = ipr_device_reset(ioa_cfg, res);
3792 		switch (res->cfgte.proto) {
3793 		case IPR_PROTO_SATA:
3794 		case IPR_PROTO_SAS_STP:
3795 			*classes = ATA_DEV_ATA;
3796 			break;
3797 		case IPR_PROTO_SATA_ATAPI:
3798 		case IPR_PROTO_SAS_STP_ATAPI:
3799 			*classes = ATA_DEV_ATAPI;
3800 			break;
3801 		default:
3802 			*classes = ATA_DEV_UNKNOWN;
3803 			break;
3804 		}
3805 	}
3806 
3807 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3808 	LEAVE;
3809 	return rc;
3810 }
3811 
3812 /**
3813  * __ipr_eh_dev_reset - Reset the device
3814  * @scsi_cmd:	scsi command struct
3815  *
3816  * This function issues a device reset to the affected device.
3817  * A LUN reset will be sent to the device first. If that does
3818  * not work, a target reset will be sent.
3819  *
3820  * Return value:
3821  *	SUCCESS / FAILED
3822  **/
3823 static int __ipr_eh_dev_reset(struct scsi_cmnd *scsi_cmd)
3824 {
3825 	struct ipr_cmnd *ipr_cmd;
3826 	struct ipr_ioa_cfg *ioa_cfg;
3827 	struct ipr_resource_entry *res;
3828 	struct ata_port *ap;
3829 	int rc = 0;
3830 
3831 	ENTER;
3832 	ioa_cfg = (struct ipr_ioa_cfg *) scsi_cmd->device->host->hostdata;
3833 	res = scsi_cmd->device->hostdata;
3834 
3835 	if (!res)
3836 		return FAILED;
3837 
3838 	/*
3839 	 * If we are currently going through reset/reload, return failed. This will force the
3840 	 * mid-layer to call ipr_eh_host_reset, which will then go to sleep and wait for the
3841 	 * reset to complete
3842 	 */
3843 	if (ioa_cfg->in_reset_reload)
3844 		return FAILED;
3845 	if (ioa_cfg->ioa_is_dead)
3846 		return FAILED;
3847 
3848 	list_for_each_entry(ipr_cmd, &ioa_cfg->pending_q, queue) {
3849 		if (ipr_cmd->ioarcb.res_handle == res->cfgte.res_handle) {
3850 			if (ipr_cmd->scsi_cmd)
3851 				ipr_cmd->done = ipr_scsi_eh_done;
3852 			if (ipr_cmd->qc && !(ipr_cmd->qc->flags & ATA_QCFLAG_FAILED)) {
3853 				ipr_cmd->qc->err_mask |= AC_ERR_TIMEOUT;
3854 				ipr_cmd->qc->flags |= ATA_QCFLAG_FAILED;
3855 			}
3856 		}
3857 	}
3858 
3859 	res->resetting_device = 1;
3860 	scmd_printk(KERN_ERR, scsi_cmd, "Resetting device\n");
3861 
3862 	if (ipr_is_gata(res) && res->sata_port) {
3863 		ap = res->sata_port->ap;
3864 		spin_unlock_irq(scsi_cmd->device->host->host_lock);
3865 		ata_do_eh(ap, NULL, NULL, ipr_sata_reset, NULL);
3866 		spin_lock_irq(scsi_cmd->device->host->host_lock);
3867 	} else
3868 		rc = ipr_device_reset(ioa_cfg, res);
3869 	res->resetting_device = 0;
3870 
3871 	LEAVE;
3872 	return (rc ? FAILED : SUCCESS);
3873 }
3874 
3875 static int ipr_eh_dev_reset(struct scsi_cmnd *cmd)
3876 {
3877 	int rc;
3878 
3879 	spin_lock_irq(cmd->device->host->host_lock);
3880 	rc = __ipr_eh_dev_reset(cmd);
3881 	spin_unlock_irq(cmd->device->host->host_lock);
3882 
3883 	return rc;
3884 }
3885 
3886 /**
3887  * ipr_bus_reset_done - Op done function for bus reset.
3888  * @ipr_cmd:	ipr command struct
3889  *
3890  * This function is the op done function for a bus reset
3891  *
3892  * Return value:
3893  * 	none
3894  **/
3895 static void ipr_bus_reset_done(struct ipr_cmnd *ipr_cmd)
3896 {
3897 	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
3898 	struct ipr_resource_entry *res;
3899 
3900 	ENTER;
3901 	list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
3902 		if (!memcmp(&res->cfgte.res_handle, &ipr_cmd->ioarcb.res_handle,
3903 			    sizeof(res->cfgte.res_handle))) {
3904 			scsi_report_bus_reset(ioa_cfg->host, res->cfgte.res_addr.bus);
3905 			break;
3906 		}
3907 	}
3908 
3909 	/*
3910 	 * If abort has not completed, indicate the reset has, else call the
3911 	 * abort's done function to wake the sleeping eh thread
3912 	 */
3913 	if (ipr_cmd->sibling->sibling)
3914 		ipr_cmd->sibling->sibling = NULL;
3915 	else
3916 		ipr_cmd->sibling->done(ipr_cmd->sibling);
3917 
3918 	list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
3919 	LEAVE;
3920 }
3921 
3922 /**
3923  * ipr_abort_timeout - An abort task has timed out
3924  * @ipr_cmd:	ipr command struct
3925  *
3926  * This function handles when an abort task times out. If this
3927  * happens we issue a bus reset since we have resources tied
3928  * up that must be freed before returning to the midlayer.
3929  *
3930  * Return value:
3931  *	none
3932  **/
3933 static void ipr_abort_timeout(struct ipr_cmnd *ipr_cmd)
3934 {
3935 	struct ipr_cmnd *reset_cmd;
3936 	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
3937 	struct ipr_cmd_pkt *cmd_pkt;
3938 	unsigned long lock_flags = 0;
3939 
3940 	ENTER;
3941 	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3942 	if (ipr_cmd->completion.done || ioa_cfg->in_reset_reload) {
3943 		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3944 		return;
3945 	}
3946 
3947 	sdev_printk(KERN_ERR, ipr_cmd->u.sdev, "Abort timed out. Resetting bus.\n");
3948 	reset_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
3949 	ipr_cmd->sibling = reset_cmd;
3950 	reset_cmd->sibling = ipr_cmd;
3951 	reset_cmd->ioarcb.res_handle = ipr_cmd->ioarcb.res_handle;
3952 	cmd_pkt = &reset_cmd->ioarcb.cmd_pkt;
3953 	cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
3954 	cmd_pkt->cdb[0] = IPR_RESET_DEVICE;
3955 	cmd_pkt->cdb[2] = IPR_RESET_TYPE_SELECT | IPR_BUS_RESET;
3956 
3957 	ipr_do_req(reset_cmd, ipr_bus_reset_done, ipr_timeout, IPR_DEVICE_RESET_TIMEOUT);
3958 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3959 	LEAVE;
3960 }
3961 
3962 /**
3963  * ipr_cancel_op - Cancel specified op
3964  * @scsi_cmd:	scsi command struct
3965  *
3966  * This function cancels specified op.
3967  *
3968  * Return value:
3969  *	SUCCESS / FAILED
3970  **/
3971 static int ipr_cancel_op(struct scsi_cmnd *scsi_cmd)
3972 {
3973 	struct ipr_cmnd *ipr_cmd;
3974 	struct ipr_ioa_cfg *ioa_cfg;
3975 	struct ipr_resource_entry *res;
3976 	struct ipr_cmd_pkt *cmd_pkt;
3977 	u32 ioasc;
3978 	int op_found = 0;
3979 
3980 	ENTER;
3981 	ioa_cfg = (struct ipr_ioa_cfg *)scsi_cmd->device->host->hostdata;
3982 	res = scsi_cmd->device->hostdata;
3983 
3984 	/* If we are currently going through reset/reload, return failed.
3985 	 * This will force the mid-layer to call ipr_eh_host_reset,
3986 	 * which will then go to sleep and wait for the reset to complete
3987 	 */
3988 	if (ioa_cfg->in_reset_reload || ioa_cfg->ioa_is_dead)
3989 		return FAILED;
3990 	if (!res || !ipr_is_gscsi(res))
3991 		return FAILED;
3992 
3993 	list_for_each_entry(ipr_cmd, &ioa_cfg->pending_q, queue) {
3994 		if (ipr_cmd->scsi_cmd == scsi_cmd) {
3995 			ipr_cmd->done = ipr_scsi_eh_done;
3996 			op_found = 1;
3997 			break;
3998 		}
3999 	}
4000 
4001 	if (!op_found)
4002 		return SUCCESS;
4003 
4004 	ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
4005 	ipr_cmd->ioarcb.res_handle = res->cfgte.res_handle;
4006 	cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt;
4007 	cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
4008 	cmd_pkt->cdb[0] = IPR_CANCEL_ALL_REQUESTS;
4009 	ipr_cmd->u.sdev = scsi_cmd->device;
4010 
4011 	scmd_printk(KERN_ERR, scsi_cmd, "Aborting command: %02X\n",
4012 		    scsi_cmd->cmnd[0]);
4013 	ipr_send_blocking_cmd(ipr_cmd, ipr_abort_timeout, IPR_CANCEL_ALL_TIMEOUT);
4014 	ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
4015 
4016 	/*
4017 	 * If the abort task timed out and we sent a bus reset, we will get
4018 	 * one of the following responses to the abort
4019 	 */
4020 	if (ioasc == IPR_IOASC_BUS_WAS_RESET || ioasc == IPR_IOASC_SYNC_REQUIRED) {
4021 		ioasc = 0;
4022 		ipr_trace;
4023 	}
4024 
4025 	list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
4026 	if (!ipr_is_naca_model(res))
4027 		res->needs_sync_complete = 1;
4028 
4029 	LEAVE;
4030 	return (IPR_IOASC_SENSE_KEY(ioasc) ? FAILED : SUCCESS);
4031 }
4032 
4033 /**
4034  * ipr_eh_abort - Abort a single op
4035  * @scsi_cmd:	scsi command struct
4036  *
4037  * Return value:
4038  * 	SUCCESS / FAILED
4039  **/
4040 static int ipr_eh_abort(struct scsi_cmnd *scsi_cmd)
4041 {
4042 	unsigned long flags;
4043 	int rc;
4044 
4045 	ENTER;
4046 
4047 	spin_lock_irqsave(scsi_cmd->device->host->host_lock, flags);
4048 	rc = ipr_cancel_op(scsi_cmd);
4049 	spin_unlock_irqrestore(scsi_cmd->device->host->host_lock, flags);
4050 
4051 	LEAVE;
4052 	return rc;
4053 }
4054 
4055 /**
4056  * ipr_handle_other_interrupt - Handle "other" interrupts
4057  * @ioa_cfg:	ioa config struct
4058  * @int_reg:	interrupt register
4059  *
4060  * Return value:
4061  * 	IRQ_NONE / IRQ_HANDLED
4062  **/
4063 static irqreturn_t ipr_handle_other_interrupt(struct ipr_ioa_cfg *ioa_cfg,
4064 					      volatile u32 int_reg)
4065 {
4066 	irqreturn_t rc = IRQ_HANDLED;
4067 
4068 	if (int_reg & IPR_PCII_IOA_TRANS_TO_OPER) {
4069 		/* Mask the interrupt */
4070 		writel(IPR_PCII_IOA_TRANS_TO_OPER, ioa_cfg->regs.set_interrupt_mask_reg);
4071 
4072 		/* Clear the interrupt */
4073 		writel(IPR_PCII_IOA_TRANS_TO_OPER, ioa_cfg->regs.clr_interrupt_reg);
4074 		int_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
4075 
4076 		list_del(&ioa_cfg->reset_cmd->queue);
4077 		del_timer(&ioa_cfg->reset_cmd->timer);
4078 		ipr_reset_ioa_job(ioa_cfg->reset_cmd);
4079 	} else {
4080 		if (int_reg & IPR_PCII_IOA_UNIT_CHECKED)
4081 			ioa_cfg->ioa_unit_checked = 1;
4082 		else
4083 			dev_err(&ioa_cfg->pdev->dev,
4084 				"Permanent IOA failure. 0x%08X\n", int_reg);
4085 
4086 		if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
4087 			ioa_cfg->sdt_state = GET_DUMP;
4088 
4089 		ipr_mask_and_clear_interrupts(ioa_cfg, ~0);
4090 		ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
4091 	}
4092 
4093 	return rc;
4094 }
4095 
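/*
 * HRRQ processing note (summary of the loop below): the HRRQ is a
 * circular queue of big-endian 32-bit response words written by the
 * IOA. Each word carries a command block index plus a toggle bit that
 * the IOA flips every time it wraps the queue, so an entry is valid
 * only while its toggle bit matches ioa_cfg->toggle_bit. The driver
 * flips its own copy when hrrq_curr wraps from hrrq_end back to
 * hrrq_start, which is what terminates the inner loop in ipr_isr().
 */
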
4096 /**
4097  * ipr_isr - Interrupt service routine
4098  * @irq:	irq number
4099  * @devp:	pointer to ioa config struct
4100  *
4101  * Return value:
4102  * 	IRQ_NONE / IRQ_HANDLED
4103  **/
4104 static irqreturn_t ipr_isr(int irq, void *devp)
4105 {
4106 	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)devp;
4107 	unsigned long lock_flags = 0;
4108 	volatile u32 int_reg, int_mask_reg;
4109 	u32 ioasc;
4110 	u16 cmd_index;
4111 	struct ipr_cmnd *ipr_cmd;
4112 	irqreturn_t rc = IRQ_NONE;
4113 
4114 	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4115 
4116 	/* If interrupts are disabled, ignore the interrupt */
4117 	if (!ioa_cfg->allow_interrupts) {
4118 		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4119 		return IRQ_NONE;
4120 	}
4121 
4122 	int_mask_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
4123 	int_reg = readl(ioa_cfg->regs.sense_interrupt_reg) & ~int_mask_reg;
4124 
4125 	/* If the adapter did not raise an interrupt, it is not ours; ignore it */
4126 	if (unlikely((int_reg & IPR_PCII_OPER_INTERRUPTS) == 0)) {
4127 		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4128 		return IRQ_NONE;
4129 	}
4130 
4131 	while (1) {
4132 		ipr_cmd = NULL;
4133 
4134 		while ((be32_to_cpu(*ioa_cfg->hrrq_curr) & IPR_HRRQ_TOGGLE_BIT) ==
4135 		       ioa_cfg->toggle_bit) {
4136 
4137 			cmd_index = (be32_to_cpu(*ioa_cfg->hrrq_curr) &
4138 				     IPR_HRRQ_REQ_RESP_HANDLE_MASK) >> IPR_HRRQ_REQ_RESP_HANDLE_SHIFT;
4139 
4140 			if (unlikely(cmd_index >= IPR_NUM_CMD_BLKS)) {
4141 				ioa_cfg->errors_logged++;
4142 				dev_err(&ioa_cfg->pdev->dev, "Invalid response handle from IOA\n");
4143 
4144 				if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
4145 					ioa_cfg->sdt_state = GET_DUMP;
4146 
4147 				ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
4148 				spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4149 				return IRQ_HANDLED;
4150 			}
4151 
4152 			ipr_cmd = ioa_cfg->ipr_cmnd_list[cmd_index];
4153 
4154 			ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
4155 
4156 			ipr_trc_hook(ipr_cmd, IPR_TRACE_FINISH, ioasc);
4157 
4158 			list_del(&ipr_cmd->queue);
4159 			del_timer(&ipr_cmd->timer);
4160 			ipr_cmd->done(ipr_cmd);
4161 
4162 			rc = IRQ_HANDLED;
4163 
4164 			if (ioa_cfg->hrrq_curr < ioa_cfg->hrrq_end) {
4165 				ioa_cfg->hrrq_curr++;
4166 			} else {
4167 				ioa_cfg->hrrq_curr = ioa_cfg->hrrq_start;
4168 				ioa_cfg->toggle_bit ^= 1u;
4169 			}
4170 		}
4171 
4172 		if (ipr_cmd != NULL) {
4173 			/* Clear the PCI interrupt */
4174 			writel(IPR_PCII_HRRQ_UPDATED, ioa_cfg->regs.clr_interrupt_reg);
4175 			int_reg = readl(ioa_cfg->regs.sense_interrupt_reg) & ~int_mask_reg;
4176 		} else
4177 			break;
4178 	}
4179 
4180 	if (unlikely(rc == IRQ_NONE))
4181 		rc = ipr_handle_other_interrupt(ioa_cfg, int_reg);
4182 
4183 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4184 	return rc;
4185 }
4186 
4187 /**
4188  * ipr_build_ioadl - Build a scatter/gather list and map the buffer
4189  * @ioa_cfg:	ioa config struct
4190  * @ipr_cmd:	ipr command struct
4191  *
4192  * Return value:
4193  * 	0 on success / -1 on failure
4194  **/
4195 static int ipr_build_ioadl(struct ipr_ioa_cfg *ioa_cfg,
4196 			   struct ipr_cmnd *ipr_cmd)
4197 {
4198 	int i;
4199 	struct scatterlist *sglist;
4200 	u32 length;
4201 	u32 ioadl_flags = 0;
4202 	struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
4203 	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
4204 	struct ipr_ioadl_desc *ioadl = ipr_cmd->ioadl;
4205 
4206 	length = scsi_cmd->request_bufflen;
4207 
4208 	if (length == 0)
4209 		return 0;
4210 
4211 	if (scsi_cmd->use_sg) {
4212 		ipr_cmd->dma_use_sg = pci_map_sg(ioa_cfg->pdev,
4213 						 scsi_cmd->request_buffer,
4214 						 scsi_cmd->use_sg,
4215 						 scsi_cmd->sc_data_direction);
4216 
4217 		if (scsi_cmd->sc_data_direction == DMA_TO_DEVICE) {
4218 			ioadl_flags = IPR_IOADL_FLAGS_WRITE;
4219 			ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
4220 			ioarcb->write_data_transfer_length = cpu_to_be32(length);
4221 			ioarcb->write_ioadl_len =
4222 				cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
4223 		} else if (scsi_cmd->sc_data_direction == DMA_FROM_DEVICE) {
4224 			ioadl_flags = IPR_IOADL_FLAGS_READ;
4225 			ioarcb->read_data_transfer_length = cpu_to_be32(length);
4226 			ioarcb->read_ioadl_len =
4227 				cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
4228 		}
4229 
4230 		sglist = scsi_cmd->request_buffer;
4231 
4232 		for (i = 0; i < ipr_cmd->dma_use_sg; i++) {
4233 			ioadl[i].flags_and_data_len =
4234 				cpu_to_be32(ioadl_flags | sg_dma_len(&sglist[i]));
4235 			ioadl[i].address =
4236 				cpu_to_be32(sg_dma_address(&sglist[i]));
4237 		}
4238 
4239 		if (likely(ipr_cmd->dma_use_sg)) {
4240 			ioadl[i-1].flags_and_data_len |=
4241 				cpu_to_be32(IPR_IOADL_FLAGS_LAST);
4242 			return 0;
4243 		} else
4244 			dev_err(&ioa_cfg->pdev->dev, "pci_map_sg failed!\n");
4245 	} else {
4246 		if (scsi_cmd->sc_data_direction == DMA_TO_DEVICE) {
4247 			ioadl_flags = IPR_IOADL_FLAGS_WRITE;
4248 			ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
4249 			ioarcb->write_data_transfer_length = cpu_to_be32(length);
4250 			ioarcb->write_ioadl_len = cpu_to_be32(sizeof(struct ipr_ioadl_desc));
4251 		} else if (scsi_cmd->sc_data_direction == DMA_FROM_DEVICE) {
4252 			ioadl_flags = IPR_IOADL_FLAGS_READ;
4253 			ioarcb->read_data_transfer_length = cpu_to_be32(length);
4254 			ioarcb->read_ioadl_len = cpu_to_be32(sizeof(struct ipr_ioadl_desc));
4255 		}
4256 
4257 		ipr_cmd->dma_handle = pci_map_single(ioa_cfg->pdev,
4258 						     scsi_cmd->request_buffer, length,
4259 						     scsi_cmd->sc_data_direction);
4260 
4261 		if (likely(!pci_dma_mapping_error(ipr_cmd->dma_handle))) {
4262 			ipr_cmd->dma_use_sg = 1;
4263 			ioadl[0].flags_and_data_len =
4264 				cpu_to_be32(ioadl_flags | length | IPR_IOADL_FLAGS_LAST);
4265 			ioadl[0].address = cpu_to_be32(ipr_cmd->dma_handle);
4266 			return 0;
4267 		} else
4268 			dev_err(&ioa_cfg->pdev->dev, "pci_map_single failed!\n");
4269 	}
4270 
4271 	return -1;
4272 }
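
/*
 * IOADL element format, as built above: each struct ipr_ioadl_desc is a
 * pair of big-endian 32-bit words -- the transfer flags ORed with the
 * buffer length, followed by the 32-bit DMA address of the buffer. The
 * final element is tagged with IPR_IOADL_FLAGS_LAST so the IOA knows
 * where the list ends.
 */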
4273 
4274 /**
4275  * ipr_get_task_attributes - Translate SPI Q-Tag to task attributes
4276  * @scsi_cmd:	scsi command struct
4277  *
4278  * Return value:
4279  * 	task attributes
4280  **/
4281 static u8 ipr_get_task_attributes(struct scsi_cmnd *scsi_cmd)
4282 {
4283 	u8 tag[2];
4284 	u8 rc = IPR_FLAGS_LO_UNTAGGED_TASK;
4285 
4286 	if (scsi_populate_tag_msg(scsi_cmd, tag)) {
4287 		switch (tag[0]) {
4288 		case MSG_SIMPLE_TAG:
4289 			rc = IPR_FLAGS_LO_SIMPLE_TASK;
4290 			break;
4291 		case MSG_HEAD_TAG:
4292 			rc = IPR_FLAGS_LO_HEAD_OF_Q_TASK;
4293 			break;
4294 		case MSG_ORDERED_TAG:
4295 			rc = IPR_FLAGS_LO_ORDERED_TASK;
4296 			break;
4297 		}
4298 	}
4299 
4300 	return rc;
4301 }
4302 
4303 /**
4304  * ipr_erp_done - Process completion of ERP for a device
4305  * @ipr_cmd:		ipr command struct
4306  *
4307  * This function copies the sense buffer into the scsi_cmd
4308  * struct and pushes the scsi_done function.
4309  *
4310  * Return value:
4311  * 	nothing
4312  **/
4313 static void ipr_erp_done(struct ipr_cmnd *ipr_cmd)
4314 {
4315 	struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
4316 	struct ipr_resource_entry *res = scsi_cmd->device->hostdata;
4317 	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
4318 	u32 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
4319 
4320 	if (IPR_IOASC_SENSE_KEY(ioasc) > 0) {
4321 		scsi_cmd->result |= (DID_ERROR << 16);
4322 		scmd_printk(KERN_ERR, scsi_cmd,
4323 			    "Request Sense failed with IOASC: 0x%08X\n", ioasc);
4324 	} else {
4325 		memcpy(scsi_cmd->sense_buffer, ipr_cmd->sense_buffer,
4326 		       SCSI_SENSE_BUFFERSIZE);
4327 	}
4328 
4329 	if (res) {
4330 		if (!ipr_is_naca_model(res))
4331 			res->needs_sync_complete = 1;
4332 		res->in_erp = 0;
4333 	}
4334 	ipr_unmap_sglist(ioa_cfg, ipr_cmd);
4335 	list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
4336 	scsi_cmd->scsi_done(scsi_cmd);
4337 }
4338 
4339 /**
4340  * ipr_reinit_ipr_cmnd_for_erp - Re-initialize a cmnd block to be used for ERP
4341  * @ipr_cmd:	ipr command struct
4342  *
4343  * Return value:
4344  * 	none
4345  **/
4346 static void ipr_reinit_ipr_cmnd_for_erp(struct ipr_cmnd *ipr_cmd)
4347 {
4348 	struct ipr_ioarcb *ioarcb;
4349 	struct ipr_ioasa *ioasa;
4350 
4351 	ioarcb = &ipr_cmd->ioarcb;
4352 	ioasa = &ipr_cmd->ioasa;
4353 
4354 	memset(&ioarcb->cmd_pkt, 0, sizeof(struct ipr_cmd_pkt));
4355 	ioarcb->write_data_transfer_length = 0;
4356 	ioarcb->read_data_transfer_length = 0;
4357 	ioarcb->write_ioadl_len = 0;
4358 	ioarcb->read_ioadl_len = 0;
4359 	ioasa->ioasc = 0;
4360 	ioasa->residual_data_len = 0;
4361 }
4362 
4363 /**
4364  * ipr_erp_request_sense - Send request sense to a device
4365  * @ipr_cmd:	ipr command struct
4366  *
4367  * This function sends a request sense to a device as a result
4368  * of a check condition.
4369  *
4370  * Return value:
4371  * 	nothing
4372  **/
4373 static void ipr_erp_request_sense(struct ipr_cmnd *ipr_cmd)
4374 {
4375 	struct ipr_cmd_pkt *cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt;
4376 	u32 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
4377 
4378 	if (IPR_IOASC_SENSE_KEY(ioasc) > 0) {
4379 		ipr_erp_done(ipr_cmd);
4380 		return;
4381 	}
4382 
4383 	ipr_reinit_ipr_cmnd_for_erp(ipr_cmd);
4384 
4385 	cmd_pkt->request_type = IPR_RQTYPE_SCSICDB;
4386 	cmd_pkt->cdb[0] = REQUEST_SENSE;
4387 	cmd_pkt->cdb[4] = SCSI_SENSE_BUFFERSIZE;
4388 	cmd_pkt->flags_hi |= IPR_FLAGS_HI_SYNC_OVERRIDE;
4389 	cmd_pkt->flags_hi |= IPR_FLAGS_HI_NO_ULEN_CHK;
4390 	cmd_pkt->timeout = cpu_to_be16(IPR_REQUEST_SENSE_TIMEOUT / HZ);
4391 
4392 	ipr_cmd->ioadl[0].flags_and_data_len =
4393 		cpu_to_be32(IPR_IOADL_FLAGS_READ_LAST | SCSI_SENSE_BUFFERSIZE);
4394 	ipr_cmd->ioadl[0].address =
4395 		cpu_to_be32(ipr_cmd->sense_buffer_dma);
4396 
4397 	ipr_cmd->ioarcb.read_ioadl_len =
4398 		cpu_to_be32(sizeof(struct ipr_ioadl_desc));
4399 	ipr_cmd->ioarcb.read_data_transfer_length =
4400 		cpu_to_be32(SCSI_SENSE_BUFFERSIZE);
4401 
4402 	ipr_do_req(ipr_cmd, ipr_erp_done, ipr_timeout,
4403 		   IPR_REQUEST_SENSE_TIMEOUT * 2);
4404 }
4405 
4406 /**
4407  * ipr_erp_cancel_all - Send cancel all to a device
4408  * @ipr_cmd:	ipr command struct
4409  *
4410  * This function sends a cancel all to a device to clear the
4411  * queue. If we are running TCQ on the device, QERR is set to 1,
4412  * which means all outstanding ops have been dropped on the floor.
4413  * Cancel all will return them to us.
4414  *
4415  * Return value:
4416  * 	nothing
4417  **/
4418 static void ipr_erp_cancel_all(struct ipr_cmnd *ipr_cmd)
4419 {
4420 	struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
4421 	struct ipr_resource_entry *res = scsi_cmd->device->hostdata;
4422 	struct ipr_cmd_pkt *cmd_pkt;
4423 
4424 	res->in_erp = 1;
4425 
4426 	ipr_reinit_ipr_cmnd_for_erp(ipr_cmd);
4427 
4428 	if (!scsi_get_tag_type(scsi_cmd->device)) {
4429 		ipr_erp_request_sense(ipr_cmd);
4430 		return;
4431 	}
4432 
4433 	cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt;
4434 	cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
4435 	cmd_pkt->cdb[0] = IPR_CANCEL_ALL_REQUESTS;
4436 
4437 	ipr_do_req(ipr_cmd, ipr_erp_request_sense, ipr_timeout,
4438 		   IPR_CANCEL_ALL_TIMEOUT);
4439 }
4440 
4441 /**
4442  * ipr_dump_ioasa - Dump contents of IOASA
4443  * @ioa_cfg:	ioa config struct
4444  * @ipr_cmd:	ipr command struct
4445  * @res:		resource entry struct
4446  *
4447  * This function is invoked by the interrupt handler when ops
4448  * fail. It will log the IOASA if appropriate. Only called
4449  * for GPDD ops.
4450  *
4451  * Return value:
4452  * 	none
4453  **/
4454 static void ipr_dump_ioasa(struct ipr_ioa_cfg *ioa_cfg,
4455 			   struct ipr_cmnd *ipr_cmd, struct ipr_resource_entry *res)
4456 {
4457 	int i;
4458 	u16 data_len;
4459 	u32 ioasc;
4460 	struct ipr_ioasa *ioasa = &ipr_cmd->ioasa;
4461 	__be32 *ioasa_data = (__be32 *)ioasa;
4462 	int error_index;
4463 
4464 	ioasc = be32_to_cpu(ioasa->ioasc) & IPR_IOASC_IOASC_MASK;
4465 
4466 	if (0 == ioasc)
4467 		return;
4468 
4469 	if (ioa_cfg->log_level < IPR_DEFAULT_LOG_LEVEL)
4470 		return;
4471 
4472 	error_index = ipr_get_error(ioasc);
4473 
4474 	if (ioa_cfg->log_level < IPR_MAX_LOG_LEVEL) {
4475 		/* Don't log an error if the IOA already logged one */
4476 		if (ioasa->ilid != 0)
4477 			return;
4478 
4479 		if (ipr_error_table[error_index].log_ioasa == 0)
4480 			return;
4481 	}
4482 
4483 	ipr_res_err(ioa_cfg, res, "%s\n", ipr_error_table[error_index].error);
4484 
4485 	if (sizeof(struct ipr_ioasa) < be16_to_cpu(ioasa->ret_stat_len))
4486 		data_len = sizeof(struct ipr_ioasa);
4487 	else
4488 		data_len = be16_to_cpu(ioasa->ret_stat_len);
4489 
4490 	ipr_err("IOASA Dump:\n");
4491 
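	/* data_len is in bytes; print four 32-bit words per line, prefixed
	 * with the byte offset (i * 4) of the first word on the line */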
4492 	for (i = 0; i < data_len / 4; i += 4) {
4493 		ipr_err("%08X: %08X %08X %08X %08X\n", i*4,
4494 			be32_to_cpu(ioasa_data[i]),
4495 			be32_to_cpu(ioasa_data[i+1]),
4496 			be32_to_cpu(ioasa_data[i+2]),
4497 			be32_to_cpu(ioasa_data[i+3]));
4498 	}
4499 }
4500 
4501 /**
4502  * ipr_gen_sense - Generate SCSI sense data from an IOASA
4503  * @ipr_cmd:	ipr command struct
4505  *
4506  * Return value:
4507  * 	none
4508  **/
4509 static void ipr_gen_sense(struct ipr_cmnd *ipr_cmd)
4510 {
4511 	u32 failing_lba;
4512 	u8 *sense_buf = ipr_cmd->scsi_cmd->sense_buffer;
4513 	struct ipr_resource_entry *res = ipr_cmd->scsi_cmd->device->hostdata;
4514 	struct ipr_ioasa *ioasa = &ipr_cmd->ioasa;
4515 	u32 ioasc = be32_to_cpu(ioasa->ioasc);
4516 
4517 	memset(sense_buf, 0, SCSI_SENSE_BUFFERSIZE);
4518 
4519 	if (ioasc >= IPR_FIRST_DRIVER_IOASC)
4520 		return;
4521 
4522 	ipr_cmd->scsi_cmd->result = SAM_STAT_CHECK_CONDITION;
4523 
4524 	if (ipr_is_vset_device(res) &&
4525 	    ioasc == IPR_IOASC_MED_DO_NOT_REALLOC &&
4526 	    ioasa->u.vset.failing_lba_hi != 0) {
4527 		sense_buf[0] = 0x72;
4528 		sense_buf[1] = IPR_IOASC_SENSE_KEY(ioasc);
4529 		sense_buf[2] = IPR_IOASC_SENSE_CODE(ioasc);
4530 		sense_buf[3] = IPR_IOASC_SENSE_QUAL(ioasc);
4531 
4532 		sense_buf[7] = 12;
4533 		sense_buf[8] = 0;
4534 		sense_buf[9] = 0x0A;
4535 		sense_buf[10] = 0x80;
4536 
4537 		failing_lba = be32_to_cpu(ioasa->u.vset.failing_lba_hi);
4538 
4539 		sense_buf[12] = (failing_lba & 0xff000000) >> 24;
4540 		sense_buf[13] = (failing_lba & 0x00ff0000) >> 16;
4541 		sense_buf[14] = (failing_lba & 0x0000ff00) >> 8;
4542 		sense_buf[15] = failing_lba & 0x000000ff;
4543 
4544 		failing_lba = be32_to_cpu(ioasa->u.vset.failing_lba_lo);
4545 
4546 		sense_buf[16] = (failing_lba & 0xff000000) >> 24;
4547 		sense_buf[17] = (failing_lba & 0x00ff0000) >> 16;
4548 		sense_buf[18] = (failing_lba & 0x0000ff00) >> 8;
4549 		sense_buf[19] = failing_lba & 0x000000ff;
4550 	} else {
4551 		sense_buf[0] = 0x70;
4552 		sense_buf[2] = IPR_IOASC_SENSE_KEY(ioasc);
4553 		sense_buf[12] = IPR_IOASC_SENSE_CODE(ioasc);
4554 		sense_buf[13] = IPR_IOASC_SENSE_QUAL(ioasc);
4555 
4556 		/* Illegal request */
4557 		if ((IPR_IOASC_SENSE_KEY(ioasc) == 0x05) &&
4558 		    (be32_to_cpu(ioasa->ioasc_specific) & IPR_FIELD_POINTER_VALID)) {
4559 			sense_buf[7] = 10;	/* additional length */
4560 
4561 			/* IOARCB was in error */
4562 			if (IPR_IOASC_SENSE_CODE(ioasc) == 0x24)
4563 				sense_buf[15] = 0xC0;
4564 			else	/* Parameter data was invalid */
4565 				sense_buf[15] = 0x80;
4566 
4567 			sense_buf[16] =
4568 			    ((IPR_FIELD_POINTER_MASK &
4569 			      be32_to_cpu(ioasa->ioasc_specific)) >> 8) & 0xff;
4570 			sense_buf[17] =
4571 			    (IPR_FIELD_POINTER_MASK &
4572 			     be32_to_cpu(ioasa->ioasc_specific)) & 0xff;
4573 		} else {
4574 			if (ioasc == IPR_IOASC_MED_DO_NOT_REALLOC) {
4575 				if (ipr_is_vset_device(res))
4576 					failing_lba = be32_to_cpu(ioasa->u.vset.failing_lba_lo);
4577 				else
4578 					failing_lba = be32_to_cpu(ioasa->u.dasd.failing_lba);
4579 
4580 				sense_buf[0] |= 0x80;	/* Or in the Valid bit */
4581 				sense_buf[3] = (failing_lba & 0xff000000) >> 24;
4582 				sense_buf[4] = (failing_lba & 0x00ff0000) >> 16;
4583 				sense_buf[5] = (failing_lba & 0x0000ff00) >> 8;
4584 				sense_buf[6] = failing_lba & 0x000000ff;
4585 			}
4586 
4587 			sense_buf[7] = 6;	/* additional length */
4588 		}
4589 	}
4590 }
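
/*
 * Resulting sense buffer layout, for reference (see SPC-3):
 *
 * Descriptor format (response code 0x72), used when a vset failing LBA
 * needs more than 32 bits:
 *	byte 0: 0x72, byte 1: sense key, byte 2: ASC, byte 3: ASCQ
 *	byte 7: additional length (12)
 *	byte 8: descriptor type 0x00 (information), byte 9: length 0x0A
 *	byte 10: 0x80 (VALID), bytes 12-19: 64-bit failing LBA
 *
 * Fixed format (response code 0x70):
 *	byte 0: 0x70, ORed with 0x80 (valid) when an LBA is supplied
 *	byte 2: sense key, bytes 3-6: 32-bit failing LBA (information)
 *	byte 7: additional length, byte 12: ASC, byte 13: ASCQ
 *	bytes 15-17: field pointer data for illegal request errors
 */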
4591 
4592 /**
4593  * ipr_get_autosense - Copy autosense data to sense buffer
4594  * @ipr_cmd:	ipr command struct
4595  *
4596  * This function copies the autosense buffer to the buffer
4597  * in the scsi_cmd, if there is autosense available.
4598  *
4599  * Return value:
4600  *	1 if autosense was available / 0 if not
4601  **/
4602 static int ipr_get_autosense(struct ipr_cmnd *ipr_cmd)
4603 {
4604 	struct ipr_ioasa *ioasa = &ipr_cmd->ioasa;
4605 
4606 	if ((be32_to_cpu(ioasa->ioasc_specific) & IPR_AUTOSENSE_VALID) == 0)
4607 		return 0;
4608 
4609 	memcpy(ipr_cmd->scsi_cmd->sense_buffer, ioasa->auto_sense.data,
4610 	       min_t(u16, be16_to_cpu(ioasa->auto_sense.auto_sense_len),
4611 		   SCSI_SENSE_BUFFERSIZE));
4612 	return 1;
4613 }
4614 
4615 /**
4616  * ipr_erp_start - Process an error response for a SCSI op
4617  * @ioa_cfg:	ioa config struct
4618  * @ipr_cmd:	ipr command struct
4619  *
4620  * This function determines whether or not to initiate ERP
4621  * on the affected device.
4622  *
4623  * Return value:
4624  * 	nothing
4625  **/
4626 static void ipr_erp_start(struct ipr_ioa_cfg *ioa_cfg,
4627 			      struct ipr_cmnd *ipr_cmd)
4628 {
4629 	struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
4630 	struct ipr_resource_entry *res = scsi_cmd->device->hostdata;
4631 	u32 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
4632 
4633 	if (!res) {
4634 		ipr_scsi_eh_done(ipr_cmd);
4635 		return;
4636 	}
4637 
4638 	if (ipr_is_gscsi(res))
4639 		ipr_dump_ioasa(ioa_cfg, ipr_cmd, res);
4640 	else
4641 		ipr_gen_sense(ipr_cmd);
4642 
4643 	switch (ioasc & IPR_IOASC_IOASC_MASK) {
4644 	case IPR_IOASC_ABORTED_CMD_TERM_BY_HOST:
4645 		if (ipr_is_naca_model(res))
4646 			scsi_cmd->result |= (DID_ABORT << 16);
4647 		else
4648 			scsi_cmd->result |= (DID_IMM_RETRY << 16);
4649 		break;
4650 	case IPR_IOASC_IR_RESOURCE_HANDLE:
4651 	case IPR_IOASC_IR_NO_CMDS_TO_2ND_IOA:
4652 		scsi_cmd->result |= (DID_NO_CONNECT << 16);
4653 		break;
4654 	case IPR_IOASC_HW_SEL_TIMEOUT:
4655 		scsi_cmd->result |= (DID_NO_CONNECT << 16);
4656 		if (!ipr_is_naca_model(res))
4657 			res->needs_sync_complete = 1;
4658 		break;
4659 	case IPR_IOASC_SYNC_REQUIRED:
4660 		if (!res->in_erp)
4661 			res->needs_sync_complete = 1;
4662 		scsi_cmd->result |= (DID_IMM_RETRY << 16);
4663 		break;
4664 	case IPR_IOASC_MED_DO_NOT_REALLOC: /* prevent retries */
4665 	case IPR_IOASA_IR_DUAL_IOA_DISABLED:
4666 		scsi_cmd->result |= (DID_PASSTHROUGH << 16);
4667 		break;
4668 	case IPR_IOASC_BUS_WAS_RESET:
4669 	case IPR_IOASC_BUS_WAS_RESET_BY_OTHER:
4670 		/*
4671 		 * Report the bus reset and ask for a retry. The device
4672 		 * will give CC/UA on the next command.
4673 		 */
4674 		if (!res->resetting_device)
4675 			scsi_report_bus_reset(ioa_cfg->host, scsi_cmd->device->channel);
4676 		scsi_cmd->result |= (DID_ERROR << 16);
4677 		if (!ipr_is_naca_model(res))
4678 			res->needs_sync_complete = 1;
4679 		break;
4680 	case IPR_IOASC_HW_DEV_BUS_STATUS:
4681 		scsi_cmd->result |= IPR_IOASC_SENSE_STATUS(ioasc);
4682 		if (IPR_IOASC_SENSE_STATUS(ioasc) == SAM_STAT_CHECK_CONDITION) {
4683 			if (!ipr_get_autosense(ipr_cmd)) {
4684 				if (!ipr_is_naca_model(res)) {
4685 					ipr_erp_cancel_all(ipr_cmd);
4686 					return;
4687 				}
4688 			}
4689 		}
4690 		if (!ipr_is_naca_model(res))
4691 			res->needs_sync_complete = 1;
4692 		break;
4693 	case IPR_IOASC_NR_INIT_CMD_REQUIRED:
4694 		break;
4695 	default:
4696 		if (IPR_IOASC_SENSE_KEY(ioasc) > RECOVERED_ERROR)
4697 			scsi_cmd->result |= (DID_ERROR << 16);
4698 		if (!ipr_is_vset_device(res) && !ipr_is_naca_model(res))
4699 			res->needs_sync_complete = 1;
4700 		break;
4701 	}
4702 
4703 	ipr_unmap_sglist(ioa_cfg, ipr_cmd);
4704 	list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
4705 	scsi_cmd->scsi_done(scsi_cmd);
4706 }
4707 
4708 /**
4709  * ipr_scsi_done - mid-layer done function
4710  * @ipr_cmd:	ipr command struct
4711  *
4712  * This function is invoked by the interrupt handler for
4713  * ops generated by the SCSI mid-layer
4714  *
4715  * Return value:
4716  * 	none
4717  **/
4718 static void ipr_scsi_done(struct ipr_cmnd *ipr_cmd)
4719 {
4720 	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
4721 	struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
4722 	u32 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
4723 
4724 	scsi_cmd->resid = be32_to_cpu(ipr_cmd->ioasa.residual_data_len);
4725 
4726 	if (likely(IPR_IOASC_SENSE_KEY(ioasc) == 0)) {
4727 		ipr_unmap_sglist(ioa_cfg, ipr_cmd);
4728 		list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
4729 		scsi_cmd->scsi_done(scsi_cmd);
4730 	} else
4731 		ipr_erp_start(ioa_cfg, ipr_cmd);
4732 }
4733 
4734 /**
4735  * ipr_queuecommand - Queue a mid-layer request
4736  * @scsi_cmd:	scsi command struct
4737  * @done:		done function
4738  *
4739  * This function queues a request generated by the mid-layer.
4740  *
4741  * Return value:
4742  *	0 on success
4743  *	SCSI_MLQUEUE_DEVICE_BUSY if device is busy
4744  *	SCSI_MLQUEUE_HOST_BUSY if host is busy
4745  **/
4746 static int ipr_queuecommand(struct scsi_cmnd *scsi_cmd,
4747 			    void (*done) (struct scsi_cmnd *))
4748 {
4749 	struct ipr_ioa_cfg *ioa_cfg;
4750 	struct ipr_resource_entry *res;
4751 	struct ipr_ioarcb *ioarcb;
4752 	struct ipr_cmnd *ipr_cmd;
4753 	int rc = 0;
4754 
4755 	scsi_cmd->scsi_done = done;
4756 	ioa_cfg = (struct ipr_ioa_cfg *)scsi_cmd->device->host->hostdata;
4757 	res = scsi_cmd->device->hostdata;
4758 	scsi_cmd->result = (DID_OK << 16);
4759 
4760 	/*
4761 	 * We are currently blocking all devices due to a host reset.
4762 	 * We have told the host to stop giving us new requests, but
4763 	 * ERP ops don't count. FIXME
4764 	 */
4765 	if (unlikely(!ioa_cfg->allow_cmds && !ioa_cfg->ioa_is_dead))
4766 		return SCSI_MLQUEUE_HOST_BUSY;
4767 
4768 	/*
4769 	 * FIXME - Create scsi_set_host_offline interface
4770 	 *  and the ioa_is_dead check can be removed
4771 	 */
4772 	if (unlikely(ioa_cfg->ioa_is_dead || !res)) {
4773 		memset(scsi_cmd->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
4774 		scsi_cmd->result = (DID_NO_CONNECT << 16);
4775 		scsi_cmd->scsi_done(scsi_cmd);
4776 		return 0;
4777 	}
4778 
4779 	if (ipr_is_gata(res) && res->sata_port)
4780 		return ata_sas_queuecmd(scsi_cmd, done, res->sata_port->ap);
4781 
4782 	ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
4783 	ioarcb = &ipr_cmd->ioarcb;
4784 	list_add_tail(&ipr_cmd->queue, &ioa_cfg->pending_q);
4785 
4786 	memcpy(ioarcb->cmd_pkt.cdb, scsi_cmd->cmnd, scsi_cmd->cmd_len);
4787 	ipr_cmd->scsi_cmd = scsi_cmd;
4788 	ioarcb->res_handle = res->cfgte.res_handle;
4789 	ipr_cmd->done = ipr_scsi_done;
4790 	ipr_trc_hook(ipr_cmd, IPR_TRACE_START, IPR_GET_PHYS_LOC(res->cfgte.res_addr));
4791 
4792 	if (ipr_is_gscsi(res) || ipr_is_vset_device(res)) {
4793 		if (scsi_cmd->underflow == 0)
4794 			ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_ULEN_CHK;
4795 
4796 		if (res->needs_sync_complete) {
4797 			ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_SYNC_COMPLETE;
4798 			res->needs_sync_complete = 0;
4799 		}
4800 
4801 		ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_LINK_DESC;
4802 		ioarcb->cmd_pkt.flags_lo |= IPR_FLAGS_LO_DELAY_AFTER_RST;
4803 		ioarcb->cmd_pkt.flags_lo |= IPR_FLAGS_LO_ALIGNED_BFR;
4804 		ioarcb->cmd_pkt.flags_lo |= ipr_get_task_attributes(scsi_cmd);
4805 	}
4806 
4807 	if (scsi_cmd->cmnd[0] >= 0xC0 &&
4808 	    (!ipr_is_gscsi(res) || scsi_cmd->cmnd[0] == IPR_QUERY_RSRC_STATE))
4809 		ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
4810 
4811 	if (likely(rc == 0))
4812 		rc = ipr_build_ioadl(ioa_cfg, ipr_cmd);
4813 
4814 	if (likely(rc == 0)) {
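		/* Make the IOARCB updates visible to the IOA before sending its address */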
4815 		mb();
4816 		writel(be32_to_cpu(ipr_cmd->ioarcb.ioarcb_host_pci_addr),
4817 		       ioa_cfg->regs.ioarrin_reg);
4818 	} else {
4819 		list_move_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
4820 		return SCSI_MLQUEUE_HOST_BUSY;
4821 	}
4822 
4823 	return 0;
4824 }
4825 
4826 /**
4827  * ipr_ioctl - IOCTL handler
4828  * @sdev:	scsi device struct
4829  * @cmd:	IOCTL cmd
4830  * @arg:	IOCTL arg
4831  *
4832  * Return value:
4833  * 	0 on success / other on failure
4834  **/
4835 static int ipr_ioctl(struct scsi_device *sdev, int cmd, void __user *arg)
4836 {
4837 	struct ipr_resource_entry *res;
4838 
4839 	res = (struct ipr_resource_entry *)sdev->hostdata;
4840 	if (res && ipr_is_gata(res))
4841 		return ata_scsi_ioctl(sdev, cmd, arg);
4842 
4843 	return -EINVAL;
4844 }
4845 
4846 /**
4847  * ipr_ioa_info - Get information about the card/driver
4848  * @host:	scsi host struct
4849  *
4850  * Return value:
4851  * 	pointer to buffer with description string
4852  **/
4853 static const char *ipr_ioa_info(struct Scsi_Host *host)
4854 {
4855 	static char buffer[512];
4856 	struct ipr_ioa_cfg *ioa_cfg;
4857 	unsigned long lock_flags = 0;
4858 
4859 	ioa_cfg = (struct ipr_ioa_cfg *) host->hostdata;
4860 
4861 	spin_lock_irqsave(host->host_lock, lock_flags);
4862 	sprintf(buffer, "IBM %X Storage Adapter", ioa_cfg->type);
4863 	spin_unlock_irqrestore(host->host_lock, lock_flags);
4864 
4865 	return buffer;
4866 }
4867 
4868 static struct scsi_host_template driver_template = {
4869 	.module = THIS_MODULE,
4870 	.name = "IPR",
4871 	.info = ipr_ioa_info,
4872 	.ioctl = ipr_ioctl,
4873 	.queuecommand = ipr_queuecommand,
4874 	.eh_abort_handler = ipr_eh_abort,
4875 	.eh_device_reset_handler = ipr_eh_dev_reset,
4876 	.eh_host_reset_handler = ipr_eh_host_reset,
4877 	.slave_alloc = ipr_slave_alloc,
4878 	.slave_configure = ipr_slave_configure,
4879 	.slave_destroy = ipr_slave_destroy,
4880 	.target_alloc = ipr_target_alloc,
4881 	.target_destroy = ipr_target_destroy,
4882 	.change_queue_depth = ipr_change_queue_depth,
4883 	.change_queue_type = ipr_change_queue_type,
4884 	.bios_param = ipr_biosparam,
4885 	.can_queue = IPR_MAX_COMMANDS,
4886 	.this_id = -1,
4887 	.sg_tablesize = IPR_MAX_SGLIST,
4888 	.max_sectors = IPR_IOA_MAX_SECTORS,
4889 	.cmd_per_lun = IPR_MAX_CMD_PER_LUN,
4890 	.use_clustering = ENABLE_CLUSTERING,
4891 	.shost_attrs = ipr_ioa_attrs,
4892 	.sdev_attrs = ipr_dev_attrs,
4893 	.proc_name = IPR_NAME
4894 };
4895 
4896 /**
4897  * ipr_ata_phy_reset - libata phy_reset handler
4898  * @ap:		ata port to reset
4899  *
4900  **/
4901 static void ipr_ata_phy_reset(struct ata_port *ap)
4902 {
4903 	unsigned long flags;
4904 	struct ipr_sata_port *sata_port = ap->private_data;
4905 	struct ipr_resource_entry *res = sata_port->res;
4906 	struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg;
4907 	int rc;
4908 
4909 	ENTER;
4910 	spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
4911 	while (ioa_cfg->in_reset_reload) {
4912 		spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
4913 		wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
4914 		spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
4915 	}
4916 
4917 	if (!ioa_cfg->allow_cmds)
4918 		goto out_unlock;
4919 
4920 	rc = ipr_device_reset(ioa_cfg, res);
4921 
4922 	if (rc) {
4923 		ap->ops->port_disable(ap);
4924 		goto out_unlock;
4925 	}
4926 
4927 	switch (res->cfgte.proto) {
4928 	case IPR_PROTO_SATA:
4929 	case IPR_PROTO_SAS_STP:
4930 		ap->device[0].class = ATA_DEV_ATA;
4931 		break;
4932 	case IPR_PROTO_SATA_ATAPI:
4933 	case IPR_PROTO_SAS_STP_ATAPI:
4934 		ap->device[0].class = ATA_DEV_ATAPI;
4935 		break;
4936 	default:
4937 		ap->device[0].class = ATA_DEV_UNKNOWN;
4938 		ap->ops->port_disable(ap);
4939 		break;
4940 	}
4941 
4942 out_unlock:
4943 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
4944 	LEAVE;
4945 }
4946 
4947 /**
4948  * ipr_ata_post_internal - Cleanup after an internal command
4949  * @qc:	ATA queued command
4950  *
4951  * Return value:
4952  * 	none
4953  **/
4954 static void ipr_ata_post_internal(struct ata_queued_cmd *qc)
4955 {
4956 	struct ipr_sata_port *sata_port = qc->ap->private_data;
4957 	struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg;
4958 	struct ipr_cmnd *ipr_cmd;
4959 	unsigned long flags;
4960 
4961 	spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
4962 	while (ioa_cfg->in_reset_reload) {
4963 		spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
4964 		wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
4965 		spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
4966 	}
4967 
4968 	list_for_each_entry(ipr_cmd, &ioa_cfg->pending_q, queue) {
4969 		if (ipr_cmd->qc == qc) {
4970 			ipr_device_reset(ioa_cfg, sata_port->res);
4971 			break;
4972 		}
4973 	}
4974 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
4975 }
4976 
4977 /**
4978  * ipr_tf_read - Read the current ATA taskfile for the ATA port
4979  * @ap:	ATA port
4980  * @tf:	destination ATA taskfile
4981  *
4982  * Return value:
4983  * 	none
4984  **/
4985 static void ipr_tf_read(struct ata_port *ap, struct ata_taskfile *tf)
4986 {
4987 	struct ipr_sata_port *sata_port = ap->private_data;
4988 	struct ipr_ioasa_gata *g = &sata_port->ioasa;
4989 
4990 	tf->feature = g->error;
4991 	tf->nsect = g->nsect;
4992 	tf->lbal = g->lbal;
4993 	tf->lbam = g->lbam;
4994 	tf->lbah = g->lbah;
4995 	tf->device = g->device;
4996 	tf->command = g->status;
4997 	tf->hob_nsect = g->hob_nsect;
4998 	tf->hob_lbal = g->hob_lbal;
4999 	tf->hob_lbam = g->hob_lbam;
5000 	tf->hob_lbah = g->hob_lbah;
5001 	tf->ctl = g->alt_status;
5002 }
5003 
5004 /**
5005  * ipr_copy_sata_tf - Copy a SATA taskfile to an IOA data structure
5006  * @regs:	destination
5007  * @tf:	source ATA taskfile
5008  *
5009  * Return value:
5010  * 	none
5011  **/
5012 static void ipr_copy_sata_tf(struct ipr_ioarcb_ata_regs *regs,
5013 			     struct ata_taskfile *tf)
5014 {
5015 	regs->feature = tf->feature;
5016 	regs->nsect = tf->nsect;
5017 	regs->lbal = tf->lbal;
5018 	regs->lbam = tf->lbam;
5019 	regs->lbah = tf->lbah;
5020 	regs->device = tf->device;
5021 	regs->command = tf->command;
5022 	regs->hob_feature = tf->hob_feature;
5023 	regs->hob_nsect = tf->hob_nsect;
5024 	regs->hob_lbal = tf->hob_lbal;
5025 	regs->hob_lbam = tf->hob_lbam;
5026 	regs->hob_lbah = tf->hob_lbah;
5027 	regs->ctl = tf->ctl;
5028 }
5029 
5030 /**
5031  * ipr_sata_done - done function for SATA commands
5032  * @ipr_cmd:	ipr command struct
5033  *
5034  * This function is invoked by the interrupt handler for
5035  * ops generated by the SCSI mid-layer to SATA devices
5036  *
5037  * Return value:
5038  * 	none
5039  **/
5040 static void ipr_sata_done(struct ipr_cmnd *ipr_cmd)
5041 {
5042 	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5043 	struct ata_queued_cmd *qc = ipr_cmd->qc;
5044 	struct ipr_sata_port *sata_port = qc->ap->private_data;
5045 	struct ipr_resource_entry *res = sata_port->res;
5046 	u32 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
5047 
5048 	memcpy(&sata_port->ioasa, &ipr_cmd->ioasa.u.gata,
5049 	       sizeof(struct ipr_ioasa_gata));
5050 	ipr_dump_ioasa(ioa_cfg, ipr_cmd, res);
5051 
5052 	if (be32_to_cpu(ipr_cmd->ioasa.ioasc_specific) & IPR_ATA_DEVICE_WAS_RESET)
5053 		scsi_report_device_reset(ioa_cfg->host, res->cfgte.res_addr.bus,
5054 					 res->cfgte.res_addr.target);
5055 
5056 	if (IPR_IOASC_SENSE_KEY(ioasc) > RECOVERED_ERROR)
5057 		qc->err_mask |= __ac_err_mask(ipr_cmd->ioasa.u.gata.status);
5058 	else
5059 		qc->err_mask |= ac_err_mask(ipr_cmd->ioasa.u.gata.status);
5060 	list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
5061 	ata_qc_complete(qc);
5062 }
5063 
5064 /**
5065  * ipr_build_ata_ioadl - Build an ATA scatter/gather list
5066  * @ipr_cmd:	ipr command struct
5067  * @qc:		ATA queued command
5068  *
5069  **/
5070 static void ipr_build_ata_ioadl(struct ipr_cmnd *ipr_cmd,
5071 				struct ata_queued_cmd *qc)
5072 {
5073 	u32 ioadl_flags = 0;
5074 	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
5075 	struct ipr_ioadl_desc *ioadl = ipr_cmd->ioadl;
5076 	int len = qc->nbytes + qc->pad_len;
5077 	struct scatterlist *sg;
5078 
5079 	if (len == 0)
5080 		return;
5081 
5082 	if (qc->dma_dir == DMA_TO_DEVICE) {
5083 		ioadl_flags = IPR_IOADL_FLAGS_WRITE;
5084 		ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
5085 		ioarcb->write_data_transfer_length = cpu_to_be32(len);
5086 		ioarcb->write_ioadl_len =
5087 			cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
5088 	} else if (qc->dma_dir == DMA_FROM_DEVICE) {
5089 		ioadl_flags = IPR_IOADL_FLAGS_READ;
5090 		ioarcb->read_data_transfer_length = cpu_to_be32(len);
5091 		ioarcb->read_ioadl_len =
5092 			cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
5093 	}
5094 
5095 	ata_for_each_sg(sg, qc) {
5096 		ioadl->flags_and_data_len = cpu_to_be32(ioadl_flags | sg_dma_len(sg));
5097 		ioadl->address = cpu_to_be32(sg_dma_address(sg));
5098 		if (ata_sg_is_last(sg, qc))
5099 			ioadl->flags_and_data_len |= cpu_to_be32(IPR_IOADL_FLAGS_LAST);
5100 		else
5101 			ioadl++;
5102 	}
5103 }
5104 
5105 /**
5106  * ipr_qc_issue - Issue a SATA qc to a device
5107  * @qc:	queued command
5108  *
5109  * Return value:
5110  * 	0 on success / non-zero on failure
5111  **/
5112 static unsigned int ipr_qc_issue(struct ata_queued_cmd *qc)
5113 {
5114 	struct ata_port *ap = qc->ap;
5115 	struct ipr_sata_port *sata_port = ap->private_data;
5116 	struct ipr_resource_entry *res = sata_port->res;
5117 	struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg;
5118 	struct ipr_cmnd *ipr_cmd;
5119 	struct ipr_ioarcb *ioarcb;
5120 	struct ipr_ioarcb_ata_regs *regs;
5121 
5122 	if (unlikely(!ioa_cfg->allow_cmds || ioa_cfg->ioa_is_dead))
5123 		return -EIO;
5124 
5125 	ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
5126 	ioarcb = &ipr_cmd->ioarcb;
5127 	regs = &ioarcb->add_data.u.regs;
5128 
5129 	memset(&ioarcb->add_data, 0, sizeof(ioarcb->add_data));
5130 	ioarcb->add_cmd_parms_len = cpu_to_be32(sizeof(ioarcb->add_data.u.regs));
5131 
5132 	list_add_tail(&ipr_cmd->queue, &ioa_cfg->pending_q);
5133 	ipr_cmd->qc = qc;
5134 	ipr_cmd->done = ipr_sata_done;
5135 	ipr_cmd->ioarcb.res_handle = res->cfgte.res_handle;
5136 	ioarcb->cmd_pkt.request_type = IPR_RQTYPE_ATA_PASSTHRU;
5137 	ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_LINK_DESC;
5138 	ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_ULEN_CHK;
5139 	ipr_cmd->dma_use_sg = qc->pad_len ? qc->n_elem + 1 : qc->n_elem;
5140 
5141 	ipr_build_ata_ioadl(ipr_cmd, qc);
5142 	regs->flags |= IPR_ATA_FLAG_STATUS_ON_GOOD_COMPLETION;
5143 	ipr_copy_sata_tf(regs, &qc->tf);
5144 	memcpy(ioarcb->cmd_pkt.cdb, qc->cdb, IPR_MAX_CDB_LEN);
5145 	ipr_trc_hook(ipr_cmd, IPR_TRACE_START, IPR_GET_PHYS_LOC(res->cfgte.res_addr));
5146 
5147 	switch (qc->tf.protocol) {
5148 	case ATA_PROT_NODATA:
5149 	case ATA_PROT_PIO:
5150 		break;
5151 
5152 	case ATA_PROT_DMA:
5153 		regs->flags |= IPR_ATA_FLAG_XFER_TYPE_DMA;
5154 		break;
5155 
5156 	case ATA_PROT_ATAPI:
5157 	case ATA_PROT_ATAPI_NODATA:
5158 		regs->flags |= IPR_ATA_FLAG_PACKET_CMD;
5159 		break;
5160 
5161 	case ATA_PROT_ATAPI_DMA:
5162 		regs->flags |= IPR_ATA_FLAG_PACKET_CMD;
5163 		regs->flags |= IPR_ATA_FLAG_XFER_TYPE_DMA;
5164 		break;
5165 
5166 	default:
5167 		WARN_ON(1);
5168 		return -1;
5169 	}
5170 
5171 	mb();
5172 	writel(be32_to_cpu(ioarcb->ioarcb_host_pci_addr),
5173 	       ioa_cfg->regs.ioarrin_reg);
5174 	return 0;
5175 }
5176 
5177 /**
5178  * ipr_ata_check_status - Return last ATA status
5179  * @ap:	ATA port
5180  *
5181  * Return value:
5182  * 	ATA status
5183  **/
5184 static u8 ipr_ata_check_status(struct ata_port *ap)
5185 {
5186 	struct ipr_sata_port *sata_port = ap->private_data;
5187 	return sata_port->ioasa.status;
5188 }
5189 
5190 /**
5191  * ipr_ata_check_altstatus - Return last ATA altstatus
5192  * @ap:	ATA port
5193  *
5194  * Return value:
5195  * 	Alt ATA status
5196  **/
5197 static u8 ipr_ata_check_altstatus(struct ata_port *ap)
5198 {
5199 	struct ipr_sata_port *sata_port = ap->private_data;
5200 	return sata_port->ioasa.alt_status;
5201 }
5202 
5203 static struct ata_port_operations ipr_sata_ops = {
5204 	.port_disable = ata_port_disable,
5205 	.check_status = ipr_ata_check_status,
5206 	.check_altstatus = ipr_ata_check_altstatus,
5207 	.dev_select = ata_noop_dev_select,
5208 	.phy_reset = ipr_ata_phy_reset,
5209 	.post_internal_cmd = ipr_ata_post_internal,
5210 	.tf_read = ipr_tf_read,
5211 	.qc_prep = ata_noop_qc_prep,
5212 	.qc_issue = ipr_qc_issue,
5213 	.port_start = ata_sas_port_start,
5214 	.port_stop = ata_sas_port_stop
5215 };
5216 
5217 static struct ata_port_info sata_port_info = {
5218 	.flags	= ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY | ATA_FLAG_SATA_RESET |
5219 	ATA_FLAG_MMIO | ATA_FLAG_PIO_DMA,
5220 	.pio_mask	= 0x10, /* pio4 */
5221 	.mwdma_mask = 0x07,
5222 	.udma_mask	= 0x7f, /* udma0-6 */
5223 	.port_ops	= &ipr_sata_ops
5224 };
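
/*
 * Mask decoding, for reference: each bit N enables mode N, so
 * pio_mask 0x10 selects PIO mode 4 only, mwdma_mask 0x07 allows
 * MWDMA modes 0-2, and udma_mask 0x7f allows UDMA modes 0-6.
 */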
5225 
5226 #ifdef CONFIG_PPC_PSERIES
5227 static const u16 ipr_blocked_processors[] = {
5228 	PV_NORTHSTAR,
5229 	PV_PULSAR,
5230 	PV_POWER4,
5231 	PV_ICESTAR,
5232 	PV_SSTAR,
5233 	PV_POWER4p,
5234 	PV_630,
5235 	PV_630p
5236 };
5237 
5238 /**
5239  * ipr_invalid_adapter - Determine if this adapter is supported on this hardware
5240  * @ioa_cfg:	ioa cfg struct
5241  *
5242  * Adapters that use Gemstone revision < 3.1 do not work reliably on
5243  * certain pSeries hardware. This function determines if the given
5244  * adapter is in one of these configurations or not.
5245  *
5246  * Return value:
5247  * 	1 if adapter is not supported / 0 if adapter is supported
5248  **/
5249 static int ipr_invalid_adapter(struct ipr_ioa_cfg *ioa_cfg)
5250 {
5251 	u8 rev_id;
5252 	int i;
5253 
5254 	if (ioa_cfg->type == 0x5702) {
5255 		if (pci_read_config_byte(ioa_cfg->pdev, PCI_REVISION_ID,
5256 					 &rev_id) == PCIBIOS_SUCCESSFUL) {
5257 			if (rev_id < 4) {
5258 				for (i = 0; i < ARRAY_SIZE(ipr_blocked_processors); i++) {
5259 					if (__is_processor(ipr_blocked_processors[i]))
5260 						return 1;
5261 				}
5262 			}
5263 		}
5264 	}
5265 	return 0;
5266 }
5267 #else
5268 #define ipr_invalid_adapter(ioa_cfg) 0
5269 #endif
5270 
5271 /**
5272  * ipr_ioa_bringdown_done - IOA bring down completion.
5273  * @ipr_cmd:	ipr command struct
5274  *
5275  * This function processes the completion of an adapter bring down.
5276  * It wakes any reset sleepers.
5277  *
5278  * Return value:
5279  * 	IPR_RC_JOB_RETURN
5280  **/
5281 static int ipr_ioa_bringdown_done(struct ipr_cmnd *ipr_cmd)
5282 {
5283 	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5284 
5285 	ENTER;
5286 	ioa_cfg->in_reset_reload = 0;
5287 	ioa_cfg->reset_retries = 0;
5288 	list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
5289 	wake_up_all(&ioa_cfg->reset_wait_q);
5290 
5291 	spin_unlock_irq(ioa_cfg->host->host_lock);
5292 	scsi_unblock_requests(ioa_cfg->host);
5293 	spin_lock_irq(ioa_cfg->host->host_lock);
5294 	LEAVE;
5295 
5296 	return IPR_RC_JOB_RETURN;
5297 }
5298 
5299 /**
5300  * ipr_ioa_reset_done - IOA reset completion.
5301  * @ipr_cmd:	ipr command struct
5302  *
5303  * This function processes the completion of an adapter reset.
5304  * It schedules any necessary mid-layer add/removes and
5305  * wakes any reset sleepers.
5306  *
5307  * Return value:
5308  * 	IPR_RC_JOB_RETURN
5309  **/
5310 static int ipr_ioa_reset_done(struct ipr_cmnd *ipr_cmd)
5311 {
5312 	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5313 	struct ipr_resource_entry *res;
5314 	struct ipr_hostrcb *hostrcb, *temp;
5315 	int i = 0;
5316 
5317 	ENTER;
5318 	ioa_cfg->in_reset_reload = 0;
5319 	ioa_cfg->allow_cmds = 1;
5320 	ioa_cfg->reset_cmd = NULL;
5321 	ioa_cfg->doorbell |= IPR_RUNTIME_RESET;
5322 
5323 	list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
5324 		if (ioa_cfg->allow_ml_add_del && (res->add_to_ml || res->del_from_ml)) {
5325 			ipr_trace;
5326 			break;
5327 		}
5328 	}
5329 	schedule_work(&ioa_cfg->work_q);
5330 
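	/*
	 * Re-post the freed HCAMs: the first IPR_NUM_LOG_HCAMS as error
	 * log buffers, the remainder as configuration change buffers.
	 */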
5331 	list_for_each_entry_safe(hostrcb, temp, &ioa_cfg->hostrcb_free_q, queue) {
5332 		list_del(&hostrcb->queue);
5333 		if (i++ < IPR_NUM_LOG_HCAMS)
5334 			ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_LOG_DATA, hostrcb);
5335 		else
5336 			ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE, hostrcb);
5337 	}
5338 
5339 	dev_info(&ioa_cfg->pdev->dev, "IOA initialized.\n");
5340 
5341 	ioa_cfg->reset_retries = 0;
5342 	list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
5343 	wake_up_all(&ioa_cfg->reset_wait_q);
5344 
5345 	spin_unlock_irq(ioa_cfg->host->host_lock);
5346 	scsi_unblock_requests(ioa_cfg->host);
5347 	spin_lock_irq(ioa_cfg->host->host_lock);
5348 
5349 	if (!ioa_cfg->allow_cmds)
5350 		scsi_block_requests(ioa_cfg->host);
5351 
5352 	LEAVE;
5353 	return IPR_RC_JOB_RETURN;
5354 }
5355 
5356 /**
5357  * ipr_set_sup_dev_dflt - Initialize a Set Supported Device buffer
5358  * @supported_dev:	supported device struct
5359  * @vpids:			vendor product id struct
5360  *
5361  * Return value:
5362  * 	none
5363  **/
5364 static void ipr_set_sup_dev_dflt(struct ipr_supported_device *supported_dev,
5365 				 struct ipr_std_inq_vpids *vpids)
5366 {
5367 	memset(supported_dev, 0, sizeof(struct ipr_supported_device));
5368 	memcpy(&supported_dev->vpids, vpids, sizeof(struct ipr_std_inq_vpids));
5369 	supported_dev->num_records = 1;
5370 	supported_dev->data_length =
5371 		cpu_to_be16(sizeof(struct ipr_supported_device));
5372 	supported_dev->reserved = 0;
5373 }
5374 
5375 /**
5376  * ipr_set_supported_devs - Send Set Supported Devices for a device
5377  * @ipr_cmd:	ipr command struct
5378  *
5379  * This function sends a Set Supported Devices command to the adapter.
5380  *
5381  * Return value:
5382  * 	IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
5383  **/
5384 static int ipr_set_supported_devs(struct ipr_cmnd *ipr_cmd)
5385 {
5386 	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5387 	struct ipr_supported_device *supp_dev = &ioa_cfg->vpd_cbs->supp_dev;
5388 	struct ipr_ioadl_desc *ioadl = ipr_cmd->ioadl;
5389 	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
5390 	struct ipr_resource_entry *res = ipr_cmd->u.res;
5391 
5392 	ipr_cmd->job_step = ipr_ioa_reset_done;
5393 
5394 	list_for_each_entry_continue(res, &ioa_cfg->used_res_q, queue) {
5395 		if (!ipr_is_scsi_disk(res))
5396 			continue;
5397 
5398 		ipr_cmd->u.res = res;
5399 		ipr_set_sup_dev_dflt(supp_dev, &res->cfgte.std_inq_data.vpids);
5400 
5401 		ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
5402 		ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
5403 		ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
5404 
5405 		ioarcb->cmd_pkt.cdb[0] = IPR_SET_SUPPORTED_DEVICES;
5406 		ioarcb->cmd_pkt.cdb[7] = (sizeof(struct ipr_supported_device) >> 8) & 0xff;
5407 		ioarcb->cmd_pkt.cdb[8] = sizeof(struct ipr_supported_device) & 0xff;
5408 
5409 		ioadl->flags_and_data_len = cpu_to_be32(IPR_IOADL_FLAGS_WRITE_LAST |
5410 							sizeof(struct ipr_supported_device));
5411 		ioadl->address = cpu_to_be32(ioa_cfg->vpd_cbs_dma +
5412 					     offsetof(struct ipr_misc_cbs, supp_dev));
5413 		ioarcb->write_ioadl_len = cpu_to_be32(sizeof(struct ipr_ioadl_desc));
5414 		ioarcb->write_data_transfer_length =
5415 			cpu_to_be32(sizeof(struct ipr_supported_device));
5416 
5417 		ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout,
5418 			   IPR_SET_SUP_DEVICE_TIMEOUT);
5419 
5420 		ipr_cmd->job_step = ipr_set_supported_devs;
5421 		return IPR_RC_JOB_RETURN;
5422 	}
5423 
5424 	return IPR_RC_JOB_CONTINUE;
5425 }
5426 
5427 /**
5428  * ipr_setup_write_cache - Disable write cache if needed
5429  * @ipr_cmd:	ipr command struct
5430  *
5431  * This function sets up the adapter's write cache to the desired setting.
5432  *
5433  * Return value:
5434  * 	IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
5435  **/
5436 static int ipr_setup_write_cache(struct ipr_cmnd *ipr_cmd)
5437 {
5438 	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5439 
5440 	ipr_cmd->job_step = ipr_set_supported_devs;
5441 	ipr_cmd->u.res = list_entry(ioa_cfg->used_res_q.next,
5442 				    struct ipr_resource_entry, queue);
5443 
5444 	if (ioa_cfg->cache_state != CACHE_DISABLED)
5445 		return IPR_RC_JOB_CONTINUE;
5446 
5447 	ipr_cmd->ioarcb.res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
5448 	ipr_cmd->ioarcb.cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
5449 	ipr_cmd->ioarcb.cmd_pkt.cdb[0] = IPR_IOA_SHUTDOWN;
5450 	ipr_cmd->ioarcb.cmd_pkt.cdb[1] = IPR_SHUTDOWN_PREPARE_FOR_NORMAL;
5451 
5452 	ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
5453 
5454 	return IPR_RC_JOB_RETURN;
5455 }
5456 
5457 /**
5458  * ipr_get_mode_page - Locate specified mode page
5459  * @mode_pages:	mode page buffer
5460  * @page_code:	page code to find
5461  * @len:		minimum required length for mode page
5462  *
5463  * Return value:
5464  * 	pointer to mode page / NULL on failure
5465  **/
5466 static void *ipr_get_mode_page(struct ipr_mode_pages *mode_pages,
5467 			       u32 page_code, u32 len)
5468 {
5469 	struct ipr_mode_page_hdr *mode_hdr;
5470 	u32 page_length;
5471 	u32 length;
5472 
5473 	if (!mode_pages || (mode_pages->hdr.length == 0))
5474 		return NULL;
5475 
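	/*
	 * The mode data length field excludes itself, so add 1 for the
	 * total mode data length, then subtract the 4-byte mode parameter
	 * header and any block descriptors to get the combined length of
	 * the mode pages.
	 */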
5476 	length = (mode_pages->hdr.length + 1) - 4 - mode_pages->hdr.block_desc_len;
5477 	mode_hdr = (struct ipr_mode_page_hdr *)
5478 		(mode_pages->data + mode_pages->hdr.block_desc_len);
5479 
5480 	while (length) {
5481 		if (IPR_GET_MODE_PAGE_CODE(mode_hdr) == page_code) {
5482 			if (mode_hdr->page_length >= (len - sizeof(struct ipr_mode_page_hdr)))
5483 				return mode_hdr;
5484 			break;
5485 		} else {
5486 			page_length = (sizeof(struct ipr_mode_page_hdr) +
5487 				       mode_hdr->page_length);
5488 			length -= page_length;
5489 			mode_hdr = (struct ipr_mode_page_hdr *)
5490 				((unsigned long)mode_hdr + page_length);
5491 		}
5492 	}
5493 	return NULL;
5494 }
5495 
5496 /**
5497  * ipr_check_term_power - Check for term power errors
5498  * @ioa_cfg:	ioa config struct
5499  * @mode_pages:	IOAFP mode pages buffer
5500  *
5501  * Check the IOAFP's mode page 28 for term power errors
5502  *
5503  * Return value:
5504  * 	nothing
5505  **/
5506 static void ipr_check_term_power(struct ipr_ioa_cfg *ioa_cfg,
5507 				 struct ipr_mode_pages *mode_pages)
5508 {
5509 	int i;
5510 	int entry_length;
5511 	struct ipr_dev_bus_entry *bus;
5512 	struct ipr_mode_page28 *mode_page;
5513 
5514 	mode_page = ipr_get_mode_page(mode_pages, 0x28,
5515 				      sizeof(struct ipr_mode_page28));
5516 
5517 	entry_length = mode_page->entry_length;
5518 
5519 	bus = mode_page->bus;
5520 
5521 	for (i = 0; i < mode_page->num_entries; i++) {
5522 		if (bus->flags & IPR_SCSI_ATTR_NO_TERM_PWR) {
5523 			dev_err(&ioa_cfg->pdev->dev,
5524 				"Term power is absent on scsi bus %d\n",
5525 				bus->res_addr.bus);
5526 		}
5527 
5528 		bus = (struct ipr_dev_bus_entry *)((char *)bus + entry_length);
5529 	}
5530 }
5531 
5532 /**
5533  * ipr_scsi_bus_speed_limit - Limit the SCSI speed based on SES table
5534  * @ioa_cfg:	ioa config struct
5535  *
5536  * Looks through the config table for SES devices. If an SES
5537  * device appears in the SES table with a maximum SCSI bus
5538  * speed, the bus speed is limited accordingly.
5539  *
5540  * Return value:
5541  * 	none
5542  **/
5543 static void ipr_scsi_bus_speed_limit(struct ipr_ioa_cfg *ioa_cfg)
5544 {
5545 	u32 max_xfer_rate;
5546 	int i;
5547 
5548 	for (i = 0; i < IPR_MAX_NUM_BUSES; i++) {
5549 		max_xfer_rate = ipr_get_max_scsi_speed(ioa_cfg, i,
5550 						       ioa_cfg->bus_attr[i].bus_width);
5551 
5552 		if (max_xfer_rate < ioa_cfg->bus_attr[i].max_xfer_rate)
5553 			ioa_cfg->bus_attr[i].max_xfer_rate = max_xfer_rate;
5554 	}
5555 }
5556 
5557 /**
5558  * ipr_modify_ioafp_mode_page_28 - Modify IOAFP Mode Page 28
5559  * @ioa_cfg:	ioa config struct
5560  * @mode_pages:	mode page 28 buffer
5561  *
5562  * Updates mode page 28 based on driver configuration
5563  *
5564  * Return value:
5565  * 	none
5566  **/
5567 static void ipr_modify_ioafp_mode_page_28(struct ipr_ioa_cfg *ioa_cfg,
5568 					   struct ipr_mode_pages *mode_pages)
5569 {
5570 	int i, entry_length;
5571 	struct ipr_dev_bus_entry *bus;
5572 	struct ipr_bus_attributes *bus_attr;
5573 	struct ipr_mode_page28 *mode_page;
5574 
5575 	mode_page = ipr_get_mode_page(mode_pages, 0x28,
5576 				      sizeof(struct ipr_mode_page28));
5577 
5578 	entry_length = mode_page->entry_length;
5579 
5580 	/* Loop for each device bus entry */
5581 	for (i = 0, bus = mode_page->bus;
5582 	     i < mode_page->num_entries;
5583 	     i++, bus = (struct ipr_dev_bus_entry *)((u8 *)bus + entry_length)) {
5584 		if (bus->res_addr.bus > IPR_MAX_NUM_BUSES) {
5585 			dev_err(&ioa_cfg->pdev->dev,
5586 				"Invalid resource address reported: 0x%08X\n",
5587 				IPR_GET_PHYS_LOC(bus->res_addr));
5588 			continue;
5589 		}
5590 
5591 		bus_attr = &ioa_cfg->bus_attr[i];
5592 		bus->extended_reset_delay = IPR_EXTENDED_RESET_DELAY;
5593 		bus->bus_width = bus_attr->bus_width;
5594 		bus->max_xfer_rate = cpu_to_be32(bus_attr->max_xfer_rate);
5595 		bus->flags &= ~IPR_SCSI_ATTR_QAS_MASK;
5596 		if (bus_attr->qas_enabled)
5597 			bus->flags |= IPR_SCSI_ATTR_ENABLE_QAS;
5598 		else
5599 			bus->flags |= IPR_SCSI_ATTR_DISABLE_QAS;
5600 	}
5601 }
5602 
5603 /**
5604  * ipr_build_mode_select - Build a mode select command
5605  * @ipr_cmd:	ipr command struct
5606  * @res_handle:	resource handle to send command to
5607  * @parm:		Byte 1 of Mode Select command
5608  * @dma_addr:	DMA buffer address
5609  * @xfer_len:	data transfer length
5610  *
5611  * Return value:
5612  * 	none
5613  **/
5614 static void ipr_build_mode_select(struct ipr_cmnd *ipr_cmd,
5615 				  __be32 res_handle, u8 parm, u32 dma_addr,
5616 				  u8 xfer_len)
5617 {
5618 	struct ipr_ioadl_desc *ioadl = ipr_cmd->ioadl;
5619 	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
5620 
5621 	ioarcb->res_handle = res_handle;
5622 	ioarcb->cmd_pkt.request_type = IPR_RQTYPE_SCSICDB;
5623 	ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
5624 	ioarcb->cmd_pkt.cdb[0] = MODE_SELECT;
5625 	ioarcb->cmd_pkt.cdb[1] = parm;
5626 	ioarcb->cmd_pkt.cdb[4] = xfer_len;
5627 
5628 	ioadl->flags_and_data_len =
5629 		cpu_to_be32(IPR_IOADL_FLAGS_WRITE_LAST | xfer_len);
5630 	ioadl->address = cpu_to_be32(dma_addr);
5631 	ioarcb->write_ioadl_len = cpu_to_be32(sizeof(struct ipr_ioadl_desc));
5632 	ioarcb->write_data_transfer_length = cpu_to_be32(xfer_len);
5633 }
5634 
5635 /**
5636  * ipr_ioafp_mode_select_page28 - Issue Mode Select Page 28 to IOA
5637  * @ipr_cmd:	ipr command struct
5638  *
5639  * This function sets up the SCSI bus attributes and sends
5640  * a Mode Select for Page 28 to activate them.
5641  *
5642  * Return value:
5643  * 	IPR_RC_JOB_RETURN
5644  **/
5645 static int ipr_ioafp_mode_select_page28(struct ipr_cmnd *ipr_cmd)
5646 {
5647 	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5648 	struct ipr_mode_pages *mode_pages = &ioa_cfg->vpd_cbs->mode_pages;
5649 	int length;
5650 
5651 	ENTER;
5652 	ipr_scsi_bus_speed_limit(ioa_cfg);
5653 	ipr_check_term_power(ioa_cfg, mode_pages);
5654 	ipr_modify_ioafp_mode_page_28(ioa_cfg, mode_pages);
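	/*
	 * The mode data length field is reserved for MODE SELECT, so zero
	 * it; the transfer length still covers the full header and pages.
	 */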
5655 	length = mode_pages->hdr.length + 1;
5656 	mode_pages->hdr.length = 0;
5657 
5658 	ipr_build_mode_select(ipr_cmd, cpu_to_be32(IPR_IOA_RES_HANDLE), 0x11,
5659 			      ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, mode_pages),
5660 			      length);
5661 
5662 	ipr_cmd->job_step = ipr_setup_write_cache;
5663 	ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
5664 
5665 	LEAVE;
5666 	return IPR_RC_JOB_RETURN;
5667 }
5668 
5669 /**
5670  * ipr_build_mode_sense - Builds a mode sense command
5671  * @ipr_cmd:	ipr command struct
5672  * @res_handle:	resource handle to send command to
5673  * @parm:		Byte 2 of mode sense command
5674  * @dma_addr:	DMA address of mode sense buffer
5675  * @xfer_len:	Size of DMA buffer
5676  *
5677  * Return value:
5678  * 	none
5679  **/
5680 static void ipr_build_mode_sense(struct ipr_cmnd *ipr_cmd,
5681 				 __be32 res_handle,
5682 				 u8 parm, u32 dma_addr, u8 xfer_len)
5683 {
5684 	struct ipr_ioadl_desc *ioadl = ipr_cmd->ioadl;
5685 	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
5686 
5687 	ioarcb->res_handle = res_handle;
5688 	ioarcb->cmd_pkt.cdb[0] = MODE_SENSE;
5689 	ioarcb->cmd_pkt.cdb[2] = parm;
5690 	ioarcb->cmd_pkt.cdb[4] = xfer_len;
5691 	ioarcb->cmd_pkt.request_type = IPR_RQTYPE_SCSICDB;
5692 
5693 	ioadl->flags_and_data_len =
5694 		cpu_to_be32(IPR_IOADL_FLAGS_READ_LAST | xfer_len);
5695 	ioadl->address = cpu_to_be32(dma_addr);
5696 	ioarcb->read_ioadl_len = cpu_to_be32(sizeof(struct ipr_ioadl_desc));
5697 	ioarcb->read_data_transfer_length = cpu_to_be32(xfer_len);
5698 }
5699 
5700 /**
5701  * ipr_reset_cmd_failed - Handle failure of IOA reset command
5702  * @ipr_cmd:	ipr command struct
5703  *
5704  * This function handles the failure of an IOA bringup command.
5705  *
5706  * Return value:
5707  * 	IPR_RC_JOB_RETURN
5708  **/
5709 static int ipr_reset_cmd_failed(struct ipr_cmnd *ipr_cmd)
5710 {
5711 	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5712 	u32 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
5713 
5714 	dev_err(&ioa_cfg->pdev->dev,
5715 		"0x%02X failed with IOASC: 0x%08X\n",
5716 		ipr_cmd->ioarcb.cmd_pkt.cdb[0], ioasc);
5717 
5718 	ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
5719 	list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
5720 	return IPR_RC_JOB_RETURN;
5721 }
5722 
5723 /**
5724  * ipr_reset_mode_sense_failed - Handle failure of IOAFP mode sense
5725  * @ipr_cmd:	ipr command struct
5726  *
5727  * This function handles the failure of a Mode Sense to the IOAFP.
5728  * Some adapters do not handle all mode pages.
5729  *
5730  * Return value:
5731  * 	IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
5732  **/
5733 static int ipr_reset_mode_sense_failed(struct ipr_cmnd *ipr_cmd)
5734 {
5735 	u32 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
5736 
5737 	if (ioasc == IPR_IOASC_IR_INVALID_REQ_TYPE_OR_PKT) {
5738 		ipr_cmd->job_step = ipr_setup_write_cache;
5739 		return IPR_RC_JOB_CONTINUE;
5740 	}
5741 
5742 	return ipr_reset_cmd_failed(ipr_cmd);
5743 }
5744 
5745 /**
5746  * ipr_ioafp_mode_sense_page28 - Issue Mode Sense Page 28 to IOA
5747  * @ipr_cmd:	ipr command struct
5748  *
5749  * This function sends a Page 28 mode sense to the IOA to
5750  * retrieve SCSI bus attributes.
5751  *
5752  * Return value:
5753  * 	IPR_RC_JOB_RETURN
5754  **/
5755 static int ipr_ioafp_mode_sense_page28(struct ipr_cmnd *ipr_cmd)
5756 {
5757 	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5758 
5759 	ENTER;
5760 	ipr_build_mode_sense(ipr_cmd, cpu_to_be32(IPR_IOA_RES_HANDLE),
5761 			     0x28, ioa_cfg->vpd_cbs_dma +
5762 			     offsetof(struct ipr_misc_cbs, mode_pages),
5763 			     sizeof(struct ipr_mode_pages));
5764 
5765 	ipr_cmd->job_step = ipr_ioafp_mode_select_page28;
5766 	ipr_cmd->job_step_failed = ipr_reset_mode_sense_failed;
5767 
5768 	ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
5769 
5770 	LEAVE;
5771 	return IPR_RC_JOB_RETURN;
5772 }
5773 
5774 /**
5775  * ipr_init_res_table - Initialize the resource table
5776  * @ipr_cmd:	ipr command struct
5777  *
5778  * This function looks through the existing resource table, comparing
5779  * it with the config table. This function will take care of old/new
5780  * devices and schedule adding/removing them from the mid-layer
5781  * as appropriate.
5782  *
5783  * Return value:
5784  * 	IPR_RC_JOB_CONTINUE
5785  **/
5786 static int ipr_init_res_table(struct ipr_cmnd *ipr_cmd)
5787 {
5788 	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5789 	struct ipr_resource_entry *res, *temp;
5790 	struct ipr_config_table_entry *cfgte;
5791 	int found, i;
5792 	LIST_HEAD(old_res);
5793 
5794 	ENTER;
5795 	if (ioa_cfg->cfg_table->hdr.flags & IPR_UCODE_DOWNLOAD_REQ)
5796 		dev_err(&ioa_cfg->pdev->dev, "Microcode download required\n");
5797 
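	/*
	 * Park all known resources on old_res. Entries still present in
	 * the new config table are moved back to used_res_q below; any
	 * left on old_res afterwards have gone away.
	 */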
5798 	list_for_each_entry_safe(res, temp, &ioa_cfg->used_res_q, queue)
5799 		list_move_tail(&res->queue, &old_res);
5800 
5801 	for (i = 0; i < ioa_cfg->cfg_table->hdr.num_entries; i++) {
5802 		cfgte = &ioa_cfg->cfg_table->dev[i];
5803 		found = 0;
5804 
5805 		list_for_each_entry_safe(res, temp, &old_res, queue) {
5806 			if (!memcmp(&res->cfgte.res_addr,
5807 				    &cfgte->res_addr, sizeof(cfgte->res_addr))) {
5808 				list_move_tail(&res->queue, &ioa_cfg->used_res_q);
5809 				found = 1;
5810 				break;
5811 			}
5812 		}
5813 
5814 		if (!found) {
5815 			if (list_empty(&ioa_cfg->free_res_q)) {
5816 				dev_err(&ioa_cfg->pdev->dev, "Too many devices attached\n");
5817 				break;
5818 			}
5819 
5820 			found = 1;
5821 			res = list_entry(ioa_cfg->free_res_q.next,
5822 					 struct ipr_resource_entry, queue);
5823 			list_move_tail(&res->queue, &ioa_cfg->used_res_q);
5824 			ipr_init_res_entry(res);
5825 			res->add_to_ml = 1;
5826 		}
5827 
5828 		if (found)
5829 			memcpy(&res->cfgte, cfgte, sizeof(struct ipr_config_table_entry));
5830 	}
5831 
5832 	list_for_each_entry_safe(res, temp, &old_res, queue) {
5833 		if (res->sdev) {
5834 			res->del_from_ml = 1;
5835 			res->cfgte.res_handle = IPR_INVALID_RES_HANDLE;
5836 			list_move_tail(&res->queue, &ioa_cfg->used_res_q);
5837 		} else {
5838 			list_move_tail(&res->queue, &ioa_cfg->free_res_q);
5839 		}
5840 	}
5841 
5842 	ipr_cmd->job_step = ipr_ioafp_mode_sense_page28;
5843 
5844 	LEAVE;
5845 	return IPR_RC_JOB_CONTINUE;
5846 }
5847 
5848 /**
5849  * ipr_ioafp_query_ioa_cfg - Send a Query IOA Config to the adapter.
5850  * @ipr_cmd:	ipr command struct
5851  *
5852  * This function sends a Query IOA Configuration command
5853  * to the adapter to retrieve the IOA configuration table.
5854  *
5855  * Return value:
5856  * 	IPR_RC_JOB_RETURN
5857  **/
5858 static int ipr_ioafp_query_ioa_cfg(struct ipr_cmnd *ipr_cmd)
5859 {
5860 	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5861 	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
5862 	struct ipr_ioadl_desc *ioadl = ipr_cmd->ioadl;
5863 	struct ipr_inquiry_page3 *ucode_vpd = &ioa_cfg->vpd_cbs->page3_data;
5864 
5865 	ENTER;
5866 	dev_info(&ioa_cfg->pdev->dev, "Adapter firmware version: %02X%02X%02X%02X\n",
5867 		 ucode_vpd->major_release, ucode_vpd->card_type,
5868 		 ucode_vpd->minor_release[0], ucode_vpd->minor_release[1]);
5869 	ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
5870 	ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
5871 
5872 	ioarcb->cmd_pkt.cdb[0] = IPR_QUERY_IOA_CONFIG;
5873 	ioarcb->cmd_pkt.cdb[7] = (sizeof(struct ipr_config_table) >> 8) & 0xff;
5874 	ioarcb->cmd_pkt.cdb[8] = sizeof(struct ipr_config_table) & 0xff;
5875 
5876 	ioarcb->read_ioadl_len = cpu_to_be32(sizeof(struct ipr_ioadl_desc));
5877 	ioarcb->read_data_transfer_length =
5878 		cpu_to_be32(sizeof(struct ipr_config_table));
5879 
5880 	ioadl->address = cpu_to_be32(ioa_cfg->cfg_table_dma);
5881 	ioadl->flags_and_data_len =
5882 		cpu_to_be32(IPR_IOADL_FLAGS_READ_LAST | sizeof(struct ipr_config_table));
5883 
5884 	ipr_cmd->job_step = ipr_init_res_table;
5885 
5886 	ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
5887 
5888 	LEAVE;
5889 	return IPR_RC_JOB_RETURN;
5890 }
5891 
5892 /**
5893  * ipr_ioafp_inquiry - Send an Inquiry to the adapter.
5894  * @ipr_cmd:	ipr command struct
 * @flags:	CDB byte 1 flags
 * @page:	inquiry page code
 * @dma_addr:	DMA address of inquiry buffer
 * @xfer_len:	transfer length
5895  *
5896  * This utility function sends an inquiry to the adapter.
5897  *
5898  * Return value:
5899  * 	none
5900  **/
5901 static void ipr_ioafp_inquiry(struct ipr_cmnd *ipr_cmd, u8 flags, u8 page,
5902 			      u32 dma_addr, u8 xfer_len)
5903 {
5904 	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
5905 	struct ipr_ioadl_desc *ioadl = ipr_cmd->ioadl;
5906 
5907 	ENTER;
5908 	ioarcb->cmd_pkt.request_type = IPR_RQTYPE_SCSICDB;
5909 	ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
5910 
5911 	ioarcb->cmd_pkt.cdb[0] = INQUIRY;
5912 	ioarcb->cmd_pkt.cdb[1] = flags;
5913 	ioarcb->cmd_pkt.cdb[2] = page;
5914 	ioarcb->cmd_pkt.cdb[4] = xfer_len;
5915 
5916 	ioarcb->read_ioadl_len = cpu_to_be32(sizeof(struct ipr_ioadl_desc));
5917 	ioarcb->read_data_transfer_length = cpu_to_be32(xfer_len);
5918 
5919 	ioadl->address = cpu_to_be32(dma_addr);
5920 	ioadl->flags_and_data_len =
5921 		cpu_to_be32(IPR_IOADL_FLAGS_READ_LAST | xfer_len);
5922 
5923 	ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
5924 	LEAVE;
5925 }
5926 
5927 /**
5928  * ipr_inquiry_page_supported - Is the given inquiry page supported
5929  * @page0:		inquiry page 0 buffer
5930  * @page:		page code.
5931  *
5932  * This function determines if the specified inquiry page is supported.
5933  *
5934  * Return value:
5935  *	1 if page is supported / 0 if not
5936  **/
5937 static int ipr_inquiry_page_supported(struct ipr_inquiry_page0 *page0, u8 page)
5938 {
5939 	int i;
5940 
5941 	for (i = 0; i < min_t(u8, page0->len, IPR_INQUIRY_PAGE0_ENTRIES); i++)
5942 		if (page0->page[i] == page)
5943 			return 1;
5944 
5945 	return 0;
5946 }
5947 
5948 /**
5949  * ipr_ioafp_page3_inquiry - Send a Page 3 Inquiry to the adapter.
5950  * @ipr_cmd:	ipr command struct
5951  *
5952  * This function sends a Page 3 inquiry to the adapter
5953  * to retrieve software VPD information.
5954  *
5955  * Return value:
5956  * 	IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
5957  **/
5958 static int ipr_ioafp_page3_inquiry(struct ipr_cmnd *ipr_cmd)
5959 {
5960 	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5961 	struct ipr_inquiry_page0 *page0 = &ioa_cfg->vpd_cbs->page0_data;
5962 
5963 	ENTER;
5964 
5965 	if (!ipr_inquiry_page_supported(page0, 1))
5966 		ioa_cfg->cache_state = CACHE_NONE;
5967 
5968 	ipr_cmd->job_step = ipr_ioafp_query_ioa_cfg;
5969 
5970 	ipr_ioafp_inquiry(ipr_cmd, 1, 3,
5971 			  ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, page3_data),
5972 			  sizeof(struct ipr_inquiry_page3));
5973 
5974 	LEAVE;
5975 	return IPR_RC_JOB_RETURN;
5976 }
5977 
5978 /**
5979  * ipr_ioafp_page0_inquiry - Send a Page 0 Inquiry to the adapter.
5980  * @ipr_cmd:	ipr command struct
5981  *
5982  * This function sends a Page 0 inquiry to the adapter
5983  * to retrieve supported inquiry pages.
5984  *
5985  * Return value:
5986  * 	IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
5987  **/
5988 static int ipr_ioafp_page0_inquiry(struct ipr_cmnd *ipr_cmd)
5989 {
5990 	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5991 	char type[5];
5992 
5993 	ENTER;
5994 
5995 	/* Grab the type out of the VPD and store it away */
5996 	memcpy(type, ioa_cfg->vpd_cbs->ioa_vpd.std_inq_data.vpids.product_id, 4);
5997 	type[4] = '\0';
5998 	ioa_cfg->type = simple_strtoul((char *)type, NULL, 16);
5999 
6000 	ipr_cmd->job_step = ipr_ioafp_page3_inquiry;
6001 
6002 	ipr_ioafp_inquiry(ipr_cmd, 1, 0,
6003 			  ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, page0_data),
6004 			  sizeof(struct ipr_inquiry_page0));
6005 
6006 	LEAVE;
6007 	return IPR_RC_JOB_RETURN;
6008 }
6009 
6010 /**
6011  * ipr_ioafp_std_inquiry - Send a Standard Inquiry to the adapter.
6012  * @ipr_cmd:	ipr command struct
6013  *
6014  * This function sends a standard inquiry to the adapter.
6015  *
6016  * Return value:
6017  * 	IPR_RC_JOB_RETURN
6018  **/
6019 static int ipr_ioafp_std_inquiry(struct ipr_cmnd *ipr_cmd)
6020 {
6021 	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6022 
6023 	ENTER;
6024 	ipr_cmd->job_step = ipr_ioafp_page0_inquiry;
6025 
6026 	ipr_ioafp_inquiry(ipr_cmd, 0, 0,
6027 			  ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, ioa_vpd),
6028 			  sizeof(struct ipr_ioa_vpd));
6029 
6030 	LEAVE;
6031 	return IPR_RC_JOB_RETURN;
6032 }
6033 
6034 /**
6035  * ipr_ioafp_identify_hrrq - Send Identify Host RRQ.
6036  * @ipr_cmd:	ipr command struct
6037  *
6038  * This function sends an Identify Host Request Response Queue
6039  * command to establish the HRRQ with the adapter.
6040  *
6041  * Return value:
6042  * 	IPR_RC_JOB_RETURN
6043  **/
6044 static int ipr_ioafp_identify_hrrq(struct ipr_cmnd *ipr_cmd)
6045 {
6046 	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6047 	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
6048 
6049 	ENTER;
6050 	dev_info(&ioa_cfg->pdev->dev, "Starting IOA initialization sequence.\n");
6051 
6052 	ioarcb->cmd_pkt.cdb[0] = IPR_ID_HOST_RR_Q;
6053 	ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
6054 
6055 	ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
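	/*
	 * cdb[2-5]: host RRQ DMA address, most significant byte first.
	 * cdb[7-8]: length of the RRQ in bytes.
	 */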
6056 	ioarcb->cmd_pkt.cdb[2] =
6057 		((u32) ioa_cfg->host_rrq_dma >> 24) & 0xff;
6058 	ioarcb->cmd_pkt.cdb[3] =
6059 		((u32) ioa_cfg->host_rrq_dma >> 16) & 0xff;
6060 	ioarcb->cmd_pkt.cdb[4] =
6061 		((u32) ioa_cfg->host_rrq_dma >> 8) & 0xff;
6062 	ioarcb->cmd_pkt.cdb[5] =
6063 		((u32) ioa_cfg->host_rrq_dma) & 0xff;
6064 	ioarcb->cmd_pkt.cdb[7] =
6065 		((sizeof(u32) * IPR_NUM_CMD_BLKS) >> 8) & 0xff;
6066 	ioarcb->cmd_pkt.cdb[8] =
6067 		(sizeof(u32) * IPR_NUM_CMD_BLKS) & 0xff;
6068 
6069 	ipr_cmd->job_step = ipr_ioafp_std_inquiry;
6070 
6071 	ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
6072 
6073 	LEAVE;
6074 	return IPR_RC_JOB_RETURN;
6075 }
6076 
6077 /**
6078  * ipr_reset_timer_done - Adapter reset timer function
6079  * @ipr_cmd:	ipr command struct
6080  *
6081  * Description: This function is used in adapter reset processing
6082  * for timing events. If the reset_cmd pointer in the IOA
6083  * config struct no longer points to this command, we are doing nested
6084  * resets and fail_all_ops will take care of freeing the
6085  * command block.
6086  *
6087  * Return value:
6088  * 	none
6089  **/
6090 static void ipr_reset_timer_done(struct ipr_cmnd *ipr_cmd)
6091 {
6092 	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6093 	unsigned long lock_flags = 0;
6094 
6095 	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
6096 
6097 	if (ioa_cfg->reset_cmd == ipr_cmd) {
6098 		list_del(&ipr_cmd->queue);
6099 		ipr_cmd->done(ipr_cmd);
6100 	}
6101 
6102 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
6103 }
6104 
6105 /**
6106  * ipr_reset_start_timer - Start a timer for adapter reset job
6107  * @ipr_cmd:	ipr command struct
6108  * @timeout:	timeout value
6109  *
6110  * Description: This function is used in adapter reset processing
6111  * for timing events. If the reset_cmd pointer in the IOA
6112  * config struct no longer points to this command, we are doing nested
6113  * resets and fail_all_ops will take care of freeing the
6114  * command block.
6115  *
6116  * Return value:
6117  * 	none
6118  **/
6119 static void ipr_reset_start_timer(struct ipr_cmnd *ipr_cmd,
6120 				  unsigned long timeout)
6121 {
6122 	list_add_tail(&ipr_cmd->queue, &ipr_cmd->ioa_cfg->pending_q);
6123 	ipr_cmd->done = ipr_reset_ioa_job;
6124 
6125 	ipr_cmd->timer.data = (unsigned long) ipr_cmd;
6126 	ipr_cmd->timer.expires = jiffies + timeout;
6127 	ipr_cmd->timer.function = (void (*)(unsigned long))ipr_reset_timer_done;
6128 	add_timer(&ipr_cmd->timer);
6129 }
6130 
6131 /**
6132  * ipr_init_ioa_mem - Initialize ioa_cfg control block
6133  * @ioa_cfg:	ioa cfg struct
6134  *
6135  * Return value:
6136  * 	nothing
6137  **/
6138 static void ipr_init_ioa_mem(struct ipr_ioa_cfg *ioa_cfg)
6139 {
6140 	memset(ioa_cfg->host_rrq, 0, sizeof(u32) * IPR_NUM_CMD_BLKS);
6141 
6142 	/* Initialize Host RRQ pointers */
6143 	ioa_cfg->hrrq_start = ioa_cfg->host_rrq;
6144 	ioa_cfg->hrrq_end = &ioa_cfg->host_rrq[IPR_NUM_CMD_BLKS - 1];
6145 	ioa_cfg->hrrq_curr = ioa_cfg->hrrq_start;
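	/* The expected toggle bit value flips each time the RRQ wraps */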
6146 	ioa_cfg->toggle_bit = 1;
6147 
6148 	/* Zero out config table */
6149 	memset(ioa_cfg->cfg_table, 0, sizeof(struct ipr_config_table));
6150 }
6151 
6152 /**
6153  * ipr_reset_enable_ioa - Enable the IOA following a reset.
6154  * @ipr_cmd:	ipr command struct
6155  *
6156  * This function reinitializes some control blocks and
6157  * enables destructive diagnostics on the adapter.
6158  *
6159  * Return value:
6160  * 	IPR_RC_JOB_RETURN
6161  **/
6162 static int ipr_reset_enable_ioa(struct ipr_cmnd *ipr_cmd)
6163 {
6164 	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6165 	volatile u32 int_reg;
6166 
6167 	ENTER;
6168 	ipr_cmd->job_step = ipr_ioafp_identify_hrrq;
6169 	ipr_init_ioa_mem(ioa_cfg);
6170 
6171 	ioa_cfg->allow_interrupts = 1;
6172 	int_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
6173 
6174 	if (int_reg & IPR_PCII_IOA_TRANS_TO_OPER) {
6175 		writel((IPR_PCII_ERROR_INTERRUPTS | IPR_PCII_HRRQ_UPDATED),
6176 		       ioa_cfg->regs.clr_interrupt_mask_reg);
6177 		int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
6178 		return IPR_RC_JOB_CONTINUE;
6179 	}
6180 
6181 	/* Enable destructive diagnostics on IOA */
6182 	writel(ioa_cfg->doorbell, ioa_cfg->regs.set_uproc_interrupt_reg);
6183 
6184 	writel(IPR_PCII_OPER_INTERRUPTS, ioa_cfg->regs.clr_interrupt_mask_reg);
6185 	int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
6186 
6187 	dev_info(&ioa_cfg->pdev->dev, "Initializing IOA.\n");
6188 
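	/*
	 * Arm a timer to bound the transition to operational;
	 * ipr_oper_timeout fires if the adapter never gets there.
	 */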
6189 	ipr_cmd->timer.data = (unsigned long) ipr_cmd;
6190 	ipr_cmd->timer.expires = jiffies + (ipr_transop_timeout * HZ);
6191 	ipr_cmd->timer.function = (void (*)(unsigned long))ipr_oper_timeout;
6192 	ipr_cmd->done = ipr_reset_ioa_job;
6193 	add_timer(&ipr_cmd->timer);
6194 	list_add_tail(&ipr_cmd->queue, &ioa_cfg->pending_q);
6195 
6196 	LEAVE;
6197 	return IPR_RC_JOB_RETURN;
6198 }
6199 
6200 /**
6201  * ipr_reset_wait_for_dump - Wait for a dump to timeout.
6202  * @ipr_cmd:	ipr command struct
6203  *
6204  * This function is invoked when an adapter dump has run out
6205  * of processing time.
6206  *
6207  * Return value:
6208  * 	IPR_RC_JOB_CONTINUE
6209  **/
6210 static int ipr_reset_wait_for_dump(struct ipr_cmnd *ipr_cmd)
6211 {
6212 	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6213 
6214 	if (ioa_cfg->sdt_state == GET_DUMP)
6215 		ioa_cfg->sdt_state = ABORT_DUMP;
6216 
6217 	ipr_cmd->job_step = ipr_reset_alert;
6218 
6219 	return IPR_RC_JOB_CONTINUE;
6220 }
6221 
6222 /**
6223  * ipr_unit_check_no_data - Log a unit check/no data error log
6224  * @ioa_cfg:		ioa config struct
6225  *
6226  * Logs an error indicating the adapter unit checked, but for some
6227  * reason, we were unable to fetch the unit check buffer.
6228  *
6229  * Return value:
6230  * 	nothing
6231  **/
6232 static void ipr_unit_check_no_data(struct ipr_ioa_cfg *ioa_cfg)
6233 {
6234 	ioa_cfg->errors_logged++;
6235 	dev_err(&ioa_cfg->pdev->dev, "IOA unit check with no data\n");
6236 }
6237 
6238 /**
6239  * ipr_get_unit_check_buffer - Get the unit check buffer from the IOA
6240  * @ioa_cfg:		ioa config struct
6241  *
6242  * Fetches the unit check buffer from the adapter by clocking the data
6243  * through the mailbox register.
6244  *
6245  * Return value:
6246  * 	nothing
6247  **/
6248 static void ipr_get_unit_check_buffer(struct ipr_ioa_cfg *ioa_cfg)
6249 {
6250 	unsigned long mailbox;
6251 	struct ipr_hostrcb *hostrcb;
6252 	struct ipr_uc_sdt sdt;
6253 	int rc, length;
6254 
6255 	mailbox = readl(ioa_cfg->ioa_mailbox);
6256 
6257 	if (!ipr_sdt_is_fmt2(mailbox)) {
6258 		ipr_unit_check_no_data(ioa_cfg);
6259 		return;
6260 	}
6261 
6262 	memset(&sdt, 0, sizeof(struct ipr_uc_sdt));
6263 	rc = ipr_get_ldump_data_section(ioa_cfg, mailbox, (__be32 *) &sdt,
6264 					(sizeof(struct ipr_uc_sdt)) / sizeof(__be32));
6265 
6266 	if (rc || (be32_to_cpu(sdt.hdr.state) != IPR_FMT2_SDT_READY_TO_USE) ||
6267 	    !(sdt.entry[0].flags & IPR_SDT_VALID_ENTRY)) {
6268 		ipr_unit_check_no_data(ioa_cfg);
6269 		return;
6270 	}
6271 
6272 	/* Find length of the first sdt entry (UC buffer) */
6273 	length = (be32_to_cpu(sdt.entry[0].end_offset) -
6274 		  be32_to_cpu(sdt.entry[0].bar_str_offset)) & IPR_FMT2_MBX_ADDR_MASK;
6275 
6276 	hostrcb = list_entry(ioa_cfg->hostrcb_free_q.next,
6277 			     struct ipr_hostrcb, queue);
6278 	list_del(&hostrcb->queue);
6279 	memset(&hostrcb->hcam, 0, sizeof(hostrcb->hcam));
6280 
6281 	rc = ipr_get_ldump_data_section(ioa_cfg,
6282 					be32_to_cpu(sdt.entry[0].bar_str_offset),
6283 					(__be32 *)&hostrcb->hcam,
6284 					min(length, (int)sizeof(hostrcb->hcam)) / sizeof(__be32));
6285 
6286 	if (!rc)
6287 		ipr_handle_log_data(ioa_cfg, hostrcb);
6288 	else
6289 		ipr_unit_check_no_data(ioa_cfg);
6290 
6291 	list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_free_q);
6292 }
6293 
6294 /**
6295  * ipr_reset_restore_cfg_space - Restore PCI config space.
6296  * @ipr_cmd:	ipr command struct
6297  *
6298  * Description: This function restores the saved PCI config space of
6299  * the adapter, fails all outstanding ops back to the callers, and
6300  * fetches the dump/unit check if applicable to this reset.
6301  *
6302  * Return value:
6303  * 	IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
6304  **/
6305 static int ipr_reset_restore_cfg_space(struct ipr_cmnd *ipr_cmd)
6306 {
6307 	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6308 	int rc;
6309 
6310 	ENTER;
6311 	rc = pci_restore_state(ioa_cfg->pdev);
6312 
6313 	if (rc != PCIBIOS_SUCCESSFUL) {
6314 		ipr_cmd->ioasa.ioasc = cpu_to_be32(IPR_IOASC_PCI_ACCESS_ERROR);
6315 		return IPR_RC_JOB_CONTINUE;
6316 	}
6317 
6318 	if (ipr_set_pcix_cmd_reg(ioa_cfg)) {
6319 		ipr_cmd->ioasa.ioasc = cpu_to_be32(IPR_IOASC_PCI_ACCESS_ERROR);
6320 		return IPR_RC_JOB_CONTINUE;
6321 	}
6322 
6323 	ipr_fail_all_ops(ioa_cfg);
6324 
6325 	if (ioa_cfg->ioa_unit_checked) {
6326 		ioa_cfg->ioa_unit_checked = 0;
6327 		ipr_get_unit_check_buffer(ioa_cfg);
6328 		ipr_cmd->job_step = ipr_reset_alert;
6329 		ipr_reset_start_timer(ipr_cmd, 0);
6330 		return IPR_RC_JOB_RETURN;
6331 	}
6332 
6333 	if (ioa_cfg->in_ioa_bringdown) {
6334 		ipr_cmd->job_step = ipr_ioa_bringdown_done;
6335 	} else {
6336 		ipr_cmd->job_step = ipr_reset_enable_ioa;
6337 
6338 		if (GET_DUMP == ioa_cfg->sdt_state) {
6339 			ipr_reset_start_timer(ipr_cmd, IPR_DUMP_TIMEOUT);
6340 			ipr_cmd->job_step = ipr_reset_wait_for_dump;
6341 			schedule_work(&ioa_cfg->work_q);
6342 			return IPR_RC_JOB_RETURN;
6343 		}
6344 	}
6345 
6346 	LEAVE;
6347 	return IPR_RC_JOB_CONTINUE;
6348 }
6349 
6350 /**
6351  * ipr_reset_bist_done - BIST has completed on the adapter.
6352  * @ipr_cmd:	ipr command struct
6353  *
6354  * Description: Unblock config space and resume the reset process.
6355  *
6356  * Return value:
6357  * 	IPR_RC_JOB_CONTINUE
6358  **/
6359 static int ipr_reset_bist_done(struct ipr_cmnd *ipr_cmd)
6360 {
6361 	ENTER;
6362 	pci_unblock_user_cfg_access(ipr_cmd->ioa_cfg->pdev);
6363 	ipr_cmd->job_step = ipr_reset_restore_cfg_space;
6364 	LEAVE;
6365 	return IPR_RC_JOB_CONTINUE;
6366 }
6367 
6368 /**
6369  * ipr_reset_start_bist - Run BIST on the adapter.
6370  * @ipr_cmd:	ipr command struct
6371  *
6372  * Description: This function runs BIST on the adapter, then delays 2 seconds.
6373  *
6374  * Return value:
6375  * 	IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
6376  **/
6377 static int ipr_reset_start_bist(struct ipr_cmnd *ipr_cmd)
6378 {
6379 	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6380 	int rc;
6381 
6382 	ENTER;
6383 	pci_block_user_cfg_access(ioa_cfg->pdev);
6384 	rc = pci_write_config_byte(ioa_cfg->pdev, PCI_BIST, PCI_BIST_START);
6385 
6386 	if (rc != PCIBIOS_SUCCESSFUL) {
6387 		ipr_cmd->ioasa.ioasc = cpu_to_be32(IPR_IOASC_PCI_ACCESS_ERROR);
6388 		rc = IPR_RC_JOB_CONTINUE;
6389 	} else {
6390 		ipr_cmd->job_step = ipr_reset_bist_done;
6391 		ipr_reset_start_timer(ipr_cmd, IPR_WAIT_FOR_BIST_TIMEOUT);
6392 		rc = IPR_RC_JOB_RETURN;
6393 	}
6394 
6395 	LEAVE;
6396 	return rc;
6397 }
6398 
6399 /**
6400  * ipr_reset_allowed - Query whether or not IOA can be reset
6401  * @ioa_cfg:	ioa config struct
6402  *
6403  * Return value:
6404  * 	0 if reset not allowed / non-zero if reset is allowed
6405  **/
6406 static int ipr_reset_allowed(struct ipr_ioa_cfg *ioa_cfg)
6407 {
6408 	volatile u32 temp_reg;
6409 
6410 	temp_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
6411 	return ((temp_reg & IPR_PCII_CRITICAL_OPERATION) == 0);
6412 }
6413 
6414 /**
6415  * ipr_reset_wait_to_start_bist - Wait for permission to reset IOA.
6416  * @ipr_cmd:	ipr command struct
6417  *
6418  * Description: This function waits for adapter permission to run BIST,
6419  * then runs BIST. If the adapter does not give permission after a
6420  * reasonable time, we will reset the adapter anyway. The impact of
6421  * resetting the adapter without warning the adapter is the risk of
6422  * losing the persistent error log on the adapter. If the adapter is
6423  * reset while it is writing to the flash on the adapter, the flash
6424  * segment will have bad ECC and be zeroed.
6425  *
6426  * Return value:
6427  * 	IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
6428  **/
6429 static int ipr_reset_wait_to_start_bist(struct ipr_cmnd *ipr_cmd)
6430 {
6431 	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6432 	int rc = IPR_RC_JOB_RETURN;
6433 
6434 	if (!ipr_reset_allowed(ioa_cfg) && ipr_cmd->u.time_left) {
6435 		ipr_cmd->u.time_left -= IPR_CHECK_FOR_RESET_TIMEOUT;
6436 		ipr_reset_start_timer(ipr_cmd, IPR_CHECK_FOR_RESET_TIMEOUT);
6437 	} else {
6438 		ipr_cmd->job_step = ipr_reset_start_bist;
6439 		rc = IPR_RC_JOB_CONTINUE;
6440 	}
6441 
6442 	return rc;
6443 }
6444 
6445 /**
6446  * ipr_reset_alert_part2 - Alert the adapter of a pending reset
6447  * @ipr_cmd:	ipr command struct
6448  *
6449  * Description: This function alerts the adapter that it will be reset.
6450  * If memory space is not currently enabled, proceed directly
6451  * to running BIST on the adapter. The timer must always be started
6452  * so we guarantee we do not run BIST from ipr_isr.
6453  *
6454  * Return value:
6455  * 	IPR_RC_JOB_RETURN
6456  **/
6457 static int ipr_reset_alert(struct ipr_cmnd *ipr_cmd)
6458 {
6459 	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6460 	u16 cmd_reg;
6461 	int rc;
6462 
6463 	ENTER;
6464 	rc = pci_read_config_word(ioa_cfg->pdev, PCI_COMMAND, &cmd_reg);
6465 
6466 	if ((rc == PCIBIOS_SUCCESSFUL) && (cmd_reg & PCI_COMMAND_MEMORY)) {
6467 		ipr_mask_and_clear_interrupts(ioa_cfg, ~0);
6468 		writel(IPR_UPROCI_RESET_ALERT, ioa_cfg->regs.set_uproc_interrupt_reg);
6469 		ipr_cmd->job_step = ipr_reset_wait_to_start_bist;
6470 	} else {
6471 		ipr_cmd->job_step = ipr_reset_start_bist;
6472 	}
6473 
6474 	ipr_cmd->u.time_left = IPR_WAIT_FOR_RESET_TIMEOUT;
6475 	ipr_reset_start_timer(ipr_cmd, IPR_CHECK_FOR_RESET_TIMEOUT);
6476 
6477 	LEAVE;
6478 	return IPR_RC_JOB_RETURN;
6479 }
6480 
6481 /**
6482  * ipr_reset_ucode_download_done - Microcode download completion
6483  * @ipr_cmd:	ipr command struct
6484  *
6485  * Description: This function unmaps the microcode download buffer.
6486  *
6487  * Return value:
6488  * 	IPR_RC_JOB_CONTINUE
6489  **/
6490 static int ipr_reset_ucode_download_done(struct ipr_cmnd *ipr_cmd)
6491 {
6492 	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6493 	struct ipr_sglist *sglist = ioa_cfg->ucode_sglist;
6494 
6495 	pci_unmap_sg(ioa_cfg->pdev, sglist->scatterlist,
6496 		     sglist->num_sg, DMA_TO_DEVICE);
6497 
6498 	ipr_cmd->job_step = ipr_reset_alert;
6499 	return IPR_RC_JOB_CONTINUE;
6500 }
6501 
6502 /**
6503  * ipr_reset_ucode_download - Download microcode to the adapter
6504  * @ipr_cmd:	ipr command struct
6505  *
6506  * Description: This function checks to see if there is microcode
6507  * to download to the adapter. If there is, a download is performed.
6508  *
6509  * Return value:
6510  * 	IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
6511  **/
6512 static int ipr_reset_ucode_download(struct ipr_cmnd *ipr_cmd)
6513 {
6514 	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6515 	struct ipr_sglist *sglist = ioa_cfg->ucode_sglist;
6516 
6517 	ENTER;
6518 	ipr_cmd->job_step = ipr_reset_alert;
6519 
6520 	if (!sglist)
6521 		return IPR_RC_JOB_CONTINUE;
6522 
6523 	ipr_cmd->ioarcb.res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
6524 	ipr_cmd->ioarcb.cmd_pkt.request_type = IPR_RQTYPE_SCSICDB;
6525 	ipr_cmd->ioarcb.cmd_pkt.cdb[0] = WRITE_BUFFER;
6526 	ipr_cmd->ioarcb.cmd_pkt.cdb[1] = IPR_WR_BUF_DOWNLOAD_AND_SAVE;
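	/* cdb[6-8]: 24-bit parameter list length, most significant byte first */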
6527 	ipr_cmd->ioarcb.cmd_pkt.cdb[6] = (sglist->buffer_len & 0xff0000) >> 16;
6528 	ipr_cmd->ioarcb.cmd_pkt.cdb[7] = (sglist->buffer_len & 0x00ff00) >> 8;
6529 	ipr_cmd->ioarcb.cmd_pkt.cdb[8] = sglist->buffer_len & 0x0000ff;
6530 
6531 	ipr_build_ucode_ioadl(ipr_cmd, sglist);
6532 	ipr_cmd->job_step = ipr_reset_ucode_download_done;
6533 
6534 	ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout,
6535 		   IPR_WRITE_BUFFER_TIMEOUT);
6536 
6537 	LEAVE;
6538 	return IPR_RC_JOB_RETURN;
6539 }
6540 
6541 /**
6542  * ipr_reset_shutdown_ioa - Shutdown the adapter
6543  * @ipr_cmd:	ipr command struct
6544  *
6545  * Description: This function issues an adapter shutdown of the
6546  * specified type to the specified adapter as part of the
6547  * adapter reset job.
6548  *
6549  * Return value:
6550  * 	IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
6551  **/
6552 static int ipr_reset_shutdown_ioa(struct ipr_cmnd *ipr_cmd)
6553 {
6554 	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6555 	enum ipr_shutdown_type shutdown_type = ipr_cmd->u.shutdown_type;
6556 	unsigned long timeout;
6557 	int rc = IPR_RC_JOB_CONTINUE;
6558 
6559 	ENTER;
6560 	if (shutdown_type != IPR_SHUTDOWN_NONE && !ioa_cfg->ioa_is_dead) {
6561 		ipr_cmd->ioarcb.res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
6562 		ipr_cmd->ioarcb.cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
6563 		ipr_cmd->ioarcb.cmd_pkt.cdb[0] = IPR_IOA_SHUTDOWN;
6564 		ipr_cmd->ioarcb.cmd_pkt.cdb[1] = shutdown_type;
6565 
6566 		if (shutdown_type == IPR_SHUTDOWN_ABBREV)
6567 			timeout = IPR_ABBREV_SHUTDOWN_TIMEOUT;
6568 		else if (shutdown_type == IPR_SHUTDOWN_PREPARE_FOR_NORMAL)
6569 			timeout = IPR_INTERNAL_TIMEOUT;
6570 		else
6571 			timeout = IPR_SHUTDOWN_TIMEOUT;
6572 
6573 		ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, timeout);
6574 
6575 		rc = IPR_RC_JOB_RETURN;
6576 		ipr_cmd->job_step = ipr_reset_ucode_download;
6577 	} else
6578 		ipr_cmd->job_step = ipr_reset_alert;
6579 
6580 	LEAVE;
6581 	return rc;
6582 }
6583 
6584 /**
6585  * ipr_reset_ioa_job - Adapter reset job
6586  * @ipr_cmd:	ipr command struct
6587  *
6588  * Description: This function is the job router for the adapter reset job.
6589  *
6590  * Return value:
6591  * 	none
6592  **/
6593 static void ipr_reset_ioa_job(struct ipr_cmnd *ipr_cmd)
6594 {
6595 	u32 rc, ioasc;
6596 	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6597 
6598 	do {
6599 		ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
6600 
6601 		if (ioa_cfg->reset_cmd != ipr_cmd) {
6602 			/*
6603 			 * We are doing nested adapter resets and this is
6604 			 * not the current reset job.
6605 			 */
6606 			list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
6607 			return;
6608 		}
6609 
6610 		if (IPR_IOASC_SENSE_KEY(ioasc)) {
6611 			rc = ipr_cmd->job_step_failed(ipr_cmd);
6612 			if (rc == IPR_RC_JOB_RETURN)
6613 				return;
6614 		}
6615 
6616 		ipr_reinit_ipr_cmnd(ipr_cmd);
6617 		ipr_cmd->job_step_failed = ipr_reset_cmd_failed;
6618 		rc = ipr_cmd->job_step(ipr_cmd);
6619 	} while (rc == IPR_RC_JOB_CONTINUE);
6620 }
6621 
6622 /**
6623  * _ipr_initiate_ioa_reset - Initiate an adapter reset
6624  * @ioa_cfg:		ioa config struct
6625  * @job_step:		first job step of reset job
6626  * @shutdown_type:	shutdown type
6627  *
6628  * Description: This function will initiate the reset of the given adapter
6629  * starting at the selected job step.
6630  * If the caller needs to wait on the completion of the reset,
6631  * the caller must sleep on the reset_wait_q.
6632  *
6633  * Return value:
6634  * 	none
6635  **/
6636 static void _ipr_initiate_ioa_reset(struct ipr_ioa_cfg *ioa_cfg,
6637 				    int (*job_step) (struct ipr_cmnd *),
6638 				    enum ipr_shutdown_type shutdown_type)
6639 {
6640 	struct ipr_cmnd *ipr_cmd;
6641 
6642 	ioa_cfg->in_reset_reload = 1;
6643 	ioa_cfg->allow_cmds = 0;
6644 	scsi_block_requests(ioa_cfg->host);
6645 
6646 	ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
6647 	ioa_cfg->reset_cmd = ipr_cmd;
6648 	ipr_cmd->job_step = job_step;
6649 	ipr_cmd->u.shutdown_type = shutdown_type;
6650 
6651 	ipr_reset_ioa_job(ipr_cmd);
6652 }
6653 
6654 /**
6655  * ipr_initiate_ioa_reset - Initiate an adapter reset
6656  * @ioa_cfg:		ioa config struct
6657  * @shutdown_type:	shutdown type
6658  *
6659  * Description: This function will initiate the reset of the given adapter.
6660  * If the caller needs to wait on the completion of the reset,
6661  * the caller must sleep on the reset_wait_q.
6662  *
6663  * Return value:
6664  * 	none
6665  **/
6666 static void ipr_initiate_ioa_reset(struct ipr_ioa_cfg *ioa_cfg,
6667 				   enum ipr_shutdown_type shutdown_type)
6668 {
6669 	if (ioa_cfg->ioa_is_dead)
6670 		return;
6671 
6672 	if (ioa_cfg->in_reset_reload && ioa_cfg->sdt_state == GET_DUMP)
6673 		ioa_cfg->sdt_state = ABORT_DUMP;
6674 
6675 	if (ioa_cfg->reset_retries++ >= IPR_NUM_RESET_RELOAD_RETRIES) {
6676 		dev_err(&ioa_cfg->pdev->dev,
6677 			"IOA taken offline - error recovery failed\n");
6678 
6679 		ioa_cfg->reset_retries = 0;
6680 		ioa_cfg->ioa_is_dead = 1;
6681 
6682 		if (ioa_cfg->in_ioa_bringdown) {
6683 			ioa_cfg->reset_cmd = NULL;
6684 			ioa_cfg->in_reset_reload = 0;
6685 			ipr_fail_all_ops(ioa_cfg);
6686 			wake_up_all(&ioa_cfg->reset_wait_q);
6687 
6688 			spin_unlock_irq(ioa_cfg->host->host_lock);
6689 			scsi_unblock_requests(ioa_cfg->host);
6690 			spin_lock_irq(ioa_cfg->host->host_lock);
6691 			return;
6692 		} else {
6693 			ioa_cfg->in_ioa_bringdown = 1;
6694 			shutdown_type = IPR_SHUTDOWN_NONE;
6695 		}
6696 	}
6697 
6698 	_ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_shutdown_ioa,
6699 				shutdown_type);
6700 }
6701 
6702 /**
6703  * ipr_reset_freeze - Hold off all I/O activity
6704  * @ipr_cmd:	ipr command struct
6705  *
6706  * Description: If the PCI slot is frozen, hold off all I/O
6707  * activity; then, as soon as the slot is available again,
6708  * initiate an adapter reset.
6709  */
6710 static int ipr_reset_freeze(struct ipr_cmnd *ipr_cmd)
6711 {
6712 	/* Disallow new interrupts, avoid loop */
6713 	ipr_cmd->ioa_cfg->allow_interrupts = 0;
6714 	list_add_tail(&ipr_cmd->queue, &ipr_cmd->ioa_cfg->pending_q);
6715 	ipr_cmd->done = ipr_reset_ioa_job;
6716 	return IPR_RC_JOB_RETURN;
6717 }
6718 
6719 /**
6720  * ipr_pci_frozen - Called when slot has experienced a PCI bus error.
6721  * @pdev:	PCI device struct
6722  *
6723  * Description: This routine is called to tell us that the PCI bus
6724  * is down. Can't do anything here, except put the device driver
6725  * into a holding pattern, waiting for the PCI bus to come back.
6726  */
6727 static void ipr_pci_frozen(struct pci_dev *pdev)
6728 {
6729 	unsigned long flags = 0;
6730 	struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
6731 
6732 	spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
6733 	_ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_freeze, IPR_SHUTDOWN_NONE);
6734 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
6735 }
6736 
6737 /**
6738  * ipr_pci_slot_reset - Called when PCI slot has been reset.
6739  * @pdev:	PCI device struct
6740  *
6741  * Description: This routine is called by the pci error recovery
6742  * code after the PCI slot has been reset, just before we
6743  * should resume normal operations.
6744  */
6745 static pci_ers_result_t ipr_pci_slot_reset(struct pci_dev *pdev)
6746 {
6747 	unsigned long flags = 0;
6748 	struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
6749 
6750 	spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
6751 	_ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_restore_cfg_space,
6752 	                                 IPR_SHUTDOWN_NONE);
6753 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
6754 	return PCI_ERS_RESULT_RECOVERED;
6755 }
6756 
6757 /**
6758  * ipr_pci_perm_failure - Called when PCI slot is dead for good.
6759  * @pdev:	PCI device struct
6760  *
6761  * Description: This routine is called when the PCI bus has
6762  * permanently failed.
6763  */
6764 static void ipr_pci_perm_failure(struct pci_dev *pdev)
6765 {
6766 	unsigned long flags = 0;
6767 	struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
6768 
6769 	spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
6770 	if (ioa_cfg->sdt_state == WAIT_FOR_DUMP)
6771 		ioa_cfg->sdt_state = ABORT_DUMP;
6772 	ioa_cfg->reset_retries = IPR_NUM_RESET_RELOAD_RETRIES;
6773 	ioa_cfg->in_ioa_bringdown = 1;
6774 	ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
6775 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
6776 }
6777 
6778 /**
6779  * ipr_pci_error_detected - Called when a PCI error is detected.
6780  * @pdev:	PCI device struct
6781  * @state:	PCI channel state
6782  *
6783  * Description: Called when a PCI error is detected.
6784  *
6785  * Return value:
6786  * 	PCI_ERS_RESULT_NEED_RESET or PCI_ERS_RESULT_DISCONNECT
6787  */
6788 static pci_ers_result_t ipr_pci_error_detected(struct pci_dev *pdev,
6789 					       pci_channel_state_t state)
6790 {
6791 	switch (state) {
6792 	case pci_channel_io_frozen:
6793 		ipr_pci_frozen(pdev);
6794 		return PCI_ERS_RESULT_NEED_RESET;
6795 	case pci_channel_io_perm_failure:
6796 		ipr_pci_perm_failure(pdev);
6797 		return PCI_ERS_RESULT_DISCONNECT;
6799 	default:
6800 		break;
6801 	}
6802 	return PCI_ERS_RESULT_NEED_RESET;
6803 }
6804 
6805 /**
6806  * ipr_probe_ioa_part2 - Initializes IOAs found in ipr_probe_ioa(..)
6807  * @ioa_cfg:	ioa cfg struct
6808  *
6809  * Description: This is the second phase of adapter initialization.
6810  * This function takes care of initializing the adapter to the point
6811  * where it can accept new commands.
6812  *
6813  * Return value:
6814  * 	0 on success / -EIO on failure
6815  **/
6816 static int __devinit ipr_probe_ioa_part2(struct ipr_ioa_cfg *ioa_cfg)
6817 {
6818 	int rc = 0;
6819 	unsigned long host_lock_flags = 0;
6820 
6821 	ENTER;
6822 	spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
6823 	dev_dbg(&ioa_cfg->pdev->dev, "ioa_cfg addr: 0x%p\n", ioa_cfg);
6824 	if (ioa_cfg->needs_hard_reset) {
6825 		ioa_cfg->needs_hard_reset = 0;
6826 		ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
6827 	} else
6828 		_ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_enable_ioa,
6829 					IPR_SHUTDOWN_NONE);
6830 
6831 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
6832 	wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
6833 	spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
6834 
6835 	if (ioa_cfg->ioa_is_dead) {
6836 		rc = -EIO;
6837 	} else if (ipr_invalid_adapter(ioa_cfg)) {
6838 		if (!ipr_testmode)
6839 			rc = -EIO;
6840 
6841 		dev_err(&ioa_cfg->pdev->dev,
6842 			"Adapter not supported in this hardware configuration.\n");
6843 	}
6844 
6845 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
6846 
6847 	LEAVE;
6848 	return rc;
6849 }
6850 
6851 /**
6852  * ipr_free_cmd_blks - Frees command blocks allocated for an adapter
6853  * @ioa_cfg:	ioa config struct
6854  *
6855  * Return value:
6856  * 	none
6857  **/
6858 static void ipr_free_cmd_blks(struct ipr_ioa_cfg *ioa_cfg)
6859 {
6860 	int i;
6861 
6862 	for (i = 0; i < IPR_NUM_CMD_BLKS; i++) {
6863 		if (ioa_cfg->ipr_cmnd_list[i])
6864 			pci_pool_free(ioa_cfg->ipr_cmd_pool,
6865 				      ioa_cfg->ipr_cmnd_list[i],
6866 				      ioa_cfg->ipr_cmnd_list_dma[i]);
6867 
6868 		ioa_cfg->ipr_cmnd_list[i] = NULL;
6869 	}
6870 
6871 	if (ioa_cfg->ipr_cmd_pool)
6872 		pci_pool_destroy(ioa_cfg->ipr_cmd_pool);
6873 
6874 	ioa_cfg->ipr_cmd_pool = NULL;
6875 }
6876 
6877 /**
6878  * ipr_free_mem - Frees memory allocated for an adapter
6879  * @ioa_cfg:	ioa cfg struct
6880  *
6881  * Return value:
6882  * 	nothing
6883  **/
6884 static void ipr_free_mem(struct ipr_ioa_cfg *ioa_cfg)
6885 {
6886 	int i;
6887 
6888 	kfree(ioa_cfg->res_entries);
6889 	pci_free_consistent(ioa_cfg->pdev, sizeof(struct ipr_misc_cbs),
6890 			    ioa_cfg->vpd_cbs, ioa_cfg->vpd_cbs_dma);
6891 	ipr_free_cmd_blks(ioa_cfg);
6892 	pci_free_consistent(ioa_cfg->pdev, sizeof(u32) * IPR_NUM_CMD_BLKS,
6893 			    ioa_cfg->host_rrq, ioa_cfg->host_rrq_dma);
6894 	pci_free_consistent(ioa_cfg->pdev, sizeof(struct ipr_config_table),
6895 			    ioa_cfg->cfg_table,
6896 			    ioa_cfg->cfg_table_dma);
6897 
6898 	for (i = 0; i < IPR_NUM_HCAMS; i++) {
6899 		pci_free_consistent(ioa_cfg->pdev,
6900 				    sizeof(struct ipr_hostrcb),
6901 				    ioa_cfg->hostrcb[i],
6902 				    ioa_cfg->hostrcb_dma[i]);
6903 	}
6904 
6905 	ipr_free_dump(ioa_cfg);
6906 	kfree(ioa_cfg->trace);
6907 }
6908 
6909 /**
6910  * ipr_free_all_resources - Free all allocated resources for an adapter.
6911  * @ioa_cfg:	ioa config struct
6912  *
6913  * This function frees all allocated resources for the
6914  * specified adapter.
6915  *
6916  * Return value:
6917  * 	none
6918  **/
6919 static void ipr_free_all_resources(struct ipr_ioa_cfg *ioa_cfg)
6920 {
6921 	struct pci_dev *pdev = ioa_cfg->pdev;
6922 
6923 	ENTER;
6924 	free_irq(pdev->irq, ioa_cfg);
6925 	iounmap(ioa_cfg->hdw_dma_regs);
6926 	pci_release_regions(pdev);
6927 	ipr_free_mem(ioa_cfg);
6928 	scsi_host_put(ioa_cfg->host);
6929 	pci_disable_device(pdev);
6930 	LEAVE;
6931 }
6932 
6933 /**
6934  * ipr_alloc_cmd_blks - Allocate command blocks for an adapter
6935  * @ioa_cfg:	ioa config struct
6936  *
6937  * Return value:
6938  * 	0 on success / -ENOMEM on allocation failure
6939  **/
6940 static int __devinit ipr_alloc_cmd_blks(struct ipr_ioa_cfg *ioa_cfg)
6941 {
6942 	struct ipr_cmnd *ipr_cmd;
6943 	struct ipr_ioarcb *ioarcb;
6944 	dma_addr_t dma_addr;
6945 	int i;
6946 
6947 	ioa_cfg->ipr_cmd_pool = pci_pool_create(IPR_NAME, ioa_cfg->pdev,
6948 						 sizeof(struct ipr_cmnd), 8, 0);
6949 
6950 	if (!ioa_cfg->ipr_cmd_pool)
6951 		return -ENOMEM;
6952 
6953 	for (i = 0; i < IPR_NUM_CMD_BLKS; i++) {
6954 		ipr_cmd = pci_pool_alloc(ioa_cfg->ipr_cmd_pool, GFP_KERNEL, &dma_addr);
6955 
6956 		if (!ipr_cmd) {
6957 			ipr_free_cmd_blks(ioa_cfg);
6958 			return -ENOMEM;
6959 		}
6960 
6961 		memset(ipr_cmd, 0, sizeof(*ipr_cmd));
6962 		ioa_cfg->ipr_cmnd_list[i] = ipr_cmd;
6963 		ioa_cfg->ipr_cmnd_list_dma[i] = dma_addr;
6964 
6965 		ioarcb = &ipr_cmd->ioarcb;
6966 		ioarcb->ioarcb_host_pci_addr = cpu_to_be32(dma_addr);
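		/*
		 * Shift the index left two bits; the low bits of each host
		 * RRQ entry are reserved for the toggle and response flags.
		 */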
6967 		ioarcb->host_response_handle = cpu_to_be32(i << 2);
6968 		ioarcb->write_ioadl_addr =
6969 			cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, ioadl));
6970 		ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr;
6971 		ioarcb->ioasa_host_pci_addr =
6972 			cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, ioasa));
6973 		ioarcb->ioasa_len = cpu_to_be16(sizeof(struct ipr_ioasa));
6974 		ipr_cmd->cmd_index = i;
6975 		ipr_cmd->ioa_cfg = ioa_cfg;
6976 		ipr_cmd->sense_buffer_dma = dma_addr +
6977 			offsetof(struct ipr_cmnd, sense_buffer);
6978 
6979 		list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
6980 	}
6981 
6982 	return 0;
6983 }
6984 
6985 /**
6986  * ipr_alloc_mem - Allocate memory for an adapter
6987  * @ioa_cfg:	ioa config struct
6988  *
6989  * Return value:
6990  * 	0 on success / non-zero for error
6991  **/
6992 static int __devinit ipr_alloc_mem(struct ipr_ioa_cfg *ioa_cfg)
6993 {
6994 	struct pci_dev *pdev = ioa_cfg->pdev;
6995 	int i, rc = -ENOMEM;
6996 
6997 	ENTER;
6998 	ioa_cfg->res_entries = kzalloc(sizeof(struct ipr_resource_entry) *
6999 				       IPR_MAX_PHYSICAL_DEVS, GFP_KERNEL);
7000 
7001 	if (!ioa_cfg->res_entries)
7002 		goto out;
7003 
7004 	for (i = 0; i < IPR_MAX_PHYSICAL_DEVS; i++)
7005 		list_add_tail(&ioa_cfg->res_entries[i].queue, &ioa_cfg->free_res_q);
7006 
7007 	ioa_cfg->vpd_cbs = pci_alloc_consistent(ioa_cfg->pdev,
7008 						sizeof(struct ipr_misc_cbs),
7009 						&ioa_cfg->vpd_cbs_dma);
7010 
7011 	if (!ioa_cfg->vpd_cbs)
7012 		goto out_free_res_entries;
7013 
7014 	if (ipr_alloc_cmd_blks(ioa_cfg))
7015 		goto out_free_vpd_cbs;
7016 
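	/*
	 * The host request/response queue (HRRQ) holds one 32-bit entry
	 * per possible outstanding command block.
	 */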
7017 	ioa_cfg->host_rrq = pci_alloc_consistent(ioa_cfg->pdev,
7018 						 sizeof(u32) * IPR_NUM_CMD_BLKS,
7019 						 &ioa_cfg->host_rrq_dma);
7020 
7021 	if (!ioa_cfg->host_rrq)
7022 		goto out_ipr_free_cmd_blocks;
7023 
7024 	ioa_cfg->cfg_table = pci_alloc_consistent(ioa_cfg->pdev,
7025 						  sizeof(struct ipr_config_table),
7026 						  &ioa_cfg->cfg_table_dma);
7027 
7028 	if (!ioa_cfg->cfg_table)
7029 		goto out_free_host_rrq;
7030 
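	/*
	 * Allocate one host RCB per concurrently outstanding HCAM. HCAMs
	 * carry configuration change and error log notifications from
	 * the adapter.
	 */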
7031 	for (i = 0; i < IPR_NUM_HCAMS; i++) {
7032 		ioa_cfg->hostrcb[i] = pci_alloc_consistent(ioa_cfg->pdev,
7033 							   sizeof(struct ipr_hostrcb),
7034 							   &ioa_cfg->hostrcb_dma[i]);
7035 
7036 		if (!ioa_cfg->hostrcb[i])
7037 			goto out_free_hostrcb_dma;
7038 
7039 		ioa_cfg->hostrcb[i]->hostrcb_dma =
7040 			ioa_cfg->hostrcb_dma[i] + offsetof(struct ipr_hostrcb, hcam);
7041 		ioa_cfg->hostrcb[i]->ioa_cfg = ioa_cfg;
7042 		list_add_tail(&ioa_cfg->hostrcb[i]->queue, &ioa_cfg->hostrcb_free_q);
7043 	}
7044 
7045 	ioa_cfg->trace = kzalloc(sizeof(struct ipr_trace_entry) *
7046 				 IPR_NUM_TRACE_ENTRIES, GFP_KERNEL);
7047 
7048 	if (!ioa_cfg->trace)
7049 		goto out_free_hostrcb_dma;
7050 
7051 	rc = 0;
7052 out:
7053 	LEAVE;
7054 	return rc;
7055 
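	/* Failure paths below unwind the allocations above in reverse order */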
7056 out_free_hostrcb_dma:
7057 	while (i-- > 0) {
7058 		pci_free_consistent(pdev, sizeof(struct ipr_hostrcb),
7059 				    ioa_cfg->hostrcb[i],
7060 				    ioa_cfg->hostrcb_dma[i]);
7061 	}
7062 	pci_free_consistent(pdev, sizeof(struct ipr_config_table),
7063 			    ioa_cfg->cfg_table, ioa_cfg->cfg_table_dma);
7064 out_free_host_rrq:
7065 	pci_free_consistent(pdev, sizeof(u32) * IPR_NUM_CMD_BLKS,
7066 			    ioa_cfg->host_rrq, ioa_cfg->host_rrq_dma);
7067 out_ipr_free_cmd_blocks:
7068 	ipr_free_cmd_blks(ioa_cfg);
7069 out_free_vpd_cbs:
7070 	pci_free_consistent(pdev, sizeof(struct ipr_misc_cbs),
7071 			    ioa_cfg->vpd_cbs, ioa_cfg->vpd_cbs_dma);
7072 out_free_res_entries:
7073 	kfree(ioa_cfg->res_entries);
7074 	goto out;
7075 }
7076 
7077 /**
7078  * ipr_initialize_bus_attr - Initialize SCSI bus attributes to default values
7079  * @ioa_cfg:	ioa config struct
7080  *
7081  * Return value:
7082  * 	none
7083  **/
7084 static void __devinit ipr_initialize_bus_attr(struct ipr_ioa_cfg *ioa_cfg)
7085 {
7086 	int i;
7087 
7088 	for (i = 0; i < IPR_MAX_NUM_BUSES; i++) {
7089 		ioa_cfg->bus_attr[i].bus = i;
7090 		ioa_cfg->bus_attr[i].qas_enabled = 0;
7091 		ioa_cfg->bus_attr[i].bus_width = IPR_DEFAULT_BUS_WIDTH;
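		/*
		 * ipr_max_speed is a module-parameter index into
		 * ipr_max_bus_speeds[]; out-of-range values fall back
		 * to the U160 rate.
		 */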
7092 		if (ipr_max_speed < ARRAY_SIZE(ipr_max_bus_speeds))
7093 			ioa_cfg->bus_attr[i].max_xfer_rate = ipr_max_bus_speeds[ipr_max_speed];
7094 		else
7095 			ioa_cfg->bus_attr[i].max_xfer_rate = IPR_U160_SCSI_RATE;
7096 	}
7097 }
7098 
7099 /**
7100  * ipr_init_ioa_cfg - Initialize IOA config struct
7101  * @ioa_cfg:	ioa config struct
7102  * @host:		scsi host struct
7103  * @pdev:		PCI dev struct
7104  *
7105  * Return value:
7106  * 	none
7107  **/
7108 static void __devinit ipr_init_ioa_cfg(struct ipr_ioa_cfg *ioa_cfg,
7109 				       struct Scsi_Host *host, struct pci_dev *pdev)
7110 {
7111 	const struct ipr_interrupt_offsets *p;
7112 	struct ipr_interrupts *t;
7113 	void __iomem *base;
7114 
7115 	ioa_cfg->host = host;
7116 	ioa_cfg->pdev = pdev;
7117 	ioa_cfg->log_level = ipr_log_level;
7118 	ioa_cfg->doorbell = IPR_DOORBELL;
7119 	if (!ipr_auto_create)
7120 		ioa_cfg->doorbell |= IPR_RUNTIME_RESET;
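	/*
	 * The eye-catcher labels below exist only to make these
	 * structures easy to locate in a main storage dump.
	 */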
	strcpy(ioa_cfg->eye_catcher, IPR_EYECATCHER);
	strcpy(ioa_cfg->trace_start, IPR_TRACE_START_LABEL);
	strcpy(ioa_cfg->ipr_free_label, IPR_FREEQ_LABEL);
	strcpy(ioa_cfg->ipr_pending_label, IPR_PENDQ_LABEL);
	strcpy(ioa_cfg->cfg_table_start, IPR_CFG_TBL_START);
	strcpy(ioa_cfg->resource_table_label, IPR_RES_TABLE_LABEL);
	strcpy(ioa_cfg->ipr_hcam_label, IPR_HCAM_LABEL);
	strcpy(ioa_cfg->ipr_cmd_label, IPR_CMD_LABEL);
7129 
7130 	INIT_LIST_HEAD(&ioa_cfg->free_q);
7131 	INIT_LIST_HEAD(&ioa_cfg->pending_q);
7132 	INIT_LIST_HEAD(&ioa_cfg->hostrcb_free_q);
7133 	INIT_LIST_HEAD(&ioa_cfg->hostrcb_pending_q);
7134 	INIT_LIST_HEAD(&ioa_cfg->free_res_q);
7135 	INIT_LIST_HEAD(&ioa_cfg->used_res_q);
7136 	INIT_WORK(&ioa_cfg->work_q, ipr_worker_thread);
7137 	init_waitqueue_head(&ioa_cfg->reset_wait_q);
7138 	ioa_cfg->sdt_state = INACTIVE;
7139 	if (ipr_enable_cache)
7140 		ioa_cfg->cache_state = CACHE_ENABLED;
7141 	else
7142 		ioa_cfg->cache_state = CACHE_DISABLED;
7143 
7144 	ipr_initialize_bus_attr(ioa_cfg);
7145 
7146 	host->max_id = IPR_MAX_NUM_TARGETS_PER_BUS;
7147 	host->max_lun = IPR_MAX_NUM_LUNS_PER_TARGET;
7148 	host->max_channel = IPR_MAX_BUS_TO_SCAN;
7149 	host->unique_id = host->host_no;
7150 	host->max_cmd_len = IPR_MAX_CDB_LEN;
7151 	pci_set_drvdata(pdev, ioa_cfg);
7152 
7153 	p = &ioa_cfg->chip_cfg->regs;
7154 	t = &ioa_cfg->regs;
7155 	base = ioa_cfg->hdw_dma_regs;
7156 
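	/*
	 * Translate the chip-specific register offsets into ioremapped
	 * addresses for the interrupt and IOARRIN registers.
	 */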
7157 	t->set_interrupt_mask_reg = base + p->set_interrupt_mask_reg;
7158 	t->clr_interrupt_mask_reg = base + p->clr_interrupt_mask_reg;
7159 	t->sense_interrupt_mask_reg = base + p->sense_interrupt_mask_reg;
7160 	t->clr_interrupt_reg = base + p->clr_interrupt_reg;
7161 	t->sense_interrupt_reg = base + p->sense_interrupt_reg;
7162 	t->ioarrin_reg = base + p->ioarrin_reg;
7163 	t->sense_uproc_interrupt_reg = base + p->sense_uproc_interrupt_reg;
7164 	t->set_uproc_interrupt_reg = base + p->set_uproc_interrupt_reg;
7165 	t->clr_uproc_interrupt_reg = base + p->clr_uproc_interrupt_reg;
7166 }
7167 
7168 /**
7169  * ipr_get_chip_cfg - Find adapter chip configuration
7170  * @dev_id:		PCI device id struct
7171  *
7172  * Return value:
7173  * 	ptr to chip config on success / NULL on failure
7174  **/
7175 static const struct ipr_chip_cfg_t * __devinit
7176 ipr_get_chip_cfg(const struct pci_device_id *dev_id)
7177 {
7178 	int i;
7179 
7180 	for (i = 0; i < ARRAY_SIZE(ipr_chip); i++)
7181 		if (ipr_chip[i].vendor == dev_id->vendor &&
7182 		    ipr_chip[i].device == dev_id->device)
7183 			return ipr_chip[i].cfg;
7184 	return NULL;
7185 }
7186 
7187 /**
7188  * ipr_probe_ioa - Allocates memory and does first stage of initialization
7189  * @pdev:		PCI device struct
7190  * @dev_id:		PCI device id struct
7191  *
7192  * Return value:
7193  * 	0 on success / non-zero on failure
7194  **/
7195 static int __devinit ipr_probe_ioa(struct pci_dev *pdev,
7196 				   const struct pci_device_id *dev_id)
7197 {
7198 	struct ipr_ioa_cfg *ioa_cfg;
7199 	struct Scsi_Host *host;
7200 	unsigned long ipr_regs_pci;
7201 	void __iomem *ipr_regs;
7202 	int rc = PCIBIOS_SUCCESSFUL;
7203 	volatile u32 mask, uproc;
7204 
7205 	ENTER;
7206 
7207 	if ((rc = pci_enable_device(pdev))) {
7208 		dev_err(&pdev->dev, "Cannot enable adapter\n");
7209 		goto out;
7210 	}
7211 
7212 	dev_info(&pdev->dev, "Found IOA with IRQ: %d\n", pdev->irq);
7213 
7214 	host = scsi_host_alloc(&driver_template, sizeof(*ioa_cfg));
7215 
7216 	if (!host) {
7217 		dev_err(&pdev->dev, "call to scsi_host_alloc failed!\n");
7218 		rc = -ENOMEM;
7219 		goto out_disable;
7220 	}
7221 
7222 	ioa_cfg = (struct ipr_ioa_cfg *)host->hostdata;
7223 	memset(ioa_cfg, 0, sizeof(struct ipr_ioa_cfg));
7224 	ata_host_init(&ioa_cfg->ata_host, &pdev->dev,
7225 		      sata_port_info.flags, &ipr_sata_ops);
7226 
7227 	ioa_cfg->chip_cfg = ipr_get_chip_cfg(dev_id);
7228 
	if (!ioa_cfg->chip_cfg) {
		dev_err(&pdev->dev, "Unknown adapter chipset 0x%04X 0x%04X\n",
			dev_id->vendor, dev_id->device);
		rc = -ENODEV;
		goto out_scsi_host_put;
	}
7234 
7235 	ipr_regs_pci = pci_resource_start(pdev, 0);
7236 
7237 	rc = pci_request_regions(pdev, IPR_NAME);
7238 	if (rc < 0) {
7239 		dev_err(&pdev->dev,
7240 			"Couldn't register memory range of registers\n");
7241 		goto out_scsi_host_put;
7242 	}
7243 
7244 	ipr_regs = ioremap(ipr_regs_pci, pci_resource_len(pdev, 0));
7245 
7246 	if (!ipr_regs) {
7247 		dev_err(&pdev->dev,
7248 			"Couldn't map memory range of registers\n");
7249 		rc = -ENOMEM;
7250 		goto out_release_regions;
7251 	}
7252 
7253 	ioa_cfg->hdw_dma_regs = ipr_regs;
7254 	ioa_cfg->hdw_dma_regs_pci = ipr_regs_pci;
7255 	ioa_cfg->ioa_mailbox = ioa_cfg->chip_cfg->mailbox + ipr_regs;
7256 
7257 	ipr_init_ioa_cfg(ioa_cfg, host, pdev);
7258 
7259 	pci_set_master(pdev);
7260 
7261 	rc = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
7262 	if (rc < 0) {
7263 		dev_err(&pdev->dev, "Failed to set PCI DMA mask\n");
7264 		goto cleanup_nomem;
7265 	}
7266 
7267 	rc = pci_write_config_byte(pdev, PCI_CACHE_LINE_SIZE,
7268 				   ioa_cfg->chip_cfg->cache_line_size);
7269 
7270 	if (rc != PCIBIOS_SUCCESSFUL) {
7271 		dev_err(&pdev->dev, "Write of cache line size failed\n");
7272 		rc = -EIO;
7273 		goto cleanup_nomem;
7274 	}
7275 
7276 	/* Save away PCI config space for use following IOA reset */
7277 	rc = pci_save_state(pdev);
7278 
7279 	if (rc != PCIBIOS_SUCCESSFUL) {
7280 		dev_err(&pdev->dev, "Failed to save PCI config space\n");
7281 		rc = -EIO;
7282 		goto cleanup_nomem;
7283 	}
7284 
7285 	if ((rc = ipr_save_pcix_cmd_reg(ioa_cfg)))
7286 		goto cleanup_nomem;
7287 
7288 	if ((rc = ipr_set_pcix_cmd_reg(ioa_cfg)))
7289 		goto cleanup_nomem;
7290 
7291 	rc = ipr_alloc_mem(ioa_cfg);
7292 	if (rc < 0) {
7293 		dev_err(&pdev->dev,
7294 			"Couldn't allocate enough memory for device driver!\n");
7295 		goto cleanup_nomem;
7296 	}
7297 
7298 	/*
7299 	 * If HRRQ updated interrupt is not masked, or reset alert is set,
7300 	 * the card is in an unknown state and needs a hard reset
7301 	 */
7302 	mask = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
7303 	uproc = readl(ioa_cfg->regs.sense_uproc_interrupt_reg);
7304 	if ((mask & IPR_PCII_HRRQ_UPDATED) == 0 || (uproc & IPR_UPROCI_RESET_ALERT))
7305 		ioa_cfg->needs_hard_reset = 1;
7306 
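	/*
	 * Mask and clear everything except the transition-to-operational
	 * interrupt before installing the interrupt handler.
	 */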
7307 	ipr_mask_and_clear_interrupts(ioa_cfg, ~IPR_PCII_IOA_TRANS_TO_OPER);
7308 	rc = request_irq(pdev->irq, ipr_isr, IRQF_SHARED, IPR_NAME, ioa_cfg);
7309 
7310 	if (rc) {
7311 		dev_err(&pdev->dev, "Couldn't register IRQ %d! rc=%d\n",
7312 			pdev->irq, rc);
7313 		goto cleanup_nolog;
7314 	}
7315 
7316 	spin_lock(&ipr_driver_lock);
7317 	list_add_tail(&ioa_cfg->queue, &ipr_ioa_head);
7318 	spin_unlock(&ipr_driver_lock);
7319 
7320 	LEAVE;
7321 out:
7322 	return rc;
7323 
7324 cleanup_nolog:
7325 	ipr_free_mem(ioa_cfg);
7326 cleanup_nomem:
7327 	iounmap(ipr_regs);
7328 out_release_regions:
7329 	pci_release_regions(pdev);
7330 out_scsi_host_put:
7331 	scsi_host_put(host);
7332 out_disable:
7333 	pci_disable_device(pdev);
7334 	goto out;
7335 }
7336 
7337 /**
7338  * ipr_scan_vsets - Scans for VSET devices
7339  * @ioa_cfg:	ioa config struct
7340  *
 * Description: Since the VSET resources do not follow SAM (LUNs may be
 * sparse and LUN 0 may not exist), the midlayer scan can miss them, so
 * we have to scan for these devices ourselves.
7343  *
7344  * Return value:
7345  * 	none
7346  **/
7347 static void ipr_scan_vsets(struct ipr_ioa_cfg *ioa_cfg)
7348 {
7349 	int target, lun;
7350 
7351 	for (target = 0; target < IPR_MAX_NUM_TARGETS_PER_BUS; target++)
		for (lun = 0; lun < IPR_MAX_NUM_VSET_LUNS_PER_TARGET; lun++)
7353 			scsi_add_device(ioa_cfg->host, IPR_VSET_BUS, target, lun);
7354 }
7355 
7356 /**
7357  * ipr_initiate_ioa_bringdown - Bring down an adapter
7358  * @ioa_cfg:		ioa config struct
7359  * @shutdown_type:	shutdown type
7360  *
7361  * Description: This function will initiate bringing down the adapter.
7362  * This consists of issuing an IOA shutdown to the adapter
7363  * to flush the cache, and running BIST.
7364  * If the caller needs to wait on the completion of the reset,
7365  * the caller must sleep on the reset_wait_q.
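 *
 * For example, to shut down an adapter and wait for completion (a
 * sketch of the pattern used by ipr_shutdown() below; the host lock
 * must be held around the call):
 *
 *	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
 *	ipr_initiate_ioa_bringdown(ioa_cfg, IPR_SHUTDOWN_NORMAL);
 *	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
 *	wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);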
7366  *
7367  * Return value:
7368  * 	none
7369  **/
7370 static void ipr_initiate_ioa_bringdown(struct ipr_ioa_cfg *ioa_cfg,
7371 				       enum ipr_shutdown_type shutdown_type)
7372 {
7373 	ENTER;
7374 	if (ioa_cfg->sdt_state == WAIT_FOR_DUMP)
7375 		ioa_cfg->sdt_state = ABORT_DUMP;
7376 	ioa_cfg->reset_retries = 0;
7377 	ioa_cfg->in_ioa_bringdown = 1;
7378 	ipr_initiate_ioa_reset(ioa_cfg, shutdown_type);
7379 	LEAVE;
7380 }
7381 
7382 /**
7383  * __ipr_remove - Remove a single adapter
7384  * @pdev:	pci device struct
7385  *
7386  * Adapter hot plug remove entry point.
7387  *
7388  * Return value:
7389  * 	none
7390  **/
7391 static void __ipr_remove(struct pci_dev *pdev)
7392 {
7393 	unsigned long host_lock_flags = 0;
7394 	struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
7395 	ENTER;
7396 
7397 	spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
7398 	ipr_initiate_ioa_bringdown(ioa_cfg, IPR_SHUTDOWN_NORMAL);
7399 
7400 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
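	/*
	 * The host lock must be dropped here; waiting for the reset and
	 * flushing scheduled work can both sleep.
	 */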
7401 	wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
7402 	flush_scheduled_work();
7403 	spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
7404 
7405 	spin_lock(&ipr_driver_lock);
7406 	list_del(&ioa_cfg->queue);
7407 	spin_unlock(&ipr_driver_lock);
7408 
7409 	if (ioa_cfg->sdt_state == ABORT_DUMP)
7410 		ioa_cfg->sdt_state = WAIT_FOR_DUMP;
7411 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
7412 
7413 	ipr_free_all_resources(ioa_cfg);
7414 
7415 	LEAVE;
7416 }
7417 
7418 /**
7419  * ipr_remove - IOA hot plug remove entry point
7420  * @pdev:	pci device struct
7421  *
7422  * Adapter hot plug remove entry point.
7423  *
7424  * Return value:
7425  * 	none
7426  **/
7427 static void ipr_remove(struct pci_dev *pdev)
7428 {
7429 	struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
7430 
7431 	ENTER;
7432 
7433 	ipr_remove_trace_file(&ioa_cfg->host->shost_classdev.kobj,
7434 			      &ipr_trace_attr);
7435 	ipr_remove_dump_file(&ioa_cfg->host->shost_classdev.kobj,
7436 			     &ipr_dump_attr);
7437 	scsi_remove_host(ioa_cfg->host);
7438 
7439 	__ipr_remove(pdev);
7440 
7441 	LEAVE;
7442 }
7443 
7444 /**
 * ipr_probe - Adapter hot plug add entry point
 * @pdev:	PCI device struct
 * @dev_id:	PCI device id struct
 *
7447  * Return value:
7448  * 	0 on success / non-zero on failure
7449  **/
7450 static int __devinit ipr_probe(struct pci_dev *pdev,
7451 			       const struct pci_device_id *dev_id)
7452 {
7453 	struct ipr_ioa_cfg *ioa_cfg;
7454 	int rc;
7455 
7456 	rc = ipr_probe_ioa(pdev, dev_id);
7457 
7458 	if (rc)
7459 		return rc;
7460 
7461 	ioa_cfg = pci_get_drvdata(pdev);
7462 	rc = ipr_probe_ioa_part2(ioa_cfg);
7463 
7464 	if (rc) {
7465 		__ipr_remove(pdev);
7466 		return rc;
7467 	}
7468 
7469 	rc = scsi_add_host(ioa_cfg->host, &pdev->dev);
7470 
7471 	if (rc) {
7472 		__ipr_remove(pdev);
7473 		return rc;
7474 	}
7475 
7476 	rc = ipr_create_trace_file(&ioa_cfg->host->shost_classdev.kobj,
7477 				   &ipr_trace_attr);
7478 
7479 	if (rc) {
7480 		scsi_remove_host(ioa_cfg->host);
7481 		__ipr_remove(pdev);
7482 		return rc;
7483 	}
7484 
7485 	rc = ipr_create_dump_file(&ioa_cfg->host->shost_classdev.kobj,
7486 				   &ipr_dump_attr);
7487 
7488 	if (rc) {
7489 		ipr_remove_trace_file(&ioa_cfg->host->shost_classdev.kobj,
7490 				      &ipr_trace_attr);
7491 		scsi_remove_host(ioa_cfg->host);
7492 		__ipr_remove(pdev);
7493 		return rc;
7494 	}
7495 
7496 	scsi_scan_host(ioa_cfg->host);
7497 	ipr_scan_vsets(ioa_cfg);
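	/* The adapter itself is exposed as a SCSI device at a reserved
	 * bus/target/LUN.
	 */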
7498 	scsi_add_device(ioa_cfg->host, IPR_IOA_BUS, IPR_IOA_TARGET, IPR_IOA_LUN);
7499 	ioa_cfg->allow_ml_add_del = 1;
7500 	ioa_cfg->host->max_channel = IPR_VSET_BUS;
7501 	schedule_work(&ioa_cfg->work_q);
7502 	return 0;
7503 }
7504 
7505 /**
7506  * ipr_shutdown - Shutdown handler.
7507  * @pdev:	pci device struct
7508  *
7509  * This function is invoked upon system shutdown/reboot. It will issue
7510  * an adapter shutdown to the adapter to flush the write cache.
7511  *
7512  * Return value:
7513  * 	none
7514  **/
7515 static void ipr_shutdown(struct pci_dev *pdev)
7516 {
7517 	struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
7518 	unsigned long lock_flags = 0;
7519 
7520 	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
7521 	ipr_initiate_ioa_bringdown(ioa_cfg, IPR_SHUTDOWN_NORMAL);
7522 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
7523 	wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
7524 }
7525 
7526 static struct pci_device_id ipr_pci_table[] __devinitdata = {
7527 	{ PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
7528 		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_5702, 0, 0, 0 },
7529 	{ PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
7530 		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_5703, 0, 0, 0 },
7531 	{ PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
7532 		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_573D, 0, 0, 0 },
7533 	{ PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
7534 		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_573E, 0, 0, 0 },
7535 	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE,
7536 		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571B, 0, 0, 0 },
7537 	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE,
7538 		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572E, 0, 0, 0 },
7539 	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE,
7540 		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571A, 0, 0, 0 },
7541 	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE,
7542 		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_575B, 0, 0, 0 },
	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572A, 0, 0, 0 },
	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572B, 0, 0, 0 },
	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_575C, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572A, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572B, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_575C, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B7, 0, 0, 0 },
7557 	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_SNIPE,
7558 		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_2780, 0, 0, 0 },
7559 	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP,
7560 		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571E, 0, 0, 0 },
7561 	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP,
7562 		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571F, 0, 0, 0 },
7563 	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP,
7564 		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572F, 0, 0, 0 },
7565 	{ }
7566 };
7567 MODULE_DEVICE_TABLE(pci, ipr_pci_table);
7568 
7569 static struct pci_error_handlers ipr_err_handler = {
7570 	.error_detected = ipr_pci_error_detected,
7571 	.slot_reset = ipr_pci_slot_reset,
7572 };
7573 
7574 static struct pci_driver ipr_driver = {
7575 	.name = IPR_NAME,
7576 	.id_table = ipr_pci_table,
7577 	.probe = ipr_probe,
7578 	.remove = ipr_remove,
7579 	.shutdown = ipr_shutdown,
7580 	.err_handler = &ipr_err_handler,
7581 };
7582 
7583 /**
7584  * ipr_init - Module entry point
7585  *
7586  * Return value:
7587  * 	0 on success / negative value on failure
7588  **/
7589 static int __init ipr_init(void)
7590 {
7591 	ipr_info("IBM Power RAID SCSI Device Driver version: %s %s\n",
7592 		 IPR_DRIVER_VERSION, IPR_DRIVER_DATE);
7593 
7594 	return pci_register_driver(&ipr_driver);
7595 }
7596 
7597 /**
7598  * ipr_exit - Module unload
7599  *
7600  * Module unload entry point.
7601  *
7602  * Return value:
7603  * 	none
7604  **/
7605 static void __exit ipr_exit(void)
7606 {
7607 	pci_unregister_driver(&ipr_driver);
7608 }
7609 
7610 module_init(ipr_init);
7611 module_exit(ipr_exit);
7612