/*
 * ipr.c -- driver for IBM Power Linux RAID adapters
 *
 * Written By: Brian King <brking@us.ibm.com>, IBM Corporation
 *
 * Copyright (C) 2003, 2004 IBM Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 *
 */

/*
 * Notes:
 *
 * This driver is used to control the following SCSI adapters:
 *
 * IBM iSeries: 5702, 5703, 2780, 5709, 570A, 570B
 *
 * IBM pSeries: PCI-X Dual Channel Ultra 320 SCSI RAID Adapter
 *              PCI-X Dual Channel Ultra 320 SCSI Adapter
 *              PCI-X Dual Channel Ultra 320 SCSI RAID Enablement Card
 *              Embedded SCSI adapter on p615 and p655 systems
 *
 * Supported Hardware Features:
 *	- Ultra 320 SCSI controller
 *	- PCI-X host interface
 *	- Embedded PowerPC RISC Processor and Hardware XOR DMA Engine
 *	- Non-Volatile Write Cache
 *	- Supports attachment of non-RAID disks, tape, and optical devices
 *	- RAID Levels 0, 5, 10
 *	- Hot spare
 *	- Background Parity Checking
 *	- Background Data Scrubbing
 *	- Ability to increase the capacity of an existing RAID 5 disk array
 *		by adding disks
 *
 * Driver Features:
 *	- Tagged command queuing
 *	- Adapter microcode download
 *	- PCI hot plug
 *	- SCSI device hot plug
 *
 */

#include <linux/config.h>
#include <linux/fs.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/ioport.h>
#include <linux/delay.h>
#include <linux/pci.h>
#include <linux/wait.h>
#include <linux/spinlock.h>
#include <linux/sched.h>
#include <linux/interrupt.h>
#include <linux/blkdev.h>
#include <linux/firmware.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <asm/io.h>
#include <asm/irq.h>
#include <asm/processor.h>
#include <scsi/scsi.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_tcq.h>
#include <scsi/scsi_eh.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_request.h>
#include "ipr.h"

/*
 *   Global Data
 */
static struct list_head ipr_ioa_head = LIST_HEAD_INIT(ipr_ioa_head);
static unsigned int ipr_log_level = IPR_DEFAULT_LOG_LEVEL;
static unsigned int ipr_max_speed = 1;
static int ipr_testmode = 0;
static unsigned int ipr_fastfail = 0;
static unsigned int ipr_transop_timeout = IPR_OPERATIONAL_TIMEOUT;
static unsigned int ipr_enable_cache = 1;
static unsigned int ipr_debug = 0;
static int ipr_auto_create = 1;
static DEFINE_SPINLOCK(ipr_driver_lock);
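/* ipr_driver_lock presumably serializes access to the global ipr_ioa_head
 * adapter list above; its uses are outside this portion of the file. */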

/* This table describes the differences between DMA controller chips */
static const struct ipr_chip_cfg_t ipr_chip_cfg[] = {
	{ /* Gemstone, Citrine, and Obsidian */
		.mailbox = 0x0042C,
		.cache_line_size = 0x20,
		{
			.set_interrupt_mask_reg = 0x0022C,
			.clr_interrupt_mask_reg = 0x00230,
			.sense_interrupt_mask_reg = 0x0022C,
			.clr_interrupt_reg = 0x00228,
			.sense_interrupt_reg = 0x00224,
			.ioarrin_reg = 0x00404,
			.sense_uproc_interrupt_reg = 0x00214,
			.set_uproc_interrupt_reg = 0x00214,
			.clr_uproc_interrupt_reg = 0x00218
		}
	},
	{ /* Snipe and Scamp */
		.mailbox = 0x0052C,
		.cache_line_size = 0x20,
		{
			.set_interrupt_mask_reg = 0x00288,
			.clr_interrupt_mask_reg = 0x0028C,
			.sense_interrupt_mask_reg = 0x00288,
			.clr_interrupt_reg = 0x00284,
			.sense_interrupt_reg = 0x00280,
			.ioarrin_reg = 0x00504,
			.sense_uproc_interrupt_reg = 0x00290,
			.set_uproc_interrupt_reg = 0x00290,
			.clr_uproc_interrupt_reg = 0x00294
		}
	},
};

static const struct ipr_chip_t ipr_chip[] = {
	{ PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE, &ipr_chip_cfg[0] },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE, &ipr_chip_cfg[0] },
	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN, &ipr_chip_cfg[0] },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN, &ipr_chip_cfg[0] },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_SNIPE, &ipr_chip_cfg[1] },
	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP, &ipr_chip_cfg[1] }
};

static int ipr_max_bus_speeds[] = {
	IPR_80MBs_SCSI_RATE, IPR_U160_SCSI_RATE, IPR_U320_SCSI_RATE
};

MODULE_AUTHOR("Brian King <brking@us.ibm.com>");
MODULE_DESCRIPTION("IBM Power RAID SCSI Adapter Driver");
module_param_named(max_speed, ipr_max_speed, uint, 0);
MODULE_PARM_DESC(max_speed, "Maximum bus speed (0-2). Default: 1=U160. Speeds: 0=80 MB/s, 1=U160, 2=U320");
module_param_named(log_level, ipr_log_level, uint, 0);
MODULE_PARM_DESC(log_level, "Set to 0 - 4 for increasing verbosity of device driver");
module_param_named(testmode, ipr_testmode, int, 0);
MODULE_PARM_DESC(testmode, "DANGEROUS!!! Allows unsupported configurations");
module_param_named(fastfail, ipr_fastfail, int, 0);
MODULE_PARM_DESC(fastfail, "Reduce timeouts and retries");
module_param_named(transop_timeout, ipr_transop_timeout, int, 0);
MODULE_PARM_DESC(transop_timeout, "Time in seconds to wait for adapter to come operational (default: 300)");
module_param_named(enable_cache, ipr_enable_cache, int, 0);
MODULE_PARM_DESC(enable_cache, "Enable adapter's non-volatile write cache (default: 1)");
module_param_named(debug, ipr_debug, int, 0);
MODULE_PARM_DESC(debug, "Enable device driver debug logging. Set to 1 to enable. (default: 0)");
module_param_named(auto_create, ipr_auto_create, int, 0);
MODULE_PARM_DESC(auto_create, "Auto-create single device RAID 0 arrays when initialized (default: 1)");
MODULE_LICENSE("GPL");
MODULE_VERSION(IPR_DRIVER_VERSION);
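
/*
 * Example (illustrative only): all of the parameters above are module load
 * options, so a hypothetical invocation such as
 *
 *   modprobe ipr max_speed=2 log_level=2
 *
 * would permit U320 negotiation and raise the driver's log verbosity.
 */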

static const char *ipr_gpdd_dev_end_states[] = {
	"Command complete",
	"Terminated by host",
	"Terminated by device reset",
	"Terminated by bus reset",
	"Unknown",
	"Command not started"
};

static const char *ipr_gpdd_dev_bus_phases[] = {
	"Bus free",
	"Arbitration",
	"Selection",
	"Message out",
	"Command",
	"Message in",
	"Data out",
	"Data in",
	"Status",
	"Reselection",
	"Unknown"
};

/*  A constant array of IOASCs/URCs/Error Messages */
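/* In each entry below, the two flags following the IOASC control logging
 * of the IOASA and the HCAM, respectively (presumed from the field order
 * of struct ipr_error_table_t in ipr.h and its uses further down). */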
static const
struct ipr_error_table_t ipr_error_table[] = {
	{0x00000000, 1, 1,
	"8155: An unknown error was received"},
	{0x00330000, 0, 0,
	"Soft underlength error"},
	{0x005A0000, 0, 0,
	"Command to be cancelled not found"},
	{0x00808000, 0, 0,
	"Qualified success"},
	{0x01080000, 1, 1,
	"FFFE: Soft device bus error recovered by the IOA"},
	{0x01170600, 0, 1,
	"FFF9: Device sector reassign successful"},
	{0x01170900, 0, 1,
	"FFF7: Media error recovered by device rewrite procedures"},
	{0x01180200, 0, 1,
	"7001: IOA sector reassignment successful"},
	{0x01180500, 0, 1,
	"FFF9: Soft media error. Sector reassignment recommended"},
	{0x01180600, 0, 1,
	"FFF7: Media error recovered by IOA rewrite procedures"},
	{0x01418000, 0, 1,
	"FF3D: Soft PCI bus error recovered by the IOA"},
	{0x01440000, 1, 1,
	"FFF6: Device hardware error recovered by the IOA"},
	{0x01448100, 0, 1,
	"FFF6: Device hardware error recovered by the device"},
	{0x01448200, 1, 1,
	"FF3D: Soft IOA error recovered by the IOA"},
	{0x01448300, 0, 1,
	"FFFA: Undefined device response recovered by the IOA"},
	{0x014A0000, 1, 1,
	"FFF6: Device bus error, message or command phase"},
	{0x015D0000, 0, 1,
	"FFF6: Failure prediction threshold exceeded"},
	{0x015D9200, 0, 1,
	"8009: Impending cache battery pack failure"},
	{0x02040400, 0, 0,
	"34FF: Disk device format in progress"},
	{0x023F0000, 0, 0,
	"Synchronization required"},
	{0x024E0000, 0, 0,
	"Not ready, IOA shutdown"},
	{0x025A0000, 0, 0,
	"Not ready, IOA has been shutdown"},
	{0x02670100, 0, 1,
	"3020: Storage subsystem configuration error"},
	{0x03110B00, 0, 0,
	"FFF5: Medium error, data unreadable, recommend reassign"},
	{0x03110C00, 0, 0,
	"7000: Medium error, data unreadable, do not reassign"},
	{0x03310000, 0, 1,
	"FFF3: Disk media format bad"},
	{0x04050000, 0, 1,
	"3002: Addressed device failed to respond to selection"},
	{0x04080000, 1, 1,
	"3100: Device bus error"},
	{0x04080100, 0, 1,
	"3109: IOA timed out a device command"},
	{0x04088000, 0, 0,
	"3120: SCSI bus is not operational"},
	{0x04118000, 0, 1,
	"9000: IOA reserved area data check"},
	{0x04118100, 0, 1,
	"9001: IOA reserved area invalid data pattern"},
	{0x04118200, 0, 1,
	"9002: IOA reserved area LRC error"},
	{0x04320000, 0, 1,
	"102E: Out of alternate sectors for disk storage"},
	{0x04330000, 1, 1,
	"FFF4: Data transfer underlength error"},
	{0x04338000, 1, 1,
	"FFF4: Data transfer overlength error"},
	{0x043E0100, 0, 1,
	"3400: Logical unit failure"},
	{0x04408500, 0, 1,
	"FFF4: Device microcode is corrupt"},
	{0x04418000, 1, 1,
	"8150: PCI bus error"},
	{0x04430000, 1, 0,
	"Unsupported device bus message received"},
	{0x04440000, 1, 1,
	"FFF4: Disk device problem"},
	{0x04448200, 1, 1,
	"8150: Permanent IOA failure"},
	{0x04448300, 0, 1,
	"3010: Disk device returned wrong response to IOA"},
	{0x04448400, 0, 1,
	"8151: IOA microcode error"},
	{0x04448500, 0, 0,
	"Device bus status error"},
	{0x04448600, 0, 1,
	"8157: IOA error requiring IOA reset to recover"},
	{0x04490000, 0, 0,
	"Message reject received from the device"},
	{0x04449200, 0, 1,
	"8008: A permanent cache battery pack failure occurred"},
	{0x0444A000, 0, 1,
	"9090: Disk unit has been modified after the last known status"},
	{0x0444A200, 0, 1,
	"9081: IOA detected device error"},
	{0x0444A300, 0, 1,
	"9082: IOA detected device error"},
	{0x044A0000, 1, 1,
	"3110: Device bus error, message or command phase"},
	{0x04670400, 0, 1,
	"9091: Incorrect hardware configuration change has been detected"},
	{0x04678000, 0, 1,
	"9073: Invalid multi-adapter configuration"},
	{0x046E0000, 0, 1,
	"FFF4: Command to logical unit failed"},
	{0x05240000, 1, 0,
	"Illegal request, invalid request type or request packet"},
	{0x05250000, 0, 0,
	"Illegal request, invalid resource handle"},
	{0x05258000, 0, 0,
	"Illegal request, commands not allowed to this device"},
	{0x05258100, 0, 0,
	"Illegal request, command not allowed to a secondary adapter"},
	{0x05260000, 0, 0,
	"Illegal request, invalid field in parameter list"},
	{0x05260100, 0, 0,
	"Illegal request, parameter not supported"},
	{0x05260200, 0, 0,
	"Illegal request, parameter value invalid"},
	{0x052C0000, 0, 0,
	"Illegal request, command sequence error"},
	{0x052C8000, 1, 0,
	"Illegal request, dual adapter support not enabled"},
	{0x06040500, 0, 1,
	"9031: Array protection temporarily suspended, protection resuming"},
	{0x06040600, 0, 1,
	"9040: Array protection temporarily suspended, protection resuming"},
	{0x06290000, 0, 1,
	"FFFB: SCSI bus was reset"},
	{0x06290500, 0, 0,
	"FFFE: SCSI bus transition to single ended"},
	{0x06290600, 0, 0,
	"FFFE: SCSI bus transition to LVD"},
	{0x06298000, 0, 1,
	"FFFB: SCSI bus was reset by another initiator"},
	{0x063F0300, 0, 1,
	"3029: A device replacement has occurred"},
	{0x064C8000, 0, 1,
	"9051: IOA cache data exists for a missing or failed device"},
	{0x064C8100, 0, 1,
	"9055: Auxiliary cache IOA contains cache data needed by the primary IOA"},
	{0x06670100, 0, 1,
	"9025: Disk unit is not supported at its physical location"},
	{0x06670600, 0, 1,
	"3020: IOA detected a SCSI bus configuration error"},
	{0x06678000, 0, 1,
	"3150: SCSI bus configuration error"},
	{0x06678100, 0, 1,
	"9074: Asymmetric advanced function disk configuration"},
	{0x06690200, 0, 1,
	"9041: Array protection temporarily suspended"},
	{0x06698200, 0, 1,
	"9042: Corrupt array parity detected on specified device"},
	{0x066B0200, 0, 1,
	"9030: Array no longer protected due to missing or failed disk unit"},
	{0x066B8000, 0, 1,
	"9071: Link operational transition"},
	{0x066B8100, 0, 1,
	"9072: Link not operational transition"},
	{0x066B8200, 0, 1,
	"9032: Array exposed but still protected"},
	{0x07270000, 0, 0,
	"Failure due to other device"},
	{0x07278000, 0, 1,
	"9008: IOA does not support functions expected by devices"},
	{0x07278100, 0, 1,
	"9010: Cache data associated with attached devices cannot be found"},
	{0x07278200, 0, 1,
	"9011: Cache data belongs to devices other than those attached"},
	{0x07278400, 0, 1,
	"9020: Array missing 2 or more devices with only 1 device present"},
	{0x07278500, 0, 1,
	"9021: Array missing 2 or more devices with 2 or more devices present"},
	{0x07278600, 0, 1,
	"9022: Exposed array is missing a required device"},
	{0x07278700, 0, 1,
	"9023: Array member(s) not at required physical locations"},
	{0x07278800, 0, 1,
	"9024: Array not functional due to present hardware configuration"},
	{0x07278900, 0, 1,
	"9026: Array not functional due to present hardware configuration"},
	{0x07278A00, 0, 1,
	"9027: Array is missing a device and parity is out of sync"},
	{0x07278B00, 0, 1,
	"9028: Maximum number of arrays already exist"},
	{0x07278C00, 0, 1,
	"9050: Required cache data cannot be located for a disk unit"},
	{0x07278D00, 0, 1,
	"9052: Cache data exists for a device that has been modified"},
	{0x07278F00, 0, 1,
	"9054: IOA resources not available due to previous problems"},
	{0x07279100, 0, 1,
	"9092: Disk unit requires initialization before use"},
	{0x07279200, 0, 1,
	"9029: Incorrect hardware configuration change has been detected"},
	{0x07279600, 0, 1,
	"9060: One or more disk pairs are missing from an array"},
	{0x07279700, 0, 1,
	"9061: One or more disks are missing from an array"},
	{0x07279800, 0, 1,
	"9062: One or more disks are missing from an array"},
	{0x07279900, 0, 1,
	"9063: Maximum number of functional arrays has been exceeded"},
	{0x0B260000, 0, 0,
	"Aborted command, invalid descriptor"},
	{0x0B5A0000, 0, 0,
	"Command terminated by host"}
};

static const struct ipr_ses_table_entry ipr_ses_table[] = {
	{ "2104-DL1        ", "XXXXXXXXXXXXXXXX", 80 },
	{ "2104-TL1        ", "XXXXXXXXXXXXXXXX", 80 },
	{ "HSBP07M P U2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* Hidive 7 slot */
	{ "HSBP05M P U2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* Hidive 5 slot */
	{ "HSBP05M S U2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* Bowtie */
	{ "HSBP06E ASU2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* MartinFenning */
	{ "2104-DU3        ", "XXXXXXXXXXXXXXXX", 160 },
	{ "2104-TU3        ", "XXXXXXXXXXXXXXXX", 160 },
	{ "HSBP04C RSU2SCSI", "XXXXXXX*XXXXXXXX", 160 },
	{ "HSBP06E RSU2SCSI", "XXXXXXX*XXXXXXXX", 160 },
	{ "St  V1S2        ", "XXXXXXXXXXXXXXXX", 160 },
	{ "HSBPD4M  PU3SCSI", "XXXXXXX*XXXXXXXX", 160 },
	{ "VSBPD1H   U3SCSI", "XXXXXXX*XXXXXXXX", 160 }
};

/*
 *  Function Prototypes
 */
static int ipr_reset_alert(struct ipr_cmnd *);
static void ipr_process_ccn(struct ipr_cmnd *);
static void ipr_process_error(struct ipr_cmnd *);
static void ipr_reset_ioa_job(struct ipr_cmnd *);
static void ipr_initiate_ioa_reset(struct ipr_ioa_cfg *,
				   enum ipr_shutdown_type);

#ifdef CONFIG_SCSI_IPR_TRACE
/**
 * ipr_trc_hook - Add a trace entry to the driver trace
 * @ipr_cmd:	ipr command struct
 * @type:		trace type
 * @add_data:	additional data
 *
 * Return value:
 * 	none
 **/
static void ipr_trc_hook(struct ipr_cmnd *ipr_cmd,
			 u8 type, u32 add_data)
{
	struct ipr_trace_entry *trace_entry;
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;

	trace_entry = &ioa_cfg->trace[ioa_cfg->trace_index++];
	trace_entry->time = jiffies;
	trace_entry->op_code = ipr_cmd->ioarcb.cmd_pkt.cdb[0];
	trace_entry->type = type;
	trace_entry->cmd_index = ipr_cmd->cmd_index;
	trace_entry->res_handle = ipr_cmd->ioarcb.res_handle;
	trace_entry->u.add_data = add_data;
}
#else
#define ipr_trc_hook(ipr_cmd, type, add_data) do { } while (0)
#endif

/**
 * ipr_reinit_ipr_cmnd - Re-initialize an IPR Cmnd block for reuse
 * @ipr_cmd:	ipr command struct
 *
 * Return value:
 * 	none
 **/
static void ipr_reinit_ipr_cmnd(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
	struct ipr_ioasa *ioasa = &ipr_cmd->ioasa;

	memset(&ioarcb->cmd_pkt, 0, sizeof(struct ipr_cmd_pkt));
	ioarcb->write_data_transfer_length = 0;
	ioarcb->read_data_transfer_length = 0;
	ioarcb->write_ioadl_len = 0;
	ioarcb->read_ioadl_len = 0;
	ioasa->ioasc = 0;
	ioasa->residual_data_len = 0;

	ipr_cmd->scsi_cmd = NULL;
	ipr_cmd->sense_buffer[0] = 0;
	ipr_cmd->dma_use_sg = 0;
}

/**
 * ipr_init_ipr_cmnd - Initialize an IPR Cmnd block
 * @ipr_cmd:	ipr command struct
 *
 * Return value:
 * 	none
 **/
static void ipr_init_ipr_cmnd(struct ipr_cmnd *ipr_cmd)
{
	ipr_reinit_ipr_cmnd(ipr_cmd);
	ipr_cmd->u.scratch = 0;
	ipr_cmd->sibling = NULL;
	init_timer(&ipr_cmd->timer);
}

/**
 * ipr_get_free_ipr_cmnd - Get a free IPR Cmnd block
 * @ioa_cfg:	ioa config struct
 *
 * Return value:
 * 	pointer to ipr command struct
 **/
static
struct ipr_cmnd *ipr_get_free_ipr_cmnd(struct ipr_ioa_cfg *ioa_cfg)
{
	struct ipr_cmnd *ipr_cmd;

	ipr_cmd = list_entry(ioa_cfg->free_q.next, struct ipr_cmnd, queue);
	list_del(&ipr_cmd->queue);
	ipr_init_ipr_cmnd(ipr_cmd);

	return ipr_cmd;
}

/**
 * ipr_unmap_sglist - Unmap scatterlist if mapped
 * @ioa_cfg:	ioa config struct
 * @ipr_cmd:	ipr command struct
 *
 * Return value:
 * 	nothing
 **/
static void ipr_unmap_sglist(struct ipr_ioa_cfg *ioa_cfg,
			     struct ipr_cmnd *ipr_cmd)
{
	struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;

	if (ipr_cmd->dma_use_sg) {
		if (scsi_cmd->use_sg > 0) {
			pci_unmap_sg(ioa_cfg->pdev, scsi_cmd->request_buffer,
				     scsi_cmd->use_sg,
				     scsi_cmd->sc_data_direction);
		} else {
			pci_unmap_single(ioa_cfg->pdev, ipr_cmd->dma_handle,
					 scsi_cmd->request_bufflen,
					 scsi_cmd->sc_data_direction);
		}
	}
}

/**
 * ipr_mask_and_clear_interrupts - Mask all and clear specified interrupts
 * @ioa_cfg:	ioa config struct
 * @clr_ints:     interrupts to clear
 *
 * This function masks all interrupts on the adapter, then clears the
 * interrupts specified in the mask
 *
 * Return value:
 * 	none
 **/
static void ipr_mask_and_clear_interrupts(struct ipr_ioa_cfg *ioa_cfg,
					  u32 clr_ints)
{
	volatile u32 int_reg;

	/* Stop new interrupts */
	ioa_cfg->allow_interrupts = 0;

	/* Set interrupt mask to stop all new interrupts */
	writel(~0, ioa_cfg->regs.set_interrupt_mask_reg);

	/* Clear any pending interrupts */
	writel(clr_ints, ioa_cfg->regs.clr_interrupt_reg);
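	/* The read below also serves to flush the posted MMIO writes
	 * above to the adapter (presumed intent; the value read is unused) */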
	int_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
}

/**
 * ipr_save_pcix_cmd_reg - Save PCI-X command register
 * @ioa_cfg:	ioa config struct
 *
 * Return value:
 * 	0 on success / -EIO on failure
 **/
static int ipr_save_pcix_cmd_reg(struct ipr_ioa_cfg *ioa_cfg)
{
	int pcix_cmd_reg = pci_find_capability(ioa_cfg->pdev, PCI_CAP_ID_PCIX);

	if (pcix_cmd_reg == 0) {
		dev_err(&ioa_cfg->pdev->dev, "Failed to save PCI-X command register\n");
		return -EIO;
	}

	if (pci_read_config_word(ioa_cfg->pdev, pcix_cmd_reg + PCI_X_CMD,
				 &ioa_cfg->saved_pcix_cmd_reg) != PCIBIOS_SUCCESSFUL) {
		dev_err(&ioa_cfg->pdev->dev, "Failed to save PCI-X command register\n");
		return -EIO;
	}

	ioa_cfg->saved_pcix_cmd_reg |= PCI_X_CMD_DPERR_E | PCI_X_CMD_ERO;
	return 0;
}

/**
 * ipr_set_pcix_cmd_reg - Setup PCI-X command register
 * @ioa_cfg:	ioa config struct
 *
 * Return value:
 * 	0 on success / -EIO on failure
 **/
static int ipr_set_pcix_cmd_reg(struct ipr_ioa_cfg *ioa_cfg)
{
	int pcix_cmd_reg = pci_find_capability(ioa_cfg->pdev, PCI_CAP_ID_PCIX);

	if (pcix_cmd_reg) {
		if (pci_write_config_word(ioa_cfg->pdev, pcix_cmd_reg + PCI_X_CMD,
					  ioa_cfg->saved_pcix_cmd_reg) != PCIBIOS_SUCCESSFUL) {
			dev_err(&ioa_cfg->pdev->dev, "Failed to setup PCI-X command register\n");
			return -EIO;
		}
	} else {
		dev_err(&ioa_cfg->pdev->dev,
			"Failed to setup PCI-X command register\n");
		return -EIO;
	}

	return 0;
}

/**
 * ipr_scsi_eh_done - mid-layer done function for aborted ops
 * @ipr_cmd:	ipr command struct
 *
 * This function is invoked by the interrupt handler for
 * ops generated by the SCSI mid-layer which are being aborted.
 *
 * Return value:
 * 	none
 **/
static void ipr_scsi_eh_done(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;

	scsi_cmd->result |= (DID_ERROR << 16);

	ipr_unmap_sglist(ioa_cfg, ipr_cmd);
	scsi_cmd->scsi_done(scsi_cmd);
	list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
}

/**
 * ipr_fail_all_ops - Fails all outstanding ops.
 * @ioa_cfg:	ioa config struct
 *
 * This function fails all outstanding ops.
 *
 * Return value:
 * 	none
 **/
static void ipr_fail_all_ops(struct ipr_ioa_cfg *ioa_cfg)
{
	struct ipr_cmnd *ipr_cmd, *temp;

	ENTER;
	list_for_each_entry_safe(ipr_cmd, temp, &ioa_cfg->pending_q, queue) {
		list_del(&ipr_cmd->queue);

		ipr_cmd->ioasa.ioasc = cpu_to_be32(IPR_IOASC_IOA_WAS_RESET);
		ipr_cmd->ioasa.ilid = cpu_to_be32(IPR_DRIVER_ILID);

		if (ipr_cmd->scsi_cmd)
			ipr_cmd->done = ipr_scsi_eh_done;

		ipr_trc_hook(ipr_cmd, IPR_TRACE_FINISH, IPR_IOASC_IOA_WAS_RESET);
		del_timer(&ipr_cmd->timer);
		ipr_cmd->done(ipr_cmd);
	}

	LEAVE;
}

/**
 * ipr_do_req -  Send driver initiated requests.
 * @ipr_cmd:		ipr command struct
 * @done:			done function
 * @timeout_func:	timeout function
 * @timeout:		timeout value
 *
 * This function sends the specified command to the adapter with the
 * timeout given. The done function is invoked on command completion.
 *
 * Return value:
 * 	none
 **/
static void ipr_do_req(struct ipr_cmnd *ipr_cmd,
		       void (*done) (struct ipr_cmnd *),
		       void (*timeout_func) (struct ipr_cmnd *), u32 timeout)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;

	list_add_tail(&ipr_cmd->queue, &ioa_cfg->pending_q);

	ipr_cmd->done = done;

	ipr_cmd->timer.data = (unsigned long) ipr_cmd;
	ipr_cmd->timer.expires = jiffies + timeout;
	ipr_cmd->timer.function = (void (*)(unsigned long))timeout_func;

	add_timer(&ipr_cmd->timer);

	ipr_trc_hook(ipr_cmd, IPR_TRACE_START, 0);

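	/* Make sure the IOARCB updates above are visible in memory before
	 * the MMIO write below tells the adapter to fetch the command */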
	mb();
	writel(be32_to_cpu(ipr_cmd->ioarcb.ioarcb_host_pci_addr),
	       ioa_cfg->regs.ioarrin_reg);
}
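
/*
 * Note: ipr_do_req() above touches the shared pending queue and the command
 * timer without any locking of its own; callers are presumed to hold the
 * host lock, as ipr_send_blocking_cmd() below suggests by dropping it only
 * around the wait for completion.
 */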

/**
 * ipr_internal_cmd_done - Op done function for an internally generated op.
 * @ipr_cmd:	ipr command struct
 *
 * This function is the op done function for an internally generated,
 * blocking op. It simply wakes the sleeping thread.
 *
 * Return value:
 * 	none
 **/
static void ipr_internal_cmd_done(struct ipr_cmnd *ipr_cmd)
{
	if (ipr_cmd->sibling)
		ipr_cmd->sibling = NULL;
	else
		complete(&ipr_cmd->completion);
}

/**
 * ipr_send_blocking_cmd - Send command and sleep on its completion.
 * @ipr_cmd:	ipr command struct
 * @timeout_func:	function to invoke if command times out
 * @timeout:	timeout
 *
 * Return value:
 * 	none
 **/
static void ipr_send_blocking_cmd(struct ipr_cmnd *ipr_cmd,
				  void (*timeout_func) (struct ipr_cmnd *ipr_cmd),
				  u32 timeout)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;

	init_completion(&ipr_cmd->completion);
	ipr_do_req(ipr_cmd, ipr_internal_cmd_done, timeout_func, timeout);

	spin_unlock_irq(ioa_cfg->host->host_lock);
	wait_for_completion(&ipr_cmd->completion);
	spin_lock_irq(ioa_cfg->host->host_lock);
}

/**
 * ipr_send_hcam - Send an HCAM to the adapter.
 * @ioa_cfg:	ioa config struct
 * @type:		HCAM type
 * @hostrcb:	hostrcb struct
 *
 * This function will send a Host Controlled Async command to the adapter.
 * If HCAMs are currently not allowed to be issued to the adapter, it will
 * place the hostrcb on the free queue.
 *
 * Return value:
 * 	none
 **/
static void ipr_send_hcam(struct ipr_ioa_cfg *ioa_cfg, u8 type,
			  struct ipr_hostrcb *hostrcb)
{
	struct ipr_cmnd *ipr_cmd;
	struct ipr_ioarcb *ioarcb;

	if (ioa_cfg->allow_cmds) {
		ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
		list_add_tail(&ipr_cmd->queue, &ioa_cfg->pending_q);
		list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_pending_q);

		ipr_cmd->u.hostrcb = hostrcb;
		ioarcb = &ipr_cmd->ioarcb;

		ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
		ioarcb->cmd_pkt.request_type = IPR_RQTYPE_HCAM;
		ioarcb->cmd_pkt.cdb[0] = IPR_HOST_CONTROLLED_ASYNC;
		ioarcb->cmd_pkt.cdb[1] = type;
		ioarcb->cmd_pkt.cdb[7] = (sizeof(hostrcb->hcam) >> 8) & 0xff;
		ioarcb->cmd_pkt.cdb[8] = sizeof(hostrcb->hcam) & 0xff;

		ioarcb->read_data_transfer_length = cpu_to_be32(sizeof(hostrcb->hcam));
		ioarcb->read_ioadl_len = cpu_to_be32(sizeof(struct ipr_ioadl_desc));
		ipr_cmd->ioadl[0].flags_and_data_len =
			cpu_to_be32(IPR_IOADL_FLAGS_READ_LAST | sizeof(hostrcb->hcam));
		ipr_cmd->ioadl[0].address = cpu_to_be32(hostrcb->hostrcb_dma);

		if (type == IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE)
			ipr_cmd->done = ipr_process_ccn;
		else
			ipr_cmd->done = ipr_process_error;

		ipr_trc_hook(ipr_cmd, IPR_TRACE_START, IPR_IOA_RES_ADDR);

		mb();
		writel(be32_to_cpu(ipr_cmd->ioarcb.ioarcb_host_pci_addr),
		       ioa_cfg->regs.ioarrin_reg);
	} else {
		list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_free_q);
	}
}

/**
 * ipr_init_res_entry - Initialize a resource entry struct.
 * @res:	resource entry struct
 *
 * Return value:
 * 	none
 **/
static void ipr_init_res_entry(struct ipr_resource_entry *res)
{
	res->needs_sync_complete = 0;
	res->in_erp = 0;
	res->add_to_ml = 0;
	res->del_from_ml = 0;
	res->resetting_device = 0;
	res->sdev = NULL;
}

/**
 * ipr_handle_config_change - Handle a config change from the adapter
 * @ioa_cfg:	ioa config struct
 * @hostrcb:	hostrcb
 *
 * Return value:
 * 	none
 **/
static void ipr_handle_config_change(struct ipr_ioa_cfg *ioa_cfg,
			      struct ipr_hostrcb *hostrcb)
{
	struct ipr_resource_entry *res = NULL;
	struct ipr_config_table_entry *cfgte;
	u32 is_ndn = 1;

	cfgte = &hostrcb->hcam.u.ccn.cfgte;

	list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
		if (!memcmp(&res->cfgte.res_addr, &cfgte->res_addr,
			    sizeof(cfgte->res_addr))) {
			is_ndn = 0;
			break;
		}
	}

	if (is_ndn) {
		if (list_empty(&ioa_cfg->free_res_q)) {
			ipr_send_hcam(ioa_cfg,
				      IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE,
				      hostrcb);
			return;
		}

		res = list_entry(ioa_cfg->free_res_q.next,
				 struct ipr_resource_entry, queue);

		list_del(&res->queue);
		ipr_init_res_entry(res);
		list_add_tail(&res->queue, &ioa_cfg->used_res_q);
	}

	memcpy(&res->cfgte, cfgte, sizeof(struct ipr_config_table_entry));

	if (hostrcb->hcam.notify_type == IPR_HOST_RCB_NOTIF_TYPE_REM_ENTRY) {
		if (res->sdev) {
			res->sdev->hostdata = NULL;
			res->del_from_ml = 1;
			if (ioa_cfg->allow_ml_add_del)
				schedule_work(&ioa_cfg->work_q);
		} else
			list_move_tail(&res->queue, &ioa_cfg->free_res_q);
	} else if (!res->sdev) {
		res->add_to_ml = 1;
		if (ioa_cfg->allow_ml_add_del)
			schedule_work(&ioa_cfg->work_q);
	}

	ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE, hostrcb);
}

/**
 * ipr_process_ccn - Op done function for a CCN.
 * @ipr_cmd:	ipr command struct
 *
 * This function is the op done function for a configuration
 * change notification host controlled async from the adapter.
 *
 * Return value:
 * 	none
 **/
static void ipr_process_ccn(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	struct ipr_hostrcb *hostrcb = ipr_cmd->u.hostrcb;
	u32 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);

	list_del(&hostrcb->queue);
	list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);

	if (ioasc) {
		if (ioasc != IPR_IOASC_IOA_WAS_RESET)
			dev_err(&ioa_cfg->pdev->dev,
				"Host RCB failed with IOASC: 0x%08X\n", ioasc);

		ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE, hostrcb);
	} else {
		ipr_handle_config_change(ioa_cfg, hostrcb);
	}
}

/**
 * ipr_log_vpd - Log the passed VPD to the error log.
 * @vpd:		vendor/product id/sn struct
 *
 * Return value:
 * 	none
 **/
static void ipr_log_vpd(struct ipr_vpd *vpd)
{
	char buffer[IPR_VENDOR_ID_LEN + IPR_PROD_ID_LEN
		    + IPR_SERIAL_NUM_LEN];

	memcpy(buffer, vpd->vpids.vendor_id, IPR_VENDOR_ID_LEN);
	memcpy(buffer + IPR_VENDOR_ID_LEN, vpd->vpids.product_id,
	       IPR_PROD_ID_LEN);
	buffer[IPR_VENDOR_ID_LEN + IPR_PROD_ID_LEN] = '\0';
	ipr_err("Vendor/Product ID: %s\n", buffer);

	memcpy(buffer, vpd->sn, IPR_SERIAL_NUM_LEN);
	buffer[IPR_SERIAL_NUM_LEN] = '\0';
	ipr_err("    Serial Number: %s\n", buffer);
}

/**
 * ipr_log_ext_vpd - Log the passed extended VPD to the error log.
 * @vpd:		vendor/product id/sn/wwn struct
 *
 * Return value:
 * 	none
 **/
static void ipr_log_ext_vpd(struct ipr_ext_vpd *vpd)
{
	ipr_log_vpd(&vpd->vpd);
	ipr_err("    WWN: %08X%08X\n", be32_to_cpu(vpd->wwid[0]),
		be32_to_cpu(vpd->wwid[1]));
}

/**
 * ipr_log_enhanced_cache_error - Log a cache error.
 * @ioa_cfg:	ioa config struct
 * @hostrcb:	hostrcb struct
 *
 * Return value:
 * 	none
 **/
static void ipr_log_enhanced_cache_error(struct ipr_ioa_cfg *ioa_cfg,
					 struct ipr_hostrcb *hostrcb)
{
	struct ipr_hostrcb_type_12_error *error =
		&hostrcb->hcam.u.error.u.type_12_error;

	ipr_err("-----Current Configuration-----\n");
	ipr_err("Cache Directory Card Information:\n");
	ipr_log_ext_vpd(&error->ioa_vpd);
	ipr_err("Adapter Card Information:\n");
	ipr_log_ext_vpd(&error->cfc_vpd);

	ipr_err("-----Expected Configuration-----\n");
	ipr_err("Cache Directory Card Information:\n");
	ipr_log_ext_vpd(&error->ioa_last_attached_to_cfc_vpd);
	ipr_err("Adapter Card Information:\n");
	ipr_log_ext_vpd(&error->cfc_last_attached_to_ioa_vpd);

	ipr_err("Additional IOA Data: %08X %08X %08X\n",
		     be32_to_cpu(error->ioa_data[0]),
		     be32_to_cpu(error->ioa_data[1]),
		     be32_to_cpu(error->ioa_data[2]));
}

/**
 * ipr_log_cache_error - Log a cache error.
 * @ioa_cfg:	ioa config struct
 * @hostrcb:	hostrcb struct
 *
 * Return value:
 * 	none
 **/
static void ipr_log_cache_error(struct ipr_ioa_cfg *ioa_cfg,
				struct ipr_hostrcb *hostrcb)
{
	struct ipr_hostrcb_type_02_error *error =
		&hostrcb->hcam.u.error.u.type_02_error;

	ipr_err("-----Current Configuration-----\n");
	ipr_err("Cache Directory Card Information:\n");
	ipr_log_vpd(&error->ioa_vpd);
	ipr_err("Adapter Card Information:\n");
	ipr_log_vpd(&error->cfc_vpd);

	ipr_err("-----Expected Configuration-----\n");
	ipr_err("Cache Directory Card Information:\n");
	ipr_log_vpd(&error->ioa_last_attached_to_cfc_vpd);
	ipr_err("Adapter Card Information:\n");
	ipr_log_vpd(&error->cfc_last_attached_to_ioa_vpd);

	ipr_err("Additional IOA Data: %08X %08X %08X\n",
		     be32_to_cpu(error->ioa_data[0]),
		     be32_to_cpu(error->ioa_data[1]),
		     be32_to_cpu(error->ioa_data[2]));
}

/**
 * ipr_log_enhanced_config_error - Log a configuration error.
 * @ioa_cfg:	ioa config struct
 * @hostrcb:	hostrcb struct
 *
 * Return value:
 * 	none
 **/
static void ipr_log_enhanced_config_error(struct ipr_ioa_cfg *ioa_cfg,
					  struct ipr_hostrcb *hostrcb)
{
	int errors_logged, i;
	struct ipr_hostrcb_device_data_entry_enhanced *dev_entry;
	struct ipr_hostrcb_type_13_error *error;

	error = &hostrcb->hcam.u.error.u.type_13_error;
	errors_logged = be32_to_cpu(error->errors_logged);

	ipr_err("Device Errors Detected/Logged: %d/%d\n",
		be32_to_cpu(error->errors_detected), errors_logged);

	dev_entry = error->dev;

	for (i = 0; i < errors_logged; i++, dev_entry++) {
		ipr_err_separator;

		ipr_phys_res_err(ioa_cfg, dev_entry->dev_res_addr, "Device %d", i + 1);
		ipr_log_ext_vpd(&dev_entry->vpd);

		ipr_err("-----New Device Information-----\n");
		ipr_log_ext_vpd(&dev_entry->new_vpd);

		ipr_err("Cache Directory Card Information:\n");
		ipr_log_ext_vpd(&dev_entry->ioa_last_with_dev_vpd);

		ipr_err("Adapter Card Information:\n");
		ipr_log_ext_vpd(&dev_entry->cfc_last_with_dev_vpd);
	}
}

/**
 * ipr_log_config_error - Log a configuration error.
 * @ioa_cfg:	ioa config struct
 * @hostrcb:	hostrcb struct
 *
 * Return value:
 * 	none
 **/
static void ipr_log_config_error(struct ipr_ioa_cfg *ioa_cfg,
				 struct ipr_hostrcb *hostrcb)
{
	int errors_logged, i;
	struct ipr_hostrcb_device_data_entry *dev_entry;
	struct ipr_hostrcb_type_03_error *error;

	error = &hostrcb->hcam.u.error.u.type_03_error;
	errors_logged = be32_to_cpu(error->errors_logged);

	ipr_err("Device Errors Detected/Logged: %d/%d\n",
		be32_to_cpu(error->errors_detected), errors_logged);

	dev_entry = error->dev;

	for (i = 0; i < errors_logged; i++, dev_entry++) {
		ipr_err_separator;

		ipr_phys_res_err(ioa_cfg, dev_entry->dev_res_addr, "Device %d", i + 1);
		ipr_log_vpd(&dev_entry->vpd);

		ipr_err("-----New Device Information-----\n");
		ipr_log_vpd(&dev_entry->new_vpd);

		ipr_err("Cache Directory Card Information:\n");
		ipr_log_vpd(&dev_entry->ioa_last_with_dev_vpd);

		ipr_err("Adapter Card Information:\n");
		ipr_log_vpd(&dev_entry->cfc_last_with_dev_vpd);

		ipr_err("Additional IOA Data: %08X %08X %08X %08X %08X\n",
			be32_to_cpu(dev_entry->ioa_data[0]),
			be32_to_cpu(dev_entry->ioa_data[1]),
			be32_to_cpu(dev_entry->ioa_data[2]),
			be32_to_cpu(dev_entry->ioa_data[3]),
			be32_to_cpu(dev_entry->ioa_data[4]));
	}
}

/**
 * ipr_log_enhanced_array_error - Log an array configuration error.
 * @ioa_cfg:	ioa config struct
 * @hostrcb:	hostrcb struct
 *
 * Return value:
 * 	none
 **/
static void ipr_log_enhanced_array_error(struct ipr_ioa_cfg *ioa_cfg,
					 struct ipr_hostrcb *hostrcb)
{
	int i, num_entries;
	struct ipr_hostrcb_type_14_error *error;
	struct ipr_hostrcb_array_data_entry_enhanced *array_entry;
	const u8 zero_sn[IPR_SERIAL_NUM_LEN] = { [0 ... IPR_SERIAL_NUM_LEN-1] = '0' };

	error = &hostrcb->hcam.u.error.u.type_14_error;

	ipr_err_separator;

	ipr_err("RAID %s Array Configuration: %d:%d:%d:%d\n",
		error->protection_level,
		ioa_cfg->host->host_no,
		error->last_func_vset_res_addr.bus,
		error->last_func_vset_res_addr.target,
		error->last_func_vset_res_addr.lun);

	ipr_err_separator;

	array_entry = error->array_member;
	num_entries = min_t(u32, be32_to_cpu(error->num_entries),
			    ARRAY_SIZE(error->array_member));

	for (i = 0; i < num_entries; i++, array_entry++) {
		if (!memcmp(array_entry->vpd.vpd.sn, zero_sn, IPR_SERIAL_NUM_LEN))
			continue;

		if (be32_to_cpu(error->exposed_mode_adn) == i)
			ipr_err("Exposed Array Member %d:\n", i);
		else
			ipr_err("Array Member %d:\n", i);

		ipr_log_ext_vpd(&array_entry->vpd);
		ipr_phys_res_err(ioa_cfg, array_entry->dev_res_addr, "Current Location");
		ipr_phys_res_err(ioa_cfg, array_entry->expected_dev_res_addr,
				 "Expected Location");

		ipr_err_separator;
	}
}

/**
 * ipr_log_array_error - Log an array configuration error.
 * @ioa_cfg:	ioa config struct
 * @hostrcb:	hostrcb struct
 *
 * Return value:
 * 	none
 **/
static void ipr_log_array_error(struct ipr_ioa_cfg *ioa_cfg,
				struct ipr_hostrcb *hostrcb)
{
	int i;
	struct ipr_hostrcb_type_04_error *error;
	struct ipr_hostrcb_array_data_entry *array_entry;
	const u8 zero_sn[IPR_SERIAL_NUM_LEN] = { [0 ... IPR_SERIAL_NUM_LEN-1] = '0' };

	error = &hostrcb->hcam.u.error.u.type_04_error;

	ipr_err_separator;

	ipr_err("RAID %s Array Configuration: %d:%d:%d:%d\n",
		error->protection_level,
		ioa_cfg->host->host_no,
		error->last_func_vset_res_addr.bus,
		error->last_func_vset_res_addr.target,
		error->last_func_vset_res_addr.lun);

	ipr_err_separator;

	array_entry = error->array_member;

	for (i = 0; i < 18; i++) {
		if (!memcmp(array_entry->vpd.sn, zero_sn, IPR_SERIAL_NUM_LEN))
			continue;

		if (be32_to_cpu(error->exposed_mode_adn) == i)
			ipr_err("Exposed Array Member %d:\n", i);
		else
			ipr_err("Array Member %d:\n", i);

		ipr_log_vpd(&array_entry->vpd);

		ipr_phys_res_err(ioa_cfg, array_entry->dev_res_addr, "Current Location");
		ipr_phys_res_err(ioa_cfg, array_entry->expected_dev_res_addr,
				 "Expected Location");

		ipr_err_separator;

		if (i == 9)
			array_entry = error->array_member2;
		else
			array_entry++;
	}
}

/**
 * ipr_log_hex_data - Log additional hex IOA error data.
 * @data:		IOA error data
 * @len:		data length
 *
 * Return value:
 * 	none
 **/
static void ipr_log_hex_data(u32 *data, int len)
{
	int i;

	if (len == 0)
		return;

	for (i = 0; i < len / 4; i += 4) {
		ipr_err("%08X: %08X %08X %08X %08X\n", i*4,
			be32_to_cpu(data[i]),
			be32_to_cpu(data[i+1]),
			be32_to_cpu(data[i+2]),
			be32_to_cpu(data[i+3]));
	}
}

/**
 * ipr_log_enhanced_dual_ioa_error - Log an enhanced dual adapter error.
 * @ioa_cfg:	ioa config struct
 * @hostrcb:	hostrcb struct
 *
 * Return value:
 * 	none
 **/
static void ipr_log_enhanced_dual_ioa_error(struct ipr_ioa_cfg *ioa_cfg,
					    struct ipr_hostrcb *hostrcb)
{
	struct ipr_hostrcb_type_17_error *error;

	error = &hostrcb->hcam.u.error.u.type_17_error;
	error->failure_reason[sizeof(error->failure_reason) - 1] = '\0';

	ipr_err("%s\n", error->failure_reason);
	ipr_err("Remote Adapter VPD:\n");
	ipr_log_ext_vpd(&error->vpd);
	ipr_log_hex_data(error->data,
			 be32_to_cpu(hostrcb->hcam.length) -
			 (offsetof(struct ipr_hostrcb_error, u) +
			  offsetof(struct ipr_hostrcb_type_17_error, data)));
}

/**
 * ipr_log_dual_ioa_error - Log a dual adapter error.
 * @ioa_cfg:	ioa config struct
 * @hostrcb:	hostrcb struct
 *
 * Return value:
 * 	none
 **/
static void ipr_log_dual_ioa_error(struct ipr_ioa_cfg *ioa_cfg,
				   struct ipr_hostrcb *hostrcb)
{
	struct ipr_hostrcb_type_07_error *error;

	error = &hostrcb->hcam.u.error.u.type_07_error;
	error->failure_reason[sizeof(error->failure_reason) - 1] = '\0';

	ipr_err("%s\n", error->failure_reason);
	ipr_err("Remote Adapter VPD:\n");
	ipr_log_vpd(&error->vpd);
	ipr_log_hex_data(error->data,
			 be32_to_cpu(hostrcb->hcam.length) -
			 (offsetof(struct ipr_hostrcb_error, u) +
			  offsetof(struct ipr_hostrcb_type_07_error, data)));
}

/**
 * ipr_log_generic_error - Log an adapter error.
 * @ioa_cfg:	ioa config struct
 * @hostrcb:	hostrcb struct
 *
 * Return value:
 * 	none
 **/
static void ipr_log_generic_error(struct ipr_ioa_cfg *ioa_cfg,
				  struct ipr_hostrcb *hostrcb)
{
	ipr_log_hex_data(hostrcb->hcam.u.raw.data,
			 be32_to_cpu(hostrcb->hcam.length));
}

/**
 * ipr_get_error - Find the specified IOASC in the ipr_error_table.
 * @ioasc:	IOASC
 *
 * This function will return the index into the ipr_error_table
 * for the specified IOASC. If the IOASC is not in the table,
 * 0 will be returned, which points to the entry used for unknown errors.
 *
 * Return value:
 * 	index into the ipr_error_table
 **/
static u32 ipr_get_error(u32 ioasc)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(ipr_error_table); i++)
		if (ipr_error_table[i].ioasc == ioasc)
			return i;

	return 0;
}

/**
 * ipr_handle_log_data - Log an adapter error.
 * @ioa_cfg:	ioa config struct
 * @hostrcb:	hostrcb struct
 *
 * This function logs an adapter error to the system.
 *
 * Return value:
 * 	none
 **/
static void ipr_handle_log_data(struct ipr_ioa_cfg *ioa_cfg,
				struct ipr_hostrcb *hostrcb)
{
	u32 ioasc;
	int error_index;

	if (hostrcb->hcam.notify_type != IPR_HOST_RCB_NOTIF_TYPE_ERROR_LOG_ENTRY)
		return;

	if (hostrcb->hcam.notifications_lost == IPR_HOST_RCB_NOTIFICATIONS_LOST)
		dev_err(&ioa_cfg->pdev->dev, "Error notifications lost\n");

	ioasc = be32_to_cpu(hostrcb->hcam.u.error.failing_dev_ioasc);

	if (ioasc == IPR_IOASC_BUS_WAS_RESET ||
	    ioasc == IPR_IOASC_BUS_WAS_RESET_BY_OTHER) {
		/* Tell the midlayer we had a bus reset so it will handle the UA properly */
		scsi_report_bus_reset(ioa_cfg->host,
				      hostrcb->hcam.u.error.failing_dev_res_addr.bus);
	}

	error_index = ipr_get_error(ioasc);

	if (!ipr_error_table[error_index].log_hcam)
		return;

	if (ipr_is_device(&hostrcb->hcam.u.error.failing_dev_res_addr)) {
		ipr_res_err(ioa_cfg, hostrcb->hcam.u.error.failing_dev_res_addr,
			    "%s\n", ipr_error_table[error_index].error);
	} else {
		dev_err(&ioa_cfg->pdev->dev, "%s\n",
			ipr_error_table[error_index].error);
	}

	/* Set indication we have logged an error */
	ioa_cfg->errors_logged++;

	if (ioa_cfg->log_level < IPR_DEFAULT_LOG_LEVEL)
		return;
	if (be32_to_cpu(hostrcb->hcam.length) > sizeof(hostrcb->hcam.u.raw))
		hostrcb->hcam.length = cpu_to_be32(sizeof(hostrcb->hcam.u.raw));

	switch (hostrcb->hcam.overlay_id) {
	case IPR_HOST_RCB_OVERLAY_ID_2:
		ipr_log_cache_error(ioa_cfg, hostrcb);
		break;
	case IPR_HOST_RCB_OVERLAY_ID_3:
		ipr_log_config_error(ioa_cfg, hostrcb);
		break;
	case IPR_HOST_RCB_OVERLAY_ID_4:
	case IPR_HOST_RCB_OVERLAY_ID_6:
		ipr_log_array_error(ioa_cfg, hostrcb);
		break;
	case IPR_HOST_RCB_OVERLAY_ID_7:
		ipr_log_dual_ioa_error(ioa_cfg, hostrcb);
		break;
	case IPR_HOST_RCB_OVERLAY_ID_12:
		ipr_log_enhanced_cache_error(ioa_cfg, hostrcb);
		break;
	case IPR_HOST_RCB_OVERLAY_ID_13:
		ipr_log_enhanced_config_error(ioa_cfg, hostrcb);
		break;
	case IPR_HOST_RCB_OVERLAY_ID_14:
	case IPR_HOST_RCB_OVERLAY_ID_16:
		ipr_log_enhanced_array_error(ioa_cfg, hostrcb);
		break;
	case IPR_HOST_RCB_OVERLAY_ID_17:
		ipr_log_enhanced_dual_ioa_error(ioa_cfg, hostrcb);
		break;
	case IPR_HOST_RCB_OVERLAY_ID_1:
	case IPR_HOST_RCB_OVERLAY_ID_DEFAULT:
	default:
		ipr_log_generic_error(ioa_cfg, hostrcb);
		break;
	}
}

/**
 * ipr_process_error - Op done function for an adapter error log.
 * @ipr_cmd:	ipr command struct
 *
 * This function is the op done function for an error log host
 * controlled async from the adapter. It will log the error and
 * send the HCAM back to the adapter.
 *
 * Return value:
 * 	none
 **/
static void ipr_process_error(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	struct ipr_hostrcb *hostrcb = ipr_cmd->u.hostrcb;
	u32 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);

	list_del(&hostrcb->queue);
	list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);

	if (!ioasc) {
		ipr_handle_log_data(ioa_cfg, hostrcb);
	} else if (ioasc != IPR_IOASC_IOA_WAS_RESET) {
		dev_err(&ioa_cfg->pdev->dev,
			"Host RCB failed with IOASC: 0x%08X\n", ioasc);
	}

	ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_LOG_DATA, hostrcb);
}

/**
 * ipr_timeout -  An internally generated op has timed out.
 * @ipr_cmd:	ipr command struct
 *
 * This function blocks host requests and initiates an
 * adapter reset.
 *
 * Return value:
 * 	none
 **/
static void ipr_timeout(struct ipr_cmnd *ipr_cmd)
{
	unsigned long lock_flags = 0;
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;

	ENTER;
	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);

	ioa_cfg->errors_logged++;
	dev_err(&ioa_cfg->pdev->dev,
		"Adapter being reset due to command timeout.\n");

	if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
		ioa_cfg->sdt_state = GET_DUMP;

	if (!ioa_cfg->in_reset_reload || ioa_cfg->reset_cmd == ipr_cmd)
		ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);

	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
	LEAVE;
}

/**
 * ipr_oper_timeout -  Adapter timed out transitioning to operational
 * @ipr_cmd:	ipr command struct
 *
 * This function blocks host requests and initiates an
 * adapter reset.
 *
 * Return value:
 * 	none
 **/
static void ipr_oper_timeout(struct ipr_cmnd *ipr_cmd)
{
	unsigned long lock_flags = 0;
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;

	ENTER;
	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);

	ioa_cfg->errors_logged++;
	dev_err(&ioa_cfg->pdev->dev,
		"Adapter timed out transitioning to operational.\n");

	if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
		ioa_cfg->sdt_state = GET_DUMP;

	if (!ioa_cfg->in_reset_reload || ioa_cfg->reset_cmd == ipr_cmd) {
		if (ipr_fastfail)
			ioa_cfg->reset_retries += IPR_NUM_RESET_RELOAD_RETRIES;
		ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
	}

	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
	LEAVE;
}

/**
 * ipr_reset_reload - Reset/Reload the IOA
 * @ioa_cfg:		ioa config struct
 * @shutdown_type:	shutdown type
 *
 * This function resets the adapter and re-initializes it.
 * This function assumes that all new host commands have been stopped.
 * Return value:
 * 	SUCCESS / FAILED
 **/
static int ipr_reset_reload(struct ipr_ioa_cfg *ioa_cfg,
			    enum ipr_shutdown_type shutdown_type)
{
	if (!ioa_cfg->in_reset_reload)
		ipr_initiate_ioa_reset(ioa_cfg, shutdown_type);

	spin_unlock_irq(ioa_cfg->host->host_lock);
	wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
	spin_lock_irq(ioa_cfg->host->host_lock);

	/* If we got hit with a host reset while we were already resetting
	 * the adapter for some reason, the reset may have failed and left
	 * the adapter dead. */
	if (ioa_cfg->ioa_is_dead) {
		ipr_trace;
		return FAILED;
	}

	return SUCCESS;
}

/**
 * ipr_find_ses_entry - Find matching SES in SES table
 * @res:	resource entry struct of SES
 *
 * Return value:
 * 	pointer to SES table entry / NULL on failure
 **/
static const struct ipr_ses_table_entry *
ipr_find_ses_entry(struct ipr_resource_entry *res)
{
	int i, j, matches;
	const struct ipr_ses_table_entry *ste = ipr_ses_table;

	for (i = 0; i < ARRAY_SIZE(ipr_ses_table); i++, ste++) {
		for (j = 0, matches = 0; j < IPR_PROD_ID_LEN; j++) {
			if (ste->compare_product_id_byte[j] == 'X') {
				if (res->cfgte.std_inq_data.vpids.product_id[j] == ste->product_id[j])
					matches++;
				else
					break;
			} else
				matches++;
		}

		if (matches == IPR_PROD_ID_LEN)
			return ste;
	}

	return NULL;
}

/**
 * ipr_get_max_scsi_speed - Determine max SCSI speed for a given bus
 * @ioa_cfg:	ioa config struct
 * @bus:		SCSI bus
 * @bus_width:	bus width
 *
 * Return value:
 *	SCSI bus speed in units of 100KHz, 1600 is 160 MHz
 *	For a 2-byte wide SCSI bus, the maximum transfer speed is
 *	twice the maximum transfer rate (e.g. for a wide enabled bus,
 *	max 160MHz = max 320MB/sec).
 **/
static u32 ipr_get_max_scsi_speed(struct ipr_ioa_cfg *ioa_cfg, u8 bus, u8 bus_width)
{
	struct ipr_resource_entry *res;
	const struct ipr_ses_table_entry *ste;
	u32 max_xfer_rate = IPR_MAX_SCSI_RATE(bus_width);

	/* Loop through each config table entry in the config table buffer */
	list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
		if (!(IPR_IS_SES_DEVICE(res->cfgte.std_inq_data)))
			continue;

		if (bus != res->cfgte.res_addr.bus)
			continue;

		if (!(ste = ipr_find_ses_entry(res)))
			continue;

		max_xfer_rate = (ste->max_bus_speed_limit * 10) / (bus_width / 8);
	}

	return max_xfer_rate;
}
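
/*
 * Worked example (illustrative): with bus_width = 16 and an SES table entry
 * limiting max_bus_speed_limit to 160 (MB/s), the function above returns
 * (160 * 10) / (16 / 8) = 800, i.e. 80 MHz in units of 100KHz, which on a
 * 2-byte wide bus corresponds to 160 MB/s.
 */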

/**
 * ipr_wait_iodbg_ack - Wait for an IODEBUG ACK from the IOA
 * @ioa_cfg:		ioa config struct
 * @max_delay:		max delay in micro-seconds to wait
 *
 * Waits for an IODEBUG ACK from the IOA, doing busy looping.
 *
 * Return value:
 * 	0 on success / other on failure
 **/
static int ipr_wait_iodbg_ack(struct ipr_ioa_cfg *ioa_cfg, int max_delay)
{
	volatile u32 pcii_reg;
	int delay = 1;

	/* Read interrupt reg until IOA signals IO Debug Acknowledge */
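	/* The polling delay doubles each pass (1, 2, 4, ... microseconds),
	 * so the total busy-wait is bounded by roughly 2 * max_delay */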
	while (delay < max_delay) {
		pcii_reg = readl(ioa_cfg->regs.sense_interrupt_reg);

		if (pcii_reg & IPR_PCII_IO_DEBUG_ACKNOWLEDGE)
			return 0;

		/* udelay cannot be used if delay is more than a few milliseconds */
		if ((delay / 1000) > MAX_UDELAY_MS)
			mdelay(delay / 1000);
		else
			udelay(delay);

		delay += delay;
	}
	return -EIO;
}

/**
 * ipr_get_ldump_data_section - Dump IOA memory
 * @ioa_cfg:			ioa config struct
 * @start_addr:			adapter address to dump
 * @dest:				destination kernel buffer
 * @length_in_words:	length to dump in 4 byte words
 *
 * Return value:
 * 	0 on success / -EIO on failure
 **/
static int ipr_get_ldump_data_section(struct ipr_ioa_cfg *ioa_cfg,
				      u32 start_addr,
				      __be32 *dest, u32 length_in_words)
{
	volatile u32 temp_pcii_reg;
	int i, delay = 0;

	/* Write IOA interrupt reg starting LDUMP state  */
	writel((IPR_UPROCI_RESET_ALERT | IPR_UPROCI_IO_DEBUG_ALERT),
	       ioa_cfg->regs.set_uproc_interrupt_reg);

	/* Wait for IO debug acknowledge */
	if (ipr_wait_iodbg_ack(ioa_cfg,
			       IPR_LDUMP_MAX_LONG_ACK_DELAY_IN_USEC)) {
		dev_err(&ioa_cfg->pdev->dev,
			"IOA dump long data transfer timeout\n");
		return -EIO;
	}

	/* Signal LDUMP interlocked - clear IO debug ack */
	writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE,
	       ioa_cfg->regs.clr_interrupt_reg);

	/* Write Mailbox with starting address */
	writel(start_addr, ioa_cfg->ioa_mailbox);

	/* Signal address valid - clear IOA Reset alert */
	writel(IPR_UPROCI_RESET_ALERT,
	       ioa_cfg->regs.clr_uproc_interrupt_reg);

	for (i = 0; i < length_in_words; i++) {
		/* Wait for IO debug acknowledge */
		if (ipr_wait_iodbg_ack(ioa_cfg,
				       IPR_LDUMP_MAX_SHORT_ACK_DELAY_IN_USEC)) {
			dev_err(&ioa_cfg->pdev->dev,
				"IOA dump short data transfer timeout\n");
			return -EIO;
		}

		/* Read data from mailbox and increment destination pointer */
		*dest = cpu_to_be32(readl(ioa_cfg->ioa_mailbox));
		dest++;

		/* For all but the last word of data, signal data received */
		if (i < (length_in_words - 1)) {
			/* Signal dump data received - Clear IO debug Ack */
			writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE,
			       ioa_cfg->regs.clr_interrupt_reg);
		}
	}

	/* Signal end of block transfer. Set reset alert then clear IO debug ack */
	writel(IPR_UPROCI_RESET_ALERT,
	       ioa_cfg->regs.set_uproc_interrupt_reg);

	writel(IPR_UPROCI_IO_DEBUG_ALERT,
	       ioa_cfg->regs.clr_uproc_interrupt_reg);

	/* Signal dump data received - Clear IO debug Ack */
	writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE,
	       ioa_cfg->regs.clr_interrupt_reg);

	/* Wait for IOA to signal LDUMP exit - IOA reset alert will be cleared */
	while (delay < IPR_LDUMP_MAX_SHORT_ACK_DELAY_IN_USEC) {
		temp_pcii_reg =
		    readl(ioa_cfg->regs.sense_uproc_interrupt_reg);

		if (!(temp_pcii_reg & IPR_UPROCI_RESET_ALERT))
			return 0;

		udelay(10);
		delay += 10;
	}

	return 0;
}

#ifdef CONFIG_SCSI_IPR_DUMP
/**
 * ipr_sdt_copy - Copy Smart Dump Table to kernel buffer
 * @ioa_cfg:		ioa config struct
 * @pci_address:	adapter address
 * @length:			length of data to copy
 *
 * Copy data from PCI adapter to kernel buffer.
 * Note: length MUST be a 4 byte multiple
 * Return value:
 * 	0 on success / other on failure
 **/
static int ipr_sdt_copy(struct ipr_ioa_cfg *ioa_cfg,
			unsigned long pci_address, u32 length)
{
	int bytes_copied = 0;
	int cur_len, rc, rem_len, rem_page_len;
	__be32 *page;
	unsigned long lock_flags = 0;
	struct ipr_ioa_dump *ioa_dump = &ioa_cfg->dump->ioa_dump;

1743 	while (bytes_copied < length &&
1744 	       (ioa_dump->hdr.len + bytes_copied) < IPR_MAX_IOA_DUMP_SIZE) {
1745 		if (ioa_dump->page_offset >= PAGE_SIZE ||
1746 		    ioa_dump->page_offset == 0) {
1747 			page = (__be32 *)__get_free_page(GFP_ATOMIC);
1748 
1749 			if (!page) {
1750 				ipr_trace;
1751 				return bytes_copied;
1752 			}
1753 
1754 			ioa_dump->page_offset = 0;
1755 			ioa_dump->ioa_data[ioa_dump->next_page_index] = page;
1756 			ioa_dump->next_page_index++;
1757 		} else
1758 			page = ioa_dump->ioa_data[ioa_dump->next_page_index - 1];
1759 
1760 		rem_len = length - bytes_copied;
1761 		rem_page_len = PAGE_SIZE - ioa_dump->page_offset;
1762 		cur_len = min(rem_len, rem_page_len);
1763 
1764 		spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
1765 		if (ioa_cfg->sdt_state == ABORT_DUMP) {
1766 			rc = -EIO;
1767 		} else {
1768 			rc = ipr_get_ldump_data_section(ioa_cfg,
1769 							pci_address + bytes_copied,
1770 							&page[ioa_dump->page_offset / 4],
1771 							(cur_len / sizeof(u32)));
1772 		}
1773 		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
1774 
1775 		if (!rc) {
1776 			ioa_dump->page_offset += cur_len;
1777 			bytes_copied += cur_len;
1778 		} else {
1779 			ipr_trace;
1780 			break;
1781 		}
1782 		schedule();
1783 	}
1784 
1785 	return bytes_copied;
1786 }
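
/*
 * cur_len above is clamped to both the bytes remaining in the transfer
 * and the space left in the current page, so a single fetch never
 * straddles a page. For example, with 4 KB pages, page_offset == 3000
 * and 10000 bytes still to copy: rem_page_len = 1096, so only 1096 bytes
 * are fetched on this pass and the next pass starts a fresh page
 * (illustrative numbers).
 */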
1787 
1788 /**
1789  * ipr_init_dump_entry_hdr - Initialize a dump entry header.
1790  * @hdr:	dump entry header struct
1791  *
1792  * Return value:
1793  * 	nothing
1794  **/
1795 static void ipr_init_dump_entry_hdr(struct ipr_dump_entry_header *hdr)
1796 {
1797 	hdr->eye_catcher = IPR_DUMP_EYE_CATCHER;
1798 	hdr->num_elems = 1;
1799 	hdr->offset = sizeof(*hdr);
1800 	hdr->status = IPR_DUMP_STATUS_SUCCESS;
1801 }
1802 
1803 /**
1804  * ipr_dump_ioa_type_data - Fill in the adapter type in the dump.
1805  * @ioa_cfg:	ioa config struct
1806  * @driver_dump:	driver dump struct
1807  *
1808  * Return value:
1809  * 	nothing
1810  **/
1811 static void ipr_dump_ioa_type_data(struct ipr_ioa_cfg *ioa_cfg,
1812 				   struct ipr_driver_dump *driver_dump)
1813 {
1814 	struct ipr_inquiry_page3 *ucode_vpd = &ioa_cfg->vpd_cbs->page3_data;
1815 
1816 	ipr_init_dump_entry_hdr(&driver_dump->ioa_type_entry.hdr);
1817 	driver_dump->ioa_type_entry.hdr.len =
1818 		sizeof(struct ipr_dump_ioa_type_entry) -
1819 		sizeof(struct ipr_dump_entry_header);
1820 	driver_dump->ioa_type_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_BINARY;
1821 	driver_dump->ioa_type_entry.hdr.id = IPR_DUMP_DRIVER_TYPE_ID;
1822 	driver_dump->ioa_type_entry.type = ioa_cfg->type;
1823 	driver_dump->ioa_type_entry.fw_version = (ucode_vpd->major_release << 24) |
1824 		(ucode_vpd->card_type << 16) | (ucode_vpd->minor_release[0] << 8) |
1825 		ucode_vpd->minor_release[1];
1826 	driver_dump->hdr.num_entries++;
1827 }
1828 
1829 /**
1830  * ipr_dump_version_data - Fill in the driver version in the dump.
1831  * @ioa_cfg:	ioa config struct
1832  * @driver_dump:	driver dump struct
1833  *
1834  * Return value:
1835  * 	nothing
1836  **/
1837 static void ipr_dump_version_data(struct ipr_ioa_cfg *ioa_cfg,
1838 				  struct ipr_driver_dump *driver_dump)
1839 {
1840 	ipr_init_dump_entry_hdr(&driver_dump->version_entry.hdr);
1841 	driver_dump->version_entry.hdr.len =
1842 		sizeof(struct ipr_dump_version_entry) -
1843 		sizeof(struct ipr_dump_entry_header);
1844 	driver_dump->version_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_ASCII;
1845 	driver_dump->version_entry.hdr.id = IPR_DUMP_DRIVER_VERSION_ID;
1846 	strcpy(driver_dump->version_entry.version, IPR_DRIVER_VERSION);
1847 	driver_dump->hdr.num_entries++;
1848 }
1849 
1850 /**
1851  * ipr_dump_trace_data - Fill in the IOA trace in the dump.
1852  * @ioa_cfg:	ioa config struct
1853  * @driver_dump:	driver dump struct
1854  *
1855  * Return value:
1856  * 	nothing
1857  **/
1858 static void ipr_dump_trace_data(struct ipr_ioa_cfg *ioa_cfg,
1859 				   struct ipr_driver_dump *driver_dump)
1860 {
1861 	ipr_init_dump_entry_hdr(&driver_dump->trace_entry.hdr);
1862 	driver_dump->trace_entry.hdr.len =
1863 		sizeof(struct ipr_dump_trace_entry) -
1864 		sizeof(struct ipr_dump_entry_header);
1865 	driver_dump->trace_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_BINARY;
1866 	driver_dump->trace_entry.hdr.id = IPR_DUMP_TRACE_ID;
1867 	memcpy(driver_dump->trace_entry.trace, ioa_cfg->trace, IPR_TRACE_SIZE);
1868 	driver_dump->hdr.num_entries++;
1869 }
1870 
1871 /**
1872  * ipr_dump_location_data - Fill in the IOA location in the dump.
1873  * @ioa_cfg:	ioa config struct
1874  * @driver_dump:	driver dump struct
1875  *
1876  * Return value:
1877  * 	nothing
1878  **/
1879 static void ipr_dump_location_data(struct ipr_ioa_cfg *ioa_cfg,
1880 				   struct ipr_driver_dump *driver_dump)
1881 {
1882 	ipr_init_dump_entry_hdr(&driver_dump->location_entry.hdr);
1883 	driver_dump->location_entry.hdr.len =
1884 		sizeof(struct ipr_dump_location_entry) -
1885 		sizeof(struct ipr_dump_entry_header);
1886 	driver_dump->location_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_ASCII;
1887 	driver_dump->location_entry.hdr.id = IPR_DUMP_LOCATION_ID;
1888 	strcpy(driver_dump->location_entry.location, ioa_cfg->pdev->dev.bus_id);
1889 	driver_dump->hdr.num_entries++;
1890 }
1891 
1892 /**
1893  * ipr_get_ioa_dump - Perform a dump of the driver and adapter.
1894  * @ioa_cfg:	ioa config struct
1895  * @dump:		dump struct
1896  *
1897  * Return value:
1898  * 	nothing
1899  **/
1900 static void ipr_get_ioa_dump(struct ipr_ioa_cfg *ioa_cfg, struct ipr_dump *dump)
1901 {
1902 	unsigned long start_addr, sdt_word;
1903 	unsigned long lock_flags = 0;
1904 	struct ipr_driver_dump *driver_dump = &dump->driver_dump;
1905 	struct ipr_ioa_dump *ioa_dump = &dump->ioa_dump;
1906 	u32 num_entries, start_off, end_off;
1907 	u32 bytes_to_copy, bytes_copied, rc;
1908 	struct ipr_sdt *sdt;
1909 	int i;
1910 
1911 	ENTER;
1912 
1913 	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
1914 
1915 	if (ioa_cfg->sdt_state != GET_DUMP) {
1916 		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
1917 		return;
1918 	}
1919 
1920 	start_addr = readl(ioa_cfg->ioa_mailbox);
1921 
1922 	if (!ipr_sdt_is_fmt2(start_addr)) {
1923 		dev_err(&ioa_cfg->pdev->dev,
1924 			"Invalid dump table format: %lx\n", start_addr);
1925 		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
1926 		return;
1927 	}
1928 
1929 	dev_err(&ioa_cfg->pdev->dev, "Dump of IOA initiated\n");
1930 
1931 	driver_dump->hdr.eye_catcher = IPR_DUMP_EYE_CATCHER;
1932 
1933 	/* Initialize the overall dump header */
1934 	driver_dump->hdr.len = sizeof(struct ipr_driver_dump);
1935 	driver_dump->hdr.num_entries = 1;
1936 	driver_dump->hdr.first_entry_offset = sizeof(struct ipr_dump_header);
1937 	driver_dump->hdr.status = IPR_DUMP_STATUS_SUCCESS;
1938 	driver_dump->hdr.os = IPR_DUMP_OS_LINUX;
1939 	driver_dump->hdr.driver_name = IPR_DUMP_DRIVER_NAME;
1940 
1941 	ipr_dump_version_data(ioa_cfg, driver_dump);
1942 	ipr_dump_location_data(ioa_cfg, driver_dump);
1943 	ipr_dump_ioa_type_data(ioa_cfg, driver_dump);
1944 	ipr_dump_trace_data(ioa_cfg, driver_dump);
1945 
1946 	/* Update dump_header */
1947 	driver_dump->hdr.len += sizeof(struct ipr_dump_entry_header);
1948 
1949 	/* IOA Dump entry */
1950 	ipr_init_dump_entry_hdr(&ioa_dump->hdr);
1951 	ioa_dump->format = IPR_SDT_FMT2;
1952 	ioa_dump->hdr.len = 0;
1953 	ioa_dump->hdr.data_type = IPR_DUMP_DATA_TYPE_BINARY;
1954 	ioa_dump->hdr.id = IPR_DUMP_IOA_DUMP_ID;
1955 
1956 	/* First entries in sdt are actually a list of dump addresses and
1957 	 * lengths to gather the real dump data.  sdt represents the pointer
1958 	 * to the ioa generated dump table.  Dump data will be extracted based
1959 	 * on entries in this table */
1960 	sdt = &ioa_dump->sdt;
1961 
1962 	rc = ipr_get_ldump_data_section(ioa_cfg, start_addr, (__be32 *)sdt,
1963 					sizeof(struct ipr_sdt) / sizeof(__be32));
1964 
1965 	/* Smart Dump table is ready to use and the first entry is valid */
1966 	if (rc || (be32_to_cpu(sdt->hdr.state) != IPR_FMT2_SDT_READY_TO_USE)) {
1967 		dev_err(&ioa_cfg->pdev->dev,
1968 			"Dump of IOA failed. Dump table not valid: %d, %X.\n",
1969 			rc, be32_to_cpu(sdt->hdr.state));
1970 		driver_dump->hdr.status = IPR_DUMP_STATUS_FAILED;
1971 		ioa_cfg->sdt_state = DUMP_OBTAINED;
1972 		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
1973 		return;
1974 	}
1975 
1976 	num_entries = be32_to_cpu(sdt->hdr.num_entries_used);
1977 
1978 	if (num_entries > IPR_NUM_SDT_ENTRIES)
1979 		num_entries = IPR_NUM_SDT_ENTRIES;
1980 
1981 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
1982 
1983 	for (i = 0; i < num_entries; i++) {
1984 		if (ioa_dump->hdr.len > IPR_MAX_IOA_DUMP_SIZE) {
1985 			driver_dump->hdr.status = IPR_DUMP_STATUS_QUAL_SUCCESS;
1986 			break;
1987 		}
1988 
1989 		if (sdt->entry[i].flags & IPR_SDT_VALID_ENTRY) {
1990 			sdt_word = be32_to_cpu(sdt->entry[i].bar_str_offset);
1991 			start_off = sdt_word & IPR_FMT2_MBX_ADDR_MASK;
1992 			end_off = be32_to_cpu(sdt->entry[i].end_offset);
1993 
1994 			if (ipr_sdt_is_fmt2(sdt_word) && sdt_word) {
1995 				bytes_to_copy = end_off - start_off;
1996 				if (bytes_to_copy > IPR_MAX_IOA_DUMP_SIZE) {
1997 					sdt->entry[i].flags &= ~IPR_SDT_VALID_ENTRY;
1998 					continue;
1999 				}
2000 
2001 				/* Copy data from adapter to driver buffers */
2002 				bytes_copied = ipr_sdt_copy(ioa_cfg, sdt_word,
2003 							    bytes_to_copy);
2004 
2005 				ioa_dump->hdr.len += bytes_copied;
2006 
2007 				if (bytes_copied != bytes_to_copy) {
2008 					driver_dump->hdr.status = IPR_DUMP_STATUS_QUAL_SUCCESS;
2009 					break;
2010 				}
2011 			}
2012 		}
2013 	}
2014 
2015 	dev_err(&ioa_cfg->pdev->dev, "Dump of IOA completed.\n");
2016 
2017 	/* Update dump_header */
2018 	driver_dump->hdr.len += ioa_dump->hdr.len;
2019 	wmb();
2020 	ioa_cfg->sdt_state = DUMP_OBTAINED;
2021 	LEAVE;
2022 }
2023 
2024 #else
2025 #define ipr_get_ioa_dump(ioa_cfg, dump) do { } while (0)
2026 #endif
2027 
2028 /**
2029  * ipr_release_dump - Free adapter dump memory
2030  * @kref:	kref struct
2031  *
2032  * Return value:
2033  *	nothing
2034  **/
2035 static void ipr_release_dump(struct kref *kref)
2036 {
2037 	struct ipr_dump *dump = container_of(kref, struct ipr_dump, kref);
2038 	struct ipr_ioa_cfg *ioa_cfg = dump->ioa_cfg;
2039 	unsigned long lock_flags = 0;
2040 	int i;
2041 
2042 	ENTER;
2043 	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2044 	ioa_cfg->dump = NULL;
2045 	ioa_cfg->sdt_state = INACTIVE;
2046 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2047 
2048 	for (i = 0; i < dump->ioa_dump.next_page_index; i++)
2049 		free_page((unsigned long) dump->ioa_dump.ioa_data[i]);
2050 
2051 	kfree(dump);
2052 	LEAVE;
2053 }
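
/*
 * Dump lifetime is reference counted: any path that uses ioa_cfg->dump
 * outside the host lock must take a reference first and release it with
 * this function as the kref destructor, e.g.:
 *
 *	kref_get(&dump->kref);
 *	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
 *	... use dump ...
 *	kref_put(&dump->kref, ipr_release_dump);
 */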
2054 
2055 /**
2056  * ipr_worker_thread - Worker thread
2057  * @data:		ioa config struct
2058  *
2059  * Called at task level from a work thread. This function takes care
2060  * of adding and removing devices from the mid-layer as configuration
2061  * changes are detected by the adapter.
2062  *
2063  * Return value:
2064  * 	nothing
2065  **/
2066 static void ipr_worker_thread(void *data)
2067 {
2068 	unsigned long lock_flags;
2069 	struct ipr_resource_entry *res;
2070 	struct scsi_device *sdev;
2071 	struct ipr_dump *dump;
2072 	struct ipr_ioa_cfg *ioa_cfg = data;
2073 	u8 bus, target, lun;
2074 	int did_work;
2075 
2076 	ENTER;
2077 	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2078 
2079 	if (ioa_cfg->sdt_state == GET_DUMP) {
2080 		dump = ioa_cfg->dump;
2081 		if (!dump) {
2082 			spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2083 			return;
2084 		}
2085 		kref_get(&dump->kref);
2086 		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2087 		ipr_get_ioa_dump(ioa_cfg, dump);
2088 		kref_put(&dump->kref, ipr_release_dump);
2089 
2090 		spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2091 		if (ioa_cfg->sdt_state == DUMP_OBTAINED)
2092 			ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
2093 		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2094 		return;
2095 	}
2096 
2097 restart:
2098 	do {
2099 		did_work = 0;
2100 		if (!ioa_cfg->allow_cmds || !ioa_cfg->allow_ml_add_del) {
2101 			spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2102 			return;
2103 		}
2104 
2105 		list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
2106 			if (res->del_from_ml && res->sdev) {
2107 				did_work = 1;
2108 				sdev = res->sdev;
2109 				if (!scsi_device_get(sdev)) {
2110 					res->sdev = NULL;
2111 					list_move_tail(&res->queue, &ioa_cfg->free_res_q);
2112 					spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2113 					scsi_remove_device(sdev);
2114 					scsi_device_put(sdev);
2115 					spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2116 				}
2117 				break;
2118 			}
2119 		}
2120 	} while (did_work);
2121 
2122 	list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
2123 		if (res->add_to_ml) {
2124 			bus = res->cfgte.res_addr.bus;
2125 			target = res->cfgte.res_addr.target;
2126 			lun = res->cfgte.res_addr.lun;
2127 			spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2128 			scsi_add_device(ioa_cfg->host, bus, target, lun);
2129 			spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
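			/*
			 * The host lock was dropped around scsi_add_device(),
			 * so the resource list may have changed; rescan it
			 * from the top.
			 */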
2130 			goto restart;
2131 		}
2132 	}
2133 
2134 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2135 	kobject_uevent(&ioa_cfg->host->shost_classdev.kobj, KOBJ_CHANGE);
2136 	LEAVE;
2137 }
2138 
2139 #ifdef CONFIG_SCSI_IPR_TRACE
2140 /**
2141  * ipr_read_trace - Dump the adapter trace
2142  * @kobj:		kobject struct
2143  * @buf:		buffer
2144  * @off:		offset
2145  * @count:		buffer size
2146  *
2147  * Return value:
2148  *	number of bytes printed to buffer
2149  **/
2150 static ssize_t ipr_read_trace(struct kobject *kobj, char *buf,
2151 			      loff_t off, size_t count)
2152 {
2153 	struct class_device *cdev = container_of(kobj, struct class_device, kobj);
2154 	struct Scsi_Host *shost = class_to_shost(cdev);
2155 	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
2156 	unsigned long lock_flags = 0;
2157 	int size = IPR_TRACE_SIZE;
2158 	char *src = (char *)ioa_cfg->trace;
2159 
2160 	if (off > size)
2161 		return 0;
2162 	if (off + count > size) {
2163 		size -= off;
2164 		count = size;
2165 	}
2166 
2167 	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2168 	memcpy(buf, &src[off], count);
2169 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2170 	return count;
2171 }
2172 
2173 static struct bin_attribute ipr_trace_attr = {
2174 	.attr =	{
2175 		.name = "trace",
2176 		.mode = S_IRUGO,
2177 	},
2178 	.size = 0,
2179 	.read = ipr_read_trace,
2180 };
2181 #endif
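
/*
 * The trace buffer is exposed as a read-only binary sysfs attribute on
 * the Scsi_Host class device, so it can be captured from user space with
 * something like (path illustrative, N is the host number):
 *
 *	hexdump -C /sys/class/scsi_host/hostN/trace
 */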
2182 
2183 static const struct {
2184 	enum ipr_cache_state state;
2185 	char *name;
2186 } cache_state[] = {
2187 	{ CACHE_NONE, "none" },
2188 	{ CACHE_DISABLED, "disabled" },
2189 	{ CACHE_ENABLED, "enabled" }
2190 };
2191 
2192 /**
2193  * ipr_show_write_caching - Show the write caching attribute
2194  * @class_dev:	class device struct
2195  * @buf:		buffer
2196  *
2197  * Return value:
2198  *	number of bytes printed to buffer
2199  **/
2200 static ssize_t ipr_show_write_caching(struct class_device *class_dev, char *buf)
2201 {
2202 	struct Scsi_Host *shost = class_to_shost(class_dev);
2203 	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
2204 	unsigned long lock_flags = 0;
2205 	int i, len = 0;
2206 
2207 	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2208 	for (i = 0; i < ARRAY_SIZE(cache_state); i++) {
2209 		if (cache_state[i].state == ioa_cfg->cache_state) {
2210 			len = snprintf(buf, PAGE_SIZE, "%s\n", cache_state[i].name);
2211 			break;
2212 		}
2213 	}
2214 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2215 	return len;
2216 }
2217
2219 /**
2220  * ipr_store_write_caching - Enable/disable adapter write cache
2221  * @class_dev:	class_device struct
2222  * @buf:		buffer
2223  * @count:		buffer size
2224  *
2225  * This function will enable/disable adapter write cache.
2226  *
2227  * Return value:
2228  * 	count on success / other on failure
2229  **/
2230 static ssize_t ipr_store_write_caching(struct class_device *class_dev,
2231 					const char *buf, size_t count)
2232 {
2233 	struct Scsi_Host *shost = class_to_shost(class_dev);
2234 	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
2235 	unsigned long lock_flags = 0;
2236 	enum ipr_cache_state new_state = CACHE_INVALID;
2237 	int i;
2238 
2239 	if (!capable(CAP_SYS_ADMIN))
2240 		return -EACCES;
2241 	if (ioa_cfg->cache_state == CACHE_NONE)
2242 		return -EINVAL;
2243 
2244 	for (i = 0; i < ARRAY_SIZE(cache_state); i++) {
2245 		if (!strncmp(cache_state[i].name, buf, strlen(cache_state[i].name))) {
2246 			new_state = cache_state[i].state;
2247 			break;
2248 		}
2249 	}
2250 
2251 	if (new_state != CACHE_DISABLED && new_state != CACHE_ENABLED)
2252 		return -EINVAL;
2253 
2254 	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2255 	if (ioa_cfg->cache_state == new_state) {
2256 		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2257 		return count;
2258 	}
2259 
2260 	ioa_cfg->cache_state = new_state;
2261 	dev_info(&ioa_cfg->pdev->dev, "%s adapter write cache.\n",
2262 		 new_state == CACHE_ENABLED ? "Enabling" : "Disabling");
2263 	if (!ioa_cfg->in_reset_reload)
2264 		ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NORMAL);
2265 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2266 	wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
2267 
2268 	return count;
2269 }
2270 
2271 static struct class_device_attribute ipr_ioa_cache_attr = {
2272 	.attr = {
2273 		.name =		"write_cache",
2274 		.mode =		S_IRUGO | S_IWUSR,
2275 	},
2276 	.show = ipr_show_write_caching,
2277 	.store = ipr_store_write_caching
2278 };
2279 
2280 /**
2281  * ipr_show_fw_version - Show the firmware version
2282  * @class_dev:	class device struct
2283  * @buf:		buffer
2284  *
2285  * Return value:
2286  *	number of bytes printed to buffer
2287  **/
2288 static ssize_t ipr_show_fw_version(struct class_device *class_dev, char *buf)
2289 {
2290 	struct Scsi_Host *shost = class_to_shost(class_dev);
2291 	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
2292 	struct ipr_inquiry_page3 *ucode_vpd = &ioa_cfg->vpd_cbs->page3_data;
2293 	unsigned long lock_flags = 0;
2294 	int len;
2295 
2296 	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2297 	len = snprintf(buf, PAGE_SIZE, "%02X%02X%02X%02X\n",
2298 		       ucode_vpd->major_release, ucode_vpd->card_type,
2299 		       ucode_vpd->minor_release[0],
2300 		       ucode_vpd->minor_release[1]);
2301 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2302 	return len;
2303 }
2304 
2305 static struct class_device_attribute ipr_fw_version_attr = {
2306 	.attr = {
2307 		.name =		"fw_version",
2308 		.mode =		S_IRUGO,
2309 	},
2310 	.show = ipr_show_fw_version,
2311 };
2312 
2313 /**
2314  * ipr_show_log_level - Show the adapter's error logging level
2315  * @class_dev:	class device struct
2316  * @buf:		buffer
2317  *
2318  * Return value:
2319  * 	number of bytes printed to buffer
2320  **/
2321 static ssize_t ipr_show_log_level(struct class_device *class_dev, char *buf)
2322 {
2323 	struct Scsi_Host *shost = class_to_shost(class_dev);
2324 	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
2325 	unsigned long lock_flags = 0;
2326 	int len;
2327 
2328 	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2329 	len = snprintf(buf, PAGE_SIZE, "%d\n", ioa_cfg->log_level);
2330 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2331 	return len;
2332 }
2333 
2334 /**
2335  * ipr_store_log_level - Change the adapter's error logging level
2336  * @class_dev:	class device struct
2337  * @buf:		buffer
2338  *
2339  * Return value:
2340  * 	number of bytes consumed from buffer
2341  **/
2342 static ssize_t ipr_store_log_level(struct class_device *class_dev,
2343 				   const char *buf, size_t count)
2344 {
2345 	struct Scsi_Host *shost = class_to_shost(class_dev);
2346 	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
2347 	unsigned long lock_flags = 0;
2348 
2349 	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2350 	ioa_cfg->log_level = simple_strtoul(buf, NULL, 10);
2351 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2352 	return strlen(buf);
2353 }
2354 
2355 static struct class_device_attribute ipr_log_level_attr = {
2356 	.attr = {
2357 		.name =		"log_level",
2358 		.mode =		S_IRUGO | S_IWUSR,
2359 	},
2360 	.show = ipr_show_log_level,
2361 	.store = ipr_store_log_level
2362 };
2363 
2364 /**
2365  * ipr_store_diagnostics - IOA Diagnostics interface
2366  * @class_dev:	class_device struct
2367  * @buf:		buffer
2368  * @count:		buffer size
2369  *
2370  * This function will reset the adapter and wait a reasonable
2371  * amount of time for any errors that the adapter might log.
2372  *
2373  * Return value:
2374  * 	count on success / other on failure
2375  **/
2376 static ssize_t ipr_store_diagnostics(struct class_device *class_dev,
2377 				     const char *buf, size_t count)
2378 {
2379 	struct Scsi_Host *shost = class_to_shost(class_dev);
2380 	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
2381 	unsigned long lock_flags = 0;
2382 	int rc = count;
2383 
2384 	if (!capable(CAP_SYS_ADMIN))
2385 		return -EACCES;
2386 
2387 	wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
2388 	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2389 	ioa_cfg->errors_logged = 0;
2390 	ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NORMAL);
2391 
2392 	if (ioa_cfg->in_reset_reload) {
2393 		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2394 		wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
2395 
2396 		/* Wait for a second for any errors to be logged */
2397 		msleep(1000);
2398 	} else {
2399 		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2400 		return -EIO;
2401 	}
2402 
2403 	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2404 	if (ioa_cfg->in_reset_reload || ioa_cfg->errors_logged)
2405 		rc = -EIO;
2406 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2407 
2408 	return rc;
2409 }
2410 
2411 static struct class_device_attribute ipr_diagnostics_attr = {
2412 	.attr = {
2413 		.name =		"run_diagnostics",
2414 		.mode =		S_IWUSR,
2415 	},
2416 	.store = ipr_store_diagnostics
2417 };
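
/*
 * Any write to this attribute triggers the diagnostic reset; the store
 * only fails if the reset cannot be started or if errors were logged
 * while it ran, e.g. (path illustrative):
 *
 *	echo 1 > /sys/class/scsi_host/hostN/run_diagnostics
 */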
2418 
2419 /**
2420  * ipr_show_adapter_state - Show the adapter's state
2421  * @class_dev:	class device struct
2422  * @buf:		buffer
2423  *
2424  * Return value:
2425  * 	number of bytes printed to buffer
2426  **/
2427 static ssize_t ipr_show_adapter_state(struct class_device *class_dev, char *buf)
2428 {
2429 	struct Scsi_Host *shost = class_to_shost(class_dev);
2430 	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
2431 	unsigned long lock_flags = 0;
2432 	int len;
2433 
2434 	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2435 	if (ioa_cfg->ioa_is_dead)
2436 		len = snprintf(buf, PAGE_SIZE, "offline\n");
2437 	else
2438 		len = snprintf(buf, PAGE_SIZE, "online\n");
2439 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2440 	return len;
2441 }
2442 
2443 /**
2444  * ipr_store_adapter_state - Change adapter state
2445  * @class_dev:	class_device struct
2446  * @buf:		buffer
2447  * @count:		buffer size
2448  *
2449  * This function will change the adapter's state.
2450  *
2451  * Return value:
2452  * 	count on success / other on failure
2453  **/
2454 static ssize_t ipr_store_adapter_state(struct class_device *class_dev,
2455 				       const char *buf, size_t count)
2456 {
2457 	struct Scsi_Host *shost = class_to_shost(class_dev);
2458 	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
2459 	unsigned long lock_flags;
2460 	int result = count;
2461 
2462 	if (!capable(CAP_SYS_ADMIN))
2463 		return -EACCES;
2464 
2465 	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2466 	if (ioa_cfg->ioa_is_dead && !strncmp(buf, "online", 6)) {
2467 		ioa_cfg->ioa_is_dead = 0;
2468 		ioa_cfg->reset_retries = 0;
2469 		ioa_cfg->in_ioa_bringdown = 0;
2470 		ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
2471 	}
2472 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2473 	wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
2474 
2475 	return result;
2476 }
2477 
2478 static struct class_device_attribute ipr_ioa_state_attr = {
2479 	.attr = {
2480 		.name =		"state",
2481 		.mode =		S_IRUGO | S_IWUSR,
2482 	},
2483 	.show = ipr_show_adapter_state,
2484 	.store = ipr_store_adapter_state
2485 };
2486 
2487 /**
2488  * ipr_store_reset_adapter - Reset the adapter
2489  * @class_dev:	class_device struct
2490  * @buf:		buffer
2491  * @count:		buffer size
2492  *
2493  * This function will reset the adapter.
2494  *
2495  * Return value:
2496  * 	count on success / other on failure
2497  **/
2498 static ssize_t ipr_store_reset_adapter(struct class_device *class_dev,
2499 				       const char *buf, size_t count)
2500 {
2501 	struct Scsi_Host *shost = class_to_shost(class_dev);
2502 	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
2503 	unsigned long lock_flags;
2504 	int result = count;
2505 
2506 	if (!capable(CAP_SYS_ADMIN))
2507 		return -EACCES;
2508 
2509 	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2510 	if (!ioa_cfg->in_reset_reload)
2511 		ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NORMAL);
2512 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2513 	wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
2514 
2515 	return result;
2516 }
2517 
2518 static struct class_device_attribute ipr_ioa_reset_attr = {
2519 	.attr = {
2520 		.name =		"reset_host",
2521 		.mode =		S_IWUSR,
2522 	},
2523 	.store = ipr_store_reset_adapter
2524 };
2525 
2526 /**
2527  * ipr_alloc_ucode_buffer - Allocates a microcode download buffer
2528  * @buf_len:		buffer length
2529  *
2530  * Allocates a DMA'able buffer in chunks and assembles a scatter/gather
2531  * list to use for microcode download
2532  *
2533  * Return value:
2534  * 	pointer to sglist / NULL on failure
2535  **/
2536 static struct ipr_sglist *ipr_alloc_ucode_buffer(int buf_len)
2537 {
2538 	int sg_size, order, bsize_elem, num_elem, i, j;
2539 	struct ipr_sglist *sglist;
2540 	struct scatterlist *scatterlist;
2541 	struct page *page;
2542 
2543 	/* Get the minimum size per scatter/gather element */
2544 	sg_size = buf_len / (IPR_MAX_SGLIST - 1);
2545 
2546 	/* Get the actual size per element */
2547 	order = get_order(sg_size);
2548 
2549 	/* Determine the actual number of bytes per element */
2550 	bsize_elem = PAGE_SIZE * (1 << order);
2551 
2552 	/* Determine the actual number of sg entries needed */
2553 	if (buf_len % bsize_elem)
2554 		num_elem = (buf_len / bsize_elem) + 1;
2555 	else
2556 		num_elem = buf_len / bsize_elem;
2557 
2558 	/* Allocate a scatter/gather list for the DMA */
2559 	sglist = kzalloc(sizeof(struct ipr_sglist) +
2560 			 (sizeof(struct scatterlist) * (num_elem - 1)),
2561 			 GFP_KERNEL);
2562 
2563 	if (sglist == NULL) {
2564 		ipr_trace;
2565 		return NULL;
2566 	}
2567 
2568 	scatterlist = sglist->scatterlist;
2569 
2570 	sglist->order = order;
2571 	sglist->num_sg = num_elem;
2572 
2573 	/* Allocate a bunch of sg elements */
2574 	for (i = 0; i < num_elem; i++) {
2575 		page = alloc_pages(GFP_KERNEL, order);
2576 		if (!page) {
2577 			ipr_trace;
2578 
2579 			/* Free up what we already allocated */
2580 			for (j = i - 1; j >= 0; j--)
2581 				__free_pages(scatterlist[j].page, order);
2582 			kfree(sglist);
2583 			return NULL;
2584 		}
2585 
2586 		scatterlist[i].page = page;
2587 	}
2588 
2589 	return sglist;
2590 }
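
/*
 * Worked example of the sizing math above, assuming 4 KB pages and an
 * IPR_MAX_SGLIST of 64 (values illustrative): for a 1 MB image,
 * sg_size = 1048576 / 63 = 16644 and get_order(16644) = 3, so each
 * element holds bsize_elem = 4096 << 3 = 32768 bytes and the download
 * needs num_elem = 1048576 / 32768 = 32 scatter/gather entries,
 * comfortably under the limit.
 */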
2591 
2592 /**
2593  * ipr_free_ucode_buffer - Frees a microcode download buffer
2594  * @sglist:		scatter/gather list pointer
2595  *
2596  * Free a DMA'able ucode download buffer previously allocated with
2597  * ipr_alloc_ucode_buffer
2598  *
2599  * Return value:
2600  * 	nothing
2601  **/
2602 static void ipr_free_ucode_buffer(struct ipr_sglist *sglist)
2603 {
2604 	int i;
2605 
2606 	for (i = 0; i < sglist->num_sg; i++)
2607 		__free_pages(sglist->scatterlist[i].page, sglist->order);
2608 
2609 	kfree(sglist);
2610 }
2611 
2612 /**
2613  * ipr_copy_ucode_buffer - Copy user buffer to kernel buffer
2614  * @sglist:		scatter/gather list pointer
2615  * @buffer:		buffer pointer
2616  * @len:		buffer length
2617  *
2618  * Copy a microcode image from a user buffer into a buffer allocated by
2619  * ipr_alloc_ucode_buffer
2620  *
2621  * Return value:
2622  * 	0 on success / other on failure
2623  **/
2624 static int ipr_copy_ucode_buffer(struct ipr_sglist *sglist,
2625 				 u8 *buffer, u32 len)
2626 {
2627 	int bsize_elem, i, result = 0;
2628 	struct scatterlist *scatterlist;
2629 	void *kaddr;
2630 
2631 	/* Determine the actual number of bytes per element */
2632 	bsize_elem = PAGE_SIZE * (1 << sglist->order);
2633 
2634 	scatterlist = sglist->scatterlist;
2635 
2636 	for (i = 0; i < (len / bsize_elem); i++, buffer += bsize_elem) {
2637 		kaddr = kmap(scatterlist[i].page);
2638 		memcpy(kaddr, buffer, bsize_elem);
2639 		kunmap(scatterlist[i].page);
2640 
2641 		scatterlist[i].length = bsize_elem;
2642 
2643 		if (result != 0) {
2644 			ipr_trace;
2645 			return result;
2646 		}
2647 	}
2648 
2649 	if (len % bsize_elem) {
2650 		kaddr = kmap(scatterlist[i].page);
2651 		memcpy(kaddr, buffer, len % bsize_elem);
2652 		kunmap(scatterlist[i].page);
2653 
2654 		scatterlist[i].length = len % bsize_elem;
2655 	}
2656 
2657 	sglist->buffer_len = len;
2658 	return result;
2659 }
2660 
2661 /**
2662  * ipr_build_ucode_ioadl - Build a microcode download IOADL
2663  * @ipr_cmd:	ipr command struct
2664  * @sglist:		scatter/gather list
2665  *
2666  * Builds a microcode download IOA data list (IOADL).
2667  *
2668  **/
2669 static void ipr_build_ucode_ioadl(struct ipr_cmnd *ipr_cmd,
2670 				  struct ipr_sglist *sglist)
2671 {
2672 	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
2673 	struct ipr_ioadl_desc *ioadl = ipr_cmd->ioadl;
2674 	struct scatterlist *scatterlist = sglist->scatterlist;
2675 	int i;
2676 
2677 	ipr_cmd->dma_use_sg = sglist->num_dma_sg;
2678 	ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
2679 	ioarcb->write_data_transfer_length = cpu_to_be32(sglist->buffer_len);
2680 	ioarcb->write_ioadl_len =
2681 		cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
2682 
2683 	for (i = 0; i < ipr_cmd->dma_use_sg; i++) {
2684 		ioadl[i].flags_and_data_len =
2685 			cpu_to_be32(IPR_IOADL_FLAGS_WRITE | sg_dma_len(&scatterlist[i]));
2686 		ioadl[i].address =
2687 			cpu_to_be32(sg_dma_address(&scatterlist[i]));
2688 	}
2689 
2690 	ioadl[i-1].flags_and_data_len |=
2691 		cpu_to_be32(IPR_IOADL_FLAGS_LAST);
2692 }
2693 
2694 /**
2695  * ipr_update_ioa_ucode - Update IOA's microcode
2696  * @ioa_cfg:	ioa config struct
2697  * @sglist:		scatter/gather list
2698  *
2699  * Initiate an adapter reset to update the IOA's microcode
2700  *
2701  * Return value:
2702  * 	0 on success / -EIO on failure
2703  **/
2704 static int ipr_update_ioa_ucode(struct ipr_ioa_cfg *ioa_cfg,
2705 				struct ipr_sglist *sglist)
2706 {
2707 	unsigned long lock_flags;
2708 
2709 	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2710 
2711 	if (ioa_cfg->ucode_sglist) {
2712 		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2713 		dev_err(&ioa_cfg->pdev->dev,
2714 			"Microcode download already in progress\n");
2715 		return -EIO;
2716 	}
2717 
2718 	sglist->num_dma_sg = pci_map_sg(ioa_cfg->pdev, sglist->scatterlist,
2719 					sglist->num_sg, DMA_TO_DEVICE);
2720 
2721 	if (!sglist->num_dma_sg) {
2722 		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2723 		dev_err(&ioa_cfg->pdev->dev,
2724 			"Failed to map microcode download buffer!\n");
2725 		return -EIO;
2726 	}
2727 
2728 	ioa_cfg->ucode_sglist = sglist;
2729 	ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NORMAL);
2730 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2731 	wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
2732 
2733 	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2734 	ioa_cfg->ucode_sglist = NULL;
2735 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2736 	return 0;
2737 }
2738 
2739 /**
2740  * ipr_store_update_fw - Update the firmware on the adapter
2741  * @class_dev:	class_device struct
2742  * @buf:		buffer
2743  * @count:		buffer size
2744  *
2745  * This function will update the firmware on the adapter.
2746  *
2747  * Return value:
2748  * 	count on success / other on failure
2749  **/
2750 static ssize_t ipr_store_update_fw(struct class_device *class_dev,
2751 				       const char *buf, size_t count)
2752 {
2753 	struct Scsi_Host *shost = class_to_shost(class_dev);
2754 	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
2755 	struct ipr_ucode_image_header *image_hdr;
2756 	const struct firmware *fw_entry;
2757 	struct ipr_sglist *sglist;
2758 	char fname[100];
2759 	u8 *src;
2760 	int result, dnld_size;
2761 
2762 	if (!capable(CAP_SYS_ADMIN))
2763 		return -EACCES;
2764 
2765 	snprintf(fname, sizeof(fname), "%s", buf);
2766 	fname[strcspn(fname, "\n")] = '\0';	/* strip any trailing newline */
2767 
2768 	if (request_firmware(&fw_entry, fname, &ioa_cfg->pdev->dev)) {
2769 		dev_err(&ioa_cfg->pdev->dev, "Firmware file %s not found\n", fname);
2770 		return -EIO;
2771 	}
2772 
2773 	image_hdr = (struct ipr_ucode_image_header *)fw_entry->data;
2774 
2775 	if (be32_to_cpu(image_hdr->header_length) > fw_entry->size ||
2776 	    (ioa_cfg->vpd_cbs->page3_data.card_type &&
2777 	     ioa_cfg->vpd_cbs->page3_data.card_type != image_hdr->card_type)) {
2778 		dev_err(&ioa_cfg->pdev->dev, "Invalid microcode buffer\n");
2779 		release_firmware(fw_entry);
2780 		return -EINVAL;
2781 	}
2782 
2783 	src = (u8 *)image_hdr + be32_to_cpu(image_hdr->header_length);
2784 	dnld_size = fw_entry->size - be32_to_cpu(image_hdr->header_length);
2785 	sglist = ipr_alloc_ucode_buffer(dnld_size);
2786 
2787 	if (!sglist) {
2788 		dev_err(&ioa_cfg->pdev->dev, "Microcode buffer allocation failed\n");
2789 		release_firmware(fw_entry);
2790 		return -ENOMEM;
2791 	}
2792 
2793 	result = ipr_copy_ucode_buffer(sglist, src, dnld_size);
2794 
2795 	if (result) {
2796 		dev_err(&ioa_cfg->pdev->dev,
2797 			"Microcode buffer copy to DMA buffer failed\n");
2798 		goto out;
2799 	}
2800 
2801 	result = ipr_update_ioa_ucode(ioa_cfg, sglist);
2802 
2803 	if (!result)
2804 		result = count;
2805 out:
2806 	ipr_free_ucode_buffer(sglist);
2807 	release_firmware(fw_entry);
2808 	return result;
2809 }
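
/*
 * Microcode update is driven entirely from sysfs: writing an image file
 * name to this attribute makes request_firmware() fetch the file through
 * the userspace firmware loader, after which the adapter is reset to
 * apply it, e.g. (file name and path illustrative):
 *
 *	echo ibm-adapter-ucode.bin > /sys/class/scsi_host/hostN/update_fw
 */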
2810 
2811 static struct class_device_attribute ipr_update_fw_attr = {
2812 	.attr = {
2813 		.name =		"update_fw",
2814 		.mode =		S_IWUSR,
2815 	},
2816 	.store = ipr_store_update_fw
2817 };
2818 
2819 static struct class_device_attribute *ipr_ioa_attrs[] = {
2820 	&ipr_fw_version_attr,
2821 	&ipr_log_level_attr,
2822 	&ipr_diagnostics_attr,
2823 	&ipr_ioa_state_attr,
2824 	&ipr_ioa_reset_attr,
2825 	&ipr_update_fw_attr,
2826 	&ipr_ioa_cache_attr,
2827 	NULL,
2828 };
2829 
2830 #ifdef CONFIG_SCSI_IPR_DUMP
2831 /**
2832  * ipr_read_dump - Dump the adapter
2833  * @kobj:		kobject struct
2834  * @buf:		buffer
2835  * @off:		offset
2836  * @count:		buffer size
2837  *
2838  * Return value:
2839  *	number of bytes printed to buffer
2840  **/
2841 static ssize_t ipr_read_dump(struct kobject *kobj, char *buf,
2842 			      loff_t off, size_t count)
2843 {
2844 	struct class_device *cdev = container_of(kobj, struct class_device, kobj);
2845 	struct Scsi_Host *shost = class_to_shost(cdev);
2846 	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
2847 	struct ipr_dump *dump;
2848 	unsigned long lock_flags = 0;
2849 	u8 *src;
2850 	int len;
2851 	size_t rc = count;
2852 
2853 	if (!capable(CAP_SYS_ADMIN))
2854 		return -EACCES;
2855 
2856 	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2857 	dump = ioa_cfg->dump;
2858 
2859 	if (ioa_cfg->sdt_state != DUMP_OBTAINED || !dump) {
2860 		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2861 		return 0;
2862 	}
2863 	kref_get(&dump->kref);
2864 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2865 
2866 	if (off > dump->driver_dump.hdr.len) {
2867 		kref_put(&dump->kref, ipr_release_dump);
2868 		return 0;
2869 	}
2870 
2871 	if (off + count > dump->driver_dump.hdr.len) {
2872 		count = dump->driver_dump.hdr.len - off;
2873 		rc = count;
2874 	}
2875 
2876 	if (count && off < sizeof(dump->driver_dump)) {
2877 		if (off + count > sizeof(dump->driver_dump))
2878 			len = sizeof(dump->driver_dump) - off;
2879 		else
2880 			len = count;
2881 		src = (u8 *)&dump->driver_dump + off;
2882 		memcpy(buf, src, len);
2883 		buf += len;
2884 		off += len;
2885 		count -= len;
2886 	}
2887 
2888 	off -= sizeof(dump->driver_dump);
2889 
2890 	if (count && off < offsetof(struct ipr_ioa_dump, ioa_data)) {
2891 		if (off + count > offsetof(struct ipr_ioa_dump, ioa_data))
2892 			len = offsetof(struct ipr_ioa_dump, ioa_data) - off;
2893 		else
2894 			len = count;
2895 		src = (u8 *)&dump->ioa_dump + off;
2896 		memcpy(buf, src, len);
2897 		buf += len;
2898 		off += len;
2899 		count -= len;
2900 	}
2901 
2902 	off -= offsetof(struct ipr_ioa_dump, ioa_data);
2903 
2904 	while (count) {
2905 		/* Copy at most up to the end of the current page. The old
2906 		 * PAGE_ALIGN(off) - off computation yielded a zero length
2907 		 * (and an endless loop) when off was already page aligned. */
2908 		len = min_t(size_t, count, PAGE_SIZE - (off & ~PAGE_MASK));
2909 		src = (u8 *)dump->ioa_dump.ioa_data[(off & PAGE_MASK) >> PAGE_SHIFT];
2910 		src += off & ~PAGE_MASK;
2911 		memcpy(buf, src, len);
2912 		buf += len;
2913 		off += len;
2914 		count -= len;
2915 	}
2916 
2917 	kref_put(&dump->kref, ipr_release_dump);
2918 	return rc;
2919 }
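
/*
 * The dump image read above is a flat concatenation of three regions:
 * bytes [0, sizeof(driver_dump)) come from the driver dump entries, the
 * next offsetof(struct ipr_ioa_dump, ioa_data) bytes come from the IOA
 * dump header, and every remaining offset is resolved into the page
 * array as ioa_data[off >> PAGE_SHIFT] plus off % PAGE_SIZE within that
 * page, one page at a time.
 */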
2920 
2921 /**
2922  * ipr_alloc_dump - Prepare for adapter dump
2923  * @ioa_cfg:	ioa config struct
2924  *
2925  * Return value:
2926  *	0 on success / other on failure
2927  **/
2928 static int ipr_alloc_dump(struct ipr_ioa_cfg *ioa_cfg)
2929 {
2930 	struct ipr_dump *dump;
2931 	unsigned long lock_flags = 0;
2932 
2933 	ENTER;
2934 	dump = kzalloc(sizeof(struct ipr_dump), GFP_KERNEL);
2935 
2936 	if (!dump) {
2937 		ipr_err("Dump memory allocation failed\n");
2938 		return -ENOMEM;
2939 	}
2940 
2941 	kref_init(&dump->kref);
2942 	dump->ioa_cfg = ioa_cfg;
2943 
2944 	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2945 
2946 	if (INACTIVE != ioa_cfg->sdt_state) {
2947 		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2948 		kfree(dump);
2949 		return 0;
2950 	}
2951 
2952 	ioa_cfg->dump = dump;
2953 	ioa_cfg->sdt_state = WAIT_FOR_DUMP;
2954 	if (ioa_cfg->ioa_is_dead && !ioa_cfg->dump_taken) {
2955 		ioa_cfg->dump_taken = 1;
2956 		schedule_work(&ioa_cfg->work_q);
2957 	}
2958 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2959 
2960 	LEAVE;
2961 	return 0;
2962 }
2963 
2964 /**
2965  * ipr_free_dump - Free adapter dump memory
2966  * @ioa_cfg:	ioa config struct
2967  *
2968  * Return value:
2969  *	0 on success / other on failure
2970  **/
2971 static int ipr_free_dump(struct ipr_ioa_cfg *ioa_cfg)
2972 {
2973 	struct ipr_dump *dump;
2974 	unsigned long lock_flags = 0;
2975 
2976 	ENTER;
2977 
2978 	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2979 	dump = ioa_cfg->dump;
2980 	if (!dump) {
2981 		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2982 		return 0;
2983 	}
2984 
2985 	ioa_cfg->dump = NULL;
2986 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2987 
2988 	kref_put(&dump->kref, ipr_release_dump);
2989 
2990 	LEAVE;
2991 	return 0;
2992 }
2993 
2994 /**
2995  * ipr_write_dump - Setup dump state of adapter
2996  * @kobj:		kobject struct
2997  * @buf:		buffer
2998  * @off:		offset
2999  * @count:		buffer size
3000  *
3001  * Return value:
3002  *	count on success / other on failure
3003  **/
3004 static ssize_t ipr_write_dump(struct kobject *kobj, char *buf,
3005 			      loff_t off, size_t count)
3006 {
3007 	struct class_device *cdev = container_of(kobj, struct class_device, kobj);
3008 	struct Scsi_Host *shost = class_to_shost(cdev);
3009 	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3010 	int rc;
3011 
3012 	if (!capable(CAP_SYS_ADMIN))
3013 		return -EACCES;
3014 
3015 	if (buf[0] == '1')
3016 		rc = ipr_alloc_dump(ioa_cfg);
3017 	else if (buf[0] == '0')
3018 		rc = ipr_free_dump(ioa_cfg);
3019 	else
3020 		return -EINVAL;
3021 
3022 	if (rc)
3023 		return rc;
3024 	else
3025 		return count;
3026 }
3027 
3028 static struct bin_attribute ipr_dump_attr = {
3029 	.attr =	{
3030 		.name = "dump",
3031 		.mode = S_IRUSR | S_IWUSR,
3032 	},
3033 	.size = 0,
3034 	.read = ipr_read_dump,
3035 	.write = ipr_write_dump
3036 };
3037 #else
3038 static int ipr_free_dump(struct ipr_ioa_cfg *ioa_cfg) { return 0; }
3039 #endif
3040 
3041 /**
3042  * ipr_change_queue_depth - Change the device's queue depth
3043  * @sdev:	scsi device struct
3044  * @qdepth:	depth to set
3045  *
3046  * Return value:
3047  * 	actual depth set
3048  **/
3049 static int ipr_change_queue_depth(struct scsi_device *sdev, int qdepth)
3050 {
3051 	scsi_adjust_queue_depth(sdev, scsi_get_tag_type(sdev), qdepth);
3052 	return sdev->queue_depth;
3053 }
3054 
3055 /**
3056  * ipr_change_queue_type - Change the device's queue type
3057  * @sdev:		scsi device struct
3058  * @tag_type:	type of tags to use
3059  *
3060  * Return value:
3061  * 	actual queue type set
3062  **/
3063 static int ipr_change_queue_type(struct scsi_device *sdev, int tag_type)
3064 {
3065 	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
3066 	struct ipr_resource_entry *res;
3067 	unsigned long lock_flags = 0;
3068 
3069 	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3070 	res = (struct ipr_resource_entry *)sdev->hostdata;
3071 
3072 	if (res) {
3073 		if (ipr_is_gscsi(res) && sdev->tagged_supported) {
3074 			/*
3075 			 * We don't bother quiescing the device here since the
3076 			 * adapter firmware does it for us.
3077 			 */
3078 			scsi_set_tag_type(sdev, tag_type);
3079 
3080 			if (tag_type)
3081 				scsi_activate_tcq(sdev, sdev->queue_depth);
3082 			else
3083 				scsi_deactivate_tcq(sdev, sdev->queue_depth);
3084 		} else
3085 			tag_type = 0;
3086 	} else
3087 		tag_type = 0;
3088 
3089 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3090 	return tag_type;
3091 }
3092 
3093 /**
3094  * ipr_show_adapter_handle - Show the adapter's resource handle for this device
3095  * @dev:	device struct
3096  * @buf:	buffer
3097  *
3098  * Return value:
3099  * 	number of bytes printed to buffer
3100  **/
3101 static ssize_t ipr_show_adapter_handle(struct device *dev, struct device_attribute *attr, char *buf)
3102 {
3103 	struct scsi_device *sdev = to_scsi_device(dev);
3104 	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
3105 	struct ipr_resource_entry *res;
3106 	unsigned long lock_flags = 0;
3107 	ssize_t len = -ENXIO;
3108 
3109 	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3110 	res = (struct ipr_resource_entry *)sdev->hostdata;
3111 	if (res)
3112 		len = snprintf(buf, PAGE_SIZE, "%08X\n", res->cfgte.res_handle);
3113 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3114 	return len;
3115 }
3116 
3117 static struct device_attribute ipr_adapter_handle_attr = {
3118 	.attr = {
3119 		.name = 	"adapter_handle",
3120 		.mode =		S_IRUSR,
3121 	},
3122 	.show = ipr_show_adapter_handle
3123 };
3124 
3125 static struct device_attribute *ipr_dev_attrs[] = {
3126 	&ipr_adapter_handle_attr,
3127 	NULL,
3128 };
3129 
3130 /**
3131  * ipr_biosparam - Return the HSC mapping
3132  * @sdev:			scsi device struct
3133  * @block_device:	block device pointer
3134  * @capacity:		capacity of the device
3135  * @parm:			Array containing returned HSC values.
3136  *
3137  * This function generates the HSC parms that fdisk uses.
3138  * We want to make sure we return something that places partitions
3139  * on 4k boundaries for best performance with the IOA.
3140  *
3141  * Return value:
3142  * 	0 on success
3143  **/
3144 static int ipr_biosparam(struct scsi_device *sdev,
3145 			 struct block_device *block_device,
3146 			 sector_t capacity, int *parm)
3147 {
3148 	int heads, sectors;
3149 	sector_t cylinders;
3150 
3151 	heads = 128;
3152 	sectors = 32;
3153 
3154 	cylinders = capacity;
3155 	sector_div(cylinders, (128 * 32));
3156 
3157 	/* return result */
3158 	parm[0] = heads;
3159 	parm[1] = sectors;
3160 	parm[2] = cylinders;
3161 
3162 	return 0;
3163 }
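
/*
 * With 128 heads and 32 sectors per track, one cylinder is 128 * 32 =
 * 4096 sectors (2 MB with 512-byte sectors), so any partition that fdisk
 * aligns to a cylinder boundary also starts on a 4 KB multiple. For
 * example (illustrative capacity), a 71096640-sector disk reports
 * 71096640 / 4096 = 17357 cylinders.
 */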
3164 
3165 /**
3166  * ipr_slave_destroy - Unconfigure a SCSI device
3167  * @sdev:	scsi device struct
3168  *
3169  * Return value:
3170  * 	nothing
3171  **/
3172 static void ipr_slave_destroy(struct scsi_device *sdev)
3173 {
3174 	struct ipr_resource_entry *res;
3175 	struct ipr_ioa_cfg *ioa_cfg;
3176 	unsigned long lock_flags = 0;
3177 
3178 	ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;
3179 
3180 	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3181 	res = (struct ipr_resource_entry *) sdev->hostdata;
3182 	if (res) {
3183 		sdev->hostdata = NULL;
3184 		res->sdev = NULL;
3185 	}
3186 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3187 }
3188 
3189 /**
3190  * ipr_slave_configure - Configure a SCSI device
3191  * @sdev:	scsi device struct
3192  *
3193  * This function configures the specified scsi device.
3194  *
3195  * Return value:
3196  * 	0 on success
3197  **/
3198 static int ipr_slave_configure(struct scsi_device *sdev)
3199 {
3200 	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;
3201 	struct ipr_resource_entry *res;
3202 	unsigned long lock_flags = 0;
3203 
3204 	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3205 	res = sdev->hostdata;
3206 	if (res) {
3207 		if (ipr_is_af_dasd_device(res))
3208 			sdev->type = TYPE_RAID;
3209 		if (ipr_is_af_dasd_device(res) || ipr_is_ioa_resource(res)) {
3210 			sdev->scsi_level = 4;
3211 			sdev->no_uld_attach = 1;
3212 		}
3213 		if (ipr_is_vset_device(res)) {
3214 			sdev->timeout = IPR_VSET_RW_TIMEOUT;
3215 			blk_queue_max_sectors(sdev->request_queue, IPR_VSET_MAX_SECTORS);
3216 		}
3217 		if (IPR_IS_DASD_DEVICE(res->cfgte.std_inq_data))
3218 			sdev->allow_restart = 1;
3219 		scsi_adjust_queue_depth(sdev, 0, sdev->host->cmd_per_lun);
3220 	}
3221 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3222 	return 0;
3223 }
3224 
3225 /**
3226  * ipr_slave_alloc - Prepare for commands to a device.
3227  * @sdev:	scsi device struct
3228  *
3229  * This function saves a pointer to the resource entry
3230  * in the scsi device struct if the device exists. We
3231  * can then use this pointer in ipr_queuecommand when
3232  * handling new commands.
3233  *
3234  * Return value:
3235  * 	0 on success / -ENXIO if device does not exist
3236  **/
3237 static int ipr_slave_alloc(struct scsi_device *sdev)
3238 {
3239 	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;
3240 	struct ipr_resource_entry *res;
3241 	unsigned long lock_flags;
3242 	int rc = -ENXIO;
3243 
3244 	sdev->hostdata = NULL;
3245 
3246 	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3247 
3248 	list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
3249 		if ((res->cfgte.res_addr.bus == sdev->channel) &&
3250 		    (res->cfgte.res_addr.target == sdev->id) &&
3251 		    (res->cfgte.res_addr.lun == sdev->lun)) {
3252 			res->sdev = sdev;
3253 			res->add_to_ml = 0;
3254 			res->in_erp = 0;
3255 			sdev->hostdata = res;
3256 			if (!ipr_is_naca_model(res))
3257 				res->needs_sync_complete = 1;
3258 			rc = 0;
3259 			break;
3260 		}
3261 	}
3262 
3263 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3264 
3265 	return rc;
3266 }
3267 
3268 /**
3269  * __ipr_eh_host_reset - Reset the host adapter
3270  * @scsi_cmd:	scsi command struct
3271  *
3272  * Return value:
3273  * 	SUCCESS / FAILED
3274  **/
3275 static int __ipr_eh_host_reset(struct scsi_cmnd *scsi_cmd)
3276 {
3277 	struct ipr_ioa_cfg *ioa_cfg;
3278 	int rc;
3279 
3280 	ENTER;
3281 	ioa_cfg = (struct ipr_ioa_cfg *) scsi_cmd->device->host->hostdata;
3282 
3283 	dev_err(&ioa_cfg->pdev->dev,
3284 		"Adapter being reset as a result of error recovery.\n");
3285 
3286 	if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
3287 		ioa_cfg->sdt_state = GET_DUMP;
3288 
3289 	rc = ipr_reset_reload(ioa_cfg, IPR_SHUTDOWN_ABBREV);
3290 
3291 	LEAVE;
3292 	return rc;
3293 }
3294 
3295 static int ipr_eh_host_reset(struct scsi_cmnd *cmd)
3296 {
3297 	int rc;
3298 
3299 	spin_lock_irq(cmd->device->host->host_lock);
3300 	rc = __ipr_eh_host_reset(cmd);
3301 	spin_unlock_irq(cmd->device->host->host_lock);
3302 
3303 	return rc;
3304 }
3305 
3306 /**
3307  * __ipr_eh_dev_reset - Reset the device
3308  * @scsi_cmd:	scsi command struct
3309  *
3310  * This function issues a device reset to the affected device.
3311  * A LUN reset will be sent to the device first. If that does
3312  * not work, a target reset will be sent.
3313  *
3314  * Return value:
3315  *	SUCCESS / FAILED
3316  **/
3317 static int __ipr_eh_dev_reset(struct scsi_cmnd *scsi_cmd)
3318 {
3319 	struct ipr_cmnd *ipr_cmd;
3320 	struct ipr_ioa_cfg *ioa_cfg;
3321 	struct ipr_resource_entry *res;
3322 	struct ipr_cmd_pkt *cmd_pkt;
3323 	u32 ioasc;
3324 
3325 	ENTER;
3326 	ioa_cfg = (struct ipr_ioa_cfg *) scsi_cmd->device->host->hostdata;
3327 	res = scsi_cmd->device->hostdata;
3328 
3329 	if (!res)
3330 		return FAILED;
3331 
3332 	/*
3333 	 * If we are currently going through reset/reload, return failed.
3334 	 * This will force the mid-layer to call ipr_eh_host_reset, which
3335 	 * will then go to sleep and wait for the reset to complete.
3336 	 */
3337 	if (ioa_cfg->in_reset_reload)
3338 		return FAILED;
3339 	if (ioa_cfg->ioa_is_dead)
3340 		return FAILED;
3341 
3342 	list_for_each_entry(ipr_cmd, &ioa_cfg->pending_q, queue) {
3343 		if (ipr_cmd->ioarcb.res_handle == res->cfgte.res_handle) {
3344 			if (ipr_cmd->scsi_cmd)
3345 				ipr_cmd->done = ipr_scsi_eh_done;
3346 		}
3347 	}
3348 
3349 	res->resetting_device = 1;
3350 
3351 	ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
3352 
3353 	ipr_cmd->ioarcb.res_handle = res->cfgte.res_handle;
3354 	cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt;
3355 	cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
3356 	cmd_pkt->cdb[0] = IPR_RESET_DEVICE;
3357 
3358 	ipr_sdev_err(scsi_cmd->device, "Resetting device\n");
3359 	ipr_send_blocking_cmd(ipr_cmd, ipr_timeout, IPR_DEVICE_RESET_TIMEOUT);
3360 
3361 	ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
3362 
3363 	res->resetting_device = 0;
3364 
3365 	list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
3366 
3367 	LEAVE;
3368 	return (IPR_IOASC_SENSE_KEY(ioasc) ? FAILED : SUCCESS);
3369 }
3370 
3371 static int ipr_eh_dev_reset(struct scsi_cmnd *cmd)
3372 {
3373 	int rc;
3374 
3375 	spin_lock_irq(cmd->device->host->host_lock);
3376 	rc = __ipr_eh_dev_reset(cmd);
3377 	spin_unlock_irq(cmd->device->host->host_lock);
3378 
3379 	return rc;
3380 }
3381 
3382 /**
3383  * ipr_bus_reset_done - Op done function for bus reset.
3384  * @ipr_cmd:	ipr command struct
3385  *
3386  * This function is the op done function for a bus reset
3387  *
3388  * Return value:
3389  * 	none
3390  **/
3391 static void ipr_bus_reset_done(struct ipr_cmnd *ipr_cmd)
3392 {
3393 	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
3394 	struct ipr_resource_entry *res;
3395 
3396 	ENTER;
3397 	list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
3398 		if (!memcmp(&res->cfgte.res_handle, &ipr_cmd->ioarcb.res_handle,
3399 			    sizeof(res->cfgte.res_handle))) {
3400 			scsi_report_bus_reset(ioa_cfg->host, res->cfgte.res_addr.bus);
3401 			break;
3402 		}
3403 	}
3404 
3405 	/*
3406 	 * If abort has not completed, indicate the reset has, else call the
3407 	 * abort's done function to wake the sleeping eh thread
3408 	 */
3409 	if (ipr_cmd->sibling->sibling)
3410 		ipr_cmd->sibling->sibling = NULL;
3411 	else
3412 		ipr_cmd->sibling->done(ipr_cmd->sibling);
3413 
3414 	list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
3415 	LEAVE;
3416 }
3417 
3418 /**
3419  * ipr_abort_timeout - An abort task has timed out
3420  * @ipr_cmd:	ipr command struct
3421  *
3422  * This function handles when an abort task times out. If this
3423  * happens we issue a bus reset since we have resources tied
3424  * up that must be freed before returning to the midlayer.
3425  *
3426  * Return value:
3427  *	none
3428  **/
3429 static void ipr_abort_timeout(struct ipr_cmnd *ipr_cmd)
3430 {
3431 	struct ipr_cmnd *reset_cmd;
3432 	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
3433 	struct ipr_cmd_pkt *cmd_pkt;
3434 	unsigned long lock_flags = 0;
3435 
3436 	ENTER;
3437 	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3438 	if (ipr_cmd->completion.done || ioa_cfg->in_reset_reload) {
3439 		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3440 		return;
3441 	}
3442 
3443 	ipr_sdev_err(ipr_cmd->u.sdev, "Abort timed out. Resetting bus\n");
3444 	reset_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
3445 	ipr_cmd->sibling = reset_cmd;
3446 	reset_cmd->sibling = ipr_cmd;
3447 	reset_cmd->ioarcb.res_handle = ipr_cmd->ioarcb.res_handle;
3448 	cmd_pkt = &reset_cmd->ioarcb.cmd_pkt;
3449 	cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
3450 	cmd_pkt->cdb[0] = IPR_RESET_DEVICE;
3451 	cmd_pkt->cdb[2] = IPR_RESET_TYPE_SELECT | IPR_BUS_RESET;
3452 
3453 	ipr_do_req(reset_cmd, ipr_bus_reset_done, ipr_timeout, IPR_DEVICE_RESET_TIMEOUT);
3454 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3455 	LEAVE;
3456 }
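
/*
 * The abort command and the bus reset command are linked through their
 * sibling pointers (set above). ipr_bus_reset_done() uses that link to
 * decide what to do when the reset finishes: if the abort is still
 * outstanding, the link is severed so the abort completes normally;
 * otherwise the abort's done handler is called to wake the sleeping
 * error handler thread.
 */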
3457 
3458 /**
3459  * ipr_cancel_op - Cancel specified op
3460  * @scsi_cmd:	scsi command struct
3461  *
3462  * This function cancels specified op.
3463  *
3464  * Return value:
3465  *	SUCCESS / FAILED
3466  **/
3467 static int ipr_cancel_op(struct scsi_cmnd *scsi_cmd)
3468 {
3469 	struct ipr_cmnd *ipr_cmd;
3470 	struct ipr_ioa_cfg *ioa_cfg;
3471 	struct ipr_resource_entry *res;
3472 	struct ipr_cmd_pkt *cmd_pkt;
3473 	u32 ioasc;
3474 	int op_found = 0;
3475 
3476 	ENTER;
3477 	ioa_cfg = (struct ipr_ioa_cfg *)scsi_cmd->device->host->hostdata;
3478 	res = scsi_cmd->device->hostdata;
3479 
3480 	/* If we are currently going through reset/reload, return failed.
3481 	 * This will force the mid-layer to call ipr_eh_host_reset,
3482 	 * which will then go to sleep and wait for the reset to complete
3483 	 */
3484 	if (ioa_cfg->in_reset_reload || ioa_cfg->ioa_is_dead)
3485 		return FAILED;
3486 	if (!res || (!ipr_is_gscsi(res) && !ipr_is_vset_device(res)))
3487 		return FAILED;
3488 
3489 	list_for_each_entry(ipr_cmd, &ioa_cfg->pending_q, queue) {
3490 		if (ipr_cmd->scsi_cmd == scsi_cmd) {
3491 			ipr_cmd->done = ipr_scsi_eh_done;
3492 			op_found = 1;
3493 			break;
3494 		}
3495 	}
3496 
3497 	if (!op_found)
3498 		return SUCCESS;
3499 
3500 	ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
3501 	ipr_cmd->ioarcb.res_handle = res->cfgte.res_handle;
3502 	cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt;
3503 	cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
3504 	cmd_pkt->cdb[0] = IPR_CANCEL_ALL_REQUESTS;
3505 	ipr_cmd->u.sdev = scsi_cmd->device;
3506 
3507 	ipr_sdev_err(scsi_cmd->device, "Aborting command: %02X\n", scsi_cmd->cmnd[0]);
3508 	ipr_send_blocking_cmd(ipr_cmd, ipr_abort_timeout, IPR_CANCEL_ALL_TIMEOUT);
3509 	ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
3510 
3511 	/*
3512 	 * If the abort task timed out and we sent a bus reset, we will get
3513 	 * one of the following responses to the abort
3514 	 */
3515 	if (ioasc == IPR_IOASC_BUS_WAS_RESET || ioasc == IPR_IOASC_SYNC_REQUIRED) {
3516 		ioasc = 0;
3517 		ipr_trace;
3518 	}
3519 
3520 	list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
3521 	if (!ipr_is_naca_model(res))
3522 		res->needs_sync_complete = 1;
3523 
3524 	LEAVE;
3525 	return (IPR_IOASC_SENSE_KEY(ioasc) ? FAILED : SUCCESS);
3526 }
3527 
3528 /**
3529  * ipr_eh_abort - Abort a single op
3530  * @scsi_cmd:	scsi command struct
3531  *
3532  * Return value:
3533  * 	SUCCESS / FAILED
3534  **/
3535 static int ipr_eh_abort(struct scsi_cmnd *scsi_cmd)
3536 {
3537 	unsigned long flags;
3538 	int rc;
3539 
3540 	ENTER;
3541 
3542 	spin_lock_irqsave(scsi_cmd->device->host->host_lock, flags);
3543 	rc = ipr_cancel_op(scsi_cmd);
3544 	spin_unlock_irqrestore(scsi_cmd->device->host->host_lock, flags);
3545 
3546 	LEAVE;
3547 	return rc;
3548 }
3549 
3550 /**
3551  * ipr_handle_other_interrupt - Handle "other" interrupts
3552  * @ioa_cfg:	ioa config struct
3553  * @int_reg:	interrupt register
3554  *
3555  * Return value:
3556  * 	IRQ_NONE / IRQ_HANDLED
3557  **/
3558 static irqreturn_t ipr_handle_other_interrupt(struct ipr_ioa_cfg *ioa_cfg,
3559 					      volatile u32 int_reg)
3560 {
3561 	irqreturn_t rc = IRQ_HANDLED;
3562 
3563 	if (int_reg & IPR_PCII_IOA_TRANS_TO_OPER) {
3564 		/* Mask the interrupt */
3565 		writel(IPR_PCII_IOA_TRANS_TO_OPER, ioa_cfg->regs.set_interrupt_mask_reg);
3566 
3567 		/* Clear the interrupt */
3568 		writel(IPR_PCII_IOA_TRANS_TO_OPER, ioa_cfg->regs.clr_interrupt_reg);
3569 		int_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
3570 
3571 		list_del(&ioa_cfg->reset_cmd->queue);
3572 		del_timer(&ioa_cfg->reset_cmd->timer);
3573 		ipr_reset_ioa_job(ioa_cfg->reset_cmd);
3574 	} else {
3575 		if (int_reg & IPR_PCII_IOA_UNIT_CHECKED)
3576 			ioa_cfg->ioa_unit_checked = 1;
3577 		else
3578 			dev_err(&ioa_cfg->pdev->dev,
3579 				"Permanent IOA failure. 0x%08X\n", int_reg);
3580 
3581 		if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
3582 			ioa_cfg->sdt_state = GET_DUMP;
3583 
3584 		ipr_mask_and_clear_interrupts(ioa_cfg, ~0);
3585 		ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
3586 	}
3587 
3588 	return rc;
3589 }
3590 
3591 /**
3592  * ipr_isr - Interrupt service routine
3593  * @irq:	irq number
3594  * @devp:	pointer to ioa config struct
3595  * @regs:	pt_regs struct
3596  *
3597  * Return value:
3598  * 	IRQ_NONE / IRQ_HANDLED
3599  **/
3600 static irqreturn_t ipr_isr(int irq, void *devp, struct pt_regs *regs)
3601 {
3602 	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)devp;
3603 	unsigned long lock_flags = 0;
3604 	volatile u32 int_reg, int_mask_reg;
3605 	u32 ioasc;
3606 	u16 cmd_index;
3607 	struct ipr_cmnd *ipr_cmd;
3608 	irqreturn_t rc = IRQ_NONE;
3609 
3610 	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3611 
3612 	/* If interrupts are disabled, ignore the interrupt */
3613 	if (!ioa_cfg->allow_interrupts) {
3614 		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3615 		return IRQ_NONE;
3616 	}
3617 
3618 	int_mask_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
3619 	int_reg = readl(ioa_cfg->regs.sense_interrupt_reg) & ~int_mask_reg;
3620 
	/* If the interrupt did not come from this adapter, ignore it */
3622 	if (unlikely((int_reg & IPR_PCII_OPER_INTERRUPTS) == 0)) {
3623 		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3624 		return IRQ_NONE;
3625 	}
3626 
3627 	while (1) {
3628 		ipr_cmd = NULL;
3629 
3630 		while ((be32_to_cpu(*ioa_cfg->hrrq_curr) & IPR_HRRQ_TOGGLE_BIT) ==
3631 		       ioa_cfg->toggle_bit) {
3632 
3633 			cmd_index = (be32_to_cpu(*ioa_cfg->hrrq_curr) &
3634 				     IPR_HRRQ_REQ_RESP_HANDLE_MASK) >> IPR_HRRQ_REQ_RESP_HANDLE_SHIFT;
3635 
3636 			if (unlikely(cmd_index >= IPR_NUM_CMD_BLKS)) {
3637 				ioa_cfg->errors_logged++;
3638 				dev_err(&ioa_cfg->pdev->dev, "Invalid response handle from IOA\n");
3639 
3640 				if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
3641 					ioa_cfg->sdt_state = GET_DUMP;
3642 
3643 				ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
3644 				spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3645 				return IRQ_HANDLED;
3646 			}
3647 
3648 			ipr_cmd = ioa_cfg->ipr_cmnd_list[cmd_index];
3649 
3650 			ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
3651 
3652 			ipr_trc_hook(ipr_cmd, IPR_TRACE_FINISH, ioasc);
3653 
3654 			list_del(&ipr_cmd->queue);
3655 			del_timer(&ipr_cmd->timer);
3656 			ipr_cmd->done(ipr_cmd);
3657 
3658 			rc = IRQ_HANDLED;
3659 
3660 			if (ioa_cfg->hrrq_curr < ioa_cfg->hrrq_end) {
3661 				ioa_cfg->hrrq_curr++;
3662 			} else {
3663 				ioa_cfg->hrrq_curr = ioa_cfg->hrrq_start;
3664 				ioa_cfg->toggle_bit ^= 1u;
3665 			}
3666 		}
3667 
3668 		if (ipr_cmd != NULL) {
3669 			/* Clear the PCI interrupt */
3670 			writel(IPR_PCII_HRRQ_UPDATED, ioa_cfg->regs.clr_interrupt_reg);
3671 			int_reg = readl(ioa_cfg->regs.sense_interrupt_reg) & ~int_mask_reg;
3672 		} else
3673 			break;
3674 	}
3675 
3676 	if (unlikely(rc == IRQ_NONE))
3677 		rc = ipr_handle_other_interrupt(ioa_cfg, int_reg);
3678 
3679 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3680 	return rc;
3681 }
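
/*
 * Illustrative sketch (not driver code): each host RRQ entry is a
 * single big-endian word carrying a toggle bit and a response handle.
 * Decoding one entry amounts to:
 *
 *	u32 entry = be32_to_cpu(*ioa_cfg->hrrq_curr);
 *	int toggle = entry & IPR_HRRQ_TOGGLE_BIT;
 *	u16 index = (entry & IPR_HRRQ_REQ_RESP_HANDLE_MASK) >>
 *		    IPR_HRRQ_REQ_RESP_HANDLE_SHIFT;
 *
 * The adapter inverts the toggle it writes on each pass through the
 * ring, so an entry whose toggle matches ioa_cfg->toggle_bit is new;
 * no producer index ever has to be read across the bus.
 */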
3682 
3683 /**
3684  * ipr_build_ioadl - Build a scatter/gather list and map the buffer
3685  * @ioa_cfg:	ioa config struct
3686  * @ipr_cmd:	ipr command struct
3687  *
3688  * Return value:
3689  * 	0 on success / -1 on failure
3690  **/
3691 static int ipr_build_ioadl(struct ipr_ioa_cfg *ioa_cfg,
3692 			   struct ipr_cmnd *ipr_cmd)
3693 {
3694 	int i;
3695 	struct scatterlist *sglist;
3696 	u32 length;
3697 	u32 ioadl_flags = 0;
3698 	struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
3699 	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
3700 	struct ipr_ioadl_desc *ioadl = ipr_cmd->ioadl;
3701 
3702 	length = scsi_cmd->request_bufflen;
3703 
3704 	if (length == 0)
3705 		return 0;
3706 
3707 	if (scsi_cmd->use_sg) {
3708 		ipr_cmd->dma_use_sg = pci_map_sg(ioa_cfg->pdev,
3709 						 scsi_cmd->request_buffer,
3710 						 scsi_cmd->use_sg,
3711 						 scsi_cmd->sc_data_direction);
3712 
3713 		if (scsi_cmd->sc_data_direction == DMA_TO_DEVICE) {
3714 			ioadl_flags = IPR_IOADL_FLAGS_WRITE;
3715 			ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
3716 			ioarcb->write_data_transfer_length = cpu_to_be32(length);
3717 			ioarcb->write_ioadl_len =
3718 				cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
3719 		} else if (scsi_cmd->sc_data_direction == DMA_FROM_DEVICE) {
3720 			ioadl_flags = IPR_IOADL_FLAGS_READ;
3721 			ioarcb->read_data_transfer_length = cpu_to_be32(length);
3722 			ioarcb->read_ioadl_len =
3723 				cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
3724 		}
3725 
3726 		sglist = scsi_cmd->request_buffer;
3727 
3728 		for (i = 0; i < ipr_cmd->dma_use_sg; i++) {
3729 			ioadl[i].flags_and_data_len =
3730 				cpu_to_be32(ioadl_flags | sg_dma_len(&sglist[i]));
3731 			ioadl[i].address =
3732 				cpu_to_be32(sg_dma_address(&sglist[i]));
3733 		}
3734 
3735 		if (likely(ipr_cmd->dma_use_sg)) {
3736 			ioadl[i-1].flags_and_data_len |=
3737 				cpu_to_be32(IPR_IOADL_FLAGS_LAST);
3738 			return 0;
3739 		} else
3740 			dev_err(&ioa_cfg->pdev->dev, "pci_map_sg failed!\n");
3741 	} else {
3742 		if (scsi_cmd->sc_data_direction == DMA_TO_DEVICE) {
3743 			ioadl_flags = IPR_IOADL_FLAGS_WRITE;
3744 			ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
3745 			ioarcb->write_data_transfer_length = cpu_to_be32(length);
3746 			ioarcb->write_ioadl_len = cpu_to_be32(sizeof(struct ipr_ioadl_desc));
3747 		} else if (scsi_cmd->sc_data_direction == DMA_FROM_DEVICE) {
3748 			ioadl_flags = IPR_IOADL_FLAGS_READ;
3749 			ioarcb->read_data_transfer_length = cpu_to_be32(length);
3750 			ioarcb->read_ioadl_len = cpu_to_be32(sizeof(struct ipr_ioadl_desc));
3751 		}
3752 
3753 		ipr_cmd->dma_handle = pci_map_single(ioa_cfg->pdev,
3754 						     scsi_cmd->request_buffer, length,
3755 						     scsi_cmd->sc_data_direction);
3756 
3757 		if (likely(!pci_dma_mapping_error(ipr_cmd->dma_handle))) {
3758 			ipr_cmd->dma_use_sg = 1;
3759 			ioadl[0].flags_and_data_len =
3760 				cpu_to_be32(ioadl_flags | length | IPR_IOADL_FLAGS_LAST);
3761 			ioadl[0].address = cpu_to_be32(ipr_cmd->dma_handle);
3762 			return 0;
3763 		} else
3764 			dev_err(&ioa_cfg->pdev->dev, "pci_map_single failed!\n");
3765 	}
3766 
3767 	return -1;
3768 }
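
/*
 * Illustrative sketch (not driver code): an IOADL descriptor packs the
 * direction flags and byte count into one big-endian word, with the
 * 32-bit bus address in a second word. A single read descriptor for an
 * already-mapped buffer (hypothetical len/dma_handle) would be:
 *
 *	ioadl[0].flags_and_data_len =
 *		cpu_to_be32(IPR_IOADL_FLAGS_READ_LAST | len);
 *	ioadl[0].address = cpu_to_be32(dma_handle);
 *
 * IPR_IOADL_FLAGS_LAST must be set on the final descriptor so the
 * adapter knows where the chain ends.
 */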
3769 
3770 /**
3771  * ipr_get_task_attributes - Translate SPI Q-Tag to task attributes
3772  * @scsi_cmd:	scsi command struct
3773  *
3774  * Return value:
3775  * 	task attributes
3776  **/
3777 static u8 ipr_get_task_attributes(struct scsi_cmnd *scsi_cmd)
3778 {
3779 	u8 tag[2];
3780 	u8 rc = IPR_FLAGS_LO_UNTAGGED_TASK;
3781 
3782 	if (scsi_populate_tag_msg(scsi_cmd, tag)) {
3783 		switch (tag[0]) {
3784 		case MSG_SIMPLE_TAG:
3785 			rc = IPR_FLAGS_LO_SIMPLE_TASK;
3786 			break;
3787 		case MSG_HEAD_TAG:
3788 			rc = IPR_FLAGS_LO_HEAD_OF_Q_TASK;
3789 			break;
3790 		case MSG_ORDERED_TAG:
3791 			rc = IPR_FLAGS_LO_ORDERED_TASK;
3792 			break;
		}
3794 	}
3795 
3796 	return rc;
3797 }
3798 
3799 /**
3800  * ipr_erp_done - Process completion of ERP for a device
3801  * @ipr_cmd:		ipr command struct
3802  *
3803  * This function copies the sense buffer into the scsi_cmd
 * struct and invokes the scsi_done function.
3805  *
3806  * Return value:
3807  * 	nothing
3808  **/
3809 static void ipr_erp_done(struct ipr_cmnd *ipr_cmd)
3810 {
3811 	struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
3812 	struct ipr_resource_entry *res = scsi_cmd->device->hostdata;
3813 	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
3814 	u32 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
3815 
3816 	if (IPR_IOASC_SENSE_KEY(ioasc) > 0) {
3817 		scsi_cmd->result |= (DID_ERROR << 16);
3818 		ipr_sdev_err(scsi_cmd->device,
3819 			     "Request Sense failed with IOASC: 0x%08X\n", ioasc);
3820 	} else {
3821 		memcpy(scsi_cmd->sense_buffer, ipr_cmd->sense_buffer,
3822 		       SCSI_SENSE_BUFFERSIZE);
3823 	}
3824 
3825 	if (res) {
3826 		if (!ipr_is_naca_model(res))
3827 			res->needs_sync_complete = 1;
3828 		res->in_erp = 0;
3829 	}
3830 	ipr_unmap_sglist(ioa_cfg, ipr_cmd);
3831 	list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
3832 	scsi_cmd->scsi_done(scsi_cmd);
3833 }
3834 
3835 /**
3836  * ipr_reinit_ipr_cmnd_for_erp - Re-initialize a cmnd block to be used for ERP
3837  * @ipr_cmd:	ipr command struct
3838  *
3839  * Return value:
3840  * 	none
3841  **/
3842 static void ipr_reinit_ipr_cmnd_for_erp(struct ipr_cmnd *ipr_cmd)
3843 {
3844 	struct ipr_ioarcb *ioarcb;
3845 	struct ipr_ioasa *ioasa;
3846 
3847 	ioarcb = &ipr_cmd->ioarcb;
3848 	ioasa = &ipr_cmd->ioasa;
3849 
3850 	memset(&ioarcb->cmd_pkt, 0, sizeof(struct ipr_cmd_pkt));
3851 	ioarcb->write_data_transfer_length = 0;
3852 	ioarcb->read_data_transfer_length = 0;
3853 	ioarcb->write_ioadl_len = 0;
3854 	ioarcb->read_ioadl_len = 0;
3855 	ioasa->ioasc = 0;
3856 	ioasa->residual_data_len = 0;
3857 }
3858 
3859 /**
3860  * ipr_erp_request_sense - Send request sense to a device
3861  * @ipr_cmd:	ipr command struct
3862  *
3863  * This function sends a request sense to a device as a result
3864  * of a check condition.
3865  *
3866  * Return value:
3867  * 	nothing
3868  **/
3869 static void ipr_erp_request_sense(struct ipr_cmnd *ipr_cmd)
3870 {
3871 	struct ipr_cmd_pkt *cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt;
3872 	u32 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
3873 
3874 	if (IPR_IOASC_SENSE_KEY(ioasc) > 0) {
3875 		ipr_erp_done(ipr_cmd);
3876 		return;
3877 	}
3878 
3879 	ipr_reinit_ipr_cmnd_for_erp(ipr_cmd);
3880 
3881 	cmd_pkt->request_type = IPR_RQTYPE_SCSICDB;
3882 	cmd_pkt->cdb[0] = REQUEST_SENSE;
3883 	cmd_pkt->cdb[4] = SCSI_SENSE_BUFFERSIZE;
3884 	cmd_pkt->flags_hi |= IPR_FLAGS_HI_SYNC_OVERRIDE;
3885 	cmd_pkt->flags_hi |= IPR_FLAGS_HI_NO_ULEN_CHK;
3886 	cmd_pkt->timeout = cpu_to_be16(IPR_REQUEST_SENSE_TIMEOUT / HZ);
3887 
3888 	ipr_cmd->ioadl[0].flags_and_data_len =
3889 		cpu_to_be32(IPR_IOADL_FLAGS_READ_LAST | SCSI_SENSE_BUFFERSIZE);
3890 	ipr_cmd->ioadl[0].address =
3891 		cpu_to_be32(ipr_cmd->sense_buffer_dma);
3892 
3893 	ipr_cmd->ioarcb.read_ioadl_len =
3894 		cpu_to_be32(sizeof(struct ipr_ioadl_desc));
3895 	ipr_cmd->ioarcb.read_data_transfer_length =
3896 		cpu_to_be32(SCSI_SENSE_BUFFERSIZE);
3897 
3898 	ipr_do_req(ipr_cmd, ipr_erp_done, ipr_timeout,
3899 		   IPR_REQUEST_SENSE_TIMEOUT * 2);
3900 }
3901 
3902 /**
3903  * ipr_erp_cancel_all - Send cancel all to a device
3904  * @ipr_cmd:	ipr command struct
3905  *
3906  * This function sends a cancel all to a device to clear the
3907  * queue. If we are running TCQ on the device, QERR is set to 1,
3908  * which means all outstanding ops have been dropped on the floor.
3909  * Cancel all will return them to us.
3910  *
3911  * Return value:
3912  * 	nothing
3913  **/
3914 static void ipr_erp_cancel_all(struct ipr_cmnd *ipr_cmd)
3915 {
3916 	struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
3917 	struct ipr_resource_entry *res = scsi_cmd->device->hostdata;
3918 	struct ipr_cmd_pkt *cmd_pkt;
3919 
3920 	res->in_erp = 1;
3921 
3922 	ipr_reinit_ipr_cmnd_for_erp(ipr_cmd);
3923 
3924 	if (!scsi_get_tag_type(scsi_cmd->device)) {
3925 		ipr_erp_request_sense(ipr_cmd);
3926 		return;
3927 	}
3928 
3929 	cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt;
3930 	cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
3931 	cmd_pkt->cdb[0] = IPR_CANCEL_ALL_REQUESTS;
3932 
3933 	ipr_do_req(ipr_cmd, ipr_erp_request_sense, ipr_timeout,
3934 		   IPR_CANCEL_ALL_TIMEOUT);
3935 }
3936 
3937 /**
3938  * ipr_dump_ioasa - Dump contents of IOASA
3939  * @ioa_cfg:	ioa config struct
3940  * @ipr_cmd:	ipr command struct
3941  *
3942  * This function is invoked by the interrupt handler when ops
3943  * fail. It will log the IOASA if appropriate. Only called
3944  * for GPDD ops.
3945  *
3946  * Return value:
3947  * 	none
3948  **/
3949 static void ipr_dump_ioasa(struct ipr_ioa_cfg *ioa_cfg,
3950 			   struct ipr_cmnd *ipr_cmd)
3951 {
3952 	int i;
3953 	u16 data_len;
3954 	u32 ioasc;
3955 	struct ipr_ioasa *ioasa = &ipr_cmd->ioasa;
3956 	__be32 *ioasa_data = (__be32 *)ioasa;
3957 	int error_index;
3958 
3959 	ioasc = be32_to_cpu(ioasa->ioasc) & IPR_IOASC_IOASC_MASK;
3960 
3961 	if (0 == ioasc)
3962 		return;
3963 
3964 	if (ioa_cfg->log_level < IPR_DEFAULT_LOG_LEVEL)
3965 		return;
3966 
3967 	error_index = ipr_get_error(ioasc);
3968 
3969 	if (ioa_cfg->log_level < IPR_MAX_LOG_LEVEL) {
3970 		/* Don't log an error if the IOA already logged one */
3971 		if (ioasa->ilid != 0)
3972 			return;
3973 
3974 		if (ipr_error_table[error_index].log_ioasa == 0)
3975 			return;
3976 	}
3977 
3978 	ipr_sdev_err(ipr_cmd->scsi_cmd->device, "%s\n",
3979 		     ipr_error_table[error_index].error);
3980 
	if ((ioasa->u.gpdd.end_state < ARRAY_SIZE(ipr_gpdd_dev_end_states)) &&
	    (ioasa->u.gpdd.bus_phase < ARRAY_SIZE(ipr_gpdd_dev_bus_phases))) {
3983 		ipr_sdev_err(ipr_cmd->scsi_cmd->device,
3984 			     "Device End state: %s Phase: %s\n",
3985 			     ipr_gpdd_dev_end_states[ioasa->u.gpdd.end_state],
3986 			     ipr_gpdd_dev_bus_phases[ioasa->u.gpdd.bus_phase]);
3987 	}
3988 
3989 	if (sizeof(struct ipr_ioasa) < be16_to_cpu(ioasa->ret_stat_len))
3990 		data_len = sizeof(struct ipr_ioasa);
3991 	else
3992 		data_len = be16_to_cpu(ioasa->ret_stat_len);
3993 
3994 	ipr_err("IOASA Dump:\n");
3995 
3996 	for (i = 0; i < data_len / 4; i += 4) {
3997 		ipr_err("%08X: %08X %08X %08X %08X\n", i*4,
3998 			be32_to_cpu(ioasa_data[i]),
3999 			be32_to_cpu(ioasa_data[i+1]),
4000 			be32_to_cpu(ioasa_data[i+2]),
4001 			be32_to_cpu(ioasa_data[i+3]));
4002 	}
4003 }
4004 
4005 /**
4006  * ipr_gen_sense - Generate SCSI sense data from an IOASA
 * @ipr_cmd:	ipr command struct
4009  *
4010  * Return value:
4011  * 	none
4012  **/
4013 static void ipr_gen_sense(struct ipr_cmnd *ipr_cmd)
4014 {
4015 	u32 failing_lba;
4016 	u8 *sense_buf = ipr_cmd->scsi_cmd->sense_buffer;
4017 	struct ipr_resource_entry *res = ipr_cmd->scsi_cmd->device->hostdata;
4018 	struct ipr_ioasa *ioasa = &ipr_cmd->ioasa;
4019 	u32 ioasc = be32_to_cpu(ioasa->ioasc);
4020 
4021 	memset(sense_buf, 0, SCSI_SENSE_BUFFERSIZE);
4022 
4023 	if (ioasc >= IPR_FIRST_DRIVER_IOASC)
4024 		return;
4025 
4026 	ipr_cmd->scsi_cmd->result = SAM_STAT_CHECK_CONDITION;
4027 
4028 	if (ipr_is_vset_device(res) &&
4029 	    ioasc == IPR_IOASC_MED_DO_NOT_REALLOC &&
4030 	    ioasa->u.vset.failing_lba_hi != 0) {
4031 		sense_buf[0] = 0x72;
4032 		sense_buf[1] = IPR_IOASC_SENSE_KEY(ioasc);
4033 		sense_buf[2] = IPR_IOASC_SENSE_CODE(ioasc);
4034 		sense_buf[3] = IPR_IOASC_SENSE_QUAL(ioasc);
4035 
4036 		sense_buf[7] = 12;
4037 		sense_buf[8] = 0;
4038 		sense_buf[9] = 0x0A;
4039 		sense_buf[10] = 0x80;
4040 
4041 		failing_lba = be32_to_cpu(ioasa->u.vset.failing_lba_hi);
4042 
4043 		sense_buf[12] = (failing_lba & 0xff000000) >> 24;
4044 		sense_buf[13] = (failing_lba & 0x00ff0000) >> 16;
4045 		sense_buf[14] = (failing_lba & 0x0000ff00) >> 8;
4046 		sense_buf[15] = failing_lba & 0x000000ff;
4047 
4048 		failing_lba = be32_to_cpu(ioasa->u.vset.failing_lba_lo);
4049 
4050 		sense_buf[16] = (failing_lba & 0xff000000) >> 24;
4051 		sense_buf[17] = (failing_lba & 0x00ff0000) >> 16;
4052 		sense_buf[18] = (failing_lba & 0x0000ff00) >> 8;
4053 		sense_buf[19] = failing_lba & 0x000000ff;
4054 	} else {
4055 		sense_buf[0] = 0x70;
4056 		sense_buf[2] = IPR_IOASC_SENSE_KEY(ioasc);
4057 		sense_buf[12] = IPR_IOASC_SENSE_CODE(ioasc);
4058 		sense_buf[13] = IPR_IOASC_SENSE_QUAL(ioasc);
4059 
4060 		/* Illegal request */
4061 		if ((IPR_IOASC_SENSE_KEY(ioasc) == 0x05) &&
4062 		    (be32_to_cpu(ioasa->ioasc_specific) & IPR_FIELD_POINTER_VALID)) {
4063 			sense_buf[7] = 10;	/* additional length */
4064 
4065 			/* IOARCB was in error */
4066 			if (IPR_IOASC_SENSE_CODE(ioasc) == 0x24)
4067 				sense_buf[15] = 0xC0;
4068 			else	/* Parameter data was invalid */
4069 				sense_buf[15] = 0x80;
4070 
4071 			sense_buf[16] =
4072 			    ((IPR_FIELD_POINTER_MASK &
4073 			      be32_to_cpu(ioasa->ioasc_specific)) >> 8) & 0xff;
4074 			sense_buf[17] =
4075 			    (IPR_FIELD_POINTER_MASK &
4076 			     be32_to_cpu(ioasa->ioasc_specific)) & 0xff;
4077 		} else {
4078 			if (ioasc == IPR_IOASC_MED_DO_NOT_REALLOC) {
4079 				if (ipr_is_vset_device(res))
4080 					failing_lba = be32_to_cpu(ioasa->u.vset.failing_lba_lo);
4081 				else
4082 					failing_lba = be32_to_cpu(ioasa->u.dasd.failing_lba);
4083 
4084 				sense_buf[0] |= 0x80;	/* Or in the Valid bit */
4085 				sense_buf[3] = (failing_lba & 0xff000000) >> 24;
4086 				sense_buf[4] = (failing_lba & 0x00ff0000) >> 16;
4087 				sense_buf[5] = (failing_lba & 0x0000ff00) >> 8;
4088 				sense_buf[6] = failing_lba & 0x000000ff;
4089 			}
4090 
4091 			sense_buf[7] = 6;	/* additional length */
4092 		}
4093 	}
4094 }
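
/*
 * Illustrative sketch (not driver code): in the descriptor-format
 * (0x72) branch above, bytes 12-19 of the information descriptor hold
 * the 64-bit failing LBA big-endian, so a consumer could recover it
 * with:
 *
 *	u64 lba = 0;
 *	int i;
 *
 *	for (i = 12; i <= 19; i++)
 *		lba = (lba << 8) | sense_buf[i];
 *
 * The fixed-format (0x70) branch only has room for a 32-bit LBA in
 * bytes 3-6, which is why vset devices reporting a non-zero
 * failing_lba_hi take the descriptor-format path.
 */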
4095 
4096 /**
4097  * ipr_get_autosense - Copy autosense data to sense buffer
4098  * @ipr_cmd:	ipr command struct
4099  *
4100  * This function copies the autosense buffer to the buffer
4101  * in the scsi_cmd, if there is autosense available.
4102  *
4103  * Return value:
4104  *	1 if autosense was available / 0 if not
4105  **/
4106 static int ipr_get_autosense(struct ipr_cmnd *ipr_cmd)
4107 {
4108 	struct ipr_ioasa *ioasa = &ipr_cmd->ioasa;
4109 
4110 	if ((be32_to_cpu(ioasa->ioasc_specific) &
4111 	     (IPR_ADDITIONAL_STATUS_FMT | IPR_AUTOSENSE_VALID)) == 0)
4112 		return 0;
4113 
4114 	memcpy(ipr_cmd->scsi_cmd->sense_buffer, ioasa->auto_sense.data,
4115 	       min_t(u16, be16_to_cpu(ioasa->auto_sense.auto_sense_len),
4116 		   SCSI_SENSE_BUFFERSIZE));
4117 	return 1;
4118 }
4119 
4120 /**
4121  * ipr_erp_start - Process an error response for a SCSI op
4122  * @ioa_cfg:	ioa config struct
4123  * @ipr_cmd:	ipr command struct
4124  *
4125  * This function determines whether or not to initiate ERP
4126  * on the affected device.
4127  *
4128  * Return value:
4129  * 	nothing
4130  **/
4131 static void ipr_erp_start(struct ipr_ioa_cfg *ioa_cfg,
			  struct ipr_cmnd *ipr_cmd)
4133 {
4134 	struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
4135 	struct ipr_resource_entry *res = scsi_cmd->device->hostdata;
4136 	u32 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
4137 
4138 	if (!res) {
4139 		ipr_scsi_eh_done(ipr_cmd);
4140 		return;
4141 	}
4142 
4143 	if (ipr_is_gscsi(res))
4144 		ipr_dump_ioasa(ioa_cfg, ipr_cmd);
4145 	else
4146 		ipr_gen_sense(ipr_cmd);
4147 
4148 	switch (ioasc & IPR_IOASC_IOASC_MASK) {
4149 	case IPR_IOASC_ABORTED_CMD_TERM_BY_HOST:
4150 		if (ipr_is_naca_model(res))
4151 			scsi_cmd->result |= (DID_ABORT << 16);
4152 		else
4153 			scsi_cmd->result |= (DID_IMM_RETRY << 16);
4154 		break;
4155 	case IPR_IOASC_IR_RESOURCE_HANDLE:
4156 	case IPR_IOASC_IR_NO_CMDS_TO_2ND_IOA:
4157 		scsi_cmd->result |= (DID_NO_CONNECT << 16);
4158 		break;
4159 	case IPR_IOASC_HW_SEL_TIMEOUT:
4160 		scsi_cmd->result |= (DID_NO_CONNECT << 16);
4161 		if (!ipr_is_naca_model(res))
4162 			res->needs_sync_complete = 1;
4163 		break;
4164 	case IPR_IOASC_SYNC_REQUIRED:
4165 		if (!res->in_erp)
4166 			res->needs_sync_complete = 1;
4167 		scsi_cmd->result |= (DID_IMM_RETRY << 16);
4168 		break;
4169 	case IPR_IOASC_MED_DO_NOT_REALLOC: /* prevent retries */
4170 	case IPR_IOASA_IR_DUAL_IOA_DISABLED:
4171 		scsi_cmd->result |= (DID_PASSTHROUGH << 16);
4172 		break;
4173 	case IPR_IOASC_BUS_WAS_RESET:
4174 	case IPR_IOASC_BUS_WAS_RESET_BY_OTHER:
4175 		/*
4176 		 * Report the bus reset and ask for a retry. The device
		 * will return a CC/UA on the next command.
4178 		 */
4179 		if (!res->resetting_device)
4180 			scsi_report_bus_reset(ioa_cfg->host, scsi_cmd->device->channel);
4181 		scsi_cmd->result |= (DID_ERROR << 16);
4182 		if (!ipr_is_naca_model(res))
4183 			res->needs_sync_complete = 1;
4184 		break;
4185 	case IPR_IOASC_HW_DEV_BUS_STATUS:
4186 		scsi_cmd->result |= IPR_IOASC_SENSE_STATUS(ioasc);
4187 		if (IPR_IOASC_SENSE_STATUS(ioasc) == SAM_STAT_CHECK_CONDITION) {
4188 			if (!ipr_get_autosense(ipr_cmd)) {
4189 				if (!ipr_is_naca_model(res)) {
4190 					ipr_erp_cancel_all(ipr_cmd);
4191 					return;
4192 				}
4193 			}
4194 		}
4195 		if (!ipr_is_naca_model(res))
4196 			res->needs_sync_complete = 1;
4197 		break;
4198 	case IPR_IOASC_NR_INIT_CMD_REQUIRED:
4199 		break;
4200 	default:
4201 		scsi_cmd->result |= (DID_ERROR << 16);
4202 		if (!ipr_is_vset_device(res) && !ipr_is_naca_model(res))
4203 			res->needs_sync_complete = 1;
4204 		break;
4205 	}
4206 
4207 	ipr_unmap_sglist(ioa_cfg, ipr_cmd);
4208 	list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
4209 	scsi_cmd->scsi_done(scsi_cmd);
4210 }
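
/*
 * Note: the mid-layer result word is laid out as (driver_byte << 24 |
 * host_byte << 16 | msg_byte << 8 | status_byte), which is why
 * host-level verdicts such as DID_ERROR are shifted left by 16 above
 * while IPR_IOASC_SENSE_STATUS() lands in the low status byte.
 */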
4211 
4212 /**
4213  * ipr_scsi_done - mid-layer done function
4214  * @ipr_cmd:	ipr command struct
4215  *
4216  * This function is invoked by the interrupt handler for
4217  * ops generated by the SCSI mid-layer
4218  *
4219  * Return value:
4220  * 	none
4221  **/
4222 static void ipr_scsi_done(struct ipr_cmnd *ipr_cmd)
4223 {
4224 	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
4225 	struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
4226 	u32 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
4227 
4228 	scsi_cmd->resid = be32_to_cpu(ipr_cmd->ioasa.residual_data_len);
4229 
4230 	if (likely(IPR_IOASC_SENSE_KEY(ioasc) == 0)) {
4231 		ipr_unmap_sglist(ioa_cfg, ipr_cmd);
4232 		list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
4233 		scsi_cmd->scsi_done(scsi_cmd);
4234 	} else
4235 		ipr_erp_start(ioa_cfg, ipr_cmd);
4236 }
4237 
4238 /**
4239  * ipr_queuecommand - Queue a mid-layer request
4240  * @scsi_cmd:	scsi command struct
4241  * @done:		done function
4242  *
4243  * This function queues a request generated by the mid-layer.
4244  *
4245  * Return value:
4246  *	0 on success
4247  *	SCSI_MLQUEUE_DEVICE_BUSY if device is busy
4248  *	SCSI_MLQUEUE_HOST_BUSY if host is busy
4249  **/
4250 static int ipr_queuecommand(struct scsi_cmnd *scsi_cmd,
4251 			    void (*done) (struct scsi_cmnd *))
4252 {
4253 	struct ipr_ioa_cfg *ioa_cfg;
4254 	struct ipr_resource_entry *res;
4255 	struct ipr_ioarcb *ioarcb;
4256 	struct ipr_cmnd *ipr_cmd;
4257 	int rc = 0;
4258 
4259 	scsi_cmd->scsi_done = done;
4260 	ioa_cfg = (struct ipr_ioa_cfg *)scsi_cmd->device->host->hostdata;
4261 	res = scsi_cmd->device->hostdata;
4262 	scsi_cmd->result = (DID_OK << 16);
4263 
4264 	/*
	 * We are currently blocking all devices due to a host reset.
4266 	 * We have told the host to stop giving us new requests, but
4267 	 * ERP ops don't count. FIXME
4268 	 */
4269 	if (unlikely(!ioa_cfg->allow_cmds && !ioa_cfg->ioa_is_dead))
4270 		return SCSI_MLQUEUE_HOST_BUSY;
4271 
4272 	/*
4273 	 * FIXME - Create scsi_set_host_offline interface
4274 	 *  and the ioa_is_dead check can be removed
4275 	 */
4276 	if (unlikely(ioa_cfg->ioa_is_dead || !res)) {
4277 		memset(scsi_cmd->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
4278 		scsi_cmd->result = (DID_NO_CONNECT << 16);
4279 		scsi_cmd->scsi_done(scsi_cmd);
4280 		return 0;
4281 	}
4282 
4283 	ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
4284 	ioarcb = &ipr_cmd->ioarcb;
4285 	list_add_tail(&ipr_cmd->queue, &ioa_cfg->pending_q);
4286 
4287 	memcpy(ioarcb->cmd_pkt.cdb, scsi_cmd->cmnd, scsi_cmd->cmd_len);
4288 	ipr_cmd->scsi_cmd = scsi_cmd;
4289 	ioarcb->res_handle = res->cfgte.res_handle;
4290 	ipr_cmd->done = ipr_scsi_done;
4291 	ipr_trc_hook(ipr_cmd, IPR_TRACE_START, IPR_GET_PHYS_LOC(res->cfgte.res_addr));
4292 
4293 	if (ipr_is_gscsi(res) || ipr_is_vset_device(res)) {
4294 		if (scsi_cmd->underflow == 0)
4295 			ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_ULEN_CHK;
4296 
4297 		if (res->needs_sync_complete) {
4298 			ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_SYNC_COMPLETE;
4299 			res->needs_sync_complete = 0;
4300 		}
4301 
4302 		ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_LINK_DESC;
4303 		ioarcb->cmd_pkt.flags_lo |= IPR_FLAGS_LO_DELAY_AFTER_RST;
4304 		ioarcb->cmd_pkt.flags_lo |= IPR_FLAGS_LO_ALIGNED_BFR;
4305 		ioarcb->cmd_pkt.flags_lo |= ipr_get_task_attributes(scsi_cmd);
4306 	}
4307 
4308 	if (scsi_cmd->cmnd[0] >= 0xC0 &&
4309 	    (!ipr_is_gscsi(res) || scsi_cmd->cmnd[0] == IPR_QUERY_RSRC_STATE))
4310 		ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
4311 
4312 	if (likely(rc == 0))
4313 		rc = ipr_build_ioadl(ioa_cfg, ipr_cmd);
4314 
4315 	if (likely(rc == 0)) {
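		/*
		 * Order the IOARCB/IOADL stores ahead of the MMIO
		 * doorbell write below so the adapter never fetches a
		 * half-built request.
		 */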
4316 		mb();
4317 		writel(be32_to_cpu(ipr_cmd->ioarcb.ioarcb_host_pci_addr),
4318 		       ioa_cfg->regs.ioarrin_reg);
4319 	} else {
		list_move_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
		return SCSI_MLQUEUE_HOST_BUSY;
4322 	}
4323 
4324 	return 0;
4325 }
4326 
4327 /**
 * ipr_ioa_info - Get information about the card/driver
 * @host:	scsi host struct
4330  *
4331  * Return value:
4332  * 	pointer to buffer with description string
4333  **/
4334 static const char * ipr_ioa_info(struct Scsi_Host *host)
4335 {
4336 	static char buffer[512];
4337 	struct ipr_ioa_cfg *ioa_cfg;
4338 	unsigned long lock_flags = 0;
4339 
4340 	ioa_cfg = (struct ipr_ioa_cfg *) host->hostdata;
4341 
4342 	spin_lock_irqsave(host->host_lock, lock_flags);
4343 	sprintf(buffer, "IBM %X Storage Adapter", ioa_cfg->type);
4344 	spin_unlock_irqrestore(host->host_lock, lock_flags);
4345 
4346 	return buffer;
4347 }
4348 
4349 static struct scsi_host_template driver_template = {
4350 	.module = THIS_MODULE,
4351 	.name = "IPR",
4352 	.info = ipr_ioa_info,
4353 	.queuecommand = ipr_queuecommand,
4354 	.eh_abort_handler = ipr_eh_abort,
4355 	.eh_device_reset_handler = ipr_eh_dev_reset,
4356 	.eh_host_reset_handler = ipr_eh_host_reset,
4357 	.slave_alloc = ipr_slave_alloc,
4358 	.slave_configure = ipr_slave_configure,
4359 	.slave_destroy = ipr_slave_destroy,
4360 	.change_queue_depth = ipr_change_queue_depth,
4361 	.change_queue_type = ipr_change_queue_type,
4362 	.bios_param = ipr_biosparam,
4363 	.can_queue = IPR_MAX_COMMANDS,
4364 	.this_id = -1,
4365 	.sg_tablesize = IPR_MAX_SGLIST,
4366 	.max_sectors = IPR_IOA_MAX_SECTORS,
4367 	.cmd_per_lun = IPR_MAX_CMD_PER_LUN,
4368 	.use_clustering = ENABLE_CLUSTERING,
4369 	.shost_attrs = ipr_ioa_attrs,
4370 	.sdev_attrs = ipr_dev_attrs,
4371 	.proc_name = IPR_NAME
4372 };
4373 
4374 #ifdef CONFIG_PPC_PSERIES
4375 static const u16 ipr_blocked_processors[] = {
4376 	PV_NORTHSTAR,
4377 	PV_PULSAR,
4378 	PV_POWER4,
4379 	PV_ICESTAR,
4380 	PV_SSTAR,
4381 	PV_POWER4p,
4382 	PV_630,
4383 	PV_630p
4384 };
4385 
4386 /**
4387  * ipr_invalid_adapter - Determine if this adapter is supported on this hardware
4388  * @ioa_cfg:	ioa cfg struct
4389  *
4390  * Adapters that use Gemstone revision < 3.1 do not work reliably on
4391  * certain pSeries hardware. This function determines if the given
 * adapter is in one of these configurations or not.
4393  *
4394  * Return value:
4395  * 	1 if adapter is not supported / 0 if adapter is supported
4396  **/
4397 static int ipr_invalid_adapter(struct ipr_ioa_cfg *ioa_cfg)
4398 {
4399 	u8 rev_id;
4400 	int i;
4401 
4402 	if (ioa_cfg->type == 0x5702) {
4403 		if (pci_read_config_byte(ioa_cfg->pdev, PCI_REVISION_ID,
4404 					 &rev_id) == PCIBIOS_SUCCESSFUL) {
4405 			if (rev_id < 4) {
				for (i = 0; i < ARRAY_SIZE(ipr_blocked_processors); i++) {
4407 					if (__is_processor(ipr_blocked_processors[i]))
4408 						return 1;
4409 				}
4410 			}
4411 		}
4412 	}
4413 	return 0;
4414 }
4415 #else
4416 #define ipr_invalid_adapter(ioa_cfg) 0
4417 #endif
4418 
4419 /**
4420  * ipr_ioa_bringdown_done - IOA bring down completion.
4421  * @ipr_cmd:	ipr command struct
4422  *
4423  * This function processes the completion of an adapter bring down.
4424  * It wakes any reset sleepers.
4425  *
4426  * Return value:
4427  * 	IPR_RC_JOB_RETURN
4428  **/
4429 static int ipr_ioa_bringdown_done(struct ipr_cmnd *ipr_cmd)
4430 {
4431 	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
4432 
4433 	ENTER;
4434 	ioa_cfg->in_reset_reload = 0;
4435 	ioa_cfg->reset_retries = 0;
4436 	list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
4437 	wake_up_all(&ioa_cfg->reset_wait_q);
4438 
4439 	spin_unlock_irq(ioa_cfg->host->host_lock);
4440 	scsi_unblock_requests(ioa_cfg->host);
4441 	spin_lock_irq(ioa_cfg->host->host_lock);
4442 	LEAVE;
4443 
4444 	return IPR_RC_JOB_RETURN;
4445 }
4446 
4447 /**
4448  * ipr_ioa_reset_done - IOA reset completion.
4449  * @ipr_cmd:	ipr command struct
4450  *
4451  * This function processes the completion of an adapter reset.
4452  * It schedules any necessary mid-layer add/removes and
4453  * wakes any reset sleepers.
4454  *
4455  * Return value:
4456  * 	IPR_RC_JOB_RETURN
4457  **/
4458 static int ipr_ioa_reset_done(struct ipr_cmnd *ipr_cmd)
4459 {
4460 	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
4461 	struct ipr_resource_entry *res;
4462 	struct ipr_hostrcb *hostrcb, *temp;
4463 	int i = 0;
4464 
4465 	ENTER;
4466 	ioa_cfg->in_reset_reload = 0;
4467 	ioa_cfg->allow_cmds = 1;
4468 	ioa_cfg->reset_cmd = NULL;
4469 	ioa_cfg->doorbell |= IPR_RUNTIME_RESET;
4470 
4471 	list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
4472 		if (ioa_cfg->allow_ml_add_del && (res->add_to_ml || res->del_from_ml)) {
4473 			ipr_trace;
4474 			break;
4475 		}
4476 	}
4477 	schedule_work(&ioa_cfg->work_q);
4478 
4479 	list_for_each_entry_safe(hostrcb, temp, &ioa_cfg->hostrcb_free_q, queue) {
4480 		list_del(&hostrcb->queue);
4481 		if (i++ < IPR_NUM_LOG_HCAMS)
4482 			ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_LOG_DATA, hostrcb);
4483 		else
4484 			ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE, hostrcb);
4485 	}
4486 
4487 	dev_info(&ioa_cfg->pdev->dev, "IOA initialized.\n");
4488 
4489 	ioa_cfg->reset_retries = 0;
4490 	list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
4491 	wake_up_all(&ioa_cfg->reset_wait_q);
4492 
4493 	spin_unlock_irq(ioa_cfg->host->host_lock);
4494 	scsi_unblock_requests(ioa_cfg->host);
4495 	spin_lock_irq(ioa_cfg->host->host_lock);
4496 
4497 	if (!ioa_cfg->allow_cmds)
4498 		scsi_block_requests(ioa_cfg->host);
4499 
4500 	LEAVE;
4501 	return IPR_RC_JOB_RETURN;
4502 }
4503 
4504 /**
4505  * ipr_set_sup_dev_dflt - Initialize a Set Supported Device buffer
4506  * @supported_dev:	supported device struct
4507  * @vpids:			vendor product id struct
4508  *
4509  * Return value:
4510  * 	none
4511  **/
4512 static void ipr_set_sup_dev_dflt(struct ipr_supported_device *supported_dev,
4513 				 struct ipr_std_inq_vpids *vpids)
4514 {
4515 	memset(supported_dev, 0, sizeof(struct ipr_supported_device));
4516 	memcpy(&supported_dev->vpids, vpids, sizeof(struct ipr_std_inq_vpids));
4517 	supported_dev->num_records = 1;
4518 	supported_dev->data_length =
4519 		cpu_to_be16(sizeof(struct ipr_supported_device));
4520 	supported_dev->reserved = 0;
4521 }
4522 
4523 /**
4524  * ipr_set_supported_devs - Send Set Supported Devices for a device
4525  * @ipr_cmd:	ipr command struct
4526  *
 * This function sends a Set Supported Devices to the adapter.
4528  *
4529  * Return value:
4530  * 	IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
4531  **/
4532 static int ipr_set_supported_devs(struct ipr_cmnd *ipr_cmd)
4533 {
4534 	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
4535 	struct ipr_supported_device *supp_dev = &ioa_cfg->vpd_cbs->supp_dev;
4536 	struct ipr_ioadl_desc *ioadl = ipr_cmd->ioadl;
4537 	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
4538 	struct ipr_resource_entry *res = ipr_cmd->u.res;
4539 
4540 	ipr_cmd->job_step = ipr_ioa_reset_done;
4541 
4542 	list_for_each_entry_continue(res, &ioa_cfg->used_res_q, queue) {
4543 		if (!IPR_IS_DASD_DEVICE(res->cfgte.std_inq_data))
4544 			continue;
4545 
4546 		ipr_cmd->u.res = res;
4547 		ipr_set_sup_dev_dflt(supp_dev, &res->cfgte.std_inq_data.vpids);
4548 
4549 		ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
4550 		ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
4551 		ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
4552 
4553 		ioarcb->cmd_pkt.cdb[0] = IPR_SET_SUPPORTED_DEVICES;
4554 		ioarcb->cmd_pkt.cdb[7] = (sizeof(struct ipr_supported_device) >> 8) & 0xff;
4555 		ioarcb->cmd_pkt.cdb[8] = sizeof(struct ipr_supported_device) & 0xff;
4556 
4557 		ioadl->flags_and_data_len = cpu_to_be32(IPR_IOADL_FLAGS_WRITE_LAST |
4558 							sizeof(struct ipr_supported_device));
4559 		ioadl->address = cpu_to_be32(ioa_cfg->vpd_cbs_dma +
4560 					     offsetof(struct ipr_misc_cbs, supp_dev));
4561 		ioarcb->write_ioadl_len = cpu_to_be32(sizeof(struct ipr_ioadl_desc));
4562 		ioarcb->write_data_transfer_length =
4563 			cpu_to_be32(sizeof(struct ipr_supported_device));
4564 
4565 		ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout,
4566 			   IPR_SET_SUP_DEVICE_TIMEOUT);
4567 
4568 		ipr_cmd->job_step = ipr_set_supported_devs;
4569 		return IPR_RC_JOB_RETURN;
4570 	}
4571 
4572 	return IPR_RC_JOB_CONTINUE;
4573 }
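
/*
 * Note: the reset job runs as a state machine with one adapter command
 * in flight at a time. This step keeps its cursor in ipr_cmd->u.res,
 * re-installs itself as job_step, and returns IPR_RC_JOB_RETURN after
 * issuing each Set Supported Devices; only when
 * list_for_each_entry_continue() runs off the end of used_res_q does
 * it fall through to IPR_RC_JOB_CONTINUE and on to the next step.
 */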
4574 
4575 /**
4576  * ipr_setup_write_cache - Disable write cache if needed
4577  * @ipr_cmd:	ipr command struct
4578  *
 * This function sets up the adapter's write cache to the desired setting.
4580  *
4581  * Return value:
4582  * 	IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
4583  **/
4584 static int ipr_setup_write_cache(struct ipr_cmnd *ipr_cmd)
4585 {
4586 	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
4587 
4588 	ipr_cmd->job_step = ipr_set_supported_devs;
4589 	ipr_cmd->u.res = list_entry(ioa_cfg->used_res_q.next,
4590 				    struct ipr_resource_entry, queue);
4591 
4592 	if (ioa_cfg->cache_state != CACHE_DISABLED)
4593 		return IPR_RC_JOB_CONTINUE;
4594 
4595 	ipr_cmd->ioarcb.res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
4596 	ipr_cmd->ioarcb.cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
4597 	ipr_cmd->ioarcb.cmd_pkt.cdb[0] = IPR_IOA_SHUTDOWN;
4598 	ipr_cmd->ioarcb.cmd_pkt.cdb[1] = IPR_SHUTDOWN_PREPARE_FOR_NORMAL;
4599 
4600 	ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
4601 
4602 	return IPR_RC_JOB_RETURN;
4603 }
4604 
4605 /**
4606  * ipr_get_mode_page - Locate specified mode page
4607  * @mode_pages:	mode page buffer
4608  * @page_code:	page code to find
4609  * @len:		minimum required length for mode page
4610  *
4611  * Return value:
4612  * 	pointer to mode page / NULL on failure
4613  **/
4614 static void *ipr_get_mode_page(struct ipr_mode_pages *mode_pages,
4615 			       u32 page_code, u32 len)
4616 {
4617 	struct ipr_mode_page_hdr *mode_hdr;
4618 	u32 page_length;
4619 	u32 length;
4620 
4621 	if (!mode_pages || (mode_pages->hdr.length == 0))
4622 		return NULL;
4623 
4624 	length = (mode_pages->hdr.length + 1) - 4 - mode_pages->hdr.block_desc_len;
4625 	mode_hdr = (struct ipr_mode_page_hdr *)
4626 		(mode_pages->data + mode_pages->hdr.block_desc_len);
4627 
4628 	while (length) {
4629 		if (IPR_GET_MODE_PAGE_CODE(mode_hdr) == page_code) {
4630 			if (mode_hdr->page_length >= (len - sizeof(struct ipr_mode_page_hdr)))
4631 				return mode_hdr;
4632 			break;
4633 		} else {
4634 			page_length = (sizeof(struct ipr_mode_page_hdr) +
4635 				       mode_hdr->page_length);
4636 			length -= page_length;
4637 			mode_hdr = (struct ipr_mode_page_hdr *)
4638 				((unsigned long)mode_hdr + page_length);
4639 		}
4640 	}
4641 	return NULL;
4642 }
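
/*
 * Illustrative usage (not new driver code): callers ask for a page by
 * code together with the minimum length they need, e.g.
 *
 *	struct ipr_mode_page28 *page;
 *
 *	page = ipr_get_mode_page(mode_pages, 0x28,
 *				 sizeof(struct ipr_mode_page28));
 *
 * and get NULL back if the page is absent or shorter than required.
 * The length math above follows the SPC mode parameter header: the
 * length byte excludes itself (hence the +1), and the 4-byte header
 * plus any block descriptors precede the first mode page.
 */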
4643 
4644 /**
4645  * ipr_check_term_power - Check for term power errors
4646  * @ioa_cfg:	ioa config struct
4647  * @mode_pages:	IOAFP mode pages buffer
4648  *
4649  * Check the IOAFP's mode page 28 for term power errors
4650  *
4651  * Return value:
4652  * 	nothing
4653  **/
4654 static void ipr_check_term_power(struct ipr_ioa_cfg *ioa_cfg,
4655 				 struct ipr_mode_pages *mode_pages)
4656 {
4657 	int i;
4658 	int entry_length;
4659 	struct ipr_dev_bus_entry *bus;
4660 	struct ipr_mode_page28 *mode_page;
4661 
4662 	mode_page = ipr_get_mode_page(mode_pages, 0x28,
4663 				      sizeof(struct ipr_mode_page28));
4664 
4665 	entry_length = mode_page->entry_length;
4666 
4667 	bus = mode_page->bus;
4668 
4669 	for (i = 0; i < mode_page->num_entries; i++) {
4670 		if (bus->flags & IPR_SCSI_ATTR_NO_TERM_PWR) {
4671 			dev_err(&ioa_cfg->pdev->dev,
4672 				"Term power is absent on scsi bus %d\n",
4673 				bus->res_addr.bus);
4674 		}
4675 
4676 		bus = (struct ipr_dev_bus_entry *)((char *)bus + entry_length);
4677 	}
4678 }
4679 
4680 /**
4681  * ipr_scsi_bus_speed_limit - Limit the SCSI speed based on SES table
4682  * @ioa_cfg:	ioa config struct
4683  *
4684  * Looks through the config table checking for SES devices. If
4685  * the SES device is in the SES table indicating a maximum SCSI
4686  * bus speed, the speed is limited for the bus.
4687  *
4688  * Return value:
4689  * 	none
4690  **/
4691 static void ipr_scsi_bus_speed_limit(struct ipr_ioa_cfg *ioa_cfg)
4692 {
4693 	u32 max_xfer_rate;
4694 	int i;
4695 
4696 	for (i = 0; i < IPR_MAX_NUM_BUSES; i++) {
4697 		max_xfer_rate = ipr_get_max_scsi_speed(ioa_cfg, i,
4698 						       ioa_cfg->bus_attr[i].bus_width);
4699 
4700 		if (max_xfer_rate < ioa_cfg->bus_attr[i].max_xfer_rate)
4701 			ioa_cfg->bus_attr[i].max_xfer_rate = max_xfer_rate;
4702 	}
4703 }
4704 
4705 /**
4706  * ipr_modify_ioafp_mode_page_28 - Modify IOAFP Mode Page 28
4707  * @ioa_cfg:	ioa config struct
4708  * @mode_pages:	mode page 28 buffer
4709  *
4710  * Updates mode page 28 based on driver configuration
4711  *
4712  * Return value:
4713  * 	none
4714  **/
4715 static void ipr_modify_ioafp_mode_page_28(struct ipr_ioa_cfg *ioa_cfg,
					  struct ipr_mode_pages *mode_pages)
4717 {
4718 	int i, entry_length;
4719 	struct ipr_dev_bus_entry *bus;
4720 	struct ipr_bus_attributes *bus_attr;
4721 	struct ipr_mode_page28 *mode_page;
4722 
4723 	mode_page = ipr_get_mode_page(mode_pages, 0x28,
4724 				      sizeof(struct ipr_mode_page28));
4725 
4726 	entry_length = mode_page->entry_length;
4727 
4728 	/* Loop for each device bus entry */
4729 	for (i = 0, bus = mode_page->bus;
4730 	     i < mode_page->num_entries;
4731 	     i++, bus = (struct ipr_dev_bus_entry *)((u8 *)bus + entry_length)) {
4732 		if (bus->res_addr.bus > IPR_MAX_NUM_BUSES) {
4733 			dev_err(&ioa_cfg->pdev->dev,
4734 				"Invalid resource address reported: 0x%08X\n",
4735 				IPR_GET_PHYS_LOC(bus->res_addr));
4736 			continue;
4737 		}
4738 
4739 		bus_attr = &ioa_cfg->bus_attr[i];
4740 		bus->extended_reset_delay = IPR_EXTENDED_RESET_DELAY;
4741 		bus->bus_width = bus_attr->bus_width;
4742 		bus->max_xfer_rate = cpu_to_be32(bus_attr->max_xfer_rate);
4743 		bus->flags &= ~IPR_SCSI_ATTR_QAS_MASK;
4744 		if (bus_attr->qas_enabled)
4745 			bus->flags |= IPR_SCSI_ATTR_ENABLE_QAS;
4746 		else
4747 			bus->flags |= IPR_SCSI_ATTR_DISABLE_QAS;
4748 	}
4749 }
4750 
4751 /**
4752  * ipr_build_mode_select - Build a mode select command
4753  * @ipr_cmd:	ipr command struct
4754  * @res_handle:	resource handle to send command to
 * @parm:		Byte 1 of the Mode Select CDB (PF and SP flags)
4756  * @dma_addr:	DMA buffer address
4757  * @xfer_len:	data transfer length
4758  *
4759  * Return value:
4760  * 	none
4761  **/
4762 static void ipr_build_mode_select(struct ipr_cmnd *ipr_cmd,
4763 				  __be32 res_handle, u8 parm, u32 dma_addr,
4764 				  u8 xfer_len)
4765 {
4766 	struct ipr_ioadl_desc *ioadl = ipr_cmd->ioadl;
4767 	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
4768 
4769 	ioarcb->res_handle = res_handle;
4770 	ioarcb->cmd_pkt.request_type = IPR_RQTYPE_SCSICDB;
4771 	ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
4772 	ioarcb->cmd_pkt.cdb[0] = MODE_SELECT;
4773 	ioarcb->cmd_pkt.cdb[1] = parm;
4774 	ioarcb->cmd_pkt.cdb[4] = xfer_len;
4775 
4776 	ioadl->flags_and_data_len =
4777 		cpu_to_be32(IPR_IOADL_FLAGS_WRITE_LAST | xfer_len);
4778 	ioadl->address = cpu_to_be32(dma_addr);
4779 	ioarcb->write_ioadl_len = cpu_to_be32(sizeof(struct ipr_ioadl_desc));
4780 	ioarcb->write_data_transfer_length = cpu_to_be32(xfer_len);
4781 }
4782 
4783 /**
4784  * ipr_ioafp_mode_select_page28 - Issue Mode Select Page 28 to IOA
4785  * @ipr_cmd:	ipr command struct
4786  *
4787  * This function sets up the SCSI bus attributes and sends
4788  * a Mode Select for Page 28 to activate them.
4789  *
4790  * Return value:
4791  * 	IPR_RC_JOB_RETURN
4792  **/
4793 static int ipr_ioafp_mode_select_page28(struct ipr_cmnd *ipr_cmd)
4794 {
4795 	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
4796 	struct ipr_mode_pages *mode_pages = &ioa_cfg->vpd_cbs->mode_pages;
4797 	int length;
4798 
4799 	ENTER;
4800 	ipr_scsi_bus_speed_limit(ioa_cfg);
4801 	ipr_check_term_power(ioa_cfg, mode_pages);
4802 	ipr_modify_ioafp_mode_page_28(ioa_cfg, mode_pages);
4803 	length = mode_pages->hdr.length + 1;
4804 	mode_pages->hdr.length = 0;
4805 
4806 	ipr_build_mode_select(ipr_cmd, cpu_to_be32(IPR_IOA_RES_HANDLE), 0x11,
4807 			      ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, mode_pages),
4808 			      length);
4809 
4810 	ipr_cmd->job_step = ipr_setup_write_cache;
4811 	ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
4812 
4813 	LEAVE;
4814 	return IPR_RC_JOB_RETURN;
4815 }
4816 
4817 /**
4818  * ipr_build_mode_sense - Builds a mode sense command
4819  * @ipr_cmd:	ipr command struct
 * @res_handle:	resource handle to send command to
 * @parm:		Byte 2 of the Mode Sense CDB (page code)
4822  * @dma_addr:	DMA address of mode sense buffer
4823  * @xfer_len:	Size of DMA buffer
4824  *
4825  * Return value:
4826  * 	none
4827  **/
4828 static void ipr_build_mode_sense(struct ipr_cmnd *ipr_cmd,
4829 				 __be32 res_handle,
4830 				 u8 parm, u32 dma_addr, u8 xfer_len)
4831 {
4832 	struct ipr_ioadl_desc *ioadl = ipr_cmd->ioadl;
4833 	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
4834 
4835 	ioarcb->res_handle = res_handle;
4836 	ioarcb->cmd_pkt.cdb[0] = MODE_SENSE;
4837 	ioarcb->cmd_pkt.cdb[2] = parm;
4838 	ioarcb->cmd_pkt.cdb[4] = xfer_len;
4839 	ioarcb->cmd_pkt.request_type = IPR_RQTYPE_SCSICDB;
4840 
4841 	ioadl->flags_and_data_len =
4842 		cpu_to_be32(IPR_IOADL_FLAGS_READ_LAST | xfer_len);
4843 	ioadl->address = cpu_to_be32(dma_addr);
4844 	ioarcb->read_ioadl_len = cpu_to_be32(sizeof(struct ipr_ioadl_desc));
4845 	ioarcb->read_data_transfer_length = cpu_to_be32(xfer_len);
4846 }
4847 
4848 /**
4849  * ipr_reset_cmd_failed - Handle failure of IOA reset command
4850  * @ipr_cmd:	ipr command struct
4851  *
4852  * This function handles the failure of an IOA bringup command.
4853  *
4854  * Return value:
4855  * 	IPR_RC_JOB_RETURN
4856  **/
4857 static int ipr_reset_cmd_failed(struct ipr_cmnd *ipr_cmd)
4858 {
4859 	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
4860 	u32 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
4861 
4862 	dev_err(&ioa_cfg->pdev->dev,
4863 		"0x%02X failed with IOASC: 0x%08X\n",
4864 		ipr_cmd->ioarcb.cmd_pkt.cdb[0], ioasc);
4865 
4866 	ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
4867 	list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
4868 	return IPR_RC_JOB_RETURN;
4869 }
4870 
4871 /**
4872  * ipr_reset_mode_sense_failed - Handle failure of IOAFP mode sense
4873  * @ipr_cmd:	ipr command struct
4874  *
4875  * This function handles the failure of a Mode Sense to the IOAFP.
4876  * Some adapters do not handle all mode pages.
4877  *
4878  * Return value:
4879  * 	IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
4880  **/
4881 static int ipr_reset_mode_sense_failed(struct ipr_cmnd *ipr_cmd)
4882 {
4883 	u32 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
4884 
4885 	if (ioasc == IPR_IOASC_IR_INVALID_REQ_TYPE_OR_PKT) {
4886 		ipr_cmd->job_step = ipr_setup_write_cache;
4887 		return IPR_RC_JOB_CONTINUE;
4888 	}
4889 
4890 	return ipr_reset_cmd_failed(ipr_cmd);
4891 }
4892 
4893 /**
4894  * ipr_ioafp_mode_sense_page28 - Issue Mode Sense Page 28 to IOA
4895  * @ipr_cmd:	ipr command struct
4896  *
 * This function sends a Page 28 mode sense to the IOA to
4898  * retrieve SCSI bus attributes.
4899  *
4900  * Return value:
4901  * 	IPR_RC_JOB_RETURN
4902  **/
4903 static int ipr_ioafp_mode_sense_page28(struct ipr_cmnd *ipr_cmd)
4904 {
4905 	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
4906 
4907 	ENTER;
4908 	ipr_build_mode_sense(ipr_cmd, cpu_to_be32(IPR_IOA_RES_HANDLE),
4909 			     0x28, ioa_cfg->vpd_cbs_dma +
4910 			     offsetof(struct ipr_misc_cbs, mode_pages),
4911 			     sizeof(struct ipr_mode_pages));
4912 
4913 	ipr_cmd->job_step = ipr_ioafp_mode_select_page28;
4914 	ipr_cmd->job_step_failed = ipr_reset_mode_sense_failed;
4915 
4916 	ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
4917 
4918 	LEAVE;
4919 	return IPR_RC_JOB_RETURN;
4920 }
4921 
4922 /**
4923  * ipr_init_res_table - Initialize the resource table
4924  * @ipr_cmd:	ipr command struct
4925  *
4926  * This function looks through the existing resource table, comparing
4927  * it with the config table. This function will take care of old/new
4928  * devices and schedule adding/removing them from the mid-layer
4929  * as appropriate.
4930  *
4931  * Return value:
4932  * 	IPR_RC_JOB_CONTINUE
4933  **/
4934 static int ipr_init_res_table(struct ipr_cmnd *ipr_cmd)
4935 {
4936 	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
4937 	struct ipr_resource_entry *res, *temp;
4938 	struct ipr_config_table_entry *cfgte;
4939 	int found, i;
4940 	LIST_HEAD(old_res);
4941 
4942 	ENTER;
4943 	if (ioa_cfg->cfg_table->hdr.flags & IPR_UCODE_DOWNLOAD_REQ)
4944 		dev_err(&ioa_cfg->pdev->dev, "Microcode download required\n");
4945 
4946 	list_for_each_entry_safe(res, temp, &ioa_cfg->used_res_q, queue)
4947 		list_move_tail(&res->queue, &old_res);
4948 
4949 	for (i = 0; i < ioa_cfg->cfg_table->hdr.num_entries; i++) {
4950 		cfgte = &ioa_cfg->cfg_table->dev[i];
4951 		found = 0;
4952 
4953 		list_for_each_entry_safe(res, temp, &old_res, queue) {
4954 			if (!memcmp(&res->cfgte.res_addr,
4955 				    &cfgte->res_addr, sizeof(cfgte->res_addr))) {
4956 				list_move_tail(&res->queue, &ioa_cfg->used_res_q);
4957 				found = 1;
4958 				break;
4959 			}
4960 		}
4961 
4962 		if (!found) {
4963 			if (list_empty(&ioa_cfg->free_res_q)) {
4964 				dev_err(&ioa_cfg->pdev->dev, "Too many devices attached\n");
4965 				break;
4966 			}
4967 
4968 			found = 1;
4969 			res = list_entry(ioa_cfg->free_res_q.next,
4970 					 struct ipr_resource_entry, queue);
4971 			list_move_tail(&res->queue, &ioa_cfg->used_res_q);
4972 			ipr_init_res_entry(res);
4973 			res->add_to_ml = 1;
4974 		}
4975 
4976 		if (found)
4977 			memcpy(&res->cfgte, cfgte, sizeof(struct ipr_config_table_entry));
4978 	}
4979 
4980 	list_for_each_entry_safe(res, temp, &old_res, queue) {
4981 		if (res->sdev) {
4982 			res->del_from_ml = 1;
4983 			res->sdev->hostdata = NULL;
4984 			list_move_tail(&res->queue, &ioa_cfg->used_res_q);
4985 		} else {
4986 			list_move_tail(&res->queue, &ioa_cfg->free_res_q);
4987 		}
4988 	}
4989 
4990 	ipr_cmd->job_step = ipr_ioafp_mode_sense_page28;
4991 
4992 	LEAVE;
4993 	return IPR_RC_JOB_CONTINUE;
4994 }
4995 
4996 /**
4997  * ipr_ioafp_query_ioa_cfg - Send a Query IOA Config to the adapter.
4998  * @ipr_cmd:	ipr command struct
4999  *
5000  * This function sends a Query IOA Configuration command
5001  * to the adapter to retrieve the IOA configuration table.
5002  *
5003  * Return value:
5004  * 	IPR_RC_JOB_RETURN
5005  **/
5006 static int ipr_ioafp_query_ioa_cfg(struct ipr_cmnd *ipr_cmd)
5007 {
5008 	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5009 	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
5010 	struct ipr_ioadl_desc *ioadl = ipr_cmd->ioadl;
5011 	struct ipr_inquiry_page3 *ucode_vpd = &ioa_cfg->vpd_cbs->page3_data;
5012 
5013 	ENTER;
5014 	dev_info(&ioa_cfg->pdev->dev, "Adapter firmware version: %02X%02X%02X%02X\n",
5015 		 ucode_vpd->major_release, ucode_vpd->card_type,
5016 		 ucode_vpd->minor_release[0], ucode_vpd->minor_release[1]);
5017 	ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
5018 	ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
5019 
5020 	ioarcb->cmd_pkt.cdb[0] = IPR_QUERY_IOA_CONFIG;
5021 	ioarcb->cmd_pkt.cdb[7] = (sizeof(struct ipr_config_table) >> 8) & 0xff;
5022 	ioarcb->cmd_pkt.cdb[8] = sizeof(struct ipr_config_table) & 0xff;
5023 
5024 	ioarcb->read_ioadl_len = cpu_to_be32(sizeof(struct ipr_ioadl_desc));
5025 	ioarcb->read_data_transfer_length =
5026 		cpu_to_be32(sizeof(struct ipr_config_table));
5027 
5028 	ioadl->address = cpu_to_be32(ioa_cfg->cfg_table_dma);
5029 	ioadl->flags_and_data_len =
5030 		cpu_to_be32(IPR_IOADL_FLAGS_READ_LAST | sizeof(struct ipr_config_table));
5031 
5032 	ipr_cmd->job_step = ipr_init_res_table;
5033 
5034 	ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
5035 
5036 	LEAVE;
5037 	return IPR_RC_JOB_RETURN;
5038 }
5039 
5040 /**
 * ipr_ioafp_inquiry - Send an Inquiry to the adapter.
 * @ipr_cmd:	ipr command struct
 * @flags:	inquiry flags (CDB byte 1, e.g. EVPD)
 * @page:	page code to inquire
 * @dma_addr:	DMA address of the inquiry data buffer
 * @xfer_len:	size of the inquiry data buffer
5043  *
5044  * This utility function sends an inquiry to the adapter.
5045  *
5046  * Return value:
5047  * 	none
5048  **/
5049 static void ipr_ioafp_inquiry(struct ipr_cmnd *ipr_cmd, u8 flags, u8 page,
5050 			      u32 dma_addr, u8 xfer_len)
5051 {
5052 	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
5053 	struct ipr_ioadl_desc *ioadl = ipr_cmd->ioadl;
5054 
5055 	ENTER;
5056 	ioarcb->cmd_pkt.request_type = IPR_RQTYPE_SCSICDB;
5057 	ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
5058 
5059 	ioarcb->cmd_pkt.cdb[0] = INQUIRY;
5060 	ioarcb->cmd_pkt.cdb[1] = flags;
5061 	ioarcb->cmd_pkt.cdb[2] = page;
5062 	ioarcb->cmd_pkt.cdb[4] = xfer_len;
5063 
5064 	ioarcb->read_ioadl_len = cpu_to_be32(sizeof(struct ipr_ioadl_desc));
5065 	ioarcb->read_data_transfer_length = cpu_to_be32(xfer_len);
5066 
5067 	ioadl->address = cpu_to_be32(dma_addr);
5068 	ioadl->flags_and_data_len =
5069 		cpu_to_be32(IPR_IOADL_FLAGS_READ_LAST | xfer_len);
5070 
5071 	ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
5072 	LEAVE;
5073 }
5074 
5075 /**
5076  * ipr_inquiry_page_supported - Is the given inquiry page supported
5077  * @page0:		inquiry page 0 buffer
5078  * @page:		page code.
5079  *
5080  * This function determines if the specified inquiry page is supported.
5081  *
5082  * Return value:
5083  *	1 if page is supported / 0 if not
5084  **/
5085 static int ipr_inquiry_page_supported(struct ipr_inquiry_page0 *page0, u8 page)
5086 {
5087 	int i;
5088 
5089 	for (i = 0; i < min_t(u8, page0->len, IPR_INQUIRY_PAGE0_ENTRIES); i++)
5090 		if (page0->page[i] == page)
5091 			return 1;
5092 
5093 	return 0;
5094 }
5095 
5096 /**
5097  * ipr_ioafp_page3_inquiry - Send a Page 3 Inquiry to the adapter.
5098  * @ipr_cmd:	ipr command struct
5099  *
5100  * This function sends a Page 3 inquiry to the adapter
5101  * to retrieve software VPD information.
5102  *
5103  * Return value:
5104  * 	IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
5105  **/
5106 static int ipr_ioafp_page3_inquiry(struct ipr_cmnd *ipr_cmd)
5107 {
5108 	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5109 	struct ipr_inquiry_page0 *page0 = &ioa_cfg->vpd_cbs->page0_data;
5110 
5111 	ENTER;
5112 
5113 	if (!ipr_inquiry_page_supported(page0, 1))
5114 		ioa_cfg->cache_state = CACHE_NONE;
5115 
5116 	ipr_cmd->job_step = ipr_ioafp_query_ioa_cfg;
5117 
5118 	ipr_ioafp_inquiry(ipr_cmd, 1, 3,
5119 			  ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, page3_data),
5120 			  sizeof(struct ipr_inquiry_page3));
5121 
5122 	LEAVE;
5123 	return IPR_RC_JOB_RETURN;
5124 }
5125 
5126 /**
5127  * ipr_ioafp_page0_inquiry - Send a Page 0 Inquiry to the adapter.
5128  * @ipr_cmd:	ipr command struct
5129  *
5130  * This function sends a Page 0 inquiry to the adapter
5131  * to retrieve supported inquiry pages.
5132  *
5133  * Return value:
5134  * 	IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
5135  **/
5136 static int ipr_ioafp_page0_inquiry(struct ipr_cmnd *ipr_cmd)
5137 {
5138 	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5139 	char type[5];
5140 
5141 	ENTER;
5142 
5143 	/* Grab the type out of the VPD and store it away */
5144 	memcpy(type, ioa_cfg->vpd_cbs->ioa_vpd.std_inq_data.vpids.product_id, 4);
5145 	type[4] = '\0';
5146 	ioa_cfg->type = simple_strtoul((char *)type, NULL, 16);
5147 
5148 	ipr_cmd->job_step = ipr_ioafp_page3_inquiry;
5149 
5150 	ipr_ioafp_inquiry(ipr_cmd, 1, 0,
5151 			  ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, page0_data),
5152 			  sizeof(struct ipr_inquiry_page0));
5153 
5154 	LEAVE;
5155 	return IPR_RC_JOB_RETURN;
5156 }
5157 
5158 /**
5159  * ipr_ioafp_std_inquiry - Send a Standard Inquiry to the adapter.
5160  * @ipr_cmd:	ipr command struct
5161  *
5162  * This function sends a standard inquiry to the adapter.
5163  *
5164  * Return value:
5165  * 	IPR_RC_JOB_RETURN
5166  **/
5167 static int ipr_ioafp_std_inquiry(struct ipr_cmnd *ipr_cmd)
5168 {
5169 	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5170 
5171 	ENTER;
5172 	ipr_cmd->job_step = ipr_ioafp_page0_inquiry;
5173 
5174 	ipr_ioafp_inquiry(ipr_cmd, 0, 0,
5175 			  ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, ioa_vpd),
5176 			  sizeof(struct ipr_ioa_vpd));
5177 
5178 	LEAVE;
5179 	return IPR_RC_JOB_RETURN;
5180 }
5181 
5182 /**
 * ipr_ioafp_identify_hrrq - Send Identify Host RRQ.
5184  * @ipr_cmd:	ipr command struct
5185  *
 * This function sends an Identify Host Request Response Queue
5187  * command to establish the HRRQ with the adapter.
5188  *
5189  * Return value:
5190  * 	IPR_RC_JOB_RETURN
5191  **/
static int ipr_ioafp_identify_hrrq(struct ipr_cmnd *ipr_cmd)
5193 {
5194 	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5195 	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
5196 
5197 	ENTER;
5198 	dev_info(&ioa_cfg->pdev->dev, "Starting IOA initialization sequence.\n");
5199 
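	/*
	 * The Identify Host RRQ CDB carries the queue's bus address
	 * big-endian in CDB bytes 2-5 and the queue size in bytes
	 * (one u32 entry per command block) in bytes 7-8.
	 */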
5200 	ioarcb->cmd_pkt.cdb[0] = IPR_ID_HOST_RR_Q;
5201 	ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
5202 
5203 	ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
5204 	ioarcb->cmd_pkt.cdb[2] =
5205 		((u32) ioa_cfg->host_rrq_dma >> 24) & 0xff;
5206 	ioarcb->cmd_pkt.cdb[3] =
5207 		((u32) ioa_cfg->host_rrq_dma >> 16) & 0xff;
5208 	ioarcb->cmd_pkt.cdb[4] =
5209 		((u32) ioa_cfg->host_rrq_dma >> 8) & 0xff;
5210 	ioarcb->cmd_pkt.cdb[5] =
5211 		((u32) ioa_cfg->host_rrq_dma) & 0xff;
5212 	ioarcb->cmd_pkt.cdb[7] =
5213 		((sizeof(u32) * IPR_NUM_CMD_BLKS) >> 8) & 0xff;
5214 	ioarcb->cmd_pkt.cdb[8] =
5215 		(sizeof(u32) * IPR_NUM_CMD_BLKS) & 0xff;
5216 
5217 	ipr_cmd->job_step = ipr_ioafp_std_inquiry;
5218 
5219 	ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
5220 
5221 	LEAVE;
5222 	return IPR_RC_JOB_RETURN;
5223 }
5224 
5225 /**
5226  * ipr_reset_timer_done - Adapter reset timer function
5227  * @ipr_cmd:	ipr command struct
5228  *
5229  * Description: This function is used in adapter reset processing
5230  * for timing events. If the reset_cmd pointer in the IOA
5231  * config struct is not this adapter's we are doing nested
5232  * resets and fail_all_ops will take care of freeing the
5233  * command block.
5234  *
5235  * Return value:
5236  * 	none
5237  **/
5238 static void ipr_reset_timer_done(struct ipr_cmnd *ipr_cmd)
5239 {
5240 	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5241 	unsigned long lock_flags = 0;
5242 
5243 	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
5244 
5245 	if (ioa_cfg->reset_cmd == ipr_cmd) {
5246 		list_del(&ipr_cmd->queue);
5247 		ipr_cmd->done(ipr_cmd);
5248 	}
5249 
5250 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
5251 }
5252 
5253 /**
5254  * ipr_reset_start_timer - Start a timer for adapter reset job
5255  * @ipr_cmd:	ipr command struct
5256  * @timeout:	timeout value
5257  *
5258  * Description: This function is used in adapter reset processing
5259  * for timing events. If the reset_cmd pointer in the IOA
 * config struct no longer points at this command, we are doing
 * nested resets and fail_all_ops will take care of freeing the
5262  * command block.
5263  *
5264  * Return value:
5265  * 	none
5266  **/
5267 static void ipr_reset_start_timer(struct ipr_cmnd *ipr_cmd,
5268 				  unsigned long timeout)
5269 {
5270 	list_add_tail(&ipr_cmd->queue, &ipr_cmd->ioa_cfg->pending_q);
5271 	ipr_cmd->done = ipr_reset_ioa_job;
5272 
5273 	ipr_cmd->timer.data = (unsigned long) ipr_cmd;
5274 	ipr_cmd->timer.expires = jiffies + timeout;
5275 	ipr_cmd->timer.function = (void (*)(unsigned long))ipr_reset_timer_done;
5276 	add_timer(&ipr_cmd->timer);
5277 }
5278 
5279 /**
5280  * ipr_init_ioa_mem - Initialize ioa_cfg control block
5281  * @ioa_cfg:	ioa cfg struct
5282  *
5283  * Return value:
5284  * 	nothing
5285  **/
5286 static void ipr_init_ioa_mem(struct ipr_ioa_cfg *ioa_cfg)
5287 {
5288 	memset(ioa_cfg->host_rrq, 0, sizeof(u32) * IPR_NUM_CMD_BLKS);
5289 
5290 	/* Initialize Host RRQ pointers */
5291 	ioa_cfg->hrrq_start = ioa_cfg->host_rrq;
5292 	ioa_cfg->hrrq_end = &ioa_cfg->host_rrq[IPR_NUM_CMD_BLKS - 1];
5293 	ioa_cfg->hrrq_curr = ioa_cfg->hrrq_start;
5294 	ioa_cfg->toggle_bit = 1;
5295 
5296 	/* Zero out config table */
5297 	memset(ioa_cfg->cfg_table, 0, sizeof(struct ipr_config_table));
5298 }
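
/*
 * Illustrative sketch (not part of this driver) of how a consumer walks
 * the host RRQ initialized above. The IOA posts one big-endian u32
 * response handle per completed command and inverts the toggle bit each
 * time it wraps, so an entry is new only while its toggle bit matches
 * ioa_cfg->toggle_bit. The bit layout assumed here (bit 0 = toggle,
 * command index in bits 31:2, matching the i << 2 encoding used in
 * ipr_alloc_cmd_blks()) stands in for the real definitions in ipr.h.
 */
#if 0	/* example only -- not compiled */
static int ipr_example_drain_hrrq(struct ipr_ioa_cfg *ioa_cfg)
{
	int completed = 0;
	u32 entry;

	while (((entry = be32_to_cpu(*ioa_cfg->hrrq_curr)) & 1) ==
	       ioa_cfg->toggle_bit) {
		struct ipr_cmnd *ipr_cmd =
			ioa_cfg->ipr_cmnd_list[entry >> 2];

		ipr_cmd->done(ipr_cmd);	/* hand the command back */

		if (ioa_cfg->hrrq_curr < ioa_cfg->hrrq_end) {
			ioa_cfg->hrrq_curr++;
		} else {
			/* Wrapped: the IOA flips the bit it writes next pass */
			ioa_cfg->hrrq_curr = ioa_cfg->hrrq_start;
			ioa_cfg->toggle_bit ^= 1u;
		}
		completed++;
	}

	return completed;
}
#endif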
5299 
5300 /**
5301  * ipr_reset_enable_ioa - Enable the IOA following a reset.
5302  * @ipr_cmd:	ipr command struct
5303  *
5304  * This function reinitializes some control blocks and
5305  * enables destructive diagnostics on the adapter.
5306  *
5307  * Return value:
5308  * 	IPR_RC_JOB_RETURN
5309  **/
5310 static int ipr_reset_enable_ioa(struct ipr_cmnd *ipr_cmd)
5311 {
5312 	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5313 	volatile u32 int_reg;
5314 
5315 	ENTER;
5316 	ipr_cmd->job_step = ipr_ioafp_identify_hrrq;
5317 	ipr_init_ioa_mem(ioa_cfg);
5318 
5319 	ioa_cfg->allow_interrupts = 1;
5320 	int_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
5321 
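	/*
	 * If the IOA has already signalled its transition to operational,
	 * there is no need to ring the doorbell and wait: unmask the
	 * operational interrupts and let the job router fall straight
	 * through to the Identify Host RRQ step.
	 */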
5322 	if (int_reg & IPR_PCII_IOA_TRANS_TO_OPER) {
5323 		writel((IPR_PCII_ERROR_INTERRUPTS | IPR_PCII_HRRQ_UPDATED),
5324 		       ioa_cfg->regs.clr_interrupt_mask_reg);
5325 		int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
5326 		return IPR_RC_JOB_CONTINUE;
5327 	}
5328 
5329 	/* Enable destructive diagnostics on IOA */
5330 	writel(ioa_cfg->doorbell, ioa_cfg->regs.set_uproc_interrupt_reg);
5331 
5332 	writel(IPR_PCII_OPER_INTERRUPTS, ioa_cfg->regs.clr_interrupt_mask_reg);
5333 	int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
5334 
5335 	dev_info(&ioa_cfg->pdev->dev, "Initializing IOA.\n");
5336 
5337 	ipr_cmd->timer.data = (unsigned long) ipr_cmd;
5338 	ipr_cmd->timer.expires = jiffies + (ipr_transop_timeout * HZ);
5339 	ipr_cmd->timer.function = (void (*)(unsigned long))ipr_oper_timeout;
5340 	ipr_cmd->done = ipr_reset_ioa_job;
5341 	add_timer(&ipr_cmd->timer);
5342 	list_add_tail(&ipr_cmd->queue, &ioa_cfg->pending_q);
5343 
5344 	LEAVE;
5345 	return IPR_RC_JOB_RETURN;
5346 }
5347 
5348 /**
5349  * ipr_reset_wait_for_dump - Wait for a dump to timeout.
5350  * @ipr_cmd:	ipr command struct
5351  *
5352  * This function is invoked when an adapter dump has run out
5353  * of processing time.
5354  *
5355  * Return value:
5356  * 	IPR_RC_JOB_CONTINUE
5357  **/
5358 static int ipr_reset_wait_for_dump(struct ipr_cmnd *ipr_cmd)
5359 {
5360 	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5361 
5362 	if (ioa_cfg->sdt_state == GET_DUMP)
5363 		ioa_cfg->sdt_state = ABORT_DUMP;
5364 
5365 	ipr_cmd->job_step = ipr_reset_alert;
5366 
5367 	return IPR_RC_JOB_CONTINUE;
5368 }
5369 
5370 /**
5371  * ipr_unit_check_no_data - Log a unit check/no data error
5372  * @ioa_cfg:		ioa config struct
5373  *
5374  * Logs an error indicating the adapter unit checked, but for some
5375  * reason, we were unable to fetch the unit check buffer.
5376  *
5377  * Return value:
5378  * 	nothing
5379  **/
5380 static void ipr_unit_check_no_data(struct ipr_ioa_cfg *ioa_cfg)
5381 {
5382 	ioa_cfg->errors_logged++;
5383 	dev_err(&ioa_cfg->pdev->dev, "IOA unit check with no data\n");
5384 }
5385 
5386 /**
5387  * ipr_get_unit_check_buffer - Get the unit check buffer from the IOA
5388  * @ioa_cfg:		ioa config struct
5389  *
5390  * Fetches the unit check buffer from the adapter by clocking the data
5391  * through the mailbox register.
5392  *
5393  * Return value:
5394  * 	nothing
5395  **/
5396 static void ipr_get_unit_check_buffer(struct ipr_ioa_cfg *ioa_cfg)
5397 {
5398 	unsigned long mailbox;
5399 	struct ipr_hostrcb *hostrcb;
5400 	struct ipr_uc_sdt sdt;
5401 	int rc, length;
5402 
5403 	mailbox = readl(ioa_cfg->ioa_mailbox);
5404 
5405 	if (!ipr_sdt_is_fmt2(mailbox)) {
5406 		ipr_unit_check_no_data(ioa_cfg);
5407 		return;
5408 	}
5409 
5410 	memset(&sdt, 0, sizeof(struct ipr_uc_sdt));
5411 	rc = ipr_get_ldump_data_section(ioa_cfg, mailbox, (__be32 *) &sdt,
5412 					(sizeof(struct ipr_uc_sdt)) / sizeof(__be32));
5413 
5414 	if (rc || (be32_to_cpu(sdt.hdr.state) != IPR_FMT2_SDT_READY_TO_USE) ||
5415 	    !(sdt.entry[0].flags & IPR_SDT_VALID_ENTRY)) {
5416 		ipr_unit_check_no_data(ioa_cfg);
5417 		return;
5418 	}
5419 
5420 	/* Find length of the first sdt entry (UC buffer) */
5421 	length = (be32_to_cpu(sdt.entry[0].end_offset) -
5422 		  be32_to_cpu(sdt.entry[0].bar_str_offset)) & IPR_FMT2_MBX_ADDR_MASK;
5423 
5424 	hostrcb = list_entry(ioa_cfg->hostrcb_free_q.next,
5425 			     struct ipr_hostrcb, queue);
5426 	list_del(&hostrcb->queue);
5427 	memset(&hostrcb->hcam, 0, sizeof(hostrcb->hcam));
5428 
5429 	rc = ipr_get_ldump_data_section(ioa_cfg,
5430 					be32_to_cpu(sdt.entry[0].bar_str_offset),
5431 					(__be32 *)&hostrcb->hcam,
5432 					min(length, (int)sizeof(hostrcb->hcam)) / sizeof(__be32));
5433 
5434 	if (!rc)
5435 		ipr_handle_log_data(ioa_cfg, hostrcb);
5436 	else
5437 		ipr_unit_check_no_data(ioa_cfg);
5438 
5439 	list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_free_q);
5440 }
5441 
5442 /**
5443  * ipr_reset_restore_cfg_space - Restore PCI config space.
5444  * @ipr_cmd:	ipr command struct
5445  *
5446  * Description: This function restores the saved PCI config space of
5447  * the adapter, fails all outstanding ops back to the callers, and
5448  * fetches the dump/unit check if applicable to this reset.
5449  *
5450  * Return value:
5451  * 	IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
5452  **/
5453 static int ipr_reset_restore_cfg_space(struct ipr_cmnd *ipr_cmd)
5454 {
5455 	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5456 	int rc;
5457 
5458 	ENTER;
5459 	pci_unblock_user_cfg_access(ioa_cfg->pdev);
5460 	rc = pci_restore_state(ioa_cfg->pdev);
5461 
5462 	if (rc != PCIBIOS_SUCCESSFUL) {
5463 		ipr_cmd->ioasa.ioasc = cpu_to_be32(IPR_IOASC_PCI_ACCESS_ERROR);
5464 		return IPR_RC_JOB_CONTINUE;
5465 	}
5466 
5467 	if (ipr_set_pcix_cmd_reg(ioa_cfg)) {
5468 		ipr_cmd->ioasa.ioasc = cpu_to_be32(IPR_IOASC_PCI_ACCESS_ERROR);
5469 		return IPR_RC_JOB_CONTINUE;
5470 	}
5471 
5472 	ipr_fail_all_ops(ioa_cfg);
5473 
5474 	if (ioa_cfg->ioa_unit_checked) {
5475 		ioa_cfg->ioa_unit_checked = 0;
5476 		ipr_get_unit_check_buffer(ioa_cfg);
5477 		ipr_cmd->job_step = ipr_reset_alert;
5478 		ipr_reset_start_timer(ipr_cmd, 0);
5479 		return IPR_RC_JOB_RETURN;
5480 	}
5481 
5482 	if (ioa_cfg->in_ioa_bringdown) {
5483 		ipr_cmd->job_step = ipr_ioa_bringdown_done;
5484 	} else {
5485 		ipr_cmd->job_step = ipr_reset_enable_ioa;
5486 
5487 		if (GET_DUMP == ioa_cfg->sdt_state) {
5488 			ipr_reset_start_timer(ipr_cmd, IPR_DUMP_TIMEOUT);
5489 			ipr_cmd->job_step = ipr_reset_wait_for_dump;
5490 			schedule_work(&ioa_cfg->work_q);
5491 			return IPR_RC_JOB_RETURN;
5492 		}
5493 	}
5494 
5495 	LEAVE;
5496 	return IPR_RC_JOB_CONTINUE;
5497 }
5498 
5499 /**
5500  * ipr_reset_start_bist - Run BIST on the adapter.
5501  * @ipr_cmd:	ipr command struct
5502  *
5503  * Description: This function runs BIST on the adapter, then delays 2 seconds.
5504  *
5505  * Return value:
5506  * 	IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
5507  **/
5508 static int ipr_reset_start_bist(struct ipr_cmnd *ipr_cmd)
5509 {
5510 	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5511 	int rc;
5512 
5513 	ENTER;
5514 	pci_block_user_cfg_access(ioa_cfg->pdev);
5515 	rc = pci_write_config_byte(ioa_cfg->pdev, PCI_BIST, PCI_BIST_START);
5516 
5517 	if (rc != PCIBIOS_SUCCESSFUL) {
5518 		ipr_cmd->ioasa.ioasc = cpu_to_be32(IPR_IOASC_PCI_ACCESS_ERROR);
5519 		rc = IPR_RC_JOB_CONTINUE;
5520 	} else {
5521 		ipr_cmd->job_step = ipr_reset_restore_cfg_space;
5522 		ipr_reset_start_timer(ipr_cmd, IPR_WAIT_FOR_BIST_TIMEOUT);
5523 		rc = IPR_RC_JOB_RETURN;
5524 	}
5525 
5526 	LEAVE;
5527 	return rc;
5528 }
5529 
5530 /**
5531  * ipr_reset_allowed - Query whether or not IOA can be reset
5532  * @ioa_cfg:	ioa config struct
5533  *
5534  * Return value:
5535  * 	0 if reset not allowed / non-zero if reset is allowed
5536  **/
5537 static int ipr_reset_allowed(struct ipr_ioa_cfg *ioa_cfg)
5538 {
5539 	volatile u32 temp_reg;
5540 
5541 	temp_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
5542 	return ((temp_reg & IPR_PCII_CRITICAL_OPERATION) == 0);
5543 }
5544 
5545 /**
5546  * ipr_reset_wait_to_start_bist - Wait for permission to reset IOA.
5547  * @ipr_cmd:	ipr command struct
5548  *
5549  * Description: This function waits for adapter permission to run BIST,
5550  * then runs BIST. If the adapter does not give permission after a
5551  * reasonable time, we will reset the adapter anyway. Resetting the
5552  * adapter without warning it carries the risk of losing the
5553  * adapter's persistent error log: if the adapter is reset while it
5554  * is writing to its flash, that flash segment will have bad ECC
5555  * and be zeroed.
5556  *
5557  * Return value:
5558  * 	IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
5559  **/
5560 static int ipr_reset_wait_to_start_bist(struct ipr_cmnd *ipr_cmd)
5561 {
5562 	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5563 	int rc = IPR_RC_JOB_RETURN;
5564 
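	/*
	 * Poll in IPR_CHECK_FOR_RESET_TIMEOUT slices until the IOA drops
	 * IPR_PCII_CRITICAL_OPERATION or the budget ipr_reset_alert()
	 * loaded into u.time_left runs out; after that, start BIST anyway.
	 */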
5565 	if (!ipr_reset_allowed(ioa_cfg) && ipr_cmd->u.time_left) {
5566 		ipr_cmd->u.time_left -= IPR_CHECK_FOR_RESET_TIMEOUT;
5567 		ipr_reset_start_timer(ipr_cmd, IPR_CHECK_FOR_RESET_TIMEOUT);
5568 	} else {
5569 		ipr_cmd->job_step = ipr_reset_start_bist;
5570 		rc = IPR_RC_JOB_CONTINUE;
5571 	}
5572 
5573 	return rc;
5574 }
5575 
5576 /**
5577  * ipr_reset_alert_part2 - Alert the adapter of a pending reset
5578  * @ipr_cmd:	ipr command struct
5579  *
5580  * Description: This function alerts the adapter that it will be reset.
5581  * If memory space is not currently enabled, proceed directly
5582  * to running BIST on the adapter. The timer must always be started
5583  * so we guarantee we do not run BIST from ipr_isr.
5584  *
5585  * Return value:
5586  * 	IPR_RC_JOB_RETURN
5587  **/
5588 static int ipr_reset_alert(struct ipr_cmnd *ipr_cmd)
5589 {
5590 	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5591 	u16 cmd_reg;
5592 	int rc;
5593 
5594 	ENTER;
5595 	rc = pci_read_config_word(ioa_cfg->pdev, PCI_COMMAND, &cmd_reg);
5596 
5597 	if ((rc == PCIBIOS_SUCCESSFUL) && (cmd_reg & PCI_COMMAND_MEMORY)) {
5598 		ipr_mask_and_clear_interrupts(ioa_cfg, ~0);
5599 		writel(IPR_UPROCI_RESET_ALERT, ioa_cfg->regs.set_uproc_interrupt_reg);
5600 		ipr_cmd->job_step = ipr_reset_wait_to_start_bist;
5601 	} else {
5602 		ipr_cmd->job_step = ipr_reset_start_bist;
5603 	}
5604 
5605 	ipr_cmd->u.time_left = IPR_WAIT_FOR_RESET_TIMEOUT;
5606 	ipr_reset_start_timer(ipr_cmd, IPR_CHECK_FOR_RESET_TIMEOUT);
5607 
5608 	LEAVE;
5609 	return IPR_RC_JOB_RETURN;
5610 }
5611 
5612 /**
5613  * ipr_reset_ucode_download_done - Microcode download completion
5614  * @ipr_cmd:	ipr command struct
5615  *
5616  * Description: This function unmaps the microcode download buffer.
5617  *
5618  * Return value:
5619  * 	IPR_RC_JOB_CONTINUE
5620  **/
5621 static int ipr_reset_ucode_download_done(struct ipr_cmnd *ipr_cmd)
5622 {
5623 	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5624 	struct ipr_sglist *sglist = ioa_cfg->ucode_sglist;
5625 
5626 	pci_unmap_sg(ioa_cfg->pdev, sglist->scatterlist,
5627 		     sglist->num_sg, DMA_TO_DEVICE);
5628 
5629 	ipr_cmd->job_step = ipr_reset_alert;
5630 	return IPR_RC_JOB_CONTINUE;
5631 }
5632 
5633 /**
5634  * ipr_reset_ucode_download - Download microcode to the adapter
5635  * @ipr_cmd:	ipr command struct
5636  *
5637  * Description: This function checks whether there is microcode to
5638  * download to the adapter. If there is, a download is performed.
5639  *
5640  * Return value:
5641  * 	IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
5642  **/
5643 static int ipr_reset_ucode_download(struct ipr_cmnd *ipr_cmd)
5644 {
5645 	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5646 	struct ipr_sglist *sglist = ioa_cfg->ucode_sglist;
5647 
5648 	ENTER;
5649 	ipr_cmd->job_step = ipr_reset_alert;
5650 
5651 	if (!sglist)
5652 		return IPR_RC_JOB_CONTINUE;
5653 
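	/*
	 * Standard SCSI WRITE BUFFER: cdb[1] selects the download-and-save
	 * microcode mode and cdb[6..8] carry the 24-bit image length, MSB
	 * first, describing the data mapped by the IOADL built below.
	 */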
5654 	ipr_cmd->ioarcb.res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
5655 	ipr_cmd->ioarcb.cmd_pkt.request_type = IPR_RQTYPE_SCSICDB;
5656 	ipr_cmd->ioarcb.cmd_pkt.cdb[0] = WRITE_BUFFER;
5657 	ipr_cmd->ioarcb.cmd_pkt.cdb[1] = IPR_WR_BUF_DOWNLOAD_AND_SAVE;
5658 	ipr_cmd->ioarcb.cmd_pkt.cdb[6] = (sglist->buffer_len & 0xff0000) >> 16;
5659 	ipr_cmd->ioarcb.cmd_pkt.cdb[7] = (sglist->buffer_len & 0x00ff00) >> 8;
5660 	ipr_cmd->ioarcb.cmd_pkt.cdb[8] = sglist->buffer_len & 0x0000ff;
5661 
5662 	ipr_build_ucode_ioadl(ipr_cmd, sglist);
5663 	ipr_cmd->job_step = ipr_reset_ucode_download_done;
5664 
5665 	ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout,
5666 		   IPR_WRITE_BUFFER_TIMEOUT);
5667 
5668 	LEAVE;
5669 	return IPR_RC_JOB_RETURN;
5670 }
5671 
5672 /**
5673  * ipr_reset_shutdown_ioa - Shutdown the adapter
5674  * @ipr_cmd:	ipr command struct
5675  *
5676  * Description: This function issues an adapter shutdown of the
5677  * specified type to the specified adapter as part of the
5678  * adapter reset job.
5679  *
5680  * Return value:
5681  * 	IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
5682  **/
5683 static int ipr_reset_shutdown_ioa(struct ipr_cmnd *ipr_cmd)
5684 {
5685 	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5686 	enum ipr_shutdown_type shutdown_type = ipr_cmd->u.shutdown_type;
5687 	unsigned long timeout;
5688 	int rc = IPR_RC_JOB_CONTINUE;
5689 
5690 	ENTER;
5691 	if (shutdown_type != IPR_SHUTDOWN_NONE && !ioa_cfg->ioa_is_dead) {
5692 		ipr_cmd->ioarcb.res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
5693 		ipr_cmd->ioarcb.cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
5694 		ipr_cmd->ioarcb.cmd_pkt.cdb[0] = IPR_IOA_SHUTDOWN;
5695 		ipr_cmd->ioarcb.cmd_pkt.cdb[1] = shutdown_type;
5696 
5697 		if (shutdown_type == IPR_SHUTDOWN_ABBREV)
5698 			timeout = IPR_ABBREV_SHUTDOWN_TIMEOUT;
5699 		else if (shutdown_type == IPR_SHUTDOWN_PREPARE_FOR_NORMAL)
5700 			timeout = IPR_INTERNAL_TIMEOUT;
5701 		else
5702 			timeout = IPR_SHUTDOWN_TIMEOUT;
5703 
5704 		ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, timeout);
5705 
5706 		rc = IPR_RC_JOB_RETURN;
5707 		ipr_cmd->job_step = ipr_reset_ucode_download;
5708 	} else
5709 		ipr_cmd->job_step = ipr_reset_alert;
5710 
5711 	LEAVE;
5712 	return rc;
5713 }
5714 
5715 /**
5716  * ipr_reset_ioa_job - Adapter reset job
5717  * @ipr_cmd:	ipr command struct
5718  *
5719  * Description: This function is the job router for the adapter reset job.
5720  *
5721  * Return value:
5722  * 	none
5723  **/
5724 static void ipr_reset_ioa_job(struct ipr_cmnd *ipr_cmd)
5725 {
5726 	u32 rc, ioasc;
5727 	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5728 
5729 	do {
5730 		ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
5731 
5732 		if (ioa_cfg->reset_cmd != ipr_cmd) {
5733 			/*
5734 			 * We are doing nested adapter resets and this is
5735 			 * not the current reset job.
5736 			 */
5737 			list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
5738 			return;
5739 		}
5740 
5741 		if (IPR_IOASC_SENSE_KEY(ioasc)) {
5742 			rc = ipr_cmd->job_step_failed(ipr_cmd);
5743 			if (rc == IPR_RC_JOB_RETURN)
5744 				return;
5745 		}
5746 
5747 		ipr_reinit_ipr_cmnd(ipr_cmd);
5748 		ipr_cmd->job_step_failed = ipr_reset_cmd_failed;
5749 		rc = ipr_cmd->job_step(ipr_cmd);
5750 	} while (rc == IPR_RC_JOB_CONTINUE);
5751 }
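
/*
 * Illustrative sketch (not part of this driver) of the contract a
 * job_step must honor: return IPR_RC_JOB_CONTINUE and ipr_reset_ioa_job()
 * invokes the next job_step immediately; return IPR_RC_JOB_RETURN and the
 * step must already have armed a timer or issued a command whose
 * completion re-enters ipr_reset_ioa_job() later. The predicate below is
 * a placeholder, not a real driver helper.
 */
#if 0	/* example only -- not compiled */
static int ipr_example_job_step(struct ipr_cmnd *ipr_cmd)
{
	ipr_cmd->job_step = ipr_reset_enable_ioa;	/* next state */

	if (example_condition_already_met(ipr_cmd))	/* assumed helper */
		return IPR_RC_JOB_CONTINUE;		/* run next step now */

	/* Not ready yet: check again in one second via the reset timer */
	ipr_reset_start_timer(ipr_cmd, HZ);
	return IPR_RC_JOB_RETURN;
}
#endif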
5752 
5753 /**
5754  * _ipr_initiate_ioa_reset - Initiate an adapter reset
5755  * @ioa_cfg:		ioa config struct
5756  * @job_step:		first job step of reset job
5757  * @shutdown_type:	shutdown type
5758  *
5759  * Description: This function will initiate the reset of the given adapter
5760  * starting at the selected job step.
5761  * If the caller needs to wait on the completion of the reset,
5762  * the caller must sleep on the reset_wait_q.
5763  *
5764  * Return value:
5765  * 	none
5766  **/
5767 static void _ipr_initiate_ioa_reset(struct ipr_ioa_cfg *ioa_cfg,
5768 				    int (*job_step) (struct ipr_cmnd *),
5769 				    enum ipr_shutdown_type shutdown_type)
5770 {
5771 	struct ipr_cmnd *ipr_cmd;
5772 
5773 	ioa_cfg->in_reset_reload = 1;
5774 	ioa_cfg->allow_cmds = 0;
5775 	scsi_block_requests(ioa_cfg->host);
5776 
5777 	ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
5778 	ioa_cfg->reset_cmd = ipr_cmd;
5779 	ipr_cmd->job_step = job_step;
5780 	ipr_cmd->u.shutdown_type = shutdown_type;
5781 
5782 	ipr_reset_ioa_job(ipr_cmd);
5783 }
5784 
5785 /**
5786  * ipr_initiate_ioa_reset - Initiate an adapter reset
5787  * @ioa_cfg:		ioa config struct
5788  * @shutdown_type:	shutdown type
5789  *
5790  * Description: This function will initiate the reset of the given adapter.
5791  * If the caller needs to wait on the completion of the reset,
5792  * the caller must sleep on the reset_wait_q.
5793  *
5794  * Return value:
5795  * 	none
5796  **/
5797 static void ipr_initiate_ioa_reset(struct ipr_ioa_cfg *ioa_cfg,
5798 				   enum ipr_shutdown_type shutdown_type)
5799 {
5800 	if (ioa_cfg->ioa_is_dead)
5801 		return;
5802 
5803 	if (ioa_cfg->in_reset_reload && ioa_cfg->sdt_state == GET_DUMP)
5804 		ioa_cfg->sdt_state = ABORT_DUMP;
5805 
5806 	if (ioa_cfg->reset_retries++ >= IPR_NUM_RESET_RELOAD_RETRIES) {
5807 		dev_err(&ioa_cfg->pdev->dev,
5808 			"IOA taken offline - error recovery failed\n");
5809 
5810 		ioa_cfg->reset_retries = 0;
5811 		ioa_cfg->ioa_is_dead = 1;
5812 
5813 		if (ioa_cfg->in_ioa_bringdown) {
5814 			ioa_cfg->reset_cmd = NULL;
5815 			ioa_cfg->in_reset_reload = 0;
5816 			ipr_fail_all_ops(ioa_cfg);
5817 			wake_up_all(&ioa_cfg->reset_wait_q);
5818 
5819 			spin_unlock_irq(ioa_cfg->host->host_lock);
5820 			scsi_unblock_requests(ioa_cfg->host);
5821 			spin_lock_irq(ioa_cfg->host->host_lock);
5822 			return;
5823 		} else {
5824 			ioa_cfg->in_ioa_bringdown = 1;
5825 			shutdown_type = IPR_SHUTDOWN_NONE;
5826 		}
5827 	}
5828 
5829 	_ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_shutdown_ioa,
5830 				shutdown_type);
5831 }
5832 
5833 /**
5834  * ipr_reset_freeze - Hold off all I/O activity
5835  * @ipr_cmd:	ipr command struct
5836  *
5837  * Description: If the PCI slot is frozen, hold off all I/O
5838  * activity; then, as soon as the slot is available again,
5839  * initiate an adapter reset.
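 *
 * Return value:
 * 	IPR_RC_JOB_RETURN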
5840  */
5841 static int ipr_reset_freeze(struct ipr_cmnd *ipr_cmd)
5842 {
5843 	/* Disallow new interrupts, avoid loop */
5844 	ipr_cmd->ioa_cfg->allow_interrupts = 0;
5845 	list_add_tail(&ipr_cmd->queue, &ipr_cmd->ioa_cfg->pending_q);
5846 	ipr_cmd->done = ipr_reset_ioa_job;
5847 	return IPR_RC_JOB_RETURN;
5848 }
5849 
5850 /**
5851  * ipr_pci_frozen - Called when slot has experienced a PCI bus error.
5852  * @pdev:	PCI device struct
5853  *
5854  * Description: This routine is called to tell us that the PCI bus
5855  * is down. Can't do anything here, except put the device driver
5856  * into a holding pattern, waiting for the PCI bus to come back.
5857  */
5858 static void ipr_pci_frozen(struct pci_dev *pdev)
5859 {
5860 	unsigned long flags = 0;
5861 	struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
5862 
5863 	spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
5864 	_ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_freeze, IPR_SHUTDOWN_NONE);
5865 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
5866 }
5867 
5868 /**
5869  * ipr_pci_slot_reset - Called when PCI slot has been reset.
5870  * @pdev:	PCI device struct
5871  *
5872  * Description: This routine is called by the pci error recovery
5873  * code after the PCI slot has been reset, just before we
5874  * should resume normal operations.
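 *
 * Return value:
 * 	PCI_ERS_RESULT_RECOVERED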
5875  */
5876 static pci_ers_result_t ipr_pci_slot_reset(struct pci_dev *pdev)
5877 {
5878 	unsigned long flags = 0;
5879 	struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
5880 
5881 	spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
5882 	_ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_restore_cfg_space,
5883 	                                 IPR_SHUTDOWN_NONE);
5884 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
5885 	return PCI_ERS_RESULT_RECOVERED;
5886 }
5887 
5888 /**
5889  * ipr_pci_perm_failure - Called when PCI slot is dead for good.
5890  * @pdev:	PCI device struct
5891  *
5892  * Description: This routine is called when the PCI bus has
5893  * permanently failed.
5894  */
5895 static void ipr_pci_perm_failure(struct pci_dev *pdev)
5896 {
5897 	unsigned long flags = 0;
5898 	struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
5899 
5900 	spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
5901 	if (ioa_cfg->sdt_state == WAIT_FOR_DUMP)
5902 		ioa_cfg->sdt_state = ABORT_DUMP;
5903 	ioa_cfg->reset_retries = IPR_NUM_RESET_RELOAD_RETRIES;
5904 	ioa_cfg->in_ioa_bringdown = 1;
5905 	ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
5906 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
5907 }
5908 
5909 /**
5910  * ipr_pci_error_detected - Called when a PCI error is detected.
5911  * @pdev:	PCI device struct
5912  * @state:	PCI channel state
5913  *
5914  * Description: Called when a PCI error is detected.
5915  *
5916  * Return value:
5917  * 	PCI_ERS_RESULT_NEED_RESET or PCI_ERS_RESULT_DISCONNECT
5918  */
5919 static pci_ers_result_t ipr_pci_error_detected(struct pci_dev *pdev,
5920 					       pci_channel_state_t state)
5921 {
5922 	switch (state) {
5923 	case pci_channel_io_frozen:
5924 		ipr_pci_frozen(pdev);
5925 		return PCI_ERS_RESULT_NEED_RESET;
5926 	case pci_channel_io_perm_failure:
5927 		ipr_pci_perm_failure(pdev);
5928 		return PCI_ERS_RESULT_DISCONNECT;
5930 	default:
5931 		break;
5932 	}
5933 	return PCI_ERS_RESULT_NEED_RESET;
5934 }
5935 
5936 /**
5937  * ipr_probe_ioa_part2 - Initializes IOAs found in ipr_probe_ioa(..)
5938  * @ioa_cfg:	ioa cfg struct
5939  *
5940  * Description: This is the second phase of adapter initialization.
5941  * This function takes care of initializing the adapter to the point
5942  * where it can accept new commands.
5943  *
5944  * Return value:
5945  * 	0 on success / -EIO on failure
5946  **/
5947 static int __devinit ipr_probe_ioa_part2(struct ipr_ioa_cfg *ioa_cfg)
5948 {
5949 	int rc = 0;
5950 	unsigned long host_lock_flags = 0;
5951 
5952 	ENTER;
5953 	spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
5954 	dev_dbg(&ioa_cfg->pdev->dev, "ioa_cfg adx: 0x%p\n", ioa_cfg);
5955 	if (ioa_cfg->needs_hard_reset) {
5956 		ioa_cfg->needs_hard_reset = 0;
5957 		ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
5958 	} else
5959 		_ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_enable_ioa,
5960 					IPR_SHUTDOWN_NONE);
5961 
5962 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
5963 	wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
5964 	spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
5965 
5966 	if (ioa_cfg->ioa_is_dead) {
5967 		rc = -EIO;
5968 	} else if (ipr_invalid_adapter(ioa_cfg)) {
5969 		if (!ipr_testmode)
5970 			rc = -EIO;
5971 
5972 		dev_err(&ioa_cfg->pdev->dev,
5973 			"Adapter not supported in this hardware configuration.\n");
5974 	}
5975 
5976 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
5977 
5978 	LEAVE;
5979 	return rc;
5980 }
5981 
5982 /**
5983  * ipr_free_cmd_blks - Frees command blocks allocated for an adapter
5984  * @ioa_cfg:	ioa config struct
5985  *
5986  * Return value:
5987  * 	none
5988  **/
5989 static void ipr_free_cmd_blks(struct ipr_ioa_cfg *ioa_cfg)
5990 {
5991 	int i;
5992 
5993 	for (i = 0; i < IPR_NUM_CMD_BLKS; i++) {
5994 		if (ioa_cfg->ipr_cmnd_list[i])
5995 			pci_pool_free(ioa_cfg->ipr_cmd_pool,
5996 				      ioa_cfg->ipr_cmnd_list[i],
5997 				      ioa_cfg->ipr_cmnd_list_dma[i]);
5998 
5999 		ioa_cfg->ipr_cmnd_list[i] = NULL;
6000 	}
6001 
6002 	if (ioa_cfg->ipr_cmd_pool)
6003 		pci_pool_destroy (ioa_cfg->ipr_cmd_pool);
6004 
6005 	ioa_cfg->ipr_cmd_pool = NULL;
6006 }
6007 
6008 /**
6009  * ipr_free_mem - Frees memory allocated for an adapter
6010  * @ioa_cfg:	ioa cfg struct
6011  *
6012  * Return value:
6013  * 	nothing
6014  **/
6015 static void ipr_free_mem(struct ipr_ioa_cfg *ioa_cfg)
6016 {
6017 	int i;
6018 
6019 	kfree(ioa_cfg->res_entries);
6020 	pci_free_consistent(ioa_cfg->pdev, sizeof(struct ipr_misc_cbs),
6021 			    ioa_cfg->vpd_cbs, ioa_cfg->vpd_cbs_dma);
6022 	ipr_free_cmd_blks(ioa_cfg);
6023 	pci_free_consistent(ioa_cfg->pdev, sizeof(u32) * IPR_NUM_CMD_BLKS,
6024 			    ioa_cfg->host_rrq, ioa_cfg->host_rrq_dma);
6025 	pci_free_consistent(ioa_cfg->pdev, sizeof(struct ipr_config_table),
6026 			    ioa_cfg->cfg_table,
6027 			    ioa_cfg->cfg_table_dma);
6028 
6029 	for (i = 0; i < IPR_NUM_HCAMS; i++) {
6030 		pci_free_consistent(ioa_cfg->pdev,
6031 				    sizeof(struct ipr_hostrcb),
6032 				    ioa_cfg->hostrcb[i],
6033 				    ioa_cfg->hostrcb_dma[i]);
6034 	}
6035 
6036 	ipr_free_dump(ioa_cfg);
6037 	kfree(ioa_cfg->trace);
6038 }
6039 
6040 /**
6041  * ipr_free_all_resources - Free all allocated resources for an adapter.
6042  * @ipr_cmd:	ipr command struct
6043  *
6044  * This function frees all allocated resources for the
6045  * specified adapter.
6046  *
6047  * Return value:
6048  * 	none
6049  **/
6050 static void ipr_free_all_resources(struct ipr_ioa_cfg *ioa_cfg)
6051 {
6052 	struct pci_dev *pdev = ioa_cfg->pdev;
6053 
6054 	ENTER;
6055 	free_irq(pdev->irq, ioa_cfg);
6056 	iounmap(ioa_cfg->hdw_dma_regs);
6057 	pci_release_regions(pdev);
6058 	ipr_free_mem(ioa_cfg);
6059 	scsi_host_put(ioa_cfg->host);
6060 	pci_disable_device(pdev);
6061 	LEAVE;
6062 }
6063 
6064 /**
6065  * ipr_alloc_cmd_blks - Allocate command blocks for an adapter
6066  * @ioa_cfg:	ioa config struct
6067  *
6068  * Return value:
6069  * 	0 on success / -ENOMEM on allocation failure
6070  **/
6071 static int __devinit ipr_alloc_cmd_blks(struct ipr_ioa_cfg *ioa_cfg)
6072 {
6073 	struct ipr_cmnd *ipr_cmd;
6074 	struct ipr_ioarcb *ioarcb;
6075 	dma_addr_t dma_addr;
6076 	int i;
6077 
6078 	ioa_cfg->ipr_cmd_pool = pci_pool_create (IPR_NAME, ioa_cfg->pdev,
6079 						 sizeof(struct ipr_cmnd), 8, 0);
6080 
6081 	if (!ioa_cfg->ipr_cmd_pool)
6082 		return -ENOMEM;
6083 
6084 	for (i = 0; i < IPR_NUM_CMD_BLKS; i++) {
6085 		ipr_cmd = pci_pool_alloc (ioa_cfg->ipr_cmd_pool, SLAB_KERNEL, &dma_addr);
6086 
6087 		if (!ipr_cmd) {
6088 			ipr_free_cmd_blks(ioa_cfg);
6089 			return -ENOMEM;
6090 		}
6091 
6092 		memset(ipr_cmd, 0, sizeof(*ipr_cmd));
6093 		ioa_cfg->ipr_cmnd_list[i] = ipr_cmd;
6094 		ioa_cfg->ipr_cmnd_list_dma[i] = dma_addr;
6095 
6096 		ioarcb = &ipr_cmd->ioarcb;
6097 		ioarcb->ioarcb_host_pci_addr = cpu_to_be32(dma_addr);
6098 		ioarcb->host_response_handle = cpu_to_be32(i << 2);
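		/*
		 * The IOA echoes this handle back through the host RRQ when
		 * the command completes; the low two bits are left clear
		 * because the response-queue encoding (toggle bit, etc.)
		 * lives there, so the command index occupies bits 31:2.
		 */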
6099 		ioarcb->write_ioadl_addr =
6100 			cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, ioadl));
6101 		ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr;
6102 		ioarcb->ioasa_host_pci_addr =
6103 			cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, ioasa));
6104 		ioarcb->ioasa_len = cpu_to_be16(sizeof(struct ipr_ioasa));
6105 		ipr_cmd->cmd_index = i;
6106 		ipr_cmd->ioa_cfg = ioa_cfg;
6107 		ipr_cmd->sense_buffer_dma = dma_addr +
6108 			offsetof(struct ipr_cmnd, sense_buffer);
6109 
6110 		list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
6111 	}
6112 
6113 	return 0;
6114 }
6115 
6116 /**
6117  * ipr_alloc_mem - Allocate memory for an adapter
6118  * @ioa_cfg:	ioa config struct
6119  *
6120  * Return value:
6121  * 	0 on success / non-zero for error
6122  **/
6123 static int __devinit ipr_alloc_mem(struct ipr_ioa_cfg *ioa_cfg)
6124 {
6125 	struct pci_dev *pdev = ioa_cfg->pdev;
6126 	int i, rc = -ENOMEM;
6127 
6128 	ENTER;
6129 	ioa_cfg->res_entries = kzalloc(sizeof(struct ipr_resource_entry) *
6130 				       IPR_MAX_PHYSICAL_DEVS, GFP_KERNEL);
6131 
6132 	if (!ioa_cfg->res_entries)
6133 		goto out;
6134 
6135 	for (i = 0; i < IPR_MAX_PHYSICAL_DEVS; i++)
6136 		list_add_tail(&ioa_cfg->res_entries[i].queue, &ioa_cfg->free_res_q);
6137 
6138 	ioa_cfg->vpd_cbs = pci_alloc_consistent(ioa_cfg->pdev,
6139 						sizeof(struct ipr_misc_cbs),
6140 						&ioa_cfg->vpd_cbs_dma);
6141 
6142 	if (!ioa_cfg->vpd_cbs)
6143 		goto out_free_res_entries;
6144 
6145 	if (ipr_alloc_cmd_blks(ioa_cfg))
6146 		goto out_free_vpd_cbs;
6147 
6148 	ioa_cfg->host_rrq = pci_alloc_consistent(ioa_cfg->pdev,
6149 						 sizeof(u32) * IPR_NUM_CMD_BLKS,
6150 						 &ioa_cfg->host_rrq_dma);
6151 
6152 	if (!ioa_cfg->host_rrq)
6153 		goto out_ipr_free_cmd_blocks;
6154 
6155 	ioa_cfg->cfg_table = pci_alloc_consistent(ioa_cfg->pdev,
6156 						  sizeof(struct ipr_config_table),
6157 						  &ioa_cfg->cfg_table_dma);
6158 
6159 	if (!ioa_cfg->cfg_table)
6160 		goto out_free_host_rrq;
6161 
6162 	for (i = 0; i < IPR_NUM_HCAMS; i++) {
6163 		ioa_cfg->hostrcb[i] = pci_alloc_consistent(ioa_cfg->pdev,
6164 							   sizeof(struct ipr_hostrcb),
6165 							   &ioa_cfg->hostrcb_dma[i]);
6166 
6167 		if (!ioa_cfg->hostrcb[i])
6168 			goto out_free_hostrcb_dma;
6169 
6170 		ioa_cfg->hostrcb[i]->hostrcb_dma =
6171 			ioa_cfg->hostrcb_dma[i] + offsetof(struct ipr_hostrcb, hcam);
6172 		list_add_tail(&ioa_cfg->hostrcb[i]->queue, &ioa_cfg->hostrcb_free_q);
6173 	}
6174 
6175 	ioa_cfg->trace = kzalloc(sizeof(struct ipr_trace_entry) *
6176 				 IPR_NUM_TRACE_ENTRIES, GFP_KERNEL);
6177 
6178 	if (!ioa_cfg->trace)
6179 		goto out_free_hostrcb_dma;
6180 
6181 	rc = 0;
6182 out:
6183 	LEAVE;
6184 	return rc;
6185 
6186 out_free_hostrcb_dma:
6187 	while (i-- > 0) {
6188 		pci_free_consistent(pdev, sizeof(struct ipr_hostrcb),
6189 				    ioa_cfg->hostrcb[i],
6190 				    ioa_cfg->hostrcb_dma[i]);
6191 	}
6192 	pci_free_consistent(pdev, sizeof(struct ipr_config_table),
6193 			    ioa_cfg->cfg_table, ioa_cfg->cfg_table_dma);
6194 out_free_host_rrq:
6195 	pci_free_consistent(pdev, sizeof(u32) * IPR_NUM_CMD_BLKS,
6196 			    ioa_cfg->host_rrq, ioa_cfg->host_rrq_dma);
6197 out_ipr_free_cmd_blocks:
6198 	ipr_free_cmd_blks(ioa_cfg);
6199 out_free_vpd_cbs:
6200 	pci_free_consistent(pdev, sizeof(struct ipr_misc_cbs),
6201 			    ioa_cfg->vpd_cbs, ioa_cfg->vpd_cbs_dma);
6202 out_free_res_entries:
6203 	kfree(ioa_cfg->res_entries);
6204 	goto out;
6205 }
6206 
6207 /**
6208  * ipr_initialize_bus_attr - Initialize SCSI bus attributes to default values
6209  * @ioa_cfg:	ioa config struct
6210  *
6211  * Return value:
6212  * 	none
6213  **/
6214 static void __devinit ipr_initialize_bus_attr(struct ipr_ioa_cfg *ioa_cfg)
6215 {
6216 	int i;
6217 
6218 	for (i = 0; i < IPR_MAX_NUM_BUSES; i++) {
6219 		ioa_cfg->bus_attr[i].bus = i;
6220 		ioa_cfg->bus_attr[i].qas_enabled = 0;
6221 		ioa_cfg->bus_attr[i].bus_width = IPR_DEFAULT_BUS_WIDTH;
6222 		if (ipr_max_speed < ARRAY_SIZE(ipr_max_bus_speeds))
6223 			ioa_cfg->bus_attr[i].max_xfer_rate = ipr_max_bus_speeds[ipr_max_speed];
6224 		else
6225 			ioa_cfg->bus_attr[i].max_xfer_rate = IPR_U160_SCSI_RATE;
6226 	}
6227 }
6228 
6229 /**
6230  * ipr_init_ioa_cfg - Initialize IOA config struct
6231  * @ioa_cfg:	ioa config struct
6232  * @host:		scsi host struct
6233  * @pdev:		PCI dev struct
6234  *
6235  * Return value:
6236  * 	none
6237  **/
6238 static void __devinit ipr_init_ioa_cfg(struct ipr_ioa_cfg *ioa_cfg,
6239 				       struct Scsi_Host *host, struct pci_dev *pdev)
6240 {
6241 	const struct ipr_interrupt_offsets *p;
6242 	struct ipr_interrupts *t;
6243 	void __iomem *base;
6244 
6245 	ioa_cfg->host = host;
6246 	ioa_cfg->pdev = pdev;
6247 	ioa_cfg->log_level = ipr_log_level;
6248 	ioa_cfg->doorbell = IPR_DOORBELL;
6249 	if (!ipr_auto_create)
6250 		ioa_cfg->doorbell |= IPR_RUNTIME_RESET;
6251 	sprintf(ioa_cfg->eye_catcher, IPR_EYECATCHER);
6252 	sprintf(ioa_cfg->trace_start, IPR_TRACE_START_LABEL);
6253 	sprintf(ioa_cfg->ipr_free_label, IPR_FREEQ_LABEL);
6254 	sprintf(ioa_cfg->ipr_pending_label, IPR_PENDQ_LABEL);
6255 	sprintf(ioa_cfg->cfg_table_start, IPR_CFG_TBL_START);
6256 	sprintf(ioa_cfg->resource_table_label, IPR_RES_TABLE_LABEL);
6257 	sprintf(ioa_cfg->ipr_hcam_label, IPR_HCAM_LABEL);
6258 	sprintf(ioa_cfg->ipr_cmd_label, IPR_CMD_LABEL);
6259 
6260 	INIT_LIST_HEAD(&ioa_cfg->free_q);
6261 	INIT_LIST_HEAD(&ioa_cfg->pending_q);
6262 	INIT_LIST_HEAD(&ioa_cfg->hostrcb_free_q);
6263 	INIT_LIST_HEAD(&ioa_cfg->hostrcb_pending_q);
6264 	INIT_LIST_HEAD(&ioa_cfg->free_res_q);
6265 	INIT_LIST_HEAD(&ioa_cfg->used_res_q);
6266 	INIT_WORK(&ioa_cfg->work_q, ipr_worker_thread, ioa_cfg);
6267 	init_waitqueue_head(&ioa_cfg->reset_wait_q);
6268 	ioa_cfg->sdt_state = INACTIVE;
6269 	if (ipr_enable_cache)
6270 		ioa_cfg->cache_state = CACHE_ENABLED;
6271 	else
6272 		ioa_cfg->cache_state = CACHE_DISABLED;
6273 
6274 	ipr_initialize_bus_attr(ioa_cfg);
6275 
6276 	host->max_id = IPR_MAX_NUM_TARGETS_PER_BUS;
6277 	host->max_lun = IPR_MAX_NUM_LUNS_PER_TARGET;
6278 	host->max_channel = IPR_MAX_BUS_TO_SCAN;
6279 	host->unique_id = host->host_no;
6280 	host->max_cmd_len = IPR_MAX_CDB_LEN;
6281 	pci_set_drvdata(pdev, ioa_cfg);
6282 
6283 	p = &ioa_cfg->chip_cfg->regs;
6284 	t = &ioa_cfg->regs;
6285 	base = ioa_cfg->hdw_dma_regs;
6286 
6287 	t->set_interrupt_mask_reg = base + p->set_interrupt_mask_reg;
6288 	t->clr_interrupt_mask_reg = base + p->clr_interrupt_mask_reg;
6289 	t->sense_interrupt_mask_reg = base + p->sense_interrupt_mask_reg;
6290 	t->clr_interrupt_reg = base + p->clr_interrupt_reg;
6291 	t->sense_interrupt_reg = base + p->sense_interrupt_reg;
6292 	t->ioarrin_reg = base + p->ioarrin_reg;
6293 	t->sense_uproc_interrupt_reg = base + p->sense_uproc_interrupt_reg;
6294 	t->set_uproc_interrupt_reg = base + p->set_uproc_interrupt_reg;
6295 	t->clr_uproc_interrupt_reg = base + p->clr_uproc_interrupt_reg;
6296 }
6297 
6298 /**
6299  * ipr_get_chip_cfg - Find adapter chip configuration
6300  * @dev_id:		PCI device id struct
6301  *
6302  * Return value:
6303  * 	ptr to chip config on success / NULL on failure
6304  **/
6305 static const struct ipr_chip_cfg_t * __devinit
6306 ipr_get_chip_cfg(const struct pci_device_id *dev_id)
6307 {
6308 	int i;
6309 
6310 	if (dev_id->driver_data)
6311 		return (const struct ipr_chip_cfg_t *)dev_id->driver_data;
6312 
6313 	for (i = 0; i < ARRAY_SIZE(ipr_chip); i++)
6314 		if (ipr_chip[i].vendor == dev_id->vendor &&
6315 		    ipr_chip[i].device == dev_id->device)
6316 			return ipr_chip[i].cfg;
6317 	return NULL;
6318 }
6319 
6320 /**
6321  * ipr_probe_ioa - Allocates memory and does first stage of initialization
6322  * @pdev:		PCI device struct
6323  * @dev_id:		PCI device id struct
6324  *
6325  * Return value:
6326  * 	0 on success / non-zero on failure
6327  **/
6328 static int __devinit ipr_probe_ioa(struct pci_dev *pdev,
6329 				   const struct pci_device_id *dev_id)
6330 {
6331 	struct ipr_ioa_cfg *ioa_cfg;
6332 	struct Scsi_Host *host;
6333 	unsigned long ipr_regs_pci;
6334 	void __iomem *ipr_regs;
6335 	u32 rc = PCIBIOS_SUCCESSFUL;
6336 	volatile u32 mask, uproc;
6337 
6338 	ENTER;
6339 
6340 	if ((rc = pci_enable_device(pdev))) {
6341 		dev_err(&pdev->dev, "Cannot enable adapter\n");
6342 		goto out;
6343 	}
6344 
6345 	dev_info(&pdev->dev, "Found IOA with IRQ: %d\n", pdev->irq);
6346 
6347 	host = scsi_host_alloc(&driver_template, sizeof(*ioa_cfg));
6348 
6349 	if (!host) {
6350 		dev_err(&pdev->dev, "call to scsi_host_alloc failed!\n");
6351 		rc = -ENOMEM;
6352 		goto out_disable;
6353 	}
6354 
6355 	ioa_cfg = (struct ipr_ioa_cfg *)host->hostdata;
6356 	memset(ioa_cfg, 0, sizeof(struct ipr_ioa_cfg));
6357 
6358 	ioa_cfg->chip_cfg = ipr_get_chip_cfg(dev_id);
6359 
6360 	if (!ioa_cfg->chip_cfg) {
6361 		dev_err(&pdev->dev, "Unknown adapter chipset 0x%04X 0x%04X\n",
6362 			dev_id->vendor, dev_id->device);
6363 		goto out_scsi_host_put;
6364 	}
6365 
6366 	ipr_regs_pci = pci_resource_start(pdev, 0);
6367 
6368 	rc = pci_request_regions(pdev, IPR_NAME);
6369 	if (rc < 0) {
6370 		dev_err(&pdev->dev,
6371 			"Couldn't register memory range of registers\n");
6372 		goto out_scsi_host_put;
6373 	}
6374 
6375 	ipr_regs = ioremap(ipr_regs_pci, pci_resource_len(pdev, 0));
6376 
6377 	if (!ipr_regs) {
6378 		dev_err(&pdev->dev,
6379 			"Couldn't map memory range of registers\n");
6380 		rc = -ENOMEM;
6381 		goto out_release_regions;
6382 	}
6383 
6384 	ioa_cfg->hdw_dma_regs = ipr_regs;
6385 	ioa_cfg->hdw_dma_regs_pci = ipr_regs_pci;
6386 	ioa_cfg->ioa_mailbox = ioa_cfg->chip_cfg->mailbox + ipr_regs;
6387 
6388 	ipr_init_ioa_cfg(ioa_cfg, host, pdev);
6389 
6390 	pci_set_master(pdev);
6391 
6392 	rc = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
6393 	if (rc < 0) {
6394 		dev_err(&pdev->dev, "Failed to set PCI DMA mask\n");
6395 		goto cleanup_nomem;
6396 	}
6397 
6398 	rc = pci_write_config_byte(pdev, PCI_CACHE_LINE_SIZE,
6399 				   ioa_cfg->chip_cfg->cache_line_size);
6400 
6401 	if (rc != PCIBIOS_SUCCESSFUL) {
6402 		dev_err(&pdev->dev, "Write of cache line size failed\n");
6403 		rc = -EIO;
6404 		goto cleanup_nomem;
6405 	}
6406 
6407 	/* Save away PCI config space for use following IOA reset */
6408 	rc = pci_save_state(pdev);
6409 
6410 	if (rc != PCIBIOS_SUCCESSFUL) {
6411 		dev_err(&pdev->dev, "Failed to save PCI config space\n");
6412 		rc = -EIO;
6413 		goto cleanup_nomem;
6414 	}
6415 
6416 	if ((rc = ipr_save_pcix_cmd_reg(ioa_cfg)))
6417 		goto cleanup_nomem;
6418 
6419 	if ((rc = ipr_set_pcix_cmd_reg(ioa_cfg)))
6420 		goto cleanup_nomem;
6421 
6422 	rc = ipr_alloc_mem(ioa_cfg);
6423 	if (rc < 0) {
6424 		dev_err(&pdev->dev,
6425 			"Couldn't allocate enough memory for device driver!\n");
6426 		goto cleanup_nomem;
6427 	}
6428 
6429 	/*
6430 	 * If HRRQ updated interrupt is not masked, or reset alert is set,
6431 	 * the card is in an unknown state and needs a hard reset
6432 	 */
6433 	mask = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
6434 	uproc = readl(ioa_cfg->regs.sense_uproc_interrupt_reg);
6435 	if ((mask & IPR_PCII_HRRQ_UPDATED) == 0 || (uproc & IPR_UPROCI_RESET_ALERT))
6436 		ioa_cfg->needs_hard_reset = 1;
6437 
6438 	ipr_mask_and_clear_interrupts(ioa_cfg, ~IPR_PCII_IOA_TRANS_TO_OPER);
6439 	rc = request_irq(pdev->irq, ipr_isr, SA_SHIRQ, IPR_NAME, ioa_cfg);
6440 
6441 	if (rc) {
6442 		dev_err(&pdev->dev, "Couldn't register IRQ %d! rc=%d\n",
6443 			pdev->irq, rc);
6444 		goto cleanup_nolog;
6445 	}
6446 
6447 	spin_lock(&ipr_driver_lock);
6448 	list_add_tail(&ioa_cfg->queue, &ipr_ioa_head);
6449 	spin_unlock(&ipr_driver_lock);
6450 
6451 	LEAVE;
6452 out:
6453 	return rc;
6454 
6455 cleanup_nolog:
6456 	ipr_free_mem(ioa_cfg);
6457 cleanup_nomem:
6458 	iounmap(ipr_regs);
6459 out_release_regions:
6460 	pci_release_regions(pdev);
6461 out_scsi_host_put:
6462 	scsi_host_put(host);
6463 out_disable:
6464 	pci_disable_device(pdev);
6465 	goto out;
6466 }
6467 
6468 /**
6469  * ipr_scan_vsets - Scans for VSET devices
6470  * @ioa_cfg:	ioa config struct
6471  *
6472  * Description: Since the VSET resources do not follow SAM in that we can have
6473  * sparse LUNs with no LUN 0, we have to scan for these ourselves.
6474  *
6475  * Return value:
6476  * 	none
6477  **/
6478 static void ipr_scan_vsets(struct ipr_ioa_cfg *ioa_cfg)
6479 {
6480 	int target, lun;
6481 
6482 	for (target = 0; target < IPR_MAX_NUM_TARGETS_PER_BUS; target++)
6483 		for (lun = 0; lun < IPR_MAX_NUM_VSET_LUNS_PER_TARGET; lun++)
6484 			scsi_add_device(ioa_cfg->host, IPR_VSET_BUS, target, lun);
6485 }
6486 
6487 /**
6488  * ipr_initiate_ioa_bringdown - Bring down an adapter
6489  * @ioa_cfg:		ioa config struct
6490  * @shutdown_type:	shutdown type
6491  *
6492  * Description: This function will initiate bringing down the adapter.
6493  * This consists of issuing an IOA shutdown to the adapter
6494  * to flush the cache, and running BIST.
6495  * If the caller needs to wait on the completion of the reset,
6496  * the caller must sleep on the reset_wait_q.
6497  *
6498  * Return value:
6499  * 	none
6500  **/
6501 static void ipr_initiate_ioa_bringdown(struct ipr_ioa_cfg *ioa_cfg,
6502 				       enum ipr_shutdown_type shutdown_type)
6503 {
6504 	ENTER;
6505 	if (ioa_cfg->sdt_state == WAIT_FOR_DUMP)
6506 		ioa_cfg->sdt_state = ABORT_DUMP;
6507 	ioa_cfg->reset_retries = 0;
6508 	ioa_cfg->in_ioa_bringdown = 1;
6509 	ipr_initiate_ioa_reset(ioa_cfg, shutdown_type);
6510 	LEAVE;
6511 }
6512 
6513 /**
6514  * __ipr_remove - Remove a single adapter
6515  * @pdev:	pci device struct
6516  *
6517  * Adapter hot plug remove entry point.
6518  *
6519  * Return value:
6520  * 	none
6521  **/
6522 static void __ipr_remove(struct pci_dev *pdev)
6523 {
6524 	unsigned long host_lock_flags = 0;
6525 	struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
6526 	ENTER;
6527 
6528 	spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
6529 	ipr_initiate_ioa_bringdown(ioa_cfg, IPR_SHUTDOWN_NORMAL);
6530 
6531 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
6532 	wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
6533 	flush_scheduled_work();
6534 	spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
6535 
6536 	spin_lock(&ipr_driver_lock);
6537 	list_del(&ioa_cfg->queue);
6538 	spin_unlock(&ipr_driver_lock);
6539 
6540 	if (ioa_cfg->sdt_state == ABORT_DUMP)
6541 		ioa_cfg->sdt_state = WAIT_FOR_DUMP;
6542 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
6543 
6544 	ipr_free_all_resources(ioa_cfg);
6545 
6546 	LEAVE;
6547 }
6548 
6549 /**
6550  * ipr_remove - IOA hot plug remove entry point
6551  * @pdev:	pci device struct
6552  *
6553  * Adapter hot plug remove entry point.
6554  *
6555  * Return value:
6556  * 	none
6557  **/
6558 static void ipr_remove(struct pci_dev *pdev)
6559 {
6560 	struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
6561 
6562 	ENTER;
6563 
6564 	ipr_remove_trace_file(&ioa_cfg->host->shost_classdev.kobj,
6565 			      &ipr_trace_attr);
6566 	ipr_remove_dump_file(&ioa_cfg->host->shost_classdev.kobj,
6567 			     &ipr_dump_attr);
6568 	scsi_remove_host(ioa_cfg->host);
6569 
6570 	__ipr_remove(pdev);
6571 
6572 	LEAVE;
6573 }
6574 
6575 /**
6576  * ipr_probe - Adapter hot plug add entry point
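 * @pdev:	PCI device struct
 * @dev_id:	PCI device id struct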
6577  *
6578  * Return value:
6579  * 	0 on success / non-zero on failure
6580  **/
6581 static int __devinit ipr_probe(struct pci_dev *pdev,
6582 			       const struct pci_device_id *dev_id)
6583 {
6584 	struct ipr_ioa_cfg *ioa_cfg;
6585 	int rc;
6586 
6587 	rc = ipr_probe_ioa(pdev, dev_id);
6588 
6589 	if (rc)
6590 		return rc;
6591 
6592 	ioa_cfg = pci_get_drvdata(pdev);
6593 	rc = ipr_probe_ioa_part2(ioa_cfg);
6594 
6595 	if (rc) {
6596 		__ipr_remove(pdev);
6597 		return rc;
6598 	}
6599 
6600 	rc = scsi_add_host(ioa_cfg->host, &pdev->dev);
6601 
6602 	if (rc) {
6603 		__ipr_remove(pdev);
6604 		return rc;
6605 	}
6606 
6607 	rc = ipr_create_trace_file(&ioa_cfg->host->shost_classdev.kobj,
6608 				   &ipr_trace_attr);
6609 
6610 	if (rc) {
6611 		scsi_remove_host(ioa_cfg->host);
6612 		__ipr_remove(pdev);
6613 		return rc;
6614 	}
6615 
6616 	rc = ipr_create_dump_file(&ioa_cfg->host->shost_classdev.kobj,
6617 				   &ipr_dump_attr);
6618 
6619 	if (rc) {
6620 		ipr_remove_trace_file(&ioa_cfg->host->shost_classdev.kobj,
6621 				      &ipr_trace_attr);
6622 		scsi_remove_host(ioa_cfg->host);
6623 		__ipr_remove(pdev);
6624 		return rc;
6625 	}
6626 
6627 	scsi_scan_host(ioa_cfg->host);
6628 	ipr_scan_vsets(ioa_cfg);
6629 	scsi_add_device(ioa_cfg->host, IPR_IOA_BUS, IPR_IOA_TARGET, IPR_IOA_LUN);
6630 	ioa_cfg->allow_ml_add_del = 1;
6631 	ioa_cfg->host->max_channel = IPR_VSET_BUS;
6632 	schedule_work(&ioa_cfg->work_q);
6633 	return 0;
6634 }
6635 
6636 /**
6637  * ipr_shutdown - Shutdown handler.
6638  * @pdev:	pci device struct
6639  *
6640  * This function is invoked upon system shutdown/reboot. It will issue
6641  * an adapter shutdown to the adapter to flush the write cache.
6642  *
6643  * Return value:
6644  * 	none
6645  **/
6646 static void ipr_shutdown(struct pci_dev *pdev)
6647 {
6648 	struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
6649 	unsigned long lock_flags = 0;
6650 
6651 	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
6652 	ipr_initiate_ioa_bringdown(ioa_cfg, IPR_SHUTDOWN_NORMAL);
6653 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
6654 	wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
6655 }
6656 
6657 static struct pci_device_id ipr_pci_table[] __devinitdata = {
6658 	{ PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
6659 		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_5702,
6660 		0, 0, (kernel_ulong_t)&ipr_chip_cfg[0] },
6661 	{ PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
6662 		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_5703,
6663 	      0, 0, (kernel_ulong_t)&ipr_chip_cfg[0] },
6664 	{ PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
6665 		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_573D,
6666 	      0, 0, (kernel_ulong_t)&ipr_chip_cfg[0] },
6667 	{ PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
6668 		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_573E,
6669 	      0, 0, (kernel_ulong_t)&ipr_chip_cfg[0] },
6670 	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE,
6671 		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571B,
6672 	      0, 0, (kernel_ulong_t)&ipr_chip_cfg[0] },
6673 	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE,
6674 		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572E,
6675 	      0, 0, (kernel_ulong_t)&ipr_chip_cfg[0] },
6676 	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE,
6677 		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571A,
6678 	      0, 0, (kernel_ulong_t)&ipr_chip_cfg[0] },
6679 	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE,
6680 		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_575B,
6681 		0, 0, (kernel_ulong_t)&ipr_chip_cfg[0] },
6682 	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN,
6683 	      PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572A,
6684 	      0, 0, (kernel_ulong_t)&ipr_chip_cfg[0] },
6685 	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN,
6686 	      PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572B,
6687 	      0, 0, (kernel_ulong_t)&ipr_chip_cfg[0] },
6688 	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN,
6689 	      PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572A,
6690 	      0, 0, (kernel_ulong_t)&ipr_chip_cfg[0] },
6691 	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN,
6692 	      PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572B,
6693 	      0, 0, (kernel_ulong_t)&ipr_chip_cfg[0] },
6694 	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_SNIPE,
6695 		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_2780,
6696 		0, 0, (kernel_ulong_t)&ipr_chip_cfg[1] },
6697 	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP,
6698 		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571E,
6699 		0, 0, (kernel_ulong_t)&ipr_chip_cfg[1] },
6700 	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP,
6701 		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571F,
6702 		0, 0, (kernel_ulong_t)&ipr_chip_cfg[1] },
6703 	{ }
6704 };
6705 MODULE_DEVICE_TABLE(pci, ipr_pci_table);
6706 
6707 static struct pci_error_handlers ipr_err_handler = {
6708 	.error_detected = ipr_pci_error_detected,
6709 	.slot_reset = ipr_pci_slot_reset,
6710 };
6711 
6712 static struct pci_driver ipr_driver = {
6713 	.name = IPR_NAME,
6714 	.id_table = ipr_pci_table,
6715 	.probe = ipr_probe,
6716 	.remove = ipr_remove,
6717 	.shutdown = ipr_shutdown,
6718 	.err_handler = &ipr_err_handler,
6719 };
6720 
6721 /**
6722  * ipr_init - Module entry point
6723  *
6724  * Return value:
6725  * 	0 on success / negative value on failure
6726  **/
6727 static int __init ipr_init(void)
6728 {
6729 	ipr_info("IBM Power RAID SCSI Device Driver version: %s %s\n",
6730 		 IPR_DRIVER_VERSION, IPR_DRIVER_DATE);
6731 
6732 	return pci_module_init(&ipr_driver);
6733 }
6734 
6735 /**
6736  * ipr_exit - Module unload
6737  *
6738  * Module unload entry point.
6739  *
6740  * Return value:
6741  * 	none
6742  **/
6743 static void __exit ipr_exit(void)
6744 {
6745 	pci_unregister_driver(&ipr_driver);
6746 }
6747 
6748 module_init(ipr_init);
6749 module_exit(ipr_exit);
6750