xref: /linux/drivers/scsi/ipr.c (revision 7b12b9137930eb821b68e1bfa11e9de692208620)
1 /*
2  * ipr.c -- driver for IBM Power Linux RAID adapters
3  *
4  * Written By: Brian King <brking@us.ibm.com>, IBM Corporation
5  *
6  * Copyright (C) 2003, 2004 IBM Corporation
7  *
8  * This program is free software; you can redistribute it and/or modify
9  * it under the terms of the GNU General Public License as published by
10  * the Free Software Foundation; either version 2 of the License, or
11  * (at your option) any later version.
12  *
13  * This program is distributed in the hope that it will be useful,
14  * but WITHOUT ANY WARRANTY; without even the implied warranty of
15  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
16  * GNU General Public License for more details.
17  *
18  * You should have received a copy of the GNU General Public License
19  * along with this program; if not, write to the Free Software
20  * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
21  *
22  */
23 
24 /*
25  * Notes:
26  *
27  * This driver is used to control the following SCSI adapters:
28  *
29  * IBM iSeries: 5702, 5703, 2780, 5709, 570A, 570B
30  *
31  * IBM pSeries: PCI-X Dual Channel Ultra 320 SCSI RAID Adapter
32  *              PCI-X Dual Channel Ultra 320 SCSI Adapter
33  *              PCI-X Dual Channel Ultra 320 SCSI RAID Enablement Card
34  *              Embedded SCSI adapter on p615 and p655 systems
35  *
36  * Supported Hardware Features:
37  *	- Ultra 320 SCSI controller
38  *	- PCI-X host interface
39  *	- Embedded PowerPC RISC Processor and Hardware XOR DMA Engine
40  *	- Non-Volatile Write Cache
41  *	- Supports attachment of non-RAID disks, tape, and optical devices
42  *	- RAID Levels 0, 5, 10
43  *	- Hot spare
44  *	- Background Parity Checking
45  *	- Background Data Scrubbing
46  *	- Ability to increase the capacity of an existing RAID 5 disk array
47  *		by adding disks
48  *
49  * Driver Features:
50  *	- Tagged command queuing
51  *	- Adapter microcode download
52  *	- PCI hot plug
53  *	- SCSI device hot plug
54  *
55  */
56 
57 #include <linux/config.h>
58 #include <linux/fs.h>
59 #include <linux/init.h>
60 #include <linux/types.h>
61 #include <linux/errno.h>
62 #include <linux/kernel.h>
63 #include <linux/ioport.h>
64 #include <linux/delay.h>
65 #include <linux/pci.h>
66 #include <linux/wait.h>
67 #include <linux/spinlock.h>
68 #include <linux/sched.h>
69 #include <linux/interrupt.h>
70 #include <linux/blkdev.h>
71 #include <linux/firmware.h>
72 #include <linux/module.h>
73 #include <linux/moduleparam.h>
74 #include <asm/io.h>
75 #include <asm/irq.h>
76 #include <asm/processor.h>
77 #include <scsi/scsi.h>
78 #include <scsi/scsi_host.h>
79 #include <scsi/scsi_tcq.h>
80 #include <scsi/scsi_eh.h>
81 #include <scsi/scsi_cmnd.h>
82 #include <scsi/scsi_request.h>
83 #include "ipr.h"
84 
85 /*
86  *   Global Data
87  */
88 static struct list_head ipr_ioa_head = LIST_HEAD_INIT(ipr_ioa_head);
89 static unsigned int ipr_log_level = IPR_DEFAULT_LOG_LEVEL;
90 static unsigned int ipr_max_speed = 1;
91 static int ipr_testmode = 0;
92 static unsigned int ipr_fastfail = 0;
93 static unsigned int ipr_transop_timeout = IPR_OPERATIONAL_TIMEOUT;
94 static unsigned int ipr_enable_cache = 1;
95 static unsigned int ipr_debug = 0;
96 static int ipr_auto_create = 1;
97 static DEFINE_SPINLOCK(ipr_driver_lock);
98 
99 /* This table describes the differences between DMA controller chips */
100 static const struct ipr_chip_cfg_t ipr_chip_cfg[] = {
101 	{ /* Gemstone, Citrine, and Obsidian */
102 		.mailbox = 0x0042C,
103 		.cache_line_size = 0x20,
104 		{
105 			.set_interrupt_mask_reg = 0x0022C,
106 			.clr_interrupt_mask_reg = 0x00230,
107 			.sense_interrupt_mask_reg = 0x0022C,
108 			.clr_interrupt_reg = 0x00228,
109 			.sense_interrupt_reg = 0x00224,
110 			.ioarrin_reg = 0x00404,
111 			.sense_uproc_interrupt_reg = 0x00214,
112 			.set_uproc_interrupt_reg = 0x00214,
113 			.clr_uproc_interrupt_reg = 0x00218
114 		}
115 	},
116 	{ /* Snipe and Scamp */
117 		.mailbox = 0x0052C,
118 		.cache_line_size = 0x20,
119 		{
120 			.set_interrupt_mask_reg = 0x00288,
121 			.clr_interrupt_mask_reg = 0x0028C,
122 			.sense_interrupt_mask_reg = 0x00288,
123 			.clr_interrupt_reg = 0x00284,
124 			.sense_interrupt_reg = 0x00280,
125 			.ioarrin_reg = 0x00504,
126 			.sense_uproc_interrupt_reg = 0x00290,
127 			.set_uproc_interrupt_reg = 0x00290,
128 			.clr_uproc_interrupt_reg = 0x00294
129 		}
130 	},
131 };
132 
133 static const struct ipr_chip_t ipr_chip[] = {
134 	{ PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE, &ipr_chip_cfg[0] },
135 	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE, &ipr_chip_cfg[0] },
136 	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN, &ipr_chip_cfg[0] },
137 	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN, &ipr_chip_cfg[0] },
138 	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_SNIPE, &ipr_chip_cfg[1] },
139 	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP, &ipr_chip_cfg[1] }
140 };
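
/*
 * At probe time the driver scans ipr_chip[] to select the register
 * layout for the detected PCI ID. A minimal sketch of such a lookup
 * (the helper name is illustrative only; field names assumed from
 * struct ipr_chip_t in ipr.h):
 *
 *	static const struct ipr_chip_cfg_t *
 *	ipr_lookup_chip_cfg(u16 vendor, u16 device)
 *	{
 *		int i;
 *
 *		for (i = 0; i < ARRAY_SIZE(ipr_chip); i++)
 *			if (ipr_chip[i].vendor == vendor &&
 *			    ipr_chip[i].device == device)
 *				return ipr_chip[i].cfg;
 *		return NULL;
 *	}
 */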
141 
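/* Indexed by the ipr_max_speed module parameter: 0 = 80 MB/s, 1 = U160, 2 = U320 */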
142 static int ipr_max_bus_speeds [] = {
143 	IPR_80MBs_SCSI_RATE, IPR_U160_SCSI_RATE, IPR_U320_SCSI_RATE
144 };
145 
146 MODULE_AUTHOR("Brian King <brking@us.ibm.com>");
147 MODULE_DESCRIPTION("IBM Power RAID SCSI Adapter Driver");
148 module_param_named(max_speed, ipr_max_speed, uint, 0);
149 MODULE_PARM_DESC(max_speed, "Maximum bus speed (0-2). Default: 1=U160. Speeds: 0=80 MB/s, 1=U160, 2=U320");
150 module_param_named(log_level, ipr_log_level, uint, 0);
151 MODULE_PARM_DESC(log_level, "Set to 0 - 4 for increasing verbosity of device driver");
152 module_param_named(testmode, ipr_testmode, int, 0);
153 MODULE_PARM_DESC(testmode, "DANGEROUS!!! Allows unsupported configurations");
154 module_param_named(fastfail, ipr_fastfail, int, 0);
155 MODULE_PARM_DESC(fastfail, "Reduce timeouts and retries");
156 module_param_named(transop_timeout, ipr_transop_timeout, int, 0);
157 MODULE_PARM_DESC(transop_timeout, "Time in seconds to wait for adapter to come operational (default: 300)");
158 module_param_named(enable_cache, ipr_enable_cache, int, 0);
159 MODULE_PARM_DESC(enable_cache, "Enable adapter's non-volatile write cache (default: 1)");
160 module_param_named(debug, ipr_debug, int, 0);
161 MODULE_PARM_DESC(debug, "Enable device driver debug logging. Set to 1 to enable. (default: 0)");
162 module_param_named(auto_create, ipr_auto_create, int, 0);
163 MODULE_PARM_DESC(auto_create, "Auto-create single device RAID 0 arrays when initialized (default: 1)");
164 MODULE_LICENSE("GPL");
165 MODULE_VERSION(IPR_DRIVER_VERSION);
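
/*
 * Example (hypothetical invocation; the parameter names come from the
 * module_param_named() declarations above):
 *
 *	modprobe ipr max_speed=2 log_level=2 fastfail=1
 */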
166 
167 /*  A constant array of IOASCs/URCs/Error Messages */
168 static const
169 struct ipr_error_table_t ipr_error_table[] = {
170 	{0x00000000, 1, 1,
171 	"8155: An unknown error was received"},
172 	{0x00330000, 0, 0,
173 	"Soft underlength error"},
174 	{0x005A0000, 0, 0,
175 	"Command to be cancelled not found"},
176 	{0x00808000, 0, 0,
177 	"Qualified success"},
178 	{0x01080000, 1, 1,
179 	"FFFE: Soft device bus error recovered by the IOA"},
180 	{0x01170600, 0, 1,
181 	"FFF9: Device sector reassign successful"},
182 	{0x01170900, 0, 1,
183 	"FFF7: Media error recovered by device rewrite procedures"},
184 	{0x01180200, 0, 1,
185 	"7001: IOA sector reassignment successful"},
186 	{0x01180500, 0, 1,
187 	"FFF9: Soft media error. Sector reassignment recommended"},
188 	{0x01180600, 0, 1,
189 	"FFF7: Media error recovered by IOA rewrite procedures"},
190 	{0x01418000, 0, 1,
191 	"FF3D: Soft PCI bus error recovered by the IOA"},
192 	{0x01440000, 1, 1,
193 	"FFF6: Device hardware error recovered by the IOA"},
194 	{0x01448100, 0, 1,
195 	"FFF6: Device hardware error recovered by the device"},
196 	{0x01448200, 1, 1,
197 	"FF3D: Soft IOA error recovered by the IOA"},
198 	{0x01448300, 0, 1,
199 	"FFFA: Undefined device response recovered by the IOA"},
200 	{0x014A0000, 1, 1,
201 	"FFF6: Device bus error, message or command phase"},
202 	{0x015D0000, 0, 1,
203 	"FFF6: Failure prediction threshold exceeded"},
204 	{0x015D9200, 0, 1,
205 	"8009: Impending cache battery pack failure"},
206 	{0x02040400, 0, 0,
207 	"34FF: Disk device format in progress"},
208 	{0x023F0000, 0, 0,
209 	"Synchronization required"},
210 	{0x024E0000, 0, 0,
211 	"No ready, IOA shutdown"},
212 	{0x025A0000, 0, 0,
213 	"Not ready, IOA has been shutdown"},
214 	{0x02670100, 0, 1,
215 	"3020: Storage subsystem configuration error"},
216 	{0x03110B00, 0, 0,
217 	"FFF5: Medium error, data unreadable, recommend reassign"},
218 	{0x03110C00, 0, 0,
219 	"7000: Medium error, data unreadable, do not reassign"},
220 	{0x03310000, 0, 1,
221 	"FFF3: Disk media format bad"},
222 	{0x04050000, 0, 1,
223 	"3002: Addressed device failed to respond to selection"},
224 	{0x04080000, 1, 1,
225 	"3100: Device bus error"},
226 	{0x04080100, 0, 1,
227 	"3109: IOA timed out a device command"},
228 	{0x04088000, 0, 0,
229 	"3120: SCSI bus is not operational"},
230 	{0x04118000, 0, 1,
231 	"9000: IOA reserved area data check"},
232 	{0x04118100, 0, 1,
233 	"9001: IOA reserved area invalid data pattern"},
234 	{0x04118200, 0, 1,
235 	"9002: IOA reserved area LRC error"},
236 	{0x04320000, 0, 1,
237 	"102E: Out of alternate sectors for disk storage"},
238 	{0x04330000, 1, 1,
239 	"FFF4: Data transfer underlength error"},
240 	{0x04338000, 1, 1,
241 	"FFF4: Data transfer overlength error"},
242 	{0x043E0100, 0, 1,
243 	"3400: Logical unit failure"},
244 	{0x04408500, 0, 1,
245 	"FFF4: Device microcode is corrupt"},
246 	{0x04418000, 1, 1,
247 	"8150: PCI bus error"},
248 	{0x04430000, 1, 0,
249 	"Unsupported device bus message received"},
250 	{0x04440000, 1, 1,
251 	"FFF4: Disk device problem"},
252 	{0x04448200, 1, 1,
253 	"8150: Permanent IOA failure"},
254 	{0x04448300, 0, 1,
255 	"3010: Disk device returned wrong response to IOA"},
256 	{0x04448400, 0, 1,
257 	"8151: IOA microcode error"},
258 	{0x04448500, 0, 0,
259 	"Device bus status error"},
260 	{0x04448600, 0, 1,
261 	"8157: IOA error requiring IOA reset to recover"},
262 	{0x04490000, 0, 0,
263 	"Message reject received from the device"},
264 	{0x04449200, 0, 1,
265 	"8008: A permanent cache battery pack failure occurred"},
266 	{0x0444A000, 0, 1,
267 	"9090: Disk unit has been modified after the last known status"},
268 	{0x0444A200, 0, 1,
269 	"9081: IOA detected device error"},
270 	{0x0444A300, 0, 1,
271 	"9082: IOA detected device error"},
272 	{0x044A0000, 1, 1,
273 	"3110: Device bus error, message or command phase"},
274 	{0x04670400, 0, 1,
275 	"9091: Incorrect hardware configuration change has been detected"},
276 	{0x04678000, 0, 1,
277 	"9073: Invalid multi-adapter configuration"},
278 	{0x046E0000, 0, 1,
279 	"FFF4: Command to logical unit failed"},
280 	{0x05240000, 1, 0,
281 	"Illegal request, invalid request type or request packet"},
282 	{0x05250000, 0, 0,
283 	"Illegal request, invalid resource handle"},
284 	{0x05258000, 0, 0,
285 	"Illegal request, commands not allowed to this device"},
286 	{0x05258100, 0, 0,
287 	"Illegal request, command not allowed to a secondary adapter"},
288 	{0x05260000, 0, 0,
289 	"Illegal request, invalid field in parameter list"},
290 	{0x05260100, 0, 0,
291 	"Illegal request, parameter not supported"},
292 	{0x05260200, 0, 0,
293 	"Illegal request, parameter value invalid"},
294 	{0x052C0000, 0, 0,
295 	"Illegal request, command sequence error"},
296 	{0x052C8000, 1, 0,
297 	"Illegal request, dual adapter support not enabled"},
298 	{0x06040500, 0, 1,
299 	"9031: Array protection temporarily suspended, protection resuming"},
300 	{0x06040600, 0, 1,
301 	"9040: Array protection temporarily suspended, protection resuming"},
302 	{0x06290000, 0, 1,
303 	"FFFB: SCSI bus was reset"},
304 	{0x06290500, 0, 0,
305 	"FFFE: SCSI bus transition to single ended"},
306 	{0x06290600, 0, 0,
307 	"FFFE: SCSI bus transition to LVD"},
308 	{0x06298000, 0, 1,
309 	"FFFB: SCSI bus was reset by another initiator"},
310 	{0x063F0300, 0, 1,
311 	"3029: A device replacement has occurred"},
312 	{0x064C8000, 0, 1,
313 	"9051: IOA cache data exists for a missing or failed device"},
314 	{0x064C8100, 0, 1,
315 	"9055: Auxiliary cache IOA contains cache data needed by the primary IOA"},
316 	{0x06670100, 0, 1,
317 	"9025: Disk unit is not supported at its physical location"},
318 	{0x06670600, 0, 1,
319 	"3020: IOA detected a SCSI bus configuration error"},
320 	{0x06678000, 0, 1,
321 	"3150: SCSI bus configuration error"},
322 	{0x06678100, 0, 1,
323 	"9074: Asymmetric advanced function disk configuration"},
324 	{0x06690200, 0, 1,
325 	"9041: Array protection temporarily suspended"},
326 	{0x06698200, 0, 1,
327 	"9042: Corrupt array parity detected on specified device"},
328 	{0x066B0200, 0, 1,
329 	"9030: Array no longer protected due to missing or failed disk unit"},
330 	{0x066B8000, 0, 1,
331 	"9071: Link operational transition"},
332 	{0x066B8100, 0, 1,
333 	"9072: Link not operational transition"},
334 	{0x066B8200, 0, 1,
335 	"9032: Array exposed but still protected"},
336 	{0x07270000, 0, 0,
337 	"Failure due to other device"},
338 	{0x07278000, 0, 1,
339 	"9008: IOA does not support functions expected by devices"},
340 	{0x07278100, 0, 1,
341 	"9010: Cache data associated with attached devices cannot be found"},
342 	{0x07278200, 0, 1,
343 	"9011: Cache data belongs to devices other than those attached"},
344 	{0x07278400, 0, 1,
345 	"9020: Array missing 2 or more devices with only 1 device present"},
346 	{0x07278500, 0, 1,
347 	"9021: Array missing 2 or more devices with 2 or more devices present"},
348 	{0x07278600, 0, 1,
349 	"9022: Exposed array is missing a required device"},
350 	{0x07278700, 0, 1,
351 	"9023: Array member(s) not at required physical locations"},
352 	{0x07278800, 0, 1,
353 	"9024: Array not functional due to present hardware configuration"},
354 	{0x07278900, 0, 1,
355 	"9026: Array not functional due to present hardware configuration"},
356 	{0x07278A00, 0, 1,
357 	"9027: Array is missing a device and parity is out of sync"},
358 	{0x07278B00, 0, 1,
359 	"9028: Maximum number of arrays already exist"},
360 	{0x07278C00, 0, 1,
361 	"9050: Required cache data cannot be located for a disk unit"},
362 	{0x07278D00, 0, 1,
363 	"9052: Cache data exists for a device that has been modified"},
364 	{0x07278F00, 0, 1,
365 	"9054: IOA resources not available due to previous problems"},
366 	{0x07279100, 0, 1,
367 	"9092: Disk unit requires initialization before use"},
368 	{0x07279200, 0, 1,
369 	"9029: Incorrect hardware configuration change has been detected"},
370 	{0x07279600, 0, 1,
371 	"9060: One or more disk pairs are missing from an array"},
372 	{0x07279700, 0, 1,
373 	"9061: One or more disks are missing from an array"},
374 	{0x07279800, 0, 1,
375 	"9062: One or more disks are missing from an array"},
376 	{0x07279900, 0, 1,
377 	"9063: Maximum number of functional arrays has been exceeded"},
378 	{0x0B260000, 0, 0,
379 	"Aborted command, invalid descriptor"},
380 	{0x0B5A0000, 0, 0,
381 	"Command terminated by host"}
382 };
383 
384 static const struct ipr_ses_table_entry ipr_ses_table[] = {
385 	{ "2104-DL1        ", "XXXXXXXXXXXXXXXX", 80 },
386 	{ "2104-TL1        ", "XXXXXXXXXXXXXXXX", 80 },
387 	{ "HSBP07M P U2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* Hidive 7 slot */
388 	{ "HSBP05M P U2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* Hidive 5 slot */
389 	{ "HSBP05M S U2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* Bowtie */
390 	{ "HSBP06E ASU2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* MartinFenning */
391 	{ "2104-DU3        ", "XXXXXXXXXXXXXXXX", 160 },
392 	{ "2104-TU3        ", "XXXXXXXXXXXXXXXX", 160 },
393 	{ "HSBP04C RSU2SCSI", "XXXXXXX*XXXXXXXX", 160 },
394 	{ "HSBP06E RSU2SCSI", "XXXXXXX*XXXXXXXX", 160 },
395 	{ "St  V1S2        ", "XXXXXXXXXXXXXXXX", 160 },
396 	{ "HSBPD4M  PU3SCSI", "XXXXXXX*XXXXXXXX", 160 },
397 	{ "VSBPD1H   U3SCSI", "XXXXXXX*XXXXXXXX", 160 }
398 };
399 
400 /*
401  *  Function Prototypes
402  */
403 static int ipr_reset_alert(struct ipr_cmnd *);
404 static void ipr_process_ccn(struct ipr_cmnd *);
405 static void ipr_process_error(struct ipr_cmnd *);
406 static void ipr_reset_ioa_job(struct ipr_cmnd *);
407 static void ipr_initiate_ioa_reset(struct ipr_ioa_cfg *,
408 				   enum ipr_shutdown_type);
409 
410 #ifdef CONFIG_SCSI_IPR_TRACE
411 /**
412  * ipr_trc_hook - Add a trace entry to the driver trace
413  * @ipr_cmd:	ipr command struct
414  * @type:		trace type
415  * @add_data:	additional data
416  *
417  * Return value:
418  * 	none
419  **/
420 static void ipr_trc_hook(struct ipr_cmnd *ipr_cmd,
421 			 u8 type, u32 add_data)
422 {
423 	struct ipr_trace_entry *trace_entry;
424 	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
425 
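	/* trace_index is a bitfield (see ipr.h) sized to the trace ring,
	 * so the post-increment below wraps automatically */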
426 	trace_entry = &ioa_cfg->trace[ioa_cfg->trace_index++];
427 	trace_entry->time = jiffies;
428 	trace_entry->op_code = ipr_cmd->ioarcb.cmd_pkt.cdb[0];
429 	trace_entry->type = type;
430 	trace_entry->cmd_index = ipr_cmd->cmd_index;
431 	trace_entry->res_handle = ipr_cmd->ioarcb.res_handle;
432 	trace_entry->u.add_data = add_data;
433 }
434 #else
435 #define ipr_trc_hook(ipr_cmd, type, add_data) do { } while (0)
436 #endif
437 
438 /**
439  * ipr_reinit_ipr_cmnd - Re-initialize an IPR Cmnd block for reuse
440  * @ipr_cmd:	ipr command struct
441  *
442  * Return value:
443  * 	none
444  **/
445 static void ipr_reinit_ipr_cmnd(struct ipr_cmnd *ipr_cmd)
446 {
447 	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
448 	struct ipr_ioasa *ioasa = &ipr_cmd->ioasa;
449 
450 	memset(&ioarcb->cmd_pkt, 0, sizeof(struct ipr_cmd_pkt));
451 	ioarcb->write_data_transfer_length = 0;
452 	ioarcb->read_data_transfer_length = 0;
453 	ioarcb->write_ioadl_len = 0;
454 	ioarcb->read_ioadl_len = 0;
455 	ioasa->ioasc = 0;
456 	ioasa->residual_data_len = 0;
457 
458 	ipr_cmd->scsi_cmd = NULL;
459 	ipr_cmd->sense_buffer[0] = 0;
460 	ipr_cmd->dma_use_sg = 0;
461 }
462 
463 /**
464  * ipr_init_ipr_cmnd - Initialize an IPR Cmnd block
465  * @ipr_cmd:	ipr command struct
466  *
467  * Return value:
468  * 	none
469  **/
470 static void ipr_init_ipr_cmnd(struct ipr_cmnd *ipr_cmd)
471 {
472 	ipr_reinit_ipr_cmnd(ipr_cmd);
473 	ipr_cmd->u.scratch = 0;
474 	ipr_cmd->sibling = NULL;
475 	init_timer(&ipr_cmd->timer);
476 }
477 
478 /**
479  * ipr_get_free_ipr_cmnd - Get a free IPR Cmnd block
480  * @ioa_cfg:	ioa config struct
481  *
482  * Return value:
483  * 	pointer to ipr command struct
484  **/
485 static
486 struct ipr_cmnd *ipr_get_free_ipr_cmnd(struct ipr_ioa_cfg *ioa_cfg)
487 {
488 	struct ipr_cmnd *ipr_cmd;
489 
490 	ipr_cmd = list_entry(ioa_cfg->free_q.next, struct ipr_cmnd, queue);
491 	list_del(&ipr_cmd->queue);
492 	ipr_init_ipr_cmnd(ipr_cmd);
493 
494 	return ipr_cmd;
495 }
496 
497 /**
498  * ipr_unmap_sglist - Unmap scatterlist if mapped
499  * @ioa_cfg:	ioa config struct
500  * @ipr_cmd:	ipr command struct
501  *
502  * Return value:
503  * 	nothing
504  **/
505 static void ipr_unmap_sglist(struct ipr_ioa_cfg *ioa_cfg,
506 			     struct ipr_cmnd *ipr_cmd)
507 {
508 	struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
509 
510 	if (ipr_cmd->dma_use_sg) {
511 		if (scsi_cmd->use_sg > 0) {
512 			pci_unmap_sg(ioa_cfg->pdev, scsi_cmd->request_buffer,
513 				     scsi_cmd->use_sg,
514 				     scsi_cmd->sc_data_direction);
515 		} else {
516 			pci_unmap_single(ioa_cfg->pdev, ipr_cmd->dma_handle,
517 					 scsi_cmd->request_bufflen,
518 					 scsi_cmd->sc_data_direction);
519 		}
520 	}
521 }
522 
523 /**
524  * ipr_mask_and_clear_interrupts - Mask all and clear specified interrupts
525  * @ioa_cfg:	ioa config struct
526  * @clr_ints:     interrupts to clear
527  *
528  * This function masks all interrupts on the adapter, then clears the
529  * interrupts specified in the mask
530  *
531  * Return value:
532  * 	none
533  **/
534 static void ipr_mask_and_clear_interrupts(struct ipr_ioa_cfg *ioa_cfg,
535 					  u32 clr_ints)
536 {
537 	volatile u32 int_reg;
538 
539 	/* Stop new interrupts */
540 	ioa_cfg->allow_interrupts = 0;
541 
542 	/* Set interrupt mask to stop all new interrupts */
543 	writel(~0, ioa_cfg->regs.set_interrupt_mask_reg);
544 
545 	/* Clear any pending interrupts */
546 	writel(clr_ints, ioa_cfg->regs.clr_interrupt_reg);
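	/* Read back to flush the posted MMIO writes to the adapter */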
547 	int_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
548 }
549 
550 /**
551  * ipr_save_pcix_cmd_reg - Save PCI-X command register
552  * @ioa_cfg:	ioa config struct
553  *
554  * Return value:
555  * 	0 on success / -EIO on failure
556  **/
557 static int ipr_save_pcix_cmd_reg(struct ipr_ioa_cfg *ioa_cfg)
558 {
559 	int pcix_cmd_reg = pci_find_capability(ioa_cfg->pdev, PCI_CAP_ID_PCIX);
560 
561 	if (pcix_cmd_reg == 0) {
562 		dev_err(&ioa_cfg->pdev->dev, "Failed to save PCI-X command register\n");
563 		return -EIO;
564 	}
565 
566 	if (pci_read_config_word(ioa_cfg->pdev, pcix_cmd_reg + PCI_X_CMD,
567 				 &ioa_cfg->saved_pcix_cmd_reg) != PCIBIOS_SUCCESSFUL) {
568 		dev_err(&ioa_cfg->pdev->dev, "Failed to save PCI-X command register\n");
569 		return -EIO;
570 	}
571 
572 	ioa_cfg->saved_pcix_cmd_reg |= PCI_X_CMD_DPERR_E | PCI_X_CMD_ERO;
573 	return 0;
574 }
575 
576 /**
577  * ipr_set_pcix_cmd_reg - Setup PCI-X command register
578  * @ioa_cfg:	ioa config struct
579  *
580  * Return value:
581  * 	0 on success / -EIO on failure
582  **/
583 static int ipr_set_pcix_cmd_reg(struct ipr_ioa_cfg *ioa_cfg)
584 {
585 	int pcix_cmd_reg = pci_find_capability(ioa_cfg->pdev, PCI_CAP_ID_PCIX);
586 
587 	if (pcix_cmd_reg) {
588 		if (pci_write_config_word(ioa_cfg->pdev, pcix_cmd_reg + PCI_X_CMD,
589 					  ioa_cfg->saved_pcix_cmd_reg) != PCIBIOS_SUCCESSFUL) {
590 			dev_err(&ioa_cfg->pdev->dev, "Failed to setup PCI-X command register\n");
591 			return -EIO;
592 		}
593 	} else {
594 		dev_err(&ioa_cfg->pdev->dev,
595 			"Failed to setup PCI-X command register\n");
596 		return -EIO;
597 	}
598 
599 	return 0;
600 }
601 
602 /**
603  * ipr_scsi_eh_done - mid-layer done function for aborted ops
604  * @ipr_cmd:	ipr command struct
605  *
606  * This function is invoked by the interrupt handler for
607  * ops generated by the SCSI mid-layer which are being aborted.
608  *
609  * Return value:
610  * 	none
611  **/
612 static void ipr_scsi_eh_done(struct ipr_cmnd *ipr_cmd)
613 {
614 	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
615 	struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
616 
617 	scsi_cmd->result |= (DID_ERROR << 16);
618 
619 	ipr_unmap_sglist(ioa_cfg, ipr_cmd);
620 	scsi_cmd->scsi_done(scsi_cmd);
621 	list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
622 }
623 
624 /**
625  * ipr_fail_all_ops - Fails all outstanding ops.
626  * @ioa_cfg:	ioa config struct
627  *
628  * This function fails all outstanding ops.
629  *
630  * Return value:
631  * 	none
632  **/
633 static void ipr_fail_all_ops(struct ipr_ioa_cfg *ioa_cfg)
634 {
635 	struct ipr_cmnd *ipr_cmd, *temp;
636 
637 	ENTER;
638 	list_for_each_entry_safe(ipr_cmd, temp, &ioa_cfg->pending_q, queue) {
639 		list_del(&ipr_cmd->queue);
640 
641 		ipr_cmd->ioasa.ioasc = cpu_to_be32(IPR_IOASC_IOA_WAS_RESET);
642 		ipr_cmd->ioasa.ilid = cpu_to_be32(IPR_DRIVER_ILID);
643 
644 		if (ipr_cmd->scsi_cmd)
645 			ipr_cmd->done = ipr_scsi_eh_done;
646 
647 		ipr_trc_hook(ipr_cmd, IPR_TRACE_FINISH, IPR_IOASC_IOA_WAS_RESET);
648 		del_timer(&ipr_cmd->timer);
649 		ipr_cmd->done(ipr_cmd);
650 	}
651 
652 	LEAVE;
653 }
654 
655 /**
656  * ipr_do_req -  Send driver initiated requests.
657  * @ipr_cmd:		ipr command struct
658  * @done:			done function
659  * @timeout_func:	timeout function
660  * @timeout:		timeout value
661  *
662  * This function sends the specified command to the adapter with the
663  * timeout given. The done function is invoked on command completion.
664  *
665  * Return value:
666  * 	none
667  **/
668 static void ipr_do_req(struct ipr_cmnd *ipr_cmd,
669 		       void (*done) (struct ipr_cmnd *),
670 		       void (*timeout_func) (struct ipr_cmnd *), u32 timeout)
671 {
672 	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
673 
674 	list_add_tail(&ipr_cmd->queue, &ioa_cfg->pending_q);
675 
676 	ipr_cmd->done = done;
677 
678 	ipr_cmd->timer.data = (unsigned long) ipr_cmd;
679 	ipr_cmd->timer.expires = jiffies + timeout;
680 	ipr_cmd->timer.function = (void (*)(unsigned long))timeout_func;
681 
682 	add_timer(&ipr_cmd->timer);
683 
684 	ipr_trc_hook(ipr_cmd, IPR_TRACE_START, 0);
685 
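	/* Order the IOARCB updates in memory ahead of the doorbell write */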
686 	mb();
687 	writel(be32_to_cpu(ipr_cmd->ioarcb.ioarcb_host_pci_addr),
688 	       ioa_cfg->regs.ioarrin_reg);
689 }
690 
691 /**
692  * ipr_internal_cmd_done - Op done function for an internally generated op.
693  * @ipr_cmd:	ipr command struct
694  *
695  * This function is the op done function for an internally generated,
696  * blocking op. It simply wakes the sleeping thread.
697  *
698  * Return value:
699  * 	none
700  **/
701 static void ipr_internal_cmd_done(struct ipr_cmnd *ipr_cmd)
702 {
703 	if (ipr_cmd->sibling)
704 		ipr_cmd->sibling = NULL;
705 	else
706 		complete(&ipr_cmd->completion);
707 }
708 
709 /**
710  * ipr_send_blocking_cmd - Send command and sleep on its completion.
711  * @ipr_cmd:	ipr command struct
712  * @timeout_func:	function to invoke if command times out
713  * @timeout:	timeout
714  *
715  * Return value:
716  * 	none
717  **/
718 static void ipr_send_blocking_cmd(struct ipr_cmnd *ipr_cmd,
719 				  void (*timeout_func) (struct ipr_cmnd *ipr_cmd),
720 				  u32 timeout)
721 {
722 	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
723 
724 	init_completion(&ipr_cmd->completion);
725 	ipr_do_req(ipr_cmd, ipr_internal_cmd_done, timeout_func, timeout);
726 
727 	spin_unlock_irq(ioa_cfg->host->host_lock);
728 	wait_for_completion(&ipr_cmd->completion);
729 	spin_lock_irq(ioa_cfg->host->host_lock);
730 }
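
/*
 * A hypothetical caller, invoked with the host lock held (the request
 * type, CDB opcode, and timeout shown are illustrative only):
 *
 *	ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
 *	ipr_cmd->ioarcb.res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
 *	ipr_cmd->ioarcb.cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
 *	ipr_cmd->ioarcb.cmd_pkt.cdb[0] = IPR_ID_HOST_RR_Q;
 *	ipr_send_blocking_cmd(ipr_cmd, ipr_timeout, IPR_INTERNAL_TIMEOUT);
 */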
731 
732 /**
733  * ipr_send_hcam - Send an HCAM to the adapter.
734  * @ioa_cfg:	ioa config struct
735  * @type:		HCAM type
736  * @hostrcb:	hostrcb struct
737  *
738  * This function will send a Host Controlled Async command to the adapter.
739  * If HCAMs are currently not allowed to be issued to the adapter, it will
740  * place the hostrcb on the free queue.
741  *
742  * Return value:
743  * 	none
744  **/
745 static void ipr_send_hcam(struct ipr_ioa_cfg *ioa_cfg, u8 type,
746 			  struct ipr_hostrcb *hostrcb)
747 {
748 	struct ipr_cmnd *ipr_cmd;
749 	struct ipr_ioarcb *ioarcb;
750 
751 	if (ioa_cfg->allow_cmds) {
752 		ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
753 		list_add_tail(&ipr_cmd->queue, &ioa_cfg->pending_q);
754 		list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_pending_q);
755 
756 		ipr_cmd->u.hostrcb = hostrcb;
757 		ioarcb = &ipr_cmd->ioarcb;
758 
759 		ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
760 		ioarcb->cmd_pkt.request_type = IPR_RQTYPE_HCAM;
761 		ioarcb->cmd_pkt.cdb[0] = IPR_HOST_CONTROLLED_ASYNC;
762 		ioarcb->cmd_pkt.cdb[1] = type;
763 		ioarcb->cmd_pkt.cdb[7] = (sizeof(hostrcb->hcam) >> 8) & 0xff;
764 		ioarcb->cmd_pkt.cdb[8] = sizeof(hostrcb->hcam) & 0xff;
765 
766 		ioarcb->read_data_transfer_length = cpu_to_be32(sizeof(hostrcb->hcam));
767 		ioarcb->read_ioadl_len = cpu_to_be32(sizeof(struct ipr_ioadl_desc));
768 		ipr_cmd->ioadl[0].flags_and_data_len =
769 			cpu_to_be32(IPR_IOADL_FLAGS_READ_LAST | sizeof(hostrcb->hcam));
770 		ipr_cmd->ioadl[0].address = cpu_to_be32(hostrcb->hostrcb_dma);
771 
772 		if (type == IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE)
773 			ipr_cmd->done = ipr_process_ccn;
774 		else
775 			ipr_cmd->done = ipr_process_error;
776 
777 		ipr_trc_hook(ipr_cmd, IPR_TRACE_START, IPR_IOA_RES_ADDR);
778 
779 		mb();
780 		writel(be32_to_cpu(ipr_cmd->ioarcb.ioarcb_host_pci_addr),
781 		       ioa_cfg->regs.ioarrin_reg);
782 	} else {
783 		list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_free_q);
784 	}
785 }
786 
787 /**
788  * ipr_init_res_entry - Initialize a resource entry struct.
789  * @res:	resource entry struct
790  *
791  * Return value:
792  * 	none
793  **/
794 static void ipr_init_res_entry(struct ipr_resource_entry *res)
795 {
796 	res->needs_sync_complete = 0;
797 	res->in_erp = 0;
798 	res->add_to_ml = 0;
799 	res->del_from_ml = 0;
800 	res->resetting_device = 0;
801 	res->sdev = NULL;
802 }
803 
804 /**
805  * ipr_handle_config_change - Handle a config change from the adapter
806  * @ioa_cfg:	ioa config struct
807  * @hostrcb:	hostrcb
808  *
809  * Return value:
810  * 	none
811  **/
812 static void ipr_handle_config_change(struct ipr_ioa_cfg *ioa_cfg,
813 			      struct ipr_hostrcb *hostrcb)
814 {
815 	struct ipr_resource_entry *res = NULL;
816 	struct ipr_config_table_entry *cfgte;
817 	u32 is_ndn = 1;
818 
819 	cfgte = &hostrcb->hcam.u.ccn.cfgte;
820 
821 	list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
822 		if (!memcmp(&res->cfgte.res_addr, &cfgte->res_addr,
823 			    sizeof(cfgte->res_addr))) {
824 			is_ndn = 0;
825 			break;
826 		}
827 	}
828 
829 	if (is_ndn) {
830 		if (list_empty(&ioa_cfg->free_res_q)) {
831 			ipr_send_hcam(ioa_cfg,
832 				      IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE,
833 				      hostrcb);
834 			return;
835 		}
836 
837 		res = list_entry(ioa_cfg->free_res_q.next,
838 				 struct ipr_resource_entry, queue);
839 
840 		list_del(&res->queue);
841 		ipr_init_res_entry(res);
842 		list_add_tail(&res->queue, &ioa_cfg->used_res_q);
843 	}
844 
845 	memcpy(&res->cfgte, cfgte, sizeof(struct ipr_config_table_entry));
846 
847 	if (hostrcb->hcam.notify_type == IPR_HOST_RCB_NOTIF_TYPE_REM_ENTRY) {
848 		if (res->sdev) {
849 			res->del_from_ml = 1;
850 			res->cfgte.res_handle = IPR_INVALID_RES_HANDLE;
851 			if (ioa_cfg->allow_ml_add_del)
852 				schedule_work(&ioa_cfg->work_q);
853 		} else
854 			list_move_tail(&res->queue, &ioa_cfg->free_res_q);
855 	} else if (!res->sdev) {
856 		res->add_to_ml = 1;
857 		if (ioa_cfg->allow_ml_add_del)
858 			schedule_work(&ioa_cfg->work_q);
859 	}
860 
861 	ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE, hostrcb);
862 }
863 
864 /**
865  * ipr_process_ccn - Op done function for a CCN.
866  * @ipr_cmd:	ipr command struct
867  *
868  * This function is the op done function for a configuration
869  * change notification host controlled async from the adapter.
870  *
871  * Return value:
872  * 	none
873  **/
874 static void ipr_process_ccn(struct ipr_cmnd *ipr_cmd)
875 {
876 	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
877 	struct ipr_hostrcb *hostrcb = ipr_cmd->u.hostrcb;
878 	u32 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
879 
880 	list_del(&hostrcb->queue);
881 	list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
882 
883 	if (ioasc) {
884 		if (ioasc != IPR_IOASC_IOA_WAS_RESET)
885 			dev_err(&ioa_cfg->pdev->dev,
886 				"Host RCB failed with IOASC: 0x%08X\n", ioasc);
887 
888 		ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE, hostrcb);
889 	} else {
890 		ipr_handle_config_change(ioa_cfg, hostrcb);
891 	}
892 }
893 
894 /**
895  * ipr_log_vpd - Log the passed VPD to the error log.
896  * @vpd:		vendor/product id/sn struct
897  *
898  * Return value:
899  * 	none
900  **/
901 static void ipr_log_vpd(struct ipr_vpd *vpd)
902 {
903 	char buffer[IPR_VENDOR_ID_LEN + IPR_PROD_ID_LEN
904 		    + IPR_SERIAL_NUM_LEN];
905 
906 	memcpy(buffer, vpd->vpids.vendor_id, IPR_VENDOR_ID_LEN);
907 	memcpy(buffer + IPR_VENDOR_ID_LEN, vpd->vpids.product_id,
908 	       IPR_PROD_ID_LEN);
909 	buffer[IPR_VENDOR_ID_LEN + IPR_PROD_ID_LEN] = '\0';
910 	ipr_err("Vendor/Product ID: %s\n", buffer);
911 
912 	memcpy(buffer, vpd->sn, IPR_SERIAL_NUM_LEN);
913 	buffer[IPR_SERIAL_NUM_LEN] = '\0';
914 	ipr_err("    Serial Number: %s\n", buffer);
915 }
916 
917 /**
918  * ipr_log_ext_vpd - Log the passed extended VPD to the error log.
919  * @vpd:		vendor/product id/sn/wwn struct
920  *
921  * Return value:
922  * 	none
923  **/
924 static void ipr_log_ext_vpd(struct ipr_ext_vpd *vpd)
925 {
926 	ipr_log_vpd(&vpd->vpd);
927 	ipr_err("    WWN: %08X%08X\n", be32_to_cpu(vpd->wwid[0]),
928 		be32_to_cpu(vpd->wwid[1]));
929 }
930 
931 /**
932  * ipr_log_enhanced_cache_error - Log a cache error.
933  * @ioa_cfg:	ioa config struct
934  * @hostrcb:	hostrcb struct
935  *
936  * Return value:
937  * 	none
938  **/
939 static void ipr_log_enhanced_cache_error(struct ipr_ioa_cfg *ioa_cfg,
940 					 struct ipr_hostrcb *hostrcb)
941 {
942 	struct ipr_hostrcb_type_12_error *error =
943 		&hostrcb->hcam.u.error.u.type_12_error;
944 
945 	ipr_err("-----Current Configuration-----\n");
946 	ipr_err("Cache Directory Card Information:\n");
947 	ipr_log_ext_vpd(&error->ioa_vpd);
948 	ipr_err("Adapter Card Information:\n");
949 	ipr_log_ext_vpd(&error->cfc_vpd);
950 
951 	ipr_err("-----Expected Configuration-----\n");
952 	ipr_err("Cache Directory Card Information:\n");
953 	ipr_log_ext_vpd(&error->ioa_last_attached_to_cfc_vpd);
954 	ipr_err("Adapter Card Information:\n");
955 	ipr_log_ext_vpd(&error->cfc_last_attached_to_ioa_vpd);
956 
957 	ipr_err("Additional IOA Data: %08X %08X %08X\n",
958 		     be32_to_cpu(error->ioa_data[0]),
959 		     be32_to_cpu(error->ioa_data[1]),
960 		     be32_to_cpu(error->ioa_data[2]));
961 }
962 
963 /**
964  * ipr_log_cache_error - Log a cache error.
965  * @ioa_cfg:	ioa config struct
966  * @hostrcb:	hostrcb struct
967  *
968  * Return value:
969  * 	none
970  **/
971 static void ipr_log_cache_error(struct ipr_ioa_cfg *ioa_cfg,
972 				struct ipr_hostrcb *hostrcb)
973 {
974 	struct ipr_hostrcb_type_02_error *error =
975 		&hostrcb->hcam.u.error.u.type_02_error;
976 
977 	ipr_err("-----Current Configuration-----\n");
978 	ipr_err("Cache Directory Card Information:\n");
979 	ipr_log_vpd(&error->ioa_vpd);
980 	ipr_err("Adapter Card Information:\n");
981 	ipr_log_vpd(&error->cfc_vpd);
982 
983 	ipr_err("-----Expected Configuration-----\n");
984 	ipr_err("Cache Directory Card Information:\n");
985 	ipr_log_vpd(&error->ioa_last_attached_to_cfc_vpd);
986 	ipr_err("Adapter Card Information:\n");
987 	ipr_log_vpd(&error->cfc_last_attached_to_ioa_vpd);
988 
989 	ipr_err("Additional IOA Data: %08X %08X %08X\n",
990 		     be32_to_cpu(error->ioa_data[0]),
991 		     be32_to_cpu(error->ioa_data[1]),
992 		     be32_to_cpu(error->ioa_data[2]));
993 }
994 
995 /**
996  * ipr_log_enhanced_config_error - Log a configuration error.
997  * @ioa_cfg:	ioa config struct
998  * @hostrcb:	hostrcb struct
999  *
1000  * Return value:
1001  * 	none
1002  **/
1003 static void ipr_log_enhanced_config_error(struct ipr_ioa_cfg *ioa_cfg,
1004 					  struct ipr_hostrcb *hostrcb)
1005 {
1006 	int errors_logged, i;
1007 	struct ipr_hostrcb_device_data_entry_enhanced *dev_entry;
1008 	struct ipr_hostrcb_type_13_error *error;
1009 
1010 	error = &hostrcb->hcam.u.error.u.type_13_error;
1011 	errors_logged = be32_to_cpu(error->errors_logged);
1012 
1013 	ipr_err("Device Errors Detected/Logged: %d/%d\n",
1014 		be32_to_cpu(error->errors_detected), errors_logged);
1015 
1016 	dev_entry = error->dev;
1017 
1018 	for (i = 0; i < errors_logged; i++, dev_entry++) {
1019 		ipr_err_separator;
1020 
1021 		ipr_phys_res_err(ioa_cfg, dev_entry->dev_res_addr, "Device %d", i + 1);
1022 		ipr_log_ext_vpd(&dev_entry->vpd);
1023 
1024 		ipr_err("-----New Device Information-----\n");
1025 		ipr_log_ext_vpd(&dev_entry->new_vpd);
1026 
1027 		ipr_err("Cache Directory Card Information:\n");
1028 		ipr_log_ext_vpd(&dev_entry->ioa_last_with_dev_vpd);
1029 
1030 		ipr_err("Adapter Card Information:\n");
1031 		ipr_log_ext_vpd(&dev_entry->cfc_last_with_dev_vpd);
1032 	}
1033 }
1034 
1035 /**
1036  * ipr_log_config_error - Log a configuration error.
1037  * @ioa_cfg:	ioa config struct
1038  * @hostrcb:	hostrcb struct
1039  *
1040  * Return value:
1041  * 	none
1042  **/
1043 static void ipr_log_config_error(struct ipr_ioa_cfg *ioa_cfg,
1044 				 struct ipr_hostrcb *hostrcb)
1045 {
1046 	int errors_logged, i;
1047 	struct ipr_hostrcb_device_data_entry *dev_entry;
1048 	struct ipr_hostrcb_type_03_error *error;
1049 
1050 	error = &hostrcb->hcam.u.error.u.type_03_error;
1051 	errors_logged = be32_to_cpu(error->errors_logged);
1052 
1053 	ipr_err("Device Errors Detected/Logged: %d/%d\n",
1054 		be32_to_cpu(error->errors_detected), errors_logged);
1055 
1056 	dev_entry = error->dev;
1057 
1058 	for (i = 0; i < errors_logged; i++, dev_entry++) {
1059 		ipr_err_separator;
1060 
1061 		ipr_phys_res_err(ioa_cfg, dev_entry->dev_res_addr, "Device %d", i + 1);
1062 		ipr_log_vpd(&dev_entry->vpd);
1063 
1064 		ipr_err("-----New Device Information-----\n");
1065 		ipr_log_vpd(&dev_entry->new_vpd);
1066 
1067 		ipr_err("Cache Directory Card Information:\n");
1068 		ipr_log_vpd(&dev_entry->ioa_last_with_dev_vpd);
1069 
1070 		ipr_err("Adapter Card Information:\n");
1071 		ipr_log_vpd(&dev_entry->cfc_last_with_dev_vpd);
1072 
1073 		ipr_err("Additional IOA Data: %08X %08X %08X %08X %08X\n",
1074 			be32_to_cpu(dev_entry->ioa_data[0]),
1075 			be32_to_cpu(dev_entry->ioa_data[1]),
1076 			be32_to_cpu(dev_entry->ioa_data[2]),
1077 			be32_to_cpu(dev_entry->ioa_data[3]),
1078 			be32_to_cpu(dev_entry->ioa_data[4]));
1079 	}
1080 }
1081 
1082 /**
1083  * ipr_log_enhanced_array_error - Log an array configuration error.
1084  * @ioa_cfg:	ioa config struct
1085  * @hostrcb:	hostrcb struct
1086  *
1087  * Return value:
1088  * 	none
1089  **/
1090 static void ipr_log_enhanced_array_error(struct ipr_ioa_cfg *ioa_cfg,
1091 					 struct ipr_hostrcb *hostrcb)
1092 {
1093 	int i, num_entries;
1094 	struct ipr_hostrcb_type_14_error *error;
1095 	struct ipr_hostrcb_array_data_entry_enhanced *array_entry;
1096 	const u8 zero_sn[IPR_SERIAL_NUM_LEN] = { [0 ... IPR_SERIAL_NUM_LEN-1] = '0' };
1097 
1098 	error = &hostrcb->hcam.u.error.u.type_14_error;
1099 
1100 	ipr_err_separator;
1101 
1102 	ipr_err("RAID %s Array Configuration: %d:%d:%d:%d\n",
1103 		error->protection_level,
1104 		ioa_cfg->host->host_no,
1105 		error->last_func_vset_res_addr.bus,
1106 		error->last_func_vset_res_addr.target,
1107 		error->last_func_vset_res_addr.lun);
1108 
1109 	ipr_err_separator;
1110 
1111 	array_entry = error->array_member;
1112 	num_entries = min_t(u32, be32_to_cpu(error->num_entries),
1113 			    ARRAY_SIZE(error->array_member));
1114 
1115 	for (i = 0; i < num_entries; i++, array_entry++) {
1116 		if (!memcmp(array_entry->vpd.vpd.sn, zero_sn, IPR_SERIAL_NUM_LEN))
1117 			continue;
1118 
1119 		if (be32_to_cpu(error->exposed_mode_adn) == i)
1120 			ipr_err("Exposed Array Member %d:\n", i);
1121 		else
1122 			ipr_err("Array Member %d:\n", i);
1123 
1124 		ipr_log_ext_vpd(&array_entry->vpd);
1125 		ipr_phys_res_err(ioa_cfg, array_entry->dev_res_addr, "Current Location");
1126 		ipr_phys_res_err(ioa_cfg, array_entry->expected_dev_res_addr,
1127 				 "Expected Location");
1128 
1129 		ipr_err_separator;
1130 	}
1131 }
1132 
1133 /**
1134  * ipr_log_array_error - Log an array configuration error.
1135  * @ioa_cfg:	ioa config struct
1136  * @hostrcb:	hostrcb struct
1137  *
1138  * Return value:
1139  * 	none
1140  **/
1141 static void ipr_log_array_error(struct ipr_ioa_cfg *ioa_cfg,
1142 				struct ipr_hostrcb *hostrcb)
1143 {
1144 	int i;
1145 	struct ipr_hostrcb_type_04_error *error;
1146 	struct ipr_hostrcb_array_data_entry *array_entry;
1147 	const u8 zero_sn[IPR_SERIAL_NUM_LEN] = { [0 ... IPR_SERIAL_NUM_LEN-1] = '0' };
1148 
1149 	error = &hostrcb->hcam.u.error.u.type_04_error;
1150 
1151 	ipr_err_separator;
1152 
1153 	ipr_err("RAID %s Array Configuration: %d:%d:%d:%d\n",
1154 		error->protection_level,
1155 		ioa_cfg->host->host_no,
1156 		error->last_func_vset_res_addr.bus,
1157 		error->last_func_vset_res_addr.target,
1158 		error->last_func_vset_res_addr.lun);
1159 
1160 	ipr_err_separator;
1161 
1162 	array_entry = error->array_member;
1163 
1164 	for (i = 0; i < 18; i++) {
1165 		if (!memcmp(array_entry->vpd.sn, zero_sn, IPR_SERIAL_NUM_LEN))
1166 			continue;
1167 
1168 		if (be32_to_cpu(error->exposed_mode_adn) == i)
1169 			ipr_err("Exposed Array Member %d:\n", i);
1170 		else
1171 			ipr_err("Array Member %d:\n", i);
1172 
1173 		ipr_log_vpd(&array_entry->vpd);
1174 
1175 		ipr_phys_res_err(ioa_cfg, array_entry->dev_res_addr, "Current Location");
1176 		ipr_phys_res_err(ioa_cfg, array_entry->expected_dev_res_addr,
1177 				 "Expected Location");
1178 
1179 		ipr_err_separator;
1180 
1181 		if (i == 9)
1182 			array_entry = error->array_member2;
1183 		else
1184 			array_entry++;
1185 	}
1186 }
1187 
1188 /**
1189  * ipr_log_hex_data - Log additional hex IOA error data.
1190  * @data:		IOA error data
1191  * @len:		data length
1192  *
1193  * Return value:
1194  * 	none
1195  **/
1196 static void ipr_log_hex_data(u32 *data, int len)
1197 {
1198 	int i;
1199 
1200 	if (len == 0)
1201 		return;
1202 
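	/* len is in bytes; dump four big-endian 32-bit words per line */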
1203 	for (i = 0; i < len / 4; i += 4) {
1204 		ipr_err("%08X: %08X %08X %08X %08X\n", i*4,
1205 			be32_to_cpu(data[i]),
1206 			be32_to_cpu(data[i+1]),
1207 			be32_to_cpu(data[i+2]),
1208 			be32_to_cpu(data[i+3]));
1209 	}
1210 }
1211 
1212 /**
1213  * ipr_log_enhanced_dual_ioa_error - Log an enhanced dual adapter error.
1214  * @ioa_cfg:	ioa config struct
1215  * @hostrcb:	hostrcb struct
1216  *
1217  * Return value:
1218  * 	none
1219  **/
1220 static void ipr_log_enhanced_dual_ioa_error(struct ipr_ioa_cfg *ioa_cfg,
1221 					    struct ipr_hostrcb *hostrcb)
1222 {
1223 	struct ipr_hostrcb_type_17_error *error;
1224 
1225 	error = &hostrcb->hcam.u.error.u.type_17_error;
1226 	error->failure_reason[sizeof(error->failure_reason) - 1] = '\0';
1227 
1228 	ipr_err("%s\n", error->failure_reason);
1229 	ipr_err("Remote Adapter VPD:\n");
1230 	ipr_log_ext_vpd(&error->vpd);
1231 	ipr_log_hex_data(error->data,
1232 			 be32_to_cpu(hostrcb->hcam.length) -
1233 			 (offsetof(struct ipr_hostrcb_error, u) +
1234 			  offsetof(struct ipr_hostrcb_type_17_error, data)));
1235 }
1236 
1237 /**
1238  * ipr_log_dual_ioa_error - Log a dual adapter error.
1239  * @ioa_cfg:	ioa config struct
1240  * @hostrcb:	hostrcb struct
1241  *
1242  * Return value:
1243  * 	none
1244  **/
1245 static void ipr_log_dual_ioa_error(struct ipr_ioa_cfg *ioa_cfg,
1246 				   struct ipr_hostrcb *hostrcb)
1247 {
1248 	struct ipr_hostrcb_type_07_error *error;
1249 
1250 	error = &hostrcb->hcam.u.error.u.type_07_error;
1251 	error->failure_reason[sizeof(error->failure_reason) - 1] = '\0';
1252 
1253 	ipr_err("%s\n", error->failure_reason);
1254 	ipr_err("Remote Adapter VPD:\n");
1255 	ipr_log_vpd(&error->vpd);
1256 	ipr_log_hex_data(error->data,
1257 			 be32_to_cpu(hostrcb->hcam.length) -
1258 			 (offsetof(struct ipr_hostrcb_error, u) +
1259 			  offsetof(struct ipr_hostrcb_type_07_error, data)));
1260 }
1261 
1262 /**
1263  * ipr_log_generic_error - Log an adapter error.
1264  * @ioa_cfg:	ioa config struct
1265  * @hostrcb:	hostrcb struct
1266  *
1267  * Return value:
1268  * 	none
1269  **/
1270 static void ipr_log_generic_error(struct ipr_ioa_cfg *ioa_cfg,
1271 				  struct ipr_hostrcb *hostrcb)
1272 {
1273 	ipr_log_hex_data(hostrcb->hcam.u.raw.data,
1274 			 be32_to_cpu(hostrcb->hcam.length));
1275 }
1276 
1277 /**
1278  * ipr_get_error - Find the specified IOASC in the ipr_error_table.
1279  * @ioasc:	IOASC
1280  *
1281  * This function will return the index into the ipr_error_table
1282  * for the specified IOASC. If the IOASC is not in the table,
1283  * 0 will be returned, which points to the entry used for unknown errors.
1284  *
1285  * Return value:
1286  * 	index into the ipr_error_table
1287  **/
1288 static u32 ipr_get_error(u32 ioasc)
1289 {
1290 	int i;
1291 
1292 	for (i = 0; i < ARRAY_SIZE(ipr_error_table); i++)
1293 		if (ipr_error_table[i].ioasc == ioasc)
1294 			return i;
1295 
1296 	return 0;
1297 }
1298 
1299 /**
1300  * ipr_handle_log_data - Log an adapter error.
1301  * @ioa_cfg:	ioa config struct
1302  * @hostrcb:	hostrcb struct
1303  *
1304  * This function logs an adapter error to the system.
1305  *
1306  * Return value:
1307  * 	none
1308  **/
1309 static void ipr_handle_log_data(struct ipr_ioa_cfg *ioa_cfg,
1310 				struct ipr_hostrcb *hostrcb)
1311 {
1312 	u32 ioasc;
1313 	int error_index;
1314 
1315 	if (hostrcb->hcam.notify_type != IPR_HOST_RCB_NOTIF_TYPE_ERROR_LOG_ENTRY)
1316 		return;
1317 
1318 	if (hostrcb->hcam.notifications_lost == IPR_HOST_RCB_NOTIFICATIONS_LOST)
1319 		dev_err(&ioa_cfg->pdev->dev, "Error notifications lost\n");
1320 
1321 	ioasc = be32_to_cpu(hostrcb->hcam.u.error.failing_dev_ioasc);
1322 
1323 	if (ioasc == IPR_IOASC_BUS_WAS_RESET ||
1324 	    ioasc == IPR_IOASC_BUS_WAS_RESET_BY_OTHER) {
1325 		/* Tell the midlayer we had a bus reset so it will handle the UA properly */
1326 		scsi_report_bus_reset(ioa_cfg->host,
1327 				      hostrcb->hcam.u.error.failing_dev_res_addr.bus);
1328 	}
1329 
1330 	error_index = ipr_get_error(ioasc);
1331 
1332 	if (!ipr_error_table[error_index].log_hcam)
1333 		return;
1334 
1335 	if (ipr_is_device(&hostrcb->hcam.u.error.failing_dev_res_addr)) {
1336 		ipr_ra_err(ioa_cfg, hostrcb->hcam.u.error.failing_dev_res_addr,
1337 			   "%s\n", ipr_error_table[error_index].error);
1338 	} else {
1339 		dev_err(&ioa_cfg->pdev->dev, "%s\n",
1340 			ipr_error_table[error_index].error);
1341 	}
1342 
1343 	/* Set indication we have logged an error */
1344 	ioa_cfg->errors_logged++;
1345 
1346 	if (ioa_cfg->log_level < IPR_DEFAULT_LOG_LEVEL)
1347 		return;
1348 	if (be32_to_cpu(hostrcb->hcam.length) > sizeof(hostrcb->hcam.u.raw))
1349 		hostrcb->hcam.length = cpu_to_be32(sizeof(hostrcb->hcam.u.raw));
1350 
1351 	switch (hostrcb->hcam.overlay_id) {
1352 	case IPR_HOST_RCB_OVERLAY_ID_2:
1353 		ipr_log_cache_error(ioa_cfg, hostrcb);
1354 		break;
1355 	case IPR_HOST_RCB_OVERLAY_ID_3:
1356 		ipr_log_config_error(ioa_cfg, hostrcb);
1357 		break;
1358 	case IPR_HOST_RCB_OVERLAY_ID_4:
1359 	case IPR_HOST_RCB_OVERLAY_ID_6:
1360 		ipr_log_array_error(ioa_cfg, hostrcb);
1361 		break;
1362 	case IPR_HOST_RCB_OVERLAY_ID_7:
1363 		ipr_log_dual_ioa_error(ioa_cfg, hostrcb);
1364 		break;
1365 	case IPR_HOST_RCB_OVERLAY_ID_12:
1366 		ipr_log_enhanced_cache_error(ioa_cfg, hostrcb);
1367 		break;
1368 	case IPR_HOST_RCB_OVERLAY_ID_13:
1369 		ipr_log_enhanced_config_error(ioa_cfg, hostrcb);
1370 		break;
1371 	case IPR_HOST_RCB_OVERLAY_ID_14:
1372 	case IPR_HOST_RCB_OVERLAY_ID_16:
1373 		ipr_log_enhanced_array_error(ioa_cfg, hostrcb);
1374 		break;
1375 	case IPR_HOST_RCB_OVERLAY_ID_17:
1376 		ipr_log_enhanced_dual_ioa_error(ioa_cfg, hostrcb);
1377 		break;
1378 	case IPR_HOST_RCB_OVERLAY_ID_1:
1379 	case IPR_HOST_RCB_OVERLAY_ID_DEFAULT:
1380 	default:
1381 		ipr_log_generic_error(ioa_cfg, hostrcb);
1382 		break;
1383 	}
1384 }
1385 
1386 /**
1387  * ipr_process_error - Op done function for an adapter error log.
1388  * @ipr_cmd:	ipr command struct
1389  *
1390  * This function is the op done function for an error log host
1391  * controlled async from the adapter. It will log the error and
1392  * send the HCAM back to the adapter.
1393  *
1394  * Return value:
1395  * 	none
1396  **/
1397 static void ipr_process_error(struct ipr_cmnd *ipr_cmd)
1398 {
1399 	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
1400 	struct ipr_hostrcb *hostrcb = ipr_cmd->u.hostrcb;
1401 	u32 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
1402 
1403 	list_del(&hostrcb->queue);
1404 	list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
1405 
1406 	if (!ioasc) {
1407 		ipr_handle_log_data(ioa_cfg, hostrcb);
1408 	} else if (ioasc != IPR_IOASC_IOA_WAS_RESET) {
1409 		dev_err(&ioa_cfg->pdev->dev,
1410 			"Host RCB failed with IOASC: 0x%08X\n", ioasc);
1411 	}
1412 
1413 	ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_LOG_DATA, hostrcb);
1414 }
1415 
1416 /**
1417  * ipr_timeout -  An internally generated op has timed out.
1418  * @ipr_cmd:	ipr command struct
1419  *
1420  * This function blocks host requests and initiates an
1421  * adapter reset.
1422  *
1423  * Return value:
1424  * 	none
1425  **/
1426 static void ipr_timeout(struct ipr_cmnd *ipr_cmd)
1427 {
1428 	unsigned long lock_flags = 0;
1429 	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
1430 
1431 	ENTER;
1432 	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
1433 
1434 	ioa_cfg->errors_logged++;
1435 	dev_err(&ioa_cfg->pdev->dev,
1436 		"Adapter being reset due to command timeout.\n");
1437 
1438 	if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
1439 		ioa_cfg->sdt_state = GET_DUMP;
1440 
1441 	if (!ioa_cfg->in_reset_reload || ioa_cfg->reset_cmd == ipr_cmd)
1442 		ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
1443 
1444 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
1445 	LEAVE;
1446 }
1447 
1448 /**
1449  * ipr_oper_timeout -  Adapter timed out transitioning to operational
1450  * @ipr_cmd:	ipr command struct
1451  *
1452  * This function blocks host requests and initiates an
1453  * adapter reset.
1454  *
1455  * Return value:
1456  * 	none
1457  **/
1458 static void ipr_oper_timeout(struct ipr_cmnd *ipr_cmd)
1459 {
1460 	unsigned long lock_flags = 0;
1461 	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
1462 
1463 	ENTER;
1464 	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
1465 
1466 	ioa_cfg->errors_logged++;
1467 	dev_err(&ioa_cfg->pdev->dev,
1468 		"Adapter timed out transitioning to operational.\n");
1469 
1470 	if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
1471 		ioa_cfg->sdt_state = GET_DUMP;
1472 
1473 	if (!ioa_cfg->in_reset_reload || ioa_cfg->reset_cmd == ipr_cmd) {
1474 		if (ipr_fastfail)
1475 			ioa_cfg->reset_retries += IPR_NUM_RESET_RELOAD_RETRIES;
1476 		ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
1477 	}
1478 
1479 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
1480 	LEAVE;
1481 }
1482 
1483 /**
1484  * ipr_reset_reload - Reset/Reload the IOA
1485  * @ioa_cfg:		ioa config struct
1486  * @shutdown_type:	shutdown type
1487  *
1488  * This function resets the adapter and re-initializes it.
1489  * This function assumes that all new host commands have been stopped.
1490  * Return value:
1491  * 	SUCCESS / FAILED
1492  **/
1493 static int ipr_reset_reload(struct ipr_ioa_cfg *ioa_cfg,
1494 			    enum ipr_shutdown_type shutdown_type)
1495 {
1496 	if (!ioa_cfg->in_reset_reload)
1497 		ipr_initiate_ioa_reset(ioa_cfg, shutdown_type);
1498 
1499 	spin_unlock_irq(ioa_cfg->host->host_lock);
1500 	wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
1501 	spin_lock_irq(ioa_cfg->host->host_lock);
1502 
1503 	/* If we got hit with a host reset while we were already resetting
1504 	 * the adapter for some reason, that reset may have failed. */
1505 	if (ioa_cfg->ioa_is_dead) {
1506 		ipr_trace;
1507 		return FAILED;
1508 	}
1509 
1510 	return SUCCESS;
1511 }
1512 
1513 /**
1514  * ipr_find_ses_entry - Find matching SES in SES table
1515  * @res:	resource entry struct of SES
1516  *
1517  * Return value:
1518  * 	pointer to SES table entry / NULL on failure
1519  **/
1520 static const struct ipr_ses_table_entry *
1521 ipr_find_ses_entry(struct ipr_resource_entry *res)
1522 {
1523 	int i, j, matches;
1524 	const struct ipr_ses_table_entry *ste = ipr_ses_table;
1525 
1526 	for (i = 0; i < ARRAY_SIZE(ipr_ses_table); i++, ste++) {
1527 		for (j = 0, matches = 0; j < IPR_PROD_ID_LEN; j++) {
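			/* An 'X' in the compare string means this byte must
			 * match; any other character acts as a wildcard */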
1528 			if (ste->compare_product_id_byte[j] == 'X') {
1529 				if (res->cfgte.std_inq_data.vpids.product_id[j] == ste->product_id[j])
1530 					matches++;
1531 				else
1532 					break;
1533 			} else
1534 				matches++;
1535 		}
1536 
1537 		if (matches == IPR_PROD_ID_LEN)
1538 			return ste;
1539 	}
1540 
1541 	return NULL;
1542 }
1543 
1544 /**
1545  * ipr_get_max_scsi_speed - Determine max SCSI speed for a given bus
1546  * @ioa_cfg:	ioa config struct
1547  * @bus:		SCSI bus
1548  * @bus_width:	bus width
1549  *
1550  * Return value:
1551  *	SCSI bus speed in units of 100KHz, 1600 is 160 MHz
1552  *	For a 2-byte wide SCSI bus, the maximum transfer speed is
1553  *	twice the maximum transfer rate (e.g. for a wide enabled bus,
1554  *	max 160MHz = max 320MB/sec).
1555  **/
1556 static u32 ipr_get_max_scsi_speed(struct ipr_ioa_cfg *ioa_cfg, u8 bus, u8 bus_width)
1557 {
1558 	struct ipr_resource_entry *res;
1559 	const struct ipr_ses_table_entry *ste;
1560 	u32 max_xfer_rate = IPR_MAX_SCSI_RATE(bus_width);
1561 
1562 	/* Loop through each config table entry in the config table buffer */
1563 	list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
1564 		if (!(IPR_IS_SES_DEVICE(res->cfgte.std_inq_data)))
1565 			continue;
1566 
1567 		if (bus != res->cfgte.res_addr.bus)
1568 			continue;
1569 
1570 		if (!(ste = ipr_find_ses_entry(res)))
1571 			continue;
1572 
1573 		max_xfer_rate = (ste->max_bus_speed_limit * 10) / (bus_width / 8);
1574 	}
1575 
1576 	return max_xfer_rate;
1577 }
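
/*
 * Worked example: an enclosure limited to 160 MB/sec on a wide (16-bit)
 * bus yields (160 * 10) / (16 / 8) = 800, i.e. 80 MHz in the 100 KHz
 * units described above.
 */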
1578 
1579 /**
1580  * ipr_wait_iodbg_ack - Wait for an IODEBUG ACK from the IOA
1581  * @ioa_cfg:		ioa config struct
1582  * @max_delay:		max delay in micro-seconds to wait
1583  *
1584  * Waits for an IODEBUG ACK from the IOA, doing busy looping.
1585  *
1586  * Return value:
1587  * 	0 on success / other on failure
1588  **/
1589 static int ipr_wait_iodbg_ack(struct ipr_ioa_cfg *ioa_cfg, int max_delay)
1590 {
1591 	volatile u32 pcii_reg;
1592 	int delay = 1;
1593 
1594 	/* Read interrupt reg until IOA signals IO Debug Acknowledge */
1595 	while (delay < max_delay) {
1596 		pcii_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
1597 
1598 		if (pcii_reg & IPR_PCII_IO_DEBUG_ACKNOWLEDGE)
1599 			return 0;
1600 
1601 		/* udelay cannot be used if delay is more than a few milliseconds */
1602 		if ((delay / 1000) > MAX_UDELAY_MS)
1603 			mdelay(delay / 1000);
1604 		else
1605 			udelay(delay);
1606 
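		/* Exponential backoff: wait twice as long next time */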
1607 		delay += delay;
1608 	}
1609 	return -EIO;
1610 }
1611 
1612 /**
1613  * ipr_get_ldump_data_section - Dump IOA memory
1614  * @ioa_cfg:			ioa config struct
1615  * @start_addr:		adapter address to dump
1616  * @dest:		destination kernel buffer
1617  * @length_in_words:	length to dump in 4 byte words
1618  *
1619  * Return value:
1620  * 	0 on success / -EIO on failure
1621  **/
1622 static int ipr_get_ldump_data_section(struct ipr_ioa_cfg *ioa_cfg,
1623 				      u32 start_addr,
1624 				      __be32 *dest, u32 length_in_words)
1625 {
1626 	volatile u32 temp_pcii_reg;
1627 	int i, delay = 0;
1628 
1629 	/* Write IOA interrupt reg starting LDUMP state  */
1630 	writel((IPR_UPROCI_RESET_ALERT | IPR_UPROCI_IO_DEBUG_ALERT),
1631 	       ioa_cfg->regs.set_uproc_interrupt_reg);
1632 
1633 	/* Wait for IO debug acknowledge */
1634 	if (ipr_wait_iodbg_ack(ioa_cfg,
1635 			       IPR_LDUMP_MAX_LONG_ACK_DELAY_IN_USEC)) {
1636 		dev_err(&ioa_cfg->pdev->dev,
1637 			"IOA dump long data transfer timeout\n");
1638 		return -EIO;
1639 	}
1640 
1641 	/* Signal LDUMP interlocked - clear IO debug ack */
1642 	writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE,
1643 	       ioa_cfg->regs.clr_interrupt_reg);
1644 
1645 	/* Write Mailbox with starting address */
1646 	writel(start_addr, ioa_cfg->ioa_mailbox);
1647 
1648 	/* Signal address valid - clear IOA Reset alert */
1649 	writel(IPR_UPROCI_RESET_ALERT,
1650 	       ioa_cfg->regs.clr_uproc_interrupt_reg);
1651 
1652 	for (i = 0; i < length_in_words; i++) {
1653 		/* Wait for IO debug acknowledge */
1654 		if (ipr_wait_iodbg_ack(ioa_cfg,
1655 				       IPR_LDUMP_MAX_SHORT_ACK_DELAY_IN_USEC)) {
1656 			dev_err(&ioa_cfg->pdev->dev,
1657 				"IOA dump short data transfer timeout\n");
1658 			return -EIO;
1659 		}
1660 
1661 		/* Read data from mailbox and increment destination pointer */
1662 		*dest = cpu_to_be32(readl(ioa_cfg->ioa_mailbox));
1663 		dest++;
1664 
1665 		/* For all but the last word of data, signal data received */
1666 		if (i < (length_in_words - 1)) {
1667 			/* Signal dump data received - Clear IO debug Ack */
1668 			writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE,
1669 			       ioa_cfg->regs.clr_interrupt_reg);
1670 		}
1671 	}
1672 
1673 	/* Signal end of block transfer. Set reset alert then clear IO debug ack */
1674 	writel(IPR_UPROCI_RESET_ALERT,
1675 	       ioa_cfg->regs.set_uproc_interrupt_reg);
1676 
1677 	writel(IPR_UPROCI_IO_DEBUG_ALERT,
1678 	       ioa_cfg->regs.clr_uproc_interrupt_reg);
1679 
1680 	/* Signal dump data received - Clear IO debug Ack */
1681 	writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE,
1682 	       ioa_cfg->regs.clr_interrupt_reg);
1683 
1684 	/* Wait for IOA to signal LDUMP exit - IOA reset alert will be cleared */
1685 	while (delay < IPR_LDUMP_MAX_SHORT_ACK_DELAY_IN_USEC) {
1686 		temp_pcii_reg =
1687 		    readl(ioa_cfg->regs.sense_uproc_interrupt_reg);
1688 
1689 		if (!(temp_pcii_reg & IPR_UPROCI_RESET_ALERT))
1690 			return 0;
1691 
1692 		udelay(10);
1693 		delay += 10;
1694 	}
1695 
1696 	return 0;
1697 }
1698 
1699 #ifdef CONFIG_SCSI_IPR_DUMP
1700 /**
1701  * ipr_sdt_copy - Copy Smart Dump Table to kernel buffer
1702  * @ioa_cfg:		ioa config struct
1703  * @pci_address:	adapter address
1704  * @length:		length of data to copy
1705  *
1706  * Copy data from PCI adapter to kernel buffer.
1707  * Note: length MUST be a 4 byte multiple
1708  * Return value:
1709  * 	0 on success / other on failure
1710  **/
1711 static int ipr_sdt_copy(struct ipr_ioa_cfg *ioa_cfg,
1712 			unsigned long pci_address, u32 length)
1713 {
1714 	int bytes_copied = 0;
1715 	int cur_len, rc, rem_len, rem_page_len;
1716 	__be32 *page;
1717 	unsigned long lock_flags = 0;
1718 	struct ipr_ioa_dump *ioa_dump = &ioa_cfg->dump->ioa_dump;
1719 
1720 	while (bytes_copied < length &&
1721 	       (ioa_dump->hdr.len + bytes_copied) < IPR_MAX_IOA_DUMP_SIZE) {
1722 		if (ioa_dump->page_offset >= PAGE_SIZE ||
1723 		    ioa_dump->page_offset == 0) {
1724 			page = (__be32 *)__get_free_page(GFP_ATOMIC);
1725 
1726 			if (!page) {
1727 				ipr_trace;
1728 				return bytes_copied;
1729 			}
1730 
1731 			ioa_dump->page_offset = 0;
1732 			ioa_dump->ioa_data[ioa_dump->next_page_index] = page;
1733 			ioa_dump->next_page_index++;
1734 		} else
1735 			page = ioa_dump->ioa_data[ioa_dump->next_page_index - 1];
1736 
1737 		rem_len = length - bytes_copied;
1738 		rem_page_len = PAGE_SIZE - ioa_dump->page_offset;
1739 		cur_len = min(rem_len, rem_page_len);
1740 
1741 		spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
1742 		if (ioa_cfg->sdt_state == ABORT_DUMP) {
1743 			rc = -EIO;
1744 		} else {
1745 			rc = ipr_get_ldump_data_section(ioa_cfg,
1746 							pci_address + bytes_copied,
1747 							&page[ioa_dump->page_offset / 4],
1748 							(cur_len / sizeof(u32)));
1749 		}
1750 		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
1751 
1752 		if (!rc) {
1753 			ioa_dump->page_offset += cur_len;
1754 			bytes_copied += cur_len;
1755 		} else {
1756 			ipr_trace;
1757 			break;
1758 		}
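		/* Yield between sections; pulling a large dump one mailbox
		 * word per handshake can keep us busy for a long time */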
1759 		schedule();
1760 	}
1761 
1762 	return bytes_copied;
1763 }
1764 
1765 /**
1766  * ipr_init_dump_entry_hdr - Initialize a dump entry header.
1767  * @hdr:	dump entry header struct
1768  *
1769  * Return value:
1770  * 	nothing
1771  **/
1772 static void ipr_init_dump_entry_hdr(struct ipr_dump_entry_header *hdr)
1773 {
1774 	hdr->eye_catcher = IPR_DUMP_EYE_CATCHER;
1775 	hdr->num_elems = 1;
1776 	hdr->offset = sizeof(*hdr);
1777 	hdr->status = IPR_DUMP_STATUS_SUCCESS;
1778 }
1779 
1780 /**
1781  * ipr_dump_ioa_type_data - Fill in the adapter type in the dump.
1782  * @ioa_cfg:	ioa config struct
1783  * @driver_dump:	driver dump struct
1784  *
1785  * Return value:
1786  * 	nothing
1787  **/
1788 static void ipr_dump_ioa_type_data(struct ipr_ioa_cfg *ioa_cfg,
1789 				   struct ipr_driver_dump *driver_dump)
1790 {
1791 	struct ipr_inquiry_page3 *ucode_vpd = &ioa_cfg->vpd_cbs->page3_data;
1792 
1793 	ipr_init_dump_entry_hdr(&driver_dump->ioa_type_entry.hdr);
1794 	driver_dump->ioa_type_entry.hdr.len =
1795 		sizeof(struct ipr_dump_ioa_type_entry) -
1796 		sizeof(struct ipr_dump_entry_header);
1797 	driver_dump->ioa_type_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_BINARY;
1798 	driver_dump->ioa_type_entry.hdr.id = IPR_DUMP_DRIVER_TYPE_ID;
1799 	driver_dump->ioa_type_entry.type = ioa_cfg->type;
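	/* Pack the microcode level into one word: major release, card
	 * type, then the two minor release bytes */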
1800 	driver_dump->ioa_type_entry.fw_version = (ucode_vpd->major_release << 24) |
1801 		(ucode_vpd->card_type << 16) | (ucode_vpd->minor_release[0] << 8) |
1802 		ucode_vpd->minor_release[1];
1803 	driver_dump->hdr.num_entries++;
1804 }
1805 
1806 /**
1807  * ipr_dump_version_data - Fill in the driver version in the dump.
1808  * @ioa_cfg:	ioa config struct
1809  * @driver_dump:	driver dump struct
1810  *
1811  * Return value:
1812  * 	nothing
1813  **/
1814 static void ipr_dump_version_data(struct ipr_ioa_cfg *ioa_cfg,
1815 				  struct ipr_driver_dump *driver_dump)
1816 {
1817 	ipr_init_dump_entry_hdr(&driver_dump->version_entry.hdr);
1818 	driver_dump->version_entry.hdr.len =
1819 		sizeof(struct ipr_dump_version_entry) -
1820 		sizeof(struct ipr_dump_entry_header);
1821 	driver_dump->version_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_ASCII;
1822 	driver_dump->version_entry.hdr.id = IPR_DUMP_DRIVER_VERSION_ID;
1823 	strcpy(driver_dump->version_entry.version, IPR_DRIVER_VERSION);
1824 	driver_dump->hdr.num_entries++;
1825 }
1826 
1827 /**
1828  * ipr_dump_trace_data - Fill in the IOA trace in the dump.
1829  * @ioa_cfg:	ioa config struct
1830  * @driver_dump:	driver dump struct
1831  *
1832  * Return value:
1833  * 	nothing
1834  **/
1835 static void ipr_dump_trace_data(struct ipr_ioa_cfg *ioa_cfg,
1836 				   struct ipr_driver_dump *driver_dump)
1837 {
1838 	ipr_init_dump_entry_hdr(&driver_dump->trace_entry.hdr);
1839 	driver_dump->trace_entry.hdr.len =
1840 		sizeof(struct ipr_dump_trace_entry) -
1841 		sizeof(struct ipr_dump_entry_header);
1842 	driver_dump->trace_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_BINARY;
1843 	driver_dump->trace_entry.hdr.id = IPR_DUMP_TRACE_ID;
1844 	memcpy(driver_dump->trace_entry.trace, ioa_cfg->trace, IPR_TRACE_SIZE);
1845 	driver_dump->hdr.num_entries++;
1846 }
1847 
1848 /**
1849  * ipr_dump_location_data - Fill in the IOA location in the dump.
1850  * @ioa_cfg:	ioa config struct
1851  * @driver_dump:	driver dump struct
1852  *
1853  * Return value:
1854  * 	nothing
1855  **/
1856 static void ipr_dump_location_data(struct ipr_ioa_cfg *ioa_cfg,
1857 				   struct ipr_driver_dump *driver_dump)
1858 {
1859 	ipr_init_dump_entry_hdr(&driver_dump->location_entry.hdr);
1860 	driver_dump->location_entry.hdr.len =
1861 		sizeof(struct ipr_dump_location_entry) -
1862 		sizeof(struct ipr_dump_entry_header);
1863 	driver_dump->location_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_ASCII;
1864 	driver_dump->location_entry.hdr.id = IPR_DUMP_LOCATION_ID;
1865 	strcpy(driver_dump->location_entry.location, ioa_cfg->pdev->dev.bus_id);
1866 	driver_dump->hdr.num_entries++;
1867 }
1868 
1869 /**
1870  * ipr_get_ioa_dump - Perform a dump of the driver and adapter.
1871  * @ioa_cfg:	ioa config struct
1872  * @dump:		dump struct
1873  *
1874  * Return value:
1875  * 	nothing
1876  **/
1877 static void ipr_get_ioa_dump(struct ipr_ioa_cfg *ioa_cfg, struct ipr_dump *dump)
1878 {
1879 	unsigned long start_addr, sdt_word;
1880 	unsigned long lock_flags = 0;
1881 	struct ipr_driver_dump *driver_dump = &dump->driver_dump;
1882 	struct ipr_ioa_dump *ioa_dump = &dump->ioa_dump;
1883 	u32 num_entries, start_off, end_off;
1884 	u32 bytes_to_copy, bytes_copied, rc;
1885 	struct ipr_sdt *sdt;
1886 	int i;
1887 
1888 	ENTER;
1889 
1890 	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
1891 
1892 	if (ioa_cfg->sdt_state != GET_DUMP) {
1893 		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
1894 		return;
1895 	}
1896 
1897 	start_addr = readl(ioa_cfg->ioa_mailbox);
1898 
1899 	if (!ipr_sdt_is_fmt2(start_addr)) {
1900 		dev_err(&ioa_cfg->pdev->dev,
1901 			"Invalid dump table format: %lx\n", start_addr);
1902 		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
1903 		return;
1904 	}
1905 
1906 	dev_err(&ioa_cfg->pdev->dev, "Dump of IOA initiated\n");
1907 
1908 	driver_dump->hdr.eye_catcher = IPR_DUMP_EYE_CATCHER;
1909 
1910 	/* Initialize the overall dump header */
1911 	driver_dump->hdr.len = sizeof(struct ipr_driver_dump);
1912 	driver_dump->hdr.num_entries = 1;
1913 	driver_dump->hdr.first_entry_offset = sizeof(struct ipr_dump_header);
1914 	driver_dump->hdr.status = IPR_DUMP_STATUS_SUCCESS;
1915 	driver_dump->hdr.os = IPR_DUMP_OS_LINUX;
1916 	driver_dump->hdr.driver_name = IPR_DUMP_DRIVER_NAME;
1917 
1918 	ipr_dump_version_data(ioa_cfg, driver_dump);
1919 	ipr_dump_location_data(ioa_cfg, driver_dump);
1920 	ipr_dump_ioa_type_data(ioa_cfg, driver_dump);
1921 	ipr_dump_trace_data(ioa_cfg, driver_dump);
1922 
1923 	/* Update dump_header */
1924 	driver_dump->hdr.len += sizeof(struct ipr_dump_entry_header);
1925 
1926 	/* IOA Dump entry */
1927 	ipr_init_dump_entry_hdr(&ioa_dump->hdr);
1928 	ioa_dump->format = IPR_SDT_FMT2;
1929 	ioa_dump->hdr.len = 0;
1930 	ioa_dump->hdr.data_type = IPR_DUMP_DATA_TYPE_BINARY;
1931 	ioa_dump->hdr.id = IPR_DUMP_IOA_DUMP_ID;
1932 
1933 	/* The first entries in the SDT are a list of adapter addresses and
1934 	 * lengths used to gather the real dump data.  sdt points to the
1935 	 * IOA generated dump table; dump data will be extracted based on
1936 	 * the entries in this table */
1937 	sdt = &ioa_dump->sdt;
1938 
1939 	rc = ipr_get_ldump_data_section(ioa_cfg, start_addr, (__be32 *)sdt,
1940 					sizeof(struct ipr_sdt) / sizeof(__be32));
1941 
1942 	/* Bail out if the Smart Dump Table is not ready to use */
1943 	if (rc || (be32_to_cpu(sdt->hdr.state) != IPR_FMT2_SDT_READY_TO_USE)) {
1944 		dev_err(&ioa_cfg->pdev->dev,
1945 			"Dump of IOA failed. Dump table not valid: %d, %X.\n",
1946 			rc, be32_to_cpu(sdt->hdr.state));
1947 		driver_dump->hdr.status = IPR_DUMP_STATUS_FAILED;
1948 		ioa_cfg->sdt_state = DUMP_OBTAINED;
1949 		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
1950 		return;
1951 	}
1952 
1953 	num_entries = be32_to_cpu(sdt->hdr.num_entries_used);
1954 
1955 	if (num_entries > IPR_NUM_SDT_ENTRIES)
1956 		num_entries = IPR_NUM_SDT_ENTRIES;
1957 
1958 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
1959 
1960 	for (i = 0; i < num_entries; i++) {
1961 		if (ioa_dump->hdr.len > IPR_MAX_IOA_DUMP_SIZE) {
1962 			driver_dump->hdr.status = IPR_DUMP_STATUS_QUAL_SUCCESS;
1963 			break;
1964 		}
1965 
1966 		if (sdt->entry[i].flags & IPR_SDT_VALID_ENTRY) {
1967 			sdt_word = be32_to_cpu(sdt->entry[i].bar_str_offset);
1968 			start_off = sdt_word & IPR_FMT2_MBX_ADDR_MASK;
1969 			end_off = be32_to_cpu(sdt->entry[i].end_offset);
1970 
1971 			if (ipr_sdt_is_fmt2(sdt_word) && sdt_word) {
1972 				bytes_to_copy = end_off - start_off;
1973 				if (bytes_to_copy > IPR_MAX_IOA_DUMP_SIZE) {
1974 					sdt->entry[i].flags &= ~IPR_SDT_VALID_ENTRY;
1975 					continue;
1976 				}
1977 
1978 				/* Copy data from adapter to driver buffers */
1979 				bytes_copied = ipr_sdt_copy(ioa_cfg, sdt_word,
1980 							    bytes_to_copy);
1981 
1982 				ioa_dump->hdr.len += bytes_copied;
1983 
1984 				if (bytes_copied != bytes_to_copy) {
1985 					driver_dump->hdr.status = IPR_DUMP_STATUS_QUAL_SUCCESS;
1986 					break;
1987 				}
1988 			}
1989 		}
1990 	}
1991 
1992 	dev_err(&ioa_cfg->pdev->dev, "Dump of IOA completed.\n");
1993 
1994 	/* Update dump_header */
1995 	driver_dump->hdr.len += ioa_dump->hdr.len;
1996 	wmb();
1997 	ioa_cfg->sdt_state = DUMP_OBTAINED;
1998 	LEAVE;
1999 }
2000 
2001 #else
2002 #define ipr_get_ioa_dump(ioa_cfg, dump) do { } while (0)
2003 #endif
2004 
2005 /**
2006  * ipr_release_dump - Free adapter dump memory
2007  * @kref:	kref struct
2008  *
2009  * Return value:
2010  *	nothing
2011  **/
2012 static void ipr_release_dump(struct kref *kref)
2013 {
2014 	struct ipr_dump *dump = container_of(kref, struct ipr_dump, kref);
2015 	struct ipr_ioa_cfg *ioa_cfg = dump->ioa_cfg;
2016 	unsigned long lock_flags = 0;
2017 	int i;
2018 
2019 	ENTER;
2020 	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2021 	ioa_cfg->dump = NULL;
2022 	ioa_cfg->sdt_state = INACTIVE;
2023 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2024 
2025 	for (i = 0; i < dump->ioa_dump.next_page_index; i++)
2026 		free_page((unsigned long) dump->ioa_dump.ioa_data[i]);
2027 
2028 	kfree(dump);
2029 	LEAVE;
2030 }
2031 
2032 /**
2033  * ipr_worker_thread - Worker thread
2034  * @data:		ioa config struct
2035  *
2036  * Called at task level from a work thread. This function takes care
2037  * of adding and removing devices from the mid-layer as configuration
2038  * changes are detected by the adapter.
2039  *
2040  * Return value:
2041  * 	nothing
2042  **/
2043 static void ipr_worker_thread(void *data)
2044 {
2045 	unsigned long lock_flags;
2046 	struct ipr_resource_entry *res;
2047 	struct scsi_device *sdev;
2048 	struct ipr_dump *dump;
2049 	struct ipr_ioa_cfg *ioa_cfg = data;
2050 	u8 bus, target, lun;
2051 	int did_work;
2052 
2053 	ENTER;
2054 	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2055 
2056 	if (ioa_cfg->sdt_state == GET_DUMP) {
2057 		dump = ioa_cfg->dump;
2058 		if (!dump) {
2059 			spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2060 			return;
2061 		}
2062 		kref_get(&dump->kref);
2063 		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2064 		ipr_get_ioa_dump(ioa_cfg, dump);
2065 		kref_put(&dump->kref, ipr_release_dump);
2066 
2067 		spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2068 		if (ioa_cfg->sdt_state == DUMP_OBTAINED)
2069 			ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
2070 		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2071 		return;
2072 	}
2073 
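	/*
	 * Adding or removing a device requires dropping the host lock
	 * around the mid-layer call, which lets used_res_q change under
	 * us, so rescan the queue from the top after each operation.
	 */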
2074 restart:
2075 	do {
2076 		did_work = 0;
2077 		if (!ioa_cfg->allow_cmds || !ioa_cfg->allow_ml_add_del) {
2078 			spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2079 			return;
2080 		}
2081 
2082 		list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
2083 			if (res->del_from_ml && res->sdev) {
2084 				did_work = 1;
2085 				sdev = res->sdev;
2086 				if (!scsi_device_get(sdev)) {
2087 					list_move_tail(&res->queue, &ioa_cfg->free_res_q);
2088 					spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2089 					scsi_remove_device(sdev);
2090 					scsi_device_put(sdev);
2091 					spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2092 				}
2093 				break;
2094 			}
2095 		}
2096 	} while (did_work);
2097 
2098 	list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
2099 		if (res->add_to_ml) {
2100 			bus = res->cfgte.res_addr.bus;
2101 			target = res->cfgte.res_addr.target;
2102 			lun = res->cfgte.res_addr.lun;
2103 			res->add_to_ml = 0;
2104 			spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2105 			scsi_add_device(ioa_cfg->host, bus, target, lun);
2106 			spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2107 			goto restart;
2108 		}
2109 	}
2110 
2111 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2112 	kobject_uevent(&ioa_cfg->host->shost_classdev.kobj, KOBJ_CHANGE);
2113 	LEAVE;
2114 }
2115 
2116 #ifdef CONFIG_SCSI_IPR_TRACE
2117 /**
2118  * ipr_read_trace - Dump the adapter trace
2119  * @kobj:		kobject struct
2120  * @buf:		buffer
2121  * @off:		offset
2122  * @count:		buffer size
2123  *
2124  * Return value:
2125  *	number of bytes copied to buffer
2126  **/
2127 static ssize_t ipr_read_trace(struct kobject *kobj, char *buf,
2128 			      loff_t off, size_t count)
2129 {
2130 	struct class_device *cdev = container_of(kobj, struct class_device, kobj);
2131 	struct Scsi_Host *shost = class_to_shost(cdev);
2132 	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
2133 	unsigned long lock_flags = 0;
2134 	int size = IPR_TRACE_SIZE;
2135 	char *src = (char *)ioa_cfg->trace;
2136 
2137 	if (off > size)
2138 		return 0;
2139 	if (off + count > size) {
2140 		size -= off;
2141 		count = size;
2142 	}
2143 
2144 	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2145 	memcpy(buf, &src[off], count);
2146 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2147 	return count;
2148 }
2149 
2150 static struct bin_attribute ipr_trace_attr = {
2151 	.attr =	{
2152 		.name = "trace",
2153 		.mode = S_IRUGO,
2154 	},
2155 	.size = 0,
2156 	.read = ipr_read_trace,
2157 };
2158 #endif
2159 
2160 static const struct {
2161 	enum ipr_cache_state state;
2162 	char *name;
2163 } cache_state [] = {
2164 	{ CACHE_NONE, "none" },
2165 	{ CACHE_DISABLED, "disabled" },
2166 	{ CACHE_ENABLED, "enabled" }
2167 };
2168 
2169 /**
2170  * ipr_show_write_caching - Show the write caching attribute
2171  * @class_dev:	class device struct
2172  * @buf:		buffer
2173  *
2174  * Return value:
2175  *	number of bytes printed to buffer
2176  **/
2177 static ssize_t ipr_show_write_caching(struct class_device *class_dev, char *buf)
2178 {
2179 	struct Scsi_Host *shost = class_to_shost(class_dev);
2180 	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
2181 	unsigned long lock_flags = 0;
2182 	int i, len = 0;
2183 
2184 	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2185 	for (i = 0; i < ARRAY_SIZE(cache_state); i++) {
2186 		if (cache_state[i].state == ioa_cfg->cache_state) {
2187 			len = snprintf(buf, PAGE_SIZE, "%s\n", cache_state[i].name);
2188 			break;
2189 		}
2190 	}
2191 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2192 	return len;
2193 }
2194 
2195 
2196 /**
2197  * ipr_store_write_caching - Enable/disable adapter write cache
2198  * @class_dev:	class_device struct
2199  * @buf:		buffer
2200  * @count:		buffer size
2201  *
2202  * This function will enable/disable adapter write cache.
2203  *
2204  * Return value:
2205  * 	count on success / other on failure
2206  **/
2207 static ssize_t ipr_store_write_caching(struct class_device *class_dev,
2208 					const char *buf, size_t count)
2209 {
2210 	struct Scsi_Host *shost = class_to_shost(class_dev);
2211 	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
2212 	unsigned long lock_flags = 0;
2213 	enum ipr_cache_state new_state = CACHE_INVALID;
2214 	int i;
2215 
2216 	if (!capable(CAP_SYS_ADMIN))
2217 		return -EACCES;
2218 	if (ioa_cfg->cache_state == CACHE_NONE)
2219 		return -EINVAL;
2220 
2221 	for (i = 0; i < ARRAY_SIZE(cache_state); i++) {
2222 		if (!strncmp(cache_state[i].name, buf, strlen(cache_state[i].name))) {
2223 			new_state = cache_state[i].state;
2224 			break;
2225 		}
2226 	}
2227 
2228 	if (new_state != CACHE_DISABLED && new_state != CACHE_ENABLED)
2229 		return -EINVAL;
2230 
2231 	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2232 	if (ioa_cfg->cache_state == new_state) {
2233 		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2234 		return count;
2235 	}
2236 
2237 	ioa_cfg->cache_state = new_state;
2238 	dev_info(&ioa_cfg->pdev->dev, "%s adapter write cache.\n",
2239 		 new_state == CACHE_ENABLED ? "Enabling" : "Disabling");
2240 	if (!ioa_cfg->in_reset_reload)
2241 		ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NORMAL);
2242 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2243 	wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
2244 
2245 	return count;
2246 }
2247 
2248 static struct class_device_attribute ipr_ioa_cache_attr = {
2249 	.attr = {
2250 		.name =		"write_cache",
2251 		.mode =		S_IRUGO | S_IWUSR,
2252 	},
2253 	.show = ipr_show_write_caching,
2254 	.store = ipr_store_write_caching
2255 };
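/*
 * Illustrative usage from user space (the sysfs path is an example):
 *
 *   cat /sys/class/scsi_host/host0/write_cache
 *   echo disabled > /sys/class/scsi_host/host0/write_cache
 *
 * Changing the setting reloads the adapter via a normal shutdown reset.
 */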
2256 
2257 /**
2258  * ipr_show_fw_version - Show the firmware version
2259  * @class_dev:	class device struct
2260  * @buf:		buffer
2261  *
2262  * Return value:
2263  *	number of bytes printed to buffer
2264  **/
2265 static ssize_t ipr_show_fw_version(struct class_device *class_dev, char *buf)
2266 {
2267 	struct Scsi_Host *shost = class_to_shost(class_dev);
2268 	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
2269 	struct ipr_inquiry_page3 *ucode_vpd = &ioa_cfg->vpd_cbs->page3_data;
2270 	unsigned long lock_flags = 0;
2271 	int len;
2272 
2273 	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2274 	len = snprintf(buf, PAGE_SIZE, "%02X%02X%02X%02X\n",
2275 		       ucode_vpd->major_release, ucode_vpd->card_type,
2276 		       ucode_vpd->minor_release[0],
2277 		       ucode_vpd->minor_release[1]);
2278 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2279 	return len;
2280 }
2281 
2282 static struct class_device_attribute ipr_fw_version_attr = {
2283 	.attr = {
2284 		.name =		"fw_version",
2285 		.mode =		S_IRUGO,
2286 	},
2287 	.show = ipr_show_fw_version,
2288 };
2289 
2290 /**
2291  * ipr_show_log_level - Show the adapter's error logging level
2292  * @class_dev:	class device struct
2293  * @buf:		buffer
2294  *
2295  * Return value:
2296  * 	number of bytes printed to buffer
2297  **/
2298 static ssize_t ipr_show_log_level(struct class_device *class_dev, char *buf)
2299 {
2300 	struct Scsi_Host *shost = class_to_shost(class_dev);
2301 	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
2302 	unsigned long lock_flags = 0;
2303 	int len;
2304 
2305 	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2306 	len = snprintf(buf, PAGE_SIZE, "%d\n", ioa_cfg->log_level);
2307 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2308 	return len;
2309 }
2310 
2311 /**
2312  * ipr_store_log_level - Change the adapter's error logging level
2313  * @class_dev:	class device struct
2314  * @buf:		buffer
 * @count:		buffer size
2315  *
2316  * Return value:
2317  * 	number of bytes consumed from buffer
2318  **/
2319 static ssize_t ipr_store_log_level(struct class_device *class_dev,
2320 				   const char *buf, size_t count)
2321 {
2322 	struct Scsi_Host *shost = class_to_shost(class_dev);
2323 	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
2324 	unsigned long lock_flags = 0;
2325 
2326 	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2327 	ioa_cfg->log_level = simple_strtoul(buf, NULL, 10);
2328 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2329 	return strlen(buf);
2330 }
2331 
2332 static struct class_device_attribute ipr_log_level_attr = {
2333 	.attr = {
2334 		.name =		"log_level",
2335 		.mode =		S_IRUGO | S_IWUSR,
2336 	},
2337 	.show = ipr_show_log_level,
2338 	.store = ipr_store_log_level
2339 };
2340 
2341 /**
2342  * ipr_store_diagnostics - IOA Diagnostics interface
2343  * @class_dev:	class_device struct
2344  * @buf:		buffer
2345  * @count:		buffer size
2346  *
2347  * This function will reset the adapter and wait a reasonable
2348  * amount of time for any errors that the adapter might log.
2349  *
2350  * Return value:
2351  * 	count on success / other on failure
2352  **/
2353 static ssize_t ipr_store_diagnostics(struct class_device *class_dev,
2354 				     const char *buf, size_t count)
2355 {
2356 	struct Scsi_Host *shost = class_to_shost(class_dev);
2357 	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
2358 	unsigned long lock_flags = 0;
2359 	int rc = count;
2360 
2361 	if (!capable(CAP_SYS_ADMIN))
2362 		return -EACCES;
2363 
2364 	wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
2365 	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2366 	ioa_cfg->errors_logged = 0;
2367 	ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NORMAL);
2368 
2369 	if (ioa_cfg->in_reset_reload) {
2370 		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2371 		wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
2372 
2373 		/* Wait for a second for any errors to be logged */
2374 		msleep(1000);
2375 	} else {
2376 		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2377 		return -EIO;
2378 	}
2379 
2380 	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2381 	if (ioa_cfg->in_reset_reload || ioa_cfg->errors_logged)
2382 		rc = -EIO;
2383 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2384 
2385 	return rc;
2386 }
2387 
2388 static struct class_device_attribute ipr_diagnostics_attr = {
2389 	.attr = {
2390 		.name =		"run_diagnostics",
2391 		.mode =		S_IWUSR,
2392 	},
2393 	.store = ipr_store_diagnostics
2394 };
2395 
2396 /**
2397  * ipr_show_adapter_state - Show the adapter's state
2398  * @class_dev:	class device struct
2399  * @buf:		buffer
2400  *
2401  * Return value:
2402  * 	number of bytes printed to buffer
2403  **/
2404 static ssize_t ipr_show_adapter_state(struct class_device *class_dev, char *buf)
2405 {
2406 	struct Scsi_Host *shost = class_to_shost(class_dev);
2407 	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
2408 	unsigned long lock_flags = 0;
2409 	int len;
2410 
2411 	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2412 	if (ioa_cfg->ioa_is_dead)
2413 		len = snprintf(buf, PAGE_SIZE, "offline\n");
2414 	else
2415 		len = snprintf(buf, PAGE_SIZE, "online\n");
2416 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2417 	return len;
2418 }
2419 
2420 /**
2421  * ipr_store_adapter_state - Change adapter state
2422  * @class_dev:	class_device struct
2423  * @buf:		buffer
2424  * @count:		buffer size
2425  *
2426  * This function will change the adapter's state.
2427  *
2428  * Return value:
2429  * 	count on success / other on failure
2430  **/
2431 static ssize_t ipr_store_adapter_state(struct class_device *class_dev,
2432 				       const char *buf, size_t count)
2433 {
2434 	struct Scsi_Host *shost = class_to_shost(class_dev);
2435 	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
2436 	unsigned long lock_flags;
2437 	int result = count;
2438 
2439 	if (!capable(CAP_SYS_ADMIN))
2440 		return -EACCES;
2441 
2442 	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2443 	if (ioa_cfg->ioa_is_dead && !strncmp(buf, "online", 6)) {
2444 		ioa_cfg->ioa_is_dead = 0;
2445 		ioa_cfg->reset_retries = 0;
2446 		ioa_cfg->in_ioa_bringdown = 0;
2447 		ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
2448 	}
2449 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2450 	wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
2451 
2452 	return result;
2453 }
2454 
2455 static struct class_device_attribute ipr_ioa_state_attr = {
2456 	.attr = {
2457 		.name =		"state",
2458 		.mode =		S_IRUGO | S_IWUSR,
2459 	},
2460 	.show = ipr_show_adapter_state,
2461 	.store = ipr_store_adapter_state
2462 };
2463 
2464 /**
2465  * ipr_store_reset_adapter - Reset the adapter
2466  * @class_dev:	class_device struct
2467  * @buf:		buffer
2468  * @count:		buffer size
2469  *
2470  * This function will reset the adapter.
2471  *
2472  * Return value:
2473  * 	count on success / other on failure
2474  **/
2475 static ssize_t ipr_store_reset_adapter(struct class_device *class_dev,
2476 				       const char *buf, size_t count)
2477 {
2478 	struct Scsi_Host *shost = class_to_shost(class_dev);
2479 	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
2480 	unsigned long lock_flags;
2481 	int result = count;
2482 
2483 	if (!capable(CAP_SYS_ADMIN))
2484 		return -EACCES;
2485 
2486 	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2487 	if (!ioa_cfg->in_reset_reload)
2488 		ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NORMAL);
2489 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2490 	wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
2491 
2492 	return result;
2493 }
2494 
2495 static struct class_device_attribute ipr_ioa_reset_attr = {
2496 	.attr = {
2497 		.name =		"reset_host",
2498 		.mode =		S_IWUSR,
2499 	},
2500 	.store = ipr_store_reset_adapter
2501 };
2502 
2503 /**
2504  * ipr_alloc_ucode_buffer - Allocates a microcode download buffer
2505  * @buf_len:		buffer length
2506  *
2507  * Allocates a DMA'able buffer in chunks and assembles a scatter/gather
2508  * list to use for microcode download
2509  *
2510  * Return value:
2511  * 	pointer to sglist / NULL on failure
2512  **/
2513 static struct ipr_sglist *ipr_alloc_ucode_buffer(int buf_len)
2514 {
2515 	int sg_size, order, bsize_elem, num_elem, i, j;
2516 	struct ipr_sglist *sglist;
2517 	struct scatterlist *scatterlist;
2518 	struct page *page;
2519 
2520 	/* Get the minimum size per scatter/gather element */
2521 	sg_size = buf_len / (IPR_MAX_SGLIST - 1);
2522 
2523 	/* Get the actual size per element */
2524 	order = get_order(sg_size);
2525 
2526 	/* Determine the actual number of bytes per element */
2527 	bsize_elem = PAGE_SIZE * (1 << order);
2528 
2529 	/* Determine the actual number of sg entries needed */
2530 	if (buf_len % bsize_elem)
2531 		num_elem = (buf_len / bsize_elem) + 1;
2532 	else
2533 		num_elem = buf_len / bsize_elem;
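	/*
	 * Worked example (illustrative, assuming 4K pages and a 64 entry
	 * scatter/gather list limit): a 300KB image gives an sg_size of
	 * roughly 4.8KB, hence order 1 (8KB) chunks and num_elem = 38.
	 */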
2534 
2535 	/* Allocate a scatter/gather list for the DMA */
2536 	sglist = kzalloc(sizeof(struct ipr_sglist) +
2537 			 (sizeof(struct scatterlist) * (num_elem - 1)),
2538 			 GFP_KERNEL);
2539 
2540 	if (sglist == NULL) {
2541 		ipr_trace;
2542 		return NULL;
2543 	}
2544 
2545 	scatterlist = sglist->scatterlist;
2546 
2547 	sglist->order = order;
2548 	sglist->num_sg = num_elem;
2549 
2550 	/* Allocate a bunch of sg elements */
2551 	for (i = 0; i < num_elem; i++) {
2552 		page = alloc_pages(GFP_KERNEL, order);
2553 		if (!page) {
2554 			ipr_trace;
2555 
2556 			/* Free up what we already allocated */
2557 			for (j = i - 1; j >= 0; j--)
2558 				__free_pages(scatterlist[j].page, order);
2559 			kfree(sglist);
2560 			return NULL;
2561 		}
2562 
2563 		scatterlist[i].page = page;
2564 	}
2565 
2566 	return sglist;
2567 }
2568 
2569 /**
2570  * ipr_free_ucode_buffer - Frees a microcode download buffer
2571  * @sglist:	scatter/gather list pointer
2572  *
2573  * Free a DMA'able ucode download buffer previously allocated with
2574  * ipr_alloc_ucode_buffer
2575  *
2576  * Return value:
2577  * 	nothing
2578  **/
2579 static void ipr_free_ucode_buffer(struct ipr_sglist *sglist)
2580 {
2581 	int i;
2582 
2583 	for (i = 0; i < sglist->num_sg; i++)
2584 		__free_pages(sglist->scatterlist[i].page, sglist->order);
2585 
2586 	kfree(sglist);
2587 }
2588 
2589 /**
2590  * ipr_copy_ucode_buffer - Copy user buffer to kernel buffer
2591  * @sglist:		scatter/gather list pointer
2592  * @buffer:		buffer pointer
2593  * @len:		buffer length
2594  *
2595  * Copy a microcode image from a user buffer into a buffer allocated by
2596  * ipr_alloc_ucode_buffer
2597  *
2598  * Return value:
2599  * 	0 on success / other on failure
2600  **/
2601 static int ipr_copy_ucode_buffer(struct ipr_sglist *sglist,
2602 				 u8 *buffer, u32 len)
2603 {
2604 	int bsize_elem, i, result = 0;
2605 	struct scatterlist *scatterlist;
2606 	void *kaddr;
2607 
2608 	/* Determine the actual number of bytes per element */
2609 	bsize_elem = PAGE_SIZE * (1 << sglist->order);
2610 
2611 	scatterlist = sglist->scatterlist;
2612 
2613 	for (i = 0; i < (len / bsize_elem); i++, buffer += bsize_elem) {
2614 		kaddr = kmap(scatterlist[i].page);
2615 		memcpy(kaddr, buffer, bsize_elem);
2616 		kunmap(scatterlist[i].page);
2617 
2618 		scatterlist[i].length = bsize_elem;
2619 
2620 		if (result != 0) {
2621 			ipr_trace;
2622 			return result;
2623 		}
2624 	}
2625 
2626 	if (len % bsize_elem) {
2627 		kaddr = kmap(scatterlist[i].page);
2628 		memcpy(kaddr, buffer, len % bsize_elem);
2629 		kunmap(scatterlist[i].page);
2630 
2631 		scatterlist[i].length = len % bsize_elem;
2632 	}
2633 
2634 	sglist->buffer_len = len;
2635 	return result;
2636 }
2637 
2638 /**
2639  * ipr_build_ucode_ioadl - Build a microcode download IOADL
2640  * @ipr_cmd:	ipr command struct
2641  * @sglist:		scatter/gather list
2642  *
2643  * Builds a microcode download IOA data list (IOADL).
2644  *
 * Return value:
 * 	nothing
2645  **/
2646 static void ipr_build_ucode_ioadl(struct ipr_cmnd *ipr_cmd,
2647 				  struct ipr_sglist *sglist)
2648 {
2649 	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
2650 	struct ipr_ioadl_desc *ioadl = ipr_cmd->ioadl;
2651 	struct scatterlist *scatterlist = sglist->scatterlist;
2652 	int i;
2653 
2654 	ipr_cmd->dma_use_sg = sglist->num_dma_sg;
2655 	ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
2656 	ioarcb->write_data_transfer_length = cpu_to_be32(sglist->buffer_len);
2657 	ioarcb->write_ioadl_len =
2658 		cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
2659 
2660 	for (i = 0; i < ipr_cmd->dma_use_sg; i++) {
2661 		ioadl[i].flags_and_data_len =
2662 			cpu_to_be32(IPR_IOADL_FLAGS_WRITE | sg_dma_len(&scatterlist[i]));
2663 		ioadl[i].address =
2664 			cpu_to_be32(sg_dma_address(&scatterlist[i]));
2665 	}
2666 
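	/* Flag the final descriptor so the IOA knows where the list ends */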
2667 	ioadl[i-1].flags_and_data_len |=
2668 		cpu_to_be32(IPR_IOADL_FLAGS_LAST);
2669 }
2670 
2671 /**
2672  * ipr_update_ioa_ucode - Update IOA's microcode
2673  * @ioa_cfg:	ioa config struct
2674  * @sglist:		scatter/gather list
2675  *
2676  * Initiate an adapter reset to update the IOA's microcode
2677  *
2678  * Return value:
2679  * 	0 on success / -EIO on failure
2680  **/
2681 static int ipr_update_ioa_ucode(struct ipr_ioa_cfg *ioa_cfg,
2682 				struct ipr_sglist *sglist)
2683 {
2684 	unsigned long lock_flags;
2685 
2686 	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2687 
2688 	if (ioa_cfg->ucode_sglist) {
2689 		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2690 		dev_err(&ioa_cfg->pdev->dev,
2691 			"Microcode download already in progress\n");
2692 		return -EIO;
2693 	}
2694 
2695 	sglist->num_dma_sg = pci_map_sg(ioa_cfg->pdev, sglist->scatterlist,
2696 					sglist->num_sg, DMA_TO_DEVICE);
2697 
2698 	if (!sglist->num_dma_sg) {
2699 		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2700 		dev_err(&ioa_cfg->pdev->dev,
2701 			"Failed to map microcode download buffer!\n");
2702 		return -EIO;
2703 	}
2704 
2705 	ioa_cfg->ucode_sglist = sglist;
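	/* The write buffer command itself is issued from the reset job,
	 * which picks the image up through ioa_cfg->ucode_sglist */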
2706 	ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NORMAL);
2707 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2708 	wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
2709 
2710 	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2711 	ioa_cfg->ucode_sglist = NULL;
2712 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2713 	return 0;
2714 }
2715 
2716 /**
2717  * ipr_store_update_fw - Update the firmware on the adapter
2718  * @class_dev:	class_device struct
2719  * @buf:		buffer
2720  * @count:		buffer size
2721  *
2722  * This function will update the firmware on the adapter.
2723  *
2724  * Return value:
2725  * 	count on success / other on failure
2726  **/
2727 static ssize_t ipr_store_update_fw(struct class_device *class_dev,
2728 				       const char *buf, size_t count)
2729 {
2730 	struct Scsi_Host *shost = class_to_shost(class_dev);
2731 	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
2732 	struct ipr_ucode_image_header *image_hdr;
2733 	const struct firmware *fw_entry;
2734 	struct ipr_sglist *sglist;
2735 	char fname[100];
2736 	char *src;
2737 	int len, result, dnld_size;
2738 
2739 	if (!capable(CAP_SYS_ADMIN))
2740 		return -EACCES;
2741 
2742 	len = snprintf(fname, 99, "%s", buf);
2743 	fname[len-1] = '\0';
2744 
2745 	if (request_firmware(&fw_entry, fname, &ioa_cfg->pdev->dev)) {
2746 		dev_err(&ioa_cfg->pdev->dev, "Firmware file %s not found\n", fname);
2747 		return -EIO;
2748 	}
2749 
2750 	image_hdr = (struct ipr_ucode_image_header *)fw_entry->data;
2751 
2752 	if (be32_to_cpu(image_hdr->header_length) > fw_entry->size ||
2753 	    (ioa_cfg->vpd_cbs->page3_data.card_type &&
2754 	     ioa_cfg->vpd_cbs->page3_data.card_type != image_hdr->card_type)) {
2755 		dev_err(&ioa_cfg->pdev->dev, "Invalid microcode buffer\n");
2756 		release_firmware(fw_entry);
2757 		return -EINVAL;
2758 	}
2759 
2760 	src = (u8 *)image_hdr + be32_to_cpu(image_hdr->header_length);
2761 	dnld_size = fw_entry->size - be32_to_cpu(image_hdr->header_length);
2762 	sglist = ipr_alloc_ucode_buffer(dnld_size);
2763 
2764 	if (!sglist) {
2765 		dev_err(&ioa_cfg->pdev->dev, "Microcode buffer allocation failed\n");
2766 		release_firmware(fw_entry);
2767 		return -ENOMEM;
2768 	}
2769 
2770 	result = ipr_copy_ucode_buffer(sglist, src, dnld_size);
2771 
2772 	if (result) {
2773 		dev_err(&ioa_cfg->pdev->dev,
2774 			"Microcode buffer copy to DMA buffer failed\n");
2775 		goto out;
2776 	}
2777 
2778 	result = ipr_update_ioa_ucode(ioa_cfg, sglist);
2779 
2780 	if (!result)
2781 		result = count;
2782 out:
2783 	ipr_free_ucode_buffer(sglist);
2784 	release_firmware(fw_entry);
2785 	return result;
2786 }
2787 
2788 static struct class_device_attribute ipr_update_fw_attr = {
2789 	.attr = {
2790 		.name =		"update_fw",
2791 		.mode =		S_IWUSR,
2792 	},
2793 	.store = ipr_store_update_fw
2794 };
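/*
 * Illustrative usage (paths are examples): place the microcode image
 * where request_firmware() can find it, typically /lib/firmware, then:
 *
 *   echo ucode.bin > /sys/class/scsi_host/host0/update_fw
 */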
2795 
2796 static struct class_device_attribute *ipr_ioa_attrs[] = {
2797 	&ipr_fw_version_attr,
2798 	&ipr_log_level_attr,
2799 	&ipr_diagnostics_attr,
2800 	&ipr_ioa_state_attr,
2801 	&ipr_ioa_reset_attr,
2802 	&ipr_update_fw_attr,
2803 	&ipr_ioa_cache_attr,
2804 	NULL,
2805 };
2806 
2807 #ifdef CONFIG_SCSI_IPR_DUMP
2808 /**
2809  * ipr_read_dump - Dump the adapter
2810  * @kobj:		kobject struct
2811  * @buf:		buffer
2812  * @off:		offset
2813  * @count:		buffer size
2814  *
2815  * Return value:
2816  *	number of bytes copied to buffer
2817  **/
2818 static ssize_t ipr_read_dump(struct kobject *kobj, char *buf,
2819 			      loff_t off, size_t count)
2820 {
2821 	struct class_device *cdev = container_of(kobj, struct class_device, kobj);
2822 	struct Scsi_Host *shost = class_to_shost(cdev);
2823 	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
2824 	struct ipr_dump *dump;
2825 	unsigned long lock_flags = 0;
2826 	char *src;
2827 	int len;
2828 	size_t rc = count;
2829 
2830 	if (!capable(CAP_SYS_ADMIN))
2831 		return -EACCES;
2832 
2833 	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2834 	dump = ioa_cfg->dump;
2835 
2836 	if (ioa_cfg->sdt_state != DUMP_OBTAINED || !dump) {
2837 		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2838 		return 0;
2839 	}
2840 	kref_get(&dump->kref);
2841 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2842 
2843 	if (off > dump->driver_dump.hdr.len) {
2844 		kref_put(&dump->kref, ipr_release_dump);
2845 		return 0;
2846 	}
2847 
2848 	if (off + count > dump->driver_dump.hdr.len) {
2849 		count = dump->driver_dump.hdr.len - off;
2850 		rc = count;
2851 	}
2852 
2853 	if (count && off < sizeof(dump->driver_dump)) {
2854 		if (off + count > sizeof(dump->driver_dump))
2855 			len = sizeof(dump->driver_dump) - off;
2856 		else
2857 			len = count;
2858 		src = (u8 *)&dump->driver_dump + off;
2859 		memcpy(buf, src, len);
2860 		buf += len;
2861 		off += len;
2862 		count -= len;
2863 	}
2864 
2865 	off -= sizeof(dump->driver_dump);
2866 
2867 	if (count && off < offsetof(struct ipr_ioa_dump, ioa_data)) {
2868 		if (off + count > offsetof(struct ipr_ioa_dump, ioa_data))
2869 			len = offsetof(struct ipr_ioa_dump, ioa_data) - off;
2870 		else
2871 			len = count;
2872 		src = (u8 *)&dump->ioa_dump + off;
2873 		memcpy(buf, src, len);
2874 		buf += len;
2875 		off += len;
2876 		count -= len;
2877 	}
2878 
2879 	off -= offsetof(struct ipr_ioa_dump, ioa_data);
2880 
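	/* Whatever remains of the request falls within ioa_data, which is
	 * stored as an array of discontiguous pages; copy it out one page
	 * (or partial page) at a time */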
2881 	while (count) {
2882 		if ((off & PAGE_MASK) != ((off + count) & PAGE_MASK))
2883 			len = PAGE_ALIGN(off) - off;
2884 		else
2885 			len = count;
2886 		src = (u8 *)dump->ioa_dump.ioa_data[(off & PAGE_MASK) >> PAGE_SHIFT];
2887 		src += off & ~PAGE_MASK;
2888 		memcpy(buf, src, len);
2889 		buf += len;
2890 		off += len;
2891 		count -= len;
2892 	}
2893 
2894 	kref_put(&dump->kref, ipr_release_dump);
2895 	return rc;
2896 }
2897 
2898 /**
2899  * ipr_alloc_dump - Prepare for adapter dump
2900  * @ioa_cfg:	ioa config struct
2901  *
2902  * Return value:
2903  *	0 on success / other on failure
2904  **/
2905 static int ipr_alloc_dump(struct ipr_ioa_cfg *ioa_cfg)
2906 {
2907 	struct ipr_dump *dump;
2908 	unsigned long lock_flags = 0;
2909 
2910 	ENTER;
2911 	dump = kzalloc(sizeof(struct ipr_dump), GFP_KERNEL);
2912 
2913 	if (!dump) {
2914 		ipr_err("Dump memory allocation failed\n");
2915 		return -ENOMEM;
2916 	}
2917 
2918 	kref_init(&dump->kref);
2919 	dump->ioa_cfg = ioa_cfg;
2920 
2921 	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2922 
2923 	if (INACTIVE != ioa_cfg->sdt_state) {
2924 		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2925 		kfree(dump);
2926 		return 0;
2927 	}
2928 
2929 	ioa_cfg->dump = dump;
2930 	ioa_cfg->sdt_state = WAIT_FOR_DUMP;
2931 	if (ioa_cfg->ioa_is_dead && !ioa_cfg->dump_taken) {
2932 		ioa_cfg->dump_taken = 1;
2933 		schedule_work(&ioa_cfg->work_q);
2934 	}
2935 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2936 
2937 	LEAVE;
2938 	return 0;
2939 }
2940 
2941 /**
2942  * ipr_free_dump - Free adapter dump memory
2943  * @ioa_cfg:	ioa config struct
2944  *
2945  * Return value:
2946  *	0 on success / other on failure
2947  **/
2948 static int ipr_free_dump(struct ipr_ioa_cfg *ioa_cfg)
2949 {
2950 	struct ipr_dump *dump;
2951 	unsigned long lock_flags = 0;
2952 
2953 	ENTER;
2954 
2955 	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2956 	dump = ioa_cfg->dump;
2957 	if (!dump) {
2958 		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2959 		return 0;
2960 	}
2961 
2962 	ioa_cfg->dump = NULL;
2963 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2964 
2965 	kref_put(&dump->kref, ipr_release_dump);
2966 
2967 	LEAVE;
2968 	return 0;
2969 }
2970 
2971 /**
2972  * ipr_write_dump - Setup dump state of adapter
2973  * @kobj:		kobject struct
2974  * @buf:		buffer
2975  * @off:		offset
2976  * @count:		buffer size
2977  *
2978  * Return value:
2979  *	count on success / other on failure
2980  **/
2981 static ssize_t ipr_write_dump(struct kobject *kobj, char *buf,
2982 			      loff_t off, size_t count)
2983 {
2984 	struct class_device *cdev = container_of(kobj, struct class_device, kobj);
2985 	struct Scsi_Host *shost = class_to_shost(cdev);
2986 	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
2987 	int rc;
2988 
2989 	if (!capable(CAP_SYS_ADMIN))
2990 		return -EACCES;
2991 
2992 	if (buf[0] == '1')
2993 		rc = ipr_alloc_dump(ioa_cfg);
2994 	else if (buf[0] == '0')
2995 		rc = ipr_free_dump(ioa_cfg);
2996 	else
2997 		return -EINVAL;
2998 
2999 	if (rc)
3000 		return rc;
3001 	else
3002 		return count;
3003 }
3004 
3005 static struct bin_attribute ipr_dump_attr = {
3006 	.attr =	{
3007 		.name = "dump",
3008 		.mode = S_IRUSR | S_IWUSR,
3009 	},
3010 	.size = 0,
3011 	.read = ipr_read_dump,
3012 	.write = ipr_write_dump
3013 };
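/*
 * Illustrative usage (the sysfs path is an example):
 *
 *   echo 1 > /sys/class/scsi_host/host0/dump	# prepare the dump
 *   dd if=/sys/class/scsi_host/host0/dump of=ioa_dump.bin
 *   echo 0 > /sys/class/scsi_host/host0/dump	# free the dump buffer
 */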
3014 #else
3015 static int ipr_free_dump(struct ipr_ioa_cfg *ioa_cfg) { return 0; }
3016 #endif
3017 
3018 /**
3019  * ipr_change_queue_depth - Change the device's queue depth
3020  * @sdev:	scsi device struct
3021  * @qdepth:	depth to set
3022  *
3023  * Return value:
3024  * 	actual depth set
3025  **/
3026 static int ipr_change_queue_depth(struct scsi_device *sdev, int qdepth)
3027 {
3028 	scsi_adjust_queue_depth(sdev, scsi_get_tag_type(sdev), qdepth);
3029 	return sdev->queue_depth;
3030 }
3031 
3032 /**
3033  * ipr_change_queue_type - Change the device's queue type
3034  * @sdev:		scsi device struct
3035  * @tag_type:	type of tags to use
3036  *
3037  * Return value:
3038  * 	actual queue type set
3039  **/
3040 static int ipr_change_queue_type(struct scsi_device *sdev, int tag_type)
3041 {
3042 	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
3043 	struct ipr_resource_entry *res;
3044 	unsigned long lock_flags = 0;
3045 
3046 	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3047 	res = (struct ipr_resource_entry *)sdev->hostdata;
3048 
3049 	if (res) {
3050 		if (ipr_is_gscsi(res) && sdev->tagged_supported) {
3051 			/*
3052 			 * We don't bother quiescing the device here since the
3053 			 * adapter firmware does it for us.
3054 			 */
3055 			scsi_set_tag_type(sdev, tag_type);
3056 
3057 			if (tag_type)
3058 				scsi_activate_tcq(sdev, sdev->queue_depth);
3059 			else
3060 				scsi_deactivate_tcq(sdev, sdev->queue_depth);
3061 		} else
3062 			tag_type = 0;
3063 	} else
3064 		tag_type = 0;
3065 
3066 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3067 	return tag_type;
3068 }
3069 
3070 /**
3071  * ipr_show_adapter_handle - Show the adapter's resource handle for this device
3072  * @dev:	device struct
 * @attr:	device attribute struct
3073  * @buf:	buffer
3074  *
3075  * Return value:
3076  * 	number of bytes printed to buffer
3077  **/
3078 static ssize_t ipr_show_adapter_handle(struct device *dev, struct device_attribute *attr, char *buf)
3079 {
3080 	struct scsi_device *sdev = to_scsi_device(dev);
3081 	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
3082 	struct ipr_resource_entry *res;
3083 	unsigned long lock_flags = 0;
3084 	ssize_t len = -ENXIO;
3085 
3086 	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3087 	res = (struct ipr_resource_entry *)sdev->hostdata;
3088 	if (res)
3089 		len = snprintf(buf, PAGE_SIZE, "%08X\n", res->cfgte.res_handle);
3090 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3091 	return len;
3092 }
3093 
3094 static struct device_attribute ipr_adapter_handle_attr = {
3095 	.attr = {
3096 		.name = 	"adapter_handle",
3097 		.mode =		S_IRUSR,
3098 	},
3099 	.show = ipr_show_adapter_handle
3100 };
3101 
3102 static struct device_attribute *ipr_dev_attrs[] = {
3103 	&ipr_adapter_handle_attr,
3104 	NULL,
3105 };
3106 
3107 /**
3108  * ipr_biosparam - Return the HSC mapping
3109  * @sdev:			scsi device struct
3110  * @block_device:	block device pointer
3111  * @capacity:		capacity of the device
3112  * @parm:			Array containing returned HSC values.
3113  *
3114  * This function generates the HSC parms that fdisk uses.
3115  * We want to make sure we return something that places partitions
3116  * on 4k boundaries for best performance with the IOA.
3117  *
3118  * Return value:
3119  * 	0 on success
3120  **/
3121 static int ipr_biosparam(struct scsi_device *sdev,
3122 			 struct block_device *block_device,
3123 			 sector_t capacity, int *parm)
3124 {
3125 	int heads, sectors;
3126 	sector_t cylinders;
3127 
3128 	heads = 128;
3129 	sectors = 32;
3130 
3131 	cylinders = capacity;
3132 	sector_div(cylinders, (128 * 32));
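	/* 128 heads * 32 sectors is 4096 blocks (2MB) per cylinder, so
	 * cylinder-aligned partitions always start on a 4K boundary */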
3133 
3134 	/* return result */
3135 	parm[0] = heads;
3136 	parm[1] = sectors;
3137 	parm[2] = cylinders;
3138 
3139 	return 0;
3140 }
3141 
3142 /**
3143  * ipr_slave_destroy - Unconfigure a SCSI device
3144  * @sdev:	scsi device struct
3145  *
3146  * Return value:
3147  * 	nothing
3148  **/
3149 static void ipr_slave_destroy(struct scsi_device *sdev)
3150 {
3151 	struct ipr_resource_entry *res;
3152 	struct ipr_ioa_cfg *ioa_cfg;
3153 	unsigned long lock_flags = 0;
3154 
3155 	ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;
3156 
3157 	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3158 	res = (struct ipr_resource_entry *) sdev->hostdata;
3159 	if (res) {
3160 		sdev->hostdata = NULL;
3161 		res->sdev = NULL;
3162 	}
3163 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3164 }
3165 
3166 /**
3167  * ipr_slave_configure - Configure a SCSI device
3168  * @sdev:	scsi device struct
3169  *
3170  * This function configures the specified scsi device.
3171  *
3172  * Return value:
3173  * 	0 on success
3174  **/
3175 static int ipr_slave_configure(struct scsi_device *sdev)
3176 {
3177 	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;
3178 	struct ipr_resource_entry *res;
3179 	unsigned long lock_flags = 0;
3180 
3181 	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3182 	res = sdev->hostdata;
3183 	if (res) {
3184 		if (ipr_is_af_dasd_device(res))
3185 			sdev->type = TYPE_RAID;
3186 		if (ipr_is_af_dasd_device(res) || ipr_is_ioa_resource(res)) {
3187 			sdev->scsi_level = 4;
3188 			sdev->no_uld_attach = 1;
3189 		}
3190 		if (ipr_is_vset_device(res)) {
3191 			sdev->timeout = IPR_VSET_RW_TIMEOUT;
3192 			blk_queue_max_sectors(sdev->request_queue, IPR_VSET_MAX_SECTORS);
3193 		}
3194 		if (ipr_is_vset_device(res) || ipr_is_scsi_disk(res))
3195 			sdev->allow_restart = 1;
3196 		scsi_adjust_queue_depth(sdev, 0, sdev->host->cmd_per_lun);
3197 	}
3198 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3199 	return 0;
3200 }
3201 
3202 /**
3203  * ipr_slave_alloc - Prepare for commands to a device.
3204  * @sdev:	scsi device struct
3205  *
3206  * This function saves a pointer to the resource entry
3207  * in the scsi device struct if the device exists. We
3208  * can then use this pointer in ipr_queuecommand when
3209  * handling new commands.
3210  *
3211  * Return value:
3212  * 	0 on success / -ENXIO if device does not exist
3213  **/
3214 static int ipr_slave_alloc(struct scsi_device *sdev)
3215 {
3216 	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;
3217 	struct ipr_resource_entry *res;
3218 	unsigned long lock_flags;
3219 	int rc = -ENXIO;
3220 
3221 	sdev->hostdata = NULL;
3222 
3223 	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3224 
3225 	list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
3226 		if ((res->cfgte.res_addr.bus == sdev->channel) &&
3227 		    (res->cfgte.res_addr.target == sdev->id) &&
3228 		    (res->cfgte.res_addr.lun == sdev->lun)) {
3229 			res->sdev = sdev;
3230 			res->add_to_ml = 0;
3231 			res->in_erp = 0;
3232 			sdev->hostdata = res;
3233 			if (!ipr_is_naca_model(res))
3234 				res->needs_sync_complete = 1;
3235 			rc = 0;
3236 			break;
3237 		}
3238 	}
3239 
3240 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3241 
3242 	return rc;
3243 }
3244 
3245 /**
3246  * __ipr_eh_host_reset - Reset the host adapter
3247  * @scsi_cmd:	scsi command struct
3248  *
3249  * Return value:
3250  * 	SUCCESS / FAILED
3251  **/
3252 static int __ipr_eh_host_reset(struct scsi_cmnd * scsi_cmd)
3253 {
3254 	struct ipr_ioa_cfg *ioa_cfg;
3255 	int rc;
3256 
3257 	ENTER;
3258 	ioa_cfg = (struct ipr_ioa_cfg *) scsi_cmd->device->host->hostdata;
3259 
3260 	dev_err(&ioa_cfg->pdev->dev,
3261 		"Adapter being reset as a result of error recovery.\n");
3262 
3263 	if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
3264 		ioa_cfg->sdt_state = GET_DUMP;
3265 
3266 	rc = ipr_reset_reload(ioa_cfg, IPR_SHUTDOWN_ABBREV);
3267 
3268 	LEAVE;
3269 	return rc;
3270 }
3271 
3272 static int ipr_eh_host_reset(struct scsi_cmnd * cmd)
3273 {
3274 	int rc;
3275 
3276 	spin_lock_irq(cmd->device->host->host_lock);
3277 	rc = __ipr_eh_host_reset(cmd);
3278 	spin_unlock_irq(cmd->device->host->host_lock);
3279 
3280 	return rc;
3281 }
3282 
3283 /**
3284  * ipr_device_reset - Reset the device
3285  * @ioa_cfg:	ioa config struct
3286  * @res:		resource entry struct
3287  *
3288  * This function issues a device reset to the affected device.
3289  * If the device is a SCSI device, a LUN reset will be sent
3290  * to the device first. If that does not work, a target reset
3291  * will be sent.
3292  *
3293  * Return value:
3294  *	0 on success / non-zero on failure
3295  **/
3296 static int ipr_device_reset(struct ipr_ioa_cfg *ioa_cfg,
3297 			    struct ipr_resource_entry *res)
3298 {
3299 	struct ipr_cmnd *ipr_cmd;
3300 	struct ipr_ioarcb *ioarcb;
3301 	struct ipr_cmd_pkt *cmd_pkt;
3302 	u32 ioasc;
3303 
3304 	ENTER;
3305 	ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
3306 	ioarcb = &ipr_cmd->ioarcb;
3307 	cmd_pkt = &ioarcb->cmd_pkt;
3308 
3309 	ioarcb->res_handle = res->cfgte.res_handle;
3310 	cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
3311 	cmd_pkt->cdb[0] = IPR_RESET_DEVICE;
3312 
3313 	ipr_send_blocking_cmd(ipr_cmd, ipr_timeout, IPR_DEVICE_RESET_TIMEOUT);
3314 	ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
3315 	list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
3316 
3317 	LEAVE;
3318 	return (IPR_IOASC_SENSE_KEY(ioasc) ? -EIO : 0);
3319 }
3320 
3321 /**
3322  * __ipr_eh_dev_reset - Reset the device
3323  * @scsi_cmd:	scsi command struct
3324  *
3325  * This function issues a device reset to the affected device.
3326  * A LUN reset will be sent to the device first. If that does
3327  * not work, a target reset will be sent.
3328  *
3329  * Return value:
3330  *	SUCCESS / FAILED
3331  **/
3332 static int __ipr_eh_dev_reset(struct scsi_cmnd * scsi_cmd)
3333 {
3334 	struct ipr_cmnd *ipr_cmd;
3335 	struct ipr_ioa_cfg *ioa_cfg;
3336 	struct ipr_resource_entry *res;
3337 	int rc;
3338 
3339 	ENTER;
3340 	ioa_cfg = (struct ipr_ioa_cfg *) scsi_cmd->device->host->hostdata;
3341 	res = scsi_cmd->device->hostdata;
3342 
3343 	if (!res)
3344 		return FAILED;
3345 
3346 	/*
3347 	 * If we are currently going through reset/reload, return failed. This will force the
3348 	 * mid-layer to call ipr_eh_host_reset, which will then go to sleep and wait for the
3349 	 * reset to complete
3350 	 */
3351 	if (ioa_cfg->in_reset_reload)
3352 		return FAILED;
3353 	if (ioa_cfg->ioa_is_dead)
3354 		return FAILED;
3355 
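	/*
	 * Route completion of any commands already outstanding to this
	 * device through the eh done path, so the mid-layer sees them
	 * finished once the reset flushes them back.
	 */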
3356 	list_for_each_entry(ipr_cmd, &ioa_cfg->pending_q, queue) {
3357 		if (ipr_cmd->ioarcb.res_handle == res->cfgte.res_handle) {
3358 			if (ipr_cmd->scsi_cmd)
3359 				ipr_cmd->done = ipr_scsi_eh_done;
3360 		}
3361 	}
3362 
3363 	res->resetting_device = 1;
3364 	scmd_printk(KERN_ERR, scsi_cmd, "Resetting device\n");
3365 	rc = ipr_device_reset(ioa_cfg, res);
3366 	res->resetting_device = 0;
3367 
3368 	LEAVE;
3369 	return (rc ? FAILED : SUCCESS);
3370 }
3371 
3372 static int ipr_eh_dev_reset(struct scsi_cmnd * cmd)
3373 {
3374 	int rc;
3375 
3376 	spin_lock_irq(cmd->device->host->host_lock);
3377 	rc = __ipr_eh_dev_reset(cmd);
3378 	spin_unlock_irq(cmd->device->host->host_lock);
3379 
3380 	return rc;
3381 }
3382 
3383 /**
3384  * ipr_bus_reset_done - Op done function for bus reset.
3385  * @ipr_cmd:	ipr command struct
3386  *
3387  * This function is the op done function for a bus reset
3388  *
3389  * Return value:
3390  * 	none
3391  **/
3392 static void ipr_bus_reset_done(struct ipr_cmnd *ipr_cmd)
3393 {
3394 	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
3395 	struct ipr_resource_entry *res;
3396 
3397 	ENTER;
3398 	list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
3399 		if (!memcmp(&res->cfgte.res_handle, &ipr_cmd->ioarcb.res_handle,
3400 			    sizeof(res->cfgte.res_handle))) {
3401 			scsi_report_bus_reset(ioa_cfg->host, res->cfgte.res_addr.bus);
3402 			break;
3403 		}
3404 	}
3405 
3406 	/*
3407 	 * If the abort has not completed, indicate the reset has; else call
3408 	 * the abort's done function to wake the sleeping eh thread
3409 	 */
3410 	if (ipr_cmd->sibling->sibling)
3411 		ipr_cmd->sibling->sibling = NULL;
3412 	else
3413 		ipr_cmd->sibling->done(ipr_cmd->sibling);
3414 
3415 	list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
3416 	LEAVE;
3417 }
3418 
3419 /**
3420  * ipr_abort_timeout - An abort task has timed out
3421  * @ipr_cmd:	ipr command struct
3422  *
3423  * This function handles when an abort task times out. If this
3424  * happens we issue a bus reset since we have resources tied
3425  * up that must be freed before returning to the midlayer.
3426  *
3427  * Return value:
3428  *	none
3429  **/
3430 static void ipr_abort_timeout(struct ipr_cmnd *ipr_cmd)
3431 {
3432 	struct ipr_cmnd *reset_cmd;
3433 	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
3434 	struct ipr_cmd_pkt *cmd_pkt;
3435 	unsigned long lock_flags = 0;
3436 
3437 	ENTER;
3438 	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3439 	if (ipr_cmd->completion.done || ioa_cfg->in_reset_reload) {
3440 		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3441 		return;
3442 	}
3443 
3444 	sdev_printk(KERN_ERR, ipr_cmd->u.sdev, "Abort timed out. Resetting bus.\n");
3445 	reset_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
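	/* Cross-link the abort and the bus reset so ipr_bus_reset_done
	 * can tell whether the abort is still outstanding */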
3446 	ipr_cmd->sibling = reset_cmd;
3447 	reset_cmd->sibling = ipr_cmd;
3448 	reset_cmd->ioarcb.res_handle = ipr_cmd->ioarcb.res_handle;
3449 	cmd_pkt = &reset_cmd->ioarcb.cmd_pkt;
3450 	cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
3451 	cmd_pkt->cdb[0] = IPR_RESET_DEVICE;
3452 	cmd_pkt->cdb[2] = IPR_RESET_TYPE_SELECT | IPR_BUS_RESET;
3453 
3454 	ipr_do_req(reset_cmd, ipr_bus_reset_done, ipr_timeout, IPR_DEVICE_RESET_TIMEOUT);
3455 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3456 	LEAVE;
3457 }
3458 
3459 /**
3460  * ipr_cancel_op - Cancel specified op
3461  * @scsi_cmd:	scsi command struct
3462  *
3463  * This function cancels specified op.
3464  *
3465  * Return value:
3466  *	SUCCESS / FAILED
3467  **/
3468 static int ipr_cancel_op(struct scsi_cmnd * scsi_cmd)
3469 {
3470 	struct ipr_cmnd *ipr_cmd;
3471 	struct ipr_ioa_cfg *ioa_cfg;
3472 	struct ipr_resource_entry *res;
3473 	struct ipr_cmd_pkt *cmd_pkt;
3474 	u32 ioasc;
3475 	int op_found = 0;
3476 
3477 	ENTER;
3478 	ioa_cfg = (struct ipr_ioa_cfg *)scsi_cmd->device->host->hostdata;
3479 	res = scsi_cmd->device->hostdata;
3480 
3481 	/* If we are currently going through reset/reload, return failed.
3482 	 * This will force the mid-layer to call ipr_eh_host_reset,
3483 	 * which will then go to sleep and wait for the reset to complete
3484 	 */
3485 	if (ioa_cfg->in_reset_reload || ioa_cfg->ioa_is_dead)
3486 		return FAILED;
3487 	if (!res || (!ipr_is_gscsi(res) && !ipr_is_vset_device(res)))
3488 		return FAILED;
3489 
3490 	list_for_each_entry(ipr_cmd, &ioa_cfg->pending_q, queue) {
3491 		if (ipr_cmd->scsi_cmd == scsi_cmd) {
3492 			ipr_cmd->done = ipr_scsi_eh_done;
3493 			op_found = 1;
3494 			break;
3495 		}
3496 	}
3497 
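	/* If the op is no longer pending, it already completed and there
	 * is nothing left to abort */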
3498 	if (!op_found)
3499 		return SUCCESS;
3500 
3501 	ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
3502 	ipr_cmd->ioarcb.res_handle = res->cfgte.res_handle;
3503 	cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt;
3504 	cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
3505 	cmd_pkt->cdb[0] = IPR_CANCEL_ALL_REQUESTS;
3506 	ipr_cmd->u.sdev = scsi_cmd->device;
3507 
3508 	scmd_printk(KERN_ERR, scsi_cmd, "Aborting command: %02X\n",
3509 		    scsi_cmd->cmnd[0]);
3510 	ipr_send_blocking_cmd(ipr_cmd, ipr_abort_timeout, IPR_CANCEL_ALL_TIMEOUT);
3511 	ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
3512 
3513 	/*
3514 	 * If the abort task timed out and we sent a bus reset, we will get
3515 	 * one of the following responses to the abort
3516 	 */
3517 	if (ioasc == IPR_IOASC_BUS_WAS_RESET || ioasc == IPR_IOASC_SYNC_REQUIRED) {
3518 		ioasc = 0;
3519 		ipr_trace;
3520 	}
3521 
3522 	list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
3523 	if (!ipr_is_naca_model(res))
3524 		res->needs_sync_complete = 1;
3525 
3526 	LEAVE;
3527 	return (IPR_IOASC_SENSE_KEY(ioasc) ? FAILED : SUCCESS);
3528 }
3529 
3530 /**
3531  * ipr_eh_abort - Abort a single op
3532  * @scsi_cmd:	scsi command struct
3533  *
3534  * Return value:
3535  * 	SUCCESS / FAILED
3536  **/
3537 static int ipr_eh_abort(struct scsi_cmnd * scsi_cmd)
3538 {
3539 	unsigned long flags;
3540 	int rc;
3541 
3542 	ENTER;
3543 
3544 	spin_lock_irqsave(scsi_cmd->device->host->host_lock, flags);
3545 	rc = ipr_cancel_op(scsi_cmd);
3546 	spin_unlock_irqrestore(scsi_cmd->device->host->host_lock, flags);
3547 
3548 	LEAVE;
3549 	return rc;
3550 }
3551 
3552 /**
3553  * ipr_handle_other_interrupt - Handle "other" interrupts
3554  * @ioa_cfg:	ioa config struct
3555  * @int_reg:	interrupt register
3556  *
3557  * Return value:
3558  * 	IRQ_NONE / IRQ_HANDLED
3559  **/
3560 static irqreturn_t ipr_handle_other_interrupt(struct ipr_ioa_cfg *ioa_cfg,
3561 					      volatile u32 int_reg)
3562 {
3563 	irqreturn_t rc = IRQ_HANDLED;
3564 
3565 	if (int_reg & IPR_PCII_IOA_TRANS_TO_OPER) {
3566 		/* Mask the interrupt */
3567 		writel(IPR_PCII_IOA_TRANS_TO_OPER, ioa_cfg->regs.set_interrupt_mask_reg);
3568 
3569 		/* Clear the interrupt */
3570 		writel(IPR_PCII_IOA_TRANS_TO_OPER, ioa_cfg->regs.clr_interrupt_reg);
3571 		int_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
3572 
3573 		list_del(&ioa_cfg->reset_cmd->queue);
3574 		del_timer(&ioa_cfg->reset_cmd->timer);
3575 		ipr_reset_ioa_job(ioa_cfg->reset_cmd);
3576 	} else {
3577 		if (int_reg & IPR_PCII_IOA_UNIT_CHECKED)
3578 			ioa_cfg->ioa_unit_checked = 1;
3579 		else
3580 			dev_err(&ioa_cfg->pdev->dev,
3581 				"Permanent IOA failure. 0x%08X\n", int_reg);
3582 
3583 		if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
3584 			ioa_cfg->sdt_state = GET_DUMP;
3585 
3586 		ipr_mask_and_clear_interrupts(ioa_cfg, ~0);
3587 		ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
3588 	}
3589 
3590 	return rc;
3591 }
3592 
3593 /**
3594  * ipr_isr - Interrupt service routine
3595  * @irq:	irq number
3596  * @devp:	pointer to ioa config struct
3597  * @regs:	pt_regs struct
3598  *
3599  * Return value:
3600  * 	IRQ_NONE / IRQ_HANDLED
3601  **/
3602 static irqreturn_t ipr_isr(int irq, void *devp, struct pt_regs *regs)
3603 {
3604 	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)devp;
3605 	unsigned long lock_flags = 0;
3606 	volatile u32 int_reg, int_mask_reg;
3607 	u32 ioasc;
3608 	u16 cmd_index;
3609 	struct ipr_cmnd *ipr_cmd;
3610 	irqreturn_t rc = IRQ_NONE;
3611 
3612 	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3613 
3614 	/* If interrupts are disabled, ignore the interrupt */
3615 	if (!ioa_cfg->allow_interrupts) {
3616 		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3617 		return IRQ_NONE;
3618 	}
3619 
3620 	int_mask_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
3621 	int_reg = readl(ioa_cfg->regs.sense_interrupt_reg) & ~int_mask_reg;
3622 
3623 	/* If the adapter did not raise this interrupt, ignore it (shared IRQ) */
3624 	if (unlikely((int_reg & IPR_PCII_OPER_INTERRUPTS) == 0)) {
3625 		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3626 		return IRQ_NONE;
3627 	}
3628 
3629 	while (1) {
3630 		ipr_cmd = NULL;
3631 
3632 		while ((be32_to_cpu(*ioa_cfg->hrrq_curr) & IPR_HRRQ_TOGGLE_BIT) ==
3633 		       ioa_cfg->toggle_bit) {
3634 
3635 			cmd_index = (be32_to_cpu(*ioa_cfg->hrrq_curr) &
3636 				     IPR_HRRQ_REQ_RESP_HANDLE_MASK) >> IPR_HRRQ_REQ_RESP_HANDLE_SHIFT;
3637 
3638 			if (unlikely(cmd_index >= IPR_NUM_CMD_BLKS)) {
3639 				ioa_cfg->errors_logged++;
3640 				dev_err(&ioa_cfg->pdev->dev, "Invalid response handle from IOA\n");
3641 
3642 				if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
3643 					ioa_cfg->sdt_state = GET_DUMP;
3644 
3645 				ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
3646 				spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3647 				return IRQ_HANDLED;
3648 			}
3649 
3650 			ipr_cmd = ioa_cfg->ipr_cmnd_list[cmd_index];
3651 
3652 			ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
3653 
3654 			ipr_trc_hook(ipr_cmd, IPR_TRACE_FINISH, ioasc);
3655 
3656 			list_del(&ipr_cmd->queue);
3657 			del_timer(&ipr_cmd->timer);
3658 			ipr_cmd->done(ipr_cmd);
3659 
3660 			rc = IRQ_HANDLED;
3661 
3662 			if (ioa_cfg->hrrq_curr < ioa_cfg->hrrq_end) {
3663 				ioa_cfg->hrrq_curr++;
3664 			} else {
3665 				ioa_cfg->hrrq_curr = ioa_cfg->hrrq_start;
3666 				ioa_cfg->toggle_bit ^= 1u;
3667 			}
3668 		}
3669 
3670 		if (ipr_cmd != NULL) {
3671 			/* Clear the PCI interrupt */
3672 			writel(IPR_PCII_HRRQ_UPDATED, ioa_cfg->regs.clr_interrupt_reg);
3673 			int_reg = readl(ioa_cfg->regs.sense_interrupt_reg) & ~int_mask_reg;
3674 		} else
3675 			break;
3676 	}
3677 
3678 	if (unlikely(rc == IRQ_NONE))
3679 		rc = ipr_handle_other_interrupt(ioa_cfg, int_reg);
3680 
3681 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3682 	return rc;
3683 }
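
/*
 * For reference, a minimal sketch of the HRRQ protocol consumed above
 * (names as defined in ipr.h): each 32-bit queue entry carries a
 * response handle plus a toggle bit, and an entry is valid only while
 * its toggle bit matches the driver's copy:
 *
 *	u32 entry = be32_to_cpu(*ioa_cfg->hrrq_curr);
 *
 *	if ((entry & IPR_HRRQ_TOGGLE_BIT) == ioa_cfg->toggle_bit)
 *		cmd_index = (entry & IPR_HRRQ_REQ_RESP_HANDLE_MASK) >>
 *				IPR_HRRQ_REQ_RESP_HANDLE_SHIFT;
 *
 * When the tail wraps from hrrq_end back to hrrq_start, the driver
 * flips toggle_bit so the previous pass's entries no longer match.
 */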
3684 
3685 /**
3686  * ipr_build_ioadl - Build a scatter/gather list and map the buffer
3687  * @ioa_cfg:	ioa config struct
3688  * @ipr_cmd:	ipr command struct
3689  *
3690  * Return value:
3691  * 	0 on success / -1 on failure
3692  **/
3693 static int ipr_build_ioadl(struct ipr_ioa_cfg *ioa_cfg,
3694 			   struct ipr_cmnd *ipr_cmd)
3695 {
3696 	int i;
3697 	struct scatterlist *sglist;
3698 	u32 length;
3699 	u32 ioadl_flags = 0;
3700 	struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
3701 	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
3702 	struct ipr_ioadl_desc *ioadl = ipr_cmd->ioadl;
3703 
3704 	length = scsi_cmd->request_bufflen;
3705 
3706 	if (length == 0)
3707 		return 0;
3708 
3709 	if (scsi_cmd->use_sg) {
3710 		ipr_cmd->dma_use_sg = pci_map_sg(ioa_cfg->pdev,
3711 						 scsi_cmd->request_buffer,
3712 						 scsi_cmd->use_sg,
3713 						 scsi_cmd->sc_data_direction);
3714 
3715 		if (scsi_cmd->sc_data_direction == DMA_TO_DEVICE) {
3716 			ioadl_flags = IPR_IOADL_FLAGS_WRITE;
3717 			ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
3718 			ioarcb->write_data_transfer_length = cpu_to_be32(length);
3719 			ioarcb->write_ioadl_len =
3720 				cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
3721 		} else if (scsi_cmd->sc_data_direction == DMA_FROM_DEVICE) {
3722 			ioadl_flags = IPR_IOADL_FLAGS_READ;
3723 			ioarcb->read_data_transfer_length = cpu_to_be32(length);
3724 			ioarcb->read_ioadl_len =
3725 				cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
3726 		}
3727 
3728 		sglist = scsi_cmd->request_buffer;
3729 
3730 		for (i = 0; i < ipr_cmd->dma_use_sg; i++) {
3731 			ioadl[i].flags_and_data_len =
3732 				cpu_to_be32(ioadl_flags | sg_dma_len(&sglist[i]));
3733 			ioadl[i].address =
3734 				cpu_to_be32(sg_dma_address(&sglist[i]));
3735 		}
3736 
3737 		if (likely(ipr_cmd->dma_use_sg)) {
3738 			ioadl[i-1].flags_and_data_len |=
3739 				cpu_to_be32(IPR_IOADL_FLAGS_LAST);
3740 			return 0;
3741 		} else
3742 			dev_err(&ioa_cfg->pdev->dev, "pci_map_sg failed!\n");
3743 	} else {
3744 		if (scsi_cmd->sc_data_direction == DMA_TO_DEVICE) {
3745 			ioadl_flags = IPR_IOADL_FLAGS_WRITE;
3746 			ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
3747 			ioarcb->write_data_transfer_length = cpu_to_be32(length);
3748 			ioarcb->write_ioadl_len = cpu_to_be32(sizeof(struct ipr_ioadl_desc));
3749 		} else if (scsi_cmd->sc_data_direction == DMA_FROM_DEVICE) {
3750 			ioadl_flags = IPR_IOADL_FLAGS_READ;
3751 			ioarcb->read_data_transfer_length = cpu_to_be32(length);
3752 			ioarcb->read_ioadl_len = cpu_to_be32(sizeof(struct ipr_ioadl_desc));
3753 		}
3754 
3755 		ipr_cmd->dma_handle = pci_map_single(ioa_cfg->pdev,
3756 						     scsi_cmd->request_buffer, length,
3757 						     scsi_cmd->sc_data_direction);
3758 
3759 		if (likely(!pci_dma_mapping_error(ipr_cmd->dma_handle))) {
3760 			ipr_cmd->dma_use_sg = 1;
3761 			ioadl[0].flags_and_data_len =
3762 				cpu_to_be32(ioadl_flags | length | IPR_IOADL_FLAGS_LAST);
3763 			ioadl[0].address = cpu_to_be32(ipr_cmd->dma_handle);
3764 			return 0;
3765 		} else
3766 			dev_err(&ioa_cfg->pdev->dev, "pci_map_single failed!\n");
3767 	}
3768 
3769 	return -1;
3770 }
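
/*
 * For reference, each IOADL descriptor built above is a pair of
 * big-endian words: the IPR_IOADL_FLAGS_* bits ORed with the segment
 * length, then the 32-bit DMA address. A single-segment read of "len"
 * bytes at bus address "addr" (hypothetical names) would look like:
 *
 *	ioadl[0].flags_and_data_len =
 *		cpu_to_be32(IPR_IOADL_FLAGS_READ_LAST | len);
 *	ioadl[0].address = cpu_to_be32(addr);
 */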
3771 
3772 /**
3773  * ipr_get_task_attributes - Translate SPI Q-Tag to task attributes
3774  * @scsi_cmd:	scsi command struct
3775  *
3776  * Return value:
3777  * 	task attributes
3778  **/
3779 static u8 ipr_get_task_attributes(struct scsi_cmnd *scsi_cmd)
3780 {
3781 	u8 tag[2];
3782 	u8 rc = IPR_FLAGS_LO_UNTAGGED_TASK;
3783 
3784 	if (scsi_populate_tag_msg(scsi_cmd, tag)) {
3785 		switch (tag[0]) {
3786 		case MSG_SIMPLE_TAG:
3787 			rc = IPR_FLAGS_LO_SIMPLE_TASK;
3788 			break;
3789 		case MSG_HEAD_TAG:
3790 			rc = IPR_FLAGS_LO_HEAD_OF_Q_TASK;
3791 			break;
3792 		case MSG_ORDERED_TAG:
3793 			rc = IPR_FLAGS_LO_ORDERED_TASK;
3794 			break;
3795 		}
3796 	}
3797 
3798 	return rc;
3799 }
3800 
3801 /**
3802  * ipr_erp_done - Process completion of ERP for a device
3803  * @ipr_cmd:		ipr command struct
3804  *
3805  * This function copies the sense buffer into the scsi_cmd
3806  * struct and pushes the scsi_done function.
3807  *
3808  * Return value:
3809  * 	nothing
3810  **/
3811 static void ipr_erp_done(struct ipr_cmnd *ipr_cmd)
3812 {
3813 	struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
3814 	struct ipr_resource_entry *res = scsi_cmd->device->hostdata;
3815 	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
3816 	u32 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
3817 
3818 	if (IPR_IOASC_SENSE_KEY(ioasc) > 0) {
3819 		scsi_cmd->result |= (DID_ERROR << 16);
3820 		scmd_printk(KERN_ERR, scsi_cmd,
3821 			    "Request Sense failed with IOASC: 0x%08X\n", ioasc);
3822 	} else {
3823 		memcpy(scsi_cmd->sense_buffer, ipr_cmd->sense_buffer,
3824 		       SCSI_SENSE_BUFFERSIZE);
3825 	}
3826 
3827 	if (res) {
3828 		if (!ipr_is_naca_model(res))
3829 			res->needs_sync_complete = 1;
3830 		res->in_erp = 0;
3831 	}
3832 	ipr_unmap_sglist(ioa_cfg, ipr_cmd);
3833 	list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
3834 	scsi_cmd->scsi_done(scsi_cmd);
3835 }
3836 
3837 /**
3838  * ipr_reinit_ipr_cmnd_for_erp - Re-initialize a cmnd block to be used for ERP
3839  * @ipr_cmd:	ipr command struct
3840  *
3841  * Return value:
3842  * 	none
3843  **/
3844 static void ipr_reinit_ipr_cmnd_for_erp(struct ipr_cmnd *ipr_cmd)
3845 {
3846 	struct ipr_ioarcb *ioarcb;
3847 	struct ipr_ioasa *ioasa;
3848 
3849 	ioarcb = &ipr_cmd->ioarcb;
3850 	ioasa = &ipr_cmd->ioasa;
3851 
3852 	memset(&ioarcb->cmd_pkt, 0, sizeof(struct ipr_cmd_pkt));
3853 	ioarcb->write_data_transfer_length = 0;
3854 	ioarcb->read_data_transfer_length = 0;
3855 	ioarcb->write_ioadl_len = 0;
3856 	ioarcb->read_ioadl_len = 0;
3857 	ioasa->ioasc = 0;
3858 	ioasa->residual_data_len = 0;
3859 }
3860 
3861 /**
3862  * ipr_erp_request_sense - Send request sense to a device
3863  * @ipr_cmd:	ipr command struct
3864  *
3865  * This function sends a request sense to a device as a result
3866  * of a check condition.
3867  *
3868  * Return value:
3869  * 	nothing
3870  **/
3871 static void ipr_erp_request_sense(struct ipr_cmnd *ipr_cmd)
3872 {
3873 	struct ipr_cmd_pkt *cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt;
3874 	u32 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
3875 
3876 	if (IPR_IOASC_SENSE_KEY(ioasc) > 0) {
3877 		ipr_erp_done(ipr_cmd);
3878 		return;
3879 	}
3880 
3881 	ipr_reinit_ipr_cmnd_for_erp(ipr_cmd);
3882 
3883 	cmd_pkt->request_type = IPR_RQTYPE_SCSICDB;
3884 	cmd_pkt->cdb[0] = REQUEST_SENSE;
3885 	cmd_pkt->cdb[4] = SCSI_SENSE_BUFFERSIZE;
3886 	cmd_pkt->flags_hi |= IPR_FLAGS_HI_SYNC_OVERRIDE;
3887 	cmd_pkt->flags_hi |= IPR_FLAGS_HI_NO_ULEN_CHK;
3888 	cmd_pkt->timeout = cpu_to_be16(IPR_REQUEST_SENSE_TIMEOUT / HZ);
3889 
3890 	ipr_cmd->ioadl[0].flags_and_data_len =
3891 		cpu_to_be32(IPR_IOADL_FLAGS_READ_LAST | SCSI_SENSE_BUFFERSIZE);
3892 	ipr_cmd->ioadl[0].address =
3893 		cpu_to_be32(ipr_cmd->sense_buffer_dma);
3894 
3895 	ipr_cmd->ioarcb.read_ioadl_len =
3896 		cpu_to_be32(sizeof(struct ipr_ioadl_desc));
3897 	ipr_cmd->ioarcb.read_data_transfer_length =
3898 		cpu_to_be32(SCSI_SENSE_BUFFERSIZE);
3899 
3900 	ipr_do_req(ipr_cmd, ipr_erp_done, ipr_timeout,
3901 		   IPR_REQUEST_SENSE_TIMEOUT * 2);
3902 }
3903 
3904 /**
3905  * ipr_erp_cancel_all - Send cancel all to a device
3906  * @ipr_cmd:	ipr command struct
3907  *
3908  * This function sends a cancel all to a device to clear the
3909  * queue. If we are running TCQ on the device, QERR is set to 1,
3910  * which means all outstanding ops have been dropped on the floor.
3911  * Cancel all will return them to us.
3912  *
3913  * Return value:
3914  * 	nothing
3915  **/
3916 static void ipr_erp_cancel_all(struct ipr_cmnd *ipr_cmd)
3917 {
3918 	struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
3919 	struct ipr_resource_entry *res = scsi_cmd->device->hostdata;
3920 	struct ipr_cmd_pkt *cmd_pkt;
3921 
3922 	res->in_erp = 1;
3923 
3924 	ipr_reinit_ipr_cmnd_for_erp(ipr_cmd);
3925 
3926 	if (!scsi_get_tag_type(scsi_cmd->device)) {
3927 		ipr_erp_request_sense(ipr_cmd);
3928 		return;
3929 	}
3930 
3931 	cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt;
3932 	cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
3933 	cmd_pkt->cdb[0] = IPR_CANCEL_ALL_REQUESTS;
3934 
3935 	ipr_do_req(ipr_cmd, ipr_erp_request_sense, ipr_timeout,
3936 		   IPR_CANCEL_ALL_TIMEOUT);
3937 }
3938 
3939 /**
3940  * ipr_dump_ioasa - Dump contents of IOASA
3941  * @ioa_cfg:	ioa config struct
3942  * @ipr_cmd:	ipr command struct
3943  * @res:		resource entry struct
3944  *
3945  * This function is invoked by the interrupt handler when ops
3946  * fail. It will log the IOASA if appropriate. Only called
3947  * for GPDD ops.
3948  *
3949  * Return value:
3950  * 	none
3951  **/
3952 static void ipr_dump_ioasa(struct ipr_ioa_cfg *ioa_cfg,
3953 			   struct ipr_cmnd *ipr_cmd, struct ipr_resource_entry *res)
3954 {
3955 	int i;
3956 	u16 data_len;
3957 	u32 ioasc;
3958 	struct ipr_ioasa *ioasa = &ipr_cmd->ioasa;
3959 	__be32 *ioasa_data = (__be32 *)ioasa;
3960 	int error_index;
3961 
3962 	ioasc = be32_to_cpu(ioasa->ioasc) & IPR_IOASC_IOASC_MASK;
3963 
3964 	if (0 == ioasc)
3965 		return;
3966 
3967 	if (ioa_cfg->log_level < IPR_DEFAULT_LOG_LEVEL)
3968 		return;
3969 
3970 	error_index = ipr_get_error(ioasc);
3971 
3972 	if (ioa_cfg->log_level < IPR_MAX_LOG_LEVEL) {
3973 		/* Don't log an error if the IOA already logged one */
3974 		if (ioasa->ilid != 0)
3975 			return;
3976 
3977 		if (ipr_error_table[error_index].log_ioasa == 0)
3978 			return;
3979 	}
3980 
3981 	ipr_res_err(ioa_cfg, res, "%s\n", ipr_error_table[error_index].error);
3982 
3983 	if (sizeof(struct ipr_ioasa) < be16_to_cpu(ioasa->ret_stat_len))
3984 		data_len = sizeof(struct ipr_ioasa);
3985 	else
3986 		data_len = be16_to_cpu(ioasa->ret_stat_len);
3987 
3988 	ipr_err("IOASA Dump:\n");
3989 
3990 	for (i = 0; i < data_len / 4; i += 4) {
3991 		ipr_err("%08X: %08X %08X %08X %08X\n", i*4,
3992 			be32_to_cpu(ioasa_data[i]),
3993 			be32_to_cpu(ioasa_data[i+1]),
3994 			be32_to_cpu(ioasa_data[i+2]),
3995 			be32_to_cpu(ioasa_data[i+3]));
3996 	}
3997 }
3998 
3999 /**
4000  * ipr_gen_sense - Generate SCSI sense data from an IOASA
4001  * @ipr_cmd:	ipr command struct
4003  *
4004  * Return value:
4005  * 	none
4006  **/
4007 static void ipr_gen_sense(struct ipr_cmnd *ipr_cmd)
4008 {
4009 	u32 failing_lba;
4010 	u8 *sense_buf = ipr_cmd->scsi_cmd->sense_buffer;
4011 	struct ipr_resource_entry *res = ipr_cmd->scsi_cmd->device->hostdata;
4012 	struct ipr_ioasa *ioasa = &ipr_cmd->ioasa;
4013 	u32 ioasc = be32_to_cpu(ioasa->ioasc);
4014 
4015 	memset(sense_buf, 0, SCSI_SENSE_BUFFERSIZE);
4016 
4017 	if (ioasc >= IPR_FIRST_DRIVER_IOASC)
4018 		return;
4019 
4020 	ipr_cmd->scsi_cmd->result = SAM_STAT_CHECK_CONDITION;
4021 
4022 	if (ipr_is_vset_device(res) &&
4023 	    ioasc == IPR_IOASC_MED_DO_NOT_REALLOC &&
4024 	    ioasa->u.vset.failing_lba_hi != 0) {
4025 		sense_buf[0] = 0x72;
4026 		sense_buf[1] = IPR_IOASC_SENSE_KEY(ioasc);
4027 		sense_buf[2] = IPR_IOASC_SENSE_CODE(ioasc);
4028 		sense_buf[3] = IPR_IOASC_SENSE_QUAL(ioasc);
4029 
4030 		sense_buf[7] = 12;
4031 		sense_buf[8] = 0;
4032 		sense_buf[9] = 0x0A;
4033 		sense_buf[10] = 0x80;
4034 
4035 		failing_lba = be32_to_cpu(ioasa->u.vset.failing_lba_hi);
4036 
4037 		sense_buf[12] = (failing_lba & 0xff000000) >> 24;
4038 		sense_buf[13] = (failing_lba & 0x00ff0000) >> 16;
4039 		sense_buf[14] = (failing_lba & 0x0000ff00) >> 8;
4040 		sense_buf[15] = failing_lba & 0x000000ff;
4041 
4042 		failing_lba = be32_to_cpu(ioasa->u.vset.failing_lba_lo);
4043 
4044 		sense_buf[16] = (failing_lba & 0xff000000) >> 24;
4045 		sense_buf[17] = (failing_lba & 0x00ff0000) >> 16;
4046 		sense_buf[18] = (failing_lba & 0x0000ff00) >> 8;
4047 		sense_buf[19] = failing_lba & 0x000000ff;
4048 	} else {
4049 		sense_buf[0] = 0x70;
4050 		sense_buf[2] = IPR_IOASC_SENSE_KEY(ioasc);
4051 		sense_buf[12] = IPR_IOASC_SENSE_CODE(ioasc);
4052 		sense_buf[13] = IPR_IOASC_SENSE_QUAL(ioasc);
4053 
4054 		/* Illegal request */
4055 		if ((IPR_IOASC_SENSE_KEY(ioasc) == 0x05) &&
4056 		    (be32_to_cpu(ioasa->ioasc_specific) & IPR_FIELD_POINTER_VALID)) {
4057 			sense_buf[7] = 10;	/* additional length */
4058 
4059 			/* IOARCB was in error */
4060 			if (IPR_IOASC_SENSE_CODE(ioasc) == 0x24)
4061 				sense_buf[15] = 0xC0;
4062 			else	/* Parameter data was invalid */
4063 				sense_buf[15] = 0x80;
4064 
4065 			sense_buf[16] =
4066 			    ((IPR_FIELD_POINTER_MASK &
4067 			      be32_to_cpu(ioasa->ioasc_specific)) >> 8) & 0xff;
4068 			sense_buf[17] =
4069 			    (IPR_FIELD_POINTER_MASK &
4070 			     be32_to_cpu(ioasa->ioasc_specific)) & 0xff;
4071 		} else {
4072 			if (ioasc == IPR_IOASC_MED_DO_NOT_REALLOC) {
4073 				if (ipr_is_vset_device(res))
4074 					failing_lba = be32_to_cpu(ioasa->u.vset.failing_lba_lo);
4075 				else
4076 					failing_lba = be32_to_cpu(ioasa->u.dasd.failing_lba);
4077 
4078 				sense_buf[0] |= 0x80;	/* Or in the Valid bit */
4079 				sense_buf[3] = (failing_lba & 0xff000000) >> 24;
4080 				sense_buf[4] = (failing_lba & 0x00ff0000) >> 16;
4081 				sense_buf[5] = (failing_lba & 0x0000ff00) >> 8;
4082 				sense_buf[6] = failing_lba & 0x000000ff;
4083 			}
4084 
4085 			sense_buf[7] = 6;	/* additional length */
4086 		}
4087 	}
4088 }
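
/*
 * For reference, the two sense formats generated above:
 *
 *	0x70 (fixed):      sense key in byte 2, ASC/ASCQ in bytes 12/13,
 *	                   and, when reported, the failing LBA in bytes
 *	                   3-6 with the Valid bit (0x80) ORed into byte 0.
 *	0x72 (descriptor): sense key/ASC/ASCQ in bytes 1-3, followed by
 *	                   an information descriptor (type 0x00, length
 *	                   0x0A) with its Valid bit set and the 64-bit
 *	                   failing LBA in sense bytes 12-19.
 */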
4089 
4090 /**
4091  * ipr_get_autosense - Copy autosense data to sense buffer
4092  * @ipr_cmd:	ipr command struct
4093  *
4094  * This function copies the autosense buffer to the buffer
4095  * in the scsi_cmd, if there is autosense available.
4096  *
4097  * Return value:
4098  *	1 if autosense was available / 0 if not
4099  **/
4100 static int ipr_get_autosense(struct ipr_cmnd *ipr_cmd)
4101 {
4102 	struct ipr_ioasa *ioasa = &ipr_cmd->ioasa;
4103 
4104 	if ((be32_to_cpu(ioasa->ioasc_specific) &
4105 	     (IPR_ADDITIONAL_STATUS_FMT | IPR_AUTOSENSE_VALID)) == 0)
4106 		return 0;
4107 
4108 	memcpy(ipr_cmd->scsi_cmd->sense_buffer, ioasa->auto_sense.data,
4109 	       min_t(u16, be16_to_cpu(ioasa->auto_sense.auto_sense_len),
4110 		   SCSI_SENSE_BUFFERSIZE));
4111 	return 1;
4112 }
4113 
4114 /**
4115  * ipr_erp_start - Process an error response for a SCSI op
4116  * @ioa_cfg:	ioa config struct
4117  * @ipr_cmd:	ipr command struct
4118  *
4119  * This function determines whether or not to initiate ERP
4120  * on the affected device.
4121  *
4122  * Return value:
4123  * 	nothing
4124  **/
4125 static void ipr_erp_start(struct ipr_ioa_cfg *ioa_cfg,
4126 			      struct ipr_cmnd *ipr_cmd)
4127 {
4128 	struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
4129 	struct ipr_resource_entry *res = scsi_cmd->device->hostdata;
4130 	u32 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
4131 
4132 	if (!res) {
4133 		ipr_scsi_eh_done(ipr_cmd);
4134 		return;
4135 	}
4136 
4137 	if (ipr_is_gscsi(res))
4138 		ipr_dump_ioasa(ioa_cfg, ipr_cmd, res);
4139 	else
4140 		ipr_gen_sense(ipr_cmd);
4141 
4142 	switch (ioasc & IPR_IOASC_IOASC_MASK) {
4143 	case IPR_IOASC_ABORTED_CMD_TERM_BY_HOST:
4144 		if (ipr_is_naca_model(res))
4145 			scsi_cmd->result |= (DID_ABORT << 16);
4146 		else
4147 			scsi_cmd->result |= (DID_IMM_RETRY << 16);
4148 		break;
4149 	case IPR_IOASC_IR_RESOURCE_HANDLE:
4150 	case IPR_IOASC_IR_NO_CMDS_TO_2ND_IOA:
4151 		scsi_cmd->result |= (DID_NO_CONNECT << 16);
4152 		break;
4153 	case IPR_IOASC_HW_SEL_TIMEOUT:
4154 		scsi_cmd->result |= (DID_NO_CONNECT << 16);
4155 		if (!ipr_is_naca_model(res))
4156 			res->needs_sync_complete = 1;
4157 		break;
4158 	case IPR_IOASC_SYNC_REQUIRED:
4159 		if (!res->in_erp)
4160 			res->needs_sync_complete = 1;
4161 		scsi_cmd->result |= (DID_IMM_RETRY << 16);
4162 		break;
4163 	case IPR_IOASC_MED_DO_NOT_REALLOC: /* prevent retries */
4164 	case IPR_IOASA_IR_DUAL_IOA_DISABLED:
4165 		scsi_cmd->result |= (DID_PASSTHROUGH << 16);
4166 		break;
4167 	case IPR_IOASC_BUS_WAS_RESET:
4168 	case IPR_IOASC_BUS_WAS_RESET_BY_OTHER:
4169 		/*
4170 		 * Report the bus reset and ask for a retry. The device
4171 		 * will give CC/UA the next command.
4172 		 */
4173 		if (!res->resetting_device)
4174 			scsi_report_bus_reset(ioa_cfg->host, scsi_cmd->device->channel);
4175 		scsi_cmd->result |= (DID_ERROR << 16);
4176 		if (!ipr_is_naca_model(res))
4177 			res->needs_sync_complete = 1;
4178 		break;
4179 	case IPR_IOASC_HW_DEV_BUS_STATUS:
4180 		scsi_cmd->result |= IPR_IOASC_SENSE_STATUS(ioasc);
4181 		if (IPR_IOASC_SENSE_STATUS(ioasc) == SAM_STAT_CHECK_CONDITION) {
4182 			if (!ipr_get_autosense(ipr_cmd)) {
4183 				if (!ipr_is_naca_model(res)) {
4184 					ipr_erp_cancel_all(ipr_cmd);
4185 					return;
4186 				}
4187 			}
4188 		}
4189 		if (!ipr_is_naca_model(res))
4190 			res->needs_sync_complete = 1;
4191 		break;
4192 	case IPR_IOASC_NR_INIT_CMD_REQUIRED:
4193 		break;
4194 	default:
4195 		scsi_cmd->result |= (DID_ERROR << 16);
4196 		if (!ipr_is_vset_device(res) && !ipr_is_naca_model(res))
4197 			res->needs_sync_complete = 1;
4198 		break;
4199 	}
4200 
4201 	ipr_unmap_sglist(ioa_cfg, ipr_cmd);
4202 	list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
4203 	scsi_cmd->scsi_done(scsi_cmd);
4204 }
4205 
4206 /**
4207  * ipr_scsi_done - mid-layer done function
4208  * @ipr_cmd:	ipr command struct
4209  *
4210  * This function is invoked by the interrupt handler for
4211  * ops generated by the SCSI mid-layer
4212  *
4213  * Return value:
4214  * 	none
4215  **/
4216 static void ipr_scsi_done(struct ipr_cmnd *ipr_cmd)
4217 {
4218 	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
4219 	struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
4220 	u32 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
4221 
4222 	scsi_cmd->resid = be32_to_cpu(ipr_cmd->ioasa.residual_data_len);
4223 
4224 	if (likely(IPR_IOASC_SENSE_KEY(ioasc) == 0)) {
4225 		ipr_unmap_sglist(ioa_cfg, ipr_cmd);
4226 		list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
4227 		scsi_cmd->scsi_done(scsi_cmd);
4228 	} else
4229 		ipr_erp_start(ioa_cfg, ipr_cmd);
4230 }
4231 
4232 /**
4233  * ipr_queuecommand - Queue a mid-layer request
4234  * @scsi_cmd:	scsi command struct
4235  * @done:		done function
4236  *
4237  * This function queues a request generated by the mid-layer.
4238  *
4239  * Return value:
4240  *	0 on success
4241  *	SCSI_MLQUEUE_DEVICE_BUSY if device is busy
4242  *	SCSI_MLQUEUE_HOST_BUSY if host is busy
4243  **/
4244 static int ipr_queuecommand(struct scsi_cmnd *scsi_cmd,
4245 			    void (*done) (struct scsi_cmnd *))
4246 {
4247 	struct ipr_ioa_cfg *ioa_cfg;
4248 	struct ipr_resource_entry *res;
4249 	struct ipr_ioarcb *ioarcb;
4250 	struct ipr_cmnd *ipr_cmd;
4251 	int rc = 0;
4252 
4253 	scsi_cmd->scsi_done = done;
4254 	ioa_cfg = (struct ipr_ioa_cfg *)scsi_cmd->device->host->hostdata;
4255 	res = scsi_cmd->device->hostdata;
4256 	scsi_cmd->result = (DID_OK << 16);
4257 
4258 	/*
4259 	 * We are currently blocking all devices due to a host reset.
4260 	 * We have told the host to stop giving us new requests, but
4261 	 * ERP ops don't count. FIXME
4262 	 */
4263 	if (unlikely(!ioa_cfg->allow_cmds && !ioa_cfg->ioa_is_dead))
4264 		return SCSI_MLQUEUE_HOST_BUSY;
4265 
4266 	/*
4267 	 * FIXME - Create a scsi_set_host_offline interface
4268 	 *  so the ioa_is_dead check can be removed
4269 	 */
4270 	if (unlikely(ioa_cfg->ioa_is_dead || !res)) {
4271 		memset(scsi_cmd->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
4272 		scsi_cmd->result = (DID_NO_CONNECT << 16);
4273 		scsi_cmd->scsi_done(scsi_cmd);
4274 		return 0;
4275 	}
4276 
4277 	ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
4278 	ioarcb = &ipr_cmd->ioarcb;
4279 	list_add_tail(&ipr_cmd->queue, &ioa_cfg->pending_q);
4280 
4281 	memcpy(ioarcb->cmd_pkt.cdb, scsi_cmd->cmnd, scsi_cmd->cmd_len);
4282 	ipr_cmd->scsi_cmd = scsi_cmd;
4283 	ioarcb->res_handle = res->cfgte.res_handle;
4284 	ipr_cmd->done = ipr_scsi_done;
4285 	ipr_trc_hook(ipr_cmd, IPR_TRACE_START, IPR_GET_PHYS_LOC(res->cfgte.res_addr));
4286 
4287 	if (ipr_is_gscsi(res) || ipr_is_vset_device(res)) {
4288 		if (scsi_cmd->underflow == 0)
4289 			ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_ULEN_CHK;
4290 
4291 		if (res->needs_sync_complete) {
4292 			ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_SYNC_COMPLETE;
4293 			res->needs_sync_complete = 0;
4294 		}
4295 
4296 		ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_LINK_DESC;
4297 		ioarcb->cmd_pkt.flags_lo |= IPR_FLAGS_LO_DELAY_AFTER_RST;
4298 		ioarcb->cmd_pkt.flags_lo |= IPR_FLAGS_LO_ALIGNED_BFR;
4299 		ioarcb->cmd_pkt.flags_lo |= ipr_get_task_attributes(scsi_cmd);
4300 	}
4301 
4302 	if (scsi_cmd->cmnd[0] >= 0xC0 &&
4303 	    (!ipr_is_gscsi(res) || scsi_cmd->cmnd[0] == IPR_QUERY_RSRC_STATE))
4304 		ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
4305 
4306 	if (likely(rc == 0))
4307 		rc = ipr_build_ioadl(ioa_cfg, ipr_cmd);
4308 
4309 	if (likely(rc == 0)) {
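		/*
		 * Order the IOARCB and IOADL writes above before the IOARRIN
		 * write below makes the command visible to the adapter.
		 */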
4310 		mb();
4311 		writel(be32_to_cpu(ipr_cmd->ioarcb.ioarcb_host_pci_addr),
4312 		       ioa_cfg->regs.ioarrin_reg);
4313 	} else {
4314 		list_move_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
4315 		return SCSI_MLQUEUE_HOST_BUSY;
4316 	}
4317 
4318 	return 0;
4319 }
4320 
4321 /**
4322  * ipr_ioa_info - Get information about the card/driver
4323  * @host:	scsi host struct
4324  *
4325  * Return value:
4326  * 	pointer to buffer with description string
4327  **/
4328 static const char * ipr_ioa_info(struct Scsi_Host *host)
4329 {
4330 	static char buffer[512];
4331 	struct ipr_ioa_cfg *ioa_cfg;
4332 	unsigned long lock_flags = 0;
4333 
4334 	ioa_cfg = (struct ipr_ioa_cfg *) host->hostdata;
4335 
4336 	spin_lock_irqsave(host->host_lock, lock_flags);
4337 	sprintf(buffer, "IBM %X Storage Adapter", ioa_cfg->type);
4338 	spin_unlock_irqrestore(host->host_lock, lock_flags);
4339 
4340 	return buffer;
4341 }
4342 
4343 static struct scsi_host_template driver_template = {
4344 	.module = THIS_MODULE,
4345 	.name = "IPR",
4346 	.info = ipr_ioa_info,
4347 	.queuecommand = ipr_queuecommand,
4348 	.eh_abort_handler = ipr_eh_abort,
4349 	.eh_device_reset_handler = ipr_eh_dev_reset,
4350 	.eh_host_reset_handler = ipr_eh_host_reset,
4351 	.slave_alloc = ipr_slave_alloc,
4352 	.slave_configure = ipr_slave_configure,
4353 	.slave_destroy = ipr_slave_destroy,
4354 	.change_queue_depth = ipr_change_queue_depth,
4355 	.change_queue_type = ipr_change_queue_type,
4356 	.bios_param = ipr_biosparam,
4357 	.can_queue = IPR_MAX_COMMANDS,
4358 	.this_id = -1,
4359 	.sg_tablesize = IPR_MAX_SGLIST,
4360 	.max_sectors = IPR_IOA_MAX_SECTORS,
4361 	.cmd_per_lun = IPR_MAX_CMD_PER_LUN,
4362 	.use_clustering = ENABLE_CLUSTERING,
4363 	.shost_attrs = ipr_ioa_attrs,
4364 	.sdev_attrs = ipr_dev_attrs,
4365 	.proc_name = IPR_NAME
4366 };
4367 
4368 #ifdef CONFIG_PPC_PSERIES
4369 static const u16 ipr_blocked_processors[] = {
4370 	PV_NORTHSTAR,
4371 	PV_PULSAR,
4372 	PV_POWER4,
4373 	PV_ICESTAR,
4374 	PV_SSTAR,
4375 	PV_POWER4p,
4376 	PV_630,
4377 	PV_630p
4378 };
4379 
4380 /**
4381  * ipr_invalid_adapter - Determine if this adapter is supported on this hardware
4382  * @ioa_cfg:	ioa cfg struct
4383  *
4384  * Adapters that use Gemstone revision < 3.1 do not work reliably on
4385  * certain pSeries hardware. This function determines if the given
4386  * adapter is in one of these configurations or not.
4387  *
4388  * Return value:
4389  * 	1 if adapter is not supported / 0 if adapter is supported
4390  **/
4391 static int ipr_invalid_adapter(struct ipr_ioa_cfg *ioa_cfg)
4392 {
4393 	u8 rev_id;
4394 	int i;
4395 
4396 	if (ioa_cfg->type == 0x5702) {
4397 		if (pci_read_config_byte(ioa_cfg->pdev, PCI_REVISION_ID,
4398 					 &rev_id) == PCIBIOS_SUCCESSFUL) {
4399 			if (rev_id < 4) {
4400 				for (i = 0; i < ARRAY_SIZE(ipr_blocked_processors); i++){
4401 					if (__is_processor(ipr_blocked_processors[i]))
4402 						return 1;
4403 				}
4404 			}
4405 		}
4406 	}
4407 	return 0;
4408 }
4409 #else
4410 #define ipr_invalid_adapter(ioa_cfg) 0
4411 #endif
4412 
4413 /**
4414  * ipr_ioa_bringdown_done - IOA bring down completion.
4415  * @ipr_cmd:	ipr command struct
4416  *
4417  * This function processes the completion of an adapter bring down.
4418  * It wakes any reset sleepers.
4419  *
4420  * Return value:
4421  * 	IPR_RC_JOB_RETURN
4422  **/
4423 static int ipr_ioa_bringdown_done(struct ipr_cmnd *ipr_cmd)
4424 {
4425 	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
4426 
4427 	ENTER;
4428 	ioa_cfg->in_reset_reload = 0;
4429 	ioa_cfg->reset_retries = 0;
4430 	list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
4431 	wake_up_all(&ioa_cfg->reset_wait_q);
4432 
4433 	spin_unlock_irq(ioa_cfg->host->host_lock);
4434 	scsi_unblock_requests(ioa_cfg->host);
4435 	spin_lock_irq(ioa_cfg->host->host_lock);
4436 	LEAVE;
4437 
4438 	return IPR_RC_JOB_RETURN;
4439 }
4440 
4441 /**
4442  * ipr_ioa_reset_done - IOA reset completion.
4443  * @ipr_cmd:	ipr command struct
4444  *
4445  * This function processes the completion of an adapter reset.
4446  * It schedules any necessary mid-layer add/removes and
4447  * wakes any reset sleepers.
4448  *
4449  * Return value:
4450  * 	IPR_RC_JOB_RETURN
4451  **/
4452 static int ipr_ioa_reset_done(struct ipr_cmnd *ipr_cmd)
4453 {
4454 	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
4455 	struct ipr_resource_entry *res;
4456 	struct ipr_hostrcb *hostrcb, *temp;
4457 	int i = 0;
4458 
4459 	ENTER;
4460 	ioa_cfg->in_reset_reload = 0;
4461 	ioa_cfg->allow_cmds = 1;
4462 	ioa_cfg->reset_cmd = NULL;
4463 	ioa_cfg->doorbell |= IPR_RUNTIME_RESET;
4464 
4465 	list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
4466 		if (ioa_cfg->allow_ml_add_del && (res->add_to_ml || res->del_from_ml)) {
4467 			ipr_trace;
4468 			break;
4469 		}
4470 	}
4471 	schedule_work(&ioa_cfg->work_q);
4472 
4473 	list_for_each_entry_safe(hostrcb, temp, &ioa_cfg->hostrcb_free_q, queue) {
4474 		list_del(&hostrcb->queue);
4475 		if (i++ < IPR_NUM_LOG_HCAMS)
4476 			ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_LOG_DATA, hostrcb);
4477 		else
4478 			ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE, hostrcb);
4479 	}
4480 
4481 	dev_info(&ioa_cfg->pdev->dev, "IOA initialized.\n");
4482 
4483 	ioa_cfg->reset_retries = 0;
4484 	list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
4485 	wake_up_all(&ioa_cfg->reset_wait_q);
4486 
4487 	spin_unlock_irq(ioa_cfg->host->host_lock);
4488 	scsi_unblock_requests(ioa_cfg->host);
4489 	spin_lock_irq(ioa_cfg->host->host_lock);
4490 
4491 	if (!ioa_cfg->allow_cmds)
4492 		scsi_block_requests(ioa_cfg->host);
4493 
4494 	LEAVE;
4495 	return IPR_RC_JOB_RETURN;
4496 }
4497 
4498 /**
4499  * ipr_set_sup_dev_dflt - Initialize a Set Supported Device buffer
4500  * @supported_dev:	supported device struct
4501  * @vpids:			vendor product id struct
4502  *
4503  * Return value:
4504  * 	none
4505  **/
4506 static void ipr_set_sup_dev_dflt(struct ipr_supported_device *supported_dev,
4507 				 struct ipr_std_inq_vpids *vpids)
4508 {
4509 	memset(supported_dev, 0, sizeof(struct ipr_supported_device));
4510 	memcpy(&supported_dev->vpids, vpids, sizeof(struct ipr_std_inq_vpids));
4511 	supported_dev->num_records = 1;
4512 	supported_dev->data_length =
4513 		cpu_to_be16(sizeof(struct ipr_supported_device));
4514 	supported_dev->reserved = 0;
4515 }
4516 
4517 /**
4518  * ipr_set_supported_devs - Send Set Supported Devices for a device
4519  * @ipr_cmd:	ipr command struct
4520  *
4521  * This function sends a Set Supported Devices command to the adapter.
4522  *
4523  * Return value:
4524  * 	IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
4525  **/
4526 static int ipr_set_supported_devs(struct ipr_cmnd *ipr_cmd)
4527 {
4528 	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
4529 	struct ipr_supported_device *supp_dev = &ioa_cfg->vpd_cbs->supp_dev;
4530 	struct ipr_ioadl_desc *ioadl = ipr_cmd->ioadl;
4531 	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
4532 	struct ipr_resource_entry *res = ipr_cmd->u.res;
4533 
4534 	ipr_cmd->job_step = ipr_ioa_reset_done;
4535 
4536 	list_for_each_entry_continue(res, &ioa_cfg->used_res_q, queue) {
4537 		if (!ipr_is_scsi_disk(res))
4538 			continue;
4539 
4540 		ipr_cmd->u.res = res;
4541 		ipr_set_sup_dev_dflt(supp_dev, &res->cfgte.std_inq_data.vpids);
4542 
4543 		ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
4544 		ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
4545 		ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
4546 
4547 		ioarcb->cmd_pkt.cdb[0] = IPR_SET_SUPPORTED_DEVICES;
4548 		ioarcb->cmd_pkt.cdb[7] = (sizeof(struct ipr_supported_device) >> 8) & 0xff;
4549 		ioarcb->cmd_pkt.cdb[8] = sizeof(struct ipr_supported_device) & 0xff;
4550 
4551 		ioadl->flags_and_data_len = cpu_to_be32(IPR_IOADL_FLAGS_WRITE_LAST |
4552 							sizeof(struct ipr_supported_device));
4553 		ioadl->address = cpu_to_be32(ioa_cfg->vpd_cbs_dma +
4554 					     offsetof(struct ipr_misc_cbs, supp_dev));
4555 		ioarcb->write_ioadl_len = cpu_to_be32(sizeof(struct ipr_ioadl_desc));
4556 		ioarcb->write_data_transfer_length =
4557 			cpu_to_be32(sizeof(struct ipr_supported_device));
4558 
4559 		ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout,
4560 			   IPR_SET_SUP_DEVICE_TIMEOUT);
4561 
4562 		ipr_cmd->job_step = ipr_set_supported_devs;
4563 		return IPR_RC_JOB_RETURN;
4564 	}
4565 
4566 	return IPR_RC_JOB_CONTINUE;
4567 }
4568 
4569 /**
4570  * ipr_setup_write_cache - Disable write cache if needed
4571  * @ipr_cmd:	ipr command struct
4572  *
4573  * This function sets the adapter's write cache to the desired setting.
4574  *
4575  * Return value:
4576  * 	IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
4577  **/
4578 static int ipr_setup_write_cache(struct ipr_cmnd *ipr_cmd)
4579 {
4580 	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
4581 
4582 	ipr_cmd->job_step = ipr_set_supported_devs;
4583 	ipr_cmd->u.res = list_entry(ioa_cfg->used_res_q.next,
4584 				    struct ipr_resource_entry, queue);
4585 
4586 	if (ioa_cfg->cache_state != CACHE_DISABLED)
4587 		return IPR_RC_JOB_CONTINUE;
4588 
4589 	ipr_cmd->ioarcb.res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
4590 	ipr_cmd->ioarcb.cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
4591 	ipr_cmd->ioarcb.cmd_pkt.cdb[0] = IPR_IOA_SHUTDOWN;
4592 	ipr_cmd->ioarcb.cmd_pkt.cdb[1] = IPR_SHUTDOWN_PREPARE_FOR_NORMAL;
4593 
4594 	ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
4595 
4596 	return IPR_RC_JOB_RETURN;
4597 }
4598 
4599 /**
4600  * ipr_get_mode_page - Locate specified mode page
4601  * @mode_pages:	mode page buffer
4602  * @page_code:	page code to find
4603  * @len:		minimum required length for mode page
4604  *
4605  * Return value:
4606  * 	pointer to mode page / NULL on failure
4607  **/
4608 static void *ipr_get_mode_page(struct ipr_mode_pages *mode_pages,
4609 			       u32 page_code, u32 len)
4610 {
4611 	struct ipr_mode_page_hdr *mode_hdr;
4612 	u32 page_length;
4613 	u32 length;
4614 
4615 	if (!mode_pages || (mode_pages->hdr.length == 0))
4616 		return NULL;
4617 
4618 	length = (mode_pages->hdr.length + 1) - 4 - mode_pages->hdr.block_desc_len;
4619 	mode_hdr = (struct ipr_mode_page_hdr *)
4620 		(mode_pages->data + mode_pages->hdr.block_desc_len);
4621 
4622 	while (length) {
4623 		if (IPR_GET_MODE_PAGE_CODE(mode_hdr) == page_code) {
4624 			if (mode_hdr->page_length >= (len - sizeof(struct ipr_mode_page_hdr)))
4625 				return mode_hdr;
4626 			break;
4627 		} else {
4628 			page_length = (sizeof(struct ipr_mode_page_hdr) +
4629 				       mode_hdr->page_length);
4630 			length -= page_length;
4631 			mode_hdr = (struct ipr_mode_page_hdr *)
4632 				((unsigned long)mode_hdr + page_length);
4633 		}
4634 	}
4635 	return NULL;
4636 }
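
/*
 * For reference, the mode data layout the walk above assumes (standard
 * MODE SENSE data with a 4-byte header):
 *
 *	+0	mode data length (excluding itself, hence "length + 1")
 *	+1	medium type
 *	+2	device-specific parameter
 *	+3	block descriptor length
 *	+4	block descriptors (hdr.block_desc_len bytes), then mode
 *		pages, each a header (page code, page length) followed
 *		by page_length bytes of data
 */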
4637 
4638 /**
4639  * ipr_check_term_power - Check for term power errors
4640  * @ioa_cfg:	ioa config struct
4641  * @mode_pages:	IOAFP mode pages buffer
4642  *
4643  * Check the IOAFP's mode page 28 for term power errors
4644  *
4645  * Return value:
4646  * 	nothing
4647  **/
4648 static void ipr_check_term_power(struct ipr_ioa_cfg *ioa_cfg,
4649 				 struct ipr_mode_pages *mode_pages)
4650 {
4651 	int i;
4652 	int entry_length;
4653 	struct ipr_dev_bus_entry *bus;
4654 	struct ipr_mode_page28 *mode_page;
4655 
4656 	mode_page = ipr_get_mode_page(mode_pages, 0x28,
4657 				      sizeof(struct ipr_mode_page28));
4658 
4659 	entry_length = mode_page->entry_length;
4660 
4661 	bus = mode_page->bus;
4662 
4663 	for (i = 0; i < mode_page->num_entries; i++) {
4664 		if (bus->flags & IPR_SCSI_ATTR_NO_TERM_PWR) {
4665 			dev_err(&ioa_cfg->pdev->dev,
4666 				"Term power is absent on scsi bus %d\n",
4667 				bus->res_addr.bus);
4668 		}
4669 
4670 		bus = (struct ipr_dev_bus_entry *)((char *)bus + entry_length);
4671 	}
4672 }
4673 
4674 /**
4675  * ipr_scsi_bus_speed_limit - Limit the SCSI speed based on SES table
4676  * @ioa_cfg:	ioa config struct
4677  *
4678  * Looks through the config table checking for SES devices. If
4679  * an SES device has an entry in the SES table specifying a maximum
4680  * SCSI bus speed, the bus is limited to that speed.
4681  *
4682  * Return value:
4683  * 	none
4684  **/
4685 static void ipr_scsi_bus_speed_limit(struct ipr_ioa_cfg *ioa_cfg)
4686 {
4687 	u32 max_xfer_rate;
4688 	int i;
4689 
4690 	for (i = 0; i < IPR_MAX_NUM_BUSES; i++) {
4691 		max_xfer_rate = ipr_get_max_scsi_speed(ioa_cfg, i,
4692 						       ioa_cfg->bus_attr[i].bus_width);
4693 
4694 		if (max_xfer_rate < ioa_cfg->bus_attr[i].max_xfer_rate)
4695 			ioa_cfg->bus_attr[i].max_xfer_rate = max_xfer_rate;
4696 	}
4697 }
4698 
4699 /**
4700  * ipr_modify_ioafp_mode_page_28 - Modify IOAFP Mode Page 28
4701  * @ioa_cfg:	ioa config struct
4702  * @mode_pages:	mode page 28 buffer
4703  *
4704  * Updates mode page 28 based on driver configuration
4705  *
4706  * Return value:
4707  * 	none
4708  **/
4709 static void ipr_modify_ioafp_mode_page_28(struct ipr_ioa_cfg *ioa_cfg,
4710 					  struct ipr_mode_pages *mode_pages)
4711 {
4712 	int i, entry_length;
4713 	struct ipr_dev_bus_entry *bus;
4714 	struct ipr_bus_attributes *bus_attr;
4715 	struct ipr_mode_page28 *mode_page;
4716 
4717 	mode_page = ipr_get_mode_page(mode_pages, 0x28,
4718 				      sizeof(struct ipr_mode_page28));
4719 
4720 	entry_length = mode_page->entry_length;
4721 
4722 	/* Loop for each device bus entry */
4723 	for (i = 0, bus = mode_page->bus;
4724 	     i < mode_page->num_entries;
4725 	     i++, bus = (struct ipr_dev_bus_entry *)((u8 *)bus + entry_length)) {
4726 		if (bus->res_addr.bus > IPR_MAX_NUM_BUSES) {
4727 			dev_err(&ioa_cfg->pdev->dev,
4728 				"Invalid resource address reported: 0x%08X\n",
4729 				IPR_GET_PHYS_LOC(bus->res_addr));
4730 			continue;
4731 		}
4732 
4733 		bus_attr = &ioa_cfg->bus_attr[i];
4734 		bus->extended_reset_delay = IPR_EXTENDED_RESET_DELAY;
4735 		bus->bus_width = bus_attr->bus_width;
4736 		bus->max_xfer_rate = cpu_to_be32(bus_attr->max_xfer_rate);
4737 		bus->flags &= ~IPR_SCSI_ATTR_QAS_MASK;
4738 		if (bus_attr->qas_enabled)
4739 			bus->flags |= IPR_SCSI_ATTR_ENABLE_QAS;
4740 		else
4741 			bus->flags |= IPR_SCSI_ATTR_DISABLE_QAS;
4742 	}
4743 }
4744 
4745 /**
4746  * ipr_build_mode_select - Build a mode select command
4747  * @ipr_cmd:	ipr command struct
4748  * @res_handle:	resource handle to send command to
4749  * @parm:		Byte 1 of Mode Select command
4750  * @dma_addr:	DMA buffer address
4751  * @xfer_len:	data transfer length
4752  *
4753  * Return value:
4754  * 	none
4755  **/
4756 static void ipr_build_mode_select(struct ipr_cmnd *ipr_cmd,
4757 				  __be32 res_handle, u8 parm, u32 dma_addr,
4758 				  u8 xfer_len)
4759 {
4760 	struct ipr_ioadl_desc *ioadl = ipr_cmd->ioadl;
4761 	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
4762 
4763 	ioarcb->res_handle = res_handle;
4764 	ioarcb->cmd_pkt.request_type = IPR_RQTYPE_SCSICDB;
4765 	ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
4766 	ioarcb->cmd_pkt.cdb[0] = MODE_SELECT;
4767 	ioarcb->cmd_pkt.cdb[1] = parm;
4768 	ioarcb->cmd_pkt.cdb[4] = xfer_len;
4769 
4770 	ioadl->flags_and_data_len =
4771 		cpu_to_be32(IPR_IOADL_FLAGS_WRITE_LAST | xfer_len);
4772 	ioadl->address = cpu_to_be32(dma_addr);
4773 	ioarcb->write_ioadl_len = cpu_to_be32(sizeof(struct ipr_ioadl_desc));
4774 	ioarcb->write_data_transfer_length = cpu_to_be32(xfer_len);
4775 }
4776 
4777 /**
4778  * ipr_ioafp_mode_select_page28 - Issue Mode Select Page 28 to IOA
4779  * @ipr_cmd:	ipr command struct
4780  *
4781  * This function sets up the SCSI bus attributes and sends
4782  * a Mode Select for Page 28 to activate them.
4783  *
4784  * Return value:
4785  * 	IPR_RC_JOB_RETURN
4786  **/
4787 static int ipr_ioafp_mode_select_page28(struct ipr_cmnd *ipr_cmd)
4788 {
4789 	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
4790 	struct ipr_mode_pages *mode_pages = &ioa_cfg->vpd_cbs->mode_pages;
4791 	int length;
4792 
4793 	ENTER;
4794 	ipr_scsi_bus_speed_limit(ioa_cfg);
4795 	ipr_check_term_power(ioa_cfg, mode_pages);
4796 	ipr_modify_ioafp_mode_page_28(ioa_cfg, mode_pages);
4797 	length = mode_pages->hdr.length + 1;
4798 	mode_pages->hdr.length = 0;
4799 
4800 	ipr_build_mode_select(ipr_cmd, cpu_to_be32(IPR_IOA_RES_HANDLE), 0x11,
4801 			      ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, mode_pages),
4802 			      length);
4803 
4804 	ipr_cmd->job_step = ipr_setup_write_cache;
4805 	ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
4806 
4807 	LEAVE;
4808 	return IPR_RC_JOB_RETURN;
4809 }
4810 
4811 /**
4812  * ipr_build_mode_sense - Builds a mode sense command
4813  * @ipr_cmd:	ipr command struct
4814  * @res:		resource entry struct
4815  * @parm:		Byte 2 of mode sense command
4816  * @dma_addr:	DMA address of mode sense buffer
4817  * @xfer_len:	Size of DMA buffer
4818  *
4819  * Return value:
4820  * 	none
4821  **/
4822 static void ipr_build_mode_sense(struct ipr_cmnd *ipr_cmd,
4823 				 __be32 res_handle,
4824 				 u8 parm, u32 dma_addr, u8 xfer_len)
4825 {
4826 	struct ipr_ioadl_desc *ioadl = ipr_cmd->ioadl;
4827 	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
4828 
4829 	ioarcb->res_handle = res_handle;
4830 	ioarcb->cmd_pkt.cdb[0] = MODE_SENSE;
4831 	ioarcb->cmd_pkt.cdb[2] = parm;
4832 	ioarcb->cmd_pkt.cdb[4] = xfer_len;
4833 	ioarcb->cmd_pkt.request_type = IPR_RQTYPE_SCSICDB;
4834 
4835 	ioadl->flags_and_data_len =
4836 		cpu_to_be32(IPR_IOADL_FLAGS_READ_LAST | xfer_len);
4837 	ioadl->address = cpu_to_be32(dma_addr);
4838 	ioarcb->read_ioadl_len = cpu_to_be32(sizeof(struct ipr_ioadl_desc));
4839 	ioarcb->read_data_transfer_length = cpu_to_be32(xfer_len);
4840 }
4841 
4842 /**
4843  * ipr_reset_cmd_failed - Handle failure of IOA reset command
4844  * @ipr_cmd:	ipr command struct
4845  *
4846  * This function handles the failure of an IOA bringup command.
4847  *
4848  * Return value:
4849  * 	IPR_RC_JOB_RETURN
4850  **/
4851 static int ipr_reset_cmd_failed(struct ipr_cmnd *ipr_cmd)
4852 {
4853 	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
4854 	u32 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
4855 
4856 	dev_err(&ioa_cfg->pdev->dev,
4857 		"0x%02X failed with IOASC: 0x%08X\n",
4858 		ipr_cmd->ioarcb.cmd_pkt.cdb[0], ioasc);
4859 
4860 	ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
4861 	list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
4862 	return IPR_RC_JOB_RETURN;
4863 }
4864 
4865 /**
4866  * ipr_reset_mode_sense_failed - Handle failure of IOAFP mode sense
4867  * @ipr_cmd:	ipr command struct
4868  *
4869  * This function handles the failure of a Mode Sense to the IOAFP.
4870  * Some adapters do not handle all mode pages.
4871  *
4872  * Return value:
4873  * 	IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
4874  **/
4875 static int ipr_reset_mode_sense_failed(struct ipr_cmnd *ipr_cmd)
4876 {
4877 	u32 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
4878 
4879 	if (ioasc == IPR_IOASC_IR_INVALID_REQ_TYPE_OR_PKT) {
4880 		ipr_cmd->job_step = ipr_setup_write_cache;
4881 		return IPR_RC_JOB_CONTINUE;
4882 	}
4883 
4884 	return ipr_reset_cmd_failed(ipr_cmd);
4885 }
4886 
4887 /**
4888  * ipr_ioafp_mode_sense_page28 - Issue Mode Sense Page 28 to IOA
4889  * @ipr_cmd:	ipr command struct
4890  *
4891  * This function sends a Page 28 mode sense to the IOA to
4892  * retrieve SCSI bus attributes.
4893  *
4894  * Return value:
4895  * 	IPR_RC_JOB_RETURN
4896  **/
4897 static int ipr_ioafp_mode_sense_page28(struct ipr_cmnd *ipr_cmd)
4898 {
4899 	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
4900 
4901 	ENTER;
4902 	ipr_build_mode_sense(ipr_cmd, cpu_to_be32(IPR_IOA_RES_HANDLE),
4903 			     0x28, ioa_cfg->vpd_cbs_dma +
4904 			     offsetof(struct ipr_misc_cbs, mode_pages),
4905 			     sizeof(struct ipr_mode_pages));
4906 
4907 	ipr_cmd->job_step = ipr_ioafp_mode_select_page28;
4908 	ipr_cmd->job_step_failed = ipr_reset_mode_sense_failed;
4909 
4910 	ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
4911 
4912 	LEAVE;
4913 	return IPR_RC_JOB_RETURN;
4914 }
4915 
4916 /**
4917  * ipr_init_res_table - Initialize the resource table
4918  * @ipr_cmd:	ipr command struct
4919  *
4920  * This function looks through the existing resource table, comparing
4921  * it with the config table. This function will take care of old/new
4922  * devices and schedule adding/removing them from the mid-layer
4923  * as appropriate.
4924  *
4925  * Return value:
4926  * 	IPR_RC_JOB_CONTINUE
4927  **/
4928 static int ipr_init_res_table(struct ipr_cmnd *ipr_cmd)
4929 {
4930 	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
4931 	struct ipr_resource_entry *res, *temp;
4932 	struct ipr_config_table_entry *cfgte;
4933 	int found, i;
4934 	LIST_HEAD(old_res);
4935 
4936 	ENTER;
4937 	if (ioa_cfg->cfg_table->hdr.flags & IPR_UCODE_DOWNLOAD_REQ)
4938 		dev_err(&ioa_cfg->pdev->dev, "Microcode download required\n");
4939 
4940 	list_for_each_entry_safe(res, temp, &ioa_cfg->used_res_q, queue)
4941 		list_move_tail(&res->queue, &old_res);
4942 
4943 	for (i = 0; i < ioa_cfg->cfg_table->hdr.num_entries; i++) {
4944 		cfgte = &ioa_cfg->cfg_table->dev[i];
4945 		found = 0;
4946 
4947 		list_for_each_entry_safe(res, temp, &old_res, queue) {
4948 			if (!memcmp(&res->cfgte.res_addr,
4949 				    &cfgte->res_addr, sizeof(cfgte->res_addr))) {
4950 				list_move_tail(&res->queue, &ioa_cfg->used_res_q);
4951 				found = 1;
4952 				break;
4953 			}
4954 		}
4955 
4956 		if (!found) {
4957 			if (list_empty(&ioa_cfg->free_res_q)) {
4958 				dev_err(&ioa_cfg->pdev->dev, "Too many devices attached\n");
4959 				break;
4960 			}
4961 
4962 			found = 1;
4963 			res = list_entry(ioa_cfg->free_res_q.next,
4964 					 struct ipr_resource_entry, queue);
4965 			list_move_tail(&res->queue, &ioa_cfg->used_res_q);
4966 			ipr_init_res_entry(res);
4967 			res->add_to_ml = 1;
4968 		}
4969 
4970 		if (found)
4971 			memcpy(&res->cfgte, cfgte, sizeof(struct ipr_config_table_entry));
4972 	}
4973 
4974 	list_for_each_entry_safe(res, temp, &old_res, queue) {
4975 		if (res->sdev) {
4976 			res->del_from_ml = 1;
4977 			res->cfgte.res_handle = IPR_INVALID_RES_HANDLE;
4978 			list_move_tail(&res->queue, &ioa_cfg->used_res_q);
4979 		} else {
4980 			list_move_tail(&res->queue, &ioa_cfg->free_res_q);
4981 		}
4982 	}
4983 
4984 	ipr_cmd->job_step = ipr_ioafp_mode_sense_page28;
4985 
4986 	LEAVE;
4987 	return IPR_RC_JOB_CONTINUE;
4988 }
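
/*
 * In short, the pass above diffs the old resource table against the new
 * config table: entries present in both stay on used_res_q with a
 * refreshed cfgte, entries only in the config table are allocated from
 * free_res_q and flagged add_to_ml, and stale entries either return to
 * free_res_q or, if a scsi_device is still attached, are flagged
 * del_from_ml for later removal from the mid-layer.
 */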
4989 
4990 /**
4991  * ipr_ioafp_query_ioa_cfg - Send a Query IOA Config to the adapter.
4992  * @ipr_cmd:	ipr command struct
4993  *
4994  * This function sends a Query IOA Configuration command
4995  * to the adapter to retrieve the IOA configuration table.
4996  *
4997  * Return value:
4998  * 	IPR_RC_JOB_RETURN
4999  **/
5000 static int ipr_ioafp_query_ioa_cfg(struct ipr_cmnd *ipr_cmd)
5001 {
5002 	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5003 	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
5004 	struct ipr_ioadl_desc *ioadl = ipr_cmd->ioadl;
5005 	struct ipr_inquiry_page3 *ucode_vpd = &ioa_cfg->vpd_cbs->page3_data;
5006 
5007 	ENTER;
5008 	dev_info(&ioa_cfg->pdev->dev, "Adapter firmware version: %02X%02X%02X%02X\n",
5009 		 ucode_vpd->major_release, ucode_vpd->card_type,
5010 		 ucode_vpd->minor_release[0], ucode_vpd->minor_release[1]);
5011 	ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
5012 	ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
5013 
5014 	ioarcb->cmd_pkt.cdb[0] = IPR_QUERY_IOA_CONFIG;
5015 	ioarcb->cmd_pkt.cdb[7] = (sizeof(struct ipr_config_table) >> 8) & 0xff;
5016 	ioarcb->cmd_pkt.cdb[8] = sizeof(struct ipr_config_table) & 0xff;
5017 
5018 	ioarcb->read_ioadl_len = cpu_to_be32(sizeof(struct ipr_ioadl_desc));
5019 	ioarcb->read_data_transfer_length =
5020 		cpu_to_be32(sizeof(struct ipr_config_table));
5021 
5022 	ioadl->address = cpu_to_be32(ioa_cfg->cfg_table_dma);
5023 	ioadl->flags_and_data_len =
5024 		cpu_to_be32(IPR_IOADL_FLAGS_READ_LAST | sizeof(struct ipr_config_table));
5025 
5026 	ipr_cmd->job_step = ipr_init_res_table;
5027 
5028 	ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
5029 
5030 	LEAVE;
5031 	return IPR_RC_JOB_RETURN;
5032 }
5033 
5034 /**
5035  * ipr_ioafp_inquiry - Send an Inquiry to the adapter.
5036  * @ipr_cmd:	ipr command struct
 * @flags:	CDB byte 1 flags (e.g. EVPD)
 * @page:	inquiry page code
 * @dma_addr:	DMA address of the inquiry buffer
 * @xfer_len:	transfer length
5037  *
5038  * This utility function sends an inquiry to the adapter.
5039  *
5040  * Return value:
5041  * 	none
5042  **/
5043 static void ipr_ioafp_inquiry(struct ipr_cmnd *ipr_cmd, u8 flags, u8 page,
5044 			      u32 dma_addr, u8 xfer_len)
5045 {
5046 	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
5047 	struct ipr_ioadl_desc *ioadl = ipr_cmd->ioadl;
5048 
5049 	ENTER;
5050 	ioarcb->cmd_pkt.request_type = IPR_RQTYPE_SCSICDB;
5051 	ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
5052 
5053 	ioarcb->cmd_pkt.cdb[0] = INQUIRY;
5054 	ioarcb->cmd_pkt.cdb[1] = flags;
5055 	ioarcb->cmd_pkt.cdb[2] = page;
5056 	ioarcb->cmd_pkt.cdb[4] = xfer_len;
5057 
5058 	ioarcb->read_ioadl_len = cpu_to_be32(sizeof(struct ipr_ioadl_desc));
5059 	ioarcb->read_data_transfer_length = cpu_to_be32(xfer_len);
5060 
5061 	ioadl->address = cpu_to_be32(dma_addr);
5062 	ioadl->flags_and_data_len =
5063 		cpu_to_be32(IPR_IOADL_FLAGS_READ_LAST | xfer_len);
5064 
5065 	ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
5066 	LEAVE;
5067 }
5068 
5069 /**
5070  * ipr_inquiry_page_supported - Is the given inquiry page supported
5071  * @page0:		inquiry page 0 buffer
5072  * @page:		page code.
5073  *
5074  * This function determines if the specified inquiry page is supported.
5075  *
5076  * Return value:
5077  *	1 if page is supported / 0 if not
5078  **/
5079 static int ipr_inquiry_page_supported(struct ipr_inquiry_page0 *page0, u8 page)
5080 {
5081 	int i;
5082 
5083 	for (i = 0; i < min_t(u8, page0->len, IPR_INQUIRY_PAGE0_ENTRIES); i++)
5084 		if (page0->page[i] == page)
5085 			return 1;
5086 
5087 	return 0;
5088 }
5089 
5090 /**
5091  * ipr_ioafp_page3_inquiry - Send a Page 3 Inquiry to the adapter.
5092  * @ipr_cmd:	ipr command struct
5093  *
5094  * This function sends a Page 3 inquiry to the adapter
5095  * to retrieve software VPD information.
5096  *
5097  * Return value:
5098  * 	IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
5099  **/
5100 static int ipr_ioafp_page3_inquiry(struct ipr_cmnd *ipr_cmd)
5101 {
5102 	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5103 	struct ipr_inquiry_page0 *page0 = &ioa_cfg->vpd_cbs->page0_data;
5104 
5105 	ENTER;
5106 
5107 	if (!ipr_inquiry_page_supported(page0, 1))
5108 		ioa_cfg->cache_state = CACHE_NONE;
5109 
5110 	ipr_cmd->job_step = ipr_ioafp_query_ioa_cfg;
5111 
5112 	ipr_ioafp_inquiry(ipr_cmd, 1, 3,
5113 			  ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, page3_data),
5114 			  sizeof(struct ipr_inquiry_page3));
5115 
5116 	LEAVE;
5117 	return IPR_RC_JOB_RETURN;
5118 }
5119 
5120 /**
5121  * ipr_ioafp_page0_inquiry - Send a Page 0 Inquiry to the adapter.
5122  * @ipr_cmd:	ipr command struct
5123  *
5124  * This function sends a Page 0 inquiry to the adapter
5125  * to retrieve supported inquiry pages.
5126  *
5127  * Return value:
5128  * 	IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
5129  **/
5130 static int ipr_ioafp_page0_inquiry(struct ipr_cmnd *ipr_cmd)
5131 {
5132 	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5133 	char type[5];
5134 
5135 	ENTER;
5136 
5137 	/* Grab the type out of the VPD and store it away */
5138 	memcpy(type, ioa_cfg->vpd_cbs->ioa_vpd.std_inq_data.vpids.product_id, 4);
5139 	type[4] = '\0';
5140 	ioa_cfg->type = simple_strtoul((char *)type, NULL, 16);
5141 
5142 	ipr_cmd->job_step = ipr_ioafp_page3_inquiry;
5143 
5144 	ipr_ioafp_inquiry(ipr_cmd, 1, 0,
5145 			  ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, page0_data),
5146 			  sizeof(struct ipr_inquiry_page0));
5147 
5148 	LEAVE;
5149 	return IPR_RC_JOB_RETURN;
5150 }
5151 
5152 /**
5153  * ipr_ioafp_std_inquiry - Send a Standard Inquiry to the adapter.
5154  * @ipr_cmd:	ipr command struct
5155  *
5156  * This function sends a standard inquiry to the adapter.
5157  *
5158  * Return value:
5159  * 	IPR_RC_JOB_RETURN
5160  **/
5161 static int ipr_ioafp_std_inquiry(struct ipr_cmnd *ipr_cmd)
5162 {
5163 	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5164 
5165 	ENTER;
5166 	ipr_cmd->job_step = ipr_ioafp_page0_inquiry;
5167 
5168 	ipr_ioafp_inquiry(ipr_cmd, 0, 0,
5169 			  ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, ioa_vpd),
5170 			  sizeof(struct ipr_ioa_vpd));
5171 
5172 	LEAVE;
5173 	return IPR_RC_JOB_RETURN;
5174 }
5175 
5176 /**
5177  * ipr_ioafp_indentify_hrrq - Send Identify Host RRQ.
5178  * @ipr_cmd:	ipr command struct
5179  *
5180  * This function sends an Identify Host Request Response Queue
5181  * command to establish the HRRQ with the adapter.
5182  *
5183  * Return value:
5184  * 	IPR_RC_JOB_RETURN
5185  **/
5186 static int ipr_ioafp_indentify_hrrq(struct ipr_cmnd *ipr_cmd)
5187 {
5188 	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5189 	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
5190 
5191 	ENTER;
5192 	dev_info(&ioa_cfg->pdev->dev, "Starting IOA initialization sequence.\n");
5193 
5194 	ioarcb->cmd_pkt.cdb[0] = IPR_ID_HOST_RR_Q;
5195 	ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
5196 
5197 	ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
5198 	ioarcb->cmd_pkt.cdb[2] =
5199 		((u32) ioa_cfg->host_rrq_dma >> 24) & 0xff;
5200 	ioarcb->cmd_pkt.cdb[3] =
5201 		((u32) ioa_cfg->host_rrq_dma >> 16) & 0xff;
5202 	ioarcb->cmd_pkt.cdb[4] =
5203 		((u32) ioa_cfg->host_rrq_dma >> 8) & 0xff;
5204 	ioarcb->cmd_pkt.cdb[5] =
5205 		((u32) ioa_cfg->host_rrq_dma) & 0xff;
5206 	ioarcb->cmd_pkt.cdb[7] =
5207 		((sizeof(u32) * IPR_NUM_CMD_BLKS) >> 8) & 0xff;
5208 	ioarcb->cmd_pkt.cdb[8] =
5209 		(sizeof(u32) * IPR_NUM_CMD_BLKS) & 0xff;
5210 
5211 	ipr_cmd->job_step = ipr_ioafp_std_inquiry;
5212 
5213 	ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
5214 
5215 	LEAVE;
5216 	return IPR_RC_JOB_RETURN;
5217 }
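
/*
 * For reference, the Identify Host RRQ CDB built above encodes its
 * parameters big-endian, byte by byte:
 *
 *	cdb[2..5]	DMA address of the host RRQ
 *	cdb[7..8]	queue size in bytes (sizeof(u32) * IPR_NUM_CMD_BLKS)
 */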
5218 
5219 /**
5220  * ipr_reset_timer_done - Adapter reset timer function
5221  * @ipr_cmd:	ipr command struct
5222  *
5223  * Description: This function is used in adapter reset processing
5224  * for timing events. If the reset_cmd pointer in the IOA
5225  * config struct no longer points at this command, nested resets
5226  * are in progress and fail_all_ops will take care of freeing the
5227  * command block.
5228  *
5229  * Return value:
5230  * 	none
5231  **/
5232 static void ipr_reset_timer_done(struct ipr_cmnd *ipr_cmd)
5233 {
5234 	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5235 	unsigned long lock_flags = 0;
5236 
5237 	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
5238 
5239 	if (ioa_cfg->reset_cmd == ipr_cmd) {
5240 		list_del(&ipr_cmd->queue);
5241 		ipr_cmd->done(ipr_cmd);
5242 	}
5243 
5244 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
5245 }
5246 
5247 /**
5248  * ipr_reset_start_timer - Start a timer for adapter reset job
5249  * @ipr_cmd:	ipr command struct
5250  * @timeout:	timeout value
5251  *
5252  * Description: This function is used in adapter reset processing
5253  * for timing events. If the reset_cmd pointer in the IOA
5254  * config struct no longer points at this command, nested resets
5255  * are in progress and fail_all_ops will take care of freeing the
5256  * command block.
5257  *
5258  * Return value:
5259  * 	none
5260  **/
static void ipr_reset_start_timer(struct ipr_cmnd *ipr_cmd,
				  unsigned long timeout)
{
	list_add_tail(&ipr_cmd->queue, &ipr_cmd->ioa_cfg->pending_q);
	ipr_cmd->done = ipr_reset_ioa_job;

	ipr_cmd->timer.data = (unsigned long) ipr_cmd;
	ipr_cmd->timer.expires = jiffies + timeout;
	ipr_cmd->timer.function = (void (*)(unsigned long))ipr_reset_timer_done;
	add_timer(&ipr_cmd->timer);
}

/**
 * ipr_init_ioa_mem - Initialize ioa_cfg control block
 * @ioa_cfg:	ioa cfg struct
 *
 * Return value:
 * 	nothing
 **/
static void ipr_init_ioa_mem(struct ipr_ioa_cfg *ioa_cfg)
{
	memset(ioa_cfg->host_rrq, 0, sizeof(u32) * IPR_NUM_CMD_BLKS);

	/* Initialize Host RRQ pointers */
	ioa_cfg->hrrq_start = ioa_cfg->host_rrq;
	ioa_cfg->hrrq_end = &ioa_cfg->host_rrq[IPR_NUM_CMD_BLKS - 1];
	ioa_cfg->hrrq_curr = ioa_cfg->hrrq_start;
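	/*
	 * The toggle bit starts at 1 and is flipped by the interrupt
	 * handler each time the circular RRQ wraps, letting it tell
	 * fresh response entries from stale ones.
	 */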
	ioa_cfg->toggle_bit = 1;

	/* Zero out config table */
	memset(ioa_cfg->cfg_table, 0, sizeof(struct ipr_config_table));
}

/**
 * ipr_reset_enable_ioa - Enable the IOA following a reset.
 * @ipr_cmd:	ipr command struct
 *
 * This function reinitializes some control blocks and
 * enables destructive diagnostics on the adapter.
 *
 * Return value:
 * 	IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
 **/
static int ipr_reset_enable_ioa(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	volatile u32 int_reg;

	ENTER;
	ipr_cmd->job_step = ipr_ioafp_identify_hrrq;
	ipr_init_ioa_mem(ioa_cfg);

	ioa_cfg->allow_interrupts = 1;
	int_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
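	/*
	 * If the adapter has already transitioned to operational, unmask
	 * the interrupts we need and continue the job inline; there is
	 * no transition to wait for.
	 */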
	if (int_reg & IPR_PCII_IOA_TRANS_TO_OPER) {
		writel((IPR_PCII_ERROR_INTERRUPTS | IPR_PCII_HRRQ_UPDATED),
		       ioa_cfg->regs.clr_interrupt_mask_reg);
		int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
		return IPR_RC_JOB_CONTINUE;
	}

	/* Enable destructive diagnostics on IOA */
	writel(ioa_cfg->doorbell, ioa_cfg->regs.set_uproc_interrupt_reg);

	writel(IPR_PCII_OPER_INTERRUPTS, ioa_cfg->regs.clr_interrupt_mask_reg);
	int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);

	dev_info(&ioa_cfg->pdev->dev, "Initializing IOA.\n");

	ipr_cmd->timer.data = (unsigned long) ipr_cmd;
	ipr_cmd->timer.expires = jiffies + (ipr_transop_timeout * HZ);
	ipr_cmd->timer.function = (void (*)(unsigned long))ipr_oper_timeout;
	ipr_cmd->done = ipr_reset_ioa_job;
	add_timer(&ipr_cmd->timer);
	list_add_tail(&ipr_cmd->queue, &ioa_cfg->pending_q);

	LEAVE;
	return IPR_RC_JOB_RETURN;
}

/**
 * ipr_reset_wait_for_dump - Wait for a dump to timeout.
 * @ipr_cmd:	ipr command struct
 *
 * This function is invoked when an adapter dump has run out
 * of processing time.
 *
 * Return value:
 * 	IPR_RC_JOB_CONTINUE
 **/
static int ipr_reset_wait_for_dump(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;

	if (ioa_cfg->sdt_state == GET_DUMP)
		ioa_cfg->sdt_state = ABORT_DUMP;

	ipr_cmd->job_step = ipr_reset_alert;

	return IPR_RC_JOB_CONTINUE;
}

/**
 * ipr_unit_check_no_data - Log a unit check/no data error log
 * @ioa_cfg:		ioa config struct
 *
 * Logs an error indicating the adapter unit checked, but for some
 * reason, we were unable to fetch the unit check buffer.
 *
 * Return value:
 * 	nothing
 **/
static void ipr_unit_check_no_data(struct ipr_ioa_cfg *ioa_cfg)
{
	ioa_cfg->errors_logged++;
	dev_err(&ioa_cfg->pdev->dev, "IOA unit check with no data\n");
}

/**
 * ipr_get_unit_check_buffer - Get the unit check buffer from the IOA
 * @ioa_cfg:		ioa config struct
 *
 * Fetches the unit check buffer from the adapter by clocking the data
 * through the mailbox register.
 *
 * Return value:
 * 	nothing
 **/
static void ipr_get_unit_check_buffer(struct ipr_ioa_cfg *ioa_cfg)
{
	unsigned long mailbox;
	struct ipr_hostrcb *hostrcb;
	struct ipr_uc_sdt sdt;
	int rc, length;

	mailbox = readl(ioa_cfg->ioa_mailbox);

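	/*
	 * The mailbox gives the location of the smart dump table; the
	 * unit check buffer is only recoverable if the table is in
	 * format 2 and its first entry is valid.
	 */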
	if (!ipr_sdt_is_fmt2(mailbox)) {
		ipr_unit_check_no_data(ioa_cfg);
		return;
	}

	memset(&sdt, 0, sizeof(struct ipr_uc_sdt));
	rc = ipr_get_ldump_data_section(ioa_cfg, mailbox, (__be32 *) &sdt,
					(sizeof(struct ipr_uc_sdt)) / sizeof(__be32));

	if (rc || (be32_to_cpu(sdt.hdr.state) != IPR_FMT2_SDT_READY_TO_USE) ||
	    !(sdt.entry[0].flags & IPR_SDT_VALID_ENTRY)) {
		ipr_unit_check_no_data(ioa_cfg);
		return;
	}

	/* Find length of the first sdt entry (UC buffer) */
	length = (be32_to_cpu(sdt.entry[0].end_offset) -
		  be32_to_cpu(sdt.entry[0].bar_str_offset)) & IPR_FMT2_MBX_ADDR_MASK;

	hostrcb = list_entry(ioa_cfg->hostrcb_free_q.next,
			     struct ipr_hostrcb, queue);
	list_del(&hostrcb->queue);
	memset(&hostrcb->hcam, 0, sizeof(hostrcb->hcam));

	rc = ipr_get_ldump_data_section(ioa_cfg,
					be32_to_cpu(sdt.entry[0].bar_str_offset),
					(__be32 *)&hostrcb->hcam,
					min(length, (int)sizeof(hostrcb->hcam)) / sizeof(__be32));

	if (!rc)
		ipr_handle_log_data(ioa_cfg, hostrcb);
	else
		ipr_unit_check_no_data(ioa_cfg);

	list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_free_q);
}

/**
 * ipr_reset_restore_cfg_space - Restore PCI config space.
 * @ipr_cmd:	ipr command struct
 *
 * Description: This function restores the saved PCI config space of
 * the adapter, fails all outstanding ops back to the callers, and
 * fetches the dump/unit check if applicable to this reset.
 *
 * Return value:
 * 	IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
 **/
static int ipr_reset_restore_cfg_space(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	int rc;

	ENTER;
	pci_unblock_user_cfg_access(ioa_cfg->pdev);
	rc = pci_restore_state(ioa_cfg->pdev);

	if (rc != PCIBIOS_SUCCESSFUL) {
		ipr_cmd->ioasa.ioasc = cpu_to_be32(IPR_IOASC_PCI_ACCESS_ERROR);
		return IPR_RC_JOB_CONTINUE;
	}

	if (ipr_set_pcix_cmd_reg(ioa_cfg)) {
		ipr_cmd->ioasa.ioasc = cpu_to_be32(IPR_IOASC_PCI_ACCESS_ERROR);
		return IPR_RC_JOB_CONTINUE;
	}

	ipr_fail_all_ops(ioa_cfg);

	if (ioa_cfg->ioa_unit_checked) {
		ioa_cfg->ioa_unit_checked = 0;
		ipr_get_unit_check_buffer(ioa_cfg);
		ipr_cmd->job_step = ipr_reset_alert;
		ipr_reset_start_timer(ipr_cmd, 0);
		return IPR_RC_JOB_RETURN;
	}

	if (ioa_cfg->in_ioa_bringdown) {
		ipr_cmd->job_step = ipr_ioa_bringdown_done;
	} else {
		ipr_cmd->job_step = ipr_reset_enable_ioa;

		if (GET_DUMP == ioa_cfg->sdt_state) {
			ipr_reset_start_timer(ipr_cmd, IPR_DUMP_TIMEOUT);
			ipr_cmd->job_step = ipr_reset_wait_for_dump;
			schedule_work(&ioa_cfg->work_q);
			return IPR_RC_JOB_RETURN;
		}
	}

	LEAVE;
	return IPR_RC_JOB_CONTINUE;
}

/**
 * ipr_reset_start_bist - Run BIST on the adapter.
 * @ipr_cmd:	ipr command struct
 *
 * Description: This function runs BIST on the adapter, then delays 2 seconds.
 *
 * Return value:
 * 	IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
 **/
static int ipr_reset_start_bist(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	int rc;

	ENTER;
	pci_block_user_cfg_access(ioa_cfg->pdev);
	rc = pci_write_config_byte(ioa_cfg->pdev, PCI_BIST, PCI_BIST_START);

	if (rc != PCIBIOS_SUCCESSFUL) {
		ipr_cmd->ioasa.ioasc = cpu_to_be32(IPR_IOASC_PCI_ACCESS_ERROR);
		rc = IPR_RC_JOB_CONTINUE;
	} else {
		ipr_cmd->job_step = ipr_reset_restore_cfg_space;
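		/*
		 * BIST was started successfully; per the description above,
		 * give the adapter IPR_WAIT_FOR_BIST_TIMEOUT (the 2 second
		 * delay) before restoring PCI config space.
		 */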
		ipr_reset_start_timer(ipr_cmd, IPR_WAIT_FOR_BIST_TIMEOUT);
		rc = IPR_RC_JOB_RETURN;
	}

	LEAVE;
	return rc;
}

/**
 * ipr_reset_allowed - Query whether or not IOA can be reset
 * @ioa_cfg:	ioa config struct
 *
 * Return value:
 * 	0 if reset not allowed / non-zero if reset is allowed
 **/
static int ipr_reset_allowed(struct ipr_ioa_cfg *ioa_cfg)
{
	volatile u32 temp_reg;

	temp_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
	return ((temp_reg & IPR_PCII_CRITICAL_OPERATION) == 0);
}

/**
 * ipr_reset_wait_to_start_bist - Wait for permission to reset IOA.
 * @ipr_cmd:	ipr command struct
 *
 * Description: This function waits for adapter permission to run BIST,
 * then runs BIST. If the adapter does not give permission after a
 * reasonable time, we will reset the adapter anyway. The impact of
 * resetting the adapter without warning the adapter is the risk of
 * losing the persistent error log on the adapter. If the adapter is
 * reset while it is writing to the flash on the adapter, the flash
 * segment will have bad ECC and be zeroed.
 *
 * Return value:
 * 	IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
 **/
static int ipr_reset_wait_to_start_bist(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	int rc = IPR_RC_JOB_RETURN;

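	/*
	 * Poll in IPR_CHECK_FOR_RESET_TIMEOUT increments until the IOA
	 * leaves critical operation or the budget in u.time_left runs
	 * out, then run BIST regardless.
	 */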
	if (!ipr_reset_allowed(ioa_cfg) && ipr_cmd->u.time_left) {
		ipr_cmd->u.time_left -= IPR_CHECK_FOR_RESET_TIMEOUT;
		ipr_reset_start_timer(ipr_cmd, IPR_CHECK_FOR_RESET_TIMEOUT);
	} else {
		ipr_cmd->job_step = ipr_reset_start_bist;
		rc = IPR_RC_JOB_CONTINUE;
	}

	return rc;
}

/**
 * ipr_reset_alert - Alert the adapter of a pending reset
 * @ipr_cmd:	ipr command struct
 *
 * Description: This function alerts the adapter that it will be reset.
 * If memory space is not currently enabled, proceed directly
 * to running BIST on the adapter. The timer must always be started
 * so we guarantee we do not run BIST from ipr_isr.
 *
 * Return value:
 * 	IPR_RC_JOB_RETURN
 **/
static int ipr_reset_alert(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	u16 cmd_reg;
	int rc;

	ENTER;
	rc = pci_read_config_word(ioa_cfg->pdev, PCI_COMMAND, &cmd_reg);

	if ((rc == PCIBIOS_SUCCESSFUL) && (cmd_reg & PCI_COMMAND_MEMORY)) {
		ipr_mask_and_clear_interrupts(ioa_cfg, ~0);
		writel(IPR_UPROCI_RESET_ALERT, ioa_cfg->regs.set_uproc_interrupt_reg);
		ipr_cmd->job_step = ipr_reset_wait_to_start_bist;
	} else {
		ipr_cmd->job_step = ipr_reset_start_bist;
	}

	ipr_cmd->u.time_left = IPR_WAIT_FOR_RESET_TIMEOUT;
	ipr_reset_start_timer(ipr_cmd, IPR_CHECK_FOR_RESET_TIMEOUT);

	LEAVE;
	return IPR_RC_JOB_RETURN;
}

/**
 * ipr_reset_ucode_download_done - Microcode download completion
 * @ipr_cmd:	ipr command struct
 *
 * Description: This function unmaps the microcode download buffer.
 *
 * Return value:
 * 	IPR_RC_JOB_CONTINUE
 **/
static int ipr_reset_ucode_download_done(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	struct ipr_sglist *sglist = ioa_cfg->ucode_sglist;

	pci_unmap_sg(ioa_cfg->pdev, sglist->scatterlist,
		     sglist->num_sg, DMA_TO_DEVICE);

	ipr_cmd->job_step = ipr_reset_alert;
	return IPR_RC_JOB_CONTINUE;
}

/**
 * ipr_reset_ucode_download - Download microcode to the adapter
 * @ipr_cmd:	ipr command struct
 *
 * Description: This function checks to see if there is microcode
 * to download to the adapter. If there is, a download is performed.
 *
 * Return value:
 * 	IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
 **/
static int ipr_reset_ucode_download(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	struct ipr_sglist *sglist = ioa_cfg->ucode_sglist;

	ENTER;
	ipr_cmd->job_step = ipr_reset_alert;

	if (!sglist)
		return IPR_RC_JOB_CONTINUE;

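	/*
	 * Build a WRITE BUFFER (download microcode and save) CDB with the
	 * image length in bytes 6-8, then chain to the completion step
	 * that unmaps the download buffer.
	 */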
	ipr_cmd->ioarcb.res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
	ipr_cmd->ioarcb.cmd_pkt.request_type = IPR_RQTYPE_SCSICDB;
	ipr_cmd->ioarcb.cmd_pkt.cdb[0] = WRITE_BUFFER;
	ipr_cmd->ioarcb.cmd_pkt.cdb[1] = IPR_WR_BUF_DOWNLOAD_AND_SAVE;
	ipr_cmd->ioarcb.cmd_pkt.cdb[6] = (sglist->buffer_len & 0xff0000) >> 16;
	ipr_cmd->ioarcb.cmd_pkt.cdb[7] = (sglist->buffer_len & 0x00ff00) >> 8;
	ipr_cmd->ioarcb.cmd_pkt.cdb[8] = sglist->buffer_len & 0x0000ff;

	ipr_build_ucode_ioadl(ipr_cmd, sglist);
	ipr_cmd->job_step = ipr_reset_ucode_download_done;

	ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout,
		   IPR_WRITE_BUFFER_TIMEOUT);

	LEAVE;
	return IPR_RC_JOB_RETURN;
}

/**
 * ipr_reset_shutdown_ioa - Shutdown the adapter
 * @ipr_cmd:	ipr command struct
 *
 * Description: This function issues an adapter shutdown of the
 * specified type to the specified adapter as part of the
 * adapter reset job.
 *
 * Return value:
 * 	IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
 **/
static int ipr_reset_shutdown_ioa(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	enum ipr_shutdown_type shutdown_type = ipr_cmd->u.shutdown_type;
	unsigned long timeout;
	int rc = IPR_RC_JOB_CONTINUE;

	ENTER;
	if (shutdown_type != IPR_SHUTDOWN_NONE && !ioa_cfg->ioa_is_dead) {
		ipr_cmd->ioarcb.res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
		ipr_cmd->ioarcb.cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
		ipr_cmd->ioarcb.cmd_pkt.cdb[0] = IPR_IOA_SHUTDOWN;
		ipr_cmd->ioarcb.cmd_pkt.cdb[1] = shutdown_type;

		if (shutdown_type == IPR_SHUTDOWN_ABBREV)
			timeout = IPR_ABBREV_SHUTDOWN_TIMEOUT;
		else if (shutdown_type == IPR_SHUTDOWN_PREPARE_FOR_NORMAL)
			timeout = IPR_INTERNAL_TIMEOUT;
		else
			timeout = IPR_SHUTDOWN_TIMEOUT;

		ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, timeout);

		rc = IPR_RC_JOB_RETURN;
		ipr_cmd->job_step = ipr_reset_ucode_download;
	} else
		ipr_cmd->job_step = ipr_reset_alert;

	LEAVE;
	return rc;
}

/**
 * ipr_reset_ioa_job - Adapter reset job
 * @ipr_cmd:	ipr command struct
 *
 * Description: This function is the job router for the adapter reset job.
 *
 * Return value:
 * 	none
 **/
static void ipr_reset_ioa_job(struct ipr_cmnd *ipr_cmd)
{
	u32 rc, ioasc;
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;

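	/*
	 * Drive the reset state machine: each job step either runs the
	 * next step inline (IPR_RC_JOB_CONTINUE) or has arranged for a
	 * timer or command completion to re-enter this routine
	 * (IPR_RC_JOB_RETURN).
	 */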
	do {
		ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);

		if (ioa_cfg->reset_cmd != ipr_cmd) {
			/*
			 * We are doing nested adapter resets and this is
			 * not the current reset job.
			 */
			list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
			return;
		}

		if (IPR_IOASC_SENSE_KEY(ioasc)) {
			rc = ipr_cmd->job_step_failed(ipr_cmd);
			if (rc == IPR_RC_JOB_RETURN)
				return;
		}

		ipr_reinit_ipr_cmnd(ipr_cmd);
		ipr_cmd->job_step_failed = ipr_reset_cmd_failed;
		rc = ipr_cmd->job_step(ipr_cmd);
	} while (rc == IPR_RC_JOB_CONTINUE);
}

/**
 * _ipr_initiate_ioa_reset - Initiate an adapter reset
 * @ioa_cfg:		ioa config struct
 * @job_step:		first job step of reset job
 * @shutdown_type:	shutdown type
 *
 * Description: This function will initiate the reset of the given adapter
 * starting at the selected job step.
 * If the caller needs to wait on the completion of the reset,
 * the caller must sleep on the reset_wait_q.
 *
 * Return value:
 * 	none
 **/
static void _ipr_initiate_ioa_reset(struct ipr_ioa_cfg *ioa_cfg,
				    int (*job_step) (struct ipr_cmnd *),
				    enum ipr_shutdown_type shutdown_type)
{
	struct ipr_cmnd *ipr_cmd;

	ioa_cfg->in_reset_reload = 1;
	ioa_cfg->allow_cmds = 0;
	scsi_block_requests(ioa_cfg->host);

	ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
	ioa_cfg->reset_cmd = ipr_cmd;
	ipr_cmd->job_step = job_step;
	ipr_cmd->u.shutdown_type = shutdown_type;

	ipr_reset_ioa_job(ipr_cmd);
}

/**
 * ipr_initiate_ioa_reset - Initiate an adapter reset
 * @ioa_cfg:		ioa config struct
 * @shutdown_type:	shutdown type
 *
 * Description: This function will initiate the reset of the given adapter.
 * If the caller needs to wait on the completion of the reset,
 * the caller must sleep on the reset_wait_q.
 *
 * Return value:
 * 	none
 **/
static void ipr_initiate_ioa_reset(struct ipr_ioa_cfg *ioa_cfg,
				   enum ipr_shutdown_type shutdown_type)
{
	if (ioa_cfg->ioa_is_dead)
		return;

	if (ioa_cfg->in_reset_reload && ioa_cfg->sdt_state == GET_DUMP)
		ioa_cfg->sdt_state = ABORT_DUMP;

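	/*
	 * After IPR_NUM_RESET_RELOAD_RETRIES consecutive failed reset
	 * attempts, give up and take the adapter offline rather than
	 * looping in reset/reload forever.
	 */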
	if (ioa_cfg->reset_retries++ >= IPR_NUM_RESET_RELOAD_RETRIES) {
		dev_err(&ioa_cfg->pdev->dev,
			"IOA taken offline - error recovery failed\n");

		ioa_cfg->reset_retries = 0;
		ioa_cfg->ioa_is_dead = 1;

		if (ioa_cfg->in_ioa_bringdown) {
			ioa_cfg->reset_cmd = NULL;
			ioa_cfg->in_reset_reload = 0;
			ipr_fail_all_ops(ioa_cfg);
			wake_up_all(&ioa_cfg->reset_wait_q);

			spin_unlock_irq(ioa_cfg->host->host_lock);
			scsi_unblock_requests(ioa_cfg->host);
			spin_lock_irq(ioa_cfg->host->host_lock);
			return;
		} else {
			ioa_cfg->in_ioa_bringdown = 1;
			shutdown_type = IPR_SHUTDOWN_NONE;
		}
	}

	_ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_shutdown_ioa,
				shutdown_type);
}

/**
 * ipr_reset_freeze - Hold off all I/O activity
 * @ipr_cmd:	ipr command struct
 *
 * Description: If the PCI slot is frozen, hold off all I/O
 * activity; then, as soon as the slot is available again,
 * initiate an adapter reset.
 *
 * Return value:
 * 	IPR_RC_JOB_RETURN
 **/
static int ipr_reset_freeze(struct ipr_cmnd *ipr_cmd)
{
	/* Disallow new interrupts, avoid loop */
	ipr_cmd->ioa_cfg->allow_interrupts = 0;
	list_add_tail(&ipr_cmd->queue, &ipr_cmd->ioa_cfg->pending_q);
	ipr_cmd->done = ipr_reset_ioa_job;
	return IPR_RC_JOB_RETURN;
}

/**
 * ipr_pci_frozen - Called when slot has experienced a PCI bus error.
 * @pdev:	PCI device struct
 *
 * Description: This routine is called to tell us that the PCI bus
 * is down. Can't do anything here, except put the device driver
 * into a holding pattern, waiting for the PCI bus to come back.
 *
 * Return value:
 * 	none
 **/
static void ipr_pci_frozen(struct pci_dev *pdev)
{
	unsigned long flags = 0;
	struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);

	spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
	_ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_freeze, IPR_SHUTDOWN_NONE);
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
}

/**
 * ipr_pci_slot_reset - Called when PCI slot has been reset.
 * @pdev:	PCI device struct
 *
 * Description: This routine is called by the pci error recovery
 * code after the PCI slot has been reset, just before we
 * should resume normal operations.
 *
 * Return value:
 * 	PCI_ERS_RESULT_RECOVERED
 **/
static pci_ers_result_t ipr_pci_slot_reset(struct pci_dev *pdev)
{
	unsigned long flags = 0;
	struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);

	spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
	_ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_restore_cfg_space,
				IPR_SHUTDOWN_NONE);
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
	return PCI_ERS_RESULT_RECOVERED;
}

/**
 * ipr_pci_perm_failure - Called when PCI slot is dead for good.
 * @pdev:	PCI device struct
 *
 * Description: This routine is called when the PCI bus has
 * permanently failed.
 *
 * Return value:
 * 	none
 **/
static void ipr_pci_perm_failure(struct pci_dev *pdev)
{
	unsigned long flags = 0;
	struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);

	spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
	if (ioa_cfg->sdt_state == WAIT_FOR_DUMP)
		ioa_cfg->sdt_state = ABORT_DUMP;
	ioa_cfg->reset_retries = IPR_NUM_RESET_RELOAD_RETRIES;
	ioa_cfg->in_ioa_bringdown = 1;
	ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
}

/**
 * ipr_pci_error_detected - Called when a PCI error is detected.
 * @pdev:	PCI device struct
 * @state:	PCI channel state
 *
 * Description: Called when a PCI error is detected.
 *
 * Return value:
 * 	PCI_ERS_RESULT_NEED_RESET or PCI_ERS_RESULT_DISCONNECT
 **/
static pci_ers_result_t ipr_pci_error_detected(struct pci_dev *pdev,
					       pci_channel_state_t state)
{
	switch (state) {
	case pci_channel_io_frozen:
		ipr_pci_frozen(pdev);
		return PCI_ERS_RESULT_NEED_RESET;
	case pci_channel_io_perm_failure:
		ipr_pci_perm_failure(pdev);
		return PCI_ERS_RESULT_DISCONNECT;
	default:
		break;
	}
	return PCI_ERS_RESULT_NEED_RESET;
}

/**
 * ipr_probe_ioa_part2 - Initializes IOAs found in ipr_probe_ioa(..)
 * @ioa_cfg:	ioa cfg struct
 *
 * Description: This is the second phase of adapter initialization.
 * This function takes care of initializing the adapter to the point
 * where it can accept new commands.
 *
 * Return value:
 * 	0 on success / -EIO on failure
 **/
static int __devinit ipr_probe_ioa_part2(struct ipr_ioa_cfg *ioa_cfg)
{
	int rc = 0;
	unsigned long host_lock_flags = 0;

	ENTER;
	spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
	dev_dbg(&ioa_cfg->pdev->dev, "ioa_cfg adx: 0x%p\n", ioa_cfg);
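	/*
	 * needs_hard_reset was set at probe time when the card was found
	 * in an unknown state; start with a full adapter reset in that
	 * case, otherwise simply enabling the IOA is sufficient.
	 */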
	if (ioa_cfg->needs_hard_reset) {
		ioa_cfg->needs_hard_reset = 0;
		ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
	} else
		_ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_enable_ioa,
					IPR_SHUTDOWN_NONE);

	spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
	wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
	spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);

	if (ioa_cfg->ioa_is_dead) {
		rc = -EIO;
	} else if (ipr_invalid_adapter(ioa_cfg)) {
		if (!ipr_testmode)
			rc = -EIO;

		dev_err(&ioa_cfg->pdev->dev,
			"Adapter not supported in this hardware configuration.\n");
	}

	spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);

	LEAVE;
	return rc;
}

/**
 * ipr_free_cmd_blks - Frees command blocks allocated for an adapter
 * @ioa_cfg:	ioa config struct
 *
 * Return value:
 * 	none
 **/
static void ipr_free_cmd_blks(struct ipr_ioa_cfg *ioa_cfg)
{
	int i;

	for (i = 0; i < IPR_NUM_CMD_BLKS; i++) {
		if (ioa_cfg->ipr_cmnd_list[i])
			pci_pool_free(ioa_cfg->ipr_cmd_pool,
				      ioa_cfg->ipr_cmnd_list[i],
				      ioa_cfg->ipr_cmnd_list_dma[i]);

		ioa_cfg->ipr_cmnd_list[i] = NULL;
	}

	if (ioa_cfg->ipr_cmd_pool)
		pci_pool_destroy(ioa_cfg->ipr_cmd_pool);

	ioa_cfg->ipr_cmd_pool = NULL;
}

/**
 * ipr_free_mem - Frees memory allocated for an adapter
 * @ioa_cfg:	ioa cfg struct
 *
 * Return value:
 * 	nothing
 **/
static void ipr_free_mem(struct ipr_ioa_cfg *ioa_cfg)
{
	int i;

	kfree(ioa_cfg->res_entries);
	pci_free_consistent(ioa_cfg->pdev, sizeof(struct ipr_misc_cbs),
			    ioa_cfg->vpd_cbs, ioa_cfg->vpd_cbs_dma);
	ipr_free_cmd_blks(ioa_cfg);
	pci_free_consistent(ioa_cfg->pdev, sizeof(u32) * IPR_NUM_CMD_BLKS,
			    ioa_cfg->host_rrq, ioa_cfg->host_rrq_dma);
	pci_free_consistent(ioa_cfg->pdev, sizeof(struct ipr_config_table),
			    ioa_cfg->cfg_table,
			    ioa_cfg->cfg_table_dma);

	for (i = 0; i < IPR_NUM_HCAMS; i++) {
		pci_free_consistent(ioa_cfg->pdev,
				    sizeof(struct ipr_hostrcb),
				    ioa_cfg->hostrcb[i],
				    ioa_cfg->hostrcb_dma[i]);
	}

	ipr_free_dump(ioa_cfg);
	kfree(ioa_cfg->trace);
}

/**
 * ipr_free_all_resources - Free all allocated resources for an adapter.
 * @ioa_cfg:	ioa config struct
 *
 * This function frees all allocated resources for the
 * specified adapter.
 *
 * Return value:
 * 	none
 **/
static void ipr_free_all_resources(struct ipr_ioa_cfg *ioa_cfg)
{
	struct pci_dev *pdev = ioa_cfg->pdev;

	ENTER;
	free_irq(pdev->irq, ioa_cfg);
	iounmap(ioa_cfg->hdw_dma_regs);
	pci_release_regions(pdev);
	ipr_free_mem(ioa_cfg);
	scsi_host_put(ioa_cfg->host);
	pci_disable_device(pdev);
	LEAVE;
}

/**
 * ipr_alloc_cmd_blks - Allocate command blocks for an adapter
 * @ioa_cfg:	ioa config struct
 *
 * Return value:
 * 	0 on success / -ENOMEM on allocation failure
 **/
static int __devinit ipr_alloc_cmd_blks(struct ipr_ioa_cfg *ioa_cfg)
{
	struct ipr_cmnd *ipr_cmd;
	struct ipr_ioarcb *ioarcb;
	dma_addr_t dma_addr;
	int i;

	ioa_cfg->ipr_cmd_pool = pci_pool_create(IPR_NAME, ioa_cfg->pdev,
						sizeof(struct ipr_cmnd), 8, 0);

	if (!ioa_cfg->ipr_cmd_pool)
		return -ENOMEM;

	for (i = 0; i < IPR_NUM_CMD_BLKS; i++) {
		ipr_cmd = pci_pool_alloc(ioa_cfg->ipr_cmd_pool, SLAB_KERNEL, &dma_addr);

		if (!ipr_cmd) {
			ipr_free_cmd_blks(ioa_cfg);
			return -ENOMEM;
		}

		memset(ipr_cmd, 0, sizeof(*ipr_cmd));
		ioa_cfg->ipr_cmnd_list[i] = ipr_cmd;
		ioa_cfg->ipr_cmnd_list_dma[i] = dma_addr;

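		/*
		 * Give the adapter the bus addresses it needs for this
		 * command: the IOARCB, its scatter/gather list (IOADL) and
		 * its status area (IOASA) all live in the same pool block.
		 */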
		ioarcb = &ipr_cmd->ioarcb;
		ioarcb->ioarcb_host_pci_addr = cpu_to_be32(dma_addr);
		ioarcb->host_response_handle = cpu_to_be32(i << 2);
		ioarcb->write_ioadl_addr =
			cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, ioadl));
		ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr;
		ioarcb->ioasa_host_pci_addr =
			cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, ioasa));
		ioarcb->ioasa_len = cpu_to_be16(sizeof(struct ipr_ioasa));
		ipr_cmd->cmd_index = i;
		ipr_cmd->ioa_cfg = ioa_cfg;
		ipr_cmd->sense_buffer_dma = dma_addr +
			offsetof(struct ipr_cmnd, sense_buffer);

		list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
	}

	return 0;
}

/**
 * ipr_alloc_mem - Allocate memory for an adapter
 * @ioa_cfg:	ioa config struct
 *
 * Return value:
 * 	0 on success / non-zero for error
 **/
static int __devinit ipr_alloc_mem(struct ipr_ioa_cfg *ioa_cfg)
{
	struct pci_dev *pdev = ioa_cfg->pdev;
	int i, rc = -ENOMEM;

	ENTER;
	ioa_cfg->res_entries = kzalloc(sizeof(struct ipr_resource_entry) *
				       IPR_MAX_PHYSICAL_DEVS, GFP_KERNEL);

	if (!ioa_cfg->res_entries)
		goto out;

	for (i = 0; i < IPR_MAX_PHYSICAL_DEVS; i++)
		list_add_tail(&ioa_cfg->res_entries[i].queue, &ioa_cfg->free_res_q);

	ioa_cfg->vpd_cbs = pci_alloc_consistent(ioa_cfg->pdev,
						sizeof(struct ipr_misc_cbs),
						&ioa_cfg->vpd_cbs_dma);

	if (!ioa_cfg->vpd_cbs)
		goto out_free_res_entries;

	if (ipr_alloc_cmd_blks(ioa_cfg))
		goto out_free_vpd_cbs;

	ioa_cfg->host_rrq = pci_alloc_consistent(ioa_cfg->pdev,
						 sizeof(u32) * IPR_NUM_CMD_BLKS,
						 &ioa_cfg->host_rrq_dma);

	if (!ioa_cfg->host_rrq)
		goto out_ipr_free_cmd_blocks;

	ioa_cfg->cfg_table = pci_alloc_consistent(ioa_cfg->pdev,
						  sizeof(struct ipr_config_table),
						  &ioa_cfg->cfg_table_dma);

	if (!ioa_cfg->cfg_table)
		goto out_free_host_rrq;

	for (i = 0; i < IPR_NUM_HCAMS; i++) {
		ioa_cfg->hostrcb[i] = pci_alloc_consistent(ioa_cfg->pdev,
							   sizeof(struct ipr_hostrcb),
							   &ioa_cfg->hostrcb_dma[i]);

		if (!ioa_cfg->hostrcb[i])
			goto out_free_hostrcb_dma;

		ioa_cfg->hostrcb[i]->hostrcb_dma =
			ioa_cfg->hostrcb_dma[i] + offsetof(struct ipr_hostrcb, hcam);
		list_add_tail(&ioa_cfg->hostrcb[i]->queue, &ioa_cfg->hostrcb_free_q);
	}

	ioa_cfg->trace = kzalloc(sizeof(struct ipr_trace_entry) *
				 IPR_NUM_TRACE_ENTRIES, GFP_KERNEL);

	if (!ioa_cfg->trace)
		goto out_free_hostrcb_dma;

	rc = 0;
out:
	LEAVE;
	return rc;

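	/*
	 * Error unwind: i indexes the first hostrcb that failed to
	 * allocate (or IPR_NUM_HCAMS if the trace allocation failed), so
	 * the loop below frees only what was successfully allocated.
	 */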
out_free_hostrcb_dma:
	while (i-- > 0) {
		pci_free_consistent(pdev, sizeof(struct ipr_hostrcb),
				    ioa_cfg->hostrcb[i],
				    ioa_cfg->hostrcb_dma[i]);
	}
	pci_free_consistent(pdev, sizeof(struct ipr_config_table),
			    ioa_cfg->cfg_table, ioa_cfg->cfg_table_dma);
out_free_host_rrq:
	pci_free_consistent(pdev, sizeof(u32) * IPR_NUM_CMD_BLKS,
			    ioa_cfg->host_rrq, ioa_cfg->host_rrq_dma);
out_ipr_free_cmd_blocks:
	ipr_free_cmd_blks(ioa_cfg);
out_free_vpd_cbs:
	pci_free_consistent(pdev, sizeof(struct ipr_misc_cbs),
			    ioa_cfg->vpd_cbs, ioa_cfg->vpd_cbs_dma);
out_free_res_entries:
	kfree(ioa_cfg->res_entries);
	goto out;
}

/**
 * ipr_initialize_bus_attr - Initialize SCSI bus attributes to default values
 * @ioa_cfg:	ioa config struct
 *
 * Return value:
 * 	none
 **/
static void __devinit ipr_initialize_bus_attr(struct ipr_ioa_cfg *ioa_cfg)
{
	int i;

	for (i = 0; i < IPR_MAX_NUM_BUSES; i++) {
		ioa_cfg->bus_attr[i].bus = i;
		ioa_cfg->bus_attr[i].qas_enabled = 0;
		ioa_cfg->bus_attr[i].bus_width = IPR_DEFAULT_BUS_WIDTH;
		if (ipr_max_speed < ARRAY_SIZE(ipr_max_bus_speeds))
			ioa_cfg->bus_attr[i].max_xfer_rate = ipr_max_bus_speeds[ipr_max_speed];
		else
			ioa_cfg->bus_attr[i].max_xfer_rate = IPR_U160_SCSI_RATE;
	}
}

/**
 * ipr_init_ioa_cfg - Initialize IOA config struct
 * @ioa_cfg:	ioa config struct
 * @host:		scsi host struct
 * @pdev:		PCI dev struct
 *
 * Return value:
 * 	none
 **/
static void __devinit ipr_init_ioa_cfg(struct ipr_ioa_cfg *ioa_cfg,
				       struct Scsi_Host *host, struct pci_dev *pdev)
{
	const struct ipr_interrupt_offsets *p;
	struct ipr_interrupts *t;
	void __iomem *base;

	ioa_cfg->host = host;
	ioa_cfg->pdev = pdev;
	ioa_cfg->log_level = ipr_log_level;
	ioa_cfg->doorbell = IPR_DOORBELL;
	if (!ipr_auto_create)
		ioa_cfg->doorbell |= IPR_RUNTIME_RESET;
	sprintf(ioa_cfg->eye_catcher, IPR_EYECATCHER);
	sprintf(ioa_cfg->trace_start, IPR_TRACE_START_LABEL);
	sprintf(ioa_cfg->ipr_free_label, IPR_FREEQ_LABEL);
	sprintf(ioa_cfg->ipr_pending_label, IPR_PENDQ_LABEL);
	sprintf(ioa_cfg->cfg_table_start, IPR_CFG_TBL_START);
	sprintf(ioa_cfg->resource_table_label, IPR_RES_TABLE_LABEL);
	sprintf(ioa_cfg->ipr_hcam_label, IPR_HCAM_LABEL);
	sprintf(ioa_cfg->ipr_cmd_label, IPR_CMD_LABEL);

	INIT_LIST_HEAD(&ioa_cfg->free_q);
	INIT_LIST_HEAD(&ioa_cfg->pending_q);
	INIT_LIST_HEAD(&ioa_cfg->hostrcb_free_q);
	INIT_LIST_HEAD(&ioa_cfg->hostrcb_pending_q);
	INIT_LIST_HEAD(&ioa_cfg->free_res_q);
	INIT_LIST_HEAD(&ioa_cfg->used_res_q);
	INIT_WORK(&ioa_cfg->work_q, ipr_worker_thread, ioa_cfg);
	init_waitqueue_head(&ioa_cfg->reset_wait_q);
	ioa_cfg->sdt_state = INACTIVE;
	if (ipr_enable_cache)
		ioa_cfg->cache_state = CACHE_ENABLED;
	else
		ioa_cfg->cache_state = CACHE_DISABLED;

	ipr_initialize_bus_attr(ioa_cfg);

	host->max_id = IPR_MAX_NUM_TARGETS_PER_BUS;
	host->max_lun = IPR_MAX_NUM_LUNS_PER_TARGET;
	host->max_channel = IPR_MAX_BUS_TO_SCAN;
	host->unique_id = host->host_no;
	host->max_cmd_len = IPR_MAX_CDB_LEN;
	pci_set_drvdata(pdev, ioa_cfg);

	p = &ioa_cfg->chip_cfg->regs;
	t = &ioa_cfg->regs;
	base = ioa_cfg->hdw_dma_regs;

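	/* Translate the chip-specific register offsets into mapped addresses */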
	t->set_interrupt_mask_reg = base + p->set_interrupt_mask_reg;
	t->clr_interrupt_mask_reg = base + p->clr_interrupt_mask_reg;
	t->sense_interrupt_mask_reg = base + p->sense_interrupt_mask_reg;
	t->clr_interrupt_reg = base + p->clr_interrupt_reg;
	t->sense_interrupt_reg = base + p->sense_interrupt_reg;
	t->ioarrin_reg = base + p->ioarrin_reg;
	t->sense_uproc_interrupt_reg = base + p->sense_uproc_interrupt_reg;
	t->set_uproc_interrupt_reg = base + p->set_uproc_interrupt_reg;
	t->clr_uproc_interrupt_reg = base + p->clr_uproc_interrupt_reg;
}

/**
 * ipr_get_chip_cfg - Find adapter chip configuration
 * @dev_id:		PCI device id struct
 *
 * Return value:
 * 	ptr to chip config on success / NULL on failure
 **/
static const struct ipr_chip_cfg_t * __devinit
ipr_get_chip_cfg(const struct pci_device_id *dev_id)
{
	int i;

	if (dev_id->driver_data)
		return (const struct ipr_chip_cfg_t *)dev_id->driver_data;

	for (i = 0; i < ARRAY_SIZE(ipr_chip); i++)
		if (ipr_chip[i].vendor == dev_id->vendor &&
		    ipr_chip[i].device == dev_id->device)
			return ipr_chip[i].cfg;
	return NULL;
}

/**
 * ipr_probe_ioa - Allocates memory and does first stage of initialization
 * @pdev:		PCI device struct
 * @dev_id:		PCI device id struct
 *
 * Return value:
 * 	0 on success / non-zero on failure
 **/
static int __devinit ipr_probe_ioa(struct pci_dev *pdev,
				   const struct pci_device_id *dev_id)
{
	struct ipr_ioa_cfg *ioa_cfg;
	struct Scsi_Host *host;
	unsigned long ipr_regs_pci;
	void __iomem *ipr_regs;
	u32 rc = PCIBIOS_SUCCESSFUL;
	volatile u32 mask, uproc;

	ENTER;

	if ((rc = pci_enable_device(pdev))) {
		dev_err(&pdev->dev, "Cannot enable adapter\n");
		goto out;
	}

	dev_info(&pdev->dev, "Found IOA with IRQ: %d\n", pdev->irq);

	host = scsi_host_alloc(&driver_template, sizeof(*ioa_cfg));

	if (!host) {
		dev_err(&pdev->dev, "call to scsi_host_alloc failed!\n");
		rc = -ENOMEM;
		goto out_disable;
	}

	ioa_cfg = (struct ipr_ioa_cfg *)host->hostdata;
	memset(ioa_cfg, 0, sizeof(struct ipr_ioa_cfg));

	ioa_cfg->chip_cfg = ipr_get_chip_cfg(dev_id);

	if (!ioa_cfg->chip_cfg) {
		dev_err(&pdev->dev, "Unknown adapter chipset 0x%04X 0x%04X\n",
			dev_id->vendor, dev_id->device);
		goto out_scsi_host_put;
	}

	ipr_regs_pci = pci_resource_start(pdev, 0);

	rc = pci_request_regions(pdev, IPR_NAME);
	if (rc < 0) {
		dev_err(&pdev->dev,
			"Couldn't register memory range of registers\n");
		goto out_scsi_host_put;
	}

	ipr_regs = ioremap(ipr_regs_pci, pci_resource_len(pdev, 0));

	if (!ipr_regs) {
		dev_err(&pdev->dev,
			"Couldn't map memory range of registers\n");
		rc = -ENOMEM;
		goto out_release_regions;
	}

	ioa_cfg->hdw_dma_regs = ipr_regs;
	ioa_cfg->hdw_dma_regs_pci = ipr_regs_pci;
	ioa_cfg->ioa_mailbox = ioa_cfg->chip_cfg->mailbox + ipr_regs;

	ipr_init_ioa_cfg(ioa_cfg, host, pdev);

	pci_set_master(pdev);

	rc = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
	if (rc < 0) {
		dev_err(&pdev->dev, "Failed to set PCI DMA mask\n");
		goto cleanup_nomem;
	}

	rc = pci_write_config_byte(pdev, PCI_CACHE_LINE_SIZE,
				   ioa_cfg->chip_cfg->cache_line_size);

	if (rc != PCIBIOS_SUCCESSFUL) {
		dev_err(&pdev->dev, "Write of cache line size failed\n");
		rc = -EIO;
		goto cleanup_nomem;
	}

	/* Save away PCI config space for use following IOA reset */
	rc = pci_save_state(pdev);

	if (rc != PCIBIOS_SUCCESSFUL) {
		dev_err(&pdev->dev, "Failed to save PCI config space\n");
		rc = -EIO;
		goto cleanup_nomem;
	}

	if ((rc = ipr_save_pcix_cmd_reg(ioa_cfg)))
		goto cleanup_nomem;

	if ((rc = ipr_set_pcix_cmd_reg(ioa_cfg)))
		goto cleanup_nomem;

	rc = ipr_alloc_mem(ioa_cfg);
	if (rc < 0) {
		dev_err(&pdev->dev,
			"Couldn't allocate enough memory for device driver!\n");
		goto cleanup_nomem;
	}

	/*
	 * If HRRQ updated interrupt is not masked, or reset alert is set,
	 * the card is in an unknown state and needs a hard reset
	 */
	mask = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
	uproc = readl(ioa_cfg->regs.sense_uproc_interrupt_reg);
	if ((mask & IPR_PCII_HRRQ_UPDATED) == 0 || (uproc & IPR_UPROCI_RESET_ALERT))
		ioa_cfg->needs_hard_reset = 1;

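	/*
	 * Mask and clear everything except the transition-to-operational
	 * interrupt before the IRQ handler is registered.
	 */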
6433 	rc = request_irq(pdev->irq, ipr_isr, SA_SHIRQ, IPR_NAME, ioa_cfg);
6434 
6435 	if (rc) {
6436 		dev_err(&pdev->dev, "Couldn't register IRQ %d! rc=%d\n",
6437 			pdev->irq, rc);
6438 		goto cleanup_nolog;
6439 	}
6440 
6441 	spin_lock(&ipr_driver_lock);
6442 	list_add_tail(&ioa_cfg->queue, &ipr_ioa_head);
6443 	spin_unlock(&ipr_driver_lock);
6444 
6445 	LEAVE;
6446 out:
6447 	return rc;
6448 
6449 cleanup_nolog:
6450 	ipr_free_mem(ioa_cfg);
6451 cleanup_nomem:
6452 	iounmap(ipr_regs);
6453 out_release_regions:
6454 	pci_release_regions(pdev);
6455 out_scsi_host_put:
6456 	scsi_host_put(host);
6457 out_disable:
6458 	pci_disable_device(pdev);
6459 	goto out;
6460 }
6461 
6462 /**
6463  * ipr_scan_vsets - Scans for VSET devices
6464  * @ioa_cfg:	ioa config struct
6465  *
6466  * Description: Since the VSET resources do not follow SAM in that we can have
6467  * sparse LUNs with no LUN 0, we have to scan for these ourselves.
6468  *
6469  * Return value:
6470  * 	none
6471  **/
6472 static void ipr_scan_vsets(struct ipr_ioa_cfg *ioa_cfg)
6473 {
6474 	int target, lun;
6475 
6476 	for (target = 0; target < IPR_MAX_NUM_TARGETS_PER_BUS; target++)
6477 		for (lun = 0; lun < IPR_MAX_NUM_VSET_LUNS_PER_TARGET; lun++ )
6478 			scsi_add_device(ioa_cfg->host, IPR_VSET_BUS, target, lun);
6479 }
6480 
6481 /**
6482  * ipr_initiate_ioa_bringdown - Bring down an adapter
6483  * @ioa_cfg:		ioa config struct
6484  * @shutdown_type:	shutdown type
6485  *
6486  * Description: This function will initiate bringing down the adapter.
6487  * This consists of issuing an IOA shutdown to the adapter
6488  * to flush the cache, and running BIST.
6489  * If the caller needs to wait on the completion of the reset,
6490  * the caller must sleep on the reset_wait_q.
6491  *
6492  * Return value:
6493  * 	none
6494  **/
6495 static void ipr_initiate_ioa_bringdown(struct ipr_ioa_cfg *ioa_cfg,
6496 				       enum ipr_shutdown_type shutdown_type)
6497 {
6498 	ENTER;
6499 	if (ioa_cfg->sdt_state == WAIT_FOR_DUMP)
6500 		ioa_cfg->sdt_state = ABORT_DUMP;
6501 	ioa_cfg->reset_retries = 0;
6502 	ioa_cfg->in_ioa_bringdown = 1;
6503 	ipr_initiate_ioa_reset(ioa_cfg, shutdown_type);
6504 	LEAVE;
6505 }
6506 
6507 /**
6508  * __ipr_remove - Remove a single adapter
6509  * @pdev:	pci device struct
6510  *
6511  * Adapter hot plug remove entry point.
6512  *
6513  * Return value:
6514  * 	none
6515  **/
6516 static void __ipr_remove(struct pci_dev *pdev)
6517 {
6518 	unsigned long host_lock_flags = 0;
6519 	struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
6520 	ENTER;
6521 
6522 	spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
6523 	ipr_initiate_ioa_bringdown(ioa_cfg, IPR_SHUTDOWN_NORMAL);
6524 
6525 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
6526 	wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
6527 	flush_scheduled_work();
6528 	spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
6529 
6530 	spin_lock(&ipr_driver_lock);
6531 	list_del(&ioa_cfg->queue);
6532 	spin_unlock(&ipr_driver_lock);
6533 
6534 	if (ioa_cfg->sdt_state == ABORT_DUMP)
6535 		ioa_cfg->sdt_state = WAIT_FOR_DUMP;
6536 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
6537 
6538 	ipr_free_all_resources(ioa_cfg);
6539 
6540 	LEAVE;
6541 }
6542 
6543 /**
6544  * ipr_remove - IOA hot plug remove entry point
6545  * @pdev:	pci device struct
6546  *
6547  * Adapter hot plug remove entry point.
6548  *
6549  * Return value:
6550  * 	none
6551  **/
6552 static void ipr_remove(struct pci_dev *pdev)
6553 {
6554 	struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
6555 
6556 	ENTER;
6557 
6558 	ipr_remove_trace_file(&ioa_cfg->host->shost_classdev.kobj,
6559 			      &ipr_trace_attr);
6560 	ipr_remove_dump_file(&ioa_cfg->host->shost_classdev.kobj,
6561 			     &ipr_dump_attr);
6562 	scsi_remove_host(ioa_cfg->host);
6563 
6564 	__ipr_remove(pdev);
6565 
6566 	LEAVE;
6567 }
6568 
/**
 * ipr_probe - Adapter hot plug add entry point
 * @pdev:	PCI device struct
 * @dev_id:	PCI device id struct
 *
 * Return value:
 * 	0 on success / non-zero on failure
 **/
static int __devinit ipr_probe(struct pci_dev *pdev,
			       const struct pci_device_id *dev_id)
{
	struct ipr_ioa_cfg *ioa_cfg;
	int rc;

	rc = ipr_probe_ioa(pdev, dev_id);

	if (rc)
		return rc;

	ioa_cfg = pci_get_drvdata(pdev);
	rc = ipr_probe_ioa_part2(ioa_cfg);

	if (rc) {
		__ipr_remove(pdev);
		return rc;
	}

	rc = scsi_add_host(ioa_cfg->host, &pdev->dev);

	if (rc) {
		__ipr_remove(pdev);
		return rc;
	}

	rc = ipr_create_trace_file(&ioa_cfg->host->shost_classdev.kobj,
				   &ipr_trace_attr);

	if (rc) {
		scsi_remove_host(ioa_cfg->host);
		__ipr_remove(pdev);
		return rc;
	}

	rc = ipr_create_dump_file(&ioa_cfg->host->shost_classdev.kobj,
				  &ipr_dump_attr);

	if (rc) {
		ipr_remove_trace_file(&ioa_cfg->host->shost_classdev.kobj,
				      &ipr_trace_attr);
		scsi_remove_host(ioa_cfg->host);
		__ipr_remove(pdev);
		return rc;
	}

	scsi_scan_host(ioa_cfg->host);
	ipr_scan_vsets(ioa_cfg);
	scsi_add_device(ioa_cfg->host, IPR_IOA_BUS, IPR_IOA_TARGET, IPR_IOA_LUN);
	ioa_cfg->allow_ml_add_del = 1;
	ioa_cfg->host->max_channel = IPR_VSET_BUS;
	schedule_work(&ioa_cfg->work_q);
	return 0;
}

/**
 * ipr_shutdown - Shutdown handler.
 * @pdev:	pci device struct
 *
 * This function is invoked upon system shutdown/reboot. It will issue
 * an adapter shutdown to the adapter to flush the write cache.
 *
 * Return value:
 * 	none
 **/
static void ipr_shutdown(struct pci_dev *pdev)
{
	struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
	unsigned long lock_flags = 0;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	ipr_initiate_ioa_bringdown(ioa_cfg, IPR_SHUTDOWN_NORMAL);
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
	wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
}

static struct pci_device_id ipr_pci_table[] __devinitdata = {
	{ PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_5702,
		0, 0, (kernel_ulong_t)&ipr_chip_cfg[0] },
	{ PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_5703,
		0, 0, (kernel_ulong_t)&ipr_chip_cfg[0] },
	{ PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_573D,
		0, 0, (kernel_ulong_t)&ipr_chip_cfg[0] },
	{ PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_573E,
		0, 0, (kernel_ulong_t)&ipr_chip_cfg[0] },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571B,
		0, 0, (kernel_ulong_t)&ipr_chip_cfg[0] },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572E,
		0, 0, (kernel_ulong_t)&ipr_chip_cfg[0] },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571A,
		0, 0, (kernel_ulong_t)&ipr_chip_cfg[0] },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_575B,
		0, 0, (kernel_ulong_t)&ipr_chip_cfg[0] },
	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572A,
		0, 0, (kernel_ulong_t)&ipr_chip_cfg[0] },
	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572B,
		0, 0, (kernel_ulong_t)&ipr_chip_cfg[0] },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572A,
		0, 0, (kernel_ulong_t)&ipr_chip_cfg[0] },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572B,
		0, 0, (kernel_ulong_t)&ipr_chip_cfg[0] },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_SNIPE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_2780,
		0, 0, (kernel_ulong_t)&ipr_chip_cfg[1] },
	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571E,
		0, 0, (kernel_ulong_t)&ipr_chip_cfg[1] },
	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571F,
		0, 0, (kernel_ulong_t)&ipr_chip_cfg[1] },
	{ }
};
MODULE_DEVICE_TABLE(pci, ipr_pci_table);

static struct pci_error_handlers ipr_err_handler = {
	.error_detected = ipr_pci_error_detected,
	.slot_reset = ipr_pci_slot_reset,
};

static struct pci_driver ipr_driver = {
	.name = IPR_NAME,
	.id_table = ipr_pci_table,
	.probe = ipr_probe,
	.remove = ipr_remove,
	.shutdown = ipr_shutdown,
	.err_handler = &ipr_err_handler,
};

/**
 * ipr_init - Module entry point
 *
 * Return value:
 * 	0 on success / negative value on failure
 **/
static int __init ipr_init(void)
{
	ipr_info("IBM Power RAID SCSI Device Driver version: %s %s\n",
		 IPR_DRIVER_VERSION, IPR_DRIVER_DATE);

	return pci_module_init(&ipr_driver);
}

/**
 * ipr_exit - Module unload
 *
 * Module unload entry point.
 *
 * Return value:
 * 	none
 **/
static void __exit ipr_exit(void)
{
	pci_unregister_driver(&ipr_driver);
}

module_init(ipr_init);
module_exit(ipr_exit);