/*
 * ipr.c -- driver for IBM Power Linux RAID adapters
 *
 * Written By: Brian King <brking@us.ibm.com>, IBM Corporation
 *
 * Copyright (C) 2003, 2004 IBM Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 *
 */

/*
 * Notes:
 *
 * This driver is used to control the following SCSI adapters:
 *
 * IBM iSeries: 5702, 5703, 2780, 5709, 570A, 570B
 *
 * IBM pSeries: PCI-X Dual Channel Ultra 320 SCSI RAID Adapter
 *              PCI-X Dual Channel Ultra 320 SCSI Adapter
 *              PCI-X Dual Channel Ultra 320 SCSI RAID Enablement Card
 *              Embedded SCSI adapter on p615 and p655 systems
 *
 * Supported Hardware Features:
 *	- Ultra 320 SCSI controller
 *	- PCI-X host interface
 *	- Embedded PowerPC RISC Processor and Hardware XOR DMA Engine
 *	- Non-Volatile Write Cache
 *	- Supports attachment of non-RAID disks, tape, and optical devices
 *	- RAID Levels 0, 5, 10
 *	- Hot spare
 *	- Background Parity Checking
 *	- Background Data Scrubbing
 *	- Ability to increase the capacity of an existing RAID 5 disk array
 *		by adding disks
 *
 * Driver Features:
 *	- Tagged command queuing
 *	- Adapter microcode download
 *	- PCI hot plug
 *	- SCSI device hot plug
 *
 */

#include <linux/config.h>
#include <linux/fs.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/ioport.h>
#include <linux/delay.h>
#include <linux/pci.h>
#include <linux/wait.h>
#include <linux/spinlock.h>
#include <linux/sched.h>
#include <linux/interrupt.h>
#include <linux/blkdev.h>
#include <linux/firmware.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <asm/io.h>
#include <asm/irq.h>
#include <asm/processor.h>
#include <scsi/scsi.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_tcq.h>
#include <scsi/scsi_eh.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_request.h>
#include "ipr.h"

/*
 *   Global Data
 */
static struct list_head ipr_ioa_head = LIST_HEAD_INIT(ipr_ioa_head);
static unsigned int ipr_log_level = IPR_DEFAULT_LOG_LEVEL;
static unsigned int ipr_max_speed = 1;
static int ipr_testmode = 0;
static unsigned int ipr_fastfail = 0;
static unsigned int ipr_transop_timeout = IPR_OPERATIONAL_TIMEOUT;
static unsigned int ipr_enable_cache = 1;
static unsigned int ipr_debug = 0;
static int ipr_auto_create = 1;
static DEFINE_SPINLOCK(ipr_driver_lock);

/* This table describes the differences between DMA controller chips */
static const struct ipr_chip_cfg_t ipr_chip_cfg[] = {
	{ /* Gemstone, Citrine, and Obsidian */
		.mailbox = 0x0042C,
		.cache_line_size = 0x20,
		{
			.set_interrupt_mask_reg = 0x0022C,
			.clr_interrupt_mask_reg = 0x00230,
			.sense_interrupt_mask_reg = 0x0022C,
			.clr_interrupt_reg = 0x00228,
			.sense_interrupt_reg = 0x00224,
			.ioarrin_reg = 0x00404,
			.sense_uproc_interrupt_reg = 0x00214,
			.set_uproc_interrupt_reg = 0x00214,
			.clr_uproc_interrupt_reg = 0x00218
		}
	},
	{ /* Snipe and Scamp */
		.mailbox = 0x0052C,
		.cache_line_size = 0x20,
		{
			.set_interrupt_mask_reg = 0x00288,
			.clr_interrupt_mask_reg = 0x0028C,
			.sense_interrupt_mask_reg = 0x00288,
			.clr_interrupt_reg = 0x00284,
			.sense_interrupt_reg = 0x00280,
			.ioarrin_reg = 0x00504,
			.sense_uproc_interrupt_reg = 0x00290,
			.set_uproc_interrupt_reg = 0x00290,
			.clr_uproc_interrupt_reg = 0x00294
		}
	},
};

static const struct ipr_chip_t ipr_chip[] = {
	{ PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE, &ipr_chip_cfg[0] },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE, &ipr_chip_cfg[0] },
	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN, &ipr_chip_cfg[0] },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN, &ipr_chip_cfg[0] },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_SNIPE, &ipr_chip_cfg[1] },
	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP, &ipr_chip_cfg[1] }
};

static int ipr_max_bus_speeds[] = {
	IPR_80MBs_SCSI_RATE, IPR_U160_SCSI_RATE, IPR_U320_SCSI_RATE
};

MODULE_AUTHOR("Brian King <brking@us.ibm.com>");
MODULE_DESCRIPTION("IBM Power RAID SCSI Adapter Driver");
module_param_named(max_speed, ipr_max_speed, uint, 0);
MODULE_PARM_DESC(max_speed, "Maximum bus speed (0-2). Default: 1=U160. Speeds: 0=80 MB/s, 1=U160, 2=U320");
module_param_named(log_level, ipr_log_level, uint, 0);
MODULE_PARM_DESC(log_level, "Set to 0 - 4 for increasing verbosity of device driver");
module_param_named(testmode, ipr_testmode, int, 0);
MODULE_PARM_DESC(testmode, "DANGEROUS!!! Allows unsupported configurations");
module_param_named(fastfail, ipr_fastfail, int, 0);
MODULE_PARM_DESC(fastfail, "Reduce timeouts and retries");
module_param_named(transop_timeout, ipr_transop_timeout, int, 0);
MODULE_PARM_DESC(transop_timeout, "Time in seconds to wait for adapter to come operational (default: 300)");
module_param_named(enable_cache, ipr_enable_cache, int, 0);
MODULE_PARM_DESC(enable_cache, "Enable adapter's non-volatile write cache (default: 1)");
module_param_named(debug, ipr_debug, int, 0);
MODULE_PARM_DESC(debug, "Enable device driver debugging logging. Set to 1 to enable. (default: 0)");
module_param_named(auto_create, ipr_auto_create, int, 0);
MODULE_PARM_DESC(auto_create, "Auto-create single device RAID 0 arrays when initialized (default: 1)");
MODULE_LICENSE("GPL");
MODULE_VERSION(IPR_DRIVER_VERSION);
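
/*
 * Illustrative usage (not part of the source): the module parameters
 * above are set at module load time, e.g.
 *
 *	modprobe ipr max_speed=2 log_level=4 enable_cache=0
 *
 * or, for a built-in driver, on the kernel command line as
 * ipr.max_speed=2 ipr.log_level=4 ipr.enable_cache=0.
 */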

static const char *ipr_gpdd_dev_end_states[] = {
	"Command complete",
	"Terminated by host",
	"Terminated by device reset",
	"Terminated by bus reset",
	"Unknown",
	"Command not started"
};

static const char *ipr_gpdd_dev_bus_phases[] = {
	"Bus free",
	"Arbitration",
	"Selection",
	"Message out",
	"Command",
	"Message in",
	"Data out",
	"Data in",
	"Status",
	"Reselection",
	"Unknown"
};

/* A constant array of IOASCs/URCs/Error Messages */
static const
struct ipr_error_table_t ipr_error_table[] = {
	{0x00000000, 1, 1,
	"8155: An unknown error was received"},
	{0x00330000, 0, 0,
	"Soft underlength error"},
	{0x005A0000, 0, 0,
	"Command to be cancelled not found"},
	{0x00808000, 0, 0,
	"Qualified success"},
	{0x01080000, 1, 1,
	"FFFE: Soft device bus error recovered by the IOA"},
	{0x01170600, 0, 1,
	"FFF9: Device sector reassign successful"},
	{0x01170900, 0, 1,
	"FFF7: Media error recovered by device rewrite procedures"},
	{0x01180200, 0, 1,
	"7001: IOA sector reassignment successful"},
	{0x01180500, 0, 1,
	"FFF9: Soft media error. Sector reassignment recommended"},
	{0x01180600, 0, 1,
	"FFF7: Media error recovered by IOA rewrite procedures"},
	{0x01418000, 0, 1,
	"FF3D: Soft PCI bus error recovered by the IOA"},
	{0x01440000, 1, 1,
	"FFF6: Device hardware error recovered by the IOA"},
	{0x01448100, 0, 1,
	"FFF6: Device hardware error recovered by the device"},
	{0x01448200, 1, 1,
	"FF3D: Soft IOA error recovered by the IOA"},
	{0x01448300, 0, 1,
	"FFFA: Undefined device response recovered by the IOA"},
	{0x014A0000, 1, 1,
	"FFF6: Device bus error, message or command phase"},
	{0x015D0000, 0, 1,
	"FFF6: Failure prediction threshold exceeded"},
	{0x015D9200, 0, 1,
	"8009: Impending cache battery pack failure"},
	{0x02040400, 0, 0,
	"34FF: Disk device format in progress"},
	{0x023F0000, 0, 0,
	"Synchronization required"},
	{0x024E0000, 0, 0,
	"Not ready, IOA shutdown"},
	{0x025A0000, 0, 0,
	"Not ready, IOA has been shutdown"},
	{0x02670100, 0, 1,
	"3020: Storage subsystem configuration error"},
	{0x03110B00, 0, 0,
	"FFF5: Medium error, data unreadable, recommend reassign"},
	{0x03110C00, 0, 0,
	"7000: Medium error, data unreadable, do not reassign"},
	{0x03310000, 0, 1,
	"FFF3: Disk media format bad"},
	{0x04050000, 0, 1,
	"3002: Addressed device failed to respond to selection"},
	{0x04080000, 1, 1,
	"3100: Device bus error"},
	{0x04080100, 0, 1,
	"3109: IOA timed out a device command"},
	{0x04088000, 0, 0,
	"3120: SCSI bus is not operational"},
	{0x04118000, 0, 1,
	"9000: IOA reserved area data check"},
	{0x04118100, 0, 1,
	"9001: IOA reserved area invalid data pattern"},
	{0x04118200, 0, 1,
	"9002: IOA reserved area LRC error"},
	{0x04320000, 0, 1,
	"102E: Out of alternate sectors for disk storage"},
	{0x04330000, 1, 1,
	"FFF4: Data transfer underlength error"},
	{0x04338000, 1, 1,
	"FFF4: Data transfer overlength error"},
	{0x043E0100, 0, 1,
	"3400: Logical unit failure"},
	{0x04408500, 0, 1,
	"FFF4: Device microcode is corrupt"},
	{0x04418000, 1, 1,
	"8150: PCI bus error"},
	{0x04430000, 1, 0,
	"Unsupported device bus message received"},
	{0x04440000, 1, 1,
	"FFF4: Disk device problem"},
	{0x04448200, 1, 1,
	"8150: Permanent IOA failure"},
	{0x04448300, 0, 1,
	"3010: Disk device returned wrong response to IOA"},
	{0x04448400, 0, 1,
	"8151: IOA microcode error"},
	{0x04448500, 0, 0,
	"Device bus status error"},
	{0x04448600, 0, 1,
	"8157: IOA error requiring IOA reset to recover"},
	{0x04490000, 0, 0,
	"Message reject received from the device"},
	{0x04449200, 0, 1,
	"8008: A permanent cache battery pack failure occurred"},
	{0x0444A000, 0, 1,
	"9090: Disk unit has been modified after the last known status"},
	{0x0444A200, 0, 1,
	"9081: IOA detected device error"},
	{0x0444A300, 0, 1,
	"9082: IOA detected device error"},
	{0x044A0000, 1, 1,
	"3110: Device bus error, message or command phase"},
	{0x04670400, 0, 1,
	"9091: Incorrect hardware configuration change has been detected"},
	{0x04678000, 0, 1,
	"9073: Invalid multi-adapter configuration"},
	{0x046E0000, 0, 1,
	"FFF4: Command to logical unit failed"},
	{0x05240000, 1, 0,
	"Illegal request, invalid request type or request packet"},
	{0x05250000, 0, 0,
	"Illegal request, invalid resource handle"},
	{0x05258000, 0, 0,
	"Illegal request, commands not allowed to this device"},
	{0x05258100, 0, 0,
	"Illegal request, command not allowed to a secondary adapter"},
	{0x05260000, 0, 0,
	"Illegal request, invalid field in parameter list"},
	{0x05260100, 0, 0,
	"Illegal request, parameter not supported"},
	{0x05260200, 0, 0,
	"Illegal request, parameter value invalid"},
	{0x052C0000, 0, 0,
	"Illegal request, command sequence error"},
	{0x052C8000, 1, 0,
	"Illegal request, dual adapter support not enabled"},
	{0x06040500, 0, 1,
	"9031: Array protection temporarily suspended, protection resuming"},
	{0x06040600, 0, 1,
	"9040: Array protection temporarily suspended, protection resuming"},
	{0x06290000, 0, 1,
	"FFFB: SCSI bus was reset"},
	{0x06290500, 0, 0,
	"FFFE: SCSI bus transition to single ended"},
	{0x06290600, 0, 0,
	"FFFE: SCSI bus transition to LVD"},
	{0x06298000, 0, 1,
	"FFFB: SCSI bus was reset by another initiator"},
	{0x063F0300, 0, 1,
	"3029: A device replacement has occurred"},
	{0x064C8000, 0, 1,
	"9051: IOA cache data exists for a missing or failed device"},
	{0x064C8100, 0, 1,
	"9055: Auxiliary cache IOA contains cache data needed by the primary IOA"},
	{0x06670100, 0, 1,
	"9025: Disk unit is not supported at its physical location"},
	{0x06670600, 0, 1,
	"3020: IOA detected a SCSI bus configuration error"},
	{0x06678000, 0, 1,
	"3150: SCSI bus configuration error"},
	{0x06678100, 0, 1,
	"9074: Asymmetric advanced function disk configuration"},
	{0x06690200, 0, 1,
	"9041: Array protection temporarily suspended"},
	{0x06698200, 0, 1,
	"9042: Corrupt array parity detected on specified device"},
	{0x066B0200, 0, 1,
	"9030: Array no longer protected due to missing or failed disk unit"},
	{0x066B8000, 0, 1,
	"9071: Link operational transition"},
	{0x066B8100, 0, 1,
	"9072: Link not operational transition"},
	{0x066B8200, 0, 1,
	"9032: Array exposed but still protected"},
	{0x07270000, 0, 0,
	"Failure due to other device"},
	{0x07278000, 0, 1,
	"9008: IOA does not support functions expected by devices"},
	{0x07278100, 0, 1,
	"9010: Cache data associated with attached devices cannot be found"},
	{0x07278200, 0, 1,
	"9011: Cache data belongs to devices other than those attached"},
	{0x07278400, 0, 1,
	"9020: Array missing 2 or more devices with only 1 device present"},
	{0x07278500, 0, 1,
	"9021: Array missing 2 or more devices with 2 or more devices present"},
	{0x07278600, 0, 1,
	"9022: Exposed array is missing a required device"},
	{0x07278700, 0, 1,
	"9023: Array member(s) not at required physical locations"},
	{0x07278800, 0, 1,
	"9024: Array not functional due to present hardware configuration"},
	{0x07278900, 0, 1,
	"9026: Array not functional due to present hardware configuration"},
	{0x07278A00, 0, 1,
	"9027: Array is missing a device and parity is out of sync"},
	{0x07278B00, 0, 1,
	"9028: Maximum number of arrays already exist"},
	{0x07278C00, 0, 1,
	"9050: Required cache data cannot be located for a disk unit"},
	{0x07278D00, 0, 1,
	"9052: Cache data exists for a device that has been modified"},
	{0x07278F00, 0, 1,
	"9054: IOA resources not available due to previous problems"},
	{0x07279100, 0, 1,
	"9092: Disk unit requires initialization before use"},
	{0x07279200, 0, 1,
	"9029: Incorrect hardware configuration change has been detected"},
	{0x07279600, 0, 1,
	"9060: One or more disk pairs are missing from an array"},
	{0x07279700, 0, 1,
	"9061: One or more disks are missing from an array"},
	{0x07279800, 0, 1,
	"9062: One or more disks are missing from an array"},
	{0x07279900, 0, 1,
	"9063: Maximum number of functional arrays has been exceeded"},
	{0x0B260000, 0, 0,
	"Aborted command, invalid descriptor"},
	{0x0B5A0000, 0, 0,
	"Command terminated by host"}
};

static const struct ipr_ses_table_entry ipr_ses_table[] = {
	{ "2104-DL1        ", "XXXXXXXXXXXXXXXX", 80 },
	{ "2104-TL1        ", "XXXXXXXXXXXXXXXX", 80 },
	{ "HSBP07M P U2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* Hidive 7 slot */
	{ "HSBP05M P U2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* Hidive 5 slot */
	{ "HSBP05M S U2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* Bowtie */
	{ "HSBP06E ASU2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* MartinFenning */
	{ "2104-DU3        ", "XXXXXXXXXXXXXXXX", 160 },
	{ "2104-TU3        ", "XXXXXXXXXXXXXXXX", 160 },
	{ "HSBP04C RSU2SCSI", "XXXXXXX*XXXXXXXX", 160 },
	{ "HSBP06E RSU2SCSI", "XXXXXXX*XXXXXXXX", 160 },
	{ "St  V1S2        ", "XXXXXXXXXXXXXXXX", 160 },
	{ "HSBPD4M  PU3SCSI", "XXXXXXX*XXXXXXXX", 160 },
	{ "VSBPD1H   U3SCSI", "XXXXXXX*XXXXXXXX", 160 }
};

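/*
 * Note on the table above: ipr_find_ses_entry() below treats an 'X' in
 * compare_product_id_byte as "this product ID byte must match" and any
 * other character (such as '*') as a don't-care that always matches.
 * "XXXXXXX*XXXXXXXX" therefore compares every byte except byte 7.
 */
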
/*
 *  Function Prototypes
 */
static int ipr_reset_alert(struct ipr_cmnd *);
static void ipr_process_ccn(struct ipr_cmnd *);
static void ipr_process_error(struct ipr_cmnd *);
static void ipr_reset_ioa_job(struct ipr_cmnd *);
static void ipr_initiate_ioa_reset(struct ipr_ioa_cfg *,
				   enum ipr_shutdown_type);

#ifdef CONFIG_SCSI_IPR_TRACE
/**
 * ipr_trc_hook - Add a trace entry to the driver trace
 * @ipr_cmd:	ipr command struct
 * @type:		trace type
 * @add_data:	additional data
 *
 * Return value:
 * 	none
 **/
static void ipr_trc_hook(struct ipr_cmnd *ipr_cmd,
			 u8 type, u32 add_data)
{
	struct ipr_trace_entry *trace_entry;
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;

	trace_entry = &ioa_cfg->trace[ioa_cfg->trace_index++];
	trace_entry->time = jiffies;
	trace_entry->op_code = ipr_cmd->ioarcb.cmd_pkt.cdb[0];
	trace_entry->type = type;
	trace_entry->cmd_index = ipr_cmd->cmd_index;
	trace_entry->res_handle = ipr_cmd->ioarcb.res_handle;
	trace_entry->u.add_data = add_data;
}
#else
#define ipr_trc_hook(ipr_cmd, type, add_data) do { } while(0)
#endif

/**
 * ipr_reinit_ipr_cmnd - Re-initialize an IPR Cmnd block for reuse
 * @ipr_cmd:	ipr command struct
 *
 * Return value:
 * 	none
 **/
static void ipr_reinit_ipr_cmnd(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
	struct ipr_ioasa *ioasa = &ipr_cmd->ioasa;

	memset(&ioarcb->cmd_pkt, 0, sizeof(struct ipr_cmd_pkt));
	ioarcb->write_data_transfer_length = 0;
	ioarcb->read_data_transfer_length = 0;
	ioarcb->write_ioadl_len = 0;
	ioarcb->read_ioadl_len = 0;
	ioasa->ioasc = 0;
	ioasa->residual_data_len = 0;

	ipr_cmd->scsi_cmd = NULL;
	ipr_cmd->sense_buffer[0] = 0;
	ipr_cmd->dma_use_sg = 0;
}

/**
 * ipr_init_ipr_cmnd - Initialize an IPR Cmnd block
 * @ipr_cmd:	ipr command struct
 *
 * Return value:
 * 	none
 **/
static void ipr_init_ipr_cmnd(struct ipr_cmnd *ipr_cmd)
{
	ipr_reinit_ipr_cmnd(ipr_cmd);
	ipr_cmd->u.scratch = 0;
	ipr_cmd->sibling = NULL;
	init_timer(&ipr_cmd->timer);
}

/**
 * ipr_get_free_ipr_cmnd - Get a free IPR Cmnd block
 * @ioa_cfg:	ioa config struct
 *
 * Return value:
 * 	pointer to ipr command struct
 **/
static
struct ipr_cmnd *ipr_get_free_ipr_cmnd(struct ipr_ioa_cfg *ioa_cfg)
{
	struct ipr_cmnd *ipr_cmd;

	ipr_cmd = list_entry(ioa_cfg->free_q.next, struct ipr_cmnd, queue);
	list_del(&ipr_cmd->queue);
	ipr_init_ipr_cmnd(ipr_cmd);

	return ipr_cmd;
}

/**
 * ipr_unmap_sglist - Unmap scatterlist if mapped
 * @ioa_cfg:	ioa config struct
 * @ipr_cmd:	ipr command struct
 *
 * Return value:
 * 	nothing
 **/
static void ipr_unmap_sglist(struct ipr_ioa_cfg *ioa_cfg,
			     struct ipr_cmnd *ipr_cmd)
{
	struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;

	if (ipr_cmd->dma_use_sg) {
		if (scsi_cmd->use_sg > 0) {
			pci_unmap_sg(ioa_cfg->pdev, scsi_cmd->request_buffer,
				     scsi_cmd->use_sg,
				     scsi_cmd->sc_data_direction);
		} else {
			pci_unmap_single(ioa_cfg->pdev, ipr_cmd->dma_handle,
					 scsi_cmd->request_bufflen,
					 scsi_cmd->sc_data_direction);
		}
	}
}

/**
 * ipr_mask_and_clear_interrupts - Mask all and clear specified interrupts
 * @ioa_cfg:	ioa config struct
 * @clr_ints:     interrupts to clear
 *
 * This function masks all interrupts on the adapter, then clears the
 * interrupts specified in the mask
 *
 * Return value:
 * 	none
 **/
static void ipr_mask_and_clear_interrupts(struct ipr_ioa_cfg *ioa_cfg,
					  u32 clr_ints)
{
	volatile u32 int_reg;

	/* Stop new interrupts */
	ioa_cfg->allow_interrupts = 0;

	/* Set interrupt mask to stop all new interrupts */
	writel(~0, ioa_cfg->regs.set_interrupt_mask_reg);

	/* Clear any pending interrupts */
	writel(clr_ints, ioa_cfg->regs.clr_interrupt_reg);
	int_reg = readl(ioa_cfg->regs.sense_interrupt_reg);	/* flush the posted writes above */
}

/**
 * ipr_save_pcix_cmd_reg - Save PCI-X command register
 * @ioa_cfg:	ioa config struct
 *
 * Return value:
 * 	0 on success / -EIO on failure
 **/
static int ipr_save_pcix_cmd_reg(struct ipr_ioa_cfg *ioa_cfg)
{
	int pcix_cmd_reg = pci_find_capability(ioa_cfg->pdev, PCI_CAP_ID_PCIX);

	if (pcix_cmd_reg == 0) {
		dev_err(&ioa_cfg->pdev->dev, "Failed to save PCI-X command register\n");
		return -EIO;
	}

	if (pci_read_config_word(ioa_cfg->pdev, pcix_cmd_reg + PCI_X_CMD,
				 &ioa_cfg->saved_pcix_cmd_reg) != PCIBIOS_SUCCESSFUL) {
		dev_err(&ioa_cfg->pdev->dev, "Failed to save PCI-X command register\n");
		return -EIO;
	}

	ioa_cfg->saved_pcix_cmd_reg |= PCI_X_CMD_DPERR_E | PCI_X_CMD_ERO;
	return 0;
}

/**
 * ipr_set_pcix_cmd_reg - Setup PCI-X command register
 * @ioa_cfg:	ioa config struct
 *
 * Return value:
 * 	0 on success / -EIO on failure
 **/
static int ipr_set_pcix_cmd_reg(struct ipr_ioa_cfg *ioa_cfg)
{
	int pcix_cmd_reg = pci_find_capability(ioa_cfg->pdev, PCI_CAP_ID_PCIX);

	if (pcix_cmd_reg) {
		if (pci_write_config_word(ioa_cfg->pdev, pcix_cmd_reg + PCI_X_CMD,
					  ioa_cfg->saved_pcix_cmd_reg) != PCIBIOS_SUCCESSFUL) {
			dev_err(&ioa_cfg->pdev->dev, "Failed to setup PCI-X command register\n");
			return -EIO;
		}
	} else {
		dev_err(&ioa_cfg->pdev->dev,
			"Failed to setup PCI-X command register\n");
		return -EIO;
	}

	return 0;
}

/**
 * ipr_scsi_eh_done - mid-layer done function for aborted ops
 * @ipr_cmd:	ipr command struct
 *
 * This function is invoked by the interrupt handler for
 * ops generated by the SCSI mid-layer which are being aborted.
 *
 * Return value:
 * 	none
 **/
static void ipr_scsi_eh_done(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;

	scsi_cmd->result |= (DID_ERROR << 16);

	ipr_unmap_sglist(ioa_cfg, ipr_cmd);
	scsi_cmd->scsi_done(scsi_cmd);
	list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
}

/**
 * ipr_fail_all_ops - Fails all outstanding ops.
 * @ioa_cfg:	ioa config struct
 *
 * This function fails all outstanding ops.
 *
 * Return value:
 * 	none
 **/
static void ipr_fail_all_ops(struct ipr_ioa_cfg *ioa_cfg)
{
	struct ipr_cmnd *ipr_cmd, *temp;

	ENTER;
	list_for_each_entry_safe(ipr_cmd, temp, &ioa_cfg->pending_q, queue) {
		list_del(&ipr_cmd->queue);

		ipr_cmd->ioasa.ioasc = cpu_to_be32(IPR_IOASC_IOA_WAS_RESET);
		ipr_cmd->ioasa.ilid = cpu_to_be32(IPR_DRIVER_ILID);

		if (ipr_cmd->scsi_cmd)
			ipr_cmd->done = ipr_scsi_eh_done;

		ipr_trc_hook(ipr_cmd, IPR_TRACE_FINISH, IPR_IOASC_IOA_WAS_RESET);
		del_timer(&ipr_cmd->timer);
		ipr_cmd->done(ipr_cmd);
	}

	LEAVE;
}

/**
 * ipr_do_req -  Send driver initiated requests.
 * @ipr_cmd:		ipr command struct
 * @done:			done function
 * @timeout_func:	timeout function
 * @timeout:		timeout value
 *
 * This function sends the specified command to the adapter with the
 * timeout given. The done function is invoked on command completion.
 *
 * Return value:
 * 	none
 **/
static void ipr_do_req(struct ipr_cmnd *ipr_cmd,
		       void (*done) (struct ipr_cmnd *),
		       void (*timeout_func) (struct ipr_cmnd *), u32 timeout)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;

	list_add_tail(&ipr_cmd->queue, &ioa_cfg->pending_q);

	ipr_cmd->done = done;

	ipr_cmd->timer.data = (unsigned long) ipr_cmd;
	ipr_cmd->timer.expires = jiffies + timeout;
	ipr_cmd->timer.function = (void (*)(unsigned long))timeout_func;

	add_timer(&ipr_cmd->timer);

	ipr_trc_hook(ipr_cmd, IPR_TRACE_START, 0);

	/* Ensure the IOARCB is visible to the adapter before ringing the doorbell */
	mb();
	writel(be32_to_cpu(ipr_cmd->ioarcb.ioarcb_host_pci_addr),
	       ioa_cfg->regs.ioarrin_reg);
}

/**
 * ipr_internal_cmd_done - Op done function for an internally generated op.
 * @ipr_cmd:	ipr command struct
 *
 * This function is the op done function for an internally generated,
 * blocking op. It simply wakes the sleeping thread.
 *
 * Return value:
 * 	none
 **/
static void ipr_internal_cmd_done(struct ipr_cmnd *ipr_cmd)
{
	if (ipr_cmd->sibling)
		ipr_cmd->sibling = NULL;
	else
		complete(&ipr_cmd->completion);
}

/**
 * ipr_send_blocking_cmd - Send command and sleep on its completion.
 * @ipr_cmd:	ipr command struct
 * @timeout_func:	function to invoke if command times out
 * @timeout:	timeout
 *
 * Return value:
 * 	none
 **/
static void ipr_send_blocking_cmd(struct ipr_cmnd *ipr_cmd,
				  void (*timeout_func) (struct ipr_cmnd *ipr_cmd),
				  u32 timeout)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;

	init_completion(&ipr_cmd->completion);
	ipr_do_req(ipr_cmd, ipr_internal_cmd_done, timeout_func, timeout);

	spin_unlock_irq(ioa_cfg->host->host_lock);
	wait_for_completion(&ipr_cmd->completion);
	spin_lock_irq(ioa_cfg->host->host_lock);
}
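
/*
 * Illustrative caller sketch (not from the source): an internal,
 * blocking op is typically built and issued with the host lock held,
 * e.g.:
 *
 *	struct ipr_cmnd *ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
 *
 *	ipr_cmd->ioarcb.res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
 *	ipr_cmd->ioarcb.cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
 *	ipr_send_blocking_cmd(ipr_cmd, ipr_timeout, IPR_INTERNAL_TIMEOUT);
 *
 * The CDB setup is elided; see ipr_send_hcam() below for a complete
 * request setup.
 */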

/**
 * ipr_send_hcam - Send an HCAM to the adapter.
 * @ioa_cfg:	ioa config struct
 * @type:		HCAM type
 * @hostrcb:	hostrcb struct
 *
 * This function will send a Host Controlled Async command to the adapter.
 * If HCAMs are currently not allowed to be issued to the adapter, it will
 * place the hostrcb on the free queue.
 *
 * Return value:
 * 	none
 **/
static void ipr_send_hcam(struct ipr_ioa_cfg *ioa_cfg, u8 type,
			  struct ipr_hostrcb *hostrcb)
{
	struct ipr_cmnd *ipr_cmd;
	struct ipr_ioarcb *ioarcb;

	if (ioa_cfg->allow_cmds) {
		ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
		list_add_tail(&ipr_cmd->queue, &ioa_cfg->pending_q);
		list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_pending_q);

		ipr_cmd->u.hostrcb = hostrcb;
		ioarcb = &ipr_cmd->ioarcb;

		ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
		ioarcb->cmd_pkt.request_type = IPR_RQTYPE_HCAM;
		ioarcb->cmd_pkt.cdb[0] = IPR_HOST_CONTROLLED_ASYNC;
		ioarcb->cmd_pkt.cdb[1] = type;
		ioarcb->cmd_pkt.cdb[7] = (sizeof(hostrcb->hcam) >> 8) & 0xff;
		ioarcb->cmd_pkt.cdb[8] = sizeof(hostrcb->hcam) & 0xff;

		ioarcb->read_data_transfer_length = cpu_to_be32(sizeof(hostrcb->hcam));
		ioarcb->read_ioadl_len = cpu_to_be32(sizeof(struct ipr_ioadl_desc));
		ipr_cmd->ioadl[0].flags_and_data_len =
			cpu_to_be32(IPR_IOADL_FLAGS_READ_LAST | sizeof(hostrcb->hcam));
		ipr_cmd->ioadl[0].address = cpu_to_be32(hostrcb->hostrcb_dma);

		if (type == IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE)
			ipr_cmd->done = ipr_process_ccn;
		else
			ipr_cmd->done = ipr_process_error;

		ipr_trc_hook(ipr_cmd, IPR_TRACE_START, IPR_IOA_RES_ADDR);

		mb();
		writel(be32_to_cpu(ipr_cmd->ioarcb.ioarcb_host_pci_addr),
		       ioa_cfg->regs.ioarrin_reg);
	} else {
		list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_free_q);
	}
}

/**
 * ipr_init_res_entry - Initialize a resource entry struct.
 * @res:	resource entry struct
 *
 * Return value:
 * 	none
 **/
static void ipr_init_res_entry(struct ipr_resource_entry *res)
{
	res->needs_sync_complete = 0;
	res->in_erp = 0;
	res->add_to_ml = 0;
	res->del_from_ml = 0;
	res->resetting_device = 0;
	res->sdev = NULL;
}

/**
 * ipr_handle_config_change - Handle a config change from the adapter
 * @ioa_cfg:	ioa config struct
 * @hostrcb:	hostrcb
 *
 * Return value:
 * 	none
 **/
static void ipr_handle_config_change(struct ipr_ioa_cfg *ioa_cfg,
			      struct ipr_hostrcb *hostrcb)
{
	struct ipr_resource_entry *res = NULL;
	struct ipr_config_table_entry *cfgte;
	u32 is_ndn = 1;

	cfgte = &hostrcb->hcam.u.ccn.cfgte;

	list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
		if (!memcmp(&res->cfgte.res_addr, &cfgte->res_addr,
			    sizeof(cfgte->res_addr))) {
			is_ndn = 0;
			break;
		}
	}

	if (is_ndn) {
		if (list_empty(&ioa_cfg->free_res_q)) {
			ipr_send_hcam(ioa_cfg,
				      IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE,
				      hostrcb);
			return;
		}

		res = list_entry(ioa_cfg->free_res_q.next,
				 struct ipr_resource_entry, queue);

		list_del(&res->queue);
		ipr_init_res_entry(res);
		list_add_tail(&res->queue, &ioa_cfg->used_res_q);
	}

	memcpy(&res->cfgte, cfgte, sizeof(struct ipr_config_table_entry));

	if (hostrcb->hcam.notify_type == IPR_HOST_RCB_NOTIF_TYPE_REM_ENTRY) {
		if (res->sdev) {
			res->sdev->hostdata = NULL;
			res->del_from_ml = 1;
			if (ioa_cfg->allow_ml_add_del)
				schedule_work(&ioa_cfg->work_q);
		} else
			list_move_tail(&res->queue, &ioa_cfg->free_res_q);
	} else if (!res->sdev) {
		res->add_to_ml = 1;
		if (ioa_cfg->allow_ml_add_del)
			schedule_work(&ioa_cfg->work_q);
	}

	ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE, hostrcb);
}

/**
 * ipr_process_ccn - Op done function for a CCN.
 * @ipr_cmd:	ipr command struct
 *
 * This function is the op done function for a configuration
 * change notification host controlled async from the adapter.
 *
 * Return value:
 * 	none
 **/
static void ipr_process_ccn(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	struct ipr_hostrcb *hostrcb = ipr_cmd->u.hostrcb;
	u32 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);

	list_del(&hostrcb->queue);
	list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);

	if (ioasc) {
		if (ioasc != IPR_IOASC_IOA_WAS_RESET)
			dev_err(&ioa_cfg->pdev->dev,
				"Host RCB failed with IOASC: 0x%08X\n", ioasc);

		ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE, hostrcb);
	} else {
		ipr_handle_config_change(ioa_cfg, hostrcb);
	}
}

/**
 * ipr_log_vpd - Log the passed VPD to the error log.
 * @vpd:		vendor/product id/sn struct
 *
 * Return value:
 * 	none
 **/
static void ipr_log_vpd(struct ipr_vpd *vpd)
{
	char buffer[IPR_VENDOR_ID_LEN + IPR_PROD_ID_LEN
		    + IPR_SERIAL_NUM_LEN];

	memcpy(buffer, vpd->vpids.vendor_id, IPR_VENDOR_ID_LEN);
	memcpy(buffer + IPR_VENDOR_ID_LEN, vpd->vpids.product_id,
	       IPR_PROD_ID_LEN);
	buffer[IPR_VENDOR_ID_LEN + IPR_PROD_ID_LEN] = '\0';
	ipr_err("Vendor/Product ID: %s\n", buffer);

	memcpy(buffer, vpd->sn, IPR_SERIAL_NUM_LEN);
	buffer[IPR_SERIAL_NUM_LEN] = '\0';
	ipr_err("    Serial Number: %s\n", buffer);
}

/**
 * ipr_log_ext_vpd - Log the passed extended VPD to the error log.
 * @vpd:		vendor/product id/sn/wwn struct
 *
 * Return value:
 * 	none
 **/
static void ipr_log_ext_vpd(struct ipr_ext_vpd *vpd)
{
	ipr_log_vpd(&vpd->vpd);
	ipr_err("    WWN: %08X%08X\n", be32_to_cpu(vpd->wwid[0]),
		be32_to_cpu(vpd->wwid[1]));
}

/**
 * ipr_log_enhanced_cache_error - Log a cache error.
 * @ioa_cfg:	ioa config struct
 * @hostrcb:	hostrcb struct
 *
 * Return value:
 * 	none
 **/
static void ipr_log_enhanced_cache_error(struct ipr_ioa_cfg *ioa_cfg,
					 struct ipr_hostrcb *hostrcb)
{
	struct ipr_hostrcb_type_12_error *error =
		&hostrcb->hcam.u.error.u.type_12_error;

	ipr_err("-----Current Configuration-----\n");
	ipr_err("Cache Directory Card Information:\n");
	ipr_log_ext_vpd(&error->ioa_vpd);
	ipr_err("Adapter Card Information:\n");
	ipr_log_ext_vpd(&error->cfc_vpd);

	ipr_err("-----Expected Configuration-----\n");
	ipr_err("Cache Directory Card Information:\n");
	ipr_log_ext_vpd(&error->ioa_last_attached_to_cfc_vpd);
	ipr_err("Adapter Card Information:\n");
	ipr_log_ext_vpd(&error->cfc_last_attached_to_ioa_vpd);

	ipr_err("Additional IOA Data: %08X %08X %08X\n",
		     be32_to_cpu(error->ioa_data[0]),
		     be32_to_cpu(error->ioa_data[1]),
		     be32_to_cpu(error->ioa_data[2]));
}

/**
 * ipr_log_cache_error - Log a cache error.
 * @ioa_cfg:	ioa config struct
 * @hostrcb:	hostrcb struct
 *
 * Return value:
 * 	none
 **/
static void ipr_log_cache_error(struct ipr_ioa_cfg *ioa_cfg,
				struct ipr_hostrcb *hostrcb)
{
	struct ipr_hostrcb_type_02_error *error =
		&hostrcb->hcam.u.error.u.type_02_error;

	ipr_err("-----Current Configuration-----\n");
	ipr_err("Cache Directory Card Information:\n");
	ipr_log_vpd(&error->ioa_vpd);
	ipr_err("Adapter Card Information:\n");
	ipr_log_vpd(&error->cfc_vpd);

	ipr_err("-----Expected Configuration-----\n");
	ipr_err("Cache Directory Card Information:\n");
	ipr_log_vpd(&error->ioa_last_attached_to_cfc_vpd);
	ipr_err("Adapter Card Information:\n");
	ipr_log_vpd(&error->cfc_last_attached_to_ioa_vpd);

	ipr_err("Additional IOA Data: %08X %08X %08X\n",
		     be32_to_cpu(error->ioa_data[0]),
		     be32_to_cpu(error->ioa_data[1]),
		     be32_to_cpu(error->ioa_data[2]));
}

/**
 * ipr_log_enhanced_config_error - Log a configuration error.
 * @ioa_cfg:	ioa config struct
 * @hostrcb:	hostrcb struct
 *
 * Return value:
 * 	none
 **/
static void ipr_log_enhanced_config_error(struct ipr_ioa_cfg *ioa_cfg,
					  struct ipr_hostrcb *hostrcb)
{
	int errors_logged, i;
	struct ipr_hostrcb_device_data_entry_enhanced *dev_entry;
	struct ipr_hostrcb_type_13_error *error;

	error = &hostrcb->hcam.u.error.u.type_13_error;
	errors_logged = be32_to_cpu(error->errors_logged);

	ipr_err("Device Errors Detected/Logged: %d/%d\n",
		be32_to_cpu(error->errors_detected), errors_logged);

	dev_entry = error->dev;

	for (i = 0; i < errors_logged; i++, dev_entry++) {
		ipr_err_separator;

		ipr_phys_res_err(ioa_cfg, dev_entry->dev_res_addr, "Device %d", i + 1);
		ipr_log_ext_vpd(&dev_entry->vpd);

		ipr_err("-----New Device Information-----\n");
		ipr_log_ext_vpd(&dev_entry->new_vpd);

		ipr_err("Cache Directory Card Information:\n");
		ipr_log_ext_vpd(&dev_entry->ioa_last_with_dev_vpd);

		ipr_err("Adapter Card Information:\n");
		ipr_log_ext_vpd(&dev_entry->cfc_last_with_dev_vpd);
	}
}

/**
 * ipr_log_config_error - Log a configuration error.
 * @ioa_cfg:	ioa config struct
 * @hostrcb:	hostrcb struct
 *
 * Return value:
 * 	none
 **/
static void ipr_log_config_error(struct ipr_ioa_cfg *ioa_cfg,
				 struct ipr_hostrcb *hostrcb)
{
	int errors_logged, i;
	struct ipr_hostrcb_device_data_entry *dev_entry;
	struct ipr_hostrcb_type_03_error *error;

	error = &hostrcb->hcam.u.error.u.type_03_error;
	errors_logged = be32_to_cpu(error->errors_logged);

	ipr_err("Device Errors Detected/Logged: %d/%d\n",
		be32_to_cpu(error->errors_detected), errors_logged);

	dev_entry = error->dev;

	for (i = 0; i < errors_logged; i++, dev_entry++) {
		ipr_err_separator;

		ipr_phys_res_err(ioa_cfg, dev_entry->dev_res_addr, "Device %d", i + 1);
		ipr_log_vpd(&dev_entry->vpd);

		ipr_err("-----New Device Information-----\n");
		ipr_log_vpd(&dev_entry->new_vpd);

		ipr_err("Cache Directory Card Information:\n");
		ipr_log_vpd(&dev_entry->ioa_last_with_dev_vpd);

		ipr_err("Adapter Card Information:\n");
		ipr_log_vpd(&dev_entry->cfc_last_with_dev_vpd);

		ipr_err("Additional IOA Data: %08X %08X %08X %08X %08X\n",
			be32_to_cpu(dev_entry->ioa_data[0]),
			be32_to_cpu(dev_entry->ioa_data[1]),
			be32_to_cpu(dev_entry->ioa_data[2]),
			be32_to_cpu(dev_entry->ioa_data[3]),
			be32_to_cpu(dev_entry->ioa_data[4]));
	}
}

/**
 * ipr_log_enhanced_array_error - Log an array configuration error.
 * @ioa_cfg:	ioa config struct
 * @hostrcb:	hostrcb struct
 *
 * Return value:
 * 	none
 **/
static void ipr_log_enhanced_array_error(struct ipr_ioa_cfg *ioa_cfg,
					 struct ipr_hostrcb *hostrcb)
{
	int i, num_entries;
	struct ipr_hostrcb_type_14_error *error;
	struct ipr_hostrcb_array_data_entry_enhanced *array_entry;
	const u8 zero_sn[IPR_SERIAL_NUM_LEN] = { [0 ... IPR_SERIAL_NUM_LEN-1] = '0' };

	error = &hostrcb->hcam.u.error.u.type_14_error;

	ipr_err_separator;

	ipr_err("RAID %s Array Configuration: %d:%d:%d:%d\n",
		error->protection_level,
		ioa_cfg->host->host_no,
		error->last_func_vset_res_addr.bus,
		error->last_func_vset_res_addr.target,
		error->last_func_vset_res_addr.lun);

	ipr_err_separator;

	array_entry = error->array_member;
	num_entries = min_t(u32, be32_to_cpu(error->num_entries),
			    ARRAY_SIZE(error->array_member));

	for (i = 0; i < num_entries; i++, array_entry++) {
		if (!memcmp(array_entry->vpd.vpd.sn, zero_sn, IPR_SERIAL_NUM_LEN))
			continue;

		if (be32_to_cpu(error->exposed_mode_adn) == i)
			ipr_err("Exposed Array Member %d:\n", i);
		else
			ipr_err("Array Member %d:\n", i);

		ipr_log_ext_vpd(&array_entry->vpd);
		ipr_phys_res_err(ioa_cfg, array_entry->dev_res_addr, "Current Location");
		ipr_phys_res_err(ioa_cfg, array_entry->expected_dev_res_addr,
				 "Expected Location");

		ipr_err_separator;
	}
}

/**
 * ipr_log_array_error - Log an array configuration error.
 * @ioa_cfg:	ioa config struct
 * @hostrcb:	hostrcb struct
 *
 * Return value:
 * 	none
 **/
static void ipr_log_array_error(struct ipr_ioa_cfg *ioa_cfg,
				struct ipr_hostrcb *hostrcb)
{
	int i;
	struct ipr_hostrcb_type_04_error *error;
	struct ipr_hostrcb_array_data_entry *array_entry;
	const u8 zero_sn[IPR_SERIAL_NUM_LEN] = { [0 ... IPR_SERIAL_NUM_LEN-1] = '0' };

	error = &hostrcb->hcam.u.error.u.type_04_error;

	ipr_err_separator;

	ipr_err("RAID %s Array Configuration: %d:%d:%d:%d\n",
		error->protection_level,
		ioa_cfg->host->host_no,
		error->last_func_vset_res_addr.bus,
		error->last_func_vset_res_addr.target,
		error->last_func_vset_res_addr.lun);

	ipr_err_separator;

	array_entry = error->array_member;

	for (i = 0; i < 18; i++) {
		if (!memcmp(array_entry->vpd.sn, zero_sn, IPR_SERIAL_NUM_LEN))
			continue;

		if (be32_to_cpu(error->exposed_mode_adn) == i)
			ipr_err("Exposed Array Member %d:\n", i);
		else
			ipr_err("Array Member %d:\n", i);

		ipr_log_vpd(&array_entry->vpd);

		ipr_phys_res_err(ioa_cfg, array_entry->dev_res_addr, "Current Location");
		ipr_phys_res_err(ioa_cfg, array_entry->expected_dev_res_addr,
				 "Expected Location");

		ipr_err_separator;

		if (i == 9)
			array_entry = error->array_member2;
		else
			array_entry++;
	}
}

/**
 * ipr_log_hex_data - Log additional hex IOA error data.
 * @data:		IOA error data
 * @len:		data length
 *
 * Return value:
 * 	none
 **/
static void ipr_log_hex_data(u32 *data, int len)
{
	int i;

	if (len == 0)
		return;

	for (i = 0; i < len / 4; i += 4) {
		ipr_err("%08X: %08X %08X %08X %08X\n", i*4,
			be32_to_cpu(data[i]),
			be32_to_cpu(data[i+1]),
			be32_to_cpu(data[i+2]),
			be32_to_cpu(data[i+3]));
	}
}

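/*
 * Illustrative output (not from the source): for a 32-byte buffer,
 * ipr_log_hex_data() above produces two lines of the form:
 *
 *	00000000: DEADBEEF 00000001 00000002 00000003
 *	00000010: CAFEF00D 00000005 00000006 00000007
 *
 * i.e. a byte offset followed by four big-endian words per line.
 */
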
/**
 * ipr_log_enhanced_dual_ioa_error - Log an enhanced dual adapter error.
 * @ioa_cfg:	ioa config struct
 * @hostrcb:	hostrcb struct
 *
 * Return value:
 * 	none
 **/
static void ipr_log_enhanced_dual_ioa_error(struct ipr_ioa_cfg *ioa_cfg,
					    struct ipr_hostrcb *hostrcb)
{
	struct ipr_hostrcb_type_17_error *error;

	error = &hostrcb->hcam.u.error.u.type_17_error;
	error->failure_reason[sizeof(error->failure_reason) - 1] = '\0';

	ipr_err("%s\n", error->failure_reason);
	ipr_err("Remote Adapter VPD:\n");
	ipr_log_ext_vpd(&error->vpd);
	ipr_log_hex_data(error->data,
			 be32_to_cpu(hostrcb->hcam.length) -
			 (offsetof(struct ipr_hostrcb_error, u) +
			  offsetof(struct ipr_hostrcb_type_17_error, data)));
}

/**
 * ipr_log_dual_ioa_error - Log a dual adapter error.
 * @ioa_cfg:	ioa config struct
 * @hostrcb:	hostrcb struct
 *
 * Return value:
 * 	none
 **/
static void ipr_log_dual_ioa_error(struct ipr_ioa_cfg *ioa_cfg,
				   struct ipr_hostrcb *hostrcb)
{
	struct ipr_hostrcb_type_07_error *error;

	error = &hostrcb->hcam.u.error.u.type_07_error;
	error->failure_reason[sizeof(error->failure_reason) - 1] = '\0';

	ipr_err("%s\n", error->failure_reason);
	ipr_err("Remote Adapter VPD:\n");
	ipr_log_vpd(&error->vpd);
	ipr_log_hex_data(error->data,
			 be32_to_cpu(hostrcb->hcam.length) -
			 (offsetof(struct ipr_hostrcb_error, u) +
			  offsetof(struct ipr_hostrcb_type_07_error, data)));
}

/**
 * ipr_log_generic_error - Log an adapter error.
 * @ioa_cfg:	ioa config struct
 * @hostrcb:	hostrcb struct
 *
 * Return value:
 * 	none
 **/
static void ipr_log_generic_error(struct ipr_ioa_cfg *ioa_cfg,
				  struct ipr_hostrcb *hostrcb)
{
	ipr_log_hex_data(hostrcb->hcam.u.raw.data,
			 be32_to_cpu(hostrcb->hcam.length));
}

/**
 * ipr_get_error - Find the specified IOASC in the ipr_error_table.
 * @ioasc:	IOASC
 *
 * This function will return the index into the ipr_error_table
 * for the specified IOASC. If the IOASC is not in the table,
 * 0 will be returned, which points to the entry used for unknown errors.
 *
 * Return value:
 * 	index into the ipr_error_table
 **/
static u32 ipr_get_error(u32 ioasc)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(ipr_error_table); i++)
		if (ipr_error_table[i].ioasc == ioasc)
			return i;

	return 0;
}

/**
 * ipr_handle_log_data - Log an adapter error.
 * @ioa_cfg:	ioa config struct
 * @hostrcb:	hostrcb struct
 *
 * This function logs an adapter error to the system.
 *
 * Return value:
 * 	none
 **/
static void ipr_handle_log_data(struct ipr_ioa_cfg *ioa_cfg,
				struct ipr_hostrcb *hostrcb)
{
	u32 ioasc;
	int error_index;

	if (hostrcb->hcam.notify_type != IPR_HOST_RCB_NOTIF_TYPE_ERROR_LOG_ENTRY)
		return;

	if (hostrcb->hcam.notifications_lost == IPR_HOST_RCB_NOTIFICATIONS_LOST)
		dev_err(&ioa_cfg->pdev->dev, "Error notifications lost\n");

	ioasc = be32_to_cpu(hostrcb->hcam.u.error.failing_dev_ioasc);

	if (ioasc == IPR_IOASC_BUS_WAS_RESET ||
	    ioasc == IPR_IOASC_BUS_WAS_RESET_BY_OTHER) {
		/* Tell the midlayer we had a bus reset so it will handle the UA properly */
		scsi_report_bus_reset(ioa_cfg->host,
				      hostrcb->hcam.u.error.failing_dev_res_addr.bus);
	}

	error_index = ipr_get_error(ioasc);

	if (!ipr_error_table[error_index].log_hcam)
		return;

	if (ipr_is_device(&hostrcb->hcam.u.error.failing_dev_res_addr)) {
		ipr_res_err(ioa_cfg, hostrcb->hcam.u.error.failing_dev_res_addr,
			    "%s\n", ipr_error_table[error_index].error);
	} else {
		dev_err(&ioa_cfg->pdev->dev, "%s\n",
			ipr_error_table[error_index].error);
	}

	/* Set indication we have logged an error */
	ioa_cfg->errors_logged++;

	if (ioa_cfg->log_level < IPR_DEFAULT_LOG_LEVEL)
		return;
	if (be32_to_cpu(hostrcb->hcam.length) > sizeof(hostrcb->hcam.u.raw))
		hostrcb->hcam.length = cpu_to_be32(sizeof(hostrcb->hcam.u.raw));

	switch (hostrcb->hcam.overlay_id) {
	case IPR_HOST_RCB_OVERLAY_ID_2:
		ipr_log_cache_error(ioa_cfg, hostrcb);
		break;
	case IPR_HOST_RCB_OVERLAY_ID_3:
		ipr_log_config_error(ioa_cfg, hostrcb);
		break;
	case IPR_HOST_RCB_OVERLAY_ID_4:
	case IPR_HOST_RCB_OVERLAY_ID_6:
		ipr_log_array_error(ioa_cfg, hostrcb);
		break;
	case IPR_HOST_RCB_OVERLAY_ID_7:
		ipr_log_dual_ioa_error(ioa_cfg, hostrcb);
		break;
	case IPR_HOST_RCB_OVERLAY_ID_12:
		ipr_log_enhanced_cache_error(ioa_cfg, hostrcb);
		break;
	case IPR_HOST_RCB_OVERLAY_ID_13:
		ipr_log_enhanced_config_error(ioa_cfg, hostrcb);
		break;
	case IPR_HOST_RCB_OVERLAY_ID_14:
	case IPR_HOST_RCB_OVERLAY_ID_16:
		ipr_log_enhanced_array_error(ioa_cfg, hostrcb);
		break;
	case IPR_HOST_RCB_OVERLAY_ID_17:
		ipr_log_enhanced_dual_ioa_error(ioa_cfg, hostrcb);
		break;
	case IPR_HOST_RCB_OVERLAY_ID_1:
	case IPR_HOST_RCB_OVERLAY_ID_DEFAULT:
	default:
		ipr_log_generic_error(ioa_cfg, hostrcb);
		break;
	}
}

/**
 * ipr_process_error - Op done function for an adapter error log.
 * @ipr_cmd:	ipr command struct
 *
 * This function is the op done function for an error log host
 * controlled async from the adapter. It will log the error and
 * send the HCAM back to the adapter.
 *
 * Return value:
 * 	none
 **/
static void ipr_process_error(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	struct ipr_hostrcb *hostrcb = ipr_cmd->u.hostrcb;
	u32 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);

	list_del(&hostrcb->queue);
	list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);

	if (!ioasc) {
		ipr_handle_log_data(ioa_cfg, hostrcb);
	} else if (ioasc != IPR_IOASC_IOA_WAS_RESET) {
		dev_err(&ioa_cfg->pdev->dev,
			"Host RCB failed with IOASC: 0x%08X\n", ioasc);
	}

	ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_LOG_DATA, hostrcb);
}

/**
 * ipr_timeout -  An internally generated op has timed out.
 * @ipr_cmd:	ipr command struct
 *
 * This function blocks host requests and initiates an
 * adapter reset.
 *
 * Return value:
 * 	none
 **/
static void ipr_timeout(struct ipr_cmnd *ipr_cmd)
{
	unsigned long lock_flags = 0;
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;

	ENTER;
	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);

	ioa_cfg->errors_logged++;
	dev_err(&ioa_cfg->pdev->dev,
		"Adapter being reset due to command timeout.\n");

	if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
		ioa_cfg->sdt_state = GET_DUMP;

	if (!ioa_cfg->in_reset_reload || ioa_cfg->reset_cmd == ipr_cmd)
		ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);

	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
	LEAVE;
}

/**
 * ipr_oper_timeout -  Adapter timed out transitioning to operational
 * @ipr_cmd:	ipr command struct
 *
 * This function blocks host requests and initiates an
 * adapter reset.
 *
 * Return value:
 * 	none
 **/
static void ipr_oper_timeout(struct ipr_cmnd *ipr_cmd)
{
	unsigned long lock_flags = 0;
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;

	ENTER;
	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);

	ioa_cfg->errors_logged++;
	dev_err(&ioa_cfg->pdev->dev,
		"Adapter timed out transitioning to operational.\n");

	if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
		ioa_cfg->sdt_state = GET_DUMP;

	if (!ioa_cfg->in_reset_reload || ioa_cfg->reset_cmd == ipr_cmd) {
		if (ipr_fastfail)
			ioa_cfg->reset_retries += IPR_NUM_RESET_RELOAD_RETRIES;
		ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
	}

	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
	LEAVE;
}

/**
 * ipr_reset_reload - Reset/Reload the IOA
 * @ioa_cfg:		ioa config struct
 * @shutdown_type:	shutdown type
 *
 * This function resets the adapter and re-initializes it.
 * This function assumes that all new host commands have been stopped.
 * Return value:
 * 	SUCCESS / FAILED
 **/
static int ipr_reset_reload(struct ipr_ioa_cfg *ioa_cfg,
			    enum ipr_shutdown_type shutdown_type)
{
	if (!ioa_cfg->in_reset_reload)
		ipr_initiate_ioa_reset(ioa_cfg, shutdown_type);

	spin_unlock_irq(ioa_cfg->host->host_lock);
	wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
	spin_lock_irq(ioa_cfg->host->host_lock);

	/* If we got hit with a host reset while we were already resetting
	   the adapter for some reason, the reset may have failed, leaving
	   the adapter dead. */
	if (ioa_cfg->ioa_is_dead) {
		ipr_trace;
		return FAILED;
	}

	return SUCCESS;
}

/**
 * ipr_find_ses_entry - Find matching SES in SES table
 * @res:	resource entry struct of SES
 *
 * Return value:
 * 	pointer to SES table entry / NULL on failure
 **/
static const struct ipr_ses_table_entry *
ipr_find_ses_entry(struct ipr_resource_entry *res)
{
	int i, j, matches;
	const struct ipr_ses_table_entry *ste = ipr_ses_table;

	for (i = 0; i < ARRAY_SIZE(ipr_ses_table); i++, ste++) {
		for (j = 0, matches = 0; j < IPR_PROD_ID_LEN; j++) {
			if (ste->compare_product_id_byte[j] == 'X') {
				if (res->cfgte.std_inq_data.vpids.product_id[j] == ste->product_id[j])
					matches++;
				else
					break;
			} else
				matches++;
		}

		if (matches == IPR_PROD_ID_LEN)
			return ste;
	}

	return NULL;
}

/**
 * ipr_get_max_scsi_speed - Determine max SCSI speed for a given bus
 * @ioa_cfg:	ioa config struct
 * @bus:		SCSI bus
 * @bus_width:	bus width
 *
 * Return value:
 *	SCSI bus speed in units of 100KHz, 1600 is 160 MHz
 *	For a 2-byte wide SCSI bus, the maximum transfer speed is
 *	twice the maximum transfer rate (e.g. for a wide enabled bus,
 *	max 160MHz = max 320MB/sec).
 **/
static u32 ipr_get_max_scsi_speed(struct ipr_ioa_cfg *ioa_cfg, u8 bus, u8 bus_width)
{
	struct ipr_resource_entry *res;
	const struct ipr_ses_table_entry *ste;
	u32 max_xfer_rate = IPR_MAX_SCSI_RATE(bus_width);

	/* Loop through each config table entry in the config table buffer */
	list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
		if (!(IPR_IS_SES_DEVICE(res->cfgte.std_inq_data)))
			continue;

		if (bus != res->cfgte.res_addr.bus)
			continue;

		if (!(ste = ipr_find_ses_entry(res)))
			continue;

		max_xfer_rate = (ste->max_bus_speed_limit * 10) / (bus_width / 8);
	}

	return max_xfer_rate;
}

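/*
 * Worked example (illustrative): on a 16-bit wide bus whose slowest
 * attached SES limits it to 160 MB/s, the loop above yields
 * (160 * 10) / (16 / 8) = 800, i.e. 800 * 100KHz = 80 MHz, which at
 * two bytes per transfer is the 160 MB/s limit.
 */
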
/**
 * ipr_wait_iodbg_ack - Wait for an IODEBUG ACK from the IOA
 * @ioa_cfg:		ioa config struct
 * @max_delay:		max delay in micro-seconds to wait
 *
 * Waits for an IODEBUG ACK from the IOA, doing busy looping.
 *
 * Return value:
 * 	0 on success / other on failure
 **/
static int ipr_wait_iodbg_ack(struct ipr_ioa_cfg *ioa_cfg, int max_delay)
{
	volatile u32 pcii_reg;
	int delay = 1;

	/* Read interrupt reg until IOA signals IO Debug Acknowledge */
	while (delay < max_delay) {
		pcii_reg = readl(ioa_cfg->regs.sense_interrupt_reg);

		if (pcii_reg & IPR_PCII_IO_DEBUG_ACKNOWLEDGE)
			return 0;

		/* udelay cannot be used if delay is more than a few milliseconds */
		if ((delay / 1000) > MAX_UDELAY_MS)
			mdelay(delay / 1000);
		else
			udelay(delay);

		delay += delay;
	}
	return -EIO;
}

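/*
 * Note (illustrative): the delay above doubles on each iteration
 * (1, 2, 4, ... microseconds), so the total busy-wait before giving
 * up is bounded by roughly 2 * max_delay microseconds.
 */
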
1635 /**
1636  * ipr_get_ldump_data_section - Dump IOA memory
1637  * @ioa_cfg:			ioa config struct
1638  * @start_addr:			adapter address to dump
1639  * @dest:				destination kernel buffer
1640  * @length_in_words:	length to dump in 4 byte words
1641  *
1642  * Return value:
1643  * 	0 on success / -EIO on failure
1644  **/
1645 static int ipr_get_ldump_data_section(struct ipr_ioa_cfg *ioa_cfg,
1646 				      u32 start_addr,
1647 				      __be32 *dest, u32 length_in_words)
1648 {
1649 	volatile u32 temp_pcii_reg;
1650 	int i, delay = 0;
1651 
1652 	/* Write IOA interrupt reg starting LDUMP state  */
1653 	writel((IPR_UPROCI_RESET_ALERT | IPR_UPROCI_IO_DEBUG_ALERT),
1654 	       ioa_cfg->regs.set_uproc_interrupt_reg);
1655 
1656 	/* Wait for IO debug acknowledge */
1657 	if (ipr_wait_iodbg_ack(ioa_cfg,
1658 			       IPR_LDUMP_MAX_LONG_ACK_DELAY_IN_USEC)) {
1659 		dev_err(&ioa_cfg->pdev->dev,
1660 			"IOA dump long data transfer timeout\n");
1661 		return -EIO;
1662 	}
1663 
1664 	/* Signal LDUMP interlocked - clear IO debug ack */
1665 	writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE,
1666 	       ioa_cfg->regs.clr_interrupt_reg);
1667 
1668 	/* Write Mailbox with starting address */
1669 	writel(start_addr, ioa_cfg->ioa_mailbox);
1670 
1671 	/* Signal address valid - clear IOA Reset alert */
1672 	writel(IPR_UPROCI_RESET_ALERT,
1673 	       ioa_cfg->regs.clr_uproc_interrupt_reg);
1674 
1675 	for (i = 0; i < length_in_words; i++) {
1676 		/* Wait for IO debug acknowledge */
1677 		if (ipr_wait_iodbg_ack(ioa_cfg,
1678 				       IPR_LDUMP_MAX_SHORT_ACK_DELAY_IN_USEC)) {
1679 			dev_err(&ioa_cfg->pdev->dev,
1680 				"IOA dump short data transfer timeout\n");
1681 			return -EIO;
1682 		}
1683 
1684 		/* Read data from mailbox and increment destination pointer */
1685 		*dest = cpu_to_be32(readl(ioa_cfg->ioa_mailbox));
1686 		dest++;
1687 
1688 		/* For all but the last word of data, signal data received */
1689 		if (i < (length_in_words - 1)) {
1690 			/* Signal dump data received - Clear IO debug Ack */
1691 			writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE,
1692 			       ioa_cfg->regs.clr_interrupt_reg);
1693 		}
1694 	}
1695 
1696 	/* Signal end of block transfer. Set reset alert then clear IO debug ack */
1697 	writel(IPR_UPROCI_RESET_ALERT,
1698 	       ioa_cfg->regs.set_uproc_interrupt_reg);
1699 
1700 	writel(IPR_UPROCI_IO_DEBUG_ALERT,
1701 	       ioa_cfg->regs.clr_uproc_interrupt_reg);
1702 
1703 	/* Signal dump data received - Clear IO debug Ack */
1704 	writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE,
1705 	       ioa_cfg->regs.clr_interrupt_reg);
1706 
1707 	/* Wait for IOA to signal LDUMP exit - IOA reset alert will be cleared */
1708 	while (delay < IPR_LDUMP_MAX_SHORT_ACK_DELAY_IN_USEC) {
1709 		temp_pcii_reg =
1710 		    readl(ioa_cfg->regs.sense_uproc_interrupt_reg);
1711 
1712 		if (!(temp_pcii_reg & IPR_UPROCI_RESET_ALERT))
1713 			return 0;
1714 
1715 		udelay(10);
1716 		delay += 10;
1717 	}
1718 
1719 	return 0;
1720 }
1721 
1722 #ifdef CONFIG_SCSI_IPR_DUMP
1723 /**
1724  * ipr_sdt_copy - Copy Smart Dump Table to kernel buffer
1725  * @ioa_cfg:		ioa config struct
1726  * @pci_address:	adapter address
1727  * @length:			length of data to copy
1728  *
1729  * Copy data from PCI adapter to kernel buffer.
1730  * Note: length MUST be a 4 byte multiple
1731  * Return value:
 * 	number of bytes copied
1733  **/
1734 static int ipr_sdt_copy(struct ipr_ioa_cfg *ioa_cfg,
1735 			unsigned long pci_address, u32 length)
1736 {
1737 	int bytes_copied = 0;
1738 	int cur_len, rc, rem_len, rem_page_len;
1739 	__be32 *page;
1740 	unsigned long lock_flags = 0;
1741 	struct ipr_ioa_dump *ioa_dump = &ioa_cfg->dump->ioa_dump;
1742 
1743 	while (bytes_copied < length &&
1744 	       (ioa_dump->hdr.len + bytes_copied) < IPR_MAX_IOA_DUMP_SIZE) {
1745 		if (ioa_dump->page_offset >= PAGE_SIZE ||
1746 		    ioa_dump->page_offset == 0) {
1747 			page = (__be32 *)__get_free_page(GFP_ATOMIC);
1748 
1749 			if (!page) {
1750 				ipr_trace;
1751 				return bytes_copied;
1752 			}
1753 
1754 			ioa_dump->page_offset = 0;
1755 			ioa_dump->ioa_data[ioa_dump->next_page_index] = page;
1756 			ioa_dump->next_page_index++;
1757 		} else
1758 			page = ioa_dump->ioa_data[ioa_dump->next_page_index - 1];
1759 
1760 		rem_len = length - bytes_copied;
1761 		rem_page_len = PAGE_SIZE - ioa_dump->page_offset;
1762 		cur_len = min(rem_len, rem_page_len);
1763 
1764 		spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
1765 		if (ioa_cfg->sdt_state == ABORT_DUMP) {
1766 			rc = -EIO;
1767 		} else {
1768 			rc = ipr_get_ldump_data_section(ioa_cfg,
1769 							pci_address + bytes_copied,
1770 							&page[ioa_dump->page_offset / 4],
1771 							(cur_len / sizeof(u32)));
1772 		}
1773 		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
1774 
1775 		if (!rc) {
1776 			ioa_dump->page_offset += cur_len;
1777 			bytes_copied += cur_len;
1778 		} else {
1779 			ipr_trace;
1780 			break;
1781 		}
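		/* Yield the CPU between chunks - a full IOA dump can take a while */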
1782 		schedule();
1783 	}
1784 
1785 	return bytes_copied;
1786 }
1787 
1788 /**
1789  * ipr_init_dump_entry_hdr - Initialize a dump entry header.
1790  * @hdr:	dump entry header struct
1791  *
1792  * Return value:
1793  * 	nothing
1794  **/
1795 static void ipr_init_dump_entry_hdr(struct ipr_dump_entry_header *hdr)
1796 {
1797 	hdr->eye_catcher = IPR_DUMP_EYE_CATCHER;
1798 	hdr->num_elems = 1;
1799 	hdr->offset = sizeof(*hdr);
1800 	hdr->status = IPR_DUMP_STATUS_SUCCESS;
1801 }
1802 
1803 /**
1804  * ipr_dump_ioa_type_data - Fill in the adapter type in the dump.
1805  * @ioa_cfg:	ioa config struct
1806  * @driver_dump:	driver dump struct
1807  *
1808  * Return value:
1809  * 	nothing
1810  **/
1811 static void ipr_dump_ioa_type_data(struct ipr_ioa_cfg *ioa_cfg,
1812 				   struct ipr_driver_dump *driver_dump)
1813 {
1814 	struct ipr_inquiry_page3 *ucode_vpd = &ioa_cfg->vpd_cbs->page3_data;
1815 
1816 	ipr_init_dump_entry_hdr(&driver_dump->ioa_type_entry.hdr);
1817 	driver_dump->ioa_type_entry.hdr.len =
1818 		sizeof(struct ipr_dump_ioa_type_entry) -
1819 		sizeof(struct ipr_dump_entry_header);
1820 	driver_dump->ioa_type_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_BINARY;
1821 	driver_dump->ioa_type_entry.hdr.id = IPR_DUMP_DRIVER_TYPE_ID;
1822 	driver_dump->ioa_type_entry.type = ioa_cfg->type;
1823 	driver_dump->ioa_type_entry.fw_version = (ucode_vpd->major_release << 24) |
1824 		(ucode_vpd->card_type << 16) | (ucode_vpd->minor_release[0] << 8) |
1825 		ucode_vpd->minor_release[1];
1826 	driver_dump->hdr.num_entries++;
1827 }
1828 
1829 /**
1830  * ipr_dump_version_data - Fill in the driver version in the dump.
1831  * @ioa_cfg:	ioa config struct
1832  * @driver_dump:	driver dump struct
1833  *
1834  * Return value:
1835  * 	nothing
1836  **/
1837 static void ipr_dump_version_data(struct ipr_ioa_cfg *ioa_cfg,
1838 				  struct ipr_driver_dump *driver_dump)
1839 {
1840 	ipr_init_dump_entry_hdr(&driver_dump->version_entry.hdr);
1841 	driver_dump->version_entry.hdr.len =
1842 		sizeof(struct ipr_dump_version_entry) -
1843 		sizeof(struct ipr_dump_entry_header);
1844 	driver_dump->version_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_ASCII;
1845 	driver_dump->version_entry.hdr.id = IPR_DUMP_DRIVER_VERSION_ID;
1846 	strcpy(driver_dump->version_entry.version, IPR_DRIVER_VERSION);
1847 	driver_dump->hdr.num_entries++;
1848 }
1849 
1850 /**
1851  * ipr_dump_trace_data - Fill in the IOA trace in the dump.
1852  * @ioa_cfg:	ioa config struct
1853  * @driver_dump:	driver dump struct
1854  *
1855  * Return value:
1856  * 	nothing
1857  **/
1858 static void ipr_dump_trace_data(struct ipr_ioa_cfg *ioa_cfg,
1859 				   struct ipr_driver_dump *driver_dump)
1860 {
1861 	ipr_init_dump_entry_hdr(&driver_dump->trace_entry.hdr);
1862 	driver_dump->trace_entry.hdr.len =
1863 		sizeof(struct ipr_dump_trace_entry) -
1864 		sizeof(struct ipr_dump_entry_header);
1865 	driver_dump->trace_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_BINARY;
1866 	driver_dump->trace_entry.hdr.id = IPR_DUMP_TRACE_ID;
1867 	memcpy(driver_dump->trace_entry.trace, ioa_cfg->trace, IPR_TRACE_SIZE);
1868 	driver_dump->hdr.num_entries++;
1869 }
1870 
1871 /**
1872  * ipr_dump_location_data - Fill in the IOA location in the dump.
1873  * @ioa_cfg:	ioa config struct
1874  * @driver_dump:	driver dump struct
1875  *
1876  * Return value:
1877  * 	nothing
1878  **/
1879 static void ipr_dump_location_data(struct ipr_ioa_cfg *ioa_cfg,
1880 				   struct ipr_driver_dump *driver_dump)
1881 {
1882 	ipr_init_dump_entry_hdr(&driver_dump->location_entry.hdr);
1883 	driver_dump->location_entry.hdr.len =
1884 		sizeof(struct ipr_dump_location_entry) -
1885 		sizeof(struct ipr_dump_entry_header);
1886 	driver_dump->location_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_ASCII;
1887 	driver_dump->location_entry.hdr.id = IPR_DUMP_LOCATION_ID;
1888 	strcpy(driver_dump->location_entry.location, ioa_cfg->pdev->dev.bus_id);
1889 	driver_dump->hdr.num_entries++;
1890 }
1891 
1892 /**
1893  * ipr_get_ioa_dump - Perform a dump of the driver and adapter.
1894  * @ioa_cfg:	ioa config struct
1895  * @dump:		dump struct
1896  *
1897  * Return value:
1898  * 	nothing
1899  **/
1900 static void ipr_get_ioa_dump(struct ipr_ioa_cfg *ioa_cfg, struct ipr_dump *dump)
1901 {
1902 	unsigned long start_addr, sdt_word;
1903 	unsigned long lock_flags = 0;
1904 	struct ipr_driver_dump *driver_dump = &dump->driver_dump;
1905 	struct ipr_ioa_dump *ioa_dump = &dump->ioa_dump;
1906 	u32 num_entries, start_off, end_off;
1907 	u32 bytes_to_copy, bytes_copied, rc;
1908 	struct ipr_sdt *sdt;
1909 	int i;
1910 
1911 	ENTER;
1912 
1913 	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
1914 
1915 	if (ioa_cfg->sdt_state != GET_DUMP) {
1916 		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
1917 		return;
1918 	}
1919 
1920 	start_addr = readl(ioa_cfg->ioa_mailbox);
1921 
1922 	if (!ipr_sdt_is_fmt2(start_addr)) {
1923 		dev_err(&ioa_cfg->pdev->dev,
1924 			"Invalid dump table format: %lx\n", start_addr);
1925 		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
1926 		return;
1927 	}
1928 
1929 	dev_err(&ioa_cfg->pdev->dev, "Dump of IOA initiated\n");
1930 
1931 	driver_dump->hdr.eye_catcher = IPR_DUMP_EYE_CATCHER;
1932 
1933 	/* Initialize the overall dump header */
1934 	driver_dump->hdr.len = sizeof(struct ipr_driver_dump);
1935 	driver_dump->hdr.num_entries = 1;
1936 	driver_dump->hdr.first_entry_offset = sizeof(struct ipr_dump_header);
1937 	driver_dump->hdr.status = IPR_DUMP_STATUS_SUCCESS;
1938 	driver_dump->hdr.os = IPR_DUMP_OS_LINUX;
1939 	driver_dump->hdr.driver_name = IPR_DUMP_DRIVER_NAME;
1940 
1941 	ipr_dump_version_data(ioa_cfg, driver_dump);
1942 	ipr_dump_location_data(ioa_cfg, driver_dump);
1943 	ipr_dump_ioa_type_data(ioa_cfg, driver_dump);
1944 	ipr_dump_trace_data(ioa_cfg, driver_dump);
1945 
1946 	/* Update dump_header */
1947 	driver_dump->hdr.len += sizeof(struct ipr_dump_entry_header);
1948 
1949 	/* IOA Dump entry */
1950 	ipr_init_dump_entry_hdr(&ioa_dump->hdr);
1951 	ioa_dump->format = IPR_SDT_FMT2;
1952 	ioa_dump->hdr.len = 0;
1953 	ioa_dump->hdr.data_type = IPR_DUMP_DATA_TYPE_BINARY;
1954 	ioa_dump->hdr.id = IPR_DUMP_IOA_DUMP_ID;
1955 
	/*
	 * The first entries in the SDT are actually a list of dump addresses
	 * and lengths used to gather the real dump data.  sdt points to the
	 * IOA generated dump table.  Dump data will be extracted based on
	 * entries in this table.
	 */
1960 	sdt = &ioa_dump->sdt;
1961 
1962 	rc = ipr_get_ldump_data_section(ioa_cfg, start_addr, (__be32 *)sdt,
1963 					sizeof(struct ipr_sdt) / sizeof(__be32));
1964 
1965 	/* Smart Dump table is ready to use and the first entry is valid */
1966 	if (rc || (be32_to_cpu(sdt->hdr.state) != IPR_FMT2_SDT_READY_TO_USE)) {
1967 		dev_err(&ioa_cfg->pdev->dev,
1968 			"Dump of IOA failed. Dump table not valid: %d, %X.\n",
1969 			rc, be32_to_cpu(sdt->hdr.state));
1970 		driver_dump->hdr.status = IPR_DUMP_STATUS_FAILED;
1971 		ioa_cfg->sdt_state = DUMP_OBTAINED;
1972 		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
1973 		return;
1974 	}
1975 
1976 	num_entries = be32_to_cpu(sdt->hdr.num_entries_used);
1977 
1978 	if (num_entries > IPR_NUM_SDT_ENTRIES)
1979 		num_entries = IPR_NUM_SDT_ENTRIES;
1980 
1981 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
1982 
1983 	for (i = 0; i < num_entries; i++) {
1984 		if (ioa_dump->hdr.len > IPR_MAX_IOA_DUMP_SIZE) {
1985 			driver_dump->hdr.status = IPR_DUMP_STATUS_QUAL_SUCCESS;
1986 			break;
1987 		}
1988 
1989 		if (sdt->entry[i].flags & IPR_SDT_VALID_ENTRY) {
1990 			sdt_word = be32_to_cpu(sdt->entry[i].bar_str_offset);
1991 			start_off = sdt_word & IPR_FMT2_MBX_ADDR_MASK;
1992 			end_off = be32_to_cpu(sdt->entry[i].end_offset);
1993 
1994 			if (ipr_sdt_is_fmt2(sdt_word) && sdt_word) {
1995 				bytes_to_copy = end_off - start_off;
1996 				if (bytes_to_copy > IPR_MAX_IOA_DUMP_SIZE) {
1997 					sdt->entry[i].flags &= ~IPR_SDT_VALID_ENTRY;
1998 					continue;
1999 				}
2000 
2001 				/* Copy data from adapter to driver buffers */
2002 				bytes_copied = ipr_sdt_copy(ioa_cfg, sdt_word,
2003 							    bytes_to_copy);
2004 
2005 				ioa_dump->hdr.len += bytes_copied;
2006 
2007 				if (bytes_copied != bytes_to_copy) {
2008 					driver_dump->hdr.status = IPR_DUMP_STATUS_QUAL_SUCCESS;
2009 					break;
2010 				}
2011 			}
2012 		}
2013 	}
2014 
2015 	dev_err(&ioa_cfg->pdev->dev, "Dump of IOA completed.\n");
2016 
2017 	/* Update dump_header */
2018 	driver_dump->hdr.len += ioa_dump->hdr.len;
2019 	wmb();
2020 	ioa_cfg->sdt_state = DUMP_OBTAINED;
2021 	LEAVE;
2022 }
2023 
2024 #else
#define ipr_get_ioa_dump(ioa_cfg, dump) do { } while (0)
2026 #endif
2027 
2028 /**
2029  * ipr_release_dump - Free adapter dump memory
2030  * @kref:	kref struct
2031  *
2032  * Return value:
2033  *	nothing
2034  **/
2035 static void ipr_release_dump(struct kref *kref)
2036 {
	struct ipr_dump *dump = container_of(kref, struct ipr_dump, kref);
2038 	struct ipr_ioa_cfg *ioa_cfg = dump->ioa_cfg;
2039 	unsigned long lock_flags = 0;
2040 	int i;
2041 
2042 	ENTER;
2043 	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2044 	ioa_cfg->dump = NULL;
2045 	ioa_cfg->sdt_state = INACTIVE;
2046 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2047 
2048 	for (i = 0; i < dump->ioa_dump.next_page_index; i++)
2049 		free_page((unsigned long) dump->ioa_dump.ioa_data[i]);
2050 
2051 	kfree(dump);
2052 	LEAVE;
2053 }
2054 
2055 /**
2056  * ipr_worker_thread - Worker thread
2057  * @data:		ioa config struct
2058  *
2059  * Called at task level from a work thread. This function takes care
 * of adding and removing devices from the mid-layer as configuration
2061  * changes are detected by the adapter.
2062  *
2063  * Return value:
2064  * 	nothing
2065  **/
2066 static void ipr_worker_thread(void *data)
2067 {
2068 	unsigned long lock_flags;
2069 	struct ipr_resource_entry *res;
2070 	struct scsi_device *sdev;
2071 	struct ipr_dump *dump;
2072 	struct ipr_ioa_cfg *ioa_cfg = data;
2073 	u8 bus, target, lun;
2074 	int did_work;
2075 
2076 	ENTER;
2077 	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2078 
2079 	if (ioa_cfg->sdt_state == GET_DUMP) {
2080 		dump = ioa_cfg->dump;
2081 		if (!dump) {
2082 			spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2083 			return;
2084 		}
2085 		kref_get(&dump->kref);
2086 		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2087 		ipr_get_ioa_dump(ioa_cfg, dump);
2088 		kref_put(&dump->kref, ipr_release_dump);
2089 
2090 		spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2091 		if (ioa_cfg->sdt_state == DUMP_OBTAINED)
2092 			ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
2093 		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2094 		return;
2095 	}
2096 
2097 restart:
2098 	do {
2099 		did_work = 0;
2100 		if (!ioa_cfg->allow_cmds || !ioa_cfg->allow_ml_add_del) {
2101 			spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2102 			return;
2103 		}
2104 
2105 		list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
2106 			if (res->del_from_ml && res->sdev) {
2107 				did_work = 1;
2108 				sdev = res->sdev;
2109 				if (!scsi_device_get(sdev)) {
2110 					res->sdev = NULL;
2111 					list_move_tail(&res->queue, &ioa_cfg->free_res_q);
2112 					spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2113 					scsi_remove_device(sdev);
2114 					scsi_device_put(sdev);
2115 					spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2116 				}
2117 				break;
2118 			}
2119 		}
	} while (did_work);
2121 
2122 	list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
2123 		if (res->add_to_ml) {
2124 			bus = res->cfgte.res_addr.bus;
2125 			target = res->cfgte.res_addr.target;
2126 			lun = res->cfgte.res_addr.lun;
2127 			spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2128 			scsi_add_device(ioa_cfg->host, bus, target, lun);
2129 			spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
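			/* The resource list may have changed while the lock was dropped - rescan it */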
2130 			goto restart;
2131 		}
2132 	}
2133 
2134 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2135 	kobject_uevent(&ioa_cfg->host->shost_classdev.kobj, KOBJ_CHANGE, NULL);
2136 	LEAVE;
2137 }
2138 
2139 #ifdef CONFIG_SCSI_IPR_TRACE
2140 /**
2141  * ipr_read_trace - Dump the adapter trace
2142  * @kobj:		kobject struct
2143  * @buf:		buffer
2144  * @off:		offset
2145  * @count:		buffer size
2146  *
2147  * Return value:
 *	number of bytes read
2149  **/
2150 static ssize_t ipr_read_trace(struct kobject *kobj, char *buf,
2151 			      loff_t off, size_t count)
2152 {
	struct class_device *cdev = container_of(kobj, struct class_device, kobj);
2154 	struct Scsi_Host *shost = class_to_shost(cdev);
2155 	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
2156 	unsigned long lock_flags = 0;
2157 	int size = IPR_TRACE_SIZE;
2158 	char *src = (char *)ioa_cfg->trace;
2159 
2160 	if (off > size)
2161 		return 0;
2162 	if (off + count > size) {
2163 		size -= off;
2164 		count = size;
2165 	}
2166 
2167 	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2168 	memcpy(buf, &src[off], count);
2169 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2170 	return count;
2171 }
2172 
2173 static struct bin_attribute ipr_trace_attr = {
2174 	.attr =	{
2175 		.name = "trace",
2176 		.mode = S_IRUGO,
2177 	},
2178 	.size = 0,
2179 	.read = ipr_read_trace,
2180 };
2181 #endif
2182 
2183 static const struct {
2184 	enum ipr_cache_state state;
2185 	char *name;
2186 } cache_state [] = {
2187 	{ CACHE_NONE, "none" },
2188 	{ CACHE_DISABLED, "disabled" },
2189 	{ CACHE_ENABLED, "enabled" }
2190 };
2191 
2192 /**
2193  * ipr_show_write_caching - Show the write caching attribute
2194  * @class_dev:	class device struct
2195  * @buf:		buffer
2196  *
2197  * Return value:
2198  *	number of bytes printed to buffer
2199  **/
2200 static ssize_t ipr_show_write_caching(struct class_device *class_dev, char *buf)
2201 {
2202 	struct Scsi_Host *shost = class_to_shost(class_dev);
2203 	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
2204 	unsigned long lock_flags = 0;
2205 	int i, len = 0;
2206 
2207 	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2208 	for (i = 0; i < ARRAY_SIZE(cache_state); i++) {
2209 		if (cache_state[i].state == ioa_cfg->cache_state) {
2210 			len = snprintf(buf, PAGE_SIZE, "%s\n", cache_state[i].name);
2211 			break;
2212 		}
2213 	}
2214 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2215 	return len;
2216 }
2217 
2219 /**
2220  * ipr_store_write_caching - Enable/disable adapter write cache
2221  * @class_dev:	class_device struct
2222  * @buf:		buffer
2223  * @count:		buffer size
2224  *
2225  * This function will enable/disable adapter write cache.
2226  *
2227  * Return value:
2228  * 	count on success / other on failure
2229  **/
2230 static ssize_t ipr_store_write_caching(struct class_device *class_dev,
2231 					const char *buf, size_t count)
2232 {
2233 	struct Scsi_Host *shost = class_to_shost(class_dev);
2234 	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
2235 	unsigned long lock_flags = 0;
2236 	enum ipr_cache_state new_state = CACHE_INVALID;
2237 	int i;
2238 
2239 	if (!capable(CAP_SYS_ADMIN))
2240 		return -EACCES;
2241 	if (ioa_cfg->cache_state == CACHE_NONE)
2242 		return -EINVAL;
2243 
2244 	for (i = 0; i < ARRAY_SIZE(cache_state); i++) {
2245 		if (!strncmp(cache_state[i].name, buf, strlen(cache_state[i].name))) {
2246 			new_state = cache_state[i].state;
2247 			break;
2248 		}
2249 	}
2250 
2251 	if (new_state != CACHE_DISABLED && new_state != CACHE_ENABLED)
2252 		return -EINVAL;
2253 
2254 	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2255 	if (ioa_cfg->cache_state == new_state) {
2256 		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2257 		return count;
2258 	}
2259 
2260 	ioa_cfg->cache_state = new_state;
2261 	dev_info(&ioa_cfg->pdev->dev, "%s adapter write cache.\n",
2262 		 new_state == CACHE_ENABLED ? "Enabling" : "Disabling");
2263 	if (!ioa_cfg->in_reset_reload)
2264 		ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NORMAL);
2265 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2266 	wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
2267 
2268 	return count;
2269 }
2270 
2271 static struct class_device_attribute ipr_ioa_cache_attr = {
2272 	.attr = {
2273 		.name =		"write_cache",
2274 		.mode =		S_IRUGO | S_IWUSR,
2275 	},
2276 	.show = ipr_show_write_caching,
2277 	.store = ipr_store_write_caching
2278 };
2279 
2280 /**
2281  * ipr_show_fw_version - Show the firmware version
2282  * @class_dev:	class device struct
2283  * @buf:		buffer
2284  *
2285  * Return value:
2286  *	number of bytes printed to buffer
2287  **/
2288 static ssize_t ipr_show_fw_version(struct class_device *class_dev, char *buf)
2289 {
2290 	struct Scsi_Host *shost = class_to_shost(class_dev);
2291 	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
2292 	struct ipr_inquiry_page3 *ucode_vpd = &ioa_cfg->vpd_cbs->page3_data;
2293 	unsigned long lock_flags = 0;
2294 	int len;
2295 
2296 	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2297 	len = snprintf(buf, PAGE_SIZE, "%02X%02X%02X%02X\n",
2298 		       ucode_vpd->major_release, ucode_vpd->card_type,
2299 		       ucode_vpd->minor_release[0],
2300 		       ucode_vpd->minor_release[1]);
2301 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2302 	return len;
2303 }
2304 
2305 static struct class_device_attribute ipr_fw_version_attr = {
2306 	.attr = {
2307 		.name =		"fw_version",
2308 		.mode =		S_IRUGO,
2309 	},
2310 	.show = ipr_show_fw_version,
2311 };
2312 
2313 /**
2314  * ipr_show_log_level - Show the adapter's error logging level
2315  * @class_dev:	class device struct
2316  * @buf:		buffer
2317  *
2318  * Return value:
2319  * 	number of bytes printed to buffer
2320  **/
2321 static ssize_t ipr_show_log_level(struct class_device *class_dev, char *buf)
2322 {
2323 	struct Scsi_Host *shost = class_to_shost(class_dev);
2324 	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
2325 	unsigned long lock_flags = 0;
2326 	int len;
2327 
2328 	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2329 	len = snprintf(buf, PAGE_SIZE, "%d\n", ioa_cfg->log_level);
2330 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2331 	return len;
2332 }
2333 
2334 /**
2335  * ipr_store_log_level - Change the adapter's error logging level
2336  * @class_dev:	class device struct
 * @buf:		buffer
 * @count:		buffer size
 *
 * Return value:
 * 	number of bytes consumed from the buffer
2341  **/
2342 static ssize_t ipr_store_log_level(struct class_device *class_dev,
2343 				   const char *buf, size_t count)
2344 {
2345 	struct Scsi_Host *shost = class_to_shost(class_dev);
2346 	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
2347 	unsigned long lock_flags = 0;
2348 
2349 	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2350 	ioa_cfg->log_level = simple_strtoul(buf, NULL, 10);
2351 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2352 	return strlen(buf);
2353 }
2354 
2355 static struct class_device_attribute ipr_log_level_attr = {
2356 	.attr = {
2357 		.name =		"log_level",
2358 		.mode =		S_IRUGO | S_IWUSR,
2359 	},
2360 	.show = ipr_show_log_level,
2361 	.store = ipr_store_log_level
2362 };
2363 
2364 /**
2365  * ipr_store_diagnostics - IOA Diagnostics interface
2366  * @class_dev:	class_device struct
2367  * @buf:		buffer
2368  * @count:		buffer size
2369  *
2370  * This function will reset the adapter and wait a reasonable
2371  * amount of time for any errors that the adapter might log.
2372  *
2373  * Return value:
2374  * 	count on success / other on failure
2375  **/
2376 static ssize_t ipr_store_diagnostics(struct class_device *class_dev,
2377 				     const char *buf, size_t count)
2378 {
2379 	struct Scsi_Host *shost = class_to_shost(class_dev);
2380 	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
2381 	unsigned long lock_flags = 0;
2382 	int rc = count;
2383 
2384 	if (!capable(CAP_SYS_ADMIN))
2385 		return -EACCES;
2386 
2387 	wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
2388 	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2389 	ioa_cfg->errors_logged = 0;
2390 	ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NORMAL);
2391 
2392 	if (ioa_cfg->in_reset_reload) {
2393 		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2394 		wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
2395 
2396 		/* Wait for a second for any errors to be logged */
2397 		msleep(1000);
2398 	} else {
2399 		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2400 		return -EIO;
2401 	}
2402 
2403 	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2404 	if (ioa_cfg->in_reset_reload || ioa_cfg->errors_logged)
2405 		rc = -EIO;
2406 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2407 
2408 	return rc;
2409 }
2410 
2411 static struct class_device_attribute ipr_diagnostics_attr = {
2412 	.attr = {
2413 		.name =		"run_diagnostics",
2414 		.mode =		S_IWUSR,
2415 	},
2416 	.store = ipr_store_diagnostics
2417 };
2418 
2419 /**
2420  * ipr_show_adapter_state - Show the adapter's state
2421  * @class_dev:	class device struct
2422  * @buf:		buffer
2423  *
2424  * Return value:
2425  * 	number of bytes printed to buffer
2426  **/
2427 static ssize_t ipr_show_adapter_state(struct class_device *class_dev, char *buf)
2428 {
2429 	struct Scsi_Host *shost = class_to_shost(class_dev);
2430 	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
2431 	unsigned long lock_flags = 0;
2432 	int len;
2433 
2434 	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2435 	if (ioa_cfg->ioa_is_dead)
2436 		len = snprintf(buf, PAGE_SIZE, "offline\n");
2437 	else
2438 		len = snprintf(buf, PAGE_SIZE, "online\n");
2439 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2440 	return len;
2441 }
2442 
2443 /**
2444  * ipr_store_adapter_state - Change adapter state
2445  * @class_dev:	class_device struct
2446  * @buf:		buffer
2447  * @count:		buffer size
2448  *
2449  * This function will change the adapter's state.
2450  *
2451  * Return value:
2452  * 	count on success / other on failure
2453  **/
2454 static ssize_t ipr_store_adapter_state(struct class_device *class_dev,
2455 				       const char *buf, size_t count)
2456 {
2457 	struct Scsi_Host *shost = class_to_shost(class_dev);
2458 	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
2459 	unsigned long lock_flags;
2460 	int result = count;
2461 
2462 	if (!capable(CAP_SYS_ADMIN))
2463 		return -EACCES;
2464 
2465 	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2466 	if (ioa_cfg->ioa_is_dead && !strncmp(buf, "online", 6)) {
2467 		ioa_cfg->ioa_is_dead = 0;
2468 		ioa_cfg->reset_retries = 0;
2469 		ioa_cfg->in_ioa_bringdown = 0;
2470 		ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
2471 	}
2472 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2473 	wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
2474 
2475 	return result;
2476 }
2477 
2478 static struct class_device_attribute ipr_ioa_state_attr = {
2479 	.attr = {
2480 		.name =		"state",
2481 		.mode =		S_IRUGO | S_IWUSR,
2482 	},
2483 	.show = ipr_show_adapter_state,
2484 	.store = ipr_store_adapter_state
2485 };
2486 
2487 /**
2488  * ipr_store_reset_adapter - Reset the adapter
2489  * @class_dev:	class_device struct
2490  * @buf:		buffer
2491  * @count:		buffer size
2492  *
2493  * This function will reset the adapter.
2494  *
2495  * Return value:
2496  * 	count on success / other on failure
2497  **/
2498 static ssize_t ipr_store_reset_adapter(struct class_device *class_dev,
2499 				       const char *buf, size_t count)
2500 {
2501 	struct Scsi_Host *shost = class_to_shost(class_dev);
2502 	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
2503 	unsigned long lock_flags;
2504 	int result = count;
2505 
2506 	if (!capable(CAP_SYS_ADMIN))
2507 		return -EACCES;
2508 
2509 	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2510 	if (!ioa_cfg->in_reset_reload)
2511 		ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NORMAL);
2512 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2513 	wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
2514 
2515 	return result;
2516 }
2517 
2518 static struct class_device_attribute ipr_ioa_reset_attr = {
2519 	.attr = {
2520 		.name =		"reset_host",
2521 		.mode =		S_IWUSR,
2522 	},
2523 	.store = ipr_store_reset_adapter
2524 };
2525 
2526 /**
2527  * ipr_alloc_ucode_buffer - Allocates a microcode download buffer
2528  * @buf_len:		buffer length
2529  *
2530  * Allocates a DMA'able buffer in chunks and assembles a scatter/gather
2531  * list to use for microcode download
2532  *
2533  * Return value:
2534  * 	pointer to sglist / NULL on failure
2535  **/
2536 static struct ipr_sglist *ipr_alloc_ucode_buffer(int buf_len)
2537 {
2538 	int sg_size, order, bsize_elem, num_elem, i, j;
2539 	struct ipr_sglist *sglist;
2540 	struct scatterlist *scatterlist;
2541 	struct page *page;
2542 
2543 	/* Get the minimum size per scatter/gather element */
2544 	sg_size = buf_len / (IPR_MAX_SGLIST - 1);
2545 
2546 	/* Get the actual size per element */
2547 	order = get_order(sg_size);
2548 
2549 	/* Determine the actual number of bytes per element */
2550 	bsize_elem = PAGE_SIZE * (1 << order);
2551 
2552 	/* Determine the actual number of sg entries needed */
2553 	if (buf_len % bsize_elem)
2554 		num_elem = (buf_len / bsize_elem) + 1;
2555 	else
2556 		num_elem = buf_len / bsize_elem;
2557 
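	/* (struct ipr_sglist ends in a one element scatterlist array, hence num_elem - 1) */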
2558 	/* Allocate a scatter/gather list for the DMA */
2559 	sglist = kzalloc(sizeof(struct ipr_sglist) +
2560 			 (sizeof(struct scatterlist) * (num_elem - 1)),
2561 			 GFP_KERNEL);
2562 
2563 	if (sglist == NULL) {
2564 		ipr_trace;
2565 		return NULL;
2566 	}
2567 
2568 	scatterlist = sglist->scatterlist;
2569 
2570 	sglist->order = order;
2571 	sglist->num_sg = num_elem;
2572 
2573 	/* Allocate a bunch of sg elements */
2574 	for (i = 0; i < num_elem; i++) {
2575 		page = alloc_pages(GFP_KERNEL, order);
2576 		if (!page) {
2577 			ipr_trace;
2578 
2579 			/* Free up what we already allocated */
2580 			for (j = i - 1; j >= 0; j--)
2581 				__free_pages(scatterlist[j].page, order);
2582 			kfree(sglist);
2583 			return NULL;
2584 		}
2585 
2586 		scatterlist[i].page = page;
2587 	}
2588 
2589 	return sglist;
2590 }
2591 
2592 /**
2593  * ipr_free_ucode_buffer - Frees a microcode download buffer
 * @sglist:		scatter/gather list pointer
2595  *
2596  * Free a DMA'able ucode download buffer previously allocated with
2597  * ipr_alloc_ucode_buffer
2598  *
2599  * Return value:
2600  * 	nothing
2601  **/
2602 static void ipr_free_ucode_buffer(struct ipr_sglist *sglist)
2603 {
2604 	int i;
2605 
2606 	for (i = 0; i < sglist->num_sg; i++)
2607 		__free_pages(sglist->scatterlist[i].page, sglist->order);
2608 
2609 	kfree(sglist);
2610 }
2611 
2612 /**
2613  * ipr_copy_ucode_buffer - Copy user buffer to kernel buffer
2614  * @sglist:		scatter/gather list pointer
2615  * @buffer:		buffer pointer
2616  * @len:		buffer length
2617  *
2618  * Copy a microcode image from a user buffer into a buffer allocated by
2619  * ipr_alloc_ucode_buffer
2620  *
2621  * Return value:
2622  * 	0 on success / other on failure
2623  **/
2624 static int ipr_copy_ucode_buffer(struct ipr_sglist *sglist,
2625 				 u8 *buffer, u32 len)
2626 {
2627 	int bsize_elem, i, result = 0;
2628 	struct scatterlist *scatterlist;
2629 	void *kaddr;
2630 
2631 	/* Determine the actual number of bytes per element */
2632 	bsize_elem = PAGE_SIZE * (1 << sglist->order);
2633 
2634 	scatterlist = sglist->scatterlist;
2635 
2636 	for (i = 0; i < (len / bsize_elem); i++, buffer += bsize_elem) {
2637 		kaddr = kmap(scatterlist[i].page);
2638 		memcpy(kaddr, buffer, bsize_elem);
2639 		kunmap(scatterlist[i].page);
2640 
2641 		scatterlist[i].length = bsize_elem;
2647 	}
2648 
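	/* Copy any remaining partial element */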
2649 	if (len % bsize_elem) {
2650 		kaddr = kmap(scatterlist[i].page);
2651 		memcpy(kaddr, buffer, len % bsize_elem);
2652 		kunmap(scatterlist[i].page);
2653 
2654 		scatterlist[i].length = len % bsize_elem;
2655 	}
2656 
2657 	sglist->buffer_len = len;
2658 	return result;
2659 }
2660 
2661 /**
2662  * ipr_build_ucode_ioadl - Build a microcode download IOADL
2663  * @ipr_cmd:	ipr command struct
2664  * @sglist:		scatter/gather list
2665  *
2666  * Builds a microcode download IOA data list (IOADL).
 *
 * Return value:
 * 	nothing
 **/
2669 static void ipr_build_ucode_ioadl(struct ipr_cmnd *ipr_cmd,
2670 				  struct ipr_sglist *sglist)
2671 {
2672 	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
2673 	struct ipr_ioadl_desc *ioadl = ipr_cmd->ioadl;
2674 	struct scatterlist *scatterlist = sglist->scatterlist;
2675 	int i;
2676 
2677 	ipr_cmd->dma_use_sg = sglist->num_dma_sg;
2678 	ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
2679 	ioarcb->write_data_transfer_length = cpu_to_be32(sglist->buffer_len);
2680 	ioarcb->write_ioadl_len =
2681 		cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
2682 
2683 	for (i = 0; i < ipr_cmd->dma_use_sg; i++) {
2684 		ioadl[i].flags_and_data_len =
2685 			cpu_to_be32(IPR_IOADL_FLAGS_WRITE | sg_dma_len(&scatterlist[i]));
2686 		ioadl[i].address =
2687 			cpu_to_be32(sg_dma_address(&scatterlist[i]));
2688 	}
2689 
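	/* Flag the final descriptor so the IOA knows where the list ends */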
2690 	ioadl[i-1].flags_and_data_len |=
2691 		cpu_to_be32(IPR_IOADL_FLAGS_LAST);
2692 }
2693 
2694 /**
2695  * ipr_update_ioa_ucode - Update IOA's microcode
2696  * @ioa_cfg:	ioa config struct
2697  * @sglist:		scatter/gather list
2698  *
2699  * Initiate an adapter reset to update the IOA's microcode
2700  *
2701  * Return value:
2702  * 	0 on success / -EIO on failure
2703  **/
2704 static int ipr_update_ioa_ucode(struct ipr_ioa_cfg *ioa_cfg,
2705 				struct ipr_sglist *sglist)
2706 {
2707 	unsigned long lock_flags;
2708 
2709 	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2710 
2711 	if (ioa_cfg->ucode_sglist) {
2712 		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2713 		dev_err(&ioa_cfg->pdev->dev,
2714 			"Microcode download already in progress\n");
2715 		return -EIO;
2716 	}
2717 
2718 	sglist->num_dma_sg = pci_map_sg(ioa_cfg->pdev, sglist->scatterlist,
					sglist->num_sg, PCI_DMA_TODEVICE);
2720 
2721 	if (!sglist->num_dma_sg) {
2722 		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2723 		dev_err(&ioa_cfg->pdev->dev,
2724 			"Failed to map microcode download buffer!\n");
2725 		return -EIO;
2726 	}
2727 
2728 	ioa_cfg->ucode_sglist = sglist;
2729 	ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NORMAL);
2730 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2731 	wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
2732 
2733 	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2734 	ioa_cfg->ucode_sglist = NULL;
2735 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2736 	return 0;
2737 }
2738 
2739 /**
2740  * ipr_store_update_fw - Update the firmware on the adapter
2741  * @class_dev:	class_device struct
2742  * @buf:		buffer
2743  * @count:		buffer size
2744  *
2745  * This function will update the firmware on the adapter.
2746  *
2747  * Return value:
2748  * 	count on success / other on failure
2749  **/
2750 static ssize_t ipr_store_update_fw(struct class_device *class_dev,
2751 				       const char *buf, size_t count)
2752 {
2753 	struct Scsi_Host *shost = class_to_shost(class_dev);
2754 	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
2755 	struct ipr_ucode_image_header *image_hdr;
2756 	const struct firmware *fw_entry;
2757 	struct ipr_sglist *sglist;
2758 	char fname[100];
	u8 *src;
2760 	int len, result, dnld_size;
2761 
2762 	if (!capable(CAP_SYS_ADMIN))
2763 		return -EACCES;
2764 
	/* Copy the firmware file name, stripping any trailing newline */
	len = snprintf(fname, sizeof(fname), "%s", buf);
	if (len > 0 && len < sizeof(fname) && fname[len - 1] == '\n')
		fname[len - 1] = '\0';
2767 
	if (request_firmware(&fw_entry, fname, &ioa_cfg->pdev->dev)) {
2769 		dev_err(&ioa_cfg->pdev->dev, "Firmware file %s not found\n", fname);
2770 		return -EIO;
2771 	}
2772 
2773 	image_hdr = (struct ipr_ucode_image_header *)fw_entry->data;
2774 
2775 	if (be32_to_cpu(image_hdr->header_length) > fw_entry->size ||
2776 	    (ioa_cfg->vpd_cbs->page3_data.card_type &&
2777 	     ioa_cfg->vpd_cbs->page3_data.card_type != image_hdr->card_type)) {
2778 		dev_err(&ioa_cfg->pdev->dev, "Invalid microcode buffer\n");
2779 		release_firmware(fw_entry);
2780 		return -EINVAL;
2781 	}
2782 
2783 	src = (u8 *)image_hdr + be32_to_cpu(image_hdr->header_length);
2784 	dnld_size = fw_entry->size - be32_to_cpu(image_hdr->header_length);
2785 	sglist = ipr_alloc_ucode_buffer(dnld_size);
2786 
2787 	if (!sglist) {
2788 		dev_err(&ioa_cfg->pdev->dev, "Microcode buffer allocation failed\n");
2789 		release_firmware(fw_entry);
2790 		return -ENOMEM;
2791 	}
2792 
2793 	result = ipr_copy_ucode_buffer(sglist, src, dnld_size);
2794 
2795 	if (result) {
2796 		dev_err(&ioa_cfg->pdev->dev,
2797 			"Microcode buffer copy to DMA buffer failed\n");
2798 		goto out;
2799 	}
2800 
2801 	result = ipr_update_ioa_ucode(ioa_cfg, sglist);
2802 
2803 	if (!result)
2804 		result = count;
2805 out:
2806 	ipr_free_ucode_buffer(sglist);
2807 	release_firmware(fw_entry);
2808 	return result;
2809 }
2810 
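/*
 * Firmware updates are driven through sysfs. A minimal sketch of the
 * userspace side, assuming a hypothetical host number and image name
 * (the image is fetched via request_firmware(), i.e. from the firmware
 * loader's search path, typically /lib/firmware):
 *
 *	echo ipr-ucode.bin > /sys/class/scsi_host/host0/update_fw
 */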
2811 static struct class_device_attribute ipr_update_fw_attr = {
2812 	.attr = {
2813 		.name =		"update_fw",
2814 		.mode =		S_IWUSR,
2815 	},
2816 	.store = ipr_store_update_fw
2817 };
2818 
2819 static struct class_device_attribute *ipr_ioa_attrs[] = {
2820 	&ipr_fw_version_attr,
2821 	&ipr_log_level_attr,
2822 	&ipr_diagnostics_attr,
2823 	&ipr_ioa_state_attr,
2824 	&ipr_ioa_reset_attr,
2825 	&ipr_update_fw_attr,
2826 	&ipr_ioa_cache_attr,
2827 	NULL,
2828 };
2829 
2830 #ifdef CONFIG_SCSI_IPR_DUMP
2831 /**
2832  * ipr_read_dump - Dump the adapter
2833  * @kobj:		kobject struct
2834  * @buf:		buffer
2835  * @off:		offset
2836  * @count:		buffer size
2837  *
2838  * Return value:
 *	number of bytes read
2840  **/
2841 static ssize_t ipr_read_dump(struct kobject *kobj, char *buf,
2842 			      loff_t off, size_t count)
2843 {
	struct class_device *cdev = container_of(kobj, struct class_device, kobj);
2845 	struct Scsi_Host *shost = class_to_shost(cdev);
2846 	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
2847 	struct ipr_dump *dump;
2848 	unsigned long lock_flags = 0;
	u8 *src;
2850 	int len;
2851 	size_t rc = count;
2852 
2853 	if (!capable(CAP_SYS_ADMIN))
2854 		return -EACCES;
2855 
2856 	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2857 	dump = ioa_cfg->dump;
2858 
2859 	if (ioa_cfg->sdt_state != DUMP_OBTAINED || !dump) {
2860 		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2861 		return 0;
2862 	}
2863 	kref_get(&dump->kref);
2864 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2865 
2866 	if (off > dump->driver_dump.hdr.len) {
2867 		kref_put(&dump->kref, ipr_release_dump);
2868 		return 0;
2869 	}
2870 
2871 	if (off + count > dump->driver_dump.hdr.len) {
2872 		count = dump->driver_dump.hdr.len - off;
2873 		rc = count;
2874 	}
2875 
2876 	if (count && off < sizeof(dump->driver_dump)) {
2877 		if (off + count > sizeof(dump->driver_dump))
2878 			len = sizeof(dump->driver_dump) - off;
2879 		else
2880 			len = count;
2881 		src = (u8 *)&dump->driver_dump + off;
2882 		memcpy(buf, src, len);
2883 		buf += len;
2884 		off += len;
2885 		count -= len;
2886 	}
2887 
2888 	off -= sizeof(dump->driver_dump);
2889 
2890 	if (count && off < offsetof(struct ipr_ioa_dump, ioa_data)) {
2891 		if (off + count > offsetof(struct ipr_ioa_dump, ioa_data))
2892 			len = offsetof(struct ipr_ioa_dump, ioa_data) - off;
2893 		else
2894 			len = count;
2895 		src = (u8 *)&dump->ioa_dump + off;
2896 		memcpy(buf, src, len);
2897 		buf += len;
2898 		off += len;
2899 		count -= len;
2900 	}
2901 
2902 	off -= offsetof(struct ipr_ioa_dump, ioa_data);
2903 
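	/* Whatever remains comes from the page-indexed ioa_data array */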
2904 	while (count) {
2905 		if ((off & PAGE_MASK) != ((off + count) & PAGE_MASK))
2906 			len = PAGE_ALIGN(off) - off;
2907 		else
2908 			len = count;
2909 		src = (u8 *)dump->ioa_dump.ioa_data[(off & PAGE_MASK) >> PAGE_SHIFT];
2910 		src += off & ~PAGE_MASK;
2911 		memcpy(buf, src, len);
2912 		buf += len;
2913 		off += len;
2914 		count -= len;
2915 	}
2916 
2917 	kref_put(&dump->kref, ipr_release_dump);
2918 	return rc;
2919 }
2920 
2921 /**
2922  * ipr_alloc_dump - Prepare for adapter dump
2923  * @ioa_cfg:	ioa config struct
2924  *
2925  * Return value:
2926  *	0 on success / other on failure
2927  **/
2928 static int ipr_alloc_dump(struct ipr_ioa_cfg *ioa_cfg)
2929 {
2930 	struct ipr_dump *dump;
2931 	unsigned long lock_flags = 0;
2932 
2933 	ENTER;
2934 	dump = kzalloc(sizeof(struct ipr_dump), GFP_KERNEL);
2935 
2936 	if (!dump) {
2937 		ipr_err("Dump memory allocation failed\n");
2938 		return -ENOMEM;
2939 	}
2940 
2941 	kref_init(&dump->kref);
2942 	dump->ioa_cfg = ioa_cfg;
2943 
2944 	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2945 
2946 	if (INACTIVE != ioa_cfg->sdt_state) {
2947 		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2948 		kfree(dump);
2949 		return 0;
2950 	}
2951 
2952 	ioa_cfg->dump = dump;
2953 	ioa_cfg->sdt_state = WAIT_FOR_DUMP;
2954 	if (ioa_cfg->ioa_is_dead && !ioa_cfg->dump_taken) {
2955 		ioa_cfg->dump_taken = 1;
2956 		schedule_work(&ioa_cfg->work_q);
2957 	}
2958 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2959 
2960 	LEAVE;
2961 	return 0;
2962 }
2963 
2964 /**
2965  * ipr_free_dump - Free adapter dump memory
2966  * @ioa_cfg:	ioa config struct
2967  *
2968  * Return value:
2969  *	0 on success / other on failure
2970  **/
2971 static int ipr_free_dump(struct ipr_ioa_cfg *ioa_cfg)
2972 {
2973 	struct ipr_dump *dump;
2974 	unsigned long lock_flags = 0;
2975 
2976 	ENTER;
2977 
2978 	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2979 	dump = ioa_cfg->dump;
2980 	if (!dump) {
2981 		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2982 		return 0;
2983 	}
2984 
2985 	ioa_cfg->dump = NULL;
2986 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2987 
2988 	kref_put(&dump->kref, ipr_release_dump);
2989 
2990 	LEAVE;
2991 	return 0;
2992 }
2993 
2994 /**
2995  * ipr_write_dump - Setup dump state of adapter
2996  * @kobj:		kobject struct
2997  * @buf:		buffer
2998  * @off:		offset
2999  * @count:		buffer size
3000  *
3001  * Return value:
 *	count on success / other on failure
3003  **/
3004 static ssize_t ipr_write_dump(struct kobject *kobj, char *buf,
3005 			      loff_t off, size_t count)
3006 {
	struct class_device *cdev = container_of(kobj, struct class_device, kobj);
3008 	struct Scsi_Host *shost = class_to_shost(cdev);
3009 	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3010 	int rc;
3011 
3012 	if (!capable(CAP_SYS_ADMIN))
3013 		return -EACCES;
3014 
3015 	if (buf[0] == '1')
3016 		rc = ipr_alloc_dump(ioa_cfg);
3017 	else if (buf[0] == '0')
3018 		rc = ipr_free_dump(ioa_cfg);
3019 	else
3020 		return -EINVAL;
3021 
3022 	if (rc)
3023 		return rc;
3024 	else
3025 		return count;
3026 }
3027 
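/*
 * Dump collection is driven through sysfs. A minimal sketch of the
 * userspace side, assuming a hypothetical host number:
 *
 *	echo 1 > /sys/class/scsi_host/host0/dump	# prepare dump memory
 *	cat /sys/class/scsi_host/host0/dump > ioa.dump	# read the dump
 *	echo 0 > /sys/class/scsi_host/host0/dump	# free dump memory
 */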
3028 static struct bin_attribute ipr_dump_attr = {
3029 	.attr =	{
3030 		.name = "dump",
3031 		.mode = S_IRUSR | S_IWUSR,
3032 	},
3033 	.size = 0,
3034 	.read = ipr_read_dump,
3035 	.write = ipr_write_dump
3036 };
3037 #else
static int ipr_free_dump(struct ipr_ioa_cfg *ioa_cfg) { return 0; }
3039 #endif
3040 
3041 /**
3042  * ipr_change_queue_depth - Change the device's queue depth
3043  * @sdev:	scsi device struct
3044  * @qdepth:	depth to set
3045  *
3046  * Return value:
3047  * 	actual depth set
3048  **/
3049 static int ipr_change_queue_depth(struct scsi_device *sdev, int qdepth)
3050 {
3051 	scsi_adjust_queue_depth(sdev, scsi_get_tag_type(sdev), qdepth);
3052 	return sdev->queue_depth;
3053 }
3054 
3055 /**
3056  * ipr_change_queue_type - Change the device's queue type
 * @sdev:		scsi device struct
3058  * @tag_type:	type of tags to use
3059  *
3060  * Return value:
3061  * 	actual queue type set
3062  **/
3063 static int ipr_change_queue_type(struct scsi_device *sdev, int tag_type)
3064 {
3065 	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
3066 	struct ipr_resource_entry *res;
3067 	unsigned long lock_flags = 0;
3068 
3069 	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3070 	res = (struct ipr_resource_entry *)sdev->hostdata;
3071 
3072 	if (res) {
3073 		if (ipr_is_gscsi(res) && sdev->tagged_supported) {
3074 			/*
3075 			 * We don't bother quiescing the device here since the
3076 			 * adapter firmware does it for us.
3077 			 */
3078 			scsi_set_tag_type(sdev, tag_type);
3079 
3080 			if (tag_type)
3081 				scsi_activate_tcq(sdev, sdev->queue_depth);
3082 			else
3083 				scsi_deactivate_tcq(sdev, sdev->queue_depth);
3084 		} else
3085 			tag_type = 0;
3086 	} else
3087 		tag_type = 0;
3088 
3089 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3090 	return tag_type;
3091 }
3092 
3093 /**
3094  * ipr_show_adapter_handle - Show the adapter's resource handle for this device
 * @dev:	device struct
 * @attr:	device attribute struct
 * @buf:	buffer
3097  *
3098  * Return value:
3099  * 	number of bytes printed to buffer
3100  **/
3101 static ssize_t ipr_show_adapter_handle(struct device *dev, struct device_attribute *attr, char *buf)
3102 {
3103 	struct scsi_device *sdev = to_scsi_device(dev);
3104 	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
3105 	struct ipr_resource_entry *res;
3106 	unsigned long lock_flags = 0;
3107 	ssize_t len = -ENXIO;
3108 
3109 	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3110 	res = (struct ipr_resource_entry *)sdev->hostdata;
3111 	if (res)
3112 		len = snprintf(buf, PAGE_SIZE, "%08X\n", res->cfgte.res_handle);
3113 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3114 	return len;
3115 }
3116 
3117 static struct device_attribute ipr_adapter_handle_attr = {
3118 	.attr = {
3119 		.name = 	"adapter_handle",
3120 		.mode =		S_IRUSR,
3121 	},
3122 	.show = ipr_show_adapter_handle
3123 };
3124 
3125 static struct device_attribute *ipr_dev_attrs[] = {
3126 	&ipr_adapter_handle_attr,
3127 	NULL,
3128 };
3129 
3130 /**
3131  * ipr_biosparam - Return the HSC mapping
3132  * @sdev:			scsi device struct
3133  * @block_device:	block device pointer
3134  * @capacity:		capacity of the device
3135  * @parm:			Array containing returned HSC values.
3136  *
3137  * This function generates the HSC parms that fdisk uses.
3138  * We want to make sure we return something that places partitions
3139  * on 4k boundaries for best performance with the IOA.
3140  *
3141  * Return value:
3142  * 	0 on success
3143  **/
3144 static int ipr_biosparam(struct scsi_device *sdev,
3145 			 struct block_device *block_device,
3146 			 sector_t capacity, int *parm)
3147 {
3148 	int heads, sectors;
3149 	sector_t cylinders;
3150 
3151 	heads = 128;
3152 	sectors = 32;
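	/* 128 heads * 32 sectors = 4096 sectors/cylinder, so cylinder boundaries stay 4k aligned */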
3153 
3154 	cylinders = capacity;
3155 	sector_div(cylinders, (128 * 32));
3156 
3157 	/* return result */
3158 	parm[0] = heads;
3159 	parm[1] = sectors;
3160 	parm[2] = cylinders;
3161 
3162 	return 0;
3163 }
3164 
3165 /**
3166  * ipr_slave_destroy - Unconfigure a SCSI device
3167  * @sdev:	scsi device struct
3168  *
3169  * Return value:
3170  * 	nothing
3171  **/
3172 static void ipr_slave_destroy(struct scsi_device *sdev)
3173 {
3174 	struct ipr_resource_entry *res;
3175 	struct ipr_ioa_cfg *ioa_cfg;
3176 	unsigned long lock_flags = 0;
3177 
3178 	ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;
3179 
3180 	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3181 	res = (struct ipr_resource_entry *) sdev->hostdata;
3182 	if (res) {
3183 		sdev->hostdata = NULL;
3184 		res->sdev = NULL;
3185 	}
3186 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3187 }
3188 
3189 /**
3190  * ipr_slave_configure - Configure a SCSI device
3191  * @sdev:	scsi device struct
3192  *
3193  * This function configures the specified scsi device.
3194  *
3195  * Return value:
3196  * 	0 on success
3197  **/
3198 static int ipr_slave_configure(struct scsi_device *sdev)
3199 {
3200 	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;
3201 	struct ipr_resource_entry *res;
3202 	unsigned long lock_flags = 0;
3203 
3204 	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3205 	res = sdev->hostdata;
3206 	if (res) {
3207 		if (ipr_is_af_dasd_device(res))
3208 			sdev->type = TYPE_RAID;
3209 		if (ipr_is_af_dasd_device(res) || ipr_is_ioa_resource(res)) {
3210 			sdev->scsi_level = 4;
3211 			sdev->no_uld_attach = 1;
3212 		}
3213 		if (ipr_is_vset_device(res)) {
3214 			sdev->timeout = IPR_VSET_RW_TIMEOUT;
3215 			blk_queue_max_sectors(sdev->request_queue, IPR_VSET_MAX_SECTORS);
3216 		}
3217 		if (IPR_IS_DASD_DEVICE(res->cfgte.std_inq_data))
3218 			sdev->allow_restart = 1;
3219 		scsi_adjust_queue_depth(sdev, 0, sdev->host->cmd_per_lun);
3220 	}
3221 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3222 	return 0;
3223 }
3224 
3225 /**
3226  * ipr_slave_alloc - Prepare for commands to a device.
3227  * @sdev:	scsi device struct
3228  *
3229  * This function saves a pointer to the resource entry
3230  * in the scsi device struct if the device exists. We
3231  * can then use this pointer in ipr_queuecommand when
3232  * handling new commands.
3233  *
3234  * Return value:
3235  * 	0 on success / -ENXIO if device does not exist
3236  **/
3237 static int ipr_slave_alloc(struct scsi_device *sdev)
3238 {
3239 	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;
3240 	struct ipr_resource_entry *res;
3241 	unsigned long lock_flags;
3242 	int rc = -ENXIO;
3243 
3244 	sdev->hostdata = NULL;
3245 
3246 	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3247 
3248 	list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
3249 		if ((res->cfgte.res_addr.bus == sdev->channel) &&
3250 		    (res->cfgte.res_addr.target == sdev->id) &&
3251 		    (res->cfgte.res_addr.lun == sdev->lun)) {
3252 			res->sdev = sdev;
3253 			res->add_to_ml = 0;
3254 			res->in_erp = 0;
3255 			sdev->hostdata = res;
3256 			if (!ipr_is_naca_model(res))
3257 				res->needs_sync_complete = 1;
3258 			rc = 0;
3259 			break;
3260 		}
3261 	}
3262 
3263 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3264 
3265 	return rc;
3266 }
3267 
3268 /**
 * __ipr_eh_host_reset - Reset the host adapter
3270  * @scsi_cmd:	scsi command struct
3271  *
3272  * Return value:
3273  * 	SUCCESS / FAILED
3274  **/
static int __ipr_eh_host_reset(struct scsi_cmnd *scsi_cmd)
3276 {
3277 	struct ipr_ioa_cfg *ioa_cfg;
3278 	int rc;
3279 
3280 	ENTER;
3281 	ioa_cfg = (struct ipr_ioa_cfg *) scsi_cmd->device->host->hostdata;
3282 
3283 	dev_err(&ioa_cfg->pdev->dev,
3284 		"Adapter being reset as a result of error recovery.\n");
3285 
3286 	if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
3287 		ioa_cfg->sdt_state = GET_DUMP;
3288 
3289 	rc = ipr_reset_reload(ioa_cfg, IPR_SHUTDOWN_ABBREV);
3290 
3291 	LEAVE;
3292 	return rc;
3293 }
3294 
static int ipr_eh_host_reset(struct scsi_cmnd *cmd)
3296 {
3297 	int rc;
3298 
3299 	spin_lock_irq(cmd->device->host->host_lock);
3300 	rc = __ipr_eh_host_reset(cmd);
3301 	spin_unlock_irq(cmd->device->host->host_lock);
3302 
3303 	return rc;
3304 }
3305 
3306 /**
 * __ipr_eh_dev_reset - Reset the device
3308  * @scsi_cmd:	scsi command struct
3309  *
3310  * This function issues a device reset to the affected device.
3311  * A LUN reset will be sent to the device first. If that does
3312  * not work, a target reset will be sent.
3313  *
3314  * Return value:
3315  *	SUCCESS / FAILED
3316  **/
static int __ipr_eh_dev_reset(struct scsi_cmnd *scsi_cmd)
3318 {
3319 	struct ipr_cmnd *ipr_cmd;
3320 	struct ipr_ioa_cfg *ioa_cfg;
3321 	struct ipr_resource_entry *res;
3322 	struct ipr_cmd_pkt *cmd_pkt;
3323 	u32 ioasc;
3324 
3325 	ENTER;
3326 	ioa_cfg = (struct ipr_ioa_cfg *) scsi_cmd->device->host->hostdata;
3327 	res = scsi_cmd->device->hostdata;
3328 
3329 	if (!res)
3330 		return FAILED;
3331 
3332 	/*
	 * If we are currently going through reset/reload, return failed.
	 * This will force the mid-layer to call ipr_eh_host_reset, which
	 * will then go to sleep and wait for the reset to complete.
3336 	 */
3337 	if (ioa_cfg->in_reset_reload)
3338 		return FAILED;
3339 	if (ioa_cfg->ioa_is_dead)
3340 		return FAILED;
3341 
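	/*
	 * Steer completions of any ops outstanding to this device
	 * through the SCSI EH done routine
	 */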
3342 	list_for_each_entry(ipr_cmd, &ioa_cfg->pending_q, queue) {
3343 		if (ipr_cmd->ioarcb.res_handle == res->cfgte.res_handle) {
3344 			if (ipr_cmd->scsi_cmd)
3345 				ipr_cmd->done = ipr_scsi_eh_done;
3346 		}
3347 	}
3348 
3349 	res->resetting_device = 1;
3350 
3351 	ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
3352 
3353 	ipr_cmd->ioarcb.res_handle = res->cfgte.res_handle;
3354 	cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt;
3355 	cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
3356 	cmd_pkt->cdb[0] = IPR_RESET_DEVICE;
3357 
3358 	ipr_sdev_err(scsi_cmd->device, "Resetting device\n");
3359 	ipr_send_blocking_cmd(ipr_cmd, ipr_timeout, IPR_DEVICE_RESET_TIMEOUT);
3360 
3361 	ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
3362 
3363 	res->resetting_device = 0;
3364 
3365 	list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
3366 
3367 	LEAVE;
3368 	return (IPR_IOASC_SENSE_KEY(ioasc) ? FAILED : SUCCESS);
3369 }
3370 
static int ipr_eh_dev_reset(struct scsi_cmnd *cmd)
3372 {
3373 	int rc;
3374 
3375 	spin_lock_irq(cmd->device->host->host_lock);
3376 	rc = __ipr_eh_dev_reset(cmd);
3377 	spin_unlock_irq(cmd->device->host->host_lock);
3378 
3379 	return rc;
3380 }
3381 
3382 /**
3383  * ipr_bus_reset_done - Op done function for bus reset.
3384  * @ipr_cmd:	ipr command struct
3385  *
3386  * This function is the op done function for a bus reset
3387  *
3388  * Return value:
3389  * 	none
3390  **/
3391 static void ipr_bus_reset_done(struct ipr_cmnd *ipr_cmd)
3392 {
3393 	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
3394 	struct ipr_resource_entry *res;
3395 
3396 	ENTER;
3397 	list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
3398 		if (!memcmp(&res->cfgte.res_handle, &ipr_cmd->ioarcb.res_handle,
3399 			    sizeof(res->cfgte.res_handle))) {
3400 			scsi_report_bus_reset(ioa_cfg->host, res->cfgte.res_addr.bus);
3401 			break;
3402 		}
3403 	}
3404 
3405 	/*
3406 	 * If abort has not completed, indicate the reset has, else call the
3407 	 * abort's done function to wake the sleeping eh thread
3408 	 */
3409 	if (ipr_cmd->sibling->sibling)
3410 		ipr_cmd->sibling->sibling = NULL;
3411 	else
3412 		ipr_cmd->sibling->done(ipr_cmd->sibling);
3413 
3414 	list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
3415 	LEAVE;
3416 }
3417 
3418 /**
3419  * ipr_abort_timeout - An abort task has timed out
3420  * @ipr_cmd:	ipr command struct
3421  *
3422  * This function handles when an abort task times out. If this
3423  * happens we issue a bus reset since we have resources tied
3424  * up that must be freed before returning to the midlayer.
3425  *
3426  * Return value:
3427  *	none
3428  **/
3429 static void ipr_abort_timeout(struct ipr_cmnd *ipr_cmd)
3430 {
3431 	struct ipr_cmnd *reset_cmd;
3432 	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
3433 	struct ipr_cmd_pkt *cmd_pkt;
3434 	unsigned long lock_flags = 0;
3435 
3436 	ENTER;
3437 	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3438 	if (ipr_cmd->completion.done || ioa_cfg->in_reset_reload) {
3439 		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3440 		return;
3441 	}
3442 
3443 	ipr_sdev_err(ipr_cmd->u.sdev, "Abort timed out. Resetting bus\n");
3444 	reset_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
3445 	ipr_cmd->sibling = reset_cmd;
3446 	reset_cmd->sibling = ipr_cmd;
3447 	reset_cmd->ioarcb.res_handle = ipr_cmd->ioarcb.res_handle;
3448 	cmd_pkt = &reset_cmd->ioarcb.cmd_pkt;
3449 	cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
3450 	cmd_pkt->cdb[0] = IPR_RESET_DEVICE;
3451 	cmd_pkt->cdb[2] = IPR_RESET_TYPE_SELECT | IPR_BUS_RESET;
3452 
3453 	ipr_do_req(reset_cmd, ipr_bus_reset_done, ipr_timeout, IPR_DEVICE_RESET_TIMEOUT);
3454 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3455 	LEAVE;
3456 }
3457 
3458 /**
3459  * ipr_cancel_op - Cancel specified op
3460  * @scsi_cmd:	scsi command struct
3461  *
3462  * This function cancels specified op.
3463  *
3464  * Return value:
3465  *	SUCCESS / FAILED
3466  **/
static int ipr_cancel_op(struct scsi_cmnd *scsi_cmd)
3468 {
3469 	struct ipr_cmnd *ipr_cmd;
3470 	struct ipr_ioa_cfg *ioa_cfg;
3471 	struct ipr_resource_entry *res;
3472 	struct ipr_cmd_pkt *cmd_pkt;
3473 	u32 ioasc;
3474 	int op_found = 0;
3475 
3476 	ENTER;
3477 	ioa_cfg = (struct ipr_ioa_cfg *)scsi_cmd->device->host->hostdata;
3478 	res = scsi_cmd->device->hostdata;
3479 
3480 	/* If we are currently going through reset/reload, return failed.
3481 	 * This will force the mid-layer to call ipr_eh_host_reset,
3482 	 * which will then go to sleep and wait for the reset to complete
3483 	 */
3484 	if (ioa_cfg->in_reset_reload || ioa_cfg->ioa_is_dead)
3485 		return FAILED;
3486 	if (!res || (!ipr_is_gscsi(res) && !ipr_is_vset_device(res)))
3487 		return FAILED;
3488 
3489 	list_for_each_entry(ipr_cmd, &ioa_cfg->pending_q, queue) {
3490 		if (ipr_cmd->scsi_cmd == scsi_cmd) {
3491 			ipr_cmd->done = ipr_scsi_eh_done;
3492 			op_found = 1;
3493 			break;
3494 		}
3495 	}
3496 
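	/* Nothing to cancel if the op has already completed */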
3497 	if (!op_found)
3498 		return SUCCESS;
3499 
3500 	ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
3501 	ipr_cmd->ioarcb.res_handle = res->cfgte.res_handle;
3502 	cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt;
3503 	cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
3504 	cmd_pkt->cdb[0] = IPR_CANCEL_ALL_REQUESTS;
3505 	ipr_cmd->u.sdev = scsi_cmd->device;
3506 
3507 	ipr_sdev_err(scsi_cmd->device, "Aborting command: %02X\n", scsi_cmd->cmnd[0]);
3508 	ipr_send_blocking_cmd(ipr_cmd, ipr_abort_timeout, IPR_CANCEL_ALL_TIMEOUT);
3509 	ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
3510 
3511 	/*
3512 	 * If the abort task timed out and we sent a bus reset, we will get
	 * one of the following responses to the abort
3514 	 */
3515 	if (ioasc == IPR_IOASC_BUS_WAS_RESET || ioasc == IPR_IOASC_SYNC_REQUIRED) {
3516 		ioasc = 0;
3517 		ipr_trace;
3518 	}
3519 
3520 	list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
3521 	if (!ipr_is_naca_model(res))
3522 		res->needs_sync_complete = 1;
3523 
3524 	LEAVE;
3525 	return (IPR_IOASC_SENSE_KEY(ioasc) ? FAILED : SUCCESS);
3526 }
3527 
3528 /**
3529  * ipr_eh_abort - Abort a single op
3530  * @scsi_cmd:	scsi command struct
3531  *
3532  * Return value:
3533  * 	SUCCESS / FAILED
3534  **/
static int ipr_eh_abort(struct scsi_cmnd *scsi_cmd)
3536 {
3537 	unsigned long flags;
3538 	int rc;
3539 
3540 	ENTER;
3541 
3542 	spin_lock_irqsave(scsi_cmd->device->host->host_lock, flags);
3543 	rc = ipr_cancel_op(scsi_cmd);
3544 	spin_unlock_irqrestore(scsi_cmd->device->host->host_lock, flags);
3545 
3546 	LEAVE;
3547 	return rc;
3548 }
3549 
3550 /**
3551  * ipr_handle_other_interrupt - Handle "other" interrupts
3552  * @ioa_cfg:	ioa config struct
3553  * @int_reg:	interrupt register
3554  *
3555  * Return value:
3556  * 	IRQ_NONE / IRQ_HANDLED
3557  **/
3558 static irqreturn_t ipr_handle_other_interrupt(struct ipr_ioa_cfg *ioa_cfg,
3559 					      volatile u32 int_reg)
3560 {
3561 	irqreturn_t rc = IRQ_HANDLED;
3562 
3563 	if (int_reg & IPR_PCII_IOA_TRANS_TO_OPER) {
3564 		/* Mask the interrupt */
3565 		writel(IPR_PCII_IOA_TRANS_TO_OPER, ioa_cfg->regs.set_interrupt_mask_reg);
3566 
3567 		/* Clear the interrupt */
3568 		writel(IPR_PCII_IOA_TRANS_TO_OPER, ioa_cfg->regs.clr_interrupt_reg);
3569 		int_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
3570 
3571 		list_del(&ioa_cfg->reset_cmd->queue);
3572 		del_timer(&ioa_cfg->reset_cmd->timer);
3573 		ipr_reset_ioa_job(ioa_cfg->reset_cmd);
3574 	} else {
3575 		if (int_reg & IPR_PCII_IOA_UNIT_CHECKED)
3576 			ioa_cfg->ioa_unit_checked = 1;
3577 		else
3578 			dev_err(&ioa_cfg->pdev->dev,
3579 				"Permanent IOA failure. 0x%08X\n", int_reg);
3580 
3581 		if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
3582 			ioa_cfg->sdt_state = GET_DUMP;
3583 
3584 		ipr_mask_and_clear_interrupts(ioa_cfg, ~0);
3585 		ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
3586 	}
3587 
3588 	return rc;
3589 }
3590 
3591 /**
3592  * ipr_isr - Interrupt service routine
3593  * @irq:	irq number
3594  * @devp:	pointer to ioa config struct
3595  * @regs:	pt_regs struct
3596  *
3597  * Return value:
3598  * 	IRQ_NONE / IRQ_HANDLED
3599  **/
3600 static irqreturn_t ipr_isr(int irq, void *devp, struct pt_regs *regs)
3601 {
3602 	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)devp;
3603 	unsigned long lock_flags = 0;
3604 	volatile u32 int_reg, int_mask_reg;
3605 	u32 ioasc;
3606 	u16 cmd_index;
3607 	struct ipr_cmnd *ipr_cmd;
3608 	irqreturn_t rc = IRQ_NONE;
3609 
3610 	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3611 
3612 	/* If interrupts are disabled, ignore the interrupt */
3613 	if (!ioa_cfg->allow_interrupts) {
3614 		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3615 		return IRQ_NONE;
3616 	}
3617 
3618 	int_mask_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
3619 	int_reg = readl(ioa_cfg->regs.sense_interrupt_reg) & ~int_mask_reg;
3620 
	/* If no operational interrupt is pending on the adapter, it isn't ours */
3622 	if (unlikely((int_reg & IPR_PCII_OPER_INTERRUPTS) == 0)) {
3623 		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3624 		return IRQ_NONE;
3625 	}
3626 
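	/*
	 * Walk the host request/response queue (HRRQ). Each 32-bit entry
	 * holds a command block index plus a toggle bit; the adapter flips
	 * the toggle sense on every wrap of the circular queue, so an entry
	 * is only valid while its toggle bit matches ioa_cfg->toggle_bit.
	 */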
3627 	while (1) {
3628 		ipr_cmd = NULL;
3629 
3630 		while ((be32_to_cpu(*ioa_cfg->hrrq_curr) & IPR_HRRQ_TOGGLE_BIT) ==
3631 		       ioa_cfg->toggle_bit) {
3632 
3633 			cmd_index = (be32_to_cpu(*ioa_cfg->hrrq_curr) &
3634 				     IPR_HRRQ_REQ_RESP_HANDLE_MASK) >> IPR_HRRQ_REQ_RESP_HANDLE_SHIFT;
3635 
3636 			if (unlikely(cmd_index >= IPR_NUM_CMD_BLKS)) {
3637 				ioa_cfg->errors_logged++;
3638 				dev_err(&ioa_cfg->pdev->dev, "Invalid response handle from IOA\n");
3639 
3640 				if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
3641 					ioa_cfg->sdt_state = GET_DUMP;
3642 
3643 				ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
3644 				spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3645 				return IRQ_HANDLED;
3646 			}
3647 
3648 			ipr_cmd = ioa_cfg->ipr_cmnd_list[cmd_index];
3649 
3650 			ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
3651 
3652 			ipr_trc_hook(ipr_cmd, IPR_TRACE_FINISH, ioasc);
3653 
3654 			list_del(&ipr_cmd->queue);
3655 			del_timer(&ipr_cmd->timer);
3656 			ipr_cmd->done(ipr_cmd);
3657 
3658 			rc = IRQ_HANDLED;
3659 
3660 			if (ioa_cfg->hrrq_curr < ioa_cfg->hrrq_end) {
3661 				ioa_cfg->hrrq_curr++;
3662 			} else {
3663 				ioa_cfg->hrrq_curr = ioa_cfg->hrrq_start;
3664 				ioa_cfg->toggle_bit ^= 1u;
3665 			}
3666 		}
3667 
3668 		if (ipr_cmd != NULL) {
3669 			/* Clear the PCI interrupt */
3670 			writel(IPR_PCII_HRRQ_UPDATED, ioa_cfg->regs.clr_interrupt_reg);
3671 			int_reg = readl(ioa_cfg->regs.sense_interrupt_reg) & ~int_mask_reg;
3672 		} else
3673 			break;
3674 	}
3675 
3676 	if (unlikely(rc == IRQ_NONE))
3677 		rc = ipr_handle_other_interrupt(ioa_cfg, int_reg);
3678 
3679 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3680 	return rc;
3681 }
3682 
3683 /**
3684  * ipr_build_ioadl - Build a scatter/gather list and map the buffer
3685  * @ioa_cfg:	ioa config struct
3686  * @ipr_cmd:	ipr command struct
3687  *
3688  * Return value:
3689  * 	0 on success / -1 on failure
3690  **/
3691 static int ipr_build_ioadl(struct ipr_ioa_cfg *ioa_cfg,
3692 			   struct ipr_cmnd *ipr_cmd)
3693 {
3694 	int i;
3695 	struct scatterlist *sglist;
3696 	u32 length;
3697 	u32 ioadl_flags = 0;
3698 	struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
3699 	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
3700 	struct ipr_ioadl_desc *ioadl = ipr_cmd->ioadl;
3701 
3702 	length = scsi_cmd->request_bufflen;
3703 
3704 	if (length == 0)
3705 		return 0;
3706 
3707 	if (scsi_cmd->use_sg) {
3708 		ipr_cmd->dma_use_sg = pci_map_sg(ioa_cfg->pdev,
3709 						 scsi_cmd->request_buffer,
3710 						 scsi_cmd->use_sg,
3711 						 scsi_cmd->sc_data_direction);
3712 
3713 		if (scsi_cmd->sc_data_direction == DMA_TO_DEVICE) {
3714 			ioadl_flags = IPR_IOADL_FLAGS_WRITE;
3715 			ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
3716 			ioarcb->write_data_transfer_length = cpu_to_be32(length);
3717 			ioarcb->write_ioadl_len =
3718 				cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
3719 		} else if (scsi_cmd->sc_data_direction == DMA_FROM_DEVICE) {
3720 			ioadl_flags = IPR_IOADL_FLAGS_READ;
3721 			ioarcb->read_data_transfer_length = cpu_to_be32(length);
3722 			ioarcb->read_ioadl_len =
3723 				cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
3724 		}
3725 
3726 		sglist = scsi_cmd->request_buffer;
3727 
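		/*
		 * Each IOADL element packs the transfer flags and the byte
		 * count into one big-endian word; the adapter stops walking
		 * the list at the element flagged IPR_IOADL_FLAGS_LAST.
		 */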
3728 		for (i = 0; i < ipr_cmd->dma_use_sg; i++) {
3729 			ioadl[i].flags_and_data_len =
3730 				cpu_to_be32(ioadl_flags | sg_dma_len(&sglist[i]));
3731 			ioadl[i].address =
3732 				cpu_to_be32(sg_dma_address(&sglist[i]));
3733 		}
3734 
3735 		if (likely(ipr_cmd->dma_use_sg)) {
3736 			ioadl[i-1].flags_and_data_len |=
3737 				cpu_to_be32(IPR_IOADL_FLAGS_LAST);
3738 			return 0;
3739 		} else
3740 			dev_err(&ioa_cfg->pdev->dev, "pci_map_sg failed!\n");
3741 	} else {
3742 		if (scsi_cmd->sc_data_direction == DMA_TO_DEVICE) {
3743 			ioadl_flags = IPR_IOADL_FLAGS_WRITE;
3744 			ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
3745 			ioarcb->write_data_transfer_length = cpu_to_be32(length);
3746 			ioarcb->write_ioadl_len = cpu_to_be32(sizeof(struct ipr_ioadl_desc));
3747 		} else if (scsi_cmd->sc_data_direction == DMA_FROM_DEVICE) {
3748 			ioadl_flags = IPR_IOADL_FLAGS_READ;
3749 			ioarcb->read_data_transfer_length = cpu_to_be32(length);
3750 			ioarcb->read_ioadl_len = cpu_to_be32(sizeof(struct ipr_ioadl_desc));
3751 		}
3752 
3753 		ipr_cmd->dma_handle = pci_map_single(ioa_cfg->pdev,
3754 						     scsi_cmd->request_buffer, length,
3755 						     scsi_cmd->sc_data_direction);
3756 
3757 		if (likely(!pci_dma_mapping_error(ipr_cmd->dma_handle))) {
3758 			ipr_cmd->dma_use_sg = 1;
3759 			ioadl[0].flags_and_data_len =
3760 				cpu_to_be32(ioadl_flags | length | IPR_IOADL_FLAGS_LAST);
3761 			ioadl[0].address = cpu_to_be32(ipr_cmd->dma_handle);
3762 			return 0;
3763 		} else
3764 			dev_err(&ioa_cfg->pdev->dev, "pci_map_single failed!\n");
3765 	}
3766 
3767 	return -1;
3768 }
3769 
3770 /**
3771  * ipr_get_task_attributes - Translate SPI Q-Tag to task attributes
3772  * @scsi_cmd:	scsi command struct
3773  *
3774  * Return value:
3775  * 	task attributes
3776  **/
3777 static u8 ipr_get_task_attributes(struct scsi_cmnd *scsi_cmd)
3778 {
3779 	u8 tag[2];
3780 	u8 rc = IPR_FLAGS_LO_UNTAGGED_TASK;
3781 
3782 	if (scsi_populate_tag_msg(scsi_cmd, tag)) {
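	/* Map the SPI queue tag message type to the adapter's task attribute. */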
3783 		switch (tag[0]) {
3784 		case MSG_SIMPLE_TAG:
3785 			rc = IPR_FLAGS_LO_SIMPLE_TASK;
3786 			break;
3787 		case MSG_HEAD_TAG:
3788 			rc = IPR_FLAGS_LO_HEAD_OF_Q_TASK;
3789 			break;
3790 		case MSG_ORDERED_TAG:
3791 			rc = IPR_FLAGS_LO_ORDERED_TASK;
3792 			break;
		}
3794 	}
3795 
3796 	return rc;
3797 }
3798 
3799 /**
3800  * ipr_erp_done - Process completion of ERP for a device
3801  * @ipr_cmd:		ipr command struct
3802  *
3803  * This function copies the sense buffer into the scsi_cmd
 * struct and calls the scsi_done function.
3805  *
3806  * Return value:
3807  * 	nothing
3808  **/
3809 static void ipr_erp_done(struct ipr_cmnd *ipr_cmd)
3810 {
3811 	struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
3812 	struct ipr_resource_entry *res = scsi_cmd->device->hostdata;
3813 	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
3814 	u32 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
3815 
3816 	if (IPR_IOASC_SENSE_KEY(ioasc) > 0) {
3817 		scsi_cmd->result |= (DID_ERROR << 16);
3818 		ipr_sdev_err(scsi_cmd->device,
3819 			     "Request Sense failed with IOASC: 0x%08X\n", ioasc);
3820 	} else {
3821 		memcpy(scsi_cmd->sense_buffer, ipr_cmd->sense_buffer,
3822 		       SCSI_SENSE_BUFFERSIZE);
3823 	}
3824 
3825 	if (res) {
3826 		if (!ipr_is_naca_model(res))
3827 			res->needs_sync_complete = 1;
3828 		res->in_erp = 0;
3829 	}
3830 	ipr_unmap_sglist(ioa_cfg, ipr_cmd);
3831 	list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
3832 	scsi_cmd->scsi_done(scsi_cmd);
3833 }
3834 
3835 /**
3836  * ipr_reinit_ipr_cmnd_for_erp - Re-initialize a cmnd block to be used for ERP
3837  * @ipr_cmd:	ipr command struct
3838  *
3839  * Return value:
3840  * 	none
3841  **/
3842 static void ipr_reinit_ipr_cmnd_for_erp(struct ipr_cmnd *ipr_cmd)
3843 {
3844 	struct ipr_ioarcb *ioarcb;
3845 	struct ipr_ioasa *ioasa;
3846 
3847 	ioarcb = &ipr_cmd->ioarcb;
3848 	ioasa = &ipr_cmd->ioasa;
3849 
3850 	memset(&ioarcb->cmd_pkt, 0, sizeof(struct ipr_cmd_pkt));
3851 	ioarcb->write_data_transfer_length = 0;
3852 	ioarcb->read_data_transfer_length = 0;
3853 	ioarcb->write_ioadl_len = 0;
3854 	ioarcb->read_ioadl_len = 0;
3855 	ioasa->ioasc = 0;
3856 	ioasa->residual_data_len = 0;
3857 }
3858 
3859 /**
3860  * ipr_erp_request_sense - Send request sense to a device
3861  * @ipr_cmd:	ipr command struct
3862  *
3863  * This function sends a request sense to a device as a result
3864  * of a check condition.
3865  *
3866  * Return value:
3867  * 	nothing
3868  **/
3869 static void ipr_erp_request_sense(struct ipr_cmnd *ipr_cmd)
3870 {
3871 	struct ipr_cmd_pkt *cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt;
3872 	u32 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
3873 
3874 	if (IPR_IOASC_SENSE_KEY(ioasc) > 0) {
3875 		ipr_erp_done(ipr_cmd);
3876 		return;
3877 	}
3878 
3879 	ipr_reinit_ipr_cmnd_for_erp(ipr_cmd);
3880 
3881 	cmd_pkt->request_type = IPR_RQTYPE_SCSICDB;
3882 	cmd_pkt->cdb[0] = REQUEST_SENSE;
3883 	cmd_pkt->cdb[4] = SCSI_SENSE_BUFFERSIZE;
3884 	cmd_pkt->flags_hi |= IPR_FLAGS_HI_SYNC_OVERRIDE;
3885 	cmd_pkt->flags_hi |= IPR_FLAGS_HI_NO_ULEN_CHK;
3886 	cmd_pkt->timeout = cpu_to_be16(IPR_REQUEST_SENSE_TIMEOUT / HZ);
3887 
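	/* Point a single read IOADL element at the preallocated DMA sense buffer. */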
3888 	ipr_cmd->ioadl[0].flags_and_data_len =
3889 		cpu_to_be32(IPR_IOADL_FLAGS_READ_LAST | SCSI_SENSE_BUFFERSIZE);
3890 	ipr_cmd->ioadl[0].address =
3891 		cpu_to_be32(ipr_cmd->sense_buffer_dma);
3892 
3893 	ipr_cmd->ioarcb.read_ioadl_len =
3894 		cpu_to_be32(sizeof(struct ipr_ioadl_desc));
3895 	ipr_cmd->ioarcb.read_data_transfer_length =
3896 		cpu_to_be32(SCSI_SENSE_BUFFERSIZE);
3897 
3898 	ipr_do_req(ipr_cmd, ipr_erp_done, ipr_timeout,
3899 		   IPR_REQUEST_SENSE_TIMEOUT * 2);
3900 }
3901 
3902 /**
3903  * ipr_erp_cancel_all - Send cancel all to a device
3904  * @ipr_cmd:	ipr command struct
3905  *
3906  * This function sends a cancel all to a device to clear the
3907  * queue. If we are running TCQ on the device, QERR is set to 1,
3908  * which means all outstanding ops have been dropped on the floor.
3909  * Cancel all will return them to us.
3910  *
3911  * Return value:
3912  * 	nothing
3913  **/
3914 static void ipr_erp_cancel_all(struct ipr_cmnd *ipr_cmd)
3915 {
3916 	struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
3917 	struct ipr_resource_entry *res = scsi_cmd->device->hostdata;
3918 	struct ipr_cmd_pkt *cmd_pkt;
3919 
3920 	res->in_erp = 1;
3921 
3922 	ipr_reinit_ipr_cmnd_for_erp(ipr_cmd);
3923 
3924 	if (!scsi_get_tag_type(scsi_cmd->device)) {
3925 		ipr_erp_request_sense(ipr_cmd);
3926 		return;
3927 	}
3928 
3929 	cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt;
3930 	cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
3931 	cmd_pkt->cdb[0] = IPR_CANCEL_ALL_REQUESTS;
3932 
3933 	ipr_do_req(ipr_cmd, ipr_erp_request_sense, ipr_timeout,
3934 		   IPR_CANCEL_ALL_TIMEOUT);
3935 }
3936 
3937 /**
3938  * ipr_dump_ioasa - Dump contents of IOASA
3939  * @ioa_cfg:	ioa config struct
3940  * @ipr_cmd:	ipr command struct
3941  *
3942  * This function is invoked by the interrupt handler when ops
3943  * fail. It will log the IOASA if appropriate. Only called
3944  * for GPDD ops.
3945  *
3946  * Return value:
3947  * 	none
3948  **/
3949 static void ipr_dump_ioasa(struct ipr_ioa_cfg *ioa_cfg,
3950 			   struct ipr_cmnd *ipr_cmd)
3951 {
3952 	int i;
3953 	u16 data_len;
3954 	u32 ioasc;
3955 	struct ipr_ioasa *ioasa = &ipr_cmd->ioasa;
3956 	__be32 *ioasa_data = (__be32 *)ioasa;
3957 	int error_index;
3958 
3959 	ioasc = be32_to_cpu(ioasa->ioasc) & IPR_IOASC_IOASC_MASK;
3960 
3961 	if (0 == ioasc)
3962 		return;
3963 
3964 	if (ioa_cfg->log_level < IPR_DEFAULT_LOG_LEVEL)
3965 		return;
3966 
3967 	error_index = ipr_get_error(ioasc);
3968 
3969 	if (ioa_cfg->log_level < IPR_MAX_LOG_LEVEL) {
3970 		/* Don't log an error if the IOA already logged one */
3971 		if (ioasa->ilid != 0)
3972 			return;
3973 
3974 		if (ipr_error_table[error_index].log_ioasa == 0)
3975 			return;
3976 	}
3977 
3978 	ipr_sdev_err(ipr_cmd->scsi_cmd->device, "%s\n",
3979 		     ipr_error_table[error_index].error);
3980 
	if ((ioasa->u.gpdd.end_state < ARRAY_SIZE(ipr_gpdd_dev_end_states)) &&
	    (ioasa->u.gpdd.bus_phase < ARRAY_SIZE(ipr_gpdd_dev_bus_phases))) {
3983 		ipr_sdev_err(ipr_cmd->scsi_cmd->device,
3984 			     "Device End state: %s Phase: %s\n",
3985 			     ipr_gpdd_dev_end_states[ioasa->u.gpdd.end_state],
3986 			     ipr_gpdd_dev_bus_phases[ioasa->u.gpdd.bus_phase]);
3987 	}
3988 
3989 	if (sizeof(struct ipr_ioasa) < be16_to_cpu(ioasa->ret_stat_len))
3990 		data_len = sizeof(struct ipr_ioasa);
3991 	else
3992 		data_len = be16_to_cpu(ioasa->ret_stat_len);
3993 
3994 	ipr_err("IOASA Dump:\n");
3995 
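	/* i indexes 32-bit words; each pass prints one line of four words. */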
3996 	for (i = 0; i < data_len / 4; i += 4) {
3997 		ipr_err("%08X: %08X %08X %08X %08X\n", i*4,
3998 			be32_to_cpu(ioasa_data[i]),
3999 			be32_to_cpu(ioasa_data[i+1]),
4000 			be32_to_cpu(ioasa_data[i+2]),
4001 			be32_to_cpu(ioasa_data[i+3]));
4002 	}
4003 }
4004 
4005 /**
4006  * ipr_gen_sense - Generate SCSI sense data from an IOASA
 * @ipr_cmd:	ipr command struct
4009  *
4010  * Return value:
4011  * 	none
4012  **/
4013 static void ipr_gen_sense(struct ipr_cmnd *ipr_cmd)
4014 {
4015 	u32 failing_lba;
4016 	u8 *sense_buf = ipr_cmd->scsi_cmd->sense_buffer;
4017 	struct ipr_resource_entry *res = ipr_cmd->scsi_cmd->device->hostdata;
4018 	struct ipr_ioasa *ioasa = &ipr_cmd->ioasa;
4019 	u32 ioasc = be32_to_cpu(ioasa->ioasc);
4020 
4021 	memset(sense_buf, 0, SCSI_SENSE_BUFFERSIZE);
4022 
4023 	if (ioasc >= IPR_FIRST_DRIVER_IOASC)
4024 		return;
4025 
4026 	ipr_cmd->scsi_cmd->result = SAM_STAT_CHECK_CONDITION;
4027 
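	/*
	 * A failing LBA above 32 bits cannot be represented in the
	 * fixed-format information field, so build descriptor-format
	 * sense data (response code 0x72) with an information descriptor;
	 * otherwise build fixed-format sense data (response code 0x70).
	 */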
4028 	if (ipr_is_vset_device(res) &&
4029 	    ioasc == IPR_IOASC_MED_DO_NOT_REALLOC &&
4030 	    ioasa->u.vset.failing_lba_hi != 0) {
4031 		sense_buf[0] = 0x72;
4032 		sense_buf[1] = IPR_IOASC_SENSE_KEY(ioasc);
4033 		sense_buf[2] = IPR_IOASC_SENSE_CODE(ioasc);
4034 		sense_buf[3] = IPR_IOASC_SENSE_QUAL(ioasc);
4035 
		sense_buf[7] = 12;	/* additional sense length */
		sense_buf[8] = 0;	/* information sense data descriptor */
		sense_buf[9] = 0x0A;	/* additional descriptor length */
		sense_buf[10] = 0x80;	/* VALID bit for the information field */
4040 
4041 		failing_lba = be32_to_cpu(ioasa->u.vset.failing_lba_hi);
4042 
4043 		sense_buf[12] = (failing_lba & 0xff000000) >> 24;
4044 		sense_buf[13] = (failing_lba & 0x00ff0000) >> 16;
4045 		sense_buf[14] = (failing_lba & 0x0000ff00) >> 8;
4046 		sense_buf[15] = failing_lba & 0x000000ff;
4047 
4048 		failing_lba = be32_to_cpu(ioasa->u.vset.failing_lba_lo);
4049 
4050 		sense_buf[16] = (failing_lba & 0xff000000) >> 24;
4051 		sense_buf[17] = (failing_lba & 0x00ff0000) >> 16;
4052 		sense_buf[18] = (failing_lba & 0x0000ff00) >> 8;
4053 		sense_buf[19] = failing_lba & 0x000000ff;
4054 	} else {
4055 		sense_buf[0] = 0x70;
4056 		sense_buf[2] = IPR_IOASC_SENSE_KEY(ioasc);
4057 		sense_buf[12] = IPR_IOASC_SENSE_CODE(ioasc);
4058 		sense_buf[13] = IPR_IOASC_SENSE_QUAL(ioasc);
4059 
4060 		/* Illegal request */
4061 		if ((IPR_IOASC_SENSE_KEY(ioasc) == 0x05) &&
4062 		    (be32_to_cpu(ioasa->ioasc_specific) & IPR_FIELD_POINTER_VALID)) {
4063 			sense_buf[7] = 10;	/* additional length */
4064 
4065 			/* IOARCB was in error */
4066 			if (IPR_IOASC_SENSE_CODE(ioasc) == 0x24)
4067 				sense_buf[15] = 0xC0;
4068 			else	/* Parameter data was invalid */
4069 				sense_buf[15] = 0x80;
4070 
4071 			sense_buf[16] =
4072 			    ((IPR_FIELD_POINTER_MASK &
4073 			      be32_to_cpu(ioasa->ioasc_specific)) >> 8) & 0xff;
4074 			sense_buf[17] =
4075 			    (IPR_FIELD_POINTER_MASK &
4076 			     be32_to_cpu(ioasa->ioasc_specific)) & 0xff;
4077 		} else {
4078 			if (ioasc == IPR_IOASC_MED_DO_NOT_REALLOC) {
4079 				if (ipr_is_vset_device(res))
4080 					failing_lba = be32_to_cpu(ioasa->u.vset.failing_lba_lo);
4081 				else
4082 					failing_lba = be32_to_cpu(ioasa->u.dasd.failing_lba);
4083 
4084 				sense_buf[0] |= 0x80;	/* Or in the Valid bit */
4085 				sense_buf[3] = (failing_lba & 0xff000000) >> 24;
4086 				sense_buf[4] = (failing_lba & 0x00ff0000) >> 16;
4087 				sense_buf[5] = (failing_lba & 0x0000ff00) >> 8;
4088 				sense_buf[6] = failing_lba & 0x000000ff;
4089 			}
4090 
4091 			sense_buf[7] = 6;	/* additional length */
4092 		}
4093 	}
4094 }
4095 
4096 /**
4097  * ipr_get_autosense - Copy autosense data to sense buffer
4098  * @ipr_cmd:	ipr command struct
4099  *
 * This function copies the autosense data to the sense buffer
 * in the scsi_cmd, if autosense data is available.
4102  *
4103  * Return value:
4104  *	1 if autosense was available / 0 if not
4105  **/
4106 static int ipr_get_autosense(struct ipr_cmnd *ipr_cmd)
4107 {
4108 	struct ipr_ioasa *ioasa = &ipr_cmd->ioasa;
4109 
4110 	if ((be32_to_cpu(ioasa->ioasc_specific) &
4111 	     (IPR_ADDITIONAL_STATUS_FMT | IPR_AUTOSENSE_VALID)) == 0)
4112 		return 0;
4113 
4114 	memcpy(ipr_cmd->scsi_cmd->sense_buffer, ioasa->auto_sense.data,
4115 	       min_t(u16, be16_to_cpu(ioasa->auto_sense.auto_sense_len),
4116 		   SCSI_SENSE_BUFFERSIZE));
4117 	return 1;
4118 }
4119 
4120 /**
4121  * ipr_erp_start - Process an error response for a SCSI op
4122  * @ioa_cfg:	ioa config struct
4123  * @ipr_cmd:	ipr command struct
4124  *
4125  * This function determines whether or not to initiate ERP
4126  * on the affected device.
4127  *
4128  * Return value:
4129  * 	nothing
4130  **/
4131 static void ipr_erp_start(struct ipr_ioa_cfg *ioa_cfg,
4132 			      struct ipr_cmnd *ipr_cmd)
4133 {
4134 	struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
4135 	struct ipr_resource_entry *res = scsi_cmd->device->hostdata;
4136 	u32 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
4137 
4138 	if (!res) {
4139 		ipr_scsi_eh_done(ipr_cmd);
4140 		return;
4141 	}
4142 
4143 	if (ipr_is_gscsi(res))
4144 		ipr_dump_ioasa(ioa_cfg, ipr_cmd);
4145 	else
4146 		ipr_gen_sense(ipr_cmd);
4147 
4148 	switch (ioasc & IPR_IOASC_IOASC_MASK) {
4149 	case IPR_IOASC_ABORTED_CMD_TERM_BY_HOST:
4150 		if (ipr_is_naca_model(res))
4151 			scsi_cmd->result |= (DID_ABORT << 16);
4152 		else
4153 			scsi_cmd->result |= (DID_IMM_RETRY << 16);
4154 		break;
4155 	case IPR_IOASC_IR_RESOURCE_HANDLE:
4156 	case IPR_IOASC_IR_NO_CMDS_TO_2ND_IOA:
4157 		scsi_cmd->result |= (DID_NO_CONNECT << 16);
4158 		break;
4159 	case IPR_IOASC_HW_SEL_TIMEOUT:
4160 		scsi_cmd->result |= (DID_NO_CONNECT << 16);
4161 		if (!ipr_is_naca_model(res))
4162 			res->needs_sync_complete = 1;
4163 		break;
4164 	case IPR_IOASC_SYNC_REQUIRED:
4165 		if (!res->in_erp)
4166 			res->needs_sync_complete = 1;
4167 		scsi_cmd->result |= (DID_IMM_RETRY << 16);
4168 		break;
4169 	case IPR_IOASC_MED_DO_NOT_REALLOC: /* prevent retries */
4170 	case IPR_IOASA_IR_DUAL_IOA_DISABLED:
4171 		scsi_cmd->result |= (DID_PASSTHROUGH << 16);
4172 		break;
4173 	case IPR_IOASC_BUS_WAS_RESET:
4174 	case IPR_IOASC_BUS_WAS_RESET_BY_OTHER:
4175 		/*
		 * Report the bus reset and ask for a retry. The device
		 * will return CC/UA on the next command.
4178 		 */
4179 		if (!res->resetting_device)
4180 			scsi_report_bus_reset(ioa_cfg->host, scsi_cmd->device->channel);
4181 		scsi_cmd->result |= (DID_ERROR << 16);
4182 		if (!ipr_is_naca_model(res))
4183 			res->needs_sync_complete = 1;
4184 		break;
4185 	case IPR_IOASC_HW_DEV_BUS_STATUS:
4186 		scsi_cmd->result |= IPR_IOASC_SENSE_STATUS(ioasc);
4187 		if (IPR_IOASC_SENSE_STATUS(ioasc) == SAM_STAT_CHECK_CONDITION) {
4188 			if (!ipr_get_autosense(ipr_cmd)) {
4189 				if (!ipr_is_naca_model(res)) {
4190 					ipr_erp_cancel_all(ipr_cmd);
4191 					return;
4192 				}
4193 			}
4194 		}
4195 		if (!ipr_is_naca_model(res))
4196 			res->needs_sync_complete = 1;
4197 		break;
4198 	case IPR_IOASC_NR_INIT_CMD_REQUIRED:
4199 		break;
4200 	default:
4201 		scsi_cmd->result |= (DID_ERROR << 16);
4202 		if (!ipr_is_vset_device(res) && !ipr_is_naca_model(res))
4203 			res->needs_sync_complete = 1;
4204 		break;
4205 	}
4206 
4207 	ipr_unmap_sglist(ioa_cfg, ipr_cmd);
4208 	list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
4209 	scsi_cmd->scsi_done(scsi_cmd);
4210 }
4211 
4212 /**
4213  * ipr_scsi_done - mid-layer done function
4214  * @ipr_cmd:	ipr command struct
4215  *
4216  * This function is invoked by the interrupt handler for
4217  * ops generated by the SCSI mid-layer
4218  *
4219  * Return value:
4220  * 	none
4221  **/
4222 static void ipr_scsi_done(struct ipr_cmnd *ipr_cmd)
4223 {
4224 	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
4225 	struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
4226 	u32 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
4227 
4228 	scsi_cmd->resid = be32_to_cpu(ipr_cmd->ioasa.residual_data_len);
4229 
4230 	if (likely(IPR_IOASC_SENSE_KEY(ioasc) == 0)) {
4231 		ipr_unmap_sglist(ioa_cfg, ipr_cmd);
4232 		list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
4233 		scsi_cmd->scsi_done(scsi_cmd);
4234 	} else
4235 		ipr_erp_start(ioa_cfg, ipr_cmd);
4236 }
4237 
4238 /**
 * ipr_save_ioafp_mode_select - Save the adapter's mode select data
4240  * @ioa_cfg:	ioa config struct
4241  * @scsi_cmd:	scsi command struct
4242  *
4243  * This function saves mode select data for the adapter to
4244  * use following an adapter reset.
4245  *
4246  * Return value:
4247  *	0 on success / SCSI_MLQUEUE_HOST_BUSY on failure
4248  **/
4249 static int ipr_save_ioafp_mode_select(struct ipr_ioa_cfg *ioa_cfg,
4250 				       struct scsi_cmnd *scsi_cmd)
4251 {
4252 	if (!ioa_cfg->saved_mode_pages) {
4253 		ioa_cfg->saved_mode_pages  = kmalloc(sizeof(struct ipr_mode_pages),
4254 						     GFP_ATOMIC);
4255 		if (!ioa_cfg->saved_mode_pages) {
4256 			dev_err(&ioa_cfg->pdev->dev,
4257 				"IOA mode select buffer allocation failed\n");
4258 			return SCSI_MLQUEUE_HOST_BUSY;
4259 		}
4260 	}
4261 
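	/* Byte 4 of the 6-byte MODE SELECT CDB is the parameter list length. */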
4262 	memcpy(ioa_cfg->saved_mode_pages, scsi_cmd->buffer, scsi_cmd->cmnd[4]);
4263 	ioa_cfg->saved_mode_page_len = scsi_cmd->cmnd[4];
4264 	return 0;
4265 }
4266 
4267 /**
4268  * ipr_queuecommand - Queue a mid-layer request
4269  * @scsi_cmd:	scsi command struct
4270  * @done:		done function
4271  *
4272  * This function queues a request generated by the mid-layer.
4273  *
4274  * Return value:
4275  *	0 on success
4276  *	SCSI_MLQUEUE_DEVICE_BUSY if device is busy
4277  *	SCSI_MLQUEUE_HOST_BUSY if host is busy
4278  **/
4279 static int ipr_queuecommand(struct scsi_cmnd *scsi_cmd,
4280 			    void (*done) (struct scsi_cmnd *))
4281 {
4282 	struct ipr_ioa_cfg *ioa_cfg;
4283 	struct ipr_resource_entry *res;
4284 	struct ipr_ioarcb *ioarcb;
4285 	struct ipr_cmnd *ipr_cmd;
4286 	int rc = 0;
4287 
4288 	scsi_cmd->scsi_done = done;
4289 	ioa_cfg = (struct ipr_ioa_cfg *)scsi_cmd->device->host->hostdata;
4290 	res = scsi_cmd->device->hostdata;
4291 	scsi_cmd->result = (DID_OK << 16);
4292 
4293 	/*
	 * We are currently blocking all devices due to a host reset.
4295 	 * We have told the host to stop giving us new requests, but
4296 	 * ERP ops don't count. FIXME
4297 	 */
4298 	if (unlikely(!ioa_cfg->allow_cmds && !ioa_cfg->ioa_is_dead))
4299 		return SCSI_MLQUEUE_HOST_BUSY;
4300 
4301 	/*
4302 	 * FIXME - Create scsi_set_host_offline interface
4303 	 *  and the ioa_is_dead check can be removed
4304 	 */
4305 	if (unlikely(ioa_cfg->ioa_is_dead || !res)) {
4306 		memset(scsi_cmd->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
4307 		scsi_cmd->result = (DID_NO_CONNECT << 16);
4308 		scsi_cmd->scsi_done(scsi_cmd);
4309 		return 0;
4310 	}
4311 
4312 	ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
4313 	ioarcb = &ipr_cmd->ioarcb;
4314 	list_add_tail(&ipr_cmd->queue, &ioa_cfg->pending_q);
4315 
4316 	memcpy(ioarcb->cmd_pkt.cdb, scsi_cmd->cmnd, scsi_cmd->cmd_len);
4317 	ipr_cmd->scsi_cmd = scsi_cmd;
4318 	ioarcb->res_handle = res->cfgte.res_handle;
4319 	ipr_cmd->done = ipr_scsi_done;
4320 	ipr_trc_hook(ipr_cmd, IPR_TRACE_START, IPR_GET_PHYS_LOC(res->cfgte.res_addr));
4321 
4322 	if (ipr_is_gscsi(res) || ipr_is_vset_device(res)) {
4323 		if (scsi_cmd->underflow == 0)
4324 			ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_ULEN_CHK;
4325 
4326 		if (res->needs_sync_complete) {
4327 			ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_SYNC_COMPLETE;
4328 			res->needs_sync_complete = 0;
4329 		}
4330 
4331 		ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_LINK_DESC;
4332 		ioarcb->cmd_pkt.flags_lo |= IPR_FLAGS_LO_DELAY_AFTER_RST;
4333 		ioarcb->cmd_pkt.flags_lo |= IPR_FLAGS_LO_ALIGNED_BFR;
4334 		ioarcb->cmd_pkt.flags_lo |= ipr_get_task_attributes(scsi_cmd);
4335 	}
4336 
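	/*
	 * CDB opcodes 0xC0 and above are vendor specific; send them as
	 * IOA commands when the device is not generic SCSI or when the
	 * opcode is Query Resource State.
	 */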
4337 	if (scsi_cmd->cmnd[0] >= 0xC0 &&
4338 	    (!ipr_is_gscsi(res) || scsi_cmd->cmnd[0] == IPR_QUERY_RSRC_STATE))
4339 		ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
4340 
4341 	if (ipr_is_ioa_resource(res) && scsi_cmd->cmnd[0] == MODE_SELECT)
4342 		rc = ipr_save_ioafp_mode_select(ioa_cfg, scsi_cmd);
4343 
4344 	if (likely(rc == 0))
4345 		rc = ipr_build_ioadl(ioa_cfg, ipr_cmd);
4346 
4347 	if (likely(rc == 0)) {
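		/*
		 * Order the IOARCB and IOADL stores ahead of the MMIO
		 * doorbell write that hands the command to the adapter.
		 */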
4348 		mb();
4349 		writel(be32_to_cpu(ipr_cmd->ioarcb.ioarcb_host_pci_addr),
4350 		       ioa_cfg->regs.ioarrin_reg);
4351 	} else {
4352 		 list_move_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
4353 		 return SCSI_MLQUEUE_HOST_BUSY;
4354 	}
4355 
4356 	return 0;
4357 }
4358 
4359 /**
 * ipr_ioa_info - Get information about the card/driver
 * @host:	scsi host struct
4362  *
4363  * Return value:
4364  * 	pointer to buffer with description string
4365  **/
4366 static const char * ipr_ioa_info(struct Scsi_Host *host)
4367 {
4368 	static char buffer[512];
4369 	struct ipr_ioa_cfg *ioa_cfg;
4370 	unsigned long lock_flags = 0;
4371 
4372 	ioa_cfg = (struct ipr_ioa_cfg *) host->hostdata;
4373 
4374 	spin_lock_irqsave(host->host_lock, lock_flags);
4375 	sprintf(buffer, "IBM %X Storage Adapter", ioa_cfg->type);
4376 	spin_unlock_irqrestore(host->host_lock, lock_flags);
4377 
4378 	return buffer;
4379 }
4380 
4381 static struct scsi_host_template driver_template = {
4382 	.module = THIS_MODULE,
4383 	.name = "IPR",
4384 	.info = ipr_ioa_info,
4385 	.queuecommand = ipr_queuecommand,
4386 	.eh_abort_handler = ipr_eh_abort,
4387 	.eh_device_reset_handler = ipr_eh_dev_reset,
4388 	.eh_host_reset_handler = ipr_eh_host_reset,
4389 	.slave_alloc = ipr_slave_alloc,
4390 	.slave_configure = ipr_slave_configure,
4391 	.slave_destroy = ipr_slave_destroy,
4392 	.change_queue_depth = ipr_change_queue_depth,
4393 	.change_queue_type = ipr_change_queue_type,
4394 	.bios_param = ipr_biosparam,
4395 	.can_queue = IPR_MAX_COMMANDS,
4396 	.this_id = -1,
4397 	.sg_tablesize = IPR_MAX_SGLIST,
4398 	.max_sectors = IPR_IOA_MAX_SECTORS,
4399 	.cmd_per_lun = IPR_MAX_CMD_PER_LUN,
4400 	.use_clustering = ENABLE_CLUSTERING,
4401 	.shost_attrs = ipr_ioa_attrs,
4402 	.sdev_attrs = ipr_dev_attrs,
4403 	.proc_name = IPR_NAME
4404 };
4405 
4406 #ifdef CONFIG_PPC_PSERIES
4407 static const u16 ipr_blocked_processors[] = {
4408 	PV_NORTHSTAR,
4409 	PV_PULSAR,
4410 	PV_POWER4,
4411 	PV_ICESTAR,
4412 	PV_SSTAR,
4413 	PV_POWER4p,
4414 	PV_630,
4415 	PV_630p
4416 };
4417 
4418 /**
4419  * ipr_invalid_adapter - Determine if this adapter is supported on this hardware
4420  * @ioa_cfg:	ioa cfg struct
4421  *
4422  * Adapters that use Gemstone revision < 3.1 do not work reliably on
4423  * certain pSeries hardware. This function determines if the given
 * adapter is in one of these configurations or not.
4425  *
4426  * Return value:
4427  * 	1 if adapter is not supported / 0 if adapter is supported
4428  **/
4429 static int ipr_invalid_adapter(struct ipr_ioa_cfg *ioa_cfg)
4430 {
4431 	u8 rev_id;
4432 	int i;
4433 
4434 	if (ioa_cfg->type == 0x5702) {
4435 		if (pci_read_config_byte(ioa_cfg->pdev, PCI_REVISION_ID,
4436 					 &rev_id) == PCIBIOS_SUCCESSFUL) {
4437 			if (rev_id < 4) {
4438 				for (i = 0; i < ARRAY_SIZE(ipr_blocked_processors); i++){
4439 					if (__is_processor(ipr_blocked_processors[i]))
4440 						return 1;
4441 				}
4442 			}
4443 		}
4444 	}
4445 	return 0;
4446 }
4447 #else
4448 #define ipr_invalid_adapter(ioa_cfg) 0
4449 #endif
4450 
4451 /**
4452  * ipr_ioa_bringdown_done - IOA bring down completion.
4453  * @ipr_cmd:	ipr command struct
4454  *
4455  * This function processes the completion of an adapter bring down.
4456  * It wakes any reset sleepers.
4457  *
4458  * Return value:
4459  * 	IPR_RC_JOB_RETURN
4460  **/
4461 static int ipr_ioa_bringdown_done(struct ipr_cmnd *ipr_cmd)
4462 {
4463 	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
4464 
4465 	ENTER;
4466 	ioa_cfg->in_reset_reload = 0;
4467 	ioa_cfg->reset_retries = 0;
4468 	list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
4469 	wake_up_all(&ioa_cfg->reset_wait_q);
4470 
4471 	spin_unlock_irq(ioa_cfg->host->host_lock);
4472 	scsi_unblock_requests(ioa_cfg->host);
4473 	spin_lock_irq(ioa_cfg->host->host_lock);
4474 	LEAVE;
4475 
4476 	return IPR_RC_JOB_RETURN;
4477 }
4478 
4479 /**
4480  * ipr_ioa_reset_done - IOA reset completion.
4481  * @ipr_cmd:	ipr command struct
4482  *
4483  * This function processes the completion of an adapter reset.
4484  * It schedules any necessary mid-layer add/removes and
4485  * wakes any reset sleepers.
4486  *
4487  * Return value:
4488  * 	IPR_RC_JOB_RETURN
4489  **/
4490 static int ipr_ioa_reset_done(struct ipr_cmnd *ipr_cmd)
4491 {
4492 	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
4493 	struct ipr_resource_entry *res;
4494 	struct ipr_hostrcb *hostrcb, *temp;
4495 	int i = 0;
4496 
4497 	ENTER;
4498 	ioa_cfg->in_reset_reload = 0;
4499 	ioa_cfg->allow_cmds = 1;
4500 	ioa_cfg->reset_cmd = NULL;
4501 	ioa_cfg->doorbell |= IPR_RUNTIME_RESET;
4502 
4503 	list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
4504 		if (ioa_cfg->allow_ml_add_del && (res->add_to_ml || res->del_from_ml)) {
4505 			ipr_trace;
4506 			break;
4507 		}
4508 	}
4509 	schedule_work(&ioa_cfg->work_q);
4510 
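	/*
	 * Repost the free host RCBs: the first IPR_NUM_LOG_HCAMS go back
	 * as error-log HCAMs, the remainder as configuration-change HCAMs.
	 */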
4511 	list_for_each_entry_safe(hostrcb, temp, &ioa_cfg->hostrcb_free_q, queue) {
4512 		list_del(&hostrcb->queue);
4513 		if (i++ < IPR_NUM_LOG_HCAMS)
4514 			ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_LOG_DATA, hostrcb);
4515 		else
4516 			ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE, hostrcb);
4517 	}
4518 
4519 	dev_info(&ioa_cfg->pdev->dev, "IOA initialized.\n");
4520 
4521 	ioa_cfg->reset_retries = 0;
4522 	list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
4523 	wake_up_all(&ioa_cfg->reset_wait_q);
4524 
4525 	spin_unlock_irq(ioa_cfg->host->host_lock);
4526 	scsi_unblock_requests(ioa_cfg->host);
4527 	spin_lock_irq(ioa_cfg->host->host_lock);
4528 
4529 	if (!ioa_cfg->allow_cmds)
4530 		scsi_block_requests(ioa_cfg->host);
4531 
4532 	LEAVE;
4533 	return IPR_RC_JOB_RETURN;
4534 }
4535 
4536 /**
4537  * ipr_set_sup_dev_dflt - Initialize a Set Supported Device buffer
4538  * @supported_dev:	supported device struct
4539  * @vpids:			vendor product id struct
4540  *
4541  * Return value:
4542  * 	none
4543  **/
4544 static void ipr_set_sup_dev_dflt(struct ipr_supported_device *supported_dev,
4545 				 struct ipr_std_inq_vpids *vpids)
4546 {
4547 	memset(supported_dev, 0, sizeof(struct ipr_supported_device));
4548 	memcpy(&supported_dev->vpids, vpids, sizeof(struct ipr_std_inq_vpids));
4549 	supported_dev->num_records = 1;
4550 	supported_dev->data_length =
4551 		cpu_to_be16(sizeof(struct ipr_supported_device));
4552 	supported_dev->reserved = 0;
4553 }
4554 
4555 /**
4556  * ipr_set_supported_devs - Send Set Supported Devices for a device
4557  * @ipr_cmd:	ipr command struct
4558  *
 * This function sends a Set Supported Devices to the adapter
4560  *
4561  * Return value:
4562  * 	IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
4563  **/
4564 static int ipr_set_supported_devs(struct ipr_cmnd *ipr_cmd)
4565 {
4566 	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
4567 	struct ipr_supported_device *supp_dev = &ioa_cfg->vpd_cbs->supp_dev;
4568 	struct ipr_ioadl_desc *ioadl = ipr_cmd->ioadl;
4569 	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
4570 	struct ipr_resource_entry *res = ipr_cmd->u.res;
4571 
4572 	ipr_cmd->job_step = ipr_ioa_reset_done;
4573 
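	/*
	 * One Set Supported Devices is issued per DASD resource. Each
	 * completion re-enters this function through job_step, and the
	 * list_for_each_entry_continue() resumes from the resource saved
	 * in ipr_cmd->u.res.
	 */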
4574 	list_for_each_entry_continue(res, &ioa_cfg->used_res_q, queue) {
4575 		if (!IPR_IS_DASD_DEVICE(res->cfgte.std_inq_data))
4576 			continue;
4577 
4578 		ipr_cmd->u.res = res;
4579 		ipr_set_sup_dev_dflt(supp_dev, &res->cfgte.std_inq_data.vpids);
4580 
4581 		ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
4582 		ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
4583 		ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
4584 
4585 		ioarcb->cmd_pkt.cdb[0] = IPR_SET_SUPPORTED_DEVICES;
4586 		ioarcb->cmd_pkt.cdb[7] = (sizeof(struct ipr_supported_device) >> 8) & 0xff;
4587 		ioarcb->cmd_pkt.cdb[8] = sizeof(struct ipr_supported_device) & 0xff;
4588 
4589 		ioadl->flags_and_data_len = cpu_to_be32(IPR_IOADL_FLAGS_WRITE_LAST |
4590 							sizeof(struct ipr_supported_device));
4591 		ioadl->address = cpu_to_be32(ioa_cfg->vpd_cbs_dma +
4592 					     offsetof(struct ipr_misc_cbs, supp_dev));
4593 		ioarcb->write_ioadl_len = cpu_to_be32(sizeof(struct ipr_ioadl_desc));
4594 		ioarcb->write_data_transfer_length =
4595 			cpu_to_be32(sizeof(struct ipr_supported_device));
4596 
4597 		ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout,
4598 			   IPR_SET_SUP_DEVICE_TIMEOUT);
4599 
4600 		ipr_cmd->job_step = ipr_set_supported_devs;
4601 		return IPR_RC_JOB_RETURN;
4602 	}
4603 
4604 	return IPR_RC_JOB_CONTINUE;
4605 }
4606 
4607 /**
4608  * ipr_setup_write_cache - Disable write cache if needed
4609  * @ipr_cmd:	ipr command struct
4610  *
 * This function sets up the adapter's write cache to the desired setting
4612  *
4613  * Return value:
4614  * 	IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
4615  **/
4616 static int ipr_setup_write_cache(struct ipr_cmnd *ipr_cmd)
4617 {
4618 	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
4619 
4620 	ipr_cmd->job_step = ipr_set_supported_devs;
4621 	ipr_cmd->u.res = list_entry(ioa_cfg->used_res_q.next,
4622 				    struct ipr_resource_entry, queue);
4623 
4624 	if (ioa_cfg->cache_state != CACHE_DISABLED)
4625 		return IPR_RC_JOB_CONTINUE;
4626 
4627 	ipr_cmd->ioarcb.res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
4628 	ipr_cmd->ioarcb.cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
4629 	ipr_cmd->ioarcb.cmd_pkt.cdb[0] = IPR_IOA_SHUTDOWN;
4630 	ipr_cmd->ioarcb.cmd_pkt.cdb[1] = IPR_SHUTDOWN_PREPARE_FOR_NORMAL;
4631 
4632 	ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
4633 
4634 	return IPR_RC_JOB_RETURN;
4635 }
4636 
4637 /**
4638  * ipr_get_mode_page - Locate specified mode page
4639  * @mode_pages:	mode page buffer
4640  * @page_code:	page code to find
4641  * @len:		minimum required length for mode page
4642  *
4643  * Return value:
4644  * 	pointer to mode page / NULL on failure
4645  **/
4646 static void *ipr_get_mode_page(struct ipr_mode_pages *mode_pages,
4647 			       u32 page_code, u32 len)
4648 {
4649 	struct ipr_mode_page_hdr *mode_hdr;
4650 	u32 page_length;
4651 	u32 length;
4652 
4653 	if (!mode_pages || (mode_pages->hdr.length == 0))
4654 		return NULL;
4655 
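	/*
	 * hdr.length excludes itself, so the full mode data is
	 * hdr.length + 1 bytes; subtract the 4-byte parameter header
	 * and the block descriptors to get the length of the page data.
	 */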
4656 	length = (mode_pages->hdr.length + 1) - 4 - mode_pages->hdr.block_desc_len;
4657 	mode_hdr = (struct ipr_mode_page_hdr *)
4658 		(mode_pages->data + mode_pages->hdr.block_desc_len);
4659 
4660 	while (length) {
4661 		if (IPR_GET_MODE_PAGE_CODE(mode_hdr) == page_code) {
4662 			if (mode_hdr->page_length >= (len - sizeof(struct ipr_mode_page_hdr)))
4663 				return mode_hdr;
4664 			break;
4665 		} else {
4666 			page_length = (sizeof(struct ipr_mode_page_hdr) +
4667 				       mode_hdr->page_length);
4668 			length -= page_length;
4669 			mode_hdr = (struct ipr_mode_page_hdr *)
4670 				((unsigned long)mode_hdr + page_length);
4671 		}
4672 	}
4673 	return NULL;
4674 }
4675 
4676 /**
4677  * ipr_check_term_power - Check for term power errors
4678  * @ioa_cfg:	ioa config struct
4679  * @mode_pages:	IOAFP mode pages buffer
4680  *
4681  * Check the IOAFP's mode page 28 for term power errors
4682  *
4683  * Return value:
4684  * 	nothing
4685  **/
4686 static void ipr_check_term_power(struct ipr_ioa_cfg *ioa_cfg,
4687 				 struct ipr_mode_pages *mode_pages)
4688 {
4689 	int i;
4690 	int entry_length;
4691 	struct ipr_dev_bus_entry *bus;
4692 	struct ipr_mode_page28 *mode_page;
4693 
4694 	mode_page = ipr_get_mode_page(mode_pages, 0x28,
4695 				      sizeof(struct ipr_mode_page28));
4696 
4697 	entry_length = mode_page->entry_length;
4698 
4699 	bus = mode_page->bus;
4700 
4701 	for (i = 0; i < mode_page->num_entries; i++) {
4702 		if (bus->flags & IPR_SCSI_ATTR_NO_TERM_PWR) {
4703 			dev_err(&ioa_cfg->pdev->dev,
4704 				"Term power is absent on scsi bus %d\n",
4705 				bus->res_addr.bus);
4706 		}
4707 
4708 		bus = (struct ipr_dev_bus_entry *)((char *)bus + entry_length);
4709 	}
4710 }
4711 
4712 /**
4713  * ipr_scsi_bus_speed_limit - Limit the SCSI speed based on SES table
4714  * @ioa_cfg:	ioa config struct
4715  *
 * Looks through the config table checking for SES devices. If
 * an SES device has an entry in the SES table specifying a maximum
 * SCSI bus speed, the speed of that bus is limited accordingly.
4719  *
4720  * Return value:
4721  * 	none
4722  **/
4723 static void ipr_scsi_bus_speed_limit(struct ipr_ioa_cfg *ioa_cfg)
4724 {
4725 	u32 max_xfer_rate;
4726 	int i;
4727 
4728 	for (i = 0; i < IPR_MAX_NUM_BUSES; i++) {
4729 		max_xfer_rate = ipr_get_max_scsi_speed(ioa_cfg, i,
4730 						       ioa_cfg->bus_attr[i].bus_width);
4731 
4732 		if (max_xfer_rate < ioa_cfg->bus_attr[i].max_xfer_rate)
4733 			ioa_cfg->bus_attr[i].max_xfer_rate = max_xfer_rate;
4734 	}
4735 }
4736 
4737 /**
4738  * ipr_modify_ioafp_mode_page_28 - Modify IOAFP Mode Page 28
4739  * @ioa_cfg:	ioa config struct
4740  * @mode_pages:	mode page 28 buffer
4741  *
4742  * Updates mode page 28 based on driver configuration
4743  *
4744  * Return value:
4745  * 	none
4746  **/
4747 static void ipr_modify_ioafp_mode_page_28(struct ipr_ioa_cfg *ioa_cfg,
4748 					  	struct ipr_mode_pages *mode_pages)
4749 {
4750 	int i, entry_length;
4751 	struct ipr_dev_bus_entry *bus;
4752 	struct ipr_bus_attributes *bus_attr;
4753 	struct ipr_mode_page28 *mode_page;
4754 
4755 	mode_page = ipr_get_mode_page(mode_pages, 0x28,
4756 				      sizeof(struct ipr_mode_page28));
4757 
4758 	entry_length = mode_page->entry_length;
4759 
4760 	/* Loop for each device bus entry */
4761 	for (i = 0, bus = mode_page->bus;
4762 	     i < mode_page->num_entries;
4763 	     i++, bus = (struct ipr_dev_bus_entry *)((u8 *)bus + entry_length)) {
4764 		if (bus->res_addr.bus > IPR_MAX_NUM_BUSES) {
4765 			dev_err(&ioa_cfg->pdev->dev,
4766 				"Invalid resource address reported: 0x%08X\n",
4767 				IPR_GET_PHYS_LOC(bus->res_addr));
4768 			continue;
4769 		}
4770 
4771 		bus_attr = &ioa_cfg->bus_attr[i];
4772 		bus->extended_reset_delay = IPR_EXTENDED_RESET_DELAY;
4773 		bus->bus_width = bus_attr->bus_width;
4774 		bus->max_xfer_rate = cpu_to_be32(bus_attr->max_xfer_rate);
4775 		bus->flags &= ~IPR_SCSI_ATTR_QAS_MASK;
4776 		if (bus_attr->qas_enabled)
4777 			bus->flags |= IPR_SCSI_ATTR_ENABLE_QAS;
4778 		else
4779 			bus->flags |= IPR_SCSI_ATTR_DISABLE_QAS;
4780 	}
4781 }
4782 
4783 /**
4784  * ipr_build_mode_select - Build a mode select command
4785  * @ipr_cmd:	ipr command struct
4786  * @res_handle:	resource handle to send command to
 * @parm:		Byte 1 of the Mode Select CDB
4788  * @dma_addr:	DMA buffer address
4789  * @xfer_len:	data transfer length
4790  *
4791  * Return value:
4792  * 	none
4793  **/
4794 static void ipr_build_mode_select(struct ipr_cmnd *ipr_cmd,
4795 				  __be32 res_handle, u8 parm, u32 dma_addr,
4796 				  u8 xfer_len)
4797 {
4798 	struct ipr_ioadl_desc *ioadl = ipr_cmd->ioadl;
4799 	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
4800 
4801 	ioarcb->res_handle = res_handle;
4802 	ioarcb->cmd_pkt.request_type = IPR_RQTYPE_SCSICDB;
4803 	ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
4804 	ioarcb->cmd_pkt.cdb[0] = MODE_SELECT;
4805 	ioarcb->cmd_pkt.cdb[1] = parm;
4806 	ioarcb->cmd_pkt.cdb[4] = xfer_len;
4807 
4808 	ioadl->flags_and_data_len =
4809 		cpu_to_be32(IPR_IOADL_FLAGS_WRITE_LAST | xfer_len);
4810 	ioadl->address = cpu_to_be32(dma_addr);
4811 	ioarcb->write_ioadl_len = cpu_to_be32(sizeof(struct ipr_ioadl_desc));
4812 	ioarcb->write_data_transfer_length = cpu_to_be32(xfer_len);
4813 }
4814 
4815 /**
4816  * ipr_ioafp_mode_select_page28 - Issue Mode Select Page 28 to IOA
4817  * @ipr_cmd:	ipr command struct
4818  *
4819  * This function sets up the SCSI bus attributes and sends
4820  * a Mode Select for Page 28 to activate them.
4821  *
4822  * Return value:
4823  * 	IPR_RC_JOB_RETURN
4824  **/
4825 static int ipr_ioafp_mode_select_page28(struct ipr_cmnd *ipr_cmd)
4826 {
4827 	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
4828 	struct ipr_mode_pages *mode_pages = &ioa_cfg->vpd_cbs->mode_pages;
4829 	int length;
4830 
4831 	ENTER;
4832 	if (ioa_cfg->saved_mode_pages) {
4833 		memcpy(mode_pages, ioa_cfg->saved_mode_pages,
4834 		       ioa_cfg->saved_mode_page_len);
4835 		length = ioa_cfg->saved_mode_page_len;
4836 	} else {
4837 		ipr_scsi_bus_speed_limit(ioa_cfg);
4838 		ipr_check_term_power(ioa_cfg, mode_pages);
4839 		ipr_modify_ioafp_mode_page_28(ioa_cfg, mode_pages);
4840 		length = mode_pages->hdr.length + 1;
		length = mode_pages->hdr.length + 1;	/* hdr.length excludes itself */
		mode_pages->hdr.length = 0;	/* mode data length is reserved for MODE SELECT */
4843 
4844 	ipr_build_mode_select(ipr_cmd, cpu_to_be32(IPR_IOA_RES_HANDLE), 0x11,
4845 			      ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, mode_pages),
4846 			      length);
4847 
4848 	ipr_cmd->job_step = ipr_setup_write_cache;
4849 	ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
4850 
4851 	LEAVE;
4852 	return IPR_RC_JOB_RETURN;
4853 }
4854 
4855 /**
4856  * ipr_build_mode_sense - Builds a mode sense command
 * @res_handle:	resource handle to send command to
4858  * @res:		resource entry struct
4859  * @parm:		Byte 2 of mode sense command
4860  * @dma_addr:	DMA address of mode sense buffer
4861  * @xfer_len:	Size of DMA buffer
4862  *
4863  * Return value:
4864  * 	none
4865  **/
4866 static void ipr_build_mode_sense(struct ipr_cmnd *ipr_cmd,
4867 				 __be32 res_handle,
4868 				 u8 parm, u32 dma_addr, u8 xfer_len)
4869 {
4870 	struct ipr_ioadl_desc *ioadl = ipr_cmd->ioadl;
4871 	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
4872 
4873 	ioarcb->res_handle = res_handle;
4874 	ioarcb->cmd_pkt.cdb[0] = MODE_SENSE;
4875 	ioarcb->cmd_pkt.cdb[2] = parm;
4876 	ioarcb->cmd_pkt.cdb[4] = xfer_len;
4877 	ioarcb->cmd_pkt.request_type = IPR_RQTYPE_SCSICDB;
4878 
4879 	ioadl->flags_and_data_len =
4880 		cpu_to_be32(IPR_IOADL_FLAGS_READ_LAST | xfer_len);
4881 	ioadl->address = cpu_to_be32(dma_addr);
4882 	ioarcb->read_ioadl_len = cpu_to_be32(sizeof(struct ipr_ioadl_desc));
4883 	ioarcb->read_data_transfer_length = cpu_to_be32(xfer_len);
4884 }
4885 
4886 /**
4887  * ipr_reset_cmd_failed - Handle failure of IOA reset command
4888  * @ipr_cmd:	ipr command struct
4889  *
4890  * This function handles the failure of an IOA bringup command.
4891  *
4892  * Return value:
4893  * 	IPR_RC_JOB_RETURN
4894  **/
4895 static int ipr_reset_cmd_failed(struct ipr_cmnd *ipr_cmd)
4896 {
4897 	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
4898 	u32 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
4899 
4900 	dev_err(&ioa_cfg->pdev->dev,
4901 		"0x%02X failed with IOASC: 0x%08X\n",
4902 		ipr_cmd->ioarcb.cmd_pkt.cdb[0], ioasc);
4903 
4904 	ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
4905 	list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
4906 	return IPR_RC_JOB_RETURN;
4907 }
4908 
4909 /**
4910  * ipr_reset_mode_sense_failed - Handle failure of IOAFP mode sense
4911  * @ipr_cmd:	ipr command struct
4912  *
4913  * This function handles the failure of a Mode Sense to the IOAFP.
4914  * Some adapters do not handle all mode pages.
4915  *
4916  * Return value:
4917  * 	IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
4918  **/
4919 static int ipr_reset_mode_sense_failed(struct ipr_cmnd *ipr_cmd)
4920 {
4921 	u32 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
4922 
4923 	if (ioasc == IPR_IOASC_IR_INVALID_REQ_TYPE_OR_PKT) {
4924 		ipr_cmd->job_step = ipr_setup_write_cache;
4925 		return IPR_RC_JOB_CONTINUE;
4926 	}
4927 
4928 	return ipr_reset_cmd_failed(ipr_cmd);
4929 }
4930 
4931 /**
4932  * ipr_ioafp_mode_sense_page28 - Issue Mode Sense Page 28 to IOA
4933  * @ipr_cmd:	ipr command struct
4934  *
 * This function sends a Page 28 mode sense to the IOA to
4936  * retrieve SCSI bus attributes.
4937  *
4938  * Return value:
4939  * 	IPR_RC_JOB_RETURN
4940  **/
4941 static int ipr_ioafp_mode_sense_page28(struct ipr_cmnd *ipr_cmd)
4942 {
4943 	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
4944 
4945 	ENTER;
4946 	ipr_build_mode_sense(ipr_cmd, cpu_to_be32(IPR_IOA_RES_HANDLE),
4947 			     0x28, ioa_cfg->vpd_cbs_dma +
4948 			     offsetof(struct ipr_misc_cbs, mode_pages),
4949 			     sizeof(struct ipr_mode_pages));
4950 
4951 	ipr_cmd->job_step = ipr_ioafp_mode_select_page28;
4952 	ipr_cmd->job_step_failed = ipr_reset_mode_sense_failed;
4953 
4954 	ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
4955 
4956 	LEAVE;
4957 	return IPR_RC_JOB_RETURN;
4958 }
4959 
4960 /**
4961  * ipr_init_res_table - Initialize the resource table
4962  * @ipr_cmd:	ipr command struct
4963  *
4964  * This function looks through the existing resource table, comparing
4965  * it with the config table. This function will take care of old/new
4966  * devices and schedule adding/removing them from the mid-layer
4967  * as appropriate.
4968  *
4969  * Return value:
4970  * 	IPR_RC_JOB_CONTINUE
4971  **/
4972 static int ipr_init_res_table(struct ipr_cmnd *ipr_cmd)
4973 {
4974 	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
4975 	struct ipr_resource_entry *res, *temp;
4976 	struct ipr_config_table_entry *cfgte;
4977 	int found, i;
4978 	LIST_HEAD(old_res);
4979 
4980 	ENTER;
4981 	if (ioa_cfg->cfg_table->hdr.flags & IPR_UCODE_DOWNLOAD_REQ)
4982 		dev_err(&ioa_cfg->pdev->dev, "Microcode download required\n");
4983 
4984 	list_for_each_entry_safe(res, temp, &ioa_cfg->used_res_q, queue)
4985 		list_move_tail(&res->queue, &old_res);
4986 
4987 	for (i = 0; i < ioa_cfg->cfg_table->hdr.num_entries; i++) {
4988 		cfgte = &ioa_cfg->cfg_table->dev[i];
4989 		found = 0;
4990 
4991 		list_for_each_entry_safe(res, temp, &old_res, queue) {
4992 			if (!memcmp(&res->cfgte.res_addr,
4993 				    &cfgte->res_addr, sizeof(cfgte->res_addr))) {
4994 				list_move_tail(&res->queue, &ioa_cfg->used_res_q);
4995 				found = 1;
4996 				break;
4997 			}
4998 		}
4999 
5000 		if (!found) {
5001 			if (list_empty(&ioa_cfg->free_res_q)) {
5002 				dev_err(&ioa_cfg->pdev->dev, "Too many devices attached\n");
5003 				break;
5004 			}
5005 
5006 			found = 1;
5007 			res = list_entry(ioa_cfg->free_res_q.next,
5008 					 struct ipr_resource_entry, queue);
5009 			list_move_tail(&res->queue, &ioa_cfg->used_res_q);
5010 			ipr_init_res_entry(res);
5011 			res->add_to_ml = 1;
5012 		}
5013 
5014 		if (found)
5015 			memcpy(&res->cfgte, cfgte, sizeof(struct ipr_config_table_entry));
5016 	}
5017 
5018 	list_for_each_entry_safe(res, temp, &old_res, queue) {
5019 		if (res->sdev) {
5020 			res->del_from_ml = 1;
5021 			res->sdev->hostdata = NULL;
5022 			list_move_tail(&res->queue, &ioa_cfg->used_res_q);
5023 		} else {
5024 			list_move_tail(&res->queue, &ioa_cfg->free_res_q);
5025 		}
5026 	}
5027 
5028 	ipr_cmd->job_step = ipr_ioafp_mode_sense_page28;
5029 
5030 	LEAVE;
5031 	return IPR_RC_JOB_CONTINUE;
5032 }
5033 
5034 /**
5035  * ipr_ioafp_query_ioa_cfg - Send a Query IOA Config to the adapter.
5036  * @ipr_cmd:	ipr command struct
5037  *
5038  * This function sends a Query IOA Configuration command
5039  * to the adapter to retrieve the IOA configuration table.
5040  *
5041  * Return value:
5042  * 	IPR_RC_JOB_RETURN
5043  **/
5044 static int ipr_ioafp_query_ioa_cfg(struct ipr_cmnd *ipr_cmd)
5045 {
5046 	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5047 	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
5048 	struct ipr_ioadl_desc *ioadl = ipr_cmd->ioadl;
5049 	struct ipr_inquiry_page3 *ucode_vpd = &ioa_cfg->vpd_cbs->page3_data;
5050 
5051 	ENTER;
5052 	dev_info(&ioa_cfg->pdev->dev, "Adapter firmware version: %02X%02X%02X%02X\n",
5053 		 ucode_vpd->major_release, ucode_vpd->card_type,
5054 		 ucode_vpd->minor_release[0], ucode_vpd->minor_release[1]);
5055 	ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
5056 	ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
5057 
5058 	ioarcb->cmd_pkt.cdb[0] = IPR_QUERY_IOA_CONFIG;
5059 	ioarcb->cmd_pkt.cdb[7] = (sizeof(struct ipr_config_table) >> 8) & 0xff;
5060 	ioarcb->cmd_pkt.cdb[8] = sizeof(struct ipr_config_table) & 0xff;
5061 
5062 	ioarcb->read_ioadl_len = cpu_to_be32(sizeof(struct ipr_ioadl_desc));
5063 	ioarcb->read_data_transfer_length =
5064 		cpu_to_be32(sizeof(struct ipr_config_table));
5065 
5066 	ioadl->address = cpu_to_be32(ioa_cfg->cfg_table_dma);
5067 	ioadl->flags_and_data_len =
5068 		cpu_to_be32(IPR_IOADL_FLAGS_READ_LAST | sizeof(struct ipr_config_table));
5069 
5070 	ipr_cmd->job_step = ipr_init_res_table;
5071 
5072 	ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
5073 
5074 	LEAVE;
5075 	return IPR_RC_JOB_RETURN;
5076 }
5077 
5078 /**
5079  * ipr_ioafp_inquiry - Send an Inquiry to the adapter.
5080  * @ipr_cmd:	ipr command struct
 * @ipr_cmd:	ipr command struct
 * @flags:	CDB flags byte (EVPD bit for vital product data pages)
 * @page:	page code to request
 * @dma_addr:	DMA address of the inquiry response buffer
 * @xfer_len:	transfer length
5082  * This utility function sends an inquiry to the adapter.
5083  *
5084  * Return value:
5085  * 	none
5086  **/
5087 static void ipr_ioafp_inquiry(struct ipr_cmnd *ipr_cmd, u8 flags, u8 page,
5088 			      u32 dma_addr, u8 xfer_len)
5089 {
5090 	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
5091 	struct ipr_ioadl_desc *ioadl = ipr_cmd->ioadl;
5092 
5093 	ENTER;
5094 	ioarcb->cmd_pkt.request_type = IPR_RQTYPE_SCSICDB;
5095 	ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
5096 
5097 	ioarcb->cmd_pkt.cdb[0] = INQUIRY;
5098 	ioarcb->cmd_pkt.cdb[1] = flags;
5099 	ioarcb->cmd_pkt.cdb[2] = page;
5100 	ioarcb->cmd_pkt.cdb[4] = xfer_len;
5101 
5102 	ioarcb->read_ioadl_len = cpu_to_be32(sizeof(struct ipr_ioadl_desc));
5103 	ioarcb->read_data_transfer_length = cpu_to_be32(xfer_len);
5104 
5105 	ioadl->address = cpu_to_be32(dma_addr);
5106 	ioadl->flags_and_data_len =
5107 		cpu_to_be32(IPR_IOADL_FLAGS_READ_LAST | xfer_len);
5108 
5109 	ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
5110 	LEAVE;
5111 }
5112 
5113 /**
5114  * ipr_inquiry_page_supported - Is the given inquiry page supported
5115  * @page0:		inquiry page 0 buffer
5116  * @page:		page code.
5117  *
5118  * This function determines if the specified inquiry page is supported.
5119  *
5120  * Return value:
5121  *	1 if page is supported / 0 if not
5122  **/
5123 static int ipr_inquiry_page_supported(struct ipr_inquiry_page0 *page0, u8 page)
5124 {
5125 	int i;
5126 
5127 	for (i = 0; i < min_t(u8, page0->len, IPR_INQUIRY_PAGE0_ENTRIES); i++)
5128 		if (page0->page[i] == page)
5129 			return 1;
5130 
5131 	return 0;
5132 }
5133 
5134 /**
5135  * ipr_ioafp_page3_inquiry - Send a Page 3 Inquiry to the adapter.
5136  * @ipr_cmd:	ipr command struct
5137  *
5138  * This function sends a Page 3 inquiry to the adapter
5139  * to retrieve software VPD information.
5140  *
5141  * Return value:
5142  * 	IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
5143  **/
5144 static int ipr_ioafp_page3_inquiry(struct ipr_cmnd *ipr_cmd)
5145 {
5146 	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5147 	struct ipr_inquiry_page0 *page0 = &ioa_cfg->vpd_cbs->page0_data;
5148 
5149 	ENTER;
5150 
5151 	if (!ipr_inquiry_page_supported(page0, 1))
5152 		ioa_cfg->cache_state = CACHE_NONE;
5153 
5154 	ipr_cmd->job_step = ipr_ioafp_query_ioa_cfg;
5155 
5156 	ipr_ioafp_inquiry(ipr_cmd, 1, 3,
5157 			  ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, page3_data),
5158 			  sizeof(struct ipr_inquiry_page3));
5159 
5160 	LEAVE;
5161 	return IPR_RC_JOB_RETURN;
5162 }
5163 
5164 /**
5165  * ipr_ioafp_page0_inquiry - Send a Page 0 Inquiry to the adapter.
5166  * @ipr_cmd:	ipr command struct
5167  *
5168  * This function sends a Page 0 inquiry to the adapter
5169  * to retrieve supported inquiry pages.
5170  *
5171  * Return value:
5172  * 	IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
5173  **/
5174 static int ipr_ioafp_page0_inquiry(struct ipr_cmnd *ipr_cmd)
5175 {
5176 	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5177 	char type[5];
5178 
5179 	ENTER;
5180 
5181 	/* Grab the type out of the VPD and store it away */
5182 	memcpy(type, ioa_cfg->vpd_cbs->ioa_vpd.std_inq_data.vpids.product_id, 4);
5183 	type[4] = '\0';
5184 	ioa_cfg->type = simple_strtoul((char *)type, NULL, 16);
5185 
5186 	ipr_cmd->job_step = ipr_ioafp_page3_inquiry;
5187 
5188 	ipr_ioafp_inquiry(ipr_cmd, 1, 0,
5189 			  ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, page0_data),
5190 			  sizeof(struct ipr_inquiry_page0));
5191 
5192 	LEAVE;
5193 	return IPR_RC_JOB_RETURN;
5194 }
5195 
5196 /**
5197  * ipr_ioafp_std_inquiry - Send a Standard Inquiry to the adapter.
5198  * @ipr_cmd:	ipr command struct
5199  *
5200  * This function sends a standard inquiry to the adapter.
5201  *
5202  * Return value:
5203  * 	IPR_RC_JOB_RETURN
5204  **/
5205 static int ipr_ioafp_std_inquiry(struct ipr_cmnd *ipr_cmd)
5206 {
5207 	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5208 
5209 	ENTER;
5210 	ipr_cmd->job_step = ipr_ioafp_page0_inquiry;
5211 
5212 	ipr_ioafp_inquiry(ipr_cmd, 0, 0,
5213 			  ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, ioa_vpd),
5214 			  sizeof(struct ipr_ioa_vpd));
5215 
5216 	LEAVE;
5217 	return IPR_RC_JOB_RETURN;
5218 }
5219 
5220 /**
 * ipr_ioafp_identify_hrrq - Send Identify Host RRQ.
 * @ipr_cmd:	ipr command struct
 *
 * This function sends an Identify Host Request Response Queue
5225  * command to establish the HRRQ with the adapter.
5226  *
5227  * Return value:
5228  * 	IPR_RC_JOB_RETURN
5229  **/
static int ipr_ioafp_identify_hrrq(struct ipr_cmnd *ipr_cmd)
5231 {
5232 	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5233 	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
5234 
5235 	ENTER;
5236 	dev_info(&ioa_cfg->pdev->dev, "Starting IOA initialization sequence.\n");
5237 
5238 	ioarcb->cmd_pkt.cdb[0] = IPR_ID_HOST_RR_Q;
5239 	ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
5240 
5241 	ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
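	/*
	 * Bytes 2-5 of the CDB carry the HRRQ DMA address, MSB first;
	 * bytes 7-8 carry the queue size in bytes, MSB first.
	 */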
5242 	ioarcb->cmd_pkt.cdb[2] =
5243 		((u32) ioa_cfg->host_rrq_dma >> 24) & 0xff;
5244 	ioarcb->cmd_pkt.cdb[3] =
5245 		((u32) ioa_cfg->host_rrq_dma >> 16) & 0xff;
5246 	ioarcb->cmd_pkt.cdb[4] =
5247 		((u32) ioa_cfg->host_rrq_dma >> 8) & 0xff;
5248 	ioarcb->cmd_pkt.cdb[5] =
5249 		((u32) ioa_cfg->host_rrq_dma) & 0xff;
5250 	ioarcb->cmd_pkt.cdb[7] =
5251 		((sizeof(u32) * IPR_NUM_CMD_BLKS) >> 8) & 0xff;
5252 	ioarcb->cmd_pkt.cdb[8] =
5253 		(sizeof(u32) * IPR_NUM_CMD_BLKS) & 0xff;
5254 
5255 	ipr_cmd->job_step = ipr_ioafp_std_inquiry;
5256 
5257 	ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
5258 
5259 	LEAVE;
5260 	return IPR_RC_JOB_RETURN;
5261 }
5262 
5263 /**
5264  * ipr_reset_timer_done - Adapter reset timer function
5265  * @ipr_cmd:	ipr command struct
5266  *
5267  * Description: This function is used in adapter reset processing
 * for timing events. If the reset_cmd pointer in the IOA
 * config struct does not point to this command, we are doing
 * nested resets and fail_all_ops will take care of freeing the
 * command block.
5272  *
5273  * Return value:
5274  * 	none
5275  **/
5276 static void ipr_reset_timer_done(struct ipr_cmnd *ipr_cmd)
5277 {
5278 	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5279 	unsigned long lock_flags = 0;
5280 
5281 	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
5282 
5283 	if (ioa_cfg->reset_cmd == ipr_cmd) {
5284 		list_del(&ipr_cmd->queue);
5285 		ipr_cmd->done(ipr_cmd);
5286 	}
5287 
5288 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
5289 }
5290 
5291 /**
5292  * ipr_reset_start_timer - Start a timer for adapter reset job
5293  * @ipr_cmd:	ipr command struct
5294  * @timeout:	timeout value
5295  *
 * Description: This function is used in adapter reset processing
 * for timing events. If the reset_cmd pointer in the IOA
 * config struct does not point to this command, we are doing
 * nested resets and fail_all_ops will take care of freeing
 * the command block.
5301  *
5302  * Return value:
5303  * 	none
5304  **/
5305 static void ipr_reset_start_timer(struct ipr_cmnd *ipr_cmd,
5306 				  unsigned long timeout)
5307 {
5308 	list_add_tail(&ipr_cmd->queue, &ipr_cmd->ioa_cfg->pending_q);
5309 	ipr_cmd->done = ipr_reset_ioa_job;
5310 
5311 	ipr_cmd->timer.data = (unsigned long) ipr_cmd;
5312 	ipr_cmd->timer.expires = jiffies + timeout;
5313 	ipr_cmd->timer.function = (void (*)(unsigned long))ipr_reset_timer_done;
5314 	add_timer(&ipr_cmd->timer);
5315 }
5316 
5317 /**
5318  * ipr_init_ioa_mem - Initialize ioa_cfg control block
5319  * @ioa_cfg:	ioa cfg struct
5320  *
5321  * Return value:
5322  * 	nothing
5323  **/
5324 static void ipr_init_ioa_mem(struct ipr_ioa_cfg *ioa_cfg)
5325 {
5326 	memset(ioa_cfg->host_rrq, 0, sizeof(u32) * IPR_NUM_CMD_BLKS);
5327 
5328 	/* Initialize Host RRQ pointers */
5329 	ioa_cfg->hrrq_start = ioa_cfg->host_rrq;
5330 	ioa_cfg->hrrq_end = &ioa_cfg->host_rrq[IPR_NUM_CMD_BLKS - 1];
5331 	ioa_cfg->hrrq_curr = ioa_cfg->hrrq_start;
5332 	ioa_cfg->toggle_bit = 1;
5333 
5334 	/* Zero out config table */
5335 	memset(ioa_cfg->cfg_table, 0, sizeof(struct ipr_config_table));
5336 }
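
/*
 * The host RRQ initialized above is consumed as a ring: hrrq_curr
 * advances from hrrq_start to hrrq_end and then wraps, flipping
 * toggle_bit so that fresh entries can be told apart from stale ones.
 * A minimal sketch of the consumer pattern (mirroring the logic in
 * ipr_isr):
 *
 *	while ((be32_to_cpu(*ioa_cfg->hrrq_curr) & IPR_HRRQ_TOGGLE_BIT)
 *	       == ioa_cfg->toggle_bit) {
 *		... process the completed command this entry indexes ...
 *		if (ioa_cfg->hrrq_curr < ioa_cfg->hrrq_end) {
 *			ioa_cfg->hrrq_curr++;
 *		} else {
 *			ioa_cfg->hrrq_curr = ioa_cfg->hrrq_start;
 *			ioa_cfg->toggle_bit ^= 1u;
 *		}
 *	}
 */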
5337 
5338 /**
5339  * ipr_reset_enable_ioa - Enable the IOA following a reset.
5340  * @ipr_cmd:	ipr command struct
5341  *
5342  * This function reinitializes some control blocks and
5343  * enables destructive diagnostics on the adapter.
5344  *
5345  * Return value:
 * 	IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
5347  **/
5348 static int ipr_reset_enable_ioa(struct ipr_cmnd *ipr_cmd)
5349 {
5350 	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5351 	volatile u32 int_reg;
5352 
5353 	ENTER;
	ipr_cmd->job_step = ipr_ioafp_identify_hrrq;
5355 	ipr_init_ioa_mem(ioa_cfg);
5356 
5357 	ioa_cfg->allow_interrupts = 1;
5358 	int_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
5359 
5360 	if (int_reg & IPR_PCII_IOA_TRANS_TO_OPER) {
5361 		writel((IPR_PCII_ERROR_INTERRUPTS | IPR_PCII_HRRQ_UPDATED),
5362 		       ioa_cfg->regs.clr_interrupt_mask_reg);
5363 		int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
5364 		return IPR_RC_JOB_CONTINUE;
5365 	}
5366 
5367 	/* Enable destructive diagnostics on IOA */
5368 	writel(ioa_cfg->doorbell, ioa_cfg->regs.set_uproc_interrupt_reg);
5369 
5370 	writel(IPR_PCII_OPER_INTERRUPTS, ioa_cfg->regs.clr_interrupt_mask_reg);
5371 	int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
5372 
5373 	dev_info(&ioa_cfg->pdev->dev, "Initializing IOA.\n");
5374 
5375 	ipr_cmd->timer.data = (unsigned long) ipr_cmd;
5376 	ipr_cmd->timer.expires = jiffies + (ipr_transop_timeout * HZ);
5377 	ipr_cmd->timer.function = (void (*)(unsigned long))ipr_oper_timeout;
5378 	ipr_cmd->done = ipr_reset_ioa_job;
5379 	add_timer(&ipr_cmd->timer);
5380 	list_add_tail(&ipr_cmd->queue, &ioa_cfg->pending_q);
5381 
5382 	LEAVE;
5383 	return IPR_RC_JOB_RETURN;
5384 }
5385 
5386 /**
5387  * ipr_reset_wait_for_dump - Wait for a dump to timeout.
5388  * @ipr_cmd:	ipr command struct
5389  *
5390  * This function is invoked when an adapter dump has run out
5391  * of processing time.
5392  *
5393  * Return value:
5394  * 	IPR_RC_JOB_CONTINUE
5395  **/
5396 static int ipr_reset_wait_for_dump(struct ipr_cmnd *ipr_cmd)
5397 {
5398 	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5399 
5400 	if (ioa_cfg->sdt_state == GET_DUMP)
5401 		ioa_cfg->sdt_state = ABORT_DUMP;
5402 
5403 	ipr_cmd->job_step = ipr_reset_alert;
5404 
5405 	return IPR_RC_JOB_CONTINUE;
5406 }
5407 
5408 /**
 * ipr_unit_check_no_data - Log a unit check/no data error
5410  * @ioa_cfg:		ioa config struct
5411  *
5412  * Logs an error indicating the adapter unit checked, but for some
5413  * reason, we were unable to fetch the unit check buffer.
5414  *
5415  * Return value:
5416  * 	nothing
5417  **/
5418 static void ipr_unit_check_no_data(struct ipr_ioa_cfg *ioa_cfg)
5419 {
5420 	ioa_cfg->errors_logged++;
5421 	dev_err(&ioa_cfg->pdev->dev, "IOA unit check with no data\n");
5422 }
5423 
5424 /**
5425  * ipr_get_unit_check_buffer - Get the unit check buffer from the IOA
5426  * @ioa_cfg:		ioa config struct
5427  *
5428  * Fetches the unit check buffer from the adapter by clocking the data
5429  * through the mailbox register.
5430  *
5431  * Return value:
5432  * 	nothing
5433  **/
5434 static void ipr_get_unit_check_buffer(struct ipr_ioa_cfg *ioa_cfg)
5435 {
5436 	unsigned long mailbox;
5437 	struct ipr_hostrcb *hostrcb;
5438 	struct ipr_uc_sdt sdt;
5439 	int rc, length;
5440 
5441 	mailbox = readl(ioa_cfg->ioa_mailbox);
5442 
5443 	if (!ipr_sdt_is_fmt2(mailbox)) {
5444 		ipr_unit_check_no_data(ioa_cfg);
5445 		return;
5446 	}
5447 
5448 	memset(&sdt, 0, sizeof(struct ipr_uc_sdt));
5449 	rc = ipr_get_ldump_data_section(ioa_cfg, mailbox, (__be32 *) &sdt,
5450 					(sizeof(struct ipr_uc_sdt)) / sizeof(__be32));
5451 
5452 	if (rc || (be32_to_cpu(sdt.hdr.state) != IPR_FMT2_SDT_READY_TO_USE) ||
5453 	    !(sdt.entry[0].flags & IPR_SDT_VALID_ENTRY)) {
5454 		ipr_unit_check_no_data(ioa_cfg);
5455 		return;
5456 	}
5457 
5458 	/* Find length of the first sdt entry (UC buffer) */
5459 	length = (be32_to_cpu(sdt.entry[0].end_offset) -
5460 		  be32_to_cpu(sdt.entry[0].bar_str_offset)) & IPR_FMT2_MBX_ADDR_MASK;
5461 
5462 	hostrcb = list_entry(ioa_cfg->hostrcb_free_q.next,
5463 			     struct ipr_hostrcb, queue);
5464 	list_del(&hostrcb->queue);
5465 	memset(&hostrcb->hcam, 0, sizeof(hostrcb->hcam));
5466 
5467 	rc = ipr_get_ldump_data_section(ioa_cfg,
5468 					be32_to_cpu(sdt.entry[0].bar_str_offset),
5469 					(__be32 *)&hostrcb->hcam,
5470 					min(length, (int)sizeof(hostrcb->hcam)) / sizeof(__be32));
5471 
5472 	if (!rc)
5473 		ipr_handle_log_data(ioa_cfg, hostrcb);
5474 	else
5475 		ipr_unit_check_no_data(ioa_cfg);
5476 
5477 	list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_free_q);
5478 }
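
/*
 * Worked example of the length computation above (offsets
 * illustrative): for bar_str_offset == 0x00012000 and end_offset ==
 * 0x00012400, the masked difference gives a 0x400 byte unit check
 * buffer, which is then clamped to sizeof(hostrcb->hcam) before the
 * dump fetch.
 */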
5479 
5480 /**
5481  * ipr_reset_restore_cfg_space - Restore PCI config space.
5482  * @ipr_cmd:	ipr command struct
5483  *
5484  * Description: This function restores the saved PCI config space of
5485  * the adapter, fails all outstanding ops back to the callers, and
5486  * fetches the dump/unit check if applicable to this reset.
5487  *
5488  * Return value:
5489  * 	IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
5490  **/
5491 static int ipr_reset_restore_cfg_space(struct ipr_cmnd *ipr_cmd)
5492 {
5493 	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5494 	int rc;
5495 
5496 	ENTER;
5497 	pci_unblock_user_cfg_access(ioa_cfg->pdev);
5498 	rc = pci_restore_state(ioa_cfg->pdev);
5499 
5500 	if (rc != PCIBIOS_SUCCESSFUL) {
5501 		ipr_cmd->ioasa.ioasc = cpu_to_be32(IPR_IOASC_PCI_ACCESS_ERROR);
5502 		return IPR_RC_JOB_CONTINUE;
5503 	}
5504 
5505 	if (ipr_set_pcix_cmd_reg(ioa_cfg)) {
5506 		ipr_cmd->ioasa.ioasc = cpu_to_be32(IPR_IOASC_PCI_ACCESS_ERROR);
5507 		return IPR_RC_JOB_CONTINUE;
5508 	}
5509 
5510 	ipr_fail_all_ops(ioa_cfg);
5511 
5512 	if (ioa_cfg->ioa_unit_checked) {
5513 		ioa_cfg->ioa_unit_checked = 0;
5514 		ipr_get_unit_check_buffer(ioa_cfg);
5515 		ipr_cmd->job_step = ipr_reset_alert;
5516 		ipr_reset_start_timer(ipr_cmd, 0);
5517 		return IPR_RC_JOB_RETURN;
5518 	}
5519 
5520 	if (ioa_cfg->in_ioa_bringdown) {
5521 		ipr_cmd->job_step = ipr_ioa_bringdown_done;
5522 	} else {
5523 		ipr_cmd->job_step = ipr_reset_enable_ioa;
5524 
5525 		if (GET_DUMP == ioa_cfg->sdt_state) {
5526 			ipr_reset_start_timer(ipr_cmd, IPR_DUMP_TIMEOUT);
5527 			ipr_cmd->job_step = ipr_reset_wait_for_dump;
5528 			schedule_work(&ioa_cfg->work_q);
5529 			return IPR_RC_JOB_RETURN;
5530 		}
5531 	}
5532 
	LEAVE;
5534 	return IPR_RC_JOB_CONTINUE;
5535 }
5536 
5537 /**
5538  * ipr_reset_start_bist - Run BIST on the adapter.
5539  * @ipr_cmd:	ipr command struct
5540  *
5541  * Description: This function runs BIST on the adapter, then delays 2 seconds.
5542  *
5543  * Return value:
5544  * 	IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
5545  **/
5546 static int ipr_reset_start_bist(struct ipr_cmnd *ipr_cmd)
5547 {
5548 	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5549 	int rc;
5550 
5551 	ENTER;
5552 	pci_block_user_cfg_access(ioa_cfg->pdev);
5553 	rc = pci_write_config_byte(ioa_cfg->pdev, PCI_BIST, PCI_BIST_START);
5554 
5555 	if (rc != PCIBIOS_SUCCESSFUL) {
5556 		ipr_cmd->ioasa.ioasc = cpu_to_be32(IPR_IOASC_PCI_ACCESS_ERROR);
5557 		rc = IPR_RC_JOB_CONTINUE;
5558 	} else {
5559 		ipr_cmd->job_step = ipr_reset_restore_cfg_space;
5560 		ipr_reset_start_timer(ipr_cmd, IPR_WAIT_FOR_BIST_TIMEOUT);
5561 		rc = IPR_RC_JOB_RETURN;
5562 	}
5563 
5564 	LEAVE;
5565 	return rc;
5566 }
5567 
5568 /**
5569  * ipr_reset_allowed - Query whether or not IOA can be reset
5570  * @ioa_cfg:	ioa config struct
5571  *
5572  * Return value:
5573  * 	0 if reset not allowed / non-zero if reset is allowed
5574  **/
5575 static int ipr_reset_allowed(struct ipr_ioa_cfg *ioa_cfg)
5576 {
5577 	volatile u32 temp_reg;
5578 
5579 	temp_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
5580 	return ((temp_reg & IPR_PCII_CRITICAL_OPERATION) == 0);
5581 }
5582 
5583 /**
5584  * ipr_reset_wait_to_start_bist - Wait for permission to reset IOA.
5585  * @ipr_cmd:	ipr command struct
5586  *
 * Description: This function waits for adapter permission to run BIST,
 * then runs BIST. If the adapter does not give permission after a
 * reasonable time, we will reset the adapter anyway. Resetting the
 * adapter without warning it risks losing its persistent error log:
 * if the adapter is reset while it is writing to its flash, the
 * flash segment will have bad ECC and be zeroed.
5594  *
5595  * Return value:
5596  * 	IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
5597  **/
5598 static int ipr_reset_wait_to_start_bist(struct ipr_cmnd *ipr_cmd)
5599 {
5600 	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5601 	int rc = IPR_RC_JOB_RETURN;
5602 
5603 	if (!ipr_reset_allowed(ioa_cfg) && ipr_cmd->u.time_left) {
5604 		ipr_cmd->u.time_left -= IPR_CHECK_FOR_RESET_TIMEOUT;
5605 		ipr_reset_start_timer(ipr_cmd, IPR_CHECK_FOR_RESET_TIMEOUT);
5606 	} else {
5607 		ipr_cmd->job_step = ipr_reset_start_bist;
5608 		rc = IPR_RC_JOB_CONTINUE;
5609 	}
5610 
5611 	return rc;
5612 }
5613 
5614 /**
5615  * ipr_reset_alert_part2 - Alert the adapter of a pending reset
5616  * @ipr_cmd:	ipr command struct
5617  *
5618  * Description: This function alerts the adapter that it will be reset.
5619  * If memory space is not currently enabled, proceed directly
5620  * to running BIST on the adapter. The timer must always be started
5621  * so we guarantee we do not run BIST from ipr_isr.
5622  *
5623  * Return value:
5624  * 	IPR_RC_JOB_RETURN
5625  **/
5626 static int ipr_reset_alert(struct ipr_cmnd *ipr_cmd)
5627 {
5628 	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5629 	u16 cmd_reg;
5630 	int rc;
5631 
5632 	ENTER;
5633 	rc = pci_read_config_word(ioa_cfg->pdev, PCI_COMMAND, &cmd_reg);
5634 
5635 	if ((rc == PCIBIOS_SUCCESSFUL) && (cmd_reg & PCI_COMMAND_MEMORY)) {
5636 		ipr_mask_and_clear_interrupts(ioa_cfg, ~0);
5637 		writel(IPR_UPROCI_RESET_ALERT, ioa_cfg->regs.set_uproc_interrupt_reg);
5638 		ipr_cmd->job_step = ipr_reset_wait_to_start_bist;
5639 	} else {
5640 		ipr_cmd->job_step = ipr_reset_start_bist;
5641 	}
5642 
5643 	ipr_cmd->u.time_left = IPR_WAIT_FOR_RESET_TIMEOUT;
5644 	ipr_reset_start_timer(ipr_cmd, IPR_CHECK_FOR_RESET_TIMEOUT);
5645 
5646 	LEAVE;
5647 	return IPR_RC_JOB_RETURN;
5648 }
5649 
5650 /**
5651  * ipr_reset_ucode_download_done - Microcode download completion
5652  * @ipr_cmd:	ipr command struct
5653  *
5654  * Description: This function unmaps the microcode download buffer.
5655  *
5656  * Return value:
5657  * 	IPR_RC_JOB_CONTINUE
5658  **/
5659 static int ipr_reset_ucode_download_done(struct ipr_cmnd *ipr_cmd)
5660 {
5661 	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5662 	struct ipr_sglist *sglist = ioa_cfg->ucode_sglist;
5663 
5664 	pci_unmap_sg(ioa_cfg->pdev, sglist->scatterlist,
5665 		     sglist->num_sg, DMA_TO_DEVICE);
5666 
5667 	ipr_cmd->job_step = ipr_reset_alert;
5668 	return IPR_RC_JOB_CONTINUE;
5669 }
5670 
5671 /**
5672  * ipr_reset_ucode_download - Download microcode to the adapter
5673  * @ipr_cmd:	ipr command struct
5674  *
 * Description: This function checks to see if there is microcode
 * to download to the adapter. If there is, a download is performed.
5677  *
5678  * Return value:
5679  * 	IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
5680  **/
5681 static int ipr_reset_ucode_download(struct ipr_cmnd *ipr_cmd)
5682 {
5683 	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5684 	struct ipr_sglist *sglist = ioa_cfg->ucode_sglist;
5685 
5686 	ENTER;
5687 	ipr_cmd->job_step = ipr_reset_alert;
5688 
5689 	if (!sglist)
5690 		return IPR_RC_JOB_CONTINUE;
5691 
5692 	ipr_cmd->ioarcb.res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
5693 	ipr_cmd->ioarcb.cmd_pkt.request_type = IPR_RQTYPE_SCSICDB;
5694 	ipr_cmd->ioarcb.cmd_pkt.cdb[0] = WRITE_BUFFER;
5695 	ipr_cmd->ioarcb.cmd_pkt.cdb[1] = IPR_WR_BUF_DOWNLOAD_AND_SAVE;
5696 	ipr_cmd->ioarcb.cmd_pkt.cdb[6] = (sglist->buffer_len & 0xff0000) >> 16;
5697 	ipr_cmd->ioarcb.cmd_pkt.cdb[7] = (sglist->buffer_len & 0x00ff00) >> 8;
5698 	ipr_cmd->ioarcb.cmd_pkt.cdb[8] = sglist->buffer_len & 0x0000ff;
5699 
5700 	ipr_build_ucode_ioadl(ipr_cmd, sglist);
5701 	ipr_cmd->job_step = ipr_reset_ucode_download_done;
5702 
5703 	ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout,
5704 		   IPR_WRITE_BUFFER_TIMEOUT);
5705 
5706 	LEAVE;
5707 	return IPR_RC_JOB_RETURN;
5708 }
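
/*
 * The WRITE BUFFER CDB built above carries the 24-bit image length in
 * cdb[6..8], most significant byte first. A worked example (length
 * illustrative): a 1 MB microcode image, buffer_len == 0x100000, is
 * encoded as
 *
 *	cdb[6] = 0x10;	cdb[7] = 0x00;	cdb[8] = 0x00;
 */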
5709 
5710 /**
5711  * ipr_reset_shutdown_ioa - Shutdown the adapter
5712  * @ipr_cmd:	ipr command struct
5713  *
5714  * Description: This function issues an adapter shutdown of the
5715  * specified type to the specified adapter as part of the
5716  * adapter reset job.
5717  *
5718  * Return value:
5719  * 	IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
5720  **/
5721 static int ipr_reset_shutdown_ioa(struct ipr_cmnd *ipr_cmd)
5722 {
5723 	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5724 	enum ipr_shutdown_type shutdown_type = ipr_cmd->u.shutdown_type;
5725 	unsigned long timeout;
5726 	int rc = IPR_RC_JOB_CONTINUE;
5727 
5728 	ENTER;
5729 	if (shutdown_type != IPR_SHUTDOWN_NONE && !ioa_cfg->ioa_is_dead) {
5730 		ipr_cmd->ioarcb.res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
5731 		ipr_cmd->ioarcb.cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
5732 		ipr_cmd->ioarcb.cmd_pkt.cdb[0] = IPR_IOA_SHUTDOWN;
5733 		ipr_cmd->ioarcb.cmd_pkt.cdb[1] = shutdown_type;
5734 
5735 		if (shutdown_type == IPR_SHUTDOWN_ABBREV)
5736 			timeout = IPR_ABBREV_SHUTDOWN_TIMEOUT;
5737 		else if (shutdown_type == IPR_SHUTDOWN_PREPARE_FOR_NORMAL)
5738 			timeout = IPR_INTERNAL_TIMEOUT;
5739 		else
5740 			timeout = IPR_SHUTDOWN_TIMEOUT;
5741 
5742 		ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, timeout);
5743 
5744 		rc = IPR_RC_JOB_RETURN;
5745 		ipr_cmd->job_step = ipr_reset_ucode_download;
5746 	} else
5747 		ipr_cmd->job_step = ipr_reset_alert;
5748 
5749 	LEAVE;
5750 	return rc;
5751 }
5752 
5753 /**
5754  * ipr_reset_ioa_job - Adapter reset job
5755  * @ipr_cmd:	ipr command struct
5756  *
5757  * Description: This function is the job router for the adapter reset job.
5758  *
5759  * Return value:
5760  * 	none
5761  **/
5762 static void ipr_reset_ioa_job(struct ipr_cmnd *ipr_cmd)
5763 {
5764 	u32 rc, ioasc;
5765 	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5766 
5767 	do {
5768 		ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
5769 
5770 		if (ioa_cfg->reset_cmd != ipr_cmd) {
5771 			/*
5772 			 * We are doing nested adapter resets and this is
5773 			 * not the current reset job.
5774 			 */
5775 			list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
5776 			return;
5777 		}
5778 
5779 		if (IPR_IOASC_SENSE_KEY(ioasc)) {
5780 			rc = ipr_cmd->job_step_failed(ipr_cmd);
5781 			if (rc == IPR_RC_JOB_RETURN)
5782 				return;
5783 		}
5784 
5785 		ipr_reinit_ipr_cmnd(ipr_cmd);
5786 		ipr_cmd->job_step_failed = ipr_reset_cmd_failed;
5787 		rc = ipr_cmd->job_step(ipr_cmd);
	} while (rc == IPR_RC_JOB_CONTINUE);
5789 }
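
/*
 * The reset job is a chain of job_step functions driven by the loop
 * above. A step returns IPR_RC_JOB_CONTINUE to have the next step run
 * immediately, or IPR_RC_JOB_RETURN after starting an asynchronous
 * operation (a timer or an adapter command) whose completion re-enters
 * ipr_reset_ioa_job. A minimal sketch of a conforming step, using the
 * hypothetical names ipr_reset_next_step and work_already_done:
 *
 *	static int ipr_reset_example_step(struct ipr_cmnd *ipr_cmd)
 *	{
 *		ipr_cmd->job_step = ipr_reset_next_step;
 *		if (work_already_done)
 *			return IPR_RC_JOB_CONTINUE;
 *		ipr_reset_start_timer(ipr_cmd, IPR_CHECK_FOR_RESET_TIMEOUT);
 *		return IPR_RC_JOB_RETURN;
 *	}
 */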
5790 
5791 /**
5792  * _ipr_initiate_ioa_reset - Initiate an adapter reset
5793  * @ioa_cfg:		ioa config struct
5794  * @job_step:		first job step of reset job
5795  * @shutdown_type:	shutdown type
5796  *
5797  * Description: This function will initiate the reset of the given adapter
5798  * starting at the selected job step.
5799  * If the caller needs to wait on the completion of the reset,
5800  * the caller must sleep on the reset_wait_q.
5801  *
5802  * Return value:
5803  * 	none
5804  **/
5805 static void _ipr_initiate_ioa_reset(struct ipr_ioa_cfg *ioa_cfg,
5806 				    int (*job_step) (struct ipr_cmnd *),
5807 				    enum ipr_shutdown_type shutdown_type)
5808 {
5809 	struct ipr_cmnd *ipr_cmd;
5810 
5811 	ioa_cfg->in_reset_reload = 1;
5812 	ioa_cfg->allow_cmds = 0;
5813 	scsi_block_requests(ioa_cfg->host);
5814 
5815 	ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
5816 	ioa_cfg->reset_cmd = ipr_cmd;
5817 	ipr_cmd->job_step = job_step;
5818 	ipr_cmd->u.shutdown_type = shutdown_type;
5819 
5820 	ipr_reset_ioa_job(ipr_cmd);
5821 }
5822 
5823 /**
5824  * ipr_initiate_ioa_reset - Initiate an adapter reset
5825  * @ioa_cfg:		ioa config struct
5826  * @shutdown_type:	shutdown type
5827  *
5828  * Description: This function will initiate the reset of the given adapter.
5829  * If the caller needs to wait on the completion of the reset,
5830  * the caller must sleep on the reset_wait_q.
5831  *
5832  * Return value:
5833  * 	none
5834  **/
5835 static void ipr_initiate_ioa_reset(struct ipr_ioa_cfg *ioa_cfg,
5836 				   enum ipr_shutdown_type shutdown_type)
5837 {
5838 	if (ioa_cfg->ioa_is_dead)
5839 		return;
5840 
5841 	if (ioa_cfg->in_reset_reload && ioa_cfg->sdt_state == GET_DUMP)
5842 		ioa_cfg->sdt_state = ABORT_DUMP;
5843 
5844 	if (ioa_cfg->reset_retries++ >= IPR_NUM_RESET_RELOAD_RETRIES) {
5845 		dev_err(&ioa_cfg->pdev->dev,
5846 			"IOA taken offline - error recovery failed\n");
5847 
5848 		ioa_cfg->reset_retries = 0;
5849 		ioa_cfg->ioa_is_dead = 1;
5850 
5851 		if (ioa_cfg->in_ioa_bringdown) {
5852 			ioa_cfg->reset_cmd = NULL;
5853 			ioa_cfg->in_reset_reload = 0;
5854 			ipr_fail_all_ops(ioa_cfg);
5855 			wake_up_all(&ioa_cfg->reset_wait_q);
5856 
5857 			spin_unlock_irq(ioa_cfg->host->host_lock);
5858 			scsi_unblock_requests(ioa_cfg->host);
5859 			spin_lock_irq(ioa_cfg->host->host_lock);
5860 			return;
5861 		} else {
5862 			ioa_cfg->in_ioa_bringdown = 1;
5863 			shutdown_type = IPR_SHUTDOWN_NONE;
5864 		}
5865 	}
5866 
5867 	_ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_shutdown_ioa,
5868 				shutdown_type);
5869 }
5870 
5871 /**
5872  * ipr_probe_ioa_part2 - Initializes IOAs found in ipr_probe_ioa(..)
5873  * @ioa_cfg:	ioa cfg struct
5874  *
 * Description: This is the second phase of adapter initialization.
 * This function takes care of initializing the adapter to the point
 * where it can accept new commands.
 *
 * Return value:
 * 	0 on success / -EIO on failure
5881  **/
5882 static int __devinit ipr_probe_ioa_part2(struct ipr_ioa_cfg *ioa_cfg)
5883 {
5884 	int rc = 0;
5885 	unsigned long host_lock_flags = 0;
5886 
5887 	ENTER;
5888 	spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
	dev_dbg(&ioa_cfg->pdev->dev, "ioa_cfg addr: 0x%p\n", ioa_cfg);
5890 	_ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_enable_ioa, IPR_SHUTDOWN_NONE);
5891 
5892 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
5893 	wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
5894 	spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
5895 
5896 	if (ioa_cfg->ioa_is_dead) {
5897 		rc = -EIO;
5898 	} else if (ipr_invalid_adapter(ioa_cfg)) {
5899 		if (!ipr_testmode)
5900 			rc = -EIO;
5901 
5902 		dev_err(&ioa_cfg->pdev->dev,
5903 			"Adapter not supported in this hardware configuration.\n");
5904 	}
5905 
5906 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
5907 
5908 	LEAVE;
5909 	return rc;
5910 }
5911 
5912 /**
5913  * ipr_free_cmd_blks - Frees command blocks allocated for an adapter
5914  * @ioa_cfg:	ioa config struct
5915  *
5916  * Return value:
5917  * 	none
5918  **/
5919 static void ipr_free_cmd_blks(struct ipr_ioa_cfg *ioa_cfg)
5920 {
5921 	int i;
5922 
5923 	for (i = 0; i < IPR_NUM_CMD_BLKS; i++) {
5924 		if (ioa_cfg->ipr_cmnd_list[i])
5925 			pci_pool_free(ioa_cfg->ipr_cmd_pool,
5926 				      ioa_cfg->ipr_cmnd_list[i],
5927 				      ioa_cfg->ipr_cmnd_list_dma[i]);
5928 
5929 		ioa_cfg->ipr_cmnd_list[i] = NULL;
5930 	}
5931 
5932 	if (ioa_cfg->ipr_cmd_pool)
		pci_pool_destroy(ioa_cfg->ipr_cmd_pool);
5934 
5935 	ioa_cfg->ipr_cmd_pool = NULL;
5936 }
5937 
5938 /**
5939  * ipr_free_mem - Frees memory allocated for an adapter
5940  * @ioa_cfg:	ioa cfg struct
5941  *
5942  * Return value:
5943  * 	nothing
5944  **/
5945 static void ipr_free_mem(struct ipr_ioa_cfg *ioa_cfg)
5946 {
5947 	int i;
5948 
5949 	kfree(ioa_cfg->res_entries);
5950 	pci_free_consistent(ioa_cfg->pdev, sizeof(struct ipr_misc_cbs),
5951 			    ioa_cfg->vpd_cbs, ioa_cfg->vpd_cbs_dma);
5952 	ipr_free_cmd_blks(ioa_cfg);
5953 	pci_free_consistent(ioa_cfg->pdev, sizeof(u32) * IPR_NUM_CMD_BLKS,
5954 			    ioa_cfg->host_rrq, ioa_cfg->host_rrq_dma);
5955 	pci_free_consistent(ioa_cfg->pdev, sizeof(struct ipr_config_table),
5956 			    ioa_cfg->cfg_table,
5957 			    ioa_cfg->cfg_table_dma);
5958 
5959 	for (i = 0; i < IPR_NUM_HCAMS; i++) {
5960 		pci_free_consistent(ioa_cfg->pdev,
5961 				    sizeof(struct ipr_hostrcb),
5962 				    ioa_cfg->hostrcb[i],
5963 				    ioa_cfg->hostrcb_dma[i]);
5964 	}
5965 
5966 	ipr_free_dump(ioa_cfg);
5967 	kfree(ioa_cfg->saved_mode_pages);
5968 	kfree(ioa_cfg->trace);
5969 }
5970 
5971 /**
5972  * ipr_free_all_resources - Free all allocated resources for an adapter.
 * @ioa_cfg:	ioa config struct
5974  *
5975  * This function frees all allocated resources for the
5976  * specified adapter.
5977  *
5978  * Return value:
5979  * 	none
5980  **/
5981 static void ipr_free_all_resources(struct ipr_ioa_cfg *ioa_cfg)
5982 {
5983 	struct pci_dev *pdev = ioa_cfg->pdev;
5984 
5985 	ENTER;
5986 	free_irq(pdev->irq, ioa_cfg);
5987 	iounmap(ioa_cfg->hdw_dma_regs);
5988 	pci_release_regions(pdev);
5989 	ipr_free_mem(ioa_cfg);
5990 	scsi_host_put(ioa_cfg->host);
5991 	pci_disable_device(pdev);
5992 	LEAVE;
5993 }
5994 
5995 /**
5996  * ipr_alloc_cmd_blks - Allocate command blocks for an adapter
5997  * @ioa_cfg:	ioa config struct
5998  *
5999  * Return value:
6000  * 	0 on success / -ENOMEM on allocation failure
6001  **/
6002 static int __devinit ipr_alloc_cmd_blks(struct ipr_ioa_cfg *ioa_cfg)
6003 {
6004 	struct ipr_cmnd *ipr_cmd;
6005 	struct ipr_ioarcb *ioarcb;
6006 	dma_addr_t dma_addr;
6007 	int i;
6008 
	ioa_cfg->ipr_cmd_pool = pci_pool_create(IPR_NAME, ioa_cfg->pdev,
						sizeof(struct ipr_cmnd), 8, 0);
6011 
6012 	if (!ioa_cfg->ipr_cmd_pool)
6013 		return -ENOMEM;
6014 
6015 	for (i = 0; i < IPR_NUM_CMD_BLKS; i++) {
		ipr_cmd = pci_pool_alloc(ioa_cfg->ipr_cmd_pool, SLAB_KERNEL, &dma_addr);
6017 
6018 		if (!ipr_cmd) {
6019 			ipr_free_cmd_blks(ioa_cfg);
6020 			return -ENOMEM;
6021 		}
6022 
6023 		memset(ipr_cmd, 0, sizeof(*ipr_cmd));
6024 		ioa_cfg->ipr_cmnd_list[i] = ipr_cmd;
6025 		ioa_cfg->ipr_cmnd_list_dma[i] = dma_addr;
6026 
6027 		ioarcb = &ipr_cmd->ioarcb;
6028 		ioarcb->ioarcb_host_pci_addr = cpu_to_be32(dma_addr);
6029 		ioarcb->host_response_handle = cpu_to_be32(i << 2);
6030 		ioarcb->write_ioadl_addr =
6031 			cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, ioadl));
6032 		ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr;
6033 		ioarcb->ioasa_host_pci_addr =
6034 			cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, ioasa));
6035 		ioarcb->ioasa_len = cpu_to_be16(sizeof(struct ipr_ioasa));
6036 		ipr_cmd->cmd_index = i;
6037 		ipr_cmd->ioa_cfg = ioa_cfg;
6038 		ipr_cmd->sense_buffer_dma = dma_addr +
6039 			offsetof(struct ipr_cmnd, sense_buffer);
6040 
6041 		list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
6042 	}
6043 
6044 	return 0;
6045 }
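
/*
 * Each command block is a single pci_pool allocation, so every bus
 * address the adapter needs (IOARCB, IOADL, IOASA, sense buffer) is
 * derived from the one dma_addr via offsetof(). Worked example with
 * illustrative values, assuming dma_addr == 0x10000 and
 * offsetof(struct ipr_cmnd, ioadl) == 0x80:
 *
 *	ioarcb_host_pci_addr = cpu_to_be32(0x10000);
 *	write_ioadl_addr     = cpu_to_be32(0x10080);
 */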
6046 
6047 /**
6048  * ipr_alloc_mem - Allocate memory for an adapter
6049  * @ioa_cfg:	ioa config struct
6050  *
6051  * Return value:
6052  * 	0 on success / non-zero for error
6053  **/
6054 static int __devinit ipr_alloc_mem(struct ipr_ioa_cfg *ioa_cfg)
6055 {
6056 	struct pci_dev *pdev = ioa_cfg->pdev;
6057 	int i, rc = -ENOMEM;
6058 
6059 	ENTER;
6060 	ioa_cfg->res_entries = kzalloc(sizeof(struct ipr_resource_entry) *
6061 				       IPR_MAX_PHYSICAL_DEVS, GFP_KERNEL);
6062 
6063 	if (!ioa_cfg->res_entries)
6064 		goto out;
6065 
6066 	for (i = 0; i < IPR_MAX_PHYSICAL_DEVS; i++)
6067 		list_add_tail(&ioa_cfg->res_entries[i].queue, &ioa_cfg->free_res_q);
6068 
6069 	ioa_cfg->vpd_cbs = pci_alloc_consistent(ioa_cfg->pdev,
6070 						sizeof(struct ipr_misc_cbs),
6071 						&ioa_cfg->vpd_cbs_dma);
6072 
6073 	if (!ioa_cfg->vpd_cbs)
6074 		goto out_free_res_entries;
6075 
6076 	if (ipr_alloc_cmd_blks(ioa_cfg))
6077 		goto out_free_vpd_cbs;
6078 
6079 	ioa_cfg->host_rrq = pci_alloc_consistent(ioa_cfg->pdev,
6080 						 sizeof(u32) * IPR_NUM_CMD_BLKS,
6081 						 &ioa_cfg->host_rrq_dma);
6082 
6083 	if (!ioa_cfg->host_rrq)
6084 		goto out_ipr_free_cmd_blocks;
6085 
6086 	ioa_cfg->cfg_table = pci_alloc_consistent(ioa_cfg->pdev,
6087 						  sizeof(struct ipr_config_table),
6088 						  &ioa_cfg->cfg_table_dma);
6089 
6090 	if (!ioa_cfg->cfg_table)
6091 		goto out_free_host_rrq;
6092 
6093 	for (i = 0; i < IPR_NUM_HCAMS; i++) {
6094 		ioa_cfg->hostrcb[i] = pci_alloc_consistent(ioa_cfg->pdev,
6095 							   sizeof(struct ipr_hostrcb),
6096 							   &ioa_cfg->hostrcb_dma[i]);
6097 
6098 		if (!ioa_cfg->hostrcb[i])
6099 			goto out_free_hostrcb_dma;
6100 
6101 		ioa_cfg->hostrcb[i]->hostrcb_dma =
6102 			ioa_cfg->hostrcb_dma[i] + offsetof(struct ipr_hostrcb, hcam);
6103 		list_add_tail(&ioa_cfg->hostrcb[i]->queue, &ioa_cfg->hostrcb_free_q);
6104 	}
6105 
6106 	ioa_cfg->trace = kzalloc(sizeof(struct ipr_trace_entry) *
6107 				 IPR_NUM_TRACE_ENTRIES, GFP_KERNEL);
6108 
6109 	if (!ioa_cfg->trace)
6110 		goto out_free_hostrcb_dma;
6111 
6112 	rc = 0;
6113 out:
6114 	LEAVE;
6115 	return rc;
6116 
6117 out_free_hostrcb_dma:
6118 	while (i-- > 0) {
6119 		pci_free_consistent(pdev, sizeof(struct ipr_hostrcb),
6120 				    ioa_cfg->hostrcb[i],
6121 				    ioa_cfg->hostrcb_dma[i]);
6122 	}
6123 	pci_free_consistent(pdev, sizeof(struct ipr_config_table),
6124 			    ioa_cfg->cfg_table, ioa_cfg->cfg_table_dma);
6125 out_free_host_rrq:
6126 	pci_free_consistent(pdev, sizeof(u32) * IPR_NUM_CMD_BLKS,
6127 			    ioa_cfg->host_rrq, ioa_cfg->host_rrq_dma);
6128 out_ipr_free_cmd_blocks:
6129 	ipr_free_cmd_blks(ioa_cfg);
6130 out_free_vpd_cbs:
6131 	pci_free_consistent(pdev, sizeof(struct ipr_misc_cbs),
6132 			    ioa_cfg->vpd_cbs, ioa_cfg->vpd_cbs_dma);
6133 out_free_res_entries:
6134 	kfree(ioa_cfg->res_entries);
6135 	goto out;
6136 }
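
/*
 * The error path above unwinds in reverse allocation order: each label
 * falls through to the cleanup for the allocations that preceded it,
 * and the hostrcb loop relies on i holding the count of successful
 * allocations when the failing iteration is reached.
 */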
6137 
6138 /**
6139  * ipr_initialize_bus_attr - Initialize SCSI bus attributes to default values
6140  * @ioa_cfg:	ioa config struct
6141  *
6142  * Return value:
6143  * 	none
6144  **/
6145 static void __devinit ipr_initialize_bus_attr(struct ipr_ioa_cfg *ioa_cfg)
6146 {
6147 	int i;
6148 
6149 	for (i = 0; i < IPR_MAX_NUM_BUSES; i++) {
6150 		ioa_cfg->bus_attr[i].bus = i;
6151 		ioa_cfg->bus_attr[i].qas_enabled = 0;
6152 		ioa_cfg->bus_attr[i].bus_width = IPR_DEFAULT_BUS_WIDTH;
6153 		if (ipr_max_speed < ARRAY_SIZE(ipr_max_bus_speeds))
6154 			ioa_cfg->bus_attr[i].max_xfer_rate = ipr_max_bus_speeds[ipr_max_speed];
6155 		else
6156 			ioa_cfg->bus_attr[i].max_xfer_rate = IPR_U160_SCSI_RATE;
6157 	}
6158 }
6159 
6160 /**
6161  * ipr_init_ioa_cfg - Initialize IOA config struct
6162  * @ioa_cfg:	ioa config struct
6163  * @host:		scsi host struct
6164  * @pdev:		PCI dev struct
6165  *
6166  * Return value:
6167  * 	none
6168  **/
6169 static void __devinit ipr_init_ioa_cfg(struct ipr_ioa_cfg *ioa_cfg,
6170 				       struct Scsi_Host *host, struct pci_dev *pdev)
6171 {
6172 	const struct ipr_interrupt_offsets *p;
6173 	struct ipr_interrupts *t;
6174 	void __iomem *base;
6175 
6176 	ioa_cfg->host = host;
6177 	ioa_cfg->pdev = pdev;
6178 	ioa_cfg->log_level = ipr_log_level;
6179 	ioa_cfg->doorbell = IPR_DOORBELL;
6180 	if (!ipr_auto_create)
6181 		ioa_cfg->doorbell |= IPR_RUNTIME_RESET;
6182 	sprintf(ioa_cfg->eye_catcher, IPR_EYECATCHER);
6183 	sprintf(ioa_cfg->trace_start, IPR_TRACE_START_LABEL);
6184 	sprintf(ioa_cfg->ipr_free_label, IPR_FREEQ_LABEL);
6185 	sprintf(ioa_cfg->ipr_pending_label, IPR_PENDQ_LABEL);
6186 	sprintf(ioa_cfg->cfg_table_start, IPR_CFG_TBL_START);
6187 	sprintf(ioa_cfg->resource_table_label, IPR_RES_TABLE_LABEL);
6188 	sprintf(ioa_cfg->ipr_hcam_label, IPR_HCAM_LABEL);
6189 	sprintf(ioa_cfg->ipr_cmd_label, IPR_CMD_LABEL);
6190 
6191 	INIT_LIST_HEAD(&ioa_cfg->free_q);
6192 	INIT_LIST_HEAD(&ioa_cfg->pending_q);
6193 	INIT_LIST_HEAD(&ioa_cfg->hostrcb_free_q);
6194 	INIT_LIST_HEAD(&ioa_cfg->hostrcb_pending_q);
6195 	INIT_LIST_HEAD(&ioa_cfg->free_res_q);
6196 	INIT_LIST_HEAD(&ioa_cfg->used_res_q);
6197 	INIT_WORK(&ioa_cfg->work_q, ipr_worker_thread, ioa_cfg);
6198 	init_waitqueue_head(&ioa_cfg->reset_wait_q);
6199 	ioa_cfg->sdt_state = INACTIVE;
6200 	if (ipr_enable_cache)
6201 		ioa_cfg->cache_state = CACHE_ENABLED;
6202 	else
6203 		ioa_cfg->cache_state = CACHE_DISABLED;
6204 
6205 	ipr_initialize_bus_attr(ioa_cfg);
6206 
6207 	host->max_id = IPR_MAX_NUM_TARGETS_PER_BUS;
6208 	host->max_lun = IPR_MAX_NUM_LUNS_PER_TARGET;
6209 	host->max_channel = IPR_MAX_BUS_TO_SCAN;
6210 	host->unique_id = host->host_no;
6211 	host->max_cmd_len = IPR_MAX_CDB_LEN;
6212 	pci_set_drvdata(pdev, ioa_cfg);
6213 
6214 	p = &ioa_cfg->chip_cfg->regs;
6215 	t = &ioa_cfg->regs;
6216 	base = ioa_cfg->hdw_dma_regs;
6217 
6218 	t->set_interrupt_mask_reg = base + p->set_interrupt_mask_reg;
6219 	t->clr_interrupt_mask_reg = base + p->clr_interrupt_mask_reg;
6220 	t->sense_interrupt_mask_reg = base + p->sense_interrupt_mask_reg;
6221 	t->clr_interrupt_reg = base + p->clr_interrupt_reg;
6222 	t->sense_interrupt_reg = base + p->sense_interrupt_reg;
6223 	t->ioarrin_reg = base + p->ioarrin_reg;
6224 	t->sense_uproc_interrupt_reg = base + p->sense_uproc_interrupt_reg;
6225 	t->set_uproc_interrupt_reg = base + p->set_uproc_interrupt_reg;
6226 	t->clr_uproc_interrupt_reg = base + p->clr_uproc_interrupt_reg;
6227 }
6228 
6229 /**
6230  * ipr_get_chip_cfg - Find adapter chip configuration
6231  * @dev_id:		PCI device id struct
6232  *
6233  * Return value:
6234  * 	ptr to chip config on success / NULL on failure
6235  **/
6236 static const struct ipr_chip_cfg_t * __devinit
6237 ipr_get_chip_cfg(const struct pci_device_id *dev_id)
6238 {
6239 	int i;
6240 
6241 	if (dev_id->driver_data)
6242 		return (const struct ipr_chip_cfg_t *)dev_id->driver_data;
6243 
6244 	for (i = 0; i < ARRAY_SIZE(ipr_chip); i++)
6245 		if (ipr_chip[i].vendor == dev_id->vendor &&
6246 		    ipr_chip[i].device == dev_id->device)
6247 			return ipr_chip[i].cfg;
6248 	return NULL;
6249 }
6250 
6251 /**
6252  * ipr_probe_ioa - Allocates memory and does first stage of initialization
6253  * @pdev:		PCI device struct
6254  * @dev_id:		PCI device id struct
6255  *
6256  * Return value:
6257  * 	0 on success / non-zero on failure
6258  **/
6259 static int __devinit ipr_probe_ioa(struct pci_dev *pdev,
6260 				   const struct pci_device_id *dev_id)
6261 {
6262 	struct ipr_ioa_cfg *ioa_cfg;
6263 	struct Scsi_Host *host;
6264 	unsigned long ipr_regs_pci;
6265 	void __iomem *ipr_regs;
	int rc = PCIBIOS_SUCCESSFUL;
6267 
6268 	ENTER;
6269 
6270 	if ((rc = pci_enable_device(pdev))) {
6271 		dev_err(&pdev->dev, "Cannot enable adapter\n");
6272 		goto out;
6273 	}
6274 
6275 	dev_info(&pdev->dev, "Found IOA with IRQ: %d\n", pdev->irq);
6276 
6277 	host = scsi_host_alloc(&driver_template, sizeof(*ioa_cfg));
6278 
6279 	if (!host) {
6280 		dev_err(&pdev->dev, "call to scsi_host_alloc failed!\n");
6281 		rc = -ENOMEM;
6282 		goto out_disable;
6283 	}
6284 
6285 	ioa_cfg = (struct ipr_ioa_cfg *)host->hostdata;
6286 	memset(ioa_cfg, 0, sizeof(struct ipr_ioa_cfg));
6287 
6288 	ioa_cfg->chip_cfg = ipr_get_chip_cfg(dev_id);
6289 
6290 	if (!ioa_cfg->chip_cfg) {
6291 		dev_err(&pdev->dev, "Unknown adapter chipset 0x%04X 0x%04X\n",
6292 			dev_id->vendor, dev_id->device);
6293 		goto out_scsi_host_put;
6294 	}
6295 
6296 	ipr_regs_pci = pci_resource_start(pdev, 0);
6297 
6298 	rc = pci_request_regions(pdev, IPR_NAME);
6299 	if (rc < 0) {
6300 		dev_err(&pdev->dev,
6301 			"Couldn't register memory range of registers\n");
6302 		goto out_scsi_host_put;
6303 	}
6304 
6305 	ipr_regs = ioremap(ipr_regs_pci, pci_resource_len(pdev, 0));
6306 
6307 	if (!ipr_regs) {
6308 		dev_err(&pdev->dev,
6309 			"Couldn't map memory range of registers\n");
6310 		rc = -ENOMEM;
6311 		goto out_release_regions;
6312 	}
6313 
6314 	ioa_cfg->hdw_dma_regs = ipr_regs;
6315 	ioa_cfg->hdw_dma_regs_pci = ipr_regs_pci;
6316 	ioa_cfg->ioa_mailbox = ioa_cfg->chip_cfg->mailbox + ipr_regs;
6317 
6318 	ipr_init_ioa_cfg(ioa_cfg, host, pdev);
6319 
6320 	pci_set_master(pdev);
6321 
6322 	rc = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
6323 	if (rc < 0) {
6324 		dev_err(&pdev->dev, "Failed to set PCI DMA mask\n");
6325 		goto cleanup_nomem;
6326 	}
6327 
6328 	rc = pci_write_config_byte(pdev, PCI_CACHE_LINE_SIZE,
6329 				   ioa_cfg->chip_cfg->cache_line_size);
6330 
6331 	if (rc != PCIBIOS_SUCCESSFUL) {
6332 		dev_err(&pdev->dev, "Write of cache line size failed\n");
6333 		rc = -EIO;
6334 		goto cleanup_nomem;
6335 	}
6336 
6337 	/* Save away PCI config space for use following IOA reset */
6338 	rc = pci_save_state(pdev);
6339 
6340 	if (rc != PCIBIOS_SUCCESSFUL) {
6341 		dev_err(&pdev->dev, "Failed to save PCI config space\n");
6342 		rc = -EIO;
6343 		goto cleanup_nomem;
6344 	}
6345 
6346 	if ((rc = ipr_save_pcix_cmd_reg(ioa_cfg)))
6347 		goto cleanup_nomem;
6348 
6349 	if ((rc = ipr_set_pcix_cmd_reg(ioa_cfg)))
6350 		goto cleanup_nomem;
6351 
6352 	rc = ipr_alloc_mem(ioa_cfg);
6353 	if (rc < 0) {
6354 		dev_err(&pdev->dev,
6355 			"Couldn't allocate enough memory for device driver!\n");
6356 		goto cleanup_nomem;
6357 	}
6358 
6359 	ipr_mask_and_clear_interrupts(ioa_cfg, ~IPR_PCII_IOA_TRANS_TO_OPER);
6360 	rc = request_irq(pdev->irq, ipr_isr, SA_SHIRQ, IPR_NAME, ioa_cfg);
6361 
6362 	if (rc) {
6363 		dev_err(&pdev->dev, "Couldn't register IRQ %d! rc=%d\n",
6364 			pdev->irq, rc);
6365 		goto cleanup_nolog;
6366 	}
6367 
6368 	spin_lock(&ipr_driver_lock);
6369 	list_add_tail(&ioa_cfg->queue, &ipr_ioa_head);
6370 	spin_unlock(&ipr_driver_lock);
6371 
6372 	LEAVE;
6373 out:
6374 	return rc;
6375 
6376 cleanup_nolog:
6377 	ipr_free_mem(ioa_cfg);
6378 cleanup_nomem:
6379 	iounmap(ipr_regs);
6380 out_release_regions:
6381 	pci_release_regions(pdev);
6382 out_scsi_host_put:
6383 	scsi_host_put(host);
6384 out_disable:
6385 	pci_disable_device(pdev);
6386 	goto out;
6387 }
6388 
6389 /**
6390  * ipr_scan_vsets - Scans for VSET devices
6391  * @ioa_cfg:	ioa config struct
6392  *
6393  * Description: Since the VSET resources do not follow SAM in that we can have
6394  * sparse LUNs with no LUN 0, we have to scan for these ourselves.
6395  *
6396  * Return value:
6397  * 	none
6398  **/
6399 static void ipr_scan_vsets(struct ipr_ioa_cfg *ioa_cfg)
6400 {
6401 	int target, lun;
6402 
6403 	for (target = 0; target < IPR_MAX_NUM_TARGETS_PER_BUS; target++)
		for (lun = 0; lun < IPR_MAX_NUM_VSET_LUNS_PER_TARGET; lun++)
6405 			scsi_add_device(ioa_cfg->host, IPR_VSET_BUS, target, lun);
6406 }
6407 
6408 /**
6409  * ipr_initiate_ioa_bringdown - Bring down an adapter
6410  * @ioa_cfg:		ioa config struct
6411  * @shutdown_type:	shutdown type
6412  *
6413  * Description: This function will initiate bringing down the adapter.
6414  * This consists of issuing an IOA shutdown to the adapter
6415  * to flush the cache, and running BIST.
6416  * If the caller needs to wait on the completion of the reset,
6417  * the caller must sleep on the reset_wait_q.
6418  *
6419  * Return value:
6420  * 	none
6421  **/
6422 static void ipr_initiate_ioa_bringdown(struct ipr_ioa_cfg *ioa_cfg,
6423 				       enum ipr_shutdown_type shutdown_type)
6424 {
6425 	ENTER;
6426 	if (ioa_cfg->sdt_state == WAIT_FOR_DUMP)
6427 		ioa_cfg->sdt_state = ABORT_DUMP;
6428 	ioa_cfg->reset_retries = 0;
6429 	ioa_cfg->in_ioa_bringdown = 1;
6430 	ipr_initiate_ioa_reset(ioa_cfg, shutdown_type);
6431 	LEAVE;
6432 }
6433 
6434 /**
6435  * __ipr_remove - Remove a single adapter
6436  * @pdev:	pci device struct
6437  *
6438  * Adapter hot plug remove entry point.
6439  *
6440  * Return value:
6441  * 	none
6442  **/
6443 static void __ipr_remove(struct pci_dev *pdev)
6444 {
6445 	unsigned long host_lock_flags = 0;
6446 	struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
6447 	ENTER;
6448 
6449 	spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
6450 	ipr_initiate_ioa_bringdown(ioa_cfg, IPR_SHUTDOWN_NORMAL);
6451 
6452 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
6453 	wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
6454 	flush_scheduled_work();
6455 	spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
6456 
6457 	spin_lock(&ipr_driver_lock);
6458 	list_del(&ioa_cfg->queue);
6459 	spin_unlock(&ipr_driver_lock);
6460 
6461 	if (ioa_cfg->sdt_state == ABORT_DUMP)
6462 		ioa_cfg->sdt_state = WAIT_FOR_DUMP;
6463 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
6464 
6465 	ipr_free_all_resources(ioa_cfg);
6466 
6467 	LEAVE;
6468 }
6469 
6470 /**
6471  * ipr_remove - IOA hot plug remove entry point
6472  * @pdev:	pci device struct
6473  *
6474  * Adapter hot plug remove entry point.
6475  *
6476  * Return value:
6477  * 	none
6478  **/
6479 static void ipr_remove(struct pci_dev *pdev)
6480 {
6481 	struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
6482 
6483 	ENTER;
6484 
6485 	ipr_remove_trace_file(&ioa_cfg->host->shost_classdev.kobj,
6486 			      &ipr_trace_attr);
6487 	ipr_remove_dump_file(&ioa_cfg->host->shost_classdev.kobj,
6488 			     &ipr_dump_attr);
6489 	scsi_remove_host(ioa_cfg->host);
6490 
6491 	__ipr_remove(pdev);
6492 
6493 	LEAVE;
6494 }
6495 
6496 /**
 * ipr_probe - Adapter hot plug add entry point
 * @pdev:		PCI device struct
 * @dev_id:		PCI device id struct
 *
6499  * Return value:
6500  * 	0 on success / non-zero on failure
6501  **/
6502 static int __devinit ipr_probe(struct pci_dev *pdev,
6503 			       const struct pci_device_id *dev_id)
6504 {
6505 	struct ipr_ioa_cfg *ioa_cfg;
6506 	int rc;
6507 
6508 	rc = ipr_probe_ioa(pdev, dev_id);
6509 
6510 	if (rc)
6511 		return rc;
6512 
6513 	ioa_cfg = pci_get_drvdata(pdev);
6514 	rc = ipr_probe_ioa_part2(ioa_cfg);
6515 
6516 	if (rc) {
6517 		__ipr_remove(pdev);
6518 		return rc;
6519 	}
6520 
6521 	rc = scsi_add_host(ioa_cfg->host, &pdev->dev);
6522 
6523 	if (rc) {
6524 		__ipr_remove(pdev);
6525 		return rc;
6526 	}
6527 
6528 	rc = ipr_create_trace_file(&ioa_cfg->host->shost_classdev.kobj,
6529 				   &ipr_trace_attr);
6530 
6531 	if (rc) {
6532 		scsi_remove_host(ioa_cfg->host);
6533 		__ipr_remove(pdev);
6534 		return rc;
6535 	}
6536 
6537 	rc = ipr_create_dump_file(&ioa_cfg->host->shost_classdev.kobj,
6538 				   &ipr_dump_attr);
6539 
6540 	if (rc) {
6541 		ipr_remove_trace_file(&ioa_cfg->host->shost_classdev.kobj,
6542 				      &ipr_trace_attr);
6543 		scsi_remove_host(ioa_cfg->host);
6544 		__ipr_remove(pdev);
6545 		return rc;
6546 	}
6547 
6548 	scsi_scan_host(ioa_cfg->host);
6549 	ipr_scan_vsets(ioa_cfg);
6550 	scsi_add_device(ioa_cfg->host, IPR_IOA_BUS, IPR_IOA_TARGET, IPR_IOA_LUN);
6551 	ioa_cfg->allow_ml_add_del = 1;
6552 	ioa_cfg->host->max_channel = IPR_VSET_BUS;
6553 	schedule_work(&ioa_cfg->work_q);
6554 	return 0;
6555 }
6556 
6557 /**
6558  * ipr_shutdown - Shutdown handler.
6559  * @pdev:	pci device struct
6560  *
 * This function is invoked upon system shutdown/reboot. It will issue
 * a shutdown to the adapter to flush the write cache.
6563  *
6564  * Return value:
6565  * 	none
6566  **/
6567 static void ipr_shutdown(struct pci_dev *pdev)
6568 {
6569 	struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
6570 	unsigned long lock_flags = 0;
6571 
6572 	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
6573 	ipr_initiate_ioa_bringdown(ioa_cfg, IPR_SHUTDOWN_NORMAL);
6574 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
6575 	wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
6576 }
6577 
6578 static struct pci_device_id ipr_pci_table[] __devinitdata = {
	{ PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_5702,
		0, 0, (kernel_ulong_t)&ipr_chip_cfg[0] },
	{ PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_5703,
		0, 0, (kernel_ulong_t)&ipr_chip_cfg[0] },
	{ PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_573D,
		0, 0, (kernel_ulong_t)&ipr_chip_cfg[0] },
	{ PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_573E,
		0, 0, (kernel_ulong_t)&ipr_chip_cfg[0] },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571B,
		0, 0, (kernel_ulong_t)&ipr_chip_cfg[0] },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572E,
		0, 0, (kernel_ulong_t)&ipr_chip_cfg[0] },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571A,
		0, 0, (kernel_ulong_t)&ipr_chip_cfg[0] },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_575B,
		0, 0, (kernel_ulong_t)&ipr_chip_cfg[0] },
	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572A,
		0, 0, (kernel_ulong_t)&ipr_chip_cfg[0] },
	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572B,
		0, 0, (kernel_ulong_t)&ipr_chip_cfg[0] },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572A,
		0, 0, (kernel_ulong_t)&ipr_chip_cfg[0] },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572B,
		0, 0, (kernel_ulong_t)&ipr_chip_cfg[0] },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_SNIPE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_2780,
		0, 0, (kernel_ulong_t)&ipr_chip_cfg[1] },
	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571E,
		0, 0, (kernel_ulong_t)&ipr_chip_cfg[1] },
	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571F,
		0, 0, (kernel_ulong_t)&ipr_chip_cfg[1] },
6624 	{ }
6625 };
6626 MODULE_DEVICE_TABLE(pci, ipr_pci_table);
6627 
6628 static struct pci_driver ipr_driver = {
6629 	.name = IPR_NAME,
6630 	.id_table = ipr_pci_table,
6631 	.probe = ipr_probe,
6632 	.remove = ipr_remove,
6633 	.shutdown = ipr_shutdown,
6634 };
6635 
6636 /**
6637  * ipr_init - Module entry point
6638  *
6639  * Return value:
6640  * 	0 on success / negative value on failure
6641  **/
6642 static int __init ipr_init(void)
6643 {
6644 	ipr_info("IBM Power RAID SCSI Device Driver version: %s %s\n",
6645 		 IPR_DRIVER_VERSION, IPR_DRIVER_DATE);
6646 
6647 	return pci_module_init(&ipr_driver);
6648 }
6649 
6650 /**
6651  * ipr_exit - Module unload
6652  *
6653  * Module unload entry point.
6654  *
6655  * Return value:
6656  * 	none
6657  **/
6658 static void __exit ipr_exit(void)
6659 {
6660 	pci_unregister_driver(&ipr_driver);
6661 }
6662 
6663 module_init(ipr_init);
6664 module_exit(ipr_exit);
6665