/*
 * ipr.c -- driver for IBM Power Linux RAID adapters
 *
 * Written By: Brian King <brking@us.ibm.com>, IBM Corporation
 *
 * Copyright (C) 2003, 2004 IBM Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 *
 */

/*
 * Notes:
 *
 * This driver is used to control the following SCSI adapters:
 *
 * IBM iSeries: 5702, 5703, 2780, 5709, 570A, 570B
 *
 * IBM pSeries: PCI-X Dual Channel Ultra 320 SCSI RAID Adapter
 *              PCI-X Dual Channel Ultra 320 SCSI Adapter
 *              PCI-X Dual Channel Ultra 320 SCSI RAID Enablement Card
 *              Embedded SCSI adapter on p615 and p655 systems
 *
 * Supported Hardware Features:
 *	- Ultra 320 SCSI controller
 *	- PCI-X host interface
 *	- Embedded PowerPC RISC Processor and Hardware XOR DMA Engine
 *	- Non-Volatile Write Cache
 *	- Supports attachment of non-RAID disks, tape, and optical devices
 *	- RAID Levels 0, 5, 10
 *	- Hot spare
 *	- Background Parity Checking
 *	- Background Data Scrubbing
 *	- Ability to increase the capacity of an existing RAID 5 disk array
 *		by adding disks
 *
 * Driver Features:
 *	- Tagged command queuing
 *	- Adapter microcode download
 *	- PCI hot plug
 *	- SCSI device hot plug
 *
 */

#include <linux/fs.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/ioport.h>
#include <linux/delay.h>
#include <linux/pci.h>
#include <linux/wait.h>
#include <linux/spinlock.h>
#include <linux/sched.h>
#include <linux/interrupt.h>
#include <linux/blkdev.h>
#include <linux/firmware.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/libata.h>
#include <linux/hdreg.h>
#include <linux/reboot.h>
#include <linux/stringify.h>
#include <asm/io.h>
#include <asm/irq.h>
#include <asm/processor.h>
#include <scsi/scsi.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_tcq.h>
#include <scsi/scsi_eh.h>
#include <scsi/scsi_cmnd.h>
#include "ipr.h"

/*
 *   Global Data
 */
static LIST_HEAD(ipr_ioa_head);
static unsigned int ipr_log_level = IPR_DEFAULT_LOG_LEVEL;
static unsigned int ipr_max_speed = 1;
static int ipr_testmode = 0;
static unsigned int ipr_fastfail = 0;
static unsigned int ipr_transop_timeout = 0;
static unsigned int ipr_debug = 0;
static unsigned int ipr_max_devs = IPR_DEFAULT_SIS64_DEVS;
static unsigned int ipr_dual_ioa_raid = 1;
static unsigned int ipr_number_of_msix = 16;
static unsigned int ipr_fast_reboot;
static DEFINE_SPINLOCK(ipr_driver_lock);

/* This table describes the differences between DMA controller chips */
static const struct ipr_chip_cfg_t ipr_chip_cfg[] = {
	{ /* Gemstone, Citrine, Obsidian, and Obsidian-E */
		.mailbox = 0x0042C,
		.max_cmds = 100,
		.cache_line_size = 0x20,
		.clear_isr = 1,
		.iopoll_weight = 0,
		{
			.set_interrupt_mask_reg = 0x0022C,
			.clr_interrupt_mask_reg = 0x00230,
			.clr_interrupt_mask_reg32 = 0x00230,
			.sense_interrupt_mask_reg = 0x0022C,
			.sense_interrupt_mask_reg32 = 0x0022C,
			.clr_interrupt_reg = 0x00228,
			.clr_interrupt_reg32 = 0x00228,
			.sense_interrupt_reg = 0x00224,
			.sense_interrupt_reg32 = 0x00224,
			.ioarrin_reg = 0x00404,
			.sense_uproc_interrupt_reg = 0x00214,
			.sense_uproc_interrupt_reg32 = 0x00214,
			.set_uproc_interrupt_reg = 0x00214,
			.set_uproc_interrupt_reg32 = 0x00214,
			.clr_uproc_interrupt_reg = 0x00218,
			.clr_uproc_interrupt_reg32 = 0x00218
		}
	},
	{ /* Snipe and Scamp */
		.mailbox = 0x0052C,
		.max_cmds = 100,
		.cache_line_size = 0x20,
		.clear_isr = 1,
		.iopoll_weight = 0,
		{
			.set_interrupt_mask_reg = 0x00288,
			.clr_interrupt_mask_reg = 0x0028C,
			.clr_interrupt_mask_reg32 = 0x0028C,
			.sense_interrupt_mask_reg = 0x00288,
			.sense_interrupt_mask_reg32 = 0x00288,
			.clr_interrupt_reg = 0x00284,
			.clr_interrupt_reg32 = 0x00284,
			.sense_interrupt_reg = 0x00280,
			.sense_interrupt_reg32 = 0x00280,
			.ioarrin_reg = 0x00504,
			.sense_uproc_interrupt_reg = 0x00290,
			.sense_uproc_interrupt_reg32 = 0x00290,
			.set_uproc_interrupt_reg = 0x00290,
			.set_uproc_interrupt_reg32 = 0x00290,
			.clr_uproc_interrupt_reg = 0x00294,
			.clr_uproc_interrupt_reg32 = 0x00294
		}
	},
	{ /* CRoC */
		.mailbox = 0x00044,
		.max_cmds = 1000,
		.cache_line_size = 0x20,
		.clear_isr = 0,
		.iopoll_weight = 64,
		{
			.set_interrupt_mask_reg = 0x00010,
			.clr_interrupt_mask_reg = 0x00018,
			.clr_interrupt_mask_reg32 = 0x0001C,
			.sense_interrupt_mask_reg = 0x00010,
			.sense_interrupt_mask_reg32 = 0x00014,
			.clr_interrupt_reg = 0x00008,
			.clr_interrupt_reg32 = 0x0000C,
			.sense_interrupt_reg = 0x00000,
			.sense_interrupt_reg32 = 0x00004,
			.ioarrin_reg = 0x00070,
			.sense_uproc_interrupt_reg = 0x00020,
			.sense_uproc_interrupt_reg32 = 0x00024,
			.set_uproc_interrupt_reg = 0x00020,
			.set_uproc_interrupt_reg32 = 0x00024,
			.clr_uproc_interrupt_reg = 0x00028,
			.clr_uproc_interrupt_reg32 = 0x0002C,
			.init_feedback_reg = 0x0005C,
			.dump_addr_reg = 0x00064,
			.dump_data_reg = 0x00068,
			.endian_swap_reg = 0x00084
		}
	},
};
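/*
 * Editorial note (not from the original source): the ipr_chip[] table below
 * binds each supported PCI vendor/device ID to its SIS type (32- vs 64-bit)
 * and to one of the register layouts defined above; the remaining fields are
 * described by struct ipr_chip_t in ipr.h.
 */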
static const struct ipr_chip_t ipr_chip[] = {
	{ PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE, false, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[0] },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE, false, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[0] },
	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN, false, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[0] },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN, false, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[0] },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E, true, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[0] },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_SNIPE, false, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[1] },
	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP, false, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[1] },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2, true, IPR_SIS64, IPR_MMIO, &ipr_chip_cfg[2] },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE, true, IPR_SIS64, IPR_MMIO, &ipr_chip_cfg[2] },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_RATTLESNAKE, true, IPR_SIS64, IPR_MMIO, &ipr_chip_cfg[2] }
};

static int ipr_max_bus_speeds[] = {
	IPR_80MBs_SCSI_RATE, IPR_U160_SCSI_RATE, IPR_U320_SCSI_RATE
};

MODULE_AUTHOR("Brian King <brking@us.ibm.com>");
MODULE_DESCRIPTION("IBM Power RAID SCSI Adapter Driver");
module_param_named(max_speed, ipr_max_speed, uint, 0);
MODULE_PARM_DESC(max_speed, "Maximum bus speed (0-2). Default: 1=U160. Speeds: 0=80 MB/s, 1=U160, 2=U320");
module_param_named(log_level, ipr_log_level, uint, 0);
MODULE_PARM_DESC(log_level, "Set to 0 - 4 for increasing verbosity of device driver");
module_param_named(testmode, ipr_testmode, int, 0);
MODULE_PARM_DESC(testmode, "DANGEROUS!!! Allows unsupported configurations");
module_param_named(fastfail, ipr_fastfail, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(fastfail, "Reduce timeouts and retries");
module_param_named(transop_timeout, ipr_transop_timeout, int, 0);
MODULE_PARM_DESC(transop_timeout, "Time in seconds to wait for adapter to come operational (default: 300)");
module_param_named(debug, ipr_debug, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(debug, "Enable device driver debugging logging. Set to 1 to enable. (default: 0)");
module_param_named(dual_ioa_raid, ipr_dual_ioa_raid, int, 0);
MODULE_PARM_DESC(dual_ioa_raid, "Enable dual adapter RAID support. Set to 1 to enable. (default: 1)");
module_param_named(max_devs, ipr_max_devs, int, 0);
MODULE_PARM_DESC(max_devs, "Specify the maximum number of physical devices. "
		 "[Default=" __stringify(IPR_DEFAULT_SIS64_DEVS) "]");
module_param_named(number_of_msix, ipr_number_of_msix, int, 0);
MODULE_PARM_DESC(number_of_msix, "Specify the number of MSIX interrupts to use on capable adapters (1 - 16). (default:16)");
module_param_named(fast_reboot, ipr_fast_reboot, int, S_IRUGO | S_IWUSR);
(default: 0)"); 228 MODULE_LICENSE("GPL"); 229 MODULE_VERSION(IPR_DRIVER_VERSION); 230 231 /* A constant array of IOASCs/URCs/Error Messages */ 232 static const 233 struct ipr_error_table_t ipr_error_table[] = { 234 {0x00000000, 1, IPR_DEFAULT_LOG_LEVEL, 235 "8155: An unknown error was received"}, 236 {0x00330000, 0, 0, 237 "Soft underlength error"}, 238 {0x005A0000, 0, 0, 239 "Command to be cancelled not found"}, 240 {0x00808000, 0, 0, 241 "Qualified success"}, 242 {0x01080000, 1, IPR_DEFAULT_LOG_LEVEL, 243 "FFFE: Soft device bus error recovered by the IOA"}, 244 {0x01088100, 0, IPR_DEFAULT_LOG_LEVEL, 245 "4101: Soft device bus fabric error"}, 246 {0x01100100, 0, IPR_DEFAULT_LOG_LEVEL, 247 "FFFC: Logical block guard error recovered by the device"}, 248 {0x01100300, 0, IPR_DEFAULT_LOG_LEVEL, 249 "FFFC: Logical block reference tag error recovered by the device"}, 250 {0x01108300, 0, IPR_DEFAULT_LOG_LEVEL, 251 "4171: Recovered scatter list tag / sequence number error"}, 252 {0x01109000, 0, IPR_DEFAULT_LOG_LEVEL, 253 "FF3D: Recovered logical block CRC error on IOA to Host transfer"}, 254 {0x01109200, 0, IPR_DEFAULT_LOG_LEVEL, 255 "4171: Recovered logical block sequence number error on IOA to Host transfer"}, 256 {0x0110A000, 0, IPR_DEFAULT_LOG_LEVEL, 257 "FFFD: Recovered logical block reference tag error detected by the IOA"}, 258 {0x0110A100, 0, IPR_DEFAULT_LOG_LEVEL, 259 "FFFD: Logical block guard error recovered by the IOA"}, 260 {0x01170600, 0, IPR_DEFAULT_LOG_LEVEL, 261 "FFF9: Device sector reassign successful"}, 262 {0x01170900, 0, IPR_DEFAULT_LOG_LEVEL, 263 "FFF7: Media error recovered by device rewrite procedures"}, 264 {0x01180200, 0, IPR_DEFAULT_LOG_LEVEL, 265 "7001: IOA sector reassignment successful"}, 266 {0x01180500, 0, IPR_DEFAULT_LOG_LEVEL, 267 "FFF9: Soft media error. 
/*  A constant array of IOASCs/URCs/Error Messages */
static const
struct ipr_error_table_t ipr_error_table[] = {
	{0x00000000, 1, IPR_DEFAULT_LOG_LEVEL,
	"8155: An unknown error was received"},
	{0x00330000, 0, 0,
	"Soft underlength error"},
	{0x005A0000, 0, 0,
	"Command to be cancelled not found"},
	{0x00808000, 0, 0,
	"Qualified success"},
	{0x01080000, 1, IPR_DEFAULT_LOG_LEVEL,
	"FFFE: Soft device bus error recovered by the IOA"},
	{0x01088100, 0, IPR_DEFAULT_LOG_LEVEL,
	"4101: Soft device bus fabric error"},
	{0x01100100, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFFC: Logical block guard error recovered by the device"},
	{0x01100300, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFFC: Logical block reference tag error recovered by the device"},
	{0x01108300, 0, IPR_DEFAULT_LOG_LEVEL,
	"4171: Recovered scatter list tag / sequence number error"},
	{0x01109000, 0, IPR_DEFAULT_LOG_LEVEL,
	"FF3D: Recovered logical block CRC error on IOA to Host transfer"},
	{0x01109200, 0, IPR_DEFAULT_LOG_LEVEL,
	"4171: Recovered logical block sequence number error on IOA to Host transfer"},
	{0x0110A000, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFFD: Recovered logical block reference tag error detected by the IOA"},
	{0x0110A100, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFFD: Logical block guard error recovered by the IOA"},
	{0x01170600, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFF9: Device sector reassign successful"},
	{0x01170900, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFF7: Media error recovered by device rewrite procedures"},
	{0x01180200, 0, IPR_DEFAULT_LOG_LEVEL,
	"7001: IOA sector reassignment successful"},
	{0x01180500, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFF9: Soft media error. Sector reassignment recommended"},
	{0x01180600, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFF7: Media error recovered by IOA rewrite procedures"},
	{0x01418000, 0, IPR_DEFAULT_LOG_LEVEL,
	"FF3D: Soft PCI bus error recovered by the IOA"},
	{0x01440000, 1, IPR_DEFAULT_LOG_LEVEL,
	"FFF6: Device hardware error recovered by the IOA"},
	{0x01448100, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFF6: Device hardware error recovered by the device"},
	{0x01448200, 1, IPR_DEFAULT_LOG_LEVEL,
	"FF3D: Soft IOA error recovered by the IOA"},
	{0x01448300, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFFA: Undefined device response recovered by the IOA"},
	{0x014A0000, 1, IPR_DEFAULT_LOG_LEVEL,
	"FFF6: Device bus error, message or command phase"},
	{0x014A8000, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFFE: Task Management Function failed"},
	{0x015D0000, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFF6: Failure prediction threshold exceeded"},
	{0x015D9200, 0, IPR_DEFAULT_LOG_LEVEL,
	"8009: Impending cache battery pack failure"},
	{0x02040100, 0, 0,
	"Logical Unit in process of becoming ready"},
	{0x02040200, 0, 0,
	"Initializing command required"},
	{0x02040400, 0, 0,
	"34FF: Disk device format in progress"},
	{0x02040C00, 0, 0,
	"Logical unit not accessible, target port in unavailable state"},
	{0x02048000, 0, IPR_DEFAULT_LOG_LEVEL,
	"9070: IOA requested reset"},
	{0x023F0000, 0, 0,
	"Synchronization required"},
	{0x02408500, 0, 0,
	"IOA microcode download required"},
	{0x02408600, 0, 0,
	"Device bus connection is prohibited by host"},
	{0x024E0000, 0, 0,
	"No ready, IOA shutdown"},
	{0x025A0000, 0, 0,
	"Not ready, IOA has been shutdown"},
	{0x02670100, 0, IPR_DEFAULT_LOG_LEVEL,
	"3020: Storage subsystem configuration error"},
	{0x03110B00, 0, 0,
	"FFF5: Medium error, data unreadable, recommend reassign"},
	{0x03110C00, 0, 0,
	"7000: Medium error, data unreadable, do not reassign"},
	{0x03310000, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFF3: Disk media format bad"},
	{0x04050000, 0, IPR_DEFAULT_LOG_LEVEL,
	"3002: Addressed device failed to respond to selection"},
	{0x04080000, 1, IPR_DEFAULT_LOG_LEVEL,
	"3100: Device bus error"},
	{0x04080100, 0, IPR_DEFAULT_LOG_LEVEL,
	"3109: IOA timed out a device command"},
	{0x04088000, 0, 0,
	"3120: SCSI bus is not operational"},
	{0x04088100, 0, IPR_DEFAULT_LOG_LEVEL,
	"4100: Hard device bus fabric error"},
	{0x04100100, 0, IPR_DEFAULT_LOG_LEVEL,
	"310C: Logical block guard error detected by the device"},
	{0x04100300, 0, IPR_DEFAULT_LOG_LEVEL,
	"310C: Logical block reference tag error detected by the device"},
	{0x04108300, 1, IPR_DEFAULT_LOG_LEVEL,
	"4170: Scatter list tag / sequence number error"},
	{0x04109000, 1, IPR_DEFAULT_LOG_LEVEL,
	"8150: Logical block CRC error on IOA to Host transfer"},
	{0x04109200, 1, IPR_DEFAULT_LOG_LEVEL,
	"4170: Logical block sequence number error on IOA to Host transfer"},
	{0x0410A000, 0, IPR_DEFAULT_LOG_LEVEL,
	"310D: Logical block reference tag error detected by the IOA"},
	{0x0410A100, 0, IPR_DEFAULT_LOG_LEVEL,
	"310D: Logical block guard error detected by the IOA"},
	{0x04118000, 0, IPR_DEFAULT_LOG_LEVEL,
	"9000: IOA reserved area data check"},
	{0x04118100, 0, IPR_DEFAULT_LOG_LEVEL,
	"9001: IOA reserved area invalid data pattern"},
	{0x04118200, 0, IPR_DEFAULT_LOG_LEVEL,
	"9002: IOA reserved area LRC error"},
	{0x04118300, 1, IPR_DEFAULT_LOG_LEVEL,
	"Hardware Error, IOA metadata access error"},
	{0x04320000, 0, IPR_DEFAULT_LOG_LEVEL,
	"102E: Out of alternate sectors for disk storage"},
	{0x04330000, 1, IPR_DEFAULT_LOG_LEVEL,
	"FFF4: Data transfer underlength error"},
	{0x04338000, 1, IPR_DEFAULT_LOG_LEVEL,
	"FFF4: Data transfer overlength error"},
	{0x043E0100, 0, IPR_DEFAULT_LOG_LEVEL,
	"3400: Logical unit failure"},
	{0x04408500, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFF4: Device microcode is corrupt"},
	{0x04418000, 1, IPR_DEFAULT_LOG_LEVEL,
	"8150: PCI bus error"},
	{0x04430000, 1, 0,
	"Unsupported device bus message received"},
	{0x04440000, 1, IPR_DEFAULT_LOG_LEVEL,
	"FFF4: Disk device problem"},
	{0x04448200, 1, IPR_DEFAULT_LOG_LEVEL,
	"8150: Permanent IOA failure"},
	{0x04448300, 0, IPR_DEFAULT_LOG_LEVEL,
	"3010: Disk device returned wrong response to IOA"},
	{0x04448400, 0, IPR_DEFAULT_LOG_LEVEL,
	"8151: IOA microcode error"},
	{0x04448500, 0, 0,
	"Device bus status error"},
	{0x04448600, 0, IPR_DEFAULT_LOG_LEVEL,
	"8157: IOA error requiring IOA reset to recover"},
	{0x04448700, 0, 0,
	"ATA device status error"},
	{0x04490000, 0, 0,
	"Message reject received from the device"},
	{0x04449200, 0, IPR_DEFAULT_LOG_LEVEL,
	"8008: A permanent cache battery pack failure occurred"},
	{0x0444A000, 0, IPR_DEFAULT_LOG_LEVEL,
	"9090: Disk unit has been modified after the last known status"},
	{0x0444A200, 0, IPR_DEFAULT_LOG_LEVEL,
	"9081: IOA detected device error"},
	{0x0444A300, 0, IPR_DEFAULT_LOG_LEVEL,
	"9082: IOA detected device error"},
	{0x044A0000, 1, IPR_DEFAULT_LOG_LEVEL,
	"3110: Device bus error, message or command phase"},
	{0x044A8000, 1, IPR_DEFAULT_LOG_LEVEL,
	"3110: SAS Command / Task Management Function failed"},
	{0x04670400, 0, IPR_DEFAULT_LOG_LEVEL,
	"9091: Incorrect hardware configuration change has been detected"},
	{0x04678000, 0, IPR_DEFAULT_LOG_LEVEL,
	"9073: Invalid multi-adapter configuration"},
	{0x04678100, 0, IPR_DEFAULT_LOG_LEVEL,
	"4010: Incorrect connection between cascaded expanders"},
	{0x04678200, 0, IPR_DEFAULT_LOG_LEVEL,
	"4020: Connections exceed IOA design limits"},
	{0x04678300, 0, IPR_DEFAULT_LOG_LEVEL,
	"4030: Incorrect multipath connection"},
	{0x04679000, 0, IPR_DEFAULT_LOG_LEVEL,
	"4110: Unsupported enclosure function"},
	{0x04679800, 0, IPR_DEFAULT_LOG_LEVEL,
	"4120: SAS cable VPD cannot be read"},
	{0x046E0000, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFF4: Command to logical unit failed"},
	{0x05240000, 1, 0,
	"Illegal request, invalid request type or request packet"},
	{0x05250000, 0, 0,
	"Illegal request, invalid resource handle"},
	{0x05258000, 0, 0,
	"Illegal request, commands not allowed to this device"},
	{0x05258100, 0, 0,
	"Illegal request, command not allowed to a secondary adapter"},
	{0x05258200, 0, 0,
	"Illegal request, command not allowed to a non-optimized resource"},
	{0x05260000, 0, 0,
	"Illegal request, invalid field in parameter list"},
	{0x05260100, 0, 0,
	"Illegal request, parameter not supported"},
	{0x05260200, 0, 0,
	"Illegal request, parameter value invalid"},
	{0x052C0000, 0, 0,
	"Illegal request, command sequence error"},
	{0x052C8000, 1, 0,
	"Illegal request, dual adapter support not enabled"},
	{0x052C8100, 1, 0,
	"Illegal request, another cable connector was physically disabled"},
	{0x054E8000, 1, 0,
	"Illegal request, inconsistent group id/group count"},
	{0x06040500, 0, IPR_DEFAULT_LOG_LEVEL,
	"9031: Array protection temporarily suspended, protection resuming"},
	{0x06040600, 0, IPR_DEFAULT_LOG_LEVEL,
	"9040: Array protection temporarily suspended, protection resuming"},
	{0x060B0100, 0, IPR_DEFAULT_LOG_LEVEL,
	"4080: IOA exceeded maximum operating temperature"},
	{0x060B8000, 0, IPR_DEFAULT_LOG_LEVEL,
	"4085: Service required"},
	{0x060B8100, 0, IPR_DEFAULT_LOG_LEVEL,
	"4086: SAS Adapter Hardware Configuration Error"},
	{0x06288000, 0, IPR_DEFAULT_LOG_LEVEL,
	"3140: Device bus not ready to ready transition"},
	{0x06290000, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFFB: SCSI bus was reset"},
	{0x06290500, 0, 0,
	"FFFE: SCSI bus transition to single ended"},
	{0x06290600, 0, 0,
	"FFFE: SCSI bus transition to LVD"},
	{0x06298000, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFFB: SCSI bus was reset by another initiator"},
	{0x063F0300, 0, IPR_DEFAULT_LOG_LEVEL,
	"3029: A device replacement has occurred"},
	{0x063F8300, 0, IPR_DEFAULT_LOG_LEVEL,
	"4102: Device bus fabric performance degradation"},
	{0x064C8000, 0, IPR_DEFAULT_LOG_LEVEL,
	"9051: IOA cache data exists for a missing or failed device"},
	{0x064C8100, 0, IPR_DEFAULT_LOG_LEVEL,
	"9055: Auxiliary cache IOA contains cache data needed by the primary IOA"},
	{0x06670100, 0, IPR_DEFAULT_LOG_LEVEL,
	"9025: Disk unit is not supported at its physical location"},
	{0x06670600, 0, IPR_DEFAULT_LOG_LEVEL,
	"3020: IOA detected a SCSI bus configuration error"},
	{0x06678000, 0, IPR_DEFAULT_LOG_LEVEL,
	"3150: SCSI bus configuration error"},
	{0x06678100, 0, IPR_DEFAULT_LOG_LEVEL,
	"9074: Asymmetric advanced function disk configuration"},
	{0x06678300, 0, IPR_DEFAULT_LOG_LEVEL,
	"4040: Incomplete multipath connection between IOA and enclosure"},
	{0x06678400, 0, IPR_DEFAULT_LOG_LEVEL,
	"4041: Incomplete multipath connection between enclosure and device"},
	{0x06678500, 0, IPR_DEFAULT_LOG_LEVEL,
	"9075: Incomplete multipath connection between IOA and remote IOA"},
	{0x06678600, 0, IPR_DEFAULT_LOG_LEVEL,
	"9076: Configuration error, missing remote IOA"},
	{0x06679100, 0, IPR_DEFAULT_LOG_LEVEL,
	"4050: Enclosure does not support a required multipath function"},
	{0x06679800, 0, IPR_DEFAULT_LOG_LEVEL,
	"4121: Configuration error, required cable is missing"},
	{0x06679900, 0, IPR_DEFAULT_LOG_LEVEL,
	"4122: Cable is not plugged into the correct location on remote IOA"},
	{0x06679A00, 0, IPR_DEFAULT_LOG_LEVEL,
	"4123: Configuration error, invalid cable vital product data"},
	{0x06679B00, 0, IPR_DEFAULT_LOG_LEVEL,
	"4124: Configuration error, both cable ends are plugged into the same IOA"},
	{0x06690000, 0, IPR_DEFAULT_LOG_LEVEL,
	"4070: Logically bad block written on device"},
	{0x06690200, 0, IPR_DEFAULT_LOG_LEVEL,
	"9041: Array protection temporarily suspended"},
	{0x06698200, 0, IPR_DEFAULT_LOG_LEVEL,
	"9042: Corrupt array parity detected on specified device"},
	{0x066B0200, 0, IPR_DEFAULT_LOG_LEVEL,
	"9030: Array no longer protected due to missing or failed disk unit"},
	{0x066B8000, 0, IPR_DEFAULT_LOG_LEVEL,
	"9071: Link operational transition"},
	{0x066B8100, 0, IPR_DEFAULT_LOG_LEVEL,
	"9072: Link not operational transition"},
	{0x066B8200, 0, IPR_DEFAULT_LOG_LEVEL,
	"9032: Array exposed but still protected"},
	{0x066B8300, 0, IPR_DEBUG_LOG_LEVEL,
	"70DD: Device forced failed by disrupt device command"},
	{0x066B9100, 0, IPR_DEFAULT_LOG_LEVEL,
	"4061: Multipath redundancy level got better"},
	{0x066B9200, 0, IPR_DEFAULT_LOG_LEVEL,
	"4060: Multipath redundancy level got worse"},
	{0x06808100, 0, IPR_DEBUG_LOG_LEVEL,
	"9083: Device raw mode enabled"},
	{0x06808200, 0, IPR_DEBUG_LOG_LEVEL,
	"9084: Device raw mode disabled"},
	{0x07270000, 0, 0,
	"Failure due to other device"},
	{0x07278000, 0, IPR_DEFAULT_LOG_LEVEL,
	"9008: IOA does not support functions expected by devices"},
	{0x07278100, 0, IPR_DEFAULT_LOG_LEVEL,
	"9010: Cache data associated with attached devices cannot be found"},
	{0x07278200, 0, IPR_DEFAULT_LOG_LEVEL,
	"9011: Cache data belongs to devices other than those attached"},
	{0x07278400, 0, IPR_DEFAULT_LOG_LEVEL,
	"9020: Array missing 2 or more devices with only 1 device present"},
	{0x07278500, 0, IPR_DEFAULT_LOG_LEVEL,
	"9021: Array missing 2 or more devices with 2 or more devices present"},
	{0x07278600, 0, IPR_DEFAULT_LOG_LEVEL,
	"9022: Exposed array is missing a required device"},
	{0x07278700, 0, IPR_DEFAULT_LOG_LEVEL,
	"9023: Array member(s) not at required physical locations"},
	{0x07278800, 0, IPR_DEFAULT_LOG_LEVEL,
	"9024: Array not functional due to present hardware configuration"},
	{0x07278900, 0, IPR_DEFAULT_LOG_LEVEL,
	"9026: Array not functional due to present hardware configuration"},
	{0x07278A00, 0, IPR_DEFAULT_LOG_LEVEL,
	"9027: Array is missing a device and parity is out of sync"},
	{0x07278B00, 0, IPR_DEFAULT_LOG_LEVEL,
	"9028: Maximum number of arrays already exist"},
	{0x07278C00, 0, IPR_DEFAULT_LOG_LEVEL,
	"9050: Required cache data cannot be located for a disk unit"},
	{0x07278D00, 0, IPR_DEFAULT_LOG_LEVEL,
	"9052: Cache data exists for a device that has been modified"},
	{0x07278F00, 0, IPR_DEFAULT_LOG_LEVEL,
	"9054: IOA resources not available due to previous problems"},
	{0x07279100, 0, IPR_DEFAULT_LOG_LEVEL,
	"9092: Disk unit requires initialization before use"},
	{0x07279200, 0, IPR_DEFAULT_LOG_LEVEL,
	"9029: Incorrect hardware configuration change has been detected"},
	{0x07279600, 0, IPR_DEFAULT_LOG_LEVEL,
	"9060: One or more disk pairs are missing from an array"},
	{0x07279700, 0, IPR_DEFAULT_LOG_LEVEL,
	"9061: One or more disks are missing from an array"},
	{0x07279800, 0, IPR_DEFAULT_LOG_LEVEL,
	"9062: One or more disks are missing from an array"},
	{0x07279900, 0, IPR_DEFAULT_LOG_LEVEL,
	"9063: Maximum number of functional arrays has been exceeded"},
	{0x07279A00, 0, 0,
	"Data protect, other volume set problem"},
	{0x0B260000, 0, 0,
	"Aborted command, invalid descriptor"},
	{0x0B3F9000, 0, 0,
	"Target operating conditions have changed, dual adapter takeover"},
	{0x0B530200, 0, 0,
	"Aborted command, medium removal prevented"},
	{0x0B5A0000, 0, 0,
	"Command terminated by host"},
	{0x0B5B8000, 0, 0,
	"Aborted command, command terminated by host"}
};

static const struct ipr_ses_table_entry ipr_ses_table[] = {
	{ "2104-DL1 ", "XXXXXXXXXXXXXXXX", 80 },
	{ "2104-TL1 ", "XXXXXXXXXXXXXXXX", 80 },
	{ "HSBP07M P U2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* Hidive 7 slot */
	{ "HSBP05M P U2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* Hidive 5 slot */
	{ "HSBP05M S U2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* Bowtie */
	{ "HSBP06E ASU2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* MartinFenning */
	{ "2104-DU3 ", "XXXXXXXXXXXXXXXX", 160 },
	{ "2104-TU3 ", "XXXXXXXXXXXXXXXX", 160 },
"HSBP04C RSU2SCSI", "XXXXXXX*XXXXXXXX", 160 }, 574 { "HSBP06E RSU2SCSI", "XXXXXXX*XXXXXXXX", 160 }, 575 { "St V1S2 ", "XXXXXXXXXXXXXXXX", 160 }, 576 { "HSBPD4M PU3SCSI", "XXXXXXX*XXXXXXXX", 160 }, 577 { "VSBPD1H U3SCSI", "XXXXXXX*XXXXXXXX", 160 } 578 }; 579 580 /* 581 * Function Prototypes 582 */ 583 static int ipr_reset_alert(struct ipr_cmnd *); 584 static void ipr_process_ccn(struct ipr_cmnd *); 585 static void ipr_process_error(struct ipr_cmnd *); 586 static void ipr_reset_ioa_job(struct ipr_cmnd *); 587 static void ipr_initiate_ioa_reset(struct ipr_ioa_cfg *, 588 enum ipr_shutdown_type); 589 590 #ifdef CONFIG_SCSI_IPR_TRACE 591 /** 592 * ipr_trc_hook - Add a trace entry to the driver trace 593 * @ipr_cmd: ipr command struct 594 * @type: trace type 595 * @add_data: additional data 596 * 597 * Return value: 598 * none 599 **/ 600 static void ipr_trc_hook(struct ipr_cmnd *ipr_cmd, 601 u8 type, u32 add_data) 602 { 603 struct ipr_trace_entry *trace_entry; 604 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; 605 unsigned int trace_index; 606 607 trace_index = atomic_add_return(1, &ioa_cfg->trace_index) & IPR_TRACE_INDEX_MASK; 608 trace_entry = &ioa_cfg->trace[trace_index]; 609 trace_entry->time = jiffies; 610 trace_entry->op_code = ipr_cmd->ioarcb.cmd_pkt.cdb[0]; 611 trace_entry->type = type; 612 if (ipr_cmd->ioa_cfg->sis64) 613 trace_entry->ata_op_code = ipr_cmd->i.ata_ioadl.regs.command; 614 else 615 trace_entry->ata_op_code = ipr_cmd->ioarcb.u.add_data.u.regs.command; 616 trace_entry->cmd_index = ipr_cmd->cmd_index & 0xff; 617 trace_entry->res_handle = ipr_cmd->ioarcb.res_handle; 618 trace_entry->u.add_data = add_data; 619 wmb(); 620 } 621 #else 622 #define ipr_trc_hook(ipr_cmd, type, add_data) do { } while (0) 623 #endif 624 625 /** 626 * ipr_lock_and_done - Acquire lock and complete command 627 * @ipr_cmd: ipr command struct 628 * 629 * Return value: 630 * none 631 **/ 632 static void ipr_lock_and_done(struct ipr_cmnd *ipr_cmd) 633 { 634 unsigned long lock_flags; 635 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; 636 637 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); 638 ipr_cmd->done(ipr_cmd); 639 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); 640 } 641 642 /** 643 * ipr_reinit_ipr_cmnd - Re-initialize an IPR Cmnd block for reuse 644 * @ipr_cmd: ipr command struct 645 * 646 * Return value: 647 * none 648 **/ 649 static void ipr_reinit_ipr_cmnd(struct ipr_cmnd *ipr_cmd) 650 { 651 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb; 652 struct ipr_ioasa *ioasa = &ipr_cmd->s.ioasa; 653 struct ipr_ioasa64 *ioasa64 = &ipr_cmd->s.ioasa64; 654 dma_addr_t dma_addr = ipr_cmd->dma_addr; 655 int hrrq_id; 656 657 hrrq_id = ioarcb->cmd_pkt.hrrq_id; 658 memset(&ioarcb->cmd_pkt, 0, sizeof(struct ipr_cmd_pkt)); 659 ioarcb->cmd_pkt.hrrq_id = hrrq_id; 660 ioarcb->data_transfer_length = 0; 661 ioarcb->read_data_transfer_length = 0; 662 ioarcb->ioadl_len = 0; 663 ioarcb->read_ioadl_len = 0; 664 665 if (ipr_cmd->ioa_cfg->sis64) { 666 ioarcb->u.sis64_addr_data.data_ioadl_addr = 667 cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, i.ioadl64)); 668 ioasa64->u.gata.status = 0; 669 } else { 670 ioarcb->write_ioadl_addr = 671 cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, i.ioadl)); 672 ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr; 673 ioasa->u.gata.status = 0; 674 } 675 676 ioasa->hdr.ioasc = 0; 677 ioasa->hdr.residual_data_len = 0; 678 ipr_cmd->scsi_cmd = NULL; 679 ipr_cmd->qc = NULL; 680 ipr_cmd->sense_buffer[0] = 0; 681 ipr_cmd->dma_use_sg = 0; 682 } 683 684 /** 685 * 
/**
 * ipr_init_ipr_cmnd - Initialize an IPR Cmnd block
 * @ipr_cmd:	ipr command struct
 * @fast_done:	fast done function call-back
 *
 * Return value:
 * 	none
 **/
static void ipr_init_ipr_cmnd(struct ipr_cmnd *ipr_cmd,
			      void (*fast_done) (struct ipr_cmnd *))
{
	ipr_reinit_ipr_cmnd(ipr_cmd);
	ipr_cmd->u.scratch = 0;
	ipr_cmd->sibling = NULL;
	ipr_cmd->eh_comp = NULL;
	ipr_cmd->fast_done = fast_done;
	timer_setup(&ipr_cmd->timer, NULL, 0);
}

/**
 * __ipr_get_free_ipr_cmnd - Get a free IPR Cmnd block
 * @hrrq:	hrr queue
 *
 * Return value:
 * 	pointer to ipr command struct
 **/
static
struct ipr_cmnd *__ipr_get_free_ipr_cmnd(struct ipr_hrr_queue *hrrq)
{
	struct ipr_cmnd *ipr_cmd = NULL;

	if (likely(!list_empty(&hrrq->hrrq_free_q))) {
		ipr_cmd = list_entry(hrrq->hrrq_free_q.next,
			struct ipr_cmnd, queue);
		list_del(&ipr_cmd->queue);
	}

	return ipr_cmd;
}

/**
 * ipr_get_free_ipr_cmnd - Get a free IPR Cmnd block and initialize it
 * @ioa_cfg:	ioa config struct
 *
 * Return value:
 *	pointer to ipr command struct
 **/
static
struct ipr_cmnd *ipr_get_free_ipr_cmnd(struct ipr_ioa_cfg *ioa_cfg)
{
	struct ipr_cmnd *ipr_cmd =
		__ipr_get_free_ipr_cmnd(&ioa_cfg->hrrq[IPR_INIT_HRRQ]);
	ipr_init_ipr_cmnd(ipr_cmd, ipr_lock_and_done);
	return ipr_cmd;
}

/**
 * ipr_mask_and_clear_interrupts - Mask all and clear specified interrupts
 * @ioa_cfg:	ioa config struct
 * @clr_ints:	interrupts to clear
 *
 * This function masks all interrupts on the adapter, then clears the
 * interrupts specified in the mask
 *
 * Return value:
 * 	none
 **/
static void ipr_mask_and_clear_interrupts(struct ipr_ioa_cfg *ioa_cfg,
					  u32 clr_ints)
{
	volatile u32 int_reg;
	int i;

	/* Stop new interrupts */
	for (i = 0; i < ioa_cfg->hrrq_num; i++) {
		spin_lock(&ioa_cfg->hrrq[i]._lock);
		ioa_cfg->hrrq[i].allow_interrupts = 0;
		spin_unlock(&ioa_cfg->hrrq[i]._lock);
	}

	/* Set interrupt mask to stop all new interrupts */
	if (ioa_cfg->sis64)
		writeq(~0, ioa_cfg->regs.set_interrupt_mask_reg);
	else
		writel(~0, ioa_cfg->regs.set_interrupt_mask_reg);

	/* Clear any pending interrupts */
	if (ioa_cfg->sis64)
		writel(~0, ioa_cfg->regs.clr_interrupt_reg);
	writel(clr_ints, ioa_cfg->regs.clr_interrupt_reg32);
	int_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
}

/**
 * ipr_save_pcix_cmd_reg - Save PCI-X command register
 * @ioa_cfg:	ioa config struct
 *
 * Return value:
 * 	0 on success / -EIO on failure
 **/
static int ipr_save_pcix_cmd_reg(struct ipr_ioa_cfg *ioa_cfg)
{
	int pcix_cmd_reg = pci_find_capability(ioa_cfg->pdev, PCI_CAP_ID_PCIX);

	if (pcix_cmd_reg == 0)
		return 0;

	if (pci_read_config_word(ioa_cfg->pdev, pcix_cmd_reg + PCI_X_CMD,
				 &ioa_cfg->saved_pcix_cmd_reg) != PCIBIOS_SUCCESSFUL) {
		dev_err(&ioa_cfg->pdev->dev, "Failed to save PCI-X command register\n");
		return -EIO;
	}

	/*
	 * Keep data parity error recovery and relaxed ordering enabled in the
	 * saved value that ipr_set_pcix_cmd_reg() writes back after a reset.
	 */
	ioa_cfg->saved_pcix_cmd_reg |= PCI_X_CMD_DPERR_E | PCI_X_CMD_ERO;
	return 0;
}
/**
 * ipr_set_pcix_cmd_reg - Setup PCI-X command register
 * @ioa_cfg:	ioa config struct
 *
 * Return value:
 * 	0 on success / -EIO on failure
 **/
static int ipr_set_pcix_cmd_reg(struct ipr_ioa_cfg *ioa_cfg)
{
	int pcix_cmd_reg = pci_find_capability(ioa_cfg->pdev, PCI_CAP_ID_PCIX);

	if (pcix_cmd_reg) {
		if (pci_write_config_word(ioa_cfg->pdev, pcix_cmd_reg + PCI_X_CMD,
					  ioa_cfg->saved_pcix_cmd_reg) != PCIBIOS_SUCCESSFUL) {
			dev_err(&ioa_cfg->pdev->dev, "Failed to setup PCI-X command register\n");
			return -EIO;
		}
	}

	return 0;
}

/**
 * __ipr_sata_eh_done - done function for aborted SATA commands
 * @ipr_cmd:	ipr command struct
 *
 * This function is invoked for ops generated to SATA
 * devices which are being aborted.
 *
 * Return value:
 * 	none
 **/
static void __ipr_sata_eh_done(struct ipr_cmnd *ipr_cmd)
{
	struct ata_queued_cmd *qc = ipr_cmd->qc;
	struct ipr_sata_port *sata_port = qc->ap->private_data;

	qc->err_mask |= AC_ERR_OTHER;
	sata_port->ioasa.status |= ATA_BUSY;
	ata_qc_complete(qc);
	if (ipr_cmd->eh_comp)
		complete(ipr_cmd->eh_comp);
	list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
}

/**
 * ipr_sata_eh_done - done function for aborted SATA commands
 * @ipr_cmd:	ipr command struct
 *
 * This function is invoked for ops generated to SATA
 * devices which are being aborted.
 *
 * Return value:
 * 	none
 **/
static void ipr_sata_eh_done(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_hrr_queue *hrrq = ipr_cmd->hrrq;
	unsigned long hrrq_flags;

	spin_lock_irqsave(&hrrq->_lock, hrrq_flags);
	__ipr_sata_eh_done(ipr_cmd);
	spin_unlock_irqrestore(&hrrq->_lock, hrrq_flags);
}

/**
 * __ipr_scsi_eh_done - mid-layer done function for aborted ops
 * @ipr_cmd:	ipr command struct
 *
 * This function is invoked by the interrupt handler for
 * ops generated by the SCSI mid-layer which are being aborted.
 *
 * Return value:
 * 	none
 **/
static void __ipr_scsi_eh_done(struct ipr_cmnd *ipr_cmd)
{
	struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;

	scsi_cmd->result |= (DID_ERROR << 16);

	scsi_dma_unmap(ipr_cmd->scsi_cmd);
	scsi_cmd->scsi_done(scsi_cmd);
	if (ipr_cmd->eh_comp)
		complete(ipr_cmd->eh_comp);
	list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
}

/**
 * ipr_scsi_eh_done - mid-layer done function for aborted ops
 * @ipr_cmd:	ipr command struct
 *
 * This function is invoked by the interrupt handler for
 * ops generated by the SCSI mid-layer which are being aborted.
 *
 * Return value:
 * 	none
 **/
static void ipr_scsi_eh_done(struct ipr_cmnd *ipr_cmd)
{
	unsigned long hrrq_flags;
	struct ipr_hrr_queue *hrrq = ipr_cmd->hrrq;

	spin_lock_irqsave(&hrrq->_lock, hrrq_flags);
	__ipr_scsi_eh_done(ipr_cmd);
	spin_unlock_irqrestore(&hrrq->_lock, hrrq_flags);
}
/**
 * ipr_fail_all_ops - Fails all outstanding ops.
 * @ioa_cfg:	ioa config struct
 *
 * This function fails all outstanding ops.
 *
 * Return value:
 * 	none
 **/
static void ipr_fail_all_ops(struct ipr_ioa_cfg *ioa_cfg)
{
	struct ipr_cmnd *ipr_cmd, *temp;
	struct ipr_hrr_queue *hrrq;

	ENTER;
	for_each_hrrq(hrrq, ioa_cfg) {
		spin_lock(&hrrq->_lock);
		list_for_each_entry_safe(ipr_cmd,
					temp, &hrrq->hrrq_pending_q, queue) {
			list_del(&ipr_cmd->queue);

			ipr_cmd->s.ioasa.hdr.ioasc =
				cpu_to_be32(IPR_IOASC_IOA_WAS_RESET);
			ipr_cmd->s.ioasa.hdr.ilid =
				cpu_to_be32(IPR_DRIVER_ILID);

			if (ipr_cmd->scsi_cmd)
				ipr_cmd->done = __ipr_scsi_eh_done;
			else if (ipr_cmd->qc)
				ipr_cmd->done = __ipr_sata_eh_done;

			ipr_trc_hook(ipr_cmd, IPR_TRACE_FINISH,
				     IPR_IOASC_IOA_WAS_RESET);
			del_timer(&ipr_cmd->timer);
			ipr_cmd->done(ipr_cmd);
		}
		spin_unlock(&hrrq->_lock);
	}
	LEAVE;
}

/**
 * ipr_send_command -  Send driver initiated requests.
 * @ipr_cmd:		ipr command struct
 *
 * This function sends a command to the adapter using the correct write call.
 * In the case of sis64, calculate the ioarcb size required. Then or in the
 * appropriate bits.
 *
 * Return value:
 * 	none
 **/
static void ipr_send_command(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	dma_addr_t send_dma_addr = ipr_cmd->dma_addr;

	if (ioa_cfg->sis64) {
		/* The default size is 256 bytes */
		send_dma_addr |= 0x1;

		/* If the number of ioadls * size of ioadl > 128 bytes,
		   then use a 512 byte ioarcb */
		if (ipr_cmd->dma_use_sg * sizeof(struct ipr_ioadl64_desc) > 128)
			send_dma_addr |= 0x4;
		writeq(send_dma_addr, ioa_cfg->regs.ioarrin_reg);
	} else
		writel(send_dma_addr, ioa_cfg->regs.ioarrin_reg);
}
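/*
 * Worked example for ipr_send_command() above (illustrative, not from the
 * original source): struct ipr_ioadl64_desc is 16 bytes, so a command using
 * 9 scatter/gather entries needs 9 * 16 = 144 bytes of IOADL space.  Since
 * 144 > 128, bit 2 is also ORed into the address and the adapter fetches the
 * 512 byte IOARCB format instead of the default 256 bytes.
 */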
/**
 * ipr_do_req -  Send driver initiated requests.
 * @ipr_cmd:		ipr command struct
 * @done:		done function
 * @timeout_func:	timeout function
 * @timeout:		timeout value
 *
 * This function sends the specified command to the adapter with the
 * timeout given. The done function is invoked on command completion.
 *
 * Return value:
 * 	none
 **/
static void ipr_do_req(struct ipr_cmnd *ipr_cmd,
		       void (*done) (struct ipr_cmnd *),
		       void (*timeout_func) (struct timer_list *), u32 timeout)
{
	list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);

	ipr_cmd->done = done;

	ipr_cmd->timer.expires = jiffies + timeout;
	ipr_cmd->timer.function = timeout_func;

	add_timer(&ipr_cmd->timer);

	ipr_trc_hook(ipr_cmd, IPR_TRACE_START, 0);

	ipr_send_command(ipr_cmd);
}

/**
 * ipr_internal_cmd_done - Op done function for an internally generated op.
 * @ipr_cmd:	ipr command struct
 *
 * This function is the op done function for an internally generated,
 * blocking op. It simply wakes the sleeping thread.
 *
 * Return value:
 * 	none
 **/
static void ipr_internal_cmd_done(struct ipr_cmnd *ipr_cmd)
{
	if (ipr_cmd->sibling)
		ipr_cmd->sibling = NULL;
	else
		complete(&ipr_cmd->completion);
}

/**
 * ipr_init_ioadl - initialize the ioadl for the correct SIS type
 * @ipr_cmd:	ipr command struct
 * @dma_addr:	dma address
 * @len:	transfer length
 * @flags:	ioadl flag value
 *
 * This function initializes an ioadl in the case where there is only a single
 * descriptor.
 *
 * Return value:
 * 	nothing
 **/
static void ipr_init_ioadl(struct ipr_cmnd *ipr_cmd, dma_addr_t dma_addr,
			   u32 len, int flags)
{
	struct ipr_ioadl_desc *ioadl = ipr_cmd->i.ioadl;
	struct ipr_ioadl64_desc *ioadl64 = ipr_cmd->i.ioadl64;

	ipr_cmd->dma_use_sg = 1;

	if (ipr_cmd->ioa_cfg->sis64) {
		ioadl64->flags = cpu_to_be32(flags);
		ioadl64->data_len = cpu_to_be32(len);
		ioadl64->address = cpu_to_be64(dma_addr);

		ipr_cmd->ioarcb.ioadl_len =
			cpu_to_be32(sizeof(struct ipr_ioadl64_desc));
		ipr_cmd->ioarcb.data_transfer_length = cpu_to_be32(len);
	} else {
		ioadl->flags_and_data_len = cpu_to_be32(flags | len);
		ioadl->address = cpu_to_be32(dma_addr);

		if (flags == IPR_IOADL_FLAGS_READ_LAST) {
			ipr_cmd->ioarcb.read_ioadl_len =
				cpu_to_be32(sizeof(struct ipr_ioadl_desc));
			ipr_cmd->ioarcb.read_data_transfer_length = cpu_to_be32(len);
		} else {
			ipr_cmd->ioarcb.ioadl_len =
				cpu_to_be32(sizeof(struct ipr_ioadl_desc));
			ipr_cmd->ioarcb.data_transfer_length = cpu_to_be32(len);
		}
	}
}

/**
 * ipr_send_blocking_cmd - Send command and sleep on its completion.
 * @ipr_cmd:	ipr command struct
 * @timeout_func:	function to invoke if command times out
 * @timeout:	timeout
 *
 * Return value:
 * 	none
 **/
static void ipr_send_blocking_cmd(struct ipr_cmnd *ipr_cmd,
				  void (*timeout_func) (struct timer_list *),
				  u32 timeout)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;

	init_completion(&ipr_cmd->completion);
	ipr_do_req(ipr_cmd, ipr_internal_cmd_done, timeout_func, timeout);

	spin_unlock_irq(ioa_cfg->host->host_lock);
	wait_for_completion(&ipr_cmd->completion);
	spin_lock_irq(ioa_cfg->host->host_lock);
}

static int ipr_get_hrrq_index(struct ipr_ioa_cfg *ioa_cfg)
{
	unsigned int hrrq;

	if (ioa_cfg->hrrq_num == 1)
		hrrq = 0;
	else {
		hrrq = atomic_add_return(1, &ioa_cfg->hrrq_index);
		hrrq = (hrrq % (ioa_cfg->hrrq_num - 1)) + 1;
	}
	return hrrq;
}
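/*
 * Illustrative note on ipr_get_hrrq_index() above (not from the original
 * source): HRRQ 0 is reserved for driver-internal commands, so with
 * hrrq_num == 4 successive calls return 1, 2, 3, 1, 2, 3, ..., spreading
 * mid-layer I/O round-robin over the remaining queues.
 */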
/**
 * ipr_send_hcam - Send an HCAM to the adapter.
 * @ioa_cfg:	ioa config struct
 * @type:	HCAM type
 * @hostrcb:	hostrcb struct
 *
 * This function will send a Host Controlled Async command to the adapter.
 * If HCAMs are currently not allowed to be issued to the adapter, it will
 * place the hostrcb on the free queue.
 *
 * Return value:
 * 	none
 **/
static void ipr_send_hcam(struct ipr_ioa_cfg *ioa_cfg, u8 type,
			  struct ipr_hostrcb *hostrcb)
{
	struct ipr_cmnd *ipr_cmd;
	struct ipr_ioarcb *ioarcb;

	if (ioa_cfg->hrrq[IPR_INIT_HRRQ].allow_cmds) {
		ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
		list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);
		list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_pending_q);

		ipr_cmd->u.hostrcb = hostrcb;
		ioarcb = &ipr_cmd->ioarcb;

		ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
		ioarcb->cmd_pkt.request_type = IPR_RQTYPE_HCAM;
		ioarcb->cmd_pkt.cdb[0] = IPR_HOST_CONTROLLED_ASYNC;
		ioarcb->cmd_pkt.cdb[1] = type;
		ioarcb->cmd_pkt.cdb[7] = (sizeof(hostrcb->hcam) >> 8) & 0xff;
		ioarcb->cmd_pkt.cdb[8] = sizeof(hostrcb->hcam) & 0xff;

		ipr_init_ioadl(ipr_cmd, hostrcb->hostrcb_dma,
			       sizeof(hostrcb->hcam), IPR_IOADL_FLAGS_READ_LAST);

		if (type == IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE)
			ipr_cmd->done = ipr_process_ccn;
		else
			ipr_cmd->done = ipr_process_error;

		ipr_trc_hook(ipr_cmd, IPR_TRACE_START, IPR_IOA_RES_ADDR);

		ipr_send_command(ipr_cmd);
	} else {
		list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_free_q);
	}
}

/**
 * ipr_update_ata_class - Update the ata class in the resource entry
 * @res:	resource entry struct
 * @proto:	cfgte device bus protocol value
 *
 * Return value:
 * 	none
 **/
static void ipr_update_ata_class(struct ipr_resource_entry *res, unsigned int proto)
{
	switch (proto) {
	case IPR_PROTO_SATA:
	case IPR_PROTO_SAS_STP:
		res->ata_class = ATA_DEV_ATA;
		break;
	case IPR_PROTO_SATA_ATAPI:
	case IPR_PROTO_SAS_STP_ATAPI:
		res->ata_class = ATA_DEV_ATAPI;
		break;
	default:
		res->ata_class = ATA_DEV_UNKNOWN;
		break;
	};
}
/**
 * ipr_init_res_entry - Initialize a resource entry struct.
 * @res:	resource entry struct
 * @cfgtew:	config table entry wrapper struct
 *
 * Return value:
 * 	none
 **/
static void ipr_init_res_entry(struct ipr_resource_entry *res,
			       struct ipr_config_table_entry_wrapper *cfgtew)
{
	int found = 0;
	unsigned int proto;
	struct ipr_ioa_cfg *ioa_cfg = res->ioa_cfg;
	struct ipr_resource_entry *gscsi_res = NULL;

	res->needs_sync_complete = 0;
	res->in_erp = 0;
	res->add_to_ml = 0;
	res->del_from_ml = 0;
	res->resetting_device = 0;
	res->reset_occurred = 0;
	res->sdev = NULL;
	res->sata_port = NULL;

	if (ioa_cfg->sis64) {
		proto = cfgtew->u.cfgte64->proto;
		res->flags = be16_to_cpu(cfgtew->u.cfgte64->flags);
		res->res_flags = be16_to_cpu(cfgtew->u.cfgte64->res_flags);
		res->qmodel = IPR_QUEUEING_MODEL64(res);
		res->type = cfgtew->u.cfgte64->res_type;

		memcpy(res->res_path, &cfgtew->u.cfgte64->res_path,
			sizeof(res->res_path));

		res->bus = 0;
		memcpy(&res->dev_lun.scsi_lun, &cfgtew->u.cfgte64->lun,
			sizeof(res->dev_lun.scsi_lun));
		res->lun = scsilun_to_int(&res->dev_lun);

		if (res->type == IPR_RES_TYPE_GENERIC_SCSI) {
			list_for_each_entry(gscsi_res, &ioa_cfg->used_res_q, queue) {
				if (gscsi_res->dev_id == cfgtew->u.cfgte64->dev_id) {
					found = 1;
					res->target = gscsi_res->target;
					break;
				}
			}
			if (!found) {
				res->target = find_first_zero_bit(ioa_cfg->target_ids,
								  ioa_cfg->max_devs_supported);
				set_bit(res->target, ioa_cfg->target_ids);
			}
		} else if (res->type == IPR_RES_TYPE_IOAFP) {
			res->bus = IPR_IOAFP_VIRTUAL_BUS;
			res->target = 0;
		} else if (res->type == IPR_RES_TYPE_ARRAY) {
			res->bus = IPR_ARRAY_VIRTUAL_BUS;
			res->target = find_first_zero_bit(ioa_cfg->array_ids,
							  ioa_cfg->max_devs_supported);
			set_bit(res->target, ioa_cfg->array_ids);
		} else if (res->type == IPR_RES_TYPE_VOLUME_SET) {
			res->bus = IPR_VSET_VIRTUAL_BUS;
			res->target = find_first_zero_bit(ioa_cfg->vset_ids,
							  ioa_cfg->max_devs_supported);
			set_bit(res->target, ioa_cfg->vset_ids);
		} else {
			res->target = find_first_zero_bit(ioa_cfg->target_ids,
							  ioa_cfg->max_devs_supported);
			set_bit(res->target, ioa_cfg->target_ids);
		}
	} else {
		proto = cfgtew->u.cfgte->proto;
		res->qmodel = IPR_QUEUEING_MODEL(res);
		res->flags = cfgtew->u.cfgte->flags;
		if (res->flags & IPR_IS_IOA_RESOURCE)
			res->type = IPR_RES_TYPE_IOAFP;
		else
			res->type = cfgtew->u.cfgte->rsvd_subtype & 0x0f;

		res->bus = cfgtew->u.cfgte->res_addr.bus;
		res->target = cfgtew->u.cfgte->res_addr.target;
		res->lun = cfgtew->u.cfgte->res_addr.lun;
		res->lun_wwn = get_unaligned_be64(cfgtew->u.cfgte->lun_wwn);
	}

	ipr_update_ata_class(res, proto);
}
/**
 * ipr_is_same_device - Determine if two devices are the same.
 * @res:	resource entry struct
 * @cfgtew:	config table entry wrapper struct
 *
 * Return value:
 * 	1 if the devices are the same / 0 otherwise
 **/
static int ipr_is_same_device(struct ipr_resource_entry *res,
			      struct ipr_config_table_entry_wrapper *cfgtew)
{
	if (res->ioa_cfg->sis64) {
		if (!memcmp(&res->dev_id, &cfgtew->u.cfgte64->dev_id,
			    sizeof(cfgtew->u.cfgte64->dev_id)) &&
		    !memcmp(&res->dev_lun.scsi_lun, &cfgtew->u.cfgte64->lun,
			    sizeof(cfgtew->u.cfgte64->lun))) {
			return 1;
		}
	} else {
		if (res->bus == cfgtew->u.cfgte->res_addr.bus &&
		    res->target == cfgtew->u.cfgte->res_addr.target &&
		    res->lun == cfgtew->u.cfgte->res_addr.lun)
			return 1;
	}

	return 0;
}

/**
 * __ipr_format_res_path - Format the resource path for printing.
 * @res_path:	resource path
 * @buffer:	buffer
 * @len:	length of buffer provided
 *
 * Return value:
 * 	pointer to buffer
 **/
static char *__ipr_format_res_path(u8 *res_path, char *buffer, int len)
{
	int i;
	char *p = buffer;

	*p = '\0';
	p += snprintf(p, buffer + len - p, "%02X", res_path[0]);
	for (i = 1; res_path[i] != 0xff && ((i * 3) < len); i++)
		p += snprintf(p, buffer + len - p, "-%02X", res_path[i]);

	return buffer;
}

/**
 * ipr_format_res_path - Format the resource path for printing.
 * @ioa_cfg:	ioa config struct
 * @res_path:	resource path
 * @buffer:	buffer
 * @len:	length of buffer provided
 *
 * Return value:
 *	pointer to buffer
 **/
static char *ipr_format_res_path(struct ipr_ioa_cfg *ioa_cfg,
				 u8 *res_path, char *buffer, int len)
{
	char *p = buffer;

	*p = '\0';
	p += snprintf(p, buffer + len - p, "%d/", ioa_cfg->host->host_no);
	__ipr_format_res_path(res_path, p, len - (buffer - p));
	return buffer;
}
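/*
 * Illustrative example (not from the original source): for SCSI host 2 and a
 * res_path of { 0x00, 0x0A, 0xFF, ... }, ipr_format_res_path() fills the
 * buffer with "2/00-0A"; the 0xFF byte terminates the path.
 */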
/**
 * ipr_update_res_entry - Update the resource entry.
 * @res:	resource entry struct
 * @cfgtew:	config table entry wrapper struct
 *
 * Return value:
 *      none
 **/
static void ipr_update_res_entry(struct ipr_resource_entry *res,
				 struct ipr_config_table_entry_wrapper *cfgtew)
{
	char buffer[IPR_MAX_RES_PATH_LENGTH];
	unsigned int proto;
	int new_path = 0;

	if (res->ioa_cfg->sis64) {
		res->flags = be16_to_cpu(cfgtew->u.cfgte64->flags);
		res->res_flags = be16_to_cpu(cfgtew->u.cfgte64->res_flags);
		res->type = cfgtew->u.cfgte64->res_type;

		memcpy(&res->std_inq_data, &cfgtew->u.cfgte64->std_inq_data,
			sizeof(struct ipr_std_inq_data));

		res->qmodel = IPR_QUEUEING_MODEL64(res);
		proto = cfgtew->u.cfgte64->proto;
		res->res_handle = cfgtew->u.cfgte64->res_handle;
		res->dev_id = cfgtew->u.cfgte64->dev_id;

		memcpy(&res->dev_lun.scsi_lun, &cfgtew->u.cfgte64->lun,
			sizeof(res->dev_lun.scsi_lun));

		if (memcmp(res->res_path, &cfgtew->u.cfgte64->res_path,
			   sizeof(res->res_path))) {
			memcpy(res->res_path, &cfgtew->u.cfgte64->res_path,
				sizeof(res->res_path));
			new_path = 1;
		}

		if (res->sdev && new_path)
			sdev_printk(KERN_INFO, res->sdev, "Resource path: %s\n",
				    ipr_format_res_path(res->ioa_cfg,
					res->res_path, buffer, sizeof(buffer)));
	} else {
		res->flags = cfgtew->u.cfgte->flags;
		if (res->flags & IPR_IS_IOA_RESOURCE)
			res->type = IPR_RES_TYPE_IOAFP;
		else
			res->type = cfgtew->u.cfgte->rsvd_subtype & 0x0f;

		memcpy(&res->std_inq_data, &cfgtew->u.cfgte->std_inq_data,
			sizeof(struct ipr_std_inq_data));

		res->qmodel = IPR_QUEUEING_MODEL(res);
		proto = cfgtew->u.cfgte->proto;
		res->res_handle = cfgtew->u.cfgte->res_handle;
	}

	ipr_update_ata_class(res, proto);
}
/**
 * ipr_clear_res_target - Clear the bit in the bit map representing the target
 * 			  for the resource.
 * @res:	resource entry struct
 *
 * Return value:
 *      none
 **/
static void ipr_clear_res_target(struct ipr_resource_entry *res)
{
	struct ipr_resource_entry *gscsi_res = NULL;
	struct ipr_ioa_cfg *ioa_cfg = res->ioa_cfg;

	if (!ioa_cfg->sis64)
		return;

	if (res->bus == IPR_ARRAY_VIRTUAL_BUS)
		clear_bit(res->target, ioa_cfg->array_ids);
	else if (res->bus == IPR_VSET_VIRTUAL_BUS)
		clear_bit(res->target, ioa_cfg->vset_ids);
	else if (res->bus == 0 && res->type == IPR_RES_TYPE_GENERIC_SCSI) {
		list_for_each_entry(gscsi_res, &ioa_cfg->used_res_q, queue)
			if (gscsi_res->dev_id == res->dev_id && gscsi_res != res)
				return;
		clear_bit(res->target, ioa_cfg->target_ids);

	} else if (res->bus == 0)
		clear_bit(res->target, ioa_cfg->target_ids);
}

/**
 * ipr_handle_config_change - Handle a config change from the adapter
 * @ioa_cfg:	ioa config struct
 * @hostrcb:	hostrcb
 *
 * Return value:
 * 	none
 **/
static void ipr_handle_config_change(struct ipr_ioa_cfg *ioa_cfg,
				     struct ipr_hostrcb *hostrcb)
{
	struct ipr_resource_entry *res = NULL;
	struct ipr_config_table_entry_wrapper cfgtew;
	__be32 cc_res_handle;

	u32 is_ndn = 1;

	if (ioa_cfg->sis64) {
		cfgtew.u.cfgte64 = &hostrcb->hcam.u.ccn.u.cfgte64;
		cc_res_handle = cfgtew.u.cfgte64->res_handle;
	} else {
		cfgtew.u.cfgte = &hostrcb->hcam.u.ccn.u.cfgte;
		cc_res_handle = cfgtew.u.cfgte->res_handle;
	}

	list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
		if (res->res_handle == cc_res_handle) {
			is_ndn = 0;
			break;
		}
	}

	if (is_ndn) {
		if (list_empty(&ioa_cfg->free_res_q)) {
			ipr_send_hcam(ioa_cfg,
				      IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE,
				      hostrcb);
			return;
		}

		res = list_entry(ioa_cfg->free_res_q.next,
				 struct ipr_resource_entry, queue);

		list_del(&res->queue);
		ipr_init_res_entry(res, &cfgtew);
		list_add_tail(&res->queue, &ioa_cfg->used_res_q);
	}

	ipr_update_res_entry(res, &cfgtew);

	if (hostrcb->hcam.notify_type == IPR_HOST_RCB_NOTIF_TYPE_REM_ENTRY) {
		if (res->sdev) {
			res->del_from_ml = 1;
			res->res_handle = IPR_INVALID_RES_HANDLE;
			schedule_work(&ioa_cfg->work_q);
		} else {
			ipr_clear_res_target(res);
			list_move_tail(&res->queue, &ioa_cfg->free_res_q);
		}
	} else if (!res->sdev || res->del_from_ml) {
		res->add_to_ml = 1;
		schedule_work(&ioa_cfg->work_q);
	}

	ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE, hostrcb);
}
/**
 * ipr_process_ccn - Op done function for a CCN.
 * @ipr_cmd:	ipr command struct
 *
 * This function is the op done function for a configuration
 * change notification host controlled async from the adapter.
 *
 * Return value:
 * 	none
 **/
static void ipr_process_ccn(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	struct ipr_hostrcb *hostrcb = ipr_cmd->u.hostrcb;
	u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);

	list_del_init(&hostrcb->queue);
	list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);

	if (ioasc) {
		if (ioasc != IPR_IOASC_IOA_WAS_RESET &&
		    ioasc != IPR_IOASC_ABORTED_CMD_TERM_BY_HOST)
			dev_err(&ioa_cfg->pdev->dev,
				"Host RCB failed with IOASC: 0x%08X\n", ioasc);

		ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE, hostrcb);
	} else {
		ipr_handle_config_change(ioa_cfg, hostrcb);
	}
}

/**
 * strip_and_pad_whitespace - Strip and pad trailing whitespace.
 * @i:		index into buffer
 * @buf:	string to modify
 *
 * This function will strip all trailing whitespace, pad the end
 * of the string with a single space, and NULL terminate the string.
 *
 * Return value:
 * 	new length of string
 **/
static int strip_and_pad_whitespace(int i, char *buf)
{
	while (i && buf[i] == ' ')
		i--;
	buf[i+1] = ' ';
	buf[i+2] = '\0';
	return i + 2;
}
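/*
 * Worked example for strip_and_pad_whitespace() above (illustrative only):
 * called with buf = "IBM     " and i = 7, the loop backs up to the 'M' at
 * index 2, a single space and a NUL are written at indices 3 and 4, and the
 * function returns 4, leaving "IBM " ready for the next field to be appended.
 */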
/**
 * ipr_log_vpd_compact - Log the passed extended VPD compactly.
 * @prefix:	string to print at start of printk
 * @hostrcb:	hostrcb pointer
 * @vpd:	vendor/product id/sn struct
 *
 * Return value:
 *	none
 **/
static void ipr_log_vpd_compact(char *prefix, struct ipr_hostrcb *hostrcb,
				struct ipr_vpd *vpd)
{
	char buffer[IPR_VENDOR_ID_LEN + IPR_PROD_ID_LEN + IPR_SERIAL_NUM_LEN + 3];
	int i = 0;

	memcpy(buffer, vpd->vpids.vendor_id, IPR_VENDOR_ID_LEN);
	i = strip_and_pad_whitespace(IPR_VENDOR_ID_LEN - 1, buffer);

	memcpy(&buffer[i], vpd->vpids.product_id, IPR_PROD_ID_LEN);
	i = strip_and_pad_whitespace(i + IPR_PROD_ID_LEN - 1, buffer);

	memcpy(&buffer[i], vpd->sn, IPR_SERIAL_NUM_LEN);
	buffer[IPR_SERIAL_NUM_LEN + i] = '\0';

	ipr_hcam_err(hostrcb, "%s VPID/SN: %s\n", prefix, buffer);
}

/**
 * ipr_log_vpd - Log the passed VPD to the error log.
 * @vpd:	vendor/product id/sn struct
 *
 * Return value:
 * 	none
 **/
static void ipr_log_vpd(struct ipr_vpd *vpd)
{
	char buffer[IPR_VENDOR_ID_LEN + IPR_PROD_ID_LEN
		    + IPR_SERIAL_NUM_LEN];

	memcpy(buffer, vpd->vpids.vendor_id, IPR_VENDOR_ID_LEN);
	memcpy(buffer + IPR_VENDOR_ID_LEN, vpd->vpids.product_id,
	       IPR_PROD_ID_LEN);
	buffer[IPR_VENDOR_ID_LEN + IPR_PROD_ID_LEN] = '\0';
	ipr_err("Vendor/Product ID: %s\n", buffer);

	memcpy(buffer, vpd->sn, IPR_SERIAL_NUM_LEN);
	buffer[IPR_SERIAL_NUM_LEN] = '\0';
	ipr_err("    Serial Number: %s\n", buffer);
}

/**
 * ipr_log_ext_vpd_compact - Log the passed extended VPD compactly.
 * @prefix:	string to print at start of printk
 * @hostrcb:	hostrcb pointer
 * @vpd:	vendor/product id/sn/wwn struct
 *
 * Return value:
 *	none
 **/
static void ipr_log_ext_vpd_compact(char *prefix, struct ipr_hostrcb *hostrcb,
				    struct ipr_ext_vpd *vpd)
{
	ipr_log_vpd_compact(prefix, hostrcb, &vpd->vpd);
	ipr_hcam_err(hostrcb, "%s WWN: %08X%08X\n", prefix,
		     be32_to_cpu(vpd->wwid[0]), be32_to_cpu(vpd->wwid[1]));
}

/**
 * ipr_log_ext_vpd - Log the passed extended VPD to the error log.
 * @vpd:	vendor/product id/sn/wwn struct
 *
 * Return value:
 * 	none
 **/
static void ipr_log_ext_vpd(struct ipr_ext_vpd *vpd)
{
	ipr_log_vpd(&vpd->vpd);
	ipr_err("    WWN: %08X%08X\n", be32_to_cpu(vpd->wwid[0]),
		be32_to_cpu(vpd->wwid[1]));
}

/**
 * ipr_log_enhanced_cache_error - Log a cache error.
 * @ioa_cfg:	ioa config struct
 * @hostrcb:	hostrcb struct
 *
 * Return value:
 * 	none
 **/
static void ipr_log_enhanced_cache_error(struct ipr_ioa_cfg *ioa_cfg,
					 struct ipr_hostrcb *hostrcb)
{
	struct ipr_hostrcb_type_12_error *error;

	if (ioa_cfg->sis64)
		error = &hostrcb->hcam.u.error64.u.type_12_error;
	else
		error = &hostrcb->hcam.u.error.u.type_12_error;

	ipr_err("-----Current Configuration-----\n");
	ipr_err("Cache Directory Card Information:\n");
	ipr_log_ext_vpd(&error->ioa_vpd);
	ipr_err("Adapter Card Information:\n");
	ipr_log_ext_vpd(&error->cfc_vpd);

	ipr_err("-----Expected Configuration-----\n");
	ipr_err("Cache Directory Card Information:\n");
	ipr_log_ext_vpd(&error->ioa_last_attached_to_cfc_vpd);
	ipr_err("Adapter Card Information:\n");
	ipr_log_ext_vpd(&error->cfc_last_attached_to_ioa_vpd);

	ipr_err("Additional IOA Data: %08X %08X %08X\n",
		     be32_to_cpu(error->ioa_data[0]),
		     be32_to_cpu(error->ioa_data[1]),
		     be32_to_cpu(error->ioa_data[2]));
}

/**
 * ipr_log_cache_error - Log a cache error.
 * @ioa_cfg:	ioa config struct
 * @hostrcb:	hostrcb struct
 *
 * Return value:
 * 	none
 **/
static void ipr_log_cache_error(struct ipr_ioa_cfg *ioa_cfg,
				struct ipr_hostrcb *hostrcb)
{
	struct ipr_hostrcb_type_02_error *error =
		&hostrcb->hcam.u.error.u.type_02_error;

	ipr_err("-----Current Configuration-----\n");
	ipr_err("Cache Directory Card Information:\n");
	ipr_log_vpd(&error->ioa_vpd);
	ipr_err("Adapter Card Information:\n");
	ipr_log_vpd(&error->cfc_vpd);

	ipr_err("-----Expected Configuration-----\n");
	ipr_err("Cache Directory Card Information:\n");
	ipr_log_vpd(&error->ioa_last_attached_to_cfc_vpd);
	ipr_err("Adapter Card Information:\n");
	ipr_log_vpd(&error->cfc_last_attached_to_ioa_vpd);

	ipr_err("Additional IOA Data: %08X %08X %08X\n",
		be32_to_cpu(error->ioa_data[0]),
		be32_to_cpu(error->ioa_data[1]),
		be32_to_cpu(error->ioa_data[2]));
}
1704 * @ioa_cfg: ioa config struct 1705 * @hostrcb: hostrcb struct 1706 * 1707 * Return value: 1708 * none 1709 **/ 1710 static void ipr_log_enhanced_config_error(struct ipr_ioa_cfg *ioa_cfg, 1711 struct ipr_hostrcb *hostrcb) 1712 { 1713 int errors_logged, i; 1714 struct ipr_hostrcb_device_data_entry_enhanced *dev_entry; 1715 struct ipr_hostrcb_type_13_error *error; 1716 1717 error = &hostrcb->hcam.u.error.u.type_13_error; 1718 errors_logged = be32_to_cpu(error->errors_logged); 1719 1720 ipr_err("Device Errors Detected/Logged: %d/%d\n", 1721 be32_to_cpu(error->errors_detected), errors_logged); 1722 1723 dev_entry = error->dev; 1724 1725 for (i = 0; i < errors_logged; i++, dev_entry++) { 1726 ipr_err_separator; 1727 1728 ipr_phys_res_err(ioa_cfg, dev_entry->dev_res_addr, "Device %d", i + 1); 1729 ipr_log_ext_vpd(&dev_entry->vpd); 1730 1731 ipr_err("-----New Device Information-----\n"); 1732 ipr_log_ext_vpd(&dev_entry->new_vpd); 1733 1734 ipr_err("Cache Directory Card Information:\n"); 1735 ipr_log_ext_vpd(&dev_entry->ioa_last_with_dev_vpd); 1736 1737 ipr_err("Adapter Card Information:\n"); 1738 ipr_log_ext_vpd(&dev_entry->cfc_last_with_dev_vpd); 1739 } 1740 } 1741 1742 /** 1743 * ipr_log_sis64_config_error - Log a device error. 1744 * @ioa_cfg: ioa config struct 1745 * @hostrcb: hostrcb struct 1746 * 1747 * Return value: 1748 * none 1749 **/ 1750 static void ipr_log_sis64_config_error(struct ipr_ioa_cfg *ioa_cfg, 1751 struct ipr_hostrcb *hostrcb) 1752 { 1753 int errors_logged, i; 1754 struct ipr_hostrcb64_device_data_entry_enhanced *dev_entry; 1755 struct ipr_hostrcb_type_23_error *error; 1756 char buffer[IPR_MAX_RES_PATH_LENGTH]; 1757 1758 error = &hostrcb->hcam.u.error64.u.type_23_error; 1759 errors_logged = be32_to_cpu(error->errors_logged); 1760 1761 ipr_err("Device Errors Detected/Logged: %d/%d\n", 1762 be32_to_cpu(error->errors_detected), errors_logged); 1763 1764 dev_entry = error->dev; 1765 1766 for (i = 0; i < errors_logged; i++, dev_entry++) { 1767 ipr_err_separator; 1768 1769 ipr_err("Device %d : %s", i + 1, 1770 __ipr_format_res_path(dev_entry->res_path, 1771 buffer, sizeof(buffer))); 1772 ipr_log_ext_vpd(&dev_entry->vpd); 1773 1774 ipr_err("-----New Device Information-----\n"); 1775 ipr_log_ext_vpd(&dev_entry->new_vpd); 1776 1777 ipr_err("Cache Directory Card Information:\n"); 1778 ipr_log_ext_vpd(&dev_entry->ioa_last_with_dev_vpd); 1779 1780 ipr_err("Adapter Card Information:\n"); 1781 ipr_log_ext_vpd(&dev_entry->cfc_last_with_dev_vpd); 1782 } 1783 } 1784 1785 /** 1786 * ipr_log_config_error - Log a configuration error. 
1787 * @ioa_cfg: ioa config struct 1788 * @hostrcb: hostrcb struct 1789 * 1790 * Return value: 1791 * none 1792 **/ 1793 static void ipr_log_config_error(struct ipr_ioa_cfg *ioa_cfg, 1794 struct ipr_hostrcb *hostrcb) 1795 { 1796 int errors_logged, i; 1797 struct ipr_hostrcb_device_data_entry *dev_entry; 1798 struct ipr_hostrcb_type_03_error *error; 1799 1800 error = &hostrcb->hcam.u.error.u.type_03_error; 1801 errors_logged = be32_to_cpu(error->errors_logged); 1802 1803 ipr_err("Device Errors Detected/Logged: %d/%d\n", 1804 be32_to_cpu(error->errors_detected), errors_logged); 1805 1806 dev_entry = error->dev; 1807 1808 for (i = 0; i < errors_logged; i++, dev_entry++) { 1809 ipr_err_separator; 1810 1811 ipr_phys_res_err(ioa_cfg, dev_entry->dev_res_addr, "Device %d", i + 1); 1812 ipr_log_vpd(&dev_entry->vpd); 1813 1814 ipr_err("-----New Device Information-----\n"); 1815 ipr_log_vpd(&dev_entry->new_vpd); 1816 1817 ipr_err("Cache Directory Card Information:\n"); 1818 ipr_log_vpd(&dev_entry->ioa_last_with_dev_vpd); 1819 1820 ipr_err("Adapter Card Information:\n"); 1821 ipr_log_vpd(&dev_entry->cfc_last_with_dev_vpd); 1822 1823 ipr_err("Additional IOA Data: %08X %08X %08X %08X %08X\n", 1824 be32_to_cpu(dev_entry->ioa_data[0]), 1825 be32_to_cpu(dev_entry->ioa_data[1]), 1826 be32_to_cpu(dev_entry->ioa_data[2]), 1827 be32_to_cpu(dev_entry->ioa_data[3]), 1828 be32_to_cpu(dev_entry->ioa_data[4])); 1829 } 1830 } 1831 1832 /** 1833 * ipr_log_enhanced_array_error - Log an array configuration error. 1834 * @ioa_cfg: ioa config struct 1835 * @hostrcb: hostrcb struct 1836 * 1837 * Return value: 1838 * none 1839 **/ 1840 static void ipr_log_enhanced_array_error(struct ipr_ioa_cfg *ioa_cfg, 1841 struct ipr_hostrcb *hostrcb) 1842 { 1843 int i, num_entries; 1844 struct ipr_hostrcb_type_14_error *error; 1845 struct ipr_hostrcb_array_data_entry_enhanced *array_entry; 1846 const u8 zero_sn[IPR_SERIAL_NUM_LEN] = { [0 ... IPR_SERIAL_NUM_LEN-1] = '0' }; 1847 1848 error = &hostrcb->hcam.u.error.u.type_14_error; 1849 1850 ipr_err_separator; 1851 1852 ipr_err("RAID %s Array Configuration: %d:%d:%d:%d\n", 1853 error->protection_level, 1854 ioa_cfg->host->host_no, 1855 error->last_func_vset_res_addr.bus, 1856 error->last_func_vset_res_addr.target, 1857 error->last_func_vset_res_addr.lun); 1858 1859 ipr_err_separator; 1860 1861 array_entry = error->array_member; 1862 num_entries = min_t(u32, be32_to_cpu(error->num_entries), 1863 ARRAY_SIZE(error->array_member)); 1864 1865 for (i = 0; i < num_entries; i++, array_entry++) { 1866 if (!memcmp(array_entry->vpd.vpd.sn, zero_sn, IPR_SERIAL_NUM_LEN)) 1867 continue; 1868 1869 if (be32_to_cpu(error->exposed_mode_adn) == i) 1870 ipr_err("Exposed Array Member %d:\n", i); 1871 else 1872 ipr_err("Array Member %d:\n", i); 1873 1874 ipr_log_ext_vpd(&array_entry->vpd); 1875 ipr_phys_res_err(ioa_cfg, array_entry->dev_res_addr, "Current Location"); 1876 ipr_phys_res_err(ioa_cfg, array_entry->expected_dev_res_addr, 1877 "Expected Location"); 1878 1879 ipr_err_separator; 1880 } 1881 } 1882 1883 /** 1884 * ipr_log_array_error - Log an array configuration error. 1885 * @ioa_cfg: ioa config struct 1886 * @hostrcb: hostrcb struct 1887 * 1888 * Return value: 1889 * none 1890 **/ 1891 static void ipr_log_array_error(struct ipr_ioa_cfg *ioa_cfg, 1892 struct ipr_hostrcb *hostrcb) 1893 { 1894 int i; 1895 struct ipr_hostrcb_type_04_error *error; 1896 struct ipr_hostrcb_array_data_entry *array_entry; 1897 const u8 zero_sn[IPR_SERIAL_NUM_LEN] = { [0 ... 
IPR_SERIAL_NUM_LEN-1] = '0' }; 1898 1899 error = &hostrcb->hcam.u.error.u.type_04_error; 1900 1901 ipr_err_separator; 1902 1903 ipr_err("RAID %s Array Configuration: %d:%d:%d:%d\n", 1904 error->protection_level, 1905 ioa_cfg->host->host_no, 1906 error->last_func_vset_res_addr.bus, 1907 error->last_func_vset_res_addr.target, 1908 error->last_func_vset_res_addr.lun); 1909 1910 ipr_err_separator; 1911 1912 array_entry = error->array_member; 1913 1914 for (i = 0; i < 18; i++) { 1915 if (!memcmp(array_entry->vpd.sn, zero_sn, IPR_SERIAL_NUM_LEN)) 1916 continue; 1917 1918 if (be32_to_cpu(error->exposed_mode_adn) == i) 1919 ipr_err("Exposed Array Member %d:\n", i); 1920 else 1921 ipr_err("Array Member %d:\n", i); 1922 1923 ipr_log_vpd(&array_entry->vpd); 1924 1925 ipr_phys_res_err(ioa_cfg, array_entry->dev_res_addr, "Current Location"); 1926 ipr_phys_res_err(ioa_cfg, array_entry->expected_dev_res_addr, 1927 "Expected Location"); 1928 1929 ipr_err_separator; 1930 1931 if (i == 9) 1932 array_entry = error->array_member2; 1933 else 1934 array_entry++; 1935 } 1936 } 1937 1938 /** 1939 * ipr_log_hex_data - Log additional hex IOA error data. 1940 * @ioa_cfg: ioa config struct 1941 * @data: IOA error data 1942 * @len: data length 1943 * 1944 * Return value: 1945 * none 1946 **/ 1947 static void ipr_log_hex_data(struct ipr_ioa_cfg *ioa_cfg, __be32 *data, int len) 1948 { 1949 int i; 1950 1951 if (len == 0) 1952 return; 1953 1954 if (ioa_cfg->log_level <= IPR_DEFAULT_LOG_LEVEL) 1955 len = min_t(int, len, IPR_DEFAULT_MAX_ERROR_DUMP); 1956 1957 for (i = 0; i < len / 4; i += 4) { 1958 ipr_err("%08X: %08X %08X %08X %08X\n", i*4, 1959 be32_to_cpu(data[i]), 1960 be32_to_cpu(data[i+1]), 1961 be32_to_cpu(data[i+2]), 1962 be32_to_cpu(data[i+3])); 1963 } 1964 } 1965 1966 /** 1967 * ipr_log_enhanced_dual_ioa_error - Log an enhanced dual adapter error. 1968 * @ioa_cfg: ioa config struct 1969 * @hostrcb: hostrcb struct 1970 * 1971 * Return value: 1972 * none 1973 **/ 1974 static void ipr_log_enhanced_dual_ioa_error(struct ipr_ioa_cfg *ioa_cfg, 1975 struct ipr_hostrcb *hostrcb) 1976 { 1977 struct ipr_hostrcb_type_17_error *error; 1978 1979 if (ioa_cfg->sis64) 1980 error = &hostrcb->hcam.u.error64.u.type_17_error; 1981 else 1982 error = &hostrcb->hcam.u.error.u.type_17_error; 1983 1984 error->failure_reason[sizeof(error->failure_reason) - 1] = '\0'; 1985 strim(error->failure_reason); 1986 1987 ipr_hcam_err(hostrcb, "%s [PRC: %08X]\n", error->failure_reason, 1988 be32_to_cpu(hostrcb->hcam.u.error.prc)); 1989 ipr_log_ext_vpd_compact("Remote IOA", hostrcb, &error->vpd); 1990 ipr_log_hex_data(ioa_cfg, error->data, 1991 be32_to_cpu(hostrcb->hcam.length) - 1992 (offsetof(struct ipr_hostrcb_error, u) + 1993 offsetof(struct ipr_hostrcb_type_17_error, data))); 1994 } 1995 1996 /** 1997 * ipr_log_dual_ioa_error - Log a dual adapter error. 
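 *
 * The variable length data that follows the fixed part of the error is
 * rendered by ipr_log_hex_data(), four big-endian 32-bit words per line
 * with a byte offset prefix, for example (data invented):
 *
 *   00000000: 01234567 89ABCDEF 00000001 00000002
 *   00000010: DEADBEEF 00000000 00000000 00000003
 *
 * At or below the default log level the dump is capped at
 * IPR_DEFAULT_MAX_ERROR_DUMP bytes.
 *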
1998 * @ioa_cfg: ioa config struct 1999 * @hostrcb: hostrcb struct 2000 * 2001 * Return value: 2002 * none 2003 **/ 2004 static void ipr_log_dual_ioa_error(struct ipr_ioa_cfg *ioa_cfg, 2005 struct ipr_hostrcb *hostrcb) 2006 { 2007 struct ipr_hostrcb_type_07_error *error; 2008 2009 error = &hostrcb->hcam.u.error.u.type_07_error; 2010 error->failure_reason[sizeof(error->failure_reason) - 1] = '\0'; 2011 strim(error->failure_reason); 2012 2013 ipr_hcam_err(hostrcb, "%s [PRC: %08X]\n", error->failure_reason, 2014 be32_to_cpu(hostrcb->hcam.u.error.prc)); 2015 ipr_log_vpd_compact("Remote IOA", hostrcb, &error->vpd); 2016 ipr_log_hex_data(ioa_cfg, error->data, 2017 be32_to_cpu(hostrcb->hcam.length) - 2018 (offsetof(struct ipr_hostrcb_error, u) + 2019 offsetof(struct ipr_hostrcb_type_07_error, data))); 2020 } 2021 2022 static const struct { 2023 u8 active; 2024 char *desc; 2025 } path_active_desc[] = { 2026 { IPR_PATH_NO_INFO, "Path" }, 2027 { IPR_PATH_ACTIVE, "Active path" }, 2028 { IPR_PATH_NOT_ACTIVE, "Inactive path" } 2029 }; 2030 2031 static const struct { 2032 u8 state; 2033 char *desc; 2034 } path_state_desc[] = { 2035 { IPR_PATH_STATE_NO_INFO, "has no path state information available" }, 2036 { IPR_PATH_HEALTHY, "is healthy" }, 2037 { IPR_PATH_DEGRADED, "is degraded" }, 2038 { IPR_PATH_FAILED, "is failed" } 2039 }; 2040 2041 /** 2042 * ipr_log_fabric_path - Log a fabric path error 2043 * @hostrcb: hostrcb struct 2044 * @fabric: fabric descriptor 2045 * 2046 * Return value: 2047 * none 2048 **/ 2049 static void ipr_log_fabric_path(struct ipr_hostrcb *hostrcb, 2050 struct ipr_hostrcb_fabric_desc *fabric) 2051 { 2052 int i, j; 2053 u8 path_state = fabric->path_state; 2054 u8 active = path_state & IPR_PATH_ACTIVE_MASK; 2055 u8 state = path_state & IPR_PATH_STATE_MASK; 2056 2057 for (i = 0; i < ARRAY_SIZE(path_active_desc); i++) { 2058 if (path_active_desc[i].active != active) 2059 continue; 2060 2061 for (j = 0; j < ARRAY_SIZE(path_state_desc); j++) { 2062 if (path_state_desc[j].state != state) 2063 continue; 2064 2065 if (fabric->cascaded_expander == 0xff && fabric->phy == 0xff) { 2066 ipr_hcam_err(hostrcb, "%s %s: IOA Port=%d\n", 2067 path_active_desc[i].desc, path_state_desc[j].desc, 2068 fabric->ioa_port); 2069 } else if (fabric->cascaded_expander == 0xff) { 2070 ipr_hcam_err(hostrcb, "%s %s: IOA Port=%d, Phy=%d\n", 2071 path_active_desc[i].desc, path_state_desc[j].desc, 2072 fabric->ioa_port, fabric->phy); 2073 } else if (fabric->phy == 0xff) { 2074 ipr_hcam_err(hostrcb, "%s %s: IOA Port=%d, Cascade=%d\n", 2075 path_active_desc[i].desc, path_state_desc[j].desc, 2076 fabric->ioa_port, fabric->cascaded_expander); 2077 } else { 2078 ipr_hcam_err(hostrcb, "%s %s: IOA Port=%d, Cascade=%d, Phy=%d\n", 2079 path_active_desc[i].desc, path_state_desc[j].desc, 2080 fabric->ioa_port, fabric->cascaded_expander, fabric->phy); 2081 } 2082 return; 2083 } 2084 } 2085 2086 ipr_err("Path state=%02X IOA Port=%d Cascade=%d Phy=%d\n", path_state, 2087 fabric->ioa_port, fabric->cascaded_expander, fabric->phy); 2088 } 2089 2090 /** 2091 * ipr_log64_fabric_path - Log a fabric path error 2092 * @hostrcb: hostrcb struct 2093 * @fabric: fabric descriptor 2094 * 2095 * Return value: 2096 * none 2097 **/ 2098 static void ipr_log64_fabric_path(struct ipr_hostrcb *hostrcb, 2099 struct ipr_hostrcb64_fabric_desc *fabric) 2100 { 2101 int i, j; 2102 u8 path_state = fabric->path_state; 2103 u8 active = path_state & IPR_PATH_ACTIVE_MASK; 2104 u8 state = path_state & IPR_PATH_STATE_MASK; 2105 char 
buffer[IPR_MAX_RES_PATH_LENGTH]; 2106 2107 for (i = 0; i < ARRAY_SIZE(path_active_desc); i++) { 2108 if (path_active_desc[i].active != active) 2109 continue; 2110 2111 for (j = 0; j < ARRAY_SIZE(path_state_desc); j++) { 2112 if (path_state_desc[j].state != state) 2113 continue; 2114 2115 ipr_hcam_err(hostrcb, "%s %s: Resource Path=%s\n", 2116 path_active_desc[i].desc, path_state_desc[j].desc, 2117 ipr_format_res_path(hostrcb->ioa_cfg, 2118 fabric->res_path, 2119 buffer, sizeof(buffer))); 2120 return; 2121 } 2122 } 2123 2124 ipr_err("Path state=%02X Resource Path=%s\n", path_state, 2125 ipr_format_res_path(hostrcb->ioa_cfg, fabric->res_path, 2126 buffer, sizeof(buffer))); 2127 } 2128 2129 static const struct { 2130 u8 type; 2131 char *desc; 2132 } path_type_desc[] = { 2133 { IPR_PATH_CFG_IOA_PORT, "IOA port" }, 2134 { IPR_PATH_CFG_EXP_PORT, "Expander port" }, 2135 { IPR_PATH_CFG_DEVICE_PORT, "Device port" }, 2136 { IPR_PATH_CFG_DEVICE_LUN, "Device LUN" } 2137 }; 2138 2139 static const struct { 2140 u8 status; 2141 char *desc; 2142 } path_status_desc[] = { 2143 { IPR_PATH_CFG_NO_PROB, "Functional" }, 2144 { IPR_PATH_CFG_DEGRADED, "Degraded" }, 2145 { IPR_PATH_CFG_FAILED, "Failed" }, 2146 { IPR_PATH_CFG_SUSPECT, "Suspect" }, 2147 { IPR_PATH_NOT_DETECTED, "Missing" }, 2148 { IPR_PATH_INCORRECT_CONN, "Incorrectly connected" } 2149 }; 2150 2151 static const char *link_rate[] = { 2152 "unknown", 2153 "disabled", 2154 "phy reset problem", 2155 "spinup hold", 2156 "port selector", 2157 "unknown", 2158 "unknown", 2159 "unknown", 2160 "1.5Gbps", 2161 "3.0Gbps", 2162 "unknown", 2163 "unknown", 2164 "unknown", 2165 "unknown", 2166 "unknown", 2167 "unknown" 2168 }; 2169 2170 /** 2171 * ipr_log_path_elem - Log a fabric path element. 2172 * @hostrcb: hostrcb struct 2173 * @cfg: fabric path element struct 2174 * 2175 * Return value: 2176 * none 2177 **/ 2178 static void ipr_log_path_elem(struct ipr_hostrcb *hostrcb, 2179 struct ipr_hostrcb_config_element *cfg) 2180 { 2181 int i, j; 2182 u8 type = cfg->type_status & IPR_PATH_CFG_TYPE_MASK; 2183 u8 status = cfg->type_status & IPR_PATH_CFG_STATUS_MASK; 2184 2185 if (type == IPR_PATH_CFG_NOT_EXIST) 2186 return; 2187 2188 for (i = 0; i < ARRAY_SIZE(path_type_desc); i++) { 2189 if (path_type_desc[i].type != type) 2190 continue; 2191 2192 for (j = 0; j < ARRAY_SIZE(path_status_desc); j++) { 2193 if (path_status_desc[j].status != status) 2194 continue; 2195 2196 if (type == IPR_PATH_CFG_IOA_PORT) { 2197 ipr_hcam_err(hostrcb, "%s %s: Phy=%d, Link rate=%s, WWN=%08X%08X\n", 2198 path_status_desc[j].desc, path_type_desc[i].desc, 2199 cfg->phy, link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK], 2200 be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1])); 2201 } else { 2202 if (cfg->cascaded_expander == 0xff && cfg->phy == 0xff) { 2203 ipr_hcam_err(hostrcb, "%s %s: Link rate=%s, WWN=%08X%08X\n", 2204 path_status_desc[j].desc, path_type_desc[i].desc, 2205 link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK], 2206 be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1])); 2207 } else if (cfg->cascaded_expander == 0xff) { 2208 ipr_hcam_err(hostrcb, "%s %s: Phy=%d, Link rate=%s, " 2209 "WWN=%08X%08X\n", path_status_desc[j].desc, 2210 path_type_desc[i].desc, cfg->phy, 2211 link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK], 2212 be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1])); 2213 } else if (cfg->phy == 0xff) { 2214 ipr_hcam_err(hostrcb, "%s %s: Cascade=%d, Link rate=%s, " 2215 "WWN=%08X%08X\n", path_status_desc[j].desc, 2216 path_type_desc[i].desc, cfg->cascaded_expander, 
2217 link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK], 2218 be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1])); 2219 } else { 2220 ipr_hcam_err(hostrcb, "%s %s: Cascade=%d, Phy=%d, Link rate=%s " 2221 "WWN=%08X%08X\n", path_status_desc[j].desc, 2222 path_type_desc[i].desc, cfg->cascaded_expander, cfg->phy, 2223 link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK], 2224 be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1])); 2225 } 2226 } 2227 return; 2228 } 2229 } 2230 2231 ipr_hcam_err(hostrcb, "Path element=%02X: Cascade=%d Phy=%d Link rate=%s " 2232 "WWN=%08X%08X\n", cfg->type_status, cfg->cascaded_expander, cfg->phy, 2233 link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK], 2234 be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1])); 2235 } 2236 2237 /** 2238 * ipr_log64_path_elem - Log a fabric path element. 2239 * @hostrcb: hostrcb struct 2240 * @cfg: fabric path element struct 2241 * 2242 * Return value: 2243 * none 2244 **/ 2245 static void ipr_log64_path_elem(struct ipr_hostrcb *hostrcb, 2246 struct ipr_hostrcb64_config_element *cfg) 2247 { 2248 int i, j; 2249 u8 desc_id = cfg->descriptor_id & IPR_DESCRIPTOR_MASK; 2250 u8 type = cfg->type_status & IPR_PATH_CFG_TYPE_MASK; 2251 u8 status = cfg->type_status & IPR_PATH_CFG_STATUS_MASK; 2252 char buffer[IPR_MAX_RES_PATH_LENGTH]; 2253 2254 if (type == IPR_PATH_CFG_NOT_EXIST || desc_id != IPR_DESCRIPTOR_SIS64) 2255 return; 2256 2257 for (i = 0; i < ARRAY_SIZE(path_type_desc); i++) { 2258 if (path_type_desc[i].type != type) 2259 continue; 2260 2261 for (j = 0; j < ARRAY_SIZE(path_status_desc); j++) { 2262 if (path_status_desc[j].status != status) 2263 continue; 2264 2265 ipr_hcam_err(hostrcb, "%s %s: Resource Path=%s, Link rate=%s, WWN=%08X%08X\n", 2266 path_status_desc[j].desc, path_type_desc[i].desc, 2267 ipr_format_res_path(hostrcb->ioa_cfg, 2268 cfg->res_path, buffer, sizeof(buffer)), 2269 link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK], 2270 be32_to_cpu(cfg->wwid[0]), 2271 be32_to_cpu(cfg->wwid[1])); 2272 return; 2273 } 2274 } 2275 ipr_hcam_err(hostrcb, "Path element=%02X: Resource Path=%s, Link rate=%s " 2276 "WWN=%08X%08X\n", cfg->type_status, 2277 ipr_format_res_path(hostrcb->ioa_cfg, 2278 cfg->res_path, buffer, sizeof(buffer)), 2279 link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK], 2280 be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1])); 2281 } 2282 2283 /** 2284 * ipr_log_fabric_error - Log a fabric error. 
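 *
 * Each fabric descriptor is reported by ipr_log_fabric_path() and its
 * path elements by ipr_log_path_elem(). With invented values, and with
 * any device prefix added by ipr_hcam_err() omitted, the output looks
 * like:
 *
 *   Active path is healthy: IOA Port=0, Cascade=1, Phy=3
 *   Functional IOA port: Phy=0, Link rate=3.0Gbps, WWN=5005076C0001A2B3
 *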
2285 * @ioa_cfg: ioa config struct
2286 * @hostrcb: hostrcb struct
2287 *
2288 * Return value:
2289 * none
2290 **/
2291 static void ipr_log_fabric_error(struct ipr_ioa_cfg *ioa_cfg,
2292 struct ipr_hostrcb *hostrcb)
2293 {
2294 struct ipr_hostrcb_type_20_error *error;
2295 struct ipr_hostrcb_fabric_desc *fabric;
2296 struct ipr_hostrcb_config_element *cfg;
2297 int i, add_len;
2298
2299 error = &hostrcb->hcam.u.error.u.type_20_error;
2300 error->failure_reason[sizeof(error->failure_reason) - 1] = '\0';
2301 ipr_hcam_err(hostrcb, "%s\n", error->failure_reason);
2302
2303 add_len = be32_to_cpu(hostrcb->hcam.length) -
2304 (offsetof(struct ipr_hostrcb_error, u) +
2305 offsetof(struct ipr_hostrcb_type_20_error, desc));
2306
2307 for (i = 0, fabric = error->desc; i < error->num_entries; i++) {
2308 ipr_log_fabric_path(hostrcb, fabric);
2309 for_each_fabric_cfg(fabric, cfg)
2310 ipr_log_path_elem(hostrcb, cfg);
2311
2312 add_len -= be16_to_cpu(fabric->length);
2313 fabric = (struct ipr_hostrcb_fabric_desc *)
2314 ((unsigned long)fabric + be16_to_cpu(fabric->length));
2315 }
2316
2317 ipr_log_hex_data(ioa_cfg, (__be32 *)fabric, add_len);
2318 }
2319
2320 /**
2321 * ipr_log_sis64_array_error - Log a sis64 array error.
2322 * @ioa_cfg: ioa config struct
2323 * @hostrcb: hostrcb struct
2324 *
2325 * Return value:
2326 * none
2327 **/
2328 static void ipr_log_sis64_array_error(struct ipr_ioa_cfg *ioa_cfg,
2329 struct ipr_hostrcb *hostrcb)
2330 {
2331 int i, num_entries;
2332 struct ipr_hostrcb_type_24_error *error;
2333 struct ipr_hostrcb64_array_data_entry *array_entry;
2334 char buffer[IPR_MAX_RES_PATH_LENGTH];
2335 const u8 zero_sn[IPR_SERIAL_NUM_LEN] = { [0 ... IPR_SERIAL_NUM_LEN-1] = '0' };
2336
2337 error = &hostrcb->hcam.u.error64.u.type_24_error;
2338
2339 ipr_err_separator;
2340
2341 ipr_err("RAID %s Array Configuration: %s\n",
2342 error->protection_level,
2343 ipr_format_res_path(ioa_cfg, error->last_res_path,
2344 buffer, sizeof(buffer)));
2345
2346 ipr_err_separator;
2347
2348 array_entry = error->array_member;
2349 num_entries = min_t(u32, error->num_entries,
2350 ARRAY_SIZE(error->array_member));
2351
2352 for (i = 0; i < num_entries; i++, array_entry++) {
2353
2354 if (!memcmp(array_entry->vpd.vpd.sn, zero_sn, IPR_SERIAL_NUM_LEN))
2355 continue;
2356
2357 if (error->exposed_mode_adn == i)
2358 ipr_err("Exposed Array Member %d:\n", i);
2359 else
2360 ipr_err("Array Member %d:\n", i);
2361
2362
2363 ipr_log_ext_vpd(&array_entry->vpd);
2364 ipr_err("Current Location: %s\n",
2365 ipr_format_res_path(ioa_cfg, array_entry->res_path,
2366 buffer, sizeof(buffer)));
2367 ipr_err("Expected Location: %s\n",
2368 ipr_format_res_path(ioa_cfg,
2369 array_entry->expected_res_path,
2370 buffer, sizeof(buffer)));
2371
2372 ipr_err_separator;
2373 }
2374 }
2375
2376 /**
2377 * ipr_log_sis64_fabric_error - Log a sis64 fabric error.
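 *
 * As in ipr_log_fabric_error() above, the fabric descriptors are variable
 * length: each one is advanced by its own length field, and whatever is
 * left of the HCAM after the last descriptor is dumped in hex:
 *
 *   add_len -= be16_to_cpu(fabric->length);
 *   fabric = (struct ipr_hostrcb64_fabric_desc *)
 *            ((unsigned long)fabric + be16_to_cpu(fabric->length));
 *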
2378 * @ioa_cfg: ioa config struct
2379 * @hostrcb: hostrcb struct
2380 *
2381 * Return value:
2382 * none
2383 **/
2384 static void ipr_log_sis64_fabric_error(struct ipr_ioa_cfg *ioa_cfg,
2385 struct ipr_hostrcb *hostrcb)
2386 {
2387 struct ipr_hostrcb_type_30_error *error;
2388 struct ipr_hostrcb64_fabric_desc *fabric;
2389 struct ipr_hostrcb64_config_element *cfg;
2390 int i, add_len;
2391
2392 error = &hostrcb->hcam.u.error64.u.type_30_error;
2393
2394 error->failure_reason[sizeof(error->failure_reason) - 1] = '\0';
2395 ipr_hcam_err(hostrcb, "%s\n", error->failure_reason);
2396
2397 add_len = be32_to_cpu(hostrcb->hcam.length) -
2398 (offsetof(struct ipr_hostrcb64_error, u) +
2399 offsetof(struct ipr_hostrcb_type_30_error, desc));
2400
2401 for (i = 0, fabric = error->desc; i < error->num_entries; i++) {
2402 ipr_log64_fabric_path(hostrcb, fabric);
2403 for_each_fabric_cfg(fabric, cfg)
2404 ipr_log64_path_elem(hostrcb, cfg);
2405
2406 add_len -= be16_to_cpu(fabric->length);
2407 fabric = (struct ipr_hostrcb64_fabric_desc *)
2408 ((unsigned long)fabric + be16_to_cpu(fabric->length));
2409 }
2410
2411 ipr_log_hex_data(ioa_cfg, (__be32 *)fabric, add_len);
2412 }
2413
2414 /**
2415 * ipr_log_sis64_service_required_error - Log a sis64 service required error.
2416 * @ioa_cfg: ioa config struct
2417 * @hostrcb: hostrcb struct
2418 *
2419 * Return value:
2420 * none
2421 **/
2422 static void ipr_log_sis64_service_required_error(struct ipr_ioa_cfg *ioa_cfg,
2423 struct ipr_hostrcb *hostrcb)
2424 {
2425 struct ipr_hostrcb_type_41_error *error;
2426
2427 error = &hostrcb->hcam.u.error64.u.type_41_error;
2428
2429 error->failure_reason[sizeof(error->failure_reason) - 1] = '\0';
2430 ipr_err("Primary Failure Reason: %s\n", error->failure_reason);
2431 ipr_log_hex_data(ioa_cfg, error->data,
2432 be32_to_cpu(hostrcb->hcam.length) -
2433 (offsetof(struct ipr_hostrcb_error, u) +
2434 offsetof(struct ipr_hostrcb_type_41_error, data)));
2435 }
2436 /**
2437 * ipr_log_generic_error - Log an adapter error.
2438 * @ioa_cfg: ioa config struct
2439 * @hostrcb: hostrcb struct
2440 *
2441 * Return value:
2442 * none
2443 **/
2444 static void ipr_log_generic_error(struct ipr_ioa_cfg *ioa_cfg,
2445 struct ipr_hostrcb *hostrcb)
2446 {
2447 ipr_log_hex_data(ioa_cfg, hostrcb->hcam.u.raw.data,
2448 be32_to_cpu(hostrcb->hcam.length));
2449 }
2450
2451 /**
2452 * ipr_log_sis64_device_error - Log a sis64 device error.
2453 * @ioa_cfg: ioa config struct
2454 * @hostrcb: hostrcb struct
2455 *
2456 * Return value:
2457 * none
2458 **/
2459 static void ipr_log_sis64_device_error(struct ipr_ioa_cfg *ioa_cfg,
2460 struct ipr_hostrcb *hostrcb)
2461 {
2462 struct ipr_hostrcb_type_21_error *error;
2463 char buffer[IPR_MAX_RES_PATH_LENGTH];
2464
2465 error = &hostrcb->hcam.u.error64.u.type_21_error;
2466
2467 ipr_err("-----Failing Device Information-----\n");
2468 ipr_err("World Wide Unique ID: %08X%08X%08X%08X\n",
2469 be32_to_cpu(error->wwn[0]), be32_to_cpu(error->wwn[1]),
2470 be32_to_cpu(error->wwn[2]), be32_to_cpu(error->wwn[3]));
2471 ipr_err("Device Resource Path: %s\n",
2472 __ipr_format_res_path(error->res_path,
2473 buffer, sizeof(buffer)));
2474 error->primary_problem_desc[sizeof(error->primary_problem_desc) - 1] = '\0';
2475 error->second_problem_desc[sizeof(error->second_problem_desc) - 1] = '\0';
2476 ipr_err("Primary Problem Description: %s\n", error->primary_problem_desc);
2477 ipr_err("Secondary Problem Description: %s\n", error->second_problem_desc);
2478 ipr_err("SCSI Sense Data:\n");
2479 ipr_log_hex_data(ioa_cfg, error->sense_data, sizeof(error->sense_data));
2480 ipr_err("SCSI Command Descriptor Block: \n");
2481 ipr_log_hex_data(ioa_cfg, error->cdb, sizeof(error->cdb));
2482
2483 ipr_err("Additional IOA Data:\n");
2484 ipr_log_hex_data(ioa_cfg, error->ioa_data, be32_to_cpu(error->length_of_error));
2485 }
2486
2487 /**
2488 * ipr_get_error - Find the specified IOASC in the ipr_error_table.
2489 * @ioasc: IOASC
2490 *
2491 * This function will return the index into the ipr_error_table
2492 * for the specified IOASC. If the IOASC is not in the table,
2493 * 0 will be returned, which points to the entry used for unknown errors.
2494 *
2495 * Return value:
2496 * index into the ipr_error_table
2497 **/
2498 static u32 ipr_get_error(u32 ioasc)
2499 {
2500 int i;
2501
2502 for (i = 0; i < ARRAY_SIZE(ipr_error_table); i++)
2503 if (ipr_error_table[i].ioasc == (ioasc & IPR_IOASC_IOASC_MASK))
2504 return i;
2505
2506 return 0;
2507 }
2508
2509 /**
2510 * ipr_handle_log_data - Log an adapter error.
2511 * @ioa_cfg: ioa config struct
2512 * @hostrcb: hostrcb struct
2513 *
2514 * This function logs an adapter error to the system.
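 *
 * The message text comes from ipr_error_table: the IOASC is masked with
 * IPR_IOASC_IOASC_MASK and looked up in the table, and index 0 acts as
 * the catch-all entry for IOASCs that are not listed. The lookup boils
 * down to:
 *
 *   error_index = ipr_get_error(ioasc);
 *   ipr_hcam_err(hostrcb, "%s\n", ipr_error_table[error_index].error);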
2515 * 2516 * Return value: 2517 * none 2518 **/ 2519 static void ipr_handle_log_data(struct ipr_ioa_cfg *ioa_cfg, 2520 struct ipr_hostrcb *hostrcb) 2521 { 2522 u32 ioasc; 2523 int error_index; 2524 struct ipr_hostrcb_type_21_error *error; 2525 2526 if (hostrcb->hcam.notify_type != IPR_HOST_RCB_NOTIF_TYPE_ERROR_LOG_ENTRY) 2527 return; 2528 2529 if (hostrcb->hcam.notifications_lost == IPR_HOST_RCB_NOTIFICATIONS_LOST) 2530 dev_err(&ioa_cfg->pdev->dev, "Error notifications lost\n"); 2531 2532 if (ioa_cfg->sis64) 2533 ioasc = be32_to_cpu(hostrcb->hcam.u.error64.fd_ioasc); 2534 else 2535 ioasc = be32_to_cpu(hostrcb->hcam.u.error.fd_ioasc); 2536 2537 if (!ioa_cfg->sis64 && (ioasc == IPR_IOASC_BUS_WAS_RESET || 2538 ioasc == IPR_IOASC_BUS_WAS_RESET_BY_OTHER)) { 2539 /* Tell the midlayer we had a bus reset so it will handle the UA properly */ 2540 scsi_report_bus_reset(ioa_cfg->host, 2541 hostrcb->hcam.u.error.fd_res_addr.bus); 2542 } 2543 2544 error_index = ipr_get_error(ioasc); 2545 2546 if (!ipr_error_table[error_index].log_hcam) 2547 return; 2548 2549 if (ioasc == IPR_IOASC_HW_CMD_FAILED && 2550 hostrcb->hcam.overlay_id == IPR_HOST_RCB_OVERLAY_ID_21) { 2551 error = &hostrcb->hcam.u.error64.u.type_21_error; 2552 2553 if (((be32_to_cpu(error->sense_data[0]) & 0x0000ff00) >> 8) == ILLEGAL_REQUEST && 2554 ioa_cfg->log_level <= IPR_DEFAULT_LOG_LEVEL) 2555 return; 2556 } 2557 2558 ipr_hcam_err(hostrcb, "%s\n", ipr_error_table[error_index].error); 2559 2560 /* Set indication we have logged an error */ 2561 ioa_cfg->errors_logged++; 2562 2563 if (ioa_cfg->log_level < ipr_error_table[error_index].log_hcam) 2564 return; 2565 if (be32_to_cpu(hostrcb->hcam.length) > sizeof(hostrcb->hcam.u.raw)) 2566 hostrcb->hcam.length = cpu_to_be32(sizeof(hostrcb->hcam.u.raw)); 2567 2568 switch (hostrcb->hcam.overlay_id) { 2569 case IPR_HOST_RCB_OVERLAY_ID_2: 2570 ipr_log_cache_error(ioa_cfg, hostrcb); 2571 break; 2572 case IPR_HOST_RCB_OVERLAY_ID_3: 2573 ipr_log_config_error(ioa_cfg, hostrcb); 2574 break; 2575 case IPR_HOST_RCB_OVERLAY_ID_4: 2576 case IPR_HOST_RCB_OVERLAY_ID_6: 2577 ipr_log_array_error(ioa_cfg, hostrcb); 2578 break; 2579 case IPR_HOST_RCB_OVERLAY_ID_7: 2580 ipr_log_dual_ioa_error(ioa_cfg, hostrcb); 2581 break; 2582 case IPR_HOST_RCB_OVERLAY_ID_12: 2583 ipr_log_enhanced_cache_error(ioa_cfg, hostrcb); 2584 break; 2585 case IPR_HOST_RCB_OVERLAY_ID_13: 2586 ipr_log_enhanced_config_error(ioa_cfg, hostrcb); 2587 break; 2588 case IPR_HOST_RCB_OVERLAY_ID_14: 2589 case IPR_HOST_RCB_OVERLAY_ID_16: 2590 ipr_log_enhanced_array_error(ioa_cfg, hostrcb); 2591 break; 2592 case IPR_HOST_RCB_OVERLAY_ID_17: 2593 ipr_log_enhanced_dual_ioa_error(ioa_cfg, hostrcb); 2594 break; 2595 case IPR_HOST_RCB_OVERLAY_ID_20: 2596 ipr_log_fabric_error(ioa_cfg, hostrcb); 2597 break; 2598 case IPR_HOST_RCB_OVERLAY_ID_21: 2599 ipr_log_sis64_device_error(ioa_cfg, hostrcb); 2600 break; 2601 case IPR_HOST_RCB_OVERLAY_ID_23: 2602 ipr_log_sis64_config_error(ioa_cfg, hostrcb); 2603 break; 2604 case IPR_HOST_RCB_OVERLAY_ID_24: 2605 case IPR_HOST_RCB_OVERLAY_ID_26: 2606 ipr_log_sis64_array_error(ioa_cfg, hostrcb); 2607 break; 2608 case IPR_HOST_RCB_OVERLAY_ID_30: 2609 ipr_log_sis64_fabric_error(ioa_cfg, hostrcb); 2610 break; 2611 case IPR_HOST_RCB_OVERLAY_ID_41: 2612 ipr_log_sis64_service_required_error(ioa_cfg, hostrcb); 2613 break; 2614 case IPR_HOST_RCB_OVERLAY_ID_1: 2615 case IPR_HOST_RCB_OVERLAY_ID_DEFAULT: 2616 default: 2617 ipr_log_generic_error(ioa_cfg, hostrcb); 2618 break; 2619 } 2620 } 2621 2622 static struct ipr_hostrcb 
*ipr_get_free_hostrcb(struct ipr_ioa_cfg *ioa) 2623 { 2624 struct ipr_hostrcb *hostrcb; 2625 2626 hostrcb = list_first_entry_or_null(&ioa->hostrcb_free_q, 2627 struct ipr_hostrcb, queue); 2628 2629 if (unlikely(!hostrcb)) { 2630 dev_info(&ioa->pdev->dev, "Reclaiming async error buffers."); 2631 hostrcb = list_first_entry_or_null(&ioa->hostrcb_report_q, 2632 struct ipr_hostrcb, queue); 2633 } 2634 2635 list_del_init(&hostrcb->queue); 2636 return hostrcb; 2637 } 2638 2639 /** 2640 * ipr_process_error - Op done function for an adapter error log. 2641 * @ipr_cmd: ipr command struct 2642 * 2643 * This function is the op done function for an error log host 2644 * controlled async from the adapter. It will log the error and 2645 * send the HCAM back to the adapter. 2646 * 2647 * Return value: 2648 * none 2649 **/ 2650 static void ipr_process_error(struct ipr_cmnd *ipr_cmd) 2651 { 2652 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; 2653 struct ipr_hostrcb *hostrcb = ipr_cmd->u.hostrcb; 2654 u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc); 2655 u32 fd_ioasc; 2656 2657 if (ioa_cfg->sis64) 2658 fd_ioasc = be32_to_cpu(hostrcb->hcam.u.error64.fd_ioasc); 2659 else 2660 fd_ioasc = be32_to_cpu(hostrcb->hcam.u.error.fd_ioasc); 2661 2662 list_del_init(&hostrcb->queue); 2663 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q); 2664 2665 if (!ioasc) { 2666 ipr_handle_log_data(ioa_cfg, hostrcb); 2667 if (fd_ioasc == IPR_IOASC_NR_IOA_RESET_REQUIRED) 2668 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_ABBREV); 2669 } else if (ioasc != IPR_IOASC_IOA_WAS_RESET && 2670 ioasc != IPR_IOASC_ABORTED_CMD_TERM_BY_HOST) { 2671 dev_err(&ioa_cfg->pdev->dev, 2672 "Host RCB failed with IOASC: 0x%08X\n", ioasc); 2673 } 2674 2675 list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_report_q); 2676 schedule_work(&ioa_cfg->work_q); 2677 hostrcb = ipr_get_free_hostrcb(ioa_cfg); 2678 2679 ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_LOG_DATA, hostrcb); 2680 } 2681 2682 /** 2683 * ipr_timeout - An internally generated op has timed out. 2684 * @ipr_cmd: ipr command struct 2685 * 2686 * This function blocks host requests and initiates an 2687 * adapter reset. 2688 * 2689 * Return value: 2690 * none 2691 **/ 2692 static void ipr_timeout(struct timer_list *t) 2693 { 2694 struct ipr_cmnd *ipr_cmd = from_timer(ipr_cmd, t, timer); 2695 unsigned long lock_flags = 0; 2696 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; 2697 2698 ENTER; 2699 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); 2700 2701 ioa_cfg->errors_logged++; 2702 dev_err(&ioa_cfg->pdev->dev, 2703 "Adapter being reset due to command timeout.\n"); 2704 2705 if (WAIT_FOR_DUMP == ioa_cfg->sdt_state) 2706 ioa_cfg->sdt_state = GET_DUMP; 2707 2708 if (!ioa_cfg->in_reset_reload || ioa_cfg->reset_cmd == ipr_cmd) 2709 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE); 2710 2711 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); 2712 LEAVE; 2713 } 2714 2715 /** 2716 * ipr_oper_timeout - Adapter timed out transitioning to operational 2717 * @ipr_cmd: ipr command struct 2718 * 2719 * This function blocks host requests and initiates an 2720 * adapter reset. 
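 *
 * As in ipr_timeout() above, the command block is recovered from the timer
 * context with from_timer(), which is a container_of() on the embedded
 * timer:
 *
 *   struct ipr_cmnd *ipr_cmd = from_timer(ipr_cmd, t, timer);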
2721 * 2722 * Return value: 2723 * none 2724 **/ 2725 static void ipr_oper_timeout(struct timer_list *t) 2726 { 2727 struct ipr_cmnd *ipr_cmd = from_timer(ipr_cmd, t, timer); 2728 unsigned long lock_flags = 0; 2729 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; 2730 2731 ENTER; 2732 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); 2733 2734 ioa_cfg->errors_logged++; 2735 dev_err(&ioa_cfg->pdev->dev, 2736 "Adapter timed out transitioning to operational.\n"); 2737 2738 if (WAIT_FOR_DUMP == ioa_cfg->sdt_state) 2739 ioa_cfg->sdt_state = GET_DUMP; 2740 2741 if (!ioa_cfg->in_reset_reload || ioa_cfg->reset_cmd == ipr_cmd) { 2742 if (ipr_fastfail) 2743 ioa_cfg->reset_retries += IPR_NUM_RESET_RELOAD_RETRIES; 2744 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE); 2745 } 2746 2747 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); 2748 LEAVE; 2749 } 2750 2751 /** 2752 * ipr_find_ses_entry - Find matching SES in SES table 2753 * @res: resource entry struct of SES 2754 * 2755 * Return value: 2756 * pointer to SES table entry / NULL on failure 2757 **/ 2758 static const struct ipr_ses_table_entry * 2759 ipr_find_ses_entry(struct ipr_resource_entry *res) 2760 { 2761 int i, j, matches; 2762 struct ipr_std_inq_vpids *vpids; 2763 const struct ipr_ses_table_entry *ste = ipr_ses_table; 2764 2765 for (i = 0; i < ARRAY_SIZE(ipr_ses_table); i++, ste++) { 2766 for (j = 0, matches = 0; j < IPR_PROD_ID_LEN; j++) { 2767 if (ste->compare_product_id_byte[j] == 'X') { 2768 vpids = &res->std_inq_data.vpids; 2769 if (vpids->product_id[j] == ste->product_id[j]) 2770 matches++; 2771 else 2772 break; 2773 } else 2774 matches++; 2775 } 2776 2777 if (matches == IPR_PROD_ID_LEN) 2778 return ste; 2779 } 2780 2781 return NULL; 2782 } 2783 2784 /** 2785 * ipr_get_max_scsi_speed - Determine max SCSI speed for a given bus 2786 * @ioa_cfg: ioa config struct 2787 * @bus: SCSI bus 2788 * @bus_width: bus width 2789 * 2790 * Return value: 2791 * SCSI bus speed in units of 100KHz, 1600 is 160 MHz 2792 * For a 2-byte wide SCSI bus, the maximum transfer speed is 2793 * twice the maximum transfer rate (e.g. for a wide enabled bus, 2794 * max 160MHz = max 320MB/sec). 2795 **/ 2796 static u32 ipr_get_max_scsi_speed(struct ipr_ioa_cfg *ioa_cfg, u8 bus, u8 bus_width) 2797 { 2798 struct ipr_resource_entry *res; 2799 const struct ipr_ses_table_entry *ste; 2800 u32 max_xfer_rate = IPR_MAX_SCSI_RATE(bus_width); 2801 2802 /* Loop through each config table entry in the config table buffer */ 2803 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) { 2804 if (!(IPR_IS_SES_DEVICE(res->std_inq_data))) 2805 continue; 2806 2807 if (bus != res->bus) 2808 continue; 2809 2810 if (!(ste = ipr_find_ses_entry(res))) 2811 continue; 2812 2813 max_xfer_rate = (ste->max_bus_speed_limit * 10) / (bus_width / 8); 2814 } 2815 2816 return max_xfer_rate; 2817 } 2818 2819 /** 2820 * ipr_wait_iodbg_ack - Wait for an IODEBUG ACK from the IOA 2821 * @ioa_cfg: ioa config struct 2822 * @max_delay: max delay in micro-seconds to wait 2823 * 2824 * Waits for an IODEBUG ACK from the IOA, doing busy looping. 
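 *
 * The poll interval starts at 1 microsecond and doubles on each pass
 * (1, 2, 4, 8, ... us) until @max_delay is reached; udelay() is used for
 * short intervals and mdelay() once the interval exceeds a few
 * milliseconds.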
2825 * 2826 * Return value: 2827 * 0 on success / other on failure 2828 **/ 2829 static int ipr_wait_iodbg_ack(struct ipr_ioa_cfg *ioa_cfg, int max_delay) 2830 { 2831 volatile u32 pcii_reg; 2832 int delay = 1; 2833 2834 /* Read interrupt reg until IOA signals IO Debug Acknowledge */ 2835 while (delay < max_delay) { 2836 pcii_reg = readl(ioa_cfg->regs.sense_interrupt_reg); 2837 2838 if (pcii_reg & IPR_PCII_IO_DEBUG_ACKNOWLEDGE) 2839 return 0; 2840 2841 /* udelay cannot be used if delay is more than a few milliseconds */ 2842 if ((delay / 1000) > MAX_UDELAY_MS) 2843 mdelay(delay / 1000); 2844 else 2845 udelay(delay); 2846 2847 delay += delay; 2848 } 2849 return -EIO; 2850 } 2851 2852 /** 2853 * ipr_get_sis64_dump_data_section - Dump IOA memory 2854 * @ioa_cfg: ioa config struct 2855 * @start_addr: adapter address to dump 2856 * @dest: destination kernel buffer 2857 * @length_in_words: length to dump in 4 byte words 2858 * 2859 * Return value: 2860 * 0 on success 2861 **/ 2862 static int ipr_get_sis64_dump_data_section(struct ipr_ioa_cfg *ioa_cfg, 2863 u32 start_addr, 2864 __be32 *dest, u32 length_in_words) 2865 { 2866 int i; 2867 2868 for (i = 0; i < length_in_words; i++) { 2869 writel(start_addr+(i*4), ioa_cfg->regs.dump_addr_reg); 2870 *dest = cpu_to_be32(readl(ioa_cfg->regs.dump_data_reg)); 2871 dest++; 2872 } 2873 2874 return 0; 2875 } 2876 2877 /** 2878 * ipr_get_ldump_data_section - Dump IOA memory 2879 * @ioa_cfg: ioa config struct 2880 * @start_addr: adapter address to dump 2881 * @dest: destination kernel buffer 2882 * @length_in_words: length to dump in 4 byte words 2883 * 2884 * Return value: 2885 * 0 on success / -EIO on failure 2886 **/ 2887 static int ipr_get_ldump_data_section(struct ipr_ioa_cfg *ioa_cfg, 2888 u32 start_addr, 2889 __be32 *dest, u32 length_in_words) 2890 { 2891 volatile u32 temp_pcii_reg; 2892 int i, delay = 0; 2893 2894 if (ioa_cfg->sis64) 2895 return ipr_get_sis64_dump_data_section(ioa_cfg, start_addr, 2896 dest, length_in_words); 2897 2898 /* Write IOA interrupt reg starting LDUMP state */ 2899 writel((IPR_UPROCI_RESET_ALERT | IPR_UPROCI_IO_DEBUG_ALERT), 2900 ioa_cfg->regs.set_uproc_interrupt_reg32); 2901 2902 /* Wait for IO debug acknowledge */ 2903 if (ipr_wait_iodbg_ack(ioa_cfg, 2904 IPR_LDUMP_MAX_LONG_ACK_DELAY_IN_USEC)) { 2905 dev_err(&ioa_cfg->pdev->dev, 2906 "IOA dump long data transfer timeout\n"); 2907 return -EIO; 2908 } 2909 2910 /* Signal LDUMP interlocked - clear IO debug ack */ 2911 writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE, 2912 ioa_cfg->regs.clr_interrupt_reg); 2913 2914 /* Write Mailbox with starting address */ 2915 writel(start_addr, ioa_cfg->ioa_mailbox); 2916 2917 /* Signal address valid - clear IOA Reset alert */ 2918 writel(IPR_UPROCI_RESET_ALERT, 2919 ioa_cfg->regs.clr_uproc_interrupt_reg32); 2920 2921 for (i = 0; i < length_in_words; i++) { 2922 /* Wait for IO debug acknowledge */ 2923 if (ipr_wait_iodbg_ack(ioa_cfg, 2924 IPR_LDUMP_MAX_SHORT_ACK_DELAY_IN_USEC)) { 2925 dev_err(&ioa_cfg->pdev->dev, 2926 "IOA dump short data transfer timeout\n"); 2927 return -EIO; 2928 } 2929 2930 /* Read data from mailbox and increment destination pointer */ 2931 *dest = cpu_to_be32(readl(ioa_cfg->ioa_mailbox)); 2932 dest++; 2933 2934 /* For all but the last word of data, signal data received */ 2935 if (i < (length_in_words - 1)) { 2936 /* Signal dump data received - Clear IO debug Ack */ 2937 writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE, 2938 ioa_cfg->regs.clr_interrupt_reg); 2939 } 2940 } 2941 2942 /* Signal end of block transfer. 
Set reset alert then clear IO debug ack */ 2943 writel(IPR_UPROCI_RESET_ALERT, 2944 ioa_cfg->regs.set_uproc_interrupt_reg32); 2945 2946 writel(IPR_UPROCI_IO_DEBUG_ALERT, 2947 ioa_cfg->regs.clr_uproc_interrupt_reg32); 2948 2949 /* Signal dump data received - Clear IO debug Ack */ 2950 writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE, 2951 ioa_cfg->regs.clr_interrupt_reg); 2952 2953 /* Wait for IOA to signal LDUMP exit - IOA reset alert will be cleared */ 2954 while (delay < IPR_LDUMP_MAX_SHORT_ACK_DELAY_IN_USEC) { 2955 temp_pcii_reg = 2956 readl(ioa_cfg->regs.sense_uproc_interrupt_reg32); 2957 2958 if (!(temp_pcii_reg & IPR_UPROCI_RESET_ALERT)) 2959 return 0; 2960 2961 udelay(10); 2962 delay += 10; 2963 } 2964 2965 return 0; 2966 } 2967 2968 #ifdef CONFIG_SCSI_IPR_DUMP 2969 /** 2970 * ipr_sdt_copy - Copy Smart Dump Table to kernel buffer 2971 * @ioa_cfg: ioa config struct 2972 * @pci_address: adapter address 2973 * @length: length of data to copy 2974 * 2975 * Copy data from PCI adapter to kernel buffer. 2976 * Note: length MUST be a 4 byte multiple 2977 * Return value: 2978 * 0 on success / other on failure 2979 **/ 2980 static int ipr_sdt_copy(struct ipr_ioa_cfg *ioa_cfg, 2981 unsigned long pci_address, u32 length) 2982 { 2983 int bytes_copied = 0; 2984 int cur_len, rc, rem_len, rem_page_len, max_dump_size; 2985 __be32 *page; 2986 unsigned long lock_flags = 0; 2987 struct ipr_ioa_dump *ioa_dump = &ioa_cfg->dump->ioa_dump; 2988 2989 if (ioa_cfg->sis64) 2990 max_dump_size = IPR_FMT3_MAX_IOA_DUMP_SIZE; 2991 else 2992 max_dump_size = IPR_FMT2_MAX_IOA_DUMP_SIZE; 2993 2994 while (bytes_copied < length && 2995 (ioa_dump->hdr.len + bytes_copied) < max_dump_size) { 2996 if (ioa_dump->page_offset >= PAGE_SIZE || 2997 ioa_dump->page_offset == 0) { 2998 page = (__be32 *)__get_free_page(GFP_ATOMIC); 2999 3000 if (!page) { 3001 ipr_trace; 3002 return bytes_copied; 3003 } 3004 3005 ioa_dump->page_offset = 0; 3006 ioa_dump->ioa_data[ioa_dump->next_page_index] = page; 3007 ioa_dump->next_page_index++; 3008 } else 3009 page = ioa_dump->ioa_data[ioa_dump->next_page_index - 1]; 3010 3011 rem_len = length - bytes_copied; 3012 rem_page_len = PAGE_SIZE - ioa_dump->page_offset; 3013 cur_len = min(rem_len, rem_page_len); 3014 3015 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); 3016 if (ioa_cfg->sdt_state == ABORT_DUMP) { 3017 rc = -EIO; 3018 } else { 3019 rc = ipr_get_ldump_data_section(ioa_cfg, 3020 pci_address + bytes_copied, 3021 &page[ioa_dump->page_offset / 4], 3022 (cur_len / sizeof(u32))); 3023 } 3024 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); 3025 3026 if (!rc) { 3027 ioa_dump->page_offset += cur_len; 3028 bytes_copied += cur_len; 3029 } else { 3030 ipr_trace; 3031 break; 3032 } 3033 schedule(); 3034 } 3035 3036 return bytes_copied; 3037 } 3038 3039 /** 3040 * ipr_init_dump_entry_hdr - Initialize a dump entry header. 3041 * @hdr: dump entry header struct 3042 * 3043 * Return value: 3044 * nothing 3045 **/ 3046 static void ipr_init_dump_entry_hdr(struct ipr_dump_entry_header *hdr) 3047 { 3048 hdr->eye_catcher = IPR_DUMP_EYE_CATCHER; 3049 hdr->num_elems = 1; 3050 hdr->offset = sizeof(*hdr); 3051 hdr->status = IPR_DUMP_STATUS_SUCCESS; 3052 } 3053 3054 /** 3055 * ipr_dump_ioa_type_data - Fill in the adapter type in the dump. 
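 *
 * The firmware version is packed into a single 32 bit word from the page 3
 * inquiry VPD, most significant byte first. With hypothetical VPD bytes
 * major_release = 0x02, card_type = 0x58 and minor_release = { 0x44, 0x4B }:
 *
 *   fw_version = (0x02 << 24) | (0x58 << 16) | (0x44 << 8) | 0x4B
 *              = 0x0258444B
 *
 * which is the same byte order used by the fw_version sysfs attribute.
 *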
3056 * @ioa_cfg: ioa config struct 3057 * @driver_dump: driver dump struct 3058 * 3059 * Return value: 3060 * nothing 3061 **/ 3062 static void ipr_dump_ioa_type_data(struct ipr_ioa_cfg *ioa_cfg, 3063 struct ipr_driver_dump *driver_dump) 3064 { 3065 struct ipr_inquiry_page3 *ucode_vpd = &ioa_cfg->vpd_cbs->page3_data; 3066 3067 ipr_init_dump_entry_hdr(&driver_dump->ioa_type_entry.hdr); 3068 driver_dump->ioa_type_entry.hdr.len = 3069 sizeof(struct ipr_dump_ioa_type_entry) - 3070 sizeof(struct ipr_dump_entry_header); 3071 driver_dump->ioa_type_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_BINARY; 3072 driver_dump->ioa_type_entry.hdr.id = IPR_DUMP_DRIVER_TYPE_ID; 3073 driver_dump->ioa_type_entry.type = ioa_cfg->type; 3074 driver_dump->ioa_type_entry.fw_version = (ucode_vpd->major_release << 24) | 3075 (ucode_vpd->card_type << 16) | (ucode_vpd->minor_release[0] << 8) | 3076 ucode_vpd->minor_release[1]; 3077 driver_dump->hdr.num_entries++; 3078 } 3079 3080 /** 3081 * ipr_dump_version_data - Fill in the driver version in the dump. 3082 * @ioa_cfg: ioa config struct 3083 * @driver_dump: driver dump struct 3084 * 3085 * Return value: 3086 * nothing 3087 **/ 3088 static void ipr_dump_version_data(struct ipr_ioa_cfg *ioa_cfg, 3089 struct ipr_driver_dump *driver_dump) 3090 { 3091 ipr_init_dump_entry_hdr(&driver_dump->version_entry.hdr); 3092 driver_dump->version_entry.hdr.len = 3093 sizeof(struct ipr_dump_version_entry) - 3094 sizeof(struct ipr_dump_entry_header); 3095 driver_dump->version_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_ASCII; 3096 driver_dump->version_entry.hdr.id = IPR_DUMP_DRIVER_VERSION_ID; 3097 strcpy(driver_dump->version_entry.version, IPR_DRIVER_VERSION); 3098 driver_dump->hdr.num_entries++; 3099 } 3100 3101 /** 3102 * ipr_dump_trace_data - Fill in the IOA trace in the dump. 3103 * @ioa_cfg: ioa config struct 3104 * @driver_dump: driver dump struct 3105 * 3106 * Return value: 3107 * nothing 3108 **/ 3109 static void ipr_dump_trace_data(struct ipr_ioa_cfg *ioa_cfg, 3110 struct ipr_driver_dump *driver_dump) 3111 { 3112 ipr_init_dump_entry_hdr(&driver_dump->trace_entry.hdr); 3113 driver_dump->trace_entry.hdr.len = 3114 sizeof(struct ipr_dump_trace_entry) - 3115 sizeof(struct ipr_dump_entry_header); 3116 driver_dump->trace_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_BINARY; 3117 driver_dump->trace_entry.hdr.id = IPR_DUMP_TRACE_ID; 3118 memcpy(driver_dump->trace_entry.trace, ioa_cfg->trace, IPR_TRACE_SIZE); 3119 driver_dump->hdr.num_entries++; 3120 } 3121 3122 /** 3123 * ipr_dump_location_data - Fill in the IOA location in the dump. 3124 * @ioa_cfg: ioa config struct 3125 * @driver_dump: driver dump struct 3126 * 3127 * Return value: 3128 * nothing 3129 **/ 3130 static void ipr_dump_location_data(struct ipr_ioa_cfg *ioa_cfg, 3131 struct ipr_driver_dump *driver_dump) 3132 { 3133 ipr_init_dump_entry_hdr(&driver_dump->location_entry.hdr); 3134 driver_dump->location_entry.hdr.len = 3135 sizeof(struct ipr_dump_location_entry) - 3136 sizeof(struct ipr_dump_entry_header); 3137 driver_dump->location_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_ASCII; 3138 driver_dump->location_entry.hdr.id = IPR_DUMP_LOCATION_ID; 3139 strcpy(driver_dump->location_entry.location, dev_name(&ioa_cfg->pdev->dev)); 3140 driver_dump->hdr.num_entries++; 3141 } 3142 3143 /** 3144 * ipr_get_ioa_dump - Perform a dump of the driver and adapter. 
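 *
 * The dump is assembled as a driver header followed by the version,
 * location, adapter type and trace entries filled in by the helpers above,
 * and finally an IOA dump entry whose data is read from the adapter one
 * Smart Dump Table section at a time.
 *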
3145 * @ioa_cfg: ioa config struct 3146 * @dump: dump struct 3147 * 3148 * Return value: 3149 * nothing 3150 **/ 3151 static void ipr_get_ioa_dump(struct ipr_ioa_cfg *ioa_cfg, struct ipr_dump *dump) 3152 { 3153 unsigned long start_addr, sdt_word; 3154 unsigned long lock_flags = 0; 3155 struct ipr_driver_dump *driver_dump = &dump->driver_dump; 3156 struct ipr_ioa_dump *ioa_dump = &dump->ioa_dump; 3157 u32 num_entries, max_num_entries, start_off, end_off; 3158 u32 max_dump_size, bytes_to_copy, bytes_copied, rc; 3159 struct ipr_sdt *sdt; 3160 int valid = 1; 3161 int i; 3162 3163 ENTER; 3164 3165 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); 3166 3167 if (ioa_cfg->sdt_state != READ_DUMP) { 3168 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); 3169 return; 3170 } 3171 3172 if (ioa_cfg->sis64) { 3173 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); 3174 ssleep(IPR_DUMP_DELAY_SECONDS); 3175 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); 3176 } 3177 3178 start_addr = readl(ioa_cfg->ioa_mailbox); 3179 3180 if (!ioa_cfg->sis64 && !ipr_sdt_is_fmt2(start_addr)) { 3181 dev_err(&ioa_cfg->pdev->dev, 3182 "Invalid dump table format: %lx\n", start_addr); 3183 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); 3184 return; 3185 } 3186 3187 dev_err(&ioa_cfg->pdev->dev, "Dump of IOA initiated\n"); 3188 3189 driver_dump->hdr.eye_catcher = IPR_DUMP_EYE_CATCHER; 3190 3191 /* Initialize the overall dump header */ 3192 driver_dump->hdr.len = sizeof(struct ipr_driver_dump); 3193 driver_dump->hdr.num_entries = 1; 3194 driver_dump->hdr.first_entry_offset = sizeof(struct ipr_dump_header); 3195 driver_dump->hdr.status = IPR_DUMP_STATUS_SUCCESS; 3196 driver_dump->hdr.os = IPR_DUMP_OS_LINUX; 3197 driver_dump->hdr.driver_name = IPR_DUMP_DRIVER_NAME; 3198 3199 ipr_dump_version_data(ioa_cfg, driver_dump); 3200 ipr_dump_location_data(ioa_cfg, driver_dump); 3201 ipr_dump_ioa_type_data(ioa_cfg, driver_dump); 3202 ipr_dump_trace_data(ioa_cfg, driver_dump); 3203 3204 /* Update dump_header */ 3205 driver_dump->hdr.len += sizeof(struct ipr_dump_entry_header); 3206 3207 /* IOA Dump entry */ 3208 ipr_init_dump_entry_hdr(&ioa_dump->hdr); 3209 ioa_dump->hdr.len = 0; 3210 ioa_dump->hdr.data_type = IPR_DUMP_DATA_TYPE_BINARY; 3211 ioa_dump->hdr.id = IPR_DUMP_IOA_DUMP_ID; 3212 3213 /* First entries in sdt are actually a list of dump addresses and 3214 lengths to gather the real dump data. sdt represents the pointer 3215 to the ioa generated dump table. Dump data will be extracted based 3216 on entries in this table */ 3217 sdt = &ioa_dump->sdt; 3218 3219 if (ioa_cfg->sis64) { 3220 max_num_entries = IPR_FMT3_NUM_SDT_ENTRIES; 3221 max_dump_size = IPR_FMT3_MAX_IOA_DUMP_SIZE; 3222 } else { 3223 max_num_entries = IPR_FMT2_NUM_SDT_ENTRIES; 3224 max_dump_size = IPR_FMT2_MAX_IOA_DUMP_SIZE; 3225 } 3226 3227 bytes_to_copy = offsetof(struct ipr_sdt, entry) + 3228 (max_num_entries * sizeof(struct ipr_sdt_entry)); 3229 rc = ipr_get_ldump_data_section(ioa_cfg, start_addr, (__be32 *)sdt, 3230 bytes_to_copy / sizeof(__be32)); 3231 3232 /* Smart Dump table is ready to use and the first entry is valid */ 3233 if (rc || ((be32_to_cpu(sdt->hdr.state) != IPR_FMT3_SDT_READY_TO_USE) && 3234 (be32_to_cpu(sdt->hdr.state) != IPR_FMT2_SDT_READY_TO_USE))) { 3235 dev_err(&ioa_cfg->pdev->dev, 3236 "Dump of IOA failed. 
Dump table not valid: %d, %X.\n", 3237 rc, be32_to_cpu(sdt->hdr.state)); 3238 driver_dump->hdr.status = IPR_DUMP_STATUS_FAILED; 3239 ioa_cfg->sdt_state = DUMP_OBTAINED; 3240 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); 3241 return; 3242 } 3243 3244 num_entries = be32_to_cpu(sdt->hdr.num_entries_used); 3245 3246 if (num_entries > max_num_entries) 3247 num_entries = max_num_entries; 3248 3249 /* Update dump length to the actual data to be copied */ 3250 dump->driver_dump.hdr.len += sizeof(struct ipr_sdt_header); 3251 if (ioa_cfg->sis64) 3252 dump->driver_dump.hdr.len += num_entries * sizeof(struct ipr_sdt_entry); 3253 else 3254 dump->driver_dump.hdr.len += max_num_entries * sizeof(struct ipr_sdt_entry); 3255 3256 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); 3257 3258 for (i = 0; i < num_entries; i++) { 3259 if (ioa_dump->hdr.len > max_dump_size) { 3260 driver_dump->hdr.status = IPR_DUMP_STATUS_QUAL_SUCCESS; 3261 break; 3262 } 3263 3264 if (sdt->entry[i].flags & IPR_SDT_VALID_ENTRY) { 3265 sdt_word = be32_to_cpu(sdt->entry[i].start_token); 3266 if (ioa_cfg->sis64) 3267 bytes_to_copy = be32_to_cpu(sdt->entry[i].end_token); 3268 else { 3269 start_off = sdt_word & IPR_FMT2_MBX_ADDR_MASK; 3270 end_off = be32_to_cpu(sdt->entry[i].end_token); 3271 3272 if (ipr_sdt_is_fmt2(sdt_word) && sdt_word) 3273 bytes_to_copy = end_off - start_off; 3274 else 3275 valid = 0; 3276 } 3277 if (valid) { 3278 if (bytes_to_copy > max_dump_size) { 3279 sdt->entry[i].flags &= ~IPR_SDT_VALID_ENTRY; 3280 continue; 3281 } 3282 3283 /* Copy data from adapter to driver buffers */ 3284 bytes_copied = ipr_sdt_copy(ioa_cfg, sdt_word, 3285 bytes_to_copy); 3286 3287 ioa_dump->hdr.len += bytes_copied; 3288 3289 if (bytes_copied != bytes_to_copy) { 3290 driver_dump->hdr.status = IPR_DUMP_STATUS_QUAL_SUCCESS; 3291 break; 3292 } 3293 } 3294 } 3295 } 3296 3297 dev_err(&ioa_cfg->pdev->dev, "Dump of IOA completed.\n"); 3298 3299 /* Update dump_header */ 3300 driver_dump->hdr.len += ioa_dump->hdr.len; 3301 wmb(); 3302 ioa_cfg->sdt_state = DUMP_OBTAINED; 3303 LEAVE; 3304 } 3305 3306 #else 3307 #define ipr_get_ioa_dump(ioa_cfg, dump) do { } while (0) 3308 #endif 3309 3310 /** 3311 * ipr_release_dump - Free adapter dump memory 3312 * @kref: kref struct 3313 * 3314 * Return value: 3315 * nothing 3316 **/ 3317 static void ipr_release_dump(struct kref *kref) 3318 { 3319 struct ipr_dump *dump = container_of(kref, struct ipr_dump, kref); 3320 struct ipr_ioa_cfg *ioa_cfg = dump->ioa_cfg; 3321 unsigned long lock_flags = 0; 3322 int i; 3323 3324 ENTER; 3325 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); 3326 ioa_cfg->dump = NULL; 3327 ioa_cfg->sdt_state = INACTIVE; 3328 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); 3329 3330 for (i = 0; i < dump->ioa_dump.next_page_index; i++) 3331 free_page((unsigned long) dump->ioa_dump.ioa_data[i]); 3332 3333 vfree(dump->ioa_dump.ioa_data); 3334 kfree(dump); 3335 LEAVE; 3336 } 3337 3338 static void ipr_add_remove_thread(struct work_struct *work) 3339 { 3340 unsigned long lock_flags; 3341 struct ipr_resource_entry *res; 3342 struct scsi_device *sdev; 3343 struct ipr_ioa_cfg *ioa_cfg = 3344 container_of(work, struct ipr_ioa_cfg, scsi_add_work_q); 3345 u8 bus, target, lun; 3346 int did_work; 3347 3348 ENTER; 3349 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); 3350 3351 restart: 3352 do { 3353 did_work = 0; 3354 if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].allow_cmds) { 3355 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); 3356 return; 3357 } 
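 /*
  * Remove one device flagged for deletion per pass, dropping the
  * host lock around scsi_remove_device(); the enclosing loop repeats
  * until a full scan finds nothing left to remove.
  */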
3358 3359 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) { 3360 if (res->del_from_ml && res->sdev) { 3361 did_work = 1; 3362 sdev = res->sdev; 3363 if (!scsi_device_get(sdev)) { 3364 if (!res->add_to_ml) 3365 list_move_tail(&res->queue, &ioa_cfg->free_res_q); 3366 else 3367 res->del_from_ml = 0; 3368 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); 3369 scsi_remove_device(sdev); 3370 scsi_device_put(sdev); 3371 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); 3372 } 3373 break; 3374 } 3375 } 3376 } while (did_work); 3377 3378 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) { 3379 if (res->add_to_ml) { 3380 bus = res->bus; 3381 target = res->target; 3382 lun = res->lun; 3383 res->add_to_ml = 0; 3384 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); 3385 scsi_add_device(ioa_cfg->host, bus, target, lun); 3386 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); 3387 goto restart; 3388 } 3389 } 3390 3391 ioa_cfg->scan_done = 1; 3392 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); 3393 kobject_uevent(&ioa_cfg->host->shost_dev.kobj, KOBJ_CHANGE); 3394 LEAVE; 3395 } 3396 3397 /** 3398 * ipr_worker_thread - Worker thread 3399 * @work: ioa config struct 3400 * 3401 * Called at task level from a work thread. This function takes care 3402 * of adding and removing device from the mid-layer as configuration 3403 * changes are detected by the adapter. 3404 * 3405 * Return value: 3406 * nothing 3407 **/ 3408 static void ipr_worker_thread(struct work_struct *work) 3409 { 3410 unsigned long lock_flags; 3411 struct ipr_dump *dump; 3412 struct ipr_ioa_cfg *ioa_cfg = 3413 container_of(work, struct ipr_ioa_cfg, work_q); 3414 3415 ENTER; 3416 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); 3417 3418 if (ioa_cfg->sdt_state == READ_DUMP) { 3419 dump = ioa_cfg->dump; 3420 if (!dump) { 3421 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); 3422 return; 3423 } 3424 kref_get(&dump->kref); 3425 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); 3426 ipr_get_ioa_dump(ioa_cfg, dump); 3427 kref_put(&dump->kref, ipr_release_dump); 3428 3429 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); 3430 if (ioa_cfg->sdt_state == DUMP_OBTAINED && !ioa_cfg->dump_timeout) 3431 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE); 3432 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); 3433 return; 3434 } 3435 3436 if (ioa_cfg->scsi_unblock) { 3437 ioa_cfg->scsi_unblock = 0; 3438 ioa_cfg->scsi_blocked = 0; 3439 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); 3440 scsi_unblock_requests(ioa_cfg->host); 3441 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); 3442 if (ioa_cfg->scsi_blocked) 3443 scsi_block_requests(ioa_cfg->host); 3444 } 3445 3446 if (!ioa_cfg->scan_enabled) { 3447 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); 3448 return; 3449 } 3450 3451 schedule_work(&ioa_cfg->scsi_add_work_q); 3452 3453 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); 3454 LEAVE; 3455 } 3456 3457 #ifdef CONFIG_SCSI_IPR_TRACE 3458 /** 3459 * ipr_read_trace - Dump the adapter trace 3460 * @filp: open sysfs file 3461 * @kobj: kobject struct 3462 * @bin_attr: bin_attribute struct 3463 * @buf: buffer 3464 * @off: offset 3465 * @count: buffer size 3466 * 3467 * Return value: 3468 * number of bytes printed to buffer 3469 **/ 3470 static ssize_t ipr_read_trace(struct file *filp, struct kobject *kobj, 3471 struct bin_attribute *bin_attr, 3472 char *buf, loff_t off, size_t count) 3473 { 3474 struct 
device *dev = container_of(kobj, struct device, kobj); 3475 struct Scsi_Host *shost = class_to_shost(dev); 3476 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata; 3477 unsigned long lock_flags = 0; 3478 ssize_t ret; 3479 3480 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); 3481 ret = memory_read_from_buffer(buf, count, &off, ioa_cfg->trace, 3482 IPR_TRACE_SIZE); 3483 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); 3484 3485 return ret; 3486 } 3487 3488 static struct bin_attribute ipr_trace_attr = { 3489 .attr = { 3490 .name = "trace", 3491 .mode = S_IRUGO, 3492 }, 3493 .size = 0, 3494 .read = ipr_read_trace, 3495 }; 3496 #endif 3497 3498 /** 3499 * ipr_show_fw_version - Show the firmware version 3500 * @dev: class device struct 3501 * @buf: buffer 3502 * 3503 * Return value: 3504 * number of bytes printed to buffer 3505 **/ 3506 static ssize_t ipr_show_fw_version(struct device *dev, 3507 struct device_attribute *attr, char *buf) 3508 { 3509 struct Scsi_Host *shost = class_to_shost(dev); 3510 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata; 3511 struct ipr_inquiry_page3 *ucode_vpd = &ioa_cfg->vpd_cbs->page3_data; 3512 unsigned long lock_flags = 0; 3513 int len; 3514 3515 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); 3516 len = snprintf(buf, PAGE_SIZE, "%02X%02X%02X%02X\n", 3517 ucode_vpd->major_release, ucode_vpd->card_type, 3518 ucode_vpd->minor_release[0], 3519 ucode_vpd->minor_release[1]); 3520 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); 3521 return len; 3522 } 3523 3524 static struct device_attribute ipr_fw_version_attr = { 3525 .attr = { 3526 .name = "fw_version", 3527 .mode = S_IRUGO, 3528 }, 3529 .show = ipr_show_fw_version, 3530 }; 3531 3532 /** 3533 * ipr_show_log_level - Show the adapter's error logging level 3534 * @dev: class device struct 3535 * @buf: buffer 3536 * 3537 * Return value: 3538 * number of bytes printed to buffer 3539 **/ 3540 static ssize_t ipr_show_log_level(struct device *dev, 3541 struct device_attribute *attr, char *buf) 3542 { 3543 struct Scsi_Host *shost = class_to_shost(dev); 3544 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata; 3545 unsigned long lock_flags = 0; 3546 int len; 3547 3548 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); 3549 len = snprintf(buf, PAGE_SIZE, "%d\n", ioa_cfg->log_level); 3550 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); 3551 return len; 3552 } 3553 3554 /** 3555 * ipr_store_log_level - Change the adapter's error logging level 3556 * @dev: class device struct 3557 * @buf: buffer 3558 * 3559 * Return value: 3560 * number of bytes printed to buffer 3561 **/ 3562 static ssize_t ipr_store_log_level(struct device *dev, 3563 struct device_attribute *attr, 3564 const char *buf, size_t count) 3565 { 3566 struct Scsi_Host *shost = class_to_shost(dev); 3567 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata; 3568 unsigned long lock_flags = 0; 3569 3570 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); 3571 ioa_cfg->log_level = simple_strtoul(buf, NULL, 10); 3572 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); 3573 return strlen(buf); 3574 } 3575 3576 static struct device_attribute ipr_log_level_attr = { 3577 .attr = { 3578 .name = "log_level", 3579 .mode = S_IRUGO | S_IWUSR, 3580 }, 3581 .show = ipr_show_log_level, 3582 .store = ipr_store_log_level 3583 }; 3584 3585 /** 3586 * ipr_store_diagnostics - IOA Diagnostics interface 3587 * @dev: device struct 3588 * @buf: 
buffer 3589 * @count: buffer size 3590 * 3591 * This function will reset the adapter and wait a reasonable 3592 * amount of time for any errors that the adapter might log. 3593 * 3594 * Return value: 3595 * count on success / other on failure 3596 **/ 3597 static ssize_t ipr_store_diagnostics(struct device *dev, 3598 struct device_attribute *attr, 3599 const char *buf, size_t count) 3600 { 3601 struct Scsi_Host *shost = class_to_shost(dev); 3602 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata; 3603 unsigned long lock_flags = 0; 3604 int rc = count; 3605 3606 if (!capable(CAP_SYS_ADMIN)) 3607 return -EACCES; 3608 3609 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); 3610 while (ioa_cfg->in_reset_reload) { 3611 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); 3612 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload); 3613 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); 3614 } 3615 3616 ioa_cfg->errors_logged = 0; 3617 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NORMAL); 3618 3619 if (ioa_cfg->in_reset_reload) { 3620 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); 3621 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload); 3622 3623 /* Wait for a second for any errors to be logged */ 3624 msleep(1000); 3625 } else { 3626 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); 3627 return -EIO; 3628 } 3629 3630 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); 3631 if (ioa_cfg->in_reset_reload || ioa_cfg->errors_logged) 3632 rc = -EIO; 3633 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); 3634 3635 return rc; 3636 } 3637 3638 static struct device_attribute ipr_diagnostics_attr = { 3639 .attr = { 3640 .name = "run_diagnostics", 3641 .mode = S_IWUSR, 3642 }, 3643 .store = ipr_store_diagnostics 3644 }; 3645 3646 /** 3647 * ipr_show_adapter_state - Show the adapter's state 3648 * @class_dev: device struct 3649 * @buf: buffer 3650 * 3651 * Return value: 3652 * number of bytes printed to buffer 3653 **/ 3654 static ssize_t ipr_show_adapter_state(struct device *dev, 3655 struct device_attribute *attr, char *buf) 3656 { 3657 struct Scsi_Host *shost = class_to_shost(dev); 3658 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata; 3659 unsigned long lock_flags = 0; 3660 int len; 3661 3662 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); 3663 if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead) 3664 len = snprintf(buf, PAGE_SIZE, "offline\n"); 3665 else 3666 len = snprintf(buf, PAGE_SIZE, "online\n"); 3667 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); 3668 return len; 3669 } 3670 3671 /** 3672 * ipr_store_adapter_state - Change adapter state 3673 * @dev: device struct 3674 * @buf: buffer 3675 * @count: buffer size 3676 * 3677 * This function will change the adapter's state. 
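 * Only the transition back to "online" is acted on: if the adapter is
 * currently marked dead, writing "online" clears the dead flags on all
 * HRRQs and kicks off an adapter reset. A minimal usage sketch from
 * userspace (host0 is just an example host name):
 *
 *   echo online > /sys/class/scsi_host/host0/online_state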
3678 * 3679 * Return value: 3680 * count on success / other on failure 3681 **/ 3682 static ssize_t ipr_store_adapter_state(struct device *dev, 3683 struct device_attribute *attr, 3684 const char *buf, size_t count) 3685 { 3686 struct Scsi_Host *shost = class_to_shost(dev); 3687 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata; 3688 unsigned long lock_flags; 3689 int result = count, i; 3690 3691 if (!capable(CAP_SYS_ADMIN)) 3692 return -EACCES; 3693 3694 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); 3695 if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead && 3696 !strncmp(buf, "online", 6)) { 3697 for (i = 0; i < ioa_cfg->hrrq_num; i++) { 3698 spin_lock(&ioa_cfg->hrrq[i]._lock); 3699 ioa_cfg->hrrq[i].ioa_is_dead = 0; 3700 spin_unlock(&ioa_cfg->hrrq[i]._lock); 3701 } 3702 wmb(); 3703 ioa_cfg->reset_retries = 0; 3704 ioa_cfg->in_ioa_bringdown = 0; 3705 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE); 3706 } 3707 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); 3708 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload); 3709 3710 return result; 3711 } 3712 3713 static struct device_attribute ipr_ioa_state_attr = { 3714 .attr = { 3715 .name = "online_state", 3716 .mode = S_IRUGO | S_IWUSR, 3717 }, 3718 .show = ipr_show_adapter_state, 3719 .store = ipr_store_adapter_state 3720 }; 3721 3722 /** 3723 * ipr_store_reset_adapter - Reset the adapter 3724 * @dev: device struct 3725 * @buf: buffer 3726 * @count: buffer size 3727 * 3728 * This function will reset the adapter. 3729 * 3730 * Return value: 3731 * count on success / other on failure 3732 **/ 3733 static ssize_t ipr_store_reset_adapter(struct device *dev, 3734 struct device_attribute *attr, 3735 const char *buf, size_t count) 3736 { 3737 struct Scsi_Host *shost = class_to_shost(dev); 3738 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata; 3739 unsigned long lock_flags; 3740 int result = count; 3741 3742 if (!capable(CAP_SYS_ADMIN)) 3743 return -EACCES; 3744 3745 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); 3746 if (!ioa_cfg->in_reset_reload) 3747 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NORMAL); 3748 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); 3749 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload); 3750 3751 return result; 3752 } 3753 3754 static struct device_attribute ipr_ioa_reset_attr = { 3755 .attr = { 3756 .name = "reset_host", 3757 .mode = S_IWUSR, 3758 }, 3759 .store = ipr_store_reset_adapter 3760 }; 3761 3762 static int ipr_iopoll(struct irq_poll *iop, int budget); 3763 /** 3764 * ipr_show_iopoll_weight - Show ipr polling mode 3765 * @dev: class device struct 3766 * @buf: buffer 3767 * 3768 * Return value: 3769 * number of bytes printed to buffer 3770 **/ 3771 static ssize_t ipr_show_iopoll_weight(struct device *dev, 3772 struct device_attribute *attr, char *buf) 3773 { 3774 struct Scsi_Host *shost = class_to_shost(dev); 3775 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata; 3776 unsigned long lock_flags = 0; 3777 int len; 3778 3779 spin_lock_irqsave(shost->host_lock, lock_flags); 3780 len = snprintf(buf, PAGE_SIZE, "%d\n", ioa_cfg->iopoll_weight); 3781 spin_unlock_irqrestore(shost->host_lock, lock_flags); 3782 3783 return len; 3784 } 3785 3786 /** 3787 * ipr_store_iopoll_weight - Change the adapter's polling mode 3788 * @dev: class device struct 3789 * @buf: buffer 3790 * 3791 * Return value: 3792 * number of bytes printed to buffer 3793 **/ 3794 static ssize_t ipr_store_iopoll_weight(struct device *dev, 
3795 struct device_attribute *attr, 3796 const char *buf, size_t count) 3797 { 3798 struct Scsi_Host *shost = class_to_shost(dev); 3799 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata; 3800 unsigned long user_iopoll_weight; 3801 unsigned long lock_flags = 0; 3802 int i; 3803 3804 if (!ioa_cfg->sis64) { 3805 dev_info(&ioa_cfg->pdev->dev, "irq_poll not supported on this adapter\n"); 3806 return -EINVAL; 3807 } 3808 if (kstrtoul(buf, 10, &user_iopoll_weight)) 3809 return -EINVAL; 3810 3811 if (user_iopoll_weight > 256) { 3812 dev_info(&ioa_cfg->pdev->dev, "Invalid irq_poll weight. It must be less than 256\n"); 3813 return -EINVAL; 3814 } 3815 3816 if (user_iopoll_weight == ioa_cfg->iopoll_weight) { 3817 dev_info(&ioa_cfg->pdev->dev, "Current irq_poll weight has the same weight\n"); 3818 return strlen(buf); 3819 } 3820 3821 if (ioa_cfg->iopoll_weight && ioa_cfg->sis64 && ioa_cfg->nvectors > 1) { 3822 for (i = 1; i < ioa_cfg->hrrq_num; i++) 3823 irq_poll_disable(&ioa_cfg->hrrq[i].iopoll); 3824 } 3825 3826 spin_lock_irqsave(shost->host_lock, lock_flags); 3827 ioa_cfg->iopoll_weight = user_iopoll_weight; 3828 if (ioa_cfg->iopoll_weight && ioa_cfg->sis64 && ioa_cfg->nvectors > 1) { 3829 for (i = 1; i < ioa_cfg->hrrq_num; i++) { 3830 irq_poll_init(&ioa_cfg->hrrq[i].iopoll, 3831 ioa_cfg->iopoll_weight, ipr_iopoll); 3832 } 3833 } 3834 spin_unlock_irqrestore(shost->host_lock, lock_flags); 3835 3836 return strlen(buf); 3837 } 3838 3839 static struct device_attribute ipr_iopoll_weight_attr = { 3840 .attr = { 3841 .name = "iopoll_weight", 3842 .mode = S_IRUGO | S_IWUSR, 3843 }, 3844 .show = ipr_show_iopoll_weight, 3845 .store = ipr_store_iopoll_weight 3846 }; 3847 3848 /** 3849 * ipr_alloc_ucode_buffer - Allocates a microcode download buffer 3850 * @buf_len: buffer length 3851 * 3852 * Allocates a DMA'able buffer in chunks and assembles a scatter/gather 3853 * list to use for microcode download 3854 * 3855 * Return value: 3856 * pointer to sglist / NULL on failure 3857 **/ 3858 static struct ipr_sglist *ipr_alloc_ucode_buffer(int buf_len) 3859 { 3860 int sg_size, order; 3861 struct ipr_sglist *sglist; 3862 3863 /* Get the minimum size per scatter/gather element */ 3864 sg_size = buf_len / (IPR_MAX_SGLIST - 1); 3865 3866 /* Get the actual size per element */ 3867 order = get_order(sg_size); 3868 3869 /* Allocate a scatter/gather list for the DMA */ 3870 sglist = kzalloc(sizeof(struct ipr_sglist), GFP_KERNEL); 3871 if (sglist == NULL) { 3872 ipr_trace; 3873 return NULL; 3874 } 3875 sglist->order = order; 3876 sglist->scatterlist = sgl_alloc_order(buf_len, order, false, GFP_KERNEL, 3877 &sglist->num_sg); 3878 if (!sglist->scatterlist) { 3879 kfree(sglist); 3880 return NULL; 3881 } 3882 3883 return sglist; 3884 } 3885 3886 /** 3887 * ipr_free_ucode_buffer - Frees a microcode download buffer 3888 * @p_dnld: scatter/gather list pointer 3889 * 3890 * Free a DMA'able ucode download buffer previously allocated with 3891 * ipr_alloc_ucode_buffer 3892 * 3893 * Return value: 3894 * nothing 3895 **/ 3896 static void ipr_free_ucode_buffer(struct ipr_sglist *sglist) 3897 { 3898 sgl_free_order(sglist->scatterlist, sglist->order); 3899 kfree(sglist); 3900 } 3901 3902 /** 3903 * ipr_copy_ucode_buffer - Copy user buffer to kernel buffer 3904 * @sglist: scatter/gather list pointer 3905 * @buffer: buffer pointer 3906 * @len: buffer length 3907 * 3908 * Copy a microcode image from a user buffer into a buffer allocated by 3909 * ipr_alloc_ucode_buffer 3910 * 3911 * Return value: 3912 * 0 on success / other 
on failure 3913 **/ 3914 static int ipr_copy_ucode_buffer(struct ipr_sglist *sglist, 3915 u8 *buffer, u32 len) 3916 { 3917 int bsize_elem, i, result = 0; 3918 struct scatterlist *scatterlist; 3919 void *kaddr; 3920 3921 /* Determine the actual number of bytes per element */ 3922 bsize_elem = PAGE_SIZE * (1 << sglist->order); 3923 3924 scatterlist = sglist->scatterlist; 3925 3926 for (i = 0; i < (len / bsize_elem); i++, buffer += bsize_elem) { 3927 struct page *page = sg_page(&scatterlist[i]); 3928 3929 kaddr = kmap(page); 3930 memcpy(kaddr, buffer, bsize_elem); 3931 kunmap(page); 3932 3933 scatterlist[i].length = bsize_elem; 3934 3935 if (result != 0) { 3936 ipr_trace; 3937 return result; 3938 } 3939 } 3940 3941 if (len % bsize_elem) { 3942 struct page *page = sg_page(&scatterlist[i]); 3943 3944 kaddr = kmap(page); 3945 memcpy(kaddr, buffer, len % bsize_elem); 3946 kunmap(page); 3947 3948 scatterlist[i].length = len % bsize_elem; 3949 } 3950 3951 sglist->buffer_len = len; 3952 return result; 3953 } 3954 3955 /** 3956 * ipr_build_ucode_ioadl64 - Build a microcode download IOADL 3957 * @ipr_cmd: ipr command struct 3958 * @sglist: scatter/gather list 3959 * 3960 * Builds a microcode download IOA data list (IOADL). 3961 * 3962 **/ 3963 static void ipr_build_ucode_ioadl64(struct ipr_cmnd *ipr_cmd, 3964 struct ipr_sglist *sglist) 3965 { 3966 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb; 3967 struct ipr_ioadl64_desc *ioadl64 = ipr_cmd->i.ioadl64; 3968 struct scatterlist *scatterlist = sglist->scatterlist; 3969 int i; 3970 3971 ipr_cmd->dma_use_sg = sglist->num_dma_sg; 3972 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ; 3973 ioarcb->data_transfer_length = cpu_to_be32(sglist->buffer_len); 3974 3975 ioarcb->ioadl_len = 3976 cpu_to_be32(sizeof(struct ipr_ioadl64_desc) * ipr_cmd->dma_use_sg); 3977 for (i = 0; i < ipr_cmd->dma_use_sg; i++) { 3978 ioadl64[i].flags = cpu_to_be32(IPR_IOADL_FLAGS_WRITE); 3979 ioadl64[i].data_len = cpu_to_be32(sg_dma_len(&scatterlist[i])); 3980 ioadl64[i].address = cpu_to_be64(sg_dma_address(&scatterlist[i])); 3981 } 3982 3983 ioadl64[i-1].flags |= cpu_to_be32(IPR_IOADL_FLAGS_LAST); 3984 } 3985 3986 /** 3987 * ipr_build_ucode_ioadl - Build a microcode download IOADL 3988 * @ipr_cmd: ipr command struct 3989 * @sglist: scatter/gather list 3990 * 3991 * Builds a microcode download IOA data list (IOADL). 
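 * This is the 32-bit descriptor variant: each element packs the write
 * flag and the DMA segment length into a single big-endian word and
 * carries a 32-bit DMA address, whereas ipr_build_ucode_ioadl64()
 * above uses separate flag, length, and 64-bit address fields.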
3992 * 3993 **/ 3994 static void ipr_build_ucode_ioadl(struct ipr_cmnd *ipr_cmd, 3995 struct ipr_sglist *sglist) 3996 { 3997 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb; 3998 struct ipr_ioadl_desc *ioadl = ipr_cmd->i.ioadl; 3999 struct scatterlist *scatterlist = sglist->scatterlist; 4000 int i; 4001 4002 ipr_cmd->dma_use_sg = sglist->num_dma_sg; 4003 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ; 4004 ioarcb->data_transfer_length = cpu_to_be32(sglist->buffer_len); 4005 4006 ioarcb->ioadl_len = 4007 cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg); 4008 4009 for (i = 0; i < ipr_cmd->dma_use_sg; i++) { 4010 ioadl[i].flags_and_data_len = 4011 cpu_to_be32(IPR_IOADL_FLAGS_WRITE | sg_dma_len(&scatterlist[i])); 4012 ioadl[i].address = 4013 cpu_to_be32(sg_dma_address(&scatterlist[i])); 4014 } 4015 4016 ioadl[i-1].flags_and_data_len |= 4017 cpu_to_be32(IPR_IOADL_FLAGS_LAST); 4018 } 4019 4020 /** 4021 * ipr_update_ioa_ucode - Update IOA's microcode 4022 * @ioa_cfg: ioa config struct 4023 * @sglist: scatter/gather list 4024 * 4025 * Initiate an adapter reset to update the IOA's microcode 4026 * 4027 * Return value: 4028 * 0 on success / -EIO on failure 4029 **/ 4030 static int ipr_update_ioa_ucode(struct ipr_ioa_cfg *ioa_cfg, 4031 struct ipr_sglist *sglist) 4032 { 4033 unsigned long lock_flags; 4034 4035 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); 4036 while (ioa_cfg->in_reset_reload) { 4037 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); 4038 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload); 4039 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); 4040 } 4041 4042 if (ioa_cfg->ucode_sglist) { 4043 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); 4044 dev_err(&ioa_cfg->pdev->dev, 4045 "Microcode download already in progress\n"); 4046 return -EIO; 4047 } 4048 4049 sglist->num_dma_sg = dma_map_sg(&ioa_cfg->pdev->dev, 4050 sglist->scatterlist, sglist->num_sg, 4051 DMA_TO_DEVICE); 4052 4053 if (!sglist->num_dma_sg) { 4054 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); 4055 dev_err(&ioa_cfg->pdev->dev, 4056 "Failed to map microcode download buffer!\n"); 4057 return -EIO; 4058 } 4059 4060 ioa_cfg->ucode_sglist = sglist; 4061 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NORMAL); 4062 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); 4063 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload); 4064 4065 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); 4066 ioa_cfg->ucode_sglist = NULL; 4067 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); 4068 return 0; 4069 } 4070 4071 /** 4072 * ipr_store_update_fw - Update the firmware on the adapter 4073 * @class_dev: device struct 4074 * @buf: buffer 4075 * @count: buffer size 4076 * 4077 * This function will update the firmware on the adapter. 
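 * The value written is treated as a firmware image name and resolved
 * through request_firmware(), so the image must live in the firmware
 * loader's search path (typically /lib/firmware). A minimal usage
 * sketch (the image name and host number are only examples):
 *
 *   echo ibm-adapter-ucode.bin > /sys/class/scsi_host/host0/update_fw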
4078 * 4079 * Return value: 4080 * count on success / other on failure 4081 **/ 4082 static ssize_t ipr_store_update_fw(struct device *dev, 4083 struct device_attribute *attr, 4084 const char *buf, size_t count) 4085 { 4086 struct Scsi_Host *shost = class_to_shost(dev); 4087 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata; 4088 struct ipr_ucode_image_header *image_hdr; 4089 const struct firmware *fw_entry; 4090 struct ipr_sglist *sglist; 4091 char fname[100]; 4092 char *src; 4093 char *endline; 4094 int result, dnld_size; 4095 4096 if (!capable(CAP_SYS_ADMIN)) 4097 return -EACCES; 4098 4099 snprintf(fname, sizeof(fname), "%s", buf); 4100 4101 endline = strchr(fname, '\n'); 4102 if (endline) 4103 *endline = '\0'; 4104 4105 if (request_firmware(&fw_entry, fname, &ioa_cfg->pdev->dev)) { 4106 dev_err(&ioa_cfg->pdev->dev, "Firmware file %s not found\n", fname); 4107 return -EIO; 4108 } 4109 4110 image_hdr = (struct ipr_ucode_image_header *)fw_entry->data; 4111 4112 src = (u8 *)image_hdr + be32_to_cpu(image_hdr->header_length); 4113 dnld_size = fw_entry->size - be32_to_cpu(image_hdr->header_length); 4114 sglist = ipr_alloc_ucode_buffer(dnld_size); 4115 4116 if (!sglist) { 4117 dev_err(&ioa_cfg->pdev->dev, "Microcode buffer allocation failed\n"); 4118 release_firmware(fw_entry); 4119 return -ENOMEM; 4120 } 4121 4122 result = ipr_copy_ucode_buffer(sglist, src, dnld_size); 4123 4124 if (result) { 4125 dev_err(&ioa_cfg->pdev->dev, 4126 "Microcode buffer copy to DMA buffer failed\n"); 4127 goto out; 4128 } 4129 4130 ipr_info("Updating microcode, please be patient. This may take up to 30 minutes.\n"); 4131 4132 result = ipr_update_ioa_ucode(ioa_cfg, sglist); 4133 4134 if (!result) 4135 result = count; 4136 out: 4137 ipr_free_ucode_buffer(sglist); 4138 release_firmware(fw_entry); 4139 return result; 4140 } 4141 4142 static struct device_attribute ipr_update_fw_attr = { 4143 .attr = { 4144 .name = "update_fw", 4145 .mode = S_IWUSR, 4146 }, 4147 .store = ipr_store_update_fw 4148 }; 4149 4150 /** 4151 * ipr_show_fw_type - Show the adapter's firmware type. 
4152 * @dev: class device struct 4153 * @buf: buffer 4154 * 4155 * Return value: 4156 * number of bytes printed to buffer 4157 **/ 4158 static ssize_t ipr_show_fw_type(struct device *dev, 4159 struct device_attribute *attr, char *buf) 4160 { 4161 struct Scsi_Host *shost = class_to_shost(dev); 4162 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata; 4163 unsigned long lock_flags = 0; 4164 int len; 4165 4166 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); 4167 len = snprintf(buf, PAGE_SIZE, "%d\n", ioa_cfg->sis64); 4168 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); 4169 return len; 4170 } 4171 4172 static struct device_attribute ipr_ioa_fw_type_attr = { 4173 .attr = { 4174 .name = "fw_type", 4175 .mode = S_IRUGO, 4176 }, 4177 .show = ipr_show_fw_type 4178 }; 4179 4180 static ssize_t ipr_read_async_err_log(struct file *filep, struct kobject *kobj, 4181 struct bin_attribute *bin_attr, char *buf, 4182 loff_t off, size_t count) 4183 { 4184 struct device *cdev = container_of(kobj, struct device, kobj); 4185 struct Scsi_Host *shost = class_to_shost(cdev); 4186 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata; 4187 struct ipr_hostrcb *hostrcb; 4188 unsigned long lock_flags = 0; 4189 int ret; 4190 4191 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); 4192 hostrcb = list_first_entry_or_null(&ioa_cfg->hostrcb_report_q, 4193 struct ipr_hostrcb, queue); 4194 if (!hostrcb) { 4195 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); 4196 return 0; 4197 } 4198 ret = memory_read_from_buffer(buf, count, &off, &hostrcb->hcam, 4199 sizeof(hostrcb->hcam)); 4200 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); 4201 return ret; 4202 } 4203 4204 static ssize_t ipr_next_async_err_log(struct file *filep, struct kobject *kobj, 4205 struct bin_attribute *bin_attr, char *buf, 4206 loff_t off, size_t count) 4207 { 4208 struct device *cdev = container_of(kobj, struct device, kobj); 4209 struct Scsi_Host *shost = class_to_shost(cdev); 4210 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata; 4211 struct ipr_hostrcb *hostrcb; 4212 unsigned long lock_flags = 0; 4213 4214 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); 4215 hostrcb = list_first_entry_or_null(&ioa_cfg->hostrcb_report_q, 4216 struct ipr_hostrcb, queue); 4217 if (!hostrcb) { 4218 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); 4219 return count; 4220 } 4221 4222 /* Reclaim hostrcb before exit */ 4223 list_move_tail(&hostrcb->queue, &ioa_cfg->hostrcb_free_q); 4224 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); 4225 return count; 4226 } 4227 4228 static struct bin_attribute ipr_ioa_async_err_log = { 4229 .attr = { 4230 .name = "async_err_log", 4231 .mode = S_IRUGO | S_IWUSR, 4232 }, 4233 .size = 0, 4234 .read = ipr_read_async_err_log, 4235 .write = ipr_next_async_err_log 4236 }; 4237 4238 static struct device_attribute *ipr_ioa_attrs[] = { 4239 &ipr_fw_version_attr, 4240 &ipr_log_level_attr, 4241 &ipr_diagnostics_attr, 4242 &ipr_ioa_state_attr, 4243 &ipr_ioa_reset_attr, 4244 &ipr_update_fw_attr, 4245 &ipr_ioa_fw_type_attr, 4246 &ipr_iopoll_weight_attr, 4247 NULL, 4248 }; 4249 4250 #ifdef CONFIG_SCSI_IPR_DUMP 4251 /** 4252 * ipr_read_dump - Dump the adapter 4253 * @filp: open sysfs file 4254 * @kobj: kobject struct 4255 * @bin_attr: bin_attribute struct 4256 * @buf: buffer 4257 * @off: offset 4258 * @count: buffer size 4259 * 4260 * Return value: 4261 * number of bytes printed to buffer 4262 **/ 4263 static ssize_t 
ipr_read_dump(struct file *filp, struct kobject *kobj, 4264 struct bin_attribute *bin_attr, 4265 char *buf, loff_t off, size_t count) 4266 { 4267 struct device *cdev = container_of(kobj, struct device, kobj); 4268 struct Scsi_Host *shost = class_to_shost(cdev); 4269 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata; 4270 struct ipr_dump *dump; 4271 unsigned long lock_flags = 0; 4272 char *src; 4273 int len, sdt_end; 4274 size_t rc = count; 4275 4276 if (!capable(CAP_SYS_ADMIN)) 4277 return -EACCES; 4278 4279 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); 4280 dump = ioa_cfg->dump; 4281 4282 if (ioa_cfg->sdt_state != DUMP_OBTAINED || !dump) { 4283 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); 4284 return 0; 4285 } 4286 kref_get(&dump->kref); 4287 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); 4288 4289 if (off > dump->driver_dump.hdr.len) { 4290 kref_put(&dump->kref, ipr_release_dump); 4291 return 0; 4292 } 4293 4294 if (off + count > dump->driver_dump.hdr.len) { 4295 count = dump->driver_dump.hdr.len - off; 4296 rc = count; 4297 } 4298 4299 if (count && off < sizeof(dump->driver_dump)) { 4300 if (off + count > sizeof(dump->driver_dump)) 4301 len = sizeof(dump->driver_dump) - off; 4302 else 4303 len = count; 4304 src = (u8 *)&dump->driver_dump + off; 4305 memcpy(buf, src, len); 4306 buf += len; 4307 off += len; 4308 count -= len; 4309 } 4310 4311 off -= sizeof(dump->driver_dump); 4312 4313 if (ioa_cfg->sis64) 4314 sdt_end = offsetof(struct ipr_ioa_dump, sdt.entry) + 4315 (be32_to_cpu(dump->ioa_dump.sdt.hdr.num_entries_used) * 4316 sizeof(struct ipr_sdt_entry)); 4317 else 4318 sdt_end = offsetof(struct ipr_ioa_dump, sdt.entry) + 4319 (IPR_FMT2_NUM_SDT_ENTRIES * sizeof(struct ipr_sdt_entry)); 4320 4321 if (count && off < sdt_end) { 4322 if (off + count > sdt_end) 4323 len = sdt_end - off; 4324 else 4325 len = count; 4326 src = (u8 *)&dump->ioa_dump + off; 4327 memcpy(buf, src, len); 4328 buf += len; 4329 off += len; 4330 count -= len; 4331 } 4332 4333 off -= sdt_end; 4334 4335 while (count) { 4336 if ((off & PAGE_MASK) != ((off + count) & PAGE_MASK)) 4337 len = PAGE_ALIGN(off) - off; 4338 else 4339 len = count; 4340 src = (u8 *)dump->ioa_dump.ioa_data[(off & PAGE_MASK) >> PAGE_SHIFT]; 4341 src += off & ~PAGE_MASK; 4342 memcpy(buf, src, len); 4343 buf += len; 4344 off += len; 4345 count -= len; 4346 } 4347 4348 kref_put(&dump->kref, ipr_release_dump); 4349 return rc; 4350 } 4351 4352 /** 4353 * ipr_alloc_dump - Prepare for adapter dump 4354 * @ioa_cfg: ioa config struct 4355 * 4356 * Return value: 4357 * 0 on success / other on failure 4358 **/ 4359 static int ipr_alloc_dump(struct ipr_ioa_cfg *ioa_cfg) 4360 { 4361 struct ipr_dump *dump; 4362 __be32 **ioa_data; 4363 unsigned long lock_flags = 0; 4364 4365 dump = kzalloc(sizeof(struct ipr_dump), GFP_KERNEL); 4366 4367 if (!dump) { 4368 ipr_err("Dump memory allocation failed\n"); 4369 return -ENOMEM; 4370 } 4371 4372 if (ioa_cfg->sis64) 4373 ioa_data = vmalloc(array_size(IPR_FMT3_MAX_NUM_DUMP_PAGES, 4374 sizeof(__be32 *))); 4375 else 4376 ioa_data = vmalloc(array_size(IPR_FMT2_MAX_NUM_DUMP_PAGES, 4377 sizeof(__be32 *))); 4378 4379 if (!ioa_data) { 4380 ipr_err("Dump memory allocation failed\n"); 4381 kfree(dump); 4382 return -ENOMEM; 4383 } 4384 4385 dump->ioa_dump.ioa_data = ioa_data; 4386 4387 kref_init(&dump->kref); 4388 dump->ioa_cfg = ioa_cfg; 4389 4390 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); 4391 4392 if (INACTIVE != ioa_cfg->sdt_state) { 4393 
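		/* A dump is already being set up or collected; discard the
		 * buffers we just allocated and report success.
		 */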
spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); 4394 vfree(dump->ioa_dump.ioa_data); 4395 kfree(dump); 4396 return 0; 4397 } 4398 4399 ioa_cfg->dump = dump; 4400 ioa_cfg->sdt_state = WAIT_FOR_DUMP; 4401 if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead && !ioa_cfg->dump_taken) { 4402 ioa_cfg->dump_taken = 1; 4403 schedule_work(&ioa_cfg->work_q); 4404 } 4405 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); 4406 4407 return 0; 4408 } 4409 4410 /** 4411 * ipr_free_dump - Free adapter dump memory 4412 * @ioa_cfg: ioa config struct 4413 * 4414 * Return value: 4415 * 0 on success / other on failure 4416 **/ 4417 static int ipr_free_dump(struct ipr_ioa_cfg *ioa_cfg) 4418 { 4419 struct ipr_dump *dump; 4420 unsigned long lock_flags = 0; 4421 4422 ENTER; 4423 4424 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); 4425 dump = ioa_cfg->dump; 4426 if (!dump) { 4427 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); 4428 return 0; 4429 } 4430 4431 ioa_cfg->dump = NULL; 4432 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); 4433 4434 kref_put(&dump->kref, ipr_release_dump); 4435 4436 LEAVE; 4437 return 0; 4438 } 4439 4440 /** 4441 * ipr_write_dump - Setup dump state of adapter 4442 * @filp: open sysfs file 4443 * @kobj: kobject struct 4444 * @bin_attr: bin_attribute struct 4445 * @buf: buffer 4446 * @off: offset 4447 * @count: buffer size 4448 * 4449 * Return value: 4450 * number of bytes printed to buffer 4451 **/ 4452 static ssize_t ipr_write_dump(struct file *filp, struct kobject *kobj, 4453 struct bin_attribute *bin_attr, 4454 char *buf, loff_t off, size_t count) 4455 { 4456 struct device *cdev = container_of(kobj, struct device, kobj); 4457 struct Scsi_Host *shost = class_to_shost(cdev); 4458 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata; 4459 int rc; 4460 4461 if (!capable(CAP_SYS_ADMIN)) 4462 return -EACCES; 4463 4464 if (buf[0] == '1') 4465 rc = ipr_alloc_dump(ioa_cfg); 4466 else if (buf[0] == '0') 4467 rc = ipr_free_dump(ioa_cfg); 4468 else 4469 return -EINVAL; 4470 4471 if (rc) 4472 return rc; 4473 else 4474 return count; 4475 } 4476 4477 static struct bin_attribute ipr_dump_attr = { 4478 .attr = { 4479 .name = "dump", 4480 .mode = S_IRUSR | S_IWUSR, 4481 }, 4482 .size = 0, 4483 .read = ipr_read_dump, 4484 .write = ipr_write_dump 4485 }; 4486 #else 4487 static int ipr_free_dump(struct ipr_ioa_cfg *ioa_cfg) { return 0; }; 4488 #endif 4489 4490 /** 4491 * ipr_change_queue_depth - Change the device's queue depth 4492 * @sdev: scsi device struct 4493 * @qdepth: depth to set 4494 * @reason: calling context 4495 * 4496 * Return value: 4497 * actual depth set 4498 **/ 4499 static int ipr_change_queue_depth(struct scsi_device *sdev, int qdepth) 4500 { 4501 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata; 4502 struct ipr_resource_entry *res; 4503 unsigned long lock_flags = 0; 4504 4505 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); 4506 res = (struct ipr_resource_entry *)sdev->hostdata; 4507 4508 if (res && ipr_is_gata(res) && qdepth > IPR_MAX_CMD_PER_ATA_LUN) 4509 qdepth = IPR_MAX_CMD_PER_ATA_LUN; 4510 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); 4511 4512 scsi_change_queue_depth(sdev, qdepth); 4513 return sdev->queue_depth; 4514 } 4515 4516 /** 4517 * ipr_show_adapter_handle - Show the adapter's resource handle for this device 4518 * @dev: device struct 4519 * @attr: device attribute structure 4520 * @buf: buffer 4521 * 4522 * Return value: 4523 * number of bytes printed to 
buffer 4524 **/ 4525 static ssize_t ipr_show_adapter_handle(struct device *dev, struct device_attribute *attr, char *buf) 4526 { 4527 struct scsi_device *sdev = to_scsi_device(dev); 4528 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata; 4529 struct ipr_resource_entry *res; 4530 unsigned long lock_flags = 0; 4531 ssize_t len = -ENXIO; 4532 4533 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); 4534 res = (struct ipr_resource_entry *)sdev->hostdata; 4535 if (res) 4536 len = snprintf(buf, PAGE_SIZE, "%08X\n", res->res_handle); 4537 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); 4538 return len; 4539 } 4540 4541 static struct device_attribute ipr_adapter_handle_attr = { 4542 .attr = { 4543 .name = "adapter_handle", 4544 .mode = S_IRUSR, 4545 }, 4546 .show = ipr_show_adapter_handle 4547 }; 4548 4549 /** 4550 * ipr_show_resource_path - Show the resource path or the resource address for 4551 * this device. 4552 * @dev: device struct 4553 * @attr: device attribute structure 4554 * @buf: buffer 4555 * 4556 * Return value: 4557 * number of bytes printed to buffer 4558 **/ 4559 static ssize_t ipr_show_resource_path(struct device *dev, struct device_attribute *attr, char *buf) 4560 { 4561 struct scsi_device *sdev = to_scsi_device(dev); 4562 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata; 4563 struct ipr_resource_entry *res; 4564 unsigned long lock_flags = 0; 4565 ssize_t len = -ENXIO; 4566 char buffer[IPR_MAX_RES_PATH_LENGTH]; 4567 4568 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); 4569 res = (struct ipr_resource_entry *)sdev->hostdata; 4570 if (res && ioa_cfg->sis64) 4571 len = snprintf(buf, PAGE_SIZE, "%s\n", 4572 __ipr_format_res_path(res->res_path, buffer, 4573 sizeof(buffer))); 4574 else if (res) 4575 len = snprintf(buf, PAGE_SIZE, "%d:%d:%d:%d\n", ioa_cfg->host->host_no, 4576 res->bus, res->target, res->lun); 4577 4578 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); 4579 return len; 4580 } 4581 4582 static struct device_attribute ipr_resource_path_attr = { 4583 .attr = { 4584 .name = "resource_path", 4585 .mode = S_IRUGO, 4586 }, 4587 .show = ipr_show_resource_path 4588 }; 4589 4590 /** 4591 * ipr_show_device_id - Show the device_id for this device. 4592 * @dev: device struct 4593 * @attr: device attribute structure 4594 * @buf: buffer 4595 * 4596 * Return value: 4597 * number of bytes printed to buffer 4598 **/ 4599 static ssize_t ipr_show_device_id(struct device *dev, struct device_attribute *attr, char *buf) 4600 { 4601 struct scsi_device *sdev = to_scsi_device(dev); 4602 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata; 4603 struct ipr_resource_entry *res; 4604 unsigned long lock_flags = 0; 4605 ssize_t len = -ENXIO; 4606 4607 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); 4608 res = (struct ipr_resource_entry *)sdev->hostdata; 4609 if (res && ioa_cfg->sis64) 4610 len = snprintf(buf, PAGE_SIZE, "0x%llx\n", be64_to_cpu(res->dev_id)); 4611 else if (res) 4612 len = snprintf(buf, PAGE_SIZE, "0x%llx\n", res->lun_wwn); 4613 4614 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); 4615 return len; 4616 } 4617 4618 static struct device_attribute ipr_device_id_attr = { 4619 .attr = { 4620 .name = "device_id", 4621 .mode = S_IRUGO, 4622 }, 4623 .show = ipr_show_device_id 4624 }; 4625 4626 /** 4627 * ipr_show_resource_type - Show the resource type for this device. 
4628 * @dev: device struct 4629 * @attr: device attribute structure 4630 * @buf: buffer 4631 * 4632 * Return value: 4633 * number of bytes printed to buffer 4634 **/ 4635 static ssize_t ipr_show_resource_type(struct device *dev, struct device_attribute *attr, char *buf) 4636 { 4637 struct scsi_device *sdev = to_scsi_device(dev); 4638 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata; 4639 struct ipr_resource_entry *res; 4640 unsigned long lock_flags = 0; 4641 ssize_t len = -ENXIO; 4642 4643 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); 4644 res = (struct ipr_resource_entry *)sdev->hostdata; 4645 4646 if (res) 4647 len = snprintf(buf, PAGE_SIZE, "%x\n", res->type); 4648 4649 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); 4650 return len; 4651 } 4652 4653 static struct device_attribute ipr_resource_type_attr = { 4654 .attr = { 4655 .name = "resource_type", 4656 .mode = S_IRUGO, 4657 }, 4658 .show = ipr_show_resource_type 4659 }; 4660 4661 /** 4662 * ipr_show_raw_mode - Show the adapter's raw mode 4663 * @dev: class device struct 4664 * @buf: buffer 4665 * 4666 * Return value: 4667 * number of bytes printed to buffer 4668 **/ 4669 static ssize_t ipr_show_raw_mode(struct device *dev, 4670 struct device_attribute *attr, char *buf) 4671 { 4672 struct scsi_device *sdev = to_scsi_device(dev); 4673 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata; 4674 struct ipr_resource_entry *res; 4675 unsigned long lock_flags = 0; 4676 ssize_t len; 4677 4678 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); 4679 res = (struct ipr_resource_entry *)sdev->hostdata; 4680 if (res) 4681 len = snprintf(buf, PAGE_SIZE, "%d\n", res->raw_mode); 4682 else 4683 len = -ENXIO; 4684 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); 4685 return len; 4686 } 4687 4688 /** 4689 * ipr_store_raw_mode - Change the adapter's raw mode 4690 * @dev: class device struct 4691 * @buf: buffer 4692 * 4693 * Return value: 4694 * number of bytes printed to buffer 4695 **/ 4696 static ssize_t ipr_store_raw_mode(struct device *dev, 4697 struct device_attribute *attr, 4698 const char *buf, size_t count) 4699 { 4700 struct scsi_device *sdev = to_scsi_device(dev); 4701 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata; 4702 struct ipr_resource_entry *res; 4703 unsigned long lock_flags = 0; 4704 ssize_t len; 4705 4706 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); 4707 res = (struct ipr_resource_entry *)sdev->hostdata; 4708 if (res) { 4709 if (ipr_is_af_dasd_device(res)) { 4710 res->raw_mode = simple_strtoul(buf, NULL, 10); 4711 len = strlen(buf); 4712 if (res->sdev) 4713 sdev_printk(KERN_INFO, res->sdev, "raw mode is %s\n", 4714 res->raw_mode ? 
"enabled" : "disabled"); 4715 } else 4716 len = -EINVAL; 4717 } else 4718 len = -ENXIO; 4719 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); 4720 return len; 4721 } 4722 4723 static struct device_attribute ipr_raw_mode_attr = { 4724 .attr = { 4725 .name = "raw_mode", 4726 .mode = S_IRUGO | S_IWUSR, 4727 }, 4728 .show = ipr_show_raw_mode, 4729 .store = ipr_store_raw_mode 4730 }; 4731 4732 static struct device_attribute *ipr_dev_attrs[] = { 4733 &ipr_adapter_handle_attr, 4734 &ipr_resource_path_attr, 4735 &ipr_device_id_attr, 4736 &ipr_resource_type_attr, 4737 &ipr_raw_mode_attr, 4738 NULL, 4739 }; 4740 4741 /** 4742 * ipr_biosparam - Return the HSC mapping 4743 * @sdev: scsi device struct 4744 * @block_device: block device pointer 4745 * @capacity: capacity of the device 4746 * @parm: Array containing returned HSC values. 4747 * 4748 * This function generates the HSC parms that fdisk uses. 4749 * We want to make sure we return something that places partitions 4750 * on 4k boundaries for best performance with the IOA. 4751 * 4752 * Return value: 4753 * 0 on success 4754 **/ 4755 static int ipr_biosparam(struct scsi_device *sdev, 4756 struct block_device *block_device, 4757 sector_t capacity, int *parm) 4758 { 4759 int heads, sectors; 4760 sector_t cylinders; 4761 4762 heads = 128; 4763 sectors = 32; 4764 4765 cylinders = capacity; 4766 sector_div(cylinders, (128 * 32)); 4767 4768 /* return result */ 4769 parm[0] = heads; 4770 parm[1] = sectors; 4771 parm[2] = cylinders; 4772 4773 return 0; 4774 } 4775 4776 /** 4777 * ipr_find_starget - Find target based on bus/target. 4778 * @starget: scsi target struct 4779 * 4780 * Return value: 4781 * resource entry pointer if found / NULL if not found 4782 **/ 4783 static struct ipr_resource_entry *ipr_find_starget(struct scsi_target *starget) 4784 { 4785 struct Scsi_Host *shost = dev_to_shost(&starget->dev); 4786 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) shost->hostdata; 4787 struct ipr_resource_entry *res; 4788 4789 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) { 4790 if ((res->bus == starget->channel) && 4791 (res->target == starget->id)) { 4792 return res; 4793 } 4794 } 4795 4796 return NULL; 4797 } 4798 4799 static struct ata_port_info sata_port_info; 4800 4801 /** 4802 * ipr_target_alloc - Prepare for commands to a SCSI target 4803 * @starget: scsi target struct 4804 * 4805 * If the device is a SATA device, this function allocates an 4806 * ATA port with libata, else it does nothing. 
4807 * 4808 * Return value: 4809 * 0 on success / non-0 on failure 4810 **/ 4811 static int ipr_target_alloc(struct scsi_target *starget) 4812 { 4813 struct Scsi_Host *shost = dev_to_shost(&starget->dev); 4814 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) shost->hostdata; 4815 struct ipr_sata_port *sata_port; 4816 struct ata_port *ap; 4817 struct ipr_resource_entry *res; 4818 unsigned long lock_flags; 4819 4820 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); 4821 res = ipr_find_starget(starget); 4822 starget->hostdata = NULL; 4823 4824 if (res && ipr_is_gata(res)) { 4825 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); 4826 sata_port = kzalloc(sizeof(*sata_port), GFP_KERNEL); 4827 if (!sata_port) 4828 return -ENOMEM; 4829 4830 ap = ata_sas_port_alloc(&ioa_cfg->ata_host, &sata_port_info, shost); 4831 if (ap) { 4832 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); 4833 sata_port->ioa_cfg = ioa_cfg; 4834 sata_port->ap = ap; 4835 sata_port->res = res; 4836 4837 res->sata_port = sata_port; 4838 ap->private_data = sata_port; 4839 starget->hostdata = sata_port; 4840 } else { 4841 kfree(sata_port); 4842 return -ENOMEM; 4843 } 4844 } 4845 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); 4846 4847 return 0; 4848 } 4849 4850 /** 4851 * ipr_target_destroy - Destroy a SCSI target 4852 * @starget: scsi target struct 4853 * 4854 * If the device was a SATA device, this function frees the libata 4855 * ATA port, else it does nothing. 4856 * 4857 **/ 4858 static void ipr_target_destroy(struct scsi_target *starget) 4859 { 4860 struct ipr_sata_port *sata_port = starget->hostdata; 4861 struct Scsi_Host *shost = dev_to_shost(&starget->dev); 4862 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) shost->hostdata; 4863 4864 if (ioa_cfg->sis64) { 4865 if (!ipr_find_starget(starget)) { 4866 if (starget->channel == IPR_ARRAY_VIRTUAL_BUS) 4867 clear_bit(starget->id, ioa_cfg->array_ids); 4868 else if (starget->channel == IPR_VSET_VIRTUAL_BUS) 4869 clear_bit(starget->id, ioa_cfg->vset_ids); 4870 else if (starget->channel == 0) 4871 clear_bit(starget->id, ioa_cfg->target_ids); 4872 } 4873 } 4874 4875 if (sata_port) { 4876 starget->hostdata = NULL; 4877 ata_sas_port_destroy(sata_port->ap); 4878 kfree(sata_port); 4879 } 4880 } 4881 4882 /** 4883 * ipr_find_sdev - Find device based on bus/target/lun. 
4884 * @sdev: scsi device struct 4885 * 4886 * Return value: 4887 * resource entry pointer if found / NULL if not found 4888 **/ 4889 static struct ipr_resource_entry *ipr_find_sdev(struct scsi_device *sdev) 4890 { 4891 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata; 4892 struct ipr_resource_entry *res; 4893 4894 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) { 4895 if ((res->bus == sdev->channel) && 4896 (res->target == sdev->id) && 4897 (res->lun == sdev->lun)) 4898 return res; 4899 } 4900 4901 return NULL; 4902 } 4903 4904 /** 4905 * ipr_slave_destroy - Unconfigure a SCSI device 4906 * @sdev: scsi device struct 4907 * 4908 * Return value: 4909 * nothing 4910 **/ 4911 static void ipr_slave_destroy(struct scsi_device *sdev) 4912 { 4913 struct ipr_resource_entry *res; 4914 struct ipr_ioa_cfg *ioa_cfg; 4915 unsigned long lock_flags = 0; 4916 4917 ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata; 4918 4919 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); 4920 res = (struct ipr_resource_entry *) sdev->hostdata; 4921 if (res) { 4922 if (res->sata_port) 4923 res->sata_port->ap->link.device[0].class = ATA_DEV_NONE; 4924 sdev->hostdata = NULL; 4925 res->sdev = NULL; 4926 res->sata_port = NULL; 4927 } 4928 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); 4929 } 4930 4931 /** 4932 * ipr_slave_configure - Configure a SCSI device 4933 * @sdev: scsi device struct 4934 * 4935 * This function configures the specified scsi device. 4936 * 4937 * Return value: 4938 * 0 on success 4939 **/ 4940 static int ipr_slave_configure(struct scsi_device *sdev) 4941 { 4942 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata; 4943 struct ipr_resource_entry *res; 4944 struct ata_port *ap = NULL; 4945 unsigned long lock_flags = 0; 4946 char buffer[IPR_MAX_RES_PATH_LENGTH]; 4947 4948 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); 4949 res = sdev->hostdata; 4950 if (res) { 4951 if (ipr_is_af_dasd_device(res)) 4952 sdev->type = TYPE_RAID; 4953 if (ipr_is_af_dasd_device(res) || ipr_is_ioa_resource(res)) { 4954 sdev->scsi_level = 4; 4955 sdev->no_uld_attach = 1; 4956 } 4957 if (ipr_is_vset_device(res)) { 4958 sdev->scsi_level = SCSI_SPC_3; 4959 sdev->no_report_opcodes = 1; 4960 blk_queue_rq_timeout(sdev->request_queue, 4961 IPR_VSET_RW_TIMEOUT); 4962 blk_queue_max_hw_sectors(sdev->request_queue, IPR_VSET_MAX_SECTORS); 4963 } 4964 if (ipr_is_gata(res) && res->sata_port) 4965 ap = res->sata_port->ap; 4966 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); 4967 4968 if (ap) { 4969 scsi_change_queue_depth(sdev, IPR_MAX_CMD_PER_ATA_LUN); 4970 ata_sas_slave_configure(sdev, ap); 4971 } 4972 4973 if (ioa_cfg->sis64) 4974 sdev_printk(KERN_INFO, sdev, "Resource path: %s\n", 4975 ipr_format_res_path(ioa_cfg, 4976 res->res_path, buffer, sizeof(buffer))); 4977 return 0; 4978 } 4979 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); 4980 return 0; 4981 } 4982 4983 /** 4984 * ipr_ata_slave_alloc - Prepare for commands to a SATA device 4985 * @sdev: scsi device struct 4986 * 4987 * This function initializes an ATA port so that future commands 4988 * sent through queuecommand will work. 
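 * It is invoked from ipr_slave_alloc() below when the resource is a
 * SATA (GATA) device; if port init or probe fails, the partially
 * configured device is torn down again via ipr_slave_destroy().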
4989 * 4990 * Return value: 4991 * 0 on success 4992 **/ 4993 static int ipr_ata_slave_alloc(struct scsi_device *sdev) 4994 { 4995 struct ipr_sata_port *sata_port = NULL; 4996 int rc = -ENXIO; 4997 4998 ENTER; 4999 if (sdev->sdev_target) 5000 sata_port = sdev->sdev_target->hostdata; 5001 if (sata_port) { 5002 rc = ata_sas_port_init(sata_port->ap); 5003 if (rc == 0) 5004 rc = ata_sas_sync_probe(sata_port->ap); 5005 } 5006 5007 if (rc) 5008 ipr_slave_destroy(sdev); 5009 5010 LEAVE; 5011 return rc; 5012 } 5013 5014 /** 5015 * ipr_slave_alloc - Prepare for commands to a device. 5016 * @sdev: scsi device struct 5017 * 5018 * This function saves a pointer to the resource entry 5019 * in the scsi device struct if the device exists. We 5020 * can then use this pointer in ipr_queuecommand when 5021 * handling new commands. 5022 * 5023 * Return value: 5024 * 0 on success / -ENXIO if device does not exist 5025 **/ 5026 static int ipr_slave_alloc(struct scsi_device *sdev) 5027 { 5028 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata; 5029 struct ipr_resource_entry *res; 5030 unsigned long lock_flags; 5031 int rc = -ENXIO; 5032 5033 sdev->hostdata = NULL; 5034 5035 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); 5036 5037 res = ipr_find_sdev(sdev); 5038 if (res) { 5039 res->sdev = sdev; 5040 res->add_to_ml = 0; 5041 res->in_erp = 0; 5042 sdev->hostdata = res; 5043 if (!ipr_is_naca_model(res)) 5044 res->needs_sync_complete = 1; 5045 rc = 0; 5046 if (ipr_is_gata(res)) { 5047 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); 5048 return ipr_ata_slave_alloc(sdev); 5049 } 5050 } 5051 5052 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); 5053 5054 return rc; 5055 } 5056 5057 /** 5058 * ipr_match_lun - Match function for specified LUN 5059 * @ipr_cmd: ipr command struct 5060 * @device: device to match (sdev) 5061 * 5062 * Returns: 5063 * 1 if command matches sdev / 0 if command does not match sdev 5064 **/ 5065 static int ipr_match_lun(struct ipr_cmnd *ipr_cmd, void *device) 5066 { 5067 if (ipr_cmd->scsi_cmd && ipr_cmd->scsi_cmd->device == device) 5068 return 1; 5069 return 0; 5070 } 5071 5072 /** 5073 * ipr_cmnd_is_free - Check if a command is free or not 5074 * @ipr_cmd ipr command struct 5075 * 5076 * Returns: 5077 * true / false 5078 **/ 5079 static bool ipr_cmnd_is_free(struct ipr_cmnd *ipr_cmd) 5080 { 5081 struct ipr_cmnd *loop_cmd; 5082 5083 list_for_each_entry(loop_cmd, &ipr_cmd->hrrq->hrrq_free_q, queue) { 5084 if (loop_cmd == ipr_cmd) 5085 return true; 5086 } 5087 5088 return false; 5089 } 5090 5091 /** 5092 * ipr_match_res - Match function for specified resource entry 5093 * @ipr_cmd: ipr command struct 5094 * @resource: resource entry to match 5095 * 5096 * Returns: 5097 * 1 if command matches sdev / 0 if command does not match sdev 5098 **/ 5099 static int ipr_match_res(struct ipr_cmnd *ipr_cmd, void *resource) 5100 { 5101 struct ipr_resource_entry *res = resource; 5102 5103 if (res && ipr_cmd->ioarcb.res_handle == res->res_handle) 5104 return 1; 5105 return 0; 5106 } 5107 5108 /** 5109 * ipr_wait_for_ops - Wait for matching commands to complete 5110 * @ipr_cmd: ipr command struct 5111 * @device: device to match (sdev) 5112 * @match: match function to use 5113 * 5114 * Returns: 5115 * SUCCESS / FAILED 5116 **/ 5117 static int ipr_wait_for_ops(struct ipr_ioa_cfg *ioa_cfg, void *device, 5118 int (*match)(struct ipr_cmnd *, void *)) 5119 { 5120 struct ipr_cmnd *ipr_cmd; 5121 int wait, i; 5122 unsigned long flags; 5123 struct ipr_hrr_queue 
*hrrq; 5124 signed long timeout = IPR_ABORT_TASK_TIMEOUT; 5125 DECLARE_COMPLETION_ONSTACK(comp); 5126 5127 ENTER; 5128 do { 5129 wait = 0; 5130 5131 for_each_hrrq(hrrq, ioa_cfg) { 5132 spin_lock_irqsave(hrrq->lock, flags); 5133 for (i = hrrq->min_cmd_id; i <= hrrq->max_cmd_id; i++) { 5134 ipr_cmd = ioa_cfg->ipr_cmnd_list[i]; 5135 if (!ipr_cmnd_is_free(ipr_cmd)) { 5136 if (match(ipr_cmd, device)) { 5137 ipr_cmd->eh_comp = ∁ 5138 wait++; 5139 } 5140 } 5141 } 5142 spin_unlock_irqrestore(hrrq->lock, flags); 5143 } 5144 5145 if (wait) { 5146 timeout = wait_for_completion_timeout(&comp, timeout); 5147 5148 if (!timeout) { 5149 wait = 0; 5150 5151 for_each_hrrq(hrrq, ioa_cfg) { 5152 spin_lock_irqsave(hrrq->lock, flags); 5153 for (i = hrrq->min_cmd_id; i <= hrrq->max_cmd_id; i++) { 5154 ipr_cmd = ioa_cfg->ipr_cmnd_list[i]; 5155 if (!ipr_cmnd_is_free(ipr_cmd)) { 5156 if (match(ipr_cmd, device)) { 5157 ipr_cmd->eh_comp = NULL; 5158 wait++; 5159 } 5160 } 5161 } 5162 spin_unlock_irqrestore(hrrq->lock, flags); 5163 } 5164 5165 if (wait) 5166 dev_err(&ioa_cfg->pdev->dev, "Timed out waiting for aborted commands\n"); 5167 LEAVE; 5168 return wait ? FAILED : SUCCESS; 5169 } 5170 } 5171 } while (wait); 5172 5173 LEAVE; 5174 return SUCCESS; 5175 } 5176 5177 static int ipr_eh_host_reset(struct scsi_cmnd *cmd) 5178 { 5179 struct ipr_ioa_cfg *ioa_cfg; 5180 unsigned long lock_flags = 0; 5181 int rc = SUCCESS; 5182 5183 ENTER; 5184 ioa_cfg = (struct ipr_ioa_cfg *) cmd->device->host->hostdata; 5185 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); 5186 5187 if (!ioa_cfg->in_reset_reload && !ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead) { 5188 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_ABBREV); 5189 dev_err(&ioa_cfg->pdev->dev, 5190 "Adapter being reset as a result of error recovery.\n"); 5191 5192 if (WAIT_FOR_DUMP == ioa_cfg->sdt_state) 5193 ioa_cfg->sdt_state = GET_DUMP; 5194 } 5195 5196 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); 5197 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload); 5198 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); 5199 5200 /* If we got hit with a host reset while we were already resetting 5201 the adapter for some reason, and the reset failed. */ 5202 if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead) { 5203 ipr_trace; 5204 rc = FAILED; 5205 } 5206 5207 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); 5208 LEAVE; 5209 return rc; 5210 } 5211 5212 /** 5213 * ipr_device_reset - Reset the device 5214 * @ioa_cfg: ioa config struct 5215 * @res: resource entry struct 5216 * 5217 * This function issues a device reset to the affected device. 5218 * If the device is a SCSI device, a LUN reset will be sent 5219 * to the device first. If that does not work, a target reset 5220 * will be sent. If the device is a SATA device, a PHY reset will 5221 * be sent. 
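 * The reset is issued as a blocking IOA command (IPR_RESET_DEVICE) and
 * the caller sleeps until the adapter responds or the request times
 * out; success is judged from the sense key of the returned IOASC.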
5222 * 5223 * Return value: 5224 * 0 on success / non-zero on failure 5225 **/ 5226 static int ipr_device_reset(struct ipr_ioa_cfg *ioa_cfg, 5227 struct ipr_resource_entry *res) 5228 { 5229 struct ipr_cmnd *ipr_cmd; 5230 struct ipr_ioarcb *ioarcb; 5231 struct ipr_cmd_pkt *cmd_pkt; 5232 struct ipr_ioarcb_ata_regs *regs; 5233 u32 ioasc; 5234 5235 ENTER; 5236 ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg); 5237 ioarcb = &ipr_cmd->ioarcb; 5238 cmd_pkt = &ioarcb->cmd_pkt; 5239 5240 if (ipr_cmd->ioa_cfg->sis64) { 5241 regs = &ipr_cmd->i.ata_ioadl.regs; 5242 ioarcb->add_cmd_parms_offset = cpu_to_be16(sizeof(*ioarcb)); 5243 } else 5244 regs = &ioarcb->u.add_data.u.regs; 5245 5246 ioarcb->res_handle = res->res_handle; 5247 cmd_pkt->request_type = IPR_RQTYPE_IOACMD; 5248 cmd_pkt->cdb[0] = IPR_RESET_DEVICE; 5249 if (ipr_is_gata(res)) { 5250 cmd_pkt->cdb[2] = IPR_ATA_PHY_RESET; 5251 ioarcb->add_cmd_parms_len = cpu_to_be16(sizeof(regs->flags)); 5252 regs->flags |= IPR_ATA_FLAG_STATUS_ON_GOOD_COMPLETION; 5253 } 5254 5255 ipr_send_blocking_cmd(ipr_cmd, ipr_timeout, IPR_DEVICE_RESET_TIMEOUT); 5256 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc); 5257 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q); 5258 if (ipr_is_gata(res) && res->sata_port && ioasc != IPR_IOASC_IOA_WAS_RESET) { 5259 if (ipr_cmd->ioa_cfg->sis64) 5260 memcpy(&res->sata_port->ioasa, &ipr_cmd->s.ioasa64.u.gata, 5261 sizeof(struct ipr_ioasa_gata)); 5262 else 5263 memcpy(&res->sata_port->ioasa, &ipr_cmd->s.ioasa.u.gata, 5264 sizeof(struct ipr_ioasa_gata)); 5265 } 5266 5267 LEAVE; 5268 return IPR_IOASC_SENSE_KEY(ioasc) ? -EIO : 0; 5269 } 5270 5271 /** 5272 * ipr_sata_reset - Reset the SATA port 5273 * @link: SATA link to reset 5274 * @classes: class of the attached device 5275 * 5276 * This function issues a SATA phy reset to the affected ATA link. 5277 * 5278 * Return value: 5279 * 0 on success / non-zero on failure 5280 **/ 5281 static int ipr_sata_reset(struct ata_link *link, unsigned int *classes, 5282 unsigned long deadline) 5283 { 5284 struct ipr_sata_port *sata_port = link->ap->private_data; 5285 struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg; 5286 struct ipr_resource_entry *res; 5287 unsigned long lock_flags = 0; 5288 int rc = -ENXIO, ret; 5289 5290 ENTER; 5291 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); 5292 while (ioa_cfg->in_reset_reload) { 5293 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); 5294 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload); 5295 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); 5296 } 5297 5298 res = sata_port->res; 5299 if (res) { 5300 rc = ipr_device_reset(ioa_cfg, res); 5301 *classes = res->ata_class; 5302 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); 5303 5304 ret = ipr_wait_for_ops(ioa_cfg, res, ipr_match_res); 5305 if (ret != SUCCESS) { 5306 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); 5307 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_ABBREV); 5308 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); 5309 5310 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload); 5311 } 5312 } else 5313 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); 5314 5315 LEAVE; 5316 return rc; 5317 } 5318 5319 /** 5320 * ipr_eh_dev_reset - Reset the device 5321 * @scsi_cmd: scsi command struct 5322 * 5323 * This function issues a device reset to the affected device. 5324 * A LUN reset will be sent to the device first. If that does 5325 * not work, a target reset will be sent. 
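 * Called with the host lock held by the ipr_eh_dev_reset() wrapper
 * below. For SATA devices any outstanding commands are first flagged
 * as failed so that libata error handling can reclaim them.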
5326 * 5327 * Return value: 5328 * SUCCESS / FAILED 5329 **/ 5330 static int __ipr_eh_dev_reset(struct scsi_cmnd *scsi_cmd) 5331 { 5332 struct ipr_cmnd *ipr_cmd; 5333 struct ipr_ioa_cfg *ioa_cfg; 5334 struct ipr_resource_entry *res; 5335 struct ata_port *ap; 5336 int rc = 0, i; 5337 struct ipr_hrr_queue *hrrq; 5338 5339 ENTER; 5340 ioa_cfg = (struct ipr_ioa_cfg *) scsi_cmd->device->host->hostdata; 5341 res = scsi_cmd->device->hostdata; 5342 5343 /* 5344 * If we are currently going through reset/reload, return failed. This will force the 5345 * mid-layer to call ipr_eh_host_reset, which will then go to sleep and wait for the 5346 * reset to complete 5347 */ 5348 if (ioa_cfg->in_reset_reload) 5349 return FAILED; 5350 if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead) 5351 return FAILED; 5352 5353 for_each_hrrq(hrrq, ioa_cfg) { 5354 spin_lock(&hrrq->_lock); 5355 for (i = hrrq->min_cmd_id; i <= hrrq->max_cmd_id; i++) { 5356 ipr_cmd = ioa_cfg->ipr_cmnd_list[i]; 5357 5358 if (ipr_cmd->ioarcb.res_handle == res->res_handle) { 5359 if (!ipr_cmd->qc) 5360 continue; 5361 if (ipr_cmnd_is_free(ipr_cmd)) 5362 continue; 5363 5364 ipr_cmd->done = ipr_sata_eh_done; 5365 if (!(ipr_cmd->qc->flags & ATA_QCFLAG_FAILED)) { 5366 ipr_cmd->qc->err_mask |= AC_ERR_TIMEOUT; 5367 ipr_cmd->qc->flags |= ATA_QCFLAG_FAILED; 5368 } 5369 } 5370 } 5371 spin_unlock(&hrrq->_lock); 5372 } 5373 res->resetting_device = 1; 5374 scmd_printk(KERN_ERR, scsi_cmd, "Resetting device\n"); 5375 5376 if (ipr_is_gata(res) && res->sata_port) { 5377 ap = res->sata_port->ap; 5378 spin_unlock_irq(scsi_cmd->device->host->host_lock); 5379 ata_std_error_handler(ap); 5380 spin_lock_irq(scsi_cmd->device->host->host_lock); 5381 } else 5382 rc = ipr_device_reset(ioa_cfg, res); 5383 res->resetting_device = 0; 5384 res->reset_occurred = 1; 5385 5386 LEAVE; 5387 return rc ? FAILED : SUCCESS; 5388 } 5389 5390 static int ipr_eh_dev_reset(struct scsi_cmnd *cmd) 5391 { 5392 int rc; 5393 struct ipr_ioa_cfg *ioa_cfg; 5394 struct ipr_resource_entry *res; 5395 5396 ioa_cfg = (struct ipr_ioa_cfg *) cmd->device->host->hostdata; 5397 res = cmd->device->hostdata; 5398 5399 if (!res) 5400 return FAILED; 5401 5402 spin_lock_irq(cmd->device->host->host_lock); 5403 rc = __ipr_eh_dev_reset(cmd); 5404 spin_unlock_irq(cmd->device->host->host_lock); 5405 5406 if (rc == SUCCESS) { 5407 if (ipr_is_gata(res) && res->sata_port) 5408 rc = ipr_wait_for_ops(ioa_cfg, res, ipr_match_res); 5409 else 5410 rc = ipr_wait_for_ops(ioa_cfg, cmd->device, ipr_match_lun); 5411 } 5412 5413 return rc; 5414 } 5415 5416 /** 5417 * ipr_bus_reset_done - Op done function for bus reset. 
5418 * @ipr_cmd: ipr command struct 5419 * 5420 * This function is the op done function for a bus reset 5421 * 5422 * Return value: 5423 * none 5424 **/ 5425 static void ipr_bus_reset_done(struct ipr_cmnd *ipr_cmd) 5426 { 5427 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; 5428 struct ipr_resource_entry *res; 5429 5430 ENTER; 5431 if (!ioa_cfg->sis64) 5432 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) { 5433 if (res->res_handle == ipr_cmd->ioarcb.res_handle) { 5434 scsi_report_bus_reset(ioa_cfg->host, res->bus); 5435 break; 5436 } 5437 } 5438 5439 /* 5440 * If abort has not completed, indicate the reset has, else call the 5441 * abort's done function to wake the sleeping eh thread 5442 */ 5443 if (ipr_cmd->sibling->sibling) 5444 ipr_cmd->sibling->sibling = NULL; 5445 else 5446 ipr_cmd->sibling->done(ipr_cmd->sibling); 5447 5448 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q); 5449 LEAVE; 5450 } 5451 5452 /** 5453 * ipr_abort_timeout - An abort task has timed out 5454 * @ipr_cmd: ipr command struct 5455 * 5456 * This function handles when an abort task times out. If this 5457 * happens we issue a bus reset since we have resources tied 5458 * up that must be freed before returning to the midlayer. 5459 * 5460 * Return value: 5461 * none 5462 **/ 5463 static void ipr_abort_timeout(struct timer_list *t) 5464 { 5465 struct ipr_cmnd *ipr_cmd = from_timer(ipr_cmd, t, timer); 5466 struct ipr_cmnd *reset_cmd; 5467 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; 5468 struct ipr_cmd_pkt *cmd_pkt; 5469 unsigned long lock_flags = 0; 5470 5471 ENTER; 5472 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); 5473 if (ipr_cmd->completion.done || ioa_cfg->in_reset_reload) { 5474 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); 5475 return; 5476 } 5477 5478 sdev_printk(KERN_ERR, ipr_cmd->u.sdev, "Abort timed out. Resetting bus.\n"); 5479 reset_cmd = ipr_get_free_ipr_cmnd(ioa_cfg); 5480 ipr_cmd->sibling = reset_cmd; 5481 reset_cmd->sibling = ipr_cmd; 5482 reset_cmd->ioarcb.res_handle = ipr_cmd->ioarcb.res_handle; 5483 cmd_pkt = &reset_cmd->ioarcb.cmd_pkt; 5484 cmd_pkt->request_type = IPR_RQTYPE_IOACMD; 5485 cmd_pkt->cdb[0] = IPR_RESET_DEVICE; 5486 cmd_pkt->cdb[2] = IPR_RESET_TYPE_SELECT | IPR_BUS_RESET; 5487 5488 ipr_do_req(reset_cmd, ipr_bus_reset_done, ipr_timeout, IPR_DEVICE_RESET_TIMEOUT); 5489 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); 5490 LEAVE; 5491 } 5492 5493 /** 5494 * ipr_cancel_op - Cancel specified op 5495 * @scsi_cmd: scsi command struct 5496 * 5497 * This function cancels specified op. 5498 * 5499 * Return value: 5500 * SUCCESS / FAILED 5501 **/ 5502 static int ipr_cancel_op(struct scsi_cmnd *scsi_cmd) 5503 { 5504 struct ipr_cmnd *ipr_cmd; 5505 struct ipr_ioa_cfg *ioa_cfg; 5506 struct ipr_resource_entry *res; 5507 struct ipr_cmd_pkt *cmd_pkt; 5508 u32 ioasc, int_reg; 5509 int i, op_found = 0; 5510 struct ipr_hrr_queue *hrrq; 5511 5512 ENTER; 5513 ioa_cfg = (struct ipr_ioa_cfg *)scsi_cmd->device->host->hostdata; 5514 res = scsi_cmd->device->hostdata; 5515 5516 /* If we are currently going through reset/reload, return failed. 5517 * This will force the mid-layer to call ipr_eh_host_reset, 5518 * which will then go to sleep and wait for the reset to complete 5519 */ 5520 if (ioa_cfg->in_reset_reload || 5521 ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead) 5522 return FAILED; 5523 if (!res) 5524 return FAILED; 5525 5526 /* 5527 * If we are aborting a timed out op, chances are that the timeout was caused 5528 * by a still not detected EEH error. 
In such cases, reading a register will
5529 * trigger the EEH recovery infrastructure.
5530 */
5531 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
5532
5533 if (!ipr_is_gscsi(res))
5534 return FAILED;
5535
5536 for_each_hrrq(hrrq, ioa_cfg) {
5537 spin_lock(&hrrq->_lock);
5538 for (i = hrrq->min_cmd_id; i <= hrrq->max_cmd_id; i++) {
5539 if (ioa_cfg->ipr_cmnd_list[i]->scsi_cmd == scsi_cmd) {
5540 if (!ipr_cmnd_is_free(ioa_cfg->ipr_cmnd_list[i])) {
5541 op_found = 1;
5542 break;
5543 }
5544 }
5545 }
5546 spin_unlock(&hrrq->_lock);
5547 }
5548
5549 if (!op_found)
5550 return SUCCESS;
5551
5552 ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
5553 ipr_cmd->ioarcb.res_handle = res->res_handle;
5554 cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt;
5555 cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
5556 cmd_pkt->cdb[0] = IPR_CANCEL_ALL_REQUESTS;
5557 ipr_cmd->u.sdev = scsi_cmd->device;
5558
5559 scmd_printk(KERN_ERR, scsi_cmd, "Aborting command: %02X\n",
5560 scsi_cmd->cmnd[0]);
5561 ipr_send_blocking_cmd(ipr_cmd, ipr_abort_timeout, IPR_CANCEL_ALL_TIMEOUT);
5562 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
5563
5564 /*
5565 * If the abort task timed out and we sent a bus reset, we will get
5566 * one of the following responses to the abort
5567 */
5568 if (ioasc == IPR_IOASC_BUS_WAS_RESET || ioasc == IPR_IOASC_SYNC_REQUIRED) {
5569 ioasc = 0;
5570 ipr_trace;
5571 }
5572
5573 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
5574 if (!ipr_is_naca_model(res))
5575 res->needs_sync_complete = 1;
5576
5577 LEAVE;
5578 return IPR_IOASC_SENSE_KEY(ioasc) ? FAILED : SUCCESS;
5579 }
5580
5581 /**
5582 * ipr_scan_finished - Report whether the scan of the host has completed
5583 * @shost: scsi host struct
5584 * @elapsed_time: elapsed time of the scan in jiffies
5585 * Return value:
5586 * 0 if scan in progress / 1 if scan is complete
5587 **/
5588 static int ipr_scan_finished(struct Scsi_Host *shost, unsigned long elapsed_time)
5589 {
5590 unsigned long lock_flags;
5591 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) shost->hostdata;
5592 int rc = 0;
5593
5594 spin_lock_irqsave(shost->host_lock, lock_flags);
5595 if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead || ioa_cfg->scan_done)
5596 rc = 1;
5597 if ((elapsed_time/HZ) > (ioa_cfg->transop_timeout * 2))
5598 rc = 1;
5599 spin_unlock_irqrestore(shost->host_lock, lock_flags);
5600 return rc;
5601 }
5602
5603 /**
5604 * ipr_eh_abort - Abort a single op
5605 * @scsi_cmd: scsi command struct
5606 *
5607 * Return value:
5608 * SUCCESS / FAILED
5609 **/
5610 static int ipr_eh_abort(struct scsi_cmnd *scsi_cmd)
5611 {
5612 unsigned long flags;
5613 int rc;
5614 struct ipr_ioa_cfg *ioa_cfg;
5615
5616 ENTER;
5617
5618 ioa_cfg = (struct ipr_ioa_cfg *) scsi_cmd->device->host->hostdata;
5619
5620 spin_lock_irqsave(scsi_cmd->device->host->host_lock, flags);
5621 rc = ipr_cancel_op(scsi_cmd);
5622 spin_unlock_irqrestore(scsi_cmd->device->host->host_lock, flags);
5623
5624 if (rc == SUCCESS)
5625 rc = ipr_wait_for_ops(ioa_cfg, scsi_cmd->device, ipr_match_lun);
5626 LEAVE;
5627 return rc;
5628 }
5629
5630 /**
5631 * ipr_handle_other_interrupt - Handle "other" interrupts
5632 * @ioa_cfg: ioa config struct
5633 * @int_reg: interrupt register
5634 *
5635 * Return value:
5636 * IRQ_NONE / IRQ_HANDLED
5637 **/
5638 static irqreturn_t ipr_handle_other_interrupt(struct ipr_ioa_cfg *ioa_cfg,
5639 u32 int_reg)
5640 {
5641 irqreturn_t rc = IRQ_HANDLED;
5642 u32 int_mask_reg;
5643
5644 int_mask_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg32);
5645 int_reg &= ~int_mask_reg;
5646
5647 /* If an interrupt on the adapter did not occur, ignore it.
5648 * Or in the case of SIS 64, check for a stage change interrupt. 5649 */ 5650 if ((int_reg & IPR_PCII_OPER_INTERRUPTS) == 0) { 5651 if (ioa_cfg->sis64) { 5652 int_mask_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg); 5653 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg) & ~int_mask_reg; 5654 if (int_reg & IPR_PCII_IPL_STAGE_CHANGE) { 5655 5656 /* clear stage change */ 5657 writel(IPR_PCII_IPL_STAGE_CHANGE, ioa_cfg->regs.clr_interrupt_reg); 5658 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg) & ~int_mask_reg; 5659 list_del(&ioa_cfg->reset_cmd->queue); 5660 del_timer(&ioa_cfg->reset_cmd->timer); 5661 ipr_reset_ioa_job(ioa_cfg->reset_cmd); 5662 return IRQ_HANDLED; 5663 } 5664 } 5665 5666 return IRQ_NONE; 5667 } 5668 5669 if (int_reg & IPR_PCII_IOA_TRANS_TO_OPER) { 5670 /* Mask the interrupt */ 5671 writel(IPR_PCII_IOA_TRANS_TO_OPER, ioa_cfg->regs.set_interrupt_mask_reg); 5672 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg); 5673 5674 list_del(&ioa_cfg->reset_cmd->queue); 5675 del_timer(&ioa_cfg->reset_cmd->timer); 5676 ipr_reset_ioa_job(ioa_cfg->reset_cmd); 5677 } else if ((int_reg & IPR_PCII_HRRQ_UPDATED) == int_reg) { 5678 if (ioa_cfg->clear_isr) { 5679 if (ipr_debug && printk_ratelimit()) 5680 dev_err(&ioa_cfg->pdev->dev, 5681 "Spurious interrupt detected. 0x%08X\n", int_reg); 5682 writel(IPR_PCII_HRRQ_UPDATED, ioa_cfg->regs.clr_interrupt_reg32); 5683 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32); 5684 return IRQ_NONE; 5685 } 5686 } else { 5687 if (int_reg & IPR_PCII_IOA_UNIT_CHECKED) 5688 ioa_cfg->ioa_unit_checked = 1; 5689 else if (int_reg & IPR_PCII_NO_HOST_RRQ) 5690 dev_err(&ioa_cfg->pdev->dev, 5691 "No Host RRQ. 0x%08X\n", int_reg); 5692 else 5693 dev_err(&ioa_cfg->pdev->dev, 5694 "Permanent IOA failure. 0x%08X\n", int_reg); 5695 5696 if (WAIT_FOR_DUMP == ioa_cfg->sdt_state) 5697 ioa_cfg->sdt_state = GET_DUMP; 5698 5699 ipr_mask_and_clear_interrupts(ioa_cfg, ~0); 5700 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE); 5701 } 5702 5703 return rc; 5704 } 5705 5706 /** 5707 * ipr_isr_eh - Interrupt service routine error handler 5708 * @ioa_cfg: ioa config struct 5709 * @msg: message to log 5710 * 5711 * Return value: 5712 * none 5713 **/ 5714 static void ipr_isr_eh(struct ipr_ioa_cfg *ioa_cfg, char *msg, u16 number) 5715 { 5716 ioa_cfg->errors_logged++; 5717 dev_err(&ioa_cfg->pdev->dev, "%s %d\n", msg, number); 5718 5719 if (WAIT_FOR_DUMP == ioa_cfg->sdt_state) 5720 ioa_cfg->sdt_state = GET_DUMP; 5721 5722 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE); 5723 } 5724 5725 static int ipr_process_hrrq(struct ipr_hrr_queue *hrr_queue, int budget, 5726 struct list_head *doneq) 5727 { 5728 u32 ioasc; 5729 u16 cmd_index; 5730 struct ipr_cmnd *ipr_cmd; 5731 struct ipr_ioa_cfg *ioa_cfg = hrr_queue->ioa_cfg; 5732 int num_hrrq = 0; 5733 5734 /* If interrupts are disabled, ignore the interrupt */ 5735 if (!hrr_queue->allow_interrupts) 5736 return 0; 5737 5738 while ((be32_to_cpu(*hrr_queue->hrrq_curr) & IPR_HRRQ_TOGGLE_BIT) == 5739 hrr_queue->toggle_bit) { 5740 5741 cmd_index = (be32_to_cpu(*hrr_queue->hrrq_curr) & 5742 IPR_HRRQ_REQ_RESP_HANDLE_MASK) >> 5743 IPR_HRRQ_REQ_RESP_HANDLE_SHIFT; 5744 5745 if (unlikely(cmd_index > hrr_queue->max_cmd_id || 5746 cmd_index < hrr_queue->min_cmd_id)) { 5747 ipr_isr_eh(ioa_cfg, 5748 "Invalid response handle from IOA: ", 5749 cmd_index); 5750 break; 5751 } 5752 5753 ipr_cmd = ioa_cfg->ipr_cmnd_list[cmd_index]; 5754 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc); 5755 5756 ipr_trc_hook(ipr_cmd, IPR_TRACE_FINISH, ioasc); 5757 5758 
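/* The completed command is moved onto the caller-supplied done list here;
 * the callers (ipr_isr, ipr_isr_mhrrq and ipr_iopoll) run the done
 * functions only after dropping the HRRQ lock, keeping the time spent
 * under the lock short. */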
list_move_tail(&ipr_cmd->queue, doneq); 5759 5760 if (hrr_queue->hrrq_curr < hrr_queue->hrrq_end) { 5761 hrr_queue->hrrq_curr++; 5762 } else { 5763 hrr_queue->hrrq_curr = hrr_queue->hrrq_start; 5764 hrr_queue->toggle_bit ^= 1u; 5765 } 5766 num_hrrq++; 5767 if (budget > 0 && num_hrrq >= budget) 5768 break; 5769 } 5770 5771 return num_hrrq; 5772 } 5773 5774 static int ipr_iopoll(struct irq_poll *iop, int budget) 5775 { 5776 struct ipr_ioa_cfg *ioa_cfg; 5777 struct ipr_hrr_queue *hrrq; 5778 struct ipr_cmnd *ipr_cmd, *temp; 5779 unsigned long hrrq_flags; 5780 int completed_ops; 5781 LIST_HEAD(doneq); 5782 5783 hrrq = container_of(iop, struct ipr_hrr_queue, iopoll); 5784 ioa_cfg = hrrq->ioa_cfg; 5785 5786 spin_lock_irqsave(hrrq->lock, hrrq_flags); 5787 completed_ops = ipr_process_hrrq(hrrq, budget, &doneq); 5788 5789 if (completed_ops < budget) 5790 irq_poll_complete(iop); 5791 spin_unlock_irqrestore(hrrq->lock, hrrq_flags); 5792 5793 list_for_each_entry_safe(ipr_cmd, temp, &doneq, queue) { 5794 list_del(&ipr_cmd->queue); 5795 del_timer(&ipr_cmd->timer); 5796 ipr_cmd->fast_done(ipr_cmd); 5797 } 5798 5799 return completed_ops; 5800 } 5801 5802 /** 5803 * ipr_isr - Interrupt service routine 5804 * @irq: irq number 5805 * @devp: pointer to ioa config struct 5806 * 5807 * Return value: 5808 * IRQ_NONE / IRQ_HANDLED 5809 **/ 5810 static irqreturn_t ipr_isr(int irq, void *devp) 5811 { 5812 struct ipr_hrr_queue *hrrq = (struct ipr_hrr_queue *)devp; 5813 struct ipr_ioa_cfg *ioa_cfg = hrrq->ioa_cfg; 5814 unsigned long hrrq_flags = 0; 5815 u32 int_reg = 0; 5816 int num_hrrq = 0; 5817 int irq_none = 0; 5818 struct ipr_cmnd *ipr_cmd, *temp; 5819 irqreturn_t rc = IRQ_NONE; 5820 LIST_HEAD(doneq); 5821 5822 spin_lock_irqsave(hrrq->lock, hrrq_flags); 5823 /* If interrupts are disabled, ignore the interrupt */ 5824 if (!hrrq->allow_interrupts) { 5825 spin_unlock_irqrestore(hrrq->lock, hrrq_flags); 5826 return IRQ_NONE; 5827 } 5828 5829 while (1) { 5830 if (ipr_process_hrrq(hrrq, -1, &doneq)) { 5831 rc = IRQ_HANDLED; 5832 5833 if (!ioa_cfg->clear_isr) 5834 break; 5835 5836 /* Clear the PCI interrupt */ 5837 num_hrrq = 0; 5838 do { 5839 writel(IPR_PCII_HRRQ_UPDATED, 5840 ioa_cfg->regs.clr_interrupt_reg32); 5841 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32); 5842 } while (int_reg & IPR_PCII_HRRQ_UPDATED && 5843 num_hrrq++ < IPR_MAX_HRRQ_RETRIES); 5844 5845 } else if (rc == IRQ_NONE && irq_none == 0) { 5846 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32); 5847 irq_none++; 5848 } else if (num_hrrq == IPR_MAX_HRRQ_RETRIES && 5849 int_reg & IPR_PCII_HRRQ_UPDATED) { 5850 ipr_isr_eh(ioa_cfg, 5851 "Error clearing HRRQ: ", num_hrrq); 5852 rc = IRQ_HANDLED; 5853 break; 5854 } else 5855 break; 5856 } 5857 5858 if (unlikely(rc == IRQ_NONE)) 5859 rc = ipr_handle_other_interrupt(ioa_cfg, int_reg); 5860 5861 spin_unlock_irqrestore(hrrq->lock, hrrq_flags); 5862 list_for_each_entry_safe(ipr_cmd, temp, &doneq, queue) { 5863 list_del(&ipr_cmd->queue); 5864 del_timer(&ipr_cmd->timer); 5865 ipr_cmd->fast_done(ipr_cmd); 5866 } 5867 return rc; 5868 } 5869 5870 /** 5871 * ipr_isr_mhrrq - Interrupt service routine 5872 * @irq: irq number 5873 * @devp: pointer to ioa config struct 5874 * 5875 * Return value: 5876 * IRQ_NONE / IRQ_HANDLED 5877 **/ 5878 static irqreturn_t ipr_isr_mhrrq(int irq, void *devp) 5879 { 5880 struct ipr_hrr_queue *hrrq = (struct ipr_hrr_queue *)devp; 5881 struct ipr_ioa_cfg *ioa_cfg = hrrq->ioa_cfg; 5882 unsigned long hrrq_flags = 0; 5883 struct ipr_cmnd *ipr_cmd, *temp; 5884 irqreturn_t rc = IRQ_NONE; 
5885 LIST_HEAD(doneq); 5886 5887 spin_lock_irqsave(hrrq->lock, hrrq_flags); 5888 5889 /* If interrupts are disabled, ignore the interrupt */ 5890 if (!hrrq->allow_interrupts) { 5891 spin_unlock_irqrestore(hrrq->lock, hrrq_flags); 5892 return IRQ_NONE; 5893 } 5894 5895 if (ioa_cfg->iopoll_weight && ioa_cfg->sis64 && ioa_cfg->nvectors > 1) { 5896 if ((be32_to_cpu(*hrrq->hrrq_curr) & IPR_HRRQ_TOGGLE_BIT) == 5897 hrrq->toggle_bit) { 5898 irq_poll_sched(&hrrq->iopoll); 5899 spin_unlock_irqrestore(hrrq->lock, hrrq_flags); 5900 return IRQ_HANDLED; 5901 } 5902 } else { 5903 if ((be32_to_cpu(*hrrq->hrrq_curr) & IPR_HRRQ_TOGGLE_BIT) == 5904 hrrq->toggle_bit) 5905 5906 if (ipr_process_hrrq(hrrq, -1, &doneq)) 5907 rc = IRQ_HANDLED; 5908 } 5909 5910 spin_unlock_irqrestore(hrrq->lock, hrrq_flags); 5911 5912 list_for_each_entry_safe(ipr_cmd, temp, &doneq, queue) { 5913 list_del(&ipr_cmd->queue); 5914 del_timer(&ipr_cmd->timer); 5915 ipr_cmd->fast_done(ipr_cmd); 5916 } 5917 return rc; 5918 } 5919 5920 /** 5921 * ipr_build_ioadl64 - Build a scatter/gather list and map the buffer 5922 * @ioa_cfg: ioa config struct 5923 * @ipr_cmd: ipr command struct 5924 * 5925 * Return value: 5926 * 0 on success / -1 on failure 5927 **/ 5928 static int ipr_build_ioadl64(struct ipr_ioa_cfg *ioa_cfg, 5929 struct ipr_cmnd *ipr_cmd) 5930 { 5931 int i, nseg; 5932 struct scatterlist *sg; 5933 u32 length; 5934 u32 ioadl_flags = 0; 5935 struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd; 5936 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb; 5937 struct ipr_ioadl64_desc *ioadl64 = ipr_cmd->i.ioadl64; 5938 5939 length = scsi_bufflen(scsi_cmd); 5940 if (!length) 5941 return 0; 5942 5943 nseg = scsi_dma_map(scsi_cmd); 5944 if (nseg < 0) { 5945 if (printk_ratelimit()) 5946 dev_err(&ioa_cfg->pdev->dev, "scsi_dma_map failed!\n"); 5947 return -1; 5948 } 5949 5950 ipr_cmd->dma_use_sg = nseg; 5951 5952 ioarcb->data_transfer_length = cpu_to_be32(length); 5953 ioarcb->ioadl_len = 5954 cpu_to_be32(sizeof(struct ipr_ioadl64_desc) * ipr_cmd->dma_use_sg); 5955 5956 if (scsi_cmd->sc_data_direction == DMA_TO_DEVICE) { 5957 ioadl_flags = IPR_IOADL_FLAGS_WRITE; 5958 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ; 5959 } else if (scsi_cmd->sc_data_direction == DMA_FROM_DEVICE) 5960 ioadl_flags = IPR_IOADL_FLAGS_READ; 5961 5962 scsi_for_each_sg(scsi_cmd, sg, ipr_cmd->dma_use_sg, i) { 5963 ioadl64[i].flags = cpu_to_be32(ioadl_flags); 5964 ioadl64[i].data_len = cpu_to_be32(sg_dma_len(sg)); 5965 ioadl64[i].address = cpu_to_be64(sg_dma_address(sg)); 5966 } 5967 5968 ioadl64[i-1].flags |= cpu_to_be32(IPR_IOADL_FLAGS_LAST); 5969 return 0; 5970 } 5971 5972 /** 5973 * ipr_build_ioadl - Build a scatter/gather list and map the buffer 5974 * @ioa_cfg: ioa config struct 5975 * @ipr_cmd: ipr command struct 5976 * 5977 * Return value: 5978 * 0 on success / -1 on failure 5979 **/ 5980 static int ipr_build_ioadl(struct ipr_ioa_cfg *ioa_cfg, 5981 struct ipr_cmnd *ipr_cmd) 5982 { 5983 int i, nseg; 5984 struct scatterlist *sg; 5985 u32 length; 5986 u32 ioadl_flags = 0; 5987 struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd; 5988 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb; 5989 struct ipr_ioadl_desc *ioadl = ipr_cmd->i.ioadl; 5990 5991 length = scsi_bufflen(scsi_cmd); 5992 if (!length) 5993 return 0; 5994 5995 nseg = scsi_dma_map(scsi_cmd); 5996 if (nseg < 0) { 5997 dev_err(&ioa_cfg->pdev->dev, "scsi_dma_map failed!\n"); 5998 return -1; 5999 } 6000 6001 ipr_cmd->dma_use_sg = nseg; 6002 6003 if (scsi_cmd->sc_data_direction == DMA_TO_DEVICE) { 6004 ioadl_flags = 
IPR_IOADL_FLAGS_WRITE; 6005 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ; 6006 ioarcb->data_transfer_length = cpu_to_be32(length); 6007 ioarcb->ioadl_len = 6008 cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg); 6009 } else if (scsi_cmd->sc_data_direction == DMA_FROM_DEVICE) { 6010 ioadl_flags = IPR_IOADL_FLAGS_READ; 6011 ioarcb->read_data_transfer_length = cpu_to_be32(length); 6012 ioarcb->read_ioadl_len = 6013 cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg); 6014 } 6015 6016 if (ipr_cmd->dma_use_sg <= ARRAY_SIZE(ioarcb->u.add_data.u.ioadl)) { 6017 ioadl = ioarcb->u.add_data.u.ioadl; 6018 ioarcb->write_ioadl_addr = cpu_to_be32((ipr_cmd->dma_addr) + 6019 offsetof(struct ipr_ioarcb, u.add_data)); 6020 ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr; 6021 } 6022 6023 scsi_for_each_sg(scsi_cmd, sg, ipr_cmd->dma_use_sg, i) { 6024 ioadl[i].flags_and_data_len = 6025 cpu_to_be32(ioadl_flags | sg_dma_len(sg)); 6026 ioadl[i].address = cpu_to_be32(sg_dma_address(sg)); 6027 } 6028 6029 ioadl[i-1].flags_and_data_len |= cpu_to_be32(IPR_IOADL_FLAGS_LAST); 6030 return 0; 6031 } 6032 6033 /** 6034 * __ipr_erp_done - Process completion of ERP for a device 6035 * @ipr_cmd: ipr command struct 6036 * 6037 * This function copies the sense buffer into the scsi_cmd 6038 * struct and pushes the scsi_done function. 6039 * 6040 * Return value: 6041 * nothing 6042 **/ 6043 static void __ipr_erp_done(struct ipr_cmnd *ipr_cmd) 6044 { 6045 struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd; 6046 struct ipr_resource_entry *res = scsi_cmd->device->hostdata; 6047 u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc); 6048 6049 if (IPR_IOASC_SENSE_KEY(ioasc) > 0) { 6050 scsi_cmd->result |= (DID_ERROR << 16); 6051 scmd_printk(KERN_ERR, scsi_cmd, 6052 "Request Sense failed with IOASC: 0x%08X\n", ioasc); 6053 } else { 6054 memcpy(scsi_cmd->sense_buffer, ipr_cmd->sense_buffer, 6055 SCSI_SENSE_BUFFERSIZE); 6056 } 6057 6058 if (res) { 6059 if (!ipr_is_naca_model(res)) 6060 res->needs_sync_complete = 1; 6061 res->in_erp = 0; 6062 } 6063 scsi_dma_unmap(ipr_cmd->scsi_cmd); 6064 scsi_cmd->scsi_done(scsi_cmd); 6065 if (ipr_cmd->eh_comp) 6066 complete(ipr_cmd->eh_comp); 6067 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q); 6068 } 6069 6070 /** 6071 * ipr_erp_done - Process completion of ERP for a device 6072 * @ipr_cmd: ipr command struct 6073 * 6074 * This function copies the sense buffer into the scsi_cmd 6075 * struct and pushes the scsi_done function. 
6076 * 6077 * Return value: 6078 * nothing 6079 **/ 6080 static void ipr_erp_done(struct ipr_cmnd *ipr_cmd) 6081 { 6082 struct ipr_hrr_queue *hrrq = ipr_cmd->hrrq; 6083 unsigned long hrrq_flags; 6084 6085 spin_lock_irqsave(&hrrq->_lock, hrrq_flags); 6086 __ipr_erp_done(ipr_cmd); 6087 spin_unlock_irqrestore(&hrrq->_lock, hrrq_flags); 6088 } 6089 6090 /** 6091 * ipr_reinit_ipr_cmnd_for_erp - Re-initialize a cmnd block to be used for ERP 6092 * @ipr_cmd: ipr command struct 6093 * 6094 * Return value: 6095 * none 6096 **/ 6097 static void ipr_reinit_ipr_cmnd_for_erp(struct ipr_cmnd *ipr_cmd) 6098 { 6099 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb; 6100 struct ipr_ioasa *ioasa = &ipr_cmd->s.ioasa; 6101 dma_addr_t dma_addr = ipr_cmd->dma_addr; 6102 6103 memset(&ioarcb->cmd_pkt, 0, sizeof(struct ipr_cmd_pkt)); 6104 ioarcb->data_transfer_length = 0; 6105 ioarcb->read_data_transfer_length = 0; 6106 ioarcb->ioadl_len = 0; 6107 ioarcb->read_ioadl_len = 0; 6108 ioasa->hdr.ioasc = 0; 6109 ioasa->hdr.residual_data_len = 0; 6110 6111 if (ipr_cmd->ioa_cfg->sis64) 6112 ioarcb->u.sis64_addr_data.data_ioadl_addr = 6113 cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, i.ioadl64)); 6114 else { 6115 ioarcb->write_ioadl_addr = 6116 cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, i.ioadl)); 6117 ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr; 6118 } 6119 } 6120 6121 /** 6122 * __ipr_erp_request_sense - Send request sense to a device 6123 * @ipr_cmd: ipr command struct 6124 * 6125 * This function sends a request sense to a device as a result 6126 * of a check condition. 6127 * 6128 * Return value: 6129 * nothing 6130 **/ 6131 static void __ipr_erp_request_sense(struct ipr_cmnd *ipr_cmd) 6132 { 6133 struct ipr_cmd_pkt *cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt; 6134 u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc); 6135 6136 if (IPR_IOASC_SENSE_KEY(ioasc) > 0) { 6137 __ipr_erp_done(ipr_cmd); 6138 return; 6139 } 6140 6141 ipr_reinit_ipr_cmnd_for_erp(ipr_cmd); 6142 6143 cmd_pkt->request_type = IPR_RQTYPE_SCSICDB; 6144 cmd_pkt->cdb[0] = REQUEST_SENSE; 6145 cmd_pkt->cdb[4] = SCSI_SENSE_BUFFERSIZE; 6146 cmd_pkt->flags_hi |= IPR_FLAGS_HI_SYNC_OVERRIDE; 6147 cmd_pkt->flags_hi |= IPR_FLAGS_HI_NO_ULEN_CHK; 6148 cmd_pkt->timeout = cpu_to_be16(IPR_REQUEST_SENSE_TIMEOUT / HZ); 6149 6150 ipr_init_ioadl(ipr_cmd, ipr_cmd->sense_buffer_dma, 6151 SCSI_SENSE_BUFFERSIZE, IPR_IOADL_FLAGS_READ_LAST); 6152 6153 ipr_do_req(ipr_cmd, ipr_erp_done, ipr_timeout, 6154 IPR_REQUEST_SENSE_TIMEOUT * 2); 6155 } 6156 6157 /** 6158 * ipr_erp_request_sense - Send request sense to a device 6159 * @ipr_cmd: ipr command struct 6160 * 6161 * This function sends a request sense to a device as a result 6162 * of a check condition. 6163 * 6164 * Return value: 6165 * nothing 6166 **/ 6167 static void ipr_erp_request_sense(struct ipr_cmnd *ipr_cmd) 6168 { 6169 struct ipr_hrr_queue *hrrq = ipr_cmd->hrrq; 6170 unsigned long hrrq_flags; 6171 6172 spin_lock_irqsave(&hrrq->_lock, hrrq_flags); 6173 __ipr_erp_request_sense(ipr_cmd); 6174 spin_unlock_irqrestore(&hrrq->_lock, hrrq_flags); 6175 } 6176 6177 /** 6178 * ipr_erp_cancel_all - Send cancel all to a device 6179 * @ipr_cmd: ipr command struct 6180 * 6181 * This function sends a cancel all to a device to clear the 6182 * queue. If we are running TCQ on the device, QERR is set to 1, 6183 * which means all outstanding ops have been dropped on the floor. 6184 * Cancel all will return them to us. 
6185 * 6186 * Return value: 6187 * nothing 6188 **/ 6189 static void ipr_erp_cancel_all(struct ipr_cmnd *ipr_cmd) 6190 { 6191 struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd; 6192 struct ipr_resource_entry *res = scsi_cmd->device->hostdata; 6193 struct ipr_cmd_pkt *cmd_pkt; 6194 6195 res->in_erp = 1; 6196 6197 ipr_reinit_ipr_cmnd_for_erp(ipr_cmd); 6198 6199 if (!scsi_cmd->device->simple_tags) { 6200 __ipr_erp_request_sense(ipr_cmd); 6201 return; 6202 } 6203 6204 cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt; 6205 cmd_pkt->request_type = IPR_RQTYPE_IOACMD; 6206 cmd_pkt->cdb[0] = IPR_CANCEL_ALL_REQUESTS; 6207 6208 ipr_do_req(ipr_cmd, ipr_erp_request_sense, ipr_timeout, 6209 IPR_CANCEL_ALL_TIMEOUT); 6210 } 6211 6212 /** 6213 * ipr_dump_ioasa - Dump contents of IOASA 6214 * @ioa_cfg: ioa config struct 6215 * @ipr_cmd: ipr command struct 6216 * @res: resource entry struct 6217 * 6218 * This function is invoked by the interrupt handler when ops 6219 * fail. It will log the IOASA if appropriate. Only called 6220 * for GPDD ops. 6221 * 6222 * Return value: 6223 * none 6224 **/ 6225 static void ipr_dump_ioasa(struct ipr_ioa_cfg *ioa_cfg, 6226 struct ipr_cmnd *ipr_cmd, struct ipr_resource_entry *res) 6227 { 6228 int i; 6229 u16 data_len; 6230 u32 ioasc, fd_ioasc; 6231 struct ipr_ioasa *ioasa = &ipr_cmd->s.ioasa; 6232 __be32 *ioasa_data = (__be32 *)ioasa; 6233 int error_index; 6234 6235 ioasc = be32_to_cpu(ioasa->hdr.ioasc) & IPR_IOASC_IOASC_MASK; 6236 fd_ioasc = be32_to_cpu(ioasa->hdr.fd_ioasc) & IPR_IOASC_IOASC_MASK; 6237 6238 if (0 == ioasc) 6239 return; 6240 6241 if (ioa_cfg->log_level < IPR_DEFAULT_LOG_LEVEL) 6242 return; 6243 6244 if (ioasc == IPR_IOASC_BUS_WAS_RESET && fd_ioasc) 6245 error_index = ipr_get_error(fd_ioasc); 6246 else 6247 error_index = ipr_get_error(ioasc); 6248 6249 if (ioa_cfg->log_level < IPR_MAX_LOG_LEVEL) { 6250 /* Don't log an error if the IOA already logged one */ 6251 if (ioasa->hdr.ilid != 0) 6252 return; 6253 6254 if (!ipr_is_gscsi(res)) 6255 return; 6256 6257 if (ipr_error_table[error_index].log_ioasa == 0) 6258 return; 6259 } 6260 6261 ipr_res_err(ioa_cfg, res, "%s\n", ipr_error_table[error_index].error); 6262 6263 data_len = be16_to_cpu(ioasa->hdr.ret_stat_len); 6264 if (ioa_cfg->sis64 && sizeof(struct ipr_ioasa64) < data_len) 6265 data_len = sizeof(struct ipr_ioasa64); 6266 else if (!ioa_cfg->sis64 && sizeof(struct ipr_ioasa) < data_len) 6267 data_len = sizeof(struct ipr_ioasa); 6268 6269 ipr_err("IOASA Dump:\n"); 6270 6271 for (i = 0; i < data_len / 4; i += 4) { 6272 ipr_err("%08X: %08X %08X %08X %08X\n", i*4, 6273 be32_to_cpu(ioasa_data[i]), 6274 be32_to_cpu(ioasa_data[i+1]), 6275 be32_to_cpu(ioasa_data[i+2]), 6276 be32_to_cpu(ioasa_data[i+3])); 6277 } 6278 } 6279 6280 /** 6281 * ipr_gen_sense - Generate SCSI sense data from an IOASA 6282 * @ioasa: IOASA 6283 * @sense_buf: sense data buffer 6284 * 6285 * Return value: 6286 * none 6287 **/ 6288 static void ipr_gen_sense(struct ipr_cmnd *ipr_cmd) 6289 { 6290 u32 failing_lba; 6291 u8 *sense_buf = ipr_cmd->scsi_cmd->sense_buffer; 6292 struct ipr_resource_entry *res = ipr_cmd->scsi_cmd->device->hostdata; 6293 struct ipr_ioasa *ioasa = &ipr_cmd->s.ioasa; 6294 u32 ioasc = be32_to_cpu(ioasa->hdr.ioasc); 6295 6296 memset(sense_buf, 0, SCSI_SENSE_BUFFERSIZE); 6297 6298 if (ioasc >= IPR_FIRST_DRIVER_IOASC) 6299 return; 6300 6301 ipr_cmd->scsi_cmd->result = SAM_STAT_CHECK_CONDITION; 6302 6303 if (ipr_is_vset_device(res) && 6304 ioasc == IPR_IOASC_MED_DO_NOT_REALLOC && 6305 ioasa->u.vset.failing_lba_hi != 0) { 6306 sense_buf[0] = 0x72; 
6307 sense_buf[1] = IPR_IOASC_SENSE_KEY(ioasc); 6308 sense_buf[2] = IPR_IOASC_SENSE_CODE(ioasc); 6309 sense_buf[3] = IPR_IOASC_SENSE_QUAL(ioasc); 6310 6311 sense_buf[7] = 12; 6312 sense_buf[8] = 0; 6313 sense_buf[9] = 0x0A; 6314 sense_buf[10] = 0x80; 6315 6316 failing_lba = be32_to_cpu(ioasa->u.vset.failing_lba_hi); 6317 6318 sense_buf[12] = (failing_lba & 0xff000000) >> 24; 6319 sense_buf[13] = (failing_lba & 0x00ff0000) >> 16; 6320 sense_buf[14] = (failing_lba & 0x0000ff00) >> 8; 6321 sense_buf[15] = failing_lba & 0x000000ff; 6322 6323 failing_lba = be32_to_cpu(ioasa->u.vset.failing_lba_lo); 6324 6325 sense_buf[16] = (failing_lba & 0xff000000) >> 24; 6326 sense_buf[17] = (failing_lba & 0x00ff0000) >> 16; 6327 sense_buf[18] = (failing_lba & 0x0000ff00) >> 8; 6328 sense_buf[19] = failing_lba & 0x000000ff; 6329 } else { 6330 sense_buf[0] = 0x70; 6331 sense_buf[2] = IPR_IOASC_SENSE_KEY(ioasc); 6332 sense_buf[12] = IPR_IOASC_SENSE_CODE(ioasc); 6333 sense_buf[13] = IPR_IOASC_SENSE_QUAL(ioasc); 6334 6335 /* Illegal request */ 6336 if ((IPR_IOASC_SENSE_KEY(ioasc) == 0x05) && 6337 (be32_to_cpu(ioasa->hdr.ioasc_specific) & IPR_FIELD_POINTER_VALID)) { 6338 sense_buf[7] = 10; /* additional length */ 6339 6340 /* IOARCB was in error */ 6341 if (IPR_IOASC_SENSE_CODE(ioasc) == 0x24) 6342 sense_buf[15] = 0xC0; 6343 else /* Parameter data was invalid */ 6344 sense_buf[15] = 0x80; 6345 6346 sense_buf[16] = 6347 ((IPR_FIELD_POINTER_MASK & 6348 be32_to_cpu(ioasa->hdr.ioasc_specific)) >> 8) & 0xff; 6349 sense_buf[17] = 6350 (IPR_FIELD_POINTER_MASK & 6351 be32_to_cpu(ioasa->hdr.ioasc_specific)) & 0xff; 6352 } else { 6353 if (ioasc == IPR_IOASC_MED_DO_NOT_REALLOC) { 6354 if (ipr_is_vset_device(res)) 6355 failing_lba = be32_to_cpu(ioasa->u.vset.failing_lba_lo); 6356 else 6357 failing_lba = be32_to_cpu(ioasa->u.dasd.failing_lba); 6358 6359 sense_buf[0] |= 0x80; /* Or in the Valid bit */ 6360 sense_buf[3] = (failing_lba & 0xff000000) >> 24; 6361 sense_buf[4] = (failing_lba & 0x00ff0000) >> 16; 6362 sense_buf[5] = (failing_lba & 0x0000ff00) >> 8; 6363 sense_buf[6] = failing_lba & 0x000000ff; 6364 } 6365 6366 sense_buf[7] = 6; /* additional length */ 6367 } 6368 } 6369 } 6370 6371 /** 6372 * ipr_get_autosense - Copy autosense data to sense buffer 6373 * @ipr_cmd: ipr command struct 6374 * 6375 * This function copies the autosense buffer to the buffer 6376 * in the scsi_cmd, if there is autosense available. 6377 * 6378 * Return value: 6379 * 1 if autosense was available / 0 if not 6380 **/ 6381 static int ipr_get_autosense(struct ipr_cmnd *ipr_cmd) 6382 { 6383 struct ipr_ioasa *ioasa = &ipr_cmd->s.ioasa; 6384 struct ipr_ioasa64 *ioasa64 = &ipr_cmd->s.ioasa64; 6385 6386 if ((be32_to_cpu(ioasa->hdr.ioasc_specific) & IPR_AUTOSENSE_VALID) == 0) 6387 return 0; 6388 6389 if (ipr_cmd->ioa_cfg->sis64) 6390 memcpy(ipr_cmd->scsi_cmd->sense_buffer, ioasa64->auto_sense.data, 6391 min_t(u16, be16_to_cpu(ioasa64->auto_sense.auto_sense_len), 6392 SCSI_SENSE_BUFFERSIZE)); 6393 else 6394 memcpy(ipr_cmd->scsi_cmd->sense_buffer, ioasa->auto_sense.data, 6395 min_t(u16, be16_to_cpu(ioasa->auto_sense.auto_sense_len), 6396 SCSI_SENSE_BUFFERSIZE)); 6397 return 1; 6398 } 6399 6400 /** 6401 * ipr_erp_start - Process an error response for a SCSI op 6402 * @ioa_cfg: ioa config struct 6403 * @ipr_cmd: ipr command struct 6404 * 6405 * This function determines whether or not to initiate ERP 6406 * on the affected device. 
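 * It is invoked from ipr_scsi_done() when an op completes with a
 * non-zero sense key in its IOASC.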
6407 * 6408 * Return value: 6409 * nothing 6410 **/ 6411 static void ipr_erp_start(struct ipr_ioa_cfg *ioa_cfg, 6412 struct ipr_cmnd *ipr_cmd) 6413 { 6414 struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd; 6415 struct ipr_resource_entry *res = scsi_cmd->device->hostdata; 6416 u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc); 6417 u32 masked_ioasc = ioasc & IPR_IOASC_IOASC_MASK; 6418 6419 if (!res) { 6420 __ipr_scsi_eh_done(ipr_cmd); 6421 return; 6422 } 6423 6424 if (!ipr_is_gscsi(res) && masked_ioasc != IPR_IOASC_HW_DEV_BUS_STATUS) 6425 ipr_gen_sense(ipr_cmd); 6426 6427 ipr_dump_ioasa(ioa_cfg, ipr_cmd, res); 6428 6429 switch (masked_ioasc) { 6430 case IPR_IOASC_ABORTED_CMD_TERM_BY_HOST: 6431 if (ipr_is_naca_model(res)) 6432 scsi_cmd->result |= (DID_ABORT << 16); 6433 else 6434 scsi_cmd->result |= (DID_IMM_RETRY << 16); 6435 break; 6436 case IPR_IOASC_IR_RESOURCE_HANDLE: 6437 case IPR_IOASC_IR_NO_CMDS_TO_2ND_IOA: 6438 scsi_cmd->result |= (DID_NO_CONNECT << 16); 6439 break; 6440 case IPR_IOASC_HW_SEL_TIMEOUT: 6441 scsi_cmd->result |= (DID_NO_CONNECT << 16); 6442 if (!ipr_is_naca_model(res)) 6443 res->needs_sync_complete = 1; 6444 break; 6445 case IPR_IOASC_SYNC_REQUIRED: 6446 if (!res->in_erp) 6447 res->needs_sync_complete = 1; 6448 scsi_cmd->result |= (DID_IMM_RETRY << 16); 6449 break; 6450 case IPR_IOASC_MED_DO_NOT_REALLOC: /* prevent retries */ 6451 case IPR_IOASA_IR_DUAL_IOA_DISABLED: 6452 /* 6453 * exception: do not set DID_PASSTHROUGH on CHECK CONDITION 6454 * so SCSI mid-layer and upper layers handle it accordingly. 6455 */ 6456 if (scsi_cmd->result != SAM_STAT_CHECK_CONDITION) 6457 scsi_cmd->result |= (DID_PASSTHROUGH << 16); 6458 break; 6459 case IPR_IOASC_BUS_WAS_RESET: 6460 case IPR_IOASC_BUS_WAS_RESET_BY_OTHER: 6461 /* 6462 * Report the bus reset and ask for a retry. The device 6463 * will give CC/UA the next command. 
6464 */ 6465 if (!res->resetting_device) 6466 scsi_report_bus_reset(ioa_cfg->host, scsi_cmd->device->channel); 6467 scsi_cmd->result |= (DID_ERROR << 16); 6468 if (!ipr_is_naca_model(res)) 6469 res->needs_sync_complete = 1; 6470 break; 6471 case IPR_IOASC_HW_DEV_BUS_STATUS: 6472 scsi_cmd->result |= IPR_IOASC_SENSE_STATUS(ioasc); 6473 if (IPR_IOASC_SENSE_STATUS(ioasc) == SAM_STAT_CHECK_CONDITION) { 6474 if (!ipr_get_autosense(ipr_cmd)) { 6475 if (!ipr_is_naca_model(res)) { 6476 ipr_erp_cancel_all(ipr_cmd); 6477 return; 6478 } 6479 } 6480 } 6481 if (!ipr_is_naca_model(res)) 6482 res->needs_sync_complete = 1; 6483 break; 6484 case IPR_IOASC_NR_INIT_CMD_REQUIRED: 6485 break; 6486 case IPR_IOASC_IR_NON_OPTIMIZED: 6487 if (res->raw_mode) { 6488 res->raw_mode = 0; 6489 scsi_cmd->result |= (DID_IMM_RETRY << 16); 6490 } else 6491 scsi_cmd->result |= (DID_ERROR << 16); 6492 break; 6493 default: 6494 if (IPR_IOASC_SENSE_KEY(ioasc) > RECOVERED_ERROR) 6495 scsi_cmd->result |= (DID_ERROR << 16); 6496 if (!ipr_is_vset_device(res) && !ipr_is_naca_model(res)) 6497 res->needs_sync_complete = 1; 6498 break; 6499 } 6500 6501 scsi_dma_unmap(ipr_cmd->scsi_cmd); 6502 scsi_cmd->scsi_done(scsi_cmd); 6503 if (ipr_cmd->eh_comp) 6504 complete(ipr_cmd->eh_comp); 6505 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q); 6506 } 6507 6508 /** 6509 * ipr_scsi_done - mid-layer done function 6510 * @ipr_cmd: ipr command struct 6511 * 6512 * This function is invoked by the interrupt handler for 6513 * ops generated by the SCSI mid-layer 6514 * 6515 * Return value: 6516 * none 6517 **/ 6518 static void ipr_scsi_done(struct ipr_cmnd *ipr_cmd) 6519 { 6520 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; 6521 struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd; 6522 u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc); 6523 unsigned long lock_flags; 6524 6525 scsi_set_resid(scsi_cmd, be32_to_cpu(ipr_cmd->s.ioasa.hdr.residual_data_len)); 6526 6527 if (likely(IPR_IOASC_SENSE_KEY(ioasc) == 0)) { 6528 scsi_dma_unmap(scsi_cmd); 6529 6530 spin_lock_irqsave(ipr_cmd->hrrq->lock, lock_flags); 6531 scsi_cmd->scsi_done(scsi_cmd); 6532 if (ipr_cmd->eh_comp) 6533 complete(ipr_cmd->eh_comp); 6534 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q); 6535 spin_unlock_irqrestore(ipr_cmd->hrrq->lock, lock_flags); 6536 } else { 6537 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); 6538 spin_lock(&ipr_cmd->hrrq->_lock); 6539 ipr_erp_start(ioa_cfg, ipr_cmd); 6540 spin_unlock(&ipr_cmd->hrrq->_lock); 6541 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); 6542 } 6543 } 6544 6545 /** 6546 * ipr_queuecommand - Queue a mid-layer request 6547 * @shost: scsi host struct 6548 * @scsi_cmd: scsi command struct 6549 * 6550 * This function queues a request generated by the mid-layer. 
6551 * 6552 * Return value: 6553 * 0 on success 6554 * SCSI_MLQUEUE_DEVICE_BUSY if device is busy 6555 * SCSI_MLQUEUE_HOST_BUSY if host is busy 6556 **/ 6557 static int ipr_queuecommand(struct Scsi_Host *shost, 6558 struct scsi_cmnd *scsi_cmd) 6559 { 6560 struct ipr_ioa_cfg *ioa_cfg; 6561 struct ipr_resource_entry *res; 6562 struct ipr_ioarcb *ioarcb; 6563 struct ipr_cmnd *ipr_cmd; 6564 unsigned long hrrq_flags, lock_flags; 6565 int rc; 6566 struct ipr_hrr_queue *hrrq; 6567 int hrrq_id; 6568 6569 ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata; 6570 6571 scsi_cmd->result = (DID_OK << 16); 6572 res = scsi_cmd->device->hostdata; 6573 6574 if (ipr_is_gata(res) && res->sata_port) { 6575 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); 6576 rc = ata_sas_queuecmd(scsi_cmd, res->sata_port->ap); 6577 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); 6578 return rc; 6579 } 6580 6581 hrrq_id = ipr_get_hrrq_index(ioa_cfg); 6582 hrrq = &ioa_cfg->hrrq[hrrq_id]; 6583 6584 spin_lock_irqsave(hrrq->lock, hrrq_flags); 6585 /* 6586 * We are currently blocking all devices due to a host reset 6587 * We have told the host to stop giving us new requests, but 6588 * ERP ops don't count. FIXME 6589 */ 6590 if (unlikely(!hrrq->allow_cmds && !hrrq->ioa_is_dead && !hrrq->removing_ioa)) { 6591 spin_unlock_irqrestore(hrrq->lock, hrrq_flags); 6592 return SCSI_MLQUEUE_HOST_BUSY; 6593 } 6594 6595 /* 6596 * FIXME - Create scsi_set_host_offline interface 6597 * and the ioa_is_dead check can be removed 6598 */ 6599 if (unlikely(hrrq->ioa_is_dead || hrrq->removing_ioa || !res)) { 6600 spin_unlock_irqrestore(hrrq->lock, hrrq_flags); 6601 goto err_nodev; 6602 } 6603 6604 ipr_cmd = __ipr_get_free_ipr_cmnd(hrrq); 6605 if (ipr_cmd == NULL) { 6606 spin_unlock_irqrestore(hrrq->lock, hrrq_flags); 6607 return SCSI_MLQUEUE_HOST_BUSY; 6608 } 6609 spin_unlock_irqrestore(hrrq->lock, hrrq_flags); 6610 6611 ipr_init_ipr_cmnd(ipr_cmd, ipr_scsi_done); 6612 ioarcb = &ipr_cmd->ioarcb; 6613 6614 memcpy(ioarcb->cmd_pkt.cdb, scsi_cmd->cmnd, scsi_cmd->cmd_len); 6615 ipr_cmd->scsi_cmd = scsi_cmd; 6616 ipr_cmd->done = ipr_scsi_eh_done; 6617 6618 if (ipr_is_gscsi(res)) { 6619 if (scsi_cmd->underflow == 0) 6620 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_ULEN_CHK; 6621 6622 if (res->reset_occurred) { 6623 res->reset_occurred = 0; 6624 ioarcb->cmd_pkt.flags_lo |= IPR_FLAGS_LO_DELAY_AFTER_RST; 6625 } 6626 } 6627 6628 if (ipr_is_gscsi(res) || ipr_is_vset_device(res)) { 6629 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_LINK_DESC; 6630 6631 ioarcb->cmd_pkt.flags_lo |= IPR_FLAGS_LO_ALIGNED_BFR; 6632 if (scsi_cmd->flags & SCMD_TAGGED) 6633 ioarcb->cmd_pkt.flags_lo |= IPR_FLAGS_LO_SIMPLE_TASK; 6634 else 6635 ioarcb->cmd_pkt.flags_lo |= IPR_FLAGS_LO_UNTAGGED_TASK; 6636 } 6637 6638 if (scsi_cmd->cmnd[0] >= 0xC0 && 6639 (!ipr_is_gscsi(res) || scsi_cmd->cmnd[0] == IPR_QUERY_RSRC_STATE)) { 6640 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD; 6641 } 6642 if (res->raw_mode && ipr_is_af_dasd_device(res)) { 6643 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_PIPE; 6644 6645 if (scsi_cmd->underflow == 0) 6646 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_ULEN_CHK; 6647 } 6648 6649 if (ioa_cfg->sis64) 6650 rc = ipr_build_ioadl64(ioa_cfg, ipr_cmd); 6651 else 6652 rc = ipr_build_ioadl(ioa_cfg, ipr_cmd); 6653 6654 spin_lock_irqsave(hrrq->lock, hrrq_flags); 6655 if (unlikely(rc || (!hrrq->allow_cmds && !hrrq->ioa_is_dead))) { 6656 list_add_tail(&ipr_cmd->queue, &hrrq->hrrq_free_q); 6657 spin_unlock_irqrestore(hrrq->lock, hrrq_flags); 6658 if (!rc) 6659 
scsi_dma_unmap(scsi_cmd);
6660 return SCSI_MLQUEUE_HOST_BUSY;
6661 }
6662
6663 if (unlikely(hrrq->ioa_is_dead)) {
6664 list_add_tail(&ipr_cmd->queue, &hrrq->hrrq_free_q);
6665 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
6666 scsi_dma_unmap(scsi_cmd);
6667 goto err_nodev;
6668 }
6669
6670 ioarcb->res_handle = res->res_handle;
6671 if (res->needs_sync_complete) {
6672 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_SYNC_COMPLETE;
6673 res->needs_sync_complete = 0;
6674 }
6675 list_add_tail(&ipr_cmd->queue, &hrrq->hrrq_pending_q);
6676 ipr_trc_hook(ipr_cmd, IPR_TRACE_START, IPR_GET_RES_PHYS_LOC(res));
6677 ipr_send_command(ipr_cmd);
6678 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
6679 return 0;
6680
6681 err_nodev:
6682 spin_lock_irqsave(hrrq->lock, hrrq_flags);
6683 memset(scsi_cmd->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
6684 scsi_cmd->result = (DID_NO_CONNECT << 16);
6685 scsi_cmd->scsi_done(scsi_cmd);
6686 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
6687 return 0;
6688 }
6689
6690 /**
6691 * ipr_ioctl - IOCTL handler
6692 * @sdev: scsi device struct
6693 * @cmd: IOCTL cmd
6694 * @arg: IOCTL arg
6695 *
6696 * Return value:
6697 * 0 on success / other on failure
6698 **/
6699 static int ipr_ioctl(struct scsi_device *sdev, int cmd, void __user *arg)
6700 {
6701 struct ipr_resource_entry *res;
6702
6703 res = (struct ipr_resource_entry *)sdev->hostdata;
6704 if (res && ipr_is_gata(res)) {
6705 if (cmd == HDIO_GET_IDENTITY)
6706 return -ENOTTY;
6707 return ata_sas_scsi_ioctl(res->sata_port->ap, sdev, cmd, arg);
6708 }
6709
6710 return -EINVAL;
6711 }
6712
6713 /**
6714 * ipr_ioa_info - Get information about the card/driver
6715 * @host: scsi host struct
6716 *
6717 * Return value:
6718 * pointer to buffer with description string
6719 **/
6720 static const char *ipr_ioa_info(struct Scsi_Host *host)
6721 {
6722 static char buffer[512];
6723 struct ipr_ioa_cfg *ioa_cfg;
6724 unsigned long lock_flags = 0;
6725
6726 ioa_cfg = (struct ipr_ioa_cfg *) host->hostdata;
6727
6728 spin_lock_irqsave(host->host_lock, lock_flags);
6729 sprintf(buffer, "IBM %X Storage Adapter", ioa_cfg->type);
6730 spin_unlock_irqrestore(host->host_lock, lock_flags);
6731
6732 return buffer;
6733 }
6734
6735 static struct scsi_host_template driver_template = {
6736 .module = THIS_MODULE,
6737 .name = "IPR",
6738 .info = ipr_ioa_info,
6739 .ioctl = ipr_ioctl,
6740 .queuecommand = ipr_queuecommand,
6741 .eh_abort_handler = ipr_eh_abort,
6742 .eh_device_reset_handler = ipr_eh_dev_reset,
6743 .eh_host_reset_handler = ipr_eh_host_reset,
6744 .slave_alloc = ipr_slave_alloc,
6745 .slave_configure = ipr_slave_configure,
6746 .slave_destroy = ipr_slave_destroy,
6747 .scan_finished = ipr_scan_finished,
6748 .target_alloc = ipr_target_alloc,
6749 .target_destroy = ipr_target_destroy,
6750 .change_queue_depth = ipr_change_queue_depth,
6751 .bios_param = ipr_biosparam,
6752 .can_queue = IPR_MAX_COMMANDS,
6753 .this_id = -1,
6754 .sg_tablesize = IPR_MAX_SGLIST,
6755 .max_sectors = IPR_IOA_MAX_SECTORS,
6756 .cmd_per_lun = IPR_MAX_CMD_PER_LUN,
6757 .shost_attrs = ipr_ioa_attrs,
6758 .sdev_attrs = ipr_dev_attrs,
6759 .proc_name = IPR_NAME,
6760 };
6761
6762 /**
6763 * ipr_ata_phy_reset - libata phy_reset handler
6764 * @ap: ata port to reset
6765 *
6766 **/
6767 static void ipr_ata_phy_reset(struct ata_port *ap)
6768 {
6769 unsigned long flags;
6770 struct ipr_sata_port *sata_port = ap->private_data;
6771 struct ipr_resource_entry *res = sata_port->res;
6772 struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg;
6773 int rc;
6774
6775
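/* Wait out any in-progress adapter reset/reload before touching the
 * device; the reset job wakes reset_wait_q when it completes. */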
ENTER; 6776 spin_lock_irqsave(ioa_cfg->host->host_lock, flags); 6777 while (ioa_cfg->in_reset_reload) { 6778 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags); 6779 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload); 6780 spin_lock_irqsave(ioa_cfg->host->host_lock, flags); 6781 } 6782 6783 if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].allow_cmds) 6784 goto out_unlock; 6785 6786 rc = ipr_device_reset(ioa_cfg, res); 6787 6788 if (rc) { 6789 ap->link.device[0].class = ATA_DEV_NONE; 6790 goto out_unlock; 6791 } 6792 6793 ap->link.device[0].class = res->ata_class; 6794 if (ap->link.device[0].class == ATA_DEV_UNKNOWN) 6795 ap->link.device[0].class = ATA_DEV_NONE; 6796 6797 out_unlock: 6798 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags); 6799 LEAVE; 6800 } 6801 6802 /** 6803 * ipr_ata_post_internal - Cleanup after an internal command 6804 * @qc: ATA queued command 6805 * 6806 * Return value: 6807 * none 6808 **/ 6809 static void ipr_ata_post_internal(struct ata_queued_cmd *qc) 6810 { 6811 struct ipr_sata_port *sata_port = qc->ap->private_data; 6812 struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg; 6813 struct ipr_cmnd *ipr_cmd; 6814 struct ipr_hrr_queue *hrrq; 6815 unsigned long flags; 6816 6817 spin_lock_irqsave(ioa_cfg->host->host_lock, flags); 6818 while (ioa_cfg->in_reset_reload) { 6819 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags); 6820 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload); 6821 spin_lock_irqsave(ioa_cfg->host->host_lock, flags); 6822 } 6823 6824 for_each_hrrq(hrrq, ioa_cfg) { 6825 spin_lock(&hrrq->_lock); 6826 list_for_each_entry(ipr_cmd, &hrrq->hrrq_pending_q, queue) { 6827 if (ipr_cmd->qc == qc) { 6828 ipr_device_reset(ioa_cfg, sata_port->res); 6829 break; 6830 } 6831 } 6832 spin_unlock(&hrrq->_lock); 6833 } 6834 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags); 6835 } 6836 6837 /** 6838 * ipr_copy_sata_tf - Copy a SATA taskfile to an IOA data structure 6839 * @regs: destination 6840 * @tf: source ATA taskfile 6841 * 6842 * Return value: 6843 * none 6844 **/ 6845 static void ipr_copy_sata_tf(struct ipr_ioarcb_ata_regs *regs, 6846 struct ata_taskfile *tf) 6847 { 6848 regs->feature = tf->feature; 6849 regs->nsect = tf->nsect; 6850 regs->lbal = tf->lbal; 6851 regs->lbam = tf->lbam; 6852 regs->lbah = tf->lbah; 6853 regs->device = tf->device; 6854 regs->command = tf->command; 6855 regs->hob_feature = tf->hob_feature; 6856 regs->hob_nsect = tf->hob_nsect; 6857 regs->hob_lbal = tf->hob_lbal; 6858 regs->hob_lbam = tf->hob_lbam; 6859 regs->hob_lbah = tf->hob_lbah; 6860 regs->ctl = tf->ctl; 6861 } 6862 6863 /** 6864 * ipr_sata_done - done function for SATA commands 6865 * @ipr_cmd: ipr command struct 6866 * 6867 * This function is invoked by the interrupt handler for 6868 * ops generated by the SCSI mid-layer to SATA devices 6869 * 6870 * Return value: 6871 * none 6872 **/ 6873 static void ipr_sata_done(struct ipr_cmnd *ipr_cmd) 6874 { 6875 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; 6876 struct ata_queued_cmd *qc = ipr_cmd->qc; 6877 struct ipr_sata_port *sata_port = qc->ap->private_data; 6878 struct ipr_resource_entry *res = sata_port->res; 6879 u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc); 6880 6881 spin_lock(&ipr_cmd->hrrq->_lock); 6882 if (ipr_cmd->ioa_cfg->sis64) 6883 memcpy(&sata_port->ioasa, &ipr_cmd->s.ioasa64.u.gata, 6884 sizeof(struct ipr_ioasa_gata)); 6885 else 6886 memcpy(&sata_port->ioasa, &ipr_cmd->s.ioasa.u.gata, 6887 sizeof(struct ipr_ioasa_gata)); 6888 ipr_dump_ioasa(ioa_cfg, ipr_cmd, res); 6889 6890 if 
(be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc_specific) & IPR_ATA_DEVICE_WAS_RESET) 6891 scsi_report_device_reset(ioa_cfg->host, res->bus, res->target); 6892 6893 if (IPR_IOASC_SENSE_KEY(ioasc) > RECOVERED_ERROR) 6894 qc->err_mask |= __ac_err_mask(sata_port->ioasa.status); 6895 else 6896 qc->err_mask |= ac_err_mask(sata_port->ioasa.status); 6897 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q); 6898 spin_unlock(&ipr_cmd->hrrq->_lock); 6899 ata_qc_complete(qc); 6900 } 6901 6902 /** 6903 * ipr_build_ata_ioadl64 - Build an ATA scatter/gather list 6904 * @ipr_cmd: ipr command struct 6905 * @qc: ATA queued command 6906 * 6907 **/ 6908 static void ipr_build_ata_ioadl64(struct ipr_cmnd *ipr_cmd, 6909 struct ata_queued_cmd *qc) 6910 { 6911 u32 ioadl_flags = 0; 6912 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb; 6913 struct ipr_ioadl64_desc *ioadl64 = ipr_cmd->i.ata_ioadl.ioadl64; 6914 struct ipr_ioadl64_desc *last_ioadl64 = NULL; 6915 int len = qc->nbytes; 6916 struct scatterlist *sg; 6917 unsigned int si; 6918 dma_addr_t dma_addr = ipr_cmd->dma_addr; 6919 6920 if (len == 0) 6921 return; 6922 6923 if (qc->dma_dir == DMA_TO_DEVICE) { 6924 ioadl_flags = IPR_IOADL_FLAGS_WRITE; 6925 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ; 6926 } else if (qc->dma_dir == DMA_FROM_DEVICE) 6927 ioadl_flags = IPR_IOADL_FLAGS_READ; 6928 6929 ioarcb->data_transfer_length = cpu_to_be32(len); 6930 ioarcb->ioadl_len = 6931 cpu_to_be32(sizeof(struct ipr_ioadl64_desc) * ipr_cmd->dma_use_sg); 6932 ioarcb->u.sis64_addr_data.data_ioadl_addr = 6933 cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, i.ata_ioadl.ioadl64)); 6934 6935 for_each_sg(qc->sg, sg, qc->n_elem, si) { 6936 ioadl64->flags = cpu_to_be32(ioadl_flags); 6937 ioadl64->data_len = cpu_to_be32(sg_dma_len(sg)); 6938 ioadl64->address = cpu_to_be64(sg_dma_address(sg)); 6939 6940 last_ioadl64 = ioadl64; 6941 ioadl64++; 6942 } 6943 6944 if (likely(last_ioadl64)) 6945 last_ioadl64->flags |= cpu_to_be32(IPR_IOADL_FLAGS_LAST); 6946 } 6947 6948 /** 6949 * ipr_build_ata_ioadl - Build an ATA scatter/gather list 6950 * @ipr_cmd: ipr command struct 6951 * @qc: ATA queued command 6952 * 6953 **/ 6954 static void ipr_build_ata_ioadl(struct ipr_cmnd *ipr_cmd, 6955 struct ata_queued_cmd *qc) 6956 { 6957 u32 ioadl_flags = 0; 6958 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb; 6959 struct ipr_ioadl_desc *ioadl = ipr_cmd->i.ioadl; 6960 struct ipr_ioadl_desc *last_ioadl = NULL; 6961 int len = qc->nbytes; 6962 struct scatterlist *sg; 6963 unsigned int si; 6964 6965 if (len == 0) 6966 return; 6967 6968 if (qc->dma_dir == DMA_TO_DEVICE) { 6969 ioadl_flags = IPR_IOADL_FLAGS_WRITE; 6970 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ; 6971 ioarcb->data_transfer_length = cpu_to_be32(len); 6972 ioarcb->ioadl_len = 6973 cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg); 6974 } else if (qc->dma_dir == DMA_FROM_DEVICE) { 6975 ioadl_flags = IPR_IOADL_FLAGS_READ; 6976 ioarcb->read_data_transfer_length = cpu_to_be32(len); 6977 ioarcb->read_ioadl_len = 6978 cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg); 6979 } 6980 6981 for_each_sg(qc->sg, sg, qc->n_elem, si) { 6982 ioadl->flags_and_data_len = cpu_to_be32(ioadl_flags | sg_dma_len(sg)); 6983 ioadl->address = cpu_to_be32(sg_dma_address(sg)); 6984 6985 last_ioadl = ioadl; 6986 ioadl++; 6987 } 6988 6989 if (likely(last_ioadl)) 6990 last_ioadl->flags_and_data_len |= cpu_to_be32(IPR_IOADL_FLAGS_LAST); 6991 } 6992 6993 /** 6994 * ipr_qc_defer - Get a free ipr_cmd 6995 * @qc: queued command 6996 * 6997 * 
Return value: 6998 * 0 if success 6999 **/ 7000 static int ipr_qc_defer(struct ata_queued_cmd *qc) 7001 { 7002 struct ata_port *ap = qc->ap; 7003 struct ipr_sata_port *sata_port = ap->private_data; 7004 struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg; 7005 struct ipr_cmnd *ipr_cmd; 7006 struct ipr_hrr_queue *hrrq; 7007 int hrrq_id; 7008 7009 hrrq_id = ipr_get_hrrq_index(ioa_cfg); 7010 hrrq = &ioa_cfg->hrrq[hrrq_id]; 7011 7012 qc->lldd_task = NULL; 7013 spin_lock(&hrrq->_lock); 7014 if (unlikely(hrrq->ioa_is_dead)) { 7015 spin_unlock(&hrrq->_lock); 7016 return 0; 7017 } 7018 7019 if (unlikely(!hrrq->allow_cmds)) { 7020 spin_unlock(&hrrq->_lock); 7021 return ATA_DEFER_LINK; 7022 } 7023 7024 ipr_cmd = __ipr_get_free_ipr_cmnd(hrrq); 7025 if (ipr_cmd == NULL) { 7026 spin_unlock(&hrrq->_lock); 7027 return ATA_DEFER_LINK; 7028 } 7029 7030 qc->lldd_task = ipr_cmd; 7031 spin_unlock(&hrrq->_lock); 7032 return 0; 7033 } 7034 7035 /** 7036 * ipr_qc_issue - Issue a SATA qc to a device 7037 * @qc: queued command 7038 * 7039 * Return value: 7040 * 0 if success 7041 **/ 7042 static unsigned int ipr_qc_issue(struct ata_queued_cmd *qc) 7043 { 7044 struct ata_port *ap = qc->ap; 7045 struct ipr_sata_port *sata_port = ap->private_data; 7046 struct ipr_resource_entry *res = sata_port->res; 7047 struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg; 7048 struct ipr_cmnd *ipr_cmd; 7049 struct ipr_ioarcb *ioarcb; 7050 struct ipr_ioarcb_ata_regs *regs; 7051 7052 if (qc->lldd_task == NULL) 7053 ipr_qc_defer(qc); 7054 7055 ipr_cmd = qc->lldd_task; 7056 if (ipr_cmd == NULL) 7057 return AC_ERR_SYSTEM; 7058 7059 qc->lldd_task = NULL; 7060 spin_lock(&ipr_cmd->hrrq->_lock); 7061 if (unlikely(!ipr_cmd->hrrq->allow_cmds || 7062 ipr_cmd->hrrq->ioa_is_dead)) { 7063 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q); 7064 spin_unlock(&ipr_cmd->hrrq->_lock); 7065 return AC_ERR_SYSTEM; 7066 } 7067 7068 ipr_init_ipr_cmnd(ipr_cmd, ipr_lock_and_done); 7069 ioarcb = &ipr_cmd->ioarcb; 7070 7071 if (ioa_cfg->sis64) { 7072 regs = &ipr_cmd->i.ata_ioadl.regs; 7073 ioarcb->add_cmd_parms_offset = cpu_to_be16(sizeof(*ioarcb)); 7074 } else 7075 regs = &ioarcb->u.add_data.u.regs; 7076 7077 memset(regs, 0, sizeof(*regs)); 7078 ioarcb->add_cmd_parms_len = cpu_to_be16(sizeof(*regs)); 7079 7080 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q); 7081 ipr_cmd->qc = qc; 7082 ipr_cmd->done = ipr_sata_done; 7083 ipr_cmd->ioarcb.res_handle = res->res_handle; 7084 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_ATA_PASSTHRU; 7085 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_LINK_DESC; 7086 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_ULEN_CHK; 7087 ipr_cmd->dma_use_sg = qc->n_elem; 7088 7089 if (ioa_cfg->sis64) 7090 ipr_build_ata_ioadl64(ipr_cmd, qc); 7091 else 7092 ipr_build_ata_ioadl(ipr_cmd, qc); 7093 7094 regs->flags |= IPR_ATA_FLAG_STATUS_ON_GOOD_COMPLETION; 7095 ipr_copy_sata_tf(regs, &qc->tf); 7096 memcpy(ioarcb->cmd_pkt.cdb, qc->cdb, IPR_MAX_CDB_LEN); 7097 ipr_trc_hook(ipr_cmd, IPR_TRACE_START, IPR_GET_RES_PHYS_LOC(res)); 7098 7099 switch (qc->tf.protocol) { 7100 case ATA_PROT_NODATA: 7101 case ATA_PROT_PIO: 7102 break; 7103 7104 case ATA_PROT_DMA: 7105 regs->flags |= IPR_ATA_FLAG_XFER_TYPE_DMA; 7106 break; 7107 7108 case ATAPI_PROT_PIO: 7109 case ATAPI_PROT_NODATA: 7110 regs->flags |= IPR_ATA_FLAG_PACKET_CMD; 7111 break; 7112 7113 case ATAPI_PROT_DMA: 7114 regs->flags |= IPR_ATA_FLAG_PACKET_CMD; 7115 regs->flags |= IPR_ATA_FLAG_XFER_TYPE_DMA; 7116 break; 7117 7118 default: 7119 WARN_ON(1); 7120 spin_unlock(&ipr_cmd->hrrq->_lock); 7121 
return AC_ERR_INVALID;
7122 }
7123
7124 ipr_send_command(ipr_cmd);
7125 spin_unlock(&ipr_cmd->hrrq->_lock);
7126
7127 return 0;
7128 }
7129
7130 /**
7131 * ipr_qc_fill_rtf - Read result TF
7132 * @qc: ATA queued command
7133 *
7134 * Return value:
7135 * true
7136 **/
7137 static bool ipr_qc_fill_rtf(struct ata_queued_cmd *qc)
7138 {
7139 struct ipr_sata_port *sata_port = qc->ap->private_data;
7140 struct ipr_ioasa_gata *g = &sata_port->ioasa;
7141 struct ata_taskfile *tf = &qc->result_tf;
7142
7143 tf->feature = g->error;
7144 tf->nsect = g->nsect;
7145 tf->lbal = g->lbal;
7146 tf->lbam = g->lbam;
7147 tf->lbah = g->lbah;
7148 tf->device = g->device;
7149 tf->command = g->status;
7150 tf->hob_nsect = g->hob_nsect;
7151 tf->hob_lbal = g->hob_lbal;
7152 tf->hob_lbam = g->hob_lbam;
7153 tf->hob_lbah = g->hob_lbah;
7154
7155 return true;
7156 }
7157
7158 static struct ata_port_operations ipr_sata_ops = {
7159 .phy_reset = ipr_ata_phy_reset,
7160 .hardreset = ipr_sata_reset,
7161 .post_internal_cmd = ipr_ata_post_internal,
7162 .qc_prep = ata_noop_qc_prep,
7163 .qc_defer = ipr_qc_defer,
7164 .qc_issue = ipr_qc_issue,
7165 .qc_fill_rtf = ipr_qc_fill_rtf,
7166 .port_start = ata_sas_port_start,
7167 .port_stop = ata_sas_port_stop
7168 };
7169
7170 static struct ata_port_info sata_port_info = {
7171 .flags = ATA_FLAG_SATA | ATA_FLAG_PIO_DMA |
7172 ATA_FLAG_SAS_HOST,
7173 .pio_mask = ATA_PIO4_ONLY,
7174 .mwdma_mask = ATA_MWDMA2,
7175 .udma_mask = ATA_UDMA6,
7176 .port_ops = &ipr_sata_ops
7177 };
7178
7179 #ifdef CONFIG_PPC_PSERIES
7180 static const u16 ipr_blocked_processors[] = {
7181 PVR_NORTHSTAR,
7182 PVR_PULSAR,
7183 PVR_POWER4,
7184 PVR_ICESTAR,
7185 PVR_SSTAR,
7186 PVR_POWER4p,
7187 PVR_630,
7188 PVR_630p
7189 };
7190
7191 /**
7192 * ipr_invalid_adapter - Determine if this adapter is supported on this hardware
7193 * @ioa_cfg: ioa cfg struct
7194 *
7195 * Adapters that use Gemstone revision < 3.1 do not work reliably on
7196 * certain pSeries hardware. This function determines if the given
7197 * adapter is in one of these configurations or not.
7198 *
7199 * Return value:
7200 * 1 if adapter is not supported / 0 if adapter is supported
7201 **/
7202 static int ipr_invalid_adapter(struct ipr_ioa_cfg *ioa_cfg)
7203 {
7204 int i;
7205
7206 if ((ioa_cfg->type == 0x5702) && (ioa_cfg->pdev->revision < 4)) {
7207 for (i = 0; i < ARRAY_SIZE(ipr_blocked_processors); i++) {
7208 if (pvr_version_is(ipr_blocked_processors[i]))
7209 return 1;
7210 }
7211 }
7212 return 0;
7213 }
7214 #else
7215 #define ipr_invalid_adapter(ioa_cfg) 0
7216 #endif
7217
7218 /**
7219 * ipr_ioa_bringdown_done - IOA bring down completion.
7220 * @ipr_cmd: ipr command struct
7221 *
7222 * This function processes the completion of an adapter bring down.
7223 * It wakes any reset sleepers.
7224 * 7225 * Return value: 7226 * IPR_RC_JOB_RETURN 7227 **/ 7228 static int ipr_ioa_bringdown_done(struct ipr_cmnd *ipr_cmd) 7229 { 7230 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; 7231 int i; 7232 7233 ENTER; 7234 if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].removing_ioa) { 7235 ipr_trace; 7236 ioa_cfg->scsi_unblock = 1; 7237 schedule_work(&ioa_cfg->work_q); 7238 } 7239 7240 ioa_cfg->in_reset_reload = 0; 7241 ioa_cfg->reset_retries = 0; 7242 for (i = 0; i < ioa_cfg->hrrq_num; i++) { 7243 spin_lock(&ioa_cfg->hrrq[i]._lock); 7244 ioa_cfg->hrrq[i].ioa_is_dead = 1; 7245 spin_unlock(&ioa_cfg->hrrq[i]._lock); 7246 } 7247 wmb(); 7248 7249 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q); 7250 wake_up_all(&ioa_cfg->reset_wait_q); 7251 LEAVE; 7252 7253 return IPR_RC_JOB_RETURN; 7254 } 7255 7256 /** 7257 * ipr_ioa_reset_done - IOA reset completion. 7258 * @ipr_cmd: ipr command struct 7259 * 7260 * This function processes the completion of an adapter reset. 7261 * It schedules any necessary mid-layer add/removes and 7262 * wakes any reset sleepers. 7263 * 7264 * Return value: 7265 * IPR_RC_JOB_RETURN 7266 **/ 7267 static int ipr_ioa_reset_done(struct ipr_cmnd *ipr_cmd) 7268 { 7269 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; 7270 struct ipr_resource_entry *res; 7271 int j; 7272 7273 ENTER; 7274 ioa_cfg->in_reset_reload = 0; 7275 for (j = 0; j < ioa_cfg->hrrq_num; j++) { 7276 spin_lock(&ioa_cfg->hrrq[j]._lock); 7277 ioa_cfg->hrrq[j].allow_cmds = 1; 7278 spin_unlock(&ioa_cfg->hrrq[j]._lock); 7279 } 7280 wmb(); 7281 ioa_cfg->reset_cmd = NULL; 7282 ioa_cfg->doorbell |= IPR_RUNTIME_RESET; 7283 7284 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) { 7285 if (res->add_to_ml || res->del_from_ml) { 7286 ipr_trace; 7287 break; 7288 } 7289 } 7290 schedule_work(&ioa_cfg->work_q); 7291 7292 for (j = 0; j < IPR_NUM_HCAMS; j++) { 7293 list_del_init(&ioa_cfg->hostrcb[j]->queue); 7294 if (j < IPR_NUM_LOG_HCAMS) 7295 ipr_send_hcam(ioa_cfg, 7296 IPR_HCAM_CDB_OP_CODE_LOG_DATA, 7297 ioa_cfg->hostrcb[j]); 7298 else 7299 ipr_send_hcam(ioa_cfg, 7300 IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE, 7301 ioa_cfg->hostrcb[j]); 7302 } 7303 7304 scsi_report_bus_reset(ioa_cfg->host, IPR_VSET_BUS); 7305 dev_info(&ioa_cfg->pdev->dev, "IOA initialized.\n"); 7306 7307 ioa_cfg->reset_retries = 0; 7308 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q); 7309 wake_up_all(&ioa_cfg->reset_wait_q); 7310 7311 ioa_cfg->scsi_unblock = 1; 7312 schedule_work(&ioa_cfg->work_q); 7313 LEAVE; 7314 return IPR_RC_JOB_RETURN; 7315 } 7316 7317 /** 7318 * ipr_set_sup_dev_dflt - Initialize a Set Supported Device buffer 7319 * @supported_dev: supported device struct 7320 * @vpids: vendor product id struct 7321 * 7322 * Return value: 7323 * none 7324 **/ 7325 static void ipr_set_sup_dev_dflt(struct ipr_supported_device *supported_dev, 7326 struct ipr_std_inq_vpids *vpids) 7327 { 7328 memset(supported_dev, 0, sizeof(struct ipr_supported_device)); 7329 memcpy(&supported_dev->vpids, vpids, sizeof(struct ipr_std_inq_vpids)); 7330 supported_dev->num_records = 1; 7331 supported_dev->data_length = 7332 cpu_to_be16(sizeof(struct ipr_supported_device)); 7333 supported_dev->reserved = 0; 7334 } 7335 7336 /** 7337 * ipr_set_supported_devs - Send Set Supported Devices for a device 7338 * @ipr_cmd: ipr command struct 7339 * 7340 * This function sends a Set Supported Devices to the adapter 7341 * 7342 * Return value: 7343 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN 7344 **/ 7345 static int ipr_set_supported_devs(struct ipr_cmnd *ipr_cmd) 7346 { 7347 struct 
ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; 7348 struct ipr_supported_device *supp_dev = &ioa_cfg->vpd_cbs->supp_dev; 7349 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb; 7350 struct ipr_resource_entry *res = ipr_cmd->u.res; 7351 7352 ipr_cmd->job_step = ipr_ioa_reset_done; 7353 7354 list_for_each_entry_continue(res, &ioa_cfg->used_res_q, queue) { 7355 if (!ipr_is_scsi_disk(res)) 7356 continue; 7357 7358 ipr_cmd->u.res = res; 7359 ipr_set_sup_dev_dflt(supp_dev, &res->std_inq_data.vpids); 7360 7361 ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE); 7362 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ; 7363 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD; 7364 7365 ioarcb->cmd_pkt.cdb[0] = IPR_SET_SUPPORTED_DEVICES; 7366 ioarcb->cmd_pkt.cdb[1] = IPR_SET_ALL_SUPPORTED_DEVICES; 7367 ioarcb->cmd_pkt.cdb[7] = (sizeof(struct ipr_supported_device) >> 8) & 0xff; 7368 ioarcb->cmd_pkt.cdb[8] = sizeof(struct ipr_supported_device) & 0xff; 7369 7370 ipr_init_ioadl(ipr_cmd, 7371 ioa_cfg->vpd_cbs_dma + 7372 offsetof(struct ipr_misc_cbs, supp_dev), 7373 sizeof(struct ipr_supported_device), 7374 IPR_IOADL_FLAGS_WRITE_LAST); 7375 7376 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, 7377 IPR_SET_SUP_DEVICE_TIMEOUT); 7378 7379 if (!ioa_cfg->sis64) 7380 ipr_cmd->job_step = ipr_set_supported_devs; 7381 LEAVE; 7382 return IPR_RC_JOB_RETURN; 7383 } 7384 7385 LEAVE; 7386 return IPR_RC_JOB_CONTINUE; 7387 } 7388 7389 /** 7390 * ipr_get_mode_page - Locate specified mode page 7391 * @mode_pages: mode page buffer 7392 * @page_code: page code to find 7393 * @len: minimum required length for mode page 7394 * 7395 * Return value: 7396 * pointer to mode page / NULL on failure 7397 **/ 7398 static void *ipr_get_mode_page(struct ipr_mode_pages *mode_pages, 7399 u32 page_code, u32 len) 7400 { 7401 struct ipr_mode_page_hdr *mode_hdr; 7402 u32 page_length; 7403 u32 length; 7404 7405 if (!mode_pages || (mode_pages->hdr.length == 0)) 7406 return NULL; 7407 7408 length = (mode_pages->hdr.length + 1) - 4 - mode_pages->hdr.block_desc_len; 7409 mode_hdr = (struct ipr_mode_page_hdr *) 7410 (mode_pages->data + mode_pages->hdr.block_desc_len); 7411 7412 while (length) { 7413 if (IPR_GET_MODE_PAGE_CODE(mode_hdr) == page_code) { 7414 if (mode_hdr->page_length >= (len - sizeof(struct ipr_mode_page_hdr))) 7415 return mode_hdr; 7416 break; 7417 } else { 7418 page_length = (sizeof(struct ipr_mode_page_hdr) + 7419 mode_hdr->page_length); 7420 length -= page_length; 7421 mode_hdr = (struct ipr_mode_page_hdr *) 7422 ((unsigned long)mode_hdr + page_length); 7423 } 7424 } 7425 return NULL; 7426 } 7427 7428 /** 7429 * ipr_check_term_power - Check for term power errors 7430 * @ioa_cfg: ioa config struct 7431 * @mode_pages: IOAFP mode pages buffer 7432 * 7433 * Check the IOAFP's mode page 28 for term power errors 7434 * 7435 * Return value: 7436 * nothing 7437 **/ 7438 static void ipr_check_term_power(struct ipr_ioa_cfg *ioa_cfg, 7439 struct ipr_mode_pages *mode_pages) 7440 { 7441 int i; 7442 int entry_length; 7443 struct ipr_dev_bus_entry *bus; 7444 struct ipr_mode_page28 *mode_page; 7445 7446 mode_page = ipr_get_mode_page(mode_pages, 0x28, 7447 sizeof(struct ipr_mode_page28)); 7448 7449 entry_length = mode_page->entry_length; 7450 7451 bus = mode_page->bus; 7452 7453 for (i = 0; i < mode_page->num_entries; i++) { 7454 if (bus->flags & IPR_SCSI_ATTR_NO_TERM_PWR) { 7455 dev_err(&ioa_cfg->pdev->dev, 7456 "Term power is absent on scsi bus %d\n", 7457 bus->res_addr.bus); 7458 } 7459 7460 bus = (struct ipr_dev_bus_entry *)((char *)bus + 
entry_length); 7461 } 7462 } 7463 7464 /** 7465 * ipr_scsi_bus_speed_limit - Limit the SCSI speed based on SES table 7466 * @ioa_cfg: ioa config struct 7467 * 7468 * Looks through the config table checking for SES devices. If 7469 * the SES device is in the SES table indicating a maximum SCSI 7470 * bus speed, the speed is limited for the bus. 7471 * 7472 * Return value: 7473 * none 7474 **/ 7475 static void ipr_scsi_bus_speed_limit(struct ipr_ioa_cfg *ioa_cfg) 7476 { 7477 u32 max_xfer_rate; 7478 int i; 7479 7480 for (i = 0; i < IPR_MAX_NUM_BUSES; i++) { 7481 max_xfer_rate = ipr_get_max_scsi_speed(ioa_cfg, i, 7482 ioa_cfg->bus_attr[i].bus_width); 7483 7484 if (max_xfer_rate < ioa_cfg->bus_attr[i].max_xfer_rate) 7485 ioa_cfg->bus_attr[i].max_xfer_rate = max_xfer_rate; 7486 } 7487 } 7488 7489 /** 7490 * ipr_modify_ioafp_mode_page_28 - Modify IOAFP Mode Page 28 7491 * @ioa_cfg: ioa config struct 7492 * @mode_pages: mode page 28 buffer 7493 * 7494 * Updates mode page 28 based on driver configuration 7495 * 7496 * Return value: 7497 * none 7498 **/ 7499 static void ipr_modify_ioafp_mode_page_28(struct ipr_ioa_cfg *ioa_cfg, 7500 struct ipr_mode_pages *mode_pages) 7501 { 7502 int i, entry_length; 7503 struct ipr_dev_bus_entry *bus; 7504 struct ipr_bus_attributes *bus_attr; 7505 struct ipr_mode_page28 *mode_page; 7506 7507 mode_page = ipr_get_mode_page(mode_pages, 0x28, 7508 sizeof(struct ipr_mode_page28)); 7509 7510 entry_length = mode_page->entry_length; 7511 7512 /* Loop for each device bus entry */ 7513 for (i = 0, bus = mode_page->bus; 7514 i < mode_page->num_entries; 7515 i++, bus = (struct ipr_dev_bus_entry *)((u8 *)bus + entry_length)) { 7516 if (bus->res_addr.bus > IPR_MAX_NUM_BUSES) { 7517 dev_err(&ioa_cfg->pdev->dev, 7518 "Invalid resource address reported: 0x%08X\n", 7519 IPR_GET_PHYS_LOC(bus->res_addr)); 7520 continue; 7521 } 7522 7523 bus_attr = &ioa_cfg->bus_attr[i]; 7524 bus->extended_reset_delay = IPR_EXTENDED_RESET_DELAY; 7525 bus->bus_width = bus_attr->bus_width; 7526 bus->max_xfer_rate = cpu_to_be32(bus_attr->max_xfer_rate); 7527 bus->flags &= ~IPR_SCSI_ATTR_QAS_MASK; 7528 if (bus_attr->qas_enabled) 7529 bus->flags |= IPR_SCSI_ATTR_ENABLE_QAS; 7530 else 7531 bus->flags |= IPR_SCSI_ATTR_DISABLE_QAS; 7532 } 7533 } 7534 7535 /** 7536 * ipr_build_mode_select - Build a mode select command 7537 * @ipr_cmd: ipr command struct 7538 * @res_handle: resource handle to send command to 7539 * @parm: Byte 2 of Mode Sense command 7540 * @dma_addr: DMA buffer address 7541 * @xfer_len: data transfer length 7542 * 7543 * Return value: 7544 * none 7545 **/ 7546 static void ipr_build_mode_select(struct ipr_cmnd *ipr_cmd, 7547 __be32 res_handle, u8 parm, 7548 dma_addr_t dma_addr, u8 xfer_len) 7549 { 7550 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb; 7551 7552 ioarcb->res_handle = res_handle; 7553 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_SCSICDB; 7554 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ; 7555 ioarcb->cmd_pkt.cdb[0] = MODE_SELECT; 7556 ioarcb->cmd_pkt.cdb[1] = parm; 7557 ioarcb->cmd_pkt.cdb[4] = xfer_len; 7558 7559 ipr_init_ioadl(ipr_cmd, dma_addr, xfer_len, IPR_IOADL_FLAGS_WRITE_LAST); 7560 } 7561 7562 /** 7563 * ipr_ioafp_mode_select_page28 - Issue Mode Select Page 28 to IOA 7564 * @ipr_cmd: ipr command struct 7565 * 7566 * This function sets up the SCSI bus attributes and sends 7567 * a Mode Select for Page 28 to activate them. 
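 * The attributes are built in three steps, visible in the code below: ipr_scsi_bus_speed_limit() clamps each bus transfer rate against the SES table, ipr_check_term_power() reports missing term power, and ipr_modify_ioafp_mode_page_28() rewrites page 28 from the driver's bus_attr settings before the Mode Select (parameter byte 0x11) is issued. The next job step is ipr_set_supported_devs.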
7568 * 7569 * Return value: 7570 * IPR_RC_JOB_RETURN 7571 **/ 7572 static int ipr_ioafp_mode_select_page28(struct ipr_cmnd *ipr_cmd) 7573 { 7574 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; 7575 struct ipr_mode_pages *mode_pages = &ioa_cfg->vpd_cbs->mode_pages; 7576 int length; 7577 7578 ENTER; 7579 ipr_scsi_bus_speed_limit(ioa_cfg); 7580 ipr_check_term_power(ioa_cfg, mode_pages); 7581 ipr_modify_ioafp_mode_page_28(ioa_cfg, mode_pages); 7582 length = mode_pages->hdr.length + 1; 7583 mode_pages->hdr.length = 0; 7584 7585 ipr_build_mode_select(ipr_cmd, cpu_to_be32(IPR_IOA_RES_HANDLE), 0x11, 7586 ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, mode_pages), 7587 length); 7588 7589 ipr_cmd->job_step = ipr_set_supported_devs; 7590 ipr_cmd->u.res = list_entry(ioa_cfg->used_res_q.next, 7591 struct ipr_resource_entry, queue); 7592 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT); 7593 7594 LEAVE; 7595 return IPR_RC_JOB_RETURN; 7596 } 7597 7598 /** 7599 * ipr_build_mode_sense - Builds a mode sense command 7600 * @ipr_cmd: ipr command struct 7601 * @res_handle: resource handle to send command to 7602 * @parm: Byte 2 of mode sense command 7603 * @dma_addr: DMA address of mode sense buffer 7604 * @xfer_len: Size of DMA buffer 7605 * 7606 * Return value: 7607 * none 7608 **/ 7609 static void ipr_build_mode_sense(struct ipr_cmnd *ipr_cmd, 7610 __be32 res_handle, 7611 u8 parm, dma_addr_t dma_addr, u8 xfer_len) 7612 { 7613 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb; 7614 7615 ioarcb->res_handle = res_handle; 7616 ioarcb->cmd_pkt.cdb[0] = MODE_SENSE; 7617 ioarcb->cmd_pkt.cdb[2] = parm; 7618 ioarcb->cmd_pkt.cdb[4] = xfer_len; 7619 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_SCSICDB; 7620 7621 ipr_init_ioadl(ipr_cmd, dma_addr, xfer_len, IPR_IOADL_FLAGS_READ_LAST); 7622 } 7623 7624 /** 7625 * ipr_reset_cmd_failed - Handle failure of IOA reset command 7626 * @ipr_cmd: ipr command struct 7627 * 7628 * This function handles the failure of an IOA bringup command. 7629 * 7630 * Return value: 7631 * IPR_RC_JOB_RETURN 7632 **/ 7633 static int ipr_reset_cmd_failed(struct ipr_cmnd *ipr_cmd) 7634 { 7635 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; 7636 u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc); 7637 7638 dev_err(&ioa_cfg->pdev->dev, 7639 "0x%02X failed with IOASC: 0x%08X\n", 7640 ipr_cmd->ioarcb.cmd_pkt.cdb[0], ioasc); 7641 7642 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE); 7643 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q); 7644 return IPR_RC_JOB_RETURN; 7645 } 7646 7647 /** 7648 * ipr_reset_mode_sense_failed - Handle failure of IOAFP mode sense 7649 * @ipr_cmd: ipr command struct 7650 * 7651 * This function handles the failure of a Mode Sense to the IOAFP. 7652 * Some adapters do not handle all mode pages.
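 * When the IOA rejects the Mode Sense with IPR_IOASC_IR_INVALID_REQ_TYPE_OR_PKT, the reset job simply skips ahead to ipr_set_supported_devs rather than failing the reset; any other IOASC is routed to ipr_reset_cmd_failed.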
7653 * 7654 * Return value: 7655 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN 7656 **/ 7657 static int ipr_reset_mode_sense_failed(struct ipr_cmnd *ipr_cmd) 7658 { 7659 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; 7660 u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc); 7661 7662 if (ioasc == IPR_IOASC_IR_INVALID_REQ_TYPE_OR_PKT) { 7663 ipr_cmd->job_step = ipr_set_supported_devs; 7664 ipr_cmd->u.res = list_entry(ioa_cfg->used_res_q.next, 7665 struct ipr_resource_entry, queue); 7666 return IPR_RC_JOB_CONTINUE; 7667 } 7668 7669 return ipr_reset_cmd_failed(ipr_cmd); 7670 } 7671 7672 /** 7673 * ipr_ioafp_mode_sense_page28 - Issue Mode Sense Page 28 to IOA 7674 * @ipr_cmd: ipr command struct 7675 * 7676 * This function sends a Page 28 mode sense to the IOA to 7677 * retrieve SCSI bus attributes. 7678 * 7679 * Return value: 7680 * IPR_RC_JOB_RETURN 7681 **/ 7682 static int ipr_ioafp_mode_sense_page28(struct ipr_cmnd *ipr_cmd) 7683 { 7684 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; 7685 7686 ENTER; 7687 ipr_build_mode_sense(ipr_cmd, cpu_to_be32(IPR_IOA_RES_HANDLE), 7688 0x28, ioa_cfg->vpd_cbs_dma + 7689 offsetof(struct ipr_misc_cbs, mode_pages), 7690 sizeof(struct ipr_mode_pages)); 7691 7692 ipr_cmd->job_step = ipr_ioafp_mode_select_page28; 7693 ipr_cmd->job_step_failed = ipr_reset_mode_sense_failed; 7694 7695 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT); 7696 7697 LEAVE; 7698 return IPR_RC_JOB_RETURN; 7699 } 7700 7701 /** 7702 * ipr_ioafp_mode_select_page24 - Issue Mode Select to IOA 7703 * @ipr_cmd: ipr command struct 7704 * 7705 * This function enables dual IOA RAID support if possible. 7706 * 7707 * Return value: 7708 * IPR_RC_JOB_RETURN 7709 **/ 7710 static int ipr_ioafp_mode_select_page24(struct ipr_cmnd *ipr_cmd) 7711 { 7712 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; 7713 struct ipr_mode_pages *mode_pages = &ioa_cfg->vpd_cbs->mode_pages; 7714 struct ipr_mode_page24 *mode_page; 7715 int length; 7716 7717 ENTER; 7718 mode_page = ipr_get_mode_page(mode_pages, 0x24, 7719 sizeof(struct ipr_mode_page24)); 7720 7721 if (mode_page) 7722 mode_page->flags |= IPR_ENABLE_DUAL_IOA_AF; 7723 7724 length = mode_pages->hdr.length + 1; 7725 mode_pages->hdr.length = 0; 7726 7727 ipr_build_mode_select(ipr_cmd, cpu_to_be32(IPR_IOA_RES_HANDLE), 0x11, 7728 ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, mode_pages), 7729 length); 7730 7731 ipr_cmd->job_step = ipr_ioafp_mode_sense_page28; 7732 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT); 7733 7734 LEAVE; 7735 return IPR_RC_JOB_RETURN; 7736 } 7737 7738 /** 7739 * ipr_reset_mode_sense_page24_failed - Handle failure of IOAFP mode sense 7740 * @ipr_cmd: ipr command struct 7741 * 7742 * This function handles the failure of a Mode Sense to the IOAFP. 7743 * Some adapters do not handle all mode pages. 7744 * 7745 * Return value: 7746 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN 7747 **/ 7748 static int ipr_reset_mode_sense_page24_failed(struct ipr_cmnd *ipr_cmd) 7749 { 7750 u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc); 7751 7752 if (ioasc == IPR_IOASC_IR_INVALID_REQ_TYPE_OR_PKT) { 7753 ipr_cmd->job_step = ipr_ioafp_mode_sense_page28; 7754 return IPR_RC_JOB_CONTINUE; 7755 } 7756 7757 return ipr_reset_cmd_failed(ipr_cmd); 7758 } 7759 7760 /** 7761 * ipr_ioafp_mode_sense_page24 - Issue Page 24 Mode Sense to IOA 7762 * @ipr_cmd: ipr command struct 7763 * 7764 * This function sends a mode sense to the IOA to retrieve 7765 * the IOA Advanced Function Control mode page.
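 * The page is read into vpd_cbs->mode_pages; ipr_ioafp_mode_select_page24() then sets IPR_ENABLE_DUAL_IOA_AF in that page and writes it back to turn on dual IOA RAID support.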
7766 * 7767 * Return value: 7768 * IPR_RC_JOB_RETURN 7769 **/ 7770 static int ipr_ioafp_mode_sense_page24(struct ipr_cmnd *ipr_cmd) 7771 { 7772 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; 7773 7774 ENTER; 7775 ipr_build_mode_sense(ipr_cmd, cpu_to_be32(IPR_IOA_RES_HANDLE), 7776 0x24, ioa_cfg->vpd_cbs_dma + 7777 offsetof(struct ipr_misc_cbs, mode_pages), 7778 sizeof(struct ipr_mode_pages)); 7779 7780 ipr_cmd->job_step = ipr_ioafp_mode_select_page24; 7781 ipr_cmd->job_step_failed = ipr_reset_mode_sense_page24_failed; 7782 7783 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT); 7784 7785 LEAVE; 7786 return IPR_RC_JOB_RETURN; 7787 } 7788 7789 /** 7790 * ipr_init_res_table - Initialize the resource table 7791 * @ipr_cmd: ipr command struct 7792 * 7793 * This function looks through the existing resource table, comparing 7794 * it with the config table. This function will take care of old/new 7795 * devices and schedule adding/removing them from the mid-layer 7796 * as appropriate. 7797 * 7798 * Return value: 7799 * IPR_RC_JOB_CONTINUE 7800 **/ 7801 static int ipr_init_res_table(struct ipr_cmnd *ipr_cmd) 7802 { 7803 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; 7804 struct ipr_resource_entry *res, *temp; 7805 struct ipr_config_table_entry_wrapper cfgtew; 7806 int entries, found, flag, i; 7807 LIST_HEAD(old_res); 7808 7809 ENTER; 7810 if (ioa_cfg->sis64) 7811 flag = ioa_cfg->u.cfg_table64->hdr64.flags; 7812 else 7813 flag = ioa_cfg->u.cfg_table->hdr.flags; 7814 7815 if (flag & IPR_UCODE_DOWNLOAD_REQ) 7816 dev_err(&ioa_cfg->pdev->dev, "Microcode download required\n"); 7817 7818 list_for_each_entry_safe(res, temp, &ioa_cfg->used_res_q, queue) 7819 list_move_tail(&res->queue, &old_res); 7820 7821 if (ioa_cfg->sis64) 7822 entries = be16_to_cpu(ioa_cfg->u.cfg_table64->hdr64.num_entries); 7823 else 7824 entries = ioa_cfg->u.cfg_table->hdr.num_entries; 7825 7826 for (i = 0; i < entries; i++) { 7827 if (ioa_cfg->sis64) 7828 cfgtew.u.cfgte64 = &ioa_cfg->u.cfg_table64->dev[i]; 7829 else 7830 cfgtew.u.cfgte = &ioa_cfg->u.cfg_table->dev[i]; 7831 found = 0; 7832 7833 list_for_each_entry_safe(res, temp, &old_res, queue) { 7834 if (ipr_is_same_device(res, &cfgtew)) { 7835 list_move_tail(&res->queue, &ioa_cfg->used_res_q); 7836 found = 1; 7837 break; 7838 } 7839 } 7840 7841 if (!found) { 7842 if (list_empty(&ioa_cfg->free_res_q)) { 7843 dev_err(&ioa_cfg->pdev->dev, "Too many devices attached\n"); 7844 break; 7845 } 7846 7847 found = 1; 7848 res = list_entry(ioa_cfg->free_res_q.next, 7849 struct ipr_resource_entry, queue); 7850 list_move_tail(&res->queue, &ioa_cfg->used_res_q); 7851 ipr_init_res_entry(res, &cfgtew); 7852 res->add_to_ml = 1; 7853 } else if (res->sdev && (ipr_is_vset_device(res) || ipr_is_scsi_disk(res))) 7854 res->sdev->allow_restart = 1; 7855 7856 if (found) 7857 ipr_update_res_entry(res, &cfgtew); 7858 } 7859 7860 list_for_each_entry_safe(res, temp, &old_res, queue) { 7861 if (res->sdev) { 7862 res->del_from_ml = 1; 7863 res->res_handle = IPR_INVALID_RES_HANDLE; 7864 list_move_tail(&res->queue, &ioa_cfg->used_res_q); 7865 } 7866 } 7867 7868 list_for_each_entry_safe(res, temp, &old_res, queue) { 7869 ipr_clear_res_target(res); 7870 list_move_tail(&res->queue, &ioa_cfg->free_res_q); 7871 } 7872 7873 if (ioa_cfg->dual_raid && ipr_dual_ioa_raid) 7874 ipr_cmd->job_step = ipr_ioafp_mode_sense_page24; 7875 else 7876 ipr_cmd->job_step = ipr_ioafp_mode_sense_page28; 7877 7878 LEAVE; 7879 return IPR_RC_JOB_CONTINUE; 7880 } 7881 7882 /** 7883 * ipr_ioafp_query_ioa_cfg - Send a 
Query IOA Config to the adapter. 7884 * @ipr_cmd: ipr command struct 7885 * 7886 * This function sends a Query IOA Configuration command 7887 * to the adapter to retrieve the IOA configuration table. 7888 * 7889 * Return value: 7890 * IPR_RC_JOB_RETURN 7891 **/ 7892 static int ipr_ioafp_query_ioa_cfg(struct ipr_cmnd *ipr_cmd) 7893 { 7894 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; 7895 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb; 7896 struct ipr_inquiry_page3 *ucode_vpd = &ioa_cfg->vpd_cbs->page3_data; 7897 struct ipr_inquiry_cap *cap = &ioa_cfg->vpd_cbs->cap; 7898 7899 ENTER; 7900 if (cap->cap & IPR_CAP_DUAL_IOA_RAID) 7901 ioa_cfg->dual_raid = 1; 7902 dev_info(&ioa_cfg->pdev->dev, "Adapter firmware version: %02X%02X%02X%02X\n", 7903 ucode_vpd->major_release, ucode_vpd->card_type, 7904 ucode_vpd->minor_release[0], ucode_vpd->minor_release[1]); 7905 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD; 7906 ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE); 7907 7908 ioarcb->cmd_pkt.cdb[0] = IPR_QUERY_IOA_CONFIG; 7909 ioarcb->cmd_pkt.cdb[6] = (ioa_cfg->cfg_table_size >> 16) & 0xff; 7910 ioarcb->cmd_pkt.cdb[7] = (ioa_cfg->cfg_table_size >> 8) & 0xff; 7911 ioarcb->cmd_pkt.cdb[8] = ioa_cfg->cfg_table_size & 0xff; 7912 7913 ipr_init_ioadl(ipr_cmd, ioa_cfg->cfg_table_dma, ioa_cfg->cfg_table_size, 7914 IPR_IOADL_FLAGS_READ_LAST); 7915 7916 ipr_cmd->job_step = ipr_init_res_table; 7917 7918 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT); 7919 7920 LEAVE; 7921 return IPR_RC_JOB_RETURN; 7922 } 7923 7924 static int ipr_ioa_service_action_failed(struct ipr_cmnd *ipr_cmd) 7925 { 7926 u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc); 7927 7928 if (ioasc == IPR_IOASC_IR_INVALID_REQ_TYPE_OR_PKT) 7929 return IPR_RC_JOB_CONTINUE; 7930 7931 return ipr_reset_cmd_failed(ipr_cmd); 7932 } 7933 7934 static void ipr_build_ioa_service_action(struct ipr_cmnd *ipr_cmd, 7935 __be32 res_handle, u8 sa_code) 7936 { 7937 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb; 7938 7939 ioarcb->res_handle = res_handle; 7940 ioarcb->cmd_pkt.cdb[0] = IPR_IOA_SERVICE_ACTION; 7941 ioarcb->cmd_pkt.cdb[1] = sa_code; 7942 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD; 7943 } 7944 7945 /** 7946 * ipr_ioafp_set_caching_parameters - Issue Set Cache parameters service 7947 * action 7948 * 7949 * Return value: 7950 * none 7951 **/ 7952 static int ipr_ioafp_set_caching_parameters(struct ipr_cmnd *ipr_cmd) 7953 { 7954 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb; 7955 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; 7956 struct ipr_inquiry_pageC4 *pageC4 = &ioa_cfg->vpd_cbs->pageC4_data; 7957 7958 ENTER; 7959 7960 ipr_cmd->job_step = ipr_ioafp_query_ioa_cfg; 7961 7962 if (pageC4->cache_cap[0] & IPR_CAP_SYNC_CACHE) { 7963 ipr_build_ioa_service_action(ipr_cmd, 7964 cpu_to_be32(IPR_IOA_RES_HANDLE), 7965 IPR_IOA_SA_CHANGE_CACHE_PARAMS); 7966 7967 ioarcb->cmd_pkt.cdb[2] = 0x40; 7968 7969 ipr_cmd->job_step_failed = ipr_ioa_service_action_failed; 7970 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, 7971 IPR_SET_SUP_DEVICE_TIMEOUT); 7972 7973 LEAVE; 7974 return IPR_RC_JOB_RETURN; 7975 } 7976 7977 LEAVE; 7978 return IPR_RC_JOB_CONTINUE; 7979 } 7980 7981 /** 7982 * ipr_ioafp_inquiry - Send an Inquiry to the adapter. 7983 * @ipr_cmd: ipr command struct 7984 * 7985 * This utility function sends an inquiry to the adapter. 
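 * The caller supplies the inquiry flags byte (CDB byte 1), the page code (byte 2), and the allocation length (byte 4); the response is DMA'd to the supplied buffer and the request completes through ipr_reset_ioa_job so the reset state machine continues.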
7986 * 7987 * Return value: 7988 * none 7989 **/ 7990 static void ipr_ioafp_inquiry(struct ipr_cmnd *ipr_cmd, u8 flags, u8 page, 7991 dma_addr_t dma_addr, u8 xfer_len) 7992 { 7993 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb; 7994 7995 ENTER; 7996 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_SCSICDB; 7997 ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE); 7998 7999 ioarcb->cmd_pkt.cdb[0] = INQUIRY; 8000 ioarcb->cmd_pkt.cdb[1] = flags; 8001 ioarcb->cmd_pkt.cdb[2] = page; 8002 ioarcb->cmd_pkt.cdb[4] = xfer_len; 8003 8004 ipr_init_ioadl(ipr_cmd, dma_addr, xfer_len, IPR_IOADL_FLAGS_READ_LAST); 8005 8006 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT); 8007 LEAVE; 8008 } 8009 8010 /** 8011 * ipr_inquiry_page_supported - Is the given inquiry page supported 8012 * @page0: inquiry page 0 buffer 8013 * @page: page code. 8014 * 8015 * This function determines if the specified inquiry page is supported. 8016 * 8017 * Return value: 8018 * 1 if page is supported / 0 if not 8019 **/ 8020 static int ipr_inquiry_page_supported(struct ipr_inquiry_page0 *page0, u8 page) 8021 { 8022 int i; 8023 8024 for (i = 0; i < min_t(u8, page0->len, IPR_INQUIRY_PAGE0_ENTRIES); i++) 8025 if (page0->page[i] == page) 8026 return 1; 8027 8028 return 0; 8029 } 8030 8031 /** 8032 * ipr_ioafp_pageC4_inquiry - Send a Page 0xC4 Inquiry to the adapter. 8033 * @ipr_cmd: ipr command struct 8034 * 8035 * This function sends a Page 0xC4 inquiry to the adapter 8036 * to retrieve software VPD information. 8037 * 8038 * Return value: 8039 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN 8040 **/ 8041 static int ipr_ioafp_pageC4_inquiry(struct ipr_cmnd *ipr_cmd) 8042 { 8043 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; 8044 struct ipr_inquiry_page0 *page0 = &ioa_cfg->vpd_cbs->page0_data; 8045 struct ipr_inquiry_pageC4 *pageC4 = &ioa_cfg->vpd_cbs->pageC4_data; 8046 8047 ENTER; 8048 ipr_cmd->job_step = ipr_ioafp_set_caching_parameters; 8049 memset(pageC4, 0, sizeof(*pageC4)); 8050 8051 if (ipr_inquiry_page_supported(page0, 0xC4)) { 8052 ipr_ioafp_inquiry(ipr_cmd, 1, 0xC4, 8053 (ioa_cfg->vpd_cbs_dma 8054 + offsetof(struct ipr_misc_cbs, 8055 pageC4_data)), 8056 sizeof(struct ipr_inquiry_pageC4)); 8057 return IPR_RC_JOB_RETURN; 8058 } 8059 8060 LEAVE; 8061 return IPR_RC_JOB_CONTINUE; 8062 } 8063 8064 /** 8065 * ipr_ioafp_cap_inquiry - Send a Page 0xD0 Inquiry to the adapter. 8066 * @ipr_cmd: ipr command struct 8067 * 8068 * This function sends a Page 0xD0 inquiry to the adapter 8069 * to retrieve adapter capabilities. 8070 * 8071 * Return value: 8072 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN 8073 **/ 8074 static int ipr_ioafp_cap_inquiry(struct ipr_cmnd *ipr_cmd) 8075 { 8076 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; 8077 struct ipr_inquiry_page0 *page0 = &ioa_cfg->vpd_cbs->page0_data; 8078 struct ipr_inquiry_cap *cap = &ioa_cfg->vpd_cbs->cap; 8079 8080 ENTER; 8081 ipr_cmd->job_step = ipr_ioafp_pageC4_inquiry; 8082 memset(cap, 0, sizeof(*cap)); 8083 8084 if (ipr_inquiry_page_supported(page0, 0xD0)) { 8085 ipr_ioafp_inquiry(ipr_cmd, 1, 0xD0, 8086 ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, cap), 8087 sizeof(struct ipr_inquiry_cap)); 8088 return IPR_RC_JOB_RETURN; 8089 } 8090 8091 LEAVE; 8092 return IPR_RC_JOB_CONTINUE; 8093 } 8094 8095 /** 8096 * ipr_ioafp_page3_inquiry - Send a Page 3 Inquiry to the adapter. 8097 * @ipr_cmd: ipr command struct 8098 * 8099 * This function sends a Page 3 inquiry to the adapter 8100 * to retrieve software VPD information. 
8101 * 8102 * Return value: 8103 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN 8104 **/ 8105 static int ipr_ioafp_page3_inquiry(struct ipr_cmnd *ipr_cmd) 8106 { 8107 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; 8108 8109 ENTER; 8110 8111 ipr_cmd->job_step = ipr_ioafp_cap_inquiry; 8112 8113 ipr_ioafp_inquiry(ipr_cmd, 1, 3, 8114 ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, page3_data), 8115 sizeof(struct ipr_inquiry_page3)); 8116 8117 LEAVE; 8118 return IPR_RC_JOB_RETURN; 8119 } 8120 8121 /** 8122 * ipr_ioafp_page0_inquiry - Send a Page 0 Inquiry to the adapter. 8123 * @ipr_cmd: ipr command struct 8124 * 8125 * This function sends a Page 0 inquiry to the adapter 8126 * to retrieve supported inquiry pages. 8127 * 8128 * Return value: 8129 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN 8130 **/ 8131 static int ipr_ioafp_page0_inquiry(struct ipr_cmnd *ipr_cmd) 8132 { 8133 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; 8134 char type[5]; 8135 8136 ENTER; 8137 8138 /* Grab the type out of the VPD and store it away */ 8139 memcpy(type, ioa_cfg->vpd_cbs->ioa_vpd.std_inq_data.vpids.product_id, 4); 8140 type[4] = '\0'; 8141 ioa_cfg->type = simple_strtoul((char *)type, NULL, 16); 8142 8143 if (ipr_invalid_adapter(ioa_cfg)) { 8144 dev_err(&ioa_cfg->pdev->dev, 8145 "Adapter not supported in this hardware configuration.\n"); 8146 8147 if (!ipr_testmode) { 8148 ioa_cfg->reset_retries += IPR_NUM_RESET_RELOAD_RETRIES; 8149 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE); 8150 list_add_tail(&ipr_cmd->queue, 8151 &ioa_cfg->hrrq->hrrq_free_q); 8152 return IPR_RC_JOB_RETURN; 8153 } 8154 } 8155 8156 ipr_cmd->job_step = ipr_ioafp_page3_inquiry; 8157 8158 ipr_ioafp_inquiry(ipr_cmd, 1, 0, 8159 ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, page0_data), 8160 sizeof(struct ipr_inquiry_page0)); 8161 8162 LEAVE; 8163 return IPR_RC_JOB_RETURN; 8164 } 8165 8166 /** 8167 * ipr_ioafp_std_inquiry - Send a Standard Inquiry to the adapter. 8168 * @ipr_cmd: ipr command struct 8169 * 8170 * This function sends a standard inquiry to the adapter. 8171 * 8172 * Return value: 8173 * IPR_RC_JOB_RETURN 8174 **/ 8175 static int ipr_ioafp_std_inquiry(struct ipr_cmnd *ipr_cmd) 8176 { 8177 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; 8178 8179 ENTER; 8180 ipr_cmd->job_step = ipr_ioafp_page0_inquiry; 8181 8182 ipr_ioafp_inquiry(ipr_cmd, 0, 0, 8183 ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, ioa_vpd), 8184 sizeof(struct ipr_ioa_vpd)); 8185 8186 LEAVE; 8187 return IPR_RC_JOB_RETURN; 8188 } 8189 8190 /** 8191 * ipr_ioafp_identify_hrrq - Send Identify Host RRQ. 8192 * @ipr_cmd: ipr command struct 8193 * 8194 * This function sends an Identify Host Request Response Queue 8195 * command to establish the HRRQ with the adapter.
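 * The host RRQ DMA address and length are encoded directly into the CDB: bytes 2-5 carry bits 31-0 of host_rrq_dma, bytes 10-13 carry bits 63-32 on SIS-64 adapters, and bytes 7-8 carry the queue size in bytes. One Identify is issued per HRRQ, with the job step re-armed until identify_hrrq_index reaches hrrq_num.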
8196 * 8197 * Return value: 8198 * IPR_RC_JOB_RETURN 8199 **/ 8200 static int ipr_ioafp_identify_hrrq(struct ipr_cmnd *ipr_cmd) 8201 { 8202 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; 8203 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb; 8204 struct ipr_hrr_queue *hrrq; 8205 8206 ENTER; 8207 ipr_cmd->job_step = ipr_ioafp_std_inquiry; 8208 if (ioa_cfg->identify_hrrq_index == 0) 8209 dev_info(&ioa_cfg->pdev->dev, "Starting IOA initialization sequence.\n"); 8210 8211 if (ioa_cfg->identify_hrrq_index < ioa_cfg->hrrq_num) { 8212 hrrq = &ioa_cfg->hrrq[ioa_cfg->identify_hrrq_index]; 8213 8214 ioarcb->cmd_pkt.cdb[0] = IPR_ID_HOST_RR_Q; 8215 ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE); 8216 8217 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD; 8218 if (ioa_cfg->sis64) 8219 ioarcb->cmd_pkt.cdb[1] = 0x1; 8220 8221 if (ioa_cfg->nvectors == 1) 8222 ioarcb->cmd_pkt.cdb[1] &= ~IPR_ID_HRRQ_SELE_ENABLE; 8223 else 8224 ioarcb->cmd_pkt.cdb[1] |= IPR_ID_HRRQ_SELE_ENABLE; 8225 8226 ioarcb->cmd_pkt.cdb[2] = 8227 ((u64) hrrq->host_rrq_dma >> 24) & 0xff; 8228 ioarcb->cmd_pkt.cdb[3] = 8229 ((u64) hrrq->host_rrq_dma >> 16) & 0xff; 8230 ioarcb->cmd_pkt.cdb[4] = 8231 ((u64) hrrq->host_rrq_dma >> 8) & 0xff; 8232 ioarcb->cmd_pkt.cdb[5] = 8233 ((u64) hrrq->host_rrq_dma) & 0xff; 8234 ioarcb->cmd_pkt.cdb[7] = 8235 ((sizeof(u32) * hrrq->size) >> 8) & 0xff; 8236 ioarcb->cmd_pkt.cdb[8] = 8237 (sizeof(u32) * hrrq->size) & 0xff; 8238 8239 if (ioarcb->cmd_pkt.cdb[1] & IPR_ID_HRRQ_SELE_ENABLE) 8240 ioarcb->cmd_pkt.cdb[9] = 8241 ioa_cfg->identify_hrrq_index; 8242 8243 if (ioa_cfg->sis64) { 8244 ioarcb->cmd_pkt.cdb[10] = 8245 ((u64) hrrq->host_rrq_dma >> 56) & 0xff; 8246 ioarcb->cmd_pkt.cdb[11] = 8247 ((u64) hrrq->host_rrq_dma >> 48) & 0xff; 8248 ioarcb->cmd_pkt.cdb[12] = 8249 ((u64) hrrq->host_rrq_dma >> 40) & 0xff; 8250 ioarcb->cmd_pkt.cdb[13] = 8251 ((u64) hrrq->host_rrq_dma >> 32) & 0xff; 8252 } 8253 8254 if (ioarcb->cmd_pkt.cdb[1] & IPR_ID_HRRQ_SELE_ENABLE) 8255 ioarcb->cmd_pkt.cdb[14] = 8256 ioa_cfg->identify_hrrq_index; 8257 8258 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, 8259 IPR_INTERNAL_TIMEOUT); 8260 8261 if (++ioa_cfg->identify_hrrq_index < ioa_cfg->hrrq_num) 8262 ipr_cmd->job_step = ipr_ioafp_identify_hrrq; 8263 8264 LEAVE; 8265 return IPR_RC_JOB_RETURN; 8266 } 8267 8268 LEAVE; 8269 return IPR_RC_JOB_CONTINUE; 8270 } 8271 8272 /** 8273 * ipr_reset_timer_done - Adapter reset timer function 8274 * @ipr_cmd: ipr command struct 8275 * 8276 * Description: This function is used in adapter reset processing 8277 * for timing events. If the reset_cmd pointer in the IOA 8278 * config struct is not this adapter's we are doing nested 8279 * resets and fail_all_ops will take care of freeing the 8280 * command block. 8281 * 8282 * Return value: 8283 * none 8284 **/ 8285 static void ipr_reset_timer_done(struct timer_list *t) 8286 { 8287 struct ipr_cmnd *ipr_cmd = from_timer(ipr_cmd, t, timer); 8288 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; 8289 unsigned long lock_flags = 0; 8290 8291 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); 8292 8293 if (ioa_cfg->reset_cmd == ipr_cmd) { 8294 list_del(&ipr_cmd->queue); 8295 ipr_cmd->done(ipr_cmd); 8296 } 8297 8298 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); 8299 } 8300 8301 /** 8302 * ipr_reset_start_timer - Start a timer for adapter reset job 8303 * @ipr_cmd: ipr command struct 8304 * @timeout: timeout value 8305 * 8306 * Description: This function is used in adapter reset processing 8307 * for timing events. 
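 * The command block is placed on the HRRQ pending queue with its done handler set to ipr_reset_ioa_job, so the reset state machine resumes when the timer expires.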
If the reset_cmd pointer in the IOA 8308 * config struct is not this adapter's we are doing nested 8309 * resets and fail_all_ops will take care of freeing the 8310 * command block. 8311 * 8312 * Return value: 8313 * none 8314 **/ 8315 static void ipr_reset_start_timer(struct ipr_cmnd *ipr_cmd, 8316 unsigned long timeout) 8317 { 8318 8319 ENTER; 8320 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q); 8321 ipr_cmd->done = ipr_reset_ioa_job; 8322 8323 ipr_cmd->timer.expires = jiffies + timeout; 8324 ipr_cmd->timer.function = ipr_reset_timer_done; 8325 add_timer(&ipr_cmd->timer); 8326 } 8327 8328 /** 8329 * ipr_init_ioa_mem - Initialize ioa_cfg control block 8330 * @ioa_cfg: ioa cfg struct 8331 * 8332 * Return value: 8333 * nothing 8334 **/ 8335 static void ipr_init_ioa_mem(struct ipr_ioa_cfg *ioa_cfg) 8336 { 8337 struct ipr_hrr_queue *hrrq; 8338 8339 for_each_hrrq(hrrq, ioa_cfg) { 8340 spin_lock(&hrrq->_lock); 8341 memset(hrrq->host_rrq, 0, sizeof(u32) * hrrq->size); 8342 8343 /* Initialize Host RRQ pointers */ 8344 hrrq->hrrq_start = hrrq->host_rrq; 8345 hrrq->hrrq_end = &hrrq->host_rrq[hrrq->size - 1]; 8346 hrrq->hrrq_curr = hrrq->hrrq_start; 8347 hrrq->toggle_bit = 1; 8348 spin_unlock(&hrrq->_lock); 8349 } 8350 wmb(); 8351 8352 ioa_cfg->identify_hrrq_index = 0; 8353 if (ioa_cfg->hrrq_num == 1) 8354 atomic_set(&ioa_cfg->hrrq_index, 0); 8355 else 8356 atomic_set(&ioa_cfg->hrrq_index, 1); 8357 8358 /* Zero out config table */ 8359 memset(ioa_cfg->u.cfg_table, 0, ioa_cfg->cfg_table_size); 8360 } 8361 8362 /** 8363 * ipr_reset_next_stage - Process IPL stage change based on feedback register. 8364 * @ipr_cmd: ipr command struct 8365 * 8366 * Return value: 8367 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN 8368 **/ 8369 static int ipr_reset_next_stage(struct ipr_cmnd *ipr_cmd) 8370 { 8371 unsigned long stage, stage_time; 8372 u32 feedback; 8373 volatile u32 int_reg; 8374 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; 8375 u64 maskval = 0; 8376 8377 feedback = readl(ioa_cfg->regs.init_feedback_reg); 8378 stage = feedback & IPR_IPL_INIT_STAGE_MASK; 8379 stage_time = feedback & IPR_IPL_INIT_STAGE_TIME_MASK; 8380 8381 ipr_dbg("IPL stage = 0x%lx, IPL stage time = %ld\n", stage, stage_time); 8382 8383 /* sanity check the stage_time value */ 8384 if (stage_time == 0) 8385 stage_time = IPR_IPL_INIT_DEFAULT_STAGE_TIME; 8386 else if (stage_time < IPR_IPL_INIT_MIN_STAGE_TIME) 8387 stage_time = IPR_IPL_INIT_MIN_STAGE_TIME; 8388 else if (stage_time > IPR_LONG_OPERATIONAL_TIMEOUT) 8389 stage_time = IPR_LONG_OPERATIONAL_TIMEOUT; 8390 8391 if (stage == IPR_IPL_INIT_STAGE_UNKNOWN) { 8392 writel(IPR_PCII_IPL_STAGE_CHANGE, ioa_cfg->regs.set_interrupt_mask_reg); 8393 int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg); 8394 stage_time = ioa_cfg->transop_timeout; 8395 ipr_cmd->job_step = ipr_ioafp_identify_hrrq; 8396 } else if (stage == IPR_IPL_INIT_STAGE_TRANSOP) { 8397 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32); 8398 if (int_reg & IPR_PCII_IOA_TRANS_TO_OPER) { 8399 ipr_cmd->job_step = ipr_ioafp_identify_hrrq; 8400 maskval = IPR_PCII_IPL_STAGE_CHANGE; 8401 maskval = (maskval << 32) | IPR_PCII_IOA_TRANS_TO_OPER; 8402 writeq(maskval, ioa_cfg->regs.set_interrupt_mask_reg); 8403 int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg); 8404 return IPR_RC_JOB_CONTINUE; 8405 } 8406 } 8407 8408 ipr_cmd->timer.expires = jiffies + stage_time * HZ; 8409 ipr_cmd->timer.function = ipr_oper_timeout; 8410 ipr_cmd->done = ipr_reset_ioa_job; 8411 add_timer(&ipr_cmd->timer); 8412 8413 
list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q); 8414 8415 return IPR_RC_JOB_RETURN; 8416 } 8417 8418 /** 8419 * ipr_reset_enable_ioa - Enable the IOA following a reset. 8420 * @ipr_cmd: ipr command struct 8421 * 8422 * This function reinitializes some control blocks and 8423 * enables destructive diagnostics on the adapter. 8424 * 8425 * Return value: 8426 * IPR_RC_JOB_RETURN 8427 **/ 8428 static int ipr_reset_enable_ioa(struct ipr_cmnd *ipr_cmd) 8429 { 8430 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; 8431 volatile u32 int_reg; 8432 volatile u64 maskval; 8433 int i; 8434 8435 ENTER; 8436 ipr_cmd->job_step = ipr_ioafp_identify_hrrq; 8437 ipr_init_ioa_mem(ioa_cfg); 8438 8439 for (i = 0; i < ioa_cfg->hrrq_num; i++) { 8440 spin_lock(&ioa_cfg->hrrq[i]._lock); 8441 ioa_cfg->hrrq[i].allow_interrupts = 1; 8442 spin_unlock(&ioa_cfg->hrrq[i]._lock); 8443 } 8444 if (ioa_cfg->sis64) { 8445 /* Set the adapter to the correct endian mode. */ 8446 writel(IPR_ENDIAN_SWAP_KEY, ioa_cfg->regs.endian_swap_reg); 8447 int_reg = readl(ioa_cfg->regs.endian_swap_reg); 8448 } 8449 8450 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32); 8451 8452 if (int_reg & IPR_PCII_IOA_TRANS_TO_OPER) { 8453 writel((IPR_PCII_ERROR_INTERRUPTS | IPR_PCII_HRRQ_UPDATED), 8454 ioa_cfg->regs.clr_interrupt_mask_reg32); 8455 int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg); 8456 return IPR_RC_JOB_CONTINUE; 8457 } 8458 8459 /* Enable destructive diagnostics on IOA */ 8460 writel(ioa_cfg->doorbell, ioa_cfg->regs.set_uproc_interrupt_reg32); 8461 8462 if (ioa_cfg->sis64) { 8463 maskval = IPR_PCII_IPL_STAGE_CHANGE; 8464 maskval = (maskval << 32) | IPR_PCII_OPER_INTERRUPTS; 8465 writeq(maskval, ioa_cfg->regs.clr_interrupt_mask_reg); 8466 } else 8467 writel(IPR_PCII_OPER_INTERRUPTS, ioa_cfg->regs.clr_interrupt_mask_reg32); 8468 8469 int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg); 8470 8471 dev_info(&ioa_cfg->pdev->dev, "Initializing IOA.\n"); 8472 8473 if (ioa_cfg->sis64) { 8474 ipr_cmd->job_step = ipr_reset_next_stage; 8475 return IPR_RC_JOB_CONTINUE; 8476 } 8477 8478 ipr_cmd->timer.expires = jiffies + (ioa_cfg->transop_timeout * HZ); 8479 ipr_cmd->timer.function = ipr_oper_timeout; 8480 ipr_cmd->done = ipr_reset_ioa_job; 8481 add_timer(&ipr_cmd->timer); 8482 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q); 8483 8484 LEAVE; 8485 return IPR_RC_JOB_RETURN; 8486 } 8487 8488 /** 8489 * ipr_reset_wait_for_dump - Wait for a dump to timeout. 8490 * @ipr_cmd: ipr command struct 8491 * 8492 * This function is invoked when an adapter dump has run out 8493 * of processing time. 8494 * 8495 * Return value: 8496 * IPR_RC_JOB_CONTINUE 8497 **/ 8498 static int ipr_reset_wait_for_dump(struct ipr_cmnd *ipr_cmd) 8499 { 8500 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; 8501 8502 if (ioa_cfg->sdt_state == GET_DUMP) 8503 ioa_cfg->sdt_state = WAIT_FOR_DUMP; 8504 else if (ioa_cfg->sdt_state == READ_DUMP) 8505 ioa_cfg->sdt_state = ABORT_DUMP; 8506 8507 ioa_cfg->dump_timeout = 1; 8508 ipr_cmd->job_step = ipr_reset_alert; 8509 8510 return IPR_RC_JOB_CONTINUE; 8511 } 8512 8513 /** 8514 * ipr_unit_check_no_data - Log a unit check/no data error log 8515 * @ioa_cfg: ioa config struct 8516 * 8517 * Logs an error indicating the adapter unit checked, but for some 8518 * reason, we were unable to fetch the unit check buffer. 
8519 * 8520 * Return value: 8521 * nothing 8522 **/ 8523 static void ipr_unit_check_no_data(struct ipr_ioa_cfg *ioa_cfg) 8524 { 8525 ioa_cfg->errors_logged++; 8526 dev_err(&ioa_cfg->pdev->dev, "IOA unit check with no data\n"); 8527 } 8528 8529 /** 8530 * ipr_get_unit_check_buffer - Get the unit check buffer from the IOA 8531 * @ioa_cfg: ioa config struct 8532 * 8533 * Fetches the unit check buffer from the adapter by clocking the data 8534 * through the mailbox register. 8535 * 8536 * Return value: 8537 * nothing 8538 **/ 8539 static void ipr_get_unit_check_buffer(struct ipr_ioa_cfg *ioa_cfg) 8540 { 8541 unsigned long mailbox; 8542 struct ipr_hostrcb *hostrcb; 8543 struct ipr_uc_sdt sdt; 8544 int rc, length; 8545 u32 ioasc; 8546 8547 mailbox = readl(ioa_cfg->ioa_mailbox); 8548 8549 if (!ioa_cfg->sis64 && !ipr_sdt_is_fmt2(mailbox)) { 8550 ipr_unit_check_no_data(ioa_cfg); 8551 return; 8552 } 8553 8554 memset(&sdt, 0, sizeof(struct ipr_uc_sdt)); 8555 rc = ipr_get_ldump_data_section(ioa_cfg, mailbox, (__be32 *) &sdt, 8556 (sizeof(struct ipr_uc_sdt)) / sizeof(__be32)); 8557 8558 if (rc || !(sdt.entry[0].flags & IPR_SDT_VALID_ENTRY) || 8559 ((be32_to_cpu(sdt.hdr.state) != IPR_FMT3_SDT_READY_TO_USE) && 8560 (be32_to_cpu(sdt.hdr.state) != IPR_FMT2_SDT_READY_TO_USE))) { 8561 ipr_unit_check_no_data(ioa_cfg); 8562 return; 8563 } 8564 8565 /* Find length of the first sdt entry (UC buffer) */ 8566 if (be32_to_cpu(sdt.hdr.state) == IPR_FMT3_SDT_READY_TO_USE) 8567 length = be32_to_cpu(sdt.entry[0].end_token); 8568 else 8569 length = (be32_to_cpu(sdt.entry[0].end_token) - 8570 be32_to_cpu(sdt.entry[0].start_token)) & 8571 IPR_FMT2_MBX_ADDR_MASK; 8572 8573 hostrcb = list_entry(ioa_cfg->hostrcb_free_q.next, 8574 struct ipr_hostrcb, queue); 8575 list_del_init(&hostrcb->queue); 8576 memset(&hostrcb->hcam, 0, sizeof(hostrcb->hcam)); 8577 8578 rc = ipr_get_ldump_data_section(ioa_cfg, 8579 be32_to_cpu(sdt.entry[0].start_token), 8580 (__be32 *)&hostrcb->hcam, 8581 min(length, (int)sizeof(hostrcb->hcam)) / sizeof(__be32)); 8582 8583 if (!rc) { 8584 ipr_handle_log_data(ioa_cfg, hostrcb); 8585 ioasc = be32_to_cpu(hostrcb->hcam.u.error.fd_ioasc); 8586 if (ioasc == IPR_IOASC_NR_IOA_RESET_REQUIRED && 8587 ioa_cfg->sdt_state == GET_DUMP) 8588 ioa_cfg->sdt_state = WAIT_FOR_DUMP; 8589 } else 8590 ipr_unit_check_no_data(ioa_cfg); 8591 8592 list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_free_q); 8593 } 8594 8595 /** 8596 * ipr_reset_get_unit_check_job - Call to get the unit check buffer. 8597 * @ipr_cmd: ipr command struct 8598 * 8599 * Description: This function will call to get the unit check buffer. 
8600 * 8601 * Return value: 8602 * IPR_RC_JOB_RETURN 8603 **/ 8604 static int ipr_reset_get_unit_check_job(struct ipr_cmnd *ipr_cmd) 8605 { 8606 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; 8607 8608 ENTER; 8609 ioa_cfg->ioa_unit_checked = 0; 8610 ipr_get_unit_check_buffer(ioa_cfg); 8611 ipr_cmd->job_step = ipr_reset_alert; 8612 ipr_reset_start_timer(ipr_cmd, 0); 8613 8614 LEAVE; 8615 return IPR_RC_JOB_RETURN; 8616 } 8617 8618 static int ipr_dump_mailbox_wait(struct ipr_cmnd *ipr_cmd) 8619 { 8620 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; 8621 8622 ENTER; 8623 8624 if (ioa_cfg->sdt_state != GET_DUMP) 8625 return IPR_RC_JOB_RETURN; 8626 8627 if (!ioa_cfg->sis64 || !ipr_cmd->u.time_left || 8628 (readl(ioa_cfg->regs.sense_interrupt_reg) & 8629 IPR_PCII_MAILBOX_STABLE)) { 8630 8631 if (!ipr_cmd->u.time_left) 8632 dev_err(&ioa_cfg->pdev->dev, 8633 "Timed out waiting for Mailbox register.\n"); 8634 8635 ioa_cfg->sdt_state = READ_DUMP; 8636 ioa_cfg->dump_timeout = 0; 8637 if (ioa_cfg->sis64) 8638 ipr_reset_start_timer(ipr_cmd, IPR_SIS64_DUMP_TIMEOUT); 8639 else 8640 ipr_reset_start_timer(ipr_cmd, IPR_SIS32_DUMP_TIMEOUT); 8641 ipr_cmd->job_step = ipr_reset_wait_for_dump; 8642 schedule_work(&ioa_cfg->work_q); 8643 8644 } else { 8645 ipr_cmd->u.time_left -= IPR_CHECK_FOR_RESET_TIMEOUT; 8646 ipr_reset_start_timer(ipr_cmd, 8647 IPR_CHECK_FOR_RESET_TIMEOUT); 8648 } 8649 8650 LEAVE; 8651 return IPR_RC_JOB_RETURN; 8652 } 8653 8654 /** 8655 * ipr_reset_restore_cfg_space - Restore PCI config space. 8656 * @ipr_cmd: ipr command struct 8657 * 8658 * Description: This function restores the saved PCI config space of 8659 * the adapter, fails all outstanding ops back to the callers, and 8660 * fetches the dump/unit check if applicable to this reset. 8661 * 8662 * Return value: 8663 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN 8664 **/ 8665 static int ipr_reset_restore_cfg_space(struct ipr_cmnd *ipr_cmd) 8666 { 8667 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; 8668 u32 int_reg; 8669 8670 ENTER; 8671 ioa_cfg->pdev->state_saved = true; 8672 pci_restore_state(ioa_cfg->pdev); 8673 8674 if (ipr_set_pcix_cmd_reg(ioa_cfg)) { 8675 ipr_cmd->s.ioasa.hdr.ioasc = cpu_to_be32(IPR_IOASC_PCI_ACCESS_ERROR); 8676 return IPR_RC_JOB_CONTINUE; 8677 } 8678 8679 ipr_fail_all_ops(ioa_cfg); 8680 8681 if (ioa_cfg->sis64) { 8682 /* Set the adapter to the correct endian mode. */ 8683 writel(IPR_ENDIAN_SWAP_KEY, ioa_cfg->regs.endian_swap_reg); 8684 int_reg = readl(ioa_cfg->regs.endian_swap_reg); 8685 } 8686 8687 if (ioa_cfg->ioa_unit_checked) { 8688 if (ioa_cfg->sis64) { 8689 ipr_cmd->job_step = ipr_reset_get_unit_check_job; 8690 ipr_reset_start_timer(ipr_cmd, IPR_DUMP_DELAY_TIMEOUT); 8691 return IPR_RC_JOB_RETURN; 8692 } else { 8693 ioa_cfg->ioa_unit_checked = 0; 8694 ipr_get_unit_check_buffer(ioa_cfg); 8695 ipr_cmd->job_step = ipr_reset_alert; 8696 ipr_reset_start_timer(ipr_cmd, 0); 8697 return IPR_RC_JOB_RETURN; 8698 } 8699 } 8700 8701 if (ioa_cfg->in_ioa_bringdown) { 8702 ipr_cmd->job_step = ipr_ioa_bringdown_done; 8703 } else if (ioa_cfg->sdt_state == GET_DUMP) { 8704 ipr_cmd->job_step = ipr_dump_mailbox_wait; 8705 ipr_cmd->u.time_left = IPR_WAIT_FOR_MAILBOX; 8706 } else { 8707 ipr_cmd->job_step = ipr_reset_enable_ioa; 8708 } 8709 8710 LEAVE; 8711 return IPR_RC_JOB_CONTINUE; 8712 } 8713 8714 /** 8715 * ipr_reset_bist_done - BIST has completed on the adapter. 8716 * @ipr_cmd: ipr command struct 8717 * 8718 * Description: Unblock config space and resume the reset process. 
8719 * 8720 * Return value: 8721 * IPR_RC_JOB_CONTINUE 8722 **/ 8723 static int ipr_reset_bist_done(struct ipr_cmnd *ipr_cmd) 8724 { 8725 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; 8726 8727 ENTER; 8728 if (ioa_cfg->cfg_locked) 8729 pci_cfg_access_unlock(ioa_cfg->pdev); 8730 ioa_cfg->cfg_locked = 0; 8731 ipr_cmd->job_step = ipr_reset_restore_cfg_space; 8732 LEAVE; 8733 return IPR_RC_JOB_CONTINUE; 8734 } 8735 8736 /** 8737 * ipr_reset_start_bist - Run BIST on the adapter. 8738 * @ipr_cmd: ipr command struct 8739 * 8740 * Description: This function runs BIST on the adapter, then delays 2 seconds. 8741 * 8742 * Return value: 8743 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN 8744 **/ 8745 static int ipr_reset_start_bist(struct ipr_cmnd *ipr_cmd) 8746 { 8747 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; 8748 int rc = PCIBIOS_SUCCESSFUL; 8749 8750 ENTER; 8751 if (ioa_cfg->ipr_chip->bist_method == IPR_MMIO) 8752 writel(IPR_UPROCI_SIS64_START_BIST, 8753 ioa_cfg->regs.set_uproc_interrupt_reg32); 8754 else 8755 rc = pci_write_config_byte(ioa_cfg->pdev, PCI_BIST, PCI_BIST_START); 8756 8757 if (rc == PCIBIOS_SUCCESSFUL) { 8758 ipr_cmd->job_step = ipr_reset_bist_done; 8759 ipr_reset_start_timer(ipr_cmd, IPR_WAIT_FOR_BIST_TIMEOUT); 8760 rc = IPR_RC_JOB_RETURN; 8761 } else { 8762 if (ioa_cfg->cfg_locked) 8763 pci_cfg_access_unlock(ipr_cmd->ioa_cfg->pdev); 8764 ioa_cfg->cfg_locked = 0; 8765 ipr_cmd->s.ioasa.hdr.ioasc = cpu_to_be32(IPR_IOASC_PCI_ACCESS_ERROR); 8766 rc = IPR_RC_JOB_CONTINUE; 8767 } 8768 8769 LEAVE; 8770 return rc; 8771 } 8772 8773 /** 8774 * ipr_reset_slot_reset_done - Clear PCI reset to the adapter 8775 * @ipr_cmd: ipr command struct 8776 * 8777 * Description: This clears PCI reset to the adapter and delays two seconds. 8778 * 8779 * Return value: 8780 * IPR_RC_JOB_RETURN 8781 **/ 8782 static int ipr_reset_slot_reset_done(struct ipr_cmnd *ipr_cmd) 8783 { 8784 ENTER; 8785 ipr_cmd->job_step = ipr_reset_bist_done; 8786 ipr_reset_start_timer(ipr_cmd, IPR_WAIT_FOR_BIST_TIMEOUT); 8787 LEAVE; 8788 return IPR_RC_JOB_RETURN; 8789 } 8790 8791 /** 8792 * ipr_reset_reset_work - Pulse a PCIe fundamental reset 8793 * @work: work struct 8794 * 8795 * Description: This pulses warm reset to a slot. 8796 * 8797 **/ 8798 static void ipr_reset_reset_work(struct work_struct *work) 8799 { 8800 struct ipr_cmnd *ipr_cmd = container_of(work, struct ipr_cmnd, work); 8801 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; 8802 struct pci_dev *pdev = ioa_cfg->pdev; 8803 unsigned long lock_flags = 0; 8804 8805 ENTER; 8806 pci_set_pcie_reset_state(pdev, pcie_warm_reset); 8807 msleep(jiffies_to_msecs(IPR_PCI_RESET_TIMEOUT)); 8808 pci_set_pcie_reset_state(pdev, pcie_deassert_reset); 8809 8810 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); 8811 if (ioa_cfg->reset_cmd == ipr_cmd) 8812 ipr_reset_ioa_job(ipr_cmd); 8813 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); 8814 LEAVE; 8815 } 8816 8817 /** 8818 * ipr_reset_slot_reset - Reset the PCI slot of the adapter. 8819 * @ipr_cmd: ipr command struct 8820 * 8821 * Description: This asserts PCI reset to the adapter. 
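 * The warm reset pulse (pci_set_pcie_reset_state()) sleeps, so it is deferred to ipr_reset_reset_work() on reset_work_q; the job step advances to ipr_reset_slot_reset_done, which re-enters the state machine after a BIST-length delay.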
8822 * 8823 * Return value: 8824 * IPR_RC_JOB_RETURN 8825 **/ 8826 static int ipr_reset_slot_reset(struct ipr_cmnd *ipr_cmd) 8827 { 8828 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; 8829 8830 ENTER; 8831 INIT_WORK(&ipr_cmd->work, ipr_reset_reset_work); 8832 queue_work(ioa_cfg->reset_work_q, &ipr_cmd->work); 8833 ipr_cmd->job_step = ipr_reset_slot_reset_done; 8834 LEAVE; 8835 return IPR_RC_JOB_RETURN; 8836 } 8837 8838 /** 8839 * ipr_reset_block_config_access_wait - Wait for permission to block config access 8840 * @ipr_cmd: ipr command struct 8841 * 8842 * Description: This attempts to block config access to the IOA. 8843 * 8844 * Return value: 8845 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN 8846 **/ 8847 static int ipr_reset_block_config_access_wait(struct ipr_cmnd *ipr_cmd) 8848 { 8849 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; 8850 int rc = IPR_RC_JOB_CONTINUE; 8851 8852 if (pci_cfg_access_trylock(ioa_cfg->pdev)) { 8853 ioa_cfg->cfg_locked = 1; 8854 ipr_cmd->job_step = ioa_cfg->reset; 8855 } else { 8856 if (ipr_cmd->u.time_left) { 8857 rc = IPR_RC_JOB_RETURN; 8858 ipr_cmd->u.time_left -= IPR_CHECK_FOR_RESET_TIMEOUT; 8859 ipr_reset_start_timer(ipr_cmd, 8860 IPR_CHECK_FOR_RESET_TIMEOUT); 8861 } else { 8862 ipr_cmd->job_step = ioa_cfg->reset; 8863 dev_err(&ioa_cfg->pdev->dev, 8864 "Timed out waiting to lock config access. Resetting anyway.\n"); 8865 } 8866 } 8867 8868 return rc; 8869 } 8870 8871 /** 8872 * ipr_reset_block_config_access - Block config access to the IOA 8873 * @ipr_cmd: ipr command struct 8874 * 8875 * Description: This attempts to block config access to the IOA 8876 * 8877 * Return value: 8878 * IPR_RC_JOB_CONTINUE 8879 **/ 8880 static int ipr_reset_block_config_access(struct ipr_cmnd *ipr_cmd) 8881 { 8882 ipr_cmd->ioa_cfg->cfg_locked = 0; 8883 ipr_cmd->job_step = ipr_reset_block_config_access_wait; 8884 ipr_cmd->u.time_left = IPR_WAIT_FOR_RESET_TIMEOUT; 8885 return IPR_RC_JOB_CONTINUE; 8886 } 8887 8888 /** 8889 * ipr_reset_allowed - Query whether or not IOA can be reset 8890 * @ioa_cfg: ioa config struct 8891 * 8892 * Return value: 8893 * 0 if reset not allowed / non-zero if reset is allowed 8894 **/ 8895 static int ipr_reset_allowed(struct ipr_ioa_cfg *ioa_cfg) 8896 { 8897 volatile u32 temp_reg; 8898 8899 temp_reg = readl(ioa_cfg->regs.sense_interrupt_reg); 8900 return ((temp_reg & IPR_PCII_CRITICAL_OPERATION) == 0); 8901 } 8902 8903 /** 8904 * ipr_reset_wait_to_start_bist - Wait for permission to reset IOA. 8905 * @ipr_cmd: ipr command struct 8906 * 8907 * Description: This function waits for adapter permission to run BIST, 8908 * then runs BIST. If the adapter does not give permission after a 8909 * reasonable time, we will reset the adapter anyway. The impact of 8910 * resetting the adapter without warning the adapter is the risk of 8911 * losing the persistent error log on the adapter. If the adapter is 8912 * reset while it is writing to the flash on the adapter, the flash 8913 * segment will have bad ECC and be zeroed. 
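 * The wait is bounded: u.time_left starts at IPR_WAIT_FOR_RESET_TIMEOUT and is decremented in IPR_CHECK_FOR_RESET_TIMEOUT steps; once the IOA drops IPR_PCII_CRITICAL_OPERATION or the time budget is exhausted, the job advances to ipr_reset_block_config_access.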
8914 * 8915 * Return value: 8916 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN 8917 **/ 8918 static int ipr_reset_wait_to_start_bist(struct ipr_cmnd *ipr_cmd) 8919 { 8920 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; 8921 int rc = IPR_RC_JOB_RETURN; 8922 8923 if (!ipr_reset_allowed(ioa_cfg) && ipr_cmd->u.time_left) { 8924 ipr_cmd->u.time_left -= IPR_CHECK_FOR_RESET_TIMEOUT; 8925 ipr_reset_start_timer(ipr_cmd, IPR_CHECK_FOR_RESET_TIMEOUT); 8926 } else { 8927 ipr_cmd->job_step = ipr_reset_block_config_access; 8928 rc = IPR_RC_JOB_CONTINUE; 8929 } 8930 8931 return rc; 8932 } 8933 8934 /** 8935 * ipr_reset_alert - Alert the adapter of a pending reset 8936 * @ipr_cmd: ipr command struct 8937 * 8938 * Description: This function alerts the adapter that it will be reset. 8939 * If memory space is not currently enabled, proceed directly 8940 * to running BIST on the adapter. The timer must always be started 8941 * so we guarantee we do not run BIST from ipr_isr. 8942 * 8943 * Return value: 8944 * IPR_RC_JOB_RETURN 8945 **/ 8946 static int ipr_reset_alert(struct ipr_cmnd *ipr_cmd) 8947 { 8948 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; 8949 u16 cmd_reg; 8950 int rc; 8951 8952 ENTER; 8953 rc = pci_read_config_word(ioa_cfg->pdev, PCI_COMMAND, &cmd_reg); 8954 8955 if ((rc == PCIBIOS_SUCCESSFUL) && (cmd_reg & PCI_COMMAND_MEMORY)) { 8956 ipr_mask_and_clear_interrupts(ioa_cfg, ~0); 8957 writel(IPR_UPROCI_RESET_ALERT, ioa_cfg->regs.set_uproc_interrupt_reg32); 8958 ipr_cmd->job_step = ipr_reset_wait_to_start_bist; 8959 } else { 8960 ipr_cmd->job_step = ipr_reset_block_config_access; 8961 } 8962 8963 ipr_cmd->u.time_left = IPR_WAIT_FOR_RESET_TIMEOUT; 8964 ipr_reset_start_timer(ipr_cmd, IPR_CHECK_FOR_RESET_TIMEOUT); 8965 8966 LEAVE; 8967 return IPR_RC_JOB_RETURN; 8968 } 8969 8970 /** 8971 * ipr_reset_quiesce_done - Complete IOA disconnect 8972 * @ipr_cmd: ipr command struct 8973 * 8974 * Description: Freeze the adapter to complete quiesce processing 8975 * 8976 * Return value: 8977 * IPR_RC_JOB_CONTINUE 8978 **/ 8979 static int ipr_reset_quiesce_done(struct ipr_cmnd *ipr_cmd) 8980 { 8981 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; 8982 8983 ENTER; 8984 ipr_cmd->job_step = ipr_ioa_bringdown_done; 8985 ipr_mask_and_clear_interrupts(ioa_cfg, ~IPR_PCII_IOA_TRANS_TO_OPER); 8986 LEAVE; 8987 return IPR_RC_JOB_CONTINUE; 8988 } 8989 8990 /** 8991 * ipr_reset_cancel_hcam_done - Check for outstanding commands 8992 * @ipr_cmd: ipr command struct 8993 * 8994 * Description: Ensure nothing is outstanding to the IOA and 8995 * proceed with IOA disconnect. Otherwise reset the IOA. 
8996 * 8997 * Return value: 8998 * IPR_RC_JOB_RETURN / IPR_RC_JOB_CONTINUE 8999 **/ 9000 static int ipr_reset_cancel_hcam_done(struct ipr_cmnd *ipr_cmd) 9001 { 9002 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; 9003 struct ipr_cmnd *loop_cmd; 9004 struct ipr_hrr_queue *hrrq; 9005 int rc = IPR_RC_JOB_CONTINUE; 9006 int count = 0; 9007 9008 ENTER; 9009 ipr_cmd->job_step = ipr_reset_quiesce_done; 9010 9011 for_each_hrrq(hrrq, ioa_cfg) { 9012 spin_lock(&hrrq->_lock); 9013 list_for_each_entry(loop_cmd, &hrrq->hrrq_pending_q, queue) { 9014 count++; 9015 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE); 9016 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q); 9017 rc = IPR_RC_JOB_RETURN; 9018 break; 9019 } 9020 spin_unlock(&hrrq->_lock); 9021 9022 if (count) 9023 break; 9024 } 9025 9026 LEAVE; 9027 return rc; 9028 } 9029 9030 /** 9031 * ipr_reset_cancel_hcam - Cancel outstanding HCAMs 9032 * @ipr_cmd: ipr command struct 9033 * 9034 * Description: Cancel any outstanding HCAMs to the IOA. 9035 * 9036 * Return value: 9037 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN 9038 **/ 9039 static int ipr_reset_cancel_hcam(struct ipr_cmnd *ipr_cmd) 9040 { 9041 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; 9042 int rc = IPR_RC_JOB_CONTINUE; 9043 struct ipr_cmd_pkt *cmd_pkt; 9044 struct ipr_cmnd *hcam_cmd; 9045 struct ipr_hrr_queue *hrrq = &ioa_cfg->hrrq[IPR_INIT_HRRQ]; 9046 9047 ENTER; 9048 ipr_cmd->job_step = ipr_reset_cancel_hcam_done; 9049 9050 if (!hrrq->ioa_is_dead) { 9051 if (!list_empty(&ioa_cfg->hostrcb_pending_q)) { 9052 list_for_each_entry(hcam_cmd, &hrrq->hrrq_pending_q, queue) { 9053 if (hcam_cmd->ioarcb.cmd_pkt.cdb[0] != IPR_HOST_CONTROLLED_ASYNC) 9054 continue; 9055 9056 ipr_cmd->ioarcb.res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE); 9057 ipr_cmd->ioarcb.cmd_pkt.request_type = IPR_RQTYPE_IOACMD; 9058 cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt; 9059 cmd_pkt->request_type = IPR_RQTYPE_IOACMD; 9060 cmd_pkt->cdb[0] = IPR_CANCEL_REQUEST; 9061 cmd_pkt->cdb[1] = IPR_CANCEL_64BIT_IOARCB; 9062 cmd_pkt->cdb[10] = ((u64) hcam_cmd->dma_addr >> 56) & 0xff; 9063 cmd_pkt->cdb[11] = ((u64) hcam_cmd->dma_addr >> 48) & 0xff; 9064 cmd_pkt->cdb[12] = ((u64) hcam_cmd->dma_addr >> 40) & 0xff; 9065 cmd_pkt->cdb[13] = ((u64) hcam_cmd->dma_addr >> 32) & 0xff; 9066 cmd_pkt->cdb[2] = ((u64) hcam_cmd->dma_addr >> 24) & 0xff; 9067 cmd_pkt->cdb[3] = ((u64) hcam_cmd->dma_addr >> 16) & 0xff; 9068 cmd_pkt->cdb[4] = ((u64) hcam_cmd->dma_addr >> 8) & 0xff; 9069 cmd_pkt->cdb[5] = ((u64) hcam_cmd->dma_addr) & 0xff; 9070 9071 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, 9072 IPR_CANCEL_TIMEOUT); 9073 9074 rc = IPR_RC_JOB_RETURN; 9075 ipr_cmd->job_step = ipr_reset_cancel_hcam; 9076 break; 9077 } 9078 } 9079 } else 9080 ipr_cmd->job_step = ipr_reset_alert; 9081 9082 LEAVE; 9083 return rc; 9084 } 9085 9086 /** 9087 * ipr_reset_ucode_download_done - Microcode download completion 9088 * @ipr_cmd: ipr command struct 9089 * 9090 * Description: This function unmaps the microcode download buffer.
9091 * 9092 * Return value: 9093 * IPR_RC_JOB_CONTINUE 9094 **/ 9095 static int ipr_reset_ucode_download_done(struct ipr_cmnd *ipr_cmd) 9096 { 9097 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; 9098 struct ipr_sglist *sglist = ioa_cfg->ucode_sglist; 9099 9100 dma_unmap_sg(&ioa_cfg->pdev->dev, sglist->scatterlist, 9101 sglist->num_sg, DMA_TO_DEVICE); 9102 9103 ipr_cmd->job_step = ipr_reset_alert; 9104 return IPR_RC_JOB_CONTINUE; 9105 } 9106 9107 /** 9108 * ipr_reset_ucode_download - Download microcode to the adapter 9109 * @ipr_cmd: ipr command struct 9110 * 9111 * Description: This function checks to see if there is microcode 9112 * to download to the adapter. If there is, a download is performed. 9113 * 9114 * Return value: 9115 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN 9116 **/ 9117 static int ipr_reset_ucode_download(struct ipr_cmnd *ipr_cmd) 9118 { 9119 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; 9120 struct ipr_sglist *sglist = ioa_cfg->ucode_sglist; 9121 9122 ENTER; 9123 ipr_cmd->job_step = ipr_reset_alert; 9124 9125 if (!sglist) 9126 return IPR_RC_JOB_CONTINUE; 9127 9128 ipr_cmd->ioarcb.res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE); 9129 ipr_cmd->ioarcb.cmd_pkt.request_type = IPR_RQTYPE_SCSICDB; 9130 ipr_cmd->ioarcb.cmd_pkt.cdb[0] = WRITE_BUFFER; 9131 ipr_cmd->ioarcb.cmd_pkt.cdb[1] = IPR_WR_BUF_DOWNLOAD_AND_SAVE; 9132 ipr_cmd->ioarcb.cmd_pkt.cdb[6] = (sglist->buffer_len & 0xff0000) >> 16; 9133 ipr_cmd->ioarcb.cmd_pkt.cdb[7] = (sglist->buffer_len & 0x00ff00) >> 8; 9134 ipr_cmd->ioarcb.cmd_pkt.cdb[8] = sglist->buffer_len & 0x0000ff; 9135 9136 if (ioa_cfg->sis64) 9137 ipr_build_ucode_ioadl64(ipr_cmd, sglist); 9138 else 9139 ipr_build_ucode_ioadl(ipr_cmd, sglist); 9140 ipr_cmd->job_step = ipr_reset_ucode_download_done; 9141 9142 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, 9143 IPR_WRITE_BUFFER_TIMEOUT); 9144 9145 LEAVE; 9146 return IPR_RC_JOB_RETURN; 9147 } 9148 9149 /** 9150 * ipr_reset_shutdown_ioa - Shutdown the adapter 9151 * @ipr_cmd: ipr command struct 9152 * 9153 * Description: This function issues an adapter shutdown of the 9154 * specified type to the specified adapter as part of the 9155 * adapter reset job.
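 * The shutdown timeout is selected from the shutdown type: IPR_SHUTDOWN_TIMEOUT for a normal shutdown, IPR_INTERNAL_TIMEOUT for prepare-for-normal, an abbreviated dual-IOA timeout when dual IOA RAID is active, and IPR_ABBREV_SHUTDOWN_TIMEOUT otherwise. A quiesce request is routed to ipr_reset_cancel_hcam instead of issuing the shutdown CDB.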
9156 * 9157 * Return value: 9158 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN 9159 **/ 9160 static int ipr_reset_shutdown_ioa(struct ipr_cmnd *ipr_cmd) 9161 { 9162 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; 9163 enum ipr_shutdown_type shutdown_type = ipr_cmd->u.shutdown_type; 9164 unsigned long timeout; 9165 int rc = IPR_RC_JOB_CONTINUE; 9166 9167 ENTER; 9168 if (shutdown_type == IPR_SHUTDOWN_QUIESCE) 9169 ipr_cmd->job_step = ipr_reset_cancel_hcam; 9170 else if (shutdown_type != IPR_SHUTDOWN_NONE && 9171 !ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead) { 9172 ipr_cmd->ioarcb.res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE); 9173 ipr_cmd->ioarcb.cmd_pkt.request_type = IPR_RQTYPE_IOACMD; 9174 ipr_cmd->ioarcb.cmd_pkt.cdb[0] = IPR_IOA_SHUTDOWN; 9175 ipr_cmd->ioarcb.cmd_pkt.cdb[1] = shutdown_type; 9176 9177 if (shutdown_type == IPR_SHUTDOWN_NORMAL) 9178 timeout = IPR_SHUTDOWN_TIMEOUT; 9179 else if (shutdown_type == IPR_SHUTDOWN_PREPARE_FOR_NORMAL) 9180 timeout = IPR_INTERNAL_TIMEOUT; 9181 else if (ioa_cfg->dual_raid && ipr_dual_ioa_raid) 9182 timeout = IPR_DUAL_IOA_ABBR_SHUTDOWN_TO; 9183 else 9184 timeout = IPR_ABBREV_SHUTDOWN_TIMEOUT; 9185 9186 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, timeout); 9187 9188 rc = IPR_RC_JOB_RETURN; 9189 ipr_cmd->job_step = ipr_reset_ucode_download; 9190 } else 9191 ipr_cmd->job_step = ipr_reset_alert; 9192 9193 LEAVE; 9194 return rc; 9195 } 9196 9197 /** 9198 * ipr_reset_ioa_job - Adapter reset job 9199 * @ipr_cmd: ipr command struct 9200 * 9201 * Description: This function is the job router for the adapter reset job. 9202 * 9203 * Return value: 9204 * none 9205 **/ 9206 static void ipr_reset_ioa_job(struct ipr_cmnd *ipr_cmd) 9207 { 9208 u32 rc, ioasc; 9209 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; 9210 9211 do { 9212 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc); 9213 9214 if (ioa_cfg->reset_cmd != ipr_cmd) { 9215 /* 9216 * We are doing nested adapter resets and this is 9217 * not the current reset job. 9218 */ 9219 list_add_tail(&ipr_cmd->queue, 9220 &ipr_cmd->hrrq->hrrq_free_q); 9221 return; 9222 } 9223 9224 if (IPR_IOASC_SENSE_KEY(ioasc)) { 9225 rc = ipr_cmd->job_step_failed(ipr_cmd); 9226 if (rc == IPR_RC_JOB_RETURN) 9227 return; 9228 } 9229 9230 ipr_reinit_ipr_cmnd(ipr_cmd); 9231 ipr_cmd->job_step_failed = ipr_reset_cmd_failed; 9232 rc = ipr_cmd->job_step(ipr_cmd); 9233 } while (rc == IPR_RC_JOB_CONTINUE); 9234 } 9235 9236 /** 9237 * _ipr_initiate_ioa_reset - Initiate an adapter reset 9238 * @ioa_cfg: ioa config struct 9239 * @job_step: first job step of reset job 9240 * @shutdown_type: shutdown type 9241 * 9242 * Description: This function will initiate the reset of the given adapter 9243 * starting at the selected job step. 9244 * If the caller needs to wait on the completion of the reset, 9245 * the caller must sleep on the reset_wait_q. 
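 * New commands are first blocked on every HRRQ (allow_cmds = 0) and, unless the adapter is being removed, SCSI mid-layer requests are blocked as well; a free command block is then claimed as reset_cmd and handed to ipr_reset_ioa_job with the selected first job step and shutdown type.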
9246 * 9247 * Return value: 9248 * none 9249 **/ 9250 static void _ipr_initiate_ioa_reset(struct ipr_ioa_cfg *ioa_cfg, 9251 int (*job_step) (struct ipr_cmnd *), 9252 enum ipr_shutdown_type shutdown_type) 9253 { 9254 struct ipr_cmnd *ipr_cmd; 9255 int i; 9256 9257 ioa_cfg->in_reset_reload = 1; 9258 for (i = 0; i < ioa_cfg->hrrq_num; i++) { 9259 spin_lock(&ioa_cfg->hrrq[i]._lock); 9260 ioa_cfg->hrrq[i].allow_cmds = 0; 9261 spin_unlock(&ioa_cfg->hrrq[i]._lock); 9262 } 9263 wmb(); 9264 if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].removing_ioa) { 9265 ioa_cfg->scsi_unblock = 0; 9266 ioa_cfg->scsi_blocked = 1; 9267 scsi_block_requests(ioa_cfg->host); 9268 } 9269 9270 ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg); 9271 ioa_cfg->reset_cmd = ipr_cmd; 9272 ipr_cmd->job_step = job_step; 9273 ipr_cmd->u.shutdown_type = shutdown_type; 9274 9275 ipr_reset_ioa_job(ipr_cmd); 9276 } 9277 9278 /** 9279 * ipr_initiate_ioa_reset - Initiate an adapter reset 9280 * @ioa_cfg: ioa config struct 9281 * @shutdown_type: shutdown type 9282 * 9283 * Description: This function will initiate the reset of the given adapter. 9284 * If the caller needs to wait on the completion of the reset, 9285 * the caller must sleep on the reset_wait_q. 9286 * 9287 * Return value: 9288 * none 9289 **/ 9290 static void ipr_initiate_ioa_reset(struct ipr_ioa_cfg *ioa_cfg, 9291 enum ipr_shutdown_type shutdown_type) 9292 { 9293 int i; 9294 9295 if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead) 9296 return; 9297 9298 if (ioa_cfg->in_reset_reload) { 9299 if (ioa_cfg->sdt_state == GET_DUMP) 9300 ioa_cfg->sdt_state = WAIT_FOR_DUMP; 9301 else if (ioa_cfg->sdt_state == READ_DUMP) 9302 ioa_cfg->sdt_state = ABORT_DUMP; 9303 } 9304 9305 if (ioa_cfg->reset_retries++ >= IPR_NUM_RESET_RELOAD_RETRIES) { 9306 dev_err(&ioa_cfg->pdev->dev, 9307 "IOA taken offline - error recovery failed\n"); 9308 9309 ioa_cfg->reset_retries = 0; 9310 for (i = 0; i < ioa_cfg->hrrq_num; i++) { 9311 spin_lock(&ioa_cfg->hrrq[i]._lock); 9312 ioa_cfg->hrrq[i].ioa_is_dead = 1; 9313 spin_unlock(&ioa_cfg->hrrq[i]._lock); 9314 } 9315 wmb(); 9316 9317 if (ioa_cfg->in_ioa_bringdown) { 9318 ioa_cfg->reset_cmd = NULL; 9319 ioa_cfg->in_reset_reload = 0; 9320 ipr_fail_all_ops(ioa_cfg); 9321 wake_up_all(&ioa_cfg->reset_wait_q); 9322 9323 if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].removing_ioa) { 9324 ioa_cfg->scsi_unblock = 1; 9325 schedule_work(&ioa_cfg->work_q); 9326 } 9327 return; 9328 } else { 9329 ioa_cfg->in_ioa_bringdown = 1; 9330 shutdown_type = IPR_SHUTDOWN_NONE; 9331 } 9332 } 9333 9334 _ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_shutdown_ioa, 9335 shutdown_type); 9336 } 9337 9338 /** 9339 * ipr_reset_freeze - Hold off all I/O activity 9340 * @ipr_cmd: ipr command struct 9341 * 9342 * Description: If the PCI slot is frozen, hold off all I/O 9343 * activity; then, as soon as the slot is available again, 9344 * initiate an adapter reset. 
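 * Interrupts are disallowed on every HRRQ and the command block is parked on the pending queue with ipr_reset_ioa_job as its done handler; the job resumes when ipr_pci_slot_reset() restarts the reset sequence.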
9345 */ 9346 static int ipr_reset_freeze(struct ipr_cmnd *ipr_cmd) 9347 { 9348 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; 9349 int i; 9350 9351 /* Disallow new interrupts, avoid loop */ 9352 for (i = 0; i < ioa_cfg->hrrq_num; i++) { 9353 spin_lock(&ioa_cfg->hrrq[i]._lock); 9354 ioa_cfg->hrrq[i].allow_interrupts = 0; 9355 spin_unlock(&ioa_cfg->hrrq[i]._lock); 9356 } 9357 wmb(); 9358 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q); 9359 ipr_cmd->done = ipr_reset_ioa_job; 9360 return IPR_RC_JOB_RETURN; 9361 } 9362 9363 /** 9364 * ipr_pci_mmio_enabled - Called when MMIO has been re-enabled 9365 * @pdev: PCI device struct 9366 * 9367 * Description: This routine is called to tell us that the MMIO 9368 * access to the IOA has been restored 9369 */ 9370 static pci_ers_result_t ipr_pci_mmio_enabled(struct pci_dev *pdev) 9371 { 9372 unsigned long flags = 0; 9373 struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev); 9374 9375 spin_lock_irqsave(ioa_cfg->host->host_lock, flags); 9376 if (!ioa_cfg->probe_done) 9377 pci_save_state(pdev); 9378 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags); 9379 return PCI_ERS_RESULT_NEED_RESET; 9380 } 9381 9382 /** 9383 * ipr_pci_frozen - Called when slot has experienced a PCI bus error. 9384 * @pdev: PCI device struct 9385 * 9386 * Description: This routine is called to tell us that the PCI bus 9387 * is down. Can't do anything here, except put the device driver 9388 * into a holding pattern, waiting for the PCI bus to come back. 9389 */ 9390 static void ipr_pci_frozen(struct pci_dev *pdev) 9391 { 9392 unsigned long flags = 0; 9393 struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev); 9394 9395 spin_lock_irqsave(ioa_cfg->host->host_lock, flags); 9396 if (ioa_cfg->probe_done) 9397 _ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_freeze, IPR_SHUTDOWN_NONE); 9398 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags); 9399 } 9400 9401 /** 9402 * ipr_pci_slot_reset - Called when PCI slot has been reset. 9403 * @pdev: PCI device struct 9404 * 9405 * Description: This routine is called by the pci error recovery 9406 * code after the PCI slot has been reset, just before we 9407 * should resume normal operations. 9408 */ 9409 static pci_ers_result_t ipr_pci_slot_reset(struct pci_dev *pdev) 9410 { 9411 unsigned long flags = 0; 9412 struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev); 9413 9414 spin_lock_irqsave(ioa_cfg->host->host_lock, flags); 9415 if (ioa_cfg->probe_done) { 9416 if (ioa_cfg->needs_warm_reset) 9417 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE); 9418 else 9419 _ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_restore_cfg_space, 9420 IPR_SHUTDOWN_NONE); 9421 } else 9422 wake_up_all(&ioa_cfg->eeh_wait_q); 9423 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags); 9424 return PCI_ERS_RESULT_RECOVERED; 9425 } 9426 9427 /** 9428 * ipr_pci_perm_failure - Called when PCI slot is dead for good. 9429 * @pdev: PCI device struct 9430 * 9431 * Description: This routine is called when the PCI bus has 9432 * permanently failed. 
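 *
 * For orientation, the EEH callbacks wired up in ipr_err_handler near
 * the end of this file run in roughly this order for a recoverable
 * error (a summary of the routines above and below):
 *
 *	ipr_pci_error_detected(pdev, pci_channel_io_frozen)
 *		calls ipr_pci_frozen() and returns PCI_ERS_RESULT_CAN_RECOVER
 *	ipr_pci_mmio_enabled(pdev)
 *		returns PCI_ERS_RESULT_NEED_RESET to request a slot reset
 *	ipr_pci_slot_reset(pdev)
 *		restarts the reset job and returns PCI_ERS_RESULT_RECOVERED
 *
 * A permanent failure skips that sequence and lands here instead, where
 * the adapter is taken offline by forcing reset_retries to its limit
 * before initiating one final reset.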
9433 */ 9434 static void ipr_pci_perm_failure(struct pci_dev *pdev) 9435 { 9436 unsigned long flags = 0; 9437 struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev); 9438 int i; 9439 9440 spin_lock_irqsave(ioa_cfg->host->host_lock, flags); 9441 if (ioa_cfg->probe_done) { 9442 if (ioa_cfg->sdt_state == WAIT_FOR_DUMP) 9443 ioa_cfg->sdt_state = ABORT_DUMP; 9444 ioa_cfg->reset_retries = IPR_NUM_RESET_RELOAD_RETRIES - 1; 9445 ioa_cfg->in_ioa_bringdown = 1; 9446 for (i = 0; i < ioa_cfg->hrrq_num; i++) { 9447 spin_lock(&ioa_cfg->hrrq[i]._lock); 9448 ioa_cfg->hrrq[i].allow_cmds = 0; 9449 spin_unlock(&ioa_cfg->hrrq[i]._lock); 9450 } 9451 wmb(); 9452 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE); 9453 } else 9454 wake_up_all(&ioa_cfg->eeh_wait_q); 9455 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags); 9456 } 9457 9458 /** 9459 * ipr_pci_error_detected - Called when a PCI error is detected. 9460 * @pdev: PCI device struct 9461 * @state: PCI channel state 9462 * 9463 * Description: Called when a PCI error is detected. 9464 * 9465 * Return value: 9466 * PCI_ERS_RESULT_NEED_RESET or PCI_ERS_RESULT_DISCONNECT 9467 */ 9468 static pci_ers_result_t ipr_pci_error_detected(struct pci_dev *pdev, 9469 pci_channel_state_t state) 9470 { 9471 switch (state) { 9472 case pci_channel_io_frozen: 9473 ipr_pci_frozen(pdev); 9474 return PCI_ERS_RESULT_CAN_RECOVER; 9475 case pci_channel_io_perm_failure: 9476 ipr_pci_perm_failure(pdev); 9477 return PCI_ERS_RESULT_DISCONNECT; 9478 break; 9479 default: 9480 break; 9481 } 9482 return PCI_ERS_RESULT_NEED_RESET; 9483 } 9484 9485 /** 9486 * ipr_probe_ioa_part2 - Initializes IOAs found in ipr_probe_ioa(..) 9487 * @ioa_cfg: ioa cfg struct 9488 * 9489 * Description: This is the second phase of adapter initialization. 9490 * This function takes care of initializing the adapter to the point 9491 * where it can accept new commands.
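 *
 * Concretely (a summary of the body below): probe_done is set and one
 * of two reset flavours is started, a full ipr_initiate_ioa_reset() if
 * ipr_probe_ioa() latched needs_hard_reset, otherwise the lighter
 * _ipr_initiate_ioa_reset() beginning at the ipr_reset_enable_ioa job
 * step. The adapter only accepts new commands once that reset job
 * completes; callers that need to wait sleep on reset_wait_q.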
9492 9493 * Return value: 9494 * 0 on success / -EIO on failure 9495 **/ 9496 static int ipr_probe_ioa_part2(struct ipr_ioa_cfg *ioa_cfg) 9497 { 9498 int rc = 0; 9499 unsigned long host_lock_flags = 0; 9500 9501 ENTER; 9502 spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags); 9503 dev_dbg(&ioa_cfg->pdev->dev, "ioa_cfg adx: 0x%p\n", ioa_cfg); 9504 ioa_cfg->probe_done = 1; 9505 if (ioa_cfg->needs_hard_reset) { 9506 ioa_cfg->needs_hard_reset = 0; 9507 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE); 9508 } else 9509 _ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_enable_ioa, 9510 IPR_SHUTDOWN_NONE); 9511 spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags); 9512 9513 LEAVE; 9514 return rc; 9515 } 9516 9517 /** 9518 * ipr_free_cmd_blks - Frees command blocks allocated for an adapter 9519 * @ioa_cfg: ioa config struct 9520 * 9521 * Return value: 9522 * none 9523 **/ 9524 static void ipr_free_cmd_blks(struct ipr_ioa_cfg *ioa_cfg) 9525 { 9526 int i; 9527 9528 if (ioa_cfg->ipr_cmnd_list) { 9529 for (i = 0; i < IPR_NUM_CMD_BLKS; i++) { 9530 if (ioa_cfg->ipr_cmnd_list[i]) 9531 dma_pool_free(ioa_cfg->ipr_cmd_pool, 9532 ioa_cfg->ipr_cmnd_list[i], 9533 ioa_cfg->ipr_cmnd_list_dma[i]); 9534 9535 ioa_cfg->ipr_cmnd_list[i] = NULL; 9536 } 9537 } 9538 9539 if (ioa_cfg->ipr_cmd_pool) 9540 dma_pool_destroy(ioa_cfg->ipr_cmd_pool); 9541 9542 kfree(ioa_cfg->ipr_cmnd_list); 9543 kfree(ioa_cfg->ipr_cmnd_list_dma); 9544 ioa_cfg->ipr_cmnd_list = NULL; 9545 ioa_cfg->ipr_cmnd_list_dma = NULL; 9546 ioa_cfg->ipr_cmd_pool = NULL; 9547 } 9548 9549 /** 9550 * ipr_free_mem - Frees memory allocated for an adapter 9551 * @ioa_cfg: ioa cfg struct 9552 * 9553 * Return value: 9554 * nothing 9555 **/ 9556 static void ipr_free_mem(struct ipr_ioa_cfg *ioa_cfg) 9557 { 9558 int i; 9559 9560 kfree(ioa_cfg->res_entries); 9561 dma_free_coherent(&ioa_cfg->pdev->dev, sizeof(struct ipr_misc_cbs), 9562 ioa_cfg->vpd_cbs, ioa_cfg->vpd_cbs_dma); 9563 ipr_free_cmd_blks(ioa_cfg); 9564 9565 for (i = 0; i < ioa_cfg->hrrq_num; i++) 9566 dma_free_coherent(&ioa_cfg->pdev->dev, 9567 sizeof(u32) * ioa_cfg->hrrq[i].size, 9568 ioa_cfg->hrrq[i].host_rrq, 9569 ioa_cfg->hrrq[i].host_rrq_dma); 9570 9571 dma_free_coherent(&ioa_cfg->pdev->dev, ioa_cfg->cfg_table_size, 9572 ioa_cfg->u.cfg_table, ioa_cfg->cfg_table_dma); 9573 9574 for (i = 0; i < IPR_MAX_HCAMS; i++) { 9575 dma_free_coherent(&ioa_cfg->pdev->dev, 9576 sizeof(struct ipr_hostrcb), 9577 ioa_cfg->hostrcb[i], 9578 ioa_cfg->hostrcb_dma[i]); 9579 } 9580 9581 ipr_free_dump(ioa_cfg); 9582 kfree(ioa_cfg->trace); 9583 } 9584 9585 /** 9586 * ipr_free_irqs - Free all allocated IRQs for the adapter. 9587 * @ioa_cfg: ipr cfg struct 9588 * 9589 * This function frees all allocated IRQs for the 9590 * specified adapter. 9591 * 9592 * Return value: 9593 * none 9594 **/ 9595 static void ipr_free_irqs(struct ipr_ioa_cfg *ioa_cfg) 9596 { 9597 struct pci_dev *pdev = ioa_cfg->pdev; 9598 int i; 9599 9600 for (i = 0; i < ioa_cfg->nvectors; i++) 9601 free_irq(pci_irq_vector(pdev, i), &ioa_cfg->hrrq[i]); 9602 pci_free_irq_vectors(pdev); 9603 } 9604 9605 /** 9606 * ipr_free_all_resources - Free all allocated resources for an adapter. 9607 * @ipr_cmd: ipr command struct 9608 * 9609 * This function frees all allocated resources for the 9610 * specified adapter. 
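 *
 * Teardown happens in roughly the reverse order of ipr_probe_ioa()
 * (a summary of the body below, not extra behaviour): the IRQ vectors
 * are freed, the optional reset workqueue is destroyed, the MMIO
 * mapping and PCI regions are released, DMA and kmalloc allocations
 * are returned via ipr_free_mem(), the Scsi_Host reference is dropped,
 * and finally the PCI device is disabled.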
9611 * 9612 * Return value: 9613 * none 9614 **/ 9615 static void ipr_free_all_resources(struct ipr_ioa_cfg *ioa_cfg) 9616 { 9617 struct pci_dev *pdev = ioa_cfg->pdev; 9618 9619 ENTER; 9620 ipr_free_irqs(ioa_cfg); 9621 if (ioa_cfg->reset_work_q) 9622 destroy_workqueue(ioa_cfg->reset_work_q); 9623 iounmap(ioa_cfg->hdw_dma_regs); 9624 pci_release_regions(pdev); 9625 ipr_free_mem(ioa_cfg); 9626 scsi_host_put(ioa_cfg->host); 9627 pci_disable_device(pdev); 9628 LEAVE; 9629 } 9630 9631 /** 9632 * ipr_alloc_cmd_blks - Allocate command blocks for an adapter 9633 * @ioa_cfg: ioa config struct 9634 * 9635 * Return value: 9636 * 0 on success / -ENOMEM on allocation failure 9637 **/ 9638 static int ipr_alloc_cmd_blks(struct ipr_ioa_cfg *ioa_cfg) 9639 { 9640 struct ipr_cmnd *ipr_cmd; 9641 struct ipr_ioarcb *ioarcb; 9642 dma_addr_t dma_addr; 9643 int i, entries_each_hrrq, hrrq_id = 0; 9644 9645 ioa_cfg->ipr_cmd_pool = dma_pool_create(IPR_NAME, &ioa_cfg->pdev->dev, 9646 sizeof(struct ipr_cmnd), 512, 0); 9647 9648 if (!ioa_cfg->ipr_cmd_pool) 9649 return -ENOMEM; 9650 9651 ioa_cfg->ipr_cmnd_list = kcalloc(IPR_NUM_CMD_BLKS, sizeof(struct ipr_cmnd *), GFP_KERNEL); 9652 ioa_cfg->ipr_cmnd_list_dma = kcalloc(IPR_NUM_CMD_BLKS, sizeof(dma_addr_t), GFP_KERNEL); 9653 9654 if (!ioa_cfg->ipr_cmnd_list || !ioa_cfg->ipr_cmnd_list_dma) { 9655 ipr_free_cmd_blks(ioa_cfg); 9656 return -ENOMEM; 9657 } 9658 9659 for (i = 0; i < ioa_cfg->hrrq_num; i++) { 9660 if (ioa_cfg->hrrq_num > 1) { 9661 if (i == 0) { 9662 entries_each_hrrq = IPR_NUM_INTERNAL_CMD_BLKS; 9663 ioa_cfg->hrrq[i].min_cmd_id = 0; 9664 ioa_cfg->hrrq[i].max_cmd_id = 9665 (entries_each_hrrq - 1); 9666 } else { 9667 entries_each_hrrq = 9668 IPR_NUM_BASE_CMD_BLKS/ 9669 (ioa_cfg->hrrq_num - 1); 9670 ioa_cfg->hrrq[i].min_cmd_id = 9671 IPR_NUM_INTERNAL_CMD_BLKS + 9672 (i - 1) * entries_each_hrrq; 9673 ioa_cfg->hrrq[i].max_cmd_id = 9674 (IPR_NUM_INTERNAL_CMD_BLKS + 9675 i * entries_each_hrrq - 1); 9676 } 9677 } else { 9678 entries_each_hrrq = IPR_NUM_CMD_BLKS; 9679 ioa_cfg->hrrq[i].min_cmd_id = 0; 9680 ioa_cfg->hrrq[i].max_cmd_id = (entries_each_hrrq - 1); 9681 } 9682 ioa_cfg->hrrq[i].size = entries_each_hrrq; 9683 } 9684 9685 BUG_ON(ioa_cfg->hrrq_num == 0); 9686 9687 i = IPR_NUM_CMD_BLKS - 9688 ioa_cfg->hrrq[ioa_cfg->hrrq_num - 1].max_cmd_id - 1; 9689 if (i > 0) { 9690 ioa_cfg->hrrq[ioa_cfg->hrrq_num - 1].size += i; 9691 ioa_cfg->hrrq[ioa_cfg->hrrq_num - 1].max_cmd_id += i; 9692 } 9693 9694 for (i = 0; i < IPR_NUM_CMD_BLKS; i++) { 9695 ipr_cmd = dma_pool_zalloc(ioa_cfg->ipr_cmd_pool, 9696 GFP_KERNEL, &dma_addr); 9697 9698 if (!ipr_cmd) { 9699 ipr_free_cmd_blks(ioa_cfg); 9700 return -ENOMEM; 9701 } 9702 9703 ioa_cfg->ipr_cmnd_list[i] = ipr_cmd; 9704 ioa_cfg->ipr_cmnd_list_dma[i] = dma_addr; 9705 9706 ioarcb = &ipr_cmd->ioarcb; 9707 ipr_cmd->dma_addr = dma_addr; 9708 if (ioa_cfg->sis64) 9709 ioarcb->a.ioarcb_host_pci_addr64 = cpu_to_be64(dma_addr); 9710 else 9711 ioarcb->a.ioarcb_host_pci_addr = cpu_to_be32(dma_addr); 9712 9713 ioarcb->host_response_handle = cpu_to_be32(i << 2); 9714 if (ioa_cfg->sis64) { 9715 ioarcb->u.sis64_addr_data.data_ioadl_addr = 9716 cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, i.ioadl64)); 9717 ioarcb->u.sis64_addr_data.ioasa_host_pci_addr = 9718 cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, s.ioasa64)); 9719 } else { 9720 ioarcb->write_ioadl_addr = 9721 cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, i.ioadl)); 9722 ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr; 9723 ioarcb->ioasa_host_pci_addr = 9724 cpu_to_be32(dma_addr + 
offsetof(struct ipr_cmnd, s.ioasa)); 9725 } 9726 ioarcb->ioasa_len = cpu_to_be16(sizeof(struct ipr_ioasa)); 9727 ipr_cmd->cmd_index = i; 9728 ipr_cmd->ioa_cfg = ioa_cfg; 9729 ipr_cmd->sense_buffer_dma = dma_addr + 9730 offsetof(struct ipr_cmnd, sense_buffer); 9731 9732 ipr_cmd->ioarcb.cmd_pkt.hrrq_id = hrrq_id; 9733 ipr_cmd->hrrq = &ioa_cfg->hrrq[hrrq_id]; 9734 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q); 9735 if (i >= ioa_cfg->hrrq[hrrq_id].max_cmd_id) 9736 hrrq_id++; 9737 } 9738 9739 return 0; 9740 } 9741 9742 /** 9743 * ipr_alloc_mem - Allocate memory for an adapter 9744 * @ioa_cfg: ioa config struct 9745 * 9746 * Return value: 9747 * 0 on success / non-zero for error 9748 **/ 9749 static int ipr_alloc_mem(struct ipr_ioa_cfg *ioa_cfg) 9750 { 9751 struct pci_dev *pdev = ioa_cfg->pdev; 9752 int i, rc = -ENOMEM; 9753 9754 ENTER; 9755 ioa_cfg->res_entries = kcalloc(ioa_cfg->max_devs_supported, 9756 sizeof(struct ipr_resource_entry), 9757 GFP_KERNEL); 9758 9759 if (!ioa_cfg->res_entries) 9760 goto out; 9761 9762 for (i = 0; i < ioa_cfg->max_devs_supported; i++) { 9763 list_add_tail(&ioa_cfg->res_entries[i].queue, &ioa_cfg->free_res_q); 9764 ioa_cfg->res_entries[i].ioa_cfg = ioa_cfg; 9765 } 9766 9767 ioa_cfg->vpd_cbs = dma_alloc_coherent(&pdev->dev, 9768 sizeof(struct ipr_misc_cbs), 9769 &ioa_cfg->vpd_cbs_dma, 9770 GFP_KERNEL); 9771 9772 if (!ioa_cfg->vpd_cbs) 9773 goto out_free_res_entries; 9774 9775 if (ipr_alloc_cmd_blks(ioa_cfg)) 9776 goto out_free_vpd_cbs; 9777 9778 for (i = 0; i < ioa_cfg->hrrq_num; i++) { 9779 ioa_cfg->hrrq[i].host_rrq = dma_alloc_coherent(&pdev->dev, 9780 sizeof(u32) * ioa_cfg->hrrq[i].size, 9781 &ioa_cfg->hrrq[i].host_rrq_dma, 9782 GFP_KERNEL); 9783 9784 if (!ioa_cfg->hrrq[i].host_rrq) { 9785 while (--i > 0) 9786 dma_free_coherent(&pdev->dev, 9787 sizeof(u32) * ioa_cfg->hrrq[i].size, 9788 ioa_cfg->hrrq[i].host_rrq, 9789 ioa_cfg->hrrq[i].host_rrq_dma); 9790 goto out_ipr_free_cmd_blocks; 9791 } 9792 ioa_cfg->hrrq[i].ioa_cfg = ioa_cfg; 9793 } 9794 9795 ioa_cfg->u.cfg_table = dma_alloc_coherent(&pdev->dev, 9796 ioa_cfg->cfg_table_size, 9797 &ioa_cfg->cfg_table_dma, 9798 GFP_KERNEL); 9799 9800 if (!ioa_cfg->u.cfg_table) 9801 goto out_free_host_rrq; 9802 9803 for (i = 0; i < IPR_MAX_HCAMS; i++) { 9804 ioa_cfg->hostrcb[i] = dma_alloc_coherent(&pdev->dev, 9805 sizeof(struct ipr_hostrcb), 9806 &ioa_cfg->hostrcb_dma[i], 9807 GFP_KERNEL); 9808 9809 if (!ioa_cfg->hostrcb[i]) 9810 goto out_free_hostrcb_dma; 9811 9812 ioa_cfg->hostrcb[i]->hostrcb_dma = 9813 ioa_cfg->hostrcb_dma[i] + offsetof(struct ipr_hostrcb, hcam); 9814 ioa_cfg->hostrcb[i]->ioa_cfg = ioa_cfg; 9815 list_add_tail(&ioa_cfg->hostrcb[i]->queue, &ioa_cfg->hostrcb_free_q); 9816 } 9817 9818 ioa_cfg->trace = kcalloc(IPR_NUM_TRACE_ENTRIES, 9819 sizeof(struct ipr_trace_entry), 9820 GFP_KERNEL); 9821 9822 if (!ioa_cfg->trace) 9823 goto out_free_hostrcb_dma; 9824 9825 rc = 0; 9826 out: 9827 LEAVE; 9828 return rc; 9829 9830 out_free_hostrcb_dma: 9831 while (i-- > 0) { 9832 dma_free_coherent(&pdev->dev, sizeof(struct ipr_hostrcb), 9833 ioa_cfg->hostrcb[i], 9834 ioa_cfg->hostrcb_dma[i]); 9835 } 9836 dma_free_coherent(&pdev->dev, ioa_cfg->cfg_table_size, 9837 ioa_cfg->u.cfg_table, ioa_cfg->cfg_table_dma); 9838 out_free_host_rrq: 9839 for (i = 0; i < ioa_cfg->hrrq_num; i++) { 9840 dma_free_coherent(&pdev->dev, 9841 sizeof(u32) * ioa_cfg->hrrq[i].size, 9842 ioa_cfg->hrrq[i].host_rrq, 9843 ioa_cfg->hrrq[i].host_rrq_dma); 9844 } 9845 out_ipr_free_cmd_blocks: 9846 ipr_free_cmd_blks(ioa_cfg); 9847 out_free_vpd_cbs: 
9848 dma_free_coherent(&pdev->dev, sizeof(struct ipr_misc_cbs), 9849 ioa_cfg->vpd_cbs, ioa_cfg->vpd_cbs_dma); 9850 out_free_res_entries: 9851 kfree(ioa_cfg->res_entries); 9852 goto out; 9853 } 9854 9855 /** 9856 * ipr_initialize_bus_attr - Initialize SCSI bus attributes to default values 9857 * @ioa_cfg: ioa config struct 9858 * 9859 * Return value: 9860 * none 9861 **/ 9862 static void ipr_initialize_bus_attr(struct ipr_ioa_cfg *ioa_cfg) 9863 { 9864 int i; 9865 9866 for (i = 0; i < IPR_MAX_NUM_BUSES; i++) { 9867 ioa_cfg->bus_attr[i].bus = i; 9868 ioa_cfg->bus_attr[i].qas_enabled = 0; 9869 ioa_cfg->bus_attr[i].bus_width = IPR_DEFAULT_BUS_WIDTH; 9870 if (ipr_max_speed < ARRAY_SIZE(ipr_max_bus_speeds)) 9871 ioa_cfg->bus_attr[i].max_xfer_rate = ipr_max_bus_speeds[ipr_max_speed]; 9872 else 9873 ioa_cfg->bus_attr[i].max_xfer_rate = IPR_U160_SCSI_RATE; 9874 } 9875 } 9876 9877 /** 9878 * ipr_init_regs - Initialize IOA registers 9879 * @ioa_cfg: ioa config struct 9880 * 9881 * Return value: 9882 * none 9883 **/ 9884 static void ipr_init_regs(struct ipr_ioa_cfg *ioa_cfg) 9885 { 9886 const struct ipr_interrupt_offsets *p; 9887 struct ipr_interrupts *t; 9888 void __iomem *base; 9889 9890 p = &ioa_cfg->chip_cfg->regs; 9891 t = &ioa_cfg->regs; 9892 base = ioa_cfg->hdw_dma_regs; 9893 9894 t->set_interrupt_mask_reg = base + p->set_interrupt_mask_reg; 9895 t->clr_interrupt_mask_reg = base + p->clr_interrupt_mask_reg; 9896 t->clr_interrupt_mask_reg32 = base + p->clr_interrupt_mask_reg32; 9897 t->sense_interrupt_mask_reg = base + p->sense_interrupt_mask_reg; 9898 t->sense_interrupt_mask_reg32 = base + p->sense_interrupt_mask_reg32; 9899 t->clr_interrupt_reg = base + p->clr_interrupt_reg; 9900 t->clr_interrupt_reg32 = base + p->clr_interrupt_reg32; 9901 t->sense_interrupt_reg = base + p->sense_interrupt_reg; 9902 t->sense_interrupt_reg32 = base + p->sense_interrupt_reg32; 9903 t->ioarrin_reg = base + p->ioarrin_reg; 9904 t->sense_uproc_interrupt_reg = base + p->sense_uproc_interrupt_reg; 9905 t->sense_uproc_interrupt_reg32 = base + p->sense_uproc_interrupt_reg32; 9906 t->set_uproc_interrupt_reg = base + p->set_uproc_interrupt_reg; 9907 t->set_uproc_interrupt_reg32 = base + p->set_uproc_interrupt_reg32; 9908 t->clr_uproc_interrupt_reg = base + p->clr_uproc_interrupt_reg; 9909 t->clr_uproc_interrupt_reg32 = base + p->clr_uproc_interrupt_reg32; 9910 9911 if (ioa_cfg->sis64) { 9912 t->init_feedback_reg = base + p->init_feedback_reg; 9913 t->dump_addr_reg = base + p->dump_addr_reg; 9914 t->dump_data_reg = base + p->dump_data_reg; 9915 t->endian_swap_reg = base + p->endian_swap_reg; 9916 } 9917 } 9918 9919 /** 9920 * ipr_init_ioa_cfg - Initialize IOA config struct 9921 * @ioa_cfg: ioa config struct 9922 * @host: scsi host struct 9923 * @pdev: PCI dev struct 9924 * 9925 * Return value: 9926 * none 9927 **/ 9928 static void ipr_init_ioa_cfg(struct ipr_ioa_cfg *ioa_cfg, 9929 struct Scsi_Host *host, struct pci_dev *pdev) 9930 { 9931 int i; 9932 9933 ioa_cfg->host = host; 9934 ioa_cfg->pdev = pdev; 9935 ioa_cfg->log_level = ipr_log_level; 9936 ioa_cfg->doorbell = IPR_DOORBELL; 9937 sprintf(ioa_cfg->eye_catcher, IPR_EYECATCHER); 9938 sprintf(ioa_cfg->trace_start, IPR_TRACE_START_LABEL); 9939 sprintf(ioa_cfg->cfg_table_start, IPR_CFG_TBL_START); 9940 sprintf(ioa_cfg->resource_table_label, IPR_RES_TABLE_LABEL); 9941 sprintf(ioa_cfg->ipr_hcam_label, IPR_HCAM_LABEL); 9942 sprintf(ioa_cfg->ipr_cmd_label, IPR_CMD_LABEL); 9943 9944 INIT_LIST_HEAD(&ioa_cfg->hostrcb_free_q); 9945 INIT_LIST_HEAD(&ioa_cfg->hostrcb_pending_q); 9946 
INIT_LIST_HEAD(&ioa_cfg->hostrcb_report_q); 9947 INIT_LIST_HEAD(&ioa_cfg->free_res_q); 9948 INIT_LIST_HEAD(&ioa_cfg->used_res_q); 9949 INIT_WORK(&ioa_cfg->work_q, ipr_worker_thread); 9950 INIT_WORK(&ioa_cfg->scsi_add_work_q, ipr_add_remove_thread); 9951 init_waitqueue_head(&ioa_cfg->reset_wait_q); 9952 init_waitqueue_head(&ioa_cfg->msi_wait_q); 9953 init_waitqueue_head(&ioa_cfg->eeh_wait_q); 9954 ioa_cfg->sdt_state = INACTIVE; 9955 9956 ipr_initialize_bus_attr(ioa_cfg); 9957 ioa_cfg->max_devs_supported = ipr_max_devs; 9958 9959 if (ioa_cfg->sis64) { 9960 host->max_id = IPR_MAX_SIS64_TARGETS_PER_BUS; 9961 host->max_lun = IPR_MAX_SIS64_LUNS_PER_TARGET; 9962 if (ipr_max_devs > IPR_MAX_SIS64_DEVS) 9963 ioa_cfg->max_devs_supported = IPR_MAX_SIS64_DEVS; 9964 ioa_cfg->cfg_table_size = (sizeof(struct ipr_config_table_hdr64) 9965 + ((sizeof(struct ipr_config_table_entry64) 9966 * ioa_cfg->max_devs_supported))); 9967 } else { 9968 host->max_id = IPR_MAX_NUM_TARGETS_PER_BUS; 9969 host->max_lun = IPR_MAX_NUM_LUNS_PER_TARGET; 9970 if (ipr_max_devs > IPR_MAX_PHYSICAL_DEVS) 9971 ioa_cfg->max_devs_supported = IPR_MAX_PHYSICAL_DEVS; 9972 ioa_cfg->cfg_table_size = (sizeof(struct ipr_config_table_hdr) 9973 + ((sizeof(struct ipr_config_table_entry) 9974 * ioa_cfg->max_devs_supported))); 9975 } 9976 9977 host->max_channel = IPR_VSET_BUS; 9978 host->unique_id = host->host_no; 9979 host->max_cmd_len = IPR_MAX_CDB_LEN; 9980 host->can_queue = ioa_cfg->max_cmds; 9981 pci_set_drvdata(pdev, ioa_cfg); 9982 9983 for (i = 0; i < ARRAY_SIZE(ioa_cfg->hrrq); i++) { 9984 INIT_LIST_HEAD(&ioa_cfg->hrrq[i].hrrq_free_q); 9985 INIT_LIST_HEAD(&ioa_cfg->hrrq[i].hrrq_pending_q); 9986 spin_lock_init(&ioa_cfg->hrrq[i]._lock); 9987 if (i == 0) 9988 ioa_cfg->hrrq[i].lock = ioa_cfg->host->host_lock; 9989 else 9990 ioa_cfg->hrrq[i].lock = &ioa_cfg->hrrq[i]._lock; 9991 } 9992 } 9993 9994 /** 9995 * ipr_get_chip_info - Find adapter chip information 9996 * @dev_id: PCI device id struct 9997 * 9998 * Return value: 9999 * ptr to chip information on success / NULL on failure 10000 **/ 10001 static const struct ipr_chip_t * 10002 ipr_get_chip_info(const struct pci_device_id *dev_id) 10003 { 10004 int i; 10005 10006 for (i = 0; i < ARRAY_SIZE(ipr_chip); i++) 10007 if (ipr_chip[i].vendor == dev_id->vendor && 10008 ipr_chip[i].device == dev_id->device) 10009 return &ipr_chip[i]; 10010 return NULL; 10011 } 10012 10013 /** 10014 * ipr_wait_for_pci_err_recovery - Wait for any PCI error recovery to complete 10015 * during probe time 10016 * @ioa_cfg: ioa config struct 10017 * 10018 * Return value: 10019 * None 10020 **/ 10021 static void ipr_wait_for_pci_err_recovery(struct ipr_ioa_cfg *ioa_cfg) 10022 { 10023 struct pci_dev *pdev = ioa_cfg->pdev; 10024 10025 if (pci_channel_offline(pdev)) { 10026 wait_event_timeout(ioa_cfg->eeh_wait_q, 10027 !pci_channel_offline(pdev), 10028 IPR_PCI_ERROR_RECOVERY_TIMEOUT); 10029 pci_restore_state(pdev); 10030 } 10031 } 10032 10033 static void name_msi_vectors(struct ipr_ioa_cfg *ioa_cfg) 10034 { 10035 int vec_idx, n = sizeof(ioa_cfg->vectors_info[0].desc) - 1; 10036 10037 for (vec_idx = 0; vec_idx < ioa_cfg->nvectors; vec_idx++) { 10038 snprintf(ioa_cfg->vectors_info[vec_idx].desc, n, 10039 "host%d-%d", ioa_cfg->host->host_no, vec_idx); 10040 ioa_cfg->vectors_info[vec_idx]. 
10041 desc[strlen(ioa_cfg->vectors_info[vec_idx].desc)] = 0; 10042 } 10043 } 10044 10045 static int ipr_request_other_msi_irqs(struct ipr_ioa_cfg *ioa_cfg, 10046 struct pci_dev *pdev) 10047 { 10048 int i, rc; 10049 10050 for (i = 1; i < ioa_cfg->nvectors; i++) { 10051 rc = request_irq(pci_irq_vector(pdev, i), 10052 ipr_isr_mhrrq, 10053 0, 10054 ioa_cfg->vectors_info[i].desc, 10055 &ioa_cfg->hrrq[i]); 10056 if (rc) { 10057 while (--i >= 0) 10058 free_irq(pci_irq_vector(pdev, i), 10059 &ioa_cfg->hrrq[i]); 10060 return rc; 10061 } 10062 } 10063 return 0; 10064 } 10065 10066 /** 10067 * ipr_test_intr - Handle the interrupt generated in ipr_test_msi(). 10068 * @pdev: PCI device struct 10069 * 10070 * Description: Simply set the msi_received flag to 1 indicating that 10071 * Message Signaled Interrupts are supported. 10072 * 10073 * Return value: 10074 * 0 on success / non-zero on failure 10075 **/ 10076 static irqreturn_t ipr_test_intr(int irq, void *devp) 10077 { 10078 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)devp; 10079 unsigned long lock_flags = 0; 10080 irqreturn_t rc = IRQ_HANDLED; 10081 10082 dev_info(&ioa_cfg->pdev->dev, "Received IRQ : %d\n", irq); 10083 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); 10084 10085 ioa_cfg->msi_received = 1; 10086 wake_up(&ioa_cfg->msi_wait_q); 10087 10088 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); 10089 return rc; 10090 } 10091 10092 /** 10093 * ipr_test_msi - Test for Message Signaled Interrupt (MSI) support. 10094 * @pdev: PCI device struct 10095 * 10096 * Description: This routine sets up and initiates a test interrupt to determine 10097 * if the interrupt is received via the ipr_test_intr() service routine. 10098 * If the tests fails, the driver will fall back to LSI. 10099 * 10100 * Return value: 10101 * 0 on success / non-zero on failure 10102 **/ 10103 static int ipr_test_msi(struct ipr_ioa_cfg *ioa_cfg, struct pci_dev *pdev) 10104 { 10105 int rc; 10106 volatile u32 int_reg; 10107 unsigned long lock_flags = 0; 10108 int irq = pci_irq_vector(pdev, 0); 10109 10110 ENTER; 10111 10112 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); 10113 init_waitqueue_head(&ioa_cfg->msi_wait_q); 10114 ioa_cfg->msi_received = 0; 10115 ipr_mask_and_clear_interrupts(ioa_cfg, ~IPR_PCII_IOA_TRANS_TO_OPER); 10116 writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE, ioa_cfg->regs.clr_interrupt_mask_reg32); 10117 int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg); 10118 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); 10119 10120 rc = request_irq(irq, ipr_test_intr, 0, IPR_NAME, ioa_cfg); 10121 if (rc) { 10122 dev_err(&pdev->dev, "Can not assign irq %d\n", irq); 10123 return rc; 10124 } else if (ipr_debug) 10125 dev_info(&pdev->dev, "IRQ assigned: %d\n", irq); 10126 10127 writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE, ioa_cfg->regs.sense_interrupt_reg32); 10128 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg); 10129 wait_event_timeout(ioa_cfg->msi_wait_q, ioa_cfg->msi_received, HZ); 10130 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); 10131 ipr_mask_and_clear_interrupts(ioa_cfg, ~IPR_PCII_IOA_TRANS_TO_OPER); 10132 10133 if (!ioa_cfg->msi_received) { 10134 /* MSI test failed */ 10135 dev_info(&pdev->dev, "MSI test failed. 
Falling back to LSI.\n"); 10136 rc = -EOPNOTSUPP; 10137 } else if (ipr_debug) 10138 dev_info(&pdev->dev, "MSI test succeeded.\n"); 10139 10140 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); 10141 10142 free_irq(irq, ioa_cfg); 10143 10144 LEAVE; 10145 10146 return rc; 10147 } 10148 10149 /* ipr_probe_ioa - Allocates memory and does first stage of initialization 10150 * @pdev: PCI device struct 10151 * @dev_id: PCI device id struct 10152 * 10153 * Return value: 10154 * 0 on success / non-zero on failure 10155 **/ 10156 static int ipr_probe_ioa(struct pci_dev *pdev, 10157 const struct pci_device_id *dev_id) 10158 { 10159 struct ipr_ioa_cfg *ioa_cfg; 10160 struct Scsi_Host *host; 10161 unsigned long ipr_regs_pci; 10162 void __iomem *ipr_regs; 10163 int rc = PCIBIOS_SUCCESSFUL; 10164 volatile u32 mask, uproc, interrupts; 10165 unsigned long lock_flags, driver_lock_flags; 10166 unsigned int irq_flag; 10167 10168 ENTER; 10169 10170 dev_info(&pdev->dev, "Found IOA with IRQ: %d\n", pdev->irq); 10171 host = scsi_host_alloc(&driver_template, sizeof(*ioa_cfg)); 10172 10173 if (!host) { 10174 dev_err(&pdev->dev, "call to scsi_host_alloc failed!\n"); 10175 rc = -ENOMEM; 10176 goto out; 10177 } 10178 10179 ioa_cfg = (struct ipr_ioa_cfg *)host->hostdata; 10180 memset(ioa_cfg, 0, sizeof(struct ipr_ioa_cfg)); 10181 ata_host_init(&ioa_cfg->ata_host, &pdev->dev, &ipr_sata_ops); 10182 10183 ioa_cfg->ipr_chip = ipr_get_chip_info(dev_id); 10184 10185 if (!ioa_cfg->ipr_chip) { 10186 dev_err(&pdev->dev, "Unknown adapter chipset 0x%04X 0x%04X\n", 10187 dev_id->vendor, dev_id->device); 10188 goto out_scsi_host_put; 10189 } 10190 10191 /* set SIS 32 or SIS 64 */ 10192 ioa_cfg->sis64 = ioa_cfg->ipr_chip->sis_type == IPR_SIS64 ? 1 : 0; 10193 ioa_cfg->chip_cfg = ioa_cfg->ipr_chip->cfg; 10194 ioa_cfg->clear_isr = ioa_cfg->chip_cfg->clear_isr; 10195 ioa_cfg->max_cmds = ioa_cfg->chip_cfg->max_cmds; 10196 10197 if (ipr_transop_timeout) 10198 ioa_cfg->transop_timeout = ipr_transop_timeout; 10199 else if (dev_id->driver_data & IPR_USE_LONG_TRANSOP_TIMEOUT) 10200 ioa_cfg->transop_timeout = IPR_LONG_OPERATIONAL_TIMEOUT; 10201 else 10202 ioa_cfg->transop_timeout = IPR_OPERATIONAL_TIMEOUT; 10203 10204 ioa_cfg->revid = pdev->revision; 10205 10206 ipr_init_ioa_cfg(ioa_cfg, host, pdev); 10207 10208 ipr_regs_pci = pci_resource_start(pdev, 0); 10209 10210 rc = pci_request_regions(pdev, IPR_NAME); 10211 if (rc < 0) { 10212 dev_err(&pdev->dev, 10213 "Couldn't register memory range of registers\n"); 10214 goto out_scsi_host_put; 10215 } 10216 10217 rc = pci_enable_device(pdev); 10218 10219 if (rc || pci_channel_offline(pdev)) { 10220 if (pci_channel_offline(pdev)) { 10221 ipr_wait_for_pci_err_recovery(ioa_cfg); 10222 rc = pci_enable_device(pdev); 10223 } 10224 10225 if (rc) { 10226 dev_err(&pdev->dev, "Cannot enable adapter\n"); 10227 ipr_wait_for_pci_err_recovery(ioa_cfg); 10228 goto out_release_regions; 10229 } 10230 } 10231 10232 ipr_regs = pci_ioremap_bar(pdev, 0); 10233 10234 if (!ipr_regs) { 10235 dev_err(&pdev->dev, 10236 "Couldn't map memory range of registers\n"); 10237 rc = -ENOMEM; 10238 goto out_disable; 10239 } 10240 10241 ioa_cfg->hdw_dma_regs = ipr_regs; 10242 ioa_cfg->hdw_dma_regs_pci = ipr_regs_pci; 10243 ioa_cfg->ioa_mailbox = ioa_cfg->chip_cfg->mailbox + ipr_regs; 10244 10245 ipr_init_regs(ioa_cfg); 10246 10247 if (ioa_cfg->sis64) { 10248 rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)); 10249 if (rc < 0) { 10250 dev_dbg(&pdev->dev, "Failed to set 64 bit DMA mask\n"); 10251 rc = 
dma_set_mask_and_coherent(&pdev->dev, 10252 DMA_BIT_MASK(32)); 10253 } 10254 } else 10255 rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)); 10256 10257 if (rc < 0) { 10258 dev_err(&pdev->dev, "Failed to set DMA mask\n"); 10259 goto cleanup_nomem; 10260 } 10261 10262 rc = pci_write_config_byte(pdev, PCI_CACHE_LINE_SIZE, 10263 ioa_cfg->chip_cfg->cache_line_size); 10264 10265 if (rc != PCIBIOS_SUCCESSFUL) { 10266 dev_err(&pdev->dev, "Write of cache line size failed\n"); 10267 ipr_wait_for_pci_err_recovery(ioa_cfg); 10268 rc = -EIO; 10269 goto cleanup_nomem; 10270 } 10271 10272 /* Issue MMIO read to ensure card is not in EEH */ 10273 interrupts = readl(ioa_cfg->regs.sense_interrupt_reg); 10274 ipr_wait_for_pci_err_recovery(ioa_cfg); 10275 10276 if (ipr_number_of_msix > IPR_MAX_MSIX_VECTORS) { 10277 dev_err(&pdev->dev, "The max number of MSIX is %d\n", 10278 IPR_MAX_MSIX_VECTORS); 10279 ipr_number_of_msix = IPR_MAX_MSIX_VECTORS; 10280 } 10281 10282 irq_flag = PCI_IRQ_LEGACY; 10283 if (ioa_cfg->ipr_chip->has_msi) 10284 irq_flag |= PCI_IRQ_MSI | PCI_IRQ_MSIX; 10285 rc = pci_alloc_irq_vectors(pdev, 1, ipr_number_of_msix, irq_flag); 10286 if (rc < 0) { 10287 ipr_wait_for_pci_err_recovery(ioa_cfg); 10288 goto cleanup_nomem; 10289 } 10290 ioa_cfg->nvectors = rc; 10291 10292 if (!pdev->msi_enabled && !pdev->msix_enabled) 10293 ioa_cfg->clear_isr = 1; 10294 10295 pci_set_master(pdev); 10296 10297 if (pci_channel_offline(pdev)) { 10298 ipr_wait_for_pci_err_recovery(ioa_cfg); 10299 pci_set_master(pdev); 10300 if (pci_channel_offline(pdev)) { 10301 rc = -EIO; 10302 goto out_msi_disable; 10303 } 10304 } 10305 10306 if (pdev->msi_enabled || pdev->msix_enabled) { 10307 rc = ipr_test_msi(ioa_cfg, pdev); 10308 switch (rc) { 10309 case 0: 10310 dev_info(&pdev->dev, 10311 "Request for %d MSI%ss succeeded.", ioa_cfg->nvectors, 10312 pdev->msix_enabled ? 
"-X" : ""); 10313 break; 10314 case -EOPNOTSUPP: 10315 ipr_wait_for_pci_err_recovery(ioa_cfg); 10316 pci_free_irq_vectors(pdev); 10317 10318 ioa_cfg->nvectors = 1; 10319 ioa_cfg->clear_isr = 1; 10320 break; 10321 default: 10322 goto out_msi_disable; 10323 } 10324 } 10325 10326 ioa_cfg->hrrq_num = min3(ioa_cfg->nvectors, 10327 (unsigned int)num_online_cpus(), 10328 (unsigned int)IPR_MAX_HRRQ_NUM); 10329 10330 if ((rc = ipr_save_pcix_cmd_reg(ioa_cfg))) 10331 goto out_msi_disable; 10332 10333 if ((rc = ipr_set_pcix_cmd_reg(ioa_cfg))) 10334 goto out_msi_disable; 10335 10336 rc = ipr_alloc_mem(ioa_cfg); 10337 if (rc < 0) { 10338 dev_err(&pdev->dev, 10339 "Couldn't allocate enough memory for device driver!\n"); 10340 goto out_msi_disable; 10341 } 10342 10343 /* Save away PCI config space for use following IOA reset */ 10344 rc = pci_save_state(pdev); 10345 10346 if (rc != PCIBIOS_SUCCESSFUL) { 10347 dev_err(&pdev->dev, "Failed to save PCI config space\n"); 10348 rc = -EIO; 10349 goto cleanup_nolog; 10350 } 10351 10352 /* 10353 * If HRRQ updated interrupt is not masked, or reset alert is set, 10354 * the card is in an unknown state and needs a hard reset 10355 */ 10356 mask = readl(ioa_cfg->regs.sense_interrupt_mask_reg32); 10357 interrupts = readl(ioa_cfg->regs.sense_interrupt_reg32); 10358 uproc = readl(ioa_cfg->regs.sense_uproc_interrupt_reg32); 10359 if ((mask & IPR_PCII_HRRQ_UPDATED) == 0 || (uproc & IPR_UPROCI_RESET_ALERT)) 10360 ioa_cfg->needs_hard_reset = 1; 10361 if ((interrupts & IPR_PCII_ERROR_INTERRUPTS) || reset_devices) 10362 ioa_cfg->needs_hard_reset = 1; 10363 if (interrupts & IPR_PCII_IOA_UNIT_CHECKED) 10364 ioa_cfg->ioa_unit_checked = 1; 10365 10366 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); 10367 ipr_mask_and_clear_interrupts(ioa_cfg, ~IPR_PCII_IOA_TRANS_TO_OPER); 10368 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); 10369 10370 if (pdev->msi_enabled || pdev->msix_enabled) { 10371 name_msi_vectors(ioa_cfg); 10372 rc = request_irq(pci_irq_vector(pdev, 0), ipr_isr, 0, 10373 ioa_cfg->vectors_info[0].desc, 10374 &ioa_cfg->hrrq[0]); 10375 if (!rc) 10376 rc = ipr_request_other_msi_irqs(ioa_cfg, pdev); 10377 } else { 10378 rc = request_irq(pdev->irq, ipr_isr, 10379 IRQF_SHARED, 10380 IPR_NAME, &ioa_cfg->hrrq[0]); 10381 } 10382 if (rc) { 10383 dev_err(&pdev->dev, "Couldn't register IRQ %d! 
rc=%d\n", 10384 pdev->irq, rc); 10385 goto cleanup_nolog; 10386 } 10387 10388 if ((dev_id->driver_data & IPR_USE_PCI_WARM_RESET) || 10389 (dev_id->device == PCI_DEVICE_ID_IBM_OBSIDIAN_E && !ioa_cfg->revid)) { 10390 ioa_cfg->needs_warm_reset = 1; 10391 ioa_cfg->reset = ipr_reset_slot_reset; 10392 10393 ioa_cfg->reset_work_q = alloc_ordered_workqueue("ipr_reset_%d", 10394 WQ_MEM_RECLAIM, host->host_no); 10395 10396 if (!ioa_cfg->reset_work_q) { 10397 dev_err(&pdev->dev, "Couldn't register reset workqueue\n"); 10398 rc = -ENOMEM; 10399 goto out_free_irq; 10400 } 10401 } else 10402 ioa_cfg->reset = ipr_reset_start_bist; 10403 10404 spin_lock_irqsave(&ipr_driver_lock, driver_lock_flags); 10405 list_add_tail(&ioa_cfg->queue, &ipr_ioa_head); 10406 spin_unlock_irqrestore(&ipr_driver_lock, driver_lock_flags); 10407 10408 LEAVE; 10409 out: 10410 return rc; 10411 10412 out_free_irq: 10413 ipr_free_irqs(ioa_cfg); 10414 cleanup_nolog: 10415 ipr_free_mem(ioa_cfg); 10416 out_msi_disable: 10417 ipr_wait_for_pci_err_recovery(ioa_cfg); 10418 pci_free_irq_vectors(pdev); 10419 cleanup_nomem: 10420 iounmap(ipr_regs); 10421 out_disable: 10422 pci_disable_device(pdev); 10423 out_release_regions: 10424 pci_release_regions(pdev); 10425 out_scsi_host_put: 10426 scsi_host_put(host); 10427 goto out; 10428 } 10429 10430 /** 10431 * ipr_initiate_ioa_bringdown - Bring down an adapter 10432 * @ioa_cfg: ioa config struct 10433 * @shutdown_type: shutdown type 10434 * 10435 * Description: This function will initiate bringing down the adapter. 10436 * This consists of issuing an IOA shutdown to the adapter 10437 * to flush the cache, and running BIST. 10438 * If the caller needs to wait on the completion of the reset, 10439 * the caller must sleep on the reset_wait_q. 10440 * 10441 * Return value: 10442 * none 10443 **/ 10444 static void ipr_initiate_ioa_bringdown(struct ipr_ioa_cfg *ioa_cfg, 10445 enum ipr_shutdown_type shutdown_type) 10446 { 10447 ENTER; 10448 if (ioa_cfg->sdt_state == WAIT_FOR_DUMP) 10449 ioa_cfg->sdt_state = ABORT_DUMP; 10450 ioa_cfg->reset_retries = 0; 10451 ioa_cfg->in_ioa_bringdown = 1; 10452 ipr_initiate_ioa_reset(ioa_cfg, shutdown_type); 10453 LEAVE; 10454 } 10455 10456 /** 10457 * __ipr_remove - Remove a single adapter 10458 * @pdev: pci device struct 10459 * 10460 * Adapter hot plug remove entry point. 
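 *
 * The removal sequence below is, in outline: wait for any in-flight
 * reset/reload to finish, mark every HRRQ with removing_ioa, bring the
 * adapter down with an IPR_SHUTDOWN_NORMAL shutdown, wait again on
 * reset_wait_q, flush the worker and reset workqueues, unlink the
 * adapter from ipr_ioa_head, and release everything through
 * ipr_free_all_resources().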
10461 * 10462 * Return value: 10463 * none 10464 **/ 10465 static void __ipr_remove(struct pci_dev *pdev) 10466 { 10467 unsigned long host_lock_flags = 0; 10468 struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev); 10469 int i; 10470 unsigned long driver_lock_flags; 10471 ENTER; 10472 10473 spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags); 10474 while (ioa_cfg->in_reset_reload) { 10475 spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags); 10476 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload); 10477 spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags); 10478 } 10479 10480 for (i = 0; i < ioa_cfg->hrrq_num; i++) { 10481 spin_lock(&ioa_cfg->hrrq[i]._lock); 10482 ioa_cfg->hrrq[i].removing_ioa = 1; 10483 spin_unlock(&ioa_cfg->hrrq[i]._lock); 10484 } 10485 wmb(); 10486 ipr_initiate_ioa_bringdown(ioa_cfg, IPR_SHUTDOWN_NORMAL); 10487 10488 spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags); 10489 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload); 10490 flush_work(&ioa_cfg->work_q); 10491 if (ioa_cfg->reset_work_q) 10492 flush_workqueue(ioa_cfg->reset_work_q); 10493 INIT_LIST_HEAD(&ioa_cfg->used_res_q); 10494 spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags); 10495 10496 spin_lock_irqsave(&ipr_driver_lock, driver_lock_flags); 10497 list_del(&ioa_cfg->queue); 10498 spin_unlock_irqrestore(&ipr_driver_lock, driver_lock_flags); 10499 10500 if (ioa_cfg->sdt_state == ABORT_DUMP) 10501 ioa_cfg->sdt_state = WAIT_FOR_DUMP; 10502 spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags); 10503 10504 ipr_free_all_resources(ioa_cfg); 10505 10506 LEAVE; 10507 } 10508 10509 /** 10510 * ipr_remove - IOA hot plug remove entry point 10511 * @pdev: pci device struct 10512 * 10513 * Adapter hot plug remove entry point. 
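 *
 * Unlike __ipr_remove() above, this is the entry point the PCI core
 * calls: it first removes the sysfs trace, dump and async error log
 * files and unregisters the SCSI host, then defers the actual adapter
 * bringdown to __ipr_remove().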
10514 * 10515 * Return value: 10516 * none 10517 **/ 10518 static void ipr_remove(struct pci_dev *pdev) 10519 { 10520 struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev); 10521 10522 ENTER; 10523 10524 ipr_remove_trace_file(&ioa_cfg->host->shost_dev.kobj, 10525 &ipr_trace_attr); 10526 ipr_remove_dump_file(&ioa_cfg->host->shost_dev.kobj, 10527 &ipr_dump_attr); 10528 sysfs_remove_bin_file(&ioa_cfg->host->shost_dev.kobj, 10529 &ipr_ioa_async_err_log); 10530 scsi_remove_host(ioa_cfg->host); 10531 10532 __ipr_remove(pdev); 10533 10534 LEAVE; 10535 } 10536 10537 /** 10538 * ipr_probe - Adapter hot plug add entry point 10539 * 10540 * Return value: 10541 * 0 on success / non-zero on failure 10542 **/ 10543 static int ipr_probe(struct pci_dev *pdev, const struct pci_device_id *dev_id) 10544 { 10545 struct ipr_ioa_cfg *ioa_cfg; 10546 unsigned long flags; 10547 int rc, i; 10548 10549 rc = ipr_probe_ioa(pdev, dev_id); 10550 10551 if (rc) 10552 return rc; 10553 10554 ioa_cfg = pci_get_drvdata(pdev); 10555 rc = ipr_probe_ioa_part2(ioa_cfg); 10556 10557 if (rc) { 10558 __ipr_remove(pdev); 10559 return rc; 10560 } 10561 10562 rc = scsi_add_host(ioa_cfg->host, &pdev->dev); 10563 10564 if (rc) { 10565 __ipr_remove(pdev); 10566 return rc; 10567 } 10568 10569 rc = ipr_create_trace_file(&ioa_cfg->host->shost_dev.kobj, 10570 &ipr_trace_attr); 10571 10572 if (rc) { 10573 scsi_remove_host(ioa_cfg->host); 10574 __ipr_remove(pdev); 10575 return rc; 10576 } 10577 10578 rc = sysfs_create_bin_file(&ioa_cfg->host->shost_dev.kobj, 10579 &ipr_ioa_async_err_log); 10580 10581 if (rc) { 10582 ipr_remove_dump_file(&ioa_cfg->host->shost_dev.kobj, 10583 &ipr_dump_attr); 10584 ipr_remove_trace_file(&ioa_cfg->host->shost_dev.kobj, 10585 &ipr_trace_attr); 10586 scsi_remove_host(ioa_cfg->host); 10587 __ipr_remove(pdev); 10588 return rc; 10589 } 10590 10591 rc = ipr_create_dump_file(&ioa_cfg->host->shost_dev.kobj, 10592 &ipr_dump_attr); 10593 10594 if (rc) { 10595 sysfs_remove_bin_file(&ioa_cfg->host->shost_dev.kobj, 10596 &ipr_ioa_async_err_log); 10597 ipr_remove_trace_file(&ioa_cfg->host->shost_dev.kobj, 10598 &ipr_trace_attr); 10599 scsi_remove_host(ioa_cfg->host); 10600 __ipr_remove(pdev); 10601 return rc; 10602 } 10603 spin_lock_irqsave(ioa_cfg->host->host_lock, flags); 10604 ioa_cfg->scan_enabled = 1; 10605 schedule_work(&ioa_cfg->work_q); 10606 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags); 10607 10608 ioa_cfg->iopoll_weight = ioa_cfg->chip_cfg->iopoll_weight; 10609 10610 if (ioa_cfg->iopoll_weight && ioa_cfg->sis64 && ioa_cfg->nvectors > 1) { 10611 for (i = 1; i < ioa_cfg->hrrq_num; i++) { 10612 irq_poll_init(&ioa_cfg->hrrq[i].iopoll, 10613 ioa_cfg->iopoll_weight, ipr_iopoll); 10614 } 10615 } 10616 10617 scsi_scan_host(ioa_cfg->host); 10618 10619 return 0; 10620 } 10621 10622 /** 10623 * ipr_shutdown - Shutdown handler. 10624 * @pdev: pci device struct 10625 * 10626 * This function is invoked upon system shutdown/reboot. It will issue 10627 * an adapter shutdown to the adapter to flush the write cache. 
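 *
 * When ipr_fast_reboot is set and the system is restarting, SIS-64
 * adapters are instead sent an IPR_SHUTDOWN_QUIESCE, and once that
 * bringdown completes their IRQs are freed and the PCI device is
 * disabled (a summary of the body below); in all other cases a normal
 * shutdown is issued and the routine waits for it on reset_wait_q.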
10628 * 10629 * Return value: 10630 * none 10631 **/ 10632 static void ipr_shutdown(struct pci_dev *pdev) 10633 { 10634 struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev); 10635 unsigned long lock_flags = 0; 10636 enum ipr_shutdown_type shutdown_type = IPR_SHUTDOWN_NORMAL; 10637 int i; 10638 10639 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); 10640 if (ioa_cfg->iopoll_weight && ioa_cfg->sis64 && ioa_cfg->nvectors > 1) { 10641 ioa_cfg->iopoll_weight = 0; 10642 for (i = 1; i < ioa_cfg->hrrq_num; i++) 10643 irq_poll_disable(&ioa_cfg->hrrq[i].iopoll); 10644 } 10645 10646 while (ioa_cfg->in_reset_reload) { 10647 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); 10648 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload); 10649 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); 10650 } 10651 10652 if (ipr_fast_reboot && system_state == SYSTEM_RESTART && ioa_cfg->sis64) 10653 shutdown_type = IPR_SHUTDOWN_QUIESCE; 10654 10655 ipr_initiate_ioa_bringdown(ioa_cfg, shutdown_type); 10656 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); 10657 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload); 10658 if (ipr_fast_reboot && system_state == SYSTEM_RESTART && ioa_cfg->sis64) { 10659 ipr_free_irqs(ioa_cfg); 10660 pci_disable_device(ioa_cfg->pdev); 10661 } 10662 } 10663 10664 static struct pci_device_id ipr_pci_table[] = { 10665 { PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE, 10666 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_5702, 0, 0, 0 }, 10667 { PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE, 10668 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_5703, 0, 0, 0 }, 10669 { PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE, 10670 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_573D, 0, 0, 0 }, 10671 { PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE, 10672 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_573E, 0, 0, 0 }, 10673 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE, 10674 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571B, 0, 0, 0 }, 10675 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE, 10676 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572E, 0, 0, 0 }, 10677 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE, 10678 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571A, 0, 0, 0 }, 10679 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE, 10680 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_575B, 0, 0, 10681 IPR_USE_LONG_TRANSOP_TIMEOUT }, 10682 { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN, 10683 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572A, 0, 0, 0 }, 10684 { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN, 10685 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572B, 0, 0, 10686 IPR_USE_LONG_TRANSOP_TIMEOUT }, 10687 { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN, 10688 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_575C, 0, 0, 10689 IPR_USE_LONG_TRANSOP_TIMEOUT }, 10690 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN, 10691 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572A, 0, 0, 0 }, 10692 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN, 10693 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572B, 0, 0, 10694 IPR_USE_LONG_TRANSOP_TIMEOUT}, 10695 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN, 10696 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_575C, 0, 0, 10697 IPR_USE_LONG_TRANSOP_TIMEOUT }, 10698 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E, 10699 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_574E, 0, 0, 10700 IPR_USE_LONG_TRANSOP_TIMEOUT }, 10701 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E, 10702 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B3, 0, 0, 0 }, 10703 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E, 10704 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57CC, 0, 0, 0 }, 10705 { 
PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E, 10706 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B7, 0, 0, 10707 IPR_USE_LONG_TRANSOP_TIMEOUT | IPR_USE_PCI_WARM_RESET }, 10708 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_SNIPE, 10709 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_2780, 0, 0, 0 }, 10710 { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP, 10711 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571E, 0, 0, 0 }, 10712 { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP, 10713 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571F, 0, 0, 10714 IPR_USE_LONG_TRANSOP_TIMEOUT }, 10715 { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP, 10716 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572F, 0, 0, 10717 IPR_USE_LONG_TRANSOP_TIMEOUT }, 10718 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2, 10719 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B5, 0, 0, 0 }, 10720 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2, 10721 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_574D, 0, 0, 0 }, 10722 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2, 10723 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B2, 0, 0, 0 }, 10724 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2, 10725 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57C0, 0, 0, 0 }, 10726 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2, 10727 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57C3, 0, 0, 0 }, 10728 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2, 10729 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57C4, 0, 0, 0 }, 10730 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE, 10731 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B4, 0, 0, 0 }, 10732 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE, 10733 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B1, 0, 0, 0 }, 10734 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE, 10735 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57C6, 0, 0, 0 }, 10736 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE, 10737 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57C8, 0, 0, 0 }, 10738 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE, 10739 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57CE, 0, 0, 0 }, 10740 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE, 10741 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57D5, 0, 0, 0 }, 10742 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE, 10743 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57D6, 0, 0, 0 }, 10744 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE, 10745 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57D7, 0, 0, 0 }, 10746 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE, 10747 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57D8, 0, 0, 0 }, 10748 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE, 10749 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57D9, 0, 0, 0 }, 10750 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE, 10751 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57DA, 0, 0, 0 }, 10752 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE, 10753 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57EB, 0, 0, 0 }, 10754 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE, 10755 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57EC, 0, 0, 0 }, 10756 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE, 10757 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57ED, 0, 0, 0 }, 10758 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE, 10759 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57EE, 0, 0, 0 }, 10760 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE, 10761 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57EF, 0, 0, 0 }, 10762 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE, 10763 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57F0, 0, 0, 0 }, 10764 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE, 10765 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_2CCA, 0, 0, 0 }, 10766 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE, 10767 PCI_VENDOR_ID_IBM, 
IPR_SUBS_DEV_ID_2CD2, 0, 0, 0 }, 10768 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE, 10769 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_2CCD, 0, 0, 0 }, 10770 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_RATTLESNAKE, 10771 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_580A, 0, 0, 0 }, 10772 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_RATTLESNAKE, 10773 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_580B, 0, 0, 0 }, 10774 { } 10775 }; 10776 MODULE_DEVICE_TABLE(pci, ipr_pci_table); 10777 10778 static const struct pci_error_handlers ipr_err_handler = { 10779 .error_detected = ipr_pci_error_detected, 10780 .mmio_enabled = ipr_pci_mmio_enabled, 10781 .slot_reset = ipr_pci_slot_reset, 10782 }; 10783 10784 static struct pci_driver ipr_driver = { 10785 .name = IPR_NAME, 10786 .id_table = ipr_pci_table, 10787 .probe = ipr_probe, 10788 .remove = ipr_remove, 10789 .shutdown = ipr_shutdown, 10790 .err_handler = &ipr_err_handler, 10791 }; 10792 10793 /** 10794 * ipr_halt_done - Shutdown prepare completion 10795 * 10796 * Return value: 10797 * none 10798 **/ 10799 static void ipr_halt_done(struct ipr_cmnd *ipr_cmd) 10800 { 10801 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q); 10802 } 10803 10804 /** 10805 * ipr_halt - Issue shutdown prepare to all adapters 10806 * 10807 * Return value: 10808 * NOTIFY_OK on success / NOTIFY_DONE on failure 10809 **/ 10810 static int ipr_halt(struct notifier_block *nb, ulong event, void *buf) 10811 { 10812 struct ipr_cmnd *ipr_cmd; 10813 struct ipr_ioa_cfg *ioa_cfg; 10814 unsigned long flags = 0, driver_lock_flags; 10815 10816 if (event != SYS_RESTART && event != SYS_HALT && event != SYS_POWER_OFF) 10817 return NOTIFY_DONE; 10818 10819 spin_lock_irqsave(&ipr_driver_lock, driver_lock_flags); 10820 10821 list_for_each_entry(ioa_cfg, &ipr_ioa_head, queue) { 10822 spin_lock_irqsave(ioa_cfg->host->host_lock, flags); 10823 if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].allow_cmds || 10824 (ipr_fast_reboot && event == SYS_RESTART && ioa_cfg->sis64)) { 10825 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags); 10826 continue; 10827 } 10828 10829 ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg); 10830 ipr_cmd->ioarcb.res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE); 10831 ipr_cmd->ioarcb.cmd_pkt.request_type = IPR_RQTYPE_IOACMD; 10832 ipr_cmd->ioarcb.cmd_pkt.cdb[0] = IPR_IOA_SHUTDOWN; 10833 ipr_cmd->ioarcb.cmd_pkt.cdb[1] = IPR_SHUTDOWN_PREPARE_FOR_NORMAL; 10834 10835 ipr_do_req(ipr_cmd, ipr_halt_done, ipr_timeout, IPR_DEVICE_RESET_TIMEOUT); 10836 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags); 10837 } 10838 spin_unlock_irqrestore(&ipr_driver_lock, driver_lock_flags); 10839 10840 return NOTIFY_OK; 10841 } 10842 10843 static struct notifier_block ipr_notifier = { 10844 ipr_halt, NULL, 0 10845 }; 10846 10847 /** 10848 * ipr_init - Module entry point 10849 * 10850 * Return value: 10851 * 0 on success / negative value on failure 10852 **/ 10853 static int __init ipr_init(void) 10854 { 10855 ipr_info("IBM Power RAID SCSI Device Driver version: %s %s\n", 10856 IPR_DRIVER_VERSION, IPR_DRIVER_DATE); 10857 10858 register_reboot_notifier(&ipr_notifier); 10859 return pci_register_driver(&ipr_driver); 10860 } 10861 10862 /** 10863 * ipr_exit - Module unload 10864 * 10865 * Module unload entry point. 10866 * 10867 * Return value: 10868 * none 10869 **/ 10870 static void __exit ipr_exit(void) 10871 { 10872 unregister_reboot_notifier(&ipr_notifier); 10873 pci_unregister_driver(&ipr_driver); 10874 } 10875 10876 module_init(ipr_init); 10877 module_exit(ipr_exit); 10878
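/*
 * Registration summary (this recaps the code above; it adds no new
 * behaviour):
 *
 * - ipr_probe(), ipr_remove(), ipr_shutdown() and ipr_err_handler are
 *   hooked into the PCI core through the ipr_driver structure and
 *   matched against adapters listed in ipr_pci_table.
 * - ipr_halt() is installed as a reboot notifier so that each adapter
 *   that still accepts commands is sent an IPR_IOA_SHUTDOWN with
 *   IPR_SHUTDOWN_PREPARE_FOR_NORMAL before a restart, halt or power off.
 * - ipr_init() and ipr_exit() perform the registration and teardown via
 *   register_reboot_notifier()/pci_register_driver() and their
 *   unregister counterparts.
 */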