/*
 * ipr.c -- driver for IBM Power Linux RAID adapters
 *
 * Written By: Brian King <brking@us.ibm.com>, IBM Corporation
 *
 * Copyright (C) 2003, 2004 IBM Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 *
 */

/*
 * Notes:
 *
 * This driver is used to control the following SCSI adapters:
 *
 * IBM iSeries: 5702, 5703, 2780, 5709, 570A, 570B
 *
 * IBM pSeries: PCI-X Dual Channel Ultra 320 SCSI RAID Adapter
 *              PCI-X Dual Channel Ultra 320 SCSI Adapter
 *              PCI-X Dual Channel Ultra 320 SCSI RAID Enablement Card
 *              Embedded SCSI adapter on p615 and p655 systems
 *
 * Supported Hardware Features:
 *	- Ultra 320 SCSI controller
 *	- PCI-X host interface
 *	- Embedded PowerPC RISC Processor and Hardware XOR DMA Engine
 *	- Non-Volatile Write Cache
 *	- Supports attachment of non-RAID disks, tape, and optical devices
 *	- RAID Levels 0, 5, 10
 *	- Hot spare
 *	- Background Parity Checking
 *	- Background Data Scrubbing
 *	- Ability to increase the capacity of an existing RAID 5 disk array
 *	  by adding disks
 *
 * Driver Features:
 *	- Tagged command queuing
 *	- Adapter microcode download
 *	- PCI hot plug
 *	- SCSI device hot plug
 *
 */

#include <linux/fs.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/ioport.h>
#include <linux/delay.h>
#include <linux/pci.h>
#include <linux/wait.h>
#include <linux/spinlock.h>
#include <linux/sched.h>
#include <linux/interrupt.h>
#include <linux/blkdev.h>
#include <linux/firmware.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/libata.h>
#include <linux/hdreg.h>
#include <linux/reboot.h>
#include <linux/stringify.h>
#include <asm/io.h>
#include <asm/irq.h>
#include <asm/processor.h>
#include <scsi/scsi.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_tcq.h>
#include <scsi/scsi_eh.h>
#include <scsi/scsi_cmnd.h>
#include "ipr.h"

/*
 * Global Data
 */
static LIST_HEAD(ipr_ioa_head);
static unsigned int ipr_log_level = IPR_DEFAULT_LOG_LEVEL;
static unsigned int ipr_max_speed = 1;
static int ipr_testmode = 0;
static unsigned int ipr_fastfail = 0;
static unsigned int ipr_transop_timeout = 0;
static unsigned int ipr_debug = 0;
static unsigned int ipr_max_devs = IPR_DEFAULT_SIS64_DEVS;
static unsigned int ipr_dual_ioa_raid = 1;
static unsigned int ipr_number_of_msix = 2;
static unsigned int ipr_fast_reboot;
static DEFINE_SPINLOCK(ipr_driver_lock);

/* This table describes the differences between DMA controller chips */
static const struct ipr_chip_cfg_t ipr_chip_cfg[] = {
	{ /* Gemstone, Citrine, Obsidian, and Obsidian-E */
		.mailbox = 0x0042C,
		.max_cmds = 100,
		.cache_line_size = 0x20,
		.clear_isr = 1,
		.iopoll_weight = 0,
		{
			.set_interrupt_mask_reg = 0x0022C,
			.clr_interrupt_mask_reg = 0x00230,
			.clr_interrupt_mask_reg32 = 0x00230,
			.sense_interrupt_mask_reg = 0x0022C,
			.sense_interrupt_mask_reg32 = 0x0022C,
			.clr_interrupt_reg = 0x00228,
			.clr_interrupt_reg32 = 0x00228,
			.sense_interrupt_reg = 0x00224,
			.sense_interrupt_reg32 = 0x00224,
			.ioarrin_reg = 0x00404,
			.sense_uproc_interrupt_reg = 0x00214,
			.sense_uproc_interrupt_reg32 = 0x00214,
			.set_uproc_interrupt_reg = 0x00214,
			.set_uproc_interrupt_reg32 = 0x00214,
			.clr_uproc_interrupt_reg = 0x00218,
			.clr_uproc_interrupt_reg32 = 0x00218
		}
	},
	{ /* Snipe and Scamp */
		.mailbox = 0x0052C,
		.max_cmds = 100,
		.cache_line_size = 0x20,
		.clear_isr = 1,
		.iopoll_weight = 0,
		{
			.set_interrupt_mask_reg = 0x00288,
			.clr_interrupt_mask_reg = 0x0028C,
			.clr_interrupt_mask_reg32 = 0x0028C,
			.sense_interrupt_mask_reg = 0x00288,
			.sense_interrupt_mask_reg32 = 0x00288,
			.clr_interrupt_reg = 0x00284,
			.clr_interrupt_reg32 = 0x00284,
			.sense_interrupt_reg = 0x00280,
			.sense_interrupt_reg32 = 0x00280,
			.ioarrin_reg = 0x00504,
			.sense_uproc_interrupt_reg = 0x00290,
			.sense_uproc_interrupt_reg32 = 0x00290,
			.set_uproc_interrupt_reg = 0x00290,
			.set_uproc_interrupt_reg32 = 0x00290,
			.clr_uproc_interrupt_reg = 0x00294,
			.clr_uproc_interrupt_reg32 = 0x00294
		}
	},
	{ /* CRoC */
		.mailbox = 0x00044,
		.max_cmds = 1000,
		.cache_line_size = 0x20,
		.clear_isr = 0,
		.iopoll_weight = 64,
		{
			.set_interrupt_mask_reg = 0x00010,
			.clr_interrupt_mask_reg = 0x00018,
			.clr_interrupt_mask_reg32 = 0x0001C,
			.sense_interrupt_mask_reg = 0x00010,
			.sense_interrupt_mask_reg32 = 0x00014,
			.clr_interrupt_reg = 0x00008,
			.clr_interrupt_reg32 = 0x0000C,
			.sense_interrupt_reg = 0x00000,
			.sense_interrupt_reg32 = 0x00004,
			.ioarrin_reg = 0x00070,
			.sense_uproc_interrupt_reg = 0x00020,
			.sense_uproc_interrupt_reg32 = 0x00024,
			.set_uproc_interrupt_reg = 0x00020,
			.set_uproc_interrupt_reg32 = 0x00024,
			.clr_uproc_interrupt_reg = 0x00028,
			.clr_uproc_interrupt_reg32 = 0x0002C,
			.init_feedback_reg = 0x0005C,
			.dump_addr_reg = 0x00064,
			.dump_data_reg = 0x00068,
			.endian_swap_reg = 0x00084
		}
	},
};

static const struct ipr_chip_t ipr_chip[] = {
	{ PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE, IPR_USE_LSI, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[0] },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE, IPR_USE_LSI, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[0] },
	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN, IPR_USE_LSI, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[0] },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN, IPR_USE_LSI, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[0] },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E, IPR_USE_MSI, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[0] },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_SNIPE, IPR_USE_LSI, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[1] },
	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP, IPR_USE_LSI, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[1] },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2, IPR_USE_MSI, IPR_SIS64, IPR_MMIO, &ipr_chip_cfg[2] },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE, IPR_USE_MSI, IPR_SIS64, IPR_MMIO, &ipr_chip_cfg[2] }
};
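/*
 * Example only: probe code later in this file resolves a PCI ID to an
 * entry in ipr_chip[] with a linear scan along these lines (a sketch of
 * what ipr_get_chip_info() does; the name and exact shape here are
 * illustrative, not a second definition):
 */
#if 0
static const struct ipr_chip_t *
ipr_example_get_chip_info(const struct pci_device_id *dev_id)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(ipr_chip); i++)
		if (ipr_chip[i].vendor == dev_id->vendor &&
		    ipr_chip[i].device == dev_id->device)
			return &ipr_chip[i];
	return NULL;
}
#endif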

static int ipr_max_bus_speeds[] = {
	IPR_80MBs_SCSI_RATE, IPR_U160_SCSI_RATE, IPR_U320_SCSI_RATE
};

MODULE_AUTHOR("Brian King <brking@us.ibm.com>");
MODULE_DESCRIPTION("IBM Power RAID SCSI Adapter Driver");
module_param_named(max_speed, ipr_max_speed, uint, 0);
MODULE_PARM_DESC(max_speed, "Maximum bus speed (0-2). Default: 1=U160. Speeds: 0=80 MB/s, 1=U160, 2=U320");
module_param_named(log_level, ipr_log_level, uint, 0);
MODULE_PARM_DESC(log_level, "Set to 0 - 4 for increasing verbosity of device driver");
module_param_named(testmode, ipr_testmode, int, 0);
MODULE_PARM_DESC(testmode, "DANGEROUS!!! Allows unsupported configurations");
module_param_named(fastfail, ipr_fastfail, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(fastfail, "Reduce timeouts and retries");
module_param_named(transop_timeout, ipr_transop_timeout, int, 0);
MODULE_PARM_DESC(transop_timeout, "Time in seconds to wait for adapter to come operational (default: 300)");
module_param_named(debug, ipr_debug, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(debug, "Enable device driver debugging logging. Set to 1 to enable. (default: 0)");
module_param_named(dual_ioa_raid, ipr_dual_ioa_raid, int, 0);
MODULE_PARM_DESC(dual_ioa_raid, "Enable dual adapter RAID support. Set to 1 to enable. (default: 1)");
module_param_named(max_devs, ipr_max_devs, int, 0);
MODULE_PARM_DESC(max_devs, "Specify the maximum number of physical devices. "
		 "[Default=" __stringify(IPR_DEFAULT_SIS64_DEVS) "]");
module_param_named(number_of_msix, ipr_number_of_msix, int, 0);
MODULE_PARM_DESC(number_of_msix, "Specify the number of MSIX interrupts to use on capable adapters (1 - 16). (default: 2)");
module_param_named(fast_reboot, ipr_fast_reboot, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(fast_reboot, "Skip adapter shutdown during reboot. Set to 1 to enable. (default: 0)");
MODULE_LICENSE("GPL");
MODULE_VERSION(IPR_DRIVER_VERSION);
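/*
 * Usage example (illustrative, not driver code): the parameters above
 * are given at module load time, e.g.
 *
 *	modprobe ipr max_speed=2 log_level=2 number_of_msix=4
 *
 * Parameters declared with S_IRUGO | S_IWUSR (fastfail, debug,
 * fast_reboot) can also be changed at runtime through sysfs:
 *
 *	echo 1 > /sys/module/ipr/parameters/fastfail
 */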
(default: 0)"); 227 MODULE_LICENSE("GPL"); 228 MODULE_VERSION(IPR_DRIVER_VERSION); 229 230 /* A constant array of IOASCs/URCs/Error Messages */ 231 static const 232 struct ipr_error_table_t ipr_error_table[] = { 233 {0x00000000, 1, IPR_DEFAULT_LOG_LEVEL, 234 "8155: An unknown error was received"}, 235 {0x00330000, 0, 0, 236 "Soft underlength error"}, 237 {0x005A0000, 0, 0, 238 "Command to be cancelled not found"}, 239 {0x00808000, 0, 0, 240 "Qualified success"}, 241 {0x01080000, 1, IPR_DEFAULT_LOG_LEVEL, 242 "FFFE: Soft device bus error recovered by the IOA"}, 243 {0x01088100, 0, IPR_DEFAULT_LOG_LEVEL, 244 "4101: Soft device bus fabric error"}, 245 {0x01100100, 0, IPR_DEFAULT_LOG_LEVEL, 246 "FFFC: Logical block guard error recovered by the device"}, 247 {0x01100300, 0, IPR_DEFAULT_LOG_LEVEL, 248 "FFFC: Logical block reference tag error recovered by the device"}, 249 {0x01108300, 0, IPR_DEFAULT_LOG_LEVEL, 250 "4171: Recovered scatter list tag / sequence number error"}, 251 {0x01109000, 0, IPR_DEFAULT_LOG_LEVEL, 252 "FF3D: Recovered logical block CRC error on IOA to Host transfer"}, 253 {0x01109200, 0, IPR_DEFAULT_LOG_LEVEL, 254 "4171: Recovered logical block sequence number error on IOA to Host transfer"}, 255 {0x0110A000, 0, IPR_DEFAULT_LOG_LEVEL, 256 "FFFD: Recovered logical block reference tag error detected by the IOA"}, 257 {0x0110A100, 0, IPR_DEFAULT_LOG_LEVEL, 258 "FFFD: Logical block guard error recovered by the IOA"}, 259 {0x01170600, 0, IPR_DEFAULT_LOG_LEVEL, 260 "FFF9: Device sector reassign successful"}, 261 {0x01170900, 0, IPR_DEFAULT_LOG_LEVEL, 262 "FFF7: Media error recovered by device rewrite procedures"}, 263 {0x01180200, 0, IPR_DEFAULT_LOG_LEVEL, 264 "7001: IOA sector reassignment successful"}, 265 {0x01180500, 0, IPR_DEFAULT_LOG_LEVEL, 266 "FFF9: Soft media error. 
	{0x01180600, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFF7: Media error recovered by IOA rewrite procedures"},
	{0x01418000, 0, IPR_DEFAULT_LOG_LEVEL,
	"FF3D: Soft PCI bus error recovered by the IOA"},
	{0x01440000, 1, IPR_DEFAULT_LOG_LEVEL,
	"FFF6: Device hardware error recovered by the IOA"},
	{0x01448100, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFF6: Device hardware error recovered by the device"},
	{0x01448200, 1, IPR_DEFAULT_LOG_LEVEL,
	"FF3D: Soft IOA error recovered by the IOA"},
	{0x01448300, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFFA: Undefined device response recovered by the IOA"},
	{0x014A0000, 1, IPR_DEFAULT_LOG_LEVEL,
	"FFF6: Device bus error, message or command phase"},
	{0x014A8000, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFFE: Task Management Function failed"},
	{0x015D0000, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFF6: Failure prediction threshold exceeded"},
	{0x015D9200, 0, IPR_DEFAULT_LOG_LEVEL,
	"8009: Impending cache battery pack failure"},
	{0x02040100, 0, 0,
	"Logical Unit in process of becoming ready"},
	{0x02040200, 0, 0,
	"Initializing command required"},
	{0x02040400, 0, 0,
	"34FF: Disk device format in progress"},
	{0x02040C00, 0, 0,
	"Logical unit not accessible, target port in unavailable state"},
	{0x02048000, 0, IPR_DEFAULT_LOG_LEVEL,
	"9070: IOA requested reset"},
	{0x023F0000, 0, 0,
	"Synchronization required"},
	{0x02408500, 0, 0,
	"IOA microcode download required"},
	{0x02408600, 0, 0,
	"Device bus connection is prohibited by host"},
	{0x024E0000, 0, 0,
	"Not ready, IOA shutdown"},
	{0x025A0000, 0, 0,
	"Not ready, IOA has been shutdown"},
	{0x02670100, 0, IPR_DEFAULT_LOG_LEVEL,
	"3020: Storage subsystem configuration error"},
	{0x03110B00, 0, 0,
	"FFF5: Medium error, data unreadable, recommend reassign"},
	{0x03110C00, 0, 0,
	"7000: Medium error, data unreadable, do not reassign"},
	{0x03310000, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFF3: Disk media format bad"},
	{0x04050000, 0, IPR_DEFAULT_LOG_LEVEL,
	"3002: Addressed device failed to respond to selection"},
	{0x04080000, 1, IPR_DEFAULT_LOG_LEVEL,
	"3100: Device bus error"},
	{0x04080100, 0, IPR_DEFAULT_LOG_LEVEL,
	"3109: IOA timed out a device command"},
	{0x04088000, 0, 0,
	"3120: SCSI bus is not operational"},
	{0x04088100, 0, IPR_DEFAULT_LOG_LEVEL,
	"4100: Hard device bus fabric error"},
	{0x04100100, 0, IPR_DEFAULT_LOG_LEVEL,
	"310C: Logical block guard error detected by the device"},
	{0x04100300, 0, IPR_DEFAULT_LOG_LEVEL,
	"310C: Logical block reference tag error detected by the device"},
	{0x04108300, 1, IPR_DEFAULT_LOG_LEVEL,
	"4170: Scatter list tag / sequence number error"},
	{0x04109000, 1, IPR_DEFAULT_LOG_LEVEL,
	"8150: Logical block CRC error on IOA to Host transfer"},
	{0x04109200, 1, IPR_DEFAULT_LOG_LEVEL,
	"4170: Logical block sequence number error on IOA to Host transfer"},
	{0x0410A000, 0, IPR_DEFAULT_LOG_LEVEL,
	"310D: Logical block reference tag error detected by the IOA"},
	{0x0410A100, 0, IPR_DEFAULT_LOG_LEVEL,
	"310D: Logical block guard error detected by the IOA"},
	{0x04118000, 0, IPR_DEFAULT_LOG_LEVEL,
	"9000: IOA reserved area data check"},
	{0x04118100, 0, IPR_DEFAULT_LOG_LEVEL,
	"9001: IOA reserved area invalid data pattern"},
	{0x04118200, 0, IPR_DEFAULT_LOG_LEVEL,
	"9002: IOA reserved area LRC error"},
	{0x04118300, 1, IPR_DEFAULT_LOG_LEVEL,
	"Hardware Error, IOA metadata access error"},
metadata access error"}, 347 {0x04320000, 0, IPR_DEFAULT_LOG_LEVEL, 348 "102E: Out of alternate sectors for disk storage"}, 349 {0x04330000, 1, IPR_DEFAULT_LOG_LEVEL, 350 "FFF4: Data transfer underlength error"}, 351 {0x04338000, 1, IPR_DEFAULT_LOG_LEVEL, 352 "FFF4: Data transfer overlength error"}, 353 {0x043E0100, 0, IPR_DEFAULT_LOG_LEVEL, 354 "3400: Logical unit failure"}, 355 {0x04408500, 0, IPR_DEFAULT_LOG_LEVEL, 356 "FFF4: Device microcode is corrupt"}, 357 {0x04418000, 1, IPR_DEFAULT_LOG_LEVEL, 358 "8150: PCI bus error"}, 359 {0x04430000, 1, 0, 360 "Unsupported device bus message received"}, 361 {0x04440000, 1, IPR_DEFAULT_LOG_LEVEL, 362 "FFF4: Disk device problem"}, 363 {0x04448200, 1, IPR_DEFAULT_LOG_LEVEL, 364 "8150: Permanent IOA failure"}, 365 {0x04448300, 0, IPR_DEFAULT_LOG_LEVEL, 366 "3010: Disk device returned wrong response to IOA"}, 367 {0x04448400, 0, IPR_DEFAULT_LOG_LEVEL, 368 "8151: IOA microcode error"}, 369 {0x04448500, 0, 0, 370 "Device bus status error"}, 371 {0x04448600, 0, IPR_DEFAULT_LOG_LEVEL, 372 "8157: IOA error requiring IOA reset to recover"}, 373 {0x04448700, 0, 0, 374 "ATA device status error"}, 375 {0x04490000, 0, 0, 376 "Message reject received from the device"}, 377 {0x04449200, 0, IPR_DEFAULT_LOG_LEVEL, 378 "8008: A permanent cache battery pack failure occurred"}, 379 {0x0444A000, 0, IPR_DEFAULT_LOG_LEVEL, 380 "9090: Disk unit has been modified after the last known status"}, 381 {0x0444A200, 0, IPR_DEFAULT_LOG_LEVEL, 382 "9081: IOA detected device error"}, 383 {0x0444A300, 0, IPR_DEFAULT_LOG_LEVEL, 384 "9082: IOA detected device error"}, 385 {0x044A0000, 1, IPR_DEFAULT_LOG_LEVEL, 386 "3110: Device bus error, message or command phase"}, 387 {0x044A8000, 1, IPR_DEFAULT_LOG_LEVEL, 388 "3110: SAS Command / Task Management Function failed"}, 389 {0x04670400, 0, IPR_DEFAULT_LOG_LEVEL, 390 "9091: Incorrect hardware configuration change has been detected"}, 391 {0x04678000, 0, IPR_DEFAULT_LOG_LEVEL, 392 "9073: Invalid multi-adapter configuration"}, 393 {0x04678100, 0, IPR_DEFAULT_LOG_LEVEL, 394 "4010: Incorrect connection between cascaded expanders"}, 395 {0x04678200, 0, IPR_DEFAULT_LOG_LEVEL, 396 "4020: Connections exceed IOA design limits"}, 397 {0x04678300, 0, IPR_DEFAULT_LOG_LEVEL, 398 "4030: Incorrect multipath connection"}, 399 {0x04679000, 0, IPR_DEFAULT_LOG_LEVEL, 400 "4110: Unsupported enclosure function"}, 401 {0x04679800, 0, IPR_DEFAULT_LOG_LEVEL, 402 "4120: SAS cable VPD cannot be read"}, 403 {0x046E0000, 0, IPR_DEFAULT_LOG_LEVEL, 404 "FFF4: Command to logical unit failed"}, 405 {0x05240000, 1, 0, 406 "Illegal request, invalid request type or request packet"}, 407 {0x05250000, 0, 0, 408 "Illegal request, invalid resource handle"}, 409 {0x05258000, 0, 0, 410 "Illegal request, commands not allowed to this device"}, 411 {0x05258100, 0, 0, 412 "Illegal request, command not allowed to a secondary adapter"}, 413 {0x05258200, 0, 0, 414 "Illegal request, command not allowed to a non-optimized resource"}, 415 {0x05260000, 0, 0, 416 "Illegal request, invalid field in parameter list"}, 417 {0x05260100, 0, 0, 418 "Illegal request, parameter not supported"}, 419 {0x05260200, 0, 0, 420 "Illegal request, parameter value invalid"}, 421 {0x052C0000, 0, 0, 422 "Illegal request, command sequence error"}, 423 {0x052C8000, 1, 0, 424 "Illegal request, dual adapter support not enabled"}, 425 {0x052C8100, 1, 0, 426 "Illegal request, another cable connector was physically disabled"}, 427 {0x054E8000, 1, 0, 428 "Illegal request, inconsistent group id/group count"}, 429 
	{0x06040500, 0, IPR_DEFAULT_LOG_LEVEL,
	"9031: Array protection temporarily suspended, protection resuming"},
	{0x06040600, 0, IPR_DEFAULT_LOG_LEVEL,
	"9040: Array protection temporarily suspended, protection resuming"},
	{0x060B0100, 0, IPR_DEFAULT_LOG_LEVEL,
	"4080: IOA exceeded maximum operating temperature"},
	{0x060B8000, 0, IPR_DEFAULT_LOG_LEVEL,
	"4085: Service required"},
	{0x06288000, 0, IPR_DEFAULT_LOG_LEVEL,
	"3140: Device bus not ready to ready transition"},
	{0x06290000, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFFB: SCSI bus was reset"},
	{0x06290500, 0, 0,
	"FFFE: SCSI bus transition to single ended"},
	{0x06290600, 0, 0,
	"FFFE: SCSI bus transition to LVD"},
	{0x06298000, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFFB: SCSI bus was reset by another initiator"},
	{0x063F0300, 0, IPR_DEFAULT_LOG_LEVEL,
	"3029: A device replacement has occurred"},
	{0x063F8300, 0, IPR_DEFAULT_LOG_LEVEL,
	"4102: Device bus fabric performance degradation"},
	{0x064C8000, 0, IPR_DEFAULT_LOG_LEVEL,
	"9051: IOA cache data exists for a missing or failed device"},
	{0x064C8100, 0, IPR_DEFAULT_LOG_LEVEL,
	"9055: Auxiliary cache IOA contains cache data needed by the primary IOA"},
	{0x06670100, 0, IPR_DEFAULT_LOG_LEVEL,
	"9025: Disk unit is not supported at its physical location"},
	{0x06670600, 0, IPR_DEFAULT_LOG_LEVEL,
	"3020: IOA detected a SCSI bus configuration error"},
	{0x06678000, 0, IPR_DEFAULT_LOG_LEVEL,
	"3150: SCSI bus configuration error"},
	{0x06678100, 0, IPR_DEFAULT_LOG_LEVEL,
	"9074: Asymmetric advanced function disk configuration"},
	{0x06678300, 0, IPR_DEFAULT_LOG_LEVEL,
	"4040: Incomplete multipath connection between IOA and enclosure"},
	{0x06678400, 0, IPR_DEFAULT_LOG_LEVEL,
	"4041: Incomplete multipath connection between enclosure and device"},
	{0x06678500, 0, IPR_DEFAULT_LOG_LEVEL,
	"9075: Incomplete multipath connection between IOA and remote IOA"},
	{0x06678600, 0, IPR_DEFAULT_LOG_LEVEL,
	"9076: Configuration error, missing remote IOA"},
	{0x06679100, 0, IPR_DEFAULT_LOG_LEVEL,
	"4050: Enclosure does not support a required multipath function"},
	{0x06679800, 0, IPR_DEFAULT_LOG_LEVEL,
	"4121: Configuration error, required cable is missing"},
	{0x06679900, 0, IPR_DEFAULT_LOG_LEVEL,
	"4122: Cable is not plugged into the correct location on remote IOA"},
	{0x06679A00, 0, IPR_DEFAULT_LOG_LEVEL,
	"4123: Configuration error, invalid cable vital product data"},
	{0x06679B00, 0, IPR_DEFAULT_LOG_LEVEL,
	"4124: Configuration error, both cable ends are plugged into the same IOA"},
	{0x06690000, 0, IPR_DEFAULT_LOG_LEVEL,
	"4070: Logically bad block written on device"},
	{0x06690200, 0, IPR_DEFAULT_LOG_LEVEL,
	"9041: Array protection temporarily suspended"},
	{0x06698200, 0, IPR_DEFAULT_LOG_LEVEL,
	"9042: Corrupt array parity detected on specified device"},
	{0x066B0200, 0, IPR_DEFAULT_LOG_LEVEL,
	"9030: Array no longer protected due to missing or failed disk unit"},
	{0x066B8000, 0, IPR_DEFAULT_LOG_LEVEL,
	"9071: Link operational transition"},
	{0x066B8100, 0, IPR_DEFAULT_LOG_LEVEL,
	"9072: Link not operational transition"},
	{0x066B8200, 0, IPR_DEFAULT_LOG_LEVEL,
	"9032: Array exposed but still protected"},
	{0x066B8300, 0, IPR_DEFAULT_LOG_LEVEL + 1,
	"70DD: Device forced failed by disrupt device command"},
	{0x066B9100, 0, IPR_DEFAULT_LOG_LEVEL,
	"4061: Multipath redundancy level got better"},
	{0x066B9200, 0, IPR_DEFAULT_LOG_LEVEL,
	"4060: Multipath redundancy level got worse"},
	{0x06808100, 0, IPR_DEFAULT_LOG_LEVEL,
	"9083: Device raw mode enabled"},
	{0x06808200, 0, IPR_DEFAULT_LOG_LEVEL,
	"9084: Device raw mode disabled"},
	{0x07270000, 0, 0,
	"Failure due to other device"},
	{0x07278000, 0, IPR_DEFAULT_LOG_LEVEL,
	"9008: IOA does not support functions expected by devices"},
	{0x07278100, 0, IPR_DEFAULT_LOG_LEVEL,
	"9010: Cache data associated with attached devices cannot be found"},
	{0x07278200, 0, IPR_DEFAULT_LOG_LEVEL,
	"9011: Cache data belongs to devices other than those attached"},
	{0x07278400, 0, IPR_DEFAULT_LOG_LEVEL,
	"9020: Array missing 2 or more devices with only 1 device present"},
	{0x07278500, 0, IPR_DEFAULT_LOG_LEVEL,
	"9021: Array missing 2 or more devices with 2 or more devices present"},
	{0x07278600, 0, IPR_DEFAULT_LOG_LEVEL,
	"9022: Exposed array is missing a required device"},
	{0x07278700, 0, IPR_DEFAULT_LOG_LEVEL,
	"9023: Array member(s) not at required physical locations"},
	{0x07278800, 0, IPR_DEFAULT_LOG_LEVEL,
	"9024: Array not functional due to present hardware configuration"},
	{0x07278900, 0, IPR_DEFAULT_LOG_LEVEL,
	"9026: Array not functional due to present hardware configuration"},
	{0x07278A00, 0, IPR_DEFAULT_LOG_LEVEL,
	"9027: Array is missing a device and parity is out of sync"},
	{0x07278B00, 0, IPR_DEFAULT_LOG_LEVEL,
	"9028: Maximum number of arrays already exist"},
	{0x07278C00, 0, IPR_DEFAULT_LOG_LEVEL,
	"9050: Required cache data cannot be located for a disk unit"},
	{0x07278D00, 0, IPR_DEFAULT_LOG_LEVEL,
	"9052: Cache data exists for a device that has been modified"},
	{0x07278F00, 0, IPR_DEFAULT_LOG_LEVEL,
	"9054: IOA resources not available due to previous problems"},
	{0x07279100, 0, IPR_DEFAULT_LOG_LEVEL,
	"9092: Disk unit requires initialization before use"},
	{0x07279200, 0, IPR_DEFAULT_LOG_LEVEL,
	"9029: Incorrect hardware configuration change has been detected"},
	{0x07279600, 0, IPR_DEFAULT_LOG_LEVEL,
	"9060: One or more disk pairs are missing from an array"},
	{0x07279700, 0, IPR_DEFAULT_LOG_LEVEL,
	"9061: One or more disks are missing from an array"},
	{0x07279800, 0, IPR_DEFAULT_LOG_LEVEL,
	"9062: One or more disks are missing from an array"},
	{0x07279900, 0, IPR_DEFAULT_LOG_LEVEL,
	"9063: Maximum number of functional arrays has been exceeded"},
	{0x07279A00, 0, 0,
	"Data protect, other volume set problem"},
	{0x0B260000, 0, 0,
	"Aborted command, invalid descriptor"},
	{0x0B3F9000, 0, 0,
	"Target operating conditions have changed, dual adapter takeover"},
	{0x0B530200, 0, 0,
	"Aborted command, medium removal prevented"},
	{0x0B5A0000, 0, 0,
	"Command terminated by host"},
	{0x0B5B8000, 0, 0,
	"Aborted command, command terminated by host"}
};
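/*
 * Sketch of how the table above is consumed: later in this file the
 * driver masks an incoming IOASC and linearly scans ipr_error_table,
 * falling back to entry 0 ("unknown error") when nothing matches.
 * Illustrative copy, not a second definition (the in-tree version is
 * ipr_get_error()):
 */
#if 0
static u32 ipr_example_get_error(u32 ioasc)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(ipr_error_table); i++)
		if (ipr_error_table[i].ioasc == (ioasc & IPR_IOASC_IOASC_MASK))
			return i;

	return 0;	/* "8155: An unknown error was received" */
}
#endif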
"XXXXXXX*XXXXXXXX", 160 }, 572 { "St V1S2 ", "XXXXXXXXXXXXXXXX", 160 }, 573 { "HSBPD4M PU3SCSI", "XXXXXXX*XXXXXXXX", 160 }, 574 { "VSBPD1H U3SCSI", "XXXXXXX*XXXXXXXX", 160 } 575 }; 576 577 /* 578 * Function Prototypes 579 */ 580 static int ipr_reset_alert(struct ipr_cmnd *); 581 static void ipr_process_ccn(struct ipr_cmnd *); 582 static void ipr_process_error(struct ipr_cmnd *); 583 static void ipr_reset_ioa_job(struct ipr_cmnd *); 584 static void ipr_initiate_ioa_reset(struct ipr_ioa_cfg *, 585 enum ipr_shutdown_type); 586 587 #ifdef CONFIG_SCSI_IPR_TRACE 588 /** 589 * ipr_trc_hook - Add a trace entry to the driver trace 590 * @ipr_cmd: ipr command struct 591 * @type: trace type 592 * @add_data: additional data 593 * 594 * Return value: 595 * none 596 **/ 597 static void ipr_trc_hook(struct ipr_cmnd *ipr_cmd, 598 u8 type, u32 add_data) 599 { 600 struct ipr_trace_entry *trace_entry; 601 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; 602 unsigned int trace_index; 603 604 trace_index = atomic_add_return(1, &ioa_cfg->trace_index) & IPR_TRACE_INDEX_MASK; 605 trace_entry = &ioa_cfg->trace[trace_index]; 606 trace_entry->time = jiffies; 607 trace_entry->op_code = ipr_cmd->ioarcb.cmd_pkt.cdb[0]; 608 trace_entry->type = type; 609 if (ipr_cmd->ioa_cfg->sis64) 610 trace_entry->ata_op_code = ipr_cmd->i.ata_ioadl.regs.command; 611 else 612 trace_entry->ata_op_code = ipr_cmd->ioarcb.u.add_data.u.regs.command; 613 trace_entry->cmd_index = ipr_cmd->cmd_index & 0xff; 614 trace_entry->res_handle = ipr_cmd->ioarcb.res_handle; 615 trace_entry->u.add_data = add_data; 616 wmb(); 617 } 618 #else 619 #define ipr_trc_hook(ipr_cmd, type, add_data) do { } while (0) 620 #endif 621 622 /** 623 * ipr_lock_and_done - Acquire lock and complete command 624 * @ipr_cmd: ipr command struct 625 * 626 * Return value: 627 * none 628 **/ 629 static void ipr_lock_and_done(struct ipr_cmnd *ipr_cmd) 630 { 631 unsigned long lock_flags; 632 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; 633 634 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); 635 ipr_cmd->done(ipr_cmd); 636 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); 637 } 638 639 /** 640 * ipr_reinit_ipr_cmnd - Re-initialize an IPR Cmnd block for reuse 641 * @ipr_cmd: ipr command struct 642 * 643 * Return value: 644 * none 645 **/ 646 static void ipr_reinit_ipr_cmnd(struct ipr_cmnd *ipr_cmd) 647 { 648 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb; 649 struct ipr_ioasa *ioasa = &ipr_cmd->s.ioasa; 650 struct ipr_ioasa64 *ioasa64 = &ipr_cmd->s.ioasa64; 651 dma_addr_t dma_addr = ipr_cmd->dma_addr; 652 int hrrq_id; 653 654 hrrq_id = ioarcb->cmd_pkt.hrrq_id; 655 memset(&ioarcb->cmd_pkt, 0, sizeof(struct ipr_cmd_pkt)); 656 ioarcb->cmd_pkt.hrrq_id = hrrq_id; 657 ioarcb->data_transfer_length = 0; 658 ioarcb->read_data_transfer_length = 0; 659 ioarcb->ioadl_len = 0; 660 ioarcb->read_ioadl_len = 0; 661 662 if (ipr_cmd->ioa_cfg->sis64) { 663 ioarcb->u.sis64_addr_data.data_ioadl_addr = 664 cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, i.ioadl64)); 665 ioasa64->u.gata.status = 0; 666 } else { 667 ioarcb->write_ioadl_addr = 668 cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, i.ioadl)); 669 ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr; 670 ioasa->u.gata.status = 0; 671 } 672 673 ioasa->hdr.ioasc = 0; 674 ioasa->hdr.residual_data_len = 0; 675 ipr_cmd->scsi_cmd = NULL; 676 ipr_cmd->qc = NULL; 677 ipr_cmd->sense_buffer[0] = 0; 678 ipr_cmd->dma_use_sg = 0; 679 } 680 681 /** 682 * ipr_init_ipr_cmnd - Initialize an IPR Cmnd block 683 * @ipr_cmd: ipr command struct 684 

/**
 * ipr_lock_and_done - Acquire lock and complete command
 * @ipr_cmd:	ipr command struct
 *
 * Return value:
 *	none
 **/
static void ipr_lock_and_done(struct ipr_cmnd *ipr_cmd)
{
	unsigned long lock_flags;
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	ipr_cmd->done(ipr_cmd);
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
}

/**
 * ipr_reinit_ipr_cmnd - Re-initialize an IPR Cmnd block for reuse
 * @ipr_cmd:	ipr command struct
 *
 * Return value:
 *	none
 **/
static void ipr_reinit_ipr_cmnd(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
	struct ipr_ioasa *ioasa = &ipr_cmd->s.ioasa;
	struct ipr_ioasa64 *ioasa64 = &ipr_cmd->s.ioasa64;
	dma_addr_t dma_addr = ipr_cmd->dma_addr;
	int hrrq_id;

	hrrq_id = ioarcb->cmd_pkt.hrrq_id;
	memset(&ioarcb->cmd_pkt, 0, sizeof(struct ipr_cmd_pkt));
	ioarcb->cmd_pkt.hrrq_id = hrrq_id;
	ioarcb->data_transfer_length = 0;
	ioarcb->read_data_transfer_length = 0;
	ioarcb->ioadl_len = 0;
	ioarcb->read_ioadl_len = 0;

	if (ipr_cmd->ioa_cfg->sis64) {
		ioarcb->u.sis64_addr_data.data_ioadl_addr =
			cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, i.ioadl64));
		ioasa64->u.gata.status = 0;
	} else {
		ioarcb->write_ioadl_addr =
			cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, i.ioadl));
		ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr;
		ioasa->u.gata.status = 0;
	}

	ioasa->hdr.ioasc = 0;
	ioasa->hdr.residual_data_len = 0;
	ipr_cmd->scsi_cmd = NULL;
	ipr_cmd->qc = NULL;
	ipr_cmd->sense_buffer[0] = 0;
	ipr_cmd->dma_use_sg = 0;
}

/**
 * ipr_init_ipr_cmnd - Initialize an IPR Cmnd block
 * @ipr_cmd:	ipr command struct
 * @fast_done:	fast done function call-back
 *
 * Return value:
 *	none
 **/
static void ipr_init_ipr_cmnd(struct ipr_cmnd *ipr_cmd,
			      void (*fast_done) (struct ipr_cmnd *))
{
	ipr_reinit_ipr_cmnd(ipr_cmd);
	ipr_cmd->u.scratch = 0;
	ipr_cmd->sibling = NULL;
	ipr_cmd->eh_comp = NULL;
	ipr_cmd->fast_done = fast_done;
	init_timer(&ipr_cmd->timer);
}

/**
 * __ipr_get_free_ipr_cmnd - Get a free IPR Cmnd block
 * @hrrq:	hrr queue
 *
 * Return value:
 *	pointer to ipr command struct
 **/
static
struct ipr_cmnd *__ipr_get_free_ipr_cmnd(struct ipr_hrr_queue *hrrq)
{
	struct ipr_cmnd *ipr_cmd = NULL;

	if (likely(!list_empty(&hrrq->hrrq_free_q))) {
		ipr_cmd = list_entry(hrrq->hrrq_free_q.next,
			struct ipr_cmnd, queue);
		list_del(&ipr_cmd->queue);
	}

	return ipr_cmd;
}

/**
 * ipr_get_free_ipr_cmnd - Get a free IPR Cmnd block and initialize it
 * @ioa_cfg:	ioa config struct
 *
 * Return value:
 *	pointer to ipr command struct
 **/
static
struct ipr_cmnd *ipr_get_free_ipr_cmnd(struct ipr_ioa_cfg *ioa_cfg)
{
	struct ipr_cmnd *ipr_cmd =
		__ipr_get_free_ipr_cmnd(&ioa_cfg->hrrq[IPR_INIT_HRRQ]);
	ipr_init_ipr_cmnd(ipr_cmd, ipr_lock_and_done);
	return ipr_cmd;
}
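/*
 * Putting the helpers above together: internal commands elsewhere in
 * this driver follow roughly this pattern (a sketch only, locking and
 * error paths omitted; ipr_send_blocking_cmd() and ipr_timeout() appear
 * later in this file, and IPR_INTERNAL_TIMEOUT comes from ipr.h):
 */
#if 0
	struct ipr_cmnd *ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);

	ipr_cmd->ioarcb.res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
	ipr_cmd->ioarcb.cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
	/* ... fill in the CDB for the desired operation ... */
	ipr_send_blocking_cmd(ipr_cmd, ipr_timeout, IPR_INTERNAL_TIMEOUT);
#endif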

/**
 * ipr_mask_and_clear_interrupts - Mask all and clear specified interrupts
 * @ioa_cfg:	ioa config struct
 * @clr_ints:	interrupts to clear
 *
 * This function masks all interrupts on the adapter, then clears the
 * interrupts specified in the mask
 *
 * Return value:
 *	none
 **/
static void ipr_mask_and_clear_interrupts(struct ipr_ioa_cfg *ioa_cfg,
					  u32 clr_ints)
{
	volatile u32 int_reg;
	int i;

	/* Stop new interrupts */
	for (i = 0; i < ioa_cfg->hrrq_num; i++) {
		spin_lock(&ioa_cfg->hrrq[i]._lock);
		ioa_cfg->hrrq[i].allow_interrupts = 0;
		spin_unlock(&ioa_cfg->hrrq[i]._lock);
	}
	wmb();

	/* Set interrupt mask to stop all new interrupts */
	if (ioa_cfg->sis64)
		writeq(~0, ioa_cfg->regs.set_interrupt_mask_reg);
	else
		writel(~0, ioa_cfg->regs.set_interrupt_mask_reg);

	/* Clear any pending interrupts */
	if (ioa_cfg->sis64)
		writel(~0, ioa_cfg->regs.clr_interrupt_reg);
	writel(clr_ints, ioa_cfg->regs.clr_interrupt_reg32);
	int_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
}

/**
 * ipr_save_pcix_cmd_reg - Save PCI-X command register
 * @ioa_cfg:	ioa config struct
 *
 * Return value:
 *	0 on success / -EIO on failure
 **/
static int ipr_save_pcix_cmd_reg(struct ipr_ioa_cfg *ioa_cfg)
{
	int pcix_cmd_reg = pci_find_capability(ioa_cfg->pdev, PCI_CAP_ID_PCIX);

	if (pcix_cmd_reg == 0)
		return 0;

	if (pci_read_config_word(ioa_cfg->pdev, pcix_cmd_reg + PCI_X_CMD,
				 &ioa_cfg->saved_pcix_cmd_reg) != PCIBIOS_SUCCESSFUL) {
		dev_err(&ioa_cfg->pdev->dev, "Failed to save PCI-X command register\n");
		return -EIO;
	}

	ioa_cfg->saved_pcix_cmd_reg |= PCI_X_CMD_DPERR_E | PCI_X_CMD_ERO;
	return 0;
}

/**
 * ipr_set_pcix_cmd_reg - Setup PCI-X command register
 * @ioa_cfg:	ioa config struct
 *
 * Return value:
 *	0 on success / -EIO on failure
 **/
static int ipr_set_pcix_cmd_reg(struct ipr_ioa_cfg *ioa_cfg)
{
	int pcix_cmd_reg = pci_find_capability(ioa_cfg->pdev, PCI_CAP_ID_PCIX);

	if (pcix_cmd_reg) {
		if (pci_write_config_word(ioa_cfg->pdev, pcix_cmd_reg + PCI_X_CMD,
					  ioa_cfg->saved_pcix_cmd_reg) != PCIBIOS_SUCCESSFUL) {
			dev_err(&ioa_cfg->pdev->dev, "Failed to setup PCI-X command register\n");
			return -EIO;
		}
	}

	return 0;
}

/**
 * ipr_sata_eh_done - done function for aborted SATA commands
 * @ipr_cmd:	ipr command struct
 *
 * This function is invoked for ops generated to SATA
 * devices which are being aborted.
 *
 * Return value:
 *	none
 **/
static void ipr_sata_eh_done(struct ipr_cmnd *ipr_cmd)
{
	struct ata_queued_cmd *qc = ipr_cmd->qc;
	struct ipr_sata_port *sata_port = qc->ap->private_data;

	qc->err_mask |= AC_ERR_OTHER;
	sata_port->ioasa.status |= ATA_BUSY;
	list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
	ata_qc_complete(qc);
}

/**
 * ipr_scsi_eh_done - mid-layer done function for aborted ops
 * @ipr_cmd:	ipr command struct
 *
 * This function is invoked by the interrupt handler for
 * ops generated by the SCSI mid-layer which are being aborted.
 *
 * Return value:
 *	none
 **/
static void ipr_scsi_eh_done(struct ipr_cmnd *ipr_cmd)
{
	struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;

	scsi_cmd->result |= (DID_ERROR << 16);

	scsi_dma_unmap(ipr_cmd->scsi_cmd);
	scsi_cmd->scsi_done(scsi_cmd);
	if (ipr_cmd->eh_comp)
		complete(ipr_cmd->eh_comp);
	list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
}

/**
 * ipr_fail_all_ops - Fails all outstanding ops.
 * @ioa_cfg:	ioa config struct
 *
 * This function fails all outstanding ops.
 *
 * Return value:
 *	none
 **/
static void ipr_fail_all_ops(struct ipr_ioa_cfg *ioa_cfg)
{
	struct ipr_cmnd *ipr_cmd, *temp;
	struct ipr_hrr_queue *hrrq;

	ENTER;
	for_each_hrrq(hrrq, ioa_cfg) {
		spin_lock(&hrrq->_lock);
		list_for_each_entry_safe(ipr_cmd,
					 temp, &hrrq->hrrq_pending_q, queue) {
			list_del(&ipr_cmd->queue);

			ipr_cmd->s.ioasa.hdr.ioasc =
				cpu_to_be32(IPR_IOASC_IOA_WAS_RESET);
			ipr_cmd->s.ioasa.hdr.ilid =
				cpu_to_be32(IPR_DRIVER_ILID);

			if (ipr_cmd->scsi_cmd)
				ipr_cmd->done = ipr_scsi_eh_done;
			else if (ipr_cmd->qc)
				ipr_cmd->done = ipr_sata_eh_done;

			ipr_trc_hook(ipr_cmd, IPR_TRACE_FINISH,
				     IPR_IOASC_IOA_WAS_RESET);
			del_timer(&ipr_cmd->timer);
			ipr_cmd->done(ipr_cmd);
		}
		spin_unlock(&hrrq->_lock);
	}
	LEAVE;
}

/**
 * ipr_send_command - Send driver initiated requests.
 * @ipr_cmd:	ipr command struct
 *
 * This function sends a command to the adapter using the correct write call.
 * In the case of sis64, calculate the ioarcb size required, then OR in the
 * appropriate bits.
 *
 * Return value:
 *	none
 **/
static void ipr_send_command(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	dma_addr_t send_dma_addr = ipr_cmd->dma_addr;

	if (ioa_cfg->sis64) {
		/* The default size is 256 bytes */
		send_dma_addr |= 0x1;

		/* If the number of ioadls * size of ioadl > 128 bytes,
		   then use a 512 byte ioarcb */
		if (ipr_cmd->dma_use_sg * sizeof(struct ipr_ioadl64_desc) > 128)
			send_dma_addr |= 0x4;
		writeq(send_dma_addr, ioa_cfg->regs.ioarrin_reg);
	} else
		writel(send_dma_addr, ioa_cfg->regs.ioarrin_reg);
}
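/*
 * Illustration of the sis64 IOARRIN encoding used by ipr_send_command()
 * above: the IOARCB is naturally aligned, so its low-order address bits
 * are free to carry size hints to the adapter. Not driver code; the
 * address and variable here are invented for the example:
 */
#if 0
	dma_addr_t send_dma_addr = 0x12340000;	/* hypothetical IOARCB address */

	send_dma_addr |= 0x1;			/* 256 byte IOARCB */
	if (dma_use_sg * sizeof(struct ipr_ioadl64_desc) > 128)
		send_dma_addr |= 0x4;		/* needs a 512 byte IOARCB */
#endif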

/**
 * ipr_do_req - Send driver initiated requests.
 * @ipr_cmd:		ipr command struct
 * @done:		done function
 * @timeout_func:	timeout function
 * @timeout:		timeout value
 *
 * This function sends the specified command to the adapter with the
 * timeout given. The done function is invoked on command completion.
 *
 * Return value:
 *	none
 **/
static void ipr_do_req(struct ipr_cmnd *ipr_cmd,
		       void (*done) (struct ipr_cmnd *),
		       void (*timeout_func) (struct ipr_cmnd *), u32 timeout)
{
	list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);

	ipr_cmd->done = done;

	ipr_cmd->timer.data = (unsigned long) ipr_cmd;
	ipr_cmd->timer.expires = jiffies + timeout;
	ipr_cmd->timer.function = (void (*)(unsigned long))timeout_func;

	add_timer(&ipr_cmd->timer);

	ipr_trc_hook(ipr_cmd, IPR_TRACE_START, 0);

	ipr_send_command(ipr_cmd);
}

/**
 * ipr_internal_cmd_done - Op done function for an internally generated op.
 * @ipr_cmd:	ipr command struct
 *
 * This function is the op done function for an internally generated,
 * blocking op. It simply wakes the sleeping thread.
 *
 * Return value:
 *	none
 **/
static void ipr_internal_cmd_done(struct ipr_cmnd *ipr_cmd)
{
	if (ipr_cmd->sibling)
		ipr_cmd->sibling = NULL;
	else
		complete(&ipr_cmd->completion);
}

/**
 * ipr_init_ioadl - initialize the ioadl for the correct SIS type
 * @ipr_cmd:	ipr command struct
 * @dma_addr:	dma address
 * @len:	transfer length
 * @flags:	ioadl flag value
 *
 * This function initializes an ioadl in the case where there is only a single
 * descriptor.
 *
 * Return value:
 *	nothing
 **/
static void ipr_init_ioadl(struct ipr_cmnd *ipr_cmd, dma_addr_t dma_addr,
			   u32 len, int flags)
{
	struct ipr_ioadl_desc *ioadl = ipr_cmd->i.ioadl;
	struct ipr_ioadl64_desc *ioadl64 = ipr_cmd->i.ioadl64;

	ipr_cmd->dma_use_sg = 1;

	if (ipr_cmd->ioa_cfg->sis64) {
		ioadl64->flags = cpu_to_be32(flags);
		ioadl64->data_len = cpu_to_be32(len);
		ioadl64->address = cpu_to_be64(dma_addr);

		ipr_cmd->ioarcb.ioadl_len =
			cpu_to_be32(sizeof(struct ipr_ioadl64_desc));
		ipr_cmd->ioarcb.data_transfer_length = cpu_to_be32(len);
	} else {
		ioadl->flags_and_data_len = cpu_to_be32(flags | len);
		ioadl->address = cpu_to_be32(dma_addr);

		if (flags == IPR_IOADL_FLAGS_READ_LAST) {
			ipr_cmd->ioarcb.read_ioadl_len =
				cpu_to_be32(sizeof(struct ipr_ioadl_desc));
			ipr_cmd->ioarcb.read_data_transfer_length = cpu_to_be32(len);
		} else {
			ipr_cmd->ioarcb.ioadl_len =
				cpu_to_be32(sizeof(struct ipr_ioadl_desc));
			ipr_cmd->ioarcb.data_transfer_length = cpu_to_be32(len);
		}
	}
}

/**
 * ipr_send_blocking_cmd - Send command and sleep on its completion.
 * @ipr_cmd:		ipr command struct
 * @timeout_func:	function to invoke if command times out
 * @timeout:		timeout
 *
 * Return value:
 *	none
 **/
static void ipr_send_blocking_cmd(struct ipr_cmnd *ipr_cmd,
				  void (*timeout_func) (struct ipr_cmnd *ipr_cmd),
				  u32 timeout)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;

	init_completion(&ipr_cmd->completion);
	ipr_do_req(ipr_cmd, ipr_internal_cmd_done, timeout_func, timeout);

	spin_unlock_irq(ioa_cfg->host->host_lock);
	wait_for_completion(&ipr_cmd->completion);
	spin_lock_irq(ioa_cfg->host->host_lock);
}

static int ipr_get_hrrq_index(struct ipr_ioa_cfg *ioa_cfg)
{
	unsigned int hrrq;

	if (ioa_cfg->hrrq_num == 1)
		hrrq = 0;
	else {
		hrrq = atomic_add_return(1, &ioa_cfg->hrrq_index);
		hrrq = (hrrq % (ioa_cfg->hrrq_num - 1)) + 1;
	}
	return hrrq;
}
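/*
 * Example of the round-robin above: with hrrq_num == 4, successive calls
 * return 1, 2, 3, 1, 2, 3, ... HRRQ 0 is deliberately skipped; it is
 * reserved for initialization and internal commands (IPR_INIT_HRRQ).
 */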

/**
 * ipr_send_hcam - Send an HCAM to the adapter.
 * @ioa_cfg:	ioa config struct
 * @type:	HCAM type
 * @hostrcb:	hostrcb struct
 *
 * This function will send a Host Controlled Async command to the adapter.
 * If HCAMs are currently not allowed to be issued to the adapter, it will
 * place the hostrcb on the free queue.
 *
 * Return value:
 *	none
 **/
static void ipr_send_hcam(struct ipr_ioa_cfg *ioa_cfg, u8 type,
			  struct ipr_hostrcb *hostrcb)
{
	struct ipr_cmnd *ipr_cmd;
	struct ipr_ioarcb *ioarcb;

	if (ioa_cfg->hrrq[IPR_INIT_HRRQ].allow_cmds) {
		ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
		list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);
		list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_pending_q);

		ipr_cmd->u.hostrcb = hostrcb;
		ioarcb = &ipr_cmd->ioarcb;

		ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
		ioarcb->cmd_pkt.request_type = IPR_RQTYPE_HCAM;
		ioarcb->cmd_pkt.cdb[0] = IPR_HOST_CONTROLLED_ASYNC;
		ioarcb->cmd_pkt.cdb[1] = type;
		ioarcb->cmd_pkt.cdb[7] = (sizeof(hostrcb->hcam) >> 8) & 0xff;
		ioarcb->cmd_pkt.cdb[8] = sizeof(hostrcb->hcam) & 0xff;

		ipr_init_ioadl(ipr_cmd, hostrcb->hostrcb_dma,
			       sizeof(hostrcb->hcam), IPR_IOADL_FLAGS_READ_LAST);

		if (type == IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE)
			ipr_cmd->done = ipr_process_ccn;
		else
			ipr_cmd->done = ipr_process_error;

		ipr_trc_hook(ipr_cmd, IPR_TRACE_START, IPR_IOA_RES_ADDR);

		ipr_send_command(ipr_cmd);
	} else {
		list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_free_q);
	}
}

/**
 * ipr_update_ata_class - Update the ata class in the resource entry
 * @res:	resource entry struct
 * @proto:	cfgte device bus protocol value
 *
 * Return value:
 *	none
 **/
static void ipr_update_ata_class(struct ipr_resource_entry *res, unsigned int proto)
{
	switch (proto) {
	case IPR_PROTO_SATA:
	case IPR_PROTO_SAS_STP:
		res->ata_class = ATA_DEV_ATA;
		break;
	case IPR_PROTO_SATA_ATAPI:
	case IPR_PROTO_SAS_STP_ATAPI:
		res->ata_class = ATA_DEV_ATAPI;
		break;
	default:
		res->ata_class = ATA_DEV_UNKNOWN;
		break;
	}
}

/**
 * ipr_init_res_entry - Initialize a resource entry struct.
 * @res:	resource entry struct
 * @cfgtew:	config table entry wrapper struct
 *
 * Return value:
 *	none
 **/
static void ipr_init_res_entry(struct ipr_resource_entry *res,
			       struct ipr_config_table_entry_wrapper *cfgtew)
{
	int found = 0;
	unsigned int proto;
	struct ipr_ioa_cfg *ioa_cfg = res->ioa_cfg;
	struct ipr_resource_entry *gscsi_res = NULL;

	res->needs_sync_complete = 0;
	res->in_erp = 0;
	res->add_to_ml = 0;
	res->del_from_ml = 0;
	res->resetting_device = 0;
	res->reset_occurred = 0;
	res->sdev = NULL;
	res->sata_port = NULL;

	if (ioa_cfg->sis64) {
		proto = cfgtew->u.cfgte64->proto;
		res->flags = be16_to_cpu(cfgtew->u.cfgte64->flags);
		res->res_flags = be16_to_cpu(cfgtew->u.cfgte64->res_flags);
		res->qmodel = IPR_QUEUEING_MODEL64(res);
		res->type = cfgtew->u.cfgte64->res_type;

		memcpy(res->res_path, &cfgtew->u.cfgte64->res_path,
		       sizeof(res->res_path));

		res->bus = 0;
		memcpy(&res->dev_lun.scsi_lun, &cfgtew->u.cfgte64->lun,
		       sizeof(res->dev_lun.scsi_lun));
		res->lun = scsilun_to_int(&res->dev_lun);

		if (res->type == IPR_RES_TYPE_GENERIC_SCSI) {
			list_for_each_entry(gscsi_res, &ioa_cfg->used_res_q, queue) {
				if (gscsi_res->dev_id == cfgtew->u.cfgte64->dev_id) {
					found = 1;
					res->target = gscsi_res->target;
					break;
				}
			}
			if (!found) {
				res->target = find_first_zero_bit(ioa_cfg->target_ids,
								  ioa_cfg->max_devs_supported);
				set_bit(res->target, ioa_cfg->target_ids);
			}
		} else if (res->type == IPR_RES_TYPE_IOAFP) {
			res->bus = IPR_IOAFP_VIRTUAL_BUS;
			res->target = 0;
		} else if (res->type == IPR_RES_TYPE_ARRAY) {
			res->bus = IPR_ARRAY_VIRTUAL_BUS;
			res->target = find_first_zero_bit(ioa_cfg->array_ids,
							  ioa_cfg->max_devs_supported);
			set_bit(res->target, ioa_cfg->array_ids);
		} else if (res->type == IPR_RES_TYPE_VOLUME_SET) {
			res->bus = IPR_VSET_VIRTUAL_BUS;
			res->target = find_first_zero_bit(ioa_cfg->vset_ids,
							  ioa_cfg->max_devs_supported);
			set_bit(res->target, ioa_cfg->vset_ids);
		} else {
			res->target = find_first_zero_bit(ioa_cfg->target_ids,
							  ioa_cfg->max_devs_supported);
			set_bit(res->target, ioa_cfg->target_ids);
		}
	} else {
		proto = cfgtew->u.cfgte->proto;
		res->qmodel = IPR_QUEUEING_MODEL(res);
		res->flags = cfgtew->u.cfgte->flags;
		if (res->flags & IPR_IS_IOA_RESOURCE)
			res->type = IPR_RES_TYPE_IOAFP;
		else
			res->type = cfgtew->u.cfgte->rsvd_subtype & 0x0f;

		res->bus = cfgtew->u.cfgte->res_addr.bus;
		res->target = cfgtew->u.cfgte->res_addr.target;
		res->lun = cfgtew->u.cfgte->res_addr.lun;
		res->lun_wwn = get_unaligned_be64(cfgtew->u.cfgte->lun_wwn);
	}

	ipr_update_ata_class(res, proto);
}

/**
 * ipr_is_same_device - Determine if two devices are the same.
 * @res:	resource entry struct
 * @cfgtew:	config table entry wrapper struct
 *
 * Return value:
 *	1 if the devices are the same / 0 otherwise
 **/
static int ipr_is_same_device(struct ipr_resource_entry *res,
			      struct ipr_config_table_entry_wrapper *cfgtew)
{
	if (res->ioa_cfg->sis64) {
		if (!memcmp(&res->dev_id, &cfgtew->u.cfgte64->dev_id,
			    sizeof(cfgtew->u.cfgte64->dev_id)) &&
		    !memcmp(&res->dev_lun.scsi_lun, &cfgtew->u.cfgte64->lun,
			    sizeof(cfgtew->u.cfgte64->lun))) {
			return 1;
		}
	} else {
		if (res->bus == cfgtew->u.cfgte->res_addr.bus &&
		    res->target == cfgtew->u.cfgte->res_addr.target &&
		    res->lun == cfgtew->u.cfgte->res_addr.lun)
			return 1;
	}

	return 0;
}

/**
 * __ipr_format_res_path - Format the resource path for printing.
 * @res_path:	resource path
 * @buffer:	buffer
 * @len:	length of buffer provided
 *
 * Return value:
 *	pointer to buffer
 **/
static char *__ipr_format_res_path(u8 *res_path, char *buffer, int len)
{
	int i;
	char *p = buffer;

	*p = '\0';
	p += snprintf(p, buffer + len - p, "%02X", res_path[0]);
	for (i = 1; res_path[i] != 0xff && ((i * 3) < len); i++)
		p += snprintf(p, buffer + len - p, "-%02X", res_path[i]);

	return buffer;
}

/**
 * ipr_format_res_path - Format the resource path for printing.
 * @ioa_cfg:	ioa config struct
 * @res_path:	resource path
 * @buffer:	buffer
 * @len:	length of buffer provided
 *
 * Return value:
 *	pointer to buffer
 **/
static char *ipr_format_res_path(struct ipr_ioa_cfg *ioa_cfg,
				 u8 *res_path, char *buffer, int len)
{
	char *p = buffer;

	*p = '\0';
	p += snprintf(p, buffer + len - p, "%d/", ioa_cfg->host->host_no);
	__ipr_format_res_path(res_path, p, len - (p - buffer));
	return buffer;
}
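/*
 * Example output (values invented): for host_no 2 and a res_path of
 * { 0x00, 0x0E, 0x02, 0xff, ... }, ipr_format_res_path() produces
 * "2/00-0E-02" - the host number, then each path element as two hex
 * digits separated by dashes, stopping at the 0xff terminator.
 */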

/**
 * ipr_update_res_entry - Update the resource entry.
 * @res:	resource entry struct
 * @cfgtew:	config table entry wrapper struct
 *
 * Return value:
 *	none
 **/
static void ipr_update_res_entry(struct ipr_resource_entry *res,
				 struct ipr_config_table_entry_wrapper *cfgtew)
{
	char buffer[IPR_MAX_RES_PATH_LENGTH];
	unsigned int proto;
	int new_path = 0;

	if (res->ioa_cfg->sis64) {
		res->flags = be16_to_cpu(cfgtew->u.cfgte64->flags);
		res->res_flags = be16_to_cpu(cfgtew->u.cfgte64->res_flags);
		res->type = cfgtew->u.cfgte64->res_type;

		memcpy(&res->std_inq_data, &cfgtew->u.cfgte64->std_inq_data,
		       sizeof(struct ipr_std_inq_data));

		res->qmodel = IPR_QUEUEING_MODEL64(res);
		proto = cfgtew->u.cfgte64->proto;
		res->res_handle = cfgtew->u.cfgte64->res_handle;
		res->dev_id = cfgtew->u.cfgte64->dev_id;

		memcpy(&res->dev_lun.scsi_lun, &cfgtew->u.cfgte64->lun,
		       sizeof(res->dev_lun.scsi_lun));

		if (memcmp(res->res_path, &cfgtew->u.cfgte64->res_path,
			   sizeof(res->res_path))) {
			memcpy(res->res_path, &cfgtew->u.cfgte64->res_path,
			       sizeof(res->res_path));
			new_path = 1;
		}

		if (res->sdev && new_path)
			sdev_printk(KERN_INFO, res->sdev, "Resource path: %s\n",
				    ipr_format_res_path(res->ioa_cfg,
					res->res_path, buffer, sizeof(buffer)));
	} else {
		res->flags = cfgtew->u.cfgte->flags;
		if (res->flags & IPR_IS_IOA_RESOURCE)
			res->type = IPR_RES_TYPE_IOAFP;
		else
			res->type = cfgtew->u.cfgte->rsvd_subtype & 0x0f;

		memcpy(&res->std_inq_data, &cfgtew->u.cfgte->std_inq_data,
		       sizeof(struct ipr_std_inq_data));

		res->qmodel = IPR_QUEUEING_MODEL(res);
		proto = cfgtew->u.cfgte->proto;
		res->res_handle = cfgtew->u.cfgte->res_handle;
	}

	ipr_update_ata_class(res, proto);
}

/**
 * ipr_clear_res_target - Clear the bit in the bit map representing the target
 *			  for the resource.
 * @res:	resource entry struct
 *
 * Return value:
 *	none
 **/
static void ipr_clear_res_target(struct ipr_resource_entry *res)
{
	struct ipr_resource_entry *gscsi_res = NULL;
	struct ipr_ioa_cfg *ioa_cfg = res->ioa_cfg;

	if (!ioa_cfg->sis64)
		return;

	if (res->bus == IPR_ARRAY_VIRTUAL_BUS)
		clear_bit(res->target, ioa_cfg->array_ids);
	else if (res->bus == IPR_VSET_VIRTUAL_BUS)
		clear_bit(res->target, ioa_cfg->vset_ids);
	else if (res->bus == 0 && res->type == IPR_RES_TYPE_GENERIC_SCSI) {
		list_for_each_entry(gscsi_res, &ioa_cfg->used_res_q, queue)
			if (gscsi_res->dev_id == res->dev_id && gscsi_res != res)
				return;
		clear_bit(res->target, ioa_cfg->target_ids);

	} else if (res->bus == 0)
		clear_bit(res->target, ioa_cfg->target_ids);
}

/**
 * ipr_handle_config_change - Handle a config change from the adapter
 * @ioa_cfg:	ioa config struct
 * @hostrcb:	hostrcb
 *
 * Return value:
 *	none
 **/
static void ipr_handle_config_change(struct ipr_ioa_cfg *ioa_cfg,
				     struct ipr_hostrcb *hostrcb)
{
	struct ipr_resource_entry *res = NULL;
	struct ipr_config_table_entry_wrapper cfgtew;
	__be32 cc_res_handle;
	u32 is_ndn = 1;

	if (ioa_cfg->sis64) {
		cfgtew.u.cfgte64 = &hostrcb->hcam.u.ccn.u.cfgte64;
		cc_res_handle = cfgtew.u.cfgte64->res_handle;
	} else {
		cfgtew.u.cfgte = &hostrcb->hcam.u.ccn.u.cfgte;
		cc_res_handle = cfgtew.u.cfgte->res_handle;
	}

	list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
		if (res->res_handle == cc_res_handle) {
			is_ndn = 0;
			break;
		}
	}

	if (is_ndn) {
		if (list_empty(&ioa_cfg->free_res_q)) {
			ipr_send_hcam(ioa_cfg,
				      IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE,
				      hostrcb);
			return;
		}

		res = list_entry(ioa_cfg->free_res_q.next,
				 struct ipr_resource_entry, queue);

		list_del(&res->queue);
		ipr_init_res_entry(res, &cfgtew);
		list_add_tail(&res->queue, &ioa_cfg->used_res_q);
	}

	ipr_update_res_entry(res, &cfgtew);

	if (hostrcb->hcam.notify_type == IPR_HOST_RCB_NOTIF_TYPE_REM_ENTRY) {
		if (res->sdev) {
			res->del_from_ml = 1;
			res->res_handle = IPR_INVALID_RES_HANDLE;
			schedule_work(&ioa_cfg->work_q);
		} else {
			ipr_clear_res_target(res);
			list_move_tail(&res->queue, &ioa_cfg->free_res_q);
		}
	} else if (!res->sdev || res->del_from_ml) {
		res->add_to_ml = 1;
		schedule_work(&ioa_cfg->work_q);
	}

	ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE, hostrcb);
}

/**
 * ipr_process_ccn - Op done function for a CCN.
 * @ipr_cmd:	ipr command struct
 *
 * This function is the op done function for a configuration
 * change notification host controlled async from the adapter.
 *
 * Return value:
 *	none
 **/
static void ipr_process_ccn(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	struct ipr_hostrcb *hostrcb = ipr_cmd->u.hostrcb;
	u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);

	list_del(&hostrcb->queue);
	list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);

	if (ioasc) {
		if (ioasc != IPR_IOASC_IOA_WAS_RESET &&
		    ioasc != IPR_IOASC_ABORTED_CMD_TERM_BY_HOST)
			dev_err(&ioa_cfg->pdev->dev,
				"Host RCB failed with IOASC: 0x%08X\n", ioasc);

		ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE, hostrcb);
	} else {
		ipr_handle_config_change(ioa_cfg, hostrcb);
	}
}

/**
 * strip_and_pad_whitespace - Strip and pad trailing whitespace.
 * @i:		index into buffer
 * @buf:	string to modify
 *
 * This function will strip all trailing whitespace, pad the end
 * of the string with a single space, and NULL terminate the string.
 *
 * Return value:
 *	new length of string
 **/
static int strip_and_pad_whitespace(int i, char *buf)
{
	while (i && buf[i] == ' ')
		i--;
	buf[i+1] = ' ';
	buf[i+2] = '\0';
	return i + 2;
}
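/*
 * Worked example for strip_and_pad_whitespace(): with buf = "IBM     "
 * and i = 7 (the last index of the fixed-width field), the loop backs up
 * to the 'M', a single separating space is kept and the string is
 * terminated, leaving "IBM " and returning 4 - the offset at which the
 * next field is appended by ipr_log_vpd_compact() below.
 */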

/**
 * ipr_log_vpd_compact - Log the passed extended VPD compactly.
 * @prefix:	string to print at start of printk
 * @hostrcb:	hostrcb pointer
 * @vpd:	vendor/product id/sn struct
 *
 * Return value:
 *	none
 **/
static void ipr_log_vpd_compact(char *prefix, struct ipr_hostrcb *hostrcb,
				struct ipr_vpd *vpd)
{
	char buffer[IPR_VENDOR_ID_LEN + IPR_PROD_ID_LEN + IPR_SERIAL_NUM_LEN + 3];
	int i = 0;

	memcpy(buffer, vpd->vpids.vendor_id, IPR_VENDOR_ID_LEN);
	i = strip_and_pad_whitespace(IPR_VENDOR_ID_LEN - 1, buffer);

	memcpy(&buffer[i], vpd->vpids.product_id, IPR_PROD_ID_LEN);
	i = strip_and_pad_whitespace(i + IPR_PROD_ID_LEN - 1, buffer);

	memcpy(&buffer[i], vpd->sn, IPR_SERIAL_NUM_LEN);
	buffer[IPR_SERIAL_NUM_LEN + i] = '\0';

	ipr_hcam_err(hostrcb, "%s VPID/SN: %s\n", prefix, buffer);
}

/**
 * ipr_log_vpd - Log the passed VPD to the error log.
 * @vpd:	vendor/product id/sn struct
 *
 * Return value:
 *	none
 **/
static void ipr_log_vpd(struct ipr_vpd *vpd)
{
	char buffer[IPR_VENDOR_ID_LEN + IPR_PROD_ID_LEN
		    + IPR_SERIAL_NUM_LEN];

	memcpy(buffer, vpd->vpids.vendor_id, IPR_VENDOR_ID_LEN);
	memcpy(buffer + IPR_VENDOR_ID_LEN, vpd->vpids.product_id,
	       IPR_PROD_ID_LEN);
	buffer[IPR_VENDOR_ID_LEN + IPR_PROD_ID_LEN] = '\0';
	ipr_err("Vendor/Product ID: %s\n", buffer);

	memcpy(buffer, vpd->sn, IPR_SERIAL_NUM_LEN);
	buffer[IPR_SERIAL_NUM_LEN] = '\0';
	ipr_err("    Serial Number: %s\n", buffer);
}

/**
 * ipr_log_ext_vpd_compact - Log the passed extended VPD compactly.
 * @prefix:	string to print at start of printk
 * @hostrcb:	hostrcb pointer
 * @vpd:	vendor/product id/sn/wwn struct
 *
 * Return value:
 *	none
 **/
static void ipr_log_ext_vpd_compact(char *prefix, struct ipr_hostrcb *hostrcb,
				    struct ipr_ext_vpd *vpd)
{
	ipr_log_vpd_compact(prefix, hostrcb, &vpd->vpd);
	ipr_hcam_err(hostrcb, "%s WWN: %08X%08X\n", prefix,
		     be32_to_cpu(vpd->wwid[0]), be32_to_cpu(vpd->wwid[1]));
}

/**
 * ipr_log_ext_vpd - Log the passed extended VPD to the error log.
 * @vpd:	vendor/product id/sn/wwn struct
 *
 * Return value:
 *	none
 **/
static void ipr_log_ext_vpd(struct ipr_ext_vpd *vpd)
{
	ipr_log_vpd(&vpd->vpd);
	ipr_err("    WWN: %08X%08X\n", be32_to_cpu(vpd->wwid[0]),
		be32_to_cpu(vpd->wwid[1]));
}

/**
 * ipr_log_enhanced_cache_error - Log a cache error.
 * @ioa_cfg:	ioa config struct
 * @hostrcb:	hostrcb struct
 *
 * Return value:
 *	none
 **/
static void ipr_log_enhanced_cache_error(struct ipr_ioa_cfg *ioa_cfg,
					 struct ipr_hostrcb *hostrcb)
{
	struct ipr_hostrcb_type_12_error *error;

	if (ioa_cfg->sis64)
		error = &hostrcb->hcam.u.error64.u.type_12_error;
	else
		error = &hostrcb->hcam.u.error.u.type_12_error;

	ipr_err("-----Current Configuration-----\n");
	ipr_err("Cache Directory Card Information:\n");
	ipr_log_ext_vpd(&error->ioa_vpd);
	ipr_err("Adapter Card Information:\n");
	ipr_log_ext_vpd(&error->cfc_vpd);

	ipr_err("-----Expected Configuration-----\n");
	ipr_err("Cache Directory Card Information:\n");
	ipr_log_ext_vpd(&error->ioa_last_attached_to_cfc_vpd);
	ipr_err("Adapter Card Information:\n");
	ipr_log_ext_vpd(&error->cfc_last_attached_to_ioa_vpd);

	ipr_err("Additional IOA Data: %08X %08X %08X\n",
		     be32_to_cpu(error->ioa_data[0]),
		     be32_to_cpu(error->ioa_data[1]),
		     be32_to_cpu(error->ioa_data[2]));
}

/**
 * ipr_log_cache_error - Log a cache error.
 * @ioa_cfg:	ioa config struct
 * @hostrcb:	hostrcb struct
 *
 * Return value:
 *	none
 **/
static void ipr_log_cache_error(struct ipr_ioa_cfg *ioa_cfg,
				struct ipr_hostrcb *hostrcb)
{
	struct ipr_hostrcb_type_02_error *error =
		&hostrcb->hcam.u.error.u.type_02_error;

	ipr_err("-----Current Configuration-----\n");
	ipr_err("Cache Directory Card Information:\n");
	ipr_log_vpd(&error->ioa_vpd);
	ipr_err("Adapter Card Information:\n");
	ipr_log_vpd(&error->cfc_vpd);

	ipr_err("-----Expected Configuration-----\n");
	ipr_err("Cache Directory Card Information:\n");
	ipr_log_vpd(&error->ioa_last_attached_to_cfc_vpd);
	ipr_err("Adapter Card Information:\n");
	ipr_log_vpd(&error->cfc_last_attached_to_ioa_vpd);

	ipr_err("Additional IOA Data: %08X %08X %08X\n",
		be32_to_cpu(error->ioa_data[0]),
		be32_to_cpu(error->ioa_data[1]),
		be32_to_cpu(error->ioa_data[2]));
}
1661 * @ioa_cfg: ioa config struct
1662 * @hostrcb: hostrcb struct
1663 *
1664 * Return value:
1665 * none
1666 **/
1667 static void ipr_log_enhanced_config_error(struct ipr_ioa_cfg *ioa_cfg,
1668 struct ipr_hostrcb *hostrcb)
1669 {
1670 int errors_logged, i;
1671 struct ipr_hostrcb_device_data_entry_enhanced *dev_entry;
1672 struct ipr_hostrcb_type_13_error *error;
1673
1674 error = &hostrcb->hcam.u.error.u.type_13_error;
1675 errors_logged = be32_to_cpu(error->errors_logged);
1676
1677 ipr_err("Device Errors Detected/Logged: %d/%d\n",
1678 be32_to_cpu(error->errors_detected), errors_logged);
1679
1680 dev_entry = error->dev;
1681
1682 for (i = 0; i < errors_logged; i++, dev_entry++) {
1683 ipr_err_separator;
1684
1685 ipr_phys_res_err(ioa_cfg, dev_entry->dev_res_addr, "Device %d", i + 1);
1686 ipr_log_ext_vpd(&dev_entry->vpd);
1687
1688 ipr_err("-----New Device Information-----\n");
1689 ipr_log_ext_vpd(&dev_entry->new_vpd);
1690
1691 ipr_err("Cache Directory Card Information:\n");
1692 ipr_log_ext_vpd(&dev_entry->ioa_last_with_dev_vpd);
1693
1694 ipr_err("Adapter Card Information:\n");
1695 ipr_log_ext_vpd(&dev_entry->cfc_last_with_dev_vpd);
1696 }
1697 }
1698
1699 /**
1700 * ipr_log_sis64_config_error - Log a sis64 configuration error.
1701 * @ioa_cfg: ioa config struct
1702 * @hostrcb: hostrcb struct
1703 *
1704 * Return value:
1705 * none
1706 **/
1707 static void ipr_log_sis64_config_error(struct ipr_ioa_cfg *ioa_cfg,
1708 struct ipr_hostrcb *hostrcb)
1709 {
1710 int errors_logged, i;
1711 struct ipr_hostrcb64_device_data_entry_enhanced *dev_entry;
1712 struct ipr_hostrcb_type_23_error *error;
1713 char buffer[IPR_MAX_RES_PATH_LENGTH];
1714
1715 error = &hostrcb->hcam.u.error64.u.type_23_error;
1716 errors_logged = be32_to_cpu(error->errors_logged);
1717
1718 ipr_err("Device Errors Detected/Logged: %d/%d\n",
1719 be32_to_cpu(error->errors_detected), errors_logged);
1720
1721 dev_entry = error->dev;
1722
1723 for (i = 0; i < errors_logged; i++, dev_entry++) {
1724 ipr_err_separator;
1725
1726 ipr_err("Device %d : %s\n", i + 1,
1727 __ipr_format_res_path(dev_entry->res_path,
1728 buffer, sizeof(buffer)));
1729 ipr_log_ext_vpd(&dev_entry->vpd);
1730
1731 ipr_err("-----New Device Information-----\n");
1732 ipr_log_ext_vpd(&dev_entry->new_vpd);
1733
1734 ipr_err("Cache Directory Card Information:\n");
1735 ipr_log_ext_vpd(&dev_entry->ioa_last_with_dev_vpd);
1736
1737 ipr_err("Adapter Card Information:\n");
1738 ipr_log_ext_vpd(&dev_entry->cfc_last_with_dev_vpd);
1739 }
1740 }
1741
1742 /**
1743 * ipr_log_config_error - Log a configuration error.
1744 * @ioa_cfg: ioa config struct 1745 * @hostrcb: hostrcb struct 1746 * 1747 * Return value: 1748 * none 1749 **/ 1750 static void ipr_log_config_error(struct ipr_ioa_cfg *ioa_cfg, 1751 struct ipr_hostrcb *hostrcb) 1752 { 1753 int errors_logged, i; 1754 struct ipr_hostrcb_device_data_entry *dev_entry; 1755 struct ipr_hostrcb_type_03_error *error; 1756 1757 error = &hostrcb->hcam.u.error.u.type_03_error; 1758 errors_logged = be32_to_cpu(error->errors_logged); 1759 1760 ipr_err("Device Errors Detected/Logged: %d/%d\n", 1761 be32_to_cpu(error->errors_detected), errors_logged); 1762 1763 dev_entry = error->dev; 1764 1765 for (i = 0; i < errors_logged; i++, dev_entry++) { 1766 ipr_err_separator; 1767 1768 ipr_phys_res_err(ioa_cfg, dev_entry->dev_res_addr, "Device %d", i + 1); 1769 ipr_log_vpd(&dev_entry->vpd); 1770 1771 ipr_err("-----New Device Information-----\n"); 1772 ipr_log_vpd(&dev_entry->new_vpd); 1773 1774 ipr_err("Cache Directory Card Information:\n"); 1775 ipr_log_vpd(&dev_entry->ioa_last_with_dev_vpd); 1776 1777 ipr_err("Adapter Card Information:\n"); 1778 ipr_log_vpd(&dev_entry->cfc_last_with_dev_vpd); 1779 1780 ipr_err("Additional IOA Data: %08X %08X %08X %08X %08X\n", 1781 be32_to_cpu(dev_entry->ioa_data[0]), 1782 be32_to_cpu(dev_entry->ioa_data[1]), 1783 be32_to_cpu(dev_entry->ioa_data[2]), 1784 be32_to_cpu(dev_entry->ioa_data[3]), 1785 be32_to_cpu(dev_entry->ioa_data[4])); 1786 } 1787 } 1788 1789 /** 1790 * ipr_log_enhanced_array_error - Log an array configuration error. 1791 * @ioa_cfg: ioa config struct 1792 * @hostrcb: hostrcb struct 1793 * 1794 * Return value: 1795 * none 1796 **/ 1797 static void ipr_log_enhanced_array_error(struct ipr_ioa_cfg *ioa_cfg, 1798 struct ipr_hostrcb *hostrcb) 1799 { 1800 int i, num_entries; 1801 struct ipr_hostrcb_type_14_error *error; 1802 struct ipr_hostrcb_array_data_entry_enhanced *array_entry; 1803 const u8 zero_sn[IPR_SERIAL_NUM_LEN] = { [0 ... IPR_SERIAL_NUM_LEN-1] = '0' }; 1804 1805 error = &hostrcb->hcam.u.error.u.type_14_error; 1806 1807 ipr_err_separator; 1808 1809 ipr_err("RAID %s Array Configuration: %d:%d:%d:%d\n", 1810 error->protection_level, 1811 ioa_cfg->host->host_no, 1812 error->last_func_vset_res_addr.bus, 1813 error->last_func_vset_res_addr.target, 1814 error->last_func_vset_res_addr.lun); 1815 1816 ipr_err_separator; 1817 1818 array_entry = error->array_member; 1819 num_entries = min_t(u32, be32_to_cpu(error->num_entries), 1820 ARRAY_SIZE(error->array_member)); 1821 1822 for (i = 0; i < num_entries; i++, array_entry++) { 1823 if (!memcmp(array_entry->vpd.vpd.sn, zero_sn, IPR_SERIAL_NUM_LEN)) 1824 continue; 1825 1826 if (be32_to_cpu(error->exposed_mode_adn) == i) 1827 ipr_err("Exposed Array Member %d:\n", i); 1828 else 1829 ipr_err("Array Member %d:\n", i); 1830 1831 ipr_log_ext_vpd(&array_entry->vpd); 1832 ipr_phys_res_err(ioa_cfg, array_entry->dev_res_addr, "Current Location"); 1833 ipr_phys_res_err(ioa_cfg, array_entry->expected_dev_res_addr, 1834 "Expected Location"); 1835 1836 ipr_err_separator; 1837 } 1838 } 1839 1840 /** 1841 * ipr_log_array_error - Log an array configuration error. 1842 * @ioa_cfg: ioa config struct 1843 * @hostrcb: hostrcb struct 1844 * 1845 * Return value: 1846 * none 1847 **/ 1848 static void ipr_log_array_error(struct ipr_ioa_cfg *ioa_cfg, 1849 struct ipr_hostrcb *hostrcb) 1850 { 1851 int i; 1852 struct ipr_hostrcb_type_04_error *error; 1853 struct ipr_hostrcb_array_data_entry *array_entry; 1854 const u8 zero_sn[IPR_SERIAL_NUM_LEN] = { [0 ... 
IPR_SERIAL_NUM_LEN-1] = '0' }; 1855 1856 error = &hostrcb->hcam.u.error.u.type_04_error; 1857 1858 ipr_err_separator; 1859 1860 ipr_err("RAID %s Array Configuration: %d:%d:%d:%d\n", 1861 error->protection_level, 1862 ioa_cfg->host->host_no, 1863 error->last_func_vset_res_addr.bus, 1864 error->last_func_vset_res_addr.target, 1865 error->last_func_vset_res_addr.lun); 1866 1867 ipr_err_separator; 1868 1869 array_entry = error->array_member; 1870 1871 for (i = 0; i < 18; i++) { 1872 if (!memcmp(array_entry->vpd.sn, zero_sn, IPR_SERIAL_NUM_LEN)) 1873 continue; 1874 1875 if (be32_to_cpu(error->exposed_mode_adn) == i) 1876 ipr_err("Exposed Array Member %d:\n", i); 1877 else 1878 ipr_err("Array Member %d:\n", i); 1879 1880 ipr_log_vpd(&array_entry->vpd); 1881 1882 ipr_phys_res_err(ioa_cfg, array_entry->dev_res_addr, "Current Location"); 1883 ipr_phys_res_err(ioa_cfg, array_entry->expected_dev_res_addr, 1884 "Expected Location"); 1885 1886 ipr_err_separator; 1887 1888 if (i == 9) 1889 array_entry = error->array_member2; 1890 else 1891 array_entry++; 1892 } 1893 } 1894 1895 /** 1896 * ipr_log_hex_data - Log additional hex IOA error data. 1897 * @ioa_cfg: ioa config struct 1898 * @data: IOA error data 1899 * @len: data length 1900 * 1901 * Return value: 1902 * none 1903 **/ 1904 static void ipr_log_hex_data(struct ipr_ioa_cfg *ioa_cfg, __be32 *data, int len) 1905 { 1906 int i; 1907 1908 if (len == 0) 1909 return; 1910 1911 if (ioa_cfg->log_level <= IPR_DEFAULT_LOG_LEVEL) 1912 len = min_t(int, len, IPR_DEFAULT_MAX_ERROR_DUMP); 1913 1914 for (i = 0; i < len / 4; i += 4) { 1915 ipr_err("%08X: %08X %08X %08X %08X\n", i*4, 1916 be32_to_cpu(data[i]), 1917 be32_to_cpu(data[i+1]), 1918 be32_to_cpu(data[i+2]), 1919 be32_to_cpu(data[i+3])); 1920 } 1921 } 1922 1923 /** 1924 * ipr_log_enhanced_dual_ioa_error - Log an enhanced dual adapter error. 1925 * @ioa_cfg: ioa config struct 1926 * @hostrcb: hostrcb struct 1927 * 1928 * Return value: 1929 * none 1930 **/ 1931 static void ipr_log_enhanced_dual_ioa_error(struct ipr_ioa_cfg *ioa_cfg, 1932 struct ipr_hostrcb *hostrcb) 1933 { 1934 struct ipr_hostrcb_type_17_error *error; 1935 1936 if (ioa_cfg->sis64) 1937 error = &hostrcb->hcam.u.error64.u.type_17_error; 1938 else 1939 error = &hostrcb->hcam.u.error.u.type_17_error; 1940 1941 error->failure_reason[sizeof(error->failure_reason) - 1] = '\0'; 1942 strim(error->failure_reason); 1943 1944 ipr_hcam_err(hostrcb, "%s [PRC: %08X]\n", error->failure_reason, 1945 be32_to_cpu(hostrcb->hcam.u.error.prc)); 1946 ipr_log_ext_vpd_compact("Remote IOA", hostrcb, &error->vpd); 1947 ipr_log_hex_data(ioa_cfg, error->data, 1948 be32_to_cpu(hostrcb->hcam.length) - 1949 (offsetof(struct ipr_hostrcb_error, u) + 1950 offsetof(struct ipr_hostrcb_type_17_error, data))); 1951 } 1952 1953 /** 1954 * ipr_log_dual_ioa_error - Log a dual adapter error. 
1955 * @ioa_cfg: ioa config struct 1956 * @hostrcb: hostrcb struct 1957 * 1958 * Return value: 1959 * none 1960 **/ 1961 static void ipr_log_dual_ioa_error(struct ipr_ioa_cfg *ioa_cfg, 1962 struct ipr_hostrcb *hostrcb) 1963 { 1964 struct ipr_hostrcb_type_07_error *error; 1965 1966 error = &hostrcb->hcam.u.error.u.type_07_error; 1967 error->failure_reason[sizeof(error->failure_reason) - 1] = '\0'; 1968 strim(error->failure_reason); 1969 1970 ipr_hcam_err(hostrcb, "%s [PRC: %08X]\n", error->failure_reason, 1971 be32_to_cpu(hostrcb->hcam.u.error.prc)); 1972 ipr_log_vpd_compact("Remote IOA", hostrcb, &error->vpd); 1973 ipr_log_hex_data(ioa_cfg, error->data, 1974 be32_to_cpu(hostrcb->hcam.length) - 1975 (offsetof(struct ipr_hostrcb_error, u) + 1976 offsetof(struct ipr_hostrcb_type_07_error, data))); 1977 } 1978 1979 static const struct { 1980 u8 active; 1981 char *desc; 1982 } path_active_desc[] = { 1983 { IPR_PATH_NO_INFO, "Path" }, 1984 { IPR_PATH_ACTIVE, "Active path" }, 1985 { IPR_PATH_NOT_ACTIVE, "Inactive path" } 1986 }; 1987 1988 static const struct { 1989 u8 state; 1990 char *desc; 1991 } path_state_desc[] = { 1992 { IPR_PATH_STATE_NO_INFO, "has no path state information available" }, 1993 { IPR_PATH_HEALTHY, "is healthy" }, 1994 { IPR_PATH_DEGRADED, "is degraded" }, 1995 { IPR_PATH_FAILED, "is failed" } 1996 }; 1997 1998 /** 1999 * ipr_log_fabric_path - Log a fabric path error 2000 * @hostrcb: hostrcb struct 2001 * @fabric: fabric descriptor 2002 * 2003 * Return value: 2004 * none 2005 **/ 2006 static void ipr_log_fabric_path(struct ipr_hostrcb *hostrcb, 2007 struct ipr_hostrcb_fabric_desc *fabric) 2008 { 2009 int i, j; 2010 u8 path_state = fabric->path_state; 2011 u8 active = path_state & IPR_PATH_ACTIVE_MASK; 2012 u8 state = path_state & IPR_PATH_STATE_MASK; 2013 2014 for (i = 0; i < ARRAY_SIZE(path_active_desc); i++) { 2015 if (path_active_desc[i].active != active) 2016 continue; 2017 2018 for (j = 0; j < ARRAY_SIZE(path_state_desc); j++) { 2019 if (path_state_desc[j].state != state) 2020 continue; 2021 2022 if (fabric->cascaded_expander == 0xff && fabric->phy == 0xff) { 2023 ipr_hcam_err(hostrcb, "%s %s: IOA Port=%d\n", 2024 path_active_desc[i].desc, path_state_desc[j].desc, 2025 fabric->ioa_port); 2026 } else if (fabric->cascaded_expander == 0xff) { 2027 ipr_hcam_err(hostrcb, "%s %s: IOA Port=%d, Phy=%d\n", 2028 path_active_desc[i].desc, path_state_desc[j].desc, 2029 fabric->ioa_port, fabric->phy); 2030 } else if (fabric->phy == 0xff) { 2031 ipr_hcam_err(hostrcb, "%s %s: IOA Port=%d, Cascade=%d\n", 2032 path_active_desc[i].desc, path_state_desc[j].desc, 2033 fabric->ioa_port, fabric->cascaded_expander); 2034 } else { 2035 ipr_hcam_err(hostrcb, "%s %s: IOA Port=%d, Cascade=%d, Phy=%d\n", 2036 path_active_desc[i].desc, path_state_desc[j].desc, 2037 fabric->ioa_port, fabric->cascaded_expander, fabric->phy); 2038 } 2039 return; 2040 } 2041 } 2042 2043 ipr_err("Path state=%02X IOA Port=%d Cascade=%d Phy=%d\n", path_state, 2044 fabric->ioa_port, fabric->cascaded_expander, fabric->phy); 2045 } 2046 2047 /** 2048 * ipr_log64_fabric_path - Log a fabric path error 2049 * @hostrcb: hostrcb struct 2050 * @fabric: fabric descriptor 2051 * 2052 * Return value: 2053 * none 2054 **/ 2055 static void ipr_log64_fabric_path(struct ipr_hostrcb *hostrcb, 2056 struct ipr_hostrcb64_fabric_desc *fabric) 2057 { 2058 int i, j; 2059 u8 path_state = fabric->path_state; 2060 u8 active = path_state & IPR_PATH_ACTIVE_MASK; 2061 u8 state = path_state & IPR_PATH_STATE_MASK; 2062 char 
buffer[IPR_MAX_RES_PATH_LENGTH]; 2063 2064 for (i = 0; i < ARRAY_SIZE(path_active_desc); i++) { 2065 if (path_active_desc[i].active != active) 2066 continue; 2067 2068 for (j = 0; j < ARRAY_SIZE(path_state_desc); j++) { 2069 if (path_state_desc[j].state != state) 2070 continue; 2071 2072 ipr_hcam_err(hostrcb, "%s %s: Resource Path=%s\n", 2073 path_active_desc[i].desc, path_state_desc[j].desc, 2074 ipr_format_res_path(hostrcb->ioa_cfg, 2075 fabric->res_path, 2076 buffer, sizeof(buffer))); 2077 return; 2078 } 2079 } 2080 2081 ipr_err("Path state=%02X Resource Path=%s\n", path_state, 2082 ipr_format_res_path(hostrcb->ioa_cfg, fabric->res_path, 2083 buffer, sizeof(buffer))); 2084 } 2085 2086 static const struct { 2087 u8 type; 2088 char *desc; 2089 } path_type_desc[] = { 2090 { IPR_PATH_CFG_IOA_PORT, "IOA port" }, 2091 { IPR_PATH_CFG_EXP_PORT, "Expander port" }, 2092 { IPR_PATH_CFG_DEVICE_PORT, "Device port" }, 2093 { IPR_PATH_CFG_DEVICE_LUN, "Device LUN" } 2094 }; 2095 2096 static const struct { 2097 u8 status; 2098 char *desc; 2099 } path_status_desc[] = { 2100 { IPR_PATH_CFG_NO_PROB, "Functional" }, 2101 { IPR_PATH_CFG_DEGRADED, "Degraded" }, 2102 { IPR_PATH_CFG_FAILED, "Failed" }, 2103 { IPR_PATH_CFG_SUSPECT, "Suspect" }, 2104 { IPR_PATH_NOT_DETECTED, "Missing" }, 2105 { IPR_PATH_INCORRECT_CONN, "Incorrectly connected" } 2106 }; 2107 2108 static const char *link_rate[] = { 2109 "unknown", 2110 "disabled", 2111 "phy reset problem", 2112 "spinup hold", 2113 "port selector", 2114 "unknown", 2115 "unknown", 2116 "unknown", 2117 "1.5Gbps", 2118 "3.0Gbps", 2119 "unknown", 2120 "unknown", 2121 "unknown", 2122 "unknown", 2123 "unknown", 2124 "unknown" 2125 }; 2126 2127 /** 2128 * ipr_log_path_elem - Log a fabric path element. 2129 * @hostrcb: hostrcb struct 2130 * @cfg: fabric path element struct 2131 * 2132 * Return value: 2133 * none 2134 **/ 2135 static void ipr_log_path_elem(struct ipr_hostrcb *hostrcb, 2136 struct ipr_hostrcb_config_element *cfg) 2137 { 2138 int i, j; 2139 u8 type = cfg->type_status & IPR_PATH_CFG_TYPE_MASK; 2140 u8 status = cfg->type_status & IPR_PATH_CFG_STATUS_MASK; 2141 2142 if (type == IPR_PATH_CFG_NOT_EXIST) 2143 return; 2144 2145 for (i = 0; i < ARRAY_SIZE(path_type_desc); i++) { 2146 if (path_type_desc[i].type != type) 2147 continue; 2148 2149 for (j = 0; j < ARRAY_SIZE(path_status_desc); j++) { 2150 if (path_status_desc[j].status != status) 2151 continue; 2152 2153 if (type == IPR_PATH_CFG_IOA_PORT) { 2154 ipr_hcam_err(hostrcb, "%s %s: Phy=%d, Link rate=%s, WWN=%08X%08X\n", 2155 path_status_desc[j].desc, path_type_desc[i].desc, 2156 cfg->phy, link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK], 2157 be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1])); 2158 } else { 2159 if (cfg->cascaded_expander == 0xff && cfg->phy == 0xff) { 2160 ipr_hcam_err(hostrcb, "%s %s: Link rate=%s, WWN=%08X%08X\n", 2161 path_status_desc[j].desc, path_type_desc[i].desc, 2162 link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK], 2163 be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1])); 2164 } else if (cfg->cascaded_expander == 0xff) { 2165 ipr_hcam_err(hostrcb, "%s %s: Phy=%d, Link rate=%s, " 2166 "WWN=%08X%08X\n", path_status_desc[j].desc, 2167 path_type_desc[i].desc, cfg->phy, 2168 link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK], 2169 be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1])); 2170 } else if (cfg->phy == 0xff) { 2171 ipr_hcam_err(hostrcb, "%s %s: Cascade=%d, Link rate=%s, " 2172 "WWN=%08X%08X\n", path_status_desc[j].desc, 2173 path_type_desc[i].desc, cfg->cascaded_expander, 
2174 link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK], 2175 be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1])); 2176 } else { 2177 ipr_hcam_err(hostrcb, "%s %s: Cascade=%d, Phy=%d, Link rate=%s " 2178 "WWN=%08X%08X\n", path_status_desc[j].desc, 2179 path_type_desc[i].desc, cfg->cascaded_expander, cfg->phy, 2180 link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK], 2181 be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1])); 2182 } 2183 } 2184 return; 2185 } 2186 } 2187 2188 ipr_hcam_err(hostrcb, "Path element=%02X: Cascade=%d Phy=%d Link rate=%s " 2189 "WWN=%08X%08X\n", cfg->type_status, cfg->cascaded_expander, cfg->phy, 2190 link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK], 2191 be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1])); 2192 } 2193 2194 /** 2195 * ipr_log64_path_elem - Log a fabric path element. 2196 * @hostrcb: hostrcb struct 2197 * @cfg: fabric path element struct 2198 * 2199 * Return value: 2200 * none 2201 **/ 2202 static void ipr_log64_path_elem(struct ipr_hostrcb *hostrcb, 2203 struct ipr_hostrcb64_config_element *cfg) 2204 { 2205 int i, j; 2206 u8 desc_id = cfg->descriptor_id & IPR_DESCRIPTOR_MASK; 2207 u8 type = cfg->type_status & IPR_PATH_CFG_TYPE_MASK; 2208 u8 status = cfg->type_status & IPR_PATH_CFG_STATUS_MASK; 2209 char buffer[IPR_MAX_RES_PATH_LENGTH]; 2210 2211 if (type == IPR_PATH_CFG_NOT_EXIST || desc_id != IPR_DESCRIPTOR_SIS64) 2212 return; 2213 2214 for (i = 0; i < ARRAY_SIZE(path_type_desc); i++) { 2215 if (path_type_desc[i].type != type) 2216 continue; 2217 2218 for (j = 0; j < ARRAY_SIZE(path_status_desc); j++) { 2219 if (path_status_desc[j].status != status) 2220 continue; 2221 2222 ipr_hcam_err(hostrcb, "%s %s: Resource Path=%s, Link rate=%s, WWN=%08X%08X\n", 2223 path_status_desc[j].desc, path_type_desc[i].desc, 2224 ipr_format_res_path(hostrcb->ioa_cfg, 2225 cfg->res_path, buffer, sizeof(buffer)), 2226 link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK], 2227 be32_to_cpu(cfg->wwid[0]), 2228 be32_to_cpu(cfg->wwid[1])); 2229 return; 2230 } 2231 } 2232 ipr_hcam_err(hostrcb, "Path element=%02X: Resource Path=%s, Link rate=%s " 2233 "WWN=%08X%08X\n", cfg->type_status, 2234 ipr_format_res_path(hostrcb->ioa_cfg, 2235 cfg->res_path, buffer, sizeof(buffer)), 2236 link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK], 2237 be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1])); 2238 } 2239 2240 /** 2241 * ipr_log_fabric_error - Log a fabric error. 
2242 * @ioa_cfg: ioa config struct
2243 * @hostrcb: hostrcb struct
2244 *
2245 * Return value:
2246 * none
2247 **/
2248 static void ipr_log_fabric_error(struct ipr_ioa_cfg *ioa_cfg,
2249 struct ipr_hostrcb *hostrcb)
2250 {
2251 struct ipr_hostrcb_type_20_error *error;
2252 struct ipr_hostrcb_fabric_desc *fabric;
2253 struct ipr_hostrcb_config_element *cfg;
2254 int i, add_len;
2255
2256 error = &hostrcb->hcam.u.error.u.type_20_error;
2257 error->failure_reason[sizeof(error->failure_reason) - 1] = '\0';
2258 ipr_hcam_err(hostrcb, "%s\n", error->failure_reason);
2259
2260 add_len = be32_to_cpu(hostrcb->hcam.length) -
2261 (offsetof(struct ipr_hostrcb_error, u) +
2262 offsetof(struct ipr_hostrcb_type_20_error, desc));
2263
2264 for (i = 0, fabric = error->desc; i < error->num_entries; i++) {
2265 ipr_log_fabric_path(hostrcb, fabric);
2266 for_each_fabric_cfg(fabric, cfg)
2267 ipr_log_path_elem(hostrcb, cfg);
2268
2269 add_len -= be16_to_cpu(fabric->length);
2270 fabric = (struct ipr_hostrcb_fabric_desc *)
2271 ((unsigned long)fabric + be16_to_cpu(fabric->length));
2272 }
2273
2274 ipr_log_hex_data(ioa_cfg, (__be32 *)fabric, add_len);
2275 }
2276
2277 /**
2278 * ipr_log_sis64_array_error - Log a sis64 array error.
2279 * @ioa_cfg: ioa config struct
2280 * @hostrcb: hostrcb struct
2281 *
2282 * Return value:
2283 * none
2284 **/
2285 static void ipr_log_sis64_array_error(struct ipr_ioa_cfg *ioa_cfg,
2286 struct ipr_hostrcb *hostrcb)
2287 {
2288 int i, num_entries;
2289 struct ipr_hostrcb_type_24_error *error;
2290 struct ipr_hostrcb64_array_data_entry *array_entry;
2291 char buffer[IPR_MAX_RES_PATH_LENGTH];
2292 const u8 zero_sn[IPR_SERIAL_NUM_LEN] = { [0 ... IPR_SERIAL_NUM_LEN-1] = '0' };
2293
2294 error = &hostrcb->hcam.u.error64.u.type_24_error;
2295
2296 ipr_err_separator;
2297
2298 ipr_err("RAID %s Array Configuration: %s\n",
2299 error->protection_level,
2300 ipr_format_res_path(ioa_cfg, error->last_res_path,
2301 buffer, sizeof(buffer)));
2302
2303 ipr_err_separator;
2304
2305 array_entry = error->array_member;
2306 num_entries = min_t(u32, error->num_entries,
2307 ARRAY_SIZE(error->array_member));
2308
2309 for (i = 0; i < num_entries; i++, array_entry++) {
2310
2311 if (!memcmp(array_entry->vpd.vpd.sn, zero_sn, IPR_SERIAL_NUM_LEN))
2312 continue;
2313
2314 if (error->exposed_mode_adn == i)
2315 ipr_err("Exposed Array Member %d:\n", i);
2316 else
2317 ipr_err("Array Member %d:\n", i);
2318
2320 ipr_log_ext_vpd(&array_entry->vpd);
2321 ipr_err("Current Location: %s\n",
2322 ipr_format_res_path(ioa_cfg, array_entry->res_path,
2323 buffer, sizeof(buffer)));
2324 ipr_err("Expected Location: %s\n",
2325 ipr_format_res_path(ioa_cfg,
2326 array_entry->expected_res_path,
2327 buffer, sizeof(buffer)));
2328
2329 ipr_err_separator;
2330 }
2331 }
2332
2333 /**
2334 * ipr_log_sis64_fabric_error - Log a sis64 fabric error.
2335 * @ioa_cfg: ioa config struct
2336 * @hostrcb: hostrcb struct
2337 *
2338 * Return value:
2339 * none
2340 **/
2341 static void ipr_log_sis64_fabric_error(struct ipr_ioa_cfg *ioa_cfg,
2342 struct ipr_hostrcb *hostrcb)
2343 {
2344 struct ipr_hostrcb_type_30_error *error;
2345 struct ipr_hostrcb64_fabric_desc *fabric;
2346 struct ipr_hostrcb64_config_element *cfg;
2347 int i, add_len;
2348
2349 error = &hostrcb->hcam.u.error64.u.type_30_error;
2350
2351 error->failure_reason[sizeof(error->failure_reason) - 1] = '\0';
2352 ipr_hcam_err(hostrcb, "%s\n", error->failure_reason);
2353
2354 add_len = be32_to_cpu(hostrcb->hcam.length) -
2355 (offsetof(struct ipr_hostrcb64_error, u) +
2356 offsetof(struct ipr_hostrcb_type_30_error, desc));
2357
2358 for (i = 0, fabric = error->desc; i < error->num_entries; i++) {
2359 ipr_log64_fabric_path(hostrcb, fabric);
2360 for_each_fabric_cfg(fabric, cfg)
2361 ipr_log64_path_elem(hostrcb, cfg);
2362
2363 add_len -= be16_to_cpu(fabric->length);
2364 fabric = (struct ipr_hostrcb64_fabric_desc *)
2365 ((unsigned long)fabric + be16_to_cpu(fabric->length));
2366 }
2367
2368 ipr_log_hex_data(ioa_cfg, (__be32 *)fabric, add_len);
2369 }
2370
2371 /**
2372 * ipr_log_generic_error - Log an adapter error.
2373 * @ioa_cfg: ioa config struct
2374 * @hostrcb: hostrcb struct
2375 *
2376 * Return value:
2377 * none
2378 **/
2379 static void ipr_log_generic_error(struct ipr_ioa_cfg *ioa_cfg,
2380 struct ipr_hostrcb *hostrcb)
2381 {
2382 ipr_log_hex_data(ioa_cfg, hostrcb->hcam.u.raw.data,
2383 be32_to_cpu(hostrcb->hcam.length));
2384 }
2385
2386 /**
2387 * ipr_log_sis64_device_error - Log a sis64 device error.
2388 * @ioa_cfg: ioa config struct
2389 * @hostrcb: hostrcb struct
2390 *
2391 * Return value:
2392 * none
2393 **/
2394 static void ipr_log_sis64_device_error(struct ipr_ioa_cfg *ioa_cfg,
2395 struct ipr_hostrcb *hostrcb)
2396 {
2397 struct ipr_hostrcb_type_21_error *error;
2398 char buffer[IPR_MAX_RES_PATH_LENGTH];
2399
2400 error = &hostrcb->hcam.u.error64.u.type_21_error;
2401
2402 ipr_err("-----Failing Device Information-----\n");
2403 ipr_err("World Wide Unique ID: %08X%08X%08X%08X\n",
2404 be32_to_cpu(error->wwn[0]), be32_to_cpu(error->wwn[1]),
2405 be32_to_cpu(error->wwn[2]), be32_to_cpu(error->wwn[3]));
2406 ipr_err("Device Resource Path: %s\n",
2407 __ipr_format_res_path(error->res_path,
2408 buffer, sizeof(buffer)));
2409 error->primary_problem_desc[sizeof(error->primary_problem_desc) - 1] = '\0';
2410 error->second_problem_desc[sizeof(error->second_problem_desc) - 1] = '\0';
2411 ipr_err("Primary Problem Description: %s\n", error->primary_problem_desc);
2412 ipr_err("Secondary Problem Description: %s\n", error->second_problem_desc);
2413 ipr_err("SCSI Sense Data:\n");
2414 ipr_log_hex_data(ioa_cfg, error->sense_data, sizeof(error->sense_data));
2415 ipr_err("SCSI Command Descriptor Block:\n");
2416 ipr_log_hex_data(ioa_cfg, error->cdb, sizeof(error->cdb));
2417
2418 ipr_err("Additional IOA Data:\n");
2419 ipr_log_hex_data(ioa_cfg, error->ioa_data, be32_to_cpu(error->length_of_error));
2420 }
2421
2422 /**
2423 * ipr_get_error - Find the specified IOASC in the ipr_error_table.
2424 * @ioasc: IOASC
2425 *
2426 * This function will return the index into the ipr_error_table
2427 * for the specified IOASC. If the IOASC is not in the table,
2428 * 0 will be returned, which points to the entry used for unknown errors.
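 * Illustrative use (mirroring ipr_handle_log_data() below): the returned
 * index selects both the message text and the log level for the error:
 *
 *   error_index = ipr_get_error(ioasc);
 *   ipr_hcam_err(hostrcb, "%s\n", ipr_error_table[error_index].error);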
2429 * 2430 * Return value: 2431 * index into the ipr_error_table 2432 **/ 2433 static u32 ipr_get_error(u32 ioasc) 2434 { 2435 int i; 2436 2437 for (i = 0; i < ARRAY_SIZE(ipr_error_table); i++) 2438 if (ipr_error_table[i].ioasc == (ioasc & IPR_IOASC_IOASC_MASK)) 2439 return i; 2440 2441 return 0; 2442 } 2443 2444 /** 2445 * ipr_handle_log_data - Log an adapter error. 2446 * @ioa_cfg: ioa config struct 2447 * @hostrcb: hostrcb struct 2448 * 2449 * This function logs an adapter error to the system. 2450 * 2451 * Return value: 2452 * none 2453 **/ 2454 static void ipr_handle_log_data(struct ipr_ioa_cfg *ioa_cfg, 2455 struct ipr_hostrcb *hostrcb) 2456 { 2457 u32 ioasc; 2458 int error_index; 2459 struct ipr_hostrcb_type_21_error *error; 2460 2461 if (hostrcb->hcam.notify_type != IPR_HOST_RCB_NOTIF_TYPE_ERROR_LOG_ENTRY) 2462 return; 2463 2464 if (hostrcb->hcam.notifications_lost == IPR_HOST_RCB_NOTIFICATIONS_LOST) 2465 dev_err(&ioa_cfg->pdev->dev, "Error notifications lost\n"); 2466 2467 if (ioa_cfg->sis64) 2468 ioasc = be32_to_cpu(hostrcb->hcam.u.error64.fd_ioasc); 2469 else 2470 ioasc = be32_to_cpu(hostrcb->hcam.u.error.fd_ioasc); 2471 2472 if (!ioa_cfg->sis64 && (ioasc == IPR_IOASC_BUS_WAS_RESET || 2473 ioasc == IPR_IOASC_BUS_WAS_RESET_BY_OTHER)) { 2474 /* Tell the midlayer we had a bus reset so it will handle the UA properly */ 2475 scsi_report_bus_reset(ioa_cfg->host, 2476 hostrcb->hcam.u.error.fd_res_addr.bus); 2477 } 2478 2479 error_index = ipr_get_error(ioasc); 2480 2481 if (!ipr_error_table[error_index].log_hcam) 2482 return; 2483 2484 if (ioasc == IPR_IOASC_HW_CMD_FAILED && 2485 hostrcb->hcam.overlay_id == IPR_HOST_RCB_OVERLAY_ID_21) { 2486 error = &hostrcb->hcam.u.error64.u.type_21_error; 2487 2488 if (((be32_to_cpu(error->sense_data[0]) & 0x0000ff00) >> 8) == ILLEGAL_REQUEST && 2489 ioa_cfg->log_level <= IPR_DEFAULT_LOG_LEVEL) 2490 return; 2491 } 2492 2493 ipr_hcam_err(hostrcb, "%s\n", ipr_error_table[error_index].error); 2494 2495 /* Set indication we have logged an error */ 2496 ioa_cfg->errors_logged++; 2497 2498 if (ioa_cfg->log_level < ipr_error_table[error_index].log_hcam) 2499 return; 2500 if (be32_to_cpu(hostrcb->hcam.length) > sizeof(hostrcb->hcam.u.raw)) 2501 hostrcb->hcam.length = cpu_to_be32(sizeof(hostrcb->hcam.u.raw)); 2502 2503 switch (hostrcb->hcam.overlay_id) { 2504 case IPR_HOST_RCB_OVERLAY_ID_2: 2505 ipr_log_cache_error(ioa_cfg, hostrcb); 2506 break; 2507 case IPR_HOST_RCB_OVERLAY_ID_3: 2508 ipr_log_config_error(ioa_cfg, hostrcb); 2509 break; 2510 case IPR_HOST_RCB_OVERLAY_ID_4: 2511 case IPR_HOST_RCB_OVERLAY_ID_6: 2512 ipr_log_array_error(ioa_cfg, hostrcb); 2513 break; 2514 case IPR_HOST_RCB_OVERLAY_ID_7: 2515 ipr_log_dual_ioa_error(ioa_cfg, hostrcb); 2516 break; 2517 case IPR_HOST_RCB_OVERLAY_ID_12: 2518 ipr_log_enhanced_cache_error(ioa_cfg, hostrcb); 2519 break; 2520 case IPR_HOST_RCB_OVERLAY_ID_13: 2521 ipr_log_enhanced_config_error(ioa_cfg, hostrcb); 2522 break; 2523 case IPR_HOST_RCB_OVERLAY_ID_14: 2524 case IPR_HOST_RCB_OVERLAY_ID_16: 2525 ipr_log_enhanced_array_error(ioa_cfg, hostrcb); 2526 break; 2527 case IPR_HOST_RCB_OVERLAY_ID_17: 2528 ipr_log_enhanced_dual_ioa_error(ioa_cfg, hostrcb); 2529 break; 2530 case IPR_HOST_RCB_OVERLAY_ID_20: 2531 ipr_log_fabric_error(ioa_cfg, hostrcb); 2532 break; 2533 case IPR_HOST_RCB_OVERLAY_ID_21: 2534 ipr_log_sis64_device_error(ioa_cfg, hostrcb); 2535 break; 2536 case IPR_HOST_RCB_OVERLAY_ID_23: 2537 ipr_log_sis64_config_error(ioa_cfg, hostrcb); 2538 break; 2539 case IPR_HOST_RCB_OVERLAY_ID_24: 2540 case 
IPR_HOST_RCB_OVERLAY_ID_26: 2541 ipr_log_sis64_array_error(ioa_cfg, hostrcb); 2542 break; 2543 case IPR_HOST_RCB_OVERLAY_ID_30: 2544 ipr_log_sis64_fabric_error(ioa_cfg, hostrcb); 2545 break; 2546 case IPR_HOST_RCB_OVERLAY_ID_1: 2547 case IPR_HOST_RCB_OVERLAY_ID_DEFAULT: 2548 default: 2549 ipr_log_generic_error(ioa_cfg, hostrcb); 2550 break; 2551 } 2552 } 2553 2554 /** 2555 * ipr_process_error - Op done function for an adapter error log. 2556 * @ipr_cmd: ipr command struct 2557 * 2558 * This function is the op done function for an error log host 2559 * controlled async from the adapter. It will log the error and 2560 * send the HCAM back to the adapter. 2561 * 2562 * Return value: 2563 * none 2564 **/ 2565 static void ipr_process_error(struct ipr_cmnd *ipr_cmd) 2566 { 2567 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; 2568 struct ipr_hostrcb *hostrcb = ipr_cmd->u.hostrcb; 2569 u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc); 2570 u32 fd_ioasc; 2571 2572 if (ioa_cfg->sis64) 2573 fd_ioasc = be32_to_cpu(hostrcb->hcam.u.error64.fd_ioasc); 2574 else 2575 fd_ioasc = be32_to_cpu(hostrcb->hcam.u.error.fd_ioasc); 2576 2577 list_del(&hostrcb->queue); 2578 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q); 2579 2580 if (!ioasc) { 2581 ipr_handle_log_data(ioa_cfg, hostrcb); 2582 if (fd_ioasc == IPR_IOASC_NR_IOA_RESET_REQUIRED) 2583 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_ABBREV); 2584 } else if (ioasc != IPR_IOASC_IOA_WAS_RESET && 2585 ioasc != IPR_IOASC_ABORTED_CMD_TERM_BY_HOST) { 2586 dev_err(&ioa_cfg->pdev->dev, 2587 "Host RCB failed with IOASC: 0x%08X\n", ioasc); 2588 } 2589 2590 ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_LOG_DATA, hostrcb); 2591 } 2592 2593 /** 2594 * ipr_timeout - An internally generated op has timed out. 2595 * @ipr_cmd: ipr command struct 2596 * 2597 * This function blocks host requests and initiates an 2598 * adapter reset. 2599 * 2600 * Return value: 2601 * none 2602 **/ 2603 static void ipr_timeout(struct ipr_cmnd *ipr_cmd) 2604 { 2605 unsigned long lock_flags = 0; 2606 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; 2607 2608 ENTER; 2609 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); 2610 2611 ioa_cfg->errors_logged++; 2612 dev_err(&ioa_cfg->pdev->dev, 2613 "Adapter being reset due to command timeout.\n"); 2614 2615 if (WAIT_FOR_DUMP == ioa_cfg->sdt_state) 2616 ioa_cfg->sdt_state = GET_DUMP; 2617 2618 if (!ioa_cfg->in_reset_reload || ioa_cfg->reset_cmd == ipr_cmd) 2619 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE); 2620 2621 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); 2622 LEAVE; 2623 } 2624 2625 /** 2626 * ipr_oper_timeout - Adapter timed out transitioning to operational 2627 * @ipr_cmd: ipr command struct 2628 * 2629 * This function blocks host requests and initiates an 2630 * adapter reset. 
2630 * adapter reset.
2631 * 2632 * Return value: 2633 * none 2634 **/ 2635 static void ipr_oper_timeout(struct ipr_cmnd *ipr_cmd) 2636 { 2637 unsigned long lock_flags = 0; 2638 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; 2639 2640 ENTER; 2641 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); 2642 2643 ioa_cfg->errors_logged++; 2644 dev_err(&ioa_cfg->pdev->dev, 2645 "Adapter timed out transitioning to operational.\n"); 2646 2647 if (WAIT_FOR_DUMP == ioa_cfg->sdt_state) 2648 ioa_cfg->sdt_state = GET_DUMP; 2649 2650 if (!ioa_cfg->in_reset_reload || ioa_cfg->reset_cmd == ipr_cmd) { 2651 if (ipr_fastfail) 2652 ioa_cfg->reset_retries += IPR_NUM_RESET_RELOAD_RETRIES; 2653 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE); 2654 } 2655 2656 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); 2657 LEAVE; 2658 } 2659 2660 /** 2661 * ipr_find_ses_entry - Find matching SES in SES table 2662 * @res: resource entry struct of SES 2663 * 2664 * Return value: 2665 * pointer to SES table entry / NULL on failure 2666 **/ 2667 static const struct ipr_ses_table_entry * 2668 ipr_find_ses_entry(struct ipr_resource_entry *res) 2669 { 2670 int i, j, matches; 2671 struct ipr_std_inq_vpids *vpids; 2672 const struct ipr_ses_table_entry *ste = ipr_ses_table; 2673 2674 for (i = 0; i < ARRAY_SIZE(ipr_ses_table); i++, ste++) { 2675 for (j = 0, matches = 0; j < IPR_PROD_ID_LEN; j++) { 2676 if (ste->compare_product_id_byte[j] == 'X') { 2677 vpids = &res->std_inq_data.vpids; 2678 if (vpids->product_id[j] == ste->product_id[j]) 2679 matches++; 2680 else 2681 break; 2682 } else 2683 matches++; 2684 } 2685 2686 if (matches == IPR_PROD_ID_LEN) 2687 return ste; 2688 } 2689 2690 return NULL; 2691 } 2692 2693 /** 2694 * ipr_get_max_scsi_speed - Determine max SCSI speed for a given bus 2695 * @ioa_cfg: ioa config struct 2696 * @bus: SCSI bus 2697 * @bus_width: bus width 2698 * 2699 * Return value: 2700 * SCSI bus speed in units of 100KHz, 1600 is 160 MHz 2701 * For a 2-byte wide SCSI bus, the maximum transfer speed is 2702 * twice the maximum transfer rate (e.g. for a wide enabled bus, 2703 * max 160MHz = max 320MB/sec). 2704 **/ 2705 static u32 ipr_get_max_scsi_speed(struct ipr_ioa_cfg *ioa_cfg, u8 bus, u8 bus_width) 2706 { 2707 struct ipr_resource_entry *res; 2708 const struct ipr_ses_table_entry *ste; 2709 u32 max_xfer_rate = IPR_MAX_SCSI_RATE(bus_width); 2710 2711 /* Loop through each config table entry in the config table buffer */ 2712 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) { 2713 if (!(IPR_IS_SES_DEVICE(res->std_inq_data))) 2714 continue; 2715 2716 if (bus != res->bus) 2717 continue; 2718 2719 if (!(ste = ipr_find_ses_entry(res))) 2720 continue; 2721 2722 max_xfer_rate = (ste->max_bus_speed_limit * 10) / (bus_width / 8); 2723 } 2724 2725 return max_xfer_rate; 2726 } 2727 2728 /** 2729 * ipr_wait_iodbg_ack - Wait for an IODEBUG ACK from the IOA 2730 * @ioa_cfg: ioa config struct 2731 * @max_delay: max delay in micro-seconds to wait 2732 * 2733 * Waits for an IODEBUG ACK from the IOA, doing busy looping. 
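 * The wait between polls doubles on each pass (1, 2, 4, ... microseconds),
 * so, for example, a max_delay of 1000 costs on the order of ten reads of
 * the sense interrupt register rather than a thousand.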
2734 * 2735 * Return value: 2736 * 0 on success / other on failure 2737 **/ 2738 static int ipr_wait_iodbg_ack(struct ipr_ioa_cfg *ioa_cfg, int max_delay) 2739 { 2740 volatile u32 pcii_reg; 2741 int delay = 1; 2742 2743 /* Read interrupt reg until IOA signals IO Debug Acknowledge */ 2744 while (delay < max_delay) { 2745 pcii_reg = readl(ioa_cfg->regs.sense_interrupt_reg); 2746 2747 if (pcii_reg & IPR_PCII_IO_DEBUG_ACKNOWLEDGE) 2748 return 0; 2749 2750 /* udelay cannot be used if delay is more than a few milliseconds */ 2751 if ((delay / 1000) > MAX_UDELAY_MS) 2752 mdelay(delay / 1000); 2753 else 2754 udelay(delay); 2755 2756 delay += delay; 2757 } 2758 return -EIO; 2759 } 2760 2761 /** 2762 * ipr_get_sis64_dump_data_section - Dump IOA memory 2763 * @ioa_cfg: ioa config struct 2764 * @start_addr: adapter address to dump 2765 * @dest: destination kernel buffer 2766 * @length_in_words: length to dump in 4 byte words 2767 * 2768 * Return value: 2769 * 0 on success 2770 **/ 2771 static int ipr_get_sis64_dump_data_section(struct ipr_ioa_cfg *ioa_cfg, 2772 u32 start_addr, 2773 __be32 *dest, u32 length_in_words) 2774 { 2775 int i; 2776 2777 for (i = 0; i < length_in_words; i++) { 2778 writel(start_addr+(i*4), ioa_cfg->regs.dump_addr_reg); 2779 *dest = cpu_to_be32(readl(ioa_cfg->regs.dump_data_reg)); 2780 dest++; 2781 } 2782 2783 return 0; 2784 } 2785 2786 /** 2787 * ipr_get_ldump_data_section - Dump IOA memory 2788 * @ioa_cfg: ioa config struct 2789 * @start_addr: adapter address to dump 2790 * @dest: destination kernel buffer 2791 * @length_in_words: length to dump in 4 byte words 2792 * 2793 * Return value: 2794 * 0 on success / -EIO on failure 2795 **/ 2796 static int ipr_get_ldump_data_section(struct ipr_ioa_cfg *ioa_cfg, 2797 u32 start_addr, 2798 __be32 *dest, u32 length_in_words) 2799 { 2800 volatile u32 temp_pcii_reg; 2801 int i, delay = 0; 2802 2803 if (ioa_cfg->sis64) 2804 return ipr_get_sis64_dump_data_section(ioa_cfg, start_addr, 2805 dest, length_in_words); 2806 2807 /* Write IOA interrupt reg starting LDUMP state */ 2808 writel((IPR_UPROCI_RESET_ALERT | IPR_UPROCI_IO_DEBUG_ALERT), 2809 ioa_cfg->regs.set_uproc_interrupt_reg32); 2810 2811 /* Wait for IO debug acknowledge */ 2812 if (ipr_wait_iodbg_ack(ioa_cfg, 2813 IPR_LDUMP_MAX_LONG_ACK_DELAY_IN_USEC)) { 2814 dev_err(&ioa_cfg->pdev->dev, 2815 "IOA dump long data transfer timeout\n"); 2816 return -EIO; 2817 } 2818 2819 /* Signal LDUMP interlocked - clear IO debug ack */ 2820 writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE, 2821 ioa_cfg->regs.clr_interrupt_reg); 2822 2823 /* Write Mailbox with starting address */ 2824 writel(start_addr, ioa_cfg->ioa_mailbox); 2825 2826 /* Signal address valid - clear IOA Reset alert */ 2827 writel(IPR_UPROCI_RESET_ALERT, 2828 ioa_cfg->regs.clr_uproc_interrupt_reg32); 2829 2830 for (i = 0; i < length_in_words; i++) { 2831 /* Wait for IO debug acknowledge */ 2832 if (ipr_wait_iodbg_ack(ioa_cfg, 2833 IPR_LDUMP_MAX_SHORT_ACK_DELAY_IN_USEC)) { 2834 dev_err(&ioa_cfg->pdev->dev, 2835 "IOA dump short data transfer timeout\n"); 2836 return -EIO; 2837 } 2838 2839 /* Read data from mailbox and increment destination pointer */ 2840 *dest = cpu_to_be32(readl(ioa_cfg->ioa_mailbox)); 2841 dest++; 2842 2843 /* For all but the last word of data, signal data received */ 2844 if (i < (length_in_words - 1)) { 2845 /* Signal dump data received - Clear IO debug Ack */ 2846 writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE, 2847 ioa_cfg->regs.clr_interrupt_reg); 2848 } 2849 } 2850 2851 /* Signal end of block transfer. 
Set reset alert then clear IO debug ack */ 2852 writel(IPR_UPROCI_RESET_ALERT, 2853 ioa_cfg->regs.set_uproc_interrupt_reg32); 2854 2855 writel(IPR_UPROCI_IO_DEBUG_ALERT, 2856 ioa_cfg->regs.clr_uproc_interrupt_reg32); 2857 2858 /* Signal dump data received - Clear IO debug Ack */ 2859 writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE, 2860 ioa_cfg->regs.clr_interrupt_reg); 2861 2862 /* Wait for IOA to signal LDUMP exit - IOA reset alert will be cleared */ 2863 while (delay < IPR_LDUMP_MAX_SHORT_ACK_DELAY_IN_USEC) { 2864 temp_pcii_reg = 2865 readl(ioa_cfg->regs.sense_uproc_interrupt_reg32); 2866 2867 if (!(temp_pcii_reg & IPR_UPROCI_RESET_ALERT)) 2868 return 0; 2869 2870 udelay(10); 2871 delay += 10; 2872 } 2873 2874 return 0; 2875 } 2876 2877 #ifdef CONFIG_SCSI_IPR_DUMP 2878 /** 2879 * ipr_sdt_copy - Copy Smart Dump Table to kernel buffer 2880 * @ioa_cfg: ioa config struct 2881 * @pci_address: adapter address 2882 * @length: length of data to copy 2883 * 2884 * Copy data from PCI adapter to kernel buffer. 2885 * Note: length MUST be a 4 byte multiple 2886 * Return value: 2887 * 0 on success / other on failure 2888 **/ 2889 static int ipr_sdt_copy(struct ipr_ioa_cfg *ioa_cfg, 2890 unsigned long pci_address, u32 length) 2891 { 2892 int bytes_copied = 0; 2893 int cur_len, rc, rem_len, rem_page_len, max_dump_size; 2894 __be32 *page; 2895 unsigned long lock_flags = 0; 2896 struct ipr_ioa_dump *ioa_dump = &ioa_cfg->dump->ioa_dump; 2897 2898 if (ioa_cfg->sis64) 2899 max_dump_size = IPR_FMT3_MAX_IOA_DUMP_SIZE; 2900 else 2901 max_dump_size = IPR_FMT2_MAX_IOA_DUMP_SIZE; 2902 2903 while (bytes_copied < length && 2904 (ioa_dump->hdr.len + bytes_copied) < max_dump_size) { 2905 if (ioa_dump->page_offset >= PAGE_SIZE || 2906 ioa_dump->page_offset == 0) { 2907 page = (__be32 *)__get_free_page(GFP_ATOMIC); 2908 2909 if (!page) { 2910 ipr_trace; 2911 return bytes_copied; 2912 } 2913 2914 ioa_dump->page_offset = 0; 2915 ioa_dump->ioa_data[ioa_dump->next_page_index] = page; 2916 ioa_dump->next_page_index++; 2917 } else 2918 page = ioa_dump->ioa_data[ioa_dump->next_page_index - 1]; 2919 2920 rem_len = length - bytes_copied; 2921 rem_page_len = PAGE_SIZE - ioa_dump->page_offset; 2922 cur_len = min(rem_len, rem_page_len); 2923 2924 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); 2925 if (ioa_cfg->sdt_state == ABORT_DUMP) { 2926 rc = -EIO; 2927 } else { 2928 rc = ipr_get_ldump_data_section(ioa_cfg, 2929 pci_address + bytes_copied, 2930 &page[ioa_dump->page_offset / 4], 2931 (cur_len / sizeof(u32))); 2932 } 2933 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); 2934 2935 if (!rc) { 2936 ioa_dump->page_offset += cur_len; 2937 bytes_copied += cur_len; 2938 } else { 2939 ipr_trace; 2940 break; 2941 } 2942 schedule(); 2943 } 2944 2945 return bytes_copied; 2946 } 2947 2948 /** 2949 * ipr_init_dump_entry_hdr - Initialize a dump entry header. 2950 * @hdr: dump entry header struct 2951 * 2952 * Return value: 2953 * nothing 2954 **/ 2955 static void ipr_init_dump_entry_hdr(struct ipr_dump_entry_header *hdr) 2956 { 2957 hdr->eye_catcher = IPR_DUMP_EYE_CATCHER; 2958 hdr->num_elems = 1; 2959 hdr->offset = sizeof(*hdr); 2960 hdr->status = IPR_DUMP_STATUS_SUCCESS; 2961 } 2962 2963 /** 2964 * ipr_dump_ioa_type_data - Fill in the adapter type in the dump. 
2965 * @ioa_cfg: ioa config struct 2966 * @driver_dump: driver dump struct 2967 * 2968 * Return value: 2969 * nothing 2970 **/ 2971 static void ipr_dump_ioa_type_data(struct ipr_ioa_cfg *ioa_cfg, 2972 struct ipr_driver_dump *driver_dump) 2973 { 2974 struct ipr_inquiry_page3 *ucode_vpd = &ioa_cfg->vpd_cbs->page3_data; 2975 2976 ipr_init_dump_entry_hdr(&driver_dump->ioa_type_entry.hdr); 2977 driver_dump->ioa_type_entry.hdr.len = 2978 sizeof(struct ipr_dump_ioa_type_entry) - 2979 sizeof(struct ipr_dump_entry_header); 2980 driver_dump->ioa_type_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_BINARY; 2981 driver_dump->ioa_type_entry.hdr.id = IPR_DUMP_DRIVER_TYPE_ID; 2982 driver_dump->ioa_type_entry.type = ioa_cfg->type; 2983 driver_dump->ioa_type_entry.fw_version = (ucode_vpd->major_release << 24) | 2984 (ucode_vpd->card_type << 16) | (ucode_vpd->minor_release[0] << 8) | 2985 ucode_vpd->minor_release[1]; 2986 driver_dump->hdr.num_entries++; 2987 } 2988 2989 /** 2990 * ipr_dump_version_data - Fill in the driver version in the dump. 2991 * @ioa_cfg: ioa config struct 2992 * @driver_dump: driver dump struct 2993 * 2994 * Return value: 2995 * nothing 2996 **/ 2997 static void ipr_dump_version_data(struct ipr_ioa_cfg *ioa_cfg, 2998 struct ipr_driver_dump *driver_dump) 2999 { 3000 ipr_init_dump_entry_hdr(&driver_dump->version_entry.hdr); 3001 driver_dump->version_entry.hdr.len = 3002 sizeof(struct ipr_dump_version_entry) - 3003 sizeof(struct ipr_dump_entry_header); 3004 driver_dump->version_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_ASCII; 3005 driver_dump->version_entry.hdr.id = IPR_DUMP_DRIVER_VERSION_ID; 3006 strcpy(driver_dump->version_entry.version, IPR_DRIVER_VERSION); 3007 driver_dump->hdr.num_entries++; 3008 } 3009 3010 /** 3011 * ipr_dump_trace_data - Fill in the IOA trace in the dump. 3012 * @ioa_cfg: ioa config struct 3013 * @driver_dump: driver dump struct 3014 * 3015 * Return value: 3016 * nothing 3017 **/ 3018 static void ipr_dump_trace_data(struct ipr_ioa_cfg *ioa_cfg, 3019 struct ipr_driver_dump *driver_dump) 3020 { 3021 ipr_init_dump_entry_hdr(&driver_dump->trace_entry.hdr); 3022 driver_dump->trace_entry.hdr.len = 3023 sizeof(struct ipr_dump_trace_entry) - 3024 sizeof(struct ipr_dump_entry_header); 3025 driver_dump->trace_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_BINARY; 3026 driver_dump->trace_entry.hdr.id = IPR_DUMP_TRACE_ID; 3027 memcpy(driver_dump->trace_entry.trace, ioa_cfg->trace, IPR_TRACE_SIZE); 3028 driver_dump->hdr.num_entries++; 3029 } 3030 3031 /** 3032 * ipr_dump_location_data - Fill in the IOA location in the dump. 3033 * @ioa_cfg: ioa config struct 3034 * @driver_dump: driver dump struct 3035 * 3036 * Return value: 3037 * nothing 3038 **/ 3039 static void ipr_dump_location_data(struct ipr_ioa_cfg *ioa_cfg, 3040 struct ipr_driver_dump *driver_dump) 3041 { 3042 ipr_init_dump_entry_hdr(&driver_dump->location_entry.hdr); 3043 driver_dump->location_entry.hdr.len = 3044 sizeof(struct ipr_dump_location_entry) - 3045 sizeof(struct ipr_dump_entry_header); 3046 driver_dump->location_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_ASCII; 3047 driver_dump->location_entry.hdr.id = IPR_DUMP_LOCATION_ID; 3048 strcpy(driver_dump->location_entry.location, dev_name(&ioa_cfg->pdev->dev)); 3049 driver_dump->hdr.num_entries++; 3050 } 3051 3052 /** 3053 * ipr_get_ioa_dump - Perform a dump of the driver and adapter. 
3054 * @ioa_cfg: ioa config struct 3055 * @dump: dump struct 3056 * 3057 * Return value: 3058 * nothing 3059 **/ 3060 static void ipr_get_ioa_dump(struct ipr_ioa_cfg *ioa_cfg, struct ipr_dump *dump) 3061 { 3062 unsigned long start_addr, sdt_word; 3063 unsigned long lock_flags = 0; 3064 struct ipr_driver_dump *driver_dump = &dump->driver_dump; 3065 struct ipr_ioa_dump *ioa_dump = &dump->ioa_dump; 3066 u32 num_entries, max_num_entries, start_off, end_off; 3067 u32 max_dump_size, bytes_to_copy, bytes_copied, rc; 3068 struct ipr_sdt *sdt; 3069 int valid = 1; 3070 int i; 3071 3072 ENTER; 3073 3074 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); 3075 3076 if (ioa_cfg->sdt_state != READ_DUMP) { 3077 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); 3078 return; 3079 } 3080 3081 if (ioa_cfg->sis64) { 3082 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); 3083 ssleep(IPR_DUMP_DELAY_SECONDS); 3084 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); 3085 } 3086 3087 start_addr = readl(ioa_cfg->ioa_mailbox); 3088 3089 if (!ioa_cfg->sis64 && !ipr_sdt_is_fmt2(start_addr)) { 3090 dev_err(&ioa_cfg->pdev->dev, 3091 "Invalid dump table format: %lx\n", start_addr); 3092 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); 3093 return; 3094 } 3095 3096 dev_err(&ioa_cfg->pdev->dev, "Dump of IOA initiated\n"); 3097 3098 driver_dump->hdr.eye_catcher = IPR_DUMP_EYE_CATCHER; 3099 3100 /* Initialize the overall dump header */ 3101 driver_dump->hdr.len = sizeof(struct ipr_driver_dump); 3102 driver_dump->hdr.num_entries = 1; 3103 driver_dump->hdr.first_entry_offset = sizeof(struct ipr_dump_header); 3104 driver_dump->hdr.status = IPR_DUMP_STATUS_SUCCESS; 3105 driver_dump->hdr.os = IPR_DUMP_OS_LINUX; 3106 driver_dump->hdr.driver_name = IPR_DUMP_DRIVER_NAME; 3107 3108 ipr_dump_version_data(ioa_cfg, driver_dump); 3109 ipr_dump_location_data(ioa_cfg, driver_dump); 3110 ipr_dump_ioa_type_data(ioa_cfg, driver_dump); 3111 ipr_dump_trace_data(ioa_cfg, driver_dump); 3112 3113 /* Update dump_header */ 3114 driver_dump->hdr.len += sizeof(struct ipr_dump_entry_header); 3115 3116 /* IOA Dump entry */ 3117 ipr_init_dump_entry_hdr(&ioa_dump->hdr); 3118 ioa_dump->hdr.len = 0; 3119 ioa_dump->hdr.data_type = IPR_DUMP_DATA_TYPE_BINARY; 3120 ioa_dump->hdr.id = IPR_DUMP_IOA_DUMP_ID; 3121 3122 /* First entries in sdt are actually a list of dump addresses and 3123 lengths to gather the real dump data. sdt represents the pointer 3124 to the ioa generated dump table. Dump data will be extracted based 3125 on entries in this table */ 3126 sdt = &ioa_dump->sdt; 3127 3128 if (ioa_cfg->sis64) { 3129 max_num_entries = IPR_FMT3_NUM_SDT_ENTRIES; 3130 max_dump_size = IPR_FMT3_MAX_IOA_DUMP_SIZE; 3131 } else { 3132 max_num_entries = IPR_FMT2_NUM_SDT_ENTRIES; 3133 max_dump_size = IPR_FMT2_MAX_IOA_DUMP_SIZE; 3134 } 3135 3136 bytes_to_copy = offsetof(struct ipr_sdt, entry) + 3137 (max_num_entries * sizeof(struct ipr_sdt_entry)); 3138 rc = ipr_get_ldump_data_section(ioa_cfg, start_addr, (__be32 *)sdt, 3139 bytes_to_copy / sizeof(__be32)); 3140 3141 /* Smart Dump table is ready to use and the first entry is valid */ 3142 if (rc || ((be32_to_cpu(sdt->hdr.state) != IPR_FMT3_SDT_READY_TO_USE) && 3143 (be32_to_cpu(sdt->hdr.state) != IPR_FMT2_SDT_READY_TO_USE))) { 3144 dev_err(&ioa_cfg->pdev->dev, 3145 "Dump of IOA failed. 
Dump table not valid: %d, %X.\n", 3146 rc, be32_to_cpu(sdt->hdr.state)); 3147 driver_dump->hdr.status = IPR_DUMP_STATUS_FAILED; 3148 ioa_cfg->sdt_state = DUMP_OBTAINED; 3149 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); 3150 return; 3151 } 3152 3153 num_entries = be32_to_cpu(sdt->hdr.num_entries_used); 3154 3155 if (num_entries > max_num_entries) 3156 num_entries = max_num_entries; 3157 3158 /* Update dump length to the actual data to be copied */ 3159 dump->driver_dump.hdr.len += sizeof(struct ipr_sdt_header); 3160 if (ioa_cfg->sis64) 3161 dump->driver_dump.hdr.len += num_entries * sizeof(struct ipr_sdt_entry); 3162 else 3163 dump->driver_dump.hdr.len += max_num_entries * sizeof(struct ipr_sdt_entry); 3164 3165 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); 3166 3167 for (i = 0; i < num_entries; i++) { 3168 if (ioa_dump->hdr.len > max_dump_size) { 3169 driver_dump->hdr.status = IPR_DUMP_STATUS_QUAL_SUCCESS; 3170 break; 3171 } 3172 3173 if (sdt->entry[i].flags & IPR_SDT_VALID_ENTRY) { 3174 sdt_word = be32_to_cpu(sdt->entry[i].start_token); 3175 if (ioa_cfg->sis64) 3176 bytes_to_copy = be32_to_cpu(sdt->entry[i].end_token); 3177 else { 3178 start_off = sdt_word & IPR_FMT2_MBX_ADDR_MASK; 3179 end_off = be32_to_cpu(sdt->entry[i].end_token); 3180 3181 if (ipr_sdt_is_fmt2(sdt_word) && sdt_word) 3182 bytes_to_copy = end_off - start_off; 3183 else 3184 valid = 0; 3185 } 3186 if (valid) { 3187 if (bytes_to_copy > max_dump_size) { 3188 sdt->entry[i].flags &= ~IPR_SDT_VALID_ENTRY; 3189 continue; 3190 } 3191 3192 /* Copy data from adapter to driver buffers */ 3193 bytes_copied = ipr_sdt_copy(ioa_cfg, sdt_word, 3194 bytes_to_copy); 3195 3196 ioa_dump->hdr.len += bytes_copied; 3197 3198 if (bytes_copied != bytes_to_copy) { 3199 driver_dump->hdr.status = IPR_DUMP_STATUS_QUAL_SUCCESS; 3200 break; 3201 } 3202 } 3203 } 3204 } 3205 3206 dev_err(&ioa_cfg->pdev->dev, "Dump of IOA completed.\n"); 3207 3208 /* Update dump_header */ 3209 driver_dump->hdr.len += ioa_dump->hdr.len; 3210 wmb(); 3211 ioa_cfg->sdt_state = DUMP_OBTAINED; 3212 LEAVE; 3213 } 3214 3215 #else 3216 #define ipr_get_ioa_dump(ioa_cfg, dump) do { } while (0) 3217 #endif 3218 3219 /** 3220 * ipr_release_dump - Free adapter dump memory 3221 * @kref: kref struct 3222 * 3223 * Return value: 3224 * nothing 3225 **/ 3226 static void ipr_release_dump(struct kref *kref) 3227 { 3228 struct ipr_dump *dump = container_of(kref, struct ipr_dump, kref); 3229 struct ipr_ioa_cfg *ioa_cfg = dump->ioa_cfg; 3230 unsigned long lock_flags = 0; 3231 int i; 3232 3233 ENTER; 3234 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); 3235 ioa_cfg->dump = NULL; 3236 ioa_cfg->sdt_state = INACTIVE; 3237 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); 3238 3239 for (i = 0; i < dump->ioa_dump.next_page_index; i++) 3240 free_page((unsigned long) dump->ioa_dump.ioa_data[i]); 3241 3242 vfree(dump->ioa_dump.ioa_data); 3243 kfree(dump); 3244 LEAVE; 3245 } 3246 3247 /** 3248 * ipr_worker_thread - Worker thread 3249 * @work: ioa config struct 3250 * 3251 * Called at task level from a work thread. This function takes care 3252 * of adding and removing device from the mid-layer as configuration 3253 * changes are detected by the adapter. 
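 * Note: removals are handled first, one device per pass of the did_work
 * loop, because the host lock must be dropped around scsi_remove_device();
 * additions then re-walk the resource list (goto restart) since the lock
 * was also released around scsi_add_device().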
3254 * 3255 * Return value: 3256 * nothing 3257 **/ 3258 static void ipr_worker_thread(struct work_struct *work) 3259 { 3260 unsigned long lock_flags; 3261 struct ipr_resource_entry *res; 3262 struct scsi_device *sdev; 3263 struct ipr_dump *dump; 3264 struct ipr_ioa_cfg *ioa_cfg = 3265 container_of(work, struct ipr_ioa_cfg, work_q); 3266 u8 bus, target, lun; 3267 int did_work; 3268 3269 ENTER; 3270 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); 3271 3272 if (ioa_cfg->sdt_state == READ_DUMP) { 3273 dump = ioa_cfg->dump; 3274 if (!dump) { 3275 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); 3276 return; 3277 } 3278 kref_get(&dump->kref); 3279 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); 3280 ipr_get_ioa_dump(ioa_cfg, dump); 3281 kref_put(&dump->kref, ipr_release_dump); 3282 3283 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); 3284 if (ioa_cfg->sdt_state == DUMP_OBTAINED && !ioa_cfg->dump_timeout) 3285 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE); 3286 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); 3287 return; 3288 } 3289 3290 restart: 3291 do { 3292 did_work = 0; 3293 if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].allow_cmds) { 3294 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); 3295 return; 3296 } 3297 3298 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) { 3299 if (res->del_from_ml && res->sdev) { 3300 did_work = 1; 3301 sdev = res->sdev; 3302 if (!scsi_device_get(sdev)) { 3303 if (!res->add_to_ml) 3304 list_move_tail(&res->queue, &ioa_cfg->free_res_q); 3305 else 3306 res->del_from_ml = 0; 3307 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); 3308 scsi_remove_device(sdev); 3309 scsi_device_put(sdev); 3310 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); 3311 } 3312 break; 3313 } 3314 } 3315 } while (did_work); 3316 3317 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) { 3318 if (res->add_to_ml) { 3319 bus = res->bus; 3320 target = res->target; 3321 lun = res->lun; 3322 res->add_to_ml = 0; 3323 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); 3324 scsi_add_device(ioa_cfg->host, bus, target, lun); 3325 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); 3326 goto restart; 3327 } 3328 } 3329 3330 ioa_cfg->scan_done = 1; 3331 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); 3332 kobject_uevent(&ioa_cfg->host->shost_dev.kobj, KOBJ_CHANGE); 3333 LEAVE; 3334 } 3335 3336 #ifdef CONFIG_SCSI_IPR_TRACE 3337 /** 3338 * ipr_read_trace - Dump the adapter trace 3339 * @filp: open sysfs file 3340 * @kobj: kobject struct 3341 * @bin_attr: bin_attribute struct 3342 * @buf: buffer 3343 * @off: offset 3344 * @count: buffer size 3345 * 3346 * Return value: 3347 * number of bytes printed to buffer 3348 **/ 3349 static ssize_t ipr_read_trace(struct file *filp, struct kobject *kobj, 3350 struct bin_attribute *bin_attr, 3351 char *buf, loff_t off, size_t count) 3352 { 3353 struct device *dev = container_of(kobj, struct device, kobj); 3354 struct Scsi_Host *shost = class_to_shost(dev); 3355 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata; 3356 unsigned long lock_flags = 0; 3357 ssize_t ret; 3358 3359 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); 3360 ret = memory_read_from_buffer(buf, count, &off, ioa_cfg->trace, 3361 IPR_TRACE_SIZE); 3362 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); 3363 3364 return ret; 3365 } 3366 3367 static struct bin_attribute ipr_trace_attr = { 3368 .attr = { 3369 .name = "trace", 3370 .mode = S_IRUGO, 3371 }, 
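/* A .size of 0 advertises no fixed file size for the sysfs binary file;
 * reads are bounded by ipr_read_trace(), which stops at IPR_TRACE_SIZE. */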
3372 .size = 0, 3373 .read = ipr_read_trace, 3374 }; 3375 #endif 3376 3377 /** 3378 * ipr_show_fw_version - Show the firmware version 3379 * @dev: class device struct 3380 * @buf: buffer 3381 * 3382 * Return value: 3383 * number of bytes printed to buffer 3384 **/ 3385 static ssize_t ipr_show_fw_version(struct device *dev, 3386 struct device_attribute *attr, char *buf) 3387 { 3388 struct Scsi_Host *shost = class_to_shost(dev); 3389 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata; 3390 struct ipr_inquiry_page3 *ucode_vpd = &ioa_cfg->vpd_cbs->page3_data; 3391 unsigned long lock_flags = 0; 3392 int len; 3393 3394 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); 3395 len = snprintf(buf, PAGE_SIZE, "%02X%02X%02X%02X\n", 3396 ucode_vpd->major_release, ucode_vpd->card_type, 3397 ucode_vpd->minor_release[0], 3398 ucode_vpd->minor_release[1]); 3399 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); 3400 return len; 3401 } 3402 3403 static struct device_attribute ipr_fw_version_attr = { 3404 .attr = { 3405 .name = "fw_version", 3406 .mode = S_IRUGO, 3407 }, 3408 .show = ipr_show_fw_version, 3409 }; 3410 3411 /** 3412 * ipr_show_log_level - Show the adapter's error logging level 3413 * @dev: class device struct 3414 * @buf: buffer 3415 * 3416 * Return value: 3417 * number of bytes printed to buffer 3418 **/ 3419 static ssize_t ipr_show_log_level(struct device *dev, 3420 struct device_attribute *attr, char *buf) 3421 { 3422 struct Scsi_Host *shost = class_to_shost(dev); 3423 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata; 3424 unsigned long lock_flags = 0; 3425 int len; 3426 3427 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); 3428 len = snprintf(buf, PAGE_SIZE, "%d\n", ioa_cfg->log_level); 3429 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); 3430 return len; 3431 } 3432 3433 /** 3434 * ipr_store_log_level - Change the adapter's error logging level 3435 * @dev: class device struct 3436 * @buf: buffer 3437 * 3438 * Return value: 3439 * number of bytes printed to buffer 3440 **/ 3441 static ssize_t ipr_store_log_level(struct device *dev, 3442 struct device_attribute *attr, 3443 const char *buf, size_t count) 3444 { 3445 struct Scsi_Host *shost = class_to_shost(dev); 3446 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata; 3447 unsigned long lock_flags = 0; 3448 3449 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); 3450 ioa_cfg->log_level = simple_strtoul(buf, NULL, 10); 3451 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); 3452 return strlen(buf); 3453 } 3454 3455 static struct device_attribute ipr_log_level_attr = { 3456 .attr = { 3457 .name = "log_level", 3458 .mode = S_IRUGO | S_IWUSR, 3459 }, 3460 .show = ipr_show_log_level, 3461 .store = ipr_store_log_level 3462 }; 3463 3464 /** 3465 * ipr_store_diagnostics - IOA Diagnostics interface 3466 * @dev: device struct 3467 * @buf: buffer 3468 * @count: buffer size 3469 * 3470 * This function will reset the adapter and wait a reasonable 3471 * amount of time for any errors that the adapter might log. 
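 * Illustrative usage from userspace (the host number varies by system;
 * any written value triggers the reset):
 *
 *   echo 1 > /sys/class/scsi_host/host0/run_diagnostics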
3472 * 3473 * Return value: 3474 * count on success / other on failure 3475 **/ 3476 static ssize_t ipr_store_diagnostics(struct device *dev, 3477 struct device_attribute *attr, 3478 const char *buf, size_t count) 3479 { 3480 struct Scsi_Host *shost = class_to_shost(dev); 3481 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata; 3482 unsigned long lock_flags = 0; 3483 int rc = count; 3484 3485 if (!capable(CAP_SYS_ADMIN)) 3486 return -EACCES; 3487 3488 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); 3489 while (ioa_cfg->in_reset_reload) { 3490 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); 3491 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload); 3492 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); 3493 } 3494 3495 ioa_cfg->errors_logged = 0; 3496 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NORMAL); 3497 3498 if (ioa_cfg->in_reset_reload) { 3499 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); 3500 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload); 3501 3502 /* Wait for a second for any errors to be logged */ 3503 msleep(1000); 3504 } else { 3505 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); 3506 return -EIO; 3507 } 3508 3509 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); 3510 if (ioa_cfg->in_reset_reload || ioa_cfg->errors_logged) 3511 rc = -EIO; 3512 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); 3513 3514 return rc; 3515 } 3516 3517 static struct device_attribute ipr_diagnostics_attr = { 3518 .attr = { 3519 .name = "run_diagnostics", 3520 .mode = S_IWUSR, 3521 }, 3522 .store = ipr_store_diagnostics 3523 }; 3524 3525 /** 3526 * ipr_show_adapter_state - Show the adapter's state 3527 * @class_dev: device struct 3528 * @buf: buffer 3529 * 3530 * Return value: 3531 * number of bytes printed to buffer 3532 **/ 3533 static ssize_t ipr_show_adapter_state(struct device *dev, 3534 struct device_attribute *attr, char *buf) 3535 { 3536 struct Scsi_Host *shost = class_to_shost(dev); 3537 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata; 3538 unsigned long lock_flags = 0; 3539 int len; 3540 3541 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); 3542 if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead) 3543 len = snprintf(buf, PAGE_SIZE, "offline\n"); 3544 else 3545 len = snprintf(buf, PAGE_SIZE, "online\n"); 3546 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); 3547 return len; 3548 } 3549 3550 /** 3551 * ipr_store_adapter_state - Change adapter state 3552 * @dev: device struct 3553 * @buf: buffer 3554 * @count: buffer size 3555 * 3556 * This function will change the adapter's state. 
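 * Writing "online" to a dead adapter re-enables its HRR queues and
 * kicks off a fresh IOA reset. Illustrative usage (host number is an
 * example):
 *
 *   echo online > /sys/class/scsi_host/host0/online_state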
3557 * 3558 * Return value: 3559 * count on success / other on failure 3560 **/ 3561 static ssize_t ipr_store_adapter_state(struct device *dev, 3562 struct device_attribute *attr, 3563 const char *buf, size_t count) 3564 { 3565 struct Scsi_Host *shost = class_to_shost(dev); 3566 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata; 3567 unsigned long lock_flags; 3568 int result = count, i; 3569 3570 if (!capable(CAP_SYS_ADMIN)) 3571 return -EACCES; 3572 3573 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); 3574 if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead && 3575 !strncmp(buf, "online", 6)) { 3576 for (i = 0; i < ioa_cfg->hrrq_num; i++) { 3577 spin_lock(&ioa_cfg->hrrq[i]._lock); 3578 ioa_cfg->hrrq[i].ioa_is_dead = 0; 3579 spin_unlock(&ioa_cfg->hrrq[i]._lock); 3580 } 3581 wmb(); 3582 ioa_cfg->reset_retries = 0; 3583 ioa_cfg->in_ioa_bringdown = 0; 3584 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE); 3585 } 3586 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); 3587 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload); 3588 3589 return result; 3590 } 3591 3592 static struct device_attribute ipr_ioa_state_attr = { 3593 .attr = { 3594 .name = "online_state", 3595 .mode = S_IRUGO | S_IWUSR, 3596 }, 3597 .show = ipr_show_adapter_state, 3598 .store = ipr_store_adapter_state 3599 }; 3600 3601 /** 3602 * ipr_store_reset_adapter - Reset the adapter 3603 * @dev: device struct 3604 * @buf: buffer 3605 * @count: buffer size 3606 * 3607 * This function will reset the adapter. 3608 * 3609 * Return value: 3610 * count on success / other on failure 3611 **/ 3612 static ssize_t ipr_store_reset_adapter(struct device *dev, 3613 struct device_attribute *attr, 3614 const char *buf, size_t count) 3615 { 3616 struct Scsi_Host *shost = class_to_shost(dev); 3617 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata; 3618 unsigned long lock_flags; 3619 int result = count; 3620 3621 if (!capable(CAP_SYS_ADMIN)) 3622 return -EACCES; 3623 3624 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); 3625 if (!ioa_cfg->in_reset_reload) 3626 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NORMAL); 3627 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); 3628 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload); 3629 3630 return result; 3631 } 3632 3633 static struct device_attribute ipr_ioa_reset_attr = { 3634 .attr = { 3635 .name = "reset_host", 3636 .mode = S_IWUSR, 3637 }, 3638 .store = ipr_store_reset_adapter 3639 }; 3640 3641 static int ipr_iopoll(struct blk_iopoll *iop, int budget); 3642 /** 3643 * ipr_show_iopoll_weight - Show ipr polling mode 3644 * @dev: class device struct 3645 * @buf: buffer 3646 * 3647 * Return value: 3648 * number of bytes printed to buffer 3649 **/ 3650 static ssize_t ipr_show_iopoll_weight(struct device *dev, 3651 struct device_attribute *attr, char *buf) 3652 { 3653 struct Scsi_Host *shost = class_to_shost(dev); 3654 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata; 3655 unsigned long lock_flags = 0; 3656 int len; 3657 3658 spin_lock_irqsave(shost->host_lock, lock_flags); 3659 len = snprintf(buf, PAGE_SIZE, "%d\n", ioa_cfg->iopoll_weight); 3660 spin_unlock_irqrestore(shost->host_lock, lock_flags); 3661 3662 return len; 3663 } 3664 3665 /** 3666 * ipr_store_iopoll_weight - Change the adapter's polling mode 3667 * @dev: class device struct 3668 * @buf: buffer 3669 * 3670 * Return value: 3671 * number of bytes printed to buffer 3672 **/ 3673 static ssize_t ipr_store_iopoll_weight(struct device *dev, 
3674 struct device_attribute *attr, 3675 const char *buf, size_t count) 3676 { 3677 struct Scsi_Host *shost = class_to_shost(dev); 3678 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata; 3679 unsigned long user_iopoll_weight; 3680 unsigned long lock_flags = 0; 3681 int i; 3682 3683 if (!ioa_cfg->sis64) { 3684 dev_info(&ioa_cfg->pdev->dev, "blk-iopoll not supported on this adapter\n"); 3685 return -EINVAL; 3686 } 3687 if (kstrtoul(buf, 10, &user_iopoll_weight)) 3688 return -EINVAL; 3689 3690 if (user_iopoll_weight > 256) { 3691 dev_info(&ioa_cfg->pdev->dev, "Invalid blk-iopoll weight. It must not exceed 256\n"); 3692 return -EINVAL; 3693 } 3694 3695 if (user_iopoll_weight == ioa_cfg->iopoll_weight) { 3696 dev_info(&ioa_cfg->pdev->dev, "blk-iopoll weight unchanged; it is already %lu\n", user_iopoll_weight); 3697 return strlen(buf); 3698 } 3699 3700 if (ioa_cfg->iopoll_weight && ioa_cfg->sis64 && ioa_cfg->nvectors > 1) { 3701 for (i = 1; i < ioa_cfg->hrrq_num; i++) 3702 blk_iopoll_disable(&ioa_cfg->hrrq[i].iopoll); 3703 } 3704 3705 spin_lock_irqsave(shost->host_lock, lock_flags); 3706 ioa_cfg->iopoll_weight = user_iopoll_weight; 3707 if (ioa_cfg->iopoll_weight && ioa_cfg->sis64 && ioa_cfg->nvectors > 1) { 3708 for (i = 1; i < ioa_cfg->hrrq_num; i++) { 3709 blk_iopoll_init(&ioa_cfg->hrrq[i].iopoll, 3710 ioa_cfg->iopoll_weight, ipr_iopoll); 3711 blk_iopoll_enable(&ioa_cfg->hrrq[i].iopoll); 3712 } 3713 } 3714 spin_unlock_irqrestore(shost->host_lock, lock_flags); 3715 3716 return strlen(buf); 3717 } 3718 3719 static struct device_attribute ipr_iopoll_weight_attr = { 3720 .attr = { 3721 .name = "iopoll_weight", 3722 .mode = S_IRUGO | S_IWUSR, 3723 }, 3724 .show = ipr_show_iopoll_weight, 3725 .store = ipr_store_iopoll_weight 3726 }; 3727 3728 /** 3729 * ipr_alloc_ucode_buffer - Allocates a microcode download buffer 3730 * @buf_len: buffer length 3731 * 3732 * Allocates a DMA'able buffer in chunks and assembles a scatter/gather 3733 * list to use for microcode download 3734 * 3735 * Return value: 3736 * pointer to sglist / NULL on failure 3737 **/ 3738 static struct ipr_sglist *ipr_alloc_ucode_buffer(int buf_len) 3739 { 3740 int sg_size, order, bsize_elem, num_elem, i, j; 3741 struct ipr_sglist *sglist; 3742 struct scatterlist *scatterlist; 3743 struct page *page; 3744 3745 /* Get the minimum size per scatter/gather element */ 3746 sg_size = buf_len / (IPR_MAX_SGLIST - 1); 3747 3748 /* Get the actual size per element */ 3749 order = get_order(sg_size); 3750 3751 /* Determine the actual number of bytes per element */ 3752 bsize_elem = PAGE_SIZE * (1 << order); 3753 3754 /* Determine the actual number of sg entries needed */ 3755 if (buf_len % bsize_elem) 3756 num_elem = (buf_len / bsize_elem) + 1; 3757 else 3758 num_elem = buf_len / bsize_elem; 3759 3760 /* Allocate a scatter/gather list for the DMA */ 3761 sglist = kzalloc(sizeof(struct ipr_sglist) + 3762 (sizeof(struct scatterlist) * (num_elem - 1)), 3763 GFP_KERNEL); 3764 3765 if (sglist == NULL) { 3766 ipr_trace; 3767 return NULL; 3768 } 3769 3770 scatterlist = sglist->scatterlist; 3771 sg_init_table(scatterlist, num_elem); 3772 3773 sglist->order = order; 3774 sglist->num_sg = num_elem; 3775 3776 /* Allocate a bunch of sg elements */ 3777 for (i = 0; i < num_elem; i++) { 3778 page = alloc_pages(GFP_KERNEL, order); 3779 if (!page) { 3780 ipr_trace; 3781 3782 /* Free up what we already allocated */ 3783 for (j = i - 1; j >= 0; j--) 3784 __free_pages(sg_page(&scatterlist[j]), order); 3785 kfree(sglist); 3786 return NULL; 3787 } 3788 3789
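		/* Hand the new page to this scatterlist entry. Its length is
		 * left at zero here and is filled in later by
		 * ipr_copy_ucode_buffer when the image is copied in. */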
sg_set_page(&scatterlist[i], page, 0, 0); 3790 } 3791 3792 return sglist; 3793 } 3794 3795 /** 3796 * ipr_free_ucode_buffer - Frees a microcode download buffer 3797 * @p_dnld: scatter/gather list pointer 3798 * 3799 * Free a DMA'able ucode download buffer previously allocated with 3800 * ipr_alloc_ucode_buffer 3801 * 3802 * Return value: 3803 * nothing 3804 **/ 3805 static void ipr_free_ucode_buffer(struct ipr_sglist *sglist) 3806 { 3807 int i; 3808 3809 for (i = 0; i < sglist->num_sg; i++) 3810 __free_pages(sg_page(&sglist->scatterlist[i]), sglist->order); 3811 3812 kfree(sglist); 3813 } 3814 3815 /** 3816 * ipr_copy_ucode_buffer - Copy user buffer to kernel buffer 3817 * @sglist: scatter/gather list pointer 3818 * @buffer: buffer pointer 3819 * @len: buffer length 3820 * 3821 * Copy a microcode image from a user buffer into a buffer allocated by 3822 * ipr_alloc_ucode_buffer 3823 * 3824 * Return value: 3825 * 0 on success / other on failure 3826 **/ 3827 static int ipr_copy_ucode_buffer(struct ipr_sglist *sglist, 3828 u8 *buffer, u32 len) 3829 { 3830 int bsize_elem, i, result = 0; 3831 struct scatterlist *scatterlist; 3832 void *kaddr; 3833 3834 /* Determine the actual number of bytes per element */ 3835 bsize_elem = PAGE_SIZE * (1 << sglist->order); 3836 3837 scatterlist = sglist->scatterlist; 3838 3839 for (i = 0; i < (len / bsize_elem); i++, buffer += bsize_elem) { 3840 struct page *page = sg_page(&scatterlist[i]); 3841 3842 kaddr = kmap(page); 3843 memcpy(kaddr, buffer, bsize_elem); 3844 kunmap(page); 3845 3846 scatterlist[i].length = bsize_elem; 3847 3848 if (result != 0) { 3849 ipr_trace; 3850 return result; 3851 } 3852 } 3853 3854 if (len % bsize_elem) { 3855 struct page *page = sg_page(&scatterlist[i]); 3856 3857 kaddr = kmap(page); 3858 memcpy(kaddr, buffer, len % bsize_elem); 3859 kunmap(page); 3860 3861 scatterlist[i].length = len % bsize_elem; 3862 } 3863 3864 sglist->buffer_len = len; 3865 return result; 3866 } 3867 3868 /** 3869 * ipr_build_ucode_ioadl64 - Build a microcode download IOADL 3870 * @ipr_cmd: ipr command struct 3871 * @sglist: scatter/gather list 3872 * 3873 * Builds a microcode download IOA data list (IOADL). 3874 * 3875 **/ 3876 static void ipr_build_ucode_ioadl64(struct ipr_cmnd *ipr_cmd, 3877 struct ipr_sglist *sglist) 3878 { 3879 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb; 3880 struct ipr_ioadl64_desc *ioadl64 = ipr_cmd->i.ioadl64; 3881 struct scatterlist *scatterlist = sglist->scatterlist; 3882 int i; 3883 3884 ipr_cmd->dma_use_sg = sglist->num_dma_sg; 3885 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ; 3886 ioarcb->data_transfer_length = cpu_to_be32(sglist->buffer_len); 3887 3888 ioarcb->ioadl_len = 3889 cpu_to_be32(sizeof(struct ipr_ioadl64_desc) * ipr_cmd->dma_use_sg); 3890 for (i = 0; i < ipr_cmd->dma_use_sg; i++) { 3891 ioadl64[i].flags = cpu_to_be32(IPR_IOADL_FLAGS_WRITE); 3892 ioadl64[i].data_len = cpu_to_be32(sg_dma_len(&scatterlist[i])); 3893 ioadl64[i].address = cpu_to_be64(sg_dma_address(&scatterlist[i])); 3894 } 3895 3896 ioadl64[i-1].flags |= cpu_to_be32(IPR_IOADL_FLAGS_LAST); 3897 } 3898 3899 /** 3900 * ipr_build_ucode_ioadl - Build a microcode download IOADL 3901 * @ipr_cmd: ipr command struct 3902 * @sglist: scatter/gather list 3903 * 3904 * Builds a microcode download IOA data list (IOADL). 
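 * In this 32-bit descriptor format the write flag and the byte count
 * share one big-endian word per entry, and the final entry is tagged
 * with IPR_IOADL_FLAGS_LAST, mirroring the 64-bit variant above.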
3905 * 3906 **/ 3907 static void ipr_build_ucode_ioadl(struct ipr_cmnd *ipr_cmd, 3908 struct ipr_sglist *sglist) 3909 { 3910 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb; 3911 struct ipr_ioadl_desc *ioadl = ipr_cmd->i.ioadl; 3912 struct scatterlist *scatterlist = sglist->scatterlist; 3913 int i; 3914 3915 ipr_cmd->dma_use_sg = sglist->num_dma_sg; 3916 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ; 3917 ioarcb->data_transfer_length = cpu_to_be32(sglist->buffer_len); 3918 3919 ioarcb->ioadl_len = 3920 cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg); 3921 3922 for (i = 0; i < ipr_cmd->dma_use_sg; i++) { 3923 ioadl[i].flags_and_data_len = 3924 cpu_to_be32(IPR_IOADL_FLAGS_WRITE | sg_dma_len(&scatterlist[i])); 3925 ioadl[i].address = 3926 cpu_to_be32(sg_dma_address(&scatterlist[i])); 3927 } 3928 3929 ioadl[i-1].flags_and_data_len |= 3930 cpu_to_be32(IPR_IOADL_FLAGS_LAST); 3931 } 3932 3933 /** 3934 * ipr_update_ioa_ucode - Update IOA's microcode 3935 * @ioa_cfg: ioa config struct 3936 * @sglist: scatter/gather list 3937 * 3938 * Initiate an adapter reset to update the IOA's microcode 3939 * 3940 * Return value: 3941 * 0 on success / -EIO on failure 3942 **/ 3943 static int ipr_update_ioa_ucode(struct ipr_ioa_cfg *ioa_cfg, 3944 struct ipr_sglist *sglist) 3945 { 3946 unsigned long lock_flags; 3947 3948 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); 3949 while (ioa_cfg->in_reset_reload) { 3950 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); 3951 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload); 3952 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); 3953 } 3954 3955 if (ioa_cfg->ucode_sglist) { 3956 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); 3957 dev_err(&ioa_cfg->pdev->dev, 3958 "Microcode download already in progress\n"); 3959 return -EIO; 3960 } 3961 3962 sglist->num_dma_sg = dma_map_sg(&ioa_cfg->pdev->dev, 3963 sglist->scatterlist, sglist->num_sg, 3964 DMA_TO_DEVICE); 3965 3966 if (!sglist->num_dma_sg) { 3967 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); 3968 dev_err(&ioa_cfg->pdev->dev, 3969 "Failed to map microcode download buffer!\n"); 3970 return -EIO; 3971 } 3972 3973 ioa_cfg->ucode_sglist = sglist; 3974 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NORMAL); 3975 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); 3976 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload); 3977 3978 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); 3979 ioa_cfg->ucode_sglist = NULL; 3980 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); 3981 return 0; 3982 } 3983 3984 /** 3985 * ipr_store_update_fw - Update the firmware on the adapter 3986 * @class_dev: device struct 3987 * @buf: buffer 3988 * @count: buffer size 3989 * 3990 * This function will update the firmware on the adapter. 
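 * Illustrative usage (host number and image name are examples); the
 * image must be somewhere request_firmware() can find it, typically
 * under /lib/firmware:
 *
 *   echo ibm-adapter-ucode.img > /sys/class/scsi_host/host0/update_fw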
3991 * 3992 * Return value: 3993 * count on success / other on failure 3994 **/ 3995 static ssize_t ipr_store_update_fw(struct device *dev, 3996 struct device_attribute *attr, 3997 const char *buf, size_t count) 3998 { 3999 struct Scsi_Host *shost = class_to_shost(dev); 4000 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata; 4001 struct ipr_ucode_image_header *image_hdr; 4002 const struct firmware *fw_entry; 4003 struct ipr_sglist *sglist; 4004 char fname[100]; 4005 char *src; 4006 int len, result, dnld_size; 4007 4008 if (!capable(CAP_SYS_ADMIN)) 4009 return -EACCES; 4010 4011 /* snprintf() reports the untruncated length, so blindly writing fname[len - 1] can land out of bounds for long names; bounds-check first, then strip only a trailing newline rather than an arbitrary last character */ len = snprintf(fname, sizeof(fname), "%s", buf); if (len >= sizeof(fname)) return -ENAMETOOLONG; 4012 if (len && fname[len - 1] == '\n') fname[len - 1] = '\0'; 4013 4014 if (request_firmware(&fw_entry, fname, &ioa_cfg->pdev->dev)) { 4015 dev_err(&ioa_cfg->pdev->dev, "Firmware file %s not found\n", fname); 4016 return -EIO; 4017 } 4018 4019 image_hdr = (struct ipr_ucode_image_header *)fw_entry->data; 4020 4021 src = (u8 *)image_hdr + be32_to_cpu(image_hdr->header_length); 4022 dnld_size = fw_entry->size - be32_to_cpu(image_hdr->header_length); 4023 sglist = ipr_alloc_ucode_buffer(dnld_size); 4024 4025 if (!sglist) { 4026 dev_err(&ioa_cfg->pdev->dev, "Microcode buffer allocation failed\n"); 4027 release_firmware(fw_entry); 4028 return -ENOMEM; 4029 } 4030 4031 result = ipr_copy_ucode_buffer(sglist, src, dnld_size); 4032 4033 if (result) { 4034 dev_err(&ioa_cfg->pdev->dev, 4035 "Microcode buffer copy to DMA buffer failed\n"); 4036 goto out; 4037 } 4038 4039 ipr_info("Updating microcode, please be patient. This may take up to 30 minutes.\n"); 4040 4041 result = ipr_update_ioa_ucode(ioa_cfg, sglist); 4042 4043 if (!result) 4044 result = count; 4045 out: 4046 ipr_free_ucode_buffer(sglist); 4047 release_firmware(fw_entry); 4048 return result; 4049 } 4050 4051 static struct device_attribute ipr_update_fw_attr = { 4052 .attr = { 4053 .name = "update_fw", 4054 .mode = S_IWUSR, 4055 }, 4056 .store = ipr_store_update_fw 4057 }; 4058 4059 /** 4060 * ipr_show_fw_type - Show the adapter's firmware type.
4061 * @dev: class device struct 4062 * @buf: buffer 4063 * 4064 * Return value: 4065 * number of bytes printed to buffer 4066 **/ 4067 static ssize_t ipr_show_fw_type(struct device *dev, 4068 struct device_attribute *attr, char *buf) 4069 { 4070 struct Scsi_Host *shost = class_to_shost(dev); 4071 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata; 4072 unsigned long lock_flags = 0; 4073 int len; 4074 4075 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); 4076 len = snprintf(buf, PAGE_SIZE, "%d\n", ioa_cfg->sis64); 4077 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); 4078 return len; 4079 } 4080 4081 static struct device_attribute ipr_ioa_fw_type_attr = { 4082 .attr = { 4083 .name = "fw_type", 4084 .mode = S_IRUGO, 4085 }, 4086 .show = ipr_show_fw_type 4087 }; 4088 4089 static struct device_attribute *ipr_ioa_attrs[] = { 4090 &ipr_fw_version_attr, 4091 &ipr_log_level_attr, 4092 &ipr_diagnostics_attr, 4093 &ipr_ioa_state_attr, 4094 &ipr_ioa_reset_attr, 4095 &ipr_update_fw_attr, 4096 &ipr_ioa_fw_type_attr, 4097 &ipr_iopoll_weight_attr, 4098 NULL, 4099 }; 4100 4101 #ifdef CONFIG_SCSI_IPR_DUMP 4102 /** 4103 * ipr_read_dump - Dump the adapter 4104 * @filp: open sysfs file 4105 * @kobj: kobject struct 4106 * @bin_attr: bin_attribute struct 4107 * @buf: buffer 4108 * @off: offset 4109 * @count: buffer size 4110 * 4111 * Return value: 4112 * number of bytes printed to buffer 4113 **/ 4114 static ssize_t ipr_read_dump(struct file *filp, struct kobject *kobj, 4115 struct bin_attribute *bin_attr, 4116 char *buf, loff_t off, size_t count) 4117 { 4118 struct device *cdev = container_of(kobj, struct device, kobj); 4119 struct Scsi_Host *shost = class_to_shost(cdev); 4120 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata; 4121 struct ipr_dump *dump; 4122 unsigned long lock_flags = 0; 4123 char *src; 4124 int len, sdt_end; 4125 size_t rc = count; 4126 4127 if (!capable(CAP_SYS_ADMIN)) 4128 return -EACCES; 4129 4130 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); 4131 dump = ioa_cfg->dump; 4132 4133 if (ioa_cfg->sdt_state != DUMP_OBTAINED || !dump) { 4134 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); 4135 return 0; 4136 } 4137 kref_get(&dump->kref); 4138 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); 4139 4140 if (off > dump->driver_dump.hdr.len) { 4141 kref_put(&dump->kref, ipr_release_dump); 4142 return 0; 4143 } 4144 4145 if (off + count > dump->driver_dump.hdr.len) { 4146 count = dump->driver_dump.hdr.len - off; 4147 rc = count; 4148 } 4149 4150 if (count && off < sizeof(dump->driver_dump)) { 4151 if (off + count > sizeof(dump->driver_dump)) 4152 len = sizeof(dump->driver_dump) - off; 4153 else 4154 len = count; 4155 src = (u8 *)&dump->driver_dump + off; 4156 memcpy(buf, src, len); 4157 buf += len; 4158 off += len; 4159 count -= len; 4160 } 4161 4162 off -= sizeof(dump->driver_dump); 4163 4164 if (ioa_cfg->sis64) 4165 sdt_end = offsetof(struct ipr_ioa_dump, sdt.entry) + 4166 (be32_to_cpu(dump->ioa_dump.sdt.hdr.num_entries_used) * 4167 sizeof(struct ipr_sdt_entry)); 4168 else 4169 sdt_end = offsetof(struct ipr_ioa_dump, sdt.entry) + 4170 (IPR_FMT2_NUM_SDT_ENTRIES * sizeof(struct ipr_sdt_entry)); 4171 4172 if (count && off < sdt_end) { 4173 if (off + count > sdt_end) 4174 len = sdt_end - off; 4175 else 4176 len = count; 4177 src = (u8 *)&dump->ioa_dump + off; 4178 memcpy(buf, src, len); 4179 buf += len; 4180 off += len; 4181 count -= len; 4182 } 4183 4184 off -= sdt_end; 4185 4186 while (count) { 4187 if ((off 
& PAGE_MASK) != ((off + count) & PAGE_MASK)) 4188 len = PAGE_ALIGN(off) - off; 4189 else 4190 len = count; 4191 src = (u8 *)dump->ioa_dump.ioa_data[(off & PAGE_MASK) >> PAGE_SHIFT]; 4192 src += off & ~PAGE_MASK; 4193 memcpy(buf, src, len); 4194 buf += len; 4195 off += len; 4196 count -= len; 4197 } 4198 4199 kref_put(&dump->kref, ipr_release_dump); 4200 return rc; 4201 } 4202 4203 /** 4204 * ipr_alloc_dump - Prepare for adapter dump 4205 * @ioa_cfg: ioa config struct 4206 * 4207 * Return value: 4208 * 0 on success / other on failure 4209 **/ 4210 static int ipr_alloc_dump(struct ipr_ioa_cfg *ioa_cfg) 4211 { 4212 struct ipr_dump *dump; 4213 __be32 **ioa_data; 4214 unsigned long lock_flags = 0; 4215 4216 dump = kzalloc(sizeof(struct ipr_dump), GFP_KERNEL); 4217 4218 if (!dump) { 4219 ipr_err("Dump memory allocation failed\n"); 4220 return -ENOMEM; 4221 } 4222 4223 if (ioa_cfg->sis64) 4224 ioa_data = vmalloc(IPR_FMT3_MAX_NUM_DUMP_PAGES * sizeof(__be32 *)); 4225 else 4226 ioa_data = vmalloc(IPR_FMT2_MAX_NUM_DUMP_PAGES * sizeof(__be32 *)); 4227 4228 if (!ioa_data) { 4229 ipr_err("Dump memory allocation failed\n"); 4230 kfree(dump); 4231 return -ENOMEM; 4232 } 4233 4234 dump->ioa_dump.ioa_data = ioa_data; 4235 4236 kref_init(&dump->kref); 4237 dump->ioa_cfg = ioa_cfg; 4238 4239 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); 4240 4241 if (INACTIVE != ioa_cfg->sdt_state) { 4242 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); 4243 vfree(dump->ioa_dump.ioa_data); 4244 kfree(dump); 4245 return 0; 4246 } 4247 4248 ioa_cfg->dump = dump; 4249 ioa_cfg->sdt_state = WAIT_FOR_DUMP; 4250 if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead && !ioa_cfg->dump_taken) { 4251 ioa_cfg->dump_taken = 1; 4252 schedule_work(&ioa_cfg->work_q); 4253 } 4254 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); 4255 4256 return 0; 4257 } 4258 4259 /** 4260 * ipr_free_dump - Free adapter dump memory 4261 * @ioa_cfg: ioa config struct 4262 * 4263 * Return value: 4264 * 0 on success / other on failure 4265 **/ 4266 static int ipr_free_dump(struct ipr_ioa_cfg *ioa_cfg) 4267 { 4268 struct ipr_dump *dump; 4269 unsigned long lock_flags = 0; 4270 4271 ENTER; 4272 4273 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); 4274 dump = ioa_cfg->dump; 4275 if (!dump) { 4276 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); 4277 return 0; 4278 } 4279 4280 ioa_cfg->dump = NULL; 4281 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); 4282 4283 kref_put(&dump->kref, ipr_release_dump); 4284 4285 LEAVE; 4286 return 0; 4287 } 4288 4289 /** 4290 * ipr_write_dump - Setup dump state of adapter 4291 * @filp: open sysfs file 4292 * @kobj: kobject struct 4293 * @bin_attr: bin_attribute struct 4294 * @buf: buffer 4295 * @off: offset 4296 * @count: buffer size 4297 * 4298 * Return value: 4299 * number of bytes printed to buffer 4300 **/ 4301 static ssize_t ipr_write_dump(struct file *filp, struct kobject *kobj, 4302 struct bin_attribute *bin_attr, 4303 char *buf, loff_t off, size_t count) 4304 { 4305 struct device *cdev = container_of(kobj, struct device, kobj); 4306 struct Scsi_Host *shost = class_to_shost(cdev); 4307 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata; 4308 int rc; 4309 4310 if (!capable(CAP_SYS_ADMIN)) 4311 return -EACCES; 4312 4313 if (buf[0] == '1') 4314 rc = ipr_alloc_dump(ioa_cfg); 4315 else if (buf[0] == '0') 4316 rc = ipr_free_dump(ioa_cfg); 4317 else 4318 return -EINVAL; 4319 4320 if (rc) 4321 return rc; 4322 else 4323 return count; 4324 } 4325 4326 static 
struct bin_attribute ipr_dump_attr = { 4327 .attr = { 4328 .name = "dump", 4329 .mode = S_IRUSR | S_IWUSR, 4330 }, 4331 .size = 0, 4332 .read = ipr_read_dump, 4333 .write = ipr_write_dump 4334 }; 4335 #else 4336 static int ipr_free_dump(struct ipr_ioa_cfg *ioa_cfg) { return 0; } 4337 #endif 4338 4339 /** 4340 * ipr_change_queue_depth - Change the device's queue depth 4341 * @sdev: scsi device struct 4342 * @qdepth: depth to set 4343 * 4344 * 4345 * Return value: 4346 * actual depth set 4347 **/ 4348 static int ipr_change_queue_depth(struct scsi_device *sdev, int qdepth) 4349 { 4350 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata; 4351 struct ipr_resource_entry *res; 4352 unsigned long lock_flags = 0; 4353 4354 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); 4355 res = (struct ipr_resource_entry *)sdev->hostdata; 4356 4357 if (res && ipr_is_gata(res) && qdepth > IPR_MAX_CMD_PER_ATA_LUN) 4358 qdepth = IPR_MAX_CMD_PER_ATA_LUN; 4359 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); 4360 4361 scsi_change_queue_depth(sdev, qdepth); 4362 return sdev->queue_depth; 4363 } 4364 4365 /** 4366 * ipr_show_adapter_handle - Show the adapter's resource handle for this device 4367 * @dev: device struct 4368 * @attr: device attribute structure 4369 * @buf: buffer 4370 * 4371 * Return value: 4372 * number of bytes printed to buffer 4373 **/ 4374 static ssize_t ipr_show_adapter_handle(struct device *dev, struct device_attribute *attr, char *buf) 4375 { 4376 struct scsi_device *sdev = to_scsi_device(dev); 4377 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata; 4378 struct ipr_resource_entry *res; 4379 unsigned long lock_flags = 0; 4380 ssize_t len = -ENXIO; 4381 4382 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); 4383 res = (struct ipr_resource_entry *)sdev->hostdata; 4384 if (res) 4385 len = snprintf(buf, PAGE_SIZE, "%08X\n", res->res_handle); 4386 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); 4387 return len; 4388 } 4389 4390 static struct device_attribute ipr_adapter_handle_attr = { 4391 .attr = { 4392 .name = "adapter_handle", 4393 .mode = S_IRUSR, 4394 }, 4395 .show = ipr_show_adapter_handle 4396 }; 4397 4398 /** 4399 * ipr_show_resource_path - Show the resource path or the resource address for 4400 * this device.
4401 * @dev: device struct 4402 * @attr: device attribute structure 4403 * @buf: buffer 4404 * 4405 * Return value: 4406 * number of bytes printed to buffer 4407 **/ 4408 static ssize_t ipr_show_resource_path(struct device *dev, struct device_attribute *attr, char *buf) 4409 { 4410 struct scsi_device *sdev = to_scsi_device(dev); 4411 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata; 4412 struct ipr_resource_entry *res; 4413 unsigned long lock_flags = 0; 4414 ssize_t len = -ENXIO; 4415 char buffer[IPR_MAX_RES_PATH_LENGTH]; 4416 4417 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); 4418 res = (struct ipr_resource_entry *)sdev->hostdata; 4419 if (res && ioa_cfg->sis64) 4420 len = snprintf(buf, PAGE_SIZE, "%s\n", 4421 __ipr_format_res_path(res->res_path, buffer, 4422 sizeof(buffer))); 4423 else if (res) 4424 len = snprintf(buf, PAGE_SIZE, "%d:%d:%d:%d\n", ioa_cfg->host->host_no, 4425 res->bus, res->target, res->lun); 4426 4427 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); 4428 return len; 4429 } 4430 4431 static struct device_attribute ipr_resource_path_attr = { 4432 .attr = { 4433 .name = "resource_path", 4434 .mode = S_IRUGO, 4435 }, 4436 .show = ipr_show_resource_path 4437 }; 4438 4439 /** 4440 * ipr_show_device_id - Show the device_id for this device. 4441 * @dev: device struct 4442 * @attr: device attribute structure 4443 * @buf: buffer 4444 * 4445 * Return value: 4446 * number of bytes printed to buffer 4447 **/ 4448 static ssize_t ipr_show_device_id(struct device *dev, struct device_attribute *attr, char *buf) 4449 { 4450 struct scsi_device *sdev = to_scsi_device(dev); 4451 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata; 4452 struct ipr_resource_entry *res; 4453 unsigned long lock_flags = 0; 4454 ssize_t len = -ENXIO; 4455 4456 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); 4457 res = (struct ipr_resource_entry *)sdev->hostdata; 4458 if (res && ioa_cfg->sis64) 4459 len = snprintf(buf, PAGE_SIZE, "0x%llx\n", be64_to_cpu(res->dev_id)); 4460 else if (res) 4461 len = snprintf(buf, PAGE_SIZE, "0x%llx\n", res->lun_wwn); 4462 4463 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); 4464 return len; 4465 } 4466 4467 static struct device_attribute ipr_device_id_attr = { 4468 .attr = { 4469 .name = "device_id", 4470 .mode = S_IRUGO, 4471 }, 4472 .show = ipr_show_device_id 4473 }; 4474 4475 /** 4476 * ipr_show_resource_type - Show the resource type for this device. 
4477 * @dev: device struct 4478 * @attr: device attribute structure 4479 * @buf: buffer 4480 * 4481 * Return value: 4482 * number of bytes printed to buffer 4483 **/ 4484 static ssize_t ipr_show_resource_type(struct device *dev, struct device_attribute *attr, char *buf) 4485 { 4486 struct scsi_device *sdev = to_scsi_device(dev); 4487 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata; 4488 struct ipr_resource_entry *res; 4489 unsigned long lock_flags = 0; 4490 ssize_t len = -ENXIO; 4491 4492 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); 4493 res = (struct ipr_resource_entry *)sdev->hostdata; 4494 4495 if (res) 4496 len = snprintf(buf, PAGE_SIZE, "%x\n", res->type); 4497 4498 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); 4499 return len; 4500 } 4501 4502 static struct device_attribute ipr_resource_type_attr = { 4503 .attr = { 4504 .name = "resource_type", 4505 .mode = S_IRUGO, 4506 }, 4507 .show = ipr_show_resource_type 4508 }; 4509 4510 /** 4511 * ipr_show_raw_mode - Show the adapter's raw mode 4512 * @dev: class device struct 4513 * @buf: buffer 4514 * 4515 * Return value: 4516 * number of bytes printed to buffer 4517 **/ 4518 static ssize_t ipr_show_raw_mode(struct device *dev, 4519 struct device_attribute *attr, char *buf) 4520 { 4521 struct scsi_device *sdev = to_scsi_device(dev); 4522 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata; 4523 struct ipr_resource_entry *res; 4524 unsigned long lock_flags = 0; 4525 ssize_t len; 4526 4527 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); 4528 res = (struct ipr_resource_entry *)sdev->hostdata; 4529 if (res) 4530 len = snprintf(buf, PAGE_SIZE, "%d\n", res->raw_mode); 4531 else 4532 len = -ENXIO; 4533 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); 4534 return len; 4535 } 4536 4537 /** 4538 * ipr_store_raw_mode - Change the adapter's raw mode 4539 * @dev: class device struct 4540 * @buf: buffer 4541 * 4542 * Return value: 4543 * number of bytes printed to buffer 4544 **/ 4545 static ssize_t ipr_store_raw_mode(struct device *dev, 4546 struct device_attribute *attr, 4547 const char *buf, size_t count) 4548 { 4549 struct scsi_device *sdev = to_scsi_device(dev); 4550 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata; 4551 struct ipr_resource_entry *res; 4552 unsigned long lock_flags = 0; 4553 ssize_t len; 4554 4555 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); 4556 res = (struct ipr_resource_entry *)sdev->hostdata; 4557 if (res) { 4558 if (ipr_is_af_dasd_device(res)) { 4559 res->raw_mode = simple_strtoul(buf, NULL, 10); 4560 len = strlen(buf); 4561 if (res->sdev) 4562 sdev_printk(KERN_INFO, res->sdev, "raw mode is %s\n", 4563 res->raw_mode ? 
"enabled" : "disabled"); 4564 } else 4565 len = -EINVAL; 4566 } else 4567 len = -ENXIO; 4568 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); 4569 return len; 4570 } 4571 4572 static struct device_attribute ipr_raw_mode_attr = { 4573 .attr = { 4574 .name = "raw_mode", 4575 .mode = S_IRUGO | S_IWUSR, 4576 }, 4577 .show = ipr_show_raw_mode, 4578 .store = ipr_store_raw_mode 4579 }; 4580 4581 static struct device_attribute *ipr_dev_attrs[] = { 4582 &ipr_adapter_handle_attr, 4583 &ipr_resource_path_attr, 4584 &ipr_device_id_attr, 4585 &ipr_resource_type_attr, 4586 &ipr_raw_mode_attr, 4587 NULL, 4588 }; 4589 4590 /** 4591 * ipr_biosparam - Return the HSC mapping 4592 * @sdev: scsi device struct 4593 * @block_device: block device pointer 4594 * @capacity: capacity of the device 4595 * @parm: Array containing returned HSC values. 4596 * 4597 * This function generates the HSC parms that fdisk uses. 4598 * We want to make sure we return something that places partitions 4599 * on 4k boundaries for best performance with the IOA. 4600 * 4601 * Return value: 4602 * 0 on success 4603 **/ 4604 static int ipr_biosparam(struct scsi_device *sdev, 4605 struct block_device *block_device, 4606 sector_t capacity, int *parm) 4607 { 4608 int heads, sectors; 4609 sector_t cylinders; 4610 4611 heads = 128; 4612 sectors = 32; 4613 4614 cylinders = capacity; 4615 sector_div(cylinders, (128 * 32)); 4616 4617 /* return result */ 4618 parm[0] = heads; 4619 parm[1] = sectors; 4620 parm[2] = cylinders; 4621 4622 return 0; 4623 } 4624 4625 /** 4626 * ipr_find_starget - Find target based on bus/target. 4627 * @starget: scsi target struct 4628 * 4629 * Return value: 4630 * resource entry pointer if found / NULL if not found 4631 **/ 4632 static struct ipr_resource_entry *ipr_find_starget(struct scsi_target *starget) 4633 { 4634 struct Scsi_Host *shost = dev_to_shost(&starget->dev); 4635 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) shost->hostdata; 4636 struct ipr_resource_entry *res; 4637 4638 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) { 4639 if ((res->bus == starget->channel) && 4640 (res->target == starget->id)) { 4641 return res; 4642 } 4643 } 4644 4645 return NULL; 4646 } 4647 4648 static struct ata_port_info sata_port_info; 4649 4650 /** 4651 * ipr_target_alloc - Prepare for commands to a SCSI target 4652 * @starget: scsi target struct 4653 * 4654 * If the device is a SATA device, this function allocates an 4655 * ATA port with libata, else it does nothing. 
4656 * 4657 * Return value: 4658 * 0 on success / non-0 on failure 4659 **/ 4660 static int ipr_target_alloc(struct scsi_target *starget) 4661 { 4662 struct Scsi_Host *shost = dev_to_shost(&starget->dev); 4663 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) shost->hostdata; 4664 struct ipr_sata_port *sata_port; 4665 struct ata_port *ap; 4666 struct ipr_resource_entry *res; 4667 unsigned long lock_flags; 4668 4669 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); 4670 res = ipr_find_starget(starget); 4671 starget->hostdata = NULL; 4672 4673 if (res && ipr_is_gata(res)) { 4674 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); 4675 sata_port = kzalloc(sizeof(*sata_port), GFP_KERNEL); 4676 if (!sata_port) 4677 return -ENOMEM; 4678 4679 ap = ata_sas_port_alloc(&ioa_cfg->ata_host, &sata_port_info, shost); 4680 if (ap) { 4681 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); 4682 sata_port->ioa_cfg = ioa_cfg; 4683 sata_port->ap = ap; 4684 sata_port->res = res; 4685 4686 res->sata_port = sata_port; 4687 ap->private_data = sata_port; 4688 starget->hostdata = sata_port; 4689 } else { 4690 kfree(sata_port); 4691 return -ENOMEM; 4692 } 4693 } 4694 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); 4695 4696 return 0; 4697 } 4698 4699 /** 4700 * ipr_target_destroy - Destroy a SCSI target 4701 * @starget: scsi target struct 4702 * 4703 * If the device was a SATA device, this function frees the libata 4704 * ATA port, else it does nothing. 4705 * 4706 **/ 4707 static void ipr_target_destroy(struct scsi_target *starget) 4708 { 4709 struct ipr_sata_port *sata_port = starget->hostdata; 4710 struct Scsi_Host *shost = dev_to_shost(&starget->dev); 4711 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) shost->hostdata; 4712 4713 if (ioa_cfg->sis64) { 4714 if (!ipr_find_starget(starget)) { 4715 if (starget->channel == IPR_ARRAY_VIRTUAL_BUS) 4716 clear_bit(starget->id, ioa_cfg->array_ids); 4717 else if (starget->channel == IPR_VSET_VIRTUAL_BUS) 4718 clear_bit(starget->id, ioa_cfg->vset_ids); 4719 else if (starget->channel == 0) 4720 clear_bit(starget->id, ioa_cfg->target_ids); 4721 } 4722 } 4723 4724 if (sata_port) { 4725 starget->hostdata = NULL; 4726 ata_sas_port_destroy(sata_port->ap); 4727 kfree(sata_port); 4728 } 4729 } 4730 4731 /** 4732 * ipr_find_sdev - Find device based on bus/target/lun. 
4733 * @sdev: scsi device struct 4734 * 4735 * Return value: 4736 * resource entry pointer if found / NULL if not found 4737 **/ 4738 static struct ipr_resource_entry *ipr_find_sdev(struct scsi_device *sdev) 4739 { 4740 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata; 4741 struct ipr_resource_entry *res; 4742 4743 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) { 4744 if ((res->bus == sdev->channel) && 4745 (res->target == sdev->id) && 4746 (res->lun == sdev->lun)) 4747 return res; 4748 } 4749 4750 return NULL; 4751 } 4752 4753 /** 4754 * ipr_slave_destroy - Unconfigure a SCSI device 4755 * @sdev: scsi device struct 4756 * 4757 * Return value: 4758 * nothing 4759 **/ 4760 static void ipr_slave_destroy(struct scsi_device *sdev) 4761 { 4762 struct ipr_resource_entry *res; 4763 struct ipr_ioa_cfg *ioa_cfg; 4764 unsigned long lock_flags = 0; 4765 4766 ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata; 4767 4768 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); 4769 res = (struct ipr_resource_entry *) sdev->hostdata; 4770 if (res) { 4771 if (res->sata_port) 4772 res->sata_port->ap->link.device[0].class = ATA_DEV_NONE; 4773 sdev->hostdata = NULL; 4774 res->sdev = NULL; 4775 res->sata_port = NULL; 4776 } 4777 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); 4778 } 4779 4780 /** 4781 * ipr_slave_configure - Configure a SCSI device 4782 * @sdev: scsi device struct 4783 * 4784 * This function configures the specified scsi device. 4785 * 4786 * Return value: 4787 * 0 on success 4788 **/ 4789 static int ipr_slave_configure(struct scsi_device *sdev) 4790 { 4791 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata; 4792 struct ipr_resource_entry *res; 4793 struct ata_port *ap = NULL; 4794 unsigned long lock_flags = 0; 4795 char buffer[IPR_MAX_RES_PATH_LENGTH]; 4796 4797 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); 4798 res = sdev->hostdata; 4799 if (res) { 4800 if (ipr_is_af_dasd_device(res)) 4801 sdev->type = TYPE_RAID; 4802 if (ipr_is_af_dasd_device(res) || ipr_is_ioa_resource(res)) { 4803 sdev->scsi_level = 4; 4804 sdev->no_uld_attach = 1; 4805 } 4806 if (ipr_is_vset_device(res)) { 4807 sdev->scsi_level = SCSI_SPC_3; 4808 blk_queue_rq_timeout(sdev->request_queue, 4809 IPR_VSET_RW_TIMEOUT); 4810 blk_queue_max_hw_sectors(sdev->request_queue, IPR_VSET_MAX_SECTORS); 4811 } 4812 if (ipr_is_gata(res) && res->sata_port) 4813 ap = res->sata_port->ap; 4814 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); 4815 4816 if (ap) { 4817 scsi_change_queue_depth(sdev, IPR_MAX_CMD_PER_ATA_LUN); 4818 ata_sas_slave_configure(sdev, ap); 4819 } 4820 4821 if (ioa_cfg->sis64) 4822 sdev_printk(KERN_INFO, sdev, "Resource path: %s\n", 4823 ipr_format_res_path(ioa_cfg, 4824 res->res_path, buffer, sizeof(buffer))); 4825 return 0; 4826 } 4827 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); 4828 return 0; 4829 } 4830 4831 /** 4832 * ipr_ata_slave_alloc - Prepare for commands to a SATA device 4833 * @sdev: scsi device struct 4834 * 4835 * This function initializes an ATA port so that future commands 4836 * sent through queuecommand will work. 
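 * If the port init or probe fails, the partially configured device is
 * torn down again through ipr_slave_destroy.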
4837 * 4838 * Return value: 4839 * 0 on success 4840 **/ 4841 static int ipr_ata_slave_alloc(struct scsi_device *sdev) 4842 { 4843 struct ipr_sata_port *sata_port = NULL; 4844 int rc = -ENXIO; 4845 4846 ENTER; 4847 if (sdev->sdev_target) 4848 sata_port = sdev->sdev_target->hostdata; 4849 if (sata_port) { 4850 rc = ata_sas_port_init(sata_port->ap); 4851 if (rc == 0) 4852 rc = ata_sas_sync_probe(sata_port->ap); 4853 } 4854 4855 if (rc) 4856 ipr_slave_destroy(sdev); 4857 4858 LEAVE; 4859 return rc; 4860 } 4861 4862 /** 4863 * ipr_slave_alloc - Prepare for commands to a device. 4864 * @sdev: scsi device struct 4865 * 4866 * This function saves a pointer to the resource entry 4867 * in the scsi device struct if the device exists. We 4868 * can then use this pointer in ipr_queuecommand when 4869 * handling new commands. 4870 * 4871 * Return value: 4872 * 0 on success / -ENXIO if device does not exist 4873 **/ 4874 static int ipr_slave_alloc(struct scsi_device *sdev) 4875 { 4876 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata; 4877 struct ipr_resource_entry *res; 4878 unsigned long lock_flags; 4879 int rc = -ENXIO; 4880 4881 sdev->hostdata = NULL; 4882 4883 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); 4884 4885 res = ipr_find_sdev(sdev); 4886 if (res) { 4887 res->sdev = sdev; 4888 res->add_to_ml = 0; 4889 res->in_erp = 0; 4890 sdev->hostdata = res; 4891 if (!ipr_is_naca_model(res)) 4892 res->needs_sync_complete = 1; 4893 rc = 0; 4894 if (ipr_is_gata(res)) { 4895 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); 4896 return ipr_ata_slave_alloc(sdev); 4897 } 4898 } 4899 4900 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); 4901 4902 return rc; 4903 } 4904 4905 /** 4906 * ipr_match_lun - Match function for specified LUN 4907 * @ipr_cmd: ipr command struct 4908 * @device: device to match (sdev) 4909 * 4910 * Returns: 4911 * 1 if command matches sdev / 0 if command does not match sdev 4912 **/ 4913 static int ipr_match_lun(struct ipr_cmnd *ipr_cmd, void *device) 4914 { 4915 if (ipr_cmd->scsi_cmd && ipr_cmd->scsi_cmd->device == device) 4916 return 1; 4917 return 0; 4918 } 4919 4920 /** 4921 * ipr_wait_for_ops - Wait for matching commands to complete 4922 * @ioa_cfg: ioa config struct 4923 * @device: device to match (sdev) 4924 * @match: match function to use 4925 * 4926 * Returns: 4927 * SUCCESS / FAILED 4928 **/ 4929 static int ipr_wait_for_ops(struct ipr_ioa_cfg *ioa_cfg, void *device, 4930 int (*match)(struct ipr_cmnd *, void *)) 4931 { 4932 struct ipr_cmnd *ipr_cmd; 4933 int wait; 4934 unsigned long flags; 4935 struct ipr_hrr_queue *hrrq; 4936 signed long timeout = IPR_ABORT_TASK_TIMEOUT; 4937 DECLARE_COMPLETION_ONSTACK(comp); 4938 4939 ENTER; 4940 do { 4941 wait = 0; 4942 4943 for_each_hrrq(hrrq, ioa_cfg) { 4944 spin_lock_irqsave(hrrq->lock, flags); 4945 list_for_each_entry(ipr_cmd, &hrrq->hrrq_pending_q, queue) { 4946 if (match(ipr_cmd, device)) { 4947 ipr_cmd->eh_comp = &comp; 4948 wait++; 4949 } 4950 } 4951 spin_unlock_irqrestore(hrrq->lock, flags); 4952 } 4953 4954 if (wait) { 4955 timeout = wait_for_completion_timeout(&comp, timeout); 4956 4957 if (!timeout) { 4958 wait = 0; 4959 4960 for_each_hrrq(hrrq, ioa_cfg) { 4961 spin_lock_irqsave(hrrq->lock, flags); 4962 list_for_each_entry(ipr_cmd, &hrrq->hrrq_pending_q, queue) { 4963 if (match(ipr_cmd, device)) { 4964 ipr_cmd->eh_comp = NULL; 4965 wait++; 4966 } 4967 } 4968 spin_unlock_irqrestore(hrrq->lock, flags); 4969 } 4970 4971 if (wait) 4972 dev_err(&ioa_cfg->pdev->dev, "Timed out
waiting for aborted commands\n"); 4973 LEAVE; 4974 return wait ? FAILED : SUCCESS; 4975 } 4976 } 4977 } while (wait); 4978 4979 LEAVE; 4980 return SUCCESS; 4981 } 4982 4983 static int ipr_eh_host_reset(struct scsi_cmnd *cmd) 4984 { 4985 struct ipr_ioa_cfg *ioa_cfg; 4986 unsigned long lock_flags = 0; 4987 int rc = SUCCESS; 4988 4989 ENTER; 4990 ioa_cfg = (struct ipr_ioa_cfg *) cmd->device->host->hostdata; 4991 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); 4992 4993 if (!ioa_cfg->in_reset_reload && !ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead) { 4994 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_ABBREV); 4995 dev_err(&ioa_cfg->pdev->dev, 4996 "Adapter being reset as a result of error recovery.\n"); 4997 4998 if (WAIT_FOR_DUMP == ioa_cfg->sdt_state) 4999 ioa_cfg->sdt_state = GET_DUMP; 5000 } 5001 5002 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); 5003 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload); 5004 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); 5005 5006 /* If we got hit with a host reset while we were already resetting 5007 the adapter for some reason, and that reset failed, report this 
 host reset as failed too. */ 5008 if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead) { 5009 ipr_trace; 5010 rc = FAILED; 5011 } 5012 5013 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); 5014 LEAVE; 5015 return rc; 5016 } 5017 5018 /** 5019 * ipr_device_reset - Reset the device 5020 * @ioa_cfg: ioa config struct 5021 * @res: resource entry struct 5022 * 5023 * This function issues a device reset to the affected device. 5024 * If the device is a SCSI device, a LUN reset will be sent 5025 * to the device first. If that does not work, a target reset 5026 * will be sent. If the device is a SATA device, a PHY reset will 5027 * be sent. 5028 * 5029 * Return value: 5030 * 0 on success / non-zero on failure 5031 **/ 5032 static int ipr_device_reset(struct ipr_ioa_cfg *ioa_cfg, 5033 struct ipr_resource_entry *res) 5034 { 5035 struct ipr_cmnd *ipr_cmd; 5036 struct ipr_ioarcb *ioarcb; 5037 struct ipr_cmd_pkt *cmd_pkt; 5038 struct ipr_ioarcb_ata_regs *regs; 5039 u32 ioasc; 5040 5041 ENTER; 5042 ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg); 5043 ioarcb = &ipr_cmd->ioarcb; 5044 cmd_pkt = &ioarcb->cmd_pkt; 5045 5046 if (ipr_cmd->ioa_cfg->sis64) { 5047 regs = &ipr_cmd->i.ata_ioadl.regs; 5048 ioarcb->add_cmd_parms_offset = cpu_to_be16(sizeof(*ioarcb)); 5049 } else 5050 regs = &ioarcb->u.add_data.u.regs; 5051 5052 ioarcb->res_handle = res->res_handle; 5053 cmd_pkt->request_type = IPR_RQTYPE_IOACMD; 5054 cmd_pkt->cdb[0] = IPR_RESET_DEVICE; 5055 if (ipr_is_gata(res)) { 5056 cmd_pkt->cdb[2] = IPR_ATA_PHY_RESET; 5057 ioarcb->add_cmd_parms_len = cpu_to_be16(sizeof(regs->flags)); 5058 regs->flags |= IPR_ATA_FLAG_STATUS_ON_GOOD_COMPLETION; 5059 } 5060 5061 ipr_send_blocking_cmd(ipr_cmd, ipr_timeout, IPR_DEVICE_RESET_TIMEOUT); 5062 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc); 5063 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q); 5064 if (ipr_is_gata(res) && res->sata_port && ioasc != IPR_IOASC_IOA_WAS_RESET) { 5065 if (ipr_cmd->ioa_cfg->sis64) 5066 memcpy(&res->sata_port->ioasa, &ipr_cmd->s.ioasa64.u.gata, 5067 sizeof(struct ipr_ioasa_gata)); 5068 else 5069 memcpy(&res->sata_port->ioasa, &ipr_cmd->s.ioasa.u.gata, 5070 sizeof(struct ipr_ioasa_gata)); 5071 } 5072 5073 LEAVE; 5074 return IPR_IOASC_SENSE_KEY(ioasc) ?
-EIO : 0; 5075 } 5076 5077 /** 5078 * ipr_sata_reset - Reset the SATA port 5079 * @link: SATA link to reset 5080 * @classes: class of the attached device 5081 * 5082 * This function issues a SATA phy reset to the affected ATA link. 5083 * 5084 * Return value: 5085 * 0 on success / non-zero on failure 5086 **/ 5087 static int ipr_sata_reset(struct ata_link *link, unsigned int *classes, 5088 unsigned long deadline) 5089 { 5090 struct ipr_sata_port *sata_port = link->ap->private_data; 5091 struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg; 5092 struct ipr_resource_entry *res; 5093 unsigned long lock_flags = 0; 5094 int rc = -ENXIO; 5095 5096 ENTER; 5097 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); 5098 while (ioa_cfg->in_reset_reload) { 5099 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); 5100 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload); 5101 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); 5102 } 5103 5104 res = sata_port->res; 5105 if (res) { 5106 rc = ipr_device_reset(ioa_cfg, res); 5107 *classes = res->ata_class; 5108 } 5109 5110 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); 5111 LEAVE; 5112 return rc; 5113 } 5114 5115 /** 5116 * ipr_eh_dev_reset - Reset the device 5117 * @scsi_cmd: scsi command struct 5118 * 5119 * This function issues a device reset to the affected device. 5120 * A LUN reset will be sent to the device first. If that does 5121 * not work, a target reset will be sent. 5122 * 5123 * Return value: 5124 * SUCCESS / FAILED 5125 **/ 5126 static int __ipr_eh_dev_reset(struct scsi_cmnd *scsi_cmd) 5127 { 5128 struct ipr_cmnd *ipr_cmd; 5129 struct ipr_ioa_cfg *ioa_cfg; 5130 struct ipr_resource_entry *res; 5131 struct ata_port *ap; 5132 int rc = 0; 5133 struct ipr_hrr_queue *hrrq; 5134 5135 ENTER; 5136 ioa_cfg = (struct ipr_ioa_cfg *) scsi_cmd->device->host->hostdata; 5137 res = scsi_cmd->device->hostdata; 5138 5139 if (!res) 5140 return FAILED; 5141 5142 /* 5143 * If we are currently going through reset/reload, return failed. 
This will force the 5144 * mid-layer to call ipr_eh_host_reset, which will then go to sleep and wait for the 5145 * reset to complete 5146 */ 5147 if (ioa_cfg->in_reset_reload) 5148 return FAILED; 5149 if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead) 5150 return FAILED; 5151 5152 for_each_hrrq(hrrq, ioa_cfg) { 5153 spin_lock(&hrrq->_lock); 5154 list_for_each_entry(ipr_cmd, &hrrq->hrrq_pending_q, queue) { 5155 if (ipr_cmd->ioarcb.res_handle == res->res_handle) { 5156 if (ipr_cmd->scsi_cmd) 5157 ipr_cmd->done = ipr_scsi_eh_done; 5158 if (ipr_cmd->qc) 5159 ipr_cmd->done = ipr_sata_eh_done; 5160 if (ipr_cmd->qc && 5161 !(ipr_cmd->qc->flags & ATA_QCFLAG_FAILED)) { 5162 ipr_cmd->qc->err_mask |= AC_ERR_TIMEOUT; 5163 ipr_cmd->qc->flags |= ATA_QCFLAG_FAILED; 5164 } 5165 } 5166 } 5167 spin_unlock(&hrrq->_lock); 5168 } 5169 res->resetting_device = 1; 5170 scmd_printk(KERN_ERR, scsi_cmd, "Resetting device\n"); 5171 5172 if (ipr_is_gata(res) && res->sata_port) { 5173 ap = res->sata_port->ap; 5174 spin_unlock_irq(scsi_cmd->device->host->host_lock); 5175 ata_std_error_handler(ap); 5176 spin_lock_irq(scsi_cmd->device->host->host_lock); 5177 5178 for_each_hrrq(hrrq, ioa_cfg) { 5179 spin_lock(&hrrq->_lock); 5180 list_for_each_entry(ipr_cmd, 5181 &hrrq->hrrq_pending_q, queue) { 5182 if (ipr_cmd->ioarcb.res_handle == 5183 res->res_handle) { 5184 rc = -EIO; 5185 break; 5186 } 5187 } 5188 spin_unlock(&hrrq->_lock); 5189 } 5190 } else 5191 rc = ipr_device_reset(ioa_cfg, res); 5192 res->resetting_device = 0; 5193 res->reset_occurred = 1; 5194 5195 LEAVE; 5196 return rc ? FAILED : SUCCESS; 5197 } 5198 5199 static int ipr_eh_dev_reset(struct scsi_cmnd *cmd) 5200 { 5201 int rc; 5202 struct ipr_ioa_cfg *ioa_cfg; 5203 5204 ioa_cfg = (struct ipr_ioa_cfg *) cmd->device->host->hostdata; 5205 5206 spin_lock_irq(cmd->device->host->host_lock); 5207 rc = __ipr_eh_dev_reset(cmd); 5208 spin_unlock_irq(cmd->device->host->host_lock); 5209 5210 if (rc == SUCCESS) 5211 rc = ipr_wait_for_ops(ioa_cfg, cmd->device, ipr_match_lun); 5212 5213 return rc; 5214 } 5215 5216 /** 5217 * ipr_bus_reset_done - Op done function for bus reset. 5218 * @ipr_cmd: ipr command struct 5219 * 5220 * This function is the op done function for a bus reset 5221 * 5222 * Return value: 5223 * none 5224 **/ 5225 static void ipr_bus_reset_done(struct ipr_cmnd *ipr_cmd) 5226 { 5227 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; 5228 struct ipr_resource_entry *res; 5229 5230 ENTER; 5231 if (!ioa_cfg->sis64) 5232 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) { 5233 if (res->res_handle == ipr_cmd->ioarcb.res_handle) { 5234 scsi_report_bus_reset(ioa_cfg->host, res->bus); 5235 break; 5236 } 5237 } 5238 5239 /* 5240 * If abort has not completed, indicate the reset has, else call the 5241 * abort's done function to wake the sleeping eh thread 5242 */ 5243 if (ipr_cmd->sibling->sibling) 5244 ipr_cmd->sibling->sibling = NULL; 5245 else 5246 ipr_cmd->sibling->done(ipr_cmd->sibling); 5247 5248 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q); 5249 LEAVE; 5250 } 5251 5252 /** 5253 * ipr_abort_timeout - An abort task has timed out 5254 * @ipr_cmd: ipr command struct 5255 * 5256 * This function handles when an abort task times out. If this 5257 * happens we issue a bus reset since we have resources tied 5258 * up that must be freed before returning to the midlayer. 
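 * The bus reset is issued asynchronously via ipr_do_req() with
 * ipr_bus_reset_done() as the completion callback, which in turn wakes
 * the sleeping error-handler thread.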
5259 * 5260 * Return value: 5261 * none 5262 **/ 5263 static void ipr_abort_timeout(struct ipr_cmnd *ipr_cmd) 5264 { 5265 struct ipr_cmnd *reset_cmd; 5266 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; 5267 struct ipr_cmd_pkt *cmd_pkt; 5268 unsigned long lock_flags = 0; 5269 5270 ENTER; 5271 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); 5272 if (ipr_cmd->completion.done || ioa_cfg->in_reset_reload) { 5273 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); 5274 return; 5275 } 5276 5277 sdev_printk(KERN_ERR, ipr_cmd->u.sdev, "Abort timed out. Resetting bus.\n"); 5278 reset_cmd = ipr_get_free_ipr_cmnd(ioa_cfg); 5279 ipr_cmd->sibling = reset_cmd; 5280 reset_cmd->sibling = ipr_cmd; 5281 reset_cmd->ioarcb.res_handle = ipr_cmd->ioarcb.res_handle; 5282 cmd_pkt = &reset_cmd->ioarcb.cmd_pkt; 5283 cmd_pkt->request_type = IPR_RQTYPE_IOACMD; 5284 cmd_pkt->cdb[0] = IPR_RESET_DEVICE; 5285 cmd_pkt->cdb[2] = IPR_RESET_TYPE_SELECT | IPR_BUS_RESET; 5286 5287 ipr_do_req(reset_cmd, ipr_bus_reset_done, ipr_timeout, IPR_DEVICE_RESET_TIMEOUT); 5288 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); 5289 LEAVE; 5290 } 5291 5292 /** 5293 * ipr_cancel_op - Cancel specified op 5294 * @scsi_cmd: scsi command struct 5295 * 5296 * This function cancels specified op. 5297 * 5298 * Return value: 5299 * SUCCESS / FAILED 5300 **/ 5301 static int ipr_cancel_op(struct scsi_cmnd *scsi_cmd) 5302 { 5303 struct ipr_cmnd *ipr_cmd; 5304 struct ipr_ioa_cfg *ioa_cfg; 5305 struct ipr_resource_entry *res; 5306 struct ipr_cmd_pkt *cmd_pkt; 5307 u32 ioasc, int_reg; 5308 int op_found = 0; 5309 struct ipr_hrr_queue *hrrq; 5310 5311 ENTER; 5312 ioa_cfg = (struct ipr_ioa_cfg *)scsi_cmd->device->host->hostdata; 5313 res = scsi_cmd->device->hostdata; 5314 5315 /* If we are currently going through reset/reload, return failed. 5316 * This will force the mid-layer to call ipr_eh_host_reset, 5317 * which will then go to sleep and wait for the reset to complete 5318 */ 5319 if (ioa_cfg->in_reset_reload || 5320 ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead) 5321 return FAILED; 5322 if (!res) 5323 return FAILED; 5324 5325 /* 5326 * If we are aborting a timed out op, chances are that the timeout was caused 5327 * by a still not detected EEH error. In such cases, reading a register will 5328 * trigger the EEH recovery infrastructure. 
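	 * The value read below is deliberately unused; the MMIO read itself
	 * is what gives the EEH machinery a chance to notice the error.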
5329 */ 5330 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg); 5331 5332 if (!ipr_is_gscsi(res)) 5333 return FAILED; 5334 5335 for_each_hrrq(hrrq, ioa_cfg) { 5336 spin_lock(&hrrq->_lock); 5337 list_for_each_entry(ipr_cmd, &hrrq->hrrq_pending_q, queue) { 5338 if (ipr_cmd->scsi_cmd == scsi_cmd) { 5339 ipr_cmd->done = ipr_scsi_eh_done; 5340 op_found = 1; 5341 break; 5342 } 5343 } 5344 spin_unlock(&hrrq->_lock); 5345 } 5346 5347 if (!op_found) 5348 return SUCCESS; 5349 5350 ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg); 5351 ipr_cmd->ioarcb.res_handle = res->res_handle; 5352 cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt; 5353 cmd_pkt->request_type = IPR_RQTYPE_IOACMD; 5354 cmd_pkt->cdb[0] = IPR_CANCEL_ALL_REQUESTS; 5355 ipr_cmd->u.sdev = scsi_cmd->device; 5356 5357 scmd_printk(KERN_ERR, scsi_cmd, "Aborting command: %02X\n", 5358 scsi_cmd->cmnd[0]); 5359 ipr_send_blocking_cmd(ipr_cmd, ipr_abort_timeout, IPR_CANCEL_ALL_TIMEOUT); 5360 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc); 5361 5362 /* 5363 * If the abort task timed out and we sent a bus reset, we will get 5364 * one the following responses to the abort 5365 */ 5366 if (ioasc == IPR_IOASC_BUS_WAS_RESET || ioasc == IPR_IOASC_SYNC_REQUIRED) { 5367 ioasc = 0; 5368 ipr_trace; 5369 } 5370 5371 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q); 5372 if (!ipr_is_naca_model(res)) 5373 res->needs_sync_complete = 1; 5374 5375 LEAVE; 5376 return IPR_IOASC_SENSE_KEY(ioasc) ? FAILED : SUCCESS; 5377 } 5378 5379 /** 5380 * ipr_eh_abort - Abort a single op 5381 * @scsi_cmd: scsi command struct 5382 * 5383 * Return value: 5384 * 0 if scan in progress / 1 if scan is complete 5385 **/ 5386 static int ipr_scan_finished(struct Scsi_Host *shost, unsigned long elapsed_time) 5387 { 5388 unsigned long lock_flags; 5389 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) shost->hostdata; 5390 int rc = 0; 5391 5392 spin_lock_irqsave(shost->host_lock, lock_flags); 5393 if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead || ioa_cfg->scan_done) 5394 rc = 1; 5395 if ((elapsed_time/HZ) > (ioa_cfg->transop_timeout * 2)) 5396 rc = 1; 5397 spin_unlock_irqrestore(shost->host_lock, lock_flags); 5398 return rc; 5399 } 5400 5401 /** 5402 * ipr_eh_host_reset - Reset the host adapter 5403 * @scsi_cmd: scsi command struct 5404 * 5405 * Return value: 5406 * SUCCESS / FAILED 5407 **/ 5408 static int ipr_eh_abort(struct scsi_cmnd *scsi_cmd) 5409 { 5410 unsigned long flags; 5411 int rc; 5412 struct ipr_ioa_cfg *ioa_cfg; 5413 5414 ENTER; 5415 5416 ioa_cfg = (struct ipr_ioa_cfg *) scsi_cmd->device->host->hostdata; 5417 5418 spin_lock_irqsave(scsi_cmd->device->host->host_lock, flags); 5419 rc = ipr_cancel_op(scsi_cmd); 5420 spin_unlock_irqrestore(scsi_cmd->device->host->host_lock, flags); 5421 5422 if (rc == SUCCESS) 5423 rc = ipr_wait_for_ops(ioa_cfg, scsi_cmd->device, ipr_match_lun); 5424 LEAVE; 5425 return rc; 5426 } 5427 5428 /** 5429 * ipr_handle_other_interrupt - Handle "other" interrupts 5430 * @ioa_cfg: ioa config struct 5431 * @int_reg: interrupt register 5432 * 5433 * Return value: 5434 * IRQ_NONE / IRQ_HANDLED 5435 **/ 5436 static irqreturn_t ipr_handle_other_interrupt(struct ipr_ioa_cfg *ioa_cfg, 5437 u32 int_reg) 5438 { 5439 irqreturn_t rc = IRQ_HANDLED; 5440 u32 int_mask_reg; 5441 5442 int_mask_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg32); 5443 int_reg &= ~int_mask_reg; 5444 5445 /* If an interrupt on the adapter did not occur, ignore it. 5446 * Or in the case of SIS 64, check for a stage change interrupt. 
5447 */ 5448 if ((int_reg & IPR_PCII_OPER_INTERRUPTS) == 0) { 5449 if (ioa_cfg->sis64) { 5450 int_mask_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg); 5451 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg) & ~int_mask_reg; 5452 if (int_reg & IPR_PCII_IPL_STAGE_CHANGE) { 5453 5454 /* clear stage change */ 5455 writel(IPR_PCII_IPL_STAGE_CHANGE, ioa_cfg->regs.clr_interrupt_reg); 5456 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg) & ~int_mask_reg; 5457 list_del(&ioa_cfg->reset_cmd->queue); 5458 del_timer(&ioa_cfg->reset_cmd->timer); 5459 ipr_reset_ioa_job(ioa_cfg->reset_cmd); 5460 return IRQ_HANDLED; 5461 } 5462 } 5463 5464 return IRQ_NONE; 5465 } 5466 5467 if (int_reg & IPR_PCII_IOA_TRANS_TO_OPER) { 5468 /* Mask the interrupt */ 5469 writel(IPR_PCII_IOA_TRANS_TO_OPER, ioa_cfg->regs.set_interrupt_mask_reg); 5470 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg); 5471 5472 list_del(&ioa_cfg->reset_cmd->queue); 5473 del_timer(&ioa_cfg->reset_cmd->timer); 5474 ipr_reset_ioa_job(ioa_cfg->reset_cmd); 5475 } else if ((int_reg & IPR_PCII_HRRQ_UPDATED) == int_reg) { 5476 if (ioa_cfg->clear_isr) { 5477 if (ipr_debug && printk_ratelimit()) 5478 dev_err(&ioa_cfg->pdev->dev, 5479 "Spurious interrupt detected. 0x%08X\n", int_reg); 5480 writel(IPR_PCII_HRRQ_UPDATED, ioa_cfg->regs.clr_interrupt_reg32); 5481 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32); 5482 return IRQ_NONE; 5483 } 5484 } else { 5485 if (int_reg & IPR_PCII_IOA_UNIT_CHECKED) 5486 ioa_cfg->ioa_unit_checked = 1; 5487 else if (int_reg & IPR_PCII_NO_HOST_RRQ) 5488 dev_err(&ioa_cfg->pdev->dev, 5489 "No Host RRQ. 0x%08X\n", int_reg); 5490 else 5491 dev_err(&ioa_cfg->pdev->dev, 5492 "Permanent IOA failure. 0x%08X\n", int_reg); 5493 5494 if (WAIT_FOR_DUMP == ioa_cfg->sdt_state) 5495 ioa_cfg->sdt_state = GET_DUMP; 5496 5497 ipr_mask_and_clear_interrupts(ioa_cfg, ~0); 5498 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE); 5499 } 5500 5501 return rc; 5502 } 5503 5504 /** 5505 * ipr_isr_eh - Interrupt service routine error handler 5506 * @ioa_cfg: ioa config struct 5507 * @msg: message to log 5508 * 5509 * Return value: 5510 * none 5511 **/ 5512 static void ipr_isr_eh(struct ipr_ioa_cfg *ioa_cfg, char *msg, u16 number) 5513 { 5514 ioa_cfg->errors_logged++; 5515 dev_err(&ioa_cfg->pdev->dev, "%s %d\n", msg, number); 5516 5517 if (WAIT_FOR_DUMP == ioa_cfg->sdt_state) 5518 ioa_cfg->sdt_state = GET_DUMP; 5519 5520 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE); 5521 } 5522 5523 static int ipr_process_hrrq(struct ipr_hrr_queue *hrr_queue, int budget, 5524 struct list_head *doneq) 5525 { 5526 u32 ioasc; 5527 u16 cmd_index; 5528 struct ipr_cmnd *ipr_cmd; 5529 struct ipr_ioa_cfg *ioa_cfg = hrr_queue->ioa_cfg; 5530 int num_hrrq = 0; 5531 5532 /* If interrupts are disabled, ignore the interrupt */ 5533 if (!hrr_queue->allow_interrupts) 5534 return 0; 5535 5536 while ((be32_to_cpu(*hrr_queue->hrrq_curr) & IPR_HRRQ_TOGGLE_BIT) == 5537 hrr_queue->toggle_bit) { 5538 5539 cmd_index = (be32_to_cpu(*hrr_queue->hrrq_curr) & 5540 IPR_HRRQ_REQ_RESP_HANDLE_MASK) >> 5541 IPR_HRRQ_REQ_RESP_HANDLE_SHIFT; 5542 5543 if (unlikely(cmd_index > hrr_queue->max_cmd_id || 5544 cmd_index < hrr_queue->min_cmd_id)) { 5545 ipr_isr_eh(ioa_cfg, 5546 "Invalid response handle from IOA: ", 5547 cmd_index); 5548 break; 5549 } 5550 5551 ipr_cmd = ioa_cfg->ipr_cmnd_list[cmd_index]; 5552 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc); 5553 5554 ipr_trc_hook(ipr_cmd, IPR_TRACE_FINISH, ioasc); 5555 5556 list_move_tail(&ipr_cmd->queue, doneq); 5557 5558 if (hrr_queue->hrrq_curr 
< hrr_queue->hrrq_end) { 5559 hrr_queue->hrrq_curr++; 5560 } else { 5561 hrr_queue->hrrq_curr = hrr_queue->hrrq_start; 5562 hrr_queue->toggle_bit ^= 1u; 5563 } 5564 num_hrrq++; 5565 if (budget > 0 && num_hrrq >= budget) 5566 break; 5567 } 5568 5569 return num_hrrq; 5570 } 5571 5572 static int ipr_iopoll(struct blk_iopoll *iop, int budget) 5573 { 5574 struct ipr_ioa_cfg *ioa_cfg; 5575 struct ipr_hrr_queue *hrrq; 5576 struct ipr_cmnd *ipr_cmd, *temp; 5577 unsigned long hrrq_flags; 5578 int completed_ops; 5579 LIST_HEAD(doneq); 5580 5581 hrrq = container_of(iop, struct ipr_hrr_queue, iopoll); 5582 ioa_cfg = hrrq->ioa_cfg; 5583 5584 spin_lock_irqsave(hrrq->lock, hrrq_flags); 5585 completed_ops = ipr_process_hrrq(hrrq, budget, &doneq); 5586 5587 if (completed_ops < budget) 5588 blk_iopoll_complete(iop); 5589 spin_unlock_irqrestore(hrrq->lock, hrrq_flags); 5590 5591 list_for_each_entry_safe(ipr_cmd, temp, &doneq, queue) { 5592 list_del(&ipr_cmd->queue); 5593 del_timer(&ipr_cmd->timer); 5594 ipr_cmd->fast_done(ipr_cmd); 5595 } 5596 5597 return completed_ops; 5598 } 5599 5600 /** 5601 * ipr_isr - Interrupt service routine 5602 * @irq: irq number 5603 * @devp: pointer to the hrr queue to service 5604 * 5605 * Return value: 5606 * IRQ_NONE / IRQ_HANDLED 5607 **/ 5608 static irqreturn_t ipr_isr(int irq, void *devp) 5609 { 5610 struct ipr_hrr_queue *hrrq = (struct ipr_hrr_queue *)devp; 5611 struct ipr_ioa_cfg *ioa_cfg = hrrq->ioa_cfg; 5612 unsigned long hrrq_flags = 0; 5613 u32 int_reg = 0; 5614 int num_hrrq = 0; 5615 int irq_none = 0; 5616 struct ipr_cmnd *ipr_cmd, *temp; 5617 irqreturn_t rc = IRQ_NONE; 5618 LIST_HEAD(doneq); 5619 5620 spin_lock_irqsave(hrrq->lock, hrrq_flags); 5621 /* If interrupts are disabled, ignore the interrupt */ 5622 if (!hrrq->allow_interrupts) { 5623 spin_unlock_irqrestore(hrrq->lock, hrrq_flags); 5624 return IRQ_NONE; 5625 } 5626 5627 while (1) { 5628 if (ipr_process_hrrq(hrrq, -1, &doneq)) { 5629 rc = IRQ_HANDLED; 5630 5631 if (!ioa_cfg->clear_isr) 5632 break; 5633 5634 /* Clear the PCI interrupt */ 5635 num_hrrq = 0; 5636 do { 5637 writel(IPR_PCII_HRRQ_UPDATED, 5638 ioa_cfg->regs.clr_interrupt_reg32); 5639 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32); 5640 } while (int_reg & IPR_PCII_HRRQ_UPDATED && 5641 num_hrrq++ < IPR_MAX_HRRQ_RETRIES); 5642 5643 } else if (rc == IRQ_NONE && irq_none == 0) { 5644 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32); 5645 irq_none++; 5646 } else if (num_hrrq == IPR_MAX_HRRQ_RETRIES && 5647 int_reg & IPR_PCII_HRRQ_UPDATED) { 5648 ipr_isr_eh(ioa_cfg, 5649 "Error clearing HRRQ: ", num_hrrq); 5650 rc = IRQ_HANDLED; 5651 break; 5652 } else 5653 break; 5654 } 5655 5656 if (unlikely(rc == IRQ_NONE)) 5657 rc = ipr_handle_other_interrupt(ioa_cfg, int_reg); 5658 5659 spin_unlock_irqrestore(hrrq->lock, hrrq_flags); 5660 list_for_each_entry_safe(ipr_cmd, temp, &doneq, queue) { 5661 list_del(&ipr_cmd->queue); 5662 del_timer(&ipr_cmd->timer); 5663 ipr_cmd->fast_done(ipr_cmd); 5664 } 5665 return rc; 5666 } 5667 5668 /** 5669 * ipr_isr_mhrrq - Interrupt service routine 5670 * @irq: irq number 5671 * @devp: pointer to the hrr queue to service 5672 * 5673 * Return value: 5674 * IRQ_NONE / IRQ_HANDLED 5675 **/ 5676 static irqreturn_t ipr_isr_mhrrq(int irq, void *devp) 5677 { 5678 struct ipr_hrr_queue *hrrq = (struct ipr_hrr_queue *)devp; 5679 struct ipr_ioa_cfg *ioa_cfg = hrrq->ioa_cfg; 5680 unsigned long hrrq_flags = 0; 5681 struct ipr_cmnd *ipr_cmd, *temp; 5682 irqreturn_t rc = IRQ_NONE; 5683 LIST_HEAD(doneq); 5684 5685 spin_lock_irqsave(hrrq->lock,
hrrq_flags); 5686 5687 /* If interrupts are disabled, ignore the interrupt */ 5688 if (!hrrq->allow_interrupts) { 5689 spin_unlock_irqrestore(hrrq->lock, hrrq_flags); 5690 return IRQ_NONE; 5691 } 5692 5693 if (ioa_cfg->iopoll_weight && ioa_cfg->sis64 && ioa_cfg->nvectors > 1) { 5694 if ((be32_to_cpu(*hrrq->hrrq_curr) & IPR_HRRQ_TOGGLE_BIT) == 5695 hrrq->toggle_bit) { 5696 if (!blk_iopoll_sched_prep(&hrrq->iopoll)) 5697 blk_iopoll_sched(&hrrq->iopoll); 5698 spin_unlock_irqrestore(hrrq->lock, hrrq_flags); 5699 return IRQ_HANDLED; 5700 } 5701 } else { 5702 if ((be32_to_cpu(*hrrq->hrrq_curr) & IPR_HRRQ_TOGGLE_BIT) == 5703 hrrq->toggle_bit) 5704 5705 if (ipr_process_hrrq(hrrq, -1, &doneq)) 5706 rc = IRQ_HANDLED; 5707 } 5708 5709 spin_unlock_irqrestore(hrrq->lock, hrrq_flags); 5710 5711 list_for_each_entry_safe(ipr_cmd, temp, &doneq, queue) { 5712 list_del(&ipr_cmd->queue); 5713 del_timer(&ipr_cmd->timer); 5714 ipr_cmd->fast_done(ipr_cmd); 5715 } 5716 return rc; 5717 } 5718 5719 /** 5720 * ipr_build_ioadl64 - Build a scatter/gather list and map the buffer 5721 * @ioa_cfg: ioa config struct 5722 * @ipr_cmd: ipr command struct 5723 * 5724 * Return value: 5725 * 0 on success / -1 on failure 5726 **/ 5727 static int ipr_build_ioadl64(struct ipr_ioa_cfg *ioa_cfg, 5728 struct ipr_cmnd *ipr_cmd) 5729 { 5730 int i, nseg; 5731 struct scatterlist *sg; 5732 u32 length; 5733 u32 ioadl_flags = 0; 5734 struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd; 5735 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb; 5736 struct ipr_ioadl64_desc *ioadl64 = ipr_cmd->i.ioadl64; 5737 5738 length = scsi_bufflen(scsi_cmd); 5739 if (!length) 5740 return 0; 5741 5742 nseg = scsi_dma_map(scsi_cmd); 5743 if (nseg < 0) { 5744 if (printk_ratelimit()) 5745 dev_err(&ioa_cfg->pdev->dev, "scsi_dma_map failed!\n"); 5746 return -1; 5747 } 5748 5749 ipr_cmd->dma_use_sg = nseg; 5750 5751 ioarcb->data_transfer_length = cpu_to_be32(length); 5752 ioarcb->ioadl_len = 5753 cpu_to_be32(sizeof(struct ipr_ioadl64_desc) * ipr_cmd->dma_use_sg); 5754 5755 if (scsi_cmd->sc_data_direction == DMA_TO_DEVICE) { 5756 ioadl_flags = IPR_IOADL_FLAGS_WRITE; 5757 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ; 5758 } else if (scsi_cmd->sc_data_direction == DMA_FROM_DEVICE) 5759 ioadl_flags = IPR_IOADL_FLAGS_READ; 5760 5761 scsi_for_each_sg(scsi_cmd, sg, ipr_cmd->dma_use_sg, i) { 5762 ioadl64[i].flags = cpu_to_be32(ioadl_flags); 5763 ioadl64[i].data_len = cpu_to_be32(sg_dma_len(sg)); 5764 ioadl64[i].address = cpu_to_be64(sg_dma_address(sg)); 5765 } 5766 5767 ioadl64[i-1].flags |= cpu_to_be32(IPR_IOADL_FLAGS_LAST); 5768 return 0; 5769 } 5770 5771 /** 5772 * ipr_build_ioadl - Build a scatter/gather list and map the buffer 5773 * @ioa_cfg: ioa config struct 5774 * @ipr_cmd: ipr command struct 5775 * 5776 * Return value: 5777 * 0 on success / -1 on failure 5778 **/ 5779 static int ipr_build_ioadl(struct ipr_ioa_cfg *ioa_cfg, 5780 struct ipr_cmnd *ipr_cmd) 5781 { 5782 int i, nseg; 5783 struct scatterlist *sg; 5784 u32 length; 5785 u32 ioadl_flags = 0; 5786 struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd; 5787 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb; 5788 struct ipr_ioadl_desc *ioadl = ipr_cmd->i.ioadl; 5789 5790 length = scsi_bufflen(scsi_cmd); 5791 if (!length) 5792 return 0; 5793 5794 nseg = scsi_dma_map(scsi_cmd); 5795 if (nseg < 0) { 5796 dev_err(&ioa_cfg->pdev->dev, "scsi_dma_map failed!\n"); 5797 return -1; 5798 } 5799 5800 ipr_cmd->dma_use_sg = nseg; 5801 5802 if (scsi_cmd->sc_data_direction == DMA_TO_DEVICE) { 5803 ioadl_flags = IPR_IOADL_FLAGS_WRITE; 
5804 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ; 5805 ioarcb->data_transfer_length = cpu_to_be32(length); 5806 ioarcb->ioadl_len = 5807 cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg); 5808 } else if (scsi_cmd->sc_data_direction == DMA_FROM_DEVICE) { 5809 ioadl_flags = IPR_IOADL_FLAGS_READ; 5810 ioarcb->read_data_transfer_length = cpu_to_be32(length); 5811 ioarcb->read_ioadl_len = 5812 cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg); 5813 } 5814 5815 if (ipr_cmd->dma_use_sg <= ARRAY_SIZE(ioarcb->u.add_data.u.ioadl)) { 5816 ioadl = ioarcb->u.add_data.u.ioadl; 5817 ioarcb->write_ioadl_addr = cpu_to_be32((ipr_cmd->dma_addr) + 5818 offsetof(struct ipr_ioarcb, u.add_data)); 5819 ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr; 5820 } 5821 5822 scsi_for_each_sg(scsi_cmd, sg, ipr_cmd->dma_use_sg, i) { 5823 ioadl[i].flags_and_data_len = 5824 cpu_to_be32(ioadl_flags | sg_dma_len(sg)); 5825 ioadl[i].address = cpu_to_be32(sg_dma_address(sg)); 5826 } 5827 5828 ioadl[i-1].flags_and_data_len |= cpu_to_be32(IPR_IOADL_FLAGS_LAST); 5829 return 0; 5830 } 5831 5832 /** 5833 * ipr_erp_done - Process completion of ERP for a device 5834 * @ipr_cmd: ipr command struct 5835 * 5836 * This function copies the sense buffer into the scsi_cmd 5837 * struct and pushes the scsi_done function. 5838 * 5839 * Return value: 5840 * nothing 5841 **/ 5842 static void ipr_erp_done(struct ipr_cmnd *ipr_cmd) 5843 { 5844 struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd; 5845 struct ipr_resource_entry *res = scsi_cmd->device->hostdata; 5846 u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc); 5847 5848 if (IPR_IOASC_SENSE_KEY(ioasc) > 0) { 5849 scsi_cmd->result |= (DID_ERROR << 16); 5850 scmd_printk(KERN_ERR, scsi_cmd, 5851 "Request Sense failed with IOASC: 0x%08X\n", ioasc); 5852 } else { 5853 memcpy(scsi_cmd->sense_buffer, ipr_cmd->sense_buffer, 5854 SCSI_SENSE_BUFFERSIZE); 5855 } 5856 5857 if (res) { 5858 if (!ipr_is_naca_model(res)) 5859 res->needs_sync_complete = 1; 5860 res->in_erp = 0; 5861 } 5862 scsi_dma_unmap(ipr_cmd->scsi_cmd); 5863 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q); 5864 scsi_cmd->scsi_done(scsi_cmd); 5865 } 5866 5867 /** 5868 * ipr_reinit_ipr_cmnd_for_erp - Re-initialize a cmnd block to be used for ERP 5869 * @ipr_cmd: ipr command struct 5870 * 5871 * Return value: 5872 * none 5873 **/ 5874 static void ipr_reinit_ipr_cmnd_for_erp(struct ipr_cmnd *ipr_cmd) 5875 { 5876 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb; 5877 struct ipr_ioasa *ioasa = &ipr_cmd->s.ioasa; 5878 dma_addr_t dma_addr = ipr_cmd->dma_addr; 5879 5880 memset(&ioarcb->cmd_pkt, 0, sizeof(struct ipr_cmd_pkt)); 5881 ioarcb->data_transfer_length = 0; 5882 ioarcb->read_data_transfer_length = 0; 5883 ioarcb->ioadl_len = 0; 5884 ioarcb->read_ioadl_len = 0; 5885 ioasa->hdr.ioasc = 0; 5886 ioasa->hdr.residual_data_len = 0; 5887 5888 if (ipr_cmd->ioa_cfg->sis64) 5889 ioarcb->u.sis64_addr_data.data_ioadl_addr = 5890 cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, i.ioadl64)); 5891 else { 5892 ioarcb->write_ioadl_addr = 5893 cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, i.ioadl)); 5894 ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr; 5895 } 5896 } 5897 5898 /** 5899 * ipr_erp_request_sense - Send request sense to a device 5900 * @ipr_cmd: ipr command struct 5901 * 5902 * This function sends a request sense to a device as a result 5903 * of a check condition. 
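 * As a quick sketch, the CDB built in the body below is the standard
 * 6-byte REQUEST SENSE:
 *
 *	cdb[0] = REQUEST_SENSE          (opcode 0x03)
 *	cdb[4] = SCSI_SENSE_BUFFERSIZE  (allocation length)
 *
 * with the sense buffer mapped for the read via ipr_init_ioadl().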
5904 * 5905 * Return value: 5906 * nothing 5907 **/ 5908 static void ipr_erp_request_sense(struct ipr_cmnd *ipr_cmd) 5909 { 5910 struct ipr_cmd_pkt *cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt; 5911 u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc); 5912 5913 if (IPR_IOASC_SENSE_KEY(ioasc) > 0) { 5914 ipr_erp_done(ipr_cmd); 5915 return; 5916 } 5917 5918 ipr_reinit_ipr_cmnd_for_erp(ipr_cmd); 5919 5920 cmd_pkt->request_type = IPR_RQTYPE_SCSICDB; 5921 cmd_pkt->cdb[0] = REQUEST_SENSE; 5922 cmd_pkt->cdb[4] = SCSI_SENSE_BUFFERSIZE; 5923 cmd_pkt->flags_hi |= IPR_FLAGS_HI_SYNC_OVERRIDE; 5924 cmd_pkt->flags_hi |= IPR_FLAGS_HI_NO_ULEN_CHK; 5925 cmd_pkt->timeout = cpu_to_be16(IPR_REQUEST_SENSE_TIMEOUT / HZ); 5926 5927 ipr_init_ioadl(ipr_cmd, ipr_cmd->sense_buffer_dma, 5928 SCSI_SENSE_BUFFERSIZE, IPR_IOADL_FLAGS_READ_LAST); 5929 5930 ipr_do_req(ipr_cmd, ipr_erp_done, ipr_timeout, 5931 IPR_REQUEST_SENSE_TIMEOUT * 2); 5932 } 5933 5934 /** 5935 * ipr_erp_cancel_all - Send cancel all to a device 5936 * @ipr_cmd: ipr command struct 5937 * 5938 * This function sends a cancel all to a device to clear the 5939 * queue. If we are running TCQ on the device, QERR is set to 1, 5940 * which means all outstanding ops have been dropped on the floor. 5941 * Cancel all will return them to us. 5942 * 5943 * Return value: 5944 * nothing 5945 **/ 5946 static void ipr_erp_cancel_all(struct ipr_cmnd *ipr_cmd) 5947 { 5948 struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd; 5949 struct ipr_resource_entry *res = scsi_cmd->device->hostdata; 5950 struct ipr_cmd_pkt *cmd_pkt; 5951 5952 res->in_erp = 1; 5953 5954 ipr_reinit_ipr_cmnd_for_erp(ipr_cmd); 5955 5956 if (!scsi_cmd->device->simple_tags) { 5957 ipr_erp_request_sense(ipr_cmd); 5958 return; 5959 } 5960 5961 cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt; 5962 cmd_pkt->request_type = IPR_RQTYPE_IOACMD; 5963 cmd_pkt->cdb[0] = IPR_CANCEL_ALL_REQUESTS; 5964 5965 ipr_do_req(ipr_cmd, ipr_erp_request_sense, ipr_timeout, 5966 IPR_CANCEL_ALL_TIMEOUT); 5967 } 5968 5969 /** 5970 * ipr_dump_ioasa - Dump contents of IOASA 5971 * @ioa_cfg: ioa config struct 5972 * @ipr_cmd: ipr command struct 5973 * @res: resource entry struct 5974 * 5975 * This function is invoked by the interrupt handler when ops 5976 * fail. It will log the IOASA if appropriate. Only called 5977 * for GPDD ops. 
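 * Roughly, the gating below works out to: a log_level below
 * IPR_DEFAULT_LOG_LEVEL dumps nothing; below IPR_MAX_LOG_LEVEL only
 * errors the IOA has not already logged (hdr.ilid == 0), on GSCSI
 * resources, with log_ioasa set in ipr_error_table, are dumped; at
 * IPR_MAX_LOG_LEVEL and above those three filters are skipped.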
5978 * 5979 * Return value: 5980 * none 5981 **/ 5982 static void ipr_dump_ioasa(struct ipr_ioa_cfg *ioa_cfg, 5983 struct ipr_cmnd *ipr_cmd, struct ipr_resource_entry *res) 5984 { 5985 int i; 5986 u16 data_len; 5987 u32 ioasc, fd_ioasc; 5988 struct ipr_ioasa *ioasa = &ipr_cmd->s.ioasa; 5989 __be32 *ioasa_data = (__be32 *)ioasa; 5990 int error_index; 5991 5992 ioasc = be32_to_cpu(ioasa->hdr.ioasc) & IPR_IOASC_IOASC_MASK; 5993 fd_ioasc = be32_to_cpu(ioasa->hdr.fd_ioasc) & IPR_IOASC_IOASC_MASK; 5994 5995 if (0 == ioasc) 5996 return; 5997 5998 if (ioa_cfg->log_level < IPR_DEFAULT_LOG_LEVEL) 5999 return; 6000 6001 if (ioasc == IPR_IOASC_BUS_WAS_RESET && fd_ioasc) 6002 error_index = ipr_get_error(fd_ioasc); 6003 else 6004 error_index = ipr_get_error(ioasc); 6005 6006 if (ioa_cfg->log_level < IPR_MAX_LOG_LEVEL) { 6007 /* Don't log an error if the IOA already logged one */ 6008 if (ioasa->hdr.ilid != 0) 6009 return; 6010 6011 if (!ipr_is_gscsi(res)) 6012 return; 6013 6014 if (ipr_error_table[error_index].log_ioasa == 0) 6015 return; 6016 } 6017 6018 ipr_res_err(ioa_cfg, res, "%s\n", ipr_error_table[error_index].error); 6019 6020 data_len = be16_to_cpu(ioasa->hdr.ret_stat_len); 6021 if (ioa_cfg->sis64 && sizeof(struct ipr_ioasa64) < data_len) 6022 data_len = sizeof(struct ipr_ioasa64); 6023 else if (!ioa_cfg->sis64 && sizeof(struct ipr_ioasa) < data_len) 6024 data_len = sizeof(struct ipr_ioasa); 6025 6026 ipr_err("IOASA Dump:\n"); 6027 6028 for (i = 0; i < data_len / 4; i += 4) { 6029 ipr_err("%08X: %08X %08X %08X %08X\n", i*4, 6030 be32_to_cpu(ioasa_data[i]), 6031 be32_to_cpu(ioasa_data[i+1]), 6032 be32_to_cpu(ioasa_data[i+2]), 6033 be32_to_cpu(ioasa_data[i+3])); 6034 } 6035 } 6036 6037 /** 6038 * ipr_gen_sense - Generate SCSI sense data from an IOASA 6039 * @ipr_cmd: ipr command struct 6040 * (sense data is written to ipr_cmd->scsi_cmd->sense_buffer) 6041 * 6042 * Return value: 6043 * none 6044 **/ 6045 static void ipr_gen_sense(struct ipr_cmnd *ipr_cmd) 6046 { 6047 u32 failing_lba; 6048 u8 *sense_buf = ipr_cmd->scsi_cmd->sense_buffer; 6049 struct ipr_resource_entry *res = ipr_cmd->scsi_cmd->device->hostdata; 6050 struct ipr_ioasa *ioasa = &ipr_cmd->s.ioasa; 6051 u32 ioasc = be32_to_cpu(ioasa->hdr.ioasc); 6052 6053 memset(sense_buf, 0, SCSI_SENSE_BUFFERSIZE); 6054 6055 if (ioasc >= IPR_FIRST_DRIVER_IOASC) 6056 return; 6057 6058 ipr_cmd->scsi_cmd->result = SAM_STAT_CHECK_CONDITION; 6059 6060 if (ipr_is_vset_device(res) && 6061 ioasc == IPR_IOASC_MED_DO_NOT_REALLOC && 6062 ioasa->u.vset.failing_lba_hi != 0) { 6063 sense_buf[0] = 0x72; 6064 sense_buf[1] = IPR_IOASC_SENSE_KEY(ioasc); 6065 sense_buf[2] = IPR_IOASC_SENSE_CODE(ioasc); 6066 sense_buf[3] = IPR_IOASC_SENSE_QUAL(ioasc); 6067 6068 sense_buf[7] = 12; 6069 sense_buf[8] = 0; 6070 sense_buf[9] = 0x0A; 6071 sense_buf[10] = 0x80; 6072 6073 failing_lba = be32_to_cpu(ioasa->u.vset.failing_lba_hi); 6074 6075 sense_buf[12] = (failing_lba & 0xff000000) >> 24; 6076 sense_buf[13] = (failing_lba & 0x00ff0000) >> 16; 6077 sense_buf[14] = (failing_lba & 0x0000ff00) >> 8; 6078 sense_buf[15] = failing_lba & 0x000000ff; 6079 6080 failing_lba = be32_to_cpu(ioasa->u.vset.failing_lba_lo); 6081 6082 sense_buf[16] = (failing_lba & 0xff000000) >> 24; 6083 sense_buf[17] = (failing_lba & 0x00ff0000) >> 16; 6084 sense_buf[18] = (failing_lba & 0x0000ff00) >> 8; 6085 sense_buf[19] = failing_lba & 0x000000ff; 6086 } else { 6087 sense_buf[0] = 0x70; 6088 sense_buf[2] = IPR_IOASC_SENSE_KEY(ioasc); 6089 sense_buf[12] = IPR_IOASC_SENSE_CODE(ioasc); 6090 sense_buf[13] = IPR_IOASC_SENSE_QUAL(ioasc); 6091 6092 /* Illegal
request */ 6093 if ((IPR_IOASC_SENSE_KEY(ioasc) == 0x05) && 6094 (be32_to_cpu(ioasa->hdr.ioasc_specific) & IPR_FIELD_POINTER_VALID)) { 6095 sense_buf[7] = 10; /* additional length */ 6096 6097 /* IOARCB was in error */ 6098 if (IPR_IOASC_SENSE_CODE(ioasc) == 0x24) 6099 sense_buf[15] = 0xC0; 6100 else /* Parameter data was invalid */ 6101 sense_buf[15] = 0x80; 6102 6103 sense_buf[16] = 6104 ((IPR_FIELD_POINTER_MASK & 6105 be32_to_cpu(ioasa->hdr.ioasc_specific)) >> 8) & 0xff; 6106 sense_buf[17] = 6107 (IPR_FIELD_POINTER_MASK & 6108 be32_to_cpu(ioasa->hdr.ioasc_specific)) & 0xff; 6109 } else { 6110 if (ioasc == IPR_IOASC_MED_DO_NOT_REALLOC) { 6111 if (ipr_is_vset_device(res)) 6112 failing_lba = be32_to_cpu(ioasa->u.vset.failing_lba_lo); 6113 else 6114 failing_lba = be32_to_cpu(ioasa->u.dasd.failing_lba); 6115 6116 sense_buf[0] |= 0x80; /* Or in the Valid bit */ 6117 sense_buf[3] = (failing_lba & 0xff000000) >> 24; 6118 sense_buf[4] = (failing_lba & 0x00ff0000) >> 16; 6119 sense_buf[5] = (failing_lba & 0x0000ff00) >> 8; 6120 sense_buf[6] = failing_lba & 0x000000ff; 6121 } 6122 6123 sense_buf[7] = 6; /* additional length */ 6124 } 6125 } 6126 } 6127 6128 /** 6129 * ipr_get_autosense - Copy autosense data to sense buffer 6130 * @ipr_cmd: ipr command struct 6131 * 6132 * This function copies the autosense buffer to the buffer 6133 * in the scsi_cmd, if there is autosense available. 6134 * 6135 * Return value: 6136 * 1 if autosense was available / 0 if not 6137 **/ 6138 static int ipr_get_autosense(struct ipr_cmnd *ipr_cmd) 6139 { 6140 struct ipr_ioasa *ioasa = &ipr_cmd->s.ioasa; 6141 struct ipr_ioasa64 *ioasa64 = &ipr_cmd->s.ioasa64; 6142 6143 if ((be32_to_cpu(ioasa->hdr.ioasc_specific) & IPR_AUTOSENSE_VALID) == 0) 6144 return 0; 6145 6146 if (ipr_cmd->ioa_cfg->sis64) 6147 memcpy(ipr_cmd->scsi_cmd->sense_buffer, ioasa64->auto_sense.data, 6148 min_t(u16, be16_to_cpu(ioasa64->auto_sense.auto_sense_len), 6149 SCSI_SENSE_BUFFERSIZE)); 6150 else 6151 memcpy(ipr_cmd->scsi_cmd->sense_buffer, ioasa->auto_sense.data, 6152 min_t(u16, be16_to_cpu(ioasa->auto_sense.auto_sense_len), 6153 SCSI_SENSE_BUFFERSIZE)); 6154 return 1; 6155 } 6156 6157 /** 6158 * ipr_erp_start - Process an error response for a SCSI op 6159 * @ioa_cfg: ioa config struct 6160 * @ipr_cmd: ipr command struct 6161 * 6162 * This function determines whether or not to initiate ERP 6163 * on the affected device. 
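 * A few representative cases from the switch below (sketch, not
 * exhaustive): IPR_IOASC_SYNC_REQUIRED becomes DID_IMM_RETRY, a bus
 * reset is reported upward and completed with DID_ERROR, and a check
 * condition without usable autosense kicks off ERP proper via
 * ipr_erp_cancel_all().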
6164 * 6165 * Return value: 6166 * nothing 6167 **/ 6168 static void ipr_erp_start(struct ipr_ioa_cfg *ioa_cfg, 6169 struct ipr_cmnd *ipr_cmd) 6170 { 6171 struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd; 6172 struct ipr_resource_entry *res = scsi_cmd->device->hostdata; 6173 u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc); 6174 u32 masked_ioasc = ioasc & IPR_IOASC_IOASC_MASK; 6175 6176 if (!res) { 6177 ipr_scsi_eh_done(ipr_cmd); 6178 return; 6179 } 6180 6181 if (!ipr_is_gscsi(res) && masked_ioasc != IPR_IOASC_HW_DEV_BUS_STATUS) 6182 ipr_gen_sense(ipr_cmd); 6183 6184 ipr_dump_ioasa(ioa_cfg, ipr_cmd, res); 6185 6186 switch (masked_ioasc) { 6187 case IPR_IOASC_ABORTED_CMD_TERM_BY_HOST: 6188 if (ipr_is_naca_model(res)) 6189 scsi_cmd->result |= (DID_ABORT << 16); 6190 else 6191 scsi_cmd->result |= (DID_IMM_RETRY << 16); 6192 break; 6193 case IPR_IOASC_IR_RESOURCE_HANDLE: 6194 case IPR_IOASC_IR_NO_CMDS_TO_2ND_IOA: 6195 scsi_cmd->result |= (DID_NO_CONNECT << 16); 6196 break; 6197 case IPR_IOASC_HW_SEL_TIMEOUT: 6198 scsi_cmd->result |= (DID_NO_CONNECT << 16); 6199 if (!ipr_is_naca_model(res)) 6200 res->needs_sync_complete = 1; 6201 break; 6202 case IPR_IOASC_SYNC_REQUIRED: 6203 if (!res->in_erp) 6204 res->needs_sync_complete = 1; 6205 scsi_cmd->result |= (DID_IMM_RETRY << 16); 6206 break; 6207 case IPR_IOASC_MED_DO_NOT_REALLOC: /* prevent retries */ 6208 case IPR_IOASA_IR_DUAL_IOA_DISABLED: 6209 scsi_cmd->result |= (DID_PASSTHROUGH << 16); 6210 break; 6211 case IPR_IOASC_BUS_WAS_RESET: 6212 case IPR_IOASC_BUS_WAS_RESET_BY_OTHER: 6213 /* 6214 * Report the bus reset and ask for a retry. The device 6215 * will give CC/UA the next command. 6216 */ 6217 if (!res->resetting_device) 6218 scsi_report_bus_reset(ioa_cfg->host, scsi_cmd->device->channel); 6219 scsi_cmd->result |= (DID_ERROR << 16); 6220 if (!ipr_is_naca_model(res)) 6221 res->needs_sync_complete = 1; 6222 break; 6223 case IPR_IOASC_HW_DEV_BUS_STATUS: 6224 scsi_cmd->result |= IPR_IOASC_SENSE_STATUS(ioasc); 6225 if (IPR_IOASC_SENSE_STATUS(ioasc) == SAM_STAT_CHECK_CONDITION) { 6226 if (!ipr_get_autosense(ipr_cmd)) { 6227 if (!ipr_is_naca_model(res)) { 6228 ipr_erp_cancel_all(ipr_cmd); 6229 return; 6230 } 6231 } 6232 } 6233 if (!ipr_is_naca_model(res)) 6234 res->needs_sync_complete = 1; 6235 break; 6236 case IPR_IOASC_NR_INIT_CMD_REQUIRED: 6237 break; 6238 case IPR_IOASC_IR_NON_OPTIMIZED: 6239 if (res->raw_mode) { 6240 res->raw_mode = 0; 6241 scsi_cmd->result |= (DID_IMM_RETRY << 16); 6242 } else 6243 scsi_cmd->result |= (DID_ERROR << 16); 6244 break; 6245 default: 6246 if (IPR_IOASC_SENSE_KEY(ioasc) > RECOVERED_ERROR) 6247 scsi_cmd->result |= (DID_ERROR << 16); 6248 if (!ipr_is_vset_device(res) && !ipr_is_naca_model(res)) 6249 res->needs_sync_complete = 1; 6250 break; 6251 } 6252 6253 scsi_dma_unmap(ipr_cmd->scsi_cmd); 6254 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q); 6255 scsi_cmd->scsi_done(scsi_cmd); 6256 } 6257 6258 /** 6259 * ipr_scsi_done - mid-layer done function 6260 * @ipr_cmd: ipr command struct 6261 * 6262 * This function is invoked by the interrupt handler for 6263 * ops generated by the SCSI mid-layer 6264 * 6265 * Return value: 6266 * none 6267 **/ 6268 static void ipr_scsi_done(struct ipr_cmnd *ipr_cmd) 6269 { 6270 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; 6271 struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd; 6272 u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc); 6273 unsigned long lock_flags; 6274 6275 scsi_set_resid(scsi_cmd, be32_to_cpu(ipr_cmd->s.ioasa.hdr.residual_data_len)); 6276 6277 if 
(likely(IPR_IOASC_SENSE_KEY(ioasc) == 0)) { 6278 scsi_dma_unmap(scsi_cmd); 6279 6280 spin_lock_irqsave(ipr_cmd->hrrq->lock, lock_flags); 6281 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q); 6282 scsi_cmd->scsi_done(scsi_cmd); 6283 spin_unlock_irqrestore(ipr_cmd->hrrq->lock, lock_flags); 6284 } else { 6285 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); 6286 spin_lock(&ipr_cmd->hrrq->_lock); 6287 ipr_erp_start(ioa_cfg, ipr_cmd); 6288 spin_unlock(&ipr_cmd->hrrq->_lock); 6289 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); 6290 } 6291 } 6292 6293 /** 6294 * ipr_queuecommand - Queue a mid-layer request 6295 * @shost: scsi host struct 6296 * @scsi_cmd: scsi command struct 6297 * 6298 * This function queues a request generated by the mid-layer. 6299 * 6300 * Return value: 6301 * 0 on success 6302 * SCSI_MLQUEUE_DEVICE_BUSY if device is busy 6303 * SCSI_MLQUEUE_HOST_BUSY if host is busy 6304 **/ 6305 static int ipr_queuecommand(struct Scsi_Host *shost, 6306 struct scsi_cmnd *scsi_cmd) 6307 { 6308 struct ipr_ioa_cfg *ioa_cfg; 6309 struct ipr_resource_entry *res; 6310 struct ipr_ioarcb *ioarcb; 6311 struct ipr_cmnd *ipr_cmd; 6312 unsigned long hrrq_flags, lock_flags; 6313 int rc; 6314 struct ipr_hrr_queue *hrrq; 6315 int hrrq_id; 6316 6317 ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata; 6318 6319 scsi_cmd->result = (DID_OK << 16); 6320 res = scsi_cmd->device->hostdata; 6321 6322 if (ipr_is_gata(res) && res->sata_port) { 6323 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); 6324 rc = ata_sas_queuecmd(scsi_cmd, res->sata_port->ap); 6325 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); 6326 return rc; 6327 } 6328 6329 hrrq_id = ipr_get_hrrq_index(ioa_cfg); 6330 hrrq = &ioa_cfg->hrrq[hrrq_id]; 6331 6332 spin_lock_irqsave(hrrq->lock, hrrq_flags); 6333 /* 6334 * We are currently blocking all devices due to a host reset 6335 * We have told the host to stop giving us new requests, but 6336 * ERP ops don't count. 
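 * (Returning SCSI_MLQUEUE_HOST_BUSY in that case simply asks the
 * mid-layer to retry the command once the host is unblocked.)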
FIXME 6337 */ 6338 if (unlikely(!hrrq->allow_cmds && !hrrq->ioa_is_dead && !hrrq->removing_ioa)) { 6339 spin_unlock_irqrestore(hrrq->lock, hrrq_flags); 6340 return SCSI_MLQUEUE_HOST_BUSY; 6341 } 6342 6343 /* 6344 * FIXME - Create scsi_set_host_offline interface 6345 * and the ioa_is_dead check can be removed 6346 */ 6347 if (unlikely(hrrq->ioa_is_dead || hrrq->removing_ioa || !res)) { 6348 spin_unlock_irqrestore(hrrq->lock, hrrq_flags); 6349 goto err_nodev; 6350 } 6351 6352 ipr_cmd = __ipr_get_free_ipr_cmnd(hrrq); 6353 if (ipr_cmd == NULL) { 6354 spin_unlock_irqrestore(hrrq->lock, hrrq_flags); 6355 return SCSI_MLQUEUE_HOST_BUSY; 6356 } 6357 spin_unlock_irqrestore(hrrq->lock, hrrq_flags); 6358 6359 ipr_init_ipr_cmnd(ipr_cmd, ipr_scsi_done); 6360 ioarcb = &ipr_cmd->ioarcb; 6361 6362 memcpy(ioarcb->cmd_pkt.cdb, scsi_cmd->cmnd, scsi_cmd->cmd_len); 6363 ipr_cmd->scsi_cmd = scsi_cmd; 6364 ipr_cmd->done = ipr_scsi_eh_done; 6365 6366 if (ipr_is_gscsi(res) || ipr_is_vset_device(res)) { 6367 if (scsi_cmd->underflow == 0) 6368 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_ULEN_CHK; 6369 6370 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_LINK_DESC; 6371 if (ipr_is_gscsi(res) && res->reset_occurred) { 6372 res->reset_occurred = 0; 6373 ioarcb->cmd_pkt.flags_lo |= IPR_FLAGS_LO_DELAY_AFTER_RST; 6374 } 6375 ioarcb->cmd_pkt.flags_lo |= IPR_FLAGS_LO_ALIGNED_BFR; 6376 if (scsi_cmd->flags & SCMD_TAGGED) 6377 ioarcb->cmd_pkt.flags_lo |= IPR_FLAGS_LO_SIMPLE_TASK; 6378 else 6379 ioarcb->cmd_pkt.flags_lo |= IPR_FLAGS_LO_UNTAGGED_TASK; 6380 } 6381 6382 if (scsi_cmd->cmnd[0] >= 0xC0 && 6383 (!ipr_is_gscsi(res) || scsi_cmd->cmnd[0] == IPR_QUERY_RSRC_STATE)) { 6384 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD; 6385 } 6386 if (res->raw_mode && ipr_is_af_dasd_device(res)) { 6387 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_PIPE; 6388 6389 if (scsi_cmd->underflow == 0) 6390 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_ULEN_CHK; 6391 } 6392 6393 if (ioa_cfg->sis64) 6394 rc = ipr_build_ioadl64(ioa_cfg, ipr_cmd); 6395 else 6396 rc = ipr_build_ioadl(ioa_cfg, ipr_cmd); 6397 6398 spin_lock_irqsave(hrrq->lock, hrrq_flags); 6399 if (unlikely(rc || (!hrrq->allow_cmds && !hrrq->ioa_is_dead))) { 6400 list_add_tail(&ipr_cmd->queue, &hrrq->hrrq_free_q); 6401 spin_unlock_irqrestore(hrrq->lock, hrrq_flags); 6402 if (!rc) 6403 scsi_dma_unmap(scsi_cmd); 6404 return SCSI_MLQUEUE_HOST_BUSY; 6405 } 6406 6407 if (unlikely(hrrq->ioa_is_dead)) { 6408 list_add_tail(&ipr_cmd->queue, &hrrq->hrrq_free_q); 6409 spin_unlock_irqrestore(hrrq->lock, hrrq_flags); 6410 scsi_dma_unmap(scsi_cmd); 6411 goto err_nodev; 6412 } 6413 6414 ioarcb->res_handle = res->res_handle; 6415 if (res->needs_sync_complete) { 6416 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_SYNC_COMPLETE; 6417 res->needs_sync_complete = 0; 6418 } 6419 list_add_tail(&ipr_cmd->queue, &hrrq->hrrq_pending_q); 6420 ipr_trc_hook(ipr_cmd, IPR_TRACE_START, IPR_GET_RES_PHYS_LOC(res)); 6421 ipr_send_command(ipr_cmd); 6422 spin_unlock_irqrestore(hrrq->lock, hrrq_flags); 6423 return 0; 6424 6425 err_nodev: 6426 spin_lock_irqsave(hrrq->lock, hrrq_flags); 6427 memset(scsi_cmd->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE); 6428 scsi_cmd->result = (DID_NO_CONNECT << 16); 6429 scsi_cmd->scsi_done(scsi_cmd); 6430 spin_unlock_irqrestore(hrrq->lock, hrrq_flags); 6431 return 0; 6432 } 6433 6434 /** 6435 * ipr_ioctl - IOCTL handler 6436 * @sdev: scsi device struct 6437 * @cmd: IOCTL cmd 6438 * @arg: IOCTL arg 6439 * 6440 * Return value: 6441 * 0 on success / other on failure 6442 **/ 6443 static int ipr_ioctl(struct 
scsi_device *sdev, int cmd, void __user *arg) 6444 { 6445 struct ipr_resource_entry *res; 6446 6447 res = (struct ipr_resource_entry *)sdev->hostdata; 6448 if (res && ipr_is_gata(res)) { 6449 if (cmd == HDIO_GET_IDENTITY) 6450 return -ENOTTY; 6451 return ata_sas_scsi_ioctl(res->sata_port->ap, sdev, cmd, arg); 6452 } 6453 6454 return -EINVAL; 6455 } 6456 6457 /** 6458 * ipr_ioa_info - Get information about the card/driver 6459 * @host: scsi host struct 6460 * 6461 * Return value: 6462 * pointer to buffer with description string 6463 **/ 6464 static const char *ipr_ioa_info(struct Scsi_Host *host) 6465 { 6466 static char buffer[512]; 6467 struct ipr_ioa_cfg *ioa_cfg; 6468 unsigned long lock_flags = 0; 6469 6470 ioa_cfg = (struct ipr_ioa_cfg *) host->hostdata; 6471 6472 spin_lock_irqsave(host->host_lock, lock_flags); 6473 sprintf(buffer, "IBM %X Storage Adapter", ioa_cfg->type); 6474 spin_unlock_irqrestore(host->host_lock, lock_flags); 6475 6476 return buffer; 6477 } 6478 6479 static struct scsi_host_template driver_template = { 6480 .module = THIS_MODULE, 6481 .name = "IPR", 6482 .info = ipr_ioa_info, 6483 .ioctl = ipr_ioctl, 6484 .queuecommand = ipr_queuecommand, 6485 .eh_abort_handler = ipr_eh_abort, 6486 .eh_device_reset_handler = ipr_eh_dev_reset, 6487 .eh_host_reset_handler = ipr_eh_host_reset, 6488 .slave_alloc = ipr_slave_alloc, 6489 .slave_configure = ipr_slave_configure, 6490 .slave_destroy = ipr_slave_destroy, 6491 .scan_finished = ipr_scan_finished, 6492 .target_alloc = ipr_target_alloc, 6493 .target_destroy = ipr_target_destroy, 6494 .change_queue_depth = ipr_change_queue_depth, 6495 .bios_param = ipr_biosparam, 6496 .can_queue = IPR_MAX_COMMANDS, 6497 .this_id = -1, 6498 .sg_tablesize = IPR_MAX_SGLIST, 6499 .max_sectors = IPR_IOA_MAX_SECTORS, 6500 .cmd_per_lun = IPR_MAX_CMD_PER_LUN, 6501 .use_clustering = ENABLE_CLUSTERING, 6502 .shost_attrs = ipr_ioa_attrs, 6503 .sdev_attrs = ipr_dev_attrs, 6504 .proc_name = IPR_NAME, 6505 .use_blk_tags = 1, 6506 }; 6507 6508 /** 6509 * ipr_ata_phy_reset - libata phy_reset handler 6510 * @ap: ata port to reset 6511 * 6512 **/ 6513 static void ipr_ata_phy_reset(struct ata_port *ap) 6514 { 6515 unsigned long flags; 6516 struct ipr_sata_port *sata_port = ap->private_data; 6517 struct ipr_resource_entry *res = sata_port->res; 6518 struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg; 6519 int rc; 6520 6521 ENTER; 6522 spin_lock_irqsave(ioa_cfg->host->host_lock, flags); 6523 while (ioa_cfg->in_reset_reload) { 6524 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags); 6525 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload); 6526 spin_lock_irqsave(ioa_cfg->host->host_lock, flags); 6527 } 6528 6529 if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].allow_cmds) 6530 goto out_unlock; 6531 6532 rc = ipr_device_reset(ioa_cfg, res); 6533 6534 if (rc) { 6535 ap->link.device[0].class = ATA_DEV_NONE; 6536 goto out_unlock; 6537 } 6538 6539 ap->link.device[0].class = res->ata_class; 6540 if (ap->link.device[0].class == ATA_DEV_UNKNOWN) 6541 ap->link.device[0].class = ATA_DEV_NONE; 6542 6543 out_unlock: 6544 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags); 6545 LEAVE; 6546 } 6547 6548 /** 6549 * ipr_ata_post_internal - Cleanup after an internal command 6550 * @qc: ATA queued command 6551 * 6552 * Return value: 6553 * none 6554 **/ 6555 static void ipr_ata_post_internal(struct ata_queued_cmd *qc) 6556 { 6557 struct ipr_sata_port *sata_port = qc->ap->private_data; 6558 struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg; 6559 struct ipr_cmnd *ipr_cmd; 6560 struct
ipr_hrr_queue *hrrq; 6561 unsigned long flags; 6562 6563 spin_lock_irqsave(ioa_cfg->host->host_lock, flags); 6564 while (ioa_cfg->in_reset_reload) { 6565 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags); 6566 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload); 6567 spin_lock_irqsave(ioa_cfg->host->host_lock, flags); 6568 } 6569 6570 for_each_hrrq(hrrq, ioa_cfg) { 6571 spin_lock(&hrrq->_lock); 6572 list_for_each_entry(ipr_cmd, &hrrq->hrrq_pending_q, queue) { 6573 if (ipr_cmd->qc == qc) { 6574 ipr_device_reset(ioa_cfg, sata_port->res); 6575 break; 6576 } 6577 } 6578 spin_unlock(&hrrq->_lock); 6579 } 6580 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags); 6581 } 6582 6583 /** 6584 * ipr_copy_sata_tf - Copy a SATA taskfile to an IOA data structure 6585 * @regs: destination 6586 * @tf: source ATA taskfile 6587 * 6588 * Return value: 6589 * none 6590 **/ 6591 static void ipr_copy_sata_tf(struct ipr_ioarcb_ata_regs *regs, 6592 struct ata_taskfile *tf) 6593 { 6594 regs->feature = tf->feature; 6595 regs->nsect = tf->nsect; 6596 regs->lbal = tf->lbal; 6597 regs->lbam = tf->lbam; 6598 regs->lbah = tf->lbah; 6599 regs->device = tf->device; 6600 regs->command = tf->command; 6601 regs->hob_feature = tf->hob_feature; 6602 regs->hob_nsect = tf->hob_nsect; 6603 regs->hob_lbal = tf->hob_lbal; 6604 regs->hob_lbam = tf->hob_lbam; 6605 regs->hob_lbah = tf->hob_lbah; 6606 regs->ctl = tf->ctl; 6607 } 6608 6609 /** 6610 * ipr_sata_done - done function for SATA commands 6611 * @ipr_cmd: ipr command struct 6612 * 6613 * This function is invoked by the interrupt handler for 6614 * ops generated by the SCSI mid-layer to SATA devices 6615 * 6616 * Return value: 6617 * none 6618 **/ 6619 static void ipr_sata_done(struct ipr_cmnd *ipr_cmd) 6620 { 6621 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; 6622 struct ata_queued_cmd *qc = ipr_cmd->qc; 6623 struct ipr_sata_port *sata_port = qc->ap->private_data; 6624 struct ipr_resource_entry *res = sata_port->res; 6625 u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc); 6626 6627 spin_lock(&ipr_cmd->hrrq->_lock); 6628 if (ipr_cmd->ioa_cfg->sis64) 6629 memcpy(&sata_port->ioasa, &ipr_cmd->s.ioasa64.u.gata, 6630 sizeof(struct ipr_ioasa_gata)); 6631 else 6632 memcpy(&sata_port->ioasa, &ipr_cmd->s.ioasa.u.gata, 6633 sizeof(struct ipr_ioasa_gata)); 6634 ipr_dump_ioasa(ioa_cfg, ipr_cmd, res); 6635 6636 if (be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc_specific) & IPR_ATA_DEVICE_WAS_RESET) 6637 scsi_report_device_reset(ioa_cfg->host, res->bus, res->target); 6638 6639 if (IPR_IOASC_SENSE_KEY(ioasc) > RECOVERED_ERROR) 6640 qc->err_mask |= __ac_err_mask(sata_port->ioasa.status); 6641 else 6642 qc->err_mask |= ac_err_mask(sata_port->ioasa.status); 6643 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q); 6644 spin_unlock(&ipr_cmd->hrrq->_lock); 6645 ata_qc_complete(qc); 6646 } 6647 6648 /** 6649 * ipr_build_ata_ioadl64 - Build an ATA scatter/gather list 6650 * @ipr_cmd: ipr command struct 6651 * @qc: ATA queued command 6652 * 6653 **/ 6654 static void ipr_build_ata_ioadl64(struct ipr_cmnd *ipr_cmd, 6655 struct ata_queued_cmd *qc) 6656 { 6657 u32 ioadl_flags = 0; 6658 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb; 6659 struct ipr_ioadl64_desc *ioadl64 = ipr_cmd->i.ata_ioadl.ioadl64; 6660 struct ipr_ioadl64_desc *last_ioadl64 = NULL; 6661 int len = qc->nbytes; 6662 struct scatterlist *sg; 6663 unsigned int si; 6664 dma_addr_t dma_addr = ipr_cmd->dma_addr; 6665 6666 if (len == 0) 6667 return; 6668 6669 if (qc->dma_dir == DMA_TO_DEVICE) { 6670 ioadl_flags = 
IPR_IOADL_FLAGS_WRITE; 6671 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ; 6672 } else if (qc->dma_dir == DMA_FROM_DEVICE) 6673 ioadl_flags = IPR_IOADL_FLAGS_READ; 6674 6675 ioarcb->data_transfer_length = cpu_to_be32(len); 6676 ioarcb->ioadl_len = 6677 cpu_to_be32(sizeof(struct ipr_ioadl64_desc) * ipr_cmd->dma_use_sg); 6678 ioarcb->u.sis64_addr_data.data_ioadl_addr = 6679 cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, i.ata_ioadl.ioadl64)); 6680 6681 for_each_sg(qc->sg, sg, qc->n_elem, si) { 6682 ioadl64->flags = cpu_to_be32(ioadl_flags); 6683 ioadl64->data_len = cpu_to_be32(sg_dma_len(sg)); 6684 ioadl64->address = cpu_to_be64(sg_dma_address(sg)); 6685 6686 last_ioadl64 = ioadl64; 6687 ioadl64++; 6688 } 6689 6690 if (likely(last_ioadl64)) 6691 last_ioadl64->flags |= cpu_to_be32(IPR_IOADL_FLAGS_LAST); 6692 } 6693 6694 /** 6695 * ipr_build_ata_ioadl - Build an ATA scatter/gather list 6696 * @ipr_cmd: ipr command struct 6697 * @qc: ATA queued command 6698 * 6699 **/ 6700 static void ipr_build_ata_ioadl(struct ipr_cmnd *ipr_cmd, 6701 struct ata_queued_cmd *qc) 6702 { 6703 u32 ioadl_flags = 0; 6704 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb; 6705 struct ipr_ioadl_desc *ioadl = ipr_cmd->i.ioadl; 6706 struct ipr_ioadl_desc *last_ioadl = NULL; 6707 int len = qc->nbytes; 6708 struct scatterlist *sg; 6709 unsigned int si; 6710 6711 if (len == 0) 6712 return; 6713 6714 if (qc->dma_dir == DMA_TO_DEVICE) { 6715 ioadl_flags = IPR_IOADL_FLAGS_WRITE; 6716 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ; 6717 ioarcb->data_transfer_length = cpu_to_be32(len); 6718 ioarcb->ioadl_len = 6719 cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg); 6720 } else if (qc->dma_dir == DMA_FROM_DEVICE) { 6721 ioadl_flags = IPR_IOADL_FLAGS_READ; 6722 ioarcb->read_data_transfer_length = cpu_to_be32(len); 6723 ioarcb->read_ioadl_len = 6724 cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg); 6725 } 6726 6727 for_each_sg(qc->sg, sg, qc->n_elem, si) { 6728 ioadl->flags_and_data_len = cpu_to_be32(ioadl_flags | sg_dma_len(sg)); 6729 ioadl->address = cpu_to_be32(sg_dma_address(sg)); 6730 6731 last_ioadl = ioadl; 6732 ioadl++; 6733 } 6734 6735 if (likely(last_ioadl)) 6736 last_ioadl->flags_and_data_len |= cpu_to_be32(IPR_IOADL_FLAGS_LAST); 6737 } 6738 6739 /** 6740 * ipr_qc_defer - Get a free ipr_cmd 6741 * @qc: queued command 6742 * 6743 * Return value: 6744 * 0 if success 6745 **/ 6746 static int ipr_qc_defer(struct ata_queued_cmd *qc) 6747 { 6748 struct ata_port *ap = qc->ap; 6749 struct ipr_sata_port *sata_port = ap->private_data; 6750 struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg; 6751 struct ipr_cmnd *ipr_cmd; 6752 struct ipr_hrr_queue *hrrq; 6753 int hrrq_id; 6754 6755 hrrq_id = ipr_get_hrrq_index(ioa_cfg); 6756 hrrq = &ioa_cfg->hrrq[hrrq_id]; 6757 6758 qc->lldd_task = NULL; 6759 spin_lock(&hrrq->_lock); 6760 if (unlikely(hrrq->ioa_is_dead)) { 6761 spin_unlock(&hrrq->_lock); 6762 return 0; 6763 } 6764 6765 if (unlikely(!hrrq->allow_cmds)) { 6766 spin_unlock(&hrrq->_lock); 6767 return ATA_DEFER_LINK; 6768 } 6769 6770 ipr_cmd = __ipr_get_free_ipr_cmnd(hrrq); 6771 if (ipr_cmd == NULL) { 6772 spin_unlock(&hrrq->_lock); 6773 return ATA_DEFER_LINK; 6774 } 6775 6776 qc->lldd_task = ipr_cmd; 6777 spin_unlock(&hrrq->_lock); 6778 return 0; 6779 } 6780 6781 /** 6782 * ipr_qc_issue - Issue a SATA qc to a device 6783 * @qc: queued command 6784 * 6785 * Return value: 6786 * 0 if success 6787 **/ 6788 static unsigned int ipr_qc_issue(struct ata_queued_cmd *qc) 6789 { 6790 struct ata_port 
*ap = qc->ap; 6791 struct ipr_sata_port *sata_port = ap->private_data; 6792 struct ipr_resource_entry *res = sata_port->res; 6793 struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg; 6794 struct ipr_cmnd *ipr_cmd; 6795 struct ipr_ioarcb *ioarcb; 6796 struct ipr_ioarcb_ata_regs *regs; 6797 6798 if (qc->lldd_task == NULL) 6799 ipr_qc_defer(qc); 6800 6801 ipr_cmd = qc->lldd_task; 6802 if (ipr_cmd == NULL) 6803 return AC_ERR_SYSTEM; 6804 6805 qc->lldd_task = NULL; 6806 spin_lock(&ipr_cmd->hrrq->_lock); 6807 if (unlikely(!ipr_cmd->hrrq->allow_cmds || 6808 ipr_cmd->hrrq->ioa_is_dead)) { 6809 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q); 6810 spin_unlock(&ipr_cmd->hrrq->_lock); 6811 return AC_ERR_SYSTEM; 6812 } 6813 6814 ipr_init_ipr_cmnd(ipr_cmd, ipr_lock_and_done); 6815 ioarcb = &ipr_cmd->ioarcb; 6816 6817 if (ioa_cfg->sis64) { 6818 regs = &ipr_cmd->i.ata_ioadl.regs; 6819 ioarcb->add_cmd_parms_offset = cpu_to_be16(sizeof(*ioarcb)); 6820 } else 6821 regs = &ioarcb->u.add_data.u.regs; 6822 6823 memset(regs, 0, sizeof(*regs)); 6824 ioarcb->add_cmd_parms_len = cpu_to_be16(sizeof(*regs)); 6825 6826 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q); 6827 ipr_cmd->qc = qc; 6828 ipr_cmd->done = ipr_sata_done; 6829 ipr_cmd->ioarcb.res_handle = res->res_handle; 6830 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_ATA_PASSTHRU; 6831 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_LINK_DESC; 6832 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_ULEN_CHK; 6833 ipr_cmd->dma_use_sg = qc->n_elem; 6834 6835 if (ioa_cfg->sis64) 6836 ipr_build_ata_ioadl64(ipr_cmd, qc); 6837 else 6838 ipr_build_ata_ioadl(ipr_cmd, qc); 6839 6840 regs->flags |= IPR_ATA_FLAG_STATUS_ON_GOOD_COMPLETION; 6841 ipr_copy_sata_tf(regs, &qc->tf); 6842 memcpy(ioarcb->cmd_pkt.cdb, qc->cdb, IPR_MAX_CDB_LEN); 6843 ipr_trc_hook(ipr_cmd, IPR_TRACE_START, IPR_GET_RES_PHYS_LOC(res)); 6844 6845 switch (qc->tf.protocol) { 6846 case ATA_PROT_NODATA: 6847 case ATA_PROT_PIO: 6848 break; 6849 6850 case ATA_PROT_DMA: 6851 regs->flags |= IPR_ATA_FLAG_XFER_TYPE_DMA; 6852 break; 6853 6854 case ATAPI_PROT_PIO: 6855 case ATAPI_PROT_NODATA: 6856 regs->flags |= IPR_ATA_FLAG_PACKET_CMD; 6857 break; 6858 6859 case ATAPI_PROT_DMA: 6860 regs->flags |= IPR_ATA_FLAG_PACKET_CMD; 6861 regs->flags |= IPR_ATA_FLAG_XFER_TYPE_DMA; 6862 break; 6863 6864 default: 6865 WARN_ON(1); 6866 spin_unlock(&ipr_cmd->hrrq->_lock); 6867 return AC_ERR_INVALID; 6868 } 6869 6870 ipr_send_command(ipr_cmd); 6871 spin_unlock(&ipr_cmd->hrrq->_lock); 6872 6873 return 0; 6874 } 6875 6876 /** 6877 * ipr_qc_fill_rtf - Read result TF 6878 * @qc: ATA queued command 6879 * 6880 * Return value: 6881 * true 6882 **/ 6883 static bool ipr_qc_fill_rtf(struct ata_queued_cmd *qc) 6884 { 6885 struct ipr_sata_port *sata_port = qc->ap->private_data; 6886 struct ipr_ioasa_gata *g = &sata_port->ioasa; 6887 struct ata_taskfile *tf = &qc->result_tf; 6888 6889 tf->feature = g->error; 6890 tf->nsect = g->nsect; 6891 tf->lbal = g->lbal; 6892 tf->lbam = g->lbam; 6893 tf->lbah = g->lbah; 6894 tf->device = g->device; 6895 tf->command = g->status; 6896 tf->hob_nsect = g->hob_nsect; 6897 tf->hob_lbal = g->hob_lbal; 6898 tf->hob_lbam = g->hob_lbam; 6899 tf->hob_lbah = g->hob_lbah; 6900 6901 return true; 6902 } 6903 6904 static struct ata_port_operations ipr_sata_ops = { 6905 .phy_reset = ipr_ata_phy_reset, 6906 .hardreset = ipr_sata_reset, 6907 .post_internal_cmd = ipr_ata_post_internal, 6908 .qc_prep = ata_noop_qc_prep, 6909 .qc_defer = ipr_qc_defer, 6910 .qc_issue = ipr_qc_issue, 6911 .qc_fill_rtf = 
ipr_qc_fill_rtf, 6912 .port_start = ata_sas_port_start, 6913 .port_stop = ata_sas_port_stop 6914 }; 6915 6916 static struct ata_port_info sata_port_info = { 6917 .flags = ATA_FLAG_SATA | ATA_FLAG_PIO_DMA | 6918 ATA_FLAG_SAS_HOST, 6919 .pio_mask = ATA_PIO4_ONLY, 6920 .mwdma_mask = ATA_MWDMA2, 6921 .udma_mask = ATA_UDMA6, 6922 .port_ops = &ipr_sata_ops 6923 }; 6924 6925 #ifdef CONFIG_PPC_PSERIES 6926 static const u16 ipr_blocked_processors[] = { 6927 PVR_NORTHSTAR, 6928 PVR_PULSAR, 6929 PVR_POWER4, 6930 PVR_ICESTAR, 6931 PVR_SSTAR, 6932 PVR_POWER4p, 6933 PVR_630, 6934 PVR_630p 6935 }; 6936 6937 /** 6938 * ipr_invalid_adapter - Determine if this adapter is supported on this hardware 6939 * @ioa_cfg: ioa cfg struct 6940 * 6941 * Adapters that use Gemstone revision < 3.1 do not work reliably on 6942 * certain pSeries hardware. This function determines if the given 6943 * adapter is in one of these configurations or not. 6944 * 6945 * Return value: 6946 * 1 if adapter is not supported / 0 if adapter is supported 6947 **/ 6948 static int ipr_invalid_adapter(struct ipr_ioa_cfg *ioa_cfg) 6949 { 6950 int i; 6951 6952 if ((ioa_cfg->type == 0x5702) && (ioa_cfg->pdev->revision < 4)) { 6953 for (i = 0; i < ARRAY_SIZE(ipr_blocked_processors); i++) { 6954 if (pvr_version_is(ipr_blocked_processors[i])) 6955 return 1; 6956 } 6957 } 6958 return 0; 6959 } 6960 #else 6961 #define ipr_invalid_adapter(ioa_cfg) 0 6962 #endif 6963 6964 /** 6965 * ipr_ioa_bringdown_done - IOA bring down completion. 6966 * @ipr_cmd: ipr command struct 6967 * 6968 * This function processes the completion of an adapter bring down. 6969 * It wakes any reset sleepers. 6970 * 6971 * Return value: 6972 * IPR_RC_JOB_RETURN 6973 **/ 6974 static int ipr_ioa_bringdown_done(struct ipr_cmnd *ipr_cmd) 6975 { 6976 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; 6977 int i; 6978 6979 ENTER; 6980 if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].removing_ioa) { 6981 ipr_trace; 6982 spin_unlock_irq(ioa_cfg->host->host_lock); 6983 scsi_unblock_requests(ioa_cfg->host); 6984 spin_lock_irq(ioa_cfg->host->host_lock); 6985 } 6986 6987 ioa_cfg->in_reset_reload = 0; 6988 ioa_cfg->reset_retries = 0; 6989 for (i = 0; i < ioa_cfg->hrrq_num; i++) { 6990 spin_lock(&ioa_cfg->hrrq[i]._lock); 6991 ioa_cfg->hrrq[i].ioa_is_dead = 1; 6992 spin_unlock(&ioa_cfg->hrrq[i]._lock); 6993 } 6994 wmb(); 6995 6996 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q); 6997 wake_up_all(&ioa_cfg->reset_wait_q); 6998 LEAVE; 6999 7000 return IPR_RC_JOB_RETURN; 7001 } 7002 7003 /** 7004 * ipr_ioa_reset_done - IOA reset completion. 7005 * @ipr_cmd: ipr command struct 7006 * 7007 * This function processes the completion of an adapter reset. 7008 * It schedules any necessary mid-layer add/removes and 7009 * wakes any reset sleepers.
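 * Roughly: allow_cmds is re-enabled on every HRRQ, the queued hostrcbs
 * are re-posted as Log Data or Config Change HCAMs, the work queue is
 * kicked for device add/remove, and reset_wait_q sleepers are woken.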
7010 * 7011 * Return value: 7012 * IPR_RC_JOB_RETURN 7013 **/ 7014 static int ipr_ioa_reset_done(struct ipr_cmnd *ipr_cmd) 7015 { 7016 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; 7017 struct ipr_resource_entry *res; 7018 struct ipr_hostrcb *hostrcb, *temp; 7019 int i = 0, j; 7020 7021 ENTER; 7022 ioa_cfg->in_reset_reload = 0; 7023 for (j = 0; j < ioa_cfg->hrrq_num; j++) { 7024 spin_lock(&ioa_cfg->hrrq[j]._lock); 7025 ioa_cfg->hrrq[j].allow_cmds = 1; 7026 spin_unlock(&ioa_cfg->hrrq[j]._lock); 7027 } 7028 wmb(); 7029 ioa_cfg->reset_cmd = NULL; 7030 ioa_cfg->doorbell |= IPR_RUNTIME_RESET; 7031 7032 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) { 7033 if (res->add_to_ml || res->del_from_ml) { 7034 ipr_trace; 7035 break; 7036 } 7037 } 7038 schedule_work(&ioa_cfg->work_q); 7039 7040 list_for_each_entry_safe(hostrcb, temp, &ioa_cfg->hostrcb_free_q, queue) { 7041 list_del(&hostrcb->queue); 7042 if (i++ < IPR_NUM_LOG_HCAMS) 7043 ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_LOG_DATA, hostrcb); 7044 else 7045 ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE, hostrcb); 7046 } 7047 7048 scsi_report_bus_reset(ioa_cfg->host, IPR_VSET_BUS); 7049 dev_info(&ioa_cfg->pdev->dev, "IOA initialized.\n"); 7050 7051 ioa_cfg->reset_retries = 0; 7052 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q); 7053 wake_up_all(&ioa_cfg->reset_wait_q); 7054 7055 spin_unlock(ioa_cfg->host->host_lock); 7056 scsi_unblock_requests(ioa_cfg->host); 7057 spin_lock(ioa_cfg->host->host_lock); 7058 7059 if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].allow_cmds) 7060 scsi_block_requests(ioa_cfg->host); 7061 7062 schedule_work(&ioa_cfg->work_q); 7063 LEAVE; 7064 return IPR_RC_JOB_RETURN; 7065 } 7066 7067 /** 7068 * ipr_set_sup_dev_dflt - Initialize a Set Supported Device buffer 7069 * @supported_dev: supported device struct 7070 * @vpids: vendor product id struct 7071 * 7072 * Return value: 7073 * none 7074 **/ 7075 static void ipr_set_sup_dev_dflt(struct ipr_supported_device *supported_dev, 7076 struct ipr_std_inq_vpids *vpids) 7077 { 7078 memset(supported_dev, 0, sizeof(struct ipr_supported_device)); 7079 memcpy(&supported_dev->vpids, vpids, sizeof(struct ipr_std_inq_vpids)); 7080 supported_dev->num_records = 1; 7081 supported_dev->data_length = 7082 cpu_to_be16(sizeof(struct ipr_supported_device)); 7083 supported_dev->reserved = 0; 7084 } 7085 7086 /** 7087 * ipr_set_supported_devs - Send Set Supported Devices for a device 7088 * @ipr_cmd: ipr command struct 7089 * 7090 * This function sends a Set Supported Devices to the adapter 7091 * 7092 * Return value: 7093 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN 7094 **/ 7095 static int ipr_set_supported_devs(struct ipr_cmnd *ipr_cmd) 7096 { 7097 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; 7098 struct ipr_supported_device *supp_dev = &ioa_cfg->vpd_cbs->supp_dev; 7099 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb; 7100 struct ipr_resource_entry *res = ipr_cmd->u.res; 7101 7102 ipr_cmd->job_step = ipr_ioa_reset_done; 7103 7104 list_for_each_entry_continue(res, &ioa_cfg->used_res_q, queue) { 7105 if (!ipr_is_scsi_disk(res)) 7106 continue; 7107 7108 ipr_cmd->u.res = res; 7109 ipr_set_sup_dev_dflt(supp_dev, &res->std_inq_data.vpids); 7110 7111 ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE); 7112 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ; 7113 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD; 7114 7115 ioarcb->cmd_pkt.cdb[0] = IPR_SET_SUPPORTED_DEVICES; 7116 ioarcb->cmd_pkt.cdb[1] = IPR_SET_ALL_SUPPORTED_DEVICES; 7117 ioarcb->cmd_pkt.cdb[7] = (sizeof(struct 
ipr_supported_device) >> 8) & 0xff; 7118 ioarcb->cmd_pkt.cdb[8] = sizeof(struct ipr_supported_device) & 0xff; 7119 7120 ipr_init_ioadl(ipr_cmd, 7121 ioa_cfg->vpd_cbs_dma + 7122 offsetof(struct ipr_misc_cbs, supp_dev), 7123 sizeof(struct ipr_supported_device), 7124 IPR_IOADL_FLAGS_WRITE_LAST); 7125 7126 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, 7127 IPR_SET_SUP_DEVICE_TIMEOUT); 7128 7129 if (!ioa_cfg->sis64) 7130 ipr_cmd->job_step = ipr_set_supported_devs; 7131 LEAVE; 7132 return IPR_RC_JOB_RETURN; 7133 } 7134 7135 LEAVE; 7136 return IPR_RC_JOB_CONTINUE; 7137 } 7138 7139 /** 7140 * ipr_get_mode_page - Locate specified mode page 7141 * @mode_pages: mode page buffer 7142 * @page_code: page code to find 7143 * @len: minimum required length for mode page 7144 * 7145 * Return value: 7146 * pointer to mode page / NULL on failure 7147 **/ 7148 static void *ipr_get_mode_page(struct ipr_mode_pages *mode_pages, 7149 u32 page_code, u32 len) 7150 { 7151 struct ipr_mode_page_hdr *mode_hdr; 7152 u32 page_length; 7153 u32 length; 7154 7155 if (!mode_pages || (mode_pages->hdr.length == 0)) 7156 return NULL; 7157 7158 length = (mode_pages->hdr.length + 1) - 4 - mode_pages->hdr.block_desc_len; 7159 mode_hdr = (struct ipr_mode_page_hdr *) 7160 (mode_pages->data + mode_pages->hdr.block_desc_len); 7161 7162 while (length) { 7163 if (IPR_GET_MODE_PAGE_CODE(mode_hdr) == page_code) { 7164 if (mode_hdr->page_length >= (len - sizeof(struct ipr_mode_page_hdr))) 7165 return mode_hdr; 7166 break; 7167 } else { 7168 page_length = (sizeof(struct ipr_mode_page_hdr) + 7169 mode_hdr->page_length); 7170 length -= page_length; 7171 mode_hdr = (struct ipr_mode_page_hdr *) 7172 ((unsigned long)mode_hdr + page_length); 7173 } 7174 } 7175 return NULL; 7176 } 7177 7178 /** 7179 * ipr_check_term_power - Check for term power errors 7180 * @ioa_cfg: ioa config struct 7181 * @mode_pages: IOAFP mode pages buffer 7182 * 7183 * Check the IOAFP's mode page 28 for term power errors 7184 * 7185 * Return value: 7186 * nothing 7187 **/ 7188 static void ipr_check_term_power(struct ipr_ioa_cfg *ioa_cfg, 7189 struct ipr_mode_pages *mode_pages) 7190 { 7191 int i; 7192 int entry_length; 7193 struct ipr_dev_bus_entry *bus; 7194 struct ipr_mode_page28 *mode_page; 7195 7196 mode_page = ipr_get_mode_page(mode_pages, 0x28, 7197 sizeof(struct ipr_mode_page28)); 7198 7199 entry_length = mode_page->entry_length; 7200 7201 bus = mode_page->bus; 7202 7203 for (i = 0; i < mode_page->num_entries; i++) { 7204 if (bus->flags & IPR_SCSI_ATTR_NO_TERM_PWR) { 7205 dev_err(&ioa_cfg->pdev->dev, 7206 "Term power is absent on scsi bus %d\n", 7207 bus->res_addr.bus); 7208 } 7209 7210 bus = (struct ipr_dev_bus_entry *)((char *)bus + entry_length); 7211 } 7212 } 7213 7214 /** 7215 * ipr_scsi_bus_speed_limit - Limit the SCSI speed based on SES table 7216 * @ioa_cfg: ioa config struct 7217 * 7218 * Looks through the config table checking for SES devices. If 7219 * the SES device is in the SES table indicating a maximum SCSI 7220 * bus speed, the speed is limited for the bus. 
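 * Worked example with made-up numbers: if bus_attr[i].max_xfer_rate
 * currently allows 320 but ipr_get_max_scsi_speed() reports that the
 * attached SES caps the bus at 160, the loop below lowers
 * bus_attr[i].max_xfer_rate to 160; a SES that imposes no lower limit
 * leaves the setting untouched.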
7221 * 7222 * Return value: 7223 * none 7224 **/ 7225 static void ipr_scsi_bus_speed_limit(struct ipr_ioa_cfg *ioa_cfg) 7226 { 7227 u32 max_xfer_rate; 7228 int i; 7229 7230 for (i = 0; i < IPR_MAX_NUM_BUSES; i++) { 7231 max_xfer_rate = ipr_get_max_scsi_speed(ioa_cfg, i, 7232 ioa_cfg->bus_attr[i].bus_width); 7233 7234 if (max_xfer_rate < ioa_cfg->bus_attr[i].max_xfer_rate) 7235 ioa_cfg->bus_attr[i].max_xfer_rate = max_xfer_rate; 7236 } 7237 } 7238 7239 /** 7240 * ipr_modify_ioafp_mode_page_28 - Modify IOAFP Mode Page 28 7241 * @ioa_cfg: ioa config struct 7242 * @mode_pages: mode page 28 buffer 7243 * 7244 * Updates mode page 28 based on driver configuration 7245 * 7246 * Return value: 7247 * none 7248 **/ 7249 static void ipr_modify_ioafp_mode_page_28(struct ipr_ioa_cfg *ioa_cfg, 7250 struct ipr_mode_pages *mode_pages) 7251 { 7252 int i, entry_length; 7253 struct ipr_dev_bus_entry *bus; 7254 struct ipr_bus_attributes *bus_attr; 7255 struct ipr_mode_page28 *mode_page; 7256 7257 mode_page = ipr_get_mode_page(mode_pages, 0x28, 7258 sizeof(struct ipr_mode_page28)); 7259 7260 entry_length = mode_page->entry_length; 7261 7262 /* Loop for each device bus entry */ 7263 for (i = 0, bus = mode_page->bus; 7264 i < mode_page->num_entries; 7265 i++, bus = (struct ipr_dev_bus_entry *)((u8 *)bus + entry_length)) { 7266 if (bus->res_addr.bus > IPR_MAX_NUM_BUSES) { 7267 dev_err(&ioa_cfg->pdev->dev, 7268 "Invalid resource address reported: 0x%08X\n", 7269 IPR_GET_PHYS_LOC(bus->res_addr)); 7270 continue; 7271 } 7272 7273 bus_attr = &ioa_cfg->bus_attr[i]; 7274 bus->extended_reset_delay = IPR_EXTENDED_RESET_DELAY; 7275 bus->bus_width = bus_attr->bus_width; 7276 bus->max_xfer_rate = cpu_to_be32(bus_attr->max_xfer_rate); 7277 bus->flags &= ~IPR_SCSI_ATTR_QAS_MASK; 7278 if (bus_attr->qas_enabled) 7279 bus->flags |= IPR_SCSI_ATTR_ENABLE_QAS; 7280 else 7281 bus->flags |= IPR_SCSI_ATTR_DISABLE_QAS; 7282 } 7283 } 7284 7285 /** 7286 * ipr_build_mode_select - Build a mode select command 7287 * @ipr_cmd: ipr command struct 7288 * @res_handle: resource handle to send command to 7289 * @parm: Byte 2 of the Mode Select command 7290 * @dma_addr: DMA buffer address 7291 * @xfer_len: data transfer length 7292 * 7293 * Return value: 7294 * none 7295 **/ 7296 static void ipr_build_mode_select(struct ipr_cmnd *ipr_cmd, 7297 __be32 res_handle, u8 parm, 7298 dma_addr_t dma_addr, u8 xfer_len) 7299 { 7300 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb; 7301 7302 ioarcb->res_handle = res_handle; 7303 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_SCSICDB; 7304 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ; 7305 ioarcb->cmd_pkt.cdb[0] = MODE_SELECT; 7306 ioarcb->cmd_pkt.cdb[1] = parm; 7307 ioarcb->cmd_pkt.cdb[4] = xfer_len; 7308 7309 ipr_init_ioadl(ipr_cmd, dma_addr, xfer_len, IPR_IOADL_FLAGS_WRITE_LAST); 7310 } 7311 7312 /** 7313 * ipr_ioafp_mode_select_page28 - Issue Mode Select Page 28 to IOA 7314 * @ipr_cmd: ipr command struct 7315 * 7316 * This function sets up the SCSI bus attributes and sends 7317 * a Mode Select for Page 28 to activate them.
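 * In sketch form, ipr_build_mode_select() turns this into the 6-byte CDB
 *
 *	cdb[0] = MODE_SELECT  (opcode 0x15)
 *	cdb[1] = 0x11         (presumably PF | SP)
 *	cdb[4] = length of the modified page 28 data
 *
 * written from the vpd_cbs mode_pages buffer.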
7318 * 7319 * Return value: 7320 * IPR_RC_JOB_RETURN 7321 **/ 7322 static int ipr_ioafp_mode_select_page28(struct ipr_cmnd *ipr_cmd) 7323 { 7324 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; 7325 struct ipr_mode_pages *mode_pages = &ioa_cfg->vpd_cbs->mode_pages; 7326 int length; 7327 7328 ENTER; 7329 ipr_scsi_bus_speed_limit(ioa_cfg); 7330 ipr_check_term_power(ioa_cfg, mode_pages); 7331 ipr_modify_ioafp_mode_page_28(ioa_cfg, mode_pages); 7332 length = mode_pages->hdr.length + 1; 7333 mode_pages->hdr.length = 0; 7334 7335 ipr_build_mode_select(ipr_cmd, cpu_to_be32(IPR_IOA_RES_HANDLE), 0x11, 7336 ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, mode_pages), 7337 length); 7338 7339 ipr_cmd->job_step = ipr_set_supported_devs; 7340 ipr_cmd->u.res = list_entry(ioa_cfg->used_res_q.next, 7341 struct ipr_resource_entry, queue); 7342 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT); 7343 7344 LEAVE; 7345 return IPR_RC_JOB_RETURN; 7346 } 7347 7348 /** 7349 * ipr_build_mode_sense - Builds a mode sense command 7350 * @ipr_cmd: ipr command struct 7351 * @res_handle: resource handle to send command to 7352 * @parm: Byte 2 of mode sense command 7353 * @dma_addr: DMA address of mode sense buffer 7354 * @xfer_len: Size of DMA buffer 7355 * 7356 * Return value: 7357 * none 7358 **/ 7359 static void ipr_build_mode_sense(struct ipr_cmnd *ipr_cmd, 7360 __be32 res_handle, 7361 u8 parm, dma_addr_t dma_addr, u8 xfer_len) 7362 { 7363 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb; 7364 7365 ioarcb->res_handle = res_handle; 7366 ioarcb->cmd_pkt.cdb[0] = MODE_SENSE; 7367 ioarcb->cmd_pkt.cdb[2] = parm; 7368 ioarcb->cmd_pkt.cdb[4] = xfer_len; 7369 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_SCSICDB; 7370 7371 ipr_init_ioadl(ipr_cmd, dma_addr, xfer_len, IPR_IOADL_FLAGS_READ_LAST); 7372 } 7373 7374 /** 7375 * ipr_reset_cmd_failed - Handle failure of IOA reset command 7376 * @ipr_cmd: ipr command struct 7377 * 7378 * This function handles the failure of an IOA bringup command. 7379 * 7380 * Return value: 7381 * IPR_RC_JOB_RETURN 7382 **/ 7383 static int ipr_reset_cmd_failed(struct ipr_cmnd *ipr_cmd) 7384 { 7385 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; 7386 u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc); 7387 7388 dev_err(&ioa_cfg->pdev->dev, 7389 "0x%02X failed with IOASC: 0x%08X\n", 7390 ipr_cmd->ioarcb.cmd_pkt.cdb[0], ioasc); 7391 7392 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE); 7393 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q); 7394 return IPR_RC_JOB_RETURN; 7395 } 7396 7397 /** 7398 * ipr_reset_mode_sense_failed - Handle failure of IOAFP mode sense 7399 * @ipr_cmd: ipr command struct 7400 * 7401 * This function handles the failure of a Mode Sense to the IOAFP. 7402 * Some adapters do not handle all mode pages.
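 * If the IOA fails the mode sense with an invalid request type or packet IOASC, the page is treated as unsupported and the job skips ahead to setting supported devices rather than failing the reset.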
7403 * 7404 * Return value: 7405 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN 7406 **/ 7407 static int ipr_reset_mode_sense_failed(struct ipr_cmnd *ipr_cmd) 7408 { 7409 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; 7410 u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc); 7411 7412 if (ioasc == IPR_IOASC_IR_INVALID_REQ_TYPE_OR_PKT) { 7413 ipr_cmd->job_step = ipr_set_supported_devs; 7414 ipr_cmd->u.res = list_entry(ioa_cfg->used_res_q.next, 7415 struct ipr_resource_entry, queue); 7416 return IPR_RC_JOB_CONTINUE; 7417 } 7418 7419 return ipr_reset_cmd_failed(ipr_cmd); 7420 } 7421 7422 /** 7423 * ipr_ioafp_mode_sense_page28 - Issue Mode Sense Page 28 to IOA 7424 * @ipr_cmd: ipr command struct 7425 * 7426 * This function sends a Page 28 mode sense to the IOA to 7427 * retrieve SCSI bus attributes. 7428 * 7429 * Return value: 7430 * IPR_RC_JOB_RETURN 7431 **/ 7432 static int ipr_ioafp_mode_sense_page28(struct ipr_cmnd *ipr_cmd) 7433 { 7434 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; 7435 7436 ENTER; 7437 ipr_build_mode_sense(ipr_cmd, cpu_to_be32(IPR_IOA_RES_HANDLE), 7438 0x28, ioa_cfg->vpd_cbs_dma + 7439 offsetof(struct ipr_misc_cbs, mode_pages), 7440 sizeof(struct ipr_mode_pages)); 7441 7442 ipr_cmd->job_step = ipr_ioafp_mode_select_page28; 7443 ipr_cmd->job_step_failed = ipr_reset_mode_sense_failed; 7444 7445 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT); 7446 7447 LEAVE; 7448 return IPR_RC_JOB_RETURN; 7449 } 7450 7451 /** 7452 * ipr_ioafp_mode_select_page24 - Issue Mode Select to IOA 7453 * @ipr_cmd: ipr command struct 7454 * 7455 * This function enables dual IOA RAID support if possible. 7456 * 7457 * Return value: 7458 * IPR_RC_JOB_RETURN 7459 **/ 7460 static int ipr_ioafp_mode_select_page24(struct ipr_cmnd *ipr_cmd) 7461 { 7462 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; 7463 struct ipr_mode_pages *mode_pages = &ioa_cfg->vpd_cbs->mode_pages; 7464 struct ipr_mode_page24 *mode_page; 7465 int length; 7466 7467 ENTER; 7468 mode_page = ipr_get_mode_page(mode_pages, 0x24, 7469 sizeof(struct ipr_mode_page24)); 7470 7471 if (mode_page) 7472 mode_page->flags |= IPR_ENABLE_DUAL_IOA_AF; 7473 7474 length = mode_pages->hdr.length + 1; 7475 mode_pages->hdr.length = 0; 7476 7477 ipr_build_mode_select(ipr_cmd, cpu_to_be32(IPR_IOA_RES_HANDLE), 0x11, 7478 ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, mode_pages), 7479 length); 7480 7481 ipr_cmd->job_step = ipr_ioafp_mode_sense_page28; 7482 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT); 7483 7484 LEAVE; 7485 return IPR_RC_JOB_RETURN; 7486 } 7487 7488 /** 7489 * ipr_reset_mode_sense_page24_failed - Handle failure of IOAFP mode sense 7490 * @ipr_cmd: ipr command struct 7491 * 7492 * This function handles the failure of a Mode Sense to the IOAFP. 7493 * Some adapters do not handle all mode pages. 7494 * 7495 * Return value: 7496 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN 7497 **/ 7498 static int ipr_reset_mode_sense_page24_failed(struct ipr_cmnd *ipr_cmd) 7499 { 7500 u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc); 7501 7502 if (ioasc == IPR_IOASC_IR_INVALID_REQ_TYPE_OR_PKT) { 7503 ipr_cmd->job_step = ipr_ioafp_mode_sense_page28; 7504 return IPR_RC_JOB_CONTINUE; 7505 } 7506 7507 return ipr_reset_cmd_failed(ipr_cmd); 7508 } 7509 7510 /** 7511 * ipr_ioafp_mode_sense_page24 - Issue Page 24 Mode Sense to IOA 7512 * @ipr_cmd: ipr command struct 7513 * 7514 * This function sends a mode sense to the IOA to retrieve 7515 * the IOA Advanced Function Control mode page.
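 * This step is only scheduled when the adapter reports dual IOA RAID capability and the ipr_dual_ioa_raid module parameter is set (see ipr_init_res_table); otherwise the reset job issues the Page 28 mode sense directly.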
7516 * 7517 * Return value: 7518 * IPR_RC_JOB_RETURN 7519 **/ 7520 static int ipr_ioafp_mode_sense_page24(struct ipr_cmnd *ipr_cmd) 7521 { 7522 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; 7523 7524 ENTER; 7525 ipr_build_mode_sense(ipr_cmd, cpu_to_be32(IPR_IOA_RES_HANDLE), 7526 0x24, ioa_cfg->vpd_cbs_dma + 7527 offsetof(struct ipr_misc_cbs, mode_pages), 7528 sizeof(struct ipr_mode_pages)); 7529 7530 ipr_cmd->job_step = ipr_ioafp_mode_select_page24; 7531 ipr_cmd->job_step_failed = ipr_reset_mode_sense_page24_failed; 7532 7533 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT); 7534 7535 LEAVE; 7536 return IPR_RC_JOB_RETURN; 7537 } 7538 7539 /** 7540 * ipr_init_res_table - Initialize the resource table 7541 * @ipr_cmd: ipr command struct 7542 * 7543 * This function looks through the existing resource table, comparing 7544 * it with the config table. This function will take care of old/new 7545 * devices and schedule adding/removing them from the mid-layer 7546 * as appropriate. 7547 * 7548 * Return value: 7549 * IPR_RC_JOB_CONTINUE 7550 **/ 7551 static int ipr_init_res_table(struct ipr_cmnd *ipr_cmd) 7552 { 7553 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; 7554 struct ipr_resource_entry *res, *temp; 7555 struct ipr_config_table_entry_wrapper cfgtew; 7556 int entries, found, flag, i; 7557 LIST_HEAD(old_res); 7558 7559 ENTER; 7560 if (ioa_cfg->sis64) 7561 flag = ioa_cfg->u.cfg_table64->hdr64.flags; 7562 else 7563 flag = ioa_cfg->u.cfg_table->hdr.flags; 7564 7565 if (flag & IPR_UCODE_DOWNLOAD_REQ) 7566 dev_err(&ioa_cfg->pdev->dev, "Microcode download required\n"); 7567 7568 list_for_each_entry_safe(res, temp, &ioa_cfg->used_res_q, queue) 7569 list_move_tail(&res->queue, &old_res); 7570 7571 if (ioa_cfg->sis64) 7572 entries = be16_to_cpu(ioa_cfg->u.cfg_table64->hdr64.num_entries); 7573 else 7574 entries = ioa_cfg->u.cfg_table->hdr.num_entries; 7575 7576 for (i = 0; i < entries; i++) { 7577 if (ioa_cfg->sis64) 7578 cfgtew.u.cfgte64 = &ioa_cfg->u.cfg_table64->dev[i]; 7579 else 7580 cfgtew.u.cfgte = &ioa_cfg->u.cfg_table->dev[i]; 7581 found = 0; 7582 7583 list_for_each_entry_safe(res, temp, &old_res, queue) { 7584 if (ipr_is_same_device(res, &cfgtew)) { 7585 list_move_tail(&res->queue, &ioa_cfg->used_res_q); 7586 found = 1; 7587 break; 7588 } 7589 } 7590 7591 if (!found) { 7592 if (list_empty(&ioa_cfg->free_res_q)) { 7593 dev_err(&ioa_cfg->pdev->dev, "Too many devices attached\n"); 7594 break; 7595 } 7596 7597 found = 1; 7598 res = list_entry(ioa_cfg->free_res_q.next, 7599 struct ipr_resource_entry, queue); 7600 list_move_tail(&res->queue, &ioa_cfg->used_res_q); 7601 ipr_init_res_entry(res, &cfgtew); 7602 res->add_to_ml = 1; 7603 } else if (res->sdev && (ipr_is_vset_device(res) || ipr_is_scsi_disk(res))) 7604 res->sdev->allow_restart = 1; 7605 7606 if (found) 7607 ipr_update_res_entry(res, &cfgtew); 7608 } 7609 7610 list_for_each_entry_safe(res, temp, &old_res, queue) { 7611 if (res->sdev) { 7612 res->del_from_ml = 1; 7613 res->res_handle = IPR_INVALID_RES_HANDLE; 7614 list_move_tail(&res->queue, &ioa_cfg->used_res_q); 7615 } 7616 } 7617 7618 list_for_each_entry_safe(res, temp, &old_res, queue) { 7619 ipr_clear_res_target(res); 7620 list_move_tail(&res->queue, &ioa_cfg->free_res_q); 7621 } 7622 7623 if (ioa_cfg->dual_raid && ipr_dual_ioa_raid) 7624 ipr_cmd->job_step = ipr_ioafp_mode_sense_page24; 7625 else 7626 ipr_cmd->job_step = ipr_ioafp_mode_sense_page28; 7627 7628 LEAVE; 7629 return IPR_RC_JOB_CONTINUE; 7630 } 7631 7632 /** 7633 * ipr_ioafp_query_ioa_cfg - Send a 
Query IOA Config to the adapter. 7634 * @ipr_cmd: ipr command struct 7635 * 7636 * This function sends a Query IOA Configuration command 7637 * to the adapter to retrieve the IOA configuration table. 7638 * 7639 * Return value: 7640 * IPR_RC_JOB_RETURN 7641 **/ 7642 static int ipr_ioafp_query_ioa_cfg(struct ipr_cmnd *ipr_cmd) 7643 { 7644 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; 7645 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb; 7646 struct ipr_inquiry_page3 *ucode_vpd = &ioa_cfg->vpd_cbs->page3_data; 7647 struct ipr_inquiry_cap *cap = &ioa_cfg->vpd_cbs->cap; 7648 7649 ENTER; 7650 if (cap->cap & IPR_CAP_DUAL_IOA_RAID) 7651 ioa_cfg->dual_raid = 1; 7652 dev_info(&ioa_cfg->pdev->dev, "Adapter firmware version: %02X%02X%02X%02X\n", 7653 ucode_vpd->major_release, ucode_vpd->card_type, 7654 ucode_vpd->minor_release[0], ucode_vpd->minor_release[1]); 7655 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD; 7656 ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE); 7657 7658 ioarcb->cmd_pkt.cdb[0] = IPR_QUERY_IOA_CONFIG; 7659 ioarcb->cmd_pkt.cdb[6] = (ioa_cfg->cfg_table_size >> 16) & 0xff; 7660 ioarcb->cmd_pkt.cdb[7] = (ioa_cfg->cfg_table_size >> 8) & 0xff; 7661 ioarcb->cmd_pkt.cdb[8] = ioa_cfg->cfg_table_size & 0xff; 7662 7663 ipr_init_ioadl(ipr_cmd, ioa_cfg->cfg_table_dma, ioa_cfg->cfg_table_size, 7664 IPR_IOADL_FLAGS_READ_LAST); 7665 7666 ipr_cmd->job_step = ipr_init_res_table; 7667 7668 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT); 7669 7670 LEAVE; 7671 return IPR_RC_JOB_RETURN; 7672 } 7673 7674 /** 7675 * ipr_ioafp_inquiry - Send an Inquiry to the adapter. 7676 * @ipr_cmd: ipr command struct * @flags: inquiry flags (CDB byte 1) * @page: page code * @dma_addr: DMA address of the inquiry buffer * @xfer_len: transfer length 7677 * 7678 * This utility function sends an inquiry to the adapter. 7679 * 7680 * Return value: 7681 * none 7682 **/ 7683 static void ipr_ioafp_inquiry(struct ipr_cmnd *ipr_cmd, u8 flags, u8 page, 7684 dma_addr_t dma_addr, u8 xfer_len) 7685 { 7686 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb; 7687 7688 ENTER; 7689 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_SCSICDB; 7690 ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE); 7691 7692 ioarcb->cmd_pkt.cdb[0] = INQUIRY; 7693 ioarcb->cmd_pkt.cdb[1] = flags; 7694 ioarcb->cmd_pkt.cdb[2] = page; 7695 ioarcb->cmd_pkt.cdb[4] = xfer_len; 7696 7697 ipr_init_ioadl(ipr_cmd, dma_addr, xfer_len, IPR_IOADL_FLAGS_READ_LAST); 7698 7699 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT); 7700 LEAVE; 7701 } 7702 7703 /** 7704 * ipr_inquiry_page_supported - Is the given inquiry page supported 7705 * @page0: inquiry page 0 buffer 7706 * @page: page code. 7707 * 7708 * This function determines if the specified inquiry page is supported. 7709 * 7710 * Return value: 7711 * 1 if page is supported / 0 if not 7712 **/ 7713 static int ipr_inquiry_page_supported(struct ipr_inquiry_page0 *page0, u8 page) 7714 { 7715 int i; 7716 7717 for (i = 0; i < min_t(u8, page0->len, IPR_INQUIRY_PAGE0_ENTRIES); i++) 7718 if (page0->page[i] == page) 7719 return 1; 7720 7721 return 0; 7722 } 7723 7724 /** 7725 * ipr_ioafp_cap_inquiry - Send a Page 0xD0 Inquiry to the adapter. 7726 * @ipr_cmd: ipr command struct 7727 * 7728 * This function sends a Page 0xD0 inquiry to the adapter 7729 * to retrieve adapter capabilities.
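 * The capabilities buffer is zeroed up front, so if page 0xD0 is not listed in the page 0 inquiry data, the job continues with an empty capability set instead of issuing the inquiry.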
7730 * 7731 * Return value: 7732 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN 7733 **/ 7734 static int ipr_ioafp_cap_inquiry(struct ipr_cmnd *ipr_cmd) 7735 { 7736 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; 7737 struct ipr_inquiry_page0 *page0 = &ioa_cfg->vpd_cbs->page0_data; 7738 struct ipr_inquiry_cap *cap = &ioa_cfg->vpd_cbs->cap; 7739 7740 ENTER; 7741 ipr_cmd->job_step = ipr_ioafp_query_ioa_cfg; 7742 memset(cap, 0, sizeof(*cap)); 7743 7744 if (ipr_inquiry_page_supported(page0, 0xD0)) { 7745 ipr_ioafp_inquiry(ipr_cmd, 1, 0xD0, 7746 ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, cap), 7747 sizeof(struct ipr_inquiry_cap)); 7748 return IPR_RC_JOB_RETURN; 7749 } 7750 7751 LEAVE; 7752 return IPR_RC_JOB_CONTINUE; 7753 } 7754 7755 /** 7756 * ipr_ioafp_page3_inquiry - Send a Page 3 Inquiry to the adapter. 7757 * @ipr_cmd: ipr command struct 7758 * 7759 * This function sends a Page 3 inquiry to the adapter 7760 * to retrieve software VPD information. 7761 * 7762 * Return value: 7763 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN 7764 **/ 7765 static int ipr_ioafp_page3_inquiry(struct ipr_cmnd *ipr_cmd) 7766 { 7767 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; 7768 7769 ENTER; 7770 7771 ipr_cmd->job_step = ipr_ioafp_cap_inquiry; 7772 7773 ipr_ioafp_inquiry(ipr_cmd, 1, 3, 7774 ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, page3_data), 7775 sizeof(struct ipr_inquiry_page3)); 7776 7777 LEAVE; 7778 return IPR_RC_JOB_RETURN; 7779 } 7780 7781 /** 7782 * ipr_ioafp_page0_inquiry - Send a Page 0 Inquiry to the adapter. 7783 * @ipr_cmd: ipr command struct 7784 * 7785 * This function sends a Page 0 inquiry to the adapter 7786 * to retrieve supported inquiry pages. 7787 * 7788 * Return value: 7789 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN 7790 **/ 7791 static int ipr_ioafp_page0_inquiry(struct ipr_cmnd *ipr_cmd) 7792 { 7793 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; 7794 char type[5]; 7795 7796 ENTER; 7797 7798 /* Grab the type out of the VPD and store it away */ 7799 memcpy(type, ioa_cfg->vpd_cbs->ioa_vpd.std_inq_data.vpids.product_id, 4); 7800 type[4] = '\0'; 7801 ioa_cfg->type = simple_strtoul((char *)type, NULL, 16); 7802 7803 if (ipr_invalid_adapter(ioa_cfg)) { 7804 dev_err(&ioa_cfg->pdev->dev, 7805 "Adapter not supported in this hardware configuration.\n"); 7806 7807 if (!ipr_testmode) { 7808 ioa_cfg->reset_retries += IPR_NUM_RESET_RELOAD_RETRIES; 7809 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE); 7810 list_add_tail(&ipr_cmd->queue, 7811 &ioa_cfg->hrrq->hrrq_free_q); 7812 return IPR_RC_JOB_RETURN; 7813 } 7814 } 7815 7816 ipr_cmd->job_step = ipr_ioafp_page3_inquiry; 7817 7818 ipr_ioafp_inquiry(ipr_cmd, 1, 0, 7819 ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, page0_data), 7820 sizeof(struct ipr_inquiry_page0)); 7821 7822 LEAVE; 7823 return IPR_RC_JOB_RETURN; 7824 } 7825 7826 /** 7827 * ipr_ioafp_std_inquiry - Send a Standard Inquiry to the adapter. 7828 * @ipr_cmd: ipr command struct 7829 * 7830 * This function sends a standard inquiry to the adapter. 7831 * 7832 * Return value: 7833 * IPR_RC_JOB_RETURN 7834 **/ 7835 static int ipr_ioafp_std_inquiry(struct ipr_cmnd *ipr_cmd) 7836 { 7837 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; 7838 7839 ENTER; 7840 ipr_cmd->job_step = ipr_ioafp_page0_inquiry; 7841 7842 ipr_ioafp_inquiry(ipr_cmd, 0, 0, 7843 ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, ioa_vpd), 7844 sizeof(struct ipr_ioa_vpd)); 7845 7846 LEAVE; 7847 return IPR_RC_JOB_RETURN; 7848 } 7849 7850 /** 7851 * ipr_ioafp_identify_hrrq - Send Identify Host RRQ. 
7852 * @ipr_cmd: ipr command struct 7853 * 7854 * This function sends an Identify Host Request Response Queue 7855 * command to establish the HRRQ with the adapter. 7856 * 7857 * Return value: 7858 * IPR_RC_JOB_RETURN 7859 **/ 7860 static int ipr_ioafp_identify_hrrq(struct ipr_cmnd *ipr_cmd) 7861 { 7862 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; 7863 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb; 7864 struct ipr_hrr_queue *hrrq; 7865 7866 ENTER; 7867 ipr_cmd->job_step = ipr_ioafp_std_inquiry; 7868 dev_info(&ioa_cfg->pdev->dev, "Starting IOA initialization sequence.\n"); 7869 7870 if (ioa_cfg->identify_hrrq_index < ioa_cfg->hrrq_num) { 7871 hrrq = &ioa_cfg->hrrq[ioa_cfg->identify_hrrq_index]; 7872 7873 ioarcb->cmd_pkt.cdb[0] = IPR_ID_HOST_RR_Q; 7874 ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE); 7875 7876 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD; 7877 if (ioa_cfg->sis64) 7878 ioarcb->cmd_pkt.cdb[1] = 0x1; 7879 7880 if (ioa_cfg->nvectors == 1) 7881 ioarcb->cmd_pkt.cdb[1] &= ~IPR_ID_HRRQ_SELE_ENABLE; 7882 else 7883 ioarcb->cmd_pkt.cdb[1] |= IPR_ID_HRRQ_SELE_ENABLE; 7884 7885 ioarcb->cmd_pkt.cdb[2] = 7886 ((u64) hrrq->host_rrq_dma >> 24) & 0xff; 7887 ioarcb->cmd_pkt.cdb[3] = 7888 ((u64) hrrq->host_rrq_dma >> 16) & 0xff; 7889 ioarcb->cmd_pkt.cdb[4] = 7890 ((u64) hrrq->host_rrq_dma >> 8) & 0xff; 7891 ioarcb->cmd_pkt.cdb[5] = 7892 ((u64) hrrq->host_rrq_dma) & 0xff; 7893 ioarcb->cmd_pkt.cdb[7] = 7894 ((sizeof(u32) * hrrq->size) >> 8) & 0xff; 7895 ioarcb->cmd_pkt.cdb[8] = 7896 (sizeof(u32) * hrrq->size) & 0xff; 7897 7898 if (ioarcb->cmd_pkt.cdb[1] & IPR_ID_HRRQ_SELE_ENABLE) 7899 ioarcb->cmd_pkt.cdb[9] = 7900 ioa_cfg->identify_hrrq_index; 7901 7902 if (ioa_cfg->sis64) { 7903 ioarcb->cmd_pkt.cdb[10] = 7904 ((u64) hrrq->host_rrq_dma >> 56) & 0xff; 7905 ioarcb->cmd_pkt.cdb[11] = 7906 ((u64) hrrq->host_rrq_dma >> 48) & 0xff; 7907 ioarcb->cmd_pkt.cdb[12] = 7908 ((u64) hrrq->host_rrq_dma >> 40) & 0xff; 7909 ioarcb->cmd_pkt.cdb[13] = 7910 ((u64) hrrq->host_rrq_dma >> 32) & 0xff; 7911 } 7912 7913 if (ioarcb->cmd_pkt.cdb[1] & IPR_ID_HRRQ_SELE_ENABLE) 7914 ioarcb->cmd_pkt.cdb[14] = 7915 ioa_cfg->identify_hrrq_index; 7916 7917 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, 7918 IPR_INTERNAL_TIMEOUT); 7919 7920 if (++ioa_cfg->identify_hrrq_index < ioa_cfg->hrrq_num) 7921 ipr_cmd->job_step = ipr_ioafp_identify_hrrq; 7922 7923 LEAVE; 7924 return IPR_RC_JOB_RETURN; 7925 } 7926 7927 LEAVE; 7928 return IPR_RC_JOB_CONTINUE; 7929 } 7930 7931 /** 7932 * ipr_reset_timer_done - Adapter reset timer function 7933 * @ipr_cmd: ipr command struct 7934 * 7935 * Description: This function is used in adapter reset processing 7936 * for timing events. If the reset_cmd pointer in the IOA 7937 * config struct is not this adapter's we are doing nested 7938 * resets and fail_all_ops will take care of freeing the 7939 * command block.
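 * Concretely, the command is only completed here when it is still the active reset command (ioa_cfg->reset_cmd == ipr_cmd); a stale timer belonging to a superseded reset leaves the block for the newer job to free.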
7940 * 7941 * Return value: 7942 * none 7943 **/ 7944 static void ipr_reset_timer_done(struct ipr_cmnd *ipr_cmd) 7945 { 7946 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; 7947 unsigned long lock_flags = 0; 7948 7949 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); 7950 7951 if (ioa_cfg->reset_cmd == ipr_cmd) { 7952 list_del(&ipr_cmd->queue); 7953 ipr_cmd->done(ipr_cmd); 7954 } 7955 7956 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); 7957 } 7958 7959 /** 7960 * ipr_reset_start_timer - Start a timer for adapter reset job 7961 * @ipr_cmd: ipr command struct 7962 * @timeout: timeout value 7963 * 7964 * Description: This function is used in adapter reset processing 7965 * for timing events. If the reset_cmd pointer in the IOA 7966 * config struct is not this adapter's we are doing nested 7967 * resets and fail_all_ops will take care of freeing the 7968 * command block. 7969 * 7970 * Return value: 7971 * none 7972 **/ 7973 static void ipr_reset_start_timer(struct ipr_cmnd *ipr_cmd, 7974 unsigned long timeout) 7975 { 7976 7977 ENTER; 7978 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q); 7979 ipr_cmd->done = ipr_reset_ioa_job; 7980 7981 ipr_cmd->timer.data = (unsigned long) ipr_cmd; 7982 ipr_cmd->timer.expires = jiffies + timeout; 7983 ipr_cmd->timer.function = (void (*)(unsigned long))ipr_reset_timer_done; 7984 add_timer(&ipr_cmd->timer); 7985 } 7986 7987 /** 7988 * ipr_init_ioa_mem - Initialize ioa_cfg control block 7989 * @ioa_cfg: ioa cfg struct 7990 * 7991 * Return value: 7992 * nothing 7993 **/ 7994 static void ipr_init_ioa_mem(struct ipr_ioa_cfg *ioa_cfg) 7995 { 7996 struct ipr_hrr_queue *hrrq; 7997 7998 for_each_hrrq(hrrq, ioa_cfg) { 7999 spin_lock(&hrrq->_lock); 8000 memset(hrrq->host_rrq, 0, sizeof(u32) * hrrq->size); 8001 8002 /* Initialize Host RRQ pointers */ 8003 hrrq->hrrq_start = hrrq->host_rrq; 8004 hrrq->hrrq_end = &hrrq->host_rrq[hrrq->size - 1]; 8005 hrrq->hrrq_curr = hrrq->hrrq_start; 8006 hrrq->toggle_bit = 1; 8007 spin_unlock(&hrrq->_lock); 8008 } 8009 wmb(); 8010 8011 ioa_cfg->identify_hrrq_index = 0; 8012 if (ioa_cfg->hrrq_num == 1) 8013 atomic_set(&ioa_cfg->hrrq_index, 0); 8014 else 8015 atomic_set(&ioa_cfg->hrrq_index, 1); 8016 8017 /* Zero out config table */ 8018 memset(ioa_cfg->u.cfg_table, 0, ioa_cfg->cfg_table_size); 8019 } 8020 8021 /** 8022 * ipr_reset_next_stage - Process IPL stage change based on feedback register. 
8023 * @ipr_cmd: ipr command struct 8024 * 8025 * Return value: 8026 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN 8027 **/ 8028 static int ipr_reset_next_stage(struct ipr_cmnd *ipr_cmd) 8029 { 8030 unsigned long stage, stage_time; 8031 u32 feedback; 8032 volatile u32 int_reg; 8033 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; 8034 u64 maskval = 0; 8035 8036 feedback = readl(ioa_cfg->regs.init_feedback_reg); 8037 stage = feedback & IPR_IPL_INIT_STAGE_MASK; 8038 stage_time = feedback & IPR_IPL_INIT_STAGE_TIME_MASK; 8039 8040 ipr_dbg("IPL stage = 0x%lx, IPL stage time = %ld\n", stage, stage_time); 8041 8042 /* sanity check the stage_time value */ 8043 if (stage_time == 0) 8044 stage_time = IPR_IPL_INIT_DEFAULT_STAGE_TIME; 8045 else if (stage_time < IPR_IPL_INIT_MIN_STAGE_TIME) 8046 stage_time = IPR_IPL_INIT_MIN_STAGE_TIME; 8047 else if (stage_time > IPR_LONG_OPERATIONAL_TIMEOUT) 8048 stage_time = IPR_LONG_OPERATIONAL_TIMEOUT; 8049 8050 if (stage == IPR_IPL_INIT_STAGE_UNKNOWN) { 8051 writel(IPR_PCII_IPL_STAGE_CHANGE, ioa_cfg->regs.set_interrupt_mask_reg); 8052 int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg); 8053 stage_time = ioa_cfg->transop_timeout; 8054 ipr_cmd->job_step = ipr_ioafp_identify_hrrq; 8055 } else if (stage == IPR_IPL_INIT_STAGE_TRANSOP) { 8056 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32); 8057 if (int_reg & IPR_PCII_IOA_TRANS_TO_OPER) { 8058 ipr_cmd->job_step = ipr_ioafp_identify_hrrq; 8059 maskval = IPR_PCII_IPL_STAGE_CHANGE; 8060 maskval = (maskval << 32) | IPR_PCII_IOA_TRANS_TO_OPER; 8061 writeq(maskval, ioa_cfg->regs.set_interrupt_mask_reg); 8062 int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg); 8063 return IPR_RC_JOB_CONTINUE; 8064 } 8065 } 8066 8067 ipr_cmd->timer.data = (unsigned long) ipr_cmd; 8068 ipr_cmd->timer.expires = jiffies + stage_time * HZ; 8069 ipr_cmd->timer.function = (void (*)(unsigned long))ipr_oper_timeout; 8070 ipr_cmd->done = ipr_reset_ioa_job; 8071 add_timer(&ipr_cmd->timer); 8072 8073 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q); 8074 8075 return IPR_RC_JOB_RETURN; 8076 } 8077 8078 /** 8079 * ipr_reset_enable_ioa - Enable the IOA following a reset. 8080 * @ipr_cmd: ipr command struct 8081 * 8082 * This function reinitializes some control blocks and 8083 * enables destructive diagnostics on the adapter. 8084 * 8085 * Return value: 8086 * IPR_RC_JOB_RETURN 8087 **/ 8088 static int ipr_reset_enable_ioa(struct ipr_cmnd *ipr_cmd) 8089 { 8090 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; 8091 volatile u32 int_reg; 8092 volatile u64 maskval; 8093 int i; 8094 8095 ENTER; 8096 ipr_cmd->job_step = ipr_ioafp_identify_hrrq; 8097 ipr_init_ioa_mem(ioa_cfg); 8098 8099 for (i = 0; i < ioa_cfg->hrrq_num; i++) { 8100 spin_lock(&ioa_cfg->hrrq[i]._lock); 8101 ioa_cfg->hrrq[i].allow_interrupts = 1; 8102 spin_unlock(&ioa_cfg->hrrq[i]._lock); 8103 } 8104 wmb(); 8105 if (ioa_cfg->sis64) { 8106 /* Set the adapter to the correct endian mode. 
*/ 8107 writel(IPR_ENDIAN_SWAP_KEY, ioa_cfg->regs.endian_swap_reg); 8108 int_reg = readl(ioa_cfg->regs.endian_swap_reg); 8109 } 8110 8111 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32); 8112 8113 if (int_reg & IPR_PCII_IOA_TRANS_TO_OPER) { 8114 writel((IPR_PCII_ERROR_INTERRUPTS | IPR_PCII_HRRQ_UPDATED), 8115 ioa_cfg->regs.clr_interrupt_mask_reg32); 8116 int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg); 8117 return IPR_RC_JOB_CONTINUE; 8118 } 8119 8120 /* Enable destructive diagnostics on IOA */ 8121 writel(ioa_cfg->doorbell, ioa_cfg->regs.set_uproc_interrupt_reg32); 8122 8123 if (ioa_cfg->sis64) { 8124 maskval = IPR_PCII_IPL_STAGE_CHANGE; 8125 maskval = (maskval << 32) | IPR_PCII_OPER_INTERRUPTS; 8126 writeq(maskval, ioa_cfg->regs.clr_interrupt_mask_reg); 8127 } else 8128 writel(IPR_PCII_OPER_INTERRUPTS, ioa_cfg->regs.clr_interrupt_mask_reg32); 8129 8130 int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg); 8131 8132 dev_info(&ioa_cfg->pdev->dev, "Initializing IOA.\n"); 8133 8134 if (ioa_cfg->sis64) { 8135 ipr_cmd->job_step = ipr_reset_next_stage; 8136 return IPR_RC_JOB_CONTINUE; 8137 } 8138 8139 ipr_cmd->timer.data = (unsigned long) ipr_cmd; 8140 ipr_cmd->timer.expires = jiffies + (ioa_cfg->transop_timeout * HZ); 8141 ipr_cmd->timer.function = (void (*)(unsigned long))ipr_oper_timeout; 8142 ipr_cmd->done = ipr_reset_ioa_job; 8143 add_timer(&ipr_cmd->timer); 8144 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q); 8145 8146 LEAVE; 8147 return IPR_RC_JOB_RETURN; 8148 } 8149 8150 /** 8151 * ipr_reset_wait_for_dump - Wait for a dump to timeout. 8152 * @ipr_cmd: ipr command struct 8153 * 8154 * This function is invoked when an adapter dump has run out 8155 * of processing time. 8156 * 8157 * Return value: 8158 * IPR_RC_JOB_CONTINUE 8159 **/ 8160 static int ipr_reset_wait_for_dump(struct ipr_cmnd *ipr_cmd) 8161 { 8162 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; 8163 8164 if (ioa_cfg->sdt_state == GET_DUMP) 8165 ioa_cfg->sdt_state = WAIT_FOR_DUMP; 8166 else if (ioa_cfg->sdt_state == READ_DUMP) 8167 ioa_cfg->sdt_state = ABORT_DUMP; 8168 8169 ioa_cfg->dump_timeout = 1; 8170 ipr_cmd->job_step = ipr_reset_alert; 8171 8172 return IPR_RC_JOB_CONTINUE; 8173 } 8174 8175 /** 8176 * ipr_unit_check_no_data - Log a unit check/no data error log 8177 * @ioa_cfg: ioa config struct 8178 * 8179 * Logs an error indicating the adapter unit checked, but for some 8180 * reason, we were unable to fetch the unit check buffer. 8181 * 8182 * Return value: 8183 * nothing 8184 **/ 8185 static void ipr_unit_check_no_data(struct ipr_ioa_cfg *ioa_cfg) 8186 { 8187 ioa_cfg->errors_logged++; 8188 dev_err(&ioa_cfg->pdev->dev, "IOA unit check with no data\n"); 8189 } 8190 8191 /** 8192 * ipr_get_unit_check_buffer - Get the unit check buffer from the IOA 8193 * @ioa_cfg: ioa config struct 8194 * 8195 * Fetches the unit check buffer from the adapter by clocking the data 8196 * through the mailbox register. 
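 * The mailbox register supplies the address of a smart dump table (SDT); entry 0 of that table locates the unit check buffer itself, which is copied into a free hostrcb and logged like a normal HCAM error.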
8197 * 8198 * Return value: 8199 * nothing 8200 **/ 8201 static void ipr_get_unit_check_buffer(struct ipr_ioa_cfg *ioa_cfg) 8202 { 8203 unsigned long mailbox; 8204 struct ipr_hostrcb *hostrcb; 8205 struct ipr_uc_sdt sdt; 8206 int rc, length; 8207 u32 ioasc; 8208 8209 mailbox = readl(ioa_cfg->ioa_mailbox); 8210 8211 if (!ioa_cfg->sis64 && !ipr_sdt_is_fmt2(mailbox)) { 8212 ipr_unit_check_no_data(ioa_cfg); 8213 return; 8214 } 8215 8216 memset(&sdt, 0, sizeof(struct ipr_uc_sdt)); 8217 rc = ipr_get_ldump_data_section(ioa_cfg, mailbox, (__be32 *) &sdt, 8218 (sizeof(struct ipr_uc_sdt)) / sizeof(__be32)); 8219 8220 if (rc || !(sdt.entry[0].flags & IPR_SDT_VALID_ENTRY) || 8221 ((be32_to_cpu(sdt.hdr.state) != IPR_FMT3_SDT_READY_TO_USE) && 8222 (be32_to_cpu(sdt.hdr.state) != IPR_FMT2_SDT_READY_TO_USE))) { 8223 ipr_unit_check_no_data(ioa_cfg); 8224 return; 8225 } 8226 8227 /* Find length of the first sdt entry (UC buffer) */ 8228 if (be32_to_cpu(sdt.hdr.state) == IPR_FMT3_SDT_READY_TO_USE) 8229 length = be32_to_cpu(sdt.entry[0].end_token); 8230 else 8231 length = (be32_to_cpu(sdt.entry[0].end_token) - 8232 be32_to_cpu(sdt.entry[0].start_token)) & 8233 IPR_FMT2_MBX_ADDR_MASK; 8234 8235 hostrcb = list_entry(ioa_cfg->hostrcb_free_q.next, 8236 struct ipr_hostrcb, queue); 8237 list_del(&hostrcb->queue); 8238 memset(&hostrcb->hcam, 0, sizeof(hostrcb->hcam)); 8239 8240 rc = ipr_get_ldump_data_section(ioa_cfg, 8241 be32_to_cpu(sdt.entry[0].start_token), 8242 (__be32 *)&hostrcb->hcam, 8243 min(length, (int)sizeof(hostrcb->hcam)) / sizeof(__be32)); 8244 8245 if (!rc) { 8246 ipr_handle_log_data(ioa_cfg, hostrcb); 8247 ioasc = be32_to_cpu(hostrcb->hcam.u.error.fd_ioasc); 8248 if (ioasc == IPR_IOASC_NR_IOA_RESET_REQUIRED && 8249 ioa_cfg->sdt_state == GET_DUMP) 8250 ioa_cfg->sdt_state = WAIT_FOR_DUMP; 8251 } else 8252 ipr_unit_check_no_data(ioa_cfg); 8253 8254 list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_free_q); 8255 } 8256 8257 /** 8258 * ipr_reset_get_unit_check_job - Call to get the unit check buffer. 8259 * @ipr_cmd: ipr command struct 8260 * 8261 * Description: This function will call to get the unit check buffer. 8262 * 8263 * Return value: 8264 * IPR_RC_JOB_RETURN 8265 **/ 8266 static int ipr_reset_get_unit_check_job(struct ipr_cmnd *ipr_cmd) 8267 { 8268 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; 8269 8270 ENTER; 8271 ioa_cfg->ioa_unit_checked = 0; 8272 ipr_get_unit_check_buffer(ioa_cfg); 8273 ipr_cmd->job_step = ipr_reset_alert; 8274 ipr_reset_start_timer(ipr_cmd, 0); 8275 8276 LEAVE; 8277 return IPR_RC_JOB_RETURN; 8278 } 8279 8280 /** 8281 * ipr_reset_restore_cfg_space - Restore PCI config space. 8282 * @ipr_cmd: ipr command struct 8283 * 8284 * Description: This function restores the saved PCI config space of 8285 * the adapter, fails all outstanding ops back to the callers, and 8286 * fetches the dump/unit check if applicable to this reset. 8287 * 8288 * Return value: 8289 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN 8290 **/ 8291 static int ipr_reset_restore_cfg_space(struct ipr_cmnd *ipr_cmd) 8292 { 8293 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; 8294 u32 int_reg; 8295 8296 ENTER; 8297 ioa_cfg->pdev->state_saved = true; 8298 pci_restore_state(ioa_cfg->pdev); 8299 8300 if (ipr_set_pcix_cmd_reg(ioa_cfg)) { 8301 ipr_cmd->s.ioasa.hdr.ioasc = cpu_to_be32(IPR_IOASC_PCI_ACCESS_ERROR); 8302 return IPR_RC_JOB_CONTINUE; 8303 } 8304 8305 ipr_fail_all_ops(ioa_cfg); 8306 8307 if (ioa_cfg->sis64) { 8308 /* Set the adapter to the correct endian mode. 
*/ 8309 writel(IPR_ENDIAN_SWAP_KEY, ioa_cfg->regs.endian_swap_reg); 8310 int_reg = readl(ioa_cfg->regs.endian_swap_reg); 8311 } 8312 8313 if (ioa_cfg->ioa_unit_checked) { 8314 if (ioa_cfg->sis64) { 8315 ipr_cmd->job_step = ipr_reset_get_unit_check_job; 8316 ipr_reset_start_timer(ipr_cmd, IPR_DUMP_DELAY_TIMEOUT); 8317 return IPR_RC_JOB_RETURN; 8318 } else { 8319 ioa_cfg->ioa_unit_checked = 0; 8320 ipr_get_unit_check_buffer(ioa_cfg); 8321 ipr_cmd->job_step = ipr_reset_alert; 8322 ipr_reset_start_timer(ipr_cmd, 0); 8323 return IPR_RC_JOB_RETURN; 8324 } 8325 } 8326 8327 if (ioa_cfg->in_ioa_bringdown) { 8328 ipr_cmd->job_step = ipr_ioa_bringdown_done; 8329 } else { 8330 ipr_cmd->job_step = ipr_reset_enable_ioa; 8331 8332 if (GET_DUMP == ioa_cfg->sdt_state) { 8333 ioa_cfg->sdt_state = READ_DUMP; 8334 ioa_cfg->dump_timeout = 0; 8335 if (ioa_cfg->sis64) 8336 ipr_reset_start_timer(ipr_cmd, IPR_SIS64_DUMP_TIMEOUT); 8337 else 8338 ipr_reset_start_timer(ipr_cmd, IPR_SIS32_DUMP_TIMEOUT); 8339 ipr_cmd->job_step = ipr_reset_wait_for_dump; 8340 schedule_work(&ioa_cfg->work_q); 8341 return IPR_RC_JOB_RETURN; 8342 } 8343 } 8344 8345 LEAVE; 8346 return IPR_RC_JOB_CONTINUE; 8347 } 8348 8349 /** 8350 * ipr_reset_bist_done - BIST has completed on the adapter. 8351 * @ipr_cmd: ipr command struct 8352 * 8353 * Description: Unblock config space and resume the reset process. 8354 * 8355 * Return value: 8356 * IPR_RC_JOB_CONTINUE 8357 **/ 8358 static int ipr_reset_bist_done(struct ipr_cmnd *ipr_cmd) 8359 { 8360 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; 8361 8362 ENTER; 8363 if (ioa_cfg->cfg_locked) 8364 pci_cfg_access_unlock(ioa_cfg->pdev); 8365 ioa_cfg->cfg_locked = 0; 8366 ipr_cmd->job_step = ipr_reset_restore_cfg_space; 8367 LEAVE; 8368 return IPR_RC_JOB_CONTINUE; 8369 } 8370 8371 /** 8372 * ipr_reset_start_bist - Run BIST on the adapter. 8373 * @ipr_cmd: ipr command struct 8374 * 8375 * Description: This function runs BIST on the adapter, then delays 2 seconds. 8376 * 8377 * Return value: 8378 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN 8379 **/ 8380 static int ipr_reset_start_bist(struct ipr_cmnd *ipr_cmd) 8381 { 8382 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; 8383 int rc = PCIBIOS_SUCCESSFUL; 8384 8385 ENTER; 8386 if (ioa_cfg->ipr_chip->bist_method == IPR_MMIO) 8387 writel(IPR_UPROCI_SIS64_START_BIST, 8388 ioa_cfg->regs.set_uproc_interrupt_reg32); 8389 else 8390 rc = pci_write_config_byte(ioa_cfg->pdev, PCI_BIST, PCI_BIST_START); 8391 8392 if (rc == PCIBIOS_SUCCESSFUL) { 8393 ipr_cmd->job_step = ipr_reset_bist_done; 8394 ipr_reset_start_timer(ipr_cmd, IPR_WAIT_FOR_BIST_TIMEOUT); 8395 rc = IPR_RC_JOB_RETURN; 8396 } else { 8397 if (ioa_cfg->cfg_locked) 8398 pci_cfg_access_unlock(ipr_cmd->ioa_cfg->pdev); 8399 ioa_cfg->cfg_locked = 0; 8400 ipr_cmd->s.ioasa.hdr.ioasc = cpu_to_be32(IPR_IOASC_PCI_ACCESS_ERROR); 8401 rc = IPR_RC_JOB_CONTINUE; 8402 } 8403 8404 LEAVE; 8405 return rc; 8406 } 8407 8408 /** 8409 * ipr_reset_slot_reset_done - Clear PCI reset to the adapter 8410 * @ipr_cmd: ipr command struct 8411 * 8412 * Description: This clears PCI reset to the adapter and delays two seconds. 
8413 * 8414 * Return value: 8415 * IPR_RC_JOB_RETURN 8416 **/ 8417 static int ipr_reset_slot_reset_done(struct ipr_cmnd *ipr_cmd) 8418 { 8419 ENTER; 8420 ipr_cmd->job_step = ipr_reset_bist_done; 8421 ipr_reset_start_timer(ipr_cmd, IPR_WAIT_FOR_BIST_TIMEOUT); 8422 LEAVE; 8423 return IPR_RC_JOB_RETURN; 8424 } 8425 8426 /** 8427 * ipr_reset_reset_work - Pulse a PCIe fundamental reset 8428 * @work: work struct 8429 * 8430 * Description: This pulses warm reset to a slot. 8431 * 8432 **/ 8433 static void ipr_reset_reset_work(struct work_struct *work) 8434 { 8435 struct ipr_cmnd *ipr_cmd = container_of(work, struct ipr_cmnd, work); 8436 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; 8437 struct pci_dev *pdev = ioa_cfg->pdev; 8438 unsigned long lock_flags = 0; 8439 8440 ENTER; 8441 pci_set_pcie_reset_state(pdev, pcie_warm_reset); 8442 msleep(jiffies_to_msecs(IPR_PCI_RESET_TIMEOUT)); 8443 pci_set_pcie_reset_state(pdev, pcie_deassert_reset); 8444 8445 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); 8446 if (ioa_cfg->reset_cmd == ipr_cmd) 8447 ipr_reset_ioa_job(ipr_cmd); 8448 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); 8449 LEAVE; 8450 } 8451 8452 /** 8453 * ipr_reset_slot_reset - Reset the PCI slot of the adapter. 8454 * @ipr_cmd: ipr command struct 8455 * 8456 * Description: This asserts PCI reset to the adapter. 8457 * 8458 * Return value: 8459 * IPR_RC_JOB_RETURN 8460 **/ 8461 static int ipr_reset_slot_reset(struct ipr_cmnd *ipr_cmd) 8462 { 8463 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; 8464 8465 ENTER; 8466 INIT_WORK(&ipr_cmd->work, ipr_reset_reset_work); 8467 queue_work(ioa_cfg->reset_work_q, &ipr_cmd->work); 8468 ipr_cmd->job_step = ipr_reset_slot_reset_done; 8469 LEAVE; 8470 return IPR_RC_JOB_RETURN; 8471 } 8472 8473 /** 8474 * ipr_reset_block_config_access_wait - Wait for permission to block config access 8475 * @ipr_cmd: ipr command struct 8476 * 8477 * Description: This attempts to block config access to the IOA. 8478 * 8479 * Return value: 8480 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN 8481 **/ 8482 static int ipr_reset_block_config_access_wait(struct ipr_cmnd *ipr_cmd) 8483 { 8484 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; 8485 int rc = IPR_RC_JOB_CONTINUE; 8486 8487 if (pci_cfg_access_trylock(ioa_cfg->pdev)) { 8488 ioa_cfg->cfg_locked = 1; 8489 ipr_cmd->job_step = ioa_cfg->reset; 8490 } else { 8491 if (ipr_cmd->u.time_left) { 8492 rc = IPR_RC_JOB_RETURN; 8493 ipr_cmd->u.time_left -= IPR_CHECK_FOR_RESET_TIMEOUT; 8494 ipr_reset_start_timer(ipr_cmd, 8495 IPR_CHECK_FOR_RESET_TIMEOUT); 8496 } else { 8497 ipr_cmd->job_step = ioa_cfg->reset; 8498 dev_err(&ioa_cfg->pdev->dev, 8499 "Timed out waiting to lock config access. 
Resetting anyway.\n"); 8500 } 8501 } 8502 8503 return rc; 8504 } 8505 8506 /** 8507 * ipr_reset_block_config_access - Block config access to the IOA 8508 * @ipr_cmd: ipr command struct 8509 * 8510 * Description: This attempts to block config access to the IOA 8511 * 8512 * Return value: 8513 * IPR_RC_JOB_CONTINUE 8514 **/ 8515 static int ipr_reset_block_config_access(struct ipr_cmnd *ipr_cmd) 8516 { 8517 ipr_cmd->ioa_cfg->cfg_locked = 0; 8518 ipr_cmd->job_step = ipr_reset_block_config_access_wait; 8519 ipr_cmd->u.time_left = IPR_WAIT_FOR_RESET_TIMEOUT; 8520 return IPR_RC_JOB_CONTINUE; 8521 } 8522 8523 /** 8524 * ipr_reset_allowed - Query whether or not IOA can be reset 8525 * @ioa_cfg: ioa config struct 8526 * 8527 * Return value: 8528 * 0 if reset not allowed / non-zero if reset is allowed 8529 **/ 8530 static int ipr_reset_allowed(struct ipr_ioa_cfg *ioa_cfg) 8531 { 8532 volatile u32 temp_reg; 8533 8534 temp_reg = readl(ioa_cfg->regs.sense_interrupt_reg); 8535 return ((temp_reg & IPR_PCII_CRITICAL_OPERATION) == 0); 8536 } 8537 8538 /** 8539 * ipr_reset_wait_to_start_bist - Wait for permission to reset IOA. 8540 * @ipr_cmd: ipr command struct 8541 * 8542 * Description: This function waits for adapter permission to run BIST, 8543 * then runs BIST. If the adapter does not give permission after a 8544 * reasonable time, we will reset the adapter anyway. The impact of 8545 * resetting the adapter without warning the adapter is the risk of 8546 * losing the persistent error log on the adapter. If the adapter is 8547 * reset while it is writing to the flash on the adapter, the flash 8548 * segment will have bad ECC and be zeroed. 8549 * 8550 * Return value: 8551 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN 8552 **/ 8553 static int ipr_reset_wait_to_start_bist(struct ipr_cmnd *ipr_cmd) 8554 { 8555 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; 8556 int rc = IPR_RC_JOB_RETURN; 8557 8558 if (!ipr_reset_allowed(ioa_cfg) && ipr_cmd->u.time_left) { 8559 ipr_cmd->u.time_left -= IPR_CHECK_FOR_RESET_TIMEOUT; 8560 ipr_reset_start_timer(ipr_cmd, IPR_CHECK_FOR_RESET_TIMEOUT); 8561 } else { 8562 ipr_cmd->job_step = ipr_reset_block_config_access; 8563 rc = IPR_RC_JOB_CONTINUE; 8564 } 8565 8566 return rc; 8567 } 8568 8569 /** 8570 * ipr_reset_alert - Alert the adapter of a pending reset 8571 * @ipr_cmd: ipr command struct 8572 * 8573 * Description: This function alerts the adapter that it will be reset. 8574 * If memory space is not currently enabled, proceed directly 8575 * to running BIST on the adapter. The timer must always be started 8576 * so we guarantee we do not run BIST from ipr_isr. 
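 * The timer also bounds how long we wait for the adapter to acknowledge the alert: ipr_reset_wait_to_start_bist polls at IPR_CHECK_FOR_RESET_TIMEOUT granularity and, once IPR_WAIT_FOR_RESET_TIMEOUT has elapsed, resets the adapter anyway.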
8577 * 8578 * Return value: 8579 * IPR_RC_JOB_RETURN 8580 **/ 8581 static int ipr_reset_alert(struct ipr_cmnd *ipr_cmd) 8582 { 8583 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; 8584 u16 cmd_reg; 8585 int rc; 8586 8587 ENTER; 8588 rc = pci_read_config_word(ioa_cfg->pdev, PCI_COMMAND, &cmd_reg); 8589 8590 if ((rc == PCIBIOS_SUCCESSFUL) && (cmd_reg & PCI_COMMAND_MEMORY)) { 8591 ipr_mask_and_clear_interrupts(ioa_cfg, ~0); 8592 writel(IPR_UPROCI_RESET_ALERT, ioa_cfg->regs.set_uproc_interrupt_reg32); 8593 ipr_cmd->job_step = ipr_reset_wait_to_start_bist; 8594 } else { 8595 ipr_cmd->job_step = ipr_reset_block_config_access; 8596 } 8597 8598 ipr_cmd->u.time_left = IPR_WAIT_FOR_RESET_TIMEOUT; 8599 ipr_reset_start_timer(ipr_cmd, IPR_CHECK_FOR_RESET_TIMEOUT); 8600 8601 LEAVE; 8602 return IPR_RC_JOB_RETURN; 8603 } 8604 8605 /** 8606 * ipr_reset_quiesce_done - Complete IOA disconnect 8607 * @ipr_cmd: ipr command struct 8608 * 8609 * Description: Freeze the adapter to complete quiesce processing 8610 * 8611 * Return value: 8612 * IPR_RC_JOB_CONTINUE 8613 **/ 8614 static int ipr_reset_quiesce_done(struct ipr_cmnd *ipr_cmd) 8615 { 8616 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; 8617 8618 ENTER; 8619 ipr_cmd->job_step = ipr_ioa_bringdown_done; 8620 ipr_mask_and_clear_interrupts(ioa_cfg, ~IPR_PCII_IOA_TRANS_TO_OPER); 8621 LEAVE; 8622 return IPR_RC_JOB_CONTINUE; 8623 } 8624 8625 /** 8626 * ipr_reset_cancel_hcam_done - Check for outstanding commands 8627 * @ipr_cmd: ipr command struct 8628 * 8629 * Description: Ensure nothing is outstanding to the IOA and 8630 * proceed with IOA disconnect. Otherwise reset the IOA. 8631 * 8632 * Return value: 8633 * IPR_RC_JOB_RETURN / IPR_RC_JOB_CONTINUE 8634 **/ 8635 static int ipr_reset_cancel_hcam_done(struct ipr_cmnd *ipr_cmd) 8636 { 8637 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; 8638 struct ipr_cmnd *loop_cmd; 8639 struct ipr_hrr_queue *hrrq; 8640 int rc = IPR_RC_JOB_CONTINUE; 8641 int count = 0; 8642 8643 ENTER; 8644 ipr_cmd->job_step = ipr_reset_quiesce_done; 8645 8646 for_each_hrrq(hrrq, ioa_cfg) { 8647 spin_lock(&hrrq->_lock); 8648 list_for_each_entry(loop_cmd, &hrrq->hrrq_pending_q, queue) { 8649 count++; 8650 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE); 8651 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q); 8652 rc = IPR_RC_JOB_RETURN; 8653 break; 8654 } 8655 spin_unlock(&hrrq->_lock); 8656 8657 if (count) 8658 break; 8659 } 8660 8661 LEAVE; 8662 return rc; 8663 } 8664 8665 /** 8666 * ipr_reset_cancel_hcam - Cancel outstanding HCAMs 8667 * @ipr_cmd: ipr command struct 8668 * 8669 * Description: Cancel any outstanding HCAMs to the IOA.
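 * One Cancel Request is issued per invocation; job_step is pointed back at this function so it re-runs until no HCAM commands remain on the pending queue, then control falls through to ipr_reset_cancel_hcam_done.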
8670 * 8671 * Return value: 8672 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN 8673 **/ 8674 static int ipr_reset_cancel_hcam(struct ipr_cmnd *ipr_cmd) 8675 { 8676 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; 8677 int rc = IPR_RC_JOB_CONTINUE; 8678 struct ipr_cmd_pkt *cmd_pkt; 8679 struct ipr_cmnd *hcam_cmd; 8680 struct ipr_hrr_queue *hrrq = &ioa_cfg->hrrq[IPR_INIT_HRRQ]; 8681 8682 ENTER; 8683 ipr_cmd->job_step = ipr_reset_cancel_hcam_done; 8684 8685 if (!hrrq->ioa_is_dead) { 8686 if (!list_empty(&ioa_cfg->hostrcb_pending_q)) { 8687 list_for_each_entry(hcam_cmd, &hrrq->hrrq_pending_q, queue) { 8688 if (hcam_cmd->ioarcb.cmd_pkt.cdb[0] != IPR_HOST_CONTROLLED_ASYNC) 8689 continue; 8690 8691 ipr_cmd->ioarcb.res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE); 8692 ipr_cmd->ioarcb.cmd_pkt.request_type = IPR_RQTYPE_IOACMD; 8693 cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt; 8694 cmd_pkt->request_type = IPR_RQTYPE_IOACMD; 8695 cmd_pkt->cdb[0] = IPR_CANCEL_REQUEST; 8696 cmd_pkt->cdb[1] = IPR_CANCEL_64BIT_IOARCB; 8697 cmd_pkt->cdb[10] = ((u64) hcam_cmd->dma_addr >> 56) & 0xff; 8698 cmd_pkt->cdb[11] = ((u64) hcam_cmd->dma_addr >> 48) & 0xff; 8699 cmd_pkt->cdb[12] = ((u64) hcam_cmd->dma_addr >> 40) & 0xff; 8700 cmd_pkt->cdb[13] = ((u64) hcam_cmd->dma_addr >> 32) & 0xff; 8701 cmd_pkt->cdb[2] = ((u64) hcam_cmd->dma_addr >> 24) & 0xff; 8702 cmd_pkt->cdb[3] = ((u64) hcam_cmd->dma_addr >> 16) & 0xff; 8703 cmd_pkt->cdb[4] = ((u64) hcam_cmd->dma_addr >> 8) & 0xff; 8704 cmd_pkt->cdb[5] = ((u64) hcam_cmd->dma_addr) & 0xff; 8705 8706 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, 8707 IPR_CANCEL_TIMEOUT); 8708 8709 rc = IPR_RC_JOB_RETURN; 8710 ipr_cmd->job_step = ipr_reset_cancel_hcam; 8711 break; 8712 } 8713 } 8714 } else 8715 ipr_cmd->job_step = ipr_reset_alert; 8716 8717 LEAVE; 8718 return rc; 8719 } 8720 8721 /** 8722 * ipr_reset_ucode_download_done - Microcode download completion 8723 * @ipr_cmd: ipr command struct 8724 * 8725 * Description: This function unmaps the microcode download buffer. 8726 * 8727 * Return value: 8728 * IPR_RC_JOB_CONTINUE 8729 **/ 8730 static int ipr_reset_ucode_download_done(struct ipr_cmnd *ipr_cmd) 8731 { 8732 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; 8733 struct ipr_sglist *sglist = ioa_cfg->ucode_sglist; 8734 8735 dma_unmap_sg(&ioa_cfg->pdev->dev, sglist->scatterlist, 8736 sglist->num_sg, DMA_TO_DEVICE); 8737 8738 ipr_cmd->job_step = ipr_reset_alert; 8739 return IPR_RC_JOB_CONTINUE; 8740 } 8741 8742 /** 8743 * ipr_reset_ucode_download - Download microcode to the adapter 8744 * @ipr_cmd: ipr command struct 8745 * 8746 * Description: This function checks to see if there is microcode 8747 * to download to the adapter. If there is, a download is performed.
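 * The image is sent with a SCSI WRITE BUFFER (download and save) request whose length is packed big-endian into CDB bytes 6-8, limiting a single download to 16 MB - 1.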
8748 * 8749 * Return value: 8750 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN 8751 **/ 8752 static int ipr_reset_ucode_download(struct ipr_cmnd *ipr_cmd) 8753 { 8754 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; 8755 struct ipr_sglist *sglist = ioa_cfg->ucode_sglist; 8756 8757 ENTER; 8758 ipr_cmd->job_step = ipr_reset_alert; 8759 8760 if (!sglist) 8761 return IPR_RC_JOB_CONTINUE; 8762 8763 ipr_cmd->ioarcb.res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE); 8764 ipr_cmd->ioarcb.cmd_pkt.request_type = IPR_RQTYPE_SCSICDB; 8765 ipr_cmd->ioarcb.cmd_pkt.cdb[0] = WRITE_BUFFER; 8766 ipr_cmd->ioarcb.cmd_pkt.cdb[1] = IPR_WR_BUF_DOWNLOAD_AND_SAVE; 8767 ipr_cmd->ioarcb.cmd_pkt.cdb[6] = (sglist->buffer_len & 0xff0000) >> 16; 8768 ipr_cmd->ioarcb.cmd_pkt.cdb[7] = (sglist->buffer_len & 0x00ff00) >> 8; 8769 ipr_cmd->ioarcb.cmd_pkt.cdb[8] = sglist->buffer_len & 0x0000ff; 8770 8771 if (ioa_cfg->sis64) 8772 ipr_build_ucode_ioadl64(ipr_cmd, sglist); 8773 else 8774 ipr_build_ucode_ioadl(ipr_cmd, sglist); 8775 ipr_cmd->job_step = ipr_reset_ucode_download_done; 8776 8777 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, 8778 IPR_WRITE_BUFFER_TIMEOUT); 8779 8780 LEAVE; 8781 return IPR_RC_JOB_RETURN; 8782 } 8783 8784 /** 8785 * ipr_reset_shutdown_ioa - Shutdown the adapter 8786 * @ipr_cmd: ipr command struct 8787 * 8788 * Description: This function issues an adapter shutdown of the 8789 * specified type to the specified adapter as part of the 8790 * adapter reset job. 8791 * 8792 * Return value: 8793 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN 8794 **/ 8795 static int ipr_reset_shutdown_ioa(struct ipr_cmnd *ipr_cmd) 8796 { 8797 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; 8798 enum ipr_shutdown_type shutdown_type = ipr_cmd->u.shutdown_type; 8799 unsigned long timeout; 8800 int rc = IPR_RC_JOB_CONTINUE; 8801 8802 ENTER; 8803 if (shutdown_type == IPR_SHUTDOWN_QUIESCE) 8804 ipr_cmd->job_step = ipr_reset_cancel_hcam; 8805 else if (shutdown_type != IPR_SHUTDOWN_NONE && 8806 !ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead) { 8807 ipr_cmd->ioarcb.res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE); 8808 ipr_cmd->ioarcb.cmd_pkt.request_type = IPR_RQTYPE_IOACMD; 8809 ipr_cmd->ioarcb.cmd_pkt.cdb[0] = IPR_IOA_SHUTDOWN; 8810 ipr_cmd->ioarcb.cmd_pkt.cdb[1] = shutdown_type; 8811 8812 if (shutdown_type == IPR_SHUTDOWN_NORMAL) 8813 timeout = IPR_SHUTDOWN_TIMEOUT; 8814 else if (shutdown_type == IPR_SHUTDOWN_PREPARE_FOR_NORMAL) 8815 timeout = IPR_INTERNAL_TIMEOUT; 8816 else if (ioa_cfg->dual_raid && ipr_dual_ioa_raid) 8817 timeout = IPR_DUAL_IOA_ABBR_SHUTDOWN_TO; 8818 else 8819 timeout = IPR_ABBREV_SHUTDOWN_TIMEOUT; 8820 8821 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, timeout); 8822 8823 rc = IPR_RC_JOB_RETURN; 8824 ipr_cmd->job_step = ipr_reset_ucode_download; 8825 } else 8826 ipr_cmd->job_step = ipr_reset_alert; 8827 8828 LEAVE; 8829 return rc; 8830 } 8831 8832 /** 8833 * ipr_reset_ioa_job - Adapter reset job 8834 * @ipr_cmd: ipr command struct 8835 * 8836 * Description: This function is the job router for the adapter reset job. 8837 * 8838 * Return value: 8839 * none 8840 **/ 8841 static void ipr_reset_ioa_job(struct ipr_cmnd *ipr_cmd) 8842 { 8843 u32 rc, ioasc; 8844 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; 8845 8846 do { 8847 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc); 8848 8849 if (ioa_cfg->reset_cmd != ipr_cmd) { 8850 /* 8851 * We are doing nested adapter resets and this is 8852 * not the current reset job. 
8853 */ 8854 list_add_tail(&ipr_cmd->queue, 8855 &ipr_cmd->hrrq->hrrq_free_q); 8856 return; 8857 } 8858 8859 if (IPR_IOASC_SENSE_KEY(ioasc)) { 8860 rc = ipr_cmd->job_step_failed(ipr_cmd); 8861 if (rc == IPR_RC_JOB_RETURN) 8862 return; 8863 } 8864 8865 ipr_reinit_ipr_cmnd(ipr_cmd); 8866 ipr_cmd->job_step_failed = ipr_reset_cmd_failed; 8867 rc = ipr_cmd->job_step(ipr_cmd); 8868 } while (rc == IPR_RC_JOB_CONTINUE); 8869 } 8870 8871 /** 8872 * _ipr_initiate_ioa_reset - Initiate an adapter reset 8873 * @ioa_cfg: ioa config struct 8874 * @job_step: first job step of reset job 8875 * @shutdown_type: shutdown type 8876 * 8877 * Description: This function will initiate the reset of the given adapter 8878 * starting at the selected job step. 8879 * If the caller needs to wait on the completion of the reset, 8880 * the caller must sleep on the reset_wait_q. 8881 * 8882 * Return value: 8883 * none 8884 **/ 8885 static void _ipr_initiate_ioa_reset(struct ipr_ioa_cfg *ioa_cfg, 8886 int (*job_step) (struct ipr_cmnd *), 8887 enum ipr_shutdown_type shutdown_type) 8888 { 8889 struct ipr_cmnd *ipr_cmd; 8890 int i; 8891 8892 ioa_cfg->in_reset_reload = 1; 8893 for (i = 0; i < ioa_cfg->hrrq_num; i++) { 8894 spin_lock(&ioa_cfg->hrrq[i]._lock); 8895 ioa_cfg->hrrq[i].allow_cmds = 0; 8896 spin_unlock(&ioa_cfg->hrrq[i]._lock); 8897 } 8898 wmb(); 8899 if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].removing_ioa) 8900 scsi_block_requests(ioa_cfg->host); 8901 8902 ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg); 8903 ioa_cfg->reset_cmd = ipr_cmd; 8904 ipr_cmd->job_step = job_step; 8905 ipr_cmd->u.shutdown_type = shutdown_type; 8906 8907 ipr_reset_ioa_job(ipr_cmd); 8908 } 8909 8910 /** 8911 * ipr_initiate_ioa_reset - Initiate an adapter reset 8912 * @ioa_cfg: ioa config struct 8913 * @shutdown_type: shutdown type 8914 * 8915 * Description: This function will initiate the reset of the given adapter. 8916 * If the caller needs to wait on the completion of the reset, 8917 * the caller must sleep on the reset_wait_q. 
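 * A waiting caller follows a pattern along these lines (a sketch for illustration; the host lock must be held when calling and released before sleeping): * * spin_lock_irqsave(ioa_cfg->host->host_lock, flags); * ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NORMAL); * spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags); * wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);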
8918 * 8919 * Return value: 8920 * none 8921 **/ 8922 static void ipr_initiate_ioa_reset(struct ipr_ioa_cfg *ioa_cfg, 8923 enum ipr_shutdown_type shutdown_type) 8924 { 8925 int i; 8926 8927 if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead) 8928 return; 8929 8930 if (ioa_cfg->in_reset_reload) { 8931 if (ioa_cfg->sdt_state == GET_DUMP) 8932 ioa_cfg->sdt_state = WAIT_FOR_DUMP; 8933 else if (ioa_cfg->sdt_state == READ_DUMP) 8934 ioa_cfg->sdt_state = ABORT_DUMP; 8935 } 8936 8937 if (ioa_cfg->reset_retries++ >= IPR_NUM_RESET_RELOAD_RETRIES) { 8938 dev_err(&ioa_cfg->pdev->dev, 8939 "IOA taken offline - error recovery failed\n"); 8940 8941 ioa_cfg->reset_retries = 0; 8942 for (i = 0; i < ioa_cfg->hrrq_num; i++) { 8943 spin_lock(&ioa_cfg->hrrq[i]._lock); 8944 ioa_cfg->hrrq[i].ioa_is_dead = 1; 8945 spin_unlock(&ioa_cfg->hrrq[i]._lock); 8946 } 8947 wmb(); 8948 8949 if (ioa_cfg->in_ioa_bringdown) { 8950 ioa_cfg->reset_cmd = NULL; 8951 ioa_cfg->in_reset_reload = 0; 8952 ipr_fail_all_ops(ioa_cfg); 8953 wake_up_all(&ioa_cfg->reset_wait_q); 8954 8955 if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].removing_ioa) { 8956 spin_unlock_irq(ioa_cfg->host->host_lock); 8957 scsi_unblock_requests(ioa_cfg->host); 8958 spin_lock_irq(ioa_cfg->host->host_lock); 8959 } 8960 return; 8961 } else { 8962 ioa_cfg->in_ioa_bringdown = 1; 8963 shutdown_type = IPR_SHUTDOWN_NONE; 8964 } 8965 } 8966 8967 _ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_shutdown_ioa, 8968 shutdown_type); 8969 } 8970 8971 /** 8972 * ipr_reset_freeze - Hold off all I/O activity 8973 * @ipr_cmd: ipr command struct 8974 * 8975 * Description: If the PCI slot is frozen, hold off all I/O 8976 * activity; then, as soon as the slot is available again, 8977 * initiate an adapter reset. 8978 */ 8979 static int ipr_reset_freeze(struct ipr_cmnd *ipr_cmd) 8980 { 8981 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; 8982 int i; 8983 8984 /* Disallow new interrupts, avoid loop */ 8985 for (i = 0; i < ioa_cfg->hrrq_num; i++) { 8986 spin_lock(&ioa_cfg->hrrq[i]._lock); 8987 ioa_cfg->hrrq[i].allow_interrupts = 0; 8988 spin_unlock(&ioa_cfg->hrrq[i]._lock); 8989 } 8990 wmb(); 8991 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q); 8992 ipr_cmd->done = ipr_reset_ioa_job; 8993 return IPR_RC_JOB_RETURN; 8994 } 8995 8996 /** 8997 * ipr_pci_mmio_enabled - Called when MMIO has been re-enabled 8998 * @pdev: PCI device struct 8999 * 9000 * Description: This routine is called to tell us that the MMIO 9001 * access to the IOA has been restored 9002 */ 9003 static pci_ers_result_t ipr_pci_mmio_enabled(struct pci_dev *pdev) 9004 { 9005 unsigned long flags = 0; 9006 struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev); 9007 9008 spin_lock_irqsave(ioa_cfg->host->host_lock, flags); 9009 if (!ioa_cfg->probe_done) 9010 pci_save_state(pdev); 9011 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags); 9012 return PCI_ERS_RESULT_NEED_RESET; 9013 } 9014 9015 /** 9016 * ipr_pci_frozen - Called when slot has experienced a PCI bus error. 9017 * @pdev: PCI device struct 9018 * 9019 * Description: This routine is called to tell us that the PCI bus 9020 * is down. Can't do anything here, except put the device driver 9021 * into a holding pattern, waiting for the PCI bus to come back. 
9022 */ 9023 static void ipr_pci_frozen(struct pci_dev *pdev) 9024 { 9025 unsigned long flags = 0; 9026 struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev); 9027 9028 spin_lock_irqsave(ioa_cfg->host->host_lock, flags); 9029 if (ioa_cfg->probe_done) 9030 _ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_freeze, IPR_SHUTDOWN_NONE); 9031 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags); 9032 } 9033 9034 /** 9035 * ipr_pci_slot_reset - Called when PCI slot has been reset. 9036 * @pdev: PCI device struct 9037 * 9038 * Description: This routine is called by the pci error recovery 9039 * code after the PCI slot has been reset, just before we 9040 * should resume normal operations. 9041 */ 9042 static pci_ers_result_t ipr_pci_slot_reset(struct pci_dev *pdev) 9043 { 9044 unsigned long flags = 0; 9045 struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev); 9046 9047 spin_lock_irqsave(ioa_cfg->host->host_lock, flags); 9048 if (ioa_cfg->probe_done) { 9049 if (ioa_cfg->needs_warm_reset) 9050 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE); 9051 else 9052 _ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_restore_cfg_space, 9053 IPR_SHUTDOWN_NONE); 9054 } else 9055 wake_up_all(&ioa_cfg->eeh_wait_q); 9056 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags); 9057 return PCI_ERS_RESULT_RECOVERED; 9058 } 9059 9060 /** 9061 * ipr_pci_perm_failure - Called when PCI slot is dead for good. 9062 * @pdev: PCI device struct 9063 * 9064 * Description: This routine is called when the PCI bus has 9065 * permanently failed. 9066 */ 9067 static void ipr_pci_perm_failure(struct pci_dev *pdev) 9068 { 9069 unsigned long flags = 0; 9070 struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev); 9071 int i; 9072 9073 spin_lock_irqsave(ioa_cfg->host->host_lock, flags); 9074 if (ioa_cfg->probe_done) { 9075 if (ioa_cfg->sdt_state == WAIT_FOR_DUMP) 9076 ioa_cfg->sdt_state = ABORT_DUMP; 9077 ioa_cfg->reset_retries = IPR_NUM_RESET_RELOAD_RETRIES - 1; 9078 ioa_cfg->in_ioa_bringdown = 1; 9079 for (i = 0; i < ioa_cfg->hrrq_num; i++) { 9080 spin_lock(&ioa_cfg->hrrq[i]._lock); 9081 ioa_cfg->hrrq[i].allow_cmds = 0; 9082 spin_unlock(&ioa_cfg->hrrq[i]._lock); 9083 } 9084 wmb(); 9085 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE); 9086 } else 9087 wake_up_all(&ioa_cfg->eeh_wait_q); 9088 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags); 9089 } 9090 9091 /** 9092 * ipr_pci_error_detected - Called when a PCI error is detected. 9093 * @pdev: PCI device struct 9094 * @state: PCI channel state 9095 * 9096 * Description: Called when a PCI error is detected. 9097 * 9098 * Return value: 9099 * PCI_ERS_RESULT_NEED_RESET or PCI_ERS_RESULT_DISCONNECT 9100 */ 9101 static pci_ers_result_t ipr_pci_error_detected(struct pci_dev *pdev, 9102 pci_channel_state_t state) 9103 { 9104 switch (state) { 9105 case pci_channel_io_frozen: 9106 ipr_pci_frozen(pdev); 9107 return PCI_ERS_RESULT_CAN_RECOVER; 9108 case pci_channel_io_perm_failure: 9109 ipr_pci_perm_failure(pdev); 9110 return PCI_ERS_RESULT_DISCONNECT; 9111 break; 9112 default: 9113 break; 9114 } 9115 return PCI_ERS_RESULT_NEED_RESET; 9116 } 9117 9118 /** 9119 * ipr_probe_ioa_part2 - Initializes IOAs found in ipr_probe_ioa(..) 9120 * @ioa_cfg: ioa cfg struct 9121 * 9122 * Description: This is the second phase of adapter initialization. 9123 * This function takes care of initializing the adapter to the point 9124 * where it can accept new commands.
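 * If probe detected that the chip needs a hard reset first, a full adapter reset is initiated; otherwise the bringup job starts directly at ipr_reset_enable_ioa.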
 *
 * Return value:
 * 	0 on success / -EIO on failure
 **/
static int ipr_probe_ioa_part2(struct ipr_ioa_cfg *ioa_cfg)
{
	int rc = 0;
	unsigned long host_lock_flags = 0;

	ENTER;
	spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
	dev_dbg(&ioa_cfg->pdev->dev, "ioa_cfg adx: 0x%p\n", ioa_cfg);
	ioa_cfg->probe_done = 1;
	if (ioa_cfg->needs_hard_reset) {
		ioa_cfg->needs_hard_reset = 0;
		ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
	} else
		_ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_enable_ioa,
					IPR_SHUTDOWN_NONE);
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);

	LEAVE;
	return rc;
}

/**
 * ipr_free_cmd_blks - Frees command blocks allocated for an adapter
 * @ioa_cfg:	ioa config struct
 *
 * Return value:
 * 	none
 **/
static void ipr_free_cmd_blks(struct ipr_ioa_cfg *ioa_cfg)
{
	int i;

	if (ioa_cfg->ipr_cmnd_list) {
		for (i = 0; i < IPR_NUM_CMD_BLKS; i++) {
			if (ioa_cfg->ipr_cmnd_list[i])
				dma_pool_free(ioa_cfg->ipr_cmd_pool,
					      ioa_cfg->ipr_cmnd_list[i],
					      ioa_cfg->ipr_cmnd_list_dma[i]);

			ioa_cfg->ipr_cmnd_list[i] = NULL;
		}
	}

	if (ioa_cfg->ipr_cmd_pool)
		dma_pool_destroy(ioa_cfg->ipr_cmd_pool);

	kfree(ioa_cfg->ipr_cmnd_list);
	kfree(ioa_cfg->ipr_cmnd_list_dma);
	ioa_cfg->ipr_cmnd_list = NULL;
	ioa_cfg->ipr_cmnd_list_dma = NULL;
	ioa_cfg->ipr_cmd_pool = NULL;
}

/**
 * ipr_free_mem - Frees memory allocated for an adapter
 * @ioa_cfg:	ioa cfg struct
 *
 * Return value:
 * 	none
 **/
static void ipr_free_mem(struct ipr_ioa_cfg *ioa_cfg)
{
	int i;

	kfree(ioa_cfg->res_entries);
	dma_free_coherent(&ioa_cfg->pdev->dev, sizeof(struct ipr_misc_cbs),
			  ioa_cfg->vpd_cbs, ioa_cfg->vpd_cbs_dma);
	ipr_free_cmd_blks(ioa_cfg);

	for (i = 0; i < ioa_cfg->hrrq_num; i++)
		dma_free_coherent(&ioa_cfg->pdev->dev,
				  sizeof(u32) * ioa_cfg->hrrq[i].size,
				  ioa_cfg->hrrq[i].host_rrq,
				  ioa_cfg->hrrq[i].host_rrq_dma);

	dma_free_coherent(&ioa_cfg->pdev->dev, ioa_cfg->cfg_table_size,
			  ioa_cfg->u.cfg_table, ioa_cfg->cfg_table_dma);

	for (i = 0; i < IPR_NUM_HCAMS; i++) {
		dma_free_coherent(&ioa_cfg->pdev->dev,
				  sizeof(struct ipr_hostrcb),
				  ioa_cfg->hostrcb[i],
				  ioa_cfg->hostrcb_dma[i]);
	}

	ipr_free_dump(ioa_cfg);
	kfree(ioa_cfg->trace);
}

/**
 * ipr_free_irqs - Free all allocated IRQs for the adapter.
 * @ioa_cfg:	ipr cfg struct
 *
 * This function frees all allocated IRQs for the
 * specified adapter.
 *
 * Return value:
 * 	none
 **/
static void ipr_free_irqs(struct ipr_ioa_cfg *ioa_cfg)
{
	struct pci_dev *pdev = ioa_cfg->pdev;

	if (ioa_cfg->intr_flag == IPR_USE_MSI ||
	    ioa_cfg->intr_flag == IPR_USE_MSIX) {
		int i;
		for (i = 0; i < ioa_cfg->nvectors; i++)
			free_irq(ioa_cfg->vectors_info[i].vec,
				 &ioa_cfg->hrrq[i]);
	} else
		free_irq(pdev->irq, &ioa_cfg->hrrq[0]);

	if (ioa_cfg->intr_flag == IPR_USE_MSI) {
		pci_disable_msi(pdev);
		ioa_cfg->intr_flag &= ~IPR_USE_MSI;
	} else if (ioa_cfg->intr_flag == IPR_USE_MSIX) {
		pci_disable_msix(pdev);
		ioa_cfg->intr_flag &= ~IPR_USE_MSIX;
	}
}

/**
 * ipr_free_all_resources - Free all allocated resources for an adapter.
 * @ioa_cfg:	ioa config struct
 *
 * This function frees all allocated resources for the
 * specified adapter.
 *
 * Return value:
 * 	none
 **/
static void ipr_free_all_resources(struct ipr_ioa_cfg *ioa_cfg)
{
	struct pci_dev *pdev = ioa_cfg->pdev;

	ENTER;
	ipr_free_irqs(ioa_cfg);
	if (ioa_cfg->reset_work_q)
		destroy_workqueue(ioa_cfg->reset_work_q);
	iounmap(ioa_cfg->hdw_dma_regs);
	pci_release_regions(pdev);
	ipr_free_mem(ioa_cfg);
	scsi_host_put(ioa_cfg->host);
	pci_disable_device(pdev);
	LEAVE;
}

/**
 * ipr_alloc_cmd_blks - Allocate command blocks for an adapter
 * @ioa_cfg:	ioa config struct
 *
 * Return value:
 * 	0 on success / -ENOMEM on allocation failure
 **/
static int ipr_alloc_cmd_blks(struct ipr_ioa_cfg *ioa_cfg)
{
	struct ipr_cmnd *ipr_cmd;
	struct ipr_ioarcb *ioarcb;
	dma_addr_t dma_addr;
	int i, entries_each_hrrq, hrrq_id = 0;

	ioa_cfg->ipr_cmd_pool = dma_pool_create(IPR_NAME, &ioa_cfg->pdev->dev,
						sizeof(struct ipr_cmnd), 512, 0);

	if (!ioa_cfg->ipr_cmd_pool)
		return -ENOMEM;

	ioa_cfg->ipr_cmnd_list = kcalloc(IPR_NUM_CMD_BLKS, sizeof(struct ipr_cmnd *), GFP_KERNEL);
	ioa_cfg->ipr_cmnd_list_dma = kcalloc(IPR_NUM_CMD_BLKS, sizeof(dma_addr_t), GFP_KERNEL);

	if (!ioa_cfg->ipr_cmnd_list || !ioa_cfg->ipr_cmnd_list_dma) {
		ipr_free_cmd_blks(ioa_cfg);
		return -ENOMEM;
	}

	/* Carve the command-id space up between the HRRQs: queue 0 keeps
	 * the internal command blocks, the rest share the base blocks. */
	for (i = 0; i < ioa_cfg->hrrq_num; i++) {
		if (ioa_cfg->hrrq_num > 1) {
			if (i == 0) {
				entries_each_hrrq = IPR_NUM_INTERNAL_CMD_BLKS;
				ioa_cfg->hrrq[i].min_cmd_id = 0;
				ioa_cfg->hrrq[i].max_cmd_id =
					(entries_each_hrrq - 1);
			} else {
				entries_each_hrrq =
					IPR_NUM_BASE_CMD_BLKS /
					(ioa_cfg->hrrq_num - 1);
				ioa_cfg->hrrq[i].min_cmd_id =
					IPR_NUM_INTERNAL_CMD_BLKS +
					(i - 1) * entries_each_hrrq;
				ioa_cfg->hrrq[i].max_cmd_id =
					(IPR_NUM_INTERNAL_CMD_BLKS +
					i * entries_each_hrrq - 1);
			}
		} else {
			entries_each_hrrq = IPR_NUM_CMD_BLKS;
			ioa_cfg->hrrq[i].min_cmd_id = 0;
			ioa_cfg->hrrq[i].max_cmd_id = (entries_each_hrrq - 1);
		}
		ioa_cfg->hrrq[i].size = entries_each_hrrq;
	}

	BUG_ON(ioa_cfg->hrrq_num == 0);

	/* Give any command blocks left over by the division to the last queue */
	i = IPR_NUM_CMD_BLKS -
		ioa_cfg->hrrq[ioa_cfg->hrrq_num - 1].max_cmd_id - 1;
	if (i > 0) {
		ioa_cfg->hrrq[ioa_cfg->hrrq_num - 1].size += i;
		ioa_cfg->hrrq[ioa_cfg->hrrq_num - 1].max_cmd_id += i;
	}

	for (i = 0; i < IPR_NUM_CMD_BLKS; i++) {
		ipr_cmd = dma_pool_alloc(ioa_cfg->ipr_cmd_pool, GFP_KERNEL, &dma_addr);

		if (!ipr_cmd) {
			ipr_free_cmd_blks(ioa_cfg);
			return -ENOMEM;
		}

		memset(ipr_cmd, 0, sizeof(*ipr_cmd));
		ioa_cfg->ipr_cmnd_list[i] = ipr_cmd;
		ioa_cfg->ipr_cmnd_list_dma[i] = dma_addr;

		ioarcb = &ipr_cmd->ioarcb;
		ipr_cmd->dma_addr = dma_addr;
		if (ioa_cfg->sis64)
			ioarcb->a.ioarcb_host_pci_addr64 = cpu_to_be64(dma_addr);
		else
			ioarcb->a.ioarcb_host_pci_addr = cpu_to_be32(dma_addr);

		ioarcb->host_response_handle = cpu_to_be32(i << 2);
		if (ioa_cfg->sis64) {
			ioarcb->u.sis64_addr_data.data_ioadl_addr =
				cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, i.ioadl64));
			ioarcb->u.sis64_addr_data.ioasa_host_pci_addr =
				cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, s.ioasa64));
		} else {
			ioarcb->write_ioadl_addr =
				cpu_to_be32(dma_addr +
					    offsetof(struct ipr_cmnd, i.ioadl));
			ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr;
			ioarcb->ioasa_host_pci_addr =
				cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, s.ioasa));
		}
		ioarcb->ioasa_len = cpu_to_be16(sizeof(struct ipr_ioasa));
		ipr_cmd->cmd_index = i;
		ipr_cmd->ioa_cfg = ioa_cfg;
		ipr_cmd->sense_buffer_dma = dma_addr +
			offsetof(struct ipr_cmnd, sense_buffer);

		ipr_cmd->ioarcb.cmd_pkt.hrrq_id = hrrq_id;
		ipr_cmd->hrrq = &ioa_cfg->hrrq[hrrq_id];
		list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
		if (i >= ioa_cfg->hrrq[hrrq_id].max_cmd_id)
			hrrq_id++;
	}

	return 0;
}

/**
 * ipr_alloc_mem - Allocate memory for an adapter
 * @ioa_cfg:	ioa config struct
 *
 * Return value:
 * 	0 on success / non-zero for error
 **/
static int ipr_alloc_mem(struct ipr_ioa_cfg *ioa_cfg)
{
	struct pci_dev *pdev = ioa_cfg->pdev;
	int i, rc = -ENOMEM;

	ENTER;
	ioa_cfg->res_entries = kzalloc(sizeof(struct ipr_resource_entry) *
				       ioa_cfg->max_devs_supported, GFP_KERNEL);

	if (!ioa_cfg->res_entries)
		goto out;

	for (i = 0; i < ioa_cfg->max_devs_supported; i++) {
		list_add_tail(&ioa_cfg->res_entries[i].queue, &ioa_cfg->free_res_q);
		ioa_cfg->res_entries[i].ioa_cfg = ioa_cfg;
	}

	ioa_cfg->vpd_cbs = dma_alloc_coherent(&pdev->dev,
					      sizeof(struct ipr_misc_cbs),
					      &ioa_cfg->vpd_cbs_dma,
					      GFP_KERNEL);

	if (!ioa_cfg->vpd_cbs)
		goto out_free_res_entries;

	if (ipr_alloc_cmd_blks(ioa_cfg))
		goto out_free_vpd_cbs;

	for (i = 0; i < ioa_cfg->hrrq_num; i++) {
		ioa_cfg->hrrq[i].host_rrq = dma_alloc_coherent(&pdev->dev,
					sizeof(u32) * ioa_cfg->hrrq[i].size,
					&ioa_cfg->hrrq[i].host_rrq_dma,
					GFP_KERNEL);

		if (!ioa_cfg->hrrq[i].host_rrq) {
			/* unwind all previously allocated host_rrq buffers */
			while (--i >= 0)
				dma_free_coherent(&pdev->dev,
					sizeof(u32) * ioa_cfg->hrrq[i].size,
					ioa_cfg->hrrq[i].host_rrq,
					ioa_cfg->hrrq[i].host_rrq_dma);
			goto out_ipr_free_cmd_blocks;
		}
		ioa_cfg->hrrq[i].ioa_cfg = ioa_cfg;
	}

	ioa_cfg->u.cfg_table = dma_alloc_coherent(&pdev->dev,
						  ioa_cfg->cfg_table_size,
						  &ioa_cfg->cfg_table_dma,
						  GFP_KERNEL);

	if (!ioa_cfg->u.cfg_table)
		goto out_free_host_rrq;

	for (i = 0; i < IPR_NUM_HCAMS; i++) {
		ioa_cfg->hostrcb[i] = dma_alloc_coherent(&pdev->dev,
							 sizeof(struct ipr_hostrcb),
							 &ioa_cfg->hostrcb_dma[i],
							 GFP_KERNEL);

		if (!ioa_cfg->hostrcb[i])
			goto out_free_hostrcb_dma;

		ioa_cfg->hostrcb[i]->hostrcb_dma =
			ioa_cfg->hostrcb_dma[i] + offsetof(struct ipr_hostrcb, hcam);
		ioa_cfg->hostrcb[i]->ioa_cfg = ioa_cfg;
		list_add_tail(&ioa_cfg->hostrcb[i]->queue, &ioa_cfg->hostrcb_free_q);
	}

	ioa_cfg->trace = kzalloc(sizeof(struct ipr_trace_entry) *
				 IPR_NUM_TRACE_ENTRIES, GFP_KERNEL);

	if (!ioa_cfg->trace)
		goto out_free_hostrcb_dma;

	rc = 0;
out:
	LEAVE;
	return rc;

out_free_hostrcb_dma:
	while (i-- > 0) {
		dma_free_coherent(&pdev->dev, sizeof(struct ipr_hostrcb),
				  ioa_cfg->hostrcb[i],
				  ioa_cfg->hostrcb_dma[i]);
	}
	dma_free_coherent(&pdev->dev, ioa_cfg->cfg_table_size,
			  ioa_cfg->u.cfg_table, ioa_cfg->cfg_table_dma);
out_free_host_rrq:
	for (i = 0; i < ioa_cfg->hrrq_num; i++) {
		dma_free_coherent(&pdev->dev,
				  sizeof(u32) * ioa_cfg->hrrq[i].size,
				  ioa_cfg->hrrq[i].host_rrq,
				  ioa_cfg->hrrq[i].host_rrq_dma);
	}
out_ipr_free_cmd_blocks:
	ipr_free_cmd_blks(ioa_cfg);
out_free_vpd_cbs:
	dma_free_coherent(&pdev->dev, sizeof(struct ipr_misc_cbs),
			  ioa_cfg->vpd_cbs, ioa_cfg->vpd_cbs_dma);
out_free_res_entries:
	kfree(ioa_cfg->res_entries);
	goto out;
}

/**
 * ipr_initialize_bus_attr - Initialize SCSI bus attributes to default values
 * @ioa_cfg:	ioa config struct
 *
 * Return value:
 * 	none
 **/
static void ipr_initialize_bus_attr(struct ipr_ioa_cfg *ioa_cfg)
{
	int i;

	for (i = 0; i < IPR_MAX_NUM_BUSES; i++) {
		ioa_cfg->bus_attr[i].bus = i;
		ioa_cfg->bus_attr[i].qas_enabled = 0;
		ioa_cfg->bus_attr[i].bus_width = IPR_DEFAULT_BUS_WIDTH;
		if (ipr_max_speed < ARRAY_SIZE(ipr_max_bus_speeds))
			ioa_cfg->bus_attr[i].max_xfer_rate = ipr_max_bus_speeds[ipr_max_speed];
		else
			ioa_cfg->bus_attr[i].max_xfer_rate = IPR_U160_SCSI_RATE;
	}
}

/**
 * ipr_init_regs - Initialize IOA registers
 * @ioa_cfg:	ioa config struct
 *
 * Return value:
 * 	none
 **/
static void ipr_init_regs(struct ipr_ioa_cfg *ioa_cfg)
{
	const struct ipr_interrupt_offsets *p;
	struct ipr_interrupts *t;
	void __iomem *base;

	p = &ioa_cfg->chip_cfg->regs;
	t = &ioa_cfg->regs;
	base = ioa_cfg->hdw_dma_regs;

	t->set_interrupt_mask_reg = base + p->set_interrupt_mask_reg;
	t->clr_interrupt_mask_reg = base + p->clr_interrupt_mask_reg;
	t->clr_interrupt_mask_reg32 = base + p->clr_interrupt_mask_reg32;
	t->sense_interrupt_mask_reg = base + p->sense_interrupt_mask_reg;
	t->sense_interrupt_mask_reg32 = base + p->sense_interrupt_mask_reg32;
	t->clr_interrupt_reg = base + p->clr_interrupt_reg;
	t->clr_interrupt_reg32 = base + p->clr_interrupt_reg32;
	t->sense_interrupt_reg = base + p->sense_interrupt_reg;
	t->sense_interrupt_reg32 = base + p->sense_interrupt_reg32;
	t->ioarrin_reg = base + p->ioarrin_reg;
	t->sense_uproc_interrupt_reg = base + p->sense_uproc_interrupt_reg;
	t->sense_uproc_interrupt_reg32 = base + p->sense_uproc_interrupt_reg32;
	t->set_uproc_interrupt_reg = base + p->set_uproc_interrupt_reg;
	t->set_uproc_interrupt_reg32 = base + p->set_uproc_interrupt_reg32;
	t->clr_uproc_interrupt_reg = base + p->clr_uproc_interrupt_reg;
	t->clr_uproc_interrupt_reg32 = base + p->clr_uproc_interrupt_reg32;

	if (ioa_cfg->sis64) {
		t->init_feedback_reg = base + p->init_feedback_reg;
		t->dump_addr_reg = base + p->dump_addr_reg;
		t->dump_data_reg = base + p->dump_data_reg;
		t->endian_swap_reg = base + p->endian_swap_reg;
	}
}

/**
 * ipr_init_ioa_cfg - Initialize IOA config struct
 * @ioa_cfg:	ioa config struct
 * @host:	scsi host struct
 * @pdev:	PCI dev struct
 *
 * Return value:
 * 	none
 **/
static void ipr_init_ioa_cfg(struct ipr_ioa_cfg *ioa_cfg,
			     struct Scsi_Host *host, struct pci_dev *pdev)
{
	int i;

	ioa_cfg->host = host;
	ioa_cfg->pdev = pdev;
	ioa_cfg->log_level = ipr_log_level;
	ioa_cfg->doorbell = IPR_DOORBELL;
	sprintf(ioa_cfg->eye_catcher, IPR_EYECATCHER);
	sprintf(ioa_cfg->trace_start, IPR_TRACE_START_LABEL);
	sprintf(ioa_cfg->cfg_table_start, IPR_CFG_TBL_START);
	sprintf(ioa_cfg->resource_table_label, IPR_RES_TABLE_LABEL);
	sprintf(ioa_cfg->ipr_hcam_label, IPR_HCAM_LABEL);
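	/*
	 * The labels above (and ipr_cmd_label below) are printable
	 * eye-catchers; the assumption is that they exist only to make
	 * these structures easy to locate in a raw adapter or kernel dump.
	 */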
	sprintf(ioa_cfg->ipr_cmd_label, IPR_CMD_LABEL);

	INIT_LIST_HEAD(&ioa_cfg->hostrcb_free_q);
	INIT_LIST_HEAD(&ioa_cfg->hostrcb_pending_q);
	INIT_LIST_HEAD(&ioa_cfg->free_res_q);
	INIT_LIST_HEAD(&ioa_cfg->used_res_q);
	INIT_WORK(&ioa_cfg->work_q, ipr_worker_thread);
	init_waitqueue_head(&ioa_cfg->reset_wait_q);
	init_waitqueue_head(&ioa_cfg->msi_wait_q);
	init_waitqueue_head(&ioa_cfg->eeh_wait_q);
	ioa_cfg->sdt_state = INACTIVE;

	ipr_initialize_bus_attr(ioa_cfg);
	ioa_cfg->max_devs_supported = ipr_max_devs;

	if (ioa_cfg->sis64) {
		host->max_id = IPR_MAX_SIS64_TARGETS_PER_BUS;
		host->max_lun = IPR_MAX_SIS64_LUNS_PER_TARGET;
		if (ipr_max_devs > IPR_MAX_SIS64_DEVS)
			ioa_cfg->max_devs_supported = IPR_MAX_SIS64_DEVS;
		ioa_cfg->cfg_table_size = (sizeof(struct ipr_config_table_hdr64)
					   + ((sizeof(struct ipr_config_table_entry64)
					       * ioa_cfg->max_devs_supported)));
	} else {
		host->max_id = IPR_MAX_NUM_TARGETS_PER_BUS;
		host->max_lun = IPR_MAX_NUM_LUNS_PER_TARGET;
		if (ipr_max_devs > IPR_MAX_PHYSICAL_DEVS)
			ioa_cfg->max_devs_supported = IPR_MAX_PHYSICAL_DEVS;
		ioa_cfg->cfg_table_size = (sizeof(struct ipr_config_table_hdr)
					   + ((sizeof(struct ipr_config_table_entry)
					       * ioa_cfg->max_devs_supported)));
	}

	host->max_channel = IPR_VSET_BUS;
	host->unique_id = host->host_no;
	host->max_cmd_len = IPR_MAX_CDB_LEN;
	host->can_queue = ioa_cfg->max_cmds;
	pci_set_drvdata(pdev, ioa_cfg);

	for (i = 0; i < ARRAY_SIZE(ioa_cfg->hrrq); i++) {
		INIT_LIST_HEAD(&ioa_cfg->hrrq[i].hrrq_free_q);
		INIT_LIST_HEAD(&ioa_cfg->hrrq[i].hrrq_pending_q);
		spin_lock_init(&ioa_cfg->hrrq[i]._lock);
		if (i == 0)
			ioa_cfg->hrrq[i].lock = ioa_cfg->host->host_lock;
		else
			ioa_cfg->hrrq[i].lock = &ioa_cfg->hrrq[i]._lock;
	}
}

/**
 * ipr_get_chip_info - Find adapter chip information
 * @dev_id:	PCI device id struct
 *
 * Return value:
 * 	ptr to chip information on success / NULL on failure
 **/
static const struct ipr_chip_t *
ipr_get_chip_info(const struct pci_device_id *dev_id)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(ipr_chip); i++)
		if (ipr_chip[i].vendor == dev_id->vendor &&
		    ipr_chip[i].device == dev_id->device)
			return &ipr_chip[i];
	return NULL;
}

/**
 * ipr_wait_for_pci_err_recovery - Wait for any PCI error recovery to complete
 *				   during probe time
 * @ioa_cfg:	ioa config struct
 *
 * Return value:
 * 	none
 **/
static void ipr_wait_for_pci_err_recovery(struct ipr_ioa_cfg *ioa_cfg)
{
	struct pci_dev *pdev = ioa_cfg->pdev;

	if (pci_channel_offline(pdev)) {
		wait_event_timeout(ioa_cfg->eeh_wait_q,
				   !pci_channel_offline(pdev),
				   IPR_PCI_ERROR_RECOVERY_TIMEOUT);
		pci_restore_state(pdev);
	}
}

static int ipr_enable_msix(struct ipr_ioa_cfg *ioa_cfg)
{
	struct msix_entry entries[IPR_MAX_MSIX_VECTORS];
	int i, vectors;

	for (i = 0; i < ARRAY_SIZE(entries); ++i)
		entries[i].entry = i;

	vectors = pci_enable_msix_range(ioa_cfg->pdev,
					entries, 1, ipr_number_of_msix);
	if (vectors < 0) {
		ipr_wait_for_pci_err_recovery(ioa_cfg);
		return vectors;
	}

	for (i = 0; i < vectors; i++)
		ioa_cfg->vectors_info[i].vec = entries[i].vector;
	ioa_cfg->nvectors = vectors;

	return 0;
}

static int
ipr_enable_msi(struct ipr_ioa_cfg *ioa_cfg)
{
	int i, vectors;

	vectors = pci_enable_msi_range(ioa_cfg->pdev, 1, ipr_number_of_msix);
	if (vectors < 0) {
		ipr_wait_for_pci_err_recovery(ioa_cfg);
		return vectors;
	}

	for (i = 0; i < vectors; i++)
		ioa_cfg->vectors_info[i].vec = ioa_cfg->pdev->irq + i;
	ioa_cfg->nvectors = vectors;

	return 0;
}

static void name_msi_vectors(struct ipr_ioa_cfg *ioa_cfg)
{
	int vec_idx, n = sizeof(ioa_cfg->vectors_info[0].desc) - 1;

	for (vec_idx = 0; vec_idx < ioa_cfg->nvectors; vec_idx++) {
		snprintf(ioa_cfg->vectors_info[vec_idx].desc, n,
			 "host%d-%d", ioa_cfg->host->host_no, vec_idx);
		ioa_cfg->vectors_info[vec_idx].
			desc[strlen(ioa_cfg->vectors_info[vec_idx].desc)] = 0;
	}
}

static int ipr_request_other_msi_irqs(struct ipr_ioa_cfg *ioa_cfg)
{
	int i, rc;

	for (i = 1; i < ioa_cfg->nvectors; i++) {
		rc = request_irq(ioa_cfg->vectors_info[i].vec,
				 ipr_isr_mhrrq,
				 0,
				 ioa_cfg->vectors_info[i].desc,
				 &ioa_cfg->hrrq[i]);
		if (rc) {
			while (--i >= 0)
				free_irq(ioa_cfg->vectors_info[i].vec,
					 &ioa_cfg->hrrq[i]);
			return rc;
		}
	}
	return 0;
}

/**
 * ipr_test_intr - Handle the interrupt generated in ipr_test_msi().
 * @irq:	interrupt number
 * @devp:	pointer to the ioa config struct
 *
 * Description: Simply set the msi_received flag to 1 indicating that
 * Message Signaled Interrupts are supported.
 *
 * Return value:
 * 	IRQ_HANDLED
 **/
static irqreturn_t ipr_test_intr(int irq, void *devp)
{
	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)devp;
	unsigned long lock_flags = 0;
	irqreturn_t rc = IRQ_HANDLED;

	dev_info(&ioa_cfg->pdev->dev, "Received IRQ : %d\n", irq);
	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);

	ioa_cfg->msi_received = 1;
	wake_up(&ioa_cfg->msi_wait_q);

	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
	return rc;
}

/**
 * ipr_test_msi - Test for Message Signaled Interrupt (MSI) support.
 * @ioa_cfg:	ioa config struct
 * @pdev:	PCI device struct
 *
 * Description: The return value from pci_enable_msi_range() cannot always be
 * trusted. This routine sets up and initiates a test interrupt to determine
 * if the interrupt is received via the ipr_test_intr() service routine.
 * If the test fails, the driver will fall back to LSI.
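 *
 * The test itself is roughly (a sketch; see the body below for the
 * exact locking and register accesses):
 *
 *	request_irq(vec, ipr_test_intr, 0, IPR_NAME, ioa_cfg);
 *	writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE,
 *	       ioa_cfg->regs.sense_interrupt_reg32);
 *	wait_event_timeout(ioa_cfg->msi_wait_q, ioa_cfg->msi_received, HZ);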
 *
 * Return value:
 * 	0 on success / non-zero on failure
 **/
static int ipr_test_msi(struct ipr_ioa_cfg *ioa_cfg, struct pci_dev *pdev)
{
	int rc;
	volatile u32 int_reg;
	unsigned long lock_flags = 0;

	ENTER;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	init_waitqueue_head(&ioa_cfg->msi_wait_q);
	ioa_cfg->msi_received = 0;
	ipr_mask_and_clear_interrupts(ioa_cfg, ~IPR_PCII_IOA_TRANS_TO_OPER);
	writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE, ioa_cfg->regs.clr_interrupt_mask_reg32);
	int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);

	if (ioa_cfg->intr_flag == IPR_USE_MSIX)
		rc = request_irq(ioa_cfg->vectors_info[0].vec, ipr_test_intr, 0, IPR_NAME, ioa_cfg);
	else
		rc = request_irq(pdev->irq, ipr_test_intr, 0, IPR_NAME, ioa_cfg);
	if (rc) {
		dev_err(&pdev->dev, "Can not assign irq %d\n", pdev->irq);
		return rc;
	} else if (ipr_debug)
		dev_info(&pdev->dev, "IRQ assigned: %d\n", pdev->irq);

	writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE, ioa_cfg->regs.sense_interrupt_reg32);
	int_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
	wait_event_timeout(ioa_cfg->msi_wait_q, ioa_cfg->msi_received, HZ);
	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	ipr_mask_and_clear_interrupts(ioa_cfg, ~IPR_PCII_IOA_TRANS_TO_OPER);

	if (!ioa_cfg->msi_received) {
		/* MSI test failed */
		dev_info(&pdev->dev, "MSI test failed. Falling back to LSI.\n");
		rc = -EOPNOTSUPP;
	} else if (ipr_debug)
		dev_info(&pdev->dev, "MSI test succeeded.\n");

	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);

	if (ioa_cfg->intr_flag == IPR_USE_MSIX)
		free_irq(ioa_cfg->vectors_info[0].vec, ioa_cfg);
	else
		free_irq(pdev->irq, ioa_cfg);

	LEAVE;

	return rc;
}

/**
 * ipr_probe_ioa - Allocates memory and does first stage of initialization
 * @pdev:	PCI device struct
 * @dev_id:	PCI device id struct
 *
 * Return value:
 * 	0 on success / non-zero on failure
 **/
static int ipr_probe_ioa(struct pci_dev *pdev,
			 const struct pci_device_id *dev_id)
{
	struct ipr_ioa_cfg *ioa_cfg;
	struct Scsi_Host *host;
	unsigned long ipr_regs_pci;
	void __iomem *ipr_regs;
	int rc = PCIBIOS_SUCCESSFUL;
	volatile u32 mask, uproc, interrupts;
	unsigned long lock_flags, driver_lock_flags;

	ENTER;

	dev_info(&pdev->dev, "Found IOA with IRQ: %d\n", pdev->irq);
	host = scsi_host_alloc(&driver_template, sizeof(*ioa_cfg));

	if (!host) {
		dev_err(&pdev->dev, "call to scsi_host_alloc failed!\n");
		rc = -ENOMEM;
		goto out;
	}

	ioa_cfg = (struct ipr_ioa_cfg *)host->hostdata;
	memset(ioa_cfg, 0, sizeof(struct ipr_ioa_cfg));
	ata_host_init(&ioa_cfg->ata_host, &pdev->dev, &ipr_sata_ops);

	ioa_cfg->ipr_chip = ipr_get_chip_info(dev_id);

	if (!ioa_cfg->ipr_chip) {
		dev_err(&pdev->dev, "Unknown adapter chipset 0x%04X 0x%04X\n",
			dev_id->vendor, dev_id->device);
		rc = -ENODEV;
		goto out_scsi_host_put;
	}

	/* set SIS 32 or SIS 64 */
	ioa_cfg->sis64 = ioa_cfg->ipr_chip->sis_type == IPR_SIS64 ?
		1 : 0;
	ioa_cfg->chip_cfg = ioa_cfg->ipr_chip->cfg;
	ioa_cfg->clear_isr = ioa_cfg->chip_cfg->clear_isr;
	ioa_cfg->max_cmds = ioa_cfg->chip_cfg->max_cmds;

	if (ipr_transop_timeout)
		ioa_cfg->transop_timeout = ipr_transop_timeout;
	else if (dev_id->driver_data & IPR_USE_LONG_TRANSOP_TIMEOUT)
		ioa_cfg->transop_timeout = IPR_LONG_OPERATIONAL_TIMEOUT;
	else
		ioa_cfg->transop_timeout = IPR_OPERATIONAL_TIMEOUT;

	ioa_cfg->revid = pdev->revision;

	ipr_init_ioa_cfg(ioa_cfg, host, pdev);

	ipr_regs_pci = pci_resource_start(pdev, 0);

	rc = pci_request_regions(pdev, IPR_NAME);
	if (rc < 0) {
		dev_err(&pdev->dev,
			"Couldn't register memory range of registers\n");
		goto out_scsi_host_put;
	}

	rc = pci_enable_device(pdev);

	if (rc || pci_channel_offline(pdev)) {
		if (pci_channel_offline(pdev)) {
			ipr_wait_for_pci_err_recovery(ioa_cfg);
			rc = pci_enable_device(pdev);
		}

		if (rc) {
			dev_err(&pdev->dev, "Cannot enable adapter\n");
			ipr_wait_for_pci_err_recovery(ioa_cfg);
			goto out_release_regions;
		}
	}

	ipr_regs = pci_ioremap_bar(pdev, 0);

	if (!ipr_regs) {
		dev_err(&pdev->dev,
			"Couldn't map memory range of registers\n");
		rc = -ENOMEM;
		goto out_disable;
	}

	ioa_cfg->hdw_dma_regs = ipr_regs;
	ioa_cfg->hdw_dma_regs_pci = ipr_regs_pci;
	ioa_cfg->ioa_mailbox = ioa_cfg->chip_cfg->mailbox + ipr_regs;

	ipr_init_regs(ioa_cfg);

	if (ioa_cfg->sis64) {
		rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
		if (rc < 0) {
			dev_dbg(&pdev->dev, "Failed to set 64 bit DMA mask\n");
			rc = dma_set_mask_and_coherent(&pdev->dev,
						       DMA_BIT_MASK(32));
		}
	} else
		rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));

	if (rc < 0) {
		dev_err(&pdev->dev, "Failed to set DMA mask\n");
		goto cleanup_nomem;
	}

	rc = pci_write_config_byte(pdev, PCI_CACHE_LINE_SIZE,
				   ioa_cfg->chip_cfg->cache_line_size);

	if (rc != PCIBIOS_SUCCESSFUL) {
		dev_err(&pdev->dev, "Write of cache line size failed\n");
		ipr_wait_for_pci_err_recovery(ioa_cfg);
		rc = -EIO;
		goto cleanup_nomem;
	}

	/* Issue MMIO read to ensure card is not in EEH */
	interrupts = readl(ioa_cfg->regs.sense_interrupt_reg);
	ipr_wait_for_pci_err_recovery(ioa_cfg);

	if (ipr_number_of_msix > IPR_MAX_MSIX_VECTORS) {
		dev_err(&pdev->dev, "The max number of MSIX is %d\n",
			IPR_MAX_MSIX_VECTORS);
		ipr_number_of_msix = IPR_MAX_MSIX_VECTORS;
	}

	if (ioa_cfg->ipr_chip->intr_type == IPR_USE_MSI &&
	    ipr_enable_msix(ioa_cfg) == 0)
		ioa_cfg->intr_flag = IPR_USE_MSIX;
	else if (ioa_cfg->ipr_chip->intr_type == IPR_USE_MSI &&
		 ipr_enable_msi(ioa_cfg) == 0)
		ioa_cfg->intr_flag = IPR_USE_MSI;
	else {
		ioa_cfg->intr_flag = IPR_USE_LSI;
		ioa_cfg->nvectors = 1;
		dev_info(&pdev->dev, "Cannot enable MSI.\n");
	}

	pci_set_master(pdev);

	if (pci_channel_offline(pdev)) {
		ipr_wait_for_pci_err_recovery(ioa_cfg);
		pci_set_master(pdev);
		if (pci_channel_offline(pdev)) {
			rc = -EIO;
			goto out_msi_disable;
		}
	}

	if (ioa_cfg->intr_flag == IPR_USE_MSI ||
	    ioa_cfg->intr_flag == IPR_USE_MSIX) {
		rc = ipr_test_msi(ioa_cfg, pdev);
		if (rc == -EOPNOTSUPP) {
			ipr_wait_for_pci_err_recovery(ioa_cfg);
			if (ioa_cfg->intr_flag
			    == IPR_USE_MSI) {
				ioa_cfg->intr_flag &= ~IPR_USE_MSI;
				pci_disable_msi(pdev);
			} else if (ioa_cfg->intr_flag == IPR_USE_MSIX) {
				ioa_cfg->intr_flag &= ~IPR_USE_MSIX;
				pci_disable_msix(pdev);
			}

			ioa_cfg->intr_flag = IPR_USE_LSI;
			ioa_cfg->nvectors = 1;
		} else if (rc)
			goto out_msi_disable;
		else {
			if (ioa_cfg->intr_flag == IPR_USE_MSI)
				dev_info(&pdev->dev,
					 "Request for %d MSIs succeeded with starting IRQ: %d\n",
					 ioa_cfg->nvectors, pdev->irq);
			else if (ioa_cfg->intr_flag == IPR_USE_MSIX)
				dev_info(&pdev->dev,
					 "Request for %d MSIXs succeeded.\n",
					 ioa_cfg->nvectors);
		}
	}

	ioa_cfg->hrrq_num = min3(ioa_cfg->nvectors,
				 (unsigned int)num_online_cpus(),
				 (unsigned int)IPR_MAX_HRRQ_NUM);

	if ((rc = ipr_save_pcix_cmd_reg(ioa_cfg)))
		goto out_msi_disable;

	if ((rc = ipr_set_pcix_cmd_reg(ioa_cfg)))
		goto out_msi_disable;

	rc = ipr_alloc_mem(ioa_cfg);
	if (rc < 0) {
		dev_err(&pdev->dev,
			"Couldn't allocate enough memory for device driver!\n");
		goto out_msi_disable;
	}

	/* Save away PCI config space for use following IOA reset */
	rc = pci_save_state(pdev);

	if (rc != PCIBIOS_SUCCESSFUL) {
		dev_err(&pdev->dev, "Failed to save PCI config space\n");
		rc = -EIO;
		goto cleanup_nolog;
	}

	/*
	 * If HRRQ updated interrupt is not masked, or reset alert is set,
	 * the card is in an unknown state and needs a hard reset
	 */
	mask = readl(ioa_cfg->regs.sense_interrupt_mask_reg32);
	interrupts = readl(ioa_cfg->regs.sense_interrupt_reg32);
	uproc = readl(ioa_cfg->regs.sense_uproc_interrupt_reg32);
	if ((mask & IPR_PCII_HRRQ_UPDATED) == 0 || (uproc & IPR_UPROCI_RESET_ALERT))
		ioa_cfg->needs_hard_reset = 1;
	if ((interrupts & IPR_PCII_ERROR_INTERRUPTS) || reset_devices)
		ioa_cfg->needs_hard_reset = 1;
	if (interrupts & IPR_PCII_IOA_UNIT_CHECKED)
		ioa_cfg->ioa_unit_checked = 1;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	ipr_mask_and_clear_interrupts(ioa_cfg, ~IPR_PCII_IOA_TRANS_TO_OPER);
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);

	if (ioa_cfg->intr_flag == IPR_USE_MSI ||
	    ioa_cfg->intr_flag == IPR_USE_MSIX) {
		name_msi_vectors(ioa_cfg);
		rc = request_irq(ioa_cfg->vectors_info[0].vec, ipr_isr,
				 0,
				 ioa_cfg->vectors_info[0].desc,
				 &ioa_cfg->hrrq[0]);
		if (!rc)
			rc = ipr_request_other_msi_irqs(ioa_cfg);
	} else {
		rc = request_irq(pdev->irq, ipr_isr,
				 IRQF_SHARED,
				 IPR_NAME, &ioa_cfg->hrrq[0]);
	}
	if (rc) {
		dev_err(&pdev->dev, "Couldn't register IRQ %d!"
			" rc=%d\n",
			pdev->irq, rc);
		goto cleanup_nolog;
	}

	if ((dev_id->driver_data & IPR_USE_PCI_WARM_RESET) ||
	    (dev_id->device == PCI_DEVICE_ID_IBM_OBSIDIAN_E && !ioa_cfg->revid)) {
		ioa_cfg->needs_warm_reset = 1;
		ioa_cfg->reset = ipr_reset_slot_reset;

		ioa_cfg->reset_work_q = alloc_ordered_workqueue("ipr_reset_%d",
								WQ_MEM_RECLAIM, host->host_no);

		if (!ioa_cfg->reset_work_q) {
			dev_err(&pdev->dev, "Couldn't register reset workqueue\n");
			rc = -ENOMEM;
			goto out_free_irq;
		}
	} else
		ioa_cfg->reset = ipr_reset_start_bist;

	spin_lock_irqsave(&ipr_driver_lock, driver_lock_flags);
	list_add_tail(&ioa_cfg->queue, &ipr_ioa_head);
	spin_unlock_irqrestore(&ipr_driver_lock, driver_lock_flags);

	LEAVE;
out:
	return rc;

out_free_irq:
	ipr_free_irqs(ioa_cfg);
cleanup_nolog:
	ipr_free_mem(ioa_cfg);
out_msi_disable:
	ipr_wait_for_pci_err_recovery(ioa_cfg);
	if (ioa_cfg->intr_flag == IPR_USE_MSI)
		pci_disable_msi(pdev);
	else if (ioa_cfg->intr_flag == IPR_USE_MSIX)
		pci_disable_msix(pdev);
cleanup_nomem:
	iounmap(ipr_regs);
out_disable:
	pci_disable_device(pdev);
out_release_regions:
	pci_release_regions(pdev);
out_scsi_host_put:
	scsi_host_put(host);
	goto out;
}

/**
 * ipr_initiate_ioa_bringdown - Bring down an adapter
 * @ioa_cfg:		ioa config struct
 * @shutdown_type:	shutdown type
 *
 * Description: This function will initiate bringing down the adapter.
 * This consists of issuing an IOA shutdown to the adapter
 * to flush the cache, and running BIST.
 * If the caller needs to wait on the completion of the reset,
 * the caller must sleep on the reset_wait_q.
 *
 * Return value:
 * 	none
 **/
static void ipr_initiate_ioa_bringdown(struct ipr_ioa_cfg *ioa_cfg,
				       enum ipr_shutdown_type shutdown_type)
{
	ENTER;
	if (ioa_cfg->sdt_state == WAIT_FOR_DUMP)
		ioa_cfg->sdt_state = ABORT_DUMP;
	ioa_cfg->reset_retries = 0;
	ioa_cfg->in_ioa_bringdown = 1;
	ipr_initiate_ioa_reset(ioa_cfg, shutdown_type);
	LEAVE;
}

/**
 * __ipr_remove - Remove a single adapter
 * @pdev:	pci device struct
 *
 * Adapter hot plug remove entry point.
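 *
 * The teardown order below is deliberate: wait out any reset/reload
 * in progress, flag every HRRQ with removing_ioa, bring the IOA down
 * with a normal shutdown, and only free resources once the reset
 * thread and work queues have drained.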
 *
 * Return value:
 * 	none
 **/
static void __ipr_remove(struct pci_dev *pdev)
{
	unsigned long host_lock_flags = 0;
	struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
	int i;
	unsigned long driver_lock_flags;
	ENTER;

	spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
	while (ioa_cfg->in_reset_reload) {
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
		wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
		spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
	}

	for (i = 0; i < ioa_cfg->hrrq_num; i++) {
		spin_lock(&ioa_cfg->hrrq[i]._lock);
		ioa_cfg->hrrq[i].removing_ioa = 1;
		spin_unlock(&ioa_cfg->hrrq[i]._lock);
	}
	wmb();
	ipr_initiate_ioa_bringdown(ioa_cfg, IPR_SHUTDOWN_NORMAL);

	spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
	wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
	flush_work(&ioa_cfg->work_q);
	if (ioa_cfg->reset_work_q)
		flush_workqueue(ioa_cfg->reset_work_q);
	INIT_LIST_HEAD(&ioa_cfg->used_res_q);
	spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);

	spin_lock_irqsave(&ipr_driver_lock, driver_lock_flags);
	list_del(&ioa_cfg->queue);
	spin_unlock_irqrestore(&ipr_driver_lock, driver_lock_flags);

	if (ioa_cfg->sdt_state == ABORT_DUMP)
		ioa_cfg->sdt_state = WAIT_FOR_DUMP;
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);

	ipr_free_all_resources(ioa_cfg);

	LEAVE;
}

/**
 * ipr_remove - IOA hot plug remove entry point
 * @pdev:	pci device struct
 *
 * Adapter hot plug remove entry point.
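 *
 * Unlike __ipr_remove(), this variant also tears down the sysfs
 * trace/dump attributes and unregisters the SCSI host before the
 * common teardown runs.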
 *
 * Return value:
 * 	none
 **/
static void ipr_remove(struct pci_dev *pdev)
{
	struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);

	ENTER;

	ipr_remove_trace_file(&ioa_cfg->host->shost_dev.kobj,
			      &ipr_trace_attr);
	ipr_remove_dump_file(&ioa_cfg->host->shost_dev.kobj,
			     &ipr_dump_attr);
	scsi_remove_host(ioa_cfg->host);

	__ipr_remove(pdev);

	LEAVE;
}

/**
 * ipr_probe - Adapter hot plug add entry point
 * @pdev:	PCI device struct
 * @dev_id:	PCI device id struct
 *
 * Return value:
 * 	0 on success / non-zero on failure
 **/
static int ipr_probe(struct pci_dev *pdev, const struct pci_device_id *dev_id)
{
	struct ipr_ioa_cfg *ioa_cfg;
	int rc, i;

	rc = ipr_probe_ioa(pdev, dev_id);

	if (rc)
		return rc;

	ioa_cfg = pci_get_drvdata(pdev);
	rc = ipr_probe_ioa_part2(ioa_cfg);

	if (rc) {
		__ipr_remove(pdev);
		return rc;
	}

	rc = scsi_add_host(ioa_cfg->host, &pdev->dev);

	if (rc) {
		__ipr_remove(pdev);
		return rc;
	}

	rc = ipr_create_trace_file(&ioa_cfg->host->shost_dev.kobj,
				   &ipr_trace_attr);

	if (rc) {
		scsi_remove_host(ioa_cfg->host);
		__ipr_remove(pdev);
		return rc;
	}

	rc = ipr_create_dump_file(&ioa_cfg->host->shost_dev.kobj,
				  &ipr_dump_attr);

	if (rc) {
		ipr_remove_trace_file(&ioa_cfg->host->shost_dev.kobj,
				      &ipr_trace_attr);
		scsi_remove_host(ioa_cfg->host);
		__ipr_remove(pdev);
		return rc;
	}

	scsi_scan_host(ioa_cfg->host);
	ioa_cfg->iopoll_weight = ioa_cfg->chip_cfg->iopoll_weight;

	if (ioa_cfg->iopoll_weight && ioa_cfg->sis64 && ioa_cfg->nvectors > 1) {
		for (i = 1; i < ioa_cfg->hrrq_num; i++) {
			blk_iopoll_init(&ioa_cfg->hrrq[i].iopoll,
					ioa_cfg->iopoll_weight, ipr_iopoll);
			blk_iopoll_enable(&ioa_cfg->hrrq[i].iopoll);
		}
	}

	schedule_work(&ioa_cfg->work_q);
	return 0;
}

/**
 * ipr_shutdown - Shutdown handler.
 * @pdev:	pci device struct
 *
 * This function is invoked upon system shutdown/reboot. It will issue
 * an adapter shutdown to the adapter to flush the write cache.
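 *
 * When the ipr_fast_reboot module parameter is set, SIS-64 adapters
 * are instead sent a quiesce shutdown on reboot and their IRQs are
 * freed, presumably to avoid waiting on a full write cache flush.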
 *
 * Return value:
 * 	none
 **/
static void ipr_shutdown(struct pci_dev *pdev)
{
	struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
	unsigned long lock_flags = 0;
	enum ipr_shutdown_type shutdown_type = IPR_SHUTDOWN_NORMAL;
	int i;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	if (ioa_cfg->iopoll_weight && ioa_cfg->sis64 && ioa_cfg->nvectors > 1) {
		ioa_cfg->iopoll_weight = 0;
		for (i = 1; i < ioa_cfg->hrrq_num; i++)
			blk_iopoll_disable(&ioa_cfg->hrrq[i].iopoll);
	}

	while (ioa_cfg->in_reset_reload) {
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
		wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
		spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	}

	if (ipr_fast_reboot && system_state == SYSTEM_RESTART && ioa_cfg->sis64)
		shutdown_type = IPR_SHUTDOWN_QUIESCE;

	ipr_initiate_ioa_bringdown(ioa_cfg, shutdown_type);
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
	wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
	if (ipr_fast_reboot && system_state == SYSTEM_RESTART && ioa_cfg->sis64) {
		ipr_free_irqs(ioa_cfg);
		pci_disable_device(ioa_cfg->pdev);
	}
}

static struct pci_device_id ipr_pci_table[] = {
	{ PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_5702, 0, 0, 0 },
	{ PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_5703, 0, 0, 0 },
	{ PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_573D, 0, 0, 0 },
	{ PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_573E, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571B, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572E, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571A, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_575B, 0, 0,
		IPR_USE_LONG_TRANSOP_TIMEOUT },
	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572A, 0, 0, 0 },
	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572B, 0, 0,
		IPR_USE_LONG_TRANSOP_TIMEOUT },
	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_575C, 0, 0,
		IPR_USE_LONG_TRANSOP_TIMEOUT },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572A, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572B, 0, 0,
		IPR_USE_LONG_TRANSOP_TIMEOUT },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_575C, 0, 0,
		IPR_USE_LONG_TRANSOP_TIMEOUT },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_574E, 0, 0,
		IPR_USE_LONG_TRANSOP_TIMEOUT },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B3, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57CC, 0, 0, 0 },
	{
	  PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B7, 0, 0,
		IPR_USE_LONG_TRANSOP_TIMEOUT | IPR_USE_PCI_WARM_RESET },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_SNIPE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_2780, 0, 0, 0 },
	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571E, 0, 0, 0 },
	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571F, 0, 0,
		IPR_USE_LONG_TRANSOP_TIMEOUT },
	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572F, 0, 0,
		IPR_USE_LONG_TRANSOP_TIMEOUT },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B5, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_574D, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B2, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57C0, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57C3, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57C4, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B4, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B1, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57C6, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57C8, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57CE, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57D5, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57D6, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57D7, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57D8, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57D9, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57DA, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57EB, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57EC, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57ED, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57EE, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57EF, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57F0, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_2CCA, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
		PCI_VENDOR_ID_IBM,
		IPR_SUBS_DEV_ID_2CD2, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_2CCD, 0, 0, 0 },
	{ }
};
MODULE_DEVICE_TABLE(pci, ipr_pci_table);

static const struct pci_error_handlers ipr_err_handler = {
	.error_detected = ipr_pci_error_detected,
	.mmio_enabled = ipr_pci_mmio_enabled,
	.slot_reset = ipr_pci_slot_reset,
};

static struct pci_driver ipr_driver = {
	.name = IPR_NAME,
	.id_table = ipr_pci_table,
	.probe = ipr_probe,
	.remove = ipr_remove,
	.shutdown = ipr_shutdown,
	.err_handler = &ipr_err_handler,
};

/**
 * ipr_halt_done - Shutdown prepare completion
 * @ipr_cmd:	ipr command struct
 *
 * Return value:
 * 	none
 **/
static void ipr_halt_done(struct ipr_cmnd *ipr_cmd)
{
	list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
}

/**
 * ipr_halt - Issue shutdown prepare to all adapters
 * @nb:		notifier block
 * @event:	system state change event
 * @buf:	unused
 *
 * Return value:
 * 	NOTIFY_OK on success / NOTIFY_DONE on failure
 **/
static int ipr_halt(struct notifier_block *nb, ulong event, void *buf)
{
	struct ipr_cmnd *ipr_cmd;
	struct ipr_ioa_cfg *ioa_cfg;
	unsigned long flags = 0, driver_lock_flags;

	if (event != SYS_RESTART && event != SYS_HALT && event != SYS_POWER_OFF)
		return NOTIFY_DONE;

	spin_lock_irqsave(&ipr_driver_lock, driver_lock_flags);

	list_for_each_entry(ioa_cfg, &ipr_ioa_head, queue) {
		spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
		if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].allow_cmds ||
		    (ipr_fast_reboot && event == SYS_RESTART && ioa_cfg->sis64)) {
			spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
			continue;
		}

		ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
		ipr_cmd->ioarcb.res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
		ipr_cmd->ioarcb.cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
		ipr_cmd->ioarcb.cmd_pkt.cdb[0] = IPR_IOA_SHUTDOWN;
		ipr_cmd->ioarcb.cmd_pkt.cdb[1] = IPR_SHUTDOWN_PREPARE_FOR_NORMAL;

		ipr_do_req(ipr_cmd, ipr_halt_done, ipr_timeout, IPR_DEVICE_RESET_TIMEOUT);
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
	}
	spin_unlock_irqrestore(&ipr_driver_lock, driver_lock_flags);

	return NOTIFY_OK;
}

static struct notifier_block ipr_notifier = {
	ipr_halt, NULL, 0
};

/**
 * ipr_init - Module entry point
 *
 * Return value:
 * 	0 on success / negative value on failure
 **/
static int __init ipr_init(void)
{
	ipr_info("IBM Power RAID SCSI Device Driver version: %s %s\n",
		 IPR_DRIVER_VERSION, IPR_DRIVER_DATE);

	register_reboot_notifier(&ipr_notifier);
	return pci_register_driver(&ipr_driver);
}

/**
 * ipr_exit - Module unload
 *
 * Module unload entry point.
 *
 * Return value:
 * 	none
 **/
static void __exit ipr_exit(void)
{
	unregister_reboot_notifier(&ipr_notifier);
	pci_unregister_driver(&ipr_driver);
}

module_init(ipr_init);
module_exit(ipr_exit);