/*
 *	Adaptec AAC series RAID controller driver
 *	(c) Copyright 2001 Red Hat Inc.	<alan@redhat.com>
 *
 * based on the old aacraid driver that is..
 * Adaptec aacraid device driver for Linux.
 *
 * Copyright (c) 2000 Adaptec, Inc. (aacraid@adaptec.com)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; see the file COPYING.  If not, write to
 * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
 *
 */

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/sched.h>
#include <linux/pci.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/completion.h>
#include <linux/blkdev.h>
#include <asm/semaphore.h>
#include <asm/uaccess.h>

#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>

#include "aacraid.h"

/* values for inqd_pdt: Peripheral device type in plain English */
#define	INQD_PDT_DA	0x00	/* Direct-access (DISK) device */
#define	INQD_PDT_PROC	0x03	/* Processor device */
#define	INQD_PDT_CHNGR	0x08	/* Changer (jukebox, scsi2) */
#define	INQD_PDT_COMM	0x09	/* Communication device (scsi2) */
#define	INQD_PDT_NOLUN2	0x1f	/* Unknown Device (scsi2) */
#define	INQD_PDT_NOLUN	0x7f	/* Logical Unit Not Present */

#define	INQD_PDT_DMASK	0x1F	/* Peripheral Device Type Mask */
#define	INQD_PDT_QMASK	0xE0	/* Peripheral Device Qualifier Mask */

/*
 *	Sense codes
 */

#define SENCODE_NO_SENSE			0x00
#define SENCODE_END_OF_DATA			0x00
#define SENCODE_BECOMING_READY			0x04
#define SENCODE_INIT_CMD_REQUIRED		0x04
#define SENCODE_PARAM_LIST_LENGTH_ERROR		0x1A
#define SENCODE_INVALID_COMMAND			0x20
#define SENCODE_LBA_OUT_OF_RANGE		0x21
#define SENCODE_INVALID_CDB_FIELD		0x24
#define SENCODE_LUN_NOT_SUPPORTED		0x25
#define SENCODE_INVALID_PARAM_FIELD		0x26
#define SENCODE_PARAM_NOT_SUPPORTED		0x26
#define SENCODE_PARAM_VALUE_INVALID		0x26
#define SENCODE_RESET_OCCURRED			0x29
#define SENCODE_LUN_NOT_SELF_CONFIGURED_YET	0x3E
#define SENCODE_INQUIRY_DATA_CHANGED		0x3F
#define SENCODE_SAVING_PARAMS_NOT_SUPPORTED	0x39
#define SENCODE_DIAGNOSTIC_FAILURE		0x40
#define SENCODE_INTERNAL_TARGET_FAILURE		0x44
#define SENCODE_INVALID_MESSAGE_ERROR		0x49
#define SENCODE_LUN_FAILED_SELF_CONFIG		0x4c
#define SENCODE_OVERLAPPED_COMMAND		0x4E

/*
 *	Additional sense codes
 */

#define ASENCODE_NO_SENSE			0x00
#define ASENCODE_END_OF_DATA			0x05
#define ASENCODE_BECOMING_READY			0x01
#define ASENCODE_INIT_CMD_REQUIRED		0x02
#define ASENCODE_PARAM_LIST_LENGTH_ERROR	0x00
#define ASENCODE_INVALID_COMMAND		0x00
#define ASENCODE_LBA_OUT_OF_RANGE		0x00
#define ASENCODE_INVALID_CDB_FIELD		0x00
#define ASENCODE_LUN_NOT_SUPPORTED		0x00
#define ASENCODE_INVALID_PARAM_FIELD		0x00
#define ASENCODE_PARAM_NOT_SUPPORTED		0x01
#define ASENCODE_PARAM_VALUE_INVALID		0x02
#define ASENCODE_RESET_OCCURRED			0x00
#define ASENCODE_LUN_NOT_SELF_CONFIGURED_YET	0x00
#define ASENCODE_INQUIRY_DATA_CHANGED		0x03
#define ASENCODE_SAVING_PARAMS_NOT_SUPPORTED	0x00
#define ASENCODE_DIAGNOSTIC_FAILURE		0x80
#define ASENCODE_INTERNAL_TARGET_FAILURE	0x00
#define ASENCODE_INVALID_MESSAGE_ERROR		0x00
#define ASENCODE_LUN_FAILED_SELF_CONFIG		0x00
#define ASENCODE_OVERLAPPED_COMMAND		0x00
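
/*
 * The SENCODE_* values are SCSI Additional Sense Codes (ASC) and the
 * ASENCODE_* values are the matching Additional Sense Code Qualifiers
 * (ASCQ); set_sense() below stores each pair in bytes 12 and 13 of a
 * fixed format sense buffer.
 */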
#define BYTE0(x) (unsigned char)(x)
#define BYTE1(x) (unsigned char)((x) >> 8)
#define BYTE2(x) (unsigned char)((x) >> 16)
#define BYTE3(x) (unsigned char)((x) >> 24)

/*------------------------------------------------------------------------------
 *              S T R U C T S / T Y P E D E F S
 *----------------------------------------------------------------------------*/
/* SCSI inquiry data */
struct inquiry_data {
	u8 inqd_pdt;	/* Peripheral qualifier | Peripheral Device Type */
	u8 inqd_dtq;	/* RMB | Device Type Qualifier */
	u8 inqd_ver;	/* ISO version | ECMA version | ANSI-approved version */
	u8 inqd_rdf;	/* AENC | TrmIOP | Response data format */
	u8 inqd_len;	/* Additional length (n-4) */
	u8 inqd_pad1[2];/* Reserved - must be zero */
	u8 inqd_pad2;	/* RelAdr | WBus32 | WBus16 |  Sync  | Linked |Reserved| CmdQue | SftRe */
	u8 inqd_vid[8];	/* Vendor ID */
	u8 inqd_pid[16];/* Product ID */
	u8 inqd_prl[4];	/* Product Revision Level */
};

/*
 *              M O D U L E   G L O B A L S
 */

static unsigned long aac_build_sg(struct scsi_cmnd* scsicmd, struct sgmap* sgmap);
static unsigned long aac_build_sg64(struct scsi_cmnd* scsicmd, struct sgmap64* psg);
static unsigned long aac_build_sgraw(struct scsi_cmnd* scsicmd, struct sgmapraw* psg);
static int aac_send_srb_fib(struct scsi_cmnd* scsicmd);
#ifdef AAC_DETAILED_STATUS_INFO
static char *aac_get_status_string(u32 status);
#endif

/*
 *	Non dasd selection is handled entirely in aachba now
 */

static int nondasd = -1;
static int dacmode = -1;

static int commit = -1;

module_param(nondasd, int, 0);
MODULE_PARM_DESC(nondasd, "Control scanning of hba for nondasd devices. 0=off, 1=on");
module_param(dacmode, int, 0);
MODULE_PARM_DESC(dacmode, "Control whether dma addressing is using 64 bit DAC. 0=off, 1=on");
module_param(commit, int, 0);
MODULE_PARM_DESC(commit, "Control whether a COMMIT_CONFIG is issued to the adapter for foreign arrays.\nThis is typically needed in systems that do not have a BIOS. 0=off, 1=on");

int numacb = -1;
module_param(numacb, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(numacb, "Request a limit to the number of adapter control blocks (FIB) allocated. Valid\nvalues are 512 and down. Default is to use suggestion from Firmware.");

int acbsize = -1;
module_param(acbsize, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(acbsize, "Request a specific adapter control block (FIB) size. Valid values are 512,\n2048, 4096 and 8192. Default is to use suggestion from Firmware.");
/**
 *	aac_get_config_status	-	check the adapter configuration
 *	@dev: adapter to query
 *
 *	Query config status, and commit the configuration if needed.
 */
int aac_get_config_status(struct aac_dev *dev)
{
	int status = 0;
	struct fib * fibptr;

	if (!(fibptr = fib_alloc(dev)))
		return -ENOMEM;

	fib_init(fibptr);
	{
		struct aac_get_config_status *dinfo;
		dinfo = (struct aac_get_config_status *) fib_data(fibptr);

		dinfo->command = cpu_to_le32(VM_ContainerConfig);
		dinfo->type = cpu_to_le32(CT_GET_CONFIG_STATUS);
		dinfo->count = cpu_to_le32(sizeof(((struct aac_get_config_status_resp *)NULL)->data));
	}

	status = fib_send(ContainerCommand,
			    fibptr,
			    sizeof (struct aac_get_config_status),
			    FsaNormal,
			    1, 1,
			    NULL, NULL);
	if (status < 0 ) {
		printk(KERN_WARNING "aac_get_config_status: SendFIB failed.\n");
	} else {
		struct aac_get_config_status_resp *reply
		  = (struct aac_get_config_status_resp *) fib_data(fibptr);
		dprintk((KERN_WARNING
		  "aac_get_config_status: response=%d status=%d action=%d\n",
		  le32_to_cpu(reply->response),
		  le32_to_cpu(reply->status),
		  le32_to_cpu(reply->data.action)));
		if ((le32_to_cpu(reply->response) != ST_OK) ||
		     (le32_to_cpu(reply->status) != CT_OK) ||
		     (le32_to_cpu(reply->data.action) > CFACT_PAUSE)) {
			printk(KERN_WARNING "aac_get_config_status: Will not issue the Commit Configuration\n");
			status = -EINVAL;
		}
	}
	fib_complete(fibptr);
	/* Send a CT_COMMIT_CONFIG to enable discovery of devices */
	if (status >= 0) {
		if (commit == 1) {
			struct aac_commit_config * dinfo;
			fib_init(fibptr);
			dinfo = (struct aac_commit_config *) fib_data(fibptr);

			dinfo->command = cpu_to_le32(VM_ContainerConfig);
			dinfo->type = cpu_to_le32(CT_COMMIT_CONFIG);

			status = fib_send(ContainerCommand,
				    fibptr,
				    sizeof (struct aac_commit_config),
				    FsaNormal,
				    1, 1,
				    NULL, NULL);
			fib_complete(fibptr);
		} else if (commit == 0) {
			printk(KERN_WARNING
			  "aac_get_config_status: Foreign device configurations are being ignored\n");
		}
	}
	fib_free(fibptr);
	return status;
}
/**
 *	aac_get_containers	-	list containers
 *	@dev: adapter to probe
 *
 *	Make a list of all containers on this controller
 */
int aac_get_containers(struct aac_dev *dev)
{
	struct fsa_dev_info *fsa_dev_ptr;
	u32 index;
	int status = 0;
	struct fib * fibptr;
	unsigned instance;
	struct aac_get_container_count *dinfo;
	struct aac_get_container_count_resp *dresp;
	int maximum_num_containers = MAXIMUM_NUM_CONTAINERS;

	instance = dev->scsi_host_ptr->unique_id;

	if (!(fibptr = fib_alloc(dev)))
		return -ENOMEM;

	fib_init(fibptr);
	dinfo = (struct aac_get_container_count *) fib_data(fibptr);
	dinfo->command = cpu_to_le32(VM_ContainerConfig);
	dinfo->type = cpu_to_le32(CT_GET_CONTAINER_COUNT);

	status = fib_send(ContainerCommand,
		    fibptr,
		    sizeof (struct aac_get_container_count),
		    FsaNormal,
		    1, 1,
		    NULL, NULL);
	if (status >= 0) {
		dresp = (struct aac_get_container_count_resp *)fib_data(fibptr);
		maximum_num_containers = le32_to_cpu(dresp->ContainerSwitchEntries);
		fib_complete(fibptr);
	}

	if (maximum_num_containers < MAXIMUM_NUM_CONTAINERS)
		maximum_num_containers = MAXIMUM_NUM_CONTAINERS;
	fsa_dev_ptr = (struct fsa_dev_info *) kmalloc(
	  sizeof(*fsa_dev_ptr) * maximum_num_containers, GFP_KERNEL);
	if (!fsa_dev_ptr) {
		fib_free(fibptr);
		return -ENOMEM;
	}
	memset(fsa_dev_ptr, 0, sizeof(*fsa_dev_ptr) * maximum_num_containers);

	dev->fsa_dev = fsa_dev_ptr;
	dev->maximum_num_containers = maximum_num_containers;

	for (index = 0; index < dev->maximum_num_containers; index++) {
		struct aac_query_mount *dinfo;
		struct aac_mount *dresp;

		fsa_dev_ptr[index].devname[0] = '\0';

		fib_init(fibptr);
		dinfo = (struct aac_query_mount *) fib_data(fibptr);

		dinfo->command = cpu_to_le32(VM_NameServe);
		dinfo->count = cpu_to_le32(index);
		dinfo->type = cpu_to_le32(FT_FILESYS);

		status = fib_send(ContainerCommand,
			    fibptr,
			    sizeof (struct aac_query_mount),
			    FsaNormal,
			    1, 1,
			    NULL, NULL);
		if (status < 0 ) {
			printk(KERN_WARNING "aac_get_containers: SendFIB failed.\n");
			break;
		}
		dresp = (struct aac_mount *)fib_data(fibptr);

		dprintk ((KERN_DEBUG
		  "VM_NameServe cid=%d status=%d vol=%d state=%d cap=%u\n",
		  (int)index, (int)le32_to_cpu(dresp->status),
		  (int)le32_to_cpu(dresp->mnt[0].vol),
		  (int)le32_to_cpu(dresp->mnt[0].state),
		  (unsigned)le32_to_cpu(dresp->mnt[0].capacity)));
		if ((le32_to_cpu(dresp->status) == ST_OK) &&
		    (le32_to_cpu(dresp->mnt[0].vol) != CT_NONE) &&
		    (le32_to_cpu(dresp->mnt[0].state) != FSCS_HIDDEN)) {
			fsa_dev_ptr[index].valid = 1;
			fsa_dev_ptr[index].type = le32_to_cpu(dresp->mnt[0].vol);
			fsa_dev_ptr[index].size = le32_to_cpu(dresp->mnt[0].capacity);
			if (le32_to_cpu(dresp->mnt[0].state) & FSCS_READONLY)
				fsa_dev_ptr[index].ro = 1;
		}
		fib_complete(fibptr);
		/*
		 *	If there are no more containers, then stop asking.
		 */
		if ((index + 1) >= le32_to_cpu(dresp->count)){
			break;
		}
	}
	fib_free(fibptr);
	return status;
}

static void aac_io_done(struct scsi_cmnd * scsicmd)
{
	unsigned long cpu_flags;
	struct Scsi_Host *host = scsicmd->device->host;
	spin_lock_irqsave(host->host_lock, cpu_flags);
	scsicmd->scsi_done(scsicmd);
	spin_unlock_irqrestore(host->host_lock, cpu_flags);
}

static void aac_internal_transfer(struct scsi_cmnd *scsicmd, void *data, unsigned int offset, unsigned int len)
{
	void *buf;
	unsigned int transfer_len;
	struct scatterlist *sg = scsicmd->request_buffer;

	if (scsicmd->use_sg) {
		buf = kmap_atomic(sg->page, KM_IRQ0) + sg->offset;
		transfer_len = min(sg->length, len + offset);
	} else {
		buf = scsicmd->request_buffer;
		transfer_len = min(scsicmd->request_bufflen, len + offset);
	}

	memcpy(buf + offset, data, transfer_len - offset);

	if (scsicmd->use_sg)
		kunmap_atomic(buf - sg->offset, KM_IRQ0);

}
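
/*
 * get_container_name_callback - handle the response to a CT_READ_NAME
 * request.  On success the container name is space padded to the width
 * of the INQUIRY product id field and copied into the command's data
 * buffer; on failure the default name already placed there is kept.
 */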
static void get_container_name_callback(void *context, struct fib * fibptr)
{
	struct aac_get_name_resp * get_name_reply;
	struct scsi_cmnd * scsicmd;

	scsicmd = (struct scsi_cmnd *) context;

	dprintk((KERN_DEBUG "get_container_name_callback[cpu %d]: t = %ld.\n", smp_processor_id(), jiffies));
	if (fibptr == NULL)
		BUG();

	get_name_reply = (struct aac_get_name_resp *) fib_data(fibptr);
	/* Failure is irrelevant, using default value instead */
	if ((le32_to_cpu(get_name_reply->status) == CT_OK)
	 && (get_name_reply->data[0] != '\0')) {
		char *sp = get_name_reply->data;
		sp[sizeof(((struct aac_get_name_resp *)NULL)->data)-1] = '\0';
		while (*sp == ' ')
			++sp;
		if (*sp) {
			char d[sizeof(((struct inquiry_data *)NULL)->inqd_pid)];
			int count = sizeof(d);
			char *dp = d;
			do {
				*dp++ = (*sp) ? *sp++ : ' ';
			} while (--count > 0);
			aac_internal_transfer(scsicmd, d,
			  offsetof(struct inquiry_data, inqd_pid), sizeof(d));
		}
	}

	scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 | SAM_STAT_GOOD;

	fib_complete(fibptr);
	fib_free(fibptr);
	aac_io_done(scsicmd);
}

/**
 *	aac_get_container_name	-	get container name, non blocking.
 */
static int aac_get_container_name(struct scsi_cmnd * scsicmd, int cid)
{
	int status;
	struct aac_get_name *dinfo;
	struct fib * cmd_fibcontext;
	struct aac_dev * dev;

	dev = (struct aac_dev *)scsicmd->device->host->hostdata;

	if (!(cmd_fibcontext = fib_alloc(dev)))
		return -ENOMEM;

	fib_init(cmd_fibcontext);
	dinfo = (struct aac_get_name *) fib_data(cmd_fibcontext);

	dinfo->command = cpu_to_le32(VM_ContainerConfig);
	dinfo->type = cpu_to_le32(CT_READ_NAME);
	dinfo->cid = cpu_to_le32(cid);
	dinfo->count = cpu_to_le32(sizeof(((struct aac_get_name_resp *)NULL)->data));

	status = fib_send(ContainerCommand,
		  cmd_fibcontext,
		  sizeof (struct aac_get_name),
		  FsaNormal,
		  0, 1,
		  (fib_callback) get_container_name_callback,
		  (void *) scsicmd);

	/*
	 *	Check that the command queued to the controller
	 */
	if (status == -EINPROGRESS)
		return 0;

	printk(KERN_WARNING "aac_get_container_name: fib_send failed with status: %d.\n", status);
	fib_complete(cmd_fibcontext);
	fib_free(cmd_fibcontext);
	return -1;
}
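
/*
 * Note on fib_send() usage in this file: calls that pass wait == 1 with
 * no callback block until the adapter responds, while calls that pass
 * wait == 0 with a fib_callback return -EINPROGRESS once the FIB is
 * queued and complete asynchronously through the callback.
 */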
/**
 *	probe_container		-	query a logical volume
 *	@dev: device to query
 *	@cid: container identifier
 *
 *	Queries the controller about the given volume. The volume information
 *	is updated in the struct fsa_dev_info structure rather than returned.
 */
static int probe_container(struct aac_dev *dev, int cid)
{
	struct fsa_dev_info *fsa_dev_ptr;
	int status;
	struct aac_query_mount *dinfo;
	struct aac_mount *dresp;
	struct fib * fibptr;
	unsigned instance;

	fsa_dev_ptr = dev->fsa_dev;
	instance = dev->scsi_host_ptr->unique_id;

	if (!(fibptr = fib_alloc(dev)))
		return -ENOMEM;

	fib_init(fibptr);

	dinfo = (struct aac_query_mount *)fib_data(fibptr);

	dinfo->command = cpu_to_le32(VM_NameServe);
	dinfo->count = cpu_to_le32(cid);
	dinfo->type = cpu_to_le32(FT_FILESYS);

	status = fib_send(ContainerCommand,
		    fibptr,
		    sizeof(struct aac_query_mount),
		    FsaNormal,
		    1, 1,
		    NULL, NULL);
	if (status < 0) {
		printk(KERN_WARNING "aacraid: probe_container query failed.\n");
		goto error;
	}

	dresp = (struct aac_mount *) fib_data(fibptr);

	if ((le32_to_cpu(dresp->status) == ST_OK) &&
	    (le32_to_cpu(dresp->mnt[0].vol) != CT_NONE) &&
	    (le32_to_cpu(dresp->mnt[0].state) != FSCS_HIDDEN)) {
		fsa_dev_ptr[cid].valid = 1;
		fsa_dev_ptr[cid].type = le32_to_cpu(dresp->mnt[0].vol);
		fsa_dev_ptr[cid].size = le32_to_cpu(dresp->mnt[0].capacity);
		if (le32_to_cpu(dresp->mnt[0].state) & FSCS_READONLY)
			fsa_dev_ptr[cid].ro = 1;
	}

error:
	fib_complete(fibptr);
	fib_free(fibptr);

	return status;
}

/* Local Structure to set SCSI inquiry data strings */
struct scsi_inq {
	char vid[8];         /* Vendor ID */
	char pid[16];        /* Product ID */
	char prl[4];         /* Product Revision Level */
};

/**
 *	inqstrcpy	-	string merge
 *	@a:	string to copy from
 *	@b:	string to copy to
 *
 *	Copy a String from one location to another
 *	without copying \0
 */
static void inqstrcpy(char *a, char *b)
{
	while (*a != (char)0)
		*b++ = *a++;
}

static char *container_types[] = {
	"None",
	"Volume",
	"Mirror",
	"Stripe",
	"RAID5",
	"SSRW",
	"SSRO",
	"Morph",
	"Legacy",
	"RAID4",
	"RAID10",
	"RAID00",
	"V-MIRRORS",
	"PSEUDO R4",
	"RAID50",
	"RAID5D",
	"RAID5D0",
	"RAID1E",
	"RAID6",
	"RAID60",
	"Unknown"
};
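
/*
 * container_types is indexed by the volume type the firmware reports
 * (fsa_dev_info.type); setinqstr() below appends the matching string
 * to the INQUIRY product id.
 */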
/*
 * Function: setinqstr
 *
 * Arguments: [1] int devtype [2] pointer to void [3] int
 *
 * Purpose: Sets SCSI inquiry data strings for vendor, product
 * and revision level. Allows strings to be set in platform dependent
 * files instead of in OS dependent driver source.
 */
static void setinqstr(int devtype, void *data, int tindex)
{
	struct scsi_inq *str;
	struct aac_driver_ident *mp;

	mp = aac_get_driver_ident(devtype);

	str = (struct scsi_inq *)(data); /* cast data to scsi inq block */

	inqstrcpy (mp->vname, str->vid);
	inqstrcpy (mp->model, str->pid); /* last six chars reserved for vol type */

	if (tindex < (sizeof(container_types)/sizeof(char *))){
		char *findit = str->pid;

		for ( ; *findit != ' '; findit++); /* walk till we find a space */
		/* RAID is superfluous in the context of a RAID device */
		if (memcmp(findit-4, "RAID", 4) == 0)
			*(findit -= 4) = ' ';
		inqstrcpy (container_types[tindex], findit + 1);
	}
	inqstrcpy ("V1.0", str->prl);
}

static void set_sense(u8 *sense_buf, u8 sense_key, u8 sense_code,
		      u8 a_sense_code, u8 incorrect_length,
		      u8 bit_pointer, u16 field_pointer,
		      u32 residue)
{
	sense_buf[0] = 0xF0;	/* Sense data valid, err code 70h (current error) */
	sense_buf[1] = 0;	/* Segment number, always zero */

	if (incorrect_length) {
		sense_buf[2] = sense_key | 0x20;/* Set ILI bit | sense key */
		sense_buf[3] = BYTE3(residue);
		sense_buf[4] = BYTE2(residue);
		sense_buf[5] = BYTE1(residue);
		sense_buf[6] = BYTE0(residue);
	} else
		sense_buf[2] = sense_key;	/* Sense key */

	if (sense_key == ILLEGAL_REQUEST)
		sense_buf[7] = 10;	/* Additional sense length */
	else
		sense_buf[7] = 6;	/* Additional sense length */

	sense_buf[12] = sense_code;	/* Additional sense code */
	sense_buf[13] = a_sense_code;	/* Additional sense code qualifier */
	if (sense_key == ILLEGAL_REQUEST) {
		sense_buf[15] = 0;

		if (sense_code == SENCODE_INVALID_PARAM_FIELD)
			sense_buf[15] = 0x80;/* Std sense key specific field */
					     /* Illegal parameter is in the parameter block */

		if (sense_code == SENCODE_INVALID_CDB_FIELD)
			sense_buf[15] = 0xc0;/* Std sense key specific field */
					     /* Illegal parameter is in the CDB block */
		sense_buf[15] |= bit_pointer;
		sense_buf[16] = field_pointer >> 8;	/* MSB */
		sense_buf[17] = field_pointer;		/* LSB */
	}
}
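
/**
 *	aac_get_adapter_info	-	query adapter information
 *	@dev: adapter to query
 *
 *	Fetch the adapter and supplement adapter info and the bus topology,
 *	then derive the DMA mask, scatter-gather table size and maximum
 *	transfer size the SCSI layer is allowed to use with this adapter.
 */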
int aac_get_adapter_info(struct aac_dev* dev)
{
	struct fib* fibptr;
	int rcode;
	u32 tmp;
	struct aac_adapter_info *info;
	struct aac_bus_info *command;
	struct aac_bus_info_response *bus_info;

	if (!(fibptr = fib_alloc(dev)))
		return -ENOMEM;

	fib_init(fibptr);
	info = (struct aac_adapter_info *) fib_data(fibptr);
	memset(info,0,sizeof(*info));

	rcode = fib_send(RequestAdapterInfo,
			 fibptr,
			 sizeof(*info),
			 FsaNormal,
			 1, 1,
			 NULL,
			 NULL);

	if (rcode < 0) {
		fib_complete(fibptr);
		fib_free(fibptr);
		return rcode;
	}
	memcpy(&dev->adapter_info, info, sizeof(*info));

	if (dev->adapter_info.options & AAC_OPT_SUPPLEMENT_ADAPTER_INFO) {
		struct aac_supplement_adapter_info * info;

		fib_init(fibptr);

		info = (struct aac_supplement_adapter_info *) fib_data(fibptr);

		memset(info,0,sizeof(*info));

		rcode = fib_send(RequestSupplementAdapterInfo,
				 fibptr,
				 sizeof(*info),
				 FsaNormal,
				 1, 1,
				 NULL,
				 NULL);

		if (rcode >= 0)
			memcpy(&dev->supplement_adapter_info, info, sizeof(*info));
	}


	/*
	 * GetBusInfo
	 */

	fib_init(fibptr);

	bus_info = (struct aac_bus_info_response *) fib_data(fibptr);

	memset(bus_info, 0, sizeof(*bus_info));

	command = (struct aac_bus_info *)bus_info;

	command->Command = cpu_to_le32(VM_Ioctl);
	command->ObjType = cpu_to_le32(FT_DRIVE);
	command->MethodId = cpu_to_le32(1);
	command->CtlCmd = cpu_to_le32(GetBusInfo);

	rcode = fib_send(ContainerCommand,
			 fibptr,
			 sizeof (*bus_info),
			 FsaNormal,
			 1, 1,
			 NULL, NULL);

	if (rcode >= 0 && le32_to_cpu(bus_info->Status) == ST_OK) {
		dev->maximum_num_physicals = le32_to_cpu(bus_info->TargetsPerBus);
		dev->maximum_num_channels = le32_to_cpu(bus_info->BusCount);
	}

	tmp = le32_to_cpu(dev->adapter_info.kernelrev);
	printk(KERN_INFO "%s%d: kernel %d.%d-%d[%d] %.*s\n",
			dev->name,
			dev->id,
			tmp>>24,
			(tmp>>16)&0xff,
			tmp&0xff,
			le32_to_cpu(dev->adapter_info.kernelbuild),
			(int)sizeof(dev->supplement_adapter_info.BuildDate),
			dev->supplement_adapter_info.BuildDate);
	tmp = le32_to_cpu(dev->adapter_info.monitorrev);
	printk(KERN_INFO "%s%d: monitor %d.%d-%d[%d]\n",
			dev->name, dev->id,
			tmp>>24,(tmp>>16)&0xff,tmp&0xff,
			le32_to_cpu(dev->adapter_info.monitorbuild));
	tmp = le32_to_cpu(dev->adapter_info.biosrev);
	printk(KERN_INFO "%s%d: bios %d.%d-%d[%d]\n",
			dev->name, dev->id,
			tmp>>24,(tmp>>16)&0xff,tmp&0xff,
			le32_to_cpu(dev->adapter_info.biosbuild));
	if (le32_to_cpu(dev->adapter_info.serial[0]) != 0xBAD0)
		printk(KERN_INFO "%s%d: serial %x\n",
			dev->name, dev->id,
			le32_to_cpu(dev->adapter_info.serial[0]));

	dev->nondasd_support = 0;
	dev->raid_scsi_mode = 0;
	if(dev->adapter_info.options & AAC_OPT_NONDASD){
		dev->nondasd_support = 1;
	}
	/*
	 * If the firmware supports ROMB RAID/SCSI mode and we are currently
	 * in RAID/SCSI mode, set the flag. For now if in this mode we will
	 * force nondasd support on. If we decide to allow the non-dasd flag
	 * additional changes will have to be made to support
	 * RAID/SCSI. The function aac_scsi_cmd in this module will have to be
	 * changed to support the new dev->raid_scsi_mode flag instead of
	 * leaching off of the dev->nondasd_support flag. Also in linit.c the
	 * function aac_detect will have to be modified where it sets up the
	 * max number of channels based on the aac->nondasd_support flag only.
	 */
	if ((dev->adapter_info.options & AAC_OPT_SCSI_MANAGED) &&
	    (dev->adapter_info.options & AAC_OPT_RAID_SCSI_MODE)) {
		dev->nondasd_support = 1;
		dev->raid_scsi_mode = 1;
	}
	if (dev->raid_scsi_mode != 0)
		printk(KERN_INFO "%s%d: ROMB RAID/SCSI mode enabled\n",
				dev->name, dev->id);

	if(nondasd != -1) {
		dev->nondasd_support = (nondasd!=0);
	}
	if(dev->nondasd_support != 0){
		printk(KERN_INFO "%s%d: Non-DASD support enabled.\n",dev->name, dev->id);
	}

	dev->dac_support = 0;
	if( (sizeof(dma_addr_t) > 4) && (dev->adapter_info.options & AAC_OPT_SGMAP_HOST64)){
		printk(KERN_INFO "%s%d: 64bit support enabled.\n", dev->name, dev->id);
		dev->dac_support = 1;
	}

	if(dacmode != -1) {
		dev->dac_support = (dacmode!=0);
	}
	if(dev->dac_support != 0) {
		if (!pci_set_dma_mask(dev->pdev, 0xFFFFFFFFFFFFFFFFULL) &&
			!pci_set_consistent_dma_mask(dev->pdev, 0xFFFFFFFFFFFFFFFFULL)) {
			printk(KERN_INFO"%s%d: 64 Bit DAC enabled\n",
				dev->name, dev->id);
		} else if (!pci_set_dma_mask(dev->pdev, 0xFFFFFFFFULL) &&
			!pci_set_consistent_dma_mask(dev->pdev, 0xFFFFFFFFULL)) {
			printk(KERN_INFO"%s%d: DMA mask set failed, 64 Bit DAC disabled\n",
				dev->name, dev->id);
			dev->dac_support = 0;
		} else {
			printk(KERN_WARNING"%s%d: No suitable DMA available.\n",
				dev->name, dev->id);
			rcode = -ENOMEM;
		}
	}
	/*
	 * 57 scatter gather elements
	 */
	if (!(dev->raw_io_interface)) {
		dev->scsi_host_ptr->sg_tablesize = (dev->max_fib_size -
			sizeof(struct aac_fibhdr) -
			sizeof(struct aac_write) + sizeof(struct sgmap)) /
				sizeof(struct sgmap);
		if (dev->dac_support) {
			/*
			 * 38 scatter gather elements
			 */
			dev->scsi_host_ptr->sg_tablesize =
				(dev->max_fib_size -
				sizeof(struct aac_fibhdr) -
				sizeof(struct aac_write64) +
				sizeof(struct sgmap64)) /
					sizeof(struct sgmap64);
		}
		dev->scsi_host_ptr->max_sectors = AAC_MAX_32BIT_SGBCOUNT;
		if(!(dev->adapter_info.options & AAC_OPT_NEW_COMM)) {
			/*
			 * Worst case size that could cause sg overflow when
			 * we break up SG elements that are larger than 64KB.
			 * Would be nice if we could tell the SCSI layer what
			 * the maximum SG element size can be. Worst case is
			 * (sg_tablesize-1) 4KB elements with one 64KB
			 * element.
			 *	32bit -> 468 or 238KB	64bit -> 424 or 212KB
			 */
			dev->scsi_host_ptr->max_sectors =
			  (dev->scsi_host_ptr->sg_tablesize * 8) + 112;
		}
	}

	fib_complete(fibptr);
	fib_free(fibptr);

	return rcode;
}
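
/*
 * io_callback - completion handler for the read/write FIBs issued by
 * aac_read() and aac_write() below.  Unmaps the DMA buffers and
 * translates the adapter status into a SCSI result, raising a hardware
 * error check condition on failure.
 */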
static void io_callback(void *context, struct fib * fibptr)
{
	struct aac_dev *dev;
	struct aac_read_reply *readreply;
	struct scsi_cmnd *scsicmd;
	u32 cid;

	scsicmd = (struct scsi_cmnd *) context;

	dev = (struct aac_dev *)scsicmd->device->host->hostdata;
	cid = ID_LUN_TO_CONTAINER(scsicmd->device->id, scsicmd->device->lun);

	dprintk((KERN_DEBUG "io_callback[cpu %d]: lba = %u, t = %ld.\n", smp_processor_id(), ((scsicmd->cmnd[1] & 0x1F) << 16) | (scsicmd->cmnd[2] << 8) | scsicmd->cmnd[3], jiffies));

	if (fibptr == NULL)
		BUG();

	if(scsicmd->use_sg)
		pci_unmap_sg(dev->pdev,
			(struct scatterlist *)scsicmd->buffer,
			scsicmd->use_sg,
			scsicmd->sc_data_direction);
	else if(scsicmd->request_bufflen)
		pci_unmap_single(dev->pdev, scsicmd->SCp.dma_handle,
				 scsicmd->request_bufflen,
				 scsicmd->sc_data_direction);
	readreply = (struct aac_read_reply *)fib_data(fibptr);
	if (le32_to_cpu(readreply->status) == ST_OK)
		scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 | SAM_STAT_GOOD;
	else {
#ifdef AAC_DETAILED_STATUS_INFO
		printk(KERN_WARNING "io_callback: io failed, status = %d\n",
		  le32_to_cpu(readreply->status));
#endif
		scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 | SAM_STAT_CHECK_CONDITION;
		set_sense((u8 *) &dev->fsa_dev[cid].sense_data,
				    HARDWARE_ERROR,
				    SENCODE_INTERNAL_TARGET_FAILURE,
				    ASENCODE_INTERNAL_TARGET_FAILURE, 0, 0,
				    0, 0);
		memcpy(scsicmd->sense_buffer, &dev->fsa_dev[cid].sense_data,
		  (sizeof(dev->fsa_dev[cid].sense_data) > sizeof(scsicmd->sense_buffer))
		    ? sizeof(scsicmd->sense_buffer)
		    : sizeof(dev->fsa_dev[cid].sense_data));
	}
	fib_complete(fibptr);
	fib_free(fibptr);

	aac_io_done(scsicmd);
}
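
/*
 * aac_read - decode the LBA and length from a READ(6) or READ(10) CDB
 * and issue whichever read FIB variant (raw, 64 bit or 32 bit) the
 * adapter supports.
 */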
static int aac_read(struct scsi_cmnd * scsicmd, int cid)
{
	u32 lba;
	u32 count;
	int status;

	u16 fibsize;
	struct aac_dev *dev;
	struct fib * cmd_fibcontext;

	dev = (struct aac_dev *)scsicmd->device->host->hostdata;
	/*
	 *	Get block address and transfer length
	 */
	if (scsicmd->cmnd[0] == READ_6)	/* 6 byte command */
	{
		dprintk((KERN_DEBUG "aachba: received a read(6) command on id %d.\n", cid));

		lba = ((scsicmd->cmnd[1] & 0x1F) << 16) | (scsicmd->cmnd[2] << 8) | scsicmd->cmnd[3];
		count = scsicmd->cmnd[4];

		if (count == 0)
			count = 256;
	} else {
		dprintk((KERN_DEBUG "aachba: received a read(10) command on id %d.\n", cid));

		lba = (scsicmd->cmnd[2] << 24) | (scsicmd->cmnd[3] << 16) | (scsicmd->cmnd[4] << 8) | scsicmd->cmnd[5];
		count = (scsicmd->cmnd[7] << 8) | scsicmd->cmnd[8];
	}
	dprintk((KERN_DEBUG "aac_read[cpu %d]: lba = %u, t = %ld.\n",
	  smp_processor_id(), lba, jiffies));
	/*
	 *	Allocate and initialize a Fib
	 */
	if (!(cmd_fibcontext = fib_alloc(dev))) {
		return -1;
	}

	fib_init(cmd_fibcontext);

	if (dev->raw_io_interface) {
		struct aac_raw_io *readcmd;
		readcmd = (struct aac_raw_io *) fib_data(cmd_fibcontext);
		readcmd->block[0] = cpu_to_le32(lba);
		readcmd->block[1] = 0;
		readcmd->count = cpu_to_le32(count<<9);
		readcmd->cid = cpu_to_le16(cid);
		readcmd->flags = cpu_to_le16(1);
		readcmd->bpTotal = 0;
		readcmd->bpComplete = 0;

		aac_build_sgraw(scsicmd, &readcmd->sg);
		fibsize = sizeof(struct aac_raw_io) + ((le32_to_cpu(readcmd->sg.count) - 1) * sizeof (struct sgentryraw));
		if (fibsize > (dev->max_fib_size - sizeof(struct aac_fibhdr)))
			BUG();
		/*
		 *	Now send the Fib to the adapter
		 */
		status = fib_send(ContainerRawIo,
			  cmd_fibcontext,
			  fibsize,
			  FsaNormal,
			  0, 1,
			  (fib_callback) io_callback,
			  (void *) scsicmd);
	} else if (dev->dac_support == 1) {
		struct aac_read64 *readcmd;
		readcmd = (struct aac_read64 *) fib_data(cmd_fibcontext);
		readcmd->command = cpu_to_le32(VM_CtHostRead64);
		readcmd->cid = cpu_to_le16(cid);
		readcmd->sector_count = cpu_to_le16(count);
		readcmd->block = cpu_to_le32(lba);
		readcmd->pad   = 0;
		readcmd->flags = 0;

		aac_build_sg64(scsicmd, &readcmd->sg);
		fibsize = sizeof(struct aac_read64) +
			((le32_to_cpu(readcmd->sg.count) - 1) *
			 sizeof (struct sgentry64));
		BUG_ON (fibsize > (dev->max_fib_size -
					sizeof(struct aac_fibhdr)));
		/*
		 *	Now send the Fib to the adapter
		 */
		status = fib_send(ContainerCommand64,
			  cmd_fibcontext,
			  fibsize,
			  FsaNormal,
			  0, 1,
			  (fib_callback) io_callback,
			  (void *) scsicmd);
	} else {
		struct aac_read *readcmd;
		readcmd = (struct aac_read *) fib_data(cmd_fibcontext);
		readcmd->command = cpu_to_le32(VM_CtBlockRead);
		readcmd->cid = cpu_to_le32(cid);
		readcmd->block = cpu_to_le32(lba);
		readcmd->count = cpu_to_le32(count * 512);

		aac_build_sg(scsicmd, &readcmd->sg);
		fibsize = sizeof(struct aac_read) +
			((le32_to_cpu(readcmd->sg.count) - 1) *
			 sizeof (struct sgentry));
		BUG_ON (fibsize > (dev->max_fib_size -
					sizeof(struct aac_fibhdr)));
		/*
		 *	Now send the Fib to the adapter
		 */
		status = fib_send(ContainerCommand,
			  cmd_fibcontext,
			  fibsize,
			  FsaNormal,
			  0, 1,
			  (fib_callback) io_callback,
			  (void *) scsicmd);
	}

	/*
	 *	Check that the command queued to the controller
	 */
	if (status == -EINPROGRESS)
		return 0;

	printk(KERN_WARNING "aac_read: fib_send failed with status: %d.\n", status);
	/*
	 *	For some reason, the Fib didn't queue, return QUEUE_FULL
	 */
	scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 | SAM_STAT_TASK_SET_FULL;
	aac_io_done(scsicmd);
	fib_complete(cmd_fibcontext);
	fib_free(cmd_fibcontext);
	return 0;
}
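
/*
 * aac_write - decode the LBA and length from a WRITE(6) or WRITE(10)
 * CDB and issue whichever write FIB variant (raw, 64 bit or 32 bit)
 * the adapter supports.
 */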
static int aac_write(struct scsi_cmnd * scsicmd, int cid)
{
	u32 lba;
	u32 count;
	int status;
	u16 fibsize;
	struct aac_dev *dev;
	struct fib * cmd_fibcontext;

	dev = (struct aac_dev *)scsicmd->device->host->hostdata;
	/*
	 *	Get block address and transfer length
	 */
	if (scsicmd->cmnd[0] == WRITE_6)	/* 6 byte command */
	{
		lba = ((scsicmd->cmnd[1] & 0x1F) << 16) | (scsicmd->cmnd[2] << 8) | scsicmd->cmnd[3];
		count = scsicmd->cmnd[4];
		if (count == 0)
			count = 256;
	} else {
		dprintk((KERN_DEBUG "aachba: received a write(10) command on id %d.\n", cid));
		lba = (scsicmd->cmnd[2] << 24) | (scsicmd->cmnd[3] << 16) | (scsicmd->cmnd[4] << 8) | scsicmd->cmnd[5];
		count = (scsicmd->cmnd[7] << 8) | scsicmd->cmnd[8];
	}
	dprintk((KERN_DEBUG "aac_write[cpu %d]: lba = %u, t = %ld.\n",
	  smp_processor_id(), lba, jiffies));
	/*
	 *	Allocate and initialize a Fib then setup a BlockWrite command
	 */
	if (!(cmd_fibcontext = fib_alloc(dev))) {
		scsicmd->result = DID_ERROR << 16;
		aac_io_done(scsicmd);
		return 0;
	}
	fib_init(cmd_fibcontext);

	if (dev->raw_io_interface) {
		struct aac_raw_io *writecmd;
		writecmd = (struct aac_raw_io *) fib_data(cmd_fibcontext);
		writecmd->block[0] = cpu_to_le32(lba);
		writecmd->block[1] = 0;
		writecmd->count = cpu_to_le32(count<<9);
		writecmd->cid = cpu_to_le16(cid);
		writecmd->flags = 0;
		writecmd->bpTotal = 0;
		writecmd->bpComplete = 0;

		aac_build_sgraw(scsicmd, &writecmd->sg);
		fibsize = sizeof(struct aac_raw_io) + ((le32_to_cpu(writecmd->sg.count) - 1) * sizeof (struct sgentryraw));
		if (fibsize > (dev->max_fib_size - sizeof(struct aac_fibhdr)))
			BUG();
		/*
		 *	Now send the Fib to the adapter
		 */
		status = fib_send(ContainerRawIo,
			  cmd_fibcontext,
			  fibsize,
			  FsaNormal,
			  0, 1,
			  (fib_callback) io_callback,
			  (void *) scsicmd);
	} else if (dev->dac_support == 1) {
		struct aac_write64 *writecmd;
		writecmd = (struct aac_write64 *) fib_data(cmd_fibcontext);
		writecmd->command = cpu_to_le32(VM_CtHostWrite64);
		writecmd->cid = cpu_to_le16(cid);
		writecmd->sector_count = cpu_to_le16(count);
		writecmd->block = cpu_to_le32(lba);
		writecmd->pad	= 0;
		writecmd->flags	= 0;

		aac_build_sg64(scsicmd, &writecmd->sg);
		fibsize = sizeof(struct aac_write64) +
			((le32_to_cpu(writecmd->sg.count) - 1) *
			 sizeof (struct sgentry64));
		BUG_ON (fibsize > (dev->max_fib_size -
					sizeof(struct aac_fibhdr)));
		/*
		 *	Now send the Fib to the adapter
		 */
		status = fib_send(ContainerCommand64,
			  cmd_fibcontext,
			  fibsize,
			  FsaNormal,
			  0, 1,
			  (fib_callback) io_callback,
			  (void *) scsicmd);
	} else {
		struct aac_write *writecmd;
		writecmd = (struct aac_write *) fib_data(cmd_fibcontext);
		writecmd->command = cpu_to_le32(VM_CtBlockWrite);
		writecmd->cid = cpu_to_le32(cid);
		writecmd->block = cpu_to_le32(lba);
		writecmd->count = cpu_to_le32(count * 512);
		writecmd->sg.count = cpu_to_le32(1);
		/* ->stable is not used - it did mean which type of write */

		aac_build_sg(scsicmd, &writecmd->sg);
		fibsize = sizeof(struct aac_write) +
			((le32_to_cpu(writecmd->sg.count) - 1) *
			 sizeof (struct sgentry));
		BUG_ON (fibsize > (dev->max_fib_size -
					sizeof(struct aac_fibhdr)));
		/*
		 *	Now send the Fib to the adapter
		 */
		status = fib_send(ContainerCommand,
			  cmd_fibcontext,
			  fibsize,
			  FsaNormal,
			  0, 1,
			  (fib_callback) io_callback,
			  (void *) scsicmd);
	}

	/*
	 *	Check that the command queued to the controller
	 */
	if (status == -EINPROGRESS)
	{
		return 0;
	}

	printk(KERN_WARNING "aac_write: fib_send failed with status: %d\n", status);
	/*
	 *	For some reason, the Fib didn't queue, return QUEUE_FULL
	 */
	scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 | SAM_STAT_TASK_SET_FULL;
	aac_io_done(scsicmd);

	fib_complete(cmd_fibcontext);
	fib_free(cmd_fibcontext);
	return 0;
}
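
/*
 * synchronize_callback - completion handler for the CT_FLUSH_CACHE
 * request issued by aac_synchronize() below.
 */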
static void synchronize_callback(void *context, struct fib *fibptr)
{
	struct aac_synchronize_reply *synchronizereply;
	struct scsi_cmnd *cmd;

	cmd = context;

	dprintk((KERN_DEBUG "synchronize_callback[cpu %d]: t = %ld.\n",
				smp_processor_id(), jiffies));
	BUG_ON(fibptr == NULL);


	synchronizereply = fib_data(fibptr);
	if (le32_to_cpu(synchronizereply->status) == CT_OK)
		cmd->result = DID_OK << 16 |
			COMMAND_COMPLETE << 8 | SAM_STAT_GOOD;
	else {
		struct scsi_device *sdev = cmd->device;
		struct aac_dev *dev = (struct aac_dev *)sdev->host->hostdata;
		u32 cid = ID_LUN_TO_CONTAINER(sdev->id, sdev->lun);
		printk(KERN_WARNING
		     "synchronize_callback: synchronize failed, status = %d\n",
		     le32_to_cpu(synchronizereply->status));
		cmd->result = DID_OK << 16 |
			COMMAND_COMPLETE << 8 | SAM_STAT_CHECK_CONDITION;
		set_sense((u8 *)&dev->fsa_dev[cid].sense_data,
				    HARDWARE_ERROR,
				    SENCODE_INTERNAL_TARGET_FAILURE,
				    ASENCODE_INTERNAL_TARGET_FAILURE, 0, 0,
				    0, 0);
		memcpy(cmd->sense_buffer, &dev->fsa_dev[cid].sense_data,
		  min(sizeof(dev->fsa_dev[cid].sense_data),
			  sizeof(cmd->sense_buffer)));
	}

	fib_complete(fibptr);
	fib_free(fibptr);
	aac_io_done(cmd);
}
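
/*
 * aac_synchronize - handle SYNCHRONIZE_CACHE.  Requeues the command
 * while other commands are still outstanding to the target, otherwise
 * asks the firmware to flush the container's cache.
 */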
static int aac_synchronize(struct scsi_cmnd *scsicmd, int cid)
{
	int status;
	struct fib *cmd_fibcontext;
	struct aac_synchronize *synchronizecmd;
	struct scsi_cmnd *cmd;
	struct scsi_device *sdev = scsicmd->device;
	int active = 0;
	unsigned long flags;

	/*
	 * Wait for all commands to complete to this specific
	 * target (block).
	 */
	spin_lock_irqsave(&sdev->list_lock, flags);
	list_for_each_entry(cmd, &sdev->cmd_list, list)
		if (cmd != scsicmd && cmd->serial_number != 0) {
			++active;
			break;
		}

	spin_unlock_irqrestore(&sdev->list_lock, flags);

	/*
	 *	Yield the processor (requeue for later)
	 */
	if (active)
		return SCSI_MLQUEUE_DEVICE_BUSY;

	/*
	 *	Allocate and initialize a Fib
	 */
	if (!(cmd_fibcontext =
	    fib_alloc((struct aac_dev *)scsicmd->device->host->hostdata)))
		return SCSI_MLQUEUE_HOST_BUSY;

	fib_init(cmd_fibcontext);

	synchronizecmd = fib_data(cmd_fibcontext);
	synchronizecmd->command = cpu_to_le32(VM_ContainerConfig);
	synchronizecmd->type = cpu_to_le32(CT_FLUSH_CACHE);
	synchronizecmd->cid = cpu_to_le32(cid);
	synchronizecmd->count =
	     cpu_to_le32(sizeof(((struct aac_synchronize_reply *)NULL)->data));

	/*
	 *	Now send the Fib to the adapter
	 */
	status = fib_send(ContainerCommand,
		  cmd_fibcontext,
		  sizeof(struct aac_synchronize),
		  FsaNormal,
		  0, 1,
		  (fib_callback)synchronize_callback,
		  (void *)scsicmd);

	/*
	 *	Check that the command queued to the controller
	 */
	if (status == -EINPROGRESS)
		return 0;

	printk(KERN_WARNING
		"aac_synchronize: fib_send failed with status: %d.\n", status);
	fib_complete(cmd_fibcontext);
	fib_free(cmd_fibcontext);
	return SCSI_MLQUEUE_HOST_BUSY;
}
/**
 *	aac_scsi_cmd()		-	Process SCSI command
 *	@scsicmd:		SCSI command block
 *
 *	Emulate a SCSI command and queue the required request for the
 *	aacraid firmware.
 */
int aac_scsi_cmd(struct scsi_cmnd * scsicmd)
{
	u32 cid = 0;
	struct Scsi_Host *host = scsicmd->device->host;
	struct aac_dev *dev = (struct aac_dev *)host->hostdata;
	struct fsa_dev_info *fsa_dev_ptr = dev->fsa_dev;
	int cardtype = dev->cardtype;
	int ret;

	/*
	 *	If the bus, id or lun is out of range, return fail
	 *	Test does not apply to ID 16, the pseudo id for the controller
	 *	itself.
	 */
	if (scsicmd->device->id != host->this_id) {
		if ((scsicmd->device->channel == 0) ){
			if( (scsicmd->device->id >= dev->maximum_num_containers) || (scsicmd->device->lun != 0)){
				scsicmd->result = DID_NO_CONNECT << 16;
				scsicmd->scsi_done(scsicmd);
				return 0;
			}
			cid = ID_LUN_TO_CONTAINER(scsicmd->device->id, scsicmd->device->lun);

			/*
			 *	If the target container doesn't exist, it may have
			 *	been newly created
			 */
			if ((fsa_dev_ptr[cid].valid & 1) == 0) {
				switch (scsicmd->cmnd[0]) {
				case INQUIRY:
				case READ_CAPACITY:
				case TEST_UNIT_READY:
					spin_unlock_irq(host->host_lock);
					probe_container(dev, cid);
					spin_lock_irq(host->host_lock);
					if (fsa_dev_ptr[cid].valid == 0) {
						scsicmd->result = DID_NO_CONNECT << 16;
						scsicmd->scsi_done(scsicmd);
						return 0;
					}
				default:
					break;
				}
			}
			/*
			 *	If the target container still doesn't exist,
			 *	return failure
			 */
			if (fsa_dev_ptr[cid].valid == 0) {
				scsicmd->result = DID_BAD_TARGET << 16;
				scsicmd->scsi_done(scsicmd);
				return 0;
			}
		} else {  /* check for physical non-dasd devices */
			if(dev->nondasd_support == 1){
				return aac_send_srb_fib(scsicmd);
			} else {
				scsicmd->result = DID_NO_CONNECT << 16;
				scsicmd->scsi_done(scsicmd);
				return 0;
			}
		}
	}
	/*
	 * else Command for the controller itself
	 */
	else if ((scsicmd->cmnd[0] != INQUIRY) &&	/* only INQUIRY & TUR cmnd supported for controller */
		(scsicmd->cmnd[0] != TEST_UNIT_READY))
	{
		dprintk((KERN_WARNING "Only INQUIRY & TUR command supported for controller, rcvd = 0x%x.\n", scsicmd->cmnd[0]));
		scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 | SAM_STAT_CHECK_CONDITION;
		set_sense((u8 *) &dev->fsa_dev[cid].sense_data,
			    ILLEGAL_REQUEST,
			    SENCODE_INVALID_COMMAND,
			    ASENCODE_INVALID_COMMAND, 0, 0, 0, 0);
		memcpy(scsicmd->sense_buffer, &dev->fsa_dev[cid].sense_data,
		  (sizeof(dev->fsa_dev[cid].sense_data) > sizeof(scsicmd->sense_buffer))
		    ? sizeof(scsicmd->sense_buffer)
		    : sizeof(dev->fsa_dev[cid].sense_data));
		scsicmd->scsi_done(scsicmd);
		return 0;
	}

	/*
	 * Handle commands here that don't really require going out to the adapter
	 */
	switch (scsicmd->cmnd[0]) {
	case INQUIRY:
	{
		struct inquiry_data inq_data;

		dprintk((KERN_DEBUG "INQUIRY command, ID: %d.\n", scsicmd->device->id));
		memset(&inq_data, 0, sizeof (struct inquiry_data));

		inq_data.inqd_ver = 2;	/* claim compliance to SCSI-2 */
		inq_data.inqd_dtq = 0x80;	/* set RMB bit to one indicating that the medium is removable */
		inq_data.inqd_rdf = 2;	/* A response data format value of two indicates that the data shall be in the format specified in SCSI-2 */
		inq_data.inqd_len = 31;
		/*Format for "pad2" is  RelAdr | WBus32 | WBus16 |  Sync  | Linked |Reserved| CmdQue | SftRe */
		inq_data.inqd_pad2= 0x32 ;	 /*WBus16|Sync|CmdQue */
		/*
		 *	Set the Vendor, Product, and Revision Level
		 *	see: <vendor>.c i.e. aac.c
		 */
		if (scsicmd->device->id == host->this_id) {
			setinqstr(cardtype, (void *) (inq_data.inqd_vid), (sizeof(container_types)/sizeof(char *)));
			inq_data.inqd_pdt = INQD_PDT_PROC;	/* Processor device */
			aac_internal_transfer(scsicmd, &inq_data, 0, sizeof(inq_data));
			scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 | SAM_STAT_GOOD;
			scsicmd->scsi_done(scsicmd);
			return 0;
		}
		setinqstr(cardtype, (void *) (inq_data.inqd_vid), fsa_dev_ptr[cid].type);
		inq_data.inqd_pdt = INQD_PDT_DA;	/* Direct/random access device */
		aac_internal_transfer(scsicmd, &inq_data, 0, sizeof(inq_data));
		return aac_get_container_name(scsicmd, cid);
	}
	case READ_CAPACITY:
	{
		u32 capacity;
		char cp[8];

		dprintk((KERN_DEBUG "READ CAPACITY command.\n"));
		if (fsa_dev_ptr[cid].size <= 0x100000000LL)
			capacity = fsa_dev_ptr[cid].size - 1;
		else
			capacity = (u32)-1;

		cp[0] = (capacity >> 24) & 0xff;
		cp[1] = (capacity >> 16) & 0xff;
		cp[2] = (capacity >> 8) & 0xff;
		cp[3] = (capacity >> 0) & 0xff;
		cp[4] = 0;
		cp[5] = 0;
		cp[6] = 2;
		cp[7] = 0;
		aac_internal_transfer(scsicmd, cp, 0, sizeof(cp));

		scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 | SAM_STAT_GOOD;
		scsicmd->scsi_done(scsicmd);

		return 0;
	}

	case MODE_SENSE:
	{
		char mode_buf[4];

		dprintk((KERN_DEBUG "MODE SENSE command.\n"));
		mode_buf[0] = 3;	/* Mode data length */
		mode_buf[1] = 0;	/* Medium type - default */
		mode_buf[2] = 0;	/* Device-specific param, bit 8: 0/1 = write enabled/protected */
		mode_buf[3] = 0;	/* Block descriptor length */

		aac_internal_transfer(scsicmd, mode_buf, 0, sizeof(mode_buf));
		scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 | SAM_STAT_GOOD;
		scsicmd->scsi_done(scsicmd);

		return 0;
	}
	case MODE_SENSE_10:
	{
		char mode_buf[8];

		dprintk((KERN_DEBUG "MODE SENSE 10 byte command.\n"));
		mode_buf[0] = 0;	/* Mode data length (MSB) */
		mode_buf[1] = 6;	/* Mode data length (LSB) */
		mode_buf[2] = 0;	/* Medium type - default */
		mode_buf[3] = 0;	/* Device-specific param, bit 8: 0/1 = write enabled/protected */
		mode_buf[4] = 0;	/* reserved */
		mode_buf[5] = 0;	/* reserved */
		mode_buf[6] = 0;	/* Block descriptor length (MSB) */
		mode_buf[7] = 0;	/* Block descriptor length (LSB) */
		aac_internal_transfer(scsicmd, mode_buf, 0, sizeof(mode_buf));

		scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 | SAM_STAT_GOOD;
		scsicmd->scsi_done(scsicmd);

		return 0;
	}
	case REQUEST_SENSE:
		dprintk((KERN_DEBUG "REQUEST SENSE command.\n"));
		memcpy(scsicmd->sense_buffer, &dev->fsa_dev[cid].sense_data, sizeof (struct sense_data));
		memset(&dev->fsa_dev[cid].sense_data, 0, sizeof (struct sense_data));
		scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 | SAM_STAT_GOOD;
		scsicmd->scsi_done(scsicmd);
		return 0;

	case ALLOW_MEDIUM_REMOVAL:
		dprintk((KERN_DEBUG "LOCK command.\n"));
		if (scsicmd->cmnd[4])
			fsa_dev_ptr[cid].locked = 1;
		else
			fsa_dev_ptr[cid].locked = 0;

		scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 | SAM_STAT_GOOD;
		scsicmd->scsi_done(scsicmd);
		return 0;
	/*
	 *	These commands are all No-Ops
	 */
	case TEST_UNIT_READY:
	case RESERVE:
	case RELEASE:
	case REZERO_UNIT:
	case REASSIGN_BLOCKS:
	case SEEK_10:
	case START_STOP:
		scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 | SAM_STAT_GOOD;
		scsicmd->scsi_done(scsicmd);
		return 0;
	}
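
	/*
	 * The remaining commands are translated into FIBs and sent to the
	 * adapter; reads and writes drop the host lock while the request
	 * is built and queued.
	 */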
	switch (scsicmd->cmnd[0])
	{
	case READ_6:
	case READ_10:
		/*
		 *	Hack to keep track of ordinal number of the device that
		 *	corresponds to a container. Needed to convert
		 *	containers to /dev/sd device names
		 */

		spin_unlock_irq(host->host_lock);
		if (scsicmd->request->rq_disk)
			memcpy(fsa_dev_ptr[cid].devname,
				scsicmd->request->rq_disk->disk_name,
				8);

		ret = aac_read(scsicmd, cid);
		spin_lock_irq(host->host_lock);
		return ret;

	case WRITE_6:
	case WRITE_10:
		spin_unlock_irq(host->host_lock);
		ret = aac_write(scsicmd, cid);
		spin_lock_irq(host->host_lock);
		return ret;

	case SYNCHRONIZE_CACHE:
		/* Issue FIB to tell Firmware to flush its cache */
		return aac_synchronize(scsicmd, cid);

	default:
		/*
		 *	Unhandled commands
		 */
		dprintk((KERN_WARNING "Unhandled SCSI Command: 0x%x.\n", scsicmd->cmnd[0]));
		scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 | SAM_STAT_CHECK_CONDITION;
		set_sense((u8 *) &dev->fsa_dev[cid].sense_data,
			ILLEGAL_REQUEST, SENCODE_INVALID_COMMAND,
			ASENCODE_INVALID_COMMAND, 0, 0, 0, 0);
		memcpy(scsicmd->sense_buffer, &dev->fsa_dev[cid].sense_data,
		  (sizeof(dev->fsa_dev[cid].sense_data) > sizeof(scsicmd->sense_buffer))
		    ? sizeof(scsicmd->sense_buffer)
		    : sizeof(dev->fsa_dev[cid].sense_data));
		scsicmd->scsi_done(scsicmd);
		return 0;
	}
}

static int query_disk(struct aac_dev *dev, void __user *arg)
{
	struct aac_query_disk qd;
	struct fsa_dev_info *fsa_dev_ptr;

	fsa_dev_ptr = dev->fsa_dev;
	if (copy_from_user(&qd, arg, sizeof (struct aac_query_disk)))
		return -EFAULT;
	if (qd.cnum == -1)
		qd.cnum = ID_LUN_TO_CONTAINER(qd.id, qd.lun);
	else if ((qd.bus == -1) && (qd.id == -1) && (qd.lun == -1))
	{
		if (qd.cnum < 0 || qd.cnum >= dev->maximum_num_containers)
			return -EINVAL;
		qd.instance = dev->scsi_host_ptr->host_no;
		qd.bus = 0;
		qd.id = CONTAINER_TO_ID(qd.cnum);
		qd.lun = CONTAINER_TO_LUN(qd.cnum);
	}
	else return -EINVAL;

	qd.valid = fsa_dev_ptr[qd.cnum].valid;
	qd.locked = fsa_dev_ptr[qd.cnum].locked;
	qd.deleted = fsa_dev_ptr[qd.cnum].deleted;

	if (fsa_dev_ptr[qd.cnum].devname[0] == '\0')
		qd.unmapped = 1;
	else
		qd.unmapped = 0;

	strlcpy(qd.name, fsa_dev_ptr[qd.cnum].devname,
	  min(sizeof(qd.name), sizeof(fsa_dev_ptr[qd.cnum].devname) + 1));

	if (copy_to_user(arg, &qd, sizeof (struct aac_query_disk)))
		return -EFAULT;
	return 0;
}
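
/*
 * force_delete_disk invalidates a container even if it is locked;
 * delete_disk below refuses with -EBUSY when the container is locked.
 */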
static int force_delete_disk(struct aac_dev *dev, void __user *arg)
{
	struct aac_delete_disk dd;
	struct fsa_dev_info *fsa_dev_ptr;

	fsa_dev_ptr = dev->fsa_dev;

	if (copy_from_user(&dd, arg, sizeof (struct aac_delete_disk)))
		return -EFAULT;

	if (dd.cnum >= dev->maximum_num_containers)
		return -EINVAL;
	/*
	 *	Mark this container as being deleted.
	 */
	fsa_dev_ptr[dd.cnum].deleted = 1;
	/*
	 *	Mark the container as no longer valid
	 */
	fsa_dev_ptr[dd.cnum].valid = 0;
	return 0;
}

static int delete_disk(struct aac_dev *dev, void __user *arg)
{
	struct aac_delete_disk dd;
	struct fsa_dev_info *fsa_dev_ptr;

	fsa_dev_ptr = dev->fsa_dev;

	if (copy_from_user(&dd, arg, sizeof (struct aac_delete_disk)))
		return -EFAULT;

	if (dd.cnum >= dev->maximum_num_containers)
		return -EINVAL;
	/*
	 *	If the container is locked, it can not be deleted by the API.
	 */
	if (fsa_dev_ptr[dd.cnum].locked)
		return -EBUSY;
	else {
		/*
		 *	Mark the container as no longer being valid.
		 */
		fsa_dev_ptr[dd.cnum].valid = 0;
		fsa_dev_ptr[dd.cnum].devname[0] = '\0';
		return 0;
	}
}

int aac_dev_ioctl(struct aac_dev *dev, int cmd, void __user *arg)
{
	switch (cmd) {
	case FSACTL_QUERY_DISK:
		return query_disk(dev, arg);
	case FSACTL_DELETE_DISK:
		return delete_disk(dev, arg);
	case FSACTL_FORCE_DELETE_DISK:
		return force_delete_disk(dev, arg);
	case FSACTL_GET_CONTAINERS:
		return aac_get_containers(dev);
	default:
		return -ENOTTY;
	}
}
/**
 *
 * aac_srb_callback
 * @context: the context set in the fib - here it is scsi cmd
 * @fibptr: pointer to the fib
 *
 * Handles the completion of a scsi command to a non dasd device
 *
 */

static void aac_srb_callback(void *context, struct fib * fibptr)
{
	struct aac_dev *dev;
	struct aac_srb_reply *srbreply;
	struct scsi_cmnd *scsicmd;

	scsicmd = (struct scsi_cmnd *) context;
	dev = (struct aac_dev *)scsicmd->device->host->hostdata;

	if (fibptr == NULL)
		BUG();

	srbreply = (struct aac_srb_reply *) fib_data(fibptr);

	scsicmd->sense_buffer[0] = '\0';  /* Initialize sense valid flag to false */
	/*
	 *	Calculate resid for sg
	 */

	scsicmd->resid = scsicmd->request_bufflen -
		le32_to_cpu(srbreply->data_xfer_length);

	if(scsicmd->use_sg)
		pci_unmap_sg(dev->pdev,
			(struct scatterlist *)scsicmd->buffer,
			scsicmd->use_sg,
			scsicmd->sc_data_direction);
	else if(scsicmd->request_bufflen)
		pci_unmap_single(dev->pdev, scsicmd->SCp.dma_handle, scsicmd->request_bufflen,
			scsicmd->sc_data_direction);

	/*
	 * First check the fib status
	 */

	if (le32_to_cpu(srbreply->status) != ST_OK){
		int len;
		printk(KERN_WARNING "aac_srb_callback: srb failed, status = %d\n", le32_to_cpu(srbreply->status));
		len = (le32_to_cpu(srbreply->sense_data_size) >
				sizeof(scsicmd->sense_buffer)) ?
				sizeof(scsicmd->sense_buffer) :
				le32_to_cpu(srbreply->sense_data_size);
		scsicmd->result = DID_ERROR << 16 | COMMAND_COMPLETE << 8 | SAM_STAT_CHECK_CONDITION;
		memcpy(scsicmd->sense_buffer, srbreply->sense_data, len);
	}

	/*
	 * Next check the srb status
	 */
	switch( (le32_to_cpu(srbreply->srb_status))&0x3f){
	case SRB_STATUS_ERROR_RECOVERY:
	case SRB_STATUS_PENDING:
	case SRB_STATUS_SUCCESS:
		if(scsicmd->cmnd[0] == INQUIRY ){
			u8 b;
			u8 b1;
			/* We can't expose disk devices because we can't tell whether they
			 * are the raw container drives or stand alone drives.  If they have
			 * the removable bit set then we should expose them though.
			 */
			b = (*(u8*)scsicmd->buffer)&0x1f;
			b1 = ((u8*)scsicmd->buffer)[1];
			if( b==TYPE_TAPE || b==TYPE_WORM || b==TYPE_ROM || b==TYPE_MOD|| b==TYPE_MEDIUM_CHANGER
					|| (b==TYPE_DISK && (b1&0x80)) ){
				scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8;
			/*
			 * We will allow disk devices if in RAID/SCSI mode and
			 * the channel is 2
			 */
			} else if ((dev->raid_scsi_mode) &&
					(scsicmd->device->channel == 2)) {
				scsicmd->result = DID_OK << 16 |
						COMMAND_COMPLETE << 8;
			} else {
				scsicmd->result = DID_NO_CONNECT << 16 |
						COMMAND_COMPLETE << 8;
			}
		} else {
			scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8;
		}
		break;
	case SRB_STATUS_DATA_OVERRUN:
		switch(scsicmd->cmnd[0]){
		case  READ_6:
		case  WRITE_6:
		case  READ_10:
		case  WRITE_10:
		case  READ_12:
		case  WRITE_12:
			if(le32_to_cpu(srbreply->data_xfer_length) < scsicmd->underflow ) {
				printk(KERN_WARNING"aacraid: SCSI CMD underflow\n");
			} else {
				printk(KERN_WARNING"aacraid: SCSI CMD Data Overrun\n");
			}
			scsicmd->result = DID_ERROR << 16 | COMMAND_COMPLETE << 8;
			break;
		case INQUIRY: {
			u8 b;
			u8 b1;
			/* We can't expose disk devices because we can't tell whether they
			 * are the raw container drives or stand alone drives
			 */
			b = (*(u8*)scsicmd->buffer)&0x0f;
			b1 = ((u8*)scsicmd->buffer)[1];
			if( b==TYPE_TAPE || b==TYPE_WORM || b==TYPE_ROM || b==TYPE_MOD|| b==TYPE_MEDIUM_CHANGER
					|| (b==TYPE_DISK && (b1&0x80)) ){
				scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8;
			/*
			 * We will allow disk devices if in RAID/SCSI mode and
			 * the channel is 2
			 */
			} else if ((dev->raid_scsi_mode) &&
					(scsicmd->device->channel == 2)) {
				scsicmd->result = DID_OK << 16 |
						COMMAND_COMPLETE << 8;
			} else {
				scsicmd->result = DID_NO_CONNECT << 16 |
						COMMAND_COMPLETE << 8;
			}
			break;
		}
		default:
			scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8;
			break;
		}
		break;
	case SRB_STATUS_ABORTED:
		scsicmd->result = DID_ABORT << 16 | ABORT << 8;
		break;
	case SRB_STATUS_ABORT_FAILED:
		/* Not sure about this one - but assuming the hba was trying to abort for some reason */
		scsicmd->result = DID_ERROR << 16 | ABORT << 8;
		break;
	case SRB_STATUS_PARITY_ERROR:
		scsicmd->result = DID_PARITY << 16 | MSG_PARITY_ERROR << 8;
		break;
	case SRB_STATUS_NO_DEVICE:
	case SRB_STATUS_INVALID_PATH_ID:
	case SRB_STATUS_INVALID_TARGET_ID:
	case SRB_STATUS_INVALID_LUN:
	case SRB_STATUS_SELECTION_TIMEOUT:
		scsicmd->result = DID_NO_CONNECT << 16 | COMMAND_COMPLETE << 8;
		break;

	case SRB_STATUS_COMMAND_TIMEOUT:
	case SRB_STATUS_TIMEOUT:
		scsicmd->result = DID_TIME_OUT << 16 | COMMAND_COMPLETE << 8;
		break;

	case SRB_STATUS_BUSY:
		scsicmd->result = DID_NO_CONNECT << 16 | COMMAND_COMPLETE << 8;
		break;

	case SRB_STATUS_BUS_RESET:
		scsicmd->result = DID_RESET << 16 | COMMAND_COMPLETE << 8;
		break;

	case SRB_STATUS_MESSAGE_REJECTED:
		scsicmd->result = DID_ERROR << 16 | MESSAGE_REJECT << 8;
		break;
	case SRB_STATUS_REQUEST_FLUSHED:
	case SRB_STATUS_ERROR:
	case SRB_STATUS_INVALID_REQUEST:
	case SRB_STATUS_REQUEST_SENSE_FAILED:
	case SRB_STATUS_NO_HBA:
	case SRB_STATUS_UNEXPECTED_BUS_FREE:
	case SRB_STATUS_PHASE_SEQUENCE_FAILURE:
	case SRB_STATUS_BAD_SRB_BLOCK_LENGTH:
	case SRB_STATUS_DELAYED_RETRY:
	case SRB_STATUS_BAD_FUNCTION:
	case SRB_STATUS_NOT_STARTED:
	case SRB_STATUS_NOT_IN_USE:
	case SRB_STATUS_FORCE_ABORT:
	case SRB_STATUS_DOMAIN_VALIDATION_FAIL:
	default:
#ifdef AAC_DETAILED_STATUS_INFO
		printk("aacraid: SRB ERROR(%u) %s scsi cmd 0x%x - scsi status 0x%x\n",
			le32_to_cpu(srbreply->srb_status) & 0x3F,
			aac_get_status_string(
				le32_to_cpu(srbreply->srb_status) & 0x3F),
			scsicmd->cmnd[0],
			le32_to_cpu(srbreply->scsi_status));
#endif
		scsicmd->result = DID_ERROR << 16 | COMMAND_COMPLETE << 8;
		break;
	}
	if (le32_to_cpu(srbreply->scsi_status) == 0x02 ){  /* Check Condition */
		int len;
		scsicmd->result |= SAM_STAT_CHECK_CONDITION;
		len = (le32_to_cpu(srbreply->sense_data_size) >
				sizeof(scsicmd->sense_buffer)) ?
				sizeof(scsicmd->sense_buffer) :
				le32_to_cpu(srbreply->sense_data_size);
#ifdef AAC_DETAILED_STATUS_INFO
		dprintk((KERN_WARNING "aac_srb_callback: check condition, status = %d len=%d\n",
			le32_to_cpu(srbreply->status), len));
#endif
		memcpy(scsicmd->sense_buffer, srbreply->sense_data, len);

	}
	/*
	 * OR in the scsi status (already shifted up a bit)
	 */
	scsicmd->result |= le32_to_cpu(srbreply->scsi_status);

	fib_complete(fibptr);
	fib_free(fibptr);
	aac_io_done(scsicmd);
}

/**
 *	aac_send_srb_fib
 *	@scsicmd: the scsi command block
 *
 *	This routine will form a FIB and fill in the aac_srb from the
 *	scsicmd passed in.
 */

static int aac_send_srb_fib(struct scsi_cmnd* scsicmd)
{
	struct fib* cmd_fibcontext;
	struct aac_dev* dev;
	int status;
	struct aac_srb *srbcmd;
	u16 fibsize;
	u32 flag;
	u32 timeout;

	dev = (struct aac_dev *)scsicmd->device->host->hostdata;
	if (scsicmd->device->id >= dev->maximum_num_physicals ||
			scsicmd->device->lun > 7) {
		scsicmd->result = DID_NO_CONNECT << 16;
		scsicmd->scsi_done(scsicmd);
		return 0;
	}

	switch (scsicmd->sc_data_direction) {
	case DMA_TO_DEVICE:
		flag = SRB_DataOut;
		break;
	case DMA_BIDIRECTIONAL:
		flag = SRB_DataIn | SRB_DataOut;
		break;
	case DMA_FROM_DEVICE:
		flag = SRB_DataIn;
		break;
	case DMA_NONE:
	default:	/* shuts up some versions of gcc */
		flag = SRB_NoDataXfer;
		break;
	}

	/*
	 * Allocate and initialize a Fib then set up the SRB command
	 */
	if (!(cmd_fibcontext = fib_alloc(dev)))
		return -1;
	fib_init(cmd_fibcontext);

	srbcmd = (struct aac_srb*) fib_data(cmd_fibcontext);
	srbcmd->function = cpu_to_le32(SRBF_ExecuteScsi);
	srbcmd->channel = cpu_to_le32(aac_logical_to_phys(scsicmd->device->channel));
	srbcmd->id = cpu_to_le32(scsicmd->device->id);
	srbcmd->lun = cpu_to_le32(scsicmd->device->lun);
	srbcmd->flags = cpu_to_le32(flag);
	timeout = scsicmd->timeout_per_command / HZ;
	if (timeout == 0)
		timeout = 1;
	srbcmd->timeout = cpu_to_le32(timeout);	/* timeout in seconds */
	srbcmd->retry_limit = 0;	/* Obsolete parameter */
	srbcmd->cdb_size = cpu_to_le32(scsicmd->cmd_len);

	if (dev->dac_support == 1) {
		aac_build_sg64(scsicmd, (struct sgmap64 *) &srbcmd->sg);
		srbcmd->count = cpu_to_le32(scsicmd->request_bufflen);

		memset(srbcmd->cdb, 0, sizeof(srbcmd->cdb));
		memcpy(srbcmd->cdb, scsicmd->cmnd, scsicmd->cmd_len);
		/*
		 * Build Scatter/Gather list
		 */
		fibsize = sizeof (struct aac_srb) - sizeof (struct sgentry) +
			((le32_to_cpu(srbcmd->sg.count) & 0xff) *
			 sizeof (struct sgentry64));
		BUG_ON(fibsize > (dev->max_fib_size -
			sizeof(struct aac_fibhdr)));

		/*
		 * Now send the Fib to the adapter
		 */
		status = fib_send(ScsiPortCommand64, cmd_fibcontext,
				fibsize, FsaNormal, 0, 1,
				(fib_callback) aac_srb_callback,
				(void *) scsicmd);
	} else {
		aac_build_sg(scsicmd, (struct sgmap *)&srbcmd->sg);
		srbcmd->count = cpu_to_le32(scsicmd->request_bufflen);

		memset(srbcmd->cdb, 0, sizeof(srbcmd->cdb));
		memcpy(srbcmd->cdb, scsicmd->cmnd, scsicmd->cmd_len);
		/*
		 * Build Scatter/Gather list
		 */
		fibsize = sizeof (struct aac_srb) +
			(((le32_to_cpu(srbcmd->sg.count) & 0xff) - 1) *
			 sizeof (struct sgentry));
		BUG_ON(fibsize > (dev->max_fib_size -
			sizeof(struct aac_fibhdr)));

		/*
		 * Now send the Fib to the adapter
		 */
		status = fib_send(ScsiPortCommand, cmd_fibcontext,
				fibsize, FsaNormal, 0, 1,
				(fib_callback) aac_srb_callback,
				(void *) scsicmd);
	}
	/*
	 * Check that the command queued to the controller
	 */
	if (status == -EINPROGRESS)
		return 0;

	printk(KERN_WARNING "aac_srb: fib_send failed with status: %d\n", status);
	fib_complete(cmd_fibcontext);
	fib_free(cmd_fibcontext);

	return -1;
}
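
/*
 * Worked example for the two fibsize computations above.  The "- 1" in the
 * 32-bit branch and the sizeof subtraction in the 64-bit branch both follow
 * from struct aac_srb (in aacraid.h) already embedding room for one
 * struct sgentry:
 *
 *	32-bit: fibsize = sizeof(struct aac_srb)
 *			+ (count - 1) * sizeof(struct sgentry);
 *	64-bit: fibsize = sizeof(struct aac_srb) - sizeof(struct sgentry)
 *			+ count * sizeof(struct sgentry64);
 *
 * i.e. the embedded 32-bit entry is either reused as the first element or
 * backed out entirely and replaced by 'count' wider 64-bit entries.
 */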

static unsigned long aac_build_sg(struct scsi_cmnd* scsicmd, struct sgmap* psg)
{
	struct aac_dev *dev;
	unsigned long byte_count = 0;

	dev = (struct aac_dev *)scsicmd->device->host->hostdata;
	/* Get rid of old data */
	psg->count = 0;
	psg->sg[0].addr = 0;
	psg->sg[0].count = 0;
	if (scsicmd->use_sg) {
		struct scatterlist *sg;
		int i;
		int sg_count;

		sg = (struct scatterlist *) scsicmd->request_buffer;

		sg_count = pci_map_sg(dev->pdev, sg, scsicmd->use_sg,
				scsicmd->sc_data_direction);
		psg->count = cpu_to_le32(sg_count);

		byte_count = 0;

		for (i = 0; i < sg_count; i++) {
			psg->sg[i].addr = cpu_to_le32(sg_dma_address(sg));
			psg->sg[i].count = cpu_to_le32(sg_dma_len(sg));
			byte_count += sg_dma_len(sg);
			sg++;
		}
		/* hba wants the size to be exact */
		if (byte_count > scsicmd->request_bufflen) {
			u32 temp = le32_to_cpu(psg->sg[i-1].count) -
				(byte_count - scsicmd->request_bufflen);
			psg->sg[i-1].count = cpu_to_le32(temp);
			byte_count = scsicmd->request_bufflen;
		}
		/* Check for command underflow */
		if (scsicmd->underflow && (byte_count < scsicmd->underflow)) {
			printk(KERN_WARNING "aacraid: cmd len %08lX cmd underflow %08X\n",
				byte_count, scsicmd->underflow);
		}
	} else if (scsicmd->request_bufflen) {
		dma_addr_t addr;

		addr = pci_map_single(dev->pdev,
				scsicmd->request_buffer,
				scsicmd->request_bufflen,
				scsicmd->sc_data_direction);
		psg->count = cpu_to_le32(1);
		psg->sg[0].addr = cpu_to_le32(addr);
		psg->sg[0].count = cpu_to_le32(scsicmd->request_bufflen);
		scsicmd->SCp.dma_handle = addr;
		byte_count = scsicmd->request_bufflen;
	}
	return byte_count;
}
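
/*
 * Illustration of the "hba wants the size to be exact" trim above, with
 * invented numbers: if the mapped elements add up to byte_count = 0x3000
 * but the request was only request_bufflen = 0x2e00, the final element's
 * count is reduced by the 0x200 overshoot:
 *
 *	temp = le32_to_cpu(psg->sg[i-1].count) - (0x3000 - 0x2e00);
 *
 * so the list handed to the adapter describes exactly request_bufflen bytes.
 */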

static unsigned long aac_build_sg64(struct scsi_cmnd* scsicmd, struct sgmap64* psg)
{
	struct aac_dev *dev;
	unsigned long byte_count = 0;
	u64 addr;

	dev = (struct aac_dev *)scsicmd->device->host->hostdata;
	/* Get rid of old data */
	psg->count = 0;
	psg->sg[0].addr[0] = 0;
	psg->sg[0].addr[1] = 0;
	psg->sg[0].count = 0;
	if (scsicmd->use_sg) {
		struct scatterlist *sg;
		int i;
		int sg_count;

		sg = (struct scatterlist *) scsicmd->request_buffer;

		sg_count = pci_map_sg(dev->pdev, sg, scsicmd->use_sg,
				scsicmd->sc_data_direction);
		psg->count = cpu_to_le32(sg_count);

		byte_count = 0;

		for (i = 0; i < sg_count; i++) {
			addr = sg_dma_address(sg);
			psg->sg[i].addr[0] = cpu_to_le32(addr & 0xffffffff);
			psg->sg[i].addr[1] = cpu_to_le32(addr >> 32);
			psg->sg[i].count = cpu_to_le32(sg_dma_len(sg));
			byte_count += sg_dma_len(sg);
			sg++;
		}
		/* hba wants the size to be exact */
		if (byte_count > scsicmd->request_bufflen) {
			u32 temp = le32_to_cpu(psg->sg[i-1].count) -
				(byte_count - scsicmd->request_bufflen);
			psg->sg[i-1].count = cpu_to_le32(temp);
			byte_count = scsicmd->request_bufflen;
		}
		/* Check for command underflow */
		if (scsicmd->underflow && (byte_count < scsicmd->underflow)) {
			printk(KERN_WARNING "aacraid: cmd len %08lX cmd underflow %08X\n",
				byte_count, scsicmd->underflow);
		}
	} else if (scsicmd->request_bufflen) {
		addr = pci_map_single(dev->pdev,
				scsicmd->request_buffer,
				scsicmd->request_bufflen,
				scsicmd->sc_data_direction);
		psg->count = cpu_to_le32(1);
		psg->sg[0].addr[0] = cpu_to_le32(addr & 0xffffffff);
		psg->sg[0].addr[1] = cpu_to_le32(addr >> 32);
		psg->sg[0].count = cpu_to_le32(scsicmd->request_bufflen);
		scsicmd->SCp.dma_handle = addr;
		byte_count = scsicmd->request_bufflen;
	}
	return byte_count;
}
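
/*
 * The sgentry64 address fields above are simply a 64-bit DMA address stored
 * as two little-endian 32-bit halves.  For example, a mapping at
 * addr = 0x0000000123456789ULL is emitted as:
 *
 *	psg->sg[i].addr[0] = cpu_to_le32(addr & 0xffffffff);	// 0x23456789
 *	psg->sg[i].addr[1] = cpu_to_le32(addr >> 32);		// 0x00000001
 */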

static unsigned long aac_build_sgraw(struct scsi_cmnd* scsicmd, struct sgmapraw* psg)
{
	struct Scsi_Host *host = scsicmd->device->host;
	struct aac_dev *dev = (struct aac_dev *)host->hostdata;
	unsigned long byte_count = 0;

	/* Get rid of old data */
	psg->count = 0;
	psg->sg[0].next = 0;
	psg->sg[0].prev = 0;
	psg->sg[0].addr[0] = 0;
	psg->sg[0].addr[1] = 0;
	psg->sg[0].count = 0;
	psg->sg[0].flags = 0;
	if (scsicmd->use_sg) {
		struct scatterlist *sg;
		int i;
		int sg_count;

		sg = (struct scatterlist *) scsicmd->request_buffer;

		sg_count = pci_map_sg(dev->pdev, sg, scsicmd->use_sg,
				scsicmd->sc_data_direction);

		for (i = 0; i < sg_count; i++) {
			int count = sg_dma_len(sg);
			u64 addr = sg_dma_address(sg);

			psg->sg[i].next = 0;
			psg->sg[i].prev = 0;
			psg->sg[i].addr[1] = cpu_to_le32((u32)(addr >> 32));
			psg->sg[i].addr[0] = cpu_to_le32((u32)(addr & 0xffffffff));
			psg->sg[i].count = cpu_to_le32(count);
			psg->sg[i].flags = 0;
			byte_count += count;
			sg++;
		}
		psg->count = cpu_to_le32(sg_count);
		/* hba wants the size to be exact */
		if (byte_count > scsicmd->request_bufflen) {
			u32 temp = le32_to_cpu(psg->sg[i-1].count) -
				(byte_count - scsicmd->request_bufflen);
			psg->sg[i-1].count = cpu_to_le32(temp);
			byte_count = scsicmd->request_bufflen;
		}
		/* Check for command underflow */
		if (scsicmd->underflow && (byte_count < scsicmd->underflow)) {
			printk(KERN_WARNING "aacraid: cmd len %08lX cmd underflow %08X\n",
				byte_count, scsicmd->underflow);
		}
	} else if (scsicmd->request_bufflen) {
		int count;
		u64 addr;

		scsicmd->SCp.dma_handle = pci_map_single(dev->pdev,
				scsicmd->request_buffer,
				scsicmd->request_bufflen,
				scsicmd->sc_data_direction);
		addr = scsicmd->SCp.dma_handle;
		count = scsicmd->request_bufflen;
		psg->count = cpu_to_le32(1);
		psg->sg[0].next = 0;
		psg->sg[0].prev = 0;
		psg->sg[0].addr[1] = cpu_to_le32((u32)(addr >> 32));
		psg->sg[0].addr[0] = cpu_to_le32((u32)(addr & 0xffffffff));
		psg->sg[0].count = cpu_to_le32(count);
		psg->sg[0].flags = 0;
		byte_count = scsicmd->request_bufflen;
	}
	return byte_count;
}

#ifdef AAC_DETAILED_STATUS_INFO

struct aac_srb_status_info {
	u32	status;
	char	*str;
};

static struct aac_srb_status_info srb_status_info[] = {
	{ SRB_STATUS_PENDING,			"Pending Status" },
	{ SRB_STATUS_SUCCESS,			"Success" },
	{ SRB_STATUS_ABORTED,			"Aborted Command" },
	{ SRB_STATUS_ABORT_FAILED,		"Abort Failed" },
	{ SRB_STATUS_ERROR,			"Error Event" },
	{ SRB_STATUS_BUSY,			"Device Busy" },
	{ SRB_STATUS_INVALID_REQUEST,		"Invalid Request" },
	{ SRB_STATUS_INVALID_PATH_ID,		"Invalid Path ID" },
	{ SRB_STATUS_NO_DEVICE,			"No Device" },
	{ SRB_STATUS_TIMEOUT,			"Timeout" },
	{ SRB_STATUS_SELECTION_TIMEOUT,		"Selection Timeout" },
	{ SRB_STATUS_COMMAND_TIMEOUT,		"Command Timeout" },
	{ SRB_STATUS_MESSAGE_REJECTED,		"Message Rejected" },
	{ SRB_STATUS_BUS_RESET,			"Bus Reset" },
	{ SRB_STATUS_PARITY_ERROR,		"Parity Error" },
	{ SRB_STATUS_REQUEST_SENSE_FAILED,	"Request Sense Failed" },
	{ SRB_STATUS_NO_HBA,			"No HBA" },
	{ SRB_STATUS_DATA_OVERRUN,		"Data Overrun/Data Underrun" },
	{ SRB_STATUS_UNEXPECTED_BUS_FREE,	"Unexpected Bus Free" },
	{ SRB_STATUS_PHASE_SEQUENCE_FAILURE,	"Phase Error" },
	{ SRB_STATUS_BAD_SRB_BLOCK_LENGTH,	"Bad Srb Block Length" },
	{ SRB_STATUS_REQUEST_FLUSHED,		"Request Flushed" },
	{ SRB_STATUS_DELAYED_RETRY,		"Delayed Retry" },
	{ SRB_STATUS_INVALID_LUN,		"Invalid LUN" },
	{ SRB_STATUS_INVALID_TARGET_ID,		"Invalid TARGET ID" },
	{ SRB_STATUS_BAD_FUNCTION,		"Bad Function" },
	{ SRB_STATUS_ERROR_RECOVERY,		"Error Recovery" },
	{ SRB_STATUS_NOT_STARTED,		"Not Started" },
	{ SRB_STATUS_NOT_IN_USE,		"Not In Use" },
	{ SRB_STATUS_FORCE_ABORT,		"Force Abort" },
	{ SRB_STATUS_DOMAIN_VALIDATION_FAIL,	"Domain Validation Failure" },
	{ 0xff,					"Unknown Error" }
};

char *aac_get_status_string(u32 status)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(srb_status_info); i++) {
		if (srb_status_info[i].status == status)
			return srb_status_info[i].str;
	}

	return "Bad Status Code";
}

#endif