/*
 * Engenio/LSI RDAC SCSI Device Handler
 *
 * Copyright (C) 2005 Mike Christie. All rights reserved.
 * Copyright (C) Chandra Seetharaman, IBM Corp. 2007
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 */
#include <scsi/scsi.h>
#include <scsi/scsi_eh.h>
#include <scsi/scsi_dh.h>
#include <linux/workqueue.h>
#include <linux/slab.h>

#define RDAC_NAME "rdac"
#define RDAC_RETRY_COUNT 5

/*
 * LSI mode page stuff
 *
 * These struct definitions and the forming of the
 * mode page were taken from the LSI RDAC 2.4 GPL'd
 * driver, and then converted to Linux conventions.
 */
#define RDAC_QUIESCENCE_TIME 20
/*
 * Page Codes
 */
#define RDAC_PAGE_CODE_REDUNDANT_CONTROLLER 0x2c

/*
 * Controller modes definitions
 */
#define RDAC_MODE_TRANSFER_SPECIFIED_LUNS 0x02

/*
 * RDAC Options field
 */
#define RDAC_FORCED_QUIESENCE 0x02

#define RDAC_TIMEOUT	(60 * HZ)
#define RDAC_RETRIES	3

struct rdac_mode_6_hdr {
	u8	data_len;
	u8	medium_type;
	u8	device_params;
	u8	block_desc_len;
};

struct rdac_mode_10_hdr {
	u16	data_len;
	u8	medium_type;
	u8	device_params;
	u16	reserved;
	u16	block_desc_len;
};

struct rdac_mode_common {
	u8	controller_serial[16];
	u8	alt_controller_serial[16];
	u8	rdac_mode[2];
	u8	alt_rdac_mode[2];
	u8	quiescence_timeout;
	u8	rdac_options;
};

struct rdac_pg_legacy {
	struct rdac_mode_6_hdr hdr;
	u8	page_code;
	u8	page_len;
	struct rdac_mode_common common;
#define MODE6_MAX_LUN	32
	u8	lun_table[MODE6_MAX_LUN];
	u8	reserved2[32];
	u8	reserved3;
	u8	reserved4;
};

struct rdac_pg_expanded {
	struct rdac_mode_10_hdr hdr;
	u8	page_code;
	u8	subpage_code;
	u8	page_len[2];
	struct rdac_mode_common common;
	u8	lun_table[256];
	u8	reserved3;
	u8	reserved4;
};

struct c9_inquiry {
	u8	peripheral_info;
	u8	page_code;	/* 0xC9 */
	u8	reserved1;
	u8	page_len;
	u8	page_id[4];	/* "vace" */
	u8	avte_cvp;
	u8	path_prio;
	u8	reserved2[38];
};

#define SUBSYS_ID_LEN	16
#define SLOT_ID_LEN	2
#define ARRAY_LABEL_LEN	31

struct c4_inquiry {
	u8	peripheral_info;
	u8	page_code;	/* 0xC4 */
	u8	reserved1;
	u8	page_len;
	u8	page_id[4];	/* "subs" */
	u8	subsys_id[SUBSYS_ID_LEN];
	u8	revision[4];
	u8	slot_id[SLOT_ID_LEN];
	u8	reserved[2];
};

struct rdac_controller {
	u8			subsys_id[SUBSYS_ID_LEN];
	u8			slot_id[SLOT_ID_LEN];
	int			use_ms10;
	struct kref		kref;
	struct list_head	node; /* list of all controllers */
	union			{
		struct rdac_pg_legacy legacy;
		struct rdac_pg_expanded expanded;
	} mode_select;
	u8			index;
	u8			array_name[ARRAY_LABEL_LEN];
	spinlock_t		ms_lock;
	int			ms_queued;
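	/*
	 * Deferred MODE SELECT state: ms_work is queued on kmpath_rdacd,
	 * ms_sdev is the device the command is sent through, and ms_head
	 * collects the rdac_queue_data entries waiting on this controller.
	 */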
	struct work_struct	ms_work;
	struct scsi_device	*ms_sdev;
	struct list_head	ms_head;
};

struct c8_inquiry {
	u8	peripheral_info;
	u8	page_code;	/* 0xC8 */
	u8	reserved1;
	u8	page_len;
	u8	page_id[4];	/* "edid" */
	u8	reserved2[3];
	u8	vol_uniq_id_len;
	u8	vol_uniq_id[16];
	u8	vol_user_label_len;
	u8	vol_user_label[60];
	u8	array_uniq_id_len;
	u8	array_unique_id[16];
	u8	array_user_label_len;
	u8	array_user_label[60];
	u8	lun[8];
};

struct c2_inquiry {
	u8	peripheral_info;
	u8	page_code;	/* 0xC2 */
	u8	reserved1;
	u8	page_len;
	u8	page_id[4];	/* "swr4" */
	u8	sw_version[3];
	u8	sw_date[3];
	u8	features_enabled;
	u8	max_lun_supported;
	u8	partitions[239]; /* Total allocation length should be 0xFF */
};

struct rdac_dh_data {
	struct rdac_controller	*ctlr;
#define UNINITIALIZED_LUN	(1 << 8)
	unsigned		lun;

#define RDAC_STATE_ACTIVE	0
#define RDAC_STATE_PASSIVE	1
	unsigned char		state;

#define RDAC_LUN_UNOWNED	0
#define RDAC_LUN_OWNED		1
#define RDAC_LUN_AVT		2
	char			lun_state;
	unsigned char		sense[SCSI_SENSE_BUFFERSIZE];
	union			{
		struct c2_inquiry c2;
		struct c4_inquiry c4;
		struct c8_inquiry c8;
		struct c9_inquiry c9;
	} inq;
};

static const char *lun_state[] =
{
	"unowned",
	"owned",
	"owned (AVT mode)",
};

struct rdac_queue_data {
	struct list_head	entry;
	struct rdac_dh_data	*h;
	activate_complete	callback_fn;
	void			*callback_data;
};

static LIST_HEAD(ctlr_list);
static DEFINE_SPINLOCK(list_lock);
static struct workqueue_struct *kmpath_rdacd;
static void send_mode_select(struct work_struct *work);

/*
 * module parameter to enable rdac debug logging.
 * 2 bits for each type of logging, only two types defined for now
 * Can be enhanced if required at later point
 */
static int rdac_logging = 1;
module_param(rdac_logging, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(rdac_logging, "A bit mask of rdac logging levels, "
		"Default is 1 - failover logging enabled, "
		"set it to 0xF to enable all the logs");

#define RDAC_LOG_FAILOVER	0
#define RDAC_LOG_SENSE		2

#define RDAC_LOG_BITS		2

#define RDAC_LOG_LEVEL(SHIFT)  \
	((rdac_logging >> (SHIFT)) & ((1 << (RDAC_LOG_BITS)) - 1))
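/*
 * Each log type gets a two-bit level in rdac_logging.  With the default
 * value of 1, RDAC_LOG_LEVEL(RDAC_LOG_FAILOVER) is 1 while
 * RDAC_LOG_LEVEL(RDAC_LOG_SENSE) is 0, so only failover events are logged;
 * setting the parameter to 0x5 (or 0xF) enables sense logging as well.
 */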
#define RDAC_LOG(SHIFT, sdev, f, arg...) \
do { \
	if (unlikely(RDAC_LOG_LEVEL(SHIFT))) \
		sdev_printk(KERN_INFO, sdev, RDAC_NAME ": " f "\n", ## arg); \
} while (0)

static inline struct rdac_dh_data *get_rdac_data(struct scsi_device *sdev)
{
	struct scsi_dh_data *scsi_dh_data = sdev->scsi_dh_data;
	BUG_ON(scsi_dh_data == NULL);
	return ((struct rdac_dh_data *) scsi_dh_data->buf);
}

static struct request *get_rdac_req(struct scsi_device *sdev,
			void *buffer, unsigned buflen, int rw)
{
	struct request *rq;
	struct request_queue *q = sdev->request_queue;

	rq = blk_get_request(q, rw, GFP_NOIO);

	if (!rq) {
		sdev_printk(KERN_INFO, sdev,
				"get_rdac_req: blk_get_request failed.\n");
		return NULL;
	}

	if (buflen && blk_rq_map_kern(q, rq, buffer, buflen, GFP_NOIO)) {
		blk_put_request(rq);
		sdev_printk(KERN_INFO, sdev,
				"get_rdac_req: blk_rq_map_kern failed.\n");
		return NULL;
	}

	rq->cmd_type = REQ_TYPE_BLOCK_PC;
	rq->cmd_flags |= REQ_FAILFAST_DEV | REQ_FAILFAST_TRANSPORT |
			 REQ_FAILFAST_DRIVER;
	rq->retries = RDAC_RETRIES;
	rq->timeout = RDAC_TIMEOUT;

	return rq;
}

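/*
 * Build the MODE SELECT request used for failover.  Every LUN queued on
 * the controller's ms_head list is marked 0x81 in the page's lun_table so
 * that ownership of all of them is transferred with a single command.
 */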
static struct request *rdac_failover_get(struct scsi_device *sdev,
			struct rdac_dh_data *h, struct list_head *list)
{
	struct request *rq;
	struct rdac_mode_common *common;
	unsigned data_size;
	struct rdac_queue_data *qdata;
	u8 *lun_table;

	if (h->ctlr->use_ms10) {
		struct rdac_pg_expanded *rdac_pg;

		data_size = sizeof(struct rdac_pg_expanded);
		rdac_pg = &h->ctlr->mode_select.expanded;
		memset(rdac_pg, 0, data_size);
		common = &rdac_pg->common;
		rdac_pg->page_code = RDAC_PAGE_CODE_REDUNDANT_CONTROLLER + 0x40;
		rdac_pg->subpage_code = 0x1;
		rdac_pg->page_len[0] = 0x01;
		rdac_pg->page_len[1] = 0x28;
		lun_table = rdac_pg->lun_table;
	} else {
		struct rdac_pg_legacy *rdac_pg;

		data_size = sizeof(struct rdac_pg_legacy);
		rdac_pg = &h->ctlr->mode_select.legacy;
		memset(rdac_pg, 0, data_size);
		common = &rdac_pg->common;
		rdac_pg->page_code = RDAC_PAGE_CODE_REDUNDANT_CONTROLLER;
		rdac_pg->page_len = 0x68;
		lun_table = rdac_pg->lun_table;
	}
	common->rdac_mode[1] = RDAC_MODE_TRANSFER_SPECIFIED_LUNS;
	common->quiescence_timeout = RDAC_QUIESCENCE_TIME;
	common->rdac_options = RDAC_FORCED_QUIESENCE;

	list_for_each_entry(qdata, list, entry) {
		lun_table[qdata->h->lun] = 0x81;
	}

	/* get request for block layer packet command */
	rq = get_rdac_req(sdev, &h->ctlr->mode_select, data_size, WRITE);
	if (!rq)
		return NULL;

	/* Prepare the command. */
	if (h->ctlr->use_ms10) {
		rq->cmd[0] = MODE_SELECT_10;
		rq->cmd[7] = data_size >> 8;
		rq->cmd[8] = data_size & 0xff;
	} else {
		rq->cmd[0] = MODE_SELECT;
		rq->cmd[4] = data_size;
	}
	rq->cmd_len = COMMAND_SIZE(rq->cmd[0]);

	rq->sense = h->sense;
	memset(rq->sense, 0, SCSI_SENSE_BUFFERSIZE);
	rq->sense_len = 0;

	return rq;
}

static void release_controller(struct kref *kref)
{
	struct rdac_controller *ctlr;
	ctlr = container_of(kref, struct rdac_controller, kref);

	flush_workqueue(kmpath_rdacd);
	spin_lock(&list_lock);
	list_del(&ctlr->node);
	spin_unlock(&list_lock);
	kfree(ctlr);
}

static struct rdac_controller *get_controller(u8 *subsys_id, u8 *slot_id,
			char *array_name)
{
	struct rdac_controller *ctlr, *tmp;

	spin_lock(&list_lock);

	list_for_each_entry(tmp, &ctlr_list, node) {
		if ((memcmp(tmp->subsys_id, subsys_id, SUBSYS_ID_LEN) == 0) &&
		    (memcmp(tmp->slot_id, slot_id, SLOT_ID_LEN) == 0)) {
			kref_get(&tmp->kref);
			spin_unlock(&list_lock);
			return tmp;
		}
	}
	ctlr = kmalloc(sizeof(*ctlr), GFP_ATOMIC);
	if (!ctlr)
		goto done;

	/* initialize fields of controller */
	memcpy(ctlr->subsys_id, subsys_id, SUBSYS_ID_LEN);
	memcpy(ctlr->slot_id, slot_id, SLOT_ID_LEN);
	memcpy(ctlr->array_name, array_name, ARRAY_LABEL_LEN);

	/* update the controller index */
	if (slot_id[1] == 0x31)
		ctlr->index = 0;
	else
		ctlr->index = 1;

	kref_init(&ctlr->kref);
	ctlr->use_ms10 = -1;
	ctlr->ms_queued = 0;
	ctlr->ms_sdev = NULL;
	spin_lock_init(&ctlr->ms_lock);
	INIT_WORK(&ctlr->ms_work, send_mode_select);
	INIT_LIST_HEAD(&ctlr->ms_head);
	list_add(&ctlr->node, &ctlr_list);
done:
	spin_unlock(&list_lock);
	return ctlr;
}

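/*
 * Issue a vendor-specific EVPD INQUIRY (page 0xC2/0xC4/0xC8/0xC9) and
 * leave the response in h->inq for the caller to parse.
 */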
static int submit_inquiry(struct scsi_device *sdev, int page_code,
		unsigned int len, struct rdac_dh_data *h)
{
	struct request *rq;
	struct request_queue *q = sdev->request_queue;
	int err = SCSI_DH_RES_TEMP_UNAVAIL;

	rq = get_rdac_req(sdev, &h->inq, len, READ);
	if (!rq)
		goto done;

	/* Prepare the command. */
	rq->cmd[0] = INQUIRY;
	rq->cmd[1] = 1;
	rq->cmd[2] = page_code;
	rq->cmd[4] = len;
	rq->cmd_len = COMMAND_SIZE(INQUIRY);

	rq->sense = h->sense;
	memset(rq->sense, 0, SCSI_SENSE_BUFFERSIZE);
	rq->sense_len = 0;

	err = blk_execute_rq(q, NULL, rq, 1);
	if (err == -EIO)
		err = SCSI_DH_IO;

	blk_put_request(rq);
done:
	return err;
}

static int get_lun_info(struct scsi_device *sdev, struct rdac_dh_data *h,
			char *array_name)
{
	int err, i;
	struct c8_inquiry *inqp;

	err = submit_inquiry(sdev, 0xC8, sizeof(struct c8_inquiry), h);
	if (err == SCSI_DH_OK) {
		inqp = &h->inq.c8;
		if (inqp->page_code != 0xc8)
			return SCSI_DH_NOSYS;
		if (inqp->page_id[0] != 'e' || inqp->page_id[1] != 'd' ||
		    inqp->page_id[2] != 'i' || inqp->page_id[3] != 'd')
			return SCSI_DH_NOSYS;
		h->lun = inqp->lun[7]; /* Uses only the last byte */

		for (i = 0; i < ARRAY_LABEL_LEN - 1; ++i)
			*(array_name+i) = inqp->array_user_label[(2*i)+1];

		*(array_name+ARRAY_LABEL_LEN-1) = '\0';
	}
	return err;
}

static int check_ownership(struct scsi_device *sdev, struct rdac_dh_data *h)
{
	int err;
	struct c9_inquiry *inqp;

	h->lun_state = RDAC_LUN_UNOWNED;
	h->state = RDAC_STATE_ACTIVE;
	err = submit_inquiry(sdev, 0xC9, sizeof(struct c9_inquiry), h);
	if (err == SCSI_DH_OK) {
		inqp = &h->inq.c9;
		if ((inqp->avte_cvp >> 7) == 0x1) {
			/* LUN in AVT mode */
			sdev_printk(KERN_NOTICE, sdev,
				    "%s: AVT mode detected\n",
				    RDAC_NAME);
			h->lun_state = RDAC_LUN_AVT;
		} else if ((inqp->avte_cvp & 0x1) != 0) {
			/* LUN was owned by the controller */
			h->lun_state = RDAC_LUN_OWNED;
		}
	}

	if (h->lun_state == RDAC_LUN_UNOWNED)
		h->state = RDAC_STATE_PASSIVE;

	return err;
}

static int initialize_controller(struct scsi_device *sdev,
		struct rdac_dh_data *h, char *array_name)
{
	int err;
	struct c4_inquiry *inqp;

	err = submit_inquiry(sdev, 0xC4, sizeof(struct c4_inquiry), h);
	if (err == SCSI_DH_OK) {
		inqp = &h->inq.c4;
		h->ctlr = get_controller(inqp->subsys_id, inqp->slot_id,
					array_name);
		if (!h->ctlr)
			err = SCSI_DH_RES_TEMP_UNAVAIL;
	}
	return err;
}

static int set_mode_select(struct scsi_device *sdev, struct rdac_dh_data *h)
{
	int err;
	struct c2_inquiry *inqp;

	err = submit_inquiry(sdev, 0xC2, sizeof(struct c2_inquiry), h);
	if (err == SCSI_DH_OK) {
		inqp = &h->inq.c2;
		/*
		 * If more than MODE6_MAX_LUN luns are supported, use
		 * mode select 10
		 */
		if (inqp->max_lun_supported >= MODE6_MAX_LUN)
			h->ctlr->use_ms10 = 1;
		else
			h->ctlr->use_ms10 = 0;
	}
	return err;
}

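/*
 * Translate the sense data of a failed MODE SELECT into a scsi_dh status.
 * Transient conditions (unit attention, LUN becoming ready, command lock
 * contention) are reported as SCSI_DH_RETRY; everything else is SCSI_DH_IO.
 */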
static int mode_select_handle_sense(struct scsi_device *sdev,
					unsigned char *sensebuf)
{
	struct scsi_sense_hdr sense_hdr;
	int err = SCSI_DH_IO, ret;
	struct rdac_dh_data *h = get_rdac_data(sdev);

	ret = scsi_normalize_sense(sensebuf, SCSI_SENSE_BUFFERSIZE, &sense_hdr);
	if (!ret)
		goto done;

	switch (sense_hdr.sense_key) {
	case NO_SENSE:
	case ABORTED_COMMAND:
	case UNIT_ATTENTION:
		err = SCSI_DH_RETRY;
		break;
	case NOT_READY:
		if (sense_hdr.asc == 0x04 && sense_hdr.ascq == 0x01)
			/* LUN Not Ready and is in the Process of Becoming
			 * Ready
			 */
			err = SCSI_DH_RETRY;
		break;
	case ILLEGAL_REQUEST:
		if (sense_hdr.asc == 0x91 && sense_hdr.ascq == 0x36)
			/*
			 * Command Lock contention
			 */
			err = SCSI_DH_RETRY;
		break;
	default:
		break;
	}

	RDAC_LOG(RDAC_LOG_FAILOVER, sdev, "array %s, ctlr %d, "
		"MODE_SELECT returned with sense %02x/%02x/%02x",
		(char *) h->ctlr->array_name, h->ctlr->index,
		sense_hdr.sense_key, sense_hdr.asc, sense_hdr.ascq);

done:
	return err;
}

static void send_mode_select(struct work_struct *work)
{
	struct rdac_controller *ctlr =
		container_of(work, struct rdac_controller, ms_work);
	struct request *rq;
	struct scsi_device *sdev = ctlr->ms_sdev;
	struct rdac_dh_data *h = get_rdac_data(sdev);
	struct request_queue *q = sdev->request_queue;
	int err, retry_cnt = RDAC_RETRY_COUNT;
	struct rdac_queue_data *tmp, *qdata;
	LIST_HEAD(list);

	spin_lock(&ctlr->ms_lock);
	list_splice_init(&ctlr->ms_head, &list);
	ctlr->ms_queued = 0;
	ctlr->ms_sdev = NULL;
	spin_unlock(&ctlr->ms_lock);

retry:
	err = SCSI_DH_RES_TEMP_UNAVAIL;
	rq = rdac_failover_get(sdev, h, &list);
	if (!rq)
		goto done;

	RDAC_LOG(RDAC_LOG_FAILOVER, sdev, "array %s, ctlr %d, "
		"%s MODE_SELECT command",
		(char *) h->ctlr->array_name, h->ctlr->index,
		(retry_cnt == RDAC_RETRY_COUNT) ? "queueing" : "retrying");

	err = blk_execute_rq(q, NULL, rq, 1);
	blk_put_request(rq);
	if (err != SCSI_DH_OK) {
		err = mode_select_handle_sense(sdev, h->sense);
		if (err == SCSI_DH_RETRY && retry_cnt--)
			goto retry;
	}
	if (err == SCSI_DH_OK) {
		h->state = RDAC_STATE_ACTIVE;
		RDAC_LOG(RDAC_LOG_FAILOVER, sdev, "array %s, ctlr %d, "
			"MODE_SELECT completed",
			(char *) h->ctlr->array_name, h->ctlr->index);
	}

done:
	list_for_each_entry_safe(qdata, tmp, &list, entry) {
		list_del(&qdata->entry);
		if (err == SCSI_DH_OK)
			qdata->h->state = RDAC_STATE_ACTIVE;
		if (qdata->callback_fn)
			qdata->callback_fn(qdata->callback_data, err);
		kfree(qdata);
	}
	return;
}

static int queue_mode_select(struct scsi_device *sdev,
				activate_complete fn, void *data)
{
	struct rdac_queue_data *qdata;
	struct rdac_controller *ctlr;

	qdata = kzalloc(sizeof(*qdata), GFP_KERNEL);
	if (!qdata)
		return SCSI_DH_RETRY;

	qdata->h = get_rdac_data(sdev);
	qdata->callback_fn = fn;
	qdata->callback_data = data;

	ctlr = qdata->h->ctlr;
	spin_lock(&ctlr->ms_lock);
	list_add_tail(&qdata->entry, &ctlr->ms_head);
	if (!ctlr->ms_queued) {
		ctlr->ms_queued = 1;
		ctlr->ms_sdev = sdev;
		queue_work(kmpath_rdacd, &ctlr->ms_work);
	}
	spin_unlock(&ctlr->ms_lock);
	return SCSI_DH_OK;
}

static int rdac_activate(struct scsi_device *sdev,
			activate_complete fn, void *data)
{
	struct rdac_dh_data *h = get_rdac_data(sdev);
	int err = SCSI_DH_OK;

	err = check_ownership(sdev, h);
	if (err != SCSI_DH_OK)
		goto done;

	if (h->lun_state == RDAC_LUN_UNOWNED) {
		err = queue_mode_select(sdev, fn, data);
		if (err == SCSI_DH_OK)
			return 0;
	}
done:
	if (fn)
		fn(data, err);
	return 0;
}

static int rdac_prep_fn(struct scsi_device *sdev, struct request *req)
{
	struct rdac_dh_data *h = get_rdac_data(sdev);
	int ret = BLKPREP_OK;

	if (h->state != RDAC_STATE_ACTIVE) {
		ret = BLKPREP_KILL;
		req->cmd_flags |= REQ_QUIET;
	}
	return ret;
}

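/*
 * Decode sense data for failed I/O: ADD_TO_MLQUEUE retries the command,
 * SUCCESS hands the request back so multipath can try another path, and
 * SCSI_RETURN_NOT_HANDLED leaves the decision to the SCSI midlayer.
 */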
static int rdac_check_sense(struct scsi_device *sdev,
				struct scsi_sense_hdr *sense_hdr)
{
	struct rdac_dh_data *h = get_rdac_data(sdev);

	RDAC_LOG(RDAC_LOG_SENSE, sdev, "array %s, ctlr %d, "
		"I/O returned with sense %02x/%02x/%02x",
		(char *) h->ctlr->array_name, h->ctlr->index,
		sense_hdr->sense_key, sense_hdr->asc, sense_hdr->ascq);

	switch (sense_hdr->sense_key) {
	case NOT_READY:
		if (sense_hdr->asc == 0x04 && sense_hdr->ascq == 0x01)
			/* LUN Not Ready - Logical Unit Not Ready and is in
			 * the process of becoming ready
			 * Just retry.
			 */
			return ADD_TO_MLQUEUE;
		if (sense_hdr->asc == 0x04 && sense_hdr->ascq == 0x81)
			/* LUN Not Ready - Storage firmware incompatible
			 * Manual code synchronisation required.
			 *
			 * Nothing we can do here. Try to bypass the path.
			 */
			return SUCCESS;
		if (sense_hdr->asc == 0x04 && sense_hdr->ascq == 0xA1)
			/* LUN Not Ready - Quiescence in progress
			 *
			 * Just retry and wait.
			 */
			return ADD_TO_MLQUEUE;
		if (sense_hdr->asc == 0xA1 && sense_hdr->ascq == 0x02)
			/* LUN Not Ready - Quiescence in progress
			 * or has been achieved
			 * Just retry.
			 */
			return ADD_TO_MLQUEUE;
		break;
	case ILLEGAL_REQUEST:
		if (sense_hdr->asc == 0x94 && sense_hdr->ascq == 0x01) {
			/* Invalid Request - Current Logical Unit Ownership.
			 * Controller is not the current owner of the LUN,
			 * so fail the path so that the other path can be used.
			 */
			h->state = RDAC_STATE_PASSIVE;
			return SUCCESS;
		}
		break;
	case UNIT_ATTENTION:
		if (sense_hdr->asc == 0x29 && sense_hdr->ascq == 0x00)
			/*
			 * Power On, Reset, or Bus Device Reset, just retry.
			 */
			return ADD_TO_MLQUEUE;
		if (sense_hdr->asc == 0x8b && sense_hdr->ascq == 0x02)
			/*
			 * Quiescence in progress, just retry.
			 */
			return ADD_TO_MLQUEUE;
		break;
	}
	/* success just means we do not care what scsi-ml does */
	return SCSI_RETURN_NOT_HANDLED;
}

static const struct scsi_dh_devlist rdac_dev_list[] = {
	{"IBM", "1722"},
	{"IBM", "1724"},
	{"IBM", "1726"},
	{"IBM", "1742"},
	{"IBM", "1745"},
	{"IBM", "1746"},
	{"IBM", "1814"},
	{"IBM", "1815"},
	{"IBM", "1818"},
	{"IBM", "3526"},
	{"SGI", "TP9400"},
	{"SGI", "TP9500"},
	{"SGI", "IS"},
	{"STK", "OPENstorage D280"},
	{"SUN", "CSM200_R"},
	{"SUN", "LCSM100_I"},
	{"SUN", "LCSM100_S"},
	{"SUN", "LCSM100_E"},
	{"SUN", "LCSM100_F"},
	{"DELL", "MD3000"},
	{"DELL", "MD3000i"},
	{"DELL", "MD32xx"},
	{"DELL", "MD32xxi"},
	{"DELL", "MD36xxi"},
	{"DELL", "MD36xxf"},
	{"LSI", "INF-01-00"},
	{"ENGENIO", "INF-01-00"},
	{"STK", "FLEXLINE 380"},
	{"SUN", "CSM100_R_FC"},
	{"SUN", "STK6580_6780"},
	{"SUN", "SUN_6180"},
	{NULL, NULL},
};

static int rdac_bus_attach(struct scsi_device *sdev);
static void rdac_bus_detach(struct scsi_device *sdev);

static struct scsi_device_handler rdac_dh = {
	.name		= RDAC_NAME,
	.module		= THIS_MODULE,
	.devlist	= rdac_dev_list,
	.prep_fn	= rdac_prep_fn,
	.check_sense	= rdac_check_sense,
	.attach		= rdac_bus_attach,
	.detach		= rdac_bus_detach,
	.activate	= rdac_activate,
};

static int rdac_bus_attach(struct scsi_device *sdev)
{
	struct scsi_dh_data *scsi_dh_data;
	struct rdac_dh_data *h;
	unsigned long flags;
	int err;
	char array_name[ARRAY_LABEL_LEN];

	scsi_dh_data = kzalloc(sizeof(*scsi_dh_data)
			       + sizeof(*h), GFP_KERNEL);
	if (!scsi_dh_data) {
		sdev_printk(KERN_ERR, sdev, "%s: Attach failed\n",
			    RDAC_NAME);
		return 0;
	}
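	/*
	 * Query the array: the C8 page gives the LUN and array name, the C4
	 * page locates (or creates) the shared rdac_controller, the C9 page
	 * reports current ownership, and the C2 page decides between
	 * MODE SELECT(6) and MODE SELECT(10).
	 */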
	scsi_dh_data->scsi_dh = &rdac_dh;
	h = (struct rdac_dh_data *) scsi_dh_data->buf;
	h->lun = UNINITIALIZED_LUN;
	h->state = RDAC_STATE_ACTIVE;

	err = get_lun_info(sdev, h, array_name);
	if (err != SCSI_DH_OK)
		goto failed;

	err = initialize_controller(sdev, h, array_name);
	if (err != SCSI_DH_OK)
		goto failed;

	err = check_ownership(sdev, h);
	if (err != SCSI_DH_OK)
		goto clean_ctlr;

	err = set_mode_select(sdev, h);
	if (err != SCSI_DH_OK)
		goto clean_ctlr;

	if (!try_module_get(THIS_MODULE))
		goto clean_ctlr;

	spin_lock_irqsave(sdev->request_queue->queue_lock, flags);
	sdev->scsi_dh_data = scsi_dh_data;
	spin_unlock_irqrestore(sdev->request_queue->queue_lock, flags);

	sdev_printk(KERN_NOTICE, sdev,
		    "%s: LUN %d (%s)\n",
		    RDAC_NAME, h->lun, lun_state[(int)h->lun_state]);

	return 0;

clean_ctlr:
	kref_put(&h->ctlr->kref, release_controller);

failed:
	kfree(scsi_dh_data);
	sdev_printk(KERN_ERR, sdev, "%s: not attached\n",
		    RDAC_NAME);
	return -EINVAL;
}

static void rdac_bus_detach(struct scsi_device *sdev)
{
	struct scsi_dh_data *scsi_dh_data;
	struct rdac_dh_data *h;
	unsigned long flags;

	spin_lock_irqsave(sdev->request_queue->queue_lock, flags);
	scsi_dh_data = sdev->scsi_dh_data;
	sdev->scsi_dh_data = NULL;
	spin_unlock_irqrestore(sdev->request_queue->queue_lock, flags);

	h = (struct rdac_dh_data *) scsi_dh_data->buf;
	if (h->ctlr)
		kref_put(&h->ctlr->kref, release_controller);
	kfree(scsi_dh_data);
	module_put(THIS_MODULE);
	sdev_printk(KERN_NOTICE, sdev, "%s: Detached\n", RDAC_NAME);
}

static int __init rdac_init(void)
{
	int r;

	r = scsi_register_device_handler(&rdac_dh);
	if (r != 0) {
		printk(KERN_ERR "Failed to register scsi device handler.\n");
		goto done;
	}

	/*
	 * Create workqueue to handle mode selects for rdac
	 */
	kmpath_rdacd = create_singlethread_workqueue("kmpath_rdacd");
	if (!kmpath_rdacd) {
		scsi_unregister_device_handler(&rdac_dh);
		printk(KERN_ERR "kmpath_rdacd creation failed.\n");
		r = -EINVAL;
	}
done:
	return r;
}

static void __exit rdac_exit(void)
{
	destroy_workqueue(kmpath_rdacd);
	scsi_unregister_device_handler(&rdac_dh);
}

module_init(rdac_init);
module_exit(rdac_exit);

MODULE_DESCRIPTION("Multipath LSI/Engenio RDAC driver");
MODULE_AUTHOR("Mike Christie, Chandra Seetharaman");
MODULE_VERSION("01.00.0000.0000");
MODULE_LICENSE("GPL");