xref: /linux/drivers/scsi/device_handler/scsi_dh_rdac.c (revision c532de5a67a70f8533d495f8f2aaa9a0491c3ad0)
/*
 * LSI/Engenio/NetApp E-Series RDAC SCSI Device Handler
 *
 * Copyright (C) 2005 Mike Christie. All rights reserved.
 * Copyright (C) Chandra Seetharaman, IBM Corp. 2007
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 */
#include <scsi/scsi.h>
#include <scsi/scsi_eh.h>
#include <scsi/scsi_dh.h>
#include <linux/workqueue.h>
#include <linux/slab.h>
#include <linux/module.h>

#define RDAC_NAME "rdac"
#define RDAC_RETRY_COUNT 5

/*
 * LSI mode page stuff
 *
 * These struct definitions and the forming of the
 * mode page were taken from the LSI RDAC 2.4 GPL'd
 * driver, and then converted to Linux conventions.
 */
#define RDAC_QUIESCENCE_TIME 20
/*
 * Page Codes
 */
#define RDAC_PAGE_CODE_REDUNDANT_CONTROLLER 0x2c

/*
 * Controller mode definitions
 */
#define RDAC_MODE_TRANSFER_SPECIFIED_LUNS	0x02

/*
 * RDAC Options field
 */
#define RDAC_FORCED_QUIESENCE 0x02

#define RDAC_TIMEOUT	(60 * HZ)
#define RDAC_RETRIES	3

struct rdac_mode_6_hdr {
	u8	data_len;
	u8	medium_type;
	u8	device_params;
	u8	block_desc_len;
};

struct rdac_mode_10_hdr {
	u16	data_len;
	u8	medium_type;
	u8	device_params;
	u16	reserved;
	u16	block_desc_len;
};

struct rdac_mode_common {
	u8	controller_serial[16];
	u8	alt_controller_serial[16];
	u8	rdac_mode[2];
	u8	alt_rdac_mode[2];
	u8	quiescence_timeout;
	u8	rdac_options;
};

struct rdac_pg_legacy {
	struct rdac_mode_6_hdr hdr;
	u8	page_code;
	u8	page_len;
	struct rdac_mode_common common;
#define MODE6_MAX_LUN	32
	u8	lun_table[MODE6_MAX_LUN];
	u8	reserved2[32];
	u8	reserved3;
	u8	reserved4;
};

struct rdac_pg_expanded {
	struct rdac_mode_10_hdr hdr;
	u8	page_code;
	u8	subpage_code;
	u8	page_len[2];
	struct rdac_mode_common common;
	u8	lun_table[256];
	u8	reserved3;
	u8	reserved4;
};

struct c9_inquiry {
	u8	peripheral_info;
	u8	page_code;	/* 0xC9 */
	u8	reserved1;
	u8	page_len;
	u8	page_id[4];	/* "vace" */
	u8	avte_cvp;
	u8	path_prio;
	u8	reserved2[38];
};

#define SUBSYS_ID_LEN	16
#define SLOT_ID_LEN	2
#define ARRAY_LABEL_LEN	31

struct c4_inquiry {
	u8	peripheral_info;
	u8	page_code;	/* 0xC4 */
	u8	reserved1;
	u8	page_len;
	u8	page_id[4];	/* "subs" */
	u8	subsys_id[SUBSYS_ID_LEN];
	u8	revision[4];
	u8	slot_id[SLOT_ID_LEN];
	u8	reserved[2];
};

#define UNIQUE_ID_LEN 16
struct c8_inquiry {
	u8	peripheral_info;
	u8	page_code; /* 0xC8 */
	u8	reserved1;
	u8	page_len;
	u8	page_id[4]; /* "edid" */
	u8	reserved2[3];
	u8	vol_uniq_id_len;
	u8	vol_uniq_id[16];
	u8	vol_user_label_len;
	u8	vol_user_label[60];
	u8	array_uniq_id_len;
	u8	array_unique_id[UNIQUE_ID_LEN];
	u8	array_user_label_len;
	u8	array_user_label[60];
	u8	lun[8];
};

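/*
 * Per-controller state, shared by all logical units behind the same
 * array/controller pair and looked up by (array_id, slot index, host).
 * Reference counted; also holds the MODE SELECT payload buffer and the
 * work item used to service queued failover requests.
 */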
struct rdac_controller {
	u8			array_id[UNIQUE_ID_LEN];
	int			use_ms10;
	struct kref		kref;
	struct list_head	node; /* list of all controllers */
	union			{
		struct rdac_pg_legacy legacy;
		struct rdac_pg_expanded expanded;
	} mode_select;
	u8	index;
	u8	array_name[ARRAY_LABEL_LEN];
	struct Scsi_Host	*host;
	spinlock_t		ms_lock;
	int			ms_queued;
	struct work_struct	ms_work;
	struct scsi_device	*ms_sdev;
	struct list_head	ms_head;
	struct list_head	dh_list;
};

struct c2_inquiry {
	u8	peripheral_info;
	u8	page_code;	/* 0xC2 */
	u8	reserved1;
	u8	page_len;
	u8	page_id[4];	/* "swr4" */
	u8	sw_version[3];
	u8	sw_date[3];
	u8	features_enabled;
	u8	max_lun_supported;
	u8	partitions[239]; /* Total allocation length should be 0xFF */
};

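/*
 * Per-scsi_device handler data: the LUN number, the operating mode and
 * ownership state reported by the array, and a scratch buffer for the
 * vendor-specific inquiry pages (0xC2/0xC4/0xC8/0xC9).
 */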
struct rdac_dh_data {
	struct list_head	node;
	struct rdac_controller	*ctlr;
	struct scsi_device	*sdev;
#define UNINITIALIZED_LUN	(1 << 8)
	unsigned		lun;

#define RDAC_MODE		0
#define RDAC_MODE_AVT		1
#define RDAC_MODE_IOSHIP	2
	unsigned char		mode;

#define RDAC_STATE_ACTIVE	0
#define RDAC_STATE_PASSIVE	1
	unsigned char		state;

#define RDAC_LUN_UNOWNED	0
#define RDAC_LUN_OWNED		1
	char			lun_state;

#define RDAC_PREFERRED		0
#define RDAC_NON_PREFERRED	1
	char			preferred;

	union			{
		struct c2_inquiry c2;
		struct c4_inquiry c4;
		struct c8_inquiry c8;
		struct c9_inquiry c9;
	} inq;
};

static const char *mode[] = {
	"RDAC",
	"AVT",
	"IOSHIP",
};
static const char *lun_state[] =
{
	"unowned",
	"owned",
};

struct rdac_queue_data {
	struct list_head	entry;
	struct rdac_dh_data	*h;
	activate_complete	callback_fn;
	void			*callback_data;
};

static LIST_HEAD(ctlr_list);
static DEFINE_SPINLOCK(list_lock);
static struct workqueue_struct *kmpath_rdacd;
static void send_mode_select(struct work_struct *work);

/*
 * Module parameter to enable rdac debug logging:
 * two bits for each type of logging, only two types defined for now.
 * Can be enhanced later if required.
 */
static int rdac_logging = 1;
module_param(rdac_logging, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(rdac_logging, "A bit mask of rdac logging levels, "
		"Default is 1 - failover logging enabled, "
		"set it to 0xF to enable all the logs");

#define RDAC_LOG_FAILOVER	0
#define RDAC_LOG_SENSE		2

#define RDAC_LOG_BITS		2

#define RDAC_LOG_LEVEL(SHIFT)  \
	((rdac_logging >> (SHIFT)) & ((1 << (RDAC_LOG_BITS)) - 1))

#define RDAC_LOG(SHIFT, sdev, f, arg...) \
do { \
	if (unlikely(RDAC_LOG_LEVEL(SHIFT))) \
		sdev_printk(KERN_INFO, sdev, RDAC_NAME ": " f "\n", ## arg); \
} while (0)

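/*
 * Build the redundant-controller MODE SELECT payload used for failover.
 * Depending on ctlr->use_ms10 this fills in either the legacy (MODE
 * SELECT(6)) page or the expanded (MODE SELECT(10)) subpage in
 * ctlr->mode_select, requests transfer of the specified LUNs, marks every
 * queued LUN in the lun_table and prepares the matching CDB.  Returns the
 * size of the payload to be transferred.
 */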
static unsigned int rdac_failover_get(struct rdac_controller *ctlr,
				      struct list_head *list,
				      unsigned char *cdb)
{
	struct rdac_mode_common *common;
	unsigned data_size;
	struct rdac_queue_data *qdata;
	u8 *lun_table;

	if (ctlr->use_ms10) {
		struct rdac_pg_expanded *rdac_pg;

		data_size = sizeof(struct rdac_pg_expanded);
		rdac_pg = &ctlr->mode_select.expanded;
		memset(rdac_pg, 0, data_size);
		common = &rdac_pg->common;
		rdac_pg->page_code = RDAC_PAGE_CODE_REDUNDANT_CONTROLLER + 0x40;
		rdac_pg->subpage_code = 0x1;
		rdac_pg->page_len[0] = 0x01;
		rdac_pg->page_len[1] = 0x28;
		lun_table = rdac_pg->lun_table;
	} else {
		struct rdac_pg_legacy *rdac_pg;

		data_size = sizeof(struct rdac_pg_legacy);
		rdac_pg = &ctlr->mode_select.legacy;
		memset(rdac_pg, 0, data_size);
		common = &rdac_pg->common;
		rdac_pg->page_code = RDAC_PAGE_CODE_REDUNDANT_CONTROLLER;
		rdac_pg->page_len = 0x68;
		lun_table = rdac_pg->lun_table;
	}
	common->rdac_mode[1] = RDAC_MODE_TRANSFER_SPECIFIED_LUNS;
	common->quiescence_timeout = RDAC_QUIESCENCE_TIME;
	common->rdac_options = RDAC_FORCED_QUIESENCE;

	list_for_each_entry(qdata, list, entry) {
		lun_table[qdata->h->lun] = 0x81;
	}

	/* Prepare the command. */
	if (ctlr->use_ms10) {
		cdb[0] = MODE_SELECT_10;
		cdb[7] = data_size >> 8;
		cdb[8] = data_size & 0xff;
	} else {
		cdb[0] = MODE_SELECT;
		cdb[4] = data_size;
	}

	return data_size;
}

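/*
 * kref release callback: unlink the controller from ctlr_list and free it.
 * Both callers drop their reference while holding list_lock, which protects
 * the list manipulation here.
 */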
static void release_controller(struct kref *kref)
{
	struct rdac_controller *ctlr;
	ctlr = container_of(kref, struct rdac_controller, kref);

	list_del(&ctlr->node);
	kfree(ctlr);
}

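/*
 * Find the controller matching (array_id, slot index, host) on ctlr_list
 * and take a reference, or allocate and initialize a new one and add it to
 * the list.  Runs under list_lock, hence the GFP_ATOMIC allocation.
 */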
static struct rdac_controller *get_controller(int index, char *array_name,
			u8 *array_id, struct scsi_device *sdev)
{
	struct rdac_controller *ctlr, *tmp;

	list_for_each_entry(tmp, &ctlr_list, node) {
		if ((memcmp(tmp->array_id, array_id, UNIQUE_ID_LEN) == 0) &&
			  (tmp->index == index) &&
			  (tmp->host == sdev->host)) {
			kref_get(&tmp->kref);
			return tmp;
		}
	}
	ctlr = kmalloc(sizeof(*ctlr), GFP_ATOMIC);
	if (!ctlr)
		return NULL;

	/* initialize fields of controller */
	memcpy(ctlr->array_id, array_id, UNIQUE_ID_LEN);
	ctlr->index = index;
	ctlr->host = sdev->host;
	memcpy(ctlr->array_name, array_name, ARRAY_LABEL_LEN);

	kref_init(&ctlr->kref);
	ctlr->use_ms10 = -1;
	ctlr->ms_queued = 0;
	ctlr->ms_sdev = NULL;
	spin_lock_init(&ctlr->ms_lock);
	INIT_WORK(&ctlr->ms_work, send_mode_select);
	INIT_LIST_HEAD(&ctlr->ms_head);
	list_add(&ctlr->node, &ctlr_list);
	INIT_LIST_HEAD(&ctlr->dh_list);

	return ctlr;
}

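/*
 * Read vendor-specific VPD page 0xC8 ("edid") to obtain the LUN number,
 * the user-assigned array label (stored as two-byte characters; only every
 * second byte is copied out) and the unique array identifier.
 */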
static int get_lun_info(struct scsi_device *sdev, struct rdac_dh_data *h,
			char *array_name, u8 *array_id)
{
	int err = SCSI_DH_IO, i;
	struct c8_inquiry *inqp = &h->inq.c8;

	if (!scsi_get_vpd_page(sdev, 0xC8, (unsigned char *)inqp,
			       sizeof(struct c8_inquiry))) {
		if (inqp->page_code != 0xc8)
			return SCSI_DH_NOSYS;
		if (inqp->page_id[0] != 'e' || inqp->page_id[1] != 'd' ||
		    inqp->page_id[2] != 'i' || inqp->page_id[3] != 'd')
			return SCSI_DH_NOSYS;
		h->lun = inqp->lun[7]; /* Uses only the last byte */

		for (i = 0; i < ARRAY_LABEL_LEN - 1; ++i)
			*(array_name+i) = inqp->array_user_label[(2*i)+1];

		*(array_name+ARRAY_LABEL_LEN-1) = '\0';
		memset(array_id, 0, UNIQUE_ID_LEN);
		memcpy(array_id, inqp->array_unique_id, inqp->array_uniq_id_len);
		err = SCSI_DH_OK;
	}
	return err;
}

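/*
 * Read vendor-specific VPD page 0xC9 ("vace") to detect the operating mode
 * (RDAC, AVT or IOSHIP), whether this controller currently owns the LUN and
 * whether this path is the preferred one, then propagate the resulting
 * access state to every device hanging off the same controller.
 */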
static int check_ownership(struct scsi_device *sdev, struct rdac_dh_data *h)
{
	int err = SCSI_DH_IO, access_state;
	struct rdac_dh_data *tmp;
	struct c9_inquiry *inqp = &h->inq.c9;

	h->state = RDAC_STATE_ACTIVE;
	if (!scsi_get_vpd_page(sdev, 0xC9, (unsigned char *)inqp,
			       sizeof(struct c9_inquiry))) {
		/* detect the operating mode */
		if ((inqp->avte_cvp >> 5) & 0x1)
			h->mode = RDAC_MODE_IOSHIP; /* LUN in IOSHIP mode */
		else if (inqp->avte_cvp >> 7)
			h->mode = RDAC_MODE_AVT; /* LUN in AVT mode */
		else
			h->mode = RDAC_MODE; /* LUN in RDAC mode */

		/* Update ownership */
		if (inqp->avte_cvp & 0x1) {
			h->lun_state = RDAC_LUN_OWNED;
			access_state = SCSI_ACCESS_STATE_OPTIMAL;
		} else {
			h->lun_state = RDAC_LUN_UNOWNED;
			if (h->mode == RDAC_MODE) {
				h->state = RDAC_STATE_PASSIVE;
				access_state = SCSI_ACCESS_STATE_STANDBY;
			} else
				access_state = SCSI_ACCESS_STATE_ACTIVE;
		}

		/* Update path prio */
		if (inqp->path_prio & 0x1) {
			h->preferred = RDAC_PREFERRED;
			access_state |= SCSI_ACCESS_STATE_PREFERRED;
		} else
			h->preferred = RDAC_NON_PREFERRED;
		rcu_read_lock();
		list_for_each_entry_rcu(tmp, &h->ctlr->dh_list, node) {
			/* h->sdev should always be valid */
			BUG_ON(!tmp->sdev);
			tmp->sdev->access_state = access_state;
		}
		rcu_read_unlock();
		err = SCSI_DH_OK;
	}

	return err;
}

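/*
 * Read vendor-specific VPD page 0xC4 ("subs") to work out which controller
 * slot this device sits behind, then look up or create the matching
 * rdac_controller and link this device into its dh_list.
 */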
static int initialize_controller(struct scsi_device *sdev,
		struct rdac_dh_data *h, char *array_name, u8 *array_id)
{
	int err = SCSI_DH_IO, index;
	struct c4_inquiry *inqp = &h->inq.c4;

	if (!scsi_get_vpd_page(sdev, 0xC4, (unsigned char *)inqp,
			       sizeof(struct c4_inquiry))) {
		/* get the controller index */
		if (inqp->slot_id[1] == 0x31)
			index = 0;
		else
			index = 1;

		spin_lock(&list_lock);
		h->ctlr = get_controller(index, array_name, array_id, sdev);
		if (!h->ctlr)
			err = SCSI_DH_RES_TEMP_UNAVAIL;
		else {
			h->sdev = sdev;
			list_add_rcu(&h->node, &h->ctlr->dh_list);
			err = SCSI_DH_OK;
		}
		spin_unlock(&list_lock);
	}
	return err;
}

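/*
 * Read vendor-specific VPD page 0xC2 ("swr4") and pick MODE SELECT(6) or
 * MODE SELECT(10) for failover, based on how many LUNs the array supports.
 */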
static int set_mode_select(struct scsi_device *sdev, struct rdac_dh_data *h)
{
	int err = SCSI_DH_IO;
	struct c2_inquiry *inqp = &h->inq.c2;

	if (!scsi_get_vpd_page(sdev, 0xC2, (unsigned char *)inqp,
			       sizeof(struct c2_inquiry))) {
		/*
		 * If MODE6_MAX_LUN or more LUNs are supported, use
		 * mode select 10
		 */
		if (inqp->max_lun_supported >= MODE6_MAX_LUN)
			h->ctlr->use_ms10 = 1;
		else
			h->ctlr->use_ms10 = 0;
		err = SCSI_DH_OK;
	}
	return err;
}

static int mode_select_handle_sense(struct scsi_device *sdev,
				    struct scsi_sense_hdr *sense_hdr)
{
	struct rdac_dh_data *h = sdev->handler_data;

	if (!scsi_sense_valid(sense_hdr))
		return SCSI_DH_IO;

	RDAC_LOG(RDAC_LOG_FAILOVER, sdev, "array %s, ctlr %d, "
		"MODE_SELECT returned with sense %02x/%02x/%02x",
		(char *) h->ctlr->array_name, h->ctlr->index,
		sense_hdr->sense_key, sense_hdr->asc, sense_hdr->ascq);

	return SCSI_DH_IO;
}

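/*
 * Work item handler: drain the activation requests queued on this
 * controller and issue a single failover MODE SELECT on their behalf.
 * The scsi_failures table makes the midlayer retry the transient check
 * conditions listed in failure_defs before the command is failed.  On
 * success all queued devices are marked active; each waiter's callback
 * is then invoked with the outcome.
 */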
static void send_mode_select(struct work_struct *work)
{
	struct rdac_controller *ctlr =
		container_of(work, struct rdac_controller, ms_work);
	struct scsi_device *sdev = ctlr->ms_sdev;
	struct rdac_dh_data *h = sdev->handler_data;
	int rc, err;
	struct rdac_queue_data *tmp, *qdata;
	LIST_HEAD(list);
	unsigned char cdb[MAX_COMMAND_SIZE];
	struct scsi_sense_hdr sshdr;
	unsigned int data_size;
	blk_opf_t opf = REQ_OP_DRV_OUT | REQ_FAILFAST_DEV |
				REQ_FAILFAST_TRANSPORT | REQ_FAILFAST_DRIVER;
	struct scsi_failure failure_defs[] = {
		{
			.sense = NO_SENSE,
			.asc = SCMD_FAILURE_ASC_ANY,
			.ascq = SCMD_FAILURE_ASCQ_ANY,
			.result = SAM_STAT_CHECK_CONDITION,
		},
		{
			.sense = ABORTED_COMMAND,
			.asc = SCMD_FAILURE_ASC_ANY,
			.ascq = SCMD_FAILURE_ASCQ_ANY,
			.result = SAM_STAT_CHECK_CONDITION,
		},
		{
			.sense = UNIT_ATTENTION,
			.asc = SCMD_FAILURE_ASC_ANY,
			.ascq = SCMD_FAILURE_ASCQ_ANY,
			.result = SAM_STAT_CHECK_CONDITION,
		},
		/* LUN Not Ready and is in the Process of Becoming Ready */
		{
			.sense = NOT_READY,
			.asc = 0x04,
			.ascq = 0x01,
			.result = SAM_STAT_CHECK_CONDITION,
		},
		/* Command Lock contention */
		{
			.sense = ILLEGAL_REQUEST,
			.asc = 0x91,
			.ascq = 0x36,
			.allowed = SCMD_FAILURE_NO_LIMIT,
			.result = SAM_STAT_CHECK_CONDITION,
		},
		{}
	};
	struct scsi_failures failures = {
		.total_allowed = RDAC_RETRY_COUNT,
		.failure_definitions = failure_defs,
	};
	const struct scsi_exec_args exec_args = {
		.sshdr = &sshdr,
		.failures = &failures,
	};

	spin_lock(&ctlr->ms_lock);
	list_splice_init(&ctlr->ms_head, &list);
	ctlr->ms_queued = 0;
	ctlr->ms_sdev = NULL;
	spin_unlock(&ctlr->ms_lock);

	memset(cdb, 0, sizeof(cdb));

	data_size = rdac_failover_get(ctlr, &list, cdb);

	RDAC_LOG(RDAC_LOG_FAILOVER, sdev, "array %s, ctlr %d, queueing MODE_SELECT command",
		(char *)h->ctlr->array_name, h->ctlr->index);

	rc = scsi_execute_cmd(sdev, cdb, opf, &h->ctlr->mode_select, data_size,
			      RDAC_TIMEOUT * HZ, RDAC_RETRIES, &exec_args);
	if (!rc) {
		h->state = RDAC_STATE_ACTIVE;
		RDAC_LOG(RDAC_LOG_FAILOVER, sdev, "array %s, ctlr %d, "
				"MODE_SELECT completed",
				(char *) h->ctlr->array_name, h->ctlr->index);
		err = SCSI_DH_OK;
	} else if (rc < 0) {
		err = SCSI_DH_IO;
	} else {
		err = mode_select_handle_sense(sdev, &sshdr);
	}

	list_for_each_entry_safe(qdata, tmp, &list, entry) {
		list_del(&qdata->entry);
		if (err == SCSI_DH_OK)
			qdata->h->state = RDAC_STATE_ACTIVE;
		if (qdata->callback_fn)
			qdata->callback_fn(qdata->callback_data, err);
		kfree(qdata);
	}
	return;
}

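/*
 * Queue an activation request on the controller and, if no MODE SELECT is
 * already pending, kick the kmpath_rdacd work item to service it.
 */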
static int queue_mode_select(struct scsi_device *sdev,
				activate_complete fn, void *data)
{
	struct rdac_queue_data *qdata;
	struct rdac_controller *ctlr;

	qdata = kzalloc(sizeof(*qdata), GFP_KERNEL);
	if (!qdata)
		return SCSI_DH_RETRY;

	qdata->h = sdev->handler_data;
	qdata->callback_fn = fn;
	qdata->callback_data = data;

	ctlr = qdata->h->ctlr;
	spin_lock(&ctlr->ms_lock);
	list_add_tail(&qdata->entry, &ctlr->ms_head);
	if (!ctlr->ms_queued) {
		ctlr->ms_queued = 1;
		ctlr->ms_sdev = sdev;
		queue_work(kmpath_rdacd, &ctlr->ms_work);
	}
	spin_unlock(&ctlr->ms_lock);
	return SCSI_DH_OK;
}

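/*
 * Device handler ->activate callback: refresh the ownership state and queue
 * a failover MODE SELECT when the LUN is unowned in RDAC mode, or unowned
 * and preferred in IOSHIP mode; otherwise the activation completes
 * immediately through the supplied callback.
 */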
static int rdac_activate(struct scsi_device *sdev,
			activate_complete fn, void *data)
{
	struct rdac_dh_data *h = sdev->handler_data;
	int err = SCSI_DH_OK;
	int act = 0;

	err = check_ownership(sdev, h);
	if (err != SCSI_DH_OK)
		goto done;

	switch (h->mode) {
	case RDAC_MODE:
		if (h->lun_state == RDAC_LUN_UNOWNED)
			act = 1;
		break;
	case RDAC_MODE_IOSHIP:
		if ((h->lun_state == RDAC_LUN_UNOWNED) &&
		    (h->preferred == RDAC_PREFERRED))
			act = 1;
		break;
	default:
		break;
	}

	if (act) {
		err = queue_mode_select(sdev, fn, data);
		if (err == SCSI_DH_OK)
			return 0;
	}
done:
	if (fn)
		fn(data, err);
	return 0;
}

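/*
 * Device handler ->prep_fn callback: quietly fail I/O while this path is in
 * the passive state rather than sending it to the array.
 */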
static blk_status_t rdac_prep_fn(struct scsi_device *sdev, struct request *req)
{
	struct rdac_dh_data *h = sdev->handler_data;

	if (h->state != RDAC_STATE_ACTIVE) {
		req->rq_flags |= RQF_QUIET;
		return BLK_STS_IOERR;
	}

	return BLK_STS_OK;
}

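/*
 * Device handler ->check_sense callback: decode vendor-specific sense data.
 * Transient conditions (becoming ready, quiescence, resets) are requeued
 * for retry, while an ownership error marks this path passive so another
 * path can be used; anything else is left to the normal error handling.
 */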
static enum scsi_disposition rdac_check_sense(struct scsi_device *sdev,
					      struct scsi_sense_hdr *sense_hdr)
{
	struct rdac_dh_data *h = sdev->handler_data;

	RDAC_LOG(RDAC_LOG_SENSE, sdev, "array %s, ctlr %d, "
			"I/O returned with sense %02x/%02x/%02x",
			(char *) h->ctlr->array_name, h->ctlr->index,
			sense_hdr->sense_key, sense_hdr->asc, sense_hdr->ascq);

	switch (sense_hdr->sense_key) {
	case NOT_READY:
		if (sense_hdr->asc == 0x04 && sense_hdr->ascq == 0x01)
			/* LUN Not Ready - Logical Unit Not Ready and is in
			 * the process of becoming ready
			 * Just retry.
			 */
			return ADD_TO_MLQUEUE;
		if (sense_hdr->asc == 0x04 && sense_hdr->ascq == 0x81)
			/* LUN Not Ready - Storage firmware incompatible
			 * Manual code synchronisation required.
			 *
			 * Nothing we can do here. Try to bypass the path.
			 */
			return SUCCESS;
		if (sense_hdr->asc == 0x04 && sense_hdr->ascq == 0xA1)
			/* LUN Not Ready - Quiescence in progress
			 *
			 * Just retry and wait.
			 */
			return ADD_TO_MLQUEUE;
		if (sense_hdr->asc == 0xA1 && sense_hdr->ascq == 0x02)
			/* LUN Not Ready - Quiescence in progress
			 * or has been achieved
			 * Just retry.
			 */
			return ADD_TO_MLQUEUE;
		break;
	case ILLEGAL_REQUEST:
		if (sense_hdr->asc == 0x94 && sense_hdr->ascq == 0x01) {
			/* Invalid Request - Current Logical Unit Ownership.
			 * Controller is not the current owner of the LUN,
			 * so fail the path so that the other path can be used.
			 */
			h->state = RDAC_STATE_PASSIVE;
			return SUCCESS;
		}
		break;
	case UNIT_ATTENTION:
		if (sense_hdr->asc == 0x29 && sense_hdr->ascq == 0x00)
			/*
			 * Power On, Reset, or Bus Device Reset, just retry.
			 */
			return ADD_TO_MLQUEUE;
		if (sense_hdr->asc == 0x8b && sense_hdr->ascq == 0x02)
			/*
			 * Quiescence in progress, just retry.
			 */
			return ADD_TO_MLQUEUE;
		break;
	}
	/* success just means we do not care what scsi-ml does */
	return SCSI_RETURN_NOT_HANDLED;
}

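/*
 * Device handler ->attach callback: gather the LUN, array and controller
 * information from the vendor-specific VPD pages, bind the device to its
 * rdac_controller and record the initial mode and ownership state.
 */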
static int rdac_bus_attach(struct scsi_device *sdev)
{
	struct rdac_dh_data *h;
	int err;
	char array_name[ARRAY_LABEL_LEN];
	char array_id[UNIQUE_ID_LEN];

	h = kzalloc(sizeof(*h), GFP_KERNEL);
	if (!h)
		return SCSI_DH_NOMEM;
	h->lun = UNINITIALIZED_LUN;
	h->state = RDAC_STATE_ACTIVE;

	err = get_lun_info(sdev, h, array_name, array_id);
	if (err != SCSI_DH_OK)
		goto failed;

	err = initialize_controller(sdev, h, array_name, array_id);
	if (err != SCSI_DH_OK)
		goto failed;

	err = check_ownership(sdev, h);
	if (err != SCSI_DH_OK)
		goto clean_ctlr;

	err = set_mode_select(sdev, h);
	if (err != SCSI_DH_OK)
		goto clean_ctlr;

	sdev_printk(KERN_NOTICE, sdev,
		    "%s: LUN %d (%s) (%s)\n",
		    RDAC_NAME, h->lun, mode[(int)h->mode],
		    lun_state[(int)h->lun_state]);

	sdev->handler_data = h;
	return SCSI_DH_OK;

clean_ctlr:
	spin_lock(&list_lock);
	kref_put(&h->ctlr->kref, release_controller);
	spin_unlock(&list_lock);

failed:
	kfree(h);
	return err;
}

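/*
 * Device handler ->detach callback: wait for any pending MODE SELECT work,
 * unlink the device from its controller (dropping the controller reference)
 * and free the handler data once RCU readers are done.
 */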
static void rdac_bus_detach(struct scsi_device *sdev)
{
	struct rdac_dh_data *h = sdev->handler_data;

	if (h->ctlr && h->ctlr->ms_queued)
		flush_workqueue(kmpath_rdacd);

	spin_lock(&list_lock);
	if (h->ctlr) {
		list_del_rcu(&h->node);
		kref_put(&h->ctlr->kref, release_controller);
	}
	spin_unlock(&list_lock);
	sdev->handler_data = NULL;
	synchronize_rcu();
	kfree(h);
}

static struct scsi_device_handler rdac_dh = {
	.name = RDAC_NAME,
	.module = THIS_MODULE,
	.prep_fn = rdac_prep_fn,
	.check_sense = rdac_check_sense,
	.attach = rdac_bus_attach,
	.detach = rdac_bus_detach,
	.activate = rdac_activate,
};

static int __init rdac_init(void)
{
	int r;

	r = scsi_register_device_handler(&rdac_dh);
	if (r != 0) {
		printk(KERN_ERR "Failed to register scsi device handler.");
		goto done;
	}

	/*
	 * Create workqueue to handle mode selects for rdac
	 */
	kmpath_rdacd =
		alloc_ordered_workqueue("%s", WQ_MEM_RECLAIM, "kmpath_rdacd");
	if (!kmpath_rdacd) {
		scsi_unregister_device_handler(&rdac_dh);
		printk(KERN_ERR "kmpath_rdacd creation failed.\n");

		r = -EINVAL;
	}
done:
	return r;
}

static void __exit rdac_exit(void)
{
	destroy_workqueue(kmpath_rdacd);
	scsi_unregister_device_handler(&rdac_dh);
}

module_init(rdac_init);
module_exit(rdac_exit);

MODULE_DESCRIPTION("Multipath LSI/Engenio/NetApp E-Series RDAC driver");
MODULE_AUTHOR("Mike Christie, Chandra Seetharaman");
MODULE_VERSION("01.00.0000.0000");
MODULE_LICENSE("GPL");