/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2005 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

/*
 * sf - Solaris Fibre Channel driver
 *
 * This module implements some of the Fibre Channel FC-4 layer, converting
 * from FC frames to SCSI and back.  (Note: no sequence management is done
 * here, though.)
 */

#if defined(lint) && !defined(DEBUG)
#define	DEBUG	1
#endif

/*
 * XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
 * Need to use the ugly RAID LUN mappings in FCP Annex D
 * to prevent SCSA from barfing.  This *REALLY* needs to
 * be addressed by the standards committee.
 */
#define	RAID_LUNS	1
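
/*
 * With RAID_LUNS defined, targets are looked up by a small-integer
 * LUN rather than by the full 64-bit FCP LUN -- compare the two
 * sf_lookup_target() prototypes below.
 */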

#ifdef DEBUG
static int sfdebug = 0;
#include <sys/debug.h>

#define	SF_DEBUG(level, args) \
	if (sfdebug >= (level)) sf_log args
#else
#define	SF_DEBUG(level, args)
#endif
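
/*
 * Typical usage -- the extra parentheses let a variable argument
 * list pass through to sf_log() unchanged:
 *
 *	SF_DEBUG(4, (sf, CE_CONT,
 *	    "sf_attach: DDI_ATTACH for sf%d\n", instance));
 */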

static int sf_bus_config_debug = 0;

/* Why do I have to do this? */
#define	offsetof(s, m)  (size_t)(&(((s *)0)->m))

#include <sys/scsi/scsi.h>
#include <sys/fc4/fcal.h>
#include <sys/fc4/fcp.h>
#include <sys/fc4/fcal_linkapp.h>
#include <sys/socal_cq_defs.h>
#include <sys/fc4/fcal_transport.h>
#include <sys/fc4/fcio.h>
#include <sys/scsi/adapters/sfvar.h>
#include <sys/scsi/impl/scsi_reset_notify.h>
#include <sys/stat.h>
#include <sys/varargs.h>
#include <sys/var.h>
#include <sys/thread.h>
#include <sys/proc.h>
#include <sys/kstat.h>
#include <sys/devctl.h>
#include <sys/scsi/targets/ses.h>
#include <sys/callb.h>

static int sf_info(dev_info_t *, ddi_info_cmd_t, void *, void **);
static int sf_attach(dev_info_t *, ddi_attach_cmd_t);
static int sf_detach(dev_info_t *, ddi_detach_cmd_t);
static void sf_softstate_unlink(struct sf *);
static int sf_scsi_bus_config(dev_info_t *parent, uint_t flag,
    ddi_bus_config_op_t op, void *arg, dev_info_t **childp);
static int sf_scsi_bus_unconfig(dev_info_t *parent, uint_t flag,
    ddi_bus_config_op_t op, void *arg);
static int sf_scsi_tgt_init(dev_info_t *, dev_info_t *,
    scsi_hba_tran_t *, struct scsi_device *);
static void sf_scsi_tgt_free(dev_info_t *, dev_info_t *,
    scsi_hba_tran_t *, struct scsi_device *);
static int sf_pkt_alloc_extern(struct sf *, struct sf_pkt *,
    int, int, int);
static void sf_pkt_destroy_extern(struct sf *, struct sf_pkt *);
static struct scsi_pkt *sf_scsi_init_pkt(struct scsi_address *,
    struct scsi_pkt *, struct buf *, int, int, int, int, int (*)(), caddr_t);
static void sf_scsi_destroy_pkt(struct scsi_address *, struct scsi_pkt *);
static void sf_scsi_dmafree(struct scsi_address *, struct scsi_pkt *);
static void sf_scsi_sync_pkt(struct scsi_address *, struct scsi_pkt *);
static int sf_scsi_reset_notify(struct scsi_address *, int,
    void (*)(caddr_t), caddr_t);
static int sf_scsi_get_name(struct scsi_device *, char *, int);
static int sf_scsi_get_bus_addr(struct scsi_device *, char *, int);
static int sf_add_cr_pool(struct sf *);
static int sf_cr_alloc(struct sf *, struct sf_pkt *, int (*)());
static void sf_cr_free(struct sf_cr_pool *, struct sf_pkt *);
static void sf_crpool_free(struct sf *);
static int sf_kmem_cache_constructor(void *, void *, int);
static void sf_kmem_cache_destructor(void *, void *);
static void sf_statec_callback(void *, int);
static int sf_login(struct sf *, uchar_t, uchar_t, uint_t, int);
static int sf_els_transport(struct sf *, struct sf_els_hdr *);
static void sf_els_callback(struct fcal_packet *);
static int sf_do_prli(struct sf *, struct sf_els_hdr *, struct la_els_logi *);
static int sf_do_adisc(struct sf *, struct sf_els_hdr *);
static int sf_do_reportlun(struct sf *, struct sf_els_hdr *,
    struct sf_target *);
static void sf_reportlun_callback(struct fcal_packet *);
static int sf_do_inquiry(struct sf *, struct sf_els_hdr *,
    struct sf_target *);
static void sf_inq_callback(struct fcal_packet *);
static struct fcal_packet *sf_els_alloc(struct sf *, uchar_t, int, int,
    int, caddr_t *, caddr_t *);
static void sf_els_free(struct fcal_packet *);
static struct sf_target *sf_create_target(struct sf *,
    struct sf_els_hdr *, int, int64_t);
#ifdef RAID_LUNS
static struct sf_target *sf_lookup_target(struct sf *, uchar_t *, int);
#else
static struct sf_target *sf_lookup_target(struct sf *, uchar_t *, int64_t);
#endif
static void sf_finish_init(struct sf *, int);
static void sf_offline_target(struct sf *, struct sf_target *);
static void sf_create_devinfo(struct sf *, struct sf_target *, int);
static int sf_create_props(dev_info_t *, struct sf_target *, int);
static int sf_commoncap(struct scsi_address *, char *, int, int, int);
static int sf_getcap(struct scsi_address *, char *, int);
static int sf_setcap(struct scsi_address *, char *, int, int);
static int sf_abort(struct scsi_address *, struct scsi_pkt *);
static int sf_reset(struct scsi_address *, int);
static void sf_abort_all(struct sf *, struct sf_target *, int, int, int);
static int sf_start(struct scsi_address *, struct scsi_pkt *);
static int sf_start_internal(struct sf *, struct sf_pkt *);
static void sf_fill_ids(struct sf *, struct sf_pkt *, struct sf_target *);
static int sf_prepare_pkt(struct sf *, struct sf_pkt *, struct sf_target *);
static int sf_dopoll(struct sf *, struct sf_pkt *);
static void sf_cmd_callback(struct fcal_packet *);
static void sf_throttle(struct sf *);
static void sf_watch(void *);
static void sf_throttle_start(struct sf *);
static void sf_check_targets(struct sf *);
static void sf_check_reset_delay(void *);
static int sf_target_timeout(struct sf *, struct sf_pkt *);
static void sf_force_lip(struct sf *);
static void sf_unsol_els_callback(void *, soc_response_t *, caddr_t);
static struct sf_els_hdr *sf_els_timeout(struct sf *, struct sf_els_hdr *);
/*PRINTFLIKE3*/
static void sf_log(struct sf *, int, const char *, ...);
static int sf_kstat_update(kstat_t *, int);
static int sf_open(dev_t *, int, int, cred_t *);
static int sf_close(dev_t, int, int, cred_t *);
static int sf_ioctl(dev_t, int, intptr_t, int, cred_t *, int *);
static struct sf_target *sf_get_target_from_dip(struct sf *, dev_info_t *);
static int sf_bus_get_eventcookie(dev_info_t *, dev_info_t *, char *,
    ddi_eventcookie_t *);
static int sf_bus_add_eventcall(dev_info_t *, dev_info_t *,
    ddi_eventcookie_t, void (*)(), void *, ddi_callback_id_t *cb_id);
static int sf_bus_remove_eventcall(dev_info_t *devi, ddi_callback_id_t cb_id);
static int sf_bus_post_event(dev_info_t *, dev_info_t *,
    ddi_eventcookie_t, void *);

static void sf_hp_daemon(void *);

/*
 * this is required to be able to supply a control node
 * where ioctls can be executed
 */
struct cb_ops sf_cb_ops = {
	sf_open,			/* open */
	sf_close,			/* close */
	nodev,				/* strategy */
	nodev,				/* print */
	nodev,				/* dump */
	nodev,				/* read */
	nodev,				/* write */
	sf_ioctl,			/* ioctl */
	nodev,				/* devmap */
	nodev,				/* mmap */
	nodev,				/* segmap */
	nochpoll,			/* poll */
	ddi_prop_op,			/* cb_prop_op */
	0,				/* streamtab  */
	D_MP | D_NEW | D_HOTPLUG	/* driver flags */

};

/*
 * autoconfiguration routines.
 */
static struct dev_ops sf_ops = {
	DEVO_REV,		/* devo_rev, */
	0,			/* refcnt  */
	sf_info,		/* info */
	nulldev,		/* identify */
	nulldev,		/* probe */
	sf_attach,		/* attach */
	sf_detach,		/* detach */
	nodev,			/* reset */
	&sf_cb_ops,		/* driver operations */
	NULL,			/* bus operations */
	NULL			/* power management */
};

/* to ensure this module gets loaded in memory when we do */
char _depends_on[] = "misc/scsi";

#define	SF_NAME	"FC-AL FCP Nexus Driver"	/* Name of the module. */
static	char	sf_version[] = "%I% %E%";	/* version of the module */

static struct modldrv modldrv = {
	&mod_driverops, /* Type of module. This one is a driver */
	SF_NAME "%I%",
	&sf_ops,	/* driver ops */
};

static struct modlinkage modlinkage = {
	MODREV_1, (void *)&modldrv, NULL
};

/* XXXXXX The following is here to handle broken targets -- remove it later */
static int sf_reportlun_forever = 0;
/* XXXXXX */
static int sf_lip_on_plogo = 0;
static int sf_els_retries = SF_ELS_RETRIES;
static struct sf *sf_head = NULL;
static int sf_target_scan_cnt = 4;
static int sf_pkt_scan_cnt = 5;
static int sf_pool_scan_cnt = 1800;
static void *sf_state = NULL;
static int sf_watchdog_init = 0;
static int sf_watchdog_time = 0;
static int sf_watchdog_timeout = 1;
static int sf_watchdog_tick;
static int sf_watch_running = 0;
static timeout_id_t sf_watchdog_id;
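/*
 * sf_watchdog_init is in effect a reference count of attached
 * instances: the first attach arms the sf_watch() timeout, and the
 * last detach (or a failed attach) cancels it; both transitions
 * happen under sf_global_mutex.
 */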
static timeout_id_t sf_reset_timeout_id;
static int sf_max_targets = SF_MAX_TARGETS;
static kmutex_t sf_global_mutex;
static int sf_core = 0;
int *sf_token = NULL; /* Must not be static or lint complains. */
static kcondvar_t sf_watch_cv;
extern pri_t minclsyspri;
static ddi_eventcookie_t	sf_insert_eid;
static ddi_eventcookie_t	sf_remove_eid;

static ndi_event_definition_t	sf_event_defs[] = {
{ SF_EVENT_TAG_INSERT, FCAL_INSERT_EVENT, EPL_KERNEL, 0 },
{ SF_EVENT_TAG_REMOVE, FCAL_REMOVE_EVENT, EPL_INTERRUPT, 0 }
};

#define	SF_N_NDI_EVENTS	\
	(sizeof (sf_event_defs) / sizeof (ndi_event_definition_t))

#ifdef DEBUG
static int sf_lip_flag = 1;		/* bool: to allow LIPs */
static int sf_reset_flag = 1;		/* bool: to allow reset after LIP */
static int sf_abort_flag = 0;		/* bool: to do just one abort */
#endif

extern volatile int64_t	lbolt64;

/*
 * for converting between target number (switch) and hard address/AL_PA
 */
static uchar_t sf_switch_to_alpa[] = {
	0xef, 0xe8, 0xe4, 0xe2, 0xe1, 0xe0, 0xdc, 0xda, 0xd9, 0xd6,
	0xd5, 0xd4, 0xd3, 0xd2, 0xd1, 0xce, 0xcd, 0xcc, 0xcb, 0xca,
	0xc9, 0xc7, 0xc6, 0xc5, 0xc3, 0xbc, 0xba, 0xb9, 0xb6, 0xb5,
	0xb4, 0xb3, 0xb2, 0xb1, 0xae, 0xad, 0xac, 0xab, 0xaa, 0xa9,
	0xa7, 0xa6, 0xa5, 0xa3, 0x9f, 0x9e, 0x9d, 0x9b, 0x98, 0x97,
	0x90, 0x8f, 0x88, 0x84, 0x82, 0x81, 0x80, 0x7c, 0x7a, 0x79,
	0x76, 0x75, 0x74, 0x73, 0x72, 0x71, 0x6e, 0x6d, 0x6c, 0x6b,
	0x6a, 0x69, 0x67, 0x66, 0x65, 0x63, 0x5c, 0x5a, 0x59, 0x56,
	0x55, 0x54, 0x53, 0x52, 0x51, 0x4e, 0x4d, 0x4c, 0x4b, 0x4a,
	0x49, 0x47, 0x46, 0x45, 0x43, 0x3c, 0x3a, 0x39, 0x36, 0x35,
	0x34, 0x33, 0x32, 0x31, 0x2e, 0x2d, 0x2c, 0x2b, 0x2a, 0x29,
	0x27, 0x26, 0x25, 0x23, 0x1f, 0x1e, 0x1d, 0x1b, 0x18, 0x17,
	0x10, 0x0f, 0x08, 0x04, 0x02, 0x01
};

static uchar_t sf_alpa_to_switch[] = {
	0x00, 0x7d, 0x7c, 0x00, 0x7b, 0x00, 0x00, 0x00, 0x7a, 0x00,
	0x00, 0x00, 0x00, 0x00, 0x00, 0x79, 0x78, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x77, 0x76, 0x00, 0x00, 0x75, 0x00, 0x74,
	0x73, 0x72, 0x00, 0x00, 0x00, 0x71, 0x00, 0x70, 0x6f, 0x6e,
	0x00, 0x6d, 0x6c, 0x6b, 0x6a, 0x69, 0x68, 0x00, 0x00, 0x67,
	0x66, 0x65, 0x64, 0x63, 0x62, 0x00, 0x00, 0x61, 0x60, 0x00,
	0x5f, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x5e, 0x00, 0x5d,
	0x5c, 0x5b, 0x00, 0x5a, 0x59, 0x58, 0x57, 0x56, 0x55, 0x00,
	0x00, 0x54, 0x53, 0x52, 0x51, 0x50, 0x4f, 0x00, 0x00, 0x4e,
	0x4d, 0x00, 0x4c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x4b,
	0x00, 0x4a, 0x49, 0x48, 0x00, 0x47, 0x46, 0x45, 0x44, 0x43,
	0x42, 0x00, 0x00, 0x41, 0x40, 0x3f, 0x3e, 0x3d, 0x3c, 0x00,
	0x00, 0x3b, 0x3a, 0x00, 0x39, 0x00, 0x00, 0x00, 0x38, 0x37,
	0x36, 0x00, 0x35, 0x00, 0x00, 0x00, 0x34, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x33, 0x32, 0x00, 0x00, 0x00, 0x00, 0x00,
	0x00, 0x31, 0x30, 0x00, 0x00, 0x2f, 0x00, 0x2e, 0x2d, 0x2c,
	0x00, 0x00, 0x00, 0x2b, 0x00, 0x2a, 0x29, 0x28, 0x00, 0x27,
	0x26, 0x25, 0x24, 0x23, 0x22, 0x00, 0x00, 0x21, 0x20, 0x1f,
	0x1e, 0x1d, 0x1c, 0x00, 0x00, 0x1b, 0x1a, 0x00, 0x19, 0x00,
	0x00, 0x00, 0x00, 0x00, 0x00, 0x18, 0x00, 0x17, 0x16, 0x15,
	0x00, 0x14, 0x13, 0x12, 0x11, 0x10, 0x0f, 0x00, 0x00, 0x0e,
	0x0d, 0x0c, 0x0b, 0x0a, 0x09, 0x00, 0x00, 0x08, 0x07, 0x00,
	0x06, 0x00, 0x00, 0x00, 0x05, 0x04, 0x03, 0x00, 0x02, 0x00,
	0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
};
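
/*
 * The two tables above are inverses of one another for valid
 * entries; for example, sf_switch_to_alpa[1] == 0xe8 and
 * sf_alpa_to_switch[0xe8] == 1.  Entries of 0x00 in
 * sf_alpa_to_switch mark byte values that are not valid AL_PAs.
 */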

/*
 * these macros call the proper transport-layer function given
 * a particular transport
 */
#define	soc_transport(a, b, c, d) (*a->fcal_ops->fcal_transport)(b, c, d)
#define	soc_transport_poll(a, b, c, d)\
	(*a->fcal_ops->fcal_transport_poll)(b, c, d)
#define	soc_get_lilp_map(a, b, c, d, e)\
	(*a->fcal_ops->fcal_lilp_map)(b, c, d, e)
#define	soc_force_lip(a, b, c, d, e)\
	(*a->fcal_ops->fcal_force_lip)(b, c, d, e)
#define	soc_abort(a, b, c, d, e)\
	(*a->fcal_ops->fcal_abort_cmd)(b, c, d, e)
#define	soc_force_reset(a, b, c, d)\
	(*a->fcal_ops->fcal_force_reset)(b, c, d)
#define	soc_add_ulp(a, b, c, d, e, f, g, h)\
	(*a->fcal_ops->fcal_add_ulp)(b, c, d, e, f, g, h)
#define	soc_remove_ulp(a, b, c, d, e)\
	(*a->fcal_ops->fcal_remove_ulp)(b, c, d, e)
#define	soc_take_core(a, b) (*a->fcal_ops->fcal_take_core)(b)
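/*
 * Note that in each macro the first argument (the fcal_transport
 * pointer) is used only to locate the ops vector; it is not itself
 * passed to the underlying transport routine.
 */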


/* power management property defines (should be in a common include file?) */
#define	PM_HARDWARE_STATE_PROP		"pm-hardware-state"
#define	PM_NEEDS_SUSPEND_RESUME		"needs-suspend-resume"


/* node properties */
#define	NODE_WWN_PROP			"node-wwn"
#define	PORT_WWN_PROP			"port-wwn"
#define	LIP_CNT_PROP			"lip-count"
#define	TARGET_PROP			"target"
#define	LUN_PROP			"lun"


/*
 * initialize this driver and install this module
 */
int
_init(void)
{
	int	i;

	i = ddi_soft_state_init(&sf_state, sizeof (struct sf),
	    SF_INIT_ITEMS);
	if (i != 0)
		return (i);

	if ((i = scsi_hba_init(&modlinkage)) != 0) {
		ddi_soft_state_fini(&sf_state);
		return (i);
	}

	mutex_init(&sf_global_mutex, NULL, MUTEX_DRIVER, NULL);
	sf_watch_running = 0;
	cv_init(&sf_watch_cv, NULL, CV_DRIVER, NULL);

	if ((i = mod_install(&modlinkage)) != 0) {
		mutex_destroy(&sf_global_mutex);
		cv_destroy(&sf_watch_cv);
		scsi_hba_fini(&modlinkage);
		ddi_soft_state_fini(&sf_state);
		return (i);
	}

	return (i);
}


/*
 * remove this driver module from the system
 */
int
_fini(void)
{
	int	i;

	if ((i = mod_remove(&modlinkage)) == 0) {
		scsi_hba_fini(&modlinkage);
		mutex_destroy(&sf_global_mutex);
		cv_destroy(&sf_watch_cv);
		ddi_soft_state_fini(&sf_state);
	}
	return (i);
}


int
_info(struct modinfo *modinfop)
{
	return (mod_info(&modlinkage, modinfop));
}

/*
 * Given the device number return the devinfo pointer or instance
 */
/*ARGSUSED*/
static int
sf_info(dev_info_t *dip, ddi_info_cmd_t infocmd, void *arg, void **result)
{
	int		instance = SF_MINOR2INST(getminor((dev_t)arg));
	struct sf	*sf;

	switch (infocmd) {
	case DDI_INFO_DEVT2DEVINFO:
		sf = ddi_get_soft_state(sf_state, instance);
		if (sf != NULL)
			*result = sf->sf_dip;
		else {
			*result = NULL;
			return (DDI_FAILURE);
		}
		break;

	case DDI_INFO_DEVT2INSTANCE:
		*result = (void *)(uintptr_t)instance;
		break;
	default:
		return (DDI_FAILURE);
	}
	return (DDI_SUCCESS);
}

/*
 * either attach or resume this driver
 */
static int
sf_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
{
	int instance;
	int mutex_initted = FALSE;
	uint_t ccount;
	size_t i, real_size;
	struct fcal_transport *handle;
	char buf[64];
	struct sf *sf, *tsf;
	scsi_hba_tran_t *tran = NULL;
	int	handle_bound = FALSE;
	kthread_t *tp;


	switch ((int)cmd) {

	case DDI_RESUME:

		/*
		 * we've previously been SF_STATE_OFFLINEd by a DDI_SUSPEND,
		 * so time to undo that and get going again by forcing a
		 * lip
		 */

		instance = ddi_get_instance(dip);

		sf = ddi_get_soft_state(sf_state, instance);
		if (sf == NULL) {
			cmn_err(CE_WARN, "sf%d: bad soft state", instance);
			return (DDI_FAILURE);
		}
		SF_DEBUG(2, (sf, CE_CONT,
		    "sf_attach: DDI_RESUME for sf%d\n", instance));

		/*
		 * clear suspended flag so that normal operations can resume
		 */
		mutex_enter(&sf->sf_mutex);
		sf->sf_state &= ~SF_STATE_SUSPENDED;
		mutex_exit(&sf->sf_mutex);

		/*
		 * force a login by setting our state to offline
		 */
		sf->sf_timer = sf_watchdog_time + SF_OFFLINE_TIMEOUT;
		sf->sf_state = SF_STATE_OFFLINE;

		/*
		 * call transport routine to register state change and
		 * ELS callback routines (to register us as a ULP)
		 */
		soc_add_ulp(sf->sf_sochandle, sf->sf_socp,
		    sf->sf_sochandle->fcal_portno, TYPE_SCSI_FCP,
		    sf_statec_callback, sf_unsol_els_callback, NULL, sf);

		/*
		 * call transport routine to force loop initialization
		 */
		(void) soc_force_lip(sf->sf_sochandle, sf->sf_socp,
		    sf->sf_sochandle->fcal_portno, 0, FCAL_NO_LIP);

		/*
		 * increment watchdog init flag, setting watchdog timeout
		 * if we are the first (since somebody has to do it)
		 */
		mutex_enter(&sf_global_mutex);
		if (!sf_watchdog_init++) {
			mutex_exit(&sf_global_mutex);
			sf_watchdog_id = timeout(sf_watch,
			    (caddr_t)0, sf_watchdog_tick);
		} else {
			mutex_exit(&sf_global_mutex);
		}

		return (DDI_SUCCESS);

	case DDI_ATTACH:

		/*
		 * this instance attaching for the first time
		 */

		instance = ddi_get_instance(dip);

		if (ddi_soft_state_zalloc(sf_state, instance) !=
		    DDI_SUCCESS) {
			cmn_err(CE_WARN, "sf%d: failed to allocate soft state",
			    instance);
			return (DDI_FAILURE);
		}

		sf = ddi_get_soft_state(sf_state, instance);
		SF_DEBUG(4, (sf, CE_CONT,
		    "sf_attach: DDI_ATTACH for sf%d\n", instance));
		if (sf == NULL) {
			/* this shouldn't happen since we just allocated it */
			cmn_err(CE_WARN, "sf%d: bad soft state", instance);
			return (DDI_FAILURE);
		}

		/*
		 * from this point on, if there's an error, we must de-allocate
		 * soft state before returning DDI_FAILURE
		 */

		if ((handle = ddi_get_parent_data(dip)) == NULL) {
			cmn_err(CE_WARN,
			    "sf%d: failed to obtain transport handle",
			    instance);
			goto fail;
		}

		/* fill in our soft state structure */
		sf->sf_dip = dip;
		sf->sf_state = SF_STATE_INIT;
		sf->sf_throttle = handle->fcal_cmdmax;
		sf->sf_sochandle = handle;
		sf->sf_socp = handle->fcal_handle;
		sf->sf_check_n_close = 0;

		/* create a command/response buffer pool for this instance */
		if (sf_add_cr_pool(sf) != DDI_SUCCESS) {
			cmn_err(CE_WARN,
			    "sf%d: failed to allocate command/response pool",
			    instance);
			goto fail;
		}

		/* create a packet cache for this instance */
		(void) sprintf(buf, "sf%d_cache", instance);
		sf->sf_pkt_cache = kmem_cache_create(buf,
		    sizeof (fcal_packet_t) + sizeof (struct sf_pkt) +
		    sizeof (struct scsi_pkt), 8,
		    sf_kmem_cache_constructor, sf_kmem_cache_destructor,
		    NULL, NULL, NULL, 0);
		if (sf->sf_pkt_cache == NULL) {
			cmn_err(CE_WARN, "sf%d: failed to allocate kmem cache",
			    instance);
			goto fail;
		}
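
		/*
		 * Each cache object is laid out as a struct sf_pkt
		 * immediately followed by a fcal_packet and then a
		 * scsi_pkt; sf_scsi_init_pkt() relies on this layout
		 * when it carves up an allocation.
		 */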

		/* set up a handle and allocate memory for DMA */
		if (ddi_dma_alloc_handle(sf->sf_dip, sf->sf_sochandle->
		    fcal_dmaattr, DDI_DMA_DONTWAIT, NULL, &sf->
		    sf_lilp_dmahandle) != DDI_SUCCESS) {
			cmn_err(CE_WARN,
			    "sf%d: failed to allocate dma handle for lilp map",
			    instance);
			goto fail;
		}
		i = sizeof (struct fcal_lilp_map) + 1;
		if (ddi_dma_mem_alloc(sf->sf_lilp_dmahandle,
		    i, sf->sf_sochandle->
		    fcal_accattr, DDI_DMA_CONSISTENT, DDI_DMA_DONTWAIT, NULL,
		    (caddr_t *)&sf->sf_lilp_map, &real_size,
		    &sf->sf_lilp_acchandle) != DDI_SUCCESS) {
			cmn_err(CE_WARN, "sf%d: failed to allocate lilp map",
			    instance);
			goto fail;
		}
		if (real_size < i) {
			/* no error message ??? */
			goto fail;		/* trouble allocating memory */
		}

		/*
		 * set up the address for the DMA transfers (getting a cookie)
		 */
		if (ddi_dma_addr_bind_handle(sf->sf_lilp_dmahandle, NULL,
		    (caddr_t)sf->sf_lilp_map, real_size,
		    DDI_DMA_READ | DDI_DMA_CONSISTENT, DDI_DMA_DONTWAIT, NULL,
		    &sf->sf_lilp_dmacookie, &ccount) != DDI_DMA_MAPPED) {
			cmn_err(CE_WARN,
			    "sf%d: failed to bind dma handle for lilp map",
			    instance);
			goto fail;
		}
		handle_bound = TRUE;
		/* ensure only one cookie was allocated */
		if (ccount != 1) {
			goto fail;
		}
		/*
		 * round the LILP map and DMA cookie addresses up to an
		 * even boundary (the extra byte allocated above makes
		 * this safe)
		 */
		sf->sf_lilp_map = (struct fcal_lilp_map *)(((uintptr_t)sf->
		    sf_lilp_map + 1) & ~1);
		sf->sf_lilp_dmacookie.dmac_address = (sf->
		    sf_lilp_dmacookie.dmac_address + 1) & ~1;

		/* set up all of our mutexes and condition variables */
		mutex_init(&sf->sf_mutex, NULL, MUTEX_DRIVER, NULL);
		mutex_init(&sf->sf_cmd_mutex, NULL, MUTEX_DRIVER, NULL);
		mutex_init(&sf->sf_cr_mutex, NULL, MUTEX_DRIVER, NULL);
		mutex_init(&sf->sf_hp_daemon_mutex, NULL, MUTEX_DRIVER, NULL);
		cv_init(&sf->sf_cr_cv, NULL, CV_DRIVER, NULL);
		cv_init(&sf->sf_hp_daemon_cv, NULL, CV_DRIVER, NULL);

		mutex_initted = TRUE;

		/* create our devctl minor node */
		if (ddi_create_minor_node(dip, "devctl", S_IFCHR,
		    SF_INST2DEVCTL_MINOR(instance),
		    DDI_NT_NEXUS, 0) != DDI_SUCCESS) {
			cmn_err(CE_WARN, "sf%d: ddi_create_minor_node failed"
			    " for devctl", instance);
			goto fail;
		}

		/* create fc minor node */
		if (ddi_create_minor_node(dip, "fc", S_IFCHR,
		    SF_INST2FC_MINOR(instance), DDI_NT_FC_ATTACHMENT_POINT,
		    0) != DDI_SUCCESS) {
			cmn_err(CE_WARN, "sf%d: ddi_create_minor_node failed"
			    " for fc", instance);
			goto fail;
		}
		/* allocate a SCSI transport structure */
		tran = scsi_hba_tran_alloc(dip, 0);
		if (tran == NULL) {
			/* remove all minor nodes created */
			ddi_remove_minor_node(dip, NULL);
			cmn_err(CE_WARN, "sf%d: scsi_hba_tran_alloc failed",
			    instance);
			goto fail;
		}

		/* save ptr to new transport structure and fill it in */
		sf->sf_tran = tran;

		tran->tran_hba_private		= sf;
		tran->tran_tgt_private		= NULL;
		tran->tran_tgt_init		= sf_scsi_tgt_init;
		tran->tran_tgt_probe		= NULL;
		tran->tran_tgt_free		= sf_scsi_tgt_free;

		tran->tran_start		= sf_start;
		tran->tran_abort		= sf_abort;
		tran->tran_reset		= sf_reset;
		tran->tran_getcap		= sf_getcap;
		tran->tran_setcap		= sf_setcap;
		tran->tran_init_pkt		= sf_scsi_init_pkt;
		tran->tran_destroy_pkt		= sf_scsi_destroy_pkt;
		tran->tran_dmafree		= sf_scsi_dmafree;
		tran->tran_sync_pkt		= sf_scsi_sync_pkt;
		tran->tran_reset_notify		= sf_scsi_reset_notify;

		/*
		 * register event notification routines with scsa
		 */
		tran->tran_get_eventcookie	= sf_bus_get_eventcookie;
		tran->tran_add_eventcall	= sf_bus_add_eventcall;
		tran->tran_remove_eventcall	= sf_bus_remove_eventcall;
		tran->tran_post_event		= sf_bus_post_event;

		/*
		 * register bus configure/unconfigure
		 */
		tran->tran_bus_config		= sf_scsi_bus_config;
		tran->tran_bus_unconfig		= sf_scsi_bus_unconfig;

		/*
		 * allocate an ndi event handle
		 */
		sf->sf_event_defs = (ndi_event_definition_t *)
		    kmem_zalloc(sizeof (sf_event_defs), KM_SLEEP);

		bcopy(sf_event_defs, sf->sf_event_defs,
		    sizeof (sf_event_defs));

		(void) ndi_event_alloc_hdl(dip, NULL,
		    &sf->sf_event_hdl, NDI_SLEEP);

		sf->sf_events.ndi_events_version = NDI_EVENTS_REV1;
		sf->sf_events.ndi_n_events = SF_N_NDI_EVENTS;
		sf->sf_events.ndi_event_defs = sf->sf_event_defs;

		if (ndi_event_bind_set(sf->sf_event_hdl,
		    &sf->sf_events, NDI_SLEEP) != NDI_SUCCESS) {
			goto fail;
		}

		tran->tran_get_name		= sf_scsi_get_name;
		tran->tran_get_bus_addr		= sf_scsi_get_bus_addr;

		/* setup and attach SCSI hba transport */
		if (scsi_hba_attach_setup(dip, sf->sf_sochandle->
		    fcal_dmaattr, tran, SCSI_HBA_TRAN_CLONE) != DDI_SUCCESS) {
			cmn_err(CE_WARN, "sf%d: scsi_hba_attach_setup failed",
			    instance);
			goto fail;
		}

		/* set up kstats */
		if ((sf->sf_ksp = kstat_create("sf", instance, "statistics",
		    "controller", KSTAT_TYPE_RAW, sizeof (struct sf_stats),
		    KSTAT_FLAG_VIRTUAL)) == NULL) {
			cmn_err(CE_WARN, "sf%d: failed to create kstat",
			    instance);
		} else {
			sf->sf_stats.version = 2;
			(void) sprintf(sf->sf_stats.drvr_name,
			    "%s: %s", SF_NAME, sf_version);
			sf->sf_ksp->ks_data = (void *)&sf->sf_stats;
			sf->sf_ksp->ks_private = sf;
			sf->sf_ksp->ks_update = sf_kstat_update;
			kstat_install(sf->sf_ksp);
		}

		/* create the hotplug thread */
		mutex_enter(&sf->sf_hp_daemon_mutex);
		tp = thread_create(NULL, 0,
		    (void (*)())sf_hp_daemon, sf, 0, &p0, TS_RUN, minclsyspri);
		sf->sf_hp_tid = tp->t_did;
		mutex_exit(&sf->sf_hp_daemon_mutex);

		/* add this soft state instance to the head of the list */
		mutex_enter(&sf_global_mutex);
		sf->sf_next = sf_head;
		tsf = sf_head;
		sf_head = sf;

		/*
		 * find entry in list that has the same FC-AL handle (if any)
		 */
		while (tsf != NULL) {
			if (tsf->sf_socp == sf->sf_socp) {
				break;		/* found matching entry */
			}
			tsf = tsf->sf_next;
		}

		if (tsf != NULL) {
			/* if we found a matching entry keep track of it */
			sf->sf_sibling = tsf;
		}

		/*
		 * increment watchdog init flag, setting watchdog timeout
		 * if we are the first (since somebody has to do it)
		 */
		if (!sf_watchdog_init++) {
			mutex_exit(&sf_global_mutex);
			sf_watchdog_tick = sf_watchdog_timeout *
			    drv_usectohz(1000000);
			sf_watchdog_id = timeout(sf_watch,
			    NULL, sf_watchdog_tick);
		} else {
			mutex_exit(&sf_global_mutex);
		}

		if (tsf != NULL) {
			/*
			 * set up matching entry to be our sibling
			 */
			mutex_enter(&tsf->sf_mutex);
			tsf->sf_sibling = sf;
			mutex_exit(&tsf->sf_mutex);
		}

		/*
		 * create this property so that PM code knows we want
		 * to be suspended at PM time
		 */
		(void) ddi_prop_update_string(DDI_DEV_T_NONE, dip,
		    PM_HARDWARE_STATE_PROP, PM_NEEDS_SUSPEND_RESUME);

		/* log the fact that we have a new device */
		ddi_report_dev(dip);

		/*
		 * force a login by setting our state to offline
		 */
		sf->sf_timer = sf_watchdog_time + SF_OFFLINE_TIMEOUT;
		sf->sf_state = SF_STATE_OFFLINE;

		/*
		 * call transport routine to register state change and
		 * ELS callback routines (to register us as a ULP)
		 */
		soc_add_ulp(sf->sf_sochandle, sf->sf_socp,
		    sf->sf_sochandle->fcal_portno, TYPE_SCSI_FCP,
		    sf_statec_callback, sf_unsol_els_callback, NULL, sf);

		/*
		 * call transport routine to force loop initialization
		 */
		(void) soc_force_lip(sf->sf_sochandle, sf->sf_socp,
		    sf->sf_sochandle->fcal_portno, 0, FCAL_NO_LIP);
		sf->sf_reset_time = lbolt64;
		return (DDI_SUCCESS);

	default:
		return (DDI_FAILURE);
	}

fail:
	cmn_err(CE_WARN, "sf%d: failed to attach", instance);

	/*
	 * Unbind and free event set
	 */
	if (sf->sf_event_hdl) {
		(void) ndi_event_unbind_set(sf->sf_event_hdl,
		    &sf->sf_events, NDI_SLEEP);
		(void) ndi_event_free_hdl(sf->sf_event_hdl);
	}

	if (sf->sf_event_defs) {
		kmem_free(sf->sf_event_defs, sizeof (sf_event_defs));
	}

	if (sf->sf_tran != NULL) {
		scsi_hba_tran_free(sf->sf_tran);
	}
	while (sf->sf_cr_pool != NULL) {
		sf_crpool_free(sf);
	}
	if (sf->sf_lilp_dmahandle != NULL) {
		if (handle_bound) {
			(void) ddi_dma_unbind_handle(sf->sf_lilp_dmahandle);
		}
		ddi_dma_free_handle(&sf->sf_lilp_dmahandle);
	}
	if (sf->sf_pkt_cache != NULL) {
		kmem_cache_destroy(sf->sf_pkt_cache);
	}
	if (sf->sf_lilp_map != NULL) {
		ddi_dma_mem_free(&sf->sf_lilp_acchandle);
	}
	if (sf->sf_ksp != NULL) {
		kstat_delete(sf->sf_ksp);
	}
	if (mutex_initted) {
		mutex_destroy(&sf->sf_mutex);
		mutex_destroy(&sf->sf_cmd_mutex);
		mutex_destroy(&sf->sf_cr_mutex);
		mutex_destroy(&sf->sf_hp_daemon_mutex);
		cv_destroy(&sf->sf_cr_cv);
		cv_destroy(&sf->sf_hp_daemon_cv);
	}
	mutex_enter(&sf_global_mutex);

	/*
	 * kill off the watchdog if we are the last instance
	 */
	if (!--sf_watchdog_init) {
		timeout_id_t tid = sf_watchdog_id;
		mutex_exit(&sf_global_mutex);
		(void) untimeout(tid);
	} else {
		mutex_exit(&sf_global_mutex);
	}

	ddi_soft_state_free(sf_state, instance);

	if (tran != NULL) {
		/* remove all minor nodes */
		ddi_remove_minor_node(dip, NULL);
	}

	return (DDI_FAILURE);
}


/* ARGSUSED */
static int
sf_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
{
	struct sf		*sf;
	int			instance;
	int			i;
	struct sf_target	*target;
	timeout_id_t		tid;


	/* NO OTHER THREADS ARE RUNNING */

	instance = ddi_get_instance(dip);

	if ((sf = ddi_get_soft_state(sf_state, instance)) == NULL) {
		cmn_err(CE_WARN, "sf_detach, sf%d: bad soft state", instance);
		return (DDI_FAILURE);
	}

	switch (cmd) {

	case DDI_SUSPEND:
		/*
		 * suspend our instance
		 */

		SF_DEBUG(2, (sf, CE_CONT,
		    "sf_detach: DDI_SUSPEND for sf%d\n", instance));
		/*
		 * There is a race condition in socal: if a ULP removes
		 * itself from the callback list while callbacks are in
		 * progress, socal's callback loop can follow a stale
		 * cblist->next pointer and panic.
		 */

		/* call transport to remove and unregister our callbacks */
		soc_remove_ulp(sf->sf_sochandle, sf->sf_socp,
		    sf->sf_sochandle->fcal_portno, TYPE_SCSI_FCP, sf);

		/*
		 * begin process of clearing outstanding commands
		 * by issuing a lip
		 */
		sf_force_lip(sf);

		/*
		 * toggle the device OFFLINE in order to cause
		 * outstanding commands to drain
		 */
		mutex_enter(&sf->sf_mutex);
		sf->sf_lip_cnt++;
		sf->sf_timer = sf_watchdog_time + SF_OFFLINE_TIMEOUT;
		sf->sf_state = (SF_STATE_OFFLINE | SF_STATE_SUSPENDED);
		for (i = 0; i < sf_max_targets; i++) {
			target = sf->sf_targets[i];
			if (target != NULL) {
				struct sf_target *ntarget;

				mutex_enter(&target->sft_mutex);
				if (!(target->sft_state & SF_TARGET_OFFLINE)) {
					target->sft_state |=
					    (SF_TARGET_BUSY | SF_TARGET_MARK);
				}
				/* do this for all LUNs as well */
				for (ntarget = target->sft_next_lun;
				    ntarget;
				    ntarget = ntarget->sft_next_lun) {
					mutex_enter(&ntarget->sft_mutex);
					if (!(ntarget->sft_state &
					    SF_TARGET_OFFLINE)) {
						ntarget->sft_state |=
						    (SF_TARGET_BUSY |
						    SF_TARGET_MARK);
					}
					mutex_exit(&ntarget->sft_mutex);
				}
				mutex_exit(&target->sft_mutex);
			}
		}
		mutex_exit(&sf->sf_mutex);
		mutex_enter(&sf_global_mutex);

		/*
		 * kill off the watchdog if we are the last instance
		 */
		if (!--sf_watchdog_init) {
			tid = sf_watchdog_id;
			mutex_exit(&sf_global_mutex);
			(void) untimeout(tid);
		} else {
			mutex_exit(&sf_global_mutex);
		}

		return (DDI_SUCCESS);

	case DDI_DETACH:
		/*
		 * detach this instance
		 */

		SF_DEBUG(2, (sf, CE_CONT,
		    "sf_detach: DDI_DETACH for sf%d\n", instance));

		/* remove this "sf" from the list of sf softstates */
		sf_softstate_unlink(sf);

		/*
		 * prior to taking any DDI_DETACH actions, toggle the
		 * device OFFLINE in order to cause outstanding
		 * commands to drain
		 */
		mutex_enter(&sf->sf_mutex);
		sf->sf_lip_cnt++;
		sf->sf_timer = sf_watchdog_time + SF_OFFLINE_TIMEOUT;
		sf->sf_state = SF_STATE_OFFLINE;
		for (i = 0; i < sf_max_targets; i++) {
			target = sf->sf_targets[i];
			if (target != NULL) {
				struct sf_target *ntarget;

				mutex_enter(&target->sft_mutex);
				if (!(target->sft_state & SF_TARGET_OFFLINE)) {
					target->sft_state |=
					    (SF_TARGET_BUSY | SF_TARGET_MARK);
				}
				for (ntarget = target->sft_next_lun;
				    ntarget;
				    ntarget = ntarget->sft_next_lun) {
					mutex_enter(&ntarget->sft_mutex);
					if (!(ntarget->sft_state &
					    SF_TARGET_OFFLINE)) {
						ntarget->sft_state |=
						    (SF_TARGET_BUSY |
						    SF_TARGET_MARK);
					}
					mutex_exit(&ntarget->sft_mutex);
				}
				mutex_exit(&target->sft_mutex);
			}
		}
		mutex_exit(&sf->sf_mutex);

		/* call transport to remove and unregister our callbacks */
		soc_remove_ulp(sf->sf_sochandle, sf->sf_socp,
		    sf->sf_sochandle->fcal_portno, TYPE_SCSI_FCP, sf);

		/*
		 * kill off the watchdog if we are the last instance
		 */
		mutex_enter(&sf_global_mutex);
		if (!--sf_watchdog_init) {
			tid = sf_watchdog_id;
			mutex_exit(&sf_global_mutex);
			(void) untimeout(tid);
		} else {
			mutex_exit(&sf_global_mutex);
		}

		/* signal sf_hp_daemon() to exit and wait for exit */
		mutex_enter(&sf->sf_hp_daemon_mutex);
		ASSERT(sf->sf_hp_tid);
		sf->sf_hp_exit = 1;		/* flag exit */
		cv_signal(&sf->sf_hp_daemon_cv);
		mutex_exit(&sf->sf_hp_daemon_mutex);
		thread_join(sf->sf_hp_tid);	/* wait for hotplug to exit */

		/*
		 * Unbind and free event set
		 */
		if (sf->sf_event_hdl) {
			(void) ndi_event_unbind_set(sf->sf_event_hdl,
			    &sf->sf_events, NDI_SLEEP);
			(void) ndi_event_free_hdl(sf->sf_event_hdl);
		}

		if (sf->sf_event_defs) {
			kmem_free(sf->sf_event_defs, sizeof (sf_event_defs));
		}

		/* detach this instance of the HBA driver */
		scsi_hba_detach(dip);
		scsi_hba_tran_free(sf->sf_tran);

		/* deallocate/unbind DMA handle for lilp map */
		if (sf->sf_lilp_map != NULL) {
			(void) ddi_dma_unbind_handle(sf->sf_lilp_dmahandle);
			if (sf->sf_lilp_dmahandle != NULL) {
				ddi_dma_free_handle(&sf->sf_lilp_dmahandle);
			}
			ddi_dma_mem_free(&sf->sf_lilp_acchandle);
		}

		/*
		 * the kmem cache must be destroyed before free'ing
		 * up the crpools
		 *
		 * our finagle of "ntot" and "nfree"
		 * causes an ASSERT failure in "sf_cr_free()"
		 * if the kmem cache is free'd after invoking
		 * "sf_crpool_free()".
		 */
		kmem_cache_destroy(sf->sf_pkt_cache);

		SF_DEBUG(2, (sf, CE_CONT,
		    "sf_detach: sf_crpool_free() for instance 0x%x\n",
		    instance));
		while (sf->sf_cr_pool != NULL) {
			/*
			 * set ntot to nfree for this particular entry
			 *
			 * this causes sf_crpool_free() to update
			 * the cr_pool list when deallocating this entry
			 */
			sf->sf_cr_pool->ntot = sf->sf_cr_pool->nfree;
			sf_crpool_free(sf);
		}

		/*
		 * now that the cr_pool's are gone it's safe
		 * to destroy all softstate mutex's and cv's
		 */
		mutex_destroy(&sf->sf_mutex);
		mutex_destroy(&sf->sf_cmd_mutex);
		mutex_destroy(&sf->sf_cr_mutex);
		mutex_destroy(&sf->sf_hp_daemon_mutex);
		cv_destroy(&sf->sf_cr_cv);
		cv_destroy(&sf->sf_hp_daemon_cv);

		/* remove all minor nodes from the device tree */
		ddi_remove_minor_node(dip, NULL);

		/* remove properties created during attach() */
		ddi_prop_remove_all(dip);

		/* remove kstat's if present */
		if (sf->sf_ksp != NULL) {
			kstat_delete(sf->sf_ksp);
		}

		SF_DEBUG(2, (sf, CE_CONT,
		    "sf_detach: ddi_soft_state_free() for instance 0x%x\n",
		    instance));
		ddi_soft_state_free(sf_state, instance);
		return (DDI_SUCCESS);

	default:
		SF_DEBUG(2, (sf, CE_CONT, "sf_detach: sf%d unknown cmd %x\n",
		    instance, (int)cmd));
		return (DDI_FAILURE);
	}
}


/*
 * sf_softstate_unlink() - remove an sf instance from the list of softstates
 */
static void
sf_softstate_unlink(struct sf *sf)
{
	struct sf	*sf_ptr;
	struct sf	*sf_found_sibling;
	struct sf	*sf_reposition = NULL;


	mutex_enter(&sf_global_mutex);
	while (sf_watch_running) {
		/* Busy working the list -- wait */
		cv_wait(&sf_watch_cv, &sf_global_mutex);
	}
	if ((sf_found_sibling = sf->sf_sibling) != NULL) {
		/*
		 * we have a sibling so NULL out its reference to us
		 */
		mutex_enter(&sf_found_sibling->sf_mutex);
		sf_found_sibling->sf_sibling = NULL;
		mutex_exit(&sf_found_sibling->sf_mutex);
	}

	/* remove our instance from the global list */
	if (sf == sf_head) {
		/* we were at the head of the list */
		sf_head = sf->sf_next;
	} else {
		/* find us in the list */
		for (sf_ptr = sf_head;
		    sf_ptr != NULL;
		    sf_ptr = sf_ptr->sf_next) {
			if (sf_ptr == sf) {
				break;
			}
			/* remember this place */
			sf_reposition = sf_ptr;
		}
		ASSERT(sf_ptr == sf);
		ASSERT(sf_reposition != NULL);

		sf_reposition->sf_next = sf_ptr->sf_next;
	}
	mutex_exit(&sf_global_mutex);
}


static int
sf_scsi_bus_config(dev_info_t *parent, uint_t flag,
    ddi_bus_config_op_t op, void *arg, dev_info_t **childp)
{
	int64_t		reset_delay;
	struct sf	*sf;

	sf = ddi_get_soft_state(sf_state, ddi_get_instance(parent));
	ASSERT(sf);

	reset_delay = (int64_t)(USEC_TO_TICK(SF_INIT_WAIT_TIMEOUT)) -
	    (lbolt64 - sf->sf_reset_time);
	if (reset_delay < 0)
		reset_delay = 0;
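
	/*
	 * reset_delay is the unexpired portion (in ticks) of the
	 * SF_INIT_WAIT_TIMEOUT window that began at the last forced
	 * LIP; ndi_busop_bus_config() will wait up to that long for
	 * child devices to appear before enumerating.
	 */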

	if (sf_bus_config_debug)
		flag |= NDI_DEVI_DEBUG;

	return (ndi_busop_bus_config(parent, flag, op,
	    arg, childp, (clock_t)reset_delay));
}

static int
sf_scsi_bus_unconfig(dev_info_t *parent, uint_t flag,
    ddi_bus_config_op_t op, void *arg)
{
	if (sf_bus_config_debug)
		flag |= NDI_DEVI_DEBUG;

	return (ndi_busop_bus_unconfig(parent, flag, op, arg));
}


/*
 * called by transport to initialize a SCSI target
 */
/* ARGSUSED */
static int
sf_scsi_tgt_init(dev_info_t *hba_dip, dev_info_t *tgt_dip,
    scsi_hba_tran_t *hba_tran, struct scsi_device *sd)
{
#ifdef RAID_LUNS
	int lun;
#else
	int64_t lun;
#endif
	struct sf_target *target;
	struct sf *sf = (struct sf *)hba_tran->tran_hba_private;
	int i, t_len;
	unsigned int lip_cnt;
	unsigned char wwn[FC_WWN_SIZE];


	/* get and validate our SCSI target ID */
	i = sd->sd_address.a_target;
	if (i >= sf_max_targets) {
		return (DDI_NOT_WELL_FORMED);
	}

	/* get our port WWN property */
	t_len = sizeof (wwn);
	if (ddi_prop_op(DDI_DEV_T_ANY, tgt_dip, PROP_LEN_AND_VAL_BUF,
	    DDI_PROP_DONTPASS | DDI_PROP_CANSLEEP, PORT_WWN_PROP,
	    (caddr_t)&wwn, &t_len) != DDI_SUCCESS) {
		/* no port WWN property - ignore the OBP stub node */
		return (DDI_NOT_WELL_FORMED);
	}

	/* get our LIP count property */
	t_len = sizeof (lip_cnt);
	if (ddi_prop_op(DDI_DEV_T_ANY, tgt_dip, PROP_LEN_AND_VAL_BUF,
	    DDI_PROP_DONTPASS | DDI_PROP_CANSLEEP, LIP_CNT_PROP,
	    (caddr_t)&lip_cnt, &t_len) != DDI_SUCCESS) {
		return (DDI_FAILURE);
	}
	/* and our LUN property */
	t_len = sizeof (lun);
	if (ddi_prop_op(DDI_DEV_T_ANY, tgt_dip, PROP_LEN_AND_VAL_BUF,
	    DDI_PROP_DONTPASS | DDI_PROP_CANSLEEP, "lun",
	    (caddr_t)&lun, &t_len) != DDI_SUCCESS) {
		return (DDI_FAILURE);
	}

	/* find the target structure for this instance */
	mutex_enter(&sf->sf_mutex);
	if ((target = sf_lookup_target(sf, wwn, lun)) == NULL) {
		mutex_exit(&sf->sf_mutex);
		return (DDI_FAILURE);
	}

	mutex_enter(&target->sft_mutex);
	if ((sf->sf_lip_cnt == lip_cnt) && !(target->sft_state
	    & SF_TARGET_INIT_DONE)) {
		/*
		 * set links between HBA transport and target structures
		 * and set done flag
		 */
		hba_tran->tran_tgt_private = target;
		target->sft_tran = hba_tran;
		target->sft_state |= SF_TARGET_INIT_DONE;
	} else {
		/* already initialized ?? */
		mutex_exit(&target->sft_mutex);
		mutex_exit(&sf->sf_mutex);
		return (DDI_FAILURE);
	}
	mutex_exit(&target->sft_mutex);
	mutex_exit(&sf->sf_mutex);

	return (DDI_SUCCESS);
}


/*
 * called by transport to free a target
 */
/* ARGSUSED */
static void
sf_scsi_tgt_free(dev_info_t *hba_dip, dev_info_t *tgt_dip,
    scsi_hba_tran_t *hba_tran, struct scsi_device *sd)
{
	struct sf_target *target = hba_tran->tran_tgt_private;

	if (target != NULL) {
		mutex_enter(&target->sft_mutex);
		target->sft_tran = NULL;
		target->sft_state &= ~SF_TARGET_INIT_DONE;
		mutex_exit(&target->sft_mutex);
	}
}

/*
 * allocator for non-std size cdb/pkt_private/status -- returns FALSE
 * on success, else TRUE (i.e. the return value is a failure flag,
 * which is how the caller in sf_scsi_init_pkt() uses it)
 */
/*ARGSUSED*/
static int
sf_pkt_alloc_extern(struct sf *sf, struct sf_pkt *cmd,
    int tgtlen, int statuslen, int kf)
{
	caddr_t scbp, tgt;
	int failure = FALSE;
	struct scsi_pkt *pkt = CMD2PKT(cmd);


	tgt = scbp = NULL;

	if (tgtlen > PKT_PRIV_LEN) {
		if ((tgt = kmem_zalloc(tgtlen, kf)) == NULL) {
			failure = TRUE;
		} else {
			cmd->cmd_flags |= CFLAG_PRIVEXTERN;
			pkt->pkt_private = tgt;
		}
	}
	if (statuslen > EXTCMDS_STATUS_SIZE) {
		if ((scbp = kmem_zalloc((size_t)statuslen, kf)) == NULL) {
			failure = TRUE;
		} else {
			cmd->cmd_flags |= CFLAG_SCBEXTERN;
			pkt->pkt_scbp = (opaque_t)scbp;
		}
	}
	if (failure) {
		sf_pkt_destroy_extern(sf, cmd);
	}
	return (failure);
}


/*
 * deallocator for non-std size cdb/pkt_private/status
 */
static void
sf_pkt_destroy_extern(struct sf *sf, struct sf_pkt *cmd)
{
	struct scsi_pkt *pkt = CMD2PKT(cmd);

	if (cmd->cmd_flags & CFLAG_FREE) {
		cmn_err(CE_PANIC,
		    "sf_scsi_impl_pktfree: freeing free packet");
		_NOTE(NOT_REACHED)
		/* NOTREACHED */
	}
	if (cmd->cmd_flags & CFLAG_SCBEXTERN) {
		kmem_free((caddr_t)pkt->pkt_scbp,
		    (size_t)cmd->cmd_scblen);
	}
	if (cmd->cmd_flags & CFLAG_PRIVEXTERN) {
		kmem_free((caddr_t)pkt->pkt_private,
		    (size_t)cmd->cmd_privlen);
	}

	cmd->cmd_flags = CFLAG_FREE;
	kmem_cache_free(sf->sf_pkt_cache, (void *)cmd);
}


/*
 * create or initialize a SCSI packet -- called internally and
 * by the transport
 */
static struct scsi_pkt *
sf_scsi_init_pkt(struct scsi_address *ap, struct scsi_pkt *pkt,
    struct buf *bp, int cmdlen, int statuslen, int tgtlen,
    int flags, int (*callback)(), caddr_t arg)
{
	int kf;
	int failure = FALSE;
	struct sf_pkt *cmd;
	struct sf *sf = ADDR2SF(ap);
	struct sf_target *target = ADDR2TARGET(ap);
	struct sf_pkt	*new_cmd = NULL;
	struct fcal_packet	*fpkt;
	fc_frame_header_t	*hp;
	struct fcp_cmd *fcmd;


	/*
	 * If we've already allocated a pkt once,
	 * this request is for dma allocation only.
	 */
	if (pkt == NULL) {

		/*
		 * First step of sf_scsi_init_pkt:  pkt allocation
		 */
		if (cmdlen > FCP_CDB_SIZE) {
			return (NULL);
		}

		kf = (callback == SLEEP_FUNC)? KM_SLEEP: KM_NOSLEEP;

		if ((cmd = kmem_cache_alloc(sf->sf_pkt_cache, kf)) != NULL) {
			/*
			 * Selective zeroing of the pkt.
			 */

			cmd->cmd_flags = 0;
			cmd->cmd_forw = 0;
			cmd->cmd_back = 0;
			cmd->cmd_next = 0;
			cmd->cmd_pkt = (struct scsi_pkt *)((char *)cmd +
			    sizeof (struct sf_pkt) + sizeof (struct
			    fcal_packet));
			cmd->cmd_fp_pkt = (struct fcal_packet *)((char *)cmd +
			    sizeof (struct sf_pkt));
			cmd->cmd_fp_pkt->fcal_pkt_private = (opaque_t)cmd;
			cmd->cmd_state = SF_STATE_IDLE;
			cmd->cmd_pkt->pkt_ha_private = (opaque_t)cmd;
			cmd->cmd_pkt->pkt_scbp = (opaque_t)cmd->cmd_scsi_scb;
			cmd->cmd_pkt->pkt_comp	= NULL;
			cmd->cmd_pkt->pkt_flags	= 0;
			cmd->cmd_pkt->pkt_time	= 0;
			cmd->cmd_pkt->pkt_resid	= 0;
			cmd->cmd_pkt->pkt_reason = 0;
			cmd->cmd_cdblen = (uchar_t)cmdlen;
			cmd->cmd_scblen		= statuslen;
			cmd->cmd_privlen	= tgtlen;
			cmd->cmd_pkt->pkt_address = *ap;

			/* zero pkt_private */
			(int *)(cmd->cmd_pkt->pkt_private =
			    cmd->cmd_pkt_private);
			bzero((caddr_t)cmd->cmd_pkt->pkt_private,
			    PKT_PRIV_LEN);
		} else {
			failure = TRUE;
		}

		if (failure ||
		    (tgtlen > PKT_PRIV_LEN) ||
		    (statuslen > EXTCMDS_STATUS_SIZE)) {
			if (!failure) {
				/* need to allocate more space */
				failure = sf_pkt_alloc_extern(sf, cmd,
				    tgtlen, statuslen, kf);
			}
			if (failure) {
				return (NULL);
			}
		}

		fpkt = cmd->cmd_fp_pkt;
		if (cmd->cmd_block == NULL) {

			/* allocate cmd/response pool buffers */
			if (sf_cr_alloc(sf, cmd, callback) == DDI_FAILURE) {
				sf_pkt_destroy_extern(sf, cmd);
				return (NULL);
			}

			/* fill in the FC-AL packet */
			fpkt->fcal_pkt_cookie = sf->sf_socp;
			fpkt->fcal_pkt_comp = sf_cmd_callback;
			fpkt->fcal_pkt_flags = 0;
			fpkt->fcal_magic = FCALP_MAGIC;
			fpkt->fcal_socal_request.sr_soc_hdr.sh_flags =
			    (ushort_t)(SOC_FC_HEADER |
			    sf->sf_sochandle->fcal_portno);
			fpkt->fcal_socal_request.sr_soc_hdr.sh_class = 3;
			fpkt->fcal_socal_request.sr_cqhdr.cq_hdr_count = 1;
			fpkt->fcal_socal_request.sr_cqhdr.cq_hdr_flags = 0;
			fpkt->fcal_socal_request.sr_cqhdr.cq_hdr_seqno = 0;
			fpkt->fcal_socal_request.sr_dataseg[0].fc_base =
			    (uint32_t)cmd->cmd_dmac;
			fpkt->fcal_socal_request.sr_dataseg[0].fc_count =
			    sizeof (struct fcp_cmd);
			fpkt->fcal_socal_request.sr_dataseg[1].fc_base =
			    (uint32_t)cmd->cmd_rsp_dmac;
			fpkt->fcal_socal_request.sr_dataseg[1].fc_count =
			    FCP_MAX_RSP_IU_SIZE;

			/* Fill in the Fibre Channel frame header */
			hp = &fpkt->fcal_socal_request.sr_fc_frame_hdr;
			hp->r_ctl = R_CTL_COMMAND;
			hp->type = TYPE_SCSI_FCP;
			hp->f_ctl = F_CTL_SEQ_INITIATIVE | F_CTL_FIRST_SEQ;
			hp->reserved1 = 0;
			hp->seq_id = 0;
			hp->df_ctl  = 0;
			hp->seq_cnt = 0;
			hp->ox_id = 0xffff;
			hp->rx_id = 0xffff;
			hp->ro = 0;

			/* Establish the LUN */
			bcopy((caddr_t)&target->sft_lun.b,
			    (caddr_t)&cmd->cmd_block->fcp_ent_addr,
			    FCP_LUN_SIZE);
			*((int32_t *)&cmd->cmd_block->fcp_cntl) = 0;
		}
		cmd->cmd_pkt->pkt_cdbp = cmd->cmd_block->fcp_cdb;

		mutex_enter(&target->sft_pkt_mutex);

		target->sft_pkt_tail->cmd_forw = cmd;
		cmd->cmd_back = target->sft_pkt_tail;
		cmd->cmd_forw = (struct sf_pkt *)&target->sft_pkt_head;
		target->sft_pkt_tail = cmd;

		mutex_exit(&target->sft_pkt_mutex);
		new_cmd = cmd;		/* for later cleanup if needed */
	} else {
		/* pkt already exists -- just a request for DMA allocation */
		cmd = (struct sf_pkt *)pkt->pkt_ha_private;
		fpkt = cmd->cmd_fp_pkt;
	}

	/* zero the cdb (just the cmdlen bytes actually in use) */
	bzero((caddr_t)cmd->cmd_pkt->pkt_cdbp, cmdlen);

	/*
	 * Second step of sf_scsi_init_pkt:  dma allocation
	 * Set up dma info
	 */
	if ((bp != NULL) && (bp->b_bcount != 0)) {
		int cmd_flags, dma_flags;
		int rval = 0;
		uint_t dmacookie_count;

		/* there is a buffer and some data to transfer */

		/* set up command and DMA flags */
		cmd_flags = cmd->cmd_flags;
		if (bp->b_flags & B_READ) {
			/* a read */
			cmd_flags &= ~CFLAG_DMASEND;
			dma_flags = DDI_DMA_READ;
		} else {
			/* a write */
			cmd_flags |= CFLAG_DMASEND;
			dma_flags = DDI_DMA_WRITE;
		}
		if (flags & PKT_CONSISTENT) {
			cmd_flags |= CFLAG_CMDIOPB;
			dma_flags |= DDI_DMA_CONSISTENT;
		}

		/* ensure we have a DMA handle */
		if (cmd->cmd_dmahandle == NULL) {
			rval = ddi_dma_alloc_handle(sf->sf_dip,
			    sf->sf_sochandle->fcal_dmaattr, callback, arg,
			    &cmd->cmd_dmahandle);
		}

		if (rval == 0) {
			/* bind our DMA handle to our buffer */
			rval = ddi_dma_buf_bind_handle(cmd->cmd_dmahandle, bp,
			    dma_flags, callback, arg, &cmd->cmd_dmacookie,
			    &dmacookie_count);
		}

		if (rval != 0) {
			/* DMA failure */
			SF_DEBUG(2, (sf, CE_CONT, "ddi_dma_buf.. failed\n"));
			switch (rval) {
			case DDI_DMA_NORESOURCES:
				bioerror(bp, 0);
				break;
			case DDI_DMA_BADATTR:
			case DDI_DMA_NOMAPPING:
				bioerror(bp, EFAULT);
				break;
			case DDI_DMA_TOOBIG:
			default:
				bioerror(bp, EINVAL);
				break;
			}
			/* clear valid flag */
			cmd->cmd_flags = cmd_flags & ~CFLAG_DMAVALID;
			if (new_cmd != NULL) {
				/* destroy packet if we just created it */
				sf_scsi_destroy_pkt(ap, new_cmd->cmd_pkt);
			}
			return (NULL);
		}

		ASSERT(dmacookie_count == 1);
		/* set up amt to transfer and set valid flag */
		cmd->cmd_dmacount = bp->b_bcount;
		cmd->cmd_flags = cmd_flags | CFLAG_DMAVALID;

		ASSERT(cmd->cmd_dmahandle != NULL);
	}

	/* set up FC-AL packet */
	fcmd = cmd->cmd_block;

	if (cmd->cmd_flags & CFLAG_DMAVALID) {
		if (cmd->cmd_flags & CFLAG_DMASEND) {
			/* DMA write */
			fcmd->fcp_cntl.cntl_read_data = 0;
			fcmd->fcp_cntl.cntl_write_data = 1;
			fpkt->fcal_socal_request.sr_cqhdr.cq_hdr_type =
			    CQ_TYPE_IO_WRITE;
		} else {
			/* DMA read */
			fcmd->fcp_cntl.cntl_read_data = 1;
			fcmd->fcp_cntl.cntl_write_data = 0;
			fpkt->fcal_socal_request.sr_cqhdr.cq_hdr_type =
			    CQ_TYPE_IO_READ;
		}
		fpkt->fcal_socal_request.sr_dataseg[2].fc_base =
		    (uint32_t)cmd->cmd_dmacookie.dmac_address;
		fpkt->fcal_socal_request.sr_dataseg[2].fc_count =
		    cmd->cmd_dmacookie.dmac_size;
		fpkt->fcal_socal_request.sr_soc_hdr.sh_seg_cnt = 3;
		fpkt->fcal_socal_request.sr_soc_hdr.sh_byte_cnt =
		    cmd->cmd_dmacookie.dmac_size;
		fcmd->fcp_data_len = cmd->cmd_dmacookie.dmac_size;
	} else {
		/* not a read or write */
		fcmd->fcp_cntl.cntl_read_data = 0;
		fcmd->fcp_cntl.cntl_write_data = 0;
		fpkt->fcal_socal_request.sr_cqhdr.cq_hdr_type = CQ_TYPE_SIMPLE;
		fpkt->fcal_socal_request.sr_soc_hdr.sh_seg_cnt = 2;
		fpkt->fcal_socal_request.sr_soc_hdr.sh_byte_cnt =
		    sizeof (struct fcp_cmd);
		fcmd->fcp_data_len = 0;
	}
	fcmd->fcp_cntl.cntl_qtype = FCP_QTYPE_SIMPLE;

	return (cmd->cmd_pkt);
}
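
/*
 * For reference, a target driver reaches the allocation path above
 * through scsi_init_pkt(9F).  A minimal, hypothetical example (the
 * choice of lengths is illustrative only):
 *
 *	struct scsi_pkt *pkt;
 *
 *	pkt = scsi_init_pkt(&devp->sd_address, NULL, bp, CDB_GROUP0,
 *	    1, 0, PKT_CONSISTENT, SLEEP_FUNC, NULL);
 *
 * A second call passing the now non-NULL pkt exercises only the
 * DMA-allocation half of sf_scsi_init_pkt().
 */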
1682 
1683 
1684 /*
1685  * destroy a SCSI packet -- called internally and by the transport
1686  */
1687 static void
1688 sf_scsi_destroy_pkt(struct scsi_address *ap, struct scsi_pkt *pkt)
1689 {
1690 	struct sf_pkt *cmd = (struct sf_pkt *)pkt->pkt_ha_private;
1691 	struct sf *sf = ADDR2SF(ap);
1692 	struct sf_target *target = ADDR2TARGET(ap);
1693 	struct fcal_packet	*fpkt = cmd->cmd_fp_pkt;
1694 
1695 
1696 	if (cmd->cmd_flags & CFLAG_DMAVALID) {
1697 		/* DMA was set up -- clean up */
1698 		(void) ddi_dma_unbind_handle(cmd->cmd_dmahandle);
1699 		cmd->cmd_flags ^= CFLAG_DMAVALID;
1700 	}
1701 
1702 	/* take this packet off the doubly-linked list */
1703 	mutex_enter(&target->sft_pkt_mutex);
1704 	cmd->cmd_back->cmd_forw = cmd->cmd_forw;
1705 	cmd->cmd_forw->cmd_back = cmd->cmd_back;
1706 	mutex_exit(&target->sft_pkt_mutex);
1707 
1708 	fpkt->fcal_pkt_flags = 0;
1709 	/* free the packet */
1710 	if ((cmd->cmd_flags &
1711 	    (CFLAG_FREE | CFLAG_PRIVEXTERN | CFLAG_SCBEXTERN)) == 0) {
1712 		/* just a regular packet */
1713 		ASSERT(cmd->cmd_state != SF_STATE_ISSUED);
1714 		cmd->cmd_flags = CFLAG_FREE;
1715 		kmem_cache_free(sf->sf_pkt_cache, (void *)cmd);
1716 	} else {
1717 		/* a packet with extra memory */
1718 		sf_pkt_destroy_extern(sf, cmd);
1719 	}
1720 }
1721 
1722 
1723 /*
1724  * called by transport to unbind DMA handle
1725  */
1726 /* ARGSUSED */
1727 static void
1728 sf_scsi_dmafree(struct scsi_address *ap, struct scsi_pkt *pkt)
1729 {
1730 	struct sf_pkt *cmd = (struct sf_pkt *)pkt->pkt_ha_private;
1731 
1732 
1733 	if (cmd->cmd_flags & CFLAG_DMAVALID) {
1734 		(void) ddi_dma_unbind_handle(cmd->cmd_dmahandle);
1735 		cmd->cmd_flags ^= CFLAG_DMAVALID;
1736 	}
1737 
1738 }
1739 
1740 
1741 /*
1742  * called by transport to synchronize CPU and I/O views of memory
1743  */
1744 /* ARGSUSED */
1745 static void
1746 sf_scsi_sync_pkt(struct scsi_address *ap, struct scsi_pkt *pkt)
1747 {
1748 	struct sf_pkt *cmd = (struct sf_pkt *)pkt->pkt_ha_private;
1749 
1750 
1751 	if (cmd->cmd_flags & CFLAG_DMAVALID) {
1752 		if (ddi_dma_sync(cmd->cmd_dmahandle, (off_t)0, (size_t)0,
1753 			(cmd->cmd_flags & CFLAG_DMASEND) ?
1754 			DDI_DMA_SYNC_FORDEV : DDI_DMA_SYNC_FORCPU) !=
1755 		    DDI_SUCCESS) {
1756 			cmn_err(CE_WARN, "sf: sync pkt failed");
1757 		}
1758 	}
1759 }
1760 
1761 
1762 /*
1763  * routine for reset notification setup, to register or cancel. -- called
1764  * by transport
1765  */
1766 static int
1767 sf_scsi_reset_notify(struct scsi_address *ap, int flag,
1768     void (*callback)(caddr_t), caddr_t arg)
1769 {
1770 	struct sf	*sf = ADDR2SF(ap);
1771 
1772 	return (scsi_hba_reset_notify_setup(ap, flag, callback, arg,
1773 		&sf->sf_mutex, &sf->sf_reset_notify_listf));
1774 }
1775 
1776 
1777 /*
1778  * called by transport to get port WWN property (except sun4u)
1779  */
1780 /* ARGSUSED */
1781 static int
1782 sf_scsi_get_name(struct scsi_device *sd, char *name, int len)
1783 {
1784 	char tbuf[(FC_WWN_SIZE*2)+1];
1785 	unsigned char wwn[FC_WWN_SIZE];
1786 	int i, lun;
1787 	dev_info_t *tgt_dip;
1788 
1789 	tgt_dip = sd->sd_dev;
1790 	i = sizeof (wwn);
1791 	if (ddi_prop_op(DDI_DEV_T_ANY, tgt_dip, PROP_LEN_AND_VAL_BUF,
1792 	    DDI_PROP_DONTPASS | DDI_PROP_CANSLEEP, PORT_WWN_PROP,
1793 	    (caddr_t)&wwn, &i) != DDI_SUCCESS) {
1794 		name[0] = '\0';
1795 		return (0);
1796 	}
1797 	i = sizeof (lun);
1798 	if (ddi_prop_op(DDI_DEV_T_ANY, tgt_dip, PROP_LEN_AND_VAL_BUF,
1799 	    DDI_PROP_DONTPASS | DDI_PROP_CANSLEEP, "lun",
1800 	    (caddr_t)&lun, &i) != DDI_SUCCESS) {
1801 		name[0] = '\0';
1802 		return (0);
1803 	}
1804 	for (i = 0; i < FC_WWN_SIZE; i++)
1805 		(void) sprintf(&tbuf[i << 1], "%02x", wwn[i]);
1806 	(void) sprintf(name, "w%s,%x", tbuf, lun);
1807 	return (1);
1808 }
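
/*
 * Editor's note: sf_scsi_get_name() above produces names of the form
 * "w<16 hex WWN digits>,<hex lun>".  For example (hypothetical WWN),
 * a target with port WWN 21:00:00:20:37:a1:b2:c3 at LUN 0 is named
 * "w2100002037a1b2c3,0".
 */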
1809 
1810 
1811 /*
1812  * called by transport to get target soft AL-PA (except sun4u)
1813  */
1814 /* ARGSUSED */
1815 static int
1816 sf_scsi_get_bus_addr(struct scsi_device *sd, char *name, int len)
1817 {
1818 	struct sf_target *target = ADDR2TARGET(&sd->sd_address);
1819 
1820 	if (target == NULL)
1821 		return (0);
1822 
1823 	(void) sprintf(name, "%x", target->sft_al_pa);
1824 	return (1);
1825 }
1826 
1827 
1828 /*
1829  * add to the command/response buffer pool for this sf instance
1830  */
1831 static int
1832 sf_add_cr_pool(struct sf *sf)
1833 {
1834 	int		cmd_buf_size;
1835 	size_t		real_cmd_buf_size;
1836 	int		rsp_buf_size;
1837 	size_t		real_rsp_buf_size;
1838 	uint_t		i, ccount;
1839 	struct sf_cr_pool	*ptr;
1840 	struct sf_cr_free_elem *cptr;
1841 	caddr_t	dptr, eptr;
1842 	ddi_dma_cookie_t	cmd_cookie;
1843 	ddi_dma_cookie_t	rsp_cookie;
1844 	int		cmd_bound = FALSE, rsp_bound = FALSE;
1845 
1846 
1847 	/* allocate room for the pool */
1848 	if ((ptr = kmem_zalloc(sizeof (struct sf_cr_pool), KM_NOSLEEP)) ==
1849 	    NULL) {
1850 		return (DDI_FAILURE);
1851 	}
1852 
1853 	/* allocate a DMA handle for the command pool */
1854 	if (ddi_dma_alloc_handle(sf->sf_dip, sf->sf_sochandle->fcal_dmaattr,
1855 	    DDI_DMA_DONTWAIT, NULL, &ptr->cmd_dma_handle) != DDI_SUCCESS) {
1856 		goto fail;
1857 	}
1858 
1859 	/*
1860 	 * Get a piece of memory in which to put commands
1861 	 */
1862 	cmd_buf_size = (sizeof (struct fcp_cmd) * SF_ELEMS_IN_POOL + 7) & ~7;
1863 	if (ddi_dma_mem_alloc(ptr->cmd_dma_handle, cmd_buf_size,
1864 	    sf->sf_sochandle->fcal_accattr, DDI_DMA_CONSISTENT,
1865 	    DDI_DMA_DONTWAIT, NULL, (caddr_t *)&ptr->cmd_base,
1866 	    &real_cmd_buf_size, &ptr->cmd_acc_handle) != DDI_SUCCESS) {
1867 		goto fail;
1868 	}
1869 
1870 	/* bind the DMA handle to an address */
1871 	if (ddi_dma_addr_bind_handle(ptr->cmd_dma_handle, NULL,
1872 	    ptr->cmd_base, real_cmd_buf_size,
1873 	    DDI_DMA_WRITE | DDI_DMA_CONSISTENT, DDI_DMA_DONTWAIT,
1874 	    NULL, &cmd_cookie, &ccount) != DDI_DMA_MAPPED) {
1875 		goto fail;
1876 	}
1877 	cmd_bound = TRUE;
1878 	/* ensure only one cookie was allocated */
1879 	if (ccount != 1) {
1880 		goto fail;
1881 	}
1882 
1883 	/* allocate a DMA handle for the response pool */
1884 	if (ddi_dma_alloc_handle(sf->sf_dip, sf->sf_sochandle->fcal_dmaattr,
1885 	    DDI_DMA_DONTWAIT, NULL, &ptr->rsp_dma_handle) != DDI_SUCCESS) {
1886 		goto fail;
1887 	}
1888 
1889 	/*
1890 	 * Get a piece of memory in which to put responses
1891 	 */
1892 	rsp_buf_size = FCP_MAX_RSP_IU_SIZE * SF_ELEMS_IN_POOL;
1893 	if (ddi_dma_mem_alloc(ptr->rsp_dma_handle, rsp_buf_size,
1894 	    sf->sf_sochandle->fcal_accattr, DDI_DMA_CONSISTENT,
1895 	    DDI_DMA_DONTWAIT, NULL, (caddr_t *)&ptr->rsp_base,
1896 	    &real_rsp_buf_size, &ptr->rsp_acc_handle) != DDI_SUCCESS) {
1897 		goto fail;
1898 	}
1899 
1900 	/* bind the DMA handle to an address */
1901 	if (ddi_dma_addr_bind_handle(ptr->rsp_dma_handle, NULL,
1902 	    ptr->rsp_base, real_rsp_buf_size,
1903 	    DDI_DMA_READ | DDI_DMA_CONSISTENT, DDI_DMA_DONTWAIT,
1904 	    NULL, &rsp_cookie, &ccount) != DDI_DMA_MAPPED) {
1905 		goto fail;
1906 	}
1907 	rsp_bound = TRUE;
1908 	/* ensure only one cookie was allocated */
1909 	if (ccount != 1) {
1910 		goto fail;
1911 	}
1912 
1913 	/*
1914 	 * Generate a (cmd/rsp structure) free list
1915 	 */
	/* round dptr up to the next 8-byte boundary */
	dptr = (caddr_t)(((uintptr_t)ptr->cmd_base + 7) & ~7);
1918 	/* keep track of actual size after moving pointer */
1919 	real_cmd_buf_size -= (dptr - ptr->cmd_base);
1920 	eptr = ptr->rsp_base;
1921 
1922 	/* set actual total number of entries */
1923 	ptr->ntot = min((real_cmd_buf_size / sizeof (struct fcp_cmd)),
1924 			(real_rsp_buf_size / FCP_MAX_RSP_IU_SIZE));
1925 	ptr->nfree = ptr->ntot;
1926 	ptr->free = (struct sf_cr_free_elem *)ptr->cmd_base;
1927 	ptr->sf = sf;
1928 
1929 	/* set up DMA for each pair of entries */
1930 	i = 0;
1931 	while (i < ptr->ntot) {
1932 		cptr = (struct sf_cr_free_elem *)dptr;
1933 		dptr += sizeof (struct fcp_cmd);
1934 
1935 		cptr->next = (struct sf_cr_free_elem *)dptr;
1936 		cptr->rsp = eptr;
1937 
1938 		cptr->cmd_dmac = cmd_cookie.dmac_address +
1939 		    (uint32_t)((caddr_t)cptr - ptr->cmd_base);
1940 
1941 		cptr->rsp_dmac = rsp_cookie.dmac_address +
1942 		    (uint32_t)((caddr_t)eptr - ptr->rsp_base);
1943 
1944 		eptr += FCP_MAX_RSP_IU_SIZE;
1945 		i++;
1946 	}
1947 
1948 	/* terminate the list */
1949 	cptr->next = NULL;
1950 
1951 	/* add this list at front of current one */
1952 	mutex_enter(&sf->sf_cr_mutex);
1953 	ptr->next = sf->sf_cr_pool;
1954 	sf->sf_cr_pool = ptr;
1955 	sf->sf_cr_pool_cnt++;
1956 	mutex_exit(&sf->sf_cr_mutex);
1957 
1958 	return (DDI_SUCCESS);
1959 
1960 fail:
1961 	/* we failed so clean up */
1962 	if (ptr->cmd_dma_handle != NULL) {
1963 		if (cmd_bound) {
1964 			(void) ddi_dma_unbind_handle(ptr->cmd_dma_handle);
1965 		}
1966 		ddi_dma_free_handle(&ptr->cmd_dma_handle);
1967 	}
1968 
1969 	if (ptr->rsp_dma_handle != NULL) {
1970 		if (rsp_bound) {
1971 			(void) ddi_dma_unbind_handle(ptr->rsp_dma_handle);
1972 		}
1973 		ddi_dma_free_handle(&ptr->rsp_dma_handle);
1974 	}
1975 
1976 	if (ptr->cmd_base != NULL) {
1977 		ddi_dma_mem_free(&ptr->cmd_acc_handle);
1978 	}
1979 
1980 	if (ptr->rsp_base != NULL) {
1981 		ddi_dma_mem_free(&ptr->rsp_acc_handle);
1982 	}
1983 
1984 	kmem_free((caddr_t)ptr, sizeof (struct sf_cr_pool));
1985 	return (DDI_FAILURE);
1986 }
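
/*
 * Editor's sketch (hypothetical; guarded out so it is never built):
 * because each buffer in sf_add_cr_pool() above is bound to a single
 * DMA cookie, the device address of any element is simply the pool
 * cookie's base address plus the element's byte offset from the
 * kernel virtual base -- the computation done inline in the loop.
 */
#ifdef notdef
static uint32_t
sf_example_elem_dmac(ddi_dma_cookie_t *pool_cookie, caddr_t pool_base,
    caddr_t elem)
{
	/* one contiguous cookie ==> offsets translate directly */
	return (pool_cookie->dmac_address + (uint32_t)(elem - pool_base));
}
#endif	/* notdef */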
1987 
1988 
1989 /*
1990  * allocate a command/response buffer from the pool, allocating more
1991  * in the pool as needed
1992  */
1993 static int
1994 sf_cr_alloc(struct sf *sf, struct sf_pkt *cmd, int (*func)())
1995 {
1996 	struct sf_cr_pool *ptr;
1997 	struct sf_cr_free_elem *cptr;
1998 
1999 
2000 	mutex_enter(&sf->sf_cr_mutex);
2001 
2002 try_again:
2003 
2004 	/* find a free buffer in the existing pool */
2005 	ptr = sf->sf_cr_pool;
2006 	while (ptr != NULL) {
2007 		if (ptr->nfree != 0) {
2008 			ptr->nfree--;
2009 			break;
2010 		} else {
2011 			ptr = ptr->next;
2012 		}
2013 	}
2014 
2015 	/* did we find a free buffer ? */
2016 	if (ptr != NULL) {
2017 		/* we found a free buffer -- take it off the free list */
2018 		cptr = ptr->free;
2019 		ptr->free = cptr->next;
2020 		mutex_exit(&sf->sf_cr_mutex);
2021 		/* set up the command to use the buffer pair */
2022 		cmd->cmd_block = (struct fcp_cmd *)cptr;
2023 		cmd->cmd_dmac = cptr->cmd_dmac;
2024 		cmd->cmd_rsp_dmac = cptr->rsp_dmac;
2025 		cmd->cmd_rsp_block = (struct fcp_rsp *)cptr->rsp;
2026 		cmd->cmd_cr_pool = ptr;
2027 		return (DDI_SUCCESS);		/* success */
2028 	}
2029 
2030 	/* no free buffer available -- can we allocate more ? */
2031 	if (sf->sf_cr_pool_cnt < SF_CR_POOL_MAX) {
2032 		/* we need to allocate more buffer pairs */
2033 		if (sf->sf_cr_flag) {
2034 			/* somebody already allocating for this instance */
2035 			if (func == SLEEP_FUNC) {
2036 				/* user wants to wait */
2037 				cv_wait(&sf->sf_cr_cv, &sf->sf_cr_mutex);
2038 				/* we've been woken so go try again */
2039 				goto try_again;
2040 			}
2041 			/* user does not want to wait */
2042 			mutex_exit(&sf->sf_cr_mutex);
2043 			sf->sf_stats.cralloc_failures++;
2044 			return (DDI_FAILURE);	/* give up */
2045 		}
2046 		/* set flag saying we're allocating */
2047 		sf->sf_cr_flag = 1;
2048 		mutex_exit(&sf->sf_cr_mutex);
2049 		/* add to our pool */
2050 		if (sf_add_cr_pool(sf) != DDI_SUCCESS) {
2051 			/* couldn't add to our pool for some reason */
2052 			mutex_enter(&sf->sf_cr_mutex);
2053 			sf->sf_cr_flag = 0;
2054 			cv_broadcast(&sf->sf_cr_cv);
2055 			mutex_exit(&sf->sf_cr_mutex);
2056 			sf->sf_stats.cralloc_failures++;
2057 			return (DDI_FAILURE);	/* give up */
2058 		}
2059 		/*
		 * clear the flag saying we're allocating, and wake up
		 * all others that care
2062 		 */
2063 		mutex_enter(&sf->sf_cr_mutex);
2064 		sf->sf_cr_flag = 0;
2065 		cv_broadcast(&sf->sf_cr_cv);
2066 		/* now that we have more buffers try again */
2067 		goto try_again;
2068 	}
2069 
2070 	/* we don't have room to allocate any more buffers */
2071 	mutex_exit(&sf->sf_cr_mutex);
2072 	sf->sf_stats.cralloc_failures++;
2073 	return (DDI_FAILURE);			/* give up */
2074 }
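
/*
 * Editor's sketch (hypothetical; guarded out so it is never built):
 * the sf_cr_flag/sf_cr_cv protocol in sf_cr_alloc() above is the
 * classic single-allocator gate, distilled here -- one thread grows
 * the pool while the rest either wait on the cv or give up.
 */
#ifdef notdef
static void
sf_example_grow_pool(struct sf *sf)
{
	mutex_enter(&sf->sf_cr_mutex);
	while (sf->sf_cr_flag) {
		/* someone else is already growing the pool -- wait */
		cv_wait(&sf->sf_cr_cv, &sf->sf_cr_mutex);
	}
	sf->sf_cr_flag = 1;		/* we are the allocator now */
	mutex_exit(&sf->sf_cr_mutex);

	(void) sf_add_cr_pool(sf);	/* grow without holding the lock */

	mutex_enter(&sf->sf_cr_mutex);
	sf->sf_cr_flag = 0;
	cv_broadcast(&sf->sf_cr_cv);	/* wake everyone who waited */
	mutex_exit(&sf->sf_cr_mutex);
}
#endif	/* notdef */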
2075 
2076 
2077 /*
2078  * free a cmd/response buffer pair in our pool
2079  */
2080 static void
2081 sf_cr_free(struct sf_cr_pool *cp, struct sf_pkt *cmd)
2082 {
2083 	struct sf *sf = cp->sf;
2084 	struct sf_cr_free_elem *elem;
2085 
2086 	elem = (struct sf_cr_free_elem *)cmd->cmd_block;
2087 	elem->rsp = (caddr_t)cmd->cmd_rsp_block;
2088 	elem->cmd_dmac = cmd->cmd_dmac;
2089 	elem->rsp_dmac = cmd->cmd_rsp_dmac;
2090 
2091 	mutex_enter(&sf->sf_cr_mutex);
2092 	cp->nfree++;
2093 	ASSERT(cp->nfree <= cp->ntot);
2094 
2095 	elem->next = cp->free;
2096 	cp->free = elem;
2097 	mutex_exit(&sf->sf_cr_mutex);
2098 }
2099 
2100 
2101 /*
2102  * free our pool of cmd/response buffers
2103  */
2104 static void
2105 sf_crpool_free(struct sf *sf)
2106 {
2107 	struct sf_cr_pool *cp, *prev;
2108 
2109 	prev = NULL;
2110 	mutex_enter(&sf->sf_cr_mutex);
2111 	cp = sf->sf_cr_pool;
2112 	while (cp != NULL) {
2113 		if (cp->nfree == cp->ntot) {
2114 			if (prev != NULL) {
2115 				prev->next = cp->next;
2116 			} else {
2117 				sf->sf_cr_pool = cp->next;
2118 			}
2119 			sf->sf_cr_pool_cnt--;
2120 			mutex_exit(&sf->sf_cr_mutex);
2121 
2122 			(void) ddi_dma_unbind_handle(cp->cmd_dma_handle);
2123 			ddi_dma_free_handle(&cp->cmd_dma_handle);
2124 			(void) ddi_dma_unbind_handle(cp->rsp_dma_handle);
2125 			ddi_dma_free_handle(&cp->rsp_dma_handle);
2126 			ddi_dma_mem_free(&cp->cmd_acc_handle);
2127 			ddi_dma_mem_free(&cp->rsp_acc_handle);
2128 			kmem_free((caddr_t)cp, sizeof (struct sf_cr_pool));
2129 			return;
2130 		}
2131 		prev = cp;
2132 		cp = cp->next;
2133 	}
2134 	mutex_exit(&sf->sf_cr_mutex);
2135 }
2136 
2137 
2138 /* ARGSUSED */
2139 static int
2140 sf_kmem_cache_constructor(void *buf, void *arg, int size)
2141 {
2142 	struct sf_pkt *cmd = buf;
2143 
2144 	mutex_init(&cmd->cmd_abort_mutex, NULL, MUTEX_DRIVER, NULL);
2145 	cmd->cmd_block = NULL;
2146 	cmd->cmd_dmahandle = NULL;
2147 	return (0);
2148 }
2149 
2150 
2151 /* ARGSUSED */
2152 static void
2153 sf_kmem_cache_destructor(void *buf, void *size)
2154 {
2155 	struct sf_pkt *cmd = buf;
2156 
2157 	if (cmd->cmd_dmahandle != NULL) {
2158 		ddi_dma_free_handle(&cmd->cmd_dmahandle);
2159 	}
2160 
2161 	if (cmd->cmd_block != NULL) {
2162 		sf_cr_free(cmd->cmd_cr_pool, cmd);
2163 	}
2164 	mutex_destroy(&cmd->cmd_abort_mutex);
2165 }
2166 
2167 
2168 /*
2169  * called by transport when a state change occurs
2170  */
2171 static void
2172 sf_statec_callback(void *arg, int msg)
2173 {
2174 	struct sf *sf = (struct sf *)arg;
2175 	struct sf_target	*target;
2176 	int i;
2177 	struct sf_pkt *cmd;
2178 	struct scsi_pkt *pkt;
2179 
2180 
2181 
2182 	switch (msg) {
2183 
2184 	case FCAL_STATUS_LOOP_ONLINE: {
2185 		uchar_t		al_pa;		/* to save AL-PA */
2186 		int		ret;		/* ret value from getmap */
2187 		int		lip_cnt;	/* to save current count */
2188 		int		cnt;		/* map length */
2189 
2190 		/*
2191 		 * the loop has gone online
2192 		 */
2193 		SF_DEBUG(1, (sf, CE_CONT, "sf%d: loop online\n",
2194 		    ddi_get_instance(sf->sf_dip)));
2195 		mutex_enter(&sf->sf_mutex);
2196 		sf->sf_lip_cnt++;
2197 		sf->sf_state = SF_STATE_ONLINING;
2198 		mutex_exit(&sf->sf_mutex);
2199 
2200 		/* scan each target hash queue */
2201 		for (i = 0; i < SF_NUM_HASH_QUEUES; i++) {
2202 			target = sf->sf_wwn_lists[i];
2203 			while (target != NULL) {
2204 				/*
2205 				 * foreach target, if it's not offline then
2206 				 * mark it as busy
2207 				 */
2208 				mutex_enter(&target->sft_mutex);
2209 				if (!(target->sft_state & SF_TARGET_OFFLINE))
2210 					target->sft_state |= (SF_TARGET_BUSY
2211 					    | SF_TARGET_MARK);
2212 #ifdef DEBUG
2213 				/*
2214 				 * for debugging, print out info on any
2215 				 * pending commands (left hanging)
2216 				 */
2217 				cmd = target->sft_pkt_head;
2218 				while (cmd != (struct sf_pkt *)&target->
2219 				    sft_pkt_head) {
2220 					if (cmd->cmd_state ==
2221 					    SF_STATE_ISSUED) {
2222 						SF_DEBUG(1, (sf, CE_CONT,
2223 						    "cmd 0x%p pending "
2224 						    "after lip\n",
2225 						    (void *)cmd->cmd_fp_pkt));
2226 					}
2227 					cmd = cmd->cmd_forw;
2228 				}
2229 #endif
2230 				mutex_exit(&target->sft_mutex);
2231 				target = target->sft_next;
2232 			}
2233 		}
2234 
2235 		/*
2236 		 * since the loop has just gone online get a new map from
2237 		 * the transport
2238 		 */
2239 		if ((ret = soc_get_lilp_map(sf->sf_sochandle, sf->sf_socp,
2240 		    sf->sf_sochandle->fcal_portno, (uint32_t)sf->
2241 		    sf_lilp_dmacookie.dmac_address, 1)) != FCAL_SUCCESS) {
2242 			if (sf_core && (sf_core & SF_CORE_LILP_FAILED)) {
2243 				(void) soc_take_core(sf->sf_sochandle,
2244 				    sf->sf_socp);
2245 				sf_core = 0;
2246 			}
2247 			sf_log(sf, CE_WARN,
2248 				"!soc lilp map failed status=0x%x\n", ret);
2249 			mutex_enter(&sf->sf_mutex);
2250 			sf->sf_timer = sf_watchdog_time + SF_OFFLINE_TIMEOUT;
2251 			sf->sf_lip_cnt++;
2252 			sf->sf_state = SF_STATE_OFFLINE;
2253 			mutex_exit(&sf->sf_mutex);
2254 			return;
2255 		}
2256 
2257 		/* ensure consistent view of DMA memory */
2258 		(void) ddi_dma_sync(sf->sf_lilp_dmahandle, (off_t)0, (size_t)0,
2259 		    DDI_DMA_SYNC_FORKERNEL);
2260 
2261 		/* how many entries in map ? */
2262 		cnt = sf->sf_lilp_map->lilp_length;
2263 		if (cnt >= SF_MAX_LILP_ENTRIES) {
2264 			sf_log(sf, CE_WARN, "invalid lilp map\n");
2265 			return;
2266 		}
2267 
2268 		mutex_enter(&sf->sf_mutex);
2269 		sf->sf_device_count = cnt - 1;
2270 		sf->sf_al_pa = sf->sf_lilp_map->lilp_myalpa;
2271 		lip_cnt = sf->sf_lip_cnt;
2272 		al_pa = sf->sf_al_pa;
2273 
2274 		SF_DEBUG(1, (sf, CE_CONT,
2275 		    "!lilp map has %d entries, al_pa is %x\n", cnt, al_pa));
2276 
2277 		/*
		 * since the last entry of the map is often our own AL-PA,
		 * check for that; if so, there is one less entry to look at
2280 		 */
2281 		if (sf->sf_lilp_map->lilp_alpalist[cnt-1] == al_pa) {
2282 			cnt--;
2283 		}
		/* If we didn't get a valid loop map, enable all targets */
2285 		if (sf->sf_lilp_map->lilp_magic == FCAL_BADLILP_MAGIC) {
2286 			for (i = 0; i < sizeof (sf_switch_to_alpa); i++)
2287 				sf->sf_lilp_map->lilp_alpalist[i] =
2288 					sf_switch_to_alpa[i];
2289 			cnt = i;
2290 			sf->sf_device_count = cnt - 1;
2291 		}
2292 		if (sf->sf_device_count == 0) {
2293 			sf_finish_init(sf, lip_cnt);
2294 			mutex_exit(&sf->sf_mutex);
2295 			break;
2296 		}
2297 		mutex_exit(&sf->sf_mutex);
2298 
2299 		SF_DEBUG(2, (sf, CE_WARN,
2300 			"!statec_callback: starting with %d targets\n",
2301 			sf->sf_device_count));
2302 
2303 		/* scan loop map, logging into all ports (except mine) */
2304 		for (i = 0; i < cnt; i++) {
2305 			SF_DEBUG(1, (sf, CE_CONT,
2306 			    "!lilp map entry %d = %x,%x\n", i,
2307 			    sf->sf_lilp_map->lilp_alpalist[i],
2308 			    sf_alpa_to_switch[
2309 				    sf->sf_lilp_map->lilp_alpalist[i]]));
2310 			/* is this entry for somebody else ? */
2311 			if (sf->sf_lilp_map->lilp_alpalist[i] != al_pa) {
2312 				/* do a PLOGI to this port */
2313 				if (!sf_login(sf, LA_ELS_PLOGI,
2314 				    sf->sf_lilp_map->lilp_alpalist[i],
2315 				    sf->sf_lilp_map->lilp_alpalist[cnt-1],
2316 				    lip_cnt)) {
2317 					/* a problem logging in */
2318 					mutex_enter(&sf->sf_mutex);
2319 					if (lip_cnt == sf->sf_lip_cnt) {
2320 						/*
2321 						 * problem not from a new LIP
2322 						 */
2323 						sf->sf_device_count--;
2324 						ASSERT(sf->sf_device_count
2325 						    >= 0);
2326 						if (sf->sf_device_count == 0) {
2327 							sf_finish_init(sf,
2328 							    lip_cnt);
2329 						}
2330 					}
2331 					mutex_exit(&sf->sf_mutex);
2332 				}
2333 			}
2334 		}
2335 		break;
2336 	}
2337 
2338 	case FCAL_STATUS_ERR_OFFLINE:
2339 		/*
2340 		 * loop has gone offline due to an error
2341 		 */
2342 		SF_DEBUG(1, (sf, CE_CONT, "sf%d: loop offline\n",
2343 		    ddi_get_instance(sf->sf_dip)));
2344 		mutex_enter(&sf->sf_mutex);
2345 		sf->sf_lip_cnt++;
2346 		sf->sf_timer = sf_watchdog_time + SF_OFFLINE_TIMEOUT;
2347 		if (!sf->sf_online_timer) {
2348 			sf->sf_online_timer = sf_watchdog_time +
2349 			    SF_ONLINE_TIMEOUT;
2350 		}
2351 		/*
2352 		 * if we are suspended, preserve the SF_STATE_SUSPENDED flag,
2353 		 * since throttling logic in sf_watch() depends on
2354 		 * preservation of this flag while device is suspended
2355 		 */
2356 		if (sf->sf_state & SF_STATE_SUSPENDED) {
2357 			sf->sf_state |= SF_STATE_OFFLINE;
2358 			SF_DEBUG(1, (sf, CE_CONT,
2359 			    "sf_statec_callback, sf%d: "
2360 			    "got FCAL_STATE_OFFLINE during DDI_SUSPEND\n",
2361 			    ddi_get_instance(sf->sf_dip)));
2362 		} else {
2363 			sf->sf_state = SF_STATE_OFFLINE;
2364 		}
2365 
2366 		/* scan each possible target on the loop */
2367 		for (i = 0; i < sf_max_targets; i++) {
2368 			target = sf->sf_targets[i];
2369 			while (target != NULL) {
2370 				mutex_enter(&target->sft_mutex);
2371 				if (!(target->sft_state & SF_TARGET_OFFLINE))
2372 					target->sft_state |= (SF_TARGET_BUSY
2373 						| SF_TARGET_MARK);
2374 				mutex_exit(&target->sft_mutex);
2375 				target = target->sft_next_lun;
2376 			}
2377 		}
2378 		mutex_exit(&sf->sf_mutex);
2379 		break;
2380 
2381 	case FCAL_STATE_RESET: {
2382 		struct sf_els_hdr	*privp;	/* ptr to private list */
2383 		struct sf_els_hdr	*tmpp1;	/* tmp prev hdr ptr */
2384 		struct sf_els_hdr	*tmpp2;	/* tmp next hdr ptr */
2385 		struct sf_els_hdr	*head;	/* to save our private list */
2386 		struct fcal_packet	*fpkt;	/* ptr to pkt in hdr */
2387 
2388 		/*
2389 		 * a transport reset
2390 		 */
2391 		SF_DEBUG(1, (sf, CE_CONT, "!sf%d: soc reset\n",
2392 		    ddi_get_instance(sf->sf_dip)));
2393 		tmpp1 = head = NULL;
2394 		mutex_enter(&sf->sf_mutex);
2395 		sf->sf_lip_cnt++;
2396 		sf->sf_timer = sf_watchdog_time + SF_RESET_TIMEOUT;
2397 		/*
2398 		 * if we are suspended, preserve the SF_STATE_SUSPENDED flag,
2399 		 * since throttling logic in sf_watch() depends on
2400 		 * preservation of this flag while device is suspended
2401 		 */
2402 		if (sf->sf_state & SF_STATE_SUSPENDED) {
2403 			sf->sf_state |= SF_STATE_OFFLINE;
2404 			SF_DEBUG(1, (sf, CE_CONT,
2405 			    "sf_statec_callback, sf%d: "
2406 			    "got FCAL_STATE_RESET during DDI_SUSPEND\n",
2407 			    ddi_get_instance(sf->sf_dip)));
2408 		} else {
2409 			sf->sf_state = SF_STATE_OFFLINE;
2410 		}
2411 
2412 		/*
2413 		 * scan each possible target on the loop, looking for targets
		 * that need callbacks run
2415 		 */
2416 		for (i = 0; i < sf_max_targets; i++) {
2417 			target = sf->sf_targets[i];
2418 			while (target != NULL) {
2419 				if (!(target->sft_state & SF_TARGET_OFFLINE)) {
2420 					target->sft_state |= (SF_TARGET_BUSY
2421 						| SF_TARGET_MARK);
2422 					mutex_exit(&sf->sf_mutex);
2423 					/*
2424 					 * run remove event callbacks for lun
2425 					 *
2426 					 * We have a nasty race condition here
2427 					 * 'cause we're dropping this mutex to
2428 					 * run the callback and expect the
2429 					 * linked list to be the same.
2430 					 */
2431 					(void) ndi_event_retrieve_cookie(
2432 					    sf->sf_event_hdl, target->sft_dip,
2433 					    FCAL_REMOVE_EVENT, &sf_remove_eid,
2434 					    NDI_EVENT_NOPASS);
2435 					(void) ndi_event_run_callbacks(
2436 						sf->sf_event_hdl,
2437 						target->sft_dip,
2438 						sf_remove_eid, NULL);
2439 					mutex_enter(&sf->sf_mutex);
2440 				}
2441 				target = target->sft_next_lun;
2442 			}
2443 		}
2444 
2445 		/*
2446 		 * scan for ELS commands that are in transport, not complete,
2447 		 * and have a valid timeout, building a private list
2448 		 */
2449 		privp = sf->sf_els_list;
2450 		while (privp != NULL) {
2451 			fpkt = privp->fpkt;
2452 			if ((fpkt->fcal_cmd_state & FCAL_CMD_IN_TRANSPORT) &&
2453 			    (!(fpkt->fcal_cmd_state & FCAL_CMD_COMPLETE)) &&
2454 			    (privp->timeout != SF_INVALID_TIMEOUT)) {
2455 				/*
2456 				 * cmd in transport && not complete &&
2457 				 * timeout valid
2458 				 *
2459 				 * move this entry from ELS input list to our
2460 				 * private list
2461 				 */
2462 
2463 				tmpp2 = privp->next; /* save ptr to next */
2464 
2465 				/* push this on private list head */
2466 				privp->next = head;
2467 				head = privp;
2468 
2469 				/* remove this entry from input list */
2470 				if (tmpp1 != NULL) {
2471 					/*
2472 					 * remove this entry from somewhere in
2473 					 * the middle of the list
2474 					 */
2475 					tmpp1->next = tmpp2;
2476 					if (tmpp2 != NULL) {
2477 						tmpp2->prev = tmpp1;
2478 					}
2479 				} else {
2480 					/*
2481 					 * remove this entry from the head
2482 					 * of the list
2483 					 */
2484 					sf->sf_els_list = tmpp2;
2485 					if (tmpp2 != NULL) {
2486 						tmpp2->prev = NULL;
2487 					}
2488 				}
2489 				privp = tmpp2;	/* skip to next entry */
2490 			} else {
2491 				tmpp1 = privp;	/* save ptr to prev entry */
2492 				privp = privp->next; /* skip to next entry */
2493 			}
2494 		}
2495 
2496 		mutex_exit(&sf->sf_mutex);
2497 
2498 		/*
2499 		 * foreach cmd in our list free the ELS packet associated
2500 		 * with it
2501 		 */
2502 		privp = head;
2503 		while (privp != NULL) {
2504 			fpkt = privp->fpkt;
2505 			privp = privp->next;
2506 			sf_els_free(fpkt);
2507 		}
2508 
2509 		/*
2510 		 * scan for commands from each possible target
2511 		 */
2512 		for (i = 0; i < sf_max_targets; i++) {
2513 			target = sf->sf_targets[i];
2514 			while (target != NULL) {
2515 				/*
2516 				 * scan all active commands for this target,
2517 				 * looking for commands that have been issued,
2518 				 * are in transport, and are not yet complete
2519 				 * (so we can terminate them because of the
2520 				 * reset)
2521 				 */
2522 				mutex_enter(&target->sft_pkt_mutex);
2523 				cmd = target->sft_pkt_head;
2524 				while (cmd != (struct sf_pkt *)&target->
2525 					sft_pkt_head) {
2526 					fpkt = cmd->cmd_fp_pkt;
2527 					mutex_enter(&cmd->cmd_abort_mutex);
2528 					if ((cmd->cmd_state ==
2529 						SF_STATE_ISSUED) &&
2530 						(fpkt->fcal_cmd_state &
2531 						FCAL_CMD_IN_TRANSPORT) &&
2532 						(!(fpkt->fcal_cmd_state &
2533 							FCAL_CMD_COMPLETE))) {
2534 						/* a command to be reset */
2535 						pkt = cmd->cmd_pkt;
2536 						pkt->pkt_reason = CMD_RESET;
2537 						pkt->pkt_statistics |=
2538 							STAT_BUS_RESET;
2539 						cmd->cmd_state = SF_STATE_IDLE;
2540 						mutex_exit(&cmd->
2541 							cmd_abort_mutex);
2542 						mutex_exit(&target->
2543 							sft_pkt_mutex);
2544 						if (pkt->pkt_comp != NULL) {
2545 							(*pkt->pkt_comp)(pkt);
2546 						}
2547 						mutex_enter(&target->
2548 							sft_pkt_mutex);
2549 						cmd = target->sft_pkt_head;
2550 					} else {
2551 						mutex_exit(&cmd->
2552 							cmd_abort_mutex);
2553 						/* get next command */
2554 						cmd = cmd->cmd_forw;
2555 					}
2556 				}
2557 				mutex_exit(&target->sft_pkt_mutex);
2558 				target = target->sft_next_lun;
2559 			}
2560 		}
2561 
2562 		/*
2563 		 * get packet queue for this target, resetting all remaining
2564 		 * commands
2565 		 */
2566 		mutex_enter(&sf->sf_mutex);
2567 		cmd = sf->sf_pkt_head;
2568 		sf->sf_pkt_head = NULL;
2569 		mutex_exit(&sf->sf_mutex);
2570 
2571 		while (cmd != NULL) {
2572 			pkt = cmd->cmd_pkt;
2573 			cmd = cmd->cmd_next;
2574 			pkt->pkt_reason = CMD_RESET;
2575 			pkt->pkt_statistics |= STAT_BUS_RESET;
2576 			if (pkt->pkt_comp != NULL) {
2577 				(*pkt->pkt_comp)(pkt);
2578 			}
2579 		}
2580 		break;
2581 	}
2582 
2583 	default:
2584 		break;
2585 	}
2586 }
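
/*
 * Editor's sketch (hypothetical; guarded out so it is never built):
 * the FCAL_STATE_RESET case above drains the overflow queue with the
 * usual "steal the list under the lock, complete outside it" pattern,
 * distilled here, since pkt_comp routines may re-enter the driver.
 */
#ifdef notdef
static void
sf_example_drain_pkt_queue(struct sf *sf)
{
	struct sf_pkt *cmd;
	struct scsi_pkt *pkt;

	mutex_enter(&sf->sf_mutex);
	cmd = sf->sf_pkt_head;		/* steal the whole queue */
	sf->sf_pkt_head = NULL;
	mutex_exit(&sf->sf_mutex);

	while (cmd != NULL) {
		pkt = cmd->cmd_pkt;
		cmd = cmd->cmd_next;	/* advance before the callback */
		pkt->pkt_reason = CMD_RESET;
		pkt->pkt_statistics |= STAT_BUS_RESET;
		if (pkt->pkt_comp != NULL)
			(*pkt->pkt_comp)(pkt);
	}
}
#endif	/* notdef */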
2587 
2588 
2589 /*
2590  * called to send a PLOGI (N_port login) ELS request to a destination ID,
2591  * returning TRUE upon success, else returning FALSE
2592  */
2593 static int
2594 sf_login(struct sf *sf, uchar_t els_code, uchar_t dest_id, uint_t arg1,
2595     int lip_cnt)
2596 {
2597 	struct la_els_logi	*logi;
2598 	struct	sf_els_hdr	*privp;
2599 
2600 
2601 	if (sf_els_alloc(sf, dest_id, sizeof (struct sf_els_hdr),
2602 	    sizeof (union sf_els_cmd), sizeof (union sf_els_rsp),
2603 	    (caddr_t *)&privp, (caddr_t *)&logi) == NULL) {
2604 		sf_log(sf, CE_WARN, "Cannot allocate PLOGI for target %x "
2605 			"due to DVMA shortage.\n", sf_alpa_to_switch[dest_id]);
2606 		return (FALSE);
2607 	}
2608 
2609 	privp->lip_cnt = lip_cnt;
2610 	if (els_code == LA_ELS_PLOGI) {
2611 		bcopy((caddr_t)sf->sf_sochandle->fcal_loginparms,
2612 		    (caddr_t)&logi->common_service, sizeof (struct la_els_logi)
2613 		    - 4);
2614 		bcopy((caddr_t)&sf->sf_sochandle->fcal_p_wwn,
2615 		    (caddr_t)&logi->nport_ww_name, sizeof (la_wwn_t));
2616 		bcopy((caddr_t)&sf->sf_sochandle->fcal_n_wwn,
2617 		    (caddr_t)&logi->node_ww_name, sizeof (la_wwn_t));
2618 		bzero((caddr_t)&logi->reserved, 16);
2619 	} else if (els_code == LA_ELS_LOGO) {
2620 		bcopy((caddr_t)&sf->sf_sochandle->fcal_p_wwn,
2621 		    (caddr_t)&(((struct la_els_logo *)logi)->nport_ww_name), 8);
2622 		((struct la_els_logo	*)logi)->reserved = 0;
2623 		((struct la_els_logo	*)logi)->nport_id[0] = 0;
2624 		((struct la_els_logo	*)logi)->nport_id[1] = 0;
2625 		((struct la_els_logo	*)logi)->nport_id[2] = arg1;
2626 	}
2627 
2628 	privp->els_code = els_code;
2629 	logi->ls_code = els_code;
2630 	logi->mbz[0] = 0;
2631 	logi->mbz[1] = 0;
2632 	logi->mbz[2] = 0;
2633 
2634 	privp->timeout = sf_watchdog_time + SF_ELS_TIMEOUT;
2635 	return (sf_els_transport(sf, privp));
2636 }
2637 
2638 
2639 /*
2640  * send an ELS IU via the transport,
2641  * returning TRUE upon success, else returning FALSE
2642  */
2643 static int
2644 sf_els_transport(struct sf *sf, struct sf_els_hdr *privp)
2645 {
2646 	struct fcal_packet *fpkt = privp->fpkt;
2647 
2648 
2649 	(void) ddi_dma_sync(privp->cmd_dma_handle, (off_t)0, (size_t)0,
2650 	    DDI_DMA_SYNC_FORDEV);
2651 	privp->prev = NULL;
2652 	mutex_enter(&sf->sf_mutex);
2653 	privp->next = sf->sf_els_list;
2654 	if (sf->sf_els_list != NULL) {
2655 		sf->sf_els_list->prev = privp;
2656 	}
2657 	sf->sf_els_list = privp;
2658 	mutex_exit(&sf->sf_mutex);
2659 
2660 	/* call the transport to send a packet */
2661 	if (soc_transport(sf->sf_sochandle, fpkt, FCAL_NOSLEEP,
2662 	    CQ_REQUEST_1) != FCAL_TRANSPORT_SUCCESS) {
2663 		mutex_enter(&sf->sf_mutex);
2664 		if (privp->prev != NULL) {
2665 			privp->prev->next = privp->next;
2666 		}
2667 		if (privp->next != NULL) {
2668 			privp->next->prev = privp->prev;
2669 		}
2670 		if (sf->sf_els_list == privp) {
2671 			sf->sf_els_list = privp->next;
2672 		}
2673 		mutex_exit(&sf->sf_mutex);
2674 		sf_els_free(fpkt);
2675 		return (FALSE);			/* failure */
2676 	}
2677 	return (TRUE);				/* success */
2678 }
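
/*
 * Editor's sketch (hypothetical; guarded out so it is never built):
 * sf_els_transport() above links the request onto sf_els_list before
 * calling soc_transport(), because the completion callback can fire
 * on another CPU before soc_transport() returns and must be able to
 * find the request.  The unlink idiom it then repeats on failure
 * (and that sf_els_callback() repeats on completion) is this:
 */
#ifdef notdef
static void
sf_example_els_unlink(struct sf *sf, struct sf_els_hdr *privp)
{
	/* caller holds sf->sf_mutex */
	if (privp->prev != NULL)
		privp->prev->next = privp->next;
	if (privp->next != NULL)
		privp->next->prev = privp->prev;
	if (sf->sf_els_list == privp)
		sf->sf_els_list = privp->next;
	privp->prev = privp->next = NULL;
}
#endif	/* notdef */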
2679 
2680 
2681 /*
2682  * called as the pkt_comp routine for ELS FC packets
2683  */
2684 static void
2685 sf_els_callback(struct fcal_packet *fpkt)
2686 {
2687 	struct sf_els_hdr *privp = fpkt->fcal_pkt_private;
2688 	struct sf *sf = privp->sf;
2689 	struct sf *tsf;
2690 	int tgt_id;
2691 	struct la_els_logi *ptr = (struct la_els_logi *)privp->rsp;
2692 	struct la_els_adisc *adisc = (struct la_els_adisc *)ptr;
2693 	struct	sf_target *target;
2694 	short	ncmds;
2695 	short	free_pkt = TRUE;
2696 
2697 
2698 	/*
2699 	 * we've received an ELS callback, i.e. an ELS packet has arrived
2700 	 */
2701 
2702 	/* take the current packet off of the queue */
2703 	mutex_enter(&sf->sf_mutex);
2704 	if (privp->timeout == SF_INVALID_TIMEOUT) {
2705 		mutex_exit(&sf->sf_mutex);
2706 		return;
2707 	}
2708 	if (privp->prev != NULL) {
2709 		privp->prev->next = privp->next;
2710 	}
2711 	if (privp->next != NULL) {
2712 		privp->next->prev = privp->prev;
2713 	}
2714 	if (sf->sf_els_list == privp) {
2715 		sf->sf_els_list = privp->next;
2716 	}
2717 	privp->prev = privp->next = NULL;
2718 	mutex_exit(&sf->sf_mutex);
2719 
2720 	/* get # pkts in this callback */
2721 	ncmds = fpkt->fcal_ncmds;
2722 	ASSERT(ncmds >= 0);
2723 	mutex_enter(&sf->sf_cmd_mutex);
2724 	sf->sf_ncmds = ncmds;
2725 	mutex_exit(&sf->sf_cmd_mutex);
2726 
2727 	/* sync idea of memory */
2728 	(void) ddi_dma_sync(privp->rsp_dma_handle, (off_t)0, (size_t)0,
2729 	    DDI_DMA_SYNC_FORKERNEL);
2730 
2731 	/* was this an OK ACC msg ?? */
2732 	if ((fpkt->fcal_pkt_status == FCAL_STATUS_OK) &&
2733 	    (ptr->ls_code == LA_ELS_ACC)) {
2734 
2735 		/*
2736 		 * this was an OK ACC pkt
2737 		 */
2738 
2739 		switch (privp->els_code) {
2740 		case LA_ELS_PLOGI:
2741 			/*
			 * was able to do an N_port login
2743 			 */
2744 			SF_DEBUG(2, (sf, CE_CONT,
2745 			    "!PLOGI to al_pa %x succeeded, wwn %x%x\n",
2746 			    privp->dest_nport_id,
2747 			    *((int *)&ptr->nport_ww_name.raw_wwn[0]),
2748 			    *((int *)&ptr->nport_ww_name.raw_wwn[4])));
2749 			/* try to do a process login */
2750 			if (!sf_do_prli(sf, privp, ptr)) {
2751 				free_pkt = FALSE;
2752 				goto fail;	/* PRLI failed */
2753 			}
2754 			break;
2755 		case LA_ELS_PRLI:
2756 			/*
2757 			 * was able to do a process login
2758 			 */
2759 			SF_DEBUG(2, (sf, CE_CONT,
2760 			    "!PRLI to al_pa %x succeeded\n",
2761 			    privp->dest_nport_id));
2762 			/* try to do address discovery */
2763 			if (sf_do_adisc(sf, privp) != 1) {
2764 				free_pkt = FALSE;
2765 				goto fail;	/* ADISC failed */
2766 			}
2767 			break;
2768 		case LA_ELS_ADISC:
2769 			/*
2770 			 * found a target via ADISC
2771 			 */
2772 
2773 			SF_DEBUG(2, (sf, CE_CONT,
2774 			    "!ADISC to al_pa %x succeeded\n",
2775 			    privp->dest_nport_id));
2776 
2777 			/* create the target info */
2778 			if ((target = sf_create_target(sf, privp,
2779 				sf_alpa_to_switch[(uchar_t)adisc->hard_address],
2780 				(int64_t)0))
2781 				== NULL) {
2782 				goto fail;	/* can't create target */
2783 			}
2784 
2785 			/*
2786 			 * ensure address discovered matches what we thought
2787 			 * it would be
2788 			 */
2789 			if ((uchar_t)adisc->hard_address !=
2790 			    privp->dest_nport_id) {
2791 				sf_log(sf, CE_WARN,
2792 				    "target 0x%x, AL-PA 0x%x and "
2793 				    "hard address 0x%x don't match\n",
2794 				    sf_alpa_to_switch[
2795 					    (uchar_t)privp->dest_nport_id],
2796 				    privp->dest_nport_id,
2797 				    (uchar_t)adisc->hard_address);
2798 				mutex_enter(&sf->sf_mutex);
2799 				sf_offline_target(sf, target);
2800 				mutex_exit(&sf->sf_mutex);
2801 				goto fail;	/* addr doesn't match */
2802 			}
2803 			/*
2804 			 * get inquiry data from the target
2805 			 */
2806 			if (!sf_do_reportlun(sf, privp, target)) {
2807 				mutex_enter(&sf->sf_mutex);
2808 				sf_offline_target(sf, target);
2809 				mutex_exit(&sf->sf_mutex);
2810 				free_pkt = FALSE;
2811 				goto fail;	/* inquiry failed */
2812 			}
2813 			break;
2814 		default:
2815 			SF_DEBUG(2, (sf, CE_CONT,
2816 			    "!ELS %x to al_pa %x succeeded\n",
2817 			    privp->els_code, privp->dest_nport_id));
2818 			sf_els_free(fpkt);
2819 			break;
2820 		}
2821 
2822 	} else {
2823 
2824 		/*
2825 		 * oh oh -- this was not an OK ACC packet
2826 		 */
2827 
2828 		/* get target ID from dest loop address */
2829 		tgt_id = sf_alpa_to_switch[(uchar_t)privp->dest_nport_id];
2830 
2831 		/* keep track of failures */
2832 		sf->sf_stats.tstats[tgt_id].els_failures++;
2833 		if (++(privp->retries) < sf_els_retries &&
2834 			fpkt->fcal_pkt_status != FCAL_STATUS_OPEN_FAIL) {
2835 			if (fpkt->fcal_pkt_status ==
2836 					FCAL_STATUS_MAX_XCHG_EXCEEDED)  {
2837 				tsf = sf->sf_sibling;
2838 				if (tsf != NULL) {
2839 					mutex_enter(&tsf->sf_cmd_mutex);
2840 					tsf->sf_flag = 1;
2841 					tsf->sf_throttle = SF_DECR_DELTA;
2842 					mutex_exit(&tsf->sf_cmd_mutex);
2843 				}
2844 			}
2845 			privp->timeout = sf_watchdog_time + SF_ELS_TIMEOUT;
2846 			privp->prev = NULL;
2847 
2848 			mutex_enter(&sf->sf_mutex);
2849 
2850 			if (privp->lip_cnt == sf->sf_lip_cnt) {
2851 				SF_DEBUG(1, (sf, CE_WARN,
2852 				    "!ELS %x to al_pa %x failed, retrying",
2853 				    privp->els_code, privp->dest_nport_id));
2854 				privp->next = sf->sf_els_list;
2855 				if (sf->sf_els_list != NULL) {
2856 					sf->sf_els_list->prev = privp;
2857 				}
2858 
2859 				sf->sf_els_list = privp;
2860 
2861 				mutex_exit(&sf->sf_mutex);
2862 				/* device busy?  wait a bit ... */
2863 				if (fpkt->fcal_pkt_status ==
2864 					FCAL_STATUS_MAX_XCHG_EXCEEDED)  {
2865 					privp->delayed_retry = 1;
2866 					return;
2867 				}
2868 				/* call the transport to send a pkt */
2869 				if (soc_transport(sf->sf_sochandle, fpkt,
2870 				    FCAL_NOSLEEP, CQ_REQUEST_1) !=
2871 				    FCAL_TRANSPORT_SUCCESS) {
2872 					mutex_enter(&sf->sf_mutex);
2873 					if (privp->prev != NULL) {
2874 						privp->prev->next =
2875 							privp->next;
2876 					}
2877 					if (privp->next != NULL) {
2878 						privp->next->prev =
2879 							privp->prev;
2880 					}
2881 					if (sf->sf_els_list == privp) {
2882 						sf->sf_els_list = privp->next;
2883 					}
2884 					mutex_exit(&sf->sf_mutex);
2885 					goto fail;
2886 				} else
2887 					return;
2888 			} else {
2889 				mutex_exit(&sf->sf_mutex);
2890 				goto fail;
2891 			}
2892 		} else {
2893 #ifdef	DEBUG
			if (fpkt->fcal_pkt_status != 0x36 || sfdebug > 4) {
				SF_DEBUG(2, (sf, CE_NOTE,
				    "ELS %x to al_pa %x failed",
				    privp->els_code, privp->dest_nport_id));
				if (fpkt->fcal_pkt_status == FCAL_STATUS_OK) {
					SF_DEBUG(2, (sf, CE_NOTE,
					    "els reply code = %x",
					    ptr->ls_code));
					if (ptr->ls_code == LA_ELS_RJT)
						SF_DEBUG(1, (sf, CE_CONT,
						    "LS_RJT reason = %x\n",
						    *(((uint_t *)ptr) + 1)));
				} else {
					SF_DEBUG(2, (sf, CE_NOTE,
					    "fc packet status = %x",
					    fpkt->fcal_pkt_status));
				}
			}
2909 #endif
2910 			goto fail;
2911 		}
2912 	}
2913 	return;					/* success */
2914 fail:
2915 	mutex_enter(&sf->sf_mutex);
2916 	if (sf->sf_lip_cnt == privp->lip_cnt) {
2917 		sf->sf_device_count--;
2918 		ASSERT(sf->sf_device_count >= 0);
2919 		if (sf->sf_device_count == 0) {
2920 			sf_finish_init(sf, privp->lip_cnt);
2921 		}
2922 	}
2923 	mutex_exit(&sf->sf_mutex);
2924 	if (free_pkt) {
2925 		sf_els_free(fpkt);
2926 	}
2927 }
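
/*
 * Editor's note: the retry policy in sf_els_callback() above amounts
 * to this: retry up to sf_els_retries times, but only if no new LIP
 * has intervened and the failure was not an outright open failure;
 * on FCAL_STATUS_MAX_XCHG_EXCEEDED, additionally throttle the sibling
 * port and defer the resend to the watchdog (delayed_retry) instead
 * of retransmitting immediately; otherwise give up and decrement the
 * outstanding-device count via the fail path.
 */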
2928 
2929 
2930 /*
2931  * send a PRLI (process login) ELS IU via the transport,
2932  * returning TRUE upon success, else returning FALSE
2933  */
2934 static int
2935 sf_do_prli(struct sf *sf, struct sf_els_hdr *privp, struct la_els_logi *ptr)
2936 {
2937 	struct la_els_prli	*prli = (struct la_els_prli *)privp->cmd;
2938 	struct fcp_prli		*fprli;
2939 	struct  fcal_packet	*fpkt = privp->fpkt;
2940 
2941 
2942 	fpkt->fcal_socal_request.sr_dataseg[0].fc_count =
2943 	    sizeof (struct la_els_prli);
2944 	privp->els_code = LA_ELS_PRLI;
2945 	fprli = (struct fcp_prli *)prli->service_params;
2946 	prli->ls_code = LA_ELS_PRLI;
2947 	prli->page_length = 0x10;
2948 	prli->payload_length = sizeof (struct la_els_prli);
	fprli->type = 0x08;		/* FC-4 type: SCSI-FCP */
2950 	fprli->resvd1 = 0;
2951 	fprli->orig_process_assoc_valid = 0;
2952 	fprli->resp_process_assoc_valid = 0;
2953 	fprli->establish_image_pair = 1;
2954 	fprli->resvd2 = 0;
2955 	fprli->resvd3 = 0;
2956 	fprli->data_overlay_allowed = 0;
2957 	fprli->initiator_fn = 1;
2958 	fprli->target_fn = 0;
2959 	fprli->cmd_data_mixed = 0;
2960 	fprli->data_resp_mixed = 0;
2961 	fprli->read_xfer_rdy_disabled = 1;
2962 	fprli->write_xfer_rdy_disabled = 0;
2963 
2964 	bcopy((caddr_t)&ptr->nport_ww_name, (caddr_t)&privp->port_wwn,
2965 		sizeof (privp->port_wwn));
2966 	bcopy((caddr_t)&ptr->node_ww_name, (caddr_t)&privp->node_wwn,
2967 		sizeof (privp->node_wwn));
2968 
2969 	privp->timeout = sf_watchdog_time + SF_ELS_TIMEOUT;
2970 	return (sf_els_transport(sf, privp));
2971 }
2972 
2973 
2974 /*
2975  * send an ADISC (address discovery) ELS IU via the transport,
2976  * returning TRUE upon success, else returning FALSE
2977  */
2978 static int
2979 sf_do_adisc(struct sf *sf, struct sf_els_hdr *privp)
2980 {
2981 	struct la_els_adisc	*adisc = (struct la_els_adisc *)privp->cmd;
2982 	struct	fcal_packet	*fpkt = privp->fpkt;
2983 
2984 	privp->els_code = LA_ELS_ADISC;
2985 	adisc->ls_code = LA_ELS_ADISC;
2986 	adisc->mbz[0] = 0;
2987 	adisc->mbz[1] = 0;
2988 	adisc->mbz[2] = 0;
	adisc->hard_address = 0;	/* we assert no hard address */
2990 	fpkt->fcal_socal_request.sr_dataseg[0].fc_count =
2991 	    sizeof (struct la_els_adisc);
2992 	bcopy((caddr_t)&sf->sf_sochandle->fcal_p_wwn,
2993 	    (caddr_t)&adisc->port_wwn, sizeof (adisc->port_wwn));
2994 	bcopy((caddr_t)&sf->sf_sochandle->fcal_n_wwn,
2995 	    (caddr_t)&adisc->node_wwn, sizeof (adisc->node_wwn));
2996 	adisc->nport_id = sf->sf_al_pa;
2997 
2998 	privp->timeout = sf_watchdog_time + SF_ELS_TIMEOUT;
2999 	return (sf_els_transport(sf, privp));
3000 }
3001 
3002 
3003 static struct fcal_packet *
3004 sf_els_alloc(struct sf *sf, uchar_t dest_id, int priv_size, int cmd_size,
3005     int rsp_size, caddr_t *rprivp, caddr_t *cmd_buf)
3006 {
3007 	struct	fcal_packet	*fpkt;
3008 	ddi_dma_cookie_t	pcookie;
3009 	ddi_dma_cookie_t	rcookie;
3010 	struct	sf_els_hdr	*privp;
3011 	ddi_dma_handle_t	cmd_dma_handle = NULL;
3012 	ddi_dma_handle_t	rsp_dma_handle = NULL;
3013 	ddi_acc_handle_t	cmd_acc_handle = NULL;
3014 	ddi_acc_handle_t	rsp_acc_handle = NULL;
3015 	size_t			real_size;
3016 	uint_t			ccount;
3017 	fc_frame_header_t	*hp;
3018 	int			cmd_bound = FALSE, rsp_bound = FALSE;
3019 	caddr_t			cmd = NULL;
3020 	caddr_t			rsp = NULL;
3021 
3022 	if ((fpkt = (struct fcal_packet *)kmem_zalloc(
3023 	    sizeof (struct fcal_packet), KM_NOSLEEP)) == NULL) {
3024 		SF_DEBUG(1, (sf, CE_WARN,
3025 			"Could not allocate fcal_packet for ELS\n"));
3026 		return (NULL);
3027 	}
3028 
3029 	if ((privp = (struct sf_els_hdr *)kmem_zalloc(priv_size,
3030 	    KM_NOSLEEP)) == NULL) {
3031 		SF_DEBUG(1, (sf, CE_WARN,
3032 			"Could not allocate sf_els_hdr for ELS\n"));
3033 		goto fail;
3034 	}
3035 
3036 	privp->size = priv_size;
3037 	fpkt->fcal_pkt_private = (caddr_t)privp;
3038 
3039 	if (ddi_dma_alloc_handle(sf->sf_dip, sf->sf_sochandle->fcal_dmaattr,
3040 	    DDI_DMA_DONTWAIT, NULL, &cmd_dma_handle) != DDI_SUCCESS) {
3041 		SF_DEBUG(1, (sf, CE_WARN,
3042 			"Could not allocate DMA handle for ELS\n"));
3043 		goto fail;
3044 	}
3045 
3046 	if (ddi_dma_mem_alloc(cmd_dma_handle, cmd_size,
3047 	    sf->sf_sochandle->fcal_accattr, DDI_DMA_CONSISTENT,
3048 	    DDI_DMA_DONTWAIT, NULL, &cmd,
3049 	    &real_size, &cmd_acc_handle) != DDI_SUCCESS) {
3050 		SF_DEBUG(1, (sf, CE_WARN,
3051 			"Could not allocate DMA memory for ELS\n"));
3052 		goto fail;
3053 	}
3054 
3055 	if (real_size < cmd_size) {
3056 		SF_DEBUG(1, (sf, CE_WARN,
3057 			"DMA memory too small for ELS\n"));
3058 		goto fail;
3059 	}
3060 
3061 	if (ddi_dma_addr_bind_handle(cmd_dma_handle, NULL,
3062 	    cmd, real_size, DDI_DMA_WRITE | DDI_DMA_CONSISTENT,
3063 	    DDI_DMA_DONTWAIT, NULL, &pcookie, &ccount) != DDI_DMA_MAPPED) {
3064 		SF_DEBUG(1, (sf, CE_WARN,
3065 			"Could not bind DMA memory for ELS\n"));
3066 		goto fail;
3067 	}
3068 	cmd_bound = TRUE;
3069 
3070 	if (ccount != 1) {
3071 		SF_DEBUG(1, (sf, CE_WARN,
3072 			"Wrong cookie count for ELS\n"));
3073 		goto fail;
3074 	}
3075 
3076 	if (ddi_dma_alloc_handle(sf->sf_dip, sf->sf_sochandle->fcal_dmaattr,
3077 	    DDI_DMA_DONTWAIT, NULL, &rsp_dma_handle) != DDI_SUCCESS) {
3078 		SF_DEBUG(1, (sf, CE_WARN,
3079 			"Could not allocate DMA handle for ELS rsp\n"));
3080 		goto fail;
3081 	}
3082 	if (ddi_dma_mem_alloc(rsp_dma_handle, rsp_size,
3083 	    sf->sf_sochandle->fcal_accattr, DDI_DMA_CONSISTENT,
3084 	    DDI_DMA_DONTWAIT, NULL, &rsp,
3085 	    &real_size, &rsp_acc_handle) != DDI_SUCCESS) {
3086 		SF_DEBUG(1, (sf, CE_WARN,
3087 			"Could not allocate DMA memory for ELS rsp\n"));
3088 		goto fail;
3089 	}
3090 
3091 	if (real_size < rsp_size) {
3092 		SF_DEBUG(1, (sf, CE_WARN,
3093 			"DMA memory too small for ELS rsp\n"));
3094 		goto fail;
3095 	}
3096 
3097 	if (ddi_dma_addr_bind_handle(rsp_dma_handle, NULL,
3098 	    rsp, real_size, DDI_DMA_READ | DDI_DMA_CONSISTENT,
3099 	    DDI_DMA_DONTWAIT, NULL, &rcookie, &ccount) != DDI_DMA_MAPPED) {
3100 		SF_DEBUG(1, (sf, CE_WARN,
3101 			"Could not bind DMA memory for ELS rsp\n"));
3102 		goto fail;
3103 	}
3104 	rsp_bound = TRUE;
3105 
3106 	if (ccount != 1) {
3107 		SF_DEBUG(1, (sf, CE_WARN,
3108 			"Wrong cookie count for ELS rsp\n"));
3109 		goto fail;
3110 	}
3111 
3112 	privp->cmd = cmd;
3113 	privp->sf = sf;
3114 	privp->cmd_dma_handle = cmd_dma_handle;
3115 	privp->cmd_acc_handle = cmd_acc_handle;
3116 	privp->rsp = rsp;
3117 	privp->rsp_dma_handle = rsp_dma_handle;
3118 	privp->rsp_acc_handle = rsp_acc_handle;
3119 	privp->dest_nport_id = dest_id;
3120 	privp->fpkt = fpkt;
3121 
3122 	fpkt->fcal_pkt_cookie = sf->sf_socp;
3123 	fpkt->fcal_pkt_comp = sf_els_callback;
3124 	fpkt->fcal_magic = FCALP_MAGIC;
3125 	fpkt->fcal_pkt_flags = 0;
3126 	fpkt->fcal_socal_request.sr_soc_hdr.sh_flags =
3127 	    (ushort_t)(SOC_FC_HEADER | sf->sf_sochandle->fcal_portno);
3128 	fpkt->fcal_socal_request.sr_soc_hdr.sh_class = 3;
3129 	fpkt->fcal_socal_request.sr_soc_hdr.sh_seg_cnt = 2;
3130 	fpkt->fcal_socal_request.sr_soc_hdr.sh_byte_cnt = cmd_size;
3131 	fpkt->fcal_socal_request.sr_cqhdr.cq_hdr_count = 1;
3132 	fpkt->fcal_socal_request.sr_cqhdr.cq_hdr_flags = 0;
3133 	fpkt->fcal_socal_request.sr_cqhdr.cq_hdr_seqno = 0;
3134 	fpkt->fcal_socal_request.sr_cqhdr.cq_hdr_type = CQ_TYPE_SIMPLE;
3135 	fpkt->fcal_socal_request.sr_dataseg[0].fc_base = (uint32_t)
3136 	    pcookie.dmac_address;
3137 	fpkt->fcal_socal_request.sr_dataseg[0].fc_count = cmd_size;
3138 	fpkt->fcal_socal_request.sr_dataseg[1].fc_base = (uint32_t)
3139 	    rcookie.dmac_address;
3140 	fpkt->fcal_socal_request.sr_dataseg[1].fc_count = rsp_size;
3141 
3142 	/* Fill in the Fabric Channel Header */
3143 	hp = &fpkt->fcal_socal_request.sr_fc_frame_hdr;
3144 	hp->r_ctl = R_CTL_ELS_REQ;
3145 	hp->d_id = dest_id;
3146 	hp->s_id = sf->sf_al_pa;
3147 	hp->type = TYPE_EXTENDED_LS;
3148 	hp->reserved1 = 0;
3149 	hp->f_ctl = F_CTL_SEQ_INITIATIVE | F_CTL_FIRST_SEQ;
3150 	hp->seq_id = 0;
3151 	hp->df_ctl  = 0;
3152 	hp->seq_cnt = 0;
3153 	hp->ox_id = 0xffff;
3154 	hp->rx_id = 0xffff;
3155 	hp->ro = 0;
3156 
3157 	*rprivp = (caddr_t)privp;
3158 	*cmd_buf = cmd;
3159 	return (fpkt);
3160 
3161 fail:
3162 	if (cmd_dma_handle != NULL) {
3163 		if (cmd_bound) {
3164 			(void) ddi_dma_unbind_handle(cmd_dma_handle);
3165 		}
3166 		ddi_dma_free_handle(&cmd_dma_handle);
3167 		privp->cmd_dma_handle = NULL;
3168 	}
3169 	if (rsp_dma_handle != NULL) {
3170 		if (rsp_bound) {
3171 			(void) ddi_dma_unbind_handle(rsp_dma_handle);
3172 		}
3173 		ddi_dma_free_handle(&rsp_dma_handle);
3174 		privp->rsp_dma_handle = NULL;
3175 	}
3176 	sf_els_free(fpkt);
3177 	return (NULL);
3178 }
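
/*
 * Editor's sketch (hypothetical; guarded out so it is never built):
 * each buffer in sf_els_alloc() above follows the same four-step DDI
 * idiom, distilled here.  The single-cookie check matters because a
 * SOC+ data segment can describe only one contiguous region.  Cleanup
 * on failure is omitted for brevity; the real code unwinds via its
 * fail label.
 */
#ifdef notdef
static int
sf_example_bind_one(struct sf *sf, size_t len, uint_t dir,
    ddi_dma_handle_t *handlep, ddi_acc_handle_t *accp, caddr_t *kvap,
    ddi_dma_cookie_t *cookiep)
{
	size_t real_len;
	uint_t ccount;

	if (ddi_dma_alloc_handle(sf->sf_dip, sf->sf_sochandle->fcal_dmaattr,
	    DDI_DMA_DONTWAIT, NULL, handlep) != DDI_SUCCESS)
		return (DDI_FAILURE);
	if (ddi_dma_mem_alloc(*handlep, len, sf->sf_sochandle->fcal_accattr,
	    DDI_DMA_CONSISTENT, DDI_DMA_DONTWAIT, NULL, kvap, &real_len,
	    accp) != DDI_SUCCESS)
		return (DDI_FAILURE);
	if (ddi_dma_addr_bind_handle(*handlep, NULL, *kvap, real_len,
	    dir | DDI_DMA_CONSISTENT, DDI_DMA_DONTWAIT, NULL, cookiep,
	    &ccount) != DDI_DMA_MAPPED)
		return (DDI_FAILURE);
	/* hardware wants exactly one contiguous segment */
	return (ccount == 1 ? DDI_SUCCESS : DDI_FAILURE);
}
#endif	/* notdef */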
3179 
3180 
3181 static void
3182 sf_els_free(struct fcal_packet *fpkt)
3183 {
3184 	struct	sf_els_hdr	*privp = fpkt->fcal_pkt_private;
3185 
3186 	if (privp != NULL) {
3187 		if (privp->cmd_dma_handle != NULL) {
3188 			(void) ddi_dma_unbind_handle(privp->cmd_dma_handle);
3189 			ddi_dma_free_handle(&privp->cmd_dma_handle);
3190 		}
3191 		if (privp->cmd != NULL) {
3192 			ddi_dma_mem_free(&privp->cmd_acc_handle);
3193 		}
3194 
3195 		if (privp->rsp_dma_handle != NULL) {
3196 			(void) ddi_dma_unbind_handle(privp->rsp_dma_handle);
3197 			ddi_dma_free_handle(&privp->rsp_dma_handle);
3198 		}
3199 
3200 		if (privp->rsp != NULL) {
3201 			ddi_dma_mem_free(&privp->rsp_acc_handle);
3202 		}
3203 		if (privp->data_dma_handle) {
3204 			(void) ddi_dma_unbind_handle(privp->data_dma_handle);
3205 			ddi_dma_free_handle(&privp->data_dma_handle);
3206 		}
3207 		if (privp->data_buf) {
3208 			ddi_dma_mem_free(&privp->data_acc_handle);
3209 		}
3210 		kmem_free(privp, privp->size);
3211 	}
3212 	kmem_free(fpkt, sizeof (struct fcal_packet));
3213 }
3214 
3215 
3216 static struct sf_target *
3217 sf_create_target(struct sf *sf, struct sf_els_hdr *privp, int tnum, int64_t lun)
3218 {
3219 	struct sf_target *target, *ntarget, *otarget, *ptarget;
3220 	int hash;
3221 #ifdef RAID_LUNS
3222 	int64_t orig_lun = lun;
3223 
3224 	/* XXXX Work around SCSA limitations. */
3225 	lun = *((short *)&lun);
3226 #endif
3227 	ntarget = kmem_zalloc(sizeof (struct sf_target), KM_NOSLEEP);
3228 	mutex_enter(&sf->sf_mutex);
3229 	if (sf->sf_lip_cnt != privp->lip_cnt) {
3230 		mutex_exit(&sf->sf_mutex);
3231 		if (ntarget != NULL)
3232 			kmem_free(ntarget, sizeof (struct sf_target));
3233 		return (NULL);
3234 	}
3235 
3236 	target = sf_lookup_target(sf, privp->port_wwn, lun);
3237 	if (lun != 0) {
3238 		/*
3239 		 * Since LUNs != 0 are queued up after LUN == 0, find LUN == 0
3240 		 * and enqueue the new LUN.
3241 		 */
3242 		if ((ptarget = sf_lookup_target(sf, privp->port_wwn,
3243 			(int64_t)0)) ==	NULL) {
3244 			/*
3245 			 * Yeep -- no LUN 0?
3246 			 */
3247 			mutex_exit(&sf->sf_mutex);
3248 			sf_log(sf, CE_WARN, "target 0x%x "
3249 				"lun %" PRIx64 ": No LUN 0\n", tnum, lun);
3250 			if (ntarget != NULL)
3251 				kmem_free(ntarget, sizeof (struct sf_target));
3252 			return (NULL);
3253 		}
3254 		mutex_enter(&ptarget->sft_mutex);
		if (target != NULL && ptarget->sft_lip_cnt == sf->sf_lip_cnt &&
		    (ptarget->sft_state & SF_TARGET_OFFLINE)) {
3257 			/* LUN 0 already finished, duplicate its state */
3258 			mutex_exit(&ptarget->sft_mutex);
3259 			sf_offline_target(sf, target);
3260 			mutex_exit(&sf->sf_mutex);
3261 			if (ntarget != NULL)
3262 				kmem_free(ntarget, sizeof (struct sf_target));
3263 			return (target);
3264 		} else if (target != NULL) {
3265 			/*
3266 			 * LUN 0 online or not examined yet.
3267 			 * Try to bring the LUN back online
3268 			 */
3269 			mutex_exit(&ptarget->sft_mutex);
3270 			mutex_enter(&target->sft_mutex);
3271 			target->sft_lip_cnt = privp->lip_cnt;
3272 			target->sft_state |= SF_TARGET_BUSY;
3273 			target->sft_state &= ~(SF_TARGET_OFFLINE|
3274 				SF_TARGET_MARK);
3275 			target->sft_al_pa = (uchar_t)privp->dest_nport_id;
3276 			target->sft_hard_address = sf_switch_to_alpa[tnum];
3277 			mutex_exit(&target->sft_mutex);
3278 			mutex_exit(&sf->sf_mutex);
3279 			if (ntarget != NULL)
3280 				kmem_free(ntarget, sizeof (struct sf_target));
3281 			return (target);
3282 		}
3283 		mutex_exit(&ptarget->sft_mutex);
3284 		if (ntarget == NULL) {
3285 			mutex_exit(&sf->sf_mutex);
3286 			return (NULL);
3287 		}
3288 		/* Initialize new target structure */
3289 		bcopy((caddr_t)&privp->node_wwn,
3290 		    (caddr_t)&ntarget->sft_node_wwn, sizeof (privp->node_wwn));
3291 		bcopy((caddr_t)&privp->port_wwn,
3292 		    (caddr_t)&ntarget->sft_port_wwn, sizeof (privp->port_wwn));
3293 		ntarget->sft_lun.l = lun;
3294 #ifdef RAID_LUNS
3295 		ntarget->sft_lun.l = orig_lun;
3296 		ntarget->sft_raid_lun = (uint_t)lun;
3297 #endif
3298 		mutex_init(&ntarget->sft_mutex, NULL, MUTEX_DRIVER, NULL);
3299 		mutex_init(&ntarget->sft_pkt_mutex, NULL, MUTEX_DRIVER, NULL);
		/* Don't let anyone use this till we finish up init. */
3301 		mutex_enter(&ntarget->sft_mutex);
3302 		mutex_enter(&ntarget->sft_pkt_mutex);
3303 
3304 		hash = SF_HASH(privp->port_wwn, lun);
3305 		ntarget->sft_next = sf->sf_wwn_lists[hash];
3306 		sf->sf_wwn_lists[hash] = ntarget;
3307 
3308 		ntarget->sft_lip_cnt = privp->lip_cnt;
3309 		ntarget->sft_al_pa = (uchar_t)privp->dest_nport_id;
3310 		ntarget->sft_hard_address = sf_switch_to_alpa[tnum];
3311 		ntarget->sft_device_type = DTYPE_UNKNOWN;
3312 		ntarget->sft_state = SF_TARGET_BUSY;
3313 		ntarget->sft_pkt_head = (struct sf_pkt *)&ntarget->
3314 		    sft_pkt_head;
3315 		ntarget->sft_pkt_tail = (struct sf_pkt *)&ntarget->
3316 		    sft_pkt_head;
3317 
3318 		mutex_enter(&ptarget->sft_mutex);
		/* walk to the tail of this port's LUN chain */
3320 		for (target = ptarget; target->sft_next_lun;
3321 			target = target->sft_next_lun) {
3322 			otarget = target->sft_next_lun;
3323 		}
3324 		ntarget->sft_next_lun = target->sft_next_lun;
3325 		target->sft_next_lun = ntarget;
3326 		mutex_exit(&ptarget->sft_mutex);
3327 		mutex_exit(&ntarget->sft_pkt_mutex);
3328 		mutex_exit(&ntarget->sft_mutex);
3329 		mutex_exit(&sf->sf_mutex);
3330 		return (ntarget);
3331 
3332 	}
3333 	if (target != NULL && target->sft_lip_cnt == sf->sf_lip_cnt) {
3334 		/* It's been touched this LIP -- duplicate WWNs */
3335 		sf_offline_target(sf, target); /* And all the baby targets */
3336 		mutex_exit(&sf->sf_mutex);
3337 		sf_log(sf, CE_WARN, "target 0x%x, duplicate port wwns\n",
3338 		    tnum);
3339 		if (ntarget != NULL) {
3340 			kmem_free(ntarget, sizeof (struct sf_target));
3341 		}
3342 		return (NULL);
3343 	}
3344 
3345 	if ((otarget = sf->sf_targets[tnum]) != NULL) {
3346 		/* Someone else is in our slot */
3347 		mutex_enter(&otarget->sft_mutex);
3348 		if (otarget->sft_lip_cnt == sf->sf_lip_cnt) {
3349 			mutex_exit(&otarget->sft_mutex);
3350 			sf_offline_target(sf, otarget);
3351 			if (target != NULL)
3352 				sf_offline_target(sf, target);
3353 			mutex_exit(&sf->sf_mutex);
3354 			sf_log(sf, CE_WARN,
3355 			    "target 0x%x, duplicate switch settings\n", tnum);
3356 			if (ntarget != NULL)
3357 				kmem_free(ntarget, sizeof (struct sf_target));
3358 			return (NULL);
3359 		}
3360 		mutex_exit(&otarget->sft_mutex);
3361 		if (bcmp((caddr_t)&privp->port_wwn, (caddr_t)&otarget->
3362 		    sft_port_wwn, sizeof (privp->port_wwn))) {
3363 			sf_offline_target(sf, otarget);
3364 			mutex_exit(&sf->sf_mutex);
3365 			sf_log(sf, CE_WARN, "wwn changed on target 0x%x\n",
3366 					tnum);
3367 			bzero((caddr_t)&sf->sf_stats.tstats[tnum],
3368 			    sizeof (struct sf_target_stats));
3369 			mutex_enter(&sf->sf_mutex);
3370 		}
3371 	}
3372 
3373 	sf->sf_targets[tnum] = target;
3374 	if ((target = sf->sf_targets[tnum]) == NULL) {
3375 		if (ntarget == NULL) {
3376 			mutex_exit(&sf->sf_mutex);
3377 			return (NULL);
3378 		}
3379 		bcopy((caddr_t)&privp->node_wwn,
3380 		    (caddr_t)&ntarget->sft_node_wwn, sizeof (privp->node_wwn));
3381 		bcopy((caddr_t)&privp->port_wwn,
3382 		    (caddr_t)&ntarget->sft_port_wwn, sizeof (privp->port_wwn));
3383 		ntarget->sft_lun.l = lun;
3384 #ifdef RAID_LUNS
3385 		ntarget->sft_lun.l = orig_lun;
3386 		ntarget->sft_raid_lun = (uint_t)lun;
3387 #endif
3388 		mutex_init(&ntarget->sft_mutex, NULL, MUTEX_DRIVER, NULL);
3389 		mutex_init(&ntarget->sft_pkt_mutex, NULL, MUTEX_DRIVER, NULL);
3390 		mutex_enter(&ntarget->sft_mutex);
3391 		mutex_enter(&ntarget->sft_pkt_mutex);
3392 		hash = SF_HASH(privp->port_wwn, lun); /* lun 0 */
3393 		ntarget->sft_next = sf->sf_wwn_lists[hash];
3394 		sf->sf_wwn_lists[hash] = ntarget;
3395 
3396 		target = ntarget;
3397 		target->sft_lip_cnt = privp->lip_cnt;
3398 		target->sft_al_pa = (uchar_t)privp->dest_nport_id;
3399 		target->sft_hard_address = sf_switch_to_alpa[tnum];
3400 		target->sft_device_type = DTYPE_UNKNOWN;
3401 		target->sft_state = SF_TARGET_BUSY;
3402 		target->sft_pkt_head = (struct sf_pkt *)&target->
3403 		    sft_pkt_head;
3404 		target->sft_pkt_tail = (struct sf_pkt *)&target->
3405 		    sft_pkt_head;
3406 		sf->sf_targets[tnum] = target;
3407 		mutex_exit(&ntarget->sft_mutex);
3408 		mutex_exit(&ntarget->sft_pkt_mutex);
3409 		mutex_exit(&sf->sf_mutex);
3410 	} else {
3411 		mutex_enter(&target->sft_mutex);
3412 		target->sft_lip_cnt = privp->lip_cnt;
3413 		target->sft_state |= SF_TARGET_BUSY;
3414 		target->sft_state &= ~(SF_TARGET_OFFLINE|SF_TARGET_MARK);
3415 		target->sft_al_pa = (uchar_t)privp->dest_nport_id;
3416 		target->sft_hard_address = sf_switch_to_alpa[tnum];
3417 		mutex_exit(&target->sft_mutex);
3418 		mutex_exit(&sf->sf_mutex);
3419 		if (ntarget != NULL)
3420 			kmem_free(ntarget, sizeof (struct sf_target));
3421 	}
3422 	return (target);
3423 }
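
/*
 * Editor's note: sf_create_target() above threads every target onto
 * two lists at once:
 *
 *	sf_wwn_lists[hash] --sft_next--> ...      (WWN/LUN hash chain)
 *	LUN 0 --sft_next_lun--> LUN 1 --> LUN 2   (per-port LUN chain)
 *
 * LUN 0 always heads the LUN chain, which is why a non-zero LUN must
 * find its LUN 0 before it can be created.
 */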
3424 
3425 
3426 /*
3427  * find the target for a given sf instance
3428  */
3429 /* ARGSUSED */
3430 static struct sf_target *
3431 #ifdef RAID_LUNS
3432 sf_lookup_target(struct sf *sf, uchar_t *wwn, int lun)
3433 #else
3434 sf_lookup_target(struct sf *sf, uchar_t *wwn, int64_t lun)
3435 #endif
3436 {
3437 	int hash;
3438 	struct sf_target *target;
3439 
3440 	ASSERT(mutex_owned(&sf->sf_mutex));
3441 	hash = SF_HASH(wwn, lun);
3442 
3443 	target = sf->sf_wwn_lists[hash];
3444 	while (target != NULL) {
3445 
3446 #ifndef	RAID_LUNS
3447 		if (bcmp((caddr_t)wwn, (caddr_t)&target->sft_port_wwn,
3448 		    sizeof (target->sft_port_wwn)) == 0 &&
3449 			target->sft_lun.l == lun)
3450 			break;
3451 #else
3452 		if (bcmp((caddr_t)wwn, (caddr_t)&target->sft_port_wwn,
3453 		    sizeof (target->sft_port_wwn)) == 0 &&
3454 			target->sft_raid_lun == lun)
3455 			break;
3456 #endif
3457 		target = target->sft_next;
3458 	}
3459 
3460 	return (target);
3461 }
3462 
3463 
3464 /*
3465  * Send out a REPORT_LUNS command.
3466  */
3467 static int
3468 sf_do_reportlun(struct sf *sf, struct sf_els_hdr *privp,
3469     struct sf_target *target)
3470 {
3471 	struct	fcal_packet	*fpkt = privp->fpkt;
3472 	ddi_dma_cookie_t	pcookie;
3473 	ddi_dma_handle_t	lun_dma_handle = NULL;
3474 	ddi_acc_handle_t	lun_acc_handle;
3475 	uint_t			ccount;
3476 	size_t			real_size;
3477 	caddr_t			lun_buf = NULL;
3478 	int			handle_bound = 0;
3479 	fc_frame_header_t	*hp = &fpkt->fcal_socal_request.sr_fc_frame_hdr;
3480 	struct fcp_cmd		*reportlun = (struct fcp_cmd *)privp->cmd;
3481 	char			*msg = "Transport";
3482 
3483 	if (ddi_dma_alloc_handle(sf->sf_dip, sf->sf_sochandle->fcal_dmaattr,
3484 	    DDI_DMA_DONTWAIT, NULL, &lun_dma_handle) != DDI_SUCCESS) {
3485 		msg = "ddi_dma_alloc_handle()";
3486 		goto fail;
3487 	}
3488 
3489 	if (ddi_dma_mem_alloc(lun_dma_handle, REPORT_LUNS_SIZE,
3490 	    sf->sf_sochandle->fcal_accattr, DDI_DMA_CONSISTENT,
3491 	    DDI_DMA_DONTWAIT, NULL, &lun_buf,
3492 	    &real_size, &lun_acc_handle) != DDI_SUCCESS) {
3493 		msg = "ddi_dma_mem_alloc()";
3494 		goto fail;
3495 	}
3496 
3497 	if (real_size < REPORT_LUNS_SIZE) {
3498 		msg = "DMA mem < REPORT_LUNS_SIZE";
3499 		goto fail;
3500 	}
3501 
3502 	if (ddi_dma_addr_bind_handle(lun_dma_handle, NULL,
3503 	    lun_buf, real_size, DDI_DMA_READ |
3504 	    DDI_DMA_CONSISTENT, DDI_DMA_DONTWAIT,
3505 	    NULL, &pcookie, &ccount) != DDI_DMA_MAPPED) {
3506 		msg = "ddi_dma_addr_bind_handle()";
3507 		goto fail;
3508 	}
3509 	handle_bound = 1;
3510 
3511 	if (ccount != 1) {
3512 		msg = "ccount != 1";
3513 		goto fail;
3514 	}
3515 	privp->els_code = 0;
3516 	privp->target = target;
3517 	privp->data_dma_handle = lun_dma_handle;
3518 	privp->data_acc_handle = lun_acc_handle;
3519 	privp->data_buf = lun_buf;
3520 
3521 	fpkt->fcal_pkt_comp = sf_reportlun_callback;
3522 	fpkt->fcal_socal_request.sr_soc_hdr.sh_seg_cnt = 3;
3523 	fpkt->fcal_socal_request.sr_cqhdr.cq_hdr_type = CQ_TYPE_IO_READ;
3524 	fpkt->fcal_socal_request.sr_dataseg[0].fc_count =
3525 	    sizeof (struct fcp_cmd);
3526 	fpkt->fcal_socal_request.sr_dataseg[2].fc_base =
3527 	    (uint32_t)pcookie.dmac_address;
3528 	fpkt->fcal_socal_request.sr_dataseg[2].fc_count = pcookie.dmac_size;
3529 	fpkt->fcal_socal_request.sr_soc_hdr.sh_byte_cnt = pcookie.dmac_size;
3530 	hp->r_ctl = R_CTL_COMMAND;
3531 	hp->type = TYPE_SCSI_FCP;
3532 	bzero((caddr_t)reportlun, sizeof (struct fcp_cmd));
3533 	((union scsi_cdb *)reportlun->fcp_cdb)->scc_cmd = SCMD_REPORT_LUNS;
3534 	/* Now set the buffer size.  If DDI gave us extra, that's O.K. */
3535 	((union scsi_cdb *)reportlun->fcp_cdb)->scc5_count0 =
3536 	    (real_size & 0x0ff);
3537 	((union scsi_cdb *)reportlun->fcp_cdb)->scc5_count1 =
3538 	    ((real_size >> 8) & 0x0ff);
3539 	((union scsi_cdb *)reportlun->fcp_cdb)->scc5_count2 =
3540 	    ((real_size >> 16) & 0x0ff);
3541 	((union scsi_cdb *)reportlun->fcp_cdb)->scc5_count3 =
3542 	    ((real_size >> 24) & 0x0ff);
3543 	reportlun->fcp_cntl.cntl_read_data = 1;
3544 	reportlun->fcp_cntl.cntl_write_data = 0;
3545 	reportlun->fcp_data_len = pcookie.dmac_size;
3546 	reportlun->fcp_cntl.cntl_qtype = FCP_QTYPE_SIMPLE;
3547 
3548 	(void) ddi_dma_sync(lun_dma_handle, 0, 0, DDI_DMA_SYNC_FORDEV);
3549 	/* We know he's there, so this should be fast */
3550 	privp->timeout = sf_watchdog_time + SF_FCP_TIMEOUT;
3551 	if (sf_els_transport(sf, privp) == 1)
3552 		return (1);
3553 
3554 fail:
3555 	sf_log(sf, CE_WARN,
3556 		"%s failure for REPORTLUN to target 0x%x\n",
3557 		msg, sf_alpa_to_switch[privp->dest_nport_id]);
3558 	sf_els_free(fpkt);
3559 	if (lun_dma_handle != NULL) {
3560 		if (handle_bound)
3561 			(void) ddi_dma_unbind_handle(lun_dma_handle);
3562 		ddi_dma_free_handle(&lun_dma_handle);
3563 	}
3564 	if (lun_buf != NULL) {
3565 		ddi_dma_mem_free(&lun_acc_handle);
3566 	}
3567 	return (0);
3568 }
3569 
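/*
 * Aside: REPORT LUNS parameter data starts with a 4-byte, big-endian
 * LUN list length (in bytes) plus 4 reserved bytes, followed by
 * 8-byte LUN entries -- which is why sf_reportlun_callback() below
 * shifts lun_list_len right by 3.  A minimal, generic decode sketch
 * (illustration only, never compiled: SF_EXAMPLE_CODE is not defined
 * anywhere, and buf/len are hypothetical caller-supplied values):
 */
#ifdef SF_EXAMPLE_CODE
static int
sf_example_count_luns(uchar_t *buf, size_t len)
{
	uint32_t bytes;

	if (len < 8)
		return (-1);		/* too short for the header */

	/* the LUN list length field is big-endian */
	bytes = ((uint32_t)buf[0] << 24) | ((uint32_t)buf[1] << 16) |
	    ((uint32_t)buf[2] << 8) | buf[3];

	if ((size_t)bytes + 8 > len)
		return (-1);		/* truncated by a short transfer */

	return ((int)(bytes >> 3));	/* 8 bytes per LUN entry */
}
#endif	/* SF_EXAMPLE_CODE */
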
3570 /*
3571  * Handle the results of a REPORT_LUNS command:
3572  *	Create additional targets if necessary
3573  *	Initiate INQUIRYs on all LUNs.
3574  */
3575 static void
3576 sf_reportlun_callback(struct fcal_packet *fpkt)
3577 {
3578 	struct sf_els_hdr *privp = (struct sf_els_hdr *)fpkt->
3579 	    fcal_pkt_private;
3580 	struct scsi_report_luns *ptr =
3581 		(struct scsi_report_luns *)privp->data_buf;
3582 	struct sf *sf = privp->sf;
3583 	struct sf_target *target = privp->target;
3584 	struct fcp_rsp *rsp = NULL;
3585 	int delayed_retry = 0;
3586 	int tid = sf_alpa_to_switch[target->sft_hard_address];
3587 	int i, free_pkt = 1;
3588 	short	ncmds;
3589 
3590 	mutex_enter(&sf->sf_mutex);
3591 	/* timeout doubles as a state flag; SF_INVALID_TIMEOUT = already handled */
3592 	if (privp->timeout == SF_INVALID_TIMEOUT) {
3593 		mutex_exit(&sf->sf_mutex);
3594 		return;
3595 	}
3596 	if (privp->prev)
3597 		privp->prev->next = privp->next;
3598 	if (privp->next)
3599 		privp->next->prev = privp->prev;
3600 	if (sf->sf_els_list == privp)
3601 		sf->sf_els_list = privp->next;
3602 	privp->prev = privp->next = NULL;
3603 	mutex_exit(&sf->sf_mutex);
3604 	ncmds = fpkt->fcal_ncmds;
3605 	ASSERT(ncmds >= 0);
3606 	mutex_enter(&sf->sf_cmd_mutex);
3607 	sf->sf_ncmds = ncmds;
3608 	mutex_exit(&sf->sf_cmd_mutex);
3609 
3610 	if (fpkt->fcal_pkt_status == FCAL_STATUS_OK) {
3611 		(void) ddi_dma_sync(privp->rsp_dma_handle, 0,
3612 			0, DDI_DMA_SYNC_FORKERNEL);
3613 
3614 		rsp = (struct fcp_rsp *)privp->rsp;
3615 	}
3616 	SF_DEBUG(1, (sf, CE_CONT,
3617 		"!REPORTLUN to al_pa %x pkt status %x scsi status %x\n",
3618 		privp->dest_nport_id,
3619 		fpkt->fcal_pkt_status,
3620 		rsp ? rsp->fcp_u.fcp_status.scsi_status : 0));
3621 
3622 	/* see if the target simply does not support REPORT_LUNS */
3623 	if (rsp && rsp->fcp_u.fcp_status.scsi_status == STATUS_CHECK &&
3624 		rsp->fcp_u.fcp_status.sense_len_set &&
3625 		rsp->fcp_sense_len >=
3626 		offsetof(struct scsi_extended_sense, es_qual_code)) {
3627 			struct scsi_extended_sense *sense;
3628 			sense = (struct scsi_extended_sense *)
3629 			((caddr_t)rsp + sizeof (struct fcp_rsp)
3630 				+ rsp->fcp_response_len);
3631 			if (sense->es_key == KEY_ILLEGAL_REQUEST) {
3632 				if (sense->es_add_code == 0x20) {
3633 					/* Fake LUN 0 */
3634 					SF_DEBUG(1, (sf, CE_CONT,
3635 					    "!REPORTLUN Faking good "
3636 					    "completion for alpa %x\n",
3637 					    privp->dest_nport_id));
3638 					ptr->lun_list_len = FCP_LUN_SIZE;
3639 					ptr->lun[0] = 0;
3640 					rsp->fcp_u.fcp_status.scsi_status =
3641 						STATUS_GOOD;
3642 				} else if (sense->es_add_code == 0x25) {
3643 					SF_DEBUG(1, (sf, CE_CONT,
3644 					    "!REPORTLUN device alpa %x "
3645 					    "key %x code %x\n",
3646 					    privp->dest_nport_id,
3647 					    sense->es_key, sense->es_add_code));
3648 					goto fail;
3649 				}
3650 			} else if (sense->es_key ==
3651 				KEY_UNIT_ATTENTION &&
3652 				sense->es_add_code == 0x29) {
3653 				SF_DEBUG(1, (sf, CE_CONT,
3654 					"!REPORTLUN device alpa %x was reset\n",
3655 					privp->dest_nport_id));
3656 			} else {
3657 				SF_DEBUG(1, (sf, CE_CONT,
3658 					"!REPORTLUN device alpa %x "
3659 					"key %x code %x\n",
3660 					privp->dest_nport_id,
3661 					sense->es_key, sense->es_add_code));
3662 /* XXXXXX The following is here to handle broken targets -- remove it later */
3663 				if (sf_reportlun_forever &&
3664 					sense->es_key == KEY_UNIT_ATTENTION)
3665 					goto retry;
3666 /* XXXXXX */
3667 				if (sense->es_key == KEY_NOT_READY)
3668 					delayed_retry = 1;
3669 			}
3670 	}
3671 
3672 	if (rsp && rsp->fcp_u.fcp_status.scsi_status == STATUS_GOOD) {
3673 		struct fcp_rsp_info *bep;
3674 
3675 		bep = (struct fcp_rsp_info *)(&rsp->
3676 			fcp_response_len + 1);
3677 		if (!rsp->fcp_u.fcp_status.rsp_len_set ||
3678 			bep->rsp_code == FCP_NO_FAILURE) {
3679 			(void) ddi_dma_sync(privp->data_dma_handle,
3680 				0, 0, DDI_DMA_SYNC_FORKERNEL);
3681 
3682 			/* convert from #bytes to #(8-byte LUN entries) */
3683 			ptr->lun_list_len = ptr->lun_list_len >> 3;
3684 			SF_DEBUG(2, (sf, CE_CONT,
3685 				"!REPORTLUN to al_pa %x succeeded: %d LUNs\n",
3686 				privp->dest_nport_id, ptr->lun_list_len));
3687 			if (!ptr->lun_list_len) {
3688 				/* No LUNs? Ya gotta be kidding... */
3689 				sf_log(sf, CE_WARN,
3690 					"SCSI violation -- "
3691 					"target 0x%x reports no LUNs\n",
3692 					sf_alpa_to_switch[
3693 					privp->dest_nport_id]);
3694 				ptr->lun_list_len = 1;
3695 				ptr->lun[0] = 0;
3696 			}
3697 
3698 			mutex_enter(&sf->sf_mutex);
3699 			if (sf->sf_lip_cnt == privp->lip_cnt) {
3700 				sf->sf_device_count += ptr->lun_list_len - 1;
3701 			}
3702 
3703 			mutex_exit(&sf->sf_mutex);
3704 			for (i = 0; i < ptr->lun_list_len && privp->lip_cnt ==
3705 				sf->sf_lip_cnt; i++) {
3706 				struct sf_els_hdr *nprivp;
3707 				struct fcal_packet *nfpkt;
3708 
3709 				/* LUN 0 is already in `target' */
3710 				if (ptr->lun[i] != 0) {
3711 					target = sf_create_target(sf,
3712 						privp, tid, ptr->lun[i]);
3713 				}
3714 				nprivp = NULL;
3715 				nfpkt = NULL;
3716 				if (target) {
3717 					nfpkt = sf_els_alloc(sf,
3718 						target->sft_al_pa,
3719 						sizeof (struct sf_els_hdr),
3720 						sizeof (union sf_els_cmd),
3721 						sizeof (union sf_els_rsp),
3722 						(caddr_t *)&nprivp,
3723 						(caddr_t *)&rsp);
3724 					if (nprivp)
3725 						nprivp->lip_cnt =
3726 							privp->lip_cnt;
3727 				}
3728 				if (nfpkt && nprivp &&
3729 					(sf_do_inquiry(sf, nprivp, target) ==
3730 						0)) {
3731 					mutex_enter(&sf->sf_mutex);
3732 					if (sf->sf_lip_cnt == privp->
3733 						lip_cnt) {
3734 						sf->sf_device_count--;
3735 					}
3736 					sf_offline_target(sf, target);
3737 					mutex_exit(&sf->sf_mutex);
3738 				}
3739 			}
3740 			sf_els_free(fpkt);
3741 			return;
3742 		} else {
3743 			SF_DEBUG(1, (sf, CE_CONT,
3744 				"!REPORTLUN al_pa %x fcp failure, "
3745 				"fcp_rsp_code %x scsi status %x\n",
3746 				privp->dest_nport_id, bep->rsp_code,
3747 				rsp ? rsp->fcp_u.fcp_status.scsi_status:0));
3748 			goto fail;
3749 		}
3750 	}
3751 	if (rsp && ((rsp->fcp_u.fcp_status.scsi_status == STATUS_BUSY) ||
3752 		(rsp->fcp_u.fcp_status.scsi_status == STATUS_QFULL))) {
3753 		delayed_retry = 1;
3754 	}
3755 
3756 	if (++(privp->retries) < sf_els_retries ||
3757 	    (delayed_retry && privp->retries < SF_BSY_RETRIES)) {
3758 /* XXXXXX The following is here to handle broken targets -- remove it later */
3759 retry:
3760 /* XXXXXX */
3761 		if (delayed_retry) {
3762 			privp->retries--;
3763 			privp->timeout = sf_watchdog_time + SF_BSY_TIMEOUT;
3764 			privp->delayed_retry = 1;
3765 		} else {
3766 			privp->timeout = sf_watchdog_time + SF_FCP_TIMEOUT;
3767 		}
3768 
3769 		privp->prev = NULL;
3770 		mutex_enter(&sf->sf_mutex);
3771 		if (privp->lip_cnt == sf->sf_lip_cnt) {
3772 			if (!delayed_retry)
3773 				SF_DEBUG(1, (sf, CE_WARN,
3774 				    "!REPORTLUN to al_pa %x failed, retrying\n",
3775 				    privp->dest_nport_id));
3776 			privp->next = sf->sf_els_list;
3777 			if (sf->sf_els_list != NULL)
3778 				sf->sf_els_list->prev = privp;
3779 			sf->sf_els_list = privp;
3780 			mutex_exit(&sf->sf_mutex);
3781 			if (!delayed_retry && soc_transport(sf->sf_sochandle,
3782 			    fpkt, FCAL_NOSLEEP, CQ_REQUEST_1) !=
3783 			    FCAL_TRANSPORT_SUCCESS) {
3784 				mutex_enter(&sf->sf_mutex);
3785 				if (privp->prev)
3786 					privp->prev->next = privp->next;
3787 				if (privp->next)
3788 					privp->next->prev = privp->prev;
3789 				if (sf->sf_els_list == privp)
3790 					sf->sf_els_list = privp->next;
3791 				mutex_exit(&sf->sf_mutex);
3792 				goto fail;
3793 			} else
3794 				return;
3795 		} else {
3796 			mutex_exit(&sf->sf_mutex);
3797 		}
3798 	} else {
3799 fail:
3800 
3801 		/* REPORT_LUNS failed -- fall back to INQUIRY */
3802 		if (sf_do_inquiry(sf, privp, target) != 0) {
3803 			return;
3804 		} else {
3805 			free_pkt = 0;
3806 		}
3807 		mutex_enter(&sf->sf_mutex);
3808 		if (sf->sf_lip_cnt == privp->lip_cnt) {
3809 			sf_log(sf, CE_WARN, "!REPORTLUN to target 0x%x failed\n",
3810 			    sf_alpa_to_switch[privp->dest_nport_id]);
3811 			sf_offline_target(sf, target);
3812 			sf->sf_device_count--;
3813 			ASSERT(sf->sf_device_count >= 0);
3814 			if (sf->sf_device_count == 0)
3815 				sf_finish_init(sf, privp->lip_cnt);
3816 		}
3817 		mutex_exit(&sf->sf_mutex);
3818 	}
3819 	if (free_pkt) {
3820 		sf_els_free(fpkt);
3821 	}
3822 }
3823 
3824 static int
3825 sf_do_inquiry(struct sf *sf, struct sf_els_hdr *privp,
3826     struct sf_target *target)
3827 {
3828 	struct	fcal_packet	*fpkt = privp->fpkt;
3829 	ddi_dma_cookie_t	pcookie;
3830 	ddi_dma_handle_t	inq_dma_handle = NULL;
3831 	ddi_acc_handle_t	inq_acc_handle;
3832 	uint_t			ccount;
3833 	size_t			real_size;
3834 	caddr_t			inq_buf = NULL;
3835 	int			handle_bound = FALSE;
3836 	fc_frame_header_t *hp = &fpkt->fcal_socal_request.sr_fc_frame_hdr;
3837 	struct fcp_cmd		*inq = (struct fcp_cmd *)privp->cmd;
3838 	char			*msg = "Transport";
3839 
3840 
3841 	if (ddi_dma_alloc_handle(sf->sf_dip, sf->sf_sochandle->fcal_dmaattr,
3842 	    DDI_DMA_DONTWAIT, NULL, &inq_dma_handle) != DDI_SUCCESS) {
3843 		msg = "ddi_dma_alloc_handle()";
3844 		goto fail;
3845 	}
3846 
3847 	if (ddi_dma_mem_alloc(inq_dma_handle, SUN_INQSIZE,
3848 	    sf->sf_sochandle->fcal_accattr, DDI_DMA_CONSISTENT,
3849 	    DDI_DMA_DONTWAIT, NULL, &inq_buf,
3850 	    &real_size, &inq_acc_handle) != DDI_SUCCESS) {
3851 		msg = "ddi_dma_mem_alloc()";
3852 		goto fail;
3853 	}
3854 
3855 	if (real_size < SUN_INQSIZE) {
3856 		msg = "DMA mem < inquiry size";
3857 		goto fail;
3858 	}
3859 
3860 	if (ddi_dma_addr_bind_handle(inq_dma_handle, NULL,
3861 	    inq_buf, real_size, DDI_DMA_READ | DDI_DMA_CONSISTENT,
3862 	    DDI_DMA_DONTWAIT, NULL, &pcookie, &ccount) != DDI_DMA_MAPPED) {
3863 		msg = "ddi_dma_addr_bind_handle()";
3864 		goto fail;
3865 	}
3866 	handle_bound = TRUE;
3867 
3868 	if (ccount != 1) {
3869 		msg = "ccount != 1";
3870 		goto fail;
3871 	}
3872 	privp->els_code = 0;			/* not an ELS command */
3873 	privp->target = target;
3874 	privp->data_dma_handle = inq_dma_handle;
3875 	privp->data_acc_handle = inq_acc_handle;
3876 	privp->data_buf = inq_buf;
3877 	fpkt->fcal_pkt_comp = sf_inq_callback;
3878 	fpkt->fcal_socal_request.sr_soc_hdr.sh_seg_cnt = 3;
3879 	fpkt->fcal_socal_request.sr_cqhdr.cq_hdr_type = CQ_TYPE_IO_READ;
3880 	fpkt->fcal_socal_request.sr_dataseg[0].fc_count =
3881 	    sizeof (struct fcp_cmd);
3882 	fpkt->fcal_socal_request.sr_dataseg[2].fc_base =
3883 	    (uint32_t)pcookie.dmac_address;
3884 	fpkt->fcal_socal_request.sr_dataseg[2].fc_count = pcookie.dmac_size;
3885 	fpkt->fcal_socal_request.sr_soc_hdr.sh_byte_cnt = pcookie.dmac_size;
3886 	hp->r_ctl = R_CTL_COMMAND;
3887 	hp->type = TYPE_SCSI_FCP;
3888 	bzero((caddr_t)inq, sizeof (struct fcp_cmd));
3889 	((union scsi_cdb *)inq->fcp_cdb)->scc_cmd = SCMD_INQUIRY;
3890 	((union scsi_cdb *)inq->fcp_cdb)->g0_count0 = SUN_INQSIZE;
3891 	bcopy((caddr_t)&target->sft_lun.b, (caddr_t)&inq->fcp_ent_addr,
3892 		FCP_LUN_SIZE);
3893 	inq->fcp_cntl.cntl_read_data = 1;
3894 	inq->fcp_cntl.cntl_write_data = 0;
3895 	inq->fcp_data_len = pcookie.dmac_size;
3896 	inq->fcp_cntl.cntl_qtype = FCP_QTYPE_SIMPLE;
3897 
3898 	(void) ddi_dma_sync(inq_dma_handle, (off_t)0, (size_t)0,
3899 	    DDI_DMA_SYNC_FORDEV);
3900 	privp->timeout = sf_watchdog_time + SF_FCP_TIMEOUT;
3901 	SF_DEBUG(5, (sf, CE_WARN,
3902 		"!Sending INQUIRY to al_pa %x lun %" PRIx64 "\n",
3903 		privp->dest_nport_id,
3904 		SCSA_LUN(target)));
3905 	return (sf_els_transport(sf, privp));
3906 
3907 fail:
3908 	sf_log(sf, CE_WARN,
3909 		"%s failure for INQUIRY to target 0x%x\n",
3910 		msg, sf_alpa_to_switch[privp->dest_nport_id]);
3911 	sf_els_free(fpkt);
3912 	if (inq_dma_handle != NULL) {
3913 		if (handle_bound) {
3914 			(void) ddi_dma_unbind_handle(inq_dma_handle);
3915 		}
3916 		ddi_dma_free_handle(&inq_dma_handle);
3917 	}
3918 	if (inq_buf != NULL) {
3919 		ddi_dma_mem_free(&inq_acc_handle);
3920 	}
3921 	return (FALSE);
3922 }
3923 
3924 
3925 /*
3926  * called as the pkt_comp routine for INQ packets
3927  */
3928 static void
3929 sf_inq_callback(struct fcal_packet *fpkt)
3930 {
3931 	struct sf_els_hdr *privp = (struct sf_els_hdr *)fpkt->
3932 	    fcal_pkt_private;
3933 	struct scsi_inquiry *prt = (struct scsi_inquiry *)privp->data_buf;
3934 	struct sf *sf = privp->sf;
3935 	struct sf *tsf;
3936 	struct sf_target *target = privp->target;
3937 	struct fcp_rsp *rsp;
3938 	int delayed_retry = FALSE;
3939 	short	ncmds;
3940 
3941 
3942 	mutex_enter(&sf->sf_mutex);
3943 	/* timeout doubles as a state flag; SF_INVALID_TIMEOUT = already handled */
3944 	if (privp->timeout == SF_INVALID_TIMEOUT) {
3945 		mutex_exit(&sf->sf_mutex);
3946 		return;
3947 	}
3948 	if (privp->prev != NULL) {
3949 		privp->prev->next = privp->next;
3950 	}
3951 	if (privp->next != NULL) {
3952 		privp->next->prev = privp->prev;
3953 	}
3954 	if (sf->sf_els_list == privp) {
3955 		sf->sf_els_list = privp->next;
3956 	}
3957 	privp->prev = privp->next = NULL;
3958 	mutex_exit(&sf->sf_mutex);
3959 	ncmds = fpkt->fcal_ncmds;
3960 	ASSERT(ncmds >= 0);
3961 	mutex_enter(&sf->sf_cmd_mutex);
3962 	sf->sf_ncmds = ncmds;
3963 	mutex_exit(&sf->sf_cmd_mutex);
3964 
3965 	if (fpkt->fcal_pkt_status == FCAL_STATUS_OK) {
3966 
3967 		(void) ddi_dma_sync(privp->rsp_dma_handle, (off_t)0,
3968 		    (size_t)0, DDI_DMA_SYNC_FORKERNEL);
3969 
3970 		rsp = (struct fcp_rsp *)privp->rsp;
3971 		SF_DEBUG(2, (sf, CE_CONT,
3972 		    "!INQUIRY to al_pa %x scsi status %x",
3973 		    privp->dest_nport_id, rsp->fcp_u.fcp_status.scsi_status));
3974 
3975 		if ((rsp->fcp_u.fcp_status.scsi_status == STATUS_GOOD) &&
3976 		    !rsp->fcp_u.fcp_status.resid_over &&
3977 		    (!rsp->fcp_u.fcp_status.resid_under ||
3978 			((SUN_INQSIZE - rsp->fcp_resid) >= SUN_MIN_INQLEN))) {
3979 			struct fcp_rsp_info *bep;
3980 
3981 			bep = (struct fcp_rsp_info *)(&rsp->
3982 			    fcp_response_len + 1);
3983 
3984 			if (!rsp->fcp_u.fcp_status.rsp_len_set ||
3985 			    (bep->rsp_code == FCP_NO_FAILURE)) {
3986 
3987 				SF_DEBUG(2, (sf, CE_CONT,
3988 				    "!INQUIRY to al_pa %x lun %" PRIx64
3989 				    " succeeded\n",
3990 				    privp->dest_nport_id, SCSA_LUN(target)));
3991 
3992 				(void) ddi_dma_sync(privp->data_dma_handle,
3993 				    (off_t)0, (size_t)0,
3994 				    DDI_DMA_SYNC_FORKERNEL);
3995 
3996 				mutex_enter(&sf->sf_mutex);
3997 
3998 				if (sf->sf_lip_cnt == privp->lip_cnt) {
3999 					mutex_enter(&target->sft_mutex);
4000 					target->sft_device_type =
4001 					    prt->inq_dtype;
4002 					bcopy(prt, &target->sft_inq,
4003 					    sizeof (*prt));
4004 					mutex_exit(&target->sft_mutex);
4005 					sf->sf_device_count--;
4006 					ASSERT(sf->sf_device_count >= 0);
4007 					if (sf->sf_device_count == 0) {
4008 						sf_finish_init(sf,
4009 						    privp->lip_cnt);
4010 					}
4011 				}
4012 				mutex_exit(&sf->sf_mutex);
4013 				sf_els_free(fpkt);
4014 				return;
4015 			}
4016 		} else if ((rsp->fcp_u.fcp_status.scsi_status ==
4017 		    STATUS_BUSY) ||
4018 		    (rsp->fcp_u.fcp_status.scsi_status == STATUS_QFULL) ||
4019 		    (rsp->fcp_u.fcp_status.scsi_status == STATUS_CHECK)) {
4020 			delayed_retry = TRUE;
4021 		}
4022 	} else {
4023 		SF_DEBUG(2, (sf, CE_CONT, "!INQUIRY to al_pa %x fc status %x",
4024 			privp->dest_nport_id, fpkt->fcal_pkt_status));
4025 	}
4026 
4027 	if (++(privp->retries) < sf_els_retries ||
4028 	    (delayed_retry && privp->retries < SF_BSY_RETRIES)) {
4029 		if (fpkt->fcal_pkt_status == FCAL_STATUS_MAX_XCHG_EXCEEDED)  {
4030 			tsf = sf->sf_sibling;
4031 			if (tsf != NULL) {
4032 				mutex_enter(&tsf->sf_cmd_mutex);
4033 				tsf->sf_flag = 1;
4034 				tsf->sf_throttle = SF_DECR_DELTA;
4035 				mutex_exit(&tsf->sf_cmd_mutex);
4036 			}
4037 			delayed_retry = 1;
4038 		}
4039 		if (delayed_retry) {
4040 			privp->retries--;
4041 			privp->timeout = sf_watchdog_time + SF_BSY_TIMEOUT;
4042 			privp->delayed_retry = TRUE;
4043 		} else {
4044 			privp->timeout = sf_watchdog_time + SF_FCP_TIMEOUT;
4045 		}
4046 
4047 		privp->prev = NULL;
4048 		mutex_enter(&sf->sf_mutex);
4049 		if (privp->lip_cnt == sf->sf_lip_cnt) {
4050 			if (!delayed_retry) {
4051 				SF_DEBUG(1, (sf, CE_WARN,
4052 				    "INQUIRY to al_pa %x failed, retrying",
4053 				    privp->dest_nport_id));
4054 			}
4055 			privp->next = sf->sf_els_list;
4056 			if (sf->sf_els_list != NULL) {
4057 				sf->sf_els_list->prev = privp;
4058 			}
4059 			sf->sf_els_list = privp;
4060 			mutex_exit(&sf->sf_mutex);
4061 			/* if not delayed call transport to send a pkt */
4062 			if (!delayed_retry &&
4063 			    (soc_transport(sf->sf_sochandle, fpkt,
4064 				FCAL_NOSLEEP, CQ_REQUEST_1) !=
4065 				FCAL_TRANSPORT_SUCCESS)) {
4066 				mutex_enter(&sf->sf_mutex);
4067 				if (privp->prev != NULL) {
4068 					privp->prev->next = privp->next;
4069 				}
4070 				if (privp->next != NULL) {
4071 					privp->next->prev = privp->prev;
4072 				}
4073 				if (sf->sf_els_list == privp) {
4074 					sf->sf_els_list = privp->next;
4075 				}
4076 				mutex_exit(&sf->sf_mutex);
4077 				goto fail;
4078 			}
4079 			return;
4080 		}
4081 		mutex_exit(&sf->sf_mutex);
4082 	} else {
4083 fail:
4084 		mutex_enter(&sf->sf_mutex);
4085 		if (sf->sf_lip_cnt == privp->lip_cnt) {
4086 			sf_offline_target(sf, target);
4087 			sf_log(sf, CE_NOTE,
4088 			    "INQUIRY to target 0x%x lun %" PRIx64 " failed. "
4089 			    "Retry Count: %d\n",
4090 			    sf_alpa_to_switch[privp->dest_nport_id],
4091 			    SCSA_LUN(target),
4092 			    privp->retries);
4093 			sf->sf_device_count--;
4094 			ASSERT(sf->sf_device_count >= 0);
4095 			if (sf->sf_device_count == 0) {
4096 				sf_finish_init(sf, privp->lip_cnt);
4097 			}
4098 		}
4099 		mutex_exit(&sf->sf_mutex);
4100 	}
4101 	sf_els_free(fpkt);
4102 }
4103 
4104 
4105 static void
4106 sf_finish_init(struct sf *sf, int lip_cnt)
4107 {
4108 	int			i;		/* loop index */
4109 	int			cflag;
4110 	struct sf_target	*target;	/* current target */
4111 	dev_info_t		*dip;
4112 	struct sf_hp_elem	*elem;		/* hotplug element created */
4113 
4114 	SF_DEBUG(1, (sf, CE_WARN, "!sf_finish_init\n"));
4115 	ASSERT(mutex_owned(&sf->sf_mutex));
4116 
4117 	/* scan all hash queues */
4118 	for (i = 0; i < SF_NUM_HASH_QUEUES; i++) {
4119 		target = sf->sf_wwn_lists[i];
4120 		while (target != NULL) {
4121 			mutex_enter(&target->sft_mutex);
4122 
4123 			/* see if target is not offline */
4124 			if ((target->sft_state & SF_TARGET_OFFLINE)) {
4125 				/*
4126 				 * target already offline
4127 				 */
4128 				mutex_exit(&target->sft_mutex);
4129 				goto next_entry;
4130 			}
4131 
4132 			/*
4133 			 * target is not already offline -- see if it has
4134 			 * already been marked as ready to go offline
4135 			 */
4136 			if (target->sft_state & SF_TARGET_MARK) {
4137 				/*
4138 				 * target already marked, so take it offline
4139 				 */
4140 				mutex_exit(&target->sft_mutex);
4141 				sf_offline_target(sf, target);
4142 				goto next_entry;
4143 			}
4144 
4145 			/* clear target busy flag */
4146 			target->sft_state &= ~SF_TARGET_BUSY;
4147 
4148 			/* is target init not yet done? */
4149 			cflag = !(target->sft_state & SF_TARGET_INIT_DONE);
4150 
4151 			/* get pointer to target dip */
4152 			dip = target->sft_dip;
4153 
4154 			mutex_exit(&target->sft_mutex);
4155 			mutex_exit(&sf->sf_mutex);
4156 
4157 			if (cflag && (dip == NULL)) {
4158 				/*
4159 				 * target init not yet done &&
4160 				 * devinfo not yet created
4161 				 */
4162 				sf_create_devinfo(sf, target, lip_cnt);
4163 				mutex_enter(&sf->sf_mutex);
4164 				goto next_entry;
4165 			}
4166 
4167 			/*
4168 			 * target init already done || devinfo already created
4169 			 */
4170 			ASSERT(dip != NULL);
4171 			if (!sf_create_props(dip, target, lip_cnt)) {
4172 				/* a problem creating properties */
4173 				mutex_enter(&sf->sf_mutex);
4174 				goto next_entry;
4175 			}
4176 
4177 			/* create a new element for the hotplug list */
4178 			if ((elem = kmem_zalloc(sizeof (struct sf_hp_elem),
4179 			    KM_NOSLEEP)) != NULL) {
4180 
4181 				/* fill in the new element */
4182 				elem->dip = dip;
4183 				elem->target = target;
4184 				elem->what = SF_ONLINE;
4185 
4186 				/* add the new element into the hotplug list */
4187 				mutex_enter(&sf->sf_hp_daemon_mutex);
4188 				if (sf->sf_hp_elem_tail != NULL) {
4189 					sf->sf_hp_elem_tail->next = elem;
4190 					sf->sf_hp_elem_tail = elem;
4191 				} else {
4192 					/* this is the first element in list */
4193 					sf->sf_hp_elem_head =
4194 					    sf->sf_hp_elem_tail =
4195 						elem;
4196 				}
4197 				cv_signal(&sf->sf_hp_daemon_cv);
4198 				mutex_exit(&sf->sf_hp_daemon_mutex);
4199 			} else {
4200 				/* no memory for element; online node directly */
4201 				(void) ndi_devi_online_async(dip, 0);
4202 			}
4203 
4204 			mutex_enter(&sf->sf_mutex);
4205 
4206 next_entry:
4207 			/* ensure no new LIPs have occurred */
4208 			if (sf->sf_lip_cnt != lip_cnt) {
4209 				return;
4210 			}
4211 			target = target->sft_next;
4212 		}
4213 
4214 		/* done scanning all targets in this queue */
4215 	}
4216 
4217 	/* done with all hash queues */
4218 
4219 	sf->sf_state = SF_STATE_ONLINE;
4220 	sf->sf_online_timer = 0;
4221 }
4222 
4223 
4224 /*
4225  * create devinfo node
4226  */
4227 static void
4228 sf_create_devinfo(struct sf *sf, struct sf_target *target, int lip_cnt)
4229 {
4230 	dev_info_t		*cdip = NULL;
4231 	char			*nname = NULL;
4232 	char			**compatible = NULL;
4233 	int			ncompatible;
4234 	struct scsi_inquiry	*inq = &target->sft_inq;
4235 	char			*scsi_binding_set;
4236 
4237 	/* get the 'scsi-binding-set' property */
4238 	if (ddi_prop_lookup_string(DDI_DEV_T_ANY, sf->sf_dip,
4239 	    DDI_PROP_NOTPROM | DDI_PROP_DONTPASS, "scsi-binding-set",
4240 	    &scsi_binding_set) != DDI_PROP_SUCCESS)
4241 		scsi_binding_set = NULL;
4242 
4243 	/* determine the node name and compatible */
4244 	scsi_hba_nodename_compatible_get(inq, scsi_binding_set,
4245 	    inq->inq_dtype, NULL, &nname, &compatible, &ncompatible);
4246 	if (scsi_binding_set)
4247 		ddi_prop_free(scsi_binding_set);
4248 
4249 	/* if nodename can't be determined then print a message and skip it */
4250 	if (nname == NULL) {
4251 #ifndef	RAID_LUNS
4252 		sf_log(sf, CE_WARN, "%s%d: no driver for device "
4253 		    "@w%02x%02x%02x%02x%02x%02x%02x%02x,%x\n"
4254 		    "    compatible: %s",
4255 		    ddi_driver_name(sf->sf_dip), ddi_get_instance(sf->sf_dip),
4256 		    target->sft_port_wwn[0], target->sft_port_wwn[1],
4257 		    target->sft_port_wwn[2], target->sft_port_wwn[3],
4258 		    target->sft_port_wwn[4], target->sft_port_wwn[5],
4259 		    target->sft_port_wwn[6], target->sft_port_wwn[7],
4260 		    target->sft_lun.l, *compatible);
4261 #else
4262 		sf_log(sf, CE_WARN, "%s%d: no driver for device "
4263 		    "@w%02x%02x%02x%02x%02x%02x%02x%02x,%x\n"
4264 		    "    compatible: %s",
4265 		    ddi_driver_name(sf->sf_dip), ddi_get_instance(sf->sf_dip),
4266 		    target->sft_port_wwn[0], target->sft_port_wwn[1],
4267 		    target->sft_port_wwn[2], target->sft_port_wwn[3],
4268 		    target->sft_port_wwn[4], target->sft_port_wwn[5],
4269 		    target->sft_port_wwn[6], target->sft_port_wwn[7],
4270 		    target->sft_raid_lun, *compatible);
4271 #endif
4272 		goto fail;
4273 	}
4274 
4275 	/* allocate the node */
4276 	if (ndi_devi_alloc(sf->sf_dip, nname,
4277 	    DEVI_SID_NODEID, &cdip) != NDI_SUCCESS) {
4278 		goto fail;
4279 	}
4280 
4281 	/* decorate the node with compatible */
4282 	if (ndi_prop_update_string_array(DDI_DEV_T_NONE, cdip,
4283 	    "compatible", compatible, ncompatible) != DDI_PROP_SUCCESS) {
4284 		goto fail;
4285 	}
4286 
4287 	/* add addressing properties to the node */
4288 	if (sf_create_props(cdip, target, lip_cnt) != 1) {
4289 		goto fail;
4290 	}
4291 
4292 	mutex_enter(&target->sft_mutex);
4293 	if (target->sft_dip != NULL) {
4294 		mutex_exit(&target->sft_mutex);
4295 		goto fail;
4296 	}
4297 	target->sft_dip = cdip;
4298 	mutex_exit(&target->sft_mutex);
4299 
4300 	if (ndi_devi_online_async(cdip, 0) != DDI_SUCCESS) {
4301 		goto fail;
4302 	}
4303 
4304 	scsi_hba_nodename_compatible_free(nname, compatible);
4305 	return;
4306 
4307 fail:
4308 	scsi_hba_nodename_compatible_free(nname, compatible);
4309 	if (cdip != NULL) {
4310 		(void) ndi_prop_remove(DDI_DEV_T_NONE, cdip, NODE_WWN_PROP);
4311 		(void) ndi_prop_remove(DDI_DEV_T_NONE, cdip, PORT_WWN_PROP);
4312 		(void) ndi_prop_remove(DDI_DEV_T_NONE, cdip, LIP_CNT_PROP);
4313 		(void) ndi_prop_remove(DDI_DEV_T_NONE, cdip, TARGET_PROP);
4314 		(void) ndi_prop_remove(DDI_DEV_T_NONE, cdip, LUN_PROP);
4315 		if (ndi_devi_free(cdip) != NDI_SUCCESS) {
4316 			sf_log(sf, CE_WARN, "ndi_devi_free failed\n");
4317 		} else {
4318 			mutex_enter(&target->sft_mutex);
4319 			if (cdip == target->sft_dip) {
4320 				target->sft_dip = NULL;
4321 			}
4322 			mutex_exit(&target->sft_mutex);
4323 		}
4324 	}
4325 }
4326 
4327 /*
4328  * create required properties, returning TRUE iff we succeed, else
4329  * returning FALSE
4330  */
4331 static int
4332 sf_create_props(dev_info_t *cdip, struct sf_target *target, int lip_cnt)
4333 {
4334 	int tgt_id = sf_alpa_to_switch[target->sft_al_pa];
4335 
4336 
4337 	if (ndi_prop_update_byte_array(DDI_DEV_T_NONE,
4338 	    cdip, NODE_WWN_PROP, target->sft_node_wwn, FC_WWN_SIZE) !=
4339 	    DDI_PROP_SUCCESS) {
4340 		return (FALSE);
4341 	}
4342 
4343 	if (ndi_prop_update_byte_array(DDI_DEV_T_NONE,
4344 	    cdip, PORT_WWN_PROP, target->sft_port_wwn, FC_WWN_SIZE) !=
4345 	    DDI_PROP_SUCCESS) {
4346 		return (FALSE);
4347 	}
4348 
4349 	if (ndi_prop_update_int(DDI_DEV_T_NONE,
4350 	    cdip, LIP_CNT_PROP, lip_cnt) != DDI_PROP_SUCCESS) {
4351 		return (FALSE);
4352 	}
4353 
4354 	if (ndi_prop_update_int(DDI_DEV_T_NONE,
4355 	    cdip, TARGET_PROP, tgt_id) != DDI_PROP_SUCCESS) {
4356 		return (FALSE);
4357 	}
4358 
4359 #ifndef	RAID_LUNS
4360 	if (ndi_prop_update_int(DDI_DEV_T_NONE,
4361 	    cdip, LUN_PROP, target->sft_lun.l) != DDI_PROP_SUCCESS) {
4362 		return (0);
4363 	}
4364 #else
4365 	if (ndi_prop_update_int(DDI_DEV_T_NONE,
4366 	    cdip, LUN_PROP, target->sft_raid_lun) != DDI_PROP_SUCCESS) {
4367 		return (0);
4368 	}
4369 #endif
4370 
4371 	return (TRUE);
4372 }
4373 
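/*
 * Aside: a child (target) driver bound to the node can read the
 * addressing properties created above from its attach(9E) routine
 * with the standard DDI property interfaces.  A minimal sketch
 * (illustration only, never compiled: SF_EXAMPLE_CODE is not defined,
 * and the helper and its cdip argument are hypothetical):
 */
#ifdef SF_EXAMPLE_CODE
static void
sf_example_read_addr_props(dev_info_t *cdip)
{
	int tgt = ddi_prop_get_int(DDI_DEV_T_ANY, cdip,
	    DDI_PROP_DONTPASS, TARGET_PROP, -1);
	int lun = ddi_prop_get_int(DDI_DEV_T_ANY, cdip,
	    DDI_PROP_DONTPASS, LUN_PROP, -1);

	cmn_err(CE_CONT, "?bound at target 0x%x, lun 0x%x\n", tgt, lun);
}
#endif	/* SF_EXAMPLE_CODE */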
4374 
4375 /*
4376  * called by the transport to offline a target
4377  */
4378 /* ARGSUSED */
4379 static void
4380 sf_offline_target(struct sf *sf, struct sf_target *target)
4381 {
4382 	dev_info_t *dip;
4383 	struct sf_target *next_target = NULL;
4384 	struct sf_hp_elem	*elem;
4385 
4386 	ASSERT(mutex_owned(&sf->sf_mutex));
4387 
4388 	if (sf_core && (sf_core & SF_CORE_OFFLINE_TARGET)) {
4389 		(void) soc_take_core(sf->sf_sochandle, sf->sf_socp);
4390 		sf_core = 0;
4391 	}
4392 
4393 	while (target != NULL) {
4394 		sf_log(sf, CE_NOTE,
4395 			"!target 0x%x al_pa 0x%x lun %" PRIx64 " offlined\n",
4396 			sf_alpa_to_switch[target->sft_al_pa],
4397 			target->sft_al_pa, SCSA_LUN(target));
4398 		mutex_enter(&target->sft_mutex);
4399 		target->sft_state &= ~(SF_TARGET_BUSY|SF_TARGET_MARK);
4400 		target->sft_state |= SF_TARGET_OFFLINE;
4401 		mutex_exit(&target->sft_mutex);
4402 		mutex_exit(&sf->sf_mutex);
4403 
4404 		/* XXXX if this is LUN 0, offline all other LUNs */
4405 		if (next_target || target->sft_lun.l == 0)
4406 			next_target = target->sft_next_lun;
4407 
4408 		/* abort all cmds for this target */
4409 		sf_abort_all(sf, target, FALSE, sf->sf_lip_cnt, FALSE);
4410 
4411 		mutex_enter(&sf->sf_mutex);
4412 		mutex_enter(&target->sft_mutex);
4413 		if (target->sft_state & SF_TARGET_INIT_DONE) {
4414 			dip = target->sft_dip;
4415 			mutex_exit(&target->sft_mutex);
4416 			mutex_exit(&sf->sf_mutex);
4417 			(void) ndi_prop_remove(DDI_DEV_T_NONE, dip,
4418 				TARGET_PROP);
4419 			(void) ndi_event_retrieve_cookie(sf->sf_event_hdl,
4420 			    dip, FCAL_REMOVE_EVENT, &sf_remove_eid,
4421 			    NDI_EVENT_NOPASS);
4422 			(void) ndi_event_run_callbacks(sf->sf_event_hdl,
4423 				target->sft_dip, sf_remove_eid, NULL);
4424 
4425 			elem = kmem_zalloc(sizeof (struct sf_hp_elem),
4426 				KM_NOSLEEP);
4427 			if (elem != NULL) {
4428 				elem->dip = dip;
4429 				elem->target = target;
4430 				elem->what = SF_OFFLINE;
4431 				mutex_enter(&sf->sf_hp_daemon_mutex);
4432 				if (sf->sf_hp_elem_tail != NULL) {
4433 					sf->sf_hp_elem_tail->next = elem;
4434 					sf->sf_hp_elem_tail = elem;
4435 				} else {
4436 					sf->sf_hp_elem_head =
4437 						sf->sf_hp_elem_tail =
4438 						elem;
4439 				}
4440 				cv_signal(&sf->sf_hp_daemon_cv);
4441 				mutex_exit(&sf->sf_hp_daemon_mutex);
4442 			} else {
4443 				/* don't do NDI_DEVI_REMOVE for now */
4444 				if (ndi_devi_offline(dip, 0) != NDI_SUCCESS) {
4445 					SF_DEBUG(1, (sf, CE_WARN,
4446 						"target %x lun %" PRIx64 ", "
4447 						"device offline failed",
4448 						sf_alpa_to_switch[target->
4449 							sft_al_pa],
4450 						SCSA_LUN(target)));
4451 				} else {
4452 					SF_DEBUG(1, (sf, CE_NOTE,
4453 						"target %x, lun %" PRIx64 ", "
4454 						"device offline succeeded\n",
4455 						sf_alpa_to_switch[target->
4456 							sft_al_pa],
4457 						SCSA_LUN(target)));
4458 				}
4459 			}
4460 			mutex_enter(&sf->sf_mutex);
4461 		} else {
4462 			mutex_exit(&target->sft_mutex);
4463 		}
4464 		target = next_target;
4465 	}
4466 }
4467 
4468 
4469 /*
4470  * routine to get/set a capability
4471  *
4472  * returning:
4473  *	1 (TRUE)	boolean capability is true (on get)
4474  *	0 (FALSE)	invalid capability, can't set capability (on set),
4475  *			or boolean capability is false (on get)
4476  *	-1 (UNDEFINED)	can't find capability (SCSA) or unsupported capability
4477  *	3		when getting SCSI version number
4478  *	AL_PA		when getting port initiator ID
4479  */
4480 static int
4481 sf_commoncap(struct scsi_address *ap, char *cap,
4482     int val, int tgtonly, int doset)
4483 {
4484 	struct sf *sf = ADDR2SF(ap);
4485 	int cidx;
4486 	int rval = FALSE;
4487 
4488 
4489 	if (cap == NULL) {
4490 		SF_DEBUG(3, (sf, CE_WARN, "sf_commoncap: invalid arg"));
4491 		return (rval);
4492 	}
4493 
4494 	/* get index of capability string */
4495 	if ((cidx = scsi_hba_lookup_capstr(cap)) == -1) {
4496 		/* can't find capability */
4497 		return (UNDEFINED);
4498 	}
4499 
4500 	if (doset) {
4501 		/*
4502 		 * Process setcap request.
4503 		 */
4504 
4505 		/*
4506 		 * At present, no capability can be set through this routine
4507 		 */
4508 		switch (cidx) {
4509 		case SCSI_CAP_ARQ:	/* can't set this capability */
4510 			break;
4511 		default:
4512 			SF_DEBUG(3, (sf, CE_WARN,
4513 			    "sf_setcap: unsupported %d", cidx));
4514 			rval = UNDEFINED;
4515 			break;
4516 		}
4517 
4518 		SF_DEBUG(4, (sf, CE_NOTE,
4519 		    "set cap: cap=%s,val=0x%x,tgtonly=0x%x"
4520 		    ",doset=0x%x,rval=%d\n",
4521 		    cap, val, tgtonly, doset, rval));
4522 
4523 	} else {
4524 		/*
4525 		 * Process getcap request.
4526 		 */
4527 		switch (cidx) {
4528 		case SCSI_CAP_DMA_MAX:
4529 			break;		/* don't have this capability */
4530 		case SCSI_CAP_INITIATOR_ID:
4531 			rval = sf->sf_al_pa;
4532 			break;
4533 		case SCSI_CAP_ARQ:
4534 			rval = TRUE;	/* do have this capability */
4535 			break;
4536 		case SCSI_CAP_RESET_NOTIFICATION:
4537 		case SCSI_CAP_TAGGED_QING:
4538 			rval = TRUE;	/* do have this capability */
4539 			break;
4540 		case SCSI_CAP_SCSI_VERSION:
4541 			rval = 3;
4542 			break;
4543 		case SCSI_CAP_INTERCONNECT_TYPE:
4544 			rval = INTERCONNECT_FIBRE;
4545 			break;
4546 		default:
4547 			SF_DEBUG(4, (sf, CE_WARN,
4548 			    "sf_scsi_getcap: unsupported"));
4549 			rval = UNDEFINED;
4550 			break;
4551 		}
4552 		SF_DEBUG(4, (sf, CE_NOTE,
4553 		    "get cap: cap=%s,val=0x%x,tgtonly=0x%x,"
4554 		    "doset=0x%x,rval=%d\n",
4555 		    cap, val, tgtonly, doset, rval));
4556 	}
4557 
4558 	return (rval);
4559 }
4560 
4561 
4562 /*
4563  * called by the transport to get a capability
4564  */
4565 static int
4566 sf_getcap(struct scsi_address *ap, char *cap, int whom)
4567 {
4568 	return (sf_commoncap(ap, cap, 0, whom, FALSE));
4569 }
4570 
4571 
4572 /*
4573  * called by the transport to set a capability
4574  */
4575 static int
4576 sf_setcap(struct scsi_address *ap, char *cap, int value, int whom)
4577 {
4578 	return (sf_commoncap(ap, cap, value, whom, TRUE));
4579 }
4580 
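/*
 * Aside: the get/set entry points above are reached from target
 * drivers through scsi_ifgetcap(9F)/scsi_ifsetcap(9F).  A minimal
 * sketch of a caller (illustration only, never compiled:
 * SF_EXAMPLE_CODE is not defined and the helper is hypothetical):
 */
#ifdef SF_EXAMPLE_CODE
static void
sf_example_query_caps(struct scsi_address *ap)
{
	/* boolean capability: 1 = true, 0 = false, -1 = undefined */
	int tq = scsi_ifgetcap(ap, "tagged-qing", 1);

	/* non-boolean: the initiator ID here is the adapter's AL_PA */
	int id = scsi_ifgetcap(ap, "initiator-id", 1);

	cmn_err(CE_CONT, "?tagged-qing=%d initiator-id=0x%x\n", tq, id);
}
#endif	/* SF_EXAMPLE_CODE */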
4581 
4582 /*
4583  * called by the transport to abort a packet (or, if pkt is NULL, a target)
4584  */
4585 static int
4586 sf_abort(struct scsi_address *ap, struct scsi_pkt *pkt)
4587 {
4588 	struct sf *sf = ADDR2SF(ap);
4589 	struct sf_target *target = ADDR2TARGET(ap);
4590 	struct sf_pkt *cmd, *ncmd, *pcmd;
4591 	struct fcal_packet *fpkt;
4592 	int	rval = 0, t, my_rval = FALSE;
4593 	int	old_target_state;
4594 	int	lip_cnt;
4595 	int	tgt_id;
4596 	fc_frame_header_t	*hp;
4597 	int	deferred_destroy;
4598 
4599 	deferred_destroy = 0;
4600 
4601 	if (pkt != NULL) {
4602 		cmd = (struct sf_pkt *)((char *)pkt - sizeof (struct sf_pkt)
4603 		    - sizeof (struct fcal_packet));
4604 		fpkt = (struct fcal_packet *)((char *)cmd +
4605 		    sizeof (struct sf_pkt));
4606 		SF_DEBUG(2, (sf, CE_NOTE, "sf_abort packet %p\n",
4607 		    (void *)fpkt));
4608 		pcmd = NULL;
4609 		mutex_enter(&sf->sf_cmd_mutex);
4610 		ncmd = sf->sf_pkt_head;
4611 		while (ncmd != NULL) {
4612 			if (ncmd == cmd) {
4613 				if (pcmd != NULL) {
4614 					pcmd->cmd_next = cmd->cmd_next;
4615 				} else {
4616 					sf->sf_pkt_head = cmd->cmd_next;
4617 				}
4618 				cmd->cmd_flags &= ~CFLAG_IN_QUEUE;
4619 				cmd->cmd_state = SF_STATE_IDLE;
4620 				pkt->pkt_reason = CMD_ABORTED;
4621 				pkt->pkt_statistics |= STAT_ABORTED;
4622 				my_rval = TRUE;
4623 				break;
4624 			} else {
4625 				pcmd = ncmd;
4626 				ncmd = ncmd->cmd_next;
4627 			}
4628 		}
4629 		mutex_exit(&sf->sf_cmd_mutex);
4630 		if (ncmd == NULL) {
4631 			mutex_enter(&cmd->cmd_abort_mutex);
4632 			if (cmd->cmd_state == SF_STATE_ISSUED) {
4633 				cmd->cmd_state = SF_STATE_ABORTING;
4634 				cmd->cmd_timeout = sf_watchdog_time + 20;
4635 				mutex_exit(&cmd->cmd_abort_mutex);
4636 				/* call transport to abort command */
4637 				if (((rval = soc_abort(sf->sf_sochandle,
4638 				    sf->sf_socp, sf->sf_sochandle->fcal_portno,
4639 				    fpkt, 1)) == FCAL_ABORTED) ||
4640 				    (rval == FCAL_ABORT_FAILED)) {
4641 					my_rval = TRUE;
4642 					pkt->pkt_reason = CMD_ABORTED;
4643 					pkt->pkt_statistics |= STAT_ABORTED;
4644 					cmd->cmd_state = SF_STATE_IDLE;
4645 				} else if (rval == FCAL_BAD_ABORT) {
4646 					cmd->cmd_timeout = sf_watchdog_time
4647 					    + 20;
4648 					my_rval = FALSE;
4649 				} else {
4650 					SF_DEBUG(1, (sf, CE_NOTE,
4651 					    "Command Abort failed\n"));
4652 				}
4653 			} else {
4654 				mutex_exit(&cmd->cmd_abort_mutex);
4655 			}
4656 		}
4657 	} else {
4658 		SF_DEBUG(2, (sf, CE_NOTE, "sf_abort target\n"));
4659 		mutex_enter(&sf->sf_mutex);
4660 		lip_cnt = sf->sf_lip_cnt;
4661 		mutex_enter(&target->sft_mutex);
4662 		if (target->sft_state & (SF_TARGET_BUSY |
4663 		    SF_TARGET_OFFLINE)) {
4664 			mutex_exit(&target->sft_mutex);
4665 			return (rval);
4666 		}
4667 		old_target_state = target->sft_state;
4668 		target->sft_state |= SF_TARGET_BUSY;
4669 		mutex_exit(&target->sft_mutex);
4670 		mutex_exit(&sf->sf_mutex);
4671 
4672 		if ((pkt = sf_scsi_init_pkt(ap, NULL, NULL, 0,
4673 		    0, 0, 0, NULL, 0)) != NULL) {
4674 
4675 			cmd = PKT2CMD(pkt);
4676 			cmd->cmd_block->fcp_cntl.cntl_abort_tsk = 1;
4677 			cmd->cmd_fp_pkt->fcal_pkt_comp = NULL;
4678 			cmd->cmd_pkt->pkt_flags |= FLAG_NOINTR;
4679 
4680 			/* prepare the packet for transport */
4681 			if (sf_prepare_pkt(sf, cmd, target) == TRAN_ACCEPT) {
4682 
4683 				cmd->cmd_state = SF_STATE_ISSUED;
4684 				/*
4685 				 * call transport to send a pkt polled
4686 				 *
4687 				 * if that fails call the transport to abort it
4688 				 */
4689 				if (soc_transport_poll(sf->sf_sochandle,
4690 				    cmd->cmd_fp_pkt, SF_ABORT_TIMEOUT,
4691 				    CQ_REQUEST_1) == FCAL_TRANSPORT_SUCCESS) {
4692 					(void) ddi_dma_sync(
4693 					    cmd->cmd_cr_pool->rsp_dma_handle,
4694 					    (off_t)
4695 						((caddr_t)cmd->cmd_rsp_block -
4696 						cmd->cmd_cr_pool->rsp_base),
4697 					    FCP_MAX_RSP_IU_SIZE,
4698 					    DDI_DMA_SYNC_FORKERNEL);
4699 					if (((struct fcp_rsp_info *)
4700 					    (&cmd->cmd_rsp_block->
4701 					    fcp_response_len + 1))->
4702 					    rsp_code == FCP_NO_FAILURE) {
4703 						/* abort cmds for this targ */
4704 						sf_abort_all(sf, target, TRUE,
4705 						    lip_cnt, TRUE);
4706 					} else {
4707 						hp = &cmd->cmd_fp_pkt->
4708 						    fcal_socal_request.
4709 						    sr_fc_frame_hdr;
4710 						tgt_id = sf_alpa_to_switch[
4711 							(uchar_t)hp->d_id];
4712 						sf->sf_stats.tstats[tgt_id].
4713 						    task_mgmt_failures++;
4714 						SF_DEBUG(1, (sf, CE_NOTE,
4715 						    "Target %d Abort Task "
4716 						    "Set failed\n", hp->d_id));
4717 					}
4718 				} else {
4719 				    mutex_enter(&cmd->cmd_abort_mutex);
4720 				    if (cmd->cmd_state == SF_STATE_ISSUED) {
4721 					cmd->cmd_state = SF_STATE_ABORTING;
4722 					cmd->cmd_timeout = sf_watchdog_time
4723 							+ 20;
4724 					mutex_exit(&cmd->cmd_abort_mutex);
4725 					if ((t = soc_abort(sf->sf_sochandle,
4726 					    sf->sf_socp, sf->sf_sochandle->
4727 					    fcal_portno, cmd->cmd_fp_pkt, 1)) !=
4728 					    FCAL_ABORTED &&
4729 					    (t != FCAL_ABORT_FAILED)) {
4730 						sf_log(sf, CE_NOTE,
4731 						    "sf_abort failed, "
4732 						    "initiating LIP\n");
4733 						sf_force_lip(sf);
4734 						deferred_destroy = 1;
4735 					}
4736 				    } else {
4737 					mutex_exit(&cmd->cmd_abort_mutex);
4738 				    }
4739 				}
4740 			}
4741 			if (!deferred_destroy) {
4742 				cmd->cmd_fp_pkt->fcal_pkt_comp =
4743 				    sf_cmd_callback;
4744 				cmd->cmd_block->fcp_cntl.cntl_abort_tsk = 0;
4745 				sf_scsi_destroy_pkt(ap, pkt);
4746 				my_rval = TRUE;
4747 			}
4748 		}
4749 		mutex_enter(&sf->sf_mutex);
4750 		if (lip_cnt == sf->sf_lip_cnt) {
4751 			mutex_enter(&target->sft_mutex);
4752 			target->sft_state = old_target_state;
4753 			mutex_exit(&target->sft_mutex);
4754 		}
4755 		mutex_exit(&sf->sf_mutex);
4756 	}
4757 	return (my_rval);
4758 }
4759 
4760 
4761 /*
4762  * called by the transport and internally to reset a target
4763  */
4764 static int
4765 sf_reset(struct scsi_address *ap, int level)
4766 {
4767 	struct scsi_pkt *pkt;
4768 	struct fcal_packet *fpkt;
4769 	struct sf *sf = ADDR2SF(ap);
4770 	struct sf_target *target = ADDR2TARGET(ap), *ntarget;
4771 	struct sf_pkt *cmd;
4772 	int	rval = FALSE, t;
4773 	int	lip_cnt;
4774 	int	tgt_id, ret;
4775 	fc_frame_header_t	*hp;
4776 	int	deferred_destroy;
4777 
4778 	/* We don't support RESET_LUN yet. */
4779 	if (level == RESET_TARGET) {
4780 		struct sf_reset_list *p;
4781 
4782 		if ((p = kmem_alloc(sizeof (struct sf_reset_list), KM_NOSLEEP))
4783 		    == NULL)
4784 			return (rval);
4785 
4786 		SF_DEBUG(2, (sf, CE_NOTE, "sf_reset target\n"));
4787 		mutex_enter(&sf->sf_mutex);
4788 		/* All target resets go to LUN 0 */
4789 		if (target->sft_lun.l) {
4790 			target = sf_lookup_target(sf, target->sft_port_wwn, 0);
4791 		}
4792 		mutex_enter(&target->sft_mutex);
4793 		if (target->sft_state & (SF_TARGET_BUSY |
4794 		    SF_TARGET_OFFLINE)) {
4795 			mutex_exit(&target->sft_mutex);
4796 			mutex_exit(&sf->sf_mutex);
4797 			kmem_free(p, sizeof (struct sf_reset_list));
4798 			return (rval);
4799 		}
4800 		lip_cnt = sf->sf_lip_cnt;
4801 		target->sft_state |= SF_TARGET_BUSY;
4802 		for (ntarget = target->sft_next_lun;
4803 			ntarget;
4804 			ntarget = ntarget->sft_next_lun) {
4805 			mutex_enter(&ntarget->sft_mutex);
4806 			/*
4807 			 * XXXX If we supported RESET_LUN we should check here
4808 			 * to see if any LUN were being reset and somehow fail
4809 			 * that operation.
4810 			 */
4811 			ntarget->sft_state |= SF_TARGET_BUSY;
4812 			mutex_exit(&ntarget->sft_mutex);
4813 		}
4814 		mutex_exit(&target->sft_mutex);
4815 		mutex_exit(&sf->sf_mutex);
4816 
4817 		deferred_destroy = 0;
4818 		if ((pkt = sf_scsi_init_pkt(ap, NULL, NULL, 0,
4819 		    0, 0, 0, NULL, 0)) != NULL) {
4820 			cmd = PKT2CMD(pkt);
4821 			cmd->cmd_block->fcp_cntl.cntl_reset = 1;
4822 			cmd->cmd_fp_pkt->fcal_pkt_comp = NULL;
4823 			cmd->cmd_pkt->pkt_flags |= FLAG_NOINTR;
4824 
4825 			/* prepare the packet for transport */
4826 			if (sf_prepare_pkt(sf, cmd, target) == TRAN_ACCEPT) {
4827 				/* call transport to send a pkt polled */
4828 				cmd->cmd_state = SF_STATE_ISSUED;
4829 				if ((ret = soc_transport_poll(sf->sf_sochandle,
4830 				    cmd->cmd_fp_pkt, SF_ABORT_TIMEOUT,
4831 				    CQ_REQUEST_1)) == FCAL_TRANSPORT_SUCCESS) {
4832 					(void) ddi_dma_sync(cmd->cmd_cr_pool->
4833 					    rsp_dma_handle, (caddr_t)cmd->
4834 					    cmd_rsp_block - cmd->cmd_cr_pool->
4835 					    rsp_base, FCP_MAX_RSP_IU_SIZE,
4836 					    DDI_DMA_SYNC_FORKERNEL);
4837 					fpkt = cmd->cmd_fp_pkt;
4838 					if ((fpkt->fcal_pkt_status ==
4839 							FCAL_STATUS_OK) &&
4840 					    (((struct fcp_rsp_info *)
4841 					    (&cmd->cmd_rsp_block->
4842 					    fcp_response_len + 1))->
4843 					    rsp_code == FCP_NO_FAILURE)) {
4844 						sf_log(sf, CE_NOTE,
4845 						    "!sf%d: Target 0x%x Reset "
4846 						    "successful\n",
4847 						    ddi_get_instance(
4848 							sf->sf_dip),
4849 						    sf_alpa_to_switch[
4850 							target->sft_al_pa]);
4851 						rval = TRUE;
4852 					} else {
4853 						hp = &cmd->cmd_fp_pkt->
4854 						    fcal_socal_request.
4855 						    sr_fc_frame_hdr;
4856 						tgt_id = sf_alpa_to_switch[
4857 							(uchar_t)hp->d_id];
4858 						sf->sf_stats.tstats[tgt_id].
4859 						    task_mgmt_failures++;
4860 						sf_log(sf, CE_NOTE,
4861 						    "!sf%d: Target 0x%x "
4862 						    "Reset failed. "
4863 						    "Status code 0x%x "
4864 						    "Resp code 0x%x\n",
4865 						    ddi_get_instance(
4866 							sf->sf_dip),
4867 						    tgt_id,
4868 						    fpkt->fcal_pkt_status,
4869 						    ((struct fcp_rsp_info *)
4870 						    (&cmd->cmd_rsp_block->
4871 						    fcp_response_len + 1))->
4872 						    rsp_code);
4873 					}
4874 				} else {
4875 				    sf_log(sf, CE_NOTE, "!sf%d: Target "
4876 						"0x%x Reset Failed. Ret=%x\n",
4877 						ddi_get_instance(sf->sf_dip),
4878 						sf_alpa_to_switch[
4879 						target->sft_al_pa], ret);
4880 				    mutex_enter(&cmd->cmd_abort_mutex);
4881 				    if (cmd->cmd_state == SF_STATE_ISSUED) {
4882 					/* call the transport to abort a cmd */
4883 					cmd->cmd_timeout = sf_watchdog_time
4884 									+ 20;
4885 					cmd->cmd_state = SF_STATE_ABORTING;
4886 					mutex_exit(&cmd->cmd_abort_mutex);
4887 					if (((t = soc_abort(sf->sf_sochandle,
4888 					    sf->sf_socp,
4889 					    sf->sf_sochandle->fcal_portno,
4890 					    cmd->cmd_fp_pkt, 1)) !=
4891 					    FCAL_ABORTED) &&
4892 					    (t != FCAL_ABORT_FAILED)) {
4893 						sf_log(sf, CE_NOTE,
4894 						    "!sf%d: Target 0x%x Reset "
4895 						    "failed. Abort Failed, "
4896 						    "forcing LIP\n",
4897 						    ddi_get_instance(
4898 							sf->sf_dip),
4899 						    sf_alpa_to_switch[
4900 							target->sft_al_pa]);
4901 						sf_force_lip(sf);
4902 						rval = TRUE;
4903 						deferred_destroy = 1;
4904 					}
4905 				    } else {
4906 					mutex_exit(&cmd->cmd_abort_mutex);
4907 				    }
4908 				}
4909 			}
4910 			/*
4911 			 * Defer releasing the packet if the abort returned with
4912 			 * a BAD_ABORT or timed out, because there is a
4913 			 * possibility that the ucode might return it.
4914 			 * We wait for at least 20s and let it be released
4915 			 * by the sf_watch thread
4916 			 */
4917 			if (!deferred_destroy) {
4918 				cmd->cmd_block->fcp_cntl.cntl_reset = 0;
4919 				cmd->cmd_fp_pkt->fcal_pkt_comp =
4920 				    sf_cmd_callback;
4921 				cmd->cmd_state = SF_STATE_IDLE;
4922 				/* for cache */
4923 				sf_scsi_destroy_pkt(ap, pkt);
4924 			}
4925 		} else {
4926 			cmn_err(CE_WARN, "!sf%d: Target 0x%x Reset Failed. "
4927 					"Resource allocation error.\n",
4928 					ddi_get_instance(sf->sf_dip),
4929 					sf_alpa_to_switch[target->sft_al_pa]);
4930 		}
4931 		mutex_enter(&sf->sf_mutex);
4932 		if ((rval == TRUE) && (lip_cnt == sf->sf_lip_cnt)) {
4933 			p->target = target;
4934 			p->lip_cnt = lip_cnt;
4935 			p->timeout = ddi_get_lbolt() +
4936 			    drv_usectohz(SF_TARGET_RESET_DELAY);
4937 			p->next = sf->sf_reset_list;
4938 			sf->sf_reset_list = p;
4939 			mutex_exit(&sf->sf_mutex);
4940 			mutex_enter(&sf_global_mutex);
4941 			if (sf_reset_timeout_id == 0) {
4942 				sf_reset_timeout_id = timeout(
4943 				    sf_check_reset_delay, NULL,
4944 				    drv_usectohz(SF_TARGET_RESET_DELAY));
4945 			}
4946 			mutex_exit(&sf_global_mutex);
4947 		} else {
4948 			if (lip_cnt == sf->sf_lip_cnt) {
4949 				mutex_enter(&target->sft_mutex);
4950 				target->sft_state &= ~SF_TARGET_BUSY;
4951 				for (ntarget = target->sft_next_lun;
4952 					ntarget;
4953 					ntarget = ntarget->sft_next_lun) {
4954 					mutex_enter(&ntarget->sft_mutex);
4955 					ntarget->sft_state &= ~SF_TARGET_BUSY;
4956 					mutex_exit(&ntarget->sft_mutex);
4957 				}
4958 				mutex_exit(&target->sft_mutex);
4959 			}
4960 			mutex_exit(&sf->sf_mutex);
4961 			kmem_free(p, sizeof (struct sf_reset_list));
4962 		}
4963 	} else {
4964 		mutex_enter(&sf->sf_mutex);
4965 		if ((sf->sf_state == SF_STATE_OFFLINE) &&
4966 		    (sf_watchdog_time < sf->sf_timer)) {
4967 			/*
4968 			 * We are currently in a lip, so let this one
4969 			 * finish before forcing another one.
4970 			 */
4971 			mutex_exit(&sf->sf_mutex);
4972 			return (TRUE);
4973 		}
4974 		mutex_exit(&sf->sf_mutex);
4975 		sf_log(sf, CE_NOTE, "!sf:Target driver initiated lip\n");
4976 		sf_force_lip(sf);
4977 		rval = TRUE;
4978 	}
4979 	return (rval);
4980 }
4981 
4982 
4983 /*
4984  * abort all commands for a target
4985  *
4986  * if try_abort is set then ask the transport to abort issued commands
4987  * if abort is set, commands complete as CMD_ABORTED, else as CMD_RESET
4988  */
4989 static void
4990 sf_abort_all(struct sf *sf, struct sf_target *target, int abort,
4991     int lip_cnt, int try_abort)
4992 {
4993 	struct sf_target *ntarget;
4994 	struct sf_pkt *cmd, *head = NULL, *tail = NULL, *pcmd = NULL, *tcmd;
4995 	struct fcal_packet *fpkt;
4996 	struct scsi_pkt *pkt;
4997 	int rval = FCAL_ABORTED;
4998 
4999 	/*
5000 	 * First pull all commands for this target out of the
5001 	 * overflow list.  A command belongs to this target if its
5002 	 * packet address maps back to the same sf_target.
5003 	 */
5004 	mutex_enter(&sf->sf_mutex);
5005 	if (lip_cnt == sf->sf_lip_cnt) {
5006 		mutex_enter(&sf->sf_cmd_mutex);
5007 		cmd = sf->sf_pkt_head;
5008 		while (cmd != NULL) {
5009 			ntarget = ADDR2TARGET(&cmd->cmd_pkt->
5010 				pkt_address);
5011 			if (ntarget == target) {
5012 				if (pcmd != NULL)
5013 					pcmd->cmd_next = cmd->cmd_next;
5014 				else
5015 					sf->sf_pkt_head = cmd->cmd_next;
5016 				if (sf->sf_pkt_tail == cmd) {
5017 					sf->sf_pkt_tail = pcmd;
5018 					if (pcmd != NULL)
5019 						pcmd->cmd_next = NULL;
5020 				}
5021 				tcmd = cmd->cmd_next;
5022 				if (head == NULL) {
5023 					head = cmd;
5024 					tail = cmd;
5025 				} else {
5026 					tail->cmd_next = cmd;
5027 					tail = cmd;
5028 				}
5029 				cmd->cmd_next = NULL;
5030 				cmd = tcmd;
5031 			} else {
5032 				pcmd = cmd;
5033 				cmd = cmd->cmd_next;
5034 			}
5035 		}
5036 		mutex_exit(&sf->sf_cmd_mutex);
5037 	}
5038 	mutex_exit(&sf->sf_mutex);
5039 
5040 	/*
5041 	 * Now complete all the commands on our list.  In the process,
5042 	 * the completion routine may take the commands off the target
5043 	 * lists.
5044 	 */
5045 	cmd = head;
5046 	while (cmd != NULL) {
5047 		pkt = cmd->cmd_pkt;
5048 		if (abort) {
5049 			pkt->pkt_reason = CMD_ABORTED;
5050 			pkt->pkt_statistics |= STAT_ABORTED;
5051 		} else {
5052 			pkt->pkt_reason = CMD_RESET;
5053 			pkt->pkt_statistics |= STAT_DEV_RESET;
5054 		}
5055 		cmd->cmd_flags &= ~CFLAG_IN_QUEUE;
5056 		cmd->cmd_state = SF_STATE_IDLE;
5057 		cmd = cmd->cmd_next;
5058 		/*
5059 		 * call the packet completion routine only for
5060 		 * non-polled commands.  Ignore polled commands, since
5061 		 * they time out and are handled differently
5062 		 */
5063 		if ((pkt->pkt_comp) && !(pkt->pkt_flags & FLAG_NOINTR))
5064 			(*pkt->pkt_comp)(pkt);
5065 
5066 	}
5067 
5068 	/*
5069 	 * Finally get all outstanding commands for each LUN, and abort them if
5070 	 * they've been issued, and call the completion routine.
5071 	 * For the case where sf_offline_target is called from sf_watch
5072 	 * due to an Offline Timeout, it is quite possible that the soc+
5073 	 * ucode is hosed and therefore cannot return the commands.
5074 	 * Clear up all the issued commands as well.
5075 	 * try_abort will be FALSE only if sf_abort_all is coming from
5076 	 * sf_offline_target.
5077 	 */
5078 
5079 	if (try_abort || sf->sf_state == SF_STATE_OFFLINE) {
5080 		mutex_enter(&target->sft_pkt_mutex);
5081 		cmd = tcmd = target->sft_pkt_head;
5082 		while (cmd != (struct sf_pkt *)&target->sft_pkt_head) {
5083 			fpkt = cmd->cmd_fp_pkt;
5084 			pkt = cmd->cmd_pkt;
5085 			mutex_enter(&cmd->cmd_abort_mutex);
5086 			if ((cmd->cmd_state == SF_STATE_ISSUED) &&
5087 				(fpkt->fcal_cmd_state &
5088 					FCAL_CMD_IN_TRANSPORT) &&
5089 				((fpkt->fcal_cmd_state & FCAL_CMD_COMPLETE) ==
5090 					0) && !(pkt->pkt_flags & FLAG_NOINTR)) {
5091 				cmd->cmd_state = SF_STATE_ABORTING;
5092 				cmd->cmd_timeout = sf_watchdog_time +
5093 				    cmd->cmd_pkt->pkt_time + 20;
5094 				mutex_exit(&cmd->cmd_abort_mutex);
5095 				mutex_exit(&target->sft_pkt_mutex);
5096 				if (try_abort) {
5097 					/* call the transport to abort a pkt */
5098 					rval = soc_abort(sf->sf_sochandle,
5099 						sf->sf_socp,
5100 						sf->sf_sochandle->fcal_portno,
5101 						fpkt, 1);
5102 				}
5103 				if ((rval == FCAL_ABORTED) ||
5104 					(rval == FCAL_ABORT_FAILED)) {
5105 					if (abort) {
5106 						pkt->pkt_reason = CMD_ABORTED;
5107 						pkt->pkt_statistics |=
5108 							STAT_ABORTED;
5109 					} else {
5110 						pkt->pkt_reason = CMD_RESET;
5111 						pkt->pkt_statistics |=
5112 							STAT_DEV_RESET;
5113 					}
5114 					cmd->cmd_state = SF_STATE_IDLE;
5115 					if (pkt->pkt_comp)
5116 						(*pkt->pkt_comp)(pkt);
5117 				}
5118 				mutex_enter(&sf->sf_mutex);
5119 				if (lip_cnt != sf->sf_lip_cnt) {
5120 					mutex_exit(&sf->sf_mutex);
5121 					return;
5122 				}
5123 				mutex_exit(&sf->sf_mutex);
5124 				mutex_enter(&target->sft_pkt_mutex);
5125 				cmd = target->sft_pkt_head;
5126 			} else {
5127 				mutex_exit(&cmd->cmd_abort_mutex);
5128 				cmd = cmd->cmd_forw;
5129 			}
5130 		}
5131 		mutex_exit(&target->sft_pkt_mutex);
5132 	}
5133 }
5134 
5135 
5136 /*
5137  * called by the transport to start a packet
5138  */
5139 static int
5140 sf_start(struct scsi_address *ap, struct scsi_pkt *pkt)
5141 {
5142 	struct sf *sf = ADDR2SF(ap);
5143 	struct sf_target *target = ADDR2TARGET(ap);
5144 	struct sf_pkt *cmd = PKT2CMD(pkt);
5145 	int rval;
5146 
5147 
5148 	SF_DEBUG(6, (sf, CE_NOTE, "sf_start\n"));
5149 
5150 	if (cmd->cmd_state == SF_STATE_ISSUED) {
5151 		cmn_err(CE_PANIC, "sf: issuing packet twice 0x%p\n",
5152 			(void *)cmd);
5153 	}
5154 
5155 	/* prepare the packet for transport */
5156 	if ((rval = sf_prepare_pkt(sf, cmd, target)) != TRAN_ACCEPT) {
5157 		return (rval);
5158 	}
5159 
5160 	if (target->sft_state & (SF_TARGET_BUSY|SF_TARGET_OFFLINE)) {
5161 		if (target->sft_state & SF_TARGET_OFFLINE) {
5162 			return (TRAN_FATAL_ERROR);
5163 		}
5164 		if (pkt->pkt_flags & FLAG_NOINTR) {
5165 			return (TRAN_BUSY);
5166 		}
5167 		mutex_enter(&sf->sf_cmd_mutex);
5168 		sf->sf_use_lock = TRUE;
5169 		goto enque;
5170 	}
5171 
5172 
5173 	/* if no interrupts then do polled I/O */
5174 	if (pkt->pkt_flags & FLAG_NOINTR) {
5175 		return (sf_dopoll(sf, cmd));
5176 	}
5177 
5178 	/* regular interrupt-driven I/O */
5179 
5180 	if (!sf->sf_use_lock) {
5181 
5182 		/* locking not needed */
5183 
5184 		cmd->cmd_timeout = cmd->cmd_pkt->pkt_time ?
5185 			sf_watchdog_time + cmd->cmd_pkt->pkt_time : 0;
5186 		cmd->cmd_state = SF_STATE_ISSUED;
5187 
5188 		/* call the transport to send a pkt */
5189 		if (soc_transport(sf->sf_sochandle, cmd->cmd_fp_pkt,
5190 		    FCAL_NOSLEEP, CQ_REQUEST_1) != FCAL_TRANSPORT_SUCCESS) {
5191 			cmd->cmd_state = SF_STATE_IDLE;
5192 			return (TRAN_BADPKT);
5193 		}
5194 		return (TRAN_ACCEPT);
5195 	}
5196 
5197 	/* regular I/O using locking */
5198 
5199 	mutex_enter(&sf->sf_cmd_mutex);
5200 	if ((sf->sf_ncmds >= sf->sf_throttle) ||
5201 	    (sf->sf_pkt_head != NULL)) {
5202 enque:
5203 		/*
5204 		 * either we're throttling back or there are already commands
5205 		 * on the queue, so enqueue this one for later
5206 		 */
5207 		cmd->cmd_flags |= CFLAG_IN_QUEUE;
5208 		if (sf->sf_pkt_head != NULL) {
5209 			/* add to the queue */
5210 			sf->sf_pkt_tail->cmd_next = cmd;
5211 			cmd->cmd_next = NULL;
5212 			sf->sf_pkt_tail = cmd;
5213 		} else {
5214 			/* this is the first entry in the queue */
5215 			sf->sf_pkt_head = sf->sf_pkt_tail = cmd;
5216 			cmd->cmd_next = NULL;
5217 		}
5218 		mutex_exit(&sf->sf_cmd_mutex);
5219 		return (TRAN_ACCEPT);
5220 	}
5221 
5222 	/*
5223 	 * start this packet now
5224 	 */
5225 
5226 	/* still have cmd mutex */
5227 	return (sf_start_internal(sf, cmd));
5228 }
5229 
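/*
 * Aside on the queuing in sf_start() above: when the adapter is
 * throttled (sf_ncmds >= sf_throttle), or commands are already
 * waiting, a new packet is appended to the sf_pkt_head/sf_pkt_tail
 * overflow queue rather than failed with TRAN_BUSY; queued packets
 * are restarted elsewhere in the driver once command slots free up.
 */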
5230 
5231 /*
5232  * internal routine to start a packet from the queue now
5233  *
5234  * enter with cmd mutex held and leave with it released
5235  */
5236 static int
5237 sf_start_internal(struct sf *sf, struct sf_pkt *cmd)
5238 {
5239 	/* we have the cmd mutex */
5240 	sf->sf_ncmds++;
5241 	mutex_exit(&sf->sf_cmd_mutex);
5242 
5243 	ASSERT(cmd->cmd_state != SF_STATE_ISSUED);
5244 	SF_DEBUG(6, (sf, CE_NOTE, "sf_start_internal\n"));
5245 
5246 	cmd->cmd_timeout = cmd->cmd_pkt->pkt_time ? sf_watchdog_time +
5247 	    cmd->cmd_pkt->pkt_time : 0;
5248 	cmd->cmd_state = SF_STATE_ISSUED;
5249 
5250 	/* call transport to send the pkt */
5251 	if (soc_transport(sf->sf_sochandle, cmd->cmd_fp_pkt, FCAL_NOSLEEP,
5252 	    CQ_REQUEST_1) != FCAL_TRANSPORT_SUCCESS) {
5253 		cmd->cmd_state = SF_STATE_IDLE;
5254 		mutex_enter(&sf->sf_cmd_mutex);
5255 		sf->sf_ncmds--;
5256 		mutex_exit(&sf->sf_cmd_mutex);
5257 		return (TRAN_BADPKT);
5258 	}
5259 	return (TRAN_ACCEPT);
5260 }
5261 
5262 
5263 /*
5264  * prepare a packet for transport
5265  */
5266 static int
5267 sf_prepare_pkt(struct sf *sf, struct sf_pkt *cmd, struct sf_target *target)
5268 {
5269 	struct fcp_cmd *fcmd = cmd->cmd_block;
5270 
5271 /* XXXX Need to set the LUN ? */
5272 	bcopy((caddr_t)&target->sft_lun.b,
5273 		(caddr_t)&fcmd->fcp_ent_addr,
5274 		FCP_LUN_SIZE);
5275 	cmd->cmd_pkt->pkt_reason = CMD_CMPLT;
5276 	cmd->cmd_pkt->pkt_state = 0;
5277 	cmd->cmd_pkt->pkt_statistics = 0;
5278 
5279 
5280 	if ((cmd->cmd_pkt->pkt_comp == NULL) &&
5281 	    ((cmd->cmd_pkt->pkt_flags & FLAG_NOINTR) == 0)) {
5282 		return (TRAN_BADPKT);
5283 	}
5284 
5285 	/* invalidate imp field(s) of rsp block */
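	/*
	 * SF_BAD_DMA_MAGIC is a sentinel: if sf_cmd_callback() finds it
	 * still in place, the response was never DMAed into the host and
	 * the command is failed with CMD_INCOMPLETE.
	 */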
5286 	cmd->cmd_rsp_block->fcp_u.i_fcp_status = SF_BAD_DMA_MAGIC;
5287 
5288 	/* set up amt of I/O to do */
5289 	if (cmd->cmd_flags & CFLAG_DMAVALID) {
5290 		cmd->cmd_pkt->pkt_resid = cmd->cmd_dmacount;
5291 		if (cmd->cmd_flags & CFLAG_CMDIOPB) {
5292 			(void) ddi_dma_sync(cmd->cmd_dmahandle, 0, 0,
5293 			    DDI_DMA_SYNC_FORDEV);
5294 		}
5295 	} else {
5296 		cmd->cmd_pkt->pkt_resid = 0;
5297 	}
5298 
5299 	/* set up the Tagged Queuing type */
5300 	if (cmd->cmd_pkt->pkt_flags & FLAG_HTAG) {
5301 		fcmd->fcp_cntl.cntl_qtype = FCP_QTYPE_HEAD_OF_Q;
5302 	} else if (cmd->cmd_pkt->pkt_flags & FLAG_OTAG) {
5303 		fcmd->fcp_cntl.cntl_qtype = FCP_QTYPE_ORDERED;
5304 	}
5305 
5306 	/*
5307 	 * Sync the cmd segment
5308 	 */
5309 	(void) ddi_dma_sync(cmd->cmd_cr_pool->cmd_dma_handle,
5310 		(caddr_t)fcmd - cmd->cmd_cr_pool->cmd_base,
5311 		sizeof (struct fcp_cmd), DDI_DMA_SYNC_FORDEV);
5312 
5313 	sf_fill_ids(sf, cmd, target);
5314 	return (TRAN_ACCEPT);
5315 }
5316 
5317 
5318 /*
5319  * fill in packet hdr source and destination IDs and hdr byte count
5320  */
5321 static void
5322 sf_fill_ids(struct sf *sf, struct sf_pkt *cmd, struct sf_target *target)
5323 {
5324 	struct fcal_packet *fpkt = cmd->cmd_fp_pkt;
5325 	fc_frame_header_t	*hp;
5326 
5327 
5328 	hp = &fpkt->fcal_socal_request.sr_fc_frame_hdr;
5329 	hp->d_id = target->sft_al_pa;
5330 	hp->s_id = sf->sf_al_pa;
5331 	fpkt->fcal_socal_request.sr_soc_hdr.sh_byte_cnt =
5332 		    cmd->cmd_dmacookie.dmac_size;
5333 }
5334 
5335 
5336 /*
5337  * do polled I/O using transport
5338  */
5339 static int
5340 sf_dopoll(struct sf *sf, struct sf_pkt *cmd)
5341 {
5342 	int timeout;
5343 	int rval;
5344 
5345 
5346 	mutex_enter(&sf->sf_cmd_mutex);
5347 	sf->sf_ncmds++;
5348 	mutex_exit(&sf->sf_cmd_mutex);
5349 
5350 	timeout = cmd->cmd_pkt->pkt_time ? cmd->cmd_pkt->pkt_time
5351 	    : SF_POLL_TIMEOUT;
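	/*
	 * Polled commands bypass the watchdog (cmd_timeout = 0) and
	 * complete synchronously: the completion callback is detached for
	 * the duration of the poll, restored afterward, and invoked by
	 * hand on success.
	 */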
5352 	cmd->cmd_timeout = 0;
5353 	cmd->cmd_fp_pkt->fcal_pkt_comp = NULL;
5354 	cmd->cmd_state = SF_STATE_ISSUED;
5355 
5356 	/* call transport to send a pkt polled */
5357 	rval = soc_transport_poll(sf->sf_sochandle, cmd->cmd_fp_pkt,
5358 		timeout*1000000, CQ_REQUEST_1);
5359 	mutex_enter(&cmd->cmd_abort_mutex);
5360 	cmd->cmd_fp_pkt->fcal_pkt_comp = sf_cmd_callback;
5361 	if (rval != FCAL_TRANSPORT_SUCCESS) {
5362 		if (rval == FCAL_TRANSPORT_TIMEOUT) {
5363 			cmd->cmd_state = SF_STATE_ABORTING;
5364 			mutex_exit(&cmd->cmd_abort_mutex);
5365 			(void) sf_target_timeout(sf, cmd);
5366 		} else {
5367 			mutex_exit(&cmd->cmd_abort_mutex);
5368 		}
5369 		cmd->cmd_state = SF_STATE_IDLE;
5370 		cmd->cmd_fp_pkt->fcal_pkt_comp = sf_cmd_callback;
5371 		mutex_enter(&sf->sf_cmd_mutex);
5372 		sf->sf_ncmds--;
5373 		mutex_exit(&sf->sf_cmd_mutex);
5374 		return (TRAN_BADPKT);
5375 	}
5376 	mutex_exit(&cmd->cmd_abort_mutex);
5377 	cmd->cmd_fp_pkt->fcal_pkt_comp = sf_cmd_callback;
5378 	sf_cmd_callback(cmd->cmd_fp_pkt);
5379 	return (TRAN_ACCEPT);
5380 }
5381 
5382 
5383 /* a shortcut for defining debug messages below */
5384 #ifdef	DEBUG
5385 #define	SF_DMSG1(s)		msg1 = s
5386 #else
5387 #define	SF_DMSG1(s)		/* do nothing */
5388 #endif
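
/*
 * In non-DEBUG builds SF_DMSG1 is a no-op, so msg1 below stays NULL and
 * the detailed per-command transport-error message is reported only when
 * built with DEBUG.
 */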
5389 
5390 
5391 /*
5392  * the pkt_comp callback for command packets
5393  */
5394 static void
5395 sf_cmd_callback(struct fcal_packet *fpkt)
5396 {
5397 	struct sf_pkt *cmd = (struct sf_pkt *)fpkt->fcal_pkt_private;
5398 	struct scsi_pkt *pkt = cmd->cmd_pkt;
5399 	struct sf *sf = ADDR2SF(&pkt->pkt_address);
5400 	struct sf_target *target = ADDR2TARGET(&pkt->pkt_address);
5401 	struct fcp_rsp *rsp;
5402 	char *msg1 = NULL;
5403 	char *msg2 = NULL;
5404 	short ncmds;
5405 	int tgt_id;
5406 	int good_scsi_status = TRUE;
5407 
5408 
5409 
5410 	if (cmd->cmd_state == SF_STATE_IDLE) {
5411 		cmn_err(CE_PANIC, "sf: completing idle packet 0x%p\n",
5412 			(void *)cmd);
5413 	}
5414 
5415 	mutex_enter(&cmd->cmd_abort_mutex);
5416 	if (cmd->cmd_state == SF_STATE_ABORTING) {
5417 		/* cmd already being aborted -- nothing to do */
5418 		mutex_exit(&cmd->cmd_abort_mutex);
5419 		return;
5420 	}
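
	/*
	 * The cmd_abort_mutex handshake above closes a race with the
	 * watchdog: a packet already marked SF_STATE_ABORTING is completed
	 * by sf_check_targets(), not here.
	 */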
5421 
5422 	cmd->cmd_state = SF_STATE_IDLE;
5423 	mutex_exit(&cmd->cmd_abort_mutex);
5424 
5425 	if (fpkt->fcal_pkt_status == FCAL_STATUS_OK) {
5426 
5427 		(void) ddi_dma_sync(cmd->cmd_cr_pool->rsp_dma_handle,
5428 		    (caddr_t)cmd->cmd_rsp_block - cmd->cmd_cr_pool->rsp_base,
5429 		    FCP_MAX_RSP_IU_SIZE, DDI_DMA_SYNC_FORKERNEL);
5430 
5431 		rsp = (struct fcp_rsp *)cmd->cmd_rsp_block;
5432 
5433 		if (rsp->fcp_u.i_fcp_status == SF_BAD_DMA_MAGIC) {
5434 
5435 			if (sf_core && (sf_core & SF_CORE_BAD_DMA)) {
5436 				sf_token = (int *)(uintptr_t)
5437 				    fpkt->fcal_socal_request.\
5438 				    sr_soc_hdr.sh_request_token;
5439 				(void) soc_take_core(sf->sf_sochandle,
5440 							sf->sf_socp);
5441 			}
5442 
5443 			pkt->pkt_reason = CMD_INCOMPLETE;
5444 			pkt->pkt_state = STATE_GOT_BUS;
5445 			pkt->pkt_statistics |= STAT_ABORTED;
5446 
5447 		} else {
5448 
5449 			pkt->pkt_state = STATE_GOT_BUS | STATE_GOT_TARGET |
5450 					STATE_SENT_CMD | STATE_GOT_STATUS;
5451 			pkt->pkt_resid = 0;
5452 			if (cmd->cmd_flags & CFLAG_DMAVALID) {
5453 				pkt->pkt_state |= STATE_XFERRED_DATA;
5454 			}
5455 
5456 			if ((pkt->pkt_scbp != NULL) &&
5457 			    ((*(pkt->pkt_scbp) =
5458 				rsp->fcp_u.fcp_status.scsi_status)
5459 							!= STATUS_GOOD)) {
5460 				good_scsi_status = FALSE;
5461 				/*
5462 				 * The next two checks ensure that a check-
5463 				 * condition status with neither sense data nor
5464 				 * valid response info gets retried: clear
5465 				 * XFERRED_DATA and set a full residual.
5466 				 */
5467 				if (!rsp->fcp_u.fcp_status.rsp_len_set &&
5468 				    !rsp->fcp_u.fcp_status.sense_len_set) {
5469 					pkt->pkt_state &= ~STATE_XFERRED_DATA;
5470 					pkt->pkt_resid = cmd->cmd_dmacount;
5471 				}
5472 			}
5473 
5474 			if ((cmd->cmd_flags & CFLAG_CMDIOPB) &&
5475 			    (pkt->pkt_state & STATE_XFERRED_DATA)) {
5476 				(void) ddi_dma_sync(cmd->cmd_dmahandle, 0,
5477 					(uint_t)0, DDI_DMA_SYNC_FORCPU);
5478 			}
5479 			/*
5480 			 * Update the transfer resid, if appropriate
5481 			 */
5482 			if (rsp->fcp_u.fcp_status.resid_over ||
5483 			    rsp->fcp_u.fcp_status.resid_under)
5484 				pkt->pkt_resid = rsp->fcp_resid;
5485 
5486 			/*
5487 			 * Check to see if the SCSI command failed.
5488 			 *
5489 			 */
5490 
5491 			/*
5492 			 * First see if we got a FCP protocol error.
5493 			 */
5494 			if (rsp->fcp_u.fcp_status.rsp_len_set) {
5495 			    struct fcp_rsp_info *bep;
5496 
5497 			    bep =
5498 			    (struct fcp_rsp_info *)(&rsp->fcp_response_len + 1);
5499 			    if (bep->rsp_code != FCP_NO_FAILURE) {
5500 				pkt->pkt_reason = CMD_TRAN_ERR;
5501 				tgt_id = pkt->pkt_address.a_target;
5502 				switch (bep->rsp_code) {
5503 				case FCP_CMND_INVALID:
5504 					SF_DMSG1("FCP_RSP FCP_CMND "
5505 						"fields invalid");
5506 					break;
5507 				case FCP_TASK_MGMT_NOT_SUPPTD:
5508 					SF_DMSG1("FCP_RSP Task "
5509 						"Management Function "
5510 						"Not Supported");
5511 					break;
5512 				case FCP_TASK_MGMT_FAILED:
5513 					SF_DMSG1("FCP_RSP Task "
5514 						"Management Function "
5515 						"Failed");
5516 					sf->sf_stats.tstats[tgt_id].
5517 						task_mgmt_failures++;
5518 					break;
5519 				case FCP_DATA_RO_MISMATCH:
5520 					SF_DMSG1("FCP_RSP FCP_DATA RO "
5521 						"mismatch with "
5522 						"FCP_XFER_RDY DATA_RO");
5523 					sf->sf_stats.tstats[tgt_id].
5524 						data_ro_mismatches++;
5525 					break;
5526 				case FCP_DL_LEN_MISMATCH:
5527 					SF_DMSG1("FCP_RSP FCP_DATA length "
5528 						"different than BURST_LEN");
5529 					sf->sf_stats.tstats[tgt_id].
5530 						dl_len_mismatches++;
5531 					break;
5532 				default:
5533 					SF_DMSG1("FCP_RSP invalid RSP_CODE");
5534 					break;
5535 				}
5536 			    }
5537 			}
5538 
5539 			/*
5540 			 * See if we got a SCSI error with sense data
5541 			 */
5542 			if (rsp->fcp_u.fcp_status.sense_len_set) {
5543 			    uchar_t rqlen = min(rsp->fcp_sense_len,
5544 					sizeof (struct scsi_extended_sense));
5545 			    caddr_t sense = (caddr_t)rsp +
5546 				sizeof (struct fcp_rsp) + rsp->fcp_response_len;
5547 			    struct scsi_arq_status *arq;
5548 			    struct scsi_extended_sense *sensep =
5549 				(struct scsi_extended_sense *)sense;
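			    /*
			     * In the FCP_RSP IU the sense bytes follow the
			     * fixed fcp_rsp header plus any response-info
			     * bytes, hence the pointer arithmetic above;
			     * rqlen caps the copy at the extended sense size.
			     */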
5550 
5551 			    if (rsp->fcp_u.fcp_status.scsi_status !=
5552 							STATUS_GOOD) {
5553 				if (rsp->fcp_u.fcp_status.scsi_status
5554 					== STATUS_CHECK) {
5555 					if (sensep->es_key ==
5556 						KEY_RECOVERABLE_ERROR)
5557 						good_scsi_status = TRUE;
5558 					if (sensep->es_key ==
5559 						KEY_UNIT_ATTENTION &&
5560 						sensep->es_add_code == 0x3f &&
5561 						sensep->es_qual_code == 0x0e) {
5562 						/* REPORT_LUNS_HAS_CHANGED */
5563 						sf_log(sf, CE_NOTE,
5564 						"!REPORT_LUNS_HAS_CHANGED\n");
5565 						sf_force_lip(sf);
5566 					}
5567 				}
5568 			    }
5569 
5570 			    if ((pkt->pkt_scbp != NULL) &&
5571 				(cmd->cmd_scblen >=
5572 					sizeof (struct scsi_arq_status))) {
5573 
5574 				pkt->pkt_state |= STATE_ARQ_DONE;
5575 
5576 				arq = (struct scsi_arq_status *)pkt->pkt_scbp;
5577 				/*
5578 				 * copy out sense information
5579 				 */
5580 				bcopy(sense, (caddr_t)&arq->sts_sensedata,
5581 				    rqlen);
5582 				arq->sts_rqpkt_resid =
5583 				    sizeof (struct scsi_extended_sense) -
5584 					rqlen;
5585 				*((uchar_t *)&arq->sts_rqpkt_status) =
5586 				    STATUS_GOOD;
5587 				arq->sts_rqpkt_reason = 0;
5588 				arq->sts_rqpkt_statistics = 0;
5589 				arq->sts_rqpkt_state = STATE_GOT_BUS |
5590 				    STATE_GOT_TARGET | STATE_SENT_CMD |
5591 				    STATE_GOT_STATUS | STATE_ARQ_DONE |
5592 				    STATE_XFERRED_DATA;
5593 			    }
5594 			    target->sft_alive = TRUE;
5595 			}
5596 
5597 			/*
5598 			 * The firmware returns the number of bytes actually
5599 			 * transferred into/out of the host.  Compare this with
5600 			 * what we asked for; if they differ, frames were lost.
5601 			 */
5602 			if ((pkt->pkt_reason == 0) && (pkt->pkt_resid == 0) &&
5603 			    (good_scsi_status) &&
5604 			    (pkt->pkt_state & STATE_XFERRED_DATA) &&
5605 			    (!(cmd->cmd_flags & CFLAG_CMDIOPB)) &&
5606 			    (target->sft_device_type != DTYPE_ESI)) {
5607 				int byte_cnt =
5608 				fpkt->fcal_socal_request.sr_soc_hdr.sh_byte_cnt;
5609 				if (cmd->cmd_flags & CFLAG_DMASEND) {
5610 				    if (byte_cnt != 0) {
5611 					sf_log(sf, CE_NOTE,
5612 						"!sf_cmd_callback: Lost Frame: "
5613 						"(write) received 0x%x expected"
5614 						" 0x%x target 0x%x\n",
5615 						byte_cnt, cmd->cmd_dmacount,
5616 						sf_alpa_to_switch[
5617 							target->sft_al_pa]);
5618 					pkt->pkt_reason = CMD_INCOMPLETE;
5619 					pkt->pkt_statistics |= STAT_ABORTED;
5620 				    }
5621 				} else if (byte_cnt < cmd->cmd_dmacount) {
5622 				    sf_log(sf, CE_NOTE,
5623 					"!sf_cmd_callback: Lost Frame: (read) "
5624 					"received 0x%x expected 0x%x "
5625 					"target 0x%x\n", byte_cnt,
5626 					cmd->cmd_dmacount, sf_alpa_to_switch[
5627 							target->sft_al_pa]);
5628 				    pkt->pkt_reason = CMD_INCOMPLETE;
5629 				    pkt->pkt_statistics |= STAT_ABORTED;
5630 				}
5631 			}
5632 		}
5633 
5634 	} else {
5635 
5636 		/* pkt status was not ok */
5637 
5638 		switch (fpkt->fcal_pkt_status) {
5639 
5640 		case FCAL_STATUS_ERR_OFFLINE:
5641 			SF_DMSG1("Fibre Channel Offline");
5642 			mutex_enter(&target->sft_mutex);
5643 			if (!(target->sft_state & SF_TARGET_OFFLINE)) {
5644 				target->sft_state |= (SF_TARGET_BUSY
5645 					    | SF_TARGET_MARK);
5646 			}
5647 			mutex_exit(&target->sft_mutex);
5648 			(void) ndi_event_retrieve_cookie(sf->sf_event_hdl,
5649 				target->sft_dip, FCAL_REMOVE_EVENT,
5650 				&sf_remove_eid, NDI_EVENT_NOPASS);
5651 			(void) ndi_event_run_callbacks(sf->sf_event_hdl,
5652 				target->sft_dip, sf_remove_eid, NULL);
5653 			pkt->pkt_reason = CMD_TRAN_ERR;
5654 			pkt->pkt_statistics |= STAT_BUS_RESET;
5655 			break;
5656 
5657 		case FCAL_STATUS_MAX_XCHG_EXCEEDED:
5658 			sf_throttle(sf);
5659 			sf->sf_use_lock = TRUE;
5660 			pkt->pkt_reason = CMD_TRAN_ERR;
5661 			pkt->pkt_state = STATE_GOT_BUS;
5662 			pkt->pkt_statistics |= STAT_ABORTED;
5663 			break;
5664 
5665 		case FCAL_STATUS_TIMEOUT:
5666 			SF_DMSG1("Fibre Channel Timeout");
5667 			pkt->pkt_reason = CMD_TIMEOUT;
5668 			break;
5669 
5670 		case FCAL_STATUS_ERR_OVERRUN:
5671 			SF_DMSG1("CMD_DATA_OVR");
5672 			pkt->pkt_reason = CMD_DATA_OVR;
5673 			break;
5674 
5675 		case FCAL_STATUS_UNKNOWN_CQ_TYPE:
5676 			SF_DMSG1("Unknown CQ type");
5677 			pkt->pkt_reason = CMD_TRAN_ERR;
5678 			break;
5679 
5680 		case FCAL_STATUS_BAD_SEG_CNT:
5681 			SF_DMSG1("Bad SEG CNT");
5682 			pkt->pkt_reason = CMD_TRAN_ERR;
5683 			break;
5684 
5685 		case FCAL_STATUS_BAD_XID:
5686 			SF_DMSG1("Fibre Channel Invalid X_ID");
5687 			pkt->pkt_reason = CMD_TRAN_ERR;
5688 			break;
5689 
5690 		case FCAL_STATUS_XCHG_BUSY:
5691 			SF_DMSG1("Fibre Channel Exchange Busy");
5692 			pkt->pkt_reason = CMD_TRAN_ERR;
5693 			break;
5694 
5695 		case FCAL_STATUS_INSUFFICIENT_CQES:
5696 			SF_DMSG1("Insufficient CQEs");
5697 			pkt->pkt_reason = CMD_TRAN_ERR;
5698 			break;
5699 
5700 		case FCAL_STATUS_ALLOC_FAIL:
5701 			SF_DMSG1("ALLOC FAIL");
5702 			pkt->pkt_reason = CMD_TRAN_ERR;
5703 			break;
5704 
5705 		case FCAL_STATUS_BAD_SID:
5706 			SF_DMSG1("Fibre Channel Invalid S_ID");
5707 			pkt->pkt_reason = CMD_TRAN_ERR;
5708 			break;
5709 
5710 		case FCAL_STATUS_INCOMPLETE_DMA_ERR:
5711 			if (sf_core && (sf_core & SF_CORE_INCOMPLETE_DMA)) {
5712 				sf_token = (int *)(uintptr_t)
5713 				    fpkt->fcal_socal_request.\
5714 				    sr_soc_hdr.sh_request_token;
5715 				(void) soc_take_core(sf->sf_sochandle,
5716 				    sf->sf_socp);
5717 				sf_core = 0;
5718 			}
5719 			msg2 =
5720 			"INCOMPLETE DMA XFER due to bad SOC+ card, replace HBA";
5721 			pkt->pkt_reason = CMD_INCOMPLETE;
5722 			pkt->pkt_state = STATE_GOT_BUS;
5723 			pkt->pkt_statistics |= STAT_ABORTED;
5724 			break;
5725 
5726 		case FCAL_STATUS_CRC_ERR:
5727 			msg2 = "Fibre Channel CRC Error on frames";
5728 			pkt->pkt_reason = CMD_INCOMPLETE;
5729 			pkt->pkt_state = STATE_GOT_BUS;
5730 			pkt->pkt_statistics |= STAT_ABORTED;
5731 			break;
5732 
5733 		case FCAL_STATUS_NO_SEQ_INIT:
5734 			SF_DMSG1("Fibre Channel Seq Init Error");
5735 			pkt->pkt_reason = CMD_TRAN_ERR;
5736 			break;
5737 
5738 		case FCAL_STATUS_OPEN_FAIL:
5739 			pkt->pkt_reason = CMD_TRAN_ERR;
5740 			SF_DMSG1("Fibre Channel Open Failure");
5741 			if ((target->sft_state & (SF_TARGET_BUSY |
5742 			    SF_TARGET_MARK | SF_TARGET_OFFLINE)) == 0) {
5743 			    sf_log(sf, CE_NOTE, "!Open failure to target 0x%x "
5744 			    "forcing LIP\n",
5745 			    sf_alpa_to_switch[target->sft_al_pa]);
5746 			    sf_force_lip(sf);
5747 			}
5748 			break;
5749 
5750 
5751 		case FCAL_STATUS_ONLINE_TIMEOUT:
5752 			SF_DMSG1("Fibre Channel Online Timeout");
5753 			pkt->pkt_reason = CMD_TRAN_ERR;
5754 			break;
5755 
5756 		default:
5757 			SF_DMSG1("Unknown FC Status");
5758 			pkt->pkt_reason = CMD_TRAN_ERR;
5759 			break;
5760 		}
5761 	}
5762 
5763 #ifdef	DEBUG
5764 	/*
5765 	 * msg1 will be non-NULL if we've detected some sort of error
5766 	 */
5767 	if (msg1 != NULL && sfdebug >= 4) {
5768 		sf_log(sf, CE_WARN,
5769 		    "!Transport error on cmd=0x%p target=0x%x:  %s\n",
5770 		    (void *)fpkt, pkt->pkt_address.a_target, msg1);
5771 	}
5772 #endif
5773 
5774 	if (msg2 != NULL) {
5775 		sf_log(sf, CE_WARN, "!Transport error on target=0x%x:  %s\n",
5776 		    pkt->pkt_address.a_target, msg2);
5777 	}
5778 
5779 	ncmds = fpkt->fcal_ncmds;
5780 	ASSERT(ncmds >= 0);
5781 	if (ncmds >= (sf->sf_throttle - SF_HI_CMD_DELTA)) {
5782 #ifdef DEBUG
5783 		if (!sf->sf_use_lock) {
5784 			SF_DEBUG(4, (sf, CE_NOTE, "use lock flag on\n"));
5785 		}
5786 #endif
5787 		sf->sf_use_lock = TRUE;
5788 	}
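
	/*
	 * sf_use_lock forms a hysteresis band: it is switched on above once
	 * the outstanding count climbs within SF_HI_CMD_DELTA of the
	 * throttle, and switched back off by sf_watch() only after the
	 * count falls below (throttle - SF_LO_CMD_DELTA) with an empty
	 * queue.
	 */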
5789 
5790 	mutex_enter(&sf->sf_cmd_mutex);
5791 	sf->sf_ncmds = ncmds;
5792 	sf_throttle_start(sf);
5793 	mutex_exit(&sf->sf_cmd_mutex);
5794 
5795 	if (!msg1 && !msg2)
5796 		SF_DEBUG(6, (sf, CE_NOTE, "Completing pkt 0x%p\n",
5797 		    (void *)pkt));
5798 	if (pkt->pkt_comp != NULL) {
5799 		(*pkt->pkt_comp)(pkt);
5800 	}
5801 }
5802 
5803 #undef	SF_DMSG1
5804 
5805 
5806 
5807 /*
5808  * start throttling for this instance
5809  */
5810 static void
5811 sf_throttle_start(struct sf *sf)
5812 {
5813 	struct sf_pkt *cmd, *prev_cmd = NULL;
5814 	struct scsi_pkt *pkt;
5815 	struct sf_target *target;
5816 
5817 
5818 	ASSERT(mutex_owned(&sf->sf_cmd_mutex));
5819 
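	/*
	 * Walk the wait queue dispatching commands while we remain under
	 * the throttle.  The cmd mutex is dropped around each dispatch, so
	 * after each one the scan restarts from the (possibly changed)
	 * queue head.
	 */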
5820 	cmd = sf->sf_pkt_head;
5821 	while ((cmd != NULL) &&
5822 	    (sf->sf_state == SF_STATE_ONLINE) &&
5823 	    (sf->sf_ncmds < sf->sf_throttle)) {
5824 
5825 		pkt = CMD2PKT(cmd);
5826 
5827 		target = ADDR2TARGET(&pkt->pkt_address);
5828 		if (target->sft_state & SF_TARGET_BUSY) {
5829 			/* this command is busy -- go to next */
5830 			ASSERT(cmd->cmd_state != SF_STATE_ISSUED);
5831 			prev_cmd = cmd;
5832 			cmd = cmd->cmd_next;
5833 			continue;
5834 		}
5835 
5836 		ASSERT(cmd->cmd_state != SF_STATE_ISSUED);
5837 
5838 		/* this cmd not busy and not issued */
5839 
5840 		/* remove this packet from the queue */
5841 		if (sf->sf_pkt_head == cmd) {
5842 			/* this was the first packet */
5843 			sf->sf_pkt_head = cmd->cmd_next;
5844 		} else if (sf->sf_pkt_tail == cmd) {
5845 			/* this was the last packet */
5846 			sf->sf_pkt_tail = prev_cmd;
5847 			if (prev_cmd != NULL) {
5848 				prev_cmd->cmd_next = NULL;
5849 			}
5850 		} else {
5851 			/* some packet in the middle of the queue */
5852 			ASSERT(prev_cmd != NULL);
5853 			prev_cmd->cmd_next = cmd->cmd_next;
5854 		}
5855 		cmd->cmd_flags &= ~CFLAG_IN_QUEUE;
5856 
5857 		if (target->sft_state & SF_TARGET_OFFLINE) {
5858 			mutex_exit(&sf->sf_cmd_mutex);
5859 			pkt->pkt_reason = CMD_TRAN_ERR;
5860 			if (pkt->pkt_comp != NULL) {
5861 				(*pkt->pkt_comp)(cmd->cmd_pkt);
5862 			}
5863 		} else {
5864 			sf_fill_ids(sf, cmd, target);
5865 			if (sf_start_internal(sf, cmd) != TRAN_ACCEPT) {
5866 				pkt->pkt_reason = CMD_TRAN_ERR;
5867 				if (pkt->pkt_comp != NULL) {
5868 					(*pkt->pkt_comp)(cmd->cmd_pkt);
5869 				}
5870 			}
5871 		}
5872 		mutex_enter(&sf->sf_cmd_mutex);
5873 		cmd = sf->sf_pkt_head;
5874 		prev_cmd = NULL;
5875 	}
5876 }
5877 
5878 
5879 /*
5880  * called when the max exchange value is exceeded to throttle back commands
5881  */
5882 static void
5883 sf_throttle(struct sf *sf)
5884 {
5885 	int cmdmax = sf->sf_sochandle->fcal_cmdmax;
5886 
5887 
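	/*
	 * Halve the window on exchange exhaustion: the throttle becomes
	 * cmdmax/2 when more than that many commands are outstanding,
	 * otherwise it backs off to (ncmds - SF_DECR_DELTA), bottoming out
	 * at SF_DECR_DELTA.  E.g. with cmdmax = 256 and 200 commands
	 * outstanding the new throttle is 128.
	 */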
5888 	mutex_enter(&sf->sf_cmd_mutex);
5889 
5890 	sf->sf_flag = TRUE;
5891 
5892 	if (sf->sf_ncmds > (cmdmax / 2)) {
5893 		sf->sf_throttle = cmdmax / 2;
5894 	} else {
5895 		if (sf->sf_ncmds > SF_DECR_DELTA) {
5896 			sf->sf_throttle = sf->sf_ncmds - SF_DECR_DELTA;
5897 		} else {
5898 			/*
5899 			 * This case is just a safeguard; it should not really
5900 			 * happen (ncmds < SF_DECR_DELTA and MAX_EXCHG exceeded)
5901 			 */
5902 			sf->sf_throttle = SF_DECR_DELTA;
5903 		}
5904 	}
5905 	mutex_exit(&sf->sf_cmd_mutex);
5906 
5907 	sf = sf->sf_sibling;
5908 	if (sf != NULL) {
5909 		mutex_enter(&sf->sf_cmd_mutex);
5910 		sf->sf_flag = TRUE;
5911 		if (sf->sf_ncmds >= (cmdmax / 2)) {
5912 			sf->sf_throttle = cmdmax / 2;
5913 		} else {
5914 			if (sf->sf_ncmds > SF_DECR_DELTA) {
5915 				sf->sf_throttle = sf->sf_ncmds - SF_DECR_DELTA;
5916 			} else {
5917 				sf->sf_throttle = SF_DECR_DELTA;
5918 			}
5919 		}
5920 
5921 		mutex_exit(&sf->sf_cmd_mutex);
5922 	}
5923 }
5924 
5925 
5926 /*
5927  * sf watchdog routine, called for a timeout
5928  */
5929 /*ARGSUSED*/
5930 static void
5931 sf_watch(void *arg)
5932 {
5933 	struct sf *sf;
5934 	struct sf_els_hdr	*privp;
5935 	static int count = 0, pscan_count = 0;
5936 	int cmdmax, i, mescount = 0;
5937 	struct sf_target *target;
5938 
5939 
5940 	sf_watchdog_time += sf_watchdog_timeout;
5941 	count++;
5942 	pscan_count++;
5943 
5944 	mutex_enter(&sf_global_mutex);
5945 	sf_watch_running = 1;
5946 	for (sf = sf_head; sf != NULL; sf = sf->sf_next) {
5947 
5948 		mutex_exit(&sf_global_mutex);
5949 
5950 		/* disable throttling while we're suspended */
5951 		mutex_enter(&sf->sf_mutex);
5952 		if (sf->sf_state & SF_STATE_SUSPENDED) {
5953 			mutex_exit(&sf->sf_mutex);
5954 			SF_DEBUG(1, (sf, CE_CONT,
5955 			    "sf_watch, sf%d:throttle disabled "
5956 			    "due to DDI_SUSPEND\n",
5957 			    ddi_get_instance(sf->sf_dip)));
5958 			mutex_enter(&sf_global_mutex);
5959 			continue;
5960 		}
5961 		mutex_exit(&sf->sf_mutex);
5962 
5963 		cmdmax = sf->sf_sochandle->fcal_cmdmax;
5964 
5965 		if (sf->sf_take_core) {
5966 			(void) soc_take_core(sf->sf_sochandle, sf->sf_socp);
5967 		}
5968 
5969 		mutex_enter(&sf->sf_cmd_mutex);
5970 
5971 		if (!sf->sf_flag) {
5972 			if (sf->sf_throttle < (cmdmax / 2)) {
5973 				sf->sf_throttle = cmdmax / 2;
5974 			} else if ((sf->sf_throttle += SF_INCR_DELTA) >
5975 			    cmdmax) {
5976 				sf->sf_throttle = cmdmax;
5977 			}
5978 		} else {
5979 			sf->sf_flag = FALSE;
5980 		}
5981 
5982 		sf->sf_ncmds_exp_avg = (sf->sf_ncmds + sf->sf_ncmds_exp_avg)
5983 		    >> 2;
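		/*
		 * Cheap decaying average: a = (n + a)/4 has fixed point
		 * n/3 for a steady load n; the pool scan below compares it
		 * against the cr pool capacity to decide when to free
		 * unused pool elements.
		 */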
5984 		if ((sf->sf_ncmds <= (sf->sf_throttle - SF_LO_CMD_DELTA)) &&
5985 		    (sf->sf_pkt_head == NULL)) {
5986 #ifdef DEBUG
5987 			if (sf->sf_use_lock) {
5988 				SF_DEBUG(4, (sf, CE_NOTE,
5989 				    "use lock flag off\n"));
5990 			}
5991 #endif
5992 			sf->sf_use_lock = FALSE;
5993 		}
5994 
5995 		if (sf->sf_state == SF_STATE_ONLINE && sf->sf_pkt_head &&
5996 		    sf->sf_ncmds < sf->sf_throttle) {
5997 			sf_throttle_start(sf);
5998 		}
5999 
6000 		mutex_exit(&sf->sf_cmd_mutex);
6001 
6002 		if (pscan_count >= sf_pool_scan_cnt) {
6003 			if (sf->sf_ncmds_exp_avg < (sf->sf_cr_pool_cnt <<
6004 			    SF_LOG2_ELEMS_IN_POOL) - SF_FREE_CR_EPSILON) {
6005 				sf_crpool_free(sf);
6006 			}
6007 		}
6008 		mutex_enter(&sf->sf_mutex);
6009 
6010 		privp = sf->sf_els_list;
6011 		while (privp != NULL) {
6012 			if (privp->timeout < sf_watchdog_time) {
6013 				/* timeout this command */
6014 				privp = sf_els_timeout(sf, privp);
6015 			} else if ((privp->timeout == SF_INVALID_TIMEOUT) &&
6016 			    (privp->lip_cnt != sf->sf_lip_cnt)) {
6017 				if (privp->prev != NULL) {
6018 					privp->prev->next = privp->next;
6019 				}
6020 				if (sf->sf_els_list == privp) {
6021 					sf->sf_els_list = privp->next;
6022 				}
6023 				if (privp->next != NULL) {
6024 					privp->next->prev = privp->prev;
6025 				}
6026 				mutex_exit(&sf->sf_mutex);
6027 				sf_els_free(privp->fpkt);
6028 				mutex_enter(&sf->sf_mutex);
6029 				privp = sf->sf_els_list;
6030 			} else {
6031 				privp = privp->next;
6032 			}
6033 		}
6034 
6035 		if (sf->sf_online_timer && sf->sf_online_timer <
6036 		    sf_watchdog_time) {
6037 			for (i = 0; i < sf_max_targets; i++) {
6038 				target = sf->sf_targets[i];
6039 				if (target != NULL) {
6040 					if (!mescount && target->sft_state &
6041 					    SF_TARGET_BUSY) {
6042 						sf_log(sf, CE_WARN, "!Loop "
6043 						    "Unstable: Failed to bring "
6044 						    "Loop Online\n");
6045 						mescount = 1;
6046 					}
6047 					target->sft_state |= SF_TARGET_MARK;
6048 				}
6049 			}
6050 			sf_finish_init(sf, sf->sf_lip_cnt);
6051 			sf->sf_state = SF_STATE_INIT;
6052 			sf->sf_online_timer = 0;
6053 		}
6054 
6055 		if (sf->sf_state == SF_STATE_ONLINE) {
6056 			mutex_exit(&sf->sf_mutex);
6057 			if (count >= sf_pkt_scan_cnt) {
6058 				sf_check_targets(sf);
6059 			}
6060 		} else if ((sf->sf_state == SF_STATE_OFFLINE) &&
6061 			(sf->sf_timer < sf_watchdog_time)) {
6062 			for (i = 0; i < sf_max_targets; i++) {
6063 				target = sf->sf_targets[i];
6064 				if ((target != NULL) &&
6065 					(target->sft_state &
6066 						SF_TARGET_BUSY)) {
6067 					sf_log(sf, CE_WARN,
6068 						"!Offline Timeout\n");
6069 					if (sf_core && (sf_core &
6070 						SF_CORE_OFFLINE_TIMEOUT)) {
6071 						(void) soc_take_core(
6072 							sf->sf_sochandle,
6073 							sf->sf_socp);
6074 						sf_core = 0;
6075 					}
6076 					break;
6077 				}
6078 			}
6079 			sf_finish_init(sf, sf->sf_lip_cnt);
6080 			sf->sf_state = SF_STATE_INIT;
6081 			mutex_exit(&sf->sf_mutex);
6082 		} else {
6083 			mutex_exit(&sf->sf_mutex);
6084 		}
6085 		mutex_enter(&sf_global_mutex);
6086 	}
6087 	mutex_exit(&sf_global_mutex);
6088 	if (count >= sf_pkt_scan_cnt) {
6089 		count = 0;
6090 	}
6091 	if (pscan_count >= sf_pool_scan_cnt) {
6092 		pscan_count = 0;
6093 	}
6094 
6095 	/* reset timeout */
6096 	sf_watchdog_id = timeout(sf_watch, (caddr_t)0, sf_watchdog_tick);
6097 
6098 	/* signal waiting thread */
6099 	mutex_enter(&sf_global_mutex);
6100 	sf_watch_running = 0;
6101 	cv_broadcast(&sf_watch_cv);
6102 	mutex_exit(&sf_global_mutex);
6103 }
6104 
6105 
6106 /*
6107  * called during a timeout to check targets
6108  */
6109 static void
6110 sf_check_targets(struct sf *sf)
6111 {
6112 	struct sf_target *target;
6113 	int i;
6114 	struct sf_pkt *cmd;
6115 	struct scsi_pkt *pkt;
6116 	int lip_cnt;
6117 
6118 	mutex_enter(&sf->sf_mutex);
6119 	lip_cnt = sf->sf_lip_cnt;
6120 	mutex_exit(&sf->sf_mutex);
6121 
6122 	/* scan all possible targets */
6123 	for (i = 0; i < sf_max_targets; i++) {
6124 		target = sf->sf_targets[i];
6125 		while (target != NULL) {
6126 			mutex_enter(&target->sft_pkt_mutex);
6127 			if (target->sft_alive && target->sft_scan_count !=
6128 			    sf_target_scan_cnt) {
6129 				target->sft_alive = 0;
6130 				target->sft_scan_count++;
6131 				mutex_exit(&target->sft_pkt_mutex);
6132 				return;
6133 			}
6134 			target->sft_alive = 0;
6135 			target->sft_scan_count = 0;
6136 			cmd = target->sft_pkt_head;
6137 			while (cmd != (struct sf_pkt *)&target->sft_pkt_head) {
6138 				mutex_enter(&cmd->cmd_abort_mutex);
6139 				if (cmd->cmd_state == SF_STATE_ISSUED &&
6140 				    ((cmd->cmd_timeout && sf_watchdog_time >
6141 #ifdef	DEBUG
6142 				    cmd->cmd_timeout) || sf_abort_flag)) {
6143 					sf_abort_flag = 0;
6144 #else
6145 				    cmd->cmd_timeout))) {
6146 #endif
6147 					cmd->cmd_timeout = 0;
6148 	/* prevent reset from getting at this packet */
6149 					cmd->cmd_state = SF_STATE_ABORTING;
6150 					mutex_exit(&cmd->cmd_abort_mutex);
6151 					mutex_exit(&target->sft_pkt_mutex);
6152 					sf->sf_stats.tstats[i].timeouts++;
6153 					if (sf_target_timeout(sf, cmd))
6154 						return;
6155 					else {
6156 						if (lip_cnt != sf->sf_lip_cnt) {
6157 							return;
6158 						} else {
6159 							mutex_enter(&target->
6160 							    sft_pkt_mutex);
6161 							cmd = target->
6162 							    sft_pkt_head;
6163 						}
6164 					}
6165 	/*
6166 	 * if the abort and lip fail, a reset will be carried out.
6167 	 * But the reset will ignore this packet. We have waited at least
6168 	 * 20 seconds after the initial timeout. Now, complete it here.
6169 	 * This also takes care of spurious bad aborts.
6170 	 */
6171 				} else if ((cmd->cmd_state ==
6172 				    SF_STATE_ABORTING) && (cmd->cmd_timeout
6173 				    <= sf_watchdog_time)) {
6174 					cmd->cmd_state = SF_STATE_IDLE;
6175 					mutex_exit(&cmd->cmd_abort_mutex);
6176 					mutex_exit(&target->sft_pkt_mutex);
6177 	SF_DEBUG(1, (sf, CE_NOTE, "Command 0x%p to sft 0x%p delayed release\n",
6178 						(void *)cmd, (void *)target));
6179 					pkt = cmd->cmd_pkt;
6180 					pkt->pkt_statistics |=
6181 					    (STAT_TIMEOUT|STAT_ABORTED);
6182 					pkt->pkt_reason = CMD_TIMEOUT;
6183 					if (pkt->pkt_comp) {
6184 						(*pkt->pkt_comp)(pkt);
6185 					/* handle deferred_destroy case */
6186 					} else {
6187 					    if ((cmd->cmd_block->fcp_cntl.
6188 						cntl_reset == 1) ||
6189 						(cmd->cmd_block->
6190 						fcp_cntl.cntl_abort_tsk == 1)) {
6191 						cmd->cmd_block->fcp_cntl.
6192 							cntl_reset = 0;
6193 						cmd->cmd_block->fcp_cntl.
6194 							cntl_abort_tsk = 0;
6195 						cmd->cmd_fp_pkt->fcal_pkt_comp =
6196 							sf_cmd_callback;
6197 					    /* for cache */
6198 						sf_scsi_destroy_pkt
6199 						    (&pkt->pkt_address, pkt);
6200 					    }
6201 					}
6202 					mutex_enter(&target->sft_pkt_mutex);
6203 					cmd = target->sft_pkt_head;
6204 				} else {
6205 					mutex_exit(&cmd->cmd_abort_mutex);
6206 					cmd = cmd->cmd_forw;
6207 				}
6208 			}
6209 			mutex_exit(&target->sft_pkt_mutex);
6210 			target = target->sft_next_lun;
6211 		}
6212 	}
6213 }
6214 
6215 
6216 /*
6217  * a command to a target has timed out
6218  * return TRUE if the cmd abort failed or timed out, else FALSE
6219  */
6220 static int
6221 sf_target_timeout(struct sf *sf, struct sf_pkt *cmd)
6222 {
6223 	int rval;
6224 	struct scsi_pkt *pkt;
6225 	struct fcal_packet *fpkt;
6226 	int tgt_id;
6227 	int retval = FALSE;
6228 
6229 
6230 	SF_DEBUG(1, (sf, CE_NOTE, "Command 0x%p to target %x timed out\n",
6231 	    (void *)cmd->cmd_fp_pkt, cmd->cmd_pkt->pkt_address.a_target));
6232 
6233 	fpkt = (struct fcal_packet *)((char *)cmd + sizeof (struct sf_pkt));
6234 
6235 	if (sf_core && (sf_core & SF_CORE_CMD_TIMEOUT)) {
6236 		sf_token = (int *)(uintptr_t)
6237 		    fpkt->fcal_socal_request.sr_soc_hdr.\
6238 		    sh_request_token;
6239 		(void) soc_take_core(sf->sf_sochandle, sf->sf_socp);
6240 		sf_core = 0;
6241 	}
6242 
6243 	/* call the transport to abort a command */
6244 	rval = soc_abort(sf->sf_sochandle, sf->sf_socp,
6245 	    sf->sf_sochandle->fcal_portno, fpkt, 1);
6246 
6247 	switch (rval) {
6248 	case FCAL_ABORTED:
6249 		SF_DEBUG(1, (sf, CE_NOTE, "Command Abort succeeded\n"));
6250 		pkt = cmd->cmd_pkt;
6251 		cmd->cmd_state = SF_STATE_IDLE;
6252 		pkt->pkt_statistics |= (STAT_TIMEOUT|STAT_ABORTED);
6253 		pkt->pkt_reason = CMD_TIMEOUT;
6254 		if (pkt->pkt_comp != NULL) {
6255 			(*pkt->pkt_comp)(pkt);
6256 		}
6257 		break;				/* success */
6258 
6259 	case FCAL_ABORT_FAILED:
6260 		SF_DEBUG(1, (sf, CE_NOTE, "Command Abort failed at target\n"));
6261 		pkt = cmd->cmd_pkt;
6262 		cmd->cmd_state = SF_STATE_IDLE;
6263 		pkt->pkt_reason = CMD_TIMEOUT;
6264 		pkt->pkt_statistics |= STAT_TIMEOUT;
6265 		tgt_id = pkt->pkt_address.a_target;
6266 		sf->sf_stats.tstats[tgt_id].abts_failures++;
6267 		if (pkt->pkt_comp != NULL) {
6268 			(*pkt->pkt_comp)(pkt);
6269 		}
6270 		break;
6271 
6272 	case FCAL_BAD_ABORT:
6273 		if (sf_core && (sf_core & SF_CORE_BAD_ABORT)) {
6274 			sf_token = (int *)(uintptr_t)fpkt->fcal_socal_request.\
6275 			    sr_soc_hdr.sh_request_token;
6276 			(void) soc_take_core(sf->sf_sochandle, sf->sf_socp);
6277 			sf_core = 0;
6278 		}
6279 		SF_DEBUG(1, (sf, CE_NOTE, "Command Abort bad abort\n"));
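		/*
		 * Re-arm the deadline ~20 seconds out; if the abort still
		 * has not resolved by then, sf_check_targets() completes
		 * the packet itself (its "delayed release" path).
		 */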
6280 		cmd->cmd_timeout = sf_watchdog_time + cmd->cmd_pkt->pkt_time
6281 		    + 20;
6282 		break;
6283 
6284 	case FCAL_TIMEOUT:
6285 		retval = TRUE;
6286 		break;
6287 
6288 	default:
6289 		pkt = cmd->cmd_pkt;
6290 		tgt_id = pkt->pkt_address.a_target;
6291 		sf_log(sf, CE_WARN,
6292 		"Command Abort failed target 0x%x, forcing a LIP\n", tgt_id);
6293 		if (sf_core && (sf_core & SF_CORE_ABORT_TIMEOUT)) {
6294 			sf_token = (int *)(uintptr_t)fpkt->fcal_socal_request.\
6295 			    sr_soc_hdr.sh_request_token;
6296 			(void) soc_take_core(sf->sf_sochandle, sf->sf_socp);
6297 			sf_core = 0;
6298 		}
6299 		sf_force_lip(sf);
6300 		retval = TRUE;
6301 		break;
6302 	}
6303 
6304 	return (retval);
6305 }
6306 
6307 
6308 /*
6309  * an ELS command has timed out
6310  * return the next ELS header to examine on the list, or NULL
6311  */
6312 static struct sf_els_hdr *
6313 sf_els_timeout(struct sf *sf, struct sf_els_hdr *privp)
6314 {
6315 	struct fcal_packet *fpkt;
6316 	int rval, dflag, timeout = SF_ELS_TIMEOUT;
6317 	uint_t lip_cnt = privp->lip_cnt;
6318 	uchar_t els_code = privp->els_code;
6319 	struct sf_target *target = privp->target;
6320 	char what[64];
6321 
6322 	fpkt = privp->fpkt;
6323 	dflag = privp->delayed_retry;
6324 	/* use as temporary state variable */
6325 	privp->timeout = SF_INVALID_TIMEOUT;
6326 	mutex_exit(&sf->sf_mutex);
6327 
6328 	if (privp->fpkt->fcal_pkt_comp == sf_els_callback) {
6329 		/*
6330 		 * take socal core if required. Timeouts for IB and hosts
6331 		 * are not very interesting, so we take socal core only
6332 		 * if the timeout is *not* for an IB or host.
6333 		 */
6334 		if (sf_core && (sf_core & SF_CORE_ELS_TIMEOUT) &&
6335 		((sf_alpa_to_switch[privp->dest_nport_id] & 0x0d) != 0x0d) &&
6336 		((privp->dest_nport_id != 1) && (privp->dest_nport_id != 2) &&
6337 		(privp->dest_nport_id != 4) && (privp->dest_nport_id != 8) &&
6338 		(privp->dest_nport_id != 0xf))) {
6339 			sf_token = (int *)(uintptr_t)fpkt->fcal_socal_request.\
6340 			    sr_soc_hdr.sh_request_token;
6341 			(void) soc_take_core(sf->sf_sochandle, sf->sf_socp);
6342 			sf_core = 0;
6343 		}
6344 		(void) sprintf(what, "ELS 0x%x", privp->els_code);
6345 	} else if (privp->fpkt->fcal_pkt_comp == sf_reportlun_callback) {
6346 		if (sf_core && (sf_core & SF_CORE_REPORTLUN_TIMEOUT)) {
6347 			sf_token = (int *)(uintptr_t)fpkt->fcal_socal_request.\
6348 			    sr_soc_hdr.sh_request_token;
6349 			(void) soc_take_core(sf->sf_sochandle, sf->sf_socp);
6350 			sf_core = 0;
6351 		}
6352 		timeout = SF_FCP_TIMEOUT;
6353 		(void) sprintf(what, "REPORT_LUNS");
6354 	} else if (privp->fpkt->fcal_pkt_comp == sf_inq_callback) {
6355 		if (sf_core && (sf_core & SF_CORE_INQUIRY_TIMEOUT)) {
6356 			sf_token = (int *)(uintptr_t)
6357 			    fpkt->fcal_socal_request.\
6358 			    sr_soc_hdr.sh_request_token;
6359 			(void) soc_take_core(sf->sf_sochandle, sf->sf_socp);
6360 			sf_core = 0;
6361 		}
6362 		timeout = SF_FCP_TIMEOUT;
6363 		(void) sprintf(what, "INQUIRY to LUN 0x%lx",
6364 			(long)SCSA_LUN(target));
6365 	} else {
6366 		(void) sprintf(what, "UNKNOWN OPERATION");
6367 	}
6368 
6369 	if (dflag) {
6370 		/* delayed retry */
6371 		SF_DEBUG(2, (sf, CE_CONT,
6372 				"!sf%d: %s to target %x delayed retry\n",
6373 				ddi_get_instance(sf->sf_dip), what,
6374 				sf_alpa_to_switch[privp->dest_nport_id]));
6375 		privp->delayed_retry = FALSE;
6376 		goto try_again;
6377 	}
6378 
6379 	sf_log(sf, CE_NOTE, "!%s to target 0x%x alpa 0x%x timed out\n",
6380 		what, sf_alpa_to_switch[privp->dest_nport_id],
6381 		privp->dest_nport_id);
6382 
6383 	rval = soc_abort(sf->sf_sochandle, sf->sf_socp, sf->sf_sochandle
6384 	    ->fcal_portno, fpkt, 1);
6385 	if (rval == FCAL_ABORTED || rval == FCAL_ABORT_FAILED) {
6386 	SF_DEBUG(1, (sf, CE_NOTE, "!%s abort to al_pa %x succeeded\n",
6387 	    what, privp->dest_nport_id));
6388 try_again:
6389 
6390 		mutex_enter(&sf->sf_mutex);
6391 		if (privp->prev != NULL) {
6392 			privp->prev->next = privp->next;
6393 		}
6394 		if (sf->sf_els_list == privp) {
6395 			sf->sf_els_list = privp->next;
6396 		}
6397 		if (privp->next != NULL) {
6398 			privp->next->prev = privp->prev;
6399 		}
6400 		privp->prev = privp->next = NULL;
6401 		if (lip_cnt == sf->sf_lip_cnt) {
6402 			privp->timeout = sf_watchdog_time + timeout;
6403 			if ((++(privp->retries) < sf_els_retries) ||
6404 			    (dflag && (privp->retries < SF_BSY_RETRIES))) {
6405 				mutex_exit(&sf->sf_mutex);
6406 				sf_log(sf, CE_NOTE,
6407 					"!%s to target 0x%x retrying\n",
6408 					what,
6409 				sf_alpa_to_switch[privp->dest_nport_id]);
6410 				if (sf_els_transport(sf, privp) == 1) {
6411 					mutex_enter(&sf->sf_mutex);
6412 					return (sf->sf_els_list); /* success */
6413 				}
6414 				mutex_enter(&sf->sf_mutex);
6415 				fpkt = NULL;
6416 			}
6417 			if ((lip_cnt == sf->sf_lip_cnt) &&
6418 			    (els_code != LA_ELS_LOGO)) {
6419 				if (target != NULL) {
6420 					sf_offline_target(sf, target);
6421 				}
6422 				if (sf->sf_lip_cnt == lip_cnt) {
6423 					sf->sf_device_count--;
6424 					ASSERT(sf->sf_device_count >= 0);
6425 					if (sf->sf_device_count == 0) {
6426 						sf_finish_init(sf,
6427 						    sf->sf_lip_cnt);
6428 					}
6429 				}
6430 			}
6431 			privp = sf->sf_els_list;
6432 			mutex_exit(&sf->sf_mutex);
6433 			if (fpkt != NULL) {
6434 				sf_els_free(fpkt);
6435 			}
6436 		} else {
6437 			mutex_exit(&sf->sf_mutex);
6438 			sf_els_free(privp->fpkt);
6439 			privp = NULL;
6440 		}
6441 	} else {
6442 		if (sf_core && (sf_core & SF_CORE_ELS_FAILED)) {
6443 			sf_token = (int *)(uintptr_t)
6444 			    fpkt->fcal_socal_request.\
6445 			    sr_soc_hdr.sh_request_token;
6446 			(void) soc_take_core(sf->sf_sochandle, sf->sf_socp);
6447 			sf_core = 0;
6448 		}
6449 		sf_log(sf, CE_NOTE, "%s abort to target 0x%x failed. "
6450 			"status=0x%x, forcing LIP\n", what,
6451 			sf_alpa_to_switch[privp->dest_nport_id], rval);
6452 		privp = NULL;
6453 		if (sf->sf_lip_cnt == lip_cnt) {
6454 			sf_force_lip(sf);
6455 		}
6456 	}
6457 
6458 	mutex_enter(&sf->sf_mutex);
6459 	return (privp);
6460 }
6461 
6462 
6463 /*
6464  * called by timeout when a reset times out
6465  */
6466 /*ARGSUSED*/
6467 static void
6468 sf_check_reset_delay(void *arg)
6469 {
6470 	struct sf *sf;
6471 	struct sf_target *target;
6472 	struct sf_reset_list *rp, *tp;
6473 	uint_t lip_cnt, reset_timeout_flag = FALSE;
6474 	clock_t lb;
6475 
6476 
6477 	lb = ddi_get_lbolt();
6478 
6479 	mutex_enter(&sf_global_mutex);
6480 
6481 	sf_reset_timeout_id = 0;
6482 
6483 	for (sf = sf_head; sf != NULL; sf = sf->sf_next) {
6484 
6485 		mutex_exit(&sf_global_mutex);
6486 		mutex_enter(&sf->sf_mutex);
6487 
6488 		/* is this type cast needed? */
6489 		tp = (struct sf_reset_list *)&sf->sf_reset_list;
6490 
6491 		rp = sf->sf_reset_list;
6492 		while (rp != NULL) {
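			/*
			 * (rp->timeout - lb) < 0 is the wrap-safe way to
			 * test whether the lbolt deadline has passed.
			 */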
6493 			if (((rp->timeout - lb) < 0) &&
6494 			    (rp->lip_cnt == sf->sf_lip_cnt)) {
6495 				tp->next = rp->next;
6496 				mutex_exit(&sf->sf_mutex);
6497 				target = rp->target;
6498 				lip_cnt = rp->lip_cnt;
6499 				kmem_free(rp, sizeof (struct sf_reset_list));
6500 				/* abort all cmds for this target */
6501 				while (target) {
6502 					sf_abort_all(sf, target, FALSE,
6503 						lip_cnt, TRUE);
6504 					mutex_enter(&target->sft_mutex);
6505 					if (lip_cnt == sf->sf_lip_cnt) {
6506 						target->sft_state &=
6507 							~SF_TARGET_BUSY;
6508 					}
6509 					mutex_exit(&target->sft_mutex);
6510 					target = target->sft_next_lun;
6511 				}
6512 				mutex_enter(&sf->sf_mutex);
6513 				tp = (struct sf_reset_list *)
6514 				    &sf->sf_reset_list;
6515 				rp = sf->sf_reset_list;
6516 				lb = ddi_get_lbolt();
6517 			} else if (rp->lip_cnt != sf->sf_lip_cnt) {
6518 				tp->next = rp->next;
6519 				kmem_free(rp, sizeof (struct sf_reset_list));
6520 				rp = tp->next;
6521 			} else {
6522 				reset_timeout_flag = TRUE;
6523 				tp = rp;
6524 				rp = rp->next;
6525 			}
6526 		}
6527 		mutex_exit(&sf->sf_mutex);
6528 		mutex_enter(&sf_global_mutex);
6529 	}
6530 
6531 	if (reset_timeout_flag && (sf_reset_timeout_id == 0)) {
6532 		sf_reset_timeout_id = timeout(sf_check_reset_delay,
6533 		    NULL, drv_usectohz(SF_TARGET_RESET_DELAY));
6534 	}
6535 
6536 	mutex_exit(&sf_global_mutex);
6537 }
6538 
6539 
6540 /*
6541  * called to "reset the bus", i.e. force loop initialization (and address
6542  * re-negotiation)
6543  */
6544 static void
6545 sf_force_lip(struct sf *sf)
6546 {
6547 	int i;
6548 	struct sf_target *target;
6549 
6550 
6551 	/* disable restart of lip if we're suspended */
6552 	mutex_enter(&sf->sf_mutex);
6553 	if (sf->sf_state & SF_STATE_SUSPENDED) {
6554 		mutex_exit(&sf->sf_mutex);
6555 		SF_DEBUG(1, (sf, CE_CONT,
6556 		    "sf_force_lip, sf%d: lip restart disabled "
6557 		    "due to DDI_SUSPEND\n",
6558 		    ddi_get_instance(sf->sf_dip)));
6559 		return;
6560 	}
6561 
6562 	sf_log(sf, CE_NOTE, "Forcing lip\n");
6563 
6564 	for (i = 0; i < sf_max_targets; i++) {
6565 		target = sf->sf_targets[i];
6566 		while (target != NULL) {
6567 			mutex_enter(&target->sft_mutex);
6568 			if (!(target->sft_state & SF_TARGET_OFFLINE))
6569 				target->sft_state |= SF_TARGET_BUSY;
6570 			mutex_exit(&target->sft_mutex);
6571 			target = target->sft_next_lun;
6572 		}
6573 	}
6574 
6575 	sf->sf_lip_cnt++;
6576 	sf->sf_timer = sf_watchdog_time + SF_OFFLINE_TIMEOUT;
6577 	sf->sf_state = SF_STATE_OFFLINE;
6578 	mutex_exit(&sf->sf_mutex);
6579 	sf->sf_stats.lip_count++;		/* no mutex for this? */
6580 
6581 #ifdef DEBUG
6582 	/* are we allowing LIPs ?? */
6583 	if (sf_lip_flag != 0) {
6584 #endif
6585 		/* call the transport to force loop initialization */
6586 		if (((i = soc_force_lip(sf->sf_sochandle, sf->sf_socp,
6587 		    sf->sf_sochandle->fcal_portno, 1,
6588 		    FCAL_FORCE_LIP)) != FCAL_SUCCESS) &&
6589 		    (i != FCAL_TIMEOUT)) {
6590 			/* force LIP failed */
6591 			if (sf_core && (sf_core & SF_CORE_LIP_FAILED)) {
6592 				(void) soc_take_core(sf->sf_sochandle,
6593 				    sf->sf_socp);
6594 				sf_core = 0;
6595 			}
6596 #ifdef DEBUG
6597 			/* are we allowing reset after LIP failed ?? */
6598 			if (sf_reset_flag != 0) {
6599 #endif
6600 				/* restart socal after resetting it */
6601 			    sf_log(sf, CE_NOTE,
6602 				"!Force lip failed, status code 0x%x. "
6603 				"Resetting\n", i);
6604 				/* call transport to force a reset */
6605 			    soc_force_reset(sf->sf_sochandle, sf->sf_socp,
6606 				sf->sf_sochandle->fcal_portno, 1);
6607 #ifdef	DEBUG
6608 			}
6609 #endif
6610 		}
6611 #ifdef	DEBUG
6612 	}
6613 #endif
6614 }
6615 
6616 
6617 /*
6618  * called by the transport when an unsolicited ELS is received
6619  */
6620 static void
6621 sf_unsol_els_callback(void *arg, soc_response_t *srp, caddr_t payload)
6622 {
6623 	struct sf *sf = (struct sf *)arg;
6624 	els_payload_t	*els = (els_payload_t *)payload;
6625 	struct la_els_rjt *rsp;
6626 	int	i, tgt_id;
6627 	uchar_t dest_id;
6628 	struct fcal_packet *fpkt;
6629 	fc_frame_header_t *hp;
6630 	struct sf_els_hdr *privp;
6631 
6632 
6633 	if ((els == NULL) || ((i = srp->sr_soc_hdr.sh_byte_cnt) == 0)) {
6634 	    return;
6635 	}
6636 
6637 	if (i > SOC_CQE_PAYLOAD) {
6638 		i = SOC_CQE_PAYLOAD;
6639 	}
6640 
6641 	dest_id = (uchar_t)srp->sr_fc_frame_hdr.s_id;
6642 	tgt_id = sf_alpa_to_switch[dest_id];
6643 
6644 	switch (els->els_cmd.c.ls_command) {
6645 
6646 	case LA_ELS_LOGO:
6647 		/*
6648 		 * logout received -- log the fact
6649 		 */
6650 		sf->sf_stats.tstats[tgt_id].logouts_recvd++;
6651 		sf_log(sf, CE_NOTE, "!LOGO recvd from target %x, %s\n",
6652 			tgt_id,
6653 			sf_lip_on_plogo ? "Forcing LIP...." : "");
6654 		if (sf_lip_on_plogo) {
6655 			sf_force_lip(sf);
6656 		}
6657 		break;
6658 
6659 	default:  /* includes LA_ELS_PLOGI */
6660 		/*
6661 		 * something besides a logout received -- we don't handle
6662 		 * this, so send back a reject saying it's unsupported
6663 		 */
6664 
6665 		sf_log(sf, CE_NOTE, "!ELS 0x%x recvd from target 0x%x\n",
6666 					els->els_cmd.c.ls_command, tgt_id);
6667 
6668 
6669 		/* allocate room for a response */
6670 		if (sf_els_alloc(sf, dest_id, sizeof (struct sf_els_hdr),
6671 		    sizeof (struct la_els_rjt), sizeof (union sf_els_rsp),
6672 		    (caddr_t *)&privp, (caddr_t *)&rsp) == NULL) {
6673 			break;
6674 		}
6675 
6676 		fpkt = privp->fpkt;
6677 
6678 		/* fill in pkt header */
6679 		hp = &fpkt->fcal_socal_request.sr_fc_frame_hdr;
6680 		hp->r_ctl = R_CTL_ELS_RSP;
6681 		hp->f_ctl = F_CTL_LAST_SEQ | F_CTL_XCHG_CONTEXT;
6682 		hp->ox_id = srp->sr_fc_frame_hdr.ox_id;
6683 		hp->rx_id = srp->sr_fc_frame_hdr.rx_id;
6684 		fpkt->fcal_socal_request.sr_cqhdr.cq_hdr_type =
6685 		    CQ_TYPE_OUTBOUND;
6686 
6687 		fpkt->fcal_socal_request.sr_soc_hdr.sh_seg_cnt = 1;
6688 
6689 		/* fill in response */
6690 		rsp->ls_code = LA_ELS_RJT;	/* reject this ELS */
6691 		rsp->mbz[0] = 0;
6692 		rsp->mbz[1] = 0;
6693 		rsp->mbz[2] = 0;
6694 		((struct la_els_logi *)privp->rsp)->ls_code = LA_ELS_ACC;
6695 		*((int *)&rsp->reserved) = 0;
6696 		rsp->reason_code = RJT_UNSUPPORTED;
6697 		privp->retries = sf_els_retries;
6698 		privp->els_code = LA_ELS_RJT;
6699 		privp->timeout = (unsigned)0xffffffff;
6700 		(void) sf_els_transport(sf, privp);
6701 		break;
6702 	}
6703 }
6704 
6705 
6706 /*
6707  * Error logging, printing, and debug print routines
6708  */
6709 
6710 /*PRINTFLIKE3*/
6711 static void
6712 sf_log(struct sf *sf, int level, const char *fmt, ...)
6713 {
6714 	char buf[256];
6715 	dev_info_t *dip;
6716 	va_list ap;
6717 
6718 	if (sf != NULL) {
6719 		dip = sf->sf_dip;
6720 	} else {
6721 		dip = NULL;
6722 	}
6723 
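	/*
	 * vsprintf() into a fixed 256-byte buffer -- callers are expected
	 * to keep messages short.
	 */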
6724 	va_start(ap, fmt);
6725 	(void) vsprintf(buf, fmt, ap);
6726 	va_end(ap);
6727 	scsi_log(dip, "sf", level, buf);
6728 }
6729 
6730 
6731 /*
6732  * called to get some sf kstats -- return 0 on success else return errno
6733  */
6734 static int
6735 sf_kstat_update(kstat_t *ksp, int rw)
6736 {
6737 	struct sf *sf;
6738 
6739 	if (rw == KSTAT_WRITE) {
6740 		/* can't write */
6741 		return (EACCES);
6742 	}
6743 
6744 	sf = ksp->ks_private;
6745 	sf->sf_stats.ncmds = sf->sf_ncmds;
6746 	sf->sf_stats.throttle_limit = sf->sf_throttle;
6747 	sf->sf_stats.cr_pool_size = sf->sf_cr_pool_cnt;
6748 
6749 	return (0);				/* success */
6750 }
6751 
6752 
6753 /*
6754  * Unix Entry Points
6755  */
6756 
6757 /*
6758  * driver entry point for opens on control device
6759  */
6760 /* ARGSUSED */
6761 static int
6762 sf_open(dev_t *dev_p, int flag, int otyp, cred_t *cred_p)
6763 {
6764 	dev_t dev = *dev_p;
6765 	struct sf *sf;
6766 
6767 
6768 	/* just ensure soft state exists for this device */
6769 	sf = ddi_get_soft_state(sf_state, SF_MINOR2INST(getminor(dev)));
6770 	if (sf == NULL) {
6771 		return (ENXIO);
6772 	}
6773 
6774 	++(sf->sf_check_n_close);
6775 
6776 	return (0);
6777 }
6778 
6779 
6780 /*
6781  * driver entry point for last close on control device
6782  */
6783 /* ARGSUSED */
6784 static int
6785 sf_close(dev_t dev, int flag, int otyp, cred_t *cred_p)
6786 {
6787 	struct sf *sf;
6788 
6789 	sf = ddi_get_soft_state(sf_state, SF_MINOR2INST(getminor(dev)));
6790 	if (sf == NULL) {
6791 		return (ENXIO);
6792 	}
6793 
6794 	if (!sf->sf_check_n_close) { /* if this flag is zero */
6795 		cmn_err(CE_WARN, "sf%d: trying to close unopened instance",
6796 				SF_MINOR2INST(getminor(dev)));
6797 		return (ENODEV);
6798 	} else {
6799 		--(sf->sf_check_n_close);
6800 	}
6801 	return (0);
6802 }
6803 
6804 
6805 /*
6806  * driver entry point for sf ioctl commands
6807  */
6808 /* ARGSUSED */
6809 static int
6810 sf_ioctl(dev_t dev,
6811     int cmd, intptr_t arg, int mode, cred_t *cred_p, int *rval_p)
6812 {
6813 	struct sf *sf;
6814 	struct sf_target *target;
6815 	uchar_t al_pa;
6816 	struct sf_al_map map;
6817 	int cnt, i;
6818 	int	retval;				/* return value */
6819 	struct devctl_iocdata *dcp;
6820 	dev_info_t *cdip;
6821 	struct scsi_address ap;
6822 	scsi_hba_tran_t tran;
6823 
6824 
6825 	sf = ddi_get_soft_state(sf_state, SF_MINOR2INST(getminor(dev)));
6826 	if (sf == NULL) {
6827 		return (ENXIO);
6828 	}
6829 
6830 	/* handle all ioctls */
6831 	switch (cmd) {
6832 
6833 	/*
6834 	 * We can use the generic implementation for these ioctls
6835 	 */
6836 	case DEVCTL_DEVICE_GETSTATE:
6837 	case DEVCTL_DEVICE_ONLINE:
6838 	case DEVCTL_DEVICE_OFFLINE:
6839 	case DEVCTL_BUS_GETSTATE:
6840 		return (ndi_devctl_ioctl(sf->sf_dip, cmd, arg, mode, 0));
6841 
6842 	/*
6843 	 * return FC map
6844 	 */
6845 	case SFIOCGMAP:
6846 		if ((sf->sf_lilp_map->lilp_magic != FCAL_LILP_MAGIC &&
6847 		    sf->sf_lilp_map->lilp_magic != FCAL_BADLILP_MAGIC) ||
6848 		    sf->sf_state != SF_STATE_ONLINE) {
6849 			retval = ENOENT;
6850 			goto dun;
6851 		}
6852 		mutex_enter(&sf->sf_mutex);
6853 		if (sf->sf_lilp_map->lilp_magic == FCAL_BADLILP_MAGIC) {
6854 			int i, j = 0;
6855 
6856 			/* Need to generate a fake lilp map */
6857 			for (i = 0; i < sf_max_targets; i++) {
6858 				if (sf->sf_targets[i])
6859 					sf->sf_lilp_map->lilp_alpalist[j++] =
6860 						sf->sf_targets[i]->
6861 						sft_hard_address;
6862 			}
6863 			sf->sf_lilp_map->lilp_length = (uchar_t)j;
6864 		}
6865 		cnt = sf->sf_lilp_map->lilp_length;
6866 		map.sf_count = (short)cnt;
6867 		bcopy((caddr_t)&sf->sf_sochandle->fcal_n_wwn,
6868 		    (caddr_t)&map.sf_hba_addr.sf_node_wwn,
6869 		    sizeof (la_wwn_t));
6870 		bcopy((caddr_t)&sf->sf_sochandle->fcal_p_wwn,
6871 		    (caddr_t)&map.sf_hba_addr.sf_port_wwn,
6872 		    sizeof (la_wwn_t));
6873 		map.sf_hba_addr.sf_al_pa = sf->sf_al_pa;
6874 		map.sf_hba_addr.sf_hard_address = 0;
6875 		map.sf_hba_addr.sf_inq_dtype = DTYPE_UNKNOWN;
6876 		for (i = 0; i < cnt; i++) {
6877 			al_pa = sf->sf_lilp_map->lilp_alpalist[i];
6878 			map.sf_addr_pair[i].sf_al_pa = al_pa;
6879 			if (al_pa == sf->sf_al_pa) {
6880 				(void) bcopy((caddr_t)&sf->sf_sochandle
6881 				    ->fcal_n_wwn, (caddr_t)&map.
6882 				    sf_addr_pair[i].sf_node_wwn,
6883 				    sizeof (la_wwn_t));
6884 				(void) bcopy((caddr_t)&sf->sf_sochandle
6885 				    ->fcal_p_wwn, (caddr_t)&map.
6886 				    sf_addr_pair[i].sf_port_wwn,
6887 				    sizeof (la_wwn_t));
6888 				map.sf_addr_pair[i].sf_hard_address =
6889 					al_pa;
6890 				map.sf_addr_pair[i].sf_inq_dtype =
6891 					DTYPE_PROCESSOR;
6892 				continue;
6893 			}
6894 			target = sf->sf_targets[sf_alpa_to_switch[
6895 				al_pa]];
6896 			if (target != NULL) {
6897 				mutex_enter(&target->sft_mutex);
6898 				if (!(target->sft_state &
6899 				    (SF_TARGET_OFFLINE |
6900 					SF_TARGET_BUSY))) {
6901 					bcopy((caddr_t)&target->
6902 					    sft_node_wwn,
6903 					    (caddr_t)&map.sf_addr_pair
6904 					    [i].sf_node_wwn,
6905 					    sizeof (la_wwn_t));
6906 					bcopy((caddr_t)&target->
6907 					    sft_port_wwn,
6908 					    (caddr_t)&map.sf_addr_pair
6909 					    [i].sf_port_wwn,
6910 					    sizeof (la_wwn_t));
6911 					map.sf_addr_pair[i].
6912 						sf_hard_address
6913 						= target->sft_hard_address;
6914 					map.sf_addr_pair[i].
6915 						sf_inq_dtype
6916 						= target->sft_device_type;
6917 					mutex_exit(&target->sft_mutex);
6918 					continue;
6919 				}
6920 				mutex_exit(&target->sft_mutex);
6921 			}
6922 			bzero((caddr_t)&map.sf_addr_pair[i].
6923 			    sf_node_wwn, sizeof (la_wwn_t));
6924 			bzero((caddr_t)&map.sf_addr_pair[i].
6925 			    sf_port_wwn, sizeof (la_wwn_t));
6926 			map.sf_addr_pair[i].sf_inq_dtype =
6927 				DTYPE_UNKNOWN;
6928 		}
6929 		mutex_exit(&sf->sf_mutex);
6930 		if (ddi_copyout((caddr_t)&map, (caddr_t)arg,
6931 		    sizeof (struct sf_al_map), mode) != 0) {
6932 			retval = EFAULT;
6933 			goto dun;
6934 		}
6935 		break;
6936 
6937 	/*
6938 	 * handle device control ioctls
6939 	 */
6940 	case DEVCTL_DEVICE_RESET:
6941 		if (ndi_dc_allochdl((void *)arg, &dcp) != NDI_SUCCESS) {
6942 			retval = EFAULT;
6943 			goto dun;
6944 		}
6945 		if ((ndi_dc_getname(dcp) == NULL) ||
6946 		    (ndi_dc_getaddr(dcp) == NULL)) {
6947 			ndi_dc_freehdl(dcp);
6948 			retval = EINVAL;
6949 			goto dun;
6950 		}
6951 		cdip = ndi_devi_find(sf->sf_dip,
6952 		    ndi_dc_getname(dcp), ndi_dc_getaddr(dcp));
6953 		ndi_dc_freehdl(dcp);
6954 
6955 		if (cdip == NULL) {
6956 			retval = ENXIO;
6957 			goto dun;
6958 		}
6959 
6960 		if ((target = sf_get_target_from_dip(sf, cdip)) == NULL) {
6961 			retval = ENXIO;
6962 			goto dun;
6963 		}
6964 		mutex_enter(&target->sft_mutex);
6965 		if (!(target->sft_state & SF_TARGET_INIT_DONE)) {
6966 			mutex_exit(&target->sft_mutex);
6967 			retval = ENXIO;
6968 			goto dun;
6969 		}
6970 		tran = *target->sft_tran;
6971 		mutex_exit(&target->sft_mutex);
6972 		ap.a_hba_tran = &tran;
6973 		ap.a_target = sf_alpa_to_switch[target->sft_al_pa];
6974 		if (sf_reset(&ap, RESET_TARGET) == FALSE) {
6975 			retval = EIO;
6976 			goto dun;
6977 		}
6978 		break;
6979 
6980 	case DEVCTL_BUS_QUIESCE:
6981 	case DEVCTL_BUS_UNQUIESCE:
6982 		retval = ENOTSUP;
6983 		goto dun;
6984 
6985 	case DEVCTL_BUS_RESET:
6986 	case DEVCTL_BUS_RESETALL:
6987 		sf_force_lip(sf);
6988 		break;
6989 
6990 	default:
6991 		retval = ENOTTY;
6992 		goto dun;
6993 	}
6994 
6995 	retval = 0;				/* success */
6996 
6997 dun:
6998 	return (retval);
6999 }
7000 
7001 
7002 /*
7003  * get the target given a DIP
7004  */
7005 static struct sf_target *
7006 sf_get_target_from_dip(struct sf *sf, dev_info_t *dip)
7007 {
7008 	int i;
7009 	struct sf_target *target;
7010 
7011 
7012 	/* scan each hash queue for the DIP in question */
7013 	for (i = 0; i < SF_NUM_HASH_QUEUES; i++) {
7014 		target = sf->sf_wwn_lists[i];
7015 		while (target != NULL) {
7016 			if (target->sft_dip == dip) {
7017 				return (target); /* success: target found */
7018 			}
7019 			target = target->sft_next;
7020 		}
7021 	}
7022 	return (NULL);				/* failure: target not found */
7023 }
7024 
7025 
7026 /*
7027  * called by the transport to get an event cookie
7028  */
7029 static int
7030 sf_bus_get_eventcookie(dev_info_t *dip, dev_info_t *rdip, char *name,
7031     ddi_eventcookie_t *event_cookiep)
7032 {
7033 	struct sf *sf;
7034 
7035 	sf = ddi_get_soft_state(sf_state, ddi_get_instance(dip));
7036 	if (sf == NULL) {
7037 		/* can't find instance for this device */
7038 		return (DDI_FAILURE);
7039 	}
7040 
7041 	return (ndi_event_retrieve_cookie(sf->sf_event_hdl, rdip, name,
7042 		    event_cookiep, NDI_EVENT_NOPASS));
7043 
7044 }
7045 
7046 
7047 /*
7048  * called by the transport to add an event callback
7049  */
7050 static int
7051 sf_bus_add_eventcall(dev_info_t *dip, dev_info_t *rdip,
7052     ddi_eventcookie_t eventid, void (*callback)(dev_info_t *dip,
7053     ddi_eventcookie_t event, void *arg, void *impl_data), void *arg,
7054     ddi_callback_id_t *cb_id)
7055 {
7056 	struct sf *sf;
7057 
7058 	sf = ddi_get_soft_state(sf_state, ddi_get_instance(dip));
7059 	if (sf == NULL) {
7060 		/* can't find instance for this device */
7061 		return (DDI_FAILURE);
7062 	}
7063 
7064 	return (ndi_event_add_callback(sf->sf_event_hdl, rdip,
7065 		eventid, callback, arg, NDI_SLEEP, cb_id));
7066 
7067 }
7068 
7069 
7070 /*
7071  * called by the transport to remove an event callback
7072  */
7073 static int
7074 sf_bus_remove_eventcall(dev_info_t *devi, ddi_callback_id_t cb_id)
7075 {
7076 	struct sf *sf;
7077 
7078 	sf = ddi_get_soft_state(sf_state, ddi_get_instance(devi));
7079 	if (sf == NULL) {
7080 		/* can't find instance for this device */
7081 		return (DDI_FAILURE);
7082 	}
7083 
7084 	return (ndi_event_remove_callback(sf->sf_event_hdl, cb_id));
7085 }
7086 
7087 
7088 /*
7089  * called by the transport to post an event
7090  */
7091 static int
7092 sf_bus_post_event(dev_info_t *dip, dev_info_t *rdip,
7093     ddi_eventcookie_t eventid, void *impldata)
7094 {
7095 	ddi_eventcookie_t remove_cookie, cookie;
7096 
7097 	/* is this a remove event ?? */
7098 	struct sf *sf = ddi_get_soft_state(sf_state, ddi_get_instance(dip));
7099 	remove_cookie = ndi_event_tag_to_cookie(sf->sf_event_hdl,
7100 	    SF_EVENT_TAG_REMOVE);
7101 
7102 	if (remove_cookie == eventid) {
7103 		struct sf_target *target;
7104 
7105 		/* handle remove event */
7106 
7107 		if (sf == NULL) {
7108 			/* no sf instance for this device */
7109 			return (NDI_FAILURE);
7110 		}
7111 
7112 		/* get the target for this event */
7113 		if ((target = sf_get_target_from_dip(sf, rdip)) != NULL) {
7114 			/*
7115 			 * clear device info for this target and mark as
7116 			 * not done
7117 			 */
7118 			mutex_enter(&target->sft_mutex);
7119 			target->sft_dip = NULL;
7120 			target->sft_state &= ~SF_TARGET_INIT_DONE;
7121 			mutex_exit(&target->sft_mutex);
7122 			return (NDI_SUCCESS); /* event handled */
7123 		}
7124 
7125 		/* no target for this event */
7126 		return (NDI_FAILURE);
7127 	}
7128 
7129 	/* an insertion event */
7130 	if (ndi_busop_get_eventcookie(dip, rdip, FCAL_INSERT_EVENT, &cookie)
7131 	    != NDI_SUCCESS) {
7132 		return (NDI_FAILURE);
7133 	}
7134 
7135 	return (ndi_post_event(dip, rdip, cookie, impldata));
7136 }
7137 
7138 
7139 /*
7140  * the sf hotplug daemon, one thread per sf instance
7141  */
7142 static void
7143 sf_hp_daemon(void *arg)
7144 {
7145 	struct sf *sf = (struct sf *)arg;
7146 	struct sf_hp_elem *elem;
7147 	struct sf_target *target;
7148 	int tgt_id;
7149 	callb_cpr_t cprinfo;
7150 
7151 	CALLB_CPR_INIT(&cprinfo, &sf->sf_hp_daemon_mutex,
7152 		callb_generic_cpr, "sf_hp_daemon");
7153 
7154 	mutex_enter(&sf->sf_hp_daemon_mutex);
7155 
7156 	do {
7157 		while (sf->sf_hp_elem_head != NULL) {
7158 
7159 			/* save ptr to head of list */
7160 			elem = sf->sf_hp_elem_head;
7161 
7162 			/* take element off of list */
7163 			if (sf->sf_hp_elem_head == sf->sf_hp_elem_tail) {
7164 				/* element only one in list -- list now empty */
7165 				sf->sf_hp_elem_head = NULL;
7166 				sf->sf_hp_elem_tail = NULL;
7167 			} else {
7168 				/* remove element from head of list */
7169 				sf->sf_hp_elem_head = sf->sf_hp_elem_head->next;
7170 			}
7171 
7172 			mutex_exit(&sf->sf_hp_daemon_mutex);
7173 
7174 			switch (elem->what) {
7175 			case SF_ONLINE:
7176 				/* online this target */
7177 				target = elem->target;
7178 				(void) ndi_devi_online(elem->dip, 0);
7179 				(void) ndi_event_retrieve_cookie(
7180 				    sf->sf_event_hdl,
7181 				    target->sft_dip, FCAL_INSERT_EVENT,
7182 				    &sf_insert_eid, NDI_EVENT_NOPASS);
7183 				(void) ndi_event_run_callbacks(sf->sf_event_hdl,
7184 				    target->sft_dip, sf_insert_eid, NULL);
7185 				break;
7186 			case SF_OFFLINE:
7187 				/* offline this target */
7188 				target = elem->target;
7189 				tgt_id = sf_alpa_to_switch[target->sft_al_pa];
7190 				/* don't do NDI_DEVI_REMOVE for now */
7191 				if (ndi_devi_offline(elem->dip, 0) !=
7192 				    NDI_SUCCESS) {
7193 					SF_DEBUG(1, (sf, CE_WARN, "target %x, "
7194 					    "device offline failed", tgt_id));
7195 				} else {
7196 					SF_DEBUG(1, (sf, CE_NOTE, "target %x, "
7197 					    "device offline succeeded\n",
7198 					    tgt_id));
7199 				}
7200 				break;
7201 			}
7202 			kmem_free(elem, sizeof (struct sf_hp_elem));
7203 			mutex_enter(&sf->sf_hp_daemon_mutex);
7204 		}
7205 
7206 		/* if exit is not already signaled */
7207 		if (sf->sf_hp_exit == 0) {
7208 			/* wait to be signaled by work or exit */
7209 			CALLB_CPR_SAFE_BEGIN(&cprinfo);
7210 			cv_wait(&sf->sf_hp_daemon_cv, &sf->sf_hp_daemon_mutex);
7211 			CALLB_CPR_SAFE_END(&cprinfo, &sf->sf_hp_daemon_mutex);
7212 		}
7213 	} while (sf->sf_hp_exit == 0);
7214 
7215 	/* sf_hp_daemon_mutex is dropped by CALLB_CPR_EXIT */
7216 	CALLB_CPR_EXIT(&cprinfo);
7217 	thread_exit();			/* no more hotplug thread */
7218 	/* NOTREACHED */
7219 }
7220