xref: /titanic_44/usr/src/uts/sun/io/scsi/adapters/sf.c (revision 0dc2366f7b9f9f36e10909b1e95edbf2a261c2ac)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 /*
22  * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
23  * Use is subject to license terms.
24  */
25 
26 /*
27  * sf - Solaris Fibre Channel driver
28  *
29  * This module implements some of the Fibre Channel FC-4 layer, converting
30  * from FC frames to SCSI and back.  (Note: no sequence management is done
31  * here, though.)
32  */
33 
34 #if defined(lint) && !defined(DEBUG)
35 #define	DEBUG	1
36 #endif
37 
38 /*
39  * XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
40  * Need to use the ugly RAID LUN mappings in FCP Annex D
41  * to prevent SCSA from barfing.  This *REALLY* needs to
42  * be addressed by the standards committee.
43  */
44 #define	RAID_LUNS	1
45 
46 #ifdef DEBUG
47 static int sfdebug = 0;
48 #include <sys/debug.h>
49 
50 #define	SF_DEBUG(level, args) \
51 	if (sfdebug >= (level)) sf_log args
52 #else
53 #define	SF_DEBUG(level, args)
54 #endif
55 
56 static int sf_bus_config_debug = 0;
57 
58 /* Why do I have to do this? */
59 #define	offsetof(s, m)  (size_t)(&(((s *)0)->m))
60 
61 #include <sys/scsi/scsi.h>
62 #include <sys/fc4/fcal.h>
63 #include <sys/fc4/fcp.h>
64 #include <sys/fc4/fcal_linkapp.h>
65 #include <sys/socal_cq_defs.h>
66 #include <sys/fc4/fcal_transport.h>
67 #include <sys/fc4/fcio.h>
68 #include <sys/scsi/adapters/sfvar.h>
69 #include <sys/scsi/impl/scsi_reset_notify.h>
70 #include <sys/stat.h>
71 #include <sys/varargs.h>
72 #include <sys/var.h>
73 #include <sys/thread.h>
74 #include <sys/proc.h>
75 #include <sys/kstat.h>
76 #include <sys/devctl.h>
77 #include <sys/scsi/targets/ses.h>
78 #include <sys/callb.h>
79 
80 static int sf_info(dev_info_t *, ddi_info_cmd_t, void *, void **);
81 static int sf_attach(dev_info_t *, ddi_attach_cmd_t);
82 static int sf_detach(dev_info_t *, ddi_detach_cmd_t);
83 static void sf_softstate_unlink(struct sf *);
84 static int sf_scsi_bus_config(dev_info_t *parent, uint_t flag,
85     ddi_bus_config_op_t op, void *arg, dev_info_t **childp);
86 static int sf_scsi_bus_unconfig(dev_info_t *parent, uint_t flag,
87     ddi_bus_config_op_t op, void *arg);
88 static int sf_scsi_tgt_init(dev_info_t *, dev_info_t *,
89     scsi_hba_tran_t *, struct scsi_device *);
90 static void sf_scsi_tgt_free(dev_info_t *, dev_info_t *,
91     scsi_hba_tran_t *, struct scsi_device *);
92 static int sf_pkt_alloc_extern(struct sf *, struct sf_pkt *,
93     int, int, int);
94 static void sf_pkt_destroy_extern(struct sf *, struct sf_pkt *);
95 static struct scsi_pkt *sf_scsi_init_pkt(struct scsi_address *,
96     struct scsi_pkt *, struct buf *, int, int, int, int, int (*)(), caddr_t);
97 static void sf_scsi_destroy_pkt(struct scsi_address *, struct scsi_pkt *);
98 static void sf_scsi_dmafree(struct scsi_address *, struct scsi_pkt *);
99 static void sf_scsi_sync_pkt(struct scsi_address *, struct scsi_pkt *);
100 static int sf_scsi_reset_notify(struct scsi_address *, int,
101     void (*)(caddr_t), caddr_t);
102 static int sf_scsi_get_name(struct scsi_device *, char *, int);
103 static int sf_scsi_get_bus_addr(struct scsi_device *, char *, int);
104 static int sf_add_cr_pool(struct sf *);
105 static int sf_cr_alloc(struct sf *, struct sf_pkt *, int (*)());
106 static void sf_cr_free(struct sf_cr_pool *, struct sf_pkt *);
107 static void sf_crpool_free(struct sf *);
108 static int sf_kmem_cache_constructor(void *, void *, int);
109 static void sf_kmem_cache_destructor(void *, void *);
110 static void sf_statec_callback(void *, int);
111 static int sf_login(struct sf *, uchar_t, uchar_t, uint_t, int);
112 static int sf_els_transport(struct sf *, struct sf_els_hdr *);
113 static void sf_els_callback(struct fcal_packet *);
114 static int sf_do_prli(struct sf *, struct sf_els_hdr *, struct la_els_logi *);
115 static int sf_do_adisc(struct sf *, struct sf_els_hdr *);
116 static int sf_do_reportlun(struct sf *, struct sf_els_hdr *,
117     struct sf_target *);
118 static void sf_reportlun_callback(struct fcal_packet *);
119 static int sf_do_inquiry(struct sf *, struct sf_els_hdr *,
120     struct sf_target *);
121 static void sf_inq_callback(struct fcal_packet *);
122 static struct fcal_packet *sf_els_alloc(struct sf *, uchar_t, int, int,
123     int, caddr_t *, caddr_t *);
124 static void sf_els_free(struct fcal_packet *);
125 static struct sf_target *sf_create_target(struct sf *,
126     struct sf_els_hdr *, int, int64_t);
127 #ifdef RAID_LUNS
128 static struct sf_target *sf_lookup_target(struct sf *, uchar_t *, int);
129 #else
130 static struct sf_target *sf_lookup_target(struct sf *, uchar_t *, int64_t);
131 #endif
132 static void sf_finish_init(struct sf *, int);
133 static void sf_offline_target(struct sf *, struct sf_target *);
134 static void sf_create_devinfo(struct sf *, struct sf_target *, int);
135 static int sf_create_props(dev_info_t *, struct sf_target *, int);
136 static int sf_commoncap(struct scsi_address *, char *, int, int, int);
137 static int sf_getcap(struct scsi_address *, char *, int);
138 static int sf_setcap(struct scsi_address *, char *, int, int);
139 static int sf_abort(struct scsi_address *, struct scsi_pkt *);
140 static int sf_reset(struct scsi_address *, int);
141 static void sf_abort_all(struct sf *, struct sf_target *, int, int, int);
142 static int sf_start(struct scsi_address *, struct scsi_pkt *);
143 static int sf_start_internal(struct sf *, struct sf_pkt *);
144 static void sf_fill_ids(struct sf *, struct sf_pkt *, struct sf_target *);
145 static int sf_prepare_pkt(struct sf *, struct sf_pkt *, struct sf_target *);
146 static int sf_dopoll(struct sf *, struct sf_pkt *);
147 static void sf_cmd_callback(struct fcal_packet *);
148 static void sf_throttle(struct sf *);
149 static void sf_watch(void *);
150 static void sf_throttle_start(struct sf *);
151 static void sf_check_targets(struct sf *);
152 static void sf_check_reset_delay(void *);
153 static int sf_target_timeout(struct sf *, struct sf_pkt *);
154 static void sf_force_lip(struct sf *);
155 static void sf_unsol_els_callback(void *, soc_response_t *, caddr_t);
156 static struct sf_els_hdr *sf_els_timeout(struct sf *, struct sf_els_hdr *);
157 /*PRINTFLIKE3*/
158 static void sf_log(struct sf *, int, const char *, ...);
159 static int sf_kstat_update(kstat_t *, int);
160 static int sf_open(dev_t *, int, int, cred_t *);
161 static int sf_close(dev_t, int, int, cred_t *);
162 static int sf_ioctl(dev_t, int, intptr_t, int, cred_t *, int *);
163 static struct sf_target *sf_get_target_from_dip(struct sf *, dev_info_t *);
164 static int sf_bus_get_eventcookie(dev_info_t *, dev_info_t *, char *,
165     ddi_eventcookie_t *);
166 static int sf_bus_add_eventcall(dev_info_t *, dev_info_t *,
167     ddi_eventcookie_t, void (*)(), void *, ddi_callback_id_t *cb_id);
168 static int sf_bus_remove_eventcall(dev_info_t *devi, ddi_callback_id_t cb_id);
169 static int sf_bus_post_event(dev_info_t *, dev_info_t *,
170     ddi_eventcookie_t, void *);
171 
172 static void sf_hp_daemon(void *);
173 
174 /*
175  * this is required to be able to supply a control node
176  * where ioctls can be executed
177  */
178 struct cb_ops sf_cb_ops = {
179 	sf_open,			/* open */
180 	sf_close,			/* close */
181 	nodev,				/* strategy */
182 	nodev,				/* print */
183 	nodev,				/* dump */
184 	nodev,				/* read */
185 	nodev,				/* write */
186 	sf_ioctl,			/* ioctl */
187 	nodev,				/* devmap */
188 	nodev,				/* mmap */
189 	nodev,				/* segmap */
190 	nochpoll,			/* poll */
191 	ddi_prop_op,			/* cb_prop_op */
192 	0,				/* streamtab  */
193 	D_MP | D_NEW | D_HOTPLUG	/* driver flags */
194 
195 };
196 
197 /*
198  * autoconfiguration routines.
199  */
200 static struct dev_ops sf_ops = {
201 	DEVO_REV,		/* devo_rev, */
202 	0,			/* refcnt  */
203 	sf_info,		/* info */
204 	nulldev,		/* identify */
205 	nulldev,		/* probe */
206 	sf_attach,		/* attach */
207 	sf_detach,		/* detach */
208 	nodev,			/* reset */
209 	&sf_cb_ops,		/* driver operations */
210 	NULL,			/* bus operations */
211 	NULL,			/* power management */
212 	ddi_quiesce_not_supported,	/* devo_quiesce */
213 };
214 
215 /* to ensure this module gets loaded in memory when we do */
216 char _depends_on[] = "misc/scsi";
217 
218 #define	SF_NAME	"FC-AL FCP Nexus Driver"	/* Name of the module. */
219 static	char	sf_version[] = "1.72 08/19/2008"; /* version of the module */
220 
221 static struct modldrv modldrv = {
222 	&mod_driverops, /* Type of module. This one is a driver */
223 	SF_NAME,
224 	&sf_ops,	/* driver ops */
225 };
226 
227 static struct modlinkage modlinkage = {
228 	MODREV_1, (void *)&modldrv, NULL
229 };
230 
231 /* XXXXXX The following is here to handle broken targets -- remove it later */
232 static int sf_reportlun_forever = 0;
233 /* XXXXXX */
234 static int sf_lip_on_plogo = 0;
235 static int sf_els_retries = SF_ELS_RETRIES;
236 static struct sf *sf_head = NULL;
237 static int sf_target_scan_cnt = 4;
238 static int sf_pkt_scan_cnt = 5;
239 static int sf_pool_scan_cnt = 1800;
240 static void *sf_state = NULL;
241 static int sf_watchdog_init = 0;
242 static int sf_watchdog_time = 0;
243 static int sf_watchdog_timeout = 1;
244 static int sf_watchdog_tick;
245 static int sf_watch_running = 0;
246 static timeout_id_t sf_watchdog_id;
247 static timeout_id_t sf_reset_timeout_id;
248 static int sf_max_targets = SF_MAX_TARGETS;
249 static kmutex_t sf_global_mutex;
250 static int sf_core = 0;
251 int *sf_token = NULL; /* Must not be static or lint complains. */
252 static kcondvar_t sf_watch_cv;
253 extern pri_t minclsyspri;
254 static ddi_eventcookie_t	sf_insert_eid;
255 static ddi_eventcookie_t	sf_remove_eid;
256 
257 static ndi_event_definition_t	sf_event_defs[] = {
258 { SF_EVENT_TAG_INSERT, FCAL_INSERT_EVENT, EPL_KERNEL, 0 },
259 { SF_EVENT_TAG_REMOVE, FCAL_REMOVE_EVENT, EPL_INTERRUPT, 0 }
260 };
261 
262 #define	SF_N_NDI_EVENTS	\
263 	(sizeof (sf_event_defs) / sizeof (ndi_event_definition_t))
264 
265 #ifdef DEBUG
266 static int sf_lip_flag = 1;		/* bool: to allow LIPs */
267 static int sf_reset_flag = 1;		/* bool: to allow reset after LIP */
268 static int sf_abort_flag = 0;		/* bool: to do just one abort */
269 #endif
270 
271 extern int64_t ddi_get_lbolt64(void);
272 
273 /*
274  * for converting between target number (switch) and hard address/AL_PA
275  */
276 static uchar_t sf_switch_to_alpa[] = {
277 	0xef, 0xe8, 0xe4, 0xe2, 0xe1, 0xe0, 0xdc, 0xda, 0xd9, 0xd6,
278 	0xd5, 0xd4, 0xd3, 0xd2, 0xd1, 0xce, 0xcd, 0xcc, 0xcb, 0xca,
279 	0xc9, 0xc7, 0xc6, 0xc5, 0xc3, 0xbc, 0xba, 0xb9, 0xb6, 0xb5,
280 	0xb4, 0xb3, 0xb2, 0xb1, 0xae, 0xad, 0xac, 0xab, 0xaa, 0xa9,
281 	0xa7, 0xa6, 0xa5, 0xa3, 0x9f, 0x9e, 0x9d, 0x9b, 0x98, 0x97,
282 	0x90, 0x8f, 0x88, 0x84, 0x82, 0x81, 0x80, 0x7c, 0x7a, 0x79,
283 	0x76, 0x75, 0x74, 0x73, 0x72, 0x71, 0x6e, 0x6d, 0x6c, 0x6b,
284 	0x6a, 0x69, 0x67, 0x66, 0x65, 0x63, 0x5c, 0x5a, 0x59, 0x56,
285 	0x55, 0x54, 0x53, 0x52, 0x51, 0x4e, 0x4d, 0x4c, 0x4b, 0x4a,
286 	0x49, 0x47, 0x46, 0x45, 0x43, 0x3c, 0x3a, 0x39, 0x36, 0x35,
287 	0x34, 0x33, 0x32, 0x31, 0x2e, 0x2d, 0x2c, 0x2b, 0x2a, 0x29,
288 	0x27, 0x26, 0x25, 0x23, 0x1f, 0x1e, 0x1d, 0x1b, 0x18, 0x17,
289 	0x10, 0x0f, 0x08, 0x04, 0x02, 0x01
290 };
291 
292 static uchar_t sf_alpa_to_switch[] = {
293 	0x00, 0x7d, 0x7c, 0x00, 0x7b, 0x00, 0x00, 0x00, 0x7a, 0x00,
294 	0x00, 0x00, 0x00, 0x00, 0x00, 0x79, 0x78, 0x00, 0x00, 0x00,
295 	0x00, 0x00, 0x00, 0x77, 0x76, 0x00, 0x00, 0x75, 0x00, 0x74,
296 	0x73, 0x72, 0x00, 0x00, 0x00, 0x71, 0x00, 0x70, 0x6f, 0x6e,
297 	0x00, 0x6d, 0x6c, 0x6b, 0x6a, 0x69, 0x68, 0x00, 0x00, 0x67,
298 	0x66, 0x65, 0x64, 0x63, 0x62, 0x00, 0x00, 0x61, 0x60, 0x00,
299 	0x5f, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x5e, 0x00, 0x5d,
300 	0x5c, 0x5b, 0x00, 0x5a, 0x59, 0x58, 0x57, 0x56, 0x55, 0x00,
301 	0x00, 0x54, 0x53, 0x52, 0x51, 0x50, 0x4f, 0x00, 0x00, 0x4e,
302 	0x4d, 0x00, 0x4c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x4b,
303 	0x00, 0x4a, 0x49, 0x48, 0x00, 0x47, 0x46, 0x45, 0x44, 0x43,
304 	0x42, 0x00, 0x00, 0x41, 0x40, 0x3f, 0x3e, 0x3d, 0x3c, 0x00,
305 	0x00, 0x3b, 0x3a, 0x00, 0x39, 0x00, 0x00, 0x00, 0x38, 0x37,
306 	0x36, 0x00, 0x35, 0x00, 0x00, 0x00, 0x34, 0x00, 0x00, 0x00,
307 	0x00, 0x00, 0x00, 0x33, 0x32, 0x00, 0x00, 0x00, 0x00, 0x00,
308 	0x00, 0x31, 0x30, 0x00, 0x00, 0x2f, 0x00, 0x2e, 0x2d, 0x2c,
309 	0x00, 0x00, 0x00, 0x2b, 0x00, 0x2a, 0x29, 0x28, 0x00, 0x27,
310 	0x26, 0x25, 0x24, 0x23, 0x22, 0x00, 0x00, 0x21, 0x20, 0x1f,
311 	0x1e, 0x1d, 0x1c, 0x00, 0x00, 0x1b, 0x1a, 0x00, 0x19, 0x00,
312 	0x00, 0x00, 0x00, 0x00, 0x00, 0x18, 0x00, 0x17, 0x16, 0x15,
313 	0x00, 0x14, 0x13, 0x12, 0x11, 0x10, 0x0f, 0x00, 0x00, 0x0e,
314 	0x0d, 0x0c, 0x0b, 0x0a, 0x09, 0x00, 0x00, 0x08, 0x07, 0x00,
315 	0x06, 0x00, 0x00, 0x00, 0x05, 0x04, 0x03, 0x00, 0x02, 0x00,
316 	0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
317 };
318 
319 /*
320  * these macros call the proper transport-layer function given
321  * a particular transport
322  */
323 #define	soc_transport(a, b, c, d) (*a->fcal_ops->fcal_transport)(b, c, d)
324 #define	soc_transport_poll(a, b, c, d)\
325 	(*a->fcal_ops->fcal_transport_poll)(b, c, d)
326 #define	soc_get_lilp_map(a, b, c, d, e)\
327 	(*a->fcal_ops->fcal_lilp_map)(b, c, d, e)
328 #define	soc_force_lip(a, b, c, d, e)\
329 	(*a->fcal_ops->fcal_force_lip)(b, c, d, e)
330 #define	soc_abort(a, b, c, d, e)\
331 	(*a->fcal_ops->fcal_abort_cmd)(b, c, d, e)
332 #define	soc_force_reset(a, b, c, d)\
333 	(*a->fcal_ops->fcal_force_reset)(b, c, d)
334 #define	soc_add_ulp(a, b, c, d, e, f, g, h)\
335 	(*a->fcal_ops->fcal_add_ulp)(b, c, d, e, f, g, h)
336 #define	soc_remove_ulp(a, b, c, d, e)\
337 	(*a->fcal_ops->fcal_remove_ulp)(b, c, d, e)
338 #define	soc_take_core(a, b) (*a->fcal_ops->fcal_take_core)(b)
339 
340 
341 /* power management property defines (should be in a common include file?) */
342 #define	PM_HARDWARE_STATE_PROP		"pm-hardware-state"
343 #define	PM_NEEDS_SUSPEND_RESUME		"needs-suspend-resume"
344 
345 
346 /* node properties */
347 #define	NODE_WWN_PROP			"node-wwn"
348 #define	PORT_WWN_PROP			"port-wwn"
349 #define	LIP_CNT_PROP			"lip-count"
350 #define	TARGET_PROP			"target"
351 #define	LUN_PROP			"lun"
352 
353 
354 /*
355  * initialize this driver and install this module
356  */
357 int
358 _init(void)
359 {
360 	int	i;
361 
362 	i = ddi_soft_state_init(&sf_state, sizeof (struct sf),
363 	    SF_INIT_ITEMS);
364 	if (i != 0)
365 		return (i);
366 
367 	if ((i = scsi_hba_init(&modlinkage)) != 0) {
368 		ddi_soft_state_fini(&sf_state);
369 		return (i);
370 	}
371 
372 	mutex_init(&sf_global_mutex, NULL, MUTEX_DRIVER, NULL);
373 	sf_watch_running = 0;
374 	cv_init(&sf_watch_cv, NULL, CV_DRIVER, NULL);
375 
376 	if ((i = mod_install(&modlinkage)) != 0) {
377 		mutex_destroy(&sf_global_mutex);
378 		cv_destroy(&sf_watch_cv);
379 		scsi_hba_fini(&modlinkage);
380 		ddi_soft_state_fini(&sf_state);
381 		return (i);
382 	}
383 
384 	return (i);
385 }
386 
387 
388 /*
389  * remove this driver module from the system
390  */
391 int
392 _fini(void)
393 {
394 	int	i;
395 
396 	if ((i = mod_remove(&modlinkage)) == 0) {
397 		scsi_hba_fini(&modlinkage);
398 		mutex_destroy(&sf_global_mutex);
399 		cv_destroy(&sf_watch_cv);
400 		ddi_soft_state_fini(&sf_state);
401 	}
402 	return (i);
403 }
404 
405 
406 int
407 _info(struct modinfo *modinfop)
408 {
409 	return (mod_info(&modlinkage, modinfop));
410 }
411 
412 /*
413  * Given the device number return the devinfo pointer or instance
414  */
415 /*ARGSUSED*/
416 static int
417 sf_info(dev_info_t *dip, ddi_info_cmd_t infocmd, void *arg, void **result)
418 {
419 	int		instance = SF_MINOR2INST(getminor((dev_t)arg));
420 	struct sf	*sf;
421 
422 	switch (infocmd) {
423 	case DDI_INFO_DEVT2DEVINFO:
424 		sf = ddi_get_soft_state(sf_state, instance);
425 		if (sf != NULL)
426 			*result = sf->sf_dip;
427 		else {
428 			*result = NULL;
429 			return (DDI_FAILURE);
430 		}
431 		break;
432 
433 	case DDI_INFO_DEVT2INSTANCE:
434 		*result = (void *)(uintptr_t)instance;
435 		break;
436 	default:
437 		return (DDI_FAILURE);
438 	}
439 	return (DDI_SUCCESS);
440 }
441 
442 /*
443  * either attach or resume this driver
444  */
445 static int
446 sf_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
447 {
448 	int instance;
449 	int mutex_initted = FALSE;
450 	uint_t ccount;
451 	size_t i, real_size;
452 	struct fcal_transport *handle;
453 	char buf[64];
454 	struct sf *sf, *tsf;
455 	scsi_hba_tran_t *tran = NULL;
456 	int	handle_bound = FALSE;
457 	kthread_t *tp;
458 
459 
460 	switch ((int)cmd) {
461 
462 	case DDI_RESUME:
463 
464 		/*
465 		 * we've previously been SF_STATE_OFFLINEd by a DDI_SUSPEND,
466 		 * so time to undo that and get going again by forcing a
467 		 * lip
468 		 */
469 
470 		instance = ddi_get_instance(dip);
471 
472 		sf = ddi_get_soft_state(sf_state, instance);
473 		SF_DEBUG(2, (sf, CE_CONT,
474 		    "sf_attach: DDI_RESUME for sf%d\n", instance));
475 		if (sf == NULL) {
476 			cmn_err(CE_WARN, "sf%d: bad soft state", instance);
477 			return (DDI_FAILURE);
478 		}
479 
480 		/*
481 		 * clear suspended flag so that normal operations can resume
482 		 */
483 		mutex_enter(&sf->sf_mutex);
484 		sf->sf_state &= ~SF_STATE_SUSPENDED;
485 		mutex_exit(&sf->sf_mutex);
486 
487 		/*
488 		 * force a login by setting our state to offline
489 		 */
490 		sf->sf_timer = sf_watchdog_time + SF_OFFLINE_TIMEOUT;
491 		sf->sf_state = SF_STATE_OFFLINE;
492 
493 		/*
494 		 * call transport routine to register state change and
495 		 * ELS callback routines (to register us as a ULP)
496 		 */
497 		soc_add_ulp(sf->sf_sochandle, sf->sf_socp,
498 		    sf->sf_sochandle->fcal_portno, TYPE_SCSI_FCP,
499 		    sf_statec_callback, sf_unsol_els_callback, NULL, sf);
500 
501 		/*
502 		 * call transport routine to force loop initialization
503 		 */
504 		(void) soc_force_lip(sf->sf_sochandle, sf->sf_socp,
505 		    sf->sf_sochandle->fcal_portno, 0, FCAL_NO_LIP);
506 
507 		/*
508 		 * increment watchdog init flag, setting watchdog timeout
509 		 * if we are the first (since somebody has to do it)
510 		 */
511 		mutex_enter(&sf_global_mutex);
512 		if (!sf_watchdog_init++) {
513 			mutex_exit(&sf_global_mutex);
514 			sf_watchdog_id = timeout(sf_watch,
515 			    (caddr_t)0, sf_watchdog_tick);
516 		} else {
517 			mutex_exit(&sf_global_mutex);
518 		}
519 
520 		return (DDI_SUCCESS);
521 
522 	case DDI_ATTACH:
523 
524 		/*
525 		 * this instance attaching for the first time
526 		 */
527 
528 		instance = ddi_get_instance(dip);
529 
530 		if (ddi_soft_state_zalloc(sf_state, instance) !=
531 		    DDI_SUCCESS) {
532 			cmn_err(CE_WARN, "sf%d: failed to allocate soft state",
533 			    instance);
534 			return (DDI_FAILURE);
535 		}
536 
537 		sf = ddi_get_soft_state(sf_state, instance);
538 		SF_DEBUG(4, (sf, CE_CONT,
539 		    "sf_attach: DDI_ATTACH for sf%d\n", instance));
540 		if (sf == NULL) {
541 			/* this shouldn't happen since we just allocated it */
542 			cmn_err(CE_WARN, "sf%d: bad soft state", instance);
543 			return (DDI_FAILURE);
544 		}
545 
546 		/*
547 		 * from this point on, if there's an error, we must de-allocate
548 		 * soft state before returning DDI_FAILURE
549 		 */
550 
551 		if ((handle = ddi_get_parent_data(dip)) == NULL) {
552 			cmn_err(CE_WARN,
553 			    "sf%d: failed to obtain transport handle",
554 			    instance);
555 			goto fail;
556 		}
557 
558 		/* fill in our soft state structure */
559 		sf->sf_dip = dip;
560 		sf->sf_state = SF_STATE_INIT;
561 		sf->sf_throttle = handle->fcal_cmdmax;
562 		sf->sf_sochandle = handle;
563 		sf->sf_socp = handle->fcal_handle;
564 		sf->sf_check_n_close = 0;
565 
566 		/* create a command/response buffer pool for this instance */
567 		if (sf_add_cr_pool(sf) != DDI_SUCCESS) {
568 			cmn_err(CE_WARN,
569 			    "sf%d: failed to allocate command/response pool",
570 			    instance);
571 			goto fail;
572 		}
573 
574 		/* create a a cache for this instance */
575 		(void) sprintf(buf, "sf%d_cache", instance);
576 		sf->sf_pkt_cache = kmem_cache_create(buf,
577 		    sizeof (fcal_packet_t) + sizeof (struct sf_pkt) +
578 		    scsi_pkt_size(), 8,
579 		    sf_kmem_cache_constructor, sf_kmem_cache_destructor,
580 		    NULL, NULL, NULL, 0);
581 		if (sf->sf_pkt_cache == NULL) {
582 			cmn_err(CE_WARN, "sf%d: failed to allocate kmem cache",
583 			    instance);
584 			goto fail;
585 		}
586 
587 		/* set up a handle and allocate memory for DMA */
588 		if (ddi_dma_alloc_handle(sf->sf_dip, sf->sf_sochandle->
589 		    fcal_dmaattr, DDI_DMA_DONTWAIT, NULL, &sf->
590 		    sf_lilp_dmahandle) != DDI_SUCCESS) {
591 			cmn_err(CE_WARN,
592 			    "sf%d: failed to allocate dma handle for lilp map",
593 			    instance);
594 			goto fail;
595 		}
596 		i = sizeof (struct fcal_lilp_map) + 1;
597 		if (ddi_dma_mem_alloc(sf->sf_lilp_dmahandle,
598 		    i, sf->sf_sochandle->
599 		    fcal_accattr, DDI_DMA_CONSISTENT, DDI_DMA_DONTWAIT, NULL,
600 		    (caddr_t *)&sf->sf_lilp_map, &real_size,
601 		    &sf->sf_lilp_acchandle) != DDI_SUCCESS) {
602 			cmn_err(CE_WARN, "sf%d: failed to allocate lilp map",
603 			    instance);
604 			goto fail;
605 		}
606 		if (real_size < i) {
607 			/* no error message ??? */
608 			goto fail;		/* trouble allocating memory */
609 		}
610 
611 		/*
612 		 * set up the address for the DMA transfers (getting a cookie)
613 		 */
614 		if (ddi_dma_addr_bind_handle(sf->sf_lilp_dmahandle, NULL,
615 		    (caddr_t)sf->sf_lilp_map, real_size,
616 		    DDI_DMA_READ | DDI_DMA_CONSISTENT, DDI_DMA_DONTWAIT, NULL,
617 		    &sf->sf_lilp_dmacookie, &ccount) != DDI_DMA_MAPPED) {
618 			cmn_err(CE_WARN,
619 			    "sf%d: failed to bind dma handle for lilp map",
620 			    instance);
621 			goto fail;
622 		}
623 		handle_bound = TRUE;
624 		/* ensure only one cookie was allocated */
625 		if (ccount != 1) {
626 			goto fail;
627 		}
628 
629 		/* ensure LILP map and DMA cookie addresses are even?? */
630 		sf->sf_lilp_map = (struct fcal_lilp_map *)(((uintptr_t)sf->
631 		    sf_lilp_map + 1) & ~1);
632 		sf->sf_lilp_dmacookie.dmac_address = (sf->
633 		    sf_lilp_dmacookie.dmac_address + 1) & ~1;
634 
635 		/* set up all of our mutexes and condition variables */
636 		mutex_init(&sf->sf_mutex, NULL, MUTEX_DRIVER, NULL);
637 		mutex_init(&sf->sf_cmd_mutex, NULL, MUTEX_DRIVER, NULL);
638 		mutex_init(&sf->sf_cr_mutex, NULL, MUTEX_DRIVER, NULL);
639 		mutex_init(&sf->sf_hp_daemon_mutex, NULL, MUTEX_DRIVER, NULL);
640 		cv_init(&sf->sf_cr_cv, NULL, CV_DRIVER, NULL);
641 		cv_init(&sf->sf_hp_daemon_cv, NULL, CV_DRIVER, NULL);
642 
643 		mutex_initted = TRUE;
644 
645 		/* create our devctl minor node */
646 		if (ddi_create_minor_node(dip, "devctl", S_IFCHR,
647 		    SF_INST2DEVCTL_MINOR(instance),
648 		    DDI_NT_NEXUS, 0) != DDI_SUCCESS) {
649 			cmn_err(CE_WARN, "sf%d: ddi_create_minor_node failed"
650 			    " for devctl", instance);
651 			goto fail;
652 		}
653 
654 		/* create fc minor node */
655 		if (ddi_create_minor_node(dip, "fc", S_IFCHR,
656 		    SF_INST2FC_MINOR(instance), DDI_NT_FC_ATTACHMENT_POINT,
657 		    0) != DDI_SUCCESS) {
658 			cmn_err(CE_WARN, "sf%d: ddi_create_minor_node failed"
659 			    " for fc", instance);
660 			goto fail;
661 		}
662 		/* allocate a SCSI transport structure */
663 		tran = scsi_hba_tran_alloc(dip, 0);
664 		if (tran == NULL) {
665 			/* remove all minor nodes created */
666 			ddi_remove_minor_node(dip, NULL);
667 			cmn_err(CE_WARN, "sf%d: scsi_hba_tran_alloc failed",
668 			    instance);
669 			goto fail;
670 		}
671 
672 		/* Indicate that we are 'sizeof (scsi_*(9S))' clean. */
673 		scsi_size_clean(dip);		/* SCSI_SIZE_CLEAN_VERIFY ok */
674 
675 		/* save ptr to new transport structure and fill it in */
676 		sf->sf_tran = tran;
677 
678 		tran->tran_hba_private		= sf;
679 		tran->tran_tgt_private		= NULL;
680 		tran->tran_tgt_init		= sf_scsi_tgt_init;
681 		tran->tran_tgt_probe		= NULL;
682 		tran->tran_tgt_free		= sf_scsi_tgt_free;
683 
684 		tran->tran_start		= sf_start;
685 		tran->tran_abort		= sf_abort;
686 		tran->tran_reset		= sf_reset;
687 		tran->tran_getcap		= sf_getcap;
688 		tran->tran_setcap		= sf_setcap;
689 		tran->tran_init_pkt		= sf_scsi_init_pkt;
690 		tran->tran_destroy_pkt		= sf_scsi_destroy_pkt;
691 		tran->tran_dmafree		= sf_scsi_dmafree;
692 		tran->tran_sync_pkt		= sf_scsi_sync_pkt;
693 		tran->tran_reset_notify		= sf_scsi_reset_notify;
694 
695 		/*
696 		 * register event notification routines with scsa
697 		 */
698 		tran->tran_get_eventcookie	= sf_bus_get_eventcookie;
699 		tran->tran_add_eventcall	= sf_bus_add_eventcall;
700 		tran->tran_remove_eventcall	= sf_bus_remove_eventcall;
701 		tran->tran_post_event		= sf_bus_post_event;
702 
703 		/*
704 		 * register bus configure/unconfigure
705 		 */
706 		tran->tran_bus_config		= sf_scsi_bus_config;
707 		tran->tran_bus_unconfig		= sf_scsi_bus_unconfig;
708 
709 		/*
710 		 * allocate an ndi event handle
711 		 */
712 		sf->sf_event_defs = (ndi_event_definition_t *)
713 		    kmem_zalloc(sizeof (sf_event_defs), KM_SLEEP);
714 
715 		bcopy(sf_event_defs, sf->sf_event_defs,
716 		    sizeof (sf_event_defs));
717 
718 		(void) ndi_event_alloc_hdl(dip, NULL,
719 		    &sf->sf_event_hdl, NDI_SLEEP);
720 
721 		sf->sf_events.ndi_events_version = NDI_EVENTS_REV1;
722 		sf->sf_events.ndi_n_events = SF_N_NDI_EVENTS;
723 		sf->sf_events.ndi_event_defs = sf->sf_event_defs;
724 
725 		if (ndi_event_bind_set(sf->sf_event_hdl,
726 		    &sf->sf_events, NDI_SLEEP) != NDI_SUCCESS) {
727 			goto fail;
728 		}
729 
730 		tran->tran_get_name		= sf_scsi_get_name;
731 		tran->tran_get_bus_addr		= sf_scsi_get_bus_addr;
732 
733 		/* setup and attach SCSI hba transport */
734 		if (scsi_hba_attach_setup(dip, sf->sf_sochandle->
735 		    fcal_dmaattr, tran, SCSI_HBA_TRAN_CLONE) != DDI_SUCCESS) {
736 			cmn_err(CE_WARN, "sf%d: scsi_hba_attach_setup failed",
737 			    instance);
738 			goto fail;
739 		}
740 
741 		/* set up kstats */
742 		if ((sf->sf_ksp = kstat_create("sf", instance, "statistics",
743 		    "controller", KSTAT_TYPE_RAW, sizeof (struct sf_stats),
744 		    KSTAT_FLAG_VIRTUAL)) == NULL) {
745 			cmn_err(CE_WARN, "sf%d: failed to create kstat",
746 			    instance);
747 		} else {
748 			sf->sf_stats.version = 2;
749 			(void) sprintf(sf->sf_stats.drvr_name,
750 			"%s: %s", SF_NAME, sf_version);
751 			sf->sf_ksp->ks_data = (void *)&sf->sf_stats;
752 			sf->sf_ksp->ks_private = sf;
753 			sf->sf_ksp->ks_update = sf_kstat_update;
754 			kstat_install(sf->sf_ksp);
755 		}
756 
757 		/* create the hotplug thread */
758 		mutex_enter(&sf->sf_hp_daemon_mutex);
759 		tp = thread_create(NULL, 0,
760 		    (void (*)())sf_hp_daemon, sf, 0, &p0, TS_RUN, minclsyspri);
761 		sf->sf_hp_tid = tp->t_did;
762 		mutex_exit(&sf->sf_hp_daemon_mutex);
763 
764 		/* add this soft state instance to the head of the list */
765 		mutex_enter(&sf_global_mutex);
766 		sf->sf_next = sf_head;
767 		tsf = sf_head;
768 		sf_head = sf;
769 
770 		/*
771 		 * find entry in list that has the same FC-AL handle (if any)
772 		 */
773 		while (tsf != NULL) {
774 			if (tsf->sf_socp == sf->sf_socp) {
775 				break;		/* found matching entry */
776 			}
777 			tsf = tsf->sf_next;
778 		}
779 
780 		if (tsf != NULL) {
781 			/* if we found a matching entry keep track of it */
782 			sf->sf_sibling = tsf;
783 		}
784 
785 		/*
786 		 * increment watchdog init flag, setting watchdog timeout
787 		 * if we are the first (since somebody has to do it)
788 		 */
789 		if (!sf_watchdog_init++) {
790 			mutex_exit(&sf_global_mutex);
791 			sf_watchdog_tick = sf_watchdog_timeout *
792 			    drv_usectohz(1000000);
793 			sf_watchdog_id = timeout(sf_watch,
794 			    NULL, sf_watchdog_tick);
795 		} else {
796 			mutex_exit(&sf_global_mutex);
797 		}
798 
799 		if (tsf != NULL) {
800 			/*
801 			 * set up matching entry to be our sibling
802 			 */
803 			mutex_enter(&tsf->sf_mutex);
804 			tsf->sf_sibling = sf;
805 			mutex_exit(&tsf->sf_mutex);
806 		}
807 
808 		/*
809 		 * create this property so that PM code knows we want
810 		 * to be suspended at PM time
811 		 */
812 		(void) ddi_prop_update_string(DDI_DEV_T_NONE, dip,
813 		    PM_HARDWARE_STATE_PROP, PM_NEEDS_SUSPEND_RESUME);
814 
815 		/* log the fact that we have a new device */
816 		ddi_report_dev(dip);
817 
818 		/*
819 		 * force a login by setting our state to offline
820 		 */
821 		sf->sf_timer = sf_watchdog_time + SF_OFFLINE_TIMEOUT;
822 		sf->sf_state = SF_STATE_OFFLINE;
823 
824 		/*
825 		 * call transport routine to register state change and
826 		 * ELS callback routines (to register us as a ULP)
827 		 */
828 		soc_add_ulp(sf->sf_sochandle, sf->sf_socp,
829 		    sf->sf_sochandle->fcal_portno, TYPE_SCSI_FCP,
830 		    sf_statec_callback, sf_unsol_els_callback, NULL, sf);
831 
832 		/*
833 		 * call transport routine to force loop initialization
834 		 */
835 		(void) soc_force_lip(sf->sf_sochandle, sf->sf_socp,
836 		    sf->sf_sochandle->fcal_portno, 0, FCAL_NO_LIP);
837 		sf->sf_reset_time = ddi_get_lbolt64();
838 		return (DDI_SUCCESS);
839 
840 	default:
841 		return (DDI_FAILURE);
842 	}
843 
844 fail:
845 	cmn_err(CE_WARN, "sf%d: failed to attach", instance);
846 
847 	/*
848 	 * Unbind and free event set
849 	 */
850 	if (sf->sf_event_hdl) {
851 		(void) ndi_event_unbind_set(sf->sf_event_hdl,
852 		    &sf->sf_events, NDI_SLEEP);
853 		(void) ndi_event_free_hdl(sf->sf_event_hdl);
854 	}
855 
856 	if (sf->sf_event_defs) {
857 		kmem_free(sf->sf_event_defs, sizeof (sf_event_defs));
858 	}
859 
860 	if (sf->sf_tran != NULL) {
861 		scsi_hba_tran_free(sf->sf_tran);
862 	}
863 	while (sf->sf_cr_pool != NULL) {
864 		sf_crpool_free(sf);
865 	}
866 	if (sf->sf_lilp_dmahandle != NULL) {
867 		if (handle_bound) {
868 			(void) ddi_dma_unbind_handle(sf->sf_lilp_dmahandle);
869 		}
870 		ddi_dma_free_handle(&sf->sf_lilp_dmahandle);
871 	}
872 	if (sf->sf_pkt_cache != NULL) {
873 		kmem_cache_destroy(sf->sf_pkt_cache);
874 	}
875 	if (sf->sf_lilp_map != NULL) {
876 		ddi_dma_mem_free(&sf->sf_lilp_acchandle);
877 	}
878 	if (sf->sf_ksp != NULL) {
879 		kstat_delete(sf->sf_ksp);
880 	}
881 	if (mutex_initted) {
882 		mutex_destroy(&sf->sf_mutex);
883 		mutex_destroy(&sf->sf_cmd_mutex);
884 		mutex_destroy(&sf->sf_cr_mutex);
885 		mutex_destroy(&sf->sf_hp_daemon_mutex);
886 		cv_destroy(&sf->sf_cr_cv);
887 		cv_destroy(&sf->sf_hp_daemon_cv);
888 	}
889 	mutex_enter(&sf_global_mutex);
890 
891 	/*
892 	 * kill off the watchdog if we are the last instance
893 	 */
894 	if (!--sf_watchdog_init) {
895 		timeout_id_t tid = sf_watchdog_id;
896 		mutex_exit(&sf_global_mutex);
897 		(void) untimeout(tid);
898 	} else {
899 		mutex_exit(&sf_global_mutex);
900 	}
901 
902 	ddi_soft_state_free(sf_state, instance);
903 
904 	if (tran != NULL) {
905 		/* remove all minor nodes */
906 		ddi_remove_minor_node(dip, NULL);
907 	}
908 
909 	return (DDI_FAILURE);
910 }
911 
912 
913 /* ARGSUSED */
914 static int
915 sf_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
916 {
917 	struct sf		*sf;
918 	int			instance;
919 	int			i;
920 	struct sf_target	*target;
921 	timeout_id_t		tid;
922 
923 
924 
925 	/* NO OTHER THREADS ARE RUNNING */
926 
927 	instance = ddi_get_instance(dip);
928 
929 	if ((sf = ddi_get_soft_state(sf_state, instance)) == NULL) {
930 		cmn_err(CE_WARN, "sf_detach, sf%d: bad soft state", instance);
931 		return (DDI_FAILURE);
932 	}
933 
934 	switch (cmd) {
935 
936 	case DDI_SUSPEND:
937 		/*
938 		 * suspend our instance
939 		 */
940 
941 		SF_DEBUG(2, (sf, CE_CONT,
942 		    "sf_detach: DDI_SUSPEND for sf%d\n", instance));
943 		/*
944 		 * There is a race condition in socal where while doing
945 		 * callbacks if a ULP removes it self from the callback list
946 		 * the for loop in socal may panic as cblist is junk and
947 		 * while trying to get cblist->next the system will panic.
948 		 */
949 
950 		/* call transport to remove our unregister our callbacks */
951 		soc_remove_ulp(sf->sf_sochandle, sf->sf_socp,
952 		    sf->sf_sochandle->fcal_portno, TYPE_SCSI_FCP, sf);
953 
954 		/*
955 		 * begin process of clearing outstanding commands
956 		 * by issuing a lip
957 		 */
958 		sf_force_lip(sf);
959 
960 		/*
961 		 * toggle the device OFFLINE in order to cause
962 		 * outstanding commands to drain
963 		 */
964 		mutex_enter(&sf->sf_mutex);
965 		sf->sf_lip_cnt++;
966 		sf->sf_timer = sf_watchdog_time + SF_OFFLINE_TIMEOUT;
967 		sf->sf_state = (SF_STATE_OFFLINE | SF_STATE_SUSPENDED);
968 		for (i = 0; i < sf_max_targets; i++) {
969 			target = sf->sf_targets[i];
970 			if (target != NULL) {
971 				struct sf_target *ntarget;
972 
973 				mutex_enter(&target->sft_mutex);
974 				if (!(target->sft_state & SF_TARGET_OFFLINE)) {
975 					target->sft_state |=
976 					    (SF_TARGET_BUSY | SF_TARGET_MARK);
977 				}
978 				/* do this for all LUNs as well */
979 				for (ntarget = target->sft_next_lun;
980 				    ntarget;
981 				    ntarget = ntarget->sft_next_lun) {
982 					mutex_enter(&ntarget->sft_mutex);
983 					if (!(ntarget->sft_state &
984 					    SF_TARGET_OFFLINE)) {
985 						ntarget->sft_state |=
986 						    (SF_TARGET_BUSY |
987 						    SF_TARGET_MARK);
988 					}
989 					mutex_exit(&ntarget->sft_mutex);
990 				}
991 				mutex_exit(&target->sft_mutex);
992 			}
993 		}
994 		mutex_exit(&sf->sf_mutex);
995 		mutex_enter(&sf_global_mutex);
996 
997 		/*
998 		 * kill off the watchdog if we are the last instance
999 		 */
1000 		if (!--sf_watchdog_init) {
1001 			tid = sf_watchdog_id;
1002 			mutex_exit(&sf_global_mutex);
1003 			(void) untimeout(tid);
1004 		} else {
1005 			mutex_exit(&sf_global_mutex);
1006 		}
1007 
1008 		return (DDI_SUCCESS);
1009 
1010 	case DDI_DETACH:
1011 		/*
1012 		 * detach this instance
1013 		 */
1014 
1015 		SF_DEBUG(2, (sf, CE_CONT,
1016 		    "sf_detach: DDI_DETACH for sf%d\n", instance));
1017 
1018 		/* remove this "sf" from the list of sf softstates */
1019 		sf_softstate_unlink(sf);
1020 
1021 		/*
1022 		 * prior to taking any DDI_DETACH actions, toggle the
1023 		 * device OFFLINE in order to cause outstanding
1024 		 * commands to drain
1025 		 */
1026 		mutex_enter(&sf->sf_mutex);
1027 		sf->sf_lip_cnt++;
1028 		sf->sf_timer = sf_watchdog_time + SF_OFFLINE_TIMEOUT;
1029 		sf->sf_state = SF_STATE_OFFLINE;
1030 		for (i = 0; i < sf_max_targets; i++) {
1031 			target = sf->sf_targets[i];
1032 			if (target != NULL) {
1033 				struct sf_target *ntarget;
1034 
1035 				mutex_enter(&target->sft_mutex);
1036 				if (!(target->sft_state & SF_TARGET_OFFLINE)) {
1037 					target->sft_state |=
1038 					    (SF_TARGET_BUSY | SF_TARGET_MARK);
1039 				}
1040 				for (ntarget = target->sft_next_lun;
1041 				    ntarget;
1042 				    ntarget = ntarget->sft_next_lun) {
1043 					mutex_enter(&ntarget->sft_mutex);
1044 					if (!(ntarget->sft_state &
1045 					    SF_TARGET_OFFLINE)) {
1046 						ntarget->sft_state |=
1047 						    (SF_TARGET_BUSY |
1048 						    SF_TARGET_MARK);
1049 					}
1050 					mutex_exit(&ntarget->sft_mutex);
1051 				}
1052 				mutex_exit(&target->sft_mutex);
1053 			}
1054 		}
1055 		mutex_exit(&sf->sf_mutex);
1056 
1057 		/* call transport to remove and unregister our callbacks */
1058 		soc_remove_ulp(sf->sf_sochandle, sf->sf_socp,
1059 		    sf->sf_sochandle->fcal_portno, TYPE_SCSI_FCP, sf);
1060 
1061 		/*
1062 		 * kill off the watchdog if we are the last instance
1063 		 */
1064 		mutex_enter(&sf_global_mutex);
1065 		if (!--sf_watchdog_init) {
1066 			tid = sf_watchdog_id;
1067 			mutex_exit(&sf_global_mutex);
1068 			(void) untimeout(tid);
1069 		} else {
1070 			mutex_exit(&sf_global_mutex);
1071 		}
1072 
1073 		/* signal sf_hp_daemon() to exit and wait for exit */
1074 		mutex_enter(&sf->sf_hp_daemon_mutex);
1075 		ASSERT(sf->sf_hp_tid);
1076 		sf->sf_hp_exit = 1;		/* flag exit */
1077 		cv_signal(&sf->sf_hp_daemon_cv);
1078 		mutex_exit(&sf->sf_hp_daemon_mutex);
1079 		thread_join(sf->sf_hp_tid);	/* wait for hotplug to exit */
1080 
1081 		/*
1082 		 * Unbind and free event set
1083 		 */
1084 		if (sf->sf_event_hdl) {
1085 			(void) ndi_event_unbind_set(sf->sf_event_hdl,
1086 			    &sf->sf_events, NDI_SLEEP);
1087 			(void) ndi_event_free_hdl(sf->sf_event_hdl);
1088 		}
1089 
1090 		if (sf->sf_event_defs) {
1091 			kmem_free(sf->sf_event_defs, sizeof (sf_event_defs));
1092 		}
1093 
1094 		/* detach this instance of the HBA driver */
1095 		(void) scsi_hba_detach(dip);
1096 		scsi_hba_tran_free(sf->sf_tran);
1097 
1098 		/* deallocate/unbind DMA handle for lilp map */
1099 		if (sf->sf_lilp_map != NULL) {
1100 			(void) ddi_dma_unbind_handle(sf->sf_lilp_dmahandle);
1101 			if (sf->sf_lilp_dmahandle != NULL) {
1102 				ddi_dma_free_handle(&sf->sf_lilp_dmahandle);
1103 			}
1104 			ddi_dma_mem_free(&sf->sf_lilp_acchandle);
1105 		}
1106 
1107 		/*
1108 		 * the kmem cache must be destroyed before free'ing
1109 		 * up the crpools
1110 		 *
1111 		 * our finagle of "ntot" and "nfree"
1112 		 * causes an ASSERT failure in "sf_cr_free()"
1113 		 * if the kmem cache is free'd after invoking
1114 		 * "sf_crpool_free()".
1115 		 */
1116 		kmem_cache_destroy(sf->sf_pkt_cache);
1117 
1118 		SF_DEBUG(2, (sf, CE_CONT,
1119 		    "sf_detach: sf_crpool_free() for instance 0x%x\n",
1120 		    instance));
1121 		while (sf->sf_cr_pool != NULL) {
1122 			/*
1123 			 * set ntot to nfree for this particular entry
1124 			 *
1125 			 * this causes sf_crpool_free() to update
1126 			 * the cr_pool list when deallocating this entry
1127 			 */
1128 			sf->sf_cr_pool->ntot = sf->sf_cr_pool->nfree;
1129 			sf_crpool_free(sf);
1130 		}
1131 
1132 		/*
1133 		 * now that the cr_pool's are gone it's safe
1134 		 * to destroy all softstate mutex's and cv's
1135 		 */
1136 		mutex_destroy(&sf->sf_mutex);
1137 		mutex_destroy(&sf->sf_cmd_mutex);
1138 		mutex_destroy(&sf->sf_cr_mutex);
1139 		mutex_destroy(&sf->sf_hp_daemon_mutex);
1140 		cv_destroy(&sf->sf_cr_cv);
1141 		cv_destroy(&sf->sf_hp_daemon_cv);
1142 
1143 		/* remove all minor nodes from the device tree */
1144 		ddi_remove_minor_node(dip, NULL);
1145 
1146 		/* remove properties created during attach() */
1147 		ddi_prop_remove_all(dip);
1148 
1149 		/* remove kstat's if present */
1150 		if (sf->sf_ksp != NULL) {
1151 			kstat_delete(sf->sf_ksp);
1152 		}
1153 
1154 		SF_DEBUG(2, (sf, CE_CONT,
1155 		    "sf_detach: ddi_soft_state_free() for instance 0x%x\n",
1156 		    instance));
1157 		ddi_soft_state_free(sf_state, instance);
1158 		return (DDI_SUCCESS);
1159 
1160 	default:
1161 		SF_DEBUG(2, (sf, CE_CONT, "sf_detach: sf%d unknown cmd %x\n",
1162 		    instance, (int)cmd));
1163 		return (DDI_FAILURE);
1164 	}
1165 }
1166 
1167 
1168 /*
1169  * sf_softstate_unlink() - remove an sf instance from the list of softstates
1170  */
1171 static void
1172 sf_softstate_unlink(struct sf *sf)
1173 {
1174 	struct sf	*sf_ptr;
1175 	struct sf	*sf_found_sibling;
1176 	struct sf	*sf_reposition = NULL;
1177 
1178 
1179 	mutex_enter(&sf_global_mutex);
1180 	while (sf_watch_running) {
1181 		/* Busy working the list -- wait */
1182 		cv_wait(&sf_watch_cv, &sf_global_mutex);
1183 	}
1184 	if ((sf_found_sibling = sf->sf_sibling) != NULL) {
1185 		/*
1186 		 * we have a sibling so NULL out its reference to us
1187 		 */
1188 		mutex_enter(&sf_found_sibling->sf_mutex);
1189 		sf_found_sibling->sf_sibling = NULL;
1190 		mutex_exit(&sf_found_sibling->sf_mutex);
1191 	}
1192 
1193 	/* remove our instance from the global list */
1194 	if (sf == sf_head) {
1195 		/* we were at at head of the list */
1196 		sf_head = sf->sf_next;
1197 	} else {
1198 		/* find us in the list */
1199 		for (sf_ptr = sf_head;
1200 		    sf_ptr != NULL;
1201 		    sf_ptr = sf_ptr->sf_next) {
1202 			if (sf_ptr == sf) {
1203 				break;
1204 			}
1205 			/* remember this place */
1206 			sf_reposition = sf_ptr;
1207 		}
1208 		ASSERT(sf_ptr == sf);
1209 		ASSERT(sf_reposition != NULL);
1210 
1211 		sf_reposition->sf_next = sf_ptr->sf_next;
1212 	}
1213 	mutex_exit(&sf_global_mutex);
1214 }
1215 
1216 
1217 static int
1218 sf_scsi_bus_config(dev_info_t *parent, uint_t flag,
1219     ddi_bus_config_op_t op, void *arg, dev_info_t **childp)
1220 {
1221 	int64_t		reset_delay;
1222 	struct sf	*sf;
1223 
1224 	sf = ddi_get_soft_state(sf_state, ddi_get_instance(parent));
1225 	ASSERT(sf);
1226 
1227 	reset_delay = (int64_t)(USEC_TO_TICK(SF_INIT_WAIT_TIMEOUT)) -
1228 	    (ddi_get_lbolt64() - sf->sf_reset_time);
1229 	if (reset_delay < 0)
1230 		reset_delay = 0;
1231 
1232 	if (sf_bus_config_debug)
1233 		flag |= NDI_DEVI_DEBUG;
1234 
1235 	return (ndi_busop_bus_config(parent, flag, op,
1236 	    arg, childp, (clock_t)reset_delay));
1237 }
1238 
1239 static int
1240 sf_scsi_bus_unconfig(dev_info_t *parent, uint_t flag,
1241     ddi_bus_config_op_t op, void *arg)
1242 {
1243 	if (sf_bus_config_debug)
1244 		flag |= NDI_DEVI_DEBUG;
1245 
1246 	return (ndi_busop_bus_unconfig(parent, flag, op, arg));
1247 }
1248 
1249 
1250 /*
1251  * called by transport to initialize a SCSI target
1252  */
1253 /* ARGSUSED */
1254 static int
1255 sf_scsi_tgt_init(dev_info_t *hba_dip, dev_info_t *tgt_dip,
1256     scsi_hba_tran_t *hba_tran, struct scsi_device *sd)
1257 {
1258 #ifdef RAID_LUNS
1259 	int lun;
1260 #else
1261 	int64_t lun;
1262 #endif
1263 	struct sf_target *target;
1264 	struct sf *sf = (struct sf *)hba_tran->tran_hba_private;
1265 	int i, t_len;
1266 	unsigned int lip_cnt;
1267 	unsigned char wwn[FC_WWN_SIZE];
1268 
1269 
1270 	/* get and validate our SCSI target ID */
1271 	i = sd->sd_address.a_target;
1272 	if (i >= sf_max_targets) {
1273 		return (DDI_NOT_WELL_FORMED);
1274 	}
1275 
1276 	/* get our port WWN property */
1277 	t_len = sizeof (wwn);
1278 	if (ddi_prop_op(DDI_DEV_T_ANY, tgt_dip, PROP_LEN_AND_VAL_BUF,
1279 	    DDI_PROP_DONTPASS | DDI_PROP_CANSLEEP, PORT_WWN_PROP,
1280 	    (caddr_t)&wwn, &t_len) != DDI_SUCCESS) {
1281 		/* no port WWN property - ignore the OBP stub node */
1282 		return (DDI_NOT_WELL_FORMED);
1283 	}
1284 
1285 	/* get our LIP count property */
1286 	t_len = sizeof (lip_cnt);
1287 	if (ddi_prop_op(DDI_DEV_T_ANY, tgt_dip, PROP_LEN_AND_VAL_BUF,
1288 	    DDI_PROP_DONTPASS | DDI_PROP_CANSLEEP, LIP_CNT_PROP,
1289 	    (caddr_t)&lip_cnt, &t_len) != DDI_SUCCESS) {
1290 		return (DDI_FAILURE);
1291 	}
1292 	/* and our LUN property */
1293 	t_len = sizeof (lun);
1294 	if (ddi_prop_op(DDI_DEV_T_ANY, tgt_dip, PROP_LEN_AND_VAL_BUF,
1295 	    DDI_PROP_DONTPASS | DDI_PROP_CANSLEEP, "lun",
1296 	    (caddr_t)&lun, &t_len) != DDI_SUCCESS) {
1297 		return (DDI_FAILURE);
1298 	}
1299 
1300 	/* find the target structure for this instance */
1301 	mutex_enter(&sf->sf_mutex);
1302 	if ((target = sf_lookup_target(sf, wwn, lun)) == NULL) {
1303 		mutex_exit(&sf->sf_mutex);
1304 		return (DDI_FAILURE);
1305 	}
1306 
1307 	mutex_enter(&target->sft_mutex);
1308 	if ((sf->sf_lip_cnt == lip_cnt) && !(target->sft_state
1309 	    & SF_TARGET_INIT_DONE)) {
1310 		/*
1311 		 * set links between HBA transport and target structures
1312 		 * and set done flag
1313 		 */
1314 		hba_tran->tran_tgt_private = target;
1315 		target->sft_tran = hba_tran;
1316 		target->sft_state |= SF_TARGET_INIT_DONE;
1317 	} else {
1318 		/* already initialized ?? */
1319 		mutex_exit(&target->sft_mutex);
1320 		mutex_exit(&sf->sf_mutex);
1321 		return (DDI_FAILURE);
1322 	}
1323 	mutex_exit(&target->sft_mutex);
1324 	mutex_exit(&sf->sf_mutex);
1325 
1326 	return (DDI_SUCCESS);
1327 }
1328 
1329 
1330 /*
1331  * called by transport to free a target
1332  */
1333 /* ARGSUSED */
1334 static void
1335 sf_scsi_tgt_free(dev_info_t *hba_dip, dev_info_t *tgt_dip,
1336     scsi_hba_tran_t *hba_tran, struct scsi_device *sd)
1337 {
1338 	struct sf_target *target = hba_tran->tran_tgt_private;
1339 
1340 	if (target != NULL) {
1341 		mutex_enter(&target->sft_mutex);
1342 		target->sft_tran = NULL;
1343 		target->sft_state &= ~SF_TARGET_INIT_DONE;
1344 		mutex_exit(&target->sft_mutex);
1345 	}
1346 }
1347 
1348 
1349 /*
1350  * allocator for non-std size cdb/pkt_private/status -- return TRUE iff
1351  * success, else return FALSE
1352  */
1353 /*ARGSUSED*/
1354 static int
1355 sf_pkt_alloc_extern(struct sf *sf, struct sf_pkt *cmd,
1356     int tgtlen, int statuslen, int kf)
1357 {
1358 	caddr_t scbp, tgt;
1359 	int failure = FALSE;
1360 	struct scsi_pkt *pkt = CMD2PKT(cmd);
1361 
1362 
1363 	tgt = scbp = NULL;
1364 
1365 	if (tgtlen > PKT_PRIV_LEN) {
1366 		if ((tgt = kmem_zalloc(tgtlen, kf)) == NULL) {
1367 			failure = TRUE;
1368 		} else {
1369 			cmd->cmd_flags |= CFLAG_PRIVEXTERN;
1370 			pkt->pkt_private = tgt;
1371 		}
1372 	}
1373 	if (statuslen > EXTCMDS_STATUS_SIZE) {
1374 		if ((scbp = kmem_zalloc((size_t)statuslen, kf)) == NULL) {
1375 			failure = TRUE;
1376 		} else {
1377 			cmd->cmd_flags |= CFLAG_SCBEXTERN;
1378 			pkt->pkt_scbp = (opaque_t)scbp;
1379 		}
1380 	}
1381 	if (failure) {
1382 		sf_pkt_destroy_extern(sf, cmd);
1383 	}
1384 	return (failure);
1385 }
1386 
1387 
1388 /*
1389  * deallocator for non-std size cdb/pkt_private/status
1390  */
1391 static void
1392 sf_pkt_destroy_extern(struct sf *sf, struct sf_pkt *cmd)
1393 {
1394 	struct scsi_pkt *pkt = CMD2PKT(cmd);
1395 
1396 	if (cmd->cmd_flags & CFLAG_FREE) {
1397 		cmn_err(CE_PANIC,
1398 		    "sf_scsi_impl_pktfree: freeing free packet");
1399 		_NOTE(NOT_REACHED)
1400 		/* NOTREACHED */
1401 	}
1402 	if (cmd->cmd_flags & CFLAG_SCBEXTERN) {
1403 		kmem_free((caddr_t)pkt->pkt_scbp,
1404 		    (size_t)cmd->cmd_scblen);
1405 	}
1406 	if (cmd->cmd_flags & CFLAG_PRIVEXTERN) {
1407 		kmem_free((caddr_t)pkt->pkt_private,
1408 		    (size_t)cmd->cmd_privlen);
1409 	}
1410 
1411 	cmd->cmd_flags = CFLAG_FREE;
1412 	kmem_cache_free(sf->sf_pkt_cache, (void *)cmd);
1413 }
1414 
1415 
1416 /*
1417  * create or initialize a SCSI packet -- called internally and
1418  * by the transport
1419  */
1420 static struct scsi_pkt *
1421 sf_scsi_init_pkt(struct scsi_address *ap, struct scsi_pkt *pkt,
1422     struct buf *bp, int cmdlen, int statuslen, int tgtlen,
1423     int flags, int (*callback)(), caddr_t arg)
1424 {
1425 	int kf;
1426 	int failure = FALSE;
1427 	struct sf_pkt *cmd;
1428 	struct sf *sf = ADDR2SF(ap);
1429 	struct sf_target *target = ADDR2TARGET(ap);
1430 	struct sf_pkt	*new_cmd = NULL;
1431 	struct fcal_packet	*fpkt;
1432 	fc_frame_header_t	*hp;
1433 	struct fcp_cmd *fcmd;
1434 
1435 
1436 	/*
1437 	 * If we've already allocated a pkt once,
1438 	 * this request is for dma allocation only.
1439 	 */
1440 	if (pkt == NULL) {
1441 
1442 		/*
1443 		 * First step of sf_scsi_init_pkt:  pkt allocation
1444 		 */
1445 		if (cmdlen > FCP_CDB_SIZE) {
1446 			return (NULL);
1447 		}
1448 
1449 		kf = (callback == SLEEP_FUNC)? KM_SLEEP: KM_NOSLEEP;
1450 
1451 		if ((cmd = kmem_cache_alloc(sf->sf_pkt_cache, kf)) != NULL) {
1452 			/*
1453 			 * Selective zeroing of the pkt.
1454 			 */
1455 
1456 			cmd->cmd_flags = 0;
1457 			cmd->cmd_forw = 0;
1458 			cmd->cmd_back = 0;
1459 			cmd->cmd_next = 0;
1460 			cmd->cmd_pkt = (struct scsi_pkt *)((char *)cmd +
1461 			    sizeof (struct sf_pkt) + sizeof (struct
1462 			    fcal_packet));
1463 			cmd->cmd_fp_pkt = (struct fcal_packet *)((char *)cmd +
1464 			    sizeof (struct sf_pkt));
1465 			cmd->cmd_fp_pkt->fcal_pkt_private = (opaque_t)cmd;
1466 			cmd->cmd_state = SF_STATE_IDLE;
1467 			cmd->cmd_pkt->pkt_ha_private = (opaque_t)cmd;
1468 			cmd->cmd_pkt->pkt_scbp = (opaque_t)cmd->cmd_scsi_scb;
1469 			cmd->cmd_pkt->pkt_comp	= NULL;
1470 			cmd->cmd_pkt->pkt_flags	= 0;
1471 			cmd->cmd_pkt->pkt_time	= 0;
1472 			cmd->cmd_pkt->pkt_resid	= 0;
1473 			cmd->cmd_pkt->pkt_reason = 0;
1474 			cmd->cmd_cdblen = (uchar_t)cmdlen;
1475 			cmd->cmd_scblen		= statuslen;
1476 			cmd->cmd_privlen	= tgtlen;
1477 			cmd->cmd_pkt->pkt_address = *ap;
1478 
1479 			/* zero pkt_private */
1480 			(int *)(cmd->cmd_pkt->pkt_private =
1481 			    cmd->cmd_pkt_private);
1482 			bzero((caddr_t)cmd->cmd_pkt->pkt_private,
1483 			    PKT_PRIV_LEN);
1484 		} else {
1485 			failure = TRUE;
1486 		}
1487 
1488 		if (failure ||
1489 		    (tgtlen > PKT_PRIV_LEN) ||
1490 		    (statuslen > EXTCMDS_STATUS_SIZE)) {
1491 			if (!failure) {
1492 				/* need to allocate more space */
1493 				failure = sf_pkt_alloc_extern(sf, cmd,
1494 				    tgtlen, statuslen, kf);
1495 			}
1496 			if (failure) {
1497 				return (NULL);
1498 			}
1499 		}
1500 
1501 		fpkt = cmd->cmd_fp_pkt;
1502 		if (cmd->cmd_block == NULL) {
1503 
1504 			/* allocate cmd/response pool buffers */
1505 			if (sf_cr_alloc(sf, cmd, callback) == DDI_FAILURE) {
1506 				sf_pkt_destroy_extern(sf, cmd);
1507 				return (NULL);
1508 			}
1509 
1510 			/* fill in the FC-AL packet */
1511 			fpkt->fcal_pkt_cookie = sf->sf_socp;
1512 			fpkt->fcal_pkt_comp = sf_cmd_callback;
1513 			fpkt->fcal_pkt_flags = 0;
1514 			fpkt->fcal_magic = FCALP_MAGIC;
1515 			fpkt->fcal_socal_request.sr_soc_hdr.sh_flags =
1516 			    (ushort_t)(SOC_FC_HEADER |
1517 			    sf->sf_sochandle->fcal_portno);
1518 			fpkt->fcal_socal_request.sr_soc_hdr.sh_class = 3;
1519 			fpkt->fcal_socal_request.sr_cqhdr.cq_hdr_count = 1;
1520 			fpkt->fcal_socal_request.sr_cqhdr.cq_hdr_flags = 0;
1521 			fpkt->fcal_socal_request.sr_cqhdr.cq_hdr_seqno = 0;
1522 			fpkt->fcal_socal_request.sr_dataseg[0].fc_base =
1523 			    (uint32_t)cmd->cmd_dmac;
1524 			fpkt->fcal_socal_request.sr_dataseg[0].fc_count =
1525 			    sizeof (struct fcp_cmd);
1526 			fpkt->fcal_socal_request.sr_dataseg[1].fc_base =
1527 			    (uint32_t)cmd->cmd_rsp_dmac;
1528 			fpkt->fcal_socal_request.sr_dataseg[1].fc_count =
1529 			    FCP_MAX_RSP_IU_SIZE;
1530 
1531 			/* Fill in the Fabric Channel Header */
1532 			hp = &fpkt->fcal_socal_request.sr_fc_frame_hdr;
1533 			hp->r_ctl = R_CTL_COMMAND;
1534 			hp->type = TYPE_SCSI_FCP;
1535 			hp->f_ctl = F_CTL_SEQ_INITIATIVE | F_CTL_FIRST_SEQ;
1536 			hp->reserved1 = 0;
1537 			hp->seq_id = 0;
1538 			hp->df_ctl  = 0;
1539 			hp->seq_cnt = 0;
1540 			hp->ox_id = 0xffff;
1541 			hp->rx_id = 0xffff;
1542 			hp->ro = 0;
1543 
1544 			/* Establish the LUN */
1545 			bcopy((caddr_t)&target->sft_lun.b,
1546 			    (caddr_t)&cmd->cmd_block->fcp_ent_addr,
1547 			    FCP_LUN_SIZE);
1548 			*((int32_t *)&cmd->cmd_block->fcp_cntl) = 0;
1549 		}
1550 		cmd->cmd_pkt->pkt_cdbp = cmd->cmd_block->fcp_cdb;
1551 
1552 		mutex_enter(&target->sft_pkt_mutex);
1553 
1554 		target->sft_pkt_tail->cmd_forw = cmd;
1555 		cmd->cmd_back = target->sft_pkt_tail;
1556 		cmd->cmd_forw = (struct sf_pkt *)&target->sft_pkt_head;
1557 		target->sft_pkt_tail = cmd;
1558 
1559 		mutex_exit(&target->sft_pkt_mutex);
1560 		new_cmd = cmd;		/* for later cleanup if needed */
1561 	} else {
1562 		/* pkt already exists -- just a request for DMA allocation */
1563 		cmd = PKT2CMD(pkt);
1564 		fpkt = cmd->cmd_fp_pkt;
1565 	}
1566 
1567 	/* zero cdb (bzero is too slow) */
1568 	bzero((caddr_t)cmd->cmd_pkt->pkt_cdbp, cmdlen);
1569 
1570 	/*
1571 	 * Second step of sf_scsi_init_pkt:  dma allocation
1572 	 * Set up dma info
1573 	 */
1574 	if ((bp != NULL) && (bp->b_bcount != 0)) {
1575 		int cmd_flags, dma_flags;
1576 		int rval = 0;
1577 		uint_t dmacookie_count;
1578 
1579 		/* there is a buffer and some data to transfer */
1580 
1581 		/* set up command and DMA flags */
1582 		cmd_flags = cmd->cmd_flags;
1583 		if (bp->b_flags & B_READ) {
1584 			/* a read */
1585 			cmd_flags &= ~CFLAG_DMASEND;
1586 			dma_flags = DDI_DMA_READ;
1587 		} else {
1588 			/* a write */
1589 			cmd_flags |= CFLAG_DMASEND;
1590 			dma_flags = DDI_DMA_WRITE;
1591 		}
1592 		if (flags & PKT_CONSISTENT) {
1593 			cmd_flags |= CFLAG_CMDIOPB;
1594 			dma_flags |= DDI_DMA_CONSISTENT;
1595 		}
1596 
1597 		/* ensure we have a DMA handle */
1598 		if (cmd->cmd_dmahandle == NULL) {
1599 			rval = ddi_dma_alloc_handle(sf->sf_dip,
1600 			    sf->sf_sochandle->fcal_dmaattr, callback, arg,
1601 			    &cmd->cmd_dmahandle);
1602 		}
1603 
1604 		if (rval == 0) {
1605 			/* bind our DMA handle to our buffer */
1606 			rval = ddi_dma_buf_bind_handle(cmd->cmd_dmahandle, bp,
1607 			    dma_flags, callback, arg, &cmd->cmd_dmacookie,
1608 			    &dmacookie_count);
1609 		}
1610 
1611 		if (rval != 0) {
1612 			/* DMA failure */
1613 			SF_DEBUG(2, (sf, CE_CONT, "ddi_dma_buf.. failed\n"));
1614 			switch (rval) {
1615 			case DDI_DMA_NORESOURCES:
1616 				bioerror(bp, 0);
1617 				break;
1618 			case DDI_DMA_BADATTR:
1619 			case DDI_DMA_NOMAPPING:
1620 				bioerror(bp, EFAULT);
1621 				break;
1622 			case DDI_DMA_TOOBIG:
1623 			default:
1624 				bioerror(bp, EINVAL);
1625 				break;
1626 			}
1627 			/* clear valid flag */
1628 			cmd->cmd_flags = cmd_flags & ~CFLAG_DMAVALID;
1629 			if (new_cmd != NULL) {
1630 				/* destroy packet if we just created it */
1631 				sf_scsi_destroy_pkt(ap, new_cmd->cmd_pkt);
1632 			}
1633 			return (NULL);
1634 		}
1635 
1636 		ASSERT(dmacookie_count == 1);
1637 		/* set up amt to transfer and set valid flag */
1638 		cmd->cmd_dmacount = bp->b_bcount;
1639 		cmd->cmd_flags = cmd_flags | CFLAG_DMAVALID;
1640 
1641 		ASSERT(cmd->cmd_dmahandle != NULL);
1642 	}
1643 
1644 	/* set up FC-AL packet */
1645 	fcmd = cmd->cmd_block;
1646 
1647 	if (cmd->cmd_flags & CFLAG_DMAVALID) {
1648 		if (cmd->cmd_flags & CFLAG_DMASEND) {
1649 			/* DMA write */
1650 			fcmd->fcp_cntl.cntl_read_data = 0;
1651 			fcmd->fcp_cntl.cntl_write_data = 1;
1652 			fpkt->fcal_socal_request.sr_cqhdr.cq_hdr_type =
1653 			    CQ_TYPE_IO_WRITE;
1654 		} else {
1655 			/* DMA read */
1656 			fcmd->fcp_cntl.cntl_read_data = 1;
1657 			fcmd->fcp_cntl.cntl_write_data = 0;
1658 			fpkt->fcal_socal_request.sr_cqhdr.cq_hdr_type =
1659 			    CQ_TYPE_IO_READ;
1660 		}
1661 		fpkt->fcal_socal_request.sr_dataseg[2].fc_base =
1662 		    (uint32_t)cmd->cmd_dmacookie.dmac_address;
1663 		fpkt->fcal_socal_request.sr_dataseg[2].fc_count =
1664 		    cmd->cmd_dmacookie.dmac_size;
1665 		fpkt->fcal_socal_request.sr_soc_hdr.sh_seg_cnt = 3;
1666 		fpkt->fcal_socal_request.sr_soc_hdr.sh_byte_cnt =
1667 		    cmd->cmd_dmacookie.dmac_size;
1668 		fcmd->fcp_data_len = cmd->cmd_dmacookie.dmac_size;
1669 	} else {
1670 		/* not a read or write */
1671 		fcmd->fcp_cntl.cntl_read_data = 0;
1672 		fcmd->fcp_cntl.cntl_write_data = 0;
1673 		fpkt->fcal_socal_request.sr_cqhdr.cq_hdr_type = CQ_TYPE_SIMPLE;
1674 		fpkt->fcal_socal_request.sr_soc_hdr.sh_seg_cnt = 2;
1675 		fpkt->fcal_socal_request.sr_soc_hdr.sh_byte_cnt =
1676 		    sizeof (struct fcp_cmd);
1677 		fcmd->fcp_data_len = 0;
1678 	}
1679 	fcmd->fcp_cntl.cntl_qtype = FCP_QTYPE_SIMPLE;
1680 
1681 	return (cmd->cmd_pkt);
1682 }
1683 
1684 
1685 /*
1686  * destroy a SCSI packet -- called internally and by the transport
1687  */
1688 static void
1689 sf_scsi_destroy_pkt(struct scsi_address *ap, struct scsi_pkt *pkt)
1690 {
1691 	struct sf_pkt *cmd = PKT2CMD(pkt);
1692 	struct sf *sf = ADDR2SF(ap);
1693 	struct sf_target *target = ADDR2TARGET(ap);
1694 	struct fcal_packet	*fpkt = cmd->cmd_fp_pkt;
1695 
1696 
1697 	if (cmd->cmd_flags & CFLAG_DMAVALID) {
1698 		/* DMA was set up -- clean up */
1699 		(void) ddi_dma_unbind_handle(cmd->cmd_dmahandle);
1700 		cmd->cmd_flags ^= CFLAG_DMAVALID;
1701 	}
1702 
1703 	/* take this packet off the doubly-linked list */
1704 	mutex_enter(&target->sft_pkt_mutex);
1705 	cmd->cmd_back->cmd_forw = cmd->cmd_forw;
1706 	cmd->cmd_forw->cmd_back = cmd->cmd_back;
1707 	mutex_exit(&target->sft_pkt_mutex);
1708 
1709 	fpkt->fcal_pkt_flags = 0;
1710 	/* free the packet */
1711 	if ((cmd->cmd_flags &
1712 	    (CFLAG_FREE | CFLAG_PRIVEXTERN | CFLAG_SCBEXTERN)) == 0) {
1713 		/* just a regular packet */
1714 		ASSERT(cmd->cmd_state != SF_STATE_ISSUED);
1715 		cmd->cmd_flags = CFLAG_FREE;
1716 		kmem_cache_free(sf->sf_pkt_cache, (void *)cmd);
1717 	} else {
1718 		/* a packet with extra memory */
1719 		sf_pkt_destroy_extern(sf, cmd);
1720 	}
1721 }
1722 
1723 
1724 /*
1725  * called by transport to unbind DMA handle
1726  */
1727 /* ARGSUSED */
1728 static void
1729 sf_scsi_dmafree(struct scsi_address *ap, struct scsi_pkt *pkt)
1730 {
1731 	struct sf_pkt *cmd = PKT2CMD(pkt);
1732 
1733 
1734 	if (cmd->cmd_flags & CFLAG_DMAVALID) {
1735 		(void) ddi_dma_unbind_handle(cmd->cmd_dmahandle);
1736 		cmd->cmd_flags ^= CFLAG_DMAVALID;
1737 	}
1738 
1739 }
1740 
1741 
1742 /*
1743  * called by transport to synchronize CPU and I/O views of memory
1744  */
1745 /* ARGSUSED */
1746 static void
1747 sf_scsi_sync_pkt(struct scsi_address *ap, struct scsi_pkt *pkt)
1748 {
1749 	struct sf_pkt *cmd = PKT2CMD(pkt);
1750 
1751 
1752 	if (cmd->cmd_flags & CFLAG_DMAVALID) {
1753 		if (ddi_dma_sync(cmd->cmd_dmahandle, (off_t)0, (size_t)0,
1754 		    (cmd->cmd_flags & CFLAG_DMASEND) ?
1755 		    DDI_DMA_SYNC_FORDEV : DDI_DMA_SYNC_FORCPU) !=
1756 		    DDI_SUCCESS) {
1757 			cmn_err(CE_WARN, "sf: sync pkt failed");
1758 		}
1759 	}
1760 }
1761 
1762 
1763 /*
1764  * routine for reset notification setup, to register or cancel. -- called
1765  * by transport
1766  */
1767 static int
1768 sf_scsi_reset_notify(struct scsi_address *ap, int flag,
1769     void (*callback)(caddr_t), caddr_t arg)
1770 {
1771 	struct sf	*sf = ADDR2SF(ap);
1772 
1773 	return (scsi_hba_reset_notify_setup(ap, flag, callback, arg,
1774 	    &sf->sf_mutex, &sf->sf_reset_notify_listf));
1775 }
1776 
1777 
1778 /*
1779  * called by transport to get port WWN property (except sun4u)
1780  */
1781 /* ARGSUSED */
1782 static int
1783 sf_scsi_get_name(struct scsi_device *sd, char *name, int len)
1784 {
1785 	char tbuf[(FC_WWN_SIZE*2)+1];
1786 	unsigned char wwn[FC_WWN_SIZE];
1787 	int i, lun;
1788 	dev_info_t *tgt_dip;
1789 
1790 	tgt_dip = sd->sd_dev;
1791 	i = sizeof (wwn);
1792 	if (ddi_prop_op(DDI_DEV_T_ANY, tgt_dip, PROP_LEN_AND_VAL_BUF,
1793 	    DDI_PROP_DONTPASS | DDI_PROP_CANSLEEP, PORT_WWN_PROP,
1794 	    (caddr_t)&wwn, &i) != DDI_SUCCESS) {
1795 		name[0] = '\0';
1796 		return (0);
1797 	}
1798 	i = sizeof (lun);
1799 	if (ddi_prop_op(DDI_DEV_T_ANY, tgt_dip, PROP_LEN_AND_VAL_BUF,
1800 	    DDI_PROP_DONTPASS | DDI_PROP_CANSLEEP, "lun",
1801 	    (caddr_t)&lun, &i) != DDI_SUCCESS) {
1802 		name[0] = '\0';
1803 		return (0);
1804 	}
1805 	for (i = 0; i < FC_WWN_SIZE; i++)
1806 		(void) sprintf(&tbuf[i << 1], "%02x", wwn[i]);
1807 	(void) sprintf(name, "w%s,%x", tbuf, lun);
1808 	return (1);
1809 }
1810 
1811 
1812 /*
1813  * called by transport to get target soft AL-PA (except sun4u)
1814  */
1815 /* ARGSUSED */
1816 static int
1817 sf_scsi_get_bus_addr(struct scsi_device *sd, char *name, int len)
1818 {
1819 	struct sf_target *target = ADDR2TARGET(&sd->sd_address);
1820 
1821 	if (target == NULL)
1822 		return (0);
1823 
1824 	(void) sprintf(name, "%x", target->sft_al_pa);
1825 	return (1);
1826 }
1827 
1828 
1829 /*
1830  * add to the command/response buffer pool for this sf instance
1831  */
1832 static int
1833 sf_add_cr_pool(struct sf *sf)
1834 {
1835 	int		cmd_buf_size;
1836 	size_t		real_cmd_buf_size;
1837 	int		rsp_buf_size;
1838 	size_t		real_rsp_buf_size;
1839 	uint_t		i, ccount;
1840 	struct sf_cr_pool	*ptr;
1841 	struct sf_cr_free_elem *cptr;
1842 	caddr_t	dptr, eptr;
1843 	ddi_dma_cookie_t	cmd_cookie;
1844 	ddi_dma_cookie_t	rsp_cookie;
1845 	int		cmd_bound = FALSE, rsp_bound = FALSE;
1846 
1847 
1848 	/* allocate room for the pool */
1849 	if ((ptr = kmem_zalloc(sizeof (struct sf_cr_pool), KM_NOSLEEP)) ==
1850 	    NULL) {
1851 		return (DDI_FAILURE);
1852 	}
1853 
1854 	/* allocate a DMA handle for the command pool */
1855 	if (ddi_dma_alloc_handle(sf->sf_dip, sf->sf_sochandle->fcal_dmaattr,
1856 	    DDI_DMA_DONTWAIT, NULL, &ptr->cmd_dma_handle) != DDI_SUCCESS) {
1857 		goto fail;
1858 	}
1859 
1860 	/*
1861 	 * Get a piece of memory in which to put commands
1862 	 */
1863 	cmd_buf_size = (sizeof (struct fcp_cmd) * SF_ELEMS_IN_POOL + 7) & ~7;
1864 	if (ddi_dma_mem_alloc(ptr->cmd_dma_handle, cmd_buf_size,
1865 	    sf->sf_sochandle->fcal_accattr, DDI_DMA_CONSISTENT,
1866 	    DDI_DMA_DONTWAIT, NULL, (caddr_t *)&ptr->cmd_base,
1867 	    &real_cmd_buf_size, &ptr->cmd_acc_handle) != DDI_SUCCESS) {
1868 		goto fail;
1869 	}
1870 
1871 	/* bind the DMA handle to an address */
1872 	if (ddi_dma_addr_bind_handle(ptr->cmd_dma_handle, NULL,
1873 	    ptr->cmd_base, real_cmd_buf_size,
1874 	    DDI_DMA_WRITE | DDI_DMA_CONSISTENT, DDI_DMA_DONTWAIT,
1875 	    NULL, &cmd_cookie, &ccount) != DDI_DMA_MAPPED) {
1876 		goto fail;
1877 	}
1878 	cmd_bound = TRUE;
1879 	/* ensure only one cookie was allocated */
1880 	if (ccount != 1) {
1881 		goto fail;
1882 	}
1883 
1884 	/* allocate a DMA handle for the response pool */
1885 	if (ddi_dma_alloc_handle(sf->sf_dip, sf->sf_sochandle->fcal_dmaattr,
1886 	    DDI_DMA_DONTWAIT, NULL, &ptr->rsp_dma_handle) != DDI_SUCCESS) {
1887 		goto fail;
1888 	}
1889 
1890 	/*
1891 	 * Get a piece of memory in which to put responses
1892 	 */
1893 	rsp_buf_size = FCP_MAX_RSP_IU_SIZE * SF_ELEMS_IN_POOL;
1894 	if (ddi_dma_mem_alloc(ptr->rsp_dma_handle, rsp_buf_size,
1895 	    sf->sf_sochandle->fcal_accattr, DDI_DMA_CONSISTENT,
1896 	    DDI_DMA_DONTWAIT, NULL, (caddr_t *)&ptr->rsp_base,
1897 	    &real_rsp_buf_size, &ptr->rsp_acc_handle) != DDI_SUCCESS) {
1898 		goto fail;
1899 	}
1900 
1901 	/* bind the DMA handle to an address */
1902 	if (ddi_dma_addr_bind_handle(ptr->rsp_dma_handle, NULL,
1903 	    ptr->rsp_base, real_rsp_buf_size,
1904 	    DDI_DMA_READ | DDI_DMA_CONSISTENT, DDI_DMA_DONTWAIT,
1905 	    NULL, &rsp_cookie, &ccount) != DDI_DMA_MAPPED) {
1906 		goto fail;
1907 	}
1908 	rsp_bound = TRUE;
1909 	/* ensure only one cookie was allocated */
1910 	if (ccount != 1) {
1911 		goto fail;
1912 	}
1913 
1914 	/*
1915 	 * Generate a (cmd/rsp structure) free list
1916 	 */
1917 	/* ensure ptr points to start of long word (8-byte block) */
1918 	dptr = (caddr_t)((uintptr_t)(ptr->cmd_base) + 7 & ~7);
1919 	/* keep track of actual size after moving pointer */
1920 	real_cmd_buf_size -= (dptr - ptr->cmd_base);
1921 	eptr = ptr->rsp_base;
1922 
1923 	/* set actual total number of entries */
1924 	ptr->ntot = min((real_cmd_buf_size / sizeof (struct fcp_cmd)),
1925 	    (real_rsp_buf_size / FCP_MAX_RSP_IU_SIZE));
1926 	ptr->nfree = ptr->ntot;
1927 	ptr->free = (struct sf_cr_free_elem *)ptr->cmd_base;
1928 	ptr->sf = sf;
1929 
1930 	/* set up DMA for each pair of entries */
1931 	i = 0;
1932 	while (i < ptr->ntot) {
1933 		cptr = (struct sf_cr_free_elem *)dptr;
1934 		dptr += sizeof (struct fcp_cmd);
1935 
1936 		cptr->next = (struct sf_cr_free_elem *)dptr;
1937 		cptr->rsp = eptr;
1938 
1939 		cptr->cmd_dmac = cmd_cookie.dmac_address +
1940 		    (uint32_t)((caddr_t)cptr - ptr->cmd_base);
1941 
1942 		cptr->rsp_dmac = rsp_cookie.dmac_address +
1943 		    (uint32_t)((caddr_t)eptr - ptr->rsp_base);
1944 
1945 		eptr += FCP_MAX_RSP_IU_SIZE;
1946 		i++;
1947 	}
1948 
1949 	/* terminate the list */
1950 	cptr->next = NULL;
1951 
1952 	/* add this list at front of current one */
1953 	mutex_enter(&sf->sf_cr_mutex);
1954 	ptr->next = sf->sf_cr_pool;
1955 	sf->sf_cr_pool = ptr;
1956 	sf->sf_cr_pool_cnt++;
1957 	mutex_exit(&sf->sf_cr_mutex);
1958 
1959 	return (DDI_SUCCESS);
1960 
1961 fail:
1962 	/* we failed so clean up */
1963 	if (ptr->cmd_dma_handle != NULL) {
1964 		if (cmd_bound) {
1965 			(void) ddi_dma_unbind_handle(ptr->cmd_dma_handle);
1966 		}
1967 		ddi_dma_free_handle(&ptr->cmd_dma_handle);
1968 	}
1969 
1970 	if (ptr->rsp_dma_handle != NULL) {
1971 		if (rsp_bound) {
1972 			(void) ddi_dma_unbind_handle(ptr->rsp_dma_handle);
1973 		}
1974 		ddi_dma_free_handle(&ptr->rsp_dma_handle);
1975 	}
1976 
1977 	if (ptr->cmd_base != NULL) {
1978 		ddi_dma_mem_free(&ptr->cmd_acc_handle);
1979 	}
1980 
1981 	if (ptr->rsp_base != NULL) {
1982 		ddi_dma_mem_free(&ptr->rsp_acc_handle);
1983 	}
1984 
1985 	kmem_free((caddr_t)ptr, sizeof (struct sf_cr_pool));
1986 	return (DDI_FAILURE);
1987 }
1988 
1989 
1990 /*
1991  * allocate a command/response buffer from the pool, allocating more
1992  * in the pool as needed
1993  */
1994 static int
1995 sf_cr_alloc(struct sf *sf, struct sf_pkt *cmd, int (*func)())
1996 {
1997 	struct sf_cr_pool *ptr;
1998 	struct sf_cr_free_elem *cptr;
1999 
2000 
2001 	mutex_enter(&sf->sf_cr_mutex);
2002 
2003 try_again:
2004 
2005 	/* find a free buffer in the existing pool */
2006 	ptr = sf->sf_cr_pool;
2007 	while (ptr != NULL) {
2008 		if (ptr->nfree != 0) {
2009 			ptr->nfree--;
2010 			break;
2011 		} else {
2012 			ptr = ptr->next;
2013 		}
2014 	}
2015 
2016 	/* did we find a free buffer ? */
2017 	if (ptr != NULL) {
2018 		/* we found a free buffer -- take it off the free list */
2019 		cptr = ptr->free;
2020 		ptr->free = cptr->next;
2021 		mutex_exit(&sf->sf_cr_mutex);
2022 		/* set up the command to use the buffer pair */
2023 		cmd->cmd_block = (struct fcp_cmd *)cptr;
2024 		cmd->cmd_dmac = cptr->cmd_dmac;
2025 		cmd->cmd_rsp_dmac = cptr->rsp_dmac;
2026 		cmd->cmd_rsp_block = (struct fcp_rsp *)cptr->rsp;
2027 		cmd->cmd_cr_pool = ptr;
2028 		return (DDI_SUCCESS);		/* success */
2029 	}
2030 
2031 	/* no free buffer available -- can we allocate more ? */
2032 	if (sf->sf_cr_pool_cnt < SF_CR_POOL_MAX) {
2033 		/* we need to allocate more buffer pairs */
2034 		if (sf->sf_cr_flag) {
2035 			/* somebody already allocating for this instance */
2036 			if (func == SLEEP_FUNC) {
2037 				/* user wants to wait */
2038 				cv_wait(&sf->sf_cr_cv, &sf->sf_cr_mutex);
2039 				/* we've been woken so go try again */
2040 				goto try_again;
2041 			}
2042 			/* user does not want to wait */
2043 			mutex_exit(&sf->sf_cr_mutex);
2044 			sf->sf_stats.cralloc_failures++;
2045 			return (DDI_FAILURE);	/* give up */
2046 		}
2047 		/* set flag saying we're allocating */
2048 		sf->sf_cr_flag = 1;
2049 		mutex_exit(&sf->sf_cr_mutex);
2050 		/* add to our pool */
2051 		if (sf_add_cr_pool(sf) != DDI_SUCCESS) {
2052 			/* couldn't add to our pool for some reason */
2053 			mutex_enter(&sf->sf_cr_mutex);
2054 			sf->sf_cr_flag = 0;
2055 			cv_broadcast(&sf->sf_cr_cv);
2056 			mutex_exit(&sf->sf_cr_mutex);
2057 			sf->sf_stats.cralloc_failures++;
2058 			return (DDI_FAILURE);	/* give up */
2059 		}
2060 		/*
2061 		 * clear flag saying we're allocating and tell all other
2062 		 * that care
2063 		 */
2064 		mutex_enter(&sf->sf_cr_mutex);
2065 		sf->sf_cr_flag = 0;
2066 		cv_broadcast(&sf->sf_cr_cv);
2067 		/* now that we have more buffers try again */
2068 		goto try_again;
2069 	}
2070 
2071 	/* we don't have room to allocate any more buffers */
2072 	mutex_exit(&sf->sf_cr_mutex);
2073 	sf->sf_stats.cralloc_failures++;
2074 	return (DDI_FAILURE);			/* give up */
2075 }
2076 
2077 
2078 /*
2079  * free a cmd/response buffer pair in our pool
2080  */
2081 static void
2082 sf_cr_free(struct sf_cr_pool *cp, struct sf_pkt *cmd)
2083 {
2084 	struct sf *sf = cp->sf;
2085 	struct sf_cr_free_elem *elem;
2086 
2087 	elem = (struct sf_cr_free_elem *)cmd->cmd_block;
2088 	elem->rsp = (caddr_t)cmd->cmd_rsp_block;
2089 	elem->cmd_dmac = cmd->cmd_dmac;
2090 	elem->rsp_dmac = cmd->cmd_rsp_dmac;
2091 
2092 	mutex_enter(&sf->sf_cr_mutex);
2093 	cp->nfree++;
2094 	ASSERT(cp->nfree <= cp->ntot);
2095 
2096 	elem->next = cp->free;
2097 	cp->free = elem;
2098 	mutex_exit(&sf->sf_cr_mutex);
2099 }
2100 
2101 
2102 /*
2103  * free our pool of cmd/response buffers
2104  */
2105 static void
2106 sf_crpool_free(struct sf *sf)
2107 {
2108 	struct sf_cr_pool *cp, *prev;
2109 
2110 	prev = NULL;
2111 	mutex_enter(&sf->sf_cr_mutex);
2112 	cp = sf->sf_cr_pool;
2113 	while (cp != NULL) {
2114 		if (cp->nfree == cp->ntot) {
2115 			if (prev != NULL) {
2116 				prev->next = cp->next;
2117 			} else {
2118 				sf->sf_cr_pool = cp->next;
2119 			}
2120 			sf->sf_cr_pool_cnt--;
2121 			mutex_exit(&sf->sf_cr_mutex);
2122 
2123 			(void) ddi_dma_unbind_handle(cp->cmd_dma_handle);
2124 			ddi_dma_free_handle(&cp->cmd_dma_handle);
2125 			(void) ddi_dma_unbind_handle(cp->rsp_dma_handle);
2126 			ddi_dma_free_handle(&cp->rsp_dma_handle);
2127 			ddi_dma_mem_free(&cp->cmd_acc_handle);
2128 			ddi_dma_mem_free(&cp->rsp_acc_handle);
2129 			kmem_free((caddr_t)cp, sizeof (struct sf_cr_pool));
2130 			return;
2131 		}
2132 		prev = cp;
2133 		cp = cp->next;
2134 	}
2135 	mutex_exit(&sf->sf_cr_mutex);
2136 }
2137 
2138 
2139 /* ARGSUSED */
2140 static int
2141 sf_kmem_cache_constructor(void *buf, void *arg, int size)
2142 {
2143 	struct sf_pkt *cmd = buf;
2144 
2145 	mutex_init(&cmd->cmd_abort_mutex, NULL, MUTEX_DRIVER, NULL);
2146 	cmd->cmd_block = NULL;
2147 	cmd->cmd_dmahandle = NULL;
2148 	return (0);
2149 }
2150 
2151 
2152 /* ARGSUSED */
2153 static void
2154 sf_kmem_cache_destructor(void *buf, void *size)
2155 {
2156 	struct sf_pkt *cmd = buf;
2157 
2158 	if (cmd->cmd_dmahandle != NULL) {
2159 		ddi_dma_free_handle(&cmd->cmd_dmahandle);
2160 	}
2161 
2162 	if (cmd->cmd_block != NULL) {
2163 		sf_cr_free(cmd->cmd_cr_pool, cmd);
2164 	}
2165 	mutex_destroy(&cmd->cmd_abort_mutex);
2166 }
2167 
2168 
2169 /*
2170  * called by transport when a state change occurs
2171  */
2172 static void
2173 sf_statec_callback(void *arg, int msg)
2174 {
2175 	struct sf *sf = (struct sf *)arg;
2176 	struct sf_target	*target;
2177 	int i;
2178 	struct sf_pkt *cmd;
2179 	struct scsi_pkt *pkt;
2180 
2181 
2182 
2183 	switch (msg) {
2184 
2185 	case FCAL_STATUS_LOOP_ONLINE: {
2186 		uchar_t		al_pa;		/* to save AL-PA */
2187 		int		ret;		/* ret value from getmap */
2188 		int		lip_cnt;	/* to save current count */
2189 		int		cnt;		/* map length */
2190 
2191 		/*
2192 		 * the loop has gone online
2193 		 */
2194 		SF_DEBUG(1, (sf, CE_CONT, "sf%d: loop online\n",
2195 		    ddi_get_instance(sf->sf_dip)));
2196 		mutex_enter(&sf->sf_mutex);
2197 		sf->sf_lip_cnt++;
2198 		sf->sf_state = SF_STATE_ONLINING;
2199 		mutex_exit(&sf->sf_mutex);
2200 
2201 		/* scan each target hash queue */
2202 		for (i = 0; i < SF_NUM_HASH_QUEUES; i++) {
2203 			target = sf->sf_wwn_lists[i];
2204 			while (target != NULL) {
2205 				/*
2206 				 * foreach target, if it's not offline then
2207 				 * mark it as busy
2208 				 */
2209 				mutex_enter(&target->sft_mutex);
2210 				if (!(target->sft_state & SF_TARGET_OFFLINE))
2211 					target->sft_state |= (SF_TARGET_BUSY
2212 					    | SF_TARGET_MARK);
2213 #ifdef DEBUG
2214 				/*
2215 				 * for debugging, print out info on any
2216 				 * pending commands (left hanging)
2217 				 */
2218 				cmd = target->sft_pkt_head;
2219 				while (cmd != (struct sf_pkt *)&target->
2220 				    sft_pkt_head) {
2221 					if (cmd->cmd_state ==
2222 					    SF_STATE_ISSUED) {
2223 						SF_DEBUG(1, (sf, CE_CONT,
2224 						    "cmd 0x%p pending "
2225 						    "after lip\n",
2226 						    (void *)cmd->cmd_fp_pkt));
2227 					}
2228 					cmd = cmd->cmd_forw;
2229 				}
2230 #endif
2231 				mutex_exit(&target->sft_mutex);
2232 				target = target->sft_next;
2233 			}
2234 		}
2235 
2236 		/*
2237 		 * since the loop has just gone online get a new map from
2238 		 * the transport
2239 		 */
2240 		if ((ret = soc_get_lilp_map(sf->sf_sochandle, sf->sf_socp,
2241 		    sf->sf_sochandle->fcal_portno, (uint32_t)sf->
2242 		    sf_lilp_dmacookie.dmac_address, 1)) != FCAL_SUCCESS) {
2243 			if (sf_core && (sf_core & SF_CORE_LILP_FAILED)) {
2244 				(void) soc_take_core(sf->sf_sochandle,
2245 				    sf->sf_socp);
2246 				sf_core = 0;
2247 			}
2248 			sf_log(sf, CE_WARN,
2249 			    "!soc lilp map failed status=0x%x\n", ret);
2250 			mutex_enter(&sf->sf_mutex);
2251 			sf->sf_timer = sf_watchdog_time + SF_OFFLINE_TIMEOUT;
2252 			sf->sf_lip_cnt++;
2253 			sf->sf_state = SF_STATE_OFFLINE;
2254 			mutex_exit(&sf->sf_mutex);
2255 			return;
2256 		}
2257 
2258 		/* ensure consistent view of DMA memory */
2259 		(void) ddi_dma_sync(sf->sf_lilp_dmahandle, (off_t)0, (size_t)0,
2260 		    DDI_DMA_SYNC_FORKERNEL);
2261 
2262 		/* how many entries in map ? */
2263 		cnt = sf->sf_lilp_map->lilp_length;
2264 		if (cnt >= SF_MAX_LILP_ENTRIES) {
2265 			sf_log(sf, CE_WARN, "invalid lilp map\n");
2266 			return;
2267 		}
2268 
2269 		mutex_enter(&sf->sf_mutex);
2270 		sf->sf_device_count = cnt - 1;
2271 		sf->sf_al_pa = sf->sf_lilp_map->lilp_myalpa;
2272 		lip_cnt = sf->sf_lip_cnt;
2273 		al_pa = sf->sf_al_pa;
2274 
2275 		SF_DEBUG(1, (sf, CE_CONT,
2276 		    "!lilp map has %d entries, al_pa is %x\n", cnt, al_pa));
2277 
2278 		/*
2279 		 * since the last entry of the map may be mine (common) check
2280 		 * for that, and if it is we have one less entry to look at
2281 		 */
2282 		if (sf->sf_lilp_map->lilp_alpalist[cnt-1] == al_pa) {
2283 			cnt--;
2284 		}
2285 		/* If we didn't get a valid loop map enable all targets */
2286 		if (sf->sf_lilp_map->lilp_magic == FCAL_BADLILP_MAGIC) {
2287 			for (i = 0; i < sizeof (sf_switch_to_alpa); i++)
2288 				sf->sf_lilp_map->lilp_alpalist[i] =
2289 				    sf_switch_to_alpa[i];
2290 			cnt = i;
2291 			sf->sf_device_count = cnt - 1;
2292 		}
2293 		if (sf->sf_device_count == 0) {
2294 			sf_finish_init(sf, lip_cnt);
2295 			mutex_exit(&sf->sf_mutex);
2296 			break;
2297 		}
2298 		mutex_exit(&sf->sf_mutex);
2299 
2300 		SF_DEBUG(2, (sf, CE_WARN,
2301 		    "!statec_callback: starting with %d targets\n",
2302 		    sf->sf_device_count));
2303 
2304 		/* scan loop map, logging into all ports (except mine) */
2305 		for (i = 0; i < cnt; i++) {
2306 			SF_DEBUG(1, (sf, CE_CONT,
2307 			    "!lilp map entry %d = %x,%x\n", i,
2308 			    sf->sf_lilp_map->lilp_alpalist[i],
2309 			    sf_alpa_to_switch[
2310 			    sf->sf_lilp_map->lilp_alpalist[i]]));
2311 			/* is this entry for somebody else ? */
2312 			if (sf->sf_lilp_map->lilp_alpalist[i] != al_pa) {
2313 				/* do a PLOGI to this port */
2314 				if (!sf_login(sf, LA_ELS_PLOGI,
2315 				    sf->sf_lilp_map->lilp_alpalist[i],
2316 				    sf->sf_lilp_map->lilp_alpalist[cnt-1],
2317 				    lip_cnt)) {
2318 					/* a problem logging in */
2319 					mutex_enter(&sf->sf_mutex);
2320 					if (lip_cnt == sf->sf_lip_cnt) {
2321 						/*
2322 						 * problem not from a new LIP
2323 						 */
2324 						sf->sf_device_count--;
2325 						ASSERT(sf->sf_device_count
2326 						    >= 0);
2327 						if (sf->sf_device_count == 0) {
2328 							sf_finish_init(sf,
2329 							    lip_cnt);
2330 						}
2331 					}
2332 					mutex_exit(&sf->sf_mutex);
2333 				}
2334 			}
2335 		}
2336 		break;
2337 	}
2338 
2339 	case FCAL_STATUS_ERR_OFFLINE:
2340 		/*
2341 		 * loop has gone offline due to an error
2342 		 */
2343 		SF_DEBUG(1, (sf, CE_CONT, "sf%d: loop offline\n",
2344 		    ddi_get_instance(sf->sf_dip)));
2345 		mutex_enter(&sf->sf_mutex);
2346 		sf->sf_lip_cnt++;
2347 		sf->sf_timer = sf_watchdog_time + SF_OFFLINE_TIMEOUT;
2348 		if (!sf->sf_online_timer) {
2349 			sf->sf_online_timer = sf_watchdog_time +
2350 			    SF_ONLINE_TIMEOUT;
2351 		}
2352 		/*
2353 		 * if we are suspended, preserve the SF_STATE_SUSPENDED flag,
2354 		 * since throttling logic in sf_watch() depends on
2355 		 * preservation of this flag while device is suspended
2356 		 */
2357 		if (sf->sf_state & SF_STATE_SUSPENDED) {
2358 			sf->sf_state |= SF_STATE_OFFLINE;
2359 			SF_DEBUG(1, (sf, CE_CONT,
2360 			    "sf_statec_callback, sf%d: "
2361 			    "got FCAL_STATE_OFFLINE during DDI_SUSPEND\n",
2362 			    ddi_get_instance(sf->sf_dip)));
2363 		} else {
2364 			sf->sf_state = SF_STATE_OFFLINE;
2365 		}
2366 
2367 		/* scan each possible target on the loop */
2368 		for (i = 0; i < sf_max_targets; i++) {
2369 			target = sf->sf_targets[i];
2370 			while (target != NULL) {
2371 				mutex_enter(&target->sft_mutex);
2372 				if (!(target->sft_state & SF_TARGET_OFFLINE))
2373 					target->sft_state |= (SF_TARGET_BUSY
2374 					    | SF_TARGET_MARK);
2375 				mutex_exit(&target->sft_mutex);
2376 				target = target->sft_next_lun;
2377 			}
2378 		}
2379 		mutex_exit(&sf->sf_mutex);
2380 		break;
2381 
2382 	case FCAL_STATE_RESET: {
2383 		struct sf_els_hdr	*privp;	/* ptr to private list */
2384 		struct sf_els_hdr	*tmpp1;	/* tmp prev hdr ptr */
2385 		struct sf_els_hdr	*tmpp2;	/* tmp next hdr ptr */
2386 		struct sf_els_hdr	*head;	/* to save our private list */
2387 		struct fcal_packet	*fpkt;	/* ptr to pkt in hdr */
2388 
2389 		/*
2390 		 * a transport reset
2391 		 */
2392 		SF_DEBUG(1, (sf, CE_CONT, "!sf%d: soc reset\n",
2393 		    ddi_get_instance(sf->sf_dip)));
2394 		tmpp1 = head = NULL;
2395 		mutex_enter(&sf->sf_mutex);
2396 		sf->sf_lip_cnt++;
2397 		sf->sf_timer = sf_watchdog_time + SF_RESET_TIMEOUT;
2398 		/*
2399 		 * if we are suspended, preserve the SF_STATE_SUSPENDED flag,
2400 		 * since throttling logic in sf_watch() depends on
2401 		 * preservation of this flag while device is suspended
2402 		 */
2403 		if (sf->sf_state & SF_STATE_SUSPENDED) {
2404 			sf->sf_state |= SF_STATE_OFFLINE;
2405 			SF_DEBUG(1, (sf, CE_CONT,
2406 			    "sf_statec_callback, sf%d: "
2407 			    "got FCAL_STATE_RESET during DDI_SUSPEND\n",
2408 			    ddi_get_instance(sf->sf_dip)));
2409 		} else {
2410 			sf->sf_state = SF_STATE_OFFLINE;
2411 		}
2412 
2413 		/*
2414 		 * scan each possible target on the loop, looking for targets
2415 		 * that need callbacks ran
2416 		 */
2417 		for (i = 0; i < sf_max_targets; i++) {
2418 			target = sf->sf_targets[i];
2419 			while (target != NULL) {
2420 				if (!(target->sft_state & SF_TARGET_OFFLINE)) {
2421 					target->sft_state |= (SF_TARGET_BUSY
2422 					    | SF_TARGET_MARK);
2423 					mutex_exit(&sf->sf_mutex);
2424 					/*
2425 					 * run remove event callbacks for lun
2426 					 *
2427 					 * We have a nasty race condition here
2428 					 * 'cause we're dropping this mutex to
2429 					 * run the callback and expect the
2430 					 * linked list to be the same.
2431 					 */
2432 					(void) ndi_event_retrieve_cookie(
2433 					    sf->sf_event_hdl, target->sft_dip,
2434 					    FCAL_REMOVE_EVENT, &sf_remove_eid,
2435 					    NDI_EVENT_NOPASS);
2436 					(void) ndi_event_run_callbacks(
2437 					    sf->sf_event_hdl,
2438 					    target->sft_dip,
2439 					    sf_remove_eid, NULL);
2440 					mutex_enter(&sf->sf_mutex);
2441 				}
2442 				target = target->sft_next_lun;
2443 			}
2444 		}
2445 
2446 		/*
2447 		 * scan for ELS commands that are in transport, not complete,
2448 		 * and have a valid timeout, building a private list
2449 		 */
2450 		privp = sf->sf_els_list;
2451 		while (privp != NULL) {
2452 			fpkt = privp->fpkt;
2453 			if ((fpkt->fcal_cmd_state & FCAL_CMD_IN_TRANSPORT) &&
2454 			    (!(fpkt->fcal_cmd_state & FCAL_CMD_COMPLETE)) &&
2455 			    (privp->timeout != SF_INVALID_TIMEOUT)) {
2456 				/*
2457 				 * cmd in transport && not complete &&
2458 				 * timeout valid
2459 				 *
2460 				 * move this entry from ELS input list to our
2461 				 * private list
2462 				 */
2463 
2464 				tmpp2 = privp->next; /* save ptr to next */
2465 
2466 				/* push this on private list head */
2467 				privp->next = head;
2468 				head = privp;
2469 
2470 				/* remove this entry from input list */
2471 				if (tmpp1 != NULL) {
2472 					/*
2473 					 * remove this entry from somewhere in
2474 					 * the middle of the list
2475 					 */
2476 					tmpp1->next = tmpp2;
2477 					if (tmpp2 != NULL) {
2478 						tmpp2->prev = tmpp1;
2479 					}
2480 				} else {
2481 					/*
2482 					 * remove this entry from the head
2483 					 * of the list
2484 					 */
2485 					sf->sf_els_list = tmpp2;
2486 					if (tmpp2 != NULL) {
2487 						tmpp2->prev = NULL;
2488 					}
2489 				}
2490 				privp = tmpp2;	/* skip to next entry */
2491 			} else {
2492 				tmpp1 = privp;	/* save ptr to prev entry */
2493 				privp = privp->next; /* skip to next entry */
2494 			}
2495 		}
2496 
2497 		mutex_exit(&sf->sf_mutex);
2498 
2499 		/*
2500 		 * foreach cmd in our list free the ELS packet associated
2501 		 * with it
2502 		 */
2503 		privp = head;
2504 		while (privp != NULL) {
2505 			fpkt = privp->fpkt;
2506 			privp = privp->next;
2507 			sf_els_free(fpkt);
2508 		}
2509 
2510 		/*
2511 		 * scan for commands from each possible target
2512 		 */
2513 		for (i = 0; i < sf_max_targets; i++) {
2514 			target = sf->sf_targets[i];
2515 			while (target != NULL) {
2516 				/*
2517 				 * scan all active commands for this target,
2518 				 * looking for commands that have been issued,
2519 				 * are in transport, and are not yet complete
2520 				 * (so we can terminate them because of the
2521 				 * reset)
2522 				 */
2523 				mutex_enter(&target->sft_pkt_mutex);
2524 				cmd = target->sft_pkt_head;
2525 				while (cmd != (struct sf_pkt *)&target->
2526 				    sft_pkt_head) {
2527 					fpkt = cmd->cmd_fp_pkt;
2528 					mutex_enter(&cmd->cmd_abort_mutex);
2529 					if ((cmd->cmd_state ==
2530 					    SF_STATE_ISSUED) &&
2531 					    (fpkt->fcal_cmd_state &
2532 					    FCAL_CMD_IN_TRANSPORT) &&
2533 					    (!(fpkt->fcal_cmd_state &
2534 					    FCAL_CMD_COMPLETE))) {
2535 						/* a command to be reset */
2536 						pkt = cmd->cmd_pkt;
2537 						pkt->pkt_reason = CMD_RESET;
2538 						pkt->pkt_statistics |=
2539 						    STAT_BUS_RESET;
2540 						cmd->cmd_state = SF_STATE_IDLE;
2541 						mutex_exit(&cmd->
2542 						    cmd_abort_mutex);
2543 						mutex_exit(&target->
2544 						    sft_pkt_mutex);
2545 						if (pkt->pkt_comp != NULL) {
2546 							(*pkt->pkt_comp)(pkt);
2547 						}
2548 						mutex_enter(&target->
2549 						    sft_pkt_mutex);
2550 						cmd = target->sft_pkt_head;
2551 					} else {
2552 						mutex_exit(&cmd->
2553 						    cmd_abort_mutex);
2554 						/* get next command */
2555 						cmd = cmd->cmd_forw;
2556 					}
2557 				}
2558 				mutex_exit(&target->sft_pkt_mutex);
2559 				target = target->sft_next_lun;
2560 			}
2561 		}
2562 
2563 		/*
2564 		 * get packet queue for this target, resetting all remaining
2565 		 * commands
2566 		 */
2567 		mutex_enter(&sf->sf_mutex);
2568 		cmd = sf->sf_pkt_head;
2569 		sf->sf_pkt_head = NULL;
2570 		mutex_exit(&sf->sf_mutex);
2571 
2572 		while (cmd != NULL) {
2573 			pkt = cmd->cmd_pkt;
2574 			cmd = cmd->cmd_next;
2575 			pkt->pkt_reason = CMD_RESET;
2576 			pkt->pkt_statistics |= STAT_BUS_RESET;
2577 			if (pkt->pkt_comp != NULL) {
2578 				(*pkt->pkt_comp)(pkt);
2579 			}
2580 		}
2581 		break;
2582 	}
2583 
2584 	default:
2585 		break;
2586 	}
2587 }
2588 
2589 
2590 /*
2591  * called to send a PLOGI (N_port login) ELS request to a destination ID,
2592  * returning TRUE upon success, else returning FALSE
2593  */
2594 static int
2595 sf_login(struct sf *sf, uchar_t els_code, uchar_t dest_id, uint_t arg1,
2596     int lip_cnt)
2597 {
2598 	struct la_els_logi	*logi;
2599 	struct	sf_els_hdr	*privp;
2600 
2601 
2602 	if (sf_els_alloc(sf, dest_id, sizeof (struct sf_els_hdr),
2603 	    sizeof (union sf_els_cmd), sizeof (union sf_els_rsp),
2604 	    (caddr_t *)&privp, (caddr_t *)&logi) == NULL) {
2605 		sf_log(sf, CE_WARN, "Cannot allocate PLOGI for target %x "
2606 		    "due to DVMA shortage.\n", sf_alpa_to_switch[dest_id]);
2607 		return (FALSE);
2608 	}
2609 
2610 	privp->lip_cnt = lip_cnt;
2611 	if (els_code == LA_ELS_PLOGI) {
2612 		bcopy((caddr_t)sf->sf_sochandle->fcal_loginparms,
2613 		    (caddr_t)&logi->common_service, sizeof (struct la_els_logi)
2614 		    - 4);
2615 		bcopy((caddr_t)&sf->sf_sochandle->fcal_p_wwn,
2616 		    (caddr_t)&logi->nport_ww_name, sizeof (la_wwn_t));
2617 		bcopy((caddr_t)&sf->sf_sochandle->fcal_n_wwn,
2618 		    (caddr_t)&logi->node_ww_name, sizeof (la_wwn_t));
2619 		bzero((caddr_t)&logi->reserved, 16);
2620 	} else if (els_code == LA_ELS_LOGO) {
2621 		bcopy((caddr_t)&sf->sf_sochandle->fcal_p_wwn,
2622 		    (caddr_t)&(((struct la_els_logo *)logi)->nport_ww_name), 8);
2623 		((struct la_els_logo	*)logi)->reserved = 0;
2624 		((struct la_els_logo	*)logi)->nport_id[0] = 0;
2625 		((struct la_els_logo	*)logi)->nport_id[1] = 0;
2626 		((struct la_els_logo	*)logi)->nport_id[2] = arg1;
2627 	}
2628 
2629 	privp->els_code = els_code;
2630 	logi->ls_code = els_code;
2631 	logi->mbz[0] = 0;
2632 	logi->mbz[1] = 0;
2633 	logi->mbz[2] = 0;
2634 
2635 	privp->timeout = sf_watchdog_time + SF_ELS_TIMEOUT;
2636 	return (sf_els_transport(sf, privp));
2637 }
2638 
2639 
2640 /*
2641  * send an ELS IU via the transport,
2642  * returning TRUE upon success, else returning FALSE
2643  */
2644 static int
2645 sf_els_transport(struct sf *sf, struct sf_els_hdr *privp)
2646 {
2647 	struct fcal_packet *fpkt = privp->fpkt;
2648 
2649 
2650 	(void) ddi_dma_sync(privp->cmd_dma_handle, (off_t)0, (size_t)0,
2651 	    DDI_DMA_SYNC_FORDEV);
2652 	privp->prev = NULL;
2653 	mutex_enter(&sf->sf_mutex);
2654 	privp->next = sf->sf_els_list;
2655 	if (sf->sf_els_list != NULL) {
2656 		sf->sf_els_list->prev = privp;
2657 	}
2658 	sf->sf_els_list = privp;
2659 	mutex_exit(&sf->sf_mutex);
2660 
2661 	/* call the transport to send a packet */
2662 	if (soc_transport(sf->sf_sochandle, fpkt, FCAL_NOSLEEP,
2663 	    CQ_REQUEST_1) != FCAL_TRANSPORT_SUCCESS) {
2664 		mutex_enter(&sf->sf_mutex);
2665 		if (privp->prev != NULL) {
2666 			privp->prev->next = privp->next;
2667 		}
2668 		if (privp->next != NULL) {
2669 			privp->next->prev = privp->prev;
2670 		}
2671 		if (sf->sf_els_list == privp) {
2672 			sf->sf_els_list = privp->next;
2673 		}
2674 		mutex_exit(&sf->sf_mutex);
2675 		sf_els_free(fpkt);
2676 		return (FALSE);			/* failure */
2677 	}
2678 	return (TRUE);				/* success */
2679 }
2680 
2681 
2682 /*
2683  * called as the pkt_comp routine for ELS FC packets
2684  */
2685 static void
2686 sf_els_callback(struct fcal_packet *fpkt)
2687 {
2688 	struct sf_els_hdr *privp = fpkt->fcal_pkt_private;
2689 	struct sf *sf = privp->sf;
2690 	struct sf *tsf;
2691 	int tgt_id;
2692 	struct la_els_logi *ptr = (struct la_els_logi *)privp->rsp;
2693 	struct la_els_adisc *adisc = (struct la_els_adisc *)ptr;
2694 	struct	sf_target *target;
2695 	short	ncmds;
2696 	short	free_pkt = TRUE;
2697 
2698 
2699 	/*
2700 	 * we've received an ELS callback, i.e. an ELS packet has arrived
2701 	 */
2702 
2703 	/* take the current packet off of the queue */
2704 	mutex_enter(&sf->sf_mutex);
2705 	if (privp->timeout == SF_INVALID_TIMEOUT) {
2706 		mutex_exit(&sf->sf_mutex);
2707 		return;
2708 	}
2709 	if (privp->prev != NULL) {
2710 		privp->prev->next = privp->next;
2711 	}
2712 	if (privp->next != NULL) {
2713 		privp->next->prev = privp->prev;
2714 	}
2715 	if (sf->sf_els_list == privp) {
2716 		sf->sf_els_list = privp->next;
2717 	}
2718 	privp->prev = privp->next = NULL;
2719 	mutex_exit(&sf->sf_mutex);
2720 
2721 	/* get # pkts in this callback */
2722 	ncmds = fpkt->fcal_ncmds;
2723 	ASSERT(ncmds >= 0);
2724 	mutex_enter(&sf->sf_cmd_mutex);
2725 	sf->sf_ncmds = ncmds;
2726 	mutex_exit(&sf->sf_cmd_mutex);
2727 
2728 	/* sync idea of memory */
2729 	(void) ddi_dma_sync(privp->rsp_dma_handle, (off_t)0, (size_t)0,
2730 	    DDI_DMA_SYNC_FORKERNEL);
2731 
2732 	/* was this an OK ACC msg ?? */
2733 	if ((fpkt->fcal_pkt_status == FCAL_STATUS_OK) &&
2734 	    (ptr->ls_code == LA_ELS_ACC)) {
2735 
2736 		/*
2737 		 * this was an OK ACC pkt
2738 		 */
2739 
2740 		switch (privp->els_code) {
2741 		case LA_ELS_PLOGI:
2742 			/*
2743 			 * was able to to an N_port login
2744 			 */
2745 			SF_DEBUG(2, (sf, CE_CONT,
2746 			    "!PLOGI to al_pa %x succeeded, wwn %x%x\n",
2747 			    privp->dest_nport_id,
2748 			    *((int *)&ptr->nport_ww_name.raw_wwn[0]),
2749 			    *((int *)&ptr->nport_ww_name.raw_wwn[4])));
2750 			/* try to do a process login */
2751 			if (!sf_do_prli(sf, privp, ptr)) {
2752 				free_pkt = FALSE;
2753 				goto fail;	/* PRLI failed */
2754 			}
2755 			break;
2756 		case LA_ELS_PRLI:
2757 			/*
2758 			 * was able to do a process login
2759 			 */
2760 			SF_DEBUG(2, (sf, CE_CONT,
2761 			    "!PRLI to al_pa %x succeeded\n",
2762 			    privp->dest_nport_id));
2763 			/* try to do address discovery */
2764 			if (sf_do_adisc(sf, privp) != 1) {
2765 				free_pkt = FALSE;
2766 				goto fail;	/* ADISC failed */
2767 			}
2768 			break;
2769 		case LA_ELS_ADISC:
2770 			/*
2771 			 * found a target via ADISC
2772 			 */
2773 
2774 			SF_DEBUG(2, (sf, CE_CONT,
2775 			    "!ADISC to al_pa %x succeeded\n",
2776 			    privp->dest_nport_id));
2777 
2778 			/* create the target info */
2779 			if ((target = sf_create_target(sf, privp,
2780 			    sf_alpa_to_switch[(uchar_t)adisc->hard_address],
2781 			    (int64_t)0))
2782 			    == NULL) {
2783 				goto fail;	/* can't create target */
2784 			}
2785 
2786 			/*
2787 			 * ensure address discovered matches what we thought
2788 			 * it would be
2789 			 */
2790 			if ((uchar_t)adisc->hard_address !=
2791 			    privp->dest_nport_id) {
2792 				sf_log(sf, CE_WARN,
2793 				    "target 0x%x, AL-PA 0x%x and "
2794 				    "hard address 0x%x don't match\n",
2795 				    sf_alpa_to_switch[
2796 				    (uchar_t)privp->dest_nport_id],
2797 				    privp->dest_nport_id,
2798 				    (uchar_t)adisc->hard_address);
2799 				mutex_enter(&sf->sf_mutex);
2800 				sf_offline_target(sf, target);
2801 				mutex_exit(&sf->sf_mutex);
2802 				goto fail;	/* addr doesn't match */
2803 			}
2804 			/*
2805 			 * get inquiry data from the target
2806 			 */
2807 			if (!sf_do_reportlun(sf, privp, target)) {
2808 				mutex_enter(&sf->sf_mutex);
2809 				sf_offline_target(sf, target);
2810 				mutex_exit(&sf->sf_mutex);
2811 				free_pkt = FALSE;
2812 				goto fail;	/* inquiry failed */
2813 			}
2814 			break;
2815 		default:
2816 			SF_DEBUG(2, (sf, CE_CONT,
2817 			    "!ELS %x to al_pa %x succeeded\n",
2818 			    privp->els_code, privp->dest_nport_id));
2819 			sf_els_free(fpkt);
2820 			break;
2821 		}
2822 
2823 	} else {
2824 
2825 		/*
2826 		 * oh oh -- this was not an OK ACC packet
2827 		 */
2828 
2829 		/* get target ID from dest loop address */
2830 		tgt_id = sf_alpa_to_switch[(uchar_t)privp->dest_nport_id];
2831 
2832 		/* keep track of failures */
2833 		sf->sf_stats.tstats[tgt_id].els_failures++;
2834 		if (++(privp->retries) < sf_els_retries &&
2835 		    fpkt->fcal_pkt_status != FCAL_STATUS_OPEN_FAIL) {
2836 			if (fpkt->fcal_pkt_status ==
2837 			    FCAL_STATUS_MAX_XCHG_EXCEEDED)  {
2838 				tsf = sf->sf_sibling;
2839 				if (tsf != NULL) {
2840 					mutex_enter(&tsf->sf_cmd_mutex);
2841 					tsf->sf_flag = 1;
2842 					tsf->sf_throttle = SF_DECR_DELTA;
2843 					mutex_exit(&tsf->sf_cmd_mutex);
2844 				}
2845 			}
2846 			privp->timeout = sf_watchdog_time + SF_ELS_TIMEOUT;
2847 			privp->prev = NULL;
2848 
2849 			mutex_enter(&sf->sf_mutex);
2850 
2851 			if (privp->lip_cnt == sf->sf_lip_cnt) {
2852 				SF_DEBUG(1, (sf, CE_WARN,
2853 				    "!ELS %x to al_pa %x failed, retrying",
2854 				    privp->els_code, privp->dest_nport_id));
2855 				privp->next = sf->sf_els_list;
2856 				if (sf->sf_els_list != NULL) {
2857 					sf->sf_els_list->prev = privp;
2858 				}
2859 
2860 				sf->sf_els_list = privp;
2861 
2862 				mutex_exit(&sf->sf_mutex);
2863 				/* device busy?  wait a bit ... */
2864 				if (fpkt->fcal_pkt_status ==
2865 				    FCAL_STATUS_MAX_XCHG_EXCEEDED)  {
2866 					privp->delayed_retry = 1;
2867 					return;
2868 				}
2869 				/* call the transport to send a pkt */
2870 				if (soc_transport(sf->sf_sochandle, fpkt,
2871 				    FCAL_NOSLEEP, CQ_REQUEST_1) !=
2872 				    FCAL_TRANSPORT_SUCCESS) {
2873 					mutex_enter(&sf->sf_mutex);
2874 					if (privp->prev != NULL) {
2875 						privp->prev->next =
2876 						    privp->next;
2877 					}
2878 					if (privp->next != NULL) {
2879 						privp->next->prev =
2880 						    privp->prev;
2881 					}
2882 					if (sf->sf_els_list == privp) {
2883 						sf->sf_els_list = privp->next;
2884 					}
2885 					mutex_exit(&sf->sf_mutex);
2886 					goto fail;
2887 				} else
2888 					return;
2889 			} else {
2890 				mutex_exit(&sf->sf_mutex);
2891 				goto fail;
2892 			}
2893 		} else {
2894 #ifdef	DEBUG
2895 			if (fpkt->fcal_pkt_status != 0x36 || sfdebug > 4) {
2896 			SF_DEBUG(2, (sf, CE_NOTE, "ELS %x to al_pa %x failed",
2897 			    privp->els_code, privp->dest_nport_id));
2898 			if (fpkt->fcal_pkt_status == FCAL_STATUS_OK) {
2899 				SF_DEBUG(2, (sf, CE_NOTE,
2900 				    "els reply code = %x", ptr->ls_code));
2901 				if (ptr->ls_code == LA_ELS_RJT)
2902 					SF_DEBUG(1, (sf, CE_CONT,
2903 					    "LS_RJT reason = %x\n",
2904 					    *(((uint_t *)ptr) + 1)));
2905 			} else
2906 				SF_DEBUG(2, (sf, CE_NOTE,
2907 				    "fc packet status = %x",
2908 				    fpkt->fcal_pkt_status));
2909 			}
2910 #endif
2911 			goto fail;
2912 		}
2913 	}
2914 	return;					/* success */
2915 fail:
2916 	mutex_enter(&sf->sf_mutex);
2917 	if (sf->sf_lip_cnt == privp->lip_cnt) {
2918 		sf->sf_device_count--;
2919 		ASSERT(sf->sf_device_count >= 0);
2920 		if (sf->sf_device_count == 0) {
2921 			sf_finish_init(sf, privp->lip_cnt);
2922 		}
2923 	}
2924 	mutex_exit(&sf->sf_mutex);
2925 	if (free_pkt) {
2926 		sf_els_free(fpkt);
2927 	}
2928 }
2929 
2930 
2931 /*
2932  * send a PRLI (process login) ELS IU via the transport,
2933  * returning TRUE upon success, else returning FALSE
2934  */
2935 static int
2936 sf_do_prli(struct sf *sf, struct sf_els_hdr *privp, struct la_els_logi *ptr)
2937 {
2938 	struct la_els_prli	*prli = (struct la_els_prli *)privp->cmd;
2939 	struct fcp_prli		*fprli;
2940 	struct  fcal_packet	*fpkt = privp->fpkt;
2941 
2942 
2943 	fpkt->fcal_socal_request.sr_dataseg[0].fc_count =
2944 	    sizeof (struct la_els_prli);
2945 	privp->els_code = LA_ELS_PRLI;
2946 	fprli = (struct fcp_prli *)prli->service_params;
2947 	prli->ls_code = LA_ELS_PRLI;
2948 	prli->page_length = 0x10;
2949 	prli->payload_length = sizeof (struct la_els_prli);
2950 	fprli->type = 0x08;			/* no define here? */
2951 	fprli->resvd1 = 0;
2952 	fprli->orig_process_assoc_valid = 0;
2953 	fprli->resp_process_assoc_valid = 0;
2954 	fprli->establish_image_pair = 1;
2955 	fprli->resvd2 = 0;
2956 	fprli->resvd3 = 0;
2957 	fprli->data_overlay_allowed = 0;
2958 	fprli->initiator_fn = 1;
2959 	fprli->target_fn = 0;
2960 	fprli->cmd_data_mixed = 0;
2961 	fprli->data_resp_mixed = 0;
2962 	fprli->read_xfer_rdy_disabled = 1;
2963 	fprli->write_xfer_rdy_disabled = 0;
2964 
2965 	bcopy((caddr_t)&ptr->nport_ww_name, (caddr_t)&privp->port_wwn,
2966 	    sizeof (privp->port_wwn));
2967 	bcopy((caddr_t)&ptr->node_ww_name, (caddr_t)&privp->node_wwn,
2968 	    sizeof (privp->node_wwn));
2969 
2970 	privp->timeout = sf_watchdog_time + SF_ELS_TIMEOUT;
2971 	return (sf_els_transport(sf, privp));
2972 }
2973 
2974 
2975 /*
2976  * send an ADISC (address discovery) ELS IU via the transport,
2977  * returning TRUE upon success, else returning FALSE
2978  */
2979 static int
2980 sf_do_adisc(struct sf *sf, struct sf_els_hdr *privp)
2981 {
2982 	struct la_els_adisc	*adisc = (struct la_els_adisc *)privp->cmd;
2983 	struct	fcal_packet	*fpkt = privp->fpkt;
2984 
2985 	privp->els_code = LA_ELS_ADISC;
2986 	adisc->ls_code = LA_ELS_ADISC;
2987 	adisc->mbz[0] = 0;
2988 	adisc->mbz[1] = 0;
2989 	adisc->mbz[2] = 0;
2990 	adisc->hard_address = 0; /* ??? */
2991 	fpkt->fcal_socal_request.sr_dataseg[0].fc_count =
2992 	    sizeof (struct la_els_adisc);
2993 	bcopy((caddr_t)&sf->sf_sochandle->fcal_p_wwn,
2994 	    (caddr_t)&adisc->port_wwn, sizeof (adisc->port_wwn));
2995 	bcopy((caddr_t)&sf->sf_sochandle->fcal_n_wwn,
2996 	    (caddr_t)&adisc->node_wwn, sizeof (adisc->node_wwn));
2997 	adisc->nport_id = sf->sf_al_pa;
2998 
2999 	privp->timeout = sf_watchdog_time + SF_ELS_TIMEOUT;
3000 	return (sf_els_transport(sf, privp));
3001 }
3002 
3003 
3004 static struct fcal_packet *
3005 sf_els_alloc(struct sf *sf, uchar_t dest_id, int priv_size, int cmd_size,
3006     int rsp_size, caddr_t *rprivp, caddr_t *cmd_buf)
3007 {
3008 	struct	fcal_packet	*fpkt;
3009 	ddi_dma_cookie_t	pcookie;
3010 	ddi_dma_cookie_t	rcookie;
3011 	struct	sf_els_hdr	*privp;
3012 	ddi_dma_handle_t	cmd_dma_handle = NULL;
3013 	ddi_dma_handle_t	rsp_dma_handle = NULL;
3014 	ddi_acc_handle_t	cmd_acc_handle = NULL;
3015 	ddi_acc_handle_t	rsp_acc_handle = NULL;
3016 	size_t			real_size;
3017 	uint_t			ccount;
3018 	fc_frame_header_t	*hp;
3019 	int			cmd_bound = FALSE, rsp_bound = FALSE;
3020 	caddr_t			cmd = NULL;
3021 	caddr_t			rsp = NULL;
3022 
3023 	if ((fpkt = (struct fcal_packet *)kmem_zalloc(
3024 	    sizeof (struct fcal_packet), KM_NOSLEEP)) == NULL) {
3025 		SF_DEBUG(1, (sf, CE_WARN,
3026 			"Could not allocate fcal_packet for ELS\n"));
3027 		return (NULL);
3028 	}
3029 
3030 	if ((privp = (struct sf_els_hdr *)kmem_zalloc(priv_size,
3031 	    KM_NOSLEEP)) == NULL) {
3032 		SF_DEBUG(1, (sf, CE_WARN,
3033 		    "Could not allocate sf_els_hdr for ELS\n"));
3034 		goto fail;
3035 	}
3036 
3037 	privp->size = priv_size;
3038 	fpkt->fcal_pkt_private = (caddr_t)privp;
3039 
3040 	if (ddi_dma_alloc_handle(sf->sf_dip, sf->sf_sochandle->fcal_dmaattr,
3041 	    DDI_DMA_DONTWAIT, NULL, &cmd_dma_handle) != DDI_SUCCESS) {
3042 		SF_DEBUG(1, (sf, CE_WARN,
3043 		    "Could not allocate DMA handle for ELS\n"));
3044 		goto fail;
3045 	}
3046 
3047 	if (ddi_dma_mem_alloc(cmd_dma_handle, cmd_size,
3048 	    sf->sf_sochandle->fcal_accattr, DDI_DMA_CONSISTENT,
3049 	    DDI_DMA_DONTWAIT, NULL, &cmd,
3050 	    &real_size, &cmd_acc_handle) != DDI_SUCCESS) {
3051 		SF_DEBUG(1, (sf, CE_WARN,
3052 		    "Could not allocate DMA memory for ELS\n"));
3053 		goto fail;
3054 	}
3055 
3056 	if (real_size < cmd_size) {
3057 		SF_DEBUG(1, (sf, CE_WARN,
3058 		    "DMA memory too small for ELS\n"));
3059 		goto fail;
3060 	}
3061 
3062 	if (ddi_dma_addr_bind_handle(cmd_dma_handle, NULL,
3063 	    cmd, real_size, DDI_DMA_WRITE | DDI_DMA_CONSISTENT,
3064 	    DDI_DMA_DONTWAIT, NULL, &pcookie, &ccount) != DDI_DMA_MAPPED) {
3065 		SF_DEBUG(1, (sf, CE_WARN,
3066 		    "Could not bind DMA memory for ELS\n"));
3067 		goto fail;
3068 	}
3069 	cmd_bound = TRUE;
3070 
3071 	if (ccount != 1) {
3072 		SF_DEBUG(1, (sf, CE_WARN,
3073 		    "Wrong cookie count for ELS\n"));
3074 		goto fail;
3075 	}
3076 
3077 	if (ddi_dma_alloc_handle(sf->sf_dip, sf->sf_sochandle->fcal_dmaattr,
3078 	    DDI_DMA_DONTWAIT, NULL, &rsp_dma_handle) != DDI_SUCCESS) {
3079 		SF_DEBUG(1, (sf, CE_WARN,
3080 		    "Could not allocate DMA handle for ELS rsp\n"));
3081 		goto fail;
3082 	}
3083 	if (ddi_dma_mem_alloc(rsp_dma_handle, rsp_size,
3084 	    sf->sf_sochandle->fcal_accattr, DDI_DMA_CONSISTENT,
3085 	    DDI_DMA_DONTWAIT, NULL, &rsp,
3086 	    &real_size, &rsp_acc_handle) != DDI_SUCCESS) {
3087 		SF_DEBUG(1, (sf, CE_WARN,
3088 		    "Could not allocate DMA memory for ELS rsp\n"));
3089 		goto fail;
3090 	}
3091 
3092 	if (real_size < rsp_size) {
3093 		SF_DEBUG(1, (sf, CE_WARN,
3094 		    "DMA memory too small for ELS rsp\n"));
3095 		goto fail;
3096 	}
3097 
3098 	if (ddi_dma_addr_bind_handle(rsp_dma_handle, NULL,
3099 	    rsp, real_size, DDI_DMA_READ | DDI_DMA_CONSISTENT,
3100 	    DDI_DMA_DONTWAIT, NULL, &rcookie, &ccount) != DDI_DMA_MAPPED) {
3101 		SF_DEBUG(1, (sf, CE_WARN,
3102 		    "Could not bind DMA memory for ELS rsp\n"));
3103 		goto fail;
3104 	}
3105 	rsp_bound = TRUE;
3106 
3107 	if (ccount != 1) {
3108 		SF_DEBUG(1, (sf, CE_WARN,
3109 		    "Wrong cookie count for ELS rsp\n"));
3110 		goto fail;
3111 	}
3112 
3113 	privp->cmd = cmd;
3114 	privp->sf = sf;
3115 	privp->cmd_dma_handle = cmd_dma_handle;
3116 	privp->cmd_acc_handle = cmd_acc_handle;
3117 	privp->rsp = rsp;
3118 	privp->rsp_dma_handle = rsp_dma_handle;
3119 	privp->rsp_acc_handle = rsp_acc_handle;
3120 	privp->dest_nport_id = dest_id;
3121 	privp->fpkt = fpkt;
3122 
3123 	fpkt->fcal_pkt_cookie = sf->sf_socp;
3124 	fpkt->fcal_pkt_comp = sf_els_callback;
3125 	fpkt->fcal_magic = FCALP_MAGIC;
3126 	fpkt->fcal_pkt_flags = 0;
3127 	fpkt->fcal_socal_request.sr_soc_hdr.sh_flags =
3128 	    (ushort_t)(SOC_FC_HEADER | sf->sf_sochandle->fcal_portno);
3129 	fpkt->fcal_socal_request.sr_soc_hdr.sh_class = 3;
3130 	fpkt->fcal_socal_request.sr_soc_hdr.sh_seg_cnt = 2;
3131 	fpkt->fcal_socal_request.sr_soc_hdr.sh_byte_cnt = cmd_size;
3132 	fpkt->fcal_socal_request.sr_cqhdr.cq_hdr_count = 1;
3133 	fpkt->fcal_socal_request.sr_cqhdr.cq_hdr_flags = 0;
3134 	fpkt->fcal_socal_request.sr_cqhdr.cq_hdr_seqno = 0;
3135 	fpkt->fcal_socal_request.sr_cqhdr.cq_hdr_type = CQ_TYPE_SIMPLE;
3136 	fpkt->fcal_socal_request.sr_dataseg[0].fc_base = (uint32_t)
3137 	    pcookie.dmac_address;
3138 	fpkt->fcal_socal_request.sr_dataseg[0].fc_count = cmd_size;
3139 	fpkt->fcal_socal_request.sr_dataseg[1].fc_base = (uint32_t)
3140 	    rcookie.dmac_address;
3141 	fpkt->fcal_socal_request.sr_dataseg[1].fc_count = rsp_size;
3142 
3143 	/* Fill in the Fabric Channel Header */
3144 	hp = &fpkt->fcal_socal_request.sr_fc_frame_hdr;
3145 	hp->r_ctl = R_CTL_ELS_REQ;
3146 	hp->d_id = dest_id;
3147 	hp->s_id = sf->sf_al_pa;
3148 	hp->type = TYPE_EXTENDED_LS;
3149 	hp->reserved1 = 0;
3150 	hp->f_ctl = F_CTL_SEQ_INITIATIVE | F_CTL_FIRST_SEQ;
3151 	hp->seq_id = 0;
3152 	hp->df_ctl  = 0;
3153 	hp->seq_cnt = 0;
3154 	hp->ox_id = 0xffff;
3155 	hp->rx_id = 0xffff;
3156 	hp->ro = 0;
3157 
3158 	*rprivp = (caddr_t)privp;
3159 	*cmd_buf = cmd;
3160 	return (fpkt);
3161 
3162 fail:
3163 	if (cmd_dma_handle != NULL) {
3164 		if (cmd_bound) {
3165 			(void) ddi_dma_unbind_handle(cmd_dma_handle);
3166 		}
3167 		ddi_dma_free_handle(&cmd_dma_handle);
3168 		privp->cmd_dma_handle = NULL;
3169 	}
3170 	if (rsp_dma_handle != NULL) {
3171 		if (rsp_bound) {
3172 			(void) ddi_dma_unbind_handle(rsp_dma_handle);
3173 		}
3174 		ddi_dma_free_handle(&rsp_dma_handle);
3175 		privp->rsp_dma_handle = NULL;
3176 	}
3177 	sf_els_free(fpkt);
3178 	return (NULL);
3179 }
3180 
3181 
3182 static void
3183 sf_els_free(struct fcal_packet *fpkt)
3184 {
3185 	struct	sf_els_hdr	*privp = fpkt->fcal_pkt_private;
3186 
3187 	if (privp != NULL) {
3188 		if (privp->cmd_dma_handle != NULL) {
3189 			(void) ddi_dma_unbind_handle(privp->cmd_dma_handle);
3190 			ddi_dma_free_handle(&privp->cmd_dma_handle);
3191 		}
3192 		if (privp->cmd != NULL) {
3193 			ddi_dma_mem_free(&privp->cmd_acc_handle);
3194 		}
3195 
3196 		if (privp->rsp_dma_handle != NULL) {
3197 			(void) ddi_dma_unbind_handle(privp->rsp_dma_handle);
3198 			ddi_dma_free_handle(&privp->rsp_dma_handle);
3199 		}
3200 
3201 		if (privp->rsp != NULL) {
3202 			ddi_dma_mem_free(&privp->rsp_acc_handle);
3203 		}
3204 		if (privp->data_dma_handle) {
3205 			(void) ddi_dma_unbind_handle(privp->data_dma_handle);
3206 			ddi_dma_free_handle(&privp->data_dma_handle);
3207 		}
3208 		if (privp->data_buf) {
3209 			ddi_dma_mem_free(&privp->data_acc_handle);
3210 		}
3211 		kmem_free(privp, privp->size);
3212 	}
3213 	kmem_free(fpkt, sizeof (struct fcal_packet));
3214 }
3215 
3216 
3217 static struct sf_target *
3218 sf_create_target(struct sf *sf, struct sf_els_hdr *privp, int tnum, int64_t lun)
3219 {
3220 	struct sf_target *target, *ntarget, *otarget, *ptarget;
3221 	int hash;
3222 #ifdef RAID_LUNS
3223 	int64_t orig_lun = lun;
3224 
3225 	/* XXXX Work around SCSA limitations. */
3226 	lun = *((short *)&lun);
3227 #endif
3228 	ntarget = kmem_zalloc(sizeof (struct sf_target), KM_NOSLEEP);
3229 	mutex_enter(&sf->sf_mutex);
3230 	if (sf->sf_lip_cnt != privp->lip_cnt) {
3231 		mutex_exit(&sf->sf_mutex);
3232 		if (ntarget != NULL)
3233 			kmem_free(ntarget, sizeof (struct sf_target));
3234 		return (NULL);
3235 	}
3236 
3237 	target = sf_lookup_target(sf, privp->port_wwn, lun);
3238 	if (lun != 0) {
3239 		/*
3240 		 * Since LUNs != 0 are queued up after LUN == 0, find LUN == 0
3241 		 * and enqueue the new LUN.
3242 		 */
3243 		if ((ptarget = sf_lookup_target(sf, privp->port_wwn,
3244 		    (int64_t)0)) ==	NULL) {
3245 			/*
3246 			 * Yeep -- no LUN 0?
3247 			 */
3248 			mutex_exit(&sf->sf_mutex);
3249 			sf_log(sf, CE_WARN, "target 0x%x "
3250 			    "lun %" PRIx64 ": No LUN 0\n", tnum, lun);
3251 			if (ntarget != NULL)
3252 				kmem_free(ntarget, sizeof (struct sf_target));
3253 			return (NULL);
3254 		}
3255 		mutex_enter(&ptarget->sft_mutex);
3256 		if (target != NULL && ptarget->sft_lip_cnt == sf->sf_lip_cnt &&
3257 		    ptarget->sft_state&SF_TARGET_OFFLINE) {
3258 			/* LUN 0 already finished, duplicate its state */
3259 			mutex_exit(&ptarget->sft_mutex);
3260 			sf_offline_target(sf, target);
3261 			mutex_exit(&sf->sf_mutex);
3262 			if (ntarget != NULL)
3263 				kmem_free(ntarget, sizeof (struct sf_target));
3264 			return (target);
3265 		} else if (target != NULL) {
3266 			/*
3267 			 * LUN 0 online or not examined yet.
3268 			 * Try to bring the LUN back online
3269 			 */
3270 			mutex_exit(&ptarget->sft_mutex);
3271 			mutex_enter(&target->sft_mutex);
3272 			target->sft_lip_cnt = privp->lip_cnt;
3273 			target->sft_state |= SF_TARGET_BUSY;
3274 			target->sft_state &= ~(SF_TARGET_OFFLINE|
3275 			    SF_TARGET_MARK);
3276 			target->sft_al_pa = (uchar_t)privp->dest_nport_id;
3277 			target->sft_hard_address = sf_switch_to_alpa[tnum];
3278 			mutex_exit(&target->sft_mutex);
3279 			mutex_exit(&sf->sf_mutex);
3280 			if (ntarget != NULL)
3281 				kmem_free(ntarget, sizeof (struct sf_target));
3282 			return (target);
3283 		}
3284 		mutex_exit(&ptarget->sft_mutex);
3285 		if (ntarget == NULL) {
3286 			mutex_exit(&sf->sf_mutex);
3287 			return (NULL);
3288 		}
3289 		/* Initialize new target structure */
3290 		bcopy((caddr_t)&privp->node_wwn,
3291 		    (caddr_t)&ntarget->sft_node_wwn, sizeof (privp->node_wwn));
3292 		bcopy((caddr_t)&privp->port_wwn,
3293 		    (caddr_t)&ntarget->sft_port_wwn, sizeof (privp->port_wwn));
3294 		ntarget->sft_lun.l = lun;
3295 #ifdef RAID_LUNS
3296 		ntarget->sft_lun.l = orig_lun;
3297 		ntarget->sft_raid_lun = (uint_t)lun;
3298 #endif
3299 		mutex_init(&ntarget->sft_mutex, NULL, MUTEX_DRIVER, NULL);
3300 		mutex_init(&ntarget->sft_pkt_mutex, NULL, MUTEX_DRIVER, NULL);
3301 		/* Don't let anyone use this till we finishup init. */
3302 		mutex_enter(&ntarget->sft_mutex);
3303 		mutex_enter(&ntarget->sft_pkt_mutex);
3304 
3305 		hash = SF_HASH(privp->port_wwn, lun);
3306 		ntarget->sft_next = sf->sf_wwn_lists[hash];
3307 		sf->sf_wwn_lists[hash] = ntarget;
3308 
3309 		ntarget->sft_lip_cnt = privp->lip_cnt;
3310 		ntarget->sft_al_pa = (uchar_t)privp->dest_nport_id;
3311 		ntarget->sft_hard_address = sf_switch_to_alpa[tnum];
3312 		ntarget->sft_device_type = DTYPE_UNKNOWN;
3313 		ntarget->sft_state = SF_TARGET_BUSY;
3314 		ntarget->sft_pkt_head = (struct sf_pkt *)&ntarget->
3315 		    sft_pkt_head;
3316 		ntarget->sft_pkt_tail = (struct sf_pkt *)&ntarget->
3317 		    sft_pkt_head;
3318 
3319 		mutex_enter(&ptarget->sft_mutex);
3320 		/* Traverse the list looking for this target */
3321 		for (target = ptarget; target->sft_next_lun;
3322 		    target = target->sft_next_lun) {
3323 			otarget = target->sft_next_lun;
3324 		}
3325 		ntarget->sft_next_lun = target->sft_next_lun;
3326 		target->sft_next_lun = ntarget;
3327 		mutex_exit(&ptarget->sft_mutex);
3328 		mutex_exit(&ntarget->sft_pkt_mutex);
3329 		mutex_exit(&ntarget->sft_mutex);
3330 		mutex_exit(&sf->sf_mutex);
3331 		return (ntarget);
3332 
3333 	}
3334 	if (target != NULL && target->sft_lip_cnt == sf->sf_lip_cnt) {
3335 		/* It's been touched this LIP -- duplicate WWNs */
3336 		sf_offline_target(sf, target); /* And all the baby targets */
3337 		mutex_exit(&sf->sf_mutex);
3338 		sf_log(sf, CE_WARN, "target 0x%x, duplicate port wwns\n",
3339 		    tnum);
3340 		if (ntarget != NULL) {
3341 			kmem_free(ntarget, sizeof (struct sf_target));
3342 		}
3343 		return (NULL);
3344 	}
3345 
3346 	if ((otarget = sf->sf_targets[tnum]) != NULL) {
3347 		/* Someone else is in our slot */
3348 		mutex_enter(&otarget->sft_mutex);
3349 		if (otarget->sft_lip_cnt == sf->sf_lip_cnt) {
3350 			mutex_exit(&otarget->sft_mutex);
3351 			sf_offline_target(sf, otarget);
3352 			if (target != NULL)
3353 				sf_offline_target(sf, target);
3354 			mutex_exit(&sf->sf_mutex);
3355 			sf_log(sf, CE_WARN,
3356 			    "target 0x%x, duplicate switch settings\n", tnum);
3357 			if (ntarget != NULL)
3358 				kmem_free(ntarget, sizeof (struct sf_target));
3359 			return (NULL);
3360 		}
3361 		mutex_exit(&otarget->sft_mutex);
3362 		if (bcmp((caddr_t)&privp->port_wwn, (caddr_t)&otarget->
3363 		    sft_port_wwn, sizeof (privp->port_wwn))) {
3364 			sf_offline_target(sf, otarget);
3365 			mutex_exit(&sf->sf_mutex);
3366 			sf_log(sf, CE_WARN, "wwn changed on target 0x%x\n",
3367 			    tnum);
3368 			bzero((caddr_t)&sf->sf_stats.tstats[tnum],
3369 			    sizeof (struct sf_target_stats));
3370 			mutex_enter(&sf->sf_mutex);
3371 		}
3372 	}
3373 
3374 	sf->sf_targets[tnum] = target;
3375 	if ((target = sf->sf_targets[tnum]) == NULL) {
3376 		if (ntarget == NULL) {
3377 			mutex_exit(&sf->sf_mutex);
3378 			return (NULL);
3379 		}
3380 		bcopy((caddr_t)&privp->node_wwn,
3381 		    (caddr_t)&ntarget->sft_node_wwn, sizeof (privp->node_wwn));
3382 		bcopy((caddr_t)&privp->port_wwn,
3383 		    (caddr_t)&ntarget->sft_port_wwn, sizeof (privp->port_wwn));
3384 		ntarget->sft_lun.l = lun;
3385 #ifdef RAID_LUNS
3386 		ntarget->sft_lun.l = orig_lun;
3387 		ntarget->sft_raid_lun = (uint_t)lun;
3388 #endif
3389 		mutex_init(&ntarget->sft_mutex, NULL, MUTEX_DRIVER, NULL);
3390 		mutex_init(&ntarget->sft_pkt_mutex, NULL, MUTEX_DRIVER, NULL);
3391 		mutex_enter(&ntarget->sft_mutex);
3392 		mutex_enter(&ntarget->sft_pkt_mutex);
3393 		hash = SF_HASH(privp->port_wwn, lun); /* lun 0 */
3394 		ntarget->sft_next = sf->sf_wwn_lists[hash];
3395 		sf->sf_wwn_lists[hash] = ntarget;
3396 
3397 		target = ntarget;
3398 		target->sft_lip_cnt = privp->lip_cnt;
3399 		target->sft_al_pa = (uchar_t)privp->dest_nport_id;
3400 		target->sft_hard_address = sf_switch_to_alpa[tnum];
3401 		target->sft_device_type = DTYPE_UNKNOWN;
3402 		target->sft_state = SF_TARGET_BUSY;
3403 		target->sft_pkt_head = (struct sf_pkt *)&target->
3404 		    sft_pkt_head;
3405 		target->sft_pkt_tail = (struct sf_pkt *)&target->
3406 		    sft_pkt_head;
3407 		sf->sf_targets[tnum] = target;
3408 		mutex_exit(&ntarget->sft_mutex);
3409 		mutex_exit(&ntarget->sft_pkt_mutex);
3410 		mutex_exit(&sf->sf_mutex);
3411 	} else {
3412 		mutex_enter(&target->sft_mutex);
3413 		target->sft_lip_cnt = privp->lip_cnt;
3414 		target->sft_state |= SF_TARGET_BUSY;
3415 		target->sft_state &= ~(SF_TARGET_OFFLINE|SF_TARGET_MARK);
3416 		target->sft_al_pa = (uchar_t)privp->dest_nport_id;
3417 		target->sft_hard_address = sf_switch_to_alpa[tnum];
3418 		mutex_exit(&target->sft_mutex);
3419 		mutex_exit(&sf->sf_mutex);
3420 		if (ntarget != NULL)
3421 			kmem_free(ntarget, sizeof (struct sf_target));
3422 	}
3423 	return (target);
3424 }
3425 
3426 
3427 /*
3428  * find the target for a given sf instance
3429  */
3430 /* ARGSUSED */
3431 static struct sf_target *
3432 #ifdef RAID_LUNS
3433 sf_lookup_target(struct sf *sf, uchar_t *wwn, int lun)
3434 #else
3435 sf_lookup_target(struct sf *sf, uchar_t *wwn, int64_t lun)
3436 #endif
3437 {
3438 	int hash;
3439 	struct sf_target *target;
3440 
3441 	ASSERT(mutex_owned(&sf->sf_mutex));
3442 	hash = SF_HASH(wwn, lun);
3443 
3444 	target = sf->sf_wwn_lists[hash];
3445 	while (target != NULL) {
3446 
3447 #ifndef	RAID_LUNS
3448 		if (bcmp((caddr_t)wwn, (caddr_t)&target->sft_port_wwn,
3449 		    sizeof (target->sft_port_wwn)) == 0 &&
3450 			target->sft_lun.l == lun)
3451 			break;
3452 #else
3453 		if (bcmp((caddr_t)wwn, (caddr_t)&target->sft_port_wwn,
3454 		    sizeof (target->sft_port_wwn)) == 0 &&
3455 			target->sft_raid_lun == lun)
3456 			break;
3457 #endif
3458 		target = target->sft_next;
3459 	}
3460 
3461 	return (target);
3462 }
3463 
3464 
3465 /*
3466  * Send out a REPORT_LUNS command.
3467  */
3468 static int
3469 sf_do_reportlun(struct sf *sf, struct sf_els_hdr *privp,
3470     struct sf_target *target)
3471 {
3472 	struct	fcal_packet	*fpkt = privp->fpkt;
3473 	ddi_dma_cookie_t	pcookie;
3474 	ddi_dma_handle_t	lun_dma_handle = NULL;
3475 	ddi_acc_handle_t	lun_acc_handle;
3476 	uint_t			ccount;
3477 	size_t			real_size;
3478 	caddr_t			lun_buf = NULL;
3479 	int			handle_bound = 0;
3480 	fc_frame_header_t	*hp = &fpkt->fcal_socal_request.sr_fc_frame_hdr;
3481 	struct fcp_cmd		*reportlun = (struct fcp_cmd *)privp->cmd;
3482 	char			*msg = "Transport";
3483 
3484 	if (ddi_dma_alloc_handle(sf->sf_dip, sf->sf_sochandle->fcal_dmaattr,
3485 	    DDI_DMA_DONTWAIT, NULL, &lun_dma_handle) != DDI_SUCCESS) {
3486 		msg = "ddi_dma_alloc_handle()";
3487 		goto fail;
3488 	}
3489 
3490 	if (ddi_dma_mem_alloc(lun_dma_handle, REPORT_LUNS_SIZE,
3491 	    sf->sf_sochandle->fcal_accattr, DDI_DMA_CONSISTENT,
3492 	    DDI_DMA_DONTWAIT, NULL, &lun_buf,
3493 	    &real_size, &lun_acc_handle) != DDI_SUCCESS) {
3494 		msg = "ddi_dma_mem_alloc()";
3495 		goto fail;
3496 	}
3497 
3498 	if (real_size < REPORT_LUNS_SIZE) {
3499 		msg = "DMA mem < REPORT_LUNS_SIZE";
3500 		goto fail;
3501 	}
3502 
3503 	if (ddi_dma_addr_bind_handle(lun_dma_handle, NULL,
3504 	    lun_buf, real_size, DDI_DMA_READ |
3505 	    DDI_DMA_CONSISTENT, DDI_DMA_DONTWAIT,
3506 	    NULL, &pcookie, &ccount) != DDI_DMA_MAPPED) {
3507 		msg = "ddi_dma_addr_bind_handle()";
3508 		goto fail;
3509 	}
3510 	handle_bound = 1;
3511 
3512 	if (ccount != 1) {
3513 		msg = "ccount != 1";
3514 		goto fail;
3515 	}
3516 	privp->els_code = 0;
3517 	privp->target = target;
3518 	privp->data_dma_handle = lun_dma_handle;
3519 	privp->data_acc_handle = lun_acc_handle;
3520 	privp->data_buf = lun_buf;
3521 
3522 	fpkt->fcal_pkt_comp = sf_reportlun_callback;
3523 	fpkt->fcal_socal_request.sr_soc_hdr.sh_seg_cnt = 3;
3524 	fpkt->fcal_socal_request.sr_cqhdr.cq_hdr_type = CQ_TYPE_IO_READ;
3525 	fpkt->fcal_socal_request.sr_dataseg[0].fc_count =
3526 	    sizeof (struct fcp_cmd);
3527 	fpkt->fcal_socal_request.sr_dataseg[2].fc_base =
3528 	    (uint32_t)pcookie.dmac_address;
3529 	fpkt->fcal_socal_request.sr_dataseg[2].fc_count = pcookie.dmac_size;
3530 	fpkt->fcal_socal_request.sr_soc_hdr.sh_byte_cnt = pcookie.dmac_size;
3531 	hp->r_ctl = R_CTL_COMMAND;
3532 	hp->type = TYPE_SCSI_FCP;
3533 	bzero((caddr_t)reportlun, sizeof (struct fcp_cmd));
3534 	((union scsi_cdb *)reportlun->fcp_cdb)->scc_cmd = SCMD_REPORT_LUNS;
3535 	/* Now set the buffer size.  If DDI gave us extra, that's O.K. */
3536 	((union scsi_cdb *)reportlun->fcp_cdb)->scc5_count0 =
3537 	    (real_size&0x0ff);
3538 	((union scsi_cdb *)reportlun->fcp_cdb)->scc5_count1 =
3539 	    (real_size>>8)&0x0ff;
3540 	((union scsi_cdb *)reportlun->fcp_cdb)->scc5_count2 =
3541 	    (real_size>>16)&0x0ff;
3542 	((union scsi_cdb *)reportlun->fcp_cdb)->scc5_count3 =
3543 	    (real_size>>24)&0x0ff;
3544 	reportlun->fcp_cntl.cntl_read_data = 1;
3545 	reportlun->fcp_cntl.cntl_write_data = 0;
3546 	reportlun->fcp_data_len = pcookie.dmac_size;
3547 	reportlun->fcp_cntl.cntl_qtype = FCP_QTYPE_SIMPLE;
3548 
3549 	(void) ddi_dma_sync(lun_dma_handle, 0, 0, DDI_DMA_SYNC_FORDEV);
3550 	/* We know he's there, so this should be fast */
3551 	privp->timeout = sf_watchdog_time + SF_FCP_TIMEOUT;
3552 	if (sf_els_transport(sf, privp) == 1)
3553 		return (1);
3554 
3555 fail:
3556 	sf_log(sf, CE_WARN,
3557 	    "%s failure for REPORTLUN to target 0x%x\n",
3558 	    msg, sf_alpa_to_switch[privp->dest_nport_id]);
3559 	sf_els_free(fpkt);
3560 	if (lun_dma_handle != NULL) {
3561 		if (handle_bound)
3562 			(void) ddi_dma_unbind_handle(lun_dma_handle);
3563 		ddi_dma_free_handle(&lun_dma_handle);
3564 	}
3565 	if (lun_buf != NULL) {
3566 		ddi_dma_mem_free(&lun_acc_handle);
3567 	}
3568 	return (0);
3569 }
3570 
3571 /*
3572  * Handle the results of a REPORT_LUNS command:
3573  *	Create additional targets if necessary
3574  *	Initiate INQUIRYs on all LUNs.
3575  */
3576 static void
3577 sf_reportlun_callback(struct fcal_packet *fpkt)
3578 {
3579 	struct sf_els_hdr *privp = (struct sf_els_hdr *)fpkt->
3580 	    fcal_pkt_private;
3581 	struct scsi_report_luns *ptr =
3582 	    (struct scsi_report_luns *)privp->data_buf;
3583 	struct sf *sf = privp->sf;
3584 	struct sf_target *target = privp->target;
3585 	struct fcp_rsp *rsp = NULL;
3586 	int delayed_retry = 0;
3587 	int tid = sf_alpa_to_switch[target->sft_hard_address];
3588 	int i, free_pkt = 1;
3589 	short	ncmds;
3590 
3591 	mutex_enter(&sf->sf_mutex);
3592 	/* use as temporary state variable */
3593 	if (privp->timeout == SF_INVALID_TIMEOUT) {
3594 		mutex_exit(&sf->sf_mutex);
3595 		return;
3596 	}
3597 	if (privp->prev)
3598 		privp->prev->next = privp->next;
3599 	if (privp->next)
3600 		privp->next->prev = privp->prev;
3601 	if (sf->sf_els_list == privp)
3602 		sf->sf_els_list = privp->next;
3603 	privp->prev = privp->next = NULL;
3604 	mutex_exit(&sf->sf_mutex);
3605 	ncmds = fpkt->fcal_ncmds;
3606 	ASSERT(ncmds >= 0);
3607 	mutex_enter(&sf->sf_cmd_mutex);
3608 	sf->sf_ncmds = ncmds;
3609 	mutex_exit(&sf->sf_cmd_mutex);
3610 
3611 	if (fpkt->fcal_pkt_status == FCAL_STATUS_OK) {
3612 		(void) ddi_dma_sync(privp->rsp_dma_handle, 0,
3613 		    0, DDI_DMA_SYNC_FORKERNEL);
3614 
3615 		rsp = (struct fcp_rsp *)privp->rsp;
3616 	}
3617 	SF_DEBUG(1, (sf, CE_CONT,
3618 	    "!REPORTLUN to al_pa %x pkt status %x scsi status %x\n",
3619 	    privp->dest_nport_id,
3620 	    fpkt->fcal_pkt_status,
3621 	    rsp?rsp->fcp_u.fcp_status.scsi_status:0));
3622 
3623 		/* See if target simply does not support REPORT_LUNS. */
3624 	if (rsp && rsp->fcp_u.fcp_status.scsi_status == STATUS_CHECK &&
3625 	    rsp->fcp_u.fcp_status.sense_len_set &&
3626 	    rsp->fcp_sense_len >=
3627 		offsetof(struct scsi_extended_sense, es_qual_code)) {
3628 			struct scsi_extended_sense *sense;
3629 			sense = (struct scsi_extended_sense *)
3630 			((caddr_t)rsp + sizeof (struct fcp_rsp)
3631 				+ rsp->fcp_response_len);
3632 			if (sense->es_key == KEY_ILLEGAL_REQUEST) {
3633 				if (sense->es_add_code == 0x20) {
3634 					/* Fake LUN 0 */
3635 				SF_DEBUG(1, (sf, CE_CONT,
3636 					"!REPORTLUN Faking good "
3637 					"completion for alpa %x\n",
3638 					privp->dest_nport_id));
3639 					ptr->lun_list_len = FCP_LUN_SIZE;
3640 					ptr->lun[0] = 0;
3641 					rsp->fcp_u.fcp_status.scsi_status =
3642 						STATUS_GOOD;
3643 				} else if (sense->es_add_code == 0x25) {
3644 					SF_DEBUG(1, (sf, CE_CONT,
3645 					    "!REPORTLUN device alpa %x "
3646 					    "key %x code %x\n",
3647 					    privp->dest_nport_id,
3648 					    sense->es_key, sense->es_add_code));
3649 					    goto fail;
3650 				}
3651 			} else if (sense->es_key ==
3652 				KEY_UNIT_ATTENTION &&
3653 				sense->es_add_code == 0x29) {
3654 				SF_DEBUG(1, (sf, CE_CONT,
3655 					"!REPORTLUN device alpa %x was reset\n",
3656 					privp->dest_nport_id));
3657 			} else {
3658 				SF_DEBUG(1, (sf, CE_CONT,
3659 					"!REPORTLUN device alpa %x "
3660 					"key %x code %x\n",
3661 					privp->dest_nport_id,
3662 					sense->es_key, sense->es_add_code));
3663 /* XXXXXX The following is here to handle broken targets -- remove it later */
3664 				if (sf_reportlun_forever &&
3665 					sense->es_key == KEY_UNIT_ATTENTION)
3666 					goto retry;
3667 /* XXXXXX */
3668 				if (sense->es_key == KEY_NOT_READY)
3669 					delayed_retry = 1;
3670 				}
3671 		}
3672 
3673 	if (rsp && rsp->fcp_u.fcp_status.scsi_status == STATUS_GOOD) {
3674 		struct fcp_rsp_info *bep;
3675 
3676 		bep = (struct fcp_rsp_info *)(&rsp->
3677 		    fcp_response_len + 1);
3678 		if (!rsp->fcp_u.fcp_status.rsp_len_set ||
3679 		    bep->rsp_code == FCP_NO_FAILURE) {
3680 			(void) ddi_dma_sync(privp->data_dma_handle,
3681 			    0, 0, DDI_DMA_SYNC_FORKERNEL);
3682 
3683 			/* Convert from #bytes to #ints */
3684 			ptr->lun_list_len = ptr->lun_list_len >> 3;
3685 			SF_DEBUG(2, (sf, CE_CONT,
3686 			    "!REPORTLUN to al_pa %x succeeded: %d LUNs\n",
3687 			    privp->dest_nport_id, ptr->lun_list_len));
3688 			if (!ptr->lun_list_len) {
3689 				/* No LUNs? Ya gotta be kidding... */
3690 				sf_log(sf, CE_WARN,
3691 				    "SCSI violation -- "
3692 				    "target 0x%x reports no LUNs\n",
3693 				    sf_alpa_to_switch[
3694 				    privp->dest_nport_id]);
3695 				ptr->lun_list_len = 1;
3696 				ptr->lun[0] = 0;
3697 			}
3698 
3699 			mutex_enter(&sf->sf_mutex);
3700 			if (sf->sf_lip_cnt == privp->lip_cnt) {
3701 				sf->sf_device_count += ptr->lun_list_len - 1;
3702 			}
3703 
3704 			mutex_exit(&sf->sf_mutex);
3705 			for (i = 0; i < ptr->lun_list_len && privp->lip_cnt ==
3706 			    sf->sf_lip_cnt; i++) {
3707 				struct sf_els_hdr *nprivp;
3708 				struct fcal_packet *nfpkt;
3709 
3710 				/* LUN 0 is already in `target' */
3711 				if (ptr->lun[i] != 0) {
3712 					target = sf_create_target(sf,
3713 					    privp, tid, ptr->lun[i]);
3714 				}
3715 				nprivp = NULL;
3716 				nfpkt = NULL;
3717 				if (target) {
3718 					nfpkt = sf_els_alloc(sf,
3719 					    target->sft_al_pa,
3720 					    sizeof (struct sf_els_hdr),
3721 					    sizeof (union sf_els_cmd),
3722 					    sizeof (union sf_els_rsp),
3723 					    (caddr_t *)&nprivp,
3724 					    (caddr_t *)&rsp);
3725 					if (nprivp)
3726 						nprivp->lip_cnt =
3727 						    privp->lip_cnt;
3728 				}
3729 				if (nfpkt && nprivp &&
3730 				    (sf_do_inquiry(sf, nprivp, target) ==
3731 				    0)) {
3732 					mutex_enter(&sf->sf_mutex);
3733 					if (sf->sf_lip_cnt == privp->
3734 					    lip_cnt) {
3735 						sf->sf_device_count --;
3736 					}
3737 					sf_offline_target(sf, target);
3738 					mutex_exit(&sf->sf_mutex);
3739 				}
3740 			}
3741 			sf_els_free(fpkt);
3742 			return;
3743 		} else {
3744 			SF_DEBUG(1, (sf, CE_CONT,
3745 			    "!REPORTLUN al_pa %x fcp failure, "
3746 			    "fcp_rsp_code %x scsi status %x\n",
3747 			    privp->dest_nport_id, bep->rsp_code,
3748 			    rsp ? rsp->fcp_u.fcp_status.scsi_status:0));
3749 			goto fail;
3750 		}
3751 	}
3752 	if (rsp && ((rsp->fcp_u.fcp_status.scsi_status == STATUS_BUSY) ||
3753 	    (rsp->fcp_u.fcp_status.scsi_status == STATUS_QFULL))) {
3754 		delayed_retry = 1;
3755 	}
3756 
3757 	if (++(privp->retries) < sf_els_retries ||
3758 	    (delayed_retry && privp->retries < SF_BSY_RETRIES)) {
3759 /* XXXXXX The following is here to handle broken targets -- remove it later */
3760 retry:
3761 /* XXXXXX */
3762 		if (delayed_retry) {
3763 			privp->retries--;
3764 			privp->timeout = sf_watchdog_time + SF_BSY_TIMEOUT;
3765 			privp->delayed_retry = 1;
3766 		} else {
3767 			privp->timeout = sf_watchdog_time + SF_FCP_TIMEOUT;
3768 		}
3769 
3770 		privp->prev = NULL;
3771 		mutex_enter(&sf->sf_mutex);
3772 		if (privp->lip_cnt == sf->sf_lip_cnt) {
3773 			if (!delayed_retry)
3774 				SF_DEBUG(1, (sf, CE_WARN,
3775 				    "!REPORTLUN to al_pa %x failed, retrying\n",
3776 				    privp->dest_nport_id));
3777 			privp->next = sf->sf_els_list;
3778 			if (sf->sf_els_list != NULL)
3779 				sf->sf_els_list->prev = privp;
3780 			sf->sf_els_list = privp;
3781 			mutex_exit(&sf->sf_mutex);
3782 			if (!delayed_retry && soc_transport(sf->sf_sochandle,
3783 			    fpkt, FCAL_NOSLEEP, CQ_REQUEST_1) !=
3784 			    FCAL_TRANSPORT_SUCCESS) {
3785 				mutex_enter(&sf->sf_mutex);
3786 				if (privp->prev)
3787 					privp->prev->next = privp->next;
3788 				if (privp->next)
3789 					privp->next->prev = privp->prev;
3790 				if (sf->sf_els_list == privp)
3791 					sf->sf_els_list = privp->next;
3792 				mutex_exit(&sf->sf_mutex);
3793 				goto fail;
3794 			} else
3795 				return;
3796 		} else {
3797 			mutex_exit(&sf->sf_mutex);
3798 		}
3799 	} else {
3800 fail:
3801 
3802 		/* REPORT_LUN failed -- try inquiry */
3803 		if (sf_do_inquiry(sf, privp, target) != 0) {
3804 			return;
3805 		} else {
3806 			free_pkt = 0;
3807 		}
3808 		mutex_enter(&sf->sf_mutex);
3809 		if (sf->sf_lip_cnt == privp->lip_cnt) {
3810 			sf_log(sf, CE_WARN,
3811 			    "!REPORTLUN to target 0x%x failed\n",
3812 			    sf_alpa_to_switch[privp->dest_nport_id]);
3813 			sf_offline_target(sf, target);
3814 			sf->sf_device_count--;
3815 			ASSERT(sf->sf_device_count >= 0);
3816 			if (sf->sf_device_count == 0)
3817 			sf_finish_init(sf, privp->lip_cnt);
3818 		}
3819 		mutex_exit(&sf->sf_mutex);
3820 	}
3821 	if (free_pkt) {
3822 		sf_els_free(fpkt);
3823 	}
3824 }
3825 
3826 static int
3827 sf_do_inquiry(struct sf *sf, struct sf_els_hdr *privp,
3828     struct sf_target *target)
3829 {
3830 	struct	fcal_packet	*fpkt = privp->fpkt;
3831 	ddi_dma_cookie_t	pcookie;
3832 	ddi_dma_handle_t	inq_dma_handle = NULL;
3833 	ddi_acc_handle_t	inq_acc_handle;
3834 	uint_t			ccount;
3835 	size_t			real_size;
3836 	caddr_t			inq_buf = NULL;
3837 	int			handle_bound = FALSE;
3838 	fc_frame_header_t *hp = &fpkt->fcal_socal_request.sr_fc_frame_hdr;
3839 	struct fcp_cmd		*inq = (struct fcp_cmd *)privp->cmd;
3840 	char			*msg = "Transport";
3841 
3842 
3843 	if (ddi_dma_alloc_handle(sf->sf_dip, sf->sf_sochandle->fcal_dmaattr,
3844 	    DDI_DMA_DONTWAIT, NULL, &inq_dma_handle) != DDI_SUCCESS) {
3845 		msg = "ddi_dma_alloc_handle()";
3846 		goto fail;
3847 	}
3848 
3849 	if (ddi_dma_mem_alloc(inq_dma_handle, SUN_INQSIZE,
3850 	    sf->sf_sochandle->fcal_accattr, DDI_DMA_CONSISTENT,
3851 	    DDI_DMA_DONTWAIT, NULL, &inq_buf,
3852 	    &real_size, &inq_acc_handle) != DDI_SUCCESS) {
3853 		msg = "ddi_dma_mem_alloc()";
3854 		goto fail;
3855 	}
3856 
3857 	if (real_size < SUN_INQSIZE) {
3858 		msg = "DMA mem < inquiry size";
3859 		goto fail;
3860 	}
3861 
3862 	if (ddi_dma_addr_bind_handle(inq_dma_handle, NULL,
3863 	    inq_buf, real_size, DDI_DMA_READ | DDI_DMA_CONSISTENT,
3864 	    DDI_DMA_DONTWAIT, NULL, &pcookie, &ccount) != DDI_DMA_MAPPED) {
3865 		msg = "ddi_dma_addr_bind_handle()";
3866 		goto fail;
3867 	}
3868 	handle_bound = TRUE;
3869 
3870 	if (ccount != 1) {
3871 		msg = "ccount != 1";
3872 		goto fail;
3873 	}
3874 	privp->els_code = 0;			/* not an ELS command */
3875 	privp->target = target;
3876 	privp->data_dma_handle = inq_dma_handle;
3877 	privp->data_acc_handle = inq_acc_handle;
3878 	privp->data_buf = inq_buf;
3879 	fpkt->fcal_pkt_comp = sf_inq_callback;
3880 	fpkt->fcal_socal_request.sr_soc_hdr.sh_seg_cnt = 3;
3881 	fpkt->fcal_socal_request.sr_cqhdr.cq_hdr_type = CQ_TYPE_IO_READ;
3882 	fpkt->fcal_socal_request.sr_dataseg[0].fc_count =
3883 	    sizeof (struct fcp_cmd);
3884 	fpkt->fcal_socal_request.sr_dataseg[2].fc_base =
3885 	    (uint32_t)pcookie.dmac_address;
3886 	fpkt->fcal_socal_request.sr_dataseg[2].fc_count = pcookie.dmac_size;
3887 	fpkt->fcal_socal_request.sr_soc_hdr.sh_byte_cnt = pcookie.dmac_size;
3888 	hp->r_ctl = R_CTL_COMMAND;
3889 	hp->type = TYPE_SCSI_FCP;
3890 	bzero((caddr_t)inq, sizeof (struct fcp_cmd));
3891 	((union scsi_cdb *)inq->fcp_cdb)->scc_cmd = SCMD_INQUIRY;
3892 	((union scsi_cdb *)inq->fcp_cdb)->g0_count0 = SUN_INQSIZE;
3893 	bcopy((caddr_t)&target->sft_lun.b, (caddr_t)&inq->fcp_ent_addr,
3894 	    FCP_LUN_SIZE);
3895 	inq->fcp_cntl.cntl_read_data = 1;
3896 	inq->fcp_cntl.cntl_write_data = 0;
3897 	inq->fcp_data_len = pcookie.dmac_size;
3898 	inq->fcp_cntl.cntl_qtype = FCP_QTYPE_SIMPLE;
3899 
3900 	(void) ddi_dma_sync(inq_dma_handle, (off_t)0, (size_t)0,
3901 	    DDI_DMA_SYNC_FORDEV);
3902 	privp->timeout = sf_watchdog_time + SF_FCP_TIMEOUT;
3903 	SF_DEBUG(5, (sf, CE_WARN,
3904 	    "!Sending INQUIRY to al_pa %x lun %" PRIx64 "\n",
3905 	    privp->dest_nport_id,
3906 	    SCSA_LUN(target)));
3907 	return (sf_els_transport(sf, privp));
3908 
3909 fail:
3910 	sf_log(sf, CE_WARN,
3911 	    "%s failure for INQUIRY to target 0x%x\n",
3912 	    msg, sf_alpa_to_switch[privp->dest_nport_id]);
3913 	sf_els_free(fpkt);
3914 	if (inq_dma_handle != NULL) {
3915 		if (handle_bound) {
3916 			(void) ddi_dma_unbind_handle(inq_dma_handle);
3917 		}
3918 		ddi_dma_free_handle(&inq_dma_handle);
3919 	}
3920 	if (inq_buf != NULL) {
3921 		ddi_dma_mem_free(&inq_acc_handle);
3922 	}
3923 	return (FALSE);
3924 }
3925 
3926 
3927 /*
3928  * called as the pkt_comp routine for INQ packets
3929  */
3930 static void
3931 sf_inq_callback(struct fcal_packet *fpkt)
3932 {
3933 	struct sf_els_hdr *privp = (struct sf_els_hdr *)fpkt->
3934 	    fcal_pkt_private;
3935 	struct scsi_inquiry *prt = (struct scsi_inquiry *)privp->data_buf;
3936 	struct sf *sf = privp->sf;
3937 	struct sf *tsf;
3938 	struct sf_target *target = privp->target;
3939 	struct fcp_rsp *rsp;
3940 	int delayed_retry = FALSE;
3941 	short	ncmds;
3942 
3943 
3944 	mutex_enter(&sf->sf_mutex);
3945 	/* use as temporary state variable */
3946 	if (privp->timeout == SF_INVALID_TIMEOUT) {
3947 		mutex_exit(&sf->sf_mutex);
3948 		return;
3949 	}
3950 	if (privp->prev != NULL) {
3951 		privp->prev->next = privp->next;
3952 	}
3953 	if (privp->next != NULL) {
3954 		privp->next->prev = privp->prev;
3955 	}
3956 	if (sf->sf_els_list == privp) {
3957 		sf->sf_els_list = privp->next;
3958 	}
3959 	privp->prev = privp->next = NULL;
3960 	mutex_exit(&sf->sf_mutex);
3961 	ncmds = fpkt->fcal_ncmds;
3962 	ASSERT(ncmds >= 0);
3963 	mutex_enter(&sf->sf_cmd_mutex);
3964 	sf->sf_ncmds = ncmds;
3965 	mutex_exit(&sf->sf_cmd_mutex);
3966 
3967 	if (fpkt->fcal_pkt_status == FCAL_STATUS_OK) {
3968 
3969 		(void) ddi_dma_sync(privp->rsp_dma_handle, (off_t)0,
3970 		    (size_t)0, DDI_DMA_SYNC_FORKERNEL);
3971 
3972 		rsp = (struct fcp_rsp *)privp->rsp;
3973 		SF_DEBUG(2, (sf, CE_CONT,
3974 		    "!INQUIRY to al_pa %x scsi status %x",
3975 		    privp->dest_nport_id, rsp->fcp_u.fcp_status.scsi_status));
3976 
3977 		if ((rsp->fcp_u.fcp_status.scsi_status == STATUS_GOOD) &&
3978 		    !rsp->fcp_u.fcp_status.resid_over &&
3979 		    (!rsp->fcp_u.fcp_status.resid_under ||
3980 		    ((SUN_INQSIZE - rsp->fcp_resid) >= SUN_MIN_INQLEN))) {
3981 			struct fcp_rsp_info *bep;
3982 
3983 			bep = (struct fcp_rsp_info *)(&rsp->
3984 			    fcp_response_len + 1);
3985 
3986 			if (!rsp->fcp_u.fcp_status.rsp_len_set ||
3987 			    (bep->rsp_code == FCP_NO_FAILURE)) {
3988 
3989 				SF_DEBUG(2, (sf, CE_CONT,
3990 				    "!INQUIRY to al_pa %x lun %" PRIx64
3991 				    " succeeded\n",
3992 				    privp->dest_nport_id, SCSA_LUN(target)));
3993 
3994 				(void) ddi_dma_sync(privp->data_dma_handle,
3995 				    (off_t)0, (size_t)0,
3996 				    DDI_DMA_SYNC_FORKERNEL);
3997 
3998 				mutex_enter(&sf->sf_mutex);
3999 
4000 				if (sf->sf_lip_cnt == privp->lip_cnt) {
4001 					mutex_enter(&target->sft_mutex);
4002 					target->sft_device_type =
4003 					    prt->inq_dtype;
4004 					bcopy(prt, &target->sft_inq,
4005 					    sizeof (*prt));
4006 					mutex_exit(&target->sft_mutex);
4007 					sf->sf_device_count--;
4008 					ASSERT(sf->sf_device_count >= 0);
4009 					if (sf->sf_device_count == 0) {
4010 						sf_finish_init(sf,
4011 						    privp->lip_cnt);
4012 					}
4013 				}
4014 				mutex_exit(&sf->sf_mutex);
4015 				sf_els_free(fpkt);
4016 				return;
4017 			}
4018 		} else if ((rsp->fcp_u.fcp_status.scsi_status ==
4019 		    STATUS_BUSY) ||
4020 		    (rsp->fcp_u.fcp_status.scsi_status == STATUS_QFULL) ||
4021 		    (rsp->fcp_u.fcp_status.scsi_status == STATUS_CHECK)) {
4022 			delayed_retry = TRUE;
4023 		}
4024 	} else {
4025 		SF_DEBUG(2, (sf, CE_CONT, "!INQUIRY to al_pa %x fc status %x",
4026 		    privp->dest_nport_id, fpkt->fcal_pkt_status));
4027 	}
4028 
4029 	if (++(privp->retries) < sf_els_retries ||
4030 	    (delayed_retry && privp->retries < SF_BSY_RETRIES)) {
4031 		if (fpkt->fcal_pkt_status == FCAL_STATUS_MAX_XCHG_EXCEEDED)  {
4032 			tsf = sf->sf_sibling;
4033 			if (tsf != NULL) {
4034 				mutex_enter(&tsf->sf_cmd_mutex);
4035 				tsf->sf_flag = 1;
4036 				tsf->sf_throttle = SF_DECR_DELTA;
4037 				mutex_exit(&tsf->sf_cmd_mutex);
4038 			}
4039 			delayed_retry = 1;
4040 		}
4041 		if (delayed_retry) {
4042 			privp->retries--;
4043 			privp->timeout = sf_watchdog_time + SF_BSY_TIMEOUT;
4044 			privp->delayed_retry = TRUE;
4045 		} else {
4046 			privp->timeout = sf_watchdog_time + SF_FCP_TIMEOUT;
4047 		}
4048 
4049 		privp->prev = NULL;
4050 		mutex_enter(&sf->sf_mutex);
4051 		if (privp->lip_cnt == sf->sf_lip_cnt) {
4052 			if (!delayed_retry) {
4053 				SF_DEBUG(1, (sf, CE_WARN,
4054 				    "INQUIRY to al_pa %x failed, retrying",
4055 				    privp->dest_nport_id));
4056 			}
4057 			privp->next = sf->sf_els_list;
4058 			if (sf->sf_els_list != NULL) {
4059 				sf->sf_els_list->prev = privp;
4060 			}
4061 			sf->sf_els_list = privp;
4062 			mutex_exit(&sf->sf_mutex);
4063 			/* if not delayed call transport to send a pkt */
4064 			if (!delayed_retry &&
4065 			    (soc_transport(sf->sf_sochandle, fpkt,
4066 			    FCAL_NOSLEEP, CQ_REQUEST_1) !=
4067 			    FCAL_TRANSPORT_SUCCESS)) {
4068 				mutex_enter(&sf->sf_mutex);
4069 				if (privp->prev != NULL) {
4070 					privp->prev->next = privp->next;
4071 				}
4072 				if (privp->next != NULL) {
4073 					privp->next->prev = privp->prev;
4074 				}
4075 				if (sf->sf_els_list == privp) {
4076 					sf->sf_els_list = privp->next;
4077 				}
4078 				mutex_exit(&sf->sf_mutex);
4079 				goto fail;
4080 			}
4081 			return;
4082 		}
4083 		mutex_exit(&sf->sf_mutex);
4084 	} else {
4085 fail:
4086 		mutex_enter(&sf->sf_mutex);
4087 		if (sf->sf_lip_cnt == privp->lip_cnt) {
4088 			sf_offline_target(sf, target);
4089 			sf_log(sf, CE_NOTE,
4090 			    "INQUIRY to target 0x%x lun %" PRIx64 " failed. "
4091 			    "Retry Count: %d\n",
4092 			    sf_alpa_to_switch[privp->dest_nport_id],
4093 			    SCSA_LUN(target),
4094 			    privp->retries);
4095 			sf->sf_device_count--;
4096 			ASSERT(sf->sf_device_count >= 0);
4097 			if (sf->sf_device_count == 0) {
4098 				sf_finish_init(sf, privp->lip_cnt);
4099 			}
4100 		}
4101 		mutex_exit(&sf->sf_mutex);
4102 	}
4103 	sf_els_free(fpkt);
4104 }
4105 
4106 
4107 static void
4108 sf_finish_init(struct sf *sf, int lip_cnt)
4109 {
4110 	int			i;		/* loop index */
4111 	int			cflag;
4112 	struct sf_target	*target;	/* current target */
4113 	dev_info_t		*dip;
4114 	struct sf_hp_elem	*elem;		/* hotplug element created */
4115 
4116 	SF_DEBUG(1, (sf, CE_WARN, "!sf_finish_init\n"));
4117 	ASSERT(mutex_owned(&sf->sf_mutex));
4118 
4119 	/* scan all hash queues */
4120 	for (i = 0; i < SF_NUM_HASH_QUEUES; i++) {
4121 		target = sf->sf_wwn_lists[i];
4122 		while (target != NULL) {
4123 			mutex_enter(&target->sft_mutex);
4124 
4125 			/* see if target is not offline */
4126 			if ((target->sft_state & SF_TARGET_OFFLINE)) {
4127 				/*
4128 				 * target already offline
4129 				 */
4130 				mutex_exit(&target->sft_mutex);
4131 				goto next_entry;
4132 			}
4133 
4134 			/*
4135 			 * target is not already offline -- see if it has
4136 			 * already been marked as ready to go offline
4137 			 */
4138 			if (target->sft_state & SF_TARGET_MARK) {
4139 				/*
4140 				 * target already marked, so take it offline
4141 				 */
4142 				mutex_exit(&target->sft_mutex);
4143 				sf_offline_target(sf, target);
4144 				goto next_entry;
4145 			}
4146 
4147 			/* clear target busy flag */
4148 			target->sft_state &= ~SF_TARGET_BUSY;
4149 
4150 			/* is target init not yet done ?? */
4151 			cflag = !(target->sft_state & SF_TARGET_INIT_DONE);
4152 
4153 			/* get pointer to target dip */
4154 			dip = target->sft_dip;
4155 
4156 			mutex_exit(&target->sft_mutex);
4157 			mutex_exit(&sf->sf_mutex);
4158 
4159 			if (cflag && (dip == NULL)) {
4160 				/*
4161 				 * target init not yet done &&
4162 				 * devinfo not yet created
4163 				 */
4164 				sf_create_devinfo(sf, target, lip_cnt);
4165 				mutex_enter(&sf->sf_mutex);
4166 				goto next_entry;
4167 			}
4168 
4169 			/*
4170 			 * target init already done || devinfo already created
4171 			 */
4172 			ASSERT(dip != NULL);
4173 			if (!sf_create_props(dip, target, lip_cnt)) {
4174 				/* a problem creating properties */
4175 				mutex_enter(&sf->sf_mutex);
4176 				goto next_entry;
4177 			}
4178 
4179 			/* create a new element for the hotplug list */
4180 			if ((elem = kmem_zalloc(sizeof (struct sf_hp_elem),
4181 			    KM_NOSLEEP)) != NULL) {
4182 
4183 				/* fill in the new element */
4184 				elem->dip = dip;
4185 				elem->target = target;
4186 				elem->what = SF_ONLINE;
4187 
4188 				/* add the new element into the hotplug list */
4189 				mutex_enter(&sf->sf_hp_daemon_mutex);
4190 				if (sf->sf_hp_elem_tail != NULL) {
4191 					sf->sf_hp_elem_tail->next = elem;
4192 					sf->sf_hp_elem_tail = elem;
4193 				} else {
4194 					/* this is the first element in list */
4195 					sf->sf_hp_elem_head =
4196 					    sf->sf_hp_elem_tail =
4197 					    elem;
4198 				}
4199 				cv_signal(&sf->sf_hp_daemon_cv);
4200 				mutex_exit(&sf->sf_hp_daemon_mutex);
4201 			} else {
4202 				/* could not allocate memory for element ?? */
4203 				(void) ndi_devi_online_async(dip, 0);
4204 			}
4205 
4206 			mutex_enter(&sf->sf_mutex);
4207 
4208 next_entry:
4209 			/* ensure no new LIPs have occurred */
4210 			if (sf->sf_lip_cnt != lip_cnt) {
4211 				return;
4212 			}
4213 			target = target->sft_next;
4214 		}
4215 
4216 		/* done scanning all targets in this queue */
4217 	}
4218 
4219 	/* done with all hash queues */
4220 
4221 	sf->sf_state = SF_STATE_ONLINE;
4222 	sf->sf_online_timer = 0;
4223 }
4224 
4225 
4226 /*
4227  * create devinfo node
4228  */
4229 static void
4230 sf_create_devinfo(struct sf *sf, struct sf_target *target, int lip_cnt)
4231 {
4232 	dev_info_t		*cdip = NULL;
4233 	char			*nname = NULL;
4234 	char			**compatible = NULL;
4235 	int			ncompatible;
4236 	struct scsi_inquiry	*inq = &target->sft_inq;
4237 	char			*scsi_binding_set;
4238 
4239 	/* get the 'scsi-binding-set' property */
4240 	if (ddi_prop_lookup_string(DDI_DEV_T_ANY, sf->sf_dip,
4241 	    DDI_PROP_NOTPROM | DDI_PROP_DONTPASS, "scsi-binding-set",
4242 	    &scsi_binding_set) != DDI_PROP_SUCCESS)
4243 		scsi_binding_set = NULL;
4244 
4245 	/* determine the node name and compatible */
4246 	scsi_hba_nodename_compatible_get(inq, scsi_binding_set,
4247 	    inq->inq_dtype, NULL, &nname, &compatible, &ncompatible);
4248 	if (scsi_binding_set)
4249 		ddi_prop_free(scsi_binding_set);
4250 
4251 	/* if nodename can't be determined then print a message and skip it */
4252 	if (nname == NULL) {
4253 #ifndef	RAID_LUNS
4254 		sf_log(sf, CE_WARN, "%s%d: no driver for device "
4255 		    "@w%02x%02x%02x%02x%02x%02x%02x%02x,%x\n"
4256 		    "    compatible: %s",
4257 		    ddi_driver_name(sf->sf_dip), ddi_get_instance(sf->sf_dip),
4258 		    target->sft_port_wwn[0], target->sft_port_wwn[1],
4259 		    target->sft_port_wwn[2], target->sft_port_wwn[3],
4260 		    target->sft_port_wwn[4], target->sft_port_wwn[5],
4261 		    target->sft_port_wwn[6], target->sft_port_wwn[7],
4262 		    target->sft_lun.l, *compatible);
4263 #else
4264 		sf_log(sf, CE_WARN, "%s%d: no driver for device "
4265 		    "@w%02x%02x%02x%02x%02x%02x%02x%02x,%x\n"
4266 		    "    compatible: %s",
4267 		    ddi_driver_name(sf->sf_dip), ddi_get_instance(sf->sf_dip),
4268 		    target->sft_port_wwn[0], target->sft_port_wwn[1],
4269 		    target->sft_port_wwn[2], target->sft_port_wwn[3],
4270 		    target->sft_port_wwn[4], target->sft_port_wwn[5],
4271 		    target->sft_port_wwn[6], target->sft_port_wwn[7],
4272 		    target->sft_raid_lun, *compatible);
4273 #endif
4274 		goto fail;
4275 	}
4276 
4277 	/* allocate the node */
4278 	if (ndi_devi_alloc(sf->sf_dip, nname,
4279 	    DEVI_SID_NODEID, &cdip) != NDI_SUCCESS) {
4280 		goto fail;
4281 	}
4282 
4283 	/* decorate the node with compatible */
4284 	if (ndi_prop_update_string_array(DDI_DEV_T_NONE, cdip,
4285 	    "compatible", compatible, ncompatible) != DDI_PROP_SUCCESS) {
4286 		goto fail;
4287 	}
4288 
4289 	/* add addressing properties to the node */
4290 	if (sf_create_props(cdip, target, lip_cnt) != 1) {
4291 		goto fail;
4292 	}
4293 
4294 	mutex_enter(&target->sft_mutex);
4295 	if (target->sft_dip != NULL) {
4296 		mutex_exit(&target->sft_mutex);
4297 		goto fail;
4298 	}
4299 	target->sft_dip = cdip;
4300 	mutex_exit(&target->sft_mutex);
4301 
4302 	if (ndi_devi_online_async(cdip, 0) != DDI_SUCCESS) {
4303 		goto fail;
4304 	}
4305 
4306 	scsi_hba_nodename_compatible_free(nname, compatible);
4307 	return;
4308 
4309 fail:
4310 	scsi_hba_nodename_compatible_free(nname, compatible);
4311 	if (cdip != NULL) {
4312 		(void) ndi_prop_remove(DDI_DEV_T_NONE, cdip, NODE_WWN_PROP);
4313 		(void) ndi_prop_remove(DDI_DEV_T_NONE, cdip, PORT_WWN_PROP);
4314 		(void) ndi_prop_remove(DDI_DEV_T_NONE, cdip, LIP_CNT_PROP);
4315 		(void) ndi_prop_remove(DDI_DEV_T_NONE, cdip, TARGET_PROP);
4316 		(void) ndi_prop_remove(DDI_DEV_T_NONE, cdip, LUN_PROP);
4317 		if (ndi_devi_free(cdip) != NDI_SUCCESS) {
4318 			sf_log(sf, CE_WARN, "ndi_devi_free failed\n");
4319 		} else {
4320 			mutex_enter(&target->sft_mutex);
4321 			if (cdip == target->sft_dip) {
4322 				target->sft_dip = NULL;
4323 			}
4324 			mutex_exit(&target->sft_mutex);
4325 		}
4326 	}
4327 }
4328 
4329 /*
4330  * create required properties, returning TRUE iff we succeed, else
4331  * returning FALSE
4332  */
4333 static int
4334 sf_create_props(dev_info_t *cdip, struct sf_target *target, int lip_cnt)
4335 {
4336 	int tgt_id = sf_alpa_to_switch[target->sft_al_pa];
4337 
4338 
4339 	if (ndi_prop_update_byte_array(DDI_DEV_T_NONE,
4340 	    cdip, NODE_WWN_PROP, target->sft_node_wwn, FC_WWN_SIZE) !=
4341 	    DDI_PROP_SUCCESS) {
4342 		return (FALSE);
4343 	}
4344 
4345 	if (ndi_prop_update_byte_array(DDI_DEV_T_NONE,
4346 	    cdip, PORT_WWN_PROP, target->sft_port_wwn, FC_WWN_SIZE) !=
4347 	    DDI_PROP_SUCCESS) {
4348 		return (FALSE);
4349 	}
4350 
4351 	if (ndi_prop_update_int(DDI_DEV_T_NONE,
4352 	    cdip, LIP_CNT_PROP, lip_cnt) != DDI_PROP_SUCCESS) {
4353 		return (FALSE);
4354 	}
4355 
4356 	if (ndi_prop_update_int(DDI_DEV_T_NONE,
4357 	    cdip, TARGET_PROP, tgt_id) != DDI_PROP_SUCCESS) {
4358 		return (FALSE);
4359 	}
4360 
4361 #ifndef	RAID_LUNS
4362 	if (ndi_prop_update_int(DDI_DEV_T_NONE,
4363 	    cdip, LUN_PROP, target->sft_lun.l) != DDI_PROP_SUCCESS) {
4364 		return (0);
4365 	}
4366 #else
4367 	if (ndi_prop_update_int(DDI_DEV_T_NONE,
4368 	    cdip, LUN_PROP, target->sft_raid_lun) != DDI_PROP_SUCCESS) {
4369 		return (0);
4370 	}
4371 #endif
4372 
4373 	return (TRUE);
4374 }
4375 
4376 
4377 /*
4378  * called by the transport to offline a target
4379  */
4380 /* ARGSUSED */
4381 static void
4382 sf_offline_target(struct sf *sf, struct sf_target *target)
4383 {
4384 	dev_info_t *dip;
4385 	struct sf_target *next_target = NULL;
4386 	struct sf_hp_elem	*elem;
4387 
4388 	ASSERT(mutex_owned(&sf->sf_mutex));
4389 
4390 	if (sf_core && (sf_core & SF_CORE_OFFLINE_TARGET)) {
4391 		(void) soc_take_core(sf->sf_sochandle, sf->sf_socp);
4392 		sf_core = 0;
4393 	}
4394 
4395 	while (target != NULL) {
4396 		sf_log(sf, CE_NOTE,
4397 		    "!target 0x%x al_pa 0x%x lun %" PRIx64 " offlined\n",
4398 		    sf_alpa_to_switch[target->sft_al_pa],
4399 		    target->sft_al_pa, SCSA_LUN(target));
4400 		mutex_enter(&target->sft_mutex);
4401 		target->sft_state &= ~(SF_TARGET_BUSY|SF_TARGET_MARK);
4402 		target->sft_state |= SF_TARGET_OFFLINE;
4403 		mutex_exit(&target->sft_mutex);
4404 		mutex_exit(&sf->sf_mutex);
4405 
4406 		/* XXXX if this is LUN 0, offline all other LUNs */
4407 		if (next_target || target->sft_lun.l == 0)
4408 			next_target = target->sft_next_lun;
4409 
4410 		/* abort all cmds for this target */
4411 		sf_abort_all(sf, target, FALSE, sf->sf_lip_cnt, FALSE);
4412 
4413 		mutex_enter(&sf->sf_mutex);
4414 		mutex_enter(&target->sft_mutex);
4415 		if (target->sft_state & SF_TARGET_INIT_DONE) {
4416 			dip = target->sft_dip;
4417 			mutex_exit(&target->sft_mutex);
4418 			mutex_exit(&sf->sf_mutex);
4419 			(void) ndi_prop_remove(DDI_DEV_T_NONE, dip,
4420 			    TARGET_PROP);
4421 			(void) ndi_event_retrieve_cookie(sf->sf_event_hdl,
4422 			    dip, FCAL_REMOVE_EVENT, &sf_remove_eid,
4423 			    NDI_EVENT_NOPASS);
4424 			(void) ndi_event_run_callbacks(sf->sf_event_hdl,
4425 			    target->sft_dip, sf_remove_eid, NULL);
4426 
4427 			elem = kmem_zalloc(sizeof (struct sf_hp_elem),
4428 			    KM_NOSLEEP);
4429 			if (elem != NULL) {
4430 				elem->dip = dip;
4431 				elem->target = target;
4432 				elem->what = SF_OFFLINE;
4433 				mutex_enter(&sf->sf_hp_daemon_mutex);
4434 				if (sf->sf_hp_elem_tail != NULL) {
4435 					sf->sf_hp_elem_tail->next = elem;
4436 					sf->sf_hp_elem_tail = elem;
4437 				} else {
4438 					sf->sf_hp_elem_head =
4439 					    sf->sf_hp_elem_tail =
4440 					    elem;
4441 				}
4442 				cv_signal(&sf->sf_hp_daemon_cv);
4443 				mutex_exit(&sf->sf_hp_daemon_mutex);
4444 			} else {
4445 				/* don't do NDI_DEVI_REMOVE for now */
4446 				if (ndi_devi_offline(dip, 0) != NDI_SUCCESS) {
4447 					SF_DEBUG(1, (sf, CE_WARN,
4448 					    "target %x lun %" PRIx64 ", "
4449 					    "device offline failed",
4450 					    sf_alpa_to_switch[target->
4451 					    sft_al_pa],
4452 					    SCSA_LUN(target)));
4453 				} else {
4454 					SF_DEBUG(1, (sf, CE_NOTE,
4455 					    "target %x, lun %" PRIx64 ", "
4456 					    "device offline succeeded\n",
4457 					    sf_alpa_to_switch[target->
4458 					    sft_al_pa],
4459 					    SCSA_LUN(target)));
4460 				}
4461 			}
4462 			mutex_enter(&sf->sf_mutex);
4463 		} else {
4464 			mutex_exit(&target->sft_mutex);
4465 		}
4466 		target = next_target;
4467 	}
4468 }
4469 
4470 
4471 /*
4472  * routine to get/set a capability
4473  *
4474  * returning:
4475  *	1 (TRUE)	boolean capability is true (on get)
4476  *	0 (FALSE)	invalid capability, can't set capability (on set),
4477  *			or boolean capability is false (on get)
4478  *	-1 (UNDEFINED)	can't find capability (SCSA) or unsupported capability
4479  *	3		when getting SCSI version number
4480  *	AL_PA		when getting port initiator ID
4481  */
4482 static int
4483 sf_commoncap(struct scsi_address *ap, char *cap,
4484     int val, int tgtonly, int doset)
4485 {
4486 	struct sf *sf = ADDR2SF(ap);
4487 	int cidx;
4488 	int rval = FALSE;
4489 
4490 
4491 	if (cap == NULL) {
4492 		SF_DEBUG(3, (sf, CE_WARN, "sf_commoncap: invalid arg"));
4493 		return (rval);
4494 	}
4495 
4496 	/* get index of capability string */
4497 	if ((cidx = scsi_hba_lookup_capstr(cap)) == -1) {
4498 		/* can't find capability */
4499 		return (UNDEFINED);
4500 	}
4501 
4502 	if (doset) {
4503 		/*
4504 		 * Process setcap request.
4505 		 */
4506 
4507 		/*
4508 		 * At present, we can only set binary (0/1) values
4509 		 */
4510 		switch (cidx) {
4511 		case SCSI_CAP_ARQ:	/* can't set this capability */
4512 			break;
4513 		default:
4514 			SF_DEBUG(3, (sf, CE_WARN,
4515 			    "sf_setcap: unsupported %d", cidx));
4516 			rval = UNDEFINED;
4517 			break;
4518 		}
4519 
4520 		SF_DEBUG(4, (sf, CE_NOTE,
4521 		    "set cap: cap=%s,val=0x%x,tgtonly=0x%x"
4522 		    ",doset=0x%x,rval=%d\n",
4523 		    cap, val, tgtonly, doset, rval));
4524 
4525 	} else {
4526 		/*
4527 		 * Process getcap request.
4528 		 */
4529 		switch (cidx) {
4530 		case SCSI_CAP_DMA_MAX:
4531 			break;		/* don't' have this capability */
4532 		case SCSI_CAP_INITIATOR_ID:
4533 			rval = sf->sf_al_pa;
4534 			break;
4535 		case SCSI_CAP_ARQ:
4536 			rval = TRUE;	/* do have this capability */
4537 			break;
4538 		case SCSI_CAP_RESET_NOTIFICATION:
4539 		case SCSI_CAP_TAGGED_QING:
4540 			rval = TRUE;	/* do have this capability */
4541 			break;
4542 		case SCSI_CAP_SCSI_VERSION:
4543 			rval = 3;
4544 			break;
4545 		case SCSI_CAP_INTERCONNECT_TYPE:
4546 			rval = INTERCONNECT_FIBRE;
4547 			break;
4548 		default:
4549 			SF_DEBUG(4, (sf, CE_WARN,
4550 			    "sf_scsi_getcap: unsupported"));
4551 			rval = UNDEFINED;
4552 			break;
4553 		}
4554 		SF_DEBUG(4, (sf, CE_NOTE,
4555 		    "get cap: cap=%s,val=0x%x,tgtonly=0x%x,"
4556 		    "doset=0x%x,rval=%d\n",
4557 		    cap, val, tgtonly, doset, rval));
4558 	}
4559 
4560 	return (rval);
4561 }
4562 
4563 
4564 /*
4565  * called by the transport to get a capability
4566  */
4567 static int
4568 sf_getcap(struct scsi_address *ap, char *cap, int whom)
4569 {
4570 	return (sf_commoncap(ap, cap, 0, whom, FALSE));
4571 }
4572 
4573 
4574 /*
4575  * called by the transport to set a capability
4576  */
4577 static int
4578 sf_setcap(struct scsi_address *ap, char *cap, int value, int whom)
4579 {
4580 	return (sf_commoncap(ap, cap, value, whom, TRUE));
4581 }
4582 
4583 
4584 /*
4585  * called by the transport to abort a target
4586  */
4587 static int
4588 sf_abort(struct scsi_address *ap, struct scsi_pkt *pkt)
4589 {
4590 	struct sf *sf = ADDR2SF(ap);
4591 	struct sf_target *target = ADDR2TARGET(ap);
4592 	struct sf_pkt *cmd, *ncmd, *pcmd;
4593 	struct fcal_packet *fpkt;
4594 	int	rval = 0, t, my_rval = FALSE;
4595 	int	old_target_state;
4596 	int	lip_cnt;
4597 	int	tgt_id;
4598 	fc_frame_header_t	*hp;
4599 	int	deferred_destroy;
4600 
4601 	deferred_destroy = 0;
4602 
4603 	if (pkt != NULL) {
4604 		cmd = PKT2CMD(pkt);
4605 		fpkt = cmd->cmd_fp_pkt;
4606 		SF_DEBUG(2, (sf, CE_NOTE, "sf_abort packet %p\n",
4607 		    (void *)fpkt));
4608 		pcmd = NULL;
4609 		mutex_enter(&sf->sf_cmd_mutex);
4610 		ncmd = sf->sf_pkt_head;
4611 		while (ncmd != NULL) {
4612 			if (ncmd == cmd) {
4613 				if (pcmd != NULL) {
4614 					pcmd->cmd_next = cmd->cmd_next;
4615 				} else {
4616 					sf->sf_pkt_head = cmd->cmd_next;
4617 				}
4618 				cmd->cmd_flags &= ~CFLAG_IN_QUEUE;
4619 				cmd->cmd_state = SF_STATE_IDLE;
4620 				pkt->pkt_reason = CMD_ABORTED;
4621 				pkt->pkt_statistics |= STAT_ABORTED;
4622 				my_rval = TRUE;
4623 				break;
4624 			} else {
4625 				pcmd = ncmd;
4626 				ncmd = ncmd->cmd_next;
4627 			}
4628 		}
4629 		mutex_exit(&sf->sf_cmd_mutex);
4630 		if (ncmd == NULL) {
4631 			mutex_enter(&cmd->cmd_abort_mutex);
4632 			if (cmd->cmd_state == SF_STATE_ISSUED) {
4633 				cmd->cmd_state = SF_STATE_ABORTING;
4634 				cmd->cmd_timeout = sf_watchdog_time + 20;
4635 				mutex_exit(&cmd->cmd_abort_mutex);
4636 				/* call transport to abort command */
4637 				if (((rval = soc_abort(sf->sf_sochandle,
4638 				    sf->sf_socp, sf->sf_sochandle->fcal_portno,
4639 				    fpkt, 1)) == FCAL_ABORTED) ||
4640 				    (rval == FCAL_ABORT_FAILED)) {
4641 					my_rval = TRUE;
4642 					pkt->pkt_reason = CMD_ABORTED;
4643 					pkt->pkt_statistics |= STAT_ABORTED;
4644 					cmd->cmd_state = SF_STATE_IDLE;
4645 				} else if (rval == FCAL_BAD_ABORT) {
4646 					cmd->cmd_timeout = sf_watchdog_time
4647 					    + 20;
4648 					my_rval = FALSE;
4649 				} else {
4650 					SF_DEBUG(1, (sf, CE_NOTE,
4651 					    "Command Abort failed\n"));
4652 				}
4653 			} else {
4654 				mutex_exit(&cmd->cmd_abort_mutex);
4655 			}
4656 		}
4657 	} else {
4658 		SF_DEBUG(2, (sf, CE_NOTE, "sf_abort target\n"));
4659 		mutex_enter(&sf->sf_mutex);
4660 		lip_cnt = sf->sf_lip_cnt;
4661 		mutex_enter(&target->sft_mutex);
4662 		if (target->sft_state & (SF_TARGET_BUSY |
4663 		    SF_TARGET_OFFLINE)) {
4664 			mutex_exit(&target->sft_mutex);
4665 			return (rval);
4666 		}
4667 		old_target_state = target->sft_state;
4668 		target->sft_state |= SF_TARGET_BUSY;
4669 		mutex_exit(&target->sft_mutex);
4670 		mutex_exit(&sf->sf_mutex);
4671 
4672 		if ((pkt = sf_scsi_init_pkt(ap, NULL, NULL, 0,
4673 		    0, 0, 0, NULL, 0)) != NULL) {
4674 
4675 			cmd = PKT2CMD(pkt);
4676 			cmd->cmd_block->fcp_cntl.cntl_abort_tsk = 1;
4677 			cmd->cmd_fp_pkt->fcal_pkt_comp = NULL;
4678 			cmd->cmd_pkt->pkt_flags |= FLAG_NOINTR;
4679 
4680 			/* prepare the packet for transport */
4681 			if (sf_prepare_pkt(sf, cmd, target) == TRAN_ACCEPT) {
4682 
4683 				cmd->cmd_state = SF_STATE_ISSUED;
4684 				/*
4685 				 * call transport to send a pkt polled
4686 				 *
4687 				 * if that fails call the transport to abort it
4688 				 */
4689 				if (soc_transport_poll(sf->sf_sochandle,
4690 				    cmd->cmd_fp_pkt, SF_ABORT_TIMEOUT,
4691 				    CQ_REQUEST_1) == FCAL_TRANSPORT_SUCCESS) {
4692 					(void) ddi_dma_sync(
4693 					    cmd->cmd_cr_pool->rsp_dma_handle,
4694 					    (off_t)
4695 					    ((caddr_t)cmd->cmd_rsp_block -
4696 					    cmd->cmd_cr_pool->rsp_base),
4697 					    FCP_MAX_RSP_IU_SIZE,
4698 					    DDI_DMA_SYNC_FORKERNEL);
4699 					if (((struct fcp_rsp_info *)
4700 					    (&cmd->cmd_rsp_block->
4701 					    fcp_response_len + 1))->
4702 					    rsp_code == FCP_NO_FAILURE) {
4703 						/* abort cmds for this targ */
4704 						sf_abort_all(sf, target, TRUE,
4705 						    lip_cnt, TRUE);
4706 					} else {
4707 						hp = &cmd->cmd_fp_pkt->
4708 						    fcal_socal_request.
4709 						    sr_fc_frame_hdr;
4710 						tgt_id = sf_alpa_to_switch[
4711 						    (uchar_t)hp->d_id];
4712 						sf->sf_stats.tstats[tgt_id].
4713 						    task_mgmt_failures++;
4714 						SF_DEBUG(1, (sf, CE_NOTE,
4715 						    "Target %d Abort Task "
4716 						    "Set failed\n", hp->d_id));
4717 					}
4718 				} else {
4719 					mutex_enter(&cmd->cmd_abort_mutex);
4720 					if (cmd->cmd_state == SF_STATE_ISSUED) {
4721 					cmd->cmd_state = SF_STATE_ABORTING;
4722 					cmd->cmd_timeout = sf_watchdog_time
4723 					    + 20;
4724 					mutex_exit(&cmd->cmd_abort_mutex);
4725 					if ((t = soc_abort(sf->sf_sochandle,
4726 					    sf->sf_socp, sf->sf_sochandle->
4727 					    fcal_portno, cmd->cmd_fp_pkt, 1)) !=
4728 					    FCAL_ABORTED &&
4729 					    (t != FCAL_ABORT_FAILED)) {
4730 						sf_log(sf, CE_NOTE,
4731 						    "sf_abort failed, "
4732 						    "initiating LIP\n");
4733 						sf_force_lip(sf);
4734 						deferred_destroy = 1;
4735 					}
4736 					} else {
4737 					mutex_exit(&cmd->cmd_abort_mutex);
4738 					}
4739 				}
4740 			}
4741 			if (!deferred_destroy) {
4742 				cmd->cmd_fp_pkt->fcal_pkt_comp =
4743 				    sf_cmd_callback;
4744 				cmd->cmd_block->fcp_cntl.cntl_abort_tsk = 0;
4745 				sf_scsi_destroy_pkt(ap, pkt);
4746 				my_rval = TRUE;
4747 			}
4748 		}
4749 		mutex_enter(&sf->sf_mutex);
4750 		if (lip_cnt == sf->sf_lip_cnt) {
4751 			mutex_enter(&target->sft_mutex);
4752 			target->sft_state = old_target_state;
4753 			mutex_exit(&target->sft_mutex);
4754 		}
4755 		mutex_exit(&sf->sf_mutex);
4756 	}
4757 	return (my_rval);
4758 }
4759 
4760 
4761 /*
4762  * called by the transport and internally to reset a target
4763  */
4764 static int
4765 sf_reset(struct scsi_address *ap, int level)
4766 {
4767 	struct scsi_pkt *pkt;
4768 	struct fcal_packet *fpkt;
4769 	struct sf *sf = ADDR2SF(ap);
4770 	struct sf_target *target = ADDR2TARGET(ap), *ntarget;
4771 	struct sf_pkt *cmd;
4772 	int	rval = FALSE, t;
4773 	int	lip_cnt;
4774 	int	tgt_id, ret;
4775 	fc_frame_header_t	*hp;
4776 	int	deferred_destroy;
4777 
4778 	/* We don't support RESET_LUN yet. */
4779 	if (level == RESET_TARGET) {
4780 		struct sf_reset_list *p;
4781 
4782 		if ((p = kmem_alloc(sizeof (struct sf_reset_list), KM_NOSLEEP))
4783 		    == NULL)
4784 			return (rval);
4785 
4786 		SF_DEBUG(2, (sf, CE_NOTE, "sf_reset target\n"));
4787 		mutex_enter(&sf->sf_mutex);
4788 		/* All target resets go to LUN 0 */
4789 		if (target->sft_lun.l) {
4790 			target = sf_lookup_target(sf, target->sft_port_wwn, 0);
4791 		}
4792 		mutex_enter(&target->sft_mutex);
4793 		if (target->sft_state & (SF_TARGET_BUSY |
4794 		    SF_TARGET_OFFLINE)) {
4795 			mutex_exit(&target->sft_mutex);
4796 			mutex_exit(&sf->sf_mutex);
4797 			kmem_free(p, sizeof (struct sf_reset_list));
4798 			return (rval);
4799 		}
4800 		lip_cnt = sf->sf_lip_cnt;
4801 		target->sft_state |= SF_TARGET_BUSY;
4802 		for (ntarget = target->sft_next_lun;
4803 		    ntarget;
4804 		    ntarget = ntarget->sft_next_lun) {
4805 			mutex_enter(&ntarget->sft_mutex);
4806 			/*
4807 			 * XXXX If we supported RESET_LUN we should check here
4808 			 * to see if any LUN were being reset and somehow fail
4809 			 * that operation.
4810 			 */
4811 			ntarget->sft_state |= SF_TARGET_BUSY;
4812 			mutex_exit(&ntarget->sft_mutex);
4813 		}
4814 		mutex_exit(&target->sft_mutex);
4815 		mutex_exit(&sf->sf_mutex);
4816 
4817 		deferred_destroy = 0;
4818 		if ((pkt = sf_scsi_init_pkt(ap, NULL, NULL, 0,
4819 		    0, 0, 0, NULL, 0)) != NULL) {
4820 			cmd = PKT2CMD(pkt);
4821 			cmd->cmd_block->fcp_cntl.cntl_reset = 1;
4822 			cmd->cmd_fp_pkt->fcal_pkt_comp = NULL;
4823 			cmd->cmd_pkt->pkt_flags |= FLAG_NOINTR;
4824 
4825 			/* prepare the packet for transport */
4826 			if (sf_prepare_pkt(sf, cmd, target) == TRAN_ACCEPT) {
4827 				/* call transport to send a pkt polled */
4828 				cmd->cmd_state = SF_STATE_ISSUED;
4829 				if ((ret = soc_transport_poll(sf->sf_sochandle,
4830 				    cmd->cmd_fp_pkt, SF_ABORT_TIMEOUT,
4831 				    CQ_REQUEST_1)) == FCAL_TRANSPORT_SUCCESS) {
4832 					(void) ddi_dma_sync(cmd->cmd_cr_pool->
4833 					    rsp_dma_handle, (caddr_t)cmd->
4834 					    cmd_rsp_block - cmd->cmd_cr_pool->
4835 					    rsp_base, FCP_MAX_RSP_IU_SIZE,
4836 					    DDI_DMA_SYNC_FORKERNEL);
4837 					fpkt = cmd->cmd_fp_pkt;
4838 					if ((fpkt->fcal_pkt_status ==
4839 					    FCAL_STATUS_OK) &&
4840 					    (((struct fcp_rsp_info *)
4841 					    (&cmd->cmd_rsp_block->
4842 					    fcp_response_len + 1))->
4843 					    rsp_code == FCP_NO_FAILURE)) {
4844 						sf_log(sf, CE_NOTE,
4845 						    "!sf%d: Target 0x%x Reset "
4846 						    "successful\n",
4847 						    ddi_get_instance(\
4848 						    sf->sf_dip),
4849 						    sf_alpa_to_switch[
4850 						    target->sft_al_pa]);
4851 						rval = TRUE;
4852 					} else {
4853 						hp = &cmd->cmd_fp_pkt->
4854 						    fcal_socal_request.
4855 						    sr_fc_frame_hdr;
4856 						tgt_id = sf_alpa_to_switch[
4857 						    (uchar_t)hp->d_id];
4858 						sf->sf_stats.tstats[tgt_id].
4859 						    task_mgmt_failures++;
4860 						sf_log(sf, CE_NOTE,
4861 						    "!sf%d: Target 0x%x "
4862 						    "Reset failed."
4863 						    "Status code 0x%x "
4864 						    "Resp code 0x%x\n",
4865 						    ddi_get_instance(\
4866 						    sf->sf_dip),
4867 						    tgt_id,
4868 						    fpkt->fcal_pkt_status,
4869 						    ((struct fcp_rsp_info *)
4870 						    (&cmd->cmd_rsp_block->
4871 						    fcp_response_len + 1))->
4872 						    rsp_code);
4873 					}
4874 				} else {
4875 					sf_log(sf, CE_NOTE, "!sf%d: Target "
4876 					    "0x%x Reset Failed. Ret=%x\n",
4877 					    ddi_get_instance(sf->sf_dip),
4878 					    sf_alpa_to_switch[
4879 					    target->sft_al_pa], ret);
4880 					mutex_enter(&cmd->cmd_abort_mutex);
4881 					if (cmd->cmd_state == SF_STATE_ISSUED) {
4882 					/* call the transport to abort a cmd */
4883 					cmd->cmd_timeout = sf_watchdog_time
4884 					    + 20;
4885 					cmd->cmd_state = SF_STATE_ABORTING;
4886 					mutex_exit(&cmd->cmd_abort_mutex);
4887 					if (((t = soc_abort(sf->sf_sochandle,
4888 					    sf->sf_socp,
4889 					    sf->sf_sochandle->fcal_portno,
4890 					    cmd->cmd_fp_pkt, 1)) !=
4891 					    FCAL_ABORTED) &&
4892 					    (t != FCAL_ABORT_FAILED)) {
4893 						sf_log(sf, CE_NOTE,
4894 						    "!sf%d: Target 0x%x Reset "
4895 						    "failed. Abort Failed, "
4896 						    "forcing LIP\n",
4897 						    ddi_get_instance(
4898 						    sf->sf_dip),
4899 						    sf_alpa_to_switch[
4900 						    target->sft_al_pa]);
4901 						sf_force_lip(sf);
4902 						rval = TRUE;
4903 						deferred_destroy = 1;
4904 					}
4905 					} else {
4906 						mutex_exit
4907 						    (&cmd->cmd_abort_mutex);
4908 					}
4909 				}
4910 			}
4911 			/*
4912 			 * Defer releasing the packet if we abort returned with
4913 			 * a BAD_ABORT or timed out, because there is a
4914 			 * possibility that the ucode might return it.
4915 			 * We wait for at least 20s and let it be released
4916 			 * by the sf_watch thread
4917 			 */
4918 			if (!deferred_destroy) {
4919 				cmd->cmd_block->fcp_cntl.cntl_reset = 0;
4920 				cmd->cmd_fp_pkt->fcal_pkt_comp =
4921 				    sf_cmd_callback;
4922 				cmd->cmd_state = SF_STATE_IDLE;
4923 				/* for cache */
4924 				sf_scsi_destroy_pkt(ap, pkt);
4925 			}
4926 		} else {
4927 			cmn_err(CE_WARN, "!sf%d: Target 0x%x Reset Failed. "
4928 			    "Resource allocation error.\n",
4929 			    ddi_get_instance(sf->sf_dip),
4930 			    sf_alpa_to_switch[target->sft_al_pa]);
4931 		}
4932 		mutex_enter(&sf->sf_mutex);
4933 		if ((rval == TRUE) && (lip_cnt == sf->sf_lip_cnt)) {
4934 			p->target = target;
4935 			p->lip_cnt = lip_cnt;
4936 			p->timeout = ddi_get_lbolt() +
4937 			    drv_usectohz(SF_TARGET_RESET_DELAY);
4938 			p->next = sf->sf_reset_list;
4939 			sf->sf_reset_list = p;
4940 			mutex_exit(&sf->sf_mutex);
4941 			mutex_enter(&sf_global_mutex);
4942 			if (sf_reset_timeout_id == 0) {
4943 				sf_reset_timeout_id = timeout(
4944 				    sf_check_reset_delay, NULL,
4945 				    drv_usectohz(SF_TARGET_RESET_DELAY));
4946 			}
4947 			mutex_exit(&sf_global_mutex);
4948 		} else {
4949 			if (lip_cnt == sf->sf_lip_cnt) {
4950 				mutex_enter(&target->sft_mutex);
4951 				target->sft_state &= ~SF_TARGET_BUSY;
4952 				for (ntarget = target->sft_next_lun;
4953 				    ntarget;
4954 				    ntarget = ntarget->sft_next_lun) {
4955 					mutex_enter(&ntarget->sft_mutex);
4956 					ntarget->sft_state &= ~SF_TARGET_BUSY;
4957 					mutex_exit(&ntarget->sft_mutex);
4958 				}
4959 				mutex_exit(&target->sft_mutex);
4960 			}
4961 			mutex_exit(&sf->sf_mutex);
4962 			kmem_free(p, sizeof (struct sf_reset_list));
4963 		}
4964 	} else {
4965 		mutex_enter(&sf->sf_mutex);
4966 		if ((sf->sf_state == SF_STATE_OFFLINE) &&
4967 		    (sf_watchdog_time < sf->sf_timer)) {
4968 			/*
4969 			 * We are currently in a lip, so let this one
4970 			 * finish before forcing another one.
4971 			 */
4972 			mutex_exit(&sf->sf_mutex);
4973 			return (TRUE);
4974 		}
4975 		mutex_exit(&sf->sf_mutex);
4976 		sf_log(sf, CE_NOTE, "!sf:Target driver initiated lip\n");
4977 		sf_force_lip(sf);
4978 		rval = TRUE;
4979 	}
4980 	return (rval);
4981 }
4982 
4983 
4984 /*
4985  * abort all commands for a target
4986  *
4987  * if try_abort is set then send an abort
4988  * if abort is set then this is abort, else this is a reset
4989  */
4990 static void
4991 sf_abort_all(struct sf *sf, struct sf_target *target, int abort, int
4992     lip_cnt, int try_abort)
4993 {
4994 	struct sf_target *ntarget;
4995 	struct sf_pkt *cmd, *head = NULL, *tail = NULL, *pcmd = NULL, *tcmd;
4996 	struct fcal_packet *fpkt;
4997 	struct scsi_pkt *pkt;
4998 	int rval = FCAL_ABORTED;
4999 
5000 	/*
5001 	 * First pull all commands for all LUNs on this target out of the
5002 	 * overflow list.  We can tell it's the same target by comparing
5003 	 * the node WWN.
5004 	 */
5005 	mutex_enter(&sf->sf_mutex);
5006 	if (lip_cnt == sf->sf_lip_cnt) {
5007 		mutex_enter(&sf->sf_cmd_mutex);
5008 		cmd = sf->sf_pkt_head;
5009 		while (cmd != NULL) {
5010 			ntarget = ADDR2TARGET(&cmd->cmd_pkt->
5011 			    pkt_address);
5012 			if (ntarget == target) {
5013 				if (pcmd != NULL)
5014 					pcmd->cmd_next = cmd->cmd_next;
5015 				else
5016 					sf->sf_pkt_head = cmd->cmd_next;
5017 				if (sf->sf_pkt_tail == cmd) {
5018 					sf->sf_pkt_tail = pcmd;
5019 					if (pcmd != NULL)
5020 						pcmd->cmd_next = NULL;
5021 				}
5022 				tcmd = cmd->cmd_next;
5023 				if (head == NULL) {
5024 					head = cmd;
5025 					tail = cmd;
5026 				} else {
5027 					tail->cmd_next = cmd;
5028 					tail = cmd;
5029 				}
5030 				cmd->cmd_next = NULL;
5031 				cmd = tcmd;
5032 			} else {
5033 				pcmd = cmd;
5034 				cmd = cmd->cmd_next;
5035 			}
5036 		}
5037 		mutex_exit(&sf->sf_cmd_mutex);
5038 	}
5039 	mutex_exit(&sf->sf_mutex);
5040 
5041 	/*
5042 	 * Now complete all the commands on our list.  In the process,
5043 	 * the completion routine may take the commands off the target
5044 	 * lists.
5045 	 */
5046 	cmd = head;
5047 	while (cmd != NULL) {
5048 		pkt = cmd->cmd_pkt;
5049 		if (abort) {
5050 			pkt->pkt_reason = CMD_ABORTED;
5051 			pkt->pkt_statistics |= STAT_ABORTED;
5052 		} else {
5053 			pkt->pkt_reason = CMD_RESET;
5054 			pkt->pkt_statistics |= STAT_DEV_RESET;
5055 		}
5056 		cmd->cmd_flags &= ~CFLAG_IN_QUEUE;
5057 		cmd->cmd_state = SF_STATE_IDLE;
5058 		cmd = cmd->cmd_next;
5059 		/*
5060 		 * call the packet completion routine only for
5061 		 * non-polled commands. Ignore the polled commands as
5062 		 * they timeout and will be handled differently
5063 		 */
5064 		if ((pkt->pkt_comp) && !(pkt->pkt_flags & FLAG_NOINTR))
5065 			(*pkt->pkt_comp)(pkt);
5066 
5067 	}
5068 
5069 	/*
5070 	 * Finally get all outstanding commands for each LUN, and abort them if
5071 	 * they've been issued, and call the completion routine.
5072 	 * For the case where sf_offline_target is called from sf_watch
5073 	 * due to a Offline Timeout, it is quite possible that the soc+
5074 	 * ucode is hosed and therefore  cannot return the commands.
5075 	 * Clear up all the issued commands as well.
5076 	 * Try_abort will be false only if sf_abort_all is coming from
5077 	 * sf_target_offline.
5078 	 */
5079 
5080 	if (try_abort || sf->sf_state == SF_STATE_OFFLINE) {
5081 		mutex_enter(&target->sft_pkt_mutex);
5082 		cmd = tcmd = target->sft_pkt_head;
5083 		while (cmd != (struct sf_pkt *)&target->sft_pkt_head) {
5084 			fpkt = cmd->cmd_fp_pkt;
5085 			pkt = cmd->cmd_pkt;
5086 			mutex_enter(&cmd->cmd_abort_mutex);
5087 			if ((cmd->cmd_state == SF_STATE_ISSUED) &&
5088 			    (fpkt->fcal_cmd_state &
5089 			    FCAL_CMD_IN_TRANSPORT) &&
5090 			    ((fpkt->fcal_cmd_state & FCAL_CMD_COMPLETE) ==
5091 			    0) && !(pkt->pkt_flags & FLAG_NOINTR)) {
5092 				cmd->cmd_state = SF_STATE_ABORTING;
5093 				cmd->cmd_timeout = sf_watchdog_time +
5094 				    cmd->cmd_pkt->pkt_time + 20;
5095 				mutex_exit(&cmd->cmd_abort_mutex);
5096 				mutex_exit(&target->sft_pkt_mutex);
5097 				if (try_abort) {
5098 					/* call the transport to abort a pkt */
5099 					rval = soc_abort(sf->sf_sochandle,
5100 					    sf->sf_socp,
5101 					    sf->sf_sochandle->fcal_portno,
5102 					    fpkt, 1);
5103 				}
5104 				if ((rval == FCAL_ABORTED) ||
5105 				    (rval == FCAL_ABORT_FAILED)) {
5106 					if (abort) {
5107 						pkt->pkt_reason = CMD_ABORTED;
5108 						pkt->pkt_statistics |=
5109 						    STAT_ABORTED;
5110 					} else {
5111 						pkt->pkt_reason = CMD_RESET;
5112 						pkt->pkt_statistics |=
5113 						    STAT_DEV_RESET;
5114 					}
5115 					cmd->cmd_state = SF_STATE_IDLE;
5116 					if (pkt->pkt_comp)
5117 						(*pkt->pkt_comp)(pkt);
5118 				}
5119 				mutex_enter(&sf->sf_mutex);
5120 				if (lip_cnt != sf->sf_lip_cnt) {
5121 					mutex_exit(&sf->sf_mutex);
5122 					return;
5123 				}
5124 				mutex_exit(&sf->sf_mutex);
5125 				mutex_enter(&target->sft_pkt_mutex);
5126 				cmd = target->sft_pkt_head;
5127 			} else {
5128 				mutex_exit(&cmd->cmd_abort_mutex);
5129 				cmd = cmd->cmd_forw;
5130 			}
5131 		}
5132 		mutex_exit(&target->sft_pkt_mutex);
5133 	}
5134 }
5135 
5136 
5137 /*
5138  * called by the transport to start a packet
5139  */
5140 static int
5141 sf_start(struct scsi_address *ap, struct scsi_pkt *pkt)
5142 {
5143 	struct sf *sf = ADDR2SF(ap);
5144 	struct sf_target *target = ADDR2TARGET(ap);
5145 	struct sf_pkt *cmd = PKT2CMD(pkt);
5146 	int rval;
5147 
5148 
5149 	SF_DEBUG(6, (sf, CE_NOTE, "sf_start\n"));
5150 
5151 	if (cmd->cmd_state == SF_STATE_ISSUED) {
5152 		cmn_err(CE_PANIC, "sf: issuing packet twice 0x%p\n",
5153 		    (void *)cmd);
5154 	}
5155 
5156 	/* prepare the packet for transport */
5157 	if ((rval = sf_prepare_pkt(sf, cmd, target)) != TRAN_ACCEPT) {
5158 		return (rval);
5159 	}
5160 
5161 	if (target->sft_state & (SF_TARGET_BUSY|SF_TARGET_OFFLINE)) {
5162 		if (target->sft_state & SF_TARGET_OFFLINE) {
5163 			return (TRAN_FATAL_ERROR);
5164 		}
5165 		if (pkt->pkt_flags & FLAG_NOINTR) {
5166 			return (TRAN_BUSY);
5167 		}
5168 		mutex_enter(&sf->sf_cmd_mutex);
5169 		sf->sf_use_lock = TRUE;
5170 		goto enque;
5171 	}
5172 
5173 
5174 	/* if no interrupts then do polled I/O */
5175 	if (pkt->pkt_flags & FLAG_NOINTR) {
5176 		return (sf_dopoll(sf, cmd));
5177 	}
5178 
5179 	/* regular interrupt-driven I/O */
5180 
5181 	if (!sf->sf_use_lock) {
5182 
5183 		/* locking no needed */
5184 
5185 		cmd->cmd_timeout = cmd->cmd_pkt->pkt_time ?
5186 		    sf_watchdog_time + cmd->cmd_pkt->pkt_time : 0;
5187 		cmd->cmd_state = SF_STATE_ISSUED;
5188 
5189 		/* call the transport to send a pkt */
5190 		if (soc_transport(sf->sf_sochandle, cmd->cmd_fp_pkt,
5191 		    FCAL_NOSLEEP, CQ_REQUEST_1) != FCAL_TRANSPORT_SUCCESS) {
5192 			cmd->cmd_state = SF_STATE_IDLE;
5193 			return (TRAN_BADPKT);
5194 		}
5195 		return (TRAN_ACCEPT);
5196 	}
5197 
5198 	/* regular I/O using locking */
5199 
5200 	mutex_enter(&sf->sf_cmd_mutex);
5201 	if ((sf->sf_ncmds >= sf->sf_throttle) ||
5202 	    (sf->sf_pkt_head != NULL)) {
5203 enque:
5204 		/*
5205 		 * either we're throttling back or there are already commands
5206 		 * on the queue, so enqueue this one for later
5207 		 */
5208 		cmd->cmd_flags |= CFLAG_IN_QUEUE;
5209 		if (sf->sf_pkt_head != NULL) {
5210 			/* add to the queue */
5211 			sf->sf_pkt_tail->cmd_next = cmd;
5212 			cmd->cmd_next = NULL;
5213 			sf->sf_pkt_tail = cmd;
5214 		} else {
5215 			/* this is the first entry in the queue */
5216 			sf->sf_pkt_head = sf->sf_pkt_tail = cmd;
5217 			cmd->cmd_next = NULL;
5218 		}
5219 		mutex_exit(&sf->sf_cmd_mutex);
5220 		return (TRAN_ACCEPT);
5221 	}
5222 
5223 	/*
5224 	 * start this packet now
5225 	 */
5226 
5227 	/* still have cmd mutex */
5228 	return (sf_start_internal(sf, cmd));
5229 }
5230 
5231 
5232 /*
5233  * internal routine to start a packet from the queue now
5234  *
5235  * enter with cmd mutex held and leave with it released
5236  */
5237 static int
5238 sf_start_internal(struct sf *sf, struct sf_pkt *cmd)
5239 {
5240 	/* we have the cmd mutex */
5241 	sf->sf_ncmds++;
5242 	mutex_exit(&sf->sf_cmd_mutex);
5243 
5244 	ASSERT(cmd->cmd_state != SF_STATE_ISSUED);
5245 	SF_DEBUG(6, (sf, CE_NOTE, "sf_start_internal\n"));
5246 
5247 	cmd->cmd_timeout = cmd->cmd_pkt->pkt_time ? sf_watchdog_time +
5248 	    cmd->cmd_pkt->pkt_time : 0;
5249 	cmd->cmd_state = SF_STATE_ISSUED;
5250 
5251 	/* call transport to send the pkt */
5252 	if (soc_transport(sf->sf_sochandle, cmd->cmd_fp_pkt, FCAL_NOSLEEP,
5253 	    CQ_REQUEST_1) != FCAL_TRANSPORT_SUCCESS) {
5254 		cmd->cmd_state = SF_STATE_IDLE;
5255 		mutex_enter(&sf->sf_cmd_mutex);
5256 		sf->sf_ncmds--;
5257 		mutex_exit(&sf->sf_cmd_mutex);
5258 		return (TRAN_BADPKT);
5259 	}
5260 	return (TRAN_ACCEPT);
5261 }
5262 
5263 
5264 /*
5265  * prepare a packet for transport
5266  */
5267 static int
5268 sf_prepare_pkt(struct sf *sf, struct sf_pkt *cmd, struct sf_target *target)
5269 {
5270 	struct fcp_cmd *fcmd = cmd->cmd_block;
5271 
5272 /* XXXX Need to set the LUN ? */
5273 	bcopy((caddr_t)&target->sft_lun.b,
5274 	    (caddr_t)&fcmd->fcp_ent_addr,
5275 	    FCP_LUN_SIZE);
5276 	cmd->cmd_pkt->pkt_reason = CMD_CMPLT;
5277 	cmd->cmd_pkt->pkt_state = 0;
5278 	cmd->cmd_pkt->pkt_statistics = 0;
5279 
5280 
5281 	if ((cmd->cmd_pkt->pkt_comp == NULL) &&
5282 	    ((cmd->cmd_pkt->pkt_flags & FLAG_NOINTR) == 0)) {
5283 		return (TRAN_BADPKT);
5284 	}
5285 
5286 	/* invalidate imp field(s) of rsp block */
5287 	cmd->cmd_rsp_block->fcp_u.i_fcp_status = SF_BAD_DMA_MAGIC;
5288 
5289 	/* set up amt of I/O to do */
5290 	if (cmd->cmd_flags & CFLAG_DMAVALID) {
5291 		cmd->cmd_pkt->pkt_resid = cmd->cmd_dmacount;
5292 		if (cmd->cmd_flags & CFLAG_CMDIOPB) {
5293 			(void) ddi_dma_sync(cmd->cmd_dmahandle, 0, 0,
5294 			    DDI_DMA_SYNC_FORDEV);
5295 		}
5296 	} else {
5297 		cmd->cmd_pkt->pkt_resid = 0;
5298 	}
5299 
5300 	/* set up the Tagged Queuing type */
5301 	if (cmd->cmd_pkt->pkt_flags & FLAG_HTAG) {
5302 		fcmd->fcp_cntl.cntl_qtype = FCP_QTYPE_HEAD_OF_Q;
5303 	} else if (cmd->cmd_pkt->pkt_flags & FLAG_OTAG) {
5304 		fcmd->fcp_cntl.cntl_qtype = FCP_QTYPE_ORDERED;
5305 	}
5306 
5307 	/*
5308 	 * Sync the cmd segment
5309 	 */
5310 	(void) ddi_dma_sync(cmd->cmd_cr_pool->cmd_dma_handle,
5311 	    (caddr_t)fcmd - cmd->cmd_cr_pool->cmd_base,
5312 	    sizeof (struct fcp_cmd), DDI_DMA_SYNC_FORDEV);
5313 
5314 	sf_fill_ids(sf, cmd, target);
5315 	return (TRAN_ACCEPT);
5316 }
5317 
5318 
5319 /*
5320  * fill in packet hdr source and destination IDs and hdr byte count
5321  */
5322 static void
5323 sf_fill_ids(struct sf *sf, struct sf_pkt *cmd, struct sf_target *target)
5324 {
5325 	struct fcal_packet *fpkt = cmd->cmd_fp_pkt;
5326 	fc_frame_header_t	*hp;
5327 
5328 
5329 	hp = &fpkt->fcal_socal_request.sr_fc_frame_hdr;
5330 	hp->d_id = target->sft_al_pa;
5331 	hp->s_id = sf->sf_al_pa;
5332 	fpkt->fcal_socal_request.sr_soc_hdr.sh_byte_cnt =
5333 	    cmd->cmd_dmacookie.dmac_size;
5334 }
5335 
5336 
5337 /*
5338  * do polled I/O using transport
5339  */
5340 static int
5341 sf_dopoll(struct sf *sf, struct sf_pkt *cmd)
5342 {
5343 	int timeout;
5344 	int rval;
5345 
5346 
5347 	mutex_enter(&sf->sf_cmd_mutex);
5348 	sf->sf_ncmds++;
5349 	mutex_exit(&sf->sf_cmd_mutex);
5350 
5351 	timeout = cmd->cmd_pkt->pkt_time ? cmd->cmd_pkt->pkt_time
5352 	    : SF_POLL_TIMEOUT;
5353 	cmd->cmd_timeout = 0;
5354 	cmd->cmd_fp_pkt->fcal_pkt_comp = NULL;
5355 	cmd->cmd_state = SF_STATE_ISSUED;
5356 
5357 	/* call transport to send a pkt polled */
5358 	rval = soc_transport_poll(sf->sf_sochandle, cmd->cmd_fp_pkt,
5359 	    timeout*1000000, CQ_REQUEST_1);
5360 	mutex_enter(&cmd->cmd_abort_mutex);
5361 	cmd->cmd_fp_pkt->fcal_pkt_comp = sf_cmd_callback;
5362 	if (rval != FCAL_TRANSPORT_SUCCESS) {
5363 		if (rval == FCAL_TRANSPORT_TIMEOUT) {
5364 			cmd->cmd_state = SF_STATE_ABORTING;
5365 			mutex_exit(&cmd->cmd_abort_mutex);
5366 			(void) sf_target_timeout(sf, cmd);
5367 		} else {
5368 			mutex_exit(&cmd->cmd_abort_mutex);
5369 		}
5370 		cmd->cmd_state = SF_STATE_IDLE;
5371 		cmd->cmd_fp_pkt->fcal_pkt_comp = sf_cmd_callback;
5372 		mutex_enter(&sf->sf_cmd_mutex);
5373 		sf->sf_ncmds--;
5374 		mutex_exit(&sf->sf_cmd_mutex);
5375 		return (TRAN_BADPKT);
5376 	}
5377 	mutex_exit(&cmd->cmd_abort_mutex);
5378 	cmd->cmd_fp_pkt->fcal_pkt_comp = sf_cmd_callback;
5379 	sf_cmd_callback(cmd->cmd_fp_pkt);
5380 	return (TRAN_ACCEPT);
5381 }
5382 
5383 
5384 /* a shortcut for defining debug messages below */
5385 #ifdef	DEBUG
5386 #define	SF_DMSG1(s)		msg1 = s
5387 #else
5388 #define	SF_DMSG1(s)		/* do nothing */
5389 #endif
5390 
5391 
5392 /*
5393  * the pkt_comp callback for command packets
5394  */
5395 static void
5396 sf_cmd_callback(struct fcal_packet *fpkt)
5397 {
5398 	struct sf_pkt *cmd = (struct sf_pkt *)fpkt->fcal_pkt_private;
5399 	struct scsi_pkt *pkt = cmd->cmd_pkt;
5400 	struct sf *sf = ADDR2SF(&pkt->pkt_address);
5401 	struct sf_target *target = ADDR2TARGET(&pkt->pkt_address);
5402 	struct fcp_rsp *rsp;
5403 	char *msg1 = NULL;
5404 	char *msg2 = NULL;
5405 	short ncmds;
5406 	int tgt_id;
5407 	int good_scsi_status = TRUE;
5408 
5409 
5410 
5411 	if (cmd->cmd_state == SF_STATE_IDLE) {
5412 		cmn_err(CE_PANIC, "sf: completing idle packet 0x%p\n",
5413 		    (void *)cmd);
5414 	}
5415 
5416 	mutex_enter(&cmd->cmd_abort_mutex);
5417 	if (cmd->cmd_state == SF_STATE_ABORTING) {
5418 		/* cmd already being aborted -- nothing to do */
5419 		mutex_exit(&cmd->cmd_abort_mutex);
5420 		return;
5421 	}
5422 
5423 	cmd->cmd_state = SF_STATE_IDLE;
5424 	mutex_exit(&cmd->cmd_abort_mutex);
5425 
5426 	if (fpkt->fcal_pkt_status == FCAL_STATUS_OK) {
5427 
5428 		(void) ddi_dma_sync(cmd->cmd_cr_pool->rsp_dma_handle,
5429 		    (caddr_t)cmd->cmd_rsp_block - cmd->cmd_cr_pool->rsp_base,
5430 		    FCP_MAX_RSP_IU_SIZE, DDI_DMA_SYNC_FORKERNEL);
5431 
5432 		rsp = (struct fcp_rsp *)cmd->cmd_rsp_block;
5433 
5434 		if (rsp->fcp_u.i_fcp_status == SF_BAD_DMA_MAGIC) {
5435 
5436 			if (sf_core && (sf_core & SF_CORE_BAD_DMA)) {
5437 				sf_token = (int *)(uintptr_t)
5438 				    fpkt->fcal_socal_request.\
5439 				    sr_soc_hdr.sh_request_token;
5440 				(void) soc_take_core(sf->sf_sochandle,
5441 				    sf->sf_socp);
5442 			}
5443 
5444 			pkt->pkt_reason = CMD_INCOMPLETE;
5445 			pkt->pkt_state = STATE_GOT_BUS;
5446 			pkt->pkt_statistics |= STAT_ABORTED;
5447 
5448 		} else {
5449 
5450 			pkt->pkt_state = STATE_GOT_BUS | STATE_GOT_TARGET |
5451 			    STATE_SENT_CMD | STATE_GOT_STATUS;
5452 			pkt->pkt_resid = 0;
5453 			if (cmd->cmd_flags & CFLAG_DMAVALID) {
5454 				pkt->pkt_state |= STATE_XFERRED_DATA;
5455 			}
5456 
5457 			if ((pkt->pkt_scbp != NULL) &&
5458 			    ((*(pkt->pkt_scbp) =
5459 			    rsp->fcp_u.fcp_status.scsi_status)
5460 			    != STATUS_GOOD)) {
5461 				good_scsi_status = FALSE;
5462 			/*
5463 			 * The next two checks make sure that if there
5464 			 * is no sense data or a valid response and
5465 			 * the command came back with check condition,
5466 			 * the command should be retried
5467 			 */
5468 				if (!rsp->fcp_u.fcp_status.rsp_len_set &&
5469 				    !rsp->fcp_u.fcp_status.sense_len_set) {
5470 					pkt->pkt_state &= ~STATE_XFERRED_DATA;
5471 					pkt->pkt_resid = cmd->cmd_dmacount;
5472 				}
5473 			}
5474 
5475 			if ((cmd->cmd_flags & CFLAG_CMDIOPB) &&
5476 			    (pkt->pkt_state & STATE_XFERRED_DATA)) {
5477 				(void) ddi_dma_sync(cmd->cmd_dmahandle, 0,
5478 				    (uint_t)0, DDI_DMA_SYNC_FORCPU);
5479 			}
5480 			/*
5481 			 * Update the transfer resid, if appropriate
5482 			 */
5483 			if (rsp->fcp_u.fcp_status.resid_over ||
5484 			    rsp->fcp_u.fcp_status.resid_under)
5485 				pkt->pkt_resid = rsp->fcp_resid;
5486 
5487 			/*
5488 			 * Check to see if the SCSI command failed.
5489 			 *
5490 			 */
5491 
5492 			/*
5493 			 * First see if we got a FCP protocol error.
5494 			 */
5495 			if (rsp->fcp_u.fcp_status.rsp_len_set) {
5496 				struct fcp_rsp_info *bep;
5497 
5498 				bep = (struct fcp_rsp_info *)
5499 				    (&rsp->fcp_response_len + 1);
5500 				if (bep->rsp_code != FCP_NO_FAILURE) {
5501 						pkt->pkt_reason = CMD_TRAN_ERR;
5502 					tgt_id = pkt->pkt_address.a_target;
5503 					switch (bep->rsp_code) {
5504 					case FCP_CMND_INVALID:
5505 						SF_DMSG1("FCP_RSP FCP_CMND "
5506 						    "fields invalid");
5507 						break;
5508 					case FCP_TASK_MGMT_NOT_SUPPTD:
5509 						SF_DMSG1("FCP_RSP Task"
5510 						    "Management Function"
5511 						    "Not Supported");
5512 						break;
5513 					case FCP_TASK_MGMT_FAILED:
5514 						SF_DMSG1("FCP_RSP Task "
5515 						    "Management Function"
5516 						    "Failed");
5517 						sf->sf_stats.tstats[tgt_id].
5518 						    task_mgmt_failures++;
5519 						break;
5520 					case FCP_DATA_RO_MISMATCH:
5521 						SF_DMSG1("FCP_RSP FCP_DATA RO "
5522 						    "mismatch with "
5523 						    "FCP_XFER_RDY DATA_RO");
5524 						sf->sf_stats.tstats[tgt_id].
5525 						    data_ro_mismatches++;
5526 						break;
5527 					case FCP_DL_LEN_MISMATCH:
5528 						SF_DMSG1("FCP_RSP FCP_DATA "
5529 						    "length "
5530 						    "different than BURST_LEN");
5531 						sf->sf_stats.tstats[tgt_id].
5532 						    dl_len_mismatches++;
5533 						break;
5534 					default:
5535 						SF_DMSG1("FCP_RSP invalid "
5536 						    "RSP_CODE");
5537 						break;
5538 					}
5539 				}
5540 			}
5541 
5542 			/*
5543 			 * See if we got a SCSI error with sense data
5544 			 */
5545 			if (rsp->fcp_u.fcp_status.sense_len_set) {
5546 				uchar_t rqlen = min(rsp->fcp_sense_len,
5547 				    sizeof (struct scsi_extended_sense));
5548 				caddr_t sense = (caddr_t)rsp +
5549 				    sizeof (struct fcp_rsp) +
5550 				    rsp->fcp_response_len;
5551 				struct scsi_arq_status *arq;
5552 				struct scsi_extended_sense *sensep =
5553 				    (struct scsi_extended_sense *)sense;
5554 
5555 				if (rsp->fcp_u.fcp_status.scsi_status !=
5556 				    STATUS_GOOD) {
5557 				if (rsp->fcp_u.fcp_status.scsi_status
5558 				    == STATUS_CHECK) {
5559 					if (sensep->es_key ==
5560 					    KEY_RECOVERABLE_ERROR)
5561 						good_scsi_status = 1;
5562 					if (sensep->es_key ==
5563 					    KEY_UNIT_ATTENTION &&
5564 					    sensep->es_add_code == 0x3f &&
5565 					    sensep->es_qual_code == 0x0e) {
5566 						/* REPORT_LUNS_HAS_CHANGED */
5567 						sf_log(sf, CE_NOTE,
5568 						"!REPORT_LUNS_HAS_CHANGED\n");
5569 						sf_force_lip(sf);
5570 					}
5571 				}
5572 				}
5573 
5574 				if ((pkt->pkt_scbp != NULL) &&
5575 				    (cmd->cmd_scblen >=
5576 					sizeof (struct scsi_arq_status))) {
5577 
5578 				pkt->pkt_state |= STATE_ARQ_DONE;
5579 
5580 				arq = (struct scsi_arq_status *)pkt->pkt_scbp;
5581 				/*
5582 				 * copy out sense information
5583 				 */
5584 				bcopy(sense, (caddr_t)&arq->sts_sensedata,
5585 				    rqlen);
5586 				arq->sts_rqpkt_resid =
5587 				    sizeof (struct scsi_extended_sense) -
5588 					rqlen;
5589 				*((uchar_t *)&arq->sts_rqpkt_status) =
5590 				    STATUS_GOOD;
5591 				arq->sts_rqpkt_reason = 0;
5592 				arq->sts_rqpkt_statistics = 0;
5593 				arq->sts_rqpkt_state = STATE_GOT_BUS |
5594 				    STATE_GOT_TARGET | STATE_SENT_CMD |
5595 				    STATE_GOT_STATUS | STATE_ARQ_DONE |
5596 				    STATE_XFERRED_DATA;
5597 			    }
5598 				target->sft_alive = TRUE;
5599 			}
5600 
5601 			/*
5602 			 * The firmware returns the number of bytes actually
5603 			 * xfered into/out of host. Compare this with what
5604 			 * we asked and if it is different, we lost frames ?
5605 			 */
5606 			if ((pkt->pkt_reason == 0) && (pkt->pkt_resid == 0) &&
5607 			    (good_scsi_status) &&
5608 			    (pkt->pkt_state & STATE_XFERRED_DATA) &&
5609 			    (!(cmd->cmd_flags & CFLAG_CMDIOPB)) &&
5610 			    (target->sft_device_type != DTYPE_ESI)) {
5611 				int byte_cnt =
5612 				    fpkt->fcal_socal_request.
5613 				    sr_soc_hdr.sh_byte_cnt;
5614 				if (cmd->cmd_flags & CFLAG_DMASEND) {
5615 					if (byte_cnt != 0) {
5616 					sf_log(sf, CE_NOTE,
5617 					    "!sf_cmd_callback: Lost Frame: "
5618 					    "(write) received 0x%x expected"
5619 					    " 0x%x target 0x%x\n",
5620 					    byte_cnt, cmd->cmd_dmacount,
5621 					    sf_alpa_to_switch[
5622 					    target->sft_al_pa]);
5623 					pkt->pkt_reason = CMD_INCOMPLETE;
5624 					pkt->pkt_statistics |= STAT_ABORTED;
5625 					}
5626 				} else if (byte_cnt < cmd->cmd_dmacount) {
5627 					sf_log(sf, CE_NOTE,
5628 					    "!sf_cmd_callback: "
5629 					    "Lost Frame: (read) "
5630 					    "received 0x%x expected 0x%x "
5631 					    "target 0x%x\n", byte_cnt,
5632 					    cmd->cmd_dmacount,
5633 					    sf_alpa_to_switch[
5634 					    target->sft_al_pa]);
5635 					pkt->pkt_reason = CMD_INCOMPLETE;
5636 					pkt->pkt_statistics |= STAT_ABORTED;
5637 				}
5638 			}
5639 		}
5640 
5641 	} else {
5642 
5643 		/* pkt status was not ok */
5644 
5645 		switch (fpkt->fcal_pkt_status) {
5646 
5647 		case FCAL_STATUS_ERR_OFFLINE:
5648 			SF_DMSG1("Fibre Channel Offline");
5649 			mutex_enter(&target->sft_mutex);
5650 			if (!(target->sft_state & SF_TARGET_OFFLINE)) {
5651 				target->sft_state |= (SF_TARGET_BUSY
5652 				    | SF_TARGET_MARK);
5653 			}
5654 			mutex_exit(&target->sft_mutex);
5655 			(void) ndi_event_retrieve_cookie(sf->sf_event_hdl,
5656 			    target->sft_dip, FCAL_REMOVE_EVENT,
5657 			    &sf_remove_eid, NDI_EVENT_NOPASS);
5658 			(void) ndi_event_run_callbacks(sf->sf_event_hdl,
5659 			    target->sft_dip, sf_remove_eid, NULL);
5660 			pkt->pkt_reason = CMD_TRAN_ERR;
5661 			pkt->pkt_statistics |= STAT_BUS_RESET;
5662 			break;
5663 
5664 		case FCAL_STATUS_MAX_XCHG_EXCEEDED:
5665 			sf_throttle(sf);
5666 			sf->sf_use_lock = TRUE;
5667 			pkt->pkt_reason = CMD_TRAN_ERR;
5668 			pkt->pkt_state = STATE_GOT_BUS;
5669 			pkt->pkt_statistics |= STAT_ABORTED;
5670 			break;
5671 
5672 		case FCAL_STATUS_TIMEOUT:
5673 			SF_DMSG1("Fibre Channel Timeout");
5674 			pkt->pkt_reason = CMD_TIMEOUT;
5675 			break;
5676 
5677 		case FCAL_STATUS_ERR_OVERRUN:
5678 			SF_DMSG1("CMD_DATA_OVR");
5679 			pkt->pkt_reason = CMD_DATA_OVR;
5680 			break;
5681 
5682 		case FCAL_STATUS_UNKNOWN_CQ_TYPE:
5683 			SF_DMSG1("Unknown CQ type");
5684 			pkt->pkt_reason = CMD_TRAN_ERR;
5685 			break;
5686 
5687 		case FCAL_STATUS_BAD_SEG_CNT:
5688 			SF_DMSG1("Bad SEG CNT");
5689 			pkt->pkt_reason = CMD_TRAN_ERR;
5690 			break;
5691 
5692 		case FCAL_STATUS_BAD_XID:
5693 			SF_DMSG1("Fibre Channel Invalid X_ID");
5694 			pkt->pkt_reason = CMD_TRAN_ERR;
5695 			break;
5696 
5697 		case FCAL_STATUS_XCHG_BUSY:
5698 			SF_DMSG1("Fibre Channel Exchange Busy");
5699 			pkt->pkt_reason = CMD_TRAN_ERR;
5700 			break;
5701 
5702 		case FCAL_STATUS_INSUFFICIENT_CQES:
5703 			SF_DMSG1("Insufficient CQEs");
5704 			pkt->pkt_reason = CMD_TRAN_ERR;
5705 			break;
5706 
5707 		case FCAL_STATUS_ALLOC_FAIL:
5708 			SF_DMSG1("ALLOC FAIL");
5709 			pkt->pkt_reason = CMD_TRAN_ERR;
5710 			break;
5711 
5712 		case FCAL_STATUS_BAD_SID:
5713 			SF_DMSG1("Fibre Channel Invalid S_ID");
5714 			pkt->pkt_reason = CMD_TRAN_ERR;
5715 			break;
5716 
5717 		case FCAL_STATUS_INCOMPLETE_DMA_ERR:
5718 			if (sf_core && (sf_core & SF_CORE_INCOMPLETE_DMA)) {
5719 				sf_token = (int *)(uintptr_t)
5720 				    fpkt->fcal_socal_request.\
5721 				    sr_soc_hdr.sh_request_token;
5722 				(void) soc_take_core(sf->sf_sochandle,
5723 				    sf->sf_socp);
5724 				sf_core = 0;
5725 			}
5726 			msg2 =
5727 			"INCOMPLETE DMA XFER due to bad SOC+ card, replace HBA";
5728 			pkt->pkt_reason = CMD_INCOMPLETE;
5729 			pkt->pkt_state = STATE_GOT_BUS;
5730 			pkt->pkt_statistics |= STAT_ABORTED;
5731 			break;
5732 
5733 		case FCAL_STATUS_CRC_ERR:
5734 			msg2 = "Fibre Channel CRC Error on frames";
5735 			pkt->pkt_reason = CMD_INCOMPLETE;
5736 			pkt->pkt_state = STATE_GOT_BUS;
5737 			pkt->pkt_statistics |= STAT_ABORTED;
5738 			break;
5739 
5740 		case FCAL_STATUS_NO_SEQ_INIT:
5741 			SF_DMSG1("Fibre Channel Seq Init Error");
5742 			pkt->pkt_reason = CMD_TRAN_ERR;
5743 			break;
5744 
5745 		case  FCAL_STATUS_OPEN_FAIL:
5746 			pkt->pkt_reason = CMD_TRAN_ERR;
5747 			SF_DMSG1("Fibre Channel Open Failure");
5748 			if ((target->sft_state & (SF_TARGET_BUSY |
5749 			    SF_TARGET_MARK | SF_TARGET_OFFLINE)) == 0) {
5750 				sf_log(sf, CE_NOTE,
5751 				    "!Open failure to target 0x%x "
5752 				    "forcing LIP\n",
5753 				    sf_alpa_to_switch[target->sft_al_pa]);
5754 				sf_force_lip(sf);
5755 			}
5756 			break;
5757 
5758 
5759 		case FCAL_STATUS_ONLINE_TIMEOUT:
5760 			SF_DMSG1("Fibre Channel Online Timeout");
5761 			pkt->pkt_reason = CMD_TRAN_ERR;
5762 			break;
5763 
5764 		default:
5765 			SF_DMSG1("Unknown FC Status");
5766 			pkt->pkt_reason = CMD_TRAN_ERR;
5767 			break;
5768 		}
5769 	}
5770 
5771 #ifdef	DEBUG
5772 	/*
5773 	 * msg1 will be non-NULL if we've detected some sort of error
5774 	 */
5775 	if (msg1 != NULL && sfdebug >= 4) {
5776 		sf_log(sf, CE_WARN,
5777 		    "!Transport error on cmd=0x%p target=0x%x:  %s\n",
5778 		    (void *)fpkt, pkt->pkt_address.a_target, msg1);
5779 	}
5780 #endif
5781 
5782 	if (msg2 != NULL) {
5783 		sf_log(sf, CE_WARN, "!Transport error on target=0x%x:  %s\n",
5784 		    pkt->pkt_address.a_target, msg2);
5785 	}
5786 
5787 	ncmds = fpkt->fcal_ncmds;
5788 	ASSERT(ncmds >= 0);
5789 	if (ncmds >= (sf->sf_throttle - SF_HI_CMD_DELTA)) {
5790 #ifdef DEBUG
5791 		if (!sf->sf_use_lock) {
5792 			SF_DEBUG(4, (sf, CE_NOTE, "use lock flag on\n"));
5793 		}
5794 #endif
5795 		sf->sf_use_lock = TRUE;
5796 	}
5797 
5798 	mutex_enter(&sf->sf_cmd_mutex);
5799 	sf->sf_ncmds = ncmds;
5800 	sf_throttle_start(sf);
5801 	mutex_exit(&sf->sf_cmd_mutex);
5802 
5803 	if (!msg1 && !msg2)
5804 		SF_DEBUG(6, (sf, CE_NOTE, "Completing pkt 0x%p\n",
5805 		    (void *)pkt));
5806 	if (pkt->pkt_comp != NULL) {
5807 		(*pkt->pkt_comp)(pkt);
5808 	}
5809 }
5810 
5811 #undef	SF_DMSG1
5812 
5813 
5814 
5815 /*
5816  * start throttling for this instance
5817  */
5818 static void
5819 sf_throttle_start(struct sf *sf)
5820 {
5821 	struct sf_pkt *cmd, *prev_cmd = NULL;
5822 	struct scsi_pkt *pkt;
5823 	struct sf_target *target;
5824 
5825 
5826 	ASSERT(mutex_owned(&sf->sf_cmd_mutex));
5827 
5828 	cmd = sf->sf_pkt_head;
5829 	while ((cmd != NULL) &&
5830 	    (sf->sf_state == SF_STATE_ONLINE) &&
5831 	    (sf->sf_ncmds < sf->sf_throttle)) {
5832 
5833 		pkt = CMD2PKT(cmd);
5834 
5835 		target = ADDR2TARGET(&pkt->pkt_address);
5836 		if (target->sft_state & SF_TARGET_BUSY) {
5837 			/* this command is busy -- go to next */
5838 			ASSERT(cmd->cmd_state != SF_STATE_ISSUED);
5839 			prev_cmd = cmd;
5840 			cmd = cmd->cmd_next;
5841 			continue;
5842 		}
5843 
5844 		ASSERT(cmd->cmd_state != SF_STATE_ISSUED);
5845 
5846 		/* this cmd not busy and not issued */
5847 
5848 		/* remove this packet from the queue */
5849 		if (sf->sf_pkt_head == cmd) {
5850 			/* this was the first packet */
5851 			sf->sf_pkt_head = cmd->cmd_next;
5852 		} else if (sf->sf_pkt_tail == cmd) {
5853 			/* this was the last packet */
5854 			sf->sf_pkt_tail = prev_cmd;
5855 			if (prev_cmd != NULL) {
5856 				prev_cmd->cmd_next = NULL;
5857 			}
5858 		} else {
5859 			/* some packet in the middle of the queue */
5860 			ASSERT(prev_cmd != NULL);
5861 			prev_cmd->cmd_next = cmd->cmd_next;
5862 		}
5863 		cmd->cmd_flags &= ~CFLAG_IN_QUEUE;
5864 
5865 		if (target->sft_state & SF_TARGET_OFFLINE) {
5866 			mutex_exit(&sf->sf_cmd_mutex);
5867 			pkt->pkt_reason = CMD_TRAN_ERR;
5868 			if (pkt->pkt_comp != NULL) {
5869 				(*pkt->pkt_comp)(cmd->cmd_pkt);
5870 			}
5871 		} else {
5872 			sf_fill_ids(sf, cmd, target);
5873 			if (sf_start_internal(sf, cmd) != TRAN_ACCEPT) {
5874 				pkt->pkt_reason = CMD_TRAN_ERR;
5875 				if (pkt->pkt_comp != NULL) {
5876 					(*pkt->pkt_comp)(cmd->cmd_pkt);
5877 				}
5878 			}
5879 		}
5880 		mutex_enter(&sf->sf_cmd_mutex);
5881 		cmd = sf->sf_pkt_head;
5882 		prev_cmd = NULL;
5883 	}
5884 }
5885 
5886 
5887 /*
5888  * called when the max exchange value is exceeded to throttle back commands
5889  */
5890 static void
5891 sf_throttle(struct sf *sf)
5892 {
5893 	int cmdmax = sf->sf_sochandle->fcal_cmdmax;
5894 
5895 
5896 	mutex_enter(&sf->sf_cmd_mutex);
5897 
5898 	sf->sf_flag = TRUE;
5899 
5900 	if (sf->sf_ncmds > (cmdmax / 2)) {
5901 		sf->sf_throttle = cmdmax / 2;
5902 	} else {
5903 		if (sf->sf_ncmds > SF_DECR_DELTA) {
5904 			sf->sf_throttle = sf->sf_ncmds - SF_DECR_DELTA;
5905 		} else {
5906 			/*
5907 			 * This case is just a safeguard, should not really
5908 			 * happen(ncmds < SF_DECR_DELTA and MAX_EXCHG exceed
5909 			 */
5910 			sf->sf_throttle = SF_DECR_DELTA;
5911 		}
5912 	}
5913 	mutex_exit(&sf->sf_cmd_mutex);
5914 
5915 	sf = sf->sf_sibling;
5916 	if (sf != NULL) {
5917 		mutex_enter(&sf->sf_cmd_mutex);
5918 		sf->sf_flag = TRUE;
5919 		if (sf->sf_ncmds >= (cmdmax / 2)) {
5920 			sf->sf_throttle = cmdmax / 2;
5921 		} else {
5922 			if (sf->sf_ncmds > SF_DECR_DELTA) {
5923 				sf->sf_throttle = sf->sf_ncmds - SF_DECR_DELTA;
5924 			} else {
5925 				sf->sf_throttle = SF_DECR_DELTA;
5926 			}
5927 		}
5928 
5929 		mutex_exit(&sf->sf_cmd_mutex);
5930 	}
5931 }
5932 
5933 
5934 /*
5935  * sf watchdog routine, called for a timeout
5936  */
5937 /*ARGSUSED*/
5938 static void
5939 sf_watch(void *arg)
5940 {
5941 	struct sf *sf;
5942 	struct sf_els_hdr	*privp;
5943 	static int count = 0, pscan_count = 0;
5944 	int cmdmax, i, mescount = 0;
5945 	struct sf_target *target;
5946 
5947 
5948 	sf_watchdog_time += sf_watchdog_timeout;
5949 	count++;
5950 	pscan_count++;
5951 
5952 	mutex_enter(&sf_global_mutex);
5953 	sf_watch_running = 1;
5954 	for (sf = sf_head; sf != NULL; sf = sf->sf_next) {
5955 
5956 		mutex_exit(&sf_global_mutex);
5957 
5958 		/* disable throttling while we're suspended */
5959 		mutex_enter(&sf->sf_mutex);
5960 		if (sf->sf_state & SF_STATE_SUSPENDED) {
5961 			mutex_exit(&sf->sf_mutex);
5962 			SF_DEBUG(1, (sf, CE_CONT,
5963 			    "sf_watch, sf%d:throttle disabled "
5964 			    "due to DDI_SUSPEND\n",
5965 			    ddi_get_instance(sf->sf_dip)));
5966 			mutex_enter(&sf_global_mutex);
5967 			continue;
5968 		}
5969 		mutex_exit(&sf->sf_mutex);
5970 
5971 		cmdmax = sf->sf_sochandle->fcal_cmdmax;
5972 
5973 		if (sf->sf_take_core) {
5974 			(void) soc_take_core(sf->sf_sochandle, sf->sf_socp);
5975 		}
5976 
5977 		mutex_enter(&sf->sf_cmd_mutex);
5978 
5979 		if (!sf->sf_flag) {
5980 			if (sf->sf_throttle < (cmdmax / 2)) {
5981 				sf->sf_throttle = cmdmax / 2;
5982 			} else if ((sf->sf_throttle += SF_INCR_DELTA) >
5983 			    cmdmax) {
5984 				sf->sf_throttle = cmdmax;
5985 			}
5986 		} else {
5987 			sf->sf_flag = FALSE;
5988 		}
5989 
5990 		sf->sf_ncmds_exp_avg = (sf->sf_ncmds + sf->sf_ncmds_exp_avg)
5991 		    >> 2;
5992 		if ((sf->sf_ncmds <= (sf->sf_throttle - SF_LO_CMD_DELTA)) &&
5993 		    (sf->sf_pkt_head == NULL)) {
5994 #ifdef DEBUG
5995 			if (sf->sf_use_lock) {
5996 				SF_DEBUG(4, (sf, CE_NOTE,
5997 				    "use lock flag off\n"));
5998 			}
5999 #endif
6000 			sf->sf_use_lock = FALSE;
6001 		}
6002 
6003 		if (sf->sf_state == SF_STATE_ONLINE && sf->sf_pkt_head &&
6004 		    sf->sf_ncmds < sf->sf_throttle) {
6005 			sf_throttle_start(sf);
6006 		}
6007 
6008 		mutex_exit(&sf->sf_cmd_mutex);
6009 
6010 		if (pscan_count >= sf_pool_scan_cnt) {
6011 			if (sf->sf_ncmds_exp_avg < (sf->sf_cr_pool_cnt <<
6012 			    SF_LOG2_ELEMS_IN_POOL) - SF_FREE_CR_EPSILON) {
6013 				sf_crpool_free(sf);
6014 			}
6015 		}
6016 		mutex_enter(&sf->sf_mutex);
6017 
6018 		privp = sf->sf_els_list;
6019 		while (privp != NULL) {
6020 			if (privp->timeout < sf_watchdog_time) {
6021 				/* timeout this command */
6022 				privp = sf_els_timeout(sf, privp);
6023 			} else if ((privp->timeout == SF_INVALID_TIMEOUT) &&
6024 			    (privp->lip_cnt != sf->sf_lip_cnt)) {
6025 				if (privp->prev != NULL) {
6026 					privp->prev->next = privp->next;
6027 				}
6028 				if (sf->sf_els_list == privp) {
6029 					sf->sf_els_list = privp->next;
6030 				}
6031 				if (privp->next != NULL) {
6032 					privp->next->prev = privp->prev;
6033 				}
6034 				mutex_exit(&sf->sf_mutex);
6035 				sf_els_free(privp->fpkt);
6036 				mutex_enter(&sf->sf_mutex);
6037 				privp = sf->sf_els_list;
6038 			} else {
6039 				privp = privp->next;
6040 			}
6041 		}
6042 
6043 		if (sf->sf_online_timer && sf->sf_online_timer <
6044 		    sf_watchdog_time) {
6045 			for (i = 0; i < sf_max_targets; i++) {
6046 				target = sf->sf_targets[i];
6047 				if (target != NULL) {
6048 					if (!mescount && target->sft_state &
6049 					    SF_TARGET_BUSY) {
6050 						sf_log(sf, CE_WARN, "!Loop "
6051 						    "Unstable: Failed to bring "
6052 						    "Loop Online\n");
6053 						mescount = 1;
6054 					}
6055 					target->sft_state |= SF_TARGET_MARK;
6056 				}
6057 			}
6058 			sf_finish_init(sf, sf->sf_lip_cnt);
6059 			sf->sf_state = SF_STATE_INIT;
6060 			sf->sf_online_timer = 0;
6061 		}
6062 
6063 		if (sf->sf_state == SF_STATE_ONLINE) {
6064 			mutex_exit(&sf->sf_mutex);
6065 			if (count >= sf_pkt_scan_cnt) {
6066 				sf_check_targets(sf);
6067 			}
6068 		} else if ((sf->sf_state == SF_STATE_OFFLINE) &&
6069 		    (sf->sf_timer < sf_watchdog_time)) {
6070 			for (i = 0; i < sf_max_targets; i++) {
6071 				target = sf->sf_targets[i];
6072 				if ((target != NULL) &&
6073 				    (target->sft_state &
6074 				    SF_TARGET_BUSY)) {
6075 					sf_log(sf, CE_WARN,
6076 					    "!Offline Timeout\n");
6077 					if (sf_core && (sf_core &
6078 					    SF_CORE_OFFLINE_TIMEOUT)) {
6079 						(void) soc_take_core(
6080 						    sf->sf_sochandle,
6081 						    sf->sf_socp);
6082 						sf_core = 0;
6083 					}
6084 					break;
6085 				}
6086 			}
6087 			sf_finish_init(sf, sf->sf_lip_cnt);
6088 			sf->sf_state = SF_STATE_INIT;
6089 			mutex_exit(&sf->sf_mutex);
6090 		} else {
6091 			mutex_exit(&sf->sf_mutex);
6092 		}
6093 		mutex_enter(&sf_global_mutex);
6094 	}
6095 	mutex_exit(&sf_global_mutex);
6096 	if (count >= sf_pkt_scan_cnt) {
6097 		count = 0;
6098 	}
6099 	if (pscan_count >= sf_pool_scan_cnt) {
6100 		pscan_count = 0;
6101 	}
6102 
6103 	/* reset timeout */
6104 	sf_watchdog_id = timeout(sf_watch, (caddr_t)0, sf_watchdog_tick);
6105 
6106 	/* signal waiting thread */
6107 	mutex_enter(&sf_global_mutex);
6108 	sf_watch_running = 0;
6109 	cv_broadcast(&sf_watch_cv);
6110 	mutex_exit(&sf_global_mutex);
6111 }
6112 
6113 
6114 /*
6115  * called during a timeout to check targets
6116  */
6117 static void
6118 sf_check_targets(struct sf *sf)
6119 {
6120 	struct sf_target *target;
6121 	int i;
6122 	struct sf_pkt *cmd;
6123 	struct scsi_pkt *pkt;
6124 	int lip_cnt;
6125 
6126 	mutex_enter(&sf->sf_mutex);
6127 	lip_cnt = sf->sf_lip_cnt;
6128 	mutex_exit(&sf->sf_mutex);
6129 
6130 	/* check scan all possible targets */
6131 	for (i = 0; i < sf_max_targets; i++) {
6132 		target = sf->sf_targets[i];
6133 		while (target != NULL) {
6134 			mutex_enter(&target->sft_pkt_mutex);
6135 			if (target->sft_alive && target->sft_scan_count !=
6136 			    sf_target_scan_cnt) {
6137 				target->sft_alive = 0;
6138 				target->sft_scan_count++;
6139 				mutex_exit(&target->sft_pkt_mutex);
6140 				return;
6141 			}
6142 			target->sft_alive = 0;
6143 			target->sft_scan_count = 0;
6144 			cmd = target->sft_pkt_head;
6145 			while (cmd != (struct sf_pkt *)&target->sft_pkt_head) {
6146 				mutex_enter(&cmd->cmd_abort_mutex);
6147 				if (cmd->cmd_state == SF_STATE_ISSUED &&
6148 				    ((cmd->cmd_timeout && sf_watchdog_time >
6149 #ifdef	DEBUG
6150 				    cmd->cmd_timeout) || sf_abort_flag)) {
6151 					sf_abort_flag = 0;
6152 #else
6153 					cmd->cmd_timeout))) {
6154 #endif
6155 					cmd->cmd_timeout = 0;
6156 	/* prevent reset from getting at this packet */
6157 					cmd->cmd_state = SF_STATE_ABORTING;
6158 					mutex_exit(&cmd->cmd_abort_mutex);
6159 					mutex_exit(&target->sft_pkt_mutex);
6160 					sf->sf_stats.tstats[i].timeouts++;
6161 					if (sf_target_timeout(sf, cmd))
6162 						return;
6163 					else {
6164 						if (lip_cnt != sf->sf_lip_cnt) {
6165 							return;
6166 						} else {
6167 							mutex_enter(&target->
6168 							    sft_pkt_mutex);
6169 							cmd = target->
6170 							    sft_pkt_head;
6171 						}
6172 					}
6173 	/*
6174 	 * if the abort and lip fail, a reset will be carried out.
6175 	 * But the reset will ignore this packet. We have waited at least
6176 	 * 20 seconds after the initial timeout. Now, complete it here.
6177 	 * This also takes care of spurious bad aborts.
6178 	 */
6179 				} else if ((cmd->cmd_state ==
6180 				    SF_STATE_ABORTING) && (cmd->cmd_timeout
6181 				    <= sf_watchdog_time)) {
6182 					cmd->cmd_state = SF_STATE_IDLE;
6183 					mutex_exit(&cmd->cmd_abort_mutex);
6184 					mutex_exit(&target->sft_pkt_mutex);
6185 					SF_DEBUG(1, (sf, CE_NOTE,
6186 					    "Command 0x%p to sft 0x%p"
6187 					    " delayed release\n",
6188 					    (void *)cmd, (void *)target));
6189 					pkt = cmd->cmd_pkt;
6190 					pkt->pkt_statistics |=
6191 					    (STAT_TIMEOUT|STAT_ABORTED);
6192 					pkt->pkt_reason = CMD_TIMEOUT;
6193 					if (pkt->pkt_comp) {
6194 						scsi_hba_pkt_comp(pkt);
6195 					/* handle deferred_destroy case */
6196 					} else {
6197 						if ((cmd->cmd_block->fcp_cntl.
6198 						    cntl_reset == 1) ||
6199 						    (cmd->cmd_block->
6200 						    fcp_cntl.cntl_abort_tsk ==
6201 						    1)) {
6202 							cmd->cmd_block->
6203 							    fcp_cntl.
6204 							    cntl_reset = 0;
6205 							cmd->cmd_block->
6206 							    fcp_cntl.
6207 							    cntl_abort_tsk = 0;
6208 							cmd->cmd_fp_pkt->
6209 							    fcal_pkt_comp =
6210 							    sf_cmd_callback;
6211 							/* for cache */
6212 							sf_scsi_destroy_pkt
6213 							    (&pkt->pkt_address,
6214 							    pkt);
6215 						}
6216 					}
6217 					mutex_enter(&target->sft_pkt_mutex);
6218 					cmd = target->sft_pkt_head;
6219 				} else {
6220 					mutex_exit(&cmd->cmd_abort_mutex);
6221 					cmd = cmd->cmd_forw;
6222 				}
6223 			}
6224 			mutex_exit(&target->sft_pkt_mutex);
6225 			target = target->sft_next_lun;
6226 		}
6227 	}
6228 }
6229 
6230 
6231 /*
6232  * a command to a target has timed out
6233  * return TRUE iff cmd abort failed or timed out, else return FALSE
6234  */
6235 static int
6236 sf_target_timeout(struct sf *sf, struct sf_pkt *cmd)
6237 {
6238 	int rval;
6239 	struct scsi_pkt *pkt;
6240 	struct fcal_packet *fpkt;
6241 	int tgt_id;
6242 	int retval = FALSE;
6243 
6244 
6245 	SF_DEBUG(1, (sf, CE_NOTE, "Command 0x%p to target %x timed out\n",
6246 	    (void *)cmd->cmd_fp_pkt, cmd->cmd_pkt->pkt_address.a_target));
6247 
6248 	fpkt = cmd->cmd_fp_pkt;
6249 
6250 	if (sf_core && (sf_core & SF_CORE_CMD_TIMEOUT)) {
6251 		sf_token = (int *)(uintptr_t)
6252 		    fpkt->fcal_socal_request.sr_soc_hdr.\
6253 		    sh_request_token;
6254 		(void) soc_take_core(sf->sf_sochandle, sf->sf_socp);
6255 		sf_core = 0;
6256 	}
6257 
6258 	/* call the transport to abort a command */
6259 	rval = soc_abort(sf->sf_sochandle, sf->sf_socp,
6260 	    sf->sf_sochandle->fcal_portno, fpkt, 1);
6261 
6262 	switch (rval) {
6263 	case FCAL_ABORTED:
6264 		SF_DEBUG(1, (sf, CE_NOTE, "Command Abort succeeded\n"));
6265 		pkt = cmd->cmd_pkt;
6266 		cmd->cmd_state = SF_STATE_IDLE;
6267 		pkt->pkt_statistics |= (STAT_TIMEOUT|STAT_ABORTED);
6268 		pkt->pkt_reason = CMD_TIMEOUT;
6269 		if (pkt->pkt_comp != NULL) {
6270 			(*pkt->pkt_comp)(pkt);
6271 		}
6272 		break;				/* success */
6273 
6274 	case FCAL_ABORT_FAILED:
6275 		SF_DEBUG(1, (sf, CE_NOTE, "Command Abort failed at target\n"));
6276 		pkt = cmd->cmd_pkt;
6277 		cmd->cmd_state = SF_STATE_IDLE;
6278 		pkt->pkt_reason = CMD_TIMEOUT;
6279 		pkt->pkt_statistics |= STAT_TIMEOUT;
6280 		tgt_id = pkt->pkt_address.a_target;
6281 		sf->sf_stats.tstats[tgt_id].abts_failures++;
6282 		if (pkt->pkt_comp != NULL) {
6283 			(*pkt->pkt_comp)(pkt);
6284 		}
6285 		break;
6286 
6287 	case FCAL_BAD_ABORT:
6288 		if (sf_core && (sf_core & SF_CORE_BAD_ABORT)) {
6289 			sf_token = (int *)(uintptr_t)fpkt->fcal_socal_request.\
6290 			    sr_soc_hdr.sh_request_token;
6291 			(void) soc_take_core(sf->sf_sochandle, sf->sf_socp);
6292 			sf_core = 0;
6293 		}
6294 		SF_DEBUG(1, (sf, CE_NOTE, "Command Abort bad abort\n"));
6295 		cmd->cmd_timeout = sf_watchdog_time + cmd->cmd_pkt->pkt_time
6296 		    + 20;
6297 		break;
6298 
6299 	case FCAL_TIMEOUT:
6300 		retval = TRUE;
6301 		break;
6302 
6303 	default:
6304 		pkt = cmd->cmd_pkt;
6305 		tgt_id = pkt->pkt_address.a_target;
6306 		sf_log(sf, CE_WARN,
6307 		"Command Abort failed target 0x%x, forcing a LIP\n", tgt_id);
6308 		if (sf_core && (sf_core & SF_CORE_ABORT_TIMEOUT)) {
6309 			sf_token = (int *)(uintptr_t)fpkt->fcal_socal_request.\
6310 			    sr_soc_hdr.sh_request_token;
6311 			(void) soc_take_core(sf->sf_sochandle, sf->sf_socp);
6312 			sf_core = 0;
6313 		}
6314 		sf_force_lip(sf);
6315 		retval = TRUE;
6316 		break;
6317 	}
6318 
6319 	return (retval);
6320 }
6321 
6322 
6323 /*
6324  * an ELS command has timed out
6325  * return ???
6326  */
6327 static struct sf_els_hdr *
6328 sf_els_timeout(struct sf *sf, struct sf_els_hdr *privp)
6329 {
6330 	struct fcal_packet *fpkt;
6331 	int rval, dflag, timeout = SF_ELS_TIMEOUT;
6332 	uint_t lip_cnt = privp->lip_cnt;
6333 	uchar_t els_code = privp->els_code;
6334 	struct sf_target *target = privp->target;
6335 	char what[64];
6336 
6337 	fpkt = privp->fpkt;
6338 	dflag = privp->delayed_retry;
6339 	/* use as temporary state variable */
6340 	privp->timeout = SF_INVALID_TIMEOUT;
6341 	mutex_exit(&sf->sf_mutex);
6342 
6343 	if (privp->fpkt->fcal_pkt_comp == sf_els_callback) {
6344 		/*
6345 		 * take socal core if required. Timeouts for IB and hosts
6346 		 * are not very interesting, so we take socal core only
6347 		 * if the timeout is *not* for a IB or host.
6348 		 */
6349 		if (sf_core && (sf_core & SF_CORE_ELS_TIMEOUT) &&
6350 		    ((sf_alpa_to_switch[privp->dest_nport_id] &
6351 		    0x0d) != 0x0d) && ((privp->dest_nport_id != 1) ||
6352 		    (privp->dest_nport_id != 2) ||
6353 		    (privp->dest_nport_id != 4) ||
6354 		    (privp->dest_nport_id != 8) ||
6355 		    (privp->dest_nport_id != 0xf))) {
6356 			sf_token = (int *)(uintptr_t)fpkt->fcal_socal_request.\
6357 			    sr_soc_hdr.sh_request_token;
6358 			(void) soc_take_core(sf->sf_sochandle, sf->sf_socp);
6359 			sf_core = 0;
6360 		}
6361 		(void) sprintf(what, "ELS 0x%x", privp->els_code);
6362 	} else if (privp->fpkt->fcal_pkt_comp == sf_reportlun_callback) {
6363 		if (sf_core && (sf_core & SF_CORE_REPORTLUN_TIMEOUT)) {
6364 			sf_token = (int *)(uintptr_t)fpkt->fcal_socal_request.\
6365 			    sr_soc_hdr.sh_request_token;
6366 			(void) soc_take_core(sf->sf_sochandle, sf->sf_socp);
6367 			sf_core = 0;
6368 		}
6369 		timeout = SF_FCP_TIMEOUT;
6370 		(void) sprintf(what, "REPORT_LUNS");
6371 	} else if (privp->fpkt->fcal_pkt_comp == sf_inq_callback) {
6372 		if (sf_core && (sf_core & SF_CORE_INQUIRY_TIMEOUT)) {
6373 			sf_token = (int *)(uintptr_t)
6374 			    fpkt->fcal_socal_request.\
6375 			    sr_soc_hdr.sh_request_token;
6376 			(void) soc_take_core(sf->sf_sochandle, sf->sf_socp);
6377 			sf_core = 0;
6378 		}
6379 		timeout = SF_FCP_TIMEOUT;
6380 		(void) sprintf(what, "INQUIRY to LUN 0x%lx",
6381 		    (long)SCSA_LUN(target));
6382 	} else {
6383 		(void) sprintf(what, "UNKNOWN OPERATION");
6384 	}
6385 
6386 	if (dflag) {
6387 		/* delayed retry */
6388 		SF_DEBUG(2, (sf, CE_CONT,
6389 		    "!sf%d: %s to target %x delayed retry\n",
6390 		    ddi_get_instance(sf->sf_dip), what,
6391 		    sf_alpa_to_switch[privp->dest_nport_id]));
6392 		privp->delayed_retry = FALSE;
6393 		goto try_again;
6394 	}
6395 
6396 	sf_log(sf, CE_NOTE, "!%s to target 0x%x alpa 0x%x timed out\n",
6397 	    what, sf_alpa_to_switch[privp->dest_nport_id],
6398 	    privp->dest_nport_id);
6399 
6400 	rval = soc_abort(sf->sf_sochandle, sf->sf_socp, sf->sf_sochandle
6401 	    ->fcal_portno, fpkt, 1);
6402 	if (rval == FCAL_ABORTED || rval == FCAL_ABORT_FAILED) {
6403 	SF_DEBUG(1, (sf, CE_NOTE, "!%s abort to al_pa %x succeeded\n",
6404 	    what, privp->dest_nport_id));
6405 try_again:
6406 
6407 		mutex_enter(&sf->sf_mutex);
6408 		if (privp->prev != NULL) {
6409 			privp->prev->next = privp->next;
6410 		}
6411 		if (sf->sf_els_list == privp) {
6412 			sf->sf_els_list = privp->next;
6413 		}
6414 		if (privp->next != NULL) {
6415 			privp->next->prev = privp->prev;
6416 		}
6417 		privp->prev = privp->next = NULL;
6418 		if (lip_cnt == sf->sf_lip_cnt) {
6419 			privp->timeout = sf_watchdog_time + timeout;
6420 			if ((++(privp->retries) < sf_els_retries) ||
6421 			    (dflag && (privp->retries < SF_BSY_RETRIES))) {
6422 				mutex_exit(&sf->sf_mutex);
6423 				sf_log(sf, CE_NOTE,
6424 				    "!%s to target 0x%x retrying\n",
6425 				    what,
6426 				    sf_alpa_to_switch[privp->dest_nport_id]);
6427 				if (sf_els_transport(sf, privp) == 1) {
6428 					mutex_enter(&sf->sf_mutex);
6429 					return (sf->sf_els_list); /* success */
6430 				}
6431 				mutex_enter(&sf->sf_mutex);
6432 				fpkt = NULL;
6433 			}
6434 			if ((lip_cnt == sf->sf_lip_cnt) &&
6435 			    (els_code != LA_ELS_LOGO)) {
6436 				if (target != NULL) {
6437 					sf_offline_target(sf, target);
6438 				}
6439 				if (sf->sf_lip_cnt == lip_cnt) {
6440 					sf->sf_device_count--;
6441 					ASSERT(sf->sf_device_count >= 0);
6442 					if (sf->sf_device_count == 0) {
6443 						sf_finish_init(sf,
6444 						    sf->sf_lip_cnt);
6445 					}
6446 				}
6447 			}
6448 			privp = sf->sf_els_list;
6449 			mutex_exit(&sf->sf_mutex);
6450 			if (fpkt != NULL) {
6451 				sf_els_free(fpkt);
6452 			}
6453 		} else {
6454 			mutex_exit(&sf->sf_mutex);
6455 			sf_els_free(privp->fpkt);
6456 			privp = NULL;
6457 		}
6458 	} else {
6459 		if (sf_core && (sf_core & SF_CORE_ELS_FAILED)) {
6460 			sf_token = (int *)(uintptr_t)
6461 			    fpkt->fcal_socal_request.\
6462 			    sr_soc_hdr.sh_request_token;
6463 			(void) soc_take_core(sf->sf_sochandle, sf->sf_socp);
6464 			sf_core = 0;
6465 		}
6466 		sf_log(sf, CE_NOTE, "%s abort to target 0x%x failed. "
6467 		    "status=0x%x, forcing LIP\n", what,
6468 		    sf_alpa_to_switch[privp->dest_nport_id], rval);
6469 		privp = NULL;
6470 		if (sf->sf_lip_cnt == lip_cnt) {
6471 			sf_force_lip(sf);
6472 		}
6473 	}
6474 
6475 	mutex_enter(&sf->sf_mutex);
6476 	return (privp);
6477 }
6478 
6479 
6480 /*
6481  * called by timeout when a reset times out
6482  */
6483 /*ARGSUSED*/
6484 static void
6485 sf_check_reset_delay(void *arg)
6486 {
6487 	struct sf *sf;
6488 	struct sf_target *target;
6489 	struct sf_reset_list *rp, *tp;
6490 	uint_t lip_cnt, reset_timeout_flag = FALSE;
6491 	clock_t lb;
6492 
6493 	lb = ddi_get_lbolt();
6494 
6495 	mutex_enter(&sf_global_mutex);
6496 
6497 	sf_reset_timeout_id = 0;
6498 
6499 	for (sf = sf_head; sf != NULL; sf = sf->sf_next) {
6500 
6501 		mutex_exit(&sf_global_mutex);
6502 		mutex_enter(&sf->sf_mutex);
6503 
6504 		/* is this type cast needed? */
6505 		tp = (struct sf_reset_list *)&sf->sf_reset_list;
6506 
6507 		rp = sf->sf_reset_list;
6508 		while (rp != NULL) {
6509 			if (((rp->timeout - lb) < 0) &&
6510 			    (rp->lip_cnt == sf->sf_lip_cnt)) {
6511 				tp->next = rp->next;
6512 				mutex_exit(&sf->sf_mutex);
6513 				target = rp->target;
6514 				lip_cnt = rp->lip_cnt;
6515 				kmem_free(rp, sizeof (struct sf_reset_list));
6516 				/* abort all cmds for this target */
6517 				while (target) {
6518 					sf_abort_all(sf, target, FALSE,
6519 					    lip_cnt, TRUE);
6520 					mutex_enter(&target->sft_mutex);
6521 					if (lip_cnt == sf->sf_lip_cnt) {
6522 						target->sft_state &=
6523 						    ~SF_TARGET_BUSY;
6524 					}
6525 					mutex_exit(&target->sft_mutex);
6526 					target = target->sft_next_lun;
6527 				}
6528 				mutex_enter(&sf->sf_mutex);
6529 				tp = (struct sf_reset_list *)
6530 				    &sf->sf_reset_list;
6531 				rp = sf->sf_reset_list;
6532 				lb = ddi_get_lbolt();
6533 			} else if (rp->lip_cnt != sf->sf_lip_cnt) {
6534 				tp->next = rp->next;
6535 				kmem_free(rp, sizeof (struct sf_reset_list));
6536 				rp = tp->next;
6537 			} else {
6538 				reset_timeout_flag = TRUE;
6539 				tp = rp;
6540 				rp = rp->next;
6541 			}
6542 		}
6543 		mutex_exit(&sf->sf_mutex);
6544 		mutex_enter(&sf_global_mutex);
6545 	}
6546 
6547 	if (reset_timeout_flag && (sf_reset_timeout_id == 0)) {
6548 		sf_reset_timeout_id = timeout(sf_check_reset_delay,
6549 		    NULL, drv_usectohz(SF_TARGET_RESET_DELAY));
6550 	}
6551 
6552 	mutex_exit(&sf_global_mutex);
6553 }
6554 
6555 
6556 /*
6557  * called to "reset the bus", i.e. force loop initialization (and address
6558  * re-negotiation)
6559  */
6560 static void
6561 sf_force_lip(struct sf *sf)
6562 {
6563 	int i;
6564 	struct sf_target *target;
6565 
6566 
6567 	/* disable restart of lip if we're suspended */
6568 	mutex_enter(&sf->sf_mutex);
6569 	if (sf->sf_state & SF_STATE_SUSPENDED) {
6570 		mutex_exit(&sf->sf_mutex);
6571 		SF_DEBUG(1, (sf, CE_CONT,
6572 		    "sf_force_lip, sf%d: lip restart disabled "
6573 		    "due to DDI_SUSPEND\n",
6574 		    ddi_get_instance(sf->sf_dip)));
6575 		return;
6576 	}
6577 
6578 	sf_log(sf, CE_NOTE, "Forcing lip\n");
6579 
6580 	for (i = 0; i < sf_max_targets; i++) {
6581 		target = sf->sf_targets[i];
6582 		while (target != NULL) {
6583 			mutex_enter(&target->sft_mutex);
6584 			if (!(target->sft_state & SF_TARGET_OFFLINE))
6585 				target->sft_state |= SF_TARGET_BUSY;
6586 			mutex_exit(&target->sft_mutex);
6587 			target = target->sft_next_lun;
6588 		}
6589 	}
6590 
6591 	sf->sf_lip_cnt++;
6592 	sf->sf_timer = sf_watchdog_time + SF_OFFLINE_TIMEOUT;
6593 	sf->sf_state = SF_STATE_OFFLINE;
6594 	mutex_exit(&sf->sf_mutex);
6595 	sf->sf_stats.lip_count++;		/* no mutex for this? */
6596 
6597 #ifdef DEBUG
6598 	/* are we allowing LIPs ?? */
6599 	if (sf_lip_flag != 0) {
6600 #endif
6601 		/* call the transport to force loop initialization */
6602 		if (((i = soc_force_lip(sf->sf_sochandle, sf->sf_socp,
6603 		    sf->sf_sochandle->fcal_portno, 1,
6604 		    FCAL_FORCE_LIP)) != FCAL_SUCCESS) &&
6605 		    (i != FCAL_TIMEOUT)) {
6606 			/* force LIP failed */
6607 			if (sf_core && (sf_core & SF_CORE_LIP_FAILED)) {
6608 				(void) soc_take_core(sf->sf_sochandle,
6609 				    sf->sf_socp);
6610 				sf_core = 0;
6611 			}
6612 #ifdef DEBUG
6613 			/* are we allowing reset after LIP failed ?? */
6614 			if (sf_reset_flag != 0) {
6615 #endif
6616 				/* restart socal after resetting it */
6617 				sf_log(sf, CE_NOTE,
6618 				    "!Force lip failed Status code 0x%x."
6619 				    " Reseting\n", i);
6620 				/* call transport to force a reset */
6621 				soc_force_reset(sf->sf_sochandle, sf->sf_socp,
6622 				    sf->sf_sochandle->fcal_portno, 1);
6623 #ifdef	DEBUG
6624 			}
6625 #endif
6626 		}
6627 #ifdef	DEBUG
6628 	}
6629 #endif
6630 }
6631 
6632 
6633 /*
6634  * called by the transport when an unsolicited ELS is received
6635  */
6636 static void
6637 sf_unsol_els_callback(void *arg, soc_response_t *srp, caddr_t payload)
6638 {
6639 	struct sf *sf = (struct sf *)arg;
6640 	els_payload_t	*els = (els_payload_t *)payload;
6641 	struct la_els_rjt *rsp;
6642 	int	i, tgt_id;
6643 	uchar_t dest_id;
6644 	struct fcal_packet *fpkt;
6645 	fc_frame_header_t *hp;
6646 	struct sf_els_hdr *privp;
6647 
6648 
6649 	if ((els == NULL) || ((i = srp->sr_soc_hdr.sh_byte_cnt) == 0)) {
6650 		return;
6651 	}
6652 
6653 	if (i > SOC_CQE_PAYLOAD) {
6654 		i = SOC_CQE_PAYLOAD;
6655 	}
6656 
6657 	dest_id = (uchar_t)srp->sr_fc_frame_hdr.s_id;
6658 	tgt_id = sf_alpa_to_switch[dest_id];
6659 
6660 	switch (els->els_cmd.c.ls_command) {
6661 
6662 	case LA_ELS_LOGO:
6663 		/*
6664 		 * logout received -- log the fact
6665 		 */
6666 		sf->sf_stats.tstats[tgt_id].logouts_recvd++;
6667 		sf_log(sf, CE_NOTE, "!LOGO recvd from target %x, %s\n",
6668 		    tgt_id,
6669 		    sf_lip_on_plogo ? "Forcing LIP...." : "");
6670 		if (sf_lip_on_plogo) {
6671 			sf_force_lip(sf);
6672 		}
6673 		break;
6674 
6675 	default:  /* includes LA_ELS_PLOGI */
6676 		/*
6677 		 * something besides a logout received -- we don't handle
6678 		 * this so send back a reject saying its unsupported
6679 		 */
6680 
6681 		sf_log(sf, CE_NOTE, "!ELS 0x%x recvd from target 0x%x\n",
6682 		    els->els_cmd.c.ls_command, tgt_id);
6683 
6684 
6685 		/* allocate room for a response */
6686 		if (sf_els_alloc(sf, dest_id, sizeof (struct sf_els_hdr),
6687 		    sizeof (struct la_els_rjt), sizeof (union sf_els_rsp),
6688 		    (caddr_t *)&privp, (caddr_t *)&rsp) == NULL) {
6689 			break;
6690 		}
6691 
6692 		fpkt = privp->fpkt;
6693 
6694 		/* fill in pkt header */
6695 		hp = &fpkt->fcal_socal_request.sr_fc_frame_hdr;
6696 		hp->r_ctl = R_CTL_ELS_RSP;
6697 		hp->f_ctl = F_CTL_LAST_SEQ | F_CTL_XCHG_CONTEXT;
6698 		hp->ox_id = srp->sr_fc_frame_hdr.ox_id;
6699 		hp->rx_id = srp->sr_fc_frame_hdr.rx_id;
6700 		fpkt->fcal_socal_request.sr_cqhdr.cq_hdr_type =
6701 		    CQ_TYPE_OUTBOUND;
6702 
6703 		fpkt->fcal_socal_request.sr_soc_hdr.sh_seg_cnt = 1;
6704 
6705 		/* fill in response */
6706 		rsp->ls_code = LA_ELS_RJT;	/* reject this ELS */
6707 		rsp->mbz[0] = 0;
6708 		rsp->mbz[1] = 0;
6709 		rsp->mbz[2] = 0;
6710 		((struct la_els_logi *)privp->rsp)->ls_code = LA_ELS_ACC;
6711 		*((int *)&rsp->reserved) = 0;
6712 		rsp->reason_code = RJT_UNSUPPORTED;
6713 		privp->retries = sf_els_retries;
6714 		privp->els_code = LA_ELS_RJT;
6715 		privp->timeout = (unsigned)0xffffffff;
6716 		(void) sf_els_transport(sf, privp);
6717 		break;
6718 	}
6719 }
6720 
6721 
6722 /*
6723  * Error logging, printing, and debug print routines
6724  */
6725 
6726 /*PRINTFLIKE3*/
6727 static void
6728 sf_log(struct sf *sf, int level, const char *fmt, ...)
6729 {
6730 	char buf[256];
6731 	dev_info_t *dip;
6732 	va_list ap;
6733 
6734 	if (sf != NULL) {
6735 		dip = sf->sf_dip;
6736 	} else {
6737 		dip = NULL;
6738 	}
6739 
6740 	va_start(ap, fmt);
6741 	(void) vsprintf(buf, fmt, ap);
6742 	va_end(ap);
6743 	scsi_log(dip, "sf", level, buf);
6744 }
6745 
6746 
6747 /*
6748  * called to get some sf kstats -- return 0 on success else return errno
6749  */
6750 static int
6751 sf_kstat_update(kstat_t *ksp, int rw)
6752 {
6753 	struct sf *sf;
6754 
6755 	if (rw == KSTAT_WRITE) {
6756 		/* can't write */
6757 		return (EACCES);
6758 	}
6759 
6760 	sf = ksp->ks_private;
6761 	sf->sf_stats.ncmds = sf->sf_ncmds;
6762 	sf->sf_stats.throttle_limit = sf->sf_throttle;
6763 	sf->sf_stats.cr_pool_size = sf->sf_cr_pool_cnt;
6764 
6765 	return (0);				/* success */
6766 }
6767 
6768 
6769 /*
6770  * Unix Entry Points
6771  */
6772 
6773 /*
6774  * driver entry point for opens on control device
6775  */
6776 /* ARGSUSED */
6777 static int
6778 sf_open(dev_t *dev_p, int flag, int otyp, cred_t *cred_p)
6779 {
6780 	dev_t dev = *dev_p;
6781 	struct sf *sf;
6782 
6783 
6784 	/* just ensure soft state exists for this device */
6785 	sf = ddi_get_soft_state(sf_state, SF_MINOR2INST(getminor(dev)));
6786 	if (sf == NULL) {
6787 		return (ENXIO);
6788 	}
6789 
6790 	++(sf->sf_check_n_close);
6791 
6792 	return (0);
6793 }
6794 
6795 
6796 /*
6797  * driver entry point for last close on control device
6798  */
6799 /* ARGSUSED */
6800 static int
6801 sf_close(dev_t dev, int flag, int otyp, cred_t *cred_p)
6802 {
6803 	struct sf *sf;
6804 
6805 	sf = ddi_get_soft_state(sf_state, SF_MINOR2INST(getminor(dev)));
6806 	if (sf == NULL) {
6807 		return (ENXIO);
6808 	}
6809 
6810 	if (!sf->sf_check_n_close) { /* if this flag is zero */
6811 		cmn_err(CE_WARN, "sf%d: trying to close unopened instance",
6812 		    SF_MINOR2INST(getminor(dev)));
6813 		return (ENODEV);
6814 	} else {
6815 		--(sf->sf_check_n_close);
6816 	}
6817 	return (0);
6818 }
6819 
6820 
6821 /*
6822  * driver entry point for sf ioctl commands
6823  */
6824 /* ARGSUSED */
6825 static int
6826 sf_ioctl(dev_t dev,
6827     int cmd, intptr_t arg, int mode, cred_t *cred_p, int *rval_p)
6828 {
6829 	struct sf *sf;
6830 	struct sf_target *target;
6831 	uchar_t al_pa;
6832 	struct sf_al_map map;
6833 	int cnt, i;
6834 	int	retval;				/* return value */
6835 	struct devctl_iocdata *dcp;
6836 	dev_info_t *cdip;
6837 	struct scsi_address ap;
6838 	scsi_hba_tran_t *tran;
6839 
6840 
6841 	sf = ddi_get_soft_state(sf_state, SF_MINOR2INST(getminor(dev)));
6842 	if (sf == NULL) {
6843 		return (ENXIO);
6844 	}
6845 
6846 	/* handle all ioctls */
6847 	switch (cmd) {
6848 
6849 	/*
6850 	 * We can use the generic implementation for these ioctls
6851 	 */
6852 	case DEVCTL_DEVICE_GETSTATE:
6853 	case DEVCTL_DEVICE_ONLINE:
6854 	case DEVCTL_DEVICE_OFFLINE:
6855 	case DEVCTL_BUS_GETSTATE:
6856 		return (ndi_devctl_ioctl(sf->sf_dip, cmd, arg, mode, 0));
6857 
6858 	/*
6859 	 * return FC map
6860 	 */
6861 	case SFIOCGMAP:
6862 		if ((sf->sf_lilp_map->lilp_magic != FCAL_LILP_MAGIC &&
6863 		    sf->sf_lilp_map->lilp_magic != FCAL_BADLILP_MAGIC) ||
6864 		    sf->sf_state != SF_STATE_ONLINE) {
6865 			retval = ENOENT;
6866 			goto dun;
6867 		}
6868 		mutex_enter(&sf->sf_mutex);
6869 		if (sf->sf_lilp_map->lilp_magic == FCAL_BADLILP_MAGIC) {
6870 			int i, j = 0;
6871 
6872 			/* Need to generate a fake lilp map */
6873 			for (i = 0; i < sf_max_targets; i++) {
6874 				if (sf->sf_targets[i])
6875 					sf->sf_lilp_map->lilp_alpalist[j++] =
6876 					    sf->sf_targets[i]->
6877 					    sft_hard_address;
6878 			}
6879 			sf->sf_lilp_map->lilp_length = (uchar_t)j;
6880 		}
6881 		cnt = sf->sf_lilp_map->lilp_length;
6882 		map.sf_count = (short)cnt;
6883 		bcopy((caddr_t)&sf->sf_sochandle->fcal_n_wwn,
6884 		    (caddr_t)&map.sf_hba_addr.sf_node_wwn,
6885 		    sizeof (la_wwn_t));
6886 		bcopy((caddr_t)&sf->sf_sochandle->fcal_p_wwn,
6887 		    (caddr_t)&map.sf_hba_addr.sf_port_wwn,
6888 		    sizeof (la_wwn_t));
6889 		map.sf_hba_addr.sf_al_pa = sf->sf_al_pa;
6890 		map.sf_hba_addr.sf_hard_address = 0;
6891 		map.sf_hba_addr.sf_inq_dtype = DTYPE_UNKNOWN;
6892 		for (i = 0; i < cnt; i++) {
6893 			al_pa = sf->sf_lilp_map->lilp_alpalist[i];
6894 			map.sf_addr_pair[i].sf_al_pa = al_pa;
6895 			if (al_pa == sf->sf_al_pa) {
6896 				(void) bcopy((caddr_t)&sf->sf_sochandle
6897 				    ->fcal_n_wwn, (caddr_t)&map.
6898 				    sf_addr_pair[i].sf_node_wwn,
6899 				    sizeof (la_wwn_t));
6900 				(void) bcopy((caddr_t)&sf->sf_sochandle
6901 				    ->fcal_p_wwn, (caddr_t)&map.
6902 				    sf_addr_pair[i].sf_port_wwn,
6903 				    sizeof (la_wwn_t));
6904 				map.sf_addr_pair[i].sf_hard_address =
6905 				    al_pa;
6906 				map.sf_addr_pair[i].sf_inq_dtype =
6907 				    DTYPE_PROCESSOR;
6908 				continue;
6909 			}
6910 			target = sf->sf_targets[sf_alpa_to_switch[
6911 			    al_pa]];
6912 			if (target != NULL) {
6913 				mutex_enter(&target->sft_mutex);
6914 				if (!(target->sft_state &
6915 				    (SF_TARGET_OFFLINE |
6916 				    SF_TARGET_BUSY))) {
6917 					bcopy((caddr_t)&target->
6918 					    sft_node_wwn,
6919 					    (caddr_t)&map.sf_addr_pair
6920 					    [i].sf_node_wwn,
6921 					    sizeof (la_wwn_t));
6922 					bcopy((caddr_t)&target->
6923 					    sft_port_wwn,
6924 					    (caddr_t)&map.sf_addr_pair
6925 					    [i].sf_port_wwn,
6926 					    sizeof (la_wwn_t));
6927 					map.sf_addr_pair[i].
6928 					    sf_hard_address
6929 					    = target->sft_hard_address;
6930 					map.sf_addr_pair[i].
6931 					    sf_inq_dtype
6932 					    = target->sft_device_type;
6933 					mutex_exit(&target->sft_mutex);
6934 					continue;
6935 				}
6936 				mutex_exit(&target->sft_mutex);
6937 			}
6938 			bzero((caddr_t)&map.sf_addr_pair[i].
6939 			    sf_node_wwn, sizeof (la_wwn_t));
6940 			bzero((caddr_t)&map.sf_addr_pair[i].
6941 			    sf_port_wwn, sizeof (la_wwn_t));
6942 			map.sf_addr_pair[i].sf_inq_dtype =
6943 			    DTYPE_UNKNOWN;
6944 		}
6945 		mutex_exit(&sf->sf_mutex);
6946 		if (ddi_copyout((caddr_t)&map, (caddr_t)arg,
6947 		    sizeof (struct sf_al_map), mode) != 0) {
6948 			retval = EFAULT;
6949 			goto dun;
6950 		}
6951 		break;
6952 
6953 	/*
6954 	 * handle device control ioctls
6955 	 */
6956 	case DEVCTL_DEVICE_RESET:
6957 		if (ndi_dc_allochdl((void *)arg, &dcp) != NDI_SUCCESS) {
6958 			retval = EFAULT;
6959 			goto dun;
6960 		}
6961 		if ((ndi_dc_getname(dcp) == NULL) ||
6962 		    (ndi_dc_getaddr(dcp) == NULL)) {
6963 			ndi_dc_freehdl(dcp);
6964 			retval = EINVAL;
6965 			goto dun;
6966 		}
6967 		cdip = ndi_devi_find(sf->sf_dip,
6968 		    ndi_dc_getname(dcp), ndi_dc_getaddr(dcp));
6969 		ndi_dc_freehdl(dcp);
6970 
6971 		if (cdip == NULL) {
6972 			retval = ENXIO;
6973 			goto dun;
6974 		}
6975 
6976 		if ((target = sf_get_target_from_dip(sf, cdip)) == NULL) {
6977 			retval = ENXIO;
6978 			goto dun;
6979 		}
6980 		mutex_enter(&target->sft_mutex);
6981 		if (!(target->sft_state & SF_TARGET_INIT_DONE)) {
6982 			mutex_exit(&target->sft_mutex);
6983 			retval = ENXIO;
6984 			goto dun;
6985 		}
6986 
6987 		/* This is ugly */
6988 		tran = kmem_zalloc(scsi_hba_tran_size(), KM_SLEEP);
6989 		bcopy(target->sft_tran, tran, scsi_hba_tran_size());
6990 		mutex_exit(&target->sft_mutex);
6991 		ap.a_hba_tran = tran;
6992 		ap.a_target = sf_alpa_to_switch[target->sft_al_pa];
6993 		if (sf_reset(&ap, RESET_TARGET) == FALSE) {
6994 			retval = EIO;
6995 		} else {
6996 			retval = 0;
6997 		}
6998 		kmem_free(tran, scsi_hba_tran_size());
6999 		goto dun;
7000 
7001 	case DEVCTL_BUS_QUIESCE:
7002 	case DEVCTL_BUS_UNQUIESCE:
7003 		retval = ENOTSUP;
7004 		goto dun;
7005 
7006 	case DEVCTL_BUS_RESET:
7007 	case DEVCTL_BUS_RESETALL:
7008 		sf_force_lip(sf);
7009 		break;
7010 
7011 	default:
7012 		retval = ENOTTY;
7013 		goto dun;
7014 	}
7015 
7016 	retval = 0;				/* success */
7017 
7018 dun:
7019 	return (retval);
7020 }
7021 
7022 
7023 /*
7024  * get the target given a DIP
7025  */
7026 static struct sf_target *
7027 sf_get_target_from_dip(struct sf *sf, dev_info_t *dip)
7028 {
7029 	int i;
7030 	struct sf_target *target;
7031 
7032 
7033 	/* scan each hash queue for the DIP in question */
7034 	for (i = 0; i < SF_NUM_HASH_QUEUES; i++) {
7035 		target = sf->sf_wwn_lists[i];
7036 		while (target != NULL) {
7037 			if (target->sft_dip == dip) {
7038 				return (target); /* success: target found */
7039 			}
7040 			target = target->sft_next;
7041 		}
7042 	}
7043 	return (NULL);				/* failure: target not found */
7044 }
7045 
7046 
7047 /*
7048  * called by the transport to get an event cookie
7049  */
7050 static int
7051 sf_bus_get_eventcookie(dev_info_t *dip, dev_info_t *rdip, char *name,
7052     ddi_eventcookie_t *event_cookiep)
7053 {
7054 	struct sf *sf;
7055 
7056 	sf = ddi_get_soft_state(sf_state, ddi_get_instance(dip));
7057 	if (sf == NULL) {
7058 		/* can't find instance for this device */
7059 		return (DDI_FAILURE);
7060 	}
7061 
7062 	return (ndi_event_retrieve_cookie(sf->sf_event_hdl, rdip, name,
7063 	    event_cookiep, NDI_EVENT_NOPASS));
7064 
7065 }
7066 
7067 
7068 /*
7069  * called by the transport to add an event callback
7070  */
7071 static int
7072 sf_bus_add_eventcall(dev_info_t *dip, dev_info_t *rdip,
7073     ddi_eventcookie_t eventid, void (*callback)(dev_info_t *dip,
7074     ddi_eventcookie_t event, void *arg, void *impl_data), void *arg,
7075     ddi_callback_id_t *cb_id)
7076 {
7077 	struct sf *sf;
7078 
7079 	sf = ddi_get_soft_state(sf_state, ddi_get_instance(dip));
7080 	if (sf == NULL) {
7081 		/* can't find instance for this device */
7082 		return (DDI_FAILURE);
7083 	}
7084 
7085 	return (ndi_event_add_callback(sf->sf_event_hdl, rdip,
7086 	    eventid, callback, arg, NDI_SLEEP, cb_id));
7087 
7088 }
7089 
7090 
7091 /*
7092  * called by the transport to remove an event callback
7093  */
7094 static int
7095 sf_bus_remove_eventcall(dev_info_t *devi, ddi_callback_id_t cb_id)
7096 {
7097 	struct sf *sf;
7098 
7099 	sf = ddi_get_soft_state(sf_state, ddi_get_instance(devi));
7100 	if (sf == NULL) {
7101 		/* can't find instance for this device */
7102 		return (DDI_FAILURE);
7103 	}
7104 
7105 	return (ndi_event_remove_callback(sf->sf_event_hdl, cb_id));
7106 }
7107 
7108 
7109 /*
7110  * called by the transport to post an event
7111  */
7112 static int
7113 sf_bus_post_event(dev_info_t *dip, dev_info_t *rdip,
7114     ddi_eventcookie_t eventid, void *impldata)
7115 {
7116 	ddi_eventcookie_t remove_cookie, cookie;
7117 
7118 	/* is this a remove event ?? */
7119 	struct sf *sf = ddi_get_soft_state(sf_state, ddi_get_instance(dip));
7120 	remove_cookie = ndi_event_tag_to_cookie(sf->sf_event_hdl,
7121 	    SF_EVENT_TAG_REMOVE);
7122 
7123 	if (remove_cookie == eventid) {
7124 		struct sf_target *target;
7125 
7126 		/* handle remove event */
7127 
7128 		if (sf == NULL) {
7129 			/* no sf instance for this device */
7130 			return (NDI_FAILURE);
7131 		}
7132 
7133 		/* get the target for this event */
7134 		if ((target = sf_get_target_from_dip(sf, rdip)) != NULL) {
7135 			/*
7136 			 * clear device info for this target and mark as
7137 			 * not done
7138 			 */
7139 			mutex_enter(&target->sft_mutex);
7140 			target->sft_dip = NULL;
7141 			target->sft_state &= ~SF_TARGET_INIT_DONE;
7142 			mutex_exit(&target->sft_mutex);
7143 			return (NDI_SUCCESS); /* event handled */
7144 		}
7145 
7146 		/* no target for this event */
7147 		return (NDI_FAILURE);
7148 	}
7149 
7150 	/* an insertion event */
7151 	if (ndi_busop_get_eventcookie(dip, rdip, FCAL_INSERT_EVENT, &cookie)
7152 	    != NDI_SUCCESS) {
7153 		return (NDI_FAILURE);
7154 	}
7155 
7156 	return (ndi_post_event(dip, rdip, cookie, impldata));
7157 }
7158 
7159 
7160 /*
7161  * the sf hotplug daemon, one thread per sf instance
7162  */
7163 static void
7164 sf_hp_daemon(void *arg)
7165 {
7166 	struct sf *sf = (struct sf *)arg;
7167 	struct sf_hp_elem *elem;
7168 	struct sf_target *target;
7169 	int tgt_id;
7170 	callb_cpr_t cprinfo;
7171 
7172 	CALLB_CPR_INIT(&cprinfo, &sf->sf_hp_daemon_mutex,
7173 	    callb_generic_cpr, "sf_hp_daemon");
7174 
7175 	mutex_enter(&sf->sf_hp_daemon_mutex);
7176 
7177 	do {
7178 		while (sf->sf_hp_elem_head != NULL) {
7179 
7180 			/* save ptr to head of list */
7181 			elem = sf->sf_hp_elem_head;
7182 
7183 			/* take element off of list */
7184 			if (sf->sf_hp_elem_head == sf->sf_hp_elem_tail) {
7185 				/* element only one in list -- list now empty */
7186 				sf->sf_hp_elem_head = NULL;
7187 				sf->sf_hp_elem_tail = NULL;
7188 			} else {
7189 				/* remove element from head of list */
7190 				sf->sf_hp_elem_head = sf->sf_hp_elem_head->next;
7191 			}
7192 
7193 			mutex_exit(&sf->sf_hp_daemon_mutex);
7194 
7195 			switch (elem->what) {
7196 			case SF_ONLINE:
7197 				/* online this target */
7198 				target = elem->target;
7199 				(void) ndi_devi_online(elem->dip, 0);
7200 				(void) ndi_event_retrieve_cookie(
7201 				    sf->sf_event_hdl,
7202 				    target->sft_dip, FCAL_INSERT_EVENT,
7203 				    &sf_insert_eid, NDI_EVENT_NOPASS);
7204 				(void) ndi_event_run_callbacks(sf->sf_event_hdl,
7205 				    target->sft_dip, sf_insert_eid, NULL);
7206 				break;
7207 			case SF_OFFLINE:
7208 				/* offline this target */
7209 				target = elem->target;
7210 				tgt_id = sf_alpa_to_switch[target->sft_al_pa];
7211 				/* don't do NDI_DEVI_REMOVE for now */
7212 				if (ndi_devi_offline(elem->dip, 0) !=
7213 				    NDI_SUCCESS) {
7214 					SF_DEBUG(1, (sf, CE_WARN, "target %x, "
7215 					    "device offline failed", tgt_id));
7216 				} else {
7217 					SF_DEBUG(1, (sf, CE_NOTE, "target %x, "
7218 					    "device offline succeeded\n",
7219 					    tgt_id));
7220 				}
7221 				break;
7222 			}
7223 			kmem_free(elem, sizeof (struct sf_hp_elem));
7224 			mutex_enter(&sf->sf_hp_daemon_mutex);
7225 		}
7226 
7227 		/* if exit is not already signaled */
7228 		if (sf->sf_hp_exit == 0) {
7229 			/* wait to be signaled by work or exit */
7230 			CALLB_CPR_SAFE_BEGIN(&cprinfo);
7231 			cv_wait(&sf->sf_hp_daemon_cv, &sf->sf_hp_daemon_mutex);
7232 			CALLB_CPR_SAFE_END(&cprinfo, &sf->sf_hp_daemon_mutex);
7233 		}
7234 	} while (sf->sf_hp_exit == 0);
7235 
7236 	/* sf_hp_daemon_mutex is dropped by CALLB_CPR_EXIT */
7237 	CALLB_CPR_EXIT(&cprinfo);
7238 	thread_exit();			/* no more hotplug thread */
7239 	/* NOTREACHED */
7240 }
7241