/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 * Copyright (c) 2011 Bayard G. Bell. All rights reserved.
 */

/*
 * sf - Solaris Fibre Channel driver
 *
 * This module implements some of the Fibre Channel FC-4 layer, converting
 * from FC frames to SCSI and back.  (Note: no sequence management is done
 * here, though.)
 */

#if defined(lint) && !defined(DEBUG)
#define	DEBUG	1
#endif

/*
 * XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
 * Need to use the ugly RAID LUN mappings in FCP Annex D
 * to prevent SCSA from barfing.  This *REALLY* needs to
 * be addressed by the standards committee.
 */
#define	RAID_LUNS	1

#ifdef DEBUG
static int sfdebug = 0;
#include <sys/debug.h>

#define	SF_DEBUG(level, args) \
	if (sfdebug >= (level)) sf_log args
#else
#define	SF_DEBUG(level, args)
#endif
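
/*
 * Example usage (the message is emitted only when sfdebug >= 2):
 *
 *	SF_DEBUG(2, (sf, CE_CONT,
 *	    "sf_attach: DDI_RESUME for sf%d\n", instance));
 */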

static int sf_bus_config_debug = 0;

/* Why do I have to do this? */
#define	offsetof(s, m)  (size_t)(&(((s *)0)->m))
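/*
 * e.g. offsetof(struct sf, sf_state) evaluates to the byte offset
 * of the sf_state member within struct sf
 */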

#include <sys/scsi/scsi.h>
#include <sys/fc4/fcal.h>
#include <sys/fc4/fcp.h>
#include <sys/fc4/fcal_linkapp.h>
#include <sys/socal_cq_defs.h>
#include <sys/fc4/fcal_transport.h>
#include <sys/fc4/fcio.h>
#include <sys/scsi/adapters/sfvar.h>
#include <sys/scsi/impl/scsi_reset_notify.h>
#include <sys/stat.h>
#include <sys/varargs.h>
#include <sys/var.h>
#include <sys/thread.h>
#include <sys/proc.h>
#include <sys/kstat.h>
#include <sys/devctl.h>
#include <sys/scsi/targets/ses.h>
#include <sys/callb.h>

static int sf_info(dev_info_t *, ddi_info_cmd_t, void *, void **);
static int sf_attach(dev_info_t *, ddi_attach_cmd_t);
static int sf_detach(dev_info_t *, ddi_detach_cmd_t);
static void sf_softstate_unlink(struct sf *);
static int sf_scsi_bus_config(dev_info_t *parent, uint_t flag,
    ddi_bus_config_op_t op, void *arg, dev_info_t **childp);
static int sf_scsi_bus_unconfig(dev_info_t *parent, uint_t flag,
    ddi_bus_config_op_t op, void *arg);
static int sf_scsi_tgt_init(dev_info_t *, dev_info_t *,
    scsi_hba_tran_t *, struct scsi_device *);
static void sf_scsi_tgt_free(dev_info_t *, dev_info_t *,
    scsi_hba_tran_t *, struct scsi_device *);
static int sf_pkt_alloc_extern(struct sf *, struct sf_pkt *,
    int, int, int);
static void sf_pkt_destroy_extern(struct sf *, struct sf_pkt *);
static struct scsi_pkt *sf_scsi_init_pkt(struct scsi_address *,
    struct scsi_pkt *, struct buf *, int, int, int, int, int (*)(), caddr_t);
static void sf_scsi_destroy_pkt(struct scsi_address *, struct scsi_pkt *);
static void sf_scsi_dmafree(struct scsi_address *, struct scsi_pkt *);
static void sf_scsi_sync_pkt(struct scsi_address *, struct scsi_pkt *);
static int sf_scsi_reset_notify(struct scsi_address *, int,
    void (*)(caddr_t), caddr_t);
static int sf_scsi_get_name(struct scsi_device *, char *, int);
static int sf_scsi_get_bus_addr(struct scsi_device *, char *, int);
static int sf_add_cr_pool(struct sf *);
static int sf_cr_alloc(struct sf *, struct sf_pkt *, int (*)());
static void sf_cr_free(struct sf_cr_pool *, struct sf_pkt *);
static void sf_crpool_free(struct sf *);
static int sf_kmem_cache_constructor(void *, void *, int);
static void sf_kmem_cache_destructor(void *, void *);
static void sf_statec_callback(void *, int);
static int sf_login(struct sf *, uchar_t, uchar_t, uint_t, int);
static int sf_els_transport(struct sf *, struct sf_els_hdr *);
static void sf_els_callback(struct fcal_packet *);
static int sf_do_prli(struct sf *, struct sf_els_hdr *, struct la_els_logi *);
static int sf_do_adisc(struct sf *, struct sf_els_hdr *);
static int sf_do_reportlun(struct sf *, struct sf_els_hdr *,
    struct sf_target *);
static void sf_reportlun_callback(struct fcal_packet *);
static int sf_do_inquiry(struct sf *, struct sf_els_hdr *,
    struct sf_target *);
static void sf_inq_callback(struct fcal_packet *);
static struct fcal_packet *sf_els_alloc(struct sf *, uchar_t, int, int,
    int, caddr_t *, caddr_t *);
static void sf_els_free(struct fcal_packet *);
static struct sf_target *sf_create_target(struct sf *,
    struct sf_els_hdr *, int, int64_t);
#ifdef RAID_LUNS
static struct sf_target *sf_lookup_target(struct sf *, uchar_t *, int);
#else
static struct sf_target *sf_lookup_target(struct sf *, uchar_t *, int64_t);
#endif
static void sf_finish_init(struct sf *, int);
static void sf_offline_target(struct sf *, struct sf_target *);
static void sf_create_devinfo(struct sf *, struct sf_target *, int);
static int sf_create_props(dev_info_t *, struct sf_target *, int);
static int sf_commoncap(struct scsi_address *, char *, int, int, int);
static int sf_getcap(struct scsi_address *, char *, int);
static int sf_setcap(struct scsi_address *, char *, int, int);
static int sf_abort(struct scsi_address *, struct scsi_pkt *);
static int sf_reset(struct scsi_address *, int);
static void sf_abort_all(struct sf *, struct sf_target *, int, int, int);
static int sf_start(struct scsi_address *, struct scsi_pkt *);
static int sf_start_internal(struct sf *, struct sf_pkt *);
static void sf_fill_ids(struct sf *, struct sf_pkt *, struct sf_target *);
static int sf_prepare_pkt(struct sf *, struct sf_pkt *, struct sf_target *);
static int sf_dopoll(struct sf *, struct sf_pkt *);
static void sf_cmd_callback(struct fcal_packet *);
static void sf_throttle(struct sf *);
static void sf_watch(void *);
static void sf_throttle_start(struct sf *);
static void sf_check_targets(struct sf *);
static void sf_check_reset_delay(void *);
static int sf_target_timeout(struct sf *, struct sf_pkt *);
static void sf_force_lip(struct sf *);
static void sf_unsol_els_callback(void *, soc_response_t *, caddr_t);
static struct sf_els_hdr *sf_els_timeout(struct sf *, struct sf_els_hdr *);
/*PRINTFLIKE3*/
static void sf_log(struct sf *, int, const char *, ...);
static int sf_kstat_update(kstat_t *, int);
static int sf_open(dev_t *, int, int, cred_t *);
static int sf_close(dev_t, int, int, cred_t *);
static int sf_ioctl(dev_t, int, intptr_t, int, cred_t *, int *);
static struct sf_target *sf_get_target_from_dip(struct sf *, dev_info_t *);
static int sf_bus_get_eventcookie(dev_info_t *, dev_info_t *, char *,
    ddi_eventcookie_t *);
static int sf_bus_add_eventcall(dev_info_t *, dev_info_t *,
    ddi_eventcookie_t, void (*)(), void *, ddi_callback_id_t *cb_id);
static int sf_bus_remove_eventcall(dev_info_t *devi, ddi_callback_id_t cb_id);
static int sf_bus_post_event(dev_info_t *, dev_info_t *,
    ddi_eventcookie_t, void *);

static void sf_hp_daemon(void *);

/*
 * this is required to be able to supply a control node
 * where ioctls can be executed
 */
struct cb_ops sf_cb_ops = {
	sf_open,			/* open */
	sf_close,			/* close */
	nodev,				/* strategy */
	nodev,				/* print */
	nodev,				/* dump */
	nodev,				/* read */
	nodev,				/* write */
	sf_ioctl,			/* ioctl */
	nodev,				/* devmap */
	nodev,				/* mmap */
	nodev,				/* segmap */
	nochpoll,			/* poll */
	ddi_prop_op,			/* cb_prop_op */
	0,				/* streamtab  */
	D_MP | D_NEW | D_HOTPLUG	/* driver flags */

};

/*
 * autoconfiguration routines.
 */
static struct dev_ops sf_ops = {
	DEVO_REV,		/* devo_rev, */
	0,			/* refcnt  */
	sf_info,		/* info */
	nulldev,		/* identify */
	nulldev,		/* probe */
	sf_attach,		/* attach */
	sf_detach,		/* detach */
	nodev,			/* reset */
	&sf_cb_ops,		/* driver operations */
	NULL,			/* bus operations */
	NULL,			/* power management */
	ddi_quiesce_not_supported,	/* devo_quiesce */
};

#define	SF_NAME	"FC-AL FCP Nexus Driver"	/* Name of the module. */
static	char	sf_version[] = "1.72 08/19/2008"; /* version of the module */

static struct modldrv modldrv = {
	&mod_driverops, /* Type of module. This one is a driver */
	SF_NAME,
	&sf_ops,	/* driver ops */
};

static struct modlinkage modlinkage = {
	MODREV_1, (void *)&modldrv, NULL
};

/* XXXXXX The following is here to handle broken targets -- remove it later */
static int sf_reportlun_forever = 0;
/* XXXXXX */
static int sf_lip_on_plogo = 0;
static int sf_els_retries = SF_ELS_RETRIES;
static struct sf *sf_head = NULL;
static int sf_target_scan_cnt = 4;
static int sf_pkt_scan_cnt = 5;
static int sf_pool_scan_cnt = 1800;
static void *sf_state = NULL;
static int sf_watchdog_init = 0;
static int sf_watchdog_time = 0;
static int sf_watchdog_timeout = 1;
static int sf_watchdog_tick;
static int sf_watch_running = 0;
static timeout_id_t sf_watchdog_id;
static timeout_id_t sf_reset_timeout_id;
static int sf_max_targets = SF_MAX_TARGETS;
static kmutex_t sf_global_mutex;
static int sf_core = 0;
int *sf_token = NULL; /* Must not be static or lint complains. */
static kcondvar_t sf_watch_cv;
extern pri_t minclsyspri;
static ddi_eventcookie_t	sf_insert_eid;
static ddi_eventcookie_t	sf_remove_eid;

static ndi_event_definition_t	sf_event_defs[] = {
{ SF_EVENT_TAG_INSERT, FCAL_INSERT_EVENT, EPL_KERNEL, 0 },
{ SF_EVENT_TAG_REMOVE, FCAL_REMOVE_EVENT, EPL_INTERRUPT, 0 }
};

#define	SF_N_NDI_EVENTS	\
	(sizeof (sf_event_defs) / sizeof (ndi_event_definition_t))

#ifdef DEBUG
static int sf_lip_flag = 1;		/* bool: to allow LIPs */
static int sf_reset_flag = 1;		/* bool: to allow reset after LIP */
static int sf_abort_flag = 0;		/* bool: to do just one abort */
#endif

extern int64_t ddi_get_lbolt64(void);

/*
 * for converting between target number (switch) and hard address/AL_PA
 */
static uchar_t sf_switch_to_alpa[] = {
	0xef, 0xe8, 0xe4, 0xe2, 0xe1, 0xe0, 0xdc, 0xda, 0xd9, 0xd6,
	0xd5, 0xd4, 0xd3, 0xd2, 0xd1, 0xce, 0xcd, 0xcc, 0xcb, 0xca,
	0xc9, 0xc7, 0xc6, 0xc5, 0xc3, 0xbc, 0xba, 0xb9, 0xb6, 0xb5,
	0xb4, 0xb3, 0xb2, 0xb1, 0xae, 0xad, 0xac, 0xab, 0xaa, 0xa9,
	0xa7, 0xa6, 0xa5, 0xa3, 0x9f, 0x9e, 0x9d, 0x9b, 0x98, 0x97,
	0x90, 0x8f, 0x88, 0x84, 0x82, 0x81, 0x80, 0x7c, 0x7a, 0x79,
	0x76, 0x75, 0x74, 0x73, 0x72, 0x71, 0x6e, 0x6d, 0x6c, 0x6b,
	0x6a, 0x69, 0x67, 0x66, 0x65, 0x63, 0x5c, 0x5a, 0x59, 0x56,
	0x55, 0x54, 0x53, 0x52, 0x51, 0x4e, 0x4d, 0x4c, 0x4b, 0x4a,
	0x49, 0x47, 0x46, 0x45, 0x43, 0x3c, 0x3a, 0x39, 0x36, 0x35,
	0x34, 0x33, 0x32, 0x31, 0x2e, 0x2d, 0x2c, 0x2b, 0x2a, 0x29,
	0x27, 0x26, 0x25, 0x23, 0x1f, 0x1e, 0x1d, 0x1b, 0x18, 0x17,
	0x10, 0x0f, 0x08, 0x04, 0x02, 0x01
};

static uchar_t sf_alpa_to_switch[] = {
	0x00, 0x7d, 0x7c, 0x00, 0x7b, 0x00, 0x00, 0x00, 0x7a, 0x00,
	0x00, 0x00, 0x00, 0x00, 0x00, 0x79, 0x78, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x77, 0x76, 0x00, 0x00, 0x75, 0x00, 0x74,
	0x73, 0x72, 0x00, 0x00, 0x00, 0x71, 0x00, 0x70, 0x6f, 0x6e,
	0x00, 0x6d, 0x6c, 0x6b, 0x6a, 0x69, 0x68, 0x00, 0x00, 0x67,
	0x66, 0x65, 0x64, 0x63, 0x62, 0x00, 0x00, 0x61, 0x60, 0x00,
	0x5f, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x5e, 0x00, 0x5d,
	0x5c, 0x5b, 0x00, 0x5a, 0x59, 0x58, 0x57, 0x56, 0x55, 0x00,
	0x00, 0x54, 0x53, 0x52, 0x51, 0x50, 0x4f, 0x00, 0x00, 0x4e,
	0x4d, 0x00, 0x4c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x4b,
	0x00, 0x4a, 0x49, 0x48, 0x00, 0x47, 0x46, 0x45, 0x44, 0x43,
	0x42, 0x00, 0x00, 0x41, 0x40, 0x3f, 0x3e, 0x3d, 0x3c, 0x00,
	0x00, 0x3b, 0x3a, 0x00, 0x39, 0x00, 0x00, 0x00, 0x38, 0x37,
	0x36, 0x00, 0x35, 0x00, 0x00, 0x00, 0x34, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x33, 0x32, 0x00, 0x00, 0x00, 0x00, 0x00,
	0x00, 0x31, 0x30, 0x00, 0x00, 0x2f, 0x00, 0x2e, 0x2d, 0x2c,
	0x00, 0x00, 0x00, 0x2b, 0x00, 0x2a, 0x29, 0x28, 0x00, 0x27,
	0x26, 0x25, 0x24, 0x23, 0x22, 0x00, 0x00, 0x21, 0x20, 0x1f,
	0x1e, 0x1d, 0x1c, 0x00, 0x00, 0x1b, 0x1a, 0x00, 0x19, 0x00,
	0x00, 0x00, 0x00, 0x00, 0x00, 0x18, 0x00, 0x17, 0x16, 0x15,
	0x00, 0x14, 0x13, 0x12, 0x11, 0x10, 0x0f, 0x00, 0x00, 0x0e,
	0x0d, 0x0c, 0x0b, 0x0a, 0x09, 0x00, 0x00, 0x08, 0x07, 0x00,
	0x06, 0x00, 0x00, 0x00, 0x05, 0x04, 0x03, 0x00, 0x02, 0x00,
	0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
};
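
/*
 * Note that these two tables are inverses of each other for valid
 * entries: sf_alpa_to_switch[sf_switch_to_alpa[i]] == i.  For example,
 * target 0 has hard address/AL_PA 0xef, and sf_alpa_to_switch[0xef]
 * is 0.
 */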

/*
 * these macros call the proper transport-layer function given
 * a particular transport
 */
#define	soc_transport(a, b, c, d) (*a->fcal_ops->fcal_transport)(b, c, d)
#define	soc_transport_poll(a, b, c, d)\
	(*a->fcal_ops->fcal_transport_poll)(b, c, d)
#define	soc_get_lilp_map(a, b, c, d, e)\
	(*a->fcal_ops->fcal_lilp_map)(b, c, d, e)
#define	soc_force_lip(a, b, c, d, e)\
	(*a->fcal_ops->fcal_force_lip)(b, c, d, e)
#define	soc_abort(a, b, c, d, e)\
	(*a->fcal_ops->fcal_abort_cmd)(b, c, d, e)
#define	soc_force_reset(a, b, c, d)\
	(*a->fcal_ops->fcal_force_reset)(b, c, d)
#define	soc_add_ulp(a, b, c, d, e, f, g, h)\
	(*a->fcal_ops->fcal_add_ulp)(b, c, d, e, f, g, h)
#define	soc_remove_ulp(a, b, c, d, e)\
	(*a->fcal_ops->fcal_remove_ulp)(b, c, d, e)
#define	soc_take_core(a, b) (*a->fcal_ops->fcal_take_core)(b)
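
/*
 * The first argument supplies the ops vector; for example,
 * soc_transport(sf->sf_sochandle, ...) expands to
 * (*sf->sf_sochandle->fcal_ops->fcal_transport)(...), dispatching
 * into the underlying socal transport driver.
 */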


/* power management property defines (should be in a common include file?) */
#define	PM_HARDWARE_STATE_PROP		"pm-hardware-state"
#define	PM_NEEDS_SUSPEND_RESUME		"needs-suspend-resume"


/* node properties */
#define	NODE_WWN_PROP			"node-wwn"
#define	PORT_WWN_PROP			"port-wwn"
#define	LIP_CNT_PROP			"lip-count"
#define	TARGET_PROP			"target"
#define	LUN_PROP			"lun"


/*
 * initialize this driver and install this module
 */
int
_init(void)
{
	int	i;

	i = ddi_soft_state_init(&sf_state, sizeof (struct sf),
	    SF_INIT_ITEMS);
	if (i != 0)
		return (i);

	if ((i = scsi_hba_init(&modlinkage)) != 0) {
		ddi_soft_state_fini(&sf_state);
		return (i);
	}

	mutex_init(&sf_global_mutex, NULL, MUTEX_DRIVER, NULL);
	sf_watch_running = 0;
	cv_init(&sf_watch_cv, NULL, CV_DRIVER, NULL);

	if ((i = mod_install(&modlinkage)) != 0) {
		mutex_destroy(&sf_global_mutex);
		cv_destroy(&sf_watch_cv);
		scsi_hba_fini(&modlinkage);
		ddi_soft_state_fini(&sf_state);
		return (i);
	}

	return (i);
}


/*
 * remove this driver module from the system
 */
int
_fini(void)
{
	int	i;

	if ((i = mod_remove(&modlinkage)) == 0) {
		scsi_hba_fini(&modlinkage);
		mutex_destroy(&sf_global_mutex);
		cv_destroy(&sf_watch_cv);
		ddi_soft_state_fini(&sf_state);
	}
	return (i);
}


int
_info(struct modinfo *modinfop)
{
	return (mod_info(&modlinkage, modinfop));
}

/*
 * Given the device number return the devinfo pointer or instance
 */
/*ARGSUSED*/
static int
sf_info(dev_info_t *dip, ddi_info_cmd_t infocmd, void *arg, void **result)
{
	int		instance = SF_MINOR2INST(getminor((dev_t)arg));
	struct sf	*sf;

	switch (infocmd) {
	case DDI_INFO_DEVT2DEVINFO:
		sf = ddi_get_soft_state(sf_state, instance);
		if (sf != NULL)
			*result = sf->sf_dip;
		else {
			*result = NULL;
			return (DDI_FAILURE);
		}
		break;

	case DDI_INFO_DEVT2INSTANCE:
		*result = (void *)(uintptr_t)instance;
		break;
	default:
		return (DDI_FAILURE);
	}
	return (DDI_SUCCESS);
}

/*
 * either attach or resume this driver
 */
static int
sf_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
{
	int instance;
	int mutex_initted = FALSE;
	uint_t ccount;
	size_t i, real_size;
	struct fcal_transport *handle;
	char buf[64];
	struct sf *sf, *tsf;
	scsi_hba_tran_t *tran = NULL;
	int	handle_bound = FALSE;
	kthread_t *tp;


	switch ((int)cmd) {

	case DDI_RESUME:

		/*
		 * we've previously been SF_STATE_OFFLINEd by a DDI_SUSPEND,
		 * so time to undo that and get going again by forcing a
		 * lip
		 */

		instance = ddi_get_instance(dip);

		sf = ddi_get_soft_state(sf_state, instance);
		SF_DEBUG(2, (sf, CE_CONT,
		    "sf_attach: DDI_RESUME for sf%d\n", instance));
		if (sf == NULL) {
			cmn_err(CE_WARN, "sf%d: bad soft state", instance);
			return (DDI_FAILURE);
		}

		/*
		 * clear suspended flag so that normal operations can resume
		 */
		mutex_enter(&sf->sf_mutex);
		sf->sf_state &= ~SF_STATE_SUSPENDED;
		mutex_exit(&sf->sf_mutex);

		/*
		 * force a login by setting our state to offline
		 */
		sf->sf_timer = sf_watchdog_time + SF_OFFLINE_TIMEOUT;
		sf->sf_state = SF_STATE_OFFLINE;

		/*
		 * call transport routine to register state change and
		 * ELS callback routines (to register us as a ULP)
		 */
		soc_add_ulp(sf->sf_sochandle, sf->sf_socp,
		    sf->sf_sochandle->fcal_portno, TYPE_SCSI_FCP,
		    sf_statec_callback, sf_unsol_els_callback, NULL, sf);

		/*
		 * call transport routine to force loop initialization
		 */
		(void) soc_force_lip(sf->sf_sochandle, sf->sf_socp,
		    sf->sf_sochandle->fcal_portno, 0, FCAL_NO_LIP);

		/*
		 * increment watchdog init flag, setting watchdog timeout
		 * if we are the first (since somebody has to do it)
		 */
		mutex_enter(&sf_global_mutex);
		if (!sf_watchdog_init++) {
			mutex_exit(&sf_global_mutex);
			sf_watchdog_id = timeout(sf_watch,
			    (caddr_t)0, sf_watchdog_tick);
		} else {
			mutex_exit(&sf_global_mutex);
		}

		return (DDI_SUCCESS);

	case DDI_ATTACH:

		/*
		 * this instance attaching for the first time
		 */

		instance = ddi_get_instance(dip);

		if (ddi_soft_state_zalloc(sf_state, instance) !=
		    DDI_SUCCESS) {
			cmn_err(CE_WARN, "sf%d: failed to allocate soft state",
			    instance);
			return (DDI_FAILURE);
		}

		sf = ddi_get_soft_state(sf_state, instance);
		SF_DEBUG(4, (sf, CE_CONT,
		    "sf_attach: DDI_ATTACH for sf%d\n", instance));
		if (sf == NULL) {
			/* this shouldn't happen since we just allocated it */
			cmn_err(CE_WARN, "sf%d: bad soft state", instance);
			return (DDI_FAILURE);
		}

		/*
		 * from this point on, if there's an error, we must de-allocate
		 * soft state before returning DDI_FAILURE
		 */

		if ((handle = ddi_get_parent_data(dip)) == NULL) {
			cmn_err(CE_WARN,
			    "sf%d: failed to obtain transport handle",
			    instance);
			goto fail;
		}

		/* fill in our soft state structure */
		sf->sf_dip = dip;
		sf->sf_state = SF_STATE_INIT;
		sf->sf_throttle = handle->fcal_cmdmax;
		sf->sf_sochandle = handle;
		sf->sf_socp = handle->fcal_handle;
		sf->sf_check_n_close = 0;

		/* create a command/response buffer pool for this instance */
		if (sf_add_cr_pool(sf) != DDI_SUCCESS) {
			cmn_err(CE_WARN,
			    "sf%d: failed to allocate command/response pool",
			    instance);
			goto fail;
		}

		/* create a cache for this instance */
		(void) sprintf(buf, "sf%d_cache", instance);
		sf->sf_pkt_cache = kmem_cache_create(buf,
		    sizeof (fcal_packet_t) + sizeof (struct sf_pkt) +
		    scsi_pkt_size(), 8,
		    sf_kmem_cache_constructor, sf_kmem_cache_destructor,
		    NULL, NULL, NULL, 0);
		if (sf->sf_pkt_cache == NULL) {
			cmn_err(CE_WARN, "sf%d: failed to allocate kmem cache",
			    instance);
			goto fail;
		}

		/* set up a handle and allocate memory for DMA */
		if (ddi_dma_alloc_handle(sf->sf_dip, sf->sf_sochandle->
		    fcal_dmaattr, DDI_DMA_DONTWAIT, NULL, &sf->
		    sf_lilp_dmahandle) != DDI_SUCCESS) {
			cmn_err(CE_WARN,
			    "sf%d: failed to allocate dma handle for lilp map",
			    instance);
			goto fail;
		}
		i = sizeof (struct fcal_lilp_map) + 1;
		if (ddi_dma_mem_alloc(sf->sf_lilp_dmahandle,
		    i, sf->sf_sochandle->
		    fcal_accattr, DDI_DMA_CONSISTENT, DDI_DMA_DONTWAIT, NULL,
		    (caddr_t *)&sf->sf_lilp_map, &real_size,
		    &sf->sf_lilp_acchandle) != DDI_SUCCESS) {
			cmn_err(CE_WARN, "sf%d: failed to allocate lilp map",
			    instance);
			goto fail;
		}
		if (real_size < i) {
			/* got back less memory than requested */
			goto fail;		/* trouble allocating memory */
		}

		/*
		 * set up the address for the DMA transfers (getting a cookie)
		 */
		if (ddi_dma_addr_bind_handle(sf->sf_lilp_dmahandle, NULL,
		    (caddr_t)sf->sf_lilp_map, real_size,
		    DDI_DMA_READ | DDI_DMA_CONSISTENT, DDI_DMA_DONTWAIT, NULL,
		    &sf->sf_lilp_dmacookie, &ccount) != DDI_DMA_MAPPED) {
			cmn_err(CE_WARN,
			    "sf%d: failed to bind dma handle for lilp map",
			    instance);
			goto fail;
		}
		handle_bound = TRUE;
		/* ensure only one cookie was allocated */
		if (ccount != 1) {
			goto fail;
		}

		/*
		 * round the LILP map and DMA cookie addresses up to even
		 * addresses (the map was allocated one byte oversize above
		 * to make this rounding safe)
		 */
		sf->sf_lilp_map = (struct fcal_lilp_map *)(((uintptr_t)sf->
		    sf_lilp_map + 1) & ~1);
		sf->sf_lilp_dmacookie.dmac_address = (sf->
		    sf_lilp_dmacookie.dmac_address + 1) & ~1;

		/* set up all of our mutexes and condition variables */
		mutex_init(&sf->sf_mutex, NULL, MUTEX_DRIVER, NULL);
		mutex_init(&sf->sf_cmd_mutex, NULL, MUTEX_DRIVER, NULL);
		mutex_init(&sf->sf_cr_mutex, NULL, MUTEX_DRIVER, NULL);
		mutex_init(&sf->sf_hp_daemon_mutex, NULL, MUTEX_DRIVER, NULL);
		cv_init(&sf->sf_cr_cv, NULL, CV_DRIVER, NULL);
		cv_init(&sf->sf_hp_daemon_cv, NULL, CV_DRIVER, NULL);

		mutex_initted = TRUE;

		/* create our devctl minor node */
		if (ddi_create_minor_node(dip, "devctl", S_IFCHR,
		    SF_INST2DEVCTL_MINOR(instance),
		    DDI_NT_NEXUS, 0) != DDI_SUCCESS) {
			cmn_err(CE_WARN, "sf%d: ddi_create_minor_node failed"
			    " for devctl", instance);
			goto fail;
		}

		/* create fc minor node */
		if (ddi_create_minor_node(dip, "fc", S_IFCHR,
		    SF_INST2FC_MINOR(instance), DDI_NT_FC_ATTACHMENT_POINT,
		    0) != DDI_SUCCESS) {
			cmn_err(CE_WARN, "sf%d: ddi_create_minor_node failed"
			    " for fc", instance);
			goto fail;
		}
		/* allocate a SCSI transport structure */
		tran = scsi_hba_tran_alloc(dip, 0);
		if (tran == NULL) {
			/* remove all minor nodes created */
			ddi_remove_minor_node(dip, NULL);
			cmn_err(CE_WARN, "sf%d: scsi_hba_tran_alloc failed",
			    instance);
			goto fail;
		}

		/* Indicate that we are 'sizeof (scsi_*(9S))' clean. */
		scsi_size_clean(dip);		/* SCSI_SIZE_CLEAN_VERIFY ok */

		/* save ptr to new transport structure and fill it in */
		sf->sf_tran = tran;

		tran->tran_hba_private		= sf;
		tran->tran_tgt_private		= NULL;
		tran->tran_tgt_init		= sf_scsi_tgt_init;
		tran->tran_tgt_probe		= NULL;
		tran->tran_tgt_free		= sf_scsi_tgt_free;

		tran->tran_start		= sf_start;
		tran->tran_abort		= sf_abort;
		tran->tran_reset		= sf_reset;
		tran->tran_getcap		= sf_getcap;
		tran->tran_setcap		= sf_setcap;
		tran->tran_init_pkt		= sf_scsi_init_pkt;
		tran->tran_destroy_pkt		= sf_scsi_destroy_pkt;
		tran->tran_dmafree		= sf_scsi_dmafree;
		tran->tran_sync_pkt		= sf_scsi_sync_pkt;
		tran->tran_reset_notify		= sf_scsi_reset_notify;

		/*
		 * register event notification routines with scsa
		 */
		tran->tran_get_eventcookie	= sf_bus_get_eventcookie;
		tran->tran_add_eventcall	= sf_bus_add_eventcall;
		tran->tran_remove_eventcall	= sf_bus_remove_eventcall;
		tran->tran_post_event		= sf_bus_post_event;

		/*
		 * register bus configure/unconfigure
		 */
		tran->tran_bus_config		= sf_scsi_bus_config;
		tran->tran_bus_unconfig		= sf_scsi_bus_unconfig;

		/*
		 * allocate an ndi event handle
		 */
		sf->sf_event_defs = (ndi_event_definition_t *)
		    kmem_zalloc(sizeof (sf_event_defs), KM_SLEEP);

		bcopy(sf_event_defs, sf->sf_event_defs,
		    sizeof (sf_event_defs));

		(void) ndi_event_alloc_hdl(dip, NULL,
		    &sf->sf_event_hdl, NDI_SLEEP);

		sf->sf_events.ndi_events_version = NDI_EVENTS_REV1;
		sf->sf_events.ndi_n_events = SF_N_NDI_EVENTS;
		sf->sf_events.ndi_event_defs = sf->sf_event_defs;

		if (ndi_event_bind_set(sf->sf_event_hdl,
		    &sf->sf_events, NDI_SLEEP) != NDI_SUCCESS) {
			goto fail;
		}

		tran->tran_get_name		= sf_scsi_get_name;
		tran->tran_get_bus_addr		= sf_scsi_get_bus_addr;

		/* setup and attach SCSI hba transport */
		if (scsi_hba_attach_setup(dip, sf->sf_sochandle->
		    fcal_dmaattr, tran, SCSI_HBA_TRAN_CLONE) != DDI_SUCCESS) {
			cmn_err(CE_WARN, "sf%d: scsi_hba_attach_setup failed",
			    instance);
			goto fail;
		}

		/* set up kstats */
		if ((sf->sf_ksp = kstat_create("sf", instance, "statistics",
		    "controller", KSTAT_TYPE_RAW, sizeof (struct sf_stats),
		    KSTAT_FLAG_VIRTUAL)) == NULL) {
			cmn_err(CE_WARN, "sf%d: failed to create kstat",
			    instance);
		} else {
			sf->sf_stats.version = 2;
			(void) sprintf(sf->sf_stats.drvr_name,
			"%s: %s", SF_NAME, sf_version);
			sf->sf_ksp->ks_data = (void *)&sf->sf_stats;
			sf->sf_ksp->ks_private = sf;
			sf->sf_ksp->ks_update = sf_kstat_update;
			kstat_install(sf->sf_ksp);
		}

		/* create the hotplug thread */
		mutex_enter(&sf->sf_hp_daemon_mutex);
		tp = thread_create(NULL, 0,
		    (void (*)())sf_hp_daemon, sf, 0, &p0, TS_RUN, minclsyspri);
		sf->sf_hp_tid = tp->t_did;
		mutex_exit(&sf->sf_hp_daemon_mutex);

		/* add this soft state instance to the head of the list */
		mutex_enter(&sf_global_mutex);
		sf->sf_next = sf_head;
		tsf = sf_head;
		sf_head = sf;

		/*
		 * find entry in list that has the same FC-AL handle (if any)
		 */
		while (tsf != NULL) {
			if (tsf->sf_socp == sf->sf_socp) {
				break;		/* found matching entry */
			}
			tsf = tsf->sf_next;
		}

		if (tsf != NULL) {
			/* if we found a matching entry keep track of it */
			sf->sf_sibling = tsf;
		}

		/*
		 * increment watchdog init flag, setting watchdog timeout
		 * if we are the first (since somebody has to do it)
		 */
		if (!sf_watchdog_init++) {
			mutex_exit(&sf_global_mutex);
			sf_watchdog_tick = sf_watchdog_timeout *
			    drv_usectohz(1000000);
			sf_watchdog_id = timeout(sf_watch,
			    NULL, sf_watchdog_tick);
		} else {
			mutex_exit(&sf_global_mutex);
		}

		if (tsf != NULL) {
			/*
			 * set up matching entry to be our sibling
			 */
			mutex_enter(&tsf->sf_mutex);
			tsf->sf_sibling = sf;
			mutex_exit(&tsf->sf_mutex);
		}

		/*
		 * create this property so that PM code knows we want
		 * to be suspended at PM time
		 */
		(void) ddi_prop_update_string(DDI_DEV_T_NONE, dip,
		    PM_HARDWARE_STATE_PROP, PM_NEEDS_SUSPEND_RESUME);

		/* log the fact that we have a new device */
		ddi_report_dev(dip);

		/*
		 * force a login by setting our state to offline
		 */
		sf->sf_timer = sf_watchdog_time + SF_OFFLINE_TIMEOUT;
		sf->sf_state = SF_STATE_OFFLINE;

		/*
		 * call transport routine to register state change and
		 * ELS callback routines (to register us as a ULP)
		 */
		soc_add_ulp(sf->sf_sochandle, sf->sf_socp,
		    sf->sf_sochandle->fcal_portno, TYPE_SCSI_FCP,
		    sf_statec_callback, sf_unsol_els_callback, NULL, sf);

		/*
		 * call transport routine to force loop initialization
		 */
		(void) soc_force_lip(sf->sf_sochandle, sf->sf_socp,
		    sf->sf_sochandle->fcal_portno, 0, FCAL_NO_LIP);
		sf->sf_reset_time = ddi_get_lbolt64();
		return (DDI_SUCCESS);

	default:
		return (DDI_FAILURE);
	}

fail:
	cmn_err(CE_WARN, "sf%d: failed to attach", instance);

	/*
	 * Unbind and free event set
	 */
	if (sf->sf_event_hdl) {
		(void) ndi_event_unbind_set(sf->sf_event_hdl,
		    &sf->sf_events, NDI_SLEEP);
		(void) ndi_event_free_hdl(sf->sf_event_hdl);
	}

	if (sf->sf_event_defs) {
		kmem_free(sf->sf_event_defs, sizeof (sf_event_defs));
	}

	if (sf->sf_tran != NULL) {
		scsi_hba_tran_free(sf->sf_tran);
	}
	while (sf->sf_cr_pool != NULL) {
		sf_crpool_free(sf);
	}
	if (sf->sf_lilp_dmahandle != NULL) {
		if (handle_bound) {
			(void) ddi_dma_unbind_handle(sf->sf_lilp_dmahandle);
		}
		ddi_dma_free_handle(&sf->sf_lilp_dmahandle);
	}
	if (sf->sf_pkt_cache != NULL) {
		kmem_cache_destroy(sf->sf_pkt_cache);
	}
	if (sf->sf_lilp_map != NULL) {
		ddi_dma_mem_free(&sf->sf_lilp_acchandle);
	}
	if (sf->sf_ksp != NULL) {
		kstat_delete(sf->sf_ksp);
	}
	if (mutex_initted) {
		mutex_destroy(&sf->sf_mutex);
		mutex_destroy(&sf->sf_cmd_mutex);
		mutex_destroy(&sf->sf_cr_mutex);
		mutex_destroy(&sf->sf_hp_daemon_mutex);
		cv_destroy(&sf->sf_cr_cv);
		cv_destroy(&sf->sf_hp_daemon_cv);
	}
	mutex_enter(&sf_global_mutex);

	/*
	 * kill off the watchdog if we are the last instance
	 */
	if (!--sf_watchdog_init) {
		timeout_id_t tid = sf_watchdog_id;
		mutex_exit(&sf_global_mutex);
		(void) untimeout(tid);
	} else {
		mutex_exit(&sf_global_mutex);
	}

	ddi_soft_state_free(sf_state, instance);

	if (tran != NULL) {
		/* remove all minor nodes */
		ddi_remove_minor_node(dip, NULL);
	}

	return (DDI_FAILURE);
}


/* ARGSUSED */
static int
sf_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
{
	struct sf		*sf;
	int			instance;
	int			i;
	struct sf_target	*target;
	timeout_id_t		tid;



	/* NO OTHER THREADS ARE RUNNING */

	instance = ddi_get_instance(dip);

	if ((sf = ddi_get_soft_state(sf_state, instance)) == NULL) {
		cmn_err(CE_WARN, "sf_detach, sf%d: bad soft state", instance);
		return (DDI_FAILURE);
	}

	switch (cmd) {

	case DDI_SUSPEND:
		/*
		 * suspend our instance
		 */

		SF_DEBUG(2, (sf, CE_CONT,
		    "sf_detach: DDI_SUSPEND for sf%d\n", instance));
		/*
		 * There is a race condition in socal: if a ULP removes
		 * itself from the callback list while socal is walking
		 * that list in its callback loop, the loop can follow a
		 * stale cblist pointer and panic when it dereferences
		 * cblist->next.
		 */

		/* call transport to remove and unregister our callbacks */
		soc_remove_ulp(sf->sf_sochandle, sf->sf_socp,
		    sf->sf_sochandle->fcal_portno, TYPE_SCSI_FCP, sf);

		/*
		 * begin process of clearing outstanding commands
		 * by issuing a lip
		 */
		sf_force_lip(sf);

		/*
		 * toggle the device OFFLINE in order to cause
		 * outstanding commands to drain
		 */
		mutex_enter(&sf->sf_mutex);
		sf->sf_lip_cnt++;
		sf->sf_timer = sf_watchdog_time + SF_OFFLINE_TIMEOUT;
		sf->sf_state = (SF_STATE_OFFLINE | SF_STATE_SUSPENDED);
		for (i = 0; i < sf_max_targets; i++) {
			target = sf->sf_targets[i];
			if (target != NULL) {
				struct sf_target *ntarget;

				mutex_enter(&target->sft_mutex);
				if (!(target->sft_state & SF_TARGET_OFFLINE)) {
					target->sft_state |=
					    (SF_TARGET_BUSY | SF_TARGET_MARK);
				}
				/* do this for all LUNs as well */
				for (ntarget = target->sft_next_lun;
				    ntarget;
				    ntarget = ntarget->sft_next_lun) {
					mutex_enter(&ntarget->sft_mutex);
					if (!(ntarget->sft_state &
					    SF_TARGET_OFFLINE)) {
						ntarget->sft_state |=
						    (SF_TARGET_BUSY |
						    SF_TARGET_MARK);
					}
					mutex_exit(&ntarget->sft_mutex);
				}
				mutex_exit(&target->sft_mutex);
			}
		}
		mutex_exit(&sf->sf_mutex);
		mutex_enter(&sf_global_mutex);

		/*
		 * kill off the watchdog if we are the last instance
		 */
		if (!--sf_watchdog_init) {
			tid = sf_watchdog_id;
			mutex_exit(&sf_global_mutex);
			(void) untimeout(tid);
		} else {
			mutex_exit(&sf_global_mutex);
		}

		return (DDI_SUCCESS);

	case DDI_DETACH:
		/*
		 * detach this instance
		 */

		SF_DEBUG(2, (sf, CE_CONT,
		    "sf_detach: DDI_DETACH for sf%d\n", instance));

		/* remove this "sf" from the list of sf softstates */
		sf_softstate_unlink(sf);

		/*
		 * prior to taking any DDI_DETACH actions, toggle the
		 * device OFFLINE in order to cause outstanding
		 * commands to drain
		 */
		mutex_enter(&sf->sf_mutex);
		sf->sf_lip_cnt++;
		sf->sf_timer = sf_watchdog_time + SF_OFFLINE_TIMEOUT;
		sf->sf_state = SF_STATE_OFFLINE;
		for (i = 0; i < sf_max_targets; i++) {
			target = sf->sf_targets[i];
			if (target != NULL) {
				struct sf_target *ntarget;

				mutex_enter(&target->sft_mutex);
				if (!(target->sft_state & SF_TARGET_OFFLINE)) {
					target->sft_state |=
					    (SF_TARGET_BUSY | SF_TARGET_MARK);
				}
				for (ntarget = target->sft_next_lun;
				    ntarget;
				    ntarget = ntarget->sft_next_lun) {
					mutex_enter(&ntarget->sft_mutex);
					if (!(ntarget->sft_state &
					    SF_TARGET_OFFLINE)) {
						ntarget->sft_state |=
						    (SF_TARGET_BUSY |
						    SF_TARGET_MARK);
					}
					mutex_exit(&ntarget->sft_mutex);
				}
				mutex_exit(&target->sft_mutex);
			}
		}
		mutex_exit(&sf->sf_mutex);

		/* call transport to remove and unregister our callbacks */
		soc_remove_ulp(sf->sf_sochandle, sf->sf_socp,
		    sf->sf_sochandle->fcal_portno, TYPE_SCSI_FCP, sf);

		/*
		 * kill off the watchdog if we are the last instance
		 */
		mutex_enter(&sf_global_mutex);
		if (!--sf_watchdog_init) {
			tid = sf_watchdog_id;
			mutex_exit(&sf_global_mutex);
			(void) untimeout(tid);
		} else {
			mutex_exit(&sf_global_mutex);
		}

		/* signal sf_hp_daemon() to exit and wait for exit */
		mutex_enter(&sf->sf_hp_daemon_mutex);
		ASSERT(sf->sf_hp_tid);
		sf->sf_hp_exit = 1;		/* flag exit */
		cv_signal(&sf->sf_hp_daemon_cv);
		mutex_exit(&sf->sf_hp_daemon_mutex);
		thread_join(sf->sf_hp_tid);	/* wait for hotplug to exit */

		/*
		 * Unbind and free event set
		 */
		if (sf->sf_event_hdl) {
			(void) ndi_event_unbind_set(sf->sf_event_hdl,
			    &sf->sf_events, NDI_SLEEP);
			(void) ndi_event_free_hdl(sf->sf_event_hdl);
		}

		if (sf->sf_event_defs) {
			kmem_free(sf->sf_event_defs, sizeof (sf_event_defs));
		}

		/* detach this instance of the HBA driver */
		(void) scsi_hba_detach(dip);
		scsi_hba_tran_free(sf->sf_tran);

		/* deallocate/unbind DMA handle for lilp map */
		if (sf->sf_lilp_map != NULL) {
			(void) ddi_dma_unbind_handle(sf->sf_lilp_dmahandle);
			if (sf->sf_lilp_dmahandle != NULL) {
				ddi_dma_free_handle(&sf->sf_lilp_dmahandle);
			}
			ddi_dma_mem_free(&sf->sf_lilp_acchandle);
		}

		/*
		 * the kmem cache must be destroyed before free'ing
		 * up the crpools
		 *
		 * our finagle of "ntot" and "nfree"
		 * causes an ASSERT failure in "sf_cr_free()"
		 * if the kmem cache is free'd after invoking
		 * "sf_crpool_free()".
		 */
		kmem_cache_destroy(sf->sf_pkt_cache);

		SF_DEBUG(2, (sf, CE_CONT,
		    "sf_detach: sf_crpool_free() for instance 0x%x\n",
		    instance));
		while (sf->sf_cr_pool != NULL) {
			/*
			 * set ntot to nfree for this particular entry
			 *
			 * this causes sf_crpool_free() to update
			 * the cr_pool list when deallocating this entry
			 */
			sf->sf_cr_pool->ntot = sf->sf_cr_pool->nfree;
			sf_crpool_free(sf);
		}

		/*
		 * now that the cr_pool's are gone it's safe
		 * to destroy all softstate mutex's and cv's
		 */
		mutex_destroy(&sf->sf_mutex);
		mutex_destroy(&sf->sf_cmd_mutex);
		mutex_destroy(&sf->sf_cr_mutex);
		mutex_destroy(&sf->sf_hp_daemon_mutex);
		cv_destroy(&sf->sf_cr_cv);
		cv_destroy(&sf->sf_hp_daemon_cv);

		/* remove all minor nodes from the device tree */
		ddi_remove_minor_node(dip, NULL);

		/* remove properties created during attach() */
		ddi_prop_remove_all(dip);

		/* remove kstat's if present */
		if (sf->sf_ksp != NULL) {
			kstat_delete(sf->sf_ksp);
		}

		SF_DEBUG(2, (sf, CE_CONT,
		    "sf_detach: ddi_soft_state_free() for instance 0x%x\n",
		    instance));
		ddi_soft_state_free(sf_state, instance);
		return (DDI_SUCCESS);

	default:
		SF_DEBUG(2, (sf, CE_CONT, "sf_detach: sf%d unknown cmd %x\n",
		    instance, (int)cmd));
		return (DDI_FAILURE);
	}
}


/*
 * sf_softstate_unlink() - remove an sf instance from the list of softstates
 */
static void
sf_softstate_unlink(struct sf *sf)
{
	struct sf	*sf_ptr;
	struct sf	*sf_found_sibling;
	struct sf	*sf_reposition = NULL;


	mutex_enter(&sf_global_mutex);
	while (sf_watch_running) {
		/* Busy working the list -- wait */
		cv_wait(&sf_watch_cv, &sf_global_mutex);
	}
	if ((sf_found_sibling = sf->sf_sibling) != NULL) {
		/*
		 * we have a sibling so NULL out its reference to us
		 */
		mutex_enter(&sf_found_sibling->sf_mutex);
		sf_found_sibling->sf_sibling = NULL;
		mutex_exit(&sf_found_sibling->sf_mutex);
	}

	/* remove our instance from the global list */
	if (sf == sf_head) {
		/* we were at the head of the list */
		sf_head = sf->sf_next;
	} else {
		/* find us in the list */
		for (sf_ptr = sf_head;
		    sf_ptr != NULL;
		    sf_ptr = sf_ptr->sf_next) {
			if (sf_ptr == sf) {
				break;
			}
			/* remember this place */
			sf_reposition = sf_ptr;
		}
		ASSERT(sf_ptr == sf);
		ASSERT(sf_reposition != NULL);

		sf_reposition->sf_next = sf_ptr->sf_next;
	}
	mutex_exit(&sf_global_mutex);
}


static int
sf_scsi_bus_config(dev_info_t *parent, uint_t flag,
    ddi_bus_config_op_t op, void *arg, dev_info_t **childp)
{
	int64_t		reset_delay;
	struct sf	*sf;

	sf = ddi_get_soft_state(sf_state, ddi_get_instance(parent));
	ASSERT(sf);

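	/*
	 * wait out whatever remains of SF_INIT_WAIT_TIMEOUT (measured
	 * from the LIP forced in sf_attach()) so that targets have a
	 * chance to come back online before bus configuration proceeds
	 */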
	reset_delay = (int64_t)(USEC_TO_TICK(SF_INIT_WAIT_TIMEOUT)) -
	    (ddi_get_lbolt64() - sf->sf_reset_time);
	if (reset_delay < 0)
		reset_delay = 0;

	if (sf_bus_config_debug)
		flag |= NDI_DEVI_DEBUG;

	return (ndi_busop_bus_config(parent, flag, op,
	    arg, childp, (clock_t)reset_delay));
}

static int
sf_scsi_bus_unconfig(dev_info_t *parent, uint_t flag,
    ddi_bus_config_op_t op, void *arg)
{
	if (sf_bus_config_debug)
		flag |= NDI_DEVI_DEBUG;

	return (ndi_busop_bus_unconfig(parent, flag, op, arg));
}


/*
 * called by transport to initialize a SCSI target
 */
/* ARGSUSED */
static int
sf_scsi_tgt_init(dev_info_t *hba_dip, dev_info_t *tgt_dip,
    scsi_hba_tran_t *hba_tran, struct scsi_device *sd)
{
#ifdef RAID_LUNS
	int lun;
#else
	int64_t lun;
#endif
	struct sf_target *target;
	struct sf *sf = (struct sf *)hba_tran->tran_hba_private;
	int i, t_len;
	unsigned int lip_cnt;
	unsigned char wwn[FC_WWN_SIZE];


	/* get and validate our SCSI target ID */
	i = sd->sd_address.a_target;
	if (i >= sf_max_targets) {
		return (DDI_NOT_WELL_FORMED);
	}

	/* get our port WWN property */
	t_len = sizeof (wwn);
	if (ddi_prop_op(DDI_DEV_T_ANY, tgt_dip, PROP_LEN_AND_VAL_BUF,
	    DDI_PROP_DONTPASS | DDI_PROP_CANSLEEP, PORT_WWN_PROP,
	    (caddr_t)&wwn, &t_len) != DDI_SUCCESS) {
		/* no port WWN property - ignore the OBP stub node */
		return (DDI_NOT_WELL_FORMED);
	}

	/* get our LIP count property */
	t_len = sizeof (lip_cnt);
	if (ddi_prop_op(DDI_DEV_T_ANY, tgt_dip, PROP_LEN_AND_VAL_BUF,
	    DDI_PROP_DONTPASS | DDI_PROP_CANSLEEP, LIP_CNT_PROP,
	    (caddr_t)&lip_cnt, &t_len) != DDI_SUCCESS) {
		return (DDI_FAILURE);
	}
	/* and our LUN property */
	t_len = sizeof (lun);
	if (ddi_prop_op(DDI_DEV_T_ANY, tgt_dip, PROP_LEN_AND_VAL_BUF,
	    DDI_PROP_DONTPASS | DDI_PROP_CANSLEEP, "lun",
	    (caddr_t)&lun, &t_len) != DDI_SUCCESS) {
		return (DDI_FAILURE);
	}

	/* find the target structure for this instance */
	mutex_enter(&sf->sf_mutex);
	if ((target = sf_lookup_target(sf, wwn, lun)) == NULL) {
		mutex_exit(&sf->sf_mutex);
		return (DDI_FAILURE);
	}

	mutex_enter(&target->sft_mutex);
	if ((sf->sf_lip_cnt == lip_cnt) && !(target->sft_state
	    & SF_TARGET_INIT_DONE)) {
		/*
		 * set links between HBA transport and target structures
		 * and set done flag
		 */
		hba_tran->tran_tgt_private = target;
		target->sft_tran = hba_tran;
		target->sft_state |= SF_TARGET_INIT_DONE;
	} else {
		/* already initialized ?? */
		mutex_exit(&target->sft_mutex);
		mutex_exit(&sf->sf_mutex);
		return (DDI_FAILURE);
	}
	mutex_exit(&target->sft_mutex);
	mutex_exit(&sf->sf_mutex);

	return (DDI_SUCCESS);
}


/*
 * called by transport to free a target
 */
/* ARGSUSED */
static void
sf_scsi_tgt_free(dev_info_t *hba_dip, dev_info_t *tgt_dip,
    scsi_hba_tran_t *hba_tran, struct scsi_device *sd)
{
	struct sf_target *target = hba_tran->tran_tgt_private;

	if (target != NULL) {
		mutex_enter(&target->sft_mutex);
		target->sft_tran = NULL;
		target->sft_state &= ~SF_TARGET_INIT_DONE;
		mutex_exit(&target->sft_mutex);
	}
}


/*
 * allocator for non-std size cdb/pkt_private/status -- returns FALSE
 * on success, TRUE on failure
 */
/*ARGSUSED*/
static int
sf_pkt_alloc_extern(struct sf *sf, struct sf_pkt *cmd,
    int tgtlen, int statuslen, int kf)
{
	caddr_t scbp, tgt;
	int failure = FALSE;
	struct scsi_pkt *pkt = CMD2PKT(cmd);


	tgt = scbp = NULL;

	if (tgtlen > PKT_PRIV_LEN) {
		if ((tgt = kmem_zalloc(tgtlen, kf)) == NULL) {
			failure = TRUE;
		} else {
			cmd->cmd_flags |= CFLAG_PRIVEXTERN;
			pkt->pkt_private = tgt;
		}
	}
	if (statuslen > EXTCMDS_STATUS_SIZE) {
		if ((scbp = kmem_zalloc((size_t)statuslen, kf)) == NULL) {
			failure = TRUE;
		} else {
			cmd->cmd_flags |= CFLAG_SCBEXTERN;
			pkt->pkt_scbp = (opaque_t)scbp;
		}
	}
	if (failure) {
		sf_pkt_destroy_extern(sf, cmd);
	}
	return (failure);
}


/*
 * deallocator for non-std size cdb/pkt_private/status
 */
static void
sf_pkt_destroy_extern(struct sf *sf, struct sf_pkt *cmd)
{
	struct scsi_pkt *pkt = CMD2PKT(cmd);

	if (cmd->cmd_flags & CFLAG_FREE) {
		cmn_err(CE_PANIC,
		    "sf_scsi_impl_pktfree: freeing free packet");
		_NOTE(NOT_REACHED)
		/* NOTREACHED */
	}
	if (cmd->cmd_flags & CFLAG_SCBEXTERN) {
		kmem_free((caddr_t)pkt->pkt_scbp,
		    (size_t)cmd->cmd_scblen);
	}
	if (cmd->cmd_flags & CFLAG_PRIVEXTERN) {
		kmem_free((caddr_t)pkt->pkt_private,
		    (size_t)cmd->cmd_privlen);
	}

	cmd->cmd_flags = CFLAG_FREE;
	kmem_cache_free(sf->sf_pkt_cache, (void *)cmd);
}


/*
 * create or initialize a SCSI packet -- called internally and
 * by the transport
 */
static struct scsi_pkt *
sf_scsi_init_pkt(struct scsi_address *ap, struct scsi_pkt *pkt,
    struct buf *bp, int cmdlen, int statuslen, int tgtlen,
    int flags, int (*callback)(), caddr_t arg)
{
	int kf;
	int failure = FALSE;
	struct sf_pkt *cmd;
	struct sf *sf = ADDR2SF(ap);
	struct sf_target *target = ADDR2TARGET(ap);
	struct sf_pkt	*new_cmd = NULL;
	struct fcal_packet	*fpkt;
	fc_frame_header_t	*hp;
	struct fcp_cmd *fcmd;


	/*
	 * If a pkt was already allocated (pkt != NULL), this request
	 * is for DMA resource allocation only.
	 */
	if (pkt == NULL) {

		/*
		 * First step of sf_scsi_init_pkt:  pkt allocation
		 */
		if (cmdlen > FCP_CDB_SIZE) {
			return (NULL);
		}

		kf = (callback == SLEEP_FUNC)? KM_SLEEP: KM_NOSLEEP;

		if ((cmd = kmem_cache_alloc(sf->sf_pkt_cache, kf)) != NULL) {
			/*
			 * Selective zeroing of the pkt.
			 */

			cmd->cmd_flags = 0;
			cmd->cmd_forw = 0;
			cmd->cmd_back = 0;
			cmd->cmd_next = 0;
			cmd->cmd_pkt = (struct scsi_pkt *)((char *)cmd +
			    sizeof (struct sf_pkt) + sizeof (struct
			    fcal_packet));
			cmd->cmd_fp_pkt = (struct fcal_packet *)((char *)cmd +
			    sizeof (struct sf_pkt));
			cmd->cmd_fp_pkt->fcal_pkt_private = (opaque_t)cmd;
			cmd->cmd_state = SF_STATE_IDLE;
			cmd->cmd_pkt->pkt_ha_private = (opaque_t)cmd;
			cmd->cmd_pkt->pkt_scbp = (opaque_t)cmd->cmd_scsi_scb;
			cmd->cmd_pkt->pkt_comp	= NULL;
			cmd->cmd_pkt->pkt_flags	= 0;
			cmd->cmd_pkt->pkt_time	= 0;
			cmd->cmd_pkt->pkt_resid	= 0;
			cmd->cmd_pkt->pkt_reason = 0;
			cmd->cmd_cdblen = (uchar_t)cmdlen;
			cmd->cmd_scblen		= statuslen;
			cmd->cmd_privlen	= tgtlen;
			cmd->cmd_pkt->pkt_address = *ap;

			/* zero pkt_private */
			(int *)(cmd->cmd_pkt->pkt_private =
			    cmd->cmd_pkt_private);
			bzero((caddr_t)cmd->cmd_pkt->pkt_private,
			    PKT_PRIV_LEN);
		} else {
			failure = TRUE;
		}

		if (failure ||
		    (tgtlen > PKT_PRIV_LEN) ||
		    (statuslen > EXTCMDS_STATUS_SIZE)) {
			if (!failure) {
				/* need to allocate more space */
				failure = sf_pkt_alloc_extern(sf, cmd,
				    tgtlen, statuslen, kf);
			}
			if (failure) {
				return (NULL);
			}
		}

		fpkt = cmd->cmd_fp_pkt;
		if (cmd->cmd_block == NULL) {

			/* allocate cmd/response pool buffers */
			if (sf_cr_alloc(sf, cmd, callback) == DDI_FAILURE) {
				sf_pkt_destroy_extern(sf, cmd);
				return (NULL);
			}

			/* fill in the FC-AL packet */
			fpkt->fcal_pkt_cookie = sf->sf_socp;
			fpkt->fcal_pkt_comp = sf_cmd_callback;
			fpkt->fcal_pkt_flags = 0;
			fpkt->fcal_magic = FCALP_MAGIC;
			fpkt->fcal_socal_request.sr_soc_hdr.sh_flags =
			    (ushort_t)(SOC_FC_HEADER |
			    sf->sf_sochandle->fcal_portno);
			fpkt->fcal_socal_request.sr_soc_hdr.sh_class = 3;
			fpkt->fcal_socal_request.sr_cqhdr.cq_hdr_count = 1;
			fpkt->fcal_socal_request.sr_cqhdr.cq_hdr_flags = 0;
			fpkt->fcal_socal_request.sr_cqhdr.cq_hdr_seqno = 0;
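
			/*
			 * data segment 0 carries the outbound FCP_CMD
			 * buffer and segment 1 the inbound FCP_RSP buffer;
			 * segment 2 (the data buffer, if any) is filled
			 * in during DMA setup below
			 */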
			fpkt->fcal_socal_request.sr_dataseg[0].fc_base =
			    (uint32_t)cmd->cmd_dmac;
			fpkt->fcal_socal_request.sr_dataseg[0].fc_count =
			    sizeof (struct fcp_cmd);
			fpkt->fcal_socal_request.sr_dataseg[1].fc_base =
			    (uint32_t)cmd->cmd_rsp_dmac;
			fpkt->fcal_socal_request.sr_dataseg[1].fc_count =
			    FCP_MAX_RSP_IU_SIZE;

			/* Fill in the Fibre Channel frame header */
			hp = &fpkt->fcal_socal_request.sr_fc_frame_hdr;
			hp->r_ctl = R_CTL_COMMAND;
			hp->type = TYPE_SCSI_FCP;
			hp->f_ctl = F_CTL_SEQ_INITIATIVE | F_CTL_FIRST_SEQ;
			hp->reserved1 = 0;
			hp->seq_id = 0;
			hp->df_ctl  = 0;
			hp->seq_cnt = 0;
			hp->ox_id = 0xffff;
			hp->rx_id = 0xffff;
			hp->ro = 0;

			/* Establish the LUN */
			bcopy((caddr_t)&target->sft_lun.b,
			    (caddr_t)&cmd->cmd_block->fcp_ent_addr,
			    FCP_LUN_SIZE);
			*((int32_t *)&cmd->cmd_block->fcp_cntl) = 0;
		}
		cmd->cmd_pkt->pkt_cdbp = cmd->cmd_block->fcp_cdb;

		mutex_enter(&target->sft_pkt_mutex);

		target->sft_pkt_tail->cmd_forw = cmd;
		cmd->cmd_back = target->sft_pkt_tail;
		cmd->cmd_forw = (struct sf_pkt *)&target->sft_pkt_head;
		target->sft_pkt_tail = cmd;

		mutex_exit(&target->sft_pkt_mutex);
		new_cmd = cmd;		/* for later cleanup if needed */
	} else {
		/* pkt already exists -- just a request for DMA allocation */
		cmd = PKT2CMD(pkt);
		fpkt = cmd->cmd_fp_pkt;
	}

	/* zero the cdb */
	bzero((caddr_t)cmd->cmd_pkt->pkt_cdbp, cmdlen);

	/*
	 * Second step of sf_scsi_init_pkt:  dma allocation
	 * Set up dma info
	 */
	if ((bp != NULL) && (bp->b_bcount != 0)) {
		int cmd_flags, dma_flags;
		int rval = 0;
		uint_t dmacookie_count;

		/* there is a buffer and some data to transfer */

		/* set up command and DMA flags */
		cmd_flags = cmd->cmd_flags;
		if (bp->b_flags & B_READ) {
			/* a read */
			cmd_flags &= ~CFLAG_DMASEND;
			dma_flags = DDI_DMA_READ;
		} else {
			/* a write */
			cmd_flags |= CFLAG_DMASEND;
			dma_flags = DDI_DMA_WRITE;
		}
		if (flags & PKT_CONSISTENT) {
			cmd_flags |= CFLAG_CMDIOPB;
			dma_flags |= DDI_DMA_CONSISTENT;
		}

		/* ensure we have a DMA handle */
		if (cmd->cmd_dmahandle == NULL) {
			rval = ddi_dma_alloc_handle(sf->sf_dip,
			    sf->sf_sochandle->fcal_dmaattr, callback, arg,
			    &cmd->cmd_dmahandle);
		}

		if (rval == 0) {
			/* bind our DMA handle to our buffer */
			rval = ddi_dma_buf_bind_handle(cmd->cmd_dmahandle, bp,
			    dma_flags, callback, arg, &cmd->cmd_dmacookie,
			    &dmacookie_count);
		}

		if (rval != 0) {
			/* DMA failure */
			SF_DEBUG(2, (sf, CE_CONT, "ddi_dma_buf.. failed\n"));
			switch (rval) {
			case DDI_DMA_NORESOURCES:
				bioerror(bp, 0);
				break;
			case DDI_DMA_BADATTR:
			case DDI_DMA_NOMAPPING:
				bioerror(bp, EFAULT);
				break;
			case DDI_DMA_TOOBIG:
			default:
				bioerror(bp, EINVAL);
				break;
			}
			/* clear valid flag */
			cmd->cmd_flags = cmd_flags & ~CFLAG_DMAVALID;
			if (new_cmd != NULL) {
				/* destroy packet if we just created it */
				sf_scsi_destroy_pkt(ap, new_cmd->cmd_pkt);
			}
			return (NULL);
		}

		ASSERT(dmacookie_count == 1);
		/* set up amt to transfer and set valid flag */
		cmd->cmd_dmacount = bp->b_bcount;
		cmd->cmd_flags = cmd_flags | CFLAG_DMAVALID;

		ASSERT(cmd->cmd_dmahandle != NULL);
	}

	/* set up FC-AL packet */
	fcmd = cmd->cmd_block;

	if (cmd->cmd_flags & CFLAG_DMAVALID) {
		if (cmd->cmd_flags & CFLAG_DMASEND) {
			/* DMA write */
			fcmd->fcp_cntl.cntl_read_data = 0;
			fcmd->fcp_cntl.cntl_write_data = 1;
			fpkt->fcal_socal_request.sr_cqhdr.cq_hdr_type =
			    CQ_TYPE_IO_WRITE;
		} else {
			/* DMA read */
			fcmd->fcp_cntl.cntl_read_data = 1;
			fcmd->fcp_cntl.cntl_write_data = 0;
			fpkt->fcal_socal_request.sr_cqhdr.cq_hdr_type =
			    CQ_TYPE_IO_READ;
		}
		fpkt->fcal_socal_request.sr_dataseg[2].fc_base =
		    (uint32_t)cmd->cmd_dmacookie.dmac_address;
		fpkt->fcal_socal_request.sr_dataseg[2].fc_count =
		    cmd->cmd_dmacookie.dmac_size;
		fpkt->fcal_socal_request.sr_soc_hdr.sh_seg_cnt = 3;
		fpkt->fcal_socal_request.sr_soc_hdr.sh_byte_cnt =
		    cmd->cmd_dmacookie.dmac_size;
		fcmd->fcp_data_len = cmd->cmd_dmacookie.dmac_size;
	} else {
		/* not a read or write */
		fcmd->fcp_cntl.cntl_read_data = 0;
		fcmd->fcp_cntl.cntl_write_data = 0;
		fpkt->fcal_socal_request.sr_cqhdr.cq_hdr_type = CQ_TYPE_SIMPLE;
		fpkt->fcal_socal_request.sr_soc_hdr.sh_seg_cnt = 2;
		fpkt->fcal_socal_request.sr_soc_hdr.sh_byte_cnt =
		    sizeof (struct fcp_cmd);
		fcmd->fcp_data_len = 0;
	}
	fcmd->fcp_cntl.cntl_qtype = FCP_QTYPE_SIMPLE;

	return (cmd->cmd_pkt);
}


/*
 * destroy a SCSI packet -- called internally and by the transport
 */
static void
sf_scsi_destroy_pkt(struct scsi_address *ap, struct scsi_pkt *pkt)
{
	struct sf_pkt *cmd = PKT2CMD(pkt);
	struct sf *sf = ADDR2SF(ap);
	struct sf_target *target = ADDR2TARGET(ap);
	struct fcal_packet	*fpkt = cmd->cmd_fp_pkt;


	if (cmd->cmd_flags & CFLAG_DMAVALID) {
		/* DMA was set up -- clean up */
		(void) ddi_dma_unbind_handle(cmd->cmd_dmahandle);
		cmd->cmd_flags ^= CFLAG_DMAVALID;
	}

	/* take this packet off the doubly-linked list */
	mutex_enter(&target->sft_pkt_mutex);
	cmd->cmd_back->cmd_forw = cmd->cmd_forw;
	cmd->cmd_forw->cmd_back = cmd->cmd_back;
	mutex_exit(&target->sft_pkt_mutex);

	fpkt->fcal_pkt_flags = 0;
	/* free the packet */
	if ((cmd->cmd_flags &
	    (CFLAG_FREE | CFLAG_PRIVEXTERN | CFLAG_SCBEXTERN)) == 0) {
		/* just a regular packet */
		ASSERT(cmd->cmd_state != SF_STATE_ISSUED);
		cmd->cmd_flags = CFLAG_FREE;
		kmem_cache_free(sf->sf_pkt_cache, (void *)cmd);
	} else {
		/* a packet with extra memory */
		sf_pkt_destroy_extern(sf, cmd);
	}
}


/*
 * called by transport to unbind DMA handle
 */
/* ARGSUSED */
static void
sf_scsi_dmafree(struct scsi_address *ap, struct scsi_pkt *pkt)
{
	struct sf_pkt *cmd = PKT2CMD(pkt);


	if (cmd->cmd_flags & CFLAG_DMAVALID) {
		(void) ddi_dma_unbind_handle(cmd->cmd_dmahandle);
		cmd->cmd_flags ^= CFLAG_DMAVALID;
	}

}
1738 
1739 
1740 /*
1741  * called by transport to synchronize CPU and I/O views of memory
1742  */
1743 /* ARGSUSED */
1744 static void
1745 sf_scsi_sync_pkt(struct scsi_address *ap, struct scsi_pkt *pkt)
1746 {
1747 	struct sf_pkt *cmd = PKT2CMD(pkt);
1748 
1749 
1750 	if (cmd->cmd_flags & CFLAG_DMAVALID) {
1751 		if (ddi_dma_sync(cmd->cmd_dmahandle, (off_t)0, (size_t)0,
1752 		    (cmd->cmd_flags & CFLAG_DMASEND) ?
1753 		    DDI_DMA_SYNC_FORDEV : DDI_DMA_SYNC_FORCPU) !=
1754 		    DDI_SUCCESS) {
1755 			cmn_err(CE_WARN, "sf: sync pkt failed");
1756 		}
1757 	}
1758 }
1759 
1760 
1761 /*
1762  * routine for reset notification setup (register or cancel) -- called
1763  * by the transport
1764  */
1765 static int
1766 sf_scsi_reset_notify(struct scsi_address *ap, int flag,
1767     void (*callback)(caddr_t), caddr_t arg)
1768 {
1769 	struct sf	*sf = ADDR2SF(ap);
1770 
1771 	return (scsi_hba_reset_notify_setup(ap, flag, callback, arg,
1772 	    &sf->sf_mutex, &sf->sf_reset_notify_listf));
1773 }
1774 
1775 
1776 /*
1777  * called by transport to get port WWN property (except sun4u)
1778  */
1779 /* ARGSUSED */
1780 static int
1781 sf_scsi_get_name(struct scsi_device *sd, char *name, int len)
1782 {
1783 	char tbuf[(FC_WWN_SIZE*2)+1];
1784 	unsigned char wwn[FC_WWN_SIZE];
1785 	int i, lun;
1786 	dev_info_t *tgt_dip;
1787 
1788 	tgt_dip = sd->sd_dev;
1789 	i = sizeof (wwn);
1790 	if (ddi_prop_op(DDI_DEV_T_ANY, tgt_dip, PROP_LEN_AND_VAL_BUF,
1791 	    DDI_PROP_DONTPASS | DDI_PROP_CANSLEEP, PORT_WWN_PROP,
1792 	    (caddr_t)&wwn, &i) != DDI_SUCCESS) {
1793 		name[0] = '\0';
1794 		return (0);
1795 	}
1796 	i = sizeof (lun);
1797 	if (ddi_prop_op(DDI_DEV_T_ANY, tgt_dip, PROP_LEN_AND_VAL_BUF,
1798 	    DDI_PROP_DONTPASS | DDI_PROP_CANSLEEP, "lun",
1799 	    (caddr_t)&lun, &i) != DDI_SUCCESS) {
1800 		name[0] = '\0';
1801 		return (0);
1802 	}
1803 	for (i = 0; i < FC_WWN_SIZE; i++)
1804 		(void) sprintf(&tbuf[i << 1], "%02x", wwn[i]);
1805 	(void) sprintf(name, "w%s,%x", tbuf, lun);
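	/*
	 * e.g. a port WWN of 21:00:00:20:37:00:0f:63 with LUN 1 yields
	 * the name "w2100002037000f63,1"
	 */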
1806 	return (1);
1807 }
1808 
1809 
1810 /*
1811  * called by transport to get target soft AL-PA (except sun4u)
1812  */
1813 /* ARGSUSED */
1814 static int
1815 sf_scsi_get_bus_addr(struct scsi_device *sd, char *name, int len)
1816 {
1817 	struct sf_target *target = ADDR2TARGET(&sd->sd_address);
1818 
1819 	if (target == NULL)
1820 		return (0);
1821 
1822 	(void) sprintf(name, "%x", target->sft_al_pa);
1823 	return (1);
1824 }
1825 
1826 
1827 /*
1828  * add to the command/response buffer pool for this sf instance
1829  */
1830 static int
1831 sf_add_cr_pool(struct sf *sf)
1832 {
1833 	int		cmd_buf_size;
1834 	size_t		real_cmd_buf_size;
1835 	int		rsp_buf_size;
1836 	size_t		real_rsp_buf_size;
1837 	uint_t		i, ccount;
1838 	struct sf_cr_pool	*ptr;
1839 	struct sf_cr_free_elem *cptr;
1840 	caddr_t	dptr, eptr;
1841 	ddi_dma_cookie_t	cmd_cookie;
1842 	ddi_dma_cookie_t	rsp_cookie;
1843 	int		cmd_bound = FALSE, rsp_bound = FALSE;
1844 
1845 
1846 	/* allocate room for the pool */
1847 	if ((ptr = kmem_zalloc(sizeof (struct sf_cr_pool), KM_NOSLEEP)) ==
1848 	    NULL) {
1849 		return (DDI_FAILURE);
1850 	}
1851 
1852 	/* allocate a DMA handle for the command pool */
1853 	if (ddi_dma_alloc_handle(sf->sf_dip, sf->sf_sochandle->fcal_dmaattr,
1854 	    DDI_DMA_DONTWAIT, NULL, &ptr->cmd_dma_handle) != DDI_SUCCESS) {
1855 		goto fail;
1856 	}
1857 
1858 	/*
1859 	 * Get a piece of memory in which to put commands
1860 	 */
1861 	cmd_buf_size = (sizeof (struct fcp_cmd) * SF_ELEMS_IN_POOL + 7) & ~7;
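	/*
	 * (x + 7) & ~7 rounds x up to the next multiple of 8; for
	 * example, a raw size of 0x1005 becomes (0x1005 + 7) & ~7 ==
	 * 0x1008.
	 */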
1862 	if (ddi_dma_mem_alloc(ptr->cmd_dma_handle, cmd_buf_size,
1863 	    sf->sf_sochandle->fcal_accattr, DDI_DMA_CONSISTENT,
1864 	    DDI_DMA_DONTWAIT, NULL, (caddr_t *)&ptr->cmd_base,
1865 	    &real_cmd_buf_size, &ptr->cmd_acc_handle) != DDI_SUCCESS) {
1866 		goto fail;
1867 	}
1868 
1869 	/* bind the DMA handle to an address */
1870 	if (ddi_dma_addr_bind_handle(ptr->cmd_dma_handle, NULL,
1871 	    ptr->cmd_base, real_cmd_buf_size,
1872 	    DDI_DMA_WRITE | DDI_DMA_CONSISTENT, DDI_DMA_DONTWAIT,
1873 	    NULL, &cmd_cookie, &ccount) != DDI_DMA_MAPPED) {
1874 		goto fail;
1875 	}
1876 	cmd_bound = TRUE;
1877 	/* ensure only one cookie was allocated */
1878 	if (ccount != 1) {
1879 		goto fail;
1880 	}
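	/*
	 * A single cookie is required because the SOC+ request format
	 * carries one base/count pair per data segment, so the pool
	 * cannot be scattered across several DMA windows; fcal_dmaattr
	 * is presumably set up so that this bind is normally contiguous.
	 */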
1881 
1882 	/* allocate a DMA handle for the response pool */
1883 	if (ddi_dma_alloc_handle(sf->sf_dip, sf->sf_sochandle->fcal_dmaattr,
1884 	    DDI_DMA_DONTWAIT, NULL, &ptr->rsp_dma_handle) != DDI_SUCCESS) {
1885 		goto fail;
1886 	}
1887 
1888 	/*
1889 	 * Get a piece of memory in which to put responses
1890 	 */
1891 	rsp_buf_size = FCP_MAX_RSP_IU_SIZE * SF_ELEMS_IN_POOL;
1892 	if (ddi_dma_mem_alloc(ptr->rsp_dma_handle, rsp_buf_size,
1893 	    sf->sf_sochandle->fcal_accattr, DDI_DMA_CONSISTENT,
1894 	    DDI_DMA_DONTWAIT, NULL, (caddr_t *)&ptr->rsp_base,
1895 	    &real_rsp_buf_size, &ptr->rsp_acc_handle) != DDI_SUCCESS) {
1896 		goto fail;
1897 	}
1898 
1899 	/* bind the DMA handle to an address */
1900 	if (ddi_dma_addr_bind_handle(ptr->rsp_dma_handle, NULL,
1901 	    ptr->rsp_base, real_rsp_buf_size,
1902 	    DDI_DMA_READ | DDI_DMA_CONSISTENT, DDI_DMA_DONTWAIT,
1903 	    NULL, &rsp_cookie, &ccount) != DDI_DMA_MAPPED) {
1904 		goto fail;
1905 	}
1906 	rsp_bound = TRUE;
1907 	/* ensure only one cookie was allocated */
1908 	if (ccount != 1) {
1909 		goto fail;
1910 	}
1911 
1912 	/*
1913 	 * Generate a free list of cmd/rsp structure pairs
1914 	 */
1915 	/* ensure ptr points to start of long word (8-byte block) */
1916 	dptr = (caddr_t)(((uintptr_t)ptr->cmd_base + 7) & ~7);
1917 	/* keep track of actual size after moving pointer */
1918 	real_cmd_buf_size -= (dptr - ptr->cmd_base);
1919 	eptr = ptr->rsp_base;
1920 
1921 	/* set actual total number of entries */
1922 	ptr->ntot = min((real_cmd_buf_size / sizeof (struct fcp_cmd)),
1923 	    (real_rsp_buf_size / FCP_MAX_RSP_IU_SIZE));
1924 	ptr->nfree = ptr->ntot;
1925 	ptr->free = (struct sf_cr_free_elem *)ptr->cmd_base;
1926 	ptr->sf = sf;
1927 
1928 	/* link each cmd/rsp pair and precompute its DMA addresses */
1929 	i = 0;
1930 	while (i < ptr->ntot) {
1931 		cptr = (struct sf_cr_free_elem *)dptr;
1932 		dptr += sizeof (struct fcp_cmd);
1933 
1934 		cptr->next = (struct sf_cr_free_elem *)dptr;
1935 		cptr->rsp = eptr;
1936 
1937 		cptr->cmd_dmac = cmd_cookie.dmac_address +
1938 		    (uint32_t)((caddr_t)cptr - ptr->cmd_base);
1939 
1940 		cptr->rsp_dmac = rsp_cookie.dmac_address +
1941 		    (uint32_t)((caddr_t)eptr - ptr->rsp_base);
1942 
1943 		eptr += FCP_MAX_RSP_IU_SIZE;
1944 		i++;
1945 	}
1946 
1947 	/* terminate the list */
1948 	cptr->next = NULL;
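	/*
	 * The pool now looks roughly like this, with every free-list
	 * element paired to one response buffer and its DMA addresses
	 * precomputed:
	 *
	 *	cmd_base: [elem0] -> [elem1] -> ... -> [elemN-1] -> NULL
	 *	rsp_base: [rsp0 ]    [rsp1 ]   ...     [rspN-1]
	 *
	 * where elem_i->rsp points at rsp_i.
	 */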
1949 
1950 	/* add this list at front of current one */
1951 	mutex_enter(&sf->sf_cr_mutex);
1952 	ptr->next = sf->sf_cr_pool;
1953 	sf->sf_cr_pool = ptr;
1954 	sf->sf_cr_pool_cnt++;
1955 	mutex_exit(&sf->sf_cr_mutex);
1956 
1957 	return (DDI_SUCCESS);
1958 
1959 fail:
1960 	/* we failed so clean up */
1961 	if (ptr->cmd_dma_handle != NULL) {
1962 		if (cmd_bound) {
1963 			(void) ddi_dma_unbind_handle(ptr->cmd_dma_handle);
1964 		}
1965 		ddi_dma_free_handle(&ptr->cmd_dma_handle);
1966 	}
1967 
1968 	if (ptr->rsp_dma_handle != NULL) {
1969 		if (rsp_bound) {
1970 			(void) ddi_dma_unbind_handle(ptr->rsp_dma_handle);
1971 		}
1972 		ddi_dma_free_handle(&ptr->rsp_dma_handle);
1973 	}
1974 
1975 	if (ptr->cmd_base != NULL) {
1976 		ddi_dma_mem_free(&ptr->cmd_acc_handle);
1977 	}
1978 
1979 	if (ptr->rsp_base != NULL) {
1980 		ddi_dma_mem_free(&ptr->rsp_acc_handle);
1981 	}
1982 
1983 	kmem_free((caddr_t)ptr, sizeof (struct sf_cr_pool));
1984 	return (DDI_FAILURE);
1985 }
1986 
1987 
1988 /*
1989  * allocate a command/response buffer from the pool, allocating more
1990  * in the pool as needed
1991  */
1992 static int
1993 sf_cr_alloc(struct sf *sf, struct sf_pkt *cmd, int (*func)())
1994 {
1995 	struct sf_cr_pool *ptr;
1996 	struct sf_cr_free_elem *cptr;
1997 
1998 
1999 	mutex_enter(&sf->sf_cr_mutex);
2000 
2001 try_again:
2002 
2003 	/* find a free buffer in the existing pool */
2004 	ptr = sf->sf_cr_pool;
2005 	while (ptr != NULL) {
2006 		if (ptr->nfree != 0) {
2007 			ptr->nfree--;
2008 			break;
2009 		} else {
2010 			ptr = ptr->next;
2011 		}
2012 	}
2013 
2014 	/* did we find a free buffer ? */
2015 	if (ptr != NULL) {
2016 		/* we found a free buffer -- take it off the free list */
2017 		cptr = ptr->free;
2018 		ptr->free = cptr->next;
2019 		mutex_exit(&sf->sf_cr_mutex);
2020 		/* set up the command to use the buffer pair */
2021 		cmd->cmd_block = (struct fcp_cmd *)cptr;
2022 		cmd->cmd_dmac = cptr->cmd_dmac;
2023 		cmd->cmd_rsp_dmac = cptr->rsp_dmac;
2024 		cmd->cmd_rsp_block = (struct fcp_rsp *)cptr->rsp;
2025 		cmd->cmd_cr_pool = ptr;
2026 		return (DDI_SUCCESS);		/* success */
2027 	}
2028 
2029 	/* no free buffer available -- can we allocate more ? */
2030 	if (sf->sf_cr_pool_cnt < SF_CR_POOL_MAX) {
2031 		/* we need to allocate more buffer pairs */
2032 		if (sf->sf_cr_flag) {
2033 			/* somebody already allocating for this instance */
2034 			if (func == SLEEP_FUNC) {
2035 				/* user wants to wait */
2036 				cv_wait(&sf->sf_cr_cv, &sf->sf_cr_mutex);
2037 				/* we've been woken so go try again */
2038 				goto try_again;
2039 			}
2040 			/* user does not want to wait */
2041 			mutex_exit(&sf->sf_cr_mutex);
2042 			sf->sf_stats.cralloc_failures++;
2043 			return (DDI_FAILURE);	/* give up */
2044 		}
2045 		/* set flag saying we're allocating */
2046 		sf->sf_cr_flag = 1;
2047 		mutex_exit(&sf->sf_cr_mutex);
2048 		/* add to our pool */
2049 		if (sf_add_cr_pool(sf) != DDI_SUCCESS) {
2050 			/* couldn't add to our pool for some reason */
2051 			mutex_enter(&sf->sf_cr_mutex);
2052 			sf->sf_cr_flag = 0;
2053 			cv_broadcast(&sf->sf_cr_cv);
2054 			mutex_exit(&sf->sf_cr_mutex);
2055 			sf->sf_stats.cralloc_failures++;
2056 			return (DDI_FAILURE);	/* give up */
2057 		}
2058 		/*
2059 		 * clear the flag saying we're allocating and tell all
2060 		 * others that care
2061 		 */
2062 		mutex_enter(&sf->sf_cr_mutex);
2063 		sf->sf_cr_flag = 0;
2064 		cv_broadcast(&sf->sf_cr_cv);
2065 		/* now that we have more buffers try again */
2066 		goto try_again;
2067 	}
2068 
2069 	/* we don't have room to allocate any more buffers */
2070 	mutex_exit(&sf->sf_cr_mutex);
2071 	sf->sf_stats.cralloc_failures++;
2072 	return (DDI_FAILURE);			/* give up */
2073 }
2074 
2075 
2076 /*
2077  * free a cmd/response buffer pair in our pool
2078  */
2079 static void
2080 sf_cr_free(struct sf_cr_pool *cp, struct sf_pkt *cmd)
2081 {
2082 	struct sf *sf = cp->sf;
2083 	struct sf_cr_free_elem *elem;
2084 
2085 	elem = (struct sf_cr_free_elem *)cmd->cmd_block;
2086 	elem->rsp = (caddr_t)cmd->cmd_rsp_block;
2087 	elem->cmd_dmac = cmd->cmd_dmac;
2088 	elem->rsp_dmac = cmd->cmd_rsp_dmac;
2089 
2090 	mutex_enter(&sf->sf_cr_mutex);
2091 	cp->nfree++;
2092 	ASSERT(cp->nfree <= cp->ntot);
2093 
2094 	elem->next = cp->free;
2095 	cp->free = elem;
2096 	mutex_exit(&sf->sf_cr_mutex);
2097 }
2098 
2099 
2100 /*
2101  * free our pool of cmd/response buffers
2102  */
2103 static void
2104 sf_crpool_free(struct sf *sf)
2105 {
2106 	struct sf_cr_pool *cp, *prev;
2107 
2108 	prev = NULL;
2109 	mutex_enter(&sf->sf_cr_mutex);
2110 	cp = sf->sf_cr_pool;
2111 	while (cp != NULL) {
2112 		if (cp->nfree == cp->ntot) {
2113 			if (prev != NULL) {
2114 				prev->next = cp->next;
2115 			} else {
2116 				sf->sf_cr_pool = cp->next;
2117 			}
2118 			sf->sf_cr_pool_cnt--;
2119 			mutex_exit(&sf->sf_cr_mutex);
2120 
2121 			(void) ddi_dma_unbind_handle(cp->cmd_dma_handle);
2122 			ddi_dma_free_handle(&cp->cmd_dma_handle);
2123 			(void) ddi_dma_unbind_handle(cp->rsp_dma_handle);
2124 			ddi_dma_free_handle(&cp->rsp_dma_handle);
2125 			ddi_dma_mem_free(&cp->cmd_acc_handle);
2126 			ddi_dma_mem_free(&cp->rsp_acc_handle);
2127 			kmem_free((caddr_t)cp, sizeof (struct sf_cr_pool));
2128 			return;
2129 		}
2130 		prev = cp;
2131 		cp = cp->next;
2132 	}
2133 	mutex_exit(&sf->sf_cr_mutex);
2134 }
2135 
2136 
2137 /* ARGSUSED */
2138 static int
2139 sf_kmem_cache_constructor(void *buf, void *arg, int size)
2140 {
2141 	struct sf_pkt *cmd = buf;
2142 
2143 	mutex_init(&cmd->cmd_abort_mutex, NULL, MUTEX_DRIVER, NULL);
2144 	cmd->cmd_block = NULL;
2145 	cmd->cmd_dmahandle = NULL;
2146 	return (0);
2147 }
2148 
2149 
2150 /* ARGSUSED */
2151 static void
2152 sf_kmem_cache_destructor(void *buf, void *size)
2153 {
2154 	struct sf_pkt *cmd = buf;
2155 
2156 	if (cmd->cmd_dmahandle != NULL) {
2157 		ddi_dma_free_handle(&cmd->cmd_dmahandle);
2158 	}
2159 
2160 	if (cmd->cmd_block != NULL) {
2161 		sf_cr_free(cmd->cmd_cr_pool, cmd);
2162 	}
2163 	mutex_destroy(&cmd->cmd_abort_mutex);
2164 }
2165 
2166 
2167 /*
2168  * called by transport when a state change occurs
2169  */
2170 static void
2171 sf_statec_callback(void *arg, int msg)
2172 {
2173 	struct sf *sf = (struct sf *)arg;
2174 	struct sf_target	*target;
2175 	int i;
2176 	struct sf_pkt *cmd;
2177 	struct scsi_pkt *pkt;
2178 
2179 
2180 
2181 	switch (msg) {
2182 
2183 	case FCAL_STATUS_LOOP_ONLINE: {
2184 		uchar_t		al_pa;		/* to save AL-PA */
2185 		int		ret;		/* ret value from getmap */
2186 		int		lip_cnt;	/* to save current count */
2187 		int		cnt;		/* map length */
2188 
2189 		/*
2190 		 * the loop has gone online
2191 		 */
2192 		SF_DEBUG(1, (sf, CE_CONT, "sf%d: loop online\n",
2193 		    ddi_get_instance(sf->sf_dip)));
2194 		mutex_enter(&sf->sf_mutex);
2195 		sf->sf_lip_cnt++;
2196 		sf->sf_state = SF_STATE_ONLINING;
2197 		mutex_exit(&sf->sf_mutex);
2198 
2199 		/* scan each target hash queue */
2200 		for (i = 0; i < SF_NUM_HASH_QUEUES; i++) {
2201 			target = sf->sf_wwn_lists[i];
2202 			while (target != NULL) {
2203 				/*
2204 				 * foreach target, if it's not offline then
2205 				 * mark it as busy
2206 				 */
2207 				mutex_enter(&target->sft_mutex);
2208 				if (!(target->sft_state & SF_TARGET_OFFLINE))
2209 					target->sft_state |= (SF_TARGET_BUSY
2210 					    | SF_TARGET_MARK);
2211 #ifdef DEBUG
2212 				/*
2213 				 * for debugging, print out info on any
2214 				 * pending commands (left hanging)
2215 				 */
2216 				cmd = target->sft_pkt_head;
2217 				while (cmd != (struct sf_pkt *)&target->
2218 				    sft_pkt_head) {
2219 					if (cmd->cmd_state ==
2220 					    SF_STATE_ISSUED) {
2221 						SF_DEBUG(1, (sf, CE_CONT,
2222 						    "cmd 0x%p pending "
2223 						    "after lip\n",
2224 						    (void *)cmd->cmd_fp_pkt));
2225 					}
2226 					cmd = cmd->cmd_forw;
2227 				}
2228 #endif
2229 				mutex_exit(&target->sft_mutex);
2230 				target = target->sft_next;
2231 			}
2232 		}
2233 
2234 		/*
2235 		 * since the loop has just gone online get a new map from
2236 		 * the transport
2237 		 */
2238 		if ((ret = soc_get_lilp_map(sf->sf_sochandle, sf->sf_socp,
2239 		    sf->sf_sochandle->fcal_portno, (uint32_t)sf->
2240 		    sf_lilp_dmacookie.dmac_address, 1)) != FCAL_SUCCESS) {
2241 			if (sf_core && (sf_core & SF_CORE_LILP_FAILED)) {
2242 				(void) soc_take_core(sf->sf_sochandle,
2243 				    sf->sf_socp);
2244 				sf_core = 0;
2245 			}
2246 			sf_log(sf, CE_WARN,
2247 			    "!soc lilp map failed status=0x%x\n", ret);
2248 			mutex_enter(&sf->sf_mutex);
2249 			sf->sf_timer = sf_watchdog_time + SF_OFFLINE_TIMEOUT;
2250 			sf->sf_lip_cnt++;
2251 			sf->sf_state = SF_STATE_OFFLINE;
2252 			mutex_exit(&sf->sf_mutex);
2253 			return;
2254 		}
2255 
2256 		/* ensure consistent view of DMA memory */
2257 		(void) ddi_dma_sync(sf->sf_lilp_dmahandle, (off_t)0, (size_t)0,
2258 		    DDI_DMA_SYNC_FORKERNEL);
2259 
2260 		/* how many entries in map ? */
2261 		cnt = sf->sf_lilp_map->lilp_length;
2262 		if (cnt >= SF_MAX_LILP_ENTRIES) {
2263 			sf_log(sf, CE_WARN, "invalid lilp map\n");
2264 			return;
2265 		}
2266 
2267 		mutex_enter(&sf->sf_mutex);
2268 		sf->sf_device_count = cnt - 1;
2269 		sf->sf_al_pa = sf->sf_lilp_map->lilp_myalpa;
2270 		lip_cnt = sf->sf_lip_cnt;
2271 		al_pa = sf->sf_al_pa;
2272 
2273 		SF_DEBUG(1, (sf, CE_CONT,
2274 		    "!lilp map has %d entries, al_pa is %x\n", cnt, al_pa));
2275 
2276 		/*
2277 		 * since the last entry of the map may be mine (common) check
2278 		 * for that, and if it is we have one less entry to look at
2279 		 */
2280 		if (sf->sf_lilp_map->lilp_alpalist[cnt-1] == al_pa) {
2281 			cnt--;
2282 		}
2283 		/* If we didn't get a valid loop map enable all targets */
2284 		if (sf->sf_lilp_map->lilp_magic == FCAL_BADLILP_MAGIC) {
2285 			for (i = 0; i < sizeof (sf_switch_to_alpa); i++)
2286 				sf->sf_lilp_map->lilp_alpalist[i] =
2287 				    sf_switch_to_alpa[i];
2288 			cnt = i;
2289 			sf->sf_device_count = cnt - 1;
2290 		}
2291 		if (sf->sf_device_count == 0) {
2292 			sf_finish_init(sf, lip_cnt);
2293 			mutex_exit(&sf->sf_mutex);
2294 			break;
2295 		}
2296 		mutex_exit(&sf->sf_mutex);
2297 
2298 		SF_DEBUG(2, (sf, CE_WARN,
2299 		    "!statec_callback: starting with %d targets\n",
2300 		    sf->sf_device_count));
2301 
2302 		/* scan loop map, logging into all ports (except mine) */
2303 		for (i = 0; i < cnt; i++) {
2304 			SF_DEBUG(1, (sf, CE_CONT,
2305 			    "!lilp map entry %d = %x,%x\n", i,
2306 			    sf->sf_lilp_map->lilp_alpalist[i],
2307 			    sf_alpa_to_switch[
2308 			    sf->sf_lilp_map->lilp_alpalist[i]]));
2309 			/* is this entry for somebody else ? */
2310 			if (sf->sf_lilp_map->lilp_alpalist[i] != al_pa) {
2311 				/* do a PLOGI to this port */
2312 				if (!sf_login(sf, LA_ELS_PLOGI,
2313 				    sf->sf_lilp_map->lilp_alpalist[i],
2314 				    sf->sf_lilp_map->lilp_alpalist[cnt-1],
2315 				    lip_cnt)) {
2316 					/* a problem logging in */
2317 					mutex_enter(&sf->sf_mutex);
2318 					if (lip_cnt == sf->sf_lip_cnt) {
2319 						/*
2320 						 * problem not from a new LIP
2321 						 */
2322 						sf->sf_device_count--;
2323 						ASSERT(sf->sf_device_count
2324 						    >= 0);
2325 						if (sf->sf_device_count == 0) {
2326 							sf_finish_init(sf,
2327 							    lip_cnt);
2328 						}
2329 					}
2330 					mutex_exit(&sf->sf_mutex);
2331 				}
2332 			}
2333 		}
2334 		break;
2335 	}
2336 
2337 	case FCAL_STATUS_ERR_OFFLINE:
2338 		/*
2339 		 * loop has gone offline due to an error
2340 		 */
2341 		SF_DEBUG(1, (sf, CE_CONT, "sf%d: loop offline\n",
2342 		    ddi_get_instance(sf->sf_dip)));
2343 		mutex_enter(&sf->sf_mutex);
2344 		sf->sf_lip_cnt++;
2345 		sf->sf_timer = sf_watchdog_time + SF_OFFLINE_TIMEOUT;
2346 		if (!sf->sf_online_timer) {
2347 			sf->sf_online_timer = sf_watchdog_time +
2348 			    SF_ONLINE_TIMEOUT;
2349 		}
2350 		/*
2351 		 * if we are suspended, preserve the SF_STATE_SUSPENDED flag,
2352 		 * since throttling logic in sf_watch() depends on
2353 		 * preservation of this flag while device is suspended
2354 		 */
2355 		if (sf->sf_state & SF_STATE_SUSPENDED) {
2356 			sf->sf_state |= SF_STATE_OFFLINE;
2357 			SF_DEBUG(1, (sf, CE_CONT,
2358 			    "sf_statec_callback, sf%d: "
2359 			    "got FCAL_STATE_OFFLINE during DDI_SUSPEND\n",
2360 			    ddi_get_instance(sf->sf_dip)));
2361 		} else {
2362 			sf->sf_state = SF_STATE_OFFLINE;
2363 		}
2364 
2365 		/* scan each possible target on the loop */
2366 		for (i = 0; i < sf_max_targets; i++) {
2367 			target = sf->sf_targets[i];
2368 			while (target != NULL) {
2369 				mutex_enter(&target->sft_mutex);
2370 				if (!(target->sft_state & SF_TARGET_OFFLINE))
2371 					target->sft_state |= (SF_TARGET_BUSY
2372 					    | SF_TARGET_MARK);
2373 				mutex_exit(&target->sft_mutex);
2374 				target = target->sft_next_lun;
2375 			}
2376 		}
2377 		mutex_exit(&sf->sf_mutex);
2378 		break;
2379 
2380 	case FCAL_STATE_RESET: {
2381 		struct sf_els_hdr	*privp;	/* ptr to private list */
2382 		struct sf_els_hdr	*tmpp1;	/* tmp prev hdr ptr */
2383 		struct sf_els_hdr	*tmpp2;	/* tmp next hdr ptr */
2384 		struct sf_els_hdr	*head;	/* to save our private list */
2385 		struct fcal_packet	*fpkt;	/* ptr to pkt in hdr */
2386 
2387 		/*
2388 		 * a transport reset
2389 		 */
2390 		SF_DEBUG(1, (sf, CE_CONT, "!sf%d: soc reset\n",
2391 		    ddi_get_instance(sf->sf_dip)));
2392 		tmpp1 = head = NULL;
2393 		mutex_enter(&sf->sf_mutex);
2394 		sf->sf_lip_cnt++;
2395 		sf->sf_timer = sf_watchdog_time + SF_RESET_TIMEOUT;
2396 		/*
2397 		 * if we are suspended, preserve the SF_STATE_SUSPENDED flag,
2398 		 * since throttling logic in sf_watch() depends on
2399 		 * preservation of this flag while device is suspended
2400 		 */
2401 		if (sf->sf_state & SF_STATE_SUSPENDED) {
2402 			sf->sf_state |= SF_STATE_OFFLINE;
2403 			SF_DEBUG(1, (sf, CE_CONT,
2404 			    "sf_statec_callback, sf%d: "
2405 			    "got FCAL_STATE_RESET during DDI_SUSPEND\n",
2406 			    ddi_get_instance(sf->sf_dip)));
2407 		} else {
2408 			sf->sf_state = SF_STATE_OFFLINE;
2409 		}
2410 
2411 		/*
2412 		 * scan each possible target on the loop, looking for targets
2413 		 * that need their callbacks run
2414 		 */
2415 		for (i = 0; i < sf_max_targets; i++) {
2416 			target = sf->sf_targets[i];
2417 			while (target != NULL) {
2418 				if (!(target->sft_state & SF_TARGET_OFFLINE)) {
2419 					target->sft_state |= (SF_TARGET_BUSY
2420 					    | SF_TARGET_MARK);
2421 					mutex_exit(&sf->sf_mutex);
2422 					/*
2423 					 * run remove event callbacks for lun
2424 					 *
2425 					 * We have a nasty race condition here
2426 					 * 'cause we're dropping this mutex to
2427 					 * run the callback and expect the
2428 					 * linked list to be the same.
2429 					 */
2430 					(void) ndi_event_retrieve_cookie(
2431 					    sf->sf_event_hdl, target->sft_dip,
2432 					    FCAL_REMOVE_EVENT, &sf_remove_eid,
2433 					    NDI_EVENT_NOPASS);
2434 					(void) ndi_event_run_callbacks(
2435 					    sf->sf_event_hdl,
2436 					    target->sft_dip,
2437 					    sf_remove_eid, NULL);
2438 					mutex_enter(&sf->sf_mutex);
2439 				}
2440 				target = target->sft_next_lun;
2441 			}
2442 		}
2443 
2444 		/*
2445 		 * scan for ELS commands that are in transport, not complete,
2446 		 * and have a valid timeout, building a private list
2447 		 */
2448 		privp = sf->sf_els_list;
2449 		while (privp != NULL) {
2450 			fpkt = privp->fpkt;
2451 			if ((fpkt->fcal_cmd_state & FCAL_CMD_IN_TRANSPORT) &&
2452 			    (!(fpkt->fcal_cmd_state & FCAL_CMD_COMPLETE)) &&
2453 			    (privp->timeout != SF_INVALID_TIMEOUT)) {
2454 				/*
2455 				 * cmd in transport && not complete &&
2456 				 * timeout valid
2457 				 *
2458 				 * move this entry from ELS input list to our
2459 				 * private list
2460 				 */
2461 
2462 				tmpp2 = privp->next; /* save ptr to next */
2463 
2464 				/* push this on private list head */
2465 				privp->next = head;
2466 				head = privp;
2467 
2468 				/* remove this entry from input list */
2469 				if (tmpp1 != NULL) {
2470 					/*
2471 					 * remove this entry from somewhere in
2472 					 * the middle of the list
2473 					 */
2474 					tmpp1->next = tmpp2;
2475 					if (tmpp2 != NULL) {
2476 						tmpp2->prev = tmpp1;
2477 					}
2478 				} else {
2479 					/*
2480 					 * remove this entry from the head
2481 					 * of the list
2482 					 */
2483 					sf->sf_els_list = tmpp2;
2484 					if (tmpp2 != NULL) {
2485 						tmpp2->prev = NULL;
2486 					}
2487 				}
2488 				privp = tmpp2;	/* skip to next entry */
2489 			} else {
2490 				tmpp1 = privp;	/* save ptr to prev entry */
2491 				privp = privp->next; /* skip to next entry */
2492 			}
2493 		}
2494 
2495 		mutex_exit(&sf->sf_mutex);
2496 
2497 		/*
2498 		 * foreach cmd in our list free the ELS packet associated
2499 		 * with it
2500 		 */
2501 		privp = head;
2502 		while (privp != NULL) {
2503 			fpkt = privp->fpkt;
2504 			privp = privp->next;
2505 			sf_els_free(fpkt);
2506 		}
2507 
2508 		/*
2509 		 * scan for commands from each possible target
2510 		 */
2511 		for (i = 0; i < sf_max_targets; i++) {
2512 			target = sf->sf_targets[i];
2513 			while (target != NULL) {
2514 				/*
2515 				 * scan all active commands for this target,
2516 				 * looking for commands that have been issued,
2517 				 * are in transport, and are not yet complete
2518 				 * (so we can terminate them because of the
2519 				 * reset)
2520 				 */
2521 				mutex_enter(&target->sft_pkt_mutex);
2522 				cmd = target->sft_pkt_head;
2523 				while (cmd != (struct sf_pkt *)&target->
2524 				    sft_pkt_head) {
2525 					fpkt = cmd->cmd_fp_pkt;
2526 					mutex_enter(&cmd->cmd_abort_mutex);
2527 					if ((cmd->cmd_state ==
2528 					    SF_STATE_ISSUED) &&
2529 					    (fpkt->fcal_cmd_state &
2530 					    FCAL_CMD_IN_TRANSPORT) &&
2531 					    (!(fpkt->fcal_cmd_state &
2532 					    FCAL_CMD_COMPLETE))) {
2533 						/* a command to be reset */
2534 						pkt = cmd->cmd_pkt;
2535 						pkt->pkt_reason = CMD_RESET;
2536 						pkt->pkt_statistics |=
2537 						    STAT_BUS_RESET;
2538 						cmd->cmd_state = SF_STATE_IDLE;
2539 						mutex_exit(&cmd->
2540 						    cmd_abort_mutex);
2541 						mutex_exit(&target->
2542 						    sft_pkt_mutex);
2543 						if (pkt->pkt_comp != NULL) {
2544 							(*pkt->pkt_comp)(pkt);
2545 						}
2546 						mutex_enter(&target->
2547 						    sft_pkt_mutex);
2548 						cmd = target->sft_pkt_head;
2549 					} else {
2550 						mutex_exit(&cmd->
2551 						    cmd_abort_mutex);
2552 						/* get next command */
2553 						cmd = cmd->cmd_forw;
2554 					}
2555 				}
2556 				mutex_exit(&target->sft_pkt_mutex);
2557 				target = target->sft_next_lun;
2558 			}
2559 		}
2560 
2561 		/*
2562 		 * grab this instance's queue of pending packets, resetting
2563 		 * all remaining commands on it
2564 		 */
2565 		mutex_enter(&sf->sf_mutex);
2566 		cmd = sf->sf_pkt_head;
2567 		sf->sf_pkt_head = NULL;
2568 		mutex_exit(&sf->sf_mutex);
2569 
2570 		while (cmd != NULL) {
2571 			pkt = cmd->cmd_pkt;
2572 			cmd = cmd->cmd_next;
2573 			pkt->pkt_reason = CMD_RESET;
2574 			pkt->pkt_statistics |= STAT_BUS_RESET;
2575 			if (pkt->pkt_comp != NULL) {
2576 				(*pkt->pkt_comp)(pkt);
2577 			}
2578 		}
2579 		break;
2580 	}
2581 
2582 	default:
2583 		break;
2584 	}
2585 }
2586 
2587 
2588 /*
2589  * called to send a PLOGI (N_port login) or LOGO ELS request to a
2590  * destination ID, returning TRUE upon success, else returning FALSE
2591  */
2592 static int
2593 sf_login(struct sf *sf, uchar_t els_code, uchar_t dest_id, uint_t arg1,
2594     int lip_cnt)
2595 {
2596 	struct la_els_logi	*logi;
2597 	struct	sf_els_hdr	*privp;
2598 
2599 
2600 	if (sf_els_alloc(sf, dest_id, sizeof (struct sf_els_hdr),
2601 	    sizeof (union sf_els_cmd), sizeof (union sf_els_rsp),
2602 	    (caddr_t *)&privp, (caddr_t *)&logi) == NULL) {
2603 		sf_log(sf, CE_WARN, "Cannot allocate PLOGI for target %x "
2604 		    "due to DVMA shortage.\n", sf_alpa_to_switch[dest_id]);
2605 		return (FALSE);
2606 	}
2607 
2608 	privp->lip_cnt = lip_cnt;
2609 	if (els_code == LA_ELS_PLOGI) {
2610 		bcopy((caddr_t)sf->sf_sochandle->fcal_loginparms,
2611 		    (caddr_t)&logi->common_service, sizeof (struct la_els_logi)
2612 		    - 4);
2613 		bcopy((caddr_t)&sf->sf_sochandle->fcal_p_wwn,
2614 		    (caddr_t)&logi->nport_ww_name, sizeof (la_wwn_t));
2615 		bcopy((caddr_t)&sf->sf_sochandle->fcal_n_wwn,
2616 		    (caddr_t)&logi->node_ww_name, sizeof (la_wwn_t));
2617 		bzero((caddr_t)&logi->reserved, 16);
2618 	} else if (els_code == LA_ELS_LOGO) {
2619 		bcopy((caddr_t)&sf->sf_sochandle->fcal_p_wwn,
2620 		    (caddr_t)&(((struct la_els_logo *)logi)->nport_ww_name), 8);
2621 		((struct la_els_logo	*)logi)->reserved = 0;
2622 		((struct la_els_logo	*)logi)->nport_id[0] = 0;
2623 		((struct la_els_logo	*)logi)->nport_id[1] = 0;
2624 		((struct la_els_logo	*)logi)->nport_id[2] = arg1;
2625 	}
2626 
2627 	privp->els_code = els_code;
2628 	logi->ls_code = els_code;
2629 	logi->mbz[0] = 0;
2630 	logi->mbz[1] = 0;
2631 	logi->mbz[2] = 0;
2632 
2633 	privp->timeout = sf_watchdog_time + SF_ELS_TIMEOUT;
2634 	return (sf_els_transport(sf, privp));
2635 }
2636 
2637 
2638 /*
2639  * send an ELS IU via the transport,
2640  * returning TRUE upon success, else returning FALSE
2641  */
2642 static int
2643 sf_els_transport(struct sf *sf, struct sf_els_hdr *privp)
2644 {
2645 	struct fcal_packet *fpkt = privp->fpkt;
2646 
2647 
2648 	(void) ddi_dma_sync(privp->cmd_dma_handle, (off_t)0, (size_t)0,
2649 	    DDI_DMA_SYNC_FORDEV);
2650 	privp->prev = NULL;
2651 	mutex_enter(&sf->sf_mutex);
2652 	privp->next = sf->sf_els_list;
2653 	if (sf->sf_els_list != NULL) {
2654 		sf->sf_els_list->prev = privp;
2655 	}
2656 	sf->sf_els_list = privp;
2657 	mutex_exit(&sf->sf_mutex);
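	/*
	 * The header was linked onto sf_els_list *before* the send:
	 * sf_els_callback() can fire as soon as the transport owns the
	 * packet, and it expects to find (and unlink) the entry there.
	 * On a send failure the linkage is undone by hand below.
	 */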
2658 
2659 	/* call the transport to send a packet */
2660 	if (soc_transport(sf->sf_sochandle, fpkt, FCAL_NOSLEEP,
2661 	    CQ_REQUEST_1) != FCAL_TRANSPORT_SUCCESS) {
2662 		mutex_enter(&sf->sf_mutex);
2663 		if (privp->prev != NULL) {
2664 			privp->prev->next = privp->next;
2665 		}
2666 		if (privp->next != NULL) {
2667 			privp->next->prev = privp->prev;
2668 		}
2669 		if (sf->sf_els_list == privp) {
2670 			sf->sf_els_list = privp->next;
2671 		}
2672 		mutex_exit(&sf->sf_mutex);
2673 		sf_els_free(fpkt);
2674 		return (FALSE);			/* failure */
2675 	}
2676 	return (TRUE);				/* success */
2677 }
2678 
2679 
2680 /*
2681  * called as the pkt_comp routine for ELS FC packets
2682  */
2683 static void
2684 sf_els_callback(struct fcal_packet *fpkt)
2685 {
2686 	struct sf_els_hdr *privp = fpkt->fcal_pkt_private;
2687 	struct sf *sf = privp->sf;
2688 	struct sf *tsf;
2689 	int tgt_id;
2690 	struct la_els_logi *ptr = (struct la_els_logi *)privp->rsp;
2691 	struct la_els_adisc *adisc = (struct la_els_adisc *)ptr;
2692 	struct	sf_target *target;
2693 	short	ncmds;
2694 	short	free_pkt = TRUE;
2695 
2696 
2697 	/*
2698 	 * we've received an ELS callback, i.e. our ELS request has completed
2699 	 */
2700 
2701 	/* take the current packet off of the queue */
2702 	mutex_enter(&sf->sf_mutex);
2703 	if (privp->timeout == SF_INVALID_TIMEOUT) {
2704 		mutex_exit(&sf->sf_mutex);
2705 		return;
2706 	}
2707 	if (privp->prev != NULL) {
2708 		privp->prev->next = privp->next;
2709 	}
2710 	if (privp->next != NULL) {
2711 		privp->next->prev = privp->prev;
2712 	}
2713 	if (sf->sf_els_list == privp) {
2714 		sf->sf_els_list = privp->next;
2715 	}
2716 	privp->prev = privp->next = NULL;
2717 	mutex_exit(&sf->sf_mutex);
2718 
2719 	/* get # pkts in this callback */
2720 	ncmds = fpkt->fcal_ncmds;
2721 	ASSERT(ncmds >= 0);
2722 	mutex_enter(&sf->sf_cmd_mutex);
2723 	sf->sf_ncmds = ncmds;
2724 	mutex_exit(&sf->sf_cmd_mutex);
2725 
2726 	/* sync idea of memory */
2727 	(void) ddi_dma_sync(privp->rsp_dma_handle, (off_t)0, (size_t)0,
2728 	    DDI_DMA_SYNC_FORKERNEL);
2729 
2730 	/* was this an OK ACC msg? */
2731 	if ((fpkt->fcal_pkt_status == FCAL_STATUS_OK) &&
2732 	    (ptr->ls_code == LA_ELS_ACC)) {
2733 
2734 		/*
2735 		 * this was an OK ACC pkt
2736 		 */
2737 
2738 		switch (privp->els_code) {
2739 		case LA_ELS_PLOGI:
2740 			/*
2741 			 * was able to do an N_port login
2742 			 */
2743 			SF_DEBUG(2, (sf, CE_CONT,
2744 			    "!PLOGI to al_pa %x succeeded, wwn %x%x\n",
2745 			    privp->dest_nport_id,
2746 			    *((int *)&ptr->nport_ww_name.raw_wwn[0]),
2747 			    *((int *)&ptr->nport_ww_name.raw_wwn[4])));
2748 			/* try to do a process login */
2749 			if (!sf_do_prli(sf, privp, ptr)) {
2750 				free_pkt = FALSE;
2751 				goto fail;	/* PRLI failed */
2752 			}
2753 			break;
2754 		case LA_ELS_PRLI:
2755 			/*
2756 			 * was able to do a process login
2757 			 */
2758 			SF_DEBUG(2, (sf, CE_CONT,
2759 			    "!PRLI to al_pa %x succeeded\n",
2760 			    privp->dest_nport_id));
2761 			/* try to do address discovery */
2762 			if (sf_do_adisc(sf, privp) != 1) {
2763 				free_pkt = FALSE;
2764 				goto fail;	/* ADISC failed */
2765 			}
2766 			break;
2767 		case LA_ELS_ADISC:
2768 			/*
2769 			 * found a target via ADISC
2770 			 */
2771 
2772 			SF_DEBUG(2, (sf, CE_CONT,
2773 			    "!ADISC to al_pa %x succeeded\n",
2774 			    privp->dest_nport_id));
2775 
2776 			/* create the target info */
2777 			if ((target = sf_create_target(sf, privp,
2778 			    sf_alpa_to_switch[(uchar_t)adisc->hard_address],
2779 			    (int64_t)0))
2780 			    == NULL) {
2781 				goto fail;	/* can't create target */
2782 			}
2783 
2784 			/*
2785 			 * ensure address discovered matches what we thought
2786 			 * it would be
2787 			 */
2788 			if ((uchar_t)adisc->hard_address !=
2789 			    privp->dest_nport_id) {
2790 				sf_log(sf, CE_WARN,
2791 				    "target 0x%x, AL-PA 0x%x and "
2792 				    "hard address 0x%x don't match\n",
2793 				    sf_alpa_to_switch[
2794 				    (uchar_t)privp->dest_nport_id],
2795 				    privp->dest_nport_id,
2796 				    (uchar_t)adisc->hard_address);
2797 				mutex_enter(&sf->sf_mutex);
2798 				sf_offline_target(sf, target);
2799 				mutex_exit(&sf->sf_mutex);
2800 				goto fail;	/* addr doesn't match */
2801 			}
2802 			/*
2803 			 * get the LUN list from the target (REPORT_LUNS)
2804 			 */
2805 			if (!sf_do_reportlun(sf, privp, target)) {
2806 				mutex_enter(&sf->sf_mutex);
2807 				sf_offline_target(sf, target);
2808 				mutex_exit(&sf->sf_mutex);
2809 				free_pkt = FALSE;
2810 				goto fail;	/* inquiry failed */
2811 			}
2812 			break;
2813 		default:
2814 			SF_DEBUG(2, (sf, CE_CONT,
2815 			    "!ELS %x to al_pa %x succeeded\n",
2816 			    privp->els_code, privp->dest_nport_id));
2817 			sf_els_free(fpkt);
2818 			break;
2819 		}
2820 
2821 	} else {
2822 
2823 		/*
2824 		 * oh oh -- this was not an OK ACC packet
2825 		 */
2826 
2827 		/* get target ID from dest loop address */
2828 		tgt_id = sf_alpa_to_switch[(uchar_t)privp->dest_nport_id];
2829 
2830 		/* keep track of failures */
2831 		sf->sf_stats.tstats[tgt_id].els_failures++;
2832 		if (++(privp->retries) < sf_els_retries &&
2833 		    fpkt->fcal_pkt_status != FCAL_STATUS_OPEN_FAIL) {
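			/*
			 * If the failure was MAX_XCHG_EXCEEDED, the SOC+
			 * has run out of exchange resources, so the
			 * sibling instance (apparently the other port on
			 * the same socal) is throttled back before the
			 * retry.
			 */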
2834 			if (fpkt->fcal_pkt_status ==
2835 			    FCAL_STATUS_MAX_XCHG_EXCEEDED) {
2836 				tsf = sf->sf_sibling;
2837 				if (tsf != NULL) {
2838 					mutex_enter(&tsf->sf_cmd_mutex);
2839 					tsf->sf_flag = 1;
2840 					tsf->sf_throttle = SF_DECR_DELTA;
2841 					mutex_exit(&tsf->sf_cmd_mutex);
2842 				}
2843 			}
2844 			privp->timeout = sf_watchdog_time + SF_ELS_TIMEOUT;
2845 			privp->prev = NULL;
2846 
2847 			mutex_enter(&sf->sf_mutex);
2848 
2849 			if (privp->lip_cnt == sf->sf_lip_cnt) {
2850 				SF_DEBUG(1, (sf, CE_WARN,
2851 				    "!ELS %x to al_pa %x failed, retrying",
2852 				    privp->els_code, privp->dest_nport_id));
2853 				privp->next = sf->sf_els_list;
2854 				if (sf->sf_els_list != NULL) {
2855 					sf->sf_els_list->prev = privp;
2856 				}
2857 
2858 				sf->sf_els_list = privp;
2859 
2860 				mutex_exit(&sf->sf_mutex);
2861 				/* device busy?  wait a bit ... */
2862 				if (fpkt->fcal_pkt_status ==
2863 				    FCAL_STATUS_MAX_XCHG_EXCEEDED) {
2864 					privp->delayed_retry = 1;
2865 					return;
2866 				}
2867 				/* call the transport to send a pkt */
2868 				if (soc_transport(sf->sf_sochandle, fpkt,
2869 				    FCAL_NOSLEEP, CQ_REQUEST_1) !=
2870 				    FCAL_TRANSPORT_SUCCESS) {
2871 					mutex_enter(&sf->sf_mutex);
2872 					if (privp->prev != NULL) {
2873 						privp->prev->next =
2874 						    privp->next;
2875 					}
2876 					if (privp->next != NULL) {
2877 						privp->next->prev =
2878 						    privp->prev;
2879 					}
2880 					if (sf->sf_els_list == privp) {
2881 						sf->sf_els_list = privp->next;
2882 					}
2883 					mutex_exit(&sf->sf_mutex);
2884 					goto fail;
2885 				} else
2886 					return;
2887 			} else {
2888 				mutex_exit(&sf->sf_mutex);
2889 				goto fail;
2890 			}
2891 		} else {
2892 #ifdef	DEBUG
2893 			if (fpkt->fcal_pkt_status != 0x36 || sfdebug > 4) {
2894 				SF_DEBUG(2, (sf, CE_NOTE, "ELS %x to al_pa %x failed",
2895 				    privp->els_code, privp->dest_nport_id));
2896 				if (fpkt->fcal_pkt_status == FCAL_STATUS_OK) {
2897 					SF_DEBUG(2, (sf, CE_NOTE,
2898 					    "els reply code = %x", ptr->ls_code));
2899 					if (ptr->ls_code == LA_ELS_RJT)
2900 						SF_DEBUG(1, (sf, CE_CONT,
2901 						    "LS_RJT reason = %x\n",
2902 						    *(((uint_t *)ptr) + 1)));
2903 				} else
2904 					SF_DEBUG(2, (sf, CE_NOTE,
2905 					    "fc packet status = %x",
2906 					    fpkt->fcal_pkt_status));
2907 			}
2908 #endif
2909 			goto fail;
2910 		}
2911 	}
2912 	return;					/* success */
2913 fail:
2914 	mutex_enter(&sf->sf_mutex);
2915 	if (sf->sf_lip_cnt == privp->lip_cnt) {
2916 		sf->sf_device_count--;
2917 		ASSERT(sf->sf_device_count >= 0);
2918 		if (sf->sf_device_count == 0) {
2919 			sf_finish_init(sf, privp->lip_cnt);
2920 		}
2921 	}
2922 	mutex_exit(&sf->sf_mutex);
2923 	if (free_pkt) {
2924 		sf_els_free(fpkt);
2925 	}
2926 }
2927 
2928 
2929 /*
2930  * send a PRLI (process login) ELS IU via the transport,
2931  * returning TRUE upon success, else returning FALSE
2932  */
2933 static int
2934 sf_do_prli(struct sf *sf, struct sf_els_hdr *privp, struct la_els_logi *ptr)
2935 {
2936 	struct la_els_prli	*prli = (struct la_els_prli *)privp->cmd;
2937 	struct fcp_prli		*fprli;
2938 	struct  fcal_packet	*fpkt = privp->fpkt;
2939 
2940 
2941 	fpkt->fcal_socal_request.sr_dataseg[0].fc_count =
2942 	    sizeof (struct la_els_prli);
2943 	privp->els_code = LA_ELS_PRLI;
2944 	fprli = (struct fcp_prli *)prli->service_params;
2945 	prli->ls_code = LA_ELS_PRLI;
2946 	prli->page_length = 0x10;
2947 	prli->payload_length = sizeof (struct la_els_prli);
2948 	fprli->type = 0x08;		/* FC-4 type: SCSI-FCP (no define) */
2949 	fprli->resvd1 = 0;
2950 	fprli->orig_process_assoc_valid = 0;
2951 	fprli->resp_process_assoc_valid = 0;
2952 	fprli->establish_image_pair = 1;
2953 	fprli->resvd2 = 0;
2954 	fprli->resvd3 = 0;
2955 	fprli->data_overlay_allowed = 0;
2956 	fprli->initiator_fn = 1;
2957 	fprli->target_fn = 0;
2958 	fprli->cmd_data_mixed = 0;
2959 	fprli->data_resp_mixed = 0;
2960 	fprli->read_xfer_rdy_disabled = 1;
2961 	fprli->write_xfer_rdy_disabled = 0;
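	/*
	 * The page above advertises an FCP initiator (not a target)
	 * asking for an image pair; READ XFER_RDY is disabled, as FCP
	 * requires, while XFER_RDY remains in use for writes.
	 */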
2962 
2963 	bcopy((caddr_t)&ptr->nport_ww_name, (caddr_t)&privp->port_wwn,
2964 	    sizeof (privp->port_wwn));
2965 	bcopy((caddr_t)&ptr->node_ww_name, (caddr_t)&privp->node_wwn,
2966 	    sizeof (privp->node_wwn));
2967 
2968 	privp->timeout = sf_watchdog_time + SF_ELS_TIMEOUT;
2969 	return (sf_els_transport(sf, privp));
2970 }
2971 
2972 
2973 /*
2974  * send an ADISC (address discovery) ELS IU via the transport,
2975  * returning TRUE upon success, else returning FALSE
2976  */
2977 static int
2978 sf_do_adisc(struct sf *sf, struct sf_els_hdr *privp)
2979 {
2980 	struct la_els_adisc	*adisc = (struct la_els_adisc *)privp->cmd;
2981 	struct	fcal_packet	*fpkt = privp->fpkt;
2982 
2983 	privp->els_code = LA_ELS_ADISC;
2984 	adisc->ls_code = LA_ELS_ADISC;
2985 	adisc->mbz[0] = 0;
2986 	adisc->mbz[1] = 0;
2987 	adisc->mbz[2] = 0;
2988 	adisc->hard_address = 0;	/* target's ACC supplies the real one */
2989 	fpkt->fcal_socal_request.sr_dataseg[0].fc_count =
2990 	    sizeof (struct la_els_adisc);
2991 	bcopy((caddr_t)&sf->sf_sochandle->fcal_p_wwn,
2992 	    (caddr_t)&adisc->port_wwn, sizeof (adisc->port_wwn));
2993 	bcopy((caddr_t)&sf->sf_sochandle->fcal_n_wwn,
2994 	    (caddr_t)&adisc->node_wwn, sizeof (adisc->node_wwn));
2995 	adisc->nport_id = sf->sf_al_pa;
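	/*
	 * The request carries our own WWNs and AL-PA; the target's ACC
	 * echoes back its identifiers, and sf_els_callback() checks the
	 * returned hard address against the AL-PA we expected.
	 */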
2996 
2997 	privp->timeout = sf_watchdog_time + SF_ELS_TIMEOUT;
2998 	return (sf_els_transport(sf, privp));
2999 }
3000 
3001 
3002 static struct fcal_packet *
3003 sf_els_alloc(struct sf *sf, uchar_t dest_id, int priv_size, int cmd_size,
3004     int rsp_size, caddr_t *rprivp, caddr_t *cmd_buf)
3005 {
3006 	struct	fcal_packet	*fpkt;
3007 	ddi_dma_cookie_t	pcookie;
3008 	ddi_dma_cookie_t	rcookie;
3009 	struct	sf_els_hdr	*privp;
3010 	ddi_dma_handle_t	cmd_dma_handle = NULL;
3011 	ddi_dma_handle_t	rsp_dma_handle = NULL;
3012 	ddi_acc_handle_t	cmd_acc_handle = NULL;
3013 	ddi_acc_handle_t	rsp_acc_handle = NULL;
3014 	size_t			real_size;
3015 	uint_t			ccount;
3016 	fc_frame_header_t	*hp;
3017 	int			cmd_bound = FALSE, rsp_bound = FALSE;
3018 	caddr_t			cmd = NULL;
3019 	caddr_t			rsp = NULL;
3020 
3021 	if ((fpkt = (struct fcal_packet *)kmem_zalloc(
3022 	    sizeof (struct fcal_packet), KM_NOSLEEP)) == NULL) {
3023 		SF_DEBUG(1, (sf, CE_WARN,
3024 			"Could not allocate fcal_packet for ELS\n"));
3025 		return (NULL);
3026 	}
3027 
3028 	if ((privp = (struct sf_els_hdr *)kmem_zalloc(priv_size,
3029 	    KM_NOSLEEP)) == NULL) {
3030 		SF_DEBUG(1, (sf, CE_WARN,
3031 		    "Could not allocate sf_els_hdr for ELS\n"));
3032 		goto fail;
3033 	}
3034 
3035 	privp->size = priv_size;
3036 	fpkt->fcal_pkt_private = (caddr_t)privp;
3037 
3038 	if (ddi_dma_alloc_handle(sf->sf_dip, sf->sf_sochandle->fcal_dmaattr,
3039 	    DDI_DMA_DONTWAIT, NULL, &cmd_dma_handle) != DDI_SUCCESS) {
3040 		SF_DEBUG(1, (sf, CE_WARN,
3041 		    "Could not allocate DMA handle for ELS\n"));
3042 		goto fail;
3043 	}
3044 
3045 	if (ddi_dma_mem_alloc(cmd_dma_handle, cmd_size,
3046 	    sf->sf_sochandle->fcal_accattr, DDI_DMA_CONSISTENT,
3047 	    DDI_DMA_DONTWAIT, NULL, &cmd,
3048 	    &real_size, &cmd_acc_handle) != DDI_SUCCESS) {
3049 		SF_DEBUG(1, (sf, CE_WARN,
3050 		    "Could not allocate DMA memory for ELS\n"));
3051 		goto fail;
3052 	}
3053 
3054 	if (real_size < cmd_size) {
3055 		SF_DEBUG(1, (sf, CE_WARN,
3056 		    "DMA memory too small for ELS\n"));
3057 		goto fail;
3058 	}
3059 
3060 	if (ddi_dma_addr_bind_handle(cmd_dma_handle, NULL,
3061 	    cmd, real_size, DDI_DMA_WRITE | DDI_DMA_CONSISTENT,
3062 	    DDI_DMA_DONTWAIT, NULL, &pcookie, &ccount) != DDI_DMA_MAPPED) {
3063 		SF_DEBUG(1, (sf, CE_WARN,
3064 		    "Could not bind DMA memory for ELS\n"));
3065 		goto fail;
3066 	}
3067 	cmd_bound = TRUE;
3068 
3069 	if (ccount != 1) {
3070 		SF_DEBUG(1, (sf, CE_WARN,
3071 		    "Wrong cookie count for ELS\n"));
3072 		goto fail;
3073 	}
3074 
3075 	if (ddi_dma_alloc_handle(sf->sf_dip, sf->sf_sochandle->fcal_dmaattr,
3076 	    DDI_DMA_DONTWAIT, NULL, &rsp_dma_handle) != DDI_SUCCESS) {
3077 		SF_DEBUG(1, (sf, CE_WARN,
3078 		    "Could not allocate DMA handle for ELS rsp\n"));
3079 		goto fail;
3080 	}
3081 	if (ddi_dma_mem_alloc(rsp_dma_handle, rsp_size,
3082 	    sf->sf_sochandle->fcal_accattr, DDI_DMA_CONSISTENT,
3083 	    DDI_DMA_DONTWAIT, NULL, &rsp,
3084 	    &real_size, &rsp_acc_handle) != DDI_SUCCESS) {
3085 		SF_DEBUG(1, (sf, CE_WARN,
3086 		    "Could not allocate DMA memory for ELS rsp\n"));
3087 		goto fail;
3088 	}
3089 
3090 	if (real_size < rsp_size) {
3091 		SF_DEBUG(1, (sf, CE_WARN,
3092 		    "DMA memory too small for ELS rsp\n"));
3093 		goto fail;
3094 	}
3095 
3096 	if (ddi_dma_addr_bind_handle(rsp_dma_handle, NULL,
3097 	    rsp, real_size, DDI_DMA_READ | DDI_DMA_CONSISTENT,
3098 	    DDI_DMA_DONTWAIT, NULL, &rcookie, &ccount) != DDI_DMA_MAPPED) {
3099 		SF_DEBUG(1, (sf, CE_WARN,
3100 		    "Could not bind DMA memory for ELS rsp\n"));
3101 		goto fail;
3102 	}
3103 	rsp_bound = TRUE;
3104 
3105 	if (ccount != 1) {
3106 		SF_DEBUG(1, (sf, CE_WARN,
3107 		    "Wrong cookie count for ELS rsp\n"));
3108 		goto fail;
3109 	}
3110 
3111 	privp->cmd = cmd;
3112 	privp->sf = sf;
3113 	privp->cmd_dma_handle = cmd_dma_handle;
3114 	privp->cmd_acc_handle = cmd_acc_handle;
3115 	privp->rsp = rsp;
3116 	privp->rsp_dma_handle = rsp_dma_handle;
3117 	privp->rsp_acc_handle = rsp_acc_handle;
3118 	privp->dest_nport_id = dest_id;
3119 	privp->fpkt = fpkt;
3120 
3121 	fpkt->fcal_pkt_cookie = sf->sf_socp;
3122 	fpkt->fcal_pkt_comp = sf_els_callback;
3123 	fpkt->fcal_magic = FCALP_MAGIC;
3124 	fpkt->fcal_pkt_flags = 0;
3125 	fpkt->fcal_socal_request.sr_soc_hdr.sh_flags =
3126 	    (ushort_t)(SOC_FC_HEADER | sf->sf_sochandle->fcal_portno);
3127 	fpkt->fcal_socal_request.sr_soc_hdr.sh_class = 3;
3128 	fpkt->fcal_socal_request.sr_soc_hdr.sh_seg_cnt = 2;
3129 	fpkt->fcal_socal_request.sr_soc_hdr.sh_byte_cnt = cmd_size;
3130 	fpkt->fcal_socal_request.sr_cqhdr.cq_hdr_count = 1;
3131 	fpkt->fcal_socal_request.sr_cqhdr.cq_hdr_flags = 0;
3132 	fpkt->fcal_socal_request.sr_cqhdr.cq_hdr_seqno = 0;
3133 	fpkt->fcal_socal_request.sr_cqhdr.cq_hdr_type = CQ_TYPE_SIMPLE;
3134 	fpkt->fcal_socal_request.sr_dataseg[0].fc_base = (uint32_t)
3135 	    pcookie.dmac_address;
3136 	fpkt->fcal_socal_request.sr_dataseg[0].fc_count = cmd_size;
3137 	fpkt->fcal_socal_request.sr_dataseg[1].fc_base = (uint32_t)
3138 	    rcookie.dmac_address;
3139 	fpkt->fcal_socal_request.sr_dataseg[1].fc_count = rsp_size;
3140 
3141 	/* Fill in the Fabric Channel Header */
3142 	hp = &fpkt->fcal_socal_request.sr_fc_frame_hdr;
3143 	hp->r_ctl = R_CTL_ELS_REQ;
3144 	hp->d_id = dest_id;
3145 	hp->s_id = sf->sf_al_pa;
3146 	hp->type = TYPE_EXTENDED_LS;
3147 	hp->reserved1 = 0;
3148 	hp->f_ctl = F_CTL_SEQ_INITIATIVE | F_CTL_FIRST_SEQ;
3149 	hp->seq_id = 0;
3150 	hp->df_ctl  = 0;
3151 	hp->seq_cnt = 0;
3152 	hp->ox_id = 0xffff;
3153 	hp->rx_id = 0xffff;
3154 	hp->ro = 0;
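	/*
	 * The header above describes a class-3 extended link service
	 * request: the first sequence of a new exchange, with sequence
	 * initiative handed to the responder and both exchange IDs left
	 * at the unassigned value 0xffff.
	 */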
3155 
3156 	*rprivp = (caddr_t)privp;
3157 	*cmd_buf = cmd;
3158 	return (fpkt);
3159 
3160 fail:
3161 	if (cmd_dma_handle != NULL) {
3162 		if (cmd_bound) {
3163 			(void) ddi_dma_unbind_handle(cmd_dma_handle);
3164 		}
3165 		ddi_dma_free_handle(&cmd_dma_handle);
3166 		privp->cmd_dma_handle = NULL;
3167 	}
3168 	if (rsp_dma_handle != NULL) {
3169 		if (rsp_bound) {
3170 			(void) ddi_dma_unbind_handle(rsp_dma_handle);
3171 		}
3172 		ddi_dma_free_handle(&rsp_dma_handle);
3173 		privp->rsp_dma_handle = NULL;
3174 	}
3175 	sf_els_free(fpkt);
3176 	return (NULL);
3177 }
3178 
3179 
3180 static void
3181 sf_els_free(struct fcal_packet *fpkt)
3182 {
3183 	struct	sf_els_hdr	*privp = fpkt->fcal_pkt_private;
3184 
3185 	if (privp != NULL) {
3186 		if (privp->cmd_dma_handle != NULL) {
3187 			(void) ddi_dma_unbind_handle(privp->cmd_dma_handle);
3188 			ddi_dma_free_handle(&privp->cmd_dma_handle);
3189 		}
3190 		if (privp->cmd != NULL) {
3191 			ddi_dma_mem_free(&privp->cmd_acc_handle);
3192 		}
3193 
3194 		if (privp->rsp_dma_handle != NULL) {
3195 			(void) ddi_dma_unbind_handle(privp->rsp_dma_handle);
3196 			ddi_dma_free_handle(&privp->rsp_dma_handle);
3197 		}
3198 
3199 		if (privp->rsp != NULL) {
3200 			ddi_dma_mem_free(&privp->rsp_acc_handle);
3201 		}
3202 		if (privp->data_dma_handle) {
3203 			(void) ddi_dma_unbind_handle(privp->data_dma_handle);
3204 			ddi_dma_free_handle(&privp->data_dma_handle);
3205 		}
3206 		if (privp->data_buf) {
3207 			ddi_dma_mem_free(&privp->data_acc_handle);
3208 		}
3209 		kmem_free(privp, privp->size);
3210 	}
3211 	kmem_free(fpkt, sizeof (struct fcal_packet));
3212 }
3213 
3214 
3215 static struct sf_target *
3216 sf_create_target(struct sf *sf, struct sf_els_hdr *privp, int tnum, int64_t lun)
3217 {
3218 	struct sf_target *target, *ntarget, *otarget, *ptarget;
3219 	int hash;
3220 #ifdef RAID_LUNS
3221 	int64_t orig_lun = lun;
3222 
3223 	/* XXXX Work around SCSA limitations. */
3224 	lun = *((short *)&lun);
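	/*
	 * On big-endian SPARC this keeps just the most significant 16
	 * bits of the 64-bit FCP LUN (its first addressing level) for
	 * use as the flat RAID LUN number, while orig_lun preserves the
	 * full value that goes out on the wire (stored in sft_lun below).
	 */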
3225 #endif
3226 	ntarget = kmem_zalloc(sizeof (struct sf_target), KM_NOSLEEP);
3227 	mutex_enter(&sf->sf_mutex);
3228 	if (sf->sf_lip_cnt != privp->lip_cnt) {
3229 		mutex_exit(&sf->sf_mutex);
3230 		if (ntarget != NULL)
3231 			kmem_free(ntarget, sizeof (struct sf_target));
3232 		return (NULL);
3233 	}
3234 
3235 	target = sf_lookup_target(sf, privp->port_wwn, lun);
3236 	if (lun != 0) {
3237 		/*
3238 		 * Since LUNs != 0 are queued up after LUN == 0, find LUN == 0
3239 		 * and enqueue the new LUN.
3240 		 */
3241 		if ((ptarget = sf_lookup_target(sf, privp->port_wwn,
3242 		    (int64_t)0)) == NULL) {
3243 			/*
3244 			 * Yeep -- no LUN 0?
3245 			 */
3246 			mutex_exit(&sf->sf_mutex);
3247 			sf_log(sf, CE_WARN, "target 0x%x "
3248 			    "lun %" PRIx64 ": No LUN 0\n", tnum, lun);
3249 			if (ntarget != NULL)
3250 				kmem_free(ntarget, sizeof (struct sf_target));
3251 			return (NULL);
3252 		}
3253 		mutex_enter(&ptarget->sft_mutex);
3254 		if (target != NULL && ptarget->sft_lip_cnt == sf->sf_lip_cnt &&
3255 		    ptarget->sft_state & SF_TARGET_OFFLINE) {
3256 			/* LUN 0 already finished, duplicate its state */
3257 			mutex_exit(&ptarget->sft_mutex);
3258 			sf_offline_target(sf, target);
3259 			mutex_exit(&sf->sf_mutex);
3260 			if (ntarget != NULL)
3261 				kmem_free(ntarget, sizeof (struct sf_target));
3262 			return (target);
3263 		} else if (target != NULL) {
3264 			/*
3265 			 * LUN 0 online or not examined yet.
3266 			 * Try to bring the LUN back online
3267 			 */
3268 			mutex_exit(&ptarget->sft_mutex);
3269 			mutex_enter(&target->sft_mutex);
3270 			target->sft_lip_cnt = privp->lip_cnt;
3271 			target->sft_state |= SF_TARGET_BUSY;
3272 			target->sft_state &= ~(SF_TARGET_OFFLINE |
3273 			    SF_TARGET_MARK);
3274 			target->sft_al_pa = (uchar_t)privp->dest_nport_id;
3275 			target->sft_hard_address = sf_switch_to_alpa[tnum];
3276 			mutex_exit(&target->sft_mutex);
3277 			mutex_exit(&sf->sf_mutex);
3278 			if (ntarget != NULL)
3279 				kmem_free(ntarget, sizeof (struct sf_target));
3280 			return (target);
3281 		}
3282 		mutex_exit(&ptarget->sft_mutex);
3283 		if (ntarget == NULL) {
3284 			mutex_exit(&sf->sf_mutex);
3285 			return (NULL);
3286 		}
3287 		/* Initialize new target structure */
3288 		bcopy((caddr_t)&privp->node_wwn,
3289 		    (caddr_t)&ntarget->sft_node_wwn, sizeof (privp->node_wwn));
3290 		bcopy((caddr_t)&privp->port_wwn,
3291 		    (caddr_t)&ntarget->sft_port_wwn, sizeof (privp->port_wwn));
3292 		ntarget->sft_lun.l = lun;
3293 #ifdef RAID_LUNS
3294 		ntarget->sft_lun.l = orig_lun;
3295 		ntarget->sft_raid_lun = (uint_t)lun;
3296 #endif
3297 		mutex_init(&ntarget->sft_mutex, NULL, MUTEX_DRIVER, NULL);
3298 		mutex_init(&ntarget->sft_pkt_mutex, NULL, MUTEX_DRIVER, NULL);
3299 		/* Don't let anyone use this till we finish up init. */
3300 		mutex_enter(&ntarget->sft_mutex);
3301 		mutex_enter(&ntarget->sft_pkt_mutex);
3302 
3303 		hash = SF_HASH(privp->port_wwn, lun);
3304 		ntarget->sft_next = sf->sf_wwn_lists[hash];
3305 		sf->sf_wwn_lists[hash] = ntarget;
3306 
3307 		ntarget->sft_lip_cnt = privp->lip_cnt;
3308 		ntarget->sft_al_pa = (uchar_t)privp->dest_nport_id;
3309 		ntarget->sft_hard_address = sf_switch_to_alpa[tnum];
3310 		ntarget->sft_device_type = DTYPE_UNKNOWN;
3311 		ntarget->sft_state = SF_TARGET_BUSY;
3312 		ntarget->sft_pkt_head = (struct sf_pkt *)&ntarget->
3313 		    sft_pkt_head;
3314 		ntarget->sft_pkt_tail = (struct sf_pkt *)&ntarget->
3315 		    sft_pkt_head;
3316 
3317 		mutex_enter(&ptarget->sft_mutex);
3318 		/* Walk to the tail of this port's LUN list */
3319 		for (target = ptarget; target->sft_next_lun;
3320 		    target = target->sft_next_lun) {
3321 			otarget = target->sft_next_lun;
3322 		}
3323 		ntarget->sft_next_lun = target->sft_next_lun;
3324 		target->sft_next_lun = ntarget;
3325 		mutex_exit(&ptarget->sft_mutex);
3326 		mutex_exit(&ntarget->sft_pkt_mutex);
3327 		mutex_exit(&ntarget->sft_mutex);
3328 		mutex_exit(&sf->sf_mutex);
3329 		return (ntarget);
3330 
3331 	}
3332 	if (target != NULL && target->sft_lip_cnt == sf->sf_lip_cnt) {
3333 		/* It's been touched this LIP -- duplicate WWNs */
3334 		sf_offline_target(sf, target); /* And all the baby targets */
3335 		mutex_exit(&sf->sf_mutex);
3336 		sf_log(sf, CE_WARN, "target 0x%x, duplicate port wwns\n",
3337 		    tnum);
3338 		if (ntarget != NULL) {
3339 			kmem_free(ntarget, sizeof (struct sf_target));
3340 		}
3341 		return (NULL);
3342 	}
3343 
3344 	if ((otarget = sf->sf_targets[tnum]) != NULL) {
3345 		/* Someone else is in our slot */
3346 		mutex_enter(&otarget->sft_mutex);
3347 		if (otarget->sft_lip_cnt == sf->sf_lip_cnt) {
3348 			mutex_exit(&otarget->sft_mutex);
3349 			sf_offline_target(sf, otarget);
3350 			if (target != NULL)
3351 				sf_offline_target(sf, target);
3352 			mutex_exit(&sf->sf_mutex);
3353 			sf_log(sf, CE_WARN,
3354 			    "target 0x%x, duplicate switch settings\n", tnum);
3355 			if (ntarget != NULL)
3356 				kmem_free(ntarget, sizeof (struct sf_target));
3357 			return (NULL);
3358 		}
3359 		mutex_exit(&otarget->sft_mutex);
3360 		if (bcmp((caddr_t)&privp->port_wwn, (caddr_t)&otarget->
3361 		    sft_port_wwn, sizeof (privp->port_wwn))) {
3362 			sf_offline_target(sf, otarget);
3363 			mutex_exit(&sf->sf_mutex);
3364 			sf_log(sf, CE_WARN, "wwn changed on target 0x%x\n",
3365 			    tnum);
3366 			bzero((caddr_t)&sf->sf_stats.tstats[tnum],
3367 			    sizeof (struct sf_target_stats));
3368 			mutex_enter(&sf->sf_mutex);
3369 		}
3370 	}
3371 
3372 	sf->sf_targets[tnum] = target;
3373 	if ((target = sf->sf_targets[tnum]) == NULL) {
3374 		if (ntarget == NULL) {
3375 			mutex_exit(&sf->sf_mutex);
3376 			return (NULL);
3377 		}
3378 		bcopy((caddr_t)&privp->node_wwn,
3379 		    (caddr_t)&ntarget->sft_node_wwn, sizeof (privp->node_wwn));
3380 		bcopy((caddr_t)&privp->port_wwn,
3381 		    (caddr_t)&ntarget->sft_port_wwn, sizeof (privp->port_wwn));
3382 		ntarget->sft_lun.l = lun;
3383 #ifdef RAID_LUNS
3384 		ntarget->sft_lun.l = orig_lun;
3385 		ntarget->sft_raid_lun = (uint_t)lun;
3386 #endif
3387 		mutex_init(&ntarget->sft_mutex, NULL, MUTEX_DRIVER, NULL);
3388 		mutex_init(&ntarget->sft_pkt_mutex, NULL, MUTEX_DRIVER, NULL);
3389 		mutex_enter(&ntarget->sft_mutex);
3390 		mutex_enter(&ntarget->sft_pkt_mutex);
3391 		hash = SF_HASH(privp->port_wwn, lun); /* lun 0 */
3392 		ntarget->sft_next = sf->sf_wwn_lists[hash];
3393 		sf->sf_wwn_lists[hash] = ntarget;
3394 
3395 		target = ntarget;
3396 		target->sft_lip_cnt = privp->lip_cnt;
3397 		target->sft_al_pa = (uchar_t)privp->dest_nport_id;
3398 		target->sft_hard_address = sf_switch_to_alpa[tnum];
3399 		target->sft_device_type = DTYPE_UNKNOWN;
3400 		target->sft_state = SF_TARGET_BUSY;
3401 		target->sft_pkt_head = (struct sf_pkt *)&target->
3402 		    sft_pkt_head;
3403 		target->sft_pkt_tail = (struct sf_pkt *)&target->
3404 		    sft_pkt_head;
3405 		sf->sf_targets[tnum] = target;
3406 		mutex_exit(&ntarget->sft_mutex);
3407 		mutex_exit(&ntarget->sft_pkt_mutex);
3408 		mutex_exit(&sf->sf_mutex);
3409 	} else {
3410 		mutex_enter(&target->sft_mutex);
3411 		target->sft_lip_cnt = privp->lip_cnt;
3412 		target->sft_state |= SF_TARGET_BUSY;
3413 		target->sft_state &= ~(SF_TARGET_OFFLINE | SF_TARGET_MARK);
3414 		target->sft_al_pa = (uchar_t)privp->dest_nport_id;
3415 		target->sft_hard_address = sf_switch_to_alpa[tnum];
3416 		mutex_exit(&target->sft_mutex);
3417 		mutex_exit(&sf->sf_mutex);
3418 		if (ntarget != NULL)
3419 			kmem_free(ntarget, sizeof (struct sf_target));
3420 	}
3421 	return (target);
3422 }
3423 
3424 
3425 /*
3426  * look up a target for a given sf instance by port WWN and LUN
3427  */
3428 /* ARGSUSED */
3429 static struct sf_target *
3430 #ifdef RAID_LUNS
3431 sf_lookup_target(struct sf *sf, uchar_t *wwn, int lun)
3432 #else
3433 sf_lookup_target(struct sf *sf, uchar_t *wwn, int64_t lun)
3434 #endif
3435 {
3436 	int hash;
3437 	struct sf_target *target;
3438 
3439 	ASSERT(mutex_owned(&sf->sf_mutex));
3440 	hash = SF_HASH(wwn, lun);
3441 
3442 	target = sf->sf_wwn_lists[hash];
3443 	while (target != NULL) {
3444 
3445 #ifndef	RAID_LUNS
3446 		if (bcmp((caddr_t)wwn, (caddr_t)&target->sft_port_wwn,
3447 		    sizeof (target->sft_port_wwn)) == 0 &&
3448 		    target->sft_lun.l == lun)
3449 			break;
3450 #else
3451 		if (bcmp((caddr_t)wwn, (caddr_t)&target->sft_port_wwn,
3452 		    sizeof (target->sft_port_wwn)) == 0 &&
3453 		    target->sft_raid_lun == lun)
3454 			break;
3455 #endif
3456 		target = target->sft_next;
3457 	}
3458 
3459 	return (target);
3460 }
3461 
3462 
3463 /*
3464  * Send out a REPORT_LUNS command.
3465  */
3466 static int
3467 sf_do_reportlun(struct sf *sf, struct sf_els_hdr *privp,
3468     struct sf_target *target)
3469 {
3470 	struct	fcal_packet	*fpkt = privp->fpkt;
3471 	ddi_dma_cookie_t	pcookie;
3472 	ddi_dma_handle_t	lun_dma_handle = NULL;
3473 	ddi_acc_handle_t	lun_acc_handle;
3474 	uint_t			ccount;
3475 	size_t			real_size;
3476 	caddr_t			lun_buf = NULL;
3477 	int			handle_bound = 0;
3478 	fc_frame_header_t	*hp = &fpkt->fcal_socal_request.sr_fc_frame_hdr;
3479 	struct fcp_cmd		*reportlun = (struct fcp_cmd *)privp->cmd;
3480 	char			*msg = "Transport";
3481 
3482 	if (ddi_dma_alloc_handle(sf->sf_dip, sf->sf_sochandle->fcal_dmaattr,
3483 	    DDI_DMA_DONTWAIT, NULL, &lun_dma_handle) != DDI_SUCCESS) {
3484 		msg = "ddi_dma_alloc_handle()";
3485 		goto fail;
3486 	}
3487 
3488 	if (ddi_dma_mem_alloc(lun_dma_handle, REPORT_LUNS_SIZE,
3489 	    sf->sf_sochandle->fcal_accattr, DDI_DMA_CONSISTENT,
3490 	    DDI_DMA_DONTWAIT, NULL, &lun_buf,
3491 	    &real_size, &lun_acc_handle) != DDI_SUCCESS) {
3492 		msg = "ddi_dma_mem_alloc()";
3493 		goto fail;
3494 	}
3495 
3496 	if (real_size < REPORT_LUNS_SIZE) {
3497 		msg = "DMA mem < REPORT_LUNS_SIZE";
3498 		goto fail;
3499 	}
3500 
3501 	if (ddi_dma_addr_bind_handle(lun_dma_handle, NULL,
3502 	    lun_buf, real_size, DDI_DMA_READ |
3503 	    DDI_DMA_CONSISTENT, DDI_DMA_DONTWAIT,
3504 	    NULL, &pcookie, &ccount) != DDI_DMA_MAPPED) {
3505 		msg = "ddi_dma_addr_bind_handle()";
3506 		goto fail;
3507 	}
3508 	handle_bound = 1;
3509 
3510 	if (ccount != 1) {
3511 		msg = "ccount != 1";
3512 		goto fail;
3513 	}
3514 	privp->els_code = 0;
3515 	privp->target = target;
3516 	privp->data_dma_handle = lun_dma_handle;
3517 	privp->data_acc_handle = lun_acc_handle;
3518 	privp->data_buf = lun_buf;
3519 
3520 	fpkt->fcal_pkt_comp = sf_reportlun_callback;
3521 	fpkt->fcal_socal_request.sr_soc_hdr.sh_seg_cnt = 3;
3522 	fpkt->fcal_socal_request.sr_cqhdr.cq_hdr_type = CQ_TYPE_IO_READ;
3523 	fpkt->fcal_socal_request.sr_dataseg[0].fc_count =
3524 	    sizeof (struct fcp_cmd);
3525 	fpkt->fcal_socal_request.sr_dataseg[2].fc_base =
3526 	    (uint32_t)pcookie.dmac_address;
3527 	fpkt->fcal_socal_request.sr_dataseg[2].fc_count = pcookie.dmac_size;
3528 	fpkt->fcal_socal_request.sr_soc_hdr.sh_byte_cnt = pcookie.dmac_size;
3529 	hp->r_ctl = R_CTL_COMMAND;
3530 	hp->type = TYPE_SCSI_FCP;
3531 	bzero((caddr_t)reportlun, sizeof (struct fcp_cmd));
3532 	((union scsi_cdb *)reportlun->fcp_cdb)->scc_cmd = SCMD_REPORT_LUNS;
3533 	/* Now set the buffer size.  If DDI gave us extra, that's O.K. */
3534 	((union scsi_cdb *)reportlun->fcp_cdb)->scc5_count0 =
3535 	    (real_size & 0x0ff);
3536 	((union scsi_cdb *)reportlun->fcp_cdb)->scc5_count1 =
3537 	    (real_size >> 8) & 0x0ff;
3538 	((union scsi_cdb *)reportlun->fcp_cdb)->scc5_count2 =
3539 	    (real_size >> 16) & 0x0ff;
3540 	((union scsi_cdb *)reportlun->fcp_cdb)->scc5_count3 =
3541 	    (real_size >> 24) & 0x0ff;
3542 	reportlun->fcp_cntl.cntl_read_data = 1;
3543 	reportlun->fcp_cntl.cntl_write_data = 0;
3544 	reportlun->fcp_data_len = pcookie.dmac_size;
3545 	reportlun->fcp_cntl.cntl_qtype = FCP_QTYPE_SIMPLE;
3546 
3547 	(void) ddi_dma_sync(lun_dma_handle, 0, 0, DDI_DMA_SYNC_FORDEV);
3548 	/* We know the target is there, so this should be fast */
3549 	privp->timeout = sf_watchdog_time + SF_FCP_TIMEOUT;
3550 	if (sf_els_transport(sf, privp) == 1)
3551 		return (1);
3552 
3553 fail:
3554 	sf_log(sf, CE_WARN,
3555 	    "%s failure for REPORTLUN to target 0x%x\n",
3556 	    msg, sf_alpa_to_switch[privp->dest_nport_id]);
3557 	sf_els_free(fpkt);
3558 	if (lun_dma_handle != NULL) {
3559 		if (handle_bound)
3560 			(void) ddi_dma_unbind_handle(lun_dma_handle);
3561 		ddi_dma_free_handle(&lun_dma_handle);
3562 	}
3563 	if (lun_buf != NULL) {
3564 		ddi_dma_mem_free(&lun_acc_handle);
3565 	}
3566 	return (0);
3567 }
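
/*
 * Illustrative sketch (not part of the driver): the four scc5_count
 * stores above place the CDB allocation length as big-endian bytes,
 * most-significant byte first.  A hypothetical helper doing the same
 * packing:
 */
#if 0
static void
sf_cdb_set_alloc_len(union scsi_cdb *cdb, uint32_t len)
{
	cdb->scc5_count3 = (len >> 24) & 0xff;	/* MSB */
	cdb->scc5_count2 = (len >> 16) & 0xff;
	cdb->scc5_count1 = (len >> 8) & 0xff;
	cdb->scc5_count0 = len & 0xff;		/* LSB */
}
#endif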
3568 
3569 /*
3570  * Handle the results of a REPORT_LUNS command:
3571  *	Create additional targets if necessary
3572  *	Initiate INQUIRYs on all LUNs.
3573  */
3574 static void
3575 sf_reportlun_callback(struct fcal_packet *fpkt)
3576 {
3577 	struct sf_els_hdr *privp = (struct sf_els_hdr *)fpkt->
3578 	    fcal_pkt_private;
3579 	struct scsi_report_luns *ptr =
3580 	    (struct scsi_report_luns *)privp->data_buf;
3581 	struct sf *sf = privp->sf;
3582 	struct sf_target *target = privp->target;
3583 	struct fcp_rsp *rsp = NULL;
3584 	int delayed_retry = 0;
3585 	int tid = sf_alpa_to_switch[target->sft_hard_address];
3586 	int i, free_pkt = 1;
3587 	short	ncmds;
3588 
3589 	mutex_enter(&sf->sf_mutex);
3590 	/* privp->timeout doubles as a state flag: bail if already handled */
3591 	if (privp->timeout == SF_INVALID_TIMEOUT) {
3592 		mutex_exit(&sf->sf_mutex);
3593 		return;
3594 	}
3595 	if (privp->prev)
3596 		privp->prev->next = privp->next;
3597 	if (privp->next)
3598 		privp->next->prev = privp->prev;
3599 	if (sf->sf_els_list == privp)
3600 		sf->sf_els_list = privp->next;
3601 	privp->prev = privp->next = NULL;
3602 	mutex_exit(&sf->sf_mutex);
3603 	ncmds = fpkt->fcal_ncmds;
3604 	ASSERT(ncmds >= 0);
3605 	mutex_enter(&sf->sf_cmd_mutex);
3606 	sf->sf_ncmds = ncmds;
3607 	mutex_exit(&sf->sf_cmd_mutex);
3608 
3609 	if (fpkt->fcal_pkt_status == FCAL_STATUS_OK) {
3610 		(void) ddi_dma_sync(privp->rsp_dma_handle, 0,
3611 		    0, DDI_DMA_SYNC_FORKERNEL);
3612 
3613 		rsp = (struct fcp_rsp *)privp->rsp;
3614 	}
3615 	SF_DEBUG(1, (sf, CE_CONT,
3616 	    "!REPORTLUN to al_pa %x pkt status %x scsi status %x\n",
3617 	    privp->dest_nport_id,
3618 	    fpkt->fcal_pkt_status,
3619 	    rsp ? rsp->fcp_u.fcp_status.scsi_status : 0));
3620 
3621 	/* See if target simply does not support REPORT_LUNS. */
3622 	if (rsp && rsp->fcp_u.fcp_status.scsi_status == STATUS_CHECK &&
3623 	    rsp->fcp_u.fcp_status.sense_len_set &&
3624 	    rsp->fcp_sense_len >=
3625 	    offsetof(struct scsi_extended_sense, es_qual_code)) {
3626 		struct scsi_extended_sense *sense;
3627 
3628 		sense = (struct scsi_extended_sense *)
3629 		    ((caddr_t)rsp + sizeof (struct fcp_rsp) +
3630 		    rsp->fcp_response_len);
3631 		if (sense->es_key == KEY_ILLEGAL_REQUEST) {
3632 			if (sense->es_add_code == 0x20) {
3633 				/* Fake LUN 0 */
3634 				SF_DEBUG(1, (sf, CE_CONT,
3635 				    "!REPORTLUN Faking good "
3636 				    "completion for alpa %x\n",
3637 				    privp->dest_nport_id));
3638 				ptr->lun_list_len = FCP_LUN_SIZE;
3639 				ptr->lun[0] = 0;
3640 				rsp->fcp_u.fcp_status.scsi_status =
3641 				    STATUS_GOOD;
3642 			} else if (sense->es_add_code == 0x25) {
3643 				SF_DEBUG(1, (sf, CE_CONT,
3644 				    "!REPORTLUN device alpa %x "
3645 				    "key %x code %x\n",
3646 				    privp->dest_nport_id,
3647 				    sense->es_key, sense->es_add_code));
3648 				goto fail;
3649 			}
3650 		} else if (sense->es_key == KEY_UNIT_ATTENTION &&
3651 		    sense->es_add_code == 0x29) {
3652 			SF_DEBUG(1, (sf, CE_CONT,
3653 			    "!REPORTLUN device alpa %x was reset\n",
3654 			    privp->dest_nport_id));
3655 		} else {
3656 			SF_DEBUG(1, (sf, CE_CONT,
3657 			    "!REPORTLUN device alpa %x "
3658 			    "key %x code %x\n",
3659 			    privp->dest_nport_id,
3660 			    sense->es_key, sense->es_add_code));
3661 /* XXXXXX The following is here to handle broken targets -- remove it later */
3662 			if (sf_reportlun_forever &&
3663 			    sense->es_key == KEY_UNIT_ATTENTION)
3664 				goto retry;
3665 /* XXXXXX */
3666 			if (sense->es_key == KEY_NOT_READY)
3667 				delayed_retry = 1;
3668 		}
3669 	}
3670 
3671 	if (rsp && rsp->fcp_u.fcp_status.scsi_status == STATUS_GOOD) {
3672 		struct fcp_rsp_info *bep;
3673 
3674 		bep = (struct fcp_rsp_info *)(&rsp->
3675 		    fcp_response_len + 1);
3676 		if (!rsp->fcp_u.fcp_status.rsp_len_set ||
3677 		    bep->rsp_code == FCP_NO_FAILURE) {
3678 			(void) ddi_dma_sync(privp->data_dma_handle,
3679 			    0, 0, DDI_DMA_SYNC_FORKERNEL);
3680 
3681 			/* Convert from #bytes to #LUN entries (8 bytes each) */
3682 			ptr->lun_list_len = ptr->lun_list_len >> 3;
3683 			SF_DEBUG(2, (sf, CE_CONT,
3684 			    "!REPORTLUN to al_pa %x succeeded: %d LUNs\n",
3685 			    privp->dest_nport_id, ptr->lun_list_len));
3686 			if (!ptr->lun_list_len) {
3687 				/* No LUNs? Ya gotta be kidding... */
3688 				sf_log(sf, CE_WARN,
3689 				    "SCSI violation -- "
3690 				    "target 0x%x reports no LUNs\n",
3691 				    sf_alpa_to_switch[
3692 				    privp->dest_nport_id]);
3693 				ptr->lun_list_len = 1;
3694 				ptr->lun[0] = 0;
3695 			}
3696 
3697 			mutex_enter(&sf->sf_mutex);
3698 			if (sf->sf_lip_cnt == privp->lip_cnt) {
3699 				sf->sf_device_count += ptr->lun_list_len - 1;
3700 			}
3701 
3702 			mutex_exit(&sf->sf_mutex);
3703 			for (i = 0; i < ptr->lun_list_len && privp->lip_cnt ==
3704 			    sf->sf_lip_cnt; i++) {
3705 				struct sf_els_hdr *nprivp;
3706 				struct fcal_packet *nfpkt;
3707 
3708 				/* LUN 0 is already in `target' */
3709 				if (ptr->lun[i] != 0) {
3710 					target = sf_create_target(sf,
3711 					    privp, tid, ptr->lun[i]);
3712 				}
3713 				nprivp = NULL;
3714 				nfpkt = NULL;
3715 				if (target) {
3716 					nfpkt = sf_els_alloc(sf,
3717 					    target->sft_al_pa,
3718 					    sizeof (struct sf_els_hdr),
3719 					    sizeof (union sf_els_cmd),
3720 					    sizeof (union sf_els_rsp),
3721 					    (caddr_t *)&nprivp,
3722 					    (caddr_t *)&rsp);
3723 					if (nprivp)
3724 						nprivp->lip_cnt =
3725 						    privp->lip_cnt;
3726 				}
3727 				if (nfpkt && nprivp &&
3728 				    (sf_do_inquiry(sf, nprivp, target) ==
3729 				    0)) {
3730 					mutex_enter(&sf->sf_mutex);
3731 					if (sf->sf_lip_cnt == privp->
3732 					    lip_cnt) {
3733 						sf->sf_device_count--;
3734 					}
3735 					sf_offline_target(sf, target);
3736 					mutex_exit(&sf->sf_mutex);
3737 				}
3738 			}
3739 			sf_els_free(fpkt);
3740 			return;
3741 		} else {
3742 			SF_DEBUG(1, (sf, CE_CONT,
3743 			    "!REPORTLUN al_pa %x fcp failure, "
3744 			    "fcp_rsp_code %x scsi status %x\n",
3745 			    privp->dest_nport_id, bep->rsp_code,
3746 			    rsp ? rsp->fcp_u.fcp_status.scsi_status : 0));
3747 			goto fail;
3748 		}
3749 	}
3750 	if (rsp && ((rsp->fcp_u.fcp_status.scsi_status == STATUS_BUSY) ||
3751 	    (rsp->fcp_u.fcp_status.scsi_status == STATUS_QFULL))) {
3752 		delayed_retry = 1;
3753 	}
3754 
3755 	if (++(privp->retries) < sf_els_retries ||
3756 	    (delayed_retry && privp->retries < SF_BSY_RETRIES)) {
3757 /* XXXXXX The following is here to handle broken targets -- remove it later */
3758 retry:
3759 /* XXXXXX */
3760 		if (delayed_retry) {
3761 			privp->retries--;
3762 			privp->timeout = sf_watchdog_time + SF_BSY_TIMEOUT;
3763 			privp->delayed_retry = 1;
3764 		} else {
3765 			privp->timeout = sf_watchdog_time + SF_FCP_TIMEOUT;
3766 		}
3767 
3768 		privp->prev = NULL;
3769 		mutex_enter(&sf->sf_mutex);
3770 		if (privp->lip_cnt == sf->sf_lip_cnt) {
3771 			if (!delayed_retry)
3772 				SF_DEBUG(1, (sf, CE_WARN,
3773 				    "!REPORTLUN to al_pa %x failed, retrying\n",
3774 				    privp->dest_nport_id));
3775 			privp->next = sf->sf_els_list;
3776 			if (sf->sf_els_list != NULL)
3777 				sf->sf_els_list->prev = privp;
3778 			sf->sf_els_list = privp;
3779 			mutex_exit(&sf->sf_mutex);
3780 			if (!delayed_retry && soc_transport(sf->sf_sochandle,
3781 			    fpkt, FCAL_NOSLEEP, CQ_REQUEST_1) !=
3782 			    FCAL_TRANSPORT_SUCCESS) {
3783 				mutex_enter(&sf->sf_mutex);
3784 				if (privp->prev)
3785 					privp->prev->next = privp->next;
3786 				if (privp->next)
3787 					privp->next->prev = privp->prev;
3788 				if (sf->sf_els_list == privp)
3789 					sf->sf_els_list = privp->next;
3790 				mutex_exit(&sf->sf_mutex);
3791 				goto fail;
3792 			} else
3793 				return;
3794 		} else {
3795 			mutex_exit(&sf->sf_mutex);
3796 		}
3797 	} else {
3798 fail:
3799 
3800 		/* REPORT_LUN failed -- try inquiry */
3801 		if (sf_do_inquiry(sf, privp, target) != 0) {
3802 			return;
3803 		} else {
3804 			free_pkt = 0;
3805 		}
3806 		mutex_enter(&sf->sf_mutex);
3807 		if (sf->sf_lip_cnt == privp->lip_cnt) {
3808 			sf_log(sf, CE_WARN,
3809 			    "!REPORTLUN to target 0x%x failed\n",
3810 			    sf_alpa_to_switch[privp->dest_nport_id]);
3811 			sf_offline_target(sf, target);
3812 			sf->sf_device_count--;
3813 			ASSERT(sf->sf_device_count >= 0);
3814 			if (sf->sf_device_count == 0)
3815 				sf_finish_init(sf, privp->lip_cnt);
3816 		}
3817 		mutex_exit(&sf->sf_mutex);
3818 	}
3819 	if (free_pkt) {
3820 		sf_els_free(fpkt);
3821 	}
3822 }
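
/*
 * Illustrative sketch (not part of the driver): after the byte-count
 * to entry-count conversion above, the REPORT_LUNS payload is walked
 * as an array of 8-byte LUN values.  The helper name and the uint64_t
 * element type are assumptions for illustration.
 */
#if 0
static void
sf_print_lun_list(struct sf *sf, struct scsi_report_luns *ptr)
{
	int i;

	for (i = 0; i < ptr->lun_list_len; i++) {
		SF_DEBUG(2, (sf, CE_CONT, "!LUN entry %d: %" PRIx64 "\n",
		    i, (uint64_t)ptr->lun[i]));
	}
}
#endif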
3823 
3824 static int
3825 sf_do_inquiry(struct sf *sf, struct sf_els_hdr *privp,
3826     struct sf_target *target)
3827 {
3828 	struct	fcal_packet	*fpkt = privp->fpkt;
3829 	ddi_dma_cookie_t	pcookie;
3830 	ddi_dma_handle_t	inq_dma_handle = NULL;
3831 	ddi_acc_handle_t	inq_acc_handle;
3832 	uint_t			ccount;
3833 	size_t			real_size;
3834 	caddr_t			inq_buf = NULL;
3835 	int			handle_bound = FALSE;
3836 	fc_frame_header_t *hp = &fpkt->fcal_socal_request.sr_fc_frame_hdr;
3837 	struct fcp_cmd		*inq = (struct fcp_cmd *)privp->cmd;
3838 	char			*msg = "Transport";
3839 
3840 
3841 	if (ddi_dma_alloc_handle(sf->sf_dip, sf->sf_sochandle->fcal_dmaattr,
3842 	    DDI_DMA_DONTWAIT, NULL, &inq_dma_handle) != DDI_SUCCESS) {
3843 		msg = "ddi_dma_alloc_handle()";
3844 		goto fail;
3845 	}
3846 
3847 	if (ddi_dma_mem_alloc(inq_dma_handle, SUN_INQSIZE,
3848 	    sf->sf_sochandle->fcal_accattr, DDI_DMA_CONSISTENT,
3849 	    DDI_DMA_DONTWAIT, NULL, &inq_buf,
3850 	    &real_size, &inq_acc_handle) != DDI_SUCCESS) {
3851 		msg = "ddi_dma_mem_alloc()";
3852 		goto fail;
3853 	}
3854 
3855 	if (real_size < SUN_INQSIZE) {
3856 		msg = "DMA mem < inquiry size";
3857 		goto fail;
3858 	}
3859 
3860 	if (ddi_dma_addr_bind_handle(inq_dma_handle, NULL,
3861 	    inq_buf, real_size, DDI_DMA_READ | DDI_DMA_CONSISTENT,
3862 	    DDI_DMA_DONTWAIT, NULL, &pcookie, &ccount) != DDI_DMA_MAPPED) {
3863 		msg = "ddi_dma_addr_bind_handle()";
3864 		goto fail;
3865 	}
3866 	handle_bound = TRUE;
3867 
3868 	if (ccount != 1) {
3869 		msg = "ccount != 1";
3870 		goto fail;
3871 	}
3872 	privp->els_code = 0;			/* not an ELS command */
3873 	privp->target = target;
3874 	privp->data_dma_handle = inq_dma_handle;
3875 	privp->data_acc_handle = inq_acc_handle;
3876 	privp->data_buf = inq_buf;
3877 	fpkt->fcal_pkt_comp = sf_inq_callback;
3878 	fpkt->fcal_socal_request.sr_soc_hdr.sh_seg_cnt = 3;
3879 	fpkt->fcal_socal_request.sr_cqhdr.cq_hdr_type = CQ_TYPE_IO_READ;
3880 	fpkt->fcal_socal_request.sr_dataseg[0].fc_count =
3881 	    sizeof (struct fcp_cmd);
3882 	fpkt->fcal_socal_request.sr_dataseg[2].fc_base =
3883 	    (uint32_t)pcookie.dmac_address;
3884 	fpkt->fcal_socal_request.sr_dataseg[2].fc_count = pcookie.dmac_size;
3885 	fpkt->fcal_socal_request.sr_soc_hdr.sh_byte_cnt = pcookie.dmac_size;
3886 	hp->r_ctl = R_CTL_COMMAND;
3887 	hp->type = TYPE_SCSI_FCP;
3888 	bzero((caddr_t)inq, sizeof (struct fcp_cmd));
3889 	((union scsi_cdb *)inq->fcp_cdb)->scc_cmd = SCMD_INQUIRY;
3890 	((union scsi_cdb *)inq->fcp_cdb)->g0_count0 = SUN_INQSIZE;
3891 	bcopy((caddr_t)&target->sft_lun.b, (caddr_t)&inq->fcp_ent_addr,
3892 	    FCP_LUN_SIZE);
3893 	inq->fcp_cntl.cntl_read_data = 1;
3894 	inq->fcp_cntl.cntl_write_data = 0;
3895 	inq->fcp_data_len = pcookie.dmac_size;
3896 	inq->fcp_cntl.cntl_qtype = FCP_QTYPE_SIMPLE;
3897 
3898 	(void) ddi_dma_sync(inq_dma_handle, (off_t)0, (size_t)0,
3899 	    DDI_DMA_SYNC_FORDEV);
3900 	privp->timeout = sf_watchdog_time + SF_FCP_TIMEOUT;
3901 	SF_DEBUG(5, (sf, CE_WARN,
3902 	    "!Sending INQUIRY to al_pa %x lun %" PRIx64 "\n",
3903 	    privp->dest_nport_id,
3904 	    SCSA_LUN(target)));
3905 	return (sf_els_transport(sf, privp));
3906 
3907 fail:
3908 	sf_log(sf, CE_WARN,
3909 	    "%s failure for INQUIRY to target 0x%x\n",
3910 	    msg, sf_alpa_to_switch[privp->dest_nport_id]);
3911 	sf_els_free(fpkt);
3912 	if (inq_dma_handle != NULL) {
3913 		if (handle_bound) {
3914 			(void) ddi_dma_unbind_handle(inq_dma_handle);
3915 		}
3916 		ddi_dma_free_handle(&inq_dma_handle);
3917 	}
3918 	if (inq_buf != NULL) {
3919 		ddi_dma_mem_free(&inq_acc_handle);
3920 	}
3921 	return (FALSE);
3922 }
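
/*
 * Illustrative sketch (not part of the driver): sf_do_reportlun() and
 * sf_do_inquiry() above share one DDI DMA recipe -- allocate a handle,
 * allocate consistent memory, bind it, and require a single cookie.
 * The helper below is a hypothetical condensation of that recipe.
 */
#if 0
static int
sf_dma_setup(struct sf *sf, size_t len, ddi_dma_handle_t *dhp,
    ddi_acc_handle_t *ahp, caddr_t *bufp, ddi_dma_cookie_t *cookiep)
{
	size_t real_size;
	uint_t ccount;

	if (ddi_dma_alloc_handle(sf->sf_dip, sf->sf_sochandle->fcal_dmaattr,
	    DDI_DMA_DONTWAIT, NULL, dhp) != DDI_SUCCESS)
		return (FALSE);
	if (ddi_dma_mem_alloc(*dhp, len, sf->sf_sochandle->fcal_accattr,
	    DDI_DMA_CONSISTENT, DDI_DMA_DONTWAIT, NULL, bufp,
	    &real_size, ahp) != DDI_SUCCESS) {
		ddi_dma_free_handle(dhp);
		return (FALSE);
	}
	if (real_size < len) {
		ddi_dma_mem_free(ahp);
		ddi_dma_free_handle(dhp);
		return (FALSE);
	}
	if (ddi_dma_addr_bind_handle(*dhp, NULL, *bufp, real_size,
	    DDI_DMA_READ | DDI_DMA_CONSISTENT, DDI_DMA_DONTWAIT, NULL,
	    cookiep, &ccount) != DDI_DMA_MAPPED) {
		ddi_dma_mem_free(ahp);
		ddi_dma_free_handle(dhp);
		return (FALSE);
	}
	if (ccount != 1) {
		(void) ddi_dma_unbind_handle(*dhp);
		ddi_dma_mem_free(ahp);
		ddi_dma_free_handle(dhp);
		return (FALSE);
	}
	return (TRUE);
}
#endif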
3923 
3924 
3925 /*
3926  * called as the pkt_comp routine for INQ packets
3927  */
3928 static void
3929 sf_inq_callback(struct fcal_packet *fpkt)
3930 {
3931 	struct sf_els_hdr *privp = (struct sf_els_hdr *)fpkt->
3932 	    fcal_pkt_private;
3933 	struct scsi_inquiry *prt = (struct scsi_inquiry *)privp->data_buf;
3934 	struct sf *sf = privp->sf;
3935 	struct sf *tsf;
3936 	struct sf_target *target = privp->target;
3937 	struct fcp_rsp *rsp;
3938 	int delayed_retry = FALSE;
3939 	short	ncmds;
3940 
3941 
3942 	mutex_enter(&sf->sf_mutex);
3943 	/* privp->timeout doubles as a state flag: bail if already handled */
3944 	if (privp->timeout == SF_INVALID_TIMEOUT) {
3945 		mutex_exit(&sf->sf_mutex);
3946 		return;
3947 	}
3948 	if (privp->prev != NULL) {
3949 		privp->prev->next = privp->next;
3950 	}
3951 	if (privp->next != NULL) {
3952 		privp->next->prev = privp->prev;
3953 	}
3954 	if (sf->sf_els_list == privp) {
3955 		sf->sf_els_list = privp->next;
3956 	}
3957 	privp->prev = privp->next = NULL;
3958 	mutex_exit(&sf->sf_mutex);
3959 	ncmds = fpkt->fcal_ncmds;
3960 	ASSERT(ncmds >= 0);
3961 	mutex_enter(&sf->sf_cmd_mutex);
3962 	sf->sf_ncmds = ncmds;
3963 	mutex_exit(&sf->sf_cmd_mutex);
3964 
3965 	if (fpkt->fcal_pkt_status == FCAL_STATUS_OK) {
3966 
3967 		(void) ddi_dma_sync(privp->rsp_dma_handle, (off_t)0,
3968 		    (size_t)0, DDI_DMA_SYNC_FORKERNEL);
3969 
3970 		rsp = (struct fcp_rsp *)privp->rsp;
3971 		SF_DEBUG(2, (sf, CE_CONT,
3972 		    "!INQUIRY to al_pa %x scsi status %x",
3973 		    privp->dest_nport_id, rsp->fcp_u.fcp_status.scsi_status));
3974 
3975 		if ((rsp->fcp_u.fcp_status.scsi_status == STATUS_GOOD) &&
3976 		    !rsp->fcp_u.fcp_status.resid_over &&
3977 		    (!rsp->fcp_u.fcp_status.resid_under ||
3978 		    ((SUN_INQSIZE - rsp->fcp_resid) >= SUN_MIN_INQLEN))) {
3979 			struct fcp_rsp_info *bep;
3980 
3981 			bep = (struct fcp_rsp_info *)(&rsp->
3982 			    fcp_response_len + 1);
3983 
3984 			if (!rsp->fcp_u.fcp_status.rsp_len_set ||
3985 			    (bep->rsp_code == FCP_NO_FAILURE)) {
3986 
3987 				SF_DEBUG(2, (sf, CE_CONT,
3988 				    "!INQUIRY to al_pa %x lun %" PRIx64
3989 				    " succeeded\n",
3990 				    privp->dest_nport_id, SCSA_LUN(target)));
3991 
3992 				(void) ddi_dma_sync(privp->data_dma_handle,
3993 				    (off_t)0, (size_t)0,
3994 				    DDI_DMA_SYNC_FORKERNEL);
3995 
3996 				mutex_enter(&sf->sf_mutex);
3997 
3998 				if (sf->sf_lip_cnt == privp->lip_cnt) {
3999 					mutex_enter(&target->sft_mutex);
4000 					target->sft_device_type =
4001 					    prt->inq_dtype;
4002 					bcopy(prt, &target->sft_inq,
4003 					    sizeof (*prt));
4004 					mutex_exit(&target->sft_mutex);
4005 					sf->sf_device_count--;
4006 					ASSERT(sf->sf_device_count >= 0);
4007 					if (sf->sf_device_count == 0) {
4008 						sf_finish_init(sf,
4009 						    privp->lip_cnt);
4010 					}
4011 				}
4012 				mutex_exit(&sf->sf_mutex);
4013 				sf_els_free(fpkt);
4014 				return;
4015 			}
4016 		} else if ((rsp->fcp_u.fcp_status.scsi_status ==
4017 		    STATUS_BUSY) ||
4018 		    (rsp->fcp_u.fcp_status.scsi_status == STATUS_QFULL) ||
4019 		    (rsp->fcp_u.fcp_status.scsi_status == STATUS_CHECK)) {
4020 			delayed_retry = TRUE;
4021 		}
4022 	} else {
4023 		SF_DEBUG(2, (sf, CE_CONT, "!INQUIRY to al_pa %x fc status %x",
4024 		    privp->dest_nport_id, fpkt->fcal_pkt_status));
4025 	}
4026 
4027 	if (++(privp->retries) < sf_els_retries ||
4028 	    (delayed_retry && privp->retries < SF_BSY_RETRIES)) {
4029 		if (fpkt->fcal_pkt_status == FCAL_STATUS_MAX_XCHG_EXCEEDED) {
4030 			tsf = sf->sf_sibling;
4031 			if (tsf != NULL) {
4032 				mutex_enter(&tsf->sf_cmd_mutex);
4033 				tsf->sf_flag = 1;
4034 				tsf->sf_throttle = SF_DECR_DELTA;
4035 				mutex_exit(&tsf->sf_cmd_mutex);
4036 			}
4037 			delayed_retry = TRUE;
4038 		}
4039 		if (delayed_retry) {
4040 			privp->retries--;
4041 			privp->timeout = sf_watchdog_time + SF_BSY_TIMEOUT;
4042 			privp->delayed_retry = TRUE;
4043 		} else {
4044 			privp->timeout = sf_watchdog_time + SF_FCP_TIMEOUT;
4045 		}
4046 
4047 		privp->prev = NULL;
4048 		mutex_enter(&sf->sf_mutex);
4049 		if (privp->lip_cnt == sf->sf_lip_cnt) {
4050 			if (!delayed_retry) {
4051 				SF_DEBUG(1, (sf, CE_WARN,
4052 				    "INQUIRY to al_pa %x failed, retrying",
4053 				    privp->dest_nport_id));
4054 			}
4055 			privp->next = sf->sf_els_list;
4056 			if (sf->sf_els_list != NULL) {
4057 				sf->sf_els_list->prev = privp;
4058 			}
4059 			sf->sf_els_list = privp;
4060 			mutex_exit(&sf->sf_mutex);
4061 			/* if not delayed call transport to send a pkt */
4062 			if (!delayed_retry &&
4063 			    (soc_transport(sf->sf_sochandle, fpkt,
4064 			    FCAL_NOSLEEP, CQ_REQUEST_1) !=
4065 			    FCAL_TRANSPORT_SUCCESS)) {
4066 				mutex_enter(&sf->sf_mutex);
4067 				if (privp->prev != NULL) {
4068 					privp->prev->next = privp->next;
4069 				}
4070 				if (privp->next != NULL) {
4071 					privp->next->prev = privp->prev;
4072 				}
4073 				if (sf->sf_els_list == privp) {
4074 					sf->sf_els_list = privp->next;
4075 				}
4076 				mutex_exit(&sf->sf_mutex);
4077 				goto fail;
4078 			}
4079 			return;
4080 		}
4081 		mutex_exit(&sf->sf_mutex);
4082 	} else {
4083 fail:
4084 		mutex_enter(&sf->sf_mutex);
4085 		if (sf->sf_lip_cnt == privp->lip_cnt) {
4086 			sf_offline_target(sf, target);
4087 			sf_log(sf, CE_NOTE,
4088 			    "INQUIRY to target 0x%x lun %" PRIx64 " failed. "
4089 			    "Retry Count: %d\n",
4090 			    sf_alpa_to_switch[privp->dest_nport_id],
4091 			    SCSA_LUN(target),
4092 			    privp->retries);
4093 			sf->sf_device_count--;
4094 			ASSERT(sf->sf_device_count >= 0);
4095 			if (sf->sf_device_count == 0) {
4096 				sf_finish_init(sf, privp->lip_cnt);
4097 			}
4098 		}
4099 		mutex_exit(&sf->sf_mutex);
4100 	}
4101 	sf_els_free(fpkt);
4102 }
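
/*
 * Illustrative sketch (not part of the driver): both completion
 * callbacks above begin by unlinking privp from the doubly-linked
 * sf_els_list.  A hypothetical helper equivalent, to be called with
 * sf_mutex held:
 */
#if 0
static void
sf_els_unlink(struct sf *sf, struct sf_els_hdr *privp)
{
	ASSERT(mutex_owned(&sf->sf_mutex));

	if (privp->prev != NULL)
		privp->prev->next = privp->next;
	if (privp->next != NULL)
		privp->next->prev = privp->prev;
	if (sf->sf_els_list == privp)
		sf->sf_els_list = privp->next;
	privp->prev = privp->next = NULL;
}
#endif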
4103 
4104 
4105 static void
4106 sf_finish_init(struct sf *sf, int lip_cnt)
4107 {
4108 	int			i;		/* loop index */
4109 	int			cflag;
4110 	struct sf_target	*target;	/* current target */
4111 	dev_info_t		*dip;
4112 	struct sf_hp_elem	*elem;		/* hotplug element created */
4113 
4114 	SF_DEBUG(1, (sf, CE_WARN, "!sf_finish_init\n"));
4115 	ASSERT(mutex_owned(&sf->sf_mutex));
4116 
4117 	/* scan all hash queues */
4118 	for (i = 0; i < SF_NUM_HASH_QUEUES; i++) {
4119 		target = sf->sf_wwn_lists[i];
4120 		while (target != NULL) {
4121 			mutex_enter(&target->sft_mutex);
4122 
4123 			/* skip this target if it is already offline */
4124 			if ((target->sft_state & SF_TARGET_OFFLINE)) {
4125 				/*
4126 				 * target already offline
4127 				 */
4128 				mutex_exit(&target->sft_mutex);
4129 				goto next_entry;
4130 			}
4131 
4132 			/*
4133 			 * target is not already offline -- see if it has
4134 			 * already been marked as ready to go offline
4135 			 */
4136 			if (target->sft_state & SF_TARGET_MARK) {
4137 				/*
4138 				 * target already marked, so take it offline
4139 				 */
4140 				mutex_exit(&target->sft_mutex);
4141 				sf_offline_target(sf, target);
4142 				goto next_entry;
4143 			}
4144 
4145 			/* clear target busy flag */
4146 			target->sft_state &= ~SF_TARGET_BUSY;
4147 
4148 			/* is target init not yet done ?? */
4149 			cflag = !(target->sft_state & SF_TARGET_INIT_DONE);
4150 
4151 			/* get pointer to target dip */
4152 			dip = target->sft_dip;
4153 
4154 			mutex_exit(&target->sft_mutex);
4155 			mutex_exit(&sf->sf_mutex);
4156 
4157 			if (cflag && (dip == NULL)) {
4158 				/*
4159 				 * target init not yet done &&
4160 				 * devinfo not yet created
4161 				 */
4162 				sf_create_devinfo(sf, target, lip_cnt);
4163 				mutex_enter(&sf->sf_mutex);
4164 				goto next_entry;
4165 			}
4166 
4167 			/*
4168 			 * target init already done || devinfo already created
4169 			 */
4170 			ASSERT(dip != NULL);
4171 			if (!sf_create_props(dip, target, lip_cnt)) {
4172 				/* a problem creating properties */
4173 				mutex_enter(&sf->sf_mutex);
4174 				goto next_entry;
4175 			}
4176 
4177 			/* create a new element for the hotplug list */
4178 			if ((elem = kmem_zalloc(sizeof (struct sf_hp_elem),
4179 			    KM_NOSLEEP)) != NULL) {
4180 
4181 				/* fill in the new element */
4182 				elem->dip = dip;
4183 				elem->target = target;
4184 				elem->what = SF_ONLINE;
4185 
4186 				/* add the new element into the hotplug list */
4187 				mutex_enter(&sf->sf_hp_daemon_mutex);
4188 				if (sf->sf_hp_elem_tail != NULL) {
4189 					sf->sf_hp_elem_tail->next = elem;
4190 					sf->sf_hp_elem_tail = elem;
4191 				} else {
4192 					/* this is the first element in list */
4193 					sf->sf_hp_elem_head =
4194 					    sf->sf_hp_elem_tail =
4195 					    elem;
4196 				}
4197 				cv_signal(&sf->sf_hp_daemon_cv);
4198 				mutex_exit(&sf->sf_hp_daemon_mutex);
4199 			} else {
4200 				/* could not allocate memory for element ?? */
4201 				(void) ndi_devi_online_async(dip, 0);
4202 			}
4203 
4204 			mutex_enter(&sf->sf_mutex);
4205 
4206 next_entry:
4207 			/* ensure no new LIPs have occurred */
4208 			if (sf->sf_lip_cnt != lip_cnt) {
4209 				return;
4210 			}
4211 			target = target->sft_next;
4212 		}
4213 
4214 		/* done scanning all targets in this queue */
4215 	}
4216 
4217 	/* done with all hash queues */
4218 
4219 	sf->sf_state = SF_STATE_ONLINE;
4220 	sf->sf_online_timer = 0;
4221 }
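
/*
 * Illustrative sketch (not part of the driver): sf_finish_init() and
 * sf_offline_target() queue work for the hotplug daemon the same way,
 * appending to the tail of the sf_hp_elem list and waking the daemon.
 * A hypothetical helper equivalent (the driver itself uses KM_NOSLEEP
 * and falls back to direct NDI calls on allocation failure):
 */
#if 0
static void
sf_hp_enqueue(struct sf *sf, dev_info_t *dip, struct sf_target *target,
    int what)
{
	struct sf_hp_elem *elem;

	elem = kmem_zalloc(sizeof (struct sf_hp_elem), KM_SLEEP);
	elem->dip = dip;
	elem->target = target;
	elem->what = what;		/* SF_ONLINE or SF_OFFLINE */

	mutex_enter(&sf->sf_hp_daemon_mutex);
	if (sf->sf_hp_elem_tail != NULL) {
		sf->sf_hp_elem_tail->next = elem;
		sf->sf_hp_elem_tail = elem;
	} else {
		/* this is the first element in the list */
		sf->sf_hp_elem_head = sf->sf_hp_elem_tail = elem;
	}
	cv_signal(&sf->sf_hp_daemon_cv);
	mutex_exit(&sf->sf_hp_daemon_mutex);
}
#endif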
4222 
4223 
4224 /*
4225  * create devinfo node
4226  */
4227 static void
4228 sf_create_devinfo(struct sf *sf, struct sf_target *target, int lip_cnt)
4229 {
4230 	dev_info_t		*cdip = NULL;
4231 	char			*nname = NULL;
4232 	char			**compatible = NULL;
4233 	int			ncompatible;
4234 	struct scsi_inquiry	*inq = &target->sft_inq;
4235 	char			*scsi_binding_set;
4236 
4237 	/* get the 'scsi-binding-set' property */
4238 	if (ddi_prop_lookup_string(DDI_DEV_T_ANY, sf->sf_dip,
4239 	    DDI_PROP_NOTPROM | DDI_PROP_DONTPASS, "scsi-binding-set",
4240 	    &scsi_binding_set) != DDI_PROP_SUCCESS)
4241 		scsi_binding_set = NULL;
4242 
4243 	/* determine the node name and compatible */
4244 	scsi_hba_nodename_compatible_get(inq, scsi_binding_set,
4245 	    inq->inq_dtype, NULL, &nname, &compatible, &ncompatible);
4246 	if (scsi_binding_set)
4247 		ddi_prop_free(scsi_binding_set);
4248 
4249 	/* if nodename can't be determined then print a message and skip it */
4250 	if (nname == NULL) {
4251 #ifndef	RAID_LUNS
4252 		sf_log(sf, CE_WARN, "%s%d: no driver for device "
4253 		    "@w%02x%02x%02x%02x%02x%02x%02x%02x,%x\n"
4254 		    "    compatible: %s",
4255 		    ddi_driver_name(sf->sf_dip), ddi_get_instance(sf->sf_dip),
4256 		    target->sft_port_wwn[0], target->sft_port_wwn[1],
4257 		    target->sft_port_wwn[2], target->sft_port_wwn[3],
4258 		    target->sft_port_wwn[4], target->sft_port_wwn[5],
4259 		    target->sft_port_wwn[6], target->sft_port_wwn[7],
4260 		    target->sft_lun.l, *compatible);
4261 #else
4262 		sf_log(sf, CE_WARN, "%s%d: no driver for device "
4263 		    "@w%02x%02x%02x%02x%02x%02x%02x%02x,%x\n"
4264 		    "    compatible: %s",
4265 		    ddi_driver_name(sf->sf_dip), ddi_get_instance(sf->sf_dip),
4266 		    target->sft_port_wwn[0], target->sft_port_wwn[1],
4267 		    target->sft_port_wwn[2], target->sft_port_wwn[3],
4268 		    target->sft_port_wwn[4], target->sft_port_wwn[5],
4269 		    target->sft_port_wwn[6], target->sft_port_wwn[7],
4270 		    target->sft_raid_lun, *compatible);
4271 #endif
4272 		goto fail;
4273 	}
4274 
4275 	/* allocate the node */
4276 	if (ndi_devi_alloc(sf->sf_dip, nname,
4277 	    DEVI_SID_NODEID, &cdip) != NDI_SUCCESS) {
4278 		goto fail;
4279 	}
4280 
4281 	/* decorate the node with compatible */
4282 	if (ndi_prop_update_string_array(DDI_DEV_T_NONE, cdip,
4283 	    "compatible", compatible, ncompatible) != DDI_PROP_SUCCESS) {
4284 		goto fail;
4285 	}
4286 
4287 	/* add addressing properties to the node */
4288 	if (sf_create_props(cdip, target, lip_cnt) != 1) {
4289 		goto fail;
4290 	}
4291 
4292 	mutex_enter(&target->sft_mutex);
4293 	if (target->sft_dip != NULL) {
4294 		mutex_exit(&target->sft_mutex);
4295 		goto fail;
4296 	}
4297 	target->sft_dip = cdip;
4298 	mutex_exit(&target->sft_mutex);
4299 
4300 	if (ndi_devi_online_async(cdip, 0) != DDI_SUCCESS) {
4301 		goto fail;
4302 	}
4303 
4304 	scsi_hba_nodename_compatible_free(nname, compatible);
4305 	return;
4306 
4307 fail:
4308 	scsi_hba_nodename_compatible_free(nname, compatible);
4309 	if (cdip != NULL) {
4310 		(void) ndi_prop_remove(DDI_DEV_T_NONE, cdip, NODE_WWN_PROP);
4311 		(void) ndi_prop_remove(DDI_DEV_T_NONE, cdip, PORT_WWN_PROP);
4312 		(void) ndi_prop_remove(DDI_DEV_T_NONE, cdip, LIP_CNT_PROP);
4313 		(void) ndi_prop_remove(DDI_DEV_T_NONE, cdip, TARGET_PROP);
4314 		(void) ndi_prop_remove(DDI_DEV_T_NONE, cdip, LUN_PROP);
4315 		if (ndi_devi_free(cdip) != NDI_SUCCESS) {
4316 			sf_log(sf, CE_WARN, "ndi_devi_free failed\n");
4317 		} else {
4318 			mutex_enter(&target->sft_mutex);
4319 			if (cdip == target->sft_dip) {
4320 				target->sft_dip = NULL;
4321 			}
4322 			mutex_exit(&target->sft_mutex);
4323 		}
4324 	}
4325 }
4326 
4327 /*
4328  * create required properties, returning TRUE iff we succeed, else
4329  * returning FALSE
4330  */
4331 static int
4332 sf_create_props(dev_info_t *cdip, struct sf_target *target, int lip_cnt)
4333 {
4334 	int tgt_id = sf_alpa_to_switch[target->sft_al_pa];
4335 
4336 
4337 	if (ndi_prop_update_byte_array(DDI_DEV_T_NONE,
4338 	    cdip, NODE_WWN_PROP, target->sft_node_wwn, FC_WWN_SIZE) !=
4339 	    DDI_PROP_SUCCESS) {
4340 		return (FALSE);
4341 	}
4342 
4343 	if (ndi_prop_update_byte_array(DDI_DEV_T_NONE,
4344 	    cdip, PORT_WWN_PROP, target->sft_port_wwn, FC_WWN_SIZE) !=
4345 	    DDI_PROP_SUCCESS) {
4346 		return (FALSE);
4347 	}
4348 
4349 	if (ndi_prop_update_int(DDI_DEV_T_NONE,
4350 	    cdip, LIP_CNT_PROP, lip_cnt) != DDI_PROP_SUCCESS) {
4351 		return (FALSE);
4352 	}
4353 
4354 	if (ndi_prop_update_int(DDI_DEV_T_NONE,
4355 	    cdip, TARGET_PROP, tgt_id) != DDI_PROP_SUCCESS) {
4356 		return (FALSE);
4357 	}
4358 
4359 #ifndef	RAID_LUNS
4360 	if (ndi_prop_update_int(DDI_DEV_T_NONE,
4361 	    cdip, LUN_PROP, target->sft_lun.l) != DDI_PROP_SUCCESS) {
4362 		return (FALSE);
4363 	}
4364 #else
4365 	if (ndi_prop_update_int(DDI_DEV_T_NONE,
4366 	    cdip, LUN_PROP, target->sft_raid_lun) != DDI_PROP_SUCCESS) {
4367 		return (FALSE);
4368 	}
4369 #endif
4370 
4371 	return (TRUE);
4372 }
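
/*
 * Illustrative sketch (not part of the driver): a child target driver
 * can read these properties back through the DDI property interfaces;
 * the helper below is hypothetical.
 */
#if 0
static int
sf_child_get_target_id(dev_info_t *child)
{
	/* returns -1 if the property is missing */
	return (ddi_prop_get_int(DDI_DEV_T_ANY, child, DDI_PROP_DONTPASS,
	    TARGET_PROP, -1));
}
#endif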
4373 
4374 
4375 /*
4376  * called by the transport to offline a target
4377  */
4378 /* ARGSUSED */
4379 static void
4380 sf_offline_target(struct sf *sf, struct sf_target *target)
4381 {
4382 	dev_info_t *dip;
4383 	struct sf_target *next_target = NULL;
4384 	struct sf_hp_elem	*elem;
4385 
4386 	ASSERT(mutex_owned(&sf->sf_mutex));
4387 
4388 	if (sf_core && (sf_core & SF_CORE_OFFLINE_TARGET)) {
4389 		(void) soc_take_core(sf->sf_sochandle, sf->sf_socp);
4390 		sf_core = 0;
4391 	}
4392 
4393 	while (target != NULL) {
4394 		sf_log(sf, CE_NOTE,
4395 		    "!target 0x%x al_pa 0x%x lun %" PRIx64 " offlined\n",
4396 		    sf_alpa_to_switch[target->sft_al_pa],
4397 		    target->sft_al_pa, SCSA_LUN(target));
4398 		mutex_enter(&target->sft_mutex);
4399 		target->sft_state &= ~(SF_TARGET_BUSY|SF_TARGET_MARK);
4400 		target->sft_state |= SF_TARGET_OFFLINE;
4401 		mutex_exit(&target->sft_mutex);
4402 		mutex_exit(&sf->sf_mutex);
4403 
4404 		/* XXXX if this is LUN 0, offline all other LUNs */
4405 		if (next_target || target->sft_lun.l == 0)
4406 			next_target = target->sft_next_lun;
4407 
4408 		/* abort all cmds for this target */
4409 		sf_abort_all(sf, target, FALSE, sf->sf_lip_cnt, FALSE);
4410 
4411 		mutex_enter(&sf->sf_mutex);
4412 		mutex_enter(&target->sft_mutex);
4413 		if (target->sft_state & SF_TARGET_INIT_DONE) {
4414 			dip = target->sft_dip;
4415 			mutex_exit(&target->sft_mutex);
4416 			mutex_exit(&sf->sf_mutex);
4417 			(void) ndi_prop_remove(DDI_DEV_T_NONE, dip,
4418 			    TARGET_PROP);
4419 			(void) ndi_event_retrieve_cookie(sf->sf_event_hdl,
4420 			    dip, FCAL_REMOVE_EVENT, &sf_remove_eid,
4421 			    NDI_EVENT_NOPASS);
4422 			(void) ndi_event_run_callbacks(sf->sf_event_hdl,
4423 			    target->sft_dip, sf_remove_eid, NULL);
4424 
4425 			elem = kmem_zalloc(sizeof (struct sf_hp_elem),
4426 			    KM_NOSLEEP);
4427 			if (elem != NULL) {
4428 				elem->dip = dip;
4429 				elem->target = target;
4430 				elem->what = SF_OFFLINE;
4431 				mutex_enter(&sf->sf_hp_daemon_mutex);
4432 				if (sf->sf_hp_elem_tail != NULL) {
4433 					sf->sf_hp_elem_tail->next = elem;
4434 					sf->sf_hp_elem_tail = elem;
4435 				} else {
4436 					sf->sf_hp_elem_head =
4437 					    sf->sf_hp_elem_tail =
4438 					    elem;
4439 				}
4440 				cv_signal(&sf->sf_hp_daemon_cv);
4441 				mutex_exit(&sf->sf_hp_daemon_mutex);
4442 			} else {
4443 				/* don't do NDI_DEVI_REMOVE for now */
4444 				if (ndi_devi_offline(dip, 0) != NDI_SUCCESS) {
4445 					SF_DEBUG(1, (sf, CE_WARN,
4446 					    "target %x lun %" PRIx64 ", "
4447 					    "device offline failed",
4448 					    sf_alpa_to_switch[target->
4449 					    sft_al_pa],
4450 					    SCSA_LUN(target)));
4451 				} else {
4452 					SF_DEBUG(1, (sf, CE_NOTE,
4453 					    "target %x, lun %" PRIx64 ", "
4454 					    "device offline succeeded\n",
4455 					    sf_alpa_to_switch[target->
4456 					    sft_al_pa],
4457 					    SCSA_LUN(target)));
4458 				}
4459 			}
4460 			mutex_enter(&sf->sf_mutex);
4461 		} else {
4462 			mutex_exit(&target->sft_mutex);
4463 		}
4464 		target = next_target;
4465 	}
4466 }
4467 
4468 
4469 /*
4470  * routine to get/set a capability
4471  *
4472  * returning:
4473  *	1 (TRUE)	boolean capability is true (on get)
4474  *	0 (FALSE)	invalid capability, can't set capability (on set),
4475  *			or boolean capability is false (on get)
4476  *	-1 (UNDEFINED)	can't find capability (SCSA) or unsupported capability
4477  *	3		when getting SCSI version number
4478  *	AL_PA		when getting port initiator ID
4479  */
4480 static int
4481 sf_commoncap(struct scsi_address *ap, char *cap,
4482     int val, int tgtonly, int doset)
4483 {
4484 	struct sf *sf = ADDR2SF(ap);
4485 	int cidx;
4486 	int rval = FALSE;
4487 
4488 
4489 	if (cap == NULL) {
4490 		SF_DEBUG(3, (sf, CE_WARN, "sf_commoncap: invalid arg"));
4491 		return (rval);
4492 	}
4493 
4494 	/* get index of capability string */
4495 	if ((cidx = scsi_hba_lookup_capstr(cap)) == -1) {
4496 		/* can't find capability */
4497 		return (UNDEFINED);
4498 	}
4499 
4500 	if (doset) {
4501 		/*
4502 		 * Process setcap request.
4503 		 */
4504 
4505 		/*
4506 		 * At present, we can only set binary (0/1) values
4507 		 */
4508 		switch (cidx) {
4509 		case SCSI_CAP_ARQ:	/* can't set this capability */
4510 			break;
4511 		default:
4512 			SF_DEBUG(3, (sf, CE_WARN,
4513 			    "sf_setcap: unsupported %d", cidx));
4514 			rval = UNDEFINED;
4515 			break;
4516 		}
4517 
4518 		SF_DEBUG(4, (sf, CE_NOTE,
4519 		    "set cap: cap=%s,val=0x%x,tgtonly=0x%x"
4520 		    ",doset=0x%x,rval=%d\n",
4521 		    cap, val, tgtonly, doset, rval));
4522 
4523 	} else {
4524 		/*
4525 		 * Process getcap request.
4526 		 */
4527 		switch (cidx) {
4528 		case SCSI_CAP_DMA_MAX:
4529 			break;		/* don't have this capability */
4530 		case SCSI_CAP_INITIATOR_ID:
4531 			rval = sf->sf_al_pa;
4532 			break;
4533 		case SCSI_CAP_ARQ:
4534 			rval = TRUE;	/* do have this capability */
4535 			break;
4536 		case SCSI_CAP_RESET_NOTIFICATION:
4537 		case SCSI_CAP_TAGGED_QING:
4538 			rval = TRUE;	/* do have this capability */
4539 			break;
4540 		case SCSI_CAP_SCSI_VERSION:
4541 			rval = 3;
4542 			break;
4543 		case SCSI_CAP_INTERCONNECT_TYPE:
4544 			rval = INTERCONNECT_FIBRE;
4545 			break;
4546 		default:
4547 			SF_DEBUG(4, (sf, CE_WARN,
4548 			    "sf_scsi_getcap: unsupported"));
4549 			rval = UNDEFINED;
4550 			break;
4551 		}
4552 		SF_DEBUG(4, (sf, CE_NOTE,
4553 		    "get cap: cap=%s,val=0x%x,tgtonly=0x%x,"
4554 		    "doset=0x%x,rval=%d\n",
4555 		    cap, val, tgtonly, doset, rval));
4556 	}
4557 
4558 	return (rval);
4559 }
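
/*
 * Illustrative sketch (not part of the driver): the values computed
 * above reach target drivers through scsi_ifgetcap(), which routes to
 * sf_getcap() below.  The helper name is hypothetical.
 */
#if 0
static void
sf_show_caps(struct scsi_device *sd)
{
	int tq = scsi_ifgetcap(&sd->sd_address, "tagged-qing", 1);
	int id = scsi_ifgetcap(&sd->sd_address, "initiator-id", 1);

	cmn_err(CE_CONT, "?tagged queueing %d, initiator AL_PA 0x%x\n",
	    tq, id);
}
#endif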
4560 
4561 
4562 /*
4563  * called by the transport to get a capability
4564  */
4565 static int
4566 sf_getcap(struct scsi_address *ap, char *cap, int whom)
4567 {
4568 	return (sf_commoncap(ap, cap, 0, whom, FALSE));
4569 }
4570 
4571 
4572 /*
4573  * called by the transport to set a capability
4574  */
4575 static int
4576 sf_setcap(struct scsi_address *ap, char *cap, int value, int whom)
4577 {
4578 	return (sf_commoncap(ap, cap, value, whom, TRUE));
4579 }
4580 
4581 
4582 /*
4583  * called by the transport to abort a packet (if pkt is NULL, the target)
4584  */
4585 static int
4586 sf_abort(struct scsi_address *ap, struct scsi_pkt *pkt)
4587 {
4588 	struct sf *sf = ADDR2SF(ap);
4589 	struct sf_target *target = ADDR2TARGET(ap);
4590 	struct sf_pkt *cmd, *ncmd, *pcmd;
4591 	struct fcal_packet *fpkt;
4592 	int	rval = 0, t, my_rval = FALSE;
4593 	int	old_target_state;
4594 	int	lip_cnt;
4595 	int	tgt_id;
4596 	fc_frame_header_t	*hp;
4597 	int	deferred_destroy;
4598 
4599 	deferred_destroy = 0;
4600 
4601 	if (pkt != NULL) {
4602 		cmd = PKT2CMD(pkt);
4603 		fpkt = cmd->cmd_fp_pkt;
4604 		SF_DEBUG(2, (sf, CE_NOTE, "sf_abort packet %p\n",
4605 		    (void *)fpkt));
4606 		pcmd = NULL;
4607 		mutex_enter(&sf->sf_cmd_mutex);
4608 		ncmd = sf->sf_pkt_head;
4609 		while (ncmd != NULL) {
4610 			if (ncmd == cmd) {
4611 				if (pcmd != NULL) {
4612 					pcmd->cmd_next = cmd->cmd_next;
4613 				} else {
4614 					sf->sf_pkt_head = cmd->cmd_next;
4615 				}
4616 				cmd->cmd_flags &= ~CFLAG_IN_QUEUE;
4617 				cmd->cmd_state = SF_STATE_IDLE;
4618 				pkt->pkt_reason = CMD_ABORTED;
4619 				pkt->pkt_statistics |= STAT_ABORTED;
4620 				my_rval = TRUE;
4621 				break;
4622 			} else {
4623 				pcmd = ncmd;
4624 				ncmd = ncmd->cmd_next;
4625 			}
4626 		}
4627 		mutex_exit(&sf->sf_cmd_mutex);
4628 		if (ncmd == NULL) {
4629 			mutex_enter(&cmd->cmd_abort_mutex);
4630 			if (cmd->cmd_state == SF_STATE_ISSUED) {
4631 				cmd->cmd_state = SF_STATE_ABORTING;
4632 				cmd->cmd_timeout = sf_watchdog_time + 20;
4633 				mutex_exit(&cmd->cmd_abort_mutex);
4634 				/* call transport to abort command */
4635 				if (((rval = soc_abort(sf->sf_sochandle,
4636 				    sf->sf_socp, sf->sf_sochandle->fcal_portno,
4637 				    fpkt, 1)) == FCAL_ABORTED) ||
4638 				    (rval == FCAL_ABORT_FAILED)) {
4639 					my_rval = TRUE;
4640 					pkt->pkt_reason = CMD_ABORTED;
4641 					pkt->pkt_statistics |= STAT_ABORTED;
4642 					cmd->cmd_state = SF_STATE_IDLE;
4643 				} else if (rval == FCAL_BAD_ABORT) {
4644 					cmd->cmd_timeout = sf_watchdog_time
4645 					    + 20;
4646 					my_rval = FALSE;
4647 				} else {
4648 					SF_DEBUG(1, (sf, CE_NOTE,
4649 					    "Command Abort failed\n"));
4650 				}
4651 			} else {
4652 				mutex_exit(&cmd->cmd_abort_mutex);
4653 			}
4654 		}
4655 	} else {
4656 		SF_DEBUG(2, (sf, CE_NOTE, "sf_abort target\n"));
4657 		mutex_enter(&sf->sf_mutex);
4658 		lip_cnt = sf->sf_lip_cnt;
4659 		mutex_enter(&target->sft_mutex);
4660 		if (target->sft_state & (SF_TARGET_BUSY |
4661 		    SF_TARGET_OFFLINE)) {
4662 			mutex_exit(&target->sft_mutex);
4663 			return (rval);
4664 		}
4665 		old_target_state = target->sft_state;
4666 		target->sft_state |= SF_TARGET_BUSY;
4667 		mutex_exit(&target->sft_mutex);
4668 		mutex_exit(&sf->sf_mutex);
4669 
4670 		if ((pkt = sf_scsi_init_pkt(ap, NULL, NULL, 0,
4671 		    0, 0, 0, NULL, 0)) != NULL) {
4672 
4673 			cmd = PKT2CMD(pkt);
4674 			cmd->cmd_block->fcp_cntl.cntl_abort_tsk = 1;
4675 			cmd->cmd_fp_pkt->fcal_pkt_comp = NULL;
4676 			cmd->cmd_pkt->pkt_flags |= FLAG_NOINTR;
4677 
4678 			/* prepare the packet for transport */
4679 			if (sf_prepare_pkt(sf, cmd, target) == TRAN_ACCEPT) {
4680 
4681 				cmd->cmd_state = SF_STATE_ISSUED;
4682 				/*
4683 				 * call transport to send a pkt polled
4684 				 *
4685 				 * if that fails call the transport to abort it
4686 				 */
4687 				if (soc_transport_poll(sf->sf_sochandle,
4688 				    cmd->cmd_fp_pkt, SF_ABORT_TIMEOUT,
4689 				    CQ_REQUEST_1) == FCAL_TRANSPORT_SUCCESS) {
4690 					(void) ddi_dma_sync(
4691 					    cmd->cmd_cr_pool->rsp_dma_handle,
4692 					    (off_t)
4693 					    ((caddr_t)cmd->cmd_rsp_block -
4694 					    cmd->cmd_cr_pool->rsp_base),
4695 					    FCP_MAX_RSP_IU_SIZE,
4696 					    DDI_DMA_SYNC_FORKERNEL);
4697 					if (((struct fcp_rsp_info *)
4698 					    (&cmd->cmd_rsp_block->
4699 					    fcp_response_len + 1))->
4700 					    rsp_code == FCP_NO_FAILURE) {
4701 						/* abort cmds for this targ */
4702 						sf_abort_all(sf, target, TRUE,
4703 						    lip_cnt, TRUE);
4704 					} else {
4705 						hp = &cmd->cmd_fp_pkt->
4706 						    fcal_socal_request.
4707 						    sr_fc_frame_hdr;
4708 						tgt_id = sf_alpa_to_switch[
4709 						    (uchar_t)hp->d_id];
4710 						sf->sf_stats.tstats[tgt_id].
4711 						    task_mgmt_failures++;
4712 						SF_DEBUG(1, (sf, CE_NOTE,
4713 						    "Target %d Abort Task "
4714 						    "Set failed\n", hp->d_id));
4715 					}
4716 				} else {
4717 					mutex_enter(&cmd->cmd_abort_mutex);
4718 					if (cmd->cmd_state == SF_STATE_ISSUED) {
4719 					cmd->cmd_state = SF_STATE_ABORTING;
4720 					cmd->cmd_timeout = sf_watchdog_time
4721 					    + 20;
4722 					mutex_exit(&cmd->cmd_abort_mutex);
4723 					if ((t = soc_abort(sf->sf_sochandle,
4724 					    sf->sf_socp, sf->sf_sochandle->
4725 					    fcal_portno, cmd->cmd_fp_pkt, 1)) !=
4726 					    FCAL_ABORTED &&
4727 					    (t != FCAL_ABORT_FAILED)) {
4728 						sf_log(sf, CE_NOTE,
4729 						    "sf_abort failed, "
4730 						    "initiating LIP\n");
4731 						sf_force_lip(sf);
4732 						deferred_destroy = 1;
4733 					}
4734 					} else {
4735 					mutex_exit(&cmd->cmd_abort_mutex);
4736 					}
4737 				}
4738 			}
4739 			if (!deferred_destroy) {
4740 				cmd->cmd_fp_pkt->fcal_pkt_comp =
4741 				    sf_cmd_callback;
4742 				cmd->cmd_block->fcp_cntl.cntl_abort_tsk = 0;
4743 				sf_scsi_destroy_pkt(ap, pkt);
4744 				my_rval = TRUE;
4745 			}
4746 		}
4747 		mutex_enter(&sf->sf_mutex);
4748 		if (lip_cnt == sf->sf_lip_cnt) {
4749 			mutex_enter(&target->sft_mutex);
4750 			target->sft_state = old_target_state;
4751 			mutex_exit(&target->sft_mutex);
4752 		}
4753 		mutex_exit(&sf->sf_mutex);
4754 	}
4755 	return (my_rval);
4756 }
4757 
4758 
4759 /*
4760  * called by the transport and internally to reset a target
4761  */
4762 static int
4763 sf_reset(struct scsi_address *ap, int level)
4764 {
4765 	struct scsi_pkt *pkt;
4766 	struct fcal_packet *fpkt;
4767 	struct sf *sf = ADDR2SF(ap);
4768 	struct sf_target *target = ADDR2TARGET(ap), *ntarget;
4769 	struct sf_pkt *cmd;
4770 	int	rval = FALSE, t;
4771 	int	lip_cnt;
4772 	int	tgt_id, ret;
4773 	fc_frame_header_t	*hp;
4774 	int	deferred_destroy;
4775 
4776 	/* We don't support RESET_LUN yet. */
4777 	if (level == RESET_TARGET) {
4778 		struct sf_reset_list *p;
4779 
4780 		if ((p = kmem_alloc(sizeof (struct sf_reset_list), KM_NOSLEEP))
4781 		    == NULL)
4782 			return (rval);
4783 
4784 		SF_DEBUG(2, (sf, CE_NOTE, "sf_reset target\n"));
4785 		mutex_enter(&sf->sf_mutex);
4786 		/* All target resets go to LUN 0 */
4787 		if (target->sft_lun.l) {
4788 			target = sf_lookup_target(sf, target->sft_port_wwn, 0);
4789 		}
4790 		mutex_enter(&target->sft_mutex);
4791 		if (target->sft_state & (SF_TARGET_BUSY |
4792 		    SF_TARGET_OFFLINE)) {
4793 			mutex_exit(&target->sft_mutex);
4794 			mutex_exit(&sf->sf_mutex);
4795 			kmem_free(p, sizeof (struct sf_reset_list));
4796 			return (rval);
4797 		}
4798 		lip_cnt = sf->sf_lip_cnt;
4799 		target->sft_state |= SF_TARGET_BUSY;
4800 		for (ntarget = target->sft_next_lun;
4801 		    ntarget;
4802 		    ntarget = ntarget->sft_next_lun) {
4803 			mutex_enter(&ntarget->sft_mutex);
4804 			/*
4805 			 * XXXX If we supported RESET_LUN we should check here
4806 			 * to see if any LUN were being reset and somehow fail
4807 			 * that operation.
4808 			 */
4809 			ntarget->sft_state |= SF_TARGET_BUSY;
4810 			mutex_exit(&ntarget->sft_mutex);
4811 		}
4812 		mutex_exit(&target->sft_mutex);
4813 		mutex_exit(&sf->sf_mutex);
4814 
4815 		deferred_destroy = 0;
4816 		if ((pkt = sf_scsi_init_pkt(ap, NULL, NULL, 0,
4817 		    0, 0, 0, NULL, 0)) != NULL) {
4818 			cmd = PKT2CMD(pkt);
4819 			cmd->cmd_block->fcp_cntl.cntl_reset = 1;
4820 			cmd->cmd_fp_pkt->fcal_pkt_comp = NULL;
4821 			cmd->cmd_pkt->pkt_flags |= FLAG_NOINTR;
4822 
4823 			/* prepare the packet for transport */
4824 			if (sf_prepare_pkt(sf, cmd, target) == TRAN_ACCEPT) {
4825 				/* call transport to send a pkt polled */
4826 				cmd->cmd_state = SF_STATE_ISSUED;
4827 				if ((ret = soc_transport_poll(sf->sf_sochandle,
4828 				    cmd->cmd_fp_pkt, SF_ABORT_TIMEOUT,
4829 				    CQ_REQUEST_1)) == FCAL_TRANSPORT_SUCCESS) {
4830 					(void) ddi_dma_sync(cmd->cmd_cr_pool->
4831 					    rsp_dma_handle, (caddr_t)cmd->
4832 					    cmd_rsp_block - cmd->cmd_cr_pool->
4833 					    rsp_base, FCP_MAX_RSP_IU_SIZE,
4834 					    DDI_DMA_SYNC_FORKERNEL);
4835 					fpkt = cmd->cmd_fp_pkt;
4836 					if ((fpkt->fcal_pkt_status ==
4837 					    FCAL_STATUS_OK) &&
4838 					    (((struct fcp_rsp_info *)
4839 					    (&cmd->cmd_rsp_block->
4840 					    fcp_response_len + 1))->
4841 					    rsp_code == FCP_NO_FAILURE)) {
4842 						sf_log(sf, CE_NOTE,
4843 						    "!sf%d: Target 0x%x Reset "
4844 						    "successful\n",
4845 						    ddi_get_instance(
4846 						    sf->sf_dip),
4847 						    sf_alpa_to_switch[
4848 						    target->sft_al_pa]);
4849 						rval = TRUE;
4850 					} else {
4851 						hp = &cmd->cmd_fp_pkt->
4852 						    fcal_socal_request.
4853 						    sr_fc_frame_hdr;
4854 						tgt_id = sf_alpa_to_switch[
4855 						    (uchar_t)hp->d_id];
4856 						sf->sf_stats.tstats[tgt_id].
4857 						    task_mgmt_failures++;
4858 						sf_log(sf, CE_NOTE,
4859 						    "!sf%d: Target 0x%x "
4860 						    "Reset failed. "
4861 						    "Status code 0x%x "
4862 						    "Resp code 0x%x\n",
4863 						    ddi_get_instance(
4864 						    sf->sf_dip),
4865 						    tgt_id,
4866 						    fpkt->fcal_pkt_status,
4867 						    ((struct fcp_rsp_info *)
4868 						    (&cmd->cmd_rsp_block->
4869 						    fcp_response_len + 1))->
4870 						    rsp_code);
4871 					}
4872 				} else {
4873 					sf_log(sf, CE_NOTE, "!sf%d: Target "
4874 					    "0x%x Reset Failed. Ret=%x\n",
4875 					    ddi_get_instance(sf->sf_dip),
4876 					    sf_alpa_to_switch[
4877 					    target->sft_al_pa], ret);
4878 					mutex_enter(&cmd->cmd_abort_mutex);
4879 					if (cmd->cmd_state == SF_STATE_ISSUED) {
4880 					/* call the transport to abort a cmd */
4881 					cmd->cmd_timeout = sf_watchdog_time
4882 					    + 20;
4883 					cmd->cmd_state = SF_STATE_ABORTING;
4884 					mutex_exit(&cmd->cmd_abort_mutex);
4885 					if (((t = soc_abort(sf->sf_sochandle,
4886 					    sf->sf_socp,
4887 					    sf->sf_sochandle->fcal_portno,
4888 					    cmd->cmd_fp_pkt, 1)) !=
4889 					    FCAL_ABORTED) &&
4890 					    (t != FCAL_ABORT_FAILED)) {
4891 						sf_log(sf, CE_NOTE,
4892 						    "!sf%d: Target 0x%x Reset "
4893 						    "failed. Abort Failed, "
4894 						    "forcing LIP\n",
4895 						    ddi_get_instance(
4896 						    sf->sf_dip),
4897 						    sf_alpa_to_switch[
4898 						    target->sft_al_pa]);
4899 						sf_force_lip(sf);
4900 						rval = TRUE;
4901 						deferred_destroy = 1;
4902 					}
4903 					} else {
4904 						mutex_exit
4905 						    (&cmd->cmd_abort_mutex);
4906 					}
4907 				}
4908 			}
4909 			/*
4910 			 * Defer releasing the packet if the abort returned
4911 			 * BAD_ABORT or timed out, because there is a
4912 			 * possibility that the ucode might still return it.
4913 			 * We wait for at least 20 seconds and then let it be
4914 			 * released by the sf_watch thread.
4915 			 */
4916 			if (!deferred_destroy) {
4917 				cmd->cmd_block->fcp_cntl.cntl_reset = 0;
4918 				cmd->cmd_fp_pkt->fcal_pkt_comp =
4919 				    sf_cmd_callback;
4920 				cmd->cmd_state = SF_STATE_IDLE;
4921 				/* for cache */
4922 				sf_scsi_destroy_pkt(ap, pkt);
4923 			}
4924 		} else {
4925 			cmn_err(CE_WARN, "!sf%d: Target 0x%x Reset Failed. "
4926 			    "Resource allocation error.\n",
4927 			    ddi_get_instance(sf->sf_dip),
4928 			    sf_alpa_to_switch[target->sft_al_pa]);
4929 		}
4930 		mutex_enter(&sf->sf_mutex);
4931 		if ((rval == TRUE) && (lip_cnt == sf->sf_lip_cnt)) {
4932 			p->target = target;
4933 			p->lip_cnt = lip_cnt;
4934 			p->timeout = ddi_get_lbolt() +
4935 			    drv_usectohz(SF_TARGET_RESET_DELAY);
4936 			p->next = sf->sf_reset_list;
4937 			sf->sf_reset_list = p;
4938 			mutex_exit(&sf->sf_mutex);
4939 			mutex_enter(&sf_global_mutex);
4940 			if (sf_reset_timeout_id == 0) {
4941 				sf_reset_timeout_id = timeout(
4942 				    sf_check_reset_delay, NULL,
4943 				    drv_usectohz(SF_TARGET_RESET_DELAY));
4944 			}
4945 			mutex_exit(&sf_global_mutex);
4946 		} else {
4947 			if (lip_cnt == sf->sf_lip_cnt) {
4948 				mutex_enter(&target->sft_mutex);
4949 				target->sft_state &= ~SF_TARGET_BUSY;
4950 				for (ntarget = target->sft_next_lun;
4951 				    ntarget;
4952 				    ntarget = ntarget->sft_next_lun) {
4953 					mutex_enter(&ntarget->sft_mutex);
4954 					ntarget->sft_state &= ~SF_TARGET_BUSY;
4955 					mutex_exit(&ntarget->sft_mutex);
4956 				}
4957 				mutex_exit(&target->sft_mutex);
4958 			}
4959 			mutex_exit(&sf->sf_mutex);
4960 			kmem_free(p, sizeof (struct sf_reset_list));
4961 		}
4962 	} else {
4963 		mutex_enter(&sf->sf_mutex);
4964 		if ((sf->sf_state == SF_STATE_OFFLINE) &&
4965 		    (sf_watchdog_time < sf->sf_timer)) {
4966 			/*
4967 			 * We are currently in a lip, so let this one
4968 			 * finish before forcing another one.
4969 			 */
4970 			mutex_exit(&sf->sf_mutex);
4971 			return (TRUE);
4972 		}
4973 		mutex_exit(&sf->sf_mutex);
4974 		sf_log(sf, CE_NOTE, "!sf: Target driver initiated lip\n");
4975 		sf_force_lip(sf);
4976 		rval = TRUE;
4977 	}
4978 	return (rval);
4979 }
4980 
4981 
4982 /*
4983  * abort all commands for a target
4984  *
4985  * if try_abort is set then ask the transport to abort each command;
4986  * if abort is set the commands complete as aborted, else as reset
4987  */
4988 static void
4989 sf_abort_all(struct sf *sf, struct sf_target *target, int abort, int
4990     lip_cnt, int try_abort)
4991 {
4992 	struct sf_target *ntarget;
4993 	struct sf_pkt *cmd, *head = NULL, *tail = NULL, *pcmd = NULL, *tcmd;
4994 	struct fcal_packet *fpkt;
4995 	struct scsi_pkt *pkt;
4996 	int rval = FCAL_ABORTED;
4997 
4998 	/*
4999 	 * First pull all commands for all LUNs on this target out of the
5000 	 * overflow list.  We can tell it's the same target by comparing
5001 	 * the node WWN.
5002 	 */
5003 	mutex_enter(&sf->sf_mutex);
5004 	if (lip_cnt == sf->sf_lip_cnt) {
5005 		mutex_enter(&sf->sf_cmd_mutex);
5006 		cmd = sf->sf_pkt_head;
5007 		while (cmd != NULL) {
5008 			ntarget = ADDR2TARGET(&cmd->cmd_pkt->
5009 			    pkt_address);
5010 			if (ntarget == target) {
5011 				if (pcmd != NULL)
5012 					pcmd->cmd_next = cmd->cmd_next;
5013 				else
5014 					sf->sf_pkt_head = cmd->cmd_next;
5015 				if (sf->sf_pkt_tail == cmd) {
5016 					sf->sf_pkt_tail = pcmd;
5017 					if (pcmd != NULL)
5018 						pcmd->cmd_next = NULL;
5019 				}
5020 				tcmd = cmd->cmd_next;
5021 				if (head == NULL) {
5022 					head = cmd;
5023 					tail = cmd;
5024 				} else {
5025 					tail->cmd_next = cmd;
5026 					tail = cmd;
5027 				}
5028 				cmd->cmd_next = NULL;
5029 				cmd = tcmd;
5030 			} else {
5031 				pcmd = cmd;
5032 				cmd = cmd->cmd_next;
5033 			}
5034 		}
5035 		mutex_exit(&sf->sf_cmd_mutex);
5036 	}
5037 	mutex_exit(&sf->sf_mutex);
5038 
5039 	/*
5040 	 * Now complete all the commands on our list.  In the process,
5041 	 * the completion routine may take the commands off the target
5042 	 * lists.
5043 	 */
5044 	cmd = head;
5045 	while (cmd != NULL) {
5046 		pkt = cmd->cmd_pkt;
5047 		if (abort) {
5048 			pkt->pkt_reason = CMD_ABORTED;
5049 			pkt->pkt_statistics |= STAT_ABORTED;
5050 		} else {
5051 			pkt->pkt_reason = CMD_RESET;
5052 			pkt->pkt_statistics |= STAT_DEV_RESET;
5053 		}
5054 		cmd->cmd_flags &= ~CFLAG_IN_QUEUE;
5055 		cmd->cmd_state = SF_STATE_IDLE;
5056 		cmd = cmd->cmd_next;
5057 		/*
5058 		 * call the packet completion routine only for
5059 		 * non-polled commands. Ignore the polled commands as
5060 		 * they timeout and will be handled differently
5061 		 */
5062 		if ((pkt->pkt_comp) && !(pkt->pkt_flags & FLAG_NOINTR))
5063 			(*pkt->pkt_comp)(pkt);
5064 
5065 	}
5066 
5067 	/*
5068 	 * Finally get all outstanding commands for each LUN, and abort them if
5069 	 * they've been issued, and call the completion routine.
5070 	 * For the case where sf_offline_target is called from sf_watch
5071 	 * due to an Offline Timeout, it is quite possible that the soc+
5072 	 * ucode is hosed and therefore cannot return the commands.
5073 	 * Clear up all the issued commands as well.
5074 	 * try_abort will be false only if sf_abort_all is coming from
5075 	 * sf_offline_target.
5076 	 */
5077 
5078 	if (try_abort || sf->sf_state == SF_STATE_OFFLINE) {
5079 		mutex_enter(&target->sft_pkt_mutex);
5080 		cmd = tcmd = target->sft_pkt_head;
5081 		while (cmd != (struct sf_pkt *)&target->sft_pkt_head) {
5082 			fpkt = cmd->cmd_fp_pkt;
5083 			pkt = cmd->cmd_pkt;
5084 			mutex_enter(&cmd->cmd_abort_mutex);
5085 			if ((cmd->cmd_state == SF_STATE_ISSUED) &&
5086 			    (fpkt->fcal_cmd_state &
5087 			    FCAL_CMD_IN_TRANSPORT) &&
5088 			    ((fpkt->fcal_cmd_state & FCAL_CMD_COMPLETE) ==
5089 			    0) && !(pkt->pkt_flags & FLAG_NOINTR)) {
5090 				cmd->cmd_state = SF_STATE_ABORTING;
5091 				cmd->cmd_timeout = sf_watchdog_time +
5092 				    cmd->cmd_pkt->pkt_time + 20;
5093 				mutex_exit(&cmd->cmd_abort_mutex);
5094 				mutex_exit(&target->sft_pkt_mutex);
5095 				if (try_abort) {
5096 					/* call the transport to abort a pkt */
5097 					rval = soc_abort(sf->sf_sochandle,
5098 					    sf->sf_socp,
5099 					    sf->sf_sochandle->fcal_portno,
5100 					    fpkt, 1);
5101 				}
5102 				if ((rval == FCAL_ABORTED) ||
5103 				    (rval == FCAL_ABORT_FAILED)) {
5104 					if (abort) {
5105 						pkt->pkt_reason = CMD_ABORTED;
5106 						pkt->pkt_statistics |=
5107 						    STAT_ABORTED;
5108 					} else {
5109 						pkt->pkt_reason = CMD_RESET;
5110 						pkt->pkt_statistics |=
5111 						    STAT_DEV_RESET;
5112 					}
5113 					cmd->cmd_state = SF_STATE_IDLE;
5114 					if (pkt->pkt_comp)
5115 						(*pkt->pkt_comp)(pkt);
5116 				}
5117 				mutex_enter(&sf->sf_mutex);
5118 				if (lip_cnt != sf->sf_lip_cnt) {
5119 					mutex_exit(&sf->sf_mutex);
5120 					return;
5121 				}
5122 				mutex_exit(&sf->sf_mutex);
5123 				mutex_enter(&target->sft_pkt_mutex);
5124 				cmd = target->sft_pkt_head;
5125 			} else {
5126 				mutex_exit(&cmd->cmd_abort_mutex);
5127 				cmd = cmd->cmd_forw;
5128 			}
5129 		}
5130 		mutex_exit(&target->sft_pkt_mutex);
5131 	}
5132 }
5133 
5134 
5135 /*
5136  * called by the transport to start a packet
5137  */
5138 static int
5139 sf_start(struct scsi_address *ap, struct scsi_pkt *pkt)
5140 {
5141 	struct sf *sf = ADDR2SF(ap);
5142 	struct sf_target *target = ADDR2TARGET(ap);
5143 	struct sf_pkt *cmd = PKT2CMD(pkt);
5144 	int rval;
5145 
5146 
5147 	SF_DEBUG(6, (sf, CE_NOTE, "sf_start\n"));
5148 
5149 	if (cmd->cmd_state == SF_STATE_ISSUED) {
5150 		cmn_err(CE_PANIC, "sf: issuing packet twice 0x%p\n",
5151 		    (void *)cmd);
5152 	}
5153 
5154 	/* prepare the packet for transport */
5155 	if ((rval = sf_prepare_pkt(sf, cmd, target)) != TRAN_ACCEPT) {
5156 		return (rval);
5157 	}
5158 
5159 	if (target->sft_state & (SF_TARGET_BUSY|SF_TARGET_OFFLINE)) {
5160 		if (target->sft_state & SF_TARGET_OFFLINE) {
5161 			return (TRAN_FATAL_ERROR);
5162 		}
5163 		if (pkt->pkt_flags & FLAG_NOINTR) {
5164 			return (TRAN_BUSY);
5165 		}
5166 		mutex_enter(&sf->sf_cmd_mutex);
5167 		sf->sf_use_lock = TRUE;
5168 		goto enque;
5169 	}
5170 
5171 
5172 	/* if no interrupts then do polled I/O */
5173 	if (pkt->pkt_flags & FLAG_NOINTR) {
5174 		return (sf_dopoll(sf, cmd));
5175 	}
5176 
5177 	/* regular interrupt-driven I/O */
5178 
5179 	if (!sf->sf_use_lock) {
5180 
5181 		/* locking not needed */
5182 
5183 		cmd->cmd_timeout = cmd->cmd_pkt->pkt_time ?
5184 		    sf_watchdog_time + cmd->cmd_pkt->pkt_time : 0;
5185 		cmd->cmd_state = SF_STATE_ISSUED;
5186 
5187 		/* call the transport to send a pkt */
5188 		if (soc_transport(sf->sf_sochandle, cmd->cmd_fp_pkt,
5189 		    FCAL_NOSLEEP, CQ_REQUEST_1) != FCAL_TRANSPORT_SUCCESS) {
5190 			cmd->cmd_state = SF_STATE_IDLE;
5191 			return (TRAN_BADPKT);
5192 		}
5193 		return (TRAN_ACCEPT);
5194 	}
5195 
5196 	/* regular I/O using locking */
5197 
5198 	mutex_enter(&sf->sf_cmd_mutex);
5199 	if ((sf->sf_ncmds >= sf->sf_throttle) ||
5200 	    (sf->sf_pkt_head != NULL)) {
5201 enque:
5202 		/*
5203 		 * either we're throttling back or there are already commands
5204 		 * on the queue, so enqueue this one for later
5205 		 */
5206 		cmd->cmd_flags |= CFLAG_IN_QUEUE;
5207 		if (sf->sf_pkt_head != NULL) {
5208 			/* add to the queue */
5209 			sf->sf_pkt_tail->cmd_next = cmd;
5210 			cmd->cmd_next = NULL;
5211 			sf->sf_pkt_tail = cmd;
5212 		} else {
5213 			/* this is the first entry in the queue */
5214 			sf->sf_pkt_head = sf->sf_pkt_tail = cmd;
5215 			cmd->cmd_next = NULL;
5216 		}
5217 		mutex_exit(&sf->sf_cmd_mutex);
5218 		return (TRAN_ACCEPT);
5219 	}
5220 
5221 	/*
5222 	 * start this packet now
5223 	 */
5224 
5225 	/* still have cmd mutex */
5226 	return (sf_start_internal(sf, cmd));
5227 }
5228 
5229 
5230 /*
5231  * internal routine to start a packet from the queue now
5232  *
5233  * enter with cmd mutex held and leave with it released
5234  */
5235 static int
5236 sf_start_internal(struct sf *sf, struct sf_pkt *cmd)
5237 {
5238 	/* we have the cmd mutex */
5239 	sf->sf_ncmds++;
5240 	mutex_exit(&sf->sf_cmd_mutex);
5241 
5242 	ASSERT(cmd->cmd_state != SF_STATE_ISSUED);
5243 	SF_DEBUG(6, (sf, CE_NOTE, "sf_start_internal\n"));
5244 
5245 	cmd->cmd_timeout = cmd->cmd_pkt->pkt_time ? sf_watchdog_time +
5246 	    cmd->cmd_pkt->pkt_time : 0;
5247 	cmd->cmd_state = SF_STATE_ISSUED;
5248 
5249 	/* call transport to send the pkt */
5250 	if (soc_transport(sf->sf_sochandle, cmd->cmd_fp_pkt, FCAL_NOSLEEP,
5251 	    CQ_REQUEST_1) != FCAL_TRANSPORT_SUCCESS) {
5252 		cmd->cmd_state = SF_STATE_IDLE;
5253 		mutex_enter(&sf->sf_cmd_mutex);
5254 		sf->sf_ncmds--;
5255 		mutex_exit(&sf->sf_cmd_mutex);
5256 		return (TRAN_BADPKT);
5257 	}
5258 	return (TRAN_ACCEPT);
5259 }
5260 
5261 
5262 /*
5263  * prepare a packet for transport
5264  */
5265 static int
5266 sf_prepare_pkt(struct sf *sf, struct sf_pkt *cmd, struct sf_target *target)
5267 {
5268 	struct fcp_cmd *fcmd = cmd->cmd_block;
5269 
5270 /* XXXX Need to set the LUN ? */
5271 	bcopy((caddr_t)&target->sft_lun.b,
5272 	    (caddr_t)&fcmd->fcp_ent_addr,
5273 	    FCP_LUN_SIZE);
5274 	cmd->cmd_pkt->pkt_reason = CMD_CMPLT;
5275 	cmd->cmd_pkt->pkt_state = 0;
5276 	cmd->cmd_pkt->pkt_statistics = 0;
5277 
5278 
5279 	if ((cmd->cmd_pkt->pkt_comp == NULL) &&
5280 	    ((cmd->cmd_pkt->pkt_flags & FLAG_NOINTR) == 0)) {
5281 		return (TRAN_BADPKT);
5282 	}
5283 
5284 	/* invalidate imp field(s) of rsp block */
5285 	cmd->cmd_rsp_block->fcp_u.i_fcp_status = SF_BAD_DMA_MAGIC;
5286 
5287 	/* set up amt of I/O to do */
5288 	if (cmd->cmd_flags & CFLAG_DMAVALID) {
5289 		cmd->cmd_pkt->pkt_resid = cmd->cmd_dmacount;
5290 		if (cmd->cmd_flags & CFLAG_CMDIOPB) {
5291 			(void) ddi_dma_sync(cmd->cmd_dmahandle, 0, 0,
5292 			    DDI_DMA_SYNC_FORDEV);
5293 		}
5294 	} else {
5295 		cmd->cmd_pkt->pkt_resid = 0;
5296 	}
5297 
5298 	/* set up the Tagged Queuing type */
5299 	if (cmd->cmd_pkt->pkt_flags & FLAG_HTAG) {
5300 		fcmd->fcp_cntl.cntl_qtype = FCP_QTYPE_HEAD_OF_Q;
5301 	} else if (cmd->cmd_pkt->pkt_flags & FLAG_OTAG) {
5302 		fcmd->fcp_cntl.cntl_qtype = FCP_QTYPE_ORDERED;
5303 	}
5304 
5305 	/*
5306 	 * Sync the cmd segment
5307 	 */
5308 	(void) ddi_dma_sync(cmd->cmd_cr_pool->cmd_dma_handle,
5309 	    (caddr_t)fcmd - cmd->cmd_cr_pool->cmd_base,
5310 	    sizeof (struct fcp_cmd), DDI_DMA_SYNC_FORDEV);
5311 
5312 	sf_fill_ids(sf, cmd, target);
5313 	return (TRAN_ACCEPT);
5314 }
5315 
5316 
5317 /*
5318  * fill in packet hdr source and destination IDs and hdr byte count
5319  */
5320 static void
5321 sf_fill_ids(struct sf *sf, struct sf_pkt *cmd, struct sf_target *target)
5322 {
5323 	struct fcal_packet *fpkt = cmd->cmd_fp_pkt;
5324 	fc_frame_header_t	*hp;
5325 
5326 
5327 	hp = &fpkt->fcal_socal_request.sr_fc_frame_hdr;
5328 	hp->d_id = target->sft_al_pa;
5329 	hp->s_id = sf->sf_al_pa;
5330 	fpkt->fcal_socal_request.sr_soc_hdr.sh_byte_cnt =
5331 	    cmd->cmd_dmacookie.dmac_size;
5332 }
5333 
5334 
5335 /*
5336  * do polled I/O using transport
5337  */
5338 static int
5339 sf_dopoll(struct sf *sf, struct sf_pkt *cmd)
5340 {
5341 	int timeout;
5342 	int rval;
5343 
5344 
5345 	mutex_enter(&sf->sf_cmd_mutex);
5346 	sf->sf_ncmds++;
5347 	mutex_exit(&sf->sf_cmd_mutex);
5348 
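	/*
	 * clear the completion callback so the transport polls for
	 * completion rather than calling us back; sf_cmd_callback is
	 * restored (and, on success, called directly) below
	 */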
5349 	timeout = cmd->cmd_pkt->pkt_time ? cmd->cmd_pkt->pkt_time
5350 	    : SF_POLL_TIMEOUT;
5351 	cmd->cmd_timeout = 0;
5352 	cmd->cmd_fp_pkt->fcal_pkt_comp = NULL;
5353 	cmd->cmd_state = SF_STATE_ISSUED;
5354 
5355 	/* call transport to send a pkt polled */
5356 	rval = soc_transport_poll(sf->sf_sochandle, cmd->cmd_fp_pkt,
5357 	    timeout*1000000, CQ_REQUEST_1);
5358 	mutex_enter(&cmd->cmd_abort_mutex);
5359 	cmd->cmd_fp_pkt->fcal_pkt_comp = sf_cmd_callback;
5360 	if (rval != FCAL_TRANSPORT_SUCCESS) {
5361 		if (rval == FCAL_TRANSPORT_TIMEOUT) {
5362 			cmd->cmd_state = SF_STATE_ABORTING;
5363 			mutex_exit(&cmd->cmd_abort_mutex);
5364 			(void) sf_target_timeout(sf, cmd);
5365 		} else {
5366 			mutex_exit(&cmd->cmd_abort_mutex);
5367 		}
5368 		cmd->cmd_state = SF_STATE_IDLE;
5369 		cmd->cmd_fp_pkt->fcal_pkt_comp = sf_cmd_callback;
5370 		mutex_enter(&sf->sf_cmd_mutex);
5371 		sf->sf_ncmds--;
5372 		mutex_exit(&sf->sf_cmd_mutex);
5373 		return (TRAN_BADPKT);
5374 	}
5375 	mutex_exit(&cmd->cmd_abort_mutex);
5376 	cmd->cmd_fp_pkt->fcal_pkt_comp = sf_cmd_callback;
5377 	sf_cmd_callback(cmd->cmd_fp_pkt);
5378 	return (TRAN_ACCEPT);
5379 }
5380 
5381 
5382 /* a shortcut for defining debug messages below */
5383 #ifdef	DEBUG
5384 #define	SF_DMSG1(s)		msg1 = s
5385 #else
5386 #define	SF_DMSG1(s)		/* do nothing */
5387 #endif
5388 
5389 
5390 /*
5391  * the pkt_comp callback for command packets
5392  */
5393 static void
5394 sf_cmd_callback(struct fcal_packet *fpkt)
5395 {
5396 	struct sf_pkt *cmd = (struct sf_pkt *)fpkt->fcal_pkt_private;
5397 	struct scsi_pkt *pkt = cmd->cmd_pkt;
5398 	struct sf *sf = ADDR2SF(&pkt->pkt_address);
5399 	struct sf_target *target = ADDR2TARGET(&pkt->pkt_address);
5400 	struct fcp_rsp *rsp;
5401 	char *msg1 = NULL;
5402 	char *msg2 = NULL;
5403 	short ncmds;
5404 	int tgt_id;
5405 	int good_scsi_status = TRUE;
5406 
5407 
5408 
5409 	if (cmd->cmd_state == SF_STATE_IDLE) {
5410 		cmn_err(CE_PANIC, "sf: completing idle packet 0x%p\n",
5411 		    (void *)cmd);
5412 	}
5413 
5414 	mutex_enter(&cmd->cmd_abort_mutex);
5415 	if (cmd->cmd_state == SF_STATE_ABORTING) {
5416 		/* cmd already being aborted -- nothing to do */
5417 		mutex_exit(&cmd->cmd_abort_mutex);
5418 		return;
5419 	}
5420 
5421 	cmd->cmd_state = SF_STATE_IDLE;
5422 	mutex_exit(&cmd->cmd_abort_mutex);
5423 
5424 	if (fpkt->fcal_pkt_status == FCAL_STATUS_OK) {
5425 
5426 		(void) ddi_dma_sync(cmd->cmd_cr_pool->rsp_dma_handle,
5427 		    (caddr_t)cmd->cmd_rsp_block - cmd->cmd_cr_pool->rsp_base,
5428 		    FCP_MAX_RSP_IU_SIZE, DDI_DMA_SYNC_FORKERNEL);
5429 
5430 		rsp = (struct fcp_rsp *)cmd->cmd_rsp_block;
5431 
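		/*
		 * sf_prepare_pkt() seeded the response block with
		 * SF_BAD_DMA_MAGIC; if the magic value is still there,
		 * a response was never DMAed in
		 */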
5432 		if (rsp->fcp_u.i_fcp_status == SF_BAD_DMA_MAGIC) {
5433 
5434 			if (sf_core && (sf_core & SF_CORE_BAD_DMA)) {
5435 				sf_token = (int *)(uintptr_t)
5436 				    fpkt->fcal_socal_request.\
5437 				    sr_soc_hdr.sh_request_token;
5438 				(void) soc_take_core(sf->sf_sochandle,
5439 				    sf->sf_socp);
5440 			}
5441 
5442 			pkt->pkt_reason = CMD_INCOMPLETE;
5443 			pkt->pkt_state = STATE_GOT_BUS;
5444 			pkt->pkt_statistics |= STAT_ABORTED;
5445 
5446 		} else {
5447 
5448 			pkt->pkt_state = STATE_GOT_BUS | STATE_GOT_TARGET |
5449 			    STATE_SENT_CMD | STATE_GOT_STATUS;
5450 			pkt->pkt_resid = 0;
5451 			if (cmd->cmd_flags & CFLAG_DMAVALID) {
5452 				pkt->pkt_state |= STATE_XFERRED_DATA;
5453 			}
5454 
5455 			if ((pkt->pkt_scbp != NULL) &&
5456 			    ((*(pkt->pkt_scbp) =
5457 			    rsp->fcp_u.fcp_status.scsi_status)
5458 			    != STATUS_GOOD)) {
5459 				good_scsi_status = FALSE;
5460 				/*
5461 				 * The next check makes sure that, if there
5462 				 * is neither sense data nor a valid response
5463 				 * and the command came back with check
5464 				 * condition, the command is retried
5465 				 */
5466 				if (!rsp->fcp_u.fcp_status.rsp_len_set &&
5467 				    !rsp->fcp_u.fcp_status.sense_len_set) {
5468 					pkt->pkt_state &= ~STATE_XFERRED_DATA;
5469 					pkt->pkt_resid = cmd->cmd_dmacount;
5470 				}
5471 			}
5472 
5473 			if ((cmd->cmd_flags & CFLAG_CMDIOPB) &&
5474 			    (pkt->pkt_state & STATE_XFERRED_DATA)) {
5475 				(void) ddi_dma_sync(cmd->cmd_dmahandle, 0,
5476 				    (uint_t)0, DDI_DMA_SYNC_FORCPU);
5477 			}
5478 			/*
5479 			 * Update the transfer resid, if appropriate
5480 			 */
5481 			if (rsp->fcp_u.fcp_status.resid_over ||
5482 			    rsp->fcp_u.fcp_status.resid_under)
5483 				pkt->pkt_resid = rsp->fcp_resid;
5484 
5485 			/*
5486 			 * Check to see if the SCSI command failed.
5487 			 *
5488 			 */
5489 
5490 			/*
5491 			 * First see if we got a FCP protocol error.
5492 			 */
5493 			if (rsp->fcp_u.fcp_status.rsp_len_set) {
5494 				struct fcp_rsp_info *bep;
5495 
5496 				bep = (struct fcp_rsp_info *)
5497 				    (&rsp->fcp_response_len + 1);
5498 				if (bep->rsp_code != FCP_NO_FAILURE) {
5499 					pkt->pkt_reason = CMD_TRAN_ERR;
5500 					tgt_id = pkt->pkt_address.a_target;
5501 					switch (bep->rsp_code) {
5502 					case FCP_CMND_INVALID:
5503 						SF_DMSG1("FCP_RSP FCP_CMND "
5504 						    "fields invalid");
5505 						break;
5506 					case FCP_TASK_MGMT_NOT_SUPPTD:
5507 						SF_DMSG1("FCP_RSP Task "
5508 						    "Management Function "
5509 						    "Not Supported");
5510 						break;
5511 					case FCP_TASK_MGMT_FAILED:
5512 						SF_DMSG1("FCP_RSP Task "
5513 						    "Management Function "
5514 						    "Failed");
5515 						sf->sf_stats.tstats[tgt_id].
5516 						    task_mgmt_failures++;
5517 						break;
5518 					case FCP_DATA_RO_MISMATCH:
5519 						SF_DMSG1("FCP_RSP FCP_DATA RO "
5520 						    "mismatch with "
5521 						    "FCP_XFER_RDY DATA_RO");
5522 						sf->sf_stats.tstats[tgt_id].
5523 						    data_ro_mismatches++;
5524 						break;
5525 					case FCP_DL_LEN_MISMATCH:
5526 						SF_DMSG1("FCP_RSP FCP_DATA "
5527 						    "length "
5528 						    "different than BURST_LEN");
5529 						sf->sf_stats.tstats[tgt_id].
5530 						    dl_len_mismatches++;
5531 						break;
5532 					default:
5533 						SF_DMSG1("FCP_RSP invalid "
5534 						    "RSP_CODE");
5535 						break;
5536 					}
5537 				}
5538 			}
5539 
5540 			/*
5541 			 * See if we got a SCSI error with sense data
5542 			 */
5543 			if (rsp->fcp_u.fcp_status.sense_len_set) {
5544 				uchar_t rqlen = min(rsp->fcp_sense_len,
5545 				    sizeof (struct scsi_extended_sense));
5546 				caddr_t sense = (caddr_t)rsp +
5547 				    sizeof (struct fcp_rsp) +
5548 				    rsp->fcp_response_len;
5549 				struct scsi_arq_status *arq;
5550 				struct scsi_extended_sense *sensep =
5551 				    (struct scsi_extended_sense *)sense;
5552 
5553 				if (rsp->fcp_u.fcp_status.scsi_status !=
5554 				    STATUS_GOOD) {
5555 					if (rsp->fcp_u.fcp_status.scsi_status
5556 					    == STATUS_CHECK) {
5557 						if (sensep->es_key ==
5558 						    KEY_RECOVERABLE_ERROR)
5559 							good_scsi_status = TRUE;
5560 						if (sensep->es_key ==
5561 						    KEY_UNIT_ATTENTION &&
5562 						    sensep->es_add_code == 0x3f &&
5563 						    sensep->es_qual_code == 0x0e) {
5564 							/* REPORT_LUNS_HAS_CHANGED */
5565 							sf_log(sf, CE_NOTE,
5566 							    "!REPORT_LUNS_HAS_CHANGED\n");
5567 							sf_force_lip(sf);
5568 						}
5569 					}
5570 				}
5571 
5572 				if ((pkt->pkt_scbp != NULL) &&
5573 				    (cmd->cmd_scblen >=
5574 				    sizeof (struct scsi_arq_status))) {
5575 
5576 					pkt->pkt_state |= STATE_ARQ_DONE;
5577 
5578 					arq = (struct scsi_arq_status *)pkt->pkt_scbp;
5579 					/*
5580 					 * copy out sense information
5581 					 */
5582 					bcopy(sense, (caddr_t)&arq->sts_sensedata,
5583 					    rqlen);
5584 					arq->sts_rqpkt_resid =
5585 					    sizeof (struct scsi_extended_sense) -
5586 					    rqlen;
5587 					*((uchar_t *)&arq->sts_rqpkt_status) =
5588 					    STATUS_GOOD;
5589 					arq->sts_rqpkt_reason = 0;
5590 					arq->sts_rqpkt_statistics = 0;
5591 					arq->sts_rqpkt_state = STATE_GOT_BUS |
5592 					    STATE_GOT_TARGET | STATE_SENT_CMD |
5593 					    STATE_GOT_STATUS | STATE_ARQ_DONE |
5594 					    STATE_XFERRED_DATA;
5595 				}
5596 				target->sft_alive = TRUE;
5597 			}
5598 
5599 			/*
5600 			 * The firmware returns the number of bytes actually
5601 			 * transferred into/out of the host. Compare this with
5602 			 * what we asked for; if they differ, frames were lost.
5603 			 */
5604 			if ((pkt->pkt_reason == 0) && (pkt->pkt_resid == 0) &&
5605 			    (good_scsi_status) &&
5606 			    (pkt->pkt_state & STATE_XFERRED_DATA) &&
5607 			    (!(cmd->cmd_flags & CFLAG_CMDIOPB)) &&
5608 			    (target->sft_device_type != DTYPE_ESI)) {
5609 				int byte_cnt =
5610 				    fpkt->fcal_socal_request.
5611 				    sr_soc_hdr.sh_byte_cnt;
5612 				if (cmd->cmd_flags & CFLAG_DMASEND) {
5613 					if (byte_cnt != 0) {
5614 					sf_log(sf, CE_NOTE,
5615 					    "!sf_cmd_callback: Lost Frame: "
5616 					    "(write) received 0x%x expected"
5617 					    " 0x%x target 0x%x\n",
5618 					    byte_cnt, cmd->cmd_dmacount,
5619 					    sf_alpa_to_switch[
5620 					    target->sft_al_pa]);
5621 					pkt->pkt_reason = CMD_INCOMPLETE;
5622 					pkt->pkt_statistics |= STAT_ABORTED;
5623 					}
5624 				} else if (byte_cnt < cmd->cmd_dmacount) {
5625 					sf_log(sf, CE_NOTE,
5626 					    "!sf_cmd_callback: "
5627 					    "Lost Frame: (read) "
5628 					    "received 0x%x expected 0x%x "
5629 					    "target 0x%x\n", byte_cnt,
5630 					    cmd->cmd_dmacount,
5631 					    sf_alpa_to_switch[
5632 					    target->sft_al_pa]);
5633 					pkt->pkt_reason = CMD_INCOMPLETE;
5634 					pkt->pkt_statistics |= STAT_ABORTED;
5635 				}
5636 			}
5637 		}
5638 
5639 	} else {
5640 
5641 		/* pkt status was not ok */
5642 
5643 		switch (fpkt->fcal_pkt_status) {
5644 
5645 		case FCAL_STATUS_ERR_OFFLINE:
5646 			SF_DMSG1("Fibre Channel Offline");
5647 			mutex_enter(&target->sft_mutex);
5648 			if (!(target->sft_state & SF_TARGET_OFFLINE)) {
5649 				target->sft_state |= (SF_TARGET_BUSY
5650 				    | SF_TARGET_MARK);
5651 			}
5652 			mutex_exit(&target->sft_mutex);
5653 			(void) ndi_event_retrieve_cookie(sf->sf_event_hdl,
5654 			    target->sft_dip, FCAL_REMOVE_EVENT,
5655 			    &sf_remove_eid, NDI_EVENT_NOPASS);
5656 			(void) ndi_event_run_callbacks(sf->sf_event_hdl,
5657 			    target->sft_dip, sf_remove_eid, NULL);
5658 			pkt->pkt_reason = CMD_TRAN_ERR;
5659 			pkt->pkt_statistics |= STAT_BUS_RESET;
5660 			break;
5661 
5662 		case FCAL_STATUS_MAX_XCHG_EXCEEDED:
5663 			sf_throttle(sf);
5664 			sf->sf_use_lock = TRUE;
5665 			pkt->pkt_reason = CMD_TRAN_ERR;
5666 			pkt->pkt_state = STATE_GOT_BUS;
5667 			pkt->pkt_statistics |= STAT_ABORTED;
5668 			break;
5669 
5670 		case FCAL_STATUS_TIMEOUT:
5671 			SF_DMSG1("Fibre Channel Timeout");
5672 			pkt->pkt_reason = CMD_TIMEOUT;
5673 			break;
5674 
5675 		case FCAL_STATUS_ERR_OVERRUN:
5676 			SF_DMSG1("CMD_DATA_OVR");
5677 			pkt->pkt_reason = CMD_DATA_OVR;
5678 			break;
5679 
5680 		case FCAL_STATUS_UNKNOWN_CQ_TYPE:
5681 			SF_DMSG1("Unknown CQ type");
5682 			pkt->pkt_reason = CMD_TRAN_ERR;
5683 			break;
5684 
5685 		case FCAL_STATUS_BAD_SEG_CNT:
5686 			SF_DMSG1("Bad SEG CNT");
5687 			pkt->pkt_reason = CMD_TRAN_ERR;
5688 			break;
5689 
5690 		case FCAL_STATUS_BAD_XID:
5691 			SF_DMSG1("Fibre Channel Invalid X_ID");
5692 			pkt->pkt_reason = CMD_TRAN_ERR;
5693 			break;
5694 
5695 		case FCAL_STATUS_XCHG_BUSY:
5696 			SF_DMSG1("Fibre Channel Exchange Busy");
5697 			pkt->pkt_reason = CMD_TRAN_ERR;
5698 			break;
5699 
5700 		case FCAL_STATUS_INSUFFICIENT_CQES:
5701 			SF_DMSG1("Insufficient CQEs");
5702 			pkt->pkt_reason = CMD_TRAN_ERR;
5703 			break;
5704 
5705 		case FCAL_STATUS_ALLOC_FAIL:
5706 			SF_DMSG1("ALLOC FAIL");
5707 			pkt->pkt_reason = CMD_TRAN_ERR;
5708 			break;
5709 
5710 		case FCAL_STATUS_BAD_SID:
5711 			SF_DMSG1("Fibre Channel Invalid S_ID");
5712 			pkt->pkt_reason = CMD_TRAN_ERR;
5713 			break;
5714 
5715 		case FCAL_STATUS_INCOMPLETE_DMA_ERR:
5716 			if (sf_core && (sf_core & SF_CORE_INCOMPLETE_DMA)) {
5717 				sf_token = (int *)(uintptr_t)
5718 				    fpkt->fcal_socal_request.\
5719 				    sr_soc_hdr.sh_request_token;
5720 				(void) soc_take_core(sf->sf_sochandle,
5721 				    sf->sf_socp);
5722 				sf_core = 0;
5723 			}
5724 			msg2 =
5725 			"INCOMPLETE DMA XFER due to bad SOC+ card, replace HBA";
5726 			pkt->pkt_reason = CMD_INCOMPLETE;
5727 			pkt->pkt_state = STATE_GOT_BUS;
5728 			pkt->pkt_statistics |= STAT_ABORTED;
5729 			break;
5730 
5731 		case FCAL_STATUS_CRC_ERR:
5732 			msg2 = "Fibre Channel CRC Error on frames";
5733 			pkt->pkt_reason = CMD_INCOMPLETE;
5734 			pkt->pkt_state = STATE_GOT_BUS;
5735 			pkt->pkt_statistics |= STAT_ABORTED;
5736 			break;
5737 
5738 		case FCAL_STATUS_NO_SEQ_INIT:
5739 			SF_DMSG1("Fibre Channel Seq Init Error");
5740 			pkt->pkt_reason = CMD_TRAN_ERR;
5741 			break;
5742 
5743 		case  FCAL_STATUS_OPEN_FAIL:
5744 			pkt->pkt_reason = CMD_TRAN_ERR;
5745 			SF_DMSG1("Fibre Channel Open Failure");
5746 			if ((target->sft_state & (SF_TARGET_BUSY |
5747 			    SF_TARGET_MARK | SF_TARGET_OFFLINE)) == 0) {
5748 				sf_log(sf, CE_NOTE,
5749 				    "!Open failure to target 0x%x "
5750 				    "forcing LIP\n",
5751 				    sf_alpa_to_switch[target->sft_al_pa]);
5752 				sf_force_lip(sf);
5753 			}
5754 			break;
5755 
5756 
5757 		case FCAL_STATUS_ONLINE_TIMEOUT:
5758 			SF_DMSG1("Fibre Channel Online Timeout");
5759 			pkt->pkt_reason = CMD_TRAN_ERR;
5760 			break;
5761 
5762 		default:
5763 			SF_DMSG1("Unknown FC Status");
5764 			pkt->pkt_reason = CMD_TRAN_ERR;
5765 			break;
5766 		}
5767 	}
5768 
5769 #ifdef	DEBUG
5770 	/*
5771 	 * msg1 will be non-NULL if we've detected some sort of error
5772 	 */
5773 	if (msg1 != NULL && sfdebug >= 4) {
5774 		sf_log(sf, CE_WARN,
5775 		    "!Transport error on cmd=0x%p target=0x%x:  %s\n",
5776 		    (void *)fpkt, pkt->pkt_address.a_target, msg1);
5777 	}
5778 #endif
5779 
5780 	if (msg2 != NULL) {
5781 		sf_log(sf, CE_WARN, "!Transport error on target=0x%x:  %s\n",
5782 		    pkt->pkt_address.a_target, msg2);
5783 	}
5784 
5785 	ncmds = fpkt->fcal_ncmds;
5786 	ASSERT(ncmds >= 0);
5787 	if (ncmds >= (sf->sf_throttle - SF_HI_CMD_DELTA)) {
5788 #ifdef DEBUG
5789 		if (!sf->sf_use_lock) {
5790 			SF_DEBUG(4, (sf, CE_NOTE, "use lock flag on\n"));
5791 		}
5792 #endif
5793 		sf->sf_use_lock = TRUE;
5794 	}
5795 
5796 	mutex_enter(&sf->sf_cmd_mutex);
5797 	sf->sf_ncmds = ncmds;
5798 	sf_throttle_start(sf);
5799 	mutex_exit(&sf->sf_cmd_mutex);
5800 
5801 	if (!msg1 && !msg2)
5802 		SF_DEBUG(6, (sf, CE_NOTE, "Completing pkt 0x%p\n",
5803 		    (void *)pkt));
5804 	if (pkt->pkt_comp != NULL) {
5805 		(*pkt->pkt_comp)(pkt);
5806 	}
5807 }
5808 
5809 #undef	SF_DMSG1
5810 
5811 
5812 
5813 /*
5814  * drain the overflow queue for this instance, up to the throttle limit
5815  */
5816 static void
5817 sf_throttle_start(struct sf *sf)
5818 {
5819 	struct sf_pkt *cmd, *prev_cmd = NULL;
5820 	struct scsi_pkt *pkt;
5821 	struct sf_target *target;
5822 
5823 
5824 	ASSERT(mutex_owned(&sf->sf_cmd_mutex));
5825 
5826 	cmd = sf->sf_pkt_head;
5827 	while ((cmd != NULL) &&
5828 	    (sf->sf_state == SF_STATE_ONLINE) &&
5829 	    (sf->sf_ncmds < sf->sf_throttle)) {
5830 
5831 		pkt = CMD2PKT(cmd);
5832 
5833 		target = ADDR2TARGET(&pkt->pkt_address);
5834 		if (target->sft_state & SF_TARGET_BUSY) {
5835 			/* this command is busy -- go to next */
5836 			ASSERT(cmd->cmd_state != SF_STATE_ISSUED);
5837 			prev_cmd = cmd;
5838 			cmd = cmd->cmd_next;
5839 			continue;
5840 		}
5841 
5842 		ASSERT(cmd->cmd_state != SF_STATE_ISSUED);
5843 
5844 		/* this cmd not busy and not issued */
5845 
5846 		/* remove this packet from the queue */
5847 		if (sf->sf_pkt_head == cmd) {
5848 			/* this was the first packet */
5849 			sf->sf_pkt_head = cmd->cmd_next;
5850 		} else if (sf->sf_pkt_tail == cmd) {
5851 			/* this was the last packet */
5852 			sf->sf_pkt_tail = prev_cmd;
5853 			if (prev_cmd != NULL) {
5854 				prev_cmd->cmd_next = NULL;
5855 			}
5856 		} else {
5857 			/* some packet in the middle of the queue */
5858 			ASSERT(prev_cmd != NULL);
5859 			prev_cmd->cmd_next = cmd->cmd_next;
5860 		}
5861 		cmd->cmd_flags &= ~CFLAG_IN_QUEUE;
5862 
5863 		if (target->sft_state & SF_TARGET_OFFLINE) {
5864 			mutex_exit(&sf->sf_cmd_mutex);
5865 			pkt->pkt_reason = CMD_TRAN_ERR;
5866 			if (pkt->pkt_comp != NULL) {
5867 				(*pkt->pkt_comp)(cmd->cmd_pkt);
5868 			}
5869 		} else {
5870 			sf_fill_ids(sf, cmd, target);
5871 			if (sf_start_internal(sf, cmd) != TRAN_ACCEPT) {
5872 				pkt->pkt_reason = CMD_TRAN_ERR;
5873 				if (pkt->pkt_comp != NULL) {
5874 					(*pkt->pkt_comp)(cmd->cmd_pkt);
5875 				}
5876 			}
5877 		}
5878 		mutex_enter(&sf->sf_cmd_mutex);
5879 		cmd = sf->sf_pkt_head;
5880 		prev_cmd = NULL;
5881 	}
5882 }
5883 
5884 
5885 /*
5886  * called when the max exchange value is exceeded to throttle back commands
5887  */
5888 static void
5889 sf_throttle(struct sf *sf)
5890 {
5891 	int cmdmax = sf->sf_sochandle->fcal_cmdmax;
5892 
5893 
5894 	mutex_enter(&sf->sf_cmd_mutex);
5895 
5896 	sf->sf_flag = TRUE;
5897 
5898 	if (sf->sf_ncmds > (cmdmax / 2)) {
5899 		sf->sf_throttle = cmdmax / 2;
5900 	} else {
5901 		if (sf->sf_ncmds > SF_DECR_DELTA) {
5902 			sf->sf_throttle = sf->sf_ncmds - SF_DECR_DELTA;
5903 		} else {
5904 			/*
5905 			 * This case is just a safeguard; it should not really
5906 			 * happen (ncmds < SF_DECR_DELTA and MAX_EXCHG exceeded)
5907 			 */
5908 			sf->sf_throttle = SF_DECR_DELTA;
5909 		}
5910 	}
5911 	mutex_exit(&sf->sf_cmd_mutex);
5912 
5913 	sf = sf->sf_sibling;
5914 	if (sf != NULL) {
5915 		mutex_enter(&sf->sf_cmd_mutex);
5916 		sf->sf_flag = TRUE;
5917 		if (sf->sf_ncmds >= (cmdmax / 2)) {
5918 			sf->sf_throttle = cmdmax / 2;
5919 		} else {
5920 			if (sf->sf_ncmds > SF_DECR_DELTA) {
5921 				sf->sf_throttle = sf->sf_ncmds - SF_DECR_DELTA;
5922 			} else {
5923 				sf->sf_throttle = SF_DECR_DELTA;
5924 			}
5925 		}
5926 
5927 		mutex_exit(&sf->sf_cmd_mutex);
5928 	}
5929 }
5930 
5931 
5932 /*
5933  * sf watchdog routine, called for a timeout
5934  */
5935 /*ARGSUSED*/
5936 static void
5937 sf_watch(void *arg)
5938 {
5939 	struct sf *sf;
5940 	struct sf_els_hdr	*privp;
5941 	static int count = 0, pscan_count = 0;
5942 	int cmdmax, i, mescount = 0;
5943 	struct sf_target *target;
5944 
5945 
5946 	sf_watchdog_time += sf_watchdog_timeout;
5947 	count++;
5948 	pscan_count++;
5949 
5950 	mutex_enter(&sf_global_mutex);
5951 	sf_watch_running = 1;
5952 	for (sf = sf_head; sf != NULL; sf = sf->sf_next) {
5953 
5954 		mutex_exit(&sf_global_mutex);
5955 
5956 		/* disable throttling while we're suspended */
5957 		mutex_enter(&sf->sf_mutex);
5958 		if (sf->sf_state & SF_STATE_SUSPENDED) {
5959 			mutex_exit(&sf->sf_mutex);
5960 			SF_DEBUG(1, (sf, CE_CONT,
5961 			    "sf_watch, sf%d:throttle disabled "
5962 			    "due to DDI_SUSPEND\n",
5963 			    ddi_get_instance(sf->sf_dip)));
5964 			mutex_enter(&sf_global_mutex);
5965 			continue;
5966 		}
5967 		mutex_exit(&sf->sf_mutex);
5968 
5969 		cmdmax = sf->sf_sochandle->fcal_cmdmax;
5970 
5971 		if (sf->sf_take_core) {
5972 			(void) soc_take_core(sf->sf_sochandle, sf->sf_socp);
5973 		}
5974 
5975 		mutex_enter(&sf->sf_cmd_mutex);
5976 
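		/*
		 * if no throttling event occurred during the last tick,
		 * creep the throttle back up toward fcal_cmdmax, else
		 * just clear the flag for the next interval
		 */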
5977 		if (!sf->sf_flag) {
5978 			if (sf->sf_throttle < (cmdmax / 2)) {
5979 				sf->sf_throttle = cmdmax / 2;
5980 			} else if ((sf->sf_throttle += SF_INCR_DELTA) >
5981 			    cmdmax) {
5982 				sf->sf_throttle = cmdmax;
5983 			}
5984 		} else {
5985 			sf->sf_flag = FALSE;
5986 		}
5987 
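		/*
		 * update the decaying average of outstanding commands;
		 * it is used below to decide when command/response pool
		 * entries can be freed
		 */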
5988 		sf->sf_ncmds_exp_avg = (sf->sf_ncmds + sf->sf_ncmds_exp_avg)
5989 		    >> 2;
5990 		if ((sf->sf_ncmds <= (sf->sf_throttle - SF_LO_CMD_DELTA)) &&
5991 		    (sf->sf_pkt_head == NULL)) {
5992 #ifdef DEBUG
5993 			if (sf->sf_use_lock) {
5994 				SF_DEBUG(4, (sf, CE_NOTE,
5995 				    "use lock flag off\n"));
5996 			}
5997 #endif
5998 			sf->sf_use_lock = FALSE;
5999 		}
6000 
6001 		if (sf->sf_state == SF_STATE_ONLINE && sf->sf_pkt_head &&
6002 		    sf->sf_ncmds < sf->sf_throttle) {
6003 			sf_throttle_start(sf);
6004 		}
6005 
6006 		mutex_exit(&sf->sf_cmd_mutex);
6007 
6008 		if (pscan_count >= sf_pool_scan_cnt) {
6009 			if (sf->sf_ncmds_exp_avg < (sf->sf_cr_pool_cnt <<
6010 			    SF_LOG2_ELEMS_IN_POOL) - SF_FREE_CR_EPSILON) {
6011 				sf_crpool_free(sf);
6012 			}
6013 		}
6014 		mutex_enter(&sf->sf_mutex);
6015 
6016 		privp = sf->sf_els_list;
6017 		while (privp != NULL) {
6018 			if (privp->timeout < sf_watchdog_time) {
6019 				/* timeout this command */
6020 				privp = sf_els_timeout(sf, privp);
6021 			} else if ((privp->timeout == SF_INVALID_TIMEOUT) &&
6022 			    (privp->lip_cnt != sf->sf_lip_cnt)) {
6023 				if (privp->prev != NULL) {
6024 					privp->prev->next = privp->next;
6025 				}
6026 				if (sf->sf_els_list == privp) {
6027 					sf->sf_els_list = privp->next;
6028 				}
6029 				if (privp->next != NULL) {
6030 					privp->next->prev = privp->prev;
6031 				}
6032 				mutex_exit(&sf->sf_mutex);
6033 				sf_els_free(privp->fpkt);
6034 				mutex_enter(&sf->sf_mutex);
6035 				privp = sf->sf_els_list;
6036 			} else {
6037 				privp = privp->next;
6038 			}
6039 		}
6040 
6041 		if (sf->sf_online_timer && sf->sf_online_timer <
6042 		    sf_watchdog_time) {
6043 			for (i = 0; i < sf_max_targets; i++) {
6044 				target = sf->sf_targets[i];
6045 				if (target != NULL) {
6046 					if (!mescount && target->sft_state &
6047 					    SF_TARGET_BUSY) {
6048 						sf_log(sf, CE_WARN, "!Loop "
6049 						    "Unstable: Failed to bring "
6050 						    "Loop Online\n");
6051 						mescount = 1;
6052 					}
6053 					target->sft_state |= SF_TARGET_MARK;
6054 				}
6055 			}
6056 			sf_finish_init(sf, sf->sf_lip_cnt);
6057 			sf->sf_state = SF_STATE_INIT;
6058 			sf->sf_online_timer = 0;
6059 		}
6060 
6061 		if (sf->sf_state == SF_STATE_ONLINE) {
6062 			mutex_exit(&sf->sf_mutex);
6063 			if (count >= sf_pkt_scan_cnt) {
6064 				sf_check_targets(sf);
6065 			}
6066 		} else if ((sf->sf_state == SF_STATE_OFFLINE) &&
6067 		    (sf->sf_timer < sf_watchdog_time)) {
6068 			for (i = 0; i < sf_max_targets; i++) {
6069 				target = sf->sf_targets[i];
6070 				if ((target != NULL) &&
6071 				    (target->sft_state &
6072 				    SF_TARGET_BUSY)) {
6073 					sf_log(sf, CE_WARN,
6074 					    "!Offline Timeout\n");
6075 					if (sf_core && (sf_core &
6076 					    SF_CORE_OFFLINE_TIMEOUT)) {
6077 						(void) soc_take_core(
6078 						    sf->sf_sochandle,
6079 						    sf->sf_socp);
6080 						sf_core = 0;
6081 					}
6082 					break;
6083 				}
6084 			}
6085 			sf_finish_init(sf, sf->sf_lip_cnt);
6086 			sf->sf_state = SF_STATE_INIT;
6087 			mutex_exit(&sf->sf_mutex);
6088 		} else {
6089 			mutex_exit(&sf->sf_mutex);
6090 		}
6091 		mutex_enter(&sf_global_mutex);
6092 	}
6093 	mutex_exit(&sf_global_mutex);
6094 	if (count >= sf_pkt_scan_cnt) {
6095 		count = 0;
6096 	}
6097 	if (pscan_count >= sf_pool_scan_cnt) {
6098 		pscan_count = 0;
6099 	}
6100 
6101 	/* reset timeout */
6102 	sf_watchdog_id = timeout(sf_watch, (caddr_t)0, sf_watchdog_tick);
6103 
6104 	/* signal waiting thread */
6105 	mutex_enter(&sf_global_mutex);
6106 	sf_watch_running = 0;
6107 	cv_broadcast(&sf_watch_cv);
6108 	mutex_exit(&sf_global_mutex);
6109 }
6110 
6111 
6112 /*
6113  * called during a timeout to check targets
6114  */
6115 static void
6116 sf_check_targets(struct sf *sf)
6117 {
6118 	struct sf_target *target;
6119 	int i;
6120 	struct sf_pkt *cmd;
6121 	struct scsi_pkt *pkt;
6122 	int lip_cnt;
6123 
6124 	mutex_enter(&sf->sf_mutex);
6125 	lip_cnt = sf->sf_lip_cnt;
6126 	mutex_exit(&sf->sf_mutex);
6127 
6128 	/* scan all possible targets */
6129 	for (i = 0; i < sf_max_targets; i++) {
6130 		target = sf->sf_targets[i];
6131 		while (target != NULL) {
6132 			mutex_enter(&target->sft_pkt_mutex);
6133 			if (target->sft_alive && target->sft_scan_count !=
6134 			    sf_target_scan_cnt) {
6135 				target->sft_alive = 0;
6136 				target->sft_scan_count++;
6137 				mutex_exit(&target->sft_pkt_mutex);
6138 				return;
6139 			}
6140 			target->sft_alive = 0;
6141 			target->sft_scan_count = 0;
6142 			cmd = target->sft_pkt_head;
6143 			while (cmd != (struct sf_pkt *)&target->sft_pkt_head) {
6144 				mutex_enter(&cmd->cmd_abort_mutex);
6145 				if (cmd->cmd_state == SF_STATE_ISSUED &&
6146 				    ((cmd->cmd_timeout && sf_watchdog_time >
6147 #ifdef	DEBUG
6148 				    cmd->cmd_timeout) || sf_abort_flag)) {
6149 					sf_abort_flag = 0;
6150 #else
6151 					cmd->cmd_timeout))) {
6152 #endif
6153 					cmd->cmd_timeout = 0;
6154 	/* prevent reset from getting at this packet */
6155 					cmd->cmd_state = SF_STATE_ABORTING;
6156 					mutex_exit(&cmd->cmd_abort_mutex);
6157 					mutex_exit(&target->sft_pkt_mutex);
6158 					sf->sf_stats.tstats[i].timeouts++;
6159 					if (sf_target_timeout(sf, cmd))
6160 						return;
6161 					else {
6162 						if (lip_cnt != sf->sf_lip_cnt) {
6163 							return;
6164 						} else {
6165 							mutex_enter(&target->
6166 							    sft_pkt_mutex);
6167 							cmd = target->
6168 							    sft_pkt_head;
6169 						}
6170 					}
6171 	/*
6172 	 * if the abort and lip fail, a reset will be carried out.
6173 	 * But the reset will ignore this packet. We have waited at least
6174 	 * 20 seconds after the initial timeout. Now, complete it here.
6175 	 * This also takes care of spurious bad aborts.
6176 	 */
6177 				} else if ((cmd->cmd_state ==
6178 				    SF_STATE_ABORTING) && (cmd->cmd_timeout
6179 				    <= sf_watchdog_time)) {
6180 					cmd->cmd_state = SF_STATE_IDLE;
6181 					mutex_exit(&cmd->cmd_abort_mutex);
6182 					mutex_exit(&target->sft_pkt_mutex);
6183 					SF_DEBUG(1, (sf, CE_NOTE,
6184 					    "Command 0x%p to sft 0x%p"
6185 					    " delayed release\n",
6186 					    (void *)cmd, (void *)target));
6187 					pkt = cmd->cmd_pkt;
6188 					pkt->pkt_statistics |=
6189 					    (STAT_TIMEOUT|STAT_ABORTED);
6190 					pkt->pkt_reason = CMD_TIMEOUT;
6191 					if (pkt->pkt_comp) {
6192 						scsi_hba_pkt_comp(pkt);
6193 					/* handle deferred_destroy case */
6194 					} else {
6195 						if ((cmd->cmd_block->fcp_cntl.
6196 						    cntl_reset == 1) ||
6197 						    (cmd->cmd_block->
6198 						    fcp_cntl.cntl_abort_tsk ==
6199 						    1)) {
6200 							cmd->cmd_block->
6201 							    fcp_cntl.
6202 							    cntl_reset = 0;
6203 							cmd->cmd_block->
6204 							    fcp_cntl.
6205 							    cntl_abort_tsk = 0;
6206 							cmd->cmd_fp_pkt->
6207 							    fcal_pkt_comp =
6208 							    sf_cmd_callback;
6209 							/* for cache */
6210 							sf_scsi_destroy_pkt
6211 							    (&pkt->pkt_address,
6212 							    pkt);
6213 						}
6214 					}
6215 					mutex_enter(&target->sft_pkt_mutex);
6216 					cmd = target->sft_pkt_head;
6217 				} else {
6218 					mutex_exit(&cmd->cmd_abort_mutex);
6219 					cmd = cmd->cmd_forw;
6220 				}
6221 			}
6222 			mutex_exit(&target->sft_pkt_mutex);
6223 			target = target->sft_next_lun;
6224 		}
6225 	}
6226 }
6227 
6228 
6229 /*
6230  * a command to a target has timed out
6231  * return TRUE if the cmd abort failed or timed out, else return FALSE
6232  */
6233 static int
6234 sf_target_timeout(struct sf *sf, struct sf_pkt *cmd)
6235 {
6236 	int rval;
6237 	struct scsi_pkt *pkt;
6238 	struct fcal_packet *fpkt;
6239 	int tgt_id;
6240 	int retval = FALSE;
6241 
6242 
6243 	SF_DEBUG(1, (sf, CE_NOTE, "Command 0x%p to target %x timed out\n",
6244 	    (void *)cmd->cmd_fp_pkt, cmd->cmd_pkt->pkt_address.a_target));
6245 
6246 	fpkt = cmd->cmd_fp_pkt;
6247 
6248 	if (sf_core && (sf_core & SF_CORE_CMD_TIMEOUT)) {
6249 		sf_token = (int *)(uintptr_t)
6250 		    fpkt->fcal_socal_request.sr_soc_hdr.\
6251 		    sh_request_token;
6252 		(void) soc_take_core(sf->sf_sochandle, sf->sf_socp);
6253 		sf_core = 0;
6254 	}
6255 
6256 	/* call the transport to abort a command */
6257 	rval = soc_abort(sf->sf_sochandle, sf->sf_socp,
6258 	    sf->sf_sochandle->fcal_portno, fpkt, 1);
6259 
6260 	switch (rval) {
6261 	case FCAL_ABORTED:
6262 		SF_DEBUG(1, (sf, CE_NOTE, "Command Abort succeeded\n"));
6263 		pkt = cmd->cmd_pkt;
6264 		cmd->cmd_state = SF_STATE_IDLE;
6265 		pkt->pkt_statistics |= (STAT_TIMEOUT|STAT_ABORTED);
6266 		pkt->pkt_reason = CMD_TIMEOUT;
6267 		if (pkt->pkt_comp != NULL) {
6268 			(*pkt->pkt_comp)(pkt);
6269 		}
6270 		break;				/* success */
6271 
6272 	case FCAL_ABORT_FAILED:
6273 		SF_DEBUG(1, (sf, CE_NOTE, "Command Abort failed at target\n"));
6274 		pkt = cmd->cmd_pkt;
6275 		cmd->cmd_state = SF_STATE_IDLE;
6276 		pkt->pkt_reason = CMD_TIMEOUT;
6277 		pkt->pkt_statistics |= STAT_TIMEOUT;
6278 		tgt_id = pkt->pkt_address.a_target;
6279 		sf->sf_stats.tstats[tgt_id].abts_failures++;
6280 		if (pkt->pkt_comp != NULL) {
6281 			(*pkt->pkt_comp)(pkt);
6282 		}
6283 		break;
6284 
6285 	case FCAL_BAD_ABORT:
6286 		if (sf_core && (sf_core & SF_CORE_BAD_ABORT)) {
6287 			sf_token = (int *)(uintptr_t)fpkt->fcal_socal_request.\
6288 			    sr_soc_hdr.sh_request_token;
6289 			(void) soc_take_core(sf->sf_sochandle, sf->sf_socp);
6290 			sf_core = 0;
6291 		}
6292 		SF_DEBUG(1, (sf, CE_NOTE, "Command Abort bad abort\n"));
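		/*
		 * leave the command in ABORTING state; if the abort never
		 * completes, sf_check_targets() finishes the packet via
		 * its delayed-release path once this new timeout expires
		 */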
6293 		cmd->cmd_timeout = sf_watchdog_time + cmd->cmd_pkt->pkt_time
6294 		    + 20;
6295 		break;
6296 
6297 	case FCAL_TIMEOUT:
6298 		retval = TRUE;
6299 		break;
6300 
6301 	default:
6302 		pkt = cmd->cmd_pkt;
6303 		tgt_id = pkt->pkt_address.a_target;
6304 		sf_log(sf, CE_WARN,
6305 		    "Command Abort failed target 0x%x, forcing a LIP\n", tgt_id);
6306 		if (sf_core && (sf_core & SF_CORE_ABORT_TIMEOUT)) {
6307 			sf_token = (int *)(uintptr_t)fpkt->fcal_socal_request.\
6308 			    sr_soc_hdr.sh_request_token;
6309 			(void) soc_take_core(sf->sf_sochandle, sf->sf_socp);
6310 			sf_core = 0;
6311 		}
6312 		sf_force_lip(sf);
6313 		retval = TRUE;
6314 		break;
6315 	}
6316 
6317 	return (retval);
6318 }
6319 
6320 
6321 /*
6322  * an ELS command has timed out
6323  * return the next entry in sf_els_list for the caller to examine, or NULL
6324  */
6325 static struct sf_els_hdr *
6326 sf_els_timeout(struct sf *sf, struct sf_els_hdr *privp)
6327 {
6328 	struct fcal_packet *fpkt;
6329 	int rval, dflag, timeout = SF_ELS_TIMEOUT;
6330 	uint_t lip_cnt = privp->lip_cnt;
6331 	uchar_t els_code = privp->els_code;
6332 	struct sf_target *target = privp->target;
6333 	char what[64];
6334 
6335 	fpkt = privp->fpkt;
6336 	dflag = privp->delayed_retry;
6337 	/* use as temporary state variable */
6338 	privp->timeout = SF_INVALID_TIMEOUT;
6339 	mutex_exit(&sf->sf_mutex);
6340 
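	/*
	 * an entry marked SF_INVALID_TIMEOUT is not timed out again by
	 * sf_watch(); it is reaped there only after a new LIP
	 */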
6341 	if (privp->fpkt->fcal_pkt_comp == sf_els_callback) {
6342 		/*
6343 		 * take socal core if required. Timeouts for IB and hosts
6344 		 * are not very interesting, so we take socal core only
6345 		 * if the timeout is *not* for an IB or host.
6346 		 */
6347 		if (sf_core && (sf_core & SF_CORE_ELS_TIMEOUT) &&
6348 		    ((sf_alpa_to_switch[privp->dest_nport_id] &
6349 		    0x0d) != 0x0d) && ((privp->dest_nport_id != 1) &&
6350 		    (privp->dest_nport_id != 2) &&
6351 		    (privp->dest_nport_id != 4) &&
6352 		    (privp->dest_nport_id != 8) &&
6353 		    (privp->dest_nport_id != 0xf))) {
6354 			sf_token = (int *)(uintptr_t)fpkt->fcal_socal_request.\
6355 			    sr_soc_hdr.sh_request_token;
6356 			(void) soc_take_core(sf->sf_sochandle, sf->sf_socp);
6357 			sf_core = 0;
6358 		}
6359 		(void) sprintf(what, "ELS 0x%x", privp->els_code);
6360 	} else if (privp->fpkt->fcal_pkt_comp == sf_reportlun_callback) {
6361 		if (sf_core && (sf_core & SF_CORE_REPORTLUN_TIMEOUT)) {
6362 			sf_token = (int *)(uintptr_t)fpkt->fcal_socal_request.\
6363 			    sr_soc_hdr.sh_request_token;
6364 			(void) soc_take_core(sf->sf_sochandle, sf->sf_socp);
6365 			sf_core = 0;
6366 		}
6367 		timeout = SF_FCP_TIMEOUT;
6368 		(void) sprintf(what, "REPORT_LUNS");
6369 	} else if (privp->fpkt->fcal_pkt_comp == sf_inq_callback) {
6370 		if (sf_core && (sf_core & SF_CORE_INQUIRY_TIMEOUT)) {
6371 			sf_token = (int *)(uintptr_t)
6372 			    fpkt->fcal_socal_request.\
6373 			    sr_soc_hdr.sh_request_token;
6374 			(void) soc_take_core(sf->sf_sochandle, sf->sf_socp);
6375 			sf_core = 0;
6376 		}
6377 		timeout = SF_FCP_TIMEOUT;
6378 		(void) sprintf(what, "INQUIRY to LUN 0x%lx",
6379 		    (long)SCSA_LUN(target));
6380 	} else {
6381 		(void) sprintf(what, "UNKNOWN OPERATION");
6382 	}
6383 
6384 	if (dflag) {
6385 		/* delayed retry */
6386 		SF_DEBUG(2, (sf, CE_CONT,
6387 		    "!sf%d: %s to target %x delayed retry\n",
6388 		    ddi_get_instance(sf->sf_dip), what,
6389 		    sf_alpa_to_switch[privp->dest_nport_id]));
6390 		privp->delayed_retry = FALSE;
6391 		goto try_again;
6392 	}
6393 
6394 	sf_log(sf, CE_NOTE, "!%s to target 0x%x alpa 0x%x timed out\n",
6395 	    what, sf_alpa_to_switch[privp->dest_nport_id],
6396 	    privp->dest_nport_id);
6397 
6398 	rval = soc_abort(sf->sf_sochandle, sf->sf_socp, sf->sf_sochandle
6399 	    ->fcal_portno, fpkt, 1);
6400 	if (rval == FCAL_ABORTED || rval == FCAL_ABORT_FAILED) {
6401 		SF_DEBUG(1, (sf, CE_NOTE, "!%s abort to al_pa %x succeeded\n",
6402 		    what, privp->dest_nport_id));
6403 try_again:
6404 
6405 		mutex_enter(&sf->sf_mutex);
6406 		if (privp->prev != NULL) {
6407 			privp->prev->next = privp->next;
6408 		}
6409 		if (sf->sf_els_list == privp) {
6410 			sf->sf_els_list = privp->next;
6411 		}
6412 		if (privp->next != NULL) {
6413 			privp->next->prev = privp->prev;
6414 		}
6415 		privp->prev = privp->next = NULL;
6416 		if (lip_cnt == sf->sf_lip_cnt) {
6417 			privp->timeout = sf_watchdog_time + timeout;
6418 			if ((++(privp->retries) < sf_els_retries) ||
6419 			    (dflag && (privp->retries < SF_BSY_RETRIES))) {
6420 				mutex_exit(&sf->sf_mutex);
6421 				sf_log(sf, CE_NOTE,
6422 				    "!%s to target 0x%x retrying\n",
6423 				    what,
6424 				    sf_alpa_to_switch[privp->dest_nport_id]);
6425 				if (sf_els_transport(sf, privp) == 1) {
6426 					mutex_enter(&sf->sf_mutex);
6427 					return (sf->sf_els_list); /* success */
6428 				}
6429 				mutex_enter(&sf->sf_mutex);
6430 				fpkt = NULL;
6431 			}
6432 			if ((lip_cnt == sf->sf_lip_cnt) &&
6433 			    (els_code != LA_ELS_LOGO)) {
6434 				if (target != NULL) {
6435 					sf_offline_target(sf, target);
6436 				}
6437 				if (sf->sf_lip_cnt == lip_cnt) {
6438 					sf->sf_device_count--;
6439 					ASSERT(sf->sf_device_count >= 0);
6440 					if (sf->sf_device_count == 0) {
6441 						sf_finish_init(sf,
6442 						    sf->sf_lip_cnt);
6443 					}
6444 				}
6445 			}
6446 			privp = sf->sf_els_list;
6447 			mutex_exit(&sf->sf_mutex);
6448 			if (fpkt != NULL) {
6449 				sf_els_free(fpkt);
6450 			}
6451 		} else {
6452 			mutex_exit(&sf->sf_mutex);
6453 			sf_els_free(privp->fpkt);
6454 			privp = NULL;
6455 		}
6456 	} else {
6457 		if (sf_core && (sf_core & SF_CORE_ELS_FAILED)) {
6458 			sf_token = (int *)(uintptr_t)
6459 			    fpkt->fcal_socal_request.\
6460 			    sr_soc_hdr.sh_request_token;
6461 			(void) soc_take_core(sf->sf_sochandle, sf->sf_socp);
6462 			sf_core = 0;
6463 		}
6464 		sf_log(sf, CE_NOTE, "%s abort to target 0x%x failed. "
6465 		    "status=0x%x, forcing LIP\n", what,
6466 		    sf_alpa_to_switch[privp->dest_nport_id], rval);
6467 		privp = NULL;
6468 		if (sf->sf_lip_cnt == lip_cnt) {
6469 			sf_force_lip(sf);
6470 		}
6471 	}
6472 
6473 	mutex_enter(&sf->sf_mutex);
6474 	return (privp);
6475 }
6476 
6477 
6478 /*
6479  * called by timeout when a reset times out
6480  */
6481 /*ARGSUSED*/
6482 static void
6483 sf_check_reset_delay(void *arg)
6484 {
6485 	struct sf *sf;
6486 	struct sf_target *target;
6487 	struct sf_reset_list *rp, *tp;
6488 	uint_t lip_cnt, reset_timeout_flag = FALSE;
6489 	clock_t lb;
6490 
6491 	lb = ddi_get_lbolt();
6492 
6493 	mutex_enter(&sf_global_mutex);
6494 
6495 	sf_reset_timeout_id = 0;
6496 
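	/* walk each instance's list of pending target resets */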
6497 	for (sf = sf_head; sf != NULL; sf = sf->sf_next) {
6498 
6499 		mutex_exit(&sf_global_mutex);
6500 		mutex_enter(&sf->sf_mutex);
6501 
6502 		/* is this type cast needed? */
6503 		tp = (struct sf_reset_list *)&sf->sf_reset_list;
6504 
6505 		rp = sf->sf_reset_list;
6506 		while (rp != NULL) {
6507 			if (((rp->timeout - lb) < 0) &&
6508 			    (rp->lip_cnt == sf->sf_lip_cnt)) {
6509 				tp->next = rp->next;
6510 				mutex_exit(&sf->sf_mutex);
6511 				target = rp->target;
6512 				lip_cnt = rp->lip_cnt;
6513 				kmem_free(rp, sizeof (struct sf_reset_list));
6514 				/* abort all cmds for this target */
6515 				while (target) {
6516 					sf_abort_all(sf, target, FALSE,
6517 					    lip_cnt, TRUE);
6518 					mutex_enter(&target->sft_mutex);
6519 					if (lip_cnt == sf->sf_lip_cnt) {
6520 						target->sft_state &=
6521 						    ~SF_TARGET_BUSY;
6522 					}
6523 					mutex_exit(&target->sft_mutex);
6524 					target = target->sft_next_lun;
6525 				}
6526 				mutex_enter(&sf->sf_mutex);
6527 				tp = (struct sf_reset_list *)
6528 				    &sf->sf_reset_list;
6529 				rp = sf->sf_reset_list;
6530 				lb = ddi_get_lbolt();
6531 			} else if (rp->lip_cnt != sf->sf_lip_cnt) {
6532 				tp->next = rp->next;
6533 				kmem_free(rp, sizeof (struct sf_reset_list));
6534 				rp = tp->next;
6535 			} else {
6536 				reset_timeout_flag = TRUE;
6537 				tp = rp;
6538 				rp = rp->next;
6539 			}
6540 		}
6541 		mutex_exit(&sf->sf_mutex);
6542 		mutex_enter(&sf_global_mutex);
6543 	}
6544 
6545 	if (reset_timeout_flag && (sf_reset_timeout_id == 0)) {
6546 		sf_reset_timeout_id = timeout(sf_check_reset_delay,
6547 		    NULL, drv_usectohz(SF_TARGET_RESET_DELAY));
6548 	}
6549 
6550 	mutex_exit(&sf_global_mutex);
6551 }
6552 
6553 
6554 /*
6555  * called to "reset the bus", i.e. force loop initialization (and address
6556  * re-negotiation)
6557  */
6558 static void
6559 sf_force_lip(struct sf *sf)
6560 {
6561 	int i;
6562 	struct sf_target *target;
6563 
6564 
6565 	/* disable restart of lip if we're suspended */
6566 	mutex_enter(&sf->sf_mutex);
6567 	if (sf->sf_state & SF_STATE_SUSPENDED) {
6568 		mutex_exit(&sf->sf_mutex);
6569 		SF_DEBUG(1, (sf, CE_CONT,
6570 		    "sf_force_lip, sf%d: lip restart disabled "
6571 		    "due to DDI_SUSPEND\n",
6572 		    ddi_get_instance(sf->sf_dip)));
6573 		return;
6574 	}
6575 
6576 	sf_log(sf, CE_NOTE, "Forcing lip\n");
6577 
6578 	for (i = 0; i < sf_max_targets; i++) {
6579 		target = sf->sf_targets[i];
6580 		while (target != NULL) {
6581 			mutex_enter(&target->sft_mutex);
6582 			if (!(target->sft_state & SF_TARGET_OFFLINE))
6583 				target->sft_state |= SF_TARGET_BUSY;
6584 			mutex_exit(&target->sft_mutex);
6585 			target = target->sft_next_lun;
6586 		}
6587 	}
6588 
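	/*
	 * bump the LIP generation count and go offline until the loop
	 * comes back up or the offline timer expires
	 */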
6589 	sf->sf_lip_cnt++;
6590 	sf->sf_timer = sf_watchdog_time + SF_OFFLINE_TIMEOUT;
6591 	sf->sf_state = SF_STATE_OFFLINE;
6592 	mutex_exit(&sf->sf_mutex);
6593 	sf->sf_stats.lip_count++;		/* no mutex for this? */
6594 
6595 #ifdef DEBUG
6596 	/* are we allowing LIPs ?? */
6597 	if (sf_lip_flag != 0) {
6598 #endif
6599 		/* call the transport to force loop initialization */
6600 		if (((i = soc_force_lip(sf->sf_sochandle, sf->sf_socp,
6601 		    sf->sf_sochandle->fcal_portno, 1,
6602 		    FCAL_FORCE_LIP)) != FCAL_SUCCESS) &&
6603 		    (i != FCAL_TIMEOUT)) {
6604 			/* force LIP failed */
6605 			if (sf_core && (sf_core & SF_CORE_LIP_FAILED)) {
6606 				(void) soc_take_core(sf->sf_sochandle,
6607 				    sf->sf_socp);
6608 				sf_core = 0;
6609 			}
6610 #ifdef DEBUG
6611 			/* are we allowing reset after LIP failed ?? */
6612 			if (sf_reset_flag != 0) {
6613 #endif
6614 				/* restart socal after resetting it */
6615 				sf_log(sf, CE_NOTE,
6616 				    "!Force lip failed, status code 0x%x."
6617 				    " Resetting\n", i);
6618 				/* call transport to force a reset */
6619 				soc_force_reset(sf->sf_sochandle, sf->sf_socp,
6620 				    sf->sf_sochandle->fcal_portno, 1);
6621 #ifdef	DEBUG
6622 			}
6623 #endif
6624 		}
6625 #ifdef	DEBUG
6626 	}
6627 #endif
6628 }
6629 
6630 
6631 /*
6632  * called by the transport when an unsolicited ELS is received
6633  */
6634 static void
6635 sf_unsol_els_callback(void *arg, soc_response_t *srp, caddr_t payload)
6636 {
6637 	struct sf *sf = (struct sf *)arg;
6638 	els_payload_t	*els = (els_payload_t *)payload;
6639 	struct la_els_rjt *rsp;
6640 	int	i, tgt_id;
6641 	uchar_t dest_id;
6642 	struct fcal_packet *fpkt;
6643 	fc_frame_header_t *hp;
6644 	struct sf_els_hdr *privp;
6645 
6646 
6647 	if ((els == NULL) || ((i = srp->sr_soc_hdr.sh_byte_cnt) == 0)) {
6648 		return;
6649 	}
6650 
6651 	if (i > SOC_CQE_PAYLOAD) {
6652 		i = SOC_CQE_PAYLOAD;
6653 	}
6654 
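	/* the sender of the unsolicited ELS is the destination for any reply */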
6655 	dest_id = (uchar_t)srp->sr_fc_frame_hdr.s_id;
6656 	tgt_id = sf_alpa_to_switch[dest_id];
6657 
6658 	switch (els->els_cmd.c.ls_command) {
6659 
6660 	case LA_ELS_LOGO:
6661 		/*
6662 		 * logout received -- log the fact
6663 		 */
6664 		sf->sf_stats.tstats[tgt_id].logouts_recvd++;
6665 		sf_log(sf, CE_NOTE, "!LOGO recvd from target %x, %s\n",
6666 		    tgt_id,
6667 		    sf_lip_on_plogo ? "Forcing LIP...." : "");
6668 		if (sf_lip_on_plogo) {
6669 			sf_force_lip(sf);
6670 		}
6671 		break;
6672 
6673 	default:  /* includes LA_ELS_PLOGI */
6674 		/*
6675 		 * something besides a logout received -- we don't handle
6676 		 * this, so send back a reject saying it's unsupported
6677 		 */
6678 
6679 		sf_log(sf, CE_NOTE, "!ELS 0x%x recvd from target 0x%x\n",
6680 		    els->els_cmd.c.ls_command, tgt_id);
6681 
6682 
6683 		/* allocate room for a response */
6684 		if (sf_els_alloc(sf, dest_id, sizeof (struct sf_els_hdr),
6685 		    sizeof (struct la_els_rjt), sizeof (union sf_els_rsp),
6686 		    (caddr_t *)&privp, (caddr_t *)&rsp) == NULL) {
6687 			break;
6688 		}
6689 
6690 		fpkt = privp->fpkt;
6691 
6692 		/* fill in pkt header */
6693 		hp = &fpkt->fcal_socal_request.sr_fc_frame_hdr;
6694 		hp->r_ctl = R_CTL_ELS_RSP;
6695 		hp->f_ctl = F_CTL_LAST_SEQ | F_CTL_XCHG_CONTEXT;
6696 		hp->ox_id = srp->sr_fc_frame_hdr.ox_id;
6697 		hp->rx_id = srp->sr_fc_frame_hdr.rx_id;
6698 		fpkt->fcal_socal_request.sr_cqhdr.cq_hdr_type =
6699 		    CQ_TYPE_OUTBOUND;
6700 
6701 		fpkt->fcal_socal_request.sr_soc_hdr.sh_seg_cnt = 1;
6702 
6703 		/* fill in response */
6704 		rsp->ls_code = LA_ELS_RJT;	/* reject this ELS */
6705 		rsp->mbz[0] = 0;
6706 		rsp->mbz[1] = 0;
6707 		rsp->mbz[2] = 0;
6708 		((struct la_els_logi *)privp->rsp)->ls_code = LA_ELS_ACC;
6709 		*((int *)&rsp->reserved) = 0;
6710 		rsp->reason_code = RJT_UNSUPPORTED;
6711 		privp->retries = sf_els_retries;
6712 		privp->els_code = LA_ELS_RJT;
6713 		privp->timeout = (unsigned)0xffffffff;
6714 		(void) sf_els_transport(sf, privp);
6715 		break;
6716 	}
6717 }
6718 
6719 
6720 /*
6721  * Error logging, printing, and debug print routines
6722  */
6723 
6724 /*PRINTFLIKE3*/
6725 static void
6726 sf_log(struct sf *sf, int level, const char *fmt, ...)
6727 {
6728 	char buf[256];
6729 	dev_info_t *dip;
6730 	va_list ap;
6731 
6732 	if (sf != NULL) {
6733 		dip = sf->sf_dip;
6734 	} else {
6735 		dip = NULL;
6736 	}
6737 
6738 	va_start(ap, fmt);
6739 	(void) vsprintf(buf, fmt, ap);
6740 	va_end(ap);
6741 	scsi_log(dip, "sf", level, buf);
6742 }
6743 
6744 
6745 /*
6746  * called to get some sf kstats -- return 0 on success else return errno
6747  */
6748 static int
6749 sf_kstat_update(kstat_t *ksp, int rw)
6750 {
6751 	struct sf *sf;
6752 
6753 	if (rw == KSTAT_WRITE) {
6754 		/* can't write */
6755 		return (EACCES);
6756 	}
6757 
6758 	sf = ksp->ks_private;
6759 	sf->sf_stats.ncmds = sf->sf_ncmds;
6760 	sf->sf_stats.throttle_limit = sf->sf_throttle;
6761 	sf->sf_stats.cr_pool_size = sf->sf_cr_pool_cnt;
6762 
6763 	return (0);				/* success */
6764 }
6765 
6766 
6767 /*
6768  * Unix Entry Points
6769  */
6770 
6771 /*
6772  * driver entry point for opens on control device
6773  */
6774 /* ARGSUSED */
6775 static int
6776 sf_open(dev_t *dev_p, int flag, int otyp, cred_t *cred_p)
6777 {
6778 	dev_t dev = *dev_p;
6779 	struct sf *sf;
6780 
6781 
6782 	/* just ensure soft state exists for this device */
6783 	sf = ddi_get_soft_state(sf_state, SF_MINOR2INST(getminor(dev)));
6784 	if (sf == NULL) {
6785 		return (ENXIO);
6786 	}
6787 
6788 	++(sf->sf_check_n_close);
6789 
6790 	return (0);
6791 }
6792 
6793 
6794 /*
6795  * driver entry point for last close on control device
6796  */
6797 /* ARGSUSED */
6798 static int
6799 sf_close(dev_t dev, int flag, int otyp, cred_t *cred_p)
6800 {
6801 	struct sf *sf;
6802 
6803 	sf = ddi_get_soft_state(sf_state, SF_MINOR2INST(getminor(dev)));
6804 	if (sf == NULL) {
6805 		return (ENXIO);
6806 	}
6807 
6808 	if (!sf->sf_check_n_close) { /* if this flag is zero */
6809 		cmn_err(CE_WARN, "sf%d: trying to close unopened instance",
6810 		    SF_MINOR2INST(getminor(dev)));
6811 		return (ENODEV);
6812 	} else {
6813 		--(sf->sf_check_n_close);
6814 	}
6815 	return (0);
6816 }
6817 
6818 
6819 /*
6820  * driver entry point for sf ioctl commands
6821  */
6822 /* ARGSUSED */
6823 static int
6824 sf_ioctl(dev_t dev,
6825     int cmd, intptr_t arg, int mode, cred_t *cred_p, int *rval_p)
6826 {
6827 	struct sf *sf;
6828 	struct sf_target *target;
6829 	uchar_t al_pa;
6830 	struct sf_al_map map;
6831 	int cnt, i;
6832 	int	retval;				/* return value */
6833 	struct devctl_iocdata *dcp;
6834 	dev_info_t *cdip;
6835 	struct scsi_address ap;
6836 	scsi_hba_tran_t *tran;
6837 
6838 
6839 	sf = ddi_get_soft_state(sf_state, SF_MINOR2INST(getminor(dev)));
6840 	if (sf == NULL) {
6841 		return (ENXIO);
6842 	}
6843 
6844 	/* handle all ioctls */
6845 	switch (cmd) {
6846 
6847 	/*
6848 	 * We can use the generic implementation for these ioctls
6849 	 */
6850 	case DEVCTL_DEVICE_GETSTATE:
6851 	case DEVCTL_DEVICE_ONLINE:
6852 	case DEVCTL_DEVICE_OFFLINE:
6853 	case DEVCTL_BUS_GETSTATE:
6854 		return (ndi_devctl_ioctl(sf->sf_dip, cmd, arg, mode, 0));
6855 
6856 	/*
6857 	 * return FC map
6858 	 */
6859 	case SFIOCGMAP:
6860 		if ((sf->sf_lilp_map->lilp_magic != FCAL_LILP_MAGIC &&
6861 		    sf->sf_lilp_map->lilp_magic != FCAL_BADLILP_MAGIC) ||
6862 		    sf->sf_state != SF_STATE_ONLINE) {
6863 			retval = ENOENT;
6864 			goto dun;
6865 		}
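		/* build a snapshot of the current loop map for userland */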
6866 		mutex_enter(&sf->sf_mutex);
6867 		if (sf->sf_lilp_map->lilp_magic == FCAL_BADLILP_MAGIC) {
6868 			int i, j = 0;
6869 
6870 			/* Need to generate a fake lilp map */
6871 			for (i = 0; i < sf_max_targets; i++) {
6872 				if (sf->sf_targets[i])
6873 					sf->sf_lilp_map->lilp_alpalist[j++] =
6874 					    sf->sf_targets[i]->
6875 					    sft_hard_address;
6876 			}
6877 			sf->sf_lilp_map->lilp_length = (uchar_t)j;
6878 		}
6879 		cnt = sf->sf_lilp_map->lilp_length;
6880 		map.sf_count = (short)cnt;
6881 		bcopy((caddr_t)&sf->sf_sochandle->fcal_n_wwn,
6882 		    (caddr_t)&map.sf_hba_addr.sf_node_wwn,
6883 		    sizeof (la_wwn_t));
6884 		bcopy((caddr_t)&sf->sf_sochandle->fcal_p_wwn,
6885 		    (caddr_t)&map.sf_hba_addr.sf_port_wwn,
6886 		    sizeof (la_wwn_t));
6887 		map.sf_hba_addr.sf_al_pa = sf->sf_al_pa;
6888 		map.sf_hba_addr.sf_hard_address = 0;
6889 		map.sf_hba_addr.sf_inq_dtype = DTYPE_UNKNOWN;
6890 		for (i = 0; i < cnt; i++) {
6891 			al_pa = sf->sf_lilp_map->lilp_alpalist[i];
6892 			map.sf_addr_pair[i].sf_al_pa = al_pa;
6893 			if (al_pa == sf->sf_al_pa) {
6894 				(void) bcopy((caddr_t)&sf->sf_sochandle
6895 				    ->fcal_n_wwn, (caddr_t)&map.
6896 				    sf_addr_pair[i].sf_node_wwn,
6897 				    sizeof (la_wwn_t));
6898 				(void) bcopy((caddr_t)&sf->sf_sochandle
6899 				    ->fcal_p_wwn, (caddr_t)&map.
6900 				    sf_addr_pair[i].sf_port_wwn,
6901 				    sizeof (la_wwn_t));
6902 				map.sf_addr_pair[i].sf_hard_address =
6903 				    al_pa;
6904 				map.sf_addr_pair[i].sf_inq_dtype =
6905 				    DTYPE_PROCESSOR;
6906 				continue;
6907 			}
6908 			target = sf->sf_targets[sf_alpa_to_switch[
6909 			    al_pa]];
6910 			if (target != NULL) {
6911 				mutex_enter(&target->sft_mutex);
6912 				if (!(target->sft_state &
6913 				    (SF_TARGET_OFFLINE |
6914 				    SF_TARGET_BUSY))) {
6915 					bcopy((caddr_t)&target->
6916 					    sft_node_wwn,
6917 					    (caddr_t)&map.sf_addr_pair
6918 					    [i].sf_node_wwn,
6919 					    sizeof (la_wwn_t));
6920 					bcopy((caddr_t)&target->
6921 					    sft_port_wwn,
6922 					    (caddr_t)&map.sf_addr_pair
6923 					    [i].sf_port_wwn,
6924 					    sizeof (la_wwn_t));
6925 					map.sf_addr_pair[i].
6926 					    sf_hard_address
6927 					    = target->sft_hard_address;
6928 					map.sf_addr_pair[i].
6929 					    sf_inq_dtype
6930 					    = target->sft_device_type;
6931 					mutex_exit(&target->sft_mutex);
6932 					continue;
6933 				}
6934 				mutex_exit(&target->sft_mutex);
6935 			}
6936 			bzero((caddr_t)&map.sf_addr_pair[i].
6937 			    sf_node_wwn, sizeof (la_wwn_t));
6938 			bzero((caddr_t)&map.sf_addr_pair[i].
6939 			    sf_port_wwn, sizeof (la_wwn_t));
6940 			map.sf_addr_pair[i].sf_inq_dtype =
6941 			    DTYPE_UNKNOWN;
6942 		}
6943 		mutex_exit(&sf->sf_mutex);
6944 		if (ddi_copyout((caddr_t)&map, (caddr_t)arg,
6945 		    sizeof (struct sf_al_map), mode) != 0) {
6946 			retval = EFAULT;
6947 			goto dun;
6948 		}
6949 		break;
6950 
6951 	/*
6952 	 * handle device control ioctls
6953 	 */
6954 	case DEVCTL_DEVICE_RESET:
6955 		if (ndi_dc_allochdl((void *)arg, &dcp) != NDI_SUCCESS) {
6956 			retval = EFAULT;
6957 			goto dun;
6958 		}
6959 		if ((ndi_dc_getname(dcp) == NULL) ||
6960 		    (ndi_dc_getaddr(dcp) == NULL)) {
6961 			ndi_dc_freehdl(dcp);
6962 			retval = EINVAL;
6963 			goto dun;
6964 		}
6965 		cdip = ndi_devi_find(sf->sf_dip,
6966 		    ndi_dc_getname(dcp), ndi_dc_getaddr(dcp));
6967 		ndi_dc_freehdl(dcp);
6968 
6969 		if (cdip == NULL) {
6970 			retval = ENXIO;
6971 			goto dun;
6972 		}
6973 
6974 		if ((target = sf_get_target_from_dip(sf, cdip)) == NULL) {
6975 			retval = ENXIO;
6976 			goto dun;
6977 		}
6978 		mutex_enter(&target->sft_mutex);
6979 		if (!(target->sft_state & SF_TARGET_INIT_DONE)) {
6980 			mutex_exit(&target->sft_mutex);
6981 			retval = ENXIO;
6982 			goto dun;
6983 		}
6984 
6985 		/* This is ugly */
6986 		tran = kmem_zalloc(scsi_hba_tran_size(), KM_SLEEP);
6987 		bcopy(target->sft_tran, tran, scsi_hba_tran_size());
6988 		mutex_exit(&target->sft_mutex);
6989 		ap.a_hba_tran = tran;
6990 		ap.a_target = sf_alpa_to_switch[target->sft_al_pa];
6991 		if (sf_reset(&ap, RESET_TARGET) == FALSE) {
6992 			retval = EIO;
6993 		} else {
6994 			retval = 0;
6995 		}
6996 		kmem_free(tran, scsi_hba_tran_size());
6997 		goto dun;
6998 
6999 	case DEVCTL_BUS_QUIESCE:
7000 	case DEVCTL_BUS_UNQUIESCE:
7001 		retval = ENOTSUP;
7002 		goto dun;
7003 
7004 	case DEVCTL_BUS_RESET:
7005 	case DEVCTL_BUS_RESETALL:
7006 		sf_force_lip(sf);
7007 		break;
7008 
7009 	default:
7010 		retval = ENOTTY;
7011 		goto dun;
7012 	}
7013 
7014 	retval = 0;				/* success */
7015 
7016 dun:
7017 	return (retval);
7018 }
7019 
7020 
7021 /*
7022  * get the target given a DIP
7023  */
7024 static struct sf_target *
7025 sf_get_target_from_dip(struct sf *sf, dev_info_t *dip)
7026 {
7027 	int i;
7028 	struct sf_target *target;
7029 
7030 
7031 	/* scan each hash queue for the DIP in question */
7032 	for (i = 0; i < SF_NUM_HASH_QUEUES; i++) {
7033 		target = sf->sf_wwn_lists[i];
7034 		while (target != NULL) {
7035 			if (target->sft_dip == dip) {
7036 				return (target); /* success: target found */
7037 			}
7038 			target = target->sft_next;
7039 		}
7040 	}
7041 	return (NULL);				/* failure: target not found */
7042 }
7043 
7044 
7045 /*
7046  * called by the transport to get an event cookie
7047  */
7048 static int
7049 sf_bus_get_eventcookie(dev_info_t *dip, dev_info_t *rdip, char *name,
7050     ddi_eventcookie_t *event_cookiep)
7051 {
7052 	struct sf *sf;
7053 
7054 	sf = ddi_get_soft_state(sf_state, ddi_get_instance(dip));
7055 	if (sf == NULL) {
7056 		/* can't find instance for this device */
7057 		return (DDI_FAILURE);
7058 	}
7059 
7060 	return (ndi_event_retrieve_cookie(sf->sf_event_hdl, rdip, name,
7061 	    event_cookiep, NDI_EVENT_NOPASS));
7062 
7063 }
7064 
7065 
7066 /*
7067  * called by the transport to add an event callback
7068  */
7069 static int
7070 sf_bus_add_eventcall(dev_info_t *dip, dev_info_t *rdip,
7071     ddi_eventcookie_t eventid, void (*callback)(dev_info_t *dip,
7072     ddi_eventcookie_t event, void *arg, void *impl_data), void *arg,
7073     ddi_callback_id_t *cb_id)
7074 {
7075 	struct sf *sf;
7076 
7077 	sf = ddi_get_soft_state(sf_state, ddi_get_instance(dip));
7078 	if (sf == NULL) {
7079 		/* can't find instance for this device */
7080 		return (DDI_FAILURE);
7081 	}
7082 
7083 	return (ndi_event_add_callback(sf->sf_event_hdl, rdip,
7084 	    eventid, callback, arg, NDI_SLEEP, cb_id));
7085 
7086 }
7087 
7088 
7089 /*
7090  * called by the transport to remove an event callback
7091  */
7092 static int
7093 sf_bus_remove_eventcall(dev_info_t *devi, ddi_callback_id_t cb_id)
7094 {
7095 	struct sf *sf;
7096 
7097 	sf = ddi_get_soft_state(sf_state, ddi_get_instance(devi));
7098 	if (sf == NULL) {
7099 		/* can't find instance for this device */
7100 		return (DDI_FAILURE);
7101 	}
7102 
7103 	return (ndi_event_remove_callback(sf->sf_event_hdl, cb_id));
7104 }
7105 
7106 
7107 /*
7108  * called by the transport to post an event
7109  */
7110 static int
7111 sf_bus_post_event(dev_info_t *dip, dev_info_t *rdip,
7112     ddi_eventcookie_t eventid, void *impldata)
7113 {
7114 	ddi_eventcookie_t remove_cookie, cookie;
7115 
7116 	/* is this a remove event ?? */
7117 	struct sf *sf = ddi_get_soft_state(sf_state, ddi_get_instance(dip));
7118 	remove_cookie = ndi_event_tag_to_cookie(sf->sf_event_hdl,
7119 	    SF_EVENT_TAG_REMOVE);
7120 
7121 	if (remove_cookie == eventid) {
7122 		struct sf_target *target;
7123 
7124 		/* handle remove event */
7125 
7126 		if (sf == NULL) {
7127 			/* no sf instance for this device */
7128 			return (NDI_FAILURE);
7129 		}
7130 
7131 		/* get the target for this event */
7132 		if ((target = sf_get_target_from_dip(sf, rdip)) != NULL) {
7133 			/*
7134 			 * clear device info for this target and mark as
7135 			 * not done
7136 			 */
7137 			mutex_enter(&target->sft_mutex);
7138 			target->sft_dip = NULL;
7139 			target->sft_state &= ~SF_TARGET_INIT_DONE;
7140 			mutex_exit(&target->sft_mutex);
7141 			return (NDI_SUCCESS); /* event handled */
7142 		}
7143 
7144 		/* no target for this event */
7145 		return (NDI_FAILURE);
7146 	}
7147 
7148 	/* an insertion event */
7149 	if (ndi_busop_get_eventcookie(dip, rdip, FCAL_INSERT_EVENT, &cookie)
7150 	    != NDI_SUCCESS) {
7151 		return (NDI_FAILURE);
7152 	}
7153 
7154 	return (ndi_post_event(dip, rdip, cookie, impldata));
7155 }
7156 
7157 
7158 /*
7159  * the sf hotplug daemon, one thread per sf instance
7160  */
7161 static void
7162 sf_hp_daemon(void *arg)
7163 {
7164 	struct sf *sf = (struct sf *)arg;
7165 	struct sf_hp_elem *elem;
7166 	struct sf_target *target;
7167 	int tgt_id;
7168 	callb_cpr_t cprinfo;
7169 
7170 	CALLB_CPR_INIT(&cprinfo, &sf->sf_hp_daemon_mutex,
7171 	    callb_generic_cpr, "sf_hp_daemon");
7172 
7173 	mutex_enter(&sf->sf_hp_daemon_mutex);
7174 
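	/* process hotplug work items until signaled to exit */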
7175 	do {
7176 		while (sf->sf_hp_elem_head != NULL) {
7177 
7178 			/* save ptr to head of list */
7179 			elem = sf->sf_hp_elem_head;
7180 
7181 			/* take element off of list */
7182 			if (sf->sf_hp_elem_head == sf->sf_hp_elem_tail) {
7183 				/* element only one in list -- list now empty */
7184 				sf->sf_hp_elem_head = NULL;
7185 				sf->sf_hp_elem_tail = NULL;
7186 			} else {
7187 				/* remove element from head of list */
7188 				sf->sf_hp_elem_head = sf->sf_hp_elem_head->next;
7189 			}
7190 
7191 			mutex_exit(&sf->sf_hp_daemon_mutex);
7192 
7193 			switch (elem->what) {
7194 			case SF_ONLINE:
7195 				/* online this target */
7196 				target = elem->target;
7197 				(void) ndi_devi_online(elem->dip, 0);
7198 				(void) ndi_event_retrieve_cookie(
7199 				    sf->sf_event_hdl,
7200 				    target->sft_dip, FCAL_INSERT_EVENT,
7201 				    &sf_insert_eid, NDI_EVENT_NOPASS);
7202 				(void) ndi_event_run_callbacks(sf->sf_event_hdl,
7203 				    target->sft_dip, sf_insert_eid, NULL);
7204 				break;
7205 			case SF_OFFLINE:
7206 				/* offline this target */
7207 				target = elem->target;
7208 				tgt_id = sf_alpa_to_switch[target->sft_al_pa];
7209 				/* don't do NDI_DEVI_REMOVE for now */
7210 				if (ndi_devi_offline(elem->dip, 0) !=
7211 				    NDI_SUCCESS) {
7212 					SF_DEBUG(1, (sf, CE_WARN, "target %x, "
7213 					    "device offline failed", tgt_id));
7214 				} else {
7215 					SF_DEBUG(1, (sf, CE_NOTE, "target %x, "
7216 					    "device offline succeeded\n",
7217 					    tgt_id));
7218 				}
7219 				break;
7220 			}
7221 			kmem_free(elem, sizeof (struct sf_hp_elem));
7222 			mutex_enter(&sf->sf_hp_daemon_mutex);
7223 		}
7224 
7225 		/* if exit is not already signaled */
7226 		if (sf->sf_hp_exit == 0) {
7227 			/* wait to be signaled by work or exit */
7228 			CALLB_CPR_SAFE_BEGIN(&cprinfo);
7229 			cv_wait(&sf->sf_hp_daemon_cv, &sf->sf_hp_daemon_mutex);
7230 			CALLB_CPR_SAFE_END(&cprinfo, &sf->sf_hp_daemon_mutex);
7231 		}
7232 	} while (sf->sf_hp_exit == 0);
7233 
7234 	/* sf_hp_daemon_mutex is dropped by CALLB_CPR_EXIT */
7235 	CALLB_CPR_EXIT(&cprinfo);
7236 	thread_exit();			/* no more hotplug thread */
7237 	/* NOTREACHED */
7238 }
7239