xref: /illumos-gate/usr/src/uts/common/io/fibre-channel/ulp/fcp.c (revision 98b45ebecf42e6d81a4aa85f88ffcc06af817f34)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 /*
22  * Copyright (c) 1999, 2010, Oracle and/or its affiliates. All rights reserved.
23  *
24  * Fibre Channel SCSI ULP Mapping driver
25  */
26 
27 #include <sys/scsi/scsi.h>
28 #include <sys/types.h>
29 #include <sys/varargs.h>
30 #include <sys/devctl.h>
31 #include <sys/thread.h>
33 #include <sys/open.h>
34 #include <sys/file.h>
35 #include <sys/sunndi.h>
36 #include <sys/console.h>
37 #include <sys/proc.h>
38 #include <sys/time.h>
39 #include <sys/utsname.h>
40 #include <sys/scsi/impl/scsi_reset_notify.h>
41 #include <sys/ndi_impldefs.h>
42 #include <sys/byteorder.h>
43 #include <sys/fs/dv_node.h>
44 #include <sys/ctype.h>
45 #include <sys/sunmdi.h>
46 
47 #include <sys/fibre-channel/fc.h>
48 #include <sys/fibre-channel/impl/fc_ulpif.h>
49 #include <sys/fibre-channel/ulp/fcpvar.h>
50 
51 /*
52  * Discovery Process
53  * =================
54  *
55  *    The discovery process is a major function of FCP.	 In order to help
56  * understand that function a flow diagram is given here.  This diagram
57  * doesn't claim to cover all the cases and the events that can occur during
58  * the discovery process nor the subtleties of the code.  The code paths shown
59  * are simplified.  Its purpose is to help the reader (and potential bug
60  * fixer) have an overall view of the logic of the code.  For that reason the
61  * diagram covers the simple case of the line coming up cleanly or of a new
62  * port attaching to FCP while the link is up.  The reader must keep in mind
63  * that:
64  *
65  *	- There are special cases where bringing devices online and offline
66  *	  is driven by Ioctl.
67  *
68  *	- The behavior of the discovery process can be modified through the
69  *	  .conf file.
70  *
71  *	- The line can go down and come back up at any time during the
72  *	  discovery process which explains some of the complexity of the code.
73  *
74  * ............................................................................
75  *
76  * STEP 1: The line comes up or a new Fibre Channel port attaches to FCP.
77  *
78  *
79  *			+-------------------------+
80  *   fp/fctl module --->|    fcp_port_attach	  |
81  *			+-------------------------+
82  *	   |			     |
83  *	   |			     |
84  *	   |			     v
85  *	   |		+-------------------------+
86  *	   |		| fcp_handle_port_attach  |
87  *	   |		+-------------------------+
88  *	   |				|
89  *	   |				|
90  *	   +--------------------+	|
91  *				|	|
92  *				v	v
93  *			+-------------------------+
94  *			|   fcp_statec_callback	  |
95  *			+-------------------------+
96  *				    |
97  *				    |
98  *				    v
99  *			+-------------------------+
100  *			|    fcp_handle_devices	  |
101  *			+-------------------------+
102  *				    |
103  *				    |
104  *				    v
105  *			+-------------------------+
106  *			|   fcp_handle_mapflags	  |
107  *			+-------------------------+
108  *				    |
109  *				    |
110  *				    v
111  *			+-------------------------+
112  *			|     fcp_send_els	  |
113  *			|			  |
114  *			| PLOGI or PRLI To all the|
115  *			| reachable devices.	  |
116  *			+-------------------------+
117  *
118  *
119  * ............................................................................
120  *
121  * STEP 2: The callback functions of the PLOGI and/or PRLI requests sent during
122  *	   STEP 1 are called (it is actually the same function).
123  *
124  *
125  *			+-------------------------+
126  *			|    fcp_icmd_callback	  |
127  *   fp/fctl module --->|			  |
128  *			| callback for PLOGI and  |
129  *			| PRLI.			  |
130  *			+-------------------------+
131  *				     |
132  *				     |
133  *	    Received PLOGI Accept   /-\	  Received PRLI Accept
134  *		       _ _ _ _ _ _ /   \_ _ _ _ _ _
135  *		      |		   \   /	   |
136  *		      |		    \-/		   |
137  *		      |				   |
138  *		      v				   v
139  *	+-------------------------+	+-------------------------+
140  *	|     fcp_send_els	  |	|     fcp_send_scsi	  |
141  *	|			  |	|			  |
142  *	|	  PRLI		  |	|	REPORT_LUN	  |
143  *	+-------------------------+	+-------------------------+
144  *
145  * ............................................................................
146  *
147  * STEP 3: The callback functions of the SCSI commands issued by FCP are called
148  *	   (It is actually the same function).
149  *
150  *
151  *			    +-------------------------+
152  *   fp/fctl module ------->|	 fcp_scsi_callback    |
153  *			    +-------------------------+
154  *					|
155  *					|
156  *					|
157  *	Receive REPORT_LUN reply       /-\	Receive INQUIRY PAGE83 reply
158  *		  _ _ _ _ _ _ _ _ _ _ /	  \_ _ _ _ _ _ _ _ _ _ _ _
159  *		 |		      \	  /			  |
160  *		 |		       \-/			  |
161  *		 |			|			  |
162  *		 | Receive INQUIRY reply|			  |
163  *		 |			|			  |
164  *		 v			v			  v
165  * +------------------------+ +----------------------+ +----------------------+
166  * |  fcp_handle_reportlun  | |	 fcp_handle_inquiry  | |  fcp_handle_page83   |
167  * |(Called for each Target)| | (Called for each LUN)| |(Called for each LUN) |
168  * +------------------------+ +----------------------+ +----------------------+
169  *		 |			|			  |
170  *		 |			|			  |
171  *		 |			|			  |
172  *		 v			v			  |
173  *     +-----------------+	+-----------------+		  |
174  *     |  fcp_send_scsi	 |	|  fcp_send_scsi  |		  |
175  *     |		 |	|		  |		  |
176  *     |     INQUIRY	 |	| INQUIRY PAGE83  |		  |
177  *     |  (To each LUN)	 |	+-----------------+		  |
178  *     +-----------------+					  |
179  *								  |
180  *								  v
181  *						      +------------------------+
182  *						      |	 fcp_call_finish_init  |
183  *						      +------------------------+
184  *								  |
185  *								  v
186  *						 +-----------------------------+
187  *						 |  fcp_call_finish_init_held  |
188  *						 +-----------------------------+
189  *								  |
190  *								  |
191  *			   All LUNs scanned			 /-\
192  *			       _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ __ /   \
193  *			      |					\   /
194  *			      |					 \-/
195  *			      v					  |
196  *		     +------------------+			  |
197  *		     |	fcp_finish_tgt	|			  |
198  *		     +------------------+			  |
199  *			      |	  Target Not Offline and	  |
200  *  Target Not Offline and    |	  not marked and tgt_node_state	  |
201  *  marked		     /-\  not FCP_TGT_NODE_ON_DEMAND	  |
202  *		_ _ _ _ _ _ /	\_ _ _ _ _ _ _ _		  |
203  *	       |	    \	/		|		  |
204  *	       |	     \-/		|		  |
205  *	       v				v		  |
206  * +----------------------------+     +-------------------+	  |
207  * |	 fcp_offline_target	|     |	 fcp_create_luns  |	  |
208  * |				|     +-------------------+	  |
209  * | A structure fcp_tgt_elem	|		|		  |
210  * | is created and queued in	|		v		  |
211  * | the FCP port list		|     +-------------------+	  |
212  * | port_offline_tgts.	 It	|     |	 fcp_pass_to_hp	  |	  |
213  * | will be unqueued by the	|     |			  |	  |
214  * | watchdog timer.		|     | Called for each	  |	  |
215  * +----------------------------+     | LUN. Dispatches	  |	  |
216  *		  |		      | fcp_hp_task	  |	  |
217  *		  |		      +-------------------+	  |
218  *		  |				|		  |
219  *		  |				|		  |
220  *		  |				|		  |
221  *		  |				+---------------->|
222  *		  |						  |
223  *		  +---------------------------------------------->|
224  *								  |
225  *								  |
226  *		All the targets (devices) have been scanned	 /-\
227  *				_ _ _ _	_ _ _ _	_ _ _ _ _ _ _ _ /   \
228  *			       |				\   /
229  *			       |				 \-/
230  *	    +-------------------------------------+		  |
231  *	    |		fcp_finish_init		  |		  |
232  *	    |					  |		  |
233  *	    | Signal broadcasts the condition	  |		  |
234  *	    | variable port_config_cv of the FCP  |		  |
235  *	    | port.  One potential code sequence  |		  |
236  *	    | waiting on the condition variable	  |		  |
237  *	    | the code sequence handling	  |		  |
238  *	    | BUS_CONFIG_ALL and BUS_CONFIG_DRIVER|		  |
239  *	    | The other is in the function	  |		  |
240  *	    | fcp_reconfig_wait which is called	  |		  |
241  *	    | in the transmit path preventing IOs |		  |
242  *	    | from going through till the disco-  |		  |
243  *	    | very process is over.		  |		  |
244  *	    +-------------------------------------+		  |
245  *			       |				  |
246  *			       |				  |
247  *			       +--------------------------------->|
248  *								  |
249  *								  v
250  *								Return
251  *
252  * ............................................................................
253  *
254  * STEP 4: The hot plug task is called (for each fcp_hp_elem).
255  *
256  *
257  *			+-------------------------+
258  *			|      fcp_hp_task	  |
259  *			+-------------------------+
260  *				     |
261  *				     |
262  *				     v
263  *			+-------------------------+
264  *			|     fcp_trigger_lun	  |
265  *			+-------------------------+
266  *				     |
267  *				     |
268  *				     v
269  *		   Bring offline    /-\	 Bring online
270  *		  _ _ _ _ _ _ _ _ _/   \_ _ _ _ _ _ _ _ _ _
271  *		 |		   \   /		   |
272  *		 |		    \-/			   |
273  *		 v					   v
274  *    +---------------------+			+-----------------------+
275  *    |	 fcp_offline_child  |			|      fcp_get_cip	|
276  *    +---------------------+			|			|
277  *						| Creates a dev_info_t	|
278  *						| or a mdi_pathinfo_t	|
279  *						| depending on whether	|
280  *						| mpxio is on or off.	|
281  *						+-----------------------+
282  *							   |
283  *							   |
284  *							   v
285  *						+-----------------------+
286  *						|  fcp_online_child	|
287  *						|			|
288  *						| Set device online	|
289  *						| using NDI or MDI.	|
290  *						+-----------------------+
291  *
292  * ............................................................................
293  *
294  * STEP 5: The watchdog timer expires.  The watchdog timer does much more than
295  *	   what is described here.  We only show the target offline path.
296  *
297  *
298  *			 +--------------------------+
299  *			 |	  fcp_watch	    |
300  *			 +--------------------------+
301  *				       |
302  *				       |
303  *				       v
304  *			 +--------------------------+
305  *			 |  fcp_scan_offline_tgts   |
306  *			 +--------------------------+
307  *				       |
308  *				       |
309  *				       v
310  *			 +--------------------------+
311  *			 |  fcp_offline_target_now  |
312  *			 +--------------------------+
313  *				       |
314  *				       |
315  *				       v
316  *			 +--------------------------+
317  *			 |   fcp_offline_tgt_luns   |
318  *			 +--------------------------+
319  *				       |
320  *				       |
321  *				       v
322  *			 +--------------------------+
323  *			 |     fcp_offline_lun	    |
324  *			 +--------------------------+
325  *				       |
326  *				       |
327  *				       v
328  *		     +----------------------------------+
329  *		     |	     fcp_offline_lun_now	|
330  *		     |					|
331  *		     | A request (or two if mpxio) is	|
332  *		     | sent to the hot plug task using	|
333  *		     | a fcp_hp_elem structure.		|
334  *		     +----------------------------------+
335  */
336 
337 /*
338  * Functions registered with DDI framework
339  */
340 static int fcp_attach(dev_info_t *devi, ddi_attach_cmd_t cmd);
341 static int fcp_detach(dev_info_t *devi, ddi_detach_cmd_t cmd);
342 static int fcp_open(dev_t *devp, int flag, int otype, cred_t *credp);
343 static int fcp_close(dev_t dev, int flag, int otype, cred_t *credp);
344 static int fcp_ioctl(dev_t dev, int cmd, intptr_t data, int mode,
345     cred_t *credp, int *rval);
346 
347 /*
348  * Functions registered with FC Transport framework
349  */
350 static int fcp_port_attach(opaque_t ulph, fc_ulp_port_info_t *pinfo,
351     fc_attach_cmd_t cmd,  uint32_t s_id);
352 static int fcp_port_detach(opaque_t ulph, fc_ulp_port_info_t *info,
353     fc_detach_cmd_t cmd);
354 static int fcp_port_ioctl(opaque_t ulph, opaque_t port_handle, dev_t dev,
355     int cmd, intptr_t data, int mode, cred_t *credp, int *rval,
356     uint32_t claimed);
357 static int fcp_els_callback(opaque_t ulph, opaque_t port_handle,
358     fc_unsol_buf_t *buf, uint32_t claimed);
359 static int fcp_data_callback(opaque_t ulph, opaque_t port_handle,
360     fc_unsol_buf_t *buf, uint32_t claimed);
361 static void fcp_statec_callback(opaque_t ulph, opaque_t port_handle,
362     uint32_t port_state, uint32_t port_top, fc_portmap_t *devlist,
363     uint32_t  dev_cnt, uint32_t port_sid);
364 
365 /*
366  * Functions registered with SCSA framework
367  */
368 static int fcp_phys_tgt_init(dev_info_t *hba_dip, dev_info_t *tgt_dip,
369     scsi_hba_tran_t *hba_tran, struct scsi_device *sd);
370 static int fcp_scsi_tgt_init(dev_info_t *hba_dip, dev_info_t *tgt_dip,
371     scsi_hba_tran_t *hba_tran, struct scsi_device *sd);
372 static void fcp_scsi_tgt_free(dev_info_t *hba_dip, dev_info_t *tgt_dip,
373     scsi_hba_tran_t *hba_tran, struct scsi_device *sd);
374 static int fcp_scsi_start(struct scsi_address *ap, struct scsi_pkt *pkt);
375 static int fcp_scsi_abort(struct scsi_address *ap, struct scsi_pkt *pkt);
376 static int fcp_scsi_reset(struct scsi_address *ap, int level);
377 static int fcp_scsi_getcap(struct scsi_address *ap, char *cap, int whom);
378 static int fcp_scsi_setcap(struct scsi_address *ap, char *cap, int value,
379     int whom);
380 static void fcp_pkt_teardown(struct scsi_pkt *pkt);
381 static int fcp_scsi_reset_notify(struct scsi_address *ap, int flag,
382     void (*callback)(caddr_t), caddr_t arg);
383 static int fcp_scsi_bus_get_eventcookie(dev_info_t *dip, dev_info_t *rdip,
384     char *name, ddi_eventcookie_t *event_cookiep);
385 static int fcp_scsi_bus_add_eventcall(dev_info_t *dip, dev_info_t *rdip,
386     ddi_eventcookie_t eventid, void (*callback)(), void *arg,
387     ddi_callback_id_t *cb_id);
388 static int fcp_scsi_bus_remove_eventcall(dev_info_t *devi,
389     ddi_callback_id_t cb_id);
390 static int fcp_scsi_bus_post_event(dev_info_t *dip, dev_info_t *rdip,
391     ddi_eventcookie_t eventid, void *impldata);
392 static int fcp_scsi_bus_config(dev_info_t *parent, uint_t flag,
393     ddi_bus_config_op_t op, void *arg, dev_info_t **childp);
394 static int fcp_scsi_bus_unconfig(dev_info_t *parent, uint_t flag,
395     ddi_bus_config_op_t op, void *arg);
396 
397 /*
398  * Internal functions
399  */
400 static int fcp_setup_device_data_ioctl(int cmd, struct fcp_ioctl *data,
401     int mode, int *rval);
402 
403 static int fcp_setup_scsi_ioctl(struct fcp_scsi_cmd *u_fscsi,
404     int mode, int *rval);
405 static int fcp_copyin_scsi_cmd(caddr_t base_addr,
406     struct fcp_scsi_cmd *fscsi, int mode);
407 static int fcp_copyout_scsi_cmd(struct fcp_scsi_cmd *fscsi,
408     caddr_t base_addr, int mode);
409 static int fcp_send_scsi_ioctl(struct fcp_scsi_cmd *fscsi);
410 
411 static struct fcp_tgt *fcp_port_create_tgt(struct fcp_port *pptr,
412     la_wwn_t *pwwn, int	*ret_val, int *fc_status, int *fc_pkt_state,
413     int *fc_pkt_reason, int *fc_pkt_action);
414 static int fcp_tgt_send_plogi(struct fcp_tgt *ptgt, int *fc_status,
415     int *fc_pkt_state, int *fc_pkt_reason, int *fc_pkt_action);
416 static int fcp_tgt_send_prli(struct fcp_tgt	*ptgt, int *fc_status,
417     int *fc_pkt_state, int *fc_pkt_reason, int *fc_pkt_action);
418 static void fcp_ipkt_sema_init(struct fcp_ipkt *icmd);
419 static int fcp_ipkt_sema_wait(struct fcp_ipkt *icmd);
420 static void fcp_ipkt_sema_callback(struct fc_packet *fpkt);
421 static void fcp_ipkt_sema_cleanup(struct fcp_ipkt *icmd);
422 
423 static void fcp_handle_devices(struct fcp_port *pptr,
424     fc_portmap_t devlist[], uint32_t dev_cnt, int link_cnt,
425     fcp_map_tag_t *map_tag, int cause);
426 static int fcp_handle_mapflags(struct fcp_port *pptr,
427     struct fcp_tgt *ptgt, fc_portmap_t *map_entry, int link_cnt,
428     int tgt_cnt, int cause);
429 static int fcp_handle_reportlun_changed(struct fcp_tgt *ptgt, int cause);
430 static int fcp_send_els(struct fcp_port *pptr, struct fcp_tgt *ptgt,
431     struct fcp_ipkt *icmd, uchar_t opcode, int lcount, int tcount, int cause);
432 static void fcp_update_state(struct fcp_port *pptr, uint32_t state,
433     int cause);
434 static void fcp_update_tgt_state(struct fcp_tgt *ptgt, int flag,
435     uint32_t state);
436 static struct fcp_port *fcp_get_port(opaque_t port_handle);
437 static void fcp_unsol_callback(fc_packet_t *fpkt);
438 static void fcp_unsol_resp_init(fc_packet_t *pkt, fc_unsol_buf_t *buf,
439     uchar_t r_ctl, uchar_t type);
440 static int fcp_unsol_prli(struct fcp_port *pptr, fc_unsol_buf_t *buf);
441 static struct fcp_ipkt *fcp_icmd_alloc(struct fcp_port *pptr,
442     struct fcp_tgt *ptgt, int cmd_len, int resp_len, int data_len,
443     int nodma, int lcount, int tcount, int cause, uint32_t rscn_count);
444 static void fcp_icmd_free(struct fcp_port *pptr, struct fcp_ipkt *icmd);
445 static int fcp_alloc_dma(struct fcp_port *pptr, struct fcp_ipkt *icmd,
446     int nodma, int flags);
447 static void fcp_free_dma(struct fcp_port *pptr, struct fcp_ipkt *icmd);
448 static struct fcp_tgt *fcp_lookup_target(struct fcp_port *pptr,
449     uchar_t *wwn);
450 static struct fcp_tgt *fcp_get_target_by_did(struct fcp_port *pptr,
451     uint32_t d_id);
452 static void fcp_icmd_callback(fc_packet_t *fpkt);
453 static int fcp_send_scsi(struct fcp_lun *plun, uchar_t opcode,
454     int len, int lcount, int tcount, int cause, uint32_t rscn_count);
455 static int fcp_check_reportlun(struct fcp_rsp *rsp, fc_packet_t *fpkt);
456 static void fcp_scsi_callback(fc_packet_t *fpkt);
457 static void fcp_retry_scsi_cmd(fc_packet_t *fpkt);
458 static void fcp_handle_inquiry(fc_packet_t *fpkt, struct fcp_ipkt *icmd);
459 static void fcp_handle_reportlun(fc_packet_t *fpkt, struct fcp_ipkt *icmd);
460 static struct fcp_lun *fcp_get_lun(struct fcp_tgt *ptgt,
461     uint16_t lun_num);
462 static int fcp_finish_tgt(struct fcp_port *pptr, struct fcp_tgt *ptgt,
463     int link_cnt, int tgt_cnt, int cause);
464 static void fcp_finish_init(struct fcp_port *pptr);
465 static void fcp_create_luns(struct fcp_tgt *ptgt, int link_cnt,
466     int tgt_cnt, int cause);
467 static int fcp_trigger_lun(struct fcp_lun *plun, child_info_t *cip,
468     int old_mpxio, int online, int link_cnt, int tgt_cnt, int flags);
469 static int fcp_offline_target(struct fcp_port *pptr, struct fcp_tgt *ptgt,
470     int link_cnt, int tgt_cnt, int nowait, int flags);
471 static void fcp_offline_target_now(struct fcp_port *pptr,
472     struct fcp_tgt *ptgt, int link_cnt, int tgt_cnt, int flags);
473 static void fcp_offline_tgt_luns(struct fcp_tgt *ptgt, int link_cnt,
474     int tgt_cnt, int flags);
475 static void fcp_offline_lun(struct fcp_lun *plun, int link_cnt, int tgt_cnt,
476     int nowait, int flags);
477 static void fcp_prepare_offline_lun(struct fcp_lun *plun, int link_cnt,
478     int tgt_cnt);
479 static void fcp_offline_lun_now(struct fcp_lun *plun, int link_cnt,
480     int tgt_cnt, int flags);
481 static void fcp_scan_offline_luns(struct fcp_port *pptr);
482 static void fcp_scan_offline_tgts(struct fcp_port *pptr);
483 static void fcp_update_offline_flags(struct fcp_lun *plun);
484 static struct fcp_pkt *fcp_scan_commands(struct fcp_lun *plun);
485 static void fcp_abort_commands(struct fcp_pkt *head, struct
486     fcp_port *pptr);
487 static void fcp_cmd_callback(fc_packet_t *fpkt);
488 static void fcp_complete_pkt(fc_packet_t *fpkt);
489 static int fcp_validate_fcp_response(struct fcp_rsp *rsp,
490     struct fcp_port *pptr);
491 static int fcp_device_changed(struct fcp_port *pptr, struct fcp_tgt *ptgt,
492     fc_portmap_t *map_entry, int link_cnt, int tgt_cnt, int cause);
493 static struct fcp_lun *fcp_alloc_lun(struct fcp_tgt *ptgt);
494 static void fcp_dealloc_lun(struct fcp_lun *plun);
495 static struct fcp_tgt *fcp_alloc_tgt(struct fcp_port *pptr,
496     fc_portmap_t *map_entry, int link_cnt);
497 static void fcp_dealloc_tgt(struct fcp_tgt *ptgt);
498 static void fcp_queue_ipkt(struct fcp_port *pptr, fc_packet_t *fpkt);
499 static int fcp_transport(opaque_t port_handle, fc_packet_t *fpkt,
500     int internal);
501 static void fcp_log(int level, dev_info_t *dip, const char *fmt, ...);
502 static int fcp_handle_port_attach(opaque_t ulph, fc_ulp_port_info_t *pinfo,
503     uint32_t s_id, int instance);
504 static int fcp_handle_port_detach(struct fcp_port *pptr, int flag,
505     int instance);
506 static void fcp_cleanup_port(struct fcp_port *pptr, int instance);
507 static int fcp_kmem_cache_constructor(struct scsi_pkt *, scsi_hba_tran_t *,
508     int);
509 static void fcp_kmem_cache_destructor(struct  scsi_pkt *, scsi_hba_tran_t *);
510 static int fcp_pkt_setup(struct scsi_pkt *, int (*)(), caddr_t);
511 static int fcp_alloc_cmd_resp(struct fcp_port *pptr, fc_packet_t *fpkt,
512     int flags);
513 static void fcp_free_cmd_resp(struct fcp_port *pptr, fc_packet_t *fpkt);
514 static int fcp_reset_target(struct scsi_address *ap, int level);
515 static int fcp_commoncap(struct scsi_address *ap, char *cap,
516     int val, int tgtonly, int doset);
517 static int fcp_scsi_get_name(struct scsi_device *sd, char *name, int len);
518 static int fcp_scsi_get_bus_addr(struct scsi_device *sd, char *name, int len);
519 static int fcp_linkreset(struct fcp_port *pptr, struct scsi_address *ap,
520     int sleep);
521 static int fcp_handle_port_resume(opaque_t ulph, fc_ulp_port_info_t *pinfo,
522     uint32_t s_id, fc_attach_cmd_t cmd, int instance);
523 static void fcp_cp_pinfo(struct fcp_port *pptr, fc_ulp_port_info_t *pinfo);
524 static void fcp_process_elem(struct fcp_hp_elem *elem, int result);
525 static child_info_t *fcp_get_cip(struct fcp_lun *plun, child_info_t *cip,
526     int lcount, int tcount);
527 static int fcp_is_dip_present(struct fcp_lun *plun, dev_info_t *cdip);
528 static int fcp_is_child_present(struct fcp_lun *plun, child_info_t *cip);
529 static dev_info_t *fcp_create_dip(struct fcp_lun *plun, int link_cnt,
530     int tgt_cnt);
531 static dev_info_t *fcp_find_existing_dip(struct fcp_lun *plun,
532     dev_info_t *pdip, caddr_t name);
533 static int fcp_online_child(struct fcp_lun *plun, child_info_t *cip,
534     int lcount, int tcount, int flags, int *circ);
535 static int fcp_offline_child(struct fcp_lun *plun, child_info_t *cip,
536     int lcount, int tcount, int flags, int *circ);
537 static void fcp_remove_child(struct fcp_lun *plun);
538 static void fcp_watch(void *arg);
539 static void fcp_check_reset_delay(struct fcp_port *pptr);
540 static void fcp_abort_all(struct fcp_port *pptr, struct fcp_tgt *ttgt,
541     struct fcp_lun *rlun, int tgt_cnt);
542 struct fcp_port *fcp_soft_state_unlink(struct fcp_port *pptr);
543 static struct fcp_lun *fcp_lookup_lun(struct fcp_port *pptr,
544     uchar_t *wwn, uint16_t lun);
545 static void fcp_prepare_pkt(struct fcp_port *pptr, struct fcp_pkt *cmd,
546     struct fcp_lun *plun);
547 static void fcp_post_callback(struct fcp_pkt *cmd);
548 static int fcp_dopoll(struct fcp_port *pptr, struct fcp_pkt *cmd);
549 static struct fcp_port *fcp_dip2port(dev_info_t *dip);
550 struct fcp_lun *fcp_get_lun_from_cip(struct fcp_port *pptr,
551     child_info_t *cip);
552 static int fcp_pass_to_hp_and_wait(struct fcp_port *pptr,
553     struct fcp_lun *plun, child_info_t *cip, int what, int link_cnt,
554     int tgt_cnt, int flags);
555 static struct fcp_hp_elem *fcp_pass_to_hp(struct fcp_port *pptr,
556     struct fcp_lun *plun, child_info_t *cip, int what, int link_cnt,
557     int tgt_cnt, int flags, int wait);
558 static void fcp_retransport_cmd(struct fcp_port *pptr,
559     struct fcp_pkt *cmd);
560 static void fcp_fail_cmd(struct fcp_pkt *cmd, uchar_t reason,
561     uint_t statistics);
562 static void fcp_queue_pkt(struct fcp_port *pptr, struct fcp_pkt *cmd);
563 static void fcp_update_targets(struct fcp_port *pptr,
564     fc_portmap_t *dev_list, uint32_t count, uint32_t state, int cause);
565 static int fcp_call_finish_init(struct fcp_port *pptr,
566     struct fcp_tgt *ptgt, int lcount, int tcount, int cause);
567 static int fcp_call_finish_init_held(struct fcp_port *pptr,
568     struct fcp_tgt *ptgt, int lcount, int tcount, int cause);
569 static void fcp_reconfigure_luns(void * tgt_handle);
570 static void fcp_free_targets(struct fcp_port *pptr);
571 static void fcp_free_target(struct fcp_tgt *ptgt);
572 static int fcp_is_retryable(struct fcp_ipkt *icmd);
573 static int fcp_create_on_demand(struct fcp_port *pptr, uchar_t *pwwn);
574 static void fcp_ascii_to_wwn(caddr_t string, uchar_t bytes[], unsigned int);
575 static void fcp_wwn_to_ascii(uchar_t bytes[], char *string);
576 static void fcp_print_error(fc_packet_t *fpkt);
577 static int fcp_handle_ipkt_errors(struct fcp_port *pptr,
578     struct fcp_tgt *ptgt, struct fcp_ipkt *icmd, int rval, caddr_t op);
579 static int fcp_outstanding_lun_cmds(struct fcp_tgt *ptgt);
580 static fc_portmap_t *fcp_construct_map(struct fcp_port *pptr,
581     uint32_t *dev_cnt);
582 static void fcp_offline_all(struct fcp_port *pptr, int lcount, int cause);
583 static int fcp_get_statec_count(struct fcp_ioctl *data, int mode, int *rval);
584 static int fcp_copyin_fcp_ioctl_data(struct fcp_ioctl *, int, int *,
585     struct fcp_ioctl *, struct fcp_port **);
586 static char *fcp_get_lun_path(struct fcp_lun *plun);
587 static int fcp_get_target_mappings(struct fcp_ioctl *data, int mode,
588     int *rval);
589 static int fcp_do_ns_registry(struct fcp_port *pptr, uint32_t s_id);
590 static void fcp_retry_ns_registry(struct fcp_port *pptr, uint32_t s_id);
594 static void fcp_reconfig_wait(struct fcp_port *pptr);
595 
596 /*
597  * New functions added for mpxio support
598  */
599 static int fcp_virt_tgt_init(dev_info_t *hba_dip, dev_info_t *tgt_dip,
600     scsi_hba_tran_t *hba_tran, struct scsi_device *sd);
601 static mdi_pathinfo_t *fcp_create_pip(struct fcp_lun *plun, int lcount,
602     int tcount);
603 static mdi_pathinfo_t *fcp_find_existing_pip(struct fcp_lun *plun,
604     dev_info_t *pdip);
605 static int fcp_is_pip_present(struct fcp_lun *plun, mdi_pathinfo_t *pip);
606 static void fcp_handle_page83(fc_packet_t *, struct fcp_ipkt *, int);
607 static void fcp_update_mpxio_path_verifybusy(struct fcp_port *pptr);
608 static int fcp_copy_guid_2_lun_block(struct fcp_lun *plun, char *guidp);
609 static int fcp_update_mpxio_path(struct fcp_lun *plun, child_info_t *cip,
610     int what);
611 static int fcp_is_reconfig_needed(struct fcp_tgt *ptgt,
612     fc_packet_t *fpkt);
613 static int fcp_symmetric_device_probe(struct fcp_lun *plun);
614 
615 /*
616  * New functions added for lun masking support
617  */
618 static void fcp_read_blacklist(dev_info_t *dip,
619     struct fcp_black_list_entry **pplun_blacklist);
620 static void fcp_mask_pwwn_lun(char *curr_pwwn, char *curr_lun,
621     struct fcp_black_list_entry **pplun_blacklist);
622 static void fcp_add_one_mask(char *curr_pwwn, uint32_t lun_id,
623     struct fcp_black_list_entry **pplun_blacklist);
624 static int fcp_should_mask(la_wwn_t *wwn, uint32_t lun_id);
625 static void fcp_cleanup_blacklist(struct fcp_black_list_entry **lun_blacklist);
626 
627 /*
628  * New functions to support software FCA (like fcoei)
629  */
630 static struct scsi_pkt *fcp_pseudo_init_pkt(
631 	struct scsi_address *ap, struct scsi_pkt *pkt,
632 	struct buf *bp, int cmdlen, int statuslen,
633 	int tgtlen, int flags, int (*callback)(), caddr_t arg);
634 static void fcp_pseudo_destroy_pkt(
635 	struct scsi_address *ap, struct scsi_pkt *pkt);
636 static void fcp_pseudo_sync_pkt(
637 	struct scsi_address *ap, struct scsi_pkt *pkt);
638 static int fcp_pseudo_start(struct scsi_address *ap, struct scsi_pkt *pkt);
639 static void fcp_pseudo_dmafree(
640 	struct scsi_address *ap, struct scsi_pkt *pkt);
641 
642 extern struct mod_ops	mod_driverops;
643 /*
644  * This variable is defined in modctl.c and set to '1' after the root driver
645  * and fs are loaded.  It serves as an indication that the root filesystem can
646  * be used.
647  */
648 extern int		modrootloaded;
649 /*
650  * This table contains strings associated with the SCSI sense key codes.  It
651  * is used by FCP to print a clear explanation of the code returned in the
652  * sense information by a device.
653  */
654 extern char		*sense_keys[];
655 /*
656  * This device is created by the SCSI pseudo nexus driver (SCSI vHCI).	It is
657  * under this device that the paths to a physical device are created when
658  * MPxIO is used.
659  */
660 extern dev_info_t	*scsi_vhci_dip;
661 
662 /*
663  * Report lun processing
664  */
665 #define	FCP_LUN_ADDRESSING		0x80
666 #define	FCP_PD_ADDRESSING		0x00
667 #define	FCP_VOLUME_ADDRESSING		0x40
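/*
 * These values correspond to the address method bits (bits 7 and 6 of the
 * first byte of each 8-byte LUN entry) defined by SAM: 00b selects
 * peripheral device addressing, 01b flat space (volume) addressing and
 * 10b logical unit addressing.
 */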
668 
669 #define	FCP_SVE_THROTTLE		0x28 /* Vicom */
670 #define	MAX_INT_DMA			0x7fffffff
671 /*
672  * Property definitions
673  */
674 #define	NODE_WWN_PROP	(char *)fcp_node_wwn_prop
675 #define	PORT_WWN_PROP	(char *)fcp_port_wwn_prop
676 #define	TARGET_PROP	(char *)fcp_target_prop
677 #define	LUN_PROP	(char *)fcp_lun_prop
678 #define	SAM_LUN_PROP	(char *)fcp_sam_lun_prop
679 #define	CONF_WWN_PROP	(char *)fcp_conf_wwn_prop
680 #define	OBP_BOOT_WWN	(char *)fcp_obp_boot_wwn
681 #define	MANUAL_CFG_ONLY	(char *)fcp_manual_config_only
682 #define	INIT_PORT_PROP	(char *)fcp_init_port_prop
683 #define	TGT_PORT_PROP	(char *)fcp_tgt_port_prop
684 #define	LUN_BLACKLIST_PROP	(char *)fcp_lun_blacklist_prop
685 /*
686  * Short hand macros.
687  */
688 #define	LUN_PORT	(plun->lun_tgt->tgt_port)
689 #define	LUN_TGT		(plun->lun_tgt)
690 
691 /*
692  * Driver private macros
693  */
694 #define	FCP_ATOB(x)	(((x) >= '0' && (x) <= '9') ? ((x) - '0') :	\
695 			((x) >= 'a' && (x) <= 'f') ?			\
696 			((x) - 'a' + 10) : ((x) - 'A' + 10))
697 
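/*
 * FCP_ATOB() converts a single ASCII hex digit into its 4-bit value; it
 * assumes the argument is a valid hex digit.  A caller building a binary
 * WWN from its ASCII form (such as fcp_ascii_to_wwn()) would typically
 * combine two digits per byte, along the lines of:
 *
 *	byte = (FCP_ATOB(str[i]) << 4) | FCP_ATOB(str[i + 1]);
 */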
698 #define	FCP_MAX(a, b)	((a) > (b) ? (a) : (b))
699 
700 #define	FCP_N_NDI_EVENTS						\
701 	(sizeof (fcp_ndi_event_defs) / sizeof (ndi_event_definition_t))
702 
703 #define	FCP_LINK_STATE_CHANGED(p, c)			\
704 	((p)->port_link_cnt != (c)->ipkt_link_cnt)
705 
706 #define	FCP_TGT_STATE_CHANGED(t, c)			\
707 	((t)->tgt_change_cnt != (c)->ipkt_change_cnt)
708 
709 #define	FCP_STATE_CHANGED(p, t, c)		\
710 	(FCP_TGT_STATE_CHANGED(t, c))
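/*
 * The counts compared above are generation counts: an internal packet
 * (fcp_ipkt) snapshots the port link count and target change count when
 * it is created, so a mismatch here indicates that the link or target
 * state changed while the command was outstanding.
 */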
711 
712 #define	FCP_MUST_RETRY(fpkt)				\
713 	((fpkt)->pkt_state == FC_PKT_LOCAL_BSY ||	\
714 	(fpkt)->pkt_state == FC_PKT_LOCAL_RJT ||	\
715 	(fpkt)->pkt_state == FC_PKT_TRAN_BSY ||	\
716 	(fpkt)->pkt_state == FC_PKT_ELS_IN_PROGRESS ||	\
717 	(fpkt)->pkt_state == FC_PKT_NPORT_BSY ||	\
718 	(fpkt)->pkt_state == FC_PKT_FABRIC_BSY ||	\
719 	(fpkt)->pkt_state == FC_PKT_PORT_OFFLINE ||	\
720 	(fpkt)->pkt_reason == FC_REASON_OFFLINE)
721 
722 #define	FCP_SENSE_REPORTLUN_CHANGED(es)		\
723 	((es)->es_key == KEY_UNIT_ATTENTION &&	\
724 	(es)->es_add_code == 0x3f &&		\
725 	(es)->es_qual_code == 0x0e)
726 
727 #define	FCP_SENSE_NO_LUN(es)			\
728 	((es)->es_key == KEY_ILLEGAL_REQUEST &&	\
729 	(es)->es_add_code == 0x25 &&		\
730 	(es)->es_qual_code == 0x0)
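/*
 * The two sense tests above correspond to the standard SPC additional
 * sense codes: ASC/ASCQ 0x3f/0x0e is "REPORTED LUNS DATA HAS CHANGED"
 * and ASC/ASCQ 0x25/0x00 is "LOGICAL UNIT NOT SUPPORTED".
 */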
731 
732 #define	FCP_VERSION		"20091208-1.192"
733 #define	FCP_NAME_VERSION	"SunFC FCP v" FCP_VERSION
734 
735 #define	FCP_NUM_ELEMENTS(array)			\
736 	(sizeof (array) / sizeof ((array)[0]))
737 
738 /*
739  * Debugging, Error reporting, and tracing
740  */
741 #define	FCP_LOG_SIZE		(1024 * 1024)
742 
743 #define	FCP_LEVEL_1		0x00001		/* attach/detach PM CPR */
744 #define	FCP_LEVEL_2		0x00002		/* failures/Invalid data */
745 #define	FCP_LEVEL_3		0x00004		/* state change, discovery */
746 #define	FCP_LEVEL_4		0x00008		/* ULP messages */
747 #define	FCP_LEVEL_5		0x00010		/* ELS/SCSI cmds */
748 #define	FCP_LEVEL_6		0x00020		/* Transport failures */
749 #define	FCP_LEVEL_7		0x00040
750 #define	FCP_LEVEL_8		0x00080		/* I/O tracing */
751 #define	FCP_LEVEL_9		0x00100		/* I/O tracing */
752 
753 
754 
755 /*
756  * Log contents to system messages file
757  */
758 #define	FCP_MSG_LEVEL_1	(FCP_LEVEL_1 | FC_TRACE_LOG_MSG)
759 #define	FCP_MSG_LEVEL_2	(FCP_LEVEL_2 | FC_TRACE_LOG_MSG)
760 #define	FCP_MSG_LEVEL_3	(FCP_LEVEL_3 | FC_TRACE_LOG_MSG)
761 #define	FCP_MSG_LEVEL_4	(FCP_LEVEL_4 | FC_TRACE_LOG_MSG)
762 #define	FCP_MSG_LEVEL_5	(FCP_LEVEL_5 | FC_TRACE_LOG_MSG)
763 #define	FCP_MSG_LEVEL_6	(FCP_LEVEL_6 | FC_TRACE_LOG_MSG)
764 #define	FCP_MSG_LEVEL_7	(FCP_LEVEL_7 | FC_TRACE_LOG_MSG)
765 #define	FCP_MSG_LEVEL_8	(FCP_LEVEL_8 | FC_TRACE_LOG_MSG)
766 #define	FCP_MSG_LEVEL_9	(FCP_LEVEL_9 | FC_TRACE_LOG_MSG)
767 
768 
769 /*
770  * Log contents to trace buffer
771  */
772 #define	FCP_BUF_LEVEL_1	(FCP_LEVEL_1 | FC_TRACE_LOG_BUF)
773 #define	FCP_BUF_LEVEL_2	(FCP_LEVEL_2 | FC_TRACE_LOG_BUF)
774 #define	FCP_BUF_LEVEL_3	(FCP_LEVEL_3 | FC_TRACE_LOG_BUF)
775 #define	FCP_BUF_LEVEL_4	(FCP_LEVEL_4 | FC_TRACE_LOG_BUF)
776 #define	FCP_BUF_LEVEL_5	(FCP_LEVEL_5 | FC_TRACE_LOG_BUF)
777 #define	FCP_BUF_LEVEL_6	(FCP_LEVEL_6 | FC_TRACE_LOG_BUF)
778 #define	FCP_BUF_LEVEL_7	(FCP_LEVEL_7 | FC_TRACE_LOG_BUF)
779 #define	FCP_BUF_LEVEL_8	(FCP_LEVEL_8 | FC_TRACE_LOG_BUF)
780 #define	FCP_BUF_LEVEL_9	(FCP_LEVEL_9 | FC_TRACE_LOG_BUF)
781 
782 
783 /*
784  * Log contents to both system messages file and trace buffer
785  */
786 #define	FCP_MSG_BUF_LEVEL_1	(FCP_LEVEL_1 | FC_TRACE_LOG_BUF |	\
787 				FC_TRACE_LOG_MSG)
788 #define	FCP_MSG_BUF_LEVEL_2	(FCP_LEVEL_2 | FC_TRACE_LOG_BUF |	\
789 				FC_TRACE_LOG_MSG)
790 #define	FCP_MSG_BUF_LEVEL_3	(FCP_LEVEL_3 | FC_TRACE_LOG_BUF |	\
791 				FC_TRACE_LOG_MSG)
792 #define	FCP_MSG_BUF_LEVEL_4	(FCP_LEVEL_4 | FC_TRACE_LOG_BUF |	\
793 				FC_TRACE_LOG_MSG)
794 #define	FCP_MSG_BUF_LEVEL_5	(FCP_LEVEL_5 | FC_TRACE_LOG_BUF |	\
795 				FC_TRACE_LOG_MSG)
796 #define	FCP_MSG_BUF_LEVEL_6	(FCP_LEVEL_6 | FC_TRACE_LOG_BUF |	\
797 				FC_TRACE_LOG_MSG)
798 #define	FCP_MSG_BUF_LEVEL_7	(FCP_LEVEL_7 | FC_TRACE_LOG_BUF |	\
799 				FC_TRACE_LOG_MSG)
800 #define	FCP_MSG_BUF_LEVEL_8	(FCP_LEVEL_8 | FC_TRACE_LOG_BUF |	\
801 				FC_TRACE_LOG_MSG)
802 #define	FCP_MSG_BUF_LEVEL_9	(FCP_LEVEL_9 | FC_TRACE_LOG_BUF |	\
803 				FC_TRACE_LOG_MSG)
804 #ifdef DEBUG
805 #define	FCP_DTRACE	fc_trace_debug
806 #else
807 #define	FCP_DTRACE
808 #endif
809 
810 #define	FCP_TRACE	fc_trace_debug
811 
812 static struct cb_ops fcp_cb_ops = {
813 	fcp_open,			/* open */
814 	fcp_close,			/* close */
815 	nodev,				/* strategy */
816 	nodev,				/* print */
817 	nodev,				/* dump */
818 	nodev,				/* read */
819 	nodev,				/* write */
820 	fcp_ioctl,			/* ioctl */
821 	nodev,				/* devmap */
822 	nodev,				/* mmap */
823 	nodev,				/* segmap */
824 	nochpoll,			/* chpoll */
825 	ddi_prop_op,			/* cb_prop_op */
826 	0,				/* streamtab */
827 	D_NEW | D_MP | D_HOTPLUG,	/* cb_flag */
828 	CB_REV,				/* rev */
829 	nodev,				/* aread */
830 	nodev				/* awrite */
831 };
832 
833 
834 static struct dev_ops fcp_ops = {
835 	DEVO_REV,
836 	0,
837 	ddi_getinfo_1to1,
838 	nulldev,		/* identify */
839 	nulldev,		/* probe */
840 	fcp_attach,		/* attach and detach are mandatory */
841 	fcp_detach,
842 	nodev,			/* reset */
843 	&fcp_cb_ops,		/* cb_ops */
844 	NULL,			/* bus_ops */
845 	NULL,			/* power */
846 };
847 
848 
849 char *fcp_version = FCP_NAME_VERSION;
850 
851 static struct modldrv modldrv = {
852 	&mod_driverops,
853 	FCP_NAME_VERSION,
854 	&fcp_ops
855 };
856 
857 
858 static struct modlinkage modlinkage = {
859 	MODREV_1,
860 	&modldrv,
861 	NULL
862 };
863 
864 
865 static fc_ulp_modinfo_t fcp_modinfo = {
866 	&fcp_modinfo,			/* ulp_handle */
867 	FCTL_ULP_MODREV_4,		/* ulp_rev */
868 	FC4_SCSI_FCP,			/* ulp_type */
869 	"fcp",				/* ulp_name */
870 	FCP_STATEC_MASK,		/* ulp_statec_mask */
871 	fcp_port_attach,		/* ulp_port_attach */
872 	fcp_port_detach,		/* ulp_port_detach */
873 	fcp_port_ioctl,			/* ulp_port_ioctl */
874 	fcp_els_callback,		/* ulp_els_callback */
875 	fcp_data_callback,		/* ulp_data_callback */
876 	fcp_statec_callback		/* ulp_statec_callback */
877 };
878 
879 #ifdef	DEBUG
880 #define	FCP_TRACE_DEFAULT	(FC_TRACE_LOG_MASK | FCP_LEVEL_1 |	\
881 				FCP_LEVEL_2 | FCP_LEVEL_3 |		\
882 				FCP_LEVEL_4 | FCP_LEVEL_5 |		\
883 				FCP_LEVEL_6 | FCP_LEVEL_7)
884 #else
885 #define	FCP_TRACE_DEFAULT	(FC_TRACE_LOG_MASK | FCP_LEVEL_1 |	\
886 				FCP_LEVEL_2 | FCP_LEVEL_3 |		\
887 				FCP_LEVEL_4 | FCP_LEVEL_5 |		\
888 				FCP_LEVEL_6 | FCP_LEVEL_7)
889 #endif
890 
891 /* FCP global variables */
892 int			fcp_bus_config_debug = 0;
893 static int		fcp_log_size = FCP_LOG_SIZE;
894 static int		fcp_trace = FCP_TRACE_DEFAULT;
895 static fc_trace_logq_t	*fcp_logq = NULL;
896 static struct fcp_black_list_entry	*fcp_lun_blacklist = NULL;
897 /*
898  * Auto-configuration is enabled by default.  The only way to disable it is
899  * through the MANUAL_CFG_ONLY property in the fcp.conf file.
900  */
901 static int		fcp_enable_auto_configuration = 1;
902 static int		fcp_max_bus_config_retries	= 4;
903 static int		fcp_lun_ready_retry = 300;
904 /*
905  * The value assigned to the following variable has changed several times due
906  * to a problem with data underrun reporting in some firmware.  The
907  * current value of 50 gives a timeout value of 25 seconds for a max number
908  * of 256 LUNs.
909  */
910 static int		fcp_max_target_retries = 50;
911 /*
912  * Watchdog variables
913  * ------------------
914  *
915  * fcp_watchdog_init
916  *
917  *	Indicates if the watchdog timer is running or not.  This is actually
918  *	a counter of the number of Fibre Channel ports that attached.  When
919  *	the first port attaches the watchdog is started.  When the last port
920  *	detaches the watchdog timer is stopped.
921  *
922  * fcp_watchdog_time
923  *
924  *	This is the watchdog clock counter.  It is incremented by
925  *	fcp_watchdog_timeout each time the watchdog timer expires.
926  *
927  * fcp_watchdog_timeout
928  *
929  *	The increment applied to the variable fcp_watchdog_time, as well as
930  *	the timeout value of the watchdog timer.  The unit is 1 second.  It
931  *	is strange that this is not a #define but a variable, since the code
932  *	never changes this value.  The unit can be said to be 1 second
933  *	because the number of ticks for the watchdog timer is determined
934  *	like this:
935  *
936  *	    fcp_watchdog_tick = fcp_watchdog_timeout *
937  *				  drv_usectohz(1000000);
938  *
939  *	The value 1000000 is hard coded in the code.
940  *
941  * fcp_watchdog_tick
942  *
943  *	Watchdog timer value in ticks.
944  */
945 static int		fcp_watchdog_init = 0;
946 static int		fcp_watchdog_time = 0;
947 static int		fcp_watchdog_timeout = 1;
948 static int		fcp_watchdog_tick;
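/*
 * For example, with the default fcp_watchdog_timeout of 1 second on a
 * system using the common 100Hz clock, drv_usectohz(1000000) returns 100,
 * so fcp_watchdog_tick is 100 ticks and fcp_watch() runs roughly once
 * per second.
 */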
949 
950 /*
951  * fcp_offline_delay is a global variable to enable customisation of
952  * the timeout on link offlines or RSCNs. The default value is set
953  * to match FCP_OFFLINE_DELAY (20sec), which is 2*RA_TOV_els as
954  * specified in FCP4 Chapter 11 (see www.t10.org).
955  *
956  * The variable fcp_offline_delay is specified in SECONDS.
957  *
958  * If we made this a static var then the user would not be able to
959  * change it. This variable is set in fcp_attach().
960  */
961 unsigned int		fcp_offline_delay = FCP_OFFLINE_DELAY;
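/*
 * The delay can be overridden through the "fcp_offline_delay" property
 * read in fcp_attach(); for example, an fcp.conf entry such as
 * "fcp_offline_delay=30;" would set it to 30 seconds.  A warning is
 * logged for values outside the recommended 10..60 second range.
 */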
962 
963 static void		*fcp_softstate = NULL; /* for soft state */
964 static uchar_t		fcp_oflag = FCP_IDLE; /* open flag */
965 static kmutex_t		fcp_global_mutex;
966 static kmutex_t		fcp_ioctl_mutex;
967 static dev_info_t	*fcp_global_dip = NULL;
968 static timeout_id_t	fcp_watchdog_id;
969 const char		*fcp_lun_prop = "lun";
970 const char		*fcp_sam_lun_prop = "sam-lun";
971 const char		*fcp_target_prop = "target";
972 /*
973  * NOTE: consumers of "node-wwn" property include stmsboot in ON
974  * consolidation.
975  */
976 const char		*fcp_node_wwn_prop = "node-wwn";
977 const char		*fcp_port_wwn_prop = "port-wwn";
978 const char		*fcp_conf_wwn_prop = "fc-port-wwn";
979 const char		*fcp_obp_boot_wwn = "fc-boot-dev-portwwn";
980 const char		*fcp_manual_config_only = "manual_configuration_only";
981 const char		*fcp_init_port_prop = "initiator-port";
982 const char		*fcp_tgt_port_prop = "target-port";
983 const char		*fcp_lun_blacklist_prop = "pwwn-lun-blacklist";
984 
985 static struct fcp_port	*fcp_port_head = NULL;
986 static ddi_eventcookie_t	fcp_insert_eid;
987 static ddi_eventcookie_t	fcp_remove_eid;
988 
989 static ndi_event_definition_t	fcp_ndi_event_defs[] = {
990 	{ FCP_EVENT_TAG_INSERT, FCAL_INSERT_EVENT, EPL_KERNEL },
991 	{ FCP_EVENT_TAG_REMOVE, FCAL_REMOVE_EVENT, EPL_INTERRUPT }
992 };
993 
994 /*
995  * List of valid commands for the scsi_ioctl call
996  */
997 static uint8_t scsi_ioctl_list[] = {
998 	SCMD_INQUIRY,
999 	SCMD_REPORT_LUN,
1000 	SCMD_READ_CAPACITY
1001 };
1002 
1003 /*
1004  * This is used to dummy up a REPORT LUNS response for targets
1005  * that do not support the command.
1006  */
1007 static uchar_t fcp_dummy_lun[] = {
1008 	0x00,		/* MSB length (length = no of luns * 8) */
1009 	0x00,
1010 	0x00,
1011 	0x08,		/* LSB length */
1012 	0x00,		/* MSB reserved */
1013 	0x00,
1014 	0x00,
1015 	0x00,		/* LSB reserved */
1016 	FCP_PD_ADDRESSING,
1017 	0x00,		/* LUN is ZERO at the first level */
1018 	0x00,
1019 	0x00,		/* second level is zero */
1020 	0x00,
1021 	0x00,		/* third level is zero */
1022 	0x00,
1023 	0x00		/* fourth level is zero */
1024 };
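/*
 * The layout above follows the REPORT LUNS response format: a 4-byte LUN
 * list length (8, i.e. a single entry), 4 reserved bytes, then one 8-byte
 * LUN entry describing LUN 0 with peripheral device addressing.
 */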
1025 
1026 static uchar_t fcp_alpa_to_switch[] = {
1027 	0x00, 0x7d, 0x7c, 0x00, 0x7b, 0x00, 0x00, 0x00, 0x7a, 0x00,
1028 	0x00, 0x00, 0x00, 0x00, 0x00, 0x79, 0x78, 0x00, 0x00, 0x00,
1029 	0x00, 0x00, 0x00, 0x77, 0x76, 0x00, 0x00, 0x75, 0x00, 0x74,
1030 	0x73, 0x72, 0x00, 0x00, 0x00, 0x71, 0x00, 0x70, 0x6f, 0x6e,
1031 	0x00, 0x6d, 0x6c, 0x6b, 0x6a, 0x69, 0x68, 0x00, 0x00, 0x67,
1032 	0x66, 0x65, 0x64, 0x63, 0x62, 0x00, 0x00, 0x61, 0x60, 0x00,
1033 	0x5f, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x5e, 0x00, 0x5d,
1034 	0x5c, 0x5b, 0x00, 0x5a, 0x59, 0x58, 0x57, 0x56, 0x55, 0x00,
1035 	0x00, 0x54, 0x53, 0x52, 0x51, 0x50, 0x4f, 0x00, 0x00, 0x4e,
1036 	0x4d, 0x00, 0x4c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x4b,
1037 	0x00, 0x4a, 0x49, 0x48, 0x00, 0x47, 0x46, 0x45, 0x44, 0x43,
1038 	0x42, 0x00, 0x00, 0x41, 0x40, 0x3f, 0x3e, 0x3d, 0x3c, 0x00,
1039 	0x00, 0x3b, 0x3a, 0x00, 0x39, 0x00, 0x00, 0x00, 0x38, 0x37,
1040 	0x36, 0x00, 0x35, 0x00, 0x00, 0x00, 0x34, 0x00, 0x00, 0x00,
1041 	0x00, 0x00, 0x00, 0x33, 0x32, 0x00, 0x00, 0x00, 0x00, 0x00,
1042 	0x00, 0x31, 0x30, 0x00, 0x00, 0x2f, 0x00, 0x2e, 0x2d, 0x2c,
1043 	0x00, 0x00, 0x00, 0x2b, 0x00, 0x2a, 0x29, 0x28, 0x00, 0x27,
1044 	0x26, 0x25, 0x24, 0x23, 0x22, 0x00, 0x00, 0x21, 0x20, 0x1f,
1045 	0x1e, 0x1d, 0x1c, 0x00, 0x00, 0x1b, 0x1a, 0x00, 0x19, 0x00,
1046 	0x00, 0x00, 0x00, 0x00, 0x00, 0x18, 0x00, 0x17, 0x16, 0x15,
1047 	0x00, 0x14, 0x13, 0x12, 0x11, 0x10, 0x0f, 0x00, 0x00, 0x0e,
1048 	0x0d, 0x0c, 0x0b, 0x0a, 0x09, 0x00, 0x00, 0x08, 0x07, 0x00,
1049 	0x06, 0x00, 0x00, 0x00, 0x05, 0x04, 0x03, 0x00, 0x02, 0x00,
1050 	0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1051 };
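/*
 * fcp_alpa_to_switch[] translates a Fibre Channel arbitrated loop
 * physical address (AL_PA) into the corresponding loop index (the
 * traditional "switch" or select ID); for example, AL_PA 0x01 maps to
 * index 0x7d and AL_PA 0xe8 maps to index 0x01.
 */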
1052 
1053 static caddr_t pid = "SESS01	      ";
1054 
1055 #if	!defined(lint)
1056 
1057 _NOTE(MUTEX_PROTECTS_DATA(fcp_global_mutex,
1058     fcp_port::fcp_next fcp_watchdog_id))
1059 
1060 _NOTE(DATA_READABLE_WITHOUT_LOCK(fcp_watchdog_time))
1061 
1062 _NOTE(SCHEME_PROTECTS_DATA("Unshared",
1063     fcp_insert_eid
1064     fcp_remove_eid
1065     fcp_watchdog_time))
1066 
1067 _NOTE(SCHEME_PROTECTS_DATA("Unshared",
1068     fcp_cb_ops
1069     fcp_ops
1070     callb_cpr))
1071 
1072 #endif /* lint */
1073 
1074 /*
1075  * This table is used to determine whether or not it's safe to copy in
1076  * the target node name for a lun.  Since all luns behind the same target
1077  * have the same wwnn, only targets that do not support multiple luns are
1078  * eligible to be enumerated under mpxio if they aren't page83 compliant.
1079  */
1080 
1081 char *fcp_symmetric_disk_table[] = {
1082 	"SEAGATE ST",
1083 	"IBM	 DDYFT",
1084 	"SUNW	 SUNWGS",	/* Daktari enclosure */
1085 	"SUN	 SENA",		/* SES device */
1086 	"SUN	 SESS01"	/* VICOM SVE box */
1087 };
1088 
1089 int fcp_symmetric_disk_table_size =
1090 	sizeof (fcp_symmetric_disk_table)/sizeof (char *);
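/*
 * Each entry above is the start of the INQUIRY identification data of a
 * device: the 8-byte vendor ID followed by the beginning of the product
 * ID.  A device whose INQUIRY data matches one of these prefixes is
 * treated as a symmetric device.
 */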
1091 
1092 /*
1093  * This structure is bogus: scsi_hba_attach_setup() requires this
1094  * information and the kernel will panic if it is not passed in.  It is
1095  * not clear what impact, if any, providing it actually has; since DMA
1096  * allocation is done in pkt_init it may have none.  These values are
1097  * taken straight from the Writing Device
1098  * Drivers manual.
1099  */
1100 static ddi_dma_attr_t pseudo_fca_dma_attr = {
1101 	DMA_ATTR_V0,	/* ddi_dma_attr version */
1102 	0,		/* low address */
1103 	0xffffffff,	/* high address */
1104 	0x00ffffff,	/* counter upper bound */
1105 	1,		/* alignment requirements */
1106 	0x3f,		/* burst sizes */
1107 	1,		/* minimum DMA access */
1108 	0xffffffff,	/* maximum DMA access */
1109 	(1 << 24) - 1,	/* segment boundary restrictions */
1110 	1,		/* scatter/gather list length */
1111 	512,		/* device granularity */
1112 	0		/* DMA flags */
1113 };
1114 
1115 /*
1116  * The _init(9e) return value should be that of mod_install(9f). Under
1117  * some circumstances, a failure may not be related to mod_install(9f) and
1118  * one would then require a return value to indicate the failure. Looking
1119  * at mod_install(9f), it is expected to return 0 for success and non-zero
1120  * for failure. mod_install(9f), for device drivers, goes further down the
1121  * calling chain and ends up in ddi_installdrv(), whose return values are
1122  * DDI_SUCCESS and DDI_FAILURE - There are also other functions in the
1123  * calling chain of mod_install(9f) which return values like EINVAL and
1124  * in some cases even return -1.
1125  *
1126  * To work around the vagaries of the mod_install() calling chain, return
1127  * either 0 or ENODEV depending on the success or failure of mod_install()
1128  */
1129 int
1130 _init(void)
1131 {
1132 	int rval;
1133 
1134 	/*
1135 	 * Initialize the soft state framework so that ddi_soft_state_zalloc()
1136 	 * can be used later; do this before registering with the transport.
1137 	 */
1138 	if (ddi_soft_state_init(&fcp_softstate,
1139 	    sizeof (struct fcp_port), FCP_INIT_ITEMS) != 0) {
1140 		return (EINVAL);
1141 	}
1142 
1143 	mutex_init(&fcp_global_mutex, NULL, MUTEX_DRIVER, NULL);
1144 	mutex_init(&fcp_ioctl_mutex, NULL, MUTEX_DRIVER, NULL);
1145 
1146 	if ((rval = fc_ulp_add(&fcp_modinfo)) != FC_SUCCESS) {
1147 		cmn_err(CE_WARN, "fcp: fc_ulp_add failed");
1148 		mutex_destroy(&fcp_global_mutex);
1149 		mutex_destroy(&fcp_ioctl_mutex);
1150 		ddi_soft_state_fini(&fcp_softstate);
1151 		return (ENODEV);
1152 	}
1153 
1154 	fcp_logq = fc_trace_alloc_logq(fcp_log_size);
1155 
1156 	if ((rval = mod_install(&modlinkage)) != 0) {
1157 		fc_trace_free_logq(fcp_logq);
1158 		(void) fc_ulp_remove(&fcp_modinfo);
1159 		mutex_destroy(&fcp_global_mutex);
1160 		mutex_destroy(&fcp_ioctl_mutex);
1161 		ddi_soft_state_fini(&fcp_softstate);
1162 		rval = ENODEV;
1163 	}
1164 
1165 	return (rval);
1166 }
1167 
1168 
1169 /*
1170  * the system is done with us as a driver, so clean up
1171  */
1172 int
1173 _fini(void)
1174 {
1175 	int rval;
1176 
1177 	/*
1178 	 * don't start cleaning up until we know that the module remove
1179 	 * has worked  -- if this works, then we know that each instance
1180 	 * has successfully been DDI_DETACHed
1181 	 */
1182 	if ((rval = mod_remove(&modlinkage)) != 0) {
1183 		return (rval);
1184 	}
1185 
1186 	(void) fc_ulp_remove(&fcp_modinfo);
1187 
1188 	ddi_soft_state_fini(&fcp_softstate);
1189 	mutex_destroy(&fcp_global_mutex);
1190 	mutex_destroy(&fcp_ioctl_mutex);
1191 	fc_trace_free_logq(fcp_logq);
1192 
1193 	return (rval);
1194 }
1195 
1196 
1197 int
1198 _info(struct modinfo *modinfop)
1199 {
1200 	return (mod_info(&modlinkage, modinfop));
1201 }
1202 
1203 
1204 /*
1205  * attach the module
1206  */
1207 static int
1208 fcp_attach(dev_info_t *devi, ddi_attach_cmd_t cmd)
1209 {
1210 	int rval = DDI_SUCCESS;
1211 
1212 	FCP_DTRACE(fcp_logq, "fcp", fcp_trace,
1213 	    FCP_BUF_LEVEL_8, 0, "fcp module attach: cmd=0x%x", cmd);
1214 
1215 	if (cmd == DDI_ATTACH) {
1216 		/* The FCP pseudo device is created here. */
1217 		mutex_enter(&fcp_global_mutex);
1218 		fcp_global_dip = devi;
1219 		mutex_exit(&fcp_global_mutex);
1220 
1221 		if (ddi_create_minor_node(fcp_global_dip, "fcp", S_IFCHR,
1222 		    0, DDI_PSEUDO, 0) == DDI_SUCCESS) {
1223 			ddi_report_dev(fcp_global_dip);
1224 		} else {
1225 			cmn_err(CE_WARN, "FCP: Cannot create minor node");
1226 			mutex_enter(&fcp_global_mutex);
1227 			fcp_global_dip = NULL;
1228 			mutex_exit(&fcp_global_mutex);
1229 
1230 			rval = DDI_FAILURE;
1231 		}
1232 		/*
1233 		 * We check the fcp_offline_delay property at this
1234 		 * point. This variable is global for the driver,
1235 		 * not specific to an instance.
1236 		 *
1237 		 * We do not recommend setting the value to less
1238 		 * than 10 seconds (RA_TOV_els), or greater than
1239 		 * 60 seconds.
1240 		 */
1241 		fcp_offline_delay = ddi_prop_get_int(DDI_DEV_T_ANY,
1242 		    devi, DDI_PROP_DONTPASS | DDI_PROP_NOTPROM,
1243 		    "fcp_offline_delay", FCP_OFFLINE_DELAY);
1244 		if ((fcp_offline_delay < 10) ||
1245 		    (fcp_offline_delay > 60)) {
1246 			cmn_err(CE_WARN, "Setting fcp_offline_delay "
1247 			    "to %d second(s). This is outside the "
1248 			    "recommended range of 10..60 seconds.",
1249 			    fcp_offline_delay);
1250 		}
1251 	}
1252 
1253 	return (rval);
1254 }
1255 
1256 
1257 /*ARGSUSED*/
1258 static int
1259 fcp_detach(dev_info_t *devi, ddi_detach_cmd_t cmd)
1260 {
1261 	int	res = DDI_SUCCESS;
1262 
1263 	FCP_DTRACE(fcp_logq, "fcp", fcp_trace,
1264 	    FCP_BUF_LEVEL_8, 0,	 "module detach: cmd=0x%x", cmd);
1265 
1266 	if (cmd == DDI_DETACH) {
1267 		/*
1268 		 * Check if there are active ports/threads. If there
1269 		 * are any, we will fail, else we will succeed (there
1270 		 * should not be much to clean up)
1271 		 */
1272 		mutex_enter(&fcp_global_mutex);
1273 		FCP_DTRACE(fcp_logq, "fcp",
1274 		    fcp_trace, FCP_BUF_LEVEL_8, 0,  "port_head=%p",
1275 		    (void *) fcp_port_head);
1276 
1277 		if (fcp_port_head == NULL) {
1278 			ddi_remove_minor_node(fcp_global_dip, NULL);
1279 			fcp_global_dip = NULL;
1280 			mutex_exit(&fcp_global_mutex);
1281 		} else {
1282 			mutex_exit(&fcp_global_mutex);
1283 			res = DDI_FAILURE;
1284 		}
1285 	}
1286 	FCP_DTRACE(fcp_logq, "fcp", fcp_trace,
1287 	    FCP_BUF_LEVEL_8, 0,	 "module detach returning %d", res);
1288 
1289 	return (res);
1290 }
1291 
1292 
1293 /* ARGSUSED */
1294 static int
1295 fcp_open(dev_t *devp, int flag, int otype, cred_t *credp)
1296 {
1297 	if (otype != OTYP_CHR) {
1298 		return (EINVAL);
1299 	}
1300 
1301 	/*
1302 	 * Allow only root to talk.
1303 	 */
1304 	if (drv_priv(credp)) {
1305 		return (EPERM);
1306 	}
1307 
1308 	mutex_enter(&fcp_global_mutex);
1309 	if (fcp_oflag & FCP_EXCL) {
1310 		mutex_exit(&fcp_global_mutex);
1311 		return (EBUSY);
1312 	}
1313 
1314 	if (flag & FEXCL) {
1315 		if (fcp_oflag & FCP_OPEN) {
1316 			mutex_exit(&fcp_global_mutex);
1317 			return (EBUSY);
1318 		}
1319 		fcp_oflag |= FCP_EXCL;
1320 	}
1321 	fcp_oflag |= FCP_OPEN;
1322 	mutex_exit(&fcp_global_mutex);
1323 
1324 	return (0);
1325 }
1326 
1327 
1328 /* ARGSUSED */
1329 static int
1330 fcp_close(dev_t dev, int flag, int otype, cred_t *credp)
1331 {
1332 	if (otype != OTYP_CHR) {
1333 		return (EINVAL);
1334 	}
1335 
1336 	mutex_enter(&fcp_global_mutex);
1337 	if (!(fcp_oflag & FCP_OPEN)) {
1338 		mutex_exit(&fcp_global_mutex);
1339 		return (ENODEV);
1340 	}
1341 	fcp_oflag = FCP_IDLE;
1342 	mutex_exit(&fcp_global_mutex);
1343 
1344 	return (0);
1345 }
1346 
1347 
1348 /*
1349  * fcp_ioctl
1350  *	Entry point for the FCP ioctls
1351  *
1352  * Input:
1353  *	See ioctl(9E)
1354  *
1355  * Output:
1356  *	See ioctl(9E)
1357  *
1358  * Returns:
1359  *	See ioctl(9E)
1360  *
1361  * Context:
1362  *	Kernel context.
1363  */
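/*
 * A userland consumer would typically exercise these ioctls roughly as
 * follows (sketch only; the /devices path shown is the usual location of
 * the "fcp" minor node created in fcp_attach(), and port_instance is the
 * instance number of the HBA port of interest):
 *
 *	struct device_data dev;
 *	struct fcp_ioctl fioctl;
 *	int fd = open("/devices/pseudo/fcp@0:fcp", O_RDWR);
 *
 *	bcopy(remote_port_wwn, &dev.dev_pwwn, sizeof (la_wwn_t));
 *	fioctl.fp_minor = port_instance;
 *	fioctl.listlen = 1;
 *	fioctl.list = (caddr_t)&dev;
 *	if (ioctl(fd, FCP_TGT_INQUIRY, &fioctl) == 0)
 *		... dev.dev0_type and dev.dev_status are now valid ...
 */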
1364 /* ARGSUSED */
1365 static int
1366 fcp_ioctl(dev_t dev, int cmd, intptr_t data, int mode, cred_t *credp,
1367     int *rval)
1368 {
1369 	int			ret = 0;
1370 
1371 	mutex_enter(&fcp_global_mutex);
1372 	if (!(fcp_oflag & FCP_OPEN)) {
1373 		mutex_exit(&fcp_global_mutex);
1374 		return (ENXIO);
1375 	}
1376 	mutex_exit(&fcp_global_mutex);
1377 
1378 	switch (cmd) {
1379 	case FCP_TGT_INQUIRY:
1380 	case FCP_TGT_CREATE:
1381 	case FCP_TGT_DELETE:
1382 		ret = fcp_setup_device_data_ioctl(cmd,
1383 		    (struct fcp_ioctl *)data, mode, rval);
1384 		break;
1385 
1386 	case FCP_TGT_SEND_SCSI:
1387 		mutex_enter(&fcp_ioctl_mutex);
1388 		ret = fcp_setup_scsi_ioctl(
1389 		    (struct fcp_scsi_cmd *)data, mode, rval);
1390 		mutex_exit(&fcp_ioctl_mutex);
1391 		break;
1392 
1393 	case FCP_STATE_COUNT:
1394 		ret = fcp_get_statec_count((struct fcp_ioctl *)data,
1395 		    mode, rval);
1396 		break;
1397 	case FCP_GET_TARGET_MAPPINGS:
1398 		ret = fcp_get_target_mappings((struct fcp_ioctl *)data,
1399 		    mode, rval);
1400 		break;
1401 	default:
1402 		fcp_log(CE_WARN, NULL,
1403 		    "!Invalid ioctl opcode = 0x%x", cmd);
1404 		ret	= EINVAL;
1405 	}
1406 
1407 	return (ret);
1408 }
1409 
1410 
1411 /*
1412  * fcp_setup_device_data_ioctl
1413  *	Setup handler for the "device data" style of
1414  *	ioctl for FCP.	See "fcp_util.h" for data structure
1415  *	definition.
1416  *
1417  * Input:
1418  *	cmd	= FCP ioctl command
1419  *	data	= ioctl data
1420  *	mode	= See ioctl(9E)
1421  *
1422  * Output:
1423  *	data	= ioctl data
1424  *	rval	= return value - see ioctl(9E)
1425  *
1426  * Returns:
1427  *	See ioctl(9E)
1428  *
1429  * Context:
1430  *	Kernel context.
1431  */
1432 /* ARGSUSED */
1433 static int
1434 fcp_setup_device_data_ioctl(int cmd, struct fcp_ioctl *data, int mode,
1435     int *rval)
1436 {
1437 	struct fcp_port	*pptr;
1438 	struct	device_data	*dev_data;
1439 	uint32_t		link_cnt;
1440 	la_wwn_t		*wwn_ptr = NULL;
1441 	struct fcp_tgt		*ptgt = NULL;
1442 	struct fcp_lun		*plun = NULL;
1443 	int			i, error;
1444 	struct fcp_ioctl	fioctl;
1445 
1446 #ifdef	_MULTI_DATAMODEL
1447 	switch (ddi_model_convert_from(mode & FMODELS)) {
1448 	case DDI_MODEL_ILP32: {
1449 		struct fcp32_ioctl f32_ioctl;
1450 
1451 		if (ddi_copyin((void *)data, (void *)&f32_ioctl,
1452 		    sizeof (struct fcp32_ioctl), mode)) {
1453 			return (EFAULT);
1454 		}
1455 		fioctl.fp_minor = f32_ioctl.fp_minor;
1456 		fioctl.listlen = f32_ioctl.listlen;
1457 		fioctl.list = (caddr_t)(long)f32_ioctl.list;
1458 		break;
1459 	}
1460 	case DDI_MODEL_NONE:
1461 		if (ddi_copyin((void *)data, (void *)&fioctl,
1462 		    sizeof (struct fcp_ioctl), mode)) {
1463 			return (EFAULT);
1464 		}
1465 		break;
1466 	}
1467 
1468 #else	/* _MULTI_DATAMODEL */
1469 	if (ddi_copyin((void *)data, (void *)&fioctl,
1470 	    sizeof (struct fcp_ioctl), mode)) {
1471 		return (EFAULT);
1472 	}
1473 #endif	/* _MULTI_DATAMODEL */
1474 
1475 	/*
1476 	 * Right now we can assume that the minor number matches with
1477 	 * this instance of fp. If this changes we will need to
1478 	 * revisit this logic.
1479 	 */
1480 	mutex_enter(&fcp_global_mutex);
1481 	pptr = fcp_port_head;
1482 	while (pptr) {
1483 		if (pptr->port_instance == (uint32_t)fioctl.fp_minor) {
1484 			break;
1485 		} else {
1486 			pptr = pptr->port_next;
1487 		}
1488 	}
1489 	mutex_exit(&fcp_global_mutex);
1490 	if (pptr == NULL) {
1491 		return (ENXIO);
1492 	}
1493 	mutex_enter(&pptr->port_mutex);
1494 
1495 
1496 	if ((dev_data = kmem_zalloc((sizeof (struct device_data)) *
1497 	    fioctl.listlen, KM_NOSLEEP)) == NULL) {
1498 		mutex_exit(&pptr->port_mutex);
1499 		return (ENOMEM);
1500 	}
1501 
1502 	if (ddi_copyin(fioctl.list, dev_data,
1503 	    (sizeof (struct device_data)) * fioctl.listlen, mode)) {
1504 		kmem_free(dev_data, sizeof (*dev_data) * fioctl.listlen);
1505 		mutex_exit(&pptr->port_mutex);
1506 		return (EFAULT);
1507 	}
1508 	link_cnt = pptr->port_link_cnt;
1509 
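	/*
	 * For FCP_TGT_INQUIRY, if the first entry's port WWN is the WWN
	 * of this HBA itself, report DTYPE_UNKNOWN for it and return
	 * right away without walking the target list.
	 */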
1510 	if (cmd == FCP_TGT_INQUIRY) {
1511 		wwn_ptr = (la_wwn_t *)&(dev_data[0].dev_pwwn);
1512 		if (bcmp(wwn_ptr->raw_wwn, pptr->port_pwwn.raw_wwn,
1513 		    sizeof (wwn_ptr->raw_wwn)) == 0) {
1514 			/* This ioctl is requesting INQ info of local HBA */
1515 			mutex_exit(&pptr->port_mutex);
1516 			dev_data[0].dev0_type = DTYPE_UNKNOWN;
1517 			dev_data[0].dev_status = 0;
1518 			if (ddi_copyout(dev_data, fioctl.list,
1519 			    (sizeof (struct device_data)) * fioctl.listlen,
1520 			    mode)) {
1521 				kmem_free(dev_data,
1522 				    sizeof (*dev_data) * fioctl.listlen);
1523 				return (EFAULT);
1524 			}
1525 			kmem_free(dev_data,
1526 			    sizeof (*dev_data) * fioctl.listlen);
1527 #ifdef	_MULTI_DATAMODEL
1528 			switch (ddi_model_convert_from(mode & FMODELS)) {
1529 			case DDI_MODEL_ILP32: {
1530 				struct fcp32_ioctl f32_ioctl;
1531 				f32_ioctl.fp_minor = fioctl.fp_minor;
1532 				f32_ioctl.listlen = fioctl.listlen;
1533 				f32_ioctl.list = (caddr32_t)(long)fioctl.list;
1534 				if (ddi_copyout((void *)&f32_ioctl,
1535 				    (void *)data,
1536 				    sizeof (struct fcp32_ioctl), mode)) {
1537 					return (EFAULT);
1538 				}
1539 				break;
1540 			}
1541 			case DDI_MODEL_NONE:
1542 				if (ddi_copyout((void *)&fioctl, (void *)data,
1543 				    sizeof (struct fcp_ioctl), mode)) {
1544 					return (EFAULT);
1545 				}
1546 				break;
1547 			}
1548 #else	/* _MULTI_DATAMODEL */
1549 			if (ddi_copyout((void *)&fioctl, (void *)data,
1550 			    sizeof (struct fcp_ioctl), mode)) {
1551 				return (EFAULT);
1552 			}
1553 #endif	/* _MULTI_DATAMODEL */
1554 			return (0);
1555 		}
1556 	}
1557 
1558 	if (pptr->port_state & (FCP_STATE_INIT | FCP_STATE_OFFLINE)) {
1559 		kmem_free(dev_data, sizeof (*dev_data) * fioctl.listlen);
1560 		mutex_exit(&pptr->port_mutex);
1561 		return (ENXIO);
1562 	}
1563 
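	/*
	 * Walk the caller-supplied list.  The loop also stops if the link
	 * count changes underneath us, so we don't keep reporting device
	 * state across a link bounce.
	 */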
1564 	for (i = 0; (i < fioctl.listlen) && (link_cnt == pptr->port_link_cnt);
1565 	    i++) {
1566 		wwn_ptr = (la_wwn_t *)&(dev_data[i].dev_pwwn);
1567 
1568 		dev_data[i].dev0_type = DTYPE_UNKNOWN;
1569 
1570 
1571 		dev_data[i].dev_status = ENXIO;
1572 
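		/*
		 * If FCP doesn't know this target, ask the transport: if
		 * fp/fctl doesn't know it either the device is gone
		 * (ENODEV); if it does, discovery just hasn't caught up
		 * yet (EAGAIN).
		 */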
1573 		if ((ptgt = fcp_lookup_target(pptr,
1574 		    (uchar_t *)wwn_ptr)) == NULL) {
1575 			mutex_exit(&pptr->port_mutex);
1576 			if (fc_ulp_get_remote_port(pptr->port_fp_handle,
1577 			    wwn_ptr, &error, 0) == NULL) {
1578 				dev_data[i].dev_status = ENODEV;
1579 				mutex_enter(&pptr->port_mutex);
1580 				continue;
1581 			} else {
1582 
1583 				dev_data[i].dev_status = EAGAIN;
1584 
1585 				mutex_enter(&pptr->port_mutex);
1586 				continue;
1587 			}
1588 		} else {
1589 			mutex_enter(&ptgt->tgt_mutex);
1590 			if (ptgt->tgt_state & (FCP_TGT_MARK |
1591 			    FCP_TGT_BUSY)) {
1592 				dev_data[i].dev_status = EAGAIN;
1593 				mutex_exit(&ptgt->tgt_mutex);
1594 				continue;
1595 			}
1596 
1597 			if (ptgt->tgt_state & FCP_TGT_OFFLINE) {
1598 				if (ptgt->tgt_icap && !ptgt->tgt_tcap) {
1599 					dev_data[i].dev_status = ENOTSUP;
1600 				} else {
1601 					dev_data[i].dev_status = ENXIO;
1602 				}
1603 				mutex_exit(&ptgt->tgt_mutex);
1604 				continue;
1605 			}
1606 
1607 			switch (cmd) {
1608 			case FCP_TGT_INQUIRY:
1609 				/*
1610 				 * We report the device type of LUN 0
1611 				 * only, even though in some cases (like
1612 				 * maxstrat) the LUN 0 device type may be
1613 				 * 0x3f (invalid), because for bridge
1614 				 * boxes targets appear as LUNs and the
1615 				 * first LUN could be a device that the
1616 				 * utility may not care about (like a
1617 				 * tape device).
1618 				 */
1619 				dev_data[i].dev_lun_cnt = ptgt->tgt_lun_cnt;
1620 				dev_data[i].dev_status = 0;
1621 				mutex_exit(&ptgt->tgt_mutex);
1622 
1623 				if ((plun = fcp_get_lun(ptgt, 0)) == NULL) {
1624 					dev_data[i].dev0_type = DTYPE_UNKNOWN;
1625 				} else {
1626 					dev_data[i].dev0_type = plun->lun_type;
1627 				}
1628 				mutex_enter(&ptgt->tgt_mutex);
1629 				break;
1630 
1631 			case FCP_TGT_CREATE:
1632 				mutex_exit(&ptgt->tgt_mutex);
1633 				mutex_exit(&pptr->port_mutex);
1634 
1635 				/*
1636 				 * Serialize state change callbacks;
1637 				 * only one callback will be handled
1638 				 * at a time.
1639 				 */
1640 				mutex_enter(&fcp_global_mutex);
1641 				if (fcp_oflag & FCP_BUSY) {
1642 					mutex_exit(&fcp_global_mutex);
1643 					if (dev_data) {
1644 						kmem_free(dev_data,
1645 						    sizeof (*dev_data) *
1646 						    fioctl.listlen);
1647 					}
1648 					return (EBUSY);
1649 				}
1650 				fcp_oflag |= FCP_BUSY;
1651 				mutex_exit(&fcp_global_mutex);
1652 
1653 				dev_data[i].dev_status =
1654 				    fcp_create_on_demand(pptr,
1655 				    wwn_ptr->raw_wwn);
1656 
1657 				if (dev_data[i].dev_status != 0) {
1658 					char	buf[25];
1659 
1660 					for (i = 0; i < FC_WWN_SIZE; i++) {
1661 						(void) sprintf(&buf[i << 1],
1662 						    "%02x",
1663 						    wwn_ptr->raw_wwn[i]);
1664 					}
1665 
1666 					fcp_log(CE_WARN, pptr->port_dip,
1667 					    "!Failed to create nodes for"
1668 					    " pwwn=%s; error=%x", buf,
1669 					    dev_data[i].dev_status);
1670 				}
1671 
1672 				/* allow state change callbacks again */
1673 				mutex_enter(&fcp_global_mutex);
1674 				fcp_oflag &= ~FCP_BUSY;
1675 				mutex_exit(&fcp_global_mutex);
1676 
1677 				mutex_enter(&pptr->port_mutex);
1678 				mutex_enter(&ptgt->tgt_mutex);
1679 
1680 				break;
1681 
1682 			case FCP_TGT_DELETE:
1683 				break;
1684 
1685 			default:
1686 				fcp_log(CE_WARN, pptr->port_dip,
1687 				    "!Invalid device data ioctl "
1688 				    "opcode = 0x%x", cmd);
1689 			}
1690 			mutex_exit(&ptgt->tgt_mutex);
1691 		}
1692 	}
1693 	mutex_exit(&pptr->port_mutex);
1694 
1695 	if (ddi_copyout(dev_data, fioctl.list,
1696 	    (sizeof (struct device_data)) * fioctl.listlen, mode)) {
1697 		kmem_free(dev_data, sizeof (*dev_data) * fioctl.listlen);
1698 		return (EFAULT);
1699 	}
1700 	kmem_free(dev_data, sizeof (*dev_data) * fioctl.listlen);
1701 
1702 #ifdef	_MULTI_DATAMODEL
1703 	switch (ddi_model_convert_from(mode & FMODELS)) {
1704 	case DDI_MODEL_ILP32: {
1705 		struct fcp32_ioctl f32_ioctl;
1706 
1707 		f32_ioctl.fp_minor = fioctl.fp_minor;
1708 		f32_ioctl.listlen = fioctl.listlen;
1709 		f32_ioctl.list = (caddr32_t)(long)fioctl.list;
1710 		if (ddi_copyout((void *)&f32_ioctl, (void *)data,
1711 		    sizeof (struct fcp32_ioctl), mode)) {
1712 			return (EFAULT);
1713 		}
1714 		break;
1715 	}
1716 	case DDI_MODEL_NONE:
1717 		if (ddi_copyout((void *)&fioctl, (void *)data,
1718 		    sizeof (struct fcp_ioctl), mode)) {
1719 			return (EFAULT);
1720 		}
1721 		break;
1722 	}
1723 #else	/* _MULTI_DATAMODEL */
1724 
1725 	if (ddi_copyout((void *)&fioctl, (void *)data,
1726 	    sizeof (struct fcp_ioctl), mode)) {
1727 		return (EFAULT);
1728 	}
1729 #endif	/* _MULTI_DATAMODEL */
1730 
1731 	return (0);
1732 }
1733 
1734 /*
1735  * Fetch the target mappings (path, etc.) for all LUNs
1736  * on this port.
1737  */
1738 /* ARGSUSED */
1739 static int
1740 fcp_get_target_mappings(struct fcp_ioctl *data,
1741     int mode, int *rval)
1742 {
1743 	struct fcp_port	    *pptr;
1744 	fc_hba_target_mappings_t    *mappings;
1745 	fc_hba_mapping_entry_t	    *map;
1746 	struct fcp_tgt	    *ptgt = NULL;
1747 	struct fcp_lun	    *plun = NULL;
1748 	int			    i, mapIndex, mappingSize;
1749 	int			    listlen;
1750 	struct fcp_ioctl	    fioctl;
1751 	char			    *path;
1752 	fcp_ent_addr_t		    sam_lun_addr;
1753 
1754 #ifdef	_MULTI_DATAMODEL
1755 	switch (ddi_model_convert_from(mode & FMODELS)) {
1756 	case DDI_MODEL_ILP32: {
1757 		struct fcp32_ioctl f32_ioctl;
1758 
1759 		if (ddi_copyin((void *)data, (void *)&f32_ioctl,
1760 		    sizeof (struct fcp32_ioctl), mode)) {
1761 			return (EFAULT);
1762 		}
1763 		fioctl.fp_minor = f32_ioctl.fp_minor;
1764 		fioctl.listlen = f32_ioctl.listlen;
1765 		fioctl.list = (caddr_t)(long)f32_ioctl.list;
1766 		break;
1767 	}
1768 	case DDI_MODEL_NONE:
1769 		if (ddi_copyin((void *)data, (void *)&fioctl,
1770 		    sizeof (struct fcp_ioctl), mode)) {
1771 			return (EFAULT);
1772 		}
1773 		break;
1774 	}
1775 
1776 #else	/* _MULTI_DATAMODEL */
1777 	if (ddi_copyin((void *)data, (void *)&fioctl,
1778 	    sizeof (struct fcp_ioctl), mode)) {
1779 		return (EFAULT);
1780 	}
1781 #endif	/* _MULTI_DATAMODEL */
1782 
1783 	/*
1784 	 * Right now we can assume that the minor number matches with
1785 	 * this instance of fp. If this changes we will need to
1786 	 * revisit this logic.
1787 	 */
1788 	mutex_enter(&fcp_global_mutex);
1789 	pptr = fcp_port_head;
1790 	while (pptr) {
1791 		if (pptr->port_instance == (uint32_t)fioctl.fp_minor) {
1792 			break;
1793 		} else {
1794 			pptr = pptr->port_next;
1795 		}
1796 	}
1797 	mutex_exit(&fcp_global_mutex);
1798 	if (pptr == NULL) {
1799 		cmn_err(CE_NOTE, "target mappings: unknown instance number: %d",
1800 		    fioctl.fp_minor);
1801 		return (ENXIO);
1802 	}
1803 
1804 
1805 	/* The caller passes the total buffer size in listlen */
1806 	mappingSize = fioctl.listlen;
1807 
1808 	/* Now calculate how many mapping entries will fit */
1809 	listlen = fioctl.listlen + sizeof (fc_hba_mapping_entry_t)
1810 	    - sizeof (fc_hba_target_mappings_t);
1811 	if (listlen <= 0) {
1812 		cmn_err(CE_NOTE, "target mappings: Insufficient buffer");
1813 		return (ENXIO);
1814 	}
1815 	listlen = listlen / sizeof (fc_hba_mapping_entry_t);
1816 
1817 	if ((mappings = kmem_zalloc(mappingSize, KM_SLEEP)) == NULL) {
1818 		return (ENOMEM);
1819 	}
1820 	mappings->version = FC_HBA_TARGET_MAPPINGS_VERSION;
1821 
1822 	/* Now get to work */
1823 	mapIndex = 0;
1824 
1825 	mutex_enter(&pptr->port_mutex);
1826 	/* Loop through all targets on this port */
1827 	for (i = 0; i < FCP_NUM_HASH; i++) {
1828 		for (ptgt = pptr->port_tgt_hash_table[i]; ptgt != NULL;
1829 		    ptgt = ptgt->tgt_next) {
1830 
1831 			mutex_enter(&ptgt->tgt_mutex);
1832 
1833 			/* Loop through all LUNs on this target */
1834 			for (plun = ptgt->tgt_lun; plun != NULL;
1835 			    plun = plun->lun_next) {
1836 				if (plun->lun_state & FCP_LUN_OFFLINE) {
1837 					continue;
1838 				}
1839 
1840 				path = fcp_get_lun_path(plun);
1841 				if (path == NULL) {
1842 					continue;
1843 				}
1844 
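				/*
				 * Keep counting even when the caller's
				 * buffer is full so that numLuns below
				 * reflects the total number of online
				 * LUNs; the extra entries are simply
				 * not stored.
				 */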
1845 				if (mapIndex >= listlen) {
1846 					mapIndex++;
1847 					kmem_free(path, MAXPATHLEN);
1848 					continue;
1849 				}
1850 				map = &mappings->entries[mapIndex++];
1851 				bcopy(path, map->targetDriver,
1852 				    sizeof (map->targetDriver));
1853 				map->d_id = ptgt->tgt_d_id;
1854 				map->busNumber = 0;
1855 				map->targetNumber = ptgt->tgt_d_id;
1856 				map->osLUN = plun->lun_num;
1857 
1858 				/*
1859 				 * We had swapped lun when we stored it in
1860 				 * lun_addr. We need to swap it back before
1861 				 * returning it to user land
1862 				 */
1863 
1864 				sam_lun_addr.ent_addr_0 =
1865 				    BE_16(plun->lun_addr.ent_addr_0);
1866 				sam_lun_addr.ent_addr_1 =
1867 				    BE_16(plun->lun_addr.ent_addr_1);
1868 				sam_lun_addr.ent_addr_2 =
1869 				    BE_16(plun->lun_addr.ent_addr_2);
1870 				sam_lun_addr.ent_addr_3 =
1871 				    BE_16(plun->lun_addr.ent_addr_3);
1872 
1873 				bcopy(&sam_lun_addr, &map->samLUN,
1874 				    FCP_LUN_SIZE);
1875 				bcopy(ptgt->tgt_node_wwn.raw_wwn,
1876 				    map->NodeWWN.raw_wwn, sizeof (la_wwn_t));
1877 				bcopy(ptgt->tgt_port_wwn.raw_wwn,
1878 				    map->PortWWN.raw_wwn, sizeof (la_wwn_t));
1879 
1880 				if (plun->lun_guid) {
1881 
1882 					/* convert ascii wwn to bytes */
1883 					fcp_ascii_to_wwn(plun->lun_guid,
1884 					    map->guid, sizeof (map->guid));
1885 
1886 					if ((sizeof (map->guid)) <
1887 					    plun->lun_guid_size / 2) {
1888 						cmn_err(CE_WARN,
1889 						    "fcp_get_target_mappings: "
1890 						    "guid copy space "
1891 						    "insufficient. "
1892 						    "Copy truncation - "
1893 						    "available %d; need %d",
1894 						    (int)sizeof (map->guid),
1895 						    (int)
1896 						    plun->lun_guid_size / 2);
1897 					}
1898 				}
1899 				kmem_free(path, MAXPATHLEN);
1900 			}
1901 			mutex_exit(&ptgt->tgt_mutex);
1902 		}
1903 	}
1904 	mutex_exit(&pptr->port_mutex);
1905 	mappings->numLuns = mapIndex;
1906 
1907 	if (ddi_copyout(mappings, fioctl.list, mappingSize, mode)) {
1908 		kmem_free(mappings, mappingSize);
1909 		return (EFAULT);
1910 	}
1911 	kmem_free(mappings, mappingSize);
1912 
1913 #ifdef	_MULTI_DATAMODEL
1914 	switch (ddi_model_convert_from(mode & FMODELS)) {
1915 	case DDI_MODEL_ILP32: {
1916 		struct fcp32_ioctl f32_ioctl;
1917 
1918 		f32_ioctl.fp_minor = fioctl.fp_minor;
1919 		f32_ioctl.listlen = fioctl.listlen;
1920 		f32_ioctl.list = (caddr32_t)(long)fioctl.list;
1921 		if (ddi_copyout((void *)&f32_ioctl, (void *)data,
1922 		    sizeof (struct fcp32_ioctl), mode)) {
1923 			return (EFAULT);
1924 		}
1925 		break;
1926 	}
1927 	case DDI_MODEL_NONE:
1928 		if (ddi_copyout((void *)&fioctl, (void *)data,
1929 		    sizeof (struct fcp_ioctl), mode)) {
1930 			return (EFAULT);
1931 		}
1932 		break;
1933 	}
1934 #else	/* _MULTI_DATAMODEL */
1935 
1936 	if (ddi_copyout((void *)&fioctl, (void *)data,
1937 	    sizeof (struct fcp_ioctl), mode)) {
1938 		return (EFAULT);
1939 	}
1940 #endif	/* _MULTI_DATAMODEL */
1941 
1942 	return (0);
1943 }
1944 
1945 /*
1946  * fcp_setup_scsi_ioctl
1947  *	Setup handler for the "scsi passthru" style of
1948  *	ioctl for FCP.	See "fcp_util.h" for data structure
1949  *	definition.
1950  *
1951  * Input:
1952  *	u_fscsi	= ioctl data (user address space)
1953  *	mode	= See ioctl(9E)
1954  *
1955  * Output:
1956  *	u_fscsi	= ioctl data (user address space)
1957  *	rval	= return value - see ioctl(9E)
1958  *
1959  * Returns:
1960  *	0	= OK
1961  *	EAGAIN	= See errno.h
1962  *	EBUSY	= See errno.h
1963  *	EFAULT	= See errno.h
1964  *	EINTR	= See errno.h
1965  *	EINVAL	= See errno.h
1966  *	EIO	= See errno.h
1967  *	ENOMEM	= See errno.h
1968  *	ENXIO	= See errno.h
1969  *
1970  * Context:
1971  *	Kernel context.
1972  */
1973 /* ARGSUSED */
1974 static int
1975 fcp_setup_scsi_ioctl(struct fcp_scsi_cmd *u_fscsi,
1976     int mode, int *rval)
1977 {
1978 	int			ret		= 0;
1979 	int			temp_ret;
1980 	caddr_t			k_cdbbufaddr	= NULL;
1981 	caddr_t			k_bufaddr	= NULL;
1982 	caddr_t			k_rqbufaddr	= NULL;
1983 	caddr_t			u_cdbbufaddr;
1984 	caddr_t			u_bufaddr;
1985 	caddr_t			u_rqbufaddr;
1986 	struct fcp_scsi_cmd	k_fscsi;
1987 
1988 	/*
1989 	 * Get fcp_scsi_cmd array element from user address space
1990 	 */
1991 	if ((ret = fcp_copyin_scsi_cmd((caddr_t)u_fscsi, &k_fscsi, mode))
1992 	    != 0) {
1993 		return (ret);
1994 	}
1995 
1996 
1997 	/*
1998 	 * Even though kmem_alloc() checks the validity of the
1999 	 * buffer length, this explicit check is needed in case a
2000 	 * zero or negative buffer length is passed in.
2001 	 */
2002 	if ((k_fscsi.scsi_cdblen <= 0) ||
2003 	    (k_fscsi.scsi_buflen <= 0) ||
2004 	    (k_fscsi.scsi_rqlen <= 0)) {
2005 		return (EINVAL);
2006 	}
2007 
2008 	/*
2009 	 * Allocate data for fcp_scsi_cmd pointer fields
2010 	 */
2011 	if (ret == 0) {
2012 		k_cdbbufaddr = kmem_alloc(k_fscsi.scsi_cdblen, KM_NOSLEEP);
2013 		k_bufaddr    = kmem_alloc(k_fscsi.scsi_buflen, KM_NOSLEEP);
2014 		k_rqbufaddr  = kmem_alloc(k_fscsi.scsi_rqlen,  KM_NOSLEEP);
2015 
2016 		if (k_cdbbufaddr == NULL ||
2017 		    k_bufaddr	 == NULL ||
2018 		    k_rqbufaddr	 == NULL) {
2019 			ret = ENOMEM;
2020 		}
2021 	}
2022 
2023 	/*
2024 	 * Get fcp_scsi_cmd pointer fields from user
2025 	 * address space
2026 	 */
2027 	if (ret == 0) {
2028 		u_cdbbufaddr = k_fscsi.scsi_cdbbufaddr;
2029 		u_bufaddr    = k_fscsi.scsi_bufaddr;
2030 		u_rqbufaddr  = k_fscsi.scsi_rqbufaddr;
2031 
2032 		if (ddi_copyin(u_cdbbufaddr,
2033 		    k_cdbbufaddr,
2034 		    k_fscsi.scsi_cdblen,
2035 		    mode)) {
2036 			ret = EFAULT;
2037 		} else if (ddi_copyin(u_bufaddr,
2038 		    k_bufaddr,
2039 		    k_fscsi.scsi_buflen,
2040 		    mode)) {
2041 			ret = EFAULT;
2042 		} else if (ddi_copyin(u_rqbufaddr,
2043 		    k_rqbufaddr,
2044 		    k_fscsi.scsi_rqlen,
2045 		    mode)) {
2046 			ret = EFAULT;
2047 		}
2048 	}
2049 
2050 	/*
2051 	 * Send scsi command (blocking)
2052 	 */
2053 	if (ret == 0) {
2054 		/*
2055 		 * Prior to sending the scsi command, the
2056 		 * fcp_scsi_cmd data structure must contain kernel,
2057 		 * not user, addresses.
2058 		 */
2059 		k_fscsi.scsi_cdbbufaddr	= k_cdbbufaddr;
2060 		k_fscsi.scsi_bufaddr	= k_bufaddr;
2061 		k_fscsi.scsi_rqbufaddr	= k_rqbufaddr;
2062 
2063 		ret = fcp_send_scsi_ioctl(&k_fscsi);
2064 
2065 		/*
2066 		 * After sending the scsi command, the
2067 		 * fcp_scsi_cmd data structure must contain user,
2068 		 * not kernel, addresses.
2069 		 */
2070 		k_fscsi.scsi_cdbbufaddr	= u_cdbbufaddr;
2071 		k_fscsi.scsi_bufaddr	= u_bufaddr;
2072 		k_fscsi.scsi_rqbufaddr	= u_rqbufaddr;
2073 	}
2074 
2075 	/*
2076 	 * Put fcp_scsi_cmd pointer fields to user address space
2077 	 */
2078 	if (ret == 0) {
2079 		if (ddi_copyout(k_cdbbufaddr,
2080 		    u_cdbbufaddr,
2081 		    k_fscsi.scsi_cdblen,
2082 		    mode)) {
2083 			ret = EFAULT;
2084 		} else if (ddi_copyout(k_bufaddr,
2085 		    u_bufaddr,
2086 		    k_fscsi.scsi_buflen,
2087 		    mode)) {
2088 			ret = EFAULT;
2089 		} else if (ddi_copyout(k_rqbufaddr,
2090 		    u_rqbufaddr,
2091 		    k_fscsi.scsi_rqlen,
2092 		    mode)) {
2093 			ret = EFAULT;
2094 		}
2095 	}
2096 
2097 	/*
2098 	 * Free data for fcp_scsi_cmd pointer fields
2099 	 */
2100 	if (k_cdbbufaddr != NULL) {
2101 		kmem_free(k_cdbbufaddr, k_fscsi.scsi_cdblen);
2102 	}
2103 	if (k_bufaddr != NULL) {
2104 		kmem_free(k_bufaddr, k_fscsi.scsi_buflen);
2105 	}
2106 	if (k_rqbufaddr != NULL) {
2107 		kmem_free(k_rqbufaddr, k_fscsi.scsi_rqlen);
2108 	}
2109 
2110 	/*
2111 	 * Put fcp_scsi_cmd array element to user address space
2112 	 */
2113 	temp_ret = fcp_copyout_scsi_cmd(&k_fscsi, (caddr_t)u_fscsi, mode);
2114 	if (temp_ret != 0) {
2115 		ret = temp_ret;
2116 	}
2117 
2118 	/*
2119 	 * Return status
2120 	 */
2121 	return (ret);
2122 }
2123 
2124 
2125 /*
2126  * fcp_copyin_scsi_cmd
2127  *	Copy in fcp_scsi_cmd data structure from user address space.
2128  *	The data may be in 32 bit or 64 bit modes.
2129  *
2130  * Input:
2131  *	base_addr	= from address (user address space)
2132  *	mode		= See ioctl(9E) and ddi_copyin(9F)
2133  *
2134  * Output:
2135  *	fscsi		= to address (kernel address space)
2136  *
2137  * Returns:
2138  *	0	= OK
2139  *	EFAULT	= Error
2140  *
2141  * Context:
2142  *	Kernel context.
2143  */
2144 static int
2145 fcp_copyin_scsi_cmd(caddr_t base_addr, struct fcp_scsi_cmd *fscsi, int mode)
2146 {
2147 #ifdef	_MULTI_DATAMODEL
2148 	struct fcp32_scsi_cmd	f32scsi;
2149 
2150 	switch (ddi_model_convert_from(mode & FMODELS)) {
2151 	case DDI_MODEL_ILP32:
2152 		/*
2153 		 * Copy data from user address space
2154 		 */
2155 		if (ddi_copyin((void *)base_addr,
2156 		    &f32scsi,
2157 		    sizeof (struct fcp32_scsi_cmd),
2158 		    mode)) {
2159 			return (EFAULT);
2160 		}
2161 		/*
2162 		 * Convert from 32 bit to 64 bit
2163 		 */
2164 		FCP32_SCSI_CMD_TO_FCP_SCSI_CMD(&f32scsi, fscsi);
2165 		break;
2166 	case DDI_MODEL_NONE:
2167 		/*
2168 		 * Copy data from user address space
2169 		 */
2170 		if (ddi_copyin((void *)base_addr,
2171 		    fscsi,
2172 		    sizeof (struct fcp_scsi_cmd),
2173 		    mode)) {
2174 			return (EFAULT);
2175 		}
2176 		break;
2177 	}
2178 #else	/* _MULTI_DATAMODEL */
2179 	/*
2180 	 * Copy data from user address space
2181 	 */
2182 	if (ddi_copyin((void *)base_addr,
2183 	    fscsi,
2184 	    sizeof (struct fcp_scsi_cmd),
2185 	    mode)) {
2186 		return (EFAULT);
2187 	}
2188 #endif	/* _MULTI_DATAMODEL */
2189 
2190 	return (0);
2191 }
2192 
2193 
2194 /*
2195  * fcp_copyout_scsi_cmd
2196  *	Copy out fcp_scsi_cmd data structure to user address space.
2197  *	The data may be in 32 bit or 64 bit modes.
2198  *
2199  * Input:
2200  *	fscsi		= to address (kernel address space)
2201  *	mode		= See ioctl(9E) and ddi_copyin(9F)
2202  *
2203  * Output:
2204  *	base_addr	= from address (user address space)
2205  *
2206  * Returns:
2207  *	0	= OK
2208  *	EFAULT	= Error
2209  *
2210  * Context:
2211  *	Kernel context.
2212  */
2213 static int
2214 fcp_copyout_scsi_cmd(struct fcp_scsi_cmd *fscsi, caddr_t base_addr, int mode)
2215 {
2216 #ifdef	_MULTI_DATAMODEL
2217 	struct fcp32_scsi_cmd	f32scsi;
2218 
2219 	switch (ddi_model_convert_from(mode & FMODELS)) {
2220 	case DDI_MODEL_ILP32:
2221 		/*
2222 		 * Convert from 64 bit to 32 bit
2223 		 */
2224 		FCP_SCSI_CMD_TO_FCP32_SCSI_CMD(fscsi, &f32scsi);
2225 		/*
2226 		 * Copy data to user address space
2227 		 */
2228 		if (ddi_copyout(&f32scsi,
2229 		    (void *)base_addr,
2230 		    sizeof (struct fcp32_scsi_cmd),
2231 		    mode)) {
2232 			return (EFAULT);
2233 		}
2234 		break;
2235 	case DDI_MODEL_NONE:
2236 		/*
2237 		 * Copy data to user address space
2238 		 */
2239 		if (ddi_copyout(fscsi,
2240 		    (void *)base_addr,
2241 		    sizeof (struct fcp_scsi_cmd),
2242 		    mode)) {
2243 			return (EFAULT);
2244 		}
2245 		break;
2246 	}
2247 #else	/* _MULTI_DATAMODEL */
2248 	/*
2249 	 * Copy data to user address space
2250 	 */
2251 	if (ddi_copyout(fscsi,
2252 	    (void *)base_addr,
2253 	    sizeof (struct fcp_scsi_cmd),
2254 	    mode)) {
2255 		return (EFAULT);
2256 	}
2257 #endif	/* _MULTI_DATAMODEL */
2258 
2259 	return (0);
2260 }
2261 
2262 
2263 /*
2264  * fcp_send_scsi_ioctl
2265  *	Sends the SCSI command in blocking mode.
2266  *
2267  * Input:
2268  *	fscsi		= SCSI command data structure
2269  *
2270  * Output:
2271  *	fscsi		= SCSI command data structure
2272  *
2273  * Returns:
2274  *	0	= OK
2275  *	EAGAIN	= See errno.h
2276  *	EBUSY	= See errno.h
2277  *	EINTR	= See errno.h
2278  *	EINVAL	= See errno.h
2279  *	EIO	= See errno.h
2280  *	ENOMEM	= See errno.h
2281  *	ENXIO	= See errno.h
2282  *
2283  * Context:
2284  *	Kernel context.
2285  */
2286 static int
2287 fcp_send_scsi_ioctl(struct fcp_scsi_cmd *fscsi)
2288 {
2289 	struct fcp_lun	*plun		= NULL;
2290 	struct fcp_port	*pptr		= NULL;
2291 	struct fcp_tgt	*ptgt		= NULL;
2292 	fc_packet_t		*fpkt		= NULL;
2293 	struct fcp_ipkt	*icmd		= NULL;
2294 	int			target_created	= FALSE;
2295 	fc_frame_hdr_t		*hp;
2296 	struct fcp_cmd		fcp_cmd;
2297 	struct fcp_cmd		*fcmd;
2298 	union scsi_cdb		*scsi_cdb;
2299 	la_wwn_t		*wwn_ptr;
2300 	int			nodma;
2301 	struct fcp_rsp		*rsp;
2302 	struct fcp_rsp_info	*rsp_info;
2303 	caddr_t			rsp_sense;
2304 	int			buf_len;
2305 	int			info_len;
2306 	int			sense_len;
2307 	struct scsi_extended_sense	*sense_to = NULL;
2308 	timeout_id_t		tid;
2309 	uint8_t			reconfig_lun = FALSE;
2310 	uint8_t			reconfig_pending = FALSE;
2311 	uint8_t			scsi_cmd;
2312 	int			rsp_len;
2313 	int			cmd_index;
2314 	int			fc_status;
2315 	int			pkt_state;
2316 	int			pkt_action;
2317 	int			pkt_reason;
2318 	int			ret, xport_retval = ~FC_SUCCESS;
2319 	int			lcount;
2320 	int			tcount;
2321 	int			reconfig_status;
2322 	int			port_busy = FALSE;
2323 	uchar_t			*lun_string;
2324 
2325 	/*
2326 	 * Check the CDB opcode against the allowed list (scsi_ioctl_list)
2327 	 */
2328 	scsi_cmd = ((uint8_t *)fscsi->scsi_cdbbufaddr)[0];
2329 	ret = EINVAL;
2330 	for (cmd_index = 0;
2331 	    cmd_index < FCP_NUM_ELEMENTS(scsi_ioctl_list) &&
2332 	    ret != 0;
2333 	    cmd_index++) {
2334 		/*
2335 		 * First byte of CDB is the SCSI command
2336 		 */
2337 		if (scsi_ioctl_list[cmd_index] == scsi_cmd) {
2338 			ret = 0;
2339 		}
2340 	}
2341 
2342 	/*
2343 	 * Check inputs
2344 	 */
2345 	if (fscsi->scsi_flags != FCP_SCSI_READ) {
2346 		ret = EINVAL;
2347 	} else if (fscsi->scsi_cdblen > FCP_CDB_SIZE) {
2348 		/* no larger than */
2349 		ret = EINVAL;
2350 	}
2351 
2352 
2353 	/*
2354 	 * Find FC port
2355 	 */
2356 	if (ret == 0) {
2357 		/*
2358 		 * Acquire global mutex
2359 		 */
2360 		mutex_enter(&fcp_global_mutex);
2361 
2362 		pptr = fcp_port_head;
2363 		while (pptr) {
2364 			if (pptr->port_instance ==
2365 			    (uint32_t)fscsi->scsi_fc_port_num) {
2366 				break;
2367 			} else {
2368 				pptr = pptr->port_next;
2369 			}
2370 		}
2371 
2372 		if (pptr == NULL) {
2373 			ret = ENXIO;
2374 		} else {
2375 			/*
2376 			 * fc_ulp_busy_port can raise power, so we must not
2377 			 * hold any mutexes involved in PM.
2378 			 */
2379 			mutex_exit(&fcp_global_mutex);
2380 			ret = fc_ulp_busy_port(pptr->port_fp_handle);
2381 		}
2382 
2383 		if (ret == 0) {
2384 
2385 			/* remember port is busy, so we will release later */
2386 			port_busy = TRUE;
2387 
2388 			/*
2389 			 * If there is a reconfiguration in progress, wait
2390 			 * for it to complete.
2391 			 */
2392 
2393 			fcp_reconfig_wait(pptr);
2394 
2395 			/* reacquire mutexes in order */
2396 			mutex_enter(&fcp_global_mutex);
2397 			mutex_enter(&pptr->port_mutex);
2398 
2399 			/*
2400 			 * Will port accept DMA?
2401 			 */
2402 			nodma = (pptr->port_fcp_dma == FC_NO_DVMA_SPACE)
2403 			    ? 1 : 0;
2404 
2405 			/*
2406 			 * If init or offline, devices are not known.
2407 			 *
2408 			 * If we are discovering (onlining), we obviously
2409 			 * cannot provide reliable data about devices
2410 			 * until discovery is complete.
2411 			 */
2412 			if (pptr->port_state & (FCP_STATE_INIT |
2413 			    FCP_STATE_OFFLINE)) {
2414 				ret = ENXIO;
2415 			} else if (pptr->port_state & FCP_STATE_ONLINING) {
2416 				ret = EBUSY;
2417 			} else {
2418 				/*
2419 				 * Find target from pwwn
2420 				 *
2421 				 * The wwn must be put into a local
2422 				 * variable to ensure alignment.
2423 				 */
2424 				wwn_ptr = (la_wwn_t *)&(fscsi->scsi_fc_pwwn);
2425 				ptgt = fcp_lookup_target(pptr,
2426 				    (uchar_t *)wwn_ptr);
2427 
2428 				/*
2429 				 * If the target was not found, create it.
2430 				 */
2431 				if (ptgt == NULL) {
2432 					/*
2433 					 * Note: Still have global &
2434 					 * port mutexes
2435 					 */
2436 					mutex_exit(&pptr->port_mutex);
2437 					ptgt = fcp_port_create_tgt(pptr,
2438 					    wwn_ptr, &ret, &fc_status,
2439 					    &pkt_state, &pkt_action,
2440 					    &pkt_reason);
2441 					mutex_enter(&pptr->port_mutex);
2442 
2443 					fscsi->scsi_fc_status  = fc_status;
2444 					fscsi->scsi_pkt_state  =
2445 					    (uchar_t)pkt_state;
2446 					fscsi->scsi_pkt_reason = pkt_reason;
2447 					fscsi->scsi_pkt_action =
2448 					    (uchar_t)pkt_action;
2449 
2450 					if (ptgt != NULL) {
2451 						target_created = TRUE;
2452 					} else if (ret == 0) {
2453 						ret = ENOMEM;
2454 					}
2455 				}
2456 
2457 				if (ret == 0) {
2458 					/*
2459 					 * Acquire target
2460 					 */
2461 					mutex_enter(&ptgt->tgt_mutex);
2462 
2463 					/*
2464 					 * If the target is marked or busy,
2465 					 * it cannot be used.
2466 					 */
2467 					if (ptgt->tgt_state &
2468 					    (FCP_TGT_MARK |
2469 					    FCP_TGT_BUSY)) {
2470 						ret = EBUSY;
2471 					} else {
2472 						/*
2473 						 * Mark target as busy
2474 						 */
2475 						ptgt->tgt_state |=
2476 						    FCP_TGT_BUSY;
2477 					}
2478 
2479 					/*
2480 					 * Release target
2481 					 */
2482 					lcount = pptr->port_link_cnt;
2483 					tcount = ptgt->tgt_change_cnt;
2484 					mutex_exit(&ptgt->tgt_mutex);
2485 				}
2486 			}
2487 
2488 			/*
2489 			 * Release port
2490 			 */
2491 			mutex_exit(&pptr->port_mutex);
2492 		}
2493 
2494 		/*
2495 		 * Release global mutex
2496 		 */
2497 		mutex_exit(&fcp_global_mutex);
2498 	}
2499 
2500 	if (ret == 0) {
2501 		uint64_t belun = BE_64(fscsi->scsi_lun);
2502 
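		/*
		 * With the LUN held as a big-endian 64-bit value, the
		 * addressing method sits in bits 63:62 and the first-level
		 * LUN number in bits 61:48; the checks below rely on that
		 * layout.
		 */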
2503 		/*
2504 		 * If it's a target device, find lun from pwwn
2505 		 * The wwn must be put into a local
2506 		 * variable to ensure alignment.
2507 		 */
2508 		mutex_enter(&pptr->port_mutex);
2509 		wwn_ptr = (la_wwn_t *)&(fscsi->scsi_fc_pwwn);
2510 		if (!ptgt->tgt_tcap && ptgt->tgt_icap) {
2511 			/* this is not a target */
2512 			fscsi->scsi_fc_status = FC_DEVICE_NOT_TGT;
2513 			ret = ENXIO;
2514 		} else if ((belun << 16) != 0) {
2515 			/*
2516 			 * Since fcp only supports the PD and LU addressing
2517 			 * methods so far, the last 6 bytes of a valid LUN
2518 			 * are expected to be filled with 00h.
2519 			 */
2520 			fscsi->scsi_fc_status = FC_INVALID_LUN;
2521 			cmn_err(CE_WARN, "fcp: Unsupported LUN addressing"
2522 			    " method 0x%02x with LUN number 0x%016" PRIx64,
2523 			    (uint8_t)(belun >> 62), belun);
2524 			ret = ENXIO;
2525 		} else if ((plun = fcp_lookup_lun(pptr, (uchar_t *)wwn_ptr,
2526 		    (uint16_t)((belun >> 48) & 0x3fff))) == NULL) {
2527 			/*
2528 			 * This is a SCSI target, but no LUN at this
2529 			 * address.
2530 			 *
2531 			 * In the future, we may want to send this to
2532 			 * the target, and let it respond
2533 			 * appropriately
2534 			 */
2535 			ret = ENXIO;
2536 		}
2537 		mutex_exit(&pptr->port_mutex);
2538 	}
2539 
2540 	/*
2541 	 * Finished grabbing external resources
2542 	 * Allocate internal packet (icmd)
2543 	 */
2544 	if (ret == 0) {
2545 		/*
2546 		 * Calc rsp len assuming rsp info included
2547 		 */
2548 		rsp_len = sizeof (struct fcp_rsp) +
2549 		    sizeof (struct fcp_rsp_info) + fscsi->scsi_rqlen;
2550 
2551 		icmd = fcp_icmd_alloc(pptr, ptgt,
2552 		    sizeof (struct fcp_cmd),
2553 		    rsp_len,
2554 		    fscsi->scsi_buflen,
2555 		    nodma,
2556 		    lcount,			/* ipkt_link_cnt */
2557 		    tcount,			/* ipkt_change_cnt */
2558 		    0,				/* cause */
2559 		    FC_INVALID_RSCN_COUNT);	/* invalidate the count */
2560 
2561 		if (icmd == NULL) {
2562 			ret = ENOMEM;
2563 		} else {
2564 			/*
2565 			 * Setup internal packet as sema sync
2566 			 */
2567 			fcp_ipkt_sema_init(icmd);
2568 		}
2569 	}
2570 
2571 	if (ret == 0) {
2572 		/*
2573 		 * Init fpkt pointer for use.
2574 		 */
2575 
2576 		fpkt = icmd->ipkt_fpkt;
2577 
2578 		fpkt->pkt_tran_flags	= FC_TRAN_CLASS3 | FC_TRAN_INTR;
2579 		fpkt->pkt_tran_type	= FC_PKT_FCP_READ; /* only rd for now */
2580 		fpkt->pkt_timeout	= fscsi->scsi_timeout;
2581 
2582 		/*
2583 		 * Init fcmd pointer for use by SCSI command
2584 		 */
2585 
2586 		if (nodma) {
2587 			fcmd = (struct fcp_cmd *)fpkt->pkt_cmd;
2588 		} else {
2589 			fcmd = &fcp_cmd;
2590 		}
2591 		bzero(fcmd, sizeof (struct fcp_cmd));
2592 		ptgt = plun->lun_tgt;
2593 
2594 		lun_string = (uchar_t *)&fscsi->scsi_lun;
2595 
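		/*
		 * Build the FCP entity address (the LUN field of the
		 * FCP_CMND payload) from the caller's 8-byte LUN, two
		 * bytes at a time.
		 */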
2596 		fcmd->fcp_ent_addr.ent_addr_0 =
2597 		    BE_16(*(uint16_t *)&(lun_string[0]));
2598 		fcmd->fcp_ent_addr.ent_addr_1 =
2599 		    BE_16(*(uint16_t *)&(lun_string[2]));
2600 		fcmd->fcp_ent_addr.ent_addr_2 =
2601 		    BE_16(*(uint16_t *)&(lun_string[4]));
2602 		fcmd->fcp_ent_addr.ent_addr_3 =
2603 		    BE_16(*(uint16_t *)&(lun_string[6]));
2604 
2605 		/*
2606 		 * Setup internal packet(icmd)
2607 		 */
2608 		icmd->ipkt_lun		= plun;
2609 		icmd->ipkt_restart	= 0;
2610 		icmd->ipkt_retries	= 0;
2611 		icmd->ipkt_opcode	= 0;
2612 
2613 		/*
2614 		 * Init the frame HEADER Pointer for use
2615 		 */
2616 		hp = &fpkt->pkt_cmd_fhdr;
2617 
2618 		hp->s_id	= pptr->port_id;
2619 		hp->d_id	= ptgt->tgt_d_id;
2620 		hp->r_ctl	= R_CTL_COMMAND;
2621 		hp->type	= FC_TYPE_SCSI_FCP;
2622 		hp->f_ctl	= F_CTL_SEQ_INITIATIVE | F_CTL_FIRST_SEQ;
2623 		hp->rsvd	= 0;
2624 		hp->seq_id	= 0;
2625 		hp->seq_cnt	= 0;
2626 		hp->ox_id	= 0xffff;
2627 		hp->rx_id	= 0xffff;
2628 		hp->ro		= 0;
2629 
2630 		fcmd->fcp_cntl.cntl_qtype	= FCP_QTYPE_SIMPLE;
2631 		fcmd->fcp_cntl.cntl_read_data	= 1;	/* only rd for now */
2632 		fcmd->fcp_cntl.cntl_write_data	= 0;
2633 		fcmd->fcp_data_len	= fscsi->scsi_buflen;
2634 
2635 		scsi_cdb = (union scsi_cdb *)fcmd->fcp_cdb;
2636 		bcopy((char *)fscsi->scsi_cdbbufaddr, (char *)scsi_cdb,
2637 		    fscsi->scsi_cdblen);
2638 
2639 		if (!nodma) {
2640 			FCP_CP_OUT((uint8_t *)fcmd, fpkt->pkt_cmd,
2641 			    fpkt->pkt_cmd_acc, sizeof (struct fcp_cmd));
2642 		}
2643 
2644 		/*
2645 		 * Send SCSI command to FC transport
2646 		 */
2647 
2648 		if (ret == 0) {
2649 			mutex_enter(&ptgt->tgt_mutex);
2650 
2651 			if (!FCP_TGT_STATE_CHANGED(ptgt, icmd)) {
2652 				mutex_exit(&ptgt->tgt_mutex);
2653 				fscsi->scsi_fc_status = xport_retval =
2654 				    fc_ulp_transport(pptr->port_fp_handle,
2655 				    fpkt);
2656 				if (fscsi->scsi_fc_status != FC_SUCCESS) {
2657 					ret = EIO;
2658 				}
2659 			} else {
2660 				mutex_exit(&ptgt->tgt_mutex);
2661 				ret = EBUSY;
2662 			}
2663 		}
2664 	}
2665 
2666 	/*
2667 	 * Wait for completion only if fc_ulp_transport was called and it
2668 	 * returned a success. This is the only time callback will happen.
2669 	 * Otherwise, there is no point in waiting
2670 	 */
2671 	if ((ret == 0) && (xport_retval == FC_SUCCESS)) {
2672 		ret = fcp_ipkt_sema_wait(icmd);
2673 	}
2674 
2675 	/*
2676 	 * Copy data to IOCTL data structures
2677 	 */
2678 	rsp = NULL;
2679 	if ((ret == 0) && (xport_retval == FC_SUCCESS)) {
2680 		rsp = (struct fcp_rsp *)fpkt->pkt_resp;
2681 
2682 		if (fcp_validate_fcp_response(rsp, pptr) != FC_SUCCESS) {
2683 			fcp_log(CE_WARN, pptr->port_dip,
2684 			    "!SCSI command to d_id=0x%x lun=0x%x"
2685 			    " failed, Bad FCP response values:"
2686 			    " rsvd1=%x, rsvd2=%x, sts-rsvd1=%x,"
2687 			    " sts-rsvd2=%x, rsplen=%x, senselen=%x",
2688 			    ptgt->tgt_d_id, plun->lun_num,
2689 			    rsp->reserved_0, rsp->reserved_1,
2690 			    rsp->fcp_u.fcp_status.reserved_0,
2691 			    rsp->fcp_u.fcp_status.reserved_1,
2692 			    rsp->fcp_response_len, rsp->fcp_sense_len);
2693 
2694 			ret = EIO;
2695 		}
2696 	}
2697 
2698 	if ((ret == 0) && (rsp != NULL)) {
2699 		/*
2700 		 * Calc response lengths
2701 		 */
2702 		sense_len = 0;
2703 		info_len = 0;
2704 
2705 		if (rsp->fcp_u.fcp_status.rsp_len_set) {
2706 			info_len = rsp->fcp_response_len;
2707 		}
2708 
2709 		rsp_info   = (struct fcp_rsp_info *)
2710 		    ((uint8_t *)rsp + sizeof (struct fcp_rsp));
2711 
2712 		/*
2713 		 * Get SCSI status
2714 		 */
2715 		fscsi->scsi_bufstatus = rsp->fcp_u.fcp_status.scsi_status;
2716 		/*
2717 		 * If a lun was just added or removed and the next command
2718 		 * comes through this interface, we need to capture the check
2719 		 * condition so we can discover the new topology.
2720 		 */
2721 		if (fscsi->scsi_bufstatus != STATUS_GOOD &&
2722 		    rsp->fcp_u.fcp_status.sense_len_set) {
2723 			sense_len = rsp->fcp_sense_len;
2724 			rsp_sense  = (caddr_t)((uint8_t *)rsp_info + info_len);
2725 			sense_to = (struct scsi_extended_sense *)rsp_sense;
2726 			if ((FCP_SENSE_REPORTLUN_CHANGED(sense_to)) ||
2727 			    (FCP_SENSE_NO_LUN(sense_to))) {
2728 				reconfig_lun = TRUE;
2729 			}
2730 		}
2731 
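		/*
		 * A successful REPORT LUNS issued through this interface is
		 * also used as a chance to check whether our view of the
		 * target's LUNs has gone stale; if so, LUN rediscovery is
		 * kicked off below.
		 */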
2732 		if (fscsi->scsi_bufstatus == STATUS_GOOD && (ptgt != NULL) &&
2733 		    (reconfig_lun || (scsi_cdb->scc_cmd == SCMD_REPORT_LUN))) {
2734 			if (reconfig_lun == FALSE) {
2735 				reconfig_status =
2736 				    fcp_is_reconfig_needed(ptgt, fpkt);
2737 			}
2738 
2739 			if ((reconfig_lun == TRUE) ||
2740 			    (reconfig_status == TRUE)) {
2741 				mutex_enter(&ptgt->tgt_mutex);
2742 				if (ptgt->tgt_tid == NULL) {
2743 					/*
2744 					 * Either we've been notified the
2745 					 * REPORT_LUN data has changed, or
2746 					 * we've determined on our own that
2747 					 * we're out of date.  Kick off
2748 					 * rediscovery.
2749 					 */
2750 					tid = timeout(fcp_reconfigure_luns,
2751 					    (caddr_t)ptgt, drv_usectohz(1));
2752 
2753 					ptgt->tgt_tid = tid;
2754 					ptgt->tgt_state |= FCP_TGT_BUSY;
2755 					ret = EBUSY;
2756 					reconfig_pending = TRUE;
2757 				}
2758 				mutex_exit(&ptgt->tgt_mutex);
2759 			}
2760 		}
2761 
2762 		/*
2763 		 * Calc residuals and buffer lengths
2764 		 */
2765 
2766 		if (ret == 0) {
2767 			buf_len = fscsi->scsi_buflen;
2768 			fscsi->scsi_bufresid	= 0;
2769 			if (rsp->fcp_u.fcp_status.resid_under) {
2770 				if (rsp->fcp_resid <= fscsi->scsi_buflen) {
2771 					fscsi->scsi_bufresid = rsp->fcp_resid;
2772 				} else {
2773 					cmn_err(CE_WARN, "fcp: bad residue %x "
2774 					    "for txfer len %x", rsp->fcp_resid,
2775 					    fscsi->scsi_buflen);
2776 					fscsi->scsi_bufresid =
2777 					    fscsi->scsi_buflen;
2778 				}
2779 				buf_len -= fscsi->scsi_bufresid;
2780 			}
2781 			if (rsp->fcp_u.fcp_status.resid_over) {
2782 				fscsi->scsi_bufresid = -rsp->fcp_resid;
2783 			}
2784 
2785 			fscsi->scsi_rqresid	= fscsi->scsi_rqlen - sense_len;
2786 			if (fscsi->scsi_rqlen < sense_len) {
2787 				sense_len = fscsi->scsi_rqlen;
2788 			}
2789 
2790 			fscsi->scsi_fc_rspcode	= 0;
2791 			if (rsp->fcp_u.fcp_status.rsp_len_set) {
2792 				fscsi->scsi_fc_rspcode	= rsp_info->rsp_code;
2793 			}
2794 			fscsi->scsi_pkt_state	= fpkt->pkt_state;
2795 			fscsi->scsi_pkt_action	= fpkt->pkt_action;
2796 			fscsi->scsi_pkt_reason	= fpkt->pkt_reason;
2797 
2798 			/*
2799 			 * Copy data and request sense
2800 			 *
2801 			 * Data must be copied by using the FCP_CP_IN macro.
2802 			 * This will ensure the proper byte order since the data
2803 			 * is being copied directly from the memory mapped
2804 			 * device register.
2805 			 *
2806 			 * The response (and request sense) will be in the
2807 			 * correct byte order.	No special copy is necessary.
2808 			 */
2809 
2810 			if (buf_len) {
2811 				FCP_CP_IN(fpkt->pkt_data,
2812 				    fscsi->scsi_bufaddr,
2813 				    fpkt->pkt_data_acc,
2814 				    buf_len);
2815 			}
2816 			bcopy((void *)rsp_sense,
2817 			    (void *)fscsi->scsi_rqbufaddr,
2818 			    sense_len);
2819 		}
2820 	}
2821 
2822 	/*
2823 	 * Cleanup transport data structures if icmd was alloc-ed, so that
2824 	 * cleanup happens in the same thread that allocated icmd.
2825 	 */
2826 	if (icmd != NULL) {
2827 		fcp_ipkt_sema_cleanup(icmd);
2828 	}
2829 
2830 	/* restore pm busy/idle status */
2831 	if (port_busy) {
2832 		fc_ulp_idle_port(pptr->port_fp_handle);
2833 	}
2834 
2835 	/*
2836 	 * Cleanup target.  If a reconfig is pending, don't clear the BUSY
2837 	 * flag; it will be cleared when the reconfig is complete.
2838 	 */
2839 	if ((ptgt != NULL) && !reconfig_pending) {
2840 		/*
2841 		 * If the target was created here, de-mark it as busy.
2842 		 */
2843 		if (target_created) {
2844 			mutex_enter(&ptgt->tgt_mutex);
2845 			ptgt->tgt_state &= ~FCP_TGT_BUSY;
2846 			mutex_exit(&ptgt->tgt_mutex);
2847 		} else {
2848 			/*
2849 			 * De-mark target as busy
2850 			 */
2851 			mutex_enter(&ptgt->tgt_mutex);
2852 			ptgt->tgt_state &= ~FCP_TGT_BUSY;
2853 			mutex_exit(&ptgt->tgt_mutex);
2854 		}
2855 	}
2856 	return (ret);
2857 }
2858 
2859 
2860 static int
2861 fcp_is_reconfig_needed(struct fcp_tgt *ptgt,
2862     fc_packet_t	*fpkt)
2863 {
2864 	uchar_t			*lun_string;
2865 	uint16_t		lun_num, i;
2866 	int			num_luns;
2867 	int			actual_luns;
2868 	int			num_masked_luns;
2869 	int			lun_buflen;
2870 	struct fcp_lun	*plun	= NULL;
2871 	struct fcp_reportlun_resp	*report_lun;
2872 	uint8_t			reconfig_needed = FALSE;
2873 	uint8_t			lun_exists = FALSE;
2874 	fcp_port_t			*pptr		 = ptgt->tgt_port;
2875 
2876 	report_lun = kmem_zalloc(fpkt->pkt_datalen, KM_SLEEP);
2877 
2878 	FCP_CP_IN(fpkt->pkt_data, report_lun, fpkt->pkt_data_acc,
2879 	    fpkt->pkt_datalen);
2880 
2881 	/* get number of luns (which is supplied as LUNS * 8) */
2882 	num_luns = BE_32(report_lun->num_lun) >> 3;
2883 
2884 	/*
2885 	 * Figure out exactly how many lun strings our response buffer
2886 	 * can hold.
2887 	 */
2888 	lun_buflen = (fpkt->pkt_datalen -
2889 	    2 * sizeof (uint32_t)) / sizeof (longlong_t);
2890 
2891 	/*
2892 	 * Is our response buffer full or not? We don't want to
2893 	 * potentially walk beyond the number of luns we have.
2894 	 */
2895 	if (num_luns <= lun_buflen) {
2896 		actual_luns = num_luns;
2897 	} else {
2898 		actual_luns = lun_buflen;
2899 	}
2900 
2901 	mutex_enter(&ptgt->tgt_mutex);
2902 
2903 	/* Scan each lun to see if we have masked it. */
2904 	num_masked_luns = 0;
2905 	if (fcp_lun_blacklist != NULL) {
2906 		for (i = 0; i < actual_luns; i++) {
2907 			lun_string = (uchar_t *)&(report_lun->lun_string[i]);
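			/*
			 * The top two bits of the first byte select the
			 * addressing method; for the methods handled here
			 * the low 14 bits of the first two bytes carry the
			 * LUN number.
			 */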
2908 			switch (lun_string[0] & 0xC0) {
2909 			case FCP_LUN_ADDRESSING:
2910 			case FCP_PD_ADDRESSING:
2911 			case FCP_VOLUME_ADDRESSING:
2912 				lun_num = ((lun_string[0] & 0x3F) << 8)
2913 				    | lun_string[1];
2914 				if (fcp_should_mask(&ptgt->tgt_port_wwn,
2915 				    lun_num) == TRUE) {
2916 					num_masked_luns++;
2917 				}
2918 				break;
2919 			default:
2920 				break;
2921 			}
2922 		}
2923 	}
2924 
2925 	/*
2926 	 * The quick and easy check.  If the number of LUNs reported
2927 	 * doesn't match the number we currently know about, we need
2928 	 * to reconfigure.
2929 	 */
2930 	if (num_luns && num_luns != (ptgt->tgt_lun_cnt + num_masked_luns)) {
2931 		mutex_exit(&ptgt->tgt_mutex);
2932 		kmem_free(report_lun, fpkt->pkt_datalen);
2933 		return (TRUE);
2934 	}
2935 
2936 	/*
2937 	 * If the quick and easy check doesn't turn up anything, we walk
2938 	 * the list of luns from the REPORT_LUN response and look for
2939 	 * any luns we don't know about.  If we find one, we know we need
2940 	 * to reconfigure. We will skip LUNs that are masked because of the
2941 	 * blacklist.
2942 	 */
2943 	for (i = 0; i < actual_luns; i++) {
2944 		lun_string = (uchar_t *)&(report_lun->lun_string[i]);
2945 		lun_exists = FALSE;
2946 		switch (lun_string[0] & 0xC0) {
2947 		case FCP_LUN_ADDRESSING:
2948 		case FCP_PD_ADDRESSING:
2949 		case FCP_VOLUME_ADDRESSING:
2950 			lun_num = ((lun_string[0] & 0x3F) << 8) | lun_string[1];
2951 
2952 			if ((fcp_lun_blacklist != NULL) && (fcp_should_mask(
2953 			    &ptgt->tgt_port_wwn, lun_num) == TRUE)) {
2954 				lun_exists = TRUE;
2955 				break;
2956 			}
2957 
2958 			for (plun = ptgt->tgt_lun; plun;
2959 			    plun = plun->lun_next) {
2960 				if (plun->lun_num == lun_num) {
2961 					lun_exists = TRUE;
2962 					break;
2963 				}
2964 			}
2965 			break;
2966 		default:
2967 			break;
2968 		}
2969 
2970 		if (lun_exists == FALSE) {
2971 			reconfig_needed = TRUE;
2972 			break;
2973 		}
2974 	}
2975 
2976 	mutex_exit(&ptgt->tgt_mutex);
2977 	kmem_free(report_lun, fpkt->pkt_datalen);
2978 
2979 	return (reconfig_needed);
2980 }
2981 
2982 /*
2983  * This function is called by fcp_handle_page83 and uses inquiry response data
2984  * stored in plun->lun_inq to determine whether or not a device is a member of
2985  * the table fcp_symmetric_disk_table. We return 0 if it is in the table,
2986  * otherwise 1.
2987  */
2988 static int
2989 fcp_symmetric_device_probe(struct fcp_lun *plun)
2990 {
2991 	struct scsi_inquiry	*stdinq = &plun->lun_inq;
2992 	char			*devidptr;
2993 	int			i, len;
2994 
2995 	for (i = 0; i < fcp_symmetric_disk_table_size; i++) {
2996 		devidptr = fcp_symmetric_disk_table[i];
2997 		len = (int)strlen(devidptr);
2998 
2999 		if (bcmp(stdinq->inq_vid, devidptr, len) == 0) {
3000 			return (0);
3001 		}
3002 	}
3003 	return (1);
3004 }
3005 
3006 
3007 /*
3008  * This function is called by fcp_ioctl for the FCP_STATE_COUNT ioctl
3009  * It basically returns the current count of # of state change callbacks
3010  * i.e the value of tgt_change_cnt.
3011  *
3012  * INPUT:
3013  *   fcp_ioctl.fp_minor -> The minor # of the fp port
3014  *   fcp_ioctl.listlen	-> 1
3015  *   fcp_ioctl.list	-> Pointer to a 32 bit integer
3016  */
3017 /*ARGSUSED2*/
3018 static int
3019 fcp_get_statec_count(struct fcp_ioctl *data, int mode, int *rval)
3020 {
3021 	int			ret;
3022 	uint32_t		link_cnt;
3023 	struct fcp_ioctl	fioctl;
3024 	struct fcp_port	*pptr = NULL;
3025 
3026 	if ((ret = fcp_copyin_fcp_ioctl_data(data, mode, rval, &fioctl,
3027 	    &pptr)) != 0) {
3028 		return (ret);
3029 	}
3030 
3031 	ASSERT(pptr != NULL);
3032 
3033 	if (fioctl.listlen != 1) {
3034 		return (EINVAL);
3035 	}
3036 
3037 	mutex_enter(&pptr->port_mutex);
3038 	if (pptr->port_state & FCP_STATE_OFFLINE) {
3039 		mutex_exit(&pptr->port_mutex);
3040 		return (ENXIO);
3041 	}
3042 
3043 	/*
3044 	 * FCP_STATE_INIT is set in 2 cases (not sure why it is overloaded):
3045 	 * when fcp initially attaches to the port and there is nothing
3046 	 * hanging off the port, or if there was a repeated offline state change
3047 	 * callback (see the fcp_statec_callback() FC_STATE_OFFLINE case).
3048 	 * In the latter case, port_tmp_cnt will be non-zero and that is how we
3049 	 * will differentiate the 2 cases.
3050 	 */
3051 	if ((pptr->port_state & FCP_STATE_INIT) && pptr->port_tmp_cnt) {
3052 		mutex_exit(&pptr->port_mutex);
3053 		return (ENXIO);
3054 	}
3055 
3056 	link_cnt = pptr->port_link_cnt;
3057 	mutex_exit(&pptr->port_mutex);
3058 
3059 	if (ddi_copyout(&link_cnt, fioctl.list, (sizeof (uint32_t)), mode)) {
3060 		return (EFAULT);
3061 	}
3062 
3063 #ifdef	_MULTI_DATAMODEL
3064 	switch (ddi_model_convert_from(mode & FMODELS)) {
3065 	case DDI_MODEL_ILP32: {
3066 		struct fcp32_ioctl f32_ioctl;
3067 
3068 		f32_ioctl.fp_minor = fioctl.fp_minor;
3069 		f32_ioctl.listlen = fioctl.listlen;
3070 		f32_ioctl.list = (caddr32_t)(long)fioctl.list;
3071 		if (ddi_copyout((void *)&f32_ioctl, (void *)data,
3072 		    sizeof (struct fcp32_ioctl), mode)) {
3073 			return (EFAULT);
3074 		}
3075 		break;
3076 	}
3077 	case DDI_MODEL_NONE:
3078 		if (ddi_copyout((void *)&fioctl, (void *)data,
3079 		    sizeof (struct fcp_ioctl), mode)) {
3080 			return (EFAULT);
3081 		}
3082 		break;
3083 	}
3084 #else	/* _MULTI_DATAMODEL */
3085 
3086 	if (ddi_copyout((void *)&fioctl, (void *)data,
3087 	    sizeof (struct fcp_ioctl), mode)) {
3088 		return (EFAULT);
3089 	}
3090 #endif	/* _MULTI_DATAMODEL */
3091 
3092 	return (0);
3093 }
3094 
3095 /*
3096  * This function copies the fcp_ioctl structure passed in from user land
3097  * into kernel land. Handles 32 bit applications.
3098  */
3099 /*ARGSUSED*/
3100 static int
3101 fcp_copyin_fcp_ioctl_data(struct fcp_ioctl *data, int mode, int *rval,
3102     struct fcp_ioctl *fioctl, struct fcp_port **pptr)
3103 {
3104 	struct fcp_port	*t_pptr;
3105 
3106 #ifdef	_MULTI_DATAMODEL
3107 	switch (ddi_model_convert_from(mode & FMODELS)) {
3108 	case DDI_MODEL_ILP32: {
3109 		struct fcp32_ioctl f32_ioctl;
3110 
3111 		if (ddi_copyin((void *)data, (void *)&f32_ioctl,
3112 		    sizeof (struct fcp32_ioctl), mode)) {
3113 			return (EFAULT);
3114 		}
3115 		fioctl->fp_minor = f32_ioctl.fp_minor;
3116 		fioctl->listlen = f32_ioctl.listlen;
3117 		fioctl->list = (caddr_t)(long)f32_ioctl.list;
3118 		break;
3119 	}
3120 	case DDI_MODEL_NONE:
3121 		if (ddi_copyin((void *)data, (void *)fioctl,
3122 		    sizeof (struct fcp_ioctl), mode)) {
3123 			return (EFAULT);
3124 		}
3125 		break;
3126 	}
3127 
3128 #else	/* _MULTI_DATAMODEL */
3129 	if (ddi_copyin((void *)data, (void *)fioctl,
3130 	    sizeof (struct fcp_ioctl), mode)) {
3131 		return (EFAULT);
3132 	}
3133 #endif	/* _MULTI_DATAMODEL */
3134 
3135 	/*
3136 	 * Right now we can assume that the minor number matches with
3137 	 * this instance of fp. If this changes we will need to
3138 	 * revisit this logic.
3139 	 */
3140 	mutex_enter(&fcp_global_mutex);
3141 	t_pptr = fcp_port_head;
3142 	while (t_pptr) {
3143 		if (t_pptr->port_instance == (uint32_t)fioctl->fp_minor) {
3144 			break;
3145 		} else {
3146 			t_pptr = t_pptr->port_next;
3147 		}
3148 	}
3149 	*pptr = t_pptr;
3150 	mutex_exit(&fcp_global_mutex);
3151 	if (t_pptr == NULL) {
3152 		return (ENXIO);
3153 	}
3154 
3155 	return (0);
3156 }
3157 
3158 /*
3159  *     Function: fcp_port_create_tgt
3160  *
3161  *  Description: As the name suggests, this function creates the target context
3162  *		 specified by the WWN provided by the caller.  If the
3163  *		 creation goes well and the target is known by fp/fctl a PLOGI
3164  *		 followed by a PRLI are issued.
3165  *
3166  *     Argument: pptr		fcp port structure
3167  *		 pwwn		WWN of the target
3168  *		 ret_val	Address of the return code.  It could be:
3169  *				EIO, ENOMEM or 0.
3170  *		 fc_status	PLOGI or PRLI status completion
3171  *		 fc_pkt_state	PLOGI or PRLI state completion
3172  *		 fc_pkt_reason	PLOGI or PRLI reason completion
3173  *		 fc_pkt_action	PLOGI or PRLI action completion
3174  *
3175  * Return Value: NULL if it failed
3176  *		 Target structure address if it succeeds
3177  */
3178 static struct fcp_tgt *
3179 fcp_port_create_tgt(struct fcp_port *pptr, la_wwn_t *pwwn, int *ret_val,
3180     int *fc_status, int *fc_pkt_state, int *fc_pkt_reason, int *fc_pkt_action)
3181 {
3182 	struct fcp_tgt	*ptgt = NULL;
3183 	fc_portmap_t		devlist;
3184 	int			lcount;
3185 	int			error;
3186 
3187 	*ret_val = 0;
3188 
3189 	/*
3190 	 * Check FC port device & get port map
3191 	 */
3192 	if (fc_ulp_get_remote_port(pptr->port_fp_handle, pwwn,
3193 	    &error, 1) == NULL) {
3194 		*ret_val = EIO;
3195 	} else {
3196 		if (fc_ulp_pwwn_to_portmap(pptr->port_fp_handle, pwwn,
3197 		    &devlist) != FC_SUCCESS) {
3198 			*ret_val = EIO;
3199 		}
3200 	}
3201 
3202 	/* Set port map flags */
3203 	devlist.map_type = PORT_DEVICE_USER_CREATE;
3204 
3205 	/* Allocate target */
3206 	if (*ret_val == 0) {
3207 		lcount = pptr->port_link_cnt;
3208 		ptgt = fcp_alloc_tgt(pptr, &devlist, lcount);
3209 		if (ptgt == NULL) {
3210 			fcp_log(CE_WARN, pptr->port_dip,
3211 			    "!FC target allocation failed");
3212 			*ret_val = ENOMEM;
3213 		} else {
3214 			/* Setup target */
3215 			mutex_enter(&ptgt->tgt_mutex);
3216 
3217 			ptgt->tgt_statec_cause	= FCP_CAUSE_TGT_CHANGE;
3218 			ptgt->tgt_tmp_cnt	= 1;
3219 			ptgt->tgt_d_id		= devlist.map_did.port_id;
3220 			ptgt->tgt_hard_addr	=
3221 			    devlist.map_hard_addr.hard_addr;
3222 			ptgt->tgt_pd_handle	= devlist.map_pd;
3223 			ptgt->tgt_fca_dev	= NULL;
3224 
3225 			bcopy(&devlist.map_nwwn, &ptgt->tgt_node_wwn.raw_wwn[0],
3226 			    FC_WWN_SIZE);
3227 			bcopy(&devlist.map_pwwn, &ptgt->tgt_port_wwn.raw_wwn[0],
3228 			    FC_WWN_SIZE);
3229 
3230 			mutex_exit(&ptgt->tgt_mutex);
3231 		}
3232 	}
3233 
3234 	/* Release global mutex for PLOGI and PRLI */
3235 	mutex_exit(&fcp_global_mutex);
3236 
3237 	/* Send PLOGI (If necessary) */
3238 	if (*ret_val == 0) {
3239 		*ret_val = fcp_tgt_send_plogi(ptgt, fc_status,
3240 		    fc_pkt_state, fc_pkt_reason, fc_pkt_action);
3241 	}
3242 
3243 	/* Send PRLI (If necessary) */
3244 	if (*ret_val == 0) {
3245 		*ret_val = fcp_tgt_send_prli(ptgt, fc_status,
3246 		    fc_pkt_state, fc_pkt_reason, fc_pkt_action);
3247 	}
3248 
3249 	mutex_enter(&fcp_global_mutex);
3250 
3251 	return (ptgt);
3252 }
3253 
3254 /*
3255  *     Function: fcp_tgt_send_plogi
3256  *
3257  *  Description: This function sends a PLOGI to the target specified by the
3258  *		 caller and waits till it completes.
3259  *
3260  *     Argument: ptgt		Target to send the plogi to.
3261  *		 fc_status	Status returned by fp/fctl in the PLOGI request.
3262  *		 fc_pkt_state	State returned by fp/fctl in the PLOGI request.
3263  *		 fc_pkt_reason	Reason returned by fp/fctl in the PLOGI request.
3264  *		 fc_pkt_action	Action returned by fp/fctl in the PLOGI request.
3265  *
3266  * Return Value: 0
3267  *		 ENOMEM
3268  *		 EIO
3269  *
3270  *	Context: User context.
3271  */
3272 static int
3273 fcp_tgt_send_plogi(struct fcp_tgt *ptgt, int *fc_status, int *fc_pkt_state,
3274     int *fc_pkt_reason, int *fc_pkt_action)
3275 {
3276 	struct fcp_port	*pptr;
3277 	struct fcp_ipkt	*icmd;
3278 	struct fc_packet	*fpkt;
3279 	fc_frame_hdr_t		*hp;
3280 	struct la_els_logi	logi;
3281 	int			tcount;
3282 	int			lcount;
3283 	int			ret, login_retval = ~FC_SUCCESS;
3284 
3285 	ret = 0;
3286 
3287 	pptr = ptgt->tgt_port;
3288 
3289 	lcount = pptr->port_link_cnt;
3290 	tcount = ptgt->tgt_change_cnt;
3291 
3292 	/* Alloc internal packet */
3293 	icmd = fcp_icmd_alloc(pptr, ptgt, sizeof (la_els_logi_t),
3294 	    sizeof (la_els_logi_t), 0,
3295 	    pptr->port_state & FCP_STATE_FCA_IS_NODMA,
3296 	    lcount, tcount, 0, FC_INVALID_RSCN_COUNT);
3297 
3298 	if (icmd == NULL) {
3299 		ret = ENOMEM;
3300 	} else {
3301 		/*
3302 		 * Setup internal packet as sema sync
3303 		 */
3304 		fcp_ipkt_sema_init(icmd);
3305 
3306 		/*
3307 		 * Setup internal packet (icmd)
3308 		 */
3309 		icmd->ipkt_lun		= NULL;
3310 		icmd->ipkt_restart	= 0;
3311 		icmd->ipkt_retries	= 0;
3312 		icmd->ipkt_opcode	= LA_ELS_PLOGI;
3313 
3314 		/*
3315 		 * Setup fc_packet
3316 		 */
3317 		fpkt = icmd->ipkt_fpkt;
3318 
3319 		fpkt->pkt_tran_flags	= FC_TRAN_CLASS3 | FC_TRAN_INTR;
3320 		fpkt->pkt_tran_type	= FC_PKT_EXCHANGE;
3321 		fpkt->pkt_timeout	= FCP_ELS_TIMEOUT;
3322 
3323 		/*
3324 		 * Setup FC frame header
3325 		 */
3326 		hp = &fpkt->pkt_cmd_fhdr;
3327 
3328 		hp->s_id	= pptr->port_id;	/* source ID */
3329 		hp->d_id	= ptgt->tgt_d_id;	/* dest ID */
3330 		hp->r_ctl	= R_CTL_ELS_REQ;
3331 		hp->type	= FC_TYPE_EXTENDED_LS;
3332 		hp->f_ctl	= F_CTL_SEQ_INITIATIVE | F_CTL_FIRST_SEQ;
3333 		hp->seq_id	= 0;
3334 		hp->rsvd	= 0;
3335 		hp->df_ctl	= 0;
3336 		hp->seq_cnt	= 0;
3337 		hp->ox_id	= 0xffff;		/* i.e. none */
3338 		hp->rx_id	= 0xffff;		/* i.e. none */
3339 		hp->ro		= 0;
3340 
3341 		/*
3342 		 * Setup PLOGI
3343 		 */
3344 		bzero(&logi, sizeof (struct la_els_logi));
3345 		logi.ls_code.ls_code = LA_ELS_PLOGI;
3346 
3347 		FCP_CP_OUT((uint8_t *)&logi, fpkt->pkt_cmd,
3348 		    fpkt->pkt_cmd_acc, sizeof (struct la_els_logi));
3349 
3350 		/*
3351 		 * Send PLOGI
3352 		 */
3353 		*fc_status = login_retval =
3354 		    fc_ulp_login(pptr->port_fp_handle, &fpkt, 1);
3355 		if (*fc_status != FC_SUCCESS) {
3356 			ret = EIO;
3357 		}
3358 	}
3359 
3360 	/*
3361 	 * Wait for completion
3362 	 */
3363 	if ((ret == 0) && (login_retval == FC_SUCCESS)) {
3364 		ret = fcp_ipkt_sema_wait(icmd);
3365 
3366 		*fc_pkt_state	= fpkt->pkt_state;
3367 		*fc_pkt_reason	= fpkt->pkt_reason;
3368 		*fc_pkt_action	= fpkt->pkt_action;
3369 	}
3370 
3371 	/*
3372 	 * Cleanup transport data structures if icmd was alloc-ed.  The
3373 	 * completion callback only posts the semaphore, so cleanup always
3374 	 * happens here, in the same thread that allocated icmd.
3375 	 */
3376 	if (icmd != NULL) {
3377 		fcp_ipkt_sema_cleanup(icmd);
3378 	}
3379 
3380 	return (ret);
3381 }
3382 
3383 /*
3384  *     Function: fcp_tgt_send_prli
3385  *
3386  *  Description: Does nothing as of today.
3387  *
3388  *     Argument: ptgt		Target to send the prli to.
3389  *		 fc_status	Status returned by fp/fctl in the PRLI request.
3390  *		 fc_pkt_state	State returned by fp/fctl in the PRLI request.
3391  *		 fc_pkt_reason	Reason returned by fp/fctl in the PRLI request.
3392  *		 fc_pkt_action	Action returned by fp/fctl in the PRLI request.
3393  *
3394  * Return Value: 0
3395  */
3396 /*ARGSUSED*/
3397 static int
3398 fcp_tgt_send_prli(struct fcp_tgt *ptgt, int *fc_status, int *fc_pkt_state,
3399     int *fc_pkt_reason, int *fc_pkt_action)
3400 {
3401 	return (0);
3402 }
3403 
3404 /*
3405  *     Function: fcp_ipkt_sema_init
3406  *
3407  *  Description: Initializes the semaphore contained in the internal packet.
3408  *
3409  *     Argument: icmd	Internal packet the semaphore of which must be
3410  *			initialized.
3411  *
3412  * Return Value: None
3413  *
3414  *	Context: User context only.
3415  */
3416 static void
3417 fcp_ipkt_sema_init(struct fcp_ipkt *icmd)
3418 {
3419 	struct fc_packet	*fpkt;
3420 
3421 	fpkt = icmd->ipkt_fpkt;
3422 
3423 	/* Create semaphore for sync */
3424 	sema_init(&(icmd->ipkt_sema), 0, NULL, SEMA_DRIVER, NULL);
3425 
3426 	/* Setup the completion callback */
3427 	fpkt->pkt_comp = fcp_ipkt_sema_callback;
3428 }
3429 
3430 /*
3431  *     Function: fcp_ipkt_sema_wait
3432  *
3433  *  Description: Wait on the semaphore embedded in the internal packet.	 The
3434  *		 semaphore is released in the callback.
3435  *
3436  *     Argument: icmd	Internal packet to wait on for completion.
3437  *
3438  * Return Value: 0
3439  *		 EIO
3440  *		 EBUSY
3441  *		 EAGAIN
3442  *
3443  *	Context: User context only.
3444  *
3445  * This function does a conversion between the field pkt_state of the fc_packet
3446  * embedded in the internal packet (icmd) and the code it returns.
3447  */
3448 static int
3449 fcp_ipkt_sema_wait(struct fcp_ipkt *icmd)
3450 {
3451 	struct fc_packet	*fpkt;
3452 	int	ret;
3453 
3454 	ret = EIO;
3455 	fpkt = icmd->ipkt_fpkt;
3456 
3457 	/*
3458 	 * Wait on semaphore
3459 	 */
3460 	sema_p(&(icmd->ipkt_sema));
3461 
3462 	/*
3463 	 * Check the status of the FC packet
3464 	 */
3465 	switch (fpkt->pkt_state) {
3466 	case FC_PKT_SUCCESS:
3467 		ret = 0;
3468 		break;
3469 	case FC_PKT_LOCAL_RJT:
3470 		switch (fpkt->pkt_reason) {
3471 		case FC_REASON_SEQ_TIMEOUT:
3472 		case FC_REASON_RX_BUF_TIMEOUT:
3473 			ret = EAGAIN;
3474 			break;
3475 		case FC_REASON_PKT_BUSY:
3476 			ret = EBUSY;
3477 			break;
3478 		}
3479 		break;
3480 	case FC_PKT_TIMEOUT:
3481 		ret = EAGAIN;
3482 		break;
3483 	case FC_PKT_LOCAL_BSY:
3484 	case FC_PKT_TRAN_BSY:
3485 	case FC_PKT_NPORT_BSY:
3486 	case FC_PKT_FABRIC_BSY:
3487 		ret = EBUSY;
3488 		break;
3489 	case FC_PKT_LS_RJT:
3490 	case FC_PKT_BA_RJT:
3491 		switch (fpkt->pkt_reason) {
3492 		case FC_REASON_LOGICAL_BSY:
3493 			ret = EBUSY;
3494 			break;
3495 		}
3496 		break;
3497 	case FC_PKT_FS_RJT:
3498 		switch (fpkt->pkt_reason) {
3499 		case FC_REASON_FS_LOGICAL_BUSY:
3500 			ret = EBUSY;
3501 			break;
3502 		}
3503 		break;
3504 	}
3505 
3506 	return (ret);
3507 }
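
/*
 * Summary of the pkt_state/pkt_reason to errno mapping implemented above:
 *
 *	FC_PKT_SUCCESS						0
 *	FC_PKT_TIMEOUT, FC_PKT_LOCAL_RJT w/ a timeout reason	EAGAIN
 *	the *_BSY states, FC_PKT_LOCAL_RJT w/ FC_REASON_PKT_BUSY,
 *	and the *_RJT states w/ a logical-busy reason		EBUSY
 *	anything else						EIO (the default)
 */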
3508 
3509 /*
3510  *     Function: fcp_ipkt_sema_callback
3511  *
3512  *  Description: Registered as the completion callback function for the FC
3513  *		 transport when the ipkt semaphore is used for sync. This will
3514  *		 cleanup the used data structures, if necessary and wake up
3515  *		 the user thread to complete the transaction.
3516  *
3517  *     Argument: fpkt	FC packet (points to the icmd)
3518  *
3519  * Return Value: None
3520  *
3521  *	Context: User context only
3522  */
3523 static void
3524 fcp_ipkt_sema_callback(struct fc_packet *fpkt)
3525 {
3526 	struct fcp_ipkt	*icmd;
3527 
3528 	icmd = (struct fcp_ipkt *)fpkt->pkt_ulp_private;
3529 
3530 	/*
3531 	 * Wake up user thread
3532 	 */
3533 	sema_v(&(icmd->ipkt_sema));
3534 }
3535 
3536 /*
3537  *     Function: fcp_ipkt_sema_cleanup
3538  *
3539  *  Description: Called to cleanup (if necessary) the data structures used
3540  *		 when ipkt sema is used for sync.  This function will detect
3541  *		 whether the caller is the last thread (via counter) and
3542  *		 cleanup only if necessary.
3543  *
3544  *     Argument: icmd	Internal command packet
3545  *
3546  * Return Value: None
3547  *
3548  *	Context: User context only
3549  */
3550 static void
3551 fcp_ipkt_sema_cleanup(struct fcp_ipkt *icmd)
3552 {
3553 	struct fcp_tgt	*ptgt;
3554 	struct fcp_port	*pptr;
3555 
3556 	ptgt = icmd->ipkt_tgt;
3557 	pptr = icmd->ipkt_port;
3558 
3559 	/*
3560 	 * Acquire data structure
3561 	 */
3562 	mutex_enter(&ptgt->tgt_mutex);
3563 
3564 	/*
3565 	 * Destroy semaphore
3566 	 */
3567 	sema_destroy(&(icmd->ipkt_sema));
3568 
3569 	/*
3570 	 * Cleanup internal packet
3571 	 */
3572 	mutex_exit(&ptgt->tgt_mutex);
3573 	fcp_icmd_free(pptr, icmd);
3574 }
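
/*
 * Illustrative sketch (not part of the original driver): how the four
 * fcp_ipkt_sema_* helpers above are typically combined to issue an ELS
 * synchronously, mirroring the fc_ulp_login() path earlier in this file.
 * The helper name and the #ifdef guard are hypothetical and exist for
 * documentation only.
 */
#ifdef	FCP_IPKT_SEMA_EXAMPLE
static int
fcp_ipkt_sema_example(struct fcp_port *pptr, struct fcp_ipkt *icmd)
{
	fc_packet_t	*fpkt = icmd->ipkt_fpkt;
	int		ret;

	/* Arm the semaphore and install fcp_ipkt_sema_callback() */
	fcp_ipkt_sema_init(icmd);

	/* Hand the packet to the transport; the callback will sema_v() */
	if (fc_ulp_issue_els(pptr->port_fp_handle, fpkt) != FC_SUCCESS) {
		/* No callback will ever come; clean up here */
		fcp_ipkt_sema_cleanup(icmd);
		return (EIO);
	}

	/* Block until completion, then map pkt_state/pkt_reason to errno */
	ret = fcp_ipkt_sema_wait(icmd);

	/* Destroy the semaphore and free the internal packet */
	fcp_ipkt_sema_cleanup(icmd);

	return (ret);
}
#endif	/* FCP_IPKT_SEMA_EXAMPLE */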
3575 
3576 /*
3577  *     Function: fcp_port_attach
3578  *
3579  *  Description: Called by the transport framework to attach a new port or
3580  *		 to resume (power up) a previously suspended one.
3581  *
3582  *     Argument: ulph		Port handle
3583  *		 *pinfo		Port information
3584  *		 cmd		Command
3585  *		 s_id		Port ID
3586  *
3587  * Return Value: FC_FAILURE or FC_SUCCESS
3588  */
3589 /*ARGSUSED*/
3590 static int
3591 fcp_port_attach(opaque_t ulph, fc_ulp_port_info_t *pinfo,
3592     fc_attach_cmd_t cmd, uint32_t s_id)
3593 {
3594 	int	instance;
3595 	int	res = FC_FAILURE; /* default result */
3596 
3597 	ASSERT(pinfo != NULL);
3598 
3599 	instance = ddi_get_instance(pinfo->port_dip);
3600 
3601 	switch (cmd) {
3602 	case FC_CMD_ATTACH:
3603 		/*
3604 		 * this port instance is attaching for the first time (or after
3605 		 * having been detached earlier)
3606 		 */
3607 		if (fcp_handle_port_attach(ulph, pinfo, s_id,
3608 		    instance) == DDI_SUCCESS) {
3609 			res = FC_SUCCESS;
3610 		} else {
3611 			ASSERT(ddi_get_soft_state(fcp_softstate,
3612 			    instance) == NULL);
3613 		}
3614 		break;
3615 
3616 	case FC_CMD_RESUME:
3617 	case FC_CMD_POWER_UP:
3618 		/*
3619 		 * this port instance was attached, then suspended, and
3620 		 * will now be resumed
3621 		 */
3622 		if (fcp_handle_port_resume(ulph, pinfo, s_id, cmd,
3623 		    instance) == DDI_SUCCESS) {
3624 			res = FC_SUCCESS;
3625 		}
3626 		break;
3627 
3628 	default:
3629 		/* shouldn't happen */
3630 		FCP_TRACE(fcp_logq, "fcp",
3631 		    fcp_trace, FCP_BUF_LEVEL_2, 0,
3632 		    "port_attach: unknown command: %d", cmd);
3633 		break;
3634 	}
3635 
3636 	/* return result */
3637 	FCP_DTRACE(fcp_logq, "fcp", fcp_trace,
3638 	    FCP_BUF_LEVEL_1, 0, "fcp_port_attach returning %d", res);
3639 
3640 	return (res);
3641 }
3642 
3643 
3644 /*
3645  * detach or suspend this port instance
3646  *
3647  * acquires and releases the global mutex
3648  *
3649  * acquires and releases the mutex for this port
3650  *
3651  * acquires and releases the hotplug mutex for this port
3652  */
3653 /*ARGSUSED*/
3654 static int
3655 fcp_port_detach(opaque_t ulph, fc_ulp_port_info_t *info,
3656     fc_detach_cmd_t cmd)
3657 {
3658 	int			flag;
3659 	int			instance;
3660 	struct fcp_port		*pptr;
3661 
3662 	instance = ddi_get_instance(info->port_dip);
3663 	pptr = ddi_get_soft_state(fcp_softstate, instance);
3664 
3665 	switch (cmd) {
3666 	case FC_CMD_SUSPEND:
3667 		FCP_DTRACE(fcp_logq, "fcp",
3668 		    fcp_trace, FCP_BUF_LEVEL_8, 0,
3669 		    "port suspend called for port %d", instance);
3670 		flag = FCP_STATE_SUSPENDED;
3671 		break;
3672 
3673 	case FC_CMD_POWER_DOWN:
3674 		FCP_DTRACE(fcp_logq, "fcp",
3675 		    fcp_trace, FCP_BUF_LEVEL_8, 0,
3676 		    "port power down called for port %d", instance);
3677 		flag = FCP_STATE_POWER_DOWN;
3678 		break;
3679 
3680 	case FC_CMD_DETACH:
3681 		FCP_DTRACE(fcp_logq, "fcp",
3682 		    fcp_trace, FCP_BUF_LEVEL_8, 0,
3683 		    "port detach called for port %d", instance);
3684 		flag = FCP_STATE_DETACHING;
3685 		break;
3686 
3687 	default:
3688 		/* shouldn't happen */
3689 		return (FC_FAILURE);
3690 	}
3691 	FCP_DTRACE(fcp_logq, "fcp", fcp_trace,
3692 	    FCP_BUF_LEVEL_1, 0, "fcp_port_detach returning");
3693 
3694 	return (fcp_handle_port_detach(pptr, flag, instance));
3695 }
3696 
3697 
3698 /*
3699  * called for ioctls on the transport's devctl interface, and the transport
3700  * has passed it to us
3701  *
3702  * this will only be called for device control ioctls (i.e. hotplugging stuff)
3703  *
3704  * return FC_SUCCESS if we decide to claim the ioctl,
3705  * else return FC_UNCLAIMED
3706  *
3707  * *rval is set iff we decide to claim the ioctl
3708  */
3709 /*ARGSUSED*/
3710 static int
3711 fcp_port_ioctl(opaque_t ulph, opaque_t port_handle, dev_t dev, int cmd,
3712     intptr_t data, int mode, cred_t *credp, int *rval, uint32_t claimed)
3713 {
3714 	int			retval = FC_UNCLAIMED;	/* return value */
3715 	struct fcp_port		*pptr = NULL;		/* our soft state */
3716 	struct devctl_iocdata	*dcp = NULL;		/* for devctl */
3717 	dev_info_t		*cdip;
3718 	mdi_pathinfo_t		*pip = NULL;
3719 	char			*ndi_nm;		/* NDI name */
3720 	char			*ndi_addr;		/* NDI addr */
3721 	int			is_mpxio, circ;
3722 	int			devi_entered = 0;
3723 	clock_t			end_time;
3724 
3725 	ASSERT(rval != NULL);
3726 
3727 	FCP_DTRACE(fcp_logq, "fcp",
3728 	    fcp_trace, FCP_BUF_LEVEL_8, 0,
3729 	    "fcp_port_ioctl(cmd=0x%x, claimed=%d)", cmd, claimed);
3730 
3731 	/* if already claimed then forget it */
3732 	if (claimed) {
3733 		/*
3734 		 * for now, if this ioctl has already been claimed, then
3735 		 * we just ignore it
3736 		 */
3737 		return (retval);
3738 	}
3739 
3740 	/* get our port info */
3741 	if ((pptr = fcp_get_port(port_handle)) == NULL) {
3742 		fcp_log(CE_WARN, NULL,
3743 		    "!fcp:Invalid port handle in ioctl");
3744 		*rval = ENXIO;
3745 		return (retval);
3746 	}
3747 	is_mpxio = pptr->port_mpxio;
3748 
3749 	switch (cmd) {
3750 	case DEVCTL_BUS_GETSTATE:
3751 	case DEVCTL_BUS_QUIESCE:
3752 	case DEVCTL_BUS_UNQUIESCE:
3753 	case DEVCTL_BUS_RESET:
3754 	case DEVCTL_BUS_RESETALL:
3755 
3756 	case DEVCTL_BUS_DEV_CREATE:
3757 		if (ndi_dc_allochdl((void *)data, &dcp) != NDI_SUCCESS) {
3758 			return (retval);
3759 		}
3760 		break;
3761 
3762 	case DEVCTL_DEVICE_GETSTATE:
3763 	case DEVCTL_DEVICE_OFFLINE:
3764 	case DEVCTL_DEVICE_ONLINE:
3765 	case DEVCTL_DEVICE_REMOVE:
3766 	case DEVCTL_DEVICE_RESET:
3767 		if (ndi_dc_allochdl((void *)data, &dcp) != NDI_SUCCESS) {
3768 			return (retval);
3769 		}
3770 
3771 		ASSERT(dcp != NULL);
3772 
3773 		/* ensure we have a name and address */
3774 		if (((ndi_nm = ndi_dc_getname(dcp)) == NULL) ||
3775 		    ((ndi_addr = ndi_dc_getaddr(dcp)) == NULL)) {
3776 			FCP_TRACE(fcp_logq, pptr->port_instbuf,
3777 			    fcp_trace, FCP_BUF_LEVEL_2, 0,
3778 			    "ioctl: can't get name (%s) or addr (%s)",
3779 			    ndi_nm ? ndi_nm : "<null ptr>",
3780 			    ndi_addr ? ndi_addr : "<null ptr>");
3781 			ndi_dc_freehdl(dcp);
3782 			return (retval);
3783 		}
3784 
3785 
3786 		/* get our child's DIP */
3787 		ASSERT(pptr != NULL);
3788 		if (is_mpxio) {
3789 			mdi_devi_enter(pptr->port_dip, &circ);
3790 		} else {
3791 			ndi_devi_enter(pptr->port_dip, &circ);
3792 		}
3793 		devi_entered = 1;
3794 
3795 		if ((cdip = ndi_devi_find(pptr->port_dip, ndi_nm,
3796 		    ndi_addr)) == NULL) {
3797 			/* Look for virtually enumerated devices. */
3798 			pip = mdi_pi_find(pptr->port_dip, NULL, ndi_addr);
3799 			if (pip == NULL ||
3800 			    ((cdip = mdi_pi_get_client(pip)) == NULL)) {
3801 				*rval = ENXIO;
3802 				goto out;
3803 			}
3804 		}
3805 		break;
3806 
3807 	default:
3808 		*rval = ENOTTY;
3809 		return (retval);
3810 	}
3811 
3812 	/* this ioctl is ours -- process it */
3813 
3814 	retval = FC_SUCCESS;		/* just means we claim the ioctl */
3815 
3816 	/* we assume it will be a success; else we'll set error value */
3817 	*rval = 0;
3818 
3819 
3820 	FCP_DTRACE(fcp_logq, pptr->port_instbuf,
3821 	    fcp_trace, FCP_BUF_LEVEL_8, 0,
3822 	    "ioctl: claiming this one");
3823 
3824 	/* handle ioctls now */
3825 	switch (cmd) {
3826 	case DEVCTL_DEVICE_GETSTATE:
3827 		ASSERT(cdip != NULL);
3828 		ASSERT(dcp != NULL);
3829 		if (ndi_dc_return_dev_state(cdip, dcp) != NDI_SUCCESS) {
3830 			*rval = EFAULT;
3831 		}
3832 		break;
3833 
3834 	case DEVCTL_DEVICE_REMOVE:
3835 	case DEVCTL_DEVICE_OFFLINE: {
3836 		int			flag = 0;
3837 		int			lcount;
3838 		int			tcount;
3839 		struct fcp_pkt	*head = NULL;
3840 		struct fcp_lun	*plun;
3841 		child_info_t		*cip = CIP(cdip);
3842 		int			all = 1;
3843 		struct fcp_lun	*tplun;
3844 		struct fcp_tgt	*ptgt;
3845 
3846 		ASSERT(pptr != NULL);
3847 		ASSERT(cdip != NULL);
3848 
3849 		mutex_enter(&pptr->port_mutex);
3850 		if (pip != NULL) {
3851 			cip = CIP(pip);
3852 		}
3853 		if ((plun = fcp_get_lun_from_cip(pptr, cip)) == NULL) {
3854 			mutex_exit(&pptr->port_mutex);
3855 			*rval = ENXIO;
3856 			break;
3857 		}
3858 
3859 		head = fcp_scan_commands(plun);
3860 		if (head != NULL) {
3861 			fcp_abort_commands(head, LUN_PORT);
3862 		}
3863 		lcount = pptr->port_link_cnt;
3864 		tcount = plun->lun_tgt->tgt_change_cnt;
3865 		mutex_exit(&pptr->port_mutex);
3866 
3867 		if (cmd == DEVCTL_DEVICE_REMOVE) {
3868 			flag = NDI_DEVI_REMOVE;
3869 		}
3870 
3871 		if (is_mpxio) {
3872 			mdi_devi_exit(pptr->port_dip, circ);
3873 		} else {
3874 			ndi_devi_exit(pptr->port_dip, circ);
3875 		}
3876 		devi_entered = 0;
3877 
3878 		*rval = fcp_pass_to_hp_and_wait(pptr, plun, cip,
3879 		    FCP_OFFLINE, lcount, tcount, flag);
3880 
3881 		if (*rval != NDI_SUCCESS) {
3882 			*rval = (*rval == NDI_BUSY) ? EBUSY : EIO;
3883 			break;
3884 		}
3885 
3886 		fcp_update_offline_flags(plun);
3887 
3888 		ptgt = plun->lun_tgt;
3889 		mutex_enter(&ptgt->tgt_mutex);
3890 		for (tplun = ptgt->tgt_lun; tplun != NULL; tplun =
3891 		    tplun->lun_next) {
3892 			mutex_enter(&tplun->lun_mutex);
3893 			if (!(tplun->lun_state & FCP_LUN_OFFLINE)) {
3894 				all = 0;
3895 			}
3896 			mutex_exit(&tplun->lun_mutex);
3897 		}
3898 
3899 		if (all) {
3900 			ptgt->tgt_node_state = FCP_TGT_NODE_NONE;
3901 			/*
3902 			 * The user is unconfiguring/offlining the device.
3903 			 * If fabric and the auto configuration is set
3904 			 * then make sure the user is the only one who
3905 			 * can reconfigure the device.
3906 			 */
3907 			if (FC_TOP_EXTERNAL(pptr->port_topology) &&
3908 			    fcp_enable_auto_configuration) {
3909 				ptgt->tgt_manual_config_only = 1;
3910 			}
3911 		}
3912 		mutex_exit(&ptgt->tgt_mutex);
3913 		break;
3914 	}
3915 
3916 	case DEVCTL_DEVICE_ONLINE: {
3917 		int			lcount;
3918 		int			tcount;
3919 		struct fcp_lun	*plun;
3920 		child_info_t		*cip = CIP(cdip);
3921 
3922 		ASSERT(cdip != NULL);
3923 		ASSERT(pptr != NULL);
3924 
3925 		mutex_enter(&pptr->port_mutex);
3926 		if (pip != NULL) {
3927 			cip = CIP(pip);
3928 		}
3929 		if ((plun = fcp_get_lun_from_cip(pptr, cip)) == NULL) {
3930 			mutex_exit(&pptr->port_mutex);
3931 			*rval = ENXIO;
3932 			break;
3933 		}
3934 		lcount = pptr->port_link_cnt;
3935 		tcount = plun->lun_tgt->tgt_change_cnt;
3936 		mutex_exit(&pptr->port_mutex);
3937 
3938 		/*
3939 		 * The FCP_LUN_ONLINING flag is used in fcp_scsi_start()
3940 		 * to allow the device attach to occur when the device is
3941 		 * FCP_LUN_OFFLINE (so we don't reject the INQUIRY command
3942 		 * from the scsi_probe()).
3943 		 */
3944 		mutex_enter(&LUN_TGT->tgt_mutex);
3945 		plun->lun_state |= FCP_LUN_ONLINING;
3946 		mutex_exit(&LUN_TGT->tgt_mutex);
3947 
3948 		if (is_mpxio) {
3949 			mdi_devi_exit(pptr->port_dip, circ);
3950 		} else {
3951 			ndi_devi_exit(pptr->port_dip, circ);
3952 		}
3953 		devi_entered = 0;
3954 
3955 		*rval = fcp_pass_to_hp_and_wait(pptr, plun, cip,
3956 		    FCP_ONLINE, lcount, tcount, 0);
3957 
3958 		if (*rval != NDI_SUCCESS) {
3959 			/* Reset the FCP_LUN_ONLINING bit */
3960 			mutex_enter(&LUN_TGT->tgt_mutex);
3961 			plun->lun_state &= ~FCP_LUN_ONLINING;
3962 			mutex_exit(&LUN_TGT->tgt_mutex);
3963 			*rval = EIO;
3964 			break;
3965 		}
3966 		mutex_enter(&LUN_TGT->tgt_mutex);
3967 		plun->lun_state &= ~(FCP_LUN_OFFLINE | FCP_LUN_BUSY |
3968 		    FCP_LUN_ONLINING);
3969 		mutex_exit(&LUN_TGT->tgt_mutex);
3970 		break;
3971 	}
3972 
3973 	case DEVCTL_BUS_DEV_CREATE: {
3974 		uchar_t			*bytes = NULL;
3975 		uint_t			nbytes;
3976 		struct fcp_tgt		*ptgt = NULL;
3977 		struct fcp_lun		*plun = NULL;
3978 		dev_info_t		*useless_dip = NULL;
3979 
3980 		*rval = ndi_dc_devi_create(dcp, pptr->port_dip,
3981 		    DEVCTL_CONSTRUCT, &useless_dip);
3982 		if (*rval != 0 || useless_dip == NULL) {
3983 			break;
3984 		}
3985 
3986 		if ((ddi_prop_lookup_byte_array(DDI_DEV_T_ANY, useless_dip,
3987 		    DDI_PROP_DONTPASS | DDI_PROP_NOTPROM, PORT_WWN_PROP, &bytes,
3988 		    &nbytes) != DDI_PROP_SUCCESS) || nbytes != FC_WWN_SIZE) {
3989 			*rval = EINVAL;
3990 			(void) ndi_devi_free(useless_dip);
3991 			if (bytes != NULL) {
3992 				ddi_prop_free(bytes);
3993 			}
3994 			break;
3995 		}
3996 
3997 		*rval = fcp_create_on_demand(pptr, bytes);
3998 		if (*rval == 0) {
3999 			mutex_enter(&pptr->port_mutex);
4000 			ptgt = fcp_lookup_target(pptr, (uchar_t *)bytes);
4001 			if (ptgt) {
4002 				/*
4003 				 * We now have a pointer to the target that
4004 				 * was created. Let's point to the first LUN on
4005 				 * this new target.
4006 				 */
4007 				mutex_enter(&ptgt->tgt_mutex);
4008 
4009 				plun = ptgt->tgt_lun;
4010 				/*
4011 				 * There may be stale/offline LUN entries on
4012 				 * this list (this is by design) and so we have
4013 				 * to make sure we point to the first online
4014 				 * LUN
4015 				 */
4016 				while (plun &&
4017 				    plun->lun_state & FCP_LUN_OFFLINE) {
4018 					plun = plun->lun_next;
4019 				}
4020 
4021 				mutex_exit(&ptgt->tgt_mutex);
4022 			}
4023 			mutex_exit(&pptr->port_mutex);
4024 		}
4025 
4026 		if (*rval == 0 && ptgt && plun) {
4027 			mutex_enter(&plun->lun_mutex);
4028 			/*
4029 			 * Allow up to fcp_lun_ready_retry seconds to
4030 			 * configure all the luns behind the target.
4031 			 *
4032 			 * The intent here is to allow targets with long
4033 			 * reboot/reset-recovery times to become available
4034 			 * while limiting the maximum wait time for an
4035 			 * unresponsive target.
4036 			 */
4037 			end_time = ddi_get_lbolt() +
4038 			    SEC_TO_TICK(fcp_lun_ready_retry);
4039 
4040 			while (ddi_get_lbolt() < end_time) {
4041 				retval = FC_SUCCESS;
4042 
4043 				/*
4044 				 * The new ndi interfaces for on-demand creation
4045 				 * are inflexible, so do some more work to pass on
4046 				 * a path name of some LUN (the design is broken!)
4047 				 */
4048 				if (plun->lun_cip) {
4049 					if (plun->lun_mpxio == 0) {
4050 						cdip = DIP(plun->lun_cip);
4051 					} else {
4052 						cdip = mdi_pi_get_client(
4053 						    PIP(plun->lun_cip));
4054 					}
4055 					if (cdip == NULL) {
4056 						*rval = ENXIO;
4057 						break;
4058 					}
4059 
4060 					if (!i_ddi_devi_attached(cdip)) {
4061 						mutex_exit(&plun->lun_mutex);
4062 						delay(drv_usectohz(1000000));
4063 						mutex_enter(&plun->lun_mutex);
4064 					} else {
4065 						/*
4066 						 * This LUN is ready; let's
4067 						 * check the next one.
4068 						 */
4069 						mutex_exit(&plun->lun_mutex);
4070 						plun = plun->lun_next;
4071 						while (plun && (plun->lun_state
4072 						    & FCP_LUN_OFFLINE)) {
4073 							plun = plun->lun_next;
4074 						}
4075 						if (!plun) {
4076 							break;
4077 						}
4078 						mutex_enter(&plun->lun_mutex);
4079 					}
4080 				} else {
4081 					/*
4082 					 * lun_cip field for a valid lun
4083 					 * should never be NULL. Fail the
4084 					 * command.
4085 					 */
4086 					*rval = ENXIO;
4087 					break;
4088 				}
4089 			}
4090 			if (plun) {
4091 				mutex_exit(&plun->lun_mutex);
4092 			} else {
4093 				char devnm[MAXNAMELEN];
4094 				int nmlen;
4095 
4096 				nmlen = snprintf(devnm, MAXNAMELEN, "%s@%s",
4097 				    ddi_node_name(cdip),
4098 				    ddi_get_name_addr(cdip));
4099 
4100 				if (copyout(&devnm, dcp->cpyout_buf, nmlen) !=
4101 				    0) {
4102 					*rval = EFAULT;
4103 				}
4104 			}
4105 		} else {
4106 			int	i;
4107 			char	buf[25];
4108 
4109 			for (i = 0; i < FC_WWN_SIZE; i++) {
4110 				(void) sprintf(&buf[i << 1], "%02x", bytes[i]);
4111 			}
4112 
4113 			fcp_log(CE_WARN, pptr->port_dip,
4114 			    "!Failed to create nodes for pwwn=%s; error=%x",
4115 			    buf, *rval);
4116 		}
4117 
4118 		(void) ndi_devi_free(useless_dip);
4119 		ddi_prop_free(bytes);
4120 		break;
4121 	}
4122 
4123 	case DEVCTL_DEVICE_RESET: {
4124 		struct fcp_lun		*plun;
4125 		child_info_t		*cip = CIP(cdip);
4126 
4127 		ASSERT(cdip != NULL);
4128 		ASSERT(pptr != NULL);
4129 		mutex_enter(&pptr->port_mutex);
4130 		if (pip != NULL) {
4131 			cip = CIP(pip);
4132 		}
4133 		if ((plun = fcp_get_lun_from_cip(pptr, cip)) == NULL) {
4134 			mutex_exit(&pptr->port_mutex);
4135 			*rval = ENXIO;
4136 			break;
4137 		}
4138 		mutex_exit(&pptr->port_mutex);
4139 
4140 		mutex_enter(&plun->lun_tgt->tgt_mutex);
4141 		if (!(plun->lun_state & FCP_SCSI_LUN_TGT_INIT)) {
4142 			mutex_exit(&plun->lun_tgt->tgt_mutex);
4143 
4144 			*rval = ENXIO;
4145 			break;
4146 		}
4147 
4148 		if (plun->lun_sd == NULL) {
4149 			mutex_exit(&plun->lun_tgt->tgt_mutex);
4150 
4151 			*rval = ENXIO;
4152 			break;
4153 		}
4154 		mutex_exit(&plun->lun_tgt->tgt_mutex);
4155 
4156 		/*
4157 		 * set up ap so that fcp_reset can figure out
4158 		 * which target to reset
4159 		 */
4160 		if (fcp_scsi_reset(&plun->lun_sd->sd_address,
4161 		    RESET_TARGET) == FALSE) {
4162 			*rval = EIO;
4163 		}
4164 		break;
4165 	}
4166 
4167 	case DEVCTL_BUS_GETSTATE:
4168 		ASSERT(dcp != NULL);
4169 		ASSERT(pptr != NULL);
4170 		ASSERT(pptr->port_dip != NULL);
4171 		if (ndi_dc_return_bus_state(pptr->port_dip, dcp) !=
4172 		    NDI_SUCCESS) {
4173 			*rval = EFAULT;
4174 		}
4175 		break;
4176 
4177 	case DEVCTL_BUS_QUIESCE:
4178 	case DEVCTL_BUS_UNQUIESCE:
4179 		*rval = ENOTSUP;
4180 		break;
4181 
4182 	case DEVCTL_BUS_RESET:
4183 	case DEVCTL_BUS_RESETALL:
4184 		ASSERT(pptr != NULL);
4185 		(void) fcp_linkreset(pptr, NULL,  KM_SLEEP);
4186 		break;
4187 
4188 	default:
4189 		ASSERT(dcp != NULL);
4190 		*rval = ENOTTY;
4191 		break;
4192 	}
4193 
4194 	/* all done -- clean up and return */
4195 out:	if (devi_entered) {
4196 		if (is_mpxio) {
4197 			mdi_devi_exit(pptr->port_dip, circ);
4198 		} else {
4199 			ndi_devi_exit(pptr->port_dip, circ);
4200 		}
4201 	}
4202 
4203 	if (dcp != NULL) {
4204 		ndi_dc_freehdl(dcp);
4205 	}
4206 
4207 	return (retval);
4208 }
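
/*
 * Summary of the devctl commands claimed by fcp_port_ioctl() above:
 *
 *	DEVCTL_DEVICE_OFFLINE/REMOVE	abort outstanding commands and take
 *					the LUN offline via the hotplug thread
 *	DEVCTL_DEVICE_ONLINE		bring the LUN online (FCP_LUN_ONLINING
 *					lets scsi_probe()'s INQUIRY through)
 *	DEVCTL_BUS_DEV_CREATE		create a target and its LUNs on demand
 *					from the node's port-WWN property, then
 *					wait for the LUNs to attach
 *	DEVCTL_DEVICE_RESET		reset the target behind the LUN
 *	DEVCTL_BUS_GETSTATE,
 *	DEVCTL_DEVICE_GETSTATE		return the NDI bus/device state
 *	DEVCTL_BUS_RESET/RESETALL	issue a link reset
 *	DEVCTL_BUS_QUIESCE/UNQUIESCE	not supported (ENOTSUP)
 */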
4209 
4210 
4211 /*ARGSUSED*/
4212 static int
4213 fcp_els_callback(opaque_t ulph, opaque_t port_handle, fc_unsol_buf_t *buf,
4214     uint32_t claimed)
4215 {
4216 	uchar_t			r_ctl;
4217 	uchar_t			ls_code;
4218 	struct fcp_port	*pptr;
4219 
4220 	if ((pptr = fcp_get_port(port_handle)) == NULL || claimed) {
4221 		return (FC_UNCLAIMED);
4222 	}
4223 
4224 	mutex_enter(&pptr->port_mutex);
4225 	if (pptr->port_state & (FCP_STATE_DETACHING |
4226 	    FCP_STATE_SUSPENDED | FCP_STATE_POWER_DOWN)) {
4227 		mutex_exit(&pptr->port_mutex);
4228 		return (FC_UNCLAIMED);
4229 	}
4230 	mutex_exit(&pptr->port_mutex);
4231 
4232 	r_ctl = buf->ub_frame.r_ctl;
4233 
4234 	switch (r_ctl & R_CTL_ROUTING) {
4235 	case R_CTL_EXTENDED_SVC:
4236 		if (r_ctl == R_CTL_ELS_REQ) {
4237 			ls_code = buf->ub_buffer[0];
4238 
4239 			switch (ls_code) {
4240 			case LA_ELS_PRLI:
4241 				/*
4242 				 * We really don't care if something fails.
4243 				 * If the PRLI was not sent out, then the
4244 				 * other end will time it out.
4245 				 */
4246 				if (fcp_unsol_prli(pptr, buf) == FC_SUCCESS) {
4247 					return (FC_SUCCESS);
4248 				}
4249 				return (FC_UNCLAIMED);
4250 				/* NOTREACHED */
4251 
4252 			default:
4253 				break;
4254 			}
4255 		}
4256 		/* FALLTHROUGH */
4257 
4258 	default:
4259 		return (FC_UNCLAIMED);
4260 	}
4261 }
4262 
4263 
4264 /*ARGSUSED*/
4265 static int
4266 fcp_data_callback(opaque_t ulph, opaque_t port_handle, fc_unsol_buf_t *buf,
4267     uint32_t claimed)
4268 {
4269 	return (FC_UNCLAIMED);
4270 }
4271 
4272 /*
4273  *     Function: fcp_statec_callback
4274  *
4275  *  Description: The purpose of this function is to handle a port state change.
4276  *		 It is called from fp/fctl and, in a few instances, internally.
4277  *
4278  *     Argument: ulph		fp/fctl port handle
4279  *		 port_handle	fcp_port structure
4280  *		 port_state	Physical state of the port
4281  *		 port_top	Topology
4282  *		 *devlist	Pointer to the first entry of a table
4283  *				containing the remote ports that can be
4284  *				reached.
4285  *		 dev_cnt	Number of entries pointed by devlist.
4286  *		 port_sid	Port ID of the local port.
4287  *
4288  * Return Value: None
4289  */
4290 /*ARGSUSED*/
4291 static void
4292 fcp_statec_callback(opaque_t ulph, opaque_t port_handle,
4293     uint32_t port_state, uint32_t port_top, fc_portmap_t *devlist,
4294     uint32_t dev_cnt, uint32_t port_sid)
4295 {
4296 	uint32_t		link_count;
4297 	int			map_len = 0;
4298 	struct fcp_port	*pptr;
4299 	fcp_map_tag_t		*map_tag = NULL;
4300 
4301 	if ((pptr = fcp_get_port(port_handle)) == NULL) {
4302 		fcp_log(CE_WARN, NULL, "!Invalid port handle in callback");
4303 		return;			/* nothing to work with! */
4304 	}
4305 
4306 	FCP_TRACE(fcp_logq, pptr->port_instbuf,
4307 	    fcp_trace, FCP_BUF_LEVEL_2, 0,
4308 	    "fcp_statec_callback: port state/dev_cnt/top ="
4309 	    "%d/%d/%d", FC_PORT_STATE_MASK(port_state),
4310 	    dev_cnt, port_top);
4311 
4312 	mutex_enter(&pptr->port_mutex);
4313 
4314 	/*
4315 	 * If a thread is in detach, don't do anything.
4316 	 */
4317 	if (pptr->port_state & (FCP_STATE_DETACHING |
4318 	    FCP_STATE_SUSPENDED | FCP_STATE_POWER_DOWN)) {
4319 		mutex_exit(&pptr->port_mutex);
4320 		return;
4321 	}
4322 
4323 	/*
4324 	 * First thing we do is set the FCP_STATE_IN_CB_DEVC flag so that if
4325 	 * init_pkt is called, it knows whether or not the target's status
4326 	 * (or pd) might be changing.
4327 	 */
4328 
4329 	if (FC_PORT_STATE_MASK(port_state) == FC_STATE_DEVICE_CHANGE) {
4330 		pptr->port_state |= FCP_STATE_IN_CB_DEVC;
4331 	}
4332 
4333 	/*
4334 	 * the transport doesn't allocate or probe unless being
4335 	 * asked to by either the applications or ULPs
4336 	 *
4337 	 * in cases where the port is OFFLINE at the time of port
4338 	 * attach callback and the link comes ONLINE later, for
4339 	 * easier automatic node creation (i.e. without you having to
4340 	 * go out and run the utility to perform LOGINs) the
4341 	 * following conditional is helpful
4342 	 */
4343 	pptr->port_phys_state = port_state;
4344 
4345 	if (dev_cnt) {
4346 		mutex_exit(&pptr->port_mutex);
4347 
4348 		map_len = sizeof (*map_tag) * dev_cnt;
4349 		map_tag = kmem_alloc(map_len, KM_NOSLEEP);
4350 		if (map_tag == NULL) {
4351 			fcp_log(CE_WARN, pptr->port_dip,
4352 			    "!fcp%d: failed to allocate for map tags; "
4353 			    "state change will not be processed",
4354 			    pptr->port_instance);
4355 
4356 			mutex_enter(&pptr->port_mutex);
4357 			pptr->port_state &= ~FCP_STATE_IN_CB_DEVC;
4358 			mutex_exit(&pptr->port_mutex);
4359 
4360 			return;
4361 		}
4362 
4363 		mutex_enter(&pptr->port_mutex);
4364 	}
4365 
4366 	if (pptr->port_id != port_sid) {
4367 		FCP_TRACE(fcp_logq, pptr->port_instbuf,
4368 		    fcp_trace, FCP_BUF_LEVEL_3, 0,
4369 		    "fcp: Port S_ID=0x%x => 0x%x", pptr->port_id,
4370 		    port_sid);
4371 		/*
4372 		 * The local port changed ID. It is the first time a port ID
4373 		 * is assigned or something drastic happened.  We might have
4374 		 * been unplugged and replugged on another loop or fabric port
4375 		 * or somebody grabbed the AL_PA we had or somebody rezoned
4376 		 * the fabric we were plugged into.
4377 		 */
4378 		pptr->port_id = port_sid;
4379 	}
4380 
4381 	switch (FC_PORT_STATE_MASK(port_state)) {
4382 	case FC_STATE_OFFLINE:
4383 	case FC_STATE_RESET_REQUESTED:
4384 		/*
4385 		 * link has gone from online to offline -- just update the
4386 		 * state of this port to BUSY and MARKed to go offline
4387 		 */
4388 		FCP_TRACE(fcp_logq, pptr->port_instbuf,
4389 		    fcp_trace, FCP_BUF_LEVEL_3, 0,
4390 		    "link went offline");
4391 		if ((pptr->port_state & FCP_STATE_OFFLINE) && dev_cnt) {
4392 			/*
4393 			 * We were offline a while ago and this one
4394 			 * seems to indicate that the loop has gone
4395 			 * dead forever.
4396 			 */
4397 			pptr->port_tmp_cnt += dev_cnt;
4398 			pptr->port_state &= ~FCP_STATE_OFFLINE;
4399 			pptr->port_state |= FCP_STATE_INIT;
4400 			link_count = pptr->port_link_cnt;
4401 			fcp_handle_devices(pptr, devlist, dev_cnt,
4402 			    link_count, map_tag, FCP_CAUSE_LINK_DOWN);
4403 		} else {
4404 			pptr->port_link_cnt++;
4405 			ASSERT(!(pptr->port_state & FCP_STATE_SUSPENDED));
4406 			fcp_update_state(pptr, (FCP_LUN_BUSY |
4407 			    FCP_LUN_MARK), FCP_CAUSE_LINK_DOWN);
4408 			if (pptr->port_mpxio) {
4409 				fcp_update_mpxio_path_verifybusy(pptr);
4410 			}
4411 			pptr->port_state |= FCP_STATE_OFFLINE;
4412 			pptr->port_state &=
4413 			    ~(FCP_STATE_ONLINING | FCP_STATE_ONLINE);
4414 			pptr->port_tmp_cnt = 0;
4415 		}
4416 		mutex_exit(&pptr->port_mutex);
4417 		break;
4418 
4419 	case FC_STATE_ONLINE:
4420 	case FC_STATE_LIP:
4421 	case FC_STATE_LIP_LBIT_SET:
4422 		/*
4423 		 * link has gone from offline to online
4424 		 */
4425 		FCP_TRACE(fcp_logq, pptr->port_instbuf,
4426 		    fcp_trace, FCP_BUF_LEVEL_3, 0,
4427 		    "link went online");
4428 
4429 		pptr->port_link_cnt++;
4430 
4431 		while (pptr->port_ipkt_cnt) {
4432 			mutex_exit(&pptr->port_mutex);
4433 			delay(drv_usectohz(1000000));
4434 			mutex_enter(&pptr->port_mutex);
4435 		}
4436 
4437 		pptr->port_topology = port_top;
4438 
4439 		/*
4440 		 * The state of the targets and luns accessible through this
4441 		 * port is updated.
4442 		 */
4443 		fcp_update_state(pptr, FCP_LUN_BUSY | FCP_LUN_MARK,
4444 		    FCP_CAUSE_LINK_CHANGE);
4445 
4446 		pptr->port_state &= ~(FCP_STATE_INIT | FCP_STATE_OFFLINE);
4447 		pptr->port_state |= FCP_STATE_ONLINING;
4448 		pptr->port_tmp_cnt = dev_cnt;
4449 		link_count = pptr->port_link_cnt;
4450 
4451 		pptr->port_deadline = fcp_watchdog_time +
4452 		    FCP_ICMD_DEADLINE;
4453 
4454 		if (!dev_cnt) {
4455 			/*
4456 			 * We go directly to the online state if no remote
4457 			 * ports were discovered.
4458 			 */
4459 			FCP_TRACE(fcp_logq, pptr->port_instbuf,
4460 			    fcp_trace, FCP_BUF_LEVEL_3, 0,
4461 			    "No remote ports discovered");
4462 
4463 			pptr->port_state &= ~FCP_STATE_ONLINING;
4464 			pptr->port_state |= FCP_STATE_ONLINE;
4465 		}
4466 
4467 		switch (port_top) {
4468 		case FC_TOP_FABRIC:
4469 		case FC_TOP_PUBLIC_LOOP:
4470 		case FC_TOP_PRIVATE_LOOP:
4471 		case FC_TOP_PT_PT:
4472 
4473 			if (pptr->port_state & FCP_STATE_NS_REG_FAILED) {
4474 				fcp_retry_ns_registry(pptr, port_sid);
4475 			}
4476 
4477 			fcp_handle_devices(pptr, devlist, dev_cnt, link_count,
4478 			    map_tag, FCP_CAUSE_LINK_CHANGE);
4479 			break;
4480 
4481 		default:
4482 			/*
4483 			 * We got here because we were provided with an unknown
4484 			 * topology.
4485 			 */
4486 			if (pptr->port_state & FCP_STATE_NS_REG_FAILED) {
4487 				pptr->port_state &= ~FCP_STATE_NS_REG_FAILED;
4488 			}
4489 
4490 			pptr->port_tmp_cnt -= dev_cnt;
4491 			fcp_log(CE_WARN, pptr->port_dip,
4492 			    "!unknown/unsupported topology (0x%x)", port_top);
4493 			break;
4494 		}
4495 		FCP_TRACE(fcp_logq, pptr->port_instbuf,
4496 		    fcp_trace, FCP_BUF_LEVEL_3, 0,
4497 		    "Notify ssd of the reset to reinstate the reservations");
4498 
4499 		scsi_hba_reset_notify_callback(&pptr->port_mutex,
4500 		    &pptr->port_reset_notify_listf);
4501 
4502 		mutex_exit(&pptr->port_mutex);
4503 
4504 		break;
4505 
4506 	case FC_STATE_RESET:
4507 		ASSERT(pptr->port_state & FCP_STATE_OFFLINE);
4508 		FCP_TRACE(fcp_logq, pptr->port_instbuf,
4509 		    fcp_trace, FCP_BUF_LEVEL_3, 0,
4510 		    "RESET state, waiting for Offline/Online state_cb");
4511 		mutex_exit(&pptr->port_mutex);
4512 		break;
4513 
4514 	case FC_STATE_DEVICE_CHANGE:
4515 		/*
4516 		 * We come here when an application has requested
4517 		 * Dynamic node creation/deletion in Fabric connectivity.
4518 		 */
4519 		if (pptr->port_state & (FCP_STATE_OFFLINE |
4520 		    FCP_STATE_INIT)) {
4521 			/*
4522 			 * This case can happen when the FCTL is in the
4523 			 * process of giving us an online state change and the
4524 			 * host on the other side issues a PLOGI/PLOGO. Ideally
4525 			 * the state changes should be serialized unless
4526 			 * they are opposite (online-offline).
4527 			 * The transport will give us a final state change
4528 			 * so we can ignore this for the time being.
4529 			 */
4530 			pptr->port_state &= ~FCP_STATE_IN_CB_DEVC;
4531 			mutex_exit(&pptr->port_mutex);
4532 			break;
4533 		}
4534 
4535 		if (pptr->port_state & FCP_STATE_NS_REG_FAILED) {
4536 			fcp_retry_ns_registry(pptr, port_sid);
4537 		}
4538 
4539 		/*
4540 		 * Extend the deadline under steady state conditions
4541 		 * to provide more time for the device-change-commands
4542 		 */
4543 		if (!pptr->port_ipkt_cnt) {
4544 			pptr->port_deadline = fcp_watchdog_time +
4545 			    FCP_ICMD_DEADLINE;
4546 		}
4547 
4548 		/*
4549 		 * There is another race condition here, where if we were
4550 		 * in ONLINING state and a device in the map logs out,
4551 		 * fp will give another state change as DEVICE_CHANGE
4552 		 * and OLD. This will result in that target being offlined.
4553 		 * The pd_handle is freed. If from the first statec callback
4554 		 * we were going to fire a PLOGI/PRLI, the system will
4555 		 * panic in fc_ulp_transport with invalid pd_handle.
4556 		 * The fix is to check for the link_cnt before issuing
4557 		 * any command down.
4558 		 */
4559 		fcp_update_targets(pptr, devlist, dev_cnt,
4560 		    FCP_LUN_BUSY | FCP_LUN_MARK, FCP_CAUSE_TGT_CHANGE);
4561 
4562 		link_count = pptr->port_link_cnt;
4563 
4564 		fcp_handle_devices(pptr, devlist, dev_cnt,
4565 		    link_count, map_tag, FCP_CAUSE_TGT_CHANGE);
4566 
4567 		pptr->port_state &= ~FCP_STATE_IN_CB_DEVC;
4568 
4569 		mutex_exit(&pptr->port_mutex);
4570 		break;
4571 
4572 	case FC_STATE_TARGET_PORT_RESET:
4573 		if (pptr->port_state & FCP_STATE_NS_REG_FAILED) {
4574 			fcp_retry_ns_registry(pptr, port_sid);
4575 		}
4576 
4577 		/* Do nothing else */
4578 		mutex_exit(&pptr->port_mutex);
4579 		break;
4580 
4581 	default:
4582 		fcp_log(CE_WARN, pptr->port_dip,
4583 		    "!Invalid state change=0x%x", port_state);
4584 		mutex_exit(&pptr->port_mutex);
4585 		break;
4586 	}
4587 
4588 	if (map_tag) {
4589 		kmem_free(map_tag, map_len);
4590 	}
4591 }
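
/*
 * Summary of the state handling in fcp_statec_callback() above:
 *
 *	OFFLINE/RESET_REQUESTED	mark targets/LUNs BUSY|MARK and take the port
 *				offline (or, if it was already offline, run
 *				link-down handling for the listed devices)
 *	ONLINE/LIP*		bump the link count, mark everything BUSY|MARK
 *				and rediscover through fcp_handle_devices()
 *	RESET			wait for the follow-up offline/online callback
 *	DEVICE_CHANGE		re-tag the targets and process the new map
 *	TARGET_PORT_RESET	only retry the name server registration
 */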
4592 
4593 /*
4594  *     Function: fcp_handle_devices
4595  *
4596  *  Description: This function updates the devices currently known by
4597  *		 walking the list provided by the caller.  The list passed
4598  *		 by the caller is supposed to be the list of reachable
4599  *		 devices.
4600  *
4601  *     Argument: *pptr		Fcp port structure.
4602  *		 *devlist	Pointer to the first entry of a table
4603  *				containing the remote ports that can be
4604  *				reached.
4605  *		 dev_cnt	Number of entries pointed by devlist.
4606  *		 link_cnt	Link state count.
4607  *		 *map_tag	Array of fcp_map_tag_t structures.
4608  *		 cause		What caused this function to be called.
4609  *
4610  * Return Value: None
4611  *
4612  *	  Notes: The pptr->port_mutex must be held.
4613  */
4614 static void
4615 fcp_handle_devices(struct fcp_port *pptr, fc_portmap_t devlist[],
4616     uint32_t dev_cnt, int link_cnt, fcp_map_tag_t *map_tag, int cause)
4617 {
4618 	int			i;
4619 	int			check_finish_init = 0;
4620 	fc_portmap_t		*map_entry;
4621 	struct fcp_tgt	*ptgt = NULL;
4622 
4623 	FCP_TRACE(fcp_logq, pptr->port_instbuf,
4624 	    fcp_trace, FCP_BUF_LEVEL_3, 0,
4625 	    "fcp_handle_devices: called for %d dev(s)", dev_cnt);
4626 
4627 	if (dev_cnt) {
4628 		ASSERT(map_tag != NULL);
4629 	}
4630 
4631 	/*
4632 	 * The following code goes through the list of remote ports that are
4633 	 * accessible through this (pptr) local port (The list walked is the
4634 	 * one provided by the caller which is the list of the remote ports
4635 	 * currently reachable).  It checks if any of them was already
4636 	 * known by looking for the corresponding target structure based on
4637 	 * the world wide name.	 If a target is part of the list it is tagged
4638 	 * (ptgt->tgt_aux_state = FCP_TGT_TAGGED).
4639 	 *
4640 	 * Old comment
4641 	 * -----------
4642 	 * Before we drop port mutex; we MUST get the tags updated; This
4643 	 * two step process is somewhat slow, but more reliable.
4644 	 */
4645 	for (i = 0; (i < dev_cnt) && (pptr->port_link_cnt == link_cnt); i++) {
4646 		map_entry = &(devlist[i]);
4647 
4648 		/*
4649 		 * get ptr to this map entry in our port's
4650 		 * list (if any)
4651 		 */
4652 		ptgt = fcp_lookup_target(pptr,
4653 		    (uchar_t *)&(map_entry->map_pwwn));
4654 
4655 		if (ptgt) {
4656 			map_tag[i] = ptgt->tgt_change_cnt;
4657 			if (cause == FCP_CAUSE_LINK_CHANGE) {
4658 				ptgt->tgt_aux_state = FCP_TGT_TAGGED;
4659 			}
4660 		}
4661 	}
4662 
4663 	/*
4664 	 * At this point we know which devices of the new list were already
4665 	 * known (The field tgt_aux_state of the target structure has been
4666 	 * set to FCP_TGT_TAGGED).
4667 	 *
4668 	 * The following code goes through the list of targets currently known
4669 	 * by the local port (the list is actually a hashing table).  If a
4670 	 * target is found and is not tagged, it means the target cannot
4671 	 * be reached anymore through the local port (pptr).  It is offlined.
4672 	 * The offlining only occurs if the cause is FCP_CAUSE_LINK_CHANGE.
4673 	 */
4674 	for (i = 0; i < FCP_NUM_HASH; i++) {
4675 		for (ptgt = pptr->port_tgt_hash_table[i]; ptgt != NULL;
4676 		    ptgt = ptgt->tgt_next) {
4677 			mutex_enter(&ptgt->tgt_mutex);
4678 			if ((ptgt->tgt_aux_state != FCP_TGT_TAGGED) &&
4679 			    (cause == FCP_CAUSE_LINK_CHANGE) &&
4680 			    !(ptgt->tgt_state & FCP_TGT_OFFLINE)) {
4681 				fcp_offline_target_now(pptr, ptgt,
4682 				    link_cnt, ptgt->tgt_change_cnt, 0);
4683 			}
4684 			mutex_exit(&ptgt->tgt_mutex);
4685 		}
4686 	}
4687 
4688 	/*
4689 	 * At this point, the devices that were known but cannot be reached
4690 	 * anymore, have most likely been offlined.
4691 	 *
4692 	 * The following section of code seems to go through the list of
4693 	 * remote ports that can now be reached.  For every single one it
4694 	 * checks if it is already known or if it is a new port.
4695 	 */
4696 	for (i = 0; (i < dev_cnt) && (pptr->port_link_cnt == link_cnt); i++) {
4697 
4698 		if (check_finish_init) {
4699 			ASSERT(i > 0);
4700 			(void) fcp_call_finish_init_held(pptr, ptgt, link_cnt,
4701 			    map_tag[i - 1], cause);
4702 			check_finish_init = 0;
4703 		}
4704 
4705 		/* get a pointer to this map entry */
4706 		map_entry = &(devlist[i]);
4707 
4708 		/*
4709 		 * Check for the duplicate map entry flag. If we have marked
4710 		 * this entry as a duplicate we skip it since the correct
4711 		 * (perhaps even same) state change will be encountered
4712 		 * later in the list.
4713 		 */
4714 		if (map_entry->map_flags & PORT_DEVICE_DUPLICATE_MAP_ENTRY) {
4715 			continue;
4716 		}
4717 
4718 		/* get ptr to this map entry in our port's list (if any) */
4719 		ptgt = fcp_lookup_target(pptr,
4720 		    (uchar_t *)&(map_entry->map_pwwn));
4721 
4722 		if (ptgt) {
4723 			/*
4724 			 * This device was already known.  The field
4725 			 * tgt_aux_state is reset (was probably set to
4726 			 * FCP_TGT_TAGGED previously in this routine).
4727 			 */
4728 			ptgt->tgt_aux_state = 0;
4729 			FCP_TRACE(fcp_logq, pptr->port_instbuf,
4730 			    fcp_trace, FCP_BUF_LEVEL_3, 0,
4731 			    "handle_devices: map did/state/type/flags = "
4732 			    "0x%x/0x%x/0x%x/0x%x, tgt_d_id=0x%x, "
4733 			    "tgt_state=%d",
4734 			    map_entry->map_did.port_id, map_entry->map_state,
4735 			    map_entry->map_type, map_entry->map_flags,
4736 			    ptgt->tgt_d_id, ptgt->tgt_state);
4737 		}
4738 
4739 		if (map_entry->map_type == PORT_DEVICE_OLD ||
4740 		    map_entry->map_type == PORT_DEVICE_NEW ||
4741 		    map_entry->map_type == PORT_DEVICE_REPORTLUN_CHANGED ||
4742 		    map_entry->map_type == PORT_DEVICE_CHANGED) {
4743 			FCP_TRACE(fcp_logq, pptr->port_instbuf,
4744 			    fcp_trace, FCP_BUF_LEVEL_2, 0,
4745 			    "map_type=%x, did = %x",
4746 			    map_entry->map_type,
4747 			    map_entry->map_did.port_id);
4748 		}
4749 
4750 		switch (map_entry->map_type) {
4751 		case PORT_DEVICE_NOCHANGE:
4752 		case PORT_DEVICE_USER_CREATE:
4753 		case PORT_DEVICE_USER_LOGIN:
4754 		case PORT_DEVICE_NEW:
4755 		case PORT_DEVICE_REPORTLUN_CHANGED:
4756 			FCP_TGT_TRACE(ptgt, map_tag[i], FCP_TGT_TRACE_1);
4757 
4758 			if (fcp_handle_mapflags(pptr, ptgt, map_entry,
4759 			    link_cnt, (ptgt) ? map_tag[i] : 0,
4760 			    cause) == TRUE) {
4761 
4762 				FCP_TGT_TRACE(ptgt, map_tag[i],
4763 				    FCP_TGT_TRACE_2);
4764 				check_finish_init++;
4765 			}
4766 			break;
4767 
4768 		case PORT_DEVICE_OLD:
4769 			if (ptgt != NULL) {
4770 				FCP_TGT_TRACE(ptgt, map_tag[i],
4771 				    FCP_TGT_TRACE_3);
4772 
4773 				mutex_enter(&ptgt->tgt_mutex);
4774 				if (!(ptgt->tgt_state & FCP_TGT_OFFLINE)) {
4775 					/*
4776 					 * Must do an in-line wait for I/Os
4777 					 * to get drained
4778 					 */
4779 					mutex_exit(&ptgt->tgt_mutex);
4780 					mutex_exit(&pptr->port_mutex);
4781 
4782 					mutex_enter(&ptgt->tgt_mutex);
4783 					while (ptgt->tgt_ipkt_cnt ||
4784 					    fcp_outstanding_lun_cmds(ptgt)
4785 					    == FC_SUCCESS) {
4786 						mutex_exit(&ptgt->tgt_mutex);
4787 						delay(drv_usectohz(1000000));
4788 						mutex_enter(&ptgt->tgt_mutex);
4789 					}
4790 					mutex_exit(&ptgt->tgt_mutex);
4791 
4792 					mutex_enter(&pptr->port_mutex);
4793 					mutex_enter(&ptgt->tgt_mutex);
4794 
4795 					(void) fcp_offline_target(pptr, ptgt,
4796 					    link_cnt, map_tag[i], 0, 0);
4797 				}
4798 				mutex_exit(&ptgt->tgt_mutex);
4799 			}
4800 			check_finish_init++;
4801 			break;
4802 
4803 		case PORT_DEVICE_USER_DELETE:
4804 		case PORT_DEVICE_USER_LOGOUT:
4805 			if (ptgt != NULL) {
4806 				FCP_TGT_TRACE(ptgt, map_tag[i],
4807 				    FCP_TGT_TRACE_4);
4808 
4809 				mutex_enter(&ptgt->tgt_mutex);
4810 				if (!(ptgt->tgt_state & FCP_TGT_OFFLINE)) {
4811 					(void) fcp_offline_target(pptr, ptgt,
4812 					    link_cnt, map_tag[i], 1, 0);
4813 				}
4814 				mutex_exit(&ptgt->tgt_mutex);
4815 			}
4816 			check_finish_init++;
4817 			break;
4818 
4819 		case PORT_DEVICE_CHANGED:
4820 			if (ptgt != NULL) {
4821 				FCP_TGT_TRACE(ptgt, map_tag[i],
4822 				    FCP_TGT_TRACE_5);
4823 
4824 				if (fcp_device_changed(pptr, ptgt,
4825 				    map_entry, link_cnt, map_tag[i],
4826 				    cause) == TRUE) {
4827 					check_finish_init++;
4828 				}
4829 			} else {
4830 				if (fcp_handle_mapflags(pptr, ptgt,
4831 				    map_entry, link_cnt, 0, cause) == TRUE) {
4832 					check_finish_init++;
4833 				}
4834 			}
4835 			break;
4836 
4837 		default:
4838 			fcp_log(CE_WARN, pptr->port_dip,
4839 			    "!Invalid map_type=0x%x", map_entry->map_type);
4840 			check_finish_init++;
4841 			break;
4842 		}
4843 	}
4844 
4845 	if (check_finish_init && pptr->port_link_cnt == link_cnt) {
4846 		ASSERT(i > 0);
4847 		(void) fcp_call_finish_init_held(pptr, ptgt, link_cnt,
4848 		    map_tag[i-1], cause);
4849 	} else if (dev_cnt == 0 && pptr->port_link_cnt == link_cnt) {
4850 		fcp_offline_all(pptr, link_cnt, cause);
4851 	}
4852 }
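
/*
 * fcp_handle_devices() above is essentially a tag-and-sweep in three passes:
 *
 *	1) tag every target found in the new devlist (FCP_TGT_TAGGED) and
 *	   record its change counter in map_tag[];
 *	2) sweep the target hash table and offline any untagged target when
 *	   the cause is a link change (it is no longer reachable);
 *	3) walk devlist again and, per map_type, kick off PLOGI/PRLI or
 *	   REPORT LUN discovery, offline the target, or re-validate it.
 */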
4853 
4854 static int
4855 fcp_handle_reportlun_changed(struct fcp_tgt *ptgt, int cause)
4856 {
4857 	struct fcp_lun	*plun;
4858 	struct fcp_port *pptr;
4859 	int		 rscn_count;
4860 	int		 lun0_newalloc;
4861 	int		 ret  = TRUE;
4862 
4863 	ASSERT(ptgt);
4864 	pptr = ptgt->tgt_port;
4865 	lun0_newalloc = 0;
4866 	if ((plun = fcp_get_lun(ptgt, 0)) == NULL) {
4867 		/*
4868 		 * no LUN struct for LUN 0 yet exists,
4869 		 * so create one
4870 		 */
4871 		plun = fcp_alloc_lun(ptgt);
4872 		if (plun == NULL) {
4873 			fcp_log(CE_WARN, pptr->port_dip,
4874 			    "!Failed to allocate lun 0 for"
4875 			    " D_ID=%x", ptgt->tgt_d_id);
4876 			return (ret);
4877 		}
4878 		lun0_newalloc = 1;
4879 	}
4880 
4881 	mutex_enter(&ptgt->tgt_mutex);
4882 	/*
4883 	 * consider lun 0 as device not connected if it is
4884 	 * offlined or newly allocated
4885 	 */
4886 	if ((plun->lun_state & FCP_LUN_OFFLINE) || lun0_newalloc) {
4887 		plun->lun_state |= FCP_LUN_DEVICE_NOT_CONNECTED;
4888 	}
4889 	plun->lun_state |= (FCP_LUN_BUSY | FCP_LUN_MARK);
4890 	plun->lun_state &= ~FCP_LUN_OFFLINE;
4891 	ptgt->tgt_lun_cnt = 1;
4892 	ptgt->tgt_report_lun_cnt = 0;
4893 	mutex_exit(&ptgt->tgt_mutex);
4894 
4895 	rscn_count = fc_ulp_get_rscn_count(pptr->port_fp_handle);
4896 	if (fcp_send_scsi(plun, SCMD_REPORT_LUN,
4897 	    sizeof (struct fcp_reportlun_resp), pptr->port_link_cnt,
4898 	    ptgt->tgt_change_cnt, cause, rscn_count) != DDI_SUCCESS) {
4899 		FCP_TRACE(fcp_logq, pptr->port_instbuf,
4900 		    fcp_trace, FCP_BUF_LEVEL_3, 0, "!Failed to send REPORTLUN "
4901 		    "to D_ID=%x", ptgt->tgt_d_id);
4902 	} else {
4903 		ret = FALSE;
4904 	}
4905 
4906 	return (ret);
4907 }
4908 
4909 /*
4910  *     Function: fcp_handle_mapflags
4911  *
4912  *  Description: This function creates a target structure if the ptgt passed
4913  *		 is NULL.  It also kicks off the PLOGI if we are not logged
4914  *		 into the target yet or the PRLI if we are logged into the
4915  *		 target already.  The rest of the treatment is done in the
4916  *		 callbacks of the PLOGI or PRLI.
4917  *
4918  *     Argument: *pptr		FCP Port structure.
4919  *		 *ptgt		Target structure.
4920  *		 *map_entry	Array of fc_portmap_t structures.
4921  *		 link_cnt	Link state count.
4922  *		 tgt_cnt	Target state count.
4923  *		 cause		What caused this function to be called.
4924  *
4925  * Return Value: TRUE	Failed
4926  *		 FALSE	Succeeded
4927  *
4928  *	  Notes: pptr->port_mutex must be owned.
4929  */
4930 static int
4931 fcp_handle_mapflags(struct fcp_port	*pptr, struct fcp_tgt	*ptgt,
4932     fc_portmap_t *map_entry, int link_cnt, int tgt_cnt, int cause)
4933 {
4934 	int			lcount;
4935 	int			tcount;
4936 	int			ret = TRUE;
4937 	int			alloc;
4938 	struct fcp_ipkt	*icmd;
4939 	struct fcp_lun	*pseq_lun = NULL;
4940 	uchar_t			opcode;
4941 	int			valid_ptgt_was_passed = FALSE;
4942 
4943 	ASSERT(mutex_owned(&pptr->port_mutex));
4944 
4945 	/*
4946 	 * This case is possible when the FCTL has come up and done discovery
4947 	 * before FCP was loaded and attached. FCTL would have discovered the
4948 	 * devices and later the ULP came online. In this case ULPs would get
4949 	 * PORT_DEVICE_NOCHANGE but target would be NULL.
4950 	 */
4951 	if (ptgt == NULL) {
4952 		/* don't already have a target */
4953 		mutex_exit(&pptr->port_mutex);
4954 		ptgt = fcp_alloc_tgt(pptr, map_entry, link_cnt);
4955 		mutex_enter(&pptr->port_mutex);
4956 
4957 		if (ptgt == NULL) {
4958 			fcp_log(CE_WARN, pptr->port_dip,
4959 			    "!FC target allocation failed");
4960 			return (ret);
4961 		}
4962 		mutex_enter(&ptgt->tgt_mutex);
4963 		ptgt->tgt_statec_cause = cause;
4964 		ptgt->tgt_tmp_cnt = 1;
4965 		mutex_exit(&ptgt->tgt_mutex);
4966 	} else {
4967 		valid_ptgt_was_passed = TRUE;
4968 	}
4969 
4970 	/*
4971 	 * Copy in the target parameters
4972 	 */
4973 	mutex_enter(&ptgt->tgt_mutex);
4974 	ptgt->tgt_d_id = map_entry->map_did.port_id;
4975 	ptgt->tgt_hard_addr = map_entry->map_hard_addr.hard_addr;
4976 	ptgt->tgt_pd_handle = map_entry->map_pd;
4977 	ptgt->tgt_fca_dev = NULL;
4978 
4979 	/* Copy port and node WWNs */
4980 	bcopy(&map_entry->map_nwwn, &ptgt->tgt_node_wwn.raw_wwn[0],
4981 	    FC_WWN_SIZE);
4982 	bcopy(&map_entry->map_pwwn, &ptgt->tgt_port_wwn.raw_wwn[0],
4983 	    FC_WWN_SIZE);
4984 
4985 	if (!(map_entry->map_flags & PORT_DEVICE_NO_SKIP_DEVICE_DISCOVERY) &&
4986 	    (map_entry->map_type == PORT_DEVICE_NOCHANGE) &&
4987 	    (map_entry->map_state == PORT_DEVICE_LOGGED_IN) &&
4988 	    valid_ptgt_was_passed) {
4989 		/*
4990 		 * determine if there are any tape LUNs on this target
4991 		 */
4992 		for (pseq_lun = ptgt->tgt_lun;
4993 		    pseq_lun != NULL;
4994 		    pseq_lun = pseq_lun->lun_next) {
4995 			if ((pseq_lun->lun_type == DTYPE_SEQUENTIAL) &&
4996 			    !(pseq_lun->lun_state & FCP_LUN_OFFLINE)) {
4997 				fcp_update_tgt_state(ptgt, FCP_RESET,
4998 				    FCP_LUN_MARK);
4999 				mutex_exit(&ptgt->tgt_mutex);
5000 				return (ret);
5001 			}
5002 		}
5003 	}
5004 
5005 	/*
5006 	 * If a REPORT_LUN_CHANGED unit attention was received, send out
5007 	 * REPORT LUN promptly and skip the PLOGI/PRLI process.
5008 	 */
5009 	if (map_entry->map_type == PORT_DEVICE_REPORTLUN_CHANGED) {
5010 		ptgt->tgt_state &= ~(FCP_TGT_OFFLINE | FCP_TGT_MARK);
5011 		mutex_exit(&ptgt->tgt_mutex);
5012 		mutex_exit(&pptr->port_mutex);
5013 
5014 		ret = fcp_handle_reportlun_changed(ptgt, cause);
5015 
5016 		mutex_enter(&pptr->port_mutex);
5017 		return (ret);
5018 	}
5019 
5020 	/*
5021 	 * If ptgt was NULL when this function was entered, then tgt_node_state
5022 	 * was never specifically initialized but zeroed out which means
5023 	 * FCP_TGT_NODE_NONE.
5024 	 */
5025 	switch (ptgt->tgt_node_state) {
5026 	case FCP_TGT_NODE_NONE:
5027 	case FCP_TGT_NODE_ON_DEMAND:
5028 		if (FC_TOP_EXTERNAL(pptr->port_topology) &&
5029 		    !fcp_enable_auto_configuration &&
5030 		    map_entry->map_type != PORT_DEVICE_USER_CREATE) {
5031 			ptgt->tgt_node_state = FCP_TGT_NODE_ON_DEMAND;
5032 		} else if (FC_TOP_EXTERNAL(pptr->port_topology) &&
5033 		    fcp_enable_auto_configuration &&
5034 		    (ptgt->tgt_manual_config_only == 1) &&
5035 		    map_entry->map_type != PORT_DEVICE_USER_CREATE) {
5036 			/*
5037 			 * If auto configuration is set and
5038 			 * the tgt_manual_config_only flag is set then
5039 			 * we only want the user to be able to change
5040 			 * the state through create_on_demand.
5041 			 */
5042 			ptgt->tgt_node_state = FCP_TGT_NODE_ON_DEMAND;
5043 		} else {
5044 			ptgt->tgt_node_state = FCP_TGT_NODE_NONE;
5045 		}
5046 		break;
5047 
5048 	case FCP_TGT_NODE_PRESENT:
5049 		break;
5050 	}
5051 	/*
5052 	 * If we are booting from a fabric device, make sure we
5053 	 * mark the node state appropriately for this target to be
5054 	 * enumerated
5055 	 */
5056 	if (FC_TOP_EXTERNAL(pptr->port_topology) && pptr->port_boot_wwn[0]) {
5057 		if (bcmp((caddr_t)pptr->port_boot_wwn,
5058 		    (caddr_t)&ptgt->tgt_port_wwn.raw_wwn[0],
5059 		    sizeof (ptgt->tgt_port_wwn)) == 0) {
5060 			ptgt->tgt_node_state = FCP_TGT_NODE_NONE;
5061 		}
5062 	}
5063 	mutex_exit(&ptgt->tgt_mutex);
5064 
5065 	FCP_TRACE(fcp_logq, pptr->port_instbuf,
5066 	    fcp_trace, FCP_BUF_LEVEL_3, 0,
5067 	    "map_pd=%p, map_type=%x, did = %x, ulp_rscn_count=0x%x",
5068 	    map_entry->map_pd, map_entry->map_type, map_entry->map_did.port_id,
5069 	    map_entry->map_rscn_info.ulp_rscn_count);
5070 
5071 	mutex_enter(&ptgt->tgt_mutex);
5072 
5073 	/*
5074 	 * Reset target OFFLINE state and mark the target BUSY
5075 	 */
5076 	ptgt->tgt_state &= ~FCP_TGT_OFFLINE;
5077 	ptgt->tgt_state |= (FCP_TGT_BUSY | FCP_TGT_MARK);
5078 
5079 	tcount = tgt_cnt ? tgt_cnt : ptgt->tgt_change_cnt;
5080 	lcount = link_cnt;
5081 
5082 	mutex_exit(&ptgt->tgt_mutex);
5083 	mutex_exit(&pptr->port_mutex);
5084 
5085 	/*
5086 	 * if we are already logged in, then we do a PRLI, else
5087 	 * we do a PLOGI first (to get logged in)
5088 	 *
5089 	 * We will not check if we are the PLOGI initiator
5090 	 */
5091 	opcode = (map_entry->map_state == PORT_DEVICE_LOGGED_IN &&
5092 	    map_entry->map_pd != NULL) ? LA_ELS_PRLI : LA_ELS_PLOGI;
5093 
5094 	alloc = FCP_MAX(sizeof (la_els_logi_t), sizeof (la_els_prli_t));
5095 
5096 	icmd = fcp_icmd_alloc(pptr, ptgt, alloc, alloc, 0,
5097 	    pptr->port_state & FCP_STATE_FCA_IS_NODMA, lcount, tcount,
5098 	    cause, map_entry->map_rscn_info.ulp_rscn_count);
5099 
5100 	if (icmd == NULL) {
5101 		FCP_TGT_TRACE(ptgt, tgt_cnt, FCP_TGT_TRACE_29);
5102 		/*
5103 		 * We've exited port_mutex before calling fcp_icmd_alloc,
5104 		 * we need to make sure we reacquire it before returning.
5105 		 */
5106 		mutex_enter(&pptr->port_mutex);
5107 		return (FALSE);
5108 	}
5109 
5110 	/* TRUE is only returned when the target is intentionally skipped */
5111 	ret = FALSE;
5112 	/* discover info about this target */
5113 	if ((fcp_send_els(pptr, ptgt, icmd, opcode,
5114 	    lcount, tcount, cause)) == DDI_SUCCESS) {
5115 		FCP_TGT_TRACE(ptgt, tgt_cnt, FCP_TGT_TRACE_9);
5116 	} else {
5117 		fcp_icmd_free(pptr, icmd);
5118 		ret = TRUE;
5119 	}
5120 	mutex_enter(&pptr->port_mutex);
5121 
5122 	return (ret);
5123 }
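
/*
 * Note on the return value of fcp_handle_mapflags() above: TRUE means no
 * discovery command was issued for this target (it is being skipped or the
 * send failed), so the caller counts it as finished; FALSE means a PLOGI,
 * PRLI or REPORT LUN has been sent and the remaining work happens in the
 * completion callbacks.
 */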
5124 
5125 /*
5126  *     Function: fcp_send_els
5127  *
5128  *  Description: Sends an ELS to the target specified by the caller.  Supports
5129  *		 PLOGI and PRLI.
5130  *
5131  *     Argument: *pptr		Fcp port.
5132  *		 *ptgt		Target to send the ELS to.
5133  *		 *icmd		Internal packet
5134  *		 opcode		ELS opcode
5135  *		 lcount		Link state change counter
5136  *		 tcount		Target state change counter
5137  *		 cause		What caused the call
5138  *
5139  * Return Value: DDI_SUCCESS
5140  *		 Others
5141  */
5142 static int
5143 fcp_send_els(struct fcp_port *pptr, struct fcp_tgt *ptgt,
5144     struct fcp_ipkt *icmd, uchar_t opcode, int lcount, int tcount, int cause)
5145 {
5146 	fc_packet_t		*fpkt;
5147 	fc_frame_hdr_t		*hp;
5148 	int			internal = 0;
5149 	int			alloc;
5150 	int			cmd_len;
5151 	int			resp_len;
5152 	int			res = DDI_FAILURE; /* default result */
5153 	int			rval = DDI_FAILURE;
5154 
5155 	ASSERT(opcode == LA_ELS_PLOGI || opcode == LA_ELS_PRLI);
5156 	ASSERT(ptgt->tgt_port == pptr);
5157 
5158 	FCP_TRACE(fcp_logq, pptr->port_instbuf,
5159 	    fcp_trace, FCP_BUF_LEVEL_5, 0,
5160 	    "fcp_send_els: d_id=0x%x ELS 0x%x (%s)", ptgt->tgt_d_id, opcode,
5161 	    (opcode == LA_ELS_PLOGI) ? "PLOGI" : "PRLI");
5162 
5163 	if (opcode == LA_ELS_PLOGI) {
5164 		cmd_len = sizeof (la_els_logi_t);
5165 		resp_len = sizeof (la_els_logi_t);
5166 	} else {
5167 		ASSERT(opcode == LA_ELS_PRLI);
5168 		cmd_len = sizeof (la_els_prli_t);
5169 		resp_len = sizeof (la_els_prli_t);
5170 	}
5171 
5172 	if (icmd == NULL) {
5173 		alloc = FCP_MAX(sizeof (la_els_logi_t),
5174 		    sizeof (la_els_prli_t));
5175 		icmd = fcp_icmd_alloc(pptr, ptgt, alloc, alloc, 0,
5176 		    pptr->port_state & FCP_STATE_FCA_IS_NODMA,
5177 		    lcount, tcount, cause, FC_INVALID_RSCN_COUNT);
5178 		if (icmd == NULL) {
5179 			FCP_TGT_TRACE(ptgt, tcount, FCP_TGT_TRACE_10);
5180 			return (res);
5181 		}
5182 		internal++;
5183 	}
5184 	fpkt = icmd->ipkt_fpkt;
5185 
5186 	fpkt->pkt_cmdlen = cmd_len;
5187 	fpkt->pkt_rsplen = resp_len;
5188 	fpkt->pkt_datalen = 0;
5189 	icmd->ipkt_retries = 0;
5190 
5191 	/* fill in fpkt info */
5192 	fpkt->pkt_tran_flags = FC_TRAN_CLASS3 | FC_TRAN_INTR;
5193 	fpkt->pkt_tran_type = FC_PKT_EXCHANGE;
5194 	fpkt->pkt_timeout = FCP_ELS_TIMEOUT;
5195 
5196 	/* get ptr to frame hdr in fpkt */
5197 	hp = &fpkt->pkt_cmd_fhdr;
5198 
5199 	/*
5200 	 * fill in frame hdr
5201 	 */
5202 	hp->r_ctl = R_CTL_ELS_REQ;
5203 	hp->s_id = pptr->port_id;	/* source ID */
5204 	hp->d_id = ptgt->tgt_d_id;	/* dest ID */
5205 	hp->type = FC_TYPE_EXTENDED_LS;
5206 	hp->f_ctl = F_CTL_SEQ_INITIATIVE | F_CTL_FIRST_SEQ;
5207 	hp->seq_id = 0;
5208 	hp->rsvd = 0;
5209 	hp->df_ctl  = 0;
5210 	hp->seq_cnt = 0;
5211 	hp->ox_id = 0xffff;		/* i.e. none */
5212 	hp->rx_id = 0xffff;		/* i.e. none */
5213 	hp->ro = 0;
5214 
5215 	/*
5216 	 * at this point we have a filled in cmd pkt
5217 	 *
5218 	 * fill in the respective info, then use the transport to send
5219 	 * the packet
5220 	 *
5221 	 * for a PLOGI call fc_ulp_login(), and
5222 	 * for a PRLI call fc_ulp_issue_els()
5223 	 */
5224 	switch (opcode) {
5225 	case LA_ELS_PLOGI: {
5226 		struct la_els_logi logi;
5227 
5228 		bzero(&logi, sizeof (struct la_els_logi));
5229 
5230 		hp = &fpkt->pkt_cmd_fhdr;
5231 		hp->r_ctl = R_CTL_ELS_REQ;
5232 		logi.ls_code.ls_code = LA_ELS_PLOGI;
5233 		logi.ls_code.mbz = 0;
5234 
5235 		FCP_CP_OUT((uint8_t *)&logi, fpkt->pkt_cmd,
5236 		    fpkt->pkt_cmd_acc, sizeof (struct la_els_logi));
5237 
5238 		icmd->ipkt_opcode = LA_ELS_PLOGI;
5239 
5240 		mutex_enter(&pptr->port_mutex);
5241 		if (!FCP_TGT_STATE_CHANGED(ptgt, icmd)) {
5242 
5243 			mutex_exit(&pptr->port_mutex);
5244 
5245 			rval = fc_ulp_login(pptr->port_fp_handle, &fpkt, 1);
5246 			if (rval == FC_SUCCESS) {
5247 				res = DDI_SUCCESS;
5248 				break;
5249 			}
5250 
5251 			FCP_TGT_TRACE(ptgt, tcount, FCP_TGT_TRACE_11);
5252 
5253 			res = fcp_handle_ipkt_errors(pptr, ptgt, icmd,
5254 			    rval, "PLOGI");
5255 		} else {
5256 			FCP_TRACE(fcp_logq, pptr->port_instbuf,
5257 			    fcp_trace, FCP_BUF_LEVEL_5, 0,
5258 			    "fcp_send_els1: state change occurred"
5259 			    " for D_ID=0x%x", ptgt->tgt_d_id);
5260 			mutex_exit(&pptr->port_mutex);
5261 			FCP_TGT_TRACE(ptgt, tcount, FCP_TGT_TRACE_12);
5262 		}
5263 		break;
5264 	}
5265 
5266 	case LA_ELS_PRLI: {
5267 		struct la_els_prli	prli;
5268 		struct fcp_prli		*fprli;
5269 
5270 		bzero(&prli, sizeof (struct la_els_prli));
5271 
5272 		hp = &fpkt->pkt_cmd_fhdr;
5273 		hp->r_ctl = R_CTL_ELS_REQ;
5274 
5275 		/* fill in PRLI cmd ELS fields */
5276 		prli.ls_code = LA_ELS_PRLI;
5277 		prli.page_length = 0x10;	/* service parameter page length */
5278 		prli.payload_length = sizeof (struct la_els_prli);
5279 
5280 		icmd->ipkt_opcode = LA_ELS_PRLI;
5281 
5282 		/* get ptr to PRLI service params */
5283 		fprli = (struct fcp_prli *)prli.service_params;
5284 
5285 		/* fill in service params */
5286 		fprli->type = 0x08;
5287 		fprli->resvd1 = 0;
5288 		fprli->orig_process_assoc_valid = 0;
5289 		fprli->resp_process_assoc_valid = 0;
5290 		fprli->establish_image_pair = 1;
5291 		fprli->resvd2 = 0;
5292 		fprli->resvd3 = 0;
5293 		fprli->obsolete_1 = 0;
5294 		fprli->obsolete_2 = 0;
5295 		fprli->data_overlay_allowed = 0;
5296 		fprli->initiator_fn = 1;
5297 		fprli->confirmed_compl_allowed = 1;
5298 
5299 		if (fc_ulp_is_name_present("ltct") == FC_SUCCESS) {
5300 			fprli->target_fn = 1;
5301 		} else {
5302 			fprli->target_fn = 0;
5303 		}
5304 
5305 		fprli->retry = 1;
5306 		fprli->read_xfer_rdy_disabled = 1;
5307 		fprli->write_xfer_rdy_disabled = 0;
5308 
5309 		FCP_CP_OUT((uint8_t *)&prli, fpkt->pkt_cmd,
5310 		    fpkt->pkt_cmd_acc, sizeof (struct la_els_prli));
5311 
5312 		/* issue the PRLI request */
5313 
5314 		mutex_enter(&pptr->port_mutex);
5315 		if (!FCP_TGT_STATE_CHANGED(ptgt, icmd)) {
5316 
5317 			mutex_exit(&pptr->port_mutex);
5318 
5319 			rval = fc_ulp_issue_els(pptr->port_fp_handle, fpkt);
5320 			if (rval == FC_SUCCESS) {
5321 				res = DDI_SUCCESS;
5322 				break;
5323 			}
5324 
5325 			FCP_TGT_TRACE(ptgt, tcount, FCP_TGT_TRACE_13);
5326 
5327 			res = fcp_handle_ipkt_errors(pptr, ptgt, icmd,
5328 			    rval, "PRLI");
5329 		} else {
5330 			mutex_exit(&pptr->port_mutex);
5331 			FCP_TGT_TRACE(ptgt, tcount, FCP_TGT_TRACE_14);
5332 		}
5333 		break;
5334 	}
5335 
5336 	default:
5337 		fcp_log(CE_WARN, NULL, "!invalid ELS opcode=0x%x", opcode);
5338 		break;
5339 	}
5340 
5341 	FCP_TRACE(fcp_logq, pptr->port_instbuf,
5342 	    fcp_trace, FCP_BUF_LEVEL_5, 0,
5343 	    "fcp_send_els: returning %d", res);
5344 
5345 	if (res != DDI_SUCCESS) {
5346 		if (internal) {
5347 			fcp_icmd_free(pptr, icmd);
5348 		}
5349 	}
5350 
5351 	return (res);
5352 }
5353 
5354 
5355 /*
5356  * called internally to update the state of all of the tgts and each LUN
5357  * for this port (i.e. each target  known to be attached to this port)
5358  * if they are not already offline
5359  *
5360  * must be called with the port mutex owned
5361  *
5362  * acquires and releases the target mutexes for each target attached
5363  * to this port
5364  */
5365 void
5366 fcp_update_state(struct fcp_port *pptr, uint32_t state, int cause)
5367 {
5368 	int i;
5369 	struct fcp_tgt *ptgt;
5370 
5371 	ASSERT(mutex_owned(&pptr->port_mutex));
5372 
5373 	for (i = 0; i < FCP_NUM_HASH; i++) {
5374 		for (ptgt = pptr->port_tgt_hash_table[i]; ptgt != NULL;
5375 		    ptgt = ptgt->tgt_next) {
5376 			mutex_enter(&ptgt->tgt_mutex);
5377 			fcp_update_tgt_state(ptgt, FCP_SET, state);
5378 			ptgt->tgt_change_cnt++;
5379 			ptgt->tgt_statec_cause = cause;
5380 			ptgt->tgt_tmp_cnt = 1;
5381 			ptgt->tgt_done = 0;
5382 			mutex_exit(&ptgt->tgt_mutex);
5383 		}
5384 	}
5385 }
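
/*
 * Usage sketch (illustrative, not part of the original source): a caller
 * that wants to flag every known target and its LUNs on a port would do
 * something like the following, where "state" is the bit mask and "cause"
 * the cause code appropriate to that caller:
 *
 *	mutex_enter(&pptr->port_mutex);
 *	fcp_update_state(pptr, state, cause);
 *	mutex_exit(&pptr->port_mutex);
 *
 * As noted above, the port mutex must be held across the call; the routine
 * acquires and releases each target mutex itself.
 */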
5386 
5387 
5388 static void
5389 fcp_offline_all(struct fcp_port *pptr, int lcount, int cause)
5390 {
5391 	int i;
5392 	int ndevs;
5393 	struct fcp_tgt *ptgt;
5394 
5395 	ASSERT(mutex_owned(&pptr->port_mutex));
5396 
5397 	for (ndevs = 0, i = 0; i < FCP_NUM_HASH; i++) {
5398 		for (ptgt = pptr->port_tgt_hash_table[i]; ptgt != NULL;
5399 		    ptgt = ptgt->tgt_next) {
5400 			ndevs++;
5401 		}
5402 	}
5403 
5404 	if (ndevs == 0) {
5405 		return;
5406 	}
5407 	pptr->port_tmp_cnt = ndevs;
5408 
5409 	for (i = 0; i < FCP_NUM_HASH; i++) {
5410 		for (ptgt = pptr->port_tgt_hash_table[i]; ptgt != NULL;
5411 		    ptgt = ptgt->tgt_next) {
5412 			(void) fcp_call_finish_init_held(pptr, ptgt,
5413 			    lcount, ptgt->tgt_change_cnt, cause);
5414 		}
5415 	}
5416 }
5417 
5418 /*
5419  *     Function: fcp_update_tgt_state
5420  *
5421  *  Description: This function updates the field tgt_state of a target.  That
5422  *		 field is a bitmap whose bits can be set or reset
5423  *		 individually.	The action applied to the target state is also
5424  *		 applied to all the LUNs belonging to the target (provided the
5425  *		 LUN is not offline).  A side effect of applying the state
5426  *		 modification to the target and the LUNs is that the fields
5427  *		 tgt_trace of the target and lun_trace of the LUNs are set to zero.
5428  *
5429  *
5430  *     Argument: *ptgt	Target structure.
5431  *		 flag	Flag indicating what action to apply (set/reset).
5432  *		 state	State bits to update.
5433  *
5434  * Return Value: None
5435  *
5436  *	Context: Interrupt, Kernel or User context.
5437  *		 The mutex of the target (ptgt->tgt_mutex) must be owned when
5438  *		 calling this function.
5439  */
5440 void
5441 fcp_update_tgt_state(struct fcp_tgt *ptgt, int flag, uint32_t state)
5442 {
5443 	struct fcp_lun *plun;
5444 
5445 	ASSERT(mutex_owned(&ptgt->tgt_mutex));
5446 
5447 	if (!(ptgt->tgt_state & FCP_TGT_OFFLINE)) {
5448 		/* The target is not offline. */
5449 		if (flag == FCP_SET) {
5450 			ptgt->tgt_state |= state;
5451 			ptgt->tgt_trace = 0;
5452 		} else {
5453 			ptgt->tgt_state &= ~state;
5454 		}
5455 
5456 		for (plun = ptgt->tgt_lun; plun != NULL;
5457 		    plun = plun->lun_next) {
5458 			if (!(plun->lun_state & FCP_LUN_OFFLINE)) {
5459 				/* The LUN is not offline. */
5460 				if (flag == FCP_SET) {
5461 					plun->lun_state |= state;
5462 					plun->lun_trace = 0;
5463 				} else {
5464 					plun->lun_state &= ~state;
5465 				}
5466 			}
5467 		}
5468 	}
5469 }
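
/*
 * Usage sketch (illustrative, not part of the original source): setting a
 * state bit on a target, and hence on all of its non-offline LUNs, looks
 * like this; FCP_SET is the flag used throughout this file and FCP_TGT_BUSY
 * is just one example of a state bit:
 *
 *	mutex_enter(&ptgt->tgt_mutex);
 *	fcp_update_tgt_state(ptgt, FCP_SET, FCP_TGT_BUSY);
 *	mutex_exit(&ptgt->tgt_mutex);
 */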
5470 
5471 /*
5472  *     Function: fcp_update_lun_state
5473  *
5474  *  Description: This function updates the field lun_state of a LUN.  That
5475  *		 field is a bitmap whose bits can be set or reset
5476  *		 individually.
5477  *
5478  *     Argument: *plun	LUN structure.
5479  *		 flag	Flag indicating what action to apply (set/reset).
5480  *		 state	State bits to update.
5481  *
5482  * Return Value: None
5483  *
5484  *	Context: Interrupt, Kernel or User context.
5485  *		 The mutex of the target (ptgt->tgt_mutex) must be owned when
5486  *		 calling this function.
5487  */
5488 void
5489 fcp_update_lun_state(struct fcp_lun *plun, int flag, uint32_t state)
5490 {
5491 	struct fcp_tgt	*ptgt = plun->lun_tgt;
5492 
5493 	ASSERT(mutex_owned(&ptgt->tgt_mutex));
5494 
5495 	if (!(plun->lun_state & FCP_TGT_OFFLINE)) {
5496 		if (flag == FCP_SET) {
5497 			plun->lun_state |= state;
5498 		} else {
5499 			plun->lun_state &= ~state;
5500 		}
5501 	}
5502 }
5503 
5504 /*
5505  *     Function: fcp_get_port
5506  *
5507  *  Description: This function returns the fcp_port structure from the opaque
5508  *		 handle passed by the caller.  That opaque handle is the handle
5509  *		 used by fp/fctl to identify a particular local port.  That
5510  *		 handle has been stored in the corresponding fcp_port
5511  *		 structure.  This function is going to walk the global list of
5512  *		 fcp_port structures till one has a port_fp_handle that matches
5513  *		 the handle passed by the caller.  This function enters the
5514  *		 mutex fcp_global_mutex while walking the global list and then
5515  *		 releases it.
5516  *
5517  *     Argument: port_handle	Opaque handle that fp/fctl uses to identify a
5518  *				particular port.
5519  *
5520  * Return Value: NULL		Not found.
5521  *		 Not NULL	Pointer to the fcp_port structure.
5522  *
5523  *	Context: Interrupt, Kernel or User context.
5524  */
5525 static struct fcp_port *
5526 fcp_get_port(opaque_t port_handle)
5527 {
5528 	struct fcp_port *pptr;
5529 
5530 	ASSERT(port_handle != NULL);
5531 
5532 	mutex_enter(&fcp_global_mutex);
5533 	for (pptr = fcp_port_head; pptr != NULL; pptr = pptr->port_next) {
5534 		if (pptr->port_fp_handle == port_handle) {
5535 			break;
5536 		}
5537 	}
5538 	mutex_exit(&fcp_global_mutex);
5539 
5540 	return (pptr);
5541 }
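
/*
 * Usage sketch (illustrative, not part of the original source): ULP entry
 * points invoked by fp/fctl receive the opaque port handle and resolve it
 * to the driver's own per-port state before doing anything else:
 *
 *	struct fcp_port	*pptr;
 *
 *	if ((pptr = fcp_get_port(port_handle)) == NULL) {
 *		return;		(or otherwise fail the request)
 *	}
 */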
5542 
5543 
5544 static void
5545 fcp_unsol_callback(fc_packet_t *fpkt)
5546 {
5547 	struct fcp_ipkt *icmd = (struct fcp_ipkt *)fpkt->pkt_ulp_private;
5548 	struct fcp_port *pptr = icmd->ipkt_port;
5549 
5550 	if (fpkt->pkt_state != FC_PKT_SUCCESS) {
5551 		caddr_t state, reason, action, expln;
5552 
5553 		(void) fc_ulp_pkt_error(fpkt, &state, &reason,
5554 		    &action, &expln);
5555 
5556 		fcp_log(CE_WARN, pptr->port_dip,
5557 		    "!couldn't post response to unsolicited request: "
5558 		    "state=%s reason=%s ox_id=%x rx_id=%x",
5559 		    state, reason, fpkt->pkt_cmd_fhdr.ox_id,
5560 		    fpkt->pkt_cmd_fhdr.rx_id);
5561 	}
5562 	fcp_icmd_free(pptr, icmd);
5563 }
5564 
5565 
5566 /*
5567  * Perform general purpose preparation of a response to an unsolicited request
5568  */
5569 static void
5570 fcp_unsol_resp_init(fc_packet_t *pkt, fc_unsol_buf_t *buf,
5571     uchar_t r_ctl, uchar_t type)
5572 {
5573 	pkt->pkt_cmd_fhdr.r_ctl = r_ctl;
5574 	pkt->pkt_cmd_fhdr.d_id = buf->ub_frame.s_id;
5575 	pkt->pkt_cmd_fhdr.s_id = buf->ub_frame.d_id;
5576 	pkt->pkt_cmd_fhdr.type = type;
5577 	pkt->pkt_cmd_fhdr.f_ctl = F_CTL_LAST_SEQ | F_CTL_XCHG_CONTEXT;
5578 	pkt->pkt_cmd_fhdr.seq_id = buf->ub_frame.seq_id;
5579 	pkt->pkt_cmd_fhdr.df_ctl  = buf->ub_frame.df_ctl;
5580 	pkt->pkt_cmd_fhdr.seq_cnt = buf->ub_frame.seq_cnt;
5581 	pkt->pkt_cmd_fhdr.ox_id = buf->ub_frame.ox_id;
5582 	pkt->pkt_cmd_fhdr.rx_id = buf->ub_frame.rx_id;
5583 	pkt->pkt_cmd_fhdr.ro = 0;
5584 	pkt->pkt_cmd_fhdr.rsvd = 0;
5585 	pkt->pkt_comp = fcp_unsol_callback;
5586 	pkt->pkt_pd = NULL;
5587 	pkt->pkt_ub_resp_token = (opaque_t)buf;
5588 }
5589 
5590 
5591 /*ARGSUSED*/
5592 static int
5593 fcp_unsol_prli(struct fcp_port *pptr, fc_unsol_buf_t *buf)
5594 {
5595 	fc_packet_t		*fpkt;
5596 	struct la_els_prli	prli;
5597 	struct fcp_prli		*fprli;
5598 	struct fcp_ipkt	*icmd;
5599 	struct la_els_prli	*from;
5600 	struct fcp_prli		*orig;
5601 	struct fcp_tgt	*ptgt;
5602 	int			tcount = 0;
5603 	int			lcount;
5604 
5605 	from = (struct la_els_prli *)buf->ub_buffer;
5606 	orig = (struct fcp_prli *)from->service_params;
5607 	if ((ptgt = fcp_get_target_by_did(pptr, buf->ub_frame.s_id)) !=
5608 	    NULL) {
5609 		mutex_enter(&ptgt->tgt_mutex);
5610 		tcount = ptgt->tgt_change_cnt;
5611 		mutex_exit(&ptgt->tgt_mutex);
5612 	}
5613 
5614 	mutex_enter(&pptr->port_mutex);
5615 	lcount = pptr->port_link_cnt;
5616 	mutex_exit(&pptr->port_mutex);
5617 
5618 	if ((icmd = fcp_icmd_alloc(pptr, ptgt, sizeof (la_els_prli_t),
5619 	    sizeof (la_els_prli_t), 0,
5620 	    pptr->port_state & FCP_STATE_FCA_IS_NODMA,
5621 	    lcount, tcount, 0, FC_INVALID_RSCN_COUNT)) == NULL) {
5622 		return (FC_FAILURE);
5623 	}
5624 
5625 	fpkt = icmd->ipkt_fpkt;
5626 	fpkt->pkt_tran_flags = FC_TRAN_CLASS3 | FC_TRAN_INTR;
5627 	fpkt->pkt_tran_type = FC_PKT_OUTBOUND;
5628 	fpkt->pkt_timeout = FCP_ELS_TIMEOUT;
5629 	fpkt->pkt_cmdlen = sizeof (la_els_prli_t);
5630 	fpkt->pkt_rsplen = 0;
5631 	fpkt->pkt_datalen = 0;
5632 
5633 	icmd->ipkt_opcode = LA_ELS_PRLI;
5634 
5635 	bzero(&prli, sizeof (struct la_els_prli));
5636 	fprli = (struct fcp_prli *)prli.service_params;
5637 	prli.ls_code = LA_ELS_ACC;
5638 	prli.page_length = 0x10;
5639 	prli.payload_length = sizeof (struct la_els_prli);
5640 
5641 	/* fill in service params */
5642 	fprli->type = 0x08;
5643 	fprli->resvd1 = 0;
5644 	fprli->orig_process_assoc_valid = orig->orig_process_assoc_valid;
5645 	fprli->orig_process_associator = orig->orig_process_associator;
5646 	fprli->resp_process_assoc_valid = 0;
5647 	fprli->establish_image_pair = 1;
5648 	fprli->resvd2 = 0;
5649 	fprli->resvd3 = 0;
5650 	fprli->obsolete_1 = 0;
5651 	fprli->obsolete_2 = 0;
5652 	fprli->data_overlay_allowed = 0;
5653 	fprli->initiator_fn = 1;
5654 	fprli->confirmed_compl_allowed = 1;
5655 
5656 	if (fc_ulp_is_name_present("ltct") == FC_SUCCESS) {
5657 		fprli->target_fn = 1;
5658 	} else {
5659 		fprli->target_fn = 0;
5660 	}
5661 
5662 	fprli->retry = 1;
5663 	fprli->read_xfer_rdy_disabled = 1;
5664 	fprli->write_xfer_rdy_disabled = 0;
5665 
5666 	/* save the unsol prli payload first */
5667 	FCP_CP_OUT((uint8_t *)from, fpkt->pkt_resp,
5668 	    fpkt->pkt_resp_acc, sizeof (struct la_els_prli));
5669 
5670 	FCP_CP_OUT((uint8_t *)&prli, fpkt->pkt_cmd,
5671 	    fpkt->pkt_cmd_acc, sizeof (struct la_els_prli));
5672 
5673 	fcp_unsol_resp_init(fpkt, buf, R_CTL_ELS_RSP, FC_TYPE_EXTENDED_LS);
5674 
5675 	mutex_enter(&pptr->port_mutex);
5676 	if (!FCP_LINK_STATE_CHANGED(pptr, icmd)) {
5677 		int rval;
5678 		mutex_exit(&pptr->port_mutex);
5679 
5680 		if ((rval = fc_ulp_issue_els(pptr->port_fp_handle, fpkt)) !=
5681 		    FC_SUCCESS) {
5682 			if ((rval == FC_STATEC_BUSY || rval == FC_OFFLINE) &&
5683 			    ptgt != NULL) {
5684 				fcp_queue_ipkt(pptr, fpkt);
5685 				return (FC_SUCCESS);
5686 			}
5687 			/* Let it timeout */
5688 			fcp_icmd_free(pptr, icmd);
5689 			return (FC_FAILURE);
5690 		}
5691 	} else {
5692 		mutex_exit(&pptr->port_mutex);
5693 		fcp_icmd_free(pptr, icmd);
5694 		return (FC_FAILURE);
5695 	}
5696 
5697 	(void) fc_ulp_ubrelease(pptr->port_fp_handle, 1, &buf->ub_token);
5698 
5699 	return (FC_SUCCESS);
5700 }
5701 
5702 /*
5703  *     Function: fcp_icmd_alloc
5704  *
5705  *  Description: This function allocates an fcp_ipkt structure.  The pkt_comp
5706  *		 field is initialized to fcp_icmd_callback.  Sometimes it is
5707  *		 modified by the caller (such as fcp_send_scsi).  The
5708  *		 structure is also tied to the state of the line and of the
5709  *		 target at a particular time.  That link is established by
5710  *		 setting the fields ipkt_link_cnt and ipkt_change_cnt to lcount
5711  *		 and tcount which came respectively from pptr->port_link_cnt and
5712  *		 ptgt->tgt_change_cnt.
5713  *
5714  *     Argument: *pptr		Fcp port.
5715  *		 *ptgt		Target (destination of the command).
5716  *		 cmd_len	Length of the command.
5717  *		 resp_len	Length of the expected response.
5718  *		 data_len	Length of the data.
5719  *		 nodma		Indicates whether the command and response
5720  *				will be transferred through DMA or not.
5721  *		 lcount		Link state change counter.
5722  *		 tcount		Target state change counter.
5723  *		 cause		Reason that led to this call.
5724  *
5725  * Return Value: NULL		Failed.
5726  *		 Not NULL	Internal packet address.
5727  */
5728 static struct fcp_ipkt *
5729 fcp_icmd_alloc(struct fcp_port *pptr, struct fcp_tgt *ptgt, int cmd_len,
5730     int resp_len, int data_len, int nodma, int lcount, int tcount, int cause,
5731     uint32_t rscn_count)
5732 {
5733 	int			dma_setup = 0;
5734 	fc_packet_t		*fpkt;
5735 	struct fcp_ipkt	*icmd = NULL;
5736 
5737 	icmd = kmem_zalloc(sizeof (struct fcp_ipkt) +
5738 	    pptr->port_dmacookie_sz + pptr->port_priv_pkt_len,
5739 	    KM_NOSLEEP);
5740 	if (icmd == NULL) {
5741 		fcp_log(CE_WARN, pptr->port_dip,
5742 		    "!internal packet allocation failed");
5743 		return (NULL);
5744 	}
5745 
5746 	/*
5747 	 * initialize the allocated packet
5748 	 */
5749 	icmd->ipkt_nodma = nodma;
5750 	icmd->ipkt_next = icmd->ipkt_prev = NULL;
5751 	icmd->ipkt_lun = NULL;
5752 
5753 	icmd->ipkt_link_cnt = lcount;
5754 	icmd->ipkt_change_cnt = tcount;
5755 	icmd->ipkt_cause = cause;
5756 
5757 	mutex_enter(&pptr->port_mutex);
5758 	icmd->ipkt_port = pptr;
5759 	mutex_exit(&pptr->port_mutex);
5760 
5761 	/* keep track of amt of data to be sent in pkt */
5762 	icmd->ipkt_cmdlen = cmd_len;
5763 	icmd->ipkt_resplen = resp_len;
5764 	icmd->ipkt_datalen = data_len;
5765 
5766 	/* set up pkt's ptr to the fc_packet_t struct, just after the ipkt */
5767 	icmd->ipkt_fpkt = (fc_packet_t *)(&icmd->ipkt_fc_packet);
5768 
5769 	/* set pkt's private ptr to point to cmd pkt */
5770 	icmd->ipkt_fpkt->pkt_ulp_private = (opaque_t)icmd;
5771 
5772 	/* set FCA private ptr to memory just beyond */
5773 	icmd->ipkt_fpkt->pkt_fca_private = (opaque_t)
5774 	    ((char *)icmd + sizeof (struct fcp_ipkt) +
5775 	    pptr->port_dmacookie_sz);
5776 
5777 	/* get ptr to fpkt substruct and fill it in */
5778 	fpkt = icmd->ipkt_fpkt;
5779 	fpkt->pkt_data_cookie = (ddi_dma_cookie_t *)((caddr_t)icmd +
5780 	    sizeof (struct fcp_ipkt));
5781 
5782 	if (ptgt != NULL) {
5783 		icmd->ipkt_tgt = ptgt;
5784 		fpkt->pkt_fca_device = ptgt->tgt_fca_dev;
5785 	}
5786 
5787 	fpkt->pkt_comp = fcp_icmd_callback;
5788 	fpkt->pkt_tran_flags = (FC_TRAN_CLASS3 | FC_TRAN_INTR);
5789 	fpkt->pkt_cmdlen = cmd_len;
5790 	fpkt->pkt_rsplen = resp_len;
5791 	fpkt->pkt_datalen = data_len;
5792 
5793 	/*
5794 	 * The pkt_ulp_rscn_infop (aka pkt_ulp_rsvd1) field is used to pass the
5795 	 * rscn_count as fcp knows it down to the transport.  If a valid count was
5796 	 * passed into this function, we allocate memory to actually pass down
5797 	 * this info.
5798 	 *
5799 	 * BTW, if the kmem_zalloc fails, we won't try too hard. This will
5800 	 * basically mean that fcp will not be able to help transport
5801 	 * distinguish if a new RSCN has come after fcp was last informed about
5802 	 * it. In such cases, it might lead to the problem mentioned in CR/bug #
5803 	 * 5068068 where the device might end up going offline in case of RSCN
5804 	 * storms.
5805 	 */
5806 	fpkt->pkt_ulp_rscn_infop = NULL;
5807 	if (rscn_count != FC_INVALID_RSCN_COUNT) {
5808 		fpkt->pkt_ulp_rscn_infop = kmem_zalloc(
5809 		    sizeof (fc_ulp_rscn_info_t), KM_NOSLEEP);
5810 		if (fpkt->pkt_ulp_rscn_infop == NULL) {
5811 			FCP_TRACE(fcp_logq, pptr->port_instbuf,
5812 			    fcp_trace, FCP_BUF_LEVEL_6, 0,
5813 			    "Failed to alloc memory to pass rscn info");
5814 		}
5815 	}
5816 
5817 	if (fpkt->pkt_ulp_rscn_infop != NULL) {
5818 		fc_ulp_rscn_info_t	*rscnp;
5819 
5820 		rscnp = (fc_ulp_rscn_info_t *)fpkt->pkt_ulp_rscn_infop;
5821 		rscnp->ulp_rscn_count = rscn_count;
5822 	}
5823 
5824 	if (fcp_alloc_dma(pptr, icmd, nodma, KM_NOSLEEP) != FC_SUCCESS) {
5825 		goto fail;
5826 	}
5827 	dma_setup++;
5828 
5829 	/*
5830 	 * Must hold target mutex across setting of pkt_pd and call to
5831 	 * fc_ulp_init_packet to ensure the handle to the target doesn't go
5832 	 * away while we're not looking.
5833 	 */
5834 	if (ptgt != NULL) {
5835 		mutex_enter(&ptgt->tgt_mutex);
5836 		fpkt->pkt_pd = ptgt->tgt_pd_handle;
5837 
5838 		/* ask transport to do its initialization on this pkt */
5839 		if (fc_ulp_init_packet(pptr->port_fp_handle, fpkt, KM_NOSLEEP)
5840 		    != FC_SUCCESS) {
5841 			FCP_TRACE(fcp_logq, pptr->port_instbuf,
5842 			    fcp_trace, FCP_BUF_LEVEL_6, 0,
5843 			    "fc_ulp_init_packet failed");
5844 			mutex_exit(&ptgt->tgt_mutex);
5845 			goto fail;
5846 		}
5847 		mutex_exit(&ptgt->tgt_mutex);
5848 	} else {
5849 		if (fc_ulp_init_packet(pptr->port_fp_handle, fpkt, KM_NOSLEEP)
5850 		    != FC_SUCCESS) {
5851 			FCP_TRACE(fcp_logq, pptr->port_instbuf,
5852 			    fcp_trace, FCP_BUF_LEVEL_6, 0,
5853 			    "fc_ulp_init_packet failed");
5854 			goto fail;
5855 		}
5856 	}
5857 
5858 	mutex_enter(&pptr->port_mutex);
5859 	if (pptr->port_state & (FCP_STATE_DETACHING |
5860 	    FCP_STATE_SUSPENDED | FCP_STATE_POWER_DOWN)) {
5861 		int rval;
5862 
5863 		mutex_exit(&pptr->port_mutex);
5864 
5865 		rval = fc_ulp_uninit_packet(pptr->port_fp_handle, fpkt);
5866 		ASSERT(rval == FC_SUCCESS);
5867 
5868 		goto fail;
5869 	}
5870 
5871 	if (ptgt != NULL) {
5872 		mutex_enter(&ptgt->tgt_mutex);
5873 		ptgt->tgt_ipkt_cnt++;
5874 		mutex_exit(&ptgt->tgt_mutex);
5875 	}
5876 
5877 	pptr->port_ipkt_cnt++;
5878 
5879 	mutex_exit(&pptr->port_mutex);
5880 
5881 	return (icmd);
5882 
5883 fail:
5884 	if (fpkt->pkt_ulp_rscn_infop != NULL) {
5885 		kmem_free(fpkt->pkt_ulp_rscn_infop,
5886 		    sizeof (fc_ulp_rscn_info_t));
5887 		fpkt->pkt_ulp_rscn_infop = NULL;
5888 	}
5889 
5890 	if (dma_setup) {
5891 		fcp_free_dma(pptr, icmd);
5892 	}
5893 	kmem_free(icmd, sizeof (struct fcp_ipkt) + pptr->port_priv_pkt_len +
5894 	    (size_t)pptr->port_dmacookie_sz);
5895 
5896 	return (NULL);
5897 }
5898 
5899 /*
5900  *     Function: fcp_icmd_free
5901  *
5902  *  Description: Frees the internal command passed by the caller.
5903  *
5904  *     Argument: *pptr		Fcp port.
5905  *		 *icmd		Internal packet to free.
5906  *
5907  * Return Value: None
5908  */
5909 static void
5910 fcp_icmd_free(struct fcp_port *pptr, struct fcp_ipkt *icmd)
5911 {
5912 	struct fcp_tgt	*ptgt = icmd->ipkt_tgt;
5913 
5914 	/* Let the underlying layers do their cleanup. */
5915 	(void) fc_ulp_uninit_packet(pptr->port_fp_handle,
5916 	    icmd->ipkt_fpkt);
5917 
5918 	if (icmd->ipkt_fpkt->pkt_ulp_rscn_infop) {
5919 		kmem_free(icmd->ipkt_fpkt->pkt_ulp_rscn_infop,
5920 		    sizeof (fc_ulp_rscn_info_t));
5921 	}
5922 
5923 	fcp_free_dma(pptr, icmd);
5924 
5925 	kmem_free(icmd, sizeof (struct fcp_ipkt) + pptr->port_priv_pkt_len +
5926 	    (size_t)pptr->port_dmacookie_sz);
5927 
5928 	mutex_enter(&pptr->port_mutex);
5929 
5930 	if (ptgt) {
5931 		mutex_enter(&ptgt->tgt_mutex);
5932 		ptgt->tgt_ipkt_cnt--;
5933 		mutex_exit(&ptgt->tgt_mutex);
5934 	}
5935 
5936 	pptr->port_ipkt_cnt--;
5937 	mutex_exit(&pptr->port_mutex);
5938 }
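
/*
 * Usage sketch (illustrative, not part of the original source):
 * fcp_icmd_alloc() and fcp_icmd_free() are used as a pair.  A caller first
 * snapshots the link and target change counters so the packet can later be
 * checked against state changes, then allocates the packet, and frees it
 * itself only when it fails to hand it off to the transport:
 *
 *	mutex_enter(&pptr->port_mutex);
 *	lcount = pptr->port_link_cnt;
 *	mutex_exit(&pptr->port_mutex);
 *
 *	mutex_enter(&ptgt->tgt_mutex);
 *	tcount = ptgt->tgt_change_cnt;
 *	mutex_exit(&ptgt->tgt_mutex);
 *
 *	icmd = fcp_icmd_alloc(pptr, ptgt, cmd_len, resp_len, 0, nodma,
 *	    lcount, tcount, cause, FC_INVALID_RSCN_COUNT);
 *	if (icmd == NULL) {
 *		return (DDI_FAILURE);
 *	}
 *
 * Once the packet has been accepted by the transport, it is released from
 * the completion callback instead.
 */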
5939 
5940 /*
5941  *     Function: fcp_alloc_dma
5942  *
5943  *  Description: Allocates the DMA resources required for the internal
5944  *		 packet.
5945  *
5946  *     Argument: *pptr	FCP port.
5947  *		 *icmd	Internal FCP packet.
5948  *		 nodma	Indicates if the Cmd and Resp will be DMAed.
5949  *		 flags	Allocation flags (Sleep or NoSleep).
5950  *
5951  * Return Value: FC_SUCCESS
5952  *		 FC_NOMEM
5953  */
5954 static int
5955 fcp_alloc_dma(struct fcp_port *pptr, struct fcp_ipkt *icmd,
5956     int nodma, int flags)
5957 {
5958 	int		rval;
5959 	size_t		real_size;
5960 	uint_t		ccount;
5961 	int		bound = 0;
5962 	int		cmd_resp = 0;
5963 	fc_packet_t	*fpkt;
5964 	ddi_dma_cookie_t	pkt_data_cookie;
5965 	ddi_dma_cookie_t	*cp;
5966 	uint32_t		cnt;
5967 
5968 	fpkt = &icmd->ipkt_fc_packet;
5969 
5970 	ASSERT(fpkt->pkt_cmd_dma == NULL && fpkt->pkt_data_dma == NULL &&
5971 	    fpkt->pkt_resp_dma == NULL);
5972 
5973 	icmd->ipkt_nodma = nodma;
5974 
5975 	if (nodma) {
5976 		fpkt->pkt_cmd = kmem_zalloc(fpkt->pkt_cmdlen, flags);
5977 		if (fpkt->pkt_cmd == NULL) {
5978 			goto fail;
5979 		}
5980 
5981 		fpkt->pkt_resp = kmem_zalloc(fpkt->pkt_rsplen, flags);
5982 		if (fpkt->pkt_resp == NULL) {
5983 			goto fail;
5984 		}
5985 	} else {
5986 		ASSERT(fpkt->pkt_cmdlen && fpkt->pkt_rsplen);
5987 
5988 		rval = fcp_alloc_cmd_resp(pptr, fpkt, flags);
5989 		if (rval == FC_FAILURE) {
5990 			ASSERT(fpkt->pkt_cmd_dma == NULL &&
5991 			    fpkt->pkt_resp_dma == NULL);
5992 			goto fail;
5993 		}
5994 		cmd_resp++;
5995 	}
5996 
5997 	if ((fpkt->pkt_datalen != 0) &&
5998 	    !(pptr->port_state & FCP_STATE_FCA_IS_NODMA)) {
5999 		/*
6000 		 * set up DMA handle and memory for the data in this packet
6001 		 */
6002 		if (ddi_dma_alloc_handle(pptr->port_dip,
6003 		    &pptr->port_data_dma_attr, DDI_DMA_DONTWAIT,
6004 		    NULL, &fpkt->pkt_data_dma) != DDI_SUCCESS) {
6005 			goto fail;
6006 		}
6007 
6008 		if (ddi_dma_mem_alloc(fpkt->pkt_data_dma, fpkt->pkt_datalen,
6009 		    &pptr->port_dma_acc_attr, DDI_DMA_CONSISTENT,
6010 		    DDI_DMA_DONTWAIT, NULL, &fpkt->pkt_data,
6011 		    &real_size, &fpkt->pkt_data_acc) != DDI_SUCCESS) {
6012 			goto fail;
6013 		}
6014 
6015 		/* did we get less DMA memory than we asked for/needed? */
6016 		if (real_size < fpkt->pkt_datalen) {
6017 			goto fail;
6018 		}
6019 
6020 		/* bind DMA address and handle together */
6021 		if (ddi_dma_addr_bind_handle(fpkt->pkt_data_dma,
6022 		    NULL, fpkt->pkt_data, real_size, DDI_DMA_READ |
6023 		    DDI_DMA_CONSISTENT, DDI_DMA_DONTWAIT, NULL,
6024 		    &pkt_data_cookie, &ccount) != DDI_DMA_MAPPED) {
6025 			goto fail;
6026 		}
6027 		bound++;
6028 
6029 		if (ccount > pptr->port_data_dma_attr.dma_attr_sgllen) {
6030 			goto fail;
6031 		}
6032 
6033 		fpkt->pkt_data_cookie_cnt = ccount;
6034 
6035 		cp = fpkt->pkt_data_cookie;
6036 		*cp = pkt_data_cookie;
6037 		cp++;
6038 
6039 		for (cnt = 1; cnt < ccount; cnt++, cp++) {
6040 			ddi_dma_nextcookie(fpkt->pkt_data_dma,
6041 			    &pkt_data_cookie);
6042 			*cp = pkt_data_cookie;
6043 		}
6044 
6045 	} else if (fpkt->pkt_datalen != 0) {
6046 		/*
6047 		 * If it's a pseudo FCA, then it can't support DMA even in
6048 		 * SCSI data phase.
6049 		 */
6050 		fpkt->pkt_data = kmem_alloc(fpkt->pkt_datalen, flags);
6051 		if (fpkt->pkt_data == NULL) {
6052 			goto fail;
6053 		}
6054 
6055 	}
6056 
6057 	return (FC_SUCCESS);
6058 
6059 fail:
6060 	if (bound) {
6061 		(void) ddi_dma_unbind_handle(fpkt->pkt_data_dma);
6062 	}
6063 
6064 	if (fpkt->pkt_data_dma) {
6065 		if (fpkt->pkt_data) {
6066 			ddi_dma_mem_free(&fpkt->pkt_data_acc);
6067 		}
6068 		ddi_dma_free_handle(&fpkt->pkt_data_dma);
6069 	} else {
6070 		if (fpkt->pkt_data) {
6071 			kmem_free(fpkt->pkt_data, fpkt->pkt_datalen);
6072 		}
6073 	}
6074 
6075 	if (nodma) {
6076 		if (fpkt->pkt_cmd) {
6077 			kmem_free(fpkt->pkt_cmd, fpkt->pkt_cmdlen);
6078 		}
6079 		if (fpkt->pkt_resp) {
6080 			kmem_free(fpkt->pkt_resp, fpkt->pkt_rsplen);
6081 		}
6082 	} else {
6083 		if (cmd_resp) {
6084 			fcp_free_cmd_resp(pptr, fpkt);
6085 		}
6086 	}
6087 
6088 	return (FC_NOMEM);
6089 }
6090 
6091 
6092 static void
6093 fcp_free_dma(struct fcp_port *pptr, struct fcp_ipkt *icmd)
6094 {
6095 	fc_packet_t *fpkt = icmd->ipkt_fpkt;
6096 
6097 	if (fpkt->pkt_data_dma) {
6098 		(void) ddi_dma_unbind_handle(fpkt->pkt_data_dma);
6099 		if (fpkt->pkt_data) {
6100 			ddi_dma_mem_free(&fpkt->pkt_data_acc);
6101 		}
6102 		ddi_dma_free_handle(&fpkt->pkt_data_dma);
6103 	} else {
6104 		if (fpkt->pkt_data) {
6105 			kmem_free(fpkt->pkt_data, fpkt->pkt_datalen);
6106 		}
6107 		/*
6108 		 * Do we need to reset the pkt_* fields to zero here?
6109 		 */
6110 	}
6111 
6112 	if (icmd->ipkt_nodma) {
6113 		if (fpkt->pkt_cmd) {
6114 			kmem_free(fpkt->pkt_cmd, icmd->ipkt_cmdlen);
6115 		}
6116 		if (fpkt->pkt_resp) {
6117 			kmem_free(fpkt->pkt_resp, icmd->ipkt_resplen);
6118 		}
6119 	} else {
6120 		ASSERT(fpkt->pkt_resp_dma != NULL && fpkt->pkt_cmd_dma != NULL);
6121 
6122 		fcp_free_cmd_resp(pptr, fpkt);
6123 	}
6124 }
6125 
6126 /*
6127  *     Function: fcp_lookup_target
6128  *
6129  *  Description: Finds a target given a WWN.
6130  *
6131  *     Argument: *pptr	FCP port.
6132  *		 *wwn	World Wide Name of the device to look for.
6133  *
6134  * Return Value: NULL		No target found
6135  *		 Not NULL	Target structure
6136  *
6137  *	Context: Interrupt context.
6138  *		 The mutex pptr->port_mutex must be owned.
6139  */
6140 /* ARGSUSED */
6141 static struct fcp_tgt *
6142 fcp_lookup_target(struct fcp_port *pptr, uchar_t *wwn)
6143 {
6144 	int			hash;
6145 	struct fcp_tgt	*ptgt;
6146 
6147 	ASSERT(mutex_owned(&pptr->port_mutex));
6148 
6149 	hash = FCP_HASH(wwn);
6150 
6151 	for (ptgt = pptr->port_tgt_hash_table[hash]; ptgt != NULL;
6152 	    ptgt = ptgt->tgt_next) {
6153 		if (!(ptgt->tgt_state & FCP_TGT_ORPHAN) &&
6154 		    bcmp((caddr_t)wwn, (caddr_t)&ptgt->tgt_port_wwn.raw_wwn[0],
6155 		    sizeof (ptgt->tgt_port_wwn)) == 0) {
6156 			break;
6157 		}
6158 	}
6159 
6160 	return (ptgt);
6161 }
6162 
6163 
6164 /*
6165  * Find target structure given a port identifier
6166  */
6167 static struct fcp_tgt *
6168 fcp_get_target_by_did(struct fcp_port *pptr, uint32_t d_id)
6169 {
6170 	fc_portid_t		port_id;
6171 	la_wwn_t		pwwn;
6172 	struct fcp_tgt	*ptgt = NULL;
6173 
6174 	port_id.priv_lilp_posit = 0;
6175 	port_id.port_id = d_id;
6176 	if (fc_ulp_get_pwwn_by_did(pptr->port_fp_handle, port_id,
6177 	    &pwwn) == FC_SUCCESS) {
6178 		mutex_enter(&pptr->port_mutex);
6179 		ptgt = fcp_lookup_target(pptr, pwwn.raw_wwn);
6180 		mutex_exit(&pptr->port_mutex);
6181 	}
6182 
6183 	return (ptgt);
6184 }
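
/*
 * Usage sketch (illustrative, not part of the original source): handlers of
 * unsolicited requests map the S_ID of the incoming frame back to a known
 * target, if any:
 *
 *	ptgt = fcp_get_target_by_did(pptr, buf->ub_frame.s_id);
 *	if (ptgt != NULL) {
 *		mutex_enter(&ptgt->tgt_mutex);
 *		tcount = ptgt->tgt_change_cnt;
 *		mutex_exit(&ptgt->tgt_mutex);
 *	}
 *
 * Unlike fcp_lookup_target(), this routine asks the transport for the port
 * WWN matching the D_ID and takes the port mutex itself.
 */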
6185 
6186 
6187 /*
6188  * the packet completion callback routine for info cmd pkts
6189  *
6190  * this means fpkt pts to a response to either a PLOGI or a PRLI
6191  *
6192  * if there is an error an attempt is made to call a routine to resend
6193  * the command that failed
6194  */
6195 static void
6196 fcp_icmd_callback(fc_packet_t *fpkt)
6197 {
6198 	struct fcp_ipkt	*icmd;
6199 	struct fcp_port	*pptr;
6200 	struct fcp_tgt	*ptgt;
6201 	struct la_els_prli	*prli;
6202 	struct la_els_prli	prli_s;
6203 	struct fcp_prli		*fprli;
6204 	struct fcp_lun	*plun;
6205 	int		free_pkt = 1;
6206 	int		rval;
6207 	ls_code_t	resp;
6208 	uchar_t		prli_acc = 0;
6209 	uint32_t	rscn_count = FC_INVALID_RSCN_COUNT;
6210 	int		lun0_newalloc;
6211 
6212 	icmd = (struct fcp_ipkt *)fpkt->pkt_ulp_private;
6213 
6214 	/* get ptrs to the port and target structs for the cmd */
6215 	pptr = icmd->ipkt_port;
6216 	ptgt = icmd->ipkt_tgt;
6217 
6218 	FCP_CP_IN(fpkt->pkt_resp, &resp, fpkt->pkt_resp_acc, sizeof (resp));
6219 
6220 	if (icmd->ipkt_opcode == LA_ELS_PRLI) {
6221 		FCP_CP_IN(fpkt->pkt_cmd, &prli_s, fpkt->pkt_cmd_acc,
6222 		    sizeof (prli_s));
6223 		prli_acc = (prli_s.ls_code == LA_ELS_ACC);
6224 	}
6225 
6226 	FCP_TRACE(fcp_logq, pptr->port_instbuf,
6227 	    fcp_trace, FCP_BUF_LEVEL_2, 0,
6228 	    "ELS (%x) callback state=0x%x reason=0x%x for %x",
6229 	    icmd->ipkt_opcode, fpkt->pkt_state, fpkt->pkt_reason,
6230 	    ptgt->tgt_d_id);
6231 
6232 	if ((fpkt->pkt_state == FC_PKT_SUCCESS) &&
6233 	    ((resp.ls_code == LA_ELS_ACC) || prli_acc)) {
6234 
6235 		mutex_enter(&ptgt->tgt_mutex);
6236 		if (ptgt->tgt_pd_handle == NULL) {
6237 			/*
6238 			 * in a fabric environment the port device handles
6239 			 * get created only after successful LOGIN into the
6240 			 * transport, so the transport makes this port
6241 			 * device (pd) handle available in this packet, so
6242 			 * save it now
6243 			 */
6244 			ASSERT(fpkt->pkt_pd != NULL);
6245 			ptgt->tgt_pd_handle = fpkt->pkt_pd;
6246 		}
6247 		mutex_exit(&ptgt->tgt_mutex);
6248 
6249 		/* which ELS cmd is this response for ?? */
6250 		switch (icmd->ipkt_opcode) {
6251 		case LA_ELS_PLOGI:
6252 			FCP_TRACE(fcp_logq, pptr->port_instbuf,
6253 			    fcp_trace, FCP_BUF_LEVEL_5, 0,
6254 			    "PLOGI to d_id=0x%x succeeded, wwn=%08x%08x",
6255 			    ptgt->tgt_d_id,
6256 			    *((int *)&ptgt->tgt_port_wwn.raw_wwn[0]),
6257 			    *((int *)&ptgt->tgt_port_wwn.raw_wwn[4]));
6258 
6259 			FCP_TGT_TRACE(ptgt, icmd->ipkt_change_cnt,
6260 			    FCP_TGT_TRACE_15);
6261 
6262 			/* Note that we are not allocating a new icmd */
6263 			if (fcp_send_els(pptr, ptgt, icmd, LA_ELS_PRLI,
6264 			    icmd->ipkt_link_cnt, icmd->ipkt_change_cnt,
6265 			    icmd->ipkt_cause) != DDI_SUCCESS) {
6266 				FCP_TGT_TRACE(ptgt, icmd->ipkt_change_cnt,
6267 				    FCP_TGT_TRACE_16);
6268 				goto fail;
6269 			}
6270 			break;
6271 
6272 		case LA_ELS_PRLI:
6273 			FCP_TRACE(fcp_logq, pptr->port_instbuf,
6274 			    fcp_trace, FCP_BUF_LEVEL_5, 0,
6275 			    "PRLI to d_id=0x%x succeeded", ptgt->tgt_d_id);
6276 
6277 			FCP_TGT_TRACE(ptgt, icmd->ipkt_change_cnt,
6278 			    FCP_TGT_TRACE_17);
6279 
6280 			prli = &prli_s;
6281 
6282 			FCP_CP_IN(fpkt->pkt_resp, prli, fpkt->pkt_resp_acc,
6283 			    sizeof (prli_s));
6284 
6285 			fprli = (struct fcp_prli *)prli->service_params;
6286 
6287 			mutex_enter(&ptgt->tgt_mutex);
6288 			ptgt->tgt_icap = fprli->initiator_fn;
6289 			ptgt->tgt_tcap = fprli->target_fn;
6290 			mutex_exit(&ptgt->tgt_mutex);
6291 
6292 			if ((fprli->type != 0x08) || (fprli->target_fn != 1)) {
6293 				/*
6294 				 * this FCP device does not support target mode
6295 				 */
6296 				FCP_TGT_TRACE(ptgt, icmd->ipkt_change_cnt,
6297 				    FCP_TGT_TRACE_18);
6298 				goto fail;
6299 			}
6300 			if (fprli->retry == 1) {
6301 				fc_ulp_disable_relogin(pptr->port_fp_handle,
6302 				    &ptgt->tgt_port_wwn);
6303 			}
6304 
6305 			/* target is no longer offline */
6306 			mutex_enter(&pptr->port_mutex);
6307 			mutex_enter(&ptgt->tgt_mutex);
6308 			if (!FCP_TGT_STATE_CHANGED(ptgt, icmd)) {
6309 				ptgt->tgt_state &= ~(FCP_TGT_OFFLINE |
6310 				    FCP_TGT_MARK);
6311 			} else {
6312 				FCP_TRACE(fcp_logq, pptr->port_instbuf,
6313 				    fcp_trace, FCP_BUF_LEVEL_2, 0,
6314 				    "fcp_icmd_callback,1: state change"
6315 				    " occurred for D_ID=0x%x", ptgt->tgt_d_id);
6316 				mutex_exit(&ptgt->tgt_mutex);
6317 				mutex_exit(&pptr->port_mutex);
6318 				goto fail;
6319 			}
6320 			mutex_exit(&ptgt->tgt_mutex);
6321 			mutex_exit(&pptr->port_mutex);
6322 
6323 			/*
6324 			 * lun 0 should always respond to inquiry, so
6325 			 * get the LUN struct for LUN 0
6326 			 *
6327 			 * Currently we deal with first level of addressing.
6328 			 * If / when we start supporting 0x device types
6329 			 * If / when we start supporting device types such as
6330 			 * DTYPE_ARRAY_CTRL (i.e. array controllers),
6331 			 */
6332 			lun0_newalloc = 0;
6333 			if ((plun = fcp_get_lun(ptgt, 0)) == NULL) {
6334 				/*
6335 				 * no LUN struct for LUN 0 yet exists,
6336 				 * so create one
6337 				 */
6338 				plun = fcp_alloc_lun(ptgt);
6339 				if (plun == NULL) {
6340 					fcp_log(CE_WARN, pptr->port_dip,
6341 					    "!Failed to allocate lun 0 for"
6342 					    " D_ID=%x", ptgt->tgt_d_id);
6343 					goto fail;
6344 				}
6345 				lun0_newalloc = 1;
6346 			}
6347 
6348 			/* fill in LUN info */
6349 			mutex_enter(&ptgt->tgt_mutex);
6350 			/*
6351 			 * consider lun 0 as device not connected if it is
6352 			 * offlined or newly allocated
6353 			 */
6354 			if ((plun->lun_state & FCP_LUN_OFFLINE) ||
6355 			    lun0_newalloc) {
6356 				plun->lun_state |= FCP_LUN_DEVICE_NOT_CONNECTED;
6357 			}
6358 			plun->lun_state |= (FCP_LUN_BUSY | FCP_LUN_MARK);
6359 			plun->lun_state &= ~FCP_LUN_OFFLINE;
6360 			ptgt->tgt_lun_cnt = 1;
6361 			ptgt->tgt_report_lun_cnt = 0;
6362 			mutex_exit(&ptgt->tgt_mutex);
6363 
6364 			/* Retrieve the rscn count (if a valid one exists) */
6365 			if (icmd->ipkt_fpkt->pkt_ulp_rscn_infop != NULL) {
6366 				rscn_count = ((fc_ulp_rscn_info_t *)
6367 				    (icmd->ipkt_fpkt->pkt_ulp_rscn_infop))
6368 				    ->ulp_rscn_count;
6369 			} else {
6370 				rscn_count = FC_INVALID_RSCN_COUNT;
6371 			}
6372 
6373 			/* send Report Lun request to target */
6374 			if (fcp_send_scsi(plun, SCMD_REPORT_LUN,
6375 			    sizeof (struct fcp_reportlun_resp),
6376 			    icmd->ipkt_link_cnt, icmd->ipkt_change_cnt,
6377 			    icmd->ipkt_cause, rscn_count) != DDI_SUCCESS) {
6378 				mutex_enter(&pptr->port_mutex);
6379 				if (!FCP_TGT_STATE_CHANGED(ptgt, icmd)) {
6380 					fcp_log(CE_WARN, pptr->port_dip,
6381 					    "!Failed to send REPORT LUN to"
6382 					    "  D_ID=%x", ptgt->tgt_d_id);
6383 				} else {
6384 					FCP_TRACE(fcp_logq,
6385 					    pptr->port_instbuf, fcp_trace,
6386 					    FCP_BUF_LEVEL_5, 0,
6387 					    "fcp_icmd_callback,2: state change"
6388 					    " occurred for D_ID=0x%x",
6389 					    ptgt->tgt_d_id);
6390 				}
6391 				mutex_exit(&pptr->port_mutex);
6392 
6393 				FCP_TGT_TRACE(ptgt, icmd->ipkt_change_cnt,
6394 				    FCP_TGT_TRACE_19);
6395 
6396 				goto fail;
6397 			} else {
6398 				free_pkt = 0;
6399 				fcp_icmd_free(pptr, icmd);
6400 			}
6401 			break;
6402 
6403 		default:
6404 			fcp_log(CE_WARN, pptr->port_dip,
6405 			    "!fcp_icmd_callback Invalid opcode");
6406 			goto fail;
6407 		}
6408 
6409 		return;
6410 	}
6411 
6412 
6413 	/*
6414 	 * Other PLOGI failures are not retried as the
6415 	 * transport does it already
6416 	 */
6417 	if (icmd->ipkt_opcode != LA_ELS_PLOGI) {
6418 		if (fcp_is_retryable(icmd) &&
6419 		    icmd->ipkt_retries++ < FCP_MAX_RETRIES) {
6420 
6421 			if (FCP_MUST_RETRY(fpkt)) {
6422 				fcp_queue_ipkt(pptr, fpkt);
6423 				return;
6424 			}
6425 
6426 			FCP_TRACE(fcp_logq, pptr->port_instbuf,
6427 			    fcp_trace, FCP_BUF_LEVEL_2, 0,
6428 			    "ELS PRLI is retried for d_id=0x%x, state=%x,"
6429 			    " reason= %x", ptgt->tgt_d_id, fpkt->pkt_state,
6430 			    fpkt->pkt_reason);
6431 
6432 			/*
6433 			 * Retry by recalling the routine that
6434 			 * originally queued this packet
6435 			 */
6436 			mutex_enter(&pptr->port_mutex);
6437 			if (!FCP_TGT_STATE_CHANGED(ptgt, icmd)) {
6438 				caddr_t msg;
6439 
6440 				mutex_exit(&pptr->port_mutex);
6441 
6442 				ASSERT(icmd->ipkt_opcode != LA_ELS_PLOGI);
6443 
6444 				if (fpkt->pkt_state == FC_PKT_TIMEOUT) {
6445 					fpkt->pkt_timeout +=
6446 					    FCP_TIMEOUT_DELTA;
6447 				}
6448 
6449 				rval = fc_ulp_issue_els(pptr->port_fp_handle,
6450 				    fpkt);
6451 				if (rval == FC_SUCCESS) {
6452 					return;
6453 				}
6454 
6455 				if (rval == FC_STATEC_BUSY ||
6456 				    rval == FC_OFFLINE) {
6457 					fcp_queue_ipkt(pptr, fpkt);
6458 					return;
6459 				}
6460 				(void) fc_ulp_error(rval, &msg);
6461 
6462 				fcp_log(CE_NOTE, pptr->port_dip,
6463 				    "!ELS 0x%x failed to d_id=0x%x;"
6464 				    " %s", icmd->ipkt_opcode,
6465 				    ptgt->tgt_d_id, msg);
6466 			} else {
6467 				FCP_TRACE(fcp_logq, pptr->port_instbuf,
6468 				    fcp_trace, FCP_BUF_LEVEL_2, 0,
6469 				    "fcp_icmd_callback,3: state change"
6470 				    " occurred for D_ID=0x%x", ptgt->tgt_d_id);
6471 				mutex_exit(&pptr->port_mutex);
6472 			}
6473 		}
6474 	} else {
6475 		if (fcp_is_retryable(icmd) &&
6476 		    icmd->ipkt_retries++ < FCP_MAX_RETRIES) {
6477 			if (FCP_MUST_RETRY(fpkt)) {
6478 				fcp_queue_ipkt(pptr, fpkt);
6479 				return;
6480 			}
6481 		}
6482 		mutex_enter(&pptr->port_mutex);
6483 		if (!FCP_TGT_STATE_CHANGED(ptgt, icmd) &&
6484 		    fpkt->pkt_state != FC_PKT_PORT_OFFLINE) {
6485 			mutex_exit(&pptr->port_mutex);
6486 			fcp_print_error(fpkt);
6487 		} else {
6488 			FCP_TRACE(fcp_logq, pptr->port_instbuf,
6489 			    fcp_trace, FCP_BUF_LEVEL_2, 0,
6490 			    "fcp_icmd_callback,4: state change occurred"
6491 			    " for D_ID=0x%x", ptgt->tgt_d_id);
6492 			mutex_exit(&pptr->port_mutex);
6493 		}
6494 	}
6495 
6496 fail:
6497 	if (free_pkt) {
6498 		(void) fcp_call_finish_init(pptr, ptgt, icmd->ipkt_link_cnt,
6499 		    icmd->ipkt_change_cnt, icmd->ipkt_cause);
6500 		fcp_icmd_free(pptr, icmd);
6501 	}
6502 }
6503 
6504 
6505 /*
6506  * called internally to send an info cmd using the transport
6507  *
6508  * sends an INQUIRY, an INQUIRY page 0x83, or a REPORT_LUN
6509  *
6510  * when the packet is completed fcp_scsi_callback is called
6511  */
6512 static int
6513 fcp_send_scsi(struct fcp_lun *plun, uchar_t opcode, int alloc_len,
6514     int lcount, int tcount, int cause, uint32_t rscn_count)
6515 {
6516 	int			nodma;
6517 	struct fcp_ipkt		*icmd;
6518 	struct fcp_tgt		*ptgt;
6519 	struct fcp_port		*pptr;
6520 	fc_frame_hdr_t		*hp;
6521 	fc_packet_t		*fpkt;
6522 	struct fcp_cmd		fcp_cmd;
6523 	struct fcp_cmd		*fcmd;
6524 	union scsi_cdb		*scsi_cdb;
6525 
6526 	ASSERT(plun != NULL);
6527 
6528 	ptgt = plun->lun_tgt;
6529 	ASSERT(ptgt != NULL);
6530 
6531 	pptr = ptgt->tgt_port;
6532 	ASSERT(pptr != NULL);
6533 
6534 	FCP_TRACE(fcp_logq, pptr->port_instbuf,
6535 	    fcp_trace, FCP_BUF_LEVEL_5, 0,
6536 	    "fcp_send_scsi: d_id=0x%x opcode=0x%x", ptgt->tgt_d_id, opcode);
6537 
6538 	nodma = (pptr->port_fcp_dma == FC_NO_DVMA_SPACE) ? 1 : 0;
6539 	icmd = fcp_icmd_alloc(pptr, ptgt, sizeof (struct fcp_cmd),
6540 	    FCP_MAX_RSP_IU_SIZE, alloc_len, nodma, lcount, tcount, cause,
6541 	    rscn_count);
6542 
6543 	if (icmd == NULL) {
6544 		return (DDI_FAILURE);
6545 	}
6546 
6547 	fpkt = icmd->ipkt_fpkt;
6548 	fpkt->pkt_tran_flags = FC_TRAN_CLASS3 | FC_TRAN_INTR;
6549 	icmd->ipkt_retries = 0;
6550 	icmd->ipkt_opcode = opcode;
6551 	icmd->ipkt_lun = plun;
6552 
6553 	if (nodma) {
6554 		fcmd = (struct fcp_cmd *)fpkt->pkt_cmd;
6555 	} else {
6556 		fcmd = &fcp_cmd;
6557 	}
6558 	bzero(fcmd, sizeof (struct fcp_cmd));
6559 
6560 	fpkt->pkt_timeout = FCP_SCSI_CMD_TIMEOUT;
6561 
6562 	hp = &fpkt->pkt_cmd_fhdr;
6563 
6564 	hp->s_id = pptr->port_id;
6565 	hp->d_id = ptgt->tgt_d_id;
6566 	hp->r_ctl = R_CTL_COMMAND;
6567 	hp->type = FC_TYPE_SCSI_FCP;
6568 	hp->f_ctl = F_CTL_SEQ_INITIATIVE | F_CTL_FIRST_SEQ;
6569 	hp->rsvd = 0;
6570 	hp->seq_id = 0;
6571 	hp->seq_cnt = 0;
6572 	hp->ox_id = 0xffff;
6573 	hp->rx_id = 0xffff;
6574 	hp->ro = 0;
6575 
6576 	bcopy(&(plun->lun_addr), &(fcmd->fcp_ent_addr), FCP_LUN_SIZE);
6577 
6578 	/*
6579 	 * Request SCSI target for expedited processing
6580 	 */
6581 
6582 	/*
6583 	 * Set up for untagged queuing because we do not
6584 	 * know if the fibre device supports queuing.
6585 	 */
6586 	fcmd->fcp_cntl.cntl_reserved_0 = 0;
6587 	fcmd->fcp_cntl.cntl_reserved_1 = 0;
6588 	fcmd->fcp_cntl.cntl_reserved_2 = 0;
6589 	fcmd->fcp_cntl.cntl_reserved_3 = 0;
6590 	fcmd->fcp_cntl.cntl_reserved_4 = 0;
6591 	fcmd->fcp_cntl.cntl_qtype = FCP_QTYPE_UNTAGGED;
6592 	scsi_cdb = (union scsi_cdb *)fcmd->fcp_cdb;
6593 
6594 	switch (opcode) {
6595 	case SCMD_INQUIRY_PAGE83:
6596 		/*
6597 		 * Prepare to get the Inquiry VPD page 83 information
6598 		 */
6599 		fcmd->fcp_cntl.cntl_read_data = 1;
6600 		fcmd->fcp_cntl.cntl_write_data = 0;
6601 		fcmd->fcp_data_len = alloc_len;
6602 
6603 		fpkt->pkt_tran_type = FC_PKT_FCP_READ;
6604 		fpkt->pkt_comp = fcp_scsi_callback;
6605 
6606 		scsi_cdb->scc_cmd = SCMD_INQUIRY;
6607 		scsi_cdb->g0_addr2 = 0x01;
6608 		scsi_cdb->g0_addr1 = 0x83;
6609 		scsi_cdb->g0_count0 = (uchar_t)alloc_len;
6610 		break;
6611 
6612 	case SCMD_INQUIRY:
6613 		fcmd->fcp_cntl.cntl_read_data = 1;
6614 		fcmd->fcp_cntl.cntl_write_data = 0;
6615 		fcmd->fcp_data_len = alloc_len;
6616 
6617 		fpkt->pkt_tran_type = FC_PKT_FCP_READ;
6618 		fpkt->pkt_comp = fcp_scsi_callback;
6619 
6620 		scsi_cdb->scc_cmd = SCMD_INQUIRY;
6621 		scsi_cdb->g0_count0 = SUN_INQSIZE;
6622 		break;
6623 
6624 	case SCMD_REPORT_LUN: {
6625 		fc_portid_t	d_id;
6626 		opaque_t	fca_dev;
6627 
6628 		ASSERT(alloc_len >= 16);
6629 
6630 		d_id.priv_lilp_posit = 0;
6631 		d_id.port_id = ptgt->tgt_d_id;
6632 
6633 		fca_dev = fc_ulp_get_fca_device(pptr->port_fp_handle, d_id);
6634 
6635 		mutex_enter(&ptgt->tgt_mutex);
6636 		ptgt->tgt_fca_dev = fca_dev;
6637 		mutex_exit(&ptgt->tgt_mutex);
6638 
6639 		fcmd->fcp_cntl.cntl_read_data = 1;
6640 		fcmd->fcp_cntl.cntl_write_data = 0;
6641 		fcmd->fcp_data_len = alloc_len;
6642 
6643 		fpkt->pkt_tran_type = FC_PKT_FCP_READ;
6644 		fpkt->pkt_comp = fcp_scsi_callback;
6645 
6646 		scsi_cdb->scc_cmd = SCMD_REPORT_LUN;
6647 		scsi_cdb->scc5_count0 = alloc_len & 0xff;
6648 		scsi_cdb->scc5_count1 = (alloc_len >> 8) & 0xff;
6649 		scsi_cdb->scc5_count2 = (alloc_len >> 16) & 0xff;
6650 		scsi_cdb->scc5_count3 = (alloc_len >> 24) & 0xff;
6651 		break;
6652 	}
6653 
6654 	default:
6655 		fcp_log(CE_WARN, pptr->port_dip,
6656 		    "!fcp_send_scsi Invalid opcode");
6657 		break;
6658 	}
6659 
6660 	if (!nodma) {
6661 		FCP_CP_OUT((uint8_t *)fcmd, fpkt->pkt_cmd,
6662 		    fpkt->pkt_cmd_acc, sizeof (struct fcp_cmd));
6663 	}
6664 
6665 	mutex_enter(&pptr->port_mutex);
6666 	if (!FCP_TGT_STATE_CHANGED(ptgt, icmd)) {
6667 
6668 		mutex_exit(&pptr->port_mutex);
6669 		if (fcp_transport(pptr->port_fp_handle, fpkt, 1) !=
6670 		    FC_SUCCESS) {
6671 			fcp_icmd_free(pptr, icmd);
6672 			return (DDI_FAILURE);
6673 		}
6674 		return (DDI_SUCCESS);
6675 	} else {
6676 		FCP_TRACE(fcp_logq, pptr->port_instbuf,
6677 		    fcp_trace, FCP_BUF_LEVEL_2, 0,
6678 		    "fcp_send_scsi,1: state change occurred"
6679 		    " for D_ID=0x%x", ptgt->tgt_d_id);
6680 		mutex_exit(&pptr->port_mutex);
6681 		fcp_icmd_free(pptr, icmd);
6682 		return (DDI_FAILURE);
6683 	}
6684 }
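
/*
 * Usage sketch (illustrative, not part of the original source): the
 * discovery path issues a REPORT LUN through this routine once PRLI has
 * completed, passing along the counters and RSCN count captured from the
 * original internal packet:
 *
 *	if (fcp_send_scsi(plun, SCMD_REPORT_LUN,
 *	    sizeof (struct fcp_reportlun_resp), icmd->ipkt_link_cnt,
 *	    icmd->ipkt_change_cnt, icmd->ipkt_cause,
 *	    rscn_count) != DDI_SUCCESS) {
 *		(handle the failure)
 *	}
 *
 * On DDI_SUCCESS the packet belongs to the transport and completion is
 * reported through fcp_scsi_callback().
 */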
6685 
6686 
6687 /*
6688  * called by fcp_scsi_callback to check for and handle the case where
6689  * REPORT_LUN returns ILLEGAL REQUEST or a UNIT ATTENTION
6690  */
6691 static int
6692 fcp_check_reportlun(struct fcp_rsp *rsp, fc_packet_t *fpkt)
6693 {
6694 	uchar_t				rqlen;
6695 	int				rval = DDI_FAILURE;
6696 	struct scsi_extended_sense	sense_info, *sense;
6697 	struct fcp_ipkt		*icmd = (struct fcp_ipkt *)
6698 	    fpkt->pkt_ulp_private;
6699 	struct fcp_tgt		*ptgt = icmd->ipkt_tgt;
6700 	struct fcp_port		*pptr = ptgt->tgt_port;
6701 
6702 	ASSERT(icmd->ipkt_opcode == SCMD_REPORT_LUN);
6703 
6704 	if (rsp->fcp_u.fcp_status.scsi_status == STATUS_RESERVATION_CONFLICT) {
6705 		/*
6706 		 * SCSI-II Reserve Release support. Some older FC drives return
6707 		 * a reservation conflict for the Report Luns command.
6708 		 */
6709 		if (icmd->ipkt_nodma) {
6710 			rsp->fcp_u.fcp_status.rsp_len_set = 0;
6711 			rsp->fcp_u.fcp_status.sense_len_set = 0;
6712 			rsp->fcp_u.fcp_status.scsi_status = STATUS_GOOD;
6713 		} else {
6714 			fcp_rsp_t	new_resp;
6715 
6716 			FCP_CP_IN(fpkt->pkt_resp, &new_resp,
6717 			    fpkt->pkt_resp_acc, sizeof (new_resp));
6718 
6719 			new_resp.fcp_u.fcp_status.rsp_len_set = 0;
6720 			new_resp.fcp_u.fcp_status.sense_len_set = 0;
6721 			new_resp.fcp_u.fcp_status.scsi_status = STATUS_GOOD;
6722 
6723 			FCP_CP_OUT(&new_resp, fpkt->pkt_resp,
6724 			    fpkt->pkt_resp_acc, sizeof (new_resp));
6725 		}
6726 
6727 		FCP_CP_OUT(fcp_dummy_lun, fpkt->pkt_data,
6728 		    fpkt->pkt_data_acc, sizeof (fcp_dummy_lun));
6729 
6730 		return (DDI_SUCCESS);
6731 	}
6732 
6733 	sense = &sense_info;
6734 	if (!rsp->fcp_u.fcp_status.sense_len_set) {
6735 		/* no need to continue if sense length is not set */
6736 		return (rval);
6737 	}
6738 
6739 	/* casting 64-bit integer to 8-bit */
6740 	rqlen = (uchar_t)min(rsp->fcp_sense_len,
6741 	    sizeof (struct scsi_extended_sense));
6742 
6743 	if (rqlen < 14) {
6744 		/* no need to continue if request length isn't long enough */
6745 		return (rval);
6746 	}
6747 
6748 	if (icmd->ipkt_nodma) {
6749 		/*
6750 		 * We can safely use fcp_response_len here since the
6751 		 * only path that calls fcp_check_reportlun,
6752 		 * fcp_scsi_callback, has already called
6753 		 * fcp_validate_fcp_response.
6754 		 */
6755 		sense = (struct scsi_extended_sense *)(fpkt->pkt_resp +
6756 		    sizeof (struct fcp_rsp) + rsp->fcp_response_len);
6757 	} else {
6758 		FCP_CP_IN(fpkt->pkt_resp + sizeof (struct fcp_rsp) +
6759 		    rsp->fcp_response_len, sense, fpkt->pkt_resp_acc,
6760 		    sizeof (struct scsi_extended_sense));
6761 	}
6762 
6763 	if (!FCP_SENSE_NO_LUN(sense)) {
6764 		mutex_enter(&ptgt->tgt_mutex);
6765 		/* clear the flag if any */
6766 		ptgt->tgt_state &= ~FCP_TGT_ILLREQ;
6767 		mutex_exit(&ptgt->tgt_mutex);
6768 	}
6769 
6770 	if ((sense->es_key == KEY_ILLEGAL_REQUEST) &&
6771 	    (sense->es_add_code == 0x20)) {
6772 		if (icmd->ipkt_nodma) {
6773 			rsp->fcp_u.fcp_status.rsp_len_set = 0;
6774 			rsp->fcp_u.fcp_status.sense_len_set = 0;
6775 			rsp->fcp_u.fcp_status.scsi_status = STATUS_GOOD;
6776 		} else {
6777 			fcp_rsp_t	new_resp;
6778 
6779 			FCP_CP_IN(fpkt->pkt_resp, &new_resp,
6780 			    fpkt->pkt_resp_acc, sizeof (new_resp));
6781 
6782 			new_resp.fcp_u.fcp_status.rsp_len_set = 0;
6783 			new_resp.fcp_u.fcp_status.sense_len_set = 0;
6784 			new_resp.fcp_u.fcp_status.scsi_status = STATUS_GOOD;
6785 
6786 			FCP_CP_OUT(&new_resp, fpkt->pkt_resp,
6787 			    fpkt->pkt_resp_acc, sizeof (new_resp));
6788 		}
6789 
6790 		FCP_CP_OUT(fcp_dummy_lun, fpkt->pkt_data,
6791 		    fpkt->pkt_data_acc, sizeof (fcp_dummy_lun));
6792 
6793 		return (DDI_SUCCESS);
6794 	}
6795 
6796 	/*
6797 	 * This is for the STK library, which returns a check condition
6798 	 * to indicate the device is not ready and manual assistance is needed.
6799 	 * This happens on a report lun command when the door is open.
6800 	 */
6801 	if ((sense->es_key == KEY_NOT_READY) && (sense->es_add_code == 0x04)) {
6802 		if (icmd->ipkt_nodma) {
6803 			rsp->fcp_u.fcp_status.rsp_len_set = 0;
6804 			rsp->fcp_u.fcp_status.sense_len_set = 0;
6805 			rsp->fcp_u.fcp_status.scsi_status = STATUS_GOOD;
6806 		} else {
6807 			fcp_rsp_t	new_resp;
6808 
6809 			FCP_CP_IN(fpkt->pkt_resp, &new_resp,
6810 			    fpkt->pkt_resp_acc, sizeof (new_resp));
6811 
6812 			new_resp.fcp_u.fcp_status.rsp_len_set = 0;
6813 			new_resp.fcp_u.fcp_status.sense_len_set = 0;
6814 			new_resp.fcp_u.fcp_status.scsi_status = STATUS_GOOD;
6815 
6816 			FCP_CP_OUT(&new_resp, fpkt->pkt_resp,
6817 			    fpkt->pkt_resp_acc, sizeof (new_resp));
6818 		}
6819 
6820 		FCP_CP_OUT(fcp_dummy_lun, fpkt->pkt_data,
6821 		    fpkt->pkt_data_acc, sizeof (fcp_dummy_lun));
6822 
6823 		return (DDI_SUCCESS);
6824 	}
6825 
6826 	if ((FCP_SENSE_REPORTLUN_CHANGED(sense)) ||
6827 	    (FCP_SENSE_NO_LUN(sense))) {
6828 		mutex_enter(&ptgt->tgt_mutex);
6829 		if ((FCP_SENSE_NO_LUN(sense)) &&
6830 		    (ptgt->tgt_state & FCP_TGT_ILLREQ)) {
6831 			ptgt->tgt_state &= ~FCP_TGT_ILLREQ;
6832 			mutex_exit(&ptgt->tgt_mutex);
6833 			/*
6834 			 * reconfig was triggered by ILLEGAL REQUEST but
6835 			 * got ILLEGAL REQUEST again
6836 			 */
6837 			FCP_TRACE(fcp_logq, pptr->port_instbuf,
6838 			    fcp_trace, FCP_BUF_LEVEL_3, 0,
6839 			    "!FCP: Unable to obtain Report Lun data"
6840 			    " target=%x", ptgt->tgt_d_id);
6841 		} else {
6842 			if (ptgt->tgt_tid == NULL) {
6843 				timeout_id_t	tid;
6844 				/*
6845 				 * REPORT LUN data has changed.	 Kick off
6846 				 * rediscovery
6847 				 */
6848 				tid = timeout(fcp_reconfigure_luns,
6849 				    (caddr_t)ptgt, (clock_t)drv_usectohz(1));
6850 
6851 				ptgt->tgt_tid = tid;
6852 				ptgt->tgt_state |= FCP_TGT_BUSY;
6853 			}
6854 			if (FCP_SENSE_NO_LUN(sense)) {
6855 				ptgt->tgt_state |= FCP_TGT_ILLREQ;
6856 			}
6857 			mutex_exit(&ptgt->tgt_mutex);
6858 			if (FCP_SENSE_REPORTLUN_CHANGED(sense)) {
6859 				FCP_TRACE(fcp_logq, pptr->port_instbuf,
6860 				    fcp_trace, FCP_BUF_LEVEL_3, 0,
6861 				    "!FCP:Report Lun Has Changed"
6862 				    " target=%x", ptgt->tgt_d_id);
6863 			} else if (FCP_SENSE_NO_LUN(sense)) {
6864 				FCP_TRACE(fcp_logq, pptr->port_instbuf,
6865 				    fcp_trace, FCP_BUF_LEVEL_3, 0,
6866 				    "!FCP:LU Not Supported"
6867 				    " target=%x", ptgt->tgt_d_id);
6868 			}
6869 		}
6870 		rval = DDI_SUCCESS;
6871 	}
6872 
6873 	FCP_TRACE(fcp_logq, pptr->port_instbuf,
6874 	    fcp_trace, FCP_BUF_LEVEL_5, 0,
6875 	    "D_ID=%x, sense=%x, status=%x",
6876 	    fpkt->pkt_cmd_fhdr.d_id, sense->es_key,
6877 	    rsp->fcp_u.fcp_status.scsi_status);
6878 
6879 	return (rval);
6880 }
6881 
6882 /*
6883  *     Function: fcp_scsi_callback
6884  *
6885  *  Description: This is the callback routine set by fcp_send_scsi() after
6886  *		 it calls fcp_icmd_alloc().  The SCSI commands completed here,
6887  *		 all autogenerated by FCP, are:	REPORT_LUN, INQUIRY and
6888  *		 INQUIRY_PAGE83.
6889  *
6890  *     Argument: *fpkt	 FC packet used to convey the command
6891  *
6892  * Return Value: None
6893  */
6894 static void
6895 fcp_scsi_callback(fc_packet_t *fpkt)
6896 {
6897 	struct fcp_ipkt	*icmd = (struct fcp_ipkt *)
6898 	    fpkt->pkt_ulp_private;
6899 	struct fcp_rsp_info	fcp_rsp_err, *bep;
6900 	struct fcp_port	*pptr;
6901 	struct fcp_tgt	*ptgt;
6902 	struct fcp_lun	*plun;
6903 	struct fcp_rsp		response, *rsp;
6904 
6905 	ptgt = icmd->ipkt_tgt;
6906 	pptr = ptgt->tgt_port;
6907 	plun = icmd->ipkt_lun;
6908 
6909 	if (icmd->ipkt_nodma) {
6910 		rsp = (struct fcp_rsp *)fpkt->pkt_resp;
6911 	} else {
6912 		rsp = &response;
6913 		FCP_CP_IN(fpkt->pkt_resp, rsp, fpkt->pkt_resp_acc,
6914 		    sizeof (struct fcp_rsp));
6915 	}
6916 
6917 	FCP_TRACE(fcp_logq, pptr->port_instbuf,
6918 	    fcp_trace, FCP_BUF_LEVEL_2, 0,
6919 	    "SCSI callback state=0x%x for %x, op_code=0x%x, "
6920 	    "status=%x, lun num=%x",
6921 	    fpkt->pkt_state, ptgt->tgt_d_id, icmd->ipkt_opcode,
6922 	    rsp->fcp_u.fcp_status.scsi_status, plun->lun_num);
6923 
6924 	/*
6925 	 * Pre-init LUN GUID with NWWN if it is not a device that
6926 	 * supports multiple luns and we know it's not page83
6927 	 * compliant.  Although using a NWWN is not lun unique,
6928 	 * we will be fine since there is only one lun behind the target
6929 	 * in this case.
6930 	 */
6931 	if ((plun->lun_guid_size == 0) &&
6932 	    (icmd->ipkt_opcode == SCMD_INQUIRY_PAGE83) &&
6933 	    (fcp_symmetric_device_probe(plun) == 0)) {
6934 
6935 		char ascii_wwn[FC_WWN_SIZE*2+1];
6936 		fcp_wwn_to_ascii(&ptgt->tgt_node_wwn.raw_wwn[0], ascii_wwn);
6937 		(void) fcp_copy_guid_2_lun_block(plun, ascii_wwn);
6938 	}
6939 
6940 	/*
6941 	 * Some old FC tapes and FC <-> SCSI bridge devices return overrun
6942 	 * when they have more data than what is asked for in the CDB.  An overrun
6943 	 * is really when FCP_DL is smaller than the data length in the CDB.
6944 	 * In the case here we know that the REPORT LUN command we formed within
6945 	 * this binary has a correct FCP_DL.  So this OVERRUN is due to bad device
6946 	 * behavior. In reality this is FC_SUCCESS.
6947 	 */
6948 	if ((fpkt->pkt_state != FC_PKT_SUCCESS) &&
6949 	    (fpkt->pkt_reason == FC_REASON_OVERRUN) &&
6950 	    (icmd->ipkt_opcode == SCMD_REPORT_LUN)) {
6951 		fpkt->pkt_state = FC_PKT_SUCCESS;
6952 	}
6953 
6954 	if (fpkt->pkt_state != FC_PKT_SUCCESS) {
6955 		FCP_TRACE(fcp_logq, pptr->port_instbuf,
6956 		    fcp_trace, FCP_BUF_LEVEL_2, 0,
6957 		    "icmd failed with state=0x%x for %x", fpkt->pkt_state,
6958 		    ptgt->tgt_d_id);
6959 
6960 		if (fpkt->pkt_reason == FC_REASON_CRC_ERROR) {
6961 			/*
6962 			 * Inquiry VPD page command on A5K SES devices would
6963 			 * result in data CRC errors.
6964 			 */
6965 			if (icmd->ipkt_opcode == SCMD_INQUIRY_PAGE83) {
6966 				(void) fcp_handle_page83(fpkt, icmd, 1);
6967 				return;
6968 			}
6969 		}
6970 		if (fpkt->pkt_state == FC_PKT_TIMEOUT ||
6971 		    FCP_MUST_RETRY(fpkt)) {
6972 			fpkt->pkt_timeout += FCP_TIMEOUT_DELTA;
6973 			fcp_retry_scsi_cmd(fpkt);
6974 			return;
6975 		}
6976 
6977 		FCP_TGT_TRACE(ptgt, icmd->ipkt_change_cnt,
6978 		    FCP_TGT_TRACE_20);
6979 
6980 		mutex_enter(&pptr->port_mutex);
6981 		mutex_enter(&ptgt->tgt_mutex);
6982 		if (!FCP_STATE_CHANGED(pptr, ptgt, icmd)) {
6983 			mutex_exit(&ptgt->tgt_mutex);
6984 			mutex_exit(&pptr->port_mutex);
6985 			fcp_print_error(fpkt);
6986 		} else {
6987 			FCP_TRACE(fcp_logq, pptr->port_instbuf,
6988 			    fcp_trace, FCP_BUF_LEVEL_2, 0,
6989 			    "fcp_scsi_callback,1: state change occurred"
6990 			    " for D_ID=0x%x", ptgt->tgt_d_id);
6991 			mutex_exit(&ptgt->tgt_mutex);
6992 			mutex_exit(&pptr->port_mutex);
6993 		}
6994 		(void) fcp_call_finish_init(pptr, ptgt, icmd->ipkt_link_cnt,
6995 		    icmd->ipkt_change_cnt, icmd->ipkt_cause);
6996 		fcp_icmd_free(pptr, icmd);
6997 		return;
6998 	}
6999 
7000 	FCP_TGT_TRACE(ptgt, icmd->ipkt_change_cnt, FCP_TGT_TRACE_21);
7001 
7002 	mutex_enter(&pptr->port_mutex);
7003 	mutex_enter(&ptgt->tgt_mutex);
7004 	if (FCP_STATE_CHANGED(pptr, ptgt, icmd)) {
7005 		FCP_TRACE(fcp_logq, pptr->port_instbuf,
7006 		    fcp_trace, FCP_BUF_LEVEL_2, 0,
7007 		    "fcp_scsi_callback,2: state change occurred"
7008 		    " for D_ID=0x%x", ptgt->tgt_d_id);
7009 		mutex_exit(&ptgt->tgt_mutex);
7010 		mutex_exit(&pptr->port_mutex);
7011 		(void) fcp_call_finish_init(pptr, ptgt, icmd->ipkt_link_cnt,
7012 		    icmd->ipkt_change_cnt, icmd->ipkt_cause);
7013 		fcp_icmd_free(pptr, icmd);
7014 		return;
7015 	}
7016 	ASSERT((ptgt->tgt_state & FCP_TGT_MARK) == 0);
7017 
7018 	mutex_exit(&ptgt->tgt_mutex);
7019 	mutex_exit(&pptr->port_mutex);
7020 
7021 	if (icmd->ipkt_nodma) {
7022 		bep = (struct fcp_rsp_info *)(fpkt->pkt_resp +
7023 		    sizeof (struct fcp_rsp));
7024 	} else {
7025 		bep = &fcp_rsp_err;
7026 		FCP_CP_IN(fpkt->pkt_resp + sizeof (struct fcp_rsp), bep,
7027 		    fpkt->pkt_resp_acc, sizeof (struct fcp_rsp_info));
7028 	}
7029 
7030 	if (fcp_validate_fcp_response(rsp, pptr) != FC_SUCCESS) {
7031 		fcp_retry_scsi_cmd(fpkt);
7032 		return;
7033 	}
7034 
7035 	if (rsp->fcp_u.fcp_status.rsp_len_set && bep->rsp_code !=
7036 	    FCP_NO_FAILURE) {
7037 		FCP_TRACE(fcp_logq, pptr->port_instbuf,
7038 		    fcp_trace, FCP_BUF_LEVEL_2, 0,
7039 		    "rsp_code=0x%x, rsp_len_set=0x%x",
7040 		    bep->rsp_code, rsp->fcp_u.fcp_status.rsp_len_set);
7041 		fcp_retry_scsi_cmd(fpkt);
7042 		return;
7043 	}
7044 
7045 	if (rsp->fcp_u.fcp_status.scsi_status == STATUS_QFULL ||
7046 	    rsp->fcp_u.fcp_status.scsi_status == STATUS_BUSY) {
7047 		fcp_queue_ipkt(pptr, fpkt);
7048 		return;
7049 	}
7050 
7051 	/*
7052 	 * Devices that do not support INQUIRY_PAGE83 return a check condition
7053 	 * with illegal request as per the SCSI spec.
7054 	 * Crossbridge is one such device and Daktari's SES node is another.
7055 	 * We ideally want to enumerate these devices as non-mpxio devices.
7056 	 * SES nodes (Daktari only currently) are an exception to this.
7057 	 */
7058 	if ((icmd->ipkt_opcode == SCMD_INQUIRY_PAGE83) &&
7059 	    (rsp->fcp_u.fcp_status.scsi_status & STATUS_CHECK)) {
7060 
7061 		FCP_TRACE(fcp_logq, pptr->port_instbuf,
7062 		    fcp_trace, FCP_BUF_LEVEL_3, 0,
7063 		    "INQUIRY_PAGE83 for d_id %x (dtype:0x%x) failed with "
7064 		    "check condition. May enumerate as non-mpxio device",
7065 		    ptgt->tgt_d_id, plun->lun_type);
7066 
7067 		/*
7068 		 * If we let Daktari's SES be enumerated as a non-mpxio
7069 		 * device, there will be a discrepancy in that the other
7070 		 * internal FC disks will get enumerated as mpxio devices.
7071 		 * Applications like luxadm expect this to be consistent.
7072 		 *
7073 		 * So, we put in a hack here to check if this is an SES device
7074 		 * and handle it here.
7075 		 */
7076 		if (plun->lun_type == DTYPE_ESI) {
7077 			/*
7078 			 * Since, pkt_state is actually FC_PKT_SUCCESS
7079 			 * at this stage, we fake a failure here so that
7080 			 * fcp_handle_page83 will create a device path using
7081 			 * the WWN instead of the GUID which is not there anyway
7082 			 */
7083 			fpkt->pkt_state = FC_PKT_LOCAL_RJT;
7084 			(void) fcp_handle_page83(fpkt, icmd, 1);
7085 			return;
7086 		}
7087 
7088 		mutex_enter(&ptgt->tgt_mutex);
7089 		plun->lun_state &= ~(FCP_LUN_OFFLINE |
7090 		    FCP_LUN_MARK | FCP_LUN_BUSY);
7091 		mutex_exit(&ptgt->tgt_mutex);
7092 
7093 		(void) fcp_call_finish_init(pptr, ptgt,
7094 		    icmd->ipkt_link_cnt, icmd->ipkt_change_cnt,
7095 		    icmd->ipkt_cause);
7096 		fcp_icmd_free(pptr, icmd);
7097 		return;
7098 	}
7099 
7100 	if (rsp->fcp_u.fcp_status.scsi_status != STATUS_GOOD) {
7101 		int rval = DDI_FAILURE;
7102 
7103 		/*
7104 		 * handle cases where report lun isn't supported
7105 		 * by faking up our own REPORT_LUN response or
7106 		 * UNIT ATTENTION
7107 		 */
7108 		if (icmd->ipkt_opcode == SCMD_REPORT_LUN) {
7109 			rval = fcp_check_reportlun(rsp, fpkt);
7110 
7111 			/*
7112 			 * fcp_check_reportlun might have modified the
7113 			 * FCP response. Copy it in again to get an updated
7114 			 * FCP response
7115 			 */
7116 			if (rval == DDI_SUCCESS && icmd->ipkt_nodma == 0) {
7117 				rsp = &response;
7118 
7119 				FCP_CP_IN(fpkt->pkt_resp, rsp,
7120 				    fpkt->pkt_resp_acc,
7121 				    sizeof (struct fcp_rsp));
7122 			}
7123 		}
7124 
7125 		if (rsp->fcp_u.fcp_status.scsi_status != STATUS_GOOD) {
7126 			if (rval == DDI_SUCCESS) {
7127 				(void) fcp_call_finish_init(pptr, ptgt,
7128 				    icmd->ipkt_link_cnt, icmd->ipkt_change_cnt,
7129 				    icmd->ipkt_cause);
7130 				fcp_icmd_free(pptr, icmd);
7131 			} else {
7132 				fcp_retry_scsi_cmd(fpkt);
7133 			}
7134 
7135 			return;
7136 		}
7137 	} else {
7138 		if (icmd->ipkt_opcode == SCMD_REPORT_LUN) {
7139 			mutex_enter(&ptgt->tgt_mutex);
7140 			ptgt->tgt_state &= ~FCP_TGT_ILLREQ;
7141 			mutex_exit(&ptgt->tgt_mutex);
7142 		}
7143 	}
7144 
7145 	ASSERT(rsp->fcp_u.fcp_status.scsi_status == STATUS_GOOD);
7146 	if (!(pptr->port_state & FCP_STATE_FCA_IS_NODMA)) {
7147 		(void) ddi_dma_sync(fpkt->pkt_data_dma, 0, 0,
7148 		    DDI_DMA_SYNC_FORCPU);
7149 	}
7150 
7151 	switch (icmd->ipkt_opcode) {
7152 	case SCMD_INQUIRY:
7153 		FCP_LUN_TRACE(plun, FCP_LUN_TRACE_1);
7154 		fcp_handle_inquiry(fpkt, icmd);
7155 		break;
7156 
7157 	case SCMD_REPORT_LUN:
7158 		FCP_TGT_TRACE(ptgt, icmd->ipkt_change_cnt,
7159 		    FCP_TGT_TRACE_22);
7160 		fcp_handle_reportlun(fpkt, icmd);
7161 		break;
7162 
7163 	case SCMD_INQUIRY_PAGE83:
7164 		FCP_LUN_TRACE(plun, FCP_LUN_TRACE_2);
7165 		(void) fcp_handle_page83(fpkt, icmd, 0);
7166 		break;
7167 
7168 	default:
7169 		fcp_log(CE_WARN, NULL, "!Invalid SCSI opcode");
7170 		(void) fcp_call_finish_init(pptr, ptgt, icmd->ipkt_link_cnt,
7171 		    icmd->ipkt_change_cnt, icmd->ipkt_cause);
7172 		fcp_icmd_free(pptr, icmd);
7173 		break;
7174 	}
7175 }
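/*
 * Editorial sketch (not part of the driver): the status-handling ladder
 * walked by the internal-command callback above, condensed for
 * reference.  Names match the code; the flow itself is simplified.
 *
 *	if (scsi_status == STATUS_BUSY)
 *		fcp_queue_ipkt();		requeue and retry later
 *	else if (opcode == SCMD_INQUIRY_PAGE83 && (scsi_status & STATUS_CHECK))
 *		enumerate as non-mpxio (SES nodes take the WWN-path fallback)
 *	else if (scsi_status != STATUS_GOOD)
 *		fcp_check_reportlun() and/or fcp_retry_scsi_cmd()
 *	else
 *		dispatch on opcode: INQUIRY, REPORT_LUN, INQUIRY_PAGE83
 */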
7176 
7177 
7178 static void
7179 fcp_retry_scsi_cmd(fc_packet_t *fpkt)
7180 {
7181 	struct fcp_ipkt	*icmd = (struct fcp_ipkt *)
7182 	    fpkt->pkt_ulp_private;
7183 	struct fcp_tgt	*ptgt = icmd->ipkt_tgt;
7184 	struct fcp_port	*pptr = ptgt->tgt_port;
7185 
7186 	if (icmd->ipkt_retries < FCP_MAX_RETRIES &&
7187 	    fcp_is_retryable(icmd)) {
7188 		mutex_enter(&pptr->port_mutex);
7189 		if (!FCP_TGT_STATE_CHANGED(ptgt, icmd)) {
7190 			mutex_exit(&pptr->port_mutex);
7191 			FCP_TRACE(fcp_logq, pptr->port_instbuf,
7192 			    fcp_trace, FCP_BUF_LEVEL_3, 0,
7193 			    "Retrying %s to %x; state=%x, reason=%x",
7194 			    (icmd->ipkt_opcode == SCMD_REPORT_LUN) ?
7195 			    "Report LUN" : "INQUIRY", ptgt->tgt_d_id,
7196 			    fpkt->pkt_state, fpkt->pkt_reason);
7197 
7198 			fcp_queue_ipkt(pptr, fpkt);
7199 		} else {
7200 			FCP_TRACE(fcp_logq, pptr->port_instbuf,
7201 			    fcp_trace, FCP_BUF_LEVEL_3, 0,
7202 			    "fcp_retry_scsi_cmd,1: state change occurred"
7203 			    " for D_ID=0x%x", ptgt->tgt_d_id);
7204 			mutex_exit(&pptr->port_mutex);
7205 			(void) fcp_call_finish_init(pptr, ptgt,
7206 			    icmd->ipkt_link_cnt, icmd->ipkt_change_cnt,
7207 			    icmd->ipkt_cause);
7208 			fcp_icmd_free(pptr, icmd);
7209 		}
7210 	} else {
7211 		fcp_print_error(fpkt);
7212 		(void) fcp_call_finish_init(pptr, ptgt, icmd->ipkt_link_cnt,
7213 		    icmd->ipkt_change_cnt, icmd->ipkt_cause);
7214 		fcp_icmd_free(pptr, icmd);
7215 	}
7216 }
7217 
7218 /*
7219  *     Function: fcp_handle_page83
7220  *
7221  *  Description: Treats the response to INQUIRY_PAGE83.
7222  *
7223  *     Argument: *fpkt	FC packet used to convey the command.
7224  *		 *icmd	Original fcp_ipkt structure.
7225  *		 ignore_page83_data
7226  *			If set to 1, this is a special device's page83
7227  *			response and the device should be enumerated under mpxio.
7228  *
7229  * Return Value: None
7230  */
7231 static void
7232 fcp_handle_page83(fc_packet_t *fpkt, struct fcp_ipkt *icmd,
7233     int ignore_page83_data)
7234 {
7235 	struct fcp_port	*pptr;
7236 	struct fcp_lun	*plun;
7237 	struct fcp_tgt	*ptgt;
7238 	uchar_t			dev_id_page[SCMD_MAX_INQUIRY_PAGE83_SIZE];
7239 	int			fail = 0;
7240 	ddi_devid_t		devid;
7241 	char			*guid = NULL;
7242 	int			ret;
7243 
7244 	ASSERT(icmd != NULL && fpkt != NULL);
7245 
7246 	pptr = icmd->ipkt_port;
7247 	ptgt = icmd->ipkt_tgt;
7248 	plun = icmd->ipkt_lun;
7249 
7250 	if (fpkt->pkt_state == FC_PKT_SUCCESS) {
7251 		FCP_LUN_TRACE(plun, FCP_LUN_TRACE_7);
7252 
7253 		FCP_CP_IN(fpkt->pkt_data, dev_id_page, fpkt->pkt_data_acc,
7254 		    SCMD_MAX_INQUIRY_PAGE83_SIZE);
7255 
7256 		FCP_TRACE(fcp_logq, pptr->port_instbuf,
7257 		    fcp_trace, FCP_BUF_LEVEL_5, 0,
7258 		    "fcp_handle_page83: port=%d, tgt D_ID=0x%x, "
7259 		    "dtype=0x%x, lun num=%x",
7260 		    pptr->port_instance, ptgt->tgt_d_id,
7261 		    dev_id_page[0], plun->lun_num);
7262 
7263 		ret = ddi_devid_scsi_encode(
7264 		    DEVID_SCSI_ENCODE_VERSION_LATEST,
7265 		    NULL,		/* driver name */
7266 		    (unsigned char *) &plun->lun_inq, /* standard inquiry */
7267 		    sizeof (plun->lun_inq), /* size of standard inquiry */
7268 		    NULL,		/* page 80 data */
7269 		    0,		/* page 80 len */
7270 		    dev_id_page,	/* page 83 data */
7271 		    SCMD_MAX_INQUIRY_PAGE83_SIZE, /* page 83 data len */
7272 		    &devid);
7273 
7274 		if (ret == DDI_SUCCESS) {
7275 
7276 			guid = ddi_devid_to_guid(devid);
7277 
7278 			if (guid) {
7279 				/*
7280 				 * Check our current guid.  If it's non null
7281 				 * and it has changed, we need to copy it into
7282 				 * lun_old_guid since we might still need it.
7283 				 */
7284 				if (plun->lun_guid &&
7285 				    strcmp(guid, plun->lun_guid)) {
7286 					unsigned int len;
7287 
7288 					/*
7289 					 * If the guid of the LUN changes,
7290 					 * reconfiguration should be triggered
7291 					 * to reflect the changes.
7292 					 * i.e. we should offline the LUN with
7293 					 * the old guid, and online the LUN with
7294 					 * the new guid.
7295 					 */
7296 					plun->lun_state |= FCP_LUN_CHANGED;
7297 
7298 					if (plun->lun_old_guid) {
7299 						kmem_free(plun->lun_old_guid,
7300 						    plun->lun_old_guid_size);
7301 					}
7302 
7303 					len = plun->lun_guid_size;
7304 					plun->lun_old_guid_size = len;
7305 
7306 					plun->lun_old_guid = kmem_zalloc(len,
7307 					    KM_NOSLEEP);
7308 
7309 					if (plun->lun_old_guid) {
7310 						/*
7311 						 * The alloc was successful,
7312 						 * so let's do the copy.
7313 						 */
7314 						bcopy(plun->lun_guid,
7315 						    plun->lun_old_guid, len);
7316 					} else {
7317 						fail = 1;
7318 						plun->lun_old_guid_size = 0;
7319 					}
7320 				}
7321 				if (!fail) {
7322 					if (fcp_copy_guid_2_lun_block(
7323 					    plun, guid)) {
7324 						fail = 1;
7325 					}
7326 				}
7327 				ddi_devid_free_guid(guid);
7328 
7329 			} else {
7330 				FCP_TRACE(fcp_logq, pptr->port_instbuf,
7331 				    fcp_trace, FCP_BUF_LEVEL_2, 0,
7332 				    "fcp_handle_page83: unable to create "
7333 				    "GUID");
7334 
7335 				/* couldn't create good guid from devid */
7336 				fail = 1;
7337 			}
7338 			ddi_devid_free(devid);
7339 
7340 		} else if (ret == DDI_NOT_WELL_FORMED) {
7341 			/* NULL filled data for page 83 */
7342 			FCP_TRACE(fcp_logq, pptr->port_instbuf,
7343 			    fcp_trace, FCP_BUF_LEVEL_2, 0,
7344 			    "fcp_handle_page83: retry GUID");
7345 
7346 			icmd->ipkt_retries = 0;
7347 			fcp_retry_scsi_cmd(fpkt);
7348 			return;
7349 		} else {
7350 			FCP_TRACE(fcp_logq, pptr->port_instbuf,
7351 			    fcp_trace, FCP_BUF_LEVEL_2, 0,
7352 			    "fcp_handle_page83: bad ddi_devid_scsi_encode %x",
7353 			    ret);
7354 			/*
7355 			 * Since the page83 validation was
7356 			 * introduced late, we are tolerant
7357 			 * of existing devices that were already
7358 			 * found to work under mpxio, like the
7359 			 * A5200's SES device.  Its page83 response
7360 			 * is not standard-compliant, but we still
7361 			 * want it to be enumerated under mpxio.
7362 			 */
7363 			if (fcp_symmetric_device_probe(plun) != 0) {
7364 				fail = 1;
7365 			}
7366 		}
7367 
7368 	} else {
7369 		/* bad packet state */
7370 		FCP_LUN_TRACE(plun, FCP_LUN_TRACE_8);
7371 
7372 		/*
7373 		 * Some special devices (the A5K SES and Daktari's SES devices)
7374 		 * should be enumerated under mpxio,
7375 		 * or "luxadm dis" will fail.
7376 		 */
7377 		if (ignore_page83_data) {
7378 			fail = 0;
7379 		} else {
7380 			fail = 1;
7381 		}
7382 		FCP_TRACE(fcp_logq, pptr->port_instbuf,
7383 		    fcp_trace, FCP_BUF_LEVEL_2, 0,
7384 		    "!Devid page cmd failed. "
7385 		    "fpkt_state: %x fpkt_reason: %x "
7386 		    "ignore_page83: %d",
7387 		    fpkt->pkt_state, fpkt->pkt_reason,
7388 		    ignore_page83_data);
7389 	}
7390 
7391 	mutex_enter(&pptr->port_mutex);
7392 	mutex_enter(&plun->lun_mutex);
7393 	/*
7394 	 * If lun_cip is not NULL, don't update lun_mpxio; doing so could
7395 	 * cause a mismatch between lun_cip and lun_mpxio.
7396 	 */
7397 	if (plun->lun_cip == NULL) {
7398 		/*
7399 		 * If we don't have a guid for this lun it's because we were
7400 		 * unable to glean one from the page 83 response.  Set the
7401 		 * control flag to 0 here to make sure that we don't attempt to
7402 		 * enumerate it under mpxio.
7403 		 */
7404 		if (fail || pptr->port_mpxio == 0) {
7405 			plun->lun_mpxio = 0;
7406 		} else {
7407 			plun->lun_mpxio = 1;
7408 		}
7409 	}
7410 	mutex_exit(&plun->lun_mutex);
7411 	mutex_exit(&pptr->port_mutex);
7412 
7413 	mutex_enter(&ptgt->tgt_mutex);
7414 	plun->lun_state &=
7415 	    ~(FCP_LUN_OFFLINE | FCP_LUN_MARK | FCP_LUN_BUSY);
7416 	mutex_exit(&ptgt->tgt_mutex);
7417 
7418 	(void) fcp_call_finish_init(pptr, ptgt, icmd->ipkt_link_cnt,
7419 	    icmd->ipkt_change_cnt, icmd->ipkt_cause);
7420 
7421 	fcp_icmd_free(pptr, icmd);
7422 }
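/*
 * Editorial sketch (not part of the driver): the devid-to-GUID flow
 * used above, reduced to its essential DDI calls.  Error handling and
 * the lun_old_guid bookkeeping are omitted.
 *
 *	ddi_devid_t devid;
 *	char *guid;
 *
 *	if (ddi_devid_scsi_encode(DEVID_SCSI_ENCODE_VERSION_LATEST, NULL,
 *	    (unsigned char *)&plun->lun_inq, sizeof (plun->lun_inq),
 *	    NULL, 0, dev_id_page, SCMD_MAX_INQUIRY_PAGE83_SIZE,
 *	    &devid) == DDI_SUCCESS) {
 *		guid = ddi_devid_to_guid(devid);
 *		if (guid != NULL) {
 *			... copy the GUID into the LUN for mpxio ...
 *			ddi_devid_free_guid(guid);
 *		}
 *		ddi_devid_free(devid);
 *	}
 */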
7423 
7424 /*
7425  *     Function: fcp_handle_inquiry
7426  *
7427  *  Description: Called by fcp_scsi_callback to handle the response to an
7428  *		 INQUIRY request.
7429  *
7430  *     Argument: *fpkt	FC packet used to convey the command.
7431  *		 *icmd	Original fcp_ipkt structure.
7432  *
7433  * Return Value: None
7434  */
7435 static void
7436 fcp_handle_inquiry(fc_packet_t *fpkt, struct fcp_ipkt *icmd)
7437 {
7438 	struct fcp_port	*pptr;
7439 	struct fcp_lun	*plun;
7440 	struct fcp_tgt	*ptgt;
7441 	uchar_t		dtype;
7442 	uchar_t		pqual;
7443 	uint32_t	rscn_count = FC_INVALID_RSCN_COUNT;
7444 
7445 	ASSERT(icmd != NULL && fpkt != NULL);
7446 
7447 	pptr = icmd->ipkt_port;
7448 	ptgt = icmd->ipkt_tgt;
7449 	plun = icmd->ipkt_lun;
7450 
7451 	FCP_CP_IN(fpkt->pkt_data, &plun->lun_inq, fpkt->pkt_data_acc,
7452 	    sizeof (struct scsi_inquiry));
7453 
7454 	dtype = plun->lun_inq.inq_dtype & DTYPE_MASK;
7455 	pqual = plun->lun_inq.inq_dtype >> 5;
7456 
7457 	FCP_TRACE(fcp_logq, pptr->port_instbuf,
7458 	    fcp_trace, FCP_BUF_LEVEL_5, 0,
7459 	    "fcp_handle_inquiry: port=%d, tgt D_ID=0x%x, lun=0x%x, "
7460 	    "dtype=0x%x pqual: 0x%x", pptr->port_instance, ptgt->tgt_d_id,
7461 	    plun->lun_num, dtype, pqual);
7462 
7463 	if (pqual != 0) {
7464 		/*
7465 		 * Non-zero peripheral qualifier
7466 		 */
7467 		fcp_log(CE_CONT, pptr->port_dip,
7468 		    "!Target 0x%x lun 0x%x: Nonzero peripheral qualifier: "
7469 		    "Device type=0x%x Peripheral qual=0x%x\n",
7470 		    ptgt->tgt_d_id, plun->lun_num, dtype, pqual);
7471 
7472 		FCP_TRACE(fcp_logq, pptr->port_instbuf,
7473 		    fcp_trace, FCP_BUF_LEVEL_5, 0,
7474 		    "!Target 0x%x lun 0x%x: Nonzero peripheral qualifier: "
7475 		    "Device type=0x%x Peripheral qual=0x%x\n",
7476 		    ptgt->tgt_d_id, plun->lun_num, dtype, pqual);
7477 
7478 		FCP_LUN_TRACE(plun, FCP_LUN_TRACE_3);
7479 
7480 		(void) fcp_call_finish_init(pptr, ptgt, icmd->ipkt_link_cnt,
7481 		    icmd->ipkt_change_cnt, icmd->ipkt_cause);
7482 		fcp_icmd_free(pptr, icmd);
7483 		return;
7484 	}
7485 
7486 	/*
7487 	 * If the device is already initialized, check the dtype
7488 	 * for a change. If it has changed then update the flags
7489 	 * so the create_luns will offline the old device and
7490 	 * create the new device. Refer to bug: 4764752
7491 	 */
7492 	if ((plun->lun_state & FCP_LUN_INIT) && dtype != plun->lun_type) {
7493 		plun->lun_state |= FCP_LUN_CHANGED;
7494 	}
7495 	plun->lun_type = plun->lun_inq.inq_dtype;
7496 
7497 	/*
7498 	 * This code is setting/initializing the throttling in the FCA
7499 	 * driver.
7500 	 */
7501 	mutex_enter(&pptr->port_mutex);
7502 	if (!pptr->port_notify) {
7503 		if (bcmp(plun->lun_inq.inq_pid, pid, strlen(pid)) == 0) {
7504 			uint32_t cmd = 0;
7505 			cmd = ((cmd & 0xFF | FC_NOTIFY_THROTTLE) |
7506 			    ((cmd & 0xFFFFFF00 >> 8) |
7507 			    FCP_SVE_THROTTLE << 8));
7508 			pptr->port_notify = 1;
7509 			mutex_exit(&pptr->port_mutex);
7510 			(void) fc_ulp_port_notify(pptr->port_fp_handle, cmd);
7511 			mutex_enter(&pptr->port_mutex);
7512 		}
7513 	}
7514 
7515 	if (FCP_TGT_STATE_CHANGED(ptgt, icmd)) {
7516 		FCP_TRACE(fcp_logq, pptr->port_instbuf,
7517 		    fcp_trace, FCP_BUF_LEVEL_2, 0,
7518 		    "fcp_handle_inquiry,1:state change occured"
7519 		    "fcp_handle_inquiry,1: state change occurred"
7520 		mutex_exit(&pptr->port_mutex);
7521 
7522 		FCP_LUN_TRACE(plun, FCP_LUN_TRACE_5);
7523 		(void) fcp_call_finish_init(pptr, ptgt,
7524 		    icmd->ipkt_link_cnt, icmd->ipkt_change_cnt,
7525 		    icmd->ipkt_cause);
7526 		fcp_icmd_free(pptr, icmd);
7527 		return;
7528 	}
7529 	ASSERT((ptgt->tgt_state & FCP_TGT_MARK) == 0);
7530 	mutex_exit(&pptr->port_mutex);
7531 
7532 	/* Retrieve the rscn count (if a valid one exists) */
7533 	if (icmd->ipkt_fpkt->pkt_ulp_rscn_infop != NULL) {
7534 		rscn_count = ((fc_ulp_rscn_info_t *)
7535 		    (icmd->ipkt_fpkt->pkt_ulp_rscn_infop))->ulp_rscn_count;
7536 	} else {
7537 		rscn_count = FC_INVALID_RSCN_COUNT;
7538 	}
7539 
7540 	if (fcp_send_scsi(plun, SCMD_INQUIRY_PAGE83,
7541 	    SCMD_MAX_INQUIRY_PAGE83_SIZE,
7542 	    icmd->ipkt_link_cnt, icmd->ipkt_change_cnt,
7543 	    icmd->ipkt_cause, rscn_count) != DDI_SUCCESS) {
7544 		fcp_log(CE_WARN, NULL, "!failed to send page 83");
7545 		FCP_LUN_TRACE(plun, FCP_LUN_TRACE_6);
7546 		(void) fcp_call_finish_init(pptr, ptgt,
7547 		    icmd->ipkt_link_cnt, icmd->ipkt_change_cnt,
7548 		    icmd->ipkt_cause);
7549 	}
7550 
7551 	/*
7552 	 * INQUIRY VPD page 0x83 has been requested above to uniquely
7553 	 * identify this logical unit.
7554 	 */
7555 	fcp_icmd_free(pptr, icmd);
7556 }
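/*
 * Editorial sketch (not part of the driver): how the inq_dtype decode
 * above works.  The peripheral qualifier is the top 3 bits of byte 0
 * of the standard INQUIRY data and the device type is the low 5 bits
 * (DTYPE_MASK is 0x1f):
 *
 *	uchar_t inq_dtype = 0x0d;		SES enclosure, qualifier 0
 *	uchar_t dtype = inq_dtype & DTYPE_MASK;	0x0d, i.e. DTYPE_ESI
 *	uchar_t pqual = inq_dtype >> 5;		0, a device is connected
 *
 * A byte of 0x7f (pqual 3, dtype 0x1f) means no device at this LUN,
 * which is why a nonzero qualifier is rejected above.
 */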
7557 
7558 /*
7559  *     Function: fcp_handle_reportlun
7560  *
7561  *  Description: Called by fcp_scsi_callback to handle the response to a
7562  *		 REPORT_LUN request.
7563  *
7564  *     Argument: *fpkt	FC packet used to convey the command.
7565  *		 *icmd	Original fcp_ipkt structure.
7566  *
7567  * Return Value: None
7568  */
7569 static void
7570 fcp_handle_reportlun(fc_packet_t *fpkt, struct fcp_ipkt *icmd)
7571 {
7572 	int				i;
7573 	int				nluns_claimed;
7574 	int				nluns_bufmax;
7575 	int				len;
7576 	uint16_t			lun_num;
7577 	uint32_t			rscn_count = FC_INVALID_RSCN_COUNT;
7578 	struct fcp_port			*pptr;
7579 	struct fcp_tgt			*ptgt;
7580 	struct fcp_lun			*plun;
7581 	struct fcp_reportlun_resp	*report_lun;
7582 
7583 	pptr = icmd->ipkt_port;
7584 	ptgt = icmd->ipkt_tgt;
7585 	len = fpkt->pkt_datalen;
7586 
7587 	if ((len < FCP_LUN_HEADER) ||
7588 	    ((report_lun = kmem_zalloc(len, KM_NOSLEEP)) == NULL)) {
7589 		(void) fcp_call_finish_init(pptr, ptgt, icmd->ipkt_link_cnt,
7590 		    icmd->ipkt_change_cnt, icmd->ipkt_cause);
7591 		fcp_icmd_free(pptr, icmd);
7592 		return;
7593 	}
7594 
7595 	FCP_CP_IN(fpkt->pkt_data, report_lun, fpkt->pkt_data_acc,
7596 	    fpkt->pkt_datalen);
7597 
7598 	FCP_TRACE(fcp_logq, pptr->port_instbuf,
7599 	    fcp_trace, FCP_BUF_LEVEL_5, 0,
7600 	    "fcp_handle_reportlun: port=%d, tgt D_ID=0x%x",
7601 	    pptr->port_instance, ptgt->tgt_d_id);
7602 
7603 	/*
7604 	 * Get the number of luns (which is supplied as LUNS * 8) the
7605 	 * device claims it has.
7606 	 */
7607 	nluns_claimed = BE_32(report_lun->num_lun) >> 3;
7608 
7609 	/*
7610 	 * Get the maximum number of luns the buffer submitted can hold.
7611 	 */
7612 	nluns_bufmax = (fpkt->pkt_datalen - FCP_LUN_HEADER) / FCP_LUN_SIZE;
7613 
7614 	/*
7615 	 * Due to limitations of certain hardware, we support only 16 bit LUNs
7616 	 */
7617 	if (nluns_claimed > FCP_MAX_LUNS_SUPPORTED) {
7618 		kmem_free(report_lun, len);
7619 
7620 		fcp_log(CE_NOTE, pptr->port_dip, "!Can not support"
7621 		    " 0x%x number of LUNs for target=%x", nluns_claimed,
7622 		    ptgt->tgt_d_id);
7623 
7624 		(void) fcp_call_finish_init(pptr, ptgt, icmd->ipkt_link_cnt,
7625 		    icmd->ipkt_change_cnt, icmd->ipkt_cause);
7626 		fcp_icmd_free(pptr, icmd);
7627 		return;
7628 	}
7629 
7630 	/*
7631 	 * If there are more LUNs than we have allocated memory for,
7632 	 * allocate more space and send down yet another report lun if
7633 	 * the maximum number of attempts hasn't been reached.
7634 	 */
7635 	mutex_enter(&ptgt->tgt_mutex);
7636 
7637 	if ((nluns_claimed > nluns_bufmax) &&
7638 	    (ptgt->tgt_report_lun_cnt < FCP_MAX_REPORTLUNS_ATTEMPTS)) {
7639 
7640 		struct fcp_lun *plun;
7641 
7642 		ptgt->tgt_report_lun_cnt++;
7643 		plun = ptgt->tgt_lun;
7644 		ASSERT(plun != NULL);
7645 		mutex_exit(&ptgt->tgt_mutex);
7646 
7647 		kmem_free(report_lun, len);
7648 
7649 		FCP_TRACE(fcp_logq, pptr->port_instbuf,
7650 		    fcp_trace, FCP_BUF_LEVEL_5, 0,
7651 		    "!Dynamically discovered %d LUNs for D_ID=%x",
7652 		    nluns_claimed, ptgt->tgt_d_id);
7653 
7654 		/* Retrieve the rscn count (if a valid one exists) */
7655 		if (icmd->ipkt_fpkt->pkt_ulp_rscn_infop != NULL) {
7656 			rscn_count = ((fc_ulp_rscn_info_t *)
7657 			    (icmd->ipkt_fpkt->pkt_ulp_rscn_infop))->
7658 			    ulp_rscn_count;
7659 		} else {
7660 			rscn_count = FC_INVALID_RSCN_COUNT;
7661 		}
7662 
7663 		if (fcp_send_scsi(icmd->ipkt_lun, SCMD_REPORT_LUN,
7664 		    FCP_LUN_HEADER + (nluns_claimed * FCP_LUN_SIZE),
7665 		    icmd->ipkt_link_cnt, icmd->ipkt_change_cnt,
7666 		    icmd->ipkt_cause, rscn_count) != DDI_SUCCESS) {
7667 			(void) fcp_call_finish_init(pptr, ptgt,
7668 			    icmd->ipkt_link_cnt, icmd->ipkt_change_cnt,
7669 			    icmd->ipkt_cause);
7670 		}
7671 
7672 		fcp_icmd_free(pptr, icmd);
7673 		return;
7674 	}
7675 
7676 	if (nluns_claimed > nluns_bufmax) {
7677 		FCP_TRACE(fcp_logq, pptr->port_instbuf,
7678 		    fcp_trace, FCP_BUF_LEVEL_5, 0,
7679 		    "Target=%x:%x:%x:%x:%x:%x:%x:%x"
7680 		    "	 Number of LUNs lost=%x",
7681 		    ptgt->tgt_port_wwn.raw_wwn[0],
7682 		    ptgt->tgt_port_wwn.raw_wwn[1],
7683 		    ptgt->tgt_port_wwn.raw_wwn[2],
7684 		    ptgt->tgt_port_wwn.raw_wwn[3],
7685 		    ptgt->tgt_port_wwn.raw_wwn[4],
7686 		    ptgt->tgt_port_wwn.raw_wwn[5],
7687 		    ptgt->tgt_port_wwn.raw_wwn[6],
7688 		    ptgt->tgt_port_wwn.raw_wwn[7],
7689 		    nluns_claimed - nluns_bufmax);
7690 
7691 		nluns_claimed = nluns_bufmax;
7692 	}
7693 	ptgt->tgt_lun_cnt = nluns_claimed;
7694 
7695 	/*
7696 	 * Identify missing LUNs and print warning messages
7697 	 */
7698 	for (plun = ptgt->tgt_lun; plun; plun = plun->lun_next) {
7699 		int offline;
7700 		int exists = 0;
7701 
7702 		offline = (plun->lun_state & FCP_LUN_OFFLINE) ? 1 : 0;
7703 
7704 		for (i = 0; i < nluns_claimed && exists == 0; i++) {
7705 			uchar_t		*lun_string;
7706 
7707 			lun_string = (uchar_t *)&(report_lun->lun_string[i]);
7708 
7709 			switch (lun_string[0] & 0xC0) {
7710 			case FCP_LUN_ADDRESSING:
7711 			case FCP_PD_ADDRESSING:
7712 			case FCP_VOLUME_ADDRESSING:
7713 				lun_num = ((lun_string[0] & 0x3F) << 8) |
7714 				    lun_string[1];
7715 				if (plun->lun_num == lun_num) {
7716 					exists++;
7717 					break;
7718 				}
7719 				break;
7720 
7721 			default:
7722 				break;
7723 			}
7724 		}
7725 
7726 		if (!exists && !offline) {
7727 			mutex_exit(&ptgt->tgt_mutex);
7728 
7729 			mutex_enter(&pptr->port_mutex);
7730 			mutex_enter(&ptgt->tgt_mutex);
7731 			if (!FCP_STATE_CHANGED(pptr, ptgt, icmd)) {
7732 				/*
7733 				 * set disappear flag when device was connected
7734 				 */
7735 				if (!(plun->lun_state &
7736 				    FCP_LUN_DEVICE_NOT_CONNECTED)) {
7737 					plun->lun_state |= FCP_LUN_DISAPPEARED;
7738 				}
7739 				mutex_exit(&ptgt->tgt_mutex);
7740 				mutex_exit(&pptr->port_mutex);
7741 				if (!(plun->lun_state &
7742 				    FCP_LUN_DEVICE_NOT_CONNECTED)) {
7743 					fcp_log(CE_NOTE, pptr->port_dip,
7744 					    "!Lun=%x for target=%x disappeared",
7745 					    plun->lun_num, ptgt->tgt_d_id);
7746 				}
7747 				mutex_enter(&ptgt->tgt_mutex);
7748 			} else {
7749 				FCP_TRACE(fcp_logq, pptr->port_instbuf,
7750 				    fcp_trace, FCP_BUF_LEVEL_5, 0,
7751 				    "fcp_handle_reportlun,1: state change"
7752 				    " occurred for D_ID=0x%x", ptgt->tgt_d_id);
7753 				mutex_exit(&ptgt->tgt_mutex);
7754 				mutex_exit(&pptr->port_mutex);
7755 				kmem_free(report_lun, len);
7756 				(void) fcp_call_finish_init(pptr, ptgt,
7757 				    icmd->ipkt_link_cnt, icmd->ipkt_change_cnt,
7758 				    icmd->ipkt_cause);
7759 				fcp_icmd_free(pptr, icmd);
7760 				return;
7761 			}
7762 		} else if (exists) {
7763 			/*
7764 			 * clear FCP_LUN_DEVICE_NOT_CONNECTED when lun 0
7765 			 * actually exists in REPORT_LUN response
7766 			 */
7767 			if (plun->lun_state & FCP_LUN_DEVICE_NOT_CONNECTED) {
7768 				plun->lun_state &=
7769 				    ~FCP_LUN_DEVICE_NOT_CONNECTED;
7770 			}
7771 			if (offline || plun->lun_num == 0) {
7772 				if (plun->lun_state & FCP_LUN_DISAPPEARED)  {
7773 					plun->lun_state &= ~FCP_LUN_DISAPPEARED;
7774 					mutex_exit(&ptgt->tgt_mutex);
7775 					fcp_log(CE_NOTE, pptr->port_dip,
7776 					    "!Lun=%x for target=%x reappeared",
7777 					    plun->lun_num, ptgt->tgt_d_id);
7778 					mutex_enter(&ptgt->tgt_mutex);
7779 				}
7780 			}
7781 		}
7782 	}
7783 
7784 	ptgt->tgt_tmp_cnt = nluns_claimed ? nluns_claimed : 1;
7785 	mutex_exit(&ptgt->tgt_mutex);
7786 
7787 	FCP_TRACE(fcp_logq, pptr->port_instbuf,
7788 	    fcp_trace, FCP_BUF_LEVEL_5, 0,
7789 	    "fcp_handle_reportlun: port=%d, tgt D_ID=0x%x, %d LUN(s)",
7790 	    pptr->port_instance, ptgt->tgt_d_id, nluns_claimed);
7791 
7792 	/* scan each lun */
7793 	for (i = 0; i < nluns_claimed; i++) {
7794 		uchar_t	*lun_string;
7795 
7796 		lun_string = (uchar_t *)&(report_lun->lun_string[i]);
7797 
7798 		FCP_TRACE(fcp_logq, pptr->port_instbuf,
7799 		    fcp_trace, FCP_BUF_LEVEL_5, 0,
7800 		    "handle_reportlun: d_id=%x, LUN ind=%d, LUN=%d,"
7801 		    " addr=0x%x", ptgt->tgt_d_id, i, lun_string[1],
7802 		    lun_string[0]);
7803 
7804 		switch (lun_string[0] & 0xC0) {
7805 		case FCP_LUN_ADDRESSING:
7806 		case FCP_PD_ADDRESSING:
7807 		case FCP_VOLUME_ADDRESSING:
7808 			lun_num = ((lun_string[0] & 0x3F) << 8) | lun_string[1];
7809 
7810 			/* We will skip masked LUNs because of the blacklist. */
7811 			if (fcp_lun_blacklist != NULL) {
7812 				mutex_enter(&ptgt->tgt_mutex);
7813 				if (fcp_should_mask(&ptgt->tgt_port_wwn,
7814 				    lun_num) == TRUE) {
7815 					ptgt->tgt_lun_cnt--;
7816 					mutex_exit(&ptgt->tgt_mutex);
7817 					break;
7818 				}
7819 				mutex_exit(&ptgt->tgt_mutex);
7820 			}
7821 
7822 			/* see if this LUN is already allocated */
7823 			if ((plun = fcp_get_lun(ptgt, lun_num)) == NULL) {
7824 				plun = fcp_alloc_lun(ptgt);
7825 				if (plun == NULL) {
7826 					fcp_log(CE_NOTE, pptr->port_dip,
7827 					    "!Lun allocation failed"
7828 					    " target=%x lun=%x",
7829 					    ptgt->tgt_d_id, lun_num);
7830 					break;
7831 				}
7832 			}
7833 
7834 			mutex_enter(&plun->lun_tgt->tgt_mutex);
7835 			/* convert to LUN */
7836 			plun->lun_addr.ent_addr_0 =
7837 			    BE_16(*(uint16_t *)&(lun_string[0]));
7838 			plun->lun_addr.ent_addr_1 =
7839 			    BE_16(*(uint16_t *)&(lun_string[2]));
7840 			plun->lun_addr.ent_addr_2 =
7841 			    BE_16(*(uint16_t *)&(lun_string[4]));
7842 			plun->lun_addr.ent_addr_3 =
7843 			    BE_16(*(uint16_t *)&(lun_string[6]));
7844 
7845 			plun->lun_num = lun_num;
7846 			plun->lun_state |= FCP_LUN_BUSY | FCP_LUN_MARK;
7847 			plun->lun_state &= ~FCP_LUN_OFFLINE;
7848 			mutex_exit(&plun->lun_tgt->tgt_mutex);
7849 
7850 			/* Retrieve the rscn count (if a valid one exists) */
7851 			if (icmd->ipkt_fpkt->pkt_ulp_rscn_infop != NULL) {
7852 				rscn_count = ((fc_ulp_rscn_info_t *)
7853 				    (icmd->ipkt_fpkt->pkt_ulp_rscn_infop))->
7854 				    ulp_rscn_count;
7855 			} else {
7856 				rscn_count = FC_INVALID_RSCN_COUNT;
7857 			}
7858 
7859 			if (fcp_send_scsi(plun, SCMD_INQUIRY, SUN_INQSIZE,
7860 			    icmd->ipkt_link_cnt, icmd->ipkt_change_cnt,
7861 			    icmd->ipkt_cause, rscn_count) != DDI_SUCCESS) {
7862 				mutex_enter(&pptr->port_mutex);
7863 				mutex_enter(&plun->lun_tgt->tgt_mutex);
7864 				if (!FCP_STATE_CHANGED(pptr, ptgt, icmd)) {
7865 					fcp_log(CE_NOTE, pptr->port_dip,
7866 					    "!failed to send INQUIRY"
7867 					    " target=%x lun=%x",
7868 					    ptgt->tgt_d_id, plun->lun_num);
7869 				} else {
7870 					FCP_TRACE(fcp_logq,
7871 					    pptr->port_instbuf, fcp_trace,
7872 					    FCP_BUF_LEVEL_5, 0,
7873 					    "fcp_handle_reportlun,2: state"
7874 					    " change occurred for D_ID=0x%x",
7875 					    ptgt->tgt_d_id);
7876 				}
7877 				mutex_exit(&plun->lun_tgt->tgt_mutex);
7878 				mutex_exit(&pptr->port_mutex);
7879 			} else {
7880 				continue;
7881 			}
7882 			break;
7883 
7884 		default:
7885 			fcp_log(CE_WARN, NULL,
7886 			    "!Unsupported LUN Addressing method %x "
7887 			    "in response to REPORT_LUN", lun_string[0]);
7888 			break;
7889 		}
7890 
7891 		/*
7892 		 * Each time through this loop we decrement tmp_cnt by one --
7893 		 * since we go through this loop once for each LUN,
7894 		 * tmp_cnt should never be <= 0 here.
7895 		 */
7896 		(void) fcp_call_finish_init(pptr, ptgt, icmd->ipkt_link_cnt,
7897 		    icmd->ipkt_change_cnt, icmd->ipkt_cause);
7898 	}
7899 
7900 	if (i == 0) {
7901 		fcp_log(CE_WARN, pptr->port_dip,
7902 		    "!FCP: target=%x reported NO Luns", ptgt->tgt_d_id);
7903 		(void) fcp_call_finish_init(pptr, ptgt, icmd->ipkt_link_cnt,
7904 		    icmd->ipkt_change_cnt, icmd->ipkt_cause);
7905 	}
7906 
7907 	kmem_free(report_lun, len);
7908 	fcp_icmd_free(pptr, icmd);
7909 }
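/*
 * Editorial sketch (not part of the driver): shape of the REPORT LUNS
 * data parsed above.  The first field carries the LUN list length in
 * bytes, hence nluns = BE_32(num_lun) >> 3 since each entry is 8 bytes,
 * and the entries start after the 8-byte header (FCP_LUN_HEADER).  The
 * top two bits of entry byte 0 select the addressing method; for the
 * methods accepted above the 14-bit LUN number is recovered as:
 *
 *	uchar_t *lun_string = (uchar_t *)&(report_lun->lun_string[i]);
 *	uint16_t lun_num = ((lun_string[0] & 0x3F) << 8) | lun_string[1];
 *
 * For example, the 8-byte entry 00 05 00 00 00 00 00 00 describes
 * LUN 5 with peripheral device addressing.
 */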
7910 
7911 
7912 /*
7913  * called internally to return a LUN given a target and a LUN number
7914  */
7915 static struct fcp_lun *
7916 fcp_get_lun(struct fcp_tgt *ptgt, uint16_t lun_num)
7917 {
7918 	struct fcp_lun	*plun;
7919 
7920 	mutex_enter(&ptgt->tgt_mutex);
7921 	for (plun = ptgt->tgt_lun; plun != NULL; plun = plun->lun_next) {
7922 		if (plun->lun_num == lun_num) {
7923 			mutex_exit(&ptgt->tgt_mutex);
7924 			return (plun);
7925 		}
7926 	}
7927 	mutex_exit(&ptgt->tgt_mutex);
7928 
7929 	return (NULL);
7930 }
7931 
7932 
7933 /*
7934  * handle finishing one target for fcp_finish_init
7935  *
7936  * return true (non-zero) if we want finish_init to continue with the
7937  * next target
7938  *
7939  * called with the port mutex held
7940  */
7941 /*ARGSUSED*/
7942 static int
7943 fcp_finish_tgt(struct fcp_port *pptr, struct fcp_tgt *ptgt,
7944     int link_cnt, int tgt_cnt, int cause)
7945 {
7946 	int	rval = 1;
7947 	ASSERT(pptr != NULL);
7948 	ASSERT(ptgt != NULL);
7949 
7950 	FCP_TRACE(fcp_logq, pptr->port_instbuf,
7951 	    fcp_trace, FCP_BUF_LEVEL_5, 0,
7952 	    "finish_tgt: D_ID/state = 0x%x/0x%x", ptgt->tgt_d_id,
7953 	    ptgt->tgt_state);
7954 
7955 	ASSERT(mutex_owned(&pptr->port_mutex));
7956 
7957 	if ((pptr->port_link_cnt != link_cnt) ||
7958 	    (tgt_cnt && ptgt->tgt_change_cnt != tgt_cnt)) {
7959 		/*
7960 		 * oh oh -- another link reset or target change
7961 		 * must have occurred while we were in here
7962 		 */
7963 		FCP_TGT_TRACE(ptgt, tgt_cnt, FCP_TGT_TRACE_23);
7964 
7965 		return (0);
7966 	} else {
7967 		FCP_TGT_TRACE(ptgt, tgt_cnt, FCP_TGT_TRACE_24);
7968 	}
7969 
7970 	mutex_enter(&ptgt->tgt_mutex);
7971 
7972 	if (!(ptgt->tgt_state & FCP_TGT_OFFLINE)) {
7973 		/*
7974 		 * tgt is not offline -- is it marked (i.e. needs
7975 		 * to be offlined) ??
7976 		 */
7977 		if (ptgt->tgt_state & FCP_TGT_MARK) {
7978 			/*
7979 			 * this target not offline *and*
7980 			 * marked
7981 			 */
7982 			ptgt->tgt_state &= ~FCP_TGT_MARK;
7983 			rval = fcp_offline_target(pptr, ptgt, link_cnt,
7984 			    tgt_cnt, 0, 0);
7985 		} else {
7986 			ptgt->tgt_state &= ~FCP_TGT_BUSY;
7987 
7988 			/* create the LUNs */
7989 			if (ptgt->tgt_node_state != FCP_TGT_NODE_ON_DEMAND) {
7990 				ptgt->tgt_node_state = FCP_TGT_NODE_PRESENT;
7991 				fcp_create_luns(ptgt, link_cnt, tgt_cnt,
7992 				    cause);
7993 				ptgt->tgt_device_created = 1;
7994 			} else {
7995 				fcp_update_tgt_state(ptgt, FCP_RESET,
7996 				    FCP_LUN_BUSY);
7997 			}
7998 		}
7999 	}
8000 
8001 	mutex_exit(&ptgt->tgt_mutex);
8002 
8003 	return (rval);
8004 }
8005 
8006 
8007 /*
8008  * this routine is called to finish port initialization
8009  *
8010  * Each port has a "temp" counter -- when a state change happens (e.g.
8011  * port online), the temp count is set to the number of devices in the map.
8012  * Then, as each device gets "discovered", the temp counter is decremented
8013  * by one.  When this count reaches zero we know that all of the devices
8014  * in the map have been discovered (or an error has occurred), so we can
8015  * then finish initialization -- which is done by this routine (well, this
8016  * and fcp_finish_tgt())
8017  *
8018  * acquires and releases the global mutex
8019  *
8020  * called with the port mutex owned
8021  */
8022 static void
8023 fcp_finish_init(struct fcp_port *pptr)
8024 {
8025 #ifdef	DEBUG
8026 	bzero(pptr->port_finish_stack, sizeof (pptr->port_finish_stack));
8027 	pptr->port_finish_depth = getpcstack(pptr->port_finish_stack,
8028 	    FCP_STACK_DEPTH);
8029 #endif /* DEBUG */
8030 
8031 	ASSERT(mutex_owned(&pptr->port_mutex));
8032 
8033 	FCP_TRACE(fcp_logq, pptr->port_instbuf,
8034 	    fcp_trace, FCP_BUF_LEVEL_2, 0, "finish_init:"
8035 	    " entering; ipkt count=%d", pptr->port_ipkt_cnt);
8036 
8037 	if ((pptr->port_state & FCP_STATE_ONLINING) &&
8038 	    !(pptr->port_state & (FCP_STATE_SUSPENDED |
8039 	    FCP_STATE_DETACHING | FCP_STATE_POWER_DOWN))) {
8040 		pptr->port_state &= ~FCP_STATE_ONLINING;
8041 		pptr->port_state |= FCP_STATE_ONLINE;
8042 	}
8043 
8044 	/* Wake up threads waiting on config done */
8045 	cv_broadcast(&pptr->port_config_cv);
8046 }
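/*
 * Editorial sketch (not part of the driver): the "temp" counter pattern
 * described in the block comment above, in outline.  The decrement side
 * lives in fcp_call_finish_init(); field names are shown schematically.
 *
 *	tmp_cnt = <number of devices in the map>;	set on state change
 *	on each device discovery completion (or error):
 *		if (--tmp_cnt == 0)
 *			fcp_finish_init(pptr);		all devices accounted for
 */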
8047 
8048 
8049 /*
8050  * called from fcp_finish_init to create the LUNs for a target
8051  *
8052  * called with the port mutex owned
8053  */
8054 static void
8055 fcp_create_luns(struct fcp_tgt *ptgt, int link_cnt, int tgt_cnt, int cause)
8056 {
8057 	struct fcp_lun	*plun;
8058 	struct fcp_port	*pptr;
8059 	child_info_t		*cip = NULL;
8060 
8061 	ASSERT(ptgt != NULL);
8062 	ASSERT(mutex_owned(&ptgt->tgt_mutex));
8063 
8064 	pptr = ptgt->tgt_port;
8065 
8066 	ASSERT(pptr != NULL);
8067 
8068 	/* scan all LUNs for this target */
8069 	for (plun = ptgt->tgt_lun; plun != NULL; plun = plun->lun_next) {
8070 		if (plun->lun_state & FCP_LUN_OFFLINE) {
8071 			continue;
8072 		}
8073 
8074 		if (plun->lun_state & FCP_LUN_MARK) {
8075 			FCP_TRACE(fcp_logq, pptr->port_instbuf,
8076 			    fcp_trace, FCP_BUF_LEVEL_2, 0,
8077 			    "fcp_create_luns: offlining marked LUN!");
8078 			fcp_offline_lun(plun, link_cnt, tgt_cnt, 1, 0);
8079 			continue;
8080 		}
8081 
8082 		plun->lun_state &= ~FCP_LUN_BUSY;
8083 
8084 		/*
8085 		 * There are conditions in which the FCP_LUN_INIT flag is
8086 		 * cleared but we still have a valid plun->lun_cip. To cover
8087 		 * that case, also CLEAR_BUSY whenever we have a valid lun_cip.
8088 		 */
8089 		if (plun->lun_mpxio && plun->lun_cip &&
8090 		    (!fcp_pass_to_hp(pptr, plun, plun->lun_cip,
8091 		    FCP_MPXIO_PATH_CLEAR_BUSY, link_cnt, tgt_cnt,
8092 		    0, 0))) {
8093 			FCP_TRACE(fcp_logq, pptr->port_instbuf,
8094 			    fcp_trace, FCP_BUF_LEVEL_2, 0,
8095 			    "fcp_create_luns: enable lun %p failed!",
8096 			    plun);
8097 		}
8098 
8099 		if (plun->lun_state & FCP_LUN_INIT &&
8100 		    !(plun->lun_state & FCP_LUN_CHANGED)) {
8101 			continue;
8102 		}
8103 
8104 		if (cause == FCP_CAUSE_USER_CREATE) {
8105 			continue;
8106 		}
8107 
8108 		FCP_TRACE(fcp_logq, pptr->port_instbuf,
8109 		    fcp_trace, FCP_BUF_LEVEL_6, 0,
8110 		    "create_luns: passing ONLINE elem to HP thread");
8111 
8112 		/*
8113 		 * If lun has changed, prepare for offlining the old path.
8114 		 * Do not offline the old path right now, since it may be
8115 		 * still opened.
8116 		 */
8117 		if (plun->lun_cip && (plun->lun_state & FCP_LUN_CHANGED)) {
8118 			fcp_prepare_offline_lun(plun, link_cnt, tgt_cnt);
8119 		}
8120 
8121 		/* pass an ONLINE element to the hotplug thread */
8122 		if (!fcp_pass_to_hp(pptr, plun, cip, FCP_ONLINE,
8123 		    link_cnt, tgt_cnt, NDI_ONLINE_ATTACH, 0)) {
8124 
8125 			/*
8126 			 * We cannot attach synchronously (i.e. pass
8127 			 * NDI_ONLINE_ATTACH) here as we might be
8128 			 * coming from an interrupt or callback
8129 			 * thread.
8130 			 */
8131 			if (!fcp_pass_to_hp(pptr, plun, cip, FCP_ONLINE,
8132 			    link_cnt, tgt_cnt, 0, 0)) {
8133 				fcp_log(CE_CONT, pptr->port_dip,
8134 				    "Can not ONLINE LUN; D_ID=%x, LUN=%x\n",
8135 				    plun->lun_tgt->tgt_d_id, plun->lun_num);
8136 			}
8137 		}
8138 	}
8139 }
8140 
8141 
8142 /*
8143  * function to online/offline devices
8144  */
8145 static int
8146 fcp_trigger_lun(struct fcp_lun *plun, child_info_t *cip, int old_mpxio,
8147     int online, int lcount, int tcount, int flags)
8148 {
8149 	int			rval = NDI_FAILURE;
8150 	int			circ;
8151 	child_info_t		*ccip;
8152 	struct fcp_port		*pptr = plun->lun_tgt->tgt_port;
8153 	int			is_mpxio = pptr->port_mpxio;
8154 	dev_info_t		*cdip, *pdip;
8155 	dev_info_t		*cdip = NULL, *pdip;
8156 
8157 	if ((old_mpxio != 0) && (plun->lun_mpxio != old_mpxio)) {
8158 		/*
8159 		 * By the time this event gets serviced, lun_cip and lun_mpxio
8160 		 * have changed, so the event should be invalidated now.
8161 		 */
8162 		FCP_TRACE(fcp_logq, pptr->port_instbuf, fcp_trace,
8163 		    FCP_BUF_LEVEL_2, 0, "fcp_trigger_lun: lun_mpxio changed: "
8164 		    "plun: %p, cip: %p, what:%d", plun, cip, online);
8165 		return (rval);
8166 	}
8167 
8168 	FCP_TRACE(fcp_logq, pptr->port_instbuf,
8169 	    fcp_trace, FCP_BUF_LEVEL_2, 0,
8170 	    "fcp_trigger_lun: plun=%p target=%x lun=%d cip=%p what=%x "
8171 	    "flags=%x mpxio=%x\n",
8172 	    plun, LUN_TGT->tgt_d_id, plun->lun_num, cip, online, flags,
8173 	    plun->lun_mpxio);
8174 
8175 	/*
8176 	 * lun_mpxio needs checking here because we can end up in a race
8177 	 * condition where this task has been dispatched while lun_mpxio is
8178 	 * set, but an earlier FCP_ONLINE task for the same LUN tried to
8179 	 * enable MPXIO for the LUN, but was unable to, and hence cleared
8180 	 * the flag. We rely on the serialization of the tasks here. We return
8181 	 * NDI_SUCCESS so any callers continue without reporting spurious
8182 	 * errors, and the still think we're an MPXIO LUN.
8183 	 * errors, and they still think we're an MPXIO LUN.
8184 
8185 	if (online == FCP_MPXIO_PATH_CLEAR_BUSY ||
8186 	    online == FCP_MPXIO_PATH_SET_BUSY) {
8187 		if (plun->lun_mpxio) {
8188 			rval = fcp_update_mpxio_path(plun, cip, online);
8189 		} else {
8190 			rval = NDI_SUCCESS;
8191 		}
8192 		return (rval);
8193 	}
8194 
8195 	/*
8196 	 * Explicit devfs_clean() due to ndi_devi_offline() not
8197 	 * executing devfs_clean() if parent lock is held.
8198 	 */
8199 	ASSERT(!servicing_interrupt());
8200 	if (online == FCP_OFFLINE) {
8201 		if (plun->lun_mpxio == 0) {
8202 			if (plun->lun_cip == cip) {
8203 				cdip = DIP(plun->lun_cip);
8204 			} else {
8205 				cdip = DIP(cip);
8206 			}
8207 		} else if ((plun->lun_cip == cip) && plun->lun_cip) {
8208 			cdip = mdi_pi_get_client(PIP(plun->lun_cip));
8209 		} else if ((plun->lun_cip != cip) && cip) {
8210 			/*
8211 			 * This means a DTYPE/GUID change, so we get the
8212 			 * dip of the old cip instead of the current lun_cip.
8213 			 */
8214 			cdip = mdi_pi_get_client(PIP(cip));
8215 		}
8216 		if (cdip) {
8217 			if (i_ddi_devi_attached(cdip)) {
8218 				pdip = ddi_get_parent(cdip);
8219 				devname = kmem_alloc(MAXNAMELEN + 1, KM_SLEEP);
8220 				ndi_devi_enter(pdip, &circ);
8221 				(void) ddi_deviname(cdip, devname);
8222 				ndi_devi_exit(pdip, circ);
8223 				/*
8224 				 * Release parent lock before calling
8225 				 * devfs_clean().
8226 				 */
8227 				rval = devfs_clean(pdip, devname + 1,
8228 				    DV_CLEAN_FORCE);
8229 				kmem_free(devname, MAXNAMELEN + 1);
8230 				/*
8231 				 * Return if devfs_clean() fails for
8232 				 * non-MPXIO case.
8233 				 * For MPXIO case, another path could be
8234 				 * offlined.
8235 				 */
8236 				if (rval && plun->lun_mpxio == 0) {
8237 					FCP_TRACE(fcp_logq, pptr->port_instbuf,
8238 					    fcp_trace, FCP_BUF_LEVEL_3, 0,
8239 					    "fcp_trigger_lun: devfs_clean "
8240 					    "failed rval=%x  dip=%p",
8241 					    rval, pdip);
8242 					return (NDI_FAILURE);
8243 				}
8244 			}
8245 		}
8246 	}
8247 
8248 	if (fc_ulp_busy_port(pptr->port_fp_handle) != 0) {
8249 		return (NDI_FAILURE);
8250 	}
8251 
8252 	if (is_mpxio) {
8253 		mdi_devi_enter(pptr->port_dip, &circ);
8254 	} else {
8255 		ndi_devi_enter(pptr->port_dip, &circ);
8256 	}
8257 
8258 	mutex_enter(&pptr->port_mutex);
8259 	mutex_enter(&plun->lun_mutex);
8260 
8261 	if (online == FCP_ONLINE) {
8262 		ccip = fcp_get_cip(plun, cip, lcount, tcount);
8263 		if (ccip == NULL) {
8264 			goto fail;
8265 		}
8266 	} else {
8267 		if (fcp_is_child_present(plun, cip) != FC_SUCCESS) {
8268 			goto fail;
8269 		}
8270 		ccip = cip;
8271 	}
8272 
8273 	if (online == FCP_ONLINE) {
8274 		rval = fcp_online_child(plun, ccip, lcount, tcount, flags,
8275 		    &circ);
8276 		fc_ulp_log_device_event(pptr->port_fp_handle,
8277 		    FC_ULP_DEVICE_ONLINE);
8278 	} else {
8279 		rval = fcp_offline_child(plun, ccip, lcount, tcount, flags,
8280 		    &circ);
8281 		fc_ulp_log_device_event(pptr->port_fp_handle,
8282 		    FC_ULP_DEVICE_OFFLINE);
8283 	}
8284 
8285 fail:	mutex_exit(&plun->lun_mutex);
8286 	mutex_exit(&pptr->port_mutex);
8287 
8288 	if (is_mpxio) {
8289 		mdi_devi_exit(pptr->port_dip, circ);
8290 	} else {
8291 		ndi_devi_exit(pptr->port_dip, circ);
8292 	}
8293 
8294 	fc_ulp_idle_port(pptr->port_fp_handle);
8295 
8296 	return (rval);
8297 }
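/*
 * Editorial sketch (not part of the driver): the bracketing that
 * fcp_trigger_lun() above wraps around the online/offline work, in
 * order.  Every enter/busy call is undone on the way out, whether or
 * not the operation succeeds.
 *
 *	fc_ulp_busy_port(port_fp_handle);	keep the port from going away
 *	mdi_devi_enter() or ndi_devi_enter();	serialize child operations
 *	mutex_enter(port_mutex, lun_mutex);
 *	fcp_online_child() or fcp_offline_child();
 *	mutex_exit(lun_mutex, port_mutex);
 *	mdi_devi_exit() or ndi_devi_exit();
 *	fc_ulp_idle_port(port_fp_handle);
 */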
8298 
8299 
8300 /*
8301  * take a target offline by taking all of its LUNs offline
8302  */
8303 /*ARGSUSED*/
8304 static int
8305 fcp_offline_target(struct fcp_port *pptr, struct fcp_tgt *ptgt,
8306     int link_cnt, int tgt_cnt, int nowait, int flags)
8307 {
8308 	struct fcp_tgt_elem	*elem;
8309 
8310 	ASSERT(mutex_owned(&pptr->port_mutex));
8311 	ASSERT(mutex_owned(&ptgt->tgt_mutex));
8312 
8313 	ASSERT(!(ptgt->tgt_state & FCP_TGT_OFFLINE));
8314 
8315 	if (link_cnt != pptr->port_link_cnt || (tgt_cnt && tgt_cnt !=
8316 	    ptgt->tgt_change_cnt)) {
8317 		mutex_exit(&ptgt->tgt_mutex);
8318 		FCP_TGT_TRACE(ptgt, tgt_cnt, FCP_TGT_TRACE_25);
8319 		mutex_enter(&ptgt->tgt_mutex);
8320 
8321 		return (0);
8322 	}
8323 
8324 	ptgt->tgt_pd_handle = NULL;
8325 	mutex_exit(&ptgt->tgt_mutex);
8326 	FCP_TGT_TRACE(ptgt, tgt_cnt, FCP_TGT_TRACE_26);
8327 	mutex_enter(&ptgt->tgt_mutex);
8328 
8329 	tgt_cnt = tgt_cnt ? tgt_cnt : ptgt->tgt_change_cnt;
8330 
8331 	if (ptgt->tgt_tcap &&
8332 	    (elem = kmem_zalloc(sizeof (*elem), KM_NOSLEEP)) != NULL) {
8333 		elem->flags = flags;
8334 		elem->time = fcp_watchdog_time;
8335 		if (nowait == 0) {
8336 			elem->time += fcp_offline_delay;
8337 		}
8338 		elem->ptgt = ptgt;
8339 		elem->link_cnt = link_cnt;
8340 		elem->tgt_cnt = tgt_cnt;
8341 		elem->next = pptr->port_offline_tgts;
8342 		pptr->port_offline_tgts = elem;
8343 	} else {
8344 		fcp_offline_target_now(pptr, ptgt, link_cnt, tgt_cnt, flags);
8345 	}
8346 
8347 	return (1);
8348 }
8349 
8350 
8351 static void
8352 fcp_offline_target_now(struct fcp_port *pptr, struct fcp_tgt *ptgt,
8353     int link_cnt, int tgt_cnt, int flags)
8354 {
8355 	ASSERT(mutex_owned(&pptr->port_mutex));
8356 	ASSERT(mutex_owned(&ptgt->tgt_mutex));
8357 
8358 	fc_ulp_enable_relogin(pptr->port_fp_handle, &ptgt->tgt_port_wwn);
8359 	ptgt->tgt_state = FCP_TGT_OFFLINE;
8360 	ptgt->tgt_pd_handle = NULL;
8361 	fcp_offline_tgt_luns(ptgt, link_cnt, tgt_cnt, flags);
8362 }
8363 
8364 
8365 static void
8366 fcp_offline_tgt_luns(struct fcp_tgt *ptgt, int link_cnt, int tgt_cnt,
8367     int flags)
8368 {
8369 	struct	fcp_lun	*plun;
8370 
8371 	ASSERT(mutex_owned(&ptgt->tgt_port->port_mutex));
8372 	ASSERT(mutex_owned(&ptgt->tgt_mutex));
8373 
8374 	for (plun = ptgt->tgt_lun; plun != NULL; plun = plun->lun_next) {
8375 		if (!(plun->lun_state & FCP_LUN_OFFLINE)) {
8376 			fcp_offline_lun(plun, link_cnt, tgt_cnt, 1, flags);
8377 		}
8378 	}
8379 }
8380 
8381 
8382 /*
8383  * take a LUN offline
8384  *
8385  * enters and leaves with the target mutex held, releasing it in the process
8386  *
8387  * allocates memory in non-sleep mode
8388  */
8389 static void
8390 fcp_offline_lun(struct fcp_lun *plun, int link_cnt, int tgt_cnt,
8391     int nowait, int flags)
8392 {
8393 	struct fcp_port	*pptr = plun->lun_tgt->tgt_port;
8394 	struct fcp_lun_elem	*elem;
8395 
8396 	ASSERT(plun != NULL);
8397 	ASSERT(mutex_owned(&LUN_TGT->tgt_mutex));
8398 
8399 	if (nowait) {
8400 		fcp_offline_lun_now(plun, link_cnt, tgt_cnt, flags);
8401 		return;
8402 	}
8403 
8404 	if ((elem = kmem_zalloc(sizeof (*elem), KM_NOSLEEP)) != NULL) {
8405 		elem->flags = flags;
8406 		elem->time = fcp_watchdog_time;
8407 		if (nowait == 0) {
8408 			elem->time += fcp_offline_delay;
8409 		}
8410 		elem->plun = plun;
8411 		elem->link_cnt = link_cnt;
8412 		elem->tgt_cnt = plun->lun_tgt->tgt_change_cnt;
8413 		elem->next = pptr->port_offline_luns;
8414 		pptr->port_offline_luns = elem;
8415 	} else {
8416 		fcp_offline_lun_now(plun, link_cnt, tgt_cnt, flags);
8417 	}
8418 }
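/*
 * Editorial sketch (not part of the driver): lifecycle of the deferred
 * offline element allocated above; fcp_scan_offline_luns() below is the
 * consumer.
 *
 *	elem->time = fcp_watchdog_time + fcp_offline_delay;
 *	link elem onto pptr->port_offline_luns;
 *	...
 *	watchdog: once fcp_watchdog_time reaches elem->time, offline the
 *	LUN via fcp_offline_lun_now() if the link/target change counts
 *	still match; otherwise just discard the element.
 */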
8419 
8420 
8421 static void
8422 fcp_prepare_offline_lun(struct fcp_lun *plun, int link_cnt, int tgt_cnt)
8423 {
8424 	struct fcp_pkt	*head = NULL;
8425 
8426 	ASSERT(mutex_owned(&LUN_TGT->tgt_mutex));
8427 
8428 	mutex_exit(&LUN_TGT->tgt_mutex);
8429 
8430 	head = fcp_scan_commands(plun);
8431 	if (head != NULL) {
8432 		fcp_abort_commands(head, LUN_PORT);
8433 	}
8434 
8435 	mutex_enter(&LUN_TGT->tgt_mutex);
8436 
8437 	if (plun->lun_cip && plun->lun_mpxio) {
8438 		/*
8439 		 * Notify MPxIO that the LUN busy state has been cleared
8440 		 */
8441 		if (!fcp_pass_to_hp(LUN_PORT, plun, plun->lun_cip,
8442 		    FCP_MPXIO_PATH_CLEAR_BUSY, link_cnt, tgt_cnt,
8443 		    0, 0)) {
8444 			fcp_log(CE_NOTE, LUN_PORT->port_dip,
8445 			    "Can not ENABLE LUN; D_ID=%x, LUN=%x",
8446 			    LUN_TGT->tgt_d_id, plun->lun_num);
8447 		}
8448 		/*
8449 		 * Notify MPxIO that the LUN is now marked for offline
8450 		 */
8451 		mutex_exit(&LUN_TGT->tgt_mutex);
8452 		(void) mdi_pi_disable_path(PIP(plun->lun_cip), DRIVER_DISABLE);
8453 		mutex_enter(&LUN_TGT->tgt_mutex);
8454 	}
8455 }
8456 
8457 static void
8458 fcp_offline_lun_now(struct fcp_lun *plun, int link_cnt, int tgt_cnt,
8459     int flags)
8460 {
8461 	ASSERT(mutex_owned(&LUN_TGT->tgt_mutex));
8462 
8463 	mutex_exit(&LUN_TGT->tgt_mutex);
8464 	fcp_update_offline_flags(plun);
8465 	mutex_enter(&LUN_TGT->tgt_mutex);
8466 
8467 	fcp_prepare_offline_lun(plun, link_cnt, tgt_cnt);
8468 
8469 	FCP_TRACE(fcp_logq, LUN_PORT->port_instbuf,
8470 	    fcp_trace, FCP_BUF_LEVEL_4, 0,
8471 	    "offline_lun: passing OFFLINE elem to HP thread");
8472 
8473 	if (plun->lun_cip) {
8474 		fcp_log(CE_NOTE, LUN_PORT->port_dip,
8475 		    "!offlining lun=%x (trace=%x), target=%x (trace=%x)",
8476 		    plun->lun_num, plun->lun_trace, LUN_TGT->tgt_d_id,
8477 		    LUN_TGT->tgt_trace);
8478 
8479 		if (!fcp_pass_to_hp(LUN_PORT, plun, plun->lun_cip, FCP_OFFLINE,
8480 		    link_cnt, tgt_cnt, flags, 0)) {
8481 			fcp_log(CE_CONT, LUN_PORT->port_dip,
8482 			    "Can not OFFLINE LUN; D_ID=%x, LUN=%x\n",
8483 			    LUN_TGT->tgt_d_id, plun->lun_num);
8484 		}
8485 	}
8486 }
8487 
8488 static void
8489 fcp_scan_offline_luns(struct fcp_port *pptr)
8490 {
8491 	struct fcp_lun_elem	*elem;
8492 	struct fcp_lun_elem	*prev;
8493 	struct fcp_lun_elem	*next;
8494 
8495 	ASSERT(MUTEX_HELD(&pptr->port_mutex));
8496 
8497 	prev = NULL;
8498 	elem = pptr->port_offline_luns;
8499 	while (elem) {
8500 		next = elem->next;
8501 		if (elem->time <= fcp_watchdog_time) {
8502 			int			changed = 1;
8503 			struct fcp_tgt	*ptgt = elem->plun->lun_tgt;
8504 
8505 			mutex_enter(&ptgt->tgt_mutex);
8506 			if (pptr->port_link_cnt == elem->link_cnt &&
8507 			    ptgt->tgt_change_cnt == elem->tgt_cnt) {
8508 				changed = 0;
8509 			}
8510 
8511 			if (!changed &&
8512 			    !(elem->plun->lun_state & FCP_TGT_OFFLINE)) {
8513 				fcp_offline_lun_now(elem->plun,
8514 				    elem->link_cnt, elem->tgt_cnt, elem->flags);
8515 			}
8516 			mutex_exit(&ptgt->tgt_mutex);
8517 
8518 			kmem_free(elem, sizeof (*elem));
8519 
8520 			if (prev) {
8521 				prev->next = next;
8522 			} else {
8523 				pptr->port_offline_luns = next;
8524 			}
8525 		} else {
8526 			prev = elem;
8527 		}
8528 		elem = next;
8529 	}
8530 }
8531 
8532 
8533 static void
8534 fcp_scan_offline_tgts(struct fcp_port *pptr)
8535 {
8536 	struct fcp_tgt_elem	*elem;
8537 	struct fcp_tgt_elem	*prev;
8538 	struct fcp_tgt_elem	*next;
8539 
8540 	ASSERT(MUTEX_HELD(&pptr->port_mutex));
8541 
8542 	prev = NULL;
8543 	elem = pptr->port_offline_tgts;
8544 	while (elem) {
8545 		next = elem->next;
8546 		if (elem->time <= fcp_watchdog_time) {
8547 			int		outdated = 1;
8548 			struct fcp_tgt	*ptgt = elem->ptgt;
8549 
8550 			mutex_enter(&ptgt->tgt_mutex);
8551 
8552 			if (ptgt->tgt_change_cnt == elem->tgt_cnt) {
8553 				/* No change on tgt since elem was created. */
8554 				outdated = 0;
8555 			} else if (ptgt->tgt_change_cnt == elem->tgt_cnt + 1 &&
8556 			    pptr->port_link_cnt == elem->link_cnt + 1 &&
8557 			    ptgt->tgt_statec_cause == FCP_CAUSE_LINK_DOWN) {
8558 				/*
8559 				 * Exactly one thing happened to the target
8560 				 * in between: the local port went offline.
8561 				 * As far as fp is concerned the remote port is
8562 				 * already gone, so it will not tell us again to
8563 				 * offline the target. We must offline it now.
8564 				 */
8565 				outdated = 0;
8566 			}
8567 
8568 			if (!outdated && !(ptgt->tgt_state &
8569 			    FCP_TGT_OFFLINE)) {
8570 				fcp_offline_target_now(pptr,
8571 				    ptgt, elem->link_cnt, elem->tgt_cnt,
8572 				    elem->flags);
8573 			}
8574 
8575 			mutex_exit(&ptgt->tgt_mutex);
8576 
8577 			kmem_free(elem, sizeof (*elem));
8578 
8579 			if (prev) {
8580 				prev->next = next;
8581 			} else {
8582 				pptr->port_offline_tgts = next;
8583 			}
8584 		} else {
8585 			prev = elem;
8586 		}
8587 		elem = next;
8588 	}
8589 }
8590 
8591 
8592 static void
8593 fcp_update_offline_flags(struct fcp_lun *plun)
8594 {
8595 	struct fcp_port	*pptr = LUN_PORT;
8596 	ASSERT(plun != NULL);
8597 
8598 	mutex_enter(&LUN_TGT->tgt_mutex);
8599 	plun->lun_state |= FCP_LUN_OFFLINE;
8600 	plun->lun_state &= ~(FCP_LUN_INIT | FCP_LUN_BUSY | FCP_LUN_MARK);
8601 
8602 	mutex_enter(&plun->lun_mutex);
8603 	if (plun->lun_cip && plun->lun_state & FCP_SCSI_LUN_TGT_INIT) {
8604 		dev_info_t *cdip = NULL;
8605 
8606 		mutex_exit(&LUN_TGT->tgt_mutex);
8607 
8608 		if (plun->lun_mpxio == 0) {
8609 			cdip = DIP(plun->lun_cip);
8610 		} else if (plun->lun_cip) {
8611 			cdip = mdi_pi_get_client(PIP(plun->lun_cip));
8612 		}
8613 
8614 		mutex_exit(&plun->lun_mutex);
8615 		if (cdip) {
8616 			(void) ndi_event_retrieve_cookie(
8617 			    pptr->port_ndi_event_hdl, cdip, FCAL_REMOVE_EVENT,
8618 			    &fcp_remove_eid, NDI_EVENT_NOPASS);
8619 			(void) ndi_event_run_callbacks(
8620 			    pptr->port_ndi_event_hdl, cdip,
8621 			    fcp_remove_eid, NULL);
8622 		}
8623 	} else {
8624 		mutex_exit(&plun->lun_mutex);
8625 		mutex_exit(&LUN_TGT->tgt_mutex);
8626 	}
8627 }
8628 
8629 
8630 /*
8631  * Scan all of the command pkts for this port, moving pkts that
8632  * match our LUN onto our own list (headed by "head")
8633  */
8634 static struct fcp_pkt *
8635 fcp_scan_commands(struct fcp_lun *plun)
8636 {
8637 	struct fcp_port	*pptr = LUN_PORT;
8638 
8639 	struct fcp_pkt	*cmd = NULL;	/* pkt cmd ptr */
8640 	struct fcp_pkt	*ncmd = NULL;	/* next pkt ptr */
8641 	struct fcp_pkt	*pcmd = NULL;	/* the previous command */
8642 
8643 	struct fcp_pkt	*head = NULL;	/* head of our list */
8644 	struct fcp_pkt	*tail = NULL;	/* tail of our list */
8645 
8646 	int			cmds_found = 0;
8647 
8648 	mutex_enter(&pptr->port_pkt_mutex);
8649 	for (cmd = pptr->port_pkt_head; cmd != NULL; cmd = ncmd) {
8650 		struct fcp_lun *tlun =
8651 		    ADDR2LUN(&cmd->cmd_pkt->pkt_address);
8652 
8653 		ncmd = cmd->cmd_next;	/* set next command */
8654 
8655 		/*
8656 		 * if this pkt is for a different LUN  or the
8657 		 * command is sent down, skip it.
8658 		 */
8659 		if (tlun != plun || cmd->cmd_state == FCP_PKT_ISSUED ||
8660 		    (cmd->cmd_pkt->pkt_flags & FLAG_NOINTR)) {
8661 			pcmd = cmd;
8662 			continue;
8663 		}
8664 		cmds_found++;
8665 		if (pcmd != NULL) {
8666 			ASSERT(pptr->port_pkt_head != cmd);
8667 			pcmd->cmd_next = cmd->cmd_next;
8668 		} else {
8669 			ASSERT(cmd == pptr->port_pkt_head);
8670 			pptr->port_pkt_head = cmd->cmd_next;
8671 		}
8672 
8673 		if (cmd == pptr->port_pkt_tail) {
8674 			pptr->port_pkt_tail = pcmd;
8675 			if (pcmd) {
8676 				pcmd->cmd_next = NULL;
8677 			}
8678 		}
8679 
8680 		if (head == NULL) {
8681 			head = tail = cmd;
8682 		} else {
8683 			ASSERT(tail != NULL);
8684 
8685 			tail->cmd_next = cmd;
8686 			tail = cmd;
8687 		}
8688 		cmd->cmd_next = NULL;
8689 	}
8690 	mutex_exit(&pptr->port_pkt_mutex);
8691 
8692 	FCP_DTRACE(fcp_logq, pptr->port_instbuf,
8693 	    fcp_trace, FCP_BUF_LEVEL_8, 0,
8694 	    "scan commands: %d cmd(s) found", cmds_found);
8695 
8696 	return (head);
8697 }
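/*
 * Editorial sketch (not part of the driver): the list-splice pattern
 * used by fcp_scan_commands() above, reduced to its core.  "pcmd"
 * trails "cmd" so that a matching packet can be unlinked in place and
 * appended to the private head/tail list that is returned.
 *
 *	for (cmd = port_pkt_head; cmd != NULL; cmd = ncmd) {
 *		ncmd = cmd->cmd_next;
 *		if (cmd is not for this LUN or is already issued) {
 *			pcmd = cmd;		keep it; remember previous
 *			continue;
 *		}
 *		unlink cmd (fix pcmd->cmd_next or port_pkt_head, and
 *		    port_pkt_tail if cmd was the last element);
 *		append cmd to the head/tail list; cmd->cmd_next = NULL;
 *	}
 */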
8698 
8699 
8700 /*
8701  * Abort all the commands in the command queue
8702  */
8703 static void
8704 fcp_abort_commands(struct fcp_pkt *head, struct fcp_port *pptr)
8705 {
8706 	struct fcp_pkt	*cmd = NULL;	/* pkt cmd ptr */
8707 	struct	fcp_pkt	*ncmd = NULL;	/* next pkt ptr */
8708 
8709 	ASSERT(mutex_owned(&pptr->port_mutex));
8710 
8711 	/* scan through the pkts and invalidate them */
8712 	for (cmd = head; cmd != NULL; cmd = ncmd) {
8713 		struct scsi_pkt *pkt = cmd->cmd_pkt;
8714 
8715 		ncmd = cmd->cmd_next;
8716 		ASSERT(pkt != NULL);
8717 
8718 		/*
8719 		 * The lun is going to be marked offline. Indicate to
8720 		 * the target driver not to requeue or retry this command,
8721 		 * as the device is going to be offlined soon.
8722 		 */
8723 		pkt->pkt_reason = CMD_DEV_GONE;
8724 		pkt->pkt_statistics = 0;
8725 		pkt->pkt_state = 0;
8726 
8727 		/* reset cmd flags/state */
8728 		cmd->cmd_flags &= ~CFLAG_IN_QUEUE;
8729 		cmd->cmd_state = FCP_PKT_IDLE;
8730 
8731 		/*
8732 		 * ensure we have a packet completion routine,
8733 		 * then call it.
8734 		 */
8735 		ASSERT(pkt->pkt_comp != NULL);
8736 
8737 		mutex_exit(&pptr->port_mutex);
8738 		fcp_post_callback(cmd);
8739 		mutex_enter(&pptr->port_mutex);
8740 	}
8741 }
8742 
8743 
8744 /*
8745  * the pkt_comp callback for command packets
8746  */
8747 static void
8748 fcp_cmd_callback(fc_packet_t *fpkt)
8749 {
8750 	struct fcp_pkt *cmd = (struct fcp_pkt *)fpkt->pkt_ulp_private;
8751 	struct scsi_pkt *pkt = cmd->cmd_pkt;
8752 	struct fcp_port *pptr = ADDR2FCP(&pkt->pkt_address);
8753 
8754 	ASSERT(cmd->cmd_state != FCP_PKT_IDLE);
8755 
8756 	if (cmd->cmd_state == FCP_PKT_IDLE) {
8757 		cmn_err(CE_PANIC, "Packet already completed %p",
8758 		    (void *)cmd);
8759 	}
8760 
8761 	/*
8762 	 * The watch thread should be freeing the packet; ignore it here.
8763 	 */
8764 	if (cmd->cmd_state == FCP_PKT_ABORTING) {
8765 		fcp_log(CE_CONT, pptr->port_dip,
8766 		    "!FCP: Pkt completed while aborting\n");
8767 		return;
8768 	}
8769 	cmd->cmd_state = FCP_PKT_IDLE;
8770 
8771 	fcp_complete_pkt(fpkt);
8772 
8773 #ifdef	DEBUG
8774 	mutex_enter(&pptr->port_pkt_mutex);
8775 	pptr->port_npkts--;
8776 	mutex_exit(&pptr->port_pkt_mutex);
8777 #endif /* DEBUG */
8778 
8779 	fcp_post_callback(cmd);
8780 }
8781 
8782 
8783 static void
8784 fcp_complete_pkt(fc_packet_t *fpkt)
8785 {
8786 	int			error = 0;
8787 	struct fcp_pkt	*cmd = (struct fcp_pkt *)
8788 	    fpkt->pkt_ulp_private;
8789 	struct scsi_pkt		*pkt = cmd->cmd_pkt;
8790 	struct fcp_port		*pptr = ADDR2FCP(&pkt->pkt_address);
8791 	struct fcp_lun	*plun;
8792 	struct fcp_tgt	*ptgt;
8793 	struct fcp_rsp		*rsp;
8794 	struct scsi_address	save;
8795 
8796 #ifdef	DEBUG
8797 	save = pkt->pkt_address;
8798 #endif /* DEBUG */
8799 
8800 	rsp = (struct fcp_rsp *)cmd->cmd_fcp_rsp;
8801 
8802 	if (fpkt->pkt_state == FC_PKT_SUCCESS) {
8803 		if (pptr->port_fcp_dma != FC_NO_DVMA_SPACE) {
8804 			FCP_CP_IN(fpkt->pkt_resp, rsp, fpkt->pkt_resp_acc,
8805 			    sizeof (struct fcp_rsp));
8806 		}
8807 
8808 		pkt->pkt_state = STATE_GOT_BUS | STATE_GOT_TARGET |
8809 		    STATE_SENT_CMD | STATE_GOT_STATUS;
8810 
8811 		pkt->pkt_resid = 0;
8812 
8813 		if (fpkt->pkt_datalen) {
8814 			pkt->pkt_state |= STATE_XFERRED_DATA;
8815 			if (fpkt->pkt_data_resid) {
8816 				error++;
8817 			}
8818 		}
8819 
8820 		if ((pkt->pkt_scbp != NULL) && ((*(pkt->pkt_scbp) =
8821 		    rsp->fcp_u.fcp_status.scsi_status) != STATUS_GOOD)) {
8822 			/*
8823 			 * The next two checks make sure that if the
8824 			 * command came back with a check condition but
8825 			 * with neither sense data nor a valid response,
8826 			 * the command gets retried.
8827 			 */
8828 			if (!rsp->fcp_u.fcp_status.rsp_len_set &&
8829 			    !rsp->fcp_u.fcp_status.sense_len_set) {
8830 				pkt->pkt_state &= ~STATE_XFERRED_DATA;
8831 				pkt->pkt_resid = cmd->cmd_dmacount;
8832 			}
8833 		}
8834 
8835 		if ((error | rsp->fcp_u.i_fcp_status | rsp->fcp_resid) == 0) {
8836 			return;
8837 		}
8838 
8839 		plun = ADDR2LUN(&pkt->pkt_address);
8840 		ptgt = plun->lun_tgt;
8841 		ASSERT(ptgt != NULL);
8842 
8843 		/*
8844 		 * Update the transfer resid, if appropriate
8845 		 */
8846 		if (rsp->fcp_u.fcp_status.resid_over ||
8847 		    rsp->fcp_u.fcp_status.resid_under) {
8848 			pkt->pkt_resid = rsp->fcp_resid;
8849 		}
8850 
8851 		/*
8852 		 * First see if we got a FCP protocol error.
8853 		 */
8854 		if (rsp->fcp_u.fcp_status.rsp_len_set) {
8855 			struct fcp_rsp_info	*bep;
8856 			bep = (struct fcp_rsp_info *)(cmd->cmd_fcp_rsp +
8857 			    sizeof (struct fcp_rsp));
8858 
8859 			if (fcp_validate_fcp_response(rsp, pptr) !=
8860 			    FC_SUCCESS) {
8861 				pkt->pkt_reason = CMD_CMPLT;
8862 				*(pkt->pkt_scbp) = STATUS_CHECK;
8863 
8864 				fcp_log(CE_WARN, pptr->port_dip,
8865 				    "!SCSI command to d_id=0x%x lun=0x%x"
8866 				    " failed, Bad FCP response values:"
8867 				    " rsvd1=%x, rsvd2=%x, sts-rsvd1=%x,"
8868 				    " sts-rsvd2=%x, rsplen=%x, senselen=%x",
8869 				    ptgt->tgt_d_id, plun->lun_num,
8870 				    rsp->reserved_0, rsp->reserved_1,
8871 				    rsp->fcp_u.fcp_status.reserved_0,
8872 				    rsp->fcp_u.fcp_status.reserved_1,
8873 				    rsp->fcp_response_len, rsp->fcp_sense_len);
8874 
8875 				return;
8876 			}
8877 
8878 			if (pptr->port_fcp_dma != FC_NO_DVMA_SPACE) {
8879 				FCP_CP_IN(fpkt->pkt_resp +
8880 				    sizeof (struct fcp_rsp), bep,
8881 				    fpkt->pkt_resp_acc,
8882 				    sizeof (struct fcp_rsp_info));
8883 			}
8884 
8885 			if (bep->rsp_code != FCP_NO_FAILURE) {
8886 				child_info_t	*cip;
8887 
8888 				pkt->pkt_reason = CMD_TRAN_ERR;
8889 
8890 				mutex_enter(&plun->lun_mutex);
8891 				cip = plun->lun_cip;
8892 				mutex_exit(&plun->lun_mutex);
8893 
8894 				FCP_TRACE(fcp_logq, pptr->port_instbuf,
8895 				    fcp_trace, FCP_BUF_LEVEL_2, 0,
8896 				    "FCP response error on cmd=%p"
8897 				    " target=0x%x, cip=%p", cmd,
8898 				    ptgt->tgt_d_id, cip);
8899 			}
8900 		}
8901 
8902 		/*
8903 		 * See if we got a SCSI error with sense data
8904 		 */
8905 		if (rsp->fcp_u.fcp_status.sense_len_set) {
8906 			uchar_t				rqlen;
8907 			caddr_t				sense_from;
8908 			child_info_t			*cip;
8909 			timeout_id_t			tid;
8910 			struct scsi_arq_status		*arq;
8911 			struct scsi_extended_sense	*sense_to;
8912 
8913 			arq = (struct scsi_arq_status *)pkt->pkt_scbp;
8914 			sense_to = &arq->sts_sensedata;
8915 
8916 			rqlen = (uchar_t)min(rsp->fcp_sense_len,
8917 			    sizeof (struct scsi_extended_sense));
8918 
8919 			sense_from = (caddr_t)fpkt->pkt_resp +
8920 			    sizeof (struct fcp_rsp) + rsp->fcp_response_len;
8921 
8922 			if (fcp_validate_fcp_response(rsp, pptr) !=
8923 			    FC_SUCCESS) {
8924 				pkt->pkt_reason = CMD_CMPLT;
8925 				*(pkt->pkt_scbp) = STATUS_CHECK;
8926 
8927 				fcp_log(CE_WARN, pptr->port_dip,
8928 				    "!SCSI command to d_id=0x%x lun=0x%x"
8929 				    " failed, Bad FCP response values:"
8930 				    " rsvd1=%x, rsvd2=%x, sts-rsvd1=%x,"
8931 				    " sts-rsvd2=%x, rsplen=%x, senselen=%x",
8932 				    ptgt->tgt_d_id, plun->lun_num,
8933 				    rsp->reserved_0, rsp->reserved_1,
8934 				    rsp->fcp_u.fcp_status.reserved_0,
8935 				    rsp->fcp_u.fcp_status.reserved_1,
8936 				    rsp->fcp_response_len, rsp->fcp_sense_len);
8937 
8938 				return;
8939 			}
8940 
8941 			/*
8942 			 * copy in sense information
8943 			 */
8944 			if (pptr->port_fcp_dma != FC_NO_DVMA_SPACE) {
8945 				FCP_CP_IN(sense_from, sense_to,
8946 				    fpkt->pkt_resp_acc, rqlen);
8947 			} else {
8948 				bcopy(sense_from, sense_to, rqlen);
8949 			}
8950 
8951 			if ((FCP_SENSE_REPORTLUN_CHANGED(sense_to)) ||
8952 			    (FCP_SENSE_NO_LUN(sense_to))) {
8953 				mutex_enter(&ptgt->tgt_mutex);
8954 				if (ptgt->tgt_tid == NULL) {
8955 					/*
8956 					 * Kick off rediscovery
8957 					 */
8958 					tid = timeout(fcp_reconfigure_luns,
8959 					    (caddr_t)ptgt, drv_usectohz(1));
8960 
8961 					ptgt->tgt_tid = tid;
8962 					ptgt->tgt_state |= FCP_TGT_BUSY;
8963 				}
8964 				mutex_exit(&ptgt->tgt_mutex);
8965 				if (FCP_SENSE_REPORTLUN_CHANGED(sense_to)) {
8966 					FCP_TRACE(fcp_logq, pptr->port_instbuf,
8967 					    fcp_trace, FCP_BUF_LEVEL_3, 0,
8968 					    "!FCP: Report Lun Has Changed"
8969 					    " target=%x", ptgt->tgt_d_id);
8970 				} else if (FCP_SENSE_NO_LUN(sense_to)) {
8971 					FCP_TRACE(fcp_logq, pptr->port_instbuf,
8972 					    fcp_trace, FCP_BUF_LEVEL_3, 0,
8973 					    "!FCP: LU Not Supported"
8974 					    " target=%x", ptgt->tgt_d_id);
8975 				}
8976 			}
8977 			ASSERT(pkt->pkt_scbp != NULL);
8978 
8979 			pkt->pkt_state |= STATE_ARQ_DONE;
8980 
8981 			arq->sts_rqpkt_resid = SENSE_LENGTH - rqlen;
8982 
8983 			*((uchar_t *)&arq->sts_rqpkt_status) = STATUS_GOOD;
8984 			arq->sts_rqpkt_reason = 0;
8985 			arq->sts_rqpkt_statistics = 0;
8986 
8987 			arq->sts_rqpkt_state = STATE_GOT_BUS |
8988 			    STATE_GOT_TARGET | STATE_SENT_CMD |
8989 			    STATE_GOT_STATUS | STATE_ARQ_DONE |
8990 			    STATE_XFERRED_DATA;
8991 
8992 			mutex_enter(&plun->lun_mutex);
8993 			cip = plun->lun_cip;
8994 			mutex_exit(&plun->lun_mutex);
8995 
8996 			FCP_DTRACE(fcp_logq, pptr->port_instbuf,
8997 			    fcp_trace, FCP_BUF_LEVEL_8, 0,
8998 			    "SCSI Check condition on cmd=%p target=0x%x"
8999 			    " LUN=%p, cmd=%x SCSI status=%x, es key=%x"
9000 			    " ASC=%x ASCQ=%x", cmd, ptgt->tgt_d_id, cip,
9001 			    cmd->cmd_fcp_cmd.fcp_cdb[0],
9002 			    rsp->fcp_u.fcp_status.scsi_status,
9003 			    sense_to->es_key, sense_to->es_add_code,
9004 			    sense_to->es_qual_code);
9005 		}
9006 	} else {
9007 		plun = ADDR2LUN(&pkt->pkt_address);
9008 		ptgt = plun->lun_tgt;
9009 		ASSERT(ptgt != NULL);
9010 
9011 		/*
9012 		 * Work harder to translate errors into ones the target driver
9013 		 * understands. Note with despair that the target drivers
9014 		 * don't decode pkt_state and pkt_reason exhaustively.
9015 		 * They resort to using the big hammer most often, which
9016 		 * may not get fixed in the lifetime of this driver.
9017 		 */
9018 		pkt->pkt_state = 0;
9019 		pkt->pkt_statistics = 0;
9020 
9021 		switch (fpkt->pkt_state) {
9022 		case FC_PKT_TRAN_ERROR:
9023 			switch (fpkt->pkt_reason) {
9024 			case FC_REASON_OVERRUN:
9025 				pkt->pkt_reason = CMD_CMD_OVR;
9026 				pkt->pkt_statistics |= STAT_ABORTED;
9027 				break;
9028 
9029 			case FC_REASON_XCHG_BSY: {
9030 				caddr_t ptr;
9031 
9032 				pkt->pkt_reason = CMD_CMPLT;	/* Lie */
9033 
9034 				ptr = (caddr_t)pkt->pkt_scbp;
9035 				if (ptr) {
9036 					*ptr = STATUS_BUSY;
9037 				}
9038 				break;
9039 			}
9040 
9041 			case FC_REASON_ABORTED:
9042 				pkt->pkt_reason = CMD_TRAN_ERR;
9043 				pkt->pkt_statistics |= STAT_ABORTED;
9044 				break;
9045 
9046 			case FC_REASON_ABORT_FAILED:
9047 				pkt->pkt_reason = CMD_ABORT_FAIL;
9048 				break;
9049 
9050 			case FC_REASON_NO_SEQ_INIT:
9051 			case FC_REASON_CRC_ERROR:
9052 				pkt->pkt_reason = CMD_TRAN_ERR;
9053 				pkt->pkt_statistics |= STAT_ABORTED;
9054 				break;
9055 			default:
9056 				pkt->pkt_reason = CMD_TRAN_ERR;
9057 				break;
9058 			}
9059 			break;
9060 
9061 		case FC_PKT_PORT_OFFLINE: {
9062 			dev_info_t	*cdip = NULL;
9063 			caddr_t		ptr;
9064 
9065 			if (fpkt->pkt_reason == FC_REASON_LOGIN_REQUIRED) {
9066 				FCP_DTRACE(fcp_logq, pptr->port_instbuf,
9067 				    fcp_trace, FCP_BUF_LEVEL_8, 0,
9068 				    "SCSI cmd; LOGIN REQUIRED from FCA for %x",
9069 				    ptgt->tgt_d_id);
9070 			}
9071 
9072 			mutex_enter(&plun->lun_mutex);
9073 			if (plun->lun_mpxio == 0) {
9074 				cdip = DIP(plun->lun_cip);
9075 			} else if (plun->lun_cip) {
9076 				cdip = mdi_pi_get_client(PIP(plun->lun_cip));
9077 			}
9078 
9079 			mutex_exit(&plun->lun_mutex);
9080 
9081 			if (cdip) {
9082 				(void) ndi_event_retrieve_cookie(
9083 				    pptr->port_ndi_event_hdl, cdip,
9084 				    FCAL_REMOVE_EVENT, &fcp_remove_eid,
9085 				    NDI_EVENT_NOPASS);
9086 				(void) ndi_event_run_callbacks(
9087 				    pptr->port_ndi_event_hdl, cdip,
9088 				    fcp_remove_eid, NULL);
9089 			}
9090 
9091 			/*
9092 			 * If the link goes off-line for a LIP,
9093 			 * this will cause an error in the st and
9094 			 * sgen drivers. By setting BUSY we give
9095 			 * those drivers the chance to retry
9096 			 * before they give up on the job. st
9097 			 * remembers how many times it has retried.
9098 			 */
9099 
9100 			if ((plun->lun_type == DTYPE_SEQUENTIAL) ||
9101 			    (plun->lun_type == DTYPE_CHANGER)) {
9102 				pkt->pkt_reason = CMD_CMPLT;	/* Lie */
9103 				ptr = (caddr_t)pkt->pkt_scbp;
9104 				if (ptr) {
9105 					*ptr = STATUS_BUSY;
9106 				}
9107 			} else {
9108 				pkt->pkt_reason = CMD_TRAN_ERR;
9109 				pkt->pkt_statistics |= STAT_BUS_RESET;
9110 			}
9111 			break;
9112 		}
9113 
9114 		case FC_PKT_TRAN_BSY:
9115 			/*
9116 			 * Use the ssd Qfull handling here.
9117 			 */
9118 			*pkt->pkt_scbp = STATUS_INTERMEDIATE;
9119 			pkt->pkt_state = STATE_GOT_BUS;
9120 			break;
9121 
9122 		case FC_PKT_TIMEOUT:
9123 			pkt->pkt_reason = CMD_TIMEOUT;
9124 			if (fpkt->pkt_reason == FC_REASON_ABORT_FAILED) {
9125 				pkt->pkt_statistics |= STAT_TIMEOUT;
9126 			} else {
9127 				pkt->pkt_statistics |= STAT_ABORTED;
9128 			}
9129 			break;
9130 
9131 		case FC_PKT_LOCAL_RJT:
9132 			switch (fpkt->pkt_reason) {
9133 			case FC_REASON_OFFLINE: {
9134 				dev_info_t	*cdip = NULL;
9135 
9136 				mutex_enter(&plun->lun_mutex);
9137 				if (plun->lun_mpxio == 0) {
9138 					cdip = DIP(plun->lun_cip);
9139 				} else if (plun->lun_cip) {
9140 					cdip = mdi_pi_get_client(
9141 					    PIP(plun->lun_cip));
9142 				}
9143 				mutex_exit(&plun->lun_mutex);
9144 
9145 				if (cdip) {
9146 					(void) ndi_event_retrieve_cookie(
9147 					    pptr->port_ndi_event_hdl, cdip,
9148 					    FCAL_REMOVE_EVENT,
9149 					    &fcp_remove_eid,
9150 					    NDI_EVENT_NOPASS);
9151 					(void) ndi_event_run_callbacks(
9152 					    pptr->port_ndi_event_hdl,
9153 					    cdip, fcp_remove_eid, NULL);
9154 				}
9155 
9156 				pkt->pkt_reason = CMD_TRAN_ERR;
9157 				pkt->pkt_statistics |= STAT_BUS_RESET;
9158 
9159 				break;
9160 			}
9161 
9162 			case FC_REASON_NOMEM:
9163 			case FC_REASON_QFULL: {
9164 				caddr_t ptr;
9165 
9166 				pkt->pkt_reason = CMD_CMPLT;	/* Lie */
9167 				ptr = (caddr_t)pkt->pkt_scbp;
9168 				if (ptr) {
9169 					*ptr = STATUS_BUSY;
9170 				}
9171 				break;
9172 			}
9173 
9174 			case FC_REASON_DMA_ERROR:
9175 				pkt->pkt_reason = CMD_DMA_DERR;
9176 				pkt->pkt_statistics |= STAT_ABORTED;
9177 				break;
9178 
9179 			case FC_REASON_CRC_ERROR:
9180 			case FC_REASON_UNDERRUN: {
9181 				uchar_t		status;
9182 				/*
9183 				 * Workaround for bug 4240945:
9184 				 * the IB on the A5K doesn't set the underrun
9185 				 * bit in the FCP status when it transfers
9186 				 * less than the requested amount of data.
9187 				 * Work around the ses problem to keep luxadm
9188 				 * happy until the IB firmware is fixed.
9189 				 */
9190 				if (pptr->port_fcp_dma != FC_NO_DVMA_SPACE) {
9191 					FCP_CP_IN(fpkt->pkt_resp, rsp,
9192 					    fpkt->pkt_resp_acc,
9193 					    sizeof (struct fcp_rsp));
9194 				}
9195 				status = rsp->fcp_u.fcp_status.scsi_status;
9196 				if (((plun->lun_type & DTYPE_MASK) ==
9197 				    DTYPE_ESI) && (status == STATUS_GOOD)) {
9198 					pkt->pkt_reason = CMD_CMPLT;
9199 					*pkt->pkt_scbp = status;
9200 					pkt->pkt_resid = 0;
9201 				} else {
9202 					pkt->pkt_reason = CMD_TRAN_ERR;
9203 					pkt->pkt_statistics |= STAT_ABORTED;
9204 				}
9205 				break;
9206 			}
9207 
9208 			case FC_REASON_NO_CONNECTION:
9209 			case FC_REASON_UNSUPPORTED:
9210 			case FC_REASON_ILLEGAL_REQ:
9211 			case FC_REASON_BAD_SID:
9212 			case FC_REASON_DIAG_BUSY:
9213 			case FC_REASON_FCAL_OPN_FAIL:
9214 			case FC_REASON_BAD_XID:
9215 			default:
9216 				pkt->pkt_reason = CMD_TRAN_ERR;
9217 				pkt->pkt_statistics |= STAT_ABORTED;
9218 				break;
9219 
9220 			}
9221 			break;
9222 
9223 		case FC_PKT_NPORT_RJT:
9224 		case FC_PKT_FABRIC_RJT:
9225 		case FC_PKT_NPORT_BSY:
9226 		case FC_PKT_FABRIC_BSY:
9227 		default:
9228 			FCP_DTRACE(fcp_logq, pptr->port_instbuf,
9229 			    fcp_trace, FCP_BUF_LEVEL_8, 0,
9230 			    "FC Status 0x%x, reason 0x%x",
9231 			    fpkt->pkt_state, fpkt->pkt_reason);
9232 			pkt->pkt_reason = CMD_TRAN_ERR;
9233 			pkt->pkt_statistics |= STAT_ABORTED;
9234 			break;
9235 		}
9236 
9237 		FCP_DTRACE(fcp_logq, pptr->port_instbuf,
9238 		    fcp_trace, FCP_BUF_LEVEL_9, 0,
9239 		    "!FC error on cmd=%p target=0x%x: pkt state=0x%x "
9240 		    " pkt reason=0x%x", cmd, ptgt->tgt_d_id, fpkt->pkt_state,
9241 		    fpkt->pkt_reason);
9242 	}
9243 
9244 	ASSERT(save.a_hba_tran == pkt->pkt_address.a_hba_tran);
9245 }
9246 
9247 
9248 static int
9249 fcp_validate_fcp_response(struct fcp_rsp *rsp, struct fcp_port *pptr)
9250 {
9251 	if (rsp->reserved_0 || rsp->reserved_1 ||
9252 	    rsp->fcp_u.fcp_status.reserved_0 ||
9253 	    rsp->fcp_u.fcp_status.reserved_1) {
9254 		/*
9255 		 * These reserved fields should ideally be zero. FCP-2 says the
9256 		 * recipient need not check that reserved fields are zero. If
9257 		 * they are not zero, we do not make a fuss about it; we just
9258 		 * log it (to the trace buffer and messages file in debug, to
9259 		 * the trace buffer only in non-debug) and move on.
9260 		 *
9261 		 * Non-zero reserved fields were seen with minnows.
9262 		 *
9263 		 * qlc takes care of some of this but we cannot assume that all
9264 		 * FCAs will do so.
9265 		 */
9266 		FCP_TRACE(fcp_logq, pptr->port_instbuf, fcp_trace,
9267 		    FCP_BUF_LEVEL_5, 0,
9268 		    "Got fcp response packet with non-zero reserved fields "
9269 		    "rsp->reserved_0:0x%x, rsp_reserved_1:0x%x, "
9270 		    "status.reserved_0:0x%x, status.reserved_1:0x%x",
9271 		    rsp->reserved_0, rsp->reserved_1,
9272 		    rsp->fcp_u.fcp_status.reserved_0,
9273 		    rsp->fcp_u.fcp_status.reserved_1);
9274 	}
9275 
9276 	if (rsp->fcp_u.fcp_status.rsp_len_set && (rsp->fcp_response_len >
9277 	    (FCP_MAX_RSP_IU_SIZE - sizeof (struct fcp_rsp)))) {
9278 		return (FC_FAILURE);
9279 	}
9280 
9281 	if (rsp->fcp_u.fcp_status.sense_len_set && rsp->fcp_sense_len >
9282 	    (FCP_MAX_RSP_IU_SIZE - rsp->fcp_response_len -
9283 	    sizeof (struct fcp_rsp))) {
9284 		return (FC_FAILURE);
9285 	}
9286 
9287 	return (FC_SUCCESS);
9288 }
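
/*
 * Editor's note, an illustrative sketch (not part of the driver): callers
 * are expected to run fcp_validate_fcp_response() before trusting the
 * variable-length fields of an FCP_RSP IU, because fcp_response_len and
 * fcp_sense_len arrive straight off the wire.  The caller pattern used in
 * the completion path above looks like this:
 *
 *	if (fcp_validate_fcp_response(rsp, pptr) != FC_SUCCESS) {
 *		pkt->pkt_reason = CMD_CMPLT;
 *		*(pkt->pkt_scbp) = STATUS_CHECK;
 *		return;
 *	}
 *	sense_from = (caddr_t)fpkt->pkt_resp + sizeof (struct fcp_rsp) +
 *	    rsp->fcp_response_len;
 *
 * The offset arithmetic is only safe once the checks above have bounded
 * fcp_response_len and fcp_sense_len to FCP_MAX_RSP_IU_SIZE.
 */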
9289 
9290 
9291 /*
9292  * This is called when there is a change in the device state. The case
9293  * handled here is: if the d_id does not match, offline this target and
9294  * online a new target with the new d_id.  Called from fcp_handle_devices()
9295  * with port_mutex held.
9296  */
9297 static int
9298 fcp_device_changed(struct fcp_port *pptr, struct fcp_tgt *ptgt,
9299     fc_portmap_t *map_entry, int link_cnt, int tgt_cnt, int cause)
9300 {
9301 	ASSERT(mutex_owned(&pptr->port_mutex));
9302 
9303 	FCP_TRACE(fcp_logq, pptr->port_instbuf,
9304 	    fcp_trace, FCP_BUF_LEVEL_3, 0,
9305 	    "Starting fcp_device_changed...");
9306 
9307 	/*
9308 	 * The two cases in which fcp_device_changed() is called are when
9309 	 * the target changes its d_id or its hard address.
9310 	 */
9311 	if ((ptgt->tgt_d_id != map_entry->map_did.port_id) ||
9312 	    (FC_TOP_EXTERNAL(pptr->port_topology) &&
9313 	    (ptgt->tgt_hard_addr != map_entry->map_hard_addr.hard_addr))) {
9314 
9315 		/* offline this target */
9316 		mutex_enter(&ptgt->tgt_mutex);
9317 		if (!(ptgt->tgt_state & FCP_TGT_OFFLINE)) {
9318 			(void) fcp_offline_target(pptr, ptgt, link_cnt,
9319 			    0, 1, NDI_DEVI_REMOVE);
9320 		}
9321 		mutex_exit(&ptgt->tgt_mutex);
9322 
9323 		fcp_log(CE_NOTE, pptr->port_dip,
9324 		    "Change in target properties: Old D_ID=%x New D_ID=%x"
9325 		    " Old HA=%x New HA=%x", ptgt->tgt_d_id,
9326 		    map_entry->map_did.port_id, ptgt->tgt_hard_addr,
9327 		    map_entry->map_hard_addr.hard_addr);
9328 	}
9329 
9330 	return (fcp_handle_mapflags(pptr, ptgt, map_entry,
9331 	    link_cnt, tgt_cnt, cause));
9332 }
9333 
9334 /*
9335  *     Function: fcp_alloc_lun
9336  *
9337  *  Description: Creates a new lun structure and adds it to the list
9338  *		 of luns of the target.
9339  *
9340  *     Argument: ptgt		Target the lun will belong to.
9341  *
9342  * Return Value: NULL		Failed
9343  *		 Not NULL	Succeeded
9344  *
9345  *	Context: Kernel context
9346  */
9347 static struct fcp_lun *
9348 fcp_alloc_lun(struct fcp_tgt *ptgt)
9349 {
9350 	struct fcp_lun *plun;
9351 
9352 	plun = kmem_zalloc(sizeof (struct fcp_lun), KM_NOSLEEP);
9353 	if (plun != NULL) {
9354 		/*
9355 		 * Initialize the mutex before putting the LUN on the target
9356 		 * list, especially before releasing the target mutex.
9357 		 */
9358 		mutex_init(&plun->lun_mutex, NULL, MUTEX_DRIVER, NULL);
9359 		plun->lun_tgt = ptgt;
9360 
9361 		mutex_enter(&ptgt->tgt_mutex);
9362 		plun->lun_next = ptgt->tgt_lun;
9363 		ptgt->tgt_lun = plun;
9364 		plun->lun_old_guid = NULL;
9365 		plun->lun_old_guid_size = 0;
9366 		mutex_exit(&ptgt->tgt_mutex);
9367 	}
9368 
9369 	return (plun);
9370 }
9371 
9372 /*
9373  *     Function: fcp_dealloc_lun
9374  *
9375  *  Description: Frees the LUN structure passed by the caller.
9376  *
9377  *     Argument: plun		LUN structure to free.
9378  *
9379  * Return Value: None
9380  *
9381  *	Context: Kernel context.
9382  */
9383 static void
9384 fcp_dealloc_lun(struct fcp_lun *plun)
9385 {
9386 	mutex_enter(&plun->lun_mutex);
9387 	if (plun->lun_cip) {
9388 		fcp_remove_child(plun);
9389 	}
9390 	mutex_exit(&plun->lun_mutex);
9391 
9392 	mutex_destroy(&plun->lun_mutex);
9393 	if (plun->lun_guid) {
9394 		kmem_free(plun->lun_guid, plun->lun_guid_size);
9395 	}
9396 	if (plun->lun_old_guid) {
9397 		kmem_free(plun->lun_old_guid, plun->lun_old_guid_size);
9398 	}
9399 	kmem_free(plun, sizeof (*plun));
9400 }
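
/*
 * Editor's note, a hedged sketch (not part of the driver) of how
 * fcp_alloc_lun() and fcp_dealloc_lun() pair up in the discovery path,
 * with error handling elided:
 *
 *	struct fcp_lun	*plun;
 *
 *	plun = fcp_alloc_lun(ptgt);	(KM_NOSLEEP, so this can fail)
 *	if (plun == NULL) {
 *		return (NULL);
 *	}
 *	... discovery fills in lun_num, lun_type, lun_guid, ...
 *	fcp_dealloc_lun(plun);
 *
 * Note that fcp_dealloc_lun() removes the child node and frees the GUIDs
 * but does not unlink the LUN from ptgt->tgt_lun; the caller is presumably
 * responsible for that before freeing.
 */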
9401 
9402 /*
9403  *     Function: fcp_alloc_tgt
9404  *
9405  *  Description: Creates a new target structure and adds it to the port
9406  *		 hash list.
9407  *
9408  *     Argument: pptr		fcp port structure
9409  *		 *map_entry	entry describing the target to create
9410  *		 link_cnt	Link state change counter
9411  *
9412  * Return Value: NULL		Failed
9413  *		 Not NULL	Succeeded
9414  *
9415  *	Context: Kernel context.
9416  */
9417 static struct fcp_tgt *
9418 fcp_alloc_tgt(struct fcp_port *pptr, fc_portmap_t *map_entry, int link_cnt)
9419 {
9420 	int			hash;
9421 	uchar_t			*wwn;
9422 	struct fcp_tgt	*ptgt;
9423 
9424 	ptgt = kmem_zalloc(sizeof (*ptgt), KM_NOSLEEP);
9425 	if (ptgt != NULL) {
9426 		mutex_enter(&pptr->port_mutex);
9427 		if (link_cnt != pptr->port_link_cnt) {
9428 			/*
9429 			 * oh oh -- another link reset
9430 			 * in progress -- give up
9431 			 */
9432 			mutex_exit(&pptr->port_mutex);
9433 			kmem_free(ptgt, sizeof (*ptgt));
9434 			ptgt = NULL;
9435 		} else {
9436 			/*
9437 			 * initialize the mutex before putting the target on the
9438 			 * port's wwn hash list, especially before releasing the port
9439 			 * mutex.
9440 			 */
9441 			mutex_init(&ptgt->tgt_mutex, NULL, MUTEX_DRIVER, NULL);
9442 
9443 			/* add new target entry to the port's hash list */
9444 			wwn = (uchar_t *)&map_entry->map_pwwn;
9445 			hash = FCP_HASH(wwn);
9446 
9447 			ptgt->tgt_next = pptr->port_tgt_hash_table[hash];
9448 			pptr->port_tgt_hash_table[hash] = ptgt;
9449 
9450 			/* save cross-ptr */
9451 			ptgt->tgt_port = pptr;
9452 
9453 			ptgt->tgt_change_cnt = 1;
9454 
9455 			/* initialize the target manual_config_only flag */
9456 			if (fcp_enable_auto_configuration) {
9457 				ptgt->tgt_manual_config_only = 0;
9458 			} else {
9459 				ptgt->tgt_manual_config_only = 1;
9460 			}
9461 
9462 			mutex_exit(&pptr->port_mutex);
9463 		}
9464 	}
9465 
9466 	return (ptgt);
9467 }
9468 
9469 /*
9470  *     Function: fcp_dealloc_tgt
9471  *
9472  *  Description: Frees the target structure passed by the caller.
9473  *
9474  *     Argument: ptgt		Target structure to free.
9475  *
9476  * Return Value: None
9477  *
9478  *	Context: Kernel context.
9479  */
9480 static void
9481 fcp_dealloc_tgt(struct fcp_tgt *ptgt)
9482 {
9483 	mutex_destroy(&ptgt->tgt_mutex);
9484 	kmem_free(ptgt, sizeof (*ptgt));
9485 }
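
/*
 * Editor's note, an illustrative sketch (not part of the driver):
 * fcp_alloc_tgt() chains the new target into
 * pptr->port_tgt_hash_table[FCP_HASH(wwn)], so the matching lookup walks
 * that chain under port_mutex.  The WWN field name used below
 * (tgt_port_wwn) is an assumption; tgt_next and port_tgt_hash_table are
 * taken from the code above:
 *
 *	mutex_enter(&pptr->port_mutex);
 *	for (ptgt = pptr->port_tgt_hash_table[FCP_HASH(wwn)];
 *	    ptgt != NULL; ptgt = ptgt->tgt_next) {
 *		if (bcmp(wwn, &ptgt->tgt_port_wwn, FC_WWN_SIZE) == 0) {
 *			break;
 *		}
 *	}
 *	mutex_exit(&pptr->port_mutex);
 *
 * fcp_dealloc_tgt() only destroys the mutex and frees the memory; removal
 * from the hash chain is presumably done by the caller beforehand.
 */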
9486 
9487 
9488 /*
9489  * Handle STATUS_QFULL and STATUS_BUSY by performing delayed retry
9490  *
9491  *	Device discovery commands are not retried forever, as
9492  *	that would have repercussions on other devices that need
9493  *	to be submitted to the hotplug thread. After a quick glance
9494  *	at the SCSI-3 spec, it was found that the spec doesn't
9495  *	mandate a forever retry; rather, it recommends a delayed retry.
9496  *
9497  *	Since the Photon IB is single threaded, STATUS_BUSY is common
9498  *	in a 4+ initiator environment. Make sure the total time
9499  *	spent on retries (including the command timeout) does not
9500  *	exceed 60 seconds (see the worked example after this function).
9501  */
9502 static void
9503 fcp_queue_ipkt(struct fcp_port *pptr, fc_packet_t *fpkt)
9504 {
9505 	struct fcp_ipkt *icmd = (struct fcp_ipkt *)fpkt->pkt_ulp_private;
9506 	struct fcp_tgt *ptgt = icmd->ipkt_tgt;
9507 
9508 	mutex_enter(&pptr->port_mutex);
9509 	mutex_enter(&ptgt->tgt_mutex);
9510 	if (FCP_STATE_CHANGED(pptr, ptgt, icmd)) {
9511 		FCP_TRACE(fcp_logq, pptr->port_instbuf,
9512 		    fcp_trace, FCP_BUF_LEVEL_2, 0,
9513 		    "fcp_queue_ipkt,1: state change occurred"
9514 		    " for D_ID=0x%x", ptgt->tgt_d_id);
9515 		mutex_exit(&ptgt->tgt_mutex);
9516 		mutex_exit(&pptr->port_mutex);
9517 		(void) fcp_call_finish_init(pptr, ptgt, icmd->ipkt_link_cnt,
9518 		    icmd->ipkt_change_cnt, icmd->ipkt_cause);
9519 		fcp_icmd_free(pptr, icmd);
9520 		return;
9521 	}
9522 	mutex_exit(&ptgt->tgt_mutex);
9523 
9524 	icmd->ipkt_restart = fcp_watchdog_time + icmd->ipkt_retries++;
9525 
9526 	if (pptr->port_ipkt_list != NULL) {
9527 		/* add pkt to front of doubly-linked list */
9528 		pptr->port_ipkt_list->ipkt_prev = icmd;
9529 		icmd->ipkt_next = pptr->port_ipkt_list;
9530 		pptr->port_ipkt_list = icmd;
9531 		icmd->ipkt_prev = NULL;
9532 	} else {
9533 		/* this is the first/only pkt on the list */
9534 		pptr->port_ipkt_list = icmd;
9535 		icmd->ipkt_next = NULL;
9536 		icmd->ipkt_prev = NULL;
9537 	}
9538 	mutex_exit(&pptr->port_mutex);
9539 }
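
/*
 * Editor's note, a worked example of the delayed retry above, assuming the
 * watchdog runs roughly once per second and fcp_watchdog_time counts those
 * passes:
 *
 *	icmd->ipkt_restart = fcp_watchdog_time + icmd->ipkt_retries++;
 *
 * The first requeue is retried on the next watchdog pass, the second waits
 * one extra pass, the third two extra passes, and so on.  The cumulative
 * queue delay after n requeues is therefore about n*(n-1)/2 seconds, so on
 * the order of ten requeues stays within the 60 second budget described in
 * the block comment before this function.
 */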
9540 
9541 /*
9542  *     Function: fcp_transport
9543  *
9544  *  Description: This function submits the Fibre Channel packet to the transport
9545  *		 layer by calling fc_ulp_transport().  If fc_ulp_transport()
9546  *		 fails the submission, the treatment depends on the value of
9547  *		 the variable internal.
9548  *
9549  *     Argument: port_handle	fp/fctl port handle.
9550  *		 *fpkt		Packet to submit to the transport layer.
9551  *		 internal	Not zero when it's an internal packet.
9552  *
9553  * Return Value: FC_TRAN_BUSY
9554  *		 FC_STATEC_BUSY
9555  *		 FC_OFFLINE
9556  *		 FC_LOGINREQ
9557  *		 FC_DEVICE_BUSY
9558  *		 FC_SUCCESS
9559  */
9560 static int
9561 fcp_transport(opaque_t port_handle, fc_packet_t *fpkt, int internal)
9562 {
9563 	int	rval;
9564 
9565 	rval = fc_ulp_transport(port_handle, fpkt);
9566 	if (rval == FC_SUCCESS) {
9567 		return (rval);
9568 	}
9569 
9570 	/*
9571 	 * The LUN isn't marked BUSY or OFFLINE, so we got here to
9572 	 * transport a command. If the underlying modules see a state
9573 	 * change, or if the port is OFFLINE, that means the state change
9574 	 * hasn't reached FCP yet, so re-queue the command for deferred
9575 	 * submission.
9576 	 */
9577 	if ((rval == FC_STATEC_BUSY) || (rval == FC_OFFLINE) ||
9578 	    (rval == FC_LOGINREQ) || (rval == FC_DEVICE_BUSY) ||
9579 	    (rval == FC_DEVICE_BUSY_NEW_RSCN) || (rval == FC_TRAN_BUSY)) {
9580 		/*
9581 		 * Defer packet re-submission. A permanent hang is possible
9582 		 * on internal commands if the port driver sends
9583 		 * FC_STATEC_BUSY forever, but that shouldn't happen in a
9584 		 * healthy environment. Limiting re-transport of internal
9585 		 * commands is probably a good idea.
9586 		 * A race condition can happen when a port sees a barrage of
9587 		 * link transitions from offline to online. If fctl has
9588 		 * returned FC_STATEC_BUSY or FC_OFFLINE then none of the
9589 		 * internal commands should be queued to do the discovery.
9590 		 * The race condition arises when an online comes, FCP starts
9591 		 * its internal discovery and the link then goes offline. It
9592 		 * is possible that the statec_callback has not reached FCP
9593 		 * and FCP is carrying on with its internal discovery.
9594 		 * FC_STATEC_BUSY or FC_OFFLINE will be the first indication
9595 		 * that the link has gone offline. At this point FCP should
9596 		 * drop all the internal commands and wait for the
9597 		 * statec_callback. That is facilitated by incrementing
9598 		 * port_link_cnt.
9599 		 *
9600 		 * For external commands, the (FC) pkt_timeout is decremented
9601 		 * by the queue delay added by our driver. Care is taken to
9602 		 * ensure that it doesn't become zero (zero means no timeout).
9603 		 * If the timeout expires while the command is still in the
9604 		 * driver queue, the watch thread returns it to the original
9605 		 * caller indicating that the command has timed out.
9606 		 */
9607 		if (internal) {
9608 			char			*op;
9609 			struct fcp_ipkt	*icmd;
9610 
9611 			icmd = (struct fcp_ipkt *)fpkt->pkt_ulp_private;
9612 			switch (icmd->ipkt_opcode) {
9613 			case SCMD_REPORT_LUN:
9614 				op = "REPORT LUN";
9615 				break;
9616 
9617 			case SCMD_INQUIRY:
9618 				op = "INQUIRY";
9619 				break;
9620 
9621 			case SCMD_INQUIRY_PAGE83:
9622 				op = "INQUIRY-83";
9623 				break;
9624 
9625 			default:
9626 				op = "Internal SCSI COMMAND";
9627 				break;
9628 			}
9629 
9630 			if (fcp_handle_ipkt_errors(icmd->ipkt_port,
9631 			    icmd->ipkt_tgt, icmd, rval, op) == DDI_SUCCESS) {
9632 				rval = FC_SUCCESS;
9633 			}
9634 		} else {
9635 			struct fcp_pkt *cmd;
9636 			struct fcp_port *pptr;
9637 
9638 			cmd = (struct fcp_pkt *)fpkt->pkt_ulp_private;
9639 			cmd->cmd_state = FCP_PKT_IDLE;
9640 			pptr = ADDR2FCP(&cmd->cmd_pkt->pkt_address);
9641 
9642 			if (cmd->cmd_pkt->pkt_flags & FLAG_NOQUEUE) {
9643 				FCP_DTRACE(fcp_logq, pptr->port_instbuf,
9644 				    fcp_trace, FCP_BUF_LEVEL_9, 0,
9645 				    "fcp_transport: xport busy for pkt %p",
9646 				    cmd->cmd_pkt);
9647 				rval = FC_TRAN_BUSY;
9648 			} else {
9649 				fcp_queue_pkt(pptr, cmd);
9650 				rval = FC_SUCCESS;
9651 			}
9652 		}
9653 	}
9654 
9655 	return (rval);
9656 }
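
/*
 * Editor's note, a hedged usage sketch (not part of the driver): a caller
 * sees FC_SUCCESS when the packet was either transported or queued for
 * deferred retry; anything else (for example FC_TRAN_BUSY for a
 * FLAG_NOQUEUE packet) has to be mapped back to a SCSA return code by the
 * caller.  The mapping below is only an illustration:
 *
 *	if (fcp_transport(pptr->port_fp_handle, fpkt, 0) != FC_SUCCESS) {
 *		return (TRAN_BUSY);
 *	}
 *	return (TRAN_ACCEPT);
 *
 * Internal discovery packets pass internal != 0 instead, in which case
 * failures are routed through fcp_handle_ipkt_errors().
 */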
9657 
9658 /*VARARGS3*/
9659 static void
9660 fcp_log(int level, dev_info_t *dip, const char *fmt, ...)
9661 {
9662 	char		buf[256];
9663 	va_list		ap;
9664 
9665 	if (dip == NULL) {
9666 		dip = fcp_global_dip;
9667 	}
9668 
9669 	va_start(ap, fmt);
9670 	(void) vsprintf(buf, fmt, ap);
9671 	va_end(ap);
9672 
9673 	scsi_log(dip, "fcp", level, buf);
9674 }
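
/*
 * Editor's note: fcp_log() hands the formatted message to scsi_log(), so
 * the usual cmn_err()/scsi_log() prefix conventions apply; in particular a
 * leading '!' sends the message to the system log only, not the console.
 * That is why most warnings in this file look like:
 *
 *	fcp_log(CE_WARN, pptr->port_dip,
 *	    "!fcp%d: scsi_hba_attach_setup failed", instance);
 *
 * Passing a NULL dip falls back to fcp_global_dip, which lets code log
 * before a port is fully attached.
 */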
9675 
9676 /*
9677  * This function retries the name server registration of the FC-4 type.
9678  * It assumes that the port mutex (pptr->port_mutex) is held.
9679  * The function does nothing if the topology is not fabric or public loop,
9680  * so the topology has to be set before this function can be called.
9681  */
9682 static void
9683 fcp_retry_ns_registry(struct fcp_port *pptr, uint32_t s_id)
9684 {
9685 	int	rval;
9686 
9687 	ASSERT(MUTEX_HELD(&pptr->port_mutex));
9688 
9689 	if (((pptr->port_state & FCP_STATE_NS_REG_FAILED) == 0) ||
9690 	    ((pptr->port_topology != FC_TOP_FABRIC) &&
9691 	    (pptr->port_topology != FC_TOP_PUBLIC_LOOP))) {
9692 		if (pptr->port_state & FCP_STATE_NS_REG_FAILED) {
9693 			pptr->port_state &= ~FCP_STATE_NS_REG_FAILED;
9694 		}
9695 		return;
9696 	}
9697 	mutex_exit(&pptr->port_mutex);
9698 	rval = fcp_do_ns_registry(pptr, s_id);
9699 	mutex_enter(&pptr->port_mutex);
9700 
9701 	if (rval == 0) {
9702 		/* Registry successful. Reset flag */
9703 		pptr->port_state &= ~(FCP_STATE_NS_REG_FAILED);
9704 	}
9705 }
9706 
9707 /*
9708  * This function registers the ULP with the switch via the transport interface.
9709  */
9710 static int
9711 fcp_do_ns_registry(struct fcp_port *pptr, uint32_t s_id)
9712 {
9713 	fc_ns_cmd_t		ns_cmd;
9714 	ns_rfc_type_t		rfc;
9715 	uint32_t		types[8];
9716 
9717 	/*
9718 	 * Prepare the Name server structure to
9719 	 * register with the transport in case of
9720 	 * Fabric configuration.
9721 	 */
9722 	bzero(&rfc, sizeof (rfc));
9723 	bzero(types, sizeof (types));
9724 
9725 	types[FC4_TYPE_WORD_POS(FC_TYPE_SCSI_FCP)] =
9726 	    (1 << FC4_TYPE_BIT_POS(FC_TYPE_SCSI_FCP));
9727 
9728 	rfc.rfc_port_id.port_id = s_id;
9729 	bcopy(types, rfc.rfc_types, sizeof (types));
9730 
9731 	ns_cmd.ns_flags = 0;
9732 	ns_cmd.ns_cmd = NS_RFT_ID;
9733 	ns_cmd.ns_req_len = sizeof (rfc);
9734 	ns_cmd.ns_req_payload = (caddr_t)&rfc;
9735 	ns_cmd.ns_resp_len = 0;
9736 	ns_cmd.ns_resp_payload = NULL;
9737 
9738 	/*
9739 	 * Perform the Name Server Registration for SCSI_FCP FC4 Type.
9740 	 */
9741 	if (fc_ulp_port_ns(pptr->port_fp_handle, NULL, &ns_cmd)) {
9742 		fcp_log(CE_WARN, pptr->port_dip,
9743 		    "!ns_registry: failed name server registration");
9744 		return (1);
9745 	}
9746 
9747 	return (0);
9748 }
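
/*
 * Editor's note, a worked example of the FC-4 type bit map built above.
 * The FC-4 type code for SCSI-FCP is 0x08, so with the usual
 * word = type / 32, bit = type % 32 layout (the exact FC4_TYPE_WORD_POS/
 * FC4_TYPE_BIT_POS definitions live in the fctl headers and are assumed
 * here):
 *
 *	FC4_TYPE_WORD_POS(FC_TYPE_SCSI_FCP) == 0
 *	FC4_TYPE_BIT_POS(FC_TYPE_SCSI_FCP)  == 8
 *	types[0] == 0x00000100
 *
 * That single bit is what the RFT_ID request registers with the name
 * server for this port.
 */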
9749 
9750 /*
9751  *     Function: fcp_handle_port_attach
9752  *
9753  *  Description: This function is called from fcp_port_attach() to attach a
9754  *		 new port. This routine does the following:
9755  *
9756  *		1) Allocates an fcp_port structure and initializes it.
9757  *		2) Tries to register the new FC-4 (FCP) capability with the name
9758  *		   server.
9759  *		3) Kicks off the enumeration of the targets/luns visible
9760  *		   through this new port.  That is done by calling
9761  *		   fcp_statec_callback() if the port is online.
9762  *
9763  *     Argument: ulph		fp/fctl port handle.
9764  *		 *pinfo		Port information.
9765  *		 s_id		Port ID.
9766  *		 instance	Device instance number for the local port
9767  *				(returned by ddi_get_instance()).
9768  *
9769  * Return Value: DDI_SUCCESS
9770  *		 DDI_FAILURE
9771  *
9772  *	Context: User and Kernel context.
9773  */
9774 /*ARGSUSED*/
9775 int
9776 fcp_handle_port_attach(opaque_t ulph, fc_ulp_port_info_t *pinfo,
9777     uint32_t s_id, int instance)
9778 {
9779 	int			res = DDI_FAILURE;
9780 	scsi_hba_tran_t		*tran;
9781 	int			mutex_initted = FALSE;
9782 	int			hba_attached = FALSE;
9783 	int			soft_state_linked = FALSE;
9784 	int			event_bind = FALSE;
9785 	struct fcp_port		*pptr;
9786 	fc_portmap_t		*tmp_list = NULL;
9787 	uint32_t		max_cnt, alloc_cnt;
9788 	uchar_t			*boot_wwn = NULL;
9789 	uint_t			nbytes;
9790 	int			manual_cfg;
9791 
9792 	/*
9793 	 * this port instance is attaching for the first time (or after
9794 	 * being detached earlier)
9795 	 */
9796 	FCP_TRACE(fcp_logq, "fcp", fcp_trace,
9797 	    FCP_BUF_LEVEL_3, 0, "port attach: for port %d", instance);
9798 
9799 	if (ddi_soft_state_zalloc(fcp_softstate, instance) != DDI_SUCCESS) {
9800 		cmn_err(CE_WARN, "fcp: Softstate struct alloc failed; "
9801 		    "parent dip: %p; instance: %d", (void *)pinfo->port_dip,
9802 		    instance);
9803 		return (res);
9804 	}
9805 
9806 	if ((pptr = ddi_get_soft_state(fcp_softstate, instance)) == NULL) {
9807 		/* this shouldn't happen */
9808 		ddi_soft_state_free(fcp_softstate, instance);
9809 		cmn_err(CE_WARN, "fcp: bad soft state");
9810 		return (res);
9811 	}
9812 
9813 	(void) sprintf(pptr->port_instbuf, "fcp(%d)", instance);
9814 
9815 	/*
9816 	 * Make a copy of ulp_port_info as fctl allocates
9817 	 * a temp struct.
9818 	 */
9819 	(void) fcp_cp_pinfo(pptr, pinfo);
9820 
9821 	/*
9822 	 * Check for manual_configuration_only property.
9823 	 * Enable manual configuration if the property is
9824 	 * set to 1, otherwise disable manual configuration.
9825 	 */
9826 	if ((manual_cfg = ddi_prop_get_int(DDI_DEV_T_ANY, pptr->port_dip,
9827 	    DDI_PROP_NOTPROM | DDI_PROP_DONTPASS,
9828 	    MANUAL_CFG_ONLY,
9829 	    -1)) != -1) {
9830 		if (manual_cfg == 1) {
9831 			char	*pathname;
9832 			pathname = kmem_zalloc(MAXPATHLEN, KM_SLEEP);
9833 			(void) ddi_pathname(pptr->port_dip, pathname);
9834 			cmn_err(CE_NOTE,
9835 			    "%s (%s%d) %s is enabled via %s.conf.",
9836 			    pathname,
9837 			    ddi_driver_name(pptr->port_dip),
9838 			    ddi_get_instance(pptr->port_dip),
9839 			    MANUAL_CFG_ONLY,
9840 			    ddi_driver_name(pptr->port_dip));
9841 			fcp_enable_auto_configuration = 0;
9842 			kmem_free(pathname, MAXPATHLEN);
9843 		}
9844 	}
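
	/*
	 * Editor's note, a hedged example of the matching driver.conf
	 * setting.  MANUAL_CFG_ONLY is a string macro (assumed here to be
	 * "manual_configuration_only"), so an fcp.conf entry such as
	 *
	 *	manual_configuration_only=1;
	 *
	 * disables automatic target/LUN configuration.  Note that the code
	 * above clears the global fcp_enable_auto_configuration flag, so
	 * the setting is effectively system wide, not per port.
	 */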
9845 	_NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(pptr->port_link_cnt));
9846 	pptr->port_link_cnt = 1;
9847 	_NOTE(NOW_VISIBLE_TO_OTHER_THREADS(pptr->port_link_cnt));
9848 	pptr->port_id = s_id;
9849 	pptr->port_instance = instance;
9850 	_NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(pptr->port_state));
9851 	pptr->port_state = FCP_STATE_INIT;
9852 	if (pinfo->port_acc_attr == NULL) {
9853 		/*
9854 		 * The corresponding FCA doesn't support DMA at all
9855 		 */
9856 		pptr->port_state |= FCP_STATE_FCA_IS_NODMA;
9857 	}
9858 
9859 	_NOTE(NOW_VISIBLE_TO_OTHER_THREADS(pptr->port_state));
9860 
9861 	if (!(pptr->port_state & FCP_STATE_FCA_IS_NODMA)) {
9862 		/*
9863 		 * If the FCA supports DMA in the SCSI data phase, we need to
9864 		 * preallocate DMA cookies, so stash the cookie size
9865 		 */
9866 		pptr->port_dmacookie_sz = sizeof (ddi_dma_cookie_t) *
9867 		    pptr->port_data_dma_attr.dma_attr_sgllen;
9868 	}
9869 
9870 	/*
9871 	 * The two mutexes of fcp_port are initialized.	 The variable
9872 	 * mutex_initted is incremented to remember that fact.	That variable
9873 	 * is checked when the routine fails and the mutexes have to be
9874 	 * destroyed.
9875 	 */
9876 	mutex_init(&pptr->port_mutex, NULL, MUTEX_DRIVER, NULL);
9877 	mutex_init(&pptr->port_pkt_mutex, NULL, MUTEX_DRIVER, NULL);
9878 	mutex_initted++;
9879 
9880 	/*
9881 	 * The SCSI tran structure is allocated and initialized now.
9882 	 */
9883 	if ((tran = scsi_hba_tran_alloc(pptr->port_dip, 0)) == NULL) {
9884 		fcp_log(CE_WARN, pptr->port_dip,
9885 		    "!fcp%d: scsi_hba_tran_alloc failed", instance);
9886 		goto fail;
9887 	}
9888 
9889 	/* link in the transport structure then fill it in */
9890 	pptr->port_tran = tran;
9891 	tran->tran_hba_private		= pptr;
9892 	tran->tran_tgt_init		= fcp_scsi_tgt_init;
9893 	tran->tran_tgt_probe		= NULL;
9894 	tran->tran_tgt_free		= fcp_scsi_tgt_free;
9895 	tran->tran_start		= fcp_scsi_start;
9896 	tran->tran_reset		= fcp_scsi_reset;
9897 	tran->tran_abort		= fcp_scsi_abort;
9898 	tran->tran_getcap		= fcp_scsi_getcap;
9899 	tran->tran_setcap		= fcp_scsi_setcap;
9900 	tran->tran_init_pkt		= NULL;
9901 	tran->tran_destroy_pkt		= NULL;
9902 	tran->tran_dmafree		= NULL;
9903 	tran->tran_sync_pkt		= NULL;
9904 	tran->tran_reset_notify		= fcp_scsi_reset_notify;
9905 	tran->tran_get_bus_addr		= fcp_scsi_get_bus_addr;
9906 	tran->tran_get_name		= fcp_scsi_get_name;
9907 	tran->tran_clear_aca		= NULL;
9908 	tran->tran_clear_task_set	= NULL;
9909 	tran->tran_terminate_task	= NULL;
9910 	tran->tran_get_eventcookie	= fcp_scsi_bus_get_eventcookie;
9911 	tran->tran_add_eventcall	= fcp_scsi_bus_add_eventcall;
9912 	tran->tran_remove_eventcall	= fcp_scsi_bus_remove_eventcall;
9913 	tran->tran_post_event		= fcp_scsi_bus_post_event;
9914 	tran->tran_quiesce		= NULL;
9915 	tran->tran_unquiesce		= NULL;
9916 	tran->tran_bus_reset		= NULL;
9917 	tran->tran_bus_config		= fcp_scsi_bus_config;
9918 	tran->tran_bus_unconfig		= fcp_scsi_bus_unconfig;
9919 	tran->tran_bus_power		= NULL;
9920 	tran->tran_interconnect_type	= INTERCONNECT_FABRIC;
9921 
9922 	tran->tran_pkt_constructor	= fcp_kmem_cache_constructor;
9923 	tran->tran_pkt_destructor	= fcp_kmem_cache_destructor;
9924 	tran->tran_setup_pkt		= fcp_pkt_setup;
9925 	tran->tran_teardown_pkt		= fcp_pkt_teardown;
9926 	tran->tran_hba_len		= pptr->port_priv_pkt_len +
9927 	    sizeof (struct fcp_pkt) + pptr->port_dmacookie_sz;
9928 	if (pptr->port_state & FCP_STATE_FCA_IS_NODMA) {
9929 		/*
9930 		 * If the FCA doesn't support DMA, we use different vectors to
9931 		 * minimize the effect on the DMA code flow path.
9932 		 */
9933 		tran->tran_start	   = fcp_pseudo_start;
9934 		tran->tran_init_pkt	   = fcp_pseudo_init_pkt;
9935 		tran->tran_destroy_pkt	   = fcp_pseudo_destroy_pkt;
9936 		tran->tran_sync_pkt	   = fcp_pseudo_sync_pkt;
9937 		tran->tran_dmafree	   = fcp_pseudo_dmafree;
9938 		tran->tran_setup_pkt	   = NULL;
9939 		tran->tran_teardown_pkt	   = NULL;
9940 		tran->tran_pkt_constructor = NULL;
9941 		tran->tran_pkt_destructor  = NULL;
9942 		pptr->port_data_dma_attr   = pseudo_fca_dma_attr;
9943 	}
9944 
9945 	/*
9946 	 * Allocate an ndi event handle
9947 	 */
9948 	pptr->port_ndi_event_defs = (ndi_event_definition_t *)
9949 	    kmem_zalloc(sizeof (fcp_ndi_event_defs), KM_SLEEP);
9950 
9951 	bcopy(fcp_ndi_event_defs, pptr->port_ndi_event_defs,
9952 	    sizeof (fcp_ndi_event_defs));
9953 
9954 	(void) ndi_event_alloc_hdl(pptr->port_dip, NULL,
9955 	    &pptr->port_ndi_event_hdl, NDI_SLEEP);
9956 
9957 	pptr->port_ndi_events.ndi_events_version = NDI_EVENTS_REV1;
9958 	pptr->port_ndi_events.ndi_n_events = FCP_N_NDI_EVENTS;
9959 	pptr->port_ndi_events.ndi_event_defs = pptr->port_ndi_event_defs;
9960 
9961 	if (DEVI_IS_ATTACHING(pptr->port_dip) &&
9962 	    (ndi_event_bind_set(pptr->port_ndi_event_hdl,
9963 	    &pptr->port_ndi_events, NDI_SLEEP) != NDI_SUCCESS)) {
9964 		goto fail;
9965 	}
9966 	event_bind++;	/* Checked in fail case */
9967 
9968 	if (scsi_hba_attach_setup(pptr->port_dip, &pptr->port_data_dma_attr,
9969 	    tran, SCSI_HBA_ADDR_COMPLEX | SCSI_HBA_TRAN_SCB)
9970 	    != DDI_SUCCESS) {
9971 		fcp_log(CE_WARN, pptr->port_dip,
9972 		    "!fcp%d: scsi_hba_attach_setup failed", instance);
9973 		goto fail;
9974 	}
9975 	hba_attached++;	/* Checked in fail case */
9976 
9977 	pptr->port_mpxio = 0;
9978 	if (mdi_phci_register(MDI_HCI_CLASS_SCSI, pptr->port_dip, 0) ==
9979 	    MDI_SUCCESS) {
9980 		pptr->port_mpxio++;
9981 	}
9982 
9983 	/*
9984 	 * The following code puts the new port structure on the global
9985 	 * list of ports and, if it is the first port to attach, starts the
9986 	 * fcp_watchdog_tick.
9987 	 *
9988 	 * Why put this new port on the global list before we are done
9989 	 * attaching it?  We are making the structure globally known before
9990 	 * the attach is complete because of the code that follows.  At this
9991 	 * point the resources to handle the port are allocated.  This
9992 	 * function is now going to do the following:
9993 	 *
9994 	 *   1) It is going to try to register with the name server,
9995 	 *	advertising the new FCP capability of the port.
9996 	 *   2) It is going to play the role of the fp/fctl layer by building
9997 	 *	a list of worldwide names reachable through this port and call
9998 	 *	itself on fcp_statec_callback().  That requires the port to
9999 	 *	be part of the global list.
10000 	 */
10001 	mutex_enter(&fcp_global_mutex);
10002 	if (fcp_port_head == NULL) {
10003 		fcp_read_blacklist(pinfo->port_dip, &fcp_lun_blacklist);
10004 	}
10005 	pptr->port_next = fcp_port_head;
10006 	fcp_port_head = pptr;
10007 	soft_state_linked++;
10008 
10009 	if (fcp_watchdog_init++ == 0) {
10010 		fcp_watchdog_tick = fcp_watchdog_timeout *
10011 		    drv_usectohz(1000000);
10012 		fcp_watchdog_id = timeout(fcp_watch, NULL,
10013 		    fcp_watchdog_tick);
10014 	}
10015 	mutex_exit(&fcp_global_mutex);
10016 
10017 	/*
10018 	 * Here an attempt is made to register the new FCP capability with
10019 	 * the name server.  That is done using an RFT_ID request to the name
10020 	 * server.  It is done synchronously.  The function
10021 	 * fcp_do_ns_registry() doesn't return until the name server responds.
10022 	 * On failure, just ignore it for now; it will get retried during
10023 	 * state change callbacks. We set a flag to record the failure.
10024 	 */
10025 	if (fcp_do_ns_registry(pptr, s_id)) {
10026 		mutex_enter(&pptr->port_mutex);
10027 		pptr->port_state |= FCP_STATE_NS_REG_FAILED;
10028 		mutex_exit(&pptr->port_mutex);
10029 	} else {
10030 		mutex_enter(&pptr->port_mutex);
10031 		pptr->port_state &= ~(FCP_STATE_NS_REG_FAILED);
10032 		mutex_exit(&pptr->port_mutex);
10033 	}
10034 
10035 	/*
10036 	 * Look up the boot WWN property
10037 	 */
10038 	if (modrootloaded != 1) {
10039 		if ((ddi_prop_lookup_byte_array(DDI_DEV_T_ANY,
10040 		    ddi_get_parent(pinfo->port_dip),
10041 		    DDI_PROP_DONTPASS, OBP_BOOT_WWN,
10042 		    &boot_wwn, &nbytes) == DDI_PROP_SUCCESS) &&
10043 		    (nbytes == FC_WWN_SIZE)) {
10044 			bcopy(boot_wwn, pptr->port_boot_wwn, FC_WWN_SIZE);
10045 		}
10046 		if (boot_wwn) {
10047 			ddi_prop_free(boot_wwn);
10048 		}
10049 	}
10050 
10051 	/*
10052 	 * Handle various topologies and link states.
10053 	 */
10054 	switch (FC_PORT_STATE_MASK(pptr->port_phys_state)) {
10055 	case FC_STATE_OFFLINE:
10056 
10057 		/*
10058 		 * we're attaching a port where the link is offline
10059 		 *
10060 		 * Wait for ONLINE, at which time a state
10061 		 * change will cause a statec_callback
10062 		 *
10063 		 * in the meantime, do not do anything
10064 		 */
10065 		res = DDI_SUCCESS;
10066 		pptr->port_state |= FCP_STATE_OFFLINE;
10067 		break;
10068 
10069 	case FC_STATE_ONLINE: {
10070 		if (pptr->port_topology == FC_TOP_UNKNOWN) {
10071 			(void) fcp_linkreset(pptr, NULL, KM_NOSLEEP);
10072 			res = DDI_SUCCESS;
10073 			break;
10074 		}
10075 		/*
10076 		 * discover devices and create nodes (a private
10077 		 * loop or point-to-point)
10078 		 */
10079 		ASSERT(pptr->port_topology != FC_TOP_UNKNOWN);
10080 
10081 		/*
10082 		 * At this point we are going to build a list of all the ports
10083 		 * that	can be reached through this local port.	 It looks like
10084 		 * we cannot handle more than FCP_MAX_DEVICES per local port
10085 		 * (128).
10086 		 */
10087 		if ((tmp_list = (fc_portmap_t *)kmem_zalloc(
10088 		    sizeof (fc_portmap_t) * FCP_MAX_DEVICES,
10089 		    KM_NOSLEEP)) == NULL) {
10090 			fcp_log(CE_WARN, pptr->port_dip,
10091 			    "!fcp%d: failed to allocate portmap",
10092 			    instance);
10093 			goto fail;
10094 		}
10095 
10096 		/*
10097 		 * fc_ulp_getportmap() is going to provide us with the list of
10098 		 * remote ports in the buffer we just allocated.  The way the
10099 		 * list is going to be retrieved depends on the topology.
10100 		 * However, if we are connected to a Fabric, a name server
10101 		 * request may be sent to get the list of FCP capable ports.
10102 		 * It should be noted that in that case the request is
10103 		 * synchronous.	 This means we are stuck here until the name
10104 		 * server replies.  A lot of things can change during that
10105 		 * time, possibly including being called on
10106 		 * fcp_statec_callback() for different reasons. I'm not sure
10107 		 * the code can handle that.
10108 		 */
10109 		max_cnt = FCP_MAX_DEVICES;
10110 		alloc_cnt = FCP_MAX_DEVICES;
10111 		if ((res = fc_ulp_getportmap(pptr->port_fp_handle,
10112 		    &tmp_list, &max_cnt, FC_ULP_PLOGI_PRESERVE)) !=
10113 		    FC_SUCCESS) {
10114 			caddr_t msg;
10115 
10116 			(void) fc_ulp_error(res, &msg);
10117 
10118 			/*
10119 			 * this just means the transport is
10120 			 * busy, perhaps building a portmap, so,
10121 			 * for now, succeed this port attach;
10122 			 * when the transport has a new map,
10123 			 * it'll send us a state change then
10124 			 */
10125 			fcp_log(CE_WARN, pptr->port_dip,
10126 			    "!failed to get port map : %s", msg);
10127 
10128 			res = DDI_SUCCESS;
10129 			break;	/* go return result */
10130 		}
10131 		if (max_cnt > alloc_cnt) {
10132 			alloc_cnt = max_cnt;
10133 		}
10134 
10135 		/*
10136 		 * We are now going to call fcp_statec_callback() ourselves.
10137 		 * By issuing this call we are trying to kick off the enumera-
10138 		 * tion process.
10139 		 */
10140 		/*
10141 		 * let the state change callback do the SCSI device
10142 		 * discovery and create the devinfos
10143 		 */
10144 		fcp_statec_callback(ulph, pptr->port_fp_handle,
10145 		    pptr->port_phys_state, pptr->port_topology, tmp_list,
10146 		    max_cnt, pptr->port_id);
10147 
10148 		res = DDI_SUCCESS;
10149 		break;
10150 	}
10151 
10152 	default:
10153 		/* unknown port state */
10154 		fcp_log(CE_WARN, pptr->port_dip,
10155 		    "!fcp%d: invalid port state at attach=0x%x",
10156 		    instance, pptr->port_phys_state);
10157 
10158 		mutex_enter(&pptr->port_mutex);
10159 		pptr->port_phys_state = FCP_STATE_OFFLINE;
10160 		mutex_exit(&pptr->port_mutex);
10161 
10162 		res = DDI_SUCCESS;
10163 		break;
10164 	}
10165 
10166 	/* free temp list if used */
10167 	if (tmp_list != NULL) {
10168 		kmem_free(tmp_list, sizeof (fc_portmap_t) * alloc_cnt);
10169 	}
10170 
10171 	/* note the attach time */
10172 	pptr->port_attach_time = ddi_get_lbolt64();
10173 
10174 	/* all done */
10175 	return (res);
10176 
10177 	/* a failure we have to clean up after */
10178 fail:
10179 	fcp_log(CE_WARN, pptr->port_dip, "!failed to attach to port");
10180 
10181 	if (soft_state_linked) {
10182 		/* remove this fcp_port from the linked list */
10183 		(void) fcp_soft_state_unlink(pptr);
10184 	}
10185 
10186 	/* unbind and free event set */
10187 	if (pptr->port_ndi_event_hdl) {
10188 		if (event_bind) {
10189 			(void) ndi_event_unbind_set(pptr->port_ndi_event_hdl,
10190 			    &pptr->port_ndi_events, NDI_SLEEP);
10191 		}
10192 		(void) ndi_event_free_hdl(pptr->port_ndi_event_hdl);
10193 	}
10194 
10195 	if (pptr->port_ndi_event_defs) {
10196 		(void) kmem_free(pptr->port_ndi_event_defs,
10197 		    sizeof (fcp_ndi_event_defs));
10198 	}
10199 
10200 	/*
10201 	 * Clean up mpxio stuff
10202 	 */
10203 	if (pptr->port_mpxio) {
10204 		(void) mdi_phci_unregister(pptr->port_dip, 0);
10205 		pptr->port_mpxio--;
10206 	}
10207 
10208 	/* undo SCSI HBA setup */
10209 	if (hba_attached) {
10210 		(void) scsi_hba_detach(pptr->port_dip);
10211 	}
10212 	if (pptr->port_tran != NULL) {
10213 		scsi_hba_tran_free(pptr->port_tran);
10214 	}
10215 
10216 	mutex_enter(&fcp_global_mutex);
10217 
10218 	/*
10219 	 * We check soft_state_linked because it is incremented right before
10220 	 * we increment fcp_watchdog_init.  Therefore, we know that if
10221 	 * soft_state_linked is still FALSE, we must not decrement
10222 	 * fcp_watchdog_init or possibly call untimeout.
10223 	 */
10224 
10225 	if (soft_state_linked) {
10226 		if (--fcp_watchdog_init == 0) {
10227 			timeout_id_t	tid = fcp_watchdog_id;
10228 
10229 			mutex_exit(&fcp_global_mutex);
10230 			(void) untimeout(tid);
10231 		} else {
10232 			mutex_exit(&fcp_global_mutex);
10233 		}
10234 	} else {
10235 		mutex_exit(&fcp_global_mutex);
10236 	}
10237 
10238 	if (mutex_initted) {
10239 		mutex_destroy(&pptr->port_mutex);
10240 		mutex_destroy(&pptr->port_pkt_mutex);
10241 	}
10242 
10243 	if (tmp_list != NULL) {
10244 		kmem_free(tmp_list, sizeof (fc_portmap_t) * alloc_cnt);
10245 	}
10246 
10247 	/* this makes pptr invalid */
10248 	ddi_soft_state_free(fcp_softstate, instance);
10249 
10250 	return (DDI_FAILURE);
10251 }
10252 
10253 
10254 static int
10255 fcp_handle_port_detach(struct fcp_port *pptr, int flag, int instance)
10256 {
10257 	int count = 0;
10258 
10259 	mutex_enter(&pptr->port_mutex);
10260 
10261 	/*
10262 	 * if the port is powered down or suspended, nothing else
10263 	 * to do; just return.
10264 	 */
10265 	if (flag != FCP_STATE_DETACHING) {
10266 		if (pptr->port_state & (FCP_STATE_POWER_DOWN |
10267 		    FCP_STATE_SUSPENDED)) {
10268 			pptr->port_state |= flag;
10269 			mutex_exit(&pptr->port_mutex);
10270 			return (FC_SUCCESS);
10271 		}
10272 	}
10273 
10274 	if (pptr->port_state & FCP_STATE_IN_MDI) {
10275 		mutex_exit(&pptr->port_mutex);
10276 		return (FC_FAILURE);
10277 	}
10278 
10279 	FCP_TRACE(fcp_logq, pptr->port_instbuf,
10280 	    fcp_trace, FCP_BUF_LEVEL_2, 0,
10281 	    "fcp_handle_port_detach: port is detaching");
10282 
10283 	pptr->port_state |= flag;
10284 
10285 	/*
10286 	 * Wait for any ongoing reconfig/ipkt to complete, which
10287 	 * ensures that freeing the targets/luns is safe.
10288 	 * No more ref to this port should happen from statec/ioctl
10289 	 * after that as it was removed from the global port list.
10290 	 */
10291 	while (pptr->port_tmp_cnt || pptr->port_ipkt_cnt ||
10292 	    (pptr->port_state & FCP_STATE_IN_WATCHDOG)) {
10293 		/*
10294 		 * Let's give sufficient time for reconfig/ipkt
10295 		 * to complete.
10296 		 */
10297 		if (count++ >= FCP_ICMD_DEADLINE) {
10298 			break;
10299 		}
10300 		mutex_exit(&pptr->port_mutex);
10301 		delay(drv_usectohz(1000000));
10302 		mutex_enter(&pptr->port_mutex);
10303 	}
10304 
10305 	/*
10306 	 * if the driver is still busy then fail to
10307 	 * suspend/power down.
10308 	 */
10309 	if (pptr->port_tmp_cnt || pptr->port_ipkt_cnt ||
10310 	    (pptr->port_state & FCP_STATE_IN_WATCHDOG)) {
10311 		pptr->port_state &= ~flag;
10312 		mutex_exit(&pptr->port_mutex);
10313 		return (FC_FAILURE);
10314 	}
10315 
10316 	if (flag == FCP_STATE_DETACHING) {
10317 		pptr = fcp_soft_state_unlink(pptr);
10318 		ASSERT(pptr != NULL);
10319 	}
10320 
10321 	pptr->port_link_cnt++;
10322 	pptr->port_state |= FCP_STATE_OFFLINE;
10323 	pptr->port_state &= ~(FCP_STATE_ONLINING | FCP_STATE_ONLINE);
10324 
10325 	fcp_update_state(pptr, (FCP_LUN_BUSY | FCP_LUN_MARK),
10326 	    FCP_CAUSE_LINK_DOWN);
10327 	mutex_exit(&pptr->port_mutex);
10328 
10329 	/* kill watch dog timer if we're the last */
10330 	mutex_enter(&fcp_global_mutex);
10331 	if (--fcp_watchdog_init == 0) {
10332 		timeout_id_t	tid = fcp_watchdog_id;
10333 		mutex_exit(&fcp_global_mutex);
10334 		(void) untimeout(tid);
10335 	} else {
10336 		mutex_exit(&fcp_global_mutex);
10337 	}
10338 
10339 	/* clean up the port structures */
10340 	if (flag == FCP_STATE_DETACHING) {
10341 		fcp_cleanup_port(pptr, instance);
10342 	}
10343 
10344 	return (FC_SUCCESS);
10345 }
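
/*
 * Editor's note, a worked bound on the wait loop above: each pass sleeps
 * for one second (drv_usectohz(1000000)) and the loop gives up after
 * FCP_ICMD_DEADLINE passes, so a detach/suspend waits at most roughly
 * FCP_ICMD_DEADLINE seconds for outstanding reconfig/internal packets
 * before the request is failed with FC_FAILURE.
 */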
10346 
10347 
10348 static void
10349 fcp_cleanup_port(struct fcp_port *pptr, int instance)
10350 {
10351 	ASSERT(pptr != NULL);
10352 
10353 	/* unbind and free event set */
10354 	if (pptr->port_ndi_event_hdl) {
10355 		(void) ndi_event_unbind_set(pptr->port_ndi_event_hdl,
10356 		    &pptr->port_ndi_events, NDI_SLEEP);
10357 		(void) ndi_event_free_hdl(pptr->port_ndi_event_hdl);
10358 	}
10359 
10360 	if (pptr->port_ndi_event_defs) {
10361 		(void) kmem_free(pptr->port_ndi_event_defs,
10362 		    sizeof (fcp_ndi_event_defs));
10363 	}
10364 
10365 	/* free the lun/target structures and devinfos */
10366 	fcp_free_targets(pptr);
10367 
10368 	/*
10369 	 * Clean up mpxio stuff
10370 	 */
10371 	if (pptr->port_mpxio) {
10372 		(void) mdi_phci_unregister(pptr->port_dip, 0);
10373 		pptr->port_mpxio--;
10374 	}
10375 
10376 	/* clean up SCSA stuff */
10377 	(void) scsi_hba_detach(pptr->port_dip);
10378 	if (pptr->port_tran != NULL) {
10379 		scsi_hba_tran_free(pptr->port_tran);
10380 	}
10381 
10382 #ifdef	KSTATS_CODE
10383 	/* clean up kstats */
10384 	if (pptr->fcp_ksp != NULL) {
10385 		kstat_delete(pptr->fcp_ksp);
10386 	}
10387 #endif
10388 
10389 	/* clean up soft state mutexes/condition variables */
10390 	mutex_destroy(&pptr->port_mutex);
10391 	mutex_destroy(&pptr->port_pkt_mutex);
10392 
10393 	/* all done with soft state */
10394 	ddi_soft_state_free(fcp_softstate, instance);
10395 }
10396 
10397 /*
10398  *     Function: fcp_kmem_cache_constructor
10399  *
10400  *  Description: This function allocates and initializes the resources required
10401  *		 to build a scsi_pkt structure for the target driver.  The result
10402  *		 of the allocation and initialization will be cached in the
10403  *		 memory cache.	As DMA resources may be allocated here, that
10404  *		 means DMA resources will be tied up in the cache manager.
10405  *		 This is a tradeoff that has been made for performance reasons.
10406  *
10407  *     Argument: *pkt		scsi_pkt whose HBA private area is preinitialized.
10408  *		 *tran		SCSA transport handle (tran_hba_private holds the fcp_port).
10409  *		 kmflags	Value passed to kmem_cache_alloc() and
10410  *				propagated to the constructor.
10411  *
10412  * Return Value: 0	Allocation/Initialization was successful.
10413  *		 -1	Allocation or Initialization failed.
10414  *
10415  *
10416  * If the returned value is 0, the buffer is initialized like this:
10417  *
10418  *		    +================================+
10419  *	     +----> |	      struct scsi_pkt	     |
10420  *	     |	    |				     |
10421  *	     | +--- | pkt_ha_private		     |
10422  *	     | |    |				     |
10423  *	     | |    +================================+
10424  *	     | |
10425  *	     | |    +================================+
10426  *	     | +--> |	    struct fcp_pkt	     | <---------+
10427  *	     |	    |				     |		 |
10428  *	     +----- | cmd_pkt			     |		 |
10429  *		    |			  cmd_fp_pkt | ---+	 |
10430  *	  +-------->| cmd_fcp_rsp[]		     |	  |	 |
10431  *	  |    +--->| cmd_fcp_cmd[]		     |	  |	 |
10432  *	  |    |    |--------------------------------|	  |	 |
10433  *	  |    |    |	      struct fc_packet	     | <--+	 |
10434  *	  |    |    |				     |		 |
10435  *	  |    |    |		     pkt_ulp_private | ----------+
10436  *	  |    |    |		     pkt_fca_private | -----+
10437  *	  |    |    |		     pkt_data_cookie | ---+ |
10438  *	  |    |    | pkt_cmdlen		     |	  | |
10439  *	  |    |(a) | pkt_rsplen		     |	  | |
10440  *	  |    +----| .......... pkt_cmd ........... | ---|-|---------------+
10441  *	  |	(b) |		      pkt_cmd_cookie | ---|-|----------+    |
10442  *	  +---------| .......... pkt_resp .......... | ---|-|------+   |    |
10443  *		    |		     pkt_resp_cookie | ---|-|--+   |   |    |
10444  *		    | pkt_cmd_dma		     |	  | |  |   |   |    |
10445  *		    | pkt_cmd_acc		     |	  | |  |   |   |    |
10446  *		    +================================+	  | |  |   |   |    |
10447  *		    |	      dma_cookies	     | <--+ |  |   |   |    |
10448  *		    |				     |	    |  |   |   |    |
10449  *		    +================================+	    |  |   |   |    |
10450  *		    |	      fca_private	     | <----+  |   |   |    |
10451  *		    |				     |	       |   |   |    |
10452  *		    +================================+	       |   |   |    |
10453  *							       |   |   |    |
10454  *							       |   |   |    |
10455  *		    +================================+	 (d)   |   |   |    |
10456  *		    |	     fcp_resp cookies	     | <-------+   |   |    |
10457  *		    |				     |		   |   |    |
10458  *		    +================================+		   |   |    |
10459  *								   |   |    |
10460  *		    +================================+	 (d)	   |   |    |
10461  *		    |		fcp_resp	     | <-----------+   |    |
10462  *		    |	(DMA resources associated)   |		       |    |
10463  *		    +================================+		       |    |
10464  *								       |    |
10465  *								       |    |
10466  *								       |    |
10467  *		    +================================+	 (c)	       |    |
10468  *		    |	     fcp_cmd cookies	     | <---------------+    |
10469  *		    |				     |			    |
10470  *		    +================================+			    |
10471  *									    |
10472  *		    +================================+	 (c)		    |
10473  *		    |		 fcp_cmd	     | <--------------------+
10474  *		    |	(DMA resources associated)   |
10475  *		    +================================+
10476  *
10477  * (a) Only if DMA is NOT used for the FCP_CMD buffer.
10478  * (b) Only if DMA is NOT used for the FCP_RESP buffer
10479  * (c) Only if DMA is used for the FCP_CMD buffer.
10480  * (d) Only if DMA is used for the FCP_RESP buffer
10481  */
10482 static int
10483 fcp_kmem_cache_constructor(struct scsi_pkt *pkt, scsi_hba_tran_t *tran,
10484     int kmflags)
10485 {
10486 	struct fcp_pkt	*cmd;
10487 	struct fcp_port	*pptr;
10488 	fc_packet_t	*fpkt;
10489 
10490 	pptr = (struct fcp_port *)tran->tran_hba_private;
10491 	cmd = (struct fcp_pkt *)pkt->pkt_ha_private;
10492 	bzero(cmd, tran->tran_hba_len);
10493 
10494 	cmd->cmd_pkt = pkt;
10495 	pkt->pkt_cdbp = cmd->cmd_fcp_cmd.fcp_cdb;
10496 	fpkt = (fc_packet_t *)&cmd->cmd_fc_packet;
10497 	cmd->cmd_fp_pkt = fpkt;
10498 
10499 	cmd->cmd_pkt->pkt_ha_private = (opaque_t)cmd;
10500 	cmd->cmd_fp_pkt->pkt_ulp_private = (opaque_t)cmd;
10501 	cmd->cmd_fp_pkt->pkt_fca_private = (opaque_t)((caddr_t)cmd +
10502 	    sizeof (struct fcp_pkt) + pptr->port_dmacookie_sz);
10503 
10504 	fpkt->pkt_data_cookie = (ddi_dma_cookie_t *)((caddr_t)cmd +
10505 	    sizeof (struct fcp_pkt));
10506 
10507 	fpkt->pkt_cmdlen = sizeof (struct fcp_cmd);
10508 	fpkt->pkt_rsplen = FCP_MAX_RSP_IU_SIZE;
10509 
10510 	if (pptr->port_fcp_dma == FC_NO_DVMA_SPACE) {
10511 		/*
10512 		 * The underlying HBA doesn't want to DMA the fcp_cmd or
10513 		 * fcp_resp.  The transfer of information will be done by
10514 		 * bcopy.
10515 		 * The naming of the flags (that is actually a value) is
10516 		 * The naming of the flag (which is actually a value) is
10517 		 * DMA" but instead "NO DMA".
10518 		 */
10519 		fpkt->pkt_resp_acc = fpkt->pkt_cmd_acc = NULL;
10520 		fpkt->pkt_cmd = (caddr_t)&cmd->cmd_fcp_cmd;
10521 		fpkt->pkt_resp = cmd->cmd_fcp_rsp;
10522 	} else {
10523 		/*
10524 		 * The underlying HBA will DMA the fcp_cmd buffer and fcp_resp
10525 		 * buffer.  A buffer is allocated for each one using the
10526 		 * ddi_dma_* interfaces.
10527 		 */
10528 		if (fcp_alloc_cmd_resp(pptr, fpkt, kmflags) != FC_SUCCESS) {
10529 			return (-1);
10530 		}
10531 	}
10532 
10533 	return (0);
10534 }
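
/*
 * Editor's note, a sketch of the layout the constructor above carves out
 * of each cache buffer.  tran_hba_len was set during port attach to
 *
 *	port_priv_pkt_len + sizeof (struct fcp_pkt) + port_dmacookie_sz
 *
 * and the constructor lays the pieces out back to back behind the
 * scsi_pkt:
 *
 *	cmd					pkt->pkt_ha_private
 *	cmd + sizeof (struct fcp_pkt)		fpkt->pkt_data_cookie array
 *	cmd + sizeof (struct fcp_pkt)
 *	    + port_dmacookie_sz			fpkt->pkt_fca_private
 *
 * so the data-DMA cookie array and the FCA private area are preallocated
 * per packet and never allocated on the I/O path.
 */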
10535 
10536 /*
10537  *     Function: fcp_kmem_cache_destructor
10538  *
10539  *  Description: Called by the destructor of the cache managed by SCSA.
10540  *		 All the resources pre-allocated in fcp_pkt_constructor
10541  *		 All the resources pre-allocated and the data pre-initialized
10542  *		 in fcp_kmem_cache_constructor() are freed and
10543  *		 uninitialized here.
10544  *     Argument: *buf		Memory to uninitialize.
10545  *     Argument: *pkt		scsi_pkt whose HBA private area is cleaned up.
10546  *		 *tran		SCSA transport handle (tran_hba_private holds the fcp_port).
10547  * Return Value: None
10548  *
10549  *	Context: kernel
10550  */
10551 static void
10552 fcp_kmem_cache_destructor(struct scsi_pkt *pkt, scsi_hba_tran_t *tran)
10553 {
10554 	struct fcp_pkt	*cmd;
10555 	struct fcp_port	*pptr;
10556 
10557 	pptr = (struct fcp_port *)(tran->tran_hba_private);
10558 	cmd = pkt->pkt_ha_private;
10559 
10560 	if (pptr->port_fcp_dma != FC_NO_DVMA_SPACE) {
10561 		/*
10562 		 * If DMA was used to transfer the FCP_CMD and FCP_RESP, the
10563 		 * buffer and DMA resources allocated to do so are released.
10564 		 */
10565 		fcp_free_cmd_resp(pptr, cmd->cmd_fp_pkt);
10566 	}
10567 }
10568 
10569 /*
10570  *     Function: fcp_alloc_cmd_resp
10571  *
10572  *  Description: This function allocates an FCP_CMD and FCP_RESP buffer that
10573  *		 will be DMAed by the HBA.  The buffer is allocated applying
10574  *		 the DMA requirements for the HBA.  The buffers allocated will
10575  *		 also be bound.	 DMA resources are allocated in the process.
10576  *		 They will be released by fcp_free_cmd_resp().
10577  *
10578  *     Argument: *pptr	FCP port.
10579  *		 *fpkt	fc packet for which the cmd and resp packet should be
10580  *			allocated.
10581  *		 flags	Allocation flags.
10582  *
10583  * Return Value: FC_FAILURE
10584  *		 FC_SUCCESS
10585  *
10586  *	Context: User or Kernel context only if flags == KM_SLEEP.
10587  *		 Interrupt context if the KM_SLEEP is not specified.
10588  */
10589 static int
10590 fcp_alloc_cmd_resp(struct fcp_port *pptr, fc_packet_t *fpkt, int flags)
10591 {
10592 	int			rval;
10593 	int			cmd_len;
10594 	int			resp_len;
10595 	ulong_t			real_len;
10596 	int			(*cb) (caddr_t);
10597 	ddi_dma_cookie_t	pkt_cookie;
10598 	ddi_dma_cookie_t	*cp;
10599 	uint32_t		cnt;
10600 
10601 	cb = (flags == KM_SLEEP) ? DDI_DMA_SLEEP : DDI_DMA_DONTWAIT;
10602 
10603 	cmd_len = fpkt->pkt_cmdlen;
10604 	resp_len = fpkt->pkt_rsplen;
10605 
10606 	ASSERT(fpkt->pkt_cmd_dma == NULL);
10607 
10608 	/* Allocation of a DMA handle used in subsequent calls. */
10609 	if (ddi_dma_alloc_handle(pptr->port_dip, &pptr->port_cmd_dma_attr,
10610 	    cb, NULL, &fpkt->pkt_cmd_dma) != DDI_SUCCESS) {
10611 		return (FC_FAILURE);
10612 	}
10613 
10614 	/* A buffer is allocated that satisfies the DMA requirements. */
10615 	rval = ddi_dma_mem_alloc(fpkt->pkt_cmd_dma, cmd_len,
10616 	    &pptr->port_dma_acc_attr, DDI_DMA_CONSISTENT, cb, NULL,
10617 	    (caddr_t *)&fpkt->pkt_cmd, &real_len, &fpkt->pkt_cmd_acc);
10618 
10619 	if (rval != DDI_SUCCESS) {
10620 		ddi_dma_free_handle(&fpkt->pkt_cmd_dma);
10621 		return (FC_FAILURE);
10622 	}
10623 
10624 	if (real_len < cmd_len) {
10625 		ddi_dma_mem_free(&fpkt->pkt_cmd_acc);
10626 		ddi_dma_free_handle(&fpkt->pkt_cmd_dma);
10627 		return (FC_FAILURE);
10628 	}
10629 
10630 	/* The buffer allocated is DMA bound. */
10631 	rval = ddi_dma_addr_bind_handle(fpkt->pkt_cmd_dma, NULL,
10632 	    fpkt->pkt_cmd, real_len, DDI_DMA_WRITE | DDI_DMA_CONSISTENT,
10633 	    cb, NULL, &pkt_cookie, &fpkt->pkt_cmd_cookie_cnt);
10634 
10635 	if (rval != DDI_DMA_MAPPED) {
10636 		ddi_dma_mem_free(&fpkt->pkt_cmd_acc);
10637 		ddi_dma_free_handle(&fpkt->pkt_cmd_dma);
10638 		return (FC_FAILURE);
10639 	}
10640 
10641 	if (fpkt->pkt_cmd_cookie_cnt >
10642 	    pptr->port_cmd_dma_attr.dma_attr_sgllen) {
10643 		(void) ddi_dma_unbind_handle(fpkt->pkt_cmd_dma);
10644 		ddi_dma_mem_free(&fpkt->pkt_cmd_acc);
10645 		ddi_dma_free_handle(&fpkt->pkt_cmd_dma);
10646 		return (FC_FAILURE);
10647 	}
10648 
10649 	ASSERT(fpkt->pkt_cmd_cookie_cnt != 0);
10650 
10651 	/*
10652 	 * The buffer where the scatter/gather list is going to be built is
10653 	 * allocated.
10654 	 */
10655 	cp = fpkt->pkt_cmd_cookie = (ddi_dma_cookie_t *)kmem_alloc(
10656 	    fpkt->pkt_cmd_cookie_cnt * sizeof (pkt_cookie),
10657 	    KM_NOSLEEP);
10658 
10659 	if (cp == NULL) {
10660 		(void) ddi_dma_unbind_handle(fpkt->pkt_cmd_dma);
10661 		ddi_dma_mem_free(&fpkt->pkt_cmd_acc);
10662 		ddi_dma_free_handle(&fpkt->pkt_cmd_dma);
10663 		return (FC_FAILURE);
10664 	}
10665 
10666 	/*
10667 	 * The scatter/gather list for the buffer we just allocated is built
10668 	 * here.
10669 	 */
10670 	*cp = pkt_cookie;
10671 	cp++;
10672 
10673 	for (cnt = 1; cnt < fpkt->pkt_cmd_cookie_cnt; cnt++, cp++) {
10674 		ddi_dma_nextcookie(fpkt->pkt_cmd_dma,
10675 		    &pkt_cookie);
10676 		*cp = pkt_cookie;
10677 	}
10678 
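	/*
	 * The same sequence is now repeated for the FCP_RESP buffer:
	 * allocate a DMA handle, allocate and bind the memory, then build
	 * its scatter/gather cookie list.
	 */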
10679 	ASSERT(fpkt->pkt_resp_dma == NULL);
10680 	if (ddi_dma_alloc_handle(pptr->port_dip, &pptr->port_resp_dma_attr,
10681 	    cb, NULL, &fpkt->pkt_resp_dma) != DDI_SUCCESS) {
10682 		(void) ddi_dma_unbind_handle(fpkt->pkt_cmd_dma);
10683 		ddi_dma_mem_free(&fpkt->pkt_cmd_acc);
10684 		ddi_dma_free_handle(&fpkt->pkt_cmd_dma);
10685 		return (FC_FAILURE);
10686 	}
10687 
10688 	rval = ddi_dma_mem_alloc(fpkt->pkt_resp_dma, resp_len,
10689 	    &pptr->port_dma_acc_attr, DDI_DMA_CONSISTENT, cb, NULL,
10690 	    (caddr_t *)&fpkt->pkt_resp, &real_len,
10691 	    &fpkt->pkt_resp_acc);
10692 
10693 	if (rval != DDI_SUCCESS) {
10694 		ddi_dma_free_handle(&fpkt->pkt_resp_dma);
10695 		(void) ddi_dma_unbind_handle(fpkt->pkt_cmd_dma);
10696 		ddi_dma_mem_free(&fpkt->pkt_cmd_acc);
10697 		ddi_dma_free_handle(&fpkt->pkt_cmd_dma);
10698 		kmem_free(fpkt->pkt_cmd_cookie,
10699 		    fpkt->pkt_cmd_cookie_cnt * sizeof (pkt_cookie));
10700 		return (FC_FAILURE);
10701 	}
10702 
10703 	if (real_len < resp_len) {
10704 		ddi_dma_mem_free(&fpkt->pkt_resp_acc);
10705 		ddi_dma_free_handle(&fpkt->pkt_resp_dma);
10706 		(void) ddi_dma_unbind_handle(fpkt->pkt_cmd_dma);
10707 		ddi_dma_mem_free(&fpkt->pkt_cmd_acc);
10708 		ddi_dma_free_handle(&fpkt->pkt_cmd_dma);
10709 		kmem_free(fpkt->pkt_cmd_cookie,
10710 		    fpkt->pkt_cmd_cookie_cnt * sizeof (pkt_cookie));
10711 		return (FC_FAILURE);
10712 	}
10713 
10714 	rval = ddi_dma_addr_bind_handle(fpkt->pkt_resp_dma, NULL,
10715 	    fpkt->pkt_resp, real_len, DDI_DMA_READ | DDI_DMA_CONSISTENT,
10716 	    cb, NULL, &pkt_cookie, &fpkt->pkt_resp_cookie_cnt);
10717 
10718 	if (rval != DDI_DMA_MAPPED) {
10719 		ddi_dma_mem_free(&fpkt->pkt_resp_acc);
10720 		ddi_dma_free_handle(&fpkt->pkt_resp_dma);
10721 		(void) ddi_dma_unbind_handle(fpkt->pkt_cmd_dma);
10722 		ddi_dma_mem_free(&fpkt->pkt_cmd_acc);
10723 		ddi_dma_free_handle(&fpkt->pkt_cmd_dma);
10724 		kmem_free(fpkt->pkt_cmd_cookie,
10725 		    fpkt->pkt_cmd_cookie_cnt * sizeof (pkt_cookie));
10726 		return (FC_FAILURE);
10727 	}
10728 
10729 	if (fpkt->pkt_resp_cookie_cnt >
10730 	    pptr->port_resp_dma_attr.dma_attr_sgllen) {
10731 		ddi_dma_mem_free(&fpkt->pkt_resp_acc);
10732 		ddi_dma_free_handle(&fpkt->pkt_resp_dma);
10733 		(void) ddi_dma_unbind_handle(fpkt->pkt_cmd_dma);
10734 		ddi_dma_mem_free(&fpkt->pkt_cmd_acc);
10735 		ddi_dma_free_handle(&fpkt->pkt_cmd_dma);
10736 		kmem_free(fpkt->pkt_cmd_cookie,
10737 		    fpkt->pkt_cmd_cookie_cnt * sizeof (pkt_cookie));
10738 		return (FC_FAILURE);
10739 	}
10740 
10741 	ASSERT(fpkt->pkt_resp_cookie_cnt != 0);
10742 
10743 	cp = fpkt->pkt_resp_cookie = (ddi_dma_cookie_t *)kmem_alloc(
10744 	    fpkt->pkt_resp_cookie_cnt * sizeof (pkt_cookie),
10745 	    KM_NOSLEEP);
10746 
10747 	if (cp == NULL) {
10748 		ddi_dma_mem_free(&fpkt->pkt_resp_acc);
10749 		ddi_dma_free_handle(&fpkt->pkt_resp_dma);
10750 		(void) ddi_dma_unbind_handle(fpkt->pkt_cmd_dma);
10751 		ddi_dma_mem_free(&fpkt->pkt_cmd_acc);
10752 		ddi_dma_free_handle(&fpkt->pkt_cmd_dma);
10753 		kmem_free(fpkt->pkt_cmd_cookie,
10754 		    fpkt->pkt_cmd_cookie_cnt * sizeof (pkt_cookie));
10755 		return (FC_FAILURE);
10756 	}
10757 
10758 	*cp = pkt_cookie;
10759 	cp++;
10760 
10761 	for (cnt = 1; cnt < fpkt->pkt_resp_cookie_cnt; cnt++, cp++) {
10762 		ddi_dma_nextcookie(fpkt->pkt_resp_dma,
10763 		    &pkt_cookie);
10764 		*cp = pkt_cookie;
10765 	}
10766 
10767 	return (FC_SUCCESS);
10768 }
10769 
10770 /*
10771  *     Function: fcp_free_cmd_resp
10772  *
10773  *  Description: This function releases the FCP_CMD and FCP_RESP buffer
10774  *		 allocated by fcp_alloc_cmd_resp() and all the resources
10775  *		 associated with them.	That includes the DMA resources and the
10776  *		 buffer allocated for the cookies of each one of them.
10777  *
10778  *     Argument: *pptr		FCP port context.
10779  *		 *fpkt		fc packet containing the cmd and resp packet
10780  *				to be released.
10781  *
10782  * Return Value: None
10783  *
10784  *	Context: Interrupt, User and Kernel context.
10785  */
10786 /* ARGSUSED */
10787 static void
10788 fcp_free_cmd_resp(struct fcp_port *pptr, fc_packet_t *fpkt)
10789 {
10790 	ASSERT(fpkt->pkt_resp_dma != NULL && fpkt->pkt_cmd_dma != NULL);
10791 
10792 	if (fpkt->pkt_resp_dma) {
10793 		(void) ddi_dma_unbind_handle(fpkt->pkt_resp_dma);
10794 		ddi_dma_mem_free(&fpkt->pkt_resp_acc);
10795 		ddi_dma_free_handle(&fpkt->pkt_resp_dma);
10796 	}
10797 
10798 	if (fpkt->pkt_resp_cookie) {
10799 		kmem_free(fpkt->pkt_resp_cookie,
10800 		    fpkt->pkt_resp_cookie_cnt * sizeof (ddi_dma_cookie_t));
10801 		fpkt->pkt_resp_cookie = NULL;
10802 	}
10803 
10804 	if (fpkt->pkt_cmd_dma) {
10805 		(void) ddi_dma_unbind_handle(fpkt->pkt_cmd_dma);
10806 		ddi_dma_mem_free(&fpkt->pkt_cmd_acc);
10807 		ddi_dma_free_handle(&fpkt->pkt_cmd_dma);
10808 	}
10809 
10810 	if (fpkt->pkt_cmd_cookie) {
10811 		kmem_free(fpkt->pkt_cmd_cookie,
10812 		    fpkt->pkt_cmd_cookie_cnt * sizeof (ddi_dma_cookie_t));
10813 		fpkt->pkt_cmd_cookie = NULL;
10814 	}
10815 }
10816 
10817 
10818 /*
10819  * called by the transport to do our own target initialization
10820  *
10821  * can acquire and release the global mutex
10822  */
10823 /* ARGSUSED */
10824 static int
10825 fcp_phys_tgt_init(dev_info_t *hba_dip, dev_info_t *tgt_dip,
10826     scsi_hba_tran_t *hba_tran, struct scsi_device *sd)
10827 {
10828 	uchar_t			*bytes;
10829 	uint_t			nbytes;
10830 	uint16_t		lun_num;
10831 	struct fcp_tgt	*ptgt;
10832 	struct fcp_lun	*plun;
10833 	struct fcp_port	*pptr = (struct fcp_port *)
10834 	    hba_tran->tran_hba_private;
10835 
10836 	ASSERT(pptr != NULL);
10837 
10838 	FCP_DTRACE(fcp_logq, pptr->port_instbuf, fcp_trace,
10839 	    FCP_BUF_LEVEL_8, 0,
10840 	    "fcp_phys_tgt_init: called for %s (instance %d)",
10841 	    ddi_get_name(tgt_dip), ddi_get_instance(tgt_dip));
10842 
10843 	/* get our port WWN property */
10844 	bytes = NULL;
10845 	if ((scsi_device_prop_lookup_byte_array(sd, SCSI_DEVICE_PROP_PATH,
10846 	    PORT_WWN_PROP, &bytes, &nbytes) != DDI_PROP_SUCCESS) ||
10847 	    (nbytes != FC_WWN_SIZE)) {
10848 		/* no port WWN property */
10849 		FCP_DTRACE(fcp_logq, pptr->port_instbuf, fcp_trace,
10850 		    FCP_BUF_LEVEL_8, 0,
10851 		    "fcp_phys_tgt_init: Returning DDI_NOT_WELL_FORMED"
10852 		    " for %s (instance %d): bytes=%p nbytes=%x",
10853 		    ddi_get_name(tgt_dip), ddi_get_instance(tgt_dip), bytes,
10854 		    nbytes);
10855 
10856 		if (bytes != NULL) {
10857 			scsi_device_prop_free(sd, SCSI_DEVICE_PROP_PATH, bytes);
10858 		}
10859 
10860 		return (DDI_NOT_WELL_FORMED);
10861 	}
10862 	ASSERT(bytes != NULL);
10863 
10864 	lun_num = scsi_device_prop_get_int(sd, SCSI_DEVICE_PROP_PATH,
10865 	    LUN_PROP, 0xFFFF);
10866 	if (lun_num == 0xFFFF) {
10867 		FCP_DTRACE(fcp_logq, pptr->port_instbuf, fcp_trace,
10868 		    FCP_BUF_LEVEL_8, 0,
10869 		    "fcp_phys_tgt_init: Returning DDI_FAILURE:lun"
10870 		    " for %s (instance %d)", ddi_get_name(tgt_dip),
10871 		    ddi_get_instance(tgt_dip));
10872 
10873 		scsi_device_prop_free(sd, SCSI_DEVICE_PROP_PATH, bytes);
10874 		return (DDI_NOT_WELL_FORMED);
10875 	}
10876 
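	/*
	 * Look up the LUN matching the WWN and LUN number properties while
	 * holding the port mutex so it cannot go away before it is bound
	 * to this scsi_device.
	 */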
10877 	mutex_enter(&pptr->port_mutex);
10878 	if ((plun = fcp_lookup_lun(pptr, bytes, lun_num)) == NULL) {
10879 		mutex_exit(&pptr->port_mutex);
10880 		FCP_DTRACE(fcp_logq, pptr->port_instbuf, fcp_trace,
10881 		    FCP_BUF_LEVEL_8, 0,
10882 		    "fcp_phys_tgt_init: Returning DDI_FAILURE: No Lun"
10883 		    " for %s (instance %d)", ddi_get_name(tgt_dip),
10884 		    ddi_get_instance(tgt_dip));
10885 
10886 		scsi_device_prop_free(sd, SCSI_DEVICE_PROP_PATH, bytes);
10887 		return (DDI_FAILURE);
10888 	}
10889 
10890 	ASSERT(bcmp(plun->lun_tgt->tgt_port_wwn.raw_wwn, bytes,
10891 	    FC_WWN_SIZE) == 0);
10892 	ASSERT(plun->lun_num == lun_num);
10893 
10894 	scsi_device_prop_free(sd, SCSI_DEVICE_PROP_PATH, bytes);
10895 
10896 	ptgt = plun->lun_tgt;
10897 
10898 	mutex_enter(&ptgt->tgt_mutex);
10899 	plun->lun_tgt_count++;
10900 	scsi_device_hba_private_set(sd, plun);
10901 	plun->lun_state |= FCP_SCSI_LUN_TGT_INIT;
10902 	plun->lun_sd = sd;
10903 	mutex_exit(&ptgt->tgt_mutex);
10904 	mutex_exit(&pptr->port_mutex);
10905 
10906 	return (DDI_SUCCESS);
10907 }
10908 
10909 /*ARGSUSED*/
10910 static int
10911 fcp_virt_tgt_init(dev_info_t *hba_dip, dev_info_t *tgt_dip,
10912     scsi_hba_tran_t *hba_tran, struct scsi_device *sd)
10913 {
10914 	uchar_t			*bytes;
10915 	uint_t			nbytes;
10916 	uint16_t		lun_num;
10917 	struct fcp_tgt	*ptgt;
10918 	struct fcp_lun	*plun;
10919 	struct fcp_port	*pptr = (struct fcp_port *)
10920 	    hba_tran->tran_hba_private;
10921 	child_info_t		*cip;
10922 
10923 	ASSERT(pptr != NULL);
10924 
10925 	FCP_DTRACE(fcp_logq, pptr->port_instbuf,
10926 	    fcp_trace, FCP_BUF_LEVEL_8, 0,
10927 	    "fcp_virt_tgt_init: called for %s (instance %d) (hba_dip %p),"
10928 	    " (tgt_dip %p)", ddi_get_name(tgt_dip),
10929 	    ddi_get_instance(tgt_dip), hba_dip, tgt_dip);
10930 
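	/*
	 * An mpxio-managed child is expected to carry path information in
	 * sd_pathinfo; without it the node cannot be tied to a path and is
	 * treated as not well formed.
	 */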
10931 	cip = (child_info_t *)sd->sd_pathinfo;
10932 	if (cip == NULL) {
10933 		FCP_DTRACE(fcp_logq, pptr->port_instbuf,
10934 		    fcp_trace, FCP_BUF_LEVEL_8, 0,
10935 		    "fcp_virt_tgt_init: Returning DDI_NOT_WELL_FORMED"
10936 		    " for %s (instance %d)", ddi_get_name(tgt_dip),
10937 		    ddi_get_instance(tgt_dip));
10938 
10939 		return (DDI_NOT_WELL_FORMED);
10940 	}
10941 
10942 	/* get our port WWN property */
10943 	bytes = NULL;
10944 	if ((scsi_device_prop_lookup_byte_array(sd, SCSI_DEVICE_PROP_PATH,
10945 	    PORT_WWN_PROP, &bytes, &nbytes) != DDI_PROP_SUCCESS) ||
10946 	    (nbytes != FC_WWN_SIZE)) {
10947 		if (bytes) {
10948 			scsi_device_prop_free(sd, SCSI_DEVICE_PROP_PATH, bytes);
10949 		}
10950 		return (DDI_NOT_WELL_FORMED);
10951 	}
10952 
10953 	ASSERT(bytes != NULL);
10954 
10955 	lun_num = scsi_device_prop_get_int(sd, SCSI_DEVICE_PROP_PATH,
10956 	    LUN_PROP, 0xFFFF);
10957 	if (lun_num == 0xFFFF) {
10958 		FCP_DTRACE(fcp_logq, pptr->port_instbuf,
10959 		    fcp_trace, FCP_BUF_LEVEL_8, 0,
10960 		    "fcp_virt_tgt_init: Returning DDI_FAILURE:lun"
10961 		    " for %s (instance %d)", ddi_get_name(tgt_dip),
10962 		    ddi_get_instance(tgt_dip));
10963 
10964 		scsi_device_prop_free(sd, SCSI_DEVICE_PROP_PATH, bytes);
10965 		return (DDI_NOT_WELL_FORMED);
10966 	}
10967 
10968 	mutex_enter(&pptr->port_mutex);
10969 	if ((plun = fcp_lookup_lun(pptr, bytes, lun_num)) == NULL) {
10970 		mutex_exit(&pptr->port_mutex);
10971 		FCP_DTRACE(fcp_logq, pptr->port_instbuf,
10972 		    fcp_trace, FCP_BUF_LEVEL_8, 0,
10973 		    "fcp_virt_tgt_init: Returning DDI_FAILURE: No Lun"
10974 		    " for %s (instance %d)", ddi_get_name(tgt_dip),
10975 		    ddi_get_instance(tgt_dip));
10976 
10977 		scsi_device_prop_free(sd, SCSI_DEVICE_PROP_PATH, bytes);
10978 		return (DDI_FAILURE);
10979 	}
10980 
10981 	ASSERT(bcmp(plun->lun_tgt->tgt_port_wwn.raw_wwn, bytes,
10982 	    FC_WWN_SIZE) == 0);
10983 	ASSERT(plun->lun_num == lun_num);
10984 
10985 	scsi_device_prop_free(sd, SCSI_DEVICE_PROP_PATH, bytes);
10986 
10987 	ptgt = plun->lun_tgt;
10988 
10989 	mutex_enter(&ptgt->tgt_mutex);
10990 	plun->lun_tgt_count++;
10991 	scsi_device_hba_private_set(sd, plun);
10992 	plun->lun_state |= FCP_SCSI_LUN_TGT_INIT;
10993 	plun->lun_sd = sd;
10994 	mutex_exit(&ptgt->tgt_mutex);
10995 	mutex_exit(&pptr->port_mutex);
10996 
10997 	return (DDI_SUCCESS);
10998 }
10999 
11000 
11001 /*
11002  * called by the transport to do our own target initialization
11003  *
11004  * can acquire and release the global mutex
11005  */
11006 /* ARGSUSED */
11007 static int
11008 fcp_scsi_tgt_init(dev_info_t *hba_dip, dev_info_t *tgt_dip,
11009     scsi_hba_tran_t *hba_tran, struct scsi_device *sd)
11010 {
11011 	struct fcp_port	*pptr = (struct fcp_port *)
11012 	    hba_tran->tran_hba_private;
11013 	int			rval;
11014 
11015 	ASSERT(pptr != NULL);
11016 
11017 	/*
11018 	 * Child node is getting initialized.  Look at the mpxio component
11019 	 * type on the child device to see if this device is mpxio managed
11020 	 * or not.
11021 	 */
11022 	if (mdi_component_is_client(tgt_dip, NULL) == MDI_SUCCESS) {
11023 		rval = fcp_virt_tgt_init(hba_dip, tgt_dip, hba_tran, sd);
11024 	} else {
11025 		rval = fcp_phys_tgt_init(hba_dip, tgt_dip, hba_tran, sd);
11026 	}
11027 
11028 	return (rval);
11029 }
11030 
11031 
11032 /* ARGSUSED */
11033 static void
11034 fcp_scsi_tgt_free(dev_info_t *hba_dip, dev_info_t *tgt_dip,
11035     scsi_hba_tran_t *hba_tran, struct scsi_device *sd)
11036 {
11037 	struct fcp_lun	*plun = scsi_device_hba_private_get(sd);
11038 	struct fcp_tgt	*ptgt;
11039 
11040 	FCP_DTRACE(fcp_logq, LUN_PORT->port_instbuf,
11041 	    fcp_trace, FCP_BUF_LEVEL_8, 0,
11042 	    "fcp_scsi_tgt_free: called for tran %s%d, dev %s%d",
11043 	    ddi_get_name(hba_dip), ddi_get_instance(hba_dip),
11044 	    ddi_get_name(tgt_dip), ddi_get_instance(tgt_dip));
11045 
11046 	if (plun == NULL) {
11047 		return;
11048 	}
11049 	ptgt = plun->lun_tgt;
11050 
11051 	ASSERT(ptgt != NULL);
11052 
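	/*
	 * Drop this scsi_device's reference on the LUN; the TGT_INIT flag
	 * is cleared only when the last reference goes away.
	 */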
11053 	mutex_enter(&ptgt->tgt_mutex);
11054 	ASSERT(plun->lun_tgt_count > 0);
11055 
11056 	if (--plun->lun_tgt_count == 0) {
11057 		plun->lun_state &= ~FCP_SCSI_LUN_TGT_INIT;
11058 	}
11059 	plun->lun_sd = NULL;
11060 	mutex_exit(&ptgt->tgt_mutex);
11061 }
11062 
11063 /*
11064  *     Function: fcp_scsi_start
11065  *
11066  *  Description: This function is called by the target driver to request a
11067  *		 command to be sent.
11068  *
11069  *     Argument: *ap		SCSI address of the device.
11070  *		 *pkt		SCSI packet containing the cmd to send.
11071  *
11072  * Return Value: TRAN_ACCEPT
11073  *		 TRAN_BUSY
11074  *		 TRAN_BADPKT
11075  *		 TRAN_FATAL_ERROR
11076  */
11077 static int
11078 fcp_scsi_start(struct scsi_address *ap, struct scsi_pkt *pkt)
11079 {
11080 	struct fcp_port	*pptr = ADDR2FCP(ap);
11081 	struct fcp_lun	*plun = ADDR2LUN(ap);
11082 	struct fcp_pkt	*cmd = PKT2CMD(pkt);
11083 	struct fcp_tgt	*ptgt = plun->lun_tgt;
11084 	int			rval;
11085 
11086 	/* ensure command isn't already issued */
11087 	ASSERT(cmd->cmd_state != FCP_PKT_ISSUED);
11088 
11089 	FCP_DTRACE(fcp_logq, pptr->port_instbuf,
11090 	    fcp_trace, FCP_BUF_LEVEL_9, 0,
11091 	    "fcp_transport Invoked for %x", plun->lun_tgt->tgt_d_id);
11092 
11093 	/*
11094 	 * It is strange that we enter the fcp_port mutex and the target
11095 	 * mutex to check the lun state (which has a mutex of its own).
11096 	 */
11097 	mutex_enter(&pptr->port_mutex);
11098 	mutex_enter(&ptgt->tgt_mutex);
11099 
11100 	/*
11101 	 * If the device is offline and is not in the process of coming
11102 	 * online, fail the request.
11103 	 */
11104 
11105 	if ((plun->lun_state & FCP_LUN_OFFLINE) &&
11106 	    !(plun->lun_state & FCP_LUN_ONLINING)) {
11107 		mutex_exit(&ptgt->tgt_mutex);
11108 		mutex_exit(&pptr->port_mutex);
11109 
11110 		if (cmd->cmd_fp_pkt->pkt_pd == NULL) {
11111 			pkt->pkt_reason = CMD_DEV_GONE;
11112 		}
11113 
11114 		return (TRAN_FATAL_ERROR);
11115 	}
11116 	cmd->cmd_fp_pkt->pkt_timeout = pkt->pkt_time;
11117 
11118 	/*
11119 	 * If we are suspended, the kernel is trying to dump, so don't
11120 	 * block, fail or defer requests - send them down right away.
11121 	 * NOTE: If we are in panic (i.e. trying to dump), we can't
11122 	 * assume we have been suspended.  There is hardware such as
11123 	 * the v880 that doesn't do PM.	 Thus, the check for
11124 	 * ddi_in_panic.
11125 	 *
11126 	 * If FCP_STATE_IN_CB_DEVC is set, devices are in the process
11127 	 * of changing.	 So, if we can queue the packet, do it.	 Eventually,
11128 	 * either the device will have gone away or changed and we can fail
11129 	 * the request, or we can proceed if the device didn't change.
11130 	 *
11131 	 * If the pd in the target or the packet is NULL it's probably
11132 	 * because the device has gone away, we allow the request to be
11133 	 * put on the internal queue here in case the device comes back within
11134 	 * the offline timeout. fctl will fix up the pd's if the tgt_pd_handle
11135 	 * has gone NULL, while fcp deals cases where pkt_pd is NULL. pkt_pd
11136 	 * has gone NULL, while fcp deals with cases where pkt_pd is NULL. pkt_pd
11137 	 * packet initialization.
11138 	 */
11139 
11140 	if (((plun->lun_state & FCP_LUN_BUSY) && (!(pptr->port_state &
11141 	    FCP_STATE_SUSPENDED)) && !ddi_in_panic()) ||
11142 	    (pptr->port_state & (FCP_STATE_ONLINING | FCP_STATE_IN_CB_DEVC)) ||
11143 	    (ptgt->tgt_pd_handle == NULL) ||
11144 	    (cmd->cmd_fp_pkt->pkt_pd == NULL)) {
11145 		/*
11146 		 * If ((LUN is busy AND
11147 		 *	LUN not suspended AND
11148 		 *	The system is not in panic state) OR
11149 		 *	(The port is coming up))
11150 		 *
11151 		 * We check to see if any of the flags FLAG_NOINTR or
11152 		 * FLAG_NOQUEUE is set.	 If one of them is set the value
11153 		 * returned will be TRAN_BUSY.	If not, the request is queued.
11154 		 */
11155 		mutex_exit(&ptgt->tgt_mutex);
11156 		mutex_exit(&pptr->port_mutex);
11157 
11158 		/* see if using interrupts is allowed (so queueing'll work) */
11159 		if (pkt->pkt_flags & FLAG_NOINTR) {
11160 			pkt->pkt_resid = 0;
11161 			return (TRAN_BUSY);
11162 		}
11163 		if (pkt->pkt_flags & FLAG_NOQUEUE) {
11164 			FCP_DTRACE(fcp_logq, pptr->port_instbuf,
11165 			    fcp_trace, FCP_BUF_LEVEL_9, 0,
11166 			    "fcp_scsi_start: lun busy for pkt %p", pkt);
11167 			return (TRAN_BUSY);
11168 		}
11169 #ifdef	DEBUG
11170 		mutex_enter(&pptr->port_pkt_mutex);
11171 		pptr->port_npkts++;
11172 		mutex_exit(&pptr->port_pkt_mutex);
11173 #endif /* DEBUG */
11174 
11175 		/* go queue up the pkt for later */
11176 		fcp_queue_pkt(pptr, cmd);
11177 		return (TRAN_ACCEPT);
11178 	}
11179 	cmd->cmd_state = FCP_PKT_ISSUED;
11180 
11181 	mutex_exit(&ptgt->tgt_mutex);
11182 	mutex_exit(&pptr->port_mutex);
11183 
11184 	/*
11185 	 * Now that we released the mutexes, what was protected by them can
11186 	 * change.
11187 	 */
11188 
11189 	/*
11190 	 * If there is a reconfiguration in progress, wait for it to complete.
11191 	 */
11192 	fcp_reconfig_wait(pptr);
11193 
11194 	cmd->cmd_timeout = pkt->pkt_time ? fcp_watchdog_time +
11195 	    pkt->pkt_time : 0;
11196 
11197 	/* prepare the packet */
11198 
11199 	fcp_prepare_pkt(pptr, cmd, plun);
11200 
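	/*
	 * Propagate the target driver's timeout to the FC packet; if none
	 * was supplied, fall back to a generous default of 5 hours so the
	 * command is never left without a timeout.
	 */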
11201 	if (cmd->cmd_pkt->pkt_time) {
11202 		cmd->cmd_fp_pkt->pkt_timeout = cmd->cmd_pkt->pkt_time;
11203 	} else {
11204 		cmd->cmd_fp_pkt->pkt_timeout = 5 * 60 * 60;
11205 	}
11206 
11207 	/*
11208 	 * if interrupts aren't allowed (e.g. at dump time) then we'll
11209 	 * have to do polled I/O
11210 	 */
11211 	if (pkt->pkt_flags & FLAG_NOINTR) {
11212 		cmd->cmd_state &= ~FCP_PKT_ISSUED;
11213 		return (fcp_dopoll(pptr, cmd));
11214 	}
11215 
11216 #ifdef	DEBUG
11217 	mutex_enter(&pptr->port_pkt_mutex);
11218 	pptr->port_npkts++;
11219 	mutex_exit(&pptr->port_pkt_mutex);
11220 #endif /* DEBUG */
11221 
11222 	rval = fcp_transport(pptr->port_fp_handle, cmd->cmd_fp_pkt, 0);
11223 	if (rval == FC_SUCCESS) {
11224 		FCP_DTRACE(fcp_logq, pptr->port_instbuf,
11225 		    fcp_trace, FCP_BUF_LEVEL_9, 0,
11226 		    "fcp_transport success for %x", plun->lun_tgt->tgt_d_id);
11227 		return (TRAN_ACCEPT);
11228 	}
11229 
11230 	cmd->cmd_state = FCP_PKT_IDLE;
11231 
11232 #ifdef	DEBUG
11233 	mutex_enter(&pptr->port_pkt_mutex);
11234 	pptr->port_npkts--;
11235 	mutex_exit(&pptr->port_pkt_mutex);
11236 #endif /* DEBUG */
11237 
11238 	/*
11239 	 * For lack of clearer definitions, choose
11240 	 * between TRAN_BUSY and TRAN_FATAL_ERROR.
11241 	 */
11242 
11243 	if (rval == FC_TRAN_BUSY) {
11244 		pkt->pkt_resid = 0;
11245 		rval = TRAN_BUSY;
11246 	} else {
11247 		mutex_enter(&ptgt->tgt_mutex);
11248 		if (plun->lun_state & FCP_LUN_OFFLINE) {
11249 			child_info_t	*cip;
11250 
11251 			mutex_enter(&plun->lun_mutex);
11252 			cip = plun->lun_cip;
11253 			mutex_exit(&plun->lun_mutex);
11254 
11255 			FCP_TRACE(fcp_logq, pptr->port_instbuf,
11256 			    fcp_trace, FCP_BUF_LEVEL_6, 0,
11257 			    "fcp_transport failed 2 for %x: %x; dip=%p",
11258 			    plun->lun_tgt->tgt_d_id, rval, cip);
11259 
11260 			rval = TRAN_FATAL_ERROR;
11261 		} else {
11262 			if (pkt->pkt_flags & FLAG_NOQUEUE) {
11263 				FCP_DTRACE(fcp_logq, pptr->port_instbuf,
11264 				    fcp_trace, FCP_BUF_LEVEL_9, 0,
11265 				    "fcp_scsi_start: FC_BUSY for pkt %p",
11266 				    pkt);
11267 				rval = TRAN_BUSY;
11268 			} else {
11269 				rval = TRAN_ACCEPT;
11270 				fcp_queue_pkt(pptr, cmd);
11271 			}
11272 		}
11273 		mutex_exit(&ptgt->tgt_mutex);
11274 	}
11275 
11276 	return (rval);
11277 }
11278 
11279 /*
11280  * called by the transport to abort a packet
11281  */
11282 /*ARGSUSED*/
11283 static int
11284 fcp_scsi_abort(struct scsi_address *ap, struct scsi_pkt *pkt)
11285 {
11286 	int tgt_cnt;
11287 	struct fcp_port		*pptr = ADDR2FCP(ap);
11288 	struct fcp_lun	*plun = ADDR2LUN(ap);
11289 	struct fcp_tgt	*ptgt = plun->lun_tgt;
11290 
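	/*
	 * Only the "abort everything" form is supported: a NULL pkt aborts
	 * all outstanding commands for the target, while a request to abort
	 * an individual packet is refused.
	 */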
11291 	if (pkt == NULL) {
11292 		if (ptgt) {
11293 			mutex_enter(&ptgt->tgt_mutex);
11294 			tgt_cnt = ptgt->tgt_change_cnt;
11295 			mutex_exit(&ptgt->tgt_mutex);
11296 			fcp_abort_all(pptr, ptgt, plun, tgt_cnt);
11297 			return (TRUE);
11298 		}
11299 	}
11300 	return (FALSE);
11301 }
11302 
11303 
11304 /*
11305  * Perform reset
11306  */
11307 int
11308 fcp_scsi_reset(struct scsi_address *ap, int level)
11309 {
11310 	int			rval = 0;
11311 	struct fcp_port		*pptr = ADDR2FCP(ap);
11312 	struct fcp_lun	*plun = ADDR2LUN(ap);
11313 	struct fcp_tgt	*ptgt = plun->lun_tgt;
11314 
11315 	if (level == RESET_ALL) {
11316 		if (fcp_linkreset(pptr, ap, KM_NOSLEEP) == FC_SUCCESS) {
11317 			rval = 1;
11318 		}
11319 	} else if (level == RESET_TARGET || level == RESET_LUN) {
11320 		/*
11321 		 * If we are in the middle of discovery, return
11322 		 * SUCCESS as this target will be rediscovered
11323 		 * anyway
11324 		 */
11325 		mutex_enter(&ptgt->tgt_mutex);
11326 		if (ptgt->tgt_state & (FCP_TGT_OFFLINE | FCP_TGT_BUSY)) {
11327 			mutex_exit(&ptgt->tgt_mutex);
11328 			return (1);
11329 		}
11330 		mutex_exit(&ptgt->tgt_mutex);
11331 
11332 		if (fcp_reset_target(ap, level) == FC_SUCCESS) {
11333 			rval = 1;
11334 		}
11335 	}
11336 	return (rval);
11337 }
11338 
11339 
11340 /*
11341  * called by the framework to get a SCSI capability
11342  */
11343 static int
11344 fcp_scsi_getcap(struct scsi_address *ap, char *cap, int whom)
11345 {
11346 	return (fcp_commoncap(ap, cap, 0, whom, 0));
11347 }
11348 
11349 
11350 /*
11351  * called by the framework to set a SCSI capability
11352  */
11353 static int
11354 fcp_scsi_setcap(struct scsi_address *ap, char *cap, int value, int whom)
11355 {
11356 	return (fcp_commoncap(ap, cap, value, whom, 1));
11357 }
11358 
11359 /*
11360  *     Function: fcp_pkt_setup
11361  *
11362  *  Description: This function sets up the scsi_pkt structure passed by the
11363  *		 caller. This function assumes fcp_pkt_constructor has been
11364  *		 called previously for the packet passed by the caller.	 If
11365  *		 successful this call will have the following results:
11366  *
11367  *		   - The resources needed that will be constant throughout
11368  *		     the whole transaction are allocated.
11369  *		   - The fields that will be constant throughout the whole
11370  *		     transaction are initialized.
11371  *		   - The scsi packet will be linked to the LUN structure
11372  *		     addressed by the transaction.
11373  *
11374  *     Argument:
11375  *		 *pkt		Pointer to a scsi_pkt structure.
11376  *		 callback	Callback deciding whether allocation may sleep.
11377  *		 arg		Callback argument (unused).
11378  *
11379  * Return Value: 0	Success
11380  *		 !0	Failure
11381  *
11382  *	Context: Kernel context or interrupt context
11383  */
11384 /* ARGSUSED */
11385 static int
11386 fcp_pkt_setup(struct scsi_pkt *pkt,
11387     int (*callback)(caddr_t arg),
11388     caddr_t arg)
11389 {
11390 	struct fcp_pkt	*cmd;
11391 	struct fcp_port	*pptr;
11392 	struct fcp_lun	*plun;
11393 	struct fcp_tgt	*ptgt;
11394 	int		kf;
11395 	fc_packet_t	*fpkt;
11396 	fc_frame_hdr_t	*hp;
11397 
11398 	pptr = ADDR2FCP(&pkt->pkt_address);
11399 	plun = ADDR2LUN(&pkt->pkt_address);
11400 	ptgt = plun->lun_tgt;
11401 
11402 	cmd = (struct fcp_pkt *)pkt->pkt_ha_private;
11403 	fpkt = cmd->cmd_fp_pkt;
11404 
11405 	/*
11406 	 * this request is for dma allocation only
11407 	 */
11408 	/*
11409 	 * First step of fcp_scsi_init_pkt: pkt allocation
11410 	 * We determine if the caller is willing to wait for the
11411 	 * resources.
11412 	 */
11413 	kf = (callback == SLEEP_FUNC) ? KM_SLEEP : KM_NOSLEEP;
11414 
11415 	/*
11416 	 * Selective zeroing of the pkt.
11417 	 */
11418 	cmd->cmd_back = NULL;
11419 	cmd->cmd_next = NULL;
11420 
11421 	/*
11422 	 * Zero out fcp command
11423 	 */
11424 	bzero(&cmd->cmd_fcp_cmd, sizeof (cmd->cmd_fcp_cmd));
11425 
11426 	cmd->cmd_state = FCP_PKT_IDLE;
11427 
11428 	fpkt = cmd->cmd_fp_pkt;
11429 	fpkt->pkt_data_acc = NULL;
11430 
11431 	/*
11432 	 * When port_state is FCP_STATE_OFFLINE, remote_port (tgt_pd_handle)
11433 	 * could be destroyed.	We need to fail pkt_setup.
11434 	 */
11435 	if (pptr->port_state & FCP_STATE_OFFLINE) {
11436 		return (-1);
11437 	}
11438 
11439 	mutex_enter(&ptgt->tgt_mutex);
11440 	fpkt->pkt_pd = ptgt->tgt_pd_handle;
11441 
11442 	if (fc_ulp_init_packet(pptr->port_fp_handle, fpkt, kf)
11443 	    != FC_SUCCESS) {
11444 		mutex_exit(&ptgt->tgt_mutex);
11445 		return (-1);
11446 	}
11447 
11448 	mutex_exit(&ptgt->tgt_mutex);
11449 
11450 	/* Fill in the Fibre Channel header */
11451 	hp = &fpkt->pkt_cmd_fhdr;
11452 	hp->r_ctl = R_CTL_COMMAND;
11453 	hp->rsvd = 0;
11454 	hp->type = FC_TYPE_SCSI_FCP;
11455 	hp->f_ctl = F_CTL_SEQ_INITIATIVE | F_CTL_FIRST_SEQ;
11456 	hp->seq_id = 0;
11457 	hp->df_ctl  = 0;
11458 	hp->seq_cnt = 0;
11459 	hp->ox_id = 0xffff;
11460 	hp->rx_id = 0xffff;
11461 	hp->ro = 0;
11462 
11463 	/*
11464 	 * A doubly linked list (cmd_forw, cmd_back) is built
11465 	 * out of every allocated packet on a per-lun basis
11466 	 *
11467 	 * The packets are maintained in the list so as to satisfy
11468 	 * scsi_abort() requests. At present (which is unlikely to
11469 	 * change in the future) nobody performs a real scsi_abort
11470 	 * in the SCSI target drivers (as they don't keep the packets
11471 	 * after doing scsi_transport - so they don't know how to
11472 	 * abort a packet other than sending a NULL to abort all
11473 	 * outstanding packets)
11474 	 */
11475 	mutex_enter(&plun->lun_mutex);
11476 	if ((cmd->cmd_forw = plun->lun_pkt_head) != NULL) {
11477 		plun->lun_pkt_head->cmd_back = cmd;
11478 	} else {
11479 		plun->lun_pkt_tail = cmd;
11480 	}
11481 	plun->lun_pkt_head = cmd;
11482 	mutex_exit(&plun->lun_mutex);
11483 	return (0);
11484 }
11485 
11486 /*
11487  *     Function: fcp_pkt_teardown
11488  *
11489  *  Description: This function releases a scsi_pkt structure and all the
11490  *		 resources attached to it.
11491  *
11492  *     Argument: *pkt		Pointer to a scsi_pkt structure.
11493  *
11494  * Return Value: None
11495  *
11496  *	Context: User, Kernel or Interrupt context.
11497  */
11498 static void
11499 fcp_pkt_teardown(struct scsi_pkt *pkt)
11500 {
11501 	struct fcp_port	*pptr = ADDR2FCP(&pkt->pkt_address);
11502 	struct fcp_lun	*plun = ADDR2LUN(&pkt->pkt_address);
11503 	struct fcp_pkt	*cmd = (struct fcp_pkt *)pkt->pkt_ha_private;
11504 
11505 	/*
11506 	 * Remove the packet from the per-lun list
11507 	 */
11508 	mutex_enter(&plun->lun_mutex);
11509 	if (cmd->cmd_back) {
11510 		ASSERT(cmd != plun->lun_pkt_head);
11511 		cmd->cmd_back->cmd_forw = cmd->cmd_forw;
11512 	} else {
11513 		ASSERT(cmd == plun->lun_pkt_head);
11514 		plun->lun_pkt_head = cmd->cmd_forw;
11515 	}
11516 
11517 	if (cmd->cmd_forw) {
11518 		cmd->cmd_forw->cmd_back = cmd->cmd_back;
11519 	} else {
11520 		ASSERT(cmd == plun->lun_pkt_tail);
11521 		plun->lun_pkt_tail = cmd->cmd_back;
11522 	}
11523 
11524 	mutex_exit(&plun->lun_mutex);
11525 
11526 	(void) fc_ulp_uninit_packet(pptr->port_fp_handle, cmd->cmd_fp_pkt);
11527 }
11528 
11529 /*
11530  * Routine for reset notification setup, to register or cancel.
11531  * This function is called by SCSA
11532  */
11533 /*ARGSUSED*/
11534 static int
11535 fcp_scsi_reset_notify(struct scsi_address *ap, int flag,
11536     void (*callback)(caddr_t), caddr_t arg)
11537 {
11538 	struct fcp_port *pptr = ADDR2FCP(ap);
11539 
11540 	return (scsi_hba_reset_notify_setup(ap, flag, callback, arg,
11541 	    &pptr->port_mutex, &pptr->port_reset_notify_listf));
11542 }
11543 
11544 
11545 static int
11546 fcp_scsi_bus_get_eventcookie(dev_info_t *dip, dev_info_t *rdip, char *name,
11547     ddi_eventcookie_t *event_cookiep)
11548 {
11549 	struct fcp_port *pptr = fcp_dip2port(dip);
11550 
11551 	if (pptr == NULL) {
11552 		return (DDI_FAILURE);
11553 	}
11554 
11555 	return (ndi_event_retrieve_cookie(pptr->port_ndi_event_hdl, rdip, name,
11556 	    event_cookiep, NDI_EVENT_NOPASS));
11557 }
11558 
11559 
11560 static int
11561 fcp_scsi_bus_add_eventcall(dev_info_t *dip, dev_info_t *rdip,
11562     ddi_eventcookie_t eventid, void (*callback)(), void *arg,
11563     ddi_callback_id_t *cb_id)
11564 {
11565 	struct fcp_port *pptr = fcp_dip2port(dip);
11566 
11567 	if (pptr == NULL) {
11568 		return (DDI_FAILURE);
11569 	}
11570 
11571 	return (ndi_event_add_callback(pptr->port_ndi_event_hdl, rdip,
11572 	    eventid, callback, arg, NDI_SLEEP, cb_id));
11573 }
11574 
11575 
11576 static int
11577 fcp_scsi_bus_remove_eventcall(dev_info_t *dip, ddi_callback_id_t cb_id)
11578 {
11579 
11580 	struct fcp_port *pptr = fcp_dip2port(dip);
11581 
11582 	if (pptr == NULL) {
11583 		return (DDI_FAILURE);
11584 	}
11585 	return (ndi_event_remove_callback(pptr->port_ndi_event_hdl, cb_id));
11586 }
11587 
11588 
11589 /*
11590  * called by the transport to post an event
11591  */
11592 static int
11593 fcp_scsi_bus_post_event(dev_info_t *dip, dev_info_t *rdip,
11594     ddi_eventcookie_t eventid, void *impldata)
11595 {
11596 	struct fcp_port *pptr = fcp_dip2port(dip);
11597 
11598 	if (pptr == NULL) {
11599 		return (DDI_FAILURE);
11600 	}
11601 
11602 	return (ndi_event_run_callbacks(pptr->port_ndi_event_hdl, rdip,
11603 	    eventid, impldata));
11604 }
11605 
11606 
11607 /*
11608  * A target in many cases in Fibre Channel has a one-to-one relation
11609  * with a port identifier (which is also known as D_ID, and as AL_PA
11610  * in a private loop).  On Fibre Channel-to-SCSI bridge boxes a target
11611  * reset will most likely result in resetting all LUNs (which means a
11612  * reset will occur on all the SCSI devices connected at the other end
11613  * of the bridge).  That remains a favorite topic for discussion, for one
11614  * can debate as hotly as one likes and come up with arguably the best
11615  * solution to one's satisfaction.
11616  *
11617  * To stay on track and not digress much, here are the problems stated
11618  * briefly:
11619  *
11620  *	SCSA doesn't define RESET_LUN, It defines RESET_TARGET, but the
11621  *	SCSA doesn't define RESET_LUN; it defines RESET_TARGET, but the
11622  *	target drivers use RESET_TARGET even if their instance is on a
11623  *	LUN.  Doesn't that sound a bit broken?
11624  *	FCP SCSI (the current spec) only defines RESET TARGET in the
11625  *	control fields of an FCP_CMND structure. It should have been
11626  *	fixed right there, giving flexibility to the initiators to
11627  *	minimize havoc that could be caused by resetting a target.
11628  */
11629 static int
11630 fcp_reset_target(struct scsi_address *ap, int level)
11631 {
11632 	int			rval = FC_FAILURE;
11633 	char			lun_id[25];
11634 	struct fcp_port		*pptr = ADDR2FCP(ap);
11635 	struct fcp_lun	*plun = ADDR2LUN(ap);
11636 	struct fcp_tgt	*ptgt = plun->lun_tgt;
11637 	struct scsi_pkt		*pkt;
11638 	struct fcp_pkt	*cmd;
11639 	struct fcp_rsp		*rsp;
11640 	uint32_t		tgt_cnt;
11641 	struct fcp_rsp_info	*rsp_info;
11642 	struct fcp_reset_elem	*p;
11643 	int			bval;
11644 
11645 	if ((p = kmem_alloc(sizeof (struct fcp_reset_elem),
11646 	    KM_NOSLEEP)) == NULL) {
11647 		return (rval);
11648 	}
11649 
11650 	mutex_enter(&ptgt->tgt_mutex);
11651 	if (level == RESET_TARGET) {
11652 		if (ptgt->tgt_state & (FCP_TGT_OFFLINE | FCP_TGT_BUSY)) {
11653 			mutex_exit(&ptgt->tgt_mutex);
11654 			kmem_free(p, sizeof (struct fcp_reset_elem));
11655 			return (rval);
11656 		}
11657 		fcp_update_tgt_state(ptgt, FCP_SET, FCP_LUN_BUSY);
11658 		(void) strcpy(lun_id, " ");
11659 	} else {
11660 		if (plun->lun_state & (FCP_LUN_OFFLINE | FCP_LUN_BUSY)) {
11661 			mutex_exit(&ptgt->tgt_mutex);
11662 			kmem_free(p, sizeof (struct fcp_reset_elem));
11663 			return (rval);
11664 		}
11665 		fcp_update_lun_state(plun, FCP_SET, FCP_LUN_BUSY);
11666 
11667 		(void) sprintf(lun_id, ", LUN=%d", plun->lun_num);
11668 	}
11669 	tgt_cnt = ptgt->tgt_change_cnt;
11670 
11671 	mutex_exit(&ptgt->tgt_mutex);
11672 
11673 	if ((pkt = scsi_init_pkt(ap, NULL, NULL, 0, 0,
11674 	    0, 0, NULL, 0)) == NULL) {
11675 		kmem_free(p, sizeof (struct fcp_reset_elem));
11676 		mutex_enter(&ptgt->tgt_mutex);
11677 		fcp_update_tgt_state(ptgt, FCP_RESET, FCP_LUN_BUSY);
11678 		mutex_exit(&ptgt->tgt_mutex);
11679 		return (rval);
11680 	}
11681 	pkt->pkt_time = FCP_POLL_TIMEOUT;
11682 
11683 	/* fill in cmd part of packet */
11684 	cmd = PKT2CMD(pkt);
11685 	if (level == RESET_TARGET) {
11686 		cmd->cmd_fcp_cmd.fcp_cntl.cntl_reset_tgt = 1;
11687 	} else {
11688 		cmd->cmd_fcp_cmd.fcp_cntl.cntl_reset_lun = 1;
11689 	}
11690 	cmd->cmd_fp_pkt->pkt_comp = NULL;
11691 	cmd->cmd_pkt->pkt_flags |= FLAG_NOINTR;
11692 
11693 	/* prepare a packet for transport */
11694 	fcp_prepare_pkt(pptr, cmd, plun);
11695 
11696 	if (cmd->cmd_pkt->pkt_time) {
11697 		cmd->cmd_fp_pkt->pkt_timeout = cmd->cmd_pkt->pkt_time;
11698 	} else {
11699 		cmd->cmd_fp_pkt->pkt_timeout = 5 * 60 * 60;
11700 	}
11701 
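	/*
	 * Mark the port busy and issue the reset in polled mode, then let
	 * the port go idle again once fcp_dopoll() returns.
	 */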
11702 	(void) fc_ulp_busy_port(pptr->port_fp_handle);
11703 	bval = fcp_dopoll(pptr, cmd);
11704 	fc_ulp_idle_port(pptr->port_fp_handle);
11705 
11706 	/* check the outcome of the polled command */
11707 	if (bval == TRAN_ACCEPT) {
11708 		int error = 3;
11709 
11710 		rsp = (struct fcp_rsp *)cmd->cmd_fcp_rsp;
11711 		rsp_info = (struct fcp_rsp_info *)(cmd->cmd_fcp_rsp +
11712 		    sizeof (struct fcp_rsp));
11713 
11714 		if (rsp->fcp_u.fcp_status.rsp_len_set) {
11715 			if (fcp_validate_fcp_response(rsp, pptr) ==
11716 			    FC_SUCCESS) {
11717 				if (pptr->port_fcp_dma != FC_NO_DVMA_SPACE) {
11718 					FCP_CP_IN(cmd->cmd_fp_pkt->pkt_resp +
11719 					    sizeof (struct fcp_rsp), rsp_info,
11720 					    cmd->cmd_fp_pkt->pkt_resp_acc,
11721 					    sizeof (struct fcp_rsp_info));
11722 				}
11723 				if (rsp_info->rsp_code == FCP_NO_FAILURE) {
11724 					rval = FC_SUCCESS;
11725 					error = 0;
11726 				} else {
11727 					error = 1;
11728 				}
11729 			} else {
11730 				error = 2;
11731 			}
11732 		}
11733 
11734 		switch (error) {
11735 		case 0:
11736 			fcp_log(CE_WARN, pptr->port_dip,
11737 			    "!FCP: WWN 0x%08x%08x %s reset successfully",
11738 			    *((int *)&ptgt->tgt_port_wwn.raw_wwn[0]),
11739 			    *((int *)&ptgt->tgt_port_wwn.raw_wwn[4]), lun_id);
11740 			break;
11741 
11742 		case 1:
11743 			fcp_log(CE_WARN, pptr->port_dip,
11744 			    "!FCP: Reset to WWN	 0x%08x%08x %s failed,"
11745 			    " response code=%x",
11746 			    *((int *)&ptgt->tgt_port_wwn.raw_wwn[0]),
11747 			    *((int *)&ptgt->tgt_port_wwn.raw_wwn[4]), lun_id,
11748 			    rsp_info->rsp_code);
11749 			break;
11750 
11751 		case 2:
11752 			fcp_log(CE_WARN, pptr->port_dip,
11753 			    "!FCP: Reset to WWN 0x%08x%08x %s failed,"
11754 			    " Bad FCP response values: rsvd1=%x,"
11755 			    " rsvd2=%x, sts-rsvd1=%x, sts-rsvd2=%x,"
11756 			    " rsplen=%x, senselen=%x",
11757 			    *((int *)&ptgt->tgt_port_wwn.raw_wwn[0]),
11758 			    *((int *)&ptgt->tgt_port_wwn.raw_wwn[4]), lun_id,
11759 			    rsp->reserved_0, rsp->reserved_1,
11760 			    rsp->fcp_u.fcp_status.reserved_0,
11761 			    rsp->fcp_u.fcp_status.reserved_1,
11762 			    rsp->fcp_response_len, rsp->fcp_sense_len);
11763 			break;
11764 
11765 		default:
11766 			fcp_log(CE_WARN, pptr->port_dip,
11767 			    "!FCP: Reset to WWN	 0x%08x%08x %s failed",
11768 			    *((int *)&ptgt->tgt_port_wwn.raw_wwn[0]),
11769 			    *((int *)&ptgt->tgt_port_wwn.raw_wwn[4]), lun_id);
11770 			break;
11771 		}
11772 	}
11773 	scsi_destroy_pkt(pkt);
11774 
11775 	if (rval == FC_FAILURE) {
11776 		mutex_enter(&ptgt->tgt_mutex);
11777 		if (level == RESET_TARGET) {
11778 			fcp_update_tgt_state(ptgt, FCP_RESET, FCP_LUN_BUSY);
11779 		} else {
11780 			fcp_update_lun_state(plun, FCP_RESET, FCP_LUN_BUSY);
11781 		}
11782 		mutex_exit(&ptgt->tgt_mutex);
11783 		kmem_free(p, sizeof (struct fcp_reset_elem));
11784 		return (rval);
11785 	}
11786 
11787 	mutex_enter(&pptr->port_mutex);
11788 	if (level == RESET_TARGET) {
11789 		p->tgt = ptgt;
11790 		p->lun = NULL;
11791 	} else {
11792 		p->tgt = NULL;
11793 		p->lun = plun;
11794 	}
11795 	p->tgt = ptgt;
11796 	p->tgt_cnt = tgt_cnt;
11797 	p->timeout = fcp_watchdog_time + FCP_RESET_DELAY;
11798 	p->next = pptr->port_reset_list;
11799 	pptr->port_reset_list = p;
11800 
11801 	FCP_TRACE(fcp_logq, pptr->port_instbuf,
11802 	    fcp_trace, FCP_BUF_LEVEL_3, 0,
11803 	    "Notify ssd of the reset to reinstate the reservations");
11804 
11805 	scsi_hba_reset_notify_callback(&pptr->port_mutex,
11806 	    &pptr->port_reset_notify_listf);
11807 
11808 	mutex_exit(&pptr->port_mutex);
11809 
11810 	return (rval);
11811 }
11812 
11813 
11814 /*
11815  * called by fcp_getcap and fcp_setcap to get and set (respectively)
11816  * SCSI capabilities
11817  */
11818 /* ARGSUSED */
11819 static int
11820 fcp_commoncap(struct scsi_address *ap, char *cap,
11821     int val, int tgtonly, int doset)
11822 {
11823 	struct fcp_port		*pptr = ADDR2FCP(ap);
11824 	struct fcp_lun	*plun = ADDR2LUN(ap);
11825 	struct fcp_tgt	*ptgt = plun->lun_tgt;
11826 	int			cidx;
11827 	int			rval = FALSE;
11828 
11829 	if (cap == (char *)0) {
11830 		FCP_TRACE(fcp_logq, pptr->port_instbuf,
11831 		    fcp_trace, FCP_BUF_LEVEL_3, 0,
11832 		    "fcp_commoncap: invalid arg");
11833 		return (rval);
11834 	}
11835 
11836 	if ((cidx = scsi_hba_lookup_capstr(cap)) == -1) {
11837 		return (UNDEFINED);
11838 	}
11839 
11840 	/*
11841 	 * Process setcap request.
11842 	 */
11843 	if (doset) {
11844 		/*
11845 		 * At present, we can only set binary (0/1) values
11846 		 */
11847 		switch (cidx) {
11848 		case SCSI_CAP_ARQ:
11849 			if (val == 0) {
11850 				rval = FALSE;
11851 			} else {
11852 				rval = TRUE;
11853 			}
11854 			break;
11855 
11856 		case SCSI_CAP_LUN_RESET:
11857 			if (val) {
11858 				plun->lun_cap |= FCP_LUN_CAP_RESET;
11859 			} else {
11860 				plun->lun_cap &= ~FCP_LUN_CAP_RESET;
11861 			}
11862 			rval = TRUE;
11863 			break;
11864 
11865 		case SCSI_CAP_SECTOR_SIZE:
11866 			rval = TRUE;
11867 			break;
11868 		default:
11869 			FCP_TRACE(fcp_logq, pptr->port_instbuf,
11870 			    fcp_trace, FCP_BUF_LEVEL_4, 0,
11871 			    "fcp_setcap: unsupported %d", cidx);
11872 			rval = UNDEFINED;
11873 			break;
11874 		}
11875 
11876 		FCP_TRACE(fcp_logq, pptr->port_instbuf,
11877 		    fcp_trace, FCP_BUF_LEVEL_5, 0,
11878 		    "set cap: cap=%s, val/tgtonly/doset/rval = "
11879 		    "0x%x/0x%x/0x%x/%d",
11880 		    cap, val, tgtonly, doset, rval);
11881 
11882 	} else {
11883 		/*
11884 		 * Process getcap request.
11885 		 */
11886 		switch (cidx) {
11887 		case SCSI_CAP_DMA_MAX:
11888 			rval = (int)pptr->port_data_dma_attr.dma_attr_maxxfer;
11889 
11890 			/*
11891 			 * An adjustment is needed: qlc reports a 64-bit
11892 			 * uint_t while st uses an int, so we make the
11893 			 * adjustment here since nobody wants to touch this.
11894 			 * It still leaves a maximum single block length
11895 			 * of 2 gig, which should last.
11896 			 */
11897 
11898 			if (rval == -1) {
11899 				rval = MAX_INT_DMA;
11900 			}
11901 
11902 			break;
11903 
11904 		case SCSI_CAP_INITIATOR_ID:
11905 			rval = pptr->port_id;
11906 			break;
11907 
11908 		case SCSI_CAP_ARQ:
11909 		case SCSI_CAP_RESET_NOTIFICATION:
11910 		case SCSI_CAP_TAGGED_QING:
11911 			rval = TRUE;
11912 			break;
11913 
11914 		case SCSI_CAP_SCSI_VERSION:
11915 			rval = 3;
11916 			break;
11917 
11918 		case SCSI_CAP_INTERCONNECT_TYPE:
11919 			if (FC_TOP_EXTERNAL(pptr->port_topology) ||
11920 			    (ptgt->tgt_hard_addr == 0)) {
11921 				rval = INTERCONNECT_FABRIC;
11922 			} else {
11923 				rval = INTERCONNECT_FIBRE;
11924 			}
11925 			break;
11926 
11927 		case SCSI_CAP_LUN_RESET:
11928 			rval = ((plun->lun_cap & FCP_LUN_CAP_RESET) != 0) ?
11929 			    TRUE : FALSE;
11930 			break;
11931 
11932 		default:
11933 			FCP_TRACE(fcp_logq, pptr->port_instbuf,
11934 			    fcp_trace, FCP_BUF_LEVEL_4, 0,
11935 			    "fcp_getcap: unsupported %d", cidx);
11936 			rval = UNDEFINED;
11937 			break;
11938 		}
11939 
11940 		FCP_TRACE(fcp_logq, pptr->port_instbuf,
11941 		    fcp_trace, FCP_BUF_LEVEL_8, 0,
11942 		    "get cap: cap=%s, val/tgtonly/doset/rval = "
11943 		    "0x%x/0x%x/0x%x/%d",
11944 		    cap, val, tgtonly, doset, rval);
11945 	}
11946 
11947 	return (rval);
11948 }
11949 
11950 /*
11951  * called by the transport to get the port-wwn and lun
11952  * properties of this device, and to create a "name" based on them
11953  *
11954  * these properties don't exist on sun4m
11955  *
11956  * return 1 for success else return 0
11957  */
11958 /* ARGSUSED */
11959 static int
11960 fcp_scsi_get_name(struct scsi_device *sd, char *name, int len)
11961 {
11962 	int			i;
11963 	int			*lun;
11964 	int			numChars;
11965 	uint_t			nlun;
11966 	uint_t			count;
11967 	uint_t			nbytes;
11968 	uchar_t			*bytes;
11969 	uint16_t		lun_num;
11970 	uint32_t		tgt_id;
11971 	char			**conf_wwn;
11972 	char			tbuf[(FC_WWN_SIZE << 1) + 1];
11973 	uchar_t			barray[FC_WWN_SIZE];
11974 	dev_info_t		*tgt_dip;
11975 	struct fcp_tgt	*ptgt;
11976 	struct fcp_port	*pptr;
11977 	struct fcp_lun	*plun;
11978 
11979 	ASSERT(sd != NULL);
11980 	ASSERT(name != NULL);
11981 
11982 	tgt_dip = sd->sd_dev;
11983 	pptr = ddi_get_soft_state(fcp_softstate,
11984 	    ddi_get_instance(ddi_get_parent(tgt_dip)));
11985 	if (pptr == NULL) {
11986 		return (0);
11987 	}
11988 
11989 	ASSERT(tgt_dip != NULL);
11990 
11991 	if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY, sd->sd_dev,
11992 	    DDI_PROP_DONTPASS | DDI_PROP_NOTPROM,
11993 	    LUN_PROP, &lun, &nlun) != DDI_SUCCESS) {
11994 		name[0] = '\0';
11995 		return (0);
11996 	}
11997 
11998 	if (nlun == 0) {
11999 		ddi_prop_free(lun);
12000 		return (0);
12001 	}
12002 
12003 	lun_num = lun[0];
12004 	ddi_prop_free(lun);
12005 
12006 	/*
12007 	 * Lookup for .conf WWN property
12008 	 */
12009 	if (ddi_prop_lookup_string_array(DDI_DEV_T_ANY, tgt_dip,
12010 	    DDI_PROP_DONTPASS | DDI_PROP_NOTPROM, CONF_WWN_PROP,
12011 	    &conf_wwn, &count) == DDI_PROP_SUCCESS) {
12012 		ASSERT(count >= 1);
12013 
12014 		fcp_ascii_to_wwn(conf_wwn[0], barray, FC_WWN_SIZE);
12015 		ddi_prop_free(conf_wwn);
12016 		mutex_enter(&pptr->port_mutex);
12017 		if ((plun = fcp_lookup_lun(pptr, barray, lun_num)) == NULL) {
12018 			mutex_exit(&pptr->port_mutex);
12019 			return (0);
12020 		}
12021 		ptgt = plun->lun_tgt;
12022 		mutex_exit(&pptr->port_mutex);
12023 
12024 		(void) ndi_prop_update_byte_array(DDI_DEV_T_NONE,
12025 		    tgt_dip, PORT_WWN_PROP, barray, FC_WWN_SIZE);
12026 
12027 		if (!FC_TOP_EXTERNAL(pptr->port_topology) &&
12028 		    ptgt->tgt_hard_addr != 0) {
12029 			tgt_id = (uint32_t)fcp_alpa_to_switch[
12030 			    ptgt->tgt_hard_addr];
12031 		} else {
12032 			tgt_id = ptgt->tgt_d_id;
12033 		}
12034 
12035 		(void) ndi_prop_update_int(DDI_DEV_T_NONE, tgt_dip,
12036 		    TARGET_PROP, tgt_id);
12037 	}
12038 
12039 	/* get our port-wwn property */
12040 	bytes = NULL;
12041 	if ((ddi_prop_lookup_byte_array(DDI_DEV_T_ANY, tgt_dip,
12042 	    DDI_PROP_DONTPASS | DDI_PROP_NOTPROM, PORT_WWN_PROP, &bytes,
12043 	    &nbytes) != DDI_PROP_SUCCESS) || nbytes != FC_WWN_SIZE) {
12044 		if (bytes != NULL) {
12045 			ddi_prop_free(bytes);
12046 		}
12047 		return (0);
12048 	}
12049 
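	/*
	 * Render the 8-byte port WWN as 16 hex characters and build the
	 * unit address in the "wWWN,LUN" form expected by the framework.
	 */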
12050 	for (i = 0; i < FC_WWN_SIZE; i++) {
12051 		(void) sprintf(&tbuf[i << 1], "%02x", *(bytes + i));
12052 	}
12053 
12054 	/* Stick in the address of the form "wWWN,LUN" */
12055 	numChars = snprintf(name, len, "w%s,%x", tbuf, lun_num);
12056 
12057 	ASSERT(numChars < len);
12058 	if (numChars >= len) {
12059 		fcp_log(CE_WARN, pptr->port_dip,
12060 		    "!fcp_scsi_get_name: "
12061 		    "name parameter length too small, it needs to be %d",
12062 		    numChars+1);
12063 	}
12064 
12065 	ddi_prop_free(bytes);
12066 
12067 	return (1);
12068 }
12069 
12070 
12071 /*
12072  * called by the transport to get the SCSI target id value, returning
12073  * it in "name"
12074  *
12075  * this isn't needed/used on sun4m
12076  *
12077  * return 1 for success else return 0
12078  */
12079 /* ARGSUSED */
12080 static int
12081 fcp_scsi_get_bus_addr(struct scsi_device *sd, char *name, int len)
12082 {
12083 	struct fcp_lun	*plun = ADDR2LUN(&sd->sd_address);
12084 	struct fcp_tgt	*ptgt;
12085 	int    numChars;
12086 
12087 	if (plun == NULL) {
12088 		return (0);
12089 	}
12090 
12091 	if ((ptgt = plun->lun_tgt) == NULL) {
12092 		return (0);
12093 	}
12094 
12095 	numChars = snprintf(name, len, "%x", ptgt->tgt_d_id);
12096 
12097 	ASSERT(numChars < len);
12098 	if (numChars >= len) {
12099 		fcp_log(CE_WARN, NULL,
12100 		    "!fcp_scsi_get_bus_addr: "
12101 		    "name parameter length too small, it needs to be %d",
12102 		    numChars+1);
12103 	}
12104 
12105 	return (1);
12106 }
12107 
12108 
12109 /*
12110  * called internally to reset the link where the specified port lives
12111  */
12112 static int
12113 fcp_linkreset(struct fcp_port *pptr, struct scsi_address *ap, int sleep)
12114 {
12115 	la_wwn_t		wwn;
12116 	struct fcp_lun	*plun;
12117 	struct fcp_tgt	*ptgt;
12118 
12119 	/* disable restart of lip if we're suspended */
12120 	mutex_enter(&pptr->port_mutex);
12121 
12122 	if (pptr->port_state & (FCP_STATE_SUSPENDED |
12123 	    FCP_STATE_POWER_DOWN)) {
12124 		mutex_exit(&pptr->port_mutex);
12125 		FCP_TRACE(fcp_logq, pptr->port_instbuf,
12126 		    fcp_trace, FCP_BUF_LEVEL_2, 0,
12127 		    "fcp_linkreset, fcp%d: link reset "
12128 		    "disabled due to DDI_SUSPEND",
12129 		    ddi_get_instance(pptr->port_dip));
12130 		return (FC_FAILURE);
12131 	}
12132 
12133 	if (pptr->port_state & (FCP_STATE_OFFLINE | FCP_STATE_ONLINING)) {
12134 		mutex_exit(&pptr->port_mutex);
12135 		return (FC_SUCCESS);
12136 	}
12137 
12138 	FCP_DTRACE(fcp_logq, pptr->port_instbuf,
12139 	    fcp_trace, FCP_BUF_LEVEL_8, 0, "Forcing link reset");
12140 
12141 	/*
12142 	 * If ap == NULL assume local link reset.
12143 	 */
12144 	if (FC_TOP_EXTERNAL(pptr->port_topology) && (ap != NULL)) {
12145 		plun = ADDR2LUN(ap);
12146 		ptgt = plun->lun_tgt;
12147 		bcopy(&ptgt->tgt_port_wwn.raw_wwn[0], &wwn, sizeof (wwn));
12148 	} else {
12149 		bzero((caddr_t)&wwn, sizeof (wwn));
12150 	}
12151 	mutex_exit(&pptr->port_mutex);
12152 
12153 	return (fc_ulp_linkreset(pptr->port_fp_handle, &wwn, sleep));
12154 }
12155 
12156 
12157 /*
12158  * called from fcp_port_attach() to resume a port
12159  * return DDI_* success/failure status
12160  * acquires and releases the global mutex
12161  * acquires and releases the port mutex
12162  */
12163 /*ARGSUSED*/
12164 
12165 static int
12166 fcp_handle_port_resume(opaque_t ulph, fc_ulp_port_info_t *pinfo,
12167     uint32_t s_id, fc_attach_cmd_t cmd, int instance)
12168 {
12169 	int			res = DDI_FAILURE; /* default result */
12170 	struct fcp_port	*pptr;		/* port state ptr */
12171 	uint32_t		alloc_cnt;
12172 	uint32_t		max_cnt;
12173 	fc_portmap_t		*tmp_list = NULL;
12174 
12175 	FCP_DTRACE(fcp_logq, "fcp", fcp_trace,
12176 	    FCP_BUF_LEVEL_8, 0, "port resume: for port %d",
12177 	    instance);
12178 
12179 	if ((pptr = ddi_get_soft_state(fcp_softstate, instance)) == NULL) {
12180 		cmn_err(CE_WARN, "fcp: bad soft state");
12181 		return (res);
12182 	}
12183 
12184 	mutex_enter(&pptr->port_mutex);
12185 	switch (cmd) {
12186 	case FC_CMD_RESUME:
12187 		ASSERT((pptr->port_state & FCP_STATE_POWER_DOWN) == 0);
12188 		pptr->port_state &= ~FCP_STATE_SUSPENDED;
12189 		break;
12190 
12191 	case FC_CMD_POWER_UP:
12192 		/*
12193 		 * If the port is DDI_SUSPENded, defer rediscovery
12194 		 * until DDI_RESUME occurs
12195 		 */
12196 		if (pptr->port_state & FCP_STATE_SUSPENDED) {
12197 			pptr->port_state &= ~FCP_STATE_POWER_DOWN;
12198 			mutex_exit(&pptr->port_mutex);
12199 			return (DDI_SUCCESS);
12200 		}
12201 		pptr->port_state &= ~FCP_STATE_POWER_DOWN;
12202 	}
12203 	pptr->port_id = s_id;
12204 	pptr->port_state = FCP_STATE_INIT;
12205 	mutex_exit(&pptr->port_mutex);
12206 
12207 	/*
12208 	 * Make a copy of ulp_port_info as fctl allocates
12209 	 * a temp struct.
12210 	 */
12211 	(void) fcp_cp_pinfo(pptr, pinfo);
12212 
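	/*
	 * Start the global watchdog the first time any port needs it;
	 * fcp_watchdog_tick converts the timeout (in seconds) into clock
	 * ticks.
	 */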
12213 	mutex_enter(&fcp_global_mutex);
12214 	if (fcp_watchdog_init++ == 0) {
12215 		fcp_watchdog_tick = fcp_watchdog_timeout *
12216 		    drv_usectohz(1000000);
12217 		fcp_watchdog_id = timeout(fcp_watch,
12218 		    NULL, fcp_watchdog_tick);
12219 	}
12220 	mutex_exit(&fcp_global_mutex);
12221 
12222 	/*
12223 	 * Handle various topologies and link states.
12224 	 */
12225 	switch (FC_PORT_STATE_MASK(pptr->port_phys_state)) {
12226 	case FC_STATE_OFFLINE:
12227 		/*
12228 		 * Wait for ONLINE, at which time a state
12229 		 * change will cause a statec_callback
12230 		 */
12231 		res = DDI_SUCCESS;
12232 		break;
12233 
12234 	case FC_STATE_ONLINE:
12235 
12236 		if (pptr->port_topology == FC_TOP_UNKNOWN) {
12237 			(void) fcp_linkreset(pptr, NULL, KM_NOSLEEP);
12238 			res = DDI_SUCCESS;
12239 			break;
12240 		}
12241 
12242 		if (FC_TOP_EXTERNAL(pptr->port_topology) &&
12243 		    !fcp_enable_auto_configuration) {
12244 			tmp_list = fcp_construct_map(pptr, &alloc_cnt);
12245 			if (tmp_list == NULL) {
12246 				if (!alloc_cnt) {
12247 					res = DDI_SUCCESS;
12248 				}
12249 				break;
12250 			}
12251 			max_cnt = alloc_cnt;
12252 		} else {
12253 			ASSERT(pptr->port_topology != FC_TOP_UNKNOWN);
12254 
12255 			alloc_cnt = FCP_MAX_DEVICES;
12256 
12257 			if ((tmp_list = (fc_portmap_t *)kmem_zalloc(
12258 			    (sizeof (fc_portmap_t)) * alloc_cnt,
12259 			    KM_NOSLEEP)) == NULL) {
12260 				fcp_log(CE_WARN, pptr->port_dip,
12261 				    "!fcp%d: failed to allocate portmap",
12262 				    instance);
12263 				break;
12264 			}
12265 
12266 			max_cnt = alloc_cnt;
12267 			if ((res = fc_ulp_getportmap(pptr->port_fp_handle,
12268 			    &tmp_list, &max_cnt, FC_ULP_PLOGI_PRESERVE)) !=
12269 			    FC_SUCCESS) {
12270 				caddr_t msg;
12271 
12272 				(void) fc_ulp_error(res, &msg);
12273 
12274 				FCP_TRACE(fcp_logq, pptr->port_instbuf,
12275 				    fcp_trace, FCP_BUF_LEVEL_2, 0,
12276 				    "resume failed getportmap: reason=0x%x",
12277 				    res);
12278 
12279 				fcp_log(CE_WARN, pptr->port_dip,
12280 				    "!failed to get port map : %s", msg);
12281 				break;
12282 			}
12283 			if (max_cnt > alloc_cnt) {
12284 				alloc_cnt = max_cnt;
12285 			}
12286 		}
12287 
12288 		/*
12289 		 * do the SCSI device discovery and create
12290 		 * the devinfos
12291 		 */
12292 		fcp_statec_callback(ulph, pptr->port_fp_handle,
12293 		    pptr->port_phys_state, pptr->port_topology, tmp_list,
12294 		    max_cnt, pptr->port_id);
12295 
12296 		res = DDI_SUCCESS;
12297 		break;
12298 
12299 	default:
12300 		fcp_log(CE_WARN, pptr->port_dip,
12301 		    "!fcp%d: invalid port state at attach=0x%x",
12302 		    instance, pptr->port_phys_state);
12303 
12304 		mutex_enter(&pptr->port_mutex);
12305 		pptr->port_phys_state = FCP_STATE_OFFLINE;
12306 		mutex_exit(&pptr->port_mutex);
12307 		res = DDI_SUCCESS;
12308 
12309 		break;
12310 	}
12311 
12312 	if (tmp_list != NULL) {
12313 		kmem_free(tmp_list, sizeof (fc_portmap_t) * alloc_cnt);
12314 	}
12315 
12316 	return (res);
12317 }
12318 
12319 
12320 static void
12321 fcp_cp_pinfo(struct fcp_port *pptr, fc_ulp_port_info_t *pinfo)
12322 {
12323 	pptr->port_fp_modlinkage = *pinfo->port_linkage;
12324 	pptr->port_dip = pinfo->port_dip;
12325 	pptr->port_fp_handle = pinfo->port_handle;
12326 	if (pinfo->port_acc_attr != NULL) {
12327 		/*
12328 		 * FCA supports DMA
12329 		 */
12330 		pptr->port_data_dma_attr = *pinfo->port_data_dma_attr;
12331 		pptr->port_cmd_dma_attr = *pinfo->port_cmd_dma_attr;
12332 		pptr->port_resp_dma_attr = *pinfo->port_resp_dma_attr;
12333 		pptr->port_dma_acc_attr = *pinfo->port_acc_attr;
12334 	}
12335 	pptr->port_priv_pkt_len = pinfo->port_fca_pkt_size;
12336 	pptr->port_max_exch = pinfo->port_fca_max_exch;
12337 	pptr->port_phys_state = pinfo->port_state;
12338 	pptr->port_topology = pinfo->port_flags;
12339 	pptr->port_reset_action = pinfo->port_reset_action;
12340 	pptr->port_cmds_dma_flags = pinfo->port_dma_behavior;
12341 	pptr->port_fcp_dma = pinfo->port_fcp_dma;
12342 	bcopy(&pinfo->port_nwwn, &pptr->port_nwwn, sizeof (la_wwn_t));
12343 	bcopy(&pinfo->port_pwwn, &pptr->port_pwwn, sizeof (la_wwn_t));
12344 
12345 	/* Clear FMA caps to avoid fm-capability ereport */
12346 	if (pptr->port_cmd_dma_attr.dma_attr_flags & DDI_DMA_FLAGERR)
12347 		pptr->port_cmd_dma_attr.dma_attr_flags &= ~DDI_DMA_FLAGERR;
12348 	if (pptr->port_data_dma_attr.dma_attr_flags & DDI_DMA_FLAGERR)
12349 		pptr->port_data_dma_attr.dma_attr_flags &= ~DDI_DMA_FLAGERR;
12350 	if (pptr->port_resp_dma_attr.dma_attr_flags & DDI_DMA_FLAGERR)
12351 		pptr->port_resp_dma_attr.dma_attr_flags &= ~DDI_DMA_FLAGERR;
12352 }
12353 
12354 /*
12355  * If the element's wait field is set to 1, another thread is
12356  * waiting for the operation to complete.  Once it is complete, the
12357  * waiting thread is signaled and the element is freed by the waiting
12358  * thread.  If the element's wait field is set to 0, the element is
12359  * freed here.
12360  */
12361 static void
12362 fcp_process_elem(struct fcp_hp_elem *elem, int result)
12363 {
12364 	ASSERT(elem != NULL);
12365 	mutex_enter(&elem->mutex);
12366 	elem->result = result;
12367 	if (elem->wait) {
12368 		elem->wait = 0;
12369 		cv_signal(&elem->cv);
12370 		mutex_exit(&elem->mutex);
12371 	} else {
12372 		mutex_exit(&elem->mutex);
12373 		cv_destroy(&elem->cv);
12374 		mutex_destroy(&elem->mutex);
12375 		kmem_free(elem, sizeof (struct fcp_hp_elem));
12376 	}
12377 }
12378 
12379 /*
12380  * This function is invoked from the taskq thread to allocate
12381  * devinfo nodes and to online/offline them.
12382  */
12383 static void
12384 fcp_hp_task(void *arg)
12385 {
12386 	struct fcp_hp_elem	*elem = (struct fcp_hp_elem *)arg;
12387 	struct fcp_lun	*plun = elem->lun;
12388 	struct fcp_port		*pptr = elem->port;
12389 	int			result;
12390 
12391 	ASSERT(elem->what == FCP_ONLINE ||
12392 	    elem->what == FCP_OFFLINE ||
12393 	    elem->what == FCP_MPXIO_PATH_CLEAR_BUSY ||
12394 	    elem->what == FCP_MPXIO_PATH_SET_BUSY);
12395 
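	/*
	 * Bail out with NDI_FAILURE if a newer event has superseded this
	 * element or if the port is suspended, detaching or powering down;
	 * the devinfo tree must not be touched in those cases.
	 */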
12396 	mutex_enter(&pptr->port_mutex);
12397 	mutex_enter(&plun->lun_mutex);
12398 	if (((elem->what == FCP_ONLINE || elem->what == FCP_OFFLINE) &&
12399 	    plun->lun_event_count != elem->event_cnt) ||
12400 	    pptr->port_state & (FCP_STATE_SUSPENDED |
12401 	    FCP_STATE_DETACHING | FCP_STATE_POWER_DOWN)) {
12402 		mutex_exit(&plun->lun_mutex);
12403 		mutex_exit(&pptr->port_mutex);
12404 		fcp_process_elem(elem, NDI_FAILURE);
12405 		return;
12406 	}
12407 	mutex_exit(&plun->lun_mutex);
12408 	mutex_exit(&pptr->port_mutex);
12409 
12410 	result = fcp_trigger_lun(plun, elem->cip, elem->old_lun_mpxio,
12411 	    elem->what, elem->link_cnt, elem->tgt_cnt, elem->flags);
12412 	fcp_process_elem(elem, result);
12413 }
12414 
12415 
12416 static child_info_t *
12417 fcp_get_cip(struct fcp_lun *plun, child_info_t *cip, int lcount,
12418     int tcount)
12419 {
12420 	ASSERT(MUTEX_HELD(&plun->lun_mutex));
12421 
12422 	if (fcp_is_child_present(plun, cip) == FC_FAILURE) {
12423 		struct fcp_port *pptr = plun->lun_tgt->tgt_port;
12424 
12425 		ASSERT(MUTEX_HELD(&pptr->port_mutex));
12426 		/*
12427 		 * Child has not been created yet. Create the child device
12428 		 * based on the per-Lun flags.
12429 		 */
12430 		if (pptr->port_mpxio == 0 || plun->lun_mpxio == 0) {
12431 			plun->lun_cip =
12432 			    CIP(fcp_create_dip(plun, lcount, tcount));
12433 			plun->lun_mpxio = 0;
12434 		} else {
12435 			plun->lun_cip =
12436 			    CIP(fcp_create_pip(plun, lcount, tcount));
12437 			plun->lun_mpxio = 1;
12438 		}
12439 	} else {
12440 		plun->lun_cip = cip;
12441 	}
12442 
12443 	return (plun->lun_cip);
12444 }
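
/*
 * A child_info_t is an opaque handle that refers either to a dev_info_t
 * (for non-MPxIO LUNs) or to an mdi_pathinfo_t (for MPxIO LUNs); the CIP(),
 * DIP() and PIP() macros used throughout this code simply convert between
 * those views.  An illustrative sketch of a typical caller of the routine
 * above (the real call sites, with their own link/target counters, are
 * elsewhere in the driver):
 *
 *	mutex_enter(&pptr->port_mutex);
 *	mutex_enter(&plun->lun_mutex);
 *	cip = fcp_get_cip(plun, plun->lun_cip, link_cnt, tgt_cnt);
 *	mutex_exit(&plun->lun_mutex);
 *	mutex_exit(&pptr->port_mutex);
 */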
12445 
12446 
12447 static int
12448 fcp_is_dip_present(struct fcp_lun *plun, dev_info_t *cdip)
12449 {
12450 	int		rval = FC_FAILURE;
12451 	dev_info_t	*pdip;
12452 	struct dev_info	*dip;
12453 	int		circular;
12454 
12455 	ASSERT(MUTEX_HELD(&plun->lun_mutex));
12456 
12457 	pdip = plun->lun_tgt->tgt_port->port_dip;
12458 
12459 	if (plun->lun_cip == NULL) {
12460 		FCP_TRACE(fcp_logq, LUN_PORT->port_instbuf,
12461 		    fcp_trace, FCP_BUF_LEVEL_3, 0,
12462 		    "fcp_is_dip_present: plun->lun_cip is NULL: "
12463 		    "plun: %p lun state: %x num: %d target state: %x",
12464 		    plun, plun->lun_state, plun->lun_num,
12465 		    plun->lun_tgt->tgt_port->port_state);
12466 		return (rval);
12467 	}
12468 	ndi_devi_enter(pdip, &circular);
12469 	dip = DEVI(pdip)->devi_child;
12470 	while (dip) {
12471 		if (dip == DEVI(cdip)) {
12472 			rval = FC_SUCCESS;
12473 			break;
12474 		}
12475 		dip = dip->devi_sibling;
12476 	}
12477 	ndi_devi_exit(pdip, circular);
12478 	return (rval);
12479 }
12480 
12481 static int
12482 fcp_is_child_present(struct fcp_lun *plun, child_info_t *cip)
12483 {
12484 	int		rval = FC_FAILURE;
12485 
12486 	ASSERT(plun != NULL);
12487 	ASSERT(MUTEX_HELD(&plun->lun_mutex));
12488 
12489 	if (plun->lun_mpxio == 0) {
12490 		rval = fcp_is_dip_present(plun, DIP(cip));
12491 	} else {
12492 		rval = fcp_is_pip_present(plun, PIP(cip));
12493 	}
12494 
12495 	return (rval);
12496 }
12497 
12498 /*
12499  *     Function: fcp_create_dip
12500  *
12501  *  Description: Creates a dev_info_t structure for the LUN specified by the
12502  *		 caller.
12503  *
12504  *     Argument: plun		Lun structure
12505  *		 link_cnt	Link state count.
12506  *		 tgt_cnt	Target state change count.
12507  *
12508  * Return Value: NULL if it failed
12509  *		 dev_info_t structure address if it succeeded
12510  *
12511  *	Context: Kernel context
12512  */
12513 static dev_info_t *
12514 fcp_create_dip(struct fcp_lun *plun, int link_cnt, int tgt_cnt)
12515 {
12516 	int			failure = 0;
12517 	uint32_t		tgt_id;
12518 	uint64_t		sam_lun;
12519 	struct fcp_tgt	*ptgt = plun->lun_tgt;
12520 	struct fcp_port	*pptr = ptgt->tgt_port;
12521 	dev_info_t		*pdip = pptr->port_dip;
12522 	dev_info_t		*cdip = NULL;
12523 	dev_info_t		*old_dip = DIP(plun->lun_cip);
12524 	char			*nname = NULL;
12525 	char			**compatible = NULL;
12526 	int			ncompatible;
12527 	char			*scsi_binding_set;
12528 	char			t_pwwn[17];
12529 
12530 	ASSERT(MUTEX_HELD(&plun->lun_mutex));
12531 	ASSERT(MUTEX_HELD(&pptr->port_mutex));
12532 
12533 	/* get the 'scsi-binding-set' property */
12534 	if (ddi_prop_lookup_string(DDI_DEV_T_ANY, pdip,
12535 	    DDI_PROP_NOTPROM | DDI_PROP_DONTPASS, "scsi-binding-set",
12536 	    &scsi_binding_set) != DDI_PROP_SUCCESS) {
12537 		scsi_binding_set = NULL;
12538 	}
12539 
12540 	/* determine the node name and compatible */
12541 	scsi_hba_nodename_compatible_get(&plun->lun_inq, scsi_binding_set,
12542 	    plun->lun_inq.inq_dtype, NULL, &nname, &compatible, &ncompatible);
12543 	if (scsi_binding_set) {
12544 		ddi_prop_free(scsi_binding_set);
12545 	}
12546 
12547 	if (nname == NULL) {
12548 #ifdef	DEBUG
12549 		cmn_err(CE_WARN, "%s%d: no driver for "
12550 		    "device @w%02x%02x%02x%02x%02x%02x%02x%02x,%d:"
12551 		    "	 compatible: %s",
12552 		    ddi_driver_name(pdip), ddi_get_instance(pdip),
12553 		    ptgt->tgt_port_wwn.raw_wwn[0],
12554 		    ptgt->tgt_port_wwn.raw_wwn[1],
12555 		    ptgt->tgt_port_wwn.raw_wwn[2],
12556 		    ptgt->tgt_port_wwn.raw_wwn[3],
12557 		    ptgt->tgt_port_wwn.raw_wwn[4],
12558 		    ptgt->tgt_port_wwn.raw_wwn[5],
12559 		    ptgt->tgt_port_wwn.raw_wwn[6],
12560 		    ptgt->tgt_port_wwn.raw_wwn[7], plun->lun_num,
12561 		    *compatible);
12562 #endif	/* DEBUG */
12563 		failure++;
12564 		goto end_of_fcp_create_dip;
12565 	}
12566 
12567 	cdip = fcp_find_existing_dip(plun, pdip, nname);
12568 
12569 	/*
	 * If the old_dip does not match the cdip, some property has changed.
	 * Since we will be using the cdip, we need to offline the old_dip.
	 * If the state contains FCP_LUN_CHANGED, the dtype for the device
	 * has been updated: offline the old device and create a new device
	 * with the new device type.
	 * Refer to bug: 4764752
12576 	 */
12577 	if (old_dip && (cdip != old_dip ||
12578 	    plun->lun_state & FCP_LUN_CHANGED)) {
12579 		plun->lun_state &= ~(FCP_LUN_INIT);
12580 		mutex_exit(&plun->lun_mutex);
12581 		mutex_exit(&pptr->port_mutex);
12582 
12583 		mutex_enter(&ptgt->tgt_mutex);
12584 		(void) fcp_pass_to_hp(pptr, plun, CIP(old_dip), FCP_OFFLINE,
12585 		    link_cnt, tgt_cnt, NDI_DEVI_REMOVE, 0);
12586 		mutex_exit(&ptgt->tgt_mutex);
12587 
12588 #ifdef DEBUG
12589 		if (cdip != NULL) {
12590 			FCP_TRACE(fcp_logq, pptr->port_instbuf,
12591 			    fcp_trace, FCP_BUF_LEVEL_2, 0,
12592 			    "Old dip=%p; New dip=%p don't match", old_dip,
12593 			    cdip);
12594 		} else {
12595 			FCP_TRACE(fcp_logq, pptr->port_instbuf,
12596 			    fcp_trace, FCP_BUF_LEVEL_2, 0,
12597 			    "Old dip=%p; New dip=NULL don't match", old_dip);
12598 		}
12599 #endif
12600 
12601 		mutex_enter(&pptr->port_mutex);
12602 		mutex_enter(&plun->lun_mutex);
12603 	}
12604 
12605 	if (cdip == NULL || plun->lun_state & FCP_LUN_CHANGED) {
12606 		plun->lun_state &= ~(FCP_LUN_CHANGED);
12607 		if (ndi_devi_alloc(pptr->port_dip, nname,
12608 		    DEVI_SID_NODEID, &cdip) != NDI_SUCCESS) {
12609 			failure++;
12610 			goto end_of_fcp_create_dip;
12611 		}
12612 	}
12613 
12614 	/*
12615 	 * Previously all the properties for the devinfo were destroyed here
12616 	 * with a call to ndi_prop_remove_all(). Since this may cause loss of
12617 	 * the devid property (and other properties established by the target
12618 	 * driver or framework) which the code does not always recreate, this
12619 	 * call was removed.
12620 	 * This opens a theoretical possibility that we may return with a
12621 	 * stale devid on the node if the scsi entity behind the fibre channel
12622 	 * lun has changed.
12623 	 */
12624 
12625 	/* decorate the node with compatible */
12626 	if (ndi_prop_update_string_array(DDI_DEV_T_NONE, cdip,
12627 	    "compatible", compatible, ncompatible) != DDI_PROP_SUCCESS) {
12628 		failure++;
12629 		goto end_of_fcp_create_dip;
12630 	}
12631 
12632 	if (ndi_prop_update_byte_array(DDI_DEV_T_NONE, cdip, NODE_WWN_PROP,
12633 	    ptgt->tgt_node_wwn.raw_wwn, FC_WWN_SIZE) != DDI_PROP_SUCCESS) {
12634 		failure++;
12635 		goto end_of_fcp_create_dip;
12636 	}
12637 
12638 	if (ndi_prop_update_byte_array(DDI_DEV_T_NONE, cdip, PORT_WWN_PROP,
12639 	    ptgt->tgt_port_wwn.raw_wwn, FC_WWN_SIZE) != DDI_PROP_SUCCESS) {
12640 		failure++;
12641 		goto end_of_fcp_create_dip;
12642 	}
12643 
12644 	fcp_wwn_to_ascii(ptgt->tgt_port_wwn.raw_wwn, t_pwwn);
12645 	t_pwwn[16] = '\0';
12646 	if (ndi_prop_update_string(DDI_DEV_T_NONE, cdip, TGT_PORT_PROP, t_pwwn)
12647 	    != DDI_PROP_SUCCESS) {
12648 		failure++;
12649 		goto end_of_fcp_create_dip;
12650 	}
12651 
12652 	/*
	 * If there is no hard address, we might have to deal with that by
	 * using the WWN.  Having said that, it is important to recognize
	 * this problem early so ssd can be informed of the right
	 * interconnect type.
12657 	 */
12658 	if (!FC_TOP_EXTERNAL(pptr->port_topology) && ptgt->tgt_hard_addr != 0) {
12659 		tgt_id = (uint32_t)fcp_alpa_to_switch[ptgt->tgt_hard_addr];
12660 	} else {
12661 		tgt_id = ptgt->tgt_d_id;
12662 	}
12663 
12664 	if (ndi_prop_update_int(DDI_DEV_T_NONE, cdip, TARGET_PROP,
12665 	    tgt_id) != DDI_PROP_SUCCESS) {
12666 		failure++;
12667 		goto end_of_fcp_create_dip;
12668 	}
12669 
12670 	if (ndi_prop_update_int(DDI_DEV_T_NONE, cdip, LUN_PROP,
12671 	    (int)plun->lun_num) != DDI_PROP_SUCCESS) {
12672 		failure++;
12673 		goto end_of_fcp_create_dip;
12674 	}
12675 	bcopy(&plun->lun_addr, &sam_lun, FCP_LUN_SIZE);
12676 	if (ndi_prop_update_int64(DDI_DEV_T_NONE, cdip, SAM_LUN_PROP,
12677 	    sam_lun) != DDI_PROP_SUCCESS) {
12678 		failure++;
12679 		goto end_of_fcp_create_dip;
12680 	}
12681 
12682 end_of_fcp_create_dip:
12683 	scsi_hba_nodename_compatible_free(nname, compatible);
12684 
12685 	if (cdip != NULL && failure) {
12686 		(void) ndi_prop_remove_all(cdip);
12687 		(void) ndi_devi_free(cdip);
12688 		cdip = NULL;
12689 	}
12690 
12691 	return (cdip);
12692 }
12693 
12694 /*
12695  *     Function: fcp_create_pip
12696  *
 *  Description: Creates an mdi_pathinfo_t node for the LUN specified by
 *		 the caller.
12698  *
12699  *     Argument: plun		Lun structure
12700  *		 link_cnt	Link state count.
12701  *		 tgt_cnt	Target state count.
12702  *
12703  * Return Value: NULL if it failed
12704  *		 mdi_pathinfo_t structure address if it succeeded
12705  *
12706  *	Context: Kernel context
12707  */
12708 static mdi_pathinfo_t *
12709 fcp_create_pip(struct fcp_lun *plun, int lcount, int tcount)
12710 {
12711 	int			i;
12712 	char			buf[MAXNAMELEN];
12713 	char			uaddr[MAXNAMELEN];
12714 	int			failure = 0;
12715 	uint32_t		tgt_id;
12716 	uint64_t		sam_lun;
12717 	struct fcp_tgt	*ptgt = plun->lun_tgt;
12718 	struct fcp_port	*pptr = ptgt->tgt_port;
12719 	dev_info_t		*pdip = pptr->port_dip;
12720 	mdi_pathinfo_t		*pip = NULL;
12721 	mdi_pathinfo_t		*old_pip = PIP(plun->lun_cip);
12722 	char			*nname = NULL;
12723 	char			**compatible = NULL;
12724 	int			ncompatible;
12725 	char			*scsi_binding_set;
12726 	char			t_pwwn[17];
12727 
12728 	ASSERT(MUTEX_HELD(&plun->lun_mutex));
12729 	ASSERT(MUTEX_HELD(&pptr->port_mutex));
12730 
12731 	scsi_binding_set = "vhci";
12732 
12733 	/* determine the node name and compatible */
12734 	scsi_hba_nodename_compatible_get(&plun->lun_inq, scsi_binding_set,
12735 	    plun->lun_inq.inq_dtype, NULL, &nname, &compatible, &ncompatible);
12736 
12737 	if (nname == NULL) {
12738 #ifdef	DEBUG
		cmn_err(CE_WARN, "fcp_create_pip: %s%d: no driver for "
12740 		    "device @w%02x%02x%02x%02x%02x%02x%02x%02x,%d:"
12741 		    "	 compatible: %s",
12742 		    ddi_driver_name(pdip), ddi_get_instance(pdip),
12743 		    ptgt->tgt_port_wwn.raw_wwn[0],
12744 		    ptgt->tgt_port_wwn.raw_wwn[1],
12745 		    ptgt->tgt_port_wwn.raw_wwn[2],
12746 		    ptgt->tgt_port_wwn.raw_wwn[3],
12747 		    ptgt->tgt_port_wwn.raw_wwn[4],
12748 		    ptgt->tgt_port_wwn.raw_wwn[5],
12749 		    ptgt->tgt_port_wwn.raw_wwn[6],
12750 		    ptgt->tgt_port_wwn.raw_wwn[7], plun->lun_num,
12751 		    *compatible);
12752 #endif	/* DEBUG */
12753 		failure++;
12754 		goto end_of_fcp_create_pip;
12755 	}
12756 
12757 	pip = fcp_find_existing_pip(plun, pdip);
12758 
12759 	/*
	 * If the old_pip does not match the pip we just looked up, some
	 * property has changed.  Since we will be using the new pip, we
	 * need to offline the old_pip.  If the state contains
	 * FCP_LUN_CHANGED, the dtype for the device has been updated:
	 * offline the old device and create a new device with the new
	 * device type.
	 * Refer to bug: 4764752
12766 	 */
12767 	if (old_pip && (pip != old_pip ||
12768 	    plun->lun_state & FCP_LUN_CHANGED)) {
12769 		plun->lun_state &= ~(FCP_LUN_INIT);
12770 		mutex_exit(&plun->lun_mutex);
12771 		mutex_exit(&pptr->port_mutex);
12772 
12773 		mutex_enter(&ptgt->tgt_mutex);
12774 		(void) fcp_pass_to_hp(pptr, plun, CIP(old_pip),
12775 		    FCP_OFFLINE, lcount, tcount,
12776 		    NDI_DEVI_REMOVE, 0);
12777 		mutex_exit(&ptgt->tgt_mutex);
12778 
12779 		if (pip != NULL) {
12780 			FCP_TRACE(fcp_logq, pptr->port_instbuf,
12781 			    fcp_trace, FCP_BUF_LEVEL_2, 0,
12782 			    "Old pip=%p; New pip=%p don't match",
12783 			    old_pip, pip);
12784 		} else {
12785 			FCP_TRACE(fcp_logq, pptr->port_instbuf,
12786 			    fcp_trace, FCP_BUF_LEVEL_2, 0,
12787 			    "Old pip=%p; New pip=NULL don't match",
12788 			    old_pip);
12789 		}
12790 
12791 		mutex_enter(&pptr->port_mutex);
12792 		mutex_enter(&plun->lun_mutex);
12793 	}
12794 
12795 	/*
	 * Since FC_WWN_SIZE is 8 bytes, unlike lun_guid_size which depends
	 * on the target, the same truncation should not happen here UNLESS
	 * the standards change FC_WWN_SIZE to something larger than
	 * MAXNAMELEN (currently 255 bytes).
12801 	 */
12802 
12803 	for (i = 0; i < FC_WWN_SIZE; i++) {
12804 		(void) sprintf(&buf[i << 1], "%02x",
12805 		    ptgt->tgt_port_wwn.raw_wwn[i]);
12806 	}
12807 
12808 	(void) snprintf(uaddr, MAXNAMELEN, "w%s,%x",
12809 	    buf, plun->lun_num);
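	/*
	 * As an example, a (hypothetical) target port WWN of
	 * 50:06:0e:80:10:46:2b:04 and LUN number 2 would yield
	 * buf = "50060e8010462b04" and uaddr = "w50060e8010462b04,2",
	 * the unit address used to identify this path below.
	 */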
12810 
12811 	if (pip == NULL || plun->lun_state & FCP_LUN_CHANGED) {
12812 		/*
		 * Release the locks before calling into
		 * mdi_pi_alloc_compatible() since it can call back into
		 * fcp, which can lead to a deadlock (see bug # 4870272).
12817 		 *
12818 		 * Basically, what we are trying to avoid is the scenario where
12819 		 * one thread does ndi_devi_enter() and tries to grab
12820 		 * fcp_mutex and another does it the other way round.
12821 		 *
12822 		 * But before we do that, make sure that nobody releases the
12823 		 * port in the meantime. We can do this by setting a flag.
12824 		 */
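		/*
		 * An illustrative interleaving of the lock-order inversion
		 * described above (thread labels are hypothetical):
		 *
		 *	Thread A			Thread B
		 *	--------			--------
		 *	ndi_devi_enter(pdip)		mutex_enter(fcp_mutex)
		 *	mutex_enter(fcp_mutex)		ndi_devi_enter(pdip)
		 *	    ...blocks on B...		    ...blocks on A...
		 */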
12825 		plun->lun_state &= ~(FCP_LUN_CHANGED);
12826 		pptr->port_state |= FCP_STATE_IN_MDI;
12827 		mutex_exit(&plun->lun_mutex);
12828 		mutex_exit(&pptr->port_mutex);
12829 		if (mdi_pi_alloc_compatible(pdip, nname, plun->lun_guid,
12830 		    uaddr, compatible, ncompatible, 0, &pip) != MDI_SUCCESS) {
12831 			fcp_log(CE_WARN, pptr->port_dip,
			    "!path alloc failed: 0x%p", plun);
12833 			mutex_enter(&pptr->port_mutex);
12834 			mutex_enter(&plun->lun_mutex);
12835 			pptr->port_state &= ~FCP_STATE_IN_MDI;
12836 			failure++;
12837 			goto end_of_fcp_create_pip;
12838 		}
12839 		mutex_enter(&pptr->port_mutex);
12840 		mutex_enter(&plun->lun_mutex);
12841 		pptr->port_state &= ~FCP_STATE_IN_MDI;
12842 	} else {
12843 		(void) mdi_prop_remove(pip, NULL);
12844 	}
12845 
12846 	mdi_pi_set_phci_private(pip, (caddr_t)plun);
12847 
12848 	if (mdi_prop_update_byte_array(pip, NODE_WWN_PROP,
12849 	    ptgt->tgt_node_wwn.raw_wwn, FC_WWN_SIZE)
12850 	    != DDI_PROP_SUCCESS) {
12851 		failure++;
12852 		goto end_of_fcp_create_pip;
12853 	}
12854 
12855 	if (mdi_prop_update_byte_array(pip, PORT_WWN_PROP,
12856 	    ptgt->tgt_port_wwn.raw_wwn, FC_WWN_SIZE)
12857 	    != DDI_PROP_SUCCESS) {
12858 		failure++;
12859 		goto end_of_fcp_create_pip;
12860 	}
12861 
12862 	fcp_wwn_to_ascii(ptgt->tgt_port_wwn.raw_wwn, t_pwwn);
12863 	t_pwwn[16] = '\0';
12864 	if (mdi_prop_update_string(pip, TGT_PORT_PROP, t_pwwn)
12865 	    != DDI_PROP_SUCCESS) {
12866 		failure++;
12867 		goto end_of_fcp_create_pip;
12868 	}
12869 
12870 	/*
	 * If there is no hard address, we might have to deal with that by
	 * using the WWN.  Having said that, it is important to recognize
	 * this problem early so ssd can be informed of the right
	 * interconnect type.
12875 	 */
12876 	if (!FC_TOP_EXTERNAL(pptr->port_topology) &&
12877 	    ptgt->tgt_hard_addr != 0) {
12878 		tgt_id = (uint32_t)
12879 		    fcp_alpa_to_switch[ptgt->tgt_hard_addr];
12880 	} else {
12881 		tgt_id = ptgt->tgt_d_id;
12882 	}
12883 
12884 	if (mdi_prop_update_int(pip, TARGET_PROP, tgt_id)
12885 	    != DDI_PROP_SUCCESS) {
12886 		failure++;
12887 		goto end_of_fcp_create_pip;
12888 	}
12889 
12890 	if (mdi_prop_update_int(pip, LUN_PROP, (int)plun->lun_num)
12891 	    != DDI_PROP_SUCCESS) {
12892 		failure++;
12893 		goto end_of_fcp_create_pip;
12894 	}
12895 	bcopy(&plun->lun_addr, &sam_lun, FCP_LUN_SIZE);
12896 	if (mdi_prop_update_int64(pip, SAM_LUN_PROP, sam_lun)
12897 	    != DDI_PROP_SUCCESS) {
12898 		failure++;
12899 		goto end_of_fcp_create_pip;
12900 	}
12901 
12902 end_of_fcp_create_pip:
12903 	scsi_hba_nodename_compatible_free(nname, compatible);
12904 
12905 	if (pip != NULL && failure) {
12906 		(void) mdi_prop_remove(pip, NULL);
12907 		mutex_exit(&plun->lun_mutex);
12908 		mutex_exit(&pptr->port_mutex);
12909 		(void) mdi_pi_free(pip, 0);
12910 		mutex_enter(&pptr->port_mutex);
12911 		mutex_enter(&plun->lun_mutex);
12912 		pip = NULL;
12913 	}
12914 
12915 	return (pip);
12916 }
12917 
12918 static dev_info_t *
12919 fcp_find_existing_dip(struct fcp_lun *plun, dev_info_t *pdip, caddr_t name)
12920 {
12921 	uint_t			nbytes;
12922 	uchar_t			*bytes;
12923 	uint_t			nwords;
12924 	uint32_t		tgt_id;
12925 	int			*words;
12926 	dev_info_t		*cdip;
12927 	dev_info_t		*ndip;
12928 	struct fcp_tgt	*ptgt = plun->lun_tgt;
12929 	struct fcp_port	*pptr = ptgt->tgt_port;
12930 	int			circular;
12931 
12932 	ndi_devi_enter(pdip, &circular);
12933 
12934 	ndip = (dev_info_t *)DEVI(pdip)->devi_child;
12935 	while ((cdip = ndip) != NULL) {
12936 		ndip = (dev_info_t *)DEVI(cdip)->devi_sibling;
12937 
12938 		if (strcmp(DEVI(cdip)->devi_node_name, name)) {
12939 			continue;
12940 		}
12941 
12942 		if (ddi_prop_lookup_byte_array(DDI_DEV_T_ANY, cdip,
12943 		    DDI_PROP_DONTPASS | DDI_PROP_NOTPROM, NODE_WWN_PROP, &bytes,
12944 		    &nbytes) != DDI_PROP_SUCCESS) {
12945 			continue;
12946 		}
12947 
12948 		if (nbytes != FC_WWN_SIZE || bytes == NULL) {
12949 			if (bytes != NULL) {
12950 				ddi_prop_free(bytes);
12951 			}
12952 			continue;
12953 		}
12954 		ASSERT(bytes != NULL);
12955 
12956 		if (bcmp(bytes, ptgt->tgt_node_wwn.raw_wwn, nbytes) != 0) {
12957 			ddi_prop_free(bytes);
12958 			continue;
12959 		}
12960 
12961 		ddi_prop_free(bytes);
12962 
12963 		if (ddi_prop_lookup_byte_array(DDI_DEV_T_ANY, cdip,
12964 		    DDI_PROP_DONTPASS | DDI_PROP_NOTPROM, PORT_WWN_PROP, &bytes,
12965 		    &nbytes) != DDI_PROP_SUCCESS) {
12966 			continue;
12967 		}
12968 
12969 		if (nbytes != FC_WWN_SIZE || bytes == NULL) {
12970 			if (bytes != NULL) {
12971 				ddi_prop_free(bytes);
12972 			}
12973 			continue;
12974 		}
12975 		ASSERT(bytes != NULL);
12976 
12977 		if (bcmp(bytes, ptgt->tgt_port_wwn.raw_wwn, nbytes) != 0) {
12978 			ddi_prop_free(bytes);
12979 			continue;
12980 		}
12981 
12982 		ddi_prop_free(bytes);
12983 
12984 		if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY, cdip,
12985 		    DDI_PROP_DONTPASS | DDI_PROP_NOTPROM, TARGET_PROP, &words,
12986 		    &nwords) != DDI_PROP_SUCCESS) {
12987 			continue;
12988 		}
12989 
12990 		if (nwords != 1 || words == NULL) {
12991 			if (words != NULL) {
12992 				ddi_prop_free(words);
12993 			}
12994 			continue;
12995 		}
12996 		ASSERT(words != NULL);
12997 
12998 		/*
		 * If there is no hard address, we might have to deal with
		 * that by using the WWN.  Having said that, it is important
		 * to recognize this problem early so ssd can be informed of
		 * the right interconnect type.
13003 		 */
13004 		if (!FC_TOP_EXTERNAL(pptr->port_topology) &&
13005 		    ptgt->tgt_hard_addr != 0) {
13006 			tgt_id =
13007 			    (uint32_t)fcp_alpa_to_switch[ptgt->tgt_hard_addr];
13008 		} else {
13009 			tgt_id = ptgt->tgt_d_id;
13010 		}
13011 
13012 		if (tgt_id != (uint32_t)*words) {
13013 			ddi_prop_free(words);
13014 			continue;
13015 		}
13016 		ddi_prop_free(words);
13017 
13018 		if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY, cdip,
13019 		    DDI_PROP_DONTPASS | DDI_PROP_NOTPROM, LUN_PROP, &words,
13020 		    &nwords) != DDI_PROP_SUCCESS) {
13021 			continue;
13022 		}
13023 
13024 		if (nwords != 1 || words == NULL) {
13025 			if (words != NULL) {
13026 				ddi_prop_free(words);
13027 			}
13028 			continue;
13029 		}
13030 		ASSERT(words != NULL);
13031 
13032 		if (plun->lun_num == (uint16_t)*words) {
13033 			ddi_prop_free(words);
13034 			break;
13035 		}
13036 		ddi_prop_free(words);
13037 	}
13038 	ndi_devi_exit(pdip, circular);
13039 
13040 	return (cdip);
13041 }
13042 
13043 
13044 static int
13045 fcp_is_pip_present(struct fcp_lun *plun, mdi_pathinfo_t *pip)
13046 {
13047 	dev_info_t	*pdip;
13048 	char		buf[MAXNAMELEN];
13049 	char		uaddr[MAXNAMELEN];
13050 	int		rval = FC_FAILURE;
13051 
13052 	ASSERT(MUTEX_HELD(&plun->lun_mutex));
13053 
13054 	pdip = plun->lun_tgt->tgt_port->port_dip;
13055 
13056 	/*
	 * Check whether pip (and not plun->lun_cip) is NULL.  plun->lun_cip
	 * can be non-NULL even when the LUN is gone, as in the case where a
	 * LUN is configured and then deleted on the device end (the T3/T4
	 * case).  In such cases, pip will be NULL.
	 *
	 * If the device generates an RSCN, the LUN is offlined when it
	 * disappears and a new LUN is created when it is rediscovered on
	 * the device.  If we checked lun_cip here instead, the LUN would
	 * never end up getting onlined, since this function would return
	 * FC_SUCCESS.
	 *
	 * The behavior is different on other devices.  For instance, on an
	 * HDS, no RSCN was generated by the device, but the next I/O
	 * generated a check condition and rediscovery was triggered that
	 * way.  In such cases this path is not exercised.
13072 	 */
13073 	if (pip == NULL) {
13074 		FCP_TRACE(fcp_logq, LUN_PORT->port_instbuf,
13075 		    fcp_trace, FCP_BUF_LEVEL_4, 0,
13076 		    "fcp_is_pip_present: plun->lun_cip is NULL: "
13077 		    "plun: %p lun state: %x num: %d target state: %x",
13078 		    plun, plun->lun_state, plun->lun_num,
13079 		    plun->lun_tgt->tgt_port->port_state);
13080 		return (rval);
13081 	}
13082 
13083 	fcp_wwn_to_ascii(plun->lun_tgt->tgt_port_wwn.raw_wwn, buf);
13084 
13085 	(void) snprintf(uaddr, MAXNAMELEN, "w%s,%x", buf, plun->lun_num);
13086 
13087 	if (plun->lun_old_guid) {
13088 		if (mdi_pi_find(pdip, plun->lun_old_guid, uaddr) == pip) {
13089 			rval = FC_SUCCESS;
13090 		}
13091 	} else {
13092 		if (mdi_pi_find(pdip, plun->lun_guid, uaddr) == pip) {
13093 			rval = FC_SUCCESS;
13094 		}
13095 	}
13096 	return (rval);
13097 }
13098 
13099 static mdi_pathinfo_t *
13100 fcp_find_existing_pip(struct fcp_lun *plun, dev_info_t *pdip)
13101 {
13102 	char			buf[MAXNAMELEN];
13103 	char			uaddr[MAXNAMELEN];
13104 	mdi_pathinfo_t		*pip;
13105 	struct fcp_tgt	*ptgt = plun->lun_tgt;
13106 	struct fcp_port	*pptr = ptgt->tgt_port;
13107 
13108 	ASSERT(MUTEX_HELD(&pptr->port_mutex));
13109 
13110 	fcp_wwn_to_ascii(ptgt->tgt_port_wwn.raw_wwn, buf);
13111 	(void) snprintf(uaddr, MAXNAMELEN, "w%s,%x", buf, plun->lun_num);
13112 
13113 	pip = mdi_pi_find(pdip, plun->lun_guid, uaddr);
13114 
13115 	return (pip);
13116 }
13117 
13118 
13119 static int
13120 fcp_online_child(struct fcp_lun *plun, child_info_t *cip, int lcount,
13121     int tcount, int flags, int *circ)
13122 {
13123 	int			rval;
13124 	struct fcp_port		*pptr = plun->lun_tgt->tgt_port;
13125 	struct fcp_tgt	*ptgt = plun->lun_tgt;
13126 	dev_info_t		*cdip = NULL;
13127 
13128 	ASSERT(MUTEX_HELD(&pptr->port_mutex));
13129 	ASSERT(MUTEX_HELD(&plun->lun_mutex));
13130 
13131 	if (plun->lun_cip == NULL) {
13132 		FCP_TRACE(fcp_logq, pptr->port_instbuf,
13133 		    fcp_trace, FCP_BUF_LEVEL_3, 0,
13134 		    "fcp_online_child: plun->lun_cip is NULL: "
13135 		    "plun: %p state: %x num: %d target state: %x",
13136 		    plun, plun->lun_state, plun->lun_num,
13137 		    plun->lun_tgt->tgt_port->port_state);
13138 		return (NDI_FAILURE);
13139 	}
13140 again:
13141 	if (plun->lun_mpxio == 0) {
13142 		cdip = DIP(cip);
13143 		mutex_exit(&plun->lun_mutex);
13144 		mutex_exit(&pptr->port_mutex);
13145 
13146 		FCP_TRACE(fcp_logq, pptr->port_instbuf,
13147 		    fcp_trace, FCP_BUF_LEVEL_3, 0,
13148 		    "!Invoking ndi_devi_online for %s: target=%x lun=%x",
13149 		    ddi_get_name(cdip), ptgt->tgt_d_id, plun->lun_num);
13150 
13151 		/*
		 * We could check for FCP_LUN_INIT here, but the chances of
		 * getting here when the LUN is already in FCP_LUN_INIT are
		 * rare, and a duplicate ndi_devi_online wouldn't hurt
		 * either (as the node would already have been in CF2).
13157 		 */
13158 		if (!i_ddi_devi_attached(ddi_get_parent(cdip))) {
13159 			rval = ndi_devi_bind_driver(cdip, flags);
13160 			FCP_TRACE(fcp_logq, pptr->port_instbuf,
13161 			    fcp_trace, FCP_BUF_LEVEL_3, 0,
13162 			    "!Invoking ndi_devi_bind_driver: rval=%d", rval);
13163 		} else {
13164 			rval = ndi_devi_online(cdip, flags);
13165 		}
13166 
13167 		/*
		 * We log the message into the trace buffer if the device is
		 * "ses" and into syslog for any other device type.  This
		 * prevents the ndi_devi_online failure message that would
		 * otherwise appear for V880/A5K ses devices.
13172 		 */
13173 		if (rval == NDI_SUCCESS) {
13174 			mutex_enter(&ptgt->tgt_mutex);
13175 			plun->lun_state |= FCP_LUN_INIT;
13176 			mutex_exit(&ptgt->tgt_mutex);
13177 		} else if (strncmp(ddi_node_name(cdip), "ses", 3) != 0) {
13178 			fcp_log(CE_NOTE, pptr->port_dip,
13179 			    "!ndi_devi_online:"
13180 			    " failed for %s: target=%x lun=%x %x",
13181 			    ddi_get_name(cdip), ptgt->tgt_d_id,
13182 			    plun->lun_num, rval);
13183 		} else {
13184 			FCP_TRACE(fcp_logq, pptr->port_instbuf,
13185 			    fcp_trace, FCP_BUF_LEVEL_3, 0,
13186 			    " !ndi_devi_online:"
13187 			    " failed for %s: target=%x lun=%x %x",
13188 			    ddi_get_name(cdip), ptgt->tgt_d_id,
13189 			    plun->lun_num, rval);
13190 		}
13191 	} else {
13192 		cdip = mdi_pi_get_client(PIP(cip));
13193 		mutex_exit(&plun->lun_mutex);
13194 		mutex_exit(&pptr->port_mutex);
13195 
13196 		FCP_TRACE(fcp_logq, pptr->port_instbuf,
13197 		    fcp_trace, FCP_BUF_LEVEL_3, 0,
13198 		    "!Invoking mdi_pi_online for %s: target=%x lun=%x",
13199 		    ddi_get_name(cdip), ptgt->tgt_d_id, plun->lun_num);
13200 
13201 		/*
13202 		 * Hold path and exit phci to avoid deadlock with power
13203 		 * management code during mdi_pi_online.
13204 		 */
13205 		mdi_hold_path(PIP(cip));
13206 		mdi_devi_exit_phci(pptr->port_dip, *circ);
13207 
13208 		rval = mdi_pi_online(PIP(cip), flags);
13209 
13210 		mdi_devi_enter_phci(pptr->port_dip, circ);
13211 		mdi_rele_path(PIP(cip));
13212 
13213 		if (rval == MDI_SUCCESS) {
13214 			mutex_enter(&ptgt->tgt_mutex);
13215 			plun->lun_state |= FCP_LUN_INIT;
13216 			mutex_exit(&ptgt->tgt_mutex);
13217 
13218 			/*
13219 			 * Clear MPxIO path permanent disable in case
13220 			 * fcp hotplug dropped the offline event.
13221 			 */
13222 			(void) mdi_pi_enable_path(PIP(cip), DRIVER_DISABLE);
13223 
13224 		} else if (rval == MDI_NOT_SUPPORTED) {
13225 			child_info_t	*old_cip = cip;
13226 
13227 			/*
13228 			 * MPxIO does not support this device yet.
13229 			 * Enumerate in legacy mode.
13230 			 */
13231 			mutex_enter(&pptr->port_mutex);
13232 			mutex_enter(&plun->lun_mutex);
13233 			plun->lun_mpxio = 0;
13234 			plun->lun_cip = NULL;
13235 			cdip = fcp_create_dip(plun, lcount, tcount);
13236 			plun->lun_cip = cip = CIP(cdip);
13237 			if (cip == NULL) {
13238 				fcp_log(CE_WARN, pptr->port_dip,
13239 				    "!fcp_online_child: "
13240 				    "Create devinfo failed for LU=%p", plun);
13241 				mutex_exit(&plun->lun_mutex);
13242 
13243 				mutex_enter(&ptgt->tgt_mutex);
13244 				plun->lun_state |= FCP_LUN_OFFLINE;
13245 				mutex_exit(&ptgt->tgt_mutex);
13246 
13247 				mutex_exit(&pptr->port_mutex);
13248 
13249 				/*
13250 				 * free the mdi_pathinfo node
13251 				 */
13252 				(void) mdi_pi_free(PIP(old_cip), 0);
13253 			} else {
13254 				FCP_TRACE(fcp_logq, pptr->port_instbuf,
13255 				    fcp_trace, FCP_BUF_LEVEL_3, 0,
13256 				    "fcp_online_child: creating devinfo "
13257 				    "node 0x%p for plun 0x%p",
13258 				    cip, plun);
13259 				mutex_exit(&plun->lun_mutex);
13260 				mutex_exit(&pptr->port_mutex);
13261 				/*
13262 				 * free the mdi_pathinfo node
13263 				 */
13264 				(void) mdi_pi_free(PIP(old_cip), 0);
13265 				mutex_enter(&pptr->port_mutex);
13266 				mutex_enter(&plun->lun_mutex);
13267 				goto again;
13268 			}
13269 		} else {
13270 			if (cdip) {
13271 				fcp_log(CE_NOTE, pptr->port_dip,
13272 				    "!fcp_online_child: mdi_pi_online:"
13273 				    " failed for %s: target=%x lun=%x %x",
13274 				    ddi_get_name(cdip), ptgt->tgt_d_id,
13275 				    plun->lun_num, rval);
13276 			}
13277 		}
13278 		rval = (rval == MDI_SUCCESS) ? NDI_SUCCESS : NDI_FAILURE;
13279 	}
13280 
13281 	if (rval == NDI_SUCCESS) {
13282 		if (cdip) {
13283 			(void) ndi_event_retrieve_cookie(
13284 			    pptr->port_ndi_event_hdl, cdip, FCAL_INSERT_EVENT,
13285 			    &fcp_insert_eid, NDI_EVENT_NOPASS);
13286 			(void) ndi_event_run_callbacks(pptr->port_ndi_event_hdl,
13287 			    cdip, fcp_insert_eid, NULL);
13288 		}
13289 	}
13290 	mutex_enter(&pptr->port_mutex);
13291 	mutex_enter(&plun->lun_mutex);
13292 	return (rval);
13293 }
13294 
13295 /* ARGSUSED */
13296 static int
13297 fcp_offline_child(struct fcp_lun *plun, child_info_t *cip, int lcount,
13298     int tcount, int flags, int *circ)
13299 {
13300 	int		rval;
13301 	int		lun_mpxio;
13302 	struct fcp_port	*pptr = plun->lun_tgt->tgt_port;
13303 	struct fcp_tgt	*ptgt = plun->lun_tgt;
13304 	dev_info_t	*cdip;
13305 
13306 	ASSERT(MUTEX_HELD(&plun->lun_mutex));
13307 	ASSERT(MUTEX_HELD(&pptr->port_mutex));
13308 
13309 	if (plun->lun_cip == NULL) {
13310 		FCP_TRACE(fcp_logq, pptr->port_instbuf,
13311 		    fcp_trace, FCP_BUF_LEVEL_3, 0,
13312 		    "fcp_offline_child: plun->lun_cip is NULL: "
13313 		    "plun: %p lun state: %x num: %d target state: %x",
13314 		    plun, plun->lun_state, plun->lun_num,
13315 		    plun->lun_tgt->tgt_port->port_state);
13316 		return (NDI_FAILURE);
13317 	}
13318 
13319 	/*
13320 	 * We will use this value twice. Make a copy to be sure we use
13321 	 * the same value in both places.
13322 	 */
13323 	lun_mpxio = plun->lun_mpxio;
13324 
13325 	if (lun_mpxio == 0) {
13326 		cdip = DIP(cip);
13327 		mutex_exit(&plun->lun_mutex);
13328 		mutex_exit(&pptr->port_mutex);
13329 		rval = ndi_devi_offline(DIP(cip), flags);
13330 		if (rval != NDI_SUCCESS) {
13331 			FCP_TRACE(fcp_logq, pptr->port_instbuf,
13332 			    fcp_trace, FCP_BUF_LEVEL_3, 0,
13333 			    "fcp_offline_child: ndi_devi_offline failed "
13334 			    "rval=%x cip=%p", rval, cip);
13335 		}
13336 	} else {
13337 		cdip = mdi_pi_get_client(PIP(cip));
13338 		mutex_exit(&plun->lun_mutex);
13339 		mutex_exit(&pptr->port_mutex);
13340 
13341 		/*
13342 		 * Exit phci to avoid deadlock with power management code
13343 		 * during mdi_pi_offline
13344 		 */
13345 		mdi_hold_path(PIP(cip));
13346 		mdi_devi_exit_phci(pptr->port_dip, *circ);
13347 
13348 		rval = mdi_pi_offline(PIP(cip), flags);
13349 
13350 		mdi_devi_enter_phci(pptr->port_dip, circ);
13351 		mdi_rele_path(PIP(cip));
13352 
13353 		rval = (rval == MDI_SUCCESS) ? NDI_SUCCESS : NDI_FAILURE;
13354 	}
13355 
13356 	mutex_enter(&ptgt->tgt_mutex);
13357 	plun->lun_state &= ~FCP_LUN_INIT;
13358 	mutex_exit(&ptgt->tgt_mutex);
13359 
13360 	if (rval == NDI_SUCCESS) {
13361 		cdip = NULL;
13362 		if (flags & NDI_DEVI_REMOVE) {
13363 			mutex_enter(&plun->lun_mutex);
13364 			/*
			 * If the guid of the LUN changes, lun_cip will not
			 * equal cip, and after offlining the LUN with the
			 * old guid, we should keep lun_cip since it is the
			 * cip of the LUN with the new guid.
			 * Otherwise remove our reference to the child node.
13370 			 *
13371 			 * This must be done before the child node is freed,
13372 			 * otherwise other threads could see a stale lun_cip
13373 			 * pointer.
13374 			 */
13375 			if (plun->lun_cip == cip) {
13376 				plun->lun_cip = NULL;
13377 			}
13378 			if (plun->lun_old_guid) {
13379 				kmem_free(plun->lun_old_guid,
13380 				    plun->lun_old_guid_size);
13381 				plun->lun_old_guid = NULL;
13382 				plun->lun_old_guid_size = 0;
13383 			}
13384 			mutex_exit(&plun->lun_mutex);
13385 		}
13386 	}
13387 
13388 	if (lun_mpxio != 0) {
13389 		if (rval == NDI_SUCCESS) {
13390 			/*
13391 			 * Clear MPxIO path permanent disable as the path is
13392 			 * already offlined.
13393 			 */
13394 			(void) mdi_pi_enable_path(PIP(cip), DRIVER_DISABLE);
13395 
13396 			if (flags & NDI_DEVI_REMOVE) {
13397 				(void) mdi_pi_free(PIP(cip), 0);
13398 			}
13399 		} else {
13400 			FCP_TRACE(fcp_logq, pptr->port_instbuf,
13401 			    fcp_trace, FCP_BUF_LEVEL_3, 0,
13402 			    "fcp_offline_child: mdi_pi_offline failed "
13403 			    "rval=%x cip=%p", rval, cip);
13404 		}
13405 	}
13406 
13407 	mutex_enter(&pptr->port_mutex);
13408 	mutex_enter(&plun->lun_mutex);
13409 
13410 	if (cdip) {
13411 		FCP_TRACE(fcp_logq, pptr->port_instbuf,
13412 		    fcp_trace, FCP_BUF_LEVEL_3, 0, "!%s failed for %s:"
13413 		    " target=%x lun=%x", "ndi_offline",
13414 		    ddi_get_name(cdip), ptgt->tgt_d_id, plun->lun_num);
13415 	}
13416 
13417 	return (rval);
13418 }
13419 
13420 static void
13421 fcp_remove_child(struct fcp_lun *plun)
13422 {
13423 	child_info_t *cip;
13424 	int circ;
13425 
13426 	ASSERT(MUTEX_HELD(&plun->lun_mutex));
13427 
13428 	if (fcp_is_child_present(plun, plun->lun_cip) == FC_SUCCESS) {
13429 		if (plun->lun_mpxio == 0) {
13430 			(void) ndi_prop_remove_all(DIP(plun->lun_cip));
13431 			(void) ndi_devi_free(DIP(plun->lun_cip));
13432 			plun->lun_cip = NULL;
13433 		} else {
13434 			/*
			 * Clear the reference to the child node in the LUN.
			 * This must be done before freeing it with
			 * mdi_pi_free and with lun_mutex held so that other
			 * threads always see either a valid lun_cip or NULL
			 * when holding lun_mutex.  We keep a copy in cip.
13440 			 */
13441 			cip = plun->lun_cip;
13442 			plun->lun_cip = NULL;
13443 
13444 			mutex_exit(&plun->lun_mutex);
13445 			mutex_exit(&plun->lun_tgt->tgt_mutex);
13446 			mutex_exit(&plun->lun_tgt->tgt_port->port_mutex);
13447 
13448 			mdi_devi_enter(
13449 			    plun->lun_tgt->tgt_port->port_dip, &circ);
13450 
13451 			/*
13452 			 * Exit phci to avoid deadlock with power management
13453 			 * code during mdi_pi_offline
13454 			 */
13455 			mdi_hold_path(PIP(cip));
13456 			mdi_devi_exit_phci(
13457 			    plun->lun_tgt->tgt_port->port_dip, circ);
13458 			(void) mdi_pi_offline(PIP(cip),
13459 			    NDI_DEVI_REMOVE);
13460 			mdi_devi_enter_phci(
13461 			    plun->lun_tgt->tgt_port->port_dip, &circ);
13462 			mdi_rele_path(PIP(cip));
13463 
13464 			mdi_devi_exit(
13465 			    plun->lun_tgt->tgt_port->port_dip, circ);
13466 
13467 			FCP_TRACE(fcp_logq,
13468 			    plun->lun_tgt->tgt_port->port_instbuf,
13469 			    fcp_trace, FCP_BUF_LEVEL_3, 0,
13470 			    "lun=%p pip freed %p", plun, cip);
13471 
13472 			(void) mdi_prop_remove(PIP(cip), NULL);
13473 			(void) mdi_pi_free(PIP(cip), 0);
13474 
13475 			mutex_enter(&plun->lun_tgt->tgt_port->port_mutex);
13476 			mutex_enter(&plun->lun_tgt->tgt_mutex);
13477 			mutex_enter(&plun->lun_mutex);
13478 		}
13479 	} else {
13480 		plun->lun_cip = NULL;
13481 	}
13482 }
13483 
13484 /*
13485  * called when a timeout occurs
13486  *
13487  * can be scheduled during an attach or resume (if not already running)
13488  *
13489  * one timeout is set up for all ports
13490  *
13491  * acquires and releases the global mutex
13492  */
13493 /*ARGSUSED*/
13494 static void
13495 fcp_watch(void *arg)
13496 {
13497 	struct fcp_port	*pptr;
13498 	struct fcp_ipkt	*icmd;
13499 	struct fcp_ipkt	*nicmd;
13500 	struct fcp_pkt	*cmd;
13501 	struct fcp_pkt	*ncmd;
13502 	struct fcp_pkt	*tail;
13503 	struct fcp_pkt	*pcmd;
13504 	struct fcp_pkt	*save_head;
13505 	struct fcp_port	*save_port;
13506 
13507 	/* increment global watchdog time */
13508 	fcp_watchdog_time += fcp_watchdog_timeout;
13509 
13510 	mutex_enter(&fcp_global_mutex);
13511 
13512 	/* scan each port in our list */
13513 	for (pptr = fcp_port_head; pptr != NULL; pptr = pptr->port_next) {
13514 		save_port = fcp_port_head;
13515 		pptr->port_state |= FCP_STATE_IN_WATCHDOG;
13516 		mutex_exit(&fcp_global_mutex);
13517 
13518 		mutex_enter(&pptr->port_mutex);
13519 		if (pptr->port_ipkt_list == NULL &&
13520 		    (pptr->port_state & (FCP_STATE_SUSPENDED |
13521 		    FCP_STATE_DETACHING | FCP_STATE_POWER_DOWN))) {
13522 			pptr->port_state &= ~FCP_STATE_IN_WATCHDOG;
13523 			mutex_exit(&pptr->port_mutex);
13524 			mutex_enter(&fcp_global_mutex);
13525 			goto end_of_watchdog;
13526 		}
13527 
13528 		/*
		 * Check whether any targets need to be offlined.
13530 		 */
13531 		if (pptr->port_offline_tgts) {
13532 			fcp_scan_offline_tgts(pptr);
13533 		}
13534 
13535 		/*
		 * Check whether any LUNs need to be offlined.
13537 		 */
13538 		if (pptr->port_offline_luns) {
13539 			fcp_scan_offline_luns(pptr);
13540 		}
13541 
13542 		/*
		 * Check whether any targets or LUNs need to be reset.
13544 		 */
13545 		if (pptr->port_reset_list) {
13546 			fcp_check_reset_delay(pptr);
13547 		}
13548 
13549 		mutex_exit(&pptr->port_mutex);
13550 
13551 		/*
13552 		 * This is where the pending commands (pkt) are checked for
13553 		 * timeout.
13554 		 */
13555 		mutex_enter(&pptr->port_pkt_mutex);
13556 		tail = pptr->port_pkt_tail;
13557 
13558 		for (pcmd = NULL, cmd = pptr->port_pkt_head;
13559 		    cmd != NULL; cmd = ncmd) {
13560 			ncmd = cmd->cmd_next;
13561 			/*
13562 			 * If a command is in this queue the bit CFLAG_IN_QUEUE
13563 			 * must be set.
13564 			 */
13565 			ASSERT(cmd->cmd_flags & CFLAG_IN_QUEUE);
13566 			/*
			 * FCP_INVALID_TIMEOUT is set for those commands that
			 * need to be failed, mostly commands that could not
			 * be queued down within their "timeout" value.
			 * cmd->cmd_timeout is used to try to requeue the
			 * command at regular intervals.
13572 			 */
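			/*
			 * As a worked example (assuming the watchdog ticks
			 * in 1-second steps): a command queued when
			 * fcp_watchdog_time was 1000 with cmd_timeout set
			 * to 1060 is skipped by the check below until
			 * fcp_watchdog_time reaches 1061, at which point it
			 * is either failed or retransported.
			 */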
13573 			if (cmd->cmd_timeout >= fcp_watchdog_time) {
13574 				/*
13575 				 * This command hasn't timed out yet.  Let's
13576 				 * go to the next one.
13577 				 */
13578 				pcmd = cmd;
13579 				goto end_of_loop;
13580 			}
13581 
13582 			if (cmd == pptr->port_pkt_head) {
13583 				ASSERT(pcmd == NULL);
13584 				pptr->port_pkt_head = cmd->cmd_next;
13585 			} else {
13586 				ASSERT(pcmd != NULL);
13587 				pcmd->cmd_next = cmd->cmd_next;
13588 			}
13589 
13590 			if (cmd == pptr->port_pkt_tail) {
13591 				ASSERT(cmd->cmd_next == NULL);
13592 				pptr->port_pkt_tail = pcmd;
13593 				if (pcmd) {
13594 					pcmd->cmd_next = NULL;
13595 				}
13596 			}
13597 			cmd->cmd_next = NULL;
13598 
13599 			/*
			 * Save the current head before dropping the mutex.
			 * If the head doesn't remain the same after
			 * reacquiring the mutex, just bail out and revisit
			 * on the next tick.
			 *
			 * Note: the tail pointer can change as the commands
			 * get requeued after a failure to retransport.
13607 			 */
13608 			save_head = pptr->port_pkt_head;
13609 			mutex_exit(&pptr->port_pkt_mutex);
13610 
13611 			if (cmd->cmd_fp_pkt->pkt_timeout ==
13612 			    FCP_INVALID_TIMEOUT) {
13613 				struct scsi_pkt		*pkt = cmd->cmd_pkt;
13614 				struct fcp_lun	*plun;
13615 				struct fcp_tgt	*ptgt;
13616 
13617 				plun = ADDR2LUN(&pkt->pkt_address);
13618 				ptgt = plun->lun_tgt;
13619 
13620 				FCP_TRACE(fcp_logq, pptr->port_instbuf,
13621 				    fcp_trace, FCP_BUF_LEVEL_2, 0,
13622 				    "SCSI cmd 0x%x to D_ID=%x timed out",
13623 				    pkt->pkt_cdbp[0], ptgt->tgt_d_id);
13624 
				if (cmd->cmd_state == FCP_PKT_ABORTING) {
					fcp_fail_cmd(cmd, CMD_RESET,
					    STAT_DEV_RESET);
				} else {
					fcp_fail_cmd(cmd, CMD_TIMEOUT,
					    STAT_ABORTED);
				}
13629 			} else {
13630 				fcp_retransport_cmd(pptr, cmd);
13631 			}
13632 			mutex_enter(&pptr->port_pkt_mutex);
13633 			if (save_head && save_head != pptr->port_pkt_head) {
13634 				/*
				 * Looks like the linked list changed (this
				 * mostly happens when the OFFLINE LUN code
				 * starts returning overflow queue commands
				 * in parallel).  So bail out and revisit on
				 * the next tick.
13640 				 */
13641 				break;
13642 			}
13643 		end_of_loop:
13644 			/*
			 * Scan only up to the previously known tail pointer
			 * to avoid excessive processing - lots of new
			 * packets could have been added to the tail or the
			 * old ones requeued.
13649 			 */
13650 			if (cmd == tail) {
13651 				break;
13652 			}
13653 		}
13654 		mutex_exit(&pptr->port_pkt_mutex);
13655 
13656 		mutex_enter(&pptr->port_mutex);
13657 		for (icmd = pptr->port_ipkt_list; icmd != NULL; icmd = nicmd) {
13658 			struct fcp_tgt *ptgt = icmd->ipkt_tgt;
13659 
13660 			nicmd = icmd->ipkt_next;
13661 			if ((icmd->ipkt_restart != 0) &&
13662 			    (icmd->ipkt_restart >= fcp_watchdog_time)) {
13663 				/* packet has not timed out */
13664 				continue;
13665 			}
13666 
13667 			/* time for packet re-transport */
13668 			if (icmd == pptr->port_ipkt_list) {
13669 				pptr->port_ipkt_list = icmd->ipkt_next;
13670 				if (pptr->port_ipkt_list) {
13671 					pptr->port_ipkt_list->ipkt_prev =
13672 					    NULL;
13673 				}
13674 			} else {
13675 				icmd->ipkt_prev->ipkt_next = icmd->ipkt_next;
13676 				if (icmd->ipkt_next) {
13677 					icmd->ipkt_next->ipkt_prev =
13678 					    icmd->ipkt_prev;
13679 				}
13680 			}
13681 			icmd->ipkt_next = NULL;
13682 			icmd->ipkt_prev = NULL;
13683 			mutex_exit(&pptr->port_mutex);
13684 
13685 			if (fcp_is_retryable(icmd)) {
13686 				fc_ulp_rscn_info_t *rscnp =
13687 				    (fc_ulp_rscn_info_t *)icmd->ipkt_fpkt->
13688 				    pkt_ulp_rscn_infop;
13689 
13690 				FCP_TRACE(fcp_logq, pptr->port_instbuf,
13691 				    fcp_trace, FCP_BUF_LEVEL_2, 0,
13692 				    "%x to D_ID=%x Retrying..",
13693 				    icmd->ipkt_opcode,
13694 				    icmd->ipkt_fpkt->pkt_cmd_fhdr.d_id);
13695 
13696 				/*
13697 				 * Update the RSCN count in the packet
13698 				 * before resending.
13699 				 */
13700 
13701 				if (rscnp != NULL) {
13702 					rscnp->ulp_rscn_count =
13703 					    fc_ulp_get_rscn_count(pptr->
13704 					    port_fp_handle);
13705 				}
13706 
13707 				mutex_enter(&pptr->port_mutex);
13708 				mutex_enter(&ptgt->tgt_mutex);
13709 				if (!FCP_STATE_CHANGED(pptr, ptgt, icmd)) {
13710 					mutex_exit(&ptgt->tgt_mutex);
13711 					mutex_exit(&pptr->port_mutex);
13712 					switch (icmd->ipkt_opcode) {
13713 						int rval;
13714 					case LA_ELS_PLOGI:
13715 						if ((rval = fc_ulp_login(
13716 						    pptr->port_fp_handle,
13717 						    &icmd->ipkt_fpkt, 1)) ==
13718 						    FC_SUCCESS) {
13719 							mutex_enter(
13720 							    &pptr->port_mutex);
13721 							continue;
13722 						}
13723 						if (fcp_handle_ipkt_errors(
13724 						    pptr, ptgt, icmd, rval,
13725 						    "PLOGI") == DDI_SUCCESS) {
13726 							mutex_enter(
13727 							    &pptr->port_mutex);
13728 							continue;
13729 						}
13730 						break;
13731 
13732 					case LA_ELS_PRLI:
13733 						if ((rval = fc_ulp_issue_els(
13734 						    pptr->port_fp_handle,
13735 						    icmd->ipkt_fpkt)) ==
13736 						    FC_SUCCESS) {
13737 							mutex_enter(
13738 							    &pptr->port_mutex);
13739 							continue;
13740 						}
13741 						if (fcp_handle_ipkt_errors(
13742 						    pptr, ptgt, icmd, rval,
13743 						    "PRLI") == DDI_SUCCESS) {
13744 							mutex_enter(
13745 							    &pptr->port_mutex);
13746 							continue;
13747 						}
13748 						break;
13749 
13750 					default:
13751 						if ((rval = fcp_transport(
13752 						    pptr->port_fp_handle,
13753 						    icmd->ipkt_fpkt, 1)) ==
13754 						    FC_SUCCESS) {
13755 							mutex_enter(
13756 							    &pptr->port_mutex);
13757 							continue;
13758 						}
13759 						if (fcp_handle_ipkt_errors(
13760 						    pptr, ptgt, icmd, rval,
13761 						    "PRLI") == DDI_SUCCESS) {
13762 							mutex_enter(
13763 							    &pptr->port_mutex);
13764 							continue;
13765 						}
13766 						break;
13767 					}
13768 				} else {
13769 					mutex_exit(&ptgt->tgt_mutex);
13770 					mutex_exit(&pptr->port_mutex);
13771 				}
13772 			} else {
13773 				fcp_print_error(icmd->ipkt_fpkt);
13774 			}
13775 
13776 			(void) fcp_call_finish_init(pptr, ptgt,
13777 			    icmd->ipkt_link_cnt, icmd->ipkt_change_cnt,
13778 			    icmd->ipkt_cause);
13779 			fcp_icmd_free(pptr, icmd);
13780 			mutex_enter(&pptr->port_mutex);
13781 		}
13782 
13783 		pptr->port_state &= ~FCP_STATE_IN_WATCHDOG;
13784 		mutex_exit(&pptr->port_mutex);
13785 		mutex_enter(&fcp_global_mutex);
13786 
13787 	end_of_watchdog:
13788 		/*
13789 		 * Bail out early before getting into trouble
13790 		 */
13791 		if (save_port != fcp_port_head) {
13792 			break;
13793 		}
13794 	}
13795 
13796 	if (fcp_watchdog_init > 0) {
13797 		/* reschedule timeout to go again */
13798 		fcp_watchdog_id =
13799 		    timeout(fcp_watch, NULL, fcp_watchdog_tick);
13800 	}
13801 	mutex_exit(&fcp_global_mutex);
13802 }
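
/*
 * A rough sketch of how this watchdog might be armed by the port attach
 * path (that code is outside this section; the exact tick conversion shown
 * here is an assumption, not the driver's actual code):
 *
 *	mutex_enter(&fcp_global_mutex);
 *	if (fcp_watchdog_init++ == 0) {
 *		fcp_watchdog_tick = fcp_watchdog_timeout *
 *		    drv_usectohz(1000000);
 *		fcp_watchdog_id = timeout(fcp_watch, NULL,
 *		    fcp_watchdog_tick);
 *	}
 *	mutex_exit(&fcp_global_mutex);
 *
 * A matching detach path would decrement fcp_watchdog_init and, once it
 * reaches zero, call untimeout(fcp_watchdog_id).
 */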
13803 
13804 
13805 static void
13806 fcp_check_reset_delay(struct fcp_port *pptr)
13807 {
13808 	uint32_t		tgt_cnt;
13809 	int			level;
13810 	struct fcp_tgt	*ptgt;
13811 	struct fcp_lun	*plun;
13812 	struct fcp_reset_elem *cur = NULL;
13813 	struct fcp_reset_elem *next = NULL;
13814 	struct fcp_reset_elem *prev = NULL;
13815 
13816 	ASSERT(mutex_owned(&pptr->port_mutex));
13817 
13818 	next = pptr->port_reset_list;
13819 	while ((cur = next) != NULL) {
13820 		next = cur->next;
13821 
13822 		if (cur->timeout < fcp_watchdog_time) {
13823 			prev = cur;
13824 			continue;
13825 		}
13826 
13827 		ptgt = cur->tgt;
13828 		plun = cur->lun;
13829 		tgt_cnt = cur->tgt_cnt;
13830 
13831 		if (ptgt) {
13832 			level = RESET_TARGET;
13833 		} else {
13834 			ASSERT(plun != NULL);
13835 			level = RESET_LUN;
13836 			ptgt = plun->lun_tgt;
13837 		}
13838 		if (prev) {
13839 			prev->next = next;
13840 		} else {
13841 			/*
13842 			 * Because we drop port mutex while doing aborts for
13843 			 * packets, we can't rely on reset_list pointing to
13844 			 * our head
13845 			 */
13846 			if (cur == pptr->port_reset_list) {
13847 				pptr->port_reset_list = next;
13848 			} else {
13849 				struct fcp_reset_elem *which;
13850 
13851 				which = pptr->port_reset_list;
13852 				while (which && which->next != cur) {
13853 					which = which->next;
13854 				}
13855 				ASSERT(which != NULL);
13856 
13857 				which->next = next;
13858 				prev = which;
13859 			}
13860 		}
13861 
13862 		kmem_free(cur, sizeof (*cur));
13863 
13864 		if (tgt_cnt == ptgt->tgt_change_cnt) {
13865 			mutex_enter(&ptgt->tgt_mutex);
13866 			if (level == RESET_TARGET) {
13867 				fcp_update_tgt_state(ptgt,
13868 				    FCP_RESET, FCP_LUN_BUSY);
13869 			} else {
13870 				fcp_update_lun_state(plun,
13871 				    FCP_RESET, FCP_LUN_BUSY);
13872 			}
13873 			mutex_exit(&ptgt->tgt_mutex);
13874 
13875 			mutex_exit(&pptr->port_mutex);
13876 			fcp_abort_all(pptr, ptgt, plun, tgt_cnt);
13877 			mutex_enter(&pptr->port_mutex);
13878 		}
13879 	}
13880 }
13881 
13882 
13883 static void
13884 fcp_abort_all(struct fcp_port *pptr, struct fcp_tgt *ttgt,
13885     struct fcp_lun *rlun, int tgt_cnt)
13886 {
13887 	int			rval;
13888 	struct fcp_lun	*tlun, *nlun;
13889 	struct fcp_pkt	*pcmd = NULL, *ncmd = NULL,
13890 	    *cmd = NULL, *head = NULL,
13891 	    *tail = NULL;
13892 
13893 	mutex_enter(&pptr->port_pkt_mutex);
13894 	for (cmd = pptr->port_pkt_head; cmd != NULL; cmd = ncmd) {
13895 		struct fcp_lun *plun = ADDR2LUN(&cmd->cmd_pkt->pkt_address);
13896 		struct fcp_tgt *ptgt = plun->lun_tgt;
13897 
13898 		ncmd = cmd->cmd_next;
13899 
13900 		if (ptgt != ttgt && plun != rlun) {
13901 			pcmd = cmd;
13902 			continue;
13903 		}
13904 
13905 		if (pcmd != NULL) {
13906 			ASSERT(pptr->port_pkt_head != cmd);
13907 			pcmd->cmd_next = ncmd;
13908 		} else {
13909 			ASSERT(cmd == pptr->port_pkt_head);
13910 			pptr->port_pkt_head = ncmd;
13911 		}
13912 		if (pptr->port_pkt_tail == cmd) {
13913 			ASSERT(cmd->cmd_next == NULL);
13914 			pptr->port_pkt_tail = pcmd;
13915 			if (pcmd != NULL) {
13916 				pcmd->cmd_next = NULL;
13917 			}
13918 		}
13919 
13920 		if (head == NULL) {
13921 			head = tail = cmd;
13922 		} else {
13923 			ASSERT(tail != NULL);
13924 			tail->cmd_next = cmd;
13925 			tail = cmd;
13926 		}
13927 		cmd->cmd_next = NULL;
13928 	}
13929 	mutex_exit(&pptr->port_pkt_mutex);
13930 
13931 	for (cmd = head; cmd != NULL; cmd = ncmd) {
13932 		struct scsi_pkt *pkt = cmd->cmd_pkt;
13933 
13934 		ncmd = cmd->cmd_next;
13935 		ASSERT(pkt != NULL);
13936 
13937 		mutex_enter(&pptr->port_mutex);
13938 		if (ttgt->tgt_change_cnt == tgt_cnt) {
13939 			mutex_exit(&pptr->port_mutex);
13940 			cmd->cmd_flags &= ~CFLAG_IN_QUEUE;
13941 			pkt->pkt_reason = CMD_RESET;
13942 			pkt->pkt_statistics |= STAT_DEV_RESET;
13943 			cmd->cmd_state = FCP_PKT_IDLE;
13944 			fcp_post_callback(cmd);
13945 		} else {
13946 			mutex_exit(&pptr->port_mutex);
13947 		}
13948 	}
13949 
13950 	/*
13951 	 * If the FCA will return all the commands in its queue then our
13952 	 * work is easy, just return.
13953 	 */
13954 
13955 	if (pptr->port_reset_action == FC_RESET_RETURN_ALL) {
13956 		return;
13957 	}
13958 
13959 	/*
13960 	 * For RESET_LUN get hold of target pointer
13961 	 */
13962 	if (ttgt == NULL) {
13963 		ASSERT(rlun != NULL);
13964 
13965 		ttgt = rlun->lun_tgt;
13966 
13967 		ASSERT(ttgt != NULL);
13968 	}
13969 
13970 	/*
	 * There are some severe race conditions here.  While we are trying
	 * to abort the pkt it might be completing, so mark it as aborting
	 * and, if the abort does not succeed, handle it in the watch thread.
13975 	 */
13976 	mutex_enter(&ttgt->tgt_mutex);
13977 	nlun = ttgt->tgt_lun;
13978 	mutex_exit(&ttgt->tgt_mutex);
13979 	while ((tlun = nlun) != NULL) {
13980 		int restart = 0;
13981 		if (rlun && rlun != tlun) {
13982 			mutex_enter(&ttgt->tgt_mutex);
13983 			nlun = tlun->lun_next;
13984 			mutex_exit(&ttgt->tgt_mutex);
13985 			continue;
13986 		}
13987 		mutex_enter(&tlun->lun_mutex);
13988 		cmd = tlun->lun_pkt_head;
13989 		while (cmd != NULL) {
13990 			if (cmd->cmd_state == FCP_PKT_ISSUED) {
13991 				struct scsi_pkt *pkt;
13992 
13993 				restart = 1;
13994 				cmd->cmd_state = FCP_PKT_ABORTING;
13995 				mutex_exit(&tlun->lun_mutex);
13996 				rval = fc_ulp_abort(pptr->port_fp_handle,
13997 				    cmd->cmd_fp_pkt, KM_SLEEP);
13998 				if (rval == FC_SUCCESS) {
13999 					pkt = cmd->cmd_pkt;
14000 					pkt->pkt_reason = CMD_RESET;
14001 					pkt->pkt_statistics |= STAT_DEV_RESET;
14002 					cmd->cmd_state = FCP_PKT_IDLE;
14003 					fcp_post_callback(cmd);
14004 				} else {
14005 					caddr_t msg;
14006 
14007 					(void) fc_ulp_error(rval, &msg);
14008 
14009 					/*
					 * This part is tricky.  The abort
					 * failed and the command could now
					 * be completing.  The cmd_state ==
					 * FCP_PKT_ABORTING check should
					 * save us in fcp_cmd_callback: if
					 * we are already aborting, the
					 * command is ignored there.  Here
					 * we leave this packet for 20 sec
					 * to be aborted in the fcp_watch
					 * thread.
14020 					 */
14021 					fcp_log(CE_WARN, pptr->port_dip,
14022 					    "!Abort failed after reset %s",
14023 					    msg);
14024 
14025 					cmd->cmd_timeout =
14026 					    fcp_watchdog_time +
14027 					    cmd->cmd_pkt->pkt_time +
14028 					    FCP_FAILED_DELAY;
14029 
14030 					cmd->cmd_fp_pkt->pkt_timeout =
14031 					    FCP_INVALID_TIMEOUT;
14032 					/*
					 * This is a hack: cmd is put in the
					 * overflow queue so that it can
					 * eventually be timed out.
14036 					 */
14037 					cmd->cmd_flags |= CFLAG_IN_QUEUE;
14038 
14039 					mutex_enter(&pptr->port_pkt_mutex);
14040 					if (pptr->port_pkt_head) {
14041 						ASSERT(pptr->port_pkt_tail
14042 						    != NULL);
14043 						pptr->port_pkt_tail->cmd_next
14044 						    = cmd;
14045 						pptr->port_pkt_tail = cmd;
14046 					} else {
14047 						ASSERT(pptr->port_pkt_tail
14048 						    == NULL);
14049 						pptr->port_pkt_head =
14050 						    pptr->port_pkt_tail
14051 						    = cmd;
14052 					}
14053 					cmd->cmd_next = NULL;
14054 					mutex_exit(&pptr->port_pkt_mutex);
14055 				}
14056 				mutex_enter(&tlun->lun_mutex);
14057 				cmd = tlun->lun_pkt_head;
14058 			} else {
14059 				cmd = cmd->cmd_forw;
14060 			}
14061 		}
14062 		mutex_exit(&tlun->lun_mutex);
14063 
14064 		mutex_enter(&ttgt->tgt_mutex);
		nlun = (restart == 1) ? ttgt->tgt_lun : tlun->lun_next;
14066 		mutex_exit(&ttgt->tgt_mutex);
14067 
14068 		mutex_enter(&pptr->port_mutex);
14069 		if (tgt_cnt != ttgt->tgt_change_cnt) {
14070 			mutex_exit(&pptr->port_mutex);
14071 			return;
14072 		} else {
14073 			mutex_exit(&pptr->port_mutex);
14074 		}
14075 	}
14076 }
14077 
14078 
14079 /*
14080  * unlink the soft state, returning the soft state found (if any)
14081  *
14082  * acquires and releases the global mutex
14083  */
14084 struct fcp_port *
14085 fcp_soft_state_unlink(struct fcp_port *pptr)
14086 {
14087 	struct fcp_port	*hptr;		/* ptr index */
14088 	struct fcp_port	*tptr;		/* prev hptr */
14089 
14090 	mutex_enter(&fcp_global_mutex);
14091 	for (hptr = fcp_port_head, tptr = NULL;
14092 	    hptr != NULL;
14093 	    tptr = hptr, hptr = hptr->port_next) {
14094 		if (hptr == pptr) {
14095 			/* we found a match -- remove this item */
14096 			if (tptr == NULL) {
14097 				/* we're at the head of the list */
14098 				fcp_port_head = hptr->port_next;
14099 			} else {
14100 				tptr->port_next = hptr->port_next;
14101 			}
14102 			break;			/* success */
14103 		}
14104 	}
14105 	if (fcp_port_head == NULL) {
14106 		fcp_cleanup_blacklist(&fcp_lun_blacklist);
14107 	}
14108 	mutex_exit(&fcp_global_mutex);
14109 	return (hptr);
14110 }
14111 
14112 
14113 /*
14114  * called by fcp_scsi_hba_tgt_init to find a LUN given a
14115  * WWN and a LUN number
14116  */
14117 /* ARGSUSED */
14118 static struct fcp_lun *
14119 fcp_lookup_lun(struct fcp_port *pptr, uchar_t *wwn, uint16_t lun)
14120 {
14121 	int hash;
14122 	struct fcp_tgt *ptgt;
14123 	struct fcp_lun *plun;
14124 
14125 	ASSERT(mutex_owned(&pptr->port_mutex));
14126 
14127 	hash = FCP_HASH(wwn);
14128 	for (ptgt = pptr->port_tgt_hash_table[hash]; ptgt != NULL;
14129 	    ptgt = ptgt->tgt_next) {
14130 		if (bcmp((caddr_t)wwn, (caddr_t)&ptgt->tgt_port_wwn.raw_wwn[0],
14131 		    sizeof (ptgt->tgt_port_wwn)) == 0) {
14132 			mutex_enter(&ptgt->tgt_mutex);
14133 			for (plun = ptgt->tgt_lun;
14134 			    plun != NULL;
14135 			    plun = plun->lun_next) {
14136 				if (plun->lun_num == lun) {
14137 					mutex_exit(&ptgt->tgt_mutex);
14138 					return (plun);
14139 				}
14140 			}
14141 			mutex_exit(&ptgt->tgt_mutex);
14142 			return (NULL);
14143 		}
14144 	}
14145 	return (NULL);
14146 }
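
/*
 * An illustrative use of the lookup above (how the real caller obtains the
 * WWN and LUN number from the child node's properties is not shown here):
 *
 *	uchar_t		wwn[FC_WWN_SIZE];
 *	uint16_t	lun_num;
 *	struct fcp_lun	*plun;
 *
 *	mutex_enter(&pptr->port_mutex);
 *	plun = fcp_lookup_lun(pptr, wwn, lun_num);
 *	mutex_exit(&pptr->port_mutex);
 *	if (plun == NULL) {
 *		... no such LUN behind this port ...
 *	}
 */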
14147 
14148 /*
14149  *     Function: fcp_prepare_pkt
14150  *
14151  *  Description: This function prepares the SCSI cmd pkt, passed by the caller,
14152  *		 for fcp_start(). It binds the data or partially maps it.
14153  *		 Builds the FCP header and starts the initialization of the
14154  *		 Fibre Channel header.
14155  *
14156  *     Argument: *pptr		FCP port.
14157  *		 *cmd		FCP packet.
14158  *		 *plun		LUN the command will be sent to.
14159  *
14160  *	Context: User, Kernel and Interrupt context.
14161  */
14162 static void
14163 fcp_prepare_pkt(struct fcp_port *pptr, struct fcp_pkt *cmd,
14164     struct fcp_lun *plun)
14165 {
14166 	fc_packet_t		*fpkt = cmd->cmd_fp_pkt;
14167 	struct fcp_tgt		*ptgt = plun->lun_tgt;
14168 	struct fcp_cmd		*fcmd = &cmd->cmd_fcp_cmd;
14169 
14170 	ASSERT(cmd->cmd_pkt->pkt_comp ||
14171 	    (cmd->cmd_pkt->pkt_flags & FLAG_NOINTR));
14172 
14173 	if (cmd->cmd_pkt->pkt_numcookies) {
14174 		if (cmd->cmd_pkt->pkt_dma_flags & DDI_DMA_READ) {
14175 			fcmd->fcp_cntl.cntl_read_data = 1;
14176 			fcmd->fcp_cntl.cntl_write_data = 0;
14177 			fpkt->pkt_tran_type = FC_PKT_FCP_READ;
14178 		} else {
14179 			fcmd->fcp_cntl.cntl_read_data = 0;
14180 			fcmd->fcp_cntl.cntl_write_data = 1;
14181 			fpkt->pkt_tran_type = FC_PKT_FCP_WRITE;
14182 		}
14183 
14184 		fpkt->pkt_data_cookie = cmd->cmd_pkt->pkt_cookies;
14185 
14186 		fpkt->pkt_data_cookie_cnt = cmd->cmd_pkt->pkt_numcookies;
14187 		ASSERT(fpkt->pkt_data_cookie_cnt <=
14188 		    pptr->port_data_dma_attr.dma_attr_sgllen);
14189 
14190 		cmd->cmd_dmacount = cmd->cmd_pkt->pkt_dma_len;
14191 
14192 		/* FCA needs pkt_datalen to be set */
14193 		fpkt->pkt_datalen = cmd->cmd_dmacount;
14194 		fcmd->fcp_data_len = cmd->cmd_dmacount;
14195 	} else {
14196 		fcmd->fcp_cntl.cntl_read_data = 0;
14197 		fcmd->fcp_cntl.cntl_write_data = 0;
14198 		fpkt->pkt_tran_type = FC_PKT_EXCHANGE;
14199 		fpkt->pkt_datalen = 0;
14200 		fcmd->fcp_data_len = 0;
14201 	}
14202 
14203 	/* set up the Tagged Queuing type */
14204 	if (cmd->cmd_pkt->pkt_flags & FLAG_HTAG) {
14205 		fcmd->fcp_cntl.cntl_qtype = FCP_QTYPE_HEAD_OF_Q;
14206 	} else if (cmd->cmd_pkt->pkt_flags & FLAG_OTAG) {
14207 		fcmd->fcp_cntl.cntl_qtype = FCP_QTYPE_ORDERED;
14208 	} else if (cmd->cmd_pkt->pkt_flags & FLAG_STAG) {
14209 		fcmd->fcp_cntl.cntl_qtype = FCP_QTYPE_SIMPLE;
14210 	} else {
14211 		fcmd->fcp_cntl.cntl_qtype = FCP_QTYPE_UNTAGGED;
14212 	}
14213 
14214 	fcmd->fcp_ent_addr = plun->lun_addr;
14215 
14216 	if (pptr->port_fcp_dma != FC_NO_DVMA_SPACE) {
14217 		FCP_CP_OUT((uint8_t *)fcmd, fpkt->pkt_cmd,
14218 		    fpkt->pkt_cmd_acc, sizeof (struct fcp_cmd));
14219 	} else {
14220 		ASSERT(fpkt->pkt_cmd_dma == NULL && fpkt->pkt_resp_dma == NULL);
14221 	}
14222 
14223 	cmd->cmd_pkt->pkt_reason = CMD_CMPLT;
14224 	cmd->cmd_pkt->pkt_state = 0;
14225 	cmd->cmd_pkt->pkt_statistics = 0;
14226 	cmd->cmd_pkt->pkt_resid = 0;
14227 
14228 	cmd->cmd_fp_pkt->pkt_data_dma = cmd->cmd_pkt->pkt_handle;
14229 
14230 	if (cmd->cmd_pkt->pkt_flags & FLAG_NOINTR) {
14231 		fpkt->pkt_tran_flags = (FC_TRAN_CLASS3 | FC_TRAN_NO_INTR);
14232 		fpkt->pkt_comp = NULL;
14233 	} else {
14234 		fpkt->pkt_tran_flags = (FC_TRAN_CLASS3 | FC_TRAN_INTR);
14235 		if (cmd->cmd_pkt->pkt_flags & FLAG_IMMEDIATE_CB) {
14236 			fpkt->pkt_tran_flags |= FC_TRAN_IMMEDIATE_CB;
14237 		}
14238 		fpkt->pkt_comp = fcp_cmd_callback;
14239 	}
14240 
14241 	mutex_enter(&pptr->port_mutex);
14242 	if (pptr->port_state & FCP_STATE_SUSPENDED) {
14243 		fpkt->pkt_tran_flags |= FC_TRAN_DUMPING;
14244 	}
14245 	mutex_exit(&pptr->port_mutex);
14246 
14247 	fpkt->pkt_cmd_fhdr.d_id = ptgt->tgt_d_id;
14248 	fpkt->pkt_cmd_fhdr.s_id = pptr->port_id;
14249 
14250 	/*
14251 	 * Save a few kernel cycles here
14252 	 */
14253 #ifndef	__lock_lint
14254 	fpkt->pkt_fca_device = ptgt->tgt_fca_dev;
14255 #endif /* __lock_lint */
14256 }
14257 
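/*
 * Complete the SCSI packet back to the target driver through the SCSA
 * completion hook.
 */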
14258 static void
14259 fcp_post_callback(struct fcp_pkt *cmd)
14260 {
14261 	scsi_hba_pkt_comp(cmd->cmd_pkt);
14262 }
14263 
14264 
14265 /*
14266  * called to do polled I/O by fcp_start()
14267  *
14268  * return a transport status value, i.e. TRAN_ACCEPT for success
14269  */
14270 static int
14271 fcp_dopoll(struct fcp_port *pptr, struct fcp_pkt *cmd)
14272 {
14273 	int	rval;
14274 
14275 #ifdef	DEBUG
14276 	mutex_enter(&pptr->port_pkt_mutex);
14277 	pptr->port_npkts++;
14278 	mutex_exit(&pptr->port_pkt_mutex);
14279 #endif /* DEBUG */
14280 
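	/*
	 * If the fp packet already carries a timeout, refresh it from the
	 * SCSI packet's pkt_time; otherwise use the default poll timeout.
	 */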
14281 	if (cmd->cmd_fp_pkt->pkt_timeout) {
14282 		cmd->cmd_fp_pkt->pkt_timeout = cmd->cmd_pkt->pkt_time;
14283 	} else {
14284 		cmd->cmd_fp_pkt->pkt_timeout = FCP_POLL_TIMEOUT;
14285 	}
14286 
14287 	ASSERT(cmd->cmd_fp_pkt->pkt_comp == NULL);
14288 
14289 	cmd->cmd_state = FCP_PKT_ISSUED;
14290 
14291 	rval = fc_ulp_transport(pptr->port_fp_handle, cmd->cmd_fp_pkt);
14292 
14293 #ifdef	DEBUG
14294 	mutex_enter(&pptr->port_pkt_mutex);
14295 	pptr->port_npkts--;
14296 	mutex_exit(&pptr->port_pkt_mutex);
14297 #endif /* DEBUG */
14298 
14299 	cmd->cmd_state = FCP_PKT_IDLE;
14300 
14301 	switch (rval) {
14302 	case FC_SUCCESS:
14303 		if (cmd->cmd_fp_pkt->pkt_state == FC_PKT_SUCCESS) {
14304 			fcp_complete_pkt(cmd->cmd_fp_pkt);
14305 			rval = TRAN_ACCEPT;
14306 		} else {
14307 			rval = TRAN_FATAL_ERROR;
14308 		}
14309 		break;
14310 
14311 	case FC_TRAN_BUSY:
14312 		rval = TRAN_BUSY;
14313 		cmd->cmd_pkt->pkt_resid = 0;
14314 		break;
14315 
14316 	case FC_BADPACKET:
14317 		rval = TRAN_BADPKT;
14318 		break;
14319 
14320 	default:
14321 		rval = TRAN_FATAL_ERROR;
14322 		break;
14323 	}
14324 
14325 	return (rval);
14326 }
14327 
14328 
14329 /*
14330  * called by some of the following transport-called routines to convert
14331  * a supplied dip ptr to a port struct ptr (i.e. to the soft state)
14332  */
14333 static struct fcp_port *
14334 fcp_dip2port(dev_info_t *dip)
14335 {
14336 	int	instance;
14337 
14338 	instance = ddi_get_instance(dip);
14339 	return (ddi_get_soft_state(fcp_softstate, instance));
14340 }
14341 
14342 
14343 /*
14344  * called internally to return a LUN given a child info pointer (cip)
14345  */
14346 struct fcp_lun *
14347 fcp_get_lun_from_cip(struct fcp_port *pptr, child_info_t *cip)
14348 {
14349 	struct fcp_tgt *ptgt;
14350 	struct fcp_lun *plun;
14351 	int i;
14352 
14353 
14354 	ASSERT(mutex_owned(&pptr->port_mutex));
14355 
14356 	for (i = 0; i < FCP_NUM_HASH; i++) {
14357 		for (ptgt = pptr->port_tgt_hash_table[i];
14358 		    ptgt != NULL;
14359 		    ptgt = ptgt->tgt_next) {
14360 			mutex_enter(&ptgt->tgt_mutex);
14361 			for (plun = ptgt->tgt_lun; plun != NULL;
14362 			    plun = plun->lun_next) {
14363 				mutex_enter(&plun->lun_mutex);
14364 				if (plun->lun_cip == cip) {
14365 					mutex_exit(&plun->lun_mutex);
14366 					mutex_exit(&ptgt->tgt_mutex);
14367 					return (plun); /* match found */
14368 				}
14369 				mutex_exit(&plun->lun_mutex);
14370 			}
14371 			mutex_exit(&ptgt->tgt_mutex);
14372 		}
14373 	}
14374 	return (NULL);				/* no LUN found */
14375 }
14376 
14377 /*
14378  * pass an element to the hotplug list, kick the hotplug thread
14379  * and wait for the element to get processed by the hotplug thread.
14380  * on return the element is freed.
14381  *
14382  * returns zero on success and non-zero on failure
14383  *
14384  * acquires/releases the target mutex
14385  *
14386  */
14387 static int
14388 fcp_pass_to_hp_and_wait(struct fcp_port *pptr, struct fcp_lun *plun,
14389     child_info_t *cip, int what, int link_cnt, int tgt_cnt, int flags)
14390 {
14391 	struct fcp_hp_elem	*elem;
14392 	int			rval;
14393 
14394 	mutex_enter(&plun->lun_tgt->tgt_mutex);
14395 	if ((elem = fcp_pass_to_hp(pptr, plun, cip,
14396 	    what, link_cnt, tgt_cnt, flags, 1)) == NULL) {
14397 		mutex_exit(&plun->lun_tgt->tgt_mutex);
14398 		fcp_log(CE_CONT, pptr->port_dip,
14399 		    "Can not pass_to_hp: what: %d; D_ID=%x, LUN=%x\n",
14400 		    what, plun->lun_tgt->tgt_d_id, plun->lun_num);
14401 		return (NDI_FAILURE);
14402 	}
14403 	mutex_exit(&plun->lun_tgt->tgt_mutex);
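	/*
	 * The element was created with wait set, so block until the
	 * hotplug daemon signals completion, then tear the element down
	 * ourselves.
	 */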
14404 	mutex_enter(&elem->mutex);
14405 	if (elem->wait) {
14406 		while (elem->wait) {
14407 			cv_wait(&elem->cv, &elem->mutex);
14408 		}
14409 	}
14410 	rval = (elem->result);
14411 	mutex_exit(&elem->mutex);
14412 	mutex_destroy(&elem->mutex);
14413 	cv_destroy(&elem->cv);
14414 	kmem_free(elem, sizeof (struct fcp_hp_elem));
14415 	return (rval);
14416 }
14417 
14418 /*
14419  * pass an element to the hotplug list, and then
14420  * kick the hotplug thread
14421  *
14422  * return Boolean success, i.e. non-zero if all goes well, else zero on error
14423  *
14424  * acquires/releases the hotplug mutex
14425  *
14426  * called with the target mutex owned
14427  *
14428  * memory acquired in NOSLEEP mode
14429  * NOTE: if wait is set to 1 then the caller is responsible for waiting
14430  *	 for the hp daemon to process the request and is responsible for
14431  *	 freeing the element
14432  */
14433 static struct fcp_hp_elem *
14434 fcp_pass_to_hp(struct fcp_port *pptr, struct fcp_lun *plun,
14435     child_info_t *cip, int what, int link_cnt, int tgt_cnt, int flags, int wait)
14436 {
14437 	struct fcp_hp_elem	*elem;
14438 	dev_info_t *pdip;
14439 
14440 	ASSERT(pptr != NULL);
14441 	ASSERT(plun != NULL);
14442 	ASSERT(plun->lun_tgt != NULL);
14443 	ASSERT(mutex_owned(&plun->lun_tgt->tgt_mutex));
14444 
14445 	/* create space for a hotplug element */
14446 	if ((elem = kmem_zalloc(sizeof (struct fcp_hp_elem), KM_NOSLEEP))
14447 	    == NULL) {
14448 		fcp_log(CE_WARN, NULL,
14449 		    "!can't allocate memory for hotplug element");
14450 		return (NULL);
14451 	}
14452 
14453 	/* fill in hotplug element */
14454 	elem->port = pptr;
14455 	elem->lun = plun;
14456 	elem->cip = cip;
14457 	elem->old_lun_mpxio = plun->lun_mpxio;
14458 	elem->what = what;
14459 	elem->flags = flags;
14460 	elem->link_cnt = link_cnt;
14461 	elem->tgt_cnt = tgt_cnt;
14462 	elem->wait = wait;
14463 	mutex_init(&elem->mutex, NULL, MUTEX_DRIVER, NULL);
14464 	cv_init(&elem->cv, NULL, CV_DRIVER, NULL);
14465 
14466 	/* schedule the hotplug task */
14467 	pdip = pptr->port_dip;
14468 	mutex_enter(&plun->lun_mutex);
14469 	if (elem->what == FCP_ONLINE || elem->what == FCP_OFFLINE) {
14470 		plun->lun_event_count++;
14471 		elem->event_cnt = plun->lun_event_count;
14472 	}
14473 	mutex_exit(&plun->lun_mutex);
14474 	if (taskq_dispatch(DEVI(pdip)->devi_taskq, fcp_hp_task,
14475 	    (void *)elem, KM_NOSLEEP) == NULL) {
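		/* dispatch failed: undo the event count bump and bail out */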
14476 		mutex_enter(&plun->lun_mutex);
14477 		if (elem->what == FCP_ONLINE || elem->what == FCP_OFFLINE) {
14478 			plun->lun_event_count--;
14479 		}
14480 		mutex_exit(&plun->lun_mutex);
14481 		kmem_free(elem, sizeof (*elem));
14482 		return (NULL);
14483 	}
14484 
14485 	return (elem);
14486 }
14487 
14488 
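/*
 * Attempt to (re)issue a previously queued command.  If the LUN is
 * neither busy nor offline and the port is not onlining, the packet is
 * re-prepared and handed back to the transport; otherwise (or if the
 * transport call fails) it is put back on the port's packet queue.
 */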
14489 static void
14490 fcp_retransport_cmd(struct fcp_port *pptr, struct fcp_pkt *cmd)
14491 {
14492 	int			rval;
14493 	struct scsi_address	*ap;
14494 	struct fcp_lun	*plun;
14495 	struct fcp_tgt	*ptgt;
14496 	fc_packet_t	*fpkt;
14497 
14498 	ap = &cmd->cmd_pkt->pkt_address;
14499 	plun = ADDR2LUN(ap);
14500 	ptgt = plun->lun_tgt;
14501 
14502 	ASSERT(cmd->cmd_flags & CFLAG_IN_QUEUE);
14503 
14504 	cmd->cmd_state = FCP_PKT_IDLE;
14505 
14506 	mutex_enter(&pptr->port_mutex);
14507 	mutex_enter(&ptgt->tgt_mutex);
14508 	if (((plun->lun_state & (FCP_LUN_BUSY | FCP_LUN_OFFLINE)) == 0) &&
14509 	    (!(pptr->port_state & FCP_STATE_ONLINING))) {
14510 		fc_ulp_rscn_info_t *rscnp;
14511 
14512 		cmd->cmd_state = FCP_PKT_ISSUED;
14513 
14514 		/*
14515 		 * It is possible for pkt_pd to be NULL if tgt_pd_handle was
14516 		 * originally NULL, hence we try to set it to the pd pointed
14517 		 * to by the SCSI device we're trying to get to.
14518 		 */
14519 
14520 		fpkt = cmd->cmd_fp_pkt;
14521 		if ((fpkt->pkt_pd == NULL) && (ptgt->tgt_pd_handle != NULL)) {
14522 			fpkt->pkt_pd = ptgt->tgt_pd_handle;
14523 			/*
14524 			 * We need to notify the transport that we now have a
14525 			 * reference to the remote port handle.
14526 			 */
14527 			fc_ulp_hold_remote_port(ptgt->tgt_pd_handle);
14528 		}
14529 
14530 		mutex_exit(&ptgt->tgt_mutex);
14531 		mutex_exit(&pptr->port_mutex);
14532 
14533 		ASSERT((cmd->cmd_pkt->pkt_flags & FLAG_NOINTR) == 0);
14534 
14535 		/* prepare the packet */
14536 
14537 		fcp_prepare_pkt(pptr, cmd, plun);
14538 
14539 		rscnp = (fc_ulp_rscn_info_t *)cmd->cmd_fp_pkt->
14540 		    pkt_ulp_rscn_infop;
14541 
14542 		cmd->cmd_timeout = cmd->cmd_pkt->pkt_time ?
14543 		    fcp_watchdog_time + cmd->cmd_pkt->pkt_time : 0;
14544 
14545 		if (rscnp != NULL) {
14546 			rscnp->ulp_rscn_count =
14547 			    fc_ulp_get_rscn_count(pptr->
14548 			    port_fp_handle);
14549 		}
14550 
14551 		rval = fcp_transport(pptr->port_fp_handle,
14552 		    cmd->cmd_fp_pkt, 0);
14553 
14554 		if (rval == FC_SUCCESS) {
14555 			return;
14556 		}
14557 		cmd->cmd_state &= ~FCP_PKT_ISSUED;
14558 	} else {
14559 		mutex_exit(&ptgt->tgt_mutex);
14560 		mutex_exit(&pptr->port_mutex);
14561 	}
14562 
14563 	fcp_queue_pkt(pptr, cmd);
14564 }
14565 
14566 
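/*
 * Fail a queued command: take it out of the queued state, fill in the
 * packet reason and statistics and complete it back to the target driver.
 */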
14567 static void
14568 fcp_fail_cmd(struct fcp_pkt *cmd, uchar_t reason, uint_t statistics)
14569 {
14570 	ASSERT(cmd->cmd_flags & CFLAG_IN_QUEUE);
14571 
14572 	cmd->cmd_flags &= ~CFLAG_IN_QUEUE;
14573 	cmd->cmd_state = FCP_PKT_IDLE;
14574 
14575 	cmd->cmd_pkt->pkt_reason = reason;
14576 	cmd->cmd_pkt->pkt_state = 0;
14577 	cmd->cmd_pkt->pkt_statistics = statistics;
14578 
14579 	fcp_post_callback(cmd);
14580 }
14581 
14582 /*
14583  *     Function: fcp_queue_pkt
14584  *
14585  *  Description: This function queues the packet passed by the caller into
14586  *		 the list of packets of the FCP port.
14587  *
14588  *     Argument: *pptr		FCP port.
14589  *		 *cmd		FCP packet to queue.
14590  *
14591  * Return Value: None
14592  *
14593  *	Context: User, Kernel and Interrupt context.
14594  */
14595 static void
14596 fcp_queue_pkt(struct fcp_port *pptr, struct fcp_pkt *cmd)
14597 {
14598 	ASSERT((cmd->cmd_pkt->pkt_flags & FLAG_NOQUEUE) == 0);
14599 
14600 	mutex_enter(&pptr->port_pkt_mutex);
14601 	cmd->cmd_flags |= CFLAG_IN_QUEUE;
14602 	ASSERT(cmd->cmd_state != FCP_PKT_ISSUED);
14603 	cmd->cmd_timeout = fcp_watchdog_time + FCP_QUEUE_DELAY;
14604 
14605 	/*
14606 	 * zero pkt_time means hang around forever
14607 	 */
14608 	if (cmd->cmd_pkt->pkt_time) {
14609 		if (cmd->cmd_fp_pkt->pkt_timeout > FCP_QUEUE_DELAY) {
14610 			cmd->cmd_fp_pkt->pkt_timeout -= FCP_QUEUE_DELAY;
14611 		} else {
14612 			/*
14613 			 * Tell the watch thread to fail the command by
14614 			 * setting the timeout to the highest value
14615 			 */
14616 			cmd->cmd_timeout = fcp_watchdog_time;
14617 			cmd->cmd_fp_pkt->pkt_timeout = FCP_INVALID_TIMEOUT;
14618 		}
14619 	}
14620 
14621 	if (pptr->port_pkt_head) {
14622 		ASSERT(pptr->port_pkt_tail != NULL);
14623 
14624 		pptr->port_pkt_tail->cmd_next = cmd;
14625 		pptr->port_pkt_tail = cmd;
14626 	} else {
14627 		ASSERT(pptr->port_pkt_tail == NULL);
14628 
14629 		pptr->port_pkt_head = pptr->port_pkt_tail = cmd;
14630 	}
14631 	cmd->cmd_next = NULL;
14632 	mutex_exit(&pptr->port_pkt_mutex);
14633 }
14634 
14635 /*
14636  *     Function: fcp_update_targets
14637  *
14638  *  Description: This function applies the specified change of state to all
14639  *		 the targets listed.  The operation applied is 'set'.
14640  *
14641  *     Argument: *pptr		FCP port.
14642  *		 *dev_list	Array of fc_portmap_t structures.
14643  *		 count		Length of dev_list.
14644  *		 state		State bits to update.
14645  *		 cause		Reason for the update.
14646  *
14647  * Return Value: None
14648  *
14649  *	Context: User, Kernel and Interrupt context.
14650  *		 The mutex pptr->port_mutex must be held.
14651  */
14652 static void
14653 fcp_update_targets(struct fcp_port *pptr, fc_portmap_t *dev_list,
14654     uint32_t count, uint32_t state, int cause)
14655 {
14656 	fc_portmap_t		*map_entry;
14657 	struct fcp_tgt	*ptgt;
14658 
14659 	ASSERT(MUTEX_HELD(&pptr->port_mutex));
14660 
14661 	while (count--) {
14662 		map_entry = &(dev_list[count]);
14663 		ptgt = fcp_lookup_target(pptr,
14664 		    (uchar_t *)&(map_entry->map_pwwn));
14665 		if (ptgt == NULL) {
14666 			continue;
14667 		}
14668 
14669 		mutex_enter(&ptgt->tgt_mutex);
14670 		ptgt->tgt_trace = 0;
14671 		ptgt->tgt_change_cnt++;
14672 		ptgt->tgt_statec_cause = cause;
14673 		ptgt->tgt_tmp_cnt = 1;
14674 		fcp_update_tgt_state(ptgt, FCP_SET, state);
14675 		mutex_exit(&ptgt->tgt_mutex);
14676 	}
14677 }
14678 
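/*
 * Wrapper around fcp_call_finish_init_held() that acquires and releases
 * the port mutex for the caller.
 */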
14679 static int
14680 fcp_call_finish_init(struct fcp_port *pptr, struct fcp_tgt *ptgt,
14681     int lcount, int tcount, int cause)
14682 {
14683 	int rval;
14684 
14685 	mutex_enter(&pptr->port_mutex);
14686 	rval = fcp_call_finish_init_held(pptr, ptgt, lcount, tcount, cause);
14687 	mutex_exit(&pptr->port_mutex);
14688 
14689 	return (rval);
14690 }
14691 
14692 
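/*
 * Account for one completed piece of discovery work: decrement the
 * per-target and, when appropriate, the per-port temporary counters and
 * call fcp_finish_tgt()/fcp_finish_init() once they drain to zero,
 * provided the link and target generation counts still match.  Returns
 * FCP_NO_CHANGE, or FCP_DEV_CHANGE if the target changed underneath us.
 */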
14693 static int
14694 fcp_call_finish_init_held(struct fcp_port *pptr, struct fcp_tgt *ptgt,
14695     int lcount, int tcount, int cause)
14696 {
14697 	int	finish_init = 0;
14698 	int	finish_tgt = 0;
14699 	int	do_finish_init = 0;
14700 	int	rval = FCP_NO_CHANGE;
14701 
14702 	if (cause == FCP_CAUSE_LINK_CHANGE ||
14703 	    cause == FCP_CAUSE_LINK_DOWN) {
14704 		do_finish_init = 1;
14705 	}
14706 
14707 	if (ptgt != NULL) {
14708 		FCP_TRACE(fcp_logq, pptr->port_instbuf, fcp_trace,
14709 		    FCP_BUF_LEVEL_2, 0,
14710 		    "link_cnt: %d,%d; tgt_cnt: %d,%d; tmp_cnt: %d,%d;"
14711 		    " cause = %d, d_id = 0x%x, tgt_done = %d",
14712 		    pptr->port_link_cnt, lcount, ptgt->tgt_change_cnt, tcount,
14713 		    pptr->port_tmp_cnt, ptgt->tgt_tmp_cnt, cause,
14714 		    ptgt->tgt_d_id, ptgt->tgt_done);
14715 
14716 		mutex_enter(&ptgt->tgt_mutex);
14717 
14718 		if (tcount && (ptgt->tgt_change_cnt != tcount)) {
14719 			rval = FCP_DEV_CHANGE;
14720 			if (do_finish_init && ptgt->tgt_done == 0) {
14721 				ptgt->tgt_done++;
14722 				finish_init = 1;
14723 			}
14724 		} else {
14725 			if (--ptgt->tgt_tmp_cnt <= 0) {
14726 				ptgt->tgt_tmp_cnt = 0;
14727 				finish_tgt = 1;
14728 
14729 				if (do_finish_init) {
14730 					finish_init = 1;
14731 				}
14732 			}
14733 		}
14734 		mutex_exit(&ptgt->tgt_mutex);
14735 	} else {
14736 		FCP_TRACE(fcp_logq, pptr->port_instbuf, fcp_trace,
14737 		    FCP_BUF_LEVEL_2, 0,
14738 		    "Call Finish Init for NO target");
14739 
14740 		if (do_finish_init) {
14741 			finish_init = 1;
14742 		}
14743 	}
14744 
14745 	if (finish_tgt) {
14746 		ASSERT(ptgt != NULL);
14747 
14748 		mutex_enter(&ptgt->tgt_mutex);
14749 #ifdef	DEBUG
14750 		bzero(ptgt->tgt_tmp_cnt_stack,
14751 		    sizeof (ptgt->tgt_tmp_cnt_stack));
14752 
14753 		ptgt->tgt_tmp_cnt_depth = getpcstack(ptgt->tgt_tmp_cnt_stack,
14754 		    FCP_STACK_DEPTH);
14755 #endif /* DEBUG */
14756 		mutex_exit(&ptgt->tgt_mutex);
14757 
14758 		(void) fcp_finish_tgt(pptr, ptgt, lcount, tcount, cause);
14759 	}
14760 
14761 	if (finish_init && lcount == pptr->port_link_cnt) {
14762 		ASSERT(pptr->port_tmp_cnt > 0);
14763 		if (--pptr->port_tmp_cnt == 0) {
14764 			fcp_finish_init(pptr);
14765 		}
14766 	} else if (lcount != pptr->port_link_cnt) {
14767 		FCP_TRACE(fcp_logq, pptr->port_instbuf,
14768 		    fcp_trace, FCP_BUF_LEVEL_2, 0,
14769 		    "fcp_call_finish_init_held,1: state change occurred"
14770 		    " for D_ID=0x%x", (ptgt) ? ptgt->tgt_d_id : 0);
14771 	}
14772 
14773 	return (rval);
14774 }
14775 
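/*
 * Timeout callback used to rediscover the LUNs of a target after a
 * REPORT_LUNS data change: build a one-entry portmap for the target and
 * feed it back through fcp_statec_callback() as a
 * PORT_DEVICE_REPORTLUN_CHANGED device change.
 */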
14776 static void
14777 fcp_reconfigure_luns(void * tgt_handle)
14778 {
14779 	uint32_t		dev_cnt;
14780 	fc_portmap_t		*devlist;
14781 	struct fcp_tgt	*ptgt = (struct fcp_tgt *)tgt_handle;
14782 	struct fcp_port		*pptr = ptgt->tgt_port;
14783 
14784 	/*
14785 	 * If the timer that fires this off got canceled too late, the
14786 	 * target could have been destroyed.
14787 	 */
14788 
14789 	if (ptgt->tgt_tid == NULL) {
14790 		return;
14791 	}
14792 
14793 	devlist = kmem_zalloc(sizeof (*devlist), KM_NOSLEEP);
14794 	if (devlist == NULL) {
14795 		fcp_log(CE_WARN, pptr->port_dip,
14796 		    "!fcp%d: failed to allocate for portmap",
14797 		    pptr->port_instance);
14798 		return;
14799 	}
14800 
14801 	dev_cnt = 1;
14802 	devlist->map_pd = ptgt->tgt_pd_handle;
14803 	devlist->map_hard_addr.hard_addr = ptgt->tgt_hard_addr;
14804 	devlist->map_did.port_id = ptgt->tgt_d_id;
14805 
14806 	bcopy(&ptgt->tgt_node_wwn.raw_wwn[0], &devlist->map_nwwn, FC_WWN_SIZE);
14807 	bcopy(&ptgt->tgt_port_wwn.raw_wwn[0], &devlist->map_pwwn, FC_WWN_SIZE);
14808 
14809 	devlist->map_state = PORT_DEVICE_LOGGED_IN;
14810 	devlist->map_type = PORT_DEVICE_REPORTLUN_CHANGED;
14811 	devlist->map_flags = 0;
14812 
14813 	fcp_statec_callback(NULL, pptr->port_fp_handle, FC_STATE_DEVICE_CHANGE,
14814 	    pptr->port_topology, devlist, dev_cnt, pptr->port_id);
14815 
14816 	/*
14817 	 * Clear the tgt_tid once there are no more references to
14818 	 * the fcp_tgt
14819 	 */
14820 	mutex_enter(&ptgt->tgt_mutex);
14821 	ptgt->tgt_tid = NULL;
14822 	mutex_exit(&ptgt->tgt_mutex);
14823 
14824 	kmem_free(devlist, sizeof (*devlist));
14825 }
14826 
14827 
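/*
 * Free every target (and its LUNs) hanging off the port's target hash
 * table.
 */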
14828 static void
14829 fcp_free_targets(struct fcp_port *pptr)
14830 {
14831 	int			i;
14832 	struct fcp_tgt	*ptgt;
14833 
14834 	mutex_enter(&pptr->port_mutex);
14835 	for (i = 0; i < FCP_NUM_HASH; i++) {
14836 		ptgt = pptr->port_tgt_hash_table[i];
14837 		while (ptgt != NULL) {
14838 			struct fcp_tgt *next_tgt = ptgt->tgt_next;
14839 
14840 			fcp_free_target(ptgt);
14841 			ptgt = next_tgt;
14842 		}
14843 	}
14844 	mutex_exit(&pptr->port_mutex);
14845 }
14846 
14847 
14848 static void
14849 fcp_free_target(struct fcp_tgt *ptgt)
14850 {
14851 	struct fcp_lun	*plun;
14852 	timeout_id_t		tid;
14853 
14854 	mutex_enter(&ptgt->tgt_mutex);
14855 	tid = ptgt->tgt_tid;
14856 
14857 	/*
14858 	 * Cancel any pending timeouts for this target.
14859 	 */
14860 
14861 	if (tid != NULL) {
14862 		/*
14863 		 * Set tgt_tid to NULL first to avoid a race in the callback.
14864 		 * If tgt_tid is NULL, the callback will simply return.
14865 		 */
14866 		ptgt->tgt_tid = NULL;
14867 		mutex_exit(&ptgt->tgt_mutex);
14868 		(void) untimeout(tid);
14869 		mutex_enter(&ptgt->tgt_mutex);
14870 	}
14871 
14872 	plun = ptgt->tgt_lun;
14873 	while (plun != NULL) {
14874 		struct fcp_lun *next_lun = plun->lun_next;
14875 
14876 		fcp_dealloc_lun(plun);
14877 		plun = next_lun;
14878 	}
14879 
14880 	mutex_exit(&ptgt->tgt_mutex);
14881 	fcp_dealloc_tgt(ptgt);
14882 }
14883 
14884 /*
14885  *     Function: fcp_is_retryable
14886  *
14887  *  Description: Indicates if the internal packet is retryable.
14888  *
14889  *     Argument: *icmd		FCP internal packet.
14890  *
14891  * Return Value: 0	Not retryable
14892  *		 1	Retryable
14893  *
14894  *	Context: User, Kernel and Interrupt context
14895  */
14896 static int
14897 fcp_is_retryable(struct fcp_ipkt *icmd)
14898 {
14899 	if (icmd->ipkt_port->port_state & (FCP_STATE_SUSPENDED |
14900 	    FCP_STATE_DETACHING | FCP_STATE_POWER_DOWN)) {
14901 		return (0);
14902 	}
14903 
14904 	return (((fcp_watchdog_time + icmd->ipkt_fpkt->pkt_timeout) <
14905 	    icmd->ipkt_port->port_deadline) ? 1 : 0);
14906 }
14907 
14908 /*
14909  *     Function: fcp_create_on_demand
14910  *
14911  *     Argument: *pptr		FCP port.
14912  *		 *pwwn		Port WWN.
14913  *
14914  * Return Value: 0	Success
14915  *		 EIO
14916  *		 ENOMEM
14917  *		 EBUSY
14918  *		 EINVAL
14919  *
14920  *	Context: User and Kernel context
14921  */
14922 static int
14923 fcp_create_on_demand(struct fcp_port *pptr, uchar_t *pwwn)
14924 {
14925 	int			wait_ms;
14926 	int			tcount;
14927 	int			lcount;
14928 	int			ret;
14929 	int			error;
14930 	int			rval = EIO;
14931 	int			ntries;
14932 	fc_portmap_t		*devlist;
14933 	opaque_t		pd;
14934 	struct fcp_lun		*plun;
14935 	struct fcp_tgt		*ptgt;
14936 	int			old_manual = 0;
14937 
14938 	/* Allocates the fc_portmap_t structure. */
14939 	devlist = kmem_zalloc(sizeof (*devlist), KM_SLEEP);
14940 
14941 	/*
14942 	 * If FC_INVALID_RSCN_COUNT is non-zero, we will have to init as shown
14943 	 * in the commented statement below:
14944 	 *
14945 	 * devlist->map_rscn_info.ulp_rscn_count = FC_INVALID_RSCN_COUNT;
14946 	 *
14947 	 * Below, the deadline for the discovery process is set.
14948 	 */
14949 	mutex_enter(&pptr->port_mutex);
14950 	pptr->port_deadline = fcp_watchdog_time + FCP_ICMD_DEADLINE;
14951 	mutex_exit(&pptr->port_mutex);
14952 
14953 	/*
14954 	 * We try to find the remote port based on the WWN provided by the
14955 	 * caller.  We actually ask fp/fctl if it has it.
14956 	 */
14957 	pd = fc_ulp_get_remote_port(pptr->port_fp_handle,
14958 	    (la_wwn_t *)pwwn, &error, 1);
14959 
14960 	if (pd == NULL) {
14961 		kmem_free(devlist, sizeof (*devlist));
14962 		return (rval);
14963 	}
14964 
14965 	/*
14966 	 * The remote port was found.  We ask fp/fctl to update our
14967 	 * fc_portmap_t structure.
14968 	 */
14969 	ret = fc_ulp_pwwn_to_portmap(pptr->port_fp_handle,
14970 	    (la_wwn_t *)pwwn, devlist);
14971 	if (ret != FC_SUCCESS) {
14972 		kmem_free(devlist, sizeof (*devlist));
14973 		return (rval);
14974 	}
14975 
14976 	/*
14977 	 * The map type field is set to indicate that the creation is being
14978 	 * done at the user's request (ioctl, probably luxadm or cfgadm).
14979 	 */
14980 	devlist->map_type = PORT_DEVICE_USER_CREATE;
14981 
14982 	mutex_enter(&pptr->port_mutex);
14983 
14984 	/*
14985 	 * We check to see if fcp already has a target that describes the
14986 	 * device being created.  If not, it is created.
14987 	 */
14988 	ptgt = fcp_lookup_target(pptr, pwwn);
14989 	if (ptgt == NULL) {
14990 		lcount = pptr->port_link_cnt;
14991 		mutex_exit(&pptr->port_mutex);
14992 
14993 		ptgt = fcp_alloc_tgt(pptr, devlist, lcount);
14994 		if (ptgt == NULL) {
14995 			fcp_log(CE_WARN, pptr->port_dip,
14996 			    "!FC target allocation failed");
14997 			return (ENOMEM);
14998 		}
14999 
15000 		mutex_enter(&pptr->port_mutex);
15001 	}
15002 
15003 	mutex_enter(&ptgt->tgt_mutex);
15004 	ptgt->tgt_statec_cause = FCP_CAUSE_USER_CREATE;
15005 	ptgt->tgt_tmp_cnt = 1;
15006 	ptgt->tgt_device_created = 0;
15007 	/*
15008 	 * If fabric and auto configuration are set but the target was
15009 	 * manually unconfigured, then reset tgt_manual_config_only to
15010 	 * 0 so the device will get configured.
15011 	 */
15012 	if (FC_TOP_EXTERNAL(pptr->port_topology) &&
15013 	    fcp_enable_auto_configuration &&
15014 	    ptgt->tgt_manual_config_only == 1) {
15015 		old_manual = 1;
15016 		ptgt->tgt_manual_config_only = 0;
15017 	}
15018 	mutex_exit(&ptgt->tgt_mutex);
15019 
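	/*
	 * Mark the target as busy while this user-initiated discovery
	 * is in progress.
	 */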
15020 	fcp_update_targets(pptr, devlist, 1,
15021 	    FCP_LUN_BUSY | FCP_LUN_MARK, FCP_CAUSE_USER_CREATE);
15022 
15023 	lcount = pptr->port_link_cnt;
15024 	tcount = ptgt->tgt_change_cnt;
15025 
15026 	if (fcp_handle_mapflags(pptr, ptgt, devlist, lcount,
15027 	    tcount, FCP_CAUSE_USER_CREATE) == TRUE) {
15028 		if (FC_TOP_EXTERNAL(pptr->port_topology) &&
15029 		    fcp_enable_auto_configuration && old_manual) {
15030 			mutex_enter(&ptgt->tgt_mutex);
15031 			ptgt->tgt_manual_config_only = 1;
15032 			mutex_exit(&ptgt->tgt_mutex);
15033 		}
15034 
15035 		if (pptr->port_link_cnt != lcount ||
15036 		    ptgt->tgt_change_cnt != tcount) {
15037 			rval = EBUSY;
15038 		}
15039 		mutex_exit(&pptr->port_mutex);
15040 
15041 		FCP_TRACE(fcp_logq, pptr->port_instbuf, fcp_trace,
15042 		    FCP_BUF_LEVEL_3, 0,
15043 		    "fcp_create_on_demand: mapflags ptgt=%x, "
15044 		    "lcount=%x::port_link_cnt=%x, "
15045 		    "tcount=%x: tgt_change_cnt=%x, rval=%x",
15046 		    ptgt, lcount, pptr->port_link_cnt,
15047 		    tcount, ptgt->tgt_change_cnt, rval);
15048 		return (rval);
15049 	}
15050 
15051 	/*
15052 	 * Due to the lack of synchronization mechanisms, we perform
15053 	 * periodic monitoring of our request.  Because requests
15054 	 * get dropped when another one supersedes them (either because
15055 	 * of a link change or a target change), it is difficult to
15056 	 * provide a clean synchronization mechanism (such as a
15057 	 * semaphore or a condition variable) without exhaustively
15058 	 * rewriting the mainline discovery code of this driver.
15059 	 */
15060 	wait_ms = 500;
15061 
15062 	ntries = fcp_max_target_retries;
15063 
15064 	FCP_TRACE(fcp_logq, pptr->port_instbuf, fcp_trace,
15065 	    FCP_BUF_LEVEL_3, 0,
15066 	    "fcp_create_on_demand(1): ntries=%x, ptgt=%x, "
15067 	    "lcount=%x::port_link_cnt=%x, "
15068 	    "tcount=%x::tgt_change_cnt=%x, rval=%x, tgt_device_created=%x "
15069 	    "tgt_tmp_cnt =%x",
15070 	    ntries, ptgt, lcount, pptr->port_link_cnt,
15071 	    tcount, ptgt->tgt_change_cnt, rval, ptgt->tgt_device_created,
15072 	    ptgt->tgt_tmp_cnt);
15073 
15074 	mutex_enter(&ptgt->tgt_mutex);
15075 	while (ntries-- != 0 && pptr->port_link_cnt == lcount &&
15076 	    ptgt->tgt_change_cnt == tcount && ptgt->tgt_device_created == 0) {
15077 		mutex_exit(&ptgt->tgt_mutex);
15078 		mutex_exit(&pptr->port_mutex);
15079 
15080 		delay(drv_usectohz(wait_ms * 1000));
15081 
15082 		mutex_enter(&pptr->port_mutex);
15083 		mutex_enter(&ptgt->tgt_mutex);
15084 	}
15085 
15086 
15087 	if (pptr->port_link_cnt != lcount || ptgt->tgt_change_cnt != tcount) {
15088 		rval = EBUSY;
15089 	} else {
15090 		if (ptgt->tgt_tmp_cnt == 0 && ptgt->tgt_node_state ==
15091 		    FCP_TGT_NODE_PRESENT) {
15092 			rval = 0;
15093 		}
15094 	}
15095 
15096 	FCP_TRACE(fcp_logq, pptr->port_instbuf, fcp_trace,
15097 	    FCP_BUF_LEVEL_3, 0,
15098 	    "fcp_create_on_demand(2): ntries=%x, ptgt=%x, "
15099 	    "lcount=%x::port_link_cnt=%x, "
15100 	    "tcount=%x::tgt_change_cnt=%x, rval=%x, tgt_device_created=%x "
15101 	    "tgt_tmp_cnt =%x",
15102 	    ntries, ptgt, lcount, pptr->port_link_cnt,
15103 	    tcount, ptgt->tgt_change_cnt, rval, ptgt->tgt_device_created,
15104 	    ptgt->tgt_tmp_cnt);
15105 
15106 	if (rval) {
15107 		if (FC_TOP_EXTERNAL(pptr->port_topology) &&
15108 		    fcp_enable_auto_configuration && old_manual) {
15109 			ptgt->tgt_manual_config_only = 1;
15110 		}
15111 		mutex_exit(&ptgt->tgt_mutex);
15112 		mutex_exit(&pptr->port_mutex);
15113 		kmem_free(devlist, sizeof (*devlist));
15114 
15115 		FCP_TRACE(fcp_logq, pptr->port_instbuf, fcp_trace,
15116 		    FCP_BUF_LEVEL_3, 0,
15117 		    "fcp_create_on_demand(3): ntries=%x, ptgt=%x, "
15118 		    "lcount=%x::port_link_cnt=%x, "
15119 		    "tcount=%x::tgt_change_cnt=%x, rval=%x, "
15120 		    "tgt_device_created=%x, tgt D_ID=%x",
15121 		    ntries, ptgt, lcount, pptr->port_link_cnt,
15122 		    tcount, ptgt->tgt_change_cnt, rval,
15123 		    ptgt->tgt_device_created, ptgt->tgt_d_id);
15124 		return (rval);
15125 	}
15126 
15127 	if ((plun = ptgt->tgt_lun) != NULL) {
15128 		tcount = plun->lun_tgt->tgt_change_cnt;
15129 	} else {
15130 		rval = EINVAL;
15131 	}
15132 	lcount = pptr->port_link_cnt;
15133 
15134 	/*
15135 	 * Configuring the target with no LUNs will fail. We
15136 	 * should reset the node state so that it is not
15137 	 * automatically configured when the LUNs are added
15138 	 * to this target.
15139 	 */
15140 	if (ptgt->tgt_lun_cnt == 0) {
15141 		ptgt->tgt_node_state = FCP_TGT_NODE_NONE;
15142 	}
15143 	mutex_exit(&ptgt->tgt_mutex);
15144 	mutex_exit(&pptr->port_mutex);
15145 
15146 	while (plun) {
15147 		child_info_t	*cip;
15148 
15149 		mutex_enter(&plun->lun_mutex);
15150 		cip = plun->lun_cip;
15151 		mutex_exit(&plun->lun_mutex);
15152 
15153 		mutex_enter(&ptgt->tgt_mutex);
15154 		if (!(plun->lun_state & FCP_LUN_OFFLINE)) {
15155 			mutex_exit(&ptgt->tgt_mutex);
15156 
15157 			rval = fcp_pass_to_hp_and_wait(pptr, plun, cip,
15158 			    FCP_ONLINE, lcount, tcount,
15159 			    NDI_ONLINE_ATTACH);
15160 			if (rval != NDI_SUCCESS) {
15161 				FCP_TRACE(fcp_logq,
15162 				    pptr->port_instbuf, fcp_trace,
15163 				    FCP_BUF_LEVEL_3, 0,
15164 				    "fcp_create_on_demand: "
15165 				    "pass_to_hp_and_wait failed "
15166 				    "rval=%x", rval);
15167 				rval = EIO;
15168 			} else {
15169 				mutex_enter(&LUN_TGT->tgt_mutex);
15170 				plun->lun_state &= ~(FCP_LUN_OFFLINE |
15171 				    FCP_LUN_BUSY);
15172 				mutex_exit(&LUN_TGT->tgt_mutex);
15173 			}
15174 			mutex_enter(&ptgt->tgt_mutex);
15175 		}
15176 
15177 		plun = plun->lun_next;
15178 		mutex_exit(&ptgt->tgt_mutex);
15179 	}
15180 
15181 	kmem_free(devlist, sizeof (*devlist));
15182 
15183 	if (FC_TOP_EXTERNAL(pptr->port_topology) &&
15184 	    fcp_enable_auto_configuration && old_manual) {
15185 		mutex_enter(&ptgt->tgt_mutex);
15186 		/* if successful then set manual to 0 */
15187 		if (rval == 0) {
15188 			ptgt->tgt_manual_config_only = 0;
15189 		} else {
15190 			/* reset to 1 so the user has to do the config */
15191 			ptgt->tgt_manual_config_only = 1;
15192 		}
15193 		mutex_exit(&ptgt->tgt_mutex);
15194 	}
15195 
15196 	return (rval);
15197 }
15198 
15199 
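/*
 * Convert an ASCII hex string (e.g. "510000f010fd92a1") into raw WWN
 * bytes, stopping after byte_len bytes or at the end of the string.
 */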
15200 static void
15201 fcp_ascii_to_wwn(caddr_t string, uchar_t bytes[], unsigned int byte_len)
15202 {
15203 	int		count;
15204 	uchar_t		byte;
15205 
15206 	count = 0;
15207 	while (*string) {
15208 		byte = FCP_ATOB(*string); string++;
15209 		byte = byte << 4 | FCP_ATOB(*string); string++;
15210 		bytes[count++] = byte;
15211 
15212 		if (count >= byte_len) {
15213 			break;
15214 		}
15215 	}
15216 }
15217 
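/*
 * Convert a raw WWN into its ASCII hex representation; the destination
 * buffer must hold at least FC_WWN_SIZE * 2 + 1 bytes.
 */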
15218 static void
15219 fcp_wwn_to_ascii(uchar_t wwn[], char *string)
15220 {
15221 	int		i;
15222 
15223 	for (i = 0; i < FC_WWN_SIZE; i++) {
15224 		(void) sprintf(string + (i * 2),
15225 		    "%02x", wwn[i]);
15226 	}
15227 
15228 }
15229 
15230 static void
15231 fcp_print_error(fc_packet_t *fpkt)
15232 {
15233 	struct fcp_ipkt	*icmd = (struct fcp_ipkt *)
15234 	    fpkt->pkt_ulp_private;
15235 	struct fcp_port	*pptr;
15236 	struct fcp_tgt	*ptgt;
15237 	struct fcp_lun	*plun;
15238 	caddr_t			buf;
15239 	int			scsi_cmd = 0;
15240 
15241 	ptgt = icmd->ipkt_tgt;
15242 	plun = icmd->ipkt_lun;
15243 	pptr = ptgt->tgt_port;
15244 
15245 	buf = kmem_zalloc(256, KM_NOSLEEP);
15246 	if (buf == NULL) {
15247 		return;
15248 	}
15249 
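	/*
	 * The buffer accumulates a printf-style format string (hence the
	 * escaped %% conversions); fcp_log() supplies the D_ID, LUN and
	 * error details when the message is finally emitted.
	 */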
15250 	switch (icmd->ipkt_opcode) {
15251 	case SCMD_REPORT_LUN:
15252 		(void) sprintf(buf, "!REPORT LUN to D_ID=0x%%x"
15253 		    " lun=0x%%x failed");
15254 		scsi_cmd++;
15255 		break;
15256 
15257 	case SCMD_INQUIRY_PAGE83:
15258 		(void) sprintf(buf, "!INQUIRY-83 to D_ID=0x%%x"
15259 		    " lun=0x%%x failed");
15260 		scsi_cmd++;
15261 		break;
15262 
15263 	case SCMD_INQUIRY:
15264 		(void) sprintf(buf, "!INQUIRY to D_ID=0x%%x"
15265 		    " lun=0x%%x failed");
15266 		scsi_cmd++;
15267 		break;
15268 
15269 	case LA_ELS_PLOGI:
15270 		(void) sprintf(buf, "!PLOGI to D_ID=0x%%x failed");
15271 		break;
15272 
15273 	case LA_ELS_PRLI:
15274 		(void) sprintf(buf, "!PRLI to D_ID=0x%%x failed");
15275 		break;
15276 	}
15277 
15278 	if (scsi_cmd && fpkt->pkt_state == FC_PKT_SUCCESS) {
15279 		struct fcp_rsp		response, *rsp;
15280 		uchar_t			asc, ascq;
15281 		caddr_t			sense_key = NULL;
15282 		struct fcp_rsp_info	fcp_rsp_err, *bep;
15283 
15284 		if (icmd->ipkt_nodma) {
15285 			rsp = (struct fcp_rsp *)fpkt->pkt_resp;
15286 			bep = (struct fcp_rsp_info *)((caddr_t)rsp +
15287 			    sizeof (struct fcp_rsp));
15288 		} else {
15289 			rsp = &response;
15290 			bep = &fcp_rsp_err;
15291 
15292 			FCP_CP_IN(fpkt->pkt_resp, rsp, fpkt->pkt_resp_acc,
15293 			    sizeof (struct fcp_rsp));
15294 
15295 			FCP_CP_IN(fpkt->pkt_resp + sizeof (struct fcp_rsp),
15296 			    bep, fpkt->pkt_resp_acc,
15297 			    sizeof (struct fcp_rsp_info));
15298 		}
15299 
15300 
15301 		if (fcp_validate_fcp_response(rsp, pptr) != FC_SUCCESS) {
15302 			(void) sprintf(buf + strlen(buf),
15303 			    " : Bad FCP response values rsvd1=%%x, rsvd2=%%x,"
15304 			    " sts-rsvd1=%%x, sts-rsvd2=%%x, rsplen=%%x,"
15305 			    " senselen=%%x. Giving up");
15306 
15307 			fcp_log(CE_WARN, pptr->port_dip, buf,
15308 			    ptgt->tgt_d_id, plun->lun_num, rsp->reserved_0,
15309 			    rsp->reserved_1, rsp->fcp_u.fcp_status.reserved_0,
15310 			    rsp->fcp_u.fcp_status.reserved_1,
15311 			    rsp->fcp_response_len, rsp->fcp_sense_len);
15312 
15313 			kmem_free(buf, 256);
15314 			return;
15315 		}
15316 
15317 		if (rsp->fcp_u.fcp_status.rsp_len_set &&
15318 		    bep->rsp_code != FCP_NO_FAILURE) {
15319 			(void) sprintf(buf + strlen(buf),
15320 			    " FCP Response code = 0x%x", bep->rsp_code);
15321 		}
15322 
15323 		if (rsp->fcp_u.fcp_status.scsi_status & STATUS_CHECK) {
15324 			struct scsi_extended_sense sense_info, *sense_ptr;
15325 
15326 			if (icmd->ipkt_nodma) {
15327 				sense_ptr = (struct scsi_extended_sense *)
15328 				    ((caddr_t)fpkt->pkt_resp +
15329 				    sizeof (struct fcp_rsp) +
15330 				    rsp->fcp_response_len);
15331 			} else {
15332 				sense_ptr = &sense_info;
15333 
15334 				FCP_CP_IN(fpkt->pkt_resp +
15335 				    sizeof (struct fcp_rsp) +
15336 				    rsp->fcp_response_len, &sense_info,
15337 				    fpkt->pkt_resp_acc,
15338 				    sizeof (struct scsi_extended_sense));
15339 			}
15340 
15341 			if (sense_ptr->es_key < NUM_SENSE_KEYS +
15342 			    NUM_IMPL_SENSE_KEYS) {
15343 				sense_key = sense_keys[sense_ptr->es_key];
15344 			} else {
15345 				sense_key = "Undefined";
15346 			}
15347 
15348 			asc = sense_ptr->es_add_code;
15349 			ascq = sense_ptr->es_qual_code;
15350 
15351 			(void) sprintf(buf + strlen(buf),
15352 			    ": sense key=%%s, ASC=%%x," " ASCQ=%%x."
15353 			    " Giving up");
15354 
15355 			fcp_log(CE_WARN, pptr->port_dip, buf,
15356 			    ptgt->tgt_d_id, plun->lun_num, sense_key,
15357 			    asc, ascq);
15358 		} else {
15359 			(void) sprintf(buf + strlen(buf),
15360 			    " : SCSI status=%%x. Giving up");
15361 
15362 			fcp_log(CE_WARN, pptr->port_dip, buf,
15363 			    ptgt->tgt_d_id, plun->lun_num,
15364 			    rsp->fcp_u.fcp_status.scsi_status);
15365 		}
15366 	} else {
15367 		caddr_t state, reason, action, expln;
15368 
15369 		(void) fc_ulp_pkt_error(fpkt, &state, &reason,
15370 		    &action, &expln);
15371 
15372 		(void) sprintf(buf + strlen(buf), ": State:%%s,"
15373 		    " Reason:%%s. Giving up");
15374 
15375 		if (scsi_cmd) {
15376 			fcp_log(CE_WARN, pptr->port_dip, buf,
15377 			    ptgt->tgt_d_id, plun->lun_num, state, reason);
15378 		} else {
15379 			fcp_log(CE_WARN, pptr->port_dip, buf,
15380 			    ptgt->tgt_d_id, state, reason);
15381 		}
15382 	}
15383 
15384 	kmem_free(buf, 256);
15385 }
15386 
15387 
15388 static int
15389 fcp_handle_ipkt_errors(struct fcp_port *pptr, struct fcp_tgt *ptgt,
15390     struct fcp_ipkt *icmd, int rval, caddr_t op)
15391 {
15392 	int	ret = DDI_FAILURE;
15393 	char	*error;
15394 
15395 	switch (rval) {
15396 	case FC_DEVICE_BUSY_NEW_RSCN:
15397 		/*
15398 		 * This means that there was a new RSCN that the transport
15399 		 * knows about (which the ULP *may* know about too) but the
15400 		 * pkt that was sent down was related to an older RSCN. So, we
15401 		 * are just going to reset the retry count and deadline and
15402 		 * continue to retry. The idea is that transport is currently
15403 		 * working on the new RSCN and will soon let the ULPs know
15404 		 * about it and when it does the existing logic will kick in
15405 		 * where it will change the tcount to indicate that something
15406 		 * changed on the target. So, rediscovery will start and there
15407 		 * will not be an infinite retry.
15408 		 *
15409 		 * For a full flow of how the RSCN info is transferred back and
15410 		 * forth, see fp.c
15411 		 */
15412 		icmd->ipkt_retries = 0;
15413 		icmd->ipkt_port->port_deadline = fcp_watchdog_time +
15414 		    FCP_ICMD_DEADLINE;
15415 
15416 		FCP_TRACE(fcp_logq, pptr->port_instbuf, fcp_trace,
15417 		    FCP_BUF_LEVEL_3, 0,
15418 		    "fcp_handle_ipkt_errors: rval=%x  for D_ID=%x",
15419 		    rval, ptgt->tgt_d_id);
15420 		/* FALLTHROUGH */
15421 
15422 	case FC_STATEC_BUSY:
15423 	case FC_DEVICE_BUSY:
15424 	case FC_PBUSY:
15425 	case FC_FBUSY:
15426 	case FC_TRAN_BUSY:
15427 	case FC_OFFLINE:
15428 		FCP_TRACE(fcp_logq, pptr->port_instbuf, fcp_trace,
15429 		    FCP_BUF_LEVEL_3, 0,
15430 		    "fcp_handle_ipkt_errors: rval=%x  for D_ID=%x",
15431 		    rval, ptgt->tgt_d_id);
15432 		if (icmd->ipkt_retries < FCP_MAX_RETRIES &&
15433 		    fcp_is_retryable(icmd)) {
15434 			fcp_queue_ipkt(pptr, icmd->ipkt_fpkt);
15435 			ret = DDI_SUCCESS;
15436 		}
15437 		break;
15438 
15439 	case FC_LOGINREQ:
15440 		/*
15441 		 * FC_LOGINREQ used to be handled just like all the cases
15442 		 * above. It has been changed to handle a PRLI that fails
15443 		 * with FC_LOGINREQ differently from other ipkts that fail
15444 		 * with FC_LOGINREQ. If a PRLI fails with FC_LOGINREQ it is
15445 		 * a simple matter to turn it into a PLOGI instead, so that's
15446 		 * exactly what we do here.
15447 		 */
15448 		if (icmd->ipkt_opcode == LA_ELS_PRLI) {
15449 			ret = fcp_send_els(icmd->ipkt_port, icmd->ipkt_tgt,
15450 			    icmd, LA_ELS_PLOGI, icmd->ipkt_link_cnt,
15451 			    icmd->ipkt_change_cnt, icmd->ipkt_cause);
15452 		} else {
15453 			FCP_TRACE(fcp_logq, pptr->port_instbuf, fcp_trace,
15454 			    FCP_BUF_LEVEL_3, 0,
15455 			    "fcp_handle_ipkt_errors: rval=%x  for D_ID=%x",
15456 			    rval, ptgt->tgt_d_id);
15457 			if (icmd->ipkt_retries < FCP_MAX_RETRIES &&
15458 			    fcp_is_retryable(icmd)) {
15459 				fcp_queue_ipkt(pptr, icmd->ipkt_fpkt);
15460 				ret = DDI_SUCCESS;
15461 			}
15462 		}
15463 		break;
15464 
15465 	default:
15466 		mutex_enter(&pptr->port_mutex);
15467 		mutex_enter(&ptgt->tgt_mutex);
15468 		if (!FCP_STATE_CHANGED(pptr, ptgt, icmd)) {
15469 			mutex_exit(&ptgt->tgt_mutex);
15470 			mutex_exit(&pptr->port_mutex);
15471 
15472 			(void) fc_ulp_error(rval, &error);
15473 			fcp_log(CE_WARN, pptr->port_dip,
15474 			    "!Failed to send %s to D_ID=%x error=%s",
15475 			    op, ptgt->tgt_d_id, error);
15476 		} else {
15477 			FCP_TRACE(fcp_logq, pptr->port_instbuf,
15478 			    fcp_trace, FCP_BUF_LEVEL_2, 0,
15479 			    "fcp_handle_ipkt_errors,1: state change occurred"
15480 			    " for D_ID=0x%x", ptgt->tgt_d_id);
15481 			mutex_exit(&ptgt->tgt_mutex);
15482 			mutex_exit(&pptr->port_mutex);
15483 		}
15484 		break;
15485 	}
15486 
15487 	return (ret);
15488 }
15489 
15490 
15491 /*
15492  * Check for outstanding commands on any LUN for this target
15493  */
15494 static int
15495 fcp_outstanding_lun_cmds(struct fcp_tgt *ptgt)
15496 {
15497 	struct	fcp_lun	*plun;
15498 	struct	fcp_pkt	*cmd;
15499 
15500 	for (plun = ptgt->tgt_lun; plun != NULL; plun = plun->lun_next) {
15501 		mutex_enter(&plun->lun_mutex);
15502 		for (cmd = plun->lun_pkt_head; cmd != NULL;
15503 		    cmd = cmd->cmd_forw) {
15504 			if (cmd->cmd_state == FCP_PKT_ISSUED) {
15505 				mutex_exit(&plun->lun_mutex);
15506 				return (FC_SUCCESS);
15507 			}
15508 		}
15509 		mutex_exit(&plun->lun_mutex);
15510 	}
15511 
15512 	return (FC_FAILURE);
15513 }
15514 
15515 static fc_portmap_t *
15516 fcp_construct_map(struct fcp_port *pptr, uint32_t *dev_cnt)
15517 {
15518 	int			i;
15519 	fc_portmap_t		*devlist;
15520 	fc_portmap_t		*devptr = NULL;
15521 	struct fcp_tgt	*ptgt;
15522 
15523 	mutex_enter(&pptr->port_mutex);
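	/* first pass: count the non-orphan targets to size the portmap */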
15524 	for (i = 0, *dev_cnt = 0; i < FCP_NUM_HASH; i++) {
15525 		for (ptgt = pptr->port_tgt_hash_table[i]; ptgt != NULL;
15526 		    ptgt = ptgt->tgt_next) {
15527 			if (!(ptgt->tgt_state & FCP_TGT_ORPHAN)) {
15528 				++*dev_cnt;
15529 			}
15530 		}
15531 	}
15532 
15533 	devptr = devlist = kmem_zalloc(sizeof (*devlist) * *dev_cnt,
15534 	    KM_NOSLEEP);
15535 	if (devlist == NULL) {
15536 		mutex_exit(&pptr->port_mutex);
15537 		fcp_log(CE_WARN, pptr->port_dip,
15538 		    "!fcp%d: failed to allocate for portmap for construct map",
15539 		    pptr->port_instance);
15540 		return (devptr);
15541 	}
15542 
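	/*
	 * second pass: fill in one portmap entry per target, falling back
	 * to a hand-built PORT_DEVICE_OLD entry when fp/fctl no longer
	 * knows the WWN
	 */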
15543 	for (i = 0; i < FCP_NUM_HASH; i++) {
15544 		for (ptgt = pptr->port_tgt_hash_table[i]; ptgt != NULL;
15545 		    ptgt = ptgt->tgt_next) {
15546 			if (!(ptgt->tgt_state & FCP_TGT_ORPHAN)) {
15547 				int ret;
15548 
15549 				ret = fc_ulp_pwwn_to_portmap(
15550 				    pptr->port_fp_handle,
15551 				    (la_wwn_t *)&ptgt->tgt_port_wwn.raw_wwn[0],
15552 				    devlist);
15553 
15554 				if (ret == FC_SUCCESS) {
15555 					devlist++;
15556 					continue;
15557 				}
15558 
15559 				devlist->map_pd = NULL;
15560 				devlist->map_did.port_id = ptgt->tgt_d_id;
15561 				devlist->map_hard_addr.hard_addr =
15562 				    ptgt->tgt_hard_addr;
15563 
15564 				devlist->map_state = PORT_DEVICE_INVALID;
15565 				devlist->map_type = PORT_DEVICE_OLD;
15566 
15567 				bcopy(&ptgt->tgt_node_wwn.raw_wwn[0],
15568 				    &devlist->map_nwwn, FC_WWN_SIZE);
15569 
15570 				bcopy(&ptgt->tgt_port_wwn.raw_wwn[0],
15571 				    &devlist->map_pwwn, FC_WWN_SIZE);
15572 
15573 				devlist++;
15574 			}
15575 		}
15576 	}
15577 
15578 	mutex_exit(&pptr->port_mutex);
15579 
15580 	return (devptr);
15581 }
15582 /*
15583  * Inform MPxIO that the LUN is busy and cannot accept regular IO
15584  */
15585 static void
15586 fcp_update_mpxio_path_verifybusy(struct fcp_port *pptr)
15587 {
15588 	int i;
15589 	struct fcp_tgt	*ptgt;
15590 	struct fcp_lun	*plun;
15591 
15592 	for (i = 0; i < FCP_NUM_HASH; i++) {
15593 		for (ptgt = pptr->port_tgt_hash_table[i]; ptgt != NULL;
15594 		    ptgt = ptgt->tgt_next) {
15595 			mutex_enter(&ptgt->tgt_mutex);
15596 			for (plun = ptgt->tgt_lun; plun != NULL;
15597 			    plun = plun->lun_next) {
15598 				if (plun->lun_mpxio &&
15599 				    plun->lun_state & FCP_LUN_BUSY) {
15600 					if (!fcp_pass_to_hp(pptr, plun,
15601 					    plun->lun_cip,
15602 					    FCP_MPXIO_PATH_SET_BUSY,
15603 					    pptr->port_link_cnt,
15604 					    ptgt->tgt_change_cnt, 0, 0)) {
15605 						FCP_TRACE(fcp_logq,
15606 						    pptr->port_instbuf,
15607 						    fcp_trace,
15608 						    FCP_BUF_LEVEL_2, 0,
15609 						    "path_verifybusy: "
15610 						    "disable lun %p failed!",
15611 						    plun);
15612 					}
15613 				}
15614 			}
15615 			mutex_exit(&ptgt->tgt_mutex);
15616 		}
15617 	}
15618 }
15619 
15620 static int
15621 fcp_update_mpxio_path(struct fcp_lun *plun, child_info_t *cip, int what)
15622 {
15623 	dev_info_t		*cdip = NULL;
15624 	dev_info_t		*pdip = NULL;
15625 
15626 	ASSERT(plun);
15627 
15628 	mutex_enter(&plun->lun_mutex);
15629 	if (fcp_is_child_present(plun, cip) == FC_FAILURE) {
15630 		mutex_exit(&plun->lun_mutex);
15631 		return (NDI_FAILURE);
15632 	}
15633 	mutex_exit(&plun->lun_mutex);
15634 	cdip = mdi_pi_get_client(PIP(cip));
15635 	pdip = mdi_pi_get_phci(PIP(cip));
15636 
15637 	ASSERT(cdip != NULL);
15638 	ASSERT(pdip != NULL);
15639 
15640 	if (what == FCP_MPXIO_PATH_CLEAR_BUSY) {
15641 		/* LUN ready for IO */
15642 		(void) mdi_pi_enable_path(PIP(cip), DRIVER_DISABLE_TRANSIENT);
15643 	} else {
15644 		/* LUN is busy and cannot accept IO */
15645 		(void) mdi_pi_disable_path(PIP(cip), DRIVER_DISABLE_TRANSIENT);
15646 	}
15647 	return (NDI_SUCCESS);
15648 }
15649 
15650 /*
15651  * Caller must free the returned string, which is MAXPATHLEN bytes long.
15652  * If the device is offline (-1 instance number), NULL
15653  * will be returned.
15654  */
15655 static char *
15656 fcp_get_lun_path(struct fcp_lun *plun) {
15657 	dev_info_t	*dip = NULL;
15658 	char		*path = NULL;
15659 	mdi_pathinfo_t	*pip = NULL;
15660 
15661 	if (plun == NULL) {
15662 		return (NULL);
15663 	}
15664 
15665 	mutex_enter(&plun->lun_mutex);
15666 	if (plun->lun_mpxio == 0) {
15667 		dip = DIP(plun->lun_cip);
15668 		mutex_exit(&plun->lun_mutex);
15669 	} else {
15670 		/*
15671 		 * lun_cip must be accessed with lun_mutex held. Here
15672 		 * plun->lun_cip either points to a valid node or it is NULL.
15673 		 * Make a copy so that we can release lun_mutex.
15674 		 */
15675 		pip = PIP(plun->lun_cip);
15676 
15677 		/*
15678 		 * Increase ref count on the path so that we can release
15679 		 * lun_mutex and still be sure that the pathinfo node (and thus
15680 		 * also the client) is not deallocated. If pip is NULL, this
15681 		 * has no effect.
15682 		 */
15683 		mdi_hold_path(pip);
15684 
15685 		mutex_exit(&plun->lun_mutex);
15686 
15687 		/* Get the client. If pip is NULL, we get NULL. */
15688 		dip = mdi_pi_get_client(pip);
15689 	}
15690 
15691 	if (dip == NULL)
15692 		goto out;
15693 	if (ddi_get_instance(dip) < 0)
15694 		goto out;
15695 
15696 	path = kmem_alloc(MAXPATHLEN, KM_SLEEP);
15697 	if (path == NULL)
15698 		goto out;
15699 
15700 	(void) ddi_pathname(dip, path);
15701 
15702 	/* Clean up. */
15703 out:
15704 	if (pip != NULL)
15705 		mdi_rele_path(pip);
15706 
15707 	/*
15708 	 * In reality, the user wants a fully valid path (one they can open)
15709 	 * but this string is lacking the mount point, and the minor node.
15710 	 * It would be nice if we could "figure these out" somehow
15711 	 * and fill them in.  Otherwise, the userland code has to understand
15712 	 * driver specific details of which minor node is the "best" or
15713 	 * "right" one to expose.  (Ex: which slice is the whole disk, or
15714 	 * which tape doesn't rewind)
15715 	 */
15716 	return (path);
15717 }
15718 
15719 static int
15720 fcp_scsi_bus_config(dev_info_t *parent, uint_t flag,
15721     ddi_bus_config_op_t op, void *arg, dev_info_t **childp)
15722 {
15723 	int64_t reset_delay;
15724 	int rval, retry = 0;
15725 	struct fcp_port *pptr = fcp_dip2port(parent);
15726 
15727 	reset_delay = (int64_t)(USEC_TO_TICK(FCP_INIT_WAIT_TIMEOUT)) -
15728 	    (ddi_get_lbolt64() - pptr->port_attach_time);
15729 	if (reset_delay < 0) {
15730 		reset_delay = 0;
15731 	}
15732 
15733 	if (fcp_bus_config_debug) {
15734 		flag |= NDI_DEVI_DEBUG;
15735 	}
15736 
15737 	switch (op) {
15738 	case BUS_CONFIG_ONE:
15739 		/*
15740 		 * Retry the command since we need to ensure
15741 		 * the fabric devices are available for root
15742 		 */
15743 		while (retry++ < fcp_max_bus_config_retries) {
15744 			rval =	(ndi_busop_bus_config(parent,
15745 			    flag | NDI_MDI_FALLBACK, op,
15746 			    arg, childp, (clock_t)reset_delay));
15747 			if (rval == 0) {
15748 				return (rval);
15749 			}
15750 		}
15751 
15752 		/*
15753 		 * drain taskq to make sure nodes are created and then
15754 		 * try again.
15755 		 */
15756 		taskq_wait(DEVI(parent)->devi_taskq);
15757 		return (ndi_busop_bus_config(parent, flag | NDI_MDI_FALLBACK,
15758 		    op, arg, childp, 0));
15759 
15760 	case BUS_CONFIG_DRIVER:
15761 	case BUS_CONFIG_ALL: {
15762 		/*
15763 		 * delay till all devices report in (port_tmp_cnt == 0)
15764 		 * or FCP_INIT_WAIT_TIMEOUT
15765 		 */
15766 		mutex_enter(&pptr->port_mutex);
15767 		while ((reset_delay > 0) && pptr->port_tmp_cnt) {
15768 			(void) cv_timedwait(&pptr->port_config_cv,
15769 			    &pptr->port_mutex,
15770 			    ddi_get_lbolt() + (clock_t)reset_delay);
15771 			reset_delay =
15772 			    (int64_t)(USEC_TO_TICK(FCP_INIT_WAIT_TIMEOUT)) -
15773 			    (ddi_get_lbolt64() - pptr->port_attach_time);
15774 		}
15775 		mutex_exit(&pptr->port_mutex);
15776 		/* drain taskq to make sure nodes are created */
15777 		taskq_wait(DEVI(parent)->devi_taskq);
15778 		return (ndi_busop_bus_config(parent, flag, op,
15779 		    arg, childp, 0));
15780 	}
15781 
15782 	default:
15783 		return (NDI_FAILURE);
15784 	}
15785 	/*NOTREACHED*/
15786 }
15787 
15788 static int
15789 fcp_scsi_bus_unconfig(dev_info_t *parent, uint_t flag,
15790     ddi_bus_config_op_t op, void *arg)
15791 {
15792 	if (fcp_bus_config_debug) {
15793 		flag |= NDI_DEVI_DEBUG;
15794 	}
15795 
15796 	return (ndi_busop_bus_unconfig(parent, flag, op, arg));
15797 }
15798 
15799 
15800 /*
15801  * Routine to copy GUID into the lun structure.
15802  * returns 0 if the copy was successful and 1 if it encountered a
15803  * failure and did not copy the guid.
15804  */
15805 static int
15806 fcp_copy_guid_2_lun_block(struct fcp_lun *plun, char *guidp)
15807 {
15808 
15809 	int retval = 0;
15810 	unsigned int len;
15811 
15812 	if ((guidp == NULL) || (plun == NULL)) {
15813 		return (1);
15814 	}
15815 	/* add one for the null terminator */
15816 	len = strlen(guidp) + 1;
15817 
15818 	/*
15819 	 * If the plun->lun_guid has already been allocated,
15820 	 * then check the size.  If the size is exact, reuse
15821 	 * it; if not, free it and allocate the required size.
15822 	 * The reallocation should NOT typically happen
15823 	 * unless the GUID reported changes between passes.
15824 	 * We free up and alloc again even if the
15825 	 * size was more than required.  This is due to the
15826 	 * fact that the field lun_guid_size serves the
15827 	 * dual role of indicating the GUID size
15828 	 * and ALSO the allocation size.
15829 	 */
15830 	if (plun->lun_guid) {
15831 		if (plun->lun_guid_size != len) {
15832 			/*
15833 			 * free the allocated memory and
15834 			 * initialize the field
15835 			 * lun_guid_size to 0.
15836 			 */
15837 			kmem_free(plun->lun_guid, plun->lun_guid_size);
15838 			plun->lun_guid = NULL;
15839 			plun->lun_guid_size = 0;
15840 		}
15841 	}
15842 	/*
15843 	 * alloc only if not already done.
15844 	 */
15845 	if (plun->lun_guid == NULL) {
15846 		plun->lun_guid = kmem_zalloc(len, KM_NOSLEEP);
15847 		if (plun->lun_guid == NULL) {
15848 			cmn_err(CE_WARN, "fcp_copy_guid_2_lun_block: "
15849 			    "unable to allocate "
15850 			    "memory for GUID!!! size %d", len);
15851 			retval = 1;
15852 		} else {
15853 			plun->lun_guid_size = len;
15854 		}
15855 	}
15856 	if (plun->lun_guid) {
15857 		/*
15858 		 * now copy the GUID
15859 		 */
15860 		bcopy(guidp, plun->lun_guid, plun->lun_guid_size);
15861 	}
15862 	return (retval);
15863 }
15864 
15865 /*
15866  * fcp_reconfig_wait
15867  *
15868  * Wait for a rediscovery/reconfiguration to complete before continuing.
15869  */
15870 
15871 static void
15872 fcp_reconfig_wait(struct fcp_port *pptr)
15873 {
15874 	clock_t		reconfig_start, wait_timeout;
15875 
15876 	/*
15877 	 * Quick check.	 If pptr->port_tmp_cnt is 0, there is no
15878 	 * reconfiguration in progress.
15879 	 */
15880 
15881 	mutex_enter(&pptr->port_mutex);
15882 	if (pptr->port_tmp_cnt == 0) {
15883 		mutex_exit(&pptr->port_mutex);
15884 		return;
15885 	}
15886 	mutex_exit(&pptr->port_mutex);
15887 
15888 	/*
15889 	 * If we cause a reconfig by raising power, delay until all devices
15890 	 * report in (port_tmp_cnt returns to 0)
15891 	 */
15892 
15893 	reconfig_start = ddi_get_lbolt();
15894 	wait_timeout = drv_usectohz(FCP_INIT_WAIT_TIMEOUT);
15895 
15896 	mutex_enter(&pptr->port_mutex);
15897 
15898 	while (((ddi_get_lbolt() - reconfig_start) < wait_timeout) &&
15899 	    pptr->port_tmp_cnt) {
15900 
15901 		(void) cv_timedwait(&pptr->port_config_cv, &pptr->port_mutex,
15902 		    reconfig_start + wait_timeout);
15903 	}
15904 
15905 	mutex_exit(&pptr->port_mutex);
15906 
15907 	/*
15908 	 * Even if port_tmp_cnt isn't 0, continue without error.  The port
15909 	 * we want may still be ok.  If not, it will error out later.
15910 	 */
15911 }
15912 
15913 /*
15914  * Read masking info from fp.conf and construct the global fcp_lun_blacklist.
15915  * We rely on the fcp_global_mutex to provide protection against changes to
15916  * the fcp_lun_blacklist.
15917  *
15918  * You can describe a list of target port WWNs and LUN numbers which will
15919  * not be configured. LUN numbers will be interpreted as decimal. White
15920  * spaces and ',' can be used in the list of LUN numbers.
15921  *
15922  * To prevent LUNs 1 and 2 from being configured for target
15923  * port 510000f010fd92a1 and target port 510000e012079df1, set:
15924  *
15925  * pwwn-lun-blacklist=
15926  * "510000f010fd92a1,1,2",
15927  * "510000e012079df1,1,2";
15928  */
15929 static void
15930 fcp_read_blacklist(dev_info_t *dip,
15931     struct fcp_black_list_entry **pplun_blacklist) {
15932 	char **prop_array	= NULL;
15933 	char *curr_pwwn		= NULL;
15934 	char *curr_lun		= NULL;
15935 	uint32_t prop_item	= 0;
15936 	int idx			= 0;
15937 	int len			= 0;
15938 
15939 	ASSERT(mutex_owned(&fcp_global_mutex));
15940 	if (ddi_prop_lookup_string_array(DDI_DEV_T_ANY, dip,
15941 	    DDI_PROP_DONTPASS | DDI_PROP_NOTPROM,
15942 	    LUN_BLACKLIST_PROP, &prop_array, &prop_item) != DDI_PROP_SUCCESS) {
15943 		return;
15944 	}
15945 
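	/*
	 * Each property entry is expected to look like
	 * "<16-hex-digit pwwn>,<lun>[,<lun>...]"; validate the WWN part
	 * here and hand the LUN list off to fcp_mask_pwwn_lun().
	 */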
15946 	for (idx = 0; idx < prop_item; idx++) {
15947 
15948 		curr_pwwn = prop_array[idx];
15949 		while (*curr_pwwn == ' ') {
15950 			curr_pwwn++;
15951 		}
15952 		if (strlen(curr_pwwn) <= (sizeof (la_wwn_t) * 2 + 1)) {
15953 			fcp_log(CE_WARN, NULL, "Invalid WWN %s in the blacklist"
15954 			    ", please check.", curr_pwwn);
15955 			continue;
15956 		}
15957 		if ((*(curr_pwwn + sizeof (la_wwn_t) * 2) != ' ') &&
15958 		    (*(curr_pwwn + sizeof (la_wwn_t) * 2) != ',')) {
15959 			fcp_log(CE_WARN, NULL, "Invalid WWN %s in the blacklist"
15960 			    ", please check.", curr_pwwn);
15961 			continue;
15962 		}
15963 		for (len = 0; len < sizeof (la_wwn_t) * 2; len++) {
15964 			if (isxdigit(curr_pwwn[len]) != TRUE) {
15965 				fcp_log(CE_WARN, NULL, "Invalid WWN %s in the "
15966 				    "blacklist, please check.", curr_pwwn);
15967 				break;
15968 			}
15969 		}
15970 		if (len != sizeof (la_wwn_t) * 2) {
15971 			continue;
15972 		}
15973 
15974 		curr_lun = curr_pwwn + sizeof (la_wwn_t) * 2 + 1;
15975 		*(curr_lun - 1) = '\0';
15976 		fcp_mask_pwwn_lun(curr_pwwn, curr_lun, pplun_blacklist);
15977 	}
15978 
15979 	ddi_prop_free(prop_array);
15980 }
15981 
15982 /*
15983  * Get the masking info about one remote target port designated by wwn.
15984  * Lun ids could be separated by ',' or white spaces.
15985  */
15986 static void
15987 fcp_mask_pwwn_lun(char *curr_pwwn, char *curr_lun,
15988     struct fcp_black_list_entry **pplun_blacklist) {
15989 	int		idx			= 0;
15990 	uint32_t	offset			= 0;
15991 	unsigned long	lun_id			= 0;
15992 	char		lunid_buf[16];
15993 	char		*pend			= NULL;
15994 	int		illegal_digit		= 0;
15995 
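	/*
	 * Scan the LUN list one token at a time; tokens are delimited by
	 * ',', white space or the end of the string.
	 */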
15996 	while (offset < strlen(curr_lun)) {
15997 		while ((curr_lun[offset + idx] != ',') &&
15998 		    (curr_lun[offset + idx] != '\0') &&
15999 		    (curr_lun[offset + idx] != ' ')) {
16000 			if (isdigit(curr_lun[offset + idx]) == 0) {
16001 				illegal_digit++;
16002 			}
16003 			idx++;
16004 		}
16005 		if (illegal_digit > 0) {
16006 			offset += (idx+1);	/* To the start of next lun */
16007 			idx = 0;
16008 			illegal_digit = 0;
16009 			fcp_log(CE_WARN, NULL, "Invalid LUN %s for WWN %s in "
16010 			    "the blacklist, please check digits.",
16011 			    curr_lun, curr_pwwn);
16012 			continue;
16013 		}
16014 		if (idx >= (sizeof (lunid_buf) / sizeof (lunid_buf[0]))) {
16015 			fcp_log(CE_WARN, NULL, "Invalid LUN %s for WWN %s in "
16016 			    "the blacklist, please check the length of LUN#.",
16017 			    curr_lun, curr_pwwn);
16018 			break;
16019 		}
16020 		if (idx == 0) {	/* ignore ' ' or ',' or '\0' */
16021 			offset++;
16022 			continue;
16023 		}
16024 
16025 		bcopy(curr_lun + offset, lunid_buf, idx);
16026 		lunid_buf[idx] = '\0';
16027 		if (ddi_strtoul(lunid_buf, &pend, 10, &lun_id) == 0) {
16028 			fcp_add_one_mask(curr_pwwn, lun_id, pplun_blacklist);
16029 		} else {
16030 			fcp_log(CE_WARN, NULL, "Invalid LUN %s for WWN %s in "
16031 			    "the blacklist, please check %s.",
16032 			    curr_lun, curr_pwwn, lunid_buf);
16033 		}
16034 		offset += (idx+1);	/* To the start of next lun */
16035 		idx = 0;
16036 	}
16037 }
16038 
16039 /*
16040  * Add one masking record
16041  */
16042 static void
16043 fcp_add_one_mask(char *curr_pwwn, uint32_t lun_id,
16044     struct fcp_black_list_entry **pplun_blacklist) {
16045 	struct fcp_black_list_entry	*tmp_entry	= *pplun_blacklist;
16046 	struct fcp_black_list_entry	*new_entry	= NULL;
16047 	la_wwn_t			wwn;
16048 
16049 	fcp_ascii_to_wwn(curr_pwwn, wwn.raw_wwn, sizeof (la_wwn_t));
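	/*
	 * If this WWN/LUN pair is already on the blacklist there is
	 * nothing to do.
	 */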
16050 	while (tmp_entry) {
16051 		if ((bcmp(&tmp_entry->wwn, &wwn,
16052 		    sizeof (la_wwn_t)) == 0) && (tmp_entry->lun == lun_id)) {
16053 			return;
16054 		}
16055 
16056 		tmp_entry = tmp_entry->next;
16057 	}
16058 
16059 	/* add to black list */
16060 	new_entry = kmem_zalloc(sizeof (struct fcp_black_list_entry),
16061 	    KM_SLEEP);
16062 	bcopy(&wwn, &new_entry->wwn, sizeof (la_wwn_t));
16063 	new_entry->lun = lun_id;
16064 	new_entry->masked = 0;
16065 	new_entry->next = *pplun_blacklist;
16066 	*pplun_blacklist = new_entry;
16067 }
16068 
16069 /*
16070  * Check if we should mask the specified lun of this fcp_tgt
16071  */
16072 static int
16073 fcp_should_mask(la_wwn_t *wwn, uint32_t lun_id) {
16074 	struct fcp_black_list_entry *remote_port;
16075 
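	/*
	 * Walk the global blacklist looking for this WWN/LUN pair.  The
	 * masked counter records how often the entry was hit; the notice
	 * is only logged on the first hit.
	 */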
16076 	remote_port = fcp_lun_blacklist;
16077 	while (remote_port != NULL) {
16078 		if (bcmp(wwn, &remote_port->wwn, sizeof (la_wwn_t)) == 0) {
16079 			if (remote_port->lun == lun_id) {
16080 				remote_port->masked++;
16081 				if (remote_port->masked == 1) {
16082 					fcp_log(CE_NOTE, NULL, "LUN %d of port "
16083 					    "%02x%02x%02x%02x%02x%02x%02x%02x "
16084 					    "is masked due to black listing.\n",
16085 					    lun_id, wwn->raw_wwn[0],
16086 					    wwn->raw_wwn[1], wwn->raw_wwn[2],
16087 					    wwn->raw_wwn[3], wwn->raw_wwn[4],
16088 					    wwn->raw_wwn[5], wwn->raw_wwn[6],
16089 					    wwn->raw_wwn[7]);
16090 				}
16091 				return (TRUE);
16092 			}
16093 		}
16094 		remote_port = remote_port->next;
16095 	}
16096 	return (FALSE);
16097 }
16098 
16099 /*
16100  * Release all allocated resources
16101  */
16102 static void
16103 fcp_cleanup_blacklist(struct fcp_black_list_entry **pplun_blacklist) {
16104 	struct fcp_black_list_entry	*tmp_entry	= *pplun_blacklist;
16105 	struct fcp_black_list_entry	*current_entry	= NULL;
16106 
16107 	ASSERT(mutex_owned(&fcp_global_mutex));
16108 	/*
16109 	 * Traverse all luns
16110 	 */
16111 	while (tmp_entry) {
16112 		current_entry = tmp_entry;
16113 		tmp_entry = tmp_entry->next;
16114 		kmem_free(current_entry, sizeof (struct fcp_black_list_entry));
16115 	}
16116 	*pplun_blacklist = NULL;
16117 }
16118 
16119 /*
16120  * Naming conventions in the fcp module (variable @ structure type):
16121  *   pkt@scsi_pkt, cmd@fcp_pkt, icmd@fcp_ipkt, fpkt@fc_packet, pptr@fcp_port
16122  */
16123 static struct scsi_pkt *
16124 fcp_pseudo_init_pkt(struct scsi_address *ap, struct scsi_pkt *pkt,
16125     struct buf *bp, int cmdlen, int statuslen, int tgtlen,
16126     int flags, int (*callback)(), caddr_t arg)
16127 {
16128 	fcp_port_t	*pptr = ADDR2FCP(ap);
16129 	fcp_pkt_t	*cmd  = NULL;
16130 	fc_frame_hdr_t	*hp;
16131 
16132 	/*
16133 	 * First step: get the packet
16134 	 */
16135 	if (pkt == NULL) {
16136 		pkt = scsi_hba_pkt_alloc(pptr->port_dip, ap, cmdlen, statuslen,
16137 		    tgtlen, sizeof (fcp_pkt_t) + pptr->port_priv_pkt_len,
16138 		    callback, arg);
16139 		if (pkt == NULL) {
16140 			return (NULL);
16141 		}
16142 
16143 		/*
16144 		 * All fields in the scsi_pkt are initialized properly or
16145 		 * set to zero, so nothing more needs to be done for it.
16146 		 */
16147 		/*
16148 		 * It is, however, our responsibility to link the other
16149 		 * related data structures.  They are initialized just
16150 		 * before the scsi_pkt is handed to the FCA.
16151 		 */
16152 		cmd		= PKT2CMD(pkt);
16153 		cmd->cmd_pkt	= pkt;
16154 		cmd->cmd_fp_pkt = &cmd->cmd_fc_packet;
16155 		/*
16156 		 * fc_packet_t
16157 		 */
16158 		cmd->cmd_fp_pkt->pkt_ulp_private = (opaque_t)cmd;
16159 		cmd->cmd_fp_pkt->pkt_fca_private = (opaque_t)((caddr_t)cmd +
16160 		    sizeof (struct fcp_pkt));
16161 		cmd->cmd_fp_pkt->pkt_cmd = (caddr_t)&cmd->cmd_fcp_cmd;
16162 		cmd->cmd_fp_pkt->pkt_cmdlen = sizeof (struct fcp_cmd);
16163 		cmd->cmd_fp_pkt->pkt_resp = cmd->cmd_fcp_rsp;
16164 		cmd->cmd_fp_pkt->pkt_rsplen = FCP_MAX_RSP_IU_SIZE;
16165 		/*
16166 		 * Fill in the Fibre Channel frame header
16167 		 */
16168 		hp = &cmd->cmd_fp_pkt->pkt_cmd_fhdr;
16169 		hp->r_ctl = R_CTL_COMMAND;
16170 		hp->rsvd = 0;
16171 		hp->type = FC_TYPE_SCSI_FCP;
16172 		hp->f_ctl = F_CTL_SEQ_INITIATIVE | F_CTL_FIRST_SEQ;
16173 		hp->seq_id = 0;
16174 		hp->df_ctl  = 0;
16175 		hp->seq_cnt = 0;
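		/*
		 * An OX_ID/RX_ID of 0xffff means the exchange IDs are
		 * not assigned yet.
		 */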
16176 		hp->ox_id = 0xffff;
16177 		hp->rx_id = 0xffff;
16178 		hp->ro = 0;
16179 	} else {
16180 		/*
16181 		 * We need to consider whether any elements in the related
16182 		 * data structures should be reset here.
16183 		 */
16184 		FCP_TRACE(fcp_logq, pptr->port_instbuf,
16185 		    fcp_trace, FCP_BUF_LEVEL_6, 0,
16186 		    "reusing pkt, flags %d", flags);
16187 		cmd = PKT2CMD(pkt);
16188 		if (cmd->cmd_fp_pkt->pkt_pd) {
16189 			cmd->cmd_fp_pkt->pkt_pd = NULL;
16190 		}
16191 	}
16192 
16193 	/*
16194 	 * Second step: DMA allocation/move
16195 	 */
16196 	if (bp && bp->b_bcount != 0) {
16197 		/*
16198 		 * Mark if it's read or write
16199 		 */
16200 		if (bp->b_flags & B_READ) {
16201 			cmd->cmd_flags |= CFLAG_IS_READ;
16202 		} else {
16203 			cmd->cmd_flags &= ~CFLAG_IS_READ;
16204 		}
16205 
16206 		bp_mapin(bp);
16207 		cmd->cmd_fp_pkt->pkt_data = bp->b_un.b_addr;
16208 		cmd->cmd_fp_pkt->pkt_datalen = bp->b_bcount;
16209 		cmd->cmd_fp_pkt->pkt_data_resid = 0;
16210 	} else {
16211 		/*
16212 		 * This seldom happens, except when CLUSTER or SCSI_VHCI
16213 		 * issues a zero-length read/write.
16214 		 */
16215 		cmd->cmd_fp_pkt->pkt_data = NULL;
16216 		cmd->cmd_fp_pkt->pkt_datalen = 0;
16217 	}
16218 
16219 	return (pkt);
16220 }
16221 
16222 static void
16223 fcp_pseudo_destroy_pkt(struct scsi_address *ap, struct scsi_pkt *pkt)
16224 {
16225 	fcp_port_t	*pptr = ADDR2FCP(ap);
16226 
16227 	/*
16228 	 * First we let the FCA uninitialize its private part.
16229 	 */
16230 	(void) fc_ulp_uninit_packet(pptr->port_fp_handle,
16231 	    PKT2CMD(pkt)->cmd_fp_pkt);
16232 
16233 	/*
16234 	 * Second, we uninitialize the fc_packet (currently nothing to do).
16235 	 */
16236 
16237 	/*
16238 	 * Third, we uninitialize the fcp_pkt (currently nothing to do).
16239 	 */
16240 
16241 	/*
16242 	 * In the end, we free scsi_pkt.
16243 	 */
16244 	scsi_hba_pkt_free(ap, pkt);
16245 }
16246 
16247 static int
16248 fcp_pseudo_start(struct scsi_address *ap, struct scsi_pkt *pkt)
16249 {
16250 	fcp_port_t	*pptr = ADDR2FCP(ap);
16251 	fcp_lun_t	*plun = ADDR2LUN(ap);
16252 	fcp_tgt_t	*ptgt = plun->lun_tgt;
16253 	fcp_pkt_t	*cmd  = PKT2CMD(pkt);
16254 	fcp_cmd_t	*fcmd = &cmd->cmd_fcp_cmd;
16255 	fc_packet_t	*fpkt = cmd->cmd_fp_pkt;
16256 	int		 rval;
16257 
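	/*
	 * Bind the packet to the remote port's device handle and let the
	 * FCA initialize its private portion of the packet.
	 */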
16258 	fpkt->pkt_pd = ptgt->tgt_pd_handle;
16259 	(void) fc_ulp_init_packet(pptr->port_fp_handle, cmd->cmd_fp_pkt, 1);
16260 
16261 	/*
16262 	 * First, we need to initialize the fcp_pkt_t.
16263 	 * Second, we need to initialize the fcp_cmd_t.
16264 	 */
16265 	bcopy(pkt->pkt_cdbp, fcmd->fcp_cdb, pkt->pkt_cdblen);
16266 	fcmd->fcp_data_len = fpkt->pkt_datalen;
16267 	fcmd->fcp_ent_addr = plun->lun_addr;
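	/*
	 * Map the SCSA tag queueing flags onto the FCP task attribute
	 * (queue type).
	 */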
16268 	if (pkt->pkt_flags & FLAG_HTAG) {
16269 		fcmd->fcp_cntl.cntl_qtype = FCP_QTYPE_HEAD_OF_Q;
16270 	} else if (pkt->pkt_flags & FLAG_OTAG) {
16271 		fcmd->fcp_cntl.cntl_qtype = FCP_QTYPE_ORDERED;
16272 	} else if (pkt->pkt_flags & FLAG_STAG) {
16273 		fcmd->fcp_cntl.cntl_qtype = FCP_QTYPE_SIMPLE;
16274 	} else {
16275 		fcmd->fcp_cntl.cntl_qtype = FCP_QTYPE_UNTAGGED;
16276 	}
16277 
16278 	if (cmd->cmd_flags & CFLAG_IS_READ) {
16279 		fcmd->fcp_cntl.cntl_read_data = 1;
16280 		fcmd->fcp_cntl.cntl_write_data = 0;
16281 	} else {
16282 		fcmd->fcp_cntl.cntl_read_data = 0;
16283 		fcmd->fcp_cntl.cntl_write_data = 1;
16284 	}
16285 
16286 	/*
16287 	 * Then we need to initialize the fc_packet_t as well.
16288 	 */
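	/* Give the FCA two extra seconds beyond the caller's pkt_time. */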
16289 	fpkt->pkt_timeout = pkt->pkt_time + 2;
16290 	fpkt->pkt_cmd_fhdr.d_id = ptgt->tgt_d_id;
16291 	fpkt->pkt_cmd_fhdr.s_id = pptr->port_id;
16292 	if (cmd->cmd_flags & CFLAG_IS_READ) {
16293 		fpkt->pkt_tran_type = FC_PKT_FCP_READ;
16294 	} else {
16295 		fpkt->pkt_tran_type = FC_PKT_FCP_WRITE;
16296 	}
16297 
16298 	if (pkt->pkt_flags & FLAG_NOINTR) {
16299 		fpkt->pkt_comp = NULL;
16300 		fpkt->pkt_tran_flags = (FC_TRAN_CLASS3 | FC_TRAN_NO_INTR);
16301 	} else {
16302 		fpkt->pkt_comp = fcp_cmd_callback;
16303 		fpkt->pkt_tran_flags = (FC_TRAN_CLASS3 | FC_TRAN_INTR);
16304 		if (pkt->pkt_flags & FLAG_IMMEDIATE_CB) {
16305 			fpkt->pkt_tran_flags |= FC_TRAN_IMMEDIATE_CB;
16306 		}
16307 	}
16308 
16309 	/*
16310 	 * Lastly, we need to initialize the scsi_pkt.
16311 	 */
16312 	pkt->pkt_reason = CMD_CMPLT;
16313 	pkt->pkt_state = 0;
16314 	pkt->pkt_statistics = 0;
16315 	pkt->pkt_resid = 0;
16316 
16317 	/*
16318 	 * if interrupts aren't allowed (e.g. at dump time) then we'll
16319 	 * have to do polled I/O
16320 	 */
16321 	if (pkt->pkt_flags & FLAG_NOINTR) {
16322 		return (fcp_dopoll(pptr, cmd));
16323 	}
16324 
16325 	cmd->cmd_state = FCP_PKT_ISSUED;
16326 	rval = fcp_transport(pptr->port_fp_handle, fpkt, 0);
16327 	if (rval == FC_SUCCESS) {
16328 		return (TRAN_ACCEPT);
16329 	}
16330 
16331 	/*
16332 	 * This needs more consideration:
16333 	 *
16334 	 * pkt->pkt_flags & FLAG_NOQUEUE could abort other packets.
16335 	 */
16336 	cmd->cmd_state = FCP_PKT_IDLE;
16337 	if (rval == FC_TRAN_BUSY) {
16338 		return (TRAN_BUSY);
16339 	} else {
16340 		return (TRAN_FATAL_ERROR);
16341 	}
16342 }
16343 
16344 /*
16345  * scsi_poll will always call tran_sync_pkt for pseudo FC-HBAs
16346  * SCSA will initialize it to scsi_sync_cache_pkt for physical FC-HBAs
16347  */
16348 static void
16349 fcp_pseudo_sync_pkt(struct scsi_address *ap, struct scsi_pkt *pkt)
16350 {
16351 	FCP_TRACE(fcp_logq, "fcp_pseudo_sync_pkt", fcp_trace,
16352 	    FCP_BUF_LEVEL_2, 0, "ap-%p, scsi_pkt-%p", ap, pkt);
16353 }
16354 
16355 /*
16356  * scsi_dmafree will always call tran_dmafree when STATE_ARQ_DONE is set.
16357  */
16358 static void
16359 fcp_pseudo_dmafree(struct scsi_address *ap, struct scsi_pkt *pkt)
16360 {
16361 	FCP_TRACE(fcp_logq, "fcp_pseudo_dmafree", fcp_trace,
16362 	    FCP_BUF_LEVEL_2, 0, "ap-%p, scsi_pkt-%p", ap, pkt);
16363 }
16364