xref: /titanic_50/usr/src/uts/common/io/fibre-channel/ulp/fcp.c (revision d09832051bb4b41ce2b3202c09fceedc089678af)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 /*
22  * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
23  * Use is subject to license terms.
24  *
25  * Fibre Channel SCSI ULP Mapping driver
26  */
27 
28 #include <sys/scsi/scsi.h>
29 #include <sys/types.h>
30 #include <sys/varargs.h>
31 #include <sys/devctl.h>
32 #include <sys/thread.h>
33 #include <sys/thread.h>
34 #include <sys/open.h>
35 #include <sys/file.h>
36 #include <sys/sunndi.h>
37 #include <sys/console.h>
38 #include <sys/proc.h>
39 #include <sys/time.h>
40 #include <sys/utsname.h>
41 #include <sys/scsi/impl/scsi_reset_notify.h>
42 #include <sys/ndi_impldefs.h>
43 #include <sys/byteorder.h>
44 #include <sys/fs/dv_node.h>
45 #include <sys/ctype.h>
46 #include <sys/sunmdi.h>
47 
48 #include <sys/fibre-channel/fc.h>
49 #include <sys/fibre-channel/impl/fc_ulpif.h>
50 #include <sys/fibre-channel/ulp/fcpvar.h>
51 
52 /*
53  * Discovery Process
54  * =================
55  *
56  *    The discovery process is a major function of FCP.	 In order to help
57  * understand that function a flow diagram is given here.  This diagram
58  * doesn't claim to cover all the cases and the events that can occur during
59  * the discovery process nor the subtleties of the code.  The code paths shown
60  * are simplified.  Its purpose is to help the reader (and potentially bug
61  * fixer) have an overall view of the logic of the code.  For that reason the
62  * diagram covers the simple case of the line coming up cleanly or of a new
63  * port attaching to FCP the link being up.  The reader must keep in mind
64  * that:
65  *
66  *	- There are special cases where bringing devices online and offline
67  *	  is driven by Ioctl.
68  *
69  *	- The behavior of the discovery process can be modified through the
70  *	  .conf file.
71  *
72  *	- The line can go down and come back up at any time during the
73  *	  discovery process which explains some of the complexity of the code.
74  *
75  * ............................................................................
76  *
77  * STEP 1: The line comes up or a new Fibre Channel port attaches to FCP.
78  *
79  *
80  *			+-------------------------+
81  *   fp/fctl module --->|    fcp_port_attach	  |
82  *			+-------------------------+
83  *	   |			     |
84  *	   |			     |
85  *	   |			     v
86  *	   |		+-------------------------+
87  *	   |		| fcp_handle_port_attach  |
88  *	   |		+-------------------------+
89  *	   |				|
90  *	   |				|
91  *	   +--------------------+	|
92  *				|	|
93  *				v	v
94  *			+-------------------------+
95  *			|   fcp_statec_callback	  |
96  *			+-------------------------+
97  *				    |
98  *				    |
99  *				    v
100  *			+-------------------------+
101  *			|    fcp_handle_devices	  |
102  *			+-------------------------+
103  *				    |
104  *				    |
105  *				    v
106  *			+-------------------------+
107  *			|   fcp_handle_mapflags	  |
108  *			+-------------------------+
109  *				    |
110  *				    |
111  *				    v
112  *			+-------------------------+
113  *			|     fcp_send_els	  |
114  *			|			  |
115  *			| PLOGI or PRLI To all the|
116  *			| reachable devices.	  |
117  *			+-------------------------+
118  *
119  *
120  * ............................................................................
121  *
122  * STEP 2: The callback functions of the PLOGI and/or PRLI requests sent during
123  *	   STEP 1 are called (it is actually the same function).
124  *
125  *
126  *			+-------------------------+
127  *			|    fcp_icmd_callback	  |
128  *   fp/fctl module --->|			  |
129  *			| callback for PLOGI and  |
130  *			| PRLI.			  |
131  *			+-------------------------+
132  *				     |
133  *				     |
134  *	    Received PLOGI Accept   /-\	  Received PRLI Accept
135  *		       _ _ _ _ _ _ /   \_ _ _ _ _ _
136  *		      |		   \   /	   |
137  *		      |		    \-/		   |
138  *		      |				   |
139  *		      v				   v
140  *	+-------------------------+	+-------------------------+
141  *	|     fcp_send_els	  |	|     fcp_send_scsi	  |
142  *	|			  |	|			  |
143  *	|	  PRLI		  |	|	REPORT_LUN	  |
144  *	+-------------------------+	+-------------------------+
145  *
146  * ............................................................................
147  *
148  * STEP 3: The callback functions of the SCSI commands issued by FCP are called
149  *	   (It is actually the same function).
150  *
151  *
152  *			    +-------------------------+
153  *   fp/fctl module ------->|	 fcp_scsi_callback    |
154  *			    +-------------------------+
155  *					|
156  *					|
157  *					|
158  *	Receive REPORT_LUN reply       /-\	Receive INQUIRY PAGE83 reply
159  *		  _ _ _ _ _ _ _ _ _ _ /	  \_ _ _ _ _ _ _ _ _ _ _ _
160  *		 |		      \	  /			  |
161  *		 |		       \-/			  |
162  *		 |			|			  |
163  *		 | Receive INQUIRY reply|			  |
164  *		 |			|			  |
165  *		 v			v			  v
166  * +------------------------+ +----------------------+ +----------------------+
167  * |  fcp_handle_reportlun  | |	 fcp_handle_inquiry  | |  fcp_handle_page83   |
168  * |(Called for each Target)| | (Called for each LUN)| |(Called for each LUN) |
169  * +------------------------+ +----------------------+ +----------------------+
170  *		 |			|			  |
171  *		 |			|			  |
172  *		 |			|			  |
173  *		 v			v			  |
174  *     +-----------------+	+-----------------+		  |
175  *     |  fcp_send_scsi	 |	|  fcp_send_scsi  |		  |
176  *     |		 |	|		  |		  |
177  *     |     INQUIRY	 |	| INQUIRY PAGE83  |		  |
178  *     |  (To each LUN)	 |	+-----------------+		  |
179  *     +-----------------+					  |
180  *								  |
181  *								  v
182  *						      +------------------------+
183  *						      |	 fcp_call_finish_init  |
184  *						      +------------------------+
185  *								  |
186  *								  v
187  *						 +-----------------------------+
188  *						 |  fcp_call_finish_init_held  |
189  *						 +-----------------------------+
190  *								  |
191  *								  |
192  *			   All LUNs scanned			 /-\
193  *			       _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ __ /   \
194  *			      |					\   /
195  *			      |					 \-/
196  *			      v					  |
197  *		     +------------------+			  |
198  *		     |	fcp_finish_tgt	|			  |
199  *		     +------------------+			  |
200  *			      |	  Target Not Offline and	  |
201  *  Target Not Offline and    |	  not marked and tgt_node_state	  |
202  *  marked		     /-\  not FCP_TGT_NODE_ON_DEMAND	  |
203  *		_ _ _ _ _ _ /	\_ _ _ _ _ _ _ _		  |
204  *	       |	    \	/		|		  |
205  *	       |	     \-/		|		  |
206  *	       v				v		  |
207  * +----------------------------+     +-------------------+	  |
208  * |	 fcp_offline_target	|     |	 fcp_create_luns  |	  |
209  * |				|     +-------------------+	  |
210  * | A structure fcp_tgt_elem	|		|		  |
211  * | is created and queued in	|		v		  |
212  * | the FCP port list		|     +-------------------+	  |
213  * | port_offline_tgts.	 It	|     |	 fcp_pass_to_hp	  |	  |
214  * | will be unqueued by the	|     |			  |	  |
215  * | watchdog timer.		|     | Called for each	  |	  |
216  * +----------------------------+     | LUN. Dispatches	  |	  |
217  *		  |		      | fcp_hp_task	  |	  |
218  *		  |		      +-------------------+	  |
219  *		  |				|		  |
220  *		  |				|		  |
221  *		  |				|		  |
222  *		  |				+---------------->|
223  *		  |						  |
224  *		  +---------------------------------------------->|
225  *								  |
226  *								  |
227  *		All the targets (devices) have been scanned	 /-\
228  *				_ _ _ _	_ _ _ _	_ _ _ _ _ _ _ _ /   \
229  *			       |				\   /
230  *			       |				 \-/
231  *	    +-------------------------------------+		  |
232  *	    |		fcp_finish_init		  |		  |
233  *	    |					  |		  |
234  *	    | Signal broadcasts the condition	  |		  |
235  *	    | variable port_config_cv of the FCP  |		  |
236  *	    | port.  One potential code sequence  |		  |
237  *	    | waiting on the condition variable	  |		  |
238  *	    | the code sequence handling	  |		  |
239  *	    | BUS_CONFIG_ALL and BUS_CONFIG_DRIVER|		  |
240  *	    | The other is in the function	  |		  |
241  *	    | fcp_reconfig_wait which is called	  |		  |
242  *	    | in the transmit path preventing IOs |		  |
243  *	    | from going through till the disco-  |		  |
244  *	    | very process is over.		  |		  |
245  *	    +-------------------------------------+		  |
246  *			       |				  |
247  *			       |				  |
248  *			       +--------------------------------->|
249  *								  |
250  *								  v
251  *								Return
252  *
253  * ............................................................................
254  *
255  * STEP 4: The hot plug task is called (for each fcp_hp_elem).
256  *
257  *
258  *			+-------------------------+
259  *			|      fcp_hp_task	  |
260  *			+-------------------------+
261  *				     |
262  *				     |
263  *				     v
264  *			+-------------------------+
265  *			|     fcp_trigger_lun	  |
266  *			+-------------------------+
267  *				     |
268  *				     |
269  *				     v
270  *		   Bring offline    /-\	 Bring online
271  *		  _ _ _ _ _ _ _ _ _/   \_ _ _ _ _ _ _ _ _ _
272  *		 |		   \   /		   |
273  *		 |		    \-/			   |
274  *		 v					   v
275  *    +---------------------+			+-----------------------+
276  *    |	 fcp_offline_child  |			|      fcp_get_cip	|
277  *    +---------------------+			|			|
278  *						| Creates a dev_info_t	|
279  *						| or a mdi_pathinfo_t	|
280  *						| depending on whether	|
281  *						| mpxio is on or off.	|
282  *						+-----------------------+
283  *							   |
284  *							   |
285  *							   v
286  *						+-----------------------+
287  *						|  fcp_online_child	|
288  *						|			|
289  *						| Set device online	|
290  *						| using NDI or MDI.	|
291  *						+-----------------------+
292  *
293  * ............................................................................
294  *
295  * STEP 5: The watchdog timer expires.	The watch dog timer does much more that
296  *	   what is described here.  We only show the target offline path.
297  *
298  *
299  *			 +--------------------------+
300  *			 |	  fcp_watch	    |
301  *			 +--------------------------+
302  *				       |
303  *				       |
304  *				       v
305  *			 +--------------------------+
306  *			 |  fcp_scan_offline_tgts   |
307  *			 +--------------------------+
308  *				       |
309  *				       |
310  *				       v
311  *			 +--------------------------+
312  *			 |  fcp_offline_target_now  |
313  *			 +--------------------------+
314  *				       |
315  *				       |
316  *				       v
317  *			 +--------------------------+
318  *			 |   fcp_offline_tgt_luns   |
319  *			 +--------------------------+
320  *				       |
321  *				       |
322  *				       v
323  *			 +--------------------------+
324  *			 |     fcp_offline_lun	    |
325  *			 +--------------------------+
326  *				       |
327  *				       |
328  *				       v
329  *		     +----------------------------------+
330  *		     |	     fcp_offline_lun_now	|
331  *		     |					|
332  *		     | A request (or two if mpxio) is	|
333  *		     | sent to the hot plug task using	|
334  *		     | a fcp_hp_elem structure.		|
335  *		     +----------------------------------+
336  */
337 
338 /*
339  * Functions registered with DDI framework
340  */
341 static int fcp_attach(dev_info_t *devi, ddi_attach_cmd_t cmd);
342 static int fcp_detach(dev_info_t *devi, ddi_detach_cmd_t cmd);
343 static int fcp_open(dev_t *devp, int flag, int otype, cred_t *credp);
344 static int fcp_close(dev_t dev, int flag, int otype, cred_t *credp);
345 static int fcp_ioctl(dev_t dev, int cmd, intptr_t data, int mode,
346     cred_t *credp, int *rval);
347 
348 /*
349  * Functions registered with FC Transport framework
350  */
351 static int fcp_port_attach(opaque_t ulph, fc_ulp_port_info_t *pinfo,
352     fc_attach_cmd_t cmd,  uint32_t s_id);
353 static int fcp_port_detach(opaque_t ulph, fc_ulp_port_info_t *info,
354     fc_detach_cmd_t cmd);
355 static int fcp_port_ioctl(opaque_t ulph, opaque_t port_handle, dev_t dev,
356     int cmd, intptr_t data, int mode, cred_t *credp, int *rval,
357     uint32_t claimed);
358 static int fcp_els_callback(opaque_t ulph, opaque_t port_handle,
359     fc_unsol_buf_t *buf, uint32_t claimed);
360 static int fcp_data_callback(opaque_t ulph, opaque_t port_handle,
361     fc_unsol_buf_t *buf, uint32_t claimed);
362 static void fcp_statec_callback(opaque_t ulph, opaque_t port_handle,
363     uint32_t port_state, uint32_t port_top, fc_portmap_t *devlist,
364     uint32_t  dev_cnt, uint32_t port_sid);
365 
366 /*
367  * Functions registered with SCSA framework
368  */
369 static int fcp_phys_tgt_init(dev_info_t *hba_dip, dev_info_t *tgt_dip,
370     scsi_hba_tran_t *hba_tran, struct scsi_device *sd);
371 static int fcp_scsi_tgt_init(dev_info_t *hba_dip, dev_info_t *tgt_dip,
372     scsi_hba_tran_t *hba_tran, struct scsi_device *sd);
373 static void fcp_scsi_tgt_free(dev_info_t *hba_dip, dev_info_t *tgt_dip,
374     scsi_hba_tran_t *hba_tran, struct scsi_device *sd);
375 static int fcp_scsi_start(struct scsi_address *ap, struct scsi_pkt *pkt);
376 static int fcp_scsi_abort(struct scsi_address *ap, struct scsi_pkt *pkt);
377 static int fcp_scsi_reset(struct scsi_address *ap, int level);
378 static int fcp_scsi_getcap(struct scsi_address *ap, char *cap, int whom);
379 static int fcp_scsi_setcap(struct scsi_address *ap, char *cap, int value,
380     int whom);
381 static void fcp_pkt_teardown(struct scsi_pkt *pkt);
382 static int fcp_scsi_reset_notify(struct scsi_address *ap, int flag,
383     void (*callback)(caddr_t), caddr_t arg);
384 static int fcp_scsi_bus_get_eventcookie(dev_info_t *dip, dev_info_t *rdip,
385     char *name, ddi_eventcookie_t *event_cookiep);
386 static int fcp_scsi_bus_add_eventcall(dev_info_t *dip, dev_info_t *rdip,
387     ddi_eventcookie_t eventid, void (*callback)(), void *arg,
388     ddi_callback_id_t *cb_id);
389 static int fcp_scsi_bus_remove_eventcall(dev_info_t *devi,
390     ddi_callback_id_t cb_id);
391 static int fcp_scsi_bus_post_event(dev_info_t *dip, dev_info_t *rdip,
392     ddi_eventcookie_t eventid, void *impldata);
393 static int fcp_scsi_bus_config(dev_info_t *parent, uint_t flag,
394     ddi_bus_config_op_t op, void *arg, dev_info_t **childp);
395 static int fcp_scsi_bus_unconfig(dev_info_t *parent, uint_t flag,
396     ddi_bus_config_op_t op, void *arg);
397 
398 /*
399  * Internal functions
400  */
401 static int fcp_setup_device_data_ioctl(int cmd, struct fcp_ioctl *data,
402     int mode, int *rval);
403 
404 static int fcp_setup_scsi_ioctl(struct fcp_scsi_cmd *u_fscsi,
405     int mode, int *rval);
406 static int fcp_copyin_scsi_cmd(caddr_t base_addr,
407     struct fcp_scsi_cmd *fscsi, int mode);
408 static int fcp_copyout_scsi_cmd(struct fcp_scsi_cmd *fscsi,
409     caddr_t base_addr, int mode);
410 static int fcp_send_scsi_ioctl(struct fcp_scsi_cmd *fscsi);
411 
412 static struct fcp_tgt *fcp_port_create_tgt(struct fcp_port *pptr,
413     la_wwn_t *pwwn, int	*ret_val, int *fc_status, int *fc_pkt_state,
414     int *fc_pkt_reason, int *fc_pkt_action);
415 static int fcp_tgt_send_plogi(struct fcp_tgt *ptgt, int *fc_status,
416     int *fc_pkt_state, int *fc_pkt_reason, int *fc_pkt_action);
417 static int fcp_tgt_send_prli(struct fcp_tgt	*ptgt, int *fc_status,
418     int *fc_pkt_state, int *fc_pkt_reason, int *fc_pkt_action);
419 static void fcp_ipkt_sema_init(struct fcp_ipkt *icmd);
420 static int fcp_ipkt_sema_wait(struct fcp_ipkt *icmd);
421 static void fcp_ipkt_sema_callback(struct fc_packet *fpkt);
422 static void fcp_ipkt_sema_cleanup(struct fcp_ipkt *icmd);
423 
424 static void fcp_handle_devices(struct fcp_port *pptr,
425     fc_portmap_t devlist[], uint32_t dev_cnt, int link_cnt,
426     fcp_map_tag_t *map_tag, int cause);
427 static int fcp_handle_mapflags(struct fcp_port *pptr,
428     struct fcp_tgt *ptgt, fc_portmap_t *map_entry, int link_cnt,
429     int tgt_cnt, int cause);
430 static int fcp_send_els(struct fcp_port *pptr, struct fcp_tgt *ptgt,
431     struct fcp_ipkt *icmd, uchar_t opcode, int lcount, int tcount, int cause);
432 static void fcp_update_state(struct fcp_port *pptr, uint32_t state,
433     int cause);
434 static void fcp_update_tgt_state(struct fcp_tgt *ptgt, int flag,
435     uint32_t state);
436 static struct fcp_port *fcp_get_port(opaque_t port_handle);
437 static void fcp_unsol_callback(fc_packet_t *fpkt);
438 static void fcp_unsol_resp_init(fc_packet_t *pkt, fc_unsol_buf_t *buf,
439     uchar_t r_ctl, uchar_t type);
440 static int fcp_unsol_prli(struct fcp_port *pptr, fc_unsol_buf_t *buf);
441 static struct fcp_ipkt *fcp_icmd_alloc(struct fcp_port *pptr,
442     struct fcp_tgt *ptgt, int cmd_len, int resp_len, int data_len,
443     int nodma, int lcount, int tcount, int cause, uint32_t rscn_count);
444 static void fcp_icmd_free(struct fcp_port *pptr, struct fcp_ipkt *icmd);
445 static int fcp_alloc_dma(struct fcp_port *pptr, struct fcp_ipkt *icmd,
446     int nodma, int flags);
447 static void fcp_free_dma(struct fcp_port *pptr, struct fcp_ipkt *icmd);
448 static struct fcp_tgt *fcp_lookup_target(struct fcp_port *pptr,
449     uchar_t *wwn);
450 static struct fcp_tgt *fcp_get_target_by_did(struct fcp_port *pptr,
451     uint32_t d_id);
452 static void fcp_icmd_callback(fc_packet_t *fpkt);
453 static int fcp_send_scsi(struct fcp_lun *plun, uchar_t opcode,
454     int len, int lcount, int tcount, int cause, uint32_t rscn_count);
455 static int fcp_check_reportlun(struct fcp_rsp *rsp, fc_packet_t *fpkt);
456 static void fcp_scsi_callback(fc_packet_t *fpkt);
457 static void fcp_retry_scsi_cmd(fc_packet_t *fpkt);
458 static void fcp_handle_inquiry(fc_packet_t *fpkt, struct fcp_ipkt *icmd);
459 static void fcp_handle_reportlun(fc_packet_t *fpkt, struct fcp_ipkt *icmd);
460 static struct fcp_lun *fcp_get_lun(struct fcp_tgt *ptgt,
461     uint16_t lun_num);
462 static int fcp_finish_tgt(struct fcp_port *pptr, struct fcp_tgt *ptgt,
463     int link_cnt, int tgt_cnt, int cause);
464 static void fcp_finish_init(struct fcp_port *pptr);
465 static void fcp_create_luns(struct fcp_tgt *ptgt, int link_cnt,
466     int tgt_cnt, int cause);
467 static int fcp_trigger_lun(struct fcp_lun *plun, child_info_t *cip,
468     int old_mpxio, int online, int link_cnt, int tgt_cnt, int flags);
469 static int fcp_offline_target(struct fcp_port *pptr, struct fcp_tgt *ptgt,
470     int link_cnt, int tgt_cnt, int nowait, int flags);
471 static void fcp_offline_target_now(struct fcp_port *pptr,
472     struct fcp_tgt *ptgt, int link_cnt, int tgt_cnt, int flags);
473 static void fcp_offline_tgt_luns(struct fcp_tgt *ptgt, int link_cnt,
474     int tgt_cnt, int flags);
475 static void fcp_offline_lun(struct fcp_lun *plun, int link_cnt, int tgt_cnt,
476     int nowait, int flags);
477 static void fcp_prepare_offline_lun(struct fcp_lun *plun, int link_cnt,
478     int tgt_cnt);
479 static void fcp_offline_lun_now(struct fcp_lun *plun, int link_cnt,
480     int tgt_cnt, int flags);
481 static void fcp_scan_offline_luns(struct fcp_port *pptr);
482 static void fcp_scan_offline_tgts(struct fcp_port *pptr);
483 static void fcp_update_offline_flags(struct fcp_lun *plun);
484 static struct fcp_pkt *fcp_scan_commands(struct fcp_lun *plun);
485 static void fcp_abort_commands(struct fcp_pkt *head, struct
486     fcp_port *pptr);
487 static void fcp_cmd_callback(fc_packet_t *fpkt);
488 static void fcp_complete_pkt(fc_packet_t *fpkt);
489 static int fcp_validate_fcp_response(struct fcp_rsp *rsp,
490     struct fcp_port *pptr);
491 static int fcp_device_changed(struct fcp_port *pptr, struct fcp_tgt *ptgt,
492     fc_portmap_t *map_entry, int link_cnt, int tgt_cnt, int cause);
493 static struct fcp_lun *fcp_alloc_lun(struct fcp_tgt *ptgt);
494 static void fcp_dealloc_lun(struct fcp_lun *plun);
495 static struct fcp_tgt *fcp_alloc_tgt(struct fcp_port *pptr,
496     fc_portmap_t *map_entry, int link_cnt);
497 static void fcp_dealloc_tgt(struct fcp_tgt *ptgt);
498 static void fcp_queue_ipkt(struct fcp_port *pptr, fc_packet_t *fpkt);
499 static int fcp_transport(opaque_t port_handle, fc_packet_t *fpkt,
500     int internal);
501 static void fcp_log(int level, dev_info_t *dip, const char *fmt, ...);
502 static int fcp_handle_port_attach(opaque_t ulph, fc_ulp_port_info_t *pinfo,
503     uint32_t s_id, int instance);
504 static int fcp_handle_port_detach(struct fcp_port *pptr, int flag,
505     int instance);
506 static void fcp_cleanup_port(struct fcp_port *pptr, int instance);
507 static int fcp_kmem_cache_constructor(struct scsi_pkt *, scsi_hba_tran_t *,
508     int);
509 static void fcp_kmem_cache_destructor(struct  scsi_pkt *, scsi_hba_tran_t *);
510 static int fcp_pkt_setup(struct scsi_pkt *, int (*)(), caddr_t);
511 static int fcp_alloc_cmd_resp(struct fcp_port *pptr, fc_packet_t *fpkt,
512     int flags);
513 static void fcp_free_cmd_resp(struct fcp_port *pptr, fc_packet_t *fpkt);
514 static int fcp_reset_target(struct scsi_address *ap, int level);
515 static int fcp_commoncap(struct scsi_address *ap, char *cap,
516     int val, int tgtonly, int doset);
517 static int fcp_scsi_get_name(struct scsi_device *sd, char *name, int len);
518 static int fcp_scsi_get_bus_addr(struct scsi_device *sd, char *name, int len);
519 static int fcp_linkreset(struct fcp_port *pptr, struct scsi_address *ap,
520     int sleep);
521 static int fcp_handle_port_resume(opaque_t ulph, fc_ulp_port_info_t *pinfo,
522     uint32_t s_id, fc_attach_cmd_t cmd, int instance);
523 static void fcp_cp_pinfo(struct fcp_port *pptr, fc_ulp_port_info_t *pinfo);
524 static void fcp_process_elem(struct fcp_hp_elem *elem, int result);
525 static child_info_t *fcp_get_cip(struct fcp_lun *plun, child_info_t *cip,
526     int lcount, int tcount);
527 static int fcp_is_dip_present(struct fcp_lun *plun, dev_info_t *cdip);
528 static int fcp_is_child_present(struct fcp_lun *plun, child_info_t *cip);
529 static dev_info_t *fcp_create_dip(struct fcp_lun *plun, int link_cnt,
530     int tgt_cnt);
531 static dev_info_t *fcp_find_existing_dip(struct fcp_lun *plun,
532     dev_info_t *pdip, caddr_t name);
533 static int fcp_online_child(struct fcp_lun *plun, child_info_t *cip,
534     int lcount, int tcount, int flags, int *circ);
535 static int fcp_offline_child(struct fcp_lun *plun, child_info_t *cip,
536     int lcount, int tcount, int flags, int *circ);
537 static void fcp_remove_child(struct fcp_lun *plun);
538 static void fcp_watch(void *arg);
539 static void fcp_check_reset_delay(struct fcp_port *pptr);
540 static void fcp_abort_all(struct fcp_port *pptr, struct fcp_tgt *ttgt,
541     struct fcp_lun *rlun, int tgt_cnt);
542 struct fcp_port *fcp_soft_state_unlink(struct fcp_port *pptr);
543 static struct fcp_lun *fcp_lookup_lun(struct fcp_port *pptr,
544     uchar_t *wwn, uint16_t lun);
545 static void fcp_prepare_pkt(struct fcp_port *pptr, struct fcp_pkt *cmd,
546     struct fcp_lun *plun);
547 static void fcp_post_callback(struct fcp_pkt *cmd);
548 static int fcp_dopoll(struct fcp_port *pptr, struct fcp_pkt *cmd);
549 static struct fcp_port *fcp_dip2port(dev_info_t *dip);
550 struct fcp_lun *fcp_get_lun_from_cip(struct fcp_port *pptr,
551     child_info_t *cip);
552 static int fcp_pass_to_hp_and_wait(struct fcp_port *pptr,
553     struct fcp_lun *plun, child_info_t *cip, int what, int link_cnt,
554     int tgt_cnt, int flags);
555 static struct fcp_hp_elem *fcp_pass_to_hp(struct fcp_port *pptr,
556     struct fcp_lun *plun, child_info_t *cip, int what, int link_cnt,
557     int tgt_cnt, int flags, int wait);
558 static void fcp_retransport_cmd(struct fcp_port *pptr,
559     struct fcp_pkt *cmd);
560 static void fcp_fail_cmd(struct fcp_pkt *cmd, uchar_t reason,
561     uint_t statistics);
562 static void fcp_queue_pkt(struct fcp_port *pptr, struct fcp_pkt *cmd);
563 static void fcp_update_targets(struct fcp_port *pptr,
564     fc_portmap_t *dev_list, uint32_t count, uint32_t state, int cause);
565 static int fcp_call_finish_init(struct fcp_port *pptr,
566     struct fcp_tgt *ptgt, int lcount, int tcount, int cause);
567 static int fcp_call_finish_init_held(struct fcp_port *pptr,
568     struct fcp_tgt *ptgt, int lcount, int tcount, int cause);
569 static void fcp_reconfigure_luns(void * tgt_handle);
570 static void fcp_free_targets(struct fcp_port *pptr);
571 static void fcp_free_target(struct fcp_tgt *ptgt);
572 static int fcp_is_retryable(struct fcp_ipkt *icmd);
573 static int fcp_create_on_demand(struct fcp_port *pptr, uchar_t *pwwn);
574 static void fcp_ascii_to_wwn(caddr_t string, uchar_t bytes[], unsigned int);
575 static void fcp_wwn_to_ascii(uchar_t bytes[], char *string);
576 static void fcp_print_error(fc_packet_t *fpkt);
577 static int fcp_handle_ipkt_errors(struct fcp_port *pptr,
578     struct fcp_tgt *ptgt, struct fcp_ipkt *icmd, int rval, caddr_t op);
579 static int fcp_outstanding_lun_cmds(struct fcp_tgt *ptgt);
580 static fc_portmap_t *fcp_construct_map(struct fcp_port *pptr,
581     uint32_t *dev_cnt);
582 static void fcp_offline_all(struct fcp_port *pptr, int lcount, int cause);
583 static int fcp_get_statec_count(struct fcp_ioctl *data, int mode, int *rval);
584 static int fcp_copyin_fcp_ioctl_data(struct fcp_ioctl *, int, int *,
585     struct fcp_ioctl *, struct fcp_port **);
586 static char *fcp_get_lun_path(struct fcp_lun *plun);
587 static int fcp_get_target_mappings(struct fcp_ioctl *data, int mode,
588     int *rval);
589 static int fcp_do_ns_registry(struct fcp_port *pptr, uint32_t s_id);
590 static void fcp_retry_ns_registry(struct fcp_port *pptr, uint32_t s_id);
591 static char *fcp_get_lun_path(struct fcp_lun *plun);
592 static int fcp_get_target_mappings(struct fcp_ioctl *data, int mode,
593     int *rval);
594 static void fcp_reconfig_wait(struct fcp_port *pptr);
595 
596 /*
597  * New functions added for mpxio support
598  */
599 static int fcp_virt_tgt_init(dev_info_t *hba_dip, dev_info_t *tgt_dip,
600     scsi_hba_tran_t *hba_tran, struct scsi_device *sd);
601 static mdi_pathinfo_t *fcp_create_pip(struct fcp_lun *plun, int lcount,
602     int tcount);
603 static mdi_pathinfo_t *fcp_find_existing_pip(struct fcp_lun *plun,
604     dev_info_t *pdip);
605 static int fcp_is_pip_present(struct fcp_lun *plun, mdi_pathinfo_t *pip);
606 static void fcp_handle_page83(fc_packet_t *, struct fcp_ipkt *, int);
607 static void fcp_update_mpxio_path_verifybusy(struct fcp_port *pptr);
608 static int fcp_copy_guid_2_lun_block(struct fcp_lun *plun, char *guidp);
609 static int fcp_update_mpxio_path(struct fcp_lun *plun, child_info_t *cip,
610     int what);
611 static int fcp_is_reconfig_needed(struct fcp_tgt *ptgt,
612     fc_packet_t *fpkt);
613 static int fcp_symmetric_device_probe(struct fcp_lun *plun);
614 
615 /*
616  * New functions added for lun masking support
617  */
618 static void fcp_read_blacklist(dev_info_t *dip,
619     struct fcp_black_list_entry **pplun_blacklist);
620 static void fcp_mask_pwwn_lun(char *curr_pwwn, char *curr_lun,
621     struct fcp_black_list_entry **pplun_blacklist);
622 static void fcp_add_one_mask(char *curr_pwwn, uint32_t lun_id,
623     struct fcp_black_list_entry **pplun_blacklist);
624 static int fcp_should_mask(la_wwn_t *wwn, uint32_t lun_id);
625 static void fcp_cleanup_blacklist(struct fcp_black_list_entry **lun_blacklist);
626 
627 extern struct mod_ops	mod_driverops;
628 /*
629  * This variable is defined in modctl.c and set to '1' after the root driver
630  * and fs are loaded.  It serves as an indication that the root filesystem can
631  * be used.
632  */
633 extern int		modrootloaded;
634 /*
635  * This table contains strings associated with the SCSI sense key codes.  It
636  * is used by FCP to print a clear explanation of the code returned in the
637  * sense information by a device.
638  */
639 extern char		*sense_keys[];
640 /*
641  * This device is created by the SCSI pseudo nexus driver (SCSI vHCI).	It is
642  * under this device that the paths to a physical device are created when
643  * MPxIO is used.
644  */
645 extern dev_info_t	*scsi_vhci_dip;
646 
647 /*
648  * Report lun processing
649  */
650 #define	FCP_LUN_ADDRESSING		0x80
651 #define	FCP_PD_ADDRESSING		0x00
652 #define	FCP_VOLUME_ADDRESSING		0x40
653 
654 #define	FCP_SVE_THROTTLE		0x28 /* Vicom */
655 #define	MAX_INT_DMA			0x7fffffff
656 #define	FCP_MAX_SENSE_LEN		252
657 #define	FCP_MAX_RESPONSE_LEN		0xffffff
658 /*
659  * Property definitions
660  */
661 #define	NODE_WWN_PROP	(char *)fcp_node_wwn_prop
662 #define	PORT_WWN_PROP	(char *)fcp_port_wwn_prop
663 #define	TARGET_PROP	(char *)fcp_target_prop
664 #define	LUN_PROP	(char *)fcp_lun_prop
665 #define	SAM_LUN_PROP	(char *)fcp_sam_lun_prop
666 #define	CONF_WWN_PROP	(char *)fcp_conf_wwn_prop
667 #define	OBP_BOOT_WWN	(char *)fcp_obp_boot_wwn
668 #define	MANUAL_CFG_ONLY	(char *)fcp_manual_config_only
669 #define	INIT_PORT_PROP	(char *)fcp_init_port_prop
670 #define	TGT_PORT_PROP	(char *)fcp_tgt_port_prop
671 #define	LUN_BLACKLIST_PROP	(char *)fcp_lun_blacklist_prop
672 /*
673  * Short hand macros.
674  */
675 #define	LUN_PORT	(plun->lun_tgt->tgt_port)
676 #define	LUN_TGT		(plun->lun_tgt)
677 
678 /*
679  * Driver private macros
680  */
681 #define	FCP_ATOB(x)	(((x) >= '0' && (x) <= '9') ? ((x) - '0') :	\
682 			((x) >= 'a' && (x) <= 'f') ?			\
683 			((x) - 'a' + 10) : ((x) - 'A' + 10))
684 
685 #define	FCP_MAX(a, b)	((a) > (b) ? (a) : (b))
686 
687 #define	FCP_N_NDI_EVENTS						\
688 	(sizeof (fcp_ndi_event_defs) / sizeof (ndi_event_definition_t))
689 
690 #define	FCP_LINK_STATE_CHANGED(p, c)			\
691 	((p)->port_link_cnt != (c)->ipkt_link_cnt)
692 
693 #define	FCP_TGT_STATE_CHANGED(t, c)			\
694 	((t)->tgt_change_cnt != (c)->ipkt_change_cnt)
695 
696 #define	FCP_STATE_CHANGED(p, t, c)		\
697 	(FCP_TGT_STATE_CHANGED(t, c))
698 
699 #define	FCP_MUST_RETRY(fpkt)				\
700 	((fpkt)->pkt_state == FC_PKT_LOCAL_BSY ||	\
701 	(fpkt)->pkt_state == FC_PKT_LOCAL_RJT ||	\
702 	(fpkt)->pkt_state == FC_PKT_TRAN_BSY ||	\
703 	(fpkt)->pkt_state == FC_PKT_ELS_IN_PROGRESS ||	\
704 	(fpkt)->pkt_state == FC_PKT_NPORT_BSY ||	\
705 	(fpkt)->pkt_state == FC_PKT_FABRIC_BSY ||	\
706 	(fpkt)->pkt_state == FC_PKT_PORT_OFFLINE ||	\
707 	(fpkt)->pkt_reason == FC_REASON_OFFLINE)
708 
709 #define	FCP_SENSE_REPORTLUN_CHANGED(es)		\
710 	((es)->es_key == KEY_UNIT_ATTENTION &&	\
711 	(es)->es_add_code == 0x3f &&		\
712 	(es)->es_qual_code == 0x0e)
713 
714 #define	FCP_SENSE_NO_LUN(es)			\
715 	((es)->es_key == KEY_ILLEGAL_REQUEST &&	\
716 	(es)->es_add_code == 0x25 &&		\
717 	(es)->es_qual_code == 0x0)
718 
719 #define	FCP_VERSION		"1.187"
720 #define	FCP_NAME_VERSION	"SunFC FCP v" FCP_VERSION
721 
722 #define	FCP_NUM_ELEMENTS(array)			\
723 	(sizeof (array) / sizeof ((array)[0]))
724 
725 /*
726  * Debugging, Error reporting, and tracing
727  */
728 #define	FCP_LOG_SIZE		1024 * 1024
729 
730 #define	FCP_LEVEL_1		0x00001		/* attach/detach PM CPR */
731 #define	FCP_LEVEL_2		0x00002		/* failures/Invalid data */
732 #define	FCP_LEVEL_3		0x00004		/* state change, discovery */
733 #define	FCP_LEVEL_4		0x00008		/* ULP messages */
734 #define	FCP_LEVEL_5		0x00010		/* ELS/SCSI cmds */
735 #define	FCP_LEVEL_6		0x00020		/* Transport failures */
736 #define	FCP_LEVEL_7		0x00040
737 #define	FCP_LEVEL_8		0x00080		/* I/O tracing */
738 #define	FCP_LEVEL_9		0x00100		/* I/O tracing */
739 
740 
741 
742 /*
743  * Log contents to system messages file
744  */
745 #define	FCP_MSG_LEVEL_1	(FCP_LEVEL_1 | FC_TRACE_LOG_MSG)
746 #define	FCP_MSG_LEVEL_2	(FCP_LEVEL_2 | FC_TRACE_LOG_MSG)
747 #define	FCP_MSG_LEVEL_3	(FCP_LEVEL_3 | FC_TRACE_LOG_MSG)
748 #define	FCP_MSG_LEVEL_4	(FCP_LEVEL_4 | FC_TRACE_LOG_MSG)
749 #define	FCP_MSG_LEVEL_5	(FCP_LEVEL_5 | FC_TRACE_LOG_MSG)
750 #define	FCP_MSG_LEVEL_6	(FCP_LEVEL_6 | FC_TRACE_LOG_MSG)
751 #define	FCP_MSG_LEVEL_7	(FCP_LEVEL_7 | FC_TRACE_LOG_MSG)
752 #define	FCP_MSG_LEVEL_8	(FCP_LEVEL_8 | FC_TRACE_LOG_MSG)
753 #define	FCP_MSG_LEVEL_9	(FCP_LEVEL_9 | FC_TRACE_LOG_MSG)
754 
755 
756 /*
757  * Log contents to trace buffer
758  */
759 #define	FCP_BUF_LEVEL_1	(FCP_LEVEL_1 | FC_TRACE_LOG_BUF)
760 #define	FCP_BUF_LEVEL_2	(FCP_LEVEL_2 | FC_TRACE_LOG_BUF)
761 #define	FCP_BUF_LEVEL_3	(FCP_LEVEL_3 | FC_TRACE_LOG_BUF)
762 #define	FCP_BUF_LEVEL_4	(FCP_LEVEL_4 | FC_TRACE_LOG_BUF)
763 #define	FCP_BUF_LEVEL_5	(FCP_LEVEL_5 | FC_TRACE_LOG_BUF)
764 #define	FCP_BUF_LEVEL_6	(FCP_LEVEL_6 | FC_TRACE_LOG_BUF)
765 #define	FCP_BUF_LEVEL_7	(FCP_LEVEL_7 | FC_TRACE_LOG_BUF)
766 #define	FCP_BUF_LEVEL_8	(FCP_LEVEL_8 | FC_TRACE_LOG_BUF)
767 #define	FCP_BUF_LEVEL_9	(FCP_LEVEL_9 | FC_TRACE_LOG_BUF)
768 
769 
770 /*
771  * Log contents to both system messages file and trace buffer
772  */
773 #define	FCP_MSG_BUF_LEVEL_1	(FCP_LEVEL_1 | FC_TRACE_LOG_BUF |	\
774 				FC_TRACE_LOG_MSG)
775 #define	FCP_MSG_BUF_LEVEL_2	(FCP_LEVEL_2 | FC_TRACE_LOG_BUF |	\
776 				FC_TRACE_LOG_MSG)
777 #define	FCP_MSG_BUF_LEVEL_3	(FCP_LEVEL_3 | FC_TRACE_LOG_BUF |	\
778 				FC_TRACE_LOG_MSG)
779 #define	FCP_MSG_BUF_LEVEL_4	(FCP_LEVEL_4 | FC_TRACE_LOG_BUF |	\
780 				FC_TRACE_LOG_MSG)
781 #define	FCP_MSG_BUF_LEVEL_5	(FCP_LEVEL_5 | FC_TRACE_LOG_BUF |	\
782 				FC_TRACE_LOG_MSG)
783 #define	FCP_MSG_BUF_LEVEL_6	(FCP_LEVEL_6 | FC_TRACE_LOG_BUF |	\
784 				FC_TRACE_LOG_MSG)
785 #define	FCP_MSG_BUF_LEVEL_7	(FCP_LEVEL_7 | FC_TRACE_LOG_BUF |	\
786 				FC_TRACE_LOG_MSG)
787 #define	FCP_MSG_BUF_LEVEL_8	(FCP_LEVEL_8 | FC_TRACE_LOG_BUF |	\
788 				FC_TRACE_LOG_MSG)
789 #define	FCP_MSG_BUF_LEVEL_9	(FCP_LEVEL_9 | FC_TRACE_LOG_BUF |	\
790 				FC_TRACE_LOG_MSG)
791 #ifdef DEBUG
792 #define	FCP_DTRACE	fc_trace_debug
793 #else
794 #define	FCP_DTRACE
795 #endif
796 
797 #define	FCP_TRACE	fc_trace_debug
798 
799 static struct cb_ops fcp_cb_ops = {
800 	fcp_open,			/* open */
801 	fcp_close,			/* close */
802 	nodev,				/* strategy */
803 	nodev,				/* print */
804 	nodev,				/* dump */
805 	nodev,				/* read */
806 	nodev,				/* write */
807 	fcp_ioctl,			/* ioctl */
808 	nodev,				/* devmap */
809 	nodev,				/* mmap */
810 	nodev,				/* segmap */
811 	nochpoll,			/* chpoll */
812 	ddi_prop_op,			/* cb_prop_op */
813 	0,				/* streamtab */
814 	D_NEW | D_MP | D_HOTPLUG,	/* cb_flag */
815 	CB_REV,				/* rev */
816 	nodev,				/* aread */
817 	nodev				/* awrite */
818 };
819 
820 
821 static struct dev_ops fcp_ops = {
822 	DEVO_REV,
823 	0,
824 	ddi_getinfo_1to1,
825 	nulldev,		/* identify */
826 	nulldev,		/* probe */
827 	fcp_attach,		/* attach and detach are mandatory */
828 	fcp_detach,
829 	nodev,			/* reset */
830 	&fcp_cb_ops,		/* cb_ops */
831 	NULL,			/* bus_ops */
832 	NULL,			/* power */
833 };
834 
835 
836 char *fcp_version = FCP_NAME_VERSION;
837 
838 static struct modldrv modldrv = {
839 	&mod_driverops,
840 	FCP_NAME_VERSION,
841 	&fcp_ops
842 };
843 
844 
845 static struct modlinkage modlinkage = {
846 	MODREV_1,
847 	&modldrv,
848 	NULL
849 };
850 
851 
852 static fc_ulp_modinfo_t fcp_modinfo = {
853 	&fcp_modinfo,			/* ulp_handle */
854 	FCTL_ULP_MODREV_4,		/* ulp_rev */
855 	FC4_SCSI_FCP,			/* ulp_type */
856 	"fcp",				/* ulp_name */
857 	FCP_STATEC_MASK,		/* ulp_statec_mask */
858 	fcp_port_attach,		/* ulp_port_attach */
859 	fcp_port_detach,		/* ulp_port_detach */
860 	fcp_port_ioctl,			/* ulp_port_ioctl */
861 	fcp_els_callback,		/* ulp_els_callback */
862 	fcp_data_callback,		/* ulp_data_callback */
863 	fcp_statec_callback		/* ulp_statec_callback */
864 };
865 
866 #ifdef	DEBUG
867 #define	FCP_TRACE_DEFAULT	(FC_TRACE_LOG_MASK | FCP_LEVEL_1 |	\
868 				FCP_LEVEL_2 | FCP_LEVEL_3 |		\
869 				FCP_LEVEL_4 | FCP_LEVEL_5 |		\
870 				FCP_LEVEL_6 | FCP_LEVEL_7)
871 #else
872 #define	FCP_TRACE_DEFAULT	(FC_TRACE_LOG_MASK | FCP_LEVEL_1 |	\
873 				FCP_LEVEL_2 | FCP_LEVEL_3 |		\
874 				FCP_LEVEL_4 | FCP_LEVEL_5 |		\
875 				FCP_LEVEL_6 | FCP_LEVEL_7)
876 #endif
877 
878 /* FCP global variables */
879 int			fcp_bus_config_debug = 0;
880 static int		fcp_log_size = FCP_LOG_SIZE;
881 static int		fcp_trace = FCP_TRACE_DEFAULT;
882 static fc_trace_logq_t	*fcp_logq = NULL;
883 static struct fcp_black_list_entry	*fcp_lun_blacklist = NULL;
884 /*
885  * The auto-configuration is set by default.  The only way of disabling it is
886  * through the property MANUAL_CFG_ONLY in the fcp.conf file.
887  */
888 static int		fcp_enable_auto_configuration = 1;
889 static int		fcp_max_bus_config_retries	= 4;
890 static int		fcp_lun_ready_retry = 300;
891 /*
892  * The value assigned to the following variable has changed several times due
893  * to a problem with the data underruns reporting of some firmware(s).	The
894  * current value of 50 gives a timeout value of 25 seconds for a max number
895  * of 256 LUNs.
896  */
897 static int		fcp_max_target_retries = 50;
898 /*
899  * Watchdog variables
900  * ------------------
901  *
902  * fcp_watchdog_init
903  *
904  *	Indicates if the watchdog timer is running or not.  This is actually
905  *	a counter of the number of Fibre Channel ports that attached.  When
906  *	the first port attaches the watchdog is started.  When the last port
907  *	detaches the watchdog timer is stopped.
908  *
909  * fcp_watchdog_time
910  *
911  *	This is the watchdog clock counter.  It is incremented by
912  *	fcp_watchdog_time each time the watchdog timer expires.
913  *
914  * fcp_watchdog_timeout
915  *
916  *	Increment value of the variable fcp_watchdog_time as well as the
917  *	the timeout value of the watchdog timer.  The unit is 1 second.	 It
918  *	is strange that this is not a #define	but a variable since the code
919  *	never changes this value.  The reason why it can be said that the
920  *	unit is 1 second is because the number of ticks for the watchdog
921  *	timer is determined like this:
922  *
923  *	    fcp_watchdog_tick = fcp_watchdog_timeout *
924  *				  drv_usectohz(1000000);
925  *
926  *	The value 1000000 is hard coded in the code.
927  *
928  * fcp_watchdog_tick
929  *
930  *	Watchdog timer value in ticks.
931  */
932 static int		fcp_watchdog_init = 0;
933 static int		fcp_watchdog_time = 0;
934 static int		fcp_watchdog_timeout = 1;
935 static int		fcp_watchdog_tick;
936 
937 /*
938  * fcp_offline_delay is a global variable to enable customisation of
939  * the timeout on link offlines or RSCNs. The default value is set
940  * to match FCP_OFFLINE_DELAY (20sec), which is 2*RA_TOV_els as
941  * specified in FCP4 Chapter 11 (see www.t10.org).
942  *
943  * The variable fcp_offline_delay is specified in SECONDS.
944  *
945  * If we made this a static var then the user would not be able to
946  * change it. This variable is set in fcp_attach().
947  */
948 unsigned int		fcp_offline_delay = FCP_OFFLINE_DELAY;
949 
950 static void		*fcp_softstate = NULL; /* for soft state */
951 static uchar_t		fcp_oflag = FCP_IDLE; /* open flag */
952 static kmutex_t		fcp_global_mutex;
953 static kmutex_t		fcp_ioctl_mutex;
954 static dev_info_t	*fcp_global_dip = NULL;
955 static timeout_id_t	fcp_watchdog_id;
956 const char		*fcp_lun_prop = "lun";
957 const char		*fcp_sam_lun_prop = "sam-lun";
958 const char		*fcp_target_prop = "target";
959 /*
960  * NOTE: consumers of "node-wwn" property include stmsboot in ON
961  * consolidation.
962  */
963 const char		*fcp_node_wwn_prop = "node-wwn";
964 const char		*fcp_port_wwn_prop = "port-wwn";
965 const char		*fcp_conf_wwn_prop = "fc-port-wwn";
966 const char		*fcp_obp_boot_wwn = "fc-boot-dev-portwwn";
967 const char		*fcp_manual_config_only = "manual_configuration_only";
968 const char		*fcp_init_port_prop = "initiator-port";
969 const char		*fcp_tgt_port_prop = "target-port";
970 const char		*fcp_lun_blacklist_prop = "pwwn-lun-blacklist";
971 
972 static struct fcp_port	*fcp_port_head = NULL;
973 static ddi_eventcookie_t	fcp_insert_eid;
974 static ddi_eventcookie_t	fcp_remove_eid;
975 
976 static ndi_event_definition_t	fcp_ndi_event_defs[] = {
977 	{ FCP_EVENT_TAG_INSERT, FCAL_INSERT_EVENT, EPL_KERNEL },
978 	{ FCP_EVENT_TAG_REMOVE, FCAL_REMOVE_EVENT, EPL_INTERRUPT }
979 };
980 
981 /*
982  * List of valid commands for the scsi_ioctl call
983  */
984 static uint8_t scsi_ioctl_list[] = {
985 	SCMD_INQUIRY,
986 	SCMD_REPORT_LUN,
987 	SCMD_READ_CAPACITY
988 };
989 
990 /*
991  * this is used to dummy up a report lun response for cases
992  * where the target doesn't support it
993  */
994 static uchar_t fcp_dummy_lun[] = {
995 	0x00,		/* MSB length (length = no of luns * 8) */
996 	0x00,
997 	0x00,
998 	0x08,		/* LSB length */
999 	0x00,		/* MSB reserved */
1000 	0x00,
1001 	0x00,
1002 	0x00,		/* LSB reserved */
1003 	FCP_PD_ADDRESSING,
1004 	0x00,		/* LUN is ZERO at the first level */
1005 	0x00,
1006 	0x00,		/* second level is zero */
1007 	0x00,
1008 	0x00,		/* third level is zero */
1009 	0x00,
1010 	0x00		/* fourth level is zero */
1011 };
1012 
1013 static uchar_t fcp_alpa_to_switch[] = {
1014 	0x00, 0x7d, 0x7c, 0x00, 0x7b, 0x00, 0x00, 0x00, 0x7a, 0x00,
1015 	0x00, 0x00, 0x00, 0x00, 0x00, 0x79, 0x78, 0x00, 0x00, 0x00,
1016 	0x00, 0x00, 0x00, 0x77, 0x76, 0x00, 0x00, 0x75, 0x00, 0x74,
1017 	0x73, 0x72, 0x00, 0x00, 0x00, 0x71, 0x00, 0x70, 0x6f, 0x6e,
1018 	0x00, 0x6d, 0x6c, 0x6b, 0x6a, 0x69, 0x68, 0x00, 0x00, 0x67,
1019 	0x66, 0x65, 0x64, 0x63, 0x62, 0x00, 0x00, 0x61, 0x60, 0x00,
1020 	0x5f, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x5e, 0x00, 0x5d,
1021 	0x5c, 0x5b, 0x00, 0x5a, 0x59, 0x58, 0x57, 0x56, 0x55, 0x00,
1022 	0x00, 0x54, 0x53, 0x52, 0x51, 0x50, 0x4f, 0x00, 0x00, 0x4e,
1023 	0x4d, 0x00, 0x4c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x4b,
1024 	0x00, 0x4a, 0x49, 0x48, 0x00, 0x47, 0x46, 0x45, 0x44, 0x43,
1025 	0x42, 0x00, 0x00, 0x41, 0x40, 0x3f, 0x3e, 0x3d, 0x3c, 0x00,
1026 	0x00, 0x3b, 0x3a, 0x00, 0x39, 0x00, 0x00, 0x00, 0x38, 0x37,
1027 	0x36, 0x00, 0x35, 0x00, 0x00, 0x00, 0x34, 0x00, 0x00, 0x00,
1028 	0x00, 0x00, 0x00, 0x33, 0x32, 0x00, 0x00, 0x00, 0x00, 0x00,
1029 	0x00, 0x31, 0x30, 0x00, 0x00, 0x2f, 0x00, 0x2e, 0x2d, 0x2c,
1030 	0x00, 0x00, 0x00, 0x2b, 0x00, 0x2a, 0x29, 0x28, 0x00, 0x27,
1031 	0x26, 0x25, 0x24, 0x23, 0x22, 0x00, 0x00, 0x21, 0x20, 0x1f,
1032 	0x1e, 0x1d, 0x1c, 0x00, 0x00, 0x1b, 0x1a, 0x00, 0x19, 0x00,
1033 	0x00, 0x00, 0x00, 0x00, 0x00, 0x18, 0x00, 0x17, 0x16, 0x15,
1034 	0x00, 0x14, 0x13, 0x12, 0x11, 0x10, 0x0f, 0x00, 0x00, 0x0e,
1035 	0x0d, 0x0c, 0x0b, 0x0a, 0x09, 0x00, 0x00, 0x08, 0x07, 0x00,
1036 	0x06, 0x00, 0x00, 0x00, 0x05, 0x04, 0x03, 0x00, 0x02, 0x00,
1037 	0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1038 };
1039 
1040 static caddr_t pid = "SESS01	      ";
1041 
1042 #if	!defined(lint)
1043 
1044 _NOTE(MUTEX_PROTECTS_DATA(fcp_global_mutex,
1045     fcp_port::fcp_next fcp_watchdog_id))
1046 
1047 _NOTE(DATA_READABLE_WITHOUT_LOCK(fcp_watchdog_time))
1048 
1049 _NOTE(SCHEME_PROTECTS_DATA("Unshared",
1050     fcp_insert_eid
1051     fcp_remove_eid
1052     fcp_watchdog_time))
1053 
1054 _NOTE(SCHEME_PROTECTS_DATA("Unshared",
1055     fcp_cb_ops
1056     fcp_ops
1057     callb_cpr))
1058 
1059 #endif /* lint */
1060 
1061 /*
1062  * This table is used to determine whether or not it's safe to copy in
1063  * the target node name for a lun.  Since all luns behind the same target
1064  * have the same wwnn, only tagets that do not support multiple luns are
1065  * eligible to be enumerated under mpxio if they aren't page83 compliant.
1066  */
1067 
1068 char *fcp_symmetric_disk_table[] = {
1069 	"SEAGATE ST",
1070 	"IBM	 DDYFT",
1071 	"SUNW	 SUNWGS",	/* Daktari enclosure */
1072 	"SUN	 SENA",		/* SES device */
1073 	"SUN	 SESS01"	/* VICOM SVE box */
1074 };
1075 
1076 int fcp_symmetric_disk_table_size =
1077 	sizeof (fcp_symmetric_disk_table)/sizeof (char *);
1078 
1079 /*
1080  * The _init(9e) return value should be that of mod_install(9f). Under
1081  * some circumstances, a failure may not be related mod_install(9f) and
1082  * one would then require a return value to indicate the failure. Looking
1083  * at mod_install(9f), it is expected to return 0 for success and non-zero
1084  * for failure. mod_install(9f) for device drivers, further goes down the
1085  * calling chain and ends up in ddi_installdrv(), whose return values are
1086  * DDI_SUCCESS and DDI_FAILURE - There are also other functions in the
1087  * calling chain of mod_install(9f) which return values like EINVAL and
1088  * in some even return -1.
1089  *
1090  * To work around the vagaries of the mod_install() calling chain, return
1091  * either 0 or ENODEV depending on the success or failure of mod_install()
1092  */
1093 int
1094 _init(void)
1095 {
1096 	int rval;
1097 
1098 	/*
1099 	 * Allocate soft state and prepare to do ddi_soft_state_zalloc()
1100 	 * before registering with the transport first.
1101 	 */
1102 	if (ddi_soft_state_init(&fcp_softstate,
1103 	    sizeof (struct fcp_port), FCP_INIT_ITEMS) != 0) {
1104 		return (EINVAL);
1105 	}
1106 
1107 	mutex_init(&fcp_global_mutex, NULL, MUTEX_DRIVER, NULL);
1108 	mutex_init(&fcp_ioctl_mutex, NULL, MUTEX_DRIVER, NULL);
1109 
1110 	if ((rval = fc_ulp_add(&fcp_modinfo)) != FC_SUCCESS) {
1111 		cmn_err(CE_WARN, "fcp: fc_ulp_add failed");
1112 		mutex_destroy(&fcp_global_mutex);
1113 		mutex_destroy(&fcp_ioctl_mutex);
1114 		ddi_soft_state_fini(&fcp_softstate);
1115 		return (ENODEV);
1116 	}
1117 
1118 	fcp_logq = fc_trace_alloc_logq(fcp_log_size);
1119 
1120 	if ((rval = mod_install(&modlinkage)) != 0) {
1121 		fc_trace_free_logq(fcp_logq);
1122 		(void) fc_ulp_remove(&fcp_modinfo);
1123 		mutex_destroy(&fcp_global_mutex);
1124 		mutex_destroy(&fcp_ioctl_mutex);
1125 		ddi_soft_state_fini(&fcp_softstate);
1126 		rval = ENODEV;
1127 	}
1128 
1129 	return (rval);
1130 }
1131 
1132 
1133 /*
1134  * the system is done with us as a driver, so clean up
1135  */
1136 int
1137 _fini(void)
1138 {
1139 	int rval;
1140 
1141 	/*
1142 	 * don't start cleaning up until we know that the module remove
1143 	 * has worked  -- if this works, then we know that each instance
1144 	 * has successfully been DDI_DETACHed
1145 	 */
1146 	if ((rval = mod_remove(&modlinkage)) != 0) {
1147 		return (rval);
1148 	}
1149 
1150 	(void) fc_ulp_remove(&fcp_modinfo);
1151 
1152 	ddi_soft_state_fini(&fcp_softstate);
1153 	mutex_destroy(&fcp_global_mutex);
1154 	mutex_destroy(&fcp_ioctl_mutex);
1155 	fc_trace_free_logq(fcp_logq);
1156 
1157 	return (rval);
1158 }
1159 
1160 
1161 int
1162 _info(struct modinfo *modinfop)
1163 {
1164 	return (mod_info(&modlinkage, modinfop));
1165 }
1166 
1167 
1168 /*
1169  * attach the module
1170  */
1171 static int
1172 fcp_attach(dev_info_t *devi, ddi_attach_cmd_t cmd)
1173 {
1174 	int rval = DDI_SUCCESS;
1175 
1176 	FCP_DTRACE(fcp_logq, "fcp", fcp_trace,
1177 	    FCP_BUF_LEVEL_8, 0, "fcp module attach: cmd=0x%x", cmd);
1178 
1179 	if (cmd == DDI_ATTACH) {
1180 		/* The FCP pseudo device is created here. */
1181 		mutex_enter(&fcp_global_mutex);
1182 		fcp_global_dip = devi;
1183 		mutex_exit(&fcp_global_mutex);
1184 
1185 		if (ddi_create_minor_node(fcp_global_dip, "fcp", S_IFCHR,
1186 		    0, DDI_PSEUDO, 0) == DDI_SUCCESS) {
1187 			ddi_report_dev(fcp_global_dip);
1188 		} else {
1189 			cmn_err(CE_WARN, "FCP: Cannot create minor node");
1190 			mutex_enter(&fcp_global_mutex);
1191 			fcp_global_dip = NULL;
1192 			mutex_exit(&fcp_global_mutex);
1193 
1194 			rval = DDI_FAILURE;
1195 		}
1196 		/*
1197 		 * We check the fcp_offline_delay property at this
1198 		 * point. This variable is global for the driver,
1199 		 * not specific to an instance.
1200 		 *
1201 		 * We do not recommend setting the value to less
1202 		 * than 10 seconds (RA_TOV_els), or greater than
1203 		 * 60 seconds.
1204 		 */
1205 		fcp_offline_delay = ddi_prop_get_int(DDI_DEV_T_ANY,
1206 		    devi, DDI_PROP_DONTPASS | DDI_PROP_NOTPROM,
1207 		    "fcp_offline_delay", FCP_OFFLINE_DELAY);
1208 		if ((fcp_offline_delay < 10) ||
1209 		    (fcp_offline_delay > 60)) {
1210 			cmn_err(CE_WARN, "Setting fcp_offline_delay "
1211 			    "to %d second(s). This is outside the "
1212 			    "recommended range of 10..60 seconds.",
1213 			    fcp_offline_delay);
1214 		}
1215 	}
1216 
1217 	return (rval);
1218 }
1219 
1220 
1221 /*ARGSUSED*/
1222 static int
1223 fcp_detach(dev_info_t *devi, ddi_detach_cmd_t cmd)
1224 {
1225 	int	res = DDI_SUCCESS;
1226 
1227 	FCP_DTRACE(fcp_logq, "fcp", fcp_trace,
1228 	    FCP_BUF_LEVEL_8, 0,	 "module detach: cmd=0x%x", cmd);
1229 
1230 	if (cmd == DDI_DETACH) {
1231 		/*
1232 		 * Check if there are active ports/threads. If there
1233 		 * are any, we will fail, else we will succeed (there
1234 		 * should not be much to clean up)
1235 		 */
1236 		mutex_enter(&fcp_global_mutex);
1237 		FCP_DTRACE(fcp_logq, "fcp",
1238 		    fcp_trace, FCP_BUF_LEVEL_8, 0,  "port_head=%p",
1239 		    (void *) fcp_port_head);
1240 
1241 		if (fcp_port_head == NULL) {
1242 			ddi_remove_minor_node(fcp_global_dip, NULL);
1243 			fcp_global_dip = NULL;
1244 			mutex_exit(&fcp_global_mutex);
1245 		} else {
1246 			mutex_exit(&fcp_global_mutex);
1247 			res = DDI_FAILURE;
1248 		}
1249 	}
1250 	FCP_DTRACE(fcp_logq, "fcp", fcp_trace,
1251 	    FCP_BUF_LEVEL_8, 0,	 "module detach returning %d", res);
1252 
1253 	return (res);
1254 }
1255 
1256 
1257 /* ARGSUSED */
1258 static int
1259 fcp_open(dev_t *devp, int flag, int otype, cred_t *credp)
1260 {
1261 	if (otype != OTYP_CHR) {
1262 		return (EINVAL);
1263 	}
1264 
1265 	/*
1266 	 * Allow only root to talk;
1267 	 */
1268 	if (drv_priv(credp)) {
1269 		return (EPERM);
1270 	}
1271 
1272 	mutex_enter(&fcp_global_mutex);
1273 	if (fcp_oflag & FCP_EXCL) {
1274 		mutex_exit(&fcp_global_mutex);
1275 		return (EBUSY);
1276 	}
1277 
1278 	if (flag & FEXCL) {
1279 		if (fcp_oflag & FCP_OPEN) {
1280 			mutex_exit(&fcp_global_mutex);
1281 			return (EBUSY);
1282 		}
1283 		fcp_oflag |= FCP_EXCL;
1284 	}
1285 	fcp_oflag |= FCP_OPEN;
1286 	mutex_exit(&fcp_global_mutex);
1287 
1288 	return (0);
1289 }
1290 
1291 
1292 /* ARGSUSED */
1293 static int
1294 fcp_close(dev_t dev, int flag, int otype, cred_t *credp)
1295 {
1296 	if (otype != OTYP_CHR) {
1297 		return (EINVAL);
1298 	}
1299 
1300 	mutex_enter(&fcp_global_mutex);
1301 	if (!(fcp_oflag & FCP_OPEN)) {
1302 		mutex_exit(&fcp_global_mutex);
1303 		return (ENODEV);
1304 	}
1305 	fcp_oflag = FCP_IDLE;
1306 	mutex_exit(&fcp_global_mutex);
1307 
1308 	return (0);
1309 }
1310 
1311 
1312 /*
1313  * fcp_ioctl
1314  *	Entry point for the FCP ioctls
1315  *
1316  * Input:
1317  *	See ioctl(9E)
1318  *
1319  * Output:
1320  *	See ioctl(9E)
1321  *
1322  * Returns:
1323  *	See ioctl(9E)
1324  *
1325  * Context:
1326  *	Kernel context.
1327  */
1328 /* ARGSUSED */
1329 static int
1330 fcp_ioctl(dev_t dev, int cmd, intptr_t data, int mode, cred_t *credp,
1331     int *rval)
1332 {
1333 	int			ret = 0;
1334 
1335 	mutex_enter(&fcp_global_mutex);
1336 	if (!(fcp_oflag & FCP_OPEN)) {
1337 		mutex_exit(&fcp_global_mutex);
1338 		return (ENXIO);
1339 	}
1340 	mutex_exit(&fcp_global_mutex);
1341 
1342 	switch (cmd) {
1343 	case FCP_TGT_INQUIRY:
1344 	case FCP_TGT_CREATE:
1345 	case FCP_TGT_DELETE:
1346 		ret = fcp_setup_device_data_ioctl(cmd,
1347 		    (struct fcp_ioctl *)data, mode, rval);
1348 		break;
1349 
1350 	case FCP_TGT_SEND_SCSI:
1351 		mutex_enter(&fcp_ioctl_mutex);
1352 		ret = fcp_setup_scsi_ioctl(
1353 		    (struct fcp_scsi_cmd *)data, mode, rval);
1354 		mutex_exit(&fcp_ioctl_mutex);
1355 		break;
1356 
1357 	case FCP_STATE_COUNT:
1358 		ret = fcp_get_statec_count((struct fcp_ioctl *)data,
1359 		    mode, rval);
1360 		break;
1361 	case FCP_GET_TARGET_MAPPINGS:
1362 		ret = fcp_get_target_mappings((struct fcp_ioctl *)data,
1363 		    mode, rval);
1364 		break;
1365 	default:
1366 		fcp_log(CE_WARN, NULL,
1367 		    "!Invalid ioctl opcode = 0x%x", cmd);
1368 		ret	= EINVAL;
1369 	}
1370 
1371 	return (ret);
1372 }
1373 
1374 
1375 /*
1376  * fcp_setup_device_data_ioctl
1377  *	Setup handler for the "device data" style of
1378  *	ioctl for FCP.	See "fcp_util.h" for data structure
1379  *	definition.
1380  *
1381  * Input:
1382  *	cmd	= FCP ioctl command
1383  *	data	= ioctl data
1384  *	mode	= See ioctl(9E)
1385  *
1386  * Output:
1387  *	data	= ioctl data
1388  *	rval	= return value - see ioctl(9E)
1389  *
1390  * Returns:
1391  *	See ioctl(9E)
1392  *
1393  * Context:
1394  *	Kernel context.
1395  */
1396 /* ARGSUSED */
1397 static int
1398 fcp_setup_device_data_ioctl(int cmd, struct fcp_ioctl *data, int mode,
1399     int *rval)
1400 {
1401 	struct fcp_port	*pptr;
1402 	struct	device_data	*dev_data;
1403 	uint32_t		link_cnt;
1404 	la_wwn_t		*wwn_ptr = NULL;
1405 	struct fcp_tgt		*ptgt = NULL;
1406 	struct fcp_lun		*plun = NULL;
1407 	int			i, error;
1408 	struct fcp_ioctl	fioctl;
1409 
1410 #ifdef	_MULTI_DATAMODEL
1411 	switch (ddi_model_convert_from(mode & FMODELS)) {
1412 	case DDI_MODEL_ILP32: {
1413 		struct fcp32_ioctl f32_ioctl;
1414 
1415 		if (ddi_copyin((void *)data, (void *)&f32_ioctl,
1416 		    sizeof (struct fcp32_ioctl), mode)) {
1417 			return (EFAULT);
1418 		}
1419 		fioctl.fp_minor = f32_ioctl.fp_minor;
1420 		fioctl.listlen = f32_ioctl.listlen;
1421 		fioctl.list = (caddr_t)(long)f32_ioctl.list;
1422 		break;
1423 	}
1424 	case DDI_MODEL_NONE:
1425 		if (ddi_copyin((void *)data, (void *)&fioctl,
1426 		    sizeof (struct fcp_ioctl), mode)) {
1427 			return (EFAULT);
1428 		}
1429 		break;
1430 	}
1431 
1432 #else	/* _MULTI_DATAMODEL */
1433 	if (ddi_copyin((void *)data, (void *)&fioctl,
1434 	    sizeof (struct fcp_ioctl), mode)) {
1435 		return (EFAULT);
1436 	}
1437 #endif	/* _MULTI_DATAMODEL */
1438 
1439 	/*
1440 	 * Right now we can assume that the minor number matches with
1441 	 * this instance of fp. If this changes we will need to
1442 	 * revisit this logic.
1443 	 */
1444 	mutex_enter(&fcp_global_mutex);
1445 	pptr = fcp_port_head;
1446 	while (pptr) {
1447 		if (pptr->port_instance == (uint32_t)fioctl.fp_minor) {
1448 			break;
1449 		} else {
1450 			pptr = pptr->port_next;
1451 		}
1452 	}
1453 	mutex_exit(&fcp_global_mutex);
1454 	if (pptr == NULL) {
1455 		return (ENXIO);
1456 	}
1457 	mutex_enter(&pptr->port_mutex);
1458 
1459 
1460 	if ((dev_data = kmem_zalloc((sizeof (struct device_data)) *
1461 	    fioctl.listlen, KM_NOSLEEP)) == NULL) {
1462 		mutex_exit(&pptr->port_mutex);
1463 		return (ENOMEM);
1464 	}
1465 
1466 	if (ddi_copyin(fioctl.list, dev_data,
1467 	    (sizeof (struct device_data)) * fioctl.listlen, mode)) {
1468 		kmem_free(dev_data, sizeof (*dev_data) * fioctl.listlen);
1469 		mutex_exit(&pptr->port_mutex);
1470 		return (EFAULT);
1471 	}
1472 	link_cnt = pptr->port_link_cnt;
1473 
1474 	if (cmd == FCP_TGT_INQUIRY) {
1475 		wwn_ptr = (la_wwn_t *)&(dev_data[0].dev_pwwn);
1476 		if (bcmp(wwn_ptr->raw_wwn, pptr->port_pwwn.raw_wwn,
1477 		    sizeof (wwn_ptr->raw_wwn)) == 0) {
1478 			/* This ioctl is requesting INQ info of local HBA */
1479 			mutex_exit(&pptr->port_mutex);
1480 			dev_data[0].dev0_type = DTYPE_UNKNOWN;
1481 			dev_data[0].dev_status = 0;
1482 			if (ddi_copyout(dev_data, fioctl.list,
1483 			    (sizeof (struct device_data)) * fioctl.listlen,
1484 			    mode)) {
1485 				kmem_free(dev_data,
1486 				    sizeof (*dev_data) * fioctl.listlen);
1487 				return (EFAULT);
1488 			}
1489 			kmem_free(dev_data,
1490 			    sizeof (*dev_data) * fioctl.listlen);
1491 #ifdef	_MULTI_DATAMODEL
1492 			switch (ddi_model_convert_from(mode & FMODELS)) {
1493 			case DDI_MODEL_ILP32: {
1494 				struct fcp32_ioctl f32_ioctl;
1495 				f32_ioctl.fp_minor = fioctl.fp_minor;
1496 				f32_ioctl.listlen = fioctl.listlen;
1497 				f32_ioctl.list = (caddr32_t)(long)fioctl.list;
1498 				if (ddi_copyout((void *)&f32_ioctl,
1499 				    (void *)data,
1500 				    sizeof (struct fcp32_ioctl), mode)) {
1501 					return (EFAULT);
1502 				}
1503 				break;
1504 			}
1505 			case DDI_MODEL_NONE:
1506 				if (ddi_copyout((void *)&fioctl, (void *)data,
1507 				    sizeof (struct fcp_ioctl), mode)) {
1508 					return (EFAULT);
1509 				}
1510 				break;
1511 			}
1512 #else	/* _MULTI_DATAMODEL */
1513 			if (ddi_copyout((void *)&fioctl, (void *)data,
1514 			    sizeof (struct fcp_ioctl), mode)) {
1515 				return (EFAULT);
1516 			}
1517 #endif	/* _MULTI_DATAMODEL */
1518 			return (0);
1519 		}
1520 	}
1521 
1522 	if (pptr->port_state & (FCP_STATE_INIT | FCP_STATE_OFFLINE)) {
1523 		kmem_free(dev_data, sizeof (*dev_data) * fioctl.listlen);
1524 		mutex_exit(&pptr->port_mutex);
1525 		return (ENXIO);
1526 	}
1527 
1528 	for (i = 0; (i < fioctl.listlen) && (link_cnt == pptr->port_link_cnt);
1529 	    i++) {
1530 		wwn_ptr = (la_wwn_t *)&(dev_data[i].dev_pwwn);
1531 
1532 		dev_data[i].dev0_type = DTYPE_UNKNOWN;
1533 
1534 
1535 		dev_data[i].dev_status = ENXIO;
1536 
1537 		if ((ptgt = fcp_lookup_target(pptr,
1538 		    (uchar_t *)wwn_ptr)) == NULL) {
1539 			mutex_exit(&pptr->port_mutex);
1540 			if (fc_ulp_get_remote_port(pptr->port_fp_handle,
1541 			    wwn_ptr, &error, 0) == NULL) {
1542 				dev_data[i].dev_status = ENODEV;
1543 				mutex_enter(&pptr->port_mutex);
1544 				continue;
1545 			} else {
1546 
1547 				dev_data[i].dev_status = EAGAIN;
1548 
1549 				mutex_enter(&pptr->port_mutex);
1550 				continue;
1551 			}
1552 		} else {
1553 			mutex_enter(&ptgt->tgt_mutex);
1554 			if (ptgt->tgt_state & (FCP_TGT_MARK |
1555 			    FCP_TGT_BUSY)) {
1556 				dev_data[i].dev_status = EAGAIN;
1557 				mutex_exit(&ptgt->tgt_mutex);
1558 				continue;
1559 			}
1560 
1561 			if (ptgt->tgt_state & FCP_TGT_OFFLINE) {
1562 				if (ptgt->tgt_icap && !ptgt->tgt_tcap) {
1563 					dev_data[i].dev_status = ENOTSUP;
1564 				} else {
1565 					dev_data[i].dev_status = ENXIO;
1566 				}
1567 				mutex_exit(&ptgt->tgt_mutex);
1568 				continue;
1569 			}
1570 
1571 			switch (cmd) {
1572 			case FCP_TGT_INQUIRY:
1573 				/*
1574 				 * The reason we give device type of
1575 				 * lun 0 only even though in some
1576 				 * cases(like maxstrat) lun 0 device
1577 				 * type may be 0x3f(invalid) is that
1578 				 * for bridge boxes target will appear
1579 				 * as luns and the first lun could be
1580 				 * a device that utility may not care
1581 				 * about (like a tape device).
1582 				 */
1583 				dev_data[i].dev_lun_cnt = ptgt->tgt_lun_cnt;
1584 				dev_data[i].dev_status = 0;
1585 				mutex_exit(&ptgt->tgt_mutex);
1586 
1587 				if ((plun = fcp_get_lun(ptgt, 0)) == NULL) {
1588 					dev_data[i].dev0_type = DTYPE_UNKNOWN;
1589 				} else {
1590 					dev_data[i].dev0_type = plun->lun_type;
1591 				}
1592 				mutex_enter(&ptgt->tgt_mutex);
1593 				break;
1594 
1595 			case FCP_TGT_CREATE:
1596 				mutex_exit(&ptgt->tgt_mutex);
1597 				mutex_exit(&pptr->port_mutex);
1598 
1599 				/*
1600 				 * serialize state change call backs.
1601 				 * only one call back will be handled
1602 				 * at a time.
1603 				 */
1604 				mutex_enter(&fcp_global_mutex);
1605 				if (fcp_oflag & FCP_BUSY) {
1606 					mutex_exit(&fcp_global_mutex);
1607 					if (dev_data) {
1608 						kmem_free(dev_data,
1609 						    sizeof (*dev_data) *
1610 						    fioctl.listlen);
1611 					}
1612 					return (EBUSY);
1613 				}
1614 				fcp_oflag |= FCP_BUSY;
1615 				mutex_exit(&fcp_global_mutex);
1616 
1617 				dev_data[i].dev_status =
1618 				    fcp_create_on_demand(pptr,
1619 				    wwn_ptr->raw_wwn);
1620 
1621 				if (dev_data[i].dev_status != 0) {
1622 					char	buf[25];
1623 
1624 					for (i = 0; i < FC_WWN_SIZE; i++) {
1625 						(void) sprintf(&buf[i << 1],
1626 						    "%02x",
1627 						    wwn_ptr->raw_wwn[i]);
1628 					}
1629 
1630 					fcp_log(CE_WARN, pptr->port_dip,
1631 					    "!Failed to create nodes for"
1632 					    " pwwn=%s; error=%x", buf,
1633 					    dev_data[i].dev_status);
1634 				}
1635 
1636 				/* allow state change call backs again */
1637 				mutex_enter(&fcp_global_mutex);
1638 				fcp_oflag &= ~FCP_BUSY;
1639 				mutex_exit(&fcp_global_mutex);
1640 
1641 				mutex_enter(&pptr->port_mutex);
1642 				mutex_enter(&ptgt->tgt_mutex);
1643 
1644 				break;
1645 
1646 			case FCP_TGT_DELETE:
1647 				break;
1648 
1649 			default:
1650 				fcp_log(CE_WARN, pptr->port_dip,
1651 				    "!Invalid device data ioctl "
1652 				    "opcode = 0x%x", cmd);
1653 			}
1654 			mutex_exit(&ptgt->tgt_mutex);
1655 		}
1656 	}
1657 	mutex_exit(&pptr->port_mutex);
1658 
1659 	if (ddi_copyout(dev_data, fioctl.list,
1660 	    (sizeof (struct device_data)) * fioctl.listlen, mode)) {
1661 		kmem_free(dev_data, sizeof (*dev_data) * fioctl.listlen);
1662 		return (EFAULT);
1663 	}
1664 	kmem_free(dev_data, sizeof (*dev_data) * fioctl.listlen);
1665 
1666 #ifdef	_MULTI_DATAMODEL
1667 	switch (ddi_model_convert_from(mode & FMODELS)) {
1668 	case DDI_MODEL_ILP32: {
1669 		struct fcp32_ioctl f32_ioctl;
1670 
1671 		f32_ioctl.fp_minor = fioctl.fp_minor;
1672 		f32_ioctl.listlen = fioctl.listlen;
1673 		f32_ioctl.list = (caddr32_t)(long)fioctl.list;
1674 		if (ddi_copyout((void *)&f32_ioctl, (void *)data,
1675 		    sizeof (struct fcp32_ioctl), mode)) {
1676 			return (EFAULT);
1677 		}
1678 		break;
1679 	}
1680 	case DDI_MODEL_NONE:
1681 		if (ddi_copyout((void *)&fioctl, (void *)data,
1682 		    sizeof (struct fcp_ioctl), mode)) {
1683 			return (EFAULT);
1684 		}
1685 		break;
1686 	}
1687 #else	/* _MULTI_DATAMODEL */
1688 
1689 	if (ddi_copyout((void *)&fioctl, (void *)data,
1690 	    sizeof (struct fcp_ioctl), mode)) {
1691 		return (EFAULT);
1692 	}
1693 #endif	/* _MULTI_DATAMODEL */
1694 
1695 	return (0);
1696 }
1697 
1698 /*
1699  * Fetch the target mappings (path, etc.) for all LUNs
1700  * on this port.
1701  */
1702 /* ARGSUSED */
1703 static int
1704 fcp_get_target_mappings(struct fcp_ioctl *data,
1705     int mode, int *rval)
1706 {
1707 	struct fcp_port	    *pptr;
1708 	fc_hba_target_mappings_t    *mappings;
1709 	fc_hba_mapping_entry_t	    *map;
1710 	struct fcp_tgt	    *ptgt = NULL;
1711 	struct fcp_lun	    *plun = NULL;
1712 	int			    i, mapIndex, mappingSize;
1713 	int			    listlen;
1714 	struct fcp_ioctl	    fioctl;
1715 	char			    *path;
1716 	fcp_ent_addr_t		    sam_lun_addr;
1717 
1718 #ifdef	_MULTI_DATAMODEL
1719 	switch (ddi_model_convert_from(mode & FMODELS)) {
1720 	case DDI_MODEL_ILP32: {
1721 		struct fcp32_ioctl f32_ioctl;
1722 
1723 		if (ddi_copyin((void *)data, (void *)&f32_ioctl,
1724 		    sizeof (struct fcp32_ioctl), mode)) {
1725 			return (EFAULT);
1726 		}
1727 		fioctl.fp_minor = f32_ioctl.fp_minor;
1728 		fioctl.listlen = f32_ioctl.listlen;
1729 		fioctl.list = (caddr_t)(long)f32_ioctl.list;
1730 		break;
1731 	}
1732 	case DDI_MODEL_NONE:
1733 		if (ddi_copyin((void *)data, (void *)&fioctl,
1734 		    sizeof (struct fcp_ioctl), mode)) {
1735 			return (EFAULT);
1736 		}
1737 		break;
1738 	}
1739 
1740 #else	/* _MULTI_DATAMODEL */
1741 	if (ddi_copyin((void *)data, (void *)&fioctl,
1742 	    sizeof (struct fcp_ioctl), mode)) {
1743 		return (EFAULT);
1744 	}
1745 #endif	/* _MULTI_DATAMODEL */
1746 
1747 	/*
1748 	 * Right now we can assume that the minor number matches with
1749 	 * this instance of fp. If this changes we will need to
1750 	 * revisit this logic.
1751 	 */
1752 	mutex_enter(&fcp_global_mutex);
1753 	pptr = fcp_port_head;
1754 	while (pptr) {
1755 		if (pptr->port_instance == (uint32_t)fioctl.fp_minor) {
1756 			break;
1757 		} else {
1758 			pptr = pptr->port_next;
1759 		}
1760 	}
1761 	mutex_exit(&fcp_global_mutex);
1762 	if (pptr == NULL) {
1763 		cmn_err(CE_NOTE, "target mappings: unknown instance number: %d",
1764 		    fioctl.fp_minor);
1765 		return (ENXIO);
1766 	}
1767 
1768 
1769 	/* We use listlen to show the total buffer size */
1770 	mappingSize = fioctl.listlen;
1771 
1772 	/* Now calculate how many mapping entries will fit */
1773 	listlen = fioctl.listlen + sizeof (fc_hba_mapping_entry_t)
1774 	    - sizeof (fc_hba_target_mappings_t);
1775 	if (listlen <= 0) {
1776 		cmn_err(CE_NOTE, "target mappings: Insufficient buffer");
1777 		return (ENXIO);
1778 	}
1779 	listlen = listlen / sizeof (fc_hba_mapping_entry_t);
1780 
1781 	if ((mappings = kmem_zalloc(mappingSize, KM_SLEEP)) == NULL) {
1782 		return (ENOMEM);
1783 	}
1784 	mappings->version = FC_HBA_TARGET_MAPPINGS_VERSION;
1785 
1786 	/* Now get to work */
1787 	mapIndex = 0;
1788 
1789 	mutex_enter(&pptr->port_mutex);
1790 	/* Loop through all targets on this port */
1791 	for (i = 0; i < FCP_NUM_HASH; i++) {
1792 		for (ptgt = pptr->port_tgt_hash_table[i]; ptgt != NULL;
1793 		    ptgt = ptgt->tgt_next) {
1794 
1795 
1796 			/* Loop through all LUNs on this target */
1797 			for (plun = ptgt->tgt_lun; plun != NULL;
1798 			    plun = plun->lun_next) {
1799 				if (plun->lun_state & FCP_LUN_OFFLINE) {
1800 					continue;
1801 				}
1802 
1803 				path = fcp_get_lun_path(plun);
1804 				if (path == NULL) {
1805 					continue;
1806 				}
1807 
1808 				if (mapIndex >= listlen) {
1809 					mapIndex ++;
1810 					kmem_free(path, MAXPATHLEN);
1811 					continue;
1812 				}
1813 				map = &mappings->entries[mapIndex++];
1814 				bcopy(path, map->targetDriver,
1815 				    sizeof (map->targetDriver));
1816 				map->d_id = ptgt->tgt_d_id;
1817 				map->busNumber = 0;
1818 				map->targetNumber = ptgt->tgt_d_id;
1819 				map->osLUN = plun->lun_num;
1820 
1821 				/*
1822 				 * We had swapped lun when we stored it in
1823 				 * lun_addr. We need to swap it back before
1824 				 * returning it to user land
1825 				 */
1826 
1827 				sam_lun_addr.ent_addr_0 =
1828 				    BE_16(plun->lun_addr.ent_addr_0);
1829 				sam_lun_addr.ent_addr_1 =
1830 				    BE_16(plun->lun_addr.ent_addr_1);
1831 				sam_lun_addr.ent_addr_2 =
1832 				    BE_16(plun->lun_addr.ent_addr_2);
1833 				sam_lun_addr.ent_addr_3 =
1834 				    BE_16(plun->lun_addr.ent_addr_3);
1835 
1836 				bcopy(&sam_lun_addr, &map->samLUN,
1837 				    FCP_LUN_SIZE);
1838 				bcopy(ptgt->tgt_node_wwn.raw_wwn,
1839 				    map->NodeWWN.raw_wwn, sizeof (la_wwn_t));
1840 				bcopy(ptgt->tgt_port_wwn.raw_wwn,
1841 				    map->PortWWN.raw_wwn, sizeof (la_wwn_t));
1842 
1843 				if (plun->lun_guid) {
1844 
1845 					/* convert ascii wwn to bytes */
1846 					fcp_ascii_to_wwn(plun->lun_guid,
1847 					    map->guid, sizeof (map->guid));
1848 
1849 					if ((sizeof (map->guid)) <
1850 					    plun->lun_guid_size / 2) {
1851 						cmn_err(CE_WARN,
1852 						    "fcp_get_target_mappings:"
1853 						    "guid copy space "
1854 						    "insufficient."
1855 						    "Copy Truncation - "
1856 						    "available %d; need %d",
1857 						    (int)sizeof (map->guid),
1858 						    (int)
1859 						    plun->lun_guid_size / 2);
1860 					}
1861 				}
1862 				kmem_free(path, MAXPATHLEN);
1863 			}
1864 		}
1865 	}
1866 	mutex_exit(&pptr->port_mutex);
1867 	mappings->numLuns = mapIndex;
1868 
1869 	if (ddi_copyout(mappings, fioctl.list, mappingSize, mode)) {
1870 		kmem_free(mappings, mappingSize);
1871 		return (EFAULT);
1872 	}
1873 	kmem_free(mappings, mappingSize);
1874 
1875 #ifdef	_MULTI_DATAMODEL
1876 	switch (ddi_model_convert_from(mode & FMODELS)) {
1877 	case DDI_MODEL_ILP32: {
1878 		struct fcp32_ioctl f32_ioctl;
1879 
1880 		f32_ioctl.fp_minor = fioctl.fp_minor;
1881 		f32_ioctl.listlen = fioctl.listlen;
1882 		f32_ioctl.list = (caddr32_t)(long)fioctl.list;
1883 		if (ddi_copyout((void *)&f32_ioctl, (void *)data,
1884 		    sizeof (struct fcp32_ioctl), mode)) {
1885 			return (EFAULT);
1886 		}
1887 		break;
1888 	}
1889 	case DDI_MODEL_NONE:
1890 		if (ddi_copyout((void *)&fioctl, (void *)data,
1891 		    sizeof (struct fcp_ioctl), mode)) {
1892 			return (EFAULT);
1893 		}
1894 		break;
1895 	}
1896 #else	/* _MULTI_DATAMODEL */
1897 
1898 	if (ddi_copyout((void *)&fioctl, (void *)data,
1899 	    sizeof (struct fcp_ioctl), mode)) {
1900 		return (EFAULT);
1901 	}
1902 #endif	/* _MULTI_DATAMODEL */
1903 
1904 	return (0);
1905 }
1906 
1907 /*
1908  * fcp_setup_scsi_ioctl
1909  *	Setup handler for the "scsi passthru" style of
1910  *	ioctl for FCP.	See "fcp_util.h" for data structure
1911  *	definition.
1912  *
1913  * Input:
1914  *	u_fscsi	= ioctl data (user address space)
1915  *	mode	= See ioctl(9E)
1916  *
1917  * Output:
1918  *	u_fscsi	= ioctl data (user address space)
1919  *	rval	= return value - see ioctl(9E)
1920  *
1921  * Returns:
1922  *	0	= OK
1923  *	EAGAIN	= See errno.h
1924  *	EBUSY	= See errno.h
1925  *	EFAULT	= See errno.h
1926  *	EINTR	= See errno.h
1927  *	EINVAL	= See errno.h
1928  *	EIO	= See errno.h
1929  *	ENOMEM	= See errno.h
1930  *	ENXIO	= See errno.h
1931  *
1932  * Context:
1933  *	Kernel context.
1934  */
1935 /* ARGSUSED */
1936 static int
1937 fcp_setup_scsi_ioctl(struct fcp_scsi_cmd *u_fscsi,
1938     int mode, int *rval)
1939 {
1940 	int			ret		= 0;
1941 	int			temp_ret;
1942 	caddr_t			k_cdbbufaddr	= NULL;
1943 	caddr_t			k_bufaddr	= NULL;
1944 	caddr_t			k_rqbufaddr	= NULL;
1945 	caddr_t			u_cdbbufaddr;
1946 	caddr_t			u_bufaddr;
1947 	caddr_t			u_rqbufaddr;
1948 	struct fcp_scsi_cmd	k_fscsi;
1949 
1950 	/*
1951 	 * Get fcp_scsi_cmd array element from user address space
1952 	 */
1953 	if ((ret = fcp_copyin_scsi_cmd((caddr_t)u_fscsi, &k_fscsi, mode))
1954 	    != 0) {
1955 		return (ret);
1956 	}
1957 
1958 
1959 	/*
1960 	 * Even though kmem_alloc() checks the validity of the
1961 	 * buffer length, this check is needed when the
1962 	 * kmem_flags set and the zero buffer length is passed.
1963 	 */
1964 	if ((k_fscsi.scsi_cdblen <= 0) ||
1965 	    (k_fscsi.scsi_buflen <= 0) ||
1966 	    (k_fscsi.scsi_buflen > FCP_MAX_RESPONSE_LEN) ||
1967 	    (k_fscsi.scsi_rqlen <= 0) ||
1968 	    (k_fscsi.scsi_rqlen > FCP_MAX_SENSE_LEN)) {
1969 		return (EINVAL);
1970 	}
1971 
1972 	/*
1973 	 * Allocate data for fcp_scsi_cmd pointer fields
1974 	 */
1975 	if (ret == 0) {
1976 		k_cdbbufaddr = kmem_alloc(k_fscsi.scsi_cdblen, KM_NOSLEEP);
1977 		k_bufaddr    = kmem_alloc(k_fscsi.scsi_buflen, KM_NOSLEEP);
1978 		k_rqbufaddr  = kmem_alloc(k_fscsi.scsi_rqlen,  KM_NOSLEEP);
1979 
1980 		if (k_cdbbufaddr == NULL ||
1981 		    k_bufaddr	 == NULL ||
1982 		    k_rqbufaddr	 == NULL) {
1983 			ret = ENOMEM;
1984 		}
1985 	}
1986 
1987 	/*
1988 	 * Get fcp_scsi_cmd pointer fields from user
1989 	 * address space
1990 	 */
1991 	if (ret == 0) {
1992 		u_cdbbufaddr = k_fscsi.scsi_cdbbufaddr;
1993 		u_bufaddr    = k_fscsi.scsi_bufaddr;
1994 		u_rqbufaddr  = k_fscsi.scsi_rqbufaddr;
1995 
1996 		if (ddi_copyin(u_cdbbufaddr,
1997 		    k_cdbbufaddr,
1998 		    k_fscsi.scsi_cdblen,
1999 		    mode)) {
2000 			ret = EFAULT;
2001 		} else if (ddi_copyin(u_bufaddr,
2002 		    k_bufaddr,
2003 		    k_fscsi.scsi_buflen,
2004 		    mode)) {
2005 			ret = EFAULT;
2006 		} else if (ddi_copyin(u_rqbufaddr,
2007 		    k_rqbufaddr,
2008 		    k_fscsi.scsi_rqlen,
2009 		    mode)) {
2010 			ret = EFAULT;
2011 		}
2012 	}
2013 
2014 	/*
2015 	 * Send scsi command (blocking)
2016 	 */
2017 	if (ret == 0) {
2018 		/*
2019 		 * Prior to sending the scsi command, the
2020 		 * fcp_scsi_cmd data structure must contain kernel,
2021 		 * not user, addresses.
2022 		 */
2023 		k_fscsi.scsi_cdbbufaddr	= k_cdbbufaddr;
2024 		k_fscsi.scsi_bufaddr	= k_bufaddr;
2025 		k_fscsi.scsi_rqbufaddr	= k_rqbufaddr;
2026 
2027 		ret = fcp_send_scsi_ioctl(&k_fscsi);
2028 
2029 		/*
2030 		 * After sending the scsi command, the
2031 		 * fcp_scsi_cmd data structure must contain user,
2032 		 * not kernel, addresses.
2033 		 */
2034 		k_fscsi.scsi_cdbbufaddr	= u_cdbbufaddr;
2035 		k_fscsi.scsi_bufaddr	= u_bufaddr;
2036 		k_fscsi.scsi_rqbufaddr	= u_rqbufaddr;
2037 	}
2038 
2039 	/*
2040 	 * Put fcp_scsi_cmd pointer fields to user address space
2041 	 */
2042 	if (ret == 0) {
2043 		if (ddi_copyout(k_cdbbufaddr,
2044 		    u_cdbbufaddr,
2045 		    k_fscsi.scsi_cdblen,
2046 		    mode)) {
2047 			ret = EFAULT;
2048 		} else if (ddi_copyout(k_bufaddr,
2049 		    u_bufaddr,
2050 		    k_fscsi.scsi_buflen,
2051 		    mode)) {
2052 			ret = EFAULT;
2053 		} else if (ddi_copyout(k_rqbufaddr,
2054 		    u_rqbufaddr,
2055 		    k_fscsi.scsi_rqlen,
2056 		    mode)) {
2057 			ret = EFAULT;
2058 		}
2059 	}
2060 
2061 	/*
2062 	 * Free data for fcp_scsi_cmd pointer fields
2063 	 */
2064 	if (k_cdbbufaddr != NULL) {
2065 		kmem_free(k_cdbbufaddr, k_fscsi.scsi_cdblen);
2066 	}
2067 	if (k_bufaddr != NULL) {
2068 		kmem_free(k_bufaddr, k_fscsi.scsi_buflen);
2069 	}
2070 	if (k_rqbufaddr != NULL) {
2071 		kmem_free(k_rqbufaddr, k_fscsi.scsi_rqlen);
2072 	}
2073 
2074 	/*
2075 	 * Put fcp_scsi_cmd array element to user address space
2076 	 */
2077 	temp_ret = fcp_copyout_scsi_cmd(&k_fscsi, (caddr_t)u_fscsi, mode);
2078 	if (temp_ret != 0) {
2079 		ret = temp_ret;
2080 	}
2081 
2082 	/*
2083 	 * Return status
2084 	 */
2085 	return (ret);
2086 }
2087 
2088 
2089 /*
2090  * fcp_copyin_scsi_cmd
2091  *	Copy in fcp_scsi_cmd data structure from user address space.
2092  *	The data may be in 32 bit or 64 bit modes.
2093  *
2094  * Input:
2095  *	base_addr	= from address (user address space)
2096  *	mode		= See ioctl(9E) and ddi_copyin(9F)
2097  *
2098  * Output:
2099  *	fscsi		= to address (kernel address space)
2100  *
2101  * Returns:
2102  *	0	= OK
2103  *	EFAULT	= Error
2104  *
2105  * Context:
2106  *	Kernel context.
2107  */
2108 static int
2109 fcp_copyin_scsi_cmd(caddr_t base_addr, struct fcp_scsi_cmd *fscsi, int mode)
2110 {
2111 #ifdef	_MULTI_DATAMODEL
2112 	struct fcp32_scsi_cmd	f32scsi;
2113 
2114 	switch (ddi_model_convert_from(mode & FMODELS)) {
2115 	case DDI_MODEL_ILP32:
2116 		/*
2117 		 * Copy data from user address space
2118 		 */
2119 		if (ddi_copyin((void *)base_addr,
2120 		    &f32scsi,
2121 		    sizeof (struct fcp32_scsi_cmd),
2122 		    mode)) {
2123 			return (EFAULT);
2124 		}
2125 		/*
2126 		 * Convert from 32 bit to 64 bit
2127 		 */
2128 		FCP32_SCSI_CMD_TO_FCP_SCSI_CMD(&f32scsi, fscsi);
2129 		break;
2130 	case DDI_MODEL_NONE:
2131 		/*
2132 		 * Copy data from user address space
2133 		 */
2134 		if (ddi_copyin((void *)base_addr,
2135 		    fscsi,
2136 		    sizeof (struct fcp_scsi_cmd),
2137 		    mode)) {
2138 			return (EFAULT);
2139 		}
2140 		break;
2141 	}
2142 #else	/* _MULTI_DATAMODEL */
2143 	/*
2144 	 * Copy data from user address space
2145 	 */
2146 	if (ddi_copyin((void *)base_addr,
2147 	    fscsi,
2148 	    sizeof (struct fcp_scsi_cmd),
2149 	    mode)) {
2150 		return (EFAULT);
2151 	}
2152 #endif	/* _MULTI_DATAMODEL */
2153 
2154 	return (0);
2155 }
2156 
2157 
2158 /*
2159  * fcp_copyout_scsi_cmd
2160  *	Copy out fcp_scsi_cmd data structure to user address space.
2161  *	The data may be in 32 bit or 64 bit modes.
2162  *
2163  * Input:
2164  *	fscsi		= to address (kernel address space)
2165  *	mode		= See ioctl(9E) and ddi_copyin(9F)
2166  *
2167  * Output:
2168  *	base_addr	= from address (user address space)
2169  *
2170  * Returns:
2171  *	0	= OK
2172  *	EFAULT	= Error
2173  *
2174  * Context:
2175  *	Kernel context.
2176  */
2177 static int
2178 fcp_copyout_scsi_cmd(struct fcp_scsi_cmd *fscsi, caddr_t base_addr, int mode)
2179 {
2180 #ifdef	_MULTI_DATAMODEL
2181 	struct fcp32_scsi_cmd	f32scsi;
2182 
2183 	switch (ddi_model_convert_from(mode & FMODELS)) {
2184 	case DDI_MODEL_ILP32:
2185 		/*
2186 		 * Convert from 64 bit to 32 bit
2187 		 */
2188 		FCP_SCSI_CMD_TO_FCP32_SCSI_CMD(fscsi, &f32scsi);
2189 		/*
2190 		 * Copy data to user address space
2191 		 */
2192 		if (ddi_copyout(&f32scsi,
2193 		    (void *)base_addr,
2194 		    sizeof (struct fcp32_scsi_cmd),
2195 		    mode)) {
2196 			return (EFAULT);
2197 		}
2198 		break;
2199 	case DDI_MODEL_NONE:
2200 		/*
2201 		 * Copy data to user address space
2202 		 */
2203 		if (ddi_copyout(fscsi,
2204 		    (void *)base_addr,
2205 		    sizeof (struct fcp_scsi_cmd),
2206 		    mode)) {
2207 			return (EFAULT);
2208 		}
2209 		break;
2210 	}
2211 #else	/* _MULTI_DATAMODEL */
2212 	/*
2213 	 * Copy data to user address space
2214 	 */
2215 	if (ddi_copyout(fscsi,
2216 	    (void *)base_addr,
2217 	    sizeof (struct fcp_scsi_cmd),
2218 	    mode)) {
2219 		return (EFAULT);
2220 	}
2221 #endif	/* _MULTI_DATAMODEL */
2222 
2223 	return (0);
2224 }
2225 
2226 
2227 /*
2228  * fcp_send_scsi_ioctl
2229  *	Sends the SCSI command in blocking mode.
2230  *
2231  * Input:
2232  *	fscsi		= SCSI command data structure
2233  *
2234  * Output:
2235  *	fscsi		= SCSI command data structure
2236  *
2237  * Returns:
2238  *	0	= OK
2239  *	EAGAIN	= See errno.h
2240  *	EBUSY	= See errno.h
2241  *	EINTR	= See errno.h
2242  *	EINVAL	= See errno.h
2243  *	EIO	= See errno.h
2244  *	ENOMEM	= See errno.h
2245  *	ENXIO	= See errno.h
2246  *
2247  * Context:
2248  *	Kernel context.
2249  */
2250 static int
2251 fcp_send_scsi_ioctl(struct fcp_scsi_cmd *fscsi)
2252 {
2253 	struct fcp_lun	*plun		= NULL;
2254 	struct fcp_port	*pptr		= NULL;
2255 	struct fcp_tgt	*ptgt		= NULL;
2256 	fc_packet_t		*fpkt		= NULL;
2257 	struct fcp_ipkt	*icmd		= NULL;
2258 	int			target_created	= FALSE;
2259 	fc_frame_hdr_t		*hp;
2260 	struct fcp_cmd		fcp_cmd;
2261 	struct fcp_cmd		*fcmd;
2262 	union scsi_cdb		*scsi_cdb;
2263 	la_wwn_t		*wwn_ptr;
2264 	int			nodma;
2265 	struct fcp_rsp		*rsp;
2266 	struct fcp_rsp_info	*rsp_info;
2267 	caddr_t			rsp_sense;
2268 	int			buf_len;
2269 	int			info_len;
2270 	int			sense_len;
2271 	struct scsi_extended_sense	*sense_to = NULL;
2272 	timeout_id_t		tid;
2273 	uint8_t			reconfig_lun = FALSE;
2274 	uint8_t			reconfig_pending = FALSE;
2275 	uint8_t			scsi_cmd;
2276 	int			rsp_len;
2277 	int			cmd_index;
2278 	int			fc_status;
2279 	int			pkt_state;
2280 	int			pkt_action;
2281 	int			pkt_reason;
2282 	int			ret, xport_retval = ~FC_SUCCESS;
2283 	int			lcount;
2284 	int			tcount;
2285 	int			reconfig_status;
2286 	int			port_busy = FALSE;
2287 	uchar_t			*lun_string;
2288 
2289 	/*
2290 	 * Check valid SCSI command
2291 	 */
2292 	scsi_cmd = ((uint8_t *)fscsi->scsi_cdbbufaddr)[0];
2293 	ret = EINVAL;
2294 	for (cmd_index = 0;
2295 	    cmd_index < FCP_NUM_ELEMENTS(scsi_ioctl_list) &&
2296 	    ret != 0;
2297 	    cmd_index++) {
2298 		/*
2299 		 * First byte of CDB is the SCSI command
2300 		 */
2301 		if (scsi_ioctl_list[cmd_index] == scsi_cmd) {
2302 			ret = 0;
2303 		}
2304 	}
2305 
2306 	/*
2307 	 * Check inputs
2308 	 */
2309 	if (fscsi->scsi_flags != FCP_SCSI_READ) {
2310 		ret = EINVAL;
2311 	} else if (fscsi->scsi_cdblen > FCP_CDB_SIZE) {
2312 		/* no larger than */
2313 		ret = EINVAL;
2314 	}
2315 
2316 
2317 	/*
2318 	 * Find FC port
2319 	 */
2320 	if (ret == 0) {
2321 		/*
2322 		 * Acquire global mutex
2323 		 */
2324 		mutex_enter(&fcp_global_mutex);
2325 
2326 		pptr = fcp_port_head;
2327 		while (pptr) {
2328 			if (pptr->port_instance ==
2329 			    (uint32_t)fscsi->scsi_fc_port_num) {
2330 				break;
2331 			} else {
2332 				pptr = pptr->port_next;
2333 			}
2334 		}
2335 
2336 		if (pptr == NULL) {
2337 			ret = ENXIO;
2338 		} else {
2339 			/*
2340 			 * fc_ulp_busy_port can raise power
2341 			 *  so, we must not hold any mutexes involved in PM
2342 			 */
2343 			mutex_exit(&fcp_global_mutex);
2344 			ret = fc_ulp_busy_port(pptr->port_fp_handle);
2345 		}
2346 
2347 		if (ret == 0) {
2348 
2349 			/* remember port is busy, so we will release later */
2350 			port_busy = TRUE;
2351 
2352 			/*
2353 			 * If there is a reconfiguration in progress, wait
2354 			 * for it to complete.
2355 			 */
2356 
2357 			fcp_reconfig_wait(pptr);
2358 
2359 			/* reacquire mutexes in order */
2360 			mutex_enter(&fcp_global_mutex);
2361 			mutex_enter(&pptr->port_mutex);
2362 
2363 			/*
2364 			 * Will port accept DMA?
2365 			 */
2366 			nodma = (pptr->port_fcp_dma == FC_NO_DVMA_SPACE)
2367 			    ? 1 : 0;
2368 
2369 			/*
2370 			 * If init or offline, device not known
2371 			 *
2372 			 * If we are discovering (onlining), we can
2373 			 * NOT obviously provide reliable data about
2374 			 * devices until it is complete
2375 			 */
2376 			if (pptr->port_state &	  (FCP_STATE_INIT |
2377 			    FCP_STATE_OFFLINE)) {
2378 				ret = ENXIO;
2379 			} else if (pptr->port_state & FCP_STATE_ONLINING) {
2380 				ret = EBUSY;
2381 			} else {
2382 				/*
2383 				 * Find target from pwwn
2384 				 *
2385 				 * The wwn must be put into a local
2386 				 * variable to ensure alignment.
2387 				 */
2388 				wwn_ptr = (la_wwn_t *)&(fscsi->scsi_fc_pwwn);
2389 				ptgt = fcp_lookup_target(pptr,
2390 				    (uchar_t *)wwn_ptr);
2391 
2392 				/*
2393 				 * If target not found,
2394 				 */
2395 				if (ptgt == NULL) {
2396 					/*
2397 					 * Note: Still have global &
2398 					 * port mutexes
2399 					 */
2400 					mutex_exit(&pptr->port_mutex);
2401 					ptgt = fcp_port_create_tgt(pptr,
2402 					    wwn_ptr, &ret, &fc_status,
2403 					    &pkt_state, &pkt_action,
2404 					    &pkt_reason);
2405 					mutex_enter(&pptr->port_mutex);
2406 
2407 					fscsi->scsi_fc_status  = fc_status;
2408 					fscsi->scsi_pkt_state  =
2409 					    (uchar_t)pkt_state;
2410 					fscsi->scsi_pkt_reason = pkt_reason;
2411 					fscsi->scsi_pkt_action =
2412 					    (uchar_t)pkt_action;
2413 
2414 					if (ptgt != NULL) {
2415 						target_created = TRUE;
2416 					} else if (ret == 0) {
2417 						ret = ENOMEM;
2418 					}
2419 				}
2420 
2421 				if (ret == 0) {
2422 					/*
2423 					 * Acquire target
2424 					 */
2425 					mutex_enter(&ptgt->tgt_mutex);
2426 
2427 					/*
2428 					 * If target is mark or busy,
2429 					 * then target can not be used
2430 					 */
2431 					if (ptgt->tgt_state &
2432 					    (FCP_TGT_MARK |
2433 					    FCP_TGT_BUSY)) {
2434 						ret = EBUSY;
2435 					} else {
2436 						/*
2437 						 * Mark target as busy
2438 						 */
2439 						ptgt->tgt_state |=
2440 						    FCP_TGT_BUSY;
2441 					}
2442 
2443 					/*
2444 					 * Release target
2445 					 */
2446 					lcount = pptr->port_link_cnt;
2447 					tcount = ptgt->tgt_change_cnt;
2448 					mutex_exit(&ptgt->tgt_mutex);
2449 				}
2450 			}
2451 
2452 			/*
2453 			 * Release port
2454 			 */
2455 			mutex_exit(&pptr->port_mutex);
2456 		}
2457 
2458 		/*
2459 		 * Release global mutex
2460 		 */
2461 		mutex_exit(&fcp_global_mutex);
2462 	}
2463 
2464 	if (ret == 0) {
2465 		uint64_t belun = BE_64(fscsi->scsi_lun);
2466 
2467 		/*
2468 		 * If it's a target device, find lun from pwwn
2469 		 * The wwn must be put into a local
2470 		 * variable to ensure alignment.
2471 		 */
2472 		mutex_enter(&pptr->port_mutex);
2473 		wwn_ptr = (la_wwn_t *)&(fscsi->scsi_fc_pwwn);
2474 		if (!ptgt->tgt_tcap && ptgt->tgt_icap) {
2475 			/* this is not a target */
2476 			fscsi->scsi_fc_status = FC_DEVICE_NOT_TGT;
2477 			ret = ENXIO;
2478 		} else if ((belun << 16) != 0) {
2479 			/*
2480 			 * Since fcp only support PD and LU addressing method
2481 			 * so far, the last 6 bytes of a valid LUN are expected
2482 			 * to be filled with 00h.
2483 			 */
2484 			fscsi->scsi_fc_status = FC_INVALID_LUN;
2485 			cmn_err(CE_WARN, "fcp: Unsupported LUN addressing"
2486 			    " method 0x%02x with LUN number 0x%016" PRIx64,
2487 			    (uint8_t)(belun >> 62), belun);
2488 			ret = ENXIO;
2489 		} else if ((plun = fcp_lookup_lun(pptr, (uchar_t *)wwn_ptr,
2490 		    (uint16_t)((belun >> 48) & 0x3fff))) == NULL) {
2491 			/*
2492 			 * This is a SCSI target, but no LUN at this
2493 			 * address.
2494 			 *
2495 			 * In the future, we may want to send this to
2496 			 * the target, and let it respond
2497 			 * appropriately
2498 			 */
2499 			ret = ENXIO;
2500 		}
2501 		mutex_exit(&pptr->port_mutex);
2502 	}
2503 
2504 	/*
2505 	 * Finished grabbing external resources
2506 	 * Allocate internal packet (icmd)
2507 	 */
2508 	if (ret == 0) {
2509 		/*
2510 		 * Calc rsp len assuming rsp info included
2511 		 */
2512 		rsp_len = sizeof (struct fcp_rsp) +
2513 		    sizeof (struct fcp_rsp_info) + fscsi->scsi_rqlen;
2514 
2515 		icmd = fcp_icmd_alloc(pptr, ptgt,
2516 		    sizeof (struct fcp_cmd),
2517 		    rsp_len,
2518 		    fscsi->scsi_buflen,
2519 		    nodma,
2520 		    lcount,			/* ipkt_link_cnt */
2521 		    tcount,			/* ipkt_change_cnt */
2522 		    0,				/* cause */
2523 		    FC_INVALID_RSCN_COUNT);	/* invalidate the count */
2524 
2525 		if (icmd == NULL) {
2526 			ret = ENOMEM;
2527 		} else {
2528 			/*
2529 			 * Setup internal packet as sema sync
2530 			 */
2531 			fcp_ipkt_sema_init(icmd);
2532 		}
2533 	}
2534 
2535 	if (ret == 0) {
2536 		/*
2537 		 * Init fpkt pointer for use.
2538 		 */
2539 
2540 		fpkt = icmd->ipkt_fpkt;
2541 
2542 		fpkt->pkt_tran_flags	= FC_TRAN_CLASS3 | FC_TRAN_INTR;
2543 		fpkt->pkt_tran_type	= FC_PKT_FCP_READ; /* only rd for now */
2544 		fpkt->pkt_timeout	= fscsi->scsi_timeout;
2545 
2546 		/*
2547 		 * Init fcmd pointer for use by SCSI command
2548 		 */
2549 
2550 		if (nodma) {
2551 			fcmd = (struct fcp_cmd *)fpkt->pkt_cmd;
2552 		} else {
2553 			fcmd = &fcp_cmd;
2554 		}
2555 		bzero(fcmd, sizeof (struct fcp_cmd));
2556 		ptgt = plun->lun_tgt;
2557 
2558 		lun_string = (uchar_t *)&fscsi->scsi_lun;
2559 
2560 		fcmd->fcp_ent_addr.ent_addr_0 =
2561 		    BE_16(*(uint16_t *)&(lun_string[0]));
2562 		fcmd->fcp_ent_addr.ent_addr_1 =
2563 		    BE_16(*(uint16_t *)&(lun_string[2]));
2564 		fcmd->fcp_ent_addr.ent_addr_2 =
2565 		    BE_16(*(uint16_t *)&(lun_string[4]));
2566 		fcmd->fcp_ent_addr.ent_addr_3 =
2567 		    BE_16(*(uint16_t *)&(lun_string[6]));
2568 
2569 		/*
2570 		 * Setup internal packet(icmd)
2571 		 */
2572 		icmd->ipkt_lun		= plun;
2573 		icmd->ipkt_restart	= 0;
2574 		icmd->ipkt_retries	= 0;
2575 		icmd->ipkt_opcode	= 0;
2576 
2577 		/*
2578 		 * Init the frame HEADER Pointer for use
2579 		 */
2580 		hp = &fpkt->pkt_cmd_fhdr;
2581 
2582 		hp->s_id	= pptr->port_id;
2583 		hp->d_id	= ptgt->tgt_d_id;
2584 		hp->r_ctl	= R_CTL_COMMAND;
2585 		hp->type	= FC_TYPE_SCSI_FCP;
2586 		hp->f_ctl	= F_CTL_SEQ_INITIATIVE | F_CTL_FIRST_SEQ;
2587 		hp->rsvd	= 0;
2588 		hp->seq_id	= 0;
2589 		hp->seq_cnt	= 0;
2590 		hp->ox_id	= 0xffff;
2591 		hp->rx_id	= 0xffff;
2592 		hp->ro		= 0;
2593 
2594 		fcmd->fcp_cntl.cntl_qtype	= FCP_QTYPE_SIMPLE;
2595 		fcmd->fcp_cntl.cntl_read_data	= 1;	/* only rd for now */
2596 		fcmd->fcp_cntl.cntl_write_data	= 0;
2597 		fcmd->fcp_data_len	= fscsi->scsi_buflen;
2598 
2599 		scsi_cdb = (union scsi_cdb *)fcmd->fcp_cdb;
2600 		bcopy((char *)fscsi->scsi_cdbbufaddr, (char *)scsi_cdb,
2601 		    fscsi->scsi_cdblen);
2602 
2603 		if (!nodma) {
2604 			FCP_CP_OUT((uint8_t *)fcmd, fpkt->pkt_cmd,
2605 			    fpkt->pkt_cmd_acc, sizeof (struct fcp_cmd));
2606 		}
2607 
2608 		/*
2609 		 * Send SCSI command to FC transport
2610 		 */
2611 
2612 		if (ret == 0) {
2613 			mutex_enter(&ptgt->tgt_mutex);
2614 
2615 			if (!FCP_TGT_STATE_CHANGED(ptgt, icmd)) {
2616 				mutex_exit(&ptgt->tgt_mutex);
2617 				fscsi->scsi_fc_status = xport_retval =
2618 				    fc_ulp_transport(pptr->port_fp_handle,
2619 				    fpkt);
2620 				if (fscsi->scsi_fc_status != FC_SUCCESS) {
2621 					ret = EIO;
2622 				}
2623 			} else {
2624 				mutex_exit(&ptgt->tgt_mutex);
2625 				ret = EBUSY;
2626 			}
2627 		}
2628 	}
2629 
2630 	/*
2631 	 * Wait for completion only if fc_ulp_transport was called and it
2632 	 * returned a success. This is the only time callback will happen.
2633 	 * Otherwise, there is no point in waiting
2634 	 */
2635 	if ((ret == 0) && (xport_retval == FC_SUCCESS)) {
2636 		ret = fcp_ipkt_sema_wait(icmd);
2637 	}
2638 
2639 	/*
2640 	 * Copy data to IOCTL data structures
2641 	 */
2642 	rsp = NULL;
2643 	if ((ret == 0) && (xport_retval == FC_SUCCESS)) {
2644 		rsp = (struct fcp_rsp *)fpkt->pkt_resp;
2645 
2646 		if (fcp_validate_fcp_response(rsp, pptr) != FC_SUCCESS) {
2647 			fcp_log(CE_WARN, pptr->port_dip,
2648 			    "!SCSI command to d_id=0x%x lun=0x%x"
2649 			    " failed, Bad FCP response values:"
2650 			    " rsvd1=%x, rsvd2=%x, sts-rsvd1=%x,"
2651 			    " sts-rsvd2=%x, rsplen=%x, senselen=%x",
2652 			    ptgt->tgt_d_id, plun->lun_num,
2653 			    rsp->reserved_0, rsp->reserved_1,
2654 			    rsp->fcp_u.fcp_status.reserved_0,
2655 			    rsp->fcp_u.fcp_status.reserved_1,
2656 			    rsp->fcp_response_len, rsp->fcp_sense_len);
2657 
2658 			ret = EIO;
2659 		}
2660 	}
2661 
2662 	if ((ret == 0) && (rsp != NULL)) {
2663 		/*
2664 		 * Calc response lengths
2665 		 */
2666 		sense_len = 0;
2667 		info_len = 0;
2668 
2669 		if (rsp->fcp_u.fcp_status.rsp_len_set) {
2670 			info_len = rsp->fcp_response_len;
2671 		}
2672 
2673 		rsp_info   = (struct fcp_rsp_info *)
2674 		    ((uint8_t *)rsp + sizeof (struct fcp_rsp));
2675 
2676 		/*
2677 		 * Get SCSI status
2678 		 */
2679 		fscsi->scsi_bufstatus = rsp->fcp_u.fcp_status.scsi_status;
2680 		/*
2681 		 * If a lun was just added or removed and the next command
2682 		 * comes through this interface, we need to capture the check
2683 		 * condition so we can discover the new topology.
2684 		 */
2685 		if (fscsi->scsi_bufstatus != STATUS_GOOD &&
2686 		    rsp->fcp_u.fcp_status.sense_len_set) {
2687 			sense_len = rsp->fcp_sense_len;
2688 			rsp_sense  = (caddr_t)((uint8_t *)rsp_info + info_len);
2689 			sense_to = (struct scsi_extended_sense *)rsp_sense;
2690 			if ((FCP_SENSE_REPORTLUN_CHANGED(sense_to)) ||
2691 			    (FCP_SENSE_NO_LUN(sense_to))) {
2692 				reconfig_lun = TRUE;
2693 			}
2694 		}
2695 
2696 		if (fscsi->scsi_bufstatus == STATUS_GOOD && (ptgt != NULL) &&
2697 		    (reconfig_lun || (scsi_cdb->scc_cmd == SCMD_REPORT_LUN))) {
2698 			if (reconfig_lun == FALSE) {
2699 				reconfig_status =
2700 				    fcp_is_reconfig_needed(ptgt, fpkt);
2701 			}
2702 
2703 			if ((reconfig_lun == TRUE) ||
2704 			    (reconfig_status == TRUE)) {
2705 				mutex_enter(&ptgt->tgt_mutex);
2706 				if (ptgt->tgt_tid == NULL) {
2707 					/*
2708 					 * Either we've been notified the
2709 					 * REPORT_LUN data has changed, or
2710 					 * we've determined on our own that
2711 					 * we're out of date.  Kick off
2712 					 * rediscovery.
2713 					 */
2714 					tid = timeout(fcp_reconfigure_luns,
2715 					    (caddr_t)ptgt, drv_usectohz(1));
2716 
2717 					ptgt->tgt_tid = tid;
2718 					ptgt->tgt_state |= FCP_TGT_BUSY;
2719 					ret = EBUSY;
2720 					reconfig_pending = TRUE;
2721 				}
2722 				mutex_exit(&ptgt->tgt_mutex);
2723 			}
2724 		}
2725 
2726 		/*
2727 		 * Calc residuals and buffer lengths
2728 		 */
2729 
2730 		if (ret == 0) {
2731 			buf_len = fscsi->scsi_buflen;
2732 			fscsi->scsi_bufresid	= 0;
2733 			if (rsp->fcp_u.fcp_status.resid_under) {
2734 				if (rsp->fcp_resid <= fscsi->scsi_buflen) {
2735 					fscsi->scsi_bufresid = rsp->fcp_resid;
2736 				} else {
2737 					cmn_err(CE_WARN, "fcp: bad residue %x "
2738 					    "for txfer len %x", rsp->fcp_resid,
2739 					    fscsi->scsi_buflen);
2740 					fscsi->scsi_bufresid =
2741 					    fscsi->scsi_buflen;
2742 				}
2743 				buf_len -= fscsi->scsi_bufresid;
2744 			}
2745 			if (rsp->fcp_u.fcp_status.resid_over) {
2746 				fscsi->scsi_bufresid = -rsp->fcp_resid;
2747 			}
2748 
2749 			fscsi->scsi_rqresid	= fscsi->scsi_rqlen - sense_len;
2750 			if (fscsi->scsi_rqlen < sense_len) {
2751 				sense_len = fscsi->scsi_rqlen;
2752 			}
2753 
2754 			fscsi->scsi_fc_rspcode	= 0;
2755 			if (rsp->fcp_u.fcp_status.rsp_len_set) {
2756 				fscsi->scsi_fc_rspcode	= rsp_info->rsp_code;
2757 			}
2758 			fscsi->scsi_pkt_state	= fpkt->pkt_state;
2759 			fscsi->scsi_pkt_action	= fpkt->pkt_action;
2760 			fscsi->scsi_pkt_reason	= fpkt->pkt_reason;
2761 
2762 			/*
2763 			 * Copy data and request sense
2764 			 *
2765 			 * Data must be copied by using the FCP_CP_IN macro.
2766 			 * This will ensure the proper byte order since the data
2767 			 * is being copied directly from the memory mapped
2768 			 * device register.
2769 			 *
2770 			 * The response (and request sense) will be in the
2771 			 * correct byte order.	No special copy is necessary.
2772 			 */
2773 
2774 			if (buf_len) {
2775 				FCP_CP_IN(fpkt->pkt_data,
2776 				    fscsi->scsi_bufaddr,
2777 				    fpkt->pkt_data_acc,
2778 				    buf_len);
2779 			}
2780 			bcopy((void *)rsp_sense,
2781 			    (void *)fscsi->scsi_rqbufaddr,
2782 			    sense_len);
2783 		}
2784 	}
2785 
2786 	/*
2787 	 * Cleanup transport data structures if icmd was alloc-ed
2788 	 * So, cleanup happens in the same thread that icmd was alloc-ed
2789 	 */
2790 	if (icmd != NULL) {
2791 		fcp_ipkt_sema_cleanup(icmd);
2792 	}
2793 
2794 	/* restore pm busy/idle status */
2795 	if (port_busy) {
2796 		fc_ulp_idle_port(pptr->port_fp_handle);
2797 	}
2798 
2799 	/*
2800 	 * Cleanup target.  if a reconfig is pending, don't clear the BUSY
2801 	 * flag, it'll be cleared when the reconfig is complete.
2802 	 */
2803 	if ((ptgt != NULL) && !reconfig_pending) {
2804 		/*
2805 		 * If target was created,
2806 		 */
2807 		if (target_created) {
2808 			mutex_enter(&ptgt->tgt_mutex);
2809 			ptgt->tgt_state &= ~FCP_TGT_BUSY;
2810 			mutex_exit(&ptgt->tgt_mutex);
2811 		} else {
2812 			/*
2813 			 * De-mark target as busy
2814 			 */
2815 			mutex_enter(&ptgt->tgt_mutex);
2816 			ptgt->tgt_state &= ~FCP_TGT_BUSY;
2817 			mutex_exit(&ptgt->tgt_mutex);
2818 		}
2819 	}
2820 	return (ret);
2821 }
2822 
2823 
2824 static int
2825 fcp_is_reconfig_needed(struct fcp_tgt *ptgt,
2826     fc_packet_t	*fpkt)
2827 {
2828 	uchar_t			*lun_string;
2829 	uint16_t		lun_num, i;
2830 	int			num_luns;
2831 	int			actual_luns;
2832 	int			num_masked_luns;
2833 	int			lun_buflen;
2834 	struct fcp_lun	*plun	= NULL;
2835 	struct fcp_reportlun_resp	*report_lun;
2836 	uint8_t			reconfig_needed = FALSE;
2837 	uint8_t			lun_exists = FALSE;
2838 
2839 	report_lun = kmem_zalloc(fpkt->pkt_datalen, KM_SLEEP);
2840 
2841 	FCP_CP_IN(fpkt->pkt_data, report_lun, fpkt->pkt_data_acc,
2842 	    fpkt->pkt_datalen);
2843 
2844 	/* get number of luns (which is supplied as LUNS * 8) */
2845 	num_luns = BE_32(report_lun->num_lun) >> 3;
2846 
2847 	/*
2848 	 * Figure out exactly how many lun strings our response buffer
2849 	 * can hold.
2850 	 */
2851 	lun_buflen = (fpkt->pkt_datalen -
2852 	    2 * sizeof (uint32_t)) / sizeof (longlong_t);
2853 
2854 	/*
2855 	 * Is our response buffer full or not? We don't want to
2856 	 * potentially walk beyond the number of luns we have.
2857 	 */
2858 	if (num_luns <= lun_buflen) {
2859 		actual_luns = num_luns;
2860 	} else {
2861 		actual_luns = lun_buflen;
2862 	}
2863 
2864 	mutex_enter(&ptgt->tgt_mutex);
2865 
2866 	/* Scan each lun to see if we have masked it. */
2867 	num_masked_luns = 0;
2868 	if (fcp_lun_blacklist != NULL) {
2869 		for (i = 0; i < actual_luns; i++) {
2870 			lun_string = (uchar_t *)&(report_lun->lun_string[i]);
2871 			switch (lun_string[0] & 0xC0) {
2872 			case FCP_LUN_ADDRESSING:
2873 			case FCP_PD_ADDRESSING:
2874 				lun_num = ((lun_string[0] & 0x3F) << 8)
2875 				    | lun_string[1];
2876 				if (fcp_should_mask(&ptgt->tgt_port_wwn,
2877 				    lun_num) == TRUE) {
2878 					num_masked_luns++;
2879 				}
2880 				break;
2881 			default:
2882 				break;
2883 			}
2884 		}
2885 	}
2886 
2887 	/*
2888 	 * The quick and easy check.  If the number of LUNs reported
2889 	 * doesn't match the number we currently know about, we need
2890 	 * to reconfigure.
2891 	 */
2892 	if (num_luns && num_luns != (ptgt->tgt_lun_cnt + num_masked_luns)) {
2893 		mutex_exit(&ptgt->tgt_mutex);
2894 		kmem_free(report_lun, fpkt->pkt_datalen);
2895 		return (TRUE);
2896 	}
2897 
2898 	/*
2899 	 * If the quick and easy check doesn't turn up anything, we walk
2900 	 * the list of luns from the REPORT_LUN response and look for
2901 	 * any luns we don't know about.  If we find one, we know we need
2902 	 * to reconfigure. We will skip LUNs that are masked because of the
2903 	 * blacklist.
2904 	 */
2905 	for (i = 0; i < actual_luns; i++) {
2906 		lun_string = (uchar_t *)&(report_lun->lun_string[i]);
2907 		lun_exists = FALSE;
2908 		switch (lun_string[0] & 0xC0) {
2909 		case FCP_LUN_ADDRESSING:
2910 		case FCP_PD_ADDRESSING:
2911 			lun_num = ((lun_string[0] & 0x3F) << 8) | lun_string[1];
2912 
2913 			if ((fcp_lun_blacklist != NULL) && (fcp_should_mask(
2914 			    &ptgt->tgt_port_wwn, lun_num) == TRUE)) {
2915 				lun_exists = TRUE;
2916 				break;
2917 			}
2918 
2919 			for (plun = ptgt->tgt_lun; plun;
2920 			    plun = plun->lun_next) {
2921 				if (plun->lun_num == lun_num) {
2922 					lun_exists = TRUE;
2923 					break;
2924 				}
2925 			}
2926 			break;
2927 		default:
2928 			break;
2929 		}
2930 
2931 		if (lun_exists == FALSE) {
2932 			reconfig_needed = TRUE;
2933 			break;
2934 		}
2935 	}
2936 
2937 	mutex_exit(&ptgt->tgt_mutex);
2938 	kmem_free(report_lun, fpkt->pkt_datalen);
2939 
2940 	return (reconfig_needed);
2941 }
2942 
2943 /*
2944  * This function is called by fcp_handle_page83 and uses inquiry response data
2945  * stored in plun->lun_inq to determine whether or not a device is a member of
2946  * the table fcp_symmetric_disk_table_size. We return 0 if it is in the table,
2947  * otherwise 1.
2948  */
2949 static int
2950 fcp_symmetric_device_probe(struct fcp_lun *plun)
2951 {
2952 	struct scsi_inquiry	*stdinq = &plun->lun_inq;
2953 	char			*devidptr;
2954 	int			i, len;
2955 
2956 	for (i = 0; i < fcp_symmetric_disk_table_size; i++) {
2957 		devidptr = fcp_symmetric_disk_table[i];
2958 		len = (int)strlen(devidptr);
2959 
2960 		if (bcmp(stdinq->inq_vid, devidptr, len) == 0) {
2961 			return (0);
2962 		}
2963 	}
2964 	return (1);
2965 }
2966 
2967 
2968 /*
2969  * This function is called by fcp_ioctl for the FCP_STATE_COUNT ioctl
2970  * It basically returns the current count of # of state change callbacks
2971  * i.e the value of tgt_change_cnt.
2972  *
2973  * INPUT:
2974  *   fcp_ioctl.fp_minor -> The minor # of the fp port
2975  *   fcp_ioctl.listlen	-> 1
2976  *   fcp_ioctl.list	-> Pointer to a 32 bit integer
2977  */
2978 /*ARGSUSED2*/
2979 static int
2980 fcp_get_statec_count(struct fcp_ioctl *data, int mode, int *rval)
2981 {
2982 	int			ret;
2983 	uint32_t		link_cnt;
2984 	struct fcp_ioctl	fioctl;
2985 	struct fcp_port	*pptr = NULL;
2986 
2987 	if ((ret = fcp_copyin_fcp_ioctl_data(data, mode, rval, &fioctl,
2988 	    &pptr)) != 0) {
2989 		return (ret);
2990 	}
2991 
2992 	ASSERT(pptr != NULL);
2993 
2994 	if (fioctl.listlen != 1) {
2995 		return (EINVAL);
2996 	}
2997 
2998 	mutex_enter(&pptr->port_mutex);
2999 	if (pptr->port_state & FCP_STATE_OFFLINE) {
3000 		mutex_exit(&pptr->port_mutex);
3001 		return (ENXIO);
3002 	}
3003 
3004 	/*
3005 	 * FCP_STATE_INIT is set in 2 cases (not sure why it is overloaded):
3006 	 * When the fcp initially attaches to the port and there are nothing
3007 	 * hanging out of the port or if there was a repeat offline state change
3008 	 * callback (refer fcp_statec_callback() FC_STATE_OFFLINE case).
3009 	 * In the latter case, port_tmp_cnt will be non-zero and that is how we
3010 	 * will differentiate the 2 cases.
3011 	 */
3012 	if ((pptr->port_state & FCP_STATE_INIT) && pptr->port_tmp_cnt) {
3013 		mutex_exit(&pptr->port_mutex);
3014 		return (ENXIO);
3015 	}
3016 
3017 	link_cnt = pptr->port_link_cnt;
3018 	mutex_exit(&pptr->port_mutex);
3019 
3020 	if (ddi_copyout(&link_cnt, fioctl.list, (sizeof (uint32_t)), mode)) {
3021 		return (EFAULT);
3022 	}
3023 
3024 #ifdef	_MULTI_DATAMODEL
3025 	switch (ddi_model_convert_from(mode & FMODELS)) {
3026 	case DDI_MODEL_ILP32: {
3027 		struct fcp32_ioctl f32_ioctl;
3028 
3029 		f32_ioctl.fp_minor = fioctl.fp_minor;
3030 		f32_ioctl.listlen = fioctl.listlen;
3031 		f32_ioctl.list = (caddr32_t)(long)fioctl.list;
3032 		if (ddi_copyout((void *)&f32_ioctl, (void *)data,
3033 		    sizeof (struct fcp32_ioctl), mode)) {
3034 			return (EFAULT);
3035 		}
3036 		break;
3037 	}
3038 	case DDI_MODEL_NONE:
3039 		if (ddi_copyout((void *)&fioctl, (void *)data,
3040 		    sizeof (struct fcp_ioctl), mode)) {
3041 			return (EFAULT);
3042 		}
3043 		break;
3044 	}
3045 #else	/* _MULTI_DATAMODEL */
3046 
3047 	if (ddi_copyout((void *)&fioctl, (void *)data,
3048 	    sizeof (struct fcp_ioctl), mode)) {
3049 		return (EFAULT);
3050 	}
3051 #endif	/* _MULTI_DATAMODEL */
3052 
3053 	return (0);
3054 }
3055 
3056 /*
3057  * This function copies the fcp_ioctl structure passed in from user land
3058  * into kernel land. Handles 32 bit applications.
3059  */
3060 /*ARGSUSED*/
3061 static int
3062 fcp_copyin_fcp_ioctl_data(struct fcp_ioctl *data, int mode, int *rval,
3063     struct fcp_ioctl *fioctl, struct fcp_port **pptr)
3064 {
3065 	struct fcp_port	*t_pptr;
3066 
3067 #ifdef	_MULTI_DATAMODEL
3068 	switch (ddi_model_convert_from(mode & FMODELS)) {
3069 	case DDI_MODEL_ILP32: {
3070 		struct fcp32_ioctl f32_ioctl;
3071 
3072 		if (ddi_copyin((void *)data, (void *)&f32_ioctl,
3073 		    sizeof (struct fcp32_ioctl), mode)) {
3074 			return (EFAULT);
3075 		}
3076 		fioctl->fp_minor = f32_ioctl.fp_minor;
3077 		fioctl->listlen = f32_ioctl.listlen;
3078 		fioctl->list = (caddr_t)(long)f32_ioctl.list;
3079 		break;
3080 	}
3081 	case DDI_MODEL_NONE:
3082 		if (ddi_copyin((void *)data, (void *)fioctl,
3083 		    sizeof (struct fcp_ioctl), mode)) {
3084 			return (EFAULT);
3085 		}
3086 		break;
3087 	}
3088 
3089 #else	/* _MULTI_DATAMODEL */
3090 	if (ddi_copyin((void *)data, (void *)fioctl,
3091 	    sizeof (struct fcp_ioctl), mode)) {
3092 		return (EFAULT);
3093 	}
3094 #endif	/* _MULTI_DATAMODEL */
3095 
3096 	/*
3097 	 * Right now we can assume that the minor number matches with
3098 	 * this instance of fp. If this changes we will need to
3099 	 * revisit this logic.
3100 	 */
3101 	mutex_enter(&fcp_global_mutex);
3102 	t_pptr = fcp_port_head;
3103 	while (t_pptr) {
3104 		if (t_pptr->port_instance == (uint32_t)fioctl->fp_minor) {
3105 			break;
3106 		} else {
3107 			t_pptr = t_pptr->port_next;
3108 		}
3109 	}
3110 	*pptr = t_pptr;
3111 	mutex_exit(&fcp_global_mutex);
3112 	if (t_pptr == NULL) {
3113 		return (ENXIO);
3114 	}
3115 
3116 	return (0);
3117 }
3118 
3119 /*
3120  *     Function: fcp_port_create_tgt
3121  *
3122  *  Description: As the name suggest this function creates the target context
3123  *		 specified by the the WWN provided by the caller.  If the
3124  *		 creation goes well and the target is known by fp/fctl a PLOGI
3125  *		 followed by a PRLI are issued.
3126  *
3127  *     Argument: pptr		fcp port structure
3128  *		 pwwn		WWN of the target
3129  *		 ret_val	Address of the return code.  It could be:
3130  *				EIO, ENOMEM or 0.
3131  *		 fc_status	PLOGI or PRLI status completion
3132  *		 fc_pkt_state	PLOGI or PRLI state completion
3133  *		 fc_pkt_reason	PLOGI or PRLI reason completion
3134  *		 fc_pkt_action	PLOGI or PRLI action completion
3135  *
3136  * Return Value: NULL if it failed
3137  *		 Target structure address if it succeeds
3138  */
3139 static struct fcp_tgt *
3140 fcp_port_create_tgt(struct fcp_port *pptr, la_wwn_t *pwwn, int *ret_val,
3141     int *fc_status, int *fc_pkt_state, int *fc_pkt_reason, int *fc_pkt_action)
3142 {
3143 	struct fcp_tgt	*ptgt = NULL;
3144 	fc_portmap_t		devlist;
3145 	int			lcount;
3146 	int			error;
3147 
3148 	*ret_val = 0;
3149 
3150 	/*
3151 	 * Check FC port device & get port map
3152 	 */
3153 	if (fc_ulp_get_remote_port(pptr->port_fp_handle, pwwn,
3154 	    &error, 1) == NULL) {
3155 		*ret_val = EIO;
3156 	} else {
3157 		if (fc_ulp_pwwn_to_portmap(pptr->port_fp_handle, pwwn,
3158 		    &devlist) != FC_SUCCESS) {
3159 			*ret_val = EIO;
3160 		}
3161 	}
3162 
3163 	/* Set port map flags */
3164 	devlist.map_type = PORT_DEVICE_USER_CREATE;
3165 
3166 	/* Allocate target */
3167 	if (*ret_val == 0) {
3168 		lcount = pptr->port_link_cnt;
3169 		ptgt = fcp_alloc_tgt(pptr, &devlist, lcount);
3170 		if (ptgt == NULL) {
3171 			fcp_log(CE_WARN, pptr->port_dip,
3172 			    "!FC target allocation failed");
3173 			*ret_val = ENOMEM;
3174 		} else {
3175 			/* Setup target */
3176 			mutex_enter(&ptgt->tgt_mutex);
3177 
3178 			ptgt->tgt_statec_cause	= FCP_CAUSE_TGT_CHANGE;
3179 			ptgt->tgt_tmp_cnt	= 1;
3180 			ptgt->tgt_d_id		= devlist.map_did.port_id;
3181 			ptgt->tgt_hard_addr	=
3182 			    devlist.map_hard_addr.hard_addr;
3183 			ptgt->tgt_pd_handle	= devlist.map_pd;
3184 			ptgt->tgt_fca_dev	= NULL;
3185 
3186 			bcopy(&devlist.map_nwwn, &ptgt->tgt_node_wwn.raw_wwn[0],
3187 			    FC_WWN_SIZE);
3188 			bcopy(&devlist.map_pwwn, &ptgt->tgt_port_wwn.raw_wwn[0],
3189 			    FC_WWN_SIZE);
3190 
3191 			mutex_exit(&ptgt->tgt_mutex);
3192 		}
3193 	}
3194 
3195 	/* Release global mutex for PLOGI and PRLI */
3196 	mutex_exit(&fcp_global_mutex);
3197 
3198 	/* Send PLOGI (If necessary) */
3199 	if (*ret_val == 0) {
3200 		*ret_val = fcp_tgt_send_plogi(ptgt, fc_status,
3201 		    fc_pkt_state, fc_pkt_reason, fc_pkt_action);
3202 	}
3203 
3204 	/* Send PRLI (If necessary) */
3205 	if (*ret_val == 0) {
3206 		*ret_val = fcp_tgt_send_prli(ptgt, fc_status,
3207 		    fc_pkt_state, fc_pkt_reason, fc_pkt_action);
3208 	}
3209 
3210 	mutex_enter(&fcp_global_mutex);
3211 
3212 	return (ptgt);
3213 }
3214 
3215 /*
3216  *     Function: fcp_tgt_send_plogi
3217  *
3218  *  Description: This function sends a PLOGI to the target specified by the
3219  *		 caller and waits till it completes.
3220  *
3221  *     Argument: ptgt		Target to send the plogi to.
3222  *		 fc_status	Status returned by fp/fctl in the PLOGI request.
3223  *		 fc_pkt_state	State returned by fp/fctl in the PLOGI request.
3224  *		 fc_pkt_reason	Reason returned by fp/fctl in the PLOGI request.
3225  *		 fc_pkt_action	Action returned by fp/fctl in the PLOGI request.
3226  *
3227  * Return Value: 0
3228  *		 ENOMEM
3229  *		 EIO
3230  *
3231  *	Context: User context.
3232  */
3233 static int
3234 fcp_tgt_send_plogi(struct fcp_tgt *ptgt, int *fc_status, int *fc_pkt_state,
3235     int *fc_pkt_reason, int *fc_pkt_action)
3236 {
3237 	struct fcp_port	*pptr;
3238 	struct fcp_ipkt	*icmd;
3239 	struct fc_packet	*fpkt;
3240 	fc_frame_hdr_t		*hp;
3241 	struct la_els_logi	logi;
3242 	int			tcount;
3243 	int			lcount;
3244 	int			ret, login_retval = ~FC_SUCCESS;
3245 
3246 	ret = 0;
3247 
3248 	pptr = ptgt->tgt_port;
3249 
3250 	lcount = pptr->port_link_cnt;
3251 	tcount = ptgt->tgt_change_cnt;
3252 
3253 	/* Alloc internal packet */
3254 	icmd = fcp_icmd_alloc(pptr, ptgt, sizeof (la_els_logi_t),
3255 	    sizeof (la_els_logi_t), 0, 0, lcount, tcount, 0,
3256 	    FC_INVALID_RSCN_COUNT);
3257 
3258 	if (icmd == NULL) {
3259 		ret = ENOMEM;
3260 	} else {
3261 		/*
3262 		 * Setup internal packet as sema sync
3263 		 */
3264 		fcp_ipkt_sema_init(icmd);
3265 
3266 		/*
3267 		 * Setup internal packet (icmd)
3268 		 */
3269 		icmd->ipkt_lun		= NULL;
3270 		icmd->ipkt_restart	= 0;
3271 		icmd->ipkt_retries	= 0;
3272 		icmd->ipkt_opcode	= LA_ELS_PLOGI;
3273 
3274 		/*
3275 		 * Setup fc_packet
3276 		 */
3277 		fpkt = icmd->ipkt_fpkt;
3278 
3279 		fpkt->pkt_tran_flags	= FC_TRAN_CLASS3 | FC_TRAN_INTR;
3280 		fpkt->pkt_tran_type	= FC_PKT_EXCHANGE;
3281 		fpkt->pkt_timeout	= FCP_ELS_TIMEOUT;
3282 
3283 		/*
3284 		 * Setup FC frame header
3285 		 */
3286 		hp = &fpkt->pkt_cmd_fhdr;
3287 
3288 		hp->s_id	= pptr->port_id;	/* source ID */
3289 		hp->d_id	= ptgt->tgt_d_id;	/* dest ID */
3290 		hp->r_ctl	= R_CTL_ELS_REQ;
3291 		hp->type	= FC_TYPE_EXTENDED_LS;
3292 		hp->f_ctl	= F_CTL_SEQ_INITIATIVE | F_CTL_FIRST_SEQ;
3293 		hp->seq_id	= 0;
3294 		hp->rsvd	= 0;
3295 		hp->df_ctl	= 0;
3296 		hp->seq_cnt	= 0;
3297 		hp->ox_id	= 0xffff;		/* i.e. none */
3298 		hp->rx_id	= 0xffff;		/* i.e. none */
3299 		hp->ro		= 0;
3300 
3301 		/*
3302 		 * Setup PLOGI
3303 		 */
3304 		bzero(&logi, sizeof (struct la_els_logi));
3305 		logi.ls_code.ls_code = LA_ELS_PLOGI;
3306 
3307 		FCP_CP_OUT((uint8_t *)&logi, fpkt->pkt_cmd,
3308 		    fpkt->pkt_cmd_acc, sizeof (struct la_els_logi));
3309 
3310 		/*
3311 		 * Send PLOGI
3312 		 */
3313 		*fc_status = login_retval =
3314 		    fc_ulp_login(pptr->port_fp_handle, &fpkt, 1);
3315 		if (*fc_status != FC_SUCCESS) {
3316 			ret = EIO;
3317 		}
3318 	}
3319 
3320 	/*
3321 	 * Wait for completion
3322 	 */
3323 	if ((ret == 0) && (login_retval == FC_SUCCESS)) {
3324 		ret = fcp_ipkt_sema_wait(icmd);
3325 
3326 		*fc_pkt_state	= fpkt->pkt_state;
3327 		*fc_pkt_reason	= fpkt->pkt_reason;
3328 		*fc_pkt_action	= fpkt->pkt_action;
3329 	}
3330 
3331 	/*
3332 	 * Cleanup transport data structures if icmd was alloc-ed AND if there
3333 	 * is going to be no callback (i.e if fc_ulp_login() failed).
3334 	 * Otherwise, cleanup happens in callback routine.
3335 	 */
3336 	if (icmd != NULL) {
3337 		fcp_ipkt_sema_cleanup(icmd);
3338 	}
3339 
3340 	return (ret);
3341 }
3342 
3343 /*
3344  *     Function: fcp_tgt_send_prli
3345  *
3346  *  Description: Does nothing as of today.
3347  *
3348  *     Argument: ptgt		Target to send the prli to.
3349  *		 fc_status	Status returned by fp/fctl in the PRLI request.
3350  *		 fc_pkt_state	State returned by fp/fctl in the PRLI request.
3351  *		 fc_pkt_reason	Reason returned by fp/fctl in the PRLI request.
3352  *		 fc_pkt_action	Action returned by fp/fctl in the PRLI request.
3353  *
3354  * Return Value: 0
3355  */
3356 /*ARGSUSED*/
3357 static int
3358 fcp_tgt_send_prli(struct fcp_tgt *ptgt, int *fc_status, int *fc_pkt_state,
3359     int *fc_pkt_reason, int *fc_pkt_action)
3360 {
3361 	return (0);
3362 }
3363 
3364 /*
3365  *     Function: fcp_ipkt_sema_init
3366  *
3367  *  Description: Initializes the semaphore contained in the internal packet.
3368  *
3369  *     Argument: icmd	Internal packet the semaphore of which must be
3370  *			initialized.
3371  *
3372  * Return Value: None
3373  *
3374  *	Context: User context only.
3375  */
3376 static void
3377 fcp_ipkt_sema_init(struct fcp_ipkt *icmd)
3378 {
3379 	struct fc_packet	*fpkt;
3380 
3381 	fpkt = icmd->ipkt_fpkt;
3382 
3383 	/* Create semaphore for sync */
3384 	sema_init(&(icmd->ipkt_sema), 0, NULL, SEMA_DRIVER, NULL);
3385 
3386 	/* Setup the completion callback */
3387 	fpkt->pkt_comp = fcp_ipkt_sema_callback;
3388 }
3389 
3390 /*
3391  *     Function: fcp_ipkt_sema_wait
3392  *
3393  *  Description: Wait on the semaphore embedded in the internal packet.	 The
3394  *		 semaphore is released in the callback.
3395  *
3396  *     Argument: icmd	Internal packet to wait on for completion.
3397  *
3398  * Return Value: 0
3399  *		 EIO
3400  *		 EBUSY
3401  *		 EAGAIN
3402  *
3403  *	Context: User context only.
3404  *
3405  * This function does a conversion between the field pkt_state of the fc_packet
3406  * embedded in the internal packet (icmd) and the code it returns.
3407  */
3408 static int
3409 fcp_ipkt_sema_wait(struct fcp_ipkt *icmd)
3410 {
3411 	struct fc_packet	*fpkt;
3412 	int	ret;
3413 
3414 	ret = EIO;
3415 	fpkt = icmd->ipkt_fpkt;
3416 
3417 	/*
3418 	 * Wait on semaphore
3419 	 */
3420 	sema_p(&(icmd->ipkt_sema));
3421 
3422 	/*
3423 	 * Check the status of the FC packet
3424 	 */
3425 	switch (fpkt->pkt_state) {
3426 	case FC_PKT_SUCCESS:
3427 		ret = 0;
3428 		break;
3429 	case FC_PKT_LOCAL_RJT:
3430 		switch (fpkt->pkt_reason) {
3431 		case FC_REASON_SEQ_TIMEOUT:
3432 		case FC_REASON_RX_BUF_TIMEOUT:
3433 			ret = EAGAIN;
3434 			break;
3435 		case FC_REASON_PKT_BUSY:
3436 			ret = EBUSY;
3437 			break;
3438 		}
3439 		break;
3440 	case FC_PKT_TIMEOUT:
3441 		ret = EAGAIN;
3442 		break;
3443 	case FC_PKT_LOCAL_BSY:
3444 	case FC_PKT_TRAN_BSY:
3445 	case FC_PKT_NPORT_BSY:
3446 	case FC_PKT_FABRIC_BSY:
3447 		ret = EBUSY;
3448 		break;
3449 	case FC_PKT_LS_RJT:
3450 	case FC_PKT_BA_RJT:
3451 		switch (fpkt->pkt_reason) {
3452 		case FC_REASON_LOGICAL_BSY:
3453 			ret = EBUSY;
3454 			break;
3455 		}
3456 		break;
3457 	case FC_PKT_FS_RJT:
3458 		switch (fpkt->pkt_reason) {
3459 		case FC_REASON_FS_LOGICAL_BUSY:
3460 			ret = EBUSY;
3461 			break;
3462 		}
3463 		break;
3464 	}
3465 
3466 	return (ret);
3467 }
3468 
3469 /*
3470  *     Function: fcp_ipkt_sema_callback
3471  *
3472  *  Description: Registered as the completion callback function for the FC
3473  *		 transport when the ipkt semaphore is used for sync. This will
3474  *		 cleanup the used data structures, if necessary and wake up
3475  *		 the user thread to complete the transaction.
3476  *
3477  *     Argument: fpkt	FC packet (points to the icmd)
3478  *
3479  * Return Value: None
3480  *
3481  *	Context: User context only
3482  */
3483 static void
3484 fcp_ipkt_sema_callback(struct fc_packet *fpkt)
3485 {
3486 	struct fcp_ipkt	*icmd;
3487 
3488 	icmd = (struct fcp_ipkt *)fpkt->pkt_ulp_private;
3489 
3490 	/*
3491 	 * Wake up user thread
3492 	 */
3493 	sema_v(&(icmd->ipkt_sema));
3494 }
3495 
3496 /*
3497  *     Function: fcp_ipkt_sema_cleanup
3498  *
3499  *  Description: Called to cleanup (if necessary) the data structures used
3500  *		 when ipkt sema is used for sync.  This function will detect
3501  *		 whether the caller is the last thread (via counter) and
3502  *		 cleanup only if necessary.
3503  *
3504  *     Argument: icmd	Internal command packet
3505  *
3506  * Return Value: None
3507  *
3508  *	Context: User context only
3509  */
3510 static void
3511 fcp_ipkt_sema_cleanup(struct fcp_ipkt *icmd)
3512 {
3513 	struct fcp_tgt	*ptgt;
3514 	struct fcp_port	*pptr;
3515 
3516 	ptgt = icmd->ipkt_tgt;
3517 	pptr = icmd->ipkt_port;
3518 
3519 	/*
3520 	 * Acquire data structure
3521 	 */
3522 	mutex_enter(&ptgt->tgt_mutex);
3523 
3524 	/*
3525 	 * Destroy semaphore
3526 	 */
3527 	sema_destroy(&(icmd->ipkt_sema));
3528 
3529 	/*
3530 	 * Cleanup internal packet
3531 	 */
3532 	mutex_exit(&ptgt->tgt_mutex);
3533 	fcp_icmd_free(pptr, icmd);
3534 }
3535 
3536 /*
3537  *     Function: fcp_port_attach
3538  *
3539  *  Description: Called by the transport framework to resume, suspend or
3540  *		 attach a new port.
3541  *
3542  *     Argument: ulph		Port handle
3543  *		 *pinfo		Port information
3544  *		 cmd		Command
3545  *		 s_id		Port ID
3546  *
3547  * Return Value: FC_FAILURE or FC_SUCCESS
3548  */
3549 /*ARGSUSED*/
3550 static int
3551 fcp_port_attach(opaque_t ulph, fc_ulp_port_info_t *pinfo,
3552     fc_attach_cmd_t cmd, uint32_t s_id)
3553 {
3554 	int	instance;
3555 	int	res = FC_FAILURE; /* default result */
3556 
3557 	ASSERT(pinfo != NULL);
3558 
3559 	instance = ddi_get_instance(pinfo->port_dip);
3560 
3561 	switch (cmd) {
3562 	case FC_CMD_ATTACH:
3563 		/*
3564 		 * this port instance attaching for the first time (or after
3565 		 * being detached before)
3566 		 */
3567 		if (fcp_handle_port_attach(ulph, pinfo, s_id,
3568 		    instance) == DDI_SUCCESS) {
3569 			res = FC_SUCCESS;
3570 		} else {
3571 			ASSERT(ddi_get_soft_state(fcp_softstate,
3572 			    instance) == NULL);
3573 		}
3574 		break;
3575 
3576 	case FC_CMD_RESUME:
3577 	case FC_CMD_POWER_UP:
3578 		/*
3579 		 * this port instance was attached and the suspended and
3580 		 * will now be resumed
3581 		 */
3582 		if (fcp_handle_port_resume(ulph, pinfo, s_id, cmd,
3583 		    instance) == DDI_SUCCESS) {
3584 			res = FC_SUCCESS;
3585 		}
3586 		break;
3587 
3588 	default:
3589 		/* shouldn't happen */
3590 		FCP_TRACE(fcp_logq, "fcp",
3591 		    fcp_trace, FCP_BUF_LEVEL_2, 0,
3592 		    "port_attach: unknown cmdcommand: %d", cmd);
3593 		break;
3594 	}
3595 
3596 	/* return result */
3597 	FCP_DTRACE(fcp_logq, "fcp", fcp_trace,
3598 	    FCP_BUF_LEVEL_1, 0, "fcp_port_attach returning %d", res);
3599 
3600 	return (res);
3601 }
3602 
3603 
3604 /*
3605  * detach or suspend this port instance
3606  *
3607  * acquires and releases the global mutex
3608  *
3609  * acquires and releases the mutex for this port
3610  *
3611  * acquires and releases the hotplug mutex for this port
3612  */
3613 /*ARGSUSED*/
3614 static int
3615 fcp_port_detach(opaque_t ulph, fc_ulp_port_info_t *info,
3616     fc_detach_cmd_t cmd)
3617 {
3618 	int			flag;
3619 	int			instance;
3620 	struct fcp_port		*pptr;
3621 
3622 	instance = ddi_get_instance(info->port_dip);
3623 	pptr = ddi_get_soft_state(fcp_softstate, instance);
3624 
3625 	switch (cmd) {
3626 	case FC_CMD_SUSPEND:
3627 		FCP_DTRACE(fcp_logq, "fcp",
3628 		    fcp_trace, FCP_BUF_LEVEL_8, 0,
3629 		    "port suspend called for port %d", instance);
3630 		flag = FCP_STATE_SUSPENDED;
3631 		break;
3632 
3633 	case FC_CMD_POWER_DOWN:
3634 		FCP_DTRACE(fcp_logq, "fcp",
3635 		    fcp_trace, FCP_BUF_LEVEL_8, 0,
3636 		    "port power down called for port %d", instance);
3637 		flag = FCP_STATE_POWER_DOWN;
3638 		break;
3639 
3640 	case FC_CMD_DETACH:
3641 		FCP_DTRACE(fcp_logq, "fcp",
3642 		    fcp_trace, FCP_BUF_LEVEL_8, 0,
3643 		    "port detach called for port %d", instance);
3644 		flag = FCP_STATE_DETACHING;
3645 		break;
3646 
3647 	default:
3648 		/* shouldn't happen */
3649 		return (FC_FAILURE);
3650 	}
3651 	FCP_DTRACE(fcp_logq, "fcp", fcp_trace,
3652 	    FCP_BUF_LEVEL_1, 0, "fcp_port_detach returning");
3653 
3654 	return (fcp_handle_port_detach(pptr, flag, instance));
3655 }
3656 
3657 
3658 /*
3659  * called for ioctls on the transport's devctl interface, and the transport
3660  * has passed it to us
3661  *
3662  * this will only be called for device control ioctls (i.e. hotplugging stuff)
3663  *
3664  * return FC_SUCCESS if we decide to claim the ioctl,
3665  * else return FC_UNCLAIMED
3666  *
3667  * *rval is set iff we decide to claim the ioctl
3668  */
3669 /*ARGSUSED*/
3670 static int
3671 fcp_port_ioctl(opaque_t ulph, opaque_t port_handle, dev_t dev, int cmd,
3672     intptr_t data, int mode, cred_t *credp, int *rval, uint32_t claimed)
3673 {
3674 	int			retval = FC_UNCLAIMED;	/* return value */
3675 	struct fcp_port		*pptr = NULL;		/* our soft state */
3676 	struct devctl_iocdata	*dcp = NULL;		/* for devctl */
3677 	dev_info_t		*cdip;
3678 	mdi_pathinfo_t		*pip = NULL;
3679 	char			*ndi_nm;		/* NDI name */
3680 	char			*ndi_addr;		/* NDI addr */
3681 	int			is_mpxio, circ;
3682 	int			devi_entered = 0;
3683 	time_t			end_time;
3684 
3685 	ASSERT(rval != NULL);
3686 
3687 	FCP_DTRACE(fcp_logq, "fcp",
3688 	    fcp_trace, FCP_BUF_LEVEL_8, 0,
3689 	    "fcp_port_ioctl(cmd=0x%x, claimed=%d)", cmd, claimed);
3690 
3691 	/* if already claimed then forget it */
3692 	if (claimed) {
3693 		/*
3694 		 * for now, if this ioctl has already been claimed, then
3695 		 * we just ignore it
3696 		 */
3697 		return (retval);
3698 	}
3699 
3700 	/* get our port info */
3701 	if ((pptr = fcp_get_port(port_handle)) == NULL) {
3702 		fcp_log(CE_WARN, NULL,
3703 		    "!fcp:Invalid port handle handle in ioctl");
3704 		*rval = ENXIO;
3705 		return (retval);
3706 	}
3707 	is_mpxio = pptr->port_mpxio;
3708 
3709 	switch (cmd) {
3710 	case DEVCTL_BUS_GETSTATE:
3711 	case DEVCTL_BUS_QUIESCE:
3712 	case DEVCTL_BUS_UNQUIESCE:
3713 	case DEVCTL_BUS_RESET:
3714 	case DEVCTL_BUS_RESETALL:
3715 
3716 	case DEVCTL_BUS_DEV_CREATE:
3717 		if (ndi_dc_allochdl((void *)data, &dcp) != NDI_SUCCESS) {
3718 			return (retval);
3719 		}
3720 		break;
3721 
3722 	case DEVCTL_DEVICE_GETSTATE:
3723 	case DEVCTL_DEVICE_OFFLINE:
3724 	case DEVCTL_DEVICE_ONLINE:
3725 	case DEVCTL_DEVICE_REMOVE:
3726 	case DEVCTL_DEVICE_RESET:
3727 		if (ndi_dc_allochdl((void *)data, &dcp) != NDI_SUCCESS) {
3728 			return (retval);
3729 		}
3730 
3731 		ASSERT(dcp != NULL);
3732 
3733 		/* ensure we have a name and address */
3734 		if (((ndi_nm = ndi_dc_getname(dcp)) == NULL) ||
3735 		    ((ndi_addr = ndi_dc_getaddr(dcp)) == NULL)) {
3736 			FCP_TRACE(fcp_logq, pptr->port_instbuf,
3737 			    fcp_trace, FCP_BUF_LEVEL_2, 0,
3738 			    "ioctl: can't get name (%s) or addr (%s)",
3739 			    ndi_nm ? ndi_nm : "<null ptr>",
3740 			    ndi_addr ? ndi_addr : "<null ptr>");
3741 			ndi_dc_freehdl(dcp);
3742 			return (retval);
3743 		}
3744 
3745 
3746 		/* get our child's DIP */
3747 		ASSERT(pptr != NULL);
3748 		if (is_mpxio) {
3749 			mdi_devi_enter(pptr->port_dip, &circ);
3750 		} else {
3751 			ndi_devi_enter(pptr->port_dip, &circ);
3752 		}
3753 		devi_entered = 1;
3754 
3755 		if ((cdip = ndi_devi_find(pptr->port_dip, ndi_nm,
3756 		    ndi_addr)) == NULL) {
3757 			/* Look for virtually enumerated devices. */
3758 			pip = mdi_pi_find(pptr->port_dip, NULL, ndi_addr);
3759 			if (pip == NULL ||
3760 			    ((cdip = mdi_pi_get_client(pip)) == NULL)) {
3761 				*rval = ENXIO;
3762 				goto out;
3763 			}
3764 		}
3765 		break;
3766 
3767 	default:
3768 		*rval = ENOTTY;
3769 		return (retval);
3770 	}
3771 
3772 	/* this ioctl is ours -- process it */
3773 
3774 	retval = FC_SUCCESS;		/* just means we claim the ioctl */
3775 
3776 	/* we assume it will be a success; else we'll set error value */
3777 	*rval = 0;
3778 
3779 
3780 	FCP_DTRACE(fcp_logq, pptr->port_instbuf,
3781 	    fcp_trace, FCP_BUF_LEVEL_8, 0,
3782 	    "ioctl: claiming this one");
3783 
3784 	/* handle ioctls now */
3785 	switch (cmd) {
3786 	case DEVCTL_DEVICE_GETSTATE:
3787 		ASSERT(cdip != NULL);
3788 		ASSERT(dcp != NULL);
3789 		if (ndi_dc_return_dev_state(cdip, dcp) != NDI_SUCCESS) {
3790 			*rval = EFAULT;
3791 		}
3792 		break;
3793 
3794 	case DEVCTL_DEVICE_REMOVE:
3795 	case DEVCTL_DEVICE_OFFLINE: {
3796 		int			flag = 0;
3797 		int			lcount;
3798 		int			tcount;
3799 		struct fcp_pkt	*head = NULL;
3800 		struct fcp_lun	*plun;
3801 		child_info_t		*cip = CIP(cdip);
3802 		int			all = 1;
3803 		struct fcp_lun	*tplun;
3804 		struct fcp_tgt	*ptgt;
3805 
3806 		ASSERT(pptr != NULL);
3807 		ASSERT(cdip != NULL);
3808 
3809 		mutex_enter(&pptr->port_mutex);
3810 		if (pip != NULL) {
3811 			cip = CIP(pip);
3812 		}
3813 		if ((plun = fcp_get_lun_from_cip(pptr, cip)) == NULL) {
3814 			mutex_exit(&pptr->port_mutex);
3815 			*rval = ENXIO;
3816 			break;
3817 		}
3818 
3819 		head = fcp_scan_commands(plun);
3820 		if (head != NULL) {
3821 			fcp_abort_commands(head, LUN_PORT);
3822 		}
3823 		lcount = pptr->port_link_cnt;
3824 		tcount = plun->lun_tgt->tgt_change_cnt;
3825 		mutex_exit(&pptr->port_mutex);
3826 
3827 		if (cmd == DEVCTL_DEVICE_REMOVE) {
3828 			flag = NDI_DEVI_REMOVE;
3829 		}
3830 
3831 		if (is_mpxio) {
3832 			mdi_devi_exit(pptr->port_dip, circ);
3833 		} else {
3834 			ndi_devi_exit(pptr->port_dip, circ);
3835 		}
3836 		devi_entered = 0;
3837 
3838 		*rval = fcp_pass_to_hp_and_wait(pptr, plun, cip,
3839 		    FCP_OFFLINE, lcount, tcount, flag);
3840 
3841 		if (*rval != NDI_SUCCESS) {
3842 			*rval = (*rval == NDI_BUSY) ? EBUSY : EIO;
3843 			break;
3844 		}
3845 
3846 		fcp_update_offline_flags(plun);
3847 
3848 		ptgt = plun->lun_tgt;
3849 		mutex_enter(&ptgt->tgt_mutex);
3850 		for (tplun = ptgt->tgt_lun; tplun != NULL; tplun =
3851 		    tplun->lun_next) {
3852 			mutex_enter(&tplun->lun_mutex);
3853 			if (!(tplun->lun_state & FCP_LUN_OFFLINE)) {
3854 				all = 0;
3855 			}
3856 			mutex_exit(&tplun->lun_mutex);
3857 		}
3858 
3859 		if (all) {
3860 			ptgt->tgt_node_state = FCP_TGT_NODE_NONE;
3861 			/*
3862 			 * The user is unconfiguring/offlining the device.
3863 			 * If fabric and the auto configuration is set
3864 			 * then make sure the user is the only one who
3865 			 * can reconfigure the device.
3866 			 */
3867 			if (FC_TOP_EXTERNAL(pptr->port_topology) &&
3868 			    fcp_enable_auto_configuration) {
3869 				ptgt->tgt_manual_config_only = 1;
3870 			}
3871 		}
3872 		mutex_exit(&ptgt->tgt_mutex);
3873 		break;
3874 	}
3875 
3876 	case DEVCTL_DEVICE_ONLINE: {
3877 		int			lcount;
3878 		int			tcount;
3879 		struct fcp_lun	*plun;
3880 		child_info_t		*cip = CIP(cdip);
3881 
3882 		ASSERT(cdip != NULL);
3883 		ASSERT(pptr != NULL);
3884 
3885 		mutex_enter(&pptr->port_mutex);
3886 		if (pip != NULL) {
3887 			cip = CIP(pip);
3888 		}
3889 		if ((plun = fcp_get_lun_from_cip(pptr, cip)) == NULL) {
3890 			mutex_exit(&pptr->port_mutex);
3891 			*rval = ENXIO;
3892 			break;
3893 		}
3894 		lcount = pptr->port_link_cnt;
3895 		tcount = plun->lun_tgt->tgt_change_cnt;
3896 		mutex_exit(&pptr->port_mutex);
3897 
3898 		/*
3899 		 * The FCP_LUN_ONLINING flag is used in fcp_scsi_start()
3900 		 * to allow the device attach to occur when the device is
3901 		 * FCP_LUN_OFFLINE (so we don't reject the INQUIRY command
3902 		 * from the scsi_probe()).
3903 		 */
3904 		mutex_enter(&LUN_TGT->tgt_mutex);
3905 		plun->lun_state |= FCP_LUN_ONLINING;
3906 		mutex_exit(&LUN_TGT->tgt_mutex);
3907 
3908 		if (is_mpxio) {
3909 			mdi_devi_exit(pptr->port_dip, circ);
3910 		} else {
3911 			ndi_devi_exit(pptr->port_dip, circ);
3912 		}
3913 		devi_entered = 0;
3914 
3915 		*rval = fcp_pass_to_hp_and_wait(pptr, plun, cip,
3916 		    FCP_ONLINE, lcount, tcount, 0);
3917 
3918 		if (*rval != NDI_SUCCESS) {
3919 			/* Reset the FCP_LUN_ONLINING bit */
3920 			mutex_enter(&LUN_TGT->tgt_mutex);
3921 			plun->lun_state &= ~FCP_LUN_ONLINING;
3922 			mutex_exit(&LUN_TGT->tgt_mutex);
3923 			*rval = EIO;
3924 			break;
3925 		}
3926 		mutex_enter(&LUN_TGT->tgt_mutex);
3927 		plun->lun_state &= ~(FCP_LUN_OFFLINE | FCP_LUN_BUSY |
3928 		    FCP_LUN_ONLINING);
3929 		mutex_exit(&LUN_TGT->tgt_mutex);
3930 		break;
3931 	}
3932 
3933 	case DEVCTL_BUS_DEV_CREATE: {
3934 		uchar_t			*bytes = NULL;
3935 		uint_t			nbytes;
3936 		struct fcp_tgt		*ptgt = NULL;
3937 		struct fcp_lun		*plun = NULL;
3938 		dev_info_t		*useless_dip = NULL;
3939 
3940 		*rval = ndi_dc_devi_create(dcp, pptr->port_dip,
3941 		    DEVCTL_CONSTRUCT, &useless_dip);
3942 		if (*rval != 0 || useless_dip == NULL) {
3943 			break;
3944 		}
3945 
3946 		if ((ddi_prop_lookup_byte_array(DDI_DEV_T_ANY, useless_dip,
3947 		    DDI_PROP_DONTPASS | DDI_PROP_NOTPROM, PORT_WWN_PROP, &bytes,
3948 		    &nbytes) != DDI_PROP_SUCCESS) || nbytes != FC_WWN_SIZE) {
3949 			*rval = EINVAL;
3950 			(void) ndi_devi_free(useless_dip);
3951 			if (bytes != NULL) {
3952 				ddi_prop_free(bytes);
3953 			}
3954 			break;
3955 		}
3956 
3957 		*rval = fcp_create_on_demand(pptr, bytes);
3958 		if (*rval == 0) {
3959 			mutex_enter(&pptr->port_mutex);
3960 			ptgt = fcp_lookup_target(pptr, (uchar_t *)bytes);
3961 			if (ptgt) {
3962 				/*
3963 				 * We now have a pointer to the target that
3964 				 * was created. Lets point to the first LUN on
3965 				 * this new target.
3966 				 */
3967 				mutex_enter(&ptgt->tgt_mutex);
3968 
3969 				plun = ptgt->tgt_lun;
3970 				/*
3971 				 * There may be stale/offline LUN entries on
3972 				 * this list (this is by design) and so we have
3973 				 * to make sure we point to the first online
3974 				 * LUN
3975 				 */
3976 				while (plun &&
3977 				    plun->lun_state & FCP_LUN_OFFLINE) {
3978 					plun = plun->lun_next;
3979 				}
3980 
3981 				mutex_exit(&ptgt->tgt_mutex);
3982 			}
3983 			mutex_exit(&pptr->port_mutex);
3984 		}
3985 
3986 		if (*rval == 0 && ptgt && plun) {
3987 			mutex_enter(&plun->lun_mutex);
3988 			/*
3989 			 * Allow up to fcp_lun_ready_retry seconds to
3990 			 * configure all the luns behind the target.
3991 			 *
3992 			 * The intent here is to allow targets with long
3993 			 * reboot/reset-recovery times to become available
3994 			 * while limiting the maximum wait time for an
3995 			 * unresponsive target.
3996 			 */
3997 			end_time = ddi_get_lbolt() +
3998 			    SEC_TO_TICK(fcp_lun_ready_retry);
3999 
4000 			while (ddi_get_lbolt() < end_time) {
4001 				retval = FC_SUCCESS;
4002 
4003 				/*
4004 				 * The new ndi interfaces for on-demand creation
4005 				 * are inflexible, Do some more work to pass on
4006 				 * a path name of some LUN (design is broken !)
4007 				 */
4008 				if (plun->lun_cip) {
4009 					if (plun->lun_mpxio == 0) {
4010 						cdip = DIP(plun->lun_cip);
4011 					} else {
4012 						cdip = mdi_pi_get_client(
4013 						    PIP(plun->lun_cip));
4014 					}
4015 					if (cdip == NULL) {
4016 						*rval = ENXIO;
4017 						break;
4018 					}
4019 
4020 					if (!i_ddi_devi_attached(cdip)) {
4021 						mutex_exit(&plun->lun_mutex);
4022 						delay(drv_usectohz(1000000));
4023 						mutex_enter(&plun->lun_mutex);
4024 					} else {
4025 						/*
4026 						 * This Lun is ready, lets
4027 						 * check the next one.
4028 						 */
4029 						mutex_exit(&plun->lun_mutex);
4030 						plun = plun->lun_next;
4031 						while (plun && (plun->lun_state
4032 						    & FCP_LUN_OFFLINE)) {
4033 							plun = plun->lun_next;
4034 						}
4035 						if (!plun) {
4036 							break;
4037 						}
4038 						mutex_enter(&plun->lun_mutex);
4039 					}
4040 				} else {
4041 					/*
4042 					 * lun_cip field for a valid lun
4043 					 * should never be NULL. Fail the
4044 					 * command.
4045 					 */
4046 					*rval = ENXIO;
4047 					break;
4048 				}
4049 			}
4050 			if (plun) {
4051 				mutex_exit(&plun->lun_mutex);
4052 			} else {
4053 				char devnm[MAXNAMELEN];
4054 				int nmlen;
4055 
4056 				nmlen = snprintf(devnm, MAXNAMELEN, "%s@%s",
4057 				    ddi_node_name(cdip),
4058 				    ddi_get_name_addr(cdip));
4059 
4060 				if (copyout(&devnm, dcp->cpyout_buf, nmlen) !=
4061 				    0) {
4062 					*rval = EFAULT;
4063 				}
4064 			}
4065 		} else {
4066 			int	i;
4067 			char	buf[25];
4068 
4069 			for (i = 0; i < FC_WWN_SIZE; i++) {
4070 				(void) sprintf(&buf[i << 1], "%02x", bytes[i]);
4071 			}
4072 
4073 			fcp_log(CE_WARN, pptr->port_dip,
4074 			    "!Failed to create nodes for pwwn=%s; error=%x",
4075 			    buf, *rval);
4076 		}
4077 
4078 		(void) ndi_devi_free(useless_dip);
4079 		ddi_prop_free(bytes);
4080 		break;
4081 	}
4082 
4083 	case DEVCTL_DEVICE_RESET: {
4084 		struct fcp_lun	*plun;
4085 		struct scsi_address	ap;
4086 		child_info_t		*cip = CIP(cdip);
4087 
4088 		ASSERT(cdip != NULL);
4089 		ASSERT(pptr != NULL);
4090 		mutex_enter(&pptr->port_mutex);
4091 		if (pip != NULL) {
4092 			cip = CIP(pip);
4093 		}
4094 		if ((plun = fcp_get_lun_from_cip(pptr, cip)) == NULL) {
4095 			mutex_exit(&pptr->port_mutex);
4096 			*rval = ENXIO;
4097 			break;
4098 		}
4099 		mutex_exit(&pptr->port_mutex);
4100 
4101 		mutex_enter(&plun->lun_tgt->tgt_mutex);
4102 		if (!(plun->lun_state & FCP_SCSI_LUN_TGT_INIT)) {
4103 			mutex_exit(&plun->lun_tgt->tgt_mutex);
4104 			*rval = ENXIO;
4105 			break;
4106 		}
4107 		ap.a_hba_tran = plun->lun_tran;
4108 		ASSERT(pptr->port_tran != NULL);
4109 		mutex_exit(&plun->lun_tgt->tgt_mutex);
4110 
4111 		/*
4112 		 * There is a chance lun_tran is NULL at this point. So check
4113 		 * for it. If it is NULL, it basically means that the tgt has
4114 		 * been freed. So, just return a "No such device or address"
4115 		 * error.
4116 		 */
4117 		if (ap.a_hba_tran == NULL) {
4118 			*rval = ENXIO;
4119 			break;
4120 		}
4121 
4122 		/*
4123 		 * set up ap so that fcp_reset can figure out
4124 		 * which target to reset
4125 		 */
4126 		if (fcp_scsi_reset(&ap, RESET_TARGET) == FALSE) {
4127 			*rval = EIO;
4128 		}
4129 		break;
4130 	}
4131 
4132 	case DEVCTL_BUS_GETSTATE:
4133 		ASSERT(dcp != NULL);
4134 		ASSERT(pptr != NULL);
4135 		ASSERT(pptr->port_dip != NULL);
4136 		if (ndi_dc_return_bus_state(pptr->port_dip, dcp) !=
4137 		    NDI_SUCCESS) {
4138 			*rval = EFAULT;
4139 		}
4140 		break;
4141 
4142 	case DEVCTL_BUS_QUIESCE:
4143 	case DEVCTL_BUS_UNQUIESCE:
4144 		*rval = ENOTSUP;
4145 		break;
4146 
4147 	case DEVCTL_BUS_RESET:
4148 	case DEVCTL_BUS_RESETALL:
4149 		ASSERT(pptr != NULL);
4150 		(void) fcp_linkreset(pptr, NULL,  KM_SLEEP);
4151 		break;
4152 
4153 	default:
4154 		ASSERT(dcp != NULL);
4155 		*rval = ENOTTY;
4156 		break;
4157 	}
4158 
4159 	/* all done -- clean up and return */
4160 out:	if (devi_entered) {
4161 		if (is_mpxio) {
4162 			mdi_devi_exit(pptr->port_dip, circ);
4163 		} else {
4164 			ndi_devi_exit(pptr->port_dip, circ);
4165 		}
4166 	}
4167 
4168 	if (dcp != NULL) {
4169 		ndi_dc_freehdl(dcp);
4170 	}
4171 
4172 	return (retval);
4173 }
4174 
4175 
4176 /*ARGSUSED*/
4177 static int
4178 fcp_els_callback(opaque_t ulph, opaque_t port_handle, fc_unsol_buf_t *buf,
4179     uint32_t claimed)
4180 {
4181 	uchar_t			r_ctl;
4182 	uchar_t			ls_code;
4183 	struct fcp_port	*pptr;
4184 
4185 	if ((pptr = fcp_get_port(port_handle)) == NULL || claimed) {
4186 		return (FC_UNCLAIMED);
4187 	}
4188 
4189 	mutex_enter(&pptr->port_mutex);
4190 	if (pptr->port_state & (FCP_STATE_DETACHING |
4191 	    FCP_STATE_SUSPENDED | FCP_STATE_POWER_DOWN)) {
4192 		mutex_exit(&pptr->port_mutex);
4193 		return (FC_UNCLAIMED);
4194 	}
4195 	mutex_exit(&pptr->port_mutex);
4196 
4197 	r_ctl = buf->ub_frame.r_ctl;
4198 
4199 	switch (r_ctl & R_CTL_ROUTING) {
4200 	case R_CTL_EXTENDED_SVC:
4201 		if (r_ctl == R_CTL_ELS_REQ) {
4202 			ls_code = buf->ub_buffer[0];
4203 
4204 			switch (ls_code) {
4205 			case LA_ELS_PRLI:
4206 				/*
4207 				 * We really don't care if something fails.
4208 				 * If the PRLI was not sent out, then the
4209 				 * other end will time it out.
4210 				 */
4211 				if (fcp_unsol_prli(pptr, buf) == FC_SUCCESS) {
4212 					return (FC_SUCCESS);
4213 				}
4214 				return (FC_UNCLAIMED);
4215 				/* NOTREACHED */
4216 
4217 			default:
4218 				break;
4219 			}
4220 		}
4221 		/* FALLTHROUGH */
4222 
4223 	default:
4224 		return (FC_UNCLAIMED);
4225 	}
4226 }
4227 
4228 
4229 /*ARGSUSED*/
4230 static int
4231 fcp_data_callback(opaque_t ulph, opaque_t port_handle, fc_unsol_buf_t *buf,
4232     uint32_t claimed)
4233 {
4234 	return (FC_UNCLAIMED);
4235 }
4236 
4237 /*
4238  *     Function: fcp_statec_callback
4239  *
4240  *  Description: The purpose of this function is to handle a port state change.
4241  *		 It is called from fp/fctl and, in a few instances, internally.
4242  *
4243  *     Argument: ulph		fp/fctl port handle
4244  *		 port_handle	fcp_port structure
4245  *		 port_state	Physical state of the port
4246  *		 port_top	Topology
4247  *		 *devlist	Pointer to the first entry of a table
4248  *				containing the remote ports that can be
4249  *				reached.
4250  *		 dev_cnt	Number of entries pointed by devlist.
4251  *		 port_sid	Port ID of the local port.
4252  *
4253  * Return Value: None
4254  */
4255 /*ARGSUSED*/
4256 static void
4257 fcp_statec_callback(opaque_t ulph, opaque_t port_handle,
4258     uint32_t port_state, uint32_t port_top, fc_portmap_t *devlist,
4259     uint32_t dev_cnt, uint32_t port_sid)
4260 {
4261 	uint32_t		link_count;
4262 	int			map_len = 0;
4263 	struct fcp_port	*pptr;
4264 	fcp_map_tag_t		*map_tag = NULL;
4265 
4266 	if ((pptr = fcp_get_port(port_handle)) == NULL) {
4267 		fcp_log(CE_WARN, NULL, "!Invalid port handle in callback");
4268 		return;			/* nothing to work with! */
4269 	}
4270 
4271 	FCP_TRACE(fcp_logq, pptr->port_instbuf,
4272 	    fcp_trace, FCP_BUF_LEVEL_2, 0,
4273 	    "fcp_statec_callback: port state/dev_cnt/top ="
4274 	    "%d/%d/%d", FC_PORT_STATE_MASK(port_state),
4275 	    dev_cnt, port_top);
4276 
4277 	mutex_enter(&pptr->port_mutex);
4278 
4279 	/*
4280 	 * If a thread is in detach, don't do anything.
4281 	 */
4282 	if (pptr->port_state & (FCP_STATE_DETACHING |
4283 	    FCP_STATE_SUSPENDED | FCP_STATE_POWER_DOWN)) {
4284 		mutex_exit(&pptr->port_mutex);
4285 		return;
4286 	}
4287 
4288 	/*
4289 	 * First thing we do is set the FCP_STATE_IN_CB_DEVC flag so that if
4290 	 * init_pkt is called, it knows whether or not the target's status
4291 	 * (or pd) might be changing.
4292 	 */
4293 
4294 	if (FC_PORT_STATE_MASK(port_state) == FC_STATE_DEVICE_CHANGE) {
4295 		pptr->port_state |= FCP_STATE_IN_CB_DEVC;
4296 	}
4297 
4298 	/*
4299 	 * the transport doesn't allocate or probe unless being
4300 	 * asked to by either the applications or ULPs
4301 	 *
4302 	 * in cases where the port is OFFLINE at the time of port
4303 	 * attach callback and the link comes ONLINE later, for
4304 	 * easier automatic node creation (i.e. without you having to
4305 	 * go out and run the utility to perform LOGINs) the
4306 	 * following conditional is helpful
4307 	 */
4308 	pptr->port_phys_state = port_state;
4309 
4310 	if (dev_cnt) {
4311 		mutex_exit(&pptr->port_mutex);
4312 
4313 		map_len = sizeof (*map_tag) * dev_cnt;
4314 		map_tag = kmem_alloc(map_len, KM_NOSLEEP);
4315 		if (map_tag == NULL) {
4316 			fcp_log(CE_WARN, pptr->port_dip,
4317 			    "!fcp%d: failed to allocate for map tags; "
4318 			    " state change will not be processed",
4319 			    pptr->port_instance);
4320 
4321 			mutex_enter(&pptr->port_mutex);
4322 			pptr->port_state &= ~FCP_STATE_IN_CB_DEVC;
4323 			mutex_exit(&pptr->port_mutex);
4324 
4325 			return;
4326 		}
4327 
4328 		mutex_enter(&pptr->port_mutex);
4329 	}
4330 
4331 	if (pptr->port_id != port_sid) {
4332 		FCP_TRACE(fcp_logq, pptr->port_instbuf,
4333 		    fcp_trace, FCP_BUF_LEVEL_3, 0,
4334 		    "fcp: Port S_ID=0x%x => 0x%x", pptr->port_id,
4335 		    port_sid);
4336 		/*
4337 		 * The local port changed ID. It is the first time a port ID
4338 		 * is assigned or something drastic happened.  We might have
4339 		 * been unplugged and replugged on another loop or fabric port
4340 		 * or somebody grabbed the AL_PA we had or somebody rezoned
4341 		 * the fabric we were plugged into.
4342 		 */
4343 		pptr->port_id = port_sid;
4344 	}
4345 
4346 	switch (FC_PORT_STATE_MASK(port_state)) {
4347 	case FC_STATE_OFFLINE:
4348 	case FC_STATE_RESET_REQUESTED:
4349 		/*
4350 		 * link has gone from online to offline -- just update the
4351 		 * state of this port to BUSY and MARKed to go offline
4352 		 */
4353 		FCP_TRACE(fcp_logq, pptr->port_instbuf,
4354 		    fcp_trace, FCP_BUF_LEVEL_3, 0,
4355 		    "link went offline");
4356 		if ((pptr->port_state & FCP_STATE_OFFLINE) && dev_cnt) {
4357 			/*
4358 			 * We were offline a while ago and this one
4359 			 * seems to indicate that the loop has gone
4360 			 * dead forever.
4361 			 */
4362 			pptr->port_tmp_cnt += dev_cnt;
4363 			pptr->port_state &= ~FCP_STATE_OFFLINE;
4364 			pptr->port_state |= FCP_STATE_INIT;
4365 			link_count = pptr->port_link_cnt;
4366 			fcp_handle_devices(pptr, devlist, dev_cnt,
4367 			    link_count, map_tag, FCP_CAUSE_LINK_DOWN);
4368 		} else {
4369 			pptr->port_link_cnt++;
4370 			ASSERT(!(pptr->port_state & FCP_STATE_SUSPENDED));
4371 			fcp_update_state(pptr, (FCP_LUN_BUSY |
4372 			    FCP_LUN_MARK), FCP_CAUSE_LINK_DOWN);
4373 			if (pptr->port_mpxio) {
4374 				fcp_update_mpxio_path_verifybusy(pptr);
4375 			}
4376 			pptr->port_state |= FCP_STATE_OFFLINE;
4377 			pptr->port_state &=
4378 			    ~(FCP_STATE_ONLINING | FCP_STATE_ONLINE);
4379 			pptr->port_tmp_cnt = 0;
4380 		}
4381 		mutex_exit(&pptr->port_mutex);
4382 		break;
4383 
4384 	case FC_STATE_ONLINE:
4385 	case FC_STATE_LIP:
4386 	case FC_STATE_LIP_LBIT_SET:
4387 		/*
4388 		 * link has gone from offline to online
4389 		 */
4390 		FCP_TRACE(fcp_logq, pptr->port_instbuf,
4391 		    fcp_trace, FCP_BUF_LEVEL_3, 0,
4392 		    "link went online");
4393 
4394 		pptr->port_link_cnt++;
4395 
4396 		while (pptr->port_ipkt_cnt) {
4397 			mutex_exit(&pptr->port_mutex);
4398 			delay(drv_usectohz(1000000));
4399 			mutex_enter(&pptr->port_mutex);
4400 		}
4401 
4402 		pptr->port_topology = port_top;
4403 
4404 		/*
4405 		 * The state of the targets and luns accessible through this
4406 		 * port is updated.
4407 		 */
4408 		fcp_update_state(pptr, FCP_LUN_BUSY | FCP_LUN_MARK,
4409 		    FCP_CAUSE_LINK_CHANGE);
4410 
4411 		pptr->port_state &= ~(FCP_STATE_INIT | FCP_STATE_OFFLINE);
4412 		pptr->port_state |= FCP_STATE_ONLINING;
4413 		pptr->port_tmp_cnt = dev_cnt;
4414 		link_count = pptr->port_link_cnt;
4415 
4416 		pptr->port_deadline = fcp_watchdog_time +
4417 		    FCP_ICMD_DEADLINE;
4418 
4419 		if (!dev_cnt) {
4420 			/*
4421 			 * We go directly to the online state if no remote
4422 			 * ports were discovered.
4423 			 */
4424 			FCP_TRACE(fcp_logq, pptr->port_instbuf,
4425 			    fcp_trace, FCP_BUF_LEVEL_3, 0,
4426 			    "No remote ports discovered");
4427 
4428 			pptr->port_state &= ~FCP_STATE_ONLINING;
4429 			pptr->port_state |= FCP_STATE_ONLINE;
4430 		}
4431 
4432 		switch (port_top) {
4433 		case FC_TOP_FABRIC:
4434 		case FC_TOP_PUBLIC_LOOP:
4435 		case FC_TOP_PRIVATE_LOOP:
4436 		case FC_TOP_PT_PT:
4437 
4438 			if (pptr->port_state & FCP_STATE_NS_REG_FAILED) {
4439 				fcp_retry_ns_registry(pptr, port_sid);
4440 			}
4441 
4442 			fcp_handle_devices(pptr, devlist, dev_cnt, link_count,
4443 			    map_tag, FCP_CAUSE_LINK_CHANGE);
4444 			break;
4445 
4446 		default:
4447 			/*
4448 			 * We got here because we were provided with an unknown
4449 			 * topology.
4450 			 */
4451 			if (pptr->port_state & FCP_STATE_NS_REG_FAILED) {
4452 				pptr->port_state &= ~FCP_STATE_NS_REG_FAILED;
4453 			}
4454 
4455 			pptr->port_tmp_cnt -= dev_cnt;
4456 			fcp_log(CE_WARN, pptr->port_dip,
4457 			    "!unknown/unsupported topology (0x%x)", port_top);
4458 			break;
4459 		}
4460 		FCP_TRACE(fcp_logq, pptr->port_instbuf,
4461 		    fcp_trace, FCP_BUF_LEVEL_3, 0,
4462 		    "Notify ssd of the reset to reinstate the reservations");
4463 
4464 		scsi_hba_reset_notify_callback(&pptr->port_mutex,
4465 		    &pptr->port_reset_notify_listf);
4466 
4467 		mutex_exit(&pptr->port_mutex);
4468 
4469 		break;
4470 
4471 	case FC_STATE_RESET:
4472 		ASSERT(pptr->port_state & FCP_STATE_OFFLINE);
4473 		FCP_TRACE(fcp_logq, pptr->port_instbuf,
4474 		    fcp_trace, FCP_BUF_LEVEL_3, 0,
4475 		    "RESET state, waiting for Offline/Online state_cb");
4476 		mutex_exit(&pptr->port_mutex);
4477 		break;
4478 
4479 	case FC_STATE_DEVICE_CHANGE:
4480 		/*
4481 		 * We come here when an application has requested
4482 		 * Dynamic node creation/deletion in Fabric connectivity.
4483 		 */
4484 		if (pptr->port_state & (FCP_STATE_OFFLINE |
4485 		    FCP_STATE_INIT)) {
4486 			/*
4487 			 * This case can happen when the FCTL is in the
4488 			 * process of giving us on online and the host on
4489 			 * the other side issues a PLOGI/PLOGO. Ideally
4490 			 * the state changes should be serialized unless
4491 			 * they are opposite (online-offline).
4492 			 * The transport will give us a final state change
4493 			 * so we can ignore this for the time being.
4494 			 */
4495 			pptr->port_state &= ~FCP_STATE_IN_CB_DEVC;
4496 			mutex_exit(&pptr->port_mutex);
4497 			break;
4498 		}
4499 
4500 		if (pptr->port_state & FCP_STATE_NS_REG_FAILED) {
4501 			fcp_retry_ns_registry(pptr, port_sid);
4502 		}
4503 
4504 		/*
4505 		 * Extend the deadline under steady state conditions
4506 		 * to provide more time for the device-change-commands
4507 		 */
4508 		if (!pptr->port_ipkt_cnt) {
4509 			pptr->port_deadline = fcp_watchdog_time +
4510 			    FCP_ICMD_DEADLINE;
4511 		}
4512 
4513 		/*
4514 		 * There is another race condition here, where if we were
4515 		 * in ONLINEING state and a devices in the map logs out,
4516 		 * fp will give another state change as DEVICE_CHANGE
4517 		 * and OLD. This will result in that target being offlined.
4518 		 * The pd_handle is freed. If from the first statec callback
4519 		 * we were going to fire a PLOGI/PRLI, the system will
4520 		 * panic in fc_ulp_transport with invalid pd_handle.
4521 		 * The fix is to check for the link_cnt before issuing
4522 		 * any command down.
4523 		 */
4524 		fcp_update_targets(pptr, devlist, dev_cnt,
4525 		    FCP_LUN_BUSY | FCP_LUN_MARK, FCP_CAUSE_TGT_CHANGE);
4526 
4527 		link_count = pptr->port_link_cnt;
4528 
4529 		fcp_handle_devices(pptr, devlist, dev_cnt,
4530 		    link_count, map_tag, FCP_CAUSE_TGT_CHANGE);
4531 
4532 		pptr->port_state &= ~FCP_STATE_IN_CB_DEVC;
4533 
4534 		mutex_exit(&pptr->port_mutex);
4535 		break;
4536 
4537 	case FC_STATE_TARGET_PORT_RESET:
4538 		if (pptr->port_state & FCP_STATE_NS_REG_FAILED) {
4539 			fcp_retry_ns_registry(pptr, port_sid);
4540 		}
4541 
4542 		/* Do nothing else */
4543 		mutex_exit(&pptr->port_mutex);
4544 		break;
4545 
4546 	default:
4547 		fcp_log(CE_WARN, pptr->port_dip,
4548 		    "!Invalid state change=0x%x", port_state);
4549 		mutex_exit(&pptr->port_mutex);
4550 		break;
4551 	}
4552 
4553 	if (map_tag) {
4554 		kmem_free(map_tag, map_len);
4555 	}
4556 }
4557 
4558 /*
4559  *     Function: fcp_handle_devices
4560  *
4561  *  Description: This function updates the devices currently known by
4562  *		 walking the list provided by the caller.  The list passed
4563  *		 by the caller is supposed to be the list of reachable
4564  *		 devices.
4565  *
4566  *     Argument: *pptr		Fcp port structure.
4567  *		 *devlist	Pointer to the first entry of a table
4568  *				containing the remote ports that can be
4569  *				reached.
4570  *		 dev_cnt	Number of entries pointed by devlist.
4571  *		 link_cnt	Link state count.
4572  *		 *map_tag	Array of fcp_map_tag_t structures.
4573  *		 cause		What caused this function to be called.
4574  *
4575  * Return Value: None
4576  *
4577  *	  Notes: The pptr->port_mutex must be held.
4578  */
4579 static void
4580 fcp_handle_devices(struct fcp_port *pptr, fc_portmap_t devlist[],
4581     uint32_t dev_cnt, int link_cnt, fcp_map_tag_t *map_tag, int cause)
4582 {
4583 	int			i;
4584 	int			check_finish_init = 0;
4585 	fc_portmap_t		*map_entry;
4586 	struct fcp_tgt	*ptgt = NULL;
4587 
4588 	FCP_TRACE(fcp_logq, pptr->port_instbuf,
4589 	    fcp_trace, FCP_BUF_LEVEL_3, 0,
4590 	    "fcp_handle_devices: called for %d dev(s)", dev_cnt);
4591 
4592 	if (dev_cnt) {
4593 		ASSERT(map_tag != NULL);
4594 	}
4595 
4596 	/*
4597 	 * The following code goes through the list of remote ports that are
4598 	 * accessible through this (pptr) local port (The list walked is the
4599 	 * one provided by the caller which is the list of the remote ports
4600 	 * currently reachable).  It checks if any of them was already
4601 	 * known by looking for the corresponding target structure based on
4602 	 * the world wide name.	 If a target is part of the list it is tagged
4603 	 * (ptgt->tgt_aux_state = FCP_TGT_TAGGED).
4604 	 *
4605 	 * Old comment
4606 	 * -----------
4607 	 * Before we drop port mutex; we MUST get the tags updated; This
4608 	 * two step process is somewhat slow, but more reliable.
4609 	 */
4610 	for (i = 0; (i < dev_cnt) && (pptr->port_link_cnt == link_cnt); i++) {
4611 		map_entry = &(devlist[i]);
4612 
4613 		/*
4614 		 * get ptr to this map entry in our port's
4615 		 * list (if any)
4616 		 */
4617 		ptgt = fcp_lookup_target(pptr,
4618 		    (uchar_t *)&(map_entry->map_pwwn));
4619 
4620 		if (ptgt) {
4621 			map_tag[i] = ptgt->tgt_change_cnt;
4622 			if (cause == FCP_CAUSE_LINK_CHANGE) {
4623 				ptgt->tgt_aux_state = FCP_TGT_TAGGED;
4624 			}
4625 		}
4626 	}
4627 
4628 	/*
4629 	 * At this point we know which devices of the new list were already
4630 	 * known (The field tgt_aux_state of the target structure has been
4631 	 * set to FCP_TGT_TAGGED).
4632 	 *
4633 	 * The following code goes through the list of targets currently known
4634 	 * by the local port (the list is actually a hashing table).  If a
4635 	 * target is found and is not tagged, it means the target cannot
4636 	 * be reached anymore through the local port (pptr).  It is offlined.
4637 	 * The offlining only occurs if the cause is FCP_CAUSE_LINK_CHANGE.
4638 	 */
4639 	for (i = 0; i < FCP_NUM_HASH; i++) {
4640 		for (ptgt = pptr->port_tgt_hash_table[i]; ptgt != NULL;
4641 		    ptgt = ptgt->tgt_next) {
4642 			mutex_enter(&ptgt->tgt_mutex);
4643 			if ((ptgt->tgt_aux_state != FCP_TGT_TAGGED) &&
4644 			    (cause == FCP_CAUSE_LINK_CHANGE) &&
4645 			    !(ptgt->tgt_state & FCP_TGT_OFFLINE)) {
4646 				fcp_offline_target_now(pptr, ptgt,
4647 				    link_cnt, ptgt->tgt_change_cnt, 0);
4648 			}
4649 			mutex_exit(&ptgt->tgt_mutex);
4650 		}
4651 	}
4652 
4653 	/*
4654 	 * At this point, the devices that were known but cannot be reached
4655 	 * anymore, have most likely been offlined.
4656 	 *
4657 	 * The following section of code seems to go through the list of
4658 	 * remote ports that can now be reached.  For every single one it
4659 	 * checks if it is already known or if it is a new port.
4660 	 */
4661 	for (i = 0; (i < dev_cnt) && (pptr->port_link_cnt == link_cnt); i++) {
4662 
4663 		if (check_finish_init) {
4664 			ASSERT(i > 0);
4665 			(void) fcp_call_finish_init_held(pptr, ptgt, link_cnt,
4666 			    map_tag[i - 1], cause);
4667 			check_finish_init = 0;
4668 		}
4669 
4670 		/* get a pointer to this map entry */
4671 		map_entry = &(devlist[i]);
4672 
4673 		/*
4674 		 * Check for the duplicate map entry flag. If we have marked
4675 		 * this entry as a duplicate we skip it since the correct
4676 		 * (perhaps even same) state change will be encountered
4677 		 * later in the list.
4678 		 */
4679 		if (map_entry->map_flags & PORT_DEVICE_DUPLICATE_MAP_ENTRY) {
4680 			continue;
4681 		}
4682 
4683 		/* get ptr to this map entry in our port's list (if any) */
4684 		ptgt = fcp_lookup_target(pptr,
4685 		    (uchar_t *)&(map_entry->map_pwwn));
4686 
4687 		if (ptgt) {
4688 			/*
4689 			 * This device was already known.  The field
4690 			 * tgt_aux_state is reset (was probably set to
4691 			 * FCP_TGT_TAGGED previously in this routine).
4692 			 */
4693 			ptgt->tgt_aux_state = 0;
4694 			FCP_TRACE(fcp_logq, pptr->port_instbuf,
4695 			    fcp_trace, FCP_BUF_LEVEL_3, 0,
4696 			    "handle_devices: map did/state/type/flags = "
4697 			    "0x%x/0x%x/0x%x/0x%x, tgt_d_id=0x%x, "
4698 			    "tgt_state=%d",
4699 			    map_entry->map_did.port_id, map_entry->map_state,
4700 			    map_entry->map_type, map_entry->map_flags,
4701 			    ptgt->tgt_d_id, ptgt->tgt_state);
4702 		}
4703 
4704 		if (map_entry->map_type == PORT_DEVICE_OLD ||
4705 		    map_entry->map_type == PORT_DEVICE_NEW ||
4706 		    map_entry->map_type == PORT_DEVICE_CHANGED) {
4707 			FCP_TRACE(fcp_logq, pptr->port_instbuf,
4708 			    fcp_trace, FCP_BUF_LEVEL_2, 0,
4709 			    "map_type=%x, did = %x",
4710 			    map_entry->map_type,
4711 			    map_entry->map_did.port_id);
4712 		}
4713 
4714 		switch (map_entry->map_type) {
4715 		case PORT_DEVICE_NOCHANGE:
4716 		case PORT_DEVICE_USER_CREATE:
4717 		case PORT_DEVICE_USER_LOGIN:
4718 		case PORT_DEVICE_NEW:
4719 			FCP_TGT_TRACE(ptgt, map_tag[i], FCP_TGT_TRACE_1);
4720 
4721 			if (fcp_handle_mapflags(pptr, ptgt, map_entry,
4722 			    link_cnt, (ptgt) ? map_tag[i] : 0,
4723 			    cause) == TRUE) {
4724 
4725 				FCP_TGT_TRACE(ptgt, map_tag[i],
4726 				    FCP_TGT_TRACE_2);
4727 				check_finish_init++;
4728 			}
4729 			break;
4730 
4731 		case PORT_DEVICE_OLD:
4732 			if (ptgt != NULL) {
4733 				FCP_TGT_TRACE(ptgt, map_tag[i],
4734 				    FCP_TGT_TRACE_3);
4735 
4736 				mutex_enter(&ptgt->tgt_mutex);
4737 				if (!(ptgt->tgt_state & FCP_TGT_OFFLINE)) {
4738 					/*
4739 					 * Must do an in-line wait for I/Os
4740 					 * to get drained
4741 					 */
4742 					mutex_exit(&ptgt->tgt_mutex);
4743 					mutex_exit(&pptr->port_mutex);
4744 
4745 					mutex_enter(&ptgt->tgt_mutex);
4746 					while (ptgt->tgt_ipkt_cnt ||
4747 					    fcp_outstanding_lun_cmds(ptgt)
4748 					    == FC_SUCCESS) {
4749 						mutex_exit(&ptgt->tgt_mutex);
4750 						delay(drv_usectohz(1000000));
4751 						mutex_enter(&ptgt->tgt_mutex);
4752 					}
4753 					mutex_exit(&ptgt->tgt_mutex);
4754 
4755 					mutex_enter(&pptr->port_mutex);
4756 					mutex_enter(&ptgt->tgt_mutex);
4757 
4758 					(void) fcp_offline_target(pptr, ptgt,
4759 					    link_cnt, map_tag[i], 0, 0);
4760 				}
4761 				mutex_exit(&ptgt->tgt_mutex);
4762 			}
4763 			check_finish_init++;
4764 			break;
4765 
4766 		case PORT_DEVICE_USER_DELETE:
4767 		case PORT_DEVICE_USER_LOGOUT:
4768 			if (ptgt != NULL) {
4769 				FCP_TGT_TRACE(ptgt, map_tag[i],
4770 				    FCP_TGT_TRACE_4);
4771 
4772 				mutex_enter(&ptgt->tgt_mutex);
4773 				if (!(ptgt->tgt_state & FCP_TGT_OFFLINE)) {
4774 					(void) fcp_offline_target(pptr, ptgt,
4775 					    link_cnt, map_tag[i], 1, 0);
4776 				}
4777 				mutex_exit(&ptgt->tgt_mutex);
4778 			}
4779 			check_finish_init++;
4780 			break;
4781 
4782 		case PORT_DEVICE_CHANGED:
4783 			if (ptgt != NULL) {
4784 				FCP_TGT_TRACE(ptgt, map_tag[i],
4785 				    FCP_TGT_TRACE_5);
4786 
4787 				if (fcp_device_changed(pptr, ptgt,
4788 				    map_entry, link_cnt, map_tag[i],
4789 				    cause) == TRUE) {
4790 					check_finish_init++;
4791 				}
4792 			} else {
4793 				if (fcp_handle_mapflags(pptr, ptgt,
4794 				    map_entry, link_cnt, 0, cause) == TRUE) {
4795 					check_finish_init++;
4796 				}
4797 			}
4798 			break;
4799 
4800 		default:
4801 			fcp_log(CE_WARN, pptr->port_dip,
4802 			    "!Invalid map_type=0x%x", map_entry->map_type);
4803 			check_finish_init++;
4804 			break;
4805 		}
4806 	}
4807 
4808 	if (check_finish_init && pptr->port_link_cnt == link_cnt) {
4809 		ASSERT(i > 0);
4810 		(void) fcp_call_finish_init_held(pptr, ptgt, link_cnt,
4811 		    map_tag[i-1], cause);
4812 	} else if (dev_cnt == 0 && pptr->port_link_cnt == link_cnt) {
4813 		fcp_offline_all(pptr, link_cnt, cause);
4814 	}
4815 }
4816 
4817 /*
4818  *     Function: fcp_handle_mapflags
4819  *
4820  *  Description: This function creates a target structure if the ptgt passed
4821  *		 is NULL.  It also kicks off the PLOGI if we are not logged
4822  *		 into the target yet or the PRLI if we are logged into the
4823  *		 target already.  The rest of the treatment is done in the
4824  *		 callbacks of the PLOGI or PRLI.
4825  *
4826  *     Argument: *pptr		FCP Port structure.
4827  *		 *ptgt		Target structure.
4828  *		 *map_entry	Array of fc_portmap_t structures.
4829  *		 link_cnt	Link state count.
4830  *		 tgt_cnt	Target state count.
4831  *		 cause		What caused this function to be called.
4832  *
4833  * Return Value: TRUE	Failed
4834  *		 FALSE	Succeeded
4835  *
4836  *	  Notes: pptr->port_mutex must be owned.
4837  */
4838 static int
4839 fcp_handle_mapflags(struct fcp_port	*pptr, struct fcp_tgt	*ptgt,
4840     fc_portmap_t *map_entry, int link_cnt, int tgt_cnt, int cause)
4841 {
4842 	int			lcount;
4843 	int			tcount;
4844 	int			ret = TRUE;
4845 	int			alloc;
4846 	struct fcp_ipkt	*icmd;
4847 	struct fcp_lun	*pseq_lun = NULL;
4848 	uchar_t			opcode;
4849 	int			valid_ptgt_was_passed = FALSE;
4850 
4851 	ASSERT(mutex_owned(&pptr->port_mutex));
4852 
4853 	/*
4854 	 * This case is possible where the FCTL has come up and done discovery
4855 	 * before FCP was loaded and attached. FCTL would have discovered the
4856 	 * devices and later the ULP came online. In this case ULP's would get
4857 	 * PORT_DEVICE_NOCHANGE but target would be NULL.
4858 	 */
4859 	if (ptgt == NULL) {
4860 		/* don't already have a target */
4861 		mutex_exit(&pptr->port_mutex);
4862 		ptgt = fcp_alloc_tgt(pptr, map_entry, link_cnt);
4863 		mutex_enter(&pptr->port_mutex);
4864 
4865 		if (ptgt == NULL) {
4866 			fcp_log(CE_WARN, pptr->port_dip,
4867 			    "!FC target allocation failed");
4868 			return (ret);
4869 		}
4870 		mutex_enter(&ptgt->tgt_mutex);
4871 		ptgt->tgt_statec_cause = cause;
4872 		ptgt->tgt_tmp_cnt = 1;
4873 		mutex_exit(&ptgt->tgt_mutex);
4874 	} else {
4875 		valid_ptgt_was_passed = TRUE;
4876 	}
4877 
4878 	/*
4879 	 * Copy in the target parameters
4880 	 */
4881 	mutex_enter(&ptgt->tgt_mutex);
4882 	ptgt->tgt_d_id = map_entry->map_did.port_id;
4883 	ptgt->tgt_hard_addr = map_entry->map_hard_addr.hard_addr;
4884 	ptgt->tgt_pd_handle = map_entry->map_pd;
4885 	ptgt->tgt_fca_dev = NULL;
4886 
4887 	/* Copy port and node WWNs */
4888 	bcopy(&map_entry->map_nwwn, &ptgt->tgt_node_wwn.raw_wwn[0],
4889 	    FC_WWN_SIZE);
4890 	bcopy(&map_entry->map_pwwn, &ptgt->tgt_port_wwn.raw_wwn[0],
4891 	    FC_WWN_SIZE);
4892 
4893 	if (!(map_entry->map_flags & PORT_DEVICE_NO_SKIP_DEVICE_DISCOVERY) &&
4894 	    (map_entry->map_type == PORT_DEVICE_NOCHANGE) &&
4895 	    (map_entry->map_state == PORT_DEVICE_LOGGED_IN) &&
4896 	    valid_ptgt_was_passed) {
4897 		/*
4898 		 * determine if there are any tape LUNs on this target
4899 		 */
4900 		for (pseq_lun = ptgt->tgt_lun;
4901 		    pseq_lun != NULL;
4902 		    pseq_lun = pseq_lun->lun_next) {
4903 			if ((pseq_lun->lun_type == DTYPE_SEQUENTIAL) &&
4904 			    !(pseq_lun->lun_state & FCP_LUN_OFFLINE)) {
4905 				fcp_update_tgt_state(ptgt, FCP_RESET,
4906 				    FCP_LUN_MARK);
4907 				mutex_exit(&ptgt->tgt_mutex);
4908 				return (ret);
4909 			}
4910 		}
4911 	}
4912 
4913 	/*
4914 	 * If ptgt was NULL when this function was entered, then tgt_node_state
4915 	 * was never specifically initialized but zeroed out which means
4916 	 * FCP_TGT_NODE_NONE.
4917 	 */
4918 	switch (ptgt->tgt_node_state) {
4919 	case FCP_TGT_NODE_NONE:
4920 	case FCP_TGT_NODE_ON_DEMAND:
4921 		if (FC_TOP_EXTERNAL(pptr->port_topology) &&
4922 		    !fcp_enable_auto_configuration &&
4923 		    map_entry->map_type != PORT_DEVICE_USER_CREATE) {
4924 			ptgt->tgt_node_state = FCP_TGT_NODE_ON_DEMAND;
4925 		} else if (FC_TOP_EXTERNAL(pptr->port_topology) &&
4926 		    fcp_enable_auto_configuration &&
4927 		    (ptgt->tgt_manual_config_only == 1) &&
4928 		    map_entry->map_type != PORT_DEVICE_USER_CREATE) {
4929 			/*
4930 			 * If auto configuration is set and
4931 			 * the tgt_manual_config_only flag is set then
4932 			 * we only want the user to be able to change
4933 			 * the state through create_on_demand.
4934 			 */
4935 			ptgt->tgt_node_state = FCP_TGT_NODE_ON_DEMAND;
4936 		} else {
4937 			ptgt->tgt_node_state = FCP_TGT_NODE_NONE;
4938 		}
4939 		break;
4940 
4941 	case FCP_TGT_NODE_PRESENT:
4942 		break;
4943 	}
4944 	/*
4945 	 * If we are booting from a fabric device, make sure we
4946 	 * mark the node state appropriately for this target to be
4947 	 * enumerated
4948 	 */
4949 	if (FC_TOP_EXTERNAL(pptr->port_topology) && pptr->port_boot_wwn[0]) {
4950 		if (bcmp((caddr_t)pptr->port_boot_wwn,
4951 		    (caddr_t)&ptgt->tgt_port_wwn.raw_wwn[0],
4952 		    sizeof (ptgt->tgt_port_wwn)) == 0) {
4953 			ptgt->tgt_node_state = FCP_TGT_NODE_NONE;
4954 		}
4955 	}
4956 	mutex_exit(&ptgt->tgt_mutex);
4957 
4958 	FCP_TRACE(fcp_logq, pptr->port_instbuf,
4959 	    fcp_trace, FCP_BUF_LEVEL_3, 0,
4960 	    "map_pd=%p, map_type=%x, did = %x, ulp_rscn_count=0x%x",
4961 	    map_entry->map_pd, map_entry->map_type, map_entry->map_did.port_id,
4962 	    map_entry->map_rscn_info.ulp_rscn_count);
4963 
4964 	mutex_enter(&ptgt->tgt_mutex);
4965 
4966 	/*
4967 	 * Reset target OFFLINE state and mark the target BUSY
4968 	 */
4969 	ptgt->tgt_state &= ~FCP_TGT_OFFLINE;
4970 	ptgt->tgt_state |= (FCP_TGT_BUSY | FCP_TGT_MARK);
4971 
4972 	tcount = tgt_cnt ? tgt_cnt : ptgt->tgt_change_cnt;
4973 	lcount = link_cnt;
4974 
4975 	mutex_exit(&ptgt->tgt_mutex);
4976 	mutex_exit(&pptr->port_mutex);
4977 
4978 	/*
4979 	 * if we are already logged in, then we do a PRLI, else
4980 	 * we do a PLOGI first (to get logged in)
4981 	 *
4982 	 * We will not check if we are the PLOGI initiator
4983 	 */
4984 	opcode = (map_entry->map_state == PORT_DEVICE_LOGGED_IN &&
4985 	    map_entry->map_pd != NULL) ? LA_ELS_PRLI : LA_ELS_PLOGI;
4986 
4987 	alloc = FCP_MAX(sizeof (la_els_logi_t), sizeof (la_els_prli_t));
4988 
4989 	icmd = fcp_icmd_alloc(pptr, ptgt, alloc, alloc, 0, 0, lcount, tcount,
4990 	    cause, map_entry->map_rscn_info.ulp_rscn_count);
4991 
4992 	if (icmd == NULL) {
4993 		FCP_TGT_TRACE(ptgt, tgt_cnt, FCP_TGT_TRACE_29);
4994 		/*
4995 		 * We've exited port_mutex before calling fcp_icmd_alloc,
4996 		 * we need to make sure we reacquire it before returning.
4997 		 */
4998 		mutex_enter(&pptr->port_mutex);
4999 		return (FALSE);
5000 	}
5001 
5002 	/* TRUE is only returned while target is intended skipped */
5003 	ret = FALSE;
5004 	/* discover info about this target */
5005 	if ((fcp_send_els(pptr, ptgt, icmd, opcode,
5006 	    lcount, tcount, cause)) == DDI_SUCCESS) {
5007 		FCP_TGT_TRACE(ptgt, tgt_cnt, FCP_TGT_TRACE_9);
5008 	} else {
5009 		fcp_icmd_free(pptr, icmd);
5010 		ret = TRUE;
5011 	}
5012 	mutex_enter(&pptr->port_mutex);
5013 
5014 	return (ret);
5015 }
5016 
5017 /*
5018  *     Function: fcp_send_els
5019  *
5020  *  Description: Sends an ELS to the target specified by the caller.  Supports
5021  *		 PLOGI and PRLI.
5022  *
5023  *     Argument: *pptr		Fcp port.
5024  *		 *ptgt		Target to send the ELS to.
5025  *		 *icmd		Internal packet
5026  *		 opcode		ELS opcode
5027  *		 lcount		Link state change counter
5028  *		 tcount		Target state change counter
5029  *		 cause		What caused the call
5030  *
5031  * Return Value: DDI_SUCCESS
5032  *		 Others
5033  */
5034 static int
5035 fcp_send_els(struct fcp_port *pptr, struct fcp_tgt *ptgt,
5036     struct fcp_ipkt *icmd, uchar_t opcode, int lcount, int tcount, int cause)
5037 {
5038 	fc_packet_t		*fpkt;
5039 	fc_frame_hdr_t		*hp;
5040 	int			internal = 0;
5041 	int			alloc;
5042 	int			cmd_len;
5043 	int			resp_len;
5044 	int			res = DDI_FAILURE; /* default result */
5045 	int			rval = DDI_FAILURE;
5046 
5047 	ASSERT(opcode == LA_ELS_PLOGI || opcode == LA_ELS_PRLI);
5048 	ASSERT(ptgt->tgt_port == pptr);
5049 
5050 	FCP_TRACE(fcp_logq, pptr->port_instbuf,
5051 	    fcp_trace, FCP_BUF_LEVEL_5, 0,
5052 	    "fcp_send_els: d_id=0x%x ELS 0x%x (%s)", ptgt->tgt_d_id, opcode,
5053 	    (opcode == LA_ELS_PLOGI) ? "PLOGI" : "PRLI");
5054 
5055 	if (opcode == LA_ELS_PLOGI) {
5056 		cmd_len = sizeof (la_els_logi_t);
5057 		resp_len = sizeof (la_els_logi_t);
5058 	} else {
5059 		ASSERT(opcode == LA_ELS_PRLI);
5060 		cmd_len = sizeof (la_els_prli_t);
5061 		resp_len = sizeof (la_els_prli_t);
5062 	}
5063 
5064 	if (icmd == NULL) {
5065 		alloc = FCP_MAX(sizeof (la_els_logi_t),
5066 		    sizeof (la_els_prli_t));
5067 		icmd = fcp_icmd_alloc(pptr, ptgt, alloc, alloc, 0, 0,
5068 		    lcount, tcount, cause, FC_INVALID_RSCN_COUNT);
5069 		if (icmd == NULL) {
5070 			FCP_TGT_TRACE(ptgt, tcount, FCP_TGT_TRACE_10);
5071 			return (res);
5072 		}
5073 		internal++;
5074 	}
5075 	fpkt = icmd->ipkt_fpkt;
5076 
5077 	fpkt->pkt_cmdlen = cmd_len;
5078 	fpkt->pkt_rsplen = resp_len;
5079 	fpkt->pkt_datalen = 0;
5080 	icmd->ipkt_retries = 0;
5081 
5082 	/* fill in fpkt info */
5083 	fpkt->pkt_tran_flags = FC_TRAN_CLASS3 | FC_TRAN_INTR;
5084 	fpkt->pkt_tran_type = FC_PKT_EXCHANGE;
5085 	fpkt->pkt_timeout = FCP_ELS_TIMEOUT;
5086 
5087 	/* get ptr to frame hdr in fpkt */
5088 	hp = &fpkt->pkt_cmd_fhdr;
5089 
5090 	/*
5091 	 * fill in frame hdr
5092 	 */
5093 	hp->r_ctl = R_CTL_ELS_REQ;
5094 	hp->s_id = pptr->port_id;	/* source ID */
5095 	hp->d_id = ptgt->tgt_d_id;	/* dest ID */
5096 	hp->type = FC_TYPE_EXTENDED_LS;
5097 	hp->f_ctl = F_CTL_SEQ_INITIATIVE | F_CTL_FIRST_SEQ;
5098 	hp->seq_id = 0;
5099 	hp->rsvd = 0;
5100 	hp->df_ctl  = 0;
5101 	hp->seq_cnt = 0;
5102 	hp->ox_id = 0xffff;		/* i.e. none */
5103 	hp->rx_id = 0xffff;		/* i.e. none */
5104 	hp->ro = 0;
5105 
5106 	/*
5107 	 * at this point we have a filled in cmd pkt
5108 	 *
5109 	 * fill in the respective info, then use the transport to send
5110 	 * the packet
5111 	 *
5112 	 * for a PLOGI call fc_ulp_login(), and
5113 	 * for a PRLI call fc_ulp_issue_els()
5114 	 */
5115 	switch (opcode) {
5116 	case LA_ELS_PLOGI: {
5117 		struct la_els_logi logi;
5118 
5119 		bzero(&logi, sizeof (struct la_els_logi));
5120 
5121 		hp = &fpkt->pkt_cmd_fhdr;
5122 		hp->r_ctl = R_CTL_ELS_REQ;
5123 		logi.ls_code.ls_code = LA_ELS_PLOGI;
5124 		logi.ls_code.mbz = 0;
5125 
5126 		FCP_CP_OUT((uint8_t *)&logi, fpkt->pkt_cmd,
5127 		    fpkt->pkt_cmd_acc, sizeof (struct la_els_logi));
5128 
5129 		icmd->ipkt_opcode = LA_ELS_PLOGI;
5130 
5131 		mutex_enter(&pptr->port_mutex);
5132 		if (!FCP_TGT_STATE_CHANGED(ptgt, icmd)) {
5133 
5134 			mutex_exit(&pptr->port_mutex);
5135 
5136 			rval = fc_ulp_login(pptr->port_fp_handle, &fpkt, 1);
5137 			if (rval == FC_SUCCESS) {
5138 				res = DDI_SUCCESS;
5139 				break;
5140 			}
5141 
5142 			FCP_TGT_TRACE(ptgt, tcount, FCP_TGT_TRACE_11);
5143 
5144 			res = fcp_handle_ipkt_errors(pptr, ptgt, icmd,
5145 			    rval, "PLOGI");
5146 		} else {
5147 			FCP_TRACE(fcp_logq, pptr->port_instbuf,
5148 			    fcp_trace, FCP_BUF_LEVEL_5, 0,
5149 			    "fcp_send_els1: state change occured"
5150 			    " for D_ID=0x%x", ptgt->tgt_d_id);
5151 			mutex_exit(&pptr->port_mutex);
5152 			FCP_TGT_TRACE(ptgt, tcount, FCP_TGT_TRACE_12);
5153 		}
5154 		break;
5155 	}
5156 
5157 	case LA_ELS_PRLI: {
5158 		struct la_els_prli	prli;
5159 		struct fcp_prli		*fprli;
5160 
5161 		bzero(&prli, sizeof (struct la_els_prli));
5162 
5163 		hp = &fpkt->pkt_cmd_fhdr;
5164 		hp->r_ctl = R_CTL_ELS_REQ;
5165 
5166 		/* fill in PRLI cmd ELS fields */
5167 		prli.ls_code = LA_ELS_PRLI;
5168 		prli.page_length = 0x10;	/* huh? */
5169 		prli.payload_length = sizeof (struct la_els_prli);
5170 
5171 		icmd->ipkt_opcode = LA_ELS_PRLI;
5172 
5173 		/* get ptr to PRLI service params */
5174 		fprli = (struct fcp_prli *)prli.service_params;
5175 
5176 		/* fill in service params */
5177 		fprli->type = 0x08;
5178 		fprli->resvd1 = 0;
5179 		fprli->orig_process_assoc_valid = 0;
5180 		fprli->resp_process_assoc_valid = 0;
5181 		fprli->establish_image_pair = 1;
5182 		fprli->resvd2 = 0;
5183 		fprli->resvd3 = 0;
5184 		fprli->obsolete_1 = 0;
5185 		fprli->obsolete_2 = 0;
5186 		fprli->data_overlay_allowed = 0;
5187 		fprli->initiator_fn = 1;
5188 		fprli->confirmed_compl_allowed = 1;
5189 
5190 		if (fc_ulp_is_name_present("ltct") == FC_SUCCESS) {
5191 			fprli->target_fn = 1;
5192 		} else {
5193 			fprli->target_fn = 0;
5194 		}
5195 
5196 		fprli->retry = 1;
5197 		fprli->read_xfer_rdy_disabled = 1;
5198 		fprli->write_xfer_rdy_disabled = 0;
5199 
5200 		FCP_CP_OUT((uint8_t *)&prli, fpkt->pkt_cmd,
5201 		    fpkt->pkt_cmd_acc, sizeof (struct la_els_prli));
5202 
5203 		/* issue the PRLI request */
5204 
5205 		mutex_enter(&pptr->port_mutex);
5206 		if (!FCP_TGT_STATE_CHANGED(ptgt, icmd)) {
5207 
5208 			mutex_exit(&pptr->port_mutex);
5209 
5210 			rval = fc_ulp_issue_els(pptr->port_fp_handle, fpkt);
5211 			if (rval == FC_SUCCESS) {
5212 				res = DDI_SUCCESS;
5213 				break;
5214 			}
5215 
5216 			FCP_TGT_TRACE(ptgt, tcount, FCP_TGT_TRACE_13);
5217 
5218 			res = fcp_handle_ipkt_errors(pptr, ptgt, icmd,
5219 			    rval, "PRLI");
5220 		} else {
5221 			mutex_exit(&pptr->port_mutex);
5222 			FCP_TGT_TRACE(ptgt, tcount, FCP_TGT_TRACE_14);
5223 		}
5224 		break;
5225 	}
5226 
5227 	default:
5228 		fcp_log(CE_WARN, NULL, "!invalid ELS opcode=0x%x", opcode);
5229 		break;
5230 	}
5231 
5232 	FCP_TRACE(fcp_logq, pptr->port_instbuf,
5233 	    fcp_trace, FCP_BUF_LEVEL_5, 0,
5234 	    "fcp_send_els: returning %d", res);
5235 
5236 	if (res != DDI_SUCCESS) {
5237 		if (internal) {
5238 			fcp_icmd_free(pptr, icmd);
5239 		}
5240 	}
5241 
5242 	return (res);
5243 }
5244 
5245 
5246 /*
5247  * called internally update the state of all of the tgts and each LUN
5248  * for this port (i.e. each target  known to be attached to this port)
5249  * if they are not already offline
5250  *
5251  * must be called with the port mutex owned
5252  *
5253  * acquires and releases the target mutexes for each target attached
5254  * to this port
5255  */
5256 void
5257 fcp_update_state(struct fcp_port *pptr, uint32_t state, int cause)
5258 {
5259 	int i;
5260 	struct fcp_tgt *ptgt;
5261 
5262 	ASSERT(mutex_owned(&pptr->port_mutex));
5263 
5264 	for (i = 0; i < FCP_NUM_HASH; i++) {
5265 		for (ptgt = pptr->port_tgt_hash_table[i]; ptgt != NULL;
5266 		    ptgt = ptgt->tgt_next) {
5267 			mutex_enter(&ptgt->tgt_mutex);
5268 			fcp_update_tgt_state(ptgt, FCP_SET, state);
5269 			ptgt->tgt_change_cnt++;
5270 			ptgt->tgt_statec_cause = cause;
5271 			ptgt->tgt_tmp_cnt = 1;
5272 			ptgt->tgt_done = 0;
5273 			mutex_exit(&ptgt->tgt_mutex);
5274 		}
5275 	}
5276 }
5277 
5278 
5279 static void
5280 fcp_offline_all(struct fcp_port *pptr, int lcount, int cause)
5281 {
5282 	int i;
5283 	int ndevs;
5284 	struct fcp_tgt *ptgt;
5285 
5286 	ASSERT(mutex_owned(&pptr->port_mutex));
5287 
5288 	for (ndevs = 0, i = 0; i < FCP_NUM_HASH; i++) {
5289 		for (ptgt = pptr->port_tgt_hash_table[i]; ptgt != NULL;
5290 		    ptgt = ptgt->tgt_next) {
5291 			ndevs++;
5292 		}
5293 	}
5294 
5295 	if (ndevs == 0) {
5296 		return;
5297 	}
5298 	pptr->port_tmp_cnt = ndevs;
5299 
5300 	for (i = 0; i < FCP_NUM_HASH; i++) {
5301 		for (ptgt = pptr->port_tgt_hash_table[i]; ptgt != NULL;
5302 		    ptgt = ptgt->tgt_next) {
5303 			(void) fcp_call_finish_init_held(pptr, ptgt,
5304 			    lcount, ptgt->tgt_change_cnt, cause);
5305 		}
5306 	}
5307 }
5308 
5309 /*
5310  *     Function: fcp_update_tgt_state
5311  *
5312  *  Description: This function updates the field tgt_state of a target.	 That
5313  *		 field is a bitmap and which bit can be set or reset
5314  *		 individually.	The action applied to the target state is also
5315  *		 applied to all the LUNs belonging to the target (provided the
5316  *		 LUN is not offline).  A side effect of applying the state
5317  *		 modification to the target and the LUNs is the field tgt_trace
5318  *		 of the target and lun_trace of the LUNs is set to zero.
5319  *
5320  *
5321  *     Argument: *ptgt	Target structure.
5322  *		 flag	Flag indication what action to apply (set/reset).
5323  *		 state	State bits to update.
5324  *
5325  * Return Value: None
5326  *
5327  *	Context: Interrupt, Kernel or User context.
5328  *		 The mutex of the target (ptgt->tgt_mutex) must be owned when
5329  *		 calling this function.
5330  */
5331 void
5332 fcp_update_tgt_state(struct fcp_tgt *ptgt, int flag, uint32_t state)
5333 {
5334 	struct fcp_lun *plun;
5335 
5336 	ASSERT(mutex_owned(&ptgt->tgt_mutex));
5337 
5338 	if (!(ptgt->tgt_state & FCP_TGT_OFFLINE)) {
5339 		/* The target is not offline. */
5340 		if (flag == FCP_SET) {
5341 			ptgt->tgt_state |= state;
5342 			ptgt->tgt_trace = 0;
5343 		} else {
5344 			ptgt->tgt_state &= ~state;
5345 		}
5346 
5347 		for (plun = ptgt->tgt_lun; plun != NULL;
5348 		    plun = plun->lun_next) {
5349 			if (!(plun->lun_state & FCP_LUN_OFFLINE)) {
5350 				/* The LUN is not offline. */
5351 				if (flag == FCP_SET) {
5352 					plun->lun_state |= state;
5353 					plun->lun_trace = 0;
5354 				} else {
5355 					plun->lun_state &= ~state;
5356 				}
5357 			}
5358 		}
5359 	}
5360 }
5361 
5362 /*
5363  *     Function: fcp_update_tgt_state
5364  *
5365  *  Description: This function updates the field lun_state of a LUN.  That
5366  *		 field is a bitmap and which bit can be set or reset
5367  *		 individually.
5368  *
5369  *     Argument: *plun	LUN structure.
5370  *		 flag	Flag indication what action to apply (set/reset).
5371  *		 state	State bits to update.
5372  *
5373  * Return Value: None
5374  *
5375  *	Context: Interrupt, Kernel or User context.
5376  *		 The mutex of the target (ptgt->tgt_mutex) must be owned when
5377  *		 calling this function.
5378  */
5379 void
5380 fcp_update_lun_state(struct fcp_lun *plun, int flag, uint32_t state)
5381 {
5382 	struct fcp_tgt	*ptgt = plun->lun_tgt;
5383 
5384 	ASSERT(mutex_owned(&ptgt->tgt_mutex));
5385 
5386 	if (!(plun->lun_state & FCP_TGT_OFFLINE)) {
5387 		if (flag == FCP_SET) {
5388 			plun->lun_state |= state;
5389 		} else {
5390 			plun->lun_state &= ~state;
5391 		}
5392 	}
5393 }
5394 
5395 /*
5396  *     Function: fcp_get_port
5397  *
5398  *  Description: This function returns the fcp_port structure from the opaque
5399  *		 handle passed by the caller.  That opaque handle is the handle
5400  *		 used by fp/fctl to identify a particular local port.  That
5401  *		 handle has been stored in the corresponding fcp_port
5402  *		 structure.  This function is going to walk the global list of
5403  *		 fcp_port structures till one has a port_fp_handle that matches
5404  *		 the handle passed by the caller.  This function enters the
5405  *		 mutex fcp_global_mutex while walking the global list and then
5406  *		 releases it.
5407  *
5408  *     Argument: port_handle	Opaque handle that fp/fctl uses to identify a
5409  *				particular port.
5410  *
5411  * Return Value: NULL		Not found.
5412  *		 Not NULL	Pointer to the fcp_port structure.
5413  *
5414  *	Context: Interrupt, Kernel or User context.
5415  */
5416 static struct fcp_port *
5417 fcp_get_port(opaque_t port_handle)
5418 {
5419 	struct fcp_port *pptr;
5420 
5421 	ASSERT(port_handle != NULL);
5422 
5423 	mutex_enter(&fcp_global_mutex);
5424 	for (pptr = fcp_port_head; pptr != NULL; pptr = pptr->port_next) {
5425 		if (pptr->port_fp_handle == port_handle) {
5426 			break;
5427 		}
5428 	}
5429 	mutex_exit(&fcp_global_mutex);
5430 
5431 	return (pptr);
5432 }
5433 
5434 
5435 static void
5436 fcp_unsol_callback(fc_packet_t *fpkt)
5437 {
5438 	struct fcp_ipkt *icmd = (struct fcp_ipkt *)fpkt->pkt_ulp_private;
5439 	struct fcp_port *pptr = icmd->ipkt_port;
5440 
5441 	if (fpkt->pkt_state != FC_PKT_SUCCESS) {
5442 		caddr_t state, reason, action, expln;
5443 
5444 		(void) fc_ulp_pkt_error(fpkt, &state, &reason,
5445 		    &action, &expln);
5446 
5447 		fcp_log(CE_WARN, pptr->port_dip,
5448 		    "!couldn't post response to unsolicited request: "
5449 		    " state=%s reason=%s rx_id=%x ox_id=%x",
5450 		    state, reason, fpkt->pkt_cmd_fhdr.ox_id,
5451 		    fpkt->pkt_cmd_fhdr.rx_id);
5452 	}
5453 	fcp_icmd_free(pptr, icmd);
5454 }
5455 
5456 
5457 /*
5458  * Perform general purpose preparation of a response to an unsolicited request
5459  */
5460 static void
5461 fcp_unsol_resp_init(fc_packet_t *pkt, fc_unsol_buf_t *buf,
5462     uchar_t r_ctl, uchar_t type)
5463 {
5464 	pkt->pkt_cmd_fhdr.r_ctl = r_ctl;
5465 	pkt->pkt_cmd_fhdr.d_id = buf->ub_frame.s_id;
5466 	pkt->pkt_cmd_fhdr.s_id = buf->ub_frame.d_id;
5467 	pkt->pkt_cmd_fhdr.type = type;
5468 	pkt->pkt_cmd_fhdr.f_ctl = F_CTL_LAST_SEQ | F_CTL_XCHG_CONTEXT;
5469 	pkt->pkt_cmd_fhdr.seq_id = buf->ub_frame.seq_id;
5470 	pkt->pkt_cmd_fhdr.df_ctl  = buf->ub_frame.df_ctl;
5471 	pkt->pkt_cmd_fhdr.seq_cnt = buf->ub_frame.seq_cnt;
5472 	pkt->pkt_cmd_fhdr.ox_id = buf->ub_frame.ox_id;
5473 	pkt->pkt_cmd_fhdr.rx_id = buf->ub_frame.rx_id;
5474 	pkt->pkt_cmd_fhdr.ro = 0;
5475 	pkt->pkt_cmd_fhdr.rsvd = 0;
5476 	pkt->pkt_comp = fcp_unsol_callback;
5477 	pkt->pkt_pd = NULL;
5478 }
5479 
5480 
5481 /*ARGSUSED*/
5482 static int
5483 fcp_unsol_prli(struct fcp_port *pptr, fc_unsol_buf_t *buf)
5484 {
5485 	fc_packet_t		*fpkt;
5486 	struct la_els_prli	prli;
5487 	struct fcp_prli		*fprli;
5488 	struct fcp_ipkt	*icmd;
5489 	struct la_els_prli	*from;
5490 	struct fcp_prli		*orig;
5491 	struct fcp_tgt	*ptgt;
5492 	int			tcount = 0;
5493 	int			lcount;
5494 
5495 	from = (struct la_els_prli *)buf->ub_buffer;
5496 	orig = (struct fcp_prli *)from->service_params;
5497 
5498 	if ((ptgt = fcp_get_target_by_did(pptr, buf->ub_frame.s_id)) !=
5499 	    NULL) {
5500 		mutex_enter(&ptgt->tgt_mutex);
5501 		tcount = ptgt->tgt_change_cnt;
5502 		mutex_exit(&ptgt->tgt_mutex);
5503 	}
5504 	mutex_enter(&pptr->port_mutex);
5505 	lcount = pptr->port_link_cnt;
5506 	mutex_exit(&pptr->port_mutex);
5507 
5508 	if ((icmd = fcp_icmd_alloc(pptr, ptgt, sizeof (la_els_prli_t),
5509 	    sizeof (la_els_prli_t), 0, 0, lcount, tcount, 0,
5510 	    FC_INVALID_RSCN_COUNT)) == NULL) {
5511 		return (FC_FAILURE);
5512 	}
5513 	fpkt = icmd->ipkt_fpkt;
5514 	fpkt->pkt_tran_flags = FC_TRAN_CLASS3 | FC_TRAN_INTR;
5515 	fpkt->pkt_tran_type = FC_PKT_OUTBOUND;
5516 	fpkt->pkt_timeout = FCP_ELS_TIMEOUT;
5517 	fpkt->pkt_cmdlen = sizeof (la_els_prli_t);
5518 	fpkt->pkt_rsplen = 0;
5519 	fpkt->pkt_datalen = 0;
5520 
5521 	icmd->ipkt_opcode = LA_ELS_PRLI;
5522 
5523 	bzero(&prli, sizeof (struct la_els_prli));
5524 	fprli = (struct fcp_prli *)prli.service_params;
5525 	prli.ls_code = LA_ELS_ACC;
5526 	prli.page_length = 0x10;
5527 	prli.payload_length = sizeof (struct la_els_prli);
5528 
5529 	/* fill in service params */
5530 	fprli->type = 0x08;
5531 	fprli->resvd1 = 0;
5532 	fprli->orig_process_assoc_valid = orig->orig_process_assoc_valid;
5533 	fprli->orig_process_associator = orig->orig_process_associator;
5534 	fprli->resp_process_assoc_valid = 0;
5535 	fprli->establish_image_pair = 1;
5536 	fprli->resvd2 = 0;
5537 	fprli->resvd3 = 0;
5538 	fprli->obsolete_1 = 0;
5539 	fprli->obsolete_2 = 0;
5540 	fprli->data_overlay_allowed = 0;
5541 	fprli->initiator_fn = 1;
5542 	fprli->confirmed_compl_allowed = 1;
5543 
5544 	if (fc_ulp_is_name_present("ltct") == FC_SUCCESS) {
5545 		fprli->target_fn = 1;
5546 	} else {
5547 		fprli->target_fn = 0;
5548 	}
5549 
5550 	fprli->retry = 1;
5551 	fprli->read_xfer_rdy_disabled = 1;
5552 	fprli->write_xfer_rdy_disabled = 0;
5553 
5554 	/* save the unsol prli payload first */
5555 	FCP_CP_OUT((uint8_t *)from, fpkt->pkt_resp,
5556 	    fpkt->pkt_resp_acc, sizeof (struct la_els_prli));
5557 
5558 	FCP_CP_OUT((uint8_t *)&prli, fpkt->pkt_cmd,
5559 	    fpkt->pkt_cmd_acc, sizeof (struct la_els_prli));
5560 
5561 	fcp_unsol_resp_init(fpkt, buf, R_CTL_ELS_RSP, FC_TYPE_EXTENDED_LS);
5562 
5563 	mutex_enter(&pptr->port_mutex);
5564 	if (!FCP_LINK_STATE_CHANGED(pptr, icmd)) {
5565 		int rval;
5566 		mutex_exit(&pptr->port_mutex);
5567 
5568 		if ((rval = fc_ulp_issue_els(pptr->port_fp_handle, fpkt)) !=
5569 		    FC_SUCCESS) {
5570 			if (rval == FC_STATEC_BUSY || rval == FC_OFFLINE) {
5571 				fcp_queue_ipkt(pptr, fpkt);
5572 				return (FC_SUCCESS);
5573 			}
5574 			/* Let it timeout */
5575 			fcp_icmd_free(pptr, icmd);
5576 			return (FC_FAILURE);
5577 		}
5578 	} else {
5579 		mutex_exit(&pptr->port_mutex);
5580 		fcp_icmd_free(pptr, icmd);
5581 		return (FC_FAILURE);
5582 	}
5583 
5584 	(void) fc_ulp_ubrelease(pptr->port_fp_handle, 1, &buf->ub_token);
5585 
5586 	return (FC_SUCCESS);
5587 }
5588 
5589 /*
5590  *     Function: fcp_icmd_alloc
5591  *
5592  *  Description: This function allocated a fcp_ipkt structure.	The pkt_comp
5593  *		 field is initialized to fcp_icmd_callback.  Sometimes it is
5594  *		 modified by the caller (such as fcp_send_scsi).  The
5595  *		 structure is also tied to the state of the line and of the
5596  *		 target at a particular time.  That link is established by
5597  *		 setting the fields ipkt_link_cnt and ipkt_change_cnt to lcount
5598  *		 and tcount which came respectively from pptr->link_cnt and
5599  *		 ptgt->tgt_change_cnt.
5600  *
5601  *     Argument: *pptr		Fcp port.
5602  *		 *ptgt		Target (destination of the command).
5603  *		 cmd_len	Length of the command.
5604  *		 resp_len	Length of the expected response.
5605  *		 data_len	Length of the data.
5606  *		 nodma		Indicates weither the command and response.
5607  *				will be transfer through DMA or not.
5608  *		 lcount		Link state change counter.
5609  *		 tcount		Target state change counter.
5610  *		 cause		Reason that lead to this call.
5611  *
5612  * Return Value: NULL		Failed.
5613  *		 Not NULL	Internal packet address.
5614  */
5615 static struct fcp_ipkt *
5616 fcp_icmd_alloc(struct fcp_port *pptr, struct fcp_tgt *ptgt, int cmd_len,
5617     int resp_len, int data_len, int nodma, int lcount, int tcount, int cause,
5618     uint32_t rscn_count)
5619 {
5620 	int			dma_setup = 0;
5621 	fc_packet_t		*fpkt;
5622 	struct fcp_ipkt	*icmd = NULL;
5623 
5624 	icmd = kmem_zalloc(sizeof (struct fcp_ipkt) +
5625 	    pptr->port_dmacookie_sz + pptr->port_priv_pkt_len,
5626 	    KM_NOSLEEP);
5627 	if (icmd == NULL) {
5628 		fcp_log(CE_WARN, pptr->port_dip,
5629 		    "!internal packet allocation failed");
5630 		return (NULL);
5631 	}
5632 
5633 	/*
5634 	 * initialize the allocated packet
5635 	 */
5636 	icmd->ipkt_nodma = nodma;
5637 	icmd->ipkt_next = icmd->ipkt_prev = NULL;
5638 	icmd->ipkt_lun = NULL;
5639 
5640 	icmd->ipkt_link_cnt = lcount;
5641 	icmd->ipkt_change_cnt = tcount;
5642 	icmd->ipkt_cause = cause;
5643 
5644 	mutex_enter(&pptr->port_mutex);
5645 	icmd->ipkt_port = pptr;
5646 	mutex_exit(&pptr->port_mutex);
5647 
5648 	/* keep track of amt of data to be sent in pkt */
5649 	icmd->ipkt_cmdlen = cmd_len;
5650 	icmd->ipkt_resplen = resp_len;
5651 	icmd->ipkt_datalen = data_len;
5652 
5653 	/* set up pkt's ptr to the fc_packet_t struct, just after the ipkt */
5654 	icmd->ipkt_fpkt = (fc_packet_t *)(&icmd->ipkt_fc_packet);
5655 
5656 	/* set pkt's private ptr to point to cmd pkt */
5657 	icmd->ipkt_fpkt->pkt_ulp_private = (opaque_t)icmd;
5658 
5659 	/* set FCA private ptr to memory just beyond */
5660 	icmd->ipkt_fpkt->pkt_fca_private = (opaque_t)
5661 	    ((char *)icmd + sizeof (struct fcp_ipkt) +
5662 	    pptr->port_dmacookie_sz);
5663 
5664 	/* get ptr to fpkt substruct and fill it in */
5665 	fpkt = icmd->ipkt_fpkt;
5666 	fpkt->pkt_data_cookie = (ddi_dma_cookie_t *)((caddr_t)icmd +
5667 	    sizeof (struct fcp_ipkt));
5668 
5669 	if (ptgt != NULL) {
5670 		icmd->ipkt_tgt = ptgt;
5671 		fpkt->pkt_fca_device = ptgt->tgt_fca_dev;
5672 	}
5673 
5674 	fpkt->pkt_comp = fcp_icmd_callback;
5675 	fpkt->pkt_tran_flags = (FC_TRAN_CLASS3 | FC_TRAN_INTR);
5676 	fpkt->pkt_cmdlen = cmd_len;
5677 	fpkt->pkt_rsplen = resp_len;
5678 	fpkt->pkt_datalen = data_len;
5679 
5680 	/*
5681 	 * The pkt_ulp_rscn_infop (aka pkt_ulp_rsvd1) field is used to pass the
5682 	 * rscn_count as fcp knows down to the transport. If a valid count was
5683 	 * passed into this function, we allocate memory to actually pass down
5684 	 * this info.
5685 	 *
5686 	 * BTW, if the kmem_zalloc fails, we won't try too hard. This will
5687 	 * basically mean that fcp will not be able to help transport
5688 	 * distinguish if a new RSCN has come after fcp was last informed about
5689 	 * it. In such cases, it might lead to the problem mentioned in CR/bug #
5690 	 * 5068068 where the device might end up going offline in case of RSCN
5691 	 * storms.
5692 	 */
5693 	fpkt->pkt_ulp_rscn_infop = NULL;
5694 	if (rscn_count != FC_INVALID_RSCN_COUNT) {
5695 		fpkt->pkt_ulp_rscn_infop = kmem_zalloc(
5696 		    sizeof (fc_ulp_rscn_info_t), KM_NOSLEEP);
5697 		if (fpkt->pkt_ulp_rscn_infop == NULL) {
5698 			FCP_TRACE(fcp_logq, pptr->port_instbuf,
5699 			    fcp_trace, FCP_BUF_LEVEL_6, 0,
5700 			    "Failed to alloc memory to pass rscn info");
5701 		}
5702 	}
5703 
5704 	if (fpkt->pkt_ulp_rscn_infop != NULL) {
5705 		fc_ulp_rscn_info_t	*rscnp;
5706 
5707 		rscnp = (fc_ulp_rscn_info_t *)fpkt->pkt_ulp_rscn_infop;
5708 		rscnp->ulp_rscn_count = rscn_count;
5709 	}
5710 
5711 	if (fcp_alloc_dma(pptr, icmd, nodma, KM_NOSLEEP) != FC_SUCCESS) {
5712 		goto fail;
5713 	}
5714 	dma_setup++;
5715 
5716 	/*
5717 	 * Must hold target mutex across setting of pkt_pd and call to
5718 	 * fc_ulp_init_packet to ensure the handle to the target doesn't go
5719 	 * away while we're not looking.
5720 	 */
5721 	if (ptgt != NULL) {
5722 		mutex_enter(&ptgt->tgt_mutex);
5723 		fpkt->pkt_pd = ptgt->tgt_pd_handle;
5724 
5725 		/* ask transport to do its initialization on this pkt */
5726 		if (fc_ulp_init_packet(pptr->port_fp_handle, fpkt, KM_NOSLEEP)
5727 		    != FC_SUCCESS) {
5728 			FCP_TRACE(fcp_logq, pptr->port_instbuf,
5729 			    fcp_trace, FCP_BUF_LEVEL_6, 0,
5730 			    "fc_ulp_init_packet failed");
5731 			mutex_exit(&ptgt->tgt_mutex);
5732 			goto fail;
5733 		}
5734 		mutex_exit(&ptgt->tgt_mutex);
5735 	} else {
5736 		if (fc_ulp_init_packet(pptr->port_fp_handle, fpkt, KM_NOSLEEP)
5737 		    != FC_SUCCESS) {
5738 			FCP_TRACE(fcp_logq, pptr->port_instbuf,
5739 			    fcp_trace, FCP_BUF_LEVEL_6, 0,
5740 			    "fc_ulp_init_packet failed");
5741 			goto fail;
5742 		}
5743 	}
5744 
5745 	mutex_enter(&pptr->port_mutex);
5746 	if (pptr->port_state & (FCP_STATE_DETACHING |
5747 	    FCP_STATE_SUSPENDED | FCP_STATE_POWER_DOWN)) {
5748 		int rval;
5749 
5750 		mutex_exit(&pptr->port_mutex);
5751 
5752 		rval = fc_ulp_uninit_packet(pptr->port_fp_handle, fpkt);
5753 		ASSERT(rval == FC_SUCCESS);
5754 
5755 		goto fail;
5756 	}
5757 
5758 	if (ptgt != NULL) {
5759 		mutex_enter(&ptgt->tgt_mutex);
5760 		ptgt->tgt_ipkt_cnt++;
5761 		mutex_exit(&ptgt->tgt_mutex);
5762 	}
5763 
5764 	pptr->port_ipkt_cnt++;
5765 
5766 	mutex_exit(&pptr->port_mutex);
5767 
5768 	return (icmd);
5769 
5770 fail:
5771 	if (fpkt->pkt_ulp_rscn_infop != NULL) {
5772 		kmem_free(fpkt->pkt_ulp_rscn_infop,
5773 		    sizeof (fc_ulp_rscn_info_t));
5774 		fpkt->pkt_ulp_rscn_infop = NULL;
5775 	}
5776 
5777 	if (dma_setup) {
5778 		fcp_free_dma(pptr, icmd);
5779 	}
5780 	kmem_free(icmd, sizeof (struct fcp_ipkt) + pptr->port_priv_pkt_len +
5781 	    (size_t)pptr->port_dmacookie_sz);
5782 
5783 	return (NULL);
5784 }
5785 
5786 /*
5787  *     Function: fcp_icmd_free
5788  *
5789  *  Description: Frees the internal command passed by the caller.
5790  *
5791  *     Argument: *pptr		Fcp port.
5792  *		 *icmd		Internal packet to free.
5793  *
5794  * Return Value: None
5795  */
5796 static void
5797 fcp_icmd_free(struct fcp_port *pptr, struct fcp_ipkt *icmd)
5798 {
5799 	struct fcp_tgt	*ptgt = icmd->ipkt_tgt;
5800 
5801 	/* Let the underlying layers do their cleanup. */
5802 	(void) fc_ulp_uninit_packet(pptr->port_fp_handle,
5803 	    icmd->ipkt_fpkt);
5804 
5805 	if (icmd->ipkt_fpkt->pkt_ulp_rscn_infop) {
5806 		kmem_free(icmd->ipkt_fpkt->pkt_ulp_rscn_infop,
5807 		    sizeof (fc_ulp_rscn_info_t));
5808 	}
5809 
5810 	fcp_free_dma(pptr, icmd);
5811 
5812 	kmem_free(icmd, sizeof (struct fcp_ipkt) + pptr->port_priv_pkt_len +
5813 	    (size_t)pptr->port_dmacookie_sz);
5814 
5815 	mutex_enter(&pptr->port_mutex);
5816 
5817 	if (ptgt) {
5818 		mutex_enter(&ptgt->tgt_mutex);
5819 		ptgt->tgt_ipkt_cnt--;
5820 		mutex_exit(&ptgt->tgt_mutex);
5821 	}
5822 
5823 	pptr->port_ipkt_cnt--;
5824 	mutex_exit(&pptr->port_mutex);
5825 }
5826 
5827 /*
5828  *     Function: fcp_alloc_dma
5829  *
5830  *  Description: Allocated the DMA resources required for the internal
5831  *		 packet.
5832  *
5833  *     Argument: *pptr	FCP port.
5834  *		 *icmd	Internal FCP packet.
5835  *		 nodma	Indicates if the Cmd and Resp will be DMAed.
5836  *		 flags	Allocation flags (Sleep or NoSleep).
5837  *
5838  * Return Value: FC_SUCCESS
5839  *		 FC_NOMEM
5840  */
5841 static int
5842 fcp_alloc_dma(struct fcp_port *pptr, struct fcp_ipkt *icmd,
5843     int nodma, int flags)
5844 {
5845 	int		rval;
5846 	size_t		real_size;
5847 	uint_t		ccount;
5848 	int		bound = 0;
5849 	int		cmd_resp = 0;
5850 	fc_packet_t	*fpkt;
5851 	ddi_dma_cookie_t	pkt_data_cookie;
5852 	ddi_dma_cookie_t	*cp;
5853 	uint32_t		cnt;
5854 
5855 	fpkt = &icmd->ipkt_fc_packet;
5856 
5857 	ASSERT(fpkt->pkt_cmd_dma == NULL && fpkt->pkt_data_dma == NULL &&
5858 	    fpkt->pkt_resp_dma == NULL);
5859 
5860 	icmd->ipkt_nodma = nodma;
5861 
5862 	if (nodma) {
5863 		fpkt->pkt_cmd = kmem_zalloc(fpkt->pkt_cmdlen, flags);
5864 		if (fpkt->pkt_cmd == NULL) {
5865 			goto fail;
5866 		}
5867 
5868 		fpkt->pkt_resp = kmem_zalloc(fpkt->pkt_rsplen, flags);
5869 		if (fpkt->pkt_resp == NULL) {
5870 			goto fail;
5871 		}
5872 	} else {
5873 		ASSERT(fpkt->pkt_cmdlen && fpkt->pkt_rsplen);
5874 
5875 		rval = fcp_alloc_cmd_resp(pptr, fpkt, flags);
5876 		if (rval == FC_FAILURE) {
5877 			ASSERT(fpkt->pkt_cmd_dma == NULL &&
5878 			    fpkt->pkt_resp_dma == NULL);
5879 			goto fail;
5880 		}
5881 		cmd_resp++;
5882 	}
5883 
5884 	if (fpkt->pkt_datalen != 0) {
5885 		/*
5886 		 * set up DMA handle and memory for the data in this packet
5887 		 */
5888 		if (ddi_dma_alloc_handle(pptr->port_dip,
5889 		    &pptr->port_data_dma_attr, DDI_DMA_DONTWAIT,
5890 		    NULL, &fpkt->pkt_data_dma) != DDI_SUCCESS) {
5891 			goto fail;
5892 		}
5893 
5894 		if (ddi_dma_mem_alloc(fpkt->pkt_data_dma, fpkt->pkt_datalen,
5895 		    &pptr->port_dma_acc_attr, DDI_DMA_CONSISTENT,
5896 		    DDI_DMA_DONTWAIT, NULL, &fpkt->pkt_data,
5897 		    &real_size, &fpkt->pkt_data_acc) != DDI_SUCCESS) {
5898 			goto fail;
5899 		}
5900 
5901 		/* was DMA mem size gotten < size asked for/needed ?? */
5902 		if (real_size < fpkt->pkt_datalen) {
5903 			goto fail;
5904 		}
5905 
5906 		/* bind DMA address and handle together */
5907 		if (ddi_dma_addr_bind_handle(fpkt->pkt_data_dma,
5908 		    NULL, fpkt->pkt_data, real_size, DDI_DMA_READ |
5909 		    DDI_DMA_CONSISTENT, DDI_DMA_DONTWAIT, NULL,
5910 		    &pkt_data_cookie, &ccount) != DDI_DMA_MAPPED) {
5911 			goto fail;
5912 		}
5913 		bound++;
5914 
5915 		if (ccount > pptr->port_data_dma_attr.dma_attr_sgllen) {
5916 			goto fail;
5917 		}
5918 
5919 		fpkt->pkt_data_cookie_cnt = ccount;
5920 
5921 		cp = fpkt->pkt_data_cookie;
5922 		*cp = pkt_data_cookie;
5923 		cp++;
5924 
5925 		for (cnt = 1; cnt < ccount; cnt++, cp++) {
5926 			ddi_dma_nextcookie(fpkt->pkt_data_dma,
5927 			    &pkt_data_cookie);
5928 			*cp = pkt_data_cookie;
5929 		}
5930 
5931 	}
5932 
5933 	return (FC_SUCCESS);
5934 
5935 fail:
5936 	if (bound) {
5937 		(void) ddi_dma_unbind_handle(fpkt->pkt_data_dma);
5938 	}
5939 
5940 	if (fpkt->pkt_data_dma) {
5941 		if (fpkt->pkt_data) {
5942 			ddi_dma_mem_free(&fpkt->pkt_data_acc);
5943 		}
5944 		ddi_dma_free_handle(&fpkt->pkt_data_dma);
5945 	}
5946 
5947 	if (nodma) {
5948 		if (fpkt->pkt_cmd) {
5949 			kmem_free(fpkt->pkt_cmd, fpkt->pkt_cmdlen);
5950 		}
5951 		if (fpkt->pkt_resp) {
5952 			kmem_free(fpkt->pkt_resp, fpkt->pkt_rsplen);
5953 		}
5954 	} else {
5955 		if (cmd_resp) {
5956 			fcp_free_cmd_resp(pptr, fpkt);
5957 		}
5958 	}
5959 
5960 	return (FC_NOMEM);
5961 }
5962 
5963 
5964 static void
5965 fcp_free_dma(struct fcp_port *pptr, struct fcp_ipkt *icmd)
5966 {
5967 	fc_packet_t *fpkt = icmd->ipkt_fpkt;
5968 
5969 	if (fpkt->pkt_data_dma) {
5970 		(void) ddi_dma_unbind_handle(fpkt->pkt_data_dma);
5971 		if (fpkt->pkt_data) {
5972 			ddi_dma_mem_free(&fpkt->pkt_data_acc);
5973 		}
5974 		ddi_dma_free_handle(&fpkt->pkt_data_dma);
5975 	}
5976 
5977 	if (icmd->ipkt_nodma) {
5978 		if (fpkt->pkt_cmd) {
5979 			kmem_free(fpkt->pkt_cmd, icmd->ipkt_cmdlen);
5980 		}
5981 		if (fpkt->pkt_resp) {
5982 			kmem_free(fpkt->pkt_resp, icmd->ipkt_resplen);
5983 		}
5984 	} else {
5985 		ASSERT(fpkt->pkt_resp_dma != NULL && fpkt->pkt_cmd_dma != NULL);
5986 
5987 		fcp_free_cmd_resp(pptr, fpkt);
5988 	}
5989 }
5990 
5991 /*
5992  *     Function: fcp_lookup_target
5993  *
5994  *  Description: Finds a target given a WWN.
5995  *
5996  *     Argument: *pptr	FCP port.
5997  *		 *wwn	World Wide Name of the device to look for.
5998  *
5999  * Return Value: NULL		No target found
6000  *		 Not NULL	Target structure
6001  *
6002  *	Context: Interrupt context.
6003  *		 The mutex pptr->port_mutex must be owned.
6004  */
6005 /* ARGSUSED */
6006 static struct fcp_tgt *
6007 fcp_lookup_target(struct fcp_port *pptr, uchar_t *wwn)
6008 {
6009 	int			hash;
6010 	struct fcp_tgt	*ptgt;
6011 
6012 	ASSERT(mutex_owned(&pptr->port_mutex));
6013 
6014 	hash = FCP_HASH(wwn);
6015 
6016 	for (ptgt = pptr->port_tgt_hash_table[hash]; ptgt != NULL;
6017 	    ptgt = ptgt->tgt_next) {
6018 		if (!(ptgt->tgt_state & FCP_TGT_ORPHAN) &&
6019 		    bcmp((caddr_t)wwn, (caddr_t)&ptgt->tgt_port_wwn.raw_wwn[0],
6020 		    sizeof (ptgt->tgt_port_wwn)) == 0) {
6021 			break;
6022 		}
6023 	}
6024 
6025 	return (ptgt);
6026 }
6027 
6028 
6029 /*
6030  * Find target structure given a port identifier
6031  */
6032 static struct fcp_tgt *
6033 fcp_get_target_by_did(struct fcp_port *pptr, uint32_t d_id)
6034 {
6035 	fc_portid_t		port_id;
6036 	la_wwn_t		pwwn;
6037 	struct fcp_tgt	*ptgt = NULL;
6038 
6039 	port_id.priv_lilp_posit = 0;
6040 	port_id.port_id = d_id;
6041 	if (fc_ulp_get_pwwn_by_did(pptr->port_fp_handle, port_id,
6042 	    &pwwn) == FC_SUCCESS) {
6043 		mutex_enter(&pptr->port_mutex);
6044 		ptgt = fcp_lookup_target(pptr, pwwn.raw_wwn);
6045 		mutex_exit(&pptr->port_mutex);
6046 	}
6047 
6048 	return (ptgt);
6049 }
6050 
6051 
6052 /*
6053  * the packet completion callback routine for info cmd pkts
6054  *
6055  * this means fpkt pts to a response to either a PLOGI or a PRLI
6056  *
6057  * if there is an error an attempt is made to call a routine to resend
6058  * the command that failed
6059  */
6060 static void
6061 fcp_icmd_callback(fc_packet_t *fpkt)
6062 {
6063 	struct fcp_ipkt	*icmd;
6064 	struct fcp_port	*pptr;
6065 	struct fcp_tgt	*ptgt;
6066 	struct la_els_prli	*prli;
6067 	struct la_els_prli	prli_s;
6068 	struct fcp_prli		*fprli;
6069 	struct fcp_lun	*plun;
6070 	int		free_pkt = 1;
6071 	int		rval;
6072 	ls_code_t	resp;
6073 	uchar_t		prli_acc = 0;
6074 	uint32_t	rscn_count = FC_INVALID_RSCN_COUNT;
6075 	int		lun0_newalloc;
6076 
6077 	icmd = (struct fcp_ipkt *)fpkt->pkt_ulp_private;
6078 
6079 	/* get ptrs to the port and target structs for the cmd */
6080 	pptr = icmd->ipkt_port;
6081 	ptgt = icmd->ipkt_tgt;
6082 
6083 	FCP_CP_IN(fpkt->pkt_resp, &resp, fpkt->pkt_resp_acc, sizeof (resp));
6084 
6085 	if (icmd->ipkt_opcode == LA_ELS_PRLI) {
6086 		FCP_CP_IN(fpkt->pkt_cmd, &prli_s, fpkt->pkt_cmd_acc,
6087 		    sizeof (prli_s));
6088 		prli_acc = (prli_s.ls_code == LA_ELS_ACC);
6089 	}
6090 
6091 	FCP_TRACE(fcp_logq, pptr->port_instbuf,
6092 	    fcp_trace, FCP_BUF_LEVEL_2, 0,
6093 	    "ELS (%x) callback state=0x%x reason=0x%x for %x",
6094 	    icmd->ipkt_opcode, fpkt->pkt_state, fpkt->pkt_reason,
6095 	    ptgt->tgt_d_id);
6096 
6097 	if ((fpkt->pkt_state == FC_PKT_SUCCESS) &&
6098 	    ((resp.ls_code == LA_ELS_ACC) || prli_acc)) {
6099 
6100 		mutex_enter(&ptgt->tgt_mutex);
6101 		if (ptgt->tgt_pd_handle == NULL) {
6102 			/*
6103 			 * in a fabric environment the port device handles
6104 			 * get created only after successful LOGIN into the
6105 			 * transport, so the transport makes this port
6106 			 * device (pd) handle available in this packet, so
6107 			 * save it now
6108 			 */
6109 			ASSERT(fpkt->pkt_pd != NULL);
6110 			ptgt->tgt_pd_handle = fpkt->pkt_pd;
6111 		}
6112 		mutex_exit(&ptgt->tgt_mutex);
6113 
6114 		/* which ELS cmd is this response for ?? */
6115 		switch (icmd->ipkt_opcode) {
6116 		case LA_ELS_PLOGI:
6117 			FCP_TRACE(fcp_logq, pptr->port_instbuf,
6118 			    fcp_trace, FCP_BUF_LEVEL_5, 0,
6119 			    "PLOGI to d_id=0x%x succeeded, wwn=%08x%08x",
6120 			    ptgt->tgt_d_id,
6121 			    *((int *)&ptgt->tgt_port_wwn.raw_wwn[0]),
6122 			    *((int *)&ptgt->tgt_port_wwn.raw_wwn[4]));
6123 
6124 			FCP_TGT_TRACE(ptgt, icmd->ipkt_change_cnt,
6125 			    FCP_TGT_TRACE_15);
6126 
6127 			/* Note that we are not allocating a new icmd */
6128 			if (fcp_send_els(pptr, ptgt, icmd, LA_ELS_PRLI,
6129 			    icmd->ipkt_link_cnt, icmd->ipkt_change_cnt,
6130 			    icmd->ipkt_cause) != DDI_SUCCESS) {
6131 				FCP_TGT_TRACE(ptgt, icmd->ipkt_change_cnt,
6132 				    FCP_TGT_TRACE_16);
6133 				goto fail;
6134 			}
6135 			break;
6136 
6137 		case LA_ELS_PRLI:
6138 			FCP_TRACE(fcp_logq, pptr->port_instbuf,
6139 			    fcp_trace, FCP_BUF_LEVEL_5, 0,
6140 			    "PRLI to d_id=0x%x succeeded", ptgt->tgt_d_id);
6141 
6142 			FCP_TGT_TRACE(ptgt, icmd->ipkt_change_cnt,
6143 			    FCP_TGT_TRACE_17);
6144 
6145 			prli = &prli_s;
6146 
6147 			FCP_CP_IN(fpkt->pkt_resp, prli, fpkt->pkt_resp_acc,
6148 			    sizeof (prli_s));
6149 
6150 			fprli = (struct fcp_prli *)prli->service_params;
6151 
6152 			mutex_enter(&ptgt->tgt_mutex);
6153 			ptgt->tgt_icap = fprli->initiator_fn;
6154 			ptgt->tgt_tcap = fprli->target_fn;
6155 			mutex_exit(&ptgt->tgt_mutex);
6156 
6157 			if ((fprli->type != 0x08) || (fprli->target_fn != 1)) {
6158 				/*
6159 				 * this FCP device does not support target mode
6160 				 */
6161 				FCP_TGT_TRACE(ptgt, icmd->ipkt_change_cnt,
6162 				    FCP_TGT_TRACE_18);
6163 				goto fail;
6164 			}
6165 			if (fprli->retry == 1) {
6166 				fc_ulp_disable_relogin(pptr->port_fp_handle,
6167 				    &ptgt->tgt_port_wwn);
6168 			}
6169 
6170 			/* target is no longer offline */
6171 			mutex_enter(&pptr->port_mutex);
6172 			mutex_enter(&ptgt->tgt_mutex);
6173 			if (!FCP_TGT_STATE_CHANGED(ptgt, icmd)) {
6174 				ptgt->tgt_state &= ~(FCP_TGT_OFFLINE |
6175 				    FCP_TGT_MARK);
6176 			} else {
6177 				FCP_TRACE(fcp_logq, pptr->port_instbuf,
6178 				    fcp_trace, FCP_BUF_LEVEL_2, 0,
6179 				    "fcp_icmd_callback,1: state change "
6180 				    " occured for D_ID=0x%x", ptgt->tgt_d_id);
6181 				mutex_exit(&ptgt->tgt_mutex);
6182 				mutex_exit(&pptr->port_mutex);
6183 				goto fail;
6184 			}
6185 			mutex_exit(&ptgt->tgt_mutex);
6186 			mutex_exit(&pptr->port_mutex);
6187 
6188 			/*
6189 			 * lun 0 should always respond to inquiry, so
6190 			 * get the LUN struct for LUN 0
6191 			 *
6192 			 * Currently we deal with first level of addressing.
6193 			 * If / when we start supporting 0x device types
6194 			 * (DTYPE_ARRAY_CTRL, i.e. array controllers)
6195 			 * this logic will need revisiting.
6196 			 */
6197 			lun0_newalloc = 0;
6198 			if ((plun = fcp_get_lun(ptgt, 0)) == NULL) {
6199 				/*
6200 				 * no LUN struct for LUN 0 yet exists,
6201 				 * so create one
6202 				 */
6203 				plun = fcp_alloc_lun(ptgt);
6204 				if (plun == NULL) {
6205 					fcp_log(CE_WARN, pptr->port_dip,
6206 					    "!Failed to allocate lun 0 for"
6207 					    " D_ID=%x", ptgt->tgt_d_id);
6208 					goto fail;
6209 				}
6210 				lun0_newalloc = 1;
6211 			}
6212 
6213 			/* fill in LUN info */
6214 			mutex_enter(&ptgt->tgt_mutex);
6215 			/*
6216 			 * consider lun 0 as device not connected if it is
6217 			 * offlined or newly allocated
6218 			 */
6219 			if ((plun->lun_state & FCP_LUN_OFFLINE) ||
6220 			    lun0_newalloc) {
6221 				plun->lun_state |= FCP_LUN_DEVICE_NOT_CONNECTED;
6222 			}
6223 			plun->lun_state |= (FCP_LUN_BUSY | FCP_LUN_MARK);
6224 			plun->lun_state &= ~FCP_LUN_OFFLINE;
6225 			ptgt->tgt_lun_cnt = 1;
6226 			ptgt->tgt_report_lun_cnt = 0;
6227 			mutex_exit(&ptgt->tgt_mutex);
6228 
6229 			/* Retrieve the rscn count (if a valid one exists) */
6230 			if (icmd->ipkt_fpkt->pkt_ulp_rscn_infop != NULL) {
6231 				rscn_count = ((fc_ulp_rscn_info_t *)
6232 				    (icmd->ipkt_fpkt->pkt_ulp_rscn_infop))
6233 				    ->ulp_rscn_count;
6234 			} else {
6235 				rscn_count = FC_INVALID_RSCN_COUNT;
6236 			}
6237 
6238 			/* send Report Lun request to target */
6239 			if (fcp_send_scsi(plun, SCMD_REPORT_LUN,
6240 			    sizeof (struct fcp_reportlun_resp),
6241 			    icmd->ipkt_link_cnt, icmd->ipkt_change_cnt,
6242 			    icmd->ipkt_cause, rscn_count) != DDI_SUCCESS) {
6243 				mutex_enter(&pptr->port_mutex);
6244 				if (!FCP_TGT_STATE_CHANGED(ptgt, icmd)) {
6245 					fcp_log(CE_WARN, pptr->port_dip,
6246 					    "!Failed to send REPORT LUN to"
6247 					    "  D_ID=%x", ptgt->tgt_d_id);
6248 				} else {
6249 					FCP_TRACE(fcp_logq,
6250 					    pptr->port_instbuf, fcp_trace,
6251 					    FCP_BUF_LEVEL_5, 0,
6252 					    "fcp_icmd_callback,2:state change"
6253 					    " occured for D_ID=0x%x",
6254 					    ptgt->tgt_d_id);
6255 				}
6256 				mutex_exit(&pptr->port_mutex);
6257 
6258 				FCP_TGT_TRACE(ptgt, icmd->ipkt_change_cnt,
6259 				    FCP_TGT_TRACE_19);
6260 
6261 				goto fail;
6262 			} else {
6263 				free_pkt = 0;
6264 				fcp_icmd_free(pptr, icmd);
6265 			}
6266 			break;
6267 
6268 		default:
6269 			fcp_log(CE_WARN, pptr->port_dip,
6270 			    "!fcp_icmd_callback Invalid opcode");
6271 			goto fail;
6272 		}
6273 
6274 		return;
6275 	}
6276 
6277 
6278 	/*
6279 	 * Other PLOGI failures are not retried as the
6280 	 * transport does it already
6281 	 */
6282 	if (icmd->ipkt_opcode != LA_ELS_PLOGI) {
6283 		if (fcp_is_retryable(icmd) &&
6284 		    icmd->ipkt_retries++ < FCP_MAX_RETRIES) {
6285 
6286 			if (FCP_MUST_RETRY(fpkt)) {
6287 				fcp_queue_ipkt(pptr, fpkt);
6288 				return;
6289 			}
6290 
6291 			FCP_TRACE(fcp_logq, pptr->port_instbuf,
6292 			    fcp_trace, FCP_BUF_LEVEL_2, 0,
6293 			    "ELS PRLI is retried for d_id=0x%x, state=%x,"
6294 			    " reason= %x", ptgt->tgt_d_id, fpkt->pkt_state,
6295 			    fpkt->pkt_reason);
6296 
6297 			/*
6298 			 * Retry by recalling the routine that
6299 			 * originally queued this packet
6300 			 */
6301 			mutex_enter(&pptr->port_mutex);
6302 			if (!FCP_TGT_STATE_CHANGED(ptgt, icmd)) {
6303 				caddr_t msg;
6304 
6305 				mutex_exit(&pptr->port_mutex);
6306 
6307 				ASSERT(icmd->ipkt_opcode != LA_ELS_PLOGI);
6308 
6309 				if (fpkt->pkt_state == FC_PKT_TIMEOUT) {
6310 					fpkt->pkt_timeout +=
6311 					    FCP_TIMEOUT_DELTA;
6312 				}
6313 
6314 				rval = fc_ulp_issue_els(pptr->port_fp_handle,
6315 				    fpkt);
6316 				if (rval == FC_SUCCESS) {
6317 					return;
6318 				}
6319 
6320 				if (rval == FC_STATEC_BUSY ||
6321 				    rval == FC_OFFLINE) {
6322 					fcp_queue_ipkt(pptr, fpkt);
6323 					return;
6324 				}
6325 				(void) fc_ulp_error(rval, &msg);
6326 
6327 				fcp_log(CE_NOTE, pptr->port_dip,
6328 				    "!ELS 0x%x failed to d_id=0x%x;"
6329 				    " %s", icmd->ipkt_opcode,
6330 				    ptgt->tgt_d_id, msg);
6331 			} else {
6332 				FCP_TRACE(fcp_logq, pptr->port_instbuf,
6333 				    fcp_trace, FCP_BUF_LEVEL_2, 0,
6334 				    "fcp_icmd_callback,3: state change "
6335 				    " occured for D_ID=0x%x", ptgt->tgt_d_id);
6336 				mutex_exit(&pptr->port_mutex);
6337 			}
6338 		}
6339 	} else {
6340 		if (fcp_is_retryable(icmd) &&
6341 		    icmd->ipkt_retries++ < FCP_MAX_RETRIES) {
6342 			if (FCP_MUST_RETRY(fpkt)) {
6343 				fcp_queue_ipkt(pptr, fpkt);
6344 				return;
6345 			}
6346 		}
6347 		mutex_enter(&pptr->port_mutex);
6348 		if (!FCP_TGT_STATE_CHANGED(ptgt, icmd) &&
6349 		    fpkt->pkt_state != FC_PKT_PORT_OFFLINE) {
6350 			mutex_exit(&pptr->port_mutex);
6351 			fcp_print_error(fpkt);
6352 		} else {
6353 			FCP_TRACE(fcp_logq, pptr->port_instbuf,
6354 			    fcp_trace, FCP_BUF_LEVEL_2, 0,
6355 			    "fcp_icmd_callback,4: state change occured"
6356 			    " for D_ID=0x%x", ptgt->tgt_d_id);
6357 			mutex_exit(&pptr->port_mutex);
6358 		}
6359 	}
6360 
6361 fail:
6362 	if (free_pkt) {
6363 		(void) fcp_call_finish_init(pptr, ptgt, icmd->ipkt_link_cnt,
6364 		    icmd->ipkt_change_cnt, icmd->ipkt_cause);
6365 		fcp_icmd_free(pptr, icmd);
6366 	}
6367 }
6368 
6369 
6370 /*
6371  * called internally to send an info cmd using the transport
6372  *
6373  * sends either an INQ or a REPORT_LUN
6374  *
6375  * when the packet is completed fcp_scsi_callback is called
6376  */
6377 static int
6378 fcp_send_scsi(struct fcp_lun *plun, uchar_t opcode, int alloc_len,
6379     int lcount, int tcount, int cause, uint32_t rscn_count)
6380 {
6381 	int			nodma;
6382 	struct fcp_ipkt		*icmd;
6383 	struct fcp_tgt		*ptgt;
6384 	struct fcp_port		*pptr;
6385 	fc_frame_hdr_t		*hp;
6386 	fc_packet_t		*fpkt;
6387 	struct fcp_cmd		fcp_cmd;
6388 	struct fcp_cmd		*fcmd;
6389 	union scsi_cdb		*scsi_cdb;
6390 
6391 	ASSERT(plun != NULL);
6392 
6393 	ptgt = plun->lun_tgt;
6394 	ASSERT(ptgt != NULL);
6395 
6396 	pptr = ptgt->tgt_port;
6397 	ASSERT(pptr != NULL);
6398 
6399 	FCP_TRACE(fcp_logq, pptr->port_instbuf,
6400 	    fcp_trace, FCP_BUF_LEVEL_5, 0,
6401 	    "fcp_send_scsi: d_id=0x%x opcode=0x%x", ptgt->tgt_d_id, opcode);
6402 
6403 	nodma = (pptr->port_fcp_dma == FC_NO_DVMA_SPACE) ? 1 : 0;
6404 
6405 	icmd = fcp_icmd_alloc(pptr, ptgt, sizeof (struct fcp_cmd),
6406 	    FCP_MAX_RSP_IU_SIZE, alloc_len, nodma, lcount, tcount, cause,
6407 	    rscn_count);
6408 
6409 	if (icmd == NULL) {
6410 		return (DDI_FAILURE);
6411 	}
6412 
6413 	fpkt = icmd->ipkt_fpkt;
6414 	fpkt->pkt_tran_flags = FC_TRAN_CLASS3 | FC_TRAN_INTR;
6415 	icmd->ipkt_retries = 0;
6416 	icmd->ipkt_opcode = opcode;
6417 	icmd->ipkt_lun = plun;
6418 
6419 	if (nodma) {
6420 		fcmd = (struct fcp_cmd *)fpkt->pkt_cmd;
6421 	} else {
6422 		fcmd = &fcp_cmd;
6423 	}
6424 	bzero(fcmd, sizeof (struct fcp_cmd));
6425 
6426 	fpkt->pkt_timeout = FCP_SCSI_CMD_TIMEOUT;
6427 
6428 	hp = &fpkt->pkt_cmd_fhdr;
6429 
6430 	hp->s_id = pptr->port_id;
6431 	hp->d_id = ptgt->tgt_d_id;
6432 	hp->r_ctl = R_CTL_COMMAND;
6433 	hp->type = FC_TYPE_SCSI_FCP;
6434 	hp->f_ctl = F_CTL_SEQ_INITIATIVE | F_CTL_FIRST_SEQ;
6435 	hp->rsvd = 0;
6436 	hp->seq_id = 0;
6437 	hp->seq_cnt = 0;
6438 	hp->ox_id = 0xffff;
6439 	hp->rx_id = 0xffff;
6440 	hp->ro = 0;
6441 
6442 	bcopy(&(plun->lun_addr), &(fcmd->fcp_ent_addr), FCP_LUN_SIZE);
6443 
6444 	/*
6445 	 * Request SCSI target for expedited processing
6446 	 */
6447 
6448 	/*
6449 	 * Set up for untagged queuing because we do not
6450 	 * know if the fibre device supports queuing.
6451 	 */
6452 	fcmd->fcp_cntl.cntl_reserved_0 = 0;
6453 	fcmd->fcp_cntl.cntl_reserved_1 = 0;
6454 	fcmd->fcp_cntl.cntl_reserved_2 = 0;
6455 	fcmd->fcp_cntl.cntl_reserved_3 = 0;
6456 	fcmd->fcp_cntl.cntl_reserved_4 = 0;
6457 	fcmd->fcp_cntl.cntl_qtype = FCP_QTYPE_UNTAGGED;
6458 	scsi_cdb = (union scsi_cdb *)fcmd->fcp_cdb;
6459 
6460 	switch (opcode) {
6461 	case SCMD_INQUIRY_PAGE83:
6462 		/*
6463 		 * Prepare to get the Inquiry VPD page 83 information
6464 		 */
6465 		fcmd->fcp_cntl.cntl_read_data = 1;
6466 		fcmd->fcp_cntl.cntl_write_data = 0;
6467 		fcmd->fcp_data_len = alloc_len;
6468 
6469 		fpkt->pkt_tran_type = FC_PKT_FCP_READ;
6470 		fpkt->pkt_comp = fcp_scsi_callback;
6471 
6472 		scsi_cdb->scc_cmd = SCMD_INQUIRY;
6473 		scsi_cdb->g0_addr2 = 0x01;
6474 		scsi_cdb->g0_addr1 = 0x83;
6475 		scsi_cdb->g0_count0 = (uchar_t)alloc_len;
6476 		break;
6477 
6478 	case SCMD_INQUIRY:
6479 		fcmd->fcp_cntl.cntl_read_data = 1;
6480 		fcmd->fcp_cntl.cntl_write_data = 0;
6481 		fcmd->fcp_data_len = alloc_len;
6482 
6483 		fpkt->pkt_tran_type = FC_PKT_FCP_READ;
6484 		fpkt->pkt_comp = fcp_scsi_callback;
6485 
6486 		scsi_cdb->scc_cmd = SCMD_INQUIRY;
6487 		scsi_cdb->g0_count0 = SUN_INQSIZE;
6488 		break;
6489 
6490 	case SCMD_REPORT_LUN: {
6491 		fc_portid_t	d_id;
6492 		opaque_t	fca_dev;
6493 
6494 		ASSERT(alloc_len >= 16);
6495 
6496 		d_id.priv_lilp_posit = 0;
6497 		d_id.port_id = ptgt->tgt_d_id;
6498 
6499 		fca_dev = fc_ulp_get_fca_device(pptr->port_fp_handle, d_id);
6500 
6501 		mutex_enter(&ptgt->tgt_mutex);
6502 		ptgt->tgt_fca_dev = fca_dev;
6503 		mutex_exit(&ptgt->tgt_mutex);
6504 
6505 		fcmd->fcp_cntl.cntl_read_data = 1;
6506 		fcmd->fcp_cntl.cntl_write_data = 0;
6507 		fcmd->fcp_data_len = alloc_len;
6508 
6509 		fpkt->pkt_tran_type = FC_PKT_FCP_READ;
6510 		fpkt->pkt_comp = fcp_scsi_callback;
6511 
6512 		scsi_cdb->scc_cmd = SCMD_REPORT_LUN;
6513 		scsi_cdb->scc5_count0 = alloc_len & 0xff;
6514 		scsi_cdb->scc5_count1 = (alloc_len >> 8) & 0xff;
6515 		scsi_cdb->scc5_count2 = (alloc_len >> 16) & 0xff;
6516 		scsi_cdb->scc5_count3 = (alloc_len >> 24) & 0xff;
6517 		break;
6518 	}
6519 
6520 	default:
6521 		fcp_log(CE_WARN, pptr->port_dip,
6522 		    "!fcp_send_scsi Invalid opcode");
6523 		break;
6524 	}
6525 
6526 	if (!nodma) {
6527 		FCP_CP_OUT((uint8_t *)fcmd, fpkt->pkt_cmd,
6528 		    fpkt->pkt_cmd_acc, sizeof (struct fcp_cmd));
6529 	}
6530 
6531 	mutex_enter(&pptr->port_mutex);
6532 	if (!FCP_TGT_STATE_CHANGED(ptgt, icmd)) {
6533 
6534 		mutex_exit(&pptr->port_mutex);
6535 		if (fcp_transport(pptr->port_fp_handle, fpkt, 1) !=
6536 		    FC_SUCCESS) {
6537 			fcp_icmd_free(pptr, icmd);
6538 			return (DDI_FAILURE);
6539 		}
6540 		return (DDI_SUCCESS);
6541 	} else {
6542 		FCP_TRACE(fcp_logq, pptr->port_instbuf,
6543 		    fcp_trace, FCP_BUF_LEVEL_2, 0,
6544 		    "fcp_send_scsi,1: state change occured"
6545 		    " for D_ID=0x%x", ptgt->tgt_d_id);
6546 		mutex_exit(&pptr->port_mutex);
6547 		fcp_icmd_free(pptr, icmd);
6548 		return (DDI_FAILURE);
6549 	}
6550 }
6551 
6552 
6553 /*
6554  * called by fcp_scsi_callback to check to handle the case where
6555  * REPORT_LUN returns ILLEGAL REQUEST or a UNIT ATTENTION
6556  */
6557 static int
6558 fcp_check_reportlun(struct fcp_rsp *rsp, fc_packet_t *fpkt)
6559 {
6560 	uchar_t				rqlen;
6561 	int				rval = DDI_FAILURE;
6562 	struct scsi_extended_sense	sense_info, *sense;
6563 	struct fcp_ipkt		*icmd = (struct fcp_ipkt *)
6564 	    fpkt->pkt_ulp_private;
6565 	struct fcp_tgt		*ptgt = icmd->ipkt_tgt;
6566 	struct fcp_port		*pptr = ptgt->tgt_port;
6567 
6568 	ASSERT(icmd->ipkt_opcode == SCMD_REPORT_LUN);
6569 
6570 	if (rsp->fcp_u.fcp_status.scsi_status == STATUS_RESERVATION_CONFLICT) {
6571 		/*
6572 		 * SCSI-II Reserve Release support. Some older FC drives return
6573 		 * Reservation conflict for Report Luns command.
6574 		 */
6575 		if (icmd->ipkt_nodma) {
6576 			rsp->fcp_u.fcp_status.rsp_len_set = 0;
6577 			rsp->fcp_u.fcp_status.sense_len_set = 0;
6578 			rsp->fcp_u.fcp_status.scsi_status = STATUS_GOOD;
6579 		} else {
6580 			fcp_rsp_t	new_resp;
6581 
6582 			FCP_CP_IN(fpkt->pkt_resp, &new_resp,
6583 			    fpkt->pkt_resp_acc, sizeof (new_resp));
6584 
6585 			new_resp.fcp_u.fcp_status.rsp_len_set = 0;
6586 			new_resp.fcp_u.fcp_status.sense_len_set = 0;
6587 			new_resp.fcp_u.fcp_status.scsi_status = STATUS_GOOD;
6588 
6589 			FCP_CP_OUT(&new_resp, fpkt->pkt_resp,
6590 			    fpkt->pkt_resp_acc, sizeof (new_resp));
6591 		}
6592 
6593 		FCP_CP_OUT(fcp_dummy_lun, fpkt->pkt_data,
6594 		    fpkt->pkt_data_acc, sizeof (fcp_dummy_lun));
6595 
6596 		return (DDI_SUCCESS);
6597 	}
6598 
6599 	sense = &sense_info;
6600 	if (!rsp->fcp_u.fcp_status.sense_len_set) {
6601 		/* no need to continue if sense length is not set */
6602 		return (rval);
6603 	}
6604 
6605 	/* casting 64-bit integer to 8-bit */
6606 	rqlen = (uchar_t)min(rsp->fcp_sense_len,
6607 	    sizeof (struct scsi_extended_sense));
6608 
6609 	if (rqlen < 14) {
6610 		/* no need to continue if request length isn't long enough */
6611 		return (rval);
6612 	}
6613 
6614 	if (icmd->ipkt_nodma) {
6615 		/*
6616 		 * We can safely use fcp_response_len here since the
6617 		 * only path that calls fcp_check_reportlun,
6618 		 * fcp_scsi_callback, has already called
6619 		 * fcp_validate_fcp_response.
6620 		 */
6621 		sense = (struct scsi_extended_sense *)(fpkt->pkt_resp +
6622 		    sizeof (struct fcp_rsp) + rsp->fcp_response_len);
6623 	} else {
6624 		FCP_CP_IN(fpkt->pkt_resp + sizeof (struct fcp_rsp) +
6625 		    rsp->fcp_response_len, sense, fpkt->pkt_resp_acc,
6626 		    sizeof (struct scsi_extended_sense));
6627 	}
6628 
6629 	if (!FCP_SENSE_NO_LUN(sense)) {
6630 		mutex_enter(&ptgt->tgt_mutex);
6631 		/* clear the flag if any */
6632 		ptgt->tgt_state &= ~FCP_TGT_ILLREQ;
6633 		mutex_exit(&ptgt->tgt_mutex);
6634 	}
6635 
6636 	if ((sense->es_key == KEY_ILLEGAL_REQUEST) &&
6637 	    (sense->es_add_code == 0x20)) {
6638 		if (icmd->ipkt_nodma) {
6639 			rsp->fcp_u.fcp_status.rsp_len_set = 0;
6640 			rsp->fcp_u.fcp_status.sense_len_set = 0;
6641 			rsp->fcp_u.fcp_status.scsi_status = STATUS_GOOD;
6642 		} else {
6643 			fcp_rsp_t	new_resp;
6644 
6645 			FCP_CP_IN(fpkt->pkt_resp, &new_resp,
6646 			    fpkt->pkt_resp_acc, sizeof (new_resp));
6647 
6648 			new_resp.fcp_u.fcp_status.rsp_len_set = 0;
6649 			new_resp.fcp_u.fcp_status.sense_len_set = 0;
6650 			new_resp.fcp_u.fcp_status.scsi_status = STATUS_GOOD;
6651 
6652 			FCP_CP_OUT(&new_resp, fpkt->pkt_resp,
6653 			    fpkt->pkt_resp_acc, sizeof (new_resp));
6654 		}
6655 
6656 		FCP_CP_OUT(fcp_dummy_lun, fpkt->pkt_data,
6657 		    fpkt->pkt_data_acc, sizeof (fcp_dummy_lun));
6658 
6659 		return (DDI_SUCCESS);
6660 	}
6661 
6662 	/*
6663 	 * This is for the STK library which returns a check condition,
6664 	 * to indicate device is not ready, manual assistance needed.
6665 	 * This is to a report lun command when the door is open.
6666 	 */
6667 	if ((sense->es_key == KEY_NOT_READY) && (sense->es_add_code == 0x04)) {
6668 		if (icmd->ipkt_nodma) {
6669 			rsp->fcp_u.fcp_status.rsp_len_set = 0;
6670 			rsp->fcp_u.fcp_status.sense_len_set = 0;
6671 			rsp->fcp_u.fcp_status.scsi_status = STATUS_GOOD;
6672 		} else {
6673 			fcp_rsp_t	new_resp;
6674 
6675 			FCP_CP_IN(fpkt->pkt_resp, &new_resp,
6676 			    fpkt->pkt_resp_acc, sizeof (new_resp));
6677 
6678 			new_resp.fcp_u.fcp_status.rsp_len_set = 0;
6679 			new_resp.fcp_u.fcp_status.sense_len_set = 0;
6680 			new_resp.fcp_u.fcp_status.scsi_status = STATUS_GOOD;
6681 
6682 			FCP_CP_OUT(&new_resp, fpkt->pkt_resp,
6683 			    fpkt->pkt_resp_acc, sizeof (new_resp));
6684 		}
6685 
6686 		FCP_CP_OUT(fcp_dummy_lun, fpkt->pkt_data,
6687 		    fpkt->pkt_data_acc, sizeof (fcp_dummy_lun));
6688 
6689 		return (DDI_SUCCESS);
6690 	}
6691 
6692 	if ((FCP_SENSE_REPORTLUN_CHANGED(sense)) ||
6693 	    (FCP_SENSE_NO_LUN(sense))) {
6694 		mutex_enter(&ptgt->tgt_mutex);
6695 		if ((FCP_SENSE_NO_LUN(sense)) &&
6696 		    (ptgt->tgt_state & FCP_TGT_ILLREQ)) {
6697 			ptgt->tgt_state &= ~FCP_TGT_ILLREQ;
6698 			mutex_exit(&ptgt->tgt_mutex);
6699 			/*
6700 			 * reconfig was triggred by ILLEGAL REQUEST but
6701 			 * got ILLEGAL REQUEST again
6702 			 */
6703 			FCP_TRACE(fcp_logq, pptr->port_instbuf,
6704 			    fcp_trace, FCP_BUF_LEVEL_3, 0,
6705 			    "!FCP: Unable to obtain Report Lun data"
6706 			    " target=%x", ptgt->tgt_d_id);
6707 		} else {
6708 			if (ptgt->tgt_tid == NULL) {
6709 				timeout_id_t	tid;
6710 				/*
6711 				 * REPORT LUN data has changed.	 Kick off
6712 				 * rediscovery
6713 				 */
6714 				tid = timeout(fcp_reconfigure_luns,
6715 				    (caddr_t)ptgt, (clock_t)drv_usectohz(1));
6716 
6717 				ptgt->tgt_tid = tid;
6718 				ptgt->tgt_state |= FCP_TGT_BUSY;
6719 			}
6720 			if (FCP_SENSE_NO_LUN(sense)) {
6721 				ptgt->tgt_state |= FCP_TGT_ILLREQ;
6722 			}
6723 			mutex_exit(&ptgt->tgt_mutex);
6724 			if (FCP_SENSE_REPORTLUN_CHANGED(sense)) {
6725 				FCP_TRACE(fcp_logq, pptr->port_instbuf,
6726 				    fcp_trace, FCP_BUF_LEVEL_3, 0,
6727 				    "!FCP:Report Lun Has Changed"
6728 				    " target=%x", ptgt->tgt_d_id);
6729 			} else if (FCP_SENSE_NO_LUN(sense)) {
6730 				FCP_TRACE(fcp_logq, pptr->port_instbuf,
6731 				    fcp_trace, FCP_BUF_LEVEL_3, 0,
6732 				    "!FCP:LU Not Supported"
6733 				    " target=%x", ptgt->tgt_d_id);
6734 			}
6735 		}
6736 		rval = DDI_SUCCESS;
6737 	}
6738 
6739 	FCP_TRACE(fcp_logq, pptr->port_instbuf,
6740 	    fcp_trace, FCP_BUF_LEVEL_5, 0,
6741 	    "D_ID=%x, sense=%x, status=%x",
6742 	    fpkt->pkt_cmd_fhdr.d_id, sense->es_key,
6743 	    rsp->fcp_u.fcp_status.scsi_status);
6744 
6745 	return (rval);
6746 }
6747 
6748 /*
6749  *     Function: fcp_scsi_callback
6750  *
6751  *  Description: This is the callback routine set by fcp_send_scsi() after
6752  *		 it calls fcp_icmd_alloc().  The SCSI command completed here
6753  *		 and autogenerated by FCP are:	REPORT_LUN, INQUIRY and
6754  *		 INQUIRY_PAGE83.
6755  *
6756  *     Argument: *fpkt	 FC packet used to convey the command
6757  *
6758  * Return Value: None
6759  */
6760 static void
6761 fcp_scsi_callback(fc_packet_t *fpkt)
6762 {
6763 	struct fcp_ipkt	*icmd = (struct fcp_ipkt *)
6764 	    fpkt->pkt_ulp_private;
6765 	struct fcp_rsp_info	fcp_rsp_err, *bep;
6766 	struct fcp_port	*pptr;
6767 	struct fcp_tgt	*ptgt;
6768 	struct fcp_lun	*plun;
6769 	struct fcp_rsp		response, *rsp;
6770 
6771 	if (icmd->ipkt_nodma) {
6772 		rsp = (struct fcp_rsp *)fpkt->pkt_resp;
6773 	} else {
6774 		rsp = &response;
6775 		FCP_CP_IN(fpkt->pkt_resp, rsp, fpkt->pkt_resp_acc,
6776 		    sizeof (struct fcp_rsp));
6777 	}
6778 
6779 	ptgt = icmd->ipkt_tgt;
6780 	pptr = ptgt->tgt_port;
6781 	plun = icmd->ipkt_lun;
6782 
6783 	FCP_TRACE(fcp_logq, pptr->port_instbuf,
6784 	    fcp_trace, FCP_BUF_LEVEL_2, 0,
6785 	    "SCSI callback state=0x%x for %x, op_code=0x%x, "
6786 	    "status=%x, lun num=%x",
6787 	    fpkt->pkt_state, ptgt->tgt_d_id, icmd->ipkt_opcode,
6788 	    rsp->fcp_u.fcp_status.scsi_status, plun->lun_num);
6789 
6790 	/*
6791 	 * Pre-init LUN GUID with NWWN if it is not a device that
6792 	 * supports multiple luns and we know it's not page83
6793 	 * compliant.  Although using a NWWN is not lun unique,
6794 	 * we will be fine since there is only one lun behind the taget
6795 	 * in this case.
6796 	 */
6797 	if ((plun->lun_guid_size == 0) &&
6798 	    (icmd->ipkt_opcode == SCMD_INQUIRY_PAGE83) &&
6799 	    (fcp_symmetric_device_probe(plun) == 0)) {
6800 
6801 		char ascii_wwn[FC_WWN_SIZE*2+1];
6802 		fcp_wwn_to_ascii(&ptgt->tgt_node_wwn.raw_wwn[0], ascii_wwn);
6803 		(void) fcp_copy_guid_2_lun_block(plun, ascii_wwn);
6804 	}
6805 
6806 	/*
6807 	 * Some old FC tapes and FC <-> SCSI bridge devices return overrun
6808 	 * when thay have more data than what is asked in CDB. An overrun
6809 	 * is really when FCP_DL is smaller than the data length in CDB.
6810 	 * In the case here we know that REPORT LUN command we formed within
6811 	 * this binary has correct FCP_DL. So this OVERRUN is due to bad device
6812 	 * behavior. In reality this is FC_SUCCESS.
6813 	 */
6814 	if ((fpkt->pkt_state != FC_PKT_SUCCESS) &&
6815 	    (fpkt->pkt_reason == FC_REASON_OVERRUN) &&
6816 	    (icmd->ipkt_opcode == SCMD_REPORT_LUN)) {
6817 		fpkt->pkt_state = FC_PKT_SUCCESS;
6818 	}
6819 
6820 	if (fpkt->pkt_state != FC_PKT_SUCCESS) {
6821 		FCP_TRACE(fcp_logq, pptr->port_instbuf,
6822 		    fcp_trace, FCP_BUF_LEVEL_2, 0,
6823 		    "icmd failed with state=0x%x for %x", fpkt->pkt_state,
6824 		    ptgt->tgt_d_id);
6825 
6826 		if (fpkt->pkt_reason == FC_REASON_CRC_ERROR) {
6827 			/*
6828 			 * Inquiry VPD page command on A5K SES devices would
6829 			 * result in data CRC errors.
6830 			 */
6831 			if (icmd->ipkt_opcode == SCMD_INQUIRY_PAGE83) {
6832 				(void) fcp_handle_page83(fpkt, icmd, 1);
6833 				return;
6834 			}
6835 		}
6836 		if (fpkt->pkt_state == FC_PKT_TIMEOUT ||
6837 		    FCP_MUST_RETRY(fpkt)) {
6838 			fpkt->pkt_timeout += FCP_TIMEOUT_DELTA;
6839 			fcp_retry_scsi_cmd(fpkt);
6840 			return;
6841 		}
6842 
6843 		FCP_TGT_TRACE(ptgt, icmd->ipkt_change_cnt,
6844 		    FCP_TGT_TRACE_20);
6845 
6846 		mutex_enter(&pptr->port_mutex);
6847 		mutex_enter(&ptgt->tgt_mutex);
6848 		if (!FCP_STATE_CHANGED(pptr, ptgt, icmd)) {
6849 			mutex_exit(&ptgt->tgt_mutex);
6850 			mutex_exit(&pptr->port_mutex);
6851 			fcp_print_error(fpkt);
6852 		} else {
6853 			FCP_TRACE(fcp_logq, pptr->port_instbuf,
6854 			    fcp_trace, FCP_BUF_LEVEL_2, 0,
6855 			    "fcp_scsi_callback,1: state change occured"
6856 			    " for D_ID=0x%x", ptgt->tgt_d_id);
6857 			mutex_exit(&ptgt->tgt_mutex);
6858 			mutex_exit(&pptr->port_mutex);
6859 		}
6860 		(void) fcp_call_finish_init(pptr, ptgt, icmd->ipkt_link_cnt,
6861 		    icmd->ipkt_change_cnt, icmd->ipkt_cause);
6862 		fcp_icmd_free(pptr, icmd);
6863 		return;
6864 	}
6865 
6866 	FCP_TGT_TRACE(ptgt, icmd->ipkt_change_cnt, FCP_TGT_TRACE_21);
6867 
6868 	mutex_enter(&pptr->port_mutex);
6869 	mutex_enter(&ptgt->tgt_mutex);
6870 	if (FCP_STATE_CHANGED(pptr, ptgt, icmd)) {
6871 		FCP_TRACE(fcp_logq, pptr->port_instbuf,
6872 		    fcp_trace, FCP_BUF_LEVEL_2, 0,
6873 		    "fcp_scsi_callback,2: state change occured"
6874 		    " for D_ID=0x%x", ptgt->tgt_d_id);
6875 		mutex_exit(&ptgt->tgt_mutex);
6876 		mutex_exit(&pptr->port_mutex);
6877 		(void) fcp_call_finish_init(pptr, ptgt, icmd->ipkt_link_cnt,
6878 		    icmd->ipkt_change_cnt, icmd->ipkt_cause);
6879 		fcp_icmd_free(pptr, icmd);
6880 		return;
6881 	}
6882 	ASSERT((ptgt->tgt_state & FCP_TGT_MARK) == 0);
6883 
6884 	mutex_exit(&ptgt->tgt_mutex);
6885 	mutex_exit(&pptr->port_mutex);
6886 
6887 	if (icmd->ipkt_nodma) {
6888 		bep = (struct fcp_rsp_info *)(fpkt->pkt_resp +
6889 		    sizeof (struct fcp_rsp));
6890 	} else {
6891 		bep = &fcp_rsp_err;
6892 		FCP_CP_IN(fpkt->pkt_resp + sizeof (struct fcp_rsp), bep,
6893 		    fpkt->pkt_resp_acc, sizeof (struct fcp_rsp_info));
6894 	}
6895 
6896 	if (fcp_validate_fcp_response(rsp, pptr) != FC_SUCCESS) {
6897 		fcp_retry_scsi_cmd(fpkt);
6898 		return;
6899 	}
6900 
6901 	if (rsp->fcp_u.fcp_status.rsp_len_set && bep->rsp_code !=
6902 	    FCP_NO_FAILURE) {
6903 		FCP_TRACE(fcp_logq, pptr->port_instbuf,
6904 		    fcp_trace, FCP_BUF_LEVEL_2, 0,
6905 		    "rsp_code=0x%x, rsp_len_set=0x%x",
6906 		    bep->rsp_code, rsp->fcp_u.fcp_status.rsp_len_set);
6907 		fcp_retry_scsi_cmd(fpkt);
6908 		return;
6909 	}
6910 
6911 	if (rsp->fcp_u.fcp_status.scsi_status == STATUS_QFULL ||
6912 	    rsp->fcp_u.fcp_status.scsi_status == STATUS_BUSY) {
6913 		fcp_queue_ipkt(pptr, fpkt);
6914 		return;
6915 	}
6916 
6917 	/*
6918 	 * Devices that do not support INQUIRY_PAGE83, return check condition
6919 	 * with illegal request as per SCSI spec.
6920 	 * Crossbridge is one such device and Daktari's SES node is another.
6921 	 * We want to ideally enumerate these devices as a non-mpxio devices.
6922 	 * SES nodes (Daktari only currently) are an exception to this.
6923 	 */
6924 	if ((icmd->ipkt_opcode == SCMD_INQUIRY_PAGE83) &&
6925 	    (rsp->fcp_u.fcp_status.scsi_status & STATUS_CHECK)) {
6926 
6927 		FCP_TRACE(fcp_logq, pptr->port_instbuf,
6928 		    fcp_trace, FCP_BUF_LEVEL_3, 0,
6929 		    "INQUIRY_PAGE83 for d_id %x (dtype:0x%x) failed with "
6930 		    "check condition. May enumerate as non-mpxio device",
6931 		    ptgt->tgt_d_id, plun->lun_type);
6932 
6933 		/*
6934 		 * If we let Daktari's SES be enumerated as a non-mpxio
6935 		 * device, there will be a discrepency in that the other
6936 		 * internal FC disks will get enumerated as mpxio devices.
6937 		 * Applications like luxadm expect this to be consistent.
6938 		 *
6939 		 * So, we put in a hack here to check if this is an SES device
6940 		 * and handle it here.
6941 		 */
6942 		if (plun->lun_type == DTYPE_ESI) {
6943 			/*
6944 			 * Since, pkt_state is actually FC_PKT_SUCCESS
6945 			 * at this stage, we fake a failure here so that
6946 			 * fcp_handle_page83 will create a device path using
6947 			 * the WWN instead of the GUID which is not there anyway
6948 			 */
6949 			fpkt->pkt_state = FC_PKT_LOCAL_RJT;
6950 			(void) fcp_handle_page83(fpkt, icmd, 1);
6951 			return;
6952 		}
6953 
6954 		mutex_enter(&ptgt->tgt_mutex);
6955 		plun->lun_state &= ~(FCP_LUN_OFFLINE |
6956 		    FCP_LUN_MARK | FCP_LUN_BUSY);
6957 		mutex_exit(&ptgt->tgt_mutex);
6958 
6959 		(void) fcp_call_finish_init(pptr, ptgt,
6960 		    icmd->ipkt_link_cnt, icmd->ipkt_change_cnt,
6961 		    icmd->ipkt_cause);
6962 		fcp_icmd_free(pptr, icmd);
6963 		return;
6964 	}
6965 
6966 	if (rsp->fcp_u.fcp_status.scsi_status != STATUS_GOOD) {
6967 		int rval = DDI_FAILURE;
6968 
6969 		/*
6970 		 * handle cases where report lun isn't supported
6971 		 * by faking up our own REPORT_LUN response or
6972 		 * UNIT ATTENTION
6973 		 */
6974 		if (icmd->ipkt_opcode == SCMD_REPORT_LUN) {
6975 			rval = fcp_check_reportlun(rsp, fpkt);
6976 
6977 			/*
6978 			 * fcp_check_reportlun might have modified the
6979 			 * FCP response. Copy it in again to get an updated
6980 			 * FCP response
6981 			 */
6982 			if (rval == DDI_SUCCESS && icmd->ipkt_nodma == 0) {
6983 				rsp = &response;
6984 
6985 				FCP_CP_IN(fpkt->pkt_resp, rsp,
6986 				    fpkt->pkt_resp_acc,
6987 				    sizeof (struct fcp_rsp));
6988 			}
6989 		}
6990 
6991 		if (rsp->fcp_u.fcp_status.scsi_status != STATUS_GOOD) {
6992 			if (rval == DDI_SUCCESS) {
6993 				(void) fcp_call_finish_init(pptr, ptgt,
6994 				    icmd->ipkt_link_cnt, icmd->ipkt_change_cnt,
6995 				    icmd->ipkt_cause);
6996 				fcp_icmd_free(pptr, icmd);
6997 			} else {
6998 				fcp_retry_scsi_cmd(fpkt);
6999 			}
7000 
7001 			return;
7002 		}
7003 	} else {
7004 		if (icmd->ipkt_opcode == SCMD_REPORT_LUN) {
7005 			mutex_enter(&ptgt->tgt_mutex);
7006 			ptgt->tgt_state &= ~FCP_TGT_ILLREQ;
7007 			mutex_exit(&ptgt->tgt_mutex);
7008 		}
7009 	}
7010 
7011 	ASSERT(rsp->fcp_u.fcp_status.scsi_status == STATUS_GOOD);
7012 
7013 	(void) ddi_dma_sync(fpkt->pkt_data_dma, 0, 0, DDI_DMA_SYNC_FORCPU);
7014 
7015 	switch (icmd->ipkt_opcode) {
7016 	case SCMD_INQUIRY:
7017 		FCP_LUN_TRACE(plun, FCP_LUN_TRACE_1);
7018 		fcp_handle_inquiry(fpkt, icmd);
7019 		break;
7020 
7021 	case SCMD_REPORT_LUN:
7022 		FCP_TGT_TRACE(ptgt, icmd->ipkt_change_cnt,
7023 		    FCP_TGT_TRACE_22);
7024 		fcp_handle_reportlun(fpkt, icmd);
7025 		break;
7026 
7027 	case SCMD_INQUIRY_PAGE83:
7028 		FCP_LUN_TRACE(plun, FCP_LUN_TRACE_2);
7029 		(void) fcp_handle_page83(fpkt, icmd, 0);
7030 		break;
7031 
7032 	default:
7033 		fcp_log(CE_WARN, NULL, "!Invalid SCSI opcode");
7034 		(void) fcp_call_finish_init(pptr, ptgt, icmd->ipkt_link_cnt,
7035 		    icmd->ipkt_change_cnt, icmd->ipkt_cause);
7036 		fcp_icmd_free(pptr, icmd);
7037 		break;
7038 	}
7039 }
7040 
7041 
7042 static void
7043 fcp_retry_scsi_cmd(fc_packet_t *fpkt)
7044 {
7045 	struct fcp_ipkt	*icmd = (struct fcp_ipkt *)
7046 	    fpkt->pkt_ulp_private;
7047 	struct fcp_tgt	*ptgt = icmd->ipkt_tgt;
7048 	struct fcp_port	*pptr = ptgt->tgt_port;
7049 
7050 	if (icmd->ipkt_retries < FCP_MAX_RETRIES &&
7051 	    fcp_is_retryable(icmd)) {
7052 		mutex_enter(&pptr->port_mutex);
7053 		if (!FCP_TGT_STATE_CHANGED(ptgt, icmd)) {
7054 			mutex_exit(&pptr->port_mutex);
7055 			FCP_TRACE(fcp_logq, pptr->port_instbuf,
7056 			    fcp_trace, FCP_BUF_LEVEL_3, 0,
7057 			    "Retrying %s to %x; state=%x, reason=%x",
7058 			    (icmd->ipkt_opcode == SCMD_REPORT_LUN) ?
7059 			    "Report LUN" : "INQUIRY", ptgt->tgt_d_id,
7060 			    fpkt->pkt_state, fpkt->pkt_reason);
7061 
7062 			fcp_queue_ipkt(pptr, fpkt);
7063 		} else {
7064 			FCP_TRACE(fcp_logq, pptr->port_instbuf,
7065 			    fcp_trace, FCP_BUF_LEVEL_3, 0,
7066 			    "fcp_retry_scsi_cmd,1: state change occured"
7067 			    " for D_ID=0x%x", ptgt->tgt_d_id);
7068 			mutex_exit(&pptr->port_mutex);
7069 			(void) fcp_call_finish_init(pptr, ptgt,
7070 			    icmd->ipkt_link_cnt, icmd->ipkt_change_cnt,
7071 			    icmd->ipkt_cause);
7072 			fcp_icmd_free(pptr, icmd);
7073 		}
7074 	} else {
7075 		fcp_print_error(fpkt);
7076 		(void) fcp_call_finish_init(pptr, ptgt, icmd->ipkt_link_cnt,
7077 		    icmd->ipkt_change_cnt, icmd->ipkt_cause);
7078 		fcp_icmd_free(pptr, icmd);
7079 	}
7080 }
7081 
7082 /*
7083  *     Function: fcp_handle_page83
7084  *
7085  *  Description: Treats the response to INQUIRY_PAGE83.
7086  *
7087  *     Argument: *fpkt	FC packet used to convey the command.
7088  *		 *icmd	Original fcp_ipkt structure.
7089  *		 ignore_page83_data
7090  *			if it's 1, that means it's a special devices's
7091  *			page83 response, it should be enumerated under mpxio
7092  *
7093  * Return Value: None
7094  */
7095 static void
7096 fcp_handle_page83(fc_packet_t *fpkt, struct fcp_ipkt *icmd,
7097     int ignore_page83_data)
7098 {
7099 	struct fcp_port	*pptr;
7100 	struct fcp_lun	*plun;
7101 	struct fcp_tgt	*ptgt;
7102 	uchar_t			dev_id_page[SCMD_MAX_INQUIRY_PAGE83_SIZE];
7103 	int			fail = 0;
7104 	ddi_devid_t		devid;
7105 	char			*guid = NULL;
7106 	int			ret;
7107 
7108 	ASSERT(icmd != NULL && fpkt != NULL);
7109 
7110 	pptr = icmd->ipkt_port;
7111 	ptgt = icmd->ipkt_tgt;
7112 	plun = icmd->ipkt_lun;
7113 
7114 	if (fpkt->pkt_state == FC_PKT_SUCCESS) {
7115 		FCP_LUN_TRACE(plun, FCP_LUN_TRACE_7);
7116 
7117 		FCP_CP_IN(fpkt->pkt_data, dev_id_page, fpkt->pkt_data_acc,
7118 		    SCMD_MAX_INQUIRY_PAGE83_SIZE);
7119 
7120 		FCP_TRACE(fcp_logq, pptr->port_instbuf,
7121 		    fcp_trace, FCP_BUF_LEVEL_5, 0,
7122 		    "fcp_handle_page83: port=%d, tgt D_ID=0x%x, "
7123 		    "dtype=0x%x, lun num=%x",
7124 		    pptr->port_instance, ptgt->tgt_d_id,
7125 		    dev_id_page[0], plun->lun_num);
7126 
7127 		ret = ddi_devid_scsi_encode(
7128 		    DEVID_SCSI_ENCODE_VERSION_LATEST,
7129 		    NULL,		/* driver name */
7130 		    (unsigned char *) &plun->lun_inq, /* standard inquiry */
7131 		    sizeof (plun->lun_inq), /* size of standard inquiry */
7132 		    NULL,		/* page 80 data */
7133 		    0,		/* page 80 len */
7134 		    dev_id_page,	/* page 83 data */
7135 		    SCMD_MAX_INQUIRY_PAGE83_SIZE, /* page 83 data len */
7136 		    &devid);
7137 
7138 		if (ret == DDI_SUCCESS) {
7139 
7140 			guid = ddi_devid_to_guid(devid);
7141 
7142 			if (guid) {
7143 				/*
7144 				 * Check our current guid.  If it's non null
7145 				 * and it has changed, we need to copy it into
7146 				 * lun_old_guid since we might still need it.
7147 				 */
7148 				if (plun->lun_guid &&
7149 				    strcmp(guid, plun->lun_guid)) {
7150 					unsigned int len;
7151 
7152 					/*
7153 					 * If the guid of the LUN changes,
7154 					 * reconfiguration should be triggered
7155 					 * to reflect the changes.
7156 					 * i.e. we should offline the LUN with
7157 					 * the old guid, and online the LUN with
7158 					 * the new guid.
7159 					 */
7160 					plun->lun_state |= FCP_LUN_CHANGED;
7161 
7162 					if (plun->lun_old_guid) {
7163 						kmem_free(plun->lun_old_guid,
7164 						    plun->lun_old_guid_size);
7165 					}
7166 
7167 					len = plun->lun_guid_size;
7168 					plun->lun_old_guid_size = len;
7169 
7170 					plun->lun_old_guid = kmem_zalloc(len,
7171 					    KM_NOSLEEP);
7172 
7173 					if (plun->lun_old_guid) {
7174 						/*
7175 						 * The alloc was successful then
7176 						 * let's do the copy.
7177 						 */
7178 						bcopy(plun->lun_guid,
7179 						    plun->lun_old_guid, len);
7180 					} else {
7181 						fail = 1;
7182 						plun->lun_old_guid_size = 0;
7183 					}
7184 				}
7185 				if (!fail) {
7186 					if (fcp_copy_guid_2_lun_block(
7187 					    plun, guid)) {
7188 						fail = 1;
7189 					}
7190 				}
7191 				ddi_devid_free_guid(guid);
7192 
7193 			} else {
7194 				FCP_TRACE(fcp_logq, pptr->port_instbuf,
7195 				    fcp_trace, FCP_BUF_LEVEL_2, 0,
7196 				    "fcp_handle_page83: unable to create "
7197 				    "GUID");
7198 
7199 				/* couldn't create good guid from devid */
7200 				fail = 1;
7201 			}
7202 			ddi_devid_free(devid);
7203 
7204 		} else if (ret == DDI_NOT_WELL_FORMED) {
7205 			/* NULL filled data for page 83 */
7206 			FCP_TRACE(fcp_logq, pptr->port_instbuf,
7207 			    fcp_trace, FCP_BUF_LEVEL_2, 0,
7208 			    "fcp_handle_page83: retry GUID");
7209 
7210 			icmd->ipkt_retries = 0;
7211 			fcp_retry_scsi_cmd(fpkt);
7212 			return;
7213 		} else {
7214 			FCP_TRACE(fcp_logq, pptr->port_instbuf,
7215 			    fcp_trace, FCP_BUF_LEVEL_2, 0,
7216 			    "fcp_handle_page83: bad ddi_devid_scsi_encode %x",
7217 			    ret);
7218 			/*
7219 			 * Since the page83 validation
7220 			 * introduced late, we are being
7221 			 * tolerant to the existing devices
7222 			 * that already found to be working
7223 			 * under mpxio, like A5200's SES device,
7224 			 * its page83 response will not be standard-compliant,
7225 			 * but we still want it to be enumerated under mpxio.
7226 			 */
7227 			if (fcp_symmetric_device_probe(plun) != 0) {
7228 				fail = 1;
7229 			}
7230 		}
7231 
7232 	} else {
7233 		/* bad packet state */
7234 		FCP_LUN_TRACE(plun, FCP_LUN_TRACE_8);
7235 
7236 		/*
7237 		 * For some special devices (A5K SES and Daktari's SES devices),
7238 		 * they should be enumerated under mpxio
7239 		 * or "luxadm dis" will fail
7240 		 */
7241 		if (ignore_page83_data) {
7242 			fail = 0;
7243 		} else {
7244 			fail = 1;
7245 		}
7246 		FCP_TRACE(fcp_logq, pptr->port_instbuf,
7247 		    fcp_trace, FCP_BUF_LEVEL_2, 0,
7248 		    "!Devid page cmd failed. "
7249 		    "fpkt_state: %x fpkt_reason: %x",
7250 		    "ignore_page83: %d",
7251 		    fpkt->pkt_state, fpkt->pkt_reason,
7252 		    ignore_page83_data);
7253 	}
7254 
7255 	mutex_enter(&pptr->port_mutex);
7256 	mutex_enter(&plun->lun_mutex);
7257 	/*
7258 	 * If lun_cip is not NULL, then we needn't update lun_mpxio to avoid
7259 	 * mismatch between lun_cip and lun_mpxio.
7260 	 */
7261 	if (plun->lun_cip == NULL) {
7262 		/*
7263 		 * If we don't have a guid for this lun it's because we were
7264 		 * unable to glean one from the page 83 response.  Set the
7265 		 * control flag to 0 here to make sure that we don't attempt to
7266 		 * enumerate it under mpxio.
7267 		 */
7268 		if (fail || pptr->port_mpxio == 0) {
7269 			plun->lun_mpxio = 0;
7270 		} else {
7271 			plun->lun_mpxio = 1;
7272 		}
7273 	}
7274 	mutex_exit(&plun->lun_mutex);
7275 	mutex_exit(&pptr->port_mutex);
7276 
7277 	mutex_enter(&ptgt->tgt_mutex);
7278 	plun->lun_state &=
7279 	    ~(FCP_LUN_OFFLINE | FCP_LUN_MARK | FCP_LUN_BUSY);
7280 	mutex_exit(&ptgt->tgt_mutex);
7281 
7282 	(void) fcp_call_finish_init(pptr, ptgt, icmd->ipkt_link_cnt,
7283 	    icmd->ipkt_change_cnt, icmd->ipkt_cause);
7284 
7285 	fcp_icmd_free(pptr, icmd);
7286 }
7287 
7288 /*
7289  *     Function: fcp_handle_inquiry
7290  *
7291  *  Description: Called by fcp_scsi_callback to handle the response to an
7292  *		 INQUIRY request.
7293  *
7294  *     Argument: *fpkt	FC packet used to convey the command.
7295  *		 *icmd	Original fcp_ipkt structure.
7296  *
7297  * Return Value: None
7298  */
7299 static void
7300 fcp_handle_inquiry(fc_packet_t *fpkt, struct fcp_ipkt *icmd)
7301 {
7302 	struct fcp_port	*pptr;
7303 	struct fcp_lun	*plun;
7304 	struct fcp_tgt	*ptgt;
7305 	uchar_t		dtype;
7306 	uchar_t		pqual;
7307 	uint32_t	rscn_count = FC_INVALID_RSCN_COUNT;
7308 
7309 	ASSERT(icmd != NULL && fpkt != NULL);
7310 
7311 	pptr = icmd->ipkt_port;
7312 	ptgt = icmd->ipkt_tgt;
7313 	plun = icmd->ipkt_lun;
7314 
7315 	FCP_CP_IN(fpkt->pkt_data, &plun->lun_inq, fpkt->pkt_data_acc,
7316 	    sizeof (struct scsi_inquiry));
7317 
7318 	dtype = plun->lun_inq.inq_dtype & DTYPE_MASK;
7319 	pqual = plun->lun_inq.inq_dtype >> 5;
7320 
7321 	FCP_TRACE(fcp_logq, pptr->port_instbuf,
7322 	    fcp_trace, FCP_BUF_LEVEL_5, 0,
7323 	    "fcp_handle_inquiry: port=%d, tgt D_ID=0x%x, lun=0x%x, "
7324 	    "dtype=0x%x pqual: 0x%x", pptr->port_instance, ptgt->tgt_d_id,
7325 	    plun->lun_num, dtype, pqual);
7326 
7327 	if (pqual != 0) {
7328 		/*
7329 		 * Non-zero peripheral qualifier
7330 		 */
7331 		fcp_log(CE_CONT, pptr->port_dip,
7332 		    "!Target 0x%x lun 0x%x: Nonzero peripheral qualifier: "
7333 		    "Device type=0x%x Peripheral qual=0x%x\n",
7334 		    ptgt->tgt_d_id, plun->lun_num, dtype, pqual);
7335 
7336 		FCP_TRACE(fcp_logq, pptr->port_instbuf,
7337 		    fcp_trace, FCP_BUF_LEVEL_5, 0,
7338 		    "!Target 0x%x lun 0x%x: Nonzero peripheral qualifier: "
7339 		    "Device type=0x%x Peripheral qual=0x%x\n",
7340 		    ptgt->tgt_d_id, plun->lun_num, dtype, pqual);
7341 
7342 		FCP_LUN_TRACE(plun, FCP_LUN_TRACE_3);
7343 
7344 		(void) fcp_call_finish_init(pptr, ptgt, icmd->ipkt_link_cnt,
7345 		    icmd->ipkt_change_cnt, icmd->ipkt_cause);
7346 		fcp_icmd_free(pptr, icmd);
7347 		return;
7348 	}
7349 
7350 	/*
7351 	 * If the device is already initialized, check the dtype
7352 	 * for a change. If it has changed then update the flags
7353 	 * so the create_luns will offline the old device and
7354 	 * create the new device. Refer to bug: 4764752
7355 	 */
7356 	if ((plun->lun_state & FCP_LUN_INIT) && dtype != plun->lun_type) {
7357 		plun->lun_state |= FCP_LUN_CHANGED;
7358 	}
7359 	plun->lun_type = plun->lun_inq.inq_dtype;
7360 
7361 	/*
7362 	 * This code is setting/initializing the throttling in the FCA
7363 	 * driver.
7364 	 */
7365 	mutex_enter(&pptr->port_mutex);
7366 	if (!pptr->port_notify) {
7367 		if (bcmp(plun->lun_inq.inq_pid, pid, strlen(pid)) == 0) {
7368 			uint32_t cmd = 0;
7369 			cmd = ((cmd & 0xFF | FC_NOTIFY_THROTTLE) |
7370 			    ((cmd & 0xFFFFFF00 >> 8) |
7371 			    FCP_SVE_THROTTLE << 8));
7372 			pptr->port_notify = 1;
7373 			mutex_exit(&pptr->port_mutex);
7374 			(void) fc_ulp_port_notify(pptr->port_fp_handle, cmd);
7375 			mutex_enter(&pptr->port_mutex);
7376 		}
7377 	}
7378 
7379 	if (FCP_TGT_STATE_CHANGED(ptgt, icmd)) {
7380 		FCP_TRACE(fcp_logq, pptr->port_instbuf,
7381 		    fcp_trace, FCP_BUF_LEVEL_2, 0,
7382 		    "fcp_handle_inquiry,1:state change occured"
7383 		    " for D_ID=0x%x", ptgt->tgt_d_id);
7384 		mutex_exit(&pptr->port_mutex);
7385 
7386 		FCP_LUN_TRACE(plun, FCP_LUN_TRACE_5);
7387 		(void) fcp_call_finish_init(pptr, ptgt,
7388 		    icmd->ipkt_link_cnt, icmd->ipkt_change_cnt,
7389 		    icmd->ipkt_cause);
7390 		fcp_icmd_free(pptr, icmd);
7391 		return;
7392 	}
7393 	ASSERT((ptgt->tgt_state & FCP_TGT_MARK) == 0);
7394 	mutex_exit(&pptr->port_mutex);
7395 
7396 	/* Retrieve the rscn count (if a valid one exists) */
7397 	if (icmd->ipkt_fpkt->pkt_ulp_rscn_infop != NULL) {
7398 		rscn_count = ((fc_ulp_rscn_info_t *)
7399 		    (icmd->ipkt_fpkt->pkt_ulp_rscn_infop))->ulp_rscn_count;
7400 	} else {
7401 		rscn_count = FC_INVALID_RSCN_COUNT;
7402 	}
7403 
7404 	if (fcp_send_scsi(plun, SCMD_INQUIRY_PAGE83,
7405 	    SCMD_MAX_INQUIRY_PAGE83_SIZE,
7406 	    icmd->ipkt_link_cnt, icmd->ipkt_change_cnt,
7407 	    icmd->ipkt_cause, rscn_count) != DDI_SUCCESS) {
7408 		fcp_log(CE_WARN, NULL, "!failed to send page 83");
7409 		FCP_LUN_TRACE(plun, FCP_LUN_TRACE_6);
7410 		(void) fcp_call_finish_init(pptr, ptgt,
7411 		    icmd->ipkt_link_cnt, icmd->ipkt_change_cnt,
7412 		    icmd->ipkt_cause);
7413 	}
7414 
7415 	/*
7416 	 * Read Inquiry VPD Page 0x83 to uniquely
7417 	 * identify this logical unit.
7418 	 */
7419 	fcp_icmd_free(pptr, icmd);
7420 }
7421 
7422 /*
7423  *     Function: fcp_handle_reportlun
7424  *
7425  *  Description: Called by fcp_scsi_callback to handle the response to a
7426  *		 REPORT_LUN request.
7427  *
7428  *     Argument: *fpkt	FC packet used to convey the command.
7429  *		 *icmd	Original fcp_ipkt structure.
7430  *
7431  * Return Value: None
7432  */
7433 static void
7434 fcp_handle_reportlun(fc_packet_t *fpkt, struct fcp_ipkt *icmd)
7435 {
7436 	int				i;
7437 	int				nluns_claimed;
7438 	int				nluns_bufmax;
7439 	int				len;
7440 	uint16_t			lun_num;
7441 	uint32_t			rscn_count = FC_INVALID_RSCN_COUNT;
7442 	struct fcp_port			*pptr;
7443 	struct fcp_tgt			*ptgt;
7444 	struct fcp_lun			*plun;
7445 	struct fcp_reportlun_resp	*report_lun;
7446 
7447 	pptr = icmd->ipkt_port;
7448 	ptgt = icmd->ipkt_tgt;
7449 	len = fpkt->pkt_datalen;
7450 
7451 	if ((len < FCP_LUN_HEADER) ||
7452 	    ((report_lun = kmem_zalloc(len, KM_NOSLEEP)) == NULL)) {
7453 		(void) fcp_call_finish_init(pptr, ptgt, icmd->ipkt_link_cnt,
7454 		    icmd->ipkt_change_cnt, icmd->ipkt_cause);
7455 		fcp_icmd_free(pptr, icmd);
7456 		return;
7457 	}
7458 
7459 	FCP_CP_IN(fpkt->pkt_data, report_lun, fpkt->pkt_data_acc,
7460 	    fpkt->pkt_datalen);
7461 
7462 	FCP_TRACE(fcp_logq, pptr->port_instbuf,
7463 	    fcp_trace, FCP_BUF_LEVEL_5, 0,
7464 	    "fcp_handle_reportlun: port=%d, tgt D_ID=0x%x",
7465 	    pptr->port_instance, ptgt->tgt_d_id);
7466 
7467 	/*
7468 	 * Get the number of luns (which is supplied as LUNS * 8) the
7469 	 * device claims it has.
7470 	 */
7471 	nluns_claimed = BE_32(report_lun->num_lun) >> 3;
7472 
7473 	/*
7474 	 * Get the maximum number of luns the buffer submitted can hold.
7475 	 */
7476 	nluns_bufmax = (fpkt->pkt_datalen - FCP_LUN_HEADER) / FCP_LUN_SIZE;
7477 
7478 	/*
7479 	 * Due to limitations of certain hardware, we support only 16 bit LUNs
7480 	 */
7481 	if (nluns_claimed > FCP_MAX_LUNS_SUPPORTED) {
7482 		kmem_free(report_lun, len);
7483 
7484 		fcp_log(CE_NOTE, pptr->port_dip, "!Can not support"
7485 		    " 0x%x number of LUNs for target=%x", nluns_claimed,
7486 		    ptgt->tgt_d_id);
7487 
7488 		(void) fcp_call_finish_init(pptr, ptgt, icmd->ipkt_link_cnt,
7489 		    icmd->ipkt_change_cnt, icmd->ipkt_cause);
7490 		fcp_icmd_free(pptr, icmd);
7491 		return;
7492 	}
7493 
7494 	/*
7495 	 * If there are more LUNs than we have allocated memory for,
7496 	 * allocate more space and send down yet another report lun if
7497 	 * the maximum number of attempts hasn't been reached.
7498 	 */
7499 	mutex_enter(&ptgt->tgt_mutex);
7500 
7501 	if ((nluns_claimed > nluns_bufmax) &&
7502 	    (ptgt->tgt_report_lun_cnt < FCP_MAX_REPORTLUNS_ATTEMPTS)) {
7503 
7504 		struct fcp_lun *plun;
7505 
7506 		ptgt->tgt_report_lun_cnt++;
7507 		plun = ptgt->tgt_lun;
7508 		ASSERT(plun != NULL);
7509 		mutex_exit(&ptgt->tgt_mutex);
7510 
7511 		kmem_free(report_lun, len);
7512 
7513 		FCP_TRACE(fcp_logq, pptr->port_instbuf,
7514 		    fcp_trace, FCP_BUF_LEVEL_5, 0,
7515 		    "!Dynamically discovered %d LUNs for D_ID=%x",
7516 		    nluns_claimed, ptgt->tgt_d_id);
7517 
7518 		/* Retrieve the rscn count (if a valid one exists) */
7519 		if (icmd->ipkt_fpkt->pkt_ulp_rscn_infop != NULL) {
7520 			rscn_count = ((fc_ulp_rscn_info_t *)
7521 			    (icmd->ipkt_fpkt->pkt_ulp_rscn_infop))->
7522 			    ulp_rscn_count;
7523 		} else {
7524 			rscn_count = FC_INVALID_RSCN_COUNT;
7525 		}
7526 
7527 		if (fcp_send_scsi(icmd->ipkt_lun, SCMD_REPORT_LUN,
7528 		    FCP_LUN_HEADER + (nluns_claimed * FCP_LUN_SIZE),
7529 		    icmd->ipkt_link_cnt, icmd->ipkt_change_cnt,
7530 		    icmd->ipkt_cause, rscn_count) != DDI_SUCCESS) {
7531 			(void) fcp_call_finish_init(pptr, ptgt,
7532 			    icmd->ipkt_link_cnt, icmd->ipkt_change_cnt,
7533 			    icmd->ipkt_cause);
7534 		}
7535 
7536 		fcp_icmd_free(pptr, icmd);
7537 		return;
7538 	}
7539 
7540 	if (nluns_claimed > nluns_bufmax) {
7541 		FCP_TRACE(fcp_logq, pptr->port_instbuf,
7542 		    fcp_trace, FCP_BUF_LEVEL_5, 0,
7543 		    "Target=%x:%x:%x:%x:%x:%x:%x:%x"
7544 		    "	 Number of LUNs lost=%x",
7545 		    ptgt->tgt_port_wwn.raw_wwn[0],
7546 		    ptgt->tgt_port_wwn.raw_wwn[1],
7547 		    ptgt->tgt_port_wwn.raw_wwn[2],
7548 		    ptgt->tgt_port_wwn.raw_wwn[3],
7549 		    ptgt->tgt_port_wwn.raw_wwn[4],
7550 		    ptgt->tgt_port_wwn.raw_wwn[5],
7551 		    ptgt->tgt_port_wwn.raw_wwn[6],
7552 		    ptgt->tgt_port_wwn.raw_wwn[7],
7553 		    nluns_claimed - nluns_bufmax);
7554 
7555 		nluns_claimed = nluns_bufmax;
7556 	}
7557 	ptgt->tgt_lun_cnt = nluns_claimed;
7558 
7559 	/*
7560 	 * Identify missing LUNs and print warning messages
7561 	 */
7562 	for (plun = ptgt->tgt_lun; plun; plun = plun->lun_next) {
7563 		int offline;
7564 		int exists = 0;
7565 
7566 		offline = (plun->lun_state & FCP_LUN_OFFLINE) ? 1 : 0;
7567 
7568 		for (i = 0; i < nluns_claimed && exists == 0; i++) {
7569 			uchar_t		*lun_string;
7570 
7571 			lun_string = (uchar_t *)&(report_lun->lun_string[i]);
7572 
7573 			switch (lun_string[0] & 0xC0) {
7574 			case FCP_LUN_ADDRESSING:
7575 			case FCP_PD_ADDRESSING:
7576 				lun_num = ((lun_string[0] & 0x3F) << 8) |
7577 				    lun_string[1];
7578 				if (plun->lun_num == lun_num) {
7579 					exists++;
7580 					break;
7581 				}
7582 				break;
7583 
7584 			default:
7585 				break;
7586 			}
7587 		}
7588 
7589 		if (!exists && !offline) {
7590 			mutex_exit(&ptgt->tgt_mutex);
7591 
7592 			mutex_enter(&pptr->port_mutex);
7593 			mutex_enter(&ptgt->tgt_mutex);
7594 			if (!FCP_STATE_CHANGED(pptr, ptgt, icmd)) {
7595 				/*
7596 				 * set disappear flag when device was connected
7597 				 */
7598 				if (!(plun->lun_state &
7599 				    FCP_LUN_DEVICE_NOT_CONNECTED)) {
7600 					plun->lun_state |= FCP_LUN_DISAPPEARED;
7601 				}
7602 				mutex_exit(&ptgt->tgt_mutex);
7603 				mutex_exit(&pptr->port_mutex);
7604 				if (!(plun->lun_state &
7605 				    FCP_LUN_DEVICE_NOT_CONNECTED)) {
7606 					fcp_log(CE_NOTE, pptr->port_dip,
7607 					    "!Lun=%x for target=%x disappeared",
7608 					    plun->lun_num, ptgt->tgt_d_id);
7609 				}
7610 				mutex_enter(&ptgt->tgt_mutex);
7611 			} else {
7612 				FCP_TRACE(fcp_logq, pptr->port_instbuf,
7613 				    fcp_trace, FCP_BUF_LEVEL_5, 0,
7614 				    "fcp_handle_reportlun,1: state change"
7615 				    " occured for D_ID=0x%x", ptgt->tgt_d_id);
7616 				mutex_exit(&ptgt->tgt_mutex);
7617 				mutex_exit(&pptr->port_mutex);
7618 				kmem_free(report_lun, len);
7619 				(void) fcp_call_finish_init(pptr, ptgt,
7620 				    icmd->ipkt_link_cnt, icmd->ipkt_change_cnt,
7621 				    icmd->ipkt_cause);
7622 				fcp_icmd_free(pptr, icmd);
7623 				return;
7624 			}
7625 		} else if (exists) {
7626 			/*
7627 			 * clear FCP_LUN_DEVICE_NOT_CONNECTED when lun 0
7628 			 * actually exists in REPORT_LUN response
7629 			 */
7630 			if (plun->lun_state & FCP_LUN_DEVICE_NOT_CONNECTED) {
7631 				plun->lun_state &=
7632 				    ~FCP_LUN_DEVICE_NOT_CONNECTED;
7633 			}
7634 			if (offline || plun->lun_num == 0) {
7635 				if (plun->lun_state & FCP_LUN_DISAPPEARED)  {
7636 					plun->lun_state &= ~FCP_LUN_DISAPPEARED;
7637 					mutex_exit(&ptgt->tgt_mutex);
7638 					fcp_log(CE_NOTE, pptr->port_dip,
7639 					    "!Lun=%x for target=%x reappeared",
7640 					    plun->lun_num, ptgt->tgt_d_id);
7641 					mutex_enter(&ptgt->tgt_mutex);
7642 				}
7643 			}
7644 		}
7645 	}
7646 
7647 	ptgt->tgt_tmp_cnt = nluns_claimed ? nluns_claimed : 1;
7648 	mutex_exit(&ptgt->tgt_mutex);
7649 
7650 	FCP_TRACE(fcp_logq, pptr->port_instbuf,
7651 	    fcp_trace, FCP_BUF_LEVEL_5, 0,
7652 	    "fcp_handle_reportlun: port=%d, tgt D_ID=0x%x, %d LUN(s)",
7653 	    pptr->port_instance, ptgt->tgt_d_id, nluns_claimed);
7654 
7655 	/* scan each lun */
7656 	for (i = 0; i < nluns_claimed; i++) {
7657 		uchar_t	*lun_string;
7658 
7659 		lun_string = (uchar_t *)&(report_lun->lun_string[i]);
7660 
7661 		FCP_TRACE(fcp_logq, pptr->port_instbuf,
7662 		    fcp_trace, FCP_BUF_LEVEL_5, 0,
7663 		    "handle_reportlun: d_id=%x, LUN ind=%d, LUN=%d,"
7664 		    " addr=0x%x", ptgt->tgt_d_id, i, lun_string[1],
7665 		    lun_string[0]);
7666 
7667 		switch (lun_string[0] & 0xC0) {
7668 		case FCP_LUN_ADDRESSING:
7669 		case FCP_PD_ADDRESSING:
7670 			lun_num = ((lun_string[0] & 0x3F) << 8) | lun_string[1];
7671 
7672 			/* We will skip masked LUNs because of the blacklist. */
7673 			if (fcp_lun_blacklist != NULL) {
7674 				mutex_enter(&ptgt->tgt_mutex);
7675 				if (fcp_should_mask(&ptgt->tgt_port_wwn,
7676 				    lun_num) == TRUE) {
7677 					ptgt->tgt_lun_cnt--;
7678 					mutex_exit(&ptgt->tgt_mutex);
7679 					break;
7680 				}
7681 				mutex_exit(&ptgt->tgt_mutex);
7682 			}
7683 
7684 			/* see if this LUN is already allocated */
7685 			if ((plun = fcp_get_lun(ptgt, lun_num)) == NULL) {
7686 				plun = fcp_alloc_lun(ptgt);
7687 				if (plun == NULL) {
7688 					fcp_log(CE_NOTE, pptr->port_dip,
7689 					    "!Lun allocation failed"
7690 					    " target=%x lun=%x",
7691 					    ptgt->tgt_d_id, lun_num);
7692 					break;
7693 				}
7694 			}
7695 
7696 			mutex_enter(&plun->lun_tgt->tgt_mutex);
7697 			/* convert to LUN */
7698 			plun->lun_addr.ent_addr_0 =
7699 			    BE_16(*(uint16_t *)&(lun_string[0]));
7700 			plun->lun_addr.ent_addr_1 =
7701 			    BE_16(*(uint16_t *)&(lun_string[2]));
7702 			plun->lun_addr.ent_addr_2 =
7703 			    BE_16(*(uint16_t *)&(lun_string[4]));
7704 			plun->lun_addr.ent_addr_3 =
7705 			    BE_16(*(uint16_t *)&(lun_string[6]));
7706 
7707 			plun->lun_num = lun_num;
7708 			plun->lun_state |= FCP_LUN_BUSY | FCP_LUN_MARK;
7709 			plun->lun_state &= ~FCP_LUN_OFFLINE;
7710 			mutex_exit(&plun->lun_tgt->tgt_mutex);
7711 
7712 			/* Retrieve the rscn count (if a valid one exists) */
7713 			if (icmd->ipkt_fpkt->pkt_ulp_rscn_infop != NULL) {
7714 				rscn_count = ((fc_ulp_rscn_info_t *)
7715 				    (icmd->ipkt_fpkt->pkt_ulp_rscn_infop))->
7716 				    ulp_rscn_count;
7717 			} else {
7718 				rscn_count = FC_INVALID_RSCN_COUNT;
7719 			}
7720 
7721 			if (fcp_send_scsi(plun, SCMD_INQUIRY, SUN_INQSIZE,
7722 			    icmd->ipkt_link_cnt, icmd->ipkt_change_cnt,
7723 			    icmd->ipkt_cause, rscn_count) != DDI_SUCCESS) {
7724 				mutex_enter(&pptr->port_mutex);
7725 				mutex_enter(&plun->lun_tgt->tgt_mutex);
7726 				if (!FCP_STATE_CHANGED(pptr, ptgt, icmd)) {
7727 					fcp_log(CE_NOTE, pptr->port_dip,
7728 					    "!failed to send INQUIRY"
7729 					    " target=%x lun=%x",
7730 					    ptgt->tgt_d_id, plun->lun_num);
7731 				} else {
7732 					FCP_TRACE(fcp_logq,
7733 					    pptr->port_instbuf, fcp_trace,
7734 					    FCP_BUF_LEVEL_5, 0,
7735 					    "fcp_handle_reportlun,2: state"
7736 					    " change occured for D_ID=0x%x",
7737 					    ptgt->tgt_d_id);
7738 				}
7739 				mutex_exit(&plun->lun_tgt->tgt_mutex);
7740 				mutex_exit(&pptr->port_mutex);
7741 			} else {
7742 				continue;
7743 			}
7744 			break;
7745 
7746 		case FCP_VOLUME_ADDRESSING:
7747 			/* FALLTHROUGH */
7748 		default:
7749 			fcp_log(CE_WARN, NULL,
7750 			    "!Unsupported LUN Addressing method %x "
7751 			    "in response to REPORT_LUN", lun_string[0]);
7752 			break;
7753 		}
7754 
7755 		/*
7756 		 * each time through this loop we should decrement
7757 		 * the tmp_cnt by one -- since we go through this loop
7758 		 * one time for each LUN, the tmp_cnt should never be <=0
7759 		 */
7760 		(void) fcp_call_finish_init(pptr, ptgt, icmd->ipkt_link_cnt,
7761 		    icmd->ipkt_change_cnt, icmd->ipkt_cause);
7762 	}
7763 
7764 	if (i == 0) {
7765 		fcp_log(CE_WARN, pptr->port_dip,
7766 		    "!FCP: target=%x reported NO Luns", ptgt->tgt_d_id);
7767 		(void) fcp_call_finish_init(pptr, ptgt, icmd->ipkt_link_cnt,
7768 		    icmd->ipkt_change_cnt, icmd->ipkt_cause);
7769 	}
7770 
7771 	kmem_free(report_lun, len);
7772 	fcp_icmd_free(pptr, icmd);
7773 }
7774 
7775 
7776 /*
7777  * called internally to return a LUN given a target and a LUN number
7778  */
7779 static struct fcp_lun *
7780 fcp_get_lun(struct fcp_tgt *ptgt, uint16_t lun_num)
7781 {
7782 	struct fcp_lun	*plun;
7783 
7784 	mutex_enter(&ptgt->tgt_mutex);
7785 	for (plun = ptgt->tgt_lun; plun != NULL; plun = plun->lun_next) {
7786 		if (plun->lun_num == lun_num) {
7787 			mutex_exit(&ptgt->tgt_mutex);
7788 			return (plun);
7789 		}
7790 	}
7791 	mutex_exit(&ptgt->tgt_mutex);
7792 
7793 	return (NULL);
7794 }
7795 
7796 
7797 /*
7798  * handle finishing one target for fcp_finish_init
7799  *
7800  * return true (non-zero) if we want finish_init to continue with the
7801  * next target
7802  *
7803  * called with the port mutex held
7804  */
7805 /*ARGSUSED*/
7806 static int
7807 fcp_finish_tgt(struct fcp_port *pptr, struct fcp_tgt *ptgt,
7808     int link_cnt, int tgt_cnt, int cause)
7809 {
7810 	int	rval = 1;
7811 	ASSERT(pptr != NULL);
7812 	ASSERT(ptgt != NULL);
7813 
7814 	FCP_TRACE(fcp_logq, pptr->port_instbuf,
7815 	    fcp_trace, FCP_BUF_LEVEL_5, 0,
7816 	    "finish_tgt: D_ID/state = 0x%x/0x%x", ptgt->tgt_d_id,
7817 	    ptgt->tgt_state);
7818 
7819 	ASSERT(mutex_owned(&pptr->port_mutex));
7820 
7821 	if ((pptr->port_link_cnt != link_cnt) ||
7822 	    (tgt_cnt && ptgt->tgt_change_cnt != tgt_cnt)) {
7823 		/*
7824 		 * oh oh -- another link reset or target change
7825 		 * must have occurred while we are in here
7826 		 */
7827 		FCP_TGT_TRACE(ptgt, tgt_cnt, FCP_TGT_TRACE_23);
7828 
7829 		return (0);
7830 	} else {
7831 		FCP_TGT_TRACE(ptgt, tgt_cnt, FCP_TGT_TRACE_24);
7832 	}
7833 
7834 	mutex_enter(&ptgt->tgt_mutex);
7835 
7836 	if (!(ptgt->tgt_state & FCP_TGT_OFFLINE)) {
7837 		/*
7838 		 * tgt is not offline -- is it marked (i.e. needs
7839 		 * to be offlined) ??
7840 		 */
7841 		if (ptgt->tgt_state & FCP_TGT_MARK) {
7842 			/*
7843 			 * this target not offline *and*
7844 			 * marked
7845 			 */
7846 			ptgt->tgt_state &= ~FCP_TGT_MARK;
7847 			rval = fcp_offline_target(pptr, ptgt, link_cnt,
7848 			    tgt_cnt, 0, 0);
7849 		} else {
7850 			ptgt->tgt_state &= ~FCP_TGT_BUSY;
7851 
7852 			/* create the LUNs */
7853 			if (ptgt->tgt_node_state != FCP_TGT_NODE_ON_DEMAND) {
7854 				ptgt->tgt_node_state = FCP_TGT_NODE_PRESENT;
7855 				fcp_create_luns(ptgt, link_cnt, tgt_cnt,
7856 				    cause);
7857 				ptgt->tgt_device_created = 1;
7858 			} else {
7859 				fcp_update_tgt_state(ptgt, FCP_RESET,
7860 				    FCP_LUN_BUSY);
7861 			}
7862 		}
7863 	}
7864 
7865 	mutex_exit(&ptgt->tgt_mutex);
7866 
7867 	return (rval);
7868 }
7869 
7870 
7871 /*
7872  * this routine is called to finish port initialization
7873  *
7874  * Each port has a "temp" counter -- when a state change happens (e.g.
7875  * port online), the temp count is set to the number of devices in the map.
7876  * Then, as each device gets "discovered", the temp counter is decremented
7877  * by one.  When this count reaches zero we know that all of the devices
7878  * in the map have been discovered (or an error has occurred), so we can
7879  * then finish initialization -- which is done by this routine (well, this
7880  * and fcp-finish_tgt())
7881  *
7882  * acquires and releases the global mutex
7883  *
7884  * called with the port mutex owned
7885  */
7886 static void
7887 fcp_finish_init(struct fcp_port *pptr)
7888 {
7889 #ifdef	DEBUG
7890 	bzero(pptr->port_finish_stack, sizeof (pptr->port_finish_stack));
7891 	pptr->port_finish_depth = getpcstack(pptr->port_finish_stack,
7892 	    FCP_STACK_DEPTH);
7893 #endif /* DEBUG */
7894 
7895 	ASSERT(mutex_owned(&pptr->port_mutex));
7896 
7897 	FCP_TRACE(fcp_logq, pptr->port_instbuf,
7898 	    fcp_trace, FCP_BUF_LEVEL_2, 0, "finish_init:"
7899 	    " entering; ipkt count=%d", pptr->port_ipkt_cnt);
7900 
7901 	if ((pptr->port_state & FCP_STATE_ONLINING) &&
7902 	    !(pptr->port_state & (FCP_STATE_SUSPENDED |
7903 	    FCP_STATE_DETACHING | FCP_STATE_POWER_DOWN))) {
7904 		pptr->port_state &= ~FCP_STATE_ONLINING;
7905 		pptr->port_state |= FCP_STATE_ONLINE;
7906 	}
7907 
7908 	/* Wake up threads waiting on config done */
7909 	cv_broadcast(&pptr->port_config_cv);
7910 }
7911 
7912 
7913 /*
7914  * called from fcp_finish_init to create the LUNs for a target
7915  *
7916  * called with the port mutex owned
7917  */
7918 static void
7919 fcp_create_luns(struct fcp_tgt *ptgt, int link_cnt, int tgt_cnt, int cause)
7920 {
7921 	struct fcp_lun	*plun;
7922 	struct fcp_port	*pptr;
7923 	child_info_t		*cip = NULL;
7924 
7925 	ASSERT(ptgt != NULL);
7926 	ASSERT(mutex_owned(&ptgt->tgt_mutex));
7927 
7928 	pptr = ptgt->tgt_port;
7929 
7930 	ASSERT(pptr != NULL);
7931 
7932 	/* scan all LUNs for this target */
7933 	for (plun = ptgt->tgt_lun; plun != NULL; plun = plun->lun_next) {
7934 		if (plun->lun_state & FCP_LUN_OFFLINE) {
7935 			continue;
7936 		}
7937 
7938 		if (plun->lun_state & FCP_LUN_MARK) {
7939 			FCP_TRACE(fcp_logq, pptr->port_instbuf,
7940 			    fcp_trace, FCP_BUF_LEVEL_2, 0,
7941 			    "fcp_create_luns: offlining marked LUN!");
7942 			fcp_offline_lun(plun, link_cnt, tgt_cnt, 1, 0);
7943 			continue;
7944 		}
7945 
7946 		plun->lun_state &= ~FCP_LUN_BUSY;
7947 
7948 		/*
7949 		 * There are conditions in which FCP_LUN_INIT flag is cleared
7950 		 * but we have a valid plun->lun_cip. To cover this case also
7951 		 * CLEAR_BUSY whenever we have a valid lun_cip.
7952 		 */
7953 		if (plun->lun_mpxio && plun->lun_cip &&
7954 		    (!fcp_pass_to_hp(pptr, plun, plun->lun_cip,
7955 		    FCP_MPXIO_PATH_CLEAR_BUSY, link_cnt, tgt_cnt,
7956 		    0, 0))) {
7957 			FCP_TRACE(fcp_logq, pptr->port_instbuf,
7958 			    fcp_trace, FCP_BUF_LEVEL_2, 0,
7959 			    "fcp_create_luns: enable lun %p failed!",
7960 			    plun);
7961 		}
7962 
7963 		if (plun->lun_state & FCP_LUN_INIT &&
7964 		    !(plun->lun_state & FCP_LUN_CHANGED)) {
7965 			continue;
7966 		}
7967 
7968 		if (cause == FCP_CAUSE_USER_CREATE) {
7969 			continue;
7970 		}
7971 
7972 		FCP_TRACE(fcp_logq, pptr->port_instbuf,
7973 		    fcp_trace, FCP_BUF_LEVEL_6, 0,
7974 		    "create_luns: passing ONLINE elem to HP thread");
7975 
7976 		/*
7977 		 * If lun has changed, prepare for offlining the old path.
7978 		 * Do not offline the old path right now, since it may be
7979 		 * still opened.
7980 		 */
7981 		if (plun->lun_cip && (plun->lun_state & FCP_LUN_CHANGED)) {
7982 			fcp_prepare_offline_lun(plun, link_cnt, tgt_cnt);
7983 		}
7984 
7985 		/* pass an ONLINE element to the hotplug thread */
7986 		if (!fcp_pass_to_hp(pptr, plun, cip, FCP_ONLINE,
7987 		    link_cnt, tgt_cnt, NDI_ONLINE_ATTACH, 0)) {
7988 
7989 			/*
7990 			 * We can not synchronous attach (i.e pass
7991 			 * NDI_ONLINE_ATTACH) here as we might be
7992 			 * coming from an interrupt or callback
7993 			 * thread.
7994 			 */
7995 			if (!fcp_pass_to_hp(pptr, plun, cip, FCP_ONLINE,
7996 			    link_cnt, tgt_cnt, 0, 0)) {
7997 				fcp_log(CE_CONT, pptr->port_dip,
7998 				    "Can not ONLINE LUN; D_ID=%x, LUN=%x\n",
7999 				    plun->lun_tgt->tgt_d_id, plun->lun_num);
8000 			}
8001 		}
8002 	}
8003 }
8004 
8005 
8006 /*
8007  * function to online/offline devices
8008  */
8009 static int
8010 fcp_trigger_lun(struct fcp_lun *plun, child_info_t *cip, int old_mpxio,
8011     int online, int lcount, int tcount, int flags)
8012 {
8013 	int			rval = NDI_FAILURE;
8014 	int			circ;
8015 	child_info_t		*ccip;
8016 	struct fcp_port		*pptr = plun->lun_tgt->tgt_port;
8017 	int			is_mpxio = pptr->port_mpxio;
8018 	dev_info_t		*cdip, *pdip;
8019 	char			*devname;
8020 
8021 	if ((old_mpxio != 0) && (plun->lun_mpxio != old_mpxio)) {
8022 		/*
8023 		 * When this event gets serviced, lun_cip and lun_mpxio
8024 		 * has changed, so it should be invalidated now.
8025 		 */
8026 		FCP_TRACE(fcp_logq, pptr->port_instbuf, fcp_trace,
8027 		    FCP_BUF_LEVEL_2, 0, "fcp_trigger_lun: lun_mpxio changed: "
8028 		    "plun: %p, cip: %p, what:%d", plun, cip, online);
8029 		return (rval);
8030 	}
8031 
8032 	FCP_TRACE(fcp_logq, pptr->port_instbuf,
8033 	    fcp_trace, FCP_BUF_LEVEL_2, 0,
8034 	    "fcp_trigger_lun: plun=%p target=%x lun=%d cip=%p what=%x "
8035 	    "flags=%x mpxio=%x\n",
8036 	    plun, LUN_TGT->tgt_d_id, plun->lun_num, cip, online, flags,
8037 	    plun->lun_mpxio);
8038 
8039 	/*
8040 	 * lun_mpxio needs checking here because we can end up in a race
8041 	 * condition where this task has been dispatched while lun_mpxio is
8042 	 * set, but an earlier FCP_ONLINE task for the same LUN tried to
8043 	 * enable MPXIO for the LUN, but was unable to, and hence cleared
8044 	 * the flag. We rely on the serialization of the tasks here. We return
8045 	 * NDI_SUCCESS so any callers continue without reporting spurious
8046 	 * errors, and the still think we're an MPXIO LUN.
8047 	 */
8048 
8049 	if (online == FCP_MPXIO_PATH_CLEAR_BUSY ||
8050 	    online == FCP_MPXIO_PATH_SET_BUSY) {
8051 		if (plun->lun_mpxio) {
8052 			rval = fcp_update_mpxio_path(plun, cip, online);
8053 		} else {
8054 			rval = NDI_SUCCESS;
8055 		}
8056 		return (rval);
8057 	}
8058 
8059 	/*
8060 	 * Explicit devfs_clean() due to ndi_devi_offline() not
8061 	 * executing devfs_clean() if parent lock is held.
8062 	 */
8063 	ASSERT(!servicing_interrupt());
8064 	if (online == FCP_OFFLINE) {
8065 		if (plun->lun_mpxio == 0) {
8066 			if (plun->lun_cip == cip) {
8067 				cdip = DIP(plun->lun_cip);
8068 			} else {
8069 				cdip = DIP(cip);
8070 			}
8071 		} else if ((plun->lun_cip == cip) && plun->lun_cip) {
8072 			cdip = mdi_pi_get_client(PIP(plun->lun_cip));
8073 		} else if ((plun->lun_cip != cip) && cip) {
8074 			/*
8075 			 * This means a DTYPE/GUID change, we shall get the
8076 			 * dip of the old cip instead of the current lun_cip.
8077 			 */
8078 			cdip = mdi_pi_get_client(PIP(cip));
8079 		}
8080 		if (cdip) {
8081 			if (i_ddi_devi_attached(cdip)) {
8082 				pdip = ddi_get_parent(cdip);
8083 				devname = kmem_alloc(MAXNAMELEN + 1, KM_SLEEP);
8084 				ndi_devi_enter(pdip, &circ);
8085 				(void) ddi_deviname(cdip, devname);
8086 				ndi_devi_exit(pdip, circ);
8087 				/*
8088 				 * Release parent lock before calling
8089 				 * devfs_clean().
8090 				 */
8091 				rval = devfs_clean(pdip, devname + 1,
8092 				    DV_CLEAN_FORCE);
8093 				kmem_free(devname, MAXNAMELEN + 1);
8094 				/*
8095 				 * Return if devfs_clean() fails for
8096 				 * non-MPXIO case.
8097 				 * For MPXIO case, another path could be
8098 				 * offlined.
8099 				 */
8100 				if (rval && plun->lun_mpxio == 0) {
8101 					FCP_TRACE(fcp_logq, pptr->port_instbuf,
8102 					    fcp_trace, FCP_BUF_LEVEL_3, 0,
8103 					    "fcp_trigger_lun: devfs_clean "
8104 					    "failed rval=%x  dip=%p",
8105 					    rval, pdip);
8106 					return (NDI_FAILURE);
8107 				}
8108 			}
8109 		}
8110 	}
8111 
8112 	if (fc_ulp_busy_port(pptr->port_fp_handle) != 0) {
8113 		return (NDI_FAILURE);
8114 	}
8115 
8116 	if (is_mpxio) {
8117 		mdi_devi_enter(pptr->port_dip, &circ);
8118 	} else {
8119 		ndi_devi_enter(pptr->port_dip, &circ);
8120 	}
8121 
8122 	mutex_enter(&pptr->port_mutex);
8123 	mutex_enter(&plun->lun_mutex);
8124 
8125 	if (online == FCP_ONLINE) {
8126 		ccip = fcp_get_cip(plun, cip, lcount, tcount);
8127 		if (ccip == NULL) {
8128 			goto fail;
8129 		}
8130 	} else {
8131 		if (fcp_is_child_present(plun, cip) != FC_SUCCESS) {
8132 			goto fail;
8133 		}
8134 		ccip = cip;
8135 	}
8136 
8137 	if (online == FCP_ONLINE) {
8138 		rval = fcp_online_child(plun, ccip, lcount, tcount, flags,
8139 		    &circ);
8140 		fc_ulp_log_device_event(pptr->port_fp_handle,
8141 		    FC_ULP_DEVICE_ONLINE);
8142 	} else {
8143 		rval = fcp_offline_child(plun, ccip, lcount, tcount, flags,
8144 		    &circ);
8145 		fc_ulp_log_device_event(pptr->port_fp_handle,
8146 		    FC_ULP_DEVICE_OFFLINE);
8147 	}
8148 
8149 fail:	mutex_exit(&plun->lun_mutex);
8150 	mutex_exit(&pptr->port_mutex);
8151 
8152 	if (is_mpxio) {
8153 		mdi_devi_exit(pptr->port_dip, circ);
8154 	} else {
8155 		ndi_devi_exit(pptr->port_dip, circ);
8156 	}
8157 
8158 	fc_ulp_idle_port(pptr->port_fp_handle);
8159 
8160 	return (rval);
8161 }
8162 
8163 
8164 /*
8165  * take a target offline by taking all of its LUNs offline
8166  */
8167 /*ARGSUSED*/
8168 static int
8169 fcp_offline_target(struct fcp_port *pptr, struct fcp_tgt *ptgt,
8170     int link_cnt, int tgt_cnt, int nowait, int flags)
8171 {
8172 	struct fcp_tgt_elem	*elem;
8173 
8174 	ASSERT(mutex_owned(&pptr->port_mutex));
8175 	ASSERT(mutex_owned(&ptgt->tgt_mutex));
8176 
8177 	ASSERT(!(ptgt->tgt_state & FCP_TGT_OFFLINE));
8178 
8179 	if (link_cnt != pptr->port_link_cnt || (tgt_cnt && tgt_cnt !=
8180 	    ptgt->tgt_change_cnt)) {
8181 		mutex_exit(&ptgt->tgt_mutex);
8182 		FCP_TGT_TRACE(ptgt, tgt_cnt, FCP_TGT_TRACE_25);
8183 		mutex_enter(&ptgt->tgt_mutex);
8184 
8185 		return (0);
8186 	}
8187 
8188 	ptgt->tgt_pd_handle = NULL;
8189 	mutex_exit(&ptgt->tgt_mutex);
8190 	FCP_TGT_TRACE(ptgt, tgt_cnt, FCP_TGT_TRACE_26);
8191 	mutex_enter(&ptgt->tgt_mutex);
8192 
8193 	tgt_cnt = tgt_cnt ? tgt_cnt : ptgt->tgt_change_cnt;
8194 
8195 	if (ptgt->tgt_tcap &&
8196 	    (elem = kmem_zalloc(sizeof (*elem), KM_NOSLEEP)) != NULL) {
8197 		elem->flags = flags;
8198 		elem->time = fcp_watchdog_time;
8199 		if (nowait == 0) {
8200 			elem->time += fcp_offline_delay;
8201 		}
8202 		elem->ptgt = ptgt;
8203 		elem->link_cnt = link_cnt;
8204 		elem->tgt_cnt = tgt_cnt;
8205 		elem->next = pptr->port_offline_tgts;
8206 		pptr->port_offline_tgts = elem;
8207 	} else {
8208 		fcp_offline_target_now(pptr, ptgt, link_cnt, tgt_cnt, flags);
8209 	}
8210 
8211 	return (1);
8212 }
8213 
8214 
8215 static void
8216 fcp_offline_target_now(struct fcp_port *pptr, struct fcp_tgt *ptgt,
8217     int link_cnt, int tgt_cnt, int flags)
8218 {
8219 	ASSERT(mutex_owned(&pptr->port_mutex));
8220 	ASSERT(mutex_owned(&ptgt->tgt_mutex));
8221 
8222 	fc_ulp_enable_relogin(pptr->port_fp_handle, &ptgt->tgt_port_wwn);
8223 	ptgt->tgt_state = FCP_TGT_OFFLINE;
8224 	ptgt->tgt_pd_handle = NULL;
8225 	fcp_offline_tgt_luns(ptgt, link_cnt, tgt_cnt, flags);
8226 }
8227 
8228 
8229 static void
8230 fcp_offline_tgt_luns(struct fcp_tgt *ptgt, int link_cnt, int tgt_cnt,
8231     int flags)
8232 {
8233 	struct	fcp_lun	*plun;
8234 
8235 	ASSERT(mutex_owned(&ptgt->tgt_port->port_mutex));
8236 	ASSERT(mutex_owned(&ptgt->tgt_mutex));
8237 
8238 	for (plun = ptgt->tgt_lun; plun != NULL; plun = plun->lun_next) {
8239 		if (!(plun->lun_state & FCP_LUN_OFFLINE)) {
8240 			fcp_offline_lun(plun, link_cnt, tgt_cnt, 1, flags);
8241 		}
8242 	}
8243 }
8244 
8245 
8246 /*
8247  * take a LUN offline
8248  *
8249  * enters and leaves with the target mutex held, releasing it in the process
8250  *
8251  * allocates memory in non-sleep mode
8252  */
8253 static void
8254 fcp_offline_lun(struct fcp_lun *plun, int link_cnt, int tgt_cnt,
8255     int nowait, int flags)
8256 {
8257 	struct fcp_port	*pptr = plun->lun_tgt->tgt_port;
8258 	struct fcp_lun_elem	*elem;
8259 
8260 	ASSERT(plun != NULL);
8261 	ASSERT(mutex_owned(&LUN_TGT->tgt_mutex));
8262 
8263 	if (nowait) {
8264 		fcp_offline_lun_now(plun, link_cnt, tgt_cnt, flags);
8265 		return;
8266 	}
8267 
8268 	if ((elem = kmem_zalloc(sizeof (*elem), KM_NOSLEEP)) != NULL) {
8269 		elem->flags = flags;
8270 		elem->time = fcp_watchdog_time;
8271 		if (nowait == 0) {
8272 			elem->time += fcp_offline_delay;
8273 		}
8274 		elem->plun = plun;
8275 		elem->link_cnt = link_cnt;
8276 		elem->tgt_cnt = plun->lun_tgt->tgt_change_cnt;
8277 		elem->next = pptr->port_offline_luns;
8278 		pptr->port_offline_luns = elem;
8279 	} else {
8280 		fcp_offline_lun_now(plun, link_cnt, tgt_cnt, flags);
8281 	}
8282 }
8283 
8284 
8285 static void
8286 fcp_prepare_offline_lun(struct fcp_lun *plun, int link_cnt, int tgt_cnt)
8287 {
8288 	struct fcp_pkt	*head = NULL;
8289 
8290 	ASSERT(mutex_owned(&LUN_TGT->tgt_mutex));
8291 
8292 	mutex_exit(&LUN_TGT->tgt_mutex);
8293 
8294 	head = fcp_scan_commands(plun);
8295 	if (head != NULL) {
8296 		fcp_abort_commands(head, LUN_PORT);
8297 	}
8298 
8299 	mutex_enter(&LUN_TGT->tgt_mutex);
8300 
8301 	if (plun->lun_cip && plun->lun_mpxio) {
8302 		/*
8303 		 * Intimate MPxIO lun busy is cleared
8304 		 */
8305 		if (!fcp_pass_to_hp(LUN_PORT, plun, plun->lun_cip,
8306 		    FCP_MPXIO_PATH_CLEAR_BUSY, link_cnt, tgt_cnt,
8307 		    0, 0)) {
8308 			fcp_log(CE_NOTE, LUN_PORT->port_dip,
8309 			    "Can not ENABLE LUN; D_ID=%x, LUN=%x",
8310 			    LUN_TGT->tgt_d_id, plun->lun_num);
8311 		}
8312 		/*
8313 		 * Intimate MPxIO that the lun is now marked for offline
8314 		 */
8315 		mutex_exit(&LUN_TGT->tgt_mutex);
8316 		(void) mdi_pi_disable_path(PIP(plun->lun_cip), DRIVER_DISABLE);
8317 		mutex_enter(&LUN_TGT->tgt_mutex);
8318 	}
8319 }
8320 
8321 static void
8322 fcp_offline_lun_now(struct fcp_lun *plun, int link_cnt, int tgt_cnt,
8323     int flags)
8324 {
8325 	ASSERT(mutex_owned(&LUN_TGT->tgt_mutex));
8326 
8327 	mutex_exit(&LUN_TGT->tgt_mutex);
8328 	fcp_update_offline_flags(plun);
8329 	mutex_enter(&LUN_TGT->tgt_mutex);
8330 
8331 	fcp_prepare_offline_lun(plun, link_cnt, tgt_cnt);
8332 
8333 	FCP_TRACE(fcp_logq, LUN_PORT->port_instbuf,
8334 	    fcp_trace, FCP_BUF_LEVEL_4, 0,
8335 	    "offline_lun: passing OFFLINE elem to HP thread");
8336 
8337 	if (plun->lun_cip) {
8338 		fcp_log(CE_NOTE, LUN_PORT->port_dip,
8339 		    "!offlining lun=%x (trace=%x), target=%x (trace=%x)",
8340 		    plun->lun_num, plun->lun_trace, LUN_TGT->tgt_d_id,
8341 		    LUN_TGT->tgt_trace);
8342 
8343 		if (!fcp_pass_to_hp(LUN_PORT, plun, plun->lun_cip, FCP_OFFLINE,
8344 		    link_cnt, tgt_cnt, flags, 0)) {
8345 			fcp_log(CE_CONT, LUN_PORT->port_dip,
8346 			    "Can not OFFLINE LUN; D_ID=%x, LUN=%x\n",
8347 			    LUN_TGT->tgt_d_id, plun->lun_num);
8348 		}
8349 	}
8350 }
8351 
8352 static void
8353 fcp_scan_offline_luns(struct fcp_port *pptr)
8354 {
8355 	struct fcp_lun_elem	*elem;
8356 	struct fcp_lun_elem	*prev;
8357 	struct fcp_lun_elem	*next;
8358 
8359 	ASSERT(MUTEX_HELD(&pptr->port_mutex));
8360 
8361 	prev = NULL;
8362 	elem = pptr->port_offline_luns;
8363 	while (elem) {
8364 		next = elem->next;
8365 		if (elem->time <= fcp_watchdog_time) {
8366 			int			changed = 1;
8367 			struct fcp_tgt	*ptgt = elem->plun->lun_tgt;
8368 
8369 			mutex_enter(&ptgt->tgt_mutex);
8370 			if (pptr->port_link_cnt == elem->link_cnt &&
8371 			    ptgt->tgt_change_cnt == elem->tgt_cnt) {
8372 				changed = 0;
8373 			}
8374 
8375 			if (!changed &&
8376 			    !(elem->plun->lun_state & FCP_TGT_OFFLINE)) {
8377 				fcp_offline_lun_now(elem->plun,
8378 				    elem->link_cnt, elem->tgt_cnt, elem->flags);
8379 			}
8380 			mutex_exit(&ptgt->tgt_mutex);
8381 
8382 			kmem_free(elem, sizeof (*elem));
8383 
8384 			if (prev) {
8385 				prev->next = next;
8386 			} else {
8387 				pptr->port_offline_luns = next;
8388 			}
8389 		} else {
8390 			prev = elem;
8391 		}
8392 		elem = next;
8393 	}
8394 }
8395 
8396 
8397 static void
8398 fcp_scan_offline_tgts(struct fcp_port *pptr)
8399 {
8400 	struct fcp_tgt_elem	*elem;
8401 	struct fcp_tgt_elem	*prev;
8402 	struct fcp_tgt_elem	*next;
8403 
8404 	ASSERT(MUTEX_HELD(&pptr->port_mutex));
8405 
8406 	prev = NULL;
8407 	elem = pptr->port_offline_tgts;
8408 	while (elem) {
8409 		next = elem->next;
8410 		if (elem->time <= fcp_watchdog_time) {
8411 			int			changed = 1;
8412 			struct fcp_tgt	*ptgt = elem->ptgt;
8413 
8414 			if (ptgt->tgt_change_cnt == elem->tgt_cnt) {
8415 				changed = 0;
8416 			}
8417 
8418 			mutex_enter(&ptgt->tgt_mutex);
8419 			if (!changed && !(ptgt->tgt_state &
8420 			    FCP_TGT_OFFLINE)) {
8421 				fcp_offline_target_now(pptr,
8422 				    ptgt, elem->link_cnt, elem->tgt_cnt,
8423 				    elem->flags);
8424 			}
8425 			mutex_exit(&ptgt->tgt_mutex);
8426 
8427 			kmem_free(elem, sizeof (*elem));
8428 
8429 			if (prev) {
8430 				prev->next = next;
8431 			} else {
8432 				pptr->port_offline_tgts = next;
8433 			}
8434 		} else {
8435 			prev = elem;
8436 		}
8437 		elem = next;
8438 	}
8439 }
8440 
8441 
8442 static void
8443 fcp_update_offline_flags(struct fcp_lun *plun)
8444 {
8445 	struct fcp_port	*pptr = LUN_PORT;
8446 	ASSERT(plun != NULL);
8447 
8448 	mutex_enter(&LUN_TGT->tgt_mutex);
8449 	plun->lun_state |= FCP_LUN_OFFLINE;
8450 	plun->lun_state &= ~(FCP_LUN_INIT | FCP_LUN_BUSY | FCP_LUN_MARK);
8451 
8452 	mutex_enter(&plun->lun_mutex);
8453 	if (plun->lun_cip && plun->lun_state & FCP_SCSI_LUN_TGT_INIT) {
8454 		dev_info_t *cdip = NULL;
8455 
8456 		mutex_exit(&LUN_TGT->tgt_mutex);
8457 
8458 		if (plun->lun_mpxio == 0) {
8459 			cdip = DIP(plun->lun_cip);
8460 		} else if (plun->lun_cip) {
8461 			cdip = mdi_pi_get_client(PIP(plun->lun_cip));
8462 		}
8463 
8464 		mutex_exit(&plun->lun_mutex);
8465 		if (cdip) {
8466 			(void) ndi_event_retrieve_cookie(
8467 			    pptr->port_ndi_event_hdl, cdip, FCAL_REMOVE_EVENT,
8468 			    &fcp_remove_eid, NDI_EVENT_NOPASS);
8469 			(void) ndi_event_run_callbacks(
8470 			    pptr->port_ndi_event_hdl, cdip,
8471 			    fcp_remove_eid, NULL);
8472 		}
8473 	} else {
8474 		mutex_exit(&plun->lun_mutex);
8475 		mutex_exit(&LUN_TGT->tgt_mutex);
8476 	}
8477 }
8478 
8479 
8480 /*
8481  * Scan all of the command pkts for this port, moving pkts that
8482  * match our LUN onto our own list (headed by "head")
8483  */
8484 static struct fcp_pkt *
8485 fcp_scan_commands(struct fcp_lun *plun)
8486 {
8487 	struct fcp_port	*pptr = LUN_PORT;
8488 
8489 	struct fcp_pkt	*cmd = NULL;	/* pkt cmd ptr */
8490 	struct fcp_pkt	*ncmd = NULL;	/* next pkt ptr */
8491 	struct fcp_pkt	*pcmd = NULL;	/* the previous command */
8492 
8493 	struct fcp_pkt	*head = NULL;	/* head of our list */
8494 	struct fcp_pkt	*tail = NULL;	/* tail of our list */
8495 
8496 	int			cmds_found = 0;
8497 
8498 	mutex_enter(&pptr->port_pkt_mutex);
8499 	for (cmd = pptr->port_pkt_head; cmd != NULL; cmd = ncmd) {
8500 		struct fcp_lun *tlun =
8501 		    ADDR2LUN(&cmd->cmd_pkt->pkt_address);
8502 
8503 		ncmd = cmd->cmd_next;	/* set next command */
8504 
8505 		/*
8506 		 * if this pkt is for a different LUN  or the
8507 		 * command is sent down, skip it.
8508 		 */
8509 		if (tlun != plun || cmd->cmd_state == FCP_PKT_ISSUED ||
8510 		    (cmd->cmd_pkt->pkt_flags & FLAG_NOINTR)) {
8511 			pcmd = cmd;
8512 			continue;
8513 		}
8514 		cmds_found++;
8515 		if (pcmd != NULL) {
8516 			ASSERT(pptr->port_pkt_head != cmd);
8517 			pcmd->cmd_next = cmd->cmd_next;
8518 		} else {
8519 			ASSERT(cmd == pptr->port_pkt_head);
8520 			pptr->port_pkt_head = cmd->cmd_next;
8521 		}
8522 
8523 		if (cmd == pptr->port_pkt_tail) {
8524 			pptr->port_pkt_tail = pcmd;
8525 			if (pcmd) {
8526 				pcmd->cmd_next = NULL;
8527 			}
8528 		}
8529 
8530 		if (head == NULL) {
8531 			head = tail = cmd;
8532 		} else {
8533 			ASSERT(tail != NULL);
8534 
8535 			tail->cmd_next = cmd;
8536 			tail = cmd;
8537 		}
8538 		cmd->cmd_next = NULL;
8539 	}
8540 	mutex_exit(&pptr->port_pkt_mutex);
8541 
8542 	FCP_DTRACE(fcp_logq, pptr->port_instbuf,
8543 	    fcp_trace, FCP_BUF_LEVEL_8, 0,
8544 	    "scan commands: %d cmd(s) found", cmds_found);
8545 
8546 	return (head);
8547 }
8548 
8549 
8550 /*
8551  * Abort all the commands in the command queue
8552  */
8553 static void
8554 fcp_abort_commands(struct fcp_pkt *head, struct fcp_port *pptr)
8555 {
8556 	struct fcp_pkt	*cmd = NULL;	/* pkt cmd ptr */
8557 	struct	fcp_pkt	*ncmd = NULL;	/* next pkt ptr */
8558 
8559 	ASSERT(mutex_owned(&pptr->port_mutex));
8560 
8561 	/* scan through the pkts and invalid them */
8562 	for (cmd = head; cmd != NULL; cmd = ncmd) {
8563 		struct scsi_pkt *pkt = cmd->cmd_pkt;
8564 
8565 		ncmd = cmd->cmd_next;
8566 		ASSERT(pkt != NULL);
8567 
8568 		/*
8569 		 * The lun is going to be marked offline. Indicate
8570 		 * the target driver not to requeue or retry this command
8571 		 * as the device is going to be offlined pretty soon.
8572 		 */
8573 		pkt->pkt_reason = CMD_DEV_GONE;
8574 		pkt->pkt_statistics = 0;
8575 		pkt->pkt_state = 0;
8576 
8577 		/* reset cmd flags/state */
8578 		cmd->cmd_flags &= ~CFLAG_IN_QUEUE;
8579 		cmd->cmd_state = FCP_PKT_IDLE;
8580 
8581 		/*
8582 		 * ensure we have a packet completion routine,
8583 		 * then call it.
8584 		 */
8585 		ASSERT(pkt->pkt_comp != NULL);
8586 
8587 		mutex_exit(&pptr->port_mutex);
8588 		fcp_post_callback(cmd);
8589 		mutex_enter(&pptr->port_mutex);
8590 	}
8591 }
8592 
8593 
8594 /*
8595  * the pkt_comp callback for command packets
8596  */
8597 static void
8598 fcp_cmd_callback(fc_packet_t *fpkt)
8599 {
8600 	struct fcp_pkt *cmd = (struct fcp_pkt *)fpkt->pkt_ulp_private;
8601 	struct scsi_pkt *pkt = cmd->cmd_pkt;
8602 	struct fcp_port *pptr = ADDR2FCP(&pkt->pkt_address);
8603 
8604 	ASSERT(cmd->cmd_state != FCP_PKT_IDLE);
8605 
8606 	if (cmd->cmd_state == FCP_PKT_IDLE) {
8607 		cmn_err(CE_PANIC, "Packet already completed %p",
8608 		    (void *)cmd);
8609 	}
8610 
8611 	/*
8612 	 * Watch thread should be freeing the packet, ignore the pkt.
8613 	 */
8614 	if (cmd->cmd_state == FCP_PKT_ABORTING) {
8615 		fcp_log(CE_CONT, pptr->port_dip,
8616 		    "!FCP: Pkt completed while aborting\n");
8617 		return;
8618 	}
8619 	cmd->cmd_state = FCP_PKT_IDLE;
8620 
8621 	fcp_complete_pkt(fpkt);
8622 
8623 #ifdef	DEBUG
8624 	mutex_enter(&pptr->port_pkt_mutex);
8625 	pptr->port_npkts--;
8626 	mutex_exit(&pptr->port_pkt_mutex);
8627 #endif /* DEBUG */
8628 
8629 	fcp_post_callback(cmd);
8630 }
8631 
8632 
8633 static void
8634 fcp_complete_pkt(fc_packet_t *fpkt)
8635 {
8636 	int			error = 0;
8637 	struct fcp_pkt	*cmd = (struct fcp_pkt *)
8638 	    fpkt->pkt_ulp_private;
8639 	struct scsi_pkt		*pkt = cmd->cmd_pkt;
8640 	struct fcp_port		*pptr = ADDR2FCP(&pkt->pkt_address);
8641 	struct fcp_lun	*plun;
8642 	struct fcp_tgt	*ptgt;
8643 	struct fcp_rsp		*rsp;
8644 	struct scsi_address	save;
8645 
8646 #ifdef	DEBUG
8647 	save = pkt->pkt_address;
8648 #endif /* DEBUG */
8649 
8650 	rsp = (struct fcp_rsp *)cmd->cmd_fcp_rsp;
8651 
8652 	if (fpkt->pkt_state == FC_PKT_SUCCESS) {
8653 		if (pptr->port_fcp_dma != FC_NO_DVMA_SPACE) {
8654 			FCP_CP_IN(fpkt->pkt_resp, rsp, fpkt->pkt_resp_acc,
8655 			    sizeof (struct fcp_rsp));
8656 		}
8657 
8658 		pkt->pkt_state = STATE_GOT_BUS | STATE_GOT_TARGET |
8659 		    STATE_SENT_CMD | STATE_GOT_STATUS;
8660 
8661 		pkt->pkt_resid = 0;
8662 
8663 		if (cmd->cmd_pkt->pkt_numcookies) {
8664 			pkt->pkt_state |= STATE_XFERRED_DATA;
8665 			if (fpkt->pkt_data_resid) {
8666 				error++;
8667 			}
8668 		}
8669 
8670 		if ((pkt->pkt_scbp != NULL) && ((*(pkt->pkt_scbp) =
8671 		    rsp->fcp_u.fcp_status.scsi_status) != STATUS_GOOD)) {
8672 			/*
8673 			 * The next two checks make sure that if there
8674 			 * is no sense data or a valid response and
8675 			 * the command came back with check condition,
8676 			 * the command should be retried.
8677 			 */
8678 			if (!rsp->fcp_u.fcp_status.rsp_len_set &&
8679 			    !rsp->fcp_u.fcp_status.sense_len_set) {
8680 				pkt->pkt_state &= ~STATE_XFERRED_DATA;
8681 				pkt->pkt_resid = cmd->cmd_dmacount;
8682 			}
8683 		}
8684 
8685 		if ((error | rsp->fcp_u.i_fcp_status | rsp->fcp_resid) == 0) {
8686 			return;
8687 		}
8688 
8689 		plun = ADDR2LUN(&pkt->pkt_address);
8690 		ptgt = plun->lun_tgt;
8691 		ASSERT(ptgt != NULL);
8692 
8693 		/*
8694 		 * Update the transfer resid, if appropriate
8695 		 */
8696 		if (rsp->fcp_u.fcp_status.resid_over ||
8697 		    rsp->fcp_u.fcp_status.resid_under) {
8698 			pkt->pkt_resid = rsp->fcp_resid;
8699 		}
8700 
8701 		/*
8702 		 * First see if we got a FCP protocol error.
8703 		 */
8704 		if (rsp->fcp_u.fcp_status.rsp_len_set) {
8705 			struct fcp_rsp_info	*bep;
8706 			bep = (struct fcp_rsp_info *)(cmd->cmd_fcp_rsp +
8707 			    sizeof (struct fcp_rsp));
8708 
8709 			if (fcp_validate_fcp_response(rsp, pptr) !=
8710 			    FC_SUCCESS) {
8711 				pkt->pkt_reason = CMD_CMPLT;
8712 				*(pkt->pkt_scbp) = STATUS_CHECK;
8713 
8714 				fcp_log(CE_WARN, pptr->port_dip,
8715 				    "!SCSI command to d_id=0x%x lun=0x%x"
8716 				    " failed, Bad FCP response values:"
8717 				    " rsvd1=%x, rsvd2=%x, sts-rsvd1=%x,"
8718 				    " sts-rsvd2=%x, rsplen=%x, senselen=%x",
8719 				    ptgt->tgt_d_id, plun->lun_num,
8720 				    rsp->reserved_0, rsp->reserved_1,
8721 				    rsp->fcp_u.fcp_status.reserved_0,
8722 				    rsp->fcp_u.fcp_status.reserved_1,
8723 				    rsp->fcp_response_len, rsp->fcp_sense_len);
8724 
8725 				return;
8726 			}
8727 
8728 			if (pptr->port_fcp_dma != FC_NO_DVMA_SPACE) {
8729 				FCP_CP_IN(fpkt->pkt_resp +
8730 				    sizeof (struct fcp_rsp), bep,
8731 				    fpkt->pkt_resp_acc,
8732 				    sizeof (struct fcp_rsp_info));
8733 			}
8734 
8735 			if (bep->rsp_code != FCP_NO_FAILURE) {
8736 				child_info_t	*cip;
8737 
8738 				pkt->pkt_reason = CMD_TRAN_ERR;
8739 
8740 				mutex_enter(&plun->lun_mutex);
8741 				cip = plun->lun_cip;
8742 				mutex_exit(&plun->lun_mutex);
8743 
8744 				FCP_TRACE(fcp_logq, pptr->port_instbuf,
8745 				    fcp_trace, FCP_BUF_LEVEL_2, 0,
8746 				    "FCP response error on cmd=%p"
8747 				    " target=0x%x, cip=%p", cmd,
8748 				    ptgt->tgt_d_id, cip);
8749 			}
8750 		}
8751 
8752 		/*
8753 		 * See if we got a SCSI error with sense data
8754 		 */
8755 		if (rsp->fcp_u.fcp_status.sense_len_set) {
8756 			uchar_t				rqlen;
8757 			caddr_t				sense_from;
8758 			child_info_t			*cip;
8759 			timeout_id_t			tid;
8760 			struct scsi_arq_status		*arq;
8761 			struct scsi_extended_sense	*sense_to;
8762 
8763 			arq = (struct scsi_arq_status *)pkt->pkt_scbp;
8764 			sense_to = &arq->sts_sensedata;
8765 
8766 			rqlen = (uchar_t)min(rsp->fcp_sense_len,
8767 			    sizeof (struct scsi_extended_sense));
8768 
8769 			sense_from = (caddr_t)fpkt->pkt_resp +
8770 			    sizeof (struct fcp_rsp) + rsp->fcp_response_len;
8771 
8772 			if (fcp_validate_fcp_response(rsp, pptr) !=
8773 			    FC_SUCCESS) {
8774 				pkt->pkt_reason = CMD_CMPLT;
8775 				*(pkt->pkt_scbp) = STATUS_CHECK;
8776 
8777 				fcp_log(CE_WARN, pptr->port_dip,
8778 				    "!SCSI command to d_id=0x%x lun=0x%x"
8779 				    " failed, Bad FCP response values:"
8780 				    " rsvd1=%x, rsvd2=%x, sts-rsvd1=%x,"
8781 				    " sts-rsvd2=%x, rsplen=%x, senselen=%x",
8782 				    ptgt->tgt_d_id, plun->lun_num,
8783 				    rsp->reserved_0, rsp->reserved_1,
8784 				    rsp->fcp_u.fcp_status.reserved_0,
8785 				    rsp->fcp_u.fcp_status.reserved_1,
8786 				    rsp->fcp_response_len, rsp->fcp_sense_len);
8787 
8788 				return;
8789 			}
8790 
8791 			/*
8792 			 * copy in sense information
8793 			 */
8794 			if (pptr->port_fcp_dma != FC_NO_DVMA_SPACE) {
8795 				FCP_CP_IN(sense_from, sense_to,
8796 				    fpkt->pkt_resp_acc, rqlen);
8797 			} else {
8798 				bcopy(sense_from, sense_to, rqlen);
8799 			}
8800 
8801 			if ((FCP_SENSE_REPORTLUN_CHANGED(sense_to)) ||
8802 			    (FCP_SENSE_NO_LUN(sense_to))) {
8803 				mutex_enter(&ptgt->tgt_mutex);
8804 				if (ptgt->tgt_tid == NULL) {
8805 					/*
8806 					 * Kick off rediscovery
8807 					 */
8808 					tid = timeout(fcp_reconfigure_luns,
8809 					    (caddr_t)ptgt, drv_usectohz(1));
8810 
8811 					ptgt->tgt_tid = tid;
8812 					ptgt->tgt_state |= FCP_TGT_BUSY;
8813 				}
8814 				mutex_exit(&ptgt->tgt_mutex);
8815 				if (FCP_SENSE_REPORTLUN_CHANGED(sense_to)) {
8816 					FCP_TRACE(fcp_logq, pptr->port_instbuf,
8817 					    fcp_trace, FCP_BUF_LEVEL_3, 0,
8818 					    "!FCP: Report Lun Has Changed"
8819 					    " target=%x", ptgt->tgt_d_id);
8820 				} else if (FCP_SENSE_NO_LUN(sense_to)) {
8821 					FCP_TRACE(fcp_logq, pptr->port_instbuf,
8822 					    fcp_trace, FCP_BUF_LEVEL_3, 0,
8823 					    "!FCP: LU Not Supported"
8824 					    " target=%x", ptgt->tgt_d_id);
8825 				}
8826 			}
8827 			ASSERT(pkt->pkt_scbp != NULL);
8828 
8829 			pkt->pkt_state |= STATE_ARQ_DONE;
8830 
8831 			arq->sts_rqpkt_resid = SENSE_LENGTH - rqlen;
8832 
8833 			*((uchar_t *)&arq->sts_rqpkt_status) = STATUS_GOOD;
8834 			arq->sts_rqpkt_reason = 0;
8835 			arq->sts_rqpkt_statistics = 0;
8836 
8837 			arq->sts_rqpkt_state = STATE_GOT_BUS |
8838 			    STATE_GOT_TARGET | STATE_SENT_CMD |
8839 			    STATE_GOT_STATUS | STATE_ARQ_DONE |
8840 			    STATE_XFERRED_DATA;
8841 
8842 			mutex_enter(&plun->lun_mutex);
8843 			cip = plun->lun_cip;
8844 			mutex_exit(&plun->lun_mutex);
8845 
8846 			FCP_DTRACE(fcp_logq, pptr->port_instbuf,
8847 			    fcp_trace, FCP_BUF_LEVEL_8, 0,
8848 			    "SCSI Check condition on cmd=%p target=0x%x"
8849 			    " LUN=%p, cmd=%x SCSI status=%x, es key=%x"
8850 			    " ASC=%x ASCQ=%x", cmd, ptgt->tgt_d_id, cip,
8851 			    cmd->cmd_fcp_cmd.fcp_cdb[0],
8852 			    rsp->fcp_u.fcp_status.scsi_status,
8853 			    sense_to->es_key, sense_to->es_add_code,
8854 			    sense_to->es_qual_code);
8855 		}
8856 	} else {
8857 		plun = ADDR2LUN(&pkt->pkt_address);
8858 		ptgt = plun->lun_tgt;
8859 		ASSERT(ptgt != NULL);
8860 
8861 		/*
8862 		 * Work harder to translate errors into target driver
8863 		 * understandable ones. Note with despair that the target
8864 		 * drivers don't decode pkt_state and pkt_reason exhaustively
8865 		 * They resort to using the big hammer most often, which
8866 		 * may not get fixed in the life time of this driver.
8867 		 */
8868 		pkt->pkt_state = 0;
8869 		pkt->pkt_statistics = 0;
8870 
8871 		switch (fpkt->pkt_state) {
8872 		case FC_PKT_TRAN_ERROR:
8873 			switch (fpkt->pkt_reason) {
8874 			case FC_REASON_OVERRUN:
8875 				pkt->pkt_reason = CMD_CMD_OVR;
8876 				pkt->pkt_statistics |= STAT_ABORTED;
8877 				break;
8878 
8879 			case FC_REASON_XCHG_BSY: {
8880 				caddr_t ptr;
8881 
8882 				pkt->pkt_reason = CMD_CMPLT;	/* Lie */
8883 
8884 				ptr = (caddr_t)pkt->pkt_scbp;
8885 				if (ptr) {
8886 					*ptr = STATUS_BUSY;
8887 				}
8888 				break;
8889 			}
8890 
8891 			case FC_REASON_ABORTED:
8892 				pkt->pkt_reason = CMD_TRAN_ERR;
8893 				pkt->pkt_statistics |= STAT_ABORTED;
8894 				break;
8895 
8896 			case FC_REASON_ABORT_FAILED:
8897 				pkt->pkt_reason = CMD_ABORT_FAIL;
8898 				break;
8899 
8900 			case FC_REASON_NO_SEQ_INIT:
8901 			case FC_REASON_CRC_ERROR:
8902 				pkt->pkt_reason = CMD_TRAN_ERR;
8903 				pkt->pkt_statistics |= STAT_ABORTED;
8904 				break;
8905 			default:
8906 				pkt->pkt_reason = CMD_TRAN_ERR;
8907 				break;
8908 			}
8909 			break;
8910 
8911 		case FC_PKT_PORT_OFFLINE: {
8912 			dev_info_t	*cdip = NULL;
8913 			caddr_t		ptr;
8914 
8915 			if (fpkt->pkt_reason == FC_REASON_LOGIN_REQUIRED) {
8916 				FCP_DTRACE(fcp_logq, pptr->port_instbuf,
8917 				    fcp_trace, FCP_BUF_LEVEL_8, 0,
8918 				    "SCSI cmd; LOGIN REQUIRED from FCA for %x",
8919 				    ptgt->tgt_d_id);
8920 			}
8921 
8922 			mutex_enter(&plun->lun_mutex);
8923 			if (plun->lun_mpxio == 0) {
8924 				cdip = DIP(plun->lun_cip);
8925 			} else if (plun->lun_cip) {
8926 				cdip = mdi_pi_get_client(PIP(plun->lun_cip));
8927 			}
8928 
8929 			mutex_exit(&plun->lun_mutex);
8930 
8931 			if (cdip) {
8932 				(void) ndi_event_retrieve_cookie(
8933 				    pptr->port_ndi_event_hdl, cdip,
8934 				    FCAL_REMOVE_EVENT, &fcp_remove_eid,
8935 				    NDI_EVENT_NOPASS);
8936 				(void) ndi_event_run_callbacks(
8937 				    pptr->port_ndi_event_hdl, cdip,
8938 				    fcp_remove_eid, NULL);
8939 			}
8940 
8941 			/*
8942 			 * If the link goes off-line for a lip,
8943 			 * this will cause a error to the ST SG
8944 			 * SGEN drivers. By setting BUSY we will
8945 			 * give the drivers the chance to retry
8946 			 * before it blows of the job. ST will
8947 			 * remember how many times it has retried.
8948 			 */
8949 
8950 			if ((plun->lun_type == DTYPE_SEQUENTIAL) ||
8951 			    (plun->lun_type == DTYPE_CHANGER)) {
8952 				pkt->pkt_reason = CMD_CMPLT;	/* Lie */
8953 				ptr = (caddr_t)pkt->pkt_scbp;
8954 				if (ptr) {
8955 					*ptr = STATUS_BUSY;
8956 				}
8957 			} else {
8958 				pkt->pkt_reason = CMD_TRAN_ERR;
8959 				pkt->pkt_statistics |= STAT_BUS_RESET;
8960 			}
8961 			break;
8962 		}
8963 
8964 		case FC_PKT_TRAN_BSY:
8965 			/*
8966 			 * Use the ssd Qfull handling here.
8967 			 */
8968 			*pkt->pkt_scbp = STATUS_INTERMEDIATE;
8969 			pkt->pkt_state = STATE_GOT_BUS;
8970 			break;
8971 
8972 		case FC_PKT_TIMEOUT:
8973 			pkt->pkt_reason = CMD_TIMEOUT;
8974 			if (fpkt->pkt_reason == FC_REASON_ABORT_FAILED) {
8975 				pkt->pkt_statistics |= STAT_TIMEOUT;
8976 			} else {
8977 				pkt->pkt_statistics |= STAT_ABORTED;
8978 			}
8979 			break;
8980 
8981 		case FC_PKT_LOCAL_RJT:
8982 			switch (fpkt->pkt_reason) {
8983 			case FC_REASON_OFFLINE: {
8984 				dev_info_t	*cdip = NULL;
8985 
8986 				mutex_enter(&plun->lun_mutex);
8987 				if (plun->lun_mpxio == 0) {
8988 					cdip = DIP(plun->lun_cip);
8989 				} else if (plun->lun_cip) {
8990 					cdip = mdi_pi_get_client(
8991 					    PIP(plun->lun_cip));
8992 				}
8993 				mutex_exit(&plun->lun_mutex);
8994 
8995 				if (cdip) {
8996 					(void) ndi_event_retrieve_cookie(
8997 					    pptr->port_ndi_event_hdl, cdip,
8998 					    FCAL_REMOVE_EVENT,
8999 					    &fcp_remove_eid,
9000 					    NDI_EVENT_NOPASS);
9001 					(void) ndi_event_run_callbacks(
9002 					    pptr->port_ndi_event_hdl,
9003 					    cdip, fcp_remove_eid, NULL);
9004 				}
9005 
9006 				pkt->pkt_reason = CMD_TRAN_ERR;
9007 				pkt->pkt_statistics |= STAT_BUS_RESET;
9008 
9009 				break;
9010 			}
9011 
9012 			case FC_REASON_NOMEM:
9013 			case FC_REASON_QFULL: {
9014 				caddr_t ptr;
9015 
9016 				pkt->pkt_reason = CMD_CMPLT;	/* Lie */
9017 				ptr = (caddr_t)pkt->pkt_scbp;
9018 				if (ptr) {
9019 					*ptr = STATUS_BUSY;
9020 				}
9021 				break;
9022 			}
9023 
9024 			case FC_REASON_DMA_ERROR:
9025 				pkt->pkt_reason = CMD_DMA_DERR;
9026 				pkt->pkt_statistics |= STAT_ABORTED;
9027 				break;
9028 
9029 			case FC_REASON_CRC_ERROR:
9030 			case FC_REASON_UNDERRUN: {
9031 				uchar_t		status;
9032 				/*
9033 				 * Work around for Bugid: 4240945.
9034 				 * IB on A5k doesn't set the Underrun bit
9035 				 * in the fcp status, when it is transferring
9036 				 * less than requested amount of data. Work
9037 				 * around the ses problem to keep luxadm
9038 				 * happy till ibfirmware is fixed.
9039 				 */
9040 				if (pptr->port_fcp_dma != FC_NO_DVMA_SPACE) {
9041 					FCP_CP_IN(fpkt->pkt_resp, rsp,
9042 					    fpkt->pkt_resp_acc,
9043 					    sizeof (struct fcp_rsp));
9044 				}
9045 				status = rsp->fcp_u.fcp_status.scsi_status;
9046 				if (((plun->lun_type & DTYPE_MASK) ==
9047 				    DTYPE_ESI) && (status == STATUS_GOOD)) {
9048 					pkt->pkt_reason = CMD_CMPLT;
9049 					*pkt->pkt_scbp = status;
9050 					pkt->pkt_resid = 0;
9051 				} else {
9052 					pkt->pkt_reason = CMD_TRAN_ERR;
9053 					pkt->pkt_statistics |= STAT_ABORTED;
9054 				}
9055 				break;
9056 			}
9057 
9058 			case FC_REASON_NO_CONNECTION:
9059 			case FC_REASON_UNSUPPORTED:
9060 			case FC_REASON_ILLEGAL_REQ:
9061 			case FC_REASON_BAD_SID:
9062 			case FC_REASON_DIAG_BUSY:
9063 			case FC_REASON_FCAL_OPN_FAIL:
9064 			case FC_REASON_BAD_XID:
9065 			default:
9066 				pkt->pkt_reason = CMD_TRAN_ERR;
9067 				pkt->pkt_statistics |= STAT_ABORTED;
9068 				break;
9069 
9070 			}
9071 			break;
9072 
9073 		case FC_PKT_NPORT_RJT:
9074 		case FC_PKT_FABRIC_RJT:
9075 		case FC_PKT_NPORT_BSY:
9076 		case FC_PKT_FABRIC_BSY:
9077 		default:
9078 			FCP_DTRACE(fcp_logq, pptr->port_instbuf,
9079 			    fcp_trace, FCP_BUF_LEVEL_8, 0,
9080 			    "FC Status 0x%x, reason 0x%x",
9081 			    fpkt->pkt_state, fpkt->pkt_reason);
9082 			pkt->pkt_reason = CMD_TRAN_ERR;
9083 			pkt->pkt_statistics |= STAT_ABORTED;
9084 			break;
9085 		}
9086 
9087 		FCP_DTRACE(fcp_logq, pptr->port_instbuf,
9088 		    fcp_trace, FCP_BUF_LEVEL_9, 0,
9089 		    "!FC error on cmd=%p target=0x%x: pkt state=0x%x "
9090 		    " pkt reason=0x%x", cmd, ptgt->tgt_d_id, fpkt->pkt_state,
9091 		    fpkt->pkt_reason);
9092 	}
9093 
9094 	ASSERT(save.a_hba_tran == pkt->pkt_address.a_hba_tran);
9095 }
9096 
9097 
9098 static int
9099 fcp_validate_fcp_response(struct fcp_rsp *rsp, struct fcp_port *pptr)
9100 {
9101 	if (rsp->reserved_0 || rsp->reserved_1 ||
9102 	    rsp->fcp_u.fcp_status.reserved_0 ||
9103 	    rsp->fcp_u.fcp_status.reserved_1) {
9104 		/*
9105 		 * These reserved fields should ideally be zero. FCP-2 does say
9106 		 * that the recipient need not check for reserved fields to be
9107 		 * zero. If they are not zero, we will not make a fuss about it
9108 		 * - just log it (in debug to both trace buffer and messages
9109 		 * file and to trace buffer only in non-debug) and move on.
9110 		 *
9111 		 * Non-zero reserved fields were seen with minnows.
9112 		 *
9113 		 * qlc takes care of some of this but we cannot assume that all
9114 		 * FCAs will do so.
9115 		 */
9116 		FCP_TRACE(fcp_logq, pptr->port_instbuf, fcp_trace,
9117 		    FCP_BUF_LEVEL_5, 0,
9118 		    "Got fcp response packet with non-zero reserved fields "
9119 		    "rsp->reserved_0:0x%x, rsp_reserved_1:0x%x, "
9120 		    "status.reserved_0:0x%x, status.reserved_1:0x%x",
9121 		    rsp->reserved_0, rsp->reserved_1,
9122 		    rsp->fcp_u.fcp_status.reserved_0,
9123 		    rsp->fcp_u.fcp_status.reserved_1);
9124 	}
9125 
9126 	if (rsp->fcp_u.fcp_status.rsp_len_set && (rsp->fcp_response_len >
9127 	    (FCP_MAX_RSP_IU_SIZE - sizeof (struct fcp_rsp)))) {
9128 		return (FC_FAILURE);
9129 	}
9130 
9131 	if (rsp->fcp_u.fcp_status.sense_len_set && rsp->fcp_sense_len >
9132 	    (FCP_MAX_RSP_IU_SIZE - rsp->fcp_response_len -
9133 	    sizeof (struct fcp_rsp))) {
9134 		return (FC_FAILURE);
9135 	}
9136 
9137 	return (FC_SUCCESS);
9138 }
9139 
9140 
9141 /*
9142  * This is called when there is a change the in device state. The case we're
9143  * handling here is, if the d_id s does not match, offline this tgt and online
9144  * a new tgt with the new d_id.	 called from fcp_handle_devices with
9145  * port_mutex held.
9146  */
9147 static int
9148 fcp_device_changed(struct fcp_port *pptr, struct fcp_tgt *ptgt,
9149     fc_portmap_t *map_entry, int link_cnt, int tgt_cnt, int cause)
9150 {
9151 	ASSERT(mutex_owned(&pptr->port_mutex));
9152 
9153 	FCP_TRACE(fcp_logq, pptr->port_instbuf,
9154 	    fcp_trace, FCP_BUF_LEVEL_3, 0,
9155 	    "Starting fcp_device_changed...");
9156 
9157 	/*
9158 	 * The two cases where the port_device_changed is called is
9159 	 * either it changes it's d_id or it's hard address.
9160 	 */
9161 	if ((ptgt->tgt_d_id != map_entry->map_did.port_id) ||
9162 	    (FC_TOP_EXTERNAL(pptr->port_topology) &&
9163 	    (ptgt->tgt_hard_addr != map_entry->map_hard_addr.hard_addr))) {
9164 
9165 		/* offline this target */
9166 		mutex_enter(&ptgt->tgt_mutex);
9167 		if (!(ptgt->tgt_state & FCP_TGT_OFFLINE)) {
9168 			(void) fcp_offline_target(pptr, ptgt, link_cnt,
9169 			    0, 1, NDI_DEVI_REMOVE);
9170 		}
9171 		mutex_exit(&ptgt->tgt_mutex);
9172 
9173 		fcp_log(CE_NOTE, pptr->port_dip,
9174 		    "Change in target properties: Old D_ID=%x New D_ID=%x"
9175 		    " Old HA=%x New HA=%x", ptgt->tgt_d_id,
9176 		    map_entry->map_did.port_id, ptgt->tgt_hard_addr,
9177 		    map_entry->map_hard_addr.hard_addr);
9178 	}
9179 
9180 	return (fcp_handle_mapflags(pptr, ptgt, map_entry,
9181 	    link_cnt, tgt_cnt, cause));
9182 }
9183 
9184 /*
9185  *     Function: fcp_alloc_lun
9186  *
9187  *  Description: Creates a new lun structure and adds it to the list
9188  *		 of luns of the target.
9189  *
9190  *     Argument: ptgt		Target the lun will belong to.
9191  *
9192  * Return Value: NULL		Failed
9193  *		 Not NULL	Succeeded
9194  *
9195  *	Context: Kernel context
9196  */
9197 static struct fcp_lun *
9198 fcp_alloc_lun(struct fcp_tgt *ptgt)
9199 {
9200 	struct fcp_lun *plun;
9201 
9202 	plun = kmem_zalloc(sizeof (struct fcp_lun), KM_NOSLEEP);
9203 	if (plun != NULL) {
9204 		/*
9205 		 * Initialize the mutex before putting in the target list
9206 		 * especially before releasing the target mutex.
9207 		 */
9208 		mutex_init(&plun->lun_mutex, NULL, MUTEX_DRIVER, NULL);
9209 		plun->lun_tgt = ptgt;
9210 
9211 		mutex_enter(&ptgt->tgt_mutex);
9212 		plun->lun_next = ptgt->tgt_lun;
9213 		ptgt->tgt_lun = plun;
9214 		plun->lun_old_guid = NULL;
9215 		plun->lun_old_guid_size = 0;
9216 		mutex_exit(&ptgt->tgt_mutex);
9217 	}
9218 
9219 	return (plun);
9220 }
9221 
9222 /*
9223  *     Function: fcp_dealloc_lun
9224  *
9225  *  Description: Frees the LUN structure passed by the caller.
9226  *
9227  *     Argument: plun		LUN structure to free.
9228  *
9229  * Return Value: None
9230  *
9231  *	Context: Kernel context.
9232  */
9233 static void
9234 fcp_dealloc_lun(struct fcp_lun *plun)
9235 {
9236 	mutex_enter(&plun->lun_mutex);
9237 	if (plun->lun_cip) {
9238 		fcp_remove_child(plun);
9239 	}
9240 	mutex_exit(&plun->lun_mutex);
9241 
9242 	mutex_destroy(&plun->lun_mutex);
9243 	if (plun->lun_guid) {
9244 		kmem_free(plun->lun_guid, plun->lun_guid_size);
9245 	}
9246 	if (plun->lun_old_guid) {
9247 		kmem_free(plun->lun_old_guid, plun->lun_old_guid_size);
9248 	}
9249 	kmem_free(plun, sizeof (*plun));
9250 }
9251 
9252 /*
9253  *     Function: fcp_alloc_tgt
9254  *
9255  *  Description: Creates a new target structure and adds it to the port
9256  *		 hash list.
9257  *
9258  *     Argument: pptr		fcp port structure
9259  *		 *map_entry	entry describing the target to create
9260  *		 link_cnt	Link state change counter
9261  *
9262  * Return Value: NULL		Failed
9263  *		 Not NULL	Succeeded
9264  *
9265  *	Context: Kernel context.
9266  */
9267 static struct fcp_tgt *
9268 fcp_alloc_tgt(struct fcp_port *pptr, fc_portmap_t *map_entry, int link_cnt)
9269 {
9270 	int			hash;
9271 	uchar_t			*wwn;
9272 	struct fcp_tgt	*ptgt;
9273 
9274 	ptgt = kmem_zalloc(sizeof (*ptgt), KM_NOSLEEP);
9275 	if (ptgt != NULL) {
9276 		mutex_enter(&pptr->port_mutex);
9277 		if (link_cnt != pptr->port_link_cnt) {
9278 			/*
9279 			 * oh oh -- another link reset
9280 			 * in progress -- give up
9281 			 */
9282 			mutex_exit(&pptr->port_mutex);
9283 			kmem_free(ptgt, sizeof (*ptgt));
9284 			ptgt = NULL;
9285 		} else {
9286 			/*
9287 			 * initialize the mutex before putting in the port
9288 			 * wwn list, especially before releasing the port
9289 			 * mutex.
9290 			 */
9291 			mutex_init(&ptgt->tgt_mutex, NULL, MUTEX_DRIVER, NULL);
9292 
9293 			/* add new target entry to the port's hash list */
9294 			wwn = (uchar_t *)&map_entry->map_pwwn;
9295 			hash = FCP_HASH(wwn);
9296 
9297 			ptgt->tgt_next = pptr->port_tgt_hash_table[hash];
9298 			pptr->port_tgt_hash_table[hash] = ptgt;
9299 
9300 			/* save cross-ptr */
9301 			ptgt->tgt_port = pptr;
9302 
9303 			ptgt->tgt_change_cnt = 1;
9304 
9305 			/* initialize the target manual_config_only flag */
9306 			if (fcp_enable_auto_configuration) {
9307 				ptgt->tgt_manual_config_only = 0;
9308 			} else {
9309 				ptgt->tgt_manual_config_only = 1;
9310 			}
9311 
9312 			mutex_exit(&pptr->port_mutex);
9313 		}
9314 	}
9315 
9316 	return (ptgt);
9317 }
9318 
9319 /*
9320  *     Function: fcp_dealloc_tgt
9321  *
9322  *  Description: Frees the target structure passed by the caller.
9323  *
9324  *     Argument: ptgt		Target structure to free.
9325  *
9326  * Return Value: None
9327  *
9328  *	Context: Kernel context.
9329  */
9330 static void
9331 fcp_dealloc_tgt(struct fcp_tgt *ptgt)
9332 {
9333 	mutex_destroy(&ptgt->tgt_mutex);
9334 	kmem_free(ptgt, sizeof (*ptgt));
9335 }
9336 
9337 
9338 /*
9339  * Handle STATUS_QFULL and STATUS_BUSY by performing delayed retry
9340  *
9341  *	Device discovery commands will not be retried for-ever as
9342  *	this will have repercussions on other devices that need to
9343  *	be submitted to the hotplug thread. After a quick glance
9344  *	at the SCSI-3 spec, it was found that the spec doesn't
9345  *	mandate a forever retry, rather recommends a delayed retry.
9346  *
9347  *	Since Photon IB is single threaded, STATUS_BUSY is common
9348  *	in a 4+initiator environment. Make sure the total time
9349  *	spent on retries (including command timeout) does not
9350  *	60 seconds
9351  */
9352 static void
9353 fcp_queue_ipkt(struct fcp_port *pptr, fc_packet_t *fpkt)
9354 {
9355 	struct fcp_ipkt *icmd = (struct fcp_ipkt *)fpkt->pkt_ulp_private;
9356 	struct fcp_tgt *ptgt = icmd->ipkt_tgt;
9357 
9358 	mutex_enter(&pptr->port_mutex);
9359 	mutex_enter(&ptgt->tgt_mutex);
9360 	if (FCP_STATE_CHANGED(pptr, ptgt, icmd)) {
9361 		FCP_TRACE(fcp_logq, pptr->port_instbuf,
9362 		    fcp_trace, FCP_BUF_LEVEL_2, 0,
9363 		    "fcp_queue_ipkt,1:state change occured"
9364 		    " for D_ID=0x%x", ptgt->tgt_d_id);
9365 		mutex_exit(&ptgt->tgt_mutex);
9366 		mutex_exit(&pptr->port_mutex);
9367 		(void) fcp_call_finish_init(pptr, ptgt, icmd->ipkt_link_cnt,
9368 		    icmd->ipkt_change_cnt, icmd->ipkt_cause);
9369 		fcp_icmd_free(pptr, icmd);
9370 		return;
9371 	}
9372 	mutex_exit(&ptgt->tgt_mutex);
9373 
9374 	icmd->ipkt_restart = fcp_watchdog_time + icmd->ipkt_retries++;
9375 
9376 	if (pptr->port_ipkt_list != NULL) {
9377 		/* add pkt to front of doubly-linked list */
9378 		pptr->port_ipkt_list->ipkt_prev = icmd;
9379 		icmd->ipkt_next = pptr->port_ipkt_list;
9380 		pptr->port_ipkt_list = icmd;
9381 		icmd->ipkt_prev = NULL;
9382 	} else {
9383 		/* this is the first/only pkt on the list */
9384 		pptr->port_ipkt_list = icmd;
9385 		icmd->ipkt_next = NULL;
9386 		icmd->ipkt_prev = NULL;
9387 	}
9388 	mutex_exit(&pptr->port_mutex);
9389 }
9390 
9391 /*
9392  *     Function: fcp_transport
9393  *
9394  *  Description: This function submits the Fibre Channel packet to the transort
9395  *		 layer by calling fc_ulp_transport().  If fc_ulp_transport()
9396  *		 fails the submission, the treatment depends on the value of
9397  *		 the variable internal.
9398  *
9399  *     Argument: port_handle	fp/fctl port handle.
9400  *		 *fpkt		Packet to submit to the transport layer.
9401  *		 internal	Not zero when it's an internal packet.
9402  *
9403  * Return Value: FC_TRAN_BUSY
9404  *		 FC_STATEC_BUSY
9405  *		 FC_OFFLINE
9406  *		 FC_LOGINREQ
9407  *		 FC_DEVICE_BUSY
9408  *		 FC_SUCCESS
9409  */
9410 static int
9411 fcp_transport(opaque_t port_handle, fc_packet_t *fpkt, int internal)
9412 {
9413 	int	rval;
9414 
9415 	rval = fc_ulp_transport(port_handle, fpkt);
9416 	if (rval == FC_SUCCESS) {
9417 		return (rval);
9418 	}
9419 
9420 	/*
9421 	 * LUN isn't marked BUSY or OFFLINE, so we got here to transport
9422 	 * a command, if the underlying modules see that there is a state
9423 	 * change, or if a port is OFFLINE, that means, that state change
9424 	 * hasn't reached FCP yet, so re-queue the command for deferred
9425 	 * submission.
9426 	 */
9427 	if ((rval == FC_STATEC_BUSY) || (rval == FC_OFFLINE) ||
9428 	    (rval == FC_LOGINREQ) || (rval == FC_DEVICE_BUSY) ||
9429 	    (rval == FC_DEVICE_BUSY_NEW_RSCN) || (rval == FC_TRAN_BUSY)) {
9430 		/*
9431 		 * Defer packet re-submission. Life hang is possible on
9432 		 * internal commands if the port driver sends FC_STATEC_BUSY
9433 		 * for ever, but that shouldn't happen in a good environment.
9434 		 * Limiting re-transport for internal commands is probably a
9435 		 * good idea..
9436 		 * A race condition can happen when a port sees barrage of
9437 		 * link transitions offline to online. If the FCTL has
9438 		 * returned FC_STATEC_BUSY or FC_OFFLINE then none of the
9439 		 * internal commands should be queued to do the discovery.
9440 		 * The race condition is when an online comes and FCP starts
9441 		 * its internal discovery and the link goes offline. It is
9442 		 * possible that the statec_callback has not reached FCP
9443 		 * and FCP is carrying on with its internal discovery.
9444 		 * FC_STATEC_BUSY or FC_OFFLINE will be the first indication
9445 		 * that the link has gone offline. At this point FCP should
9446 		 * drop all the internal commands and wait for the
9447 		 * statec_callback. It will be facilitated by incrementing
9448 		 * port_link_cnt.
9449 		 *
9450 		 * For external commands, the (FC)pkt_timeout is decremented
9451 		 * by the QUEUE Delay added by our driver, Care is taken to
9452 		 * ensure that it doesn't become zero (zero means no timeout)
9453 		 * If the time expires right inside driver queue itself,
9454 		 * the watch thread will return it to the original caller
9455 		 * indicating that the command has timed-out.
9456 		 */
9457 		if (internal) {
9458 			char			*op;
9459 			struct fcp_ipkt	*icmd;
9460 
9461 			icmd = (struct fcp_ipkt *)fpkt->pkt_ulp_private;
9462 			switch (icmd->ipkt_opcode) {
9463 			case SCMD_REPORT_LUN:
9464 				op = "REPORT LUN";
9465 				break;
9466 
9467 			case SCMD_INQUIRY:
9468 				op = "INQUIRY";
9469 				break;
9470 
9471 			case SCMD_INQUIRY_PAGE83:
9472 				op = "INQUIRY-83";
9473 				break;
9474 
9475 			default:
9476 				op = "Internal SCSI COMMAND";
9477 				break;
9478 			}
9479 
9480 			if (fcp_handle_ipkt_errors(icmd->ipkt_port,
9481 			    icmd->ipkt_tgt, icmd, rval, op) == DDI_SUCCESS) {
9482 				rval = FC_SUCCESS;
9483 			}
9484 		} else {
9485 			struct fcp_pkt *cmd;
9486 			struct fcp_port *pptr;
9487 
9488 			cmd = (struct fcp_pkt *)fpkt->pkt_ulp_private;
9489 			cmd->cmd_state = FCP_PKT_IDLE;
9490 			pptr = ADDR2FCP(&cmd->cmd_pkt->pkt_address);
9491 
9492 			if (cmd->cmd_pkt->pkt_flags & FLAG_NOQUEUE) {
9493 				FCP_DTRACE(fcp_logq, pptr->port_instbuf,
9494 				    fcp_trace, FCP_BUF_LEVEL_9, 0,
9495 				    "fcp_transport: xport busy for pkt %p",
9496 				    cmd->cmd_pkt);
9497 				rval = FC_TRAN_BUSY;
9498 			} else {
9499 				fcp_queue_pkt(pptr, cmd);
9500 				rval = FC_SUCCESS;
9501 			}
9502 		}
9503 	}
9504 
9505 	return (rval);
9506 }
9507 
9508 /*VARARGS3*/
9509 static void
9510 fcp_log(int level, dev_info_t *dip, const char *fmt, ...)
9511 {
9512 	char		buf[256];
9513 	va_list		ap;
9514 
9515 	if (dip == NULL) {
9516 		dip = fcp_global_dip;
9517 	}
9518 
9519 	va_start(ap, fmt);
9520 	(void) vsprintf(buf, fmt, ap);
9521 	va_end(ap);
9522 
9523 	scsi_log(dip, "fcp", level, buf);
9524 }
9525 
9526 /*
9527  * This function retries NS registry of FC4 type.
9528  * It assumes that fcp_mutex is held.
9529  * The function does nothing if topology is not fabric
9530  * So, the topology has to be set before this function can be called
9531  */
9532 static void
9533 fcp_retry_ns_registry(struct fcp_port *pptr, uint32_t s_id)
9534 {
9535 	int	rval;
9536 
9537 	ASSERT(MUTEX_HELD(&pptr->port_mutex));
9538 
9539 	if (((pptr->port_state & FCP_STATE_NS_REG_FAILED) == 0) ||
9540 	    ((pptr->port_topology != FC_TOP_FABRIC) &&
9541 	    (pptr->port_topology != FC_TOP_PUBLIC_LOOP))) {
9542 		if (pptr->port_state & FCP_STATE_NS_REG_FAILED) {
9543 			pptr->port_state &= ~FCP_STATE_NS_REG_FAILED;
9544 		}
9545 		return;
9546 	}
9547 	mutex_exit(&pptr->port_mutex);
9548 	rval = fcp_do_ns_registry(pptr, s_id);
9549 	mutex_enter(&pptr->port_mutex);
9550 
9551 	if (rval == 0) {
9552 		/* Registry successful. Reset flag */
9553 		pptr->port_state &= ~(FCP_STATE_NS_REG_FAILED);
9554 	}
9555 }
9556 
9557 /*
9558  * This function registers the ULP with the switch by calling transport i/f
9559  */
9560 static int
9561 fcp_do_ns_registry(struct fcp_port *pptr, uint32_t s_id)
9562 {
9563 	fc_ns_cmd_t		ns_cmd;
9564 	ns_rfc_type_t		rfc;
9565 	uint32_t		types[8];
9566 
9567 	/*
9568 	 * Prepare the Name server structure to
9569 	 * register with the transport in case of
9570 	 * Fabric configuration.
9571 	 */
9572 	bzero(&rfc, sizeof (rfc));
9573 	bzero(types, sizeof (types));
9574 
9575 	types[FC4_TYPE_WORD_POS(FC_TYPE_SCSI_FCP)] =
9576 	    (1 << FC4_TYPE_BIT_POS(FC_TYPE_SCSI_FCP));
9577 
9578 	rfc.rfc_port_id.port_id = s_id;
9579 	bcopy(types, rfc.rfc_types, sizeof (types));
9580 
9581 	ns_cmd.ns_flags = 0;
9582 	ns_cmd.ns_cmd = NS_RFT_ID;
9583 	ns_cmd.ns_req_len = sizeof (rfc);
9584 	ns_cmd.ns_req_payload = (caddr_t)&rfc;
9585 	ns_cmd.ns_resp_len = 0;
9586 	ns_cmd.ns_resp_payload = NULL;
9587 
9588 	/*
9589 	 * Perform the Name Server Registration for SCSI_FCP FC4 Type.
9590 	 */
9591 	if (fc_ulp_port_ns(pptr->port_fp_handle, NULL, &ns_cmd)) {
9592 		fcp_log(CE_WARN, pptr->port_dip,
9593 		    "!ns_registry: failed name server registration");
9594 		return (1);
9595 	}
9596 
9597 	return (0);
9598 }
9599 
9600 /*
9601  *     Function: fcp_handle_port_attach
9602  *
9603  *  Description: This function is called from fcp_port_attach() to attach a
9604  *		 new port. This routine does the following:
9605  *
9606  *		1) Allocates an fcp_port structure and initializes it.
9607  *		2) Tries to register the new FC-4 (FCP) capablity with the name
9608  *		   server.
9609  *		3) Kicks off the enumeration of the targets/luns visible
9610  *		   through this new port.  That is done by calling
9611  *		   fcp_statec_callback() if the port is online.
9612  *
9613  *     Argument: ulph		fp/fctl port handle.
9614  *		 *pinfo		Port information.
9615  *		 s_id		Port ID.
9616  *		 instance	Device instance number for the local port
9617  *				(returned by ddi_get_instance()).
9618  *
9619  * Return Value: DDI_SUCCESS
9620  *		 DDI_FAILURE
9621  *
9622  *	Context: User and Kernel context.
9623  */
9624 /*ARGSUSED*/
9625 int
9626 fcp_handle_port_attach(opaque_t ulph, fc_ulp_port_info_t *pinfo,
9627     uint32_t s_id, int instance)
9628 {
9629 	int			res = DDI_FAILURE;
9630 	scsi_hba_tran_t		*tran;
9631 	int			mutex_initted = FALSE;
9632 	int			hba_attached = FALSE;
9633 	int			soft_state_linked = FALSE;
9634 	int			event_bind = FALSE;
9635 	struct fcp_port		*pptr;
9636 	fc_portmap_t		*tmp_list = NULL;
9637 	uint32_t		max_cnt, alloc_cnt;
9638 	uchar_t			*boot_wwn = NULL;
9639 	uint_t			nbytes;
9640 	int			manual_cfg;
9641 
9642 	/*
9643 	 * this port instance attaching for the first time (or after
9644 	 * being detached before)
9645 	 */
9646 	FCP_TRACE(fcp_logq, "fcp", fcp_trace,
9647 	    FCP_BUF_LEVEL_3, 0, "port attach: for port %d", instance);
9648 
9649 	if (ddi_soft_state_zalloc(fcp_softstate, instance) != DDI_SUCCESS) {
9650 		cmn_err(CE_WARN, "fcp: Softstate struct alloc failed"
9651 		    "parent dip: %p; instance: %d", (void *)pinfo->port_dip,
9652 		    instance);
9653 		return (res);
9654 	}
9655 
9656 	if ((pptr = ddi_get_soft_state(fcp_softstate, instance)) == NULL) {
9657 		/* this shouldn't happen */
9658 		ddi_soft_state_free(fcp_softstate, instance);
9659 		cmn_err(CE_WARN, "fcp: bad soft state");
9660 		return (res);
9661 	}
9662 
9663 	(void) sprintf(pptr->port_instbuf, "fcp(%d)", instance);
9664 
9665 	/*
9666 	 * Make a copy of ulp_port_info as fctl allocates
9667 	 * a temp struct.
9668 	 */
9669 	(void) fcp_cp_pinfo(pptr, pinfo);
9670 
9671 	/*
9672 	 * Check for manual_configuration_only property.
9673 	 * Enable manual configurtion if the property is
9674 	 * set to 1, otherwise disable manual configuration.
9675 	 */
9676 	if ((manual_cfg = ddi_prop_get_int(DDI_DEV_T_ANY, pptr->port_dip,
9677 	    DDI_PROP_NOTPROM | DDI_PROP_DONTPASS,
9678 	    MANUAL_CFG_ONLY,
9679 	    -1)) != -1) {
9680 		if (manual_cfg == 1) {
9681 			char	*pathname;
9682 			pathname = kmem_zalloc(MAXPATHLEN, KM_SLEEP);
9683 			(void) ddi_pathname(pptr->port_dip, pathname);
9684 			cmn_err(CE_NOTE,
9685 			    "%s (%s%d) %s is enabled via %s.conf.",
9686 			    pathname,
9687 			    ddi_driver_name(pptr->port_dip),
9688 			    ddi_get_instance(pptr->port_dip),
9689 			    MANUAL_CFG_ONLY,
9690 			    ddi_driver_name(pptr->port_dip));
9691 			fcp_enable_auto_configuration = 0;
9692 			kmem_free(pathname, MAXPATHLEN);
9693 		}
9694 	}
9695 	_NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(pptr->port_link_cnt))
9696 	pptr->port_link_cnt = 1;
9697 	_NOTE(NOW_VISIBLE_TO_OTHER_THREADS(pptr->port_link_cnt))
9698 	pptr->port_id = s_id;
9699 	pptr->port_instance = instance;
9700 	_NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(pptr->port_state))
9701 	pptr->port_state = FCP_STATE_INIT;
9702 	_NOTE(NOW_VISIBLE_TO_OTHER_THREADS(pptr->port_state))
9703 
9704 	pptr->port_dmacookie_sz = (pptr->port_data_dma_attr.dma_attr_sgllen *
9705 	    sizeof (ddi_dma_cookie_t));
9706 
9707 	/*
9708 	 * The two mutexes of fcp_port are initialized.	 The variable
9709 	 * mutex_initted is incremented to remember that fact.	That variable
9710 	 * is checked when the routine fails and the mutexes have to be
9711 	 * destroyed.
9712 	 */
9713 	mutex_init(&pptr->port_mutex, NULL, MUTEX_DRIVER, NULL);
9714 	mutex_init(&pptr->port_pkt_mutex, NULL, MUTEX_DRIVER, NULL);
9715 	mutex_initted++;
9716 
9717 	/*
9718 	 * The SCSI tran structure is allocate and initialized now.
9719 	 */
9720 	if ((tran = scsi_hba_tran_alloc(pptr->port_dip, 0)) == NULL) {
9721 		fcp_log(CE_WARN, pptr->port_dip,
9722 		    "!fcp%d: scsi_hba_tran_alloc failed", instance);
9723 		goto fail;
9724 	}
9725 
9726 	/* link in the transport structure then fill it in */
9727 	pptr->port_tran = tran;
9728 	tran->tran_hba_private		= pptr;
9729 	tran->tran_tgt_init		= fcp_scsi_tgt_init;
9730 	tran->tran_tgt_probe		= NULL;
9731 	tran->tran_tgt_free		= fcp_scsi_tgt_free;
9732 	tran->tran_start		= fcp_scsi_start;
9733 	tran->tran_reset		= fcp_scsi_reset;
9734 	tran->tran_abort		= fcp_scsi_abort;
9735 	tran->tran_getcap		= fcp_scsi_getcap;
9736 	tran->tran_setcap		= fcp_scsi_setcap;
9737 	tran->tran_init_pkt		= NULL;
9738 	tran->tran_destroy_pkt		= NULL;
9739 	tran->tran_dmafree		= NULL;
9740 	tran->tran_sync_pkt		= NULL;
9741 	tran->tran_reset_notify		= fcp_scsi_reset_notify;
9742 	tran->tran_get_bus_addr		= fcp_scsi_get_bus_addr;
9743 	tran->tran_get_name		= fcp_scsi_get_name;
9744 	tran->tran_clear_aca		= NULL;
9745 	tran->tran_clear_task_set	= NULL;
9746 	tran->tran_terminate_task	= NULL;
9747 	tran->tran_get_eventcookie	= fcp_scsi_bus_get_eventcookie;
9748 	tran->tran_add_eventcall	= fcp_scsi_bus_add_eventcall;
9749 	tran->tran_remove_eventcall	= fcp_scsi_bus_remove_eventcall;
9750 	tran->tran_post_event		= fcp_scsi_bus_post_event;
9751 	tran->tran_quiesce		= NULL;
9752 	tran->tran_unquiesce		= NULL;
9753 	tran->tran_bus_reset		= NULL;
9754 	tran->tran_bus_config		= fcp_scsi_bus_config;
9755 	tran->tran_bus_unconfig		= fcp_scsi_bus_unconfig;
9756 	tran->tran_bus_power		= NULL;
9757 	tran->tran_interconnect_type	= INTERCONNECT_FABRIC;
9758 
9759 	tran->tran_pkt_constructor	= fcp_kmem_cache_constructor;
9760 	tran->tran_pkt_destructor	= fcp_kmem_cache_destructor;
9761 	tran->tran_setup_pkt		= fcp_pkt_setup;
9762 	tran->tran_teardown_pkt		= fcp_pkt_teardown;
9763 	tran->tran_hba_len		= pptr->port_priv_pkt_len +
9764 	    sizeof (struct fcp_pkt) + pptr->port_dmacookie_sz;
9765 
9766 	/*
9767 	 * Allocate an ndi event handle
9768 	 */
9769 	pptr->port_ndi_event_defs = (ndi_event_definition_t *)
9770 	    kmem_zalloc(sizeof (fcp_ndi_event_defs), KM_SLEEP);
9771 
9772 	bcopy(fcp_ndi_event_defs, pptr->port_ndi_event_defs,
9773 	    sizeof (fcp_ndi_event_defs));
9774 
9775 	(void) ndi_event_alloc_hdl(pptr->port_dip, NULL,
9776 	    &pptr->port_ndi_event_hdl, NDI_SLEEP);
9777 
9778 	pptr->port_ndi_events.ndi_events_version = NDI_EVENTS_REV1;
9779 	pptr->port_ndi_events.ndi_n_events = FCP_N_NDI_EVENTS;
9780 	pptr->port_ndi_events.ndi_event_defs = pptr->port_ndi_event_defs;
9781 
9782 	if (DEVI_IS_ATTACHING(pptr->port_dip) &&
9783 	    (ndi_event_bind_set(pptr->port_ndi_event_hdl,
9784 	    &pptr->port_ndi_events, NDI_SLEEP) != NDI_SUCCESS)) {
9785 		goto fail;
9786 	}
9787 	event_bind++;	/* Checked in fail case */
9788 
9789 	if (scsi_hba_attach_setup(pptr->port_dip, &pptr->port_data_dma_attr,
9790 	    tran, SCSI_HBA_ADDR_COMPLEX | SCSI_HBA_TRAN_SCB)
9791 	    != DDI_SUCCESS) {
9792 		fcp_log(CE_WARN, pptr->port_dip,
9793 		    "!fcp%d: scsi_hba_attach_setup failed", instance);
9794 		goto fail;
9795 	}
9796 	hba_attached++;	/* Checked in fail case */
9797 
9798 	pptr->port_mpxio = 0;
9799 	if (mdi_phci_register(MDI_HCI_CLASS_SCSI, pptr->port_dip, 0) ==
9800 	    MDI_SUCCESS) {
9801 		pptr->port_mpxio++;
9802 	}
9803 
9804 	/*
9805 	 * The following code is putting the new port structure in the global
9806 	 * list of ports and, if it is the first port to attach, it start the
9807 	 * fcp_watchdog_tick.
9808 	 *
9809 	 * Why put this new port in the global before we are done attaching it?
9810 	 * We are actually making the structure globally known before we are
9811 	 * done attaching it.  The reason for that is: because of the code that
9812 	 * follows.  At this point the resources to handle the port are
9813 	 * allocated.  This function is now going to do the following:
9814 	 *
9815 	 *   1) It is going to try to register with the name server advertizing
9816 	 *	the new FCP capability of the port.
9817 	 *   2) It is going to play the role of the fp/fctl layer by building
9818 	 *	a list of worlwide names reachable through this port and call
9819 	 *	itself on fcp_statec_callback().  That requires the port to
9820 	 *	be part of the global list.
9821 	 */
9822 	mutex_enter(&fcp_global_mutex);
9823 	if (fcp_port_head == NULL) {
9824 		fcp_read_blacklist(pinfo->port_dip, &fcp_lun_blacklist);
9825 	}
9826 	pptr->port_next = fcp_port_head;
9827 	fcp_port_head = pptr;
9828 	soft_state_linked++;
9829 
9830 	if (fcp_watchdog_init++ == 0) {
9831 		fcp_watchdog_tick = fcp_watchdog_timeout *
9832 		    drv_usectohz(1000000);
9833 		fcp_watchdog_id = timeout(fcp_watch, NULL,
9834 		    fcp_watchdog_tick);
9835 	}
9836 	mutex_exit(&fcp_global_mutex);
9837 
9838 	/*
9839 	 * Here an attempt is made to register with the name server, the new
9840 	 * FCP capability.  That is done using an RTF_ID to the name server.
9841 	 * It is done synchronously.  The function fcp_do_ns_registry()
9842 	 * doesn't return till the name server responded.
9843 	 * On failures, just ignore it for now and it will get retried during
9844 	 * state change callbacks. We'll set a flag to show this failure
9845 	 */
9846 	if (fcp_do_ns_registry(pptr, s_id)) {
9847 		mutex_enter(&pptr->port_mutex);
9848 		pptr->port_state |= FCP_STATE_NS_REG_FAILED;
9849 		mutex_exit(&pptr->port_mutex);
9850 	} else {
9851 		mutex_enter(&pptr->port_mutex);
9852 		pptr->port_state &= ~(FCP_STATE_NS_REG_FAILED);
9853 		mutex_exit(&pptr->port_mutex);
9854 	}
9855 
9856 	/*
9857 	 * Lookup for boot WWN property
9858 	 */
9859 	if (modrootloaded != 1) {
9860 		if ((ddi_prop_lookup_byte_array(DDI_DEV_T_ANY,
9861 		    ddi_get_parent(pinfo->port_dip),
9862 		    DDI_PROP_DONTPASS, OBP_BOOT_WWN,
9863 		    &boot_wwn, &nbytes) == DDI_PROP_SUCCESS) &&
9864 		    (nbytes == FC_WWN_SIZE)) {
9865 			bcopy(boot_wwn, pptr->port_boot_wwn, FC_WWN_SIZE);
9866 		}
9867 		if (boot_wwn) {
9868 			ddi_prop_free(boot_wwn);
9869 		}
9870 	}
9871 
9872 	/*
9873 	 * Handle various topologies and link states.
9874 	 */
9875 	switch (FC_PORT_STATE_MASK(pptr->port_phys_state)) {
9876 	case FC_STATE_OFFLINE:
9877 
9878 		/*
9879 		 * we're attaching a port where the link is offline
9880 		 *
9881 		 * Wait for ONLINE, at which time a state
9882 		 * change will cause a statec_callback
9883 		 *
9884 		 * in the mean time, do not do anything
9885 		 */
9886 		res = DDI_SUCCESS;
9887 		pptr->port_state |= FCP_STATE_OFFLINE;
9888 		break;
9889 
9890 	case FC_STATE_ONLINE: {
9891 		if (pptr->port_topology == FC_TOP_UNKNOWN) {
9892 			(void) fcp_linkreset(pptr, NULL, KM_NOSLEEP);
9893 			res = DDI_SUCCESS;
9894 			break;
9895 		}
9896 		/*
9897 		 * discover devices and create nodes (a private
9898 		 * loop or point-to-point)
9899 		 */
9900 		ASSERT(pptr->port_topology != FC_TOP_UNKNOWN);
9901 
9902 		/*
9903 		 * At this point we are going to build a list of all the ports
9904 		 * that	can be reached through this local port.	 It looks like
9905 		 * we cannot handle more than FCP_MAX_DEVICES per local port
9906 		 * (128).
9907 		 */
9908 		if ((tmp_list = (fc_portmap_t *)kmem_zalloc(
9909 		    sizeof (fc_portmap_t) * FCP_MAX_DEVICES,
9910 		    KM_NOSLEEP)) == NULL) {
9911 			fcp_log(CE_WARN, pptr->port_dip,
9912 			    "!fcp%d: failed to allocate portmap",
9913 			    instance);
9914 			goto fail;
9915 		}
9916 
9917 		/*
9918 		 * fc_ulp_getportmap() is going to provide us with the list of
9919 		 * remote ports in the buffer we just allocated.  The way the
9920 		 * list is going to be retrieved depends on the topology.
9921 		 * However, if we are connected to a Fabric, a name server
9922 		 * request may be sent to get the list of FCP capable ports.
9923 		 * It should be noted that is the case the request is
9924 		 * synchronous.	 This means we are stuck here till the name
9925 		 * server replies.  A lot of things can change during that time
9926 		 * and including, may be, being called on
9927 		 * fcp_statec_callback() for different reasons. I'm not sure
9928 		 * the code can handle that.
9929 		 */
9930 		max_cnt = FCP_MAX_DEVICES;
9931 		alloc_cnt = FCP_MAX_DEVICES;
9932 		if ((res = fc_ulp_getportmap(pptr->port_fp_handle,
9933 		    &tmp_list, &max_cnt, FC_ULP_PLOGI_PRESERVE)) !=
9934 		    FC_SUCCESS) {
9935 			caddr_t msg;
9936 
9937 			(void) fc_ulp_error(res, &msg);
9938 
9939 			/*
9940 			 * this	 just means the transport is
9941 			 * busy perhaps building a portmap so,
9942 			 * for now, succeed this port attach
9943 			 * when the transport has a new map,
9944 			 * it'll send us a state change then
9945 			 */
9946 			fcp_log(CE_WARN, pptr->port_dip,
9947 			    "!failed to get port map : %s", msg);
9948 
9949 			res = DDI_SUCCESS;
9950 			break;	/* go return result */
9951 		}
9952 		if (max_cnt > alloc_cnt) {
9953 			alloc_cnt = max_cnt;
9954 		}
9955 
9956 		/*
9957 		 * We are now going to call fcp_statec_callback() ourselves.
9958 		 * By issuing this call we are trying to kick off the enumera-
9959 		 * tion process.
9960 		 */
9961 		/*
9962 		 * let the state change callback do the SCSI device
9963 		 * discovery and create the devinfos
9964 		 */
9965 		fcp_statec_callback(ulph, pptr->port_fp_handle,
9966 		    pptr->port_phys_state, pptr->port_topology, tmp_list,
9967 		    max_cnt, pptr->port_id);
9968 
9969 		res = DDI_SUCCESS;
9970 		break;
9971 	}
9972 
9973 	default:
9974 		/* unknown port state */
9975 		fcp_log(CE_WARN, pptr->port_dip,
9976 		    "!fcp%d: invalid port state at attach=0x%x",
9977 		    instance, pptr->port_phys_state);
9978 
9979 		mutex_enter(&pptr->port_mutex);
9980 		pptr->port_phys_state = FCP_STATE_OFFLINE;
9981 		mutex_exit(&pptr->port_mutex);
9982 
9983 		res = DDI_SUCCESS;
9984 		break;
9985 	}
9986 
9987 	/* free temp list if used */
9988 	if (tmp_list != NULL) {
9989 		kmem_free(tmp_list, sizeof (fc_portmap_t) * alloc_cnt);
9990 	}
9991 
9992 	/* note the attach time */
9993 	pptr->port_attach_time = lbolt64;
9994 
9995 	/* all done */
9996 	return (res);
9997 
9998 	/* a failure we have to clean up after */
9999 fail:
10000 	fcp_log(CE_WARN, pptr->port_dip, "!failed to attach to port");
10001 
10002 	if (soft_state_linked) {
10003 		/* remove this fcp_port from the linked list */
10004 		(void) fcp_soft_state_unlink(pptr);
10005 	}
10006 
10007 	/* unbind and free event set */
10008 	if (pptr->port_ndi_event_hdl) {
10009 		if (event_bind) {
10010 			(void) ndi_event_unbind_set(pptr->port_ndi_event_hdl,
10011 			    &pptr->port_ndi_events, NDI_SLEEP);
10012 		}
10013 		(void) ndi_event_free_hdl(pptr->port_ndi_event_hdl);
10014 	}
10015 
10016 	if (pptr->port_ndi_event_defs) {
10017 		(void) kmem_free(pptr->port_ndi_event_defs,
10018 		    sizeof (fcp_ndi_event_defs));
10019 	}
10020 
10021 	/*
10022 	 * Clean up mpxio stuff
10023 	 */
10024 	if (pptr->port_mpxio) {
10025 		(void) mdi_phci_unregister(pptr->port_dip, 0);
10026 		pptr->port_mpxio--;
10027 	}
10028 
10029 	/* undo SCSI HBA setup */
10030 	if (hba_attached) {
10031 		(void) scsi_hba_detach(pptr->port_dip);
10032 	}
10033 	if (pptr->port_tran != NULL) {
10034 		scsi_hba_tran_free(pptr->port_tran);
10035 	}
10036 
10037 	mutex_enter(&fcp_global_mutex);
10038 
10039 	/*
10040 	 * We check soft_state_linked, because it is incremented right before
10041 	 * we call increment fcp_watchdog_init.	 Therefore, we know if
10042 	 * soft_state_linked is still FALSE, we do not want to decrement
10043 	 * fcp_watchdog_init or possibly call untimeout.
10044 	 */
10045 
10046 	if (soft_state_linked) {
10047 		if (--fcp_watchdog_init == 0) {
10048 			timeout_id_t	tid = fcp_watchdog_id;
10049 
10050 			mutex_exit(&fcp_global_mutex);
10051 			(void) untimeout(tid);
10052 		} else {
10053 			mutex_exit(&fcp_global_mutex);
10054 		}
10055 	} else {
10056 		mutex_exit(&fcp_global_mutex);
10057 	}
10058 
10059 	if (mutex_initted) {
10060 		mutex_destroy(&pptr->port_mutex);
10061 		mutex_destroy(&pptr->port_pkt_mutex);
10062 	}
10063 
10064 	if (tmp_list != NULL) {
10065 		kmem_free(tmp_list, sizeof (fc_portmap_t) * alloc_cnt);
10066 	}
10067 
10068 	/* this makes pptr invalid */
10069 	ddi_soft_state_free(fcp_softstate, instance);
10070 
10071 	return (DDI_FAILURE);
10072 }
10073 
10074 
10075 static int
10076 fcp_handle_port_detach(struct fcp_port *pptr, int flag, int instance)
10077 {
10078 	int count = 0;
10079 
10080 	mutex_enter(&pptr->port_mutex);
10081 
10082 	/*
10083 	 * if the port is powered down or suspended, nothing else
10084 	 * to do; just return.
10085 	 */
10086 	if (flag != FCP_STATE_DETACHING) {
10087 		if (pptr->port_state & (FCP_STATE_POWER_DOWN |
10088 		    FCP_STATE_SUSPENDED)) {
10089 			pptr->port_state |= flag;
10090 			mutex_exit(&pptr->port_mutex);
10091 			return (FC_SUCCESS);
10092 		}
10093 	}
10094 
10095 	if (pptr->port_state & FCP_STATE_IN_MDI) {
10096 		mutex_exit(&pptr->port_mutex);
10097 		return (FC_FAILURE);
10098 	}
10099 
10100 	FCP_TRACE(fcp_logq, pptr->port_instbuf,
10101 	    fcp_trace, FCP_BUF_LEVEL_2, 0,
10102 	    "fcp_handle_port_detach: port is detaching");
10103 
10104 	pptr->port_state |= flag;
10105 
10106 	/*
10107 	 * Wait for any ongoing reconfig/ipkt to complete, that
10108 	 * ensures the freeing to targets/luns is safe.
10109 	 * No more ref to this port should happen from statec/ioctl
10110 	 * after that as it was removed from the global port list.
10111 	 */
10112 	while (pptr->port_tmp_cnt || pptr->port_ipkt_cnt ||
10113 	    (pptr->port_state & FCP_STATE_IN_WATCHDOG)) {
10114 		/*
10115 		 * Let's give sufficient time for reconfig/ipkt
10116 		 * to complete.
10117 		 */
10118 		if (count++ >= FCP_ICMD_DEADLINE) {
10119 			break;
10120 		}
10121 		mutex_exit(&pptr->port_mutex);
10122 		delay(drv_usectohz(1000000));
10123 		mutex_enter(&pptr->port_mutex);
10124 	}
10125 
10126 	/*
10127 	 * if the driver is still busy then fail to
10128 	 * suspend/power down.
10129 	 */
10130 	if (pptr->port_tmp_cnt || pptr->port_ipkt_cnt ||
10131 	    (pptr->port_state & FCP_STATE_IN_WATCHDOG)) {
10132 		pptr->port_state &= ~flag;
10133 		mutex_exit(&pptr->port_mutex);
10134 		return (FC_FAILURE);
10135 	}
10136 
10137 	if (flag == FCP_STATE_DETACHING) {
10138 		pptr = fcp_soft_state_unlink(pptr);
10139 		ASSERT(pptr != NULL);
10140 	}
10141 
10142 	pptr->port_link_cnt++;
10143 	pptr->port_state |= FCP_STATE_OFFLINE;
10144 	pptr->port_state &= ~(FCP_STATE_ONLINING | FCP_STATE_ONLINE);
10145 
10146 	fcp_update_state(pptr, (FCP_LUN_BUSY | FCP_LUN_MARK),
10147 	    FCP_CAUSE_LINK_DOWN);
10148 	mutex_exit(&pptr->port_mutex);
10149 
10150 	/* kill watch dog timer if we're the last */
10151 	mutex_enter(&fcp_global_mutex);
10152 	if (--fcp_watchdog_init == 0) {
10153 		timeout_id_t	tid = fcp_watchdog_id;
10154 		mutex_exit(&fcp_global_mutex);
10155 		(void) untimeout(tid);
10156 	} else {
10157 		mutex_exit(&fcp_global_mutex);
10158 	}
10159 
10160 	/* clean up the port structures */
10161 	if (flag == FCP_STATE_DETACHING) {
10162 		fcp_cleanup_port(pptr, instance);
10163 	}
10164 
10165 	return (FC_SUCCESS);
10166 }
10167 
10168 
10169 static void
10170 fcp_cleanup_port(struct fcp_port *pptr, int instance)
10171 {
10172 	ASSERT(pptr != NULL);
10173 
10174 	/* unbind and free event set */
10175 	if (pptr->port_ndi_event_hdl) {
10176 		(void) ndi_event_unbind_set(pptr->port_ndi_event_hdl,
10177 		    &pptr->port_ndi_events, NDI_SLEEP);
10178 		(void) ndi_event_free_hdl(pptr->port_ndi_event_hdl);
10179 	}
10180 
10181 	if (pptr->port_ndi_event_defs) {
10182 		(void) kmem_free(pptr->port_ndi_event_defs,
10183 		    sizeof (fcp_ndi_event_defs));
10184 	}
10185 
10186 	/* free the lun/target structures and devinfos */
10187 	fcp_free_targets(pptr);
10188 
10189 	/*
10190 	 * Clean up mpxio stuff
10191 	 */
10192 	if (pptr->port_mpxio) {
10193 		(void) mdi_phci_unregister(pptr->port_dip, 0);
10194 		pptr->port_mpxio--;
10195 	}
10196 
10197 	/* clean up SCSA stuff */
10198 	(void) scsi_hba_detach(pptr->port_dip);
10199 	if (pptr->port_tran != NULL) {
10200 		scsi_hba_tran_free(pptr->port_tran);
10201 	}
10202 
10203 #ifdef	KSTATS_CODE
10204 	/* clean up kstats */
10205 	if (pptr->fcp_ksp != NULL) {
10206 		kstat_delete(pptr->fcp_ksp);
10207 	}
10208 #endif
10209 
10210 	/* clean up soft state mutexes/condition variables */
10211 	mutex_destroy(&pptr->port_mutex);
10212 	mutex_destroy(&pptr->port_pkt_mutex);
10213 
10214 	/* all done with soft state */
10215 	ddi_soft_state_free(fcp_softstate, instance);
10216 }
10217 
10218 /*
10219  *     Function: fcp_kmem_cache_constructor
10220  *
10221  *  Description: This function allocates and initializes the resources required
10222  *		 to build a scsi_pkt structure the target driver.  The result
10223  *		 of the allocation and initialization will be cached in the
10224  *		 memory cache.	As DMA resources may be allocated here, that
10225  *		 means DMA resources will be tied up in the cache manager.
10226  *		 This is a tradeoff that has been made for performance reasons.
10227  *
10228  *     Argument: *buf		Memory to preinitialize.
10229  *		 *arg		FCP port structure (fcp_port).
10230  *		 kmflags	Value passed to kmem_cache_alloc() and
10231  *				propagated to the constructor.
10232  *
10233  * Return Value: 0	Allocation/Initialization was successful.
10234  *		 -1	Allocation or Initialization failed.
10235  *
10236  *
10237  * If the returned value is 0, the buffer is initialized like this:
10238  *
10239  *		    +================================+
10240  *	     +----> |	      struct scsi_pkt	     |
10241  *	     |	    |				     |
10242  *	     | +--- | pkt_ha_private		     |
10243  *	     | |    |				     |
10244  *	     | |    +================================+
10245  *	     | |
10246  *	     | |    +================================+
10247  *	     | +--> |	    struct fcp_pkt	     | <---------+
10248  *	     |	    |				     |		 |
10249  *	     +----- | cmd_pkt			     |		 |
10250  *		    |			  cmd_fp_pkt | ---+	 |
10251  *	  +-------->| cmd_fcp_rsp[]		     |	  |	 |
10252  *	  |    +--->| cmd_fcp_cmd[]		     |	  |	 |
10253  *	  |    |    |--------------------------------|	  |	 |
10254  *	  |    |    |	      struct fc_packet	     | <--+	 |
10255  *	  |    |    |				     |		 |
10256  *	  |    |    |		     pkt_ulp_private | ----------+
10257  *	  |    |    |		     pkt_fca_private | -----+
10258  *	  |    |    |		     pkt_data_cookie | ---+ |
10259  *	  |    |    | pkt_cmdlen		     |	  | |
10260  *	  |    |(a) | pkt_rsplen		     |	  | |
10261  *	  |    +----| .......... pkt_cmd ........... | ---|-|---------------+
10262  *	  |	(b) |		      pkt_cmd_cookie | ---|-|----------+    |
10263  *	  +---------| .......... pkt_resp .......... | ---|-|------+   |    |
10264  *		    |		     pkt_resp_cookie | ---|-|--+   |   |    |
10265  *		    | pkt_cmd_dma		     |	  | |  |   |   |    |
10266  *		    | pkt_cmd_acc		     |	  | |  |   |   |    |
10267  *		    +================================+	  | |  |   |   |    |
10268  *		    |	      dma_cookies	     | <--+ |  |   |   |    |
10269  *		    |				     |	    |  |   |   |    |
10270  *		    +================================+	    |  |   |   |    |
10271  *		    |	      fca_private	     | <----+  |   |   |    |
10272  *		    |				     |	       |   |   |    |
10273  *		    +================================+	       |   |   |    |
10274  *							       |   |   |    |
10275  *							       |   |   |    |
10276  *		    +================================+	 (d)   |   |   |    |
10277  *		    |	     fcp_resp cookies	     | <-------+   |   |    |
10278  *		    |				     |		   |   |    |
10279  *		    +================================+		   |   |    |
10280  *								   |   |    |
10281  *		    +================================+	 (d)	   |   |    |
10282  *		    |		fcp_resp	     | <-----------+   |    |
10283  *		    |	(DMA resources associated)   |		       |    |
10284  *		    +================================+		       |    |
10285  *								       |    |
10286  *								       |    |
10287  *								       |    |
10288  *		    +================================+	 (c)	       |    |
10289  *		    |	     fcp_cmd cookies	     | <---------------+    |
10290  *		    |				     |			    |
10291  *		    +================================+			    |
10292  *									    |
10293  *		    +================================+	 (c)		    |
10294  *		    |		 fcp_cmd	     | <--------------------+
10295  *		    |	(DMA resources associated)   |
10296  *		    +================================+
10297  *
10298  * (a) Only if DMA is NOT used for the FCP_CMD buffer.
10299  * (b) Only if DMA is NOT used for the FCP_RESP buffer
10300  * (c) Only if DMA is used for the FCP_CMD buffer.
10301  * (d) Only if DMA is used for the FCP_RESP buffer
10302  */
10303 static int
10304 fcp_kmem_cache_constructor(struct scsi_pkt *pkt, scsi_hba_tran_t *tran,
10305     int kmflags)
10306 {
10307 	struct fcp_pkt	*cmd;
10308 	struct fcp_port	*pptr;
10309 	fc_packet_t	*fpkt;
10310 
10311 	pptr = (struct fcp_port *)tran->tran_hba_private;
10312 	cmd = (struct fcp_pkt *)pkt->pkt_ha_private;
10313 	bzero(cmd, tran->tran_hba_len);
10314 
10315 	cmd->cmd_pkt = pkt;
10316 	pkt->pkt_cdbp = cmd->cmd_fcp_cmd.fcp_cdb;
10317 	fpkt = (fc_packet_t *)&cmd->cmd_fc_packet;
10318 	cmd->cmd_fp_pkt = fpkt;
10319 
10320 	cmd->cmd_pkt->pkt_ha_private = (opaque_t)cmd;
10321 	cmd->cmd_fp_pkt->pkt_ulp_private = (opaque_t)cmd;
10322 	cmd->cmd_fp_pkt->pkt_fca_private = (opaque_t)((caddr_t)cmd +
10323 	    sizeof (struct fcp_pkt) + pptr->port_dmacookie_sz);
10324 
10325 	fpkt->pkt_data_cookie = (ddi_dma_cookie_t *)((caddr_t)cmd +
10326 	    sizeof (struct fcp_pkt));
10327 
10328 	fpkt->pkt_cmdlen = sizeof (struct fcp_cmd);
10329 	fpkt->pkt_rsplen = FCP_MAX_RSP_IU_SIZE;
10330 
10331 	if (pptr->port_fcp_dma == FC_NO_DVMA_SPACE) {
10332 		/*
10333 		 * The underlying HBA doesn't want to DMA the fcp_cmd or
10334 		 * fcp_resp.  The transfer of information will be done by
10335 		 * bcopy.
10336 		 * The naming of the flags (that is actually a value) is
10337 		 * unfortunate.	 FC_NO_DVMA_SPACE doesn't mean "NO VIRTUAL
10338 		 * DMA" but instead "NO DMA".
10339 		 */
10340 		fpkt->pkt_resp_acc = fpkt->pkt_cmd_acc = NULL;
10341 		fpkt->pkt_cmd = (caddr_t)&cmd->cmd_fcp_cmd;
10342 		fpkt->pkt_resp = cmd->cmd_fcp_rsp;
10343 	} else {
10344 		/*
10345 		 * The underlying HBA will dma the fcp_cmd buffer and fcp_resp
10346 		 * buffer.  A buffer is allocated for each one the ddi_dma_*
10347 		 * interfaces.
10348 		 */
10349 		if (fcp_alloc_cmd_resp(pptr, fpkt, kmflags) != FC_SUCCESS) {
10350 			return (-1);
10351 		}
10352 	}
10353 
10354 	return (0);
10355 }
10356 
10357 /*
10358  *     Function: fcp_kmem_cache_destructor
10359  *
10360  *  Description: Called by the destructor of the cache managed by SCSA.
10361  *		 All the resources pre-allocated in fcp_pkt_constructor
10362  *		 and the data also pre-initialized in fcp_pkt_constructor
10363  *		 are freed and uninitialized here.
10364  *
10365  *     Argument: *buf		Memory to uninitialize.
10366  *		 *arg		FCP port structure (fcp_port).
10367  *
10368  * Return Value: None
10369  *
10370  *	Context: kernel
10371  */
10372 static void
10373 fcp_kmem_cache_destructor(struct scsi_pkt *pkt, scsi_hba_tran_t *tran)
10374 {
10375 	struct fcp_pkt	*cmd;
10376 	struct fcp_port	*pptr;
10377 
10378 	pptr = (struct fcp_port *)(tran->tran_hba_private);
10379 	cmd = pkt->pkt_ha_private;
10380 
10381 	if (pptr->port_fcp_dma != FC_NO_DVMA_SPACE) {
10382 		/*
10383 		 * If DMA was used to transfer the FCP_CMD and FCP_RESP, the
10384 		 * buffer and DMA resources allocated to do so are released.
10385 		 */
10386 		fcp_free_cmd_resp(pptr, cmd->cmd_fp_pkt);
10387 	}
10388 }
10389 
10390 /*
10391  *     Function: fcp_alloc_cmd_resp
10392  *
10393  *  Description: This function allocated an FCP_CMD and FCP_RESP buffer that
10394  *		 will be DMAed by the HBA.  The buffer is allocated applying
10395  *		 the DMA requirements for the HBA.  The buffers allocated will
10396  *		 also be bound.	 DMA resources are allocated in the process.
10397  *		 They will be released by fcp_free_cmd_resp().
10398  *
10399  *     Argument: *pptr	FCP port.
10400  *		 *fpkt	fc packet for which the cmd and resp packet should be
10401  *			allocated.
10402  *		 flags	Allocation flags.
10403  *
10404  * Return Value: FC_FAILURE
10405  *		 FC_SUCCESS
10406  *
10407  *	Context: User or Kernel context only if flags == KM_SLEEP.
10408  *		 Interrupt context if the KM_SLEEP is not specified.
10409  */
10410 static int
10411 fcp_alloc_cmd_resp(struct fcp_port *pptr, fc_packet_t *fpkt, int flags)
10412 {
10413 	int			rval;
10414 	int			cmd_len;
10415 	int			resp_len;
10416 	ulong_t			real_len;
10417 	int			(*cb) (caddr_t);
10418 	ddi_dma_cookie_t	pkt_cookie;
10419 	ddi_dma_cookie_t	*cp;
10420 	uint32_t		cnt;
10421 
10422 	cb = (flags == KM_SLEEP) ? DDI_DMA_SLEEP : DDI_DMA_DONTWAIT;
10423 
10424 	cmd_len = fpkt->pkt_cmdlen;
10425 	resp_len = fpkt->pkt_rsplen;
10426 
10427 	ASSERT(fpkt->pkt_cmd_dma == NULL);
10428 
10429 	/* Allocation of a DMA handle used in subsequent calls. */
10430 	if (ddi_dma_alloc_handle(pptr->port_dip, &pptr->port_cmd_dma_attr,
10431 	    cb, NULL, &fpkt->pkt_cmd_dma) != DDI_SUCCESS) {
10432 		return (FC_FAILURE);
10433 	}
10434 
10435 	/* A buffer is allocated that satisfies the DMA requirements. */
10436 	rval = ddi_dma_mem_alloc(fpkt->pkt_cmd_dma, cmd_len,
10437 	    &pptr->port_dma_acc_attr, DDI_DMA_CONSISTENT, cb, NULL,
10438 	    (caddr_t *)&fpkt->pkt_cmd, &real_len, &fpkt->pkt_cmd_acc);
10439 
10440 	if (rval != DDI_SUCCESS) {
10441 		ddi_dma_free_handle(&fpkt->pkt_cmd_dma);
10442 		return (FC_FAILURE);
10443 	}
10444 
10445 	if (real_len < cmd_len) {
10446 		ddi_dma_mem_free(&fpkt->pkt_cmd_acc);
10447 		ddi_dma_free_handle(&fpkt->pkt_cmd_dma);
10448 		return (FC_FAILURE);
10449 	}
10450 
10451 	/* The buffer allocated is DMA bound. */
10452 	rval = ddi_dma_addr_bind_handle(fpkt->pkt_cmd_dma, NULL,
10453 	    fpkt->pkt_cmd, real_len, DDI_DMA_WRITE | DDI_DMA_CONSISTENT,
10454 	    cb, NULL, &pkt_cookie, &fpkt->pkt_cmd_cookie_cnt);
10455 
10456 	if (rval != DDI_DMA_MAPPED) {
10457 		ddi_dma_mem_free(&fpkt->pkt_cmd_acc);
10458 		ddi_dma_free_handle(&fpkt->pkt_cmd_dma);
10459 		return (FC_FAILURE);
10460 	}
10461 
10462 	if (fpkt->pkt_cmd_cookie_cnt >
10463 	    pptr->port_cmd_dma_attr.dma_attr_sgllen) {
10464 		(void) ddi_dma_unbind_handle(fpkt->pkt_cmd_dma);
10465 		ddi_dma_mem_free(&fpkt->pkt_cmd_acc);
10466 		ddi_dma_free_handle(&fpkt->pkt_cmd_dma);
10467 		return (FC_FAILURE);
10468 	}
10469 
10470 	ASSERT(fpkt->pkt_cmd_cookie_cnt != 0);
10471 
10472 	/*
10473 	 * The buffer where the scatter/gather list is going to be built is
10474 	 * allocated.
10475 	 */
10476 	cp = fpkt->pkt_cmd_cookie = (ddi_dma_cookie_t *)kmem_alloc(
10477 	    fpkt->pkt_cmd_cookie_cnt * sizeof (pkt_cookie),
10478 	    KM_NOSLEEP);
10479 
10480 	if (cp == NULL) {
10481 		(void) ddi_dma_unbind_handle(fpkt->pkt_cmd_dma);
10482 		ddi_dma_mem_free(&fpkt->pkt_cmd_acc);
10483 		ddi_dma_free_handle(&fpkt->pkt_cmd_dma);
10484 		return (FC_FAILURE);
10485 	}
10486 
10487 	/*
10488 	 * The scatter/gather list for the buffer we just allocated is built
10489 	 * here.
10490 	 */
10491 	*cp = pkt_cookie;
10492 	cp++;
10493 
10494 	for (cnt = 1; cnt < fpkt->pkt_cmd_cookie_cnt; cnt++, cp++) {
10495 		ddi_dma_nextcookie(fpkt->pkt_cmd_dma,
10496 		    &pkt_cookie);
10497 		*cp = pkt_cookie;
10498 	}
10499 
10500 	ASSERT(fpkt->pkt_resp_dma == NULL);
10501 	if (ddi_dma_alloc_handle(pptr->port_dip, &pptr->port_resp_dma_attr,
10502 	    cb, NULL, &fpkt->pkt_resp_dma) != DDI_SUCCESS) {
10503 		(void) ddi_dma_unbind_handle(fpkt->pkt_cmd_dma);
10504 		ddi_dma_mem_free(&fpkt->pkt_cmd_acc);
10505 		ddi_dma_free_handle(&fpkt->pkt_cmd_dma);
10506 		return (FC_FAILURE);
10507 	}
10508 
10509 	rval = ddi_dma_mem_alloc(fpkt->pkt_resp_dma, resp_len,
10510 	    &pptr->port_dma_acc_attr, DDI_DMA_CONSISTENT, cb, NULL,
10511 	    (caddr_t *)&fpkt->pkt_resp, &real_len,
10512 	    &fpkt->pkt_resp_acc);
10513 
10514 	if (rval != DDI_SUCCESS) {
10515 		ddi_dma_free_handle(&fpkt->pkt_resp_dma);
10516 		(void) ddi_dma_unbind_handle(fpkt->pkt_cmd_dma);
10517 		ddi_dma_mem_free(&fpkt->pkt_cmd_acc);
10518 		ddi_dma_free_handle(&fpkt->pkt_cmd_dma);
10519 		kmem_free(fpkt->pkt_cmd_cookie,
10520 		    fpkt->pkt_cmd_cookie_cnt * sizeof (pkt_cookie));
10521 		return (FC_FAILURE);
10522 	}
10523 
10524 	if (real_len < resp_len) {
10525 		ddi_dma_mem_free(&fpkt->pkt_resp_acc);
10526 		ddi_dma_free_handle(&fpkt->pkt_resp_dma);
10527 		(void) ddi_dma_unbind_handle(fpkt->pkt_cmd_dma);
10528 		ddi_dma_mem_free(&fpkt->pkt_cmd_acc);
10529 		ddi_dma_free_handle(&fpkt->pkt_cmd_dma);
10530 		kmem_free(fpkt->pkt_cmd_cookie,
10531 		    fpkt->pkt_cmd_cookie_cnt * sizeof (pkt_cookie));
10532 		return (FC_FAILURE);
10533 	}
10534 
10535 	rval = ddi_dma_addr_bind_handle(fpkt->pkt_resp_dma, NULL,
10536 	    fpkt->pkt_resp, real_len, DDI_DMA_READ | DDI_DMA_CONSISTENT,
10537 	    cb, NULL, &pkt_cookie, &fpkt->pkt_resp_cookie_cnt);
10538 
10539 	if (rval != DDI_DMA_MAPPED) {
10540 		ddi_dma_mem_free(&fpkt->pkt_resp_acc);
10541 		ddi_dma_free_handle(&fpkt->pkt_resp_dma);
10542 		(void) ddi_dma_unbind_handle(fpkt->pkt_cmd_dma);
10543 		ddi_dma_mem_free(&fpkt->pkt_cmd_acc);
10544 		ddi_dma_free_handle(&fpkt->pkt_cmd_dma);
10545 		kmem_free(fpkt->pkt_cmd_cookie,
10546 		    fpkt->pkt_cmd_cookie_cnt * sizeof (pkt_cookie));
10547 		return (FC_FAILURE);
10548 	}
10549 
10550 	if (fpkt->pkt_resp_cookie_cnt >
10551 	    pptr->port_resp_dma_attr.dma_attr_sgllen) {
10552 		ddi_dma_mem_free(&fpkt->pkt_resp_acc);
10553 		ddi_dma_free_handle(&fpkt->pkt_resp_dma);
10554 		(void) ddi_dma_unbind_handle(fpkt->pkt_cmd_dma);
10555 		ddi_dma_mem_free(&fpkt->pkt_cmd_acc);
10556 		ddi_dma_free_handle(&fpkt->pkt_cmd_dma);
10557 		kmem_free(fpkt->pkt_cmd_cookie,
10558 		    fpkt->pkt_cmd_cookie_cnt * sizeof (pkt_cookie));
10559 		return (FC_FAILURE);
10560 	}
10561 
10562 	ASSERT(fpkt->pkt_resp_cookie_cnt != 0);
10563 
10564 	cp = fpkt->pkt_resp_cookie = (ddi_dma_cookie_t *)kmem_alloc(
10565 	    fpkt->pkt_resp_cookie_cnt * sizeof (pkt_cookie),
10566 	    KM_NOSLEEP);
10567 
10568 	if (cp == NULL) {
10569 		ddi_dma_mem_free(&fpkt->pkt_resp_acc);
10570 		ddi_dma_free_handle(&fpkt->pkt_resp_dma);
10571 		(void) ddi_dma_unbind_handle(fpkt->pkt_cmd_dma);
10572 		ddi_dma_mem_free(&fpkt->pkt_cmd_acc);
10573 		ddi_dma_free_handle(&fpkt->pkt_cmd_dma);
10574 		kmem_free(fpkt->pkt_cmd_cookie,
10575 		    fpkt->pkt_cmd_cookie_cnt * sizeof (pkt_cookie));
10576 		return (FC_FAILURE);
10577 	}
10578 
10579 	*cp = pkt_cookie;
10580 	cp++;
10581 
10582 	for (cnt = 1; cnt < fpkt->pkt_resp_cookie_cnt; cnt++, cp++) {
10583 		ddi_dma_nextcookie(fpkt->pkt_resp_dma,
10584 		    &pkt_cookie);
10585 		*cp = pkt_cookie;
10586 	}
10587 
10588 	return (FC_SUCCESS);
10589 }
10590 
10591 /*
10592  *     Function: fcp_free_cmd_resp
10593  *
10594  *  Description: This function releases the FCP_CMD and FCP_RESP buffer
10595  *		 allocated by fcp_alloc_cmd_resp() and all the resources
10596  *		 associated with them.	That includes the DMA resources and the
10597  *		 buffer allocated for the cookies of each one of them.
10598  *
10599  *     Argument: *pptr		FCP port context.
10600  *		 *fpkt		fc packet containing the cmd and resp packet
10601  *				to be released.
10602  *
10603  * Return Value: None
10604  *
10605  *	Context: Interrupt, User and Kernel context.
10606  */
10607 /* ARGSUSED */
10608 static void
10609 fcp_free_cmd_resp(struct fcp_port *pptr, fc_packet_t *fpkt)
10610 {
10611 	ASSERT(fpkt->pkt_resp_dma != NULL && fpkt->pkt_cmd_dma != NULL);
10612 
10613 	if (fpkt->pkt_resp_dma) {
10614 		(void) ddi_dma_unbind_handle(fpkt->pkt_resp_dma);
10615 		ddi_dma_mem_free(&fpkt->pkt_resp_acc);
10616 		ddi_dma_free_handle(&fpkt->pkt_resp_dma);
10617 	}
10618 
10619 	if (fpkt->pkt_resp_cookie) {
10620 		kmem_free(fpkt->pkt_resp_cookie,
10621 		    fpkt->pkt_resp_cookie_cnt * sizeof (ddi_dma_cookie_t));
10622 		fpkt->pkt_resp_cookie = NULL;
10623 	}
10624 
10625 	if (fpkt->pkt_cmd_dma) {
10626 		(void) ddi_dma_unbind_handle(fpkt->pkt_cmd_dma);
10627 		ddi_dma_mem_free(&fpkt->pkt_cmd_acc);
10628 		ddi_dma_free_handle(&fpkt->pkt_cmd_dma);
10629 	}
10630 
10631 	if (fpkt->pkt_cmd_cookie) {
10632 		kmem_free(fpkt->pkt_cmd_cookie,
10633 		    fpkt->pkt_cmd_cookie_cnt * sizeof (ddi_dma_cookie_t));
10634 		fpkt->pkt_cmd_cookie = NULL;
10635 	}
10636 }
10637 
10638 
10639 /*
10640  * called by the transport to do our own target initialization
10641  *
10642  * can acquire and release the global mutex
10643  */
10644 /* ARGSUSED */
10645 static int
10646 fcp_phys_tgt_init(dev_info_t *hba_dip, dev_info_t *tgt_dip,
10647     scsi_hba_tran_t *hba_tran, struct scsi_device *sd)
10648 {
10649 	int			*words;
10650 	uchar_t			*bytes;
10651 	uint_t			nbytes;
10652 	uint_t			nwords;
10653 	struct fcp_tgt	*ptgt;
10654 	struct fcp_lun	*plun;
10655 	struct fcp_port	*pptr = (struct fcp_port *)
10656 	    hba_tran->tran_hba_private;
10657 
10658 	ASSERT(pptr != NULL);
10659 
10660 	FCP_DTRACE(fcp_logq, pptr->port_instbuf, fcp_trace,
10661 	    FCP_BUF_LEVEL_8, 0,
10662 	    "fcp_phys_tgt_init: called for %s (instance %d)",
10663 	    ddi_get_name(tgt_dip), ddi_get_instance(tgt_dip));
10664 
10665 	/* get our port WWN property */
10666 	bytes = NULL;
10667 	if ((ddi_prop_lookup_byte_array(DDI_DEV_T_ANY, tgt_dip,
10668 	    DDI_PROP_DONTPASS | DDI_PROP_NOTPROM, PORT_WWN_PROP, &bytes,
10669 	    &nbytes) != DDI_PROP_SUCCESS) || nbytes != FC_WWN_SIZE) {
10670 		/* no port WWN property */
10671 		FCP_DTRACE(fcp_logq, pptr->port_instbuf, fcp_trace,
10672 		    FCP_BUF_LEVEL_8, 0,
10673 		    "fcp_phys_tgt_init: Returning DDI_NOT_WELL_FORMED"
10674 		    " for %s (instance %d): bytes=%p nbytes=%x",
10675 		    ddi_get_name(tgt_dip), ddi_get_instance(tgt_dip), bytes,
10676 		    nbytes);
10677 
10678 		if (bytes != NULL) {
10679 			ddi_prop_free(bytes);
10680 		}
10681 
10682 		return (DDI_NOT_WELL_FORMED);
10683 	}
10684 
10685 	words = NULL;
10686 	if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY, tgt_dip,
10687 	    DDI_PROP_DONTPASS | DDI_PROP_NOTPROM,
10688 	    LUN_PROP, &words, &nwords) != DDI_PROP_SUCCESS) {
10689 		ASSERT(bytes != NULL);
10690 
10691 		FCP_DTRACE(fcp_logq, pptr->port_instbuf, fcp_trace,
10692 		    FCP_BUF_LEVEL_8, 0,
10693 		    "fcp_phys_tgt_init: Returning DDI_FAILURE:lun"
10694 		    " for %s (instance %d)", ddi_get_name(tgt_dip),
10695 		    ddi_get_instance(tgt_dip));
10696 
10697 		ddi_prop_free(bytes);
10698 
10699 		return (DDI_NOT_WELL_FORMED);
10700 	}
10701 
10702 	if (nwords == 0) {
10703 		ddi_prop_free(bytes);
10704 		ddi_prop_free(words);
10705 		return (DDI_NOT_WELL_FORMED);
10706 	}
10707 
10708 	ASSERT(bytes != NULL && words != NULL);
10709 
10710 	mutex_enter(&pptr->port_mutex);
10711 	if ((plun = fcp_lookup_lun(pptr, bytes, *words)) == NULL) {
10712 		mutex_exit(&pptr->port_mutex);
10713 		FCP_DTRACE(fcp_logq, pptr->port_instbuf, fcp_trace,
10714 		    FCP_BUF_LEVEL_8, 0,
10715 		    "fcp_phys_tgt_init: Returning DDI_FAILURE: No Lun"
10716 		    " for %s (instance %d)", ddi_get_name(tgt_dip),
10717 		    ddi_get_instance(tgt_dip));
10718 
10719 		ddi_prop_free(bytes);
10720 		ddi_prop_free(words);
10721 
10722 		return (DDI_FAILURE);
10723 	}
10724 
10725 	ASSERT(bcmp(plun->lun_tgt->tgt_port_wwn.raw_wwn, bytes,
10726 	    FC_WWN_SIZE) == 0);
10727 	ASSERT(plun->lun_num == (uint16_t)*words);
10728 
10729 	ddi_prop_free(bytes);
10730 	ddi_prop_free(words);
10731 
10732 	ptgt = plun->lun_tgt;
10733 
10734 	mutex_enter(&ptgt->tgt_mutex);
10735 	plun->lun_tgt_count++;
10736 	scsi_device_hba_private_set(sd, plun);
10737 	plun->lun_state |= FCP_SCSI_LUN_TGT_INIT;
10738 	plun->lun_tran = hba_tran;
10739 	mutex_exit(&ptgt->tgt_mutex);
10740 	mutex_exit(&pptr->port_mutex);
10741 
10742 	return (DDI_SUCCESS);
10743 }
10744 
10745 /*ARGSUSED*/
10746 static int
10747 fcp_virt_tgt_init(dev_info_t *hba_dip, dev_info_t *tgt_dip,
10748     scsi_hba_tran_t *hba_tran, struct scsi_device *sd)
10749 {
10750 	int			words;
10751 	uchar_t			*bytes;
10752 	uint_t			nbytes;
10753 	struct fcp_tgt	*ptgt;
10754 	struct fcp_lun	*plun;
10755 	struct fcp_port	*pptr = (struct fcp_port *)
10756 	    hba_tran->tran_hba_private;
10757 	child_info_t		*cip;
10758 
10759 	ASSERT(pptr != NULL);
10760 
10761 	FCP_DTRACE(fcp_logq, pptr->port_instbuf,
10762 	    fcp_trace, FCP_BUF_LEVEL_8, 0,
10763 	    "fcp_virt_tgt_init: called for %s (instance %d) (hba_dip %p),"
10764 	    " (tgt_dip %p)", ddi_get_name(tgt_dip),
10765 	    ddi_get_instance(tgt_dip), hba_dip, tgt_dip);
10766 
10767 	cip = (child_info_t *)sd->sd_pathinfo;
10768 	if (cip == NULL) {
10769 		FCP_DTRACE(fcp_logq, pptr->port_instbuf,
10770 		    fcp_trace, FCP_BUF_LEVEL_8, 0,
10771 		    "fcp_virt_tgt_init: Returning DDI_NOT_WELL_FORMED"
10772 		    " for %s (instance %d)", ddi_get_name(tgt_dip),
10773 		    ddi_get_instance(tgt_dip));
10774 
10775 		return (DDI_NOT_WELL_FORMED);
10776 	}
10777 
10778 	/* get our port WWN property */
10779 	bytes = NULL;
10780 	if ((mdi_prop_lookup_byte_array(PIP(cip), PORT_WWN_PROP, &bytes,
10781 	    &nbytes) != DDI_PROP_SUCCESS) || nbytes != FC_WWN_SIZE) {
10782 		if (bytes) {
10783 			(void) mdi_prop_free(bytes);
10784 		}
10785 		return (DDI_NOT_WELL_FORMED);
10786 	}
10787 
10788 	words = 0;
10789 	if (mdi_prop_lookup_int(PIP(cip), LUN_PROP, &words) !=
10790 	    DDI_PROP_SUCCESS) {
10791 		ASSERT(bytes != NULL);
10792 
10793 		FCP_DTRACE(fcp_logq, pptr->port_instbuf,
10794 		    fcp_trace, FCP_BUF_LEVEL_8, 0,
10795 		    "fcp_virt_tgt_init: Returning DDI_FAILURE:lun"
10796 		    " for %s (instance %d)", ddi_get_name(tgt_dip),
10797 		    ddi_get_instance(tgt_dip));
10798 
10799 		(void) mdi_prop_free(bytes);
10800 		return (DDI_NOT_WELL_FORMED);
10801 	}
10802 
10803 	ASSERT(bytes != NULL);
10804 
10805 	mutex_enter(&pptr->port_mutex);
10806 	if ((plun = fcp_lookup_lun(pptr, bytes, words)) == NULL) {
10807 		mutex_exit(&pptr->port_mutex);
10808 		FCP_DTRACE(fcp_logq, pptr->port_instbuf,
10809 		    fcp_trace, FCP_BUF_LEVEL_8, 0,
10810 		    "fcp_virt_tgt_init: Returning DDI_FAILURE: No Lun"
10811 		    " for %s (instance %d)", ddi_get_name(tgt_dip),
10812 		    ddi_get_instance(tgt_dip));
10813 
10814 		(void) mdi_prop_free(bytes);
10815 		(void) mdi_prop_free(&words);
10816 
10817 		return (DDI_FAILURE);
10818 	}
10819 
10820 	ASSERT(bcmp(plun->lun_tgt->tgt_port_wwn.raw_wwn, bytes,
10821 	    FC_WWN_SIZE) == 0);
10822 	ASSERT(plun->lun_num == (uint16_t)words);
10823 
10824 	(void) mdi_prop_free(bytes);
10825 	(void) mdi_prop_free(&words);
10826 
10827 	ptgt = plun->lun_tgt;
10828 
10829 	mutex_enter(&ptgt->tgt_mutex);
10830 	plun->lun_tgt_count++;
10831 	scsi_device_hba_private_set(sd, plun);
10832 	plun->lun_state |= FCP_SCSI_LUN_TGT_INIT;
10833 	plun->lun_tran = hba_tran;
10834 	mutex_exit(&ptgt->tgt_mutex);
10835 	mutex_exit(&pptr->port_mutex);
10836 
10837 	return (DDI_SUCCESS);
10838 }
10839 
10840 
10841 /*
10842  * called by the transport to do our own target initialization
10843  *
10844  * can acquire and release the global mutex
10845  */
10846 /* ARGSUSED */
10847 static int
10848 fcp_scsi_tgt_init(dev_info_t *hba_dip, dev_info_t *tgt_dip,
10849     scsi_hba_tran_t *hba_tran, struct scsi_device *sd)
10850 {
10851 	struct fcp_port	*pptr = (struct fcp_port *)
10852 	    hba_tran->tran_hba_private;
10853 	int			rval;
10854 
10855 	ASSERT(pptr != NULL);
10856 
10857 	/*
10858 	 * Child node is getting initialized.  Look at the mpxio component
10859 	 * type on the child device to see if this device is mpxio managed
10860 	 * or not.
10861 	 */
10862 	if (mdi_component_is_client(tgt_dip, NULL) == MDI_SUCCESS) {
10863 		rval = fcp_virt_tgt_init(hba_dip, tgt_dip, hba_tran, sd);
10864 	} else {
10865 		rval = fcp_phys_tgt_init(hba_dip, tgt_dip, hba_tran, sd);
10866 	}
10867 
10868 	return (rval);
10869 }
10870 
10871 
10872 /* ARGSUSED */
10873 static void
10874 fcp_scsi_tgt_free(dev_info_t *hba_dip, dev_info_t *tgt_dip,
10875     scsi_hba_tran_t *hba_tran, struct scsi_device *sd)
10876 {
10877 	struct fcp_lun	*plun = scsi_device_hba_private_get(sd);
10878 	struct fcp_tgt	*ptgt;
10879 
10880 	FCP_DTRACE(fcp_logq, LUN_PORT->port_instbuf,
10881 	    fcp_trace, FCP_BUF_LEVEL_8, 0,
10882 	    "fcp_scsi_tgt_free: called for tran %s%d, dev %s%d",
10883 	    ddi_get_name(hba_dip), ddi_get_instance(hba_dip),
10884 	    ddi_get_name(tgt_dip), ddi_get_instance(tgt_dip));
10885 
10886 	if (plun == NULL) {
10887 		return;
10888 	}
10889 	ptgt = plun->lun_tgt;
10890 
10891 	ASSERT(ptgt != NULL);
10892 
10893 	mutex_enter(&ptgt->tgt_mutex);
10894 	ASSERT(plun->lun_tgt_count > 0);
10895 
10896 	if (--plun->lun_tgt_count == 0) {
10897 		plun->lun_state &= ~FCP_SCSI_LUN_TGT_INIT;
10898 	}
10899 	plun->lun_tran = NULL;
10900 	mutex_exit(&ptgt->tgt_mutex);
10901 }
10902 
10903 /*
10904  *     Function: fcp_scsi_start
10905  *
10906  *  Description: This function is called by the target driver to request a
10907  *		 command to be sent.
10908  *
10909  *     Argument: *ap		SCSI address of the device.
10910  *		 *pkt		SCSI packet containing the cmd to send.
10911  *
10912  * Return Value: TRAN_ACCEPT
10913  *		 TRAN_BUSY
10914  *		 TRAN_BADPKT
10915  *		 TRAN_FATAL_ERROR
10916  */
10917 static int
10918 fcp_scsi_start(struct scsi_address *ap, struct scsi_pkt *pkt)
10919 {
10920 	struct fcp_port	*pptr = ADDR2FCP(ap);
10921 	struct fcp_lun	*plun = ADDR2LUN(ap);
10922 	struct fcp_pkt	*cmd = PKT2CMD(pkt);
10923 	struct fcp_tgt	*ptgt = plun->lun_tgt;
10924 	int			rval;
10925 
10926 	/* ensure command isn't already issued */
10927 	ASSERT(cmd->cmd_state != FCP_PKT_ISSUED);
10928 
10929 	FCP_DTRACE(fcp_logq, pptr->port_instbuf,
10930 	    fcp_trace, FCP_BUF_LEVEL_9, 0,
10931 	    "fcp_transport Invoked for %x", plun->lun_tgt->tgt_d_id);
10932 
10933 	/*
10934 	 * It is strange that we enter the fcp_port mutex and the target
10935 	 * mutex to check the lun state (which has a mutex of its own).
10936 	 */
10937 	mutex_enter(&pptr->port_mutex);
10938 	mutex_enter(&ptgt->tgt_mutex);
10939 
10940 	/*
10941 	 * If the device is offline and is not in the process of coming
10942 	 * online, fail the request.
10943 	 */
10944 
10945 	if ((plun->lun_state & FCP_LUN_OFFLINE) &&
10946 	    !(plun->lun_state & FCP_LUN_ONLINING)) {
10947 		mutex_exit(&ptgt->tgt_mutex);
10948 		mutex_exit(&pptr->port_mutex);
10949 
10950 		if (cmd->cmd_fp_pkt->pkt_pd == NULL) {
10951 			pkt->pkt_reason = CMD_DEV_GONE;
10952 		}
10953 
10954 		return (TRAN_FATAL_ERROR);
10955 	}
10956 	cmd->cmd_fp_pkt->pkt_timeout = pkt->pkt_time;
10957 
10958 	/*
10959 	 * If we are suspended, kernel is trying to dump, so don't
10960 	 * block, fail or defer requests - send them down right away.
10961 	 * NOTE: If we are in panic (i.e. trying to dump), we can't
10962 	 * assume we have been suspended.  There is hardware such as
10963 	 * the v880 that doesn't do PM.	 Thus, the check for
10964 	 * ddi_in_panic.
10965 	 *
10966 	 * If FCP_STATE_IN_CB_DEVC is set, devices are in the process
10967 	 * of changing.	 So, if we can queue the packet, do it.	 Eventually,
10968 	 * either the device will have gone away or changed and we can fail
10969 	 * the request, or we can proceed if the device didn't change.
10970 	 *
10971 	 * If the pd in the target or the packet is NULL it's probably
10972 	 * because the device has gone away, we allow the request to be
10973 	 * put on the internal queue here in case the device comes back within
10974 	 * the offline timeout. fctl will fix up the pd's if the tgt_pd_handle
10975 	 * has gone NULL, while fcp deals cases where pkt_pd is NULL. pkt_pd
10976 	 * could be NULL because the device was disappearing during or since
10977 	 * packet initialization.
10978 	 */
10979 
10980 	if (((plun->lun_state & FCP_LUN_BUSY) && (!(pptr->port_state &
10981 	    FCP_STATE_SUSPENDED)) && !ddi_in_panic()) ||
10982 	    (pptr->port_state & (FCP_STATE_ONLINING | FCP_STATE_IN_CB_DEVC)) ||
10983 	    (ptgt->tgt_pd_handle == NULL) ||
10984 	    (cmd->cmd_fp_pkt->pkt_pd == NULL)) {
10985 		/*
10986 		 * If ((LUN is busy AND
10987 		 *	LUN not suspended AND
10988 		 *	The system is not in panic state) OR
10989 		 *	(The port is coming up))
10990 		 *
10991 		 * We check to see if the any of the flags FLAG_NOINTR or
10992 		 * FLAG_NOQUEUE is set.	 If one of them is set the value
10993 		 * returned will be TRAN_BUSY.	If not, the request is queued.
10994 		 */
10995 		mutex_exit(&ptgt->tgt_mutex);
10996 		mutex_exit(&pptr->port_mutex);
10997 
10998 		/* see if using interrupts is allowed (so queueing'll work) */
10999 		if (pkt->pkt_flags & FLAG_NOINTR) {
11000 			pkt->pkt_resid = 0;
11001 			return (TRAN_BUSY);
11002 		}
11003 		if (pkt->pkt_flags & FLAG_NOQUEUE) {
11004 			FCP_DTRACE(fcp_logq, pptr->port_instbuf,
11005 			    fcp_trace, FCP_BUF_LEVEL_9, 0,
11006 			    "fcp_scsi_start: lun busy for pkt %p", pkt);
11007 			return (TRAN_BUSY);
11008 		}
11009 #ifdef	DEBUG
11010 		mutex_enter(&pptr->port_pkt_mutex);
11011 		pptr->port_npkts++;
11012 		mutex_exit(&pptr->port_pkt_mutex);
11013 #endif /* DEBUG */
11014 
11015 		/* got queue up the pkt for later */
11016 		fcp_queue_pkt(pptr, cmd);
11017 		return (TRAN_ACCEPT);
11018 	}
11019 	cmd->cmd_state = FCP_PKT_ISSUED;
11020 
11021 	mutex_exit(&ptgt->tgt_mutex);
11022 	mutex_exit(&pptr->port_mutex);
11023 
11024 	/*
11025 	 * Now that we released the mutexes, what was protected by them can
11026 	 * change.
11027 	 */
11028 
11029 	/*
11030 	 * If there is a reconfiguration in progress, wait for it to complete.
11031 	 */
11032 	fcp_reconfig_wait(pptr);
11033 
11034 	cmd->cmd_timeout = pkt->pkt_time ? fcp_watchdog_time +
11035 	    pkt->pkt_time : 0;
11036 
11037 	/* prepare the packet */
11038 
11039 	fcp_prepare_pkt(pptr, cmd, plun);
11040 
11041 	if (cmd->cmd_pkt->pkt_time) {
11042 		cmd->cmd_fp_pkt->pkt_timeout = cmd->cmd_pkt->pkt_time;
11043 	} else {
11044 		cmd->cmd_fp_pkt->pkt_timeout = 5 * 60 * 60;
11045 	}
11046 
11047 	/*
11048 	 * if interrupts aren't allowed (e.g. at dump time) then we'll
11049 	 * have to do polled I/O
11050 	 */
11051 	if (pkt->pkt_flags & FLAG_NOINTR) {
11052 		cmd->cmd_state &= ~FCP_PKT_ISSUED;
11053 		return (fcp_dopoll(pptr, cmd));
11054 	}
11055 
11056 #ifdef	DEBUG
11057 	mutex_enter(&pptr->port_pkt_mutex);
11058 	pptr->port_npkts++;
11059 	mutex_exit(&pptr->port_pkt_mutex);
11060 #endif /* DEBUG */
11061 
11062 	rval = fcp_transport(pptr->port_fp_handle, cmd->cmd_fp_pkt, 0);
11063 	if (rval == FC_SUCCESS) {
11064 		FCP_DTRACE(fcp_logq, pptr->port_instbuf,
11065 		    fcp_trace, FCP_BUF_LEVEL_9, 0,
11066 		    "fcp_transport success for %x", plun->lun_tgt->tgt_d_id);
11067 		return (TRAN_ACCEPT);
11068 	}
11069 
11070 	cmd->cmd_state = FCP_PKT_IDLE;
11071 
11072 #ifdef	DEBUG
11073 	mutex_enter(&pptr->port_pkt_mutex);
11074 	pptr->port_npkts--;
11075 	mutex_exit(&pptr->port_pkt_mutex);
11076 #endif /* DEBUG */
11077 
11078 	/*
11079 	 * For lack of clearer definitions, choose
11080 	 * between TRAN_BUSY and TRAN_FATAL_ERROR.
11081 	 */
11082 
11083 	if (rval == FC_TRAN_BUSY) {
11084 		pkt->pkt_resid = 0;
11085 		rval = TRAN_BUSY;
11086 	} else {
11087 		mutex_enter(&ptgt->tgt_mutex);
11088 		if (plun->lun_state & FCP_LUN_OFFLINE) {
11089 			child_info_t	*cip;
11090 
11091 			mutex_enter(&plun->lun_mutex);
11092 			cip = plun->lun_cip;
11093 			mutex_exit(&plun->lun_mutex);
11094 
11095 			FCP_TRACE(fcp_logq, pptr->port_instbuf,
11096 			    fcp_trace, FCP_BUF_LEVEL_6, 0,
11097 			    "fcp_transport failed 2 for %x: %x; dip=%p",
11098 			    plun->lun_tgt->tgt_d_id, rval, cip);
11099 
11100 			rval = TRAN_FATAL_ERROR;
11101 		} else {
11102 			if (pkt->pkt_flags & FLAG_NOQUEUE) {
11103 				FCP_DTRACE(fcp_logq, pptr->port_instbuf,
11104 				    fcp_trace, FCP_BUF_LEVEL_9, 0,
11105 				    "fcp_scsi_start: FC_BUSY for pkt %p",
11106 				    pkt);
11107 				rval = TRAN_BUSY;
11108 			} else {
11109 				rval = TRAN_ACCEPT;
11110 				fcp_queue_pkt(pptr, cmd);
11111 			}
11112 		}
11113 		mutex_exit(&ptgt->tgt_mutex);
11114 	}
11115 
11116 	return (rval);
11117 }
11118 
11119 /*
11120  * called by the transport to abort a packet
11121  */
11122 /*ARGSUSED*/
11123 static int
11124 fcp_scsi_abort(struct scsi_address *ap, struct scsi_pkt *pkt)
11125 {
11126 	int tgt_cnt;
11127 	struct fcp_port		*pptr = ADDR2FCP(ap);
11128 	struct fcp_lun	*plun = ADDR2LUN(ap);
11129 	struct fcp_tgt	*ptgt = plun->lun_tgt;
11130 
11131 	if (pkt == NULL) {
11132 		if (ptgt) {
11133 			mutex_enter(&ptgt->tgt_mutex);
11134 			tgt_cnt = ptgt->tgt_change_cnt;
11135 			mutex_exit(&ptgt->tgt_mutex);
11136 			fcp_abort_all(pptr, ptgt, plun, tgt_cnt);
11137 			return (TRUE);
11138 		}
11139 	}
11140 	return (FALSE);
11141 }
11142 
11143 
11144 /*
11145  * Perform reset
11146  */
11147 int
11148 fcp_scsi_reset(struct scsi_address *ap, int level)
11149 {
11150 	int			rval = 0;
11151 	struct fcp_port		*pptr = ADDR2FCP(ap);
11152 	struct fcp_lun	*plun = ADDR2LUN(ap);
11153 	struct fcp_tgt	*ptgt = plun->lun_tgt;
11154 
11155 	if (level == RESET_ALL) {
11156 		if (fcp_linkreset(pptr, ap, KM_NOSLEEP) == FC_SUCCESS) {
11157 			rval = 1;
11158 		}
11159 	} else if (level == RESET_TARGET || level == RESET_LUN) {
11160 		/*
11161 		 * If we are in the middle of discovery, return
11162 		 * SUCCESS as this target will be rediscovered
11163 		 * anyway
11164 		 */
11165 		mutex_enter(&ptgt->tgt_mutex);
11166 		if (ptgt->tgt_state & (FCP_TGT_OFFLINE | FCP_TGT_BUSY)) {
11167 			mutex_exit(&ptgt->tgt_mutex);
11168 			return (1);
11169 		}
11170 		mutex_exit(&ptgt->tgt_mutex);
11171 
11172 		if (fcp_reset_target(ap, level) == FC_SUCCESS) {
11173 			rval = 1;
11174 		}
11175 	}
11176 	return (rval);
11177 }
11178 
11179 
11180 /*
11181  * called by the framework to get a SCSI capability
11182  */
11183 static int
11184 fcp_scsi_getcap(struct scsi_address *ap, char *cap, int whom)
11185 {
11186 	return (fcp_commoncap(ap, cap, 0, whom, 0));
11187 }
11188 
11189 
11190 /*
11191  * called by the framework to set a SCSI capability
11192  */
11193 static int
11194 fcp_scsi_setcap(struct scsi_address *ap, char *cap, int value, int whom)
11195 {
11196 	return (fcp_commoncap(ap, cap, value, whom, 1));
11197 }
11198 
11199 /*
11200  *     Function: fcp_pkt_setup
11201  *
11202  *  Description: This function sets up the scsi_pkt structure passed by the
11203  *		 caller. This function assumes fcp_pkt_constructor has been
11204  *		 called previously for the packet passed by the caller.	 If
11205  *		 successful this call will have the following results:
11206  *
11207  *		   - The resources needed that will be constant through out
11208  *		     the whole transaction are allocated.
11209  *		   - The fields that will be constant through out the whole
11210  *		     transaction are initialized.
11211  *		   - The scsi packet will be linked to the LUN structure
11212  *		     addressed by the transaction.
11213  *
11214  *     Argument:
11215  *		 *pkt		Pointer to a scsi_pkt structure.
11216  *		 callback
11217  *		 arg
11218  *
11219  * Return Value: 0	Success
11220  *		 !0	Failure
11221  *
11222  *	Context: Kernel context or interrupt context
11223  */
11224 /* ARGSUSED */
11225 static int
11226 fcp_pkt_setup(struct scsi_pkt *pkt,
11227     int (*callback)(caddr_t arg),
11228     caddr_t arg)
11229 {
11230 	struct fcp_pkt	*cmd;
11231 	struct fcp_port	*pptr;
11232 	struct fcp_lun	*plun;
11233 	struct fcp_tgt	*ptgt;
11234 	int		kf;
11235 	fc_packet_t	*fpkt;
11236 	fc_frame_hdr_t	*hp;
11237 
11238 	pptr = ADDR2FCP(&pkt->pkt_address);
11239 	plun = ADDR2LUN(&pkt->pkt_address);
11240 	ptgt = plun->lun_tgt;
11241 
11242 	cmd = (struct fcp_pkt *)pkt->pkt_ha_private;
11243 	fpkt = cmd->cmd_fp_pkt;
11244 
11245 	/*
11246 	 * this request is for dma allocation only
11247 	 */
11248 	/*
11249 	 * First step of fcp_scsi_init_pkt: pkt allocation
11250 	 * We determine if the caller is willing to wait for the
11251 	 * resources.
11252 	 */
11253 	kf = (callback == SLEEP_FUNC) ? KM_SLEEP: KM_NOSLEEP;
11254 
11255 	/*
11256 	 * Selective zeroing of the pkt.
11257 	 */
11258 	cmd->cmd_back = NULL;
11259 	cmd->cmd_next = NULL;
11260 
11261 	/*
11262 	 * Zero out fcp command
11263 	 */
11264 	bzero(&cmd->cmd_fcp_cmd, sizeof (cmd->cmd_fcp_cmd));
11265 
11266 	cmd->cmd_state = FCP_PKT_IDLE;
11267 
11268 	fpkt = cmd->cmd_fp_pkt;
11269 	fpkt->pkt_data_acc = NULL;
11270 
11271 	mutex_enter(&ptgt->tgt_mutex);
11272 	fpkt->pkt_pd = ptgt->tgt_pd_handle;
11273 
11274 	if (fc_ulp_init_packet(pptr->port_fp_handle, fpkt, kf)
11275 	    != FC_SUCCESS) {
11276 		mutex_exit(&ptgt->tgt_mutex);
11277 		return (-1);
11278 	}
11279 
11280 	mutex_exit(&ptgt->tgt_mutex);
11281 
11282 	/* Fill in the Fabric Channel Header */
11283 	hp = &fpkt->pkt_cmd_fhdr;
11284 	hp->r_ctl = R_CTL_COMMAND;
11285 	hp->rsvd = 0;
11286 	hp->type = FC_TYPE_SCSI_FCP;
11287 	hp->f_ctl = F_CTL_SEQ_INITIATIVE | F_CTL_FIRST_SEQ;
11288 	hp->seq_id = 0;
11289 	hp->df_ctl  = 0;
11290 	hp->seq_cnt = 0;
11291 	hp->ox_id = 0xffff;
11292 	hp->rx_id = 0xffff;
11293 	hp->ro = 0;
11294 
11295 	/*
11296 	 * A doubly linked list (cmd_forw, cmd_back) is built
11297 	 * out of every allocated packet on a per-lun basis
11298 	 *
11299 	 * The packets are maintained in the list so as to satisfy
11300 	 * scsi_abort() requests. At present (which is unlikely to
11301 	 * change in the future) nobody performs a real scsi_abort
11302 	 * in the SCSI target drivers (as they don't keep the packets
11303 	 * after doing scsi_transport - so they don't know how to
11304 	 * abort a packet other than sending a NULL to abort all
11305 	 * outstanding packets)
11306 	 */
11307 	mutex_enter(&plun->lun_mutex);
11308 	if ((cmd->cmd_forw = plun->lun_pkt_head) != NULL) {
11309 		plun->lun_pkt_head->cmd_back = cmd;
11310 	} else {
11311 		plun->lun_pkt_tail = cmd;
11312 	}
11313 	plun->lun_pkt_head = cmd;
11314 	mutex_exit(&plun->lun_mutex);
11315 	return (0);
11316 }
11317 
11318 /*
11319  *     Function: fcp_pkt_teardown
11320  *
11321  *  Description: This function releases a scsi_pkt structure and all the
11322  *		 resources attached to it.
11323  *
11324  *     Argument: *pkt		Pointer to a scsi_pkt structure.
11325  *
11326  * Return Value: None
11327  *
11328  *	Context: User, Kernel or Interrupt context.
11329  */
11330 static void
11331 fcp_pkt_teardown(struct scsi_pkt *pkt)
11332 {
11333 	struct fcp_port	*pptr = ADDR2FCP(&pkt->pkt_address);
11334 	struct fcp_lun	*plun = ADDR2LUN(&pkt->pkt_address);
11335 	struct fcp_pkt	*cmd = (struct fcp_pkt *)pkt->pkt_ha_private;
11336 
11337 	/*
11338 	 * Remove the packet from the per-lun list
11339 	 */
11340 	mutex_enter(&plun->lun_mutex);
11341 	if (cmd->cmd_back) {
11342 		ASSERT(cmd != plun->lun_pkt_head);
11343 		cmd->cmd_back->cmd_forw = cmd->cmd_forw;
11344 	} else {
11345 		ASSERT(cmd == plun->lun_pkt_head);
11346 		plun->lun_pkt_head = cmd->cmd_forw;
11347 	}
11348 
11349 	if (cmd->cmd_forw) {
11350 		cmd->cmd_forw->cmd_back = cmd->cmd_back;
11351 	} else {
11352 		ASSERT(cmd == plun->lun_pkt_tail);
11353 		plun->lun_pkt_tail = cmd->cmd_back;
11354 	}
11355 
11356 	mutex_exit(&plun->lun_mutex);
11357 
11358 	(void) fc_ulp_uninit_packet(pptr->port_fp_handle, cmd->cmd_fp_pkt);
11359 }
11360 
11361 /*
11362  * Routine for reset notification setup, to register or cancel.
11363  * This function is called by SCSA
11364  */
11365 /*ARGSUSED*/
11366 static int
11367 fcp_scsi_reset_notify(struct scsi_address *ap, int flag,
11368     void (*callback)(caddr_t), caddr_t arg)
11369 {
11370 	struct fcp_port *pptr = ADDR2FCP(ap);
11371 
11372 	return (scsi_hba_reset_notify_setup(ap, flag, callback, arg,
11373 	    &pptr->port_mutex, &pptr->port_reset_notify_listf));
11374 }
11375 
11376 
11377 static int
11378 fcp_scsi_bus_get_eventcookie(dev_info_t *dip, dev_info_t *rdip, char *name,
11379     ddi_eventcookie_t *event_cookiep)
11380 {
11381 	struct fcp_port *pptr = fcp_dip2port(dip);
11382 
11383 	if (pptr == NULL) {
11384 		return (DDI_FAILURE);
11385 	}
11386 
11387 	return (ndi_event_retrieve_cookie(pptr->port_ndi_event_hdl, rdip, name,
11388 	    event_cookiep, NDI_EVENT_NOPASS));
11389 }
11390 
11391 
11392 static int
11393 fcp_scsi_bus_add_eventcall(dev_info_t *dip, dev_info_t *rdip,
11394     ddi_eventcookie_t eventid, void (*callback)(), void *arg,
11395     ddi_callback_id_t *cb_id)
11396 {
11397 	struct fcp_port *pptr = fcp_dip2port(dip);
11398 
11399 	if (pptr == NULL) {
11400 		return (DDI_FAILURE);
11401 	}
11402 
11403 	return (ndi_event_add_callback(pptr->port_ndi_event_hdl, rdip,
11404 	    eventid, callback, arg, NDI_SLEEP, cb_id));
11405 }
11406 
11407 
11408 static int
11409 fcp_scsi_bus_remove_eventcall(dev_info_t *dip, ddi_callback_id_t cb_id)
11410 {
11411 
11412 	struct fcp_port *pptr = fcp_dip2port(dip);
11413 
11414 	if (pptr == NULL) {
11415 		return (DDI_FAILURE);
11416 	}
11417 	return (ndi_event_remove_callback(pptr->port_ndi_event_hdl, cb_id));
11418 }
11419 
11420 
11421 /*
11422  * called by the transport to post an event
11423  */
11424 static int
11425 fcp_scsi_bus_post_event(dev_info_t *dip, dev_info_t *rdip,
11426     ddi_eventcookie_t eventid, void *impldata)
11427 {
11428 	struct fcp_port *pptr = fcp_dip2port(dip);
11429 
11430 	if (pptr == NULL) {
11431 		return (DDI_FAILURE);
11432 	}
11433 
11434 	return (ndi_event_run_callbacks(pptr->port_ndi_event_hdl, rdip,
11435 	    eventid, impldata));
11436 }
11437 
11438 
11439 /*
11440  * A target in in many cases in Fibre Channel has a one to one relation
11441  * with a port identifier (which is also known as D_ID and also as AL_PA
11442  * in private Loop) On Fibre Channel-to-SCSI bridge boxes a target reset
11443  * will most likely result in resetting all LUNs (which means a reset will
11444  * occur on all the SCSI devices connected at the other end of the bridge)
11445  * That is the latest favorite topic for discussion, for, one can debate as
11446  * hot as one likes and come up with arguably a best solution to one's
11447  * satisfaction
11448  *
11449  * To stay on track and not digress much, here are the problems stated
11450  * briefly:
11451  *
11452  *	SCSA doesn't define RESET_LUN, It defines RESET_TARGET, but the
11453  *	target drivers use RESET_TARGET even if their instance is on a
11454  *	LUN. Doesn't that sound a bit broken ?
11455  *
11456  *	FCP SCSI (the current spec) only defines RESET TARGET in the
11457  *	control fields of an FCP_CMND structure. It should have been
11458  *	fixed right there, giving flexibility to the initiators to
11459  *	minimize havoc that could be caused by resetting a target.
11460  */
11461 static int
11462 fcp_reset_target(struct scsi_address *ap, int level)
11463 {
11464 	int			rval = FC_FAILURE;
11465 	char			lun_id[25];
11466 	struct fcp_port		*pptr = ADDR2FCP(ap);
11467 	struct fcp_lun	*plun = ADDR2LUN(ap);
11468 	struct fcp_tgt	*ptgt = plun->lun_tgt;
11469 	struct scsi_pkt		*pkt;
11470 	struct fcp_pkt	*cmd;
11471 	struct fcp_rsp		*rsp;
11472 	uint32_t		tgt_cnt;
11473 	struct fcp_rsp_info	*rsp_info;
11474 	struct fcp_reset_elem	*p;
11475 	int			bval;
11476 
11477 	if ((p = kmem_alloc(sizeof (struct fcp_reset_elem),
11478 	    KM_NOSLEEP)) == NULL) {
11479 		return (rval);
11480 	}
11481 
11482 	mutex_enter(&ptgt->tgt_mutex);
11483 	if (level == RESET_TARGET) {
11484 		if (ptgt->tgt_state & (FCP_TGT_OFFLINE | FCP_TGT_BUSY)) {
11485 			mutex_exit(&ptgt->tgt_mutex);
11486 			kmem_free(p, sizeof (struct fcp_reset_elem));
11487 			return (rval);
11488 		}
11489 		fcp_update_tgt_state(ptgt, FCP_SET, FCP_LUN_BUSY);
11490 		(void) strcpy(lun_id, " ");
11491 	} else {
11492 		if (plun->lun_state & (FCP_LUN_OFFLINE | FCP_LUN_BUSY)) {
11493 			mutex_exit(&ptgt->tgt_mutex);
11494 			kmem_free(p, sizeof (struct fcp_reset_elem));
11495 			return (rval);
11496 		}
11497 		fcp_update_lun_state(plun, FCP_SET, FCP_LUN_BUSY);
11498 
11499 		(void) sprintf(lun_id, ", LUN=%d", plun->lun_num);
11500 	}
11501 	tgt_cnt = ptgt->tgt_change_cnt;
11502 
11503 	mutex_exit(&ptgt->tgt_mutex);
11504 
11505 	if ((pkt = scsi_init_pkt(ap, NULL, NULL, 0, 0,
11506 	    0, 0, NULL, 0)) == NULL) {
11507 		kmem_free(p, sizeof (struct fcp_reset_elem));
11508 		mutex_enter(&ptgt->tgt_mutex);
11509 		fcp_update_tgt_state(ptgt, FCP_RESET, FCP_LUN_BUSY);
11510 		mutex_exit(&ptgt->tgt_mutex);
11511 		return (rval);
11512 	}
11513 	pkt->pkt_time = FCP_POLL_TIMEOUT;
11514 
11515 	/* fill in cmd part of packet */
11516 	cmd = PKT2CMD(pkt);
11517 	if (level == RESET_TARGET) {
11518 		cmd->cmd_fcp_cmd.fcp_cntl.cntl_reset_tgt = 1;
11519 	} else {
11520 		cmd->cmd_fcp_cmd.fcp_cntl.cntl_reset_lun = 1;
11521 	}
11522 	cmd->cmd_fp_pkt->pkt_comp = NULL;
11523 	cmd->cmd_pkt->pkt_flags |= FLAG_NOINTR;
11524 
11525 	/* prepare a packet for transport */
11526 	fcp_prepare_pkt(pptr, cmd, plun);
11527 
11528 	if (cmd->cmd_pkt->pkt_time) {
11529 		cmd->cmd_fp_pkt->pkt_timeout = cmd->cmd_pkt->pkt_time;
11530 	} else {
11531 		cmd->cmd_fp_pkt->pkt_timeout = 5 * 60 * 60;
11532 	}
11533 
11534 	(void) fc_ulp_busy_port(pptr->port_fp_handle);
11535 	bval = fcp_dopoll(pptr, cmd);
11536 	fc_ulp_idle_port(pptr->port_fp_handle);
11537 
11538 	/* submit the packet */
11539 	if (bval == TRAN_ACCEPT) {
11540 		int error = 3;
11541 
11542 		rsp = (struct fcp_rsp *)cmd->cmd_fcp_rsp;
11543 		rsp_info = (struct fcp_rsp_info *)(cmd->cmd_fcp_rsp +
11544 		    sizeof (struct fcp_rsp));
11545 
11546 		if (rsp->fcp_u.fcp_status.rsp_len_set) {
11547 			if (fcp_validate_fcp_response(rsp, pptr) ==
11548 			    FC_SUCCESS) {
11549 				if (pptr->port_fcp_dma != FC_NO_DVMA_SPACE) {
11550 					FCP_CP_IN(cmd->cmd_fp_pkt->pkt_resp +
11551 					    sizeof (struct fcp_rsp), rsp_info,
11552 					    cmd->cmd_fp_pkt->pkt_resp_acc,
11553 					    sizeof (struct fcp_rsp_info));
11554 				}
11555 				if (rsp_info->rsp_code == FCP_NO_FAILURE) {
11556 					rval = FC_SUCCESS;
11557 					error = 0;
11558 				} else {
11559 					error = 1;
11560 				}
11561 			} else {
11562 				error = 2;
11563 			}
11564 		}
11565 
11566 		switch (error) {
11567 		case 0:
11568 			fcp_log(CE_WARN, pptr->port_dip,
11569 			    "!FCP: WWN 0x%08x%08x %s reset successfully",
11570 			    *((int *)&ptgt->tgt_port_wwn.raw_wwn[0]),
11571 			    *((int *)&ptgt->tgt_port_wwn.raw_wwn[4]), lun_id);
11572 			break;
11573 
11574 		case 1:
11575 			fcp_log(CE_WARN, pptr->port_dip,
11576 			    "!FCP: Reset to WWN	 0x%08x%08x %s failed,"
11577 			    " response code=%x",
11578 			    *((int *)&ptgt->tgt_port_wwn.raw_wwn[0]),
11579 			    *((int *)&ptgt->tgt_port_wwn.raw_wwn[4]), lun_id,
11580 			    rsp_info->rsp_code);
11581 			break;
11582 
11583 		case 2:
11584 			fcp_log(CE_WARN, pptr->port_dip,
11585 			    "!FCP: Reset to WWN 0x%08x%08x %s failed,"
11586 			    " Bad FCP response values: rsvd1=%x,"
11587 			    " rsvd2=%x, sts-rsvd1=%x, sts-rsvd2=%x,"
11588 			    " rsplen=%x, senselen=%x",
11589 			    *((int *)&ptgt->tgt_port_wwn.raw_wwn[0]),
11590 			    *((int *)&ptgt->tgt_port_wwn.raw_wwn[4]), lun_id,
11591 			    rsp->reserved_0, rsp->reserved_1,
11592 			    rsp->fcp_u.fcp_status.reserved_0,
11593 			    rsp->fcp_u.fcp_status.reserved_1,
11594 			    rsp->fcp_response_len, rsp->fcp_sense_len);
11595 			break;
11596 
11597 		default:
11598 			fcp_log(CE_WARN, pptr->port_dip,
11599 			    "!FCP: Reset to WWN	 0x%08x%08x %s failed",
11600 			    *((int *)&ptgt->tgt_port_wwn.raw_wwn[0]),
11601 			    *((int *)&ptgt->tgt_port_wwn.raw_wwn[4]), lun_id);
11602 			break;
11603 		}
11604 	}
11605 	scsi_destroy_pkt(pkt);
11606 
11607 	if (rval == FC_FAILURE) {
11608 		mutex_enter(&ptgt->tgt_mutex);
11609 		if (level == RESET_TARGET) {
11610 			fcp_update_tgt_state(ptgt, FCP_RESET, FCP_LUN_BUSY);
11611 		} else {
11612 			fcp_update_lun_state(plun, FCP_RESET, FCP_LUN_BUSY);
11613 		}
11614 		mutex_exit(&ptgt->tgt_mutex);
11615 		kmem_free(p, sizeof (struct fcp_reset_elem));
11616 		return (rval);
11617 	}
11618 
11619 	mutex_enter(&pptr->port_mutex);
11620 	if (level == RESET_TARGET) {
11621 		p->tgt = ptgt;
11622 		p->lun = NULL;
11623 	} else {
11624 		p->tgt = NULL;
11625 		p->lun = plun;
11626 	}
11627 	p->tgt = ptgt;
11628 	p->tgt_cnt = tgt_cnt;
11629 	p->timeout = fcp_watchdog_time + FCP_RESET_DELAY;
11630 	p->next = pptr->port_reset_list;
11631 	pptr->port_reset_list = p;
11632 
11633 	FCP_TRACE(fcp_logq, pptr->port_instbuf,
11634 	    fcp_trace, FCP_BUF_LEVEL_3, 0,
11635 	    "Notify ssd of the reset to reinstate the reservations");
11636 
11637 	scsi_hba_reset_notify_callback(&pptr->port_mutex,
11638 	    &pptr->port_reset_notify_listf);
11639 
11640 	mutex_exit(&pptr->port_mutex);
11641 
11642 	return (rval);
11643 }
11644 
11645 
11646 /*
11647  * called by fcp_getcap and fcp_setcap to get and set (respectively)
11648  * SCSI capabilities
11649  */
11650 /* ARGSUSED */
11651 static int
11652 fcp_commoncap(struct scsi_address *ap, char *cap,
11653     int val, int tgtonly, int doset)
11654 {
11655 	struct fcp_port		*pptr = ADDR2FCP(ap);
11656 	struct fcp_lun	*plun = ADDR2LUN(ap);
11657 	struct fcp_tgt	*ptgt = plun->lun_tgt;
11658 	int			cidx;
11659 	int			rval = FALSE;
11660 
11661 	if (cap == (char *)0) {
11662 		FCP_TRACE(fcp_logq, pptr->port_instbuf,
11663 		    fcp_trace, FCP_BUF_LEVEL_3, 0,
11664 		    "fcp_commoncap: invalid arg");
11665 		return (rval);
11666 	}
11667 
11668 	if ((cidx = scsi_hba_lookup_capstr(cap)) == -1) {
11669 		return (UNDEFINED);
11670 	}
11671 
11672 	/*
11673 	 * Process setcap request.
11674 	 */
11675 	if (doset) {
11676 		/*
11677 		 * At present, we can only set binary (0/1) values
11678 		 */
11679 		switch (cidx) {
11680 		case SCSI_CAP_ARQ:
11681 			if (val == 0) {
11682 				rval = FALSE;
11683 			} else {
11684 				rval = TRUE;
11685 			}
11686 			break;
11687 
11688 		case SCSI_CAP_LUN_RESET:
11689 			if (val) {
11690 				plun->lun_cap |= FCP_LUN_CAP_RESET;
11691 			} else {
11692 				plun->lun_cap &= ~FCP_LUN_CAP_RESET;
11693 			}
11694 			rval = TRUE;
11695 			break;
11696 
11697 		case SCSI_CAP_SECTOR_SIZE:
11698 			rval = TRUE;
11699 			break;
11700 		default:
11701 			FCP_TRACE(fcp_logq, pptr->port_instbuf,
11702 			    fcp_trace, FCP_BUF_LEVEL_4, 0,
11703 			    "fcp_setcap: unsupported %d", cidx);
11704 			rval = UNDEFINED;
11705 			break;
11706 		}
11707 
11708 		FCP_TRACE(fcp_logq, pptr->port_instbuf,
11709 		    fcp_trace, FCP_BUF_LEVEL_5, 0,
11710 		    "set cap: cap=%s, val/tgtonly/doset/rval = "
11711 		    "0x%x/0x%x/0x%x/%d",
11712 		    cap, val, tgtonly, doset, rval);
11713 
11714 	} else {
11715 		/*
11716 		 * Process getcap request.
11717 		 */
11718 		switch (cidx) {
11719 		case SCSI_CAP_DMA_MAX:
11720 			rval = (int)pptr->port_data_dma_attr.dma_attr_maxxfer;
11721 
11722 			/*
11723 			 * Need to make an adjustment qlc is uint_t 64
11724 			 * st is int, so we will make the adjustment here
11725 			 * being as nobody wants to touch this.
11726 			 * It still leaves the max single block length
11727 			 * of 2 gig. This should last .
11728 			 */
11729 
11730 			if (rval == -1) {
11731 				rval = MAX_INT_DMA;
11732 			}
11733 
11734 			break;
11735 
11736 		case SCSI_CAP_INITIATOR_ID:
11737 			rval = pptr->port_id;
11738 			break;
11739 
11740 		case SCSI_CAP_ARQ:
11741 		case SCSI_CAP_RESET_NOTIFICATION:
11742 		case SCSI_CAP_TAGGED_QING:
11743 			rval = TRUE;
11744 			break;
11745 
11746 		case SCSI_CAP_SCSI_VERSION:
11747 			rval = 3;
11748 			break;
11749 
11750 		case SCSI_CAP_INTERCONNECT_TYPE:
11751 			if (FC_TOP_EXTERNAL(pptr->port_topology) ||
11752 			    (ptgt->tgt_hard_addr == 0)) {
11753 				rval = INTERCONNECT_FABRIC;
11754 			} else {
11755 				rval = INTERCONNECT_FIBRE;
11756 			}
11757 			break;
11758 
11759 		case SCSI_CAP_LUN_RESET:
11760 			rval = ((plun->lun_cap & FCP_LUN_CAP_RESET) != 0) ?
11761 			    TRUE : FALSE;
11762 			break;
11763 
11764 		default:
11765 			FCP_TRACE(fcp_logq, pptr->port_instbuf,
11766 			    fcp_trace, FCP_BUF_LEVEL_4, 0,
11767 			    "fcp_getcap: unsupported %d", cidx);
11768 			rval = UNDEFINED;
11769 			break;
11770 		}
11771 
11772 		FCP_TRACE(fcp_logq, pptr->port_instbuf,
11773 		    fcp_trace, FCP_BUF_LEVEL_8, 0,
11774 		    "get cap: cap=%s, val/tgtonly/doset/rval = "
11775 		    "0x%x/0x%x/0x%x/%d",
11776 		    cap, val, tgtonly, doset, rval);
11777 	}
11778 
11779 	return (rval);
11780 }
11781 
11782 /*
11783  * called by the transport to get the port-wwn and lun
11784  * properties of this device, and to create a "name" based on them
11785  *
11786  * these properties don't exist on sun4m
11787  *
11788  * return 1 for success else return 0
11789  */
11790 /* ARGSUSED */
11791 static int
11792 fcp_scsi_get_name(struct scsi_device *sd, char *name, int len)
11793 {
11794 	int			i;
11795 	int			*lun;
11796 	int			numChars;
11797 	uint_t			nlun;
11798 	uint_t			count;
11799 	uint_t			nbytes;
11800 	uchar_t			*bytes;
11801 	uint16_t		lun_num;
11802 	uint32_t		tgt_id;
11803 	char			**conf_wwn;
11804 	char			tbuf[(FC_WWN_SIZE << 1) + 1];
11805 	uchar_t			barray[FC_WWN_SIZE];
11806 	dev_info_t		*tgt_dip;
11807 	struct fcp_tgt	*ptgt;
11808 	struct fcp_port	*pptr;
11809 	struct fcp_lun	*plun;
11810 
11811 	ASSERT(sd != NULL);
11812 	ASSERT(name != NULL);
11813 
11814 	tgt_dip = sd->sd_dev;
11815 	pptr = ddi_get_soft_state(fcp_softstate,
11816 	    ddi_get_instance(ddi_get_parent(tgt_dip)));
11817 	if (pptr == NULL) {
11818 		return (0);
11819 	}
11820 
11821 	ASSERT(tgt_dip != NULL);
11822 
11823 	if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY, sd->sd_dev,
11824 	    DDI_PROP_DONTPASS | DDI_PROP_NOTPROM,
11825 	    LUN_PROP, &lun, &nlun) != DDI_SUCCESS) {
11826 		name[0] = '\0';
11827 		return (0);
11828 	}
11829 
11830 	if (nlun == 0) {
11831 		ddi_prop_free(lun);
11832 		return (0);
11833 	}
11834 
11835 	lun_num = lun[0];
11836 	ddi_prop_free(lun);
11837 
11838 	/*
11839 	 * Lookup for .conf WWN property
11840 	 */
11841 	if (ddi_prop_lookup_string_array(DDI_DEV_T_ANY, tgt_dip,
11842 	    DDI_PROP_DONTPASS | DDI_PROP_NOTPROM, CONF_WWN_PROP,
11843 	    &conf_wwn, &count) == DDI_PROP_SUCCESS) {
11844 		ASSERT(count >= 1);
11845 
11846 		fcp_ascii_to_wwn(conf_wwn[0], barray, FC_WWN_SIZE);
11847 		ddi_prop_free(conf_wwn);
11848 		mutex_enter(&pptr->port_mutex);
11849 		if ((plun = fcp_lookup_lun(pptr, barray, lun_num)) == NULL) {
11850 			mutex_exit(&pptr->port_mutex);
11851 			return (0);
11852 		}
11853 		ptgt = plun->lun_tgt;
11854 		mutex_exit(&pptr->port_mutex);
11855 
11856 		(void) ndi_prop_update_byte_array(DDI_DEV_T_NONE,
11857 		    tgt_dip, PORT_WWN_PROP, barray, FC_WWN_SIZE);
11858 
11859 		if (!FC_TOP_EXTERNAL(pptr->port_topology) &&
11860 		    ptgt->tgt_hard_addr != 0) {
11861 			tgt_id = (uint32_t)fcp_alpa_to_switch[
11862 			    ptgt->tgt_hard_addr];
11863 		} else {
11864 			tgt_id = ptgt->tgt_d_id;
11865 		}
11866 
11867 		(void) ndi_prop_update_int(DDI_DEV_T_NONE, tgt_dip,
11868 		    TARGET_PROP, tgt_id);
11869 	}
11870 
11871 	/* get the our port-wwn property */
11872 	bytes = NULL;
11873 	if ((ddi_prop_lookup_byte_array(DDI_DEV_T_ANY, tgt_dip,
11874 	    DDI_PROP_DONTPASS | DDI_PROP_NOTPROM, PORT_WWN_PROP, &bytes,
11875 	    &nbytes) != DDI_PROP_SUCCESS) || nbytes != FC_WWN_SIZE) {
11876 		if (bytes != NULL) {
11877 			ddi_prop_free(bytes);
11878 		}
11879 		return (0);
11880 	}
11881 
11882 	for (i = 0; i < FC_WWN_SIZE; i++) {
11883 		(void) sprintf(&tbuf[i << 1], "%02x", *(bytes + i));
11884 	}
11885 
11886 	/* Stick in the address of the form "wWWN,LUN" */
11887 	numChars = snprintf(name, len, "w%s,%x", tbuf, lun_num);
11888 
11889 	ASSERT(numChars < len);
11890 	if (numChars >= len) {
11891 		fcp_log(CE_WARN, pptr->port_dip,
11892 		    "!fcp_scsi_get_name: "
11893 		    "name parameter length too small, it needs to be %d",
11894 		    numChars+1);
11895 	}
11896 
11897 	ddi_prop_free(bytes);
11898 
11899 	return (1);
11900 }
11901 
11902 
11903 /*
11904  * called by the transport to get the SCSI target id value, returning
11905  * it in "name"
11906  *
11907  * this isn't needed/used on sun4m
11908  *
11909  * return 1 for success else return 0
11910  */
11911 /* ARGSUSED */
11912 static int
11913 fcp_scsi_get_bus_addr(struct scsi_device *sd, char *name, int len)
11914 {
11915 	struct fcp_lun	*plun = ADDR2LUN(&sd->sd_address);
11916 	struct fcp_tgt	*ptgt;
11917 	int    numChars;
11918 
11919 	if (plun == NULL) {
11920 		return (0);
11921 	}
11922 
11923 	if ((ptgt = plun->lun_tgt) == NULL) {
11924 		return (0);
11925 	}
11926 
11927 	numChars = snprintf(name, len, "%x", ptgt->tgt_d_id);
11928 
11929 	ASSERT(numChars < len);
11930 	if (numChars >= len) {
11931 		fcp_log(CE_WARN, NULL,
11932 		    "!fcp_scsi_get_bus_addr: "
11933 		    "name parameter length too small, it needs to be %d",
11934 		    numChars+1);
11935 	}
11936 
11937 	return (1);
11938 }
11939 
11940 
11941 /*
11942  * called internally to reset the link where the specified port lives
11943  */
11944 static int
11945 fcp_linkreset(struct fcp_port *pptr, struct scsi_address *ap, int sleep)
11946 {
11947 	la_wwn_t		wwn;
11948 	struct fcp_lun	*plun;
11949 	struct fcp_tgt	*ptgt;
11950 
11951 	/* disable restart of lip if we're suspended */
11952 	mutex_enter(&pptr->port_mutex);
11953 
11954 	if (pptr->port_state & (FCP_STATE_SUSPENDED |
11955 	    FCP_STATE_POWER_DOWN)) {
11956 		mutex_exit(&pptr->port_mutex);
11957 		FCP_TRACE(fcp_logq, pptr->port_instbuf,
11958 		    fcp_trace, FCP_BUF_LEVEL_2, 0,
11959 		    "fcp_linkreset, fcp%d: link reset "
11960 		    "disabled due to DDI_SUSPEND",
11961 		    ddi_get_instance(pptr->port_dip));
11962 		return (FC_FAILURE);
11963 	}
11964 
11965 	if (pptr->port_state & (FCP_STATE_OFFLINE | FCP_STATE_ONLINING)) {
11966 		mutex_exit(&pptr->port_mutex);
11967 		return (FC_SUCCESS);
11968 	}
11969 
11970 	FCP_DTRACE(fcp_logq, pptr->port_instbuf,
11971 	    fcp_trace, FCP_BUF_LEVEL_8, 0, "Forcing link reset");
11972 
11973 	/*
11974 	 * If ap == NULL assume local link reset.
11975 	 */
11976 	if (FC_TOP_EXTERNAL(pptr->port_topology) && (ap != NULL)) {
11977 		plun = ADDR2LUN(ap);
11978 		ptgt = plun->lun_tgt;
11979 		bcopy(&ptgt->tgt_port_wwn.raw_wwn[0], &wwn, sizeof (wwn));
11980 	} else {
11981 		bzero((caddr_t)&wwn, sizeof (wwn));
11982 	}
11983 	mutex_exit(&pptr->port_mutex);
11984 
11985 	return (fc_ulp_linkreset(pptr->port_fp_handle, &wwn, sleep));
11986 }
11987 
11988 
11989 /*
11990  * called from fcp_port_attach() to resume a port
11991  * return DDI_* success/failure status
11992  * acquires and releases the global mutex
11993  * acquires and releases the port mutex
11994  */
11995 /*ARGSUSED*/
11996 
11997 static int
11998 fcp_handle_port_resume(opaque_t ulph, fc_ulp_port_info_t *pinfo,
11999     uint32_t s_id, fc_attach_cmd_t cmd, int instance)
12000 {
12001 	int			res = DDI_FAILURE; /* default result */
12002 	struct fcp_port	*pptr;		/* port state ptr */
12003 	uint32_t		alloc_cnt;
12004 	uint32_t		max_cnt;
12005 	fc_portmap_t		*tmp_list = NULL;
12006 
12007 	FCP_DTRACE(fcp_logq, "fcp", fcp_trace,
12008 	    FCP_BUF_LEVEL_8, 0, "port resume: for port %d",
12009 	    instance);
12010 
12011 	if ((pptr = ddi_get_soft_state(fcp_softstate, instance)) == NULL) {
12012 		cmn_err(CE_WARN, "fcp: bad soft state");
12013 		return (res);
12014 	}
12015 
12016 	mutex_enter(&pptr->port_mutex);
12017 	switch (cmd) {
12018 	case FC_CMD_RESUME:
12019 		ASSERT((pptr->port_state & FCP_STATE_POWER_DOWN) == 0);
12020 		pptr->port_state &= ~FCP_STATE_SUSPENDED;
12021 		break;
12022 
12023 	case FC_CMD_POWER_UP:
12024 		/*
12025 		 * If the port is DDI_SUSPENded, defer rediscovery
12026 		 * until DDI_RESUME occurs
12027 		 */
12028 		if (pptr->port_state & FCP_STATE_SUSPENDED) {
12029 			pptr->port_state &= ~FCP_STATE_POWER_DOWN;
12030 			mutex_exit(&pptr->port_mutex);
12031 			return (DDI_SUCCESS);
12032 		}
12033 		pptr->port_state &= ~FCP_STATE_POWER_DOWN;
12034 	}
12035 	pptr->port_id = s_id;
12036 	pptr->port_state = FCP_STATE_INIT;
12037 	mutex_exit(&pptr->port_mutex);
12038 
12039 	/*
12040 	 * Make a copy of ulp_port_info as fctl allocates
12041 	 * a temp struct.
12042 	 */
12043 	(void) fcp_cp_pinfo(pptr, pinfo);
12044 
12045 	mutex_enter(&fcp_global_mutex);
12046 	if (fcp_watchdog_init++ == 0) {
12047 		fcp_watchdog_tick = fcp_watchdog_timeout *
12048 		    drv_usectohz(1000000);
12049 		fcp_watchdog_id = timeout(fcp_watch,
12050 		    NULL, fcp_watchdog_tick);
12051 	}
12052 	mutex_exit(&fcp_global_mutex);
12053 
12054 	/*
12055 	 * Handle various topologies and link states.
12056 	 */
12057 	switch (FC_PORT_STATE_MASK(pptr->port_phys_state)) {
12058 	case FC_STATE_OFFLINE:
12059 		/*
12060 		 * Wait for ONLINE, at which time a state
12061 		 * change will cause a statec_callback
12062 		 */
12063 		res = DDI_SUCCESS;
12064 		break;
12065 
12066 	case FC_STATE_ONLINE:
12067 
12068 		if (pptr->port_topology == FC_TOP_UNKNOWN) {
12069 			(void) fcp_linkreset(pptr, NULL, KM_NOSLEEP);
12070 			res = DDI_SUCCESS;
12071 			break;
12072 		}
12073 
12074 		if (FC_TOP_EXTERNAL(pptr->port_topology) &&
12075 		    !fcp_enable_auto_configuration) {
12076 			tmp_list = fcp_construct_map(pptr, &alloc_cnt);
12077 			if (tmp_list == NULL) {
12078 				if (!alloc_cnt) {
12079 					res = DDI_SUCCESS;
12080 				}
12081 				break;
12082 			}
12083 			max_cnt = alloc_cnt;
12084 		} else {
12085 			ASSERT(pptr->port_topology != FC_TOP_UNKNOWN);
12086 
12087 			alloc_cnt = FCP_MAX_DEVICES;
12088 
12089 			if ((tmp_list = (fc_portmap_t *)kmem_zalloc(
12090 			    (sizeof (fc_portmap_t)) * alloc_cnt,
12091 			    KM_NOSLEEP)) == NULL) {
12092 				fcp_log(CE_WARN, pptr->port_dip,
12093 				    "!fcp%d: failed to allocate portmap",
12094 				    instance);
12095 				break;
12096 			}
12097 
12098 			max_cnt = alloc_cnt;
12099 			if ((res = fc_ulp_getportmap(pptr->port_fp_handle,
12100 			    &tmp_list, &max_cnt, FC_ULP_PLOGI_PRESERVE)) !=
12101 			    FC_SUCCESS) {
12102 				caddr_t msg;
12103 
12104 				(void) fc_ulp_error(res, &msg);
12105 
12106 				FCP_TRACE(fcp_logq, pptr->port_instbuf,
12107 				    fcp_trace, FCP_BUF_LEVEL_2, 0,
12108 				    "resume failed getportmap: reason=0x%x",
12109 				    res);
12110 
12111 				fcp_log(CE_WARN, pptr->port_dip,
12112 				    "!failed to get port map : %s", msg);
12113 				break;
12114 			}
12115 			if (max_cnt > alloc_cnt) {
12116 				alloc_cnt = max_cnt;
12117 			}
12118 		}
12119 
12120 		/*
12121 		 * do the SCSI device discovery and create
12122 		 * the devinfos
12123 		 */
12124 		fcp_statec_callback(ulph, pptr->port_fp_handle,
12125 		    pptr->port_phys_state, pptr->port_topology, tmp_list,
12126 		    max_cnt, pptr->port_id);
12127 
12128 		res = DDI_SUCCESS;
12129 		break;
12130 
12131 	default:
12132 		fcp_log(CE_WARN, pptr->port_dip,
12133 		    "!fcp%d: invalid port state at attach=0x%x",
12134 		    instance, pptr->port_phys_state);
12135 
12136 		mutex_enter(&pptr->port_mutex);
12137 		pptr->port_phys_state = FCP_STATE_OFFLINE;
12138 		mutex_exit(&pptr->port_mutex);
12139 		res = DDI_SUCCESS;
12140 
12141 		break;
12142 	}
12143 
12144 	if (tmp_list != NULL) {
12145 		kmem_free(tmp_list, sizeof (fc_portmap_t) * alloc_cnt);
12146 	}
12147 
12148 	return (res);
12149 }
12150 
12151 
12152 static void
12153 fcp_cp_pinfo(struct fcp_port *pptr, fc_ulp_port_info_t *pinfo)
12154 {
12155 	pptr->port_fp_modlinkage = *pinfo->port_linkage;
12156 	pptr->port_dip = pinfo->port_dip;
12157 	pptr->port_fp_handle = pinfo->port_handle;
12158 	pptr->port_data_dma_attr = *pinfo->port_data_dma_attr;
12159 	pptr->port_cmd_dma_attr = *pinfo->port_cmd_dma_attr;
12160 	pptr->port_resp_dma_attr = *pinfo->port_resp_dma_attr;
12161 	pptr->port_dma_acc_attr = *pinfo->port_acc_attr;
12162 	pptr->port_priv_pkt_len = pinfo->port_fca_pkt_size;
12163 	pptr->port_max_exch = pinfo->port_fca_max_exch;
12164 	pptr->port_phys_state = pinfo->port_state;
12165 	pptr->port_topology = pinfo->port_flags;
12166 	pptr->port_reset_action = pinfo->port_reset_action;
12167 	pptr->port_cmds_dma_flags = pinfo->port_dma_behavior;
12168 	pptr->port_fcp_dma = pinfo->port_fcp_dma;
12169 	bcopy(&pinfo->port_nwwn, &pptr->port_nwwn, sizeof (la_wwn_t));
12170 	bcopy(&pinfo->port_pwwn, &pptr->port_pwwn, sizeof (la_wwn_t));
12171 }
12172 
12173 /*
12174  * If the elements wait field is set to 1 then
12175  * another thread is waiting for the operation to complete. Once
12176  * it is complete, the waiting thread is signaled and the element is
12177  * freed by the waiting thread. If the elements wait field is set to 0
12178  * the element is freed.
12179  */
12180 static void
12181 fcp_process_elem(struct fcp_hp_elem *elem, int result)
12182 {
12183 	ASSERT(elem != NULL);
12184 	mutex_enter(&elem->mutex);
12185 	elem->result = result;
12186 	if (elem->wait) {
12187 		elem->wait = 0;
12188 		cv_signal(&elem->cv);
12189 		mutex_exit(&elem->mutex);
12190 	} else {
12191 		mutex_exit(&elem->mutex);
12192 		cv_destroy(&elem->cv);
12193 		mutex_destroy(&elem->mutex);
12194 		kmem_free(elem, sizeof (struct fcp_hp_elem));
12195 	}
12196 }
12197 
12198 /*
12199  * This function is invoked from the taskq thread to allocate
12200  * devinfo nodes and to online/offline them.
12201  */
12202 static void
12203 fcp_hp_task(void *arg)
12204 {
12205 	struct fcp_hp_elem	*elem = (struct fcp_hp_elem *)arg;
12206 	struct fcp_lun	*plun = elem->lun;
12207 	struct fcp_port		*pptr = elem->port;
12208 	int			result;
12209 
12210 	ASSERT(elem->what == FCP_ONLINE ||
12211 	    elem->what == FCP_OFFLINE ||
12212 	    elem->what == FCP_MPXIO_PATH_CLEAR_BUSY ||
12213 	    elem->what == FCP_MPXIO_PATH_SET_BUSY);
12214 
12215 	mutex_enter(&pptr->port_mutex);
12216 	mutex_enter(&plun->lun_mutex);
12217 	if (((elem->what == FCP_ONLINE || elem->what == FCP_OFFLINE) &&
12218 	    plun->lun_event_count != elem->event_cnt) ||
12219 	    pptr->port_state & (FCP_STATE_SUSPENDED |
12220 	    FCP_STATE_DETACHING | FCP_STATE_POWER_DOWN)) {
12221 		mutex_exit(&plun->lun_mutex);
12222 		mutex_exit(&pptr->port_mutex);
12223 		fcp_process_elem(elem, NDI_FAILURE);
12224 		return;
12225 	}
12226 	mutex_exit(&plun->lun_mutex);
12227 	mutex_exit(&pptr->port_mutex);
12228 
12229 	result = fcp_trigger_lun(plun, elem->cip, elem->old_lun_mpxio,
12230 	    elem->what, elem->link_cnt, elem->tgt_cnt, elem->flags);
12231 	fcp_process_elem(elem, result);
12232 }
12233 
12234 
12235 static child_info_t *
12236 fcp_get_cip(struct fcp_lun *plun, child_info_t *cip, int lcount,
12237     int tcount)
12238 {
12239 	ASSERT(MUTEX_HELD(&plun->lun_mutex));
12240 
12241 	if (fcp_is_child_present(plun, cip) == FC_FAILURE) {
12242 		struct fcp_port *pptr = plun->lun_tgt->tgt_port;
12243 
12244 		ASSERT(MUTEX_HELD(&pptr->port_mutex));
12245 		/*
12246 		 * Child has not been created yet. Create the child device
12247 		 * based on the per-Lun flags.
12248 		 */
12249 		if (pptr->port_mpxio == 0 || plun->lun_mpxio == 0) {
12250 			plun->lun_cip =
12251 			    CIP(fcp_create_dip(plun, lcount, tcount));
12252 			plun->lun_mpxio = 0;
12253 		} else {
12254 			plun->lun_cip =
12255 			    CIP(fcp_create_pip(plun, lcount, tcount));
12256 			plun->lun_mpxio = 1;
12257 		}
12258 	} else {
12259 		plun->lun_cip = cip;
12260 	}
12261 
12262 	return (plun->lun_cip);
12263 }
12264 
12265 
12266 static int
12267 fcp_is_dip_present(struct fcp_lun *plun, dev_info_t *cdip)
12268 {
12269 	int		rval = FC_FAILURE;
12270 	dev_info_t	*pdip;
12271 	struct dev_info	*dip;
12272 	int		circular;
12273 
12274 	ASSERT(MUTEX_HELD(&plun->lun_mutex));
12275 
12276 	pdip = plun->lun_tgt->tgt_port->port_dip;
12277 
12278 	if (plun->lun_cip == NULL) {
12279 		FCP_TRACE(fcp_logq, LUN_PORT->port_instbuf,
12280 		    fcp_trace, FCP_BUF_LEVEL_3, 0,
12281 		    "fcp_is_dip_present: plun->lun_cip is NULL: "
12282 		    "plun: %p lun state: %x num: %d target state: %x",
12283 		    plun, plun->lun_state, plun->lun_num,
12284 		    plun->lun_tgt->tgt_port->port_state);
12285 		return (rval);
12286 	}
12287 	ndi_devi_enter(pdip, &circular);
12288 	dip = DEVI(pdip)->devi_child;
12289 	while (dip) {
12290 		if (dip == DEVI(cdip)) {
12291 			rval = FC_SUCCESS;
12292 			break;
12293 		}
12294 		dip = dip->devi_sibling;
12295 	}
12296 	ndi_devi_exit(pdip, circular);
12297 	return (rval);
12298 }
12299 
12300 static int
12301 fcp_is_child_present(struct fcp_lun *plun, child_info_t *cip)
12302 {
12303 	int		rval = FC_FAILURE;
12304 
12305 	ASSERT(plun != NULL);
12306 	ASSERT(MUTEX_HELD(&plun->lun_mutex));
12307 
12308 	if (plun->lun_mpxio == 0) {
12309 		rval = fcp_is_dip_present(plun, DIP(cip));
12310 	} else {
12311 		rval = fcp_is_pip_present(plun, PIP(cip));
12312 	}
12313 
12314 	return (rval);
12315 }
12316 
12317 /*
12318  *     Function: fcp_create_dip
12319  *
12320  *  Description: Creates a dev_info_t structure for the LUN specified by the
12321  *		 caller.
12322  *
12323  *     Argument: plun		Lun structure
12324  *		 link_cnt	Link state count.
12325  *		 tgt_cnt	Target state change count.
12326  *
12327  * Return Value: NULL if it failed
12328  *		 dev_info_t structure address if it succeeded
12329  *
12330  *	Context: Kernel context
12331  */
12332 static dev_info_t *
12333 fcp_create_dip(struct fcp_lun *plun, int link_cnt, int tgt_cnt)
12334 {
12335 	int			failure = 0;
12336 	uint32_t		tgt_id;
12337 	uint64_t		sam_lun;
12338 	struct fcp_tgt	*ptgt = plun->lun_tgt;
12339 	struct fcp_port	*pptr = ptgt->tgt_port;
12340 	dev_info_t		*pdip = pptr->port_dip;
12341 	dev_info_t		*cdip = NULL;
12342 	dev_info_t		*old_dip = DIP(plun->lun_cip);
12343 	char			*nname = NULL;
12344 	char			**compatible = NULL;
12345 	int			ncompatible;
12346 	char			*scsi_binding_set;
12347 	char			t_pwwn[17];
12348 
12349 	ASSERT(MUTEX_HELD(&plun->lun_mutex));
12350 	ASSERT(MUTEX_HELD(&pptr->port_mutex));
12351 
12352 	/* get the 'scsi-binding-set' property */
12353 	if (ddi_prop_lookup_string(DDI_DEV_T_ANY, pdip,
12354 	    DDI_PROP_NOTPROM | DDI_PROP_DONTPASS, "scsi-binding-set",
12355 	    &scsi_binding_set) != DDI_PROP_SUCCESS) {
12356 		scsi_binding_set = NULL;
12357 	}
12358 
12359 	/* determine the node name and compatible */
12360 	scsi_hba_nodename_compatible_get(&plun->lun_inq, scsi_binding_set,
12361 	    plun->lun_inq.inq_dtype, NULL, &nname, &compatible, &ncompatible);
12362 	if (scsi_binding_set) {
12363 		ddi_prop_free(scsi_binding_set);
12364 	}
12365 
12366 	if (nname == NULL) {
12367 #ifdef	DEBUG
12368 		cmn_err(CE_WARN, "%s%d: no driver for "
12369 		    "device @w%02x%02x%02x%02x%02x%02x%02x%02x,%d:"
12370 		    "	 compatible: %s",
12371 		    ddi_driver_name(pdip), ddi_get_instance(pdip),
12372 		    ptgt->tgt_port_wwn.raw_wwn[0],
12373 		    ptgt->tgt_port_wwn.raw_wwn[1],
12374 		    ptgt->tgt_port_wwn.raw_wwn[2],
12375 		    ptgt->tgt_port_wwn.raw_wwn[3],
12376 		    ptgt->tgt_port_wwn.raw_wwn[4],
12377 		    ptgt->tgt_port_wwn.raw_wwn[5],
12378 		    ptgt->tgt_port_wwn.raw_wwn[6],
12379 		    ptgt->tgt_port_wwn.raw_wwn[7], plun->lun_num,
12380 		    *compatible);
12381 #endif	/* DEBUG */
12382 		failure++;
12383 		goto end_of_fcp_create_dip;
12384 	}
12385 
12386 	cdip = fcp_find_existing_dip(plun, pdip, nname);
12387 
12388 	/*
12389 	 * if the old_dip does not match the cdip, that means there is
12390 	 * some property change. since we'll be using the cdip, we need
12391 	 * to offline the old_dip. If the state contains FCP_LUN_CHANGED
12392 	 * then the dtype for the device has been updated. Offline the
12393 	 * the old device and create a new device with the new device type
12394 	 * Refer to bug: 4764752
12395 	 */
12396 	if (old_dip && (cdip != old_dip ||
12397 	    plun->lun_state & FCP_LUN_CHANGED)) {
12398 		plun->lun_state &= ~(FCP_LUN_INIT);
12399 		mutex_exit(&plun->lun_mutex);
12400 		mutex_exit(&pptr->port_mutex);
12401 
12402 		mutex_enter(&ptgt->tgt_mutex);
12403 		(void) fcp_pass_to_hp(pptr, plun, CIP(old_dip), FCP_OFFLINE,
12404 		    link_cnt, tgt_cnt, NDI_DEVI_REMOVE, 0);
12405 		mutex_exit(&ptgt->tgt_mutex);
12406 
12407 #ifdef DEBUG
12408 		if (cdip != NULL) {
12409 			FCP_TRACE(fcp_logq, pptr->port_instbuf,
12410 			    fcp_trace, FCP_BUF_LEVEL_2, 0,
12411 			    "Old dip=%p; New dip=%p don't match", old_dip,
12412 			    cdip);
12413 		} else {
12414 			FCP_TRACE(fcp_logq, pptr->port_instbuf,
12415 			    fcp_trace, FCP_BUF_LEVEL_2, 0,
12416 			    "Old dip=%p; New dip=NULL don't match", old_dip);
12417 		}
12418 #endif
12419 
12420 		mutex_enter(&pptr->port_mutex);
12421 		mutex_enter(&plun->lun_mutex);
12422 	}
12423 
12424 	if (cdip == NULL || plun->lun_state & FCP_LUN_CHANGED) {
12425 		plun->lun_state &= ~(FCP_LUN_CHANGED);
12426 		if (ndi_devi_alloc(pptr->port_dip, nname,
12427 		    DEVI_SID_NODEID, &cdip) != NDI_SUCCESS) {
12428 			failure++;
12429 			goto end_of_fcp_create_dip;
12430 		}
12431 	}
12432 
12433 	/*
12434 	 * Previously all the properties for the devinfo were destroyed here
12435 	 * with a call to ndi_prop_remove_all(). Since this may cause loss of
12436 	 * the devid property (and other properties established by the target
12437 	 * driver or framework) which the code does not always recreate, this
12438 	 * call was removed.
12439 	 * This opens a theoretical possibility that we may return with a
12440 	 * stale devid on the node if the scsi entity behind the fibre channel
12441 	 * lun has changed.
12442 	 */
12443 
12444 	/* decorate the node with compatible */
12445 	if (ndi_prop_update_string_array(DDI_DEV_T_NONE, cdip,
12446 	    "compatible", compatible, ncompatible) != DDI_PROP_SUCCESS) {
12447 		failure++;
12448 		goto end_of_fcp_create_dip;
12449 	}
12450 
12451 	if (ndi_prop_update_byte_array(DDI_DEV_T_NONE, cdip, NODE_WWN_PROP,
12452 	    ptgt->tgt_node_wwn.raw_wwn, FC_WWN_SIZE) != DDI_PROP_SUCCESS) {
12453 		failure++;
12454 		goto end_of_fcp_create_dip;
12455 	}
12456 
12457 	if (ndi_prop_update_byte_array(DDI_DEV_T_NONE, cdip, PORT_WWN_PROP,
12458 	    ptgt->tgt_port_wwn.raw_wwn, FC_WWN_SIZE) != DDI_PROP_SUCCESS) {
12459 		failure++;
12460 		goto end_of_fcp_create_dip;
12461 	}
12462 
12463 	fcp_wwn_to_ascii(ptgt->tgt_port_wwn.raw_wwn, t_pwwn);
12464 	t_pwwn[16] = '\0';
12465 	if (ndi_prop_update_string(DDI_DEV_T_NONE, cdip, TGT_PORT_PROP, t_pwwn)
12466 	    != DDI_PROP_SUCCESS) {
12467 		failure++;
12468 		goto end_of_fcp_create_dip;
12469 	}
12470 
12471 	/*
12472 	 * If there is no hard address - We might have to deal with
12473 	 * that by using WWN - Having said that it is important to
12474 	 * recognize this problem early so ssd can be informed of
12475 	 * the right interconnect type.
12476 	 */
12477 	if (!FC_TOP_EXTERNAL(pptr->port_topology) && ptgt->tgt_hard_addr != 0) {
12478 		tgt_id = (uint32_t)fcp_alpa_to_switch[ptgt->tgt_hard_addr];
12479 	} else {
12480 		tgt_id = ptgt->tgt_d_id;
12481 	}
12482 
12483 	if (ndi_prop_update_int(DDI_DEV_T_NONE, cdip, TARGET_PROP,
12484 	    tgt_id) != DDI_PROP_SUCCESS) {
12485 		failure++;
12486 		goto end_of_fcp_create_dip;
12487 	}
12488 
12489 	if (ndi_prop_update_int(DDI_DEV_T_NONE, cdip, LUN_PROP,
12490 	    (int)plun->lun_num) != DDI_PROP_SUCCESS) {
12491 		failure++;
12492 		goto end_of_fcp_create_dip;
12493 	}
12494 	bcopy(&plun->lun_addr, &sam_lun, FCP_LUN_SIZE);
12495 	if (ndi_prop_update_int64(DDI_DEV_T_NONE, cdip, SAM_LUN_PROP,
12496 	    sam_lun) != DDI_PROP_SUCCESS) {
12497 		failure++;
12498 		goto end_of_fcp_create_dip;
12499 	}
12500 
12501 end_of_fcp_create_dip:
12502 	scsi_hba_nodename_compatible_free(nname, compatible);
12503 
12504 	if (cdip != NULL && failure) {
12505 		(void) ndi_prop_remove_all(cdip);
12506 		(void) ndi_devi_free(cdip);
12507 		cdip = NULL;
12508 	}
12509 
12510 	return (cdip);
12511 }
12512 
12513 /*
12514  *     Function: fcp_create_pip
12515  *
12516  *  Description: Creates a Path Id for the LUN specified by the caller.
12517  *
12518  *     Argument: plun		Lun structure
12519  *		 link_cnt	Link state count.
12520  *		 tgt_cnt	Target state count.
12521  *
12522  * Return Value: NULL if it failed
12523  *		 mdi_pathinfo_t structure address if it succeeded
12524  *
12525  *	Context: Kernel context
12526  */
12527 static mdi_pathinfo_t *
12528 fcp_create_pip(struct fcp_lun *plun, int lcount, int tcount)
12529 {
12530 	int			i;
12531 	char			buf[MAXNAMELEN];
12532 	char			uaddr[MAXNAMELEN];
12533 	int			failure = 0;
12534 	uint32_t		tgt_id;
12535 	uint64_t		sam_lun;
12536 	struct fcp_tgt	*ptgt = plun->lun_tgt;
12537 	struct fcp_port	*pptr = ptgt->tgt_port;
12538 	dev_info_t		*pdip = pptr->port_dip;
12539 	mdi_pathinfo_t		*pip = NULL;
12540 	mdi_pathinfo_t		*old_pip = PIP(plun->lun_cip);
12541 	char			*nname = NULL;
12542 	char			**compatible = NULL;
12543 	int			ncompatible;
12544 	char			*scsi_binding_set;
12545 	char			t_pwwn[17];
12546 
12547 	ASSERT(MUTEX_HELD(&plun->lun_mutex));
12548 	ASSERT(MUTEX_HELD(&pptr->port_mutex));
12549 
12550 	scsi_binding_set = "vhci";
12551 
12552 	/* determine the node name and compatible */
12553 	scsi_hba_nodename_compatible_get(&plun->lun_inq, scsi_binding_set,
12554 	    plun->lun_inq.inq_dtype, NULL, &nname, &compatible, &ncompatible);
12555 
12556 	if (nname == NULL) {
12557 #ifdef	DEBUG
12558 		cmn_err(CE_WARN, "fcp_create_dip: %s%d: no driver for "
12559 		    "device @w%02x%02x%02x%02x%02x%02x%02x%02x,%d:"
12560 		    "	 compatible: %s",
12561 		    ddi_driver_name(pdip), ddi_get_instance(pdip),
12562 		    ptgt->tgt_port_wwn.raw_wwn[0],
12563 		    ptgt->tgt_port_wwn.raw_wwn[1],
12564 		    ptgt->tgt_port_wwn.raw_wwn[2],
12565 		    ptgt->tgt_port_wwn.raw_wwn[3],
12566 		    ptgt->tgt_port_wwn.raw_wwn[4],
12567 		    ptgt->tgt_port_wwn.raw_wwn[5],
12568 		    ptgt->tgt_port_wwn.raw_wwn[6],
12569 		    ptgt->tgt_port_wwn.raw_wwn[7], plun->lun_num,
12570 		    *compatible);
12571 #endif	/* DEBUG */
12572 		failure++;
12573 		goto end_of_fcp_create_pip;
12574 	}
12575 
12576 	pip = fcp_find_existing_pip(plun, pdip);
12577 
12578 	/*
12579 	 * if the old_dip does not match the cdip, that means there is
12580 	 * some property change. since we'll be using the cdip, we need
12581 	 * to offline the old_dip. If the state contains FCP_LUN_CHANGED
12582 	 * then the dtype for the device has been updated. Offline the
12583 	 * the old device and create a new device with the new device type
12584 	 * Refer to bug: 4764752
12585 	 */
12586 	if (old_pip && (pip != old_pip ||
12587 	    plun->lun_state & FCP_LUN_CHANGED)) {
12588 		plun->lun_state &= ~(FCP_LUN_INIT);
12589 		mutex_exit(&plun->lun_mutex);
12590 		mutex_exit(&pptr->port_mutex);
12591 
12592 		mutex_enter(&ptgt->tgt_mutex);
12593 		(void) fcp_pass_to_hp(pptr, plun, CIP(old_pip),
12594 		    FCP_OFFLINE, lcount, tcount,
12595 		    NDI_DEVI_REMOVE, 0);
12596 		mutex_exit(&ptgt->tgt_mutex);
12597 
12598 		if (pip != NULL) {
12599 			FCP_TRACE(fcp_logq, pptr->port_instbuf,
12600 			    fcp_trace, FCP_BUF_LEVEL_2, 0,
12601 			    "Old pip=%p; New pip=%p don't match",
12602 			    old_pip, pip);
12603 		} else {
12604 			FCP_TRACE(fcp_logq, pptr->port_instbuf,
12605 			    fcp_trace, FCP_BUF_LEVEL_2, 0,
12606 			    "Old pip=%p; New pip=NULL don't match",
12607 			    old_pip);
12608 		}
12609 
12610 		mutex_enter(&pptr->port_mutex);
12611 		mutex_enter(&plun->lun_mutex);
12612 	}
12613 
12614 	/*
12615 	 * Since FC_WWN_SIZE is 8 bytes and its not like the
12616 	 * lun_guid_size which is dependent on the target, I don't
12617 	 * believe the same trancation happens here UNLESS the standards
12618 	 * change the FC_WWN_SIZE value to something larger than
12619 	 * MAXNAMELEN(currently 255 bytes).
12620 	 */
12621 
12622 	for (i = 0; i < FC_WWN_SIZE; i++) {
12623 		(void) sprintf(&buf[i << 1], "%02x",
12624 		    ptgt->tgt_port_wwn.raw_wwn[i]);
12625 	}
12626 
12627 	(void) snprintf(uaddr, MAXNAMELEN, "w%s,%x",
12628 	    buf, plun->lun_num);
12629 
12630 	if (pip == NULL || plun->lun_state & FCP_LUN_CHANGED) {
12631 		/*
12632 		 * Release the locks before calling into
12633 		 * mdi_pi_alloc_compatible() since this can result in a
12634 		 * callback into fcp which can result in a deadlock
12635 		 * (see bug # 4870272).
12636 		 *
12637 		 * Basically, what we are trying to avoid is the scenario where
12638 		 * one thread does ndi_devi_enter() and tries to grab
12639 		 * fcp_mutex and another does it the other way round.
12640 		 *
12641 		 * But before we do that, make sure that nobody releases the
12642 		 * port in the meantime. We can do this by setting a flag.
12643 		 */
12644 		plun->lun_state &= ~(FCP_LUN_CHANGED);
12645 		pptr->port_state |= FCP_STATE_IN_MDI;
12646 		mutex_exit(&plun->lun_mutex);
12647 		mutex_exit(&pptr->port_mutex);
12648 		if (mdi_pi_alloc_compatible(pdip, nname, plun->lun_guid,
12649 		    uaddr, compatible, ncompatible, 0, &pip) != MDI_SUCCESS) {
12650 			fcp_log(CE_WARN, pptr->port_dip,
12651 			    "!path alloc failed:0x%x", plun);
12652 			mutex_enter(&pptr->port_mutex);
12653 			mutex_enter(&plun->lun_mutex);
12654 			pptr->port_state &= ~FCP_STATE_IN_MDI;
12655 			failure++;
12656 			goto end_of_fcp_create_pip;
12657 		}
12658 		mutex_enter(&pptr->port_mutex);
12659 		mutex_enter(&plun->lun_mutex);
12660 		pptr->port_state &= ~FCP_STATE_IN_MDI;
12661 	} else {
12662 		(void) mdi_prop_remove(pip, NULL);
12663 	}
12664 
12665 	mdi_pi_set_phci_private(pip, (caddr_t)plun);
12666 
12667 	if (mdi_prop_update_byte_array(pip, NODE_WWN_PROP,
12668 	    ptgt->tgt_node_wwn.raw_wwn, FC_WWN_SIZE)
12669 	    != DDI_PROP_SUCCESS) {
12670 		failure++;
12671 		goto end_of_fcp_create_pip;
12672 	}
12673 
12674 	if (mdi_prop_update_byte_array(pip, PORT_WWN_PROP,
12675 	    ptgt->tgt_port_wwn.raw_wwn, FC_WWN_SIZE)
12676 	    != DDI_PROP_SUCCESS) {
12677 		failure++;
12678 		goto end_of_fcp_create_pip;
12679 	}
12680 
12681 	fcp_wwn_to_ascii(ptgt->tgt_port_wwn.raw_wwn, t_pwwn);
12682 	t_pwwn[16] = '\0';
12683 	if (mdi_prop_update_string(pip, TGT_PORT_PROP, t_pwwn)
12684 	    != DDI_PROP_SUCCESS) {
12685 		failure++;
12686 		goto end_of_fcp_create_pip;
12687 	}
12688 
12689 	/*
12690 	 * If there is no hard address - We might have to deal with
12691 	 * that by using WWN - Having said that it is important to
12692 	 * recognize this problem early so ssd can be informed of
12693 	 * the right interconnect type.
12694 	 */
12695 	if (!FC_TOP_EXTERNAL(pptr->port_topology) &&
12696 	    ptgt->tgt_hard_addr != 0) {
12697 		tgt_id = (uint32_t)
12698 		    fcp_alpa_to_switch[ptgt->tgt_hard_addr];
12699 	} else {
12700 		tgt_id = ptgt->tgt_d_id;
12701 	}
12702 
12703 	if (mdi_prop_update_int(pip, TARGET_PROP, tgt_id)
12704 	    != DDI_PROP_SUCCESS) {
12705 		failure++;
12706 		goto end_of_fcp_create_pip;
12707 	}
12708 
12709 	if (mdi_prop_update_int(pip, LUN_PROP, (int)plun->lun_num)
12710 	    != DDI_PROP_SUCCESS) {
12711 		failure++;
12712 		goto end_of_fcp_create_pip;
12713 	}
12714 	bcopy(&plun->lun_addr, &sam_lun, FCP_LUN_SIZE);
12715 	if (mdi_prop_update_int64(pip, SAM_LUN_PROP, sam_lun)
12716 	    != DDI_PROP_SUCCESS) {
12717 		failure++;
12718 		goto end_of_fcp_create_pip;
12719 	}
12720 
12721 end_of_fcp_create_pip:
12722 	scsi_hba_nodename_compatible_free(nname, compatible);
12723 
12724 	if (pip != NULL && failure) {
12725 		(void) mdi_prop_remove(pip, NULL);
12726 		mutex_exit(&plun->lun_mutex);
12727 		mutex_exit(&pptr->port_mutex);
12728 		(void) mdi_pi_free(pip, 0);
12729 		mutex_enter(&pptr->port_mutex);
12730 		mutex_enter(&plun->lun_mutex);
12731 		pip = NULL;
12732 	}
12733 
12734 	return (pip);
12735 }
12736 
12737 static dev_info_t *
12738 fcp_find_existing_dip(struct fcp_lun *plun, dev_info_t *pdip, caddr_t name)
12739 {
12740 	uint_t			nbytes;
12741 	uchar_t			*bytes;
12742 	uint_t			nwords;
12743 	uint32_t		tgt_id;
12744 	int			*words;
12745 	dev_info_t		*cdip;
12746 	dev_info_t		*ndip;
12747 	struct fcp_tgt	*ptgt = plun->lun_tgt;
12748 	struct fcp_port	*pptr = ptgt->tgt_port;
12749 	int			circular;
12750 
12751 	ndi_devi_enter(pdip, &circular);
12752 
12753 	ndip = (dev_info_t *)DEVI(pdip)->devi_child;
12754 	while ((cdip = ndip) != NULL) {
12755 		ndip = (dev_info_t *)DEVI(cdip)->devi_sibling;
12756 
12757 		if (strcmp(DEVI(cdip)->devi_node_name, name)) {
12758 			continue;
12759 		}
12760 
12761 		if (ddi_prop_lookup_byte_array(DDI_DEV_T_ANY, cdip,
12762 		    DDI_PROP_DONTPASS | DDI_PROP_NOTPROM, NODE_WWN_PROP, &bytes,
12763 		    &nbytes) != DDI_PROP_SUCCESS) {
12764 			continue;
12765 		}
12766 
12767 		if (nbytes != FC_WWN_SIZE || bytes == NULL) {
12768 			if (bytes != NULL) {
12769 				ddi_prop_free(bytes);
12770 			}
12771 			continue;
12772 		}
12773 		ASSERT(bytes != NULL);
12774 
12775 		if (bcmp(bytes, ptgt->tgt_node_wwn.raw_wwn, nbytes) != 0) {
12776 			ddi_prop_free(bytes);
12777 			continue;
12778 		}
12779 
12780 		ddi_prop_free(bytes);
12781 
12782 		if (ddi_prop_lookup_byte_array(DDI_DEV_T_ANY, cdip,
12783 		    DDI_PROP_DONTPASS | DDI_PROP_NOTPROM, PORT_WWN_PROP, &bytes,
12784 		    &nbytes) != DDI_PROP_SUCCESS) {
12785 			continue;
12786 		}
12787 
12788 		if (nbytes != FC_WWN_SIZE || bytes == NULL) {
12789 			if (bytes != NULL) {
12790 				ddi_prop_free(bytes);
12791 			}
12792 			continue;
12793 		}
12794 		ASSERT(bytes != NULL);
12795 
12796 		if (bcmp(bytes, ptgt->tgt_port_wwn.raw_wwn, nbytes) != 0) {
12797 			ddi_prop_free(bytes);
12798 			continue;
12799 		}
12800 
12801 		ddi_prop_free(bytes);
12802 
12803 		if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY, cdip,
12804 		    DDI_PROP_DONTPASS | DDI_PROP_NOTPROM, TARGET_PROP, &words,
12805 		    &nwords) != DDI_PROP_SUCCESS) {
12806 			continue;
12807 		}
12808 
12809 		if (nwords != 1 || words == NULL) {
12810 			if (words != NULL) {
12811 				ddi_prop_free(words);
12812 			}
12813 			continue;
12814 		}
12815 		ASSERT(words != NULL);
12816 
12817 		/*
12818 		 * If there is no hard address - We might have to deal with
12819 		 * that by using WWN - Having said that it is important to
12820 		 * recognize this problem early so ssd can be informed of
12821 		 * the right interconnect type.
12822 		 */
12823 		if (!FC_TOP_EXTERNAL(pptr->port_topology) &&
12824 		    ptgt->tgt_hard_addr != 0) {
12825 			tgt_id =
12826 			    (uint32_t)fcp_alpa_to_switch[ptgt->tgt_hard_addr];
12827 		} else {
12828 			tgt_id = ptgt->tgt_d_id;
12829 		}
12830 
12831 		if (tgt_id != (uint32_t)*words) {
12832 			ddi_prop_free(words);
12833 			continue;
12834 		}
12835 		ddi_prop_free(words);
12836 
12837 		if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY, cdip,
12838 		    DDI_PROP_DONTPASS | DDI_PROP_NOTPROM, LUN_PROP, &words,
12839 		    &nwords) != DDI_PROP_SUCCESS) {
12840 			continue;
12841 		}
12842 
12843 		if (nwords != 1 || words == NULL) {
12844 			if (words != NULL) {
12845 				ddi_prop_free(words);
12846 			}
12847 			continue;
12848 		}
12849 		ASSERT(words != NULL);
12850 
12851 		if (plun->lun_num == (uint16_t)*words) {
12852 			ddi_prop_free(words);
12853 			break;
12854 		}
12855 		ddi_prop_free(words);
12856 	}
12857 	ndi_devi_exit(pdip, circular);
12858 
12859 	return (cdip);
12860 }
12861 
12862 
12863 static int
12864 fcp_is_pip_present(struct fcp_lun *plun, mdi_pathinfo_t *pip)
12865 {
12866 	dev_info_t	*pdip;
12867 	char		buf[MAXNAMELEN];
12868 	char		uaddr[MAXNAMELEN];
12869 	int		rval = FC_FAILURE;
12870 
12871 	ASSERT(MUTEX_HELD(&plun->lun_mutex));
12872 
12873 	pdip = plun->lun_tgt->tgt_port->port_dip;
12874 
12875 	/*
12876 	 * Check if pip (and not plun->lun_cip) is NULL. plun->lun_cip can be
12877 	 * non-NULL even when the LUN is not there as in the case when a LUN is
12878 	 * configured and then deleted on the device end (for T3/T4 case). In
12879 	 * such cases, pip will be NULL.
12880 	 *
12881 	 * If the device generates an RSCN, it will end up getting offlined when
12882 	 * it disappeared and a new LUN will get created when it is rediscovered
12883 	 * on the device. If we check for lun_cip here, the LUN will not end
12884 	 * up getting onlined since this function will end up returning a
12885 	 * FC_SUCCESS.
12886 	 *
12887 	 * The behavior is different on other devices. For instance, on a HDS,
12888 	 * there was no RSCN generated by the device but the next I/O generated
12889 	 * a check condition and rediscovery got triggered that way. So, in
12890 	 * such cases, this path will not be exercised
12891 	 */
12892 	if (pip == NULL) {
12893 		FCP_TRACE(fcp_logq, LUN_PORT->port_instbuf,
12894 		    fcp_trace, FCP_BUF_LEVEL_4, 0,
12895 		    "fcp_is_pip_present: plun->lun_cip is NULL: "
12896 		    "plun: %p lun state: %x num: %d target state: %x",
12897 		    plun, plun->lun_state, plun->lun_num,
12898 		    plun->lun_tgt->tgt_port->port_state);
12899 		return (rval);
12900 	}
12901 
12902 	fcp_wwn_to_ascii(plun->lun_tgt->tgt_port_wwn.raw_wwn, buf);
12903 
12904 	(void) snprintf(uaddr, MAXNAMELEN, "w%s,%x", buf, plun->lun_num);
12905 
12906 	if (plun->lun_old_guid) {
12907 		if (mdi_pi_find(pdip, plun->lun_old_guid, uaddr) == pip) {
12908 			rval = FC_SUCCESS;
12909 		}
12910 	} else {
12911 		if (mdi_pi_find(pdip, plun->lun_guid, uaddr) == pip) {
12912 			rval = FC_SUCCESS;
12913 		}
12914 	}
12915 	return (rval);
12916 }
12917 
12918 static mdi_pathinfo_t *
12919 fcp_find_existing_pip(struct fcp_lun *plun, dev_info_t *pdip)
12920 {
12921 	char			buf[MAXNAMELEN];
12922 	char			uaddr[MAXNAMELEN];
12923 	mdi_pathinfo_t		*pip;
12924 	struct fcp_tgt	*ptgt = plun->lun_tgt;
12925 	struct fcp_port	*pptr = ptgt->tgt_port;
12926 
12927 	ASSERT(MUTEX_HELD(&pptr->port_mutex));
12928 
12929 	fcp_wwn_to_ascii(ptgt->tgt_port_wwn.raw_wwn, buf);
12930 	(void) snprintf(uaddr, MAXNAMELEN, "w%s,%x", buf, plun->lun_num);
12931 
12932 	pip = mdi_pi_find(pdip, plun->lun_guid, uaddr);
12933 
12934 	return (pip);
12935 }
12936 
12937 
12938 static int
12939 fcp_online_child(struct fcp_lun *plun, child_info_t *cip, int lcount,
12940     int tcount, int flags, int *circ)
12941 {
12942 	int			rval;
12943 	struct fcp_port		*pptr = plun->lun_tgt->tgt_port;
12944 	struct fcp_tgt	*ptgt = plun->lun_tgt;
12945 	dev_info_t		*cdip = NULL;
12946 
12947 	ASSERT(MUTEX_HELD(&pptr->port_mutex));
12948 	ASSERT(MUTEX_HELD(&plun->lun_mutex));
12949 
12950 	if (plun->lun_cip == NULL) {
12951 		FCP_TRACE(fcp_logq, pptr->port_instbuf,
12952 		    fcp_trace, FCP_BUF_LEVEL_3, 0,
12953 		    "fcp_online_child: plun->lun_cip is NULL: "
12954 		    "plun: %p state: %x num: %d target state: %x",
12955 		    plun, plun->lun_state, plun->lun_num,
12956 		    plun->lun_tgt->tgt_port->port_state);
12957 		return (NDI_FAILURE);
12958 	}
12959 again:
12960 	if (plun->lun_mpxio == 0) {
12961 		cdip = DIP(cip);
12962 		mutex_exit(&plun->lun_mutex);
12963 		mutex_exit(&pptr->port_mutex);
12964 
12965 		FCP_TRACE(fcp_logq, pptr->port_instbuf,
12966 		    fcp_trace, FCP_BUF_LEVEL_3, 0,
12967 		    "!Invoking ndi_devi_online for %s: target=%x lun=%x",
12968 		    ddi_get_name(cdip), ptgt->tgt_d_id, plun->lun_num);
12969 
12970 		/*
12971 		 * We could check for FCP_LUN_INIT here but chances
12972 		 * of getting here when it's already in FCP_LUN_INIT
12973 		 * is rare and a duplicate ndi_devi_online wouldn't
12974 		 * hurt either (as the node would already have been
12975 		 * in CF2)
12976 		 */
12977 		if (!i_ddi_devi_attached(ddi_get_parent(cdip))) {
12978 			rval = ndi_devi_bind_driver(cdip, flags);
12979 		} else {
12980 			rval = ndi_devi_online(cdip, flags);
12981 		}
12982 		/*
12983 		 * We log the message into trace buffer if the device
12984 		 * is "ses" and into syslog for any other device
12985 		 * type. This is to prevent the ndi_devi_online failure
12986 		 * message that appears for V880/A5K ses devices.
12987 		 */
12988 		if (rval == NDI_SUCCESS) {
12989 			mutex_enter(&ptgt->tgt_mutex);
12990 			plun->lun_state |= FCP_LUN_INIT;
12991 			mutex_exit(&ptgt->tgt_mutex);
12992 		} else if (strncmp(ddi_node_name(cdip), "ses", 3) != 0) {
12993 			fcp_log(CE_NOTE, pptr->port_dip,
12994 			    "!ndi_devi_online:"
12995 			    " failed for %s: target=%x lun=%x %x",
12996 			    ddi_get_name(cdip), ptgt->tgt_d_id,
12997 			    plun->lun_num, rval);
12998 		} else {
12999 			FCP_TRACE(fcp_logq, pptr->port_instbuf,
13000 			    fcp_trace, FCP_BUF_LEVEL_3, 0,
13001 			    " !ndi_devi_online:"
13002 			    " failed for %s: target=%x lun=%x %x",
13003 			    ddi_get_name(cdip), ptgt->tgt_d_id,
13004 			    plun->lun_num, rval);
13005 		}
13006 	} else {
13007 		cdip = mdi_pi_get_client(PIP(cip));
13008 		mutex_exit(&plun->lun_mutex);
13009 		mutex_exit(&pptr->port_mutex);
13010 
13011 		FCP_TRACE(fcp_logq, pptr->port_instbuf,
13012 		    fcp_trace, FCP_BUF_LEVEL_3, 0,
13013 		    "!Invoking mdi_pi_online for %s: target=%x lun=%x",
13014 		    ddi_get_name(cdip), ptgt->tgt_d_id, plun->lun_num);
13015 
13016 		/*
13017 		 * Hold path and exit phci to avoid deadlock with power
13018 		 * management code during mdi_pi_online.
13019 		 */
13020 		mdi_hold_path(PIP(cip));
13021 		mdi_devi_exit_phci(pptr->port_dip, *circ);
13022 
13023 		rval = mdi_pi_online(PIP(cip), flags);
13024 
13025 		mdi_devi_enter_phci(pptr->port_dip, circ);
13026 		mdi_rele_path(PIP(cip));
13027 
13028 		if (rval == MDI_SUCCESS) {
13029 			mutex_enter(&ptgt->tgt_mutex);
13030 			plun->lun_state |= FCP_LUN_INIT;
13031 			mutex_exit(&ptgt->tgt_mutex);
13032 
13033 			/*
13034 			 * Clear MPxIO path permanent disable in case
13035 			 * fcp hotplug dropped the offline event.
13036 			 */
13037 			(void) mdi_pi_enable_path(PIP(cip), DRIVER_DISABLE);
13038 
13039 		} else if (rval == MDI_NOT_SUPPORTED) {
13040 			child_info_t	*old_cip = cip;
13041 
13042 			/*
13043 			 * MPxIO does not support this device yet.
13044 			 * Enumerate in legacy mode.
13045 			 */
13046 			mutex_enter(&pptr->port_mutex);
13047 			mutex_enter(&plun->lun_mutex);
13048 			plun->lun_mpxio = 0;
13049 			plun->lun_cip = NULL;
13050 			cdip = fcp_create_dip(plun, lcount, tcount);
13051 			plun->lun_cip = cip = CIP(cdip);
13052 			if (cip == NULL) {
13053 				fcp_log(CE_WARN, pptr->port_dip,
13054 				    "!fcp_online_child: "
13055 				    "Create devinfo failed for LU=%p", plun);
13056 				mutex_exit(&plun->lun_mutex);
13057 
13058 				mutex_enter(&ptgt->tgt_mutex);
13059 				plun->lun_state |= FCP_LUN_OFFLINE;
13060 				mutex_exit(&ptgt->tgt_mutex);
13061 
13062 				mutex_exit(&pptr->port_mutex);
13063 
13064 				/*
13065 				 * free the mdi_pathinfo node
13066 				 */
13067 				(void) mdi_pi_free(PIP(old_cip), 0);
13068 			} else {
13069 				FCP_TRACE(fcp_logq, pptr->port_instbuf,
13070 				    fcp_trace, FCP_BUF_LEVEL_3, 0,
13071 				    "fcp_online_child: creating devinfo "
13072 				    "node 0x%p for plun 0x%p",
13073 				    cip, plun);
13074 				mutex_exit(&plun->lun_mutex);
13075 				mutex_exit(&pptr->port_mutex);
13076 				/*
13077 				 * free the mdi_pathinfo node
13078 				 */
13079 				(void) mdi_pi_free(PIP(old_cip), 0);
13080 				mutex_enter(&pptr->port_mutex);
13081 				mutex_enter(&plun->lun_mutex);
13082 				goto again;
13083 			}
13084 		} else {
13085 			if (cdip) {
13086 				fcp_log(CE_NOTE, pptr->port_dip,
13087 				    "!fcp_online_child: mdi_pi_online:"
13088 				    " failed for %s: target=%x lun=%x %x",
13089 				    ddi_get_name(cdip), ptgt->tgt_d_id,
13090 				    plun->lun_num, rval);
13091 			}
13092 		}
13093 		rval = (rval == MDI_SUCCESS) ? NDI_SUCCESS : NDI_FAILURE;
13094 	}
13095 
13096 	if (rval == NDI_SUCCESS) {
13097 		if (cdip) {
13098 			(void) ndi_event_retrieve_cookie(
13099 			    pptr->port_ndi_event_hdl, cdip, FCAL_INSERT_EVENT,
13100 			    &fcp_insert_eid, NDI_EVENT_NOPASS);
13101 			(void) ndi_event_run_callbacks(pptr->port_ndi_event_hdl,
13102 			    cdip, fcp_insert_eid, NULL);
13103 		}
13104 	}
13105 	mutex_enter(&pptr->port_mutex);
13106 	mutex_enter(&plun->lun_mutex);
13107 	return (rval);
13108 }
13109 
13110 /* ARGSUSED */
13111 static int
13112 fcp_offline_child(struct fcp_lun *plun, child_info_t *cip, int lcount,
13113     int tcount, int flags, int *circ)
13114 {
13115 	int rval;
13116 	struct fcp_port		*pptr = plun->lun_tgt->tgt_port;
13117 	struct fcp_tgt	*ptgt = plun->lun_tgt;
13118 	dev_info_t		*cdip;
13119 
13120 	ASSERT(MUTEX_HELD(&plun->lun_mutex));
13121 	ASSERT(MUTEX_HELD(&pptr->port_mutex));
13122 
13123 	if (plun->lun_cip == NULL) {
13124 		FCP_TRACE(fcp_logq, pptr->port_instbuf,
13125 		    fcp_trace, FCP_BUF_LEVEL_3, 0,
13126 		    "fcp_offline_child: plun->lun_cip is NULL: "
13127 		    "plun: %p lun state: %x num: %d target state: %x",
13128 		    plun, plun->lun_state, plun->lun_num,
13129 		    plun->lun_tgt->tgt_port->port_state);
13130 		return (NDI_FAILURE);
13131 	}
13132 
13133 	if (plun->lun_mpxio == 0) {
13134 		cdip = DIP(cip);
13135 		mutex_exit(&plun->lun_mutex);
13136 		mutex_exit(&pptr->port_mutex);
13137 		rval = ndi_devi_offline(DIP(cip), flags);
13138 		if (rval != NDI_SUCCESS) {
13139 			FCP_TRACE(fcp_logq, pptr->port_instbuf,
13140 			    fcp_trace, FCP_BUF_LEVEL_3, 0,
13141 			    "fcp_offline_child: ndi_devi_offline failed "
13142 			    "rval=%x cip=%p", rval, cip);
13143 		}
13144 	} else {
13145 		cdip = mdi_pi_get_client(PIP(cip));
13146 		mutex_exit(&plun->lun_mutex);
13147 		mutex_exit(&pptr->port_mutex);
13148 
13149 		/*
13150 		 * Exit phci to avoid deadlock with power management code
13151 		 * during mdi_pi_offline
13152 		 */
13153 		mdi_hold_path(PIP(cip));
13154 		mdi_devi_exit_phci(pptr->port_dip, *circ);
13155 
13156 		rval = mdi_pi_offline(PIP(cip), flags);
13157 
13158 		mdi_devi_enter_phci(pptr->port_dip, circ);
13159 		mdi_rele_path(PIP(cip));
13160 
13161 		if (rval == MDI_SUCCESS) {
13162 			/*
13163 			 * Clear MPxIO path permanent disable as the path is
13164 			 * already offlined.
13165 			 */
13166 			(void) mdi_pi_enable_path(PIP(cip), DRIVER_DISABLE);
13167 
13168 			if (flags & NDI_DEVI_REMOVE) {
13169 				(void) mdi_pi_free(PIP(cip), 0);
13170 			}
13171 		} else {
13172 			FCP_TRACE(fcp_logq, pptr->port_instbuf,
13173 			    fcp_trace, FCP_BUF_LEVEL_3, 0,
13174 			    "fcp_offline_child: mdi_pi_offline failed "
13175 			    "rval=%x cip=%p", rval, cip);
13176 		}
13177 		rval = (rval == MDI_SUCCESS) ? NDI_SUCCESS : NDI_FAILURE;
13178 	}
13179 
13180 	mutex_enter(&ptgt->tgt_mutex);
13181 	plun->lun_state &= ~FCP_LUN_INIT;
13182 	mutex_exit(&ptgt->tgt_mutex);
13183 
13184 	mutex_enter(&pptr->port_mutex);
13185 	mutex_enter(&plun->lun_mutex);
13186 
13187 	if (rval == NDI_SUCCESS) {
13188 		cdip = NULL;
13189 		if (flags & NDI_DEVI_REMOVE) {
13190 			/*
13191 			 * If the guid of the LUN changes, lun_cip will not
13192 			 * equal to cip, and after offlining the LUN with the
13193 			 * old guid, we should keep lun_cip since it's the cip
13194 			 * of the LUN with the new guid.
13195 			 * Otherwise remove our reference to child node.
13196 			 */
13197 			if (plun->lun_cip == cip) {
13198 				plun->lun_cip = NULL;
13199 			}
13200 			if (plun->lun_old_guid) {
13201 				kmem_free(plun->lun_old_guid,
13202 				    plun->lun_old_guid_size);
13203 				plun->lun_old_guid = NULL;
13204 				plun->lun_old_guid_size = 0;
13205 			}
13206 		}
13207 	}
13208 
13209 	if (cdip) {
13210 		FCP_TRACE(fcp_logq, pptr->port_instbuf,
13211 		    fcp_trace, FCP_BUF_LEVEL_3, 0, "!%s failed for %s:"
13212 		    " target=%x lun=%x", "ndi_offline",
13213 		    ddi_get_name(cdip), ptgt->tgt_d_id, plun->lun_num);
13214 	}
13215 
13216 	return (rval);
13217 }
13218 
13219 static void
13220 fcp_remove_child(struct fcp_lun *plun)
13221 {
13222 	ASSERT(MUTEX_HELD(&plun->lun_mutex));
13223 
13224 	if (fcp_is_child_present(plun, plun->lun_cip) == FC_SUCCESS) {
13225 		if (plun->lun_mpxio == 0) {
13226 			(void) ndi_prop_remove_all(DIP(plun->lun_cip));
13227 			(void) ndi_devi_free(DIP(plun->lun_cip));
13228 		} else {
13229 			mutex_exit(&plun->lun_mutex);
13230 			mutex_exit(&plun->lun_tgt->tgt_mutex);
13231 			mutex_exit(&plun->lun_tgt->tgt_port->port_mutex);
13232 			FCP_TRACE(fcp_logq,
13233 			    plun->lun_tgt->tgt_port->port_instbuf,
13234 			    fcp_trace, FCP_BUF_LEVEL_3, 0,
13235 			    "lun=%p pip freed %p", plun, plun->lun_cip);
13236 			(void) mdi_prop_remove(PIP(plun->lun_cip), NULL);
13237 			(void) mdi_pi_free(PIP(plun->lun_cip), 0);
13238 			mutex_enter(&plun->lun_tgt->tgt_port->port_mutex);
13239 			mutex_enter(&plun->lun_tgt->tgt_mutex);
13240 			mutex_enter(&plun->lun_mutex);
13241 		}
13242 	}
13243 
13244 	plun->lun_cip = NULL;
13245 }
13246 
13247 /*
13248  * called when a timeout occurs
13249  *
13250  * can be scheduled during an attach or resume (if not already running)
13251  *
13252  * one timeout is set up for all ports
13253  *
13254  * acquires and releases the global mutex
13255  */
13256 /*ARGSUSED*/
13257 static void
13258 fcp_watch(void *arg)
13259 {
13260 	struct fcp_port	*pptr;
13261 	struct fcp_ipkt	*icmd;
13262 	struct fcp_ipkt	*nicmd;
13263 	struct fcp_pkt	*cmd;
13264 	struct fcp_pkt	*ncmd;
13265 	struct fcp_pkt	*tail;
13266 	struct fcp_pkt	*pcmd;
13267 	struct fcp_pkt	*save_head;
13268 	struct fcp_port	*save_port;
13269 
13270 	/* increment global watchdog time */
13271 	fcp_watchdog_time += fcp_watchdog_timeout;
13272 
13273 	mutex_enter(&fcp_global_mutex);
13274 
13275 	/* scan each port in our list */
13276 	for (pptr = fcp_port_head; pptr != NULL; pptr = pptr->port_next) {
13277 		save_port = fcp_port_head;
13278 		pptr->port_state |= FCP_STATE_IN_WATCHDOG;
13279 		mutex_exit(&fcp_global_mutex);
13280 
13281 		mutex_enter(&pptr->port_mutex);
13282 		if (pptr->port_ipkt_list == NULL &&
13283 		    (pptr->port_state & (FCP_STATE_SUSPENDED |
13284 		    FCP_STATE_DETACHING | FCP_STATE_POWER_DOWN))) {
13285 			pptr->port_state &= ~FCP_STATE_IN_WATCHDOG;
13286 			mutex_exit(&pptr->port_mutex);
13287 			mutex_enter(&fcp_global_mutex);
13288 			goto end_of_watchdog;
13289 		}
13290 
13291 		/*
13292 		 * We check if a list of targets need to be offlined.
13293 		 */
13294 		if (pptr->port_offline_tgts) {
13295 			fcp_scan_offline_tgts(pptr);
13296 		}
13297 
13298 		/*
13299 		 * We check if a list of luns need to be offlined.
13300 		 */
13301 		if (pptr->port_offline_luns) {
13302 			fcp_scan_offline_luns(pptr);
13303 		}
13304 
13305 		/*
13306 		 * We check if a list of targets or luns need to be reset.
13307 		 */
13308 		if (pptr->port_reset_list) {
13309 			fcp_check_reset_delay(pptr);
13310 		}
13311 
13312 		mutex_exit(&pptr->port_mutex);
13313 
13314 		/*
13315 		 * This is where the pending commands (pkt) are checked for
13316 		 * timeout.
13317 		 */
13318 		mutex_enter(&pptr->port_pkt_mutex);
13319 		tail = pptr->port_pkt_tail;
13320 
13321 		for (pcmd = NULL, cmd = pptr->port_pkt_head;
13322 		    cmd != NULL; cmd = ncmd) {
13323 			ncmd = cmd->cmd_next;
13324 			/*
13325 			 * If a command is in this queue the bit CFLAG_IN_QUEUE
13326 			 * must be set.
13327 			 */
13328 			ASSERT(cmd->cmd_flags & CFLAG_IN_QUEUE);
13329 			/*
13330 			 * FCP_INVALID_TIMEOUT will be set for those
13331 			 * command that need to be failed. Mostly those
13332 			 * cmds that could not be queued down for the
13333 			 * "timeout" value. cmd->cmd_timeout is used
13334 			 * to try and requeue the command regularly.
13335 			 */
13336 			if (cmd->cmd_timeout >= fcp_watchdog_time) {
13337 				/*
13338 				 * This command hasn't timed out yet.  Let's
13339 				 * go to the next one.
13340 				 */
13341 				pcmd = cmd;
13342 				goto end_of_loop;
13343 			}
13344 
13345 			if (cmd == pptr->port_pkt_head) {
13346 				ASSERT(pcmd == NULL);
13347 				pptr->port_pkt_head = cmd->cmd_next;
13348 			} else {
13349 				ASSERT(pcmd != NULL);
13350 				pcmd->cmd_next = cmd->cmd_next;
13351 			}
13352 
13353 			if (cmd == pptr->port_pkt_tail) {
13354 				ASSERT(cmd->cmd_next == NULL);
13355 				pptr->port_pkt_tail = pcmd;
13356 				if (pcmd) {
13357 					pcmd->cmd_next = NULL;
13358 				}
13359 			}
13360 			cmd->cmd_next = NULL;
13361 
13362 			/*
13363 			 * save the current head before dropping the
13364 			 * mutex - If the head doesn't remain the
13365 			 * same after re acquiring the mutex, just
13366 			 * bail out and revisit on next tick.
13367 			 *
13368 			 * PS: The tail pointer can change as the commands
13369 			 * get requeued after failure to retransport
13370 			 */
13371 			save_head = pptr->port_pkt_head;
13372 			mutex_exit(&pptr->port_pkt_mutex);
13373 
13374 			if (cmd->cmd_fp_pkt->pkt_timeout ==
13375 			    FCP_INVALID_TIMEOUT) {
13376 				struct scsi_pkt		*pkt = cmd->cmd_pkt;
13377 				struct fcp_lun	*plun;
13378 				struct fcp_tgt	*ptgt;
13379 
13380 				plun = ADDR2LUN(&pkt->pkt_address);
13381 				ptgt = plun->lun_tgt;
13382 
13383 				FCP_TRACE(fcp_logq, pptr->port_instbuf,
13384 				    fcp_trace, FCP_BUF_LEVEL_2, 0,
13385 				    "SCSI cmd 0x%x to D_ID=%x timed out",
13386 				    pkt->pkt_cdbp[0], ptgt->tgt_d_id);
13387 
13388 				cmd->cmd_state == FCP_PKT_ABORTING ?
13389 				    fcp_fail_cmd(cmd, CMD_RESET,
13390 				    STAT_DEV_RESET) : fcp_fail_cmd(cmd,
13391 				    CMD_TIMEOUT, STAT_ABORTED);
13392 			} else {
13393 				fcp_retransport_cmd(pptr, cmd);
13394 			}
13395 			mutex_enter(&pptr->port_pkt_mutex);
13396 			if (save_head && save_head != pptr->port_pkt_head) {
13397 				/*
13398 				 * Looks like linked list got changed (mostly
13399 				 * happens when an an OFFLINE LUN code starts
13400 				 * returning overflow queue commands in
13401 				 * parallel. So bail out and revisit during
13402 				 * next tick
13403 				 */
13404 				break;
13405 			}
13406 		end_of_loop:
13407 			/*
13408 			 * Scan only upto the previously known tail pointer
13409 			 * to avoid excessive processing - lots of new packets
13410 			 * could have been added to the tail or the old ones
13411 			 * re-queued.
13412 			 */
13413 			if (cmd == tail) {
13414 				break;
13415 			}
13416 		}
13417 		mutex_exit(&pptr->port_pkt_mutex);
13418 
13419 		mutex_enter(&pptr->port_mutex);
13420 		for (icmd = pptr->port_ipkt_list; icmd != NULL; icmd = nicmd) {
13421 			struct fcp_tgt *ptgt = icmd->ipkt_tgt;
13422 
13423 			nicmd = icmd->ipkt_next;
13424 			if ((icmd->ipkt_restart != 0) &&
13425 			    (icmd->ipkt_restart >= fcp_watchdog_time)) {
13426 				/* packet has not timed out */
13427 				continue;
13428 			}
13429 
13430 			/* time for packet re-transport */
13431 			if (icmd == pptr->port_ipkt_list) {
13432 				pptr->port_ipkt_list = icmd->ipkt_next;
13433 				if (pptr->port_ipkt_list) {
13434 					pptr->port_ipkt_list->ipkt_prev =
13435 					    NULL;
13436 				}
13437 			} else {
13438 				icmd->ipkt_prev->ipkt_next = icmd->ipkt_next;
13439 				if (icmd->ipkt_next) {
13440 					icmd->ipkt_next->ipkt_prev =
13441 					    icmd->ipkt_prev;
13442 				}
13443 			}
13444 			icmd->ipkt_next = NULL;
13445 			icmd->ipkt_prev = NULL;
13446 			mutex_exit(&pptr->port_mutex);
13447 
13448 			if (fcp_is_retryable(icmd)) {
13449 				fc_ulp_rscn_info_t *rscnp =
13450 				    (fc_ulp_rscn_info_t *)icmd->ipkt_fpkt->
13451 				    pkt_ulp_rscn_infop;
13452 
13453 				FCP_TRACE(fcp_logq, pptr->port_instbuf,
13454 				    fcp_trace, FCP_BUF_LEVEL_2, 0,
13455 				    "%x to D_ID=%x Retrying..",
13456 				    icmd->ipkt_opcode,
13457 				    icmd->ipkt_fpkt->pkt_cmd_fhdr.d_id);
13458 
13459 				/*
13460 				 * Update the RSCN count in the packet
13461 				 * before resending.
13462 				 */
13463 
13464 				if (rscnp != NULL) {
13465 					rscnp->ulp_rscn_count =
13466 					    fc_ulp_get_rscn_count(pptr->
13467 					    port_fp_handle);
13468 				}
13469 
13470 				mutex_enter(&pptr->port_mutex);
13471 				mutex_enter(&ptgt->tgt_mutex);
13472 				if (!FCP_STATE_CHANGED(pptr, ptgt, icmd)) {
13473 					mutex_exit(&ptgt->tgt_mutex);
13474 					mutex_exit(&pptr->port_mutex);
13475 					switch (icmd->ipkt_opcode) {
13476 						int rval;
13477 					case LA_ELS_PLOGI:
13478 						if ((rval = fc_ulp_login(
13479 						    pptr->port_fp_handle,
13480 						    &icmd->ipkt_fpkt, 1)) ==
13481 						    FC_SUCCESS) {
13482 							mutex_enter(
13483 							    &pptr->port_mutex);
13484 							continue;
13485 						}
13486 						if (fcp_handle_ipkt_errors(
13487 						    pptr, ptgt, icmd, rval,
13488 						    "PLOGI") == DDI_SUCCESS) {
13489 							mutex_enter(
13490 							    &pptr->port_mutex);
13491 							continue;
13492 						}
13493 						break;
13494 
13495 					case LA_ELS_PRLI:
13496 						if ((rval = fc_ulp_issue_els(
13497 						    pptr->port_fp_handle,
13498 						    icmd->ipkt_fpkt)) ==
13499 						    FC_SUCCESS) {
13500 							mutex_enter(
13501 							    &pptr->port_mutex);
13502 							continue;
13503 						}
13504 						if (fcp_handle_ipkt_errors(
13505 						    pptr, ptgt, icmd, rval,
13506 						    "PRLI") == DDI_SUCCESS) {
13507 							mutex_enter(
13508 							    &pptr->port_mutex);
13509 							continue;
13510 						}
13511 						break;
13512 
13513 					default:
13514 						if ((rval = fcp_transport(
13515 						    pptr->port_fp_handle,
13516 						    icmd->ipkt_fpkt, 1)) ==
13517 						    FC_SUCCESS) {
13518 							mutex_enter(
13519 							    &pptr->port_mutex);
13520 							continue;
13521 						}
13522 						if (fcp_handle_ipkt_errors(
13523 						    pptr, ptgt, icmd, rval,
13524 						    "PRLI") == DDI_SUCCESS) {
13525 							mutex_enter(
13526 							    &pptr->port_mutex);
13527 							continue;
13528 						}
13529 						break;
13530 					}
13531 				} else {
13532 					mutex_exit(&ptgt->tgt_mutex);
13533 					mutex_exit(&pptr->port_mutex);
13534 				}
13535 			} else {
13536 				fcp_print_error(icmd->ipkt_fpkt);
13537 			}
13538 
13539 			(void) fcp_call_finish_init(pptr, ptgt,
13540 			    icmd->ipkt_link_cnt, icmd->ipkt_change_cnt,
13541 			    icmd->ipkt_cause);
13542 			fcp_icmd_free(pptr, icmd);
13543 			mutex_enter(&pptr->port_mutex);
13544 		}
13545 
13546 		pptr->port_state &= ~FCP_STATE_IN_WATCHDOG;
13547 		mutex_exit(&pptr->port_mutex);
13548 		mutex_enter(&fcp_global_mutex);
13549 
13550 	end_of_watchdog:
13551 		/*
13552 		 * Bail out early before getting into trouble
13553 		 */
13554 		if (save_port != fcp_port_head) {
13555 			break;
13556 		}
13557 	}
13558 
13559 	if (fcp_watchdog_init > 0) {
13560 		/* reschedule timeout to go again */
13561 		fcp_watchdog_id =
13562 		    timeout(fcp_watch, NULL, fcp_watchdog_tick);
13563 	}
13564 	mutex_exit(&fcp_global_mutex);
13565 }
13566 
13567 
13568 static void
13569 fcp_check_reset_delay(struct fcp_port *pptr)
13570 {
13571 	uint32_t		tgt_cnt;
13572 	int			level;
13573 	struct fcp_tgt	*ptgt;
13574 	struct fcp_lun	*plun;
13575 	struct fcp_reset_elem *cur = NULL;
13576 	struct fcp_reset_elem *next = NULL;
13577 	struct fcp_reset_elem *prev = NULL;
13578 
13579 	ASSERT(mutex_owned(&pptr->port_mutex));
13580 
13581 	next = pptr->port_reset_list;
13582 	while ((cur = next) != NULL) {
13583 		next = cur->next;
13584 
13585 		if (cur->timeout < fcp_watchdog_time) {
13586 			prev = cur;
13587 			continue;
13588 		}
13589 
13590 		ptgt = cur->tgt;
13591 		plun = cur->lun;
13592 		tgt_cnt = cur->tgt_cnt;
13593 
13594 		if (ptgt) {
13595 			level = RESET_TARGET;
13596 		} else {
13597 			ASSERT(plun != NULL);
13598 			level = RESET_LUN;
13599 			ptgt = plun->lun_tgt;
13600 		}
13601 		if (prev) {
13602 			prev->next = next;
13603 		} else {
13604 			/*
13605 			 * Because we drop port mutex while doing aborts for
13606 			 * packets, we can't rely on reset_list pointing to
13607 			 * our head
13608 			 */
13609 			if (cur == pptr->port_reset_list) {
13610 				pptr->port_reset_list = next;
13611 			} else {
13612 				struct fcp_reset_elem *which;
13613 
13614 				which = pptr->port_reset_list;
13615 				while (which && which->next != cur) {
13616 					which = which->next;
13617 				}
13618 				ASSERT(which != NULL);
13619 
13620 				which->next = next;
13621 				prev = which;
13622 			}
13623 		}
13624 
13625 		kmem_free(cur, sizeof (*cur));
13626 
13627 		if (tgt_cnt == ptgt->tgt_change_cnt) {
13628 			mutex_enter(&ptgt->tgt_mutex);
13629 			if (level == RESET_TARGET) {
13630 				fcp_update_tgt_state(ptgt,
13631 				    FCP_RESET, FCP_LUN_BUSY);
13632 			} else {
13633 				fcp_update_lun_state(plun,
13634 				    FCP_RESET, FCP_LUN_BUSY);
13635 			}
13636 			mutex_exit(&ptgt->tgt_mutex);
13637 
13638 			mutex_exit(&pptr->port_mutex);
13639 			fcp_abort_all(pptr, ptgt, plun, tgt_cnt);
13640 			mutex_enter(&pptr->port_mutex);
13641 		}
13642 	}
13643 }
13644 
13645 
13646 static void
13647 fcp_abort_all(struct fcp_port *pptr, struct fcp_tgt *ttgt,
13648     struct fcp_lun *rlun, int tgt_cnt)
13649 {
13650 	int			rval;
13651 	struct fcp_lun	*tlun, *nlun;
13652 	struct fcp_pkt	*pcmd = NULL, *ncmd = NULL,
13653 	    *cmd = NULL, *head = NULL,
13654 	    *tail = NULL;
13655 
13656 	mutex_enter(&pptr->port_pkt_mutex);
13657 	for (cmd = pptr->port_pkt_head; cmd != NULL; cmd = ncmd) {
13658 		struct fcp_lun *plun = ADDR2LUN(&cmd->cmd_pkt->pkt_address);
13659 		struct fcp_tgt *ptgt = plun->lun_tgt;
13660 
13661 		ncmd = cmd->cmd_next;
13662 
13663 		if (ptgt != ttgt && plun != rlun) {
13664 			pcmd = cmd;
13665 			continue;
13666 		}
13667 
13668 		if (pcmd != NULL) {
13669 			ASSERT(pptr->port_pkt_head != cmd);
13670 			pcmd->cmd_next = ncmd;
13671 		} else {
13672 			ASSERT(cmd == pptr->port_pkt_head);
13673 			pptr->port_pkt_head = ncmd;
13674 		}
13675 		if (pptr->port_pkt_tail == cmd) {
13676 			ASSERT(cmd->cmd_next == NULL);
13677 			pptr->port_pkt_tail = pcmd;
13678 			if (pcmd != NULL) {
13679 				pcmd->cmd_next = NULL;
13680 			}
13681 		}
13682 
13683 		if (head == NULL) {
13684 			head = tail = cmd;
13685 		} else {
13686 			ASSERT(tail != NULL);
13687 			tail->cmd_next = cmd;
13688 			tail = cmd;
13689 		}
13690 		cmd->cmd_next = NULL;
13691 	}
13692 	mutex_exit(&pptr->port_pkt_mutex);
13693 
13694 	for (cmd = head; cmd != NULL; cmd = ncmd) {
13695 		struct scsi_pkt *pkt = cmd->cmd_pkt;
13696 
13697 		ncmd = cmd->cmd_next;
13698 		ASSERT(pkt != NULL);
13699 
13700 		mutex_enter(&pptr->port_mutex);
13701 		if (ttgt->tgt_change_cnt == tgt_cnt) {
13702 			mutex_exit(&pptr->port_mutex);
13703 			cmd->cmd_flags &= ~CFLAG_IN_QUEUE;
13704 			pkt->pkt_reason = CMD_RESET;
13705 			pkt->pkt_statistics |= STAT_DEV_RESET;
13706 			cmd->cmd_state = FCP_PKT_IDLE;
13707 			fcp_post_callback(cmd);
13708 		} else {
13709 			mutex_exit(&pptr->port_mutex);
13710 		}
13711 	}
13712 
13713 	/*
13714 	 * If the FCA will return all the commands in its queue then our
13715 	 * work is easy, just return.
13716 	 */
13717 
13718 	if (pptr->port_reset_action == FC_RESET_RETURN_ALL) {
13719 		return;
13720 	}
13721 
13722 	/*
13723 	 * For RESET_LUN get hold of target pointer
13724 	 */
13725 	if (ttgt == NULL) {
13726 		ASSERT(rlun != NULL);
13727 
13728 		ttgt = rlun->lun_tgt;
13729 
13730 		ASSERT(ttgt != NULL);
13731 	}
13732 
13733 	/*
13734 	 * There are some severe race conditions here.
13735 	 * While we are trying to abort the pkt, it might be completing
13736 	 * so mark it aborted and if the abort does not succeed then
13737 	 * handle it in the watch thread.
13738 	 */
13739 	mutex_enter(&ttgt->tgt_mutex);
13740 	nlun = ttgt->tgt_lun;
13741 	mutex_exit(&ttgt->tgt_mutex);
13742 	while ((tlun = nlun) != NULL) {
13743 		int restart = 0;
13744 		if (rlun && rlun != tlun) {
13745 			mutex_enter(&ttgt->tgt_mutex);
13746 			nlun = tlun->lun_next;
13747 			mutex_exit(&ttgt->tgt_mutex);
13748 			continue;
13749 		}
13750 		mutex_enter(&tlun->lun_mutex);
13751 		cmd = tlun->lun_pkt_head;
13752 		while (cmd != NULL) {
13753 			if (cmd->cmd_state == FCP_PKT_ISSUED) {
13754 				struct scsi_pkt *pkt;
13755 
13756 				restart = 1;
13757 				cmd->cmd_state = FCP_PKT_ABORTING;
13758 				mutex_exit(&tlun->lun_mutex);
13759 				rval = fc_ulp_abort(pptr->port_fp_handle,
13760 				    cmd->cmd_fp_pkt, KM_SLEEP);
13761 				if (rval == FC_SUCCESS) {
13762 					pkt = cmd->cmd_pkt;
13763 					pkt->pkt_reason = CMD_RESET;
13764 					pkt->pkt_statistics |= STAT_DEV_RESET;
13765 					cmd->cmd_state = FCP_PKT_IDLE;
13766 					fcp_post_callback(cmd);
13767 				} else {
13768 					caddr_t msg;
13769 
13770 					(void) fc_ulp_error(rval, &msg);
13771 
13772 					/*
13773 					 * This part is tricky. The abort
13774 					 * failed and now the command could
13775 					 * be completing.  The cmd_state ==
13776 					 * FCP_PKT_ABORTING should save
13777 					 * us in fcp_cmd_callback. If we
13778 					 * are already aborting ignore the
13779 					 * command in fcp_cmd_callback.
13780 					 * Here we leave this packet for 20
13781 					 * sec to be aborted in the
13782 					 * fcp_watch thread.
13783 					 */
13784 					fcp_log(CE_WARN, pptr->port_dip,
13785 					    "!Abort failed after reset %s",
13786 					    msg);
13787 
13788 					cmd->cmd_timeout =
13789 					    fcp_watchdog_time +
13790 					    cmd->cmd_pkt->pkt_time +
13791 					    FCP_FAILED_DELAY;
13792 
13793 					cmd->cmd_fp_pkt->pkt_timeout =
13794 					    FCP_INVALID_TIMEOUT;
13795 					/*
13796 					 * This is a hack, cmd is put in the
13797 					 * overflow queue so that it can be
13798 					 * timed out finally
13799 					 */
13800 					cmd->cmd_flags |= CFLAG_IN_QUEUE;
13801 
13802 					mutex_enter(&pptr->port_pkt_mutex);
13803 					if (pptr->port_pkt_head) {
13804 						ASSERT(pptr->port_pkt_tail
13805 						    != NULL);
13806 						pptr->port_pkt_tail->cmd_next
13807 						    = cmd;
13808 						pptr->port_pkt_tail = cmd;
13809 					} else {
13810 						ASSERT(pptr->port_pkt_tail
13811 						    == NULL);
13812 						pptr->port_pkt_head =
13813 						    pptr->port_pkt_tail
13814 						    = cmd;
13815 					}
13816 					cmd->cmd_next = NULL;
13817 					mutex_exit(&pptr->port_pkt_mutex);
13818 				}
13819 				mutex_enter(&tlun->lun_mutex);
13820 				cmd = tlun->lun_pkt_head;
13821 			} else {
13822 				cmd = cmd->cmd_forw;
13823 			}
13824 		}
13825 		mutex_exit(&tlun->lun_mutex);
13826 
13827 		mutex_enter(&ttgt->tgt_mutex);
13828 		restart == 1 ? (nlun = ttgt->tgt_lun) : (nlun = tlun->lun_next);
13829 		mutex_exit(&ttgt->tgt_mutex);
13830 
13831 		mutex_enter(&pptr->port_mutex);
13832 		if (tgt_cnt != ttgt->tgt_change_cnt) {
13833 			mutex_exit(&pptr->port_mutex);
13834 			return;
13835 		} else {
13836 			mutex_exit(&pptr->port_mutex);
13837 		}
13838 	}
13839 }
13840 
13841 
13842 /*
13843  * unlink the soft state, returning the soft state found (if any)
13844  *
13845  * acquires and releases the global mutex
13846  */
13847 struct fcp_port *
13848 fcp_soft_state_unlink(struct fcp_port *pptr)
13849 {
13850 	struct fcp_port	*hptr;		/* ptr index */
13851 	struct fcp_port	*tptr;		/* prev hptr */
13852 
13853 	mutex_enter(&fcp_global_mutex);
13854 	for (hptr = fcp_port_head, tptr = NULL;
13855 	    hptr != NULL;
13856 	    tptr = hptr, hptr = hptr->port_next) {
13857 		if (hptr == pptr) {
13858 			/* we found a match -- remove this item */
13859 			if (tptr == NULL) {
13860 				/* we're at the head of the list */
13861 				fcp_port_head = hptr->port_next;
13862 			} else {
13863 				tptr->port_next = hptr->port_next;
13864 			}
13865 			break;			/* success */
13866 		}
13867 	}
13868 	if (fcp_port_head == NULL) {
13869 		fcp_cleanup_blacklist(&fcp_lun_blacklist);
13870 	}
13871 	mutex_exit(&fcp_global_mutex);
13872 	return (hptr);
13873 }
13874 
13875 
13876 /*
13877  * called by fcp_scsi_hba_tgt_init to find a LUN given a
13878  * WWN and a LUN number
13879  */
13880 /* ARGSUSED */
13881 static struct fcp_lun *
13882 fcp_lookup_lun(struct fcp_port *pptr, uchar_t *wwn, uint16_t lun)
13883 {
13884 	int hash;
13885 	struct fcp_tgt *ptgt;
13886 	struct fcp_lun *plun;
13887 
13888 	ASSERT(mutex_owned(&pptr->port_mutex));
13889 
13890 	hash = FCP_HASH(wwn);
13891 	for (ptgt = pptr->port_tgt_hash_table[hash]; ptgt != NULL;
13892 	    ptgt = ptgt->tgt_next) {
13893 		if (bcmp((caddr_t)wwn, (caddr_t)&ptgt->tgt_port_wwn.raw_wwn[0],
13894 		    sizeof (ptgt->tgt_port_wwn)) == 0) {
13895 			mutex_enter(&ptgt->tgt_mutex);
13896 			for (plun = ptgt->tgt_lun;
13897 			    plun != NULL;
13898 			    plun = plun->lun_next) {
13899 				if (plun->lun_num == lun) {
13900 					mutex_exit(&ptgt->tgt_mutex);
13901 					return (plun);
13902 				}
13903 			}
13904 			mutex_exit(&ptgt->tgt_mutex);
13905 			return (NULL);
13906 		}
13907 	}
13908 	return (NULL);
13909 }
13910 
13911 /*
13912  *     Function: fcp_prepare_pkt
13913  *
13914  *  Description: This function prepares the SCSI cmd pkt, passed by the caller,
13915  *		 for fcp_start(). It binds the data or partially maps it.
13916  *		 Builds the FCP header and starts the initialization of the
13917  *		 Fibre Channel header.
13918  *
13919  *     Argument: *pptr		FCP port.
13920  *		 *cmd		FCP packet.
13921  *		 *plun		LUN the command will be sent to.
13922  *
13923  *	Context: User, Kernel and Interrupt context.
13924  */
13925 static void
13926 fcp_prepare_pkt(struct fcp_port *pptr, struct fcp_pkt *cmd,
13927     struct fcp_lun *plun)
13928 {
13929 	fc_packet_t		*fpkt = cmd->cmd_fp_pkt;
13930 	struct fcp_tgt		*ptgt = plun->lun_tgt;
13931 	struct fcp_cmd		*fcmd = &cmd->cmd_fcp_cmd;
13932 
13933 	ASSERT(cmd->cmd_pkt->pkt_comp ||
13934 	    (cmd->cmd_pkt->pkt_flags & FLAG_NOINTR));
13935 
13936 	if (cmd->cmd_pkt->pkt_numcookies) {
13937 		if (cmd->cmd_pkt->pkt_dma_flags & DDI_DMA_READ) {
13938 			fcmd->fcp_cntl.cntl_read_data = 1;
13939 			fcmd->fcp_cntl.cntl_write_data = 0;
13940 			fpkt->pkt_tran_type = FC_PKT_FCP_READ;
13941 		} else {
13942 			fcmd->fcp_cntl.cntl_read_data = 0;
13943 			fcmd->fcp_cntl.cntl_write_data = 1;
13944 			fpkt->pkt_tran_type = FC_PKT_FCP_WRITE;
13945 		}
13946 
13947 		fpkt->pkt_data_cookie = cmd->cmd_pkt->pkt_cookies;
13948 
13949 		fpkt->pkt_data_cookie_cnt = cmd->cmd_pkt->pkt_numcookies;
13950 		ASSERT(fpkt->pkt_data_cookie_cnt <=
13951 		    pptr->port_data_dma_attr.dma_attr_sgllen);
13952 
13953 		cmd->cmd_dmacount = cmd->cmd_pkt->pkt_dma_len;
13954 
13955 		/* FCA needs pkt_datalen to be set */
13956 		fpkt->pkt_datalen = cmd->cmd_dmacount;
13957 		fcmd->fcp_data_len = cmd->cmd_dmacount;
13958 	} else {
13959 		fcmd->fcp_cntl.cntl_read_data = 0;
13960 		fcmd->fcp_cntl.cntl_write_data = 0;
13961 		fpkt->pkt_tran_type = FC_PKT_EXCHANGE;
13962 		fpkt->pkt_datalen = 0;
13963 		fcmd->fcp_data_len = 0;
13964 	}
13965 
13966 	/* set up the Tagged Queuing type */
13967 	if (cmd->cmd_pkt->pkt_flags & FLAG_HTAG) {
13968 		fcmd->fcp_cntl.cntl_qtype = FCP_QTYPE_HEAD_OF_Q;
13969 	} else if (cmd->cmd_pkt->pkt_flags & FLAG_OTAG) {
13970 		fcmd->fcp_cntl.cntl_qtype = FCP_QTYPE_ORDERED;
13971 	} else if (cmd->cmd_pkt->pkt_flags & FLAG_STAG) {
13972 		fcmd->fcp_cntl.cntl_qtype = FCP_QTYPE_SIMPLE;
13973 	} else {
13974 		fcmd->fcp_cntl.cntl_qtype = FCP_QTYPE_UNTAGGED;
13975 	}
13976 
13977 	fcmd->fcp_ent_addr = plun->lun_addr;
13978 
13979 	if (pptr->port_fcp_dma != FC_NO_DVMA_SPACE) {
13980 		FCP_CP_OUT((uint8_t *)fcmd, fpkt->pkt_cmd,
13981 		    fpkt->pkt_cmd_acc, sizeof (struct fcp_cmd));
13982 	} else {
13983 		ASSERT(fpkt->pkt_cmd_dma == NULL && fpkt->pkt_resp_dma == NULL);
13984 	}
13985 
13986 	cmd->cmd_pkt->pkt_reason = CMD_CMPLT;
13987 	cmd->cmd_pkt->pkt_state = 0;
13988 	cmd->cmd_pkt->pkt_statistics = 0;
13989 	cmd->cmd_pkt->pkt_resid = 0;
13990 
13991 	cmd->cmd_fp_pkt->pkt_data_dma = cmd->cmd_pkt->pkt_handle;
13992 
13993 	if (cmd->cmd_pkt->pkt_flags & FLAG_NOINTR) {
13994 		fpkt->pkt_tran_flags = (FC_TRAN_CLASS3 | FC_TRAN_NO_INTR);
13995 		fpkt->pkt_comp = NULL;
13996 	} else {
13997 		fpkt->pkt_tran_flags = (FC_TRAN_CLASS3 | FC_TRAN_INTR);
13998 		if (cmd->cmd_pkt->pkt_flags & FLAG_IMMEDIATE_CB) {
13999 			fpkt->pkt_tran_flags |= FC_TRAN_IMMEDIATE_CB;
14000 		}
14001 		fpkt->pkt_comp = fcp_cmd_callback;
14002 	}
14003 
14004 	mutex_enter(&pptr->port_mutex);
14005 	if (pptr->port_state & FCP_STATE_SUSPENDED) {
14006 		fpkt->pkt_tran_flags |= FC_TRAN_DUMPING;
14007 	}
14008 	mutex_exit(&pptr->port_mutex);
14009 
14010 	fpkt->pkt_cmd_fhdr.d_id = ptgt->tgt_d_id;
14011 	fpkt->pkt_cmd_fhdr.s_id = pptr->port_id;
14012 
14013 	/*
14014 	 * Save a few kernel cycles here
14015 	 */
14016 #ifndef	__lock_lint
14017 	fpkt->pkt_fca_device = ptgt->tgt_fca_dev;
14018 #endif /* __lock_lint */
14019 }
14020 
14021 static void
14022 fcp_post_callback(struct fcp_pkt *cmd)
14023 {
14024 	if (cmd->cmd_pkt->pkt_comp) {
14025 		(*cmd->cmd_pkt->pkt_comp) (cmd->cmd_pkt);
14026 	}
14027 }
14028 
14029 
14030 /*
14031  * called to do polled I/O by fcp_start()
14032  *
14033  * return a transport status value, i.e. TRAN_ACCECPT for success
14034  */
14035 static int
14036 fcp_dopoll(struct fcp_port *pptr, struct fcp_pkt *cmd)
14037 {
14038 	int	rval;
14039 
14040 #ifdef	DEBUG
14041 	mutex_enter(&pptr->port_pkt_mutex);
14042 	pptr->port_npkts++;
14043 	mutex_exit(&pptr->port_pkt_mutex);
14044 #endif /* DEBUG */
14045 
14046 	if (cmd->cmd_fp_pkt->pkt_timeout) {
14047 		cmd->cmd_fp_pkt->pkt_timeout = cmd->cmd_pkt->pkt_time;
14048 	} else {
14049 		cmd->cmd_fp_pkt->pkt_timeout = FCP_POLL_TIMEOUT;
14050 	}
14051 
14052 	ASSERT(cmd->cmd_fp_pkt->pkt_comp == NULL);
14053 
14054 	cmd->cmd_state = FCP_PKT_ISSUED;
14055 
14056 	rval = fc_ulp_transport(pptr->port_fp_handle, cmd->cmd_fp_pkt);
14057 
14058 #ifdef	DEBUG
14059 	mutex_enter(&pptr->port_pkt_mutex);
14060 	pptr->port_npkts--;
14061 	mutex_exit(&pptr->port_pkt_mutex);
14062 #endif /* DEBUG */
14063 
14064 	cmd->cmd_state = FCP_PKT_IDLE;
14065 
14066 	switch (rval) {
14067 	case FC_SUCCESS:
14068 		if (cmd->cmd_fp_pkt->pkt_state == FC_PKT_SUCCESS) {
14069 			fcp_complete_pkt(cmd->cmd_fp_pkt);
14070 			rval = TRAN_ACCEPT;
14071 		} else {
14072 			rval = TRAN_FATAL_ERROR;
14073 		}
14074 		break;
14075 
14076 	case FC_TRAN_BUSY:
14077 		rval = TRAN_BUSY;
14078 		cmd->cmd_pkt->pkt_resid = 0;
14079 		break;
14080 
14081 	case FC_BADPACKET:
14082 		rval = TRAN_BADPKT;
14083 		break;
14084 
14085 	default:
14086 		rval = TRAN_FATAL_ERROR;
14087 		break;
14088 	}
14089 
14090 	return (rval);
14091 }
14092 
14093 
14094 /*
14095  * called by some of the following transport-called routines to convert
14096  * a supplied dip ptr to a port struct ptr (i.e. to the soft state)
14097  */
14098 static struct fcp_port *
14099 fcp_dip2port(dev_info_t *dip)
14100 {
14101 	int	instance;
14102 
14103 	instance = ddi_get_instance(dip);
14104 	return (ddi_get_soft_state(fcp_softstate, instance));
14105 }
14106 
14107 
14108 /*
14109  * called internally to return a LUN given a dip
14110  */
14111 struct fcp_lun *
14112 fcp_get_lun_from_cip(struct fcp_port *pptr, child_info_t *cip)
14113 {
14114 	struct fcp_tgt *ptgt;
14115 	struct fcp_lun *plun;
14116 	int i;
14117 
14118 
14119 	ASSERT(mutex_owned(&pptr->port_mutex));
14120 
14121 	for (i = 0; i < FCP_NUM_HASH; i++) {
14122 		for (ptgt = pptr->port_tgt_hash_table[i];
14123 		    ptgt != NULL;
14124 		    ptgt = ptgt->tgt_next) {
14125 			mutex_enter(&ptgt->tgt_mutex);
14126 			for (plun = ptgt->tgt_lun; plun != NULL;
14127 			    plun = plun->lun_next) {
14128 				mutex_enter(&plun->lun_mutex);
14129 				if (plun->lun_cip == cip) {
14130 					mutex_exit(&plun->lun_mutex);
14131 					mutex_exit(&ptgt->tgt_mutex);
14132 					return (plun); /* match found */
14133 				}
14134 				mutex_exit(&plun->lun_mutex);
14135 			}
14136 			mutex_exit(&ptgt->tgt_mutex);
14137 		}
14138 	}
14139 	return (NULL);				/* no LUN found */
14140 }
14141 
14142 /*
14143  * pass an element to the hotplug list, kick the hotplug thread
14144  * and wait for the element to get processed by the hotplug thread.
14145  * on return the element is freed.
14146  *
14147  * return zero success and non-zero on failure
14148  *
14149  * acquires/releases the target mutex
14150  *
14151  */
14152 static int
14153 fcp_pass_to_hp_and_wait(struct fcp_port *pptr, struct fcp_lun *plun,
14154     child_info_t *cip, int what, int link_cnt, int tgt_cnt, int flags)
14155 {
14156 	struct fcp_hp_elem	*elem;
14157 	int			rval;
14158 
14159 	mutex_enter(&plun->lun_tgt->tgt_mutex);
14160 	if ((elem = fcp_pass_to_hp(pptr, plun, cip,
14161 	    what, link_cnt, tgt_cnt, flags, 1)) == NULL) {
14162 		mutex_exit(&plun->lun_tgt->tgt_mutex);
14163 		fcp_log(CE_CONT, pptr->port_dip,
14164 		    "Can not pass_to_hp: what: %d; D_ID=%x, LUN=%x\n",
14165 		    what, plun->lun_tgt->tgt_d_id, plun->lun_num);
14166 		return (NDI_FAILURE);
14167 	}
14168 	mutex_exit(&plun->lun_tgt->tgt_mutex);
14169 	mutex_enter(&elem->mutex);
14170 	if (elem->wait) {
14171 		while (elem->wait) {
14172 			cv_wait(&elem->cv, &elem->mutex);
14173 		}
14174 	}
14175 	rval = (elem->result);
14176 	mutex_exit(&elem->mutex);
14177 	mutex_destroy(&elem->mutex);
14178 	cv_destroy(&elem->cv);
14179 	kmem_free(elem, sizeof (struct fcp_hp_elem));
14180 	return (rval);
14181 }
14182 
14183 /*
14184  * pass an element to the hotplug list, and then
14185  * kick the hotplug thread
14186  *
14187  * return Boolean success, i.e. non-zero if all goes well, else zero on error
14188  *
14189  * acquires/releases the hotplug mutex
14190  *
14191  * called with the target mutex owned
14192  *
14193  * memory acquired in NOSLEEP mode
14194  * NOTE: if wait is set to 1 then the caller is responsible for waiting on
14195  *	 for the hp daemon to process the request and is responsible for
14196  *	 freeing the element
14197  */
14198 static struct fcp_hp_elem *
14199 fcp_pass_to_hp(struct fcp_port *pptr, struct fcp_lun *plun,
14200     child_info_t *cip, int what, int link_cnt, int tgt_cnt, int flags, int wait)
14201 {
14202 	struct fcp_hp_elem	*elem;
14203 	dev_info_t *pdip;
14204 
14205 	ASSERT(pptr != NULL);
14206 	ASSERT(plun != NULL);
14207 	ASSERT(plun->lun_tgt != NULL);
14208 	ASSERT(mutex_owned(&plun->lun_tgt->tgt_mutex));
14209 
14210 	/* create space for a hotplug element */
14211 	if ((elem = kmem_zalloc(sizeof (struct fcp_hp_elem), KM_NOSLEEP))
14212 	    == NULL) {
14213 		fcp_log(CE_WARN, NULL,
14214 		    "!can't allocate memory for hotplug element");
14215 		return (NULL);
14216 	}
14217 
14218 	/* fill in hotplug element */
14219 	elem->port = pptr;
14220 	elem->lun = plun;
14221 	elem->cip = cip;
14222 	elem->old_lun_mpxio = plun->lun_mpxio;
14223 	elem->what = what;
14224 	elem->flags = flags;
14225 	elem->link_cnt = link_cnt;
14226 	elem->tgt_cnt = tgt_cnt;
14227 	elem->wait = wait;
14228 	mutex_init(&elem->mutex, NULL, MUTEX_DRIVER, NULL);
14229 	cv_init(&elem->cv, NULL, CV_DRIVER, NULL);
14230 
14231 	/* schedule the hotplug task */
14232 	pdip = pptr->port_dip;
14233 	mutex_enter(&plun->lun_mutex);
14234 	if (elem->what == FCP_ONLINE || elem->what == FCP_OFFLINE) {
14235 		plun->lun_event_count++;
14236 		elem->event_cnt = plun->lun_event_count;
14237 	}
14238 	mutex_exit(&plun->lun_mutex);
14239 	if (taskq_dispatch(DEVI(pdip)->devi_taskq, fcp_hp_task,
14240 	    (void *)elem, KM_NOSLEEP) == NULL) {
14241 		mutex_enter(&plun->lun_mutex);
14242 		if (elem->what == FCP_ONLINE || elem->what == FCP_OFFLINE) {
14243 			plun->lun_event_count--;
14244 		}
14245 		mutex_exit(&plun->lun_mutex);
14246 		kmem_free(elem, sizeof (*elem));
14247 		return (0);
14248 	}
14249 
14250 	return (elem);
14251 }
14252 
14253 
14254 static void
14255 fcp_retransport_cmd(struct fcp_port *pptr, struct fcp_pkt *cmd)
14256 {
14257 	int			rval;
14258 	struct scsi_address	*ap;
14259 	struct fcp_lun	*plun;
14260 	struct fcp_tgt	*ptgt;
14261 	fc_packet_t	*fpkt;
14262 
14263 	ap = &cmd->cmd_pkt->pkt_address;
14264 	plun = ADDR2LUN(ap);
14265 	ptgt = plun->lun_tgt;
14266 
14267 	ASSERT(cmd->cmd_flags & CFLAG_IN_QUEUE);
14268 
14269 	cmd->cmd_state = FCP_PKT_IDLE;
14270 
14271 	mutex_enter(&pptr->port_mutex);
14272 	mutex_enter(&ptgt->tgt_mutex);
14273 	if (((plun->lun_state & (FCP_LUN_BUSY | FCP_LUN_OFFLINE)) == 0) &&
14274 	    (!(pptr->port_state & FCP_STATE_ONLINING))) {
14275 		fc_ulp_rscn_info_t *rscnp;
14276 
14277 		cmd->cmd_state = FCP_PKT_ISSUED;
14278 
14279 		/*
14280 		 * It is possible for pkt_pd to be NULL if tgt_pd_handle was
14281 		 * originally NULL, hence we try to set it to the pd pointed
14282 		 * to by the SCSI device we're trying to get to.
14283 		 */
14284 
14285 		fpkt = cmd->cmd_fp_pkt;
14286 		if ((fpkt->pkt_pd == NULL) && (ptgt->tgt_pd_handle != NULL)) {
14287 			fpkt->pkt_pd = ptgt->tgt_pd_handle;
14288 			/*
14289 			 * We need to notify the transport that we now have a
14290 			 * reference to the remote port handle.
14291 			 */
14292 			fc_ulp_hold_remote_port(ptgt->tgt_pd_handle);
14293 		}
14294 
14295 		mutex_exit(&ptgt->tgt_mutex);
14296 		mutex_exit(&pptr->port_mutex);
14297 
14298 		ASSERT((cmd->cmd_pkt->pkt_flags & FLAG_NOINTR) == 0);
14299 
14300 		/* prepare the packet */
14301 
14302 		fcp_prepare_pkt(pptr, cmd, plun);
14303 
14304 		rscnp = (fc_ulp_rscn_info_t *)cmd->cmd_fp_pkt->
14305 		    pkt_ulp_rscn_infop;
14306 
14307 		cmd->cmd_timeout = cmd->cmd_pkt->pkt_time ?
14308 		    fcp_watchdog_time + cmd->cmd_pkt->pkt_time : 0;
14309 
14310 		if (rscnp != NULL) {
14311 			rscnp->ulp_rscn_count =
14312 			    fc_ulp_get_rscn_count(pptr->
14313 			    port_fp_handle);
14314 		}
14315 
14316 		rval = fcp_transport(pptr->port_fp_handle,
14317 		    cmd->cmd_fp_pkt, 0);
14318 
14319 		if (rval == FC_SUCCESS) {
14320 			return;
14321 		}
14322 		cmd->cmd_state &= ~FCP_PKT_ISSUED;
14323 	} else {
14324 		mutex_exit(&ptgt->tgt_mutex);
14325 		mutex_exit(&pptr->port_mutex);
14326 	}
14327 
14328 	fcp_queue_pkt(pptr, cmd);
14329 }
14330 
14331 
14332 static void
14333 fcp_fail_cmd(struct fcp_pkt *cmd, uchar_t reason, uint_t statistics)
14334 {
14335 	ASSERT(cmd->cmd_flags & CFLAG_IN_QUEUE);
14336 
14337 	cmd->cmd_flags &= ~CFLAG_IN_QUEUE;
14338 	cmd->cmd_state = FCP_PKT_IDLE;
14339 
14340 	cmd->cmd_pkt->pkt_reason = reason;
14341 	cmd->cmd_pkt->pkt_state = 0;
14342 	cmd->cmd_pkt->pkt_statistics = statistics;
14343 
14344 	fcp_post_callback(cmd);
14345 }
14346 
14347 /*
14348  *     Function: fcp_queue_pkt
14349  *
14350  *  Description: This function queues the packet passed by the caller into
14351  *		 the list of packets of the FCP port.
14352  *
14353  *     Argument: *pptr		FCP port.
14354  *		 *cmd		FCP packet to queue.
14355  *
14356  * Return Value: None
14357  *
14358  *	Context: User, Kernel and Interrupt context.
14359  */
14360 static void
14361 fcp_queue_pkt(struct fcp_port *pptr, struct fcp_pkt *cmd)
14362 {
14363 	ASSERT((cmd->cmd_pkt->pkt_flags & FLAG_NOQUEUE) == NULL);
14364 
14365 	mutex_enter(&pptr->port_pkt_mutex);
14366 	cmd->cmd_flags |= CFLAG_IN_QUEUE;
14367 	ASSERT(cmd->cmd_state != FCP_PKT_ISSUED);
14368 	cmd->cmd_timeout = fcp_watchdog_time + FCP_QUEUE_DELAY;
14369 
14370 	/*
14371 	 * zero pkt_time means hang around for ever
14372 	 */
14373 	if (cmd->cmd_pkt->pkt_time) {
14374 		if (cmd->cmd_fp_pkt->pkt_timeout > FCP_QUEUE_DELAY) {
14375 			cmd->cmd_fp_pkt->pkt_timeout -= FCP_QUEUE_DELAY;
14376 		} else {
14377 			/*
14378 			 * Indicate the watch thread to fail the
14379 			 * command by setting it to highest value
14380 			 */
14381 			cmd->cmd_timeout = fcp_watchdog_time;
14382 			cmd->cmd_fp_pkt->pkt_timeout = FCP_INVALID_TIMEOUT;
14383 		}
14384 	}
14385 
14386 	if (pptr->port_pkt_head) {
14387 		ASSERT(pptr->port_pkt_tail != NULL);
14388 
14389 		pptr->port_pkt_tail->cmd_next = cmd;
14390 		pptr->port_pkt_tail = cmd;
14391 	} else {
14392 		ASSERT(pptr->port_pkt_tail == NULL);
14393 
14394 		pptr->port_pkt_head = pptr->port_pkt_tail = cmd;
14395 	}
14396 	cmd->cmd_next = NULL;
14397 	mutex_exit(&pptr->port_pkt_mutex);
14398 }
14399 
14400 /*
14401  *     Function: fcp_update_targets
14402  *
14403  *  Description: This function applies the specified change of state to all
14404  *		 the targets listed.  The operation applied is 'set'.
14405  *
14406  *     Argument: *pptr		FCP port.
14407  *		 *dev_list	Array of fc_portmap_t structures.
14408  *		 count		Length of dev_list.
14409  *		 state		State bits to update.
14410  *		 cause		Reason for the update.
14411  *
14412  * Return Value: None
14413  *
14414  *	Context: User, Kernel and Interrupt context.
14415  *		 The mutex pptr->port_mutex must be held.
14416  */
14417 static void
14418 fcp_update_targets(struct fcp_port *pptr, fc_portmap_t *dev_list,
14419     uint32_t count, uint32_t state, int cause)
14420 {
14421 	fc_portmap_t		*map_entry;
14422 	struct fcp_tgt	*ptgt;
14423 
14424 	ASSERT(MUTEX_HELD(&pptr->port_mutex));
14425 
14426 	while (count--) {
14427 		map_entry = &(dev_list[count]);
14428 		ptgt = fcp_lookup_target(pptr,
14429 		    (uchar_t *)&(map_entry->map_pwwn));
14430 		if (ptgt == NULL) {
14431 			continue;
14432 		}
14433 
14434 		mutex_enter(&ptgt->tgt_mutex);
14435 		ptgt->tgt_trace = 0;
14436 		ptgt->tgt_change_cnt++;
14437 		ptgt->tgt_statec_cause = cause;
14438 		ptgt->tgt_tmp_cnt = 1;
14439 		fcp_update_tgt_state(ptgt, FCP_SET, state);
14440 		mutex_exit(&ptgt->tgt_mutex);
14441 	}
14442 }
14443 
14444 static int
14445 fcp_call_finish_init(struct fcp_port *pptr, struct fcp_tgt *ptgt,
14446     int lcount, int tcount, int cause)
14447 {
14448 	int rval;
14449 
14450 	mutex_enter(&pptr->port_mutex);
14451 	rval = fcp_call_finish_init_held(pptr, ptgt, lcount, tcount, cause);
14452 	mutex_exit(&pptr->port_mutex);
14453 
14454 	return (rval);
14455 }
14456 
14457 
14458 static int
14459 fcp_call_finish_init_held(struct fcp_port *pptr, struct fcp_tgt *ptgt,
14460     int lcount, int tcount, int cause)
14461 {
14462 	int	finish_init = 0;
14463 	int	finish_tgt = 0;
14464 	int	do_finish_init = 0;
14465 	int	rval = FCP_NO_CHANGE;
14466 
14467 	if (cause == FCP_CAUSE_LINK_CHANGE ||
14468 	    cause == FCP_CAUSE_LINK_DOWN) {
14469 		do_finish_init = 1;
14470 	}
14471 
14472 	if (ptgt != NULL) {
14473 		FCP_TRACE(fcp_logq, pptr->port_instbuf, fcp_trace,
14474 		    FCP_BUF_LEVEL_2, 0,
14475 		    "link_cnt: %d,%d; tgt_cnt: %d,%d; tmp_cnt: %d,%d;"
14476 		    " cause = %d, d_id = 0x%x, tgt_done = %d",
14477 		    pptr->port_link_cnt, lcount, ptgt->tgt_change_cnt, tcount,
14478 		    pptr->port_tmp_cnt, ptgt->tgt_tmp_cnt, cause,
14479 		    ptgt->tgt_d_id, ptgt->tgt_done);
14480 
14481 		mutex_enter(&ptgt->tgt_mutex);
14482 
14483 		if (tcount && (ptgt->tgt_change_cnt != tcount)) {
14484 			rval = FCP_DEV_CHANGE;
14485 			if (do_finish_init && ptgt->tgt_done == 0) {
14486 				ptgt->tgt_done++;
14487 				finish_init = 1;
14488 			}
14489 		} else {
14490 			if (--ptgt->tgt_tmp_cnt <= 0) {
14491 				ptgt->tgt_tmp_cnt = 0;
14492 				finish_tgt = 1;
14493 
14494 				if (do_finish_init) {
14495 					finish_init = 1;
14496 				}
14497 			}
14498 		}
14499 		mutex_exit(&ptgt->tgt_mutex);
14500 	} else {
14501 		FCP_TRACE(fcp_logq, pptr->port_instbuf, fcp_trace,
14502 		    FCP_BUF_LEVEL_2, 0,
14503 		    "Call Finish Init for NO target");
14504 
14505 		if (do_finish_init) {
14506 			finish_init = 1;
14507 		}
14508 	}
14509 
14510 	if (finish_tgt) {
14511 		ASSERT(ptgt != NULL);
14512 
14513 		mutex_enter(&ptgt->tgt_mutex);
14514 #ifdef	DEBUG
14515 		bzero(ptgt->tgt_tmp_cnt_stack,
14516 		    sizeof (ptgt->tgt_tmp_cnt_stack));
14517 
14518 		ptgt->tgt_tmp_cnt_depth = getpcstack(ptgt->tgt_tmp_cnt_stack,
14519 		    FCP_STACK_DEPTH);
14520 #endif /* DEBUG */
14521 		mutex_exit(&ptgt->tgt_mutex);
14522 
14523 		(void) fcp_finish_tgt(pptr, ptgt, lcount, tcount, cause);
14524 	}
14525 
14526 	if (finish_init && lcount == pptr->port_link_cnt) {
14527 		ASSERT(pptr->port_tmp_cnt > 0);
14528 		if (--pptr->port_tmp_cnt == 0) {
14529 			fcp_finish_init(pptr);
14530 		}
14531 	} else if (lcount != pptr->port_link_cnt) {
14532 		FCP_TRACE(fcp_logq, pptr->port_instbuf,
14533 		    fcp_trace, FCP_BUF_LEVEL_2, 0,
14534 		    "fcp_call_finish_init_held,1: state change occured"
14535 		    " for D_ID=0x%x", (ptgt) ? ptgt->tgt_d_id : 0);
14536 	}
14537 
14538 	return (rval);
14539 }
14540 
14541 
14542 static void
14543 fcp_reconfigure_luns(void * tgt_handle)
14544 {
14545 	uint32_t		dev_cnt;
14546 	fc_portmap_t		*devlist;
14547 	struct fcp_tgt	*ptgt = (struct fcp_tgt *)tgt_handle;
14548 	struct fcp_port		*pptr = ptgt->tgt_port;
14549 
14550 	/*
14551 	 * If the timer that fires this off got canceled too late, the
14552 	 * target could have been destroyed.
14553 	 */
14554 
14555 	if (ptgt->tgt_tid == NULL) {
14556 		return;
14557 	}
14558 
14559 	devlist = kmem_zalloc(sizeof (*devlist), KM_NOSLEEP);
14560 	if (devlist == NULL) {
14561 		fcp_log(CE_WARN, pptr->port_dip,
14562 		    "!fcp%d: failed to allocate for portmap",
14563 		    pptr->port_instance);
14564 		return;
14565 	}
14566 
14567 	dev_cnt = 1;
14568 	devlist->map_pd = ptgt->tgt_pd_handle;
14569 	devlist->map_hard_addr.hard_addr = ptgt->tgt_hard_addr;
14570 	devlist->map_did.port_id = ptgt->tgt_d_id;
14571 
14572 	bcopy(&ptgt->tgt_node_wwn.raw_wwn[0], &devlist->map_nwwn, FC_WWN_SIZE);
14573 	bcopy(&ptgt->tgt_port_wwn.raw_wwn[0], &devlist->map_pwwn, FC_WWN_SIZE);
14574 
14575 	devlist->map_state = PORT_DEVICE_LOGGED_IN;
14576 	devlist->map_type = PORT_DEVICE_NEW;
14577 	devlist->map_flags = 0;
14578 
14579 	fcp_statec_callback(NULL, pptr->port_fp_handle, FC_STATE_DEVICE_CHANGE,
14580 	    pptr->port_topology, devlist, dev_cnt, pptr->port_id);
14581 
14582 	/*
14583 	 * Clear the tgt_tid after no more references to
14584 	 * the fcp_tgt
14585 	 */
14586 	mutex_enter(&ptgt->tgt_mutex);
14587 	ptgt->tgt_tid = NULL;
14588 	mutex_exit(&ptgt->tgt_mutex);
14589 
14590 	kmem_free(devlist, sizeof (*devlist));
14591 }
14592 
14593 
14594 static void
14595 fcp_free_targets(struct fcp_port *pptr)
14596 {
14597 	int			i;
14598 	struct fcp_tgt	*ptgt;
14599 
14600 	mutex_enter(&pptr->port_mutex);
14601 	for (i = 0; i < FCP_NUM_HASH; i++) {
14602 		ptgt = pptr->port_tgt_hash_table[i];
14603 		while (ptgt != NULL) {
14604 			struct fcp_tgt *next_tgt = ptgt->tgt_next;
14605 
14606 			fcp_free_target(ptgt);
14607 			ptgt = next_tgt;
14608 		}
14609 	}
14610 	mutex_exit(&pptr->port_mutex);
14611 }
14612 
14613 
14614 static void
14615 fcp_free_target(struct fcp_tgt *ptgt)
14616 {
14617 	struct fcp_lun	*plun;
14618 	timeout_id_t		tid;
14619 
14620 	mutex_enter(&ptgt->tgt_mutex);
14621 	tid = ptgt->tgt_tid;
14622 
14623 	/*
14624 	 * Cancel any pending timeouts for this target.
14625 	 */
14626 
14627 	if (tid != NULL) {
14628 		/*
14629 		 * Set tgt_tid to NULL first to avoid a race in the callback.
14630 		 * If tgt_tid is NULL, the callback will simply return.
14631 		 */
14632 		ptgt->tgt_tid = NULL;
14633 		mutex_exit(&ptgt->tgt_mutex);
14634 		(void) untimeout(tid);
14635 		mutex_enter(&ptgt->tgt_mutex);
14636 	}
14637 
14638 	plun = ptgt->tgt_lun;
14639 	while (plun != NULL) {
14640 		struct fcp_lun *next_lun = plun->lun_next;
14641 
14642 		fcp_dealloc_lun(plun);
14643 		plun = next_lun;
14644 	}
14645 
14646 	mutex_exit(&ptgt->tgt_mutex);
14647 	fcp_dealloc_tgt(ptgt);
14648 }
14649 
14650 /*
14651  *     Function: fcp_is_retryable
14652  *
14653  *  Description: Indicates if the internal packet is retryable.
14654  *
14655  *     Argument: *icmd		FCP internal packet.
14656  *
14657  * Return Value: 0	Not retryable
14658  *		 1	Retryable
14659  *
14660  *	Context: User, Kernel and Interrupt context
14661  */
14662 static int
14663 fcp_is_retryable(struct fcp_ipkt *icmd)
14664 {
14665 	if (icmd->ipkt_port->port_state & (FCP_STATE_SUSPENDED |
14666 	    FCP_STATE_DETACHING | FCP_STATE_POWER_DOWN)) {
14667 		return (0);
14668 	}
14669 
14670 	return (((fcp_watchdog_time + icmd->ipkt_fpkt->pkt_timeout) <
14671 	    icmd->ipkt_port->port_deadline) ? 1 : 0);
14672 }
14673 
14674 /*
14675  *     Function: fcp_create_on_demand
14676  *
14677  *     Argument: *pptr		FCP port.
14678  *		 *pwwn		Port WWN.
14679  *
14680  * Return Value: 0	Success
14681  *		 EIO
14682  *		 ENOMEM
14683  *		 EBUSY
14684  *		 EINVAL
14685  *
14686  *	Context: User and Kernel context
14687  */
14688 static int
14689 fcp_create_on_demand(struct fcp_port *pptr, uchar_t *pwwn)
14690 {
14691 	int			wait_ms;
14692 	int			tcount;
14693 	int			lcount;
14694 	int			ret;
14695 	int			error;
14696 	int			rval = EIO;
14697 	int			ntries;
14698 	fc_portmap_t		*devlist;
14699 	opaque_t		pd;
14700 	struct fcp_lun		*plun;
14701 	struct fcp_tgt		*ptgt;
14702 	int			old_manual = 0;
14703 
14704 	/* Allocates the fc_portmap_t structure. */
14705 	devlist = kmem_zalloc(sizeof (*devlist), KM_SLEEP);
14706 
14707 	/*
14708 	 * If FC_INVALID_RSCN_COUNT is non-zero, we will have to init as shown
14709 	 * in the commented statement below:
14710 	 *
14711 	 * devlist->map_rscn_info.ulp_rscn_count = FC_INVALID_RSCN_COUNT;
14712 	 *
14713 	 * Below, the deadline for the discovery process is set.
14714 	 */
14715 	mutex_enter(&pptr->port_mutex);
14716 	pptr->port_deadline = fcp_watchdog_time + FCP_ICMD_DEADLINE;
14717 	mutex_exit(&pptr->port_mutex);
14718 
14719 	/*
14720 	 * We try to find the remote port based on the WWN provided by the
14721 	 * caller.  We actually ask fp/fctl if it has it.
14722 	 */
14723 	pd = fc_ulp_get_remote_port(pptr->port_fp_handle,
14724 	    (la_wwn_t *)pwwn, &error, 1);
14725 
14726 	if (pd == NULL) {
14727 		kmem_free(devlist, sizeof (*devlist));
14728 		return (rval);
14729 	}
14730 
14731 	/*
14732 	 * The remote port was found.  We ask fp/fctl to update our
14733 	 * fc_portmap_t structure.
14734 	 */
14735 	ret = fc_ulp_pwwn_to_portmap(pptr->port_fp_handle,
14736 	    (la_wwn_t *)pwwn, devlist);
14737 	if (ret != FC_SUCCESS) {
14738 		kmem_free(devlist, sizeof (*devlist));
14739 		return (rval);
14740 	}
14741 
14742 	/*
14743 	 * The map flag field is set to indicates that the creation is being
14744 	 * done at the user request (Ioclt probably luxadm or cfgadm).
14745 	 */
14746 	devlist->map_type = PORT_DEVICE_USER_CREATE;
14747 
14748 	mutex_enter(&pptr->port_mutex);
14749 
14750 	/*
14751 	 * We check to see if fcp already has a target that describes the
14752 	 * device being created.  If not it is created.
14753 	 */
14754 	ptgt = fcp_lookup_target(pptr, pwwn);
14755 	if (ptgt == NULL) {
14756 		lcount = pptr->port_link_cnt;
14757 		mutex_exit(&pptr->port_mutex);
14758 
14759 		ptgt = fcp_alloc_tgt(pptr, devlist, lcount);
14760 		if (ptgt == NULL) {
14761 			fcp_log(CE_WARN, pptr->port_dip,
14762 			    "!FC target allocation failed");
14763 			return (ENOMEM);
14764 		}
14765 
14766 		mutex_enter(&pptr->port_mutex);
14767 	}
14768 
14769 	mutex_enter(&ptgt->tgt_mutex);
14770 	ptgt->tgt_statec_cause = FCP_CAUSE_USER_CREATE;
14771 	ptgt->tgt_tmp_cnt = 1;
14772 	ptgt->tgt_device_created = 0;
14773 	/*
14774 	 * If fabric and auto config is set but the target was
14775 	 * manually unconfigured then reset to the manual_config_only to
14776 	 * 0 so the device will get configured.
14777 	 */
14778 	if (FC_TOP_EXTERNAL(pptr->port_topology) &&
14779 	    fcp_enable_auto_configuration &&
14780 	    ptgt->tgt_manual_config_only == 1) {
14781 		old_manual = 1;
14782 		ptgt->tgt_manual_config_only = 0;
14783 	}
14784 	mutex_exit(&ptgt->tgt_mutex);
14785 
14786 	fcp_update_targets(pptr, devlist, 1,
14787 	    FCP_LUN_BUSY | FCP_LUN_MARK, FCP_CAUSE_USER_CREATE);
14788 
14789 	lcount = pptr->port_link_cnt;
14790 	tcount = ptgt->tgt_change_cnt;
14791 
14792 	if (fcp_handle_mapflags(pptr, ptgt, devlist, lcount,
14793 	    tcount, FCP_CAUSE_USER_CREATE) == TRUE) {
14794 		if (FC_TOP_EXTERNAL(pptr->port_topology) &&
14795 		    fcp_enable_auto_configuration && old_manual) {
14796 			mutex_enter(&ptgt->tgt_mutex);
14797 			ptgt->tgt_manual_config_only = 1;
14798 			mutex_exit(&ptgt->tgt_mutex);
14799 		}
14800 
14801 		if (pptr->port_link_cnt != lcount ||
14802 		    ptgt->tgt_change_cnt != tcount) {
14803 			rval = EBUSY;
14804 		}
14805 		mutex_exit(&pptr->port_mutex);
14806 
14807 		FCP_TRACE(fcp_logq, pptr->port_instbuf, fcp_trace,
14808 		    FCP_BUF_LEVEL_3, 0,
14809 		    "fcp_create_on_demand: mapflags ptgt=%x, "
14810 		    "lcount=%x::port_link_cnt=%x, "
14811 		    "tcount=%x: tgt_change_cnt=%x, rval=%x",
14812 		    ptgt, lcount, pptr->port_link_cnt,
14813 		    tcount, ptgt->tgt_change_cnt, rval);
14814 		return (rval);
14815 	}
14816 
14817 	/*
14818 	 * Due to lack of synchronization mechanisms, we perform
14819 	 * periodic monitoring of our request; Because requests
14820 	 * get dropped when another one supercedes (either because
14821 	 * of a link change or a target change), it is difficult to
14822 	 * provide a clean synchronization mechanism (such as a
14823 	 * semaphore or a conditional variable) without exhaustively
14824 	 * rewriting the mainline discovery code of this driver.
14825 	 */
14826 	wait_ms = 500;
14827 
14828 	ntries = fcp_max_target_retries;
14829 
14830 	FCP_TRACE(fcp_logq, pptr->port_instbuf, fcp_trace,
14831 	    FCP_BUF_LEVEL_3, 0,
14832 	    "fcp_create_on_demand(1): ntries=%x, ptgt=%x, "
14833 	    "lcount=%x::port_link_cnt=%x, "
14834 	    "tcount=%x::tgt_change_cnt=%x, rval=%x, tgt_device_created=%x "
14835 	    "tgt_tmp_cnt =%x",
14836 	    ntries, ptgt, lcount, pptr->port_link_cnt,
14837 	    tcount, ptgt->tgt_change_cnt, rval, ptgt->tgt_device_created,
14838 	    ptgt->tgt_tmp_cnt);
14839 
14840 	mutex_enter(&ptgt->tgt_mutex);
14841 	while (ntries-- != 0 && pptr->port_link_cnt == lcount &&
14842 	    ptgt->tgt_change_cnt == tcount && ptgt->tgt_device_created == 0) {
14843 		mutex_exit(&ptgt->tgt_mutex);
14844 		mutex_exit(&pptr->port_mutex);
14845 
14846 		delay(drv_usectohz(wait_ms * 1000));
14847 
14848 		mutex_enter(&pptr->port_mutex);
14849 		mutex_enter(&ptgt->tgt_mutex);
14850 	}
14851 
14852 
14853 	if (pptr->port_link_cnt != lcount || ptgt->tgt_change_cnt != tcount) {
14854 		rval = EBUSY;
14855 	} else {
14856 		if (ptgt->tgt_tmp_cnt == 0 && ptgt->tgt_node_state ==
14857 		    FCP_TGT_NODE_PRESENT) {
14858 			rval = 0;
14859 		}
14860 	}
14861 
14862 	FCP_TRACE(fcp_logq, pptr->port_instbuf, fcp_trace,
14863 	    FCP_BUF_LEVEL_3, 0,
14864 	    "fcp_create_on_demand(2): ntries=%x, ptgt=%x, "
14865 	    "lcount=%x::port_link_cnt=%x, "
14866 	    "tcount=%x::tgt_change_cnt=%x, rval=%x, tgt_device_created=%x "
14867 	    "tgt_tmp_cnt =%x",
14868 	    ntries, ptgt, lcount, pptr->port_link_cnt,
14869 	    tcount, ptgt->tgt_change_cnt, rval, ptgt->tgt_device_created,
14870 	    ptgt->tgt_tmp_cnt);
14871 
14872 	if (rval) {
14873 		if (FC_TOP_EXTERNAL(pptr->port_topology) &&
14874 		    fcp_enable_auto_configuration && old_manual) {
14875 			ptgt->tgt_manual_config_only = 1;
14876 		}
14877 		mutex_exit(&ptgt->tgt_mutex);
14878 		mutex_exit(&pptr->port_mutex);
14879 		kmem_free(devlist, sizeof (*devlist));
14880 
14881 		FCP_TRACE(fcp_logq, pptr->port_instbuf, fcp_trace,
14882 		    FCP_BUF_LEVEL_3, 0,
14883 		    "fcp_create_on_demand(3): ntries=%x, ptgt=%x, "
14884 		    "lcount=%x::port_link_cnt=%x, "
14885 		    "tcount=%x::tgt_change_cnt=%x, rval=%x, "
14886 		    "tgt_device_created=%x, tgt D_ID=%x",
14887 		    ntries, ptgt, lcount, pptr->port_link_cnt,
14888 		    tcount, ptgt->tgt_change_cnt, rval,
14889 		    ptgt->tgt_device_created, ptgt->tgt_d_id);
14890 		return (rval);
14891 	}
14892 
14893 	if ((plun = ptgt->tgt_lun) != NULL) {
14894 		tcount = plun->lun_tgt->tgt_change_cnt;
14895 	} else {
14896 		rval = EINVAL;
14897 	}
14898 	lcount = pptr->port_link_cnt;
14899 
14900 	/*
14901 	 * Configuring the target with no LUNs will fail. We
14902 	 * should reset the node state so that it is not
14903 	 * automatically configured when the LUNs are added
14904 	 * to this target.
14905 	 */
14906 	if (ptgt->tgt_lun_cnt == 0) {
14907 		ptgt->tgt_node_state = FCP_TGT_NODE_NONE;
14908 	}
14909 	mutex_exit(&ptgt->tgt_mutex);
14910 	mutex_exit(&pptr->port_mutex);
14911 
14912 	while (plun) {
14913 		child_info_t	*cip;
14914 
14915 		mutex_enter(&plun->lun_mutex);
14916 		cip = plun->lun_cip;
14917 		mutex_exit(&plun->lun_mutex);
14918 
14919 		mutex_enter(&ptgt->tgt_mutex);
14920 		if (!(plun->lun_state & FCP_LUN_OFFLINE)) {
14921 			mutex_exit(&ptgt->tgt_mutex);
14922 
14923 			rval = fcp_pass_to_hp_and_wait(pptr, plun, cip,
14924 			    FCP_ONLINE, lcount, tcount,
14925 			    NDI_ONLINE_ATTACH);
14926 			if (rval != NDI_SUCCESS) {
14927 				FCP_TRACE(fcp_logq,
14928 				    pptr->port_instbuf, fcp_trace,
14929 				    FCP_BUF_LEVEL_3, 0,
14930 				    "fcp_create_on_demand: "
14931 				    "pass_to_hp_and_wait failed "
14932 				    "rval=%x", rval);
14933 				rval = EIO;
14934 			} else {
14935 				mutex_enter(&LUN_TGT->tgt_mutex);
14936 				plun->lun_state &= ~(FCP_LUN_OFFLINE |
14937 				    FCP_LUN_BUSY);
14938 				mutex_exit(&LUN_TGT->tgt_mutex);
14939 			}
14940 			mutex_enter(&ptgt->tgt_mutex);
14941 		}
14942 
14943 		plun = plun->lun_next;
14944 		mutex_exit(&ptgt->tgt_mutex);
14945 	}
14946 
14947 	kmem_free(devlist, sizeof (*devlist));
14948 
14949 	if (FC_TOP_EXTERNAL(pptr->port_topology) &&
14950 	    fcp_enable_auto_configuration && old_manual) {
14951 		mutex_enter(&ptgt->tgt_mutex);
14952 		/* if successful then set manual to 0 */
14953 		if (rval == 0) {
14954 			ptgt->tgt_manual_config_only = 0;
14955 		} else {
14956 			/* reset to 1 so the user has to do the config */
14957 			ptgt->tgt_manual_config_only = 1;
14958 		}
14959 		mutex_exit(&ptgt->tgt_mutex);
14960 	}
14961 
14962 	return (rval);
14963 }
14964 
14965 
14966 static void
14967 fcp_ascii_to_wwn(caddr_t string, uchar_t bytes[], unsigned int byte_len)
14968 {
14969 	int		count;
14970 	uchar_t		byte;
14971 
14972 	count = 0;
14973 	while (*string) {
14974 		byte = FCP_ATOB(*string); string++;
14975 		byte = byte << 4 | FCP_ATOB(*string); string++;
14976 		bytes[count++] = byte;
14977 
14978 		if (count >= byte_len) {
14979 			break;
14980 		}
14981 	}
14982 }
14983 
14984 static void
14985 fcp_wwn_to_ascii(uchar_t wwn[], char *string)
14986 {
14987 	int		i;
14988 
14989 	for (i = 0; i < FC_WWN_SIZE; i++) {
14990 		(void) sprintf(string + (i * 2),
14991 		    "%02x", wwn[i]);
14992 	}
14993 
14994 }
14995 
14996 static void
14997 fcp_print_error(fc_packet_t *fpkt)
14998 {
14999 	struct fcp_ipkt	*icmd = (struct fcp_ipkt *)
15000 	    fpkt->pkt_ulp_private;
15001 	struct fcp_port	*pptr;
15002 	struct fcp_tgt	*ptgt;
15003 	struct fcp_lun	*plun;
15004 	caddr_t			buf;
15005 	int			scsi_cmd = 0;
15006 
15007 	ptgt = icmd->ipkt_tgt;
15008 	plun = icmd->ipkt_lun;
15009 	pptr = ptgt->tgt_port;
15010 
15011 	buf = kmem_zalloc(256, KM_NOSLEEP);
15012 	if (buf == NULL) {
15013 		return;
15014 	}
15015 
15016 	switch (icmd->ipkt_opcode) {
15017 	case SCMD_REPORT_LUN:
15018 		(void) sprintf(buf, "!REPORT LUN to D_ID=0x%%x"
15019 		    " lun=0x%%x failed");
15020 		scsi_cmd++;
15021 		break;
15022 
15023 	case SCMD_INQUIRY_PAGE83:
15024 		(void) sprintf(buf, "!INQUIRY-83 to D_ID=0x%%x"
15025 		    " lun=0x%%x failed");
15026 		scsi_cmd++;
15027 		break;
15028 
15029 	case SCMD_INQUIRY:
15030 		(void) sprintf(buf, "!INQUIRY to D_ID=0x%%x"
15031 		    " lun=0x%%x failed");
15032 		scsi_cmd++;
15033 		break;
15034 
15035 	case LA_ELS_PLOGI:
15036 		(void) sprintf(buf, "!PLOGI to D_ID=0x%%x failed");
15037 		break;
15038 
15039 	case LA_ELS_PRLI:
15040 		(void) sprintf(buf, "!PRLI to D_ID=0x%%x failed");
15041 		break;
15042 	}
15043 
15044 	if (scsi_cmd && fpkt->pkt_state == FC_PKT_SUCCESS) {
15045 		struct fcp_rsp		response, *rsp;
15046 		uchar_t			asc, ascq;
15047 		caddr_t			sense_key = NULL;
15048 		struct fcp_rsp_info	fcp_rsp_err, *bep;
15049 
15050 		if (icmd->ipkt_nodma) {
15051 			rsp = (struct fcp_rsp *)fpkt->pkt_resp;
15052 			bep = (struct fcp_rsp_info *)((caddr_t)rsp +
15053 			    sizeof (struct fcp_rsp));
15054 		} else {
15055 			rsp = &response;
15056 			bep = &fcp_rsp_err;
15057 
15058 			FCP_CP_IN(fpkt->pkt_resp, rsp, fpkt->pkt_resp_acc,
15059 			    sizeof (struct fcp_rsp));
15060 
15061 			FCP_CP_IN(fpkt->pkt_resp + sizeof (struct fcp_rsp),
15062 			    bep, fpkt->pkt_resp_acc,
15063 			    sizeof (struct fcp_rsp_info));
15064 		}
15065 
15066 
15067 		if (fcp_validate_fcp_response(rsp, pptr) != FC_SUCCESS) {
15068 			(void) sprintf(buf + strlen(buf),
15069 			    " : Bad FCP response values rsvd1=%%x, rsvd2=%%x,"
15070 			    " sts-rsvd1=%%x, sts-rsvd2=%%x, rsplen=%%x,"
15071 			    " senselen=%%x. Giving up");
15072 
15073 			fcp_log(CE_WARN, pptr->port_dip, buf,
15074 			    ptgt->tgt_d_id, plun->lun_num, rsp->reserved_0,
15075 			    rsp->reserved_1, rsp->fcp_u.fcp_status.reserved_0,
15076 			    rsp->fcp_u.fcp_status.reserved_1,
15077 			    rsp->fcp_response_len, rsp->fcp_sense_len);
15078 
15079 			kmem_free(buf, 256);
15080 			return;
15081 		}
15082 
15083 		if (rsp->fcp_u.fcp_status.rsp_len_set &&
15084 		    bep->rsp_code != FCP_NO_FAILURE) {
15085 			(void) sprintf(buf + strlen(buf),
15086 			    " FCP Response code = 0x%x", bep->rsp_code);
15087 		}
15088 
15089 		if (rsp->fcp_u.fcp_status.scsi_status & STATUS_CHECK) {
15090 			struct scsi_extended_sense sense_info, *sense_ptr;
15091 
15092 			if (icmd->ipkt_nodma) {
15093 				sense_ptr = (struct scsi_extended_sense *)
15094 				    ((caddr_t)fpkt->pkt_resp +
15095 				    sizeof (struct fcp_rsp) +
15096 				    rsp->fcp_response_len);
15097 			} else {
15098 				sense_ptr = &sense_info;
15099 
15100 				FCP_CP_IN(fpkt->pkt_resp +
15101 				    sizeof (struct fcp_rsp) +
15102 				    rsp->fcp_response_len, &sense_info,
15103 				    fpkt->pkt_resp_acc,
15104 				    sizeof (struct scsi_extended_sense));
15105 			}
15106 
15107 			if (sense_ptr->es_key < NUM_SENSE_KEYS +
15108 			    NUM_IMPL_SENSE_KEYS) {
15109 				sense_key = sense_keys[sense_ptr->es_key];
15110 			} else {
15111 				sense_key = "Undefined";
15112 			}
15113 
15114 			asc = sense_ptr->es_add_code;
15115 			ascq = sense_ptr->es_qual_code;
15116 
15117 			(void) sprintf(buf + strlen(buf),
15118 			    ": sense key=%%s, ASC=%%x," " ASCQ=%%x."
15119 			    " Giving up");
15120 
15121 			fcp_log(CE_WARN, pptr->port_dip, buf,
15122 			    ptgt->tgt_d_id, plun->lun_num, sense_key,
15123 			    asc, ascq);
15124 		} else {
15125 			(void) sprintf(buf + strlen(buf),
15126 			    " : SCSI status=%%x. Giving up");
15127 
15128 			fcp_log(CE_WARN, pptr->port_dip, buf,
15129 			    ptgt->tgt_d_id, plun->lun_num,
15130 			    rsp->fcp_u.fcp_status.scsi_status);
15131 		}
15132 	} else {
15133 		caddr_t state, reason, action, expln;
15134 
15135 		(void) fc_ulp_pkt_error(fpkt, &state, &reason,
15136 		    &action, &expln);
15137 
15138 		(void) sprintf(buf + strlen(buf), ": State:%%s,"
15139 		    " Reason:%%s. Giving up");
15140 
15141 		if (scsi_cmd) {
15142 			fcp_log(CE_WARN, pptr->port_dip, buf,
15143 			    ptgt->tgt_d_id, plun->lun_num, state, reason);
15144 		} else {
15145 			fcp_log(CE_WARN, pptr->port_dip, buf,
15146 			    ptgt->tgt_d_id, state, reason);
15147 		}
15148 	}
15149 
15150 	kmem_free(buf, 256);
15151 }
15152 
15153 
15154 static int
15155 fcp_handle_ipkt_errors(struct fcp_port *pptr, struct fcp_tgt *ptgt,
15156     struct fcp_ipkt *icmd, int rval, caddr_t op)
15157 {
15158 	int	ret = DDI_FAILURE;
15159 	char	*error;
15160 
15161 	switch (rval) {
15162 	case FC_DEVICE_BUSY_NEW_RSCN:
15163 		/*
15164 		 * This means that there was a new RSCN that the transport
15165 		 * knows about (which the ULP *may* know about too) but the
15166 		 * pkt that was sent down was related to an older RSCN. So, we
15167 		 * are just going to reset the retry count and deadline and
15168 		 * continue to retry. The idea is that transport is currently
15169 		 * working on the new RSCN and will soon let the ULPs know
15170 		 * about it and when it does the existing logic will kick in
15171 		 * where it will change the tcount to indicate that something
15172 		 * changed on the target. So, rediscovery will start and there
15173 		 * will not be an infinite retry.
15174 		 *
15175 		 * For a full flow of how the RSCN info is transferred back and
15176 		 * forth, see fp.c
15177 		 */
15178 		icmd->ipkt_retries = 0;
15179 		icmd->ipkt_port->port_deadline = fcp_watchdog_time +
15180 		    FCP_ICMD_DEADLINE;
15181 
15182 		FCP_TRACE(fcp_logq, pptr->port_instbuf, fcp_trace,
15183 		    FCP_BUF_LEVEL_3, 0,
15184 		    "fcp_handle_ipkt_errors: rval=%x  for D_ID=%x",
15185 		    rval, ptgt->tgt_d_id);
15186 		/* FALLTHROUGH */
15187 
15188 	case FC_STATEC_BUSY:
15189 	case FC_DEVICE_BUSY:
15190 	case FC_PBUSY:
15191 	case FC_FBUSY:
15192 	case FC_TRAN_BUSY:
15193 	case FC_OFFLINE:
15194 		FCP_TRACE(fcp_logq, pptr->port_instbuf, fcp_trace,
15195 		    FCP_BUF_LEVEL_3, 0,
15196 		    "fcp_handle_ipkt_errors: rval=%x  for D_ID=%x",
15197 		    rval, ptgt->tgt_d_id);
15198 		if (icmd->ipkt_retries < FCP_MAX_RETRIES &&
15199 		    fcp_is_retryable(icmd)) {
15200 			fcp_queue_ipkt(pptr, icmd->ipkt_fpkt);
15201 			ret = DDI_SUCCESS;
15202 		}
15203 		break;
15204 
15205 	case FC_LOGINREQ:
15206 		/*
15207 		 * FC_LOGINREQ used to be handled just like all the cases
15208 		 * above. It has been changed to handled a PRLI that fails
15209 		 * with FC_LOGINREQ different than other ipkts that fail
15210 		 * with FC_LOGINREQ. If a PRLI fails with FC_LOGINREQ it is
15211 		 * a simple matter to turn it into a PLOGI instead, so that's
15212 		 * exactly what we do here.
15213 		 */
15214 		if (icmd->ipkt_opcode == LA_ELS_PRLI) {
15215 			ret = fcp_send_els(icmd->ipkt_port, icmd->ipkt_tgt,
15216 			    icmd, LA_ELS_PLOGI, icmd->ipkt_link_cnt,
15217 			    icmd->ipkt_change_cnt, icmd->ipkt_cause);
15218 		} else {
15219 			FCP_TRACE(fcp_logq, pptr->port_instbuf, fcp_trace,
15220 			    FCP_BUF_LEVEL_3, 0,
15221 			    "fcp_handle_ipkt_errors: rval=%x  for D_ID=%x",
15222 			    rval, ptgt->tgt_d_id);
15223 			if (icmd->ipkt_retries < FCP_MAX_RETRIES &&
15224 			    fcp_is_retryable(icmd)) {
15225 				fcp_queue_ipkt(pptr, icmd->ipkt_fpkt);
15226 				ret = DDI_SUCCESS;
15227 			}
15228 		}
15229 		break;
15230 
15231 	default:
15232 		mutex_enter(&pptr->port_mutex);
15233 		mutex_enter(&ptgt->tgt_mutex);
15234 		if (!FCP_STATE_CHANGED(pptr, ptgt, icmd)) {
15235 			mutex_exit(&ptgt->tgt_mutex);
15236 			mutex_exit(&pptr->port_mutex);
15237 
15238 			(void) fc_ulp_error(rval, &error);
15239 			fcp_log(CE_WARN, pptr->port_dip,
15240 			    "!Failed to send %s to D_ID=%x error=%s",
15241 			    op, ptgt->tgt_d_id, error);
15242 		} else {
15243 			FCP_TRACE(fcp_logq, pptr->port_instbuf,
15244 			    fcp_trace, FCP_BUF_LEVEL_2, 0,
15245 			    "fcp_handle_ipkt_errors,1: state change occured"
15246 			    " for D_ID=0x%x", ptgt->tgt_d_id);
15247 			mutex_exit(&ptgt->tgt_mutex);
15248 			mutex_exit(&pptr->port_mutex);
15249 		}
15250 		break;
15251 	}
15252 
15253 	return (ret);
15254 }
15255 
15256 
15257 /*
15258  * Check of outstanding commands on any LUN for this target
15259  */
15260 static int
15261 fcp_outstanding_lun_cmds(struct fcp_tgt *ptgt)
15262 {
15263 	struct	fcp_lun	*plun;
15264 	struct	fcp_pkt	*cmd;
15265 
15266 	for (plun = ptgt->tgt_lun; plun != NULL; plun = plun->lun_next) {
15267 		mutex_enter(&plun->lun_mutex);
15268 		for (cmd = plun->lun_pkt_head; cmd != NULL;
15269 		    cmd = cmd->cmd_forw) {
15270 			if (cmd->cmd_state == FCP_PKT_ISSUED) {
15271 				mutex_exit(&plun->lun_mutex);
15272 				return (FC_SUCCESS);
15273 			}
15274 		}
15275 		mutex_exit(&plun->lun_mutex);
15276 	}
15277 
15278 	return (FC_FAILURE);
15279 }
15280 
15281 static fc_portmap_t *
15282 fcp_construct_map(struct fcp_port *pptr, uint32_t *dev_cnt)
15283 {
15284 	int			i;
15285 	fc_portmap_t		*devlist;
15286 	fc_portmap_t		*devptr = NULL;
15287 	struct fcp_tgt	*ptgt;
15288 
15289 	mutex_enter(&pptr->port_mutex);
15290 	for (i = 0, *dev_cnt = 0; i < FCP_NUM_HASH; i++) {
15291 		for (ptgt = pptr->port_tgt_hash_table[i]; ptgt != NULL;
15292 		    ptgt = ptgt->tgt_next) {
15293 			if (!(ptgt->tgt_state & FCP_TGT_ORPHAN)) {
15294 				++*dev_cnt;
15295 			}
15296 		}
15297 	}
15298 
15299 	devptr = devlist = kmem_zalloc(sizeof (*devlist) * *dev_cnt,
15300 	    KM_NOSLEEP);
15301 	if (devlist == NULL) {
15302 		mutex_exit(&pptr->port_mutex);
15303 		fcp_log(CE_WARN, pptr->port_dip,
15304 		    "!fcp%d: failed to allocate for portmap for construct map",
15305 		    pptr->port_instance);
15306 		return (devptr);
15307 	}
15308 
15309 	for (i = 0; i < FCP_NUM_HASH; i++) {
15310 		for (ptgt = pptr->port_tgt_hash_table[i]; ptgt != NULL;
15311 		    ptgt = ptgt->tgt_next) {
15312 			if (!(ptgt->tgt_state & FCP_TGT_ORPHAN)) {
15313 				int ret;
15314 
15315 				ret = fc_ulp_pwwn_to_portmap(
15316 				    pptr->port_fp_handle,
15317 				    (la_wwn_t *)&ptgt->tgt_port_wwn.raw_wwn[0],
15318 				    devlist);
15319 
15320 				if (ret == FC_SUCCESS) {
15321 					devlist++;
15322 					continue;
15323 				}
15324 
15325 				devlist->map_pd = NULL;
15326 				devlist->map_did.port_id = ptgt->tgt_d_id;
15327 				devlist->map_hard_addr.hard_addr =
15328 				    ptgt->tgt_hard_addr;
15329 
15330 				devlist->map_state = PORT_DEVICE_INVALID;
15331 				devlist->map_type = PORT_DEVICE_OLD;
15332 
15333 				bcopy(&ptgt->tgt_node_wwn.raw_wwn[0],
15334 				    &devlist->map_nwwn, FC_WWN_SIZE);
15335 
15336 				bcopy(&ptgt->tgt_port_wwn.raw_wwn[0],
15337 				    &devlist->map_pwwn, FC_WWN_SIZE);
15338 
15339 				devlist++;
15340 			}
15341 		}
15342 	}
15343 
15344 	mutex_exit(&pptr->port_mutex);
15345 
15346 	return (devptr);
15347 }
15348 /*
15349  * Inimate MPxIO that the lun is busy and cannot accept regular IO
15350  */
15351 static void
15352 fcp_update_mpxio_path_verifybusy(struct fcp_port *pptr)
15353 {
15354 	int i;
15355 	struct fcp_tgt	*ptgt;
15356 	struct fcp_lun	*plun;
15357 
15358 	for (i = 0; i < FCP_NUM_HASH; i++) {
15359 		for (ptgt = pptr->port_tgt_hash_table[i]; ptgt != NULL;
15360 		    ptgt = ptgt->tgt_next) {
15361 			mutex_enter(&ptgt->tgt_mutex);
15362 			for (plun = ptgt->tgt_lun; plun != NULL;
15363 			    plun = plun->lun_next) {
15364 				if (plun->lun_mpxio &&
15365 				    plun->lun_state & FCP_LUN_BUSY) {
15366 					if (!fcp_pass_to_hp(pptr, plun,
15367 					    plun->lun_cip,
15368 					    FCP_MPXIO_PATH_SET_BUSY,
15369 					    pptr->port_link_cnt,
15370 					    ptgt->tgt_change_cnt, 0, 0)) {
15371 						FCP_TRACE(fcp_logq,
15372 						    pptr->port_instbuf,
15373 						    fcp_trace,
15374 						    FCP_BUF_LEVEL_2, 0,
15375 						    "path_verifybusy: "
15376 						    "disable lun %p failed!",
15377 						    plun);
15378 					}
15379 				}
15380 			}
15381 			mutex_exit(&ptgt->tgt_mutex);
15382 		}
15383 	}
15384 }
15385 
15386 static int
15387 fcp_update_mpxio_path(struct fcp_lun *plun, child_info_t *cip, int what)
15388 {
15389 	dev_info_t		*cdip = NULL;
15390 	dev_info_t		*pdip = NULL;
15391 
15392 	ASSERT(plun);
15393 
15394 	mutex_enter(&plun->lun_mutex);
15395 	if (fcp_is_child_present(plun, cip) == FC_FAILURE) {
15396 		mutex_exit(&plun->lun_mutex);
15397 		return (NDI_FAILURE);
15398 	}
15399 	mutex_exit(&plun->lun_mutex);
15400 	cdip = mdi_pi_get_client(PIP(cip));
15401 	pdip = mdi_pi_get_phci(PIP(cip));
15402 
15403 	ASSERT(cdip != NULL);
15404 	ASSERT(pdip != NULL);
15405 
15406 	if (what == FCP_MPXIO_PATH_CLEAR_BUSY) {
15407 		/* LUN ready for IO */
15408 		(void) mdi_pi_enable_path(PIP(cip), DRIVER_DISABLE_TRANSIENT);
15409 	} else {
15410 		/* LUN busy to accept IO */
15411 		(void) mdi_pi_disable_path(PIP(cip), DRIVER_DISABLE_TRANSIENT);
15412 	}
15413 	return (NDI_SUCCESS);
15414 }
15415 
15416 /*
15417  * Caller must free the returned string of MAXPATHLEN len
15418  * If the device is offline (-1 instance number) NULL
15419  * will be returned.
15420  */
15421 static char *
15422 fcp_get_lun_path(struct fcp_lun *plun) {
15423 	dev_info_t	*dip = NULL;
15424 	char	*path = NULL;
15425 	if (plun == NULL) {
15426 		return (NULL);
15427 	}
15428 	if (plun->lun_mpxio == 0) {
15429 		dip = DIP(plun->lun_cip);
15430 	} else {
15431 		dip = mdi_pi_get_client(PIP(plun->lun_cip));
15432 	}
15433 	if (dip == NULL) {
15434 		return (NULL);
15435 	}
15436 	if (ddi_get_instance(dip) < 0) {
15437 		return (NULL);
15438 	}
15439 	path = kmem_alloc(MAXPATHLEN, KM_SLEEP);
15440 	if (path == NULL) {
15441 		return (NULL);
15442 	}
15443 
15444 	(void) ddi_pathname(dip, path);
15445 	/*
15446 	 * In reality, the user wants a fully valid path (one they can open)
15447 	 * but this string is lacking the mount point, and the minor node.
15448 	 * It would be nice if we could "figure these out" somehow
15449 	 * and fill them in.  Otherwise, the userland code has to understand
15450 	 * driver specific details of which minor node is the "best" or
15451 	 * "right" one to expose.  (Ex: which slice is the whole disk, or
15452 	 * which tape doesn't rewind)
15453 	 */
15454 	return (path);
15455 }
15456 
15457 static int
15458 fcp_scsi_bus_config(dev_info_t *parent, uint_t flag,
15459     ddi_bus_config_op_t op, void *arg, dev_info_t **childp)
15460 {
15461 	int64_t reset_delay;
15462 	int rval, retry = 0;
15463 	struct fcp_port *pptr = fcp_dip2port(parent);
15464 
15465 	reset_delay = (int64_t)(USEC_TO_TICK(FCP_INIT_WAIT_TIMEOUT)) -
15466 	    (lbolt64 - pptr->port_attach_time);
15467 	if (reset_delay < 0) {
15468 		reset_delay = 0;
15469 	}
15470 
15471 	if (fcp_bus_config_debug) {
15472 		flag |= NDI_DEVI_DEBUG;
15473 	}
15474 
15475 	switch (op) {
15476 	case BUS_CONFIG_ONE:
15477 		/*
15478 		 * Retry the command since we need to ensure
15479 		 * the fabric devices are available for root
15480 		 */
15481 		while (retry++ < fcp_max_bus_config_retries) {
15482 			rval =	(ndi_busop_bus_config(parent,
15483 			    flag | NDI_MDI_FALLBACK, op,
15484 			    arg, childp, (clock_t)reset_delay));
15485 			if (rval == 0) {
15486 				return (rval);
15487 			}
15488 		}
15489 
15490 		/*
15491 		 * drain taskq to make sure nodes are created and then
15492 		 * try again.
15493 		 */
15494 		taskq_wait(DEVI(parent)->devi_taskq);
15495 		return (ndi_busop_bus_config(parent, flag | NDI_MDI_FALLBACK,
15496 		    op, arg, childp, 0));
15497 
15498 	case BUS_CONFIG_DRIVER:
15499 	case BUS_CONFIG_ALL: {
15500 		/*
15501 		 * delay till all devices report in (port_tmp_cnt == 0)
15502 		 * or FCP_INIT_WAIT_TIMEOUT
15503 		 */
15504 		mutex_enter(&pptr->port_mutex);
15505 		while ((reset_delay > 0) && pptr->port_tmp_cnt) {
15506 			(void) cv_timedwait(&pptr->port_config_cv,
15507 			    &pptr->port_mutex,
15508 			    ddi_get_lbolt() + (clock_t)reset_delay);
15509 			reset_delay =
15510 			    (int64_t)(USEC_TO_TICK(FCP_INIT_WAIT_TIMEOUT)) -
15511 			    (lbolt64 - pptr->port_attach_time);
15512 		}
15513 		mutex_exit(&pptr->port_mutex);
15514 		/* drain taskq to make sure nodes are created */
15515 		taskq_wait(DEVI(parent)->devi_taskq);
15516 		return (ndi_busop_bus_config(parent, flag, op,
15517 		    arg, childp, 0));
15518 	}
15519 
15520 	default:
15521 		return (NDI_FAILURE);
15522 	}
15523 	/*NOTREACHED*/
15524 }
15525 
15526 static int
15527 fcp_scsi_bus_unconfig(dev_info_t *parent, uint_t flag,
15528     ddi_bus_config_op_t op, void *arg)
15529 {
15530 	if (fcp_bus_config_debug) {
15531 		flag |= NDI_DEVI_DEBUG;
15532 	}
15533 
15534 	return (ndi_busop_bus_unconfig(parent, flag, op, arg));
15535 }
15536 
15537 
15538 /*
15539  * Routine to copy GUID into the lun structure.
15540  * returns 0 if copy was successful and 1 if encountered a
15541  * failure and did not copy the guid.
15542  */
15543 static int
15544 fcp_copy_guid_2_lun_block(struct fcp_lun *plun, char *guidp)
15545 {
15546 
15547 	int retval = 0;
15548 
15549 	/* add one for the null terminator */
15550 	const unsigned int len = strlen(guidp) + 1;
15551 
15552 	if ((guidp == NULL) || (plun == NULL)) {
15553 		return (1);
15554 	}
15555 
15556 	/*
15557 	 * if the plun->lun_guid already has been allocated,
15558 	 * then check the size. if the size is exact, reuse
15559 	 * it....if not free it an allocate the required size.
15560 	 * The reallocation should NOT typically happen
15561 	 * unless the GUIDs reported changes between passes.
15562 	 * We free up and alloc again even if the
15563 	 * size was more than required. This is due to the
15564 	 * fact that the field lun_guid_size - serves
15565 	 * dual role of indicating the size of the wwn
15566 	 * size and ALSO the allocation size.
15567 	 */
15568 	if (plun->lun_guid) {
15569 		if (plun->lun_guid_size != len) {
15570 			/*
15571 			 * free the allocated memory and
15572 			 * initialize the field
15573 			 * lun_guid_size to 0.
15574 			 */
15575 			kmem_free(plun->lun_guid, plun->lun_guid_size);
15576 			plun->lun_guid = NULL;
15577 			plun->lun_guid_size = 0;
15578 		}
15579 	}
15580 	/*
15581 	 * alloc only if not already done.
15582 	 */
15583 	if (plun->lun_guid == NULL) {
15584 		plun->lun_guid = kmem_zalloc(len, KM_NOSLEEP);
15585 		if (plun->lun_guid == NULL) {
15586 			cmn_err(CE_WARN, "fcp_copy_guid_2_lun_block:"
15587 			    "Unable to allocate"
15588 			    "Memory for GUID!!! size %d", len);
15589 			retval = 1;
15590 		} else {
15591 			plun->lun_guid_size = len;
15592 		}
15593 	}
15594 	if (plun->lun_guid) {
15595 		/*
15596 		 * now copy the GUID
15597 		 */
15598 		bcopy(guidp, plun->lun_guid, plun->lun_guid_size);
15599 	}
15600 	return (retval);
15601 }
15602 
15603 /*
15604  * fcp_reconfig_wait
15605  *
15606  * Wait for a rediscovery/reconfiguration to complete before continuing.
15607  */
15608 
15609 static void
15610 fcp_reconfig_wait(struct fcp_port *pptr)
15611 {
15612 	clock_t		reconfig_start, wait_timeout;
15613 
15614 	/*
15615 	 * Quick check.	 If pptr->port_tmp_cnt is 0, there is no
15616 	 * reconfiguration in progress.
15617 	 */
15618 
15619 	mutex_enter(&pptr->port_mutex);
15620 	if (pptr->port_tmp_cnt == 0) {
15621 		mutex_exit(&pptr->port_mutex);
15622 		return;
15623 	}
15624 	mutex_exit(&pptr->port_mutex);
15625 
15626 	/*
15627 	 * If we cause a reconfig by raising power, delay until all devices
15628 	 * report in (port_tmp_cnt returns to 0)
15629 	 */
15630 
15631 	reconfig_start = ddi_get_lbolt();
15632 	wait_timeout = drv_usectohz(FCP_INIT_WAIT_TIMEOUT);
15633 
15634 	mutex_enter(&pptr->port_mutex);
15635 
15636 	while (((ddi_get_lbolt() - reconfig_start) < wait_timeout) &&
15637 	    pptr->port_tmp_cnt) {
15638 
15639 		(void) cv_timedwait(&pptr->port_config_cv, &pptr->port_mutex,
15640 		    reconfig_start + wait_timeout);
15641 	}
15642 
15643 	mutex_exit(&pptr->port_mutex);
15644 
15645 	/*
15646 	 * Even if fcp_tmp_count isn't 0, continue without error.  The port
15647 	 * we want may still be ok.  If not, it will error out later
15648 	 */
15649 }
15650 
15651 /*
15652  * Read masking info from fp.conf and construct the global fcp_lun_blacklist.
15653  * We rely on the fcp_global_mutex to provide protection against changes to
15654  * the fcp_lun_blacklist.
15655  *
15656  * You can describe a list of target port WWNs and LUN numbers which will
15657  * not be configured. LUN numbers will be interpreted as decimal. White
15658  * spaces and ',' can be used in the list of LUN numbers.
15659  *
15660  * To prevent LUNs 1 and 2 from being configured for target
15661  * port 510000f010fd92a1 and target port 510000e012079df1, set:
15662  *
15663  * pwwn-lun-blacklist=
15664  * "510000f010fd92a1,1,2",
15665  * "510000e012079df1,1,2";
15666  */
15667 static void
15668 fcp_read_blacklist(dev_info_t *dip,
15669     struct fcp_black_list_entry **pplun_blacklist) {
15670 	char **prop_array	= NULL;
15671 	char *curr_pwwn		= NULL;
15672 	char *curr_lun		= NULL;
15673 	uint32_t prop_item	= 0;
15674 	int idx			= 0;
15675 	int len			= 0;
15676 
15677 	ASSERT(mutex_owned(&fcp_global_mutex));
15678 	if (ddi_prop_lookup_string_array(DDI_DEV_T_ANY, dip,
15679 	    DDI_PROP_DONTPASS | DDI_PROP_NOTPROM,
15680 	    LUN_BLACKLIST_PROP, &prop_array, &prop_item) != DDI_PROP_SUCCESS) {
15681 		return;
15682 	}
15683 
15684 	for (idx = 0; idx < prop_item; idx++) {
15685 
15686 		curr_pwwn = prop_array[idx];
15687 		while (*curr_pwwn == ' ') {
15688 			curr_pwwn++;
15689 		}
15690 		if (strlen(curr_pwwn) <= (sizeof (la_wwn_t) * 2 + 1)) {
15691 			fcp_log(CE_WARN, NULL, "Invalid WWN %s in the blacklist"
15692 			    ", please check.", curr_pwwn);
15693 			continue;
15694 		}
15695 		if ((*(curr_pwwn + sizeof (la_wwn_t) * 2) != ' ') &&
15696 		    (*(curr_pwwn + sizeof (la_wwn_t) * 2) != ',')) {
15697 			fcp_log(CE_WARN, NULL, "Invalid WWN %s in the blacklist"
15698 			    ", please check.", curr_pwwn);
15699 			continue;
15700 		}
15701 		for (len = 0; len < sizeof (la_wwn_t) * 2; len++) {
15702 			if (isxdigit(curr_pwwn[len]) != TRUE) {
15703 				fcp_log(CE_WARN, NULL, "Invalid WWN %s in the "
15704 				    "blacklist, please check.", curr_pwwn);
15705 				break;
15706 			}
15707 		}
15708 		if (len != sizeof (la_wwn_t) * 2) {
15709 			continue;
15710 		}
15711 
15712 		curr_lun = curr_pwwn + sizeof (la_wwn_t) * 2 + 1;
15713 		*(curr_lun - 1) = '\0';
15714 		fcp_mask_pwwn_lun(curr_pwwn, curr_lun, pplun_blacklist);
15715 	}
15716 
15717 	ddi_prop_free(prop_array);
15718 }
15719 
15720 /*
15721  * Get the masking info about one remote target port designated by wwn.
15722  * Lun ids could be separated by ',' or white spaces.
15723  */
15724 static void
15725 fcp_mask_pwwn_lun(char *curr_pwwn, char *curr_lun,
15726     struct fcp_black_list_entry **pplun_blacklist) {
15727 	int		idx			= 0;
15728 	uint32_t	offset			= 0;
15729 	unsigned long	lun_id			= 0;
15730 	char		lunid_buf[16];
15731 	char		*pend			= NULL;
15732 	int		illegal_digit		= 0;
15733 
15734 	while (offset < strlen(curr_lun)) {
15735 		while ((curr_lun[offset + idx] != ',') &&
15736 		    (curr_lun[offset + idx] != '\0') &&
15737 		    (curr_lun[offset + idx] != ' ')) {
15738 			if (isdigit(curr_lun[offset + idx]) == 0) {
15739 				illegal_digit++;
15740 			}
15741 			idx++;
15742 		}
15743 		if (illegal_digit > 0) {
15744 			offset += (idx+1);	/* To the start of next lun */
15745 			idx = 0;
15746 			illegal_digit = 0;
15747 			fcp_log(CE_WARN, NULL, "Invalid LUN %s for WWN %s in "
15748 			    "the blacklist, please check digits.",
15749 			    curr_lun, curr_pwwn);
15750 			continue;
15751 		}
15752 		if (idx >= (sizeof (lunid_buf) / sizeof (lunid_buf[0]))) {
15753 			fcp_log(CE_WARN, NULL, "Invalid LUN %s for WWN %s in "
15754 			    "the blacklist, please check the length of LUN#.",
15755 			    curr_lun, curr_pwwn);
15756 			break;
15757 		}
15758 		if (idx == 0) {	/* ignore ' ' or ',' or '\0' */
15759 		    offset++;
15760 		    continue;
15761 		}
15762 
15763 		bcopy(curr_lun + offset, lunid_buf, idx);
15764 		lunid_buf[idx] = '\0';
15765 		if (ddi_strtoul(lunid_buf, &pend, 10, &lun_id) == 0) {
15766 			fcp_add_one_mask(curr_pwwn, lun_id, pplun_blacklist);
15767 		} else {
15768 			fcp_log(CE_WARN, NULL, "Invalid LUN %s for WWN %s in "
15769 			    "the blacklist, please check %s.",
15770 			    curr_lun, curr_pwwn, lunid_buf);
15771 		}
15772 		offset += (idx+1);	/* To the start of next lun */
15773 		idx = 0;
15774 	}
15775 }
15776 
15777 /*
15778  * Add one masking record
15779  */
15780 static void
15781 fcp_add_one_mask(char *curr_pwwn, uint32_t lun_id,
15782     struct fcp_black_list_entry **pplun_blacklist) {
15783 	struct fcp_black_list_entry	*tmp_entry	= *pplun_blacklist;
15784 	struct fcp_black_list_entry	*new_entry	= NULL;
15785 	la_wwn_t			wwn;
15786 
15787 	fcp_ascii_to_wwn(curr_pwwn, wwn.raw_wwn, sizeof (la_wwn_t));
15788 	while (tmp_entry) {
15789 		if ((bcmp(&tmp_entry->wwn, &wwn,
15790 		    sizeof (la_wwn_t)) == 0) && (tmp_entry->lun == lun_id)) {
15791 			return;
15792 		}
15793 
15794 		tmp_entry = tmp_entry->next;
15795 	}
15796 
15797 	/* add to black list */
15798 	new_entry = (struct fcp_black_list_entry *)kmem_zalloc
15799 	    (sizeof (struct fcp_black_list_entry), KM_SLEEP);
15800 	bcopy(&wwn, &new_entry->wwn, sizeof (la_wwn_t));
15801 	new_entry->lun = lun_id;
15802 	new_entry->masked = 0;
15803 	new_entry->next = *pplun_blacklist;
15804 	*pplun_blacklist = new_entry;
15805 }
15806 
15807 /*
15808  * Check if we should mask the specified lun of this fcp_tgt
15809  */
15810 static int
15811 fcp_should_mask(la_wwn_t *wwn, uint32_t lun_id) {
15812 	struct fcp_black_list_entry *remote_port;
15813 
15814 	remote_port = fcp_lun_blacklist;
15815 	while (remote_port != NULL) {
15816 		if (bcmp(wwn, &remote_port->wwn, sizeof (la_wwn_t)) == 0) {
15817 			if (remote_port->lun == lun_id) {
15818 				remote_port->masked++;
15819 				if (remote_port->masked == 1) {
15820 					fcp_log(CE_NOTE, NULL, "LUN %d of port "
15821 					    "%02x%02x%02x%02x%02x%02x%02x%02x "
15822 					    "is masked due to black listing.\n",
15823 					    lun_id, wwn->raw_wwn[0],
15824 					    wwn->raw_wwn[1], wwn->raw_wwn[2],
15825 					    wwn->raw_wwn[3], wwn->raw_wwn[4],
15826 					    wwn->raw_wwn[5], wwn->raw_wwn[6],
15827 					    wwn->raw_wwn[7]);
15828 				}
15829 				return (TRUE);
15830 			}
15831 		}
15832 		remote_port = remote_port->next;
15833 	}
15834 	return (FALSE);
15835 }
15836 
15837 /*
15838  * Release all allocated resources
15839  */
15840 static void
15841 fcp_cleanup_blacklist(struct fcp_black_list_entry **pplun_blacklist) {
15842 	struct fcp_black_list_entry	*tmp_entry	= *pplun_blacklist;
15843 	struct fcp_black_list_entry	*current_entry	= NULL;
15844 
15845 	ASSERT(mutex_owned(&fcp_global_mutex));
15846 	/*
15847 	 * Traverse all luns
15848 	 */
15849 	while (tmp_entry) {
15850 		current_entry = tmp_entry;
15851 		tmp_entry = tmp_entry->next;
15852 		kmem_free(current_entry, sizeof (struct fcp_black_list_entry));
15853 	}
15854 	*pplun_blacklist = NULL;
15855 }
15856