xref: /titanic_50/usr/src/uts/common/io/fibre-channel/ulp/fcp.c (revision 2654012f83cec5dc15b61dfe3e4a4915f186e7a6)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 /*
22  * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
23  * Use is subject to license terms.
24  *
25  * Fibre Channel SCSI ULP Mapping driver
26  */
27 
28 #include <sys/scsi/scsi.h>
29 #include <sys/types.h>
30 #include <sys/varargs.h>
31 #include <sys/devctl.h>
32 #include <sys/thread.h>
33 #include <sys/thread.h>
34 #include <sys/open.h>
35 #include <sys/file.h>
36 #include <sys/sunndi.h>
37 #include <sys/console.h>
38 #include <sys/proc.h>
39 #include <sys/time.h>
40 #include <sys/utsname.h>
41 #include <sys/scsi/impl/scsi_reset_notify.h>
42 #include <sys/ndi_impldefs.h>
43 #include <sys/byteorder.h>
44 #include <sys/fs/dv_node.h>
45 #include <sys/ctype.h>
46 #include <sys/sunmdi.h>
47 
48 #include <sys/fibre-channel/fc.h>
49 #include <sys/fibre-channel/impl/fc_ulpif.h>
50 #include <sys/fibre-channel/ulp/fcpvar.h>
51 
52 /*
53  * Discovery Process
54  * =================
55  *
56  *    The discovery process is a major function of FCP.  In order to help
57  * understand that function a flow diagram is given here.  This diagram
58  * doesn't claim to cover all the cases and the events that can occur during
59  * the discovery process nor the subtleties of the code.  The code paths shown
60  * are simplified.  Its purpose is to help the reader (and potentially bug
61  * fixer) have an overall view of the logic of the code.  For that reason the
62  * diagram covers the simple case of the line coming up cleanly or of a new
63  * port attaching to FCP the link being up.  The reader must keep in mind
64  * that:
65  *
66  *	- There are special cases where bringing devices online and offline
67  *	  is driven by Ioctl.
68  *
69  *	- The behavior of the discovery process can be modified through the
70  *	  .conf file.
71  *
72  *	- The line can go down and come back up at any time during the
73  *	  discovery process which explains some of the complexity of the code.
74  *
75  * ............................................................................
76  *
77  * STEP 1: The line comes up or a new Fibre Channel port attaches to FCP.
78  *
79  *
80  *			+-------------------------+
81  *   fp/fctl module --->|    fcp_port_attach	  |
82  *			+-------------------------+
83  *	   |			     |
84  *	   |			     |
85  *	   |			     v
86  *	   |		+-------------------------+
87  *	   |		| fcp_handle_port_attach  |
88  *	   |		+-------------------------+
89  *	   |				|
90  *	   |				|
91  *	   +--------------------+	|
92  *				|	|
93  *				v	v
94  *			+-------------------------+
95  *			|   fcp_statec_callback   |
96  *			+-------------------------+
97  *				    |
98  *				    |
99  *				    v
100  *			+-------------------------+
101  *			|    fcp_handle_devices   |
102  *			+-------------------------+
103  *				    |
104  *				    |
105  *				    v
106  *			+-------------------------+
107  *			|   fcp_handle_mapflags   |
108  *			+-------------------------+
109  *				    |
110  *				    |
111  *				    v
112  *			+-------------------------+
113  *			|     fcp_send_els	  |
114  *			|			  |
115  *			| PLOGI or PRLI To all the|
116  *			| reachable devices.	  |
117  *			+-------------------------+
118  *
119  *
120  * ............................................................................
121  *
122  * STEP 2: The callback functions of the PLOGI and/or PRLI requests sent during
123  *	   STEP 1 are called (it is actually the same function).
124  *
125  *
126  *			+-------------------------+
127  *			|    fcp_icmd_callback    |
128  *   fp/fctl module --->|			  |
129  *			| callback for PLOGI and  |
130  *			| PRLI.			  |
131  *			+-------------------------+
132  *				     |
133  *				     |
134  *	    Received PLOGI Accept   /-\	  Received PRLI Accept
135  *		       _ _ _ _ _ _ /   \_ _ _ _ _ _
136  *		      |		   \   /	   |
137  *		      |		    \-/		   |
138  *		      |				   |
139  *		      v				   v
140  *	+-------------------------+	+-------------------------+
141  *	|     fcp_send_els	  |	|     fcp_send_scsi	  |
142  *	|			  |	|			  |
143  *	|	  PRLI		  |	|	REPORT_LUN	  |
144  *	+-------------------------+	+-------------------------+
145  *
146  * ............................................................................
147  *
148  * STEP 3: The callback functions of the SCSI commands issued by FCP are called
149  *	   (It is actually the same function).
150  *
151  *
152  *			    +-------------------------+
153  *   fp/fctl module ------->|    fcp_scsi_callback    |
154  *			    +-------------------------+
155  *					|
156  *					|
157  *					|
158  *	Receive REPORT_LUN reply       /-\	Receive INQUIRY PAGE83 reply
159  *		  _ _ _ _ _ _ _ _ _ _ /   \_ _ _ _ _ _ _ _ _ _ _ _
160  *		 |		      \   /			  |
161  *		 |		       \-/			  |
162  *		 |			|			  |
163  *		 | Receive INQUIRY reply|			  |
164  *		 |			|			  |
165  *		 v			v			  v
166  * +------------------------+ +----------------------+ +----------------------+
167  * |  fcp_handle_reportlun  | |  fcp_handle_inquiry  | |  fcp_handle_page83   |
168  * |(Called for each Target)| | (Called for each LUN)| |(Called for each LUN) |
169  * +------------------------+ +----------------------+ +----------------------+
170  *		 |		        |			  |
171  *		 |		        |			  |
172  *		 |		        |			  |
173  *		 v		        v			  |
174  *     +-----------------+	+-----------------+		  |
175  *     |  fcp_send_scsi  |	|  fcp_send_scsi  |		  |
176  *     |		 |	|		  |		  |
177  *     |     INQUIRY     |	| INQUIRY PAGE83  |		  |
178  *     |  (To each LUN)	 |	+-----------------+		  |
179  *     +-----------------+					  |
180  *								  |
181  *								  v
182  *						      +------------------------+
183  *						      |  fcp_call_finish_init  |
184  *						      +------------------------+
185  *								  |
186  *								  v
187  *						 +-----------------------------+
188  *						 |  fcp_call_finish_init_held  |
189  *						 +-----------------------------+
190  *								  |
191  *								  |
192  *			   All LUNs scanned			 /-\
193  *			       _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ __ /   \
194  *		              |					\   /
195  *			      |					 \-/
196  *			      v					  |
197  *		     +------------------+			  |
198  *		     |  fcp_finish_tgt  |			  |
199  *		     +------------------+			  |
200  *			      |	  Target Not Offline and	  |
201  *  Target Not Offline and    |   not marked and tgt_node_state   |
202  *  marked		     /-\  not FCP_TGT_NODE_ON_DEMAND	  |
203  *		_ _ _ _ _ _ /   \_ _ _ _ _ _ _ _		  |
204  *	       |	    \   /		|		  |
205  *	       |	     \-/		|		  |
206  *	       v				v		  |
207  * +----------------------------+     +-------------------+	  |
208  * |	 fcp_offline_target	|     |  fcp_create_luns  |	  |
209  * |				|     +-------------------+	  |
210  * | A structure fcp_tgt_elem	|		|		  |
211  * | is created and queued in	|		v		  |
212  * | the FCP port list		|     +-------------------+	  |
213  * | port_offline_tgts.  It	|     |  fcp_pass_to_hp   |	  |
214  * | will be unqueued by the    |     |			  |	  |
215  * | watchdog timer.		|     | Called for each   |	  |
216  * +----------------------------+     | LUN. Dispatches   |	  |
217  *		  |		      | fcp_hp_task	  |	  |
218  *		  |		      +-------------------+	  |
219  *		  |				|		  |
220  *		  |				|		  |
221  *		  |				|		  |
222  *		  |				+---------------->|
223  *		  |						  |
224  *		  +---------------------------------------------->|
225  *								  |
226  *								  |
227  *		All the targets (devices) have been scanned	 /-\
228  *				_ _ _ _	_ _ _ _	_ _ _ _ _ _ _ _ /   \
229  *			       |				\   /
230  *			       |				 \-/
231  *	    +-------------------------------------+		  |
232  *	    |		fcp_finish_init		  |		  |
233  *	    |					  |		  |
234  *	    | Signal broadcasts the condition	  |		  |
235  *	    | variable port_config_cv of the FCP  |		  |
236  *	    | port.  One potential code sequence  |		  |
237  *	    | waiting on the condition variable	  |		  |
238  *	    | the code sequence handling	  |		  |
239  *	    | BUS_CONFIG_ALL and BUS_CONFIG_DRIVER|		  |
240  *	    | The other is in the function	  |		  |
241  *	    | fcp_reconfig_wait which is called	  |		  |
242  *	    | in the transmit path preventing IOs |		  |
243  *	    | from going through till the disco-  |		  |
244  *	    | very process is over.		  |		  |
245  *	    +-------------------------------------+		  |
246  *			       |				  |
247  *			       |				  |
248  *			       +--------------------------------->|
249  *								  |
250  *								  v
251  *								Return
252  *
253  * ............................................................................
254  *
255  * STEP 4: The hot plug task is called (for each fcp_hp_elem).
256  *
257  *
258  *			+-------------------------+
259  *			|      fcp_hp_task	  |
260  *			+-------------------------+
261  *				     |
262  *				     |
263  *				     v
264  *			+-------------------------+
265  *			|     fcp_trigger_lun     |
266  *			+-------------------------+
267  *				     |
268  *				     |
269  *				     v
270  *		   Bring offline    /-\  Bring online
271  *		  _ _ _ _ _ _ _ _ _/   \_ _ _ _ _ _ _ _ _ _
272  *		 |		   \   /		   |
273  *		 |		    \-/			   |
274  *		 v					   v
275  *    +---------------------+			+-----------------------+
276  *    |	 fcp_offline_child  |			|      fcp_get_cip	|
277  *    +---------------------+			|			|
278  *						| Creates a dev_info_t	|
279  *						| or a mdi_pathinfo_t	|
280  *						| depending on whether	|
281  *						| mpxio is on or off.	|
282  *						+-----------------------+
283  *							   |
284  *							   |
285  *							   v
286  *						+-----------------------+
287  *						|  fcp_online_child	|
288  *						|			|
289  *						| Set device online	|
290  *						| using NDI or MDI. 	|
291  *						+-----------------------+
292  *
293  * ............................................................................
294  *
295  * STEP 5: The watchdog timer expires.  The watch dog timer does much more that
296  *	   what is described here.  We only show the target offline path.
297  *
298  *
299  *			 +--------------------------+
300  *			 |	  fcp_watch	    |
301  *			 +--------------------------+
302  *				       |
303  *				       |
304  *				       v
305  *			 +--------------------------+
306  *			 |  fcp_scan_offline_tgts   |
307  *			 +--------------------------+
308  *				       |
309  *				       |
310  *				       v
311  *			 +--------------------------+
312  *			 |  fcp_offline_target_now  |
313  *			 +--------------------------+
314  *				       |
315  *				       |
316  *				       v
317  *			 +--------------------------+
318  *			 |   fcp_offline_tgt_luns   |
319  *			 +--------------------------+
320  *				       |
321  *				       |
322  *				       v
323  *			 +--------------------------+
324  *			 |     fcp_offline_lun	    |
325  *			 +--------------------------+
326  *				       |
327  *				       |
328  *				       v
329  *		     +----------------------------------+
330  *		     |	     fcp_offline_lun_now	|
331  *		     |					|
332  *		     | A request (or two if mpxio) is	|
333  *		     | sent to the hot plug task using	|
334  *		     | a fcp_hp_elem structure.		|
335  *		     +----------------------------------+
336  */
337 
338 /*
339  * Functions registered with DDI framework
340  */
341 static int fcp_attach(dev_info_t *devi, ddi_attach_cmd_t cmd);
342 static int fcp_detach(dev_info_t *devi, ddi_detach_cmd_t cmd);
343 static int fcp_open(dev_t *devp, int flag, int otype, cred_t *credp);
344 static int fcp_close(dev_t dev, int flag, int otype, cred_t *credp);
345 static int fcp_ioctl(dev_t dev, int cmd, intptr_t data, int mode,
346     cred_t *credp, int *rval);
347 
348 /*
349  * Functions registered with FC Transport framework
350  */
351 static int fcp_port_attach(opaque_t ulph, fc_ulp_port_info_t *pinfo,
352     fc_attach_cmd_t cmd,  uint32_t s_id);
353 static int fcp_port_detach(opaque_t ulph, fc_ulp_port_info_t *info,
354     fc_detach_cmd_t cmd);
355 static int fcp_port_ioctl(opaque_t ulph, opaque_t port_handle, dev_t dev,
356     int cmd, intptr_t data, int mode, cred_t *credp, int *rval,
357     uint32_t claimed);
358 static int fcp_els_callback(opaque_t ulph, opaque_t port_handle,
359     fc_unsol_buf_t *buf, uint32_t claimed);
360 static int fcp_data_callback(opaque_t ulph, opaque_t port_handle,
361     fc_unsol_buf_t *buf, uint32_t claimed);
362 static void fcp_statec_callback(opaque_t ulph, opaque_t port_handle,
363     uint32_t port_state, uint32_t port_top, fc_portmap_t *devlist,
364     uint32_t  dev_cnt, uint32_t port_sid);
365 
366 /*
367  * Functions registered with SCSA framework
368  */
369 static int fcp_phys_tgt_init(dev_info_t *hba_dip, dev_info_t *tgt_dip,
370     scsi_hba_tran_t *hba_tran, struct scsi_device *sd);
371 static int fcp_scsi_tgt_init(dev_info_t *hba_dip, dev_info_t *tgt_dip,
372     scsi_hba_tran_t *hba_tran, struct scsi_device *sd);
373 static void fcp_scsi_tgt_free(dev_info_t *hba_dip, dev_info_t *tgt_dip,
374     scsi_hba_tran_t *hba_tran, struct scsi_device *sd);
375 static int fcp_scsi_start(struct scsi_address *ap, struct scsi_pkt *pkt);
376 static int fcp_scsi_abort(struct scsi_address *ap, struct scsi_pkt *pkt);
377 static int fcp_scsi_reset(struct scsi_address *ap, int level);
378 static int fcp_scsi_getcap(struct scsi_address *ap, char *cap, int whom);
379 static int fcp_scsi_setcap(struct scsi_address *ap, char *cap, int value,
380     int whom);
381 static void fcp_pkt_teardown(struct scsi_pkt *pkt);
382 static int fcp_scsi_reset_notify(struct scsi_address *ap, int flag,
383     void (*callback)(caddr_t), caddr_t arg);
384 static int fcp_scsi_bus_get_eventcookie(dev_info_t *dip, dev_info_t *rdip,
385     char *name, ddi_eventcookie_t *event_cookiep);
386 static int fcp_scsi_bus_add_eventcall(dev_info_t *dip, dev_info_t *rdip,
387     ddi_eventcookie_t eventid, void (*callback)(), void *arg,
388     ddi_callback_id_t *cb_id);
389 static int fcp_scsi_bus_remove_eventcall(dev_info_t *devi,
390     ddi_callback_id_t cb_id);
391 static int fcp_scsi_bus_post_event(dev_info_t *dip, dev_info_t *rdip,
392     ddi_eventcookie_t eventid, void *impldata);
393 static int fcp_scsi_bus_config(dev_info_t *parent, uint_t flag,
394     ddi_bus_config_op_t op, void *arg, dev_info_t **childp);
395 static int fcp_scsi_bus_unconfig(dev_info_t *parent, uint_t flag,
396     ddi_bus_config_op_t op, void *arg);
397 
398 /*
399  * Internal functions
400  */
401 static int fcp_setup_device_data_ioctl(int cmd, struct fcp_ioctl *data,
402     int mode, int *rval);
403 
404 static int fcp_setup_scsi_ioctl(struct fcp_scsi_cmd *u_fscsi,
405     int mode, int *rval);
406 static int fcp_copyin_scsi_cmd(caddr_t base_addr,
407     struct fcp_scsi_cmd *fscsi, int mode);
408 static int fcp_copyout_scsi_cmd(struct fcp_scsi_cmd *fscsi,
409     caddr_t base_addr, int mode);
410 static int fcp_send_scsi_ioctl(struct fcp_scsi_cmd *fscsi);
411 
412 static struct fcp_tgt *fcp_port_create_tgt(struct fcp_port *pptr,
413     la_wwn_t *pwwn, int	*ret_val, int *fc_status, int *fc_pkt_state,
414     int *fc_pkt_reason, int *fc_pkt_action);
415 static int fcp_tgt_send_plogi(struct fcp_tgt *ptgt, int *fc_status,
416     int *fc_pkt_state, int *fc_pkt_reason, int *fc_pkt_action);
417 static int fcp_tgt_send_prli(struct fcp_tgt	*ptgt, int *fc_status,
418     int *fc_pkt_state, int *fc_pkt_reason, int *fc_pkt_action);
419 static void fcp_ipkt_sema_init(struct fcp_ipkt *icmd);
420 static int fcp_ipkt_sema_wait(struct fcp_ipkt *icmd);
421 static void fcp_ipkt_sema_callback(struct fc_packet *fpkt);
422 static void fcp_ipkt_sema_cleanup(struct fcp_ipkt *icmd);
423 
424 static void fcp_handle_devices(struct fcp_port *pptr,
425     fc_portmap_t devlist[], uint32_t dev_cnt, int link_cnt,
426     fcp_map_tag_t *map_tag, int cause);
427 static int fcp_handle_mapflags(struct fcp_port *pptr,
428     struct fcp_tgt *ptgt, fc_portmap_t *map_entry, int link_cnt,
429     int tgt_cnt, int cause);
430 static int fcp_send_els(struct fcp_port *pptr, struct fcp_tgt *ptgt,
431     struct fcp_ipkt *icmd, uchar_t opcode, int lcount, int tcount, int cause);
432 static void fcp_update_state(struct fcp_port *pptr, uint32_t state,
433     int cause);
434 static void fcp_update_tgt_state(struct fcp_tgt *ptgt, int flag,
435     uint32_t state);
436 static struct fcp_port *fcp_get_port(opaque_t port_handle);
437 static void fcp_unsol_callback(fc_packet_t *fpkt);
438 static void fcp_unsol_resp_init(fc_packet_t *pkt, fc_unsol_buf_t *buf,
439     uchar_t r_ctl, uchar_t type);
440 static int fcp_unsol_prli(struct fcp_port *pptr, fc_unsol_buf_t *buf);
441 static struct fcp_ipkt *fcp_icmd_alloc(struct fcp_port *pptr,
442     struct fcp_tgt *ptgt, int cmd_len, int resp_len, int data_len,
443     int nodma, int lcount, int tcount, int cause, uint32_t rscn_count);
444 static void fcp_icmd_free(struct fcp_port *pptr, struct fcp_ipkt *icmd);
445 static int fcp_alloc_dma(struct fcp_port *pptr, struct fcp_ipkt *icmd,
446     int nodma, int flags);
447 static void fcp_free_dma(struct fcp_port *pptr, struct fcp_ipkt *icmd);
448 static struct fcp_tgt *fcp_lookup_target(struct fcp_port *pptr,
449     uchar_t *wwn);
450 static struct fcp_tgt *fcp_get_target_by_did(struct fcp_port *pptr,
451     uint32_t d_id);
452 static void fcp_icmd_callback(fc_packet_t *fpkt);
453 static int fcp_send_scsi(struct fcp_lun *plun, uchar_t opcode,
454     int len, int lcount, int tcount, int cause, uint32_t rscn_count);
455 static int fcp_check_reportlun(struct fcp_rsp *rsp, fc_packet_t *fpkt);
456 static void fcp_scsi_callback(fc_packet_t *fpkt);
457 static void fcp_retry_scsi_cmd(fc_packet_t *fpkt);
458 static void fcp_handle_inquiry(fc_packet_t *fpkt, struct fcp_ipkt *icmd);
459 static void fcp_handle_reportlun(fc_packet_t *fpkt, struct fcp_ipkt *icmd);
460 static struct fcp_lun *fcp_get_lun(struct fcp_tgt *ptgt,
461     uint16_t lun_num);
462 static int fcp_finish_tgt(struct fcp_port *pptr, struct fcp_tgt *ptgt,
463     int link_cnt, int tgt_cnt, int cause);
464 static void fcp_finish_init(struct fcp_port *pptr);
465 static void fcp_create_luns(struct fcp_tgt *ptgt, int link_cnt,
466     int tgt_cnt, int cause);
467 static int fcp_trigger_lun(struct fcp_lun *plun, child_info_t *cip,
468     int online, int link_cnt, int tgt_cnt, int flags);
469 static int fcp_offline_target(struct fcp_port *pptr, struct fcp_tgt *ptgt,
470     int link_cnt, int tgt_cnt, int nowait, int flags);
471 static void fcp_offline_target_now(struct fcp_port *pptr,
472     struct fcp_tgt *ptgt, int link_cnt, int tgt_cnt, int flags);
473 static void fcp_offline_tgt_luns(struct fcp_tgt *ptgt, int link_cnt,
474     int tgt_cnt, int flags);
475 static void fcp_offline_lun(struct fcp_lun *plun, int link_cnt, int tgt_cnt,
476     int nowait, int flags);
477 static void fcp_prepare_offline_lun(struct fcp_lun *plun, int link_cnt,
478     int tgt_cnt);
479 static void fcp_offline_lun_now(struct fcp_lun *plun, int link_cnt,
480     int tgt_cnt, int flags);
481 static void fcp_scan_offline_luns(struct fcp_port *pptr);
482 static void fcp_scan_offline_tgts(struct fcp_port *pptr);
483 static void fcp_update_offline_flags(struct fcp_lun *plun);
484 static struct fcp_pkt *fcp_scan_commands(struct fcp_lun *plun);
485 static void fcp_abort_commands(struct fcp_pkt *head, struct
486     fcp_port *pptr);
487 static void fcp_cmd_callback(fc_packet_t *fpkt);
488 static void fcp_complete_pkt(fc_packet_t *fpkt);
489 static int fcp_validate_fcp_response(struct fcp_rsp *rsp,
490     struct fcp_port *pptr);
491 static int fcp_device_changed(struct fcp_port *pptr, struct fcp_tgt *ptgt,
492     fc_portmap_t *map_entry, int link_cnt, int tgt_cnt, int cause);
493 static struct fcp_lun *fcp_alloc_lun(struct fcp_tgt *ptgt);
494 static void fcp_dealloc_lun(struct fcp_lun *plun);
495 static struct fcp_tgt *fcp_alloc_tgt(struct fcp_port *pptr,
496     fc_portmap_t *map_entry, int link_cnt);
497 static void fcp_dealloc_tgt(struct fcp_tgt *ptgt);
498 static void fcp_queue_ipkt(struct fcp_port *pptr, fc_packet_t *fpkt);
499 static int fcp_transport(opaque_t port_handle, fc_packet_t *fpkt,
500     int internal);
501 static void fcp_log(int level, dev_info_t *dip, const char *fmt, ...);
502 static int fcp_handle_port_attach(opaque_t ulph, fc_ulp_port_info_t *pinfo,
503     uint32_t s_id, int instance);
504 static int fcp_handle_port_detach(struct fcp_port *pptr, int flag,
505     int instance);
506 static void fcp_cleanup_port(struct fcp_port *pptr, int instance);
507 static int fcp_kmem_cache_constructor(struct scsi_pkt *, scsi_hba_tran_t *,
508     int);
509 static void fcp_kmem_cache_destructor(struct  scsi_pkt *, scsi_hba_tran_t *);
510 static int fcp_pkt_setup(struct scsi_pkt *, int (*)(), caddr_t);
511 static int fcp_alloc_cmd_resp(struct fcp_port *pptr, fc_packet_t *fpkt,
512     int flags);
513 static void fcp_free_cmd_resp(struct fcp_port *pptr, fc_packet_t *fpkt);
514 static int fcp_reset_target(struct scsi_address *ap, int level);
515 static int fcp_commoncap(struct scsi_address *ap, char *cap,
516     int val, int tgtonly, int doset);
517 static int fcp_scsi_get_name(struct scsi_device *sd, char *name, int len);
518 static int fcp_scsi_get_bus_addr(struct scsi_device *sd, char *name, int len);
519 static int fcp_linkreset(struct fcp_port *pptr, struct scsi_address *ap,
520     int sleep);
521 static int fcp_handle_port_resume(opaque_t ulph, fc_ulp_port_info_t *pinfo,
522     uint32_t s_id, fc_attach_cmd_t cmd, int instance);
523 static void fcp_cp_pinfo(struct fcp_port *pptr, fc_ulp_port_info_t *pinfo);
524 static void fcp_process_elem(struct fcp_hp_elem *elem, int result);
525 static child_info_t *fcp_get_cip(struct fcp_lun *plun, child_info_t *cip,
526     int lcount, int tcount);
527 static int fcp_is_dip_present(struct fcp_lun *plun, dev_info_t *cdip);
528 static int fcp_is_child_present(struct fcp_lun *plun, child_info_t *cip);
529 static dev_info_t *fcp_create_dip(struct fcp_lun *plun, int link_cnt,
530     int tgt_cnt);
531 static dev_info_t *fcp_find_existing_dip(struct fcp_lun *plun,
532     dev_info_t *pdip, caddr_t name);
533 static int fcp_online_child(struct fcp_lun *plun, child_info_t *cip,
534     int lcount, int tcount, int flags, int *circ);
535 static int fcp_offline_child(struct fcp_lun *plun, child_info_t *cip,
536     int lcount, int tcount, int flags, int *circ);
537 static void fcp_remove_child(struct fcp_lun *plun);
538 static void fcp_watch(void *arg);
539 static void fcp_check_reset_delay(struct fcp_port *pptr);
540 static void fcp_abort_all(struct fcp_port *pptr, struct fcp_tgt *ttgt,
541     struct fcp_lun *rlun, int tgt_cnt);
542 struct fcp_port *fcp_soft_state_unlink(struct fcp_port *pptr);
543 static struct fcp_lun *fcp_lookup_lun(struct fcp_port *pptr,
544     uchar_t *wwn, uint16_t lun);
545 static void fcp_prepare_pkt(struct fcp_port *pptr, struct fcp_pkt *cmd,
546     struct fcp_lun *plun);
547 static void fcp_post_callback(struct fcp_pkt *cmd);
548 static int fcp_dopoll(struct fcp_port *pptr, struct fcp_pkt *cmd);
549 static struct fcp_port *fcp_dip2port(dev_info_t *dip);
550 struct fcp_lun *fcp_get_lun_from_cip(struct fcp_port *pptr,
551     child_info_t *cip);
552 static int fcp_pass_to_hp_and_wait(struct fcp_port *pptr,
553     struct fcp_lun *plun, child_info_t *cip, int what, int link_cnt,
554     int tgt_cnt, int flags);
555 static struct fcp_hp_elem *fcp_pass_to_hp(struct fcp_port *pptr,
556     struct fcp_lun *plun, child_info_t *cip, int what, int link_cnt,
557     int tgt_cnt, int flags, int wait);
558 static void fcp_retransport_cmd(struct fcp_port *pptr,
559     struct fcp_pkt *cmd);
560 static void fcp_fail_cmd(struct fcp_pkt *cmd, uchar_t reason,
561     uint_t statistics);
562 static void fcp_queue_pkt(struct fcp_port *pptr, struct fcp_pkt *cmd);
563 static void fcp_update_targets(struct fcp_port *pptr,
564     fc_portmap_t *dev_list, uint32_t count, uint32_t state, int cause);
565 static int fcp_call_finish_init(struct fcp_port *pptr,
566     struct fcp_tgt *ptgt, int lcount, int tcount, int cause);
567 static int fcp_call_finish_init_held(struct fcp_port *pptr,
568     struct fcp_tgt *ptgt, int lcount, int tcount, int cause);
569 static void fcp_reconfigure_luns(void * tgt_handle);
570 static void fcp_free_targets(struct fcp_port *pptr);
571 static void fcp_free_target(struct fcp_tgt *ptgt);
572 static int fcp_is_retryable(struct fcp_ipkt *icmd);
573 static int fcp_create_on_demand(struct fcp_port *pptr, uchar_t *pwwn);
574 static void fcp_ascii_to_wwn(caddr_t string, uchar_t bytes[], unsigned int);
575 static void fcp_wwn_to_ascii(uchar_t bytes[], char *string);
576 static void fcp_print_error(fc_packet_t *fpkt);
577 static int fcp_handle_ipkt_errors(struct fcp_port *pptr,
578     struct fcp_tgt *ptgt, struct fcp_ipkt *icmd, int rval, caddr_t op);
579 static int fcp_outstanding_lun_cmds(struct fcp_tgt *ptgt);
580 static fc_portmap_t *fcp_construct_map(struct fcp_port *pptr,
581     uint32_t *dev_cnt);
582 static void fcp_offline_all(struct fcp_port *pptr, int lcount, int cause);
583 static int fcp_get_statec_count(struct fcp_ioctl *data, int mode, int *rval);
584 static int fcp_copyin_fcp_ioctl_data(struct fcp_ioctl *, int, int *,
585     struct fcp_ioctl *, struct fcp_port **);
586 static char *fcp_get_lun_path(struct fcp_lun *plun);
587 static int fcp_get_target_mappings(struct fcp_ioctl *data, int mode,
588     int *rval);
589 static int fcp_do_ns_registry(struct fcp_port *pptr, uint32_t s_id);
590 static void fcp_retry_ns_registry(struct fcp_port *pptr, uint32_t s_id);
591 static char *fcp_get_lun_path(struct fcp_lun *plun);
592 static int fcp_get_target_mappings(struct fcp_ioctl *data, int mode,
593     int *rval);
594 static void fcp_reconfig_wait(struct fcp_port *pptr);
595 
596 /*
597  * New functions added for mpxio support
598  */
599 static int fcp_virt_tgt_init(dev_info_t *hba_dip, dev_info_t *tgt_dip,
600     scsi_hba_tran_t *hba_tran, struct scsi_device *sd);
601 static mdi_pathinfo_t *fcp_create_pip(struct fcp_lun *plun, int lcount,
602     int tcount);
603 static mdi_pathinfo_t *fcp_find_existing_pip(struct fcp_lun *plun,
604     dev_info_t *pdip);
605 static int fcp_is_pip_present(struct fcp_lun *plun, mdi_pathinfo_t *pip);
606 static void fcp_handle_page83(fc_packet_t *, struct fcp_ipkt *, int);
607 static void fcp_update_mpxio_path_verifybusy(struct fcp_port *pptr);
608 static int fcp_copy_guid_2_lun_block(struct fcp_lun *plun, char *guidp);
609 static int fcp_update_mpxio_path(struct fcp_lun *plun, child_info_t *cip,
610     int what);
611 static int fcp_is_reconfig_needed(struct fcp_tgt *ptgt,
612     fc_packet_t *fpkt);
613 static int fcp_symmetric_device_probe(struct fcp_lun *plun);
614 
615 /*
616  * New functions added for lun masking support
617  */
618 static void fcp_read_blacklist(dev_info_t *dip,
619     struct fcp_black_list_entry **pplun_blacklist);
620 static void fcp_mask_pwwn_lun(char *curr_pwwn, char *curr_lun,
621     struct fcp_black_list_entry **pplun_blacklist);
622 static void fcp_add_one_mask(char *curr_pwwn, uint32_t lun_id,
623     struct fcp_black_list_entry **pplun_blacklist);
624 static int fcp_should_mask(la_wwn_t *wwn, uint32_t lun_id);
625 static void fcp_cleanup_blacklist(struct fcp_black_list_entry **lun_blacklist);
626 
627 extern struct mod_ops 	mod_driverops;
628 /*
629  * This variable is defined in modctl.c and set to '1' after the root driver
630  * and fs are loaded.  It serves as an indication that the root filesystem can
631  * be used.
632  */
633 extern int 		modrootloaded;
634 /*
635  * This table contains strings associated with the SCSI sense key codes.  It
636  * is used by FCP to print a clear explanation of the code returned in the
637  * sense information by a device.
638  */
639 extern char 		*sense_keys[];
640 /*
641  * This device is created by the SCSI pseudo nexus driver (SCSI vHCI).  It is
642  * under this device that the paths to a physical device are created when
643  * MPxIO is used.
644  */
645 extern dev_info_t	*scsi_vhci_dip;
646 
647 /*
648  * Report lun processing
649  */
650 #define	FCP_LUN_ADDRESSING		0x80
651 #define	FCP_PD_ADDRESSING		0x00
652 #define	FCP_VOLUME_ADDRESSING		0x40
653 
654 #define	FCP_SVE_THROTTLE		0x28 /* Vicom */
655 #define	MAX_INT_DMA			0x7fffffff
656 #define	FCP_MAX_SENSE_LEN		252
657 #define	FCP_MAX_RESPONSE_LEN		0xffffff
658 /*
659  * Property definitions
660  */
661 #define	NODE_WWN_PROP	(char *)fcp_node_wwn_prop
662 #define	PORT_WWN_PROP	(char *)fcp_port_wwn_prop
663 #define	TARGET_PROP	(char *)fcp_target_prop
664 #define	LUN_PROP	(char *)fcp_lun_prop
665 #define	SAM_LUN_PROP	(char *)fcp_sam_lun_prop
666 #define	CONF_WWN_PROP	(char *)fcp_conf_wwn_prop
667 #define	OBP_BOOT_WWN	(char *)fcp_obp_boot_wwn
668 #define	MANUAL_CFG_ONLY	(char *)fcp_manual_config_only
669 #define	INIT_PORT_PROP  (char *)fcp_init_port_prop
670 #define	TGT_PORT_PROP   (char *)fcp_tgt_port_prop
671 #define	LUN_BLACKLIST_PROP	(char *)fcp_lun_blacklist_prop
672 /*
673  * Short hand macros.
674  */
675 #define	LUN_PORT	(plun->lun_tgt->tgt_port)
676 #define	LUN_TGT		(plun->lun_tgt)
677 
678 /*
679  * Driver private macros
680  */
681 #define	FCP_ATOB(x)	(((x) >= '0' && (x) <= '9') ? ((x) - '0') :\
682 			((x) >= 'a' && (x) <= 'f') ?\
683 			((x) - 'a' + 10) : ((x) - 'A' + 10))
684 
685 #define	FCP_MAX(a, b)	((a) > (b) ? (a) : (b))
686 
687 #define	FCP_N_NDI_EVENTS \
688 	(sizeof (fcp_ndi_event_defs) / sizeof (ndi_event_definition_t))
689 
690 #define	FCP_LINK_STATE_CHANGED(p, c)\
691 	((p)->port_link_cnt != (c)->ipkt_link_cnt)
692 
693 #define	FCP_TGT_STATE_CHANGED(t, c)\
694 	((t)->tgt_change_cnt != (c)->ipkt_change_cnt)
695 
696 #define	FCP_STATE_CHANGED(p, t, c)\
697 	(FCP_TGT_STATE_CHANGED(t, c))
698 
699 #define	FCP_MUST_RETRY(fpkt)\
700 	((fpkt)->pkt_state == FC_PKT_LOCAL_BSY ||\
701 	(fpkt)->pkt_state == FC_PKT_LOCAL_RJT ||\
702 	(fpkt)->pkt_state == FC_PKT_TRAN_BSY ||\
703 	(fpkt)->pkt_state == FC_PKT_ELS_IN_PROGRESS ||\
704 	(fpkt)->pkt_state == FC_PKT_NPORT_BSY ||\
705 	(fpkt)->pkt_state == FC_PKT_FABRIC_BSY ||\
706 	(fpkt)->pkt_state == FC_PKT_PORT_OFFLINE ||\
707 	(fpkt)->pkt_reason == FC_REASON_OFFLINE)
708 
709 #define	FCP_SENSE_REPORTLUN_CHANGED(es)\
710 	((es)->es_key == KEY_UNIT_ATTENTION &&\
711 	(es)->es_add_code == 0x3f &&\
712 	(es)->es_qual_code == 0x0e)
713 
714 #define	FCP_SENSE_NO_LUN(es)\
715 	((es)->es_key == KEY_ILLEGAL_REQUEST &&\
716 	(es)->es_add_code == 0x25 &&\
717 	(es)->es_qual_code == 0x0)
718 
719 #define	FCP_VERSION		"1.185"
720 #define	FCP_NAME_VERSION	"SunFC FCP v" FCP_VERSION
721 
722 #define	FCP_NUM_ELEMENTS(array)\
723 		(sizeof (array) / sizeof ((array)[0]))
724 
725 /*
726  * Debugging, Error reporting, and tracing
727  */
728 #define	FCP_LOG_SIZE		1024 * 1024
729 
730 #define	FCP_LEVEL_1		0x00001		/* attach/detach PM CPR */
731 #define	FCP_LEVEL_2		0x00002		/* failures/Invalid data */
732 #define	FCP_LEVEL_3		0x00004		/* state change, discovery */
733 #define	FCP_LEVEL_4		0x00008		/* ULP messages */
734 #define	FCP_LEVEL_5		0x00010		/* ELS/SCSI cmds */
735 #define	FCP_LEVEL_6		0x00020		/* Transport failures */
736 #define	FCP_LEVEL_7		0x00040
737 #define	FCP_LEVEL_8		0x00080		/* I/O tracing */
738 #define	FCP_LEVEL_9		0x00100		/* I/O tracing */
739 
740 
741 
742 /*
743  * Log contents to system messages file
744  */
745 #define	FCP_MSG_LEVEL_1	(FCP_LEVEL_1 | FC_TRACE_LOG_MSG)
746 #define	FCP_MSG_LEVEL_2	(FCP_LEVEL_2 | FC_TRACE_LOG_MSG)
747 #define	FCP_MSG_LEVEL_3	(FCP_LEVEL_3 | FC_TRACE_LOG_MSG)
748 #define	FCP_MSG_LEVEL_4	(FCP_LEVEL_4 | FC_TRACE_LOG_MSG)
749 #define	FCP_MSG_LEVEL_5	(FCP_LEVEL_5 | FC_TRACE_LOG_MSG)
750 #define	FCP_MSG_LEVEL_6	(FCP_LEVEL_6 | FC_TRACE_LOG_MSG)
751 #define	FCP_MSG_LEVEL_7	(FCP_LEVEL_7 | FC_TRACE_LOG_MSG)
752 #define	FCP_MSG_LEVEL_8	(FCP_LEVEL_8 | FC_TRACE_LOG_MSG)
753 #define	FCP_MSG_LEVEL_9	(FCP_LEVEL_9 | FC_TRACE_LOG_MSG)
754 
755 
756 /*
757  * Log contents to trace buffer
758  */
759 #define	FCP_BUF_LEVEL_1	(FCP_LEVEL_1 | FC_TRACE_LOG_BUF)
760 #define	FCP_BUF_LEVEL_2	(FCP_LEVEL_2 | FC_TRACE_LOG_BUF)
761 #define	FCP_BUF_LEVEL_3	(FCP_LEVEL_3 | FC_TRACE_LOG_BUF)
762 #define	FCP_BUF_LEVEL_4	(FCP_LEVEL_4 | FC_TRACE_LOG_BUF)
763 #define	FCP_BUF_LEVEL_5	(FCP_LEVEL_5 | FC_TRACE_LOG_BUF)
764 #define	FCP_BUF_LEVEL_6	(FCP_LEVEL_6 | FC_TRACE_LOG_BUF)
765 #define	FCP_BUF_LEVEL_7	(FCP_LEVEL_7 | FC_TRACE_LOG_BUF)
766 #define	FCP_BUF_LEVEL_8	(FCP_LEVEL_8 | FC_TRACE_LOG_BUF)
767 #define	FCP_BUF_LEVEL_9	(FCP_LEVEL_9 | FC_TRACE_LOG_BUF)
768 
769 
770 /*
771  * Log contents to both system messages file and trace buffer
772  */
773 #define	FCP_MSG_BUF_LEVEL_1	(FCP_LEVEL_1 | FC_TRACE_LOG_BUF |\
774 				FC_TRACE_LOG_MSG)
775 #define	FCP_MSG_BUF_LEVEL_2	(FCP_LEVEL_2 | FC_TRACE_LOG_BUF |\
776 				FC_TRACE_LOG_MSG)
777 #define	FCP_MSG_BUF_LEVEL_3	(FCP_LEVEL_3 | FC_TRACE_LOG_BUF |\
778 				FC_TRACE_LOG_MSG)
779 #define	FCP_MSG_BUF_LEVEL_4	(FCP_LEVEL_4 | FC_TRACE_LOG_BUF |\
780 				FC_TRACE_LOG_MSG)
781 #define	FCP_MSG_BUF_LEVEL_5	(FCP_LEVEL_5 | FC_TRACE_LOG_BUF |\
782 				FC_TRACE_LOG_MSG)
783 #define	FCP_MSG_BUF_LEVEL_6	(FCP_LEVEL_6 | FC_TRACE_LOG_BUF |\
784 				FC_TRACE_LOG_MSG)
785 #define	FCP_MSG_BUF_LEVEL_7	(FCP_LEVEL_7 | FC_TRACE_LOG_BUF |\
786 				FC_TRACE_LOG_MSG)
787 #define	FCP_MSG_BUF_LEVEL_8	(FCP_LEVEL_8 | FC_TRACE_LOG_BUF |\
788 				FC_TRACE_LOG_MSG)
789 #define	FCP_MSG_BUF_LEVEL_9	(FCP_LEVEL_9 | FC_TRACE_LOG_BUF |\
790 				FC_TRACE_LOG_MSG)
791 #ifdef DEBUG
792 #define	FCP_DTRACE	fc_trace_debug
793 #else
794 #define	FCP_DTRACE
795 #endif
796 
797 #define	FCP_TRACE	fc_trace_debug
798 
799 static struct cb_ops fcp_cb_ops = {
800 	fcp_open,			/* open */
801 	fcp_close,			/* close */
802 	nodev,				/* strategy */
803 	nodev,				/* print */
804 	nodev,				/* dump */
805 	nodev,				/* read */
806 	nodev,				/* write */
807 	fcp_ioctl,			/* ioctl */
808 	nodev,				/* devmap */
809 	nodev,				/* mmap */
810 	nodev,				/* segmap */
811 	nochpoll,			/* chpoll */
812 	ddi_prop_op,			/* cb_prop_op */
813 	0,				/* streamtab */
814 	D_NEW | D_MP | D_HOTPLUG,	/* cb_flag */
815 	CB_REV,				/* rev */
816 	nodev,				/* aread */
817 	nodev				/* awrite */
818 };
819 
820 
821 static struct dev_ops fcp_ops = {
822 	DEVO_REV,
823 	0,
824 	ddi_getinfo_1to1,
825 	nulldev,		/* identify */
826 	nulldev,		/* probe */
827 	fcp_attach,		/* attach and detach are mandatory */
828 	fcp_detach,
829 	nodev,			/* reset */
830 	&fcp_cb_ops,		/* cb_ops */
831 	NULL,			/* bus_ops */
832 	NULL,			/* power */
833 };
834 
835 
836 char *fcp_version = FCP_NAME_VERSION;
837 
838 static struct modldrv modldrv = {
839 	&mod_driverops,
840 	FCP_NAME_VERSION,
841 	&fcp_ops
842 };
843 
844 
845 static struct modlinkage modlinkage = {
846 	MODREV_1,
847 	&modldrv,
848 	NULL
849 };
850 
851 
852 static fc_ulp_modinfo_t fcp_modinfo = {
853 	&fcp_modinfo,			/* ulp_handle */
854 	FCTL_ULP_MODREV_4,		/* ulp_rev */
855 	FC4_SCSI_FCP,			/* ulp_type */
856 	"fcp",				/* ulp_name */
857 	FCP_STATEC_MASK,		/* ulp_statec_mask */
858 	fcp_port_attach,		/* ulp_port_attach */
859 	fcp_port_detach,		/* ulp_port_detach */
860 	fcp_port_ioctl,			/* ulp_port_ioctl */
861 	fcp_els_callback,		/* ulp_els_callback */
862 	fcp_data_callback,		/* ulp_data_callback */
863 	fcp_statec_callback		/* ulp_statec_callback */
864 };
865 
866 #ifdef	DEBUG
867 #define	FCP_TRACE_DEFAULT 	(FC_TRACE_LOG_MASK | FCP_LEVEL_1 |\
868 				FCP_LEVEL_2 | FCP_LEVEL_3 |\
869 				FCP_LEVEL_4 | FCP_LEVEL_5 |\
870 				FCP_LEVEL_6 | FCP_LEVEL_7)
871 #else
872 #define	FCP_TRACE_DEFAULT 	(FC_TRACE_LOG_MASK | FCP_LEVEL_1 |\
873 				FCP_LEVEL_2 | FCP_LEVEL_3 |\
874 				FCP_LEVEL_4 | FCP_LEVEL_5 |\
875 				FCP_LEVEL_6 | FCP_LEVEL_7)
876 #endif
877 
878 /* FCP global variables */
879 int			fcp_bus_config_debug = 0;
880 static int		fcp_log_size = FCP_LOG_SIZE;
881 static int		fcp_trace = FCP_TRACE_DEFAULT;
882 static fc_trace_logq_t	*fcp_logq = NULL;
883 static struct fcp_black_list_entry	*fcp_lun_blacklist = NULL;
884 /*
885  * The auto-configuration is set by default.  The only way of disabling it is
886  * through the property MANUAL_CFG_ONLY in the fcp.conf file.
887  */
888 static int		fcp_enable_auto_configuration = 1;
889 static int		fcp_max_bus_config_retries	= 4;
890 static int		fcp_lun_ready_retry = 300;
891 /*
892  * The value assigned to the following variable has changed several times due
893  * to a problem with the data underruns reporting of some firmware(s).  The
894  * current value of 50 gives a timeout value of 25 seconds for a max number
895  * of 256 LUNs.
896  */
897 static int		fcp_max_target_retries = 50;
898 /*
899  * Watchdog variables
900  * ------------------
901  *
902  * fcp_watchdog_init
903  *
904  *	Indicates if the watchdog timer is running or not.  This is actually
905  *	a counter of the number of Fibre Channel ports that attached.  When
906  *	the first port attaches the watchdog is started.  When the last port
907  *	detaches the watchdog timer is stopped.
908  *
909  * fcp_watchdog_time
910  *
911  *	This is the watchdog clock counter.  It is incremented by
912  *	fcp_watchdog_time each time the watchdog timer expires.
913  *
914  * fcp_watchdog_timeout
915  *
916  *	Increment value of the variable fcp_watchdog_time as well as the
917  *	the timeout value of the watchdog timer.  The unit is 1 second.  It
918  *	is strange that this is not a #define but a variable since the code
919  *	never changes this value.  The reason why it can be said that the
920  *	unit is 1 second is because the number of ticks for the watchdog
921  *	timer is determined like this:
922  *
923  *	    fcp_watchdog_tick = fcp_watchdog_timeout *
924  *				  drv_usectohz(1000000);
925  *
926  *	The value 1000000 is hard coded in the code.
927  *
928  * fcp_watchdog_tick
929  *
930  *	Watchdog timer value in ticks.
931  */
932 static int		fcp_watchdog_init = 0;
933 static int		fcp_watchdog_time = 0;
934 static int		fcp_watchdog_timeout = 1;
935 static int		fcp_watchdog_tick;
936 
937 /*
938  * fcp_offline_delay is a global variable to enable customisation of
939  * the timeout on link offlines or RSCNs. The default value is set
940  * to match FCP_OFFLINE_DELAY (20sec), which is 2*RA_TOV_els as
941  * specified in FCP4 Chapter 11 (see www.t10.org).
942  *
943  * The variable fcp_offline_delay is specified in SECONDS.
944  *
945  * If we made this a static var then the user would not be able to
946  * change it. This variable is set in fcp_attach().
947  */
948 unsigned int		fcp_offline_delay = FCP_OFFLINE_DELAY;
949 
950 static void		*fcp_softstate = NULL; /* for soft state */
951 static uchar_t		fcp_oflag = FCP_IDLE; /* open flag */
952 static kmutex_t		fcp_global_mutex;
953 static kmutex_t		fcp_ioctl_mutex;
954 static dev_info_t	*fcp_global_dip = NULL;
955 static timeout_id_t	fcp_watchdog_id;
956 const char		*fcp_lun_prop = "lun";
957 const char		*fcp_sam_lun_prop = "sam-lun";
958 const char		*fcp_target_prop = "target";
959 /*
960  * NOTE: consumers of "node-wwn" property include stmsboot in ON
961  * consolidation.
962  */
963 const char		*fcp_node_wwn_prop = "node-wwn";
964 const char		*fcp_port_wwn_prop = "port-wwn";
965 const char		*fcp_conf_wwn_prop = "fc-port-wwn";
966 const char		*fcp_obp_boot_wwn = "fc-boot-dev-portwwn";
967 const char		*fcp_manual_config_only = "manual_configuration_only";
968 const char		*fcp_init_port_prop = "initiator-port";
969 const char		*fcp_tgt_port_prop = "target-port";
970 const char		*fcp_lun_blacklist_prop = "pwwn-lun-blacklist";
971 
972 static struct fcp_port 	*fcp_port_head = NULL;
973 static ddi_eventcookie_t	fcp_insert_eid;
974 static ddi_eventcookie_t	fcp_remove_eid;
975 
976 static ndi_event_definition_t   fcp_ndi_event_defs[] = {
977 	{ FCP_EVENT_TAG_INSERT, FCAL_INSERT_EVENT, EPL_KERNEL },
978 	{ FCP_EVENT_TAG_REMOVE, FCAL_REMOVE_EVENT, EPL_INTERRUPT }
979 };
980 
981 /*
982  * List of valid commands for the scsi_ioctl call
983  */
984 static uint8_t scsi_ioctl_list[] = {
985 	SCMD_INQUIRY,
986 	SCMD_REPORT_LUN,
987 	SCMD_READ_CAPACITY
988 };
989 
990 /*
991  * this is used to dummy up a report lun response for cases
992  * where the target doesn't support it
993  */
994 static uchar_t fcp_dummy_lun[] = {
995 	0x00,		/* MSB length (length = no of luns * 8) */
996 	0x00,
997 	0x00,
998 	0x08,		/* LSB length */
999 	0x00,		/* MSB reserved */
1000 	0x00,
1001 	0x00,
1002 	0x00,		/* LSB reserved */
1003 	FCP_PD_ADDRESSING,
1004 	0x00,		/* LUN is ZERO at the first level */
1005 	0x00,
1006 	0x00,		/* second level is zero */
1007 	0x00,
1008 	0x00,		/* third level is zero */
1009 	0x00,
1010 	0x00		/* fourth level is zero */
1011 };
1012 
1013 static uchar_t fcp_alpa_to_switch[] = {
1014 	0x00, 0x7d, 0x7c, 0x00, 0x7b, 0x00, 0x00, 0x00, 0x7a, 0x00,
1015 	0x00, 0x00, 0x00, 0x00, 0x00, 0x79, 0x78, 0x00, 0x00, 0x00,
1016 	0x00, 0x00, 0x00, 0x77, 0x76, 0x00, 0x00, 0x75, 0x00, 0x74,
1017 	0x73, 0x72, 0x00, 0x00, 0x00, 0x71, 0x00, 0x70, 0x6f, 0x6e,
1018 	0x00, 0x6d, 0x6c, 0x6b, 0x6a, 0x69, 0x68, 0x00, 0x00, 0x67,
1019 	0x66, 0x65, 0x64, 0x63, 0x62, 0x00, 0x00, 0x61, 0x60, 0x00,
1020 	0x5f, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x5e, 0x00, 0x5d,
1021 	0x5c, 0x5b, 0x00, 0x5a, 0x59, 0x58, 0x57, 0x56, 0x55, 0x00,
1022 	0x00, 0x54, 0x53, 0x52, 0x51, 0x50, 0x4f, 0x00, 0x00, 0x4e,
1023 	0x4d, 0x00, 0x4c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x4b,
1024 	0x00, 0x4a, 0x49, 0x48, 0x00, 0x47, 0x46, 0x45, 0x44, 0x43,
1025 	0x42, 0x00, 0x00, 0x41, 0x40, 0x3f, 0x3e, 0x3d, 0x3c, 0x00,
1026 	0x00, 0x3b, 0x3a, 0x00, 0x39, 0x00, 0x00, 0x00, 0x38, 0x37,
1027 	0x36, 0x00, 0x35, 0x00, 0x00, 0x00, 0x34, 0x00, 0x00, 0x00,
1028 	0x00, 0x00, 0x00, 0x33, 0x32, 0x00, 0x00, 0x00, 0x00, 0x00,
1029 	0x00, 0x31, 0x30, 0x00, 0x00, 0x2f, 0x00, 0x2e, 0x2d, 0x2c,
1030 	0x00, 0x00, 0x00, 0x2b, 0x00, 0x2a, 0x29, 0x28, 0x00, 0x27,
1031 	0x26, 0x25, 0x24, 0x23, 0x22, 0x00, 0x00, 0x21, 0x20, 0x1f,
1032 	0x1e, 0x1d, 0x1c, 0x00, 0x00, 0x1b, 0x1a, 0x00, 0x19, 0x00,
1033 	0x00, 0x00, 0x00, 0x00, 0x00, 0x18, 0x00, 0x17, 0x16, 0x15,
1034 	0x00, 0x14, 0x13, 0x12, 0x11, 0x10, 0x0f, 0x00, 0x00, 0x0e,
1035 	0x0d, 0x0c, 0x0b, 0x0a, 0x09, 0x00, 0x00, 0x08, 0x07, 0x00,
1036 	0x06, 0x00, 0x00, 0x00, 0x05, 0x04, 0x03, 0x00, 0x02, 0x00,
1037 	0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1038 };
1039 
1040 static caddr_t pid = "SESS01          ";
1041 
1042 #if	!defined(lint)
1043 
1044 _NOTE(MUTEX_PROTECTS_DATA(fcp_global_mutex,
1045     fcp_port::fcp_next fcp_watchdog_id))
1046 
1047 _NOTE(DATA_READABLE_WITHOUT_LOCK(fcp_watchdog_time))
1048 
1049 _NOTE(SCHEME_PROTECTS_DATA("Unshared",
1050     fcp_insert_eid
1051     fcp_remove_eid
1052     fcp_watchdog_time))
1053 
1054 _NOTE(SCHEME_PROTECTS_DATA("Unshared",
1055     fcp_cb_ops
1056     fcp_ops
1057     callb_cpr))
1058 
1059 #endif /* lint */
1060 
1061 /*
1062  * This table is used to determine whether or not it's safe to copy in
1063  * the target node name for a lun.  Since all luns behind the same target
1064  * have the same wwnn, only tagets that do not support multiple luns are
1065  * eligible to be enumerated under mpxio if they aren't page83 compliant.
1066  */
1067 
1068 char *fcp_symmetric_disk_table[] = {
1069 	"SEAGATE ST",
1070 	"IBM     DDYFT",
1071 	"SUNW    SUNWGS",	/* Daktari enclosure */
1072 	"SUN     SENA",		/* SES device */
1073 	"SUN     SESS01"	/* VICOM SVE box */
1074 };
1075 
1076 int fcp_symmetric_disk_table_size =
1077     sizeof (fcp_symmetric_disk_table)/sizeof (char *);
1078 
1079 /*
1080  * The _init(9e) return value should be that of mod_install(9f). Under
1081  * some circumstances, a failure may not be related mod_install(9f) and
1082  * one would then require a return value to indicate the failure. Looking
1083  * at mod_install(9f), it is expected to return 0 for success and non-zero
1084  * for failure. mod_install(9f) for device drivers, further goes down the
1085  * calling chain and ends up in ddi_installdrv(), whose return values are
1086  * DDI_SUCCESS and DDI_FAILURE - There are also other functions in the
1087  * calling chain of mod_install(9f) which return values like EINVAL and
1088  * in some even return -1.
1089  *
1090  * To work around the vagaries of the mod_install() calling chain, return
1091  * either 0 or ENODEV depending on the success or failure of mod_install()
1092  */
1093 int
1094 _init(void)
1095 {
1096 	int rval;
1097 
1098 	/*
1099 	 * Allocate soft state and prepare to do ddi_soft_state_zalloc()
1100 	 * before registering with the transport first.
1101 	 */
1102 	if (ddi_soft_state_init(&fcp_softstate,
1103 	    sizeof (struct fcp_port), FCP_INIT_ITEMS) != 0) {
1104 		return (EINVAL);
1105 	}
1106 
1107 	mutex_init(&fcp_global_mutex, NULL, MUTEX_DRIVER, NULL);
1108 	mutex_init(&fcp_ioctl_mutex, NULL, MUTEX_DRIVER, NULL);
1109 
1110 	if ((rval = fc_ulp_add(&fcp_modinfo)) != FC_SUCCESS) {
1111 		cmn_err(CE_WARN, "fcp: fc_ulp_add failed");
1112 		mutex_destroy(&fcp_global_mutex);
1113 		mutex_destroy(&fcp_ioctl_mutex);
1114 		ddi_soft_state_fini(&fcp_softstate);
1115 		return (ENODEV);
1116 	}
1117 
1118 	fcp_logq = fc_trace_alloc_logq(fcp_log_size);
1119 
1120 	if ((rval = mod_install(&modlinkage)) != 0) {
1121 		fc_trace_free_logq(fcp_logq);
1122 		(void) fc_ulp_remove(&fcp_modinfo);
1123 		mutex_destroy(&fcp_global_mutex);
1124 		mutex_destroy(&fcp_ioctl_mutex);
1125 		ddi_soft_state_fini(&fcp_softstate);
1126 		rval = ENODEV;
1127 	}
1128 
1129 	return (rval);
1130 }
1131 
1132 
1133 /*
1134  * the system is done with us as a driver, so clean up
1135  */
1136 int
1137 _fini(void)
1138 {
1139 	int rval;
1140 
1141 	/*
1142 	 * don't start cleaning up until we know that the module remove
1143 	 * has worked  -- if this works, then we know that each instance
1144 	 * has successfully been DDI_DETACHed
1145 	 */
1146 	if ((rval = mod_remove(&modlinkage)) != 0) {
1147 		return (rval);
1148 	}
1149 
1150 	(void) fc_ulp_remove(&fcp_modinfo);
1151 
1152 	ddi_soft_state_fini(&fcp_softstate);
1153 	mutex_destroy(&fcp_global_mutex);
1154 	mutex_destroy(&fcp_ioctl_mutex);
1155 	fc_trace_free_logq(fcp_logq);
1156 
1157 	return (rval);
1158 }
1159 
1160 
1161 int
1162 _info(struct modinfo *modinfop)
1163 {
1164 	return (mod_info(&modlinkage, modinfop));
1165 }
1166 
1167 
1168 /*
1169  * attach the module
1170  */
1171 static int
1172 fcp_attach(dev_info_t *devi, ddi_attach_cmd_t cmd)
1173 {
1174 	int rval = DDI_SUCCESS;
1175 
1176 	FCP_DTRACE(fcp_logq, "fcp", fcp_trace,
1177 	    FCP_BUF_LEVEL_8, 0, "fcp module attach: cmd=0x%x", cmd);
1178 
1179 	if (cmd == DDI_ATTACH) {
1180 		/* The FCP pseudo device is created here. */
1181 		mutex_enter(&fcp_global_mutex);
1182 		fcp_global_dip = devi;
1183 		mutex_exit(&fcp_global_mutex);
1184 
1185 		if (ddi_create_minor_node(fcp_global_dip, "fcp", S_IFCHR,
1186 		    0, DDI_PSEUDO, 0) == DDI_SUCCESS) {
1187 			ddi_report_dev(fcp_global_dip);
1188 		} else {
1189 			cmn_err(CE_WARN, "FCP: Cannot create minor node");
1190 			mutex_enter(&fcp_global_mutex);
1191 			fcp_global_dip = NULL;
1192 			mutex_exit(&fcp_global_mutex);
1193 
1194 			rval = DDI_FAILURE;
1195 		}
1196 		/*
1197 		 * We check the fcp_offline_delay property at this
1198 		 * point. This variable is global for the driver,
1199 		 * not specific to an instance.
1200 		 *
1201 		 * We do not recommend setting the value to less
1202 		 * than 10 seconds (RA_TOV_els), or greater than
1203 		 * 60 seconds.
1204 		 */
1205 		fcp_offline_delay = ddi_prop_get_int(DDI_DEV_T_ANY,
1206 		    devi, DDI_PROP_DONTPASS | DDI_PROP_NOTPROM,
1207 		    "fcp_offline_delay", FCP_OFFLINE_DELAY);
1208 		if ((fcp_offline_delay < 10) ||
1209 		    (fcp_offline_delay > 60)) {
1210 			cmn_err(CE_WARN, "Setting fcp_offline_delay "
1211 			    "to %d second(s). This is outside the "
1212 			    "recommended range of 10..60 seconds.",
1213 			    fcp_offline_delay);
1214 		}
1215 	}
1216 
1217 	return (rval);
1218 }
1219 
1220 
1221 /*ARGSUSED*/
1222 static int
1223 fcp_detach(dev_info_t *devi, ddi_detach_cmd_t cmd)
1224 {
1225 	int	res = DDI_SUCCESS;
1226 
1227 	FCP_DTRACE(fcp_logq, "fcp", fcp_trace,
1228 	    FCP_BUF_LEVEL_8, 0,  "module detach: cmd=0x%x", cmd);
1229 
1230 	if (cmd == DDI_DETACH) {
1231 		/*
1232 		 * Check if there are active ports/threads. If there
1233 		 * are any, we will fail, else we will succeed (there
1234 		 * should not be much to clean up)
1235 		 */
1236 		mutex_enter(&fcp_global_mutex);
1237 		FCP_DTRACE(fcp_logq, "fcp",
1238 		    fcp_trace, FCP_BUF_LEVEL_8, 0,  "port_head=%p",
1239 		    (void *) fcp_port_head);
1240 
1241 		if (fcp_port_head == NULL) {
1242 			ddi_remove_minor_node(fcp_global_dip, NULL);
1243 			fcp_global_dip = NULL;
1244 			mutex_exit(&fcp_global_mutex);
1245 		} else {
1246 			mutex_exit(&fcp_global_mutex);
1247 			res = DDI_FAILURE;
1248 		}
1249 	}
1250 	FCP_DTRACE(fcp_logq, "fcp", fcp_trace,
1251 	    FCP_BUF_LEVEL_8, 0,  "module detach returning %d", res);
1252 
1253 	return (res);
1254 }
1255 
1256 
1257 /* ARGSUSED */
1258 static int
1259 fcp_open(dev_t *devp, int flag, int otype, cred_t *credp)
1260 {
1261 	if (otype != OTYP_CHR) {
1262 		return (EINVAL);
1263 	}
1264 
1265 	/*
1266 	 * Allow only root to talk;
1267 	 */
1268 	if (drv_priv(credp)) {
1269 		return (EPERM);
1270 	}
1271 
1272 	mutex_enter(&fcp_global_mutex);
1273 	if (fcp_oflag & FCP_EXCL) {
1274 		mutex_exit(&fcp_global_mutex);
1275 		return (EBUSY);
1276 	}
1277 
1278 	if (flag & FEXCL) {
1279 		if (fcp_oflag & FCP_OPEN) {
1280 			mutex_exit(&fcp_global_mutex);
1281 			return (EBUSY);
1282 		}
1283 		fcp_oflag |= FCP_EXCL;
1284 	}
1285 	fcp_oflag |= FCP_OPEN;
1286 	mutex_exit(&fcp_global_mutex);
1287 
1288 	return (0);
1289 }
1290 
1291 
1292 /* ARGSUSED */
1293 static int
1294 fcp_close(dev_t dev, int flag, int otype, cred_t *credp)
1295 {
1296 	if (otype != OTYP_CHR) {
1297 		return (EINVAL);
1298 	}
1299 
1300 	mutex_enter(&fcp_global_mutex);
1301 	if (!(fcp_oflag & FCP_OPEN)) {
1302 		mutex_exit(&fcp_global_mutex);
1303 		return (ENODEV);
1304 	}
1305 	fcp_oflag = FCP_IDLE;
1306 	mutex_exit(&fcp_global_mutex);
1307 
1308 	return (0);
1309 }
1310 
1311 
1312 /*
1313  * fcp_ioctl
1314  * 	Entry point for the FCP ioctls
1315  *
1316  * Input:
1317  *	See ioctl(9E)
1318  *
1319  * Output:
1320  *      See ioctl(9E)
1321  *
1322  * Returns:
1323  *      See ioctl(9E)
1324  *
1325  * Context:
1326  *      Kernel context.
1327  */
1328 /* ARGSUSED */
1329 static int
1330 fcp_ioctl(dev_t dev, int cmd, intptr_t data, int mode, cred_t *credp,
1331     int *rval)
1332 {
1333 	int			ret = 0;
1334 
1335 	mutex_enter(&fcp_global_mutex);
1336 	if (!(fcp_oflag & FCP_OPEN)) {
1337 		mutex_exit(&fcp_global_mutex);
1338 		return (ENXIO);
1339 	}
1340 	mutex_exit(&fcp_global_mutex);
1341 
1342 	switch (cmd) {
1343 	case FCP_TGT_INQUIRY:
1344 	case FCP_TGT_CREATE:
1345 	case FCP_TGT_DELETE:
1346 		ret = fcp_setup_device_data_ioctl(cmd,
1347 				(struct fcp_ioctl *)data, mode, rval);
1348 		break;
1349 
1350 	case FCP_TGT_SEND_SCSI:
1351 		mutex_enter(&fcp_ioctl_mutex);
1352 		ret = fcp_setup_scsi_ioctl(
1353 				(struct fcp_scsi_cmd *)data, mode, rval);
1354 		mutex_exit(&fcp_ioctl_mutex);
1355 		break;
1356 
1357 	case FCP_STATE_COUNT:
1358 		ret = fcp_get_statec_count((struct fcp_ioctl *)data,
1359 							mode, rval);
1360 		break;
1361 	case FCP_GET_TARGET_MAPPINGS:
1362 		ret = fcp_get_target_mappings((struct fcp_ioctl *)data,
1363 							mode, rval);
1364 		break;
1365 	default:
1366 		fcp_log(CE_WARN, NULL,
1367 		    "!Invalid ioctl opcode = 0x%x", cmd);
1368 		ret	= EINVAL;
1369 	}
1370 
1371 	return (ret);
1372 }
1373 
1374 
1375 /*
1376  * fcp_setup_device_data_ioctl
1377  * 	Setup handler for the "device data" style of
1378  *	ioctl for FCP.  See "fcp_util.h" for data structure
1379  *	definition.
1380  *
1381  * Input:
1382  *	cmd	= FCP ioctl command
1383  *	data	= ioctl data
1384  *	mode	= See ioctl(9E)
1385  *
1386  * Output:
1387  *      data	= ioctl data
1388  *	rval	= return value - see ioctl(9E)
1389  *
1390  * Returns:
1391  *      See ioctl(9E)
1392  *
1393  * Context:
1394  *      Kernel context.
1395  */
1396 /* ARGSUSED */
1397 static int
1398 fcp_setup_device_data_ioctl(int cmd, struct fcp_ioctl *data, int mode,
1399     int *rval)
1400 {
1401 	struct fcp_port	*pptr;
1402 	struct 	device_data 	*dev_data;
1403 	uint32_t		link_cnt;
1404 	la_wwn_t		*wwn_ptr = NULL;
1405 	struct fcp_tgt		*ptgt = NULL;
1406 	struct fcp_lun		*plun = NULL;
1407 	int 			i, error;
1408 	struct fcp_ioctl	fioctl;
1409 
1410 #ifdef	_MULTI_DATAMODEL
1411 	switch (ddi_model_convert_from(mode & FMODELS)) {
1412 	case DDI_MODEL_ILP32: {
1413 		struct fcp32_ioctl f32_ioctl;
1414 
1415 		if (ddi_copyin((void *)data, (void *)&f32_ioctl,
1416 		    sizeof (struct fcp32_ioctl), mode)) {
1417 			return (EFAULT);
1418 		}
1419 		fioctl.fp_minor = f32_ioctl.fp_minor;
1420 		fioctl.listlen = f32_ioctl.listlen;
1421 		fioctl.list = (caddr_t)(long)f32_ioctl.list;
1422 		break;
1423 	}
1424 	case DDI_MODEL_NONE:
1425 		if (ddi_copyin((void *)data, (void *)&fioctl,
1426 		    sizeof (struct fcp_ioctl), mode)) {
1427 			return (EFAULT);
1428 		}
1429 		break;
1430 	}
1431 
1432 #else	/* _MULTI_DATAMODEL */
1433 	if (ddi_copyin((void *)data, (void *)&fioctl,
1434 	    sizeof (struct fcp_ioctl), mode)) {
1435 		return (EFAULT);
1436 	}
1437 #endif	/* _MULTI_DATAMODEL */
1438 
1439 	/*
1440 	 * Right now we can assume that the minor number matches with
1441 	 * this instance of fp. If this changes we will need to
1442 	 * revisit this logic.
1443 	 */
1444 	mutex_enter(&fcp_global_mutex);
1445 	pptr = fcp_port_head;
1446 	while (pptr) {
1447 		if (pptr->port_instance == (uint32_t)fioctl.fp_minor)
1448 			break;
1449 		else
1450 			pptr = pptr->port_next;
1451 	}
1452 	mutex_exit(&fcp_global_mutex);
1453 	if (pptr == NULL) {
1454 		return (ENXIO);
1455 	}
1456 	mutex_enter(&pptr->port_mutex);
1457 
1458 
1459 	if ((dev_data = kmem_zalloc((sizeof (struct device_data)) *
1460 	    fioctl.listlen, KM_NOSLEEP)) == NULL) {
1461 		mutex_exit(&pptr->port_mutex);
1462 		return (ENOMEM);
1463 	}
1464 
1465 	if (ddi_copyin(fioctl.list, dev_data,
1466 	    (sizeof (struct device_data)) * fioctl.listlen, mode)) {
1467 		kmem_free(dev_data, sizeof (*dev_data) * fioctl.listlen);
1468 		mutex_exit(&pptr->port_mutex);
1469 		return (EFAULT);
1470 	}
1471 	link_cnt = pptr->port_link_cnt;
1472 
1473 	if (cmd == FCP_TGT_INQUIRY) {
1474 		wwn_ptr = (la_wwn_t *)&(dev_data[0].dev_pwwn);
1475 		if (bcmp(wwn_ptr->raw_wwn, pptr->port_pwwn.raw_wwn,
1476 			sizeof (wwn_ptr->raw_wwn)) == 0) {
1477 			/* This ioctl is requesting INQ info of local HBA */
1478 			mutex_exit(&pptr->port_mutex);
1479 			dev_data[0].dev0_type = DTYPE_UNKNOWN;
1480 			dev_data[0].dev_status = 0;
1481 			if (ddi_copyout(dev_data, fioctl.list,
1482 				(sizeof (struct device_data)) * fioctl.listlen,
1483 				mode)) {
1484 				kmem_free(dev_data,
1485 				sizeof (*dev_data) * fioctl.listlen);
1486 				return (EFAULT);
1487 			}
1488 			kmem_free(dev_data,
1489 			    sizeof (*dev_data) * fioctl.listlen);
1490 #ifdef  _MULTI_DATAMODEL
1491 			switch (ddi_model_convert_from(mode & FMODELS)) {
1492 			case DDI_MODEL_ILP32: {
1493 				struct fcp32_ioctl f32_ioctl;
1494 				f32_ioctl.fp_minor = fioctl.fp_minor;
1495 				f32_ioctl.listlen = fioctl.listlen;
1496 				f32_ioctl.list = (caddr32_t)(long)fioctl.list;
1497 				if (ddi_copyout((void *)&f32_ioctl,
1498 					(void *)data,
1499 					sizeof (struct fcp32_ioctl), mode)) {
1500 					return (EFAULT);
1501 				}
1502 				break;
1503 			}
1504 			case DDI_MODEL_NONE:
1505 				if (ddi_copyout((void *)&fioctl, (void *)data,
1506 					sizeof (struct fcp_ioctl), mode)) {
1507 					return (EFAULT);
1508 				}
1509 				break;
1510 			}
1511 #else   /* _MULTI_DATAMODEL */
1512 			if (ddi_copyout((void *)&fioctl, (void *)data,
1513 				sizeof (struct fcp_ioctl), mode)) {
1514 				return (EFAULT);
1515 			}
1516 #endif  /* _MULTI_DATAMODEL */
1517 			return (0);
1518 		}
1519 	}
1520 
1521 	if (pptr->port_state & (FCP_STATE_INIT | FCP_STATE_OFFLINE)) {
1522 		kmem_free(dev_data, sizeof (*dev_data) * fioctl.listlen);
1523 		mutex_exit(&pptr->port_mutex);
1524 		return (ENXIO);
1525 	}
1526 
1527 	for (i = 0; (i < fioctl.listlen) && (link_cnt == pptr->port_link_cnt);
1528 	    i++) {
1529 		wwn_ptr = (la_wwn_t *)&(dev_data[i].dev_pwwn);
1530 
1531 		dev_data[i].dev0_type = DTYPE_UNKNOWN;
1532 
1533 
1534 		dev_data[i].dev_status = ENXIO;
1535 
1536 		if ((ptgt = fcp_lookup_target(pptr,
1537 		    (uchar_t *)wwn_ptr)) == NULL) {
1538 			mutex_exit(&pptr->port_mutex);
1539 			if (fc_ulp_get_remote_port(pptr->port_fp_handle,
1540 			    wwn_ptr, &error, 0) == NULL) {
1541 				dev_data[i].dev_status = ENODEV;
1542 				mutex_enter(&pptr->port_mutex);
1543 				continue;
1544 			} else {
1545 
1546 				dev_data[i].dev_status = EAGAIN;
1547 
1548 				mutex_enter(&pptr->port_mutex);
1549 				continue;
1550 			}
1551 		} else {
1552 			mutex_enter(&ptgt->tgt_mutex);
1553 			if (ptgt->tgt_state & (FCP_TGT_MARK |
1554 			    FCP_TGT_BUSY)) {
1555 				dev_data[i].dev_status = EAGAIN;
1556 				mutex_exit(&ptgt->tgt_mutex);
1557 				continue;
1558 			}
1559 
1560 			if (ptgt->tgt_state & FCP_TGT_OFFLINE) {
1561 				if (ptgt->tgt_icap && !ptgt->tgt_tcap) {
1562 					dev_data[i].dev_status = ENOTSUP;
1563 				} else {
1564 					dev_data[i].dev_status = ENXIO;
1565 				}
1566 				mutex_exit(&ptgt->tgt_mutex);
1567 				continue;
1568 			}
1569 
1570 			switch (cmd) {
1571 			case FCP_TGT_INQUIRY:
1572 				/*
1573 				 * The reason we give device type of
1574 				 * lun 0 only even though in some
1575 				 * cases(like maxstrat) lun 0 device
1576 				 * type may be 0x3f(invalid) is that
1577 				 * for bridge boxes target will appear
1578 				 * as luns and the first lun could be
1579 				 * a device that utility may not care
1580 				 * about (like a tape device).
1581 				 */
1582 				dev_data[i].dev_lun_cnt = ptgt->tgt_lun_cnt;
1583 				dev_data[i].dev_status = 0;
1584 				mutex_exit(&ptgt->tgt_mutex);
1585 
1586 				if ((plun = fcp_get_lun(ptgt, 0)) == NULL) {
1587 					dev_data[i].dev0_type = DTYPE_UNKNOWN;
1588 				} else {
1589 					dev_data[i].dev0_type = plun->lun_type;
1590 				}
1591 				mutex_enter(&ptgt->tgt_mutex);
1592 				break;
1593 
1594 			case FCP_TGT_CREATE:
1595 				mutex_exit(&ptgt->tgt_mutex);
1596 				mutex_exit(&pptr->port_mutex);
1597 
1598 				/*
1599 				 * serialize state change call backs.
1600 				 * only one call back will be handled
1601 				 * at a time.
1602 				 */
1603 				mutex_enter(&fcp_global_mutex);
1604 				if (fcp_oflag & FCP_BUSY) {
1605 					mutex_exit(&fcp_global_mutex);
1606 					if (dev_data) {
1607 						kmem_free(dev_data,
1608 						    sizeof (*dev_data) *
1609 						    fioctl.listlen);
1610 					}
1611 					return (EBUSY);
1612 				}
1613 				fcp_oflag |= FCP_BUSY;
1614 				mutex_exit(&fcp_global_mutex);
1615 
1616 				dev_data[i].dev_status =
1617 				    fcp_create_on_demand(pptr,
1618 				    wwn_ptr->raw_wwn);
1619 
1620 				if (dev_data[i].dev_status != 0) {
1621 					char 	buf[25];
1622 
1623 					for (i = 0; i < FC_WWN_SIZE; i++) {
1624 						(void) sprintf(&buf[i << 1],
1625 						    "%02x",
1626 						    wwn_ptr->raw_wwn[i]);
1627 					}
1628 
1629 					fcp_log(CE_WARN, pptr->port_dip,
1630 					    "!Failed to create nodes for"
1631 					    " pwwn=%s; error=%x", buf,
1632 					    dev_data[i].dev_status);
1633 				}
1634 
1635 				/* allow state change call backs again */
1636 				mutex_enter(&fcp_global_mutex);
1637 				fcp_oflag &= ~FCP_BUSY;
1638 				mutex_exit(&fcp_global_mutex);
1639 
1640 				mutex_enter(&pptr->port_mutex);
1641 				mutex_enter(&ptgt->tgt_mutex);
1642 
1643 				break;
1644 
1645 			case FCP_TGT_DELETE:
1646 				break;
1647 
1648 			default:
1649 				fcp_log(CE_WARN, pptr->port_dip,
1650 				    "!Invalid device data ioctl "
1651 				    "opcode = 0x%x", cmd);
1652 			}
1653 			mutex_exit(&ptgt->tgt_mutex);
1654 		}
1655 	}
1656 	mutex_exit(&pptr->port_mutex);
1657 
1658 	if (ddi_copyout(dev_data, fioctl.list,
1659 	    (sizeof (struct device_data)) * fioctl.listlen, mode)) {
1660 		kmem_free(dev_data, sizeof (*dev_data) * fioctl.listlen);
1661 		return (EFAULT);
1662 	}
1663 	kmem_free(dev_data, sizeof (*dev_data) * fioctl.listlen);
1664 
1665 #ifdef	_MULTI_DATAMODEL
1666 	switch (ddi_model_convert_from(mode & FMODELS)) {
1667 	case DDI_MODEL_ILP32: {
1668 		struct fcp32_ioctl f32_ioctl;
1669 
1670 		f32_ioctl.fp_minor = fioctl.fp_minor;
1671 		f32_ioctl.listlen = fioctl.listlen;
1672 		f32_ioctl.list = (caddr32_t)(long)fioctl.list;
1673 		if (ddi_copyout((void *)&f32_ioctl, (void *)data,
1674 		    sizeof (struct fcp32_ioctl), mode)) {
1675 			return (EFAULT);
1676 		}
1677 		break;
1678 	}
1679 	case DDI_MODEL_NONE:
1680 		if (ddi_copyout((void *)&fioctl, (void *)data,
1681 		    sizeof (struct fcp_ioctl), mode)) {
1682 			return (EFAULT);
1683 		}
1684 		break;
1685 	}
1686 #else	/* _MULTI_DATAMODEL */
1687 
1688 	if (ddi_copyout((void *)&fioctl, (void *)data,
1689 	    sizeof (struct fcp_ioctl), mode)) {
1690 		return (EFAULT);
1691 	}
1692 #endif	/* _MULTI_DATAMODEL */
1693 
1694 	return (0);
1695 }
1696 
1697 /*
1698  * Fetch the target mappings (path, etc.) for all LUNs
1699  * on this port.
1700  */
1701 /* ARGSUSED */
1702 static int
1703 fcp_get_target_mappings(struct fcp_ioctl *data,
1704 	int mode, int *rval)
1705 {
1706 	struct fcp_port	    *pptr;
1707 	fc_hba_target_mappings_t    *mappings;
1708 	fc_hba_mapping_entry_t	    *map;
1709 	struct fcp_tgt	    *ptgt = NULL;
1710 	struct fcp_lun	    *plun = NULL;
1711 	int			    i, mapIndex, mappingSize;
1712 	int			    listlen;
1713 	struct fcp_ioctl	    fioctl;
1714 	char			    *path;
1715 	fcp_ent_addr_t		    sam_lun_addr;
1716 
1717 #ifdef	_MULTI_DATAMODEL
1718 	switch (ddi_model_convert_from(mode & FMODELS)) {
1719 	case DDI_MODEL_ILP32: {
1720 		struct fcp32_ioctl f32_ioctl;
1721 
1722 		if (ddi_copyin((void *)data, (void *)&f32_ioctl,
1723 		    sizeof (struct fcp32_ioctl), mode)) {
1724 			return (EFAULT);
1725 		}
1726 		fioctl.fp_minor = f32_ioctl.fp_minor;
1727 		fioctl.listlen = f32_ioctl.listlen;
1728 		fioctl.list = (caddr_t)(long)f32_ioctl.list;
1729 		break;
1730 	}
1731 	case DDI_MODEL_NONE:
1732 		if (ddi_copyin((void *)data, (void *)&fioctl,
1733 		    sizeof (struct fcp_ioctl), mode)) {
1734 			return (EFAULT);
1735 		}
1736 		break;
1737 	}
1738 
1739 #else	/* _MULTI_DATAMODEL */
1740 	if (ddi_copyin((void *)data, (void *)&fioctl,
1741 	    sizeof (struct fcp_ioctl), mode)) {
1742 		return (EFAULT);
1743 	}
1744 #endif	/* _MULTI_DATAMODEL */
1745 
1746 	/*
1747 	 * Right now we can assume that the minor number matches with
1748 	 * this instance of fp. If this changes we will need to
1749 	 * revisit this logic.
1750 	 */
1751 	mutex_enter(&fcp_global_mutex);
1752 	pptr = fcp_port_head;
1753 	while (pptr) {
1754 		if (pptr->port_instance == (uint32_t)fioctl.fp_minor)
1755 			break;
1756 		else
1757 			pptr = pptr->port_next;
1758 	}
1759 	mutex_exit(&fcp_global_mutex);
1760 	if (pptr == NULL) {
1761 	    cmn_err(CE_NOTE, "target mappings: unknown instance number : %d",
1762 		    fioctl.fp_minor);
1763 	    return (ENXIO);
1764 	}
1765 
1766 
1767 	/* We use listlen to show the total buffer size */
1768 	mappingSize = fioctl.listlen;
1769 
1770 	/* Now calculate how many mapping entries will fit */
1771 	listlen = fioctl.listlen + sizeof (fc_hba_mapping_entry_t)
1772 		- sizeof (fc_hba_target_mappings_t);
1773 	if (listlen <= 0) {
1774 	    cmn_err(CE_NOTE, "target mappings: Insufficient buffer");
1775 	    return (ENXIO);
1776 	}
1777 	listlen = listlen / sizeof (fc_hba_mapping_entry_t);
1778 
1779 	if ((mappings = kmem_zalloc(mappingSize, KM_SLEEP)) == NULL) {
1780 	    return (ENOMEM);
1781 	}
1782 	mappings->version = FC_HBA_TARGET_MAPPINGS_VERSION;
1783 
1784 	/* Now get to work */
1785 	mapIndex = 0;
1786 
1787 	mutex_enter(&pptr->port_mutex);
1788 	/* Loop through all targets on this port */
1789 	for (i = 0; i < FCP_NUM_HASH; i++) {
1790 	    for (ptgt = pptr->port_tgt_hash_table[i]; ptgt != NULL;
1791 		    ptgt = ptgt->tgt_next) {
1792 
1793 
1794 		/* Loop through all LUNs on this target */
1795 		for (plun = ptgt->tgt_lun; plun != NULL;
1796 			plun = plun->lun_next) {
1797 		    if (plun->lun_state & FCP_LUN_OFFLINE) {
1798 			continue;
1799 		    }
1800 
1801 		    path = fcp_get_lun_path(plun);
1802 		    if (path == NULL) {
1803 			continue;
1804 		    }
1805 
1806 		    if (mapIndex >= listlen) {
1807 			mapIndex ++;
1808 			kmem_free(path, MAXPATHLEN);
1809 			continue;
1810 		    }
1811 		    map = &mappings->entries[mapIndex++];
1812 		    bcopy(path, map->targetDriver, sizeof (map->targetDriver));
1813 		    map->d_id = ptgt->tgt_d_id;
1814 		    map->busNumber = 0;
1815 		    map->targetNumber = ptgt->tgt_d_id;
1816 		    map->osLUN = plun->lun_num;
1817 
1818 			/*
1819 			 * We had swapped lun when we stored it in
1820 			 * lun_addr. We need to swap it back before
1821 			 * returning it to user land
1822 			 */
1823 
1824 		    sam_lun_addr.ent_addr_0 = BE_16(plun->lun_addr.ent_addr_0);
1825 		    sam_lun_addr.ent_addr_1 = BE_16(plun->lun_addr.ent_addr_1);
1826 		    sam_lun_addr.ent_addr_2 = BE_16(plun->lun_addr.ent_addr_2);
1827 		    sam_lun_addr.ent_addr_3 = BE_16(plun->lun_addr.ent_addr_3);
1828 
1829 		    bcopy(&sam_lun_addr, &map->samLUN, FCP_LUN_SIZE);
1830 
1831 		    bcopy(ptgt->tgt_node_wwn.raw_wwn, map->NodeWWN.raw_wwn,
1832 			sizeof (la_wwn_t));
1833 		    bcopy(ptgt->tgt_port_wwn.raw_wwn, map->PortWWN.raw_wwn,
1834 			sizeof (la_wwn_t));
1835 
1836 		    if (plun->lun_guid) {
1837 
1838 			/* convert ascii wwn to bytes */
1839 			fcp_ascii_to_wwn(plun->lun_guid, map->guid,
1840 			    sizeof (map->guid));
1841 
1842 			if ((sizeof (map->guid)) < plun->lun_guid_size/2) {
1843 				cmn_err(CE_WARN, "fcp_get_target_mappings:"
1844 					"guid copy space insufficient."
1845 					"Copy Truncation - "
1846 					"available %d; need %d",
1847 					(int)sizeof (map->guid),
1848 					(int)plun->lun_guid_size/2);
1849 			}
1850 		    }
1851 		    kmem_free(path, MAXPATHLEN);
1852 		}
1853 	    }
1854 	}
1855 	mutex_exit(&pptr->port_mutex);
1856 	mappings->numLuns = mapIndex;
1857 
1858 	if (ddi_copyout(mappings, fioctl.list, mappingSize, mode)) {
1859 	    kmem_free(mappings, mappingSize);
1860 	    return (EFAULT);
1861 	}
1862 	kmem_free(mappings, mappingSize);
1863 
1864 #ifdef	_MULTI_DATAMODEL
1865 	switch (ddi_model_convert_from(mode & FMODELS)) {
1866 	case DDI_MODEL_ILP32: {
1867 		struct fcp32_ioctl f32_ioctl;
1868 
1869 		f32_ioctl.fp_minor = fioctl.fp_minor;
1870 		f32_ioctl.listlen = fioctl.listlen;
1871 		f32_ioctl.list = (caddr32_t)(long)fioctl.list;
1872 		if (ddi_copyout((void *)&f32_ioctl, (void *)data,
1873 		    sizeof (struct fcp32_ioctl), mode)) {
1874 			return (EFAULT);
1875 		}
1876 		break;
1877 	}
1878 	case DDI_MODEL_NONE:
1879 		if (ddi_copyout((void *)&fioctl, (void *)data,
1880 		    sizeof (struct fcp_ioctl), mode)) {
1881 			return (EFAULT);
1882 		}
1883 		break;
1884 	}
1885 #else	/* _MULTI_DATAMODEL */
1886 
1887 	if (ddi_copyout((void *)&fioctl, (void *)data,
1888 	    sizeof (struct fcp_ioctl), mode)) {
1889 		return (EFAULT);
1890 	}
1891 #endif	/* _MULTI_DATAMODEL */
1892 
1893 	return (0);
1894 }
1895 
1896 /*
1897  * fcp_setup_scsi_ioctl
1898  * 	Setup handler for the "scsi passthru" style of
1899  *	ioctl for FCP.  See "fcp_util.h" for data structure
1900  *	definition.
1901  *
1902  * Input:
1903  *	u_fscsi	= ioctl data (user address space)
1904  *	mode	= See ioctl(9E)
1905  *
1906  * Output:
1907  *      u_fscsi	= ioctl data (user address space)
1908  *	rval	= return value - see ioctl(9E)
1909  *
1910  * Returns:
1911  *      0	= OK
1912  *	EAGAIN	= See errno.h
1913  *	EBUSY	= See errno.h
1914  *	EFAULT	= See errno.h
1915  *	EINTR	= See errno.h
1916  *	EINVAL	= See errno.h
1917  *	EIO	= See errno.h
1918  *	ENOMEM	= See errno.h
1919  *	ENXIO	= See errno.h
1920  *
1921  * Context:
1922  *      Kernel context.
1923  */
1924 /* ARGSUSED */
1925 static int
1926 fcp_setup_scsi_ioctl(struct fcp_scsi_cmd *u_fscsi,
1927 	int mode, int *rval)
1928 {
1929 	int			ret		= 0;
1930 	int			temp_ret;
1931 	caddr_t			k_cdbbufaddr	= NULL;
1932 	caddr_t			k_bufaddr	= NULL;
1933 	caddr_t			k_rqbufaddr	= NULL;
1934 	caddr_t			u_cdbbufaddr;
1935 	caddr_t			u_bufaddr;
1936 	caddr_t			u_rqbufaddr;
1937 	struct fcp_scsi_cmd	k_fscsi;
1938 
1939 	/*
1940 	 * Get fcp_scsi_cmd array element from user address space
1941 	 */
1942 	if ((ret = fcp_copyin_scsi_cmd((caddr_t)u_fscsi, &k_fscsi, mode))
1943 		!= 0) {
1944 		return (ret);
1945 	}
1946 
1947 
1948 	/*
1949 	 * Even though kmem_alloc() checks the validity of the
1950 	 * buffer length, this check is needed when the
1951 	 * kmem_flags set and the zero buffer length is passed.
1952 	 */
1953 	if ((k_fscsi.scsi_cdblen <= 0) ||
1954 	    (k_fscsi.scsi_buflen <= 0) ||
1955 	    (k_fscsi.scsi_buflen > FCP_MAX_RESPONSE_LEN) ||
1956 	    (k_fscsi.scsi_rqlen <= 0) ||
1957 	    (k_fscsi.scsi_rqlen > FCP_MAX_SENSE_LEN)) {
1958 		return (EINVAL);
1959 	}
1960 
1961 	/*
1962 	 * Allocate data for fcp_scsi_cmd pointer fields
1963 	 */
1964 	if (ret == 0) {
1965 		k_cdbbufaddr = kmem_alloc(k_fscsi.scsi_cdblen, KM_NOSLEEP);
1966 		k_bufaddr    = kmem_alloc(k_fscsi.scsi_buflen, KM_NOSLEEP);
1967 		k_rqbufaddr  = kmem_alloc(k_fscsi.scsi_rqlen,  KM_NOSLEEP);
1968 
1969 		if (k_cdbbufaddr == NULL ||
1970 		    k_bufaddr    == NULL ||
1971 		    k_rqbufaddr  == NULL) {
1972 			ret = ENOMEM;
1973 		}
1974 	}
1975 
1976 	/*
1977 	 * Get fcp_scsi_cmd pointer fields from user
1978 	 * address space
1979 	 */
1980 	if (ret == 0) {
1981 		u_cdbbufaddr = k_fscsi.scsi_cdbbufaddr;
1982 		u_bufaddr    = k_fscsi.scsi_bufaddr;
1983 		u_rqbufaddr  = k_fscsi.scsi_rqbufaddr;
1984 
1985 		if (ddi_copyin(u_cdbbufaddr,
1986 				k_cdbbufaddr,
1987 				k_fscsi.scsi_cdblen,
1988 				mode)) {
1989 			ret = EFAULT;
1990 		} else if (ddi_copyin(u_bufaddr,
1991 				k_bufaddr,
1992 				k_fscsi.scsi_buflen,
1993 				mode)) {
1994 			ret = EFAULT;
1995 		} else if (ddi_copyin(u_rqbufaddr,
1996 				k_rqbufaddr,
1997 				k_fscsi.scsi_rqlen,
1998 				mode)) {
1999 			ret = EFAULT;
2000 		}
2001 	}
2002 
2003 	/*
2004 	 * Send scsi command (blocking)
2005 	 */
2006 	if (ret == 0) {
2007 		/*
2008 		 * Prior to sending the scsi command, the
2009 		 * fcp_scsi_cmd data structure must contain kernel,
2010 		 * not user, addresses.
2011 		 */
2012 		k_fscsi.scsi_cdbbufaddr	= k_cdbbufaddr;
2013 		k_fscsi.scsi_bufaddr	= k_bufaddr;
2014 		k_fscsi.scsi_rqbufaddr	= k_rqbufaddr;
2015 
2016 		ret = fcp_send_scsi_ioctl(&k_fscsi);
2017 
2018 		/*
2019 		 * After sending the scsi command, the
2020 		 * fcp_scsi_cmd data structure must contain user,
2021 		 * not kernel, addresses.
2022 		 */
2023 		k_fscsi.scsi_cdbbufaddr	= u_cdbbufaddr;
2024 		k_fscsi.scsi_bufaddr	= u_bufaddr;
2025 		k_fscsi.scsi_rqbufaddr	= u_rqbufaddr;
2026 	}
2027 
2028 	/*
2029 	 * Put fcp_scsi_cmd pointer fields to user address space
2030 	 */
2031 	if (ret == 0) {
2032 		if (ddi_copyout(k_cdbbufaddr,
2033 				u_cdbbufaddr,
2034 				k_fscsi.scsi_cdblen,
2035 				mode)) {
2036 			ret = EFAULT;
2037 		} else if (ddi_copyout(k_bufaddr,
2038 				u_bufaddr,
2039 				k_fscsi.scsi_buflen,
2040 				mode)) {
2041 			ret = EFAULT;
2042 		} else if (ddi_copyout(k_rqbufaddr,
2043 				u_rqbufaddr,
2044 				k_fscsi.scsi_rqlen,
2045 				mode)) {
2046 			ret = EFAULT;
2047 		}
2048 	}
2049 
2050 	/*
2051 	 * Free data for fcp_scsi_cmd pointer fields
2052 	 */
2053 	if (k_cdbbufaddr != NULL) {
2054 		kmem_free(k_cdbbufaddr, k_fscsi.scsi_cdblen);
2055 	}
2056 	if (k_bufaddr != NULL) {
2057 		kmem_free(k_bufaddr, k_fscsi.scsi_buflen);
2058 	}
2059 	if (k_rqbufaddr != NULL) {
2060 		kmem_free(k_rqbufaddr, k_fscsi.scsi_rqlen);
2061 	}
2062 
2063 	/*
2064 	 * Put fcp_scsi_cmd array element to user address space
2065 	 */
2066 	temp_ret = fcp_copyout_scsi_cmd(&k_fscsi, (caddr_t)u_fscsi, mode);
2067 	if (temp_ret != 0) {
2068 		ret = temp_ret;
2069 	}
2070 
2071 	/*
2072 	 * Return status
2073 	 */
2074 	return (ret);
2075 }
2076 
2077 
2078 /*
2079  * fcp_copyin_scsi_cmd
2080  *	Copy in fcp_scsi_cmd data structure from user address space.
2081  *	The data may be in 32 bit or 64 bit modes.
2082  *
2083  * Input:
2084  *	base_addr	= from address (user address space)
2085  *	mode		= See ioctl(9E) and ddi_copyin(9F)
2086  *
2087  * Output:
2088  *      fscsi		= to address (kernel address space)
2089  *
2090  * Returns:
2091  *      0	= OK
2092  *	EFAULT	= Error
2093  *
2094  * Context:
2095  *      Kernel context.
2096  */
2097 static int
2098 fcp_copyin_scsi_cmd(caddr_t base_addr, struct fcp_scsi_cmd *fscsi, int mode)
2099 {
2100 #ifdef	_MULTI_DATAMODEL
2101 	struct fcp32_scsi_cmd	f32scsi;
2102 
2103 	switch (ddi_model_convert_from(mode & FMODELS)) {
2104 	case DDI_MODEL_ILP32:
2105 		/*
2106 		 * Copy data from user address space
2107 		 */
2108 		if (ddi_copyin((void *)base_addr,
2109 				&f32scsi,
2110 				sizeof (struct fcp32_scsi_cmd),
2111 				mode)) {
2112 			return (EFAULT);
2113 		}
2114 		/*
2115 		 * Convert from 32 bit to 64 bit
2116 		 */
2117 		FCP32_SCSI_CMD_TO_FCP_SCSI_CMD(&f32scsi, fscsi);
2118 		break;
2119 	case DDI_MODEL_NONE:
2120 		/*
2121 		 * Copy data from user address space
2122 		 */
2123 		if (ddi_copyin((void *)base_addr,
2124 				fscsi,
2125 				sizeof (struct fcp_scsi_cmd),
2126 				mode)) {
2127 			return (EFAULT);
2128 		}
2129 		break;
2130 	}
2131 #else	/* _MULTI_DATAMODEL */
2132 	/*
2133 	 * Copy data from user address space
2134 	 */
2135 	if (ddi_copyin((void *)base_addr,
2136 			fscsi,
2137 			sizeof (struct fcp_scsi_cmd),
2138 			mode)) {
2139 		return (EFAULT);
2140 	}
2141 #endif	/* _MULTI_DATAMODEL */
2142 
2143 	return (0);
2144 }
2145 
2146 
2147 /*
2148  * fcp_copyout_scsi_cmd
2149  *	Copy out fcp_scsi_cmd data structure to user address space.
2150  *	The data may be in 32 bit or 64 bit modes.
2151  *
2152  * Input:
2153  *      fscsi		= to address (kernel address space)
2154  *	mode		= See ioctl(9E) and ddi_copyin(9F)
2155  *
2156  * Output:
2157  *	base_addr	= from address (user address space)
2158  *
2159  * Returns:
2160  *      0	= OK
2161  *	EFAULT	= Error
2162  *
2163  * Context:
2164  *      Kernel context.
2165  */
2166 static int
2167 fcp_copyout_scsi_cmd(struct fcp_scsi_cmd *fscsi, caddr_t base_addr, int mode)
2168 {
2169 #ifdef	_MULTI_DATAMODEL
2170 	struct fcp32_scsi_cmd	f32scsi;
2171 
2172 	switch (ddi_model_convert_from(mode & FMODELS)) {
2173 	case DDI_MODEL_ILP32:
2174 		/*
2175 		 * Convert from 64 bit to 32 bit
2176 		 */
2177 		FCP_SCSI_CMD_TO_FCP32_SCSI_CMD(fscsi, &f32scsi);
2178 		/*
2179 		 * Copy data to user address space
2180 		 */
2181 		if (ddi_copyout(&f32scsi,
2182 				(void *)base_addr,
2183 				sizeof (struct fcp32_scsi_cmd),
2184 				mode)) {
2185 			return (EFAULT);
2186 		}
2187 		break;
2188 	case DDI_MODEL_NONE:
2189 		/*
2190 		 * Copy data to user address space
2191 		 */
2192 		if (ddi_copyout(fscsi,
2193 				(void *)base_addr,
2194 				sizeof (struct fcp_scsi_cmd),
2195 				mode)) {
2196 			return (EFAULT);
2197 		}
2198 		break;
2199 	}
2200 #else	/* _MULTI_DATAMODEL */
2201 	/*
2202 	 * Copy data to user address space
2203 	 */
2204 	if (ddi_copyout(fscsi,
2205 			(void *)base_addr,
2206 			sizeof (struct fcp_scsi_cmd),
2207 			mode)) {
2208 		return (EFAULT);
2209 	}
2210 #endif	/* _MULTI_DATAMODEL */
2211 
2212 	return (0);
2213 }
2214 
2215 
2216 /*
2217  * fcp_send_scsi_ioctl
2218  *	Sends the SCSI command in blocking mode.
2219  *
2220  * Input:
2221  *      fscsi		= SCSI command data structure
2222  *
2223  * Output:
2224  *      fscsi		= SCSI command data structure
2225  *
2226  * Returns:
2227  *      0	= OK
2228  *	EAGAIN	= See errno.h
2229  *	EBUSY	= See errno.h
2230  *	EINTR	= See errno.h
2231  *	EINVAL	= See errno.h
2232  *	EIO	= See errno.h
2233  *	ENOMEM	= See errno.h
2234  *	ENXIO	= See errno.h
2235  *
2236  * Context:
2237  *      Kernel context.
2238  */
2239 static int
2240 fcp_send_scsi_ioctl(struct fcp_scsi_cmd *fscsi)
2241 {
2242 	struct fcp_lun	*plun		= NULL;
2243 	struct fcp_port	*pptr		= NULL;
2244 	struct fcp_tgt	*ptgt		= NULL;
2245 	fc_packet_t		*fpkt		= NULL;
2246 	struct fcp_ipkt	*icmd		= NULL;
2247 	int			target_created	= FALSE;
2248 	fc_frame_hdr_t		*hp;
2249 	struct fcp_cmd		fcp_cmd;
2250 	struct fcp_cmd		*fcmd;
2251 	union scsi_cdb		*scsi_cdb;
2252 	la_wwn_t		*wwn_ptr;
2253 	int			nodma;
2254 	struct fcp_rsp		*rsp;
2255 	struct fcp_rsp_info	*rsp_info;
2256 	caddr_t			rsp_sense;
2257 	int			buf_len;
2258 	int			info_len;
2259 	int			sense_len;
2260 	struct scsi_extended_sense	*sense_to = NULL;
2261 	timeout_id_t		tid;
2262 	uint8_t			reconfig_lun = FALSE;
2263 	uint8_t			reconfig_pending = FALSE;
2264 	uint8_t			scsi_cmd;
2265 	int			rsp_len;
2266 	int			cmd_index;
2267 	int			fc_status;
2268 	int			pkt_state;
2269 	int			pkt_action;
2270 	int			pkt_reason;
2271 	int			ret, xport_retval = ~FC_SUCCESS;
2272 	int			lcount;
2273 	int			tcount;
2274 	int			reconfig_status;
2275 	int			port_busy = FALSE;
2276 	uchar_t			*lun_string;
2277 
2278 	/*
2279 	 * Check valid SCSI command
2280 	 */
2281 	scsi_cmd = ((uint8_t *)fscsi->scsi_cdbbufaddr)[0];
2282 	ret = EINVAL;
2283 	for (cmd_index = 0;
2284 		cmd_index < FCP_NUM_ELEMENTS(scsi_ioctl_list) &&
2285 		ret != 0;
2286 		cmd_index++) {
2287 		/*
2288 		 * First byte of CDB is the SCSI command
2289 		 */
2290 		if (scsi_ioctl_list[cmd_index] == scsi_cmd) {
2291 			ret = 0;
2292 		}
2293 	}
2294 
2295 	/*
2296 	 * Check inputs
2297 	 */
2298 	if (fscsi->scsi_flags != FCP_SCSI_READ) {
2299 		ret = EINVAL;
2300 	} else if (fscsi->scsi_cdblen > FCP_CDB_SIZE) { /* no larger than */
2301 		ret = EINVAL;
2302 	}
2303 
2304 
2305 	/*
2306 	 * Find FC port
2307 	 */
2308 	if (ret == 0) {
2309 		/*
2310 		 * Acquire global mutex
2311 		 */
2312 		mutex_enter(&fcp_global_mutex);
2313 
2314 		pptr = fcp_port_head;
2315 		while (pptr) {
2316 			if (pptr->port_instance ==
2317 					(uint32_t)fscsi->scsi_fc_port_num)
2318 				break;
2319 			else
2320 				pptr = pptr->port_next;
2321 		}
2322 
2323 		if (pptr == NULL) {
2324 			ret = ENXIO;
2325 		} else {
2326 			/*
2327 			 * fc_ulp_busy_port can raise power
2328 			 *  so, we must not hold any mutexes involved in PM
2329 			 */
2330 			mutex_exit(&fcp_global_mutex);
2331 			ret = fc_ulp_busy_port(pptr->port_fp_handle);
2332 		}
2333 
2334 		if (ret == 0) {
2335 
2336 			/* remember port is busy, so we will release later */
2337 			port_busy = TRUE;
2338 
2339 			/*
2340 			 * If there is a reconfiguration in progress, wait
2341 			 * for it to complete.
2342 			 */
2343 
2344 			fcp_reconfig_wait(pptr);
2345 
2346 			/* reacquire mutexes in order */
2347 			mutex_enter(&fcp_global_mutex);
2348 			mutex_enter(&pptr->port_mutex);
2349 
2350 			/*
2351 			 * Will port accept DMA?
2352 			 */
2353 			nodma = (pptr->port_fcp_dma == FC_NO_DVMA_SPACE)
2354 					? 1 : 0;
2355 
2356 			/*
2357 			 * If init or offline, device not known
2358 			 *
2359 			 * If we are discovering (onlining), we can
2360 			 * NOT obviously provide reliable data about
2361 			 * devices until it is complete
2362 			 */
2363 			if (pptr->port_state &    (FCP_STATE_INIT |
2364 						    FCP_STATE_OFFLINE)) {
2365 				ret = ENXIO;
2366 			} else if (pptr->port_state & FCP_STATE_ONLINING) {
2367 				ret = EBUSY;
2368 			} else {
2369 				/*
2370 				 * Find target from pwwn
2371 				 *
2372 				 * The wwn must be put into a local
2373 				 * variable to ensure alignment.
2374 				 */
2375 				wwn_ptr = (la_wwn_t *)&(fscsi->scsi_fc_pwwn);
2376 				ptgt = fcp_lookup_target(pptr,
2377 						(uchar_t *)wwn_ptr);
2378 
2379 				/*
2380 				 * If target not found,
2381 				 */
2382 				if (ptgt == NULL) {
2383 					/*
2384 					 * Note: Still have global &
2385 					 * port mutexes
2386 					 */
2387 					mutex_exit(&pptr->port_mutex);
2388 					ptgt = fcp_port_create_tgt(pptr,
2389 					    wwn_ptr, &ret, &fc_status,
2390 					    &pkt_state, &pkt_action,
2391 					    &pkt_reason);
2392 					mutex_enter(&pptr->port_mutex);
2393 
2394 					fscsi->scsi_fc_status  = fc_status;
2395 					fscsi->scsi_pkt_state  =
2396 					    (uchar_t)pkt_state;
2397 					fscsi->scsi_pkt_reason = pkt_reason;
2398 					fscsi->scsi_pkt_action =
2399 					    (uchar_t)pkt_action;
2400 
2401 					if (ptgt != NULL) {
2402 						target_created = TRUE;
2403 					} else if (ret == 0) {
2404 						ret = ENOMEM;
2405 					}
2406 				}
2407 
2408 				if (ret == 0) {
2409 					/*
2410 					 * Acquire target
2411 					 */
2412 					mutex_enter(&ptgt->tgt_mutex);
2413 
2414 					/*
2415 					 * If target is mark or busy,
2416 					 * then target can not be used
2417 					 */
2418 					if (ptgt->tgt_state &
2419 						(FCP_TGT_MARK |
2420 						FCP_TGT_BUSY)) {
2421 						ret = EBUSY;
2422 					} else {
2423 						/*
2424 						 * Mark target as busy
2425 						 */
2426 						ptgt->tgt_state |=
2427 							FCP_TGT_BUSY;
2428 					}
2429 
2430 					/*
2431 					 * Release target
2432 					 */
2433 					lcount = pptr->port_link_cnt;
2434 					tcount = ptgt->tgt_change_cnt;
2435 					mutex_exit(&ptgt->tgt_mutex);
2436 				}
2437 			}
2438 
2439 			/*
2440 			 * Release port
2441 			 */
2442 			mutex_exit(&pptr->port_mutex);
2443 		}
2444 
2445 		/*
2446 		 * Release global mutex
2447 		 */
2448 		mutex_exit(&fcp_global_mutex);
2449 	}
2450 
2451 	if (ret == 0) {
2452 		uint64_t belun = BE_64(fscsi->scsi_lun);
2453 
2454 		/*
2455 		 * If it's a target device, find lun from pwwn
2456 		 * The wwn must be put into a local
2457 		 * variable to ensure alignment.
2458 		 */
2459 		mutex_enter(&pptr->port_mutex);
2460 		wwn_ptr = (la_wwn_t *)&(fscsi->scsi_fc_pwwn);
2461 		if (!ptgt->tgt_tcap && ptgt->tgt_icap) {
2462 			/* this is not a target */
2463 			fscsi->scsi_fc_status = FC_DEVICE_NOT_TGT;
2464 			ret = ENXIO;
2465 		} else if ((belun << 16) != 0) {
2466 			/*
2467 			 * Since fcp only support PD and LU addressing method
2468 			 * so far, the last 6 bytes of a valid LUN are expected
2469 			 * to be filled with 00h.
2470 			 */
2471 			fscsi->scsi_fc_status = FC_INVALID_LUN;
2472 			cmn_err(CE_WARN, "fcp: Unsupported LUN addressing"
2473 			    " method 0x%02x with LUN number 0x%016" PRIx64,
2474 			    (uint8_t)(belun >> 62), belun);
2475 			ret = ENXIO;
2476 		} else if ((plun = fcp_lookup_lun(pptr, (uchar_t *)wwn_ptr,
2477 		    (uint16_t)((belun >> 48) & 0x3fff))) == NULL) {
2478 			/*
2479 			 * This is a SCSI target, but no LUN at this
2480 			 * address.
2481 			 *
2482 			 * In the future, we may want to send this to
2483 			 * the target, and let it respond
2484 			 * appropriately
2485 			 */
2486 			ret = ENXIO;
2487 		}
2488 		mutex_exit(&pptr->port_mutex);
2489 	}
2490 
2491 	/*
2492 	 * Finished grabbing external resources
2493 	 * Allocate internal packet (icmd)
2494 	 */
2495 	if (ret == 0) {
2496 		/*
2497 		 * Calc rsp len assuming rsp info included
2498 		 */
2499 		rsp_len = sizeof (struct fcp_rsp) +
2500 		    sizeof (struct fcp_rsp_info) + fscsi->scsi_rqlen;
2501 
2502 		icmd = fcp_icmd_alloc(pptr, ptgt,
2503 		    sizeof (struct fcp_cmd),
2504 		    rsp_len,
2505 		    fscsi->scsi_buflen,
2506 		    nodma,
2507 		    lcount,			/* ipkt_link_cnt */
2508 		    tcount,			/* ipkt_change_cnt */
2509 		    0,				/* cause */
2510 		    FC_INVALID_RSCN_COUNT);	/* invalidate the count */
2511 
2512 		if (icmd == NULL) {
2513 			ret = ENOMEM;
2514 		} else {
2515 			/*
2516 			 * Setup internal packet as sema sync
2517 			 */
2518 			fcp_ipkt_sema_init(icmd);
2519 		}
2520 	}
2521 
2522 	if (ret == 0) {
2523 		/*
2524 		 * Init fpkt pointer for use.
2525 		 */
2526 
2527 		fpkt = icmd->ipkt_fpkt;
2528 
2529 		fpkt->pkt_tran_flags    = FC_TRAN_CLASS3 | FC_TRAN_INTR;
2530 		fpkt->pkt_tran_type	= FC_PKT_FCP_READ; /* only rd for now */
2531 		fpkt->pkt_timeout	= fscsi->scsi_timeout;
2532 
2533 		/*
2534 		 * Init fcmd pointer for use by SCSI command
2535 		 */
2536 
2537 		if (nodma) {
2538 			fcmd = (struct fcp_cmd *)fpkt->pkt_cmd;
2539 		} else {
2540 			fcmd = &fcp_cmd;
2541 		}
2542 		bzero(fcmd, sizeof (struct fcp_cmd));
2543 		ptgt = plun->lun_tgt;
2544 
2545 		lun_string = (uchar_t *)&fscsi->scsi_lun;
2546 
2547 		fcmd->fcp_ent_addr.ent_addr_0 =
2548 		    BE_16(*(uint16_t *)&(lun_string[0]));
2549 		fcmd->fcp_ent_addr.ent_addr_1 =
2550 		    BE_16(*(uint16_t *)&(lun_string[2]));
2551 		fcmd->fcp_ent_addr.ent_addr_2 =
2552 		    BE_16(*(uint16_t *)&(lun_string[4]));
2553 		fcmd->fcp_ent_addr.ent_addr_3 =
2554 		    BE_16(*(uint16_t *)&(lun_string[6]));
2555 
2556 		/*
2557 		 * Setup internal packet(icmd)
2558 		 */
2559 		icmd->ipkt_lun		= plun;
2560 		icmd->ipkt_restart	= 0;
2561 		icmd->ipkt_retries	= 0;
2562 		icmd->ipkt_opcode	= 0;
2563 
2564 		/*
2565 		 * Init the frame HEADER Pointer for use
2566 		 */
2567 		hp = &fpkt->pkt_cmd_fhdr;
2568 
2569 		hp->s_id	= pptr->port_id;
2570 		hp->d_id	= ptgt->tgt_d_id;
2571 		hp->r_ctl	= R_CTL_COMMAND;
2572 		hp->type	= FC_TYPE_SCSI_FCP;
2573 		hp->f_ctl	= F_CTL_SEQ_INITIATIVE | F_CTL_FIRST_SEQ;
2574 		hp->rsvd	= 0;
2575 		hp->seq_id	= 0;
2576 		hp->seq_cnt	= 0;
2577 		hp->ox_id	= 0xffff;
2578 		hp->rx_id	= 0xffff;
2579 		hp->ro		= 0;
2580 
2581 		fcmd->fcp_cntl.cntl_qtype	= FCP_QTYPE_SIMPLE;
2582 		fcmd->fcp_cntl.cntl_read_data	= 1;	/* only rd for now */
2583 		fcmd->fcp_cntl.cntl_write_data	= 0;
2584 		fcmd->fcp_data_len	= fscsi->scsi_buflen;
2585 
2586 		scsi_cdb = (union scsi_cdb *)fcmd->fcp_cdb;
2587 		bcopy((char *)fscsi->scsi_cdbbufaddr, (char *)scsi_cdb,
2588 			fscsi->scsi_cdblen);
2589 
2590 		if (!nodma) {
2591 			FCP_CP_OUT((uint8_t *)fcmd, fpkt->pkt_cmd,
2592 			    fpkt->pkt_cmd_acc, sizeof (struct fcp_cmd));
2593 		}
2594 
2595 		/*
2596 		 * Send SCSI command to FC transport
2597 		 */
2598 
2599 		if (ret == 0) {
2600 			mutex_enter(&ptgt->tgt_mutex);
2601 
2602 			if (!FCP_TGT_STATE_CHANGED(ptgt, icmd)) {
2603 				mutex_exit(&ptgt->tgt_mutex);
2604 				fscsi->scsi_fc_status = xport_retval =
2605 					fc_ulp_transport(pptr->port_fp_handle,
2606 					fpkt);
2607 				if (fscsi->scsi_fc_status != FC_SUCCESS) {
2608 					ret = EIO;
2609 				}
2610 			} else {
2611 				mutex_exit(&ptgt->tgt_mutex);
2612 				ret = EBUSY;
2613 			}
2614 		}
2615 	}
2616 
2617 	/*
2618 	 * Wait for completion only if fc_ulp_transport was called and it
2619 	 * returned a success. This is the only time callback will happen.
2620 	 * Otherwise, there is no point in waiting
2621 	 */
2622 	if ((ret == 0) && (xport_retval == FC_SUCCESS)) {
2623 		ret = fcp_ipkt_sema_wait(icmd);
2624 	}
2625 
2626 	/*
2627 	 * Copy data to IOCTL data structures
2628 	 */
2629 	rsp = NULL;
2630 	if ((ret == 0) && (xport_retval == FC_SUCCESS)) {
2631 		rsp = (struct fcp_rsp *)fpkt->pkt_resp;
2632 
2633 		if (fcp_validate_fcp_response(rsp, pptr) != FC_SUCCESS) {
2634 			fcp_log(CE_WARN, pptr->port_dip,
2635 			    "!SCSI command to d_id=0x%x lun=0x%x"
2636 			    " failed, Bad FCP response values:"
2637 			    " rsvd1=%x, rsvd2=%x, sts-rsvd1=%x,"
2638 			    " sts-rsvd2=%x, rsplen=%x, senselen=%x",
2639 			    ptgt->tgt_d_id, plun->lun_num,
2640 			    rsp->reserved_0, rsp->reserved_1,
2641 			    rsp->fcp_u.fcp_status.reserved_0,
2642 			    rsp->fcp_u.fcp_status.reserved_1,
2643 			    rsp->fcp_response_len, rsp->fcp_sense_len);
2644 
2645 			ret = EIO;
2646 		}
2647 	}
2648 
2649 	if ((ret == 0) && (rsp != NULL)) {
2650 		/*
2651 		 * Calc response lengths
2652 		 */
2653 		sense_len = 0;
2654 		info_len = 0;
2655 
2656 		if (rsp->fcp_u.fcp_status.rsp_len_set) {
2657 			info_len = rsp->fcp_response_len;
2658 		}
2659 
2660 		rsp_info   = (struct fcp_rsp_info *)
2661 		    ((uint8_t *)rsp + sizeof (struct fcp_rsp));
2662 
2663 		/*
2664 		 * Get SCSI status
2665 		 */
2666 		fscsi->scsi_bufstatus = rsp->fcp_u.fcp_status.scsi_status;
2667 		/*
2668 		 * If a lun was just added or removed and the next command
2669 		 * comes through this interface, we need to capture the check
2670 		 * condition so we can discover the new topology.
2671 		 */
2672 		if (fscsi->scsi_bufstatus != STATUS_GOOD &&
2673 		    rsp->fcp_u.fcp_status.sense_len_set) {
2674 			sense_len = rsp->fcp_sense_len;
2675 			rsp_sense  = (caddr_t)((uint8_t *)rsp_info + info_len);
2676 			sense_to = (struct scsi_extended_sense *)rsp_sense;
2677 			if ((FCP_SENSE_REPORTLUN_CHANGED(sense_to)) ||
2678 			    (FCP_SENSE_NO_LUN(sense_to))) {
2679 				reconfig_lun = TRUE;
2680 			}
2681 		}
2682 
2683 		if (fscsi->scsi_bufstatus == STATUS_GOOD && (ptgt != NULL) &&
2684 		    (reconfig_lun || (scsi_cdb->scc_cmd == SCMD_REPORT_LUN))) {
2685 			if (reconfig_lun == FALSE) {
2686 				reconfig_status =
2687 				    fcp_is_reconfig_needed(ptgt, fpkt);
2688 			}
2689 
2690 			if ((reconfig_lun == TRUE) ||
2691 			    (reconfig_status == TRUE)) {
2692 				mutex_enter(&ptgt->tgt_mutex);
2693 				if (ptgt->tgt_tid == NULL) {
2694 					/*
2695 					 * Either we've been notified the
2696 					 * REPORT_LUN data has changed, or
2697 					 * we've determined on our own that
2698 					 * we're out of date.  Kick off
2699 					 * rediscovery.
2700 					 */
2701 					tid = timeout(fcp_reconfigure_luns,
2702 					    (caddr_t)ptgt, drv_usectohz(1));
2703 
2704 					ptgt->tgt_tid = tid;
2705 					ptgt->tgt_state |= FCP_TGT_BUSY;
2706 					ret = EBUSY;
2707 					reconfig_pending = TRUE;
2708 				}
2709 				mutex_exit(&ptgt->tgt_mutex);
2710 			}
2711 		}
2712 
2713 		/*
2714 		 * Calc residuals and buffer lengths
2715 		 */
2716 
2717 		if (ret == 0) {
2718 			buf_len = fscsi->scsi_buflen;
2719 			fscsi->scsi_bufresid	= 0;
2720 			if (rsp->fcp_u.fcp_status.resid_under) {
2721 				if (rsp->fcp_resid <= fscsi->scsi_buflen) {
2722 					fscsi->scsi_bufresid = rsp->fcp_resid;
2723 				} else {
2724 					cmn_err(CE_WARN, "fcp: bad residue %x "
2725 					    "for txfer len %x", rsp->fcp_resid,
2726 					    fscsi->scsi_buflen);
2727 					fscsi->scsi_bufresid =
2728 					    fscsi->scsi_buflen;
2729 				}
2730 				buf_len -= fscsi->scsi_bufresid;
2731 			}
2732 			if (rsp->fcp_u.fcp_status.resid_over) {
2733 				fscsi->scsi_bufresid = -rsp->fcp_resid;
2734 			}
2735 
2736 			fscsi->scsi_rqresid	= fscsi->scsi_rqlen - sense_len;
2737 			if (fscsi->scsi_rqlen < sense_len) {
2738 				sense_len = fscsi->scsi_rqlen;
2739 			}
2740 
2741 			fscsi->scsi_fc_rspcode	= 0;
2742 			if (rsp->fcp_u.fcp_status.rsp_len_set) {
2743 				fscsi->scsi_fc_rspcode	= rsp_info->rsp_code;
2744 			}
2745 			fscsi->scsi_pkt_state	= fpkt->pkt_state;
2746 			fscsi->scsi_pkt_action	= fpkt->pkt_action;
2747 			fscsi->scsi_pkt_reason	= fpkt->pkt_reason;
2748 
2749 			/*
2750 			 * Copy data and request sense
2751 			 *
2752 			 * Data must be copied by using the FCP_CP_IN macro.
2753 			 * This will ensure the proper byte order since the data
2754 			 * is being copied directly from the memory mapped
2755 			 * device register.
2756 			 *
2757 			 * The response (and request sense) will be in the
2758 			 * correct byte order.  No special copy is necessary.
2759 			 */
2760 
2761 			if (buf_len) {
2762 				FCP_CP_IN(fpkt->pkt_data,
2763 				    fscsi->scsi_bufaddr,
2764 				    fpkt->pkt_data_acc,
2765 				    buf_len);
2766 			}
2767 			bcopy((void *)rsp_sense,
2768 				(void *)fscsi->scsi_rqbufaddr,
2769 				sense_len);
2770 		}
2771 	}
2772 
2773 	/*
2774 	 * Cleanup transport data structures if icmd was alloc-ed
2775 	 * So, cleanup happens in the same thread that icmd was alloc-ed
2776 	 */
2777 	if (icmd != NULL) {
2778 		fcp_ipkt_sema_cleanup(icmd);
2779 	}
2780 
2781 	/* restore pm busy/idle status */
2782 	if (port_busy) {
2783 		fc_ulp_idle_port(pptr->port_fp_handle);
2784 	}
2785 
2786 	/*
2787 	 * Cleanup target.  if a reconfig is pending, don't clear the BUSY
2788 	 * flag, it'll be cleared when the reconfig is complete.
2789 	 */
2790 	if ((ptgt != NULL) && !reconfig_pending) {
2791 		/*
2792 		 * If target was created,
2793 		 */
2794 		if (target_created) {
2795 			mutex_enter(&ptgt->tgt_mutex);
2796 			ptgt->tgt_state &= ~FCP_TGT_BUSY;
2797 			mutex_exit(&ptgt->tgt_mutex);
2798 		} else {
2799 			/*
2800 			 * De-mark target as busy
2801 			 */
2802 			mutex_enter(&ptgt->tgt_mutex);
2803 			ptgt->tgt_state &= ~FCP_TGT_BUSY;
2804 			mutex_exit(&ptgt->tgt_mutex);
2805 		}
2806 	}
2807 	return (ret);
2808 }
2809 
2810 
2811 static int
2812 fcp_is_reconfig_needed(struct fcp_tgt *ptgt,
2813 	fc_packet_t	*fpkt)
2814 {
2815 	uchar_t			*lun_string;
2816 	uint16_t		lun_num, i;
2817 	int			num_luns;
2818 	int			actual_luns;
2819 	int			num_masked_luns;
2820 	int			lun_buflen;
2821 	struct fcp_lun	*plun	= NULL;
2822 	struct fcp_reportlun_resp 	*report_lun;
2823 	uint8_t			reconfig_needed = FALSE;
2824 	uint8_t			lun_exists = FALSE;
2825 
2826 	report_lun = kmem_zalloc(fpkt->pkt_datalen, KM_SLEEP);
2827 
2828 	FCP_CP_IN(fpkt->pkt_data, report_lun, fpkt->pkt_data_acc,
2829 	    fpkt->pkt_datalen);
2830 
2831 	/* get number of luns (which is supplied as LUNS * 8) */
2832 	num_luns = BE_32(report_lun->num_lun) >> 3;
2833 
2834 	/*
2835 	 * Figure out exactly how many lun strings our response buffer
2836 	 * can hold.
2837 	 */
2838 	lun_buflen = (fpkt->pkt_datalen -
2839 	    2 * sizeof (uint32_t)) / sizeof (longlong_t);
2840 
2841 	/*
2842 	 * Is our response buffer full or not? We don't want to
2843 	 * potentially walk beyond the number of luns we have.
2844 	 */
2845 	if (num_luns <= lun_buflen) {
2846 		actual_luns = num_luns;
2847 	} else {
2848 		actual_luns = lun_buflen;
2849 	}
2850 
2851 	mutex_enter(&ptgt->tgt_mutex);
2852 
2853 	/* Scan each lun to see if we have masked it. */
2854 	num_masked_luns = 0;
2855 	if (fcp_lun_blacklist != NULL) {
2856 		for (i = 0; i < num_luns; i++) {
2857 			lun_string = (uchar_t *)&(report_lun->lun_string[i]);
2858 			switch (lun_string[0] & 0xC0) {
2859 			case FCP_LUN_ADDRESSING:
2860 			case FCP_PD_ADDRESSING:
2861 				lun_num = ((lun_string[0] & 0x3F) << 8)
2862 				    | lun_string[1];
2863 				if (fcp_should_mask(&ptgt->tgt_port_wwn,
2864 				    lun_num) == TRUE) {
2865 					num_masked_luns++;
2866 				}
2867 				break;
2868 			default:
2869 				break;
2870 			}
2871 		}
2872 	}
2873 
2874 	/*
2875 	 * The quick and easy check.  If the number of LUNs reported
2876 	 * doesn't match the number we currently know about, we need
2877 	 * to reconfigure.
2878 	 */
2879 	if (num_luns && num_luns != (ptgt->tgt_lun_cnt + num_masked_luns)) {
2880 		mutex_exit(&ptgt->tgt_mutex);
2881 		kmem_free(report_lun, fpkt->pkt_datalen);
2882 		return (TRUE);
2883 	}
2884 
2885 	/*
2886 	 * If the quick and easy check doesn't turn up anything, we walk
2887 	 * the list of luns from the REPORT_LUN response and look for
2888 	 * any luns we don't know about.  If we find one, we know we need
2889 	 * to reconfigure. We will skip LUNs that are masked because of the
2890 	 * blacklist.
2891 	 */
2892 	for (i = 0; i < actual_luns; i++) {
2893 		lun_string = (uchar_t *)&(report_lun->lun_string[i]);
2894 		lun_exists = FALSE;
2895 		switch (lun_string[0] & 0xC0) {
2896 		case FCP_LUN_ADDRESSING:
2897 		case FCP_PD_ADDRESSING:
2898 			lun_num = ((lun_string[0] & 0x3F) << 8) | lun_string[1];
2899 
2900 			if ((fcp_lun_blacklist != NULL) && (fcp_should_mask(
2901 			    &ptgt->tgt_port_wwn, lun_num) == TRUE)) {
2902 				lun_exists = TRUE;
2903 				break;
2904 			}
2905 
2906 			for (plun = ptgt->tgt_lun; plun;
2907 			    plun = plun->lun_next) {
2908 				if (plun->lun_num == lun_num) {
2909 					lun_exists = TRUE;
2910 					break;
2911 				}
2912 			}
2913 			break;
2914 		default:
2915 			break;
2916 		}
2917 
2918 		if (lun_exists == FALSE) {
2919 			reconfig_needed = TRUE;
2920 			break;
2921 		}
2922 	}
2923 
2924 	mutex_exit(&ptgt->tgt_mutex);
2925 	kmem_free(report_lun, fpkt->pkt_datalen);
2926 
2927 	return (reconfig_needed);
2928 }
2929 
2930 /*
2931  * This function is called by fcp_handle_page83 and uses inquiry response data
2932  * stored in plun->lun_inq to determine whether or not a device is a member of
2933  * the table fcp_symmetric_disk_table_size. We return 0 if it is in the table,
2934  * otherwise 1.
2935  */
2936 static int
2937 fcp_symmetric_device_probe(struct fcp_lun *plun)
2938 {
2939 	struct scsi_inquiry	*stdinq = &plun->lun_inq;
2940 	char			*devidptr;
2941 	int			i, len;
2942 
2943 	for (i = 0; i < fcp_symmetric_disk_table_size; i++) {
2944 		devidptr = fcp_symmetric_disk_table[i];
2945 		len = (int)strlen(devidptr);
2946 
2947 		if (bcmp(stdinq->inq_vid, devidptr, len) == 0) {
2948 			return (0);
2949 		}
2950 	}
2951 	return (1);
2952 }
2953 
2954 
2955 /*
2956  * This function is called by fcp_ioctl for the FCP_STATE_COUNT ioctl
2957  * It basically returns the current count of # of state change callbacks
2958  * i.e the value of tgt_change_cnt.
2959  *
2960  * INPUT:
2961  *   fcp_ioctl.fp_minor -> The minor # of the fp port
2962  *   fcp_ioctl.listlen  -> 1
2963  *   fcp_ioctl.list     -> Pointer to a 32 bit integer
2964  */
2965 /*ARGSUSED2*/
2966 static int
2967 fcp_get_statec_count(struct fcp_ioctl *data, int mode, int *rval)
2968 {
2969 	int			ret;
2970 	uint32_t		link_cnt;
2971 	struct fcp_ioctl	fioctl;
2972 	struct fcp_port	*pptr = NULL;
2973 
2974 	if ((ret = fcp_copyin_fcp_ioctl_data(data, mode, rval, &fioctl,
2975 			    &pptr)) != 0) {
2976 		return (ret);
2977 	}
2978 
2979 	ASSERT(pptr != NULL);
2980 
2981 	if (fioctl.listlen != 1)
2982 		return (EINVAL);
2983 
2984 	mutex_enter(&pptr->port_mutex);
2985 	if (pptr->port_state & FCP_STATE_OFFLINE) {
2986 		mutex_exit(&pptr->port_mutex);
2987 		return (ENXIO);
2988 	}
2989 
2990 	/*
2991 	 * FCP_STATE_INIT is set in 2 cases (not sure why it is overloaded):
2992 	 * When the fcp initially attaches to the port and there are nothing
2993 	 * hanging out of the port or if there was a repeat offline state change
2994 	 * callback (refer fcp_statec_callback() FC_STATE_OFFLINE case).
2995 	 * In the latter case, port_tmp_cnt will be non-zero and that is how we
2996 	 * will differentiate the 2 cases.
2997 	 */
2998 	if ((pptr->port_state & FCP_STATE_INIT) && pptr->port_tmp_cnt) {
2999 		mutex_exit(&pptr->port_mutex);
3000 		return (ENXIO);
3001 	}
3002 
3003 	link_cnt = pptr->port_link_cnt;
3004 	mutex_exit(&pptr->port_mutex);
3005 
3006 	if (ddi_copyout(&link_cnt, fioctl.list, (sizeof (uint32_t)), mode)) {
3007 		return (EFAULT);
3008 	}
3009 
3010 #ifdef	_MULTI_DATAMODEL
3011 	switch (ddi_model_convert_from(mode & FMODELS)) {
3012 	case DDI_MODEL_ILP32: {
3013 		struct fcp32_ioctl f32_ioctl;
3014 
3015 		f32_ioctl.fp_minor = fioctl.fp_minor;
3016 		f32_ioctl.listlen = fioctl.listlen;
3017 		f32_ioctl.list = (caddr32_t)(long)fioctl.list;
3018 		if (ddi_copyout((void *)&f32_ioctl, (void *)data,
3019 		    sizeof (struct fcp32_ioctl), mode)) {
3020 			return (EFAULT);
3021 		}
3022 		break;
3023 	}
3024 	case DDI_MODEL_NONE:
3025 		if (ddi_copyout((void *)&fioctl, (void *)data,
3026 		    sizeof (struct fcp_ioctl), mode)) {
3027 			return (EFAULT);
3028 		}
3029 		break;
3030 	}
3031 #else	/* _MULTI_DATAMODEL */
3032 
3033 	if (ddi_copyout((void *)&fioctl, (void *)data,
3034 	    sizeof (struct fcp_ioctl), mode)) {
3035 		return (EFAULT);
3036 	}
3037 #endif	/* _MULTI_DATAMODEL */
3038 
3039 	return (0);
3040 }
3041 
3042 /*
3043  * This function copies the fcp_ioctl structure passed in from user land
3044  * into kernel land. Handles 32 bit applications.
3045  */
3046 /*ARGSUSED*/
3047 static int
3048 fcp_copyin_fcp_ioctl_data(struct fcp_ioctl *data, int mode, int *rval,
3049     struct fcp_ioctl *fioctl, struct fcp_port **pptr)
3050 {
3051 	struct fcp_port	*t_pptr;
3052 
3053 #ifdef	_MULTI_DATAMODEL
3054 	switch (ddi_model_convert_from(mode & FMODELS)) {
3055 	case DDI_MODEL_ILP32: {
3056 		struct fcp32_ioctl f32_ioctl;
3057 
3058 		if (ddi_copyin((void *)data, (void *)&f32_ioctl,
3059 		    sizeof (struct fcp32_ioctl), mode)) {
3060 			return (EFAULT);
3061 		}
3062 		fioctl->fp_minor = f32_ioctl.fp_minor;
3063 		fioctl->listlen = f32_ioctl.listlen;
3064 		fioctl->list = (caddr_t)(long)f32_ioctl.list;
3065 		break;
3066 	}
3067 	case DDI_MODEL_NONE:
3068 		if (ddi_copyin((void *)data, (void *)fioctl,
3069 		    sizeof (struct fcp_ioctl), mode)) {
3070 			return (EFAULT);
3071 		}
3072 		break;
3073 	}
3074 
3075 #else	/* _MULTI_DATAMODEL */
3076 	if (ddi_copyin((void *)data, (void *)fioctl,
3077 	    sizeof (struct fcp_ioctl), mode)) {
3078 		return (EFAULT);
3079 	}
3080 #endif	/* _MULTI_DATAMODEL */
3081 
3082 	/*
3083 	 * Right now we can assume that the minor number matches with
3084 	 * this instance of fp. If this changes we will need to
3085 	 * revisit this logic.
3086 	 */
3087 	mutex_enter(&fcp_global_mutex);
3088 	t_pptr = fcp_port_head;
3089 	while (t_pptr) {
3090 		if (t_pptr->port_instance == (uint32_t)fioctl->fp_minor)
3091 			break;
3092 		else
3093 			t_pptr = t_pptr->port_next;
3094 	}
3095 	*pptr = t_pptr;
3096 	mutex_exit(&fcp_global_mutex);
3097 	if (t_pptr == NULL)
3098 		return (ENXIO);
3099 
3100 	return (0);
3101 }
3102 
3103 /*
3104  *     Function: fcp_port_create_tgt
3105  *
3106  *  Description: As the name suggest this function creates the target context
3107  *		 specified by the the WWN provided by the caller.  If the
3108  *		 creation goes well and the target is known by fp/fctl a PLOGI
3109  *		 followed by a PRLI are issued.
3110  *
3111  *     Argument: pptr		fcp port structure
3112  *		 pwwn		WWN of the target
3113  *		 ret_val	Address of the return code.  It could be:
3114  *				EIO, ENOMEM or 0.
3115  *		 fc_status	PLOGI or PRLI status completion
3116  *		 fc_pkt_state	PLOGI or PRLI state completion
3117  *		 fc_pkt_reason	PLOGI or PRLI reason completion
3118  *		 fc_pkt_action	PLOGI or PRLI action completion
3119  *
3120  * Return Value: NULL if it failed
3121  *		 Target structure address if it succeeds
3122  */
3123 static struct fcp_tgt *
3124 fcp_port_create_tgt(struct fcp_port *pptr, la_wwn_t *pwwn, int *ret_val,
3125     int *fc_status, int *fc_pkt_state, int *fc_pkt_reason, int *fc_pkt_action)
3126 {
3127 	struct fcp_tgt	*ptgt = NULL;
3128 	fc_portmap_t 		devlist;
3129 	int			lcount;
3130 	int			error;
3131 
3132 	*ret_val = 0;
3133 
3134 	/*
3135 	 * Check FC port device & get port map
3136 	 */
3137 	if (fc_ulp_get_remote_port(pptr->port_fp_handle, pwwn,
3138 	    &error, 1) == NULL) {
3139 		*ret_val = EIO;
3140 	} else {
3141 		if (fc_ulp_pwwn_to_portmap(pptr->port_fp_handle, pwwn,
3142 		    &devlist) != FC_SUCCESS) {
3143 			*ret_val = EIO;
3144 		}
3145 	}
3146 
3147 	/* Set port map flags */
3148 	devlist.map_type = PORT_DEVICE_USER_CREATE;
3149 
3150 	/* Allocate target */
3151 	if (*ret_val == 0) {
3152 		lcount = pptr->port_link_cnt;
3153 		ptgt = fcp_alloc_tgt(pptr, &devlist, lcount);
3154 		if (ptgt == NULL) {
3155 			fcp_log(CE_WARN, pptr->port_dip,
3156 			    "!FC target allocation failed");
3157 			*ret_val = ENOMEM;
3158 		} else {
3159 			/* Setup target */
3160 			mutex_enter(&ptgt->tgt_mutex);
3161 
3162 			ptgt->tgt_statec_cause	= FCP_CAUSE_TGT_CHANGE;
3163 			ptgt->tgt_tmp_cnt	= 1;
3164 			ptgt->tgt_d_id		= devlist.map_did.port_id;
3165 			ptgt->tgt_hard_addr	=
3166 			    devlist.map_hard_addr.hard_addr;
3167 			ptgt->tgt_pd_handle	= devlist.map_pd;
3168 			ptgt->tgt_fca_dev	= NULL;
3169 
3170 			bcopy(&devlist.map_nwwn, &ptgt->tgt_node_wwn.raw_wwn[0],
3171 			    FC_WWN_SIZE);
3172 			bcopy(&devlist.map_pwwn, &ptgt->tgt_port_wwn.raw_wwn[0],
3173 			    FC_WWN_SIZE);
3174 
3175 			mutex_exit(&ptgt->tgt_mutex);
3176 		}
3177 	}
3178 
3179 	/* Release global mutex for PLOGI and PRLI */
3180 	mutex_exit(&fcp_global_mutex);
3181 
3182 	/* Send PLOGI (If necessary) */
3183 	if (*ret_val == 0) {
3184 		*ret_val = fcp_tgt_send_plogi(ptgt, fc_status,
3185 		    fc_pkt_state, fc_pkt_reason, fc_pkt_action);
3186 	}
3187 
3188 	/* Send PRLI (If necessary) */
3189 	if (*ret_val == 0) {
3190 		*ret_val = fcp_tgt_send_prli(ptgt, fc_status,
3191 		    fc_pkt_state, fc_pkt_reason, fc_pkt_action);
3192 	}
3193 
3194 	mutex_enter(&fcp_global_mutex);
3195 
3196 	return (ptgt);
3197 }
3198 
3199 /*
3200  *     Function: fcp_tgt_send_plogi
3201  *
3202  *  Description: This function sends a PLOGI to the target specified by the
3203  *		 caller and waits till it completes.
3204  *
3205  *     Argument: ptgt		Target to send the plogi to.
3206  *		 fc_status	Status returned by fp/fctl in the PLOGI request.
3207  *		 fc_pkt_state	State returned by fp/fctl in the PLOGI request.
3208  *		 fc_pkt_reason	Reason returned by fp/fctl in the PLOGI request.
3209  *		 fc_pkt_action	Action returned by fp/fctl in the PLOGI request.
3210  *
3211  * Return Value: 0
3212  *		 ENOMEM
3213  *		 EIO
3214  *
3215  *      Context: User context.
3216  */
3217 static int
3218 fcp_tgt_send_plogi(struct fcp_tgt *ptgt, int *fc_status, int *fc_pkt_state,
3219     int *fc_pkt_reason, int *fc_pkt_action)
3220 {
3221 	struct fcp_port	*pptr;
3222 	struct fcp_ipkt	*icmd;
3223 	struct fc_packet	*fpkt;
3224 	fc_frame_hdr_t		*hp;
3225 	struct la_els_logi	logi;
3226 	int			tcount;
3227 	int			lcount;
3228 	int			ret, login_retval = ~FC_SUCCESS;
3229 
3230 	ret = 0;
3231 
3232 	pptr = ptgt->tgt_port;
3233 
3234 	lcount = pptr->port_link_cnt;
3235 	tcount = ptgt->tgt_change_cnt;
3236 
3237 	/* Alloc internal packet */
3238 	icmd = fcp_icmd_alloc(pptr, ptgt, sizeof (la_els_logi_t),
3239 	    sizeof (la_els_logi_t), 0, 0, lcount, tcount, 0,
3240 	    FC_INVALID_RSCN_COUNT);
3241 
3242 	if (icmd == NULL) {
3243 		ret = ENOMEM;
3244 	} else {
3245 		/*
3246 		 * Setup internal packet as sema sync
3247 		 */
3248 		fcp_ipkt_sema_init(icmd);
3249 
3250 		/*
3251 		 * Setup internal packet (icmd)
3252 		 */
3253 		icmd->ipkt_lun		= NULL;
3254 		icmd->ipkt_restart	= 0;
3255 		icmd->ipkt_retries	= 0;
3256 		icmd->ipkt_opcode	= LA_ELS_PLOGI;
3257 
3258 		/*
3259 		 * Setup fc_packet
3260 		 */
3261 		fpkt = icmd->ipkt_fpkt;
3262 
3263 		fpkt->pkt_tran_flags	= FC_TRAN_CLASS3 | FC_TRAN_INTR;
3264 		fpkt->pkt_tran_type	= FC_PKT_EXCHANGE;
3265 		fpkt->pkt_timeout	= FCP_ELS_TIMEOUT;
3266 
3267 		/*
3268 		 * Setup FC frame header
3269 		 */
3270 		hp = &fpkt->pkt_cmd_fhdr;
3271 
3272 		hp->s_id	= pptr->port_id;	/* source ID */
3273 		hp->d_id	= ptgt->tgt_d_id;	/* dest ID */
3274 		hp->r_ctl	= R_CTL_ELS_REQ;
3275 		hp->type	= FC_TYPE_EXTENDED_LS;
3276 		hp->f_ctl	= F_CTL_SEQ_INITIATIVE | F_CTL_FIRST_SEQ;
3277 		hp->seq_id	= 0;
3278 		hp->rsvd	= 0;
3279 		hp->df_ctl	= 0;
3280 		hp->seq_cnt	= 0;
3281 		hp->ox_id	= 0xffff;		/* i.e. none */
3282 		hp->rx_id	= 0xffff;		/* i.e. none */
3283 		hp->ro		= 0;
3284 
3285 		/*
3286 		 * Setup PLOGI
3287 		 */
3288 		bzero(&logi, sizeof (struct la_els_logi));
3289 		logi.ls_code.ls_code = LA_ELS_PLOGI;
3290 
3291 		FCP_CP_OUT((uint8_t *)&logi, fpkt->pkt_cmd,
3292 		    fpkt->pkt_cmd_acc, sizeof (struct la_els_logi));
3293 
3294 		/*
3295 		 * Send PLOGI
3296 		 */
3297 		*fc_status = login_retval =
3298 		    fc_ulp_login(pptr->port_fp_handle, &fpkt, 1);
3299 		if (*fc_status != FC_SUCCESS) {
3300 			ret = EIO;
3301 		}
3302 	}
3303 
3304 	/*
3305 	 * Wait for completion
3306 	 */
3307 	if ((ret == 0) && (login_retval == FC_SUCCESS)) {
3308 		ret = fcp_ipkt_sema_wait(icmd);
3309 
3310 		*fc_pkt_state	= fpkt->pkt_state;
3311 		*fc_pkt_reason	= fpkt->pkt_reason;
3312 		*fc_pkt_action	= fpkt->pkt_action;
3313 	}
3314 
3315 	/*
3316 	 * Cleanup transport data structures if icmd was alloc-ed AND if there
3317 	 * is going to be no callback (i.e if fc_ulp_login() failed).
3318 	 * Otherwise, cleanup happens in callback routine.
3319 	 */
3320 	if (icmd != NULL) {
3321 		fcp_ipkt_sema_cleanup(icmd);
3322 	}
3323 
3324 	return (ret);
3325 }
3326 
3327 /*
3328  *     Function: fcp_tgt_send_prli
3329  *
3330  *  Description: Does nothing as of today.
3331  *
3332  *     Argument: ptgt		Target to send the prli to.
3333  *		 fc_status	Status returned by fp/fctl in the PRLI request.
3334  *		 fc_pkt_state	State returned by fp/fctl in the PRLI request.
3335  *		 fc_pkt_reason	Reason returned by fp/fctl in the PRLI request.
3336  *		 fc_pkt_action	Action returned by fp/fctl in the PRLI request.
3337  *
3338  * Return Value: 0
3339  */
3340 /*ARGSUSED*/
3341 static int
3342 fcp_tgt_send_prli(struct fcp_tgt *ptgt, int *fc_status, int *fc_pkt_state,
3343     int *fc_pkt_reason, int *fc_pkt_action)
3344 {
3345 	return (0);
3346 }
3347 
3348 /*
3349  *     Function: fcp_ipkt_sema_init
3350  *
3351  *  Description: Initializes the semaphore contained in the internal packet.
3352  *
3353  *     Argument: icmd	Internal packet the semaphore of which must be
3354  *			initialized.
3355  *
3356  * Return Value: None
3357  *
3358  *      Context: User context only.
3359  */
3360 static void
3361 fcp_ipkt_sema_init(struct fcp_ipkt *icmd)
3362 {
3363 	struct fc_packet	*fpkt;
3364 
3365 	fpkt = icmd->ipkt_fpkt;
3366 
3367 	/* Create semaphore for sync */
3368 	sema_init(&(icmd->ipkt_sema), 0, NULL, SEMA_DRIVER, NULL);
3369 
3370 	/* Setup the completion callback */
3371 	fpkt->pkt_comp = fcp_ipkt_sema_callback;
3372 }
3373 
3374 /*
3375  *     Function: fcp_ipkt_sema_wait
3376  *
3377  *  Description: Wait on the semaphore embedded in the internal packet.  The
3378  *		 semaphore is released in the callback.
3379  *
3380  *     Argument: icmd	Internal packet to wait on for completion.
3381  *
3382  * Return Value: 0
3383  *		 EIO
3384  *		 EBUSY
3385  *		 EAGAIN
3386  *
3387  *      Context: User context only.
3388  *
3389  * This function does a conversion between the field pkt_state of the fc_packet
3390  * embedded in the internal packet (icmd) and the code it returns.
3391  */
3392 static int
3393 fcp_ipkt_sema_wait(struct fcp_ipkt *icmd)
3394 {
3395 	struct fc_packet	*fpkt;
3396 	int	ret;
3397 
3398 	ret = EIO;
3399 	fpkt = icmd->ipkt_fpkt;
3400 
3401 	/*
3402 	 * Wait on semaphore
3403 	 */
3404 	sema_p(&(icmd->ipkt_sema));
3405 
3406 	/*
3407 	 * Check the status of the FC packet
3408 	 */
3409 	switch (fpkt->pkt_state) {
3410 	case FC_PKT_SUCCESS:
3411 		ret = 0;
3412 		break;
3413 	case FC_PKT_LOCAL_RJT:
3414 		switch (fpkt->pkt_reason) {
3415 		case FC_REASON_SEQ_TIMEOUT:
3416 		case FC_REASON_RX_BUF_TIMEOUT:
3417 			ret = EAGAIN;
3418 			break;
3419 		case FC_REASON_PKT_BUSY:
3420 			ret = EBUSY;
3421 			break;
3422 		}
3423 		break;
3424 	case FC_PKT_TIMEOUT:
3425 		ret = EAGAIN;
3426 		break;
3427 	case FC_PKT_LOCAL_BSY:
3428 	case FC_PKT_TRAN_BSY:
3429 	case FC_PKT_NPORT_BSY:
3430 	case FC_PKT_FABRIC_BSY:
3431 		ret = EBUSY;
3432 		break;
3433 	case FC_PKT_LS_RJT:
3434 	case FC_PKT_BA_RJT:
3435 		switch (fpkt->pkt_reason) {
3436 		case FC_REASON_LOGICAL_BSY:
3437 			ret = EBUSY;
3438 			break;
3439 		}
3440 		break;
3441 	case FC_PKT_FS_RJT:
3442 		switch (fpkt->pkt_reason) {
3443 		case FC_REASON_FS_LOGICAL_BUSY:
3444 			ret = EBUSY;
3445 			break;
3446 		}
3447 		break;
3448 	}
3449 
3450 	return (ret);
3451 }
3452 
3453 /*
3454  *     Function: fcp_ipkt_sema_callback
3455  *
3456  *  Description: Registered as the completion callback function for the FC
3457  *		 transport when the ipkt semaphore is used for sync. This will
3458  *		 cleanup the used data structures, if necessary and wake up
3459  *		 the user thread to complete the transaction.
3460  *
3461  *     Argument: fpkt	FC packet (points to the icmd)
3462  *
3463  * Return Value: None
3464  *
3465  *      Context: User context only
3466  */
3467 static void
3468 fcp_ipkt_sema_callback(struct fc_packet *fpkt)
3469 {
3470 	struct fcp_ipkt	*icmd;
3471 
3472 	icmd = (struct fcp_ipkt *)fpkt->pkt_ulp_private;
3473 
3474 	/*
3475 	 * Wake up user thread
3476 	 */
3477 	sema_v(&(icmd->ipkt_sema));
3478 }
3479 
3480 /*
3481  *     Function: fcp_ipkt_sema_cleanup
3482  *
3483  *  Description: Called to cleanup (if necessary) the data structures used
3484  *		 when ipkt sema is used for sync.  This function will detect
3485  *		 whether the caller is the last thread (via counter) and
3486  *		 cleanup only if necessary.
3487  *
3488  *     Argument: icmd	Internal command packet
3489  *
3490  * Return Value: None
3491  *
3492  *      Context: User context only
3493  */
3494 static void
3495 fcp_ipkt_sema_cleanup(struct fcp_ipkt *icmd)
3496 {
3497 	struct fcp_tgt	*ptgt;
3498 	struct fcp_port	*pptr;
3499 
3500 	ptgt = icmd->ipkt_tgt;
3501 	pptr = icmd->ipkt_port;
3502 
3503 	/*
3504 	 * Acquire data structure
3505 	 */
3506 	mutex_enter(&ptgt->tgt_mutex);
3507 
3508 	/*
3509 	 * Destroy semaphore
3510 	 */
3511 	sema_destroy(&(icmd->ipkt_sema));
3512 
3513 	/*
3514 	 * Cleanup internal packet
3515 	 */
3516 	mutex_exit(&ptgt->tgt_mutex);
3517 	fcp_icmd_free(pptr, icmd);
3518 }
3519 
3520 /*
3521  *     Function: fcp_port_attach
3522  *
3523  *  Description: Called by the transport framework to resume, suspend or
3524  *		 attach a new port.
3525  *
3526  *     Argument: ulph		Port handle
3527  *		 *pinfo		Port information
3528  *		 cmd		Command
3529  *		 s_id		Port ID
3530  *
3531  * Return Value: FC_FAILURE or FC_SUCCESS
3532  */
3533 /*ARGSUSED*/
3534 static int
3535 fcp_port_attach(opaque_t ulph, fc_ulp_port_info_t *pinfo,
3536     fc_attach_cmd_t cmd, uint32_t s_id)
3537 {
3538 	int	instance;
3539 	int	res = FC_FAILURE; /* default result */
3540 
3541 	ASSERT(pinfo != NULL);
3542 
3543 	instance = ddi_get_instance(pinfo->port_dip);
3544 
3545 	switch (cmd) {
3546 	case FC_CMD_ATTACH:
3547 		/*
3548 		 * this port instance attaching for the first time (or after
3549 		 * being detached before)
3550 		 */
3551 		if (fcp_handle_port_attach(ulph, pinfo, s_id,
3552 		    instance) == DDI_SUCCESS) {
3553 			res = FC_SUCCESS;
3554 		} else {
3555 			ASSERT(ddi_get_soft_state(fcp_softstate,
3556 			    instance) == NULL);
3557 		}
3558 		break;
3559 
3560 	case FC_CMD_RESUME:
3561 	case FC_CMD_POWER_UP:
3562 		/*
3563 		 * this port instance was attached and the suspended and
3564 		 * will now be resumed
3565 		 */
3566 		if (fcp_handle_port_resume(ulph, pinfo, s_id, cmd,
3567 		    instance) == DDI_SUCCESS) {
3568 			res = FC_SUCCESS;
3569 		}
3570 		break;
3571 
3572 	default:
3573 		/* shouldn't happen */
3574 		FCP_TRACE(fcp_logq, "fcp",
3575 		    fcp_trace, FCP_BUF_LEVEL_2, 0,
3576 		    "port_attach: unknown cmdcommand: %d", cmd);
3577 		break;
3578 	}
3579 
3580 	/* return result */
3581 	FCP_DTRACE(fcp_logq, "fcp", fcp_trace,
3582 	    FCP_BUF_LEVEL_1, 0, "fcp_port_attach returning %d", res);
3583 
3584 	return (res);
3585 }
3586 
3587 
3588 /*
3589  * detach or suspend this port instance
3590  *
3591  * acquires and releases the global mutex
3592  *
3593  * acquires and releases the mutex for this port
3594  *
3595  * acquires and releases the hotplug mutex for this port
3596  */
3597 /*ARGSUSED*/
3598 static int
3599 fcp_port_detach(opaque_t ulph, fc_ulp_port_info_t *info,
3600     fc_detach_cmd_t cmd)
3601 {
3602 	int			flag;
3603 	int			instance;
3604 	struct fcp_port		*pptr;
3605 
3606 	instance = ddi_get_instance(info->port_dip);
3607 	pptr = ddi_get_soft_state(fcp_softstate, instance);
3608 
3609 	switch (cmd) {
3610 	case FC_CMD_SUSPEND:
3611 		FCP_DTRACE(fcp_logq, "fcp",
3612 		    fcp_trace, FCP_BUF_LEVEL_8, 0,
3613 		    "port suspend called for port %d", instance);
3614 		flag = FCP_STATE_SUSPENDED;
3615 		break;
3616 
3617 	case FC_CMD_POWER_DOWN:
3618 		FCP_DTRACE(fcp_logq, "fcp",
3619 		    fcp_trace, FCP_BUF_LEVEL_8, 0,
3620 		    "port power down called for port %d", instance);
3621 		flag = FCP_STATE_POWER_DOWN;
3622 		break;
3623 
3624 	case FC_CMD_DETACH:
3625 		FCP_DTRACE(fcp_logq, "fcp",
3626 		    fcp_trace, FCP_BUF_LEVEL_8, 0,
3627 		    "port detach called for port %d", instance);
3628 		flag = FCP_STATE_DETACHING;
3629 		break;
3630 
3631 	default:
3632 		/* shouldn't happen */
3633 		return (FC_FAILURE);
3634 	}
3635 	FCP_DTRACE(fcp_logq, "fcp", fcp_trace,
3636 	    FCP_BUF_LEVEL_1, 0, "fcp_port_detach returning");
3637 
3638 	return (fcp_handle_port_detach(pptr, flag, instance));
3639 }
3640 
3641 
3642 /*
3643  * called for ioctls on the transport's devctl interface, and the transport
3644  * has passed it to us
3645  *
3646  * this will only be called for device control ioctls (i.e. hotplugging stuff)
3647  *
3648  * return FC_SUCCESS if we decide to claim the ioctl,
3649  * else return FC_UNCLAIMED
3650  *
3651  * *rval is set iff we decide to claim the ioctl
3652  */
3653 /*ARGSUSED*/
3654 static int
3655 fcp_port_ioctl(opaque_t ulph, opaque_t port_handle, dev_t dev, int cmd,
3656     intptr_t data, int mode, cred_t *credp, int *rval, uint32_t claimed)
3657 {
3658 	int			retval = FC_UNCLAIMED;	/* return value */
3659 	struct fcp_port		*pptr = NULL;		/* our soft state */
3660 	struct devctl_iocdata	*dcp = NULL;		/* for devctl */
3661 	dev_info_t		*cdip;
3662 	mdi_pathinfo_t		*pip = NULL;
3663 	char			*ndi_nm;		/* NDI name */
3664 	char			*ndi_addr;		/* NDI addr */
3665 	int			is_mpxio, circ;
3666 	int			devi_entered = 0;
3667 	time_t			end_time;
3668 
3669 	ASSERT(rval != NULL);
3670 
3671 	FCP_DTRACE(fcp_logq, "fcp",
3672 	    fcp_trace, FCP_BUF_LEVEL_8, 0,
3673 	    "fcp_port_ioctl(cmd=0x%x, claimed=%d)", cmd, claimed);
3674 
3675 	/* if already claimed then forget it */
3676 	if (claimed) {
3677 		/*
3678 		 * for now, if this ioctl has already been claimed, then
3679 		 * we just ignore it
3680 		 */
3681 		return (retval);
3682 	}
3683 
3684 	/* get our port info */
3685 	if ((pptr = fcp_get_port(port_handle)) == NULL) {
3686 		fcp_log(CE_WARN, NULL,
3687 		    "!fcp:Invalid port handle handle in ioctl");
3688 		*rval = ENXIO;
3689 		return (retval);
3690 	}
3691 	is_mpxio = pptr->port_mpxio;
3692 
3693 	switch (cmd) {
3694 	case DEVCTL_BUS_GETSTATE:
3695 	case DEVCTL_BUS_QUIESCE:
3696 	case DEVCTL_BUS_UNQUIESCE:
3697 	case DEVCTL_BUS_RESET:
3698 	case DEVCTL_BUS_RESETALL:
3699 
3700 	case DEVCTL_BUS_DEV_CREATE:
3701 		if (ndi_dc_allochdl((void *)data, &dcp) != NDI_SUCCESS) {
3702 			return (retval);
3703 		}
3704 		break;
3705 
3706 	case DEVCTL_DEVICE_GETSTATE:
3707 	case DEVCTL_DEVICE_OFFLINE:
3708 	case DEVCTL_DEVICE_ONLINE:
3709 	case DEVCTL_DEVICE_REMOVE:
3710 	case DEVCTL_DEVICE_RESET:
3711 		if (ndi_dc_allochdl((void *)data, &dcp) != NDI_SUCCESS) {
3712 			return (retval);
3713 		}
3714 
3715 		ASSERT(dcp != NULL);
3716 
3717 		/* ensure we have a name and address */
3718 		if (((ndi_nm = ndi_dc_getname(dcp)) == NULL) ||
3719 		    ((ndi_addr = ndi_dc_getaddr(dcp)) == NULL)) {
3720 			FCP_TRACE(fcp_logq, pptr->port_instbuf,
3721 			    fcp_trace, FCP_BUF_LEVEL_2, 0,
3722 			    "ioctl: can't get name (%s) or addr (%s)",
3723 			    ndi_nm ? ndi_nm : "<null ptr>",
3724 			    ndi_addr ? ndi_addr : "<null ptr>");
3725 			ndi_dc_freehdl(dcp);
3726 			return (retval);
3727 		}
3728 
3729 
3730 		/* get our child's DIP */
3731 		ASSERT(pptr != NULL);
3732 		if (is_mpxio)
3733 			mdi_devi_enter(pptr->port_dip, &circ);
3734 		else
3735 			ndi_devi_enter(pptr->port_dip, &circ);
3736 		devi_entered = 1;
3737 
3738 		if ((cdip = ndi_devi_find(pptr->port_dip, ndi_nm,
3739 		    ndi_addr)) == NULL) {
3740 			/* Look for virtually enumerated devices. */
3741 			pip = mdi_pi_find(pptr->port_dip, NULL, ndi_addr);
3742 			if (pip == NULL ||
3743 			    ((cdip = mdi_pi_get_client(pip)) == NULL)) {
3744 				*rval = ENXIO;
3745 				goto out;
3746 			}
3747 		}
3748 		break;
3749 
3750 	default:
3751 		*rval = ENOTTY;
3752 		return (retval);
3753 	}
3754 
3755 	/* this ioctl is ours -- process it */
3756 
3757 	retval = FC_SUCCESS;		/* just means we claim the ioctl */
3758 
3759 	/* we assume it will be a success; else we'll set error value */
3760 	*rval = 0;
3761 
3762 
3763 	FCP_DTRACE(fcp_logq, pptr->port_instbuf,
3764 	    fcp_trace, FCP_BUF_LEVEL_8, 0,
3765 	    "ioctl: claiming this one");
3766 
3767 	/* handle ioctls now */
3768 	switch (cmd) {
3769 	case DEVCTL_DEVICE_GETSTATE:
3770 		ASSERT(cdip != NULL);
3771 		ASSERT(dcp != NULL);
3772 		if (ndi_dc_return_dev_state(cdip, dcp) != NDI_SUCCESS) {
3773 			*rval = EFAULT;
3774 		}
3775 		break;
3776 
3777 	case DEVCTL_DEVICE_REMOVE:
3778 	case DEVCTL_DEVICE_OFFLINE: {
3779 		int			flag = 0;
3780 		int			lcount;
3781 		int			tcount;
3782 		struct fcp_pkt	*head = NULL;
3783 		struct fcp_lun	*plun;
3784 		child_info_t		*cip = CIP(cdip);
3785 		int			all = 1;
3786 		struct fcp_lun 	*tplun;
3787 		struct fcp_tgt 	*ptgt;
3788 
3789 		ASSERT(pptr != NULL);
3790 		ASSERT(cdip != NULL);
3791 
3792 		mutex_enter(&pptr->port_mutex);
3793 		if (pip != NULL) {
3794 			cip = CIP(pip);
3795 		}
3796 		if ((plun = fcp_get_lun_from_cip(pptr, cip)) == NULL) {
3797 			mutex_exit(&pptr->port_mutex);
3798 			*rval = ENXIO;
3799 			break;
3800 		}
3801 
3802 		head = fcp_scan_commands(plun);
3803 		if (head != NULL) {
3804 			fcp_abort_commands(head, LUN_PORT);
3805 		}
3806 		lcount = pptr->port_link_cnt;
3807 		tcount = plun->lun_tgt->tgt_change_cnt;
3808 		mutex_exit(&pptr->port_mutex);
3809 
3810 		if (cmd == DEVCTL_DEVICE_REMOVE) {
3811 			flag = NDI_DEVI_REMOVE;
3812 		}
3813 
3814 		if (is_mpxio)
3815 			mdi_devi_exit(pptr->port_dip, circ);
3816 		else
3817 			ndi_devi_exit(pptr->port_dip, circ);
3818 		devi_entered = 0;
3819 
3820 		*rval = fcp_pass_to_hp_and_wait(pptr, plun, cip,
3821 		    FCP_OFFLINE, lcount, tcount, flag);
3822 
3823 		if (*rval != NDI_SUCCESS) {
3824 			*rval = (*rval == NDI_BUSY) ? EBUSY : EIO;
3825 			break;
3826 		}
3827 
3828 		fcp_update_offline_flags(plun);
3829 
3830 		ptgt = plun->lun_tgt;
3831 		mutex_enter(&ptgt->tgt_mutex);
3832 		for (tplun = ptgt->tgt_lun; tplun != NULL; tplun =
3833 		    tplun->lun_next) {
3834 			mutex_enter(&tplun->lun_mutex);
3835 			if (!(tplun->lun_state & FCP_LUN_OFFLINE)) {
3836 				all = 0;
3837 			}
3838 			mutex_exit(&tplun->lun_mutex);
3839 		}
3840 
3841 		if (all) {
3842 			ptgt->tgt_node_state = FCP_TGT_NODE_NONE;
3843 			/*
3844 			 * The user is unconfiguring/offlining the device.
3845 			 * If fabric and the auto configuration is set
3846 			 * then make sure the user is the only one who
3847 			 * can reconfigure the device.
3848 			 */
3849 			if (FC_TOP_EXTERNAL(pptr->port_topology) &&
3850 				    fcp_enable_auto_configuration) {
3851 				ptgt->tgt_manual_config_only = 1;
3852 			}
3853 		}
3854 		mutex_exit(&ptgt->tgt_mutex);
3855 		break;
3856 	}
3857 
3858 	case DEVCTL_DEVICE_ONLINE: {
3859 		int			lcount;
3860 		int			tcount;
3861 		struct fcp_lun	*plun;
3862 		child_info_t		*cip = CIP(cdip);
3863 
3864 		ASSERT(cdip != NULL);
3865 		ASSERT(pptr != NULL);
3866 
3867 		mutex_enter(&pptr->port_mutex);
3868 		if (pip != NULL) {
3869 			cip = CIP(pip);
3870 		}
3871 		if ((plun = fcp_get_lun_from_cip(pptr, cip)) == NULL) {
3872 			mutex_exit(&pptr->port_mutex);
3873 			*rval = ENXIO;
3874 			break;
3875 		}
3876 		lcount = pptr->port_link_cnt;
3877 		tcount = plun->lun_tgt->tgt_change_cnt;
3878 		mutex_exit(&pptr->port_mutex);
3879 
3880 		/*
3881 		 * The FCP_LUN_ONLINING flag is used in fcp_scsi_start()
3882 		 * to allow the device attach to occur when the device is
3883 		 * FCP_LUN_OFFLINE (so we don't reject the INQUIRY command
3884 		 * from the scsi_probe()).
3885 		 */
3886 		mutex_enter(&LUN_TGT->tgt_mutex);
3887 		plun->lun_state |= FCP_LUN_ONLINING;
3888 		mutex_exit(&LUN_TGT->tgt_mutex);
3889 
3890 		if (is_mpxio)
3891 			mdi_devi_exit(pptr->port_dip, circ);
3892 		else
3893 			ndi_devi_exit(pptr->port_dip, circ);
3894 		devi_entered = 0;
3895 
3896 		*rval = fcp_pass_to_hp_and_wait(pptr, plun, cip,
3897 		    FCP_ONLINE, lcount, tcount, 0);
3898 
3899 		if (*rval != NDI_SUCCESS) {
3900 			/* Reset the FCP_LUN_ONLINING bit */
3901 			mutex_enter(&LUN_TGT->tgt_mutex);
3902 			plun->lun_state &= ~FCP_LUN_ONLINING;
3903 			mutex_exit(&LUN_TGT->tgt_mutex);
3904 			*rval = EIO;
3905 			break;
3906 		}
3907 		mutex_enter(&LUN_TGT->tgt_mutex);
3908 		plun->lun_state &= ~(FCP_LUN_OFFLINE | FCP_LUN_BUSY |
3909 		    FCP_LUN_ONLINING);
3910 		mutex_exit(&LUN_TGT->tgt_mutex);
3911 		break;
3912 	}
3913 
3914 	case DEVCTL_BUS_DEV_CREATE: {
3915 		uchar_t			*bytes = NULL;
3916 		uint_t			nbytes;
3917 		struct fcp_tgt		*ptgt = NULL;
3918 		struct fcp_lun		*plun = NULL;
3919 		dev_info_t		*useless_dip = NULL;
3920 
3921 		*rval = ndi_dc_devi_create(dcp, pptr->port_dip,
3922 		    DEVCTL_CONSTRUCT, &useless_dip);
3923 		if (*rval != 0 || useless_dip == NULL) {
3924 			break;
3925 		}
3926 
3927 		if ((ddi_prop_lookup_byte_array(DDI_DEV_T_ANY, useless_dip,
3928 		    DDI_PROP_DONTPASS | DDI_PROP_NOTPROM, PORT_WWN_PROP, &bytes,
3929 		    &nbytes) != DDI_PROP_SUCCESS) || nbytes != FC_WWN_SIZE) {
3930 			*rval = EINVAL;
3931 			(void) ndi_devi_free(useless_dip);
3932 			if (bytes != NULL) {
3933 				ddi_prop_free(bytes);
3934 			}
3935 			break;
3936 		}
3937 
3938 		*rval = fcp_create_on_demand(pptr, bytes);
3939 		if (*rval == 0) {
3940 			mutex_enter(&pptr->port_mutex);
3941 			ptgt = fcp_lookup_target(pptr, (uchar_t *)bytes);
3942 			if (ptgt) {
3943 				/*
3944 				 * We now have a pointer to the target that
3945 				 * was created. Lets point to the first LUN on
3946 				 * this new target.
3947 				 */
3948 				mutex_enter(&ptgt->tgt_mutex);
3949 
3950 				plun = ptgt->tgt_lun;
3951 				/*
3952 				 * There may be stale/offline LUN entries on
3953 				 * this list (this is by design) and so we have
3954 				 * to make sure we point to the first online
3955 				 * LUN
3956 				 */
3957 				while (plun &&
3958 				    plun->lun_state & FCP_LUN_OFFLINE) {
3959 					plun = plun->lun_next;
3960 				}
3961 
3962 				mutex_exit(&ptgt->tgt_mutex);
3963 			}
3964 			mutex_exit(&pptr->port_mutex);
3965 		}
3966 
3967 		if (*rval == 0 && ptgt && plun) {
3968 			mutex_enter(&plun->lun_mutex);
3969 			/*
3970 			 * Allow up to fcp_lun_ready_retry seconds to
3971 			 * configure all the luns behind the target.
3972 			 *
3973 			 * The intent here is to allow targets with long
3974 			 * reboot/reset-recovery times to become available
3975 			 * while limiting the maximum wait time for an
3976 			 * unresponsive target.
3977 			 */
3978 			end_time = ddi_get_lbolt() +
3979 			    SEC_TO_TICK(fcp_lun_ready_retry);
3980 
3981 			while (ddi_get_lbolt() < end_time) {
3982 				retval = FC_SUCCESS;
3983 
3984 				/*
3985 				 * The new ndi interfaces for on-demand creation
3986 				 * are inflexible, Do some more work to pass on
3987 				 * a path name of some LUN (design is broken !)
3988 				 */
3989 				if (plun->lun_cip) {
3990 					if (plun->lun_mpxio == 0) {
3991 						cdip = DIP(plun->lun_cip);
3992 					} else {
3993 						cdip = mdi_pi_get_client(
3994 						    PIP(plun->lun_cip));
3995 					}
3996 					if (cdip == NULL) {
3997 						*rval = ENXIO;
3998 						break;
3999 					}
4000 
4001 					if (!i_ddi_devi_attached(cdip)) {
4002 						mutex_exit(&plun->lun_mutex);
4003 						delay(drv_usectohz(1000000));
4004 						mutex_enter(&plun->lun_mutex);
4005 					} else {
4006 						/*
4007 						 * This Lun is ready, lets
4008 						 * check the next one.
4009 						 */
4010 						mutex_exit(&plun->lun_mutex);
4011 						plun = plun->lun_next;
4012 						while (plun && (plun->lun_state
4013 						    & FCP_LUN_OFFLINE)) {
4014 							plun = plun->lun_next;
4015 						}
4016 						if (!plun) {
4017 							break;
4018 						}
4019 						mutex_enter(&plun->lun_mutex);
4020 					}
4021 				} else {
4022 					/*
4023 					 * lun_cip field for a valid lun
4024 					 * should never be NULL. Fail the
4025 					 * command.
4026 					 */
4027 					*rval = ENXIO;
4028 					break;
4029 				}
4030 			}
4031 			if (plun) {
4032 				mutex_exit(&plun->lun_mutex);
4033 			} else {
4034 				char devnm[MAXNAMELEN];
4035 				int nmlen;
4036 
4037 				nmlen = snprintf(devnm, MAXNAMELEN, "%s@%s",
4038 				    ddi_node_name(cdip),
4039 				    ddi_get_name_addr(cdip));
4040 
4041 				if (copyout(&devnm, dcp->cpyout_buf, nmlen) !=
4042 				    0) {
4043 					*rval = EFAULT;
4044 				}
4045 			}
4046 		} else {
4047 			int	i;
4048 			char 	buf[25];
4049 
4050 			for (i = 0; i < FC_WWN_SIZE; i++) {
4051 				(void) sprintf(&buf[i << 1], "%02x", bytes[i]);
4052 			}
4053 
4054 			fcp_log(CE_WARN, pptr->port_dip,
4055 			    "!Failed to create nodes for pwwn=%s; error=%x",
4056 			    buf, *rval);
4057 		}
4058 
4059 		(void) ndi_devi_free(useless_dip);
4060 		ddi_prop_free(bytes);
4061 		break;
4062 	}
4063 
4064 	case DEVCTL_DEVICE_RESET: {
4065 		struct fcp_lun	*plun;
4066 		struct scsi_address	ap;
4067 		child_info_t		*cip = CIP(cdip);
4068 
4069 		ASSERT(cdip != NULL);
4070 		ASSERT(pptr != NULL);
4071 		mutex_enter(&pptr->port_mutex);
4072 		if (pip != NULL) {
4073 			cip = CIP(pip);
4074 		}
4075 		if ((plun = fcp_get_lun_from_cip(pptr, cip)) == NULL) {
4076 			mutex_exit(&pptr->port_mutex);
4077 			*rval = ENXIO;
4078 			break;
4079 		}
4080 		mutex_exit(&pptr->port_mutex);
4081 
4082 		mutex_enter(&plun->lun_tgt->tgt_mutex);
4083 		if (!(plun->lun_state & FCP_SCSI_LUN_TGT_INIT)) {
4084 			mutex_exit(&plun->lun_tgt->tgt_mutex);
4085 			*rval = ENXIO;
4086 			break;
4087 		}
4088 		ap.a_hba_tran = plun->lun_tran;
4089 		ASSERT(pptr->port_tran != NULL);
4090 		mutex_exit(&plun->lun_tgt->tgt_mutex);
4091 
4092 		/*
4093 		 * There is a chance lun_tran is NULL at this point. So check
4094 		 * for it. If it is NULL, it basically means that the tgt has
4095 		 * been freed. So, just return a "No such device or address"
4096 		 * error.
4097 		 */
4098 		if (ap.a_hba_tran == NULL) {
4099 			*rval = ENXIO;
4100 			break;
4101 		}
4102 
4103 		/*
4104 		 * set up ap so that fcp_reset can figure out
4105 		 * which target to reset
4106 		 */
4107 		if (fcp_scsi_reset(&ap, RESET_TARGET) == FALSE) {
4108 			*rval = EIO;
4109 		}
4110 		break;
4111 	}
4112 
4113 	case DEVCTL_BUS_GETSTATE:
4114 		ASSERT(dcp != NULL);
4115 		ASSERT(pptr != NULL);
4116 		ASSERT(pptr->port_dip != NULL);
4117 		if (ndi_dc_return_bus_state(pptr->port_dip, dcp) !=
4118 		    NDI_SUCCESS) {
4119 			*rval = EFAULT;
4120 		}
4121 		break;
4122 
4123 	case DEVCTL_BUS_QUIESCE:
4124 	case DEVCTL_BUS_UNQUIESCE:
4125 		*rval = ENOTSUP;
4126 		break;
4127 
4128 	case DEVCTL_BUS_RESET:
4129 	case DEVCTL_BUS_RESETALL:
4130 		ASSERT(pptr != NULL);
4131 		(void) fcp_linkreset(pptr, NULL,  KM_SLEEP);
4132 		break;
4133 
4134 	default:
4135 		ASSERT(dcp != NULL);
4136 		*rval = ENOTTY;
4137 		break;
4138 	}
4139 
4140 	/* all done -- clean up and return */
4141 out:	if (devi_entered) {
4142 		if (is_mpxio)
4143 			mdi_devi_exit(pptr->port_dip, circ);
4144 		else
4145 			ndi_devi_exit(pptr->port_dip, circ);
4146 	}
4147 
4148 	if (dcp != NULL) {
4149 		ndi_dc_freehdl(dcp);
4150 	}
4151 
4152 	return (retval);
4153 }
4154 
4155 
4156 /*ARGSUSED*/
4157 static int
4158 fcp_els_callback(opaque_t ulph, opaque_t port_handle, fc_unsol_buf_t *buf,
4159     uint32_t claimed)
4160 {
4161 	uchar_t			r_ctl;
4162 	uchar_t			ls_code;
4163 	struct fcp_port	*pptr;
4164 
4165 	if ((pptr = fcp_get_port(port_handle)) == NULL || claimed) {
4166 		return (FC_UNCLAIMED);
4167 	}
4168 
4169 	mutex_enter(&pptr->port_mutex);
4170 	if (pptr->port_state & (FCP_STATE_DETACHING |
4171 	    FCP_STATE_SUSPENDED | FCP_STATE_POWER_DOWN)) {
4172 		mutex_exit(&pptr->port_mutex);
4173 		return (FC_UNCLAIMED);
4174 	}
4175 	mutex_exit(&pptr->port_mutex);
4176 
4177 	r_ctl = buf->ub_frame.r_ctl;
4178 
4179 	switch (r_ctl & R_CTL_ROUTING) {
4180 	case R_CTL_EXTENDED_SVC:
4181 		if (r_ctl == R_CTL_ELS_REQ) {
4182 			ls_code = buf->ub_buffer[0];
4183 
4184 			switch (ls_code) {
4185 			case LA_ELS_PRLI:
4186 				/*
4187 				 * We really don't care if something fails.
4188 				 * If the PRLI was not sent out, then the
4189 				 * other end will time it out.
4190 				 */
4191 				if (fcp_unsol_prli(pptr, buf) == FC_SUCCESS) {
4192 					return (FC_SUCCESS);
4193 				}
4194 				return (FC_UNCLAIMED);
4195 				/* NOTREACHED */
4196 
4197 			default:
4198 				break;
4199 			}
4200 		}
4201 		/* FALLTHROUGH */
4202 
4203 	default:
4204 		return (FC_UNCLAIMED);
4205 	}
4206 }
4207 
4208 
4209 /*ARGSUSED*/
4210 static int
4211 fcp_data_callback(opaque_t ulph, opaque_t port_handle, fc_unsol_buf_t *buf,
4212     uint32_t claimed)
4213 {
4214 	return (FC_UNCLAIMED);
4215 }
4216 
4217 /*
4218  *     Function: fcp_statec_callback
4219  *
4220  *  Description: The purpose of this function is to handle a port state change.
4221  *		 It is called from fp/fctl and, in a few instances, internally.
4222  *
4223  *     Argument: ulph		fp/fctl port handle
4224  *		 port_handle	fcp_port structure
4225  *		 port_state	Physical state of the port
4226  *		 port_top	Topology
4227  *		 *devlist	Pointer to the first entry of a table
4228  *				containing the remote ports that can be
4229  *				reached.
4230  *		 dev_cnt	Number of entries pointed by devlist.
4231  *		 port_sid	Port ID of the local port.
4232  *
4233  * Return Value: None
4234  */
4235 /*ARGSUSED*/
4236 static void
4237 fcp_statec_callback(opaque_t ulph, opaque_t port_handle,
4238     uint32_t port_state, uint32_t port_top, fc_portmap_t *devlist,
4239     uint32_t dev_cnt, uint32_t port_sid)
4240 {
4241 	uint32_t		link_count;
4242 	int			map_len = 0;
4243 	struct fcp_port	*pptr;
4244 	fcp_map_tag_t		*map_tag = NULL;
4245 
4246 	if ((pptr = fcp_get_port(port_handle)) == NULL) {
4247 		fcp_log(CE_WARN, NULL, "!Invalid port handle in callback");
4248 		return;			/* nothing to work with! */
4249 	}
4250 
4251 	FCP_TRACE(fcp_logq, pptr->port_instbuf,
4252 	    fcp_trace, FCP_BUF_LEVEL_2, 0,
4253 	    "fcp_statec_callback: port state/dev_cnt/top ="
4254 	    "%d/%d/%d", FC_PORT_STATE_MASK(port_state),
4255 	    dev_cnt, port_top);
4256 
4257 	mutex_enter(&pptr->port_mutex);
4258 
4259 	/*
4260 	 * If a thread is in detach, don't do anything.
4261 	 */
4262 	if (pptr->port_state & (FCP_STATE_DETACHING |
4263 	    FCP_STATE_SUSPENDED | FCP_STATE_POWER_DOWN)) {
4264 		mutex_exit(&pptr->port_mutex);
4265 		return;
4266 	}
4267 
4268 	/*
4269 	 * First thing we do is set the FCP_STATE_IN_CB_DEVC flag so that if
4270 	 * init_pkt is called, it knows whether or not the target's status
4271 	 * (or pd) might be changing.
4272 	 */
4273 
4274 	if (FC_PORT_STATE_MASK(port_state) == FC_STATE_DEVICE_CHANGE) {
4275 		pptr->port_state |= FCP_STATE_IN_CB_DEVC;
4276 	}
4277 
4278 	/*
4279 	 * the transport doesn't allocate or probe unless being
4280 	 * asked to by either the applications or ULPs
4281 	 *
4282 	 * in cases where the port is OFFLINE at the time of port
4283 	 * attach callback and the link comes ONLINE later, for
4284 	 * easier automatic node creation (i.e. without you having to
4285 	 * go out and run the utility to perform LOGINs) the
4286 	 * following conditional is helpful
4287 	 */
4288 	pptr->port_phys_state = port_state;
4289 
4290 	if (dev_cnt) {
4291 		mutex_exit(&pptr->port_mutex);
4292 
4293 		map_len = sizeof (*map_tag) * dev_cnt;
4294 		map_tag = kmem_alloc(map_len, KM_NOSLEEP);
4295 		if (map_tag == NULL) {
4296 			fcp_log(CE_WARN, pptr->port_dip,
4297 			    "!fcp%d: failed to allocate for map tags; "
4298 			    " state change will not be processed",
4299 			    pptr->port_instance);
4300 
4301 			mutex_enter(&pptr->port_mutex);
4302 			pptr->port_state &= ~FCP_STATE_IN_CB_DEVC;
4303 			mutex_exit(&pptr->port_mutex);
4304 
4305 			return;
4306 		}
4307 
4308 		mutex_enter(&pptr->port_mutex);
4309 	}
4310 
4311 	if (pptr->port_id != port_sid) {
4312 		FCP_TRACE(fcp_logq, pptr->port_instbuf,
4313 		    fcp_trace, FCP_BUF_LEVEL_3, 0,
4314 		    "fcp: Port S_ID=0x%x => 0x%x", pptr->port_id,
4315 		    port_sid);
4316 		/*
4317 		 * The local port changed ID. It is the first time a port ID
4318 		 * is assigned or something drastic happened.  We might have
4319 		 * been unplugged and replugged on another loop or fabric port
4320 		 * or somebody grabbed the AL_PA we had or somebody rezoned
4321 		 * the fabric we were plugged into.
4322 		 */
4323 		pptr->port_id = port_sid;
4324 	}
4325 
4326 	switch (FC_PORT_STATE_MASK(port_state)) {
4327 	case FC_STATE_OFFLINE:
4328 	case FC_STATE_RESET_REQUESTED:
4329 		/*
4330 		 * link has gone from online to offline -- just update the
4331 		 * state of this port to BUSY and MARKed to go offline
4332 		 */
4333 		FCP_TRACE(fcp_logq, pptr->port_instbuf,
4334 		    fcp_trace, FCP_BUF_LEVEL_3, 0,
4335 		    "link went offline");
4336 		if ((pptr->port_state & FCP_STATE_OFFLINE) && dev_cnt) {
4337 			/*
4338 			 * We were offline a while ago and this one
4339 			 * seems to indicate that the loop has gone
4340 			 * dead forever.
4341 			 */
4342 			pptr->port_tmp_cnt += dev_cnt;
4343 			pptr->port_state &= ~FCP_STATE_OFFLINE;
4344 			pptr->port_state |= FCP_STATE_INIT;
4345 			link_count = pptr->port_link_cnt;
4346 			fcp_handle_devices(pptr, devlist, dev_cnt,
4347 			    link_count, map_tag, FCP_CAUSE_LINK_DOWN);
4348 		} else {
4349 			pptr->port_link_cnt++;
4350 			ASSERT(!(pptr->port_state & FCP_STATE_SUSPENDED));
4351 			fcp_update_state(pptr, (FCP_LUN_BUSY |
4352 			    FCP_LUN_MARK), FCP_CAUSE_LINK_DOWN);
4353 			if (pptr->port_mpxio) {
4354 				fcp_update_mpxio_path_verifybusy(pptr);
4355 			}
4356 			pptr->port_state |= FCP_STATE_OFFLINE;
4357 			pptr->port_state &=
4358 			    ~(FCP_STATE_ONLINING | FCP_STATE_ONLINE);
4359 			pptr->port_tmp_cnt = 0;
4360 		}
4361 		mutex_exit(&pptr->port_mutex);
4362 		break;
4363 
4364 	case FC_STATE_ONLINE:
4365 	case FC_STATE_LIP:
4366 	case FC_STATE_LIP_LBIT_SET:
4367 		/*
4368 		 * link has gone from offline to online
4369 		 */
4370 		FCP_TRACE(fcp_logq, pptr->port_instbuf,
4371 		    fcp_trace, FCP_BUF_LEVEL_3, 0,
4372 		    "link went online");
4373 
4374 		pptr->port_link_cnt++;
4375 
4376 		while (pptr->port_ipkt_cnt) {
4377 			mutex_exit(&pptr->port_mutex);
4378 			delay(drv_usectohz(1000000));
4379 			mutex_enter(&pptr->port_mutex);
4380 		}
4381 
4382 		pptr->port_topology = port_top;
4383 
4384 		/*
4385 		 * The state of the targets and luns accessible through this
4386 		 * port is updated.
4387 		 */
4388 		fcp_update_state(pptr, FCP_LUN_BUSY | FCP_LUN_MARK,
4389 		    FCP_CAUSE_LINK_CHANGE);
4390 
4391 		pptr->port_state &= ~(FCP_STATE_INIT | FCP_STATE_OFFLINE);
4392 		pptr->port_state |= FCP_STATE_ONLINING;
4393 		pptr->port_tmp_cnt = dev_cnt;
4394 		link_count = pptr->port_link_cnt;
4395 
4396 		pptr->port_deadline = fcp_watchdog_time +
4397 		    FCP_ICMD_DEADLINE;
4398 
4399 		if (!dev_cnt) {
4400 			/*
4401 			 * We go directly to the online state if no remote
4402 			 * ports were discovered.
4403 			 */
4404 			FCP_TRACE(fcp_logq, pptr->port_instbuf,
4405 			    fcp_trace, FCP_BUF_LEVEL_3, 0,
4406 			    "No remote ports discovered");
4407 
4408 			pptr->port_state &= ~FCP_STATE_ONLINING;
4409 			pptr->port_state |= FCP_STATE_ONLINE;
4410 		}
4411 
4412 		switch (port_top) {
4413 		case FC_TOP_FABRIC:
4414 		case FC_TOP_PUBLIC_LOOP:
4415 		case FC_TOP_PRIVATE_LOOP:
4416 		case FC_TOP_PT_PT:
4417 
4418 			if (pptr->port_state & FCP_STATE_NS_REG_FAILED) {
4419 				fcp_retry_ns_registry(pptr, port_sid);
4420 			}
4421 
4422 			fcp_handle_devices(pptr, devlist, dev_cnt, link_count,
4423 			    map_tag, FCP_CAUSE_LINK_CHANGE);
4424 			break;
4425 
4426 		default:
4427 			/*
4428 			 * We got here because we were provided with an unknown
4429 			 * topology.
4430 			 */
4431 			if (pptr->port_state & FCP_STATE_NS_REG_FAILED) {
4432 				pptr->port_state &= ~FCP_STATE_NS_REG_FAILED;
4433 			}
4434 
4435 			pptr->port_tmp_cnt -= dev_cnt;
4436 			fcp_log(CE_WARN, pptr->port_dip,
4437 			    "!unknown/unsupported topology (0x%x)", port_top);
4438 			break;
4439 		}
4440 		FCP_TRACE(fcp_logq, pptr->port_instbuf,
4441 		    fcp_trace, FCP_BUF_LEVEL_3, 0,
4442 		    "Notify ssd of the reset to reinstate the reservations");
4443 
4444 		scsi_hba_reset_notify_callback(&pptr->port_mutex,
4445 		    &pptr->port_reset_notify_listf);
4446 
4447 		mutex_exit(&pptr->port_mutex);
4448 
4449 		break;
4450 
4451 	case FC_STATE_RESET:
4452 		ASSERT(pptr->port_state & FCP_STATE_OFFLINE);
4453 		FCP_TRACE(fcp_logq, pptr->port_instbuf,
4454 		    fcp_trace, FCP_BUF_LEVEL_3, 0,
4455 		    "RESET state, waiting for Offline/Online state_cb");
4456 		mutex_exit(&pptr->port_mutex);
4457 		break;
4458 
4459 	case FC_STATE_DEVICE_CHANGE:
4460 		/*
4461 		 * We come here when an application has requested
4462 		 * Dynamic node creation/deletion in Fabric connectivity.
4463 		 */
4464 		if (pptr->port_state & (FCP_STATE_OFFLINE |
4465 		    FCP_STATE_INIT)) {
4466 			/*
4467 			 * This case can happen when the FCTL is in the
4468 			 * process of giving us on online and the host on
4469 			 * the other side issues a PLOGI/PLOGO. Ideally
4470 			 * the state changes should be serialized unless
4471 			 * they are opposite (online-offline).
4472 			 * The transport will give us a final state change
4473 			 * so we can ignore this for the time being.
4474 			 */
4475 			pptr->port_state &= ~FCP_STATE_IN_CB_DEVC;
4476 			mutex_exit(&pptr->port_mutex);
4477 			break;
4478 		}
4479 
4480 		if (pptr->port_state & FCP_STATE_NS_REG_FAILED) {
4481 			fcp_retry_ns_registry(pptr, port_sid);
4482 		}
4483 
4484 		/*
4485 		 * Extend the deadline under steady state conditions
4486 		 * to provide more time for the device-change-commands
4487 		 */
4488 		if (!pptr->port_ipkt_cnt) {
4489 			pptr->port_deadline = fcp_watchdog_time +
4490 			    FCP_ICMD_DEADLINE;
4491 		}
4492 
4493 		/*
4494 		 * There is another race condition here, where if we were
4495 		 * in ONLINEING state and a devices in the map logs out,
4496 		 * fp will give another state change as DEVICE_CHANGE
4497 		 * and OLD. This will result in that target being offlined.
4498 		 * The pd_handle is freed. If from the first statec callback
4499 		 * we were going to fire a PLOGI/PRLI, the system will
4500 		 * panic in fc_ulp_transport with invalid pd_handle.
4501 		 * The fix is to check for the link_cnt before issuing
4502 		 * any command down.
4503 		 */
4504 		fcp_update_targets(pptr, devlist, dev_cnt,
4505 		    FCP_LUN_BUSY | FCP_LUN_MARK, FCP_CAUSE_TGT_CHANGE);
4506 
4507 		link_count = pptr->port_link_cnt;
4508 
4509 		fcp_handle_devices(pptr, devlist, dev_cnt,
4510 		    link_count, map_tag, FCP_CAUSE_TGT_CHANGE);
4511 
4512 		pptr->port_state &= ~FCP_STATE_IN_CB_DEVC;
4513 
4514 		mutex_exit(&pptr->port_mutex);
4515 		break;
4516 
4517 	case FC_STATE_TARGET_PORT_RESET:
4518 		if (pptr->port_state & FCP_STATE_NS_REG_FAILED) {
4519 			fcp_retry_ns_registry(pptr, port_sid);
4520 		}
4521 
4522 		/* Do nothing else */
4523 		mutex_exit(&pptr->port_mutex);
4524 		break;
4525 
4526 	default:
4527 		fcp_log(CE_WARN, pptr->port_dip,
4528 		    "!Invalid state change=0x%x", port_state);
4529 		mutex_exit(&pptr->port_mutex);
4530 		break;
4531 	}
4532 
4533 	if (map_tag) {
4534 		kmem_free(map_tag, map_len);
4535 	}
4536 }
4537 
4538 /*
4539  *     Function: fcp_handle_devices
4540  *
4541  *  Description: This function updates the devices currently known by
4542  *		 walking the list provided by the caller.  The list passed
4543  *		 by the caller is supposed to be the list of reachable
4544  *		 devices.
4545  *
4546  *     Argument: *pptr		Fcp port structure.
4547  *		 *devlist	Pointer to the first entry of a table
4548  *				containing the remote ports that can be
4549  *				reached.
4550  *		 dev_cnt	Number of entries pointed by devlist.
4551  *		 link_cnt	Link state count.
4552  *		 *map_tag	Array of fcp_map_tag_t structures.
4553  *		 cause		What caused this function to be called.
4554  *
4555  * Return Value: None
4556  *
4557  *        Notes: The pptr->port_mutex must be held.
4558  */
4559 static void
4560 fcp_handle_devices(struct fcp_port *pptr, fc_portmap_t devlist[],
4561     uint32_t dev_cnt, int link_cnt, fcp_map_tag_t *map_tag, int cause)
4562 {
4563 	int			i;
4564 	int			check_finish_init = 0;
4565 	fc_portmap_t		*map_entry;
4566 	struct fcp_tgt	*ptgt = NULL;
4567 
4568 	FCP_TRACE(fcp_logq, pptr->port_instbuf,
4569 	    fcp_trace, FCP_BUF_LEVEL_3, 0,
4570 	    "fcp_handle_devices: called for %d dev(s)", dev_cnt);
4571 
4572 	if (dev_cnt) {
4573 		ASSERT(map_tag != NULL);
4574 	}
4575 
4576 	/*
4577 	 * The following code goes through the list of remote ports that are
4578 	 * accessible through this (pptr) local port (The list walked is the
4579 	 * one provided by the caller which is the list of the remote ports
4580 	 * currently reachable).  It checks if any of them was already
4581 	 * known by looking for the corresponding target structure based on
4582 	 * the world wide name.  If a target is part of the list it is tagged
4583 	 * (ptgt->tgt_aux_state = FCP_TGT_TAGGED).
4584 	 *
4585 	 * Old comment
4586 	 * -----------
4587 	 * Before we drop port mutex; we MUST get the tags updated; This
4588 	 * two step process is somewhat slow, but more reliable.
4589 	 */
4590 	for (i = 0; (i < dev_cnt) && (pptr->port_link_cnt == link_cnt); i++) {
4591 		map_entry = &(devlist[i]);
4592 
4593 		/*
4594 		 * get ptr to this map entry in our port's
4595 		 * list (if any)
4596 		 */
4597 		ptgt = fcp_lookup_target(pptr,
4598 		    (uchar_t *)&(map_entry->map_pwwn));
4599 
4600 		if (ptgt) {
4601 			map_tag[i] = ptgt->tgt_change_cnt;
4602 			if (cause == FCP_CAUSE_LINK_CHANGE) {
4603 				ptgt->tgt_aux_state = FCP_TGT_TAGGED;
4604 			}
4605 		}
4606 	}
4607 
4608 	/*
4609 	 * At this point we know which devices of the new list were already
4610 	 * known (The field tgt_aux_state of the target structure has been
4611 	 * set to FCP_TGT_TAGGED).
4612 	 *
4613 	 * The following code goes through the list of targets currently known
4614 	 * by the local port (the list is actually a hashing table).  If a
4615 	 * target is found and is not tagged, it means the target cannot
4616 	 * be reached anymore through the local port (pptr).  It is offlined.
4617 	 * The offlining only occurs if the cause is FCP_CAUSE_LINK_CHANGE.
4618 	 */
4619 	for (i = 0; i < FCP_NUM_HASH; i++) {
4620 		for (ptgt = pptr->port_tgt_hash_table[i]; ptgt != NULL;
4621 		    ptgt = ptgt->tgt_next) {
4622 			mutex_enter(&ptgt->tgt_mutex);
4623 			if ((ptgt->tgt_aux_state != FCP_TGT_TAGGED) &&
4624 			    (cause == FCP_CAUSE_LINK_CHANGE) &&
4625 			    !(ptgt->tgt_state & FCP_TGT_OFFLINE)) {
4626 				fcp_offline_target_now(pptr, ptgt,
4627 				    link_cnt, ptgt->tgt_change_cnt, 0);
4628 			}
4629 			mutex_exit(&ptgt->tgt_mutex);
4630 		}
4631 	}
4632 
4633 	/*
4634 	 * At this point, the devices that were known but cannot be reached
4635 	 * anymore, have most likely been offlined.
4636 	 *
4637 	 * The following section of code seems to go through the list of
4638 	 * remote ports that can now be reached.  For every single one it
4639 	 * checks if it is already known or if it is a new port.
4640 	 */
4641 	for (i = 0; (i < dev_cnt) && (pptr->port_link_cnt == link_cnt); i++) {
4642 
4643 		if (check_finish_init) {
4644 			ASSERT(i > 0);
4645 			(void) fcp_call_finish_init_held(pptr, ptgt, link_cnt,
4646 			    map_tag[i - 1], cause);
4647 			check_finish_init = 0;
4648 		}
4649 
4650 		/* get a pointer to this map entry */
4651 		map_entry = &(devlist[i]);
4652 
4653 		/*
4654 		 * Check for the duplicate map entry flag. If we have marked
4655 		 * this entry as a duplicate we skip it since the correct
4656 		 * (perhaps even same) state change will be encountered
4657 		 * later in the list.
4658 		 */
4659 		if (map_entry->map_flags & PORT_DEVICE_DUPLICATE_MAP_ENTRY)
4660 			continue;
4661 
4662 		/* get ptr to this map entry in our port's list (if any) */
4663 		ptgt = fcp_lookup_target(pptr,
4664 		    (uchar_t *)&(map_entry->map_pwwn));
4665 
4666 		if (ptgt) {
4667 			/*
4668 			 * This device was already known.  The field
4669 			 * tgt_aux_state is reset (was probably set to
4670 			 * FCP_TGT_TAGGED previously in this routine).
4671 			 */
4672 			ptgt->tgt_aux_state = 0;
4673 			FCP_TRACE(fcp_logq, pptr->port_instbuf,
4674 			    fcp_trace, FCP_BUF_LEVEL_3, 0,
4675 			    "handle_devices: map did/state/type/flags = "
4676 			    "0x%x/0x%x/0x%x/0x%x, tgt_d_id=0x%x, "
4677 			    "tgt_state=%d",
4678 			    map_entry->map_did.port_id, map_entry->map_state,
4679 			    map_entry->map_type, map_entry->map_flags,
4680 			    ptgt->tgt_d_id, ptgt->tgt_state);
4681 		}
4682 
4683 		if (map_entry->map_type == PORT_DEVICE_OLD ||
4684 		    map_entry->map_type == PORT_DEVICE_NEW ||
4685 		    map_entry->map_type == PORT_DEVICE_CHANGED) {
4686 			FCP_TRACE(fcp_logq, pptr->port_instbuf,
4687 			    fcp_trace, FCP_BUF_LEVEL_2, 0,
4688 			    "map_type=%x, did = %x",
4689 			    map_entry->map_type,
4690 			    map_entry->map_did.port_id);
4691 		}
4692 
4693 		switch (map_entry->map_type) {
4694 		case PORT_DEVICE_NOCHANGE:
4695 		case PORT_DEVICE_USER_CREATE:
4696 		case PORT_DEVICE_USER_LOGIN:
4697 		case PORT_DEVICE_NEW:
4698 			FCP_TGT_TRACE(ptgt, map_tag[i], FCP_TGT_TRACE_1);
4699 
4700 			if (fcp_handle_mapflags(pptr, ptgt, map_entry,
4701 			    link_cnt, (ptgt) ? map_tag[i] : 0,
4702 			    cause) == TRUE) {
4703 
4704 				FCP_TGT_TRACE(ptgt, map_tag[i],
4705 					    FCP_TGT_TRACE_2);
4706 				check_finish_init++;
4707 			}
4708 			break;
4709 
4710 		case PORT_DEVICE_OLD:
4711 			if (ptgt != NULL) {
4712 				FCP_TGT_TRACE(ptgt, map_tag[i],
4713 				    FCP_TGT_TRACE_3);
4714 
4715 				mutex_enter(&ptgt->tgt_mutex);
4716 				if (!(ptgt->tgt_state & FCP_TGT_OFFLINE)) {
4717 					/*
4718 					 * Must do an in-line wait for I/Os
4719 					 * to get drained
4720 					 */
4721 					mutex_exit(&ptgt->tgt_mutex);
4722 					mutex_exit(&pptr->port_mutex);
4723 
4724 					mutex_enter(&ptgt->tgt_mutex);
4725 					while (ptgt->tgt_ipkt_cnt ||
4726 					    fcp_outstanding_lun_cmds(ptgt)
4727 						== FC_SUCCESS) {
4728 						mutex_exit(&ptgt->tgt_mutex);
4729 						delay(drv_usectohz(1000000));
4730 						mutex_enter(&ptgt->tgt_mutex);
4731 					}
4732 					mutex_exit(&ptgt->tgt_mutex);
4733 
4734 					mutex_enter(&pptr->port_mutex);
4735 					mutex_enter(&ptgt->tgt_mutex);
4736 
4737 					(void) fcp_offline_target(pptr, ptgt,
4738 					    link_cnt, map_tag[i], 0, 0);
4739 				}
4740 				mutex_exit(&ptgt->tgt_mutex);
4741 			}
4742 			check_finish_init++;
4743 			break;
4744 
4745 		case PORT_DEVICE_USER_DELETE:
4746 		case PORT_DEVICE_USER_LOGOUT:
4747 			if (ptgt != NULL) {
4748 				FCP_TGT_TRACE(ptgt, map_tag[i],
4749 				    FCP_TGT_TRACE_4);
4750 
4751 				mutex_enter(&ptgt->tgt_mutex);
4752 				if (!(ptgt->tgt_state & FCP_TGT_OFFLINE)) {
4753 					(void) fcp_offline_target(pptr, ptgt,
4754 					    link_cnt, map_tag[i], 1, 0);
4755 				}
4756 				mutex_exit(&ptgt->tgt_mutex);
4757 			}
4758 			check_finish_init++;
4759 			break;
4760 
4761 		case PORT_DEVICE_CHANGED:
4762 			if (ptgt != NULL) {
4763 				FCP_TGT_TRACE(ptgt, map_tag[i],
4764 				    FCP_TGT_TRACE_5);
4765 
4766 				if (fcp_device_changed(pptr, ptgt,
4767 				    map_entry, link_cnt, map_tag[i],
4768 				    cause) == TRUE) {
4769 					check_finish_init++;
4770 				}
4771 			} else {
4772 				if (fcp_handle_mapflags(pptr, ptgt,
4773 				    map_entry, link_cnt, 0, cause) == TRUE) {
4774 					check_finish_init++;
4775 				}
4776 			}
4777 			break;
4778 
4779 		default:
4780 			fcp_log(CE_WARN, pptr->port_dip,
4781 			    "!Invalid map_type=0x%x", map_entry->map_type);
4782 			check_finish_init++;
4783 			break;
4784 		}
4785 	}
4786 
4787 	if (check_finish_init && pptr->port_link_cnt == link_cnt) {
4788 		ASSERT(i > 0);
4789 		(void) fcp_call_finish_init_held(pptr, ptgt, link_cnt,
4790 		    map_tag[i-1], cause);
4791 	} else if (dev_cnt == 0 && pptr->port_link_cnt == link_cnt) {
4792 		fcp_offline_all(pptr, link_cnt, cause);
4793 	}
4794 }
4795 
4796 /*
4797  *     Function: fcp_handle_mapflags
4798  *
4799  *  Description: This function creates a target structure if the ptgt passed
4800  *		 is NULL.  It also kicks off the PLOGI if we are not logged
4801  *		 into the target yet or the PRLI if we are logged into the
4802  *		 target already.  The rest of the treatment is done in the
4803  *		 callbacks of the PLOGI or PRLI.
4804  *
4805  *     Argument: *pptr		FCP Port structure.
4806  *		 *ptgt		Target structure.
4807  *		 *map_entry	Array of fc_portmap_t structures.
4808  *		 link_cnt	Link state count.
4809  *		 tgt_cnt	Target state count.
4810  *		 cause		What caused this function to be called.
4811  *
4812  * Return Value: TRUE	Failed
4813  *		 FALSE	Succeeded
4814  *
4815  *        Notes: pptr->port_mutex must be owned.
4816  */
4817 static int
4818 fcp_handle_mapflags(struct fcp_port	*pptr, struct fcp_tgt	*ptgt,
4819     fc_portmap_t *map_entry, int link_cnt, int tgt_cnt, int cause)
4820 {
4821 	int			lcount;
4822 	int			tcount;
4823 	int			ret = TRUE;
4824 	int			alloc;
4825 	struct fcp_ipkt	*icmd;
4826 	struct fcp_lun	*pseq_lun = NULL;
4827 	uchar_t			opcode;
4828 	int			valid_ptgt_was_passed = FALSE;
4829 
4830 	ASSERT(mutex_owned(&pptr->port_mutex));
4831 
4832 	/*
4833 	 * This case is possible where the FCTL has come up and done discovery
4834 	 * before FCP was loaded and attached. FCTL would have discovered the
4835 	 * devices and later the ULP came online. In this case ULP's would get
4836 	 * PORT_DEVICE_NOCHANGE but target would be NULL.
4837 	 */
4838 	if (ptgt == NULL) {
4839 		/* don't already have a target */
4840 		mutex_exit(&pptr->port_mutex);
4841 		ptgt = fcp_alloc_tgt(pptr, map_entry, link_cnt);
4842 		mutex_enter(&pptr->port_mutex);
4843 
4844 		if (ptgt == NULL) {
4845 			fcp_log(CE_WARN, pptr->port_dip,
4846 			    "!FC target allocation failed");
4847 			return (ret);
4848 		}
4849 		mutex_enter(&ptgt->tgt_mutex);
4850 		ptgt->tgt_statec_cause = cause;
4851 		ptgt->tgt_tmp_cnt = 1;
4852 		mutex_exit(&ptgt->tgt_mutex);
4853 	} else {
4854 		valid_ptgt_was_passed = TRUE;
4855 	}
4856 
4857 	/*
4858 	 * Copy in the target parameters
4859 	 */
4860 	mutex_enter(&ptgt->tgt_mutex);
4861 	ptgt->tgt_d_id = map_entry->map_did.port_id;
4862 	ptgt->tgt_hard_addr = map_entry->map_hard_addr.hard_addr;
4863 	ptgt->tgt_pd_handle = map_entry->map_pd;
4864 	ptgt->tgt_fca_dev = NULL;
4865 
4866 	/* Copy port and node WWNs */
4867 	bcopy(&map_entry->map_nwwn, &ptgt->tgt_node_wwn.raw_wwn[0],
4868 	    FC_WWN_SIZE);
4869 	bcopy(&map_entry->map_pwwn, &ptgt->tgt_port_wwn.raw_wwn[0],
4870 	    FC_WWN_SIZE);
4871 
4872 	if (!(map_entry->map_flags & PORT_DEVICE_NO_SKIP_DEVICE_DISCOVERY) &&
4873 	    (map_entry->map_type == PORT_DEVICE_NOCHANGE) &&
4874 	    (map_entry->map_state == PORT_DEVICE_LOGGED_IN) &&
4875 	    valid_ptgt_was_passed) {
4876 		/*
4877 		 * determine if there are any tape LUNs on this target
4878 		 */
4879 		for (pseq_lun = ptgt->tgt_lun;
4880 			pseq_lun != NULL;
4881 			pseq_lun = pseq_lun->lun_next) {
4882 			if ((pseq_lun->lun_type == DTYPE_SEQUENTIAL) &&
4883 			    !(pseq_lun->lun_state & FCP_LUN_OFFLINE)) {
4884 				fcp_update_tgt_state(ptgt, FCP_RESET,
4885 				    FCP_LUN_MARK);
4886 				mutex_exit(&ptgt->tgt_mutex);
4887 				return (ret);
4888 			}
4889 		}
4890 	}
4891 
4892 	/*
4893 	 * If ptgt was NULL when this function was entered, then tgt_node_state
4894 	 * was never specifically initialized but zeroed out which means
4895 	 * FCP_TGT_NODE_NONE.
4896 	 */
4897 	switch (ptgt->tgt_node_state) {
4898 	case FCP_TGT_NODE_NONE:
4899 	case FCP_TGT_NODE_ON_DEMAND:
4900 		if (FC_TOP_EXTERNAL(pptr->port_topology) &&
4901 		    !fcp_enable_auto_configuration &&
4902 		    map_entry->map_type != PORT_DEVICE_USER_CREATE) {
4903 			ptgt->tgt_node_state = FCP_TGT_NODE_ON_DEMAND;
4904 		} else if (FC_TOP_EXTERNAL(pptr->port_topology) &&
4905 		    fcp_enable_auto_configuration &&
4906 		    (ptgt->tgt_manual_config_only == 1) &&
4907 		    map_entry->map_type != PORT_DEVICE_USER_CREATE) {
4908 			/*
4909 			 * If auto configuration is set and
4910 			 * the tgt_manual_config_only flag is set then
4911 			 * we only want the user to be able to change
4912 			 * the state through create_on_demand.
4913 			 */
4914 			ptgt->tgt_node_state = FCP_TGT_NODE_ON_DEMAND;
4915 		} else {
4916 			ptgt->tgt_node_state = FCP_TGT_NODE_NONE;
4917 		}
4918 		break;
4919 
4920 	case FCP_TGT_NODE_PRESENT:
4921 		break;
4922 	}
4923 	/*
4924 	 * If we are booting from a fabric device, make sure we
4925 	 * mark the node state appropriately for this target to be
4926 	 * enumerated
4927 	 */
4928 	if (FC_TOP_EXTERNAL(pptr->port_topology) && pptr->port_boot_wwn[0]) {
4929 		if (bcmp((caddr_t)pptr->port_boot_wwn,
4930 		    (caddr_t)&ptgt->tgt_port_wwn.raw_wwn[0],
4931 		    sizeof (ptgt->tgt_port_wwn)) == 0) {
4932 			ptgt->tgt_node_state = FCP_TGT_NODE_NONE;
4933 		}
4934 	}
4935 	mutex_exit(&ptgt->tgt_mutex);
4936 
4937 	FCP_TRACE(fcp_logq, pptr->port_instbuf,
4938 	    fcp_trace, FCP_BUF_LEVEL_3, 0,
4939 	    "map_pd=%p, map_type=%x, did = %x, ulp_rscn_count=0x%x",
4940 	    map_entry->map_pd, map_entry->map_type, map_entry->map_did.port_id,
4941 	    map_entry->map_rscn_info.ulp_rscn_count);
4942 
4943 	mutex_enter(&ptgt->tgt_mutex);
4944 
4945 	/*
4946 	 * Reset target OFFLINE state and mark the target BUSY
4947 	 */
4948 	ptgt->tgt_state &= ~FCP_TGT_OFFLINE;
4949 	ptgt->tgt_state |= (FCP_TGT_BUSY | FCP_TGT_MARK);
4950 
4951 	tcount = tgt_cnt ? tgt_cnt : ptgt->tgt_change_cnt;
4952 	lcount = link_cnt;
4953 
4954 	mutex_exit(&ptgt->tgt_mutex);
4955 	mutex_exit(&pptr->port_mutex);
4956 
4957 	/*
4958 	 * if we are already logged in, then we do a PRLI, else
4959 	 * we do a PLOGI first (to get logged in)
4960 	 *
4961 	 * We will not check if we are the PLOGI initiator
4962 	 */
4963 	opcode = (map_entry->map_state == PORT_DEVICE_LOGGED_IN &&
4964 	    map_entry->map_pd != NULL) ? LA_ELS_PRLI : LA_ELS_PLOGI;
4965 
4966 	alloc = FCP_MAX(sizeof (la_els_logi_t), sizeof (la_els_prli_t));
4967 
4968 	icmd = fcp_icmd_alloc(pptr, ptgt, alloc, alloc, 0, 0, lcount, tcount,
4969 	    cause, map_entry->map_rscn_info.ulp_rscn_count);
4970 
4971 	if (icmd == NULL) {
4972 		FCP_TGT_TRACE(ptgt, tgt_cnt, FCP_TGT_TRACE_29);
4973 		/*
4974 		 * We've exited port_mutex before calling fcp_icmd_alloc,
4975 		 * we need to make sure we reacquire it before returning.
4976 		 */
4977 		mutex_enter(&pptr->port_mutex);
4978 		return (FALSE);
4979 	}
4980 
4981 	/* TRUE is only returned while target is intended skipped */
4982 	ret = FALSE;
4983 	/* discover info about this target */
4984 	if ((fcp_send_els(pptr, ptgt, icmd, opcode,
4985 	    lcount, tcount, cause)) == DDI_SUCCESS) {
4986 		FCP_TGT_TRACE(ptgt, tgt_cnt, FCP_TGT_TRACE_9);
4987 	} else {
4988 		fcp_icmd_free(pptr, icmd);
4989 		ret = TRUE;
4990 	}
4991 	mutex_enter(&pptr->port_mutex);
4992 
4993 	return (ret);
4994 }
4995 
4996 /*
4997  *     Function: fcp_send_els
4998  *
4999  *  Description: Sends an ELS to the target specified by the caller.  Supports
5000  *		 PLOGI and PRLI.
5001  *
5002  *     Argument: *pptr		Fcp port.
5003  *		 *ptgt		Target to send the ELS to.
5004  *		 *icmd		Internal packet
5005  *		 opcode		ELS opcode
5006  *		 lcount		Link state change counter
5007  *		 tcount		Target state change counter
5008  *		 cause		What caused the call
5009  *
5010  * Return Value: DDI_SUCCESS
5011  *		 Others
5012  */
5013 static int
5014 fcp_send_els(struct fcp_port *pptr, struct fcp_tgt *ptgt,
5015     struct fcp_ipkt *icmd, uchar_t opcode, int lcount, int tcount, int cause)
5016 {
5017 	fc_packet_t		*fpkt;
5018 	fc_frame_hdr_t		*hp;
5019 	int			internal = 0;
5020 	int			alloc;
5021 	int			cmd_len;
5022 	int			resp_len;
5023 	int			res = DDI_FAILURE; /* default result */
5024 	int			rval = DDI_FAILURE;
5025 
5026 	ASSERT(opcode == LA_ELS_PLOGI || opcode == LA_ELS_PRLI);
5027 	ASSERT(ptgt->tgt_port == pptr);
5028 
5029 	FCP_TRACE(fcp_logq, pptr->port_instbuf,
5030 	    fcp_trace, FCP_BUF_LEVEL_5, 0,
5031 	    "fcp_send_els: d_id=0x%x ELS 0x%x (%s)", ptgt->tgt_d_id, opcode,
5032 	    (opcode == LA_ELS_PLOGI) ? "PLOGI" : "PRLI");
5033 
5034 	if (opcode == LA_ELS_PLOGI) {
5035 		cmd_len = sizeof (la_els_logi_t);
5036 		resp_len = sizeof (la_els_logi_t);
5037 	} else {
5038 		ASSERT(opcode == LA_ELS_PRLI);
5039 		cmd_len = sizeof (la_els_prli_t);
5040 		resp_len = sizeof (la_els_prli_t);
5041 	}
5042 
5043 	if (icmd == NULL) {
5044 		alloc = FCP_MAX(sizeof (la_els_logi_t),
5045 		    sizeof (la_els_prli_t));
5046 		icmd = fcp_icmd_alloc(pptr, ptgt, alloc, alloc, 0, 0,
5047 		    lcount, tcount, cause, FC_INVALID_RSCN_COUNT);
5048 		if (icmd == NULL) {
5049 			FCP_TGT_TRACE(ptgt, tcount, FCP_TGT_TRACE_10);
5050 			return (res);
5051 		}
5052 		internal++;
5053 	}
5054 	fpkt = icmd->ipkt_fpkt;
5055 
5056 	fpkt->pkt_cmdlen = cmd_len;
5057 	fpkt->pkt_rsplen = resp_len;
5058 	fpkt->pkt_datalen = 0;
5059 	icmd->ipkt_retries = 0;
5060 
5061 	/* fill in fpkt info */
5062 	fpkt->pkt_tran_flags = FC_TRAN_CLASS3 | FC_TRAN_INTR;
5063 	fpkt->pkt_tran_type = FC_PKT_EXCHANGE;
5064 	fpkt->pkt_timeout = FCP_ELS_TIMEOUT;
5065 
5066 	/* get ptr to frame hdr in fpkt */
5067 	hp = &fpkt->pkt_cmd_fhdr;
5068 
5069 	/*
5070 	 * fill in frame hdr
5071 	 */
5072 	hp->r_ctl = R_CTL_ELS_REQ;
5073 	hp->s_id = pptr->port_id;	/* source ID */
5074 	hp->d_id = ptgt->tgt_d_id;	/* dest ID */
5075 	hp->type = FC_TYPE_EXTENDED_LS;
5076 	hp->f_ctl = F_CTL_SEQ_INITIATIVE | F_CTL_FIRST_SEQ;
5077 	hp->seq_id = 0;
5078 	hp->rsvd = 0;
5079 	hp->df_ctl  = 0;
5080 	hp->seq_cnt = 0;
5081 	hp->ox_id = 0xffff;		/* i.e. none */
5082 	hp->rx_id = 0xffff;		/* i.e. none */
5083 	hp->ro = 0;
5084 
5085 	/*
5086 	 * at this point we have a filled in cmd pkt
5087 	 *
5088 	 * fill in the respective info, then use the transport to send
5089 	 * the packet
5090 	 *
5091 	 * for a PLOGI call fc_ulp_login(), and
5092 	 * for a PRLI call fc_ulp_issue_els()
5093 	 */
5094 	switch (opcode) {
5095 	case LA_ELS_PLOGI: {
5096 		struct la_els_logi logi;
5097 
5098 		bzero(&logi, sizeof (struct la_els_logi));
5099 
5100 		hp = &fpkt->pkt_cmd_fhdr;
5101 		hp->r_ctl = R_CTL_ELS_REQ;
5102 		logi.ls_code.ls_code = LA_ELS_PLOGI;
5103 		logi.ls_code.mbz = 0;
5104 
5105 		FCP_CP_OUT((uint8_t *)&logi, fpkt->pkt_cmd,
5106 		    fpkt->pkt_cmd_acc, sizeof (struct la_els_logi));
5107 
5108 		icmd->ipkt_opcode = LA_ELS_PLOGI;
5109 
5110 		mutex_enter(&pptr->port_mutex);
5111 		if (!FCP_TGT_STATE_CHANGED(ptgt, icmd)) {
5112 
5113 			mutex_exit(&pptr->port_mutex);
5114 
5115 			rval = fc_ulp_login(pptr->port_fp_handle, &fpkt, 1);
5116 			if (rval == FC_SUCCESS) {
5117 				res = DDI_SUCCESS;
5118 				break;
5119 			}
5120 
5121 			FCP_TGT_TRACE(ptgt, tcount, FCP_TGT_TRACE_11);
5122 
5123 			res = fcp_handle_ipkt_errors(pptr, ptgt, icmd,
5124 			    rval, "PLOGI");
5125 		} else {
5126 			FCP_TRACE(fcp_logq, pptr->port_instbuf,
5127 			    fcp_trace, FCP_BUF_LEVEL_5, 0,
5128 			    "fcp_send_els1: state change occured"
5129 			    " for D_ID=0x%x", ptgt->tgt_d_id);
5130 			mutex_exit(&pptr->port_mutex);
5131 			FCP_TGT_TRACE(ptgt, tcount, FCP_TGT_TRACE_12);
5132 		}
5133 		break;
5134 	}
5135 
5136 	case LA_ELS_PRLI: {
5137 		struct la_els_prli	prli;
5138 		struct fcp_prli		*fprli;
5139 
5140 		bzero(&prli, sizeof (struct la_els_prli));
5141 
5142 		hp = &fpkt->pkt_cmd_fhdr;
5143 		hp->r_ctl = R_CTL_ELS_REQ;
5144 
5145 		/* fill in PRLI cmd ELS fields */
5146 		prli.ls_code = LA_ELS_PRLI;
5147 		prli.page_length = 0x10;	/* huh? */
5148 		prli.payload_length = sizeof (struct la_els_prli);
5149 
5150 		icmd->ipkt_opcode = LA_ELS_PRLI;
5151 
5152 		/* get ptr to PRLI service params */
5153 		fprli = (struct fcp_prli *)prli.service_params;
5154 
5155 		/* fill in service params */
5156 		fprli->type = 0x08;
5157 		fprli->resvd1 = 0;
5158 		fprli->orig_process_assoc_valid = 0;
5159 		fprli->resp_process_assoc_valid = 0;
5160 		fprli->establish_image_pair = 1;
5161 		fprli->resvd2 = 0;
5162 		fprli->resvd3 = 0;
5163 		fprli->obsolete_1 = 0;
5164 		fprli->obsolete_2 = 0;
5165 		fprli->data_overlay_allowed = 0;
5166 		fprli->initiator_fn = 1;
5167 		fprli->confirmed_compl_allowed = 1;
5168 
5169 		if (fc_ulp_is_name_present("ltct") == FC_SUCCESS) {
5170 			fprli->target_fn = 1;
5171 		} else {
5172 			fprli->target_fn = 0;
5173 		}
5174 
5175 		fprli->retry = 1;
5176 		fprli->read_xfer_rdy_disabled = 1;
5177 		fprli->write_xfer_rdy_disabled = 0;
5178 
5179 		FCP_CP_OUT((uint8_t *)&prli, fpkt->pkt_cmd,
5180 		    fpkt->pkt_cmd_acc, sizeof (struct la_els_prli));
5181 
5182 		/* issue the PRLI request */
5183 
5184 		mutex_enter(&pptr->port_mutex);
5185 		if (!FCP_TGT_STATE_CHANGED(ptgt, icmd)) {
5186 
5187 			mutex_exit(&pptr->port_mutex);
5188 
5189 			rval = fc_ulp_issue_els(pptr->port_fp_handle, fpkt);
5190 			if (rval == FC_SUCCESS) {
5191 				res = DDI_SUCCESS;
5192 				break;
5193 			}
5194 
5195 			FCP_TGT_TRACE(ptgt, tcount, FCP_TGT_TRACE_13);
5196 
5197 			res = fcp_handle_ipkt_errors(pptr, ptgt, icmd,
5198 			    rval, "PRLI");
5199 		} else {
5200 			mutex_exit(&pptr->port_mutex);
5201 			FCP_TGT_TRACE(ptgt, tcount, FCP_TGT_TRACE_14);
5202 		}
5203 		break;
5204 	}
5205 
5206 	default:
5207 		fcp_log(CE_WARN, NULL, "!invalid ELS opcode=0x%x", opcode);
5208 		break;
5209 	}
5210 
5211 	FCP_TRACE(fcp_logq, pptr->port_instbuf,
5212 	    fcp_trace, FCP_BUF_LEVEL_5, 0,
5213 	    "fcp_send_els: returning %d", res);
5214 
5215 	if (res != DDI_SUCCESS) {
5216 		if (internal) {
5217 			fcp_icmd_free(pptr, icmd);
5218 		}
5219 	}
5220 
5221 	return (res);
5222 }
5223 
5224 
5225 /*
5226  * called internally update the state of all of the tgts and each LUN
5227  * for this port (i.e. each target  known to be attached to this port)
5228  * if they are not already offline
5229  *
5230  * must be called with the port mutex owned
5231  *
5232  * acquires and releases the target mutexes for each target attached
5233  * to this port
5234  */
5235 void
5236 fcp_update_state(struct fcp_port *pptr, uint32_t state, int cause)
5237 {
5238 	int i;
5239 	struct fcp_tgt *ptgt;
5240 
5241 	ASSERT(mutex_owned(&pptr->port_mutex));
5242 
5243 	for (i = 0; i < FCP_NUM_HASH; i++) {
5244 		for (ptgt = pptr->port_tgt_hash_table[i]; ptgt != NULL;
5245 		    ptgt = ptgt->tgt_next) {
5246 			mutex_enter(&ptgt->tgt_mutex);
5247 			fcp_update_tgt_state(ptgt, FCP_SET, state);
5248 			ptgt->tgt_change_cnt++;
5249 			ptgt->tgt_statec_cause = cause;
5250 			ptgt->tgt_tmp_cnt = 1;
5251 			ptgt->tgt_done = 0;
5252 			mutex_exit(&ptgt->tgt_mutex);
5253 		}
5254 	}
5255 }
5256 
5257 
5258 static void
5259 fcp_offline_all(struct fcp_port *pptr, int lcount, int cause)
5260 {
5261 	int i;
5262 	int ndevs;
5263 	struct fcp_tgt *ptgt;
5264 
5265 	ASSERT(mutex_owned(&pptr->port_mutex));
5266 
5267 	for (ndevs = 0, i = 0; i < FCP_NUM_HASH; i++) {
5268 		for (ptgt = pptr->port_tgt_hash_table[i]; ptgt != NULL;
5269 		    ptgt = ptgt->tgt_next) {
5270 			ndevs++;
5271 		}
5272 	}
5273 
5274 	if (ndevs == 0) {
5275 		return;
5276 	}
5277 	pptr->port_tmp_cnt = ndevs;
5278 
5279 	for (i = 0; i < FCP_NUM_HASH; i++) {
5280 		for (ptgt = pptr->port_tgt_hash_table[i]; ptgt != NULL;
5281 		    ptgt = ptgt->tgt_next) {
5282 			(void) fcp_call_finish_init_held(pptr, ptgt,
5283 			    lcount, ptgt->tgt_change_cnt, cause);
5284 		}
5285 	}
5286 }
5287 
5288 /*
5289  *     Function: fcp_update_tgt_state
5290  *
5291  *  Description: This function updates the field tgt_state of a target.  That
5292  *		 field is a bitmap and which bit can be set or reset
5293  *		 individually.  The action applied to the target state is also
5294  *		 applied to all the LUNs belonging to the target (provided the
5295  *		 LUN is not offline).  A side effect of applying the state
5296  *		 modification to the target and the LUNs is the field tgt_trace
5297  *		 of the target and lun_trace of the LUNs is set to zero.
5298  *
5299  *
5300  *     Argument: *ptgt	Target structure.
5301  *		 flag	Flag indication what action to apply (set/reset).
5302  *		 state	State bits to update.
5303  *
5304  * Return Value: None
5305  *
5306  *      Context: Interrupt, Kernel or User context.
5307  *		 The mutex of the target (ptgt->tgt_mutex) must be owned when
5308  *		 calling this function.
5309  */
5310 void
5311 fcp_update_tgt_state(struct fcp_tgt *ptgt, int flag, uint32_t state)
5312 {
5313 	struct fcp_lun *plun;
5314 
5315 	ASSERT(mutex_owned(&ptgt->tgt_mutex));
5316 
5317 	if (!(ptgt->tgt_state & FCP_TGT_OFFLINE)) {
5318 		/* The target is not offline. */
5319 		if (flag == FCP_SET) {
5320 			ptgt->tgt_state |= state;
5321 			ptgt->tgt_trace = 0;
5322 		} else {
5323 			ptgt->tgt_state &= ~state;
5324 		}
5325 
5326 		for (plun = ptgt->tgt_lun; plun != NULL;
5327 		    plun = plun->lun_next) {
5328 			if (!(plun->lun_state & FCP_LUN_OFFLINE)) {
5329 				/* The LUN is not offline. */
5330 				if (flag == FCP_SET) {
5331 					plun->lun_state |= state;
5332 					plun->lun_trace = 0;
5333 				} else {
5334 					plun->lun_state &= ~state;
5335 				}
5336 			}
5337 		}
5338 	}
5339 }
5340 
5341 /*
5342  *     Function: fcp_update_tgt_state
5343  *
5344  *  Description: This function updates the field lun_state of a LUN.  That
5345  *		 field is a bitmap and which bit can be set or reset
5346  *		 individually.
5347  *
5348  *     Argument: *plun	LUN structure.
5349  *		 flag	Flag indication what action to apply (set/reset).
5350  *		 state	State bits to update.
5351  *
5352  * Return Value: None
5353  *
5354  *      Context: Interrupt, Kernel or User context.
5355  *		 The mutex of the target (ptgt->tgt_mutex) must be owned when
5356  *		 calling this function.
5357  */
5358 void
5359 fcp_update_lun_state(struct fcp_lun *plun, int flag, uint32_t state)
5360 {
5361 	struct fcp_tgt	*ptgt = plun->lun_tgt;
5362 
5363 	ASSERT(mutex_owned(&ptgt->tgt_mutex));
5364 
5365 	if (!(plun->lun_state & FCP_TGT_OFFLINE)) {
5366 		flag == FCP_SET ? (plun->lun_state |= state) :
5367 		    (plun->lun_state &= ~state);
5368 	}
5369 }
5370 
5371 /*
5372  *     Function: fcp_get_port
5373  *
5374  *  Description: This function returns the fcp_port structure from the opaque
5375  *		 handle passed by the caller.  That opaque handle is the handle
5376  *		 used by fp/fctl to identify a particular local port.  That
5377  *		 handle has been stored in the corresponding fcp_port
5378  *		 structure.  This function is going to walk the global list of
5379  *		 fcp_port structures till one has a port_fp_handle that matches
5380  *		 the handle passed by the caller.  This function enters the
5381  *		 mutex fcp_global_mutex while walking the global list and then
5382  *		 releases it.
5383  *
5384  *     Argument: port_handle	Opaque handle that fp/fctl uses to identify a
5385  *				particular port.
5386  *
5387  * Return Value: NULL		Not found.
5388  *		 Not NULL	Pointer to the fcp_port structure.
5389  *
5390  *      Context: Interrupt, Kernel or User context.
5391  */
5392 static struct fcp_port *
5393 fcp_get_port(opaque_t port_handle)
5394 {
5395 	struct fcp_port *pptr;
5396 
5397 	ASSERT(port_handle != NULL);
5398 
5399 	mutex_enter(&fcp_global_mutex);
5400 	for (pptr = fcp_port_head; pptr != NULL; pptr = pptr->port_next) {
5401 		if (pptr->port_fp_handle == port_handle) {
5402 			break;
5403 		}
5404 	}
5405 	mutex_exit(&fcp_global_mutex);
5406 
5407 	return (pptr);
5408 }
5409 
5410 
5411 static void
5412 fcp_unsol_callback(fc_packet_t *fpkt)
5413 {
5414 	struct fcp_ipkt *icmd = (struct fcp_ipkt *)fpkt->pkt_ulp_private;
5415 	struct fcp_port *pptr = icmd->ipkt_port;
5416 
5417 	if (fpkt->pkt_state != FC_PKT_SUCCESS) {
5418 		caddr_t state, reason, action, expln;
5419 
5420 		(void) fc_ulp_pkt_error(fpkt, &state, &reason,
5421 		    &action, &expln);
5422 
5423 		fcp_log(CE_WARN, pptr->port_dip,
5424 		    "!couldn't post response to unsolicited request: "
5425 		    " state=%s reason=%s rx_id=%x ox_id=%x",
5426 		    state, reason, fpkt->pkt_cmd_fhdr.ox_id,
5427 		    fpkt->pkt_cmd_fhdr.rx_id);
5428 	}
5429 	fcp_icmd_free(pptr, icmd);
5430 }
5431 
5432 
5433 /*
5434  * Perform general purpose preparation of a response to an unsolicited request
5435  */
5436 static void
5437 fcp_unsol_resp_init(fc_packet_t *pkt, fc_unsol_buf_t *buf,
5438     uchar_t r_ctl, uchar_t type)
5439 {
5440 	pkt->pkt_cmd_fhdr.r_ctl = r_ctl;
5441 	pkt->pkt_cmd_fhdr.d_id = buf->ub_frame.s_id;
5442 	pkt->pkt_cmd_fhdr.s_id = buf->ub_frame.d_id;
5443 	pkt->pkt_cmd_fhdr.type = type;
5444 	pkt->pkt_cmd_fhdr.f_ctl = F_CTL_LAST_SEQ | F_CTL_XCHG_CONTEXT;
5445 	pkt->pkt_cmd_fhdr.seq_id = buf->ub_frame.seq_id;
5446 	pkt->pkt_cmd_fhdr.df_ctl  = buf->ub_frame.df_ctl;
5447 	pkt->pkt_cmd_fhdr.seq_cnt = buf->ub_frame.seq_cnt;
5448 	pkt->pkt_cmd_fhdr.ox_id = buf->ub_frame.ox_id;
5449 	pkt->pkt_cmd_fhdr.rx_id = buf->ub_frame.rx_id;
5450 	pkt->pkt_cmd_fhdr.ro = 0;
5451 	pkt->pkt_cmd_fhdr.rsvd = 0;
5452 	pkt->pkt_comp = fcp_unsol_callback;
5453 	pkt->pkt_pd = NULL;
5454 }
5455 
5456 
5457 /*ARGSUSED*/
5458 static int
5459 fcp_unsol_prli(struct fcp_port *pptr, fc_unsol_buf_t *buf)
5460 {
5461 	fc_packet_t		*fpkt;
5462 	struct la_els_prli	prli;
5463 	struct fcp_prli		*fprli;
5464 	struct fcp_ipkt	*icmd;
5465 	struct la_els_prli	*from;
5466 	struct fcp_prli		*orig;
5467 	struct fcp_tgt	*ptgt;
5468 	int			tcount = 0;
5469 	int			lcount;
5470 
5471 	from = (struct la_els_prli *)buf->ub_buffer;
5472 	orig = (struct fcp_prli *)from->service_params;
5473 
5474 	if ((ptgt = fcp_get_target_by_did(pptr, buf->ub_frame.s_id)) !=
5475 	    NULL) {
5476 		mutex_enter(&ptgt->tgt_mutex);
5477 		tcount = ptgt->tgt_change_cnt;
5478 		mutex_exit(&ptgt->tgt_mutex);
5479 	}
5480 	mutex_enter(&pptr->port_mutex);
5481 	lcount = pptr->port_link_cnt;
5482 	mutex_exit(&pptr->port_mutex);
5483 
5484 	if ((icmd = fcp_icmd_alloc(pptr, ptgt, sizeof (la_els_prli_t),
5485 	    sizeof (la_els_prli_t), 0, 0, lcount, tcount, 0,
5486 	    FC_INVALID_RSCN_COUNT)) == NULL) {
5487 		return (FC_FAILURE);
5488 	}
5489 	fpkt = icmd->ipkt_fpkt;
5490 	fpkt->pkt_tran_flags = FC_TRAN_CLASS3 | FC_TRAN_INTR;
5491 	fpkt->pkt_tran_type = FC_PKT_OUTBOUND;
5492 	fpkt->pkt_timeout = FCP_ELS_TIMEOUT;
5493 	fpkt->pkt_cmdlen = sizeof (la_els_prli_t);
5494 	fpkt->pkt_rsplen = 0;
5495 	fpkt->pkt_datalen = 0;
5496 
5497 	icmd->ipkt_opcode = LA_ELS_PRLI;
5498 
5499 	bzero(&prli, sizeof (struct la_els_prli));
5500 	fprli = (struct fcp_prli *)prli.service_params;
5501 	prli.ls_code = LA_ELS_ACC;
5502 	prli.page_length = 0x10;
5503 	prli.payload_length = sizeof (struct la_els_prli);
5504 
5505 	/* fill in service params */
5506 	fprli->type = 0x08;
5507 	fprli->resvd1 = 0;
5508 	fprli->orig_process_assoc_valid = orig->orig_process_assoc_valid;
5509 	fprli->orig_process_associator = orig->orig_process_associator;
5510 	fprli->resp_process_assoc_valid = 0;
5511 	fprli->establish_image_pair = 1;
5512 	fprli->resvd2 = 0;
5513 	fprli->resvd3 = 0;
5514 	fprli->obsolete_1 = 0;
5515 	fprli->obsolete_2 = 0;
5516 	fprli->data_overlay_allowed = 0;
5517 	fprli->initiator_fn = 1;
5518 	fprli->confirmed_compl_allowed = 1;
5519 
5520 	if (fc_ulp_is_name_present("ltct") == FC_SUCCESS) {
5521 		fprli->target_fn = 1;
5522 	} else {
5523 		fprli->target_fn = 0;
5524 	}
5525 
5526 	fprli->retry = 1;
5527 	fprli->read_xfer_rdy_disabled = 1;
5528 	fprli->write_xfer_rdy_disabled = 0;
5529 
5530 	/* save the unsol prli payload first */
5531 	FCP_CP_OUT((uint8_t *)from, fpkt->pkt_resp,
5532 	    fpkt->pkt_resp_acc, sizeof (struct la_els_prli));
5533 
5534 	FCP_CP_OUT((uint8_t *)&prli, fpkt->pkt_cmd,
5535 	    fpkt->pkt_cmd_acc, sizeof (struct la_els_prli));
5536 
5537 	fcp_unsol_resp_init(fpkt, buf, R_CTL_ELS_RSP, FC_TYPE_EXTENDED_LS);
5538 
5539 	mutex_enter(&pptr->port_mutex);
5540 	if (!FCP_LINK_STATE_CHANGED(pptr, icmd)) {
5541 		int rval;
5542 		mutex_exit(&pptr->port_mutex);
5543 
5544 		if ((rval = fc_ulp_issue_els(pptr->port_fp_handle, fpkt)) !=
5545 		    FC_SUCCESS) {
5546 			if (rval == FC_STATEC_BUSY || rval == FC_OFFLINE) {
5547 				fcp_queue_ipkt(pptr, fpkt);
5548 				return (FC_SUCCESS);
5549 			}
5550 			/* Let it timeout */
5551 			fcp_icmd_free(pptr, icmd);
5552 			return (FC_FAILURE);
5553 		}
5554 	} else {
5555 		mutex_exit(&pptr->port_mutex);
5556 		fcp_icmd_free(pptr, icmd);
5557 		return (FC_FAILURE);
5558 	}
5559 
5560 	(void) fc_ulp_ubrelease(pptr->port_fp_handle, 1, &buf->ub_token);
5561 
5562 	return (FC_SUCCESS);
5563 }
5564 
5565 /*
5566  *     Function: fcp_icmd_alloc
5567  *
5568  *  Description: This function allocated a fcp_ipkt structure.  The pkt_comp
5569  *		 field is initialized to fcp_icmd_callback.  Sometimes it is
5570  *		 modified by the caller (such as fcp_send_scsi).  The
5571  *		 structure is also tied to the state of the line and of the
5572  *		 target at a particular time.  That link is established by
5573  *		 setting the fields ipkt_link_cnt and ipkt_change_cnt to lcount
5574  *		 and tcount which came respectively from pptr->link_cnt and
5575  *		 ptgt->tgt_change_cnt.
5576  *
5577  *     Argument: *pptr		Fcp port.
5578  *		 *ptgt		Target (destination of the command).
5579  *		 cmd_len	Length of the command.
5580  *		 resp_len	Length of the expected response.
5581  *		 data_len	Length of the data.
5582  *		 nodma		Indicates weither the command and response.
5583  *				will be transfer through DMA or not.
5584  *		 lcount		Link state change counter.
5585  *		 tcount		Target state change counter.
5586  *		 cause		Reason that lead to this call.
5587  *
5588  * Return Value: NULL		Failed.
5589  *		 Not NULL	Internal packet address.
5590  */
5591 static struct fcp_ipkt *
5592 fcp_icmd_alloc(struct fcp_port *pptr, struct fcp_tgt *ptgt, int cmd_len,
5593     int resp_len, int data_len, int nodma, int lcount, int tcount, int cause,
5594     uint32_t rscn_count)
5595 {
5596 	int			dma_setup = 0;
5597 	fc_packet_t		*fpkt;
5598 	struct fcp_ipkt	*icmd = NULL;
5599 
5600 	icmd = kmem_zalloc(sizeof (struct fcp_ipkt) +
5601 	    pptr->port_dmacookie_sz + pptr->port_priv_pkt_len,
5602 	    KM_NOSLEEP);
5603 	if (icmd == NULL) {
5604 		fcp_log(CE_WARN, pptr->port_dip,
5605 		    "!internal packet allocation failed");
5606 		return (NULL);
5607 	}
5608 
5609 	/*
5610 	 * initialize the allocated packet
5611 	 */
5612 	icmd->ipkt_nodma = nodma;
5613 	icmd->ipkt_next = icmd->ipkt_prev = NULL;
5614 	icmd->ipkt_lun = NULL;
5615 
5616 	icmd->ipkt_link_cnt = lcount;
5617 	icmd->ipkt_change_cnt = tcount;
5618 	icmd->ipkt_cause = cause;
5619 
5620 	mutex_enter(&pptr->port_mutex);
5621 	icmd->ipkt_port = pptr;
5622 	mutex_exit(&pptr->port_mutex);
5623 
5624 	/* keep track of amt of data to be sent in pkt */
5625 	icmd->ipkt_cmdlen = cmd_len;
5626 	icmd->ipkt_resplen = resp_len;
5627 	icmd->ipkt_datalen = data_len;
5628 
5629 	/* set up pkt's ptr to the fc_packet_t struct, just after the ipkt */
5630 	icmd->ipkt_fpkt = (fc_packet_t *)(&icmd->ipkt_fc_packet);
5631 
5632 	/* set pkt's private ptr to point to cmd pkt */
5633 	icmd->ipkt_fpkt->pkt_ulp_private = (opaque_t)icmd;
5634 
5635 	/* set FCA private ptr to memory just beyond */
5636 	icmd->ipkt_fpkt->pkt_fca_private = (opaque_t)
5637 	    ((char *)icmd + sizeof (struct fcp_ipkt) +
5638 	    pptr->port_dmacookie_sz);
5639 
5640 	/* get ptr to fpkt substruct and fill it in */
5641 	fpkt = icmd->ipkt_fpkt;
5642 	fpkt->pkt_data_cookie = (ddi_dma_cookie_t *)((caddr_t)icmd +
5643 					    sizeof (struct fcp_ipkt));
5644 
5645 	if (ptgt != NULL) {
5646 		icmd->ipkt_tgt = ptgt;
5647 		fpkt->pkt_fca_device = ptgt->tgt_fca_dev;
5648 	}
5649 
5650 	fpkt->pkt_comp = fcp_icmd_callback;
5651 	fpkt->pkt_tran_flags = (FC_TRAN_CLASS3 | FC_TRAN_INTR);
5652 	fpkt->pkt_cmdlen = cmd_len;
5653 	fpkt->pkt_rsplen = resp_len;
5654 	fpkt->pkt_datalen = data_len;
5655 
5656 	/*
5657 	 * The pkt_ulp_rscn_infop (aka pkt_ulp_rsvd1) field is used to pass the
5658 	 * rscn_count as fcp knows down to the transport. If a valid count was
5659 	 * passed into this function, we allocate memory to actually pass down
5660 	 * this info.
5661 	 *
5662 	 * BTW, if the kmem_zalloc fails, we won't try too hard. This will
5663 	 * basically mean that fcp will not be able to help transport
5664 	 * distinguish if a new RSCN has come after fcp was last informed about
5665 	 * it. In such cases, it might lead to the problem mentioned in CR/bug #
5666 	 * 5068068 where the device might end up going offline in case of RSCN
5667 	 * storms.
5668 	 */
5669 	fpkt->pkt_ulp_rscn_infop = NULL;
5670 	if (rscn_count != FC_INVALID_RSCN_COUNT) {
5671 		fpkt->pkt_ulp_rscn_infop = kmem_zalloc(
5672 		    sizeof (fc_ulp_rscn_info_t), KM_NOSLEEP);
5673 		if (fpkt->pkt_ulp_rscn_infop == NULL) {
5674 			FCP_TRACE(fcp_logq, pptr->port_instbuf,
5675 			    fcp_trace, FCP_BUF_LEVEL_6, 0,
5676 			    "Failed to alloc memory to pass rscn info");
5677 		}
5678 	}
5679 
5680 	if (fpkt->pkt_ulp_rscn_infop != NULL) {
5681 		fc_ulp_rscn_info_t	*rscnp;
5682 
5683 		rscnp = (fc_ulp_rscn_info_t *)fpkt->pkt_ulp_rscn_infop;
5684 		rscnp->ulp_rscn_count = rscn_count;
5685 	}
5686 
5687 	if (fcp_alloc_dma(pptr, icmd, nodma, KM_NOSLEEP) != FC_SUCCESS) {
5688 		goto fail;
5689 	}
5690 	dma_setup++;
5691 
5692 	/*
5693 	 * Must hold target mutex across setting of pkt_pd and call to
5694 	 * fc_ulp_init_packet to ensure the handle to the target doesn't go
5695 	 * away while we're not looking.
5696 	 */
5697 	if (ptgt != NULL) {
5698 		mutex_enter(&ptgt->tgt_mutex);
5699 		fpkt->pkt_pd = ptgt->tgt_pd_handle;
5700 
5701 		/* ask transport to do its initialization on this pkt */
5702 		if (fc_ulp_init_packet(pptr->port_fp_handle, fpkt, KM_NOSLEEP)
5703 		    != FC_SUCCESS) {
5704 			FCP_TRACE(fcp_logq, pptr->port_instbuf,
5705 			    fcp_trace, FCP_BUF_LEVEL_6, 0,
5706 			    "fc_ulp_init_packet failed");
5707 			mutex_exit(&ptgt->tgt_mutex);
5708 			goto fail;
5709 		}
5710 		mutex_exit(&ptgt->tgt_mutex);
5711 	} else {
5712 		if (fc_ulp_init_packet(pptr->port_fp_handle, fpkt, KM_NOSLEEP)
5713 		    != FC_SUCCESS) {
5714 			FCP_TRACE(fcp_logq, pptr->port_instbuf,
5715 			    fcp_trace, FCP_BUF_LEVEL_6, 0,
5716 			    "fc_ulp_init_packet failed");
5717 			goto fail;
5718 		}
5719 	}
5720 
5721 	mutex_enter(&pptr->port_mutex);
5722 	if (pptr->port_state & (FCP_STATE_DETACHING |
5723 	    FCP_STATE_SUSPENDED | FCP_STATE_POWER_DOWN)) {
5724 		int rval;
5725 
5726 		mutex_exit(&pptr->port_mutex);
5727 
5728 		rval = fc_ulp_uninit_packet(pptr->port_fp_handle, fpkt);
5729 		ASSERT(rval == FC_SUCCESS);
5730 
5731 		goto fail;
5732 	}
5733 
5734 	if (ptgt != NULL) {
5735 		mutex_enter(&ptgt->tgt_mutex);
5736 		ptgt->tgt_ipkt_cnt++;
5737 		mutex_exit(&ptgt->tgt_mutex);
5738 	}
5739 
5740 	pptr->port_ipkt_cnt++;
5741 
5742 	mutex_exit(&pptr->port_mutex);
5743 
5744 	return (icmd);
5745 
5746 fail:
5747 	if (fpkt->pkt_ulp_rscn_infop != NULL) {
5748 		kmem_free(fpkt->pkt_ulp_rscn_infop,
5749 		    sizeof (fc_ulp_rscn_info_t));
5750 		fpkt->pkt_ulp_rscn_infop = NULL;
5751 	}
5752 
5753 	if (dma_setup) {
5754 		fcp_free_dma(pptr, icmd);
5755 	}
5756 	kmem_free(icmd, sizeof (struct fcp_ipkt) + pptr->port_priv_pkt_len +
5757 	    (size_t)pptr->port_dmacookie_sz);
5758 
5759 	return (NULL);
5760 }
5761 
5762 /*
5763  *     Function: fcp_icmd_free
5764  *
5765  *  Description: Frees the internal command passed by the caller.
5766  *
5767  *     Argument: *pptr		Fcp port.
5768  *		 *icmd		Internal packet to free.
5769  *
5770  * Return Value: None
5771  */
5772 static void
5773 fcp_icmd_free(struct fcp_port *pptr, struct fcp_ipkt *icmd)
5774 {
5775 	struct fcp_tgt	*ptgt = icmd->ipkt_tgt;
5776 
5777 	/* Let the underlying layers do their cleanup. */
5778 	(void) fc_ulp_uninit_packet(pptr->port_fp_handle,
5779 	    icmd->ipkt_fpkt);
5780 
5781 	if (icmd->ipkt_fpkt->pkt_ulp_rscn_infop) {
5782 		kmem_free(icmd->ipkt_fpkt->pkt_ulp_rscn_infop,
5783 		    sizeof (fc_ulp_rscn_info_t));
5784 	}
5785 
5786 	fcp_free_dma(pptr, icmd);
5787 
5788 	kmem_free(icmd, sizeof (struct fcp_ipkt) + pptr->port_priv_pkt_len +
5789 	    (size_t)pptr->port_dmacookie_sz);
5790 
5791 	mutex_enter(&pptr->port_mutex);
5792 
5793 	if (ptgt) {
5794 		mutex_enter(&ptgt->tgt_mutex);
5795 		ptgt->tgt_ipkt_cnt--;
5796 		mutex_exit(&ptgt->tgt_mutex);
5797 	}
5798 
5799 	pptr->port_ipkt_cnt--;
5800 	mutex_exit(&pptr->port_mutex);
5801 }
5802 
5803 /*
5804  *     Function: fcp_alloc_dma
5805  *
5806  *  Description: Allocated the DMA resources required for the internal
5807  *		 packet.
5808  *
5809  *     Argument: *pptr	FCP port.
5810  *		 *icmd	Internal FCP packet.
5811  *		 nodma	Indicates if the Cmd and Resp will be DMAed.
5812  *		 flags	Allocation flags (Sleep or NoSleep).
5813  *
5814  * Return Value: FC_SUCCESS
5815  *		 FC_NOMEM
5816  */
5817 static int
5818 fcp_alloc_dma(struct fcp_port *pptr, struct fcp_ipkt *icmd,
5819     int nodma, int flags)
5820 {
5821 	int		rval;
5822 	size_t		real_size;
5823 	uint_t		ccount;
5824 	int		bound = 0;
5825 	int		cmd_resp = 0;
5826 	fc_packet_t	*fpkt;
5827 	ddi_dma_cookie_t	pkt_data_cookie;
5828 	ddi_dma_cookie_t	*cp;
5829 	uint32_t		cnt;
5830 
5831 	fpkt = &icmd->ipkt_fc_packet;
5832 
5833 	ASSERT(fpkt->pkt_cmd_dma == NULL && fpkt->pkt_data_dma == NULL &&
5834 	    fpkt->pkt_resp_dma == NULL);
5835 
5836 	icmd->ipkt_nodma = nodma;
5837 
5838 	if (nodma) {
5839 		fpkt->pkt_cmd = kmem_zalloc(fpkt->pkt_cmdlen, flags);
5840 		if (fpkt->pkt_cmd == NULL) {
5841 			goto fail;
5842 		}
5843 
5844 		fpkt->pkt_resp = kmem_zalloc(fpkt->pkt_rsplen, flags);
5845 		if (fpkt->pkt_resp == NULL) {
5846 			goto fail;
5847 		}
5848 	} else {
5849 		ASSERT(fpkt->pkt_cmdlen && fpkt->pkt_rsplen);
5850 
5851 		rval = fcp_alloc_cmd_resp(pptr, fpkt, flags);
5852 		if (rval == FC_FAILURE) {
5853 			ASSERT(fpkt->pkt_cmd_dma == NULL &&
5854 			    fpkt->pkt_resp_dma == NULL);
5855 			goto fail;
5856 		}
5857 		cmd_resp++;
5858 	}
5859 
5860 	if (fpkt->pkt_datalen != 0) {
5861 		/*
5862 		 * set up DMA handle and memory for the data in this packet
5863 		 */
5864 		if (ddi_dma_alloc_handle(pptr->port_dip,
5865 		    &pptr->port_data_dma_attr, DDI_DMA_DONTWAIT,
5866 		    NULL, &fpkt->pkt_data_dma) != DDI_SUCCESS) {
5867 			goto fail;
5868 		}
5869 
5870 		if (ddi_dma_mem_alloc(fpkt->pkt_data_dma, fpkt->pkt_datalen,
5871 		    &pptr->port_dma_acc_attr, DDI_DMA_CONSISTENT,
5872 		    DDI_DMA_DONTWAIT, NULL, &fpkt->pkt_data,
5873 		    &real_size, &fpkt->pkt_data_acc) != DDI_SUCCESS) {
5874 			goto fail;
5875 		}
5876 
5877 		/* was DMA mem size gotten < size asked for/needed ?? */
5878 		if (real_size < fpkt->pkt_datalen) {
5879 			goto fail;
5880 		}
5881 
5882 		/* bind DMA address and handle together */
5883 		if (ddi_dma_addr_bind_handle(fpkt->pkt_data_dma,
5884 		    NULL, fpkt->pkt_data, real_size, DDI_DMA_READ |
5885 		    DDI_DMA_CONSISTENT, DDI_DMA_DONTWAIT, NULL,
5886 		    &pkt_data_cookie, &ccount) != DDI_DMA_MAPPED) {
5887 			goto fail;
5888 		}
5889 		bound++;
5890 
5891 		if (ccount > pptr->port_data_dma_attr.dma_attr_sgllen) {
5892 			goto fail;
5893 		}
5894 
5895 		fpkt->pkt_data_cookie_cnt = ccount;
5896 
5897 		cp = fpkt->pkt_data_cookie;
5898 		*cp = pkt_data_cookie;
5899 		cp++;
5900 
5901 		for (cnt = 1; cnt < ccount; cnt++, cp++) {
5902 			ddi_dma_nextcookie(fpkt->pkt_data_dma,
5903 			    &pkt_data_cookie);
5904 			*cp = pkt_data_cookie;
5905 		}
5906 
5907 	}
5908 
5909 	return (FC_SUCCESS);
5910 
5911 fail:
5912 	if (bound) {
5913 		(void) ddi_dma_unbind_handle(fpkt->pkt_data_dma);
5914 	}
5915 
5916 	if (fpkt->pkt_data_dma) {
5917 		if (fpkt->pkt_data) {
5918 			ddi_dma_mem_free(&fpkt->pkt_data_acc);
5919 		}
5920 		ddi_dma_free_handle(&fpkt->pkt_data_dma);
5921 	}
5922 
5923 	if (nodma) {
5924 		if (fpkt->pkt_cmd) {
5925 			kmem_free(fpkt->pkt_cmd, fpkt->pkt_cmdlen);
5926 		}
5927 		if (fpkt->pkt_resp) {
5928 			kmem_free(fpkt->pkt_resp, fpkt->pkt_rsplen);
5929 		}
5930 	} else {
5931 		if (cmd_resp) {
5932 			fcp_free_cmd_resp(pptr, fpkt);
5933 		}
5934 	}
5935 
5936 	return (FC_NOMEM);
5937 }
5938 
5939 
5940 static void
5941 fcp_free_dma(struct fcp_port *pptr, struct fcp_ipkt *icmd)
5942 {
5943 	fc_packet_t *fpkt = icmd->ipkt_fpkt;
5944 
5945 	if (fpkt->pkt_data_dma) {
5946 		(void) ddi_dma_unbind_handle(fpkt->pkt_data_dma);
5947 		if (fpkt->pkt_data) {
5948 			ddi_dma_mem_free(&fpkt->pkt_data_acc);
5949 		}
5950 		ddi_dma_free_handle(&fpkt->pkt_data_dma);
5951 	}
5952 
5953 	if (icmd->ipkt_nodma) {
5954 		if (fpkt->pkt_cmd) {
5955 			kmem_free(fpkt->pkt_cmd, icmd->ipkt_cmdlen);
5956 		}
5957 		if (fpkt->pkt_resp) {
5958 			kmem_free(fpkt->pkt_resp, icmd->ipkt_resplen);
5959 		}
5960 	} else {
5961 		ASSERT(fpkt->pkt_resp_dma != NULL && fpkt->pkt_cmd_dma != NULL);
5962 
5963 		fcp_free_cmd_resp(pptr, fpkt);
5964 	}
5965 }
5966 
5967 /*
5968  *     Function: fcp_lookup_target
5969  *
5970  *  Description: Finds a target given a WWN.
5971  *
5972  *     Argument: *pptr	FCP port.
5973  *		 *wwn	World Wide Name of the device to look for.
5974  *
5975  * Return Value: NULL		No target found
5976  *		 Not NULL	Target structure
5977  *
5978  *      Context: Interrupt context.
5979  *		 The mutex pptr->port_mutex must be owned.
5980  */
5981 /* ARGSUSED */
5982 static struct fcp_tgt *
5983 fcp_lookup_target(struct fcp_port *pptr, uchar_t *wwn)
5984 {
5985 	int			hash;
5986 	struct fcp_tgt	*ptgt;
5987 
5988 	ASSERT(mutex_owned(&pptr->port_mutex));
5989 
5990 	hash = FCP_HASH(wwn);
5991 
5992 	for (ptgt = pptr->port_tgt_hash_table[hash]; ptgt != NULL;
5993 	    ptgt = ptgt->tgt_next) {
5994 		if (!(ptgt->tgt_state & FCP_TGT_ORPHAN) &&
5995 		    bcmp((caddr_t)wwn, (caddr_t)&ptgt->tgt_port_wwn.raw_wwn[0],
5996 		    sizeof (ptgt->tgt_port_wwn)) == 0) {
5997 			break;
5998 		}
5999 	}
6000 
6001 	return (ptgt);
6002 }
6003 
6004 
6005 /*
6006  * Find target structure given a port identifier
6007  */
6008 static struct fcp_tgt *
6009 fcp_get_target_by_did(struct fcp_port *pptr, uint32_t d_id)
6010 {
6011 	fc_portid_t		port_id;
6012 	la_wwn_t		pwwn;
6013 	struct fcp_tgt	*ptgt = NULL;
6014 
6015 	port_id.priv_lilp_posit = 0;
6016 	port_id.port_id = d_id;
6017 	if (fc_ulp_get_pwwn_by_did(pptr->port_fp_handle, port_id,
6018 	    &pwwn) == FC_SUCCESS) {
6019 		mutex_enter(&pptr->port_mutex);
6020 		ptgt = fcp_lookup_target(pptr, pwwn.raw_wwn);
6021 		mutex_exit(&pptr->port_mutex);
6022 	}
6023 
6024 	return (ptgt);
6025 }
6026 
6027 
6028 /*
6029  * the packet completion callback routine for info cmd pkts
6030  *
6031  * this means fpkt pts to a response to either a PLOGI or a PRLI
6032  *
6033  * if there is an error an attempt is made to call a routine to resend
6034  * the command that failed
6035  */
6036 static void
6037 fcp_icmd_callback(fc_packet_t *fpkt)
6038 {
6039 	struct fcp_ipkt	*icmd;
6040 	struct fcp_port	*pptr;
6041 	struct fcp_tgt	*ptgt;
6042 	struct la_els_prli	*prli;
6043 	struct la_els_prli	prli_s;
6044 	struct fcp_prli		*fprli;
6045 	struct fcp_lun	*plun;
6046 	int		free_pkt = 1;
6047 	int		rval;
6048 	ls_code_t	resp;
6049 	uchar_t		prli_acc = 0;
6050 	uint32_t	rscn_count = FC_INVALID_RSCN_COUNT;
6051 	int		lun0_newalloc;
6052 
6053 	icmd = (struct fcp_ipkt *)fpkt->pkt_ulp_private;
6054 
6055 	/* get ptrs to the port and target structs for the cmd */
6056 	pptr = icmd->ipkt_port;
6057 	ptgt = icmd->ipkt_tgt;
6058 
6059 	FCP_CP_IN(fpkt->pkt_resp, &resp, fpkt->pkt_resp_acc, sizeof (resp));
6060 
6061 	if (icmd->ipkt_opcode == LA_ELS_PRLI) {
6062 		FCP_CP_IN(fpkt->pkt_cmd, &prli_s, fpkt->pkt_cmd_acc,
6063 		    sizeof (prli_s));
6064 		prli_acc = (prli_s.ls_code == LA_ELS_ACC);
6065 	}
6066 
6067 	FCP_TRACE(fcp_logq, pptr->port_instbuf,
6068 	    fcp_trace, FCP_BUF_LEVEL_2, 0,
6069 	    "ELS (%x) callback state=0x%x reason=0x%x for %x",
6070 	    icmd->ipkt_opcode, fpkt->pkt_state, fpkt->pkt_reason,
6071 	    ptgt->tgt_d_id);
6072 
6073 	if ((fpkt->pkt_state == FC_PKT_SUCCESS) &&
6074 	    ((resp.ls_code == LA_ELS_ACC) || prli_acc)) {
6075 
6076 		mutex_enter(&ptgt->tgt_mutex);
6077 		if (ptgt->tgt_pd_handle == NULL) {
6078 			/*
6079 			 * in a fabric environment the port device handles
6080 			 * get created only after successful LOGIN into the
6081 			 * transport, so the transport makes this port
6082 			 * device (pd) handle available in this packet, so
6083 			 * save it now
6084 			 */
6085 			ASSERT(fpkt->pkt_pd != NULL);
6086 			ptgt->tgt_pd_handle = fpkt->pkt_pd;
6087 		}
6088 		mutex_exit(&ptgt->tgt_mutex);
6089 
6090 		/* which ELS cmd is this response for ?? */
6091 		switch (icmd->ipkt_opcode) {
6092 		case LA_ELS_PLOGI:
6093 			FCP_TRACE(fcp_logq, pptr->port_instbuf,
6094 			    fcp_trace, FCP_BUF_LEVEL_5, 0,
6095 			    "PLOGI to d_id=0x%x succeeded, wwn=%08x%08x",
6096 			    ptgt->tgt_d_id,
6097 			    *((int *)&ptgt->tgt_port_wwn.raw_wwn[0]),
6098 			    *((int *)&ptgt->tgt_port_wwn.raw_wwn[4]));
6099 
6100 			FCP_TGT_TRACE(ptgt, icmd->ipkt_change_cnt,
6101 			    FCP_TGT_TRACE_15);
6102 
6103 			/* Note that we are not allocating a new icmd */
6104 			if (fcp_send_els(pptr, ptgt, icmd, LA_ELS_PRLI,
6105 			    icmd->ipkt_link_cnt, icmd->ipkt_change_cnt,
6106 			    icmd->ipkt_cause) != DDI_SUCCESS) {
6107 				FCP_TGT_TRACE(ptgt, icmd->ipkt_change_cnt,
6108 				    FCP_TGT_TRACE_16);
6109 				goto fail;
6110 			}
6111 			break;
6112 
6113 		case LA_ELS_PRLI:
6114 			FCP_TRACE(fcp_logq, pptr->port_instbuf,
6115 			    fcp_trace, FCP_BUF_LEVEL_5, 0,
6116 			    "PRLI to d_id=0x%x succeeded", ptgt->tgt_d_id);
6117 
6118 			FCP_TGT_TRACE(ptgt, icmd->ipkt_change_cnt,
6119 			    FCP_TGT_TRACE_17);
6120 
6121 			prli = &prli_s;
6122 
6123 			FCP_CP_IN(fpkt->pkt_resp, prli, fpkt->pkt_resp_acc,
6124 			    sizeof (prli_s));
6125 
6126 			fprli = (struct fcp_prli *)prli->service_params;
6127 
6128 			mutex_enter(&ptgt->tgt_mutex);
6129 			ptgt->tgt_icap = fprli->initiator_fn;
6130 			ptgt->tgt_tcap = fprli->target_fn;
6131 			mutex_exit(&ptgt->tgt_mutex);
6132 
6133 			if ((fprli->type != 0x08) || (fprli->target_fn != 1)) {
6134 				/*
6135 				 * this FCP device does not support target mode
6136 				 */
6137 				FCP_TGT_TRACE(ptgt, icmd->ipkt_change_cnt,
6138 				    FCP_TGT_TRACE_18);
6139 				goto fail;
6140 			}
6141 			if (fprli->retry == 1) {
6142 				fc_ulp_disable_relogin(pptr->port_fp_handle,
6143 				    &ptgt->tgt_port_wwn);
6144 			}
6145 
6146 			/* target is no longer offline */
6147 			mutex_enter(&pptr->port_mutex);
6148 			mutex_enter(&ptgt->tgt_mutex);
6149 			if (!FCP_TGT_STATE_CHANGED(ptgt, icmd)) {
6150 				ptgt->tgt_state &= ~(FCP_TGT_OFFLINE |
6151 				    FCP_TGT_MARK);
6152 			} else {
6153 				FCP_TRACE(fcp_logq, pptr->port_instbuf,
6154 				    fcp_trace, FCP_BUF_LEVEL_2, 0,
6155 				    "fcp_icmd_callback,1: state change "
6156 				    " occured for D_ID=0x%x", ptgt->tgt_d_id);
6157 				mutex_exit(&ptgt->tgt_mutex);
6158 				mutex_exit(&pptr->port_mutex);
6159 				goto fail;
6160 			}
6161 			mutex_exit(&ptgt->tgt_mutex);
6162 			mutex_exit(&pptr->port_mutex);
6163 
6164 			/*
6165 			 * lun 0 should always respond to inquiry, so
6166 			 * get the LUN struct for LUN 0
6167 			 *
6168 			 * Currently we deal with first level of addressing.
6169 			 * If / when we start supporting 0x device types
6170 			 * (DTYPE_ARRAY_CTRL, i.e. array controllers)
6171 			 * this logic will need revisiting.
6172 			 */
6173 			lun0_newalloc = 0;
6174 			if ((plun = fcp_get_lun(ptgt, 0)) == NULL) {
6175 				/*
6176 				 * no LUN struct for LUN 0 yet exists,
6177 				 * so create one
6178 				 */
6179 				plun = fcp_alloc_lun(ptgt);
6180 				if (plun == NULL) {
6181 					fcp_log(CE_WARN, pptr->port_dip,
6182 					    "!Failed to allocate lun 0 for"
6183 					    " D_ID=%x", ptgt->tgt_d_id);
6184 					goto fail;
6185 				}
6186 				lun0_newalloc = 1;
6187 			}
6188 
6189 			/* fill in LUN info */
6190 			mutex_enter(&ptgt->tgt_mutex);
6191 			/*
6192 			 * consider lun 0 as device not connected if it is
6193 			 * offlined or newly allocated
6194 			 */
6195 			if (plun->lun_state & FCP_LUN_OFFLINE || lun0_newalloc)
6196 				plun->lun_state |= FCP_LUN_DEVICE_NOT_CONNECTED;
6197 			plun->lun_state |= (FCP_LUN_BUSY | FCP_LUN_MARK);
6198 			plun->lun_state &= ~FCP_LUN_OFFLINE;
6199 			ptgt->tgt_lun_cnt = 1;
6200 			ptgt->tgt_report_lun_cnt = 0;
6201 			mutex_exit(&ptgt->tgt_mutex);
6202 
6203 			/* Retrieve the rscn count (if a valid one exists) */
6204 			if (icmd->ipkt_fpkt->pkt_ulp_rscn_infop != NULL) {
6205 				rscn_count = ((fc_ulp_rscn_info_t *)
6206 				    (icmd->ipkt_fpkt->pkt_ulp_rscn_infop))
6207 				    ->ulp_rscn_count;
6208 			} else {
6209 				rscn_count = FC_INVALID_RSCN_COUNT;
6210 			}
6211 
6212 			/* send Report Lun request to target */
6213 			if (fcp_send_scsi(plun, SCMD_REPORT_LUN,
6214 			    sizeof (struct fcp_reportlun_resp),
6215 			    icmd->ipkt_link_cnt, icmd->ipkt_change_cnt,
6216 			    icmd->ipkt_cause, rscn_count) != DDI_SUCCESS) {
6217 				mutex_enter(&pptr->port_mutex);
6218 				if (!FCP_TGT_STATE_CHANGED(ptgt, icmd)) {
6219 					fcp_log(CE_WARN, pptr->port_dip,
6220 					    "!Failed to send REPORT LUN to"
6221 					    "  D_ID=%x", ptgt->tgt_d_id);
6222 				} else {
6223 					FCP_TRACE(fcp_logq,
6224 					    pptr->port_instbuf, fcp_trace,
6225 					    FCP_BUF_LEVEL_5, 0,
6226 					    "fcp_icmd_callback,2:state change"
6227 					    " occured for D_ID=0x%x",
6228 					    ptgt->tgt_d_id);
6229 				}
6230 				mutex_exit(&pptr->port_mutex);
6231 
6232 				FCP_TGT_TRACE(ptgt, icmd->ipkt_change_cnt,
6233 				    FCP_TGT_TRACE_19);
6234 
6235 				goto fail;
6236 			} else {
6237 				free_pkt = 0;
6238 				fcp_icmd_free(pptr, icmd);
6239 			}
6240 			break;
6241 
6242 		default:
6243 			fcp_log(CE_WARN, pptr->port_dip,
6244 			    "!fcp_icmd_callback Invalid opcode");
6245 			goto fail;
6246 		}
6247 
6248 		return;
6249 	}
6250 
6251 
6252 	/*
6253 	 * Other PLOGI failures are not retried as the
6254 	 * transport does it already
6255 	 */
6256 	if (icmd->ipkt_opcode != LA_ELS_PLOGI) {
6257 		if (fcp_is_retryable(icmd) &&
6258 		    icmd->ipkt_retries++ < FCP_MAX_RETRIES) {
6259 
6260 			if (FCP_MUST_RETRY(fpkt)) {
6261 				fcp_queue_ipkt(pptr, fpkt);
6262 				return;
6263 			}
6264 
6265 			FCP_TRACE(fcp_logq, pptr->port_instbuf,
6266 			    fcp_trace, FCP_BUF_LEVEL_2, 0,
6267 			    "ELS PRLI is retried for d_id=0x%x, state=%x,"
6268 			    " reason= %x", ptgt->tgt_d_id, fpkt->pkt_state,
6269 			    fpkt->pkt_reason);
6270 
6271 			/*
6272 			 * Retry by recalling the routine that
6273 			 * originally queued this packet
6274 			 */
6275 			mutex_enter(&pptr->port_mutex);
6276 			if (!FCP_TGT_STATE_CHANGED(ptgt, icmd)) {
6277 				caddr_t msg;
6278 
6279 				mutex_exit(&pptr->port_mutex);
6280 
6281 				ASSERT(icmd->ipkt_opcode != LA_ELS_PLOGI);
6282 
6283 				if (fpkt->pkt_state == FC_PKT_TIMEOUT) {
6284 					fpkt->pkt_timeout +=
6285 					    FCP_TIMEOUT_DELTA;
6286 				}
6287 
6288 				rval = fc_ulp_issue_els(pptr->port_fp_handle,
6289 				    fpkt);
6290 				if (rval == FC_SUCCESS) {
6291 					return;
6292 				}
6293 
6294 				if (rval == FC_STATEC_BUSY ||
6295 				    rval == FC_OFFLINE) {
6296 					fcp_queue_ipkt(pptr, fpkt);
6297 					return;
6298 				}
6299 				(void) fc_ulp_error(rval, &msg);
6300 
6301 				fcp_log(CE_NOTE, pptr->port_dip,
6302 				    "!ELS 0x%x failed to d_id=0x%x;"
6303 				    " %s", icmd->ipkt_opcode,
6304 				    ptgt->tgt_d_id, msg);
6305 			} else {
6306 				FCP_TRACE(fcp_logq, pptr->port_instbuf,
6307 				    fcp_trace, FCP_BUF_LEVEL_2, 0,
6308 				    "fcp_icmd_callback,3: state change "
6309 				    " occured for D_ID=0x%x", ptgt->tgt_d_id);
6310 				mutex_exit(&pptr->port_mutex);
6311 			}
6312 		}
6313 	} else {
6314 		if (fcp_is_retryable(icmd) &&
6315 		    icmd->ipkt_retries++ < FCP_MAX_RETRIES) {
6316 			if (FCP_MUST_RETRY(fpkt)) {
6317 				fcp_queue_ipkt(pptr, fpkt);
6318 				return;
6319 			}
6320 		}
6321 		mutex_enter(&pptr->port_mutex);
6322 		if (!FCP_TGT_STATE_CHANGED(ptgt, icmd) &&
6323 		    fpkt->pkt_state != FC_PKT_PORT_OFFLINE) {
6324 			mutex_exit(&pptr->port_mutex);
6325 			fcp_print_error(fpkt);
6326 		} else {
6327 			FCP_TRACE(fcp_logq, pptr->port_instbuf,
6328 			    fcp_trace, FCP_BUF_LEVEL_2, 0,
6329 			    "fcp_icmd_callback,4: state change occured"
6330 			    " for D_ID=0x%x", ptgt->tgt_d_id);
6331 			mutex_exit(&pptr->port_mutex);
6332 		}
6333 	}
6334 
6335 fail:
6336 	if (free_pkt) {
6337 		(void) fcp_call_finish_init(pptr, ptgt, icmd->ipkt_link_cnt,
6338 		    icmd->ipkt_change_cnt, icmd->ipkt_cause);
6339 		fcp_icmd_free(pptr, icmd);
6340 	}
6341 }
6342 
6343 
6344 /*
6345  * called internally to send an info cmd using the transport
6346  *
6347  * sends either an INQ or a REPORT_LUN
6348  *
6349  * when the packet is completed fcp_scsi_callback is called
6350  */
6351 static int
6352 fcp_send_scsi(struct fcp_lun *plun, uchar_t opcode, int alloc_len,
6353     int lcount, int tcount, int cause, uint32_t rscn_count)
6354 {
6355 	int			nodma;
6356 	struct fcp_ipkt		*icmd;
6357 	struct fcp_tgt 		*ptgt;
6358 	struct fcp_port		*pptr;
6359 	fc_frame_hdr_t		*hp;
6360 	fc_packet_t		*fpkt;
6361 	struct fcp_cmd		fcp_cmd;
6362 	struct fcp_cmd		*fcmd;
6363 	union scsi_cdb		*scsi_cdb;
6364 
6365 	ASSERT(plun != NULL);
6366 
6367 	ptgt = plun->lun_tgt;
6368 	ASSERT(ptgt != NULL);
6369 
6370 	pptr = ptgt->tgt_port;
6371 	ASSERT(pptr != NULL);
6372 
6373 	FCP_TRACE(fcp_logq, pptr->port_instbuf,
6374 	    fcp_trace, FCP_BUF_LEVEL_5, 0,
6375 	    "fcp_send_scsi: d_id=0x%x opcode=0x%x", ptgt->tgt_d_id, opcode);
6376 
6377 	nodma = (pptr->port_fcp_dma == FC_NO_DVMA_SPACE) ? 1 : 0;
6378 
6379 	icmd = fcp_icmd_alloc(pptr, ptgt, sizeof (struct fcp_cmd),
6380 	    FCP_MAX_RSP_IU_SIZE, alloc_len, nodma, lcount, tcount, cause,
6381 	    rscn_count);
6382 
6383 	if (icmd == NULL) {
6384 		return (DDI_FAILURE);
6385 	}
6386 
6387 	fpkt = icmd->ipkt_fpkt;
6388 	fpkt->pkt_tran_flags = FC_TRAN_CLASS3 | FC_TRAN_INTR;
6389 	icmd->ipkt_retries = 0;
6390 	icmd->ipkt_opcode = opcode;
6391 	icmd->ipkt_lun = plun;
6392 
6393 	if (nodma) {
6394 		fcmd = (struct fcp_cmd *)fpkt->pkt_cmd;
6395 	} else {
6396 		fcmd = &fcp_cmd;
6397 	}
6398 	bzero(fcmd, sizeof (struct fcp_cmd));
6399 
6400 	fpkt->pkt_timeout = FCP_SCSI_CMD_TIMEOUT;
6401 
6402 	hp = &fpkt->pkt_cmd_fhdr;
6403 
6404 	hp->s_id = pptr->port_id;
6405 	hp->d_id = ptgt->tgt_d_id;
6406 	hp->r_ctl = R_CTL_COMMAND;
6407 	hp->type = FC_TYPE_SCSI_FCP;
6408 	hp->f_ctl = F_CTL_SEQ_INITIATIVE | F_CTL_FIRST_SEQ;
6409 	hp->rsvd = 0;
6410 	hp->seq_id = 0;
6411 	hp->seq_cnt = 0;
6412 	hp->ox_id = 0xffff;
6413 	hp->rx_id = 0xffff;
6414 	hp->ro = 0;
6415 
6416 	bcopy(&(plun->lun_addr), &(fcmd->fcp_ent_addr), FCP_LUN_SIZE);
6417 
6418 	/*
6419 	 * Request SCSI target for expedited processing
6420 	 */
6421 
6422 	/*
6423 	 * Set up for untagged queuing because we do not
6424 	 * know if the fibre device supports queuing.
6425 	 */
6426 	fcmd->fcp_cntl.cntl_reserved_0 = 0;
6427 	fcmd->fcp_cntl.cntl_reserved_1 = 0;
6428 	fcmd->fcp_cntl.cntl_reserved_2 = 0;
6429 	fcmd->fcp_cntl.cntl_reserved_3 = 0;
6430 	fcmd->fcp_cntl.cntl_reserved_4 = 0;
6431 	fcmd->fcp_cntl.cntl_qtype = FCP_QTYPE_UNTAGGED;
6432 	scsi_cdb = (union scsi_cdb *)fcmd->fcp_cdb;
6433 
6434 	switch (opcode) {
6435 	case SCMD_INQUIRY_PAGE83:
6436 		/*
6437 		 * Prepare to get the Inquiry VPD page 83 information
6438 		 */
6439 		fcmd->fcp_cntl.cntl_read_data = 1;
6440 		fcmd->fcp_cntl.cntl_write_data = 0;
6441 		fcmd->fcp_data_len = alloc_len;
6442 
6443 		fpkt->pkt_tran_type = FC_PKT_FCP_READ;
6444 		fpkt->pkt_comp = fcp_scsi_callback;
6445 
6446 		scsi_cdb->scc_cmd = SCMD_INQUIRY;
6447 		scsi_cdb->g0_addr2 = 0x01;
6448 		scsi_cdb->g0_addr1 = 0x83;
6449 		scsi_cdb->g0_count0 = (uchar_t)alloc_len;
6450 		break;
6451 
6452 	case SCMD_INQUIRY:
6453 		fcmd->fcp_cntl.cntl_read_data = 1;
6454 		fcmd->fcp_cntl.cntl_write_data = 0;
6455 		fcmd->fcp_data_len = alloc_len;
6456 
6457 		fpkt->pkt_tran_type = FC_PKT_FCP_READ;
6458 		fpkt->pkt_comp = fcp_scsi_callback;
6459 
6460 		scsi_cdb->scc_cmd = SCMD_INQUIRY;
6461 		scsi_cdb->g0_count0 = SUN_INQSIZE;
6462 		break;
6463 
6464 	case SCMD_REPORT_LUN: {
6465 		fc_portid_t	d_id;
6466 		opaque_t	fca_dev;
6467 
6468 		ASSERT(alloc_len >= 16);
6469 
6470 		d_id.priv_lilp_posit = 0;
6471 		d_id.port_id = ptgt->tgt_d_id;
6472 
6473 		fca_dev = fc_ulp_get_fca_device(pptr->port_fp_handle, d_id);
6474 
6475 		mutex_enter(&ptgt->tgt_mutex);
6476 		ptgt->tgt_fca_dev = fca_dev;
6477 		mutex_exit(&ptgt->tgt_mutex);
6478 
6479 		fcmd->fcp_cntl.cntl_read_data = 1;
6480 		fcmd->fcp_cntl.cntl_write_data = 0;
6481 		fcmd->fcp_data_len = alloc_len;
6482 
6483 		fpkt->pkt_tran_type = FC_PKT_FCP_READ;
6484 		fpkt->pkt_comp = fcp_scsi_callback;
6485 
6486 		scsi_cdb->scc_cmd = SCMD_REPORT_LUN;
6487 		scsi_cdb->scc5_count0 = alloc_len & 0xff;
6488 		scsi_cdb->scc5_count1 = (alloc_len >> 8) & 0xff;
6489 		scsi_cdb->scc5_count2 = (alloc_len >> 16) & 0xff;
6490 		scsi_cdb->scc5_count3 = (alloc_len >> 24) & 0xff;
6491 		break;
6492 	}
6493 
6494 	default:
6495 		fcp_log(CE_WARN, pptr->port_dip,
6496 		    "!fcp_send_scsi Invalid opcode");
6497 		break;
6498 	}
6499 
6500 	if (!nodma) {
6501 		FCP_CP_OUT((uint8_t *)fcmd, fpkt->pkt_cmd,
6502 		    fpkt->pkt_cmd_acc, sizeof (struct fcp_cmd));
6503 	}
6504 
6505 	mutex_enter(&pptr->port_mutex);
6506 	if (!FCP_TGT_STATE_CHANGED(ptgt, icmd)) {
6507 
6508 		mutex_exit(&pptr->port_mutex);
6509 		if (fcp_transport(pptr->port_fp_handle, fpkt, 1) !=
6510 		    FC_SUCCESS) {
6511 			fcp_icmd_free(pptr, icmd);
6512 			return (DDI_FAILURE);
6513 		}
6514 		return (DDI_SUCCESS);
6515 	} else {
6516 		FCP_TRACE(fcp_logq, pptr->port_instbuf,
6517 		    fcp_trace, FCP_BUF_LEVEL_2, 0,
6518 		    "fcp_send_scsi,1: state change occured"
6519 		    " for D_ID=0x%x", ptgt->tgt_d_id);
6520 		mutex_exit(&pptr->port_mutex);
6521 		fcp_icmd_free(pptr, icmd);
6522 		return (DDI_FAILURE);
6523 	}
6524 }
6525 
6526 
6527 /*
6528  * called by fcp_scsi_callback to check to handle the case where
6529  * REPORT_LUN returns ILLEGAL REQUEST or a UNIT ATTENTION
6530  */
6531 static int
6532 fcp_check_reportlun(struct fcp_rsp *rsp, fc_packet_t *fpkt)
6533 {
6534 	uchar_t				rqlen;
6535 	int				rval = DDI_FAILURE;
6536 	struct scsi_extended_sense	sense_info, *sense;
6537 	struct fcp_ipkt		*icmd = (struct fcp_ipkt *)
6538 					    fpkt->pkt_ulp_private;
6539 	struct fcp_tgt		*ptgt = icmd->ipkt_tgt;
6540 	struct fcp_port		*pptr = ptgt->tgt_port;
6541 
6542 	ASSERT(icmd->ipkt_opcode == SCMD_REPORT_LUN);
6543 
6544 	if (rsp->fcp_u.fcp_status.scsi_status == STATUS_RESERVATION_CONFLICT) {
6545 		/*
6546 		 * SCSI-II Reserve Release support. Some older FC drives return
6547 		 * Reservation conflict for Report Luns command.
6548 		 */
6549 		if (icmd->ipkt_nodma) {
6550 			rsp->fcp_u.fcp_status.rsp_len_set = 0;
6551 			rsp->fcp_u.fcp_status.sense_len_set = 0;
6552 			rsp->fcp_u.fcp_status.scsi_status = STATUS_GOOD;
6553 		} else {
6554 			fcp_rsp_t	new_resp;
6555 
6556 			FCP_CP_IN(fpkt->pkt_resp, &new_resp,
6557 			    fpkt->pkt_resp_acc, sizeof (new_resp));
6558 
6559 			new_resp.fcp_u.fcp_status.rsp_len_set = 0;
6560 			new_resp.fcp_u.fcp_status.sense_len_set = 0;
6561 			new_resp.fcp_u.fcp_status.scsi_status = STATUS_GOOD;
6562 
6563 			FCP_CP_OUT(&new_resp, fpkt->pkt_resp,
6564 			    fpkt->pkt_resp_acc, sizeof (new_resp));
6565 		}
6566 
6567 		FCP_CP_OUT(fcp_dummy_lun, fpkt->pkt_data,
6568 		    fpkt->pkt_data_acc, sizeof (fcp_dummy_lun));
6569 
6570 		return (DDI_SUCCESS);
6571 	}
6572 
6573 	sense = &sense_info;
6574 	if (!rsp->fcp_u.fcp_status.sense_len_set) {
6575 		/* no need to continue if sense length is not set */
6576 		return (rval);
6577 	}
6578 
6579 	/* casting 64-bit integer to 8-bit */
6580 	rqlen = (uchar_t)min(rsp->fcp_sense_len,
6581 	    sizeof (struct scsi_extended_sense));
6582 
6583 	if (rqlen < 14) {
6584 		/* no need to continue if request length isn't long enough */
6585 		return (rval);
6586 	}
6587 
6588 	if (icmd->ipkt_nodma) {
6589 		/*
6590 		 * We can safely use fcp_response_len here since the
6591 		 * only path that calls fcp_check_reportlun,
6592 		 * fcp_scsi_callback, has already called
6593 		 * fcp_validate_fcp_response.
6594 		 */
6595 		sense = (struct scsi_extended_sense *)(fpkt->pkt_resp +
6596 		    sizeof (struct fcp_rsp) + rsp->fcp_response_len);
6597 	} else {
6598 		FCP_CP_IN(fpkt->pkt_resp + sizeof (struct fcp_rsp) +
6599 		    rsp->fcp_response_len, sense, fpkt->pkt_resp_acc,
6600 		    sizeof (struct scsi_extended_sense));
6601 	}
6602 
6603 	if (!FCP_SENSE_NO_LUN(sense)) {
6604 		mutex_enter(&ptgt->tgt_mutex);
6605 		/* clear the flag if any */
6606 		ptgt->tgt_state &= ~FCP_TGT_ILLREQ;
6607 		mutex_exit(&ptgt->tgt_mutex);
6608 	}
6609 
6610 	if ((sense->es_key == KEY_ILLEGAL_REQUEST) &&
6611 	    (sense->es_add_code == 0x20)) {
6612 		if (icmd->ipkt_nodma) {
6613 			rsp->fcp_u.fcp_status.rsp_len_set = 0;
6614 			rsp->fcp_u.fcp_status.sense_len_set = 0;
6615 			rsp->fcp_u.fcp_status.scsi_status = STATUS_GOOD;
6616 		} else {
6617 			fcp_rsp_t	new_resp;
6618 
6619 			FCP_CP_IN(fpkt->pkt_resp, &new_resp,
6620 			    fpkt->pkt_resp_acc, sizeof (new_resp));
6621 
6622 			new_resp.fcp_u.fcp_status.rsp_len_set = 0;
6623 			new_resp.fcp_u.fcp_status.sense_len_set = 0;
6624 			new_resp.fcp_u.fcp_status.scsi_status = STATUS_GOOD;
6625 
6626 			FCP_CP_OUT(&new_resp, fpkt->pkt_resp,
6627 			    fpkt->pkt_resp_acc, sizeof (new_resp));
6628 		}
6629 
6630 		FCP_CP_OUT(fcp_dummy_lun, fpkt->pkt_data,
6631 		    fpkt->pkt_data_acc, sizeof (fcp_dummy_lun));
6632 
6633 		return (DDI_SUCCESS);
6634 	}
6635 
6636 	/*
6637 	 * This is for the STK library which returns a check condition,
6638 	 * to indicate device is not ready, manual assistance needed.
6639 	 * This is to a report lun command when the door is open.
6640 	 */
6641 	if ((sense->es_key == KEY_NOT_READY) && (sense->es_add_code == 0x04)) {
6642 		if (icmd->ipkt_nodma) {
6643 			rsp->fcp_u.fcp_status.rsp_len_set = 0;
6644 			rsp->fcp_u.fcp_status.sense_len_set = 0;
6645 			rsp->fcp_u.fcp_status.scsi_status = STATUS_GOOD;
6646 		} else {
6647 			fcp_rsp_t	new_resp;
6648 
6649 			FCP_CP_IN(fpkt->pkt_resp, &new_resp,
6650 			    fpkt->pkt_resp_acc, sizeof (new_resp));
6651 
6652 			new_resp.fcp_u.fcp_status.rsp_len_set = 0;
6653 			new_resp.fcp_u.fcp_status.sense_len_set = 0;
6654 			new_resp.fcp_u.fcp_status.scsi_status = STATUS_GOOD;
6655 
6656 			FCP_CP_OUT(&new_resp, fpkt->pkt_resp,
6657 			    fpkt->pkt_resp_acc, sizeof (new_resp));
6658 		}
6659 
6660 		FCP_CP_OUT(fcp_dummy_lun, fpkt->pkt_data,
6661 		    fpkt->pkt_data_acc, sizeof (fcp_dummy_lun));
6662 
6663 		return (DDI_SUCCESS);
6664 	}
6665 
6666 	if ((FCP_SENSE_REPORTLUN_CHANGED(sense)) ||
6667 	    (FCP_SENSE_NO_LUN(sense))) {
6668 		mutex_enter(&ptgt->tgt_mutex);
6669 		if ((FCP_SENSE_NO_LUN(sense)) &&
6670 		    (ptgt->tgt_state & FCP_TGT_ILLREQ)) {
6671 			ptgt->tgt_state &= ~FCP_TGT_ILLREQ;
6672 			mutex_exit(&ptgt->tgt_mutex);
6673 			/*
6674 			 * reconfig was triggred by ILLEGAL REQUEST but
6675 			 * got ILLEGAL REQUEST again
6676 			 */
6677 			FCP_TRACE(fcp_logq, pptr->port_instbuf,
6678 			    fcp_trace, FCP_BUF_LEVEL_3, 0,
6679 			    "!FCP: Unable to obtain Report Lun data"
6680 			    " target=%x", ptgt->tgt_d_id);
6681 		} else {
6682 			if (ptgt->tgt_tid == NULL) {
6683 				timeout_id_t	tid;
6684 				/*
6685 				 * REPORT LUN data has changed.  Kick off
6686 				 * rediscovery
6687 				 */
6688 				tid = timeout(fcp_reconfigure_luns,
6689 				    (caddr_t)ptgt, (clock_t)drv_usectohz(1));
6690 
6691 				ptgt->tgt_tid = tid;
6692 				ptgt->tgt_state |= FCP_TGT_BUSY;
6693 			}
6694 			if (FCP_SENSE_NO_LUN(sense)) {
6695 				ptgt->tgt_state |= FCP_TGT_ILLREQ;
6696 			}
6697 			mutex_exit(&ptgt->tgt_mutex);
6698 			if (FCP_SENSE_REPORTLUN_CHANGED(sense)) {
6699 				FCP_TRACE(fcp_logq, pptr->port_instbuf,
6700 				    fcp_trace, FCP_BUF_LEVEL_3, 0,
6701 				    "!FCP:Report Lun Has Changed"
6702 				    " target=%x", ptgt->tgt_d_id);
6703 			} else if (FCP_SENSE_NO_LUN(sense)) {
6704 				FCP_TRACE(fcp_logq, pptr->port_instbuf,
6705 				    fcp_trace, FCP_BUF_LEVEL_3, 0,
6706 				    "!FCP:LU Not Supported"
6707 				    " target=%x", ptgt->tgt_d_id);
6708 			}
6709 		}
6710 		rval = DDI_SUCCESS;
6711 	}
6712 
6713 	FCP_TRACE(fcp_logq, pptr->port_instbuf,
6714 	    fcp_trace, FCP_BUF_LEVEL_5, 0,
6715 	    "D_ID=%x, sense=%x, status=%x",
6716 	    fpkt->pkt_cmd_fhdr.d_id, sense->es_key,
6717 	    rsp->fcp_u.fcp_status.scsi_status);
6718 
6719 	return (rval);
6720 }
6721 
6722 /*
6723  *     Function: fcp_scsi_callback
6724  *
6725  *  Description: This is the callback routine set by fcp_send_scsi() after
6726  *		 it calls fcp_icmd_alloc().  The SCSI command completed here
6727  *		 and autogenerated by FCP are:  REPORT_LUN, INQUIRY and
6728  *		 INQUIRY_PAGE83.
6729  *
6730  *     Argument: *fpkt	 FC packet used to convey the command
6731  *
6732  * Return Value: None
6733  */
6734 static void
6735 fcp_scsi_callback(fc_packet_t *fpkt)
6736 {
6737 	struct fcp_ipkt	*icmd = (struct fcp_ipkt *)
6738 				    fpkt->pkt_ulp_private;
6739 	struct fcp_rsp_info	fcp_rsp_err, *bep;
6740 	struct fcp_port	*pptr;
6741 	struct fcp_tgt	*ptgt;
6742 	struct fcp_lun	*plun;
6743 	struct fcp_rsp		response, *rsp;
6744 
6745 	if (icmd->ipkt_nodma) {
6746 		rsp = (struct fcp_rsp *)fpkt->pkt_resp;
6747 	} else {
6748 		rsp = &response;
6749 		FCP_CP_IN(fpkt->pkt_resp, rsp, fpkt->pkt_resp_acc,
6750 		    sizeof (struct fcp_rsp));
6751 	}
6752 
6753 	ptgt = icmd->ipkt_tgt;
6754 	pptr = ptgt->tgt_port;
6755 	plun = icmd->ipkt_lun;
6756 
6757 	FCP_TRACE(fcp_logq, pptr->port_instbuf,
6758 	    fcp_trace, FCP_BUF_LEVEL_2, 0,
6759 	    "SCSI callback state=0x%x for %x, op_code=0x%x, "
6760 	    "status=%x, lun num=%x",
6761 	    fpkt->pkt_state, ptgt->tgt_d_id, icmd->ipkt_opcode,
6762 	    rsp->fcp_u.fcp_status.scsi_status, plun->lun_num);
6763 
6764 	/*
6765 	 * Pre-init LUN GUID with NWWN if it is not a device that
6766 	 * supports multiple luns and we know it's not page83
6767 	 * compliant.  Although using a NWWN is not lun unique,
6768 	 * we will be fine since there is only one lun behind the taget
6769 	 * in this case.
6770 	 */
6771 	if ((plun->lun_guid_size == 0) &&
6772 		(icmd->ipkt_opcode == SCMD_INQUIRY_PAGE83) &&
6773 		(fcp_symmetric_device_probe(plun) == 0)) {
6774 
6775 		char ascii_wwn[FC_WWN_SIZE*2+1];
6776 		fcp_wwn_to_ascii(&ptgt->tgt_node_wwn.raw_wwn[0], ascii_wwn);
6777 		(void) fcp_copy_guid_2_lun_block(plun, ascii_wwn);
6778 	}
6779 
6780 	/*
6781 	 * Some old FC tapes and FC <-> SCSI bridge devices return overrun
6782 	 * when thay have more data than what is asked in CDB. An overrun
6783 	 * is really when FCP_DL is smaller than the data length in CDB.
6784 	 * In the case here we know that REPORT LUN command we formed within
6785 	 * this binary has correct FCP_DL. So this OVERRUN is due to bad device
6786 	 * behavior. In reality this is FC_SUCCESS.
6787 	 */
6788 	if ((fpkt->pkt_state != FC_PKT_SUCCESS) &&
6789 	    (fpkt->pkt_reason == FC_REASON_OVERRUN) &&
6790 	    (icmd->ipkt_opcode == SCMD_REPORT_LUN)) {
6791 		fpkt->pkt_state = FC_PKT_SUCCESS;
6792 	}
6793 
6794 	if (fpkt->pkt_state != FC_PKT_SUCCESS) {
6795 		FCP_TRACE(fcp_logq, pptr->port_instbuf,
6796 		    fcp_trace, FCP_BUF_LEVEL_2, 0,
6797 		    "icmd failed with state=0x%x for %x", fpkt->pkt_state,
6798 		    ptgt->tgt_d_id);
6799 
6800 		if (fpkt->pkt_reason == FC_REASON_CRC_ERROR) {
6801 			/*
6802 			 * Inquiry VPD page command on A5K SES devices would
6803 			 * result in data CRC errors.
6804 			 */
6805 			if (icmd->ipkt_opcode == SCMD_INQUIRY_PAGE83) {
6806 				(void) fcp_handle_page83(fpkt, icmd, 1);
6807 				return;
6808 			}
6809 		}
6810 		if (fpkt->pkt_state == FC_PKT_TIMEOUT ||
6811 		    FCP_MUST_RETRY(fpkt)) {
6812 			fpkt->pkt_timeout += FCP_TIMEOUT_DELTA;
6813 			fcp_retry_scsi_cmd(fpkt);
6814 			return;
6815 		}
6816 
6817 		FCP_TGT_TRACE(ptgt, icmd->ipkt_change_cnt,
6818 		    FCP_TGT_TRACE_20);
6819 
6820 		mutex_enter(&pptr->port_mutex);
6821 		mutex_enter(&ptgt->tgt_mutex);
6822 		if (!FCP_STATE_CHANGED(pptr, ptgt, icmd)) {
6823 			mutex_exit(&ptgt->tgt_mutex);
6824 			mutex_exit(&pptr->port_mutex);
6825 			fcp_print_error(fpkt);
6826 		} else {
6827 			FCP_TRACE(fcp_logq, pptr->port_instbuf,
6828 			    fcp_trace, FCP_BUF_LEVEL_2, 0,
6829 			    "fcp_scsi_callback,1: state change occured"
6830 			    " for D_ID=0x%x", ptgt->tgt_d_id);
6831 			mutex_exit(&ptgt->tgt_mutex);
6832 			mutex_exit(&pptr->port_mutex);
6833 		}
6834 		(void) fcp_call_finish_init(pptr, ptgt, icmd->ipkt_link_cnt,
6835 		    icmd->ipkt_change_cnt, icmd->ipkt_cause);
6836 		fcp_icmd_free(pptr, icmd);
6837 		return;
6838 	}
6839 
6840 	FCP_TGT_TRACE(ptgt, icmd->ipkt_change_cnt, FCP_TGT_TRACE_21);
6841 
6842 	mutex_enter(&pptr->port_mutex);
6843 	mutex_enter(&ptgt->tgt_mutex);
6844 	if (FCP_STATE_CHANGED(pptr, ptgt, icmd)) {
6845 		FCP_TRACE(fcp_logq, pptr->port_instbuf,
6846 		    fcp_trace, FCP_BUF_LEVEL_2, 0,
6847 		    "fcp_scsi_callback,2: state change occured"
6848 		    " for D_ID=0x%x", ptgt->tgt_d_id);
6849 		mutex_exit(&ptgt->tgt_mutex);
6850 		mutex_exit(&pptr->port_mutex);
6851 		(void) fcp_call_finish_init(pptr, ptgt, icmd->ipkt_link_cnt,
6852 		    icmd->ipkt_change_cnt, icmd->ipkt_cause);
6853 		fcp_icmd_free(pptr, icmd);
6854 		return;
6855 	}
6856 	ASSERT((ptgt->tgt_state & FCP_TGT_MARK) == 0);
6857 
6858 	mutex_exit(&ptgt->tgt_mutex);
6859 	mutex_exit(&pptr->port_mutex);
6860 
6861 	if (icmd->ipkt_nodma) {
6862 		bep = (struct fcp_rsp_info *)(fpkt->pkt_resp +
6863 		    sizeof (struct fcp_rsp));
6864 	} else {
6865 		bep = &fcp_rsp_err;
6866 		FCP_CP_IN(fpkt->pkt_resp + sizeof (struct fcp_rsp), bep,
6867 		    fpkt->pkt_resp_acc, sizeof (struct fcp_rsp_info));
6868 	}
6869 
6870 	if (fcp_validate_fcp_response(rsp, pptr) != FC_SUCCESS) {
6871 		fcp_retry_scsi_cmd(fpkt);
6872 		return;
6873 	}
6874 
6875 	if (rsp->fcp_u.fcp_status.rsp_len_set && bep->rsp_code !=
6876 	    FCP_NO_FAILURE) {
6877 		FCP_TRACE(fcp_logq, pptr->port_instbuf,
6878 		    fcp_trace, FCP_BUF_LEVEL_2, 0,
6879 		    "rsp_code=0x%x, rsp_len_set=0x%x",
6880 		    bep->rsp_code, rsp->fcp_u.fcp_status.rsp_len_set);
6881 		fcp_retry_scsi_cmd(fpkt);
6882 		return;
6883 	}
6884 
6885 	if (rsp->fcp_u.fcp_status.scsi_status == STATUS_QFULL ||
6886 	    rsp->fcp_u.fcp_status.scsi_status == STATUS_BUSY) {
6887 		fcp_queue_ipkt(pptr, fpkt);
6888 		return;
6889 	}
6890 
6891 	/*
6892 	 * Devices that do not support INQUIRY_PAGE83, return check condition
6893 	 * with illegal request as per SCSI spec.
6894 	 * Crossbridge is one such device and Daktari's SES node is another.
6895 	 * We want to ideally enumerate these devices as a non-mpxio devices.
6896 	 * SES nodes (Daktari only currently) are an exception to this.
6897 	 */
6898 	if ((icmd->ipkt_opcode == SCMD_INQUIRY_PAGE83) &&
6899 	    (rsp->fcp_u.fcp_status.scsi_status & STATUS_CHECK)) {
6900 
6901 		FCP_TRACE(fcp_logq, pptr->port_instbuf,
6902 		    fcp_trace, FCP_BUF_LEVEL_3, 0,
6903 		    "INQUIRY_PAGE83 for d_id %x (dtype:0x%x) failed with "
6904 		    "check condition. May enumerate as non-mpxio device",
6905 		    ptgt->tgt_d_id, plun->lun_type);
6906 
6907 		/*
6908 		 * If we let Daktari's SES be enumerated as a non-mpxio
6909 		 * device, there will be a discrepency in that the other
6910 		 * internal FC disks will get enumerated as mpxio devices.
6911 		 * Applications like luxadm expect this to be consistent.
6912 		 *
6913 		 * So, we put in a hack here to check if this is an SES device
6914 		 * and handle it here.
6915 		 */
6916 		if (plun->lun_type == DTYPE_ESI) {
6917 			/*
6918 			 * Since, pkt_state is actually FC_PKT_SUCCESS
6919 			 * at this stage, we fake a failure here so that
6920 			 * fcp_handle_page83 will create a device path using
6921 			 * the WWN instead of the GUID which is not there anyway
6922 			 */
6923 			fpkt->pkt_state = FC_PKT_LOCAL_RJT;
6924 			(void) fcp_handle_page83(fpkt, icmd, 1);
6925 			return;
6926 		}
6927 
6928 		mutex_enter(&ptgt->tgt_mutex);
6929 		plun->lun_state &= ~(FCP_LUN_OFFLINE |
6930 		    FCP_LUN_MARK | FCP_LUN_BUSY);
6931 		mutex_exit(&ptgt->tgt_mutex);
6932 
6933 		(void) fcp_call_finish_init(pptr, ptgt,
6934 		    icmd->ipkt_link_cnt, icmd->ipkt_change_cnt,
6935 		    icmd->ipkt_cause);
6936 		fcp_icmd_free(pptr, icmd);
6937 		return;
6938 	}
6939 
6940 	if (rsp->fcp_u.fcp_status.scsi_status != STATUS_GOOD) {
6941 		int rval = DDI_FAILURE;
6942 
6943 		/*
6944 		 * handle cases where report lun isn't supported
6945 		 * by faking up our own REPORT_LUN response or
6946 		 * UNIT ATTENTION
6947 		 */
6948 		if (icmd->ipkt_opcode == SCMD_REPORT_LUN) {
6949 			rval = fcp_check_reportlun(rsp, fpkt);
6950 
6951 			/*
6952 			 * fcp_check_reportlun might have modified the
6953 			 * FCP response. Copy it in again to get an updated
6954 			 * FCP response
6955 			 */
6956 			if (rval == DDI_SUCCESS && icmd->ipkt_nodma == 0) {
6957 				rsp = &response;
6958 
6959 				FCP_CP_IN(fpkt->pkt_resp, rsp,
6960 				    fpkt->pkt_resp_acc,
6961 				    sizeof (struct fcp_rsp));
6962 			}
6963 		}
6964 
6965 		if (rsp->fcp_u.fcp_status.scsi_status != STATUS_GOOD) {
6966 			if (rval == DDI_SUCCESS) {
6967 				(void) fcp_call_finish_init(pptr, ptgt,
6968 				    icmd->ipkt_link_cnt, icmd->ipkt_change_cnt,
6969 				    icmd->ipkt_cause);
6970 				fcp_icmd_free(pptr, icmd);
6971 			} else {
6972 				fcp_retry_scsi_cmd(fpkt);
6973 			}
6974 
6975 			return;
6976 		}
6977 	} else {
6978 		if (icmd->ipkt_opcode == SCMD_REPORT_LUN) {
6979 			mutex_enter(&ptgt->tgt_mutex);
6980 			ptgt->tgt_state &= ~FCP_TGT_ILLREQ;
6981 			mutex_exit(&ptgt->tgt_mutex);
6982 		}
6983 	}
6984 
6985 	ASSERT(rsp->fcp_u.fcp_status.scsi_status == STATUS_GOOD);
6986 
6987 	(void) ddi_dma_sync(fpkt->pkt_data_dma, 0, 0, DDI_DMA_SYNC_FORCPU);
6988 
6989 	switch (icmd->ipkt_opcode) {
6990 	case SCMD_INQUIRY:
6991 		FCP_LUN_TRACE(plun, FCP_LUN_TRACE_1);
6992 		fcp_handle_inquiry(fpkt, icmd);
6993 		break;
6994 
6995 	case SCMD_REPORT_LUN:
6996 		FCP_TGT_TRACE(ptgt, icmd->ipkt_change_cnt,
6997 		    FCP_TGT_TRACE_22);
6998 		fcp_handle_reportlun(fpkt, icmd);
6999 		break;
7000 
7001 	case SCMD_INQUIRY_PAGE83:
7002 		FCP_LUN_TRACE(plun, FCP_LUN_TRACE_2);
7003 		(void) fcp_handle_page83(fpkt, icmd, 0);
7004 		break;
7005 
7006 	default:
7007 		fcp_log(CE_WARN, NULL, "!Invalid SCSI opcode");
7008 		(void) fcp_call_finish_init(pptr, ptgt, icmd->ipkt_link_cnt,
7009 		    icmd->ipkt_change_cnt, icmd->ipkt_cause);
7010 		fcp_icmd_free(pptr, icmd);
7011 		break;
7012 	}
7013 }
7014 
7015 
7016 static void
7017 fcp_retry_scsi_cmd(fc_packet_t *fpkt)
7018 {
7019 	struct fcp_ipkt	*icmd = (struct fcp_ipkt *)
7020 				    fpkt->pkt_ulp_private;
7021 	struct fcp_tgt	*ptgt = icmd->ipkt_tgt;
7022 	struct fcp_port	*pptr = ptgt->tgt_port;
7023 
7024 	if (icmd->ipkt_retries < FCP_MAX_RETRIES &&
7025 	    fcp_is_retryable(icmd)) {
7026 		mutex_enter(&pptr->port_mutex);
7027 		if (!FCP_TGT_STATE_CHANGED(ptgt, icmd)) {
7028 			mutex_exit(&pptr->port_mutex);
7029 			FCP_TRACE(fcp_logq, pptr->port_instbuf,
7030 			    fcp_trace, FCP_BUF_LEVEL_3, 0,
7031 			    "Retrying %s to %x; state=%x, reason=%x",
7032 			    (icmd->ipkt_opcode == SCMD_REPORT_LUN) ?
7033 			    "Report LUN" : "INQUIRY", ptgt->tgt_d_id,
7034 			    fpkt->pkt_state, fpkt->pkt_reason);
7035 
7036 			fcp_queue_ipkt(pptr, fpkt);
7037 		} else {
7038 			FCP_TRACE(fcp_logq, pptr->port_instbuf,
7039 			    fcp_trace, FCP_BUF_LEVEL_3, 0,
7040 			    "fcp_retry_scsi_cmd,1: state change occured"
7041 			    " for D_ID=0x%x", ptgt->tgt_d_id);
7042 			mutex_exit(&pptr->port_mutex);
7043 			(void) fcp_call_finish_init(pptr, ptgt,
7044 			    icmd->ipkt_link_cnt, icmd->ipkt_change_cnt,
7045 			    icmd->ipkt_cause);
7046 			fcp_icmd_free(pptr, icmd);
7047 		}
7048 	} else {
7049 		fcp_print_error(fpkt);
7050 		(void) fcp_call_finish_init(pptr, ptgt, icmd->ipkt_link_cnt,
7051 		    icmd->ipkt_change_cnt, icmd->ipkt_cause);
7052 		fcp_icmd_free(pptr, icmd);
7053 	}
7054 }
7055 
7056 /*
7057  *     Function: fcp_handle_page83
7058  *
7059  *  Description: Treats the response to INQUIRY_PAGE83.
7060  *
7061  *     Argument: *fpkt	FC packet used to convey the command.
7062  *		 *icmd	Original fcp_ipkt structure.
7063  *		 ignore_page83_data
7064  *			if it's 1, that means it's a special devices's
7065  *			page83 response, it should be enumerated under mpxio
7066  *
7067  * Return Value: None
7068  */
7069 static void
7070 fcp_handle_page83(fc_packet_t *fpkt, struct fcp_ipkt *icmd,
7071     int ignore_page83_data)
7072 {
7073 	struct fcp_port	*pptr;
7074 	struct fcp_lun	*plun;
7075 	struct fcp_tgt	*ptgt;
7076 	uchar_t			dev_id_page[SCMD_MAX_INQUIRY_PAGE83_SIZE];
7077 	int			fail = 0;
7078 	ddi_devid_t		devid;
7079 	char			*guid = NULL;
7080 	int			ret;
7081 
7082 	ASSERT(icmd != NULL && fpkt != NULL);
7083 
7084 	pptr = icmd->ipkt_port;
7085 	ptgt = icmd->ipkt_tgt;
7086 	plun = icmd->ipkt_lun;
7087 
7088 	if (fpkt->pkt_state == FC_PKT_SUCCESS) {
7089 		FCP_LUN_TRACE(plun, FCP_LUN_TRACE_7);
7090 
7091 		FCP_CP_IN(fpkt->pkt_data, dev_id_page, fpkt->pkt_data_acc,
7092 			    SCMD_MAX_INQUIRY_PAGE83_SIZE);
7093 
7094 		FCP_TRACE(fcp_logq, pptr->port_instbuf,
7095 			    fcp_trace, FCP_BUF_LEVEL_5, 0,
7096 			    "fcp_handle_page83: port=%d, tgt D_ID=0x%x, "
7097 			    "dtype=0x%x, lun num=%x",
7098 			    pptr->port_instance, ptgt->tgt_d_id,
7099 			    dev_id_page[0], plun->lun_num);
7100 
7101 		ret = ddi_devid_scsi_encode(
7102 			DEVID_SCSI_ENCODE_VERSION_LATEST,
7103 			NULL, 		/* driver name */
7104 			(unsigned char *) &plun->lun_inq, /* standard inquiry */
7105 			sizeof (plun->lun_inq), /* size of standard inquiry */
7106 			NULL,		/* page 80 data */
7107 			0,		/* page 80 len */
7108 			dev_id_page,	/* page 83 data */
7109 			SCMD_MAX_INQUIRY_PAGE83_SIZE, /* page 83 data len */
7110 			&devid);
7111 
7112 		if (ret == DDI_SUCCESS) {
7113 
7114 			guid = ddi_devid_to_guid(devid);
7115 
7116 			if (guid) {
7117 				/*
7118 				 * Check our current guid.  If it's non null
7119 				 * and it has changed, we need to copy it into
7120 				 * lun_old_guid since we might still need it.
7121 				 */
7122 				if (plun->lun_guid &&
7123 				    strcmp(guid, plun->lun_guid)) {
7124 					unsigned int len;
7125 
7126 					/*
7127 					 * If the guid of the LUN changes,
7128 					 * reconfiguration should be triggered
7129 					 * to reflect the changes.
7130 					 * i.e. we should offline the LUN with
7131 					 * the old guid, and online the LUN with
7132 					 * the new guid.
7133 					 */
7134 					plun->lun_state |= FCP_LUN_CHANGED;
7135 
7136 					if (plun->lun_old_guid) {
7137 						kmem_free(plun->lun_old_guid,
7138 						    plun->lun_old_guid_size);
7139 					}
7140 
7141 					len = plun->lun_guid_size;
7142 					plun->lun_old_guid_size = len;
7143 
7144 					plun->lun_old_guid = kmem_zalloc(len,
7145 					    KM_NOSLEEP);
7146 
7147 					if (plun->lun_old_guid) {
7148 					/*
7149 					 * The alloc was successful then
7150 					 * let's do the copy.
7151 					 */
7152 						bcopy(plun->lun_guid,
7153 						    plun->lun_old_guid, len);
7154 					} else {
7155 						fail = 1;
7156 						plun->lun_old_guid_size = 0;
7157 					}
7158 				}
7159 				if (!fail) {
7160 					if (fcp_copy_guid_2_lun_block(
7161 					    plun, guid)) {
7162 						fail = 1;
7163 					}
7164 				}
7165 				ddi_devid_free_guid(guid);
7166 
7167 			} else {
7168 				FCP_TRACE(fcp_logq, pptr->port_instbuf,
7169 				    fcp_trace, FCP_BUF_LEVEL_2, 0,
7170 				    "fcp_handle_page83: unable to create "
7171 				    "GUID");
7172 
7173 				/* couldn't create good guid from devid */
7174 				fail = 1;
7175 			}
7176 			ddi_devid_free(devid);
7177 
7178 		} else if (ret == DDI_NOT_WELL_FORMED) {
7179 			/* NULL filled data for page 83 */
7180 			FCP_TRACE(fcp_logq, pptr->port_instbuf,
7181 			    fcp_trace, FCP_BUF_LEVEL_2, 0,
7182 			    "fcp_handle_page83: retry GUID");
7183 
7184 			icmd->ipkt_retries = 0;
7185 			fcp_retry_scsi_cmd(fpkt);
7186 			return;
7187 		} else {
7188 			FCP_TRACE(fcp_logq, pptr->port_instbuf,
7189 			    fcp_trace, FCP_BUF_LEVEL_2, 0,
7190 			    "fcp_handle_page83: bad ddi_devid_scsi_encode %x",
7191 			    ret);
7192 			/*
7193 			 * Since the page83 validation
7194 			 * introduced late, we are being
7195 			 * tolerant to the existing devices
7196 			 * that already found to be working
7197 			 * under mpxio, like A5200's SES device,
7198 			 * its page83 response will not be standard-compliant,
7199 			 * but we still want it to be enumerated under mpxio.
7200 			 */
7201 			if (fcp_symmetric_device_probe(plun) != 0) {
7202 				fail = 1;
7203 			}
7204 		}
7205 
7206 	} else {
7207 		/* bad packet state */
7208 		FCP_LUN_TRACE(plun, FCP_LUN_TRACE_8);
7209 
7210 		/*
7211 		 * For some special devices (A5K SES and Daktari's SES devices),
7212 		 * they should be enumerated under mpxio
7213 		 * or "luxadm dis" will fail
7214 		 */
7215 		if (ignore_page83_data) {
7216 			fail = 0;
7217 		} else {
7218 			fail = 1;
7219 		}
7220 		FCP_TRACE(fcp_logq, pptr->port_instbuf,
7221 			    fcp_trace, FCP_BUF_LEVEL_2, 0,
7222 			    "!Devid page cmd failed. "
7223 			    "fpkt_state: %x fpkt_reason: %x",
7224 			    "ignore_page83: %d",
7225 			    fpkt->pkt_state, fpkt->pkt_reason,
7226 			    ignore_page83_data);
7227 	}
7228 
7229 	mutex_enter(&pptr->port_mutex);
7230 	mutex_enter(&plun->lun_mutex);
7231 	/*
7232 	 * If lun_cip is not NULL, then we needn't update lun_mpxio to avoid
7233 	 * mismatch between lun_cip and lun_mpxio.
7234 	 */
7235 	if (plun->lun_cip == NULL) {
7236 		/*
7237 		 * If we don't have a guid for this lun it's because we were
7238 		 * unable to glean one from the page 83 response.  Set the
7239 		 * control flag to 0 here to make sure that we don't attempt to
7240 		 * enumerate it under mpxio.
7241 		 */
7242 		if (fail || pptr->port_mpxio == 0) {
7243 			plun->lun_mpxio = 0;
7244 		} else {
7245 			plun->lun_mpxio = 1;
7246 		}
7247 	}
7248 	mutex_exit(&plun->lun_mutex);
7249 	mutex_exit(&pptr->port_mutex);
7250 
7251 	mutex_enter(&ptgt->tgt_mutex);
7252 	plun->lun_state &=
7253 	    ~(FCP_LUN_OFFLINE | FCP_LUN_MARK | FCP_LUN_BUSY);
7254 	mutex_exit(&ptgt->tgt_mutex);
7255 
7256 	(void) fcp_call_finish_init(pptr, ptgt, icmd->ipkt_link_cnt,
7257 	    icmd->ipkt_change_cnt, icmd->ipkt_cause);
7258 
7259 	fcp_icmd_free(pptr, icmd);
7260 }
7261 
7262 /*
7263  *     Function: fcp_handle_inquiry
7264  *
7265  *  Description: Called by fcp_scsi_callback to handle the response to an
7266  *		 INQUIRY request.
7267  *
7268  *     Argument: *fpkt	FC packet used to convey the command.
7269  *		 *icmd	Original fcp_ipkt structure.
7270  *
7271  * Return Value: None
7272  */
7273 static void
7274 fcp_handle_inquiry(fc_packet_t *fpkt, struct fcp_ipkt *icmd)
7275 {
7276 	struct fcp_port	*pptr;
7277 	struct fcp_lun	*plun;
7278 	struct fcp_tgt	*ptgt;
7279 	uchar_t		dtype;
7280 	uchar_t		pqual;
7281 	uint32_t	rscn_count = FC_INVALID_RSCN_COUNT;
7282 
7283 	ASSERT(icmd != NULL && fpkt != NULL);
7284 
7285 	pptr = icmd->ipkt_port;
7286 	ptgt = icmd->ipkt_tgt;
7287 	plun = icmd->ipkt_lun;
7288 
7289 	FCP_CP_IN(fpkt->pkt_data, &plun->lun_inq, fpkt->pkt_data_acc,
7290 	    sizeof (struct scsi_inquiry));
7291 
7292 	dtype = plun->lun_inq.inq_dtype & DTYPE_MASK;
7293 	pqual = plun->lun_inq.inq_dtype >> 5;
7294 
7295 	FCP_TRACE(fcp_logq, pptr->port_instbuf,
7296 	    fcp_trace, FCP_BUF_LEVEL_5, 0,
7297 	    "fcp_handle_inquiry: port=%d, tgt D_ID=0x%x, lun=0x%x, "
7298 	    "dtype=0x%x pqual: 0x%x", pptr->port_instance, ptgt->tgt_d_id,
7299 	    plun->lun_num, dtype, pqual);
7300 
7301 	if (pqual != 0) {
7302 		/*
7303 		 * Non-zero peripheral qualifier
7304 		 */
7305 		fcp_log(CE_CONT, pptr->port_dip,
7306 		    "!Target 0x%x lun 0x%x: Nonzero peripheral qualifier: "
7307 		    "Device type=0x%x Peripheral qual=0x%x\n",
7308 		    ptgt->tgt_d_id, plun->lun_num, dtype, pqual);
7309 
7310 		FCP_TRACE(fcp_logq, pptr->port_instbuf,
7311 		    fcp_trace, FCP_BUF_LEVEL_5, 0,
7312 		    "!Target 0x%x lun 0x%x: Nonzero peripheral qualifier: "
7313 		    "Device type=0x%x Peripheral qual=0x%x\n",
7314 		    ptgt->tgt_d_id, plun->lun_num, dtype, pqual);
7315 
7316 		FCP_LUN_TRACE(plun, FCP_LUN_TRACE_3);
7317 
7318 		(void) fcp_call_finish_init(pptr, ptgt, icmd->ipkt_link_cnt,
7319 		    icmd->ipkt_change_cnt, icmd->ipkt_cause);
7320 		fcp_icmd_free(pptr, icmd);
7321 		return;
7322 	}
7323 
7324 	/*
7325 	 * If the device is already initialized, check the dtype
7326 	 * for a change. If it has changed then update the flags
7327 	 * so the create_luns will offline the old device and
7328 	 * create the new device. Refer to bug: 4764752
7329 	 */
7330 	if ((plun->lun_state & FCP_LUN_INIT) && dtype != plun->lun_type) {
7331 		plun->lun_state |= FCP_LUN_CHANGED;
7332 	}
7333 	plun->lun_type = plun->lun_inq.inq_dtype;
7334 
7335 	/*
7336 	 * This code is setting/initializing the throttling in the FCA
7337 	 * driver.
7338 	 */
7339 	mutex_enter(&pptr->port_mutex);
7340 	if (!pptr->port_notify) {
7341 		if (bcmp(plun->lun_inq.inq_pid, pid, strlen(pid)) == 0) {
7342 			uint32_t cmd = 0;
7343 			cmd = ((cmd & 0xFF | FC_NOTIFY_THROTTLE) |
7344 			    ((cmd & 0xFFFFFF00 >> 8) |
7345 			    FCP_SVE_THROTTLE << 8));
7346 			pptr->port_notify = 1;
7347 			mutex_exit(&pptr->port_mutex);
7348 			(void) fc_ulp_port_notify(pptr->port_fp_handle, cmd);
7349 			mutex_enter(&pptr->port_mutex);
7350 		}
7351 	}
7352 
7353 	if (FCP_TGT_STATE_CHANGED(ptgt, icmd)) {
7354 		FCP_TRACE(fcp_logq, pptr->port_instbuf,
7355 		    fcp_trace, FCP_BUF_LEVEL_2, 0,
7356 		    "fcp_handle_inquiry,1:state change occured"
7357 		    " for D_ID=0x%x", ptgt->tgt_d_id);
7358 		mutex_exit(&pptr->port_mutex);
7359 
7360 		FCP_LUN_TRACE(plun, FCP_LUN_TRACE_5);
7361 		(void) fcp_call_finish_init(pptr, ptgt,
7362 		    icmd->ipkt_link_cnt, icmd->ipkt_change_cnt,
7363 		    icmd->ipkt_cause);
7364 		fcp_icmd_free(pptr, icmd);
7365 		return;
7366 	}
7367 	ASSERT((ptgt->tgt_state & FCP_TGT_MARK) == 0);
7368 	mutex_exit(&pptr->port_mutex);
7369 
7370 	/* Retrieve the rscn count (if a valid one exists) */
7371 	if (icmd->ipkt_fpkt->pkt_ulp_rscn_infop != NULL) {
7372 		rscn_count = ((fc_ulp_rscn_info_t *)
7373 		    (icmd->ipkt_fpkt->pkt_ulp_rscn_infop))->ulp_rscn_count;
7374 	} else {
7375 		rscn_count = FC_INVALID_RSCN_COUNT;
7376 	}
7377 
7378 	if (fcp_send_scsi(plun, SCMD_INQUIRY_PAGE83,
7379 	    SCMD_MAX_INQUIRY_PAGE83_SIZE,
7380 	    icmd->ipkt_link_cnt, icmd->ipkt_change_cnt,
7381 	    icmd->ipkt_cause, rscn_count) != DDI_SUCCESS) {
7382 		fcp_log(CE_WARN, NULL, "!failed to send page 83");
7383 		FCP_LUN_TRACE(plun, FCP_LUN_TRACE_6);
7384 		(void) fcp_call_finish_init(pptr, ptgt,
7385 		    icmd->ipkt_link_cnt, icmd->ipkt_change_cnt,
7386 		    icmd->ipkt_cause);
7387 	}
7388 
7389 	/*
7390 	 * Read Inquiry VPD Page 0x83 to uniquely
7391 	 * identify this logical unit.
7392 	 */
7393 	fcp_icmd_free(pptr, icmd);
7394 }
7395 
7396 /*
7397  *     Function: fcp_handle_reportlun
7398  *
7399  *  Description: Called by fcp_scsi_callback to handle the response to a
7400  *		 REPORT_LUN request.
7401  *
7402  *     Argument: *fpkt	FC packet used to convey the command.
7403  *		 *icmd	Original fcp_ipkt structure.
7404  *
7405  * Return Value: None
7406  */
7407 static void
7408 fcp_handle_reportlun(fc_packet_t *fpkt, struct fcp_ipkt *icmd)
7409 {
7410 	int				i;
7411 	int				nluns_claimed;
7412 	int				nluns_bufmax;
7413 	int				len;
7414 	uint16_t			lun_num;
7415 	uint32_t			rscn_count = FC_INVALID_RSCN_COUNT;
7416 	struct fcp_port			*pptr;
7417 	struct fcp_tgt			*ptgt;
7418 	struct fcp_lun			*plun;
7419 	struct fcp_reportlun_resp	*report_lun;
7420 
7421 	pptr = icmd->ipkt_port;
7422 	ptgt = icmd->ipkt_tgt;
7423 	len = fpkt->pkt_datalen;
7424 
7425 	if ((len < FCP_LUN_HEADER) ||
7426 	    ((report_lun = kmem_zalloc(len, KM_NOSLEEP)) == NULL)) {
7427 		(void) fcp_call_finish_init(pptr, ptgt, icmd->ipkt_link_cnt,
7428 		    icmd->ipkt_change_cnt, icmd->ipkt_cause);
7429 		fcp_icmd_free(pptr, icmd);
7430 		return;
7431 	}
7432 
7433 	FCP_CP_IN(fpkt->pkt_data, report_lun, fpkt->pkt_data_acc,
7434 	    fpkt->pkt_datalen);
7435 
7436 	FCP_TRACE(fcp_logq, pptr->port_instbuf,
7437 	    fcp_trace, FCP_BUF_LEVEL_5, 0,
7438 	    "fcp_handle_reportlun: port=%d, tgt D_ID=0x%x",
7439 	    pptr->port_instance, ptgt->tgt_d_id);
7440 
7441 	/*
7442 	 * Get the number of luns (which is supplied as LUNS * 8) the
7443 	 * device claims it has.
7444 	 */
7445 	nluns_claimed = BE_32(report_lun->num_lun) >> 3;
7446 
7447 	/*
7448 	 * Get the maximum number of luns the buffer submitted can hold.
7449 	 */
7450 	nluns_bufmax = (fpkt->pkt_datalen - FCP_LUN_HEADER) / FCP_LUN_SIZE;
7451 
7452 	/*
7453 	 * Due to limitations of certain hardware, we support only 16 bit LUNs
7454 	 */
7455 	if (nluns_claimed > FCP_MAX_LUNS_SUPPORTED) {
7456 		kmem_free(report_lun, len);
7457 
7458 		fcp_log(CE_NOTE, pptr->port_dip, "!Can not support"
7459 		    " 0x%x number of LUNs for target=%x", nluns_claimed,
7460 		    ptgt->tgt_d_id);
7461 
7462 		(void) fcp_call_finish_init(pptr, ptgt, icmd->ipkt_link_cnt,
7463 		    icmd->ipkt_change_cnt, icmd->ipkt_cause);
7464 		fcp_icmd_free(pptr, icmd);
7465 		return;
7466 	}
7467 
7468 	/*
7469 	 * If there are more LUNs than we have allocated memory for,
7470 	 * allocate more space and send down yet another report lun if
7471 	 * the maximum number of attempts hasn't been reached.
7472 	 */
7473 	mutex_enter(&ptgt->tgt_mutex);
7474 
7475 	if ((nluns_claimed > nluns_bufmax) &&
7476 	    (ptgt->tgt_report_lun_cnt < FCP_MAX_REPORTLUNS_ATTEMPTS)) {
7477 
7478 		struct fcp_lun *plun;
7479 
7480 		ptgt->tgt_report_lun_cnt++;
7481 		plun = ptgt->tgt_lun;
7482 		ASSERT(plun != NULL);
7483 		mutex_exit(&ptgt->tgt_mutex);
7484 
7485 		kmem_free(report_lun, len);
7486 
7487 		FCP_TRACE(fcp_logq, pptr->port_instbuf,
7488 		    fcp_trace, FCP_BUF_LEVEL_5, 0,
7489 		    "!Dynamically discovered %d LUNs for D_ID=%x",
7490 		    nluns_claimed, ptgt->tgt_d_id);
7491 
7492 		/* Retrieve the rscn count (if a valid one exists) */
7493 		if (icmd->ipkt_fpkt->pkt_ulp_rscn_infop != NULL) {
7494 			rscn_count = ((fc_ulp_rscn_info_t *)
7495 			    (icmd->ipkt_fpkt->pkt_ulp_rscn_infop))->
7496 			    ulp_rscn_count;
7497 		} else {
7498 			rscn_count = FC_INVALID_RSCN_COUNT;
7499 		}
7500 
7501 		if (fcp_send_scsi(icmd->ipkt_lun, SCMD_REPORT_LUN,
7502 		    FCP_LUN_HEADER + (nluns_claimed * FCP_LUN_SIZE),
7503 		    icmd->ipkt_link_cnt, icmd->ipkt_change_cnt,
7504 		    icmd->ipkt_cause, rscn_count) != DDI_SUCCESS) {
7505 			(void) fcp_call_finish_init(pptr, ptgt,
7506 			    icmd->ipkt_link_cnt, icmd->ipkt_change_cnt,
7507 			    icmd->ipkt_cause);
7508 		}
7509 
7510 		fcp_icmd_free(pptr, icmd);
7511 		return;
7512 	}
7513 
7514 	if (nluns_claimed > nluns_bufmax) {
7515 		FCP_TRACE(fcp_logq, pptr->port_instbuf,
7516 		    fcp_trace, FCP_BUF_LEVEL_5, 0,
7517 		    "Target=%x:%x:%x:%x:%x:%x:%x:%x"
7518 		    "    Number of LUNs lost=%x",
7519 		    ptgt->tgt_port_wwn.raw_wwn[0],
7520 		    ptgt->tgt_port_wwn.raw_wwn[1],
7521 		    ptgt->tgt_port_wwn.raw_wwn[2],
7522 		    ptgt->tgt_port_wwn.raw_wwn[3],
7523 		    ptgt->tgt_port_wwn.raw_wwn[4],
7524 		    ptgt->tgt_port_wwn.raw_wwn[5],
7525 		    ptgt->tgt_port_wwn.raw_wwn[6],
7526 		    ptgt->tgt_port_wwn.raw_wwn[7],
7527 		    nluns_claimed - nluns_bufmax);
7528 
7529 		nluns_claimed = nluns_bufmax;
7530 	}
7531 	ptgt->tgt_lun_cnt = nluns_claimed;
7532 
7533 	/*
7534 	 * Identify missing LUNs and print warning messages
7535 	 */
7536 	for (plun = ptgt->tgt_lun; plun; plun = plun->lun_next) {
7537 		int offline;
7538 		int exists = 0;
7539 
7540 		offline = (plun->lun_state & FCP_LUN_OFFLINE) ? 1 : 0;
7541 
7542 		for (i = 0; i < nluns_claimed && exists == 0; i++) {
7543 			uchar_t		*lun_string;
7544 
7545 			lun_string = (uchar_t *)&(report_lun->lun_string[i]);
7546 
7547 			switch (lun_string[0] & 0xC0) {
7548 			case FCP_LUN_ADDRESSING:
7549 			case FCP_PD_ADDRESSING:
7550 				lun_num = ((lun_string[0] & 0x3F) << 8) |
7551 				    lun_string[1];
7552 				if (plun->lun_num == lun_num) {
7553 					exists++;
7554 					break;
7555 				}
7556 				break;
7557 
7558 			default:
7559 				break;
7560 			}
7561 		}
7562 
7563 		if (!exists && !offline) {
7564 			mutex_exit(&ptgt->tgt_mutex);
7565 
7566 			mutex_enter(&pptr->port_mutex);
7567 			mutex_enter(&ptgt->tgt_mutex);
7568 			if (!FCP_STATE_CHANGED(pptr, ptgt, icmd)) {
7569 				/*
7570 				 * set disappear flag when device was connected
7571 				 */
7572 				if (!(plun->lun_state &
7573 					FCP_LUN_DEVICE_NOT_CONNECTED))
7574 					plun->lun_state |= FCP_LUN_DISAPPEARED;
7575 				mutex_exit(&ptgt->tgt_mutex);
7576 				mutex_exit(&pptr->port_mutex);
7577 				if (!(plun->lun_state &
7578 					FCP_LUN_DEVICE_NOT_CONNECTED))
7579 					fcp_log(CE_NOTE, pptr->port_dip,
7580 					    "!Lun=%x for target=%x disappeared",
7581 					    plun->lun_num, ptgt->tgt_d_id);
7582 				mutex_enter(&ptgt->tgt_mutex);
7583 			} else {
7584 				FCP_TRACE(fcp_logq, pptr->port_instbuf,
7585 				    fcp_trace, FCP_BUF_LEVEL_5, 0,
7586 				    "fcp_handle_reportlun,1: state change"
7587 				    " occured for D_ID=0x%x", ptgt->tgt_d_id);
7588 				mutex_exit(&ptgt->tgt_mutex);
7589 				mutex_exit(&pptr->port_mutex);
7590 				kmem_free(report_lun, len);
7591 				(void) fcp_call_finish_init(pptr, ptgt,
7592 				    icmd->ipkt_link_cnt, icmd->ipkt_change_cnt,
7593 				    icmd->ipkt_cause);
7594 				fcp_icmd_free(pptr, icmd);
7595 				return;
7596 			}
7597 		} else if (exists) {
7598 			/*
7599 			 * clear FCP_LUN_DEVICE_NOT_CONNECTED when lun 0
7600 			 * actually exists in REPORT_LUN response
7601 			 */
7602 			if (plun->lun_state & FCP_LUN_DEVICE_NOT_CONNECTED)
7603 				plun->lun_state &=
7604 					~FCP_LUN_DEVICE_NOT_CONNECTED;
7605 			if (offline || plun->lun_num == 0) {
7606 				if (plun->lun_state & FCP_LUN_DISAPPEARED)  {
7607 					plun->lun_state &= ~FCP_LUN_DISAPPEARED;
7608 					mutex_exit(&ptgt->tgt_mutex);
7609 					fcp_log(CE_NOTE, pptr->port_dip,
7610 					    "!Lun=%x for target=%x reappeared",
7611 					    plun->lun_num, ptgt->tgt_d_id);
7612 					mutex_enter(&ptgt->tgt_mutex);
7613 				}
7614 			}
7615 		}
7616 	}
7617 
7618 	ptgt->tgt_tmp_cnt = nluns_claimed ? nluns_claimed : 1;
7619 	mutex_exit(&ptgt->tgt_mutex);
7620 
7621 	FCP_TRACE(fcp_logq, pptr->port_instbuf,
7622 	    fcp_trace, FCP_BUF_LEVEL_5, 0,
7623 	    "fcp_handle_reportlun: port=%d, tgt D_ID=0x%x, %d LUN(s)",
7624 	    pptr->port_instance, ptgt->tgt_d_id, nluns_claimed);
7625 
7626 	/* scan each lun */
7627 	for (i = 0; i < nluns_claimed; i++) {
7628 		uchar_t	*lun_string;
7629 
7630 		lun_string = (uchar_t *)&(report_lun->lun_string[i]);
7631 
7632 		FCP_TRACE(fcp_logq, pptr->port_instbuf,
7633 		    fcp_trace, FCP_BUF_LEVEL_5, 0,
7634 		    "handle_reportlun: d_id=%x, LUN ind=%d, LUN=%d,"
7635 		    " addr=0x%x", ptgt->tgt_d_id, i, lun_string[1],
7636 		    lun_string[0]);
7637 
7638 		switch (lun_string[0] & 0xC0) {
7639 		case FCP_LUN_ADDRESSING:
7640 		case FCP_PD_ADDRESSING:
7641 			lun_num = ((lun_string[0] & 0x3F) << 8) | lun_string[1];
7642 
7643 			/* We will skip masked LUNs because of the blacklist. */
7644 			if (fcp_lun_blacklist != NULL) {
7645 				mutex_enter(&ptgt->tgt_mutex);
7646 				if (fcp_should_mask(&ptgt->tgt_port_wwn,
7647 				    lun_num) == TRUE) {
7648 					ptgt->tgt_lun_cnt--;
7649 					mutex_exit(&ptgt->tgt_mutex);
7650 					break;
7651 				}
7652 				mutex_exit(&ptgt->tgt_mutex);
7653 			}
7654 
7655 			/* see if this LUN is already allocated */
7656 			if ((plun = fcp_get_lun(ptgt, lun_num)) == NULL) {
7657 				plun = fcp_alloc_lun(ptgt);
7658 				if (plun == NULL) {
7659 					fcp_log(CE_NOTE, pptr->port_dip,
7660 					    "!Lun allocation failed"
7661 					    " target=%x lun=%x",
7662 					    ptgt->tgt_d_id, lun_num);
7663 					break;
7664 				}
7665 			}
7666 
7667 			mutex_enter(&plun->lun_tgt->tgt_mutex);
7668 			/* convert to LUN */
7669 			plun->lun_addr.ent_addr_0 =
7670 			    BE_16(*(uint16_t *)&(lun_string[0]));
7671 			plun->lun_addr.ent_addr_1 =
7672 			    BE_16(*(uint16_t *)&(lun_string[2]));
7673 			plun->lun_addr.ent_addr_2 =
7674 			    BE_16(*(uint16_t *)&(lun_string[4]));
7675 			plun->lun_addr.ent_addr_3 =
7676 			    BE_16(*(uint16_t *)&(lun_string[6]));
7677 
7678 			plun->lun_num = lun_num;
7679 			plun->lun_state |= FCP_LUN_BUSY | FCP_LUN_MARK;
7680 			plun->lun_state &= ~FCP_LUN_OFFLINE;
7681 			mutex_exit(&plun->lun_tgt->tgt_mutex);
7682 
7683 			/* Retrieve the rscn count (if a valid one exists) */
7684 			if (icmd->ipkt_fpkt->pkt_ulp_rscn_infop != NULL) {
7685 				rscn_count = ((fc_ulp_rscn_info_t *)
7686 				    (icmd->ipkt_fpkt->pkt_ulp_rscn_infop))->
7687 				    ulp_rscn_count;
7688 			} else {
7689 				rscn_count = FC_INVALID_RSCN_COUNT;
7690 			}
7691 
7692 			if (fcp_send_scsi(plun, SCMD_INQUIRY, SUN_INQSIZE,
7693 			    icmd->ipkt_link_cnt, icmd->ipkt_change_cnt,
7694 			    icmd->ipkt_cause, rscn_count) != DDI_SUCCESS) {
7695 				mutex_enter(&pptr->port_mutex);
7696 				mutex_enter(&plun->lun_tgt->tgt_mutex);
7697 				if (!FCP_STATE_CHANGED(pptr, ptgt, icmd)) {
7698 					fcp_log(CE_NOTE, pptr->port_dip,
7699 					    "!failed to send INQUIRY"
7700 					    " target=%x lun=%x",
7701 					    ptgt->tgt_d_id, plun->lun_num);
7702 				} else {
7703 					FCP_TRACE(fcp_logq,
7704 					    pptr->port_instbuf, fcp_trace,
7705 					    FCP_BUF_LEVEL_5, 0,
7706 					    "fcp_handle_reportlun,2: state"
7707 					    " change occured for D_ID=0x%x",
7708 					    ptgt->tgt_d_id);
7709 				}
7710 				mutex_exit(&plun->lun_tgt->tgt_mutex);
7711 				mutex_exit(&pptr->port_mutex);
7712 			} else {
7713 				continue;
7714 			}
7715 			break;
7716 
7717 		case FCP_VOLUME_ADDRESSING:
7718 			/* FALLTHROUGH */
7719 		default:
7720 			fcp_log(CE_WARN, NULL,
7721 			    "!Unsupported LUN Addressing method %x "
7722 			    "in response to REPORT_LUN", lun_string[0]);
7723 			break;
7724 		}
7725 
7726 		/*
7727 		 * each time through this loop we should decrement
7728 		 * the tmp_cnt by one -- since we go through this loop
7729 		 * one time for each LUN, the tmp_cnt should never be <=0
7730 		 */
7731 		(void) fcp_call_finish_init(pptr, ptgt, icmd->ipkt_link_cnt,
7732 		    icmd->ipkt_change_cnt, icmd->ipkt_cause);
7733 	}
7734 
7735 	if (i == 0) {
7736 		fcp_log(CE_WARN, pptr->port_dip,
7737 		    "!FCP: target=%x reported NO Luns", ptgt->tgt_d_id);
7738 		(void) fcp_call_finish_init(pptr, ptgt, icmd->ipkt_link_cnt,
7739 		    icmd->ipkt_change_cnt, icmd->ipkt_cause);
7740 	}
7741 
7742 	kmem_free(report_lun, len);
7743 	fcp_icmd_free(pptr, icmd);
7744 }
7745 
7746 
7747 /*
7748  * called internally to return a LUN given a target and a LUN number
7749  */
7750 static struct fcp_lun *
7751 fcp_get_lun(struct fcp_tgt *ptgt, uint16_t lun_num)
7752 {
7753 	struct fcp_lun	*plun;
7754 
7755 	mutex_enter(&ptgt->tgt_mutex);
7756 	for (plun = ptgt->tgt_lun; plun != NULL; plun = plun->lun_next) {
7757 		if (plun->lun_num == lun_num) {
7758 			mutex_exit(&ptgt->tgt_mutex);
7759 			return (plun);
7760 		}
7761 	}
7762 	mutex_exit(&ptgt->tgt_mutex);
7763 
7764 	return (NULL);
7765 }
7766 
7767 
7768 /*
7769  * handle finishing one target for fcp_finish_init
7770  *
7771  * return true (non-zero) if we want finish_init to continue with the
7772  * next target
7773  *
7774  * called with the port mutex held
7775  */
7776 /*ARGSUSED*/
7777 static int
7778 fcp_finish_tgt(struct fcp_port *pptr, struct fcp_tgt *ptgt,
7779     int link_cnt, int tgt_cnt, int cause)
7780 {
7781 	int 	rval = 1;
7782 	ASSERT(pptr != NULL);
7783 	ASSERT(ptgt != NULL);
7784 
7785 	FCP_TRACE(fcp_logq, pptr->port_instbuf,
7786 	    fcp_trace, FCP_BUF_LEVEL_5, 0,
7787 	    "finish_tgt: D_ID/state = 0x%x/0x%x", ptgt->tgt_d_id,
7788 	    ptgt->tgt_state);
7789 
7790 	ASSERT(mutex_owned(&pptr->port_mutex));
7791 
7792 	if ((pptr->port_link_cnt != link_cnt) ||
7793 	    (tgt_cnt && ptgt->tgt_change_cnt != tgt_cnt)) {
7794 		/*
7795 		 * oh oh -- another link reset or target change
7796 		 * must have occurred while we are in here
7797 		 */
7798 		FCP_TGT_TRACE(ptgt, tgt_cnt, FCP_TGT_TRACE_23);
7799 
7800 		return (0);
7801 	} else {
7802 		FCP_TGT_TRACE(ptgt, tgt_cnt, FCP_TGT_TRACE_24);
7803 	}
7804 
7805 	mutex_enter(&ptgt->tgt_mutex);
7806 
7807 	if (!(ptgt->tgt_state & FCP_TGT_OFFLINE)) {
7808 		/*
7809 		 * tgt is not offline -- is it marked (i.e. needs
7810 		 * to be offlined) ??
7811 		 */
7812 		if (ptgt->tgt_state & FCP_TGT_MARK) {
7813 			/*
7814 			 * this target not offline *and*
7815 			 * marked
7816 			 */
7817 			ptgt->tgt_state &= ~FCP_TGT_MARK;
7818 			rval = fcp_offline_target(pptr, ptgt, link_cnt,
7819 			    tgt_cnt, 0, 0);
7820 		} else {
7821 			ptgt->tgt_state &= ~FCP_TGT_BUSY;
7822 
7823 			/* create the LUNs */
7824 			if (ptgt->tgt_node_state != FCP_TGT_NODE_ON_DEMAND) {
7825 				ptgt->tgt_node_state = FCP_TGT_NODE_PRESENT;
7826 				fcp_create_luns(ptgt, link_cnt, tgt_cnt,
7827 				    cause);
7828 				ptgt->tgt_device_created = 1;
7829 			} else {
7830 				fcp_update_tgt_state(ptgt, FCP_RESET,
7831 				    FCP_LUN_BUSY);
7832 			}
7833 		}
7834 	}
7835 
7836 	mutex_exit(&ptgt->tgt_mutex);
7837 
7838 	return (rval);
7839 }
7840 
7841 
7842 /*
7843  * this routine is called to finish port initialization
7844  *
7845  * Each port has a "temp" counter -- when a state change happens (e.g.
7846  * port online), the temp count is set to the number of devices in the map.
7847  * Then, as each device gets "discovered", the temp counter is decremented
7848  * by one.  When this count reaches zero we know that all of the devices
7849  * in the map have been discovered (or an error has occurred), so we can
7850  * then finish initialization -- which is done by this routine (well, this
7851  * and fcp-finish_tgt())
7852  *
7853  * acquires and releases the global mutex
7854  *
7855  * called with the port mutex owned
7856  */
7857 static void
7858 fcp_finish_init(struct fcp_port *pptr)
7859 {
7860 #ifdef	DEBUG
7861 	bzero(pptr->port_finish_stack, sizeof (pptr->port_finish_stack));
7862 	pptr->port_finish_depth = getpcstack(pptr->port_finish_stack,
7863 	    FCP_STACK_DEPTH);
7864 #endif /* DEBUG */
7865 
7866 	ASSERT(mutex_owned(&pptr->port_mutex));
7867 
7868 	FCP_TRACE(fcp_logq, pptr->port_instbuf,
7869 	    fcp_trace, FCP_BUF_LEVEL_2, 0, "finish_init:"
7870 	    " entering; ipkt count=%d", pptr->port_ipkt_cnt);
7871 
7872 	if ((pptr->port_state & FCP_STATE_ONLINING) &&
7873 	    !(pptr->port_state & (FCP_STATE_SUSPENDED |
7874 	    FCP_STATE_DETACHING | FCP_STATE_POWER_DOWN))) {
7875 		pptr->port_state &= ~FCP_STATE_ONLINING;
7876 		pptr->port_state |= FCP_STATE_ONLINE;
7877 	}
7878 
7879 	/* Wake up threads waiting on config done */
7880 	cv_broadcast(&pptr->port_config_cv);
7881 }
7882 
7883 
7884 /*
7885  * called from fcp_finish_init to create the LUNs for a target
7886  *
7887  * called with the port mutex owned
7888  */
7889 static void
7890 fcp_create_luns(struct fcp_tgt *ptgt, int link_cnt, int tgt_cnt, int cause)
7891 {
7892 	struct fcp_lun	*plun;
7893 	struct fcp_port	*pptr;
7894 	child_info_t		*cip = NULL;
7895 
7896 	ASSERT(ptgt != NULL);
7897 	ASSERT(mutex_owned(&ptgt->tgt_mutex));
7898 
7899 	pptr = ptgt->tgt_port;
7900 
7901 	ASSERT(pptr != NULL);
7902 
7903 	/* scan all LUNs for this target */
7904 	for (plun = ptgt->tgt_lun; plun != NULL; plun = plun->lun_next) {
7905 		if (plun->lun_state & FCP_LUN_OFFLINE) {
7906 			continue;
7907 		}
7908 
7909 		if (plun->lun_state & FCP_LUN_MARK) {
7910 			FCP_TRACE(fcp_logq, pptr->port_instbuf,
7911 			    fcp_trace, FCP_BUF_LEVEL_2, 0,
7912 			    "fcp_create_luns: offlining marked LUN!");
7913 			fcp_offline_lun(plun, link_cnt, tgt_cnt, 1, 0);
7914 			continue;
7915 		}
7916 
7917 		plun->lun_state &= ~FCP_LUN_BUSY;
7918 
7919 		/*
7920 		 * There are conditions in which FCP_LUN_INIT flag is cleared
7921 		 * but we have a valid plun->lun_cip. To cover this case also
7922 		 * CLEAR_BUSY whenever we have a valid lun_cip.
7923 		 */
7924 		if (plun->lun_mpxio && plun->lun_cip &&
7925 		    (!fcp_pass_to_hp(pptr, plun, plun->lun_cip,
7926 		    FCP_MPXIO_PATH_CLEAR_BUSY, link_cnt, tgt_cnt,
7927 		    0, 0))) {
7928 			FCP_TRACE(fcp_logq, pptr->port_instbuf,
7929 			    fcp_trace, FCP_BUF_LEVEL_2, 0,
7930 			    "fcp_create_luns: enable lun %p failed!",
7931 			    plun);
7932 		}
7933 
7934 		if (plun->lun_state & FCP_LUN_INIT &&
7935 			    !(plun->lun_state & FCP_LUN_CHANGED)) {
7936 			continue;
7937 		}
7938 
7939 		if (cause == FCP_CAUSE_USER_CREATE) {
7940 			continue;
7941 		}
7942 
7943 		FCP_TRACE(fcp_logq, pptr->port_instbuf,
7944 		    fcp_trace, FCP_BUF_LEVEL_6, 0,
7945 		    "create_luns: passing ONLINE elem to HP thread");
7946 
7947 		/*
7948 		 * If lun has changed, prepare for offlining the old path.
7949 		 * Do not offline the old path right now, since it may be
7950 		 * still opened.
7951 		 */
7952 		if (plun->lun_cip && (plun->lun_state & FCP_LUN_CHANGED)) {
7953 			fcp_prepare_offline_lun(plun, link_cnt, tgt_cnt);
7954 		}
7955 
7956 		/* pass an ONLINE element to the hotplug thread */
7957 		if (!fcp_pass_to_hp(pptr, plun, cip, FCP_ONLINE,
7958 		    link_cnt, tgt_cnt, NDI_ONLINE_ATTACH, 0)) {
7959 
7960 			/*
7961 			 * We can not synchronous attach (i.e pass
7962 			 * NDI_ONLINE_ATTACH) here as we might be
7963 			 * coming from an interrupt or callback
7964 			 * thread.
7965 			 */
7966 			if (!fcp_pass_to_hp(pptr, plun, cip, FCP_ONLINE,
7967 			    link_cnt, tgt_cnt, 0, 0)) {
7968 				fcp_log(CE_CONT, pptr->port_dip,
7969 				    "Can not ONLINE LUN; D_ID=%x, LUN=%x\n",
7970 				    plun->lun_tgt->tgt_d_id, plun->lun_num);
7971 			}
7972 		}
7973 	}
7974 }
7975 
7976 
7977 /*
7978  * function to online/offline devices
7979  */
7980 static int
7981 fcp_trigger_lun(struct fcp_lun *plun, child_info_t *cip, int online,
7982     int lcount, int tcount, int flags)
7983 {
7984 	int			rval = NDI_FAILURE;
7985 	int			circ;
7986 	child_info_t		*ccip;
7987 	struct fcp_port 	*pptr = plun->lun_tgt->tgt_port;
7988 	int			is_mpxio = pptr->port_mpxio;
7989 	dev_info_t		*cdip, *pdip;
7990 	char			*devname;
7991 
7992 	FCP_TRACE(fcp_logq, pptr->port_instbuf,
7993 	    fcp_trace, FCP_BUF_LEVEL_2, 0,
7994 	    "fcp_trigger_lun: plun=%p target=%x lun=%d cip=%p what=%x "
7995 	    "flags=%x mpxio=%x\n",
7996 	    plun, LUN_TGT->tgt_d_id, plun->lun_num, cip, online, flags,
7997 	    plun->lun_mpxio);
7998 
7999 	/*
8000 	 * lun_mpxio needs checking here because we can end up in a race
8001 	 * condition where this task has been dispatched while lun_mpxio is
8002 	 * set, but an earlier FCP_ONLINE task for the same LUN tried to
8003 	 * enable MPXIO for the LUN, but was unable to, and hence cleared
8004 	 * the flag. We rely on the serialization of the tasks here. We return
8005 	 * NDI_SUCCESS so any callers continue without reporting spurious
8006 	 * errors, and the still think we're an MPXIO LUN.
8007 	 */
8008 
8009 	if (online == FCP_MPXIO_PATH_CLEAR_BUSY ||
8010 	    online == FCP_MPXIO_PATH_SET_BUSY) {
8011 		if (plun->lun_mpxio)
8012 			rval = fcp_update_mpxio_path(plun, cip, online);
8013 		else
8014 			rval = NDI_SUCCESS;
8015 		return (rval);
8016 	}
8017 
8018 	/*
8019 	 * Explicit devfs_clean() due to ndi_devi_offline() not
8020 	 * executing devfs_clean() if parent lock is held.
8021 	 */
8022 	ASSERT(!servicing_interrupt());
8023 	if (online == FCP_OFFLINE) {
8024 		if (plun->lun_mpxio == 0) {
8025 			if (plun->lun_cip == cip)
8026 				cdip = DIP(plun->lun_cip);
8027 			else
8028 				cdip = DIP(cip);
8029 		} else if ((plun->lun_cip == cip) && plun->lun_cip) {
8030 			cdip = mdi_pi_get_client(PIP(plun->lun_cip));
8031 		} else if ((plun->lun_cip != cip) && cip) {
8032 			/*
8033 			 * This means a DTYPE/GUID change, we shall get the
8034 			 * dip of the old cip instead of the current lun_cip.
8035 			 */
8036 			cdip = mdi_pi_get_client(PIP(cip));
8037 		}
8038 		if (cdip) {
8039 			if (i_ddi_devi_attached(cdip)) {
8040 				pdip = ddi_get_parent(cdip);
8041 				devname = kmem_alloc(MAXNAMELEN + 1, KM_SLEEP);
8042 				ndi_devi_enter(pdip, &circ);
8043 				(void) ddi_deviname(cdip, devname);
8044 				ndi_devi_exit(pdip, circ);
8045 				/*
8046 				 * Release parent lock before calling
8047 				 * devfs_clean().
8048 				 */
8049 				rval = devfs_clean(pdip, devname + 1,
8050 				    DV_CLEAN_FORCE);
8051 				kmem_free(devname, MAXNAMELEN + 1);
8052 				/*
8053 				 * Return if devfs_clean() fails for
8054 				 * non-MPXIO case.
8055 				 * For MPXIO case, another path could be
8056 				 * offlined.
8057 				 */
8058 				if (rval && plun->lun_mpxio == 0) {
8059 					FCP_TRACE(fcp_logq, pptr->port_instbuf,
8060 					    fcp_trace, FCP_BUF_LEVEL_3, 0,
8061 					    "fcp_trigger_lun: devfs_clean "
8062 					    "failed rval=%x  dip=%p",
8063 					    rval, pdip);
8064 					return (NDI_FAILURE);
8065 				}
8066 			}
8067 		}
8068 	}
8069 
8070 	if (fc_ulp_busy_port(pptr->port_fp_handle) != 0) {
8071 		return (NDI_FAILURE);
8072 	}
8073 
8074 	if (is_mpxio)
8075 		mdi_devi_enter(pptr->port_dip, &circ);
8076 	else
8077 		ndi_devi_enter(pptr->port_dip, &circ);
8078 
8079 	mutex_enter(&pptr->port_mutex);
8080 	mutex_enter(&plun->lun_mutex);
8081 
8082 	if (online == FCP_ONLINE) {
8083 		ccip = fcp_get_cip(plun, cip, lcount, tcount);
8084 		if (ccip == NULL) {
8085 			goto fail;
8086 		}
8087 	} else {
8088 		if (fcp_is_child_present(plun, cip) != FC_SUCCESS) {
8089 			goto fail;
8090 		}
8091 		ccip = cip;
8092 	}
8093 
8094 	if (online == FCP_ONLINE) {
8095 		rval = fcp_online_child(plun, ccip, lcount, tcount, flags,
8096 		    &circ);
8097 		fc_ulp_log_device_event(pptr->port_fp_handle,
8098 		    FC_ULP_DEVICE_ONLINE);
8099 	} else {
8100 		rval = fcp_offline_child(plun, ccip, lcount, tcount, flags,
8101 		    &circ);
8102 		fc_ulp_log_device_event(pptr->port_fp_handle,
8103 		    FC_ULP_DEVICE_OFFLINE);
8104 	}
8105 
8106 fail:	mutex_exit(&plun->lun_mutex);
8107 	mutex_exit(&pptr->port_mutex);
8108 
8109 	if (is_mpxio)
8110 		mdi_devi_exit(pptr->port_dip, circ);
8111 	else
8112 		ndi_devi_exit(pptr->port_dip, circ);
8113 
8114 	fc_ulp_idle_port(pptr->port_fp_handle);
8115 
8116 	return (rval);
8117 }
8118 
8119 
8120 /*
8121  * take a target offline by taking all of its LUNs offline
8122  */
8123 /*ARGSUSED*/
8124 static int
8125 fcp_offline_target(struct fcp_port *pptr, struct fcp_tgt *ptgt,
8126     int link_cnt, int tgt_cnt, int nowait, int flags)
8127 {
8128 	struct fcp_tgt_elem	*elem;
8129 
8130 	ASSERT(mutex_owned(&pptr->port_mutex));
8131 	ASSERT(mutex_owned(&ptgt->tgt_mutex));
8132 
8133 	ASSERT(!(ptgt->tgt_state & FCP_TGT_OFFLINE));
8134 
8135 	if (link_cnt != pptr->port_link_cnt || (tgt_cnt && tgt_cnt !=
8136 	    ptgt->tgt_change_cnt)) {
8137 		mutex_exit(&ptgt->tgt_mutex);
8138 		FCP_TGT_TRACE(ptgt, tgt_cnt, FCP_TGT_TRACE_25);
8139 		mutex_enter(&ptgt->tgt_mutex);
8140 
8141 		return (0);
8142 	}
8143 
8144 	ptgt->tgt_pd_handle = NULL;
8145 	mutex_exit(&ptgt->tgt_mutex);
8146 	FCP_TGT_TRACE(ptgt, tgt_cnt, FCP_TGT_TRACE_26);
8147 	mutex_enter(&ptgt->tgt_mutex);
8148 
8149 	tgt_cnt = tgt_cnt ? tgt_cnt : ptgt->tgt_change_cnt;
8150 
8151 	if (ptgt->tgt_tcap &&
8152 	    (elem = kmem_zalloc(sizeof (*elem), KM_NOSLEEP)) != NULL) {
8153 		elem->flags = flags;
8154 		elem->time = fcp_watchdog_time;
8155 		if (nowait == 0) {
8156 			elem->time += fcp_offline_delay;
8157 		}
8158 		elem->ptgt = ptgt;
8159 		elem->link_cnt = link_cnt;
8160 		elem->tgt_cnt = tgt_cnt;
8161 		elem->next = pptr->port_offline_tgts;
8162 		pptr->port_offline_tgts = elem;
8163 	} else {
8164 		fcp_offline_target_now(pptr, ptgt, link_cnt, tgt_cnt, flags);
8165 	}
8166 
8167 	return (1);
8168 }
8169 
8170 
8171 static void
8172 fcp_offline_target_now(struct fcp_port *pptr, struct fcp_tgt *ptgt,
8173     int link_cnt, int tgt_cnt, int flags)
8174 {
8175 	ASSERT(mutex_owned(&pptr->port_mutex));
8176 	ASSERT(mutex_owned(&ptgt->tgt_mutex));
8177 
8178 	fc_ulp_enable_relogin(pptr->port_fp_handle, &ptgt->tgt_port_wwn);
8179 	ptgt->tgt_state = FCP_TGT_OFFLINE;
8180 	ptgt->tgt_pd_handle = NULL;
8181 	fcp_offline_tgt_luns(ptgt, link_cnt, tgt_cnt, flags);
8182 }
8183 
8184 
8185 static void
8186 fcp_offline_tgt_luns(struct fcp_tgt *ptgt, int link_cnt, int tgt_cnt,
8187     int flags)
8188 {
8189 	struct	fcp_lun	*plun;
8190 
8191 	ASSERT(mutex_owned(&ptgt->tgt_port->port_mutex));
8192 	ASSERT(mutex_owned(&ptgt->tgt_mutex));
8193 
8194 	for (plun = ptgt->tgt_lun; plun != NULL; plun = plun->lun_next) {
8195 		if (!(plun->lun_state & FCP_LUN_OFFLINE)) {
8196 			fcp_offline_lun(plun, link_cnt, tgt_cnt, 1, flags);
8197 		}
8198 	}
8199 }
8200 
8201 
8202 /*
8203  * take a LUN offline
8204  *
8205  * enters and leaves with the target mutex held, releasing it in the process
8206  *
8207  * allocates memory in non-sleep mode
8208  */
8209 static void
8210 fcp_offline_lun(struct fcp_lun *plun, int link_cnt, int tgt_cnt,
8211     int nowait, int flags)
8212 {
8213 	struct fcp_port	*pptr = plun->lun_tgt->tgt_port;
8214 	struct fcp_lun_elem	*elem;
8215 
8216 	ASSERT(plun != NULL);
8217 	ASSERT(mutex_owned(&LUN_TGT->tgt_mutex));
8218 
8219 	if (nowait) {
8220 		fcp_offline_lun_now(plun, link_cnt, tgt_cnt, flags);
8221 		return;
8222 	}
8223 
8224 	if ((elem = kmem_zalloc(sizeof (*elem), KM_NOSLEEP)) != NULL) {
8225 		elem->flags = flags;
8226 		elem->time = fcp_watchdog_time;
8227 		if (nowait == 0) {
8228 			elem->time += fcp_offline_delay;
8229 		}
8230 		elem->plun = plun;
8231 		elem->link_cnt = link_cnt;
8232 		elem->tgt_cnt = plun->lun_tgt->tgt_change_cnt;
8233 		elem->next = pptr->port_offline_luns;
8234 		pptr->port_offline_luns = elem;
8235 	} else {
8236 		fcp_offline_lun_now(plun, link_cnt, tgt_cnt, flags);
8237 	}
8238 }
8239 
8240 
8241 static void
8242 fcp_prepare_offline_lun(struct fcp_lun *plun, int link_cnt, int tgt_cnt)
8243 {
8244 	struct fcp_pkt	*head = NULL;
8245 
8246 	ASSERT(mutex_owned(&LUN_TGT->tgt_mutex));
8247 
8248 	mutex_exit(&LUN_TGT->tgt_mutex);
8249 
8250 	head = fcp_scan_commands(plun);
8251 	if (head != NULL) {
8252 		fcp_abort_commands(head, LUN_PORT);
8253 	}
8254 
8255 	mutex_enter(&LUN_TGT->tgt_mutex);
8256 
8257 	if (plun->lun_cip && plun->lun_mpxio) {
8258 		/*
8259 		 * Intimate MPxIO lun busy is cleared
8260 		 */
8261 		if (!fcp_pass_to_hp(LUN_PORT, plun, plun->lun_cip,
8262 		    FCP_MPXIO_PATH_CLEAR_BUSY, link_cnt, tgt_cnt,
8263 		    0, 0)) {
8264 			fcp_log(CE_NOTE, LUN_PORT->port_dip,
8265 				"Can not ENABLE LUN; D_ID=%x, LUN=%x",
8266 				LUN_TGT->tgt_d_id, plun->lun_num);
8267 		}
8268 		/*
8269 		 * Intimate MPxIO that the lun is now marked for offline
8270 		 */
8271 		mutex_exit(&LUN_TGT->tgt_mutex);
8272 		(void) mdi_pi_disable_path(PIP(plun->lun_cip), DRIVER_DISABLE);
8273 		mutex_enter(&LUN_TGT->tgt_mutex);
8274 	}
8275 }
8276 
8277 static void
8278 fcp_offline_lun_now(struct fcp_lun *plun, int link_cnt, int tgt_cnt,
8279     int flags)
8280 {
8281 	ASSERT(mutex_owned(&LUN_TGT->tgt_mutex));
8282 
8283 	mutex_exit(&LUN_TGT->tgt_mutex);
8284 	fcp_update_offline_flags(plun);
8285 	mutex_enter(&LUN_TGT->tgt_mutex);
8286 
8287 	fcp_prepare_offline_lun(plun, link_cnt, tgt_cnt);
8288 
8289 	FCP_TRACE(fcp_logq, LUN_PORT->port_instbuf,
8290 	    fcp_trace, FCP_BUF_LEVEL_4, 0,
8291 	    "offline_lun: passing OFFLINE elem to HP thread");
8292 
8293 	if (plun->lun_cip) {
8294 		fcp_log(CE_NOTE, LUN_PORT->port_dip,
8295 		    "!offlining lun=%x (trace=%x), target=%x (trace=%x)",
8296 		    plun->lun_num, plun->lun_trace, LUN_TGT->tgt_d_id,
8297 		    LUN_TGT->tgt_trace);
8298 
8299 		if (!fcp_pass_to_hp(LUN_PORT, plun, plun->lun_cip, FCP_OFFLINE,
8300 		    link_cnt, tgt_cnt, flags, 0)) {
8301 			fcp_log(CE_CONT, LUN_PORT->port_dip,
8302 			    "Can not OFFLINE LUN; D_ID=%x, LUN=%x\n",
8303 			    LUN_TGT->tgt_d_id, plun->lun_num);
8304 		}
8305 	}
8306 }
8307 
8308 static void
8309 fcp_scan_offline_luns(struct fcp_port *pptr)
8310 {
8311 	struct fcp_lun_elem 	*elem;
8312 	struct fcp_lun_elem 	*prev;
8313 	struct fcp_lun_elem 	*next;
8314 
8315 	ASSERT(MUTEX_HELD(&pptr->port_mutex));
8316 
8317 	prev = NULL;
8318 	elem = pptr->port_offline_luns;
8319 	while (elem) {
8320 		next = elem->next;
8321 		if (elem->time <= fcp_watchdog_time) {
8322 			int 			changed = 1;
8323 			struct fcp_tgt	*ptgt = elem->plun->lun_tgt;
8324 
8325 			mutex_enter(&ptgt->tgt_mutex);
8326 			if (pptr->port_link_cnt == elem->link_cnt &&
8327 			    ptgt->tgt_change_cnt == elem->tgt_cnt) {
8328 				changed = 0;
8329 			}
8330 
8331 			if (!changed &&
8332 			    !(elem->plun->lun_state & FCP_TGT_OFFLINE)) {
8333 				fcp_offline_lun_now(elem->plun,
8334 				    elem->link_cnt, elem->tgt_cnt, elem->flags);
8335 			}
8336 			mutex_exit(&ptgt->tgt_mutex);
8337 
8338 			kmem_free(elem, sizeof (*elem));
8339 
8340 			if (prev) {
8341 				prev->next = next;
8342 			} else {
8343 				pptr->port_offline_luns = next;
8344 			}
8345 		} else {
8346 			prev = elem;
8347 		}
8348 		elem = next;
8349 	}
8350 }
8351 
8352 
8353 static void
8354 fcp_scan_offline_tgts(struct fcp_port *pptr)
8355 {
8356 	struct fcp_tgt_elem 	*elem;
8357 	struct fcp_tgt_elem 	*prev;
8358 	struct fcp_tgt_elem 	*next;
8359 
8360 	ASSERT(MUTEX_HELD(&pptr->port_mutex));
8361 
8362 	prev = NULL;
8363 	elem = pptr->port_offline_tgts;
8364 	while (elem) {
8365 		next = elem->next;
8366 		if (elem->time <= fcp_watchdog_time) {
8367 			int 			changed = 1;
8368 			struct fcp_tgt	*ptgt = elem->ptgt;
8369 
8370 			if (ptgt->tgt_change_cnt == elem->tgt_cnt) {
8371 				changed = 0;
8372 			}
8373 
8374 			mutex_enter(&ptgt->tgt_mutex);
8375 			if (!changed && !(ptgt->tgt_state &
8376 			    FCP_TGT_OFFLINE)) {
8377 				fcp_offline_target_now(pptr,
8378 				    ptgt, elem->link_cnt, elem->tgt_cnt,
8379 				    elem->flags);
8380 			}
8381 			mutex_exit(&ptgt->tgt_mutex);
8382 
8383 			kmem_free(elem, sizeof (*elem));
8384 
8385 			if (prev) {
8386 				prev->next = next;
8387 			} else {
8388 				pptr->port_offline_tgts = next;
8389 			}
8390 		} else {
8391 			prev = elem;
8392 		}
8393 		elem = next;
8394 	}
8395 }
8396 
8397 
8398 static void
8399 fcp_update_offline_flags(struct fcp_lun *plun)
8400 {
8401 	struct fcp_port	*pptr = LUN_PORT;
8402 	ASSERT(plun != NULL);
8403 
8404 	mutex_enter(&LUN_TGT->tgt_mutex);
8405 	plun->lun_state |= FCP_LUN_OFFLINE;
8406 	plun->lun_state &= ~(FCP_LUN_INIT | FCP_LUN_BUSY | FCP_LUN_MARK);
8407 
8408 	mutex_enter(&plun->lun_mutex);
8409 	if (plun->lun_cip && plun->lun_state & FCP_SCSI_LUN_TGT_INIT) {
8410 		dev_info_t *cdip = NULL;
8411 
8412 		mutex_exit(&LUN_TGT->tgt_mutex);
8413 
8414 		if (plun->lun_mpxio == 0) {
8415 			cdip = DIP(plun->lun_cip);
8416 		} else if (plun->lun_cip) {
8417 			cdip = mdi_pi_get_client(PIP(plun->lun_cip));
8418 		}
8419 
8420 		mutex_exit(&plun->lun_mutex);
8421 		if (cdip) {
8422 			(void) ndi_event_retrieve_cookie(
8423 			    pptr->port_ndi_event_hdl, cdip, FCAL_REMOVE_EVENT,
8424 			    &fcp_remove_eid, NDI_EVENT_NOPASS);
8425 			(void) ndi_event_run_callbacks(
8426 			    pptr->port_ndi_event_hdl, cdip,
8427 			    fcp_remove_eid, NULL);
8428 		}
8429 	} else {
8430 		mutex_exit(&plun->lun_mutex);
8431 		mutex_exit(&LUN_TGT->tgt_mutex);
8432 	}
8433 }
8434 
8435 
8436 /*
8437  * Scan all of the command pkts for this port, moving pkts that
8438  * match our LUN onto our own list (headed by "head")
8439  */
8440 static struct fcp_pkt *
8441 fcp_scan_commands(struct fcp_lun *plun)
8442 {
8443 	struct fcp_port	*pptr = LUN_PORT;
8444 
8445 	struct fcp_pkt	*cmd = NULL;	/* pkt cmd ptr */
8446 	struct fcp_pkt	*ncmd = NULL;	/* next pkt ptr */
8447 	struct fcp_pkt	*pcmd = NULL;	/* the previous command */
8448 
8449 	struct fcp_pkt	*head = NULL;	/* head of our list */
8450 	struct fcp_pkt	*tail = NULL;	/* tail of our list */
8451 
8452 	int			cmds_found = 0;
8453 
8454 	mutex_enter(&pptr->port_pkt_mutex);
8455 	for (cmd = pptr->port_pkt_head; cmd != NULL; cmd = ncmd) {
8456 		struct fcp_lun *tlun =
8457 		    ADDR2LUN(&cmd->cmd_pkt->pkt_address);
8458 
8459 		ncmd = cmd->cmd_next;	/* set next command */
8460 
8461 		/*
8462 		 * if this pkt is for a different LUN  or the
8463 		 * command is sent down, skip it.
8464 		 */
8465 		if (tlun != plun || cmd->cmd_state == FCP_PKT_ISSUED ||
8466 		    (cmd->cmd_pkt->pkt_flags & FLAG_NOINTR)) {
8467 			pcmd = cmd;
8468 			continue;
8469 		}
8470 		cmds_found++;
8471 		if (pcmd != NULL) {
8472 			ASSERT(pptr->port_pkt_head != cmd);
8473 			pcmd->cmd_next = cmd->cmd_next;
8474 		} else {
8475 			ASSERT(cmd == pptr->port_pkt_head);
8476 			pptr->port_pkt_head = cmd->cmd_next;
8477 		}
8478 
8479 		if (cmd == pptr->port_pkt_tail) {
8480 			pptr->port_pkt_tail = pcmd;
8481 			if (pcmd) {
8482 				pcmd->cmd_next = NULL;
8483 			}
8484 		}
8485 
8486 		if (head == NULL) {
8487 			head = tail = cmd;
8488 		} else {
8489 			ASSERT(tail != NULL);
8490 
8491 			tail->cmd_next = cmd;
8492 			tail = cmd;
8493 		}
8494 		cmd->cmd_next = NULL;
8495 	}
8496 	mutex_exit(&pptr->port_pkt_mutex);
8497 
8498 	FCP_DTRACE(fcp_logq, pptr->port_instbuf,
8499 	    fcp_trace, FCP_BUF_LEVEL_8, 0,
8500 	    "scan commands: %d cmd(s) found", cmds_found);
8501 
8502 	return (head);
8503 }
8504 
8505 
8506 /*
8507  * Abort all the commands in the command queue
8508  */
8509 static void
8510 fcp_abort_commands(struct fcp_pkt *head, struct fcp_port *pptr)
8511 {
8512 	struct fcp_pkt	*cmd = NULL;	/* pkt cmd ptr */
8513 	struct	fcp_pkt	*ncmd = NULL;	/* next pkt ptr */
8514 
8515 	ASSERT(mutex_owned(&pptr->port_mutex));
8516 
8517 	/* scan through the pkts and invalid them */
8518 	for (cmd = head; cmd != NULL; cmd = ncmd) {
8519 		struct scsi_pkt *pkt = cmd->cmd_pkt;
8520 
8521 		ncmd = cmd->cmd_next;
8522 		ASSERT(pkt != NULL);
8523 
8524 		/*
8525 		 * The lun is going to be marked offline. Indicate
8526 		 * the target driver not to requeue or retry this command
8527 		 * as the device is going to be offlined pretty soon.
8528 		 */
8529 		pkt->pkt_reason = CMD_DEV_GONE;
8530 		pkt->pkt_statistics = 0;
8531 		pkt->pkt_state = 0;
8532 
8533 		/* reset cmd flags/state */
8534 		cmd->cmd_flags &= ~CFLAG_IN_QUEUE;
8535 		cmd->cmd_state = FCP_PKT_IDLE;
8536 
8537 		/*
8538 		 * ensure we have a packet completion routine,
8539 		 * then call it.
8540 		 */
8541 		ASSERT(pkt->pkt_comp != NULL);
8542 
8543 		mutex_exit(&pptr->port_mutex);
8544 		fcp_post_callback(cmd);
8545 		mutex_enter(&pptr->port_mutex);
8546 	}
8547 }
8548 
8549 
8550 /*
8551  * the pkt_comp callback for command packets
8552  */
8553 static void
8554 fcp_cmd_callback(fc_packet_t *fpkt)
8555 {
8556 	struct fcp_pkt *cmd = (struct fcp_pkt *)fpkt->pkt_ulp_private;
8557 	struct scsi_pkt *pkt = cmd->cmd_pkt;
8558 	struct fcp_port *pptr = ADDR2FCP(&pkt->pkt_address);
8559 
8560 	ASSERT(cmd->cmd_state != FCP_PKT_IDLE);
8561 
8562 	if (cmd->cmd_state == FCP_PKT_IDLE) {
8563 		cmn_err(CE_PANIC, "Packet already completed %p",
8564 		    (void *)cmd);
8565 	}
8566 
8567 	/*
8568 	 * Watch thread should be freeing the packet, ignore the pkt.
8569 	 */
8570 	if (cmd->cmd_state == FCP_PKT_ABORTING) {
8571 		fcp_log(CE_CONT, pptr->port_dip,
8572 		    "!FCP: Pkt completed while aborting\n");
8573 		return;
8574 	}
8575 	cmd->cmd_state = FCP_PKT_IDLE;
8576 
8577 	fcp_complete_pkt(fpkt);
8578 
8579 #ifdef	DEBUG
8580 	mutex_enter(&pptr->port_pkt_mutex);
8581 	pptr->port_npkts--;
8582 	mutex_exit(&pptr->port_pkt_mutex);
8583 #endif /* DEBUG */
8584 
8585 	fcp_post_callback(cmd);
8586 }
8587 
8588 
8589 static void
8590 fcp_complete_pkt(fc_packet_t *fpkt)
8591 {
8592 	int			error = 0;
8593 	struct fcp_pkt 	*cmd = (struct fcp_pkt *)
8594 				    fpkt->pkt_ulp_private;
8595 	struct scsi_pkt 	*pkt = cmd->cmd_pkt;
8596 	struct fcp_port 	*pptr = ADDR2FCP(&pkt->pkt_address);
8597 	struct fcp_lun 	*plun;
8598 	struct fcp_tgt 	*ptgt;
8599 	struct fcp_rsp 		*rsp;
8600 	struct scsi_address	save;
8601 
8602 #ifdef	DEBUG
8603 	save = pkt->pkt_address;
8604 #endif /* DEBUG */
8605 
8606 	rsp = (struct fcp_rsp *)cmd->cmd_fcp_rsp;
8607 
8608 	if (fpkt->pkt_state == FC_PKT_SUCCESS) {
8609 		if (pptr->port_fcp_dma != FC_NO_DVMA_SPACE) {
8610 			FCP_CP_IN(fpkt->pkt_resp, rsp, fpkt->pkt_resp_acc,
8611 			    sizeof (struct fcp_rsp));
8612 		}
8613 
8614 		pkt->pkt_state = STATE_GOT_BUS | STATE_GOT_TARGET |
8615 		    STATE_SENT_CMD | STATE_GOT_STATUS;
8616 
8617 		pkt->pkt_resid = 0;
8618 
8619 		if (cmd->cmd_pkt->pkt_numcookies) {
8620 			pkt->pkt_state |= STATE_XFERRED_DATA;
8621 			if (fpkt->pkt_data_resid) {
8622 				error++;
8623 			}
8624 		}
8625 
8626 		if ((pkt->pkt_scbp != NULL) && ((*(pkt->pkt_scbp) =
8627 		    rsp->fcp_u.fcp_status.scsi_status) != STATUS_GOOD)) {
8628 			/*
8629 			 * The next two checks make sure that if there
8630 			 * is no sense data or a valid response and
8631 			 * the command came back with check condition,
8632 			 * the command should be retried.
8633 			 */
8634 			if (!rsp->fcp_u.fcp_status.rsp_len_set &&
8635 			    !rsp->fcp_u.fcp_status.sense_len_set) {
8636 				pkt->pkt_state &= ~STATE_XFERRED_DATA;
8637 				pkt->pkt_resid = cmd->cmd_dmacount;
8638 			}
8639 		}
8640 
8641 		if ((error | rsp->fcp_u.i_fcp_status | rsp->fcp_resid) == 0) {
8642 			return;
8643 		}
8644 
8645 		plun = ADDR2LUN(&pkt->pkt_address);
8646 		ptgt = plun->lun_tgt;
8647 		ASSERT(ptgt != NULL);
8648 
8649 		/*
8650 		 * Update the transfer resid, if appropriate
8651 		 */
8652 		if (rsp->fcp_u.fcp_status.resid_over ||
8653 		    rsp->fcp_u.fcp_status.resid_under) {
8654 			pkt->pkt_resid = rsp->fcp_resid;
8655 		}
8656 
8657 		/*
8658 		 * First see if we got a FCP protocol error.
8659 		 */
8660 		if (rsp->fcp_u.fcp_status.rsp_len_set) {
8661 			struct fcp_rsp_info	*bep;
8662 			bep = (struct fcp_rsp_info *)(cmd->cmd_fcp_rsp +
8663 			    sizeof (struct fcp_rsp));
8664 
8665 			if (fcp_validate_fcp_response(rsp, pptr) !=
8666 			    FC_SUCCESS) {
8667 				pkt->pkt_reason = CMD_CMPLT;
8668 				*(pkt->pkt_scbp) = STATUS_CHECK;
8669 
8670 				fcp_log(CE_WARN, pptr->port_dip,
8671 				    "!SCSI command to d_id=0x%x lun=0x%x"
8672 				    " failed, Bad FCP response values:"
8673 				    " rsvd1=%x, rsvd2=%x, sts-rsvd1=%x,"
8674 				    " sts-rsvd2=%x, rsplen=%x, senselen=%x",
8675 				    ptgt->tgt_d_id, plun->lun_num,
8676 				    rsp->reserved_0, rsp->reserved_1,
8677 				    rsp->fcp_u.fcp_status.reserved_0,
8678 				    rsp->fcp_u.fcp_status.reserved_1,
8679 				    rsp->fcp_response_len, rsp->fcp_sense_len);
8680 
8681 				return;
8682 			}
8683 
8684 			if (pptr->port_fcp_dma != FC_NO_DVMA_SPACE) {
8685 				FCP_CP_IN(fpkt->pkt_resp +
8686 				    sizeof (struct fcp_rsp), bep,
8687 				    fpkt->pkt_resp_acc,
8688 				    sizeof (struct fcp_rsp_info));
8689 			}
8690 
8691 			if (bep->rsp_code != FCP_NO_FAILURE) {
8692 				child_info_t	*cip;
8693 
8694 				pkt->pkt_reason = CMD_TRAN_ERR;
8695 
8696 				mutex_enter(&plun->lun_mutex);
8697 				cip = plun->lun_cip;
8698 				mutex_exit(&plun->lun_mutex);
8699 
8700 				FCP_TRACE(fcp_logq, pptr->port_instbuf,
8701 				    fcp_trace, FCP_BUF_LEVEL_2, 0,
8702 				    "FCP response error on cmd=%p"
8703 				    " target=0x%x, cip=%p", cmd,
8704 				    ptgt->tgt_d_id, cip);
8705 			}
8706 		}
8707 
8708 		/*
8709 		 * See if we got a SCSI error with sense data
8710 		 */
8711 		if (rsp->fcp_u.fcp_status.sense_len_set) {
8712 			uchar_t 			rqlen;
8713 			caddr_t 			sense_from;
8714 			child_info_t			*cip;
8715 			timeout_id_t			tid;
8716 			struct scsi_arq_status 		*arq;
8717 			struct scsi_extended_sense 	*sense_to;
8718 
8719 			arq = (struct scsi_arq_status *)pkt->pkt_scbp;
8720 			sense_to = &arq->sts_sensedata;
8721 
8722 			rqlen = (uchar_t)min(rsp->fcp_sense_len,
8723 			    sizeof (struct scsi_extended_sense));
8724 
8725 			sense_from = (caddr_t)fpkt->pkt_resp +
8726 			    sizeof (struct fcp_rsp) + rsp->fcp_response_len;
8727 
8728 			if (fcp_validate_fcp_response(rsp, pptr) !=
8729 			    FC_SUCCESS) {
8730 				pkt->pkt_reason = CMD_CMPLT;
8731 				*(pkt->pkt_scbp) = STATUS_CHECK;
8732 
8733 				fcp_log(CE_WARN, pptr->port_dip,
8734 				    "!SCSI command to d_id=0x%x lun=0x%x"
8735 				    " failed, Bad FCP response values:"
8736 				    " rsvd1=%x, rsvd2=%x, sts-rsvd1=%x,"
8737 				    " sts-rsvd2=%x, rsplen=%x, senselen=%x",
8738 				    ptgt->tgt_d_id, plun->lun_num,
8739 				    rsp->reserved_0, rsp->reserved_1,
8740 				    rsp->fcp_u.fcp_status.reserved_0,
8741 				    rsp->fcp_u.fcp_status.reserved_1,
8742 				    rsp->fcp_response_len, rsp->fcp_sense_len);
8743 
8744 				return;
8745 			}
8746 
8747 			/*
8748 			 * copy in sense information
8749 			 */
8750 			if (pptr->port_fcp_dma != FC_NO_DVMA_SPACE) {
8751 				FCP_CP_IN(sense_from, sense_to,
8752 				    fpkt->pkt_resp_acc, rqlen);
8753 			} else {
8754 				bcopy(sense_from, sense_to, rqlen);
8755 			}
8756 
8757 			if ((FCP_SENSE_REPORTLUN_CHANGED(sense_to)) ||
8758 			    (FCP_SENSE_NO_LUN(sense_to))) {
8759 				mutex_enter(&ptgt->tgt_mutex);
8760 				if (ptgt->tgt_tid == NULL) {
8761 					/*
8762 					 * Kick off rediscovery
8763 					 */
8764 					tid = timeout(fcp_reconfigure_luns,
8765 					    (caddr_t)ptgt, drv_usectohz(1));
8766 
8767 					ptgt->tgt_tid = tid;
8768 					ptgt->tgt_state |= FCP_TGT_BUSY;
8769 				}
8770 				mutex_exit(&ptgt->tgt_mutex);
8771 				if (FCP_SENSE_REPORTLUN_CHANGED(sense_to)) {
8772 					FCP_TRACE(fcp_logq, pptr->port_instbuf,
8773 					    fcp_trace, FCP_BUF_LEVEL_3, 0,
8774 					    "!FCP: Report Lun Has Changed"
8775 					    " target=%x", ptgt->tgt_d_id);
8776 				} else if (FCP_SENSE_NO_LUN(sense_to)) {
8777 					FCP_TRACE(fcp_logq, pptr->port_instbuf,
8778 					    fcp_trace, FCP_BUF_LEVEL_3, 0,
8779 					    "!FCP: LU Not Supported"
8780 					    " target=%x", ptgt->tgt_d_id);
8781 				}
8782 			}
8783 			ASSERT(pkt->pkt_scbp != NULL);
8784 
8785 			pkt->pkt_state |= STATE_ARQ_DONE;
8786 
8787 			arq->sts_rqpkt_resid = SENSE_LENGTH - rqlen;
8788 
8789 			*((uchar_t *)&arq->sts_rqpkt_status) = STATUS_GOOD;
8790 			arq->sts_rqpkt_reason = 0;
8791 			arq->sts_rqpkt_statistics = 0;
8792 
8793 			arq->sts_rqpkt_state = STATE_GOT_BUS |
8794 			    STATE_GOT_TARGET | STATE_SENT_CMD |
8795 			    STATE_GOT_STATUS | STATE_ARQ_DONE |
8796 			    STATE_XFERRED_DATA;
8797 
8798 			mutex_enter(&plun->lun_mutex);
8799 			cip = plun->lun_cip;
8800 			mutex_exit(&plun->lun_mutex);
8801 
8802 			FCP_DTRACE(fcp_logq, pptr->port_instbuf,
8803 			    fcp_trace, FCP_BUF_LEVEL_8, 0,
8804 			    "SCSI Check condition on cmd=%p target=0x%x"
8805 			    " LUN=%p, cmd=%x SCSI status=%x, es key=%x"
8806 			    " ASC=%x ASCQ=%x", cmd, ptgt->tgt_d_id, cip,
8807 			    cmd->cmd_fcp_cmd.fcp_cdb[0],
8808 			    rsp->fcp_u.fcp_status.scsi_status,
8809 			    sense_to->es_key, sense_to->es_add_code,
8810 			    sense_to->es_qual_code);
8811 		}
8812 	} else {
8813 		plun = ADDR2LUN(&pkt->pkt_address);
8814 		ptgt = plun->lun_tgt;
8815 		ASSERT(ptgt != NULL);
8816 
8817 		/*
8818 		 * Work harder to translate errors into target driver
8819 		 * understandable ones. Note with despair that the target
8820 		 * drivers don't decode pkt_state and pkt_reason exhaustively
8821 		 * They resort to using the big hammer most often, which
8822 		 * may not get fixed in the life time of this driver.
8823 		 */
8824 		pkt->pkt_state = 0;
8825 		pkt->pkt_statistics = 0;
8826 
8827 		switch (fpkt->pkt_state) {
8828 		case FC_PKT_TRAN_ERROR:
8829 			switch (fpkt->pkt_reason) {
8830 			case FC_REASON_OVERRUN:
8831 				pkt->pkt_reason = CMD_CMD_OVR;
8832 				pkt->pkt_statistics |= STAT_ABORTED;
8833 				break;
8834 
8835 			case FC_REASON_XCHG_BSY: {
8836 				caddr_t ptr;
8837 
8838 				pkt->pkt_reason = CMD_CMPLT;	/* Lie */
8839 
8840 				ptr = (caddr_t)pkt->pkt_scbp;
8841 				if (ptr) {
8842 					*ptr = STATUS_BUSY;
8843 				}
8844 				break;
8845 			}
8846 
8847 			case FC_REASON_ABORTED:
8848 				pkt->pkt_reason = CMD_TRAN_ERR;
8849 				pkt->pkt_statistics |= STAT_ABORTED;
8850 				break;
8851 
8852 			case FC_REASON_ABORT_FAILED:
8853 				pkt->pkt_reason = CMD_ABORT_FAIL;
8854 				break;
8855 
8856 			case FC_REASON_NO_SEQ_INIT:
8857 			case FC_REASON_CRC_ERROR:
8858 				pkt->pkt_reason = CMD_TRAN_ERR;
8859 				pkt->pkt_statistics |= STAT_ABORTED;
8860 				break;
8861 			default:
8862 				pkt->pkt_reason = CMD_TRAN_ERR;
8863 				break;
8864 			}
8865 			break;
8866 
8867 		case FC_PKT_PORT_OFFLINE: {
8868 			dev_info_t	*cdip = NULL;
8869 			caddr_t 	ptr;
8870 
8871 			if (fpkt->pkt_reason == FC_REASON_LOGIN_REQUIRED) {
8872 				FCP_DTRACE(fcp_logq, pptr->port_instbuf,
8873 				    fcp_trace, FCP_BUF_LEVEL_8, 0,
8874 				    "SCSI cmd; LOGIN REQUIRED from FCA for %x",
8875 				    ptgt->tgt_d_id);
8876 			}
8877 
8878 			mutex_enter(&plun->lun_mutex);
8879 			if (plun->lun_mpxio == 0) {
8880 				cdip = DIP(plun->lun_cip);
8881 			} else if (plun->lun_cip) {
8882 				cdip = mdi_pi_get_client(PIP(plun->lun_cip));
8883 			}
8884 
8885 			mutex_exit(&plun->lun_mutex);
8886 
8887 			if (cdip) {
8888 				(void) ndi_event_retrieve_cookie(
8889 				    pptr->port_ndi_event_hdl, cdip,
8890 				    FCAL_REMOVE_EVENT, &fcp_remove_eid,
8891 				    NDI_EVENT_NOPASS);
8892 				(void) ndi_event_run_callbacks(
8893 				    pptr->port_ndi_event_hdl, cdip,
8894 				    fcp_remove_eid, NULL);
8895 			}
8896 
8897 			/*
8898 			 * If the link goes off-line for a lip,
8899 			 * this will cause a error to the ST SG
8900 			 * SGEN drivers. By setting BUSY we will
8901 			 * give the drivers the chance to retry
8902 			 * before it blows of the job. ST will
8903 			 * remember how many times it has retried.
8904 			 */
8905 
8906 			if ((plun->lun_type == DTYPE_SEQUENTIAL) ||
8907 			    (plun->lun_type == DTYPE_CHANGER)) {
8908 				pkt->pkt_reason = CMD_CMPLT;    /* Lie */
8909 				ptr = (caddr_t)pkt->pkt_scbp;
8910 				if (ptr) {
8911 					*ptr = STATUS_BUSY;
8912 				}
8913 			} else {
8914 				pkt->pkt_reason = CMD_TRAN_ERR;
8915 				pkt->pkt_statistics |= STAT_BUS_RESET;
8916 			}
8917 			break;
8918 		}
8919 
8920 		case FC_PKT_TRAN_BSY:
8921 			/*
8922 			 * Use the ssd Qfull handling here.
8923 			 */
8924 			*pkt->pkt_scbp = STATUS_INTERMEDIATE;
8925 			pkt->pkt_state = STATE_GOT_BUS;
8926 			break;
8927 
8928 		case FC_PKT_TIMEOUT:
8929 			pkt->pkt_reason = CMD_TIMEOUT;
8930 			if (fpkt->pkt_reason == FC_REASON_ABORT_FAILED) {
8931 				pkt->pkt_statistics |= STAT_TIMEOUT;
8932 			} else {
8933 				pkt->pkt_statistics |= STAT_ABORTED;
8934 			}
8935 			break;
8936 
8937 		case FC_PKT_LOCAL_RJT:
8938 			switch (fpkt->pkt_reason) {
8939 			case FC_REASON_OFFLINE: {
8940 				dev_info_t	*cdip = NULL;
8941 
8942 				mutex_enter(&plun->lun_mutex);
8943 				if (plun->lun_mpxio == 0) {
8944 					cdip = DIP(plun->lun_cip);
8945 				} else if (plun->lun_cip) {
8946 					cdip = mdi_pi_get_client(
8947 					    PIP(plun->lun_cip));
8948 				}
8949 				mutex_exit(&plun->lun_mutex);
8950 
8951 				if (cdip) {
8952 					(void) ndi_event_retrieve_cookie(
8953 					    pptr->port_ndi_event_hdl, cdip,
8954 					    FCAL_REMOVE_EVENT,
8955 					    &fcp_remove_eid,
8956 					    NDI_EVENT_NOPASS);
8957 					(void) ndi_event_run_callbacks(
8958 					    pptr->port_ndi_event_hdl,
8959 					    cdip, fcp_remove_eid, NULL);
8960 				}
8961 
8962 				pkt->pkt_reason = CMD_TRAN_ERR;
8963 				pkt->pkt_statistics |= STAT_BUS_RESET;
8964 
8965 				break;
8966 			}
8967 
8968 			case FC_REASON_NOMEM:
8969 			case FC_REASON_QFULL: {
8970 				caddr_t ptr;
8971 
8972 				pkt->pkt_reason = CMD_CMPLT;	/* Lie */
8973 				ptr = (caddr_t)pkt->pkt_scbp;
8974 				if (ptr) {
8975 					*ptr = STATUS_BUSY;
8976 				}
8977 				break;
8978 			}
8979 
8980 			case FC_REASON_DMA_ERROR:
8981 				pkt->pkt_reason = CMD_DMA_DERR;
8982 				pkt->pkt_statistics |= STAT_ABORTED;
8983 				break;
8984 
8985 			case FC_REASON_CRC_ERROR:
8986 			case FC_REASON_UNDERRUN: {
8987 				uchar_t		status;
8988 				/*
8989 				 * Work around for Bugid: 4240945.
8990 				 * IB on A5k doesn't set the Underrun bit
8991 				 * in the fcp status, when it is transferring
8992 				 * less than requested amount of data. Work
8993 				 * around the ses problem to keep luxadm
8994 				 * happy till ibfirmware is fixed.
8995 				 */
8996 				if (pptr->port_fcp_dma != FC_NO_DVMA_SPACE) {
8997 					FCP_CP_IN(fpkt->pkt_resp, rsp,
8998 					    fpkt->pkt_resp_acc,
8999 					    sizeof (struct fcp_rsp));
9000 				}
9001 				status = rsp->fcp_u.fcp_status.scsi_status;
9002 				if (((plun->lun_type & DTYPE_MASK) ==
9003 				    DTYPE_ESI) && (status == STATUS_GOOD)) {
9004 					pkt->pkt_reason = CMD_CMPLT;
9005 					*pkt->pkt_scbp = status;
9006 					pkt->pkt_resid = 0;
9007 				} else {
9008 					pkt->pkt_reason = CMD_TRAN_ERR;
9009 					pkt->pkt_statistics |= STAT_ABORTED;
9010 				}
9011 				break;
9012 			}
9013 
9014 			case FC_REASON_NO_CONNECTION:
9015 			case FC_REASON_UNSUPPORTED:
9016 			case FC_REASON_ILLEGAL_REQ:
9017 			case FC_REASON_BAD_SID:
9018 			case FC_REASON_DIAG_BUSY:
9019 			case FC_REASON_FCAL_OPN_FAIL:
9020 			case FC_REASON_BAD_XID:
9021 			default:
9022 				pkt->pkt_reason = CMD_TRAN_ERR;
9023 				pkt->pkt_statistics |= STAT_ABORTED;
9024 				break;
9025 
9026 			}
9027 			break;
9028 
9029 		case FC_PKT_NPORT_RJT:
9030 		case FC_PKT_FABRIC_RJT:
9031 		case FC_PKT_NPORT_BSY:
9032 		case FC_PKT_FABRIC_BSY:
9033 		default:
9034 			FCP_DTRACE(fcp_logq, pptr->port_instbuf,
9035 			    fcp_trace, FCP_BUF_LEVEL_8, 0,
9036 			    "FC Status 0x%x, reason 0x%x",
9037 			    fpkt->pkt_state, fpkt->pkt_reason);
9038 			pkt->pkt_reason = CMD_TRAN_ERR;
9039 			pkt->pkt_statistics |= STAT_ABORTED;
9040 			break;
9041 		}
9042 
9043 		FCP_DTRACE(fcp_logq, pptr->port_instbuf,
9044 		    fcp_trace, FCP_BUF_LEVEL_9, 0,
9045 		    "!FC error on cmd=%p target=0x%x: pkt state=0x%x "
9046 		    " pkt reason=0x%x", cmd, ptgt->tgt_d_id, fpkt->pkt_state,
9047 		    fpkt->pkt_reason);
9048 	}
9049 
9050 	ASSERT(save.a_hba_tran == pkt->pkt_address.a_hba_tran);
9051 }
9052 
9053 
9054 static int
9055 fcp_validate_fcp_response(struct fcp_rsp *rsp, struct fcp_port *pptr)
9056 {
9057 	if (rsp->reserved_0 || rsp->reserved_1 ||
9058 	    rsp->fcp_u.fcp_status.reserved_0 ||
9059 	    rsp->fcp_u.fcp_status.reserved_1) {
9060 		/*
9061 		 * These reserved fields should ideally be zero. FCP-2 does say
9062 		 * that the recipient need not check for reserved fields to be
9063 		 * zero. If they are not zero, we will not make a fuss about it
9064 		 * - just log it (in debug to both trace buffer and messages
9065 		 * file and to trace buffer only in non-debug) and move on.
9066 		 *
9067 		 * Non-zero reserved fields were seen with minnows.
9068 		 *
9069 		 * qlc takes care of some of this but we cannot assume that all
9070 		 * FCAs will do so.
9071 		 */
9072 		FCP_TRACE(fcp_logq, pptr->port_instbuf, fcp_trace,
9073 		    FCP_BUF_LEVEL_5, 0,
9074 		    "Got fcp response packet with non-zero reserved fields "
9075 		    "rsp->reserved_0:0x%x, rsp_reserved_1:0x%x, "
9076 		    "status.reserved_0:0x%x, status.reserved_1:0x%x",
9077 		    rsp->reserved_0, rsp->reserved_1,
9078 		    rsp->fcp_u.fcp_status.reserved_0,
9079 		    rsp->fcp_u.fcp_status.reserved_1);
9080 	}
9081 
9082 	if (rsp->fcp_u.fcp_status.rsp_len_set && (rsp->fcp_response_len >
9083 	    (FCP_MAX_RSP_IU_SIZE - sizeof (struct fcp_rsp)))) {
9084 		return (FC_FAILURE);
9085 	}
9086 
9087 	if (rsp->fcp_u.fcp_status.sense_len_set && rsp->fcp_sense_len >
9088 	    (FCP_MAX_RSP_IU_SIZE - rsp->fcp_response_len -
9089 	    sizeof (struct fcp_rsp))) {
9090 		return (FC_FAILURE);
9091 	}
9092 
9093 	return (FC_SUCCESS);
9094 }
9095 
9096 
9097 /*
9098  * This is called when there is a change the in device state. The case we're
9099  * handling here is, if the d_id s does not match, offline this tgt and online
9100  * a new tgt with the new d_id.  called from fcp_handle_devices with
9101  * port_mutex held.
9102  */
9103 static int
9104 fcp_device_changed(struct fcp_port *pptr, struct fcp_tgt *ptgt,
9105     fc_portmap_t *map_entry, int link_cnt, int tgt_cnt, int cause)
9106 {
9107 	ASSERT(mutex_owned(&pptr->port_mutex));
9108 
9109 	FCP_TRACE(fcp_logq, pptr->port_instbuf,
9110 	    fcp_trace, FCP_BUF_LEVEL_3, 0,
9111 	    "Starting fcp_device_changed...");
9112 
9113 	/*
9114 	 * The two cases where the port_device_changed is called is
9115 	 * either it changes it's d_id or it's hard address.
9116 	 */
9117 	if ((ptgt->tgt_d_id != map_entry->map_did.port_id) ||
9118 	    (FC_TOP_EXTERNAL(pptr->port_topology) &&
9119 	    (ptgt->tgt_hard_addr != map_entry->map_hard_addr.hard_addr))) {
9120 
9121 		/* offline this target */
9122 		mutex_enter(&ptgt->tgt_mutex);
9123 		if (!(ptgt->tgt_state & FCP_TGT_OFFLINE)) {
9124 			(void) fcp_offline_target(pptr, ptgt, link_cnt,
9125 			    0, 1, NDI_DEVI_REMOVE);
9126 		}
9127 		mutex_exit(&ptgt->tgt_mutex);
9128 
9129 		fcp_log(CE_NOTE, pptr->port_dip,
9130 		    "Change in target properties: Old D_ID=%x New D_ID=%x"
9131 		    " Old HA=%x New HA=%x", ptgt->tgt_d_id,
9132 		    map_entry->map_did.port_id, ptgt->tgt_hard_addr,
9133 		    map_entry->map_hard_addr.hard_addr);
9134 	}
9135 
9136 	return (fcp_handle_mapflags(pptr, ptgt, map_entry,
9137 	    link_cnt, tgt_cnt, cause));
9138 }
9139 
9140 /*
9141  *     Function: fcp_alloc_lun
9142  *
9143  *  Description: Creates a new lun structure and adds it to the list
9144  *		 of luns of the target.
9145  *
9146  *     Argument: ptgt		Target the lun will belong to.
9147  *
9148  * Return Value: NULL		Failed
9149  *		 Not NULL	Succeeded
9150  *
9151  *      Context: Kernel context
9152  */
9153 static struct fcp_lun *
9154 fcp_alloc_lun(struct fcp_tgt *ptgt)
9155 {
9156 	struct fcp_lun *plun;
9157 
9158 	plun = kmem_zalloc(sizeof (struct fcp_lun), KM_NOSLEEP);
9159 	if (plun != NULL) {
9160 		/*
9161 		 * Initialize the mutex before putting in the target list
9162 		 * especially before releasing the target mutex.
9163 		 */
9164 		mutex_init(&plun->lun_mutex, NULL, MUTEX_DRIVER, NULL);
9165 		plun->lun_tgt = ptgt;
9166 
9167 		mutex_enter(&ptgt->tgt_mutex);
9168 		plun->lun_next = ptgt->tgt_lun;
9169 		ptgt->tgt_lun = plun;
9170 		plun->lun_old_guid = NULL;
9171 		plun->lun_old_guid_size = 0;
9172 		mutex_exit(&ptgt->tgt_mutex);
9173 	}
9174 
9175 	return (plun);
9176 }
9177 
9178 /*
9179  *     Function: fcp_dealloc_lun
9180  *
9181  *  Description: Frees the LUN structure passed by the caller.
9182  *
9183  *     Argument: plun		LUN structure to free.
9184  *
9185  * Return Value: None
9186  *
9187  *      Context: Kernel context.
9188  */
9189 static void
9190 fcp_dealloc_lun(struct fcp_lun *plun)
9191 {
9192 	mutex_enter(&plun->lun_mutex);
9193 	if (plun->lun_cip) {
9194 		fcp_remove_child(plun);
9195 	}
9196 	mutex_exit(&plun->lun_mutex);
9197 
9198 	mutex_destroy(&plun->lun_mutex);
9199 	if (plun->lun_guid) {
9200 		kmem_free(plun->lun_guid, plun->lun_guid_size);
9201 	}
9202 	if (plun->lun_old_guid) {
9203 		kmem_free(plun->lun_old_guid, plun->lun_old_guid_size);
9204 	}
9205 	kmem_free(plun, sizeof (*plun));
9206 }
9207 
9208 /*
9209  *     Function: fcp_alloc_tgt
9210  *
9211  *  Description: Creates a new target structure and adds it to the port
9212  *		 hash list.
9213  *
9214  *     Argument: pptr		fcp port structure
9215  *		 *map_entry	entry describing the target to create
9216  *		 link_cnt	Link state change counter
9217  *
9218  * Return Value: NULL		Failed
9219  *		 Not NULL	Succeeded
9220  *
9221  *      Context: Kernel context.
9222  */
9223 static struct fcp_tgt *
9224 fcp_alloc_tgt(struct fcp_port *pptr, fc_portmap_t *map_entry, int link_cnt)
9225 {
9226 	int			hash;
9227 	uchar_t			*wwn;
9228 	struct fcp_tgt 	*ptgt;
9229 
9230 	ptgt = kmem_zalloc(sizeof (*ptgt), KM_NOSLEEP);
9231 	if (ptgt != NULL) {
9232 		mutex_enter(&pptr->port_mutex);
9233 		if (link_cnt != pptr->port_link_cnt) {
9234 			/*
9235 			 * oh oh -- another link reset
9236 			 * in progress -- give up
9237 			 */
9238 			mutex_exit(&pptr->port_mutex);
9239 			kmem_free(ptgt, sizeof (*ptgt));
9240 			ptgt = NULL;
9241 		} else {
9242 			/*
9243 			 * initialize the mutex before putting in the port
9244 			 * wwn list, especially before releasing the port
9245 			 * mutex.
9246 			 */
9247 			mutex_init(&ptgt->tgt_mutex, NULL, MUTEX_DRIVER, NULL);
9248 
9249 			/* add new target entry to the port's hash list */
9250 			wwn = (uchar_t *)&map_entry->map_pwwn;
9251 			hash = FCP_HASH(wwn);
9252 
9253 			ptgt->tgt_next = pptr->port_tgt_hash_table[hash];
9254 			pptr->port_tgt_hash_table[hash] = ptgt;
9255 
9256 			/* save cross-ptr */
9257 			ptgt->tgt_port = pptr;
9258 
9259 			ptgt->tgt_change_cnt = 1;
9260 
9261 			/* initialize the target manual_config_only flag */
9262 			if (fcp_enable_auto_configuration) {
9263 				ptgt->tgt_manual_config_only = 0;
9264 			} else {
9265 				ptgt->tgt_manual_config_only = 1;
9266 			}
9267 
9268 			mutex_exit(&pptr->port_mutex);
9269 		}
9270 	}
9271 
9272 	return (ptgt);
9273 }
9274 
9275 /*
9276  *     Function: fcp_dealloc_tgt
9277  *
9278  *  Description: Frees the target structure passed by the caller.
9279  *
9280  *     Argument: ptgt		Target structure to free.
9281  *
9282  * Return Value: None
9283  *
9284  *      Context: Kernel context.
9285  */
9286 static void
9287 fcp_dealloc_tgt(struct fcp_tgt *ptgt)
9288 {
9289 	mutex_destroy(&ptgt->tgt_mutex);
9290 	kmem_free(ptgt, sizeof (*ptgt));
9291 }
9292 
9293 
9294 /*
9295  * Handle STATUS_QFULL and STATUS_BUSY by performing delayed retry
9296  *
9297  *	Device discovery commands will not be retried for-ever as
9298  *	this will have repercussions on other devices that need to
9299  *	be submitted to the hotplug thread. After a quick glance
9300  *	at the SCSI-3 spec, it was found that the spec doesn't
9301  *	mandate a forever retry, rather recommends a delayed retry.
9302  *
9303  *	Since Photon IB is single threaded, STATUS_BUSY is common
9304  *	in a 4+initiator environment. Make sure the total time
9305  * 	spent on retries (including command timeout) does not
9306  *	60 seconds
9307  */
9308 static void
9309 fcp_queue_ipkt(struct fcp_port *pptr, fc_packet_t *fpkt)
9310 {
9311 	struct fcp_ipkt *icmd = (struct fcp_ipkt *)fpkt->pkt_ulp_private;
9312 	struct fcp_tgt *ptgt = icmd->ipkt_tgt;
9313 
9314 	mutex_enter(&pptr->port_mutex);
9315 	mutex_enter(&ptgt->tgt_mutex);
9316 	if (FCP_STATE_CHANGED(pptr, ptgt, icmd)) {
9317 		FCP_TRACE(fcp_logq, pptr->port_instbuf,
9318 		    fcp_trace, FCP_BUF_LEVEL_2, 0,
9319 		    "fcp_queue_ipkt,1:state change occured"
9320 		    " for D_ID=0x%x", ptgt->tgt_d_id);
9321 		mutex_exit(&ptgt->tgt_mutex);
9322 		mutex_exit(&pptr->port_mutex);
9323 		(void) fcp_call_finish_init(pptr, ptgt, icmd->ipkt_link_cnt,
9324 		    icmd->ipkt_change_cnt, icmd->ipkt_cause);
9325 		fcp_icmd_free(pptr, icmd);
9326 		return;
9327 	}
9328 	mutex_exit(&ptgt->tgt_mutex);
9329 
9330 	icmd->ipkt_restart = fcp_watchdog_time + icmd->ipkt_retries++;
9331 
9332 	if (pptr->port_ipkt_list != NULL) {
9333 		/* add pkt to front of doubly-linked list */
9334 		pptr->port_ipkt_list->ipkt_prev = icmd;
9335 		icmd->ipkt_next = pptr->port_ipkt_list;
9336 		pptr->port_ipkt_list = icmd;
9337 		icmd->ipkt_prev = NULL;
9338 	} else {
9339 		/* this is the first/only pkt on the list */
9340 		pptr->port_ipkt_list = icmd;
9341 		icmd->ipkt_next = NULL;
9342 		icmd->ipkt_prev = NULL;
9343 	}
9344 	mutex_exit(&pptr->port_mutex);
9345 }
9346 
9347 /*
9348  *     Function: fcp_transport
9349  *
9350  *  Description: This function submits the Fibre Channel packet to the transort
9351  *		 layer by calling fc_ulp_transport().  If fc_ulp_transport()
9352  *		 fails the submission, the treatment depends on the value of
9353  *		 the variable internal.
9354  *
9355  *     Argument: port_handle	fp/fctl port handle.
9356  *		 *fpkt		Packet to submit to the transport layer.
9357  *		 internal	Not zero when it's an internal packet.
9358  *
9359  * Return Value: FC_TRAN_BUSY
9360  *		 FC_STATEC_BUSY
9361  *		 FC_OFFLINE
9362  *		 FC_LOGINREQ
9363  *		 FC_DEVICE_BUSY
9364  *		 FC_SUCCESS
9365  */
9366 static int
9367 fcp_transport(opaque_t port_handle, fc_packet_t *fpkt, int internal)
9368 {
9369 	int	rval;
9370 
9371 	rval = fc_ulp_transport(port_handle, fpkt);
9372 	if (rval == FC_SUCCESS) {
9373 		return (rval);
9374 	}
9375 
9376 	/*
9377 	 * LUN isn't marked BUSY or OFFLINE, so we got here to transport
9378 	 * a command, if the underlying modules see that there is a state
9379 	 * change, or if a port is OFFLINE, that means, that state change
9380 	 * hasn't reached FCP yet, so re-queue the command for deferred
9381 	 * submission.
9382 	 */
9383 	if ((rval == FC_STATEC_BUSY) || (rval == FC_OFFLINE) ||
9384 	    (rval == FC_LOGINREQ) || (rval == FC_DEVICE_BUSY) ||
9385 	    (rval == FC_DEVICE_BUSY_NEW_RSCN) || (rval == FC_TRAN_BUSY)) {
9386 		/*
9387 		 * Defer packet re-submission. Life hang is possible on
9388 		 * internal commands if the port driver sends FC_STATEC_BUSY
9389 		 * for ever, but that shouldn't happen in a good environment.
9390 		 * Limiting re-transport for internal commands is probably a
9391 		 * good idea..
9392 		 * A race condition can happen when a port sees barrage of
9393 		 * link transitions offline to online. If the FCTL has
9394 		 * returned FC_STATEC_BUSY or FC_OFFLINE then none of the
9395 		 * internal commands should be queued to do the discovery.
9396 		 * The race condition is when an online comes and FCP starts
9397 		 * its internal discovery and the link goes offline. It is
9398 		 * possible that the statec_callback has not reached FCP
9399 		 * and FCP is carrying on with its internal discovery.
9400 		 * FC_STATEC_BUSY or FC_OFFLINE will be the first indication
9401 		 * that the link has gone offline. At this point FCP should
9402 		 * drop all the internal commands and wait for the
9403 		 * statec_callback. It will be facilitated by incrementing
9404 		 * port_link_cnt.
9405 		 *
9406 		 * For external commands, the (FC)pkt_timeout is decremented
9407 		 * by the QUEUE Delay added by our driver, Care is taken to
9408 		 * ensure that it doesn't become zero (zero means no timeout)
9409 		 * If the time expires right inside driver queue itself,
9410 		 * the watch thread will return it to the original caller
9411 		 * indicating that the command has timed-out.
9412 		 */
9413 		if (internal) {
9414 			char			*op;
9415 			struct fcp_ipkt	*icmd;
9416 
9417 			icmd = (struct fcp_ipkt *)fpkt->pkt_ulp_private;
9418 			switch (icmd->ipkt_opcode) {
9419 			case SCMD_REPORT_LUN:
9420 				op = "REPORT LUN";
9421 				break;
9422 
9423 			case SCMD_INQUIRY:
9424 				op = "INQUIRY";
9425 				break;
9426 
9427 			case SCMD_INQUIRY_PAGE83:
9428 				op = "INQUIRY-83";
9429 				break;
9430 
9431 			default:
9432 				op = "Internal SCSI COMMAND";
9433 				break;
9434 			}
9435 
9436 			if (fcp_handle_ipkt_errors(icmd->ipkt_port,
9437 			    icmd->ipkt_tgt, icmd, rval, op) == DDI_SUCCESS) {
9438 				rval = FC_SUCCESS;
9439 			}
9440 		} else {
9441 			struct fcp_pkt *cmd;
9442 			struct fcp_port *pptr;
9443 
9444 			cmd = (struct fcp_pkt *)fpkt->pkt_ulp_private;
9445 			cmd->cmd_state = FCP_PKT_IDLE;
9446 			pptr = ADDR2FCP(&cmd->cmd_pkt->pkt_address);
9447 
9448 			if (cmd->cmd_pkt->pkt_flags & FLAG_NOQUEUE) {
9449 				FCP_DTRACE(fcp_logq, pptr->port_instbuf,
9450 				    fcp_trace, FCP_BUF_LEVEL_9, 0,
9451 				    "fcp_transport: xport busy for pkt %p",
9452 				    cmd->cmd_pkt);
9453 				rval = FC_TRAN_BUSY;
9454 			} else {
9455 				fcp_queue_pkt(pptr, cmd);
9456 				rval = FC_SUCCESS;
9457 			}
9458 		}
9459 	}
9460 
9461 	return (rval);
9462 }
9463 
9464 /*VARARGS3*/
9465 static void
9466 fcp_log(int level, dev_info_t *dip, const char *fmt, ...)
9467 {
9468 	char		buf[256];
9469 	va_list		ap;
9470 
9471 	if (dip == NULL) {
9472 		dip = fcp_global_dip;
9473 	}
9474 
9475 	va_start(ap, fmt);
9476 	(void) vsprintf(buf, fmt, ap);
9477 	va_end(ap);
9478 
9479 	scsi_log(dip, "fcp", level, buf);
9480 }
9481 
9482 /*
9483  * This function retries NS registry of FC4 type.
9484  * It assumes that fcp_mutex is held.
9485  * The function does nothing if topology is not fabric
9486  * So, the topology has to be set before this function can be called
9487  */
9488 static void
9489 fcp_retry_ns_registry(struct fcp_port *pptr, uint32_t s_id)
9490 {
9491 	int	rval;
9492 
9493 	ASSERT(MUTEX_HELD(&pptr->port_mutex));
9494 
9495 	if (((pptr->port_state & FCP_STATE_NS_REG_FAILED) == 0) ||
9496 	    ((pptr->port_topology != FC_TOP_FABRIC) &&
9497 	    (pptr->port_topology != FC_TOP_PUBLIC_LOOP))) {
9498 		if (pptr->port_state & FCP_STATE_NS_REG_FAILED) {
9499 			pptr->port_state &= ~FCP_STATE_NS_REG_FAILED;
9500 		}
9501 		return;
9502 	}
9503 	mutex_exit(&pptr->port_mutex);
9504 	rval = fcp_do_ns_registry(pptr, s_id);
9505 	mutex_enter(&pptr->port_mutex);
9506 
9507 	if (rval == 0) {
9508 		/* Registry successful. Reset flag */
9509 		pptr->port_state &= ~(FCP_STATE_NS_REG_FAILED);
9510 	}
9511 }
9512 
9513 /*
9514  * This function registers the ULP with the switch by calling transport i/f
9515  */
9516 static int
9517 fcp_do_ns_registry(struct fcp_port *pptr, uint32_t s_id)
9518 {
9519 	fc_ns_cmd_t		ns_cmd;
9520 	ns_rfc_type_t		rfc;
9521 	uint32_t		types[8];
9522 
9523 	/*
9524 	 * Prepare the Name server structure to
9525 	 * register with the transport in case of
9526 	 * Fabric configuration.
9527 	 */
9528 	bzero(&rfc, sizeof (rfc));
9529 	bzero(types, sizeof (types));
9530 
9531 	types[FC4_TYPE_WORD_POS(FC_TYPE_SCSI_FCP)] =
9532 	    (1 << FC4_TYPE_BIT_POS(FC_TYPE_SCSI_FCP));
9533 
9534 	rfc.rfc_port_id.port_id = s_id;
9535 	bcopy(types, rfc.rfc_types, sizeof (types));
9536 
9537 	ns_cmd.ns_flags = 0;
9538 	ns_cmd.ns_cmd = NS_RFT_ID;
9539 	ns_cmd.ns_req_len = sizeof (rfc);
9540 	ns_cmd.ns_req_payload = (caddr_t)&rfc;
9541 	ns_cmd.ns_resp_len = 0;
9542 	ns_cmd.ns_resp_payload = NULL;
9543 
9544 	/*
9545 	 * Perform the Name Server Registration for SCSI_FCP FC4 Type.
9546 	 */
9547 	if (fc_ulp_port_ns(pptr->port_fp_handle, NULL, &ns_cmd)) {
9548 	    fcp_log(CE_WARN, pptr->port_dip,
9549 		    "!ns_registry: failed name server registration");
9550 	    return (1);
9551 	}
9552 
9553 	return (0);
9554 }
9555 
9556 /*
9557  *     Function: fcp_handle_port_attach
9558  *
9559  *  Description: This function is called from fcp_port_attach() to attach a
9560  *		 new port. This routine does the following:
9561  *
9562  *		1) Allocates an fcp_port structure and initializes it.
9563  *		2) Tries to register the new FC-4 (FCP) capablity with the name
9564  *		   server.
9565  *		3) Kicks off the enumeration of the targets/luns visible
9566  *		   through this new port.  That is done by calling
9567  *		   fcp_statec_callback() if the port is online.
9568  *
9569  *     Argument: ulph		fp/fctl port handle.
9570  *		 *pinfo		Port information.
9571  *		 s_id		Port ID.
9572  *		 instance	Device instance number for the local port
9573  *				(returned by ddi_get_instance()).
9574  *
9575  * Return Value: DDI_SUCCESS
9576  *		 DDI_FAILURE
9577  *
9578  *      Context: User and Kernel context.
9579  */
9580 /*ARGSUSED*/
9581 int
9582 fcp_handle_port_attach(opaque_t ulph, fc_ulp_port_info_t *pinfo,
9583     uint32_t s_id, int instance)
9584 {
9585 	int			res = DDI_FAILURE;
9586 	scsi_hba_tran_t		*tran;
9587 	int			mutex_initted = FALSE;
9588 	int			hba_attached = FALSE;
9589 	int			soft_state_linked = FALSE;
9590 	int			event_bind = FALSE;
9591 	struct fcp_port 	*pptr;
9592 	fc_portmap_t		*tmp_list = NULL;
9593 	uint32_t		max_cnt, alloc_cnt;
9594 	uchar_t			*boot_wwn = NULL;
9595 	uint_t			nbytes;
9596 	int			manual_cfg;
9597 
9598 	/*
9599 	 * this port instance attaching for the first time (or after
9600 	 * being detached before)
9601 	 */
9602 	FCP_TRACE(fcp_logq, "fcp", fcp_trace,
9603 	    FCP_BUF_LEVEL_3, 0, "port attach: for port %d", instance);
9604 
9605 	if (ddi_soft_state_zalloc(fcp_softstate, instance) != DDI_SUCCESS) {
9606 		cmn_err(CE_WARN, "fcp: Softstate struct alloc failed"
9607 		    "parent dip: %p; instance: %d", (void *)pinfo->port_dip,
9608 		    instance);
9609 		return (res);
9610 	}
9611 
9612 	if ((pptr = ddi_get_soft_state(fcp_softstate, instance)) == NULL) {
9613 		/* this shouldn't happen */
9614 		ddi_soft_state_free(fcp_softstate, instance);
9615 		cmn_err(CE_WARN, "fcp: bad soft state");
9616 		return (res);
9617 	}
9618 
9619 	(void) sprintf(pptr->port_instbuf, "fcp(%d)", instance);
9620 
9621 	/*
9622 	 * Make a copy of ulp_port_info as fctl allocates
9623 	 * a temp struct.
9624 	 */
9625 	(void) fcp_cp_pinfo(pptr, pinfo);
9626 
9627 	/*
9628 	 * Check for manual_configuration_only property.
9629 	 * Enable manual configurtion if the property is
9630 	 * set to 1, otherwise disable manual configuration.
9631 	 */
9632 	if ((manual_cfg = ddi_prop_get_int(DDI_DEV_T_ANY, pptr->port_dip,
9633 		DDI_PROP_NOTPROM | DDI_PROP_DONTPASS,
9634 		MANUAL_CFG_ONLY,
9635 		    -1)) != -1) {
9636 		if (manual_cfg == 1) {
9637 			char	*pathname;
9638 			pathname = kmem_zalloc(MAXPATHLEN, KM_SLEEP);
9639 			(void) ddi_pathname(pptr->port_dip, pathname);
9640 			cmn_err(CE_NOTE,
9641 			    "%s (%s%d) %s is enabled via %s.conf.",
9642 			    pathname,
9643 			    ddi_driver_name(pptr->port_dip),
9644 			    ddi_get_instance(pptr->port_dip),
9645 			    MANUAL_CFG_ONLY,
9646 			    ddi_driver_name(pptr->port_dip));
9647 			fcp_enable_auto_configuration = 0;
9648 			kmem_free(pathname, MAXPATHLEN);
9649 		}
9650 	}
9651 	_NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(pptr->port_link_cnt))
9652 	pptr->port_link_cnt = 1;
9653 	_NOTE(NOW_VISIBLE_TO_OTHER_THREADS(pptr->port_link_cnt))
9654 	pptr->port_id = s_id;
9655 	pptr->port_instance = instance;
9656 	_NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(pptr->port_state))
9657 	pptr->port_state = FCP_STATE_INIT;
9658 	_NOTE(NOW_VISIBLE_TO_OTHER_THREADS(pptr->port_state))
9659 
9660 	pptr->port_dmacookie_sz = (pptr->port_data_dma_attr.dma_attr_sgllen *
9661 				    sizeof (ddi_dma_cookie_t));
9662 
9663 	/*
9664 	 * The two mutexes of fcp_port are initialized.  The variable
9665 	 * mutex_initted is incremented to remember that fact.  That variable
9666 	 * is checked when the routine fails and the mutexes have to be
9667 	 * destroyed.
9668 	 */
9669 	mutex_init(&pptr->port_mutex, NULL, MUTEX_DRIVER, NULL);
9670 	mutex_init(&pptr->port_pkt_mutex, NULL, MUTEX_DRIVER, NULL);
9671 	mutex_initted++;
9672 
9673 	/*
9674 	 * The SCSI tran structure is allocate and initialized now.
9675 	 */
9676 	if ((tran = scsi_hba_tran_alloc(pptr->port_dip, 0)) == NULL) {
9677 		fcp_log(CE_WARN, pptr->port_dip,
9678 		    "!fcp%d: scsi_hba_tran_alloc failed", instance);
9679 		goto fail;
9680 	}
9681 
9682 	/* link in the transport structure then fill it in */
9683 	pptr->port_tran = tran;
9684 	tran->tran_hba_private		= pptr;
9685 	tran->tran_tgt_private		= NULL;
9686 	tran->tran_tgt_init		= fcp_scsi_tgt_init;
9687 	tran->tran_tgt_probe		= NULL;
9688 	tran->tran_tgt_free		= fcp_scsi_tgt_free;
9689 	tran->tran_start		= fcp_scsi_start;
9690 	tran->tran_reset		= fcp_scsi_reset;
9691 	tran->tran_abort		= fcp_scsi_abort;
9692 	tran->tran_getcap		= fcp_scsi_getcap;
9693 	tran->tran_setcap		= fcp_scsi_setcap;
9694 	tran->tran_init_pkt		= NULL;
9695 	tran->tran_destroy_pkt		= NULL;
9696 	tran->tran_dmafree		= NULL;
9697 	tran->tran_sync_pkt		= NULL;
9698 	tran->tran_reset_notify		= fcp_scsi_reset_notify;
9699 	tran->tran_get_bus_addr		= fcp_scsi_get_bus_addr;
9700 	tran->tran_get_name		= fcp_scsi_get_name;
9701 	tran->tran_clear_aca		= NULL;
9702 	tran->tran_clear_task_set	= NULL;
9703 	tran->tran_terminate_task	= NULL;
9704 	tran->tran_get_eventcookie	= fcp_scsi_bus_get_eventcookie;
9705 	tran->tran_add_eventcall	= fcp_scsi_bus_add_eventcall;
9706 	tran->tran_remove_eventcall	= fcp_scsi_bus_remove_eventcall;
9707 	tran->tran_post_event		= fcp_scsi_bus_post_event;
9708 	tran->tran_quiesce		= NULL;
9709 	tran->tran_unquiesce		= NULL;
9710 	tran->tran_bus_reset		= NULL;
9711 	tran->tran_bus_config		= fcp_scsi_bus_config;
9712 	tran->tran_bus_unconfig		= fcp_scsi_bus_unconfig;
9713 	tran->tran_bus_power		= NULL;
9714 	tran->tran_interconnect_type	= INTERCONNECT_FABRIC;
9715 
9716 	tran->tran_pkt_constructor	= fcp_kmem_cache_constructor;
9717 	tran->tran_pkt_destructor	= fcp_kmem_cache_destructor;
9718 	tran->tran_setup_pkt		= fcp_pkt_setup;
9719 	tran->tran_teardown_pkt		= fcp_pkt_teardown;
9720 	tran->tran_hba_len		= pptr->port_priv_pkt_len +
9721 	    sizeof (struct fcp_pkt) + pptr->port_dmacookie_sz;
9722 
9723 	/*
9724 	 * Allocate an ndi event handle
9725 	 */
9726 	pptr->port_ndi_event_defs = (ndi_event_definition_t *)
9727 	    kmem_zalloc(sizeof (fcp_ndi_event_defs), KM_SLEEP);
9728 
9729 	bcopy(fcp_ndi_event_defs, pptr->port_ndi_event_defs,
9730 	    sizeof (fcp_ndi_event_defs));
9731 
9732 	(void) ndi_event_alloc_hdl(pptr->port_dip, NULL,
9733 	    &pptr->port_ndi_event_hdl, NDI_SLEEP);
9734 
9735 	pptr->port_ndi_events.ndi_events_version = NDI_EVENTS_REV1;
9736 	pptr->port_ndi_events.ndi_n_events = FCP_N_NDI_EVENTS;
9737 	pptr->port_ndi_events.ndi_event_defs = pptr->port_ndi_event_defs;
9738 
9739 	if (DEVI_IS_ATTACHING(pptr->port_dip) &&
9740 	    (ndi_event_bind_set(pptr->port_ndi_event_hdl,
9741 	    &pptr->port_ndi_events, NDI_SLEEP) != NDI_SUCCESS)) {
9742 		goto fail;
9743 	}
9744 	event_bind++;	/* Checked in fail case */
9745 
9746 	if (scsi_hba_attach_setup(pptr->port_dip, &pptr->port_data_dma_attr,
9747 	    tran, SCSI_HBA_TRAN_CLONE | SCSI_HBA_TRAN_SCB)
9748 	    != DDI_SUCCESS) {
9749 		fcp_log(CE_WARN, pptr->port_dip,
9750 		    "!fcp%d: scsi_hba_attach_setup failed", instance);
9751 		goto fail;
9752 	}
9753 	hba_attached++;	/* Checked in fail case */
9754 
9755 	pptr->port_mpxio = 0;
9756 	if (mdi_phci_register(MDI_HCI_CLASS_SCSI, pptr->port_dip, 0) ==
9757 	    MDI_SUCCESS) {
9758 		pptr->port_mpxio++;
9759 	}
9760 
9761 	/*
9762 	 * The following code is putting the new port structure in the global
9763 	 * list of ports and, if it is the first port to attach, it start the
9764 	 * fcp_watchdog_tick.
9765 	 *
9766 	 * Why put this new port in the global before we are done attaching it?
9767 	 * We are actually making the structure globally known before we are
9768 	 * done attaching it.  The reason for that is: because of the code that
9769 	 * follows.  At this point the resources to handle the port are
9770 	 * allocated.  This function is now going to do the following:
9771 	 *
9772 	 *   1) It is going to try to register with the name server advertizing
9773 	 *	the new FCP capability of the port.
9774 	 *   2) It is going to play the role of the fp/fctl layer by building
9775 	 *	a list of worlwide names reachable through this port and call
9776 	 *	itself on fcp_statec_callback().  That requires the port to
9777 	 *	be part of the global list.
9778 	 */
9779 	mutex_enter(&fcp_global_mutex);
9780 	if (fcp_port_head == NULL) {
9781 		fcp_read_blacklist(pinfo->port_dip, &fcp_lun_blacklist);
9782 	}
9783 	pptr->port_next = fcp_port_head;
9784 	fcp_port_head = pptr;
9785 	soft_state_linked++;
9786 
9787 	if (fcp_watchdog_init++ == 0) {
9788 		fcp_watchdog_tick = fcp_watchdog_timeout *
9789 		    drv_usectohz(1000000);
9790 		fcp_watchdog_id = timeout(fcp_watch, NULL,
9791 		    fcp_watchdog_tick);
9792 	}
9793 	mutex_exit(&fcp_global_mutex);
9794 
9795 	/*
9796 	 * Here an attempt is made to register with the name server, the new
9797 	 * FCP capability.  That is done using an RTF_ID to the name server.
9798 	 * It is done synchronously.  The function fcp_do_ns_registry()
9799 	 * doesn't return till the name server responded.
9800 	 * On failures, just ignore it for now and it will get retried during
9801 	 * state change callbacks. We'll set a flag to show this failure
9802 	 */
9803 	if (fcp_do_ns_registry(pptr, s_id)) {
9804 		mutex_enter(&pptr->port_mutex);
9805 		pptr->port_state |= FCP_STATE_NS_REG_FAILED;
9806 		mutex_exit(&pptr->port_mutex);
9807 	} else {
9808 		mutex_enter(&pptr->port_mutex);
9809 		pptr->port_state &= ~(FCP_STATE_NS_REG_FAILED);
9810 		mutex_exit(&pptr->port_mutex);
9811 	}
9812 
9813 	/*
9814 	 * Lookup for boot WWN property
9815 	 */
9816 	if (modrootloaded != 1) {
9817 		if ((ddi_prop_lookup_byte_array(DDI_DEV_T_ANY,
9818 		    ddi_get_parent(pinfo->port_dip),
9819 		    DDI_PROP_DONTPASS, OBP_BOOT_WWN,
9820 		    &boot_wwn, &nbytes) == DDI_PROP_SUCCESS) &&
9821 		    (nbytes == FC_WWN_SIZE)) {
9822 			bcopy(boot_wwn, pptr->port_boot_wwn, FC_WWN_SIZE);
9823 		}
9824 		if (boot_wwn) {
9825 			ddi_prop_free(boot_wwn);
9826 		}
9827 	}
9828 
9829 	/*
9830 	 * Handle various topologies and link states.
9831 	 */
9832 	switch (FC_PORT_STATE_MASK(pptr->port_phys_state)) {
9833 	case FC_STATE_OFFLINE:
9834 
9835 		/*
9836 		 * we're attaching a port where the link is offline
9837 		 *
9838 		 * Wait for ONLINE, at which time a state
9839 		 * change will cause a statec_callback
9840 		 *
9841 		 * in the mean time, do not do anything
9842 		 */
9843 		res = DDI_SUCCESS;
9844 		pptr->port_state |= FCP_STATE_OFFLINE;
9845 		break;
9846 
9847 	case FC_STATE_ONLINE: {
9848 		if (pptr->port_topology == FC_TOP_UNKNOWN) {
9849 			(void) fcp_linkreset(pptr, NULL, KM_NOSLEEP);
9850 			res = DDI_SUCCESS;
9851 			break;
9852 		}
9853 		/*
9854 		 * discover devices and create nodes (a private
9855 		 * loop or point-to-point)
9856 		 */
9857 		ASSERT(pptr->port_topology != FC_TOP_UNKNOWN);
9858 
9859 		/*
9860 		 * At this point we are going to build a list of all the ports
9861 		 * that	can be reached through this local port.  It looks like
9862 		 * we cannot handle more than FCP_MAX_DEVICES per local port
9863 		 * (128).
9864 		 */
9865 		if ((tmp_list = (fc_portmap_t *)kmem_zalloc(
9866 		    sizeof (fc_portmap_t) * FCP_MAX_DEVICES,
9867 		    KM_NOSLEEP)) == NULL) {
9868 			fcp_log(CE_WARN, pptr->port_dip,
9869 			    "!fcp%d: failed to allocate portmap",
9870 			    instance);
9871 			goto fail;
9872 		}
9873 
9874 		/*
9875 		 * fc_ulp_getportmap() is going to provide us with the list of
9876 		 * remote ports in the buffer we just allocated.  The way the
9877 		 * list is going to be retrieved depends on the topology.
9878 		 * However, if we are connected to a Fabric, a name server
9879 		 * request may be sent to get the list of FCP capable ports.
9880 		 * It should be noted that is the case the request is
9881 		 * synchronous.  This means we are stuck here till the name
9882 		 * server replies.  A lot of things can change during that time
9883 		 * and including, may be, being called on
9884 		 * fcp_statec_callback() for different reasons. I'm not sure
9885 		 * the code can handle that.
9886 		 */
9887 		max_cnt = FCP_MAX_DEVICES;
9888 		alloc_cnt = FCP_MAX_DEVICES;
9889 		if ((res = fc_ulp_getportmap(pptr->port_fp_handle,
9890 		    &tmp_list, &max_cnt, FC_ULP_PLOGI_PRESERVE)) !=
9891 		    FC_SUCCESS) {
9892 			caddr_t msg;
9893 
9894 			(void) fc_ulp_error(res, &msg);
9895 
9896 			/*
9897 			 * this  just means the transport is
9898 			 * busy perhaps building a portmap so,
9899 			 * for now, succeed this port attach
9900 			 * when the transport has a new map,
9901 			 * it'll send us a state change then
9902 			 */
9903 			fcp_log(CE_WARN, pptr->port_dip,
9904 			    "!failed to get port map : %s", msg);
9905 
9906 			res = DDI_SUCCESS;
9907 			break;	/* go return result */
9908 		}
9909 		if (max_cnt > alloc_cnt) {
9910 			alloc_cnt = max_cnt;
9911 		}
9912 
9913 		/*
9914 		 * We are now going to call fcp_statec_callback() ourselves.
9915 		 * By issuing this call we are trying to kick off the enumera-
9916 		 * tion process.
9917 		 */
9918 		/*
9919 		 * let the state change callback do the SCSI device
9920 		 * discovery and create the devinfos
9921 		 */
9922 		fcp_statec_callback(ulph, pptr->port_fp_handle,
9923 		    pptr->port_phys_state, pptr->port_topology, tmp_list,
9924 		    max_cnt, pptr->port_id);
9925 
9926 		res = DDI_SUCCESS;
9927 		break;
9928 	}
9929 
9930 	default:
9931 		/* unknown port state */
9932 		fcp_log(CE_WARN, pptr->port_dip,
9933 		    "!fcp%d: invalid port state at attach=0x%x",
9934 		    instance, pptr->port_phys_state);
9935 
9936 		mutex_enter(&pptr->port_mutex);
9937 		pptr->port_phys_state = FCP_STATE_OFFLINE;
9938 		mutex_exit(&pptr->port_mutex);
9939 
9940 		res = DDI_SUCCESS;
9941 		break;
9942 	}
9943 
9944 	/* free temp list if used */
9945 	if (tmp_list != NULL) {
9946 		kmem_free(tmp_list, sizeof (fc_portmap_t) * alloc_cnt);
9947 	}
9948 
9949 	/* note the attach time */
9950 	pptr->port_attach_time = lbolt64;
9951 
9952 	/* all done */
9953 	return (res);
9954 
9955 	/* a failure we have to clean up after */
9956 fail:
9957 	fcp_log(CE_WARN, pptr->port_dip, "!failed to attach to port");
9958 
9959 	if (soft_state_linked) {
9960 		/* remove this fcp_port from the linked list */
9961 		(void) fcp_soft_state_unlink(pptr);
9962 	}
9963 
9964 	/* unbind and free event set */
9965 	if (pptr->port_ndi_event_hdl) {
9966 		if (event_bind) {
9967 			(void) ndi_event_unbind_set(pptr->port_ndi_event_hdl,
9968 			    &pptr->port_ndi_events, NDI_SLEEP);
9969 		}
9970 		(void) ndi_event_free_hdl(pptr->port_ndi_event_hdl);
9971 	}
9972 
9973 	if (pptr->port_ndi_event_defs) {
9974 		(void) kmem_free(pptr->port_ndi_event_defs,
9975 			sizeof (fcp_ndi_event_defs));
9976 	}
9977 
9978 	/*
9979 	 * Clean up mpxio stuff
9980 	 */
9981 	if (pptr->port_mpxio) {
9982 		(void) mdi_phci_unregister(pptr->port_dip, 0);
9983 		pptr->port_mpxio--;
9984 	}
9985 
9986 	/* undo SCSI HBA setup */
9987 	if (hba_attached) {
9988 		(void) scsi_hba_detach(pptr->port_dip);
9989 	}
9990 	if (pptr->port_tran != NULL) {
9991 		scsi_hba_tran_free(pptr->port_tran);
9992 	}
9993 
9994 	mutex_enter(&fcp_global_mutex);
9995 
9996 	/*
9997 	 * We check soft_state_linked, because it is incremented right before
9998 	 * we call increment fcp_watchdog_init.  Therefore, we know if
9999 	 * soft_state_linked is still FALSE, we do not want to decrement
10000 	 * fcp_watchdog_init or possibly call untimeout.
10001 	 */
10002 
10003 	if (soft_state_linked) {
10004 		if (--fcp_watchdog_init == 0) {
10005 			timeout_id_t	tid = fcp_watchdog_id;
10006 
10007 			mutex_exit(&fcp_global_mutex);
10008 			(void) untimeout(tid);
10009 		} else {
10010 			mutex_exit(&fcp_global_mutex);
10011 		}
10012 	} else {
10013 		mutex_exit(&fcp_global_mutex);
10014 	}
10015 
10016 	if (mutex_initted) {
10017 		mutex_destroy(&pptr->port_mutex);
10018 		mutex_destroy(&pptr->port_pkt_mutex);
10019 	}
10020 
10021 	if (tmp_list != NULL) {
10022 		kmem_free(tmp_list, sizeof (fc_portmap_t) * alloc_cnt);
10023 	}
10024 
10025 	/* this makes pptr invalid */
10026 	ddi_soft_state_free(fcp_softstate, instance);
10027 
10028 	return (DDI_FAILURE);
10029 }
10030 
10031 
10032 static int
10033 fcp_handle_port_detach(struct fcp_port *pptr, int flag, int instance)
10034 {
10035 	int count = 0;
10036 
10037 	mutex_enter(&pptr->port_mutex);
10038 
10039 	/*
10040 	 * if the port is powered down or suspended, nothing else
10041 	 * to do; just return.
10042 	 */
10043 	if (flag != FCP_STATE_DETACHING) {
10044 		if (pptr->port_state & (FCP_STATE_POWER_DOWN |
10045 		    FCP_STATE_SUSPENDED)) {
10046 			pptr->port_state |= flag;
10047 			mutex_exit(&pptr->port_mutex);
10048 			return (FC_SUCCESS);
10049 		}
10050 	}
10051 
10052 	if (pptr->port_state & FCP_STATE_IN_MDI) {
10053 		mutex_exit(&pptr->port_mutex);
10054 		return (FC_FAILURE);
10055 	}
10056 
10057 	FCP_TRACE(fcp_logq, pptr->port_instbuf,
10058 	    fcp_trace, FCP_BUF_LEVEL_2, 0,
10059 	    "fcp_handle_port_detach: port is detaching");
10060 
10061 	pptr->port_state |= flag;
10062 
10063 	/*
10064 	 * Wait for any ongoing reconfig/ipkt to complete, that
10065 	 * ensures the freeing to targets/luns is safe.
10066 	 * No more ref to this port should happen from statec/ioctl
10067 	 * after that as it was removed from the global port list.
10068 	 */
10069 	while (pptr->port_tmp_cnt || pptr->port_ipkt_cnt ||
10070 	    (pptr->port_state & FCP_STATE_IN_WATCHDOG)) {
10071 		/*
10072 		 * Let's give sufficient time for reconfig/ipkt
10073 		 * to complete.
10074 		 */
10075 		if (count++ >= FCP_ICMD_DEADLINE) {
10076 			break;
10077 		}
10078 		mutex_exit(&pptr->port_mutex);
10079 		delay(drv_usectohz(1000000));
10080 		mutex_enter(&pptr->port_mutex);
10081 	}
10082 
10083 	/*
10084 	 * if the driver is still busy then fail to
10085 	 * suspend/power down.
10086 	 */
10087 	if (pptr->port_tmp_cnt || pptr->port_ipkt_cnt ||
10088 	    (pptr->port_state & FCP_STATE_IN_WATCHDOG)) {
10089 		pptr->port_state &= ~flag;
10090 		mutex_exit(&pptr->port_mutex);
10091 		return (FC_FAILURE);
10092 	}
10093 
10094 	if (flag == FCP_STATE_DETACHING) {
10095 		pptr = fcp_soft_state_unlink(pptr);
10096 		ASSERT(pptr != NULL);
10097 	}
10098 
10099 	pptr->port_link_cnt++;
10100 	pptr->port_state |= FCP_STATE_OFFLINE;
10101 	pptr->port_state &= ~(FCP_STATE_ONLINING | FCP_STATE_ONLINE);
10102 
10103 	fcp_update_state(pptr, (FCP_LUN_BUSY | FCP_LUN_MARK),
10104 	    FCP_CAUSE_LINK_DOWN);
10105 	mutex_exit(&pptr->port_mutex);
10106 
10107 	/* kill watch dog timer if we're the last */
10108 	mutex_enter(&fcp_global_mutex);
10109 	if (--fcp_watchdog_init == 0) {
10110 		timeout_id_t	tid = fcp_watchdog_id;
10111 		mutex_exit(&fcp_global_mutex);
10112 		(void) untimeout(tid);
10113 	} else {
10114 		mutex_exit(&fcp_global_mutex);
10115 	}
10116 
10117 	/* clean up the port structures */
10118 	if (flag == FCP_STATE_DETACHING) {
10119 		fcp_cleanup_port(pptr, instance);
10120 	}
10121 
10122 	return (FC_SUCCESS);
10123 }
10124 
10125 
10126 static void
10127 fcp_cleanup_port(struct fcp_port *pptr, int instance)
10128 {
10129 	ASSERT(pptr != NULL);
10130 
10131 	/* unbind and free event set */
10132 	if (pptr->port_ndi_event_hdl) {
10133 		(void) ndi_event_unbind_set(pptr->port_ndi_event_hdl,
10134 		    &pptr->port_ndi_events, NDI_SLEEP);
10135 		(void) ndi_event_free_hdl(pptr->port_ndi_event_hdl);
10136 	}
10137 
10138 	if (pptr->port_ndi_event_defs) {
10139 		(void) kmem_free(pptr->port_ndi_event_defs,
10140 		    sizeof (fcp_ndi_event_defs));
10141 	}
10142 
10143 	/* free the lun/target structures and devinfos */
10144 	fcp_free_targets(pptr);
10145 
10146 	/*
10147 	 * Clean up mpxio stuff
10148 	 */
10149 	if (pptr->port_mpxio) {
10150 		(void) mdi_phci_unregister(pptr->port_dip, 0);
10151 		pptr->port_mpxio--;
10152 	}
10153 
10154 	/* clean up SCSA stuff */
10155 	(void) scsi_hba_detach(pptr->port_dip);
10156 	if (pptr->port_tran != NULL) {
10157 		scsi_hba_tran_free(pptr->port_tran);
10158 	}
10159 
10160 #ifdef  KSTATS_CODE
10161 	/* clean up kstats */
10162 	if (pptr->fcp_ksp != NULL) {
10163 		kstat_delete(pptr->fcp_ksp);
10164 	}
10165 #endif
10166 
10167 	/* clean up soft state mutexes/condition variables */
10168 	mutex_destroy(&pptr->port_mutex);
10169 	mutex_destroy(&pptr->port_pkt_mutex);
10170 
10171 	/* all done with soft state */
10172 	ddi_soft_state_free(fcp_softstate, instance);
10173 }
10174 
10175 /*
10176  *     Function: fcp_kmem_cache_constructor
10177  *
10178  *  Description: This function allocates and initializes the resources required
10179  *		 to build a scsi_pkt structure the target driver.  The result
10180  *		 of the allocation and initialization will be cached in the
10181  *		 memory cache.  As DMA resources may be allocated here, that
10182  *		 means DMA resources will be tied up in the cache manager.
10183  *		 This is a tradeoff that has been made for performance reasons.
10184  *
10185  *     Argument: *buf		Memory to preinitialize.
10186  *		 *arg		FCP port structure (fcp_port).
10187  *		 kmflags	Value passed to kmem_cache_alloc() and
10188  *				propagated to the constructor.
10189  *
10190  * Return Value: 0	Allocation/Initialization was successful.
10191  *		 -1	Allocation or Initialization failed.
10192  *
10193  *
10194  * If the returned value is 0, the buffer is initialized like this:
10195  *
10196  *                  +================================+
10197  *           +----> |	      struct scsi_pkt        |
10198  *           |      |				     |
10199  *           | +--- | pkt_ha_private		     |
10200  *           | |    |				     |
10201  *           | |    +================================+
10202  *           | |
10203  *           | |    +================================+
10204  *           | +--> |       struct fcp_pkt           | <---------+
10205  *           |      |                                |           |
10206  *           +----- | cmd_pkt                        |           |
10207  *                  |                     cmd_fp_pkt | ---+      |
10208  *        +-------->| cmd_fcp_rsp[]                  |    |      |
10209  *        |    +--->| cmd_fcp_cmd[]                  |    |      |
10210  *        |    |    |--------------------------------|    |      |
10211  *        |    |    |	      struct fc_packet       | <--+      |
10212  *        |    |    |				     |           |
10213  *        |    |    |                pkt_ulp_private | ----------+
10214  *        |    |    |                pkt_fca_private | -----+
10215  *        |    |    |                pkt_data_cookie | ---+ |
10216  *        |    |    | pkt_cmdlen                     |    | |
10217  *        |    |(a) | pkt_rsplen                     |    | |
10218  *        |    +----| .......... pkt_cmd ........... | ---|-|---------------+
10219  *        |     (b) |                 pkt_cmd_cookie | ---|-|----------+    |
10220  *        +---------| .......... pkt_resp .......... | ---|-|------+   |    |
10221  *                  |                pkt_resp_cookie | ---|-|--+   |   |    |
10222  *                  | pkt_cmd_dma                    |    | |  |   |   |    |
10223  *                  | pkt_cmd_acc                    |    | |  |   |   |    |
10224  *                  +================================+    | |  |   |   |    |
10225  *                  |         dma_cookies            | <--+ |  |   |   |    |
10226  *                  |				     |      |  |   |   |    |
10227  *                  +================================+      |  |   |   |    |
10228  *                  |         fca_private            | <----+  |   |   |    |
10229  *                  |				     |         |   |   |    |
10230  *                  +================================+         |   |   |    |
10231  *                                                             |   |   |    |
10232  *                                                             |   |   |    |
10233  *                  +================================+   (d)   |   |   |    |
10234  *                  |        fcp_resp cookies        | <-------+   |   |    |
10235  *                  |				     |             |   |    |
10236  *                  +================================+             |   |    |
10237  *                                                                 |   |    |
10238  *                  +================================+   (d)       |   |    |
10239  *                  |		fcp_resp	     | <-----------+   |    |
10240  *                  |   (DMA resources associated)   |                 |    |
10241  *                  +================================+                 |    |
10242  *                                                                     |    |
10243  *                                                                     |    |
10244  *                                                                     |    |
10245  *                  +================================+   (c)           |    |
10246  *                  |	     fcp_cmd cookies         | <---------------+    |
10247  *                  |                                |                      |
10248  *                  +================================+                      |
10249  *                                                                          |
10250  *                  +================================+   (c)                |
10251  *                  |            fcp_cmd             | <--------------------+
10252  *                  |   (DMA resources associated)   |
10253  *                  +================================+
10254  *
10255  * (a) Only if DMA is NOT used for the FCP_CMD buffer.
10256  * (b) Only if DMA is NOT used for the FCP_RESP buffer
10257  * (c) Only if DMA is used for the FCP_CMD buffer.
10258  * (d) Only if DMA is used for the FCP_RESP buffer
10259  */
10260 static int
10261 fcp_kmem_cache_constructor(struct scsi_pkt *pkt, scsi_hba_tran_t *tran,
10262 	int kmflags)
10263 {
10264 	struct fcp_pkt	*cmd;
10265 	struct fcp_port	*pptr;
10266 	fc_packet_t	*fpkt;
10267 
10268 	pptr = (struct fcp_port *)tran->tran_hba_private;
10269 	cmd = (struct fcp_pkt *)pkt->pkt_ha_private;
10270 	bzero(cmd, tran->tran_hba_len);
10271 
10272 	cmd->cmd_pkt = pkt;
10273 	pkt->pkt_cdbp = cmd->cmd_fcp_cmd.fcp_cdb;
10274 	fpkt = (fc_packet_t *)&cmd->cmd_fc_packet;
10275 	cmd->cmd_fp_pkt = fpkt;
10276 
10277 	cmd->cmd_pkt->pkt_ha_private = (opaque_t)cmd;
10278 	cmd->cmd_fp_pkt->pkt_ulp_private = (opaque_t)cmd;
10279 	cmd->cmd_fp_pkt->pkt_fca_private = (opaque_t)((caddr_t)cmd +
10280 	    sizeof (struct fcp_pkt) + pptr->port_dmacookie_sz);
10281 
10282 	fpkt->pkt_data_cookie = (ddi_dma_cookie_t *)((caddr_t)cmd +
10283 	    sizeof (struct fcp_pkt));
10284 
10285 	fpkt->pkt_cmdlen = sizeof (struct fcp_cmd);
10286 	fpkt->pkt_rsplen = FCP_MAX_RSP_IU_SIZE;
10287 
10288 	if (pptr->port_fcp_dma == FC_NO_DVMA_SPACE) {
10289 		/*
10290 		 * The underlying HBA doesn't want to DMA the fcp_cmd or
10291 		 * fcp_resp.  The transfer of information will be done by
10292 		 * bcopy.
10293 		 * The naming of the flags (that is actually a value) is
10294 		 * unfortunate.  FC_NO_DVMA_SPACE doesn't mean "NO VIRTUAL
10295 		 * DMA" but instead "NO DMA".
10296 		 */
10297 		fpkt->pkt_resp_acc = fpkt->pkt_cmd_acc = NULL;
10298 		fpkt->pkt_cmd = (caddr_t)&cmd->cmd_fcp_cmd;
10299 		fpkt->pkt_resp = cmd->cmd_fcp_rsp;
10300 	} else {
10301 		/*
10302 		 * The underlying HBA will dma the fcp_cmd buffer and fcp_resp
10303 		 * buffer.  A buffer is allocated for each one the ddi_dma_*
10304 		 * interfaces.
10305 		 */
10306 		if (fcp_alloc_cmd_resp(pptr, fpkt, kmflags) != FC_SUCCESS) {
10307 			return (-1);
10308 		}
10309 	}
10310 
10311 	return (0);
10312 }
10313 
10314 /*
10315  *     Function: fcp_kmem_cache_destructor
10316  *
10317  *  Description: Called by the destructor of the cache managed by SCSA.
10318  *		 All the resources pre-allocated in fcp_pkt_constructor
10319  *		 and the data also pre-initialized in fcp_pkt_constructor
10320  *		 are freed and uninitialized here.
10321  *
10322  *     Argument: *buf		Memory to uninitialize.
10323  *		 *arg		FCP port structure (fcp_port).
10324  *
10325  * Return Value: None
10326  *
10327  *      Context: kernel
10328  */
10329 static void
10330 fcp_kmem_cache_destructor(struct scsi_pkt *pkt, scsi_hba_tran_t *tran)
10331 {
10332 	struct fcp_pkt	*cmd;
10333 	struct fcp_port	*pptr;
10334 
10335 	pptr = (struct fcp_port *)(tran->tran_hba_private);
10336 	cmd = pkt->pkt_ha_private;
10337 
10338 	if (pptr->port_fcp_dma != FC_NO_DVMA_SPACE) {
10339 		/*
10340 		 * If DMA was used to transfer the FCP_CMD and FCP_RESP, the
10341 		 * buffer and DMA resources allocated to do so are released.
10342 		 */
10343 		fcp_free_cmd_resp(pptr, cmd->cmd_fp_pkt);
10344 	}
10345 }
10346 
10347 /*
10348  *     Function: fcp_alloc_cmd_resp
10349  *
10350  *  Description: This function allocated an FCP_CMD and FCP_RESP buffer that
10351  *		 will be DMAed by the HBA.  The buffer is allocated applying
10352  *		 the DMA requirements for the HBA.  The buffers allocated will
10353  *		 also be bound.  DMA resources are allocated in the process.
10354  *		 They will be released by fcp_free_cmd_resp().
10355  *
10356  *     Argument: *pptr	FCP port.
10357  *		 *fpkt	fc packet for which the cmd and resp packet should be
10358  *			allocated.
10359  *		 flags	Allocation flags.
10360  *
10361  * Return Value: FC_FAILURE
10362  *		 FC_SUCCESS
10363  *
10364  *      Context: User or Kernel context only if flags == KM_SLEEP.
10365  *		 Interrupt context if the KM_SLEEP is not specified.
10366  */
10367 static int
10368 fcp_alloc_cmd_resp(struct fcp_port *pptr, fc_packet_t *fpkt, int flags)
10369 {
10370 	int			rval;
10371 	int			cmd_len;
10372 	int			resp_len;
10373 	ulong_t			real_len;
10374 	int 			(*cb) (caddr_t);
10375 	ddi_dma_cookie_t	pkt_cookie;
10376 	ddi_dma_cookie_t	*cp;
10377 	uint32_t		cnt;
10378 
10379 	cb = (flags == KM_SLEEP) ? DDI_DMA_SLEEP : DDI_DMA_DONTWAIT;
10380 
10381 	cmd_len = fpkt->pkt_cmdlen;
10382 	resp_len = fpkt->pkt_rsplen;
10383 
10384 	ASSERT(fpkt->pkt_cmd_dma == NULL);
10385 
10386 	/* Allocation of a DMA handle used in subsequent calls. */
10387 	if (ddi_dma_alloc_handle(pptr->port_dip, &pptr->port_cmd_dma_attr,
10388 	    cb, NULL, &fpkt->pkt_cmd_dma) != DDI_SUCCESS) {
10389 		return (FC_FAILURE);
10390 	}
10391 
10392 	/* A buffer is allocated that satisfies the DMA requirements. */
10393 	rval = ddi_dma_mem_alloc(fpkt->pkt_cmd_dma, cmd_len,
10394 	    &pptr->port_dma_acc_attr, DDI_DMA_CONSISTENT, cb, NULL,
10395 	    (caddr_t *)&fpkt->pkt_cmd, &real_len, &fpkt->pkt_cmd_acc);
10396 
10397 	if (rval != DDI_SUCCESS) {
10398 		ddi_dma_free_handle(&fpkt->pkt_cmd_dma);
10399 		return (FC_FAILURE);
10400 	}
10401 
10402 	if (real_len < cmd_len) {
10403 		ddi_dma_mem_free(&fpkt->pkt_cmd_acc);
10404 		ddi_dma_free_handle(&fpkt->pkt_cmd_dma);
10405 		return (FC_FAILURE);
10406 	}
10407 
10408 	/* The buffer allocated is DMA bound. */
10409 	rval = ddi_dma_addr_bind_handle(fpkt->pkt_cmd_dma, NULL,
10410 	    fpkt->pkt_cmd, real_len, DDI_DMA_WRITE | DDI_DMA_CONSISTENT,
10411 	    cb, NULL, &pkt_cookie, &fpkt->pkt_cmd_cookie_cnt);
10412 
10413 	if (rval != DDI_DMA_MAPPED) {
10414 		ddi_dma_mem_free(&fpkt->pkt_cmd_acc);
10415 		ddi_dma_free_handle(&fpkt->pkt_cmd_dma);
10416 		return (FC_FAILURE);
10417 	}
10418 
10419 	if (fpkt->pkt_cmd_cookie_cnt >
10420 	    pptr->port_cmd_dma_attr.dma_attr_sgllen) {
10421 		(void) ddi_dma_unbind_handle(fpkt->pkt_cmd_dma);
10422 		ddi_dma_mem_free(&fpkt->pkt_cmd_acc);
10423 		ddi_dma_free_handle(&fpkt->pkt_cmd_dma);
10424 		return (FC_FAILURE);
10425 	}
10426 
10427 	ASSERT(fpkt->pkt_cmd_cookie_cnt != 0);
10428 
10429 	/*
10430 	 * The buffer where the scatter/gather list is going to be built is
10431 	 * allocated.
10432 	 */
10433 	cp = fpkt->pkt_cmd_cookie = (ddi_dma_cookie_t *)kmem_alloc(
10434 	    fpkt->pkt_cmd_cookie_cnt * sizeof (pkt_cookie),
10435 	    KM_NOSLEEP);
10436 
10437 	if (cp == NULL) {
10438 		(void) ddi_dma_unbind_handle(fpkt->pkt_cmd_dma);
10439 		ddi_dma_mem_free(&fpkt->pkt_cmd_acc);
10440 		ddi_dma_free_handle(&fpkt->pkt_cmd_dma);
10441 		return (FC_FAILURE);
10442 	}
10443 
10444 	/*
10445 	 * The scatter/gather list for the buffer we just allocated is built
10446 	 * here.
10447 	 */
10448 	*cp = pkt_cookie;
10449 	cp++;
10450 
10451 	for (cnt = 1; cnt < fpkt->pkt_cmd_cookie_cnt; cnt++, cp++) {
10452 		ddi_dma_nextcookie(fpkt->pkt_cmd_dma,
10453 		    &pkt_cookie);
10454 		*cp = pkt_cookie;
10455 	}
10456 
10457 	ASSERT(fpkt->pkt_resp_dma == NULL);
10458 	if (ddi_dma_alloc_handle(pptr->port_dip, &pptr->port_resp_dma_attr,
10459 	    cb, NULL, &fpkt->pkt_resp_dma) != DDI_SUCCESS) {
10460 		(void) ddi_dma_unbind_handle(fpkt->pkt_cmd_dma);
10461 		ddi_dma_mem_free(&fpkt->pkt_cmd_acc);
10462 		ddi_dma_free_handle(&fpkt->pkt_cmd_dma);
10463 		return (FC_FAILURE);
10464 	}
10465 
10466 	rval = ddi_dma_mem_alloc(fpkt->pkt_resp_dma, resp_len,
10467 	    &pptr->port_dma_acc_attr, DDI_DMA_CONSISTENT, cb, NULL,
10468 	    (caddr_t *)&fpkt->pkt_resp, &real_len,
10469 	    &fpkt->pkt_resp_acc);
10470 
10471 	if (rval != DDI_SUCCESS) {
10472 		ddi_dma_free_handle(&fpkt->pkt_resp_dma);
10473 		(void) ddi_dma_unbind_handle(fpkt->pkt_cmd_dma);
10474 		ddi_dma_mem_free(&fpkt->pkt_cmd_acc);
10475 		ddi_dma_free_handle(&fpkt->pkt_cmd_dma);
10476 		kmem_free(fpkt->pkt_cmd_cookie,
10477 		    fpkt->pkt_cmd_cookie_cnt * sizeof (pkt_cookie));
10478 		return (FC_FAILURE);
10479 	}
10480 
10481 	if (real_len < resp_len) {
10482 		ddi_dma_mem_free(&fpkt->pkt_resp_acc);
10483 		ddi_dma_free_handle(&fpkt->pkt_resp_dma);
10484 		(void) ddi_dma_unbind_handle(fpkt->pkt_cmd_dma);
10485 		ddi_dma_mem_free(&fpkt->pkt_cmd_acc);
10486 		ddi_dma_free_handle(&fpkt->pkt_cmd_dma);
10487 		kmem_free(fpkt->pkt_cmd_cookie,
10488 		    fpkt->pkt_cmd_cookie_cnt * sizeof (pkt_cookie));
10489 		return (FC_FAILURE);
10490 	}
10491 
10492 	rval = ddi_dma_addr_bind_handle(fpkt->pkt_resp_dma, NULL,
10493 	    fpkt->pkt_resp, real_len, DDI_DMA_READ | DDI_DMA_CONSISTENT,
10494 	    cb, NULL, &pkt_cookie, &fpkt->pkt_resp_cookie_cnt);
10495 
10496 	if (rval != DDI_DMA_MAPPED) {
10497 		ddi_dma_mem_free(&fpkt->pkt_resp_acc);
10498 		ddi_dma_free_handle(&fpkt->pkt_resp_dma);
10499 		(void) ddi_dma_unbind_handle(fpkt->pkt_cmd_dma);
10500 		ddi_dma_mem_free(&fpkt->pkt_cmd_acc);
10501 		ddi_dma_free_handle(&fpkt->pkt_cmd_dma);
10502 		kmem_free(fpkt->pkt_cmd_cookie,
10503 		    fpkt->pkt_cmd_cookie_cnt * sizeof (pkt_cookie));
10504 		return (FC_FAILURE);
10505 	}
10506 
10507 	if (fpkt->pkt_resp_cookie_cnt >
10508 	    pptr->port_resp_dma_attr.dma_attr_sgllen) {
10509 		ddi_dma_mem_free(&fpkt->pkt_resp_acc);
10510 		ddi_dma_free_handle(&fpkt->pkt_resp_dma);
10511 		(void) ddi_dma_unbind_handle(fpkt->pkt_cmd_dma);
10512 		ddi_dma_mem_free(&fpkt->pkt_cmd_acc);
10513 		ddi_dma_free_handle(&fpkt->pkt_cmd_dma);
10514 		kmem_free(fpkt->pkt_cmd_cookie,
10515 		    fpkt->pkt_cmd_cookie_cnt * sizeof (pkt_cookie));
10516 		return (FC_FAILURE);
10517 	}
10518 
10519 	ASSERT(fpkt->pkt_resp_cookie_cnt != 0);
10520 
10521 	cp = fpkt->pkt_resp_cookie = (ddi_dma_cookie_t *)kmem_alloc(
10522 	    fpkt->pkt_resp_cookie_cnt * sizeof (pkt_cookie),
10523 	    KM_NOSLEEP);
10524 
10525 	if (cp == NULL) {
10526 		ddi_dma_mem_free(&fpkt->pkt_resp_acc);
10527 		ddi_dma_free_handle(&fpkt->pkt_resp_dma);
10528 		(void) ddi_dma_unbind_handle(fpkt->pkt_cmd_dma);
10529 		ddi_dma_mem_free(&fpkt->pkt_cmd_acc);
10530 		ddi_dma_free_handle(&fpkt->pkt_cmd_dma);
10531 		kmem_free(fpkt->pkt_cmd_cookie,
10532 		    fpkt->pkt_cmd_cookie_cnt * sizeof (pkt_cookie));
10533 		return (FC_FAILURE);
10534 	}
10535 
10536 	*cp = pkt_cookie;
10537 	cp++;
10538 
10539 	for (cnt = 1; cnt < fpkt->pkt_resp_cookie_cnt; cnt++, cp++) {
10540 		ddi_dma_nextcookie(fpkt->pkt_resp_dma,
10541 		    &pkt_cookie);
10542 		*cp = pkt_cookie;
10543 	}
10544 
10545 	return (FC_SUCCESS);
10546 }
10547 
10548 /*
10549  *     Function: fcp_free_cmd_resp
10550  *
10551  *  Description: This function releases the FCP_CMD and FCP_RESP buffer
10552  *		 allocated by fcp_alloc_cmd_resp() and all the resources
10553  *		 associated with them.  That includes the DMA resources and the
10554  *		 buffer allocated for the cookies of each one of them.
10555  *
10556  *     Argument: *pptr		FCP port context.
10557  *		 *fpkt		fc packet containing the cmd and resp packet
10558  *				to be released.
10559  *
10560  * Return Value: None
10561  *
10562  *      Context: Interrupt, User and Kernel context.
10563  */
10564 /* ARGSUSED */
10565 static void
10566 fcp_free_cmd_resp(struct fcp_port *pptr, fc_packet_t *fpkt)
10567 {
10568 	ASSERT(fpkt->pkt_resp_dma != NULL && fpkt->pkt_cmd_dma != NULL);
10569 
10570 	if (fpkt->pkt_resp_dma) {
10571 		(void) ddi_dma_unbind_handle(fpkt->pkt_resp_dma);
10572 		ddi_dma_mem_free(&fpkt->pkt_resp_acc);
10573 		ddi_dma_free_handle(&fpkt->pkt_resp_dma);
10574 	}
10575 
10576 	if (fpkt->pkt_resp_cookie) {
10577 		kmem_free(fpkt->pkt_resp_cookie,
10578 		    fpkt->pkt_resp_cookie_cnt * sizeof (ddi_dma_cookie_t));
10579 		fpkt->pkt_resp_cookie = NULL;
10580 	}
10581 
10582 	if (fpkt->pkt_cmd_dma) {
10583 		(void) ddi_dma_unbind_handle(fpkt->pkt_cmd_dma);
10584 		ddi_dma_mem_free(&fpkt->pkt_cmd_acc);
10585 		ddi_dma_free_handle(&fpkt->pkt_cmd_dma);
10586 	}
10587 
10588 	if (fpkt->pkt_cmd_cookie) {
10589 		kmem_free(fpkt->pkt_cmd_cookie,
10590 		    fpkt->pkt_cmd_cookie_cnt * sizeof (ddi_dma_cookie_t));
10591 		fpkt->pkt_cmd_cookie = NULL;
10592 	}
10593 }
10594 
10595 
10596 /*
10597  * called by the transport to do our own target initialization
10598  *
10599  * can acquire and release the global mutex
10600  */
10601 /* ARGSUSED */
10602 static int
10603 fcp_phys_tgt_init(dev_info_t *hba_dip, dev_info_t *tgt_dip,
10604     scsi_hba_tran_t *hba_tran, struct scsi_device *sd)
10605 {
10606 	int			*words;
10607 	uchar_t			*bytes;
10608 	uint_t			nbytes;
10609 	uint_t			nwords;
10610 	struct fcp_tgt	*ptgt;
10611 	struct fcp_lun	*plun;
10612 	struct fcp_port	*pptr = (struct fcp_port *)
10613 				    hba_tran->tran_hba_private;
10614 
10615 	ASSERT(pptr != NULL);
10616 
10617 	FCP_DTRACE(fcp_logq, pptr->port_instbuf, fcp_trace,
10618 	    FCP_BUF_LEVEL_8, 0,
10619 	    "fcp_phys_tgt_init: called for %s (instance %d)",
10620 	    ddi_get_name(tgt_dip), ddi_get_instance(tgt_dip));
10621 
10622 	/* get our port WWN property */
10623 	bytes = NULL;
10624 	if ((ddi_prop_lookup_byte_array(DDI_DEV_T_ANY, tgt_dip,
10625 	    DDI_PROP_DONTPASS | DDI_PROP_NOTPROM, PORT_WWN_PROP, &bytes,
10626 	    &nbytes) != DDI_PROP_SUCCESS) || nbytes != FC_WWN_SIZE) {
10627 		/* no port WWN property */
10628 		FCP_DTRACE(fcp_logq, pptr->port_instbuf, fcp_trace,
10629 		    FCP_BUF_LEVEL_8, 0,
10630 		    "fcp_phys_tgt_init: Returning DDI_NOT_WELL_FORMED"
10631 		    " for %s (instance %d): bytes=%p nbytes=%x",
10632 		    ddi_get_name(tgt_dip), ddi_get_instance(tgt_dip), bytes,
10633 		    nbytes);
10634 
10635 		if (bytes != NULL) {
10636 			ddi_prop_free(bytes);
10637 		}
10638 
10639 		return (DDI_NOT_WELL_FORMED);
10640 	}
10641 
10642 	words = NULL;
10643 	if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY, tgt_dip,
10644 	    DDI_PROP_DONTPASS | DDI_PROP_NOTPROM,
10645 	    LUN_PROP, &words, &nwords) != DDI_PROP_SUCCESS) {
10646 		ASSERT(bytes != NULL);
10647 
10648 		FCP_DTRACE(fcp_logq, pptr->port_instbuf, fcp_trace,
10649 		    FCP_BUF_LEVEL_8, 0,
10650 		    "fcp_phys_tgt_init: Returning DDI_FAILURE:lun"
10651 		    " for %s (instance %d)", ddi_get_name(tgt_dip),
10652 		    ddi_get_instance(tgt_dip));
10653 
10654 		ddi_prop_free(bytes);
10655 
10656 		return (DDI_NOT_WELL_FORMED);
10657 	}
10658 
10659 	if (nwords == 0) {
10660 		ddi_prop_free(bytes);
10661 		ddi_prop_free(words);
10662 		return (DDI_NOT_WELL_FORMED);
10663 	}
10664 
10665 	ASSERT(bytes != NULL && words != NULL);
10666 
10667 	mutex_enter(&pptr->port_mutex);
10668 	if ((plun = fcp_lookup_lun(pptr, bytes, *words)) == NULL) {
10669 		mutex_exit(&pptr->port_mutex);
10670 		FCP_DTRACE(fcp_logq, pptr->port_instbuf, fcp_trace,
10671 		    FCP_BUF_LEVEL_8, 0,
10672 		    "fcp_phys_tgt_init: Returning DDI_FAILURE: No Lun"
10673 		    " for %s (instance %d)", ddi_get_name(tgt_dip),
10674 		    ddi_get_instance(tgt_dip));
10675 
10676 		ddi_prop_free(bytes);
10677 		ddi_prop_free(words);
10678 
10679 		return (DDI_FAILURE);
10680 	}
10681 
10682 	ASSERT(bcmp(plun->lun_tgt->tgt_port_wwn.raw_wwn, bytes,
10683 	    FC_WWN_SIZE) == 0);
10684 	ASSERT(plun->lun_num == (uint16_t)*words);
10685 
10686 	ddi_prop_free(bytes);
10687 	ddi_prop_free(words);
10688 
10689 	ptgt = plun->lun_tgt;
10690 
10691 	mutex_enter(&ptgt->tgt_mutex);
10692 	plun->lun_tgt_count++;
10693 	hba_tran->tran_tgt_private = plun;
10694 	plun->lun_state |= FCP_SCSI_LUN_TGT_INIT;
10695 	plun->lun_tran = hba_tran;
10696 	mutex_exit(&ptgt->tgt_mutex);
10697 	mutex_exit(&pptr->port_mutex);
10698 
10699 	return (DDI_SUCCESS);
10700 }
10701 
10702 /*ARGSUSED*/
10703 static int
10704 fcp_virt_tgt_init(dev_info_t *hba_dip, dev_info_t *tgt_dip,
10705     scsi_hba_tran_t *hba_tran, struct scsi_device *sd)
10706 {
10707 	int			words;
10708 	uchar_t			*bytes;
10709 	uint_t			nbytes;
10710 	struct fcp_tgt	*ptgt;
10711 	struct fcp_lun	*plun;
10712 	struct fcp_port	*pptr = (struct fcp_port *)
10713 				    hba_tran->tran_hba_private;
10714 	child_info_t		*cip;
10715 
10716 	ASSERT(pptr != NULL);
10717 
10718 	FCP_DTRACE(fcp_logq, pptr->port_instbuf,
10719 	    fcp_trace, FCP_BUF_LEVEL_8, 0,
10720 	    "fcp_virt_tgt_init: called for %s (instance %d) (hba_dip %p),"
10721 	    " (tgt_dip %p)", ddi_get_name(tgt_dip),
10722 	    ddi_get_instance(tgt_dip), hba_dip, tgt_dip);
10723 
10724 	cip = (child_info_t *)sd->sd_private;
10725 	if (cip == NULL) {
10726 		FCP_DTRACE(fcp_logq, pptr->port_instbuf,
10727 		    fcp_trace, FCP_BUF_LEVEL_8, 0,
10728 		    "fcp_virt_tgt_init: Returning DDI_NOT_WELL_FORMED"
10729 		    " for %s (instance %d)", ddi_get_name(tgt_dip),
10730 		    ddi_get_instance(tgt_dip));
10731 
10732 		return (DDI_NOT_WELL_FORMED);
10733 	}
10734 
10735 	/* get our port WWN property */
10736 	bytes = NULL;
10737 	if ((mdi_prop_lookup_byte_array(PIP(cip), PORT_WWN_PROP, &bytes,
10738 	    &nbytes) != DDI_PROP_SUCCESS) || nbytes != FC_WWN_SIZE) {
10739 		if (bytes) {
10740 			(void) mdi_prop_free(bytes);
10741 		}
10742 		return (DDI_NOT_WELL_FORMED);
10743 	}
10744 
10745 	words = 0;
10746 	if (mdi_prop_lookup_int(PIP(cip), LUN_PROP, &words) !=
10747 	    DDI_PROP_SUCCESS) {
10748 		ASSERT(bytes != NULL);
10749 
10750 		FCP_DTRACE(fcp_logq, pptr->port_instbuf,
10751 		    fcp_trace, FCP_BUF_LEVEL_8, 0,
10752 		    "fcp_virt_tgt_init: Returning DDI_FAILURE:lun"
10753 		    " for %s (instance %d)", ddi_get_name(tgt_dip),
10754 		    ddi_get_instance(tgt_dip));
10755 
10756 		(void) mdi_prop_free(bytes);
10757 		return (DDI_NOT_WELL_FORMED);
10758 	}
10759 
10760 	ASSERT(bytes != NULL);
10761 
10762 	mutex_enter(&pptr->port_mutex);
10763 	if ((plun = fcp_lookup_lun(pptr, bytes, words)) == NULL) {
10764 		mutex_exit(&pptr->port_mutex);
10765 		FCP_DTRACE(fcp_logq, pptr->port_instbuf,
10766 		    fcp_trace, FCP_BUF_LEVEL_8, 0,
10767 		    "fcp_virt_tgt_init: Returning DDI_FAILURE: No Lun"
10768 		    " for %s (instance %d)", ddi_get_name(tgt_dip),
10769 		    ddi_get_instance(tgt_dip));
10770 
10771 		(void) mdi_prop_free(bytes);
10772 		(void) mdi_prop_free(&words);
10773 
10774 		return (DDI_FAILURE);
10775 	}
10776 
10777 	ASSERT(bcmp(plun->lun_tgt->tgt_port_wwn.raw_wwn, bytes,
10778 	    FC_WWN_SIZE) == 0);
10779 	ASSERT(plun->lun_num == (uint16_t)words);
10780 
10781 	(void) mdi_prop_free(bytes);
10782 	(void) mdi_prop_free(&words);
10783 
10784 	ptgt = plun->lun_tgt;
10785 
10786 	mutex_enter(&ptgt->tgt_mutex);
10787 	plun->lun_tgt_count++;
10788 	hba_tran->tran_tgt_private = plun;
10789 	plun->lun_state |= FCP_SCSI_LUN_TGT_INIT;
10790 	plun->lun_tran = hba_tran;
10791 	mutex_exit(&ptgt->tgt_mutex);
10792 	mutex_exit(&pptr->port_mutex);
10793 
10794 	return (DDI_SUCCESS);
10795 }
10796 
10797 
10798 /*
10799  * called by the transport to do our own target initialization
10800  *
10801  * can acquire and release the global mutex
10802  */
10803 /* ARGSUSED */
10804 static int
10805 fcp_scsi_tgt_init(dev_info_t *hba_dip, dev_info_t *tgt_dip,
10806     scsi_hba_tran_t *hba_tran, struct scsi_device *sd)
10807 {
10808 	struct fcp_port	*pptr = (struct fcp_port *)
10809 				    hba_tran->tran_hba_private;
10810 	int			rval;
10811 
10812 	ASSERT(pptr != NULL);
10813 
10814 	/*
10815 	 * Child node is getting initialized.  Look at the mpxio component
10816 	 * type on the child device to see if this device is mpxio managed
10817 	 * or not.
10818 	 */
10819 	if (mdi_component_is_client(tgt_dip, NULL) == MDI_SUCCESS) {
10820 		rval = fcp_virt_tgt_init(hba_dip, tgt_dip, hba_tran, sd);
10821 	} else {
10822 		rval = fcp_phys_tgt_init(hba_dip, tgt_dip, hba_tran, sd);
10823 	}
10824 
10825 	return (rval);
10826 }
10827 
10828 
10829 /* ARGSUSED */
10830 static void
10831 fcp_scsi_tgt_free(dev_info_t *hba_dip, dev_info_t *tgt_dip,
10832     scsi_hba_tran_t *hba_tran, struct scsi_device *sd)
10833 {
10834 	struct fcp_lun	*plun = hba_tran->tran_tgt_private;
10835 	struct fcp_tgt 	*ptgt;
10836 
10837 	FCP_DTRACE(fcp_logq, LUN_PORT->port_instbuf,
10838 	    fcp_trace, FCP_BUF_LEVEL_8, 0,
10839 	    "fcp_scsi_tgt_free: called for tran %s%d, dev %s%d",
10840 	    ddi_get_name(hba_dip), ddi_get_instance(hba_dip),
10841 	    ddi_get_name(tgt_dip), ddi_get_instance(tgt_dip));
10842 
10843 	if (plun == NULL) {
10844 		return;
10845 	}
10846 	ptgt = plun->lun_tgt;
10847 
10848 	ASSERT(ptgt != NULL);
10849 
10850 	mutex_enter(&ptgt->tgt_mutex);
10851 	ASSERT(plun->lun_tgt_count > 0);
10852 
10853 	if (--plun->lun_tgt_count == 0) {
10854 		plun->lun_state &= ~FCP_SCSI_LUN_TGT_INIT;
10855 	}
10856 	plun->lun_tran = NULL;
10857 	mutex_exit(&ptgt->tgt_mutex);
10858 }
10859 
10860 /*
10861  *     Function: fcp_scsi_start
10862  *
10863  *  Description: This function is called by the target driver to request a
10864  *		 command to be sent.
10865  *
10866  *     Argument: *ap		SCSI address of the device.
10867  *		 *pkt		SCSI packet containing the cmd to send.
10868  *
10869  * Return Value: TRAN_ACCEPT
10870  *		 TRAN_BUSY
10871  *		 TRAN_BADPKT
10872  *		 TRAN_FATAL_ERROR
10873  */
10874 static int
10875 fcp_scsi_start(struct scsi_address *ap, struct scsi_pkt *pkt)
10876 {
10877 	struct fcp_port	*pptr = ADDR2FCP(ap);
10878 	struct fcp_lun	*plun = ADDR2LUN(ap);
10879 	struct fcp_pkt	*cmd = PKT2CMD(pkt);
10880 	struct fcp_tgt	*ptgt = plun->lun_tgt;
10881 	int			rval;
10882 
10883 	/* ensure command isn't already issued */
10884 	ASSERT(cmd->cmd_state != FCP_PKT_ISSUED);
10885 
10886 	FCP_DTRACE(fcp_logq, pptr->port_instbuf,
10887 	    fcp_trace, FCP_BUF_LEVEL_9, 0,
10888 	    "fcp_transport Invoked for %x", plun->lun_tgt->tgt_d_id);
10889 
10890 	/*
10891 	 * It is strange that we enter the fcp_port mutex and the target
10892 	 * mutex to check the lun state (which has a mutex of its own).
10893 	 */
10894 	mutex_enter(&pptr->port_mutex);
10895 	mutex_enter(&ptgt->tgt_mutex);
10896 
10897 	/*
10898 	 * If the device is offline and is not in the process of coming
10899 	 * online, fail the request.
10900 	 */
10901 
10902 	if ((plun->lun_state & FCP_LUN_OFFLINE) &&
10903 	    !(plun->lun_state & FCP_LUN_ONLINING)) {
10904 		mutex_exit(&ptgt->tgt_mutex);
10905 		mutex_exit(&pptr->port_mutex);
10906 
10907 		if (cmd->cmd_fp_pkt->pkt_pd == NULL)
10908 			pkt->pkt_reason = CMD_DEV_GONE;
10909 
10910 		return (TRAN_FATAL_ERROR);
10911 	}
10912 	cmd->cmd_fp_pkt->pkt_timeout = pkt->pkt_time;
10913 
10914 	/*
10915 	 * If we are suspended, kernel is trying to dump, so don't
10916 	 * block, fail or defer requests - send them down right away.
10917 	 * NOTE: If we are in panic (i.e. trying to dump), we can't
10918 	 * assume we have been suspended.  There is hardware such as
10919 	 * the v880 that doesn't do PM.  Thus, the check for
10920 	 * ddi_in_panic.
10921 	 *
10922 	 * If FCP_STATE_IN_CB_DEVC is set, devices are in the process
10923 	 * of changing.  So, if we can queue the packet, do it.  Eventually,
10924 	 * either the device will have gone away or changed and we can fail
10925 	 * the request, or we can proceed if the device didn't change.
10926 	 *
10927 	 * If the pd in the target or the packet is NULL it's probably
10928 	 * because the device has gone away, we allow the request to be
10929 	 * put on the internal queue here in case the device comes back within
10930 	 * the offline timeout. fctl will fix up the pd's if the tgt_pd_handle
10931 	 * has gone NULL, while fcp deals cases where pkt_pd is NULL. pkt_pd
10932 	 * could be NULL because the device was disappearing during or since
10933 	 * packet initialization.
10934 	 */
10935 
10936 	if (((plun->lun_state & FCP_LUN_BUSY) && (!(pptr->port_state &
10937 	    FCP_STATE_SUSPENDED)) && !ddi_in_panic()) ||
10938 	    (pptr->port_state & (FCP_STATE_ONLINING | FCP_STATE_IN_CB_DEVC)) ||
10939 	    (ptgt->tgt_pd_handle == NULL) ||
10940 	    (cmd->cmd_fp_pkt->pkt_pd == NULL)) {
10941 		/*
10942 		 * If ((LUN is busy AND
10943 		 *	LUN not suspended AND
10944 		 *	The system is not in panic state) OR
10945 		 *	(The port is coming up))
10946 		 *
10947 		 * We check to see if the any of the flags FLAG_NOINTR or
10948 		 * FLAG_NOQUEUE is set.  If one of them is set the value
10949 		 * returned will be TRAN_BUSY.  If not, the request is queued.
10950 		 */
10951 		mutex_exit(&ptgt->tgt_mutex);
10952 		mutex_exit(&pptr->port_mutex);
10953 
10954 		/* see if using interrupts is allowed (so queueing'll work) */
10955 		if (pkt->pkt_flags & FLAG_NOINTR) {
10956 			pkt->pkt_resid = 0;
10957 			return (TRAN_BUSY);
10958 		}
10959 		if (pkt->pkt_flags & FLAG_NOQUEUE) {
10960 			FCP_DTRACE(fcp_logq, pptr->port_instbuf,
10961 			    fcp_trace, FCP_BUF_LEVEL_9, 0,
10962 			    "fcp_scsi_start: lun busy for pkt %p", pkt);
10963 			return (TRAN_BUSY);
10964 		}
10965 #ifdef	DEBUG
10966 		mutex_enter(&pptr->port_pkt_mutex);
10967 		pptr->port_npkts++;
10968 		mutex_exit(&pptr->port_pkt_mutex);
10969 #endif /* DEBUG */
10970 
10971 		/* got queue up the pkt for later */
10972 		fcp_queue_pkt(pptr, cmd);
10973 		return (TRAN_ACCEPT);
10974 	}
10975 	cmd->cmd_state = FCP_PKT_ISSUED;
10976 
10977 	mutex_exit(&ptgt->tgt_mutex);
10978 	mutex_exit(&pptr->port_mutex);
10979 
10980 	/*
10981 	 * Now that we released the mutexes, what was protected by them can
10982 	 * change.
10983 	 */
10984 
10985 	/*
10986 	 * If there is a reconfiguration in progress, wait for it to complete.
10987 	 */
10988 	fcp_reconfig_wait(pptr);
10989 
10990 	cmd->cmd_timeout = pkt->pkt_time ? fcp_watchdog_time +
10991 	    pkt->pkt_time : 0;
10992 
10993 	/* prepare the packet */
10994 
10995 	fcp_prepare_pkt(pptr, cmd, plun);
10996 
10997 	if (cmd->cmd_pkt->pkt_time) {
10998 		cmd->cmd_fp_pkt->pkt_timeout = cmd->cmd_pkt->pkt_time;
10999 	} else {
11000 		cmd->cmd_fp_pkt->pkt_timeout = 5 * 60 * 60;
11001 	}
11002 
11003 	/*
11004 	 * if interrupts aren't allowed (e.g. at dump time) then we'll
11005 	 * have to do polled I/O
11006 	 */
11007 	if (pkt->pkt_flags & FLAG_NOINTR) {
11008 		cmd->cmd_state &= ~FCP_PKT_ISSUED;
11009 		return (fcp_dopoll(pptr, cmd));
11010 	}
11011 
11012 #ifdef	DEBUG
11013 	mutex_enter(&pptr->port_pkt_mutex);
11014 	pptr->port_npkts++;
11015 	mutex_exit(&pptr->port_pkt_mutex);
11016 #endif /* DEBUG */
11017 
11018 	rval = fcp_transport(pptr->port_fp_handle, cmd->cmd_fp_pkt, 0);
11019 	if (rval == FC_SUCCESS) {
11020 		FCP_DTRACE(fcp_logq, pptr->port_instbuf,
11021 		    fcp_trace, FCP_BUF_LEVEL_9, 0,
11022 		    "fcp_transport success for %x", plun->lun_tgt->tgt_d_id);
11023 		return (TRAN_ACCEPT);
11024 	}
11025 
11026 	cmd->cmd_state = FCP_PKT_IDLE;
11027 
11028 #ifdef	DEBUG
11029 	mutex_enter(&pptr->port_pkt_mutex);
11030 	pptr->port_npkts--;
11031 	mutex_exit(&pptr->port_pkt_mutex);
11032 #endif /* DEBUG */
11033 
11034 	/*
11035 	 * For lack of clearer definitions, choose
11036 	 * between TRAN_BUSY and TRAN_FATAL_ERROR.
11037 	 */
11038 
11039 	if (rval == FC_TRAN_BUSY) {
11040 		pkt->pkt_resid = 0;
11041 		rval = TRAN_BUSY;
11042 	} else {
11043 		mutex_enter(&ptgt->tgt_mutex);
11044 		if (plun->lun_state & FCP_LUN_OFFLINE) {
11045 			child_info_t	*cip;
11046 
11047 			mutex_enter(&plun->lun_mutex);
11048 			cip = plun->lun_cip;
11049 			mutex_exit(&plun->lun_mutex);
11050 
11051 			FCP_TRACE(fcp_logq, pptr->port_instbuf,
11052 			    fcp_trace, FCP_BUF_LEVEL_6, 0,
11053 			    "fcp_transport failed 2 for %x: %x; dip=%p",
11054 			    plun->lun_tgt->tgt_d_id, rval, cip);
11055 
11056 			rval = TRAN_FATAL_ERROR;
11057 		} else {
11058 			if (pkt->pkt_flags & FLAG_NOQUEUE) {
11059 				FCP_DTRACE(fcp_logq, pptr->port_instbuf,
11060 				    fcp_trace, FCP_BUF_LEVEL_9, 0,
11061 				    "fcp_scsi_start: FC_BUSY for pkt %p",
11062 				    pkt);
11063 				rval = TRAN_BUSY;
11064 			} else {
11065 				rval = TRAN_ACCEPT;
11066 				fcp_queue_pkt(pptr, cmd);
11067 			}
11068 		}
11069 		mutex_exit(&ptgt->tgt_mutex);
11070 	}
11071 
11072 	return (rval);
11073 }
11074 
11075 /*
11076  * called by the transport to abort a packet
11077  */
11078 /*ARGSUSED*/
11079 static int
11080 fcp_scsi_abort(struct scsi_address *ap, struct scsi_pkt *pkt)
11081 {
11082 	int tgt_cnt;
11083 	struct fcp_port  	*pptr = ADDR2FCP(ap);
11084 	struct fcp_lun 	*plun = ADDR2LUN(ap);
11085 	struct fcp_tgt 	*ptgt = plun->lun_tgt;
11086 
11087 	if (pkt == NULL) {
11088 		if (ptgt) {
11089 			mutex_enter(&ptgt->tgt_mutex);
11090 			tgt_cnt = ptgt->tgt_change_cnt;
11091 			mutex_exit(&ptgt->tgt_mutex);
11092 			fcp_abort_all(pptr, ptgt, plun, tgt_cnt);
11093 			return (TRUE);
11094 		}
11095 	}
11096 	return (FALSE);
11097 }
11098 
11099 
11100 /*
11101  * Perform reset
11102  */
11103 int
11104 fcp_scsi_reset(struct scsi_address *ap, int level)
11105 {
11106 	int 			rval = 0;
11107 	struct fcp_port  	*pptr = ADDR2FCP(ap);
11108 	struct fcp_lun 	*plun = ADDR2LUN(ap);
11109 	struct fcp_tgt 	*ptgt = plun->lun_tgt;
11110 
11111 	if (level == RESET_ALL) {
11112 		if (fcp_linkreset(pptr, ap, KM_NOSLEEP) == FC_SUCCESS) {
11113 			rval = 1;
11114 		}
11115 	} else if (level == RESET_TARGET || level == RESET_LUN) {
11116 		/*
11117 		 * If we are in the middle of discovery, return
11118 		 * SUCCESS as this target will be rediscovered
11119 		 * anyway
11120 		 */
11121 		mutex_enter(&ptgt->tgt_mutex);
11122 		if (ptgt->tgt_state & (FCP_TGT_OFFLINE | FCP_TGT_BUSY)) {
11123 			mutex_exit(&ptgt->tgt_mutex);
11124 			return (1);
11125 		}
11126 		mutex_exit(&ptgt->tgt_mutex);
11127 
11128 		if (fcp_reset_target(ap, level) == FC_SUCCESS) {
11129 			rval = 1;
11130 		}
11131 	}
11132 	return (rval);
11133 }
11134 
11135 
11136 /*
11137  * called by the framework to get a SCSI capability
11138  */
11139 static int
11140 fcp_scsi_getcap(struct scsi_address *ap, char *cap, int whom)
11141 {
11142 	return (fcp_commoncap(ap, cap, 0, whom, 0));
11143 }
11144 
11145 
11146 /*
11147  * called by the framework to set a SCSI capability
11148  */
11149 static int
11150 fcp_scsi_setcap(struct scsi_address *ap, char *cap, int value, int whom)
11151 {
11152 	return (fcp_commoncap(ap, cap, value, whom, 1));
11153 }
11154 
11155 /*
11156  *     Function: fcp_pkt_setup
11157  *
11158  *  Description: This function sets up the scsi_pkt structure passed by the
11159  *		 caller. This function assumes fcp_pkt_constructor has been
11160  *		 called previously for the packet passed by the caller.  If
11161  *		 successful this call will have the following results:
11162  *
11163  *		   - The resources needed that will be constant through out
11164  *		     the whole transaction are allocated.
11165  *		   - The fields that will be constant through out the whole
11166  *		     transaction are initialized.
11167  *		   - The scsi packet will be linked to the LUN structure
11168  *		     addressed by the transaction.
11169  *
11170  *     Argument:
11171  *		 *pkt		Pointer to a scsi_pkt structure.
11172  *		 callback
11173  *		 arg
11174  *
11175  * Return Value: 0	Success
11176  *		 !0	Failure
11177  *
11178  *      Context: Kernel context or interrupt context
11179  */
11180 /* ARGSUSED */
11181 static int
11182 fcp_pkt_setup(struct scsi_pkt *pkt,
11183     int (*callback)(caddr_t arg),
11184     caddr_t arg)
11185 {
11186 	struct fcp_pkt 	*cmd;
11187 	struct fcp_port	*pptr;
11188 	struct fcp_lun 	*plun;
11189 	struct fcp_tgt 	*ptgt;
11190 	int 		kf;
11191 	fc_packet_t	*fpkt;
11192 	fc_frame_hdr_t	*hp;
11193 
11194 	pptr = ADDR2FCP(&pkt->pkt_address);
11195 	plun = ADDR2LUN(&pkt->pkt_address);
11196 	ptgt = plun->lun_tgt;
11197 
11198 	cmd = (struct fcp_pkt *)pkt->pkt_ha_private;
11199 	fpkt = cmd->cmd_fp_pkt;
11200 
11201 	/*
11202 	 * this request is for dma allocation only
11203 	 */
11204 	/*
11205 	 * First step of fcp_scsi_init_pkt: pkt allocation
11206 	 * We determine if the caller is willing to wait for the
11207 	 * resources.
11208 	 */
11209 	kf = (callback == SLEEP_FUNC) ? KM_SLEEP: KM_NOSLEEP;
11210 
11211 	/*
11212 	 * Selective zeroing of the pkt.
11213 	 */
11214 	cmd->cmd_back = NULL;
11215 	cmd->cmd_next = NULL;
11216 
11217 	/*
11218 	 * Zero out fcp command
11219 	 */
11220 	bzero(&cmd->cmd_fcp_cmd, sizeof (cmd->cmd_fcp_cmd));
11221 
11222 	cmd->cmd_state = FCP_PKT_IDLE;
11223 
11224 	fpkt = cmd->cmd_fp_pkt;
11225 	fpkt->pkt_data_acc = NULL;
11226 
11227 	mutex_enter(&ptgt->tgt_mutex);
11228 	fpkt->pkt_pd = ptgt->tgt_pd_handle;
11229 
11230 	if (fc_ulp_init_packet(pptr->port_fp_handle, fpkt, kf)
11231 	    != FC_SUCCESS) {
11232 		mutex_exit(&ptgt->tgt_mutex);
11233 		return (-1);
11234 	}
11235 
11236 	mutex_exit(&ptgt->tgt_mutex);
11237 
11238 	/* Fill in the Fabric Channel Header */
11239 	hp = &fpkt->pkt_cmd_fhdr;
11240 	hp->r_ctl = R_CTL_COMMAND;
11241 	hp->rsvd = 0;
11242 	hp->type = FC_TYPE_SCSI_FCP;
11243 	hp->f_ctl = F_CTL_SEQ_INITIATIVE | F_CTL_FIRST_SEQ;
11244 	hp->seq_id = 0;
11245 	hp->df_ctl  = 0;
11246 	hp->seq_cnt = 0;
11247 	hp->ox_id = 0xffff;
11248 	hp->rx_id = 0xffff;
11249 	hp->ro = 0;
11250 
11251 	/*
11252 	 * A doubly linked list (cmd_forw, cmd_back) is built
11253 	 * out of every allocated packet on a per-lun basis
11254 	 *
11255 	 * The packets are maintained in the list so as to satisfy
11256 	 * scsi_abort() requests. At present (which is unlikely to
11257 	 * change in the future) nobody performs a real scsi_abort
11258 	 * in the SCSI target drivers (as they don't keep the packets
11259 	 * after doing scsi_transport - so they don't know how to
11260 	 * abort a packet other than sending a NULL to abort all
11261 	 * outstanding packets)
11262 	 */
11263 	mutex_enter(&plun->lun_mutex);
11264 	if ((cmd->cmd_forw = plun->lun_pkt_head) != NULL) {
11265 		plun->lun_pkt_head->cmd_back = cmd;
11266 	} else {
11267 		plun->lun_pkt_tail = cmd;
11268 	}
11269 	plun->lun_pkt_head = cmd;
11270 	mutex_exit(&plun->lun_mutex);
11271 	return (0);
11272 }
11273 
11274 /*
11275  *     Function: fcp_pkt_teardown
11276  *
11277  *  Description: This function releases a scsi_pkt structure and all the
11278  *		 resources attached to it.
11279  *
11280  *     Argument: *pkt		Pointer to a scsi_pkt structure.
11281  *
11282  * Return Value: None
11283  *
11284  *      Context: User, Kernel or Interrupt context.
11285  */
11286 static void
11287 fcp_pkt_teardown(struct scsi_pkt *pkt)
11288 {
11289 	struct fcp_port	*pptr = ADDR2FCP(&pkt->pkt_address);
11290 	struct fcp_lun	*plun = ADDR2LUN(&pkt->pkt_address);
11291 	struct fcp_pkt	*cmd = (struct fcp_pkt *)pkt->pkt_ha_private;
11292 
11293 	/*
11294 	 * Remove the packet from the per-lun list
11295 	 */
11296 	mutex_enter(&plun->lun_mutex);
11297 	if (cmd->cmd_back) {
11298 		ASSERT(cmd != plun->lun_pkt_head);
11299 		cmd->cmd_back->cmd_forw = cmd->cmd_forw;
11300 	} else {
11301 		ASSERT(cmd == plun->lun_pkt_head);
11302 		plun->lun_pkt_head = cmd->cmd_forw;
11303 	}
11304 
11305 	if (cmd->cmd_forw) {
11306 		cmd->cmd_forw->cmd_back = cmd->cmd_back;
11307 	} else {
11308 		ASSERT(cmd == plun->lun_pkt_tail);
11309 		plun->lun_pkt_tail = cmd->cmd_back;
11310 	}
11311 
11312 	mutex_exit(&plun->lun_mutex);
11313 
11314 	(void) fc_ulp_uninit_packet(pptr->port_fp_handle, cmd->cmd_fp_pkt);
11315 }
11316 
11317 /*
11318  * Routine for reset notification setup, to register or cancel.
11319  * This function is called by SCSA
11320  */
11321 /*ARGSUSED*/
11322 static int
11323 fcp_scsi_reset_notify(struct scsi_address *ap, int flag,
11324     void (*callback)(caddr_t), caddr_t arg)
11325 {
11326 	struct fcp_port *pptr = ADDR2FCP(ap);
11327 
11328 	return (scsi_hba_reset_notify_setup(ap, flag, callback, arg,
11329 	    &pptr->port_mutex, &pptr->port_reset_notify_listf));
11330 }
11331 
11332 
11333 static int
11334 fcp_scsi_bus_get_eventcookie(dev_info_t *dip, dev_info_t *rdip, char *name,
11335     ddi_eventcookie_t *event_cookiep)
11336 {
11337 	struct fcp_port *pptr = fcp_dip2port(dip);
11338 
11339 	if (pptr == NULL) {
11340 		return (DDI_FAILURE);
11341 	}
11342 
11343 	return (ndi_event_retrieve_cookie(pptr->port_ndi_event_hdl, rdip, name,
11344 	    event_cookiep, NDI_EVENT_NOPASS));
11345 }
11346 
11347 
11348 static int
11349 fcp_scsi_bus_add_eventcall(dev_info_t *dip, dev_info_t *rdip,
11350     ddi_eventcookie_t eventid, void (*callback)(), void *arg,
11351     ddi_callback_id_t *cb_id)
11352 {
11353 	struct fcp_port *pptr = fcp_dip2port(dip);
11354 
11355 	if (pptr == NULL) {
11356 		return (DDI_FAILURE);
11357 	}
11358 
11359 	return (ndi_event_add_callback(pptr->port_ndi_event_hdl, rdip,
11360 	    eventid, callback, arg, NDI_SLEEP, cb_id));
11361 }
11362 
11363 
11364 static int
11365 fcp_scsi_bus_remove_eventcall(dev_info_t *dip, ddi_callback_id_t cb_id)
11366 {
11367 
11368 	struct fcp_port *pptr = fcp_dip2port(dip);
11369 
11370 	if (pptr == NULL) {
11371 		return (DDI_FAILURE);
11372 	}
11373 	return (ndi_event_remove_callback(pptr->port_ndi_event_hdl, cb_id));
11374 }
11375 
11376 
11377 /*
11378  * called by the transport to post an event
11379  */
11380 static int
11381 fcp_scsi_bus_post_event(dev_info_t *dip, dev_info_t *rdip,
11382     ddi_eventcookie_t eventid, void *impldata)
11383 {
11384 	struct fcp_port *pptr = fcp_dip2port(dip);
11385 
11386 	if (pptr == NULL) {
11387 		return (DDI_FAILURE);
11388 	}
11389 
11390 	return (ndi_event_run_callbacks(pptr->port_ndi_event_hdl, rdip,
11391 	    eventid, impldata));
11392 }
11393 
11394 
11395 /*
11396  * A target in in many cases in Fibre Channel has a one to one relation
11397  * with a port identifier (which is also known as D_ID and also as AL_PA
11398  * in private Loop) On Fibre Channel-to-SCSI bridge boxes a target reset
11399  * will most likely result in resetting all LUNs (which means a reset will
11400  * occur on all the SCSI devices connected at the other end of the bridge)
11401  * That is the latest favorite topic for discussion, for, one can debate as
11402  * hot as one likes and come up with arguably a best solution to one's
11403  * satisfaction
11404  *
11405  * To stay on track and not digress much, here are the problems stated
11406  * briefly:
11407  *
11408  *	SCSA doesn't define RESET_LUN, It defines RESET_TARGET, but the
11409  *	target drivers use RESET_TARGET even if their instance is on a
11410  *	LUN. Doesn't that sound a bit broken ?
11411  *
11412  *	FCP SCSI (the current spec) only defines RESET TARGET in the
11413  *	control fields of an FCP_CMND structure. It should have been
11414  *	fixed right there, giving flexibility to the initiators to
11415  *	minimize havoc that could be caused by resetting a target.
11416  */
11417 static int
11418 fcp_reset_target(struct scsi_address *ap, int level)
11419 {
11420 	int			rval = FC_FAILURE;
11421 	char			lun_id[25];
11422 	struct fcp_port  	*pptr = ADDR2FCP(ap);
11423 	struct fcp_lun 	*plun = ADDR2LUN(ap);
11424 	struct fcp_tgt 	*ptgt = plun->lun_tgt;
11425 	struct scsi_pkt		*pkt;
11426 	struct fcp_pkt	*cmd;
11427 	struct fcp_rsp		*rsp;
11428 	uint32_t		tgt_cnt;
11429 	struct fcp_rsp_info	*rsp_info;
11430 	struct fcp_reset_elem	*p;
11431 	int			bval;
11432 
11433 	if ((p = kmem_alloc(sizeof (struct fcp_reset_elem),
11434 	    KM_NOSLEEP)) == NULL) {
11435 		return (rval);
11436 	}
11437 
11438 	mutex_enter(&ptgt->tgt_mutex);
11439 	if (level == RESET_TARGET) {
11440 		if (ptgt->tgt_state & (FCP_TGT_OFFLINE | FCP_TGT_BUSY)) {
11441 			mutex_exit(&ptgt->tgt_mutex);
11442 			kmem_free(p, sizeof (struct fcp_reset_elem));
11443 			return (rval);
11444 		}
11445 		fcp_update_tgt_state(ptgt, FCP_SET, FCP_LUN_BUSY);
11446 		(void) strcpy(lun_id, " ");
11447 	} else {
11448 		if (plun->lun_state & (FCP_LUN_OFFLINE | FCP_LUN_BUSY)) {
11449 			mutex_exit(&ptgt->tgt_mutex);
11450 			kmem_free(p, sizeof (struct fcp_reset_elem));
11451 			return (rval);
11452 		}
11453 		fcp_update_lun_state(plun, FCP_SET, FCP_LUN_BUSY);
11454 
11455 		(void) sprintf(lun_id, ", LUN=%d", plun->lun_num);
11456 	}
11457 	tgt_cnt = ptgt->tgt_change_cnt;
11458 
11459 	mutex_exit(&ptgt->tgt_mutex);
11460 
11461 	if ((pkt = scsi_init_pkt(ap, NULL, NULL, 0, 0,
11462 	    0, 0, NULL, 0)) == NULL) {
11463 		kmem_free(p, sizeof (struct fcp_reset_elem));
11464 		mutex_enter(&ptgt->tgt_mutex);
11465 		fcp_update_tgt_state(ptgt, FCP_RESET, FCP_LUN_BUSY);
11466 		mutex_exit(&ptgt->tgt_mutex);
11467 		return (rval);
11468 	}
11469 	pkt->pkt_time = FCP_POLL_TIMEOUT;
11470 
11471 	/* fill in cmd part of packet */
11472 	cmd = PKT2CMD(pkt);
11473 	if (level == RESET_TARGET) {
11474 		cmd->cmd_fcp_cmd.fcp_cntl.cntl_reset_tgt = 1;
11475 	} else {
11476 		cmd->cmd_fcp_cmd.fcp_cntl.cntl_reset_lun = 1;
11477 	}
11478 	cmd->cmd_fp_pkt->pkt_comp = NULL;
11479 	cmd->cmd_pkt->pkt_flags |= FLAG_NOINTR;
11480 
11481 	/* prepare a packet for transport */
11482 	fcp_prepare_pkt(pptr, cmd, plun);
11483 
11484 	if (cmd->cmd_pkt->pkt_time) {
11485 		cmd->cmd_fp_pkt->pkt_timeout = cmd->cmd_pkt->pkt_time;
11486 	} else {
11487 		cmd->cmd_fp_pkt->pkt_timeout = 5 * 60 * 60;
11488 	}
11489 
11490 	(void) fc_ulp_busy_port(pptr->port_fp_handle);
11491 	bval = fcp_dopoll(pptr, cmd);
11492 	fc_ulp_idle_port(pptr->port_fp_handle);
11493 
11494 	/* submit the packet */
11495 	if (bval == TRAN_ACCEPT) {
11496 		int error = 3;
11497 
11498 		rsp = (struct fcp_rsp *)cmd->cmd_fcp_rsp;
11499 		rsp_info = (struct fcp_rsp_info *)(cmd->cmd_fcp_rsp +
11500 		    sizeof (struct fcp_rsp));
11501 
11502 		if (rsp->fcp_u.fcp_status.rsp_len_set) {
11503 			if (fcp_validate_fcp_response(rsp, pptr) ==
11504 			    FC_SUCCESS) {
11505 				if (pptr->port_fcp_dma != FC_NO_DVMA_SPACE) {
11506 					FCP_CP_IN(cmd->cmd_fp_pkt->pkt_resp +
11507 					    sizeof (struct fcp_rsp), rsp_info,
11508 					    cmd->cmd_fp_pkt->pkt_resp_acc,
11509 					    sizeof (struct fcp_rsp_info));
11510 				}
11511 				if (rsp_info->rsp_code == FCP_NO_FAILURE) {
11512 					rval = FC_SUCCESS;
11513 					error = 0;
11514 				} else {
11515 					error = 1;
11516 				}
11517 			} else {
11518 				error = 2;
11519 			}
11520 		}
11521 
11522 		switch (error) {
11523 		case 0:
11524 			fcp_log(CE_WARN, pptr->port_dip,
11525 			    "!FCP: WWN 0x%08x%08x %s reset successfully",
11526 			    *((int *)&ptgt->tgt_port_wwn.raw_wwn[0]),
11527 			    *((int *)&ptgt->tgt_port_wwn.raw_wwn[4]), lun_id);
11528 			break;
11529 
11530 		case 1:
11531 			fcp_log(CE_WARN, pptr->port_dip,
11532 			    "!FCP: Reset to WWN  0x%08x%08x %s failed,"
11533 			    " response code=%x",
11534 			    *((int *)&ptgt->tgt_port_wwn.raw_wwn[0]),
11535 			    *((int *)&ptgt->tgt_port_wwn.raw_wwn[4]), lun_id,
11536 			    rsp_info->rsp_code);
11537 			break;
11538 
11539 		case 2:
11540 			fcp_log(CE_WARN, pptr->port_dip,
11541 			    "!FCP: Reset to WWN 0x%08x%08x %s failed,"
11542 			    " Bad FCP response values: rsvd1=%x,"
11543 			    " rsvd2=%x, sts-rsvd1=%x, sts-rsvd2=%x,"
11544 			    " rsplen=%x, senselen=%x",
11545 			    *((int *)&ptgt->tgt_port_wwn.raw_wwn[0]),
11546 			    *((int *)&ptgt->tgt_port_wwn.raw_wwn[4]), lun_id,
11547 			    rsp->reserved_0, rsp->reserved_1,
11548 			    rsp->fcp_u.fcp_status.reserved_0,
11549 			    rsp->fcp_u.fcp_status.reserved_1,
11550 			    rsp->fcp_response_len, rsp->fcp_sense_len);
11551 			break;
11552 
11553 		default:
11554 			fcp_log(CE_WARN, pptr->port_dip,
11555 			    "!FCP: Reset to WWN  0x%08x%08x %s failed",
11556 			    *((int *)&ptgt->tgt_port_wwn.raw_wwn[0]),
11557 			    *((int *)&ptgt->tgt_port_wwn.raw_wwn[4]), lun_id);
11558 			break;
11559 		}
11560 	}
11561 	scsi_destroy_pkt(pkt);
11562 
11563 	if (rval == FC_FAILURE) {
11564 		mutex_enter(&ptgt->tgt_mutex);
11565 		if (level == RESET_TARGET) {
11566 			fcp_update_tgt_state(ptgt, FCP_RESET, FCP_LUN_BUSY);
11567 		} else {
11568 			fcp_update_lun_state(plun, FCP_RESET, FCP_LUN_BUSY);
11569 		}
11570 		mutex_exit(&ptgt->tgt_mutex);
11571 		kmem_free(p, sizeof (struct fcp_reset_elem));
11572 		return (rval);
11573 	}
11574 
11575 	mutex_enter(&pptr->port_mutex);
11576 	if (level == RESET_TARGET) {
11577 		p->tgt = ptgt;
11578 		p->lun = NULL;
11579 	} else {
11580 		p->tgt = NULL;
11581 		p->lun = plun;
11582 	}
11583 	p->tgt = ptgt;
11584 	p->tgt_cnt = tgt_cnt;
11585 	p->timeout = fcp_watchdog_time + FCP_RESET_DELAY;
11586 	p->next = pptr->port_reset_list;
11587 	pptr->port_reset_list = p;
11588 
11589 	FCP_TRACE(fcp_logq, pptr->port_instbuf,
11590 	    fcp_trace, FCP_BUF_LEVEL_3, 0,
11591 	    "Notify ssd of the reset to reinstate the reservations");
11592 
11593 	scsi_hba_reset_notify_callback(&pptr->port_mutex,
11594 	    &pptr->port_reset_notify_listf);
11595 
11596 	mutex_exit(&pptr->port_mutex);
11597 
11598 	return (rval);
11599 }
11600 
11601 
11602 /*
11603  * called by fcp_getcap and fcp_setcap to get and set (respectively)
11604  * SCSI capabilities
11605  */
11606 /* ARGSUSED */
11607 static int
11608 fcp_commoncap(struct scsi_address *ap, char *cap,
11609     int val, int tgtonly, int doset)
11610 {
11611 	struct fcp_port 	*pptr = ADDR2FCP(ap);
11612 	struct fcp_lun 	*plun = ADDR2LUN(ap);
11613 	struct fcp_tgt 	*ptgt = plun->lun_tgt;
11614 	int 			cidx;
11615 	int 			rval = FALSE;
11616 
11617 	if (cap == (char *)0) {
11618 		FCP_TRACE(fcp_logq, pptr->port_instbuf,
11619 		    fcp_trace, FCP_BUF_LEVEL_3, 0,
11620 		    "fcp_commoncap: invalid arg");
11621 		return (rval);
11622 	}
11623 
11624 	if ((cidx = scsi_hba_lookup_capstr(cap)) == -1) {
11625 		return (UNDEFINED);
11626 	}
11627 
11628 	/*
11629 	 * Process setcap request.
11630 	 */
11631 	if (doset) {
11632 		/*
11633 		 * At present, we can only set binary (0/1) values
11634 		 */
11635 		switch (cidx) {
11636 		case SCSI_CAP_ARQ:
11637 			if (val == 0) {
11638 				rval = FALSE;
11639 			} else {
11640 				rval = TRUE;
11641 			}
11642 			break;
11643 
11644 		case SCSI_CAP_LUN_RESET:
11645 			if (val) {
11646 				plun->lun_cap |= FCP_LUN_CAP_RESET;
11647 			} else {
11648 				plun->lun_cap &= ~FCP_LUN_CAP_RESET;
11649 			}
11650 			rval = TRUE;
11651 			break;
11652 
11653 		case SCSI_CAP_SECTOR_SIZE:
11654 			rval = TRUE;
11655 			break;
11656 		default:
11657 			FCP_TRACE(fcp_logq, pptr->port_instbuf,
11658 			    fcp_trace, FCP_BUF_LEVEL_4, 0,
11659 			    "fcp_setcap: unsupported %d", cidx);
11660 			rval = UNDEFINED;
11661 			break;
11662 		}
11663 
11664 		FCP_TRACE(fcp_logq, pptr->port_instbuf,
11665 		    fcp_trace, FCP_BUF_LEVEL_5, 0,
11666 		    "set cap: cap=%s, val/tgtonly/doset/rval = "
11667 		    "0x%x/0x%x/0x%x/%d",
11668 		    cap, val, tgtonly, doset, rval);
11669 
11670 	} else {
11671 		/*
11672 		 * Process getcap request.
11673 		 */
11674 		switch (cidx) {
11675 		case SCSI_CAP_DMA_MAX:
11676 			rval = (int)pptr->port_data_dma_attr.dma_attr_maxxfer;
11677 
11678 			/*
11679 			 * Need to make an adjustment qlc is uint_t 64
11680 			 * st is int, so we will make the adjustment here
11681 			 * being as nobody wants to touch this.
11682 			 * It still leaves the max single block length
11683 			 * of 2 gig. This should last .
11684 			 */
11685 
11686 			if (rval == -1) {
11687 				rval = MAX_INT_DMA;
11688 			}
11689 
11690 			break;
11691 
11692 		case SCSI_CAP_INITIATOR_ID:
11693 			rval = pptr->port_id;
11694 			break;
11695 
11696 		case SCSI_CAP_ARQ:
11697 		case SCSI_CAP_RESET_NOTIFICATION:
11698 		case SCSI_CAP_TAGGED_QING:
11699 			rval = TRUE;
11700 			break;
11701 
11702 		case SCSI_CAP_SCSI_VERSION:
11703 			rval = 3;
11704 			break;
11705 
11706 		case SCSI_CAP_INTERCONNECT_TYPE:
11707 			if (FC_TOP_EXTERNAL(pptr->port_topology) ||
11708 			    (ptgt->tgt_hard_addr == 0)) {
11709 				rval = INTERCONNECT_FABRIC;
11710 			} else {
11711 				rval = INTERCONNECT_FIBRE;
11712 			}
11713 			break;
11714 
11715 		case SCSI_CAP_LUN_RESET:
11716 			rval = plun->lun_cap;
11717 			break;
11718 
11719 		default:
11720 			FCP_TRACE(fcp_logq, pptr->port_instbuf,
11721 			    fcp_trace, FCP_BUF_LEVEL_4, 0,
11722 			    "fcp_getcap: unsupported %d", cidx);
11723 			rval = UNDEFINED;
11724 			break;
11725 		}
11726 
11727 		FCP_TRACE(fcp_logq, pptr->port_instbuf,
11728 		    fcp_trace, FCP_BUF_LEVEL_8, 0,
11729 		    "get cap: cap=%s, val/tgtonly/doset/rval = "
11730 		    "0x%x/0x%x/0x%x/%d",
11731 		    cap, val, tgtonly, doset, rval);
11732 	}
11733 
11734 	return (rval);
11735 }
11736 
11737 /*
11738  * called by the transport to get the port-wwn and lun
11739  * properties of this device, and to create a "name" based on them
11740  *
11741  * these properties don't exist on sun4m
11742  *
11743  * return 1 for success else return 0
11744  */
11745 /* ARGSUSED */
11746 static int
11747 fcp_scsi_get_name(struct scsi_device *sd, char *name, int len)
11748 {
11749 	int			i;
11750 	int			*lun;
11751 	int			numChars;
11752 	uint_t			nlun;
11753 	uint_t			count;
11754 	uint_t			nbytes;
11755 	uchar_t			*bytes;
11756 	uint16_t		lun_num;
11757 	uint32_t		tgt_id;
11758 	char			**conf_wwn;
11759 	char			tbuf[(FC_WWN_SIZE << 1) + 1];
11760 	uchar_t			barray[FC_WWN_SIZE];
11761 	dev_info_t		*tgt_dip;
11762 	struct fcp_tgt	*ptgt;
11763 	struct fcp_port	*pptr;
11764 	struct fcp_lun	*plun;
11765 
11766 	ASSERT(sd != NULL);
11767 	ASSERT(name != NULL);
11768 
11769 	tgt_dip = sd->sd_dev;
11770 	pptr = ddi_get_soft_state(fcp_softstate,
11771 	    ddi_get_instance(ddi_get_parent(tgt_dip)));
11772 	if (pptr == NULL) {
11773 		return (0);
11774 	}
11775 
11776 	ASSERT(tgt_dip != NULL);
11777 
11778 	if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY, sd->sd_dev,
11779 	    DDI_PROP_DONTPASS | DDI_PROP_NOTPROM,
11780 	    LUN_PROP, &lun, &nlun) != DDI_SUCCESS) {
11781 		name[0] = '\0';
11782 		return (0);
11783 	}
11784 
11785 	if (nlun == 0) {
11786 		ddi_prop_free(lun);
11787 		return (0);
11788 	}
11789 
11790 	lun_num = lun[0];
11791 	ddi_prop_free(lun);
11792 
11793 	/*
11794 	 * Lookup for .conf WWN property
11795 	 */
11796 	if (ddi_prop_lookup_string_array(DDI_DEV_T_ANY, tgt_dip,
11797 	    DDI_PROP_DONTPASS | DDI_PROP_NOTPROM, CONF_WWN_PROP,
11798 	    &conf_wwn, &count) == DDI_PROP_SUCCESS) {
11799 		ASSERT(count >= 1);
11800 
11801 		fcp_ascii_to_wwn(conf_wwn[0], barray, FC_WWN_SIZE);
11802 		ddi_prop_free(conf_wwn);
11803 		mutex_enter(&pptr->port_mutex);
11804 		if ((plun = fcp_lookup_lun(pptr, barray, lun_num)) == NULL) {
11805 			mutex_exit(&pptr->port_mutex);
11806 			return (0);
11807 		}
11808 		ptgt = plun->lun_tgt;
11809 		mutex_exit(&pptr->port_mutex);
11810 
11811 		(void) ndi_prop_update_byte_array(DDI_DEV_T_NONE,
11812 		    tgt_dip, PORT_WWN_PROP, barray, FC_WWN_SIZE);
11813 
11814 		if (!FC_TOP_EXTERNAL(pptr->port_topology) &&
11815 		    ptgt->tgt_hard_addr != 0) {
11816 			tgt_id = (uint32_t)fcp_alpa_to_switch[
11817 			    ptgt->tgt_hard_addr];
11818 		} else {
11819 			tgt_id = ptgt->tgt_d_id;
11820 		}
11821 
11822 		(void) ndi_prop_update_int(DDI_DEV_T_NONE, tgt_dip,
11823 		    TARGET_PROP, tgt_id);
11824 	}
11825 
11826 	/* get the our port-wwn property */
11827 	bytes = NULL;
11828 	if ((ddi_prop_lookup_byte_array(DDI_DEV_T_ANY, tgt_dip,
11829 	    DDI_PROP_DONTPASS | DDI_PROP_NOTPROM, PORT_WWN_PROP, &bytes,
11830 	    &nbytes) != DDI_PROP_SUCCESS) || nbytes != FC_WWN_SIZE) {
11831 		if (bytes != NULL) {
11832 			ddi_prop_free(bytes);
11833 		}
11834 		return (0);
11835 	}
11836 
11837 	for (i = 0; i < FC_WWN_SIZE; i++) {
11838 		(void) sprintf(&tbuf[i << 1], "%02x", *(bytes + i));
11839 	}
11840 
11841 	/* Stick in the address of the form "wWWN,LUN" */
11842 	numChars = snprintf(name, len, "w%s,%x", tbuf, lun_num);
11843 
11844 	ASSERT(numChars < len);
11845 	if (numChars >= len) {
11846 		fcp_log(CE_WARN, pptr->port_dip,
11847 		    "!fcp_scsi_get_name: "
11848 		    "name parameter length too small, it needs to be %d",
11849 		    numChars+1);
11850 	}
11851 
11852 	ddi_prop_free(bytes);
11853 
11854 	return (1);
11855 }
11856 
11857 
11858 /*
11859  * called by the transport to get the SCSI target id value, returning
11860  * it in "name"
11861  *
11862  * this isn't needed/used on sun4m
11863  *
11864  * return 1 for success else return 0
11865  */
11866 /* ARGSUSED */
11867 static int
11868 fcp_scsi_get_bus_addr(struct scsi_device *sd, char *name, int len)
11869 {
11870 	struct fcp_lun	*plun = ADDR2LUN(&sd->sd_address);
11871 	struct fcp_tgt	*ptgt;
11872 	int    numChars;
11873 
11874 	if (plun == NULL) {
11875 		return (0);
11876 	}
11877 
11878 	if ((ptgt = plun->lun_tgt) == NULL) {
11879 		return (0);
11880 	}
11881 
11882 	numChars = snprintf(name, len, "%x", ptgt->tgt_d_id);
11883 
11884 	ASSERT(numChars < len);
11885 	if (numChars >= len) {
11886 		fcp_log(CE_WARN, NULL,
11887 		    "!fcp_scsi_get_bus_addr: "
11888 		    "name parameter length too small, it needs to be %d",
11889 		    numChars+1);
11890 	}
11891 
11892 	return (1);
11893 }
11894 
11895 
11896 /*
11897  * called internally to reset the link where the specified port lives
11898  */
11899 static int
11900 fcp_linkreset(struct fcp_port *pptr, struct scsi_address *ap, int sleep)
11901 {
11902 	la_wwn_t 		wwn;
11903 	struct fcp_lun 	*plun;
11904 	struct fcp_tgt 	*ptgt;
11905 
11906 	/* disable restart of lip if we're suspended */
11907 	mutex_enter(&pptr->port_mutex);
11908 
11909 	if (pptr->port_state & (FCP_STATE_SUSPENDED |
11910 	    FCP_STATE_POWER_DOWN)) {
11911 		mutex_exit(&pptr->port_mutex);
11912 		FCP_TRACE(fcp_logq, pptr->port_instbuf,
11913 		    fcp_trace, FCP_BUF_LEVEL_2, 0,
11914 		    "fcp_linkreset, fcp%d: link reset "
11915 		    "disabled due to DDI_SUSPEND",
11916 		    ddi_get_instance(pptr->port_dip));
11917 		return (FC_FAILURE);
11918 	}
11919 
11920 	if (pptr->port_state & (FCP_STATE_OFFLINE | FCP_STATE_ONLINING)) {
11921 		mutex_exit(&pptr->port_mutex);
11922 		return (FC_SUCCESS);
11923 	}
11924 
11925 	FCP_DTRACE(fcp_logq, pptr->port_instbuf,
11926 		    fcp_trace, FCP_BUF_LEVEL_8, 0, "Forcing link reset");
11927 
11928 	/*
11929 	 * If ap == NULL assume local link reset.
11930 	 */
11931 	if (FC_TOP_EXTERNAL(pptr->port_topology) && (ap != NULL)) {
11932 		plun = ADDR2LUN(ap);
11933 		ptgt = plun->lun_tgt;
11934 		bcopy(&ptgt->tgt_port_wwn.raw_wwn[0], &wwn, sizeof (wwn));
11935 	} else {
11936 		bzero((caddr_t)&wwn, sizeof (wwn));
11937 	}
11938 	mutex_exit(&pptr->port_mutex);
11939 
11940 	return (fc_ulp_linkreset(pptr->port_fp_handle, &wwn, sleep));
11941 }
11942 
11943 
11944 /*
11945  * called from fcp_port_attach() to resume a port
11946  * return DDI_* success/failure status
11947  * acquires and releases the global mutex
11948  * acquires and releases the port mutex
11949  */
11950 /*ARGSUSED*/
11951 
11952 static int
11953 fcp_handle_port_resume(opaque_t ulph, fc_ulp_port_info_t *pinfo,
11954     uint32_t s_id, fc_attach_cmd_t cmd, int instance)
11955 {
11956 	int			res = DDI_FAILURE; /* default result */
11957 	struct fcp_port	*pptr;		/* port state ptr */
11958 	uint32_t		alloc_cnt;
11959 	uint32_t		max_cnt;
11960 	fc_portmap_t		*tmp_list = NULL;
11961 
11962 	FCP_DTRACE(fcp_logq, "fcp", fcp_trace,
11963 	    FCP_BUF_LEVEL_8, 0, "port resume: for port %d",
11964 	    instance);
11965 
11966 	if ((pptr = ddi_get_soft_state(fcp_softstate, instance)) == NULL) {
11967 		cmn_err(CE_WARN, "fcp: bad soft state");
11968 		return (res);
11969 	}
11970 
11971 	mutex_enter(&pptr->port_mutex);
11972 	switch (cmd) {
11973 	case FC_CMD_RESUME:
11974 		ASSERT((pptr->port_state & FCP_STATE_POWER_DOWN) == 0);
11975 		pptr->port_state &= ~FCP_STATE_SUSPENDED;
11976 		break;
11977 
11978 	case FC_CMD_POWER_UP:
11979 		/*
11980 		 * If the port is DDI_SUSPENded, defer rediscovery
11981 		 * until DDI_RESUME occurs
11982 		 */
11983 		if (pptr->port_state & FCP_STATE_SUSPENDED) {
11984 			pptr->port_state &= ~FCP_STATE_POWER_DOWN;
11985 			mutex_exit(&pptr->port_mutex);
11986 			return (DDI_SUCCESS);
11987 		}
11988 		pptr->port_state &= ~FCP_STATE_POWER_DOWN;
11989 	}
11990 	pptr->port_id = s_id;
11991 	pptr->port_state = FCP_STATE_INIT;
11992 	mutex_exit(&pptr->port_mutex);
11993 
11994 	/*
11995 	 * Make a copy of ulp_port_info as fctl allocates
11996 	 * a temp struct.
11997 	 */
11998 	(void) fcp_cp_pinfo(pptr, pinfo);
11999 
12000 	mutex_enter(&fcp_global_mutex);
12001 	if (fcp_watchdog_init++ == 0) {
12002 		fcp_watchdog_tick = fcp_watchdog_timeout *
12003 			drv_usectohz(1000000);
12004 		fcp_watchdog_id = timeout(fcp_watch,
12005 		    NULL, fcp_watchdog_tick);
12006 	}
12007 	mutex_exit(&fcp_global_mutex);
12008 
12009 	/*
12010 	 * Handle various topologies and link states.
12011 	 */
12012 	switch (FC_PORT_STATE_MASK(pptr->port_phys_state)) {
12013 	case FC_STATE_OFFLINE:
12014 		/*
12015 		 * Wait for ONLINE, at which time a state
12016 		 * change will cause a statec_callback
12017 		 */
12018 		res = DDI_SUCCESS;
12019 		break;
12020 
12021 	case FC_STATE_ONLINE:
12022 
12023 		if (pptr->port_topology == FC_TOP_UNKNOWN) {
12024 			(void) fcp_linkreset(pptr, NULL, KM_NOSLEEP);
12025 			res = DDI_SUCCESS;
12026 			break;
12027 		}
12028 
12029 		if (FC_TOP_EXTERNAL(pptr->port_topology) &&
12030 		    !fcp_enable_auto_configuration) {
12031 			tmp_list = fcp_construct_map(pptr, &alloc_cnt);
12032 			if (tmp_list == NULL) {
12033 				if (!alloc_cnt) {
12034 					res = DDI_SUCCESS;
12035 				}
12036 				break;
12037 			}
12038 			max_cnt = alloc_cnt;
12039 		} else {
12040 			ASSERT(pptr->port_topology != FC_TOP_UNKNOWN);
12041 
12042 			alloc_cnt = FCP_MAX_DEVICES;
12043 
12044 			if ((tmp_list = (fc_portmap_t *)kmem_zalloc(
12045 			    (sizeof (fc_portmap_t)) * alloc_cnt,
12046 			    KM_NOSLEEP)) == NULL) {
12047 				fcp_log(CE_WARN, pptr->port_dip,
12048 				    "!fcp%d: failed to allocate portmap",
12049 				    instance);
12050 				break;
12051 			}
12052 
12053 			max_cnt = alloc_cnt;
12054 			if ((res = fc_ulp_getportmap(pptr->port_fp_handle,
12055 			    &tmp_list, &max_cnt, FC_ULP_PLOGI_PRESERVE)) !=
12056 			    FC_SUCCESS) {
12057 				caddr_t msg;
12058 
12059 				(void) fc_ulp_error(res, &msg);
12060 
12061 				FCP_TRACE(fcp_logq, pptr->port_instbuf,
12062 				    fcp_trace, FCP_BUF_LEVEL_2, 0,
12063 				    "resume failed getportmap: reason=0x%x",
12064 				    res);
12065 
12066 				fcp_log(CE_WARN, pptr->port_dip,
12067 				    "!failed to get port map : %s", msg);
12068 				break;
12069 			}
12070 			if (max_cnt > alloc_cnt) {
12071 				alloc_cnt = max_cnt;
12072 			}
12073 		}
12074 
12075 		/*
12076 		 * do the SCSI device discovery and create
12077 		 * the devinfos
12078 		 */
12079 		fcp_statec_callback(ulph, pptr->port_fp_handle,
12080 		    pptr->port_phys_state, pptr->port_topology, tmp_list,
12081 		    max_cnt, pptr->port_id);
12082 
12083 		res = DDI_SUCCESS;
12084 		break;
12085 
12086 	default:
12087 		fcp_log(CE_WARN, pptr->port_dip,
12088 		    "!fcp%d: invalid port state at attach=0x%x",
12089 		    instance, pptr->port_phys_state);
12090 
12091 		mutex_enter(&pptr->port_mutex);
12092 		pptr->port_phys_state = FCP_STATE_OFFLINE;
12093 		mutex_exit(&pptr->port_mutex);
12094 		res = DDI_SUCCESS;
12095 
12096 		break;
12097 	}
12098 
12099 	if (tmp_list != NULL) {
12100 		kmem_free(tmp_list, sizeof (fc_portmap_t) * alloc_cnt);
12101 	}
12102 
12103 	return (res);
12104 }
12105 
12106 
12107 static void
12108 fcp_cp_pinfo(struct fcp_port *pptr, fc_ulp_port_info_t *pinfo)
12109 {
12110 	pptr->port_fp_modlinkage = *pinfo->port_linkage;
12111 	pptr->port_dip = pinfo->port_dip;
12112 	pptr->port_fp_handle = pinfo->port_handle;
12113 	pptr->port_data_dma_attr = *pinfo->port_data_dma_attr;
12114 	pptr->port_cmd_dma_attr = *pinfo->port_cmd_dma_attr;
12115 	pptr->port_resp_dma_attr = *pinfo->port_resp_dma_attr;
12116 	pptr->port_dma_acc_attr = *pinfo->port_acc_attr;
12117 	pptr->port_priv_pkt_len = pinfo->port_fca_pkt_size;
12118 	pptr->port_max_exch = pinfo->port_fca_max_exch;
12119 	pptr->port_phys_state = pinfo->port_state;
12120 	pptr->port_topology = pinfo->port_flags;
12121 	pptr->port_reset_action = pinfo->port_reset_action;
12122 	pptr->port_cmds_dma_flags = pinfo->port_dma_behavior;
12123 	pptr->port_fcp_dma = pinfo->port_fcp_dma;
12124 	bcopy(&pinfo->port_nwwn, &pptr->port_nwwn, sizeof (la_wwn_t));
12125 	bcopy(&pinfo->port_pwwn, &pptr->port_pwwn, sizeof (la_wwn_t));
12126 }
12127 
12128 /*
12129  * If the elements wait field is set to 1 then
12130  * another thread is waiting for the operation to complete. Once
12131  * it is complete, the waiting thread is signaled and the element is
12132  * freed by the waiting thread. If the elements wait field is set to 0
12133  * the element is freed.
12134  */
12135 static void
12136 fcp_process_elem(struct fcp_hp_elem *elem, int result)
12137 {
12138 	ASSERT(elem != NULL);
12139 	mutex_enter(&elem->mutex);
12140 	elem->result = result;
12141 	if (elem->wait) {
12142 		elem->wait = 0;
12143 		cv_signal(&elem->cv);
12144 		mutex_exit(&elem->mutex);
12145 	} else {
12146 		mutex_exit(&elem->mutex);
12147 		cv_destroy(&elem->cv);
12148 		mutex_destroy(&elem->mutex);
12149 		kmem_free(elem, sizeof (struct fcp_hp_elem));
12150 	}
12151 }
12152 
12153 /*
12154  * This function is invoked from the taskq thread to allocate
12155  * devinfo nodes and to online/offline them.
12156  */
12157 static void
12158 fcp_hp_task(void *arg)
12159 {
12160 	struct fcp_hp_elem 	*elem = (struct fcp_hp_elem *)arg;
12161 	struct fcp_lun 	*plun = elem->lun;
12162 	struct fcp_port 	*pptr = elem->port;
12163 	int			result;
12164 
12165 	ASSERT(elem->what == FCP_ONLINE ||
12166 		elem->what == FCP_OFFLINE ||
12167 		elem->what == FCP_MPXIO_PATH_CLEAR_BUSY ||
12168 		elem->what == FCP_MPXIO_PATH_SET_BUSY);
12169 
12170 	mutex_enter(&pptr->port_mutex);
12171 	mutex_enter(&plun->lun_mutex);
12172 	if (((elem->what == FCP_ONLINE || elem->what == FCP_OFFLINE) &&
12173 	    plun->lun_event_count != elem->event_cnt) ||
12174 	    pptr->port_state & (FCP_STATE_SUSPENDED |
12175 	    FCP_STATE_DETACHING | FCP_STATE_POWER_DOWN)) {
12176 		mutex_exit(&plun->lun_mutex);
12177 		mutex_exit(&pptr->port_mutex);
12178 		fcp_process_elem(elem, NDI_FAILURE);
12179 		return;
12180 	}
12181 	mutex_exit(&plun->lun_mutex);
12182 	mutex_exit(&pptr->port_mutex);
12183 
12184 	result = fcp_trigger_lun(plun, elem->cip, elem->what,
12185 	    elem->link_cnt, elem->tgt_cnt, elem->flags);
12186 	fcp_process_elem(elem, result);
12187 }
12188 
12189 
12190 static child_info_t *
12191 fcp_get_cip(struct fcp_lun *plun, child_info_t *cip, int lcount,
12192     int tcount)
12193 {
12194 	ASSERT(MUTEX_HELD(&plun->lun_mutex));
12195 
12196 	if (fcp_is_child_present(plun, cip) == FC_FAILURE) {
12197 		struct fcp_port *pptr = plun->lun_tgt->tgt_port;
12198 
12199 		ASSERT(MUTEX_HELD(&pptr->port_mutex));
12200 		/*
12201 		 * Child has not been created yet. Create the child device
12202 		 * based on the per-Lun flags.
12203 		 */
12204 		if (pptr->port_mpxio == 0 || plun->lun_mpxio == 0) {
12205 			plun->lun_cip =
12206 			    CIP(fcp_create_dip(plun, lcount, tcount));
12207 			plun->lun_mpxio = 0;
12208 		} else {
12209 			plun->lun_cip =
12210 			    CIP(fcp_create_pip(plun, lcount, tcount));
12211 			plun->lun_mpxio = 1;
12212 		}
12213 	} else {
12214 		plun->lun_cip = cip;
12215 	}
12216 
12217 	return (plun->lun_cip);
12218 }
12219 
12220 
12221 static int
12222 fcp_is_dip_present(struct fcp_lun *plun, dev_info_t *cdip)
12223 {
12224 	int		rval = FC_FAILURE;
12225 	dev_info_t 	*pdip;
12226 	struct dev_info	*dip;
12227 	int		circular;
12228 
12229 	ASSERT(MUTEX_HELD(&plun->lun_mutex));
12230 
12231 	pdip = plun->lun_tgt->tgt_port->port_dip;
12232 
12233 	if (plun->lun_cip == NULL) {
12234 		FCP_TRACE(fcp_logq, LUN_PORT->port_instbuf,
12235 		    fcp_trace, FCP_BUF_LEVEL_3, 0,
12236 		    "fcp_is_dip_present: plun->lun_cip is NULL: "
12237 		    "plun: %p lun state: %x num: %d target state: %x",
12238 		    plun, plun->lun_state, plun->lun_num,
12239 		    plun->lun_tgt->tgt_port->port_state);
12240 		return (rval);
12241 	}
12242 	ndi_devi_enter(pdip, &circular);
12243 	dip = DEVI(pdip)->devi_child;
12244 	while (dip) {
12245 		if (dip == DEVI(cdip)) {
12246 			rval = FC_SUCCESS;
12247 			break;
12248 		}
12249 		dip = dip->devi_sibling;
12250 	}
12251 	ndi_devi_exit(pdip, circular);
12252 	return (rval);
12253 }
12254 
12255 static int
12256 fcp_is_child_present(struct fcp_lun *plun, child_info_t *cip)
12257 {
12258 	int		rval = FC_FAILURE;
12259 
12260 	ASSERT(plun != NULL);
12261 	ASSERT(MUTEX_HELD(&plun->lun_mutex));
12262 
12263 	if (plun->lun_mpxio == 0) {
12264 		rval = fcp_is_dip_present(plun, DIP(cip));
12265 	} else {
12266 		rval = fcp_is_pip_present(plun, PIP(cip));
12267 	}
12268 
12269 	return (rval);
12270 }
12271 
12272 /*
12273  *     Function: fcp_create_dip
12274  *
12275  *  Description: Creates a dev_info_t structure for the LUN specified by the
12276  *		 caller.
12277  *
12278  *     Argument: plun		Lun structure
12279  *		 link_cnt	Link state count.
12280  *		 tgt_cnt	Target state change count.
12281  *
12282  * Return Value: NULL if it failed
12283  *		 dev_info_t structure address if it succeeded
12284  *
12285  *      Context: Kernel context
12286  */
12287 static dev_info_t *
12288 fcp_create_dip(struct fcp_lun *plun, int link_cnt, int tgt_cnt)
12289 {
12290 	int			failure = 0;
12291 	uint32_t		tgt_id;
12292 	uint64_t		sam_lun;
12293 	struct fcp_tgt	*ptgt = plun->lun_tgt;
12294 	struct fcp_port	*pptr = ptgt->tgt_port;
12295 	dev_info_t		*pdip = pptr->port_dip;
12296 	dev_info_t		*cdip = NULL;
12297 	dev_info_t		*old_dip = DIP(plun->lun_cip);
12298 	char			*nname = NULL;
12299 	char			**compatible = NULL;
12300 	int			ncompatible;
12301 	char			*scsi_binding_set;
12302 	char			t_pwwn[17];
12303 
12304 	ASSERT(MUTEX_HELD(&plun->lun_mutex));
12305 	ASSERT(MUTEX_HELD(&pptr->port_mutex));
12306 
12307 	/* get the 'scsi-binding-set' property */
12308 	if (ddi_prop_lookup_string(DDI_DEV_T_ANY, pdip,
12309 	    DDI_PROP_NOTPROM | DDI_PROP_DONTPASS, "scsi-binding-set",
12310 	    &scsi_binding_set) != DDI_PROP_SUCCESS)
12311 		scsi_binding_set = NULL;
12312 
12313 	/* determine the node name and compatible */
12314 	scsi_hba_nodename_compatible_get(&plun->lun_inq, scsi_binding_set,
12315 	    plun->lun_inq.inq_dtype, NULL, &nname, &compatible, &ncompatible);
12316 	if (scsi_binding_set)
12317 		ddi_prop_free(scsi_binding_set);
12318 
12319 	if (nname == NULL) {
12320 #ifdef	DEBUG
12321 		cmn_err(CE_WARN, "%s%d: no driver for "
12322 		    "device @w%02x%02x%02x%02x%02x%02x%02x%02x,%d:"
12323 		    "    compatible: %s",
12324 		    ddi_driver_name(pdip), ddi_get_instance(pdip),
12325 		    ptgt->tgt_port_wwn.raw_wwn[0],
12326 		    ptgt->tgt_port_wwn.raw_wwn[1],
12327 		    ptgt->tgt_port_wwn.raw_wwn[2],
12328 		    ptgt->tgt_port_wwn.raw_wwn[3],
12329 		    ptgt->tgt_port_wwn.raw_wwn[4],
12330 		    ptgt->tgt_port_wwn.raw_wwn[5],
12331 		    ptgt->tgt_port_wwn.raw_wwn[6],
12332 		    ptgt->tgt_port_wwn.raw_wwn[7], plun->lun_num,
12333 		    *compatible);
12334 #endif	/* DEBUG */
12335 		failure++;
12336 		goto end_of_fcp_create_dip;
12337 	}
12338 
12339 	cdip = fcp_find_existing_dip(plun, pdip, nname);
12340 
12341 	/*
12342 	 * if the old_dip does not match the cdip, that means there is
12343 	 * some property change. since we'll be using the cdip, we need
12344 	 * to offline the old_dip. If the state contains FCP_LUN_CHANGED
12345 	 * then the dtype for the device has been updated. Offline the
12346 	 * the old device and create a new device with the new device type
12347 	 * Refer to bug: 4764752
12348 	 */
12349 	if (old_dip && (cdip != old_dip ||
12350 			plun->lun_state & FCP_LUN_CHANGED)) {
12351 		plun->lun_state &= ~(FCP_LUN_INIT);
12352 		mutex_exit(&plun->lun_mutex);
12353 		mutex_exit(&pptr->port_mutex);
12354 
12355 		mutex_enter(&ptgt->tgt_mutex);
12356 		(void) fcp_pass_to_hp(pptr, plun, CIP(old_dip), FCP_OFFLINE,
12357 		    link_cnt, tgt_cnt, NDI_DEVI_REMOVE, 0);
12358 		mutex_exit(&ptgt->tgt_mutex);
12359 
12360 #ifdef DEBUG
12361 		if (cdip != NULL) {
12362 			FCP_TRACE(fcp_logq, pptr->port_instbuf,
12363 			    fcp_trace, FCP_BUF_LEVEL_2, 0,
12364 			    "Old dip=%p; New dip=%p don't match", old_dip,
12365 			    cdip);
12366 		} else {
12367 			FCP_TRACE(fcp_logq, pptr->port_instbuf,
12368 			    fcp_trace, FCP_BUF_LEVEL_2, 0,
12369 			    "Old dip=%p; New dip=NULL don't match", old_dip);
12370 		}
12371 #endif
12372 
12373 		mutex_enter(&pptr->port_mutex);
12374 		mutex_enter(&plun->lun_mutex);
12375 	}
12376 
12377 	if (cdip == NULL || plun->lun_state & FCP_LUN_CHANGED) {
12378 		plun->lun_state &= ~(FCP_LUN_CHANGED);
12379 		if (ndi_devi_alloc(pptr->port_dip, nname,
12380 		    DEVI_SID_NODEID, &cdip) != NDI_SUCCESS) {
12381 			failure++;
12382 			goto end_of_fcp_create_dip;
12383 		}
12384 	}
12385 
12386 	/*
12387 	 * Previously all the properties for the devinfo were destroyed here
12388 	 * with a call to ndi_prop_remove_all(). Since this may cause loss of
12389 	 * the devid property (and other properties established by the target
12390 	 * driver or framework) which the code does not always recreate, this
12391 	 * call was removed.
12392 	 * This opens a theoretical possibility that we may return with a
12393 	 * stale devid on the node if the scsi entity behind the fibre channel
12394 	 * lun has changed.
12395 	 */
12396 
12397 	/* decorate the node with compatible */
12398 	if (ndi_prop_update_string_array(DDI_DEV_T_NONE, cdip,
12399 	    "compatible", compatible, ncompatible) != DDI_PROP_SUCCESS) {
12400 		failure++;
12401 		goto end_of_fcp_create_dip;
12402 	}
12403 
12404 	if (ndi_prop_update_byte_array(DDI_DEV_T_NONE, cdip, NODE_WWN_PROP,
12405 	    ptgt->tgt_node_wwn.raw_wwn, FC_WWN_SIZE) != DDI_PROP_SUCCESS) {
12406 		failure++;
12407 		goto end_of_fcp_create_dip;
12408 	}
12409 
12410 	if (ndi_prop_update_byte_array(DDI_DEV_T_NONE, cdip, PORT_WWN_PROP,
12411 	    ptgt->tgt_port_wwn.raw_wwn, FC_WWN_SIZE) != DDI_PROP_SUCCESS) {
12412 		failure++;
12413 		goto end_of_fcp_create_dip;
12414 	}
12415 
12416 	fcp_wwn_to_ascii(ptgt->tgt_port_wwn.raw_wwn, t_pwwn);
12417 	t_pwwn[16] = '\0';
12418 	if (ndi_prop_update_string(DDI_DEV_T_NONE, cdip, TGT_PORT_PROP, t_pwwn)
12419 	    != DDI_PROP_SUCCESS) {
12420 		failure++;
12421 		goto end_of_fcp_create_dip;
12422 	}
12423 
12424 	/*
12425 	 * If there is no hard address - We might have to deal with
12426 	 * that by using WWN - Having said that it is important to
12427 	 * recognize this problem early so ssd can be informed of
12428 	 * the right interconnect type.
12429 	 */
12430 	if (!FC_TOP_EXTERNAL(pptr->port_topology) && ptgt->tgt_hard_addr != 0) {
12431 		tgt_id = (uint32_t)fcp_alpa_to_switch[ptgt->tgt_hard_addr];
12432 	} else {
12433 		tgt_id = ptgt->tgt_d_id;
12434 	}
12435 
12436 	if (ndi_prop_update_int(DDI_DEV_T_NONE, cdip, TARGET_PROP,
12437 	    tgt_id) != DDI_PROP_SUCCESS) {
12438 		failure++;
12439 		goto end_of_fcp_create_dip;
12440 	}
12441 
12442 	if (ndi_prop_update_int(DDI_DEV_T_NONE, cdip, LUN_PROP,
12443 	    (int)plun->lun_num) != DDI_PROP_SUCCESS) {
12444 		failure++;
12445 		goto end_of_fcp_create_dip;
12446 	}
12447 	bcopy(&plun->lun_addr, &sam_lun, FCP_LUN_SIZE);
12448 	if (ndi_prop_update_int64(DDI_DEV_T_NONE, cdip, SAM_LUN_PROP,
12449 	    sam_lun) != DDI_PROP_SUCCESS) {
12450 		failure++;
12451 		goto end_of_fcp_create_dip;
12452 	}
12453 
12454 end_of_fcp_create_dip:
12455 	scsi_hba_nodename_compatible_free(nname, compatible);
12456 
12457 	if (cdip != NULL && failure) {
12458 		(void) ndi_prop_remove_all(cdip);
12459 		(void) ndi_devi_free(cdip);
12460 		cdip = NULL;
12461 	}
12462 
12463 	return (cdip);
12464 }
12465 
12466 /*
12467  *     Function: fcp_create_pip
12468  *
12469  *  Description: Creates a Path Id for the LUN specified by the caller.
12470  *
12471  *     Argument: plun		Lun structure
12472  *		 link_cnt	Link state count.
12473  *		 tgt_cnt	Target state count.
12474  *
12475  * Return Value: NULL if it failed
12476  *		 mdi_pathinfo_t structure address if it succeeded
12477  *
12478  *      Context: Kernel context
12479  */
12480 static mdi_pathinfo_t *
12481 fcp_create_pip(struct fcp_lun *plun, int lcount, int tcount)
12482 {
12483 	int			i;
12484 	char			buf[MAXNAMELEN];
12485 	char			uaddr[MAXNAMELEN];
12486 	int			failure = 0;
12487 	uint32_t		tgt_id;
12488 	uint64_t		sam_lun;
12489 	struct fcp_tgt	*ptgt = plun->lun_tgt;
12490 	struct fcp_port	*pptr = ptgt->tgt_port;
12491 	dev_info_t		*pdip = pptr->port_dip;
12492 	mdi_pathinfo_t		*pip = NULL;
12493 	mdi_pathinfo_t		*old_pip = PIP(plun->lun_cip);
12494 	char			*nname = NULL;
12495 	char			**compatible = NULL;
12496 	int			ncompatible;
12497 	char			*scsi_binding_set;
12498 	char			t_pwwn[17];
12499 
12500 	ASSERT(MUTEX_HELD(&plun->lun_mutex));
12501 	ASSERT(MUTEX_HELD(&pptr->port_mutex));
12502 
12503 	scsi_binding_set = "vhci";
12504 
12505 	/* determine the node name and compatible */
12506 	scsi_hba_nodename_compatible_get(&plun->lun_inq, scsi_binding_set,
12507 	    plun->lun_inq.inq_dtype, NULL, &nname, &compatible, &ncompatible);
12508 
12509 	if (nname == NULL) {
12510 #ifdef	DEBUG
12511 		cmn_err(CE_WARN, "fcp_create_dip: %s%d: no driver for "
12512 		    "device @w%02x%02x%02x%02x%02x%02x%02x%02x,%d:"
12513 		    "    compatible: %s",
12514 		    ddi_driver_name(pdip), ddi_get_instance(pdip),
12515 		    ptgt->tgt_port_wwn.raw_wwn[0],
12516 		    ptgt->tgt_port_wwn.raw_wwn[1],
12517 		    ptgt->tgt_port_wwn.raw_wwn[2],
12518 		    ptgt->tgt_port_wwn.raw_wwn[3],
12519 		    ptgt->tgt_port_wwn.raw_wwn[4],
12520 		    ptgt->tgt_port_wwn.raw_wwn[5],
12521 		    ptgt->tgt_port_wwn.raw_wwn[6],
12522 		    ptgt->tgt_port_wwn.raw_wwn[7], plun->lun_num,
12523 		    *compatible);
12524 #endif	/* DEBUG */
12525 		failure++;
12526 		goto end_of_fcp_create_pip;
12527 	}
12528 
12529 	pip = fcp_find_existing_pip(plun, pdip);
12530 
12531 	/*
12532 	 * if the old_dip does not match the cdip, that means there is
12533 	 * some property change. since we'll be using the cdip, we need
12534 	 * to offline the old_dip. If the state contains FCP_LUN_CHANGED
12535 	 * then the dtype for the device has been updated. Offline the
12536 	 * the old device and create a new device with the new device type
12537 	 * Refer to bug: 4764752
12538 	 */
12539 	if (old_pip && (pip != old_pip ||
12540 			plun->lun_state & FCP_LUN_CHANGED)) {
12541 		plun->lun_state &= ~(FCP_LUN_INIT);
12542 		mutex_exit(&plun->lun_mutex);
12543 		mutex_exit(&pptr->port_mutex);
12544 
12545 		mutex_enter(&ptgt->tgt_mutex);
12546 		(void) fcp_pass_to_hp(pptr, plun, CIP(old_pip),
12547 		    FCP_OFFLINE, lcount, tcount,
12548 		    NDI_DEVI_REMOVE, 0);
12549 		mutex_exit(&ptgt->tgt_mutex);
12550 
12551 		if (pip != NULL) {
12552 			FCP_TRACE(fcp_logq, pptr->port_instbuf,
12553 			    fcp_trace, FCP_BUF_LEVEL_2, 0,
12554 			    "Old pip=%p; New pip=%p don't match",
12555 			    old_pip, pip);
12556 		} else {
12557 			FCP_TRACE(fcp_logq, pptr->port_instbuf,
12558 			    fcp_trace, FCP_BUF_LEVEL_2, 0,
12559 			    "Old pip=%p; New pip=NULL don't match",
12560 			    old_pip);
12561 		}
12562 
12563 		mutex_enter(&pptr->port_mutex);
12564 		mutex_enter(&plun->lun_mutex);
12565 	}
12566 
12567 	/*
12568 	 * Since FC_WWN_SIZE is 8 bytes and its not like the
12569 	 * lun_guid_size which is dependent on the target, I don't
12570 	 * believe the same trancation happens here UNLESS the standards
12571 	 * change the FC_WWN_SIZE value to something larger than
12572 	 * MAXNAMELEN(currently 255 bytes).
12573 	 */
12574 
12575 	for (i = 0; i < FC_WWN_SIZE; i++)
12576 		(void) sprintf(&buf[i << 1], "%02x",
12577 		    ptgt->tgt_port_wwn.raw_wwn[i]);
12578 
12579 	(void) snprintf(uaddr, MAXNAMELEN, "w%s,%x",
12580 	    buf, plun->lun_num);
12581 
12582 	if (pip == NULL || plun->lun_state & FCP_LUN_CHANGED) {
12583 		/*
12584 		 * Release the locks before calling into
12585 		 * mdi_pi_alloc_compatible() since this can result in a
12586 		 * callback into fcp which can result in a deadlock
12587 		 * (see bug # 4870272).
12588 		 *
12589 		 * Basically, what we are trying to avoid is the scenario where
12590 		 * one thread does ndi_devi_enter() and tries to grab
12591 		 * fcp_mutex and another does it the other way round.
12592 		 *
12593 		 * But before we do that, make sure that nobody releases the
12594 		 * port in the meantime. We can do this by setting a flag.
12595 		 */
12596 		plun->lun_state &= ~(FCP_LUN_CHANGED);
12597 		pptr->port_state |= FCP_STATE_IN_MDI;
12598 		mutex_exit(&plun->lun_mutex);
12599 		mutex_exit(&pptr->port_mutex);
12600 		if (mdi_pi_alloc_compatible(pdip, nname, plun->lun_guid,
12601 		    uaddr, compatible, ncompatible, 0, &pip) != MDI_SUCCESS) {
12602 			fcp_log(CE_WARN, pptr->port_dip,
12603 			    "!path alloc failed:0x%x", plun);
12604 			mutex_enter(&pptr->port_mutex);
12605 			mutex_enter(&plun->lun_mutex);
12606 			pptr->port_state &= ~FCP_STATE_IN_MDI;
12607 			failure++;
12608 			goto end_of_fcp_create_pip;
12609 		}
12610 		mutex_enter(&pptr->port_mutex);
12611 		mutex_enter(&plun->lun_mutex);
12612 		pptr->port_state &= ~FCP_STATE_IN_MDI;
12613 	} else {
12614 		(void) mdi_prop_remove(pip, NULL);
12615 	}
12616 
12617 	mdi_pi_set_phci_private(pip, (caddr_t)plun);
12618 
12619 	if (mdi_prop_update_byte_array(pip, NODE_WWN_PROP,
12620 	    ptgt->tgt_node_wwn.raw_wwn, FC_WWN_SIZE)
12621 	    != DDI_PROP_SUCCESS) {
12622 		failure++;
12623 		goto end_of_fcp_create_pip;
12624 	}
12625 
12626 	if (mdi_prop_update_byte_array(pip, PORT_WWN_PROP,
12627 	    ptgt->tgt_port_wwn.raw_wwn, FC_WWN_SIZE)
12628 	    != DDI_PROP_SUCCESS) {
12629 		failure++;
12630 		goto end_of_fcp_create_pip;
12631 	}
12632 
12633 	fcp_wwn_to_ascii(ptgt->tgt_port_wwn.raw_wwn, t_pwwn);
12634 	t_pwwn[16] = '\0';
12635 	if (mdi_prop_update_string(pip, TGT_PORT_PROP, t_pwwn)
12636 	    != DDI_PROP_SUCCESS) {
12637 		failure++;
12638 		goto end_of_fcp_create_pip;
12639 	}
12640 
12641 	/*
12642 	 * If there is no hard address - We might have to deal with
12643 	 * that by using WWN - Having said that it is important to
12644 	 * recognize this problem early so ssd can be informed of
12645 	 * the right interconnect type.
12646 	 */
12647 	if (!FC_TOP_EXTERNAL(pptr->port_topology) &&
12648 	    ptgt->tgt_hard_addr != 0) {
12649 		tgt_id = (uint32_t)
12650 		    fcp_alpa_to_switch[ptgt->tgt_hard_addr];
12651 	} else {
12652 		tgt_id = ptgt->tgt_d_id;
12653 	}
12654 
12655 	if (mdi_prop_update_int(pip, TARGET_PROP, tgt_id)
12656 	    != DDI_PROP_SUCCESS) {
12657 		failure++;
12658 		goto end_of_fcp_create_pip;
12659 	}
12660 
12661 	if (mdi_prop_update_int(pip, LUN_PROP, (int)plun->lun_num)
12662 	    != DDI_PROP_SUCCESS) {
12663 		failure++;
12664 		goto end_of_fcp_create_pip;
12665 	}
12666 	bcopy(&plun->lun_addr, &sam_lun, FCP_LUN_SIZE);
12667 	if (mdi_prop_update_int64(pip, SAM_LUN_PROP, sam_lun)
12668 	    != DDI_PROP_SUCCESS) {
12669 		failure++;
12670 		goto end_of_fcp_create_pip;
12671 	}
12672 
12673 end_of_fcp_create_pip:
12674 	scsi_hba_nodename_compatible_free(nname, compatible);
12675 
12676 	if (pip != NULL && failure) {
12677 		(void) mdi_prop_remove(pip, NULL);
12678 		mutex_exit(&plun->lun_mutex);
12679 		mutex_exit(&pptr->port_mutex);
12680 		(void) mdi_pi_free(pip, 0);
12681 		mutex_enter(&pptr->port_mutex);
12682 		mutex_enter(&plun->lun_mutex);
12683 		pip = NULL;
12684 	}
12685 
12686 	return (pip);
12687 }
12688 
12689 static dev_info_t *
12690 fcp_find_existing_dip(struct fcp_lun *plun, dev_info_t *pdip, caddr_t name)
12691 {
12692 	uint_t			nbytes;
12693 	uchar_t			*bytes;
12694 	uint_t			nwords;
12695 	uint32_t		tgt_id;
12696 	int			*words;
12697 	dev_info_t 		*cdip;
12698 	dev_info_t 		*ndip;
12699 	struct fcp_tgt	*ptgt = plun->lun_tgt;
12700 	struct fcp_port	*pptr = ptgt->tgt_port;
12701 	int			circular;
12702 
12703 	ndi_devi_enter(pdip, &circular);
12704 
12705 	ndip = (dev_info_t *)DEVI(pdip)->devi_child;
12706 	while ((cdip = ndip) != NULL) {
12707 		ndip = (dev_info_t *)DEVI(cdip)->devi_sibling;
12708 
12709 		if (strcmp(DEVI(cdip)->devi_node_name, name)) {
12710 			continue;
12711 		}
12712 
12713 		if (ddi_prop_lookup_byte_array(DDI_DEV_T_ANY, cdip,
12714 		    DDI_PROP_DONTPASS | DDI_PROP_NOTPROM, NODE_WWN_PROP, &bytes,
12715 		    &nbytes) != DDI_PROP_SUCCESS) {
12716 			continue;
12717 		}
12718 
12719 		if (nbytes != FC_WWN_SIZE || bytes == NULL) {
12720 			if (bytes != NULL) {
12721 				ddi_prop_free(bytes);
12722 			}
12723 			continue;
12724 		}
12725 		ASSERT(bytes != NULL);
12726 
12727 		if (bcmp(bytes, ptgt->tgt_node_wwn.raw_wwn, nbytes) != 0) {
12728 			ddi_prop_free(bytes);
12729 			continue;
12730 		}
12731 
12732 		ddi_prop_free(bytes);
12733 
12734 		if (ddi_prop_lookup_byte_array(DDI_DEV_T_ANY, cdip,
12735 		    DDI_PROP_DONTPASS | DDI_PROP_NOTPROM, PORT_WWN_PROP, &bytes,
12736 		    &nbytes) != DDI_PROP_SUCCESS) {
12737 			continue;
12738 		}
12739 
12740 		if (nbytes != FC_WWN_SIZE || bytes == NULL) {
12741 			if (bytes != NULL) {
12742 				ddi_prop_free(bytes);
12743 			}
12744 			continue;
12745 		}
12746 		ASSERT(bytes != NULL);
12747 
12748 		if (bcmp(bytes, ptgt->tgt_port_wwn.raw_wwn, nbytes) != 0) {
12749 			ddi_prop_free(bytes);
12750 			continue;
12751 		}
12752 
12753 		ddi_prop_free(bytes);
12754 
12755 		if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY, cdip,
12756 		    DDI_PROP_DONTPASS | DDI_PROP_NOTPROM, TARGET_PROP, &words,
12757 		    &nwords) != DDI_PROP_SUCCESS) {
12758 			continue;
12759 		}
12760 
12761 		if (nwords != 1 || words == NULL) {
12762 			if (words != NULL) {
12763 				ddi_prop_free(words);
12764 			}
12765 			continue;
12766 		}
12767 		ASSERT(words != NULL);
12768 
12769 		/*
12770 		 * If there is no hard address - We might have to deal with
12771 		 * that by using WWN - Having said that it is important to
12772 		 * recognize this problem early so ssd can be informed of
12773 		 * the right interconnect type.
12774 		 */
12775 		if (!FC_TOP_EXTERNAL(pptr->port_topology) &&
12776 		    ptgt->tgt_hard_addr != 0) {
12777 			tgt_id =
12778 			(uint32_t)fcp_alpa_to_switch[ptgt->tgt_hard_addr];
12779 		} else {
12780 			tgt_id = ptgt->tgt_d_id;
12781 		}
12782 
12783 		if (tgt_id != (uint32_t)*words) {
12784 			ddi_prop_free(words);
12785 			continue;
12786 		}
12787 		ddi_prop_free(words);
12788 
12789 		if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY, cdip,
12790 		    DDI_PROP_DONTPASS | DDI_PROP_NOTPROM, LUN_PROP, &words,
12791 		    &nwords) != DDI_PROP_SUCCESS) {
12792 			continue;
12793 		}
12794 
12795 		if (nwords != 1 || words == NULL) {
12796 			if (words != NULL) {
12797 				ddi_prop_free(words);
12798 			}
12799 			continue;
12800 		}
12801 		ASSERT(words != NULL);
12802 
12803 		if (plun->lun_num == (uint16_t)*words) {
12804 			ddi_prop_free(words);
12805 			break;
12806 		}
12807 		ddi_prop_free(words);
12808 	}
12809 	ndi_devi_exit(pdip, circular);
12810 
12811 	return (cdip);
12812 }
12813 
12814 
12815 static int
12816 fcp_is_pip_present(struct fcp_lun *plun, mdi_pathinfo_t *pip)
12817 {
12818 	dev_info_t 	*pdip;
12819 	char		buf[MAXNAMELEN];
12820 	char		uaddr[MAXNAMELEN];
12821 	int		rval = FC_FAILURE;
12822 
12823 	ASSERT(MUTEX_HELD(&plun->lun_mutex));
12824 
12825 	pdip = plun->lun_tgt->tgt_port->port_dip;
12826 
12827 	/*
12828 	 * Check if pip (and not plun->lun_cip) is NULL. plun->lun_cip can be
12829 	 * non-NULL even when the LUN is not there as in the case when a LUN is
12830 	 * configured and then deleted on the device end (for T3/T4 case). In
12831 	 * such cases, pip will be NULL.
12832 	 *
12833 	 * If the device generates an RSCN, it will end up getting offlined when
12834 	 * it disappeared and a new LUN will get created when it is rediscovered
12835 	 * on the device. If we check for lun_cip here, the LUN will not end
12836 	 * up getting onlined since this function will end up returning a
12837 	 * FC_SUCCESS.
12838 	 *
12839 	 * The behavior is different on other devices. For instance, on a HDS,
12840 	 * there was no RSCN generated by the device but the next I/O generated
12841 	 * a check condition and rediscovery got triggered that way. So, in
12842 	 * such cases, this path will not be exercised
12843 	 */
12844 	if (pip == NULL) {
12845 		FCP_TRACE(fcp_logq, LUN_PORT->port_instbuf,
12846 		    fcp_trace, FCP_BUF_LEVEL_4, 0,
12847 		    "fcp_is_pip_present: plun->lun_cip is NULL: "
12848 		    "plun: %p lun state: %x num: %d target state: %x",
12849 		    plun, plun->lun_state, plun->lun_num,
12850 		    plun->lun_tgt->tgt_port->port_state);
12851 		return (rval);
12852 	}
12853 
12854 	fcp_wwn_to_ascii(plun->lun_tgt->tgt_port_wwn.raw_wwn, buf);
12855 
12856 	(void) snprintf(uaddr, MAXNAMELEN, "w%s,%x", buf, plun->lun_num);
12857 
12858 	if (plun->lun_old_guid) {
12859 		if (mdi_pi_find(pdip, plun->lun_old_guid, uaddr) == pip) {
12860 			rval = FC_SUCCESS;
12861 		}
12862 	} else {
12863 		if (mdi_pi_find(pdip, plun->lun_guid, uaddr) == pip) {
12864 			rval = FC_SUCCESS;
12865 		}
12866 	}
12867 	return (rval);
12868 }
12869 
12870 static mdi_pathinfo_t *
12871 fcp_find_existing_pip(struct fcp_lun *plun, dev_info_t *pdip)
12872 {
12873 	char			buf[MAXNAMELEN];
12874 	char			uaddr[MAXNAMELEN];
12875 	mdi_pathinfo_t		*pip;
12876 	struct fcp_tgt	*ptgt = plun->lun_tgt;
12877 	struct fcp_port	*pptr = ptgt->tgt_port;
12878 
12879 	ASSERT(MUTEX_HELD(&pptr->port_mutex));
12880 
12881 	fcp_wwn_to_ascii(ptgt->tgt_port_wwn.raw_wwn, buf);
12882 	(void) snprintf(uaddr, MAXNAMELEN, "w%s,%x", buf, plun->lun_num);
12883 
12884 	pip = mdi_pi_find(pdip, plun->lun_guid, uaddr);
12885 
12886 	return (pip);
12887 }
12888 
12889 
12890 static int
12891 fcp_online_child(struct fcp_lun *plun, child_info_t *cip, int lcount,
12892     int tcount, int flags, int *circ)
12893 {
12894 	int			rval;
12895 	struct fcp_port 	*pptr = plun->lun_tgt->tgt_port;
12896 	struct fcp_tgt	*ptgt = plun->lun_tgt;
12897 	dev_info_t		*cdip = NULL;
12898 
12899 	ASSERT(MUTEX_HELD(&pptr->port_mutex));
12900 	ASSERT(MUTEX_HELD(&plun->lun_mutex));
12901 
12902 	if (plun->lun_cip == NULL) {
12903 		FCP_TRACE(fcp_logq, pptr->port_instbuf,
12904 		    fcp_trace, FCP_BUF_LEVEL_3, 0,
12905 		    "fcp_online_child: plun->lun_cip is NULL: "
12906 		    "plun: %p state: %x num: %d target state: %x",
12907 		    plun, plun->lun_state, plun->lun_num,
12908 		    plun->lun_tgt->tgt_port->port_state);
12909 		return (NDI_FAILURE);
12910 	}
12911 again:
12912 	if (plun->lun_mpxio == 0) {
12913 		cdip = DIP(cip);
12914 		mutex_exit(&plun->lun_mutex);
12915 		mutex_exit(&pptr->port_mutex);
12916 
12917 		FCP_TRACE(fcp_logq, pptr->port_instbuf,
12918 		    fcp_trace, FCP_BUF_LEVEL_3, 0,
12919 		    "!Invoking ndi_devi_online for %s: target=%x lun=%x",
12920 		    ddi_get_name(cdip), ptgt->tgt_d_id, plun->lun_num);
12921 
12922 		/*
12923 		 * We could check for FCP_LUN_INIT here but chances
12924 		 * of getting here when it's already in FCP_LUN_INIT
12925 		 * is rare and a duplicate ndi_devi_online wouldn't
12926 		 * hurt either (as the node would already have been
12927 		 * in CF2)
12928 		 */
12929 		if (!i_ddi_devi_attached(ddi_get_parent(cdip)))
12930 			rval = ndi_devi_bind_driver(cdip, flags);
12931 		else
12932 			rval = ndi_devi_online(cdip, flags);
12933 		/*
12934 		 * We log the message into trace buffer if the device
12935 		 * is "ses" and into syslog for any other device
12936 		 * type. This is to prevent the ndi_devi_online failure
12937 		 * message that appears for V880/A5K ses devices.
12938 		 */
12939 		if (rval == NDI_SUCCESS) {
12940 			mutex_enter(&ptgt->tgt_mutex);
12941 			plun->lun_state |= FCP_LUN_INIT;
12942 			mutex_exit(&ptgt->tgt_mutex);
12943 		} else if (strncmp(ddi_node_name(cdip), "ses", 3) != 0) {
12944 			fcp_log(CE_NOTE, pptr->port_dip,
12945 			    "!ndi_devi_online:"
12946 			    " failed for %s: target=%x lun=%x %x",
12947 			    ddi_get_name(cdip), ptgt->tgt_d_id,
12948 			    plun->lun_num, rval);
12949 		} else {
12950 			FCP_TRACE(fcp_logq, pptr->port_instbuf,
12951 			    fcp_trace, FCP_BUF_LEVEL_3, 0,
12952 			    " !ndi_devi_online:"
12953 			    " failed for %s: target=%x lun=%x %x",
12954 			    ddi_get_name(cdip), ptgt->tgt_d_id,
12955 			    plun->lun_num, rval);
12956 		}
12957 	} else {
12958 		cdip = mdi_pi_get_client(PIP(cip));
12959 		mutex_exit(&plun->lun_mutex);
12960 		mutex_exit(&pptr->port_mutex);
12961 
12962 		FCP_TRACE(fcp_logq, pptr->port_instbuf,
12963 		    fcp_trace, FCP_BUF_LEVEL_3, 0,
12964 		    "!Invoking mdi_pi_online for %s: target=%x lun=%x",
12965 		    ddi_get_name(cdip), ptgt->tgt_d_id, plun->lun_num);
12966 
12967 		/*
12968 		 * Hold path and exit phci to avoid deadlock with power
12969 		 * management code during mdi_pi_online.
12970 		 */
12971 		mdi_hold_path(PIP(cip));
12972 		mdi_devi_exit_phci(pptr->port_dip, *circ);
12973 
12974 		rval = mdi_pi_online(PIP(cip), flags);
12975 
12976 		mdi_devi_enter_phci(pptr->port_dip, circ);
12977 		mdi_rele_path(PIP(cip));
12978 
12979 		if (rval == MDI_SUCCESS) {
12980 			mutex_enter(&ptgt->tgt_mutex);
12981 			plun->lun_state |= FCP_LUN_INIT;
12982 			mutex_exit(&ptgt->tgt_mutex);
12983 
12984 			/*
12985 			 * Clear MPxIO path permanent disable in case
12986 			 * fcp hotplug dropped the offline event.
12987 			 */
12988 			(void) mdi_pi_enable_path(PIP(cip), DRIVER_DISABLE);
12989 
12990 		} else if (rval == MDI_NOT_SUPPORTED) {
12991 			child_info_t	*old_cip = cip;
12992 
12993 			/*
12994 			 * MPxIO does not support this device yet.
12995 			 * Enumerate in legacy mode.
12996 			 */
12997 			mutex_enter(&pptr->port_mutex);
12998 			mutex_enter(&plun->lun_mutex);
12999 			plun->lun_mpxio = 0;
13000 			plun->lun_cip = NULL;
13001 			cdip = fcp_create_dip(plun, lcount, tcount);
13002 			plun->lun_cip = cip = CIP(cdip);
13003 			if (cip == NULL) {
13004 				fcp_log(CE_WARN, pptr->port_dip,
13005 				    "!fcp_online_child: "
13006 				    "Create devinfo failed for LU=%p", plun);
13007 				mutex_exit(&plun->lun_mutex);
13008 
13009 				mutex_enter(&ptgt->tgt_mutex);
13010 				plun->lun_state |= FCP_LUN_OFFLINE;
13011 				mutex_exit(&ptgt->tgt_mutex);
13012 
13013 				mutex_exit(&pptr->port_mutex);
13014 
13015 				/*
13016 				 * free the mdi_pathinfo node
13017 				 */
13018 				(void) mdi_pi_free(PIP(old_cip), 0);
13019 			} else {
13020 				FCP_TRACE(fcp_logq, pptr->port_instbuf,
13021 				    fcp_trace, FCP_BUF_LEVEL_3, 0,
13022 				    "fcp_online_child: creating devinfo "
13023 				    "node 0x%p for plun 0x%p",
13024 				    cip, plun);
13025 				mutex_exit(&plun->lun_mutex);
13026 				mutex_exit(&pptr->port_mutex);
13027 				/*
13028 				 * free the mdi_pathinfo node
13029 				 */
13030 				(void) mdi_pi_free(PIP(old_cip), 0);
13031 				mutex_enter(&pptr->port_mutex);
13032 				mutex_enter(&plun->lun_mutex);
13033 				goto again;
13034 			}
13035 		} else {
13036 			if (cdip) {
13037 				fcp_log(CE_NOTE, pptr->port_dip,
13038 				    "!fcp_online_child: mdi_pi_online:"
13039 				    " failed for %s: target=%x lun=%x %x",
13040 				    ddi_get_name(cdip), ptgt->tgt_d_id,
13041 				    plun->lun_num, rval);
13042 			}
13043 		}
13044 		rval = (rval == MDI_SUCCESS) ? NDI_SUCCESS : NDI_FAILURE;
13045 	}
13046 
13047 	if (rval == NDI_SUCCESS) {
13048 		if (cdip) {
13049 			(void) ndi_event_retrieve_cookie(
13050 			    pptr->port_ndi_event_hdl, cdip, FCAL_INSERT_EVENT,
13051 			    &fcp_insert_eid, NDI_EVENT_NOPASS);
13052 			(void) ndi_event_run_callbacks(pptr->port_ndi_event_hdl,
13053 			    cdip, fcp_insert_eid, NULL);
13054 		}
13055 	}
13056 	mutex_enter(&pptr->port_mutex);
13057 	mutex_enter(&plun->lun_mutex);
13058 	return (rval);
13059 }
13060 
13061 /* ARGSUSED */
13062 static int
13063 fcp_offline_child(struct fcp_lun *plun, child_info_t *cip, int lcount,
13064     int tcount, int flags, int *circ)
13065 {
13066 	int rval;
13067 	struct fcp_port 	*pptr = plun->lun_tgt->tgt_port;
13068 	struct fcp_tgt	*ptgt = plun->lun_tgt;
13069 	dev_info_t		*cdip;
13070 
13071 	ASSERT(MUTEX_HELD(&plun->lun_mutex));
13072 	ASSERT(MUTEX_HELD(&pptr->port_mutex));
13073 
13074 	if (plun->lun_cip == NULL) {
13075 		FCP_TRACE(fcp_logq, pptr->port_instbuf,
13076 		    fcp_trace, FCP_BUF_LEVEL_3, 0,
13077 		    "fcp_offline_child: plun->lun_cip is NULL: "
13078 		    "plun: %p lun state: %x num: %d target state: %x",
13079 		    plun, plun->lun_state, plun->lun_num,
13080 		    plun->lun_tgt->tgt_port->port_state);
13081 		return (NDI_FAILURE);
13082 	}
13083 
13084 	if (plun->lun_mpxio == 0) {
13085 		cdip = DIP(cip);
13086 		mutex_exit(&plun->lun_mutex);
13087 		mutex_exit(&pptr->port_mutex);
13088 		rval = ndi_devi_offline(DIP(cip), flags);
13089 		if (rval != NDI_SUCCESS) {
13090 			FCP_TRACE(fcp_logq, pptr->port_instbuf,
13091 			    fcp_trace, FCP_BUF_LEVEL_3, 0,
13092 			    "fcp_offline_child: ndi_devi_offline failed "
13093 			    "rval=%x cip=%p", rval, cip);
13094 		}
13095 	} else {
13096 		cdip = mdi_pi_get_client(PIP(cip));
13097 		mutex_exit(&plun->lun_mutex);
13098 		mutex_exit(&pptr->port_mutex);
13099 
13100 		/*
13101 		 * Exit phci to avoid deadlock with power management code
13102 		 * during mdi_pi_offline
13103 		 */
13104 		mdi_hold_path(PIP(cip));
13105 		mdi_devi_exit_phci(pptr->port_dip, *circ);
13106 
13107 		rval = mdi_pi_offline(PIP(cip), flags);
13108 
13109 		mdi_devi_enter_phci(pptr->port_dip, circ);
13110 		mdi_rele_path(PIP(cip));
13111 
13112 		if (rval == MDI_SUCCESS) {
13113 			/*
13114 			 * Clear MPxIO path permanent disable as the path is
13115 			 * already offlined.
13116 			 */
13117 			(void) mdi_pi_enable_path(PIP(cip), DRIVER_DISABLE);
13118 
13119 			if (flags & NDI_DEVI_REMOVE) {
13120 				(void) mdi_pi_free(PIP(cip), 0);
13121 			}
13122 		} else {
13123 			FCP_TRACE(fcp_logq, pptr->port_instbuf,
13124 			    fcp_trace, FCP_BUF_LEVEL_3, 0,
13125 			    "fcp_offline_child: mdi_pi_offline failed "
13126 			    "rval=%x cip=%p", rval, cip);
13127 		}
13128 		rval = (rval == MDI_SUCCESS) ? NDI_SUCCESS : NDI_FAILURE;
13129 	}
13130 
13131 	mutex_enter(&ptgt->tgt_mutex);
13132 	plun->lun_state &= ~FCP_LUN_INIT;
13133 	mutex_exit(&ptgt->tgt_mutex);
13134 
13135 	mutex_enter(&pptr->port_mutex);
13136 	mutex_enter(&plun->lun_mutex);
13137 
13138 	if (rval == NDI_SUCCESS) {
13139 		cdip = NULL;
13140 		if (flags & NDI_DEVI_REMOVE) {
13141 			/*
13142 			 * If the guid of the LUN changes, lun_cip will not
13143 			 * equal to cip, and after offlining the LUN with the
13144 			 * old guid, we should keep lun_cip since it's the cip
13145 			 * of the LUN with the new guid.
13146 			 * Otherwise remove our reference to child node.
13147 			 */
13148 			if (plun->lun_cip == cip)
13149 				plun->lun_cip = NULL;
13150 			if (plun->lun_old_guid) {
13151 				kmem_free(plun->lun_old_guid,
13152 				    plun->lun_old_guid_size);
13153 				plun->lun_old_guid = NULL;
13154 				plun->lun_old_guid_size = 0;
13155 			}
13156 		}
13157 	}
13158 
13159 	if (cdip) {
13160 		FCP_TRACE(fcp_logq, pptr->port_instbuf,
13161 		    fcp_trace, FCP_BUF_LEVEL_3, 0, "!%s failed for %s:"
13162 		    " target=%x lun=%x", "ndi_offline",
13163 		    ddi_get_name(cdip), ptgt->tgt_d_id, plun->lun_num);
13164 	}
13165 
13166 	return (rval);
13167 }
13168 
13169 static void
13170 fcp_remove_child(struct fcp_lun *plun)
13171 {
13172 	ASSERT(MUTEX_HELD(&plun->lun_mutex));
13173 
13174 	if (fcp_is_child_present(plun, plun->lun_cip) == FC_SUCCESS) {
13175 		if (plun->lun_mpxio == 0) {
13176 			(void) ndi_prop_remove_all(DIP(plun->lun_cip));
13177 			(void) ndi_devi_free(DIP(plun->lun_cip));
13178 		} else {
13179 			mutex_exit(&plun->lun_mutex);
13180 			mutex_exit(&plun->lun_tgt->tgt_mutex);
13181 			mutex_exit(&plun->lun_tgt->tgt_port->port_mutex);
13182 			FCP_TRACE(fcp_logq,
13183 			    plun->lun_tgt->tgt_port->port_instbuf,
13184 			    fcp_trace, FCP_BUF_LEVEL_3, 0,
13185 			    "lun=%p pip freed %p", plun, plun->lun_cip);
13186 			(void) mdi_prop_remove(PIP(plun->lun_cip), NULL);
13187 			(void) mdi_pi_free(PIP(plun->lun_cip), 0);
13188 			mutex_enter(&plun->lun_tgt->tgt_port->port_mutex);
13189 			mutex_enter(&plun->lun_tgt->tgt_mutex);
13190 			mutex_enter(&plun->lun_mutex);
13191 		}
13192 	}
13193 
13194 	plun->lun_cip = NULL;
13195 }
13196 
13197 /*
13198  * called when a timeout occurs
13199  *
13200  * can be scheduled during an attach or resume (if not already running)
13201  *
13202  * one timeout is set up for all ports
13203  *
13204  * acquires and releases the global mutex
13205  */
13206 /*ARGSUSED*/
13207 static void
13208 fcp_watch(void *arg)
13209 {
13210 	struct fcp_port	*pptr;
13211 	struct fcp_ipkt	*icmd;
13212 	struct fcp_ipkt	*nicmd;
13213 	struct fcp_pkt 	*cmd;
13214 	struct fcp_pkt 	*ncmd;
13215 	struct fcp_pkt 	*tail;
13216 	struct fcp_pkt 	*pcmd;
13217 	struct fcp_pkt	*save_head;
13218 	struct fcp_port	*save_port;
13219 
13220 	/* increment global watchdog time */
13221 	fcp_watchdog_time += fcp_watchdog_timeout;
13222 
13223 	mutex_enter(&fcp_global_mutex);
13224 
13225 	/* scan each port in our list */
13226 	for (pptr = fcp_port_head; pptr != NULL; pptr = pptr->port_next) {
13227 		save_port = fcp_port_head;
13228 		pptr->port_state |= FCP_STATE_IN_WATCHDOG;
13229 		mutex_exit(&fcp_global_mutex);
13230 
13231 		mutex_enter(&pptr->port_mutex);
13232 		if (pptr->port_ipkt_list == NULL &&
13233 		    (pptr->port_state & (FCP_STATE_SUSPENDED |
13234 		    FCP_STATE_DETACHING | FCP_STATE_POWER_DOWN))) {
13235 			pptr->port_state &= ~FCP_STATE_IN_WATCHDOG;
13236 			mutex_exit(&pptr->port_mutex);
13237 			mutex_enter(&fcp_global_mutex);
13238 			goto end_of_watchdog;
13239 		}
13240 
13241 		/*
13242 		 * We check if a list of targets need to be offlined.
13243 		 */
13244 		if (pptr->port_offline_tgts) {
13245 			fcp_scan_offline_tgts(pptr);
13246 		}
13247 
13248 		/*
13249 		 * We check if a list of luns need to be offlined.
13250 		 */
13251 		if (pptr->port_offline_luns) {
13252 			fcp_scan_offline_luns(pptr);
13253 		}
13254 
13255 		/*
13256 		 * We check if a list of targets or luns need to be reset.
13257 		 */
13258 		if (pptr->port_reset_list) {
13259 			fcp_check_reset_delay(pptr);
13260 		}
13261 
13262 		mutex_exit(&pptr->port_mutex);
13263 
13264 		/*
13265 		 * This is where the pending commands (pkt) are checked for
13266 		 * timeout.
13267 		 */
13268 		mutex_enter(&pptr->port_pkt_mutex);
13269 		tail = pptr->port_pkt_tail;
13270 
13271 		for (pcmd = NULL, cmd = pptr->port_pkt_head;
13272 		    cmd != NULL; cmd = ncmd) {
13273 			ncmd = cmd->cmd_next;
13274 			/*
13275 			 * If a command is in this queue the bit CFLAG_IN_QUEUE
13276 			 * must be set.
13277 			 */
13278 			ASSERT(cmd->cmd_flags & CFLAG_IN_QUEUE);
13279 			/*
13280 			 * FCP_INVALID_TIMEOUT will be set for those
13281 			 * command that need to be failed. Mostly those
13282 			 * cmds that could not be queued down for the
13283 			 * "timeout" value. cmd->cmd_timeout is used
13284 			 * to try and requeue the command regularly.
13285 			 */
13286 			if (cmd->cmd_timeout >= fcp_watchdog_time) {
13287 				/*
13288 				 * This command hasn't timed out yet.  Let's
13289 				 * go to the next one.
13290 				 */
13291 				pcmd = cmd;
13292 				goto end_of_loop;
13293 			}
13294 
13295 			if (cmd == pptr->port_pkt_head) {
13296 				ASSERT(pcmd == NULL);
13297 				pptr->port_pkt_head = cmd->cmd_next;
13298 			} else {
13299 				ASSERT(pcmd != NULL);
13300 				pcmd->cmd_next = cmd->cmd_next;
13301 			}
13302 
13303 			if (cmd == pptr->port_pkt_tail) {
13304 				ASSERT(cmd->cmd_next == NULL);
13305 				pptr->port_pkt_tail = pcmd;
13306 				if (pcmd)
13307 					pcmd->cmd_next = NULL;
13308 			}
13309 			cmd->cmd_next = NULL;
13310 
13311 			/*
13312 			 * save the current head before dropping the
13313 			 * mutex - If the head doesn't remain the
13314 			 * same after re acquiring the mutex, just
13315 			 * bail out and revisit on next tick.
13316 			 *
13317 			 * PS: The tail pointer can change as the commands
13318 			 * get requeued after failure to retransport
13319 			 */
13320 			save_head = pptr->port_pkt_head;
13321 			mutex_exit(&pptr->port_pkt_mutex);
13322 
13323 			if (cmd->cmd_fp_pkt->pkt_timeout ==
13324 			    FCP_INVALID_TIMEOUT) {
13325 				struct scsi_pkt 	*pkt = cmd->cmd_pkt;
13326 				struct fcp_lun 	*plun;
13327 				struct fcp_tgt	*ptgt;
13328 
13329 				plun = ADDR2LUN(&pkt->pkt_address);
13330 				ptgt = plun->lun_tgt;
13331 
13332 				FCP_TRACE(fcp_logq, pptr->port_instbuf,
13333 				    fcp_trace, FCP_BUF_LEVEL_2, 0,
13334 				    "SCSI cmd 0x%x to D_ID=%x timed out",
13335 				    pkt->pkt_cdbp[0], ptgt->tgt_d_id);
13336 
13337 				cmd->cmd_state == FCP_PKT_ABORTING ?
13338 				    fcp_fail_cmd(cmd, CMD_RESET,
13339 				    STAT_DEV_RESET) : fcp_fail_cmd(cmd,
13340 				    CMD_TIMEOUT, STAT_ABORTED);
13341 			} else {
13342 				fcp_retransport_cmd(pptr, cmd);
13343 			}
13344 			mutex_enter(&pptr->port_pkt_mutex);
13345 			if (save_head && save_head != pptr->port_pkt_head) {
13346 				/*
13347 				 * Looks like linked list got changed (mostly
13348 				 * happens when an an OFFLINE LUN code starts
13349 				 * returning overflow queue commands in
13350 				 * parallel. So bail out and revisit during
13351 				 * next tick
13352 				 */
13353 				break;
13354 			}
13355 end_of_loop:
13356 			/*
13357 			 * Scan only upto the previously known tail pointer
13358 			 * to avoid excessive processing - lots of new packets
13359 			 * could have been added to the tail or the old ones
13360 			 * re-queued.
13361 			 */
13362 			if (cmd == tail) {
13363 				break;
13364 			}
13365 		}
13366 		mutex_exit(&pptr->port_pkt_mutex);
13367 
13368 		mutex_enter(&pptr->port_mutex);
13369 		for (icmd = pptr->port_ipkt_list; icmd != NULL; icmd = nicmd) {
13370 			struct fcp_tgt *ptgt = icmd->ipkt_tgt;
13371 
13372 			nicmd = icmd->ipkt_next;
13373 			if ((icmd->ipkt_restart != 0) &&
13374 			    (icmd->ipkt_restart >= fcp_watchdog_time)) {
13375 				/* packet has not timed out */
13376 				continue;
13377 			}
13378 
13379 			/* time for packet re-transport */
13380 			if (icmd == pptr->port_ipkt_list) {
13381 				pptr->port_ipkt_list = icmd->ipkt_next;
13382 				if (pptr->port_ipkt_list) {
13383 					pptr->port_ipkt_list->ipkt_prev =
13384 						NULL;
13385 				}
13386 			} else {
13387 				icmd->ipkt_prev->ipkt_next = icmd->ipkt_next;
13388 				if (icmd->ipkt_next) {
13389 					icmd->ipkt_next->ipkt_prev =
13390 						icmd->ipkt_prev;
13391 				}
13392 			}
13393 			icmd->ipkt_next = NULL;
13394 			icmd->ipkt_prev = NULL;
13395 			mutex_exit(&pptr->port_mutex);
13396 
13397 			if (fcp_is_retryable(icmd)) {
13398 				fc_ulp_rscn_info_t *rscnp =
13399 				    (fc_ulp_rscn_info_t *)icmd->ipkt_fpkt->
13400 				    pkt_ulp_rscn_infop;
13401 
13402 				FCP_TRACE(fcp_logq, pptr->port_instbuf,
13403 				    fcp_trace, FCP_BUF_LEVEL_2, 0,
13404 				    "%x to D_ID=%x Retrying..",
13405 				    icmd->ipkt_opcode,
13406 				    icmd->ipkt_fpkt->pkt_cmd_fhdr.d_id);
13407 
13408 				/*
13409 				 * Update the RSCN count in the packet
13410 				 * before resending.
13411 				 */
13412 
13413 				if (rscnp != NULL) {
13414 					rscnp->ulp_rscn_count =
13415 					    fc_ulp_get_rscn_count(pptr->
13416 					    port_fp_handle);
13417 				}
13418 
13419 				mutex_enter(&pptr->port_mutex);
13420 				mutex_enter(&ptgt->tgt_mutex);
13421 				if (!FCP_STATE_CHANGED(pptr, ptgt, icmd)) {
13422 					mutex_exit(&ptgt->tgt_mutex);
13423 					mutex_exit(&pptr->port_mutex);
13424 					switch (icmd->ipkt_opcode) {
13425 					int rval;
13426 					case LA_ELS_PLOGI:
13427 						if ((rval = fc_ulp_login(
13428 						    pptr->port_fp_handle,
13429 						    &icmd->ipkt_fpkt, 1)) ==
13430 						    FC_SUCCESS) {
13431 							mutex_enter(
13432 							    &pptr->port_mutex);
13433 							continue;
13434 						}
13435 						if (fcp_handle_ipkt_errors(
13436 						    pptr, ptgt, icmd, rval,
13437 						    "PLOGI") == DDI_SUCCESS) {
13438 							mutex_enter(
13439 							    &pptr->port_mutex);
13440 							continue;
13441 						}
13442 						break;
13443 
13444 					case LA_ELS_PRLI:
13445 						if ((rval = fc_ulp_issue_els(
13446 						    pptr->port_fp_handle,
13447 						    icmd->ipkt_fpkt)) ==
13448 						    FC_SUCCESS) {
13449 							mutex_enter(
13450 							    &pptr->port_mutex);
13451 							continue;
13452 						}
13453 						if (fcp_handle_ipkt_errors(
13454 						    pptr, ptgt, icmd, rval,
13455 						    "PRLI") == DDI_SUCCESS) {
13456 							mutex_enter(
13457 							    &pptr->port_mutex);
13458 							continue;
13459 						}
13460 						break;
13461 
13462 					default:
13463 						if ((rval = fcp_transport(
13464 						    pptr->port_fp_handle,
13465 						    icmd->ipkt_fpkt, 1)) ==
13466 						    FC_SUCCESS) {
13467 							mutex_enter(
13468 							    &pptr->port_mutex);
13469 							continue;
13470 						}
13471 						if (fcp_handle_ipkt_errors(
13472 						    pptr, ptgt, icmd, rval,
13473 						    "PRLI") == DDI_SUCCESS) {
13474 							mutex_enter(
13475 							    &pptr->port_mutex);
13476 							continue;
13477 						}
13478 						break;
13479 					}
13480 				} else {
13481 					mutex_exit(&ptgt->tgt_mutex);
13482 					mutex_exit(&pptr->port_mutex);
13483 				}
13484 			} else {
13485 				fcp_print_error(icmd->ipkt_fpkt);
13486 			}
13487 
13488 			(void) fcp_call_finish_init(pptr, ptgt,
13489 			    icmd->ipkt_link_cnt, icmd->ipkt_change_cnt,
13490 			    icmd->ipkt_cause);
13491 			fcp_icmd_free(pptr, icmd);
13492 			mutex_enter(&pptr->port_mutex);
13493 		}
13494 
13495 		pptr->port_state &= ~FCP_STATE_IN_WATCHDOG;
13496 		mutex_exit(&pptr->port_mutex);
13497 		mutex_enter(&fcp_global_mutex);
13498 
13499 end_of_watchdog:
13500 		/*
13501 		 * Bail out early before getting into trouble
13502 		 */
13503 		if (save_port != fcp_port_head) {
13504 			break;
13505 		}
13506 	}
13507 
13508 	if (fcp_watchdog_init > 0) {
13509 		/* reschedule timeout to go again */
13510 		fcp_watchdog_id =
13511 		    timeout(fcp_watch, NULL, fcp_watchdog_tick);
13512 	}
13513 	mutex_exit(&fcp_global_mutex);
13514 }
13515 
13516 
13517 static void
13518 fcp_check_reset_delay(struct fcp_port *pptr)
13519 {
13520 	uint32_t		tgt_cnt;
13521 	int			level;
13522 	struct fcp_tgt	*ptgt;
13523 	struct fcp_lun	*plun;
13524 	struct fcp_reset_elem *cur = NULL;
13525 	struct fcp_reset_elem *next = NULL;
13526 	struct fcp_reset_elem *prev = NULL;
13527 
13528 	ASSERT(mutex_owned(&pptr->port_mutex));
13529 
13530 	next = pptr->port_reset_list;
13531 	while ((cur = next) != NULL) {
13532 		next = cur->next;
13533 
13534 		if (cur->timeout < fcp_watchdog_time) {
13535 			prev = cur;
13536 			continue;
13537 		}
13538 
13539 		ptgt = cur->tgt;
13540 		plun = cur->lun;
13541 		tgt_cnt = cur->tgt_cnt;
13542 
13543 		if (ptgt) {
13544 			level = RESET_TARGET;
13545 		} else {
13546 			ASSERT(plun != NULL);
13547 			level = RESET_LUN;
13548 			ptgt = plun->lun_tgt;
13549 		}
13550 		if (prev) {
13551 			prev->next = next;
13552 		} else {
13553 			/*
13554 			 * Because we drop port mutex while doing aborts for
13555 			 * packets, we can't rely on reset_list pointing to
13556 			 * our head
13557 			 */
13558 			if (cur == pptr->port_reset_list) {
13559 				pptr->port_reset_list = next;
13560 			} else {
13561 				struct fcp_reset_elem *which;
13562 
13563 				which = pptr->port_reset_list;
13564 				while (which && which->next != cur) {
13565 					which = which->next;
13566 				}
13567 				ASSERT(which != NULL);
13568 
13569 				which->next = next;
13570 				prev = which;
13571 			}
13572 		}
13573 
13574 		kmem_free(cur, sizeof (*cur));
13575 
13576 		if (tgt_cnt == ptgt->tgt_change_cnt) {
13577 			mutex_enter(&ptgt->tgt_mutex);
13578 			if (level == RESET_TARGET) {
13579 				fcp_update_tgt_state(ptgt,
13580 				    FCP_RESET, FCP_LUN_BUSY);
13581 			} else {
13582 				fcp_update_lun_state(plun,
13583 				    FCP_RESET, FCP_LUN_BUSY);
13584 			}
13585 			mutex_exit(&ptgt->tgt_mutex);
13586 
13587 			mutex_exit(&pptr->port_mutex);
13588 			fcp_abort_all(pptr, ptgt, plun, tgt_cnt);
13589 			mutex_enter(&pptr->port_mutex);
13590 		}
13591 	}
13592 }
13593 
13594 
13595 static void
13596 fcp_abort_all(struct fcp_port *pptr, struct fcp_tgt *ttgt,
13597     struct fcp_lun *rlun, int tgt_cnt)
13598 {
13599 	int 			rval;
13600 	struct fcp_lun 	*tlun, *nlun;
13601 	struct fcp_pkt 	*pcmd = NULL, *ncmd = NULL,
13602 				*cmd = NULL, *head = NULL,
13603 				*tail = NULL;
13604 
13605 	mutex_enter(&pptr->port_pkt_mutex);
13606 	for (cmd = pptr->port_pkt_head; cmd != NULL; cmd = ncmd) {
13607 		struct fcp_lun *plun = ADDR2LUN(&cmd->cmd_pkt->pkt_address);
13608 		struct fcp_tgt *ptgt = plun->lun_tgt;
13609 
13610 		ncmd = cmd->cmd_next;
13611 
13612 		if (ptgt != ttgt && plun != rlun) {
13613 			pcmd = cmd;
13614 			continue;
13615 		}
13616 
13617 		if (pcmd != NULL) {
13618 			ASSERT(pptr->port_pkt_head != cmd);
13619 			pcmd->cmd_next = ncmd;
13620 		} else {
13621 			ASSERT(cmd == pptr->port_pkt_head);
13622 			pptr->port_pkt_head = ncmd;
13623 		}
13624 		if (pptr->port_pkt_tail == cmd) {
13625 			ASSERT(cmd->cmd_next == NULL);
13626 			pptr->port_pkt_tail = pcmd;
13627 			if (pcmd != NULL)
13628 				pcmd->cmd_next = NULL;
13629 		}
13630 
13631 		if (head == NULL) {
13632 			head = tail = cmd;
13633 		} else {
13634 			ASSERT(tail != NULL);
13635 			tail->cmd_next = cmd;
13636 			tail = cmd;
13637 		}
13638 		cmd->cmd_next = NULL;
13639 	}
13640 	mutex_exit(&pptr->port_pkt_mutex);
13641 
13642 	for (cmd = head; cmd != NULL; cmd = ncmd) {
13643 		struct scsi_pkt *pkt = cmd->cmd_pkt;
13644 
13645 		ncmd = cmd->cmd_next;
13646 		ASSERT(pkt != NULL);
13647 
13648 		mutex_enter(&pptr->port_mutex);
13649 		if (ttgt->tgt_change_cnt == tgt_cnt) {
13650 			mutex_exit(&pptr->port_mutex);
13651 			cmd->cmd_flags &= ~CFLAG_IN_QUEUE;
13652 			pkt->pkt_reason = CMD_RESET;
13653 			pkt->pkt_statistics |= STAT_DEV_RESET;
13654 			cmd->cmd_state = FCP_PKT_IDLE;
13655 			fcp_post_callback(cmd);
13656 		} else {
13657 			mutex_exit(&pptr->port_mutex);
13658 		}
13659 	}
13660 
13661 	/*
13662 	 * If the FCA will return all the commands in its queue then our
13663 	 * work is easy, just return.
13664 	 */
13665 
13666 	if (pptr->port_reset_action == FC_RESET_RETURN_ALL)
13667 		return;
13668 
13669 	/*
13670 	 * For RESET_LUN get hold of target pointer
13671 	 */
13672 	if (ttgt == NULL) {
13673 		ASSERT(rlun != NULL);
13674 
13675 		ttgt = rlun->lun_tgt;
13676 
13677 		ASSERT(ttgt != NULL);
13678 	}
13679 
13680 	/*
13681 	 * There are some severe race conditions here.
13682 	 * While we are trying to abort the pkt, it might be completing
13683 	 * so mark it aborted and if the abort does not succeed then
13684 	 * handle it in the watch thread.
13685 	 */
13686 	mutex_enter(&ttgt->tgt_mutex);
13687 	nlun = ttgt->tgt_lun;
13688 	mutex_exit(&ttgt->tgt_mutex);
13689 	while ((tlun = nlun) != NULL) {
13690 		int restart = 0;
13691 		if (rlun && rlun != tlun) {
13692 			mutex_enter(&ttgt->tgt_mutex);
13693 			nlun = tlun->lun_next;
13694 			mutex_exit(&ttgt->tgt_mutex);
13695 			continue;
13696 		}
13697 		mutex_enter(&tlun->lun_mutex);
13698 		cmd = tlun->lun_pkt_head;
13699 		while (cmd != NULL) {
13700 			if (cmd->cmd_state == FCP_PKT_ISSUED) {
13701 				struct scsi_pkt *pkt;
13702 
13703 				restart = 1;
13704 				cmd->cmd_state = FCP_PKT_ABORTING;
13705 				mutex_exit(&tlun->lun_mutex);
13706 				rval = fc_ulp_abort(pptr->port_fp_handle,
13707 				    cmd->cmd_fp_pkt, KM_SLEEP);
13708 				if (rval == FC_SUCCESS) {
13709 					pkt = cmd->cmd_pkt;
13710 					pkt->pkt_reason = CMD_RESET;
13711 					pkt->pkt_statistics |= STAT_DEV_RESET;
13712 					cmd->cmd_state = FCP_PKT_IDLE;
13713 					fcp_post_callback(cmd);
13714 				} else {
13715 					caddr_t msg;
13716 
13717 					(void) fc_ulp_error(rval, &msg);
13718 
13719 					/*
13720 					 * This part is tricky. The abort
13721 					 * failed and now the command could
13722 					 * be completing.  The cmd_state ==
13723 					 * FCP_PKT_ABORTING should save
13724 					 * us in fcp_cmd_callback. If we
13725 					 * are already aborting ignore the
13726 					 * command in fcp_cmd_callback.
13727 					 * Here we leave this packet for 20
13728 					 * sec to be aborted in the
13729 					 * fcp_watch thread.
13730 					 */
13731 					fcp_log(CE_WARN, pptr->port_dip,
13732 					    "!Abort failed after reset %s",
13733 					    msg);
13734 
13735 					cmd->cmd_timeout =
13736 					    fcp_watchdog_time +
13737 					    cmd->cmd_pkt->pkt_time +
13738 					    FCP_FAILED_DELAY;
13739 
13740 					cmd->cmd_fp_pkt->pkt_timeout =
13741 					    FCP_INVALID_TIMEOUT;
13742 					/*
13743 					 * This is a hack, cmd is put in the
13744 					 * overflow queue so that it can be
13745 					 * timed out finally
13746 					 */
13747 					cmd->cmd_flags |= CFLAG_IN_QUEUE;
13748 
13749 					mutex_enter(&pptr->port_pkt_mutex);
13750 					if (pptr->port_pkt_head) {
13751 						ASSERT(pptr->port_pkt_tail
13752 								!= NULL);
13753 						pptr->port_pkt_tail->cmd_next
13754 						    = cmd;
13755 						pptr->port_pkt_tail = cmd;
13756 					} else {
13757 						ASSERT(pptr->port_pkt_tail
13758 								== NULL);
13759 						pptr->port_pkt_head =
13760 							pptr->port_pkt_tail
13761 								= cmd;
13762 					}
13763 					cmd->cmd_next = NULL;
13764 					mutex_exit(&pptr->port_pkt_mutex);
13765 				}
13766 				mutex_enter(&tlun->lun_mutex);
13767 				cmd = tlun->lun_pkt_head;
13768 			} else {
13769 				cmd = cmd->cmd_forw;
13770 			}
13771 		}
13772 		mutex_exit(&tlun->lun_mutex);
13773 
13774 		mutex_enter(&ttgt->tgt_mutex);
13775 		restart == 1 ? (nlun = ttgt->tgt_lun) : (nlun = tlun->lun_next);
13776 		mutex_exit(&ttgt->tgt_mutex);
13777 
13778 		mutex_enter(&pptr->port_mutex);
13779 		if (tgt_cnt != ttgt->tgt_change_cnt) {
13780 			mutex_exit(&pptr->port_mutex);
13781 			return;
13782 		} else {
13783 			mutex_exit(&pptr->port_mutex);
13784 		}
13785 	}
13786 }
13787 
13788 
13789 /*
13790  * unlink the soft state, returning the soft state found (if any)
13791  *
13792  * acquires and releases the global mutex
13793  */
13794 struct fcp_port *
13795 fcp_soft_state_unlink(struct fcp_port *pptr)
13796 {
13797 	struct fcp_port	*hptr;		/* ptr index */
13798 	struct fcp_port	*tptr;		/* prev hptr */
13799 
13800 	mutex_enter(&fcp_global_mutex);
13801 	for (hptr = fcp_port_head, tptr = NULL;
13802 	    hptr != NULL;
13803 	    tptr = hptr, hptr = hptr->port_next) {
13804 		if (hptr == pptr) {
13805 			/* we found a match -- remove this item */
13806 			if (tptr == NULL) {
13807 				/* we're at the head of the list */
13808 				fcp_port_head = hptr->port_next;
13809 			} else {
13810 				tptr->port_next = hptr->port_next;
13811 			}
13812 			break;			/* success */
13813 		}
13814 	}
13815 	if (fcp_port_head == NULL) {
13816 		fcp_cleanup_blacklist(&fcp_lun_blacklist);
13817 	}
13818 	mutex_exit(&fcp_global_mutex);
13819 	return (hptr);
13820 }
13821 
13822 
13823 /*
13824  * called by fcp_scsi_hba_tgt_init to find a LUN given a
13825  * WWN and a LUN number
13826  */
13827 /* ARGSUSED */
13828 static struct fcp_lun *
13829 fcp_lookup_lun(struct fcp_port *pptr, uchar_t *wwn, uint16_t lun)
13830 {
13831 	int hash;
13832 	struct fcp_tgt *ptgt;
13833 	struct fcp_lun *plun;
13834 
13835 	ASSERT(mutex_owned(&pptr->port_mutex));
13836 
13837 	hash = FCP_HASH(wwn);
13838 	for (ptgt = pptr->port_tgt_hash_table[hash]; ptgt != NULL;
13839 	    ptgt = ptgt->tgt_next) {
13840 		if (bcmp((caddr_t)wwn, (caddr_t)&ptgt->tgt_port_wwn.raw_wwn[0],
13841 		    sizeof (ptgt->tgt_port_wwn)) == 0) {
13842 			mutex_enter(&ptgt->tgt_mutex);
13843 			for (plun = ptgt->tgt_lun;
13844 			    plun != NULL;
13845 			    plun = plun->lun_next) {
13846 				if (plun->lun_num == lun) {
13847 					mutex_exit(&ptgt->tgt_mutex);
13848 					return (plun);
13849 				}
13850 			}
13851 			mutex_exit(&ptgt->tgt_mutex);
13852 			return (NULL);
13853 		}
13854 	}
13855 	return (NULL);
13856 }
13857 
13858 /*
13859  *     Function: fcp_prepare_pkt
13860  *
13861  *  Description: This function prepares the SCSI cmd pkt, passed by the caller,
13862  *		 for fcp_start(). It binds the data or partially maps it.
13863  *		 Builds the FCP header and starts the initialization of the
13864  *		 Fibre Channel header.
13865  *
13866  *     Argument: *pptr		FCP port.
13867  *		 *cmd		FCP packet.
13868  *		 *plun		LUN the command will be sent to.
13869  *
13870  *      Context: User, Kernel and Interrupt context.
13871  */
13872 static void
13873 fcp_prepare_pkt(struct fcp_port *pptr, struct fcp_pkt *cmd,
13874     struct fcp_lun *plun)
13875 {
13876 	fc_packet_t		*fpkt = cmd->cmd_fp_pkt;
13877 	struct fcp_tgt		*ptgt = plun->lun_tgt;
13878 	struct fcp_cmd 		*fcmd = &cmd->cmd_fcp_cmd;
13879 
13880 	ASSERT(cmd->cmd_pkt->pkt_comp ||
13881 	    (cmd->cmd_pkt->pkt_flags & FLAG_NOINTR));
13882 
13883 	if (cmd->cmd_pkt->pkt_numcookies) {
13884 		if (cmd->cmd_pkt->pkt_dma_flags & DDI_DMA_READ) {
13885 			fcmd->fcp_cntl.cntl_read_data = 1;
13886 			fcmd->fcp_cntl.cntl_write_data = 0;
13887 			fpkt->pkt_tran_type = FC_PKT_FCP_READ;
13888 		} else {
13889 			fcmd->fcp_cntl.cntl_read_data = 0;
13890 			fcmd->fcp_cntl.cntl_write_data = 1;
13891 			fpkt->pkt_tran_type = FC_PKT_FCP_WRITE;
13892 		}
13893 
13894 		fpkt->pkt_data_cookie = cmd->cmd_pkt->pkt_cookies;
13895 
13896 		fpkt->pkt_data_cookie_cnt = cmd->cmd_pkt->pkt_numcookies;
13897 		ASSERT(fpkt->pkt_data_cookie_cnt <=
13898 		    pptr->port_data_dma_attr.dma_attr_sgllen);
13899 
13900 		cmd->cmd_dmacount = cmd->cmd_pkt->pkt_dma_len;
13901 
13902 		/* FCA needs pkt_datalen to be set */
13903 		fpkt->pkt_datalen = cmd->cmd_dmacount;
13904 		fcmd->fcp_data_len = cmd->cmd_dmacount;
13905 	} else {
13906 		fcmd->fcp_cntl.cntl_read_data = 0;
13907 		fcmd->fcp_cntl.cntl_write_data = 0;
13908 		fpkt->pkt_tran_type = FC_PKT_EXCHANGE;
13909 		fpkt->pkt_datalen = 0;
13910 		fcmd->fcp_data_len = 0;
13911 	}
13912 
13913 	/* set up the Tagged Queuing type */
13914 	if (cmd->cmd_pkt->pkt_flags & FLAG_HTAG) {
13915 		fcmd->fcp_cntl.cntl_qtype = FCP_QTYPE_HEAD_OF_Q;
13916 	} else if (cmd->cmd_pkt->pkt_flags & FLAG_OTAG) {
13917 		fcmd->fcp_cntl.cntl_qtype = FCP_QTYPE_ORDERED;
13918 	} else if (cmd->cmd_pkt->pkt_flags & FLAG_STAG) {
13919 		fcmd->fcp_cntl.cntl_qtype = FCP_QTYPE_SIMPLE;
13920 	} else {
13921 		fcmd->fcp_cntl.cntl_qtype = FCP_QTYPE_UNTAGGED;
13922 	}
13923 
13924 	fcmd->fcp_ent_addr = plun->lun_addr;
13925 
13926 	if (pptr->port_fcp_dma != FC_NO_DVMA_SPACE) {
13927 		FCP_CP_OUT((uint8_t *)fcmd, fpkt->pkt_cmd,
13928 		    fpkt->pkt_cmd_acc, sizeof (struct fcp_cmd));
13929 	} else {
13930 		ASSERT(fpkt->pkt_cmd_dma == NULL && fpkt->pkt_resp_dma == NULL);
13931 	}
13932 
13933 	cmd->cmd_pkt->pkt_reason = CMD_CMPLT;
13934 	cmd->cmd_pkt->pkt_state = 0;
13935 	cmd->cmd_pkt->pkt_statistics = 0;
13936 	cmd->cmd_pkt->pkt_resid = 0;
13937 
13938 	cmd->cmd_fp_pkt->pkt_data_dma = cmd->cmd_pkt->pkt_handle;
13939 
13940 	if (cmd->cmd_pkt->pkt_flags & FLAG_NOINTR) {
13941 		fpkt->pkt_tran_flags = (FC_TRAN_CLASS3 | FC_TRAN_NO_INTR);
13942 		fpkt->pkt_comp = NULL;
13943 	} else {
13944 		fpkt->pkt_tran_flags = (FC_TRAN_CLASS3 | FC_TRAN_INTR);
13945 		if (cmd->cmd_pkt->pkt_flags & FLAG_IMMEDIATE_CB) {
13946 			fpkt->pkt_tran_flags |= FC_TRAN_IMMEDIATE_CB;
13947 		}
13948 		fpkt->pkt_comp = fcp_cmd_callback;
13949 	}
13950 
13951 	mutex_enter(&pptr->port_mutex);
13952 	if (pptr->port_state & FCP_STATE_SUSPENDED) {
13953 		fpkt->pkt_tran_flags |= FC_TRAN_DUMPING;
13954 	}
13955 	mutex_exit(&pptr->port_mutex);
13956 
13957 	fpkt->pkt_cmd_fhdr.d_id = ptgt->tgt_d_id;
13958 	fpkt->pkt_cmd_fhdr.s_id = pptr->port_id;
13959 
13960 	/*
13961 	 * Save a few kernel cycles here
13962 	 */
13963 #ifndef	__lock_lint
13964 	fpkt->pkt_fca_device = ptgt->tgt_fca_dev;
13965 #endif /* __lock_lint */
13966 }
13967 
13968 static void
13969 fcp_post_callback(struct fcp_pkt *cmd)
13970 {
13971 	if (cmd->cmd_pkt->pkt_comp) {
13972 		(*cmd->cmd_pkt->pkt_comp) (cmd->cmd_pkt);
13973 	}
13974 }
13975 
13976 
13977 /*
13978  * called to do polled I/O by fcp_start()
13979  *
13980  * return a transport status value, i.e. TRAN_ACCECPT for success
13981  */
13982 static int
13983 fcp_dopoll(struct fcp_port *pptr, struct fcp_pkt *cmd)
13984 {
13985 	int	rval;
13986 
13987 #ifdef	DEBUG
13988 	mutex_enter(&pptr->port_pkt_mutex);
13989 	pptr->port_npkts++;
13990 	mutex_exit(&pptr->port_pkt_mutex);
13991 #endif /* DEBUG */
13992 
13993 	if (cmd->cmd_fp_pkt->pkt_timeout) {
13994 		cmd->cmd_fp_pkt->pkt_timeout = cmd->cmd_pkt->pkt_time;
13995 	} else {
13996 		cmd->cmd_fp_pkt->pkt_timeout = FCP_POLL_TIMEOUT;
13997 	}
13998 
13999 	ASSERT(cmd->cmd_fp_pkt->pkt_comp == NULL);
14000 
14001 	cmd->cmd_state = FCP_PKT_ISSUED;
14002 
14003 	rval = fc_ulp_transport(pptr->port_fp_handle, cmd->cmd_fp_pkt);
14004 
14005 #ifdef	DEBUG
14006 	mutex_enter(&pptr->port_pkt_mutex);
14007 	pptr->port_npkts--;
14008 	mutex_exit(&pptr->port_pkt_mutex);
14009 #endif /* DEBUG */
14010 
14011 	cmd->cmd_state = FCP_PKT_IDLE;
14012 
14013 	switch (rval) {
14014 	case FC_SUCCESS:
14015 		if (cmd->cmd_fp_pkt->pkt_state == FC_PKT_SUCCESS) {
14016 			fcp_complete_pkt(cmd->cmd_fp_pkt);
14017 			rval = TRAN_ACCEPT;
14018 		} else {
14019 			rval = TRAN_FATAL_ERROR;
14020 		}
14021 		break;
14022 
14023 	case FC_TRAN_BUSY:
14024 		rval = TRAN_BUSY;
14025 		cmd->cmd_pkt->pkt_resid = 0;
14026 		break;
14027 
14028 	case FC_BADPACKET:
14029 		rval = TRAN_BADPKT;
14030 		break;
14031 
14032 	default:
14033 		rval = TRAN_FATAL_ERROR;
14034 		break;
14035 	}
14036 
14037 	return (rval);
14038 }
14039 
14040 
14041 /*
14042  * called by some of the following transport-called routines to convert
14043  * a supplied dip ptr to a port struct ptr (i.e. to the soft state)
14044  */
14045 static struct fcp_port *
14046 fcp_dip2port(dev_info_t *dip)
14047 {
14048 	int	instance;
14049 
14050 	instance = ddi_get_instance(dip);
14051 	return (ddi_get_soft_state(fcp_softstate, instance));
14052 }
14053 
14054 
14055 /*
14056  * called internally to return a LUN given a dip
14057  */
14058 struct fcp_lun *
14059 fcp_get_lun_from_cip(struct fcp_port *pptr, child_info_t *cip)
14060 {
14061 	struct fcp_tgt *ptgt;
14062 	struct fcp_lun *plun;
14063 	int i;
14064 
14065 
14066 	ASSERT(mutex_owned(&pptr->port_mutex));
14067 
14068 	for (i = 0; i < FCP_NUM_HASH; i++) {
14069 		for (ptgt = pptr->port_tgt_hash_table[i];
14070 		    ptgt != NULL;
14071 		    ptgt = ptgt->tgt_next) {
14072 			mutex_enter(&ptgt->tgt_mutex);
14073 			for (plun = ptgt->tgt_lun; plun != NULL;
14074 			    plun = plun->lun_next) {
14075 				mutex_enter(&plun->lun_mutex);
14076 				if (plun->lun_cip == cip) {
14077 					mutex_exit(&plun->lun_mutex);
14078 					mutex_exit(&ptgt->tgt_mutex);
14079 					return (plun); /* match found */
14080 				}
14081 				mutex_exit(&plun->lun_mutex);
14082 			}
14083 			mutex_exit(&ptgt->tgt_mutex);
14084 		}
14085 	}
14086 	return (NULL);				/* no LUN found */
14087 }
14088 
14089 /*
14090  * pass an element to the hotplug list, kick the hotplug thread
14091  * and wait for the element to get processed by the hotplug thread.
14092  * on return the element is freed.
14093  *
14094  * return zero success and non-zero on failure
14095  *
14096  * acquires/releases the target mutex
14097  *
14098  */
14099 static int
14100 fcp_pass_to_hp_and_wait(struct fcp_port *pptr, struct fcp_lun *plun,
14101     child_info_t *cip, int what, int link_cnt, int tgt_cnt, int flags)
14102 {
14103 	struct fcp_hp_elem	*elem;
14104 	int			rval;
14105 
14106 	mutex_enter(&plun->lun_tgt->tgt_mutex);
14107 	if ((elem = fcp_pass_to_hp(pptr, plun, cip,
14108 	    what, link_cnt, tgt_cnt, flags, 1)) == NULL) {
14109 		mutex_exit(&plun->lun_tgt->tgt_mutex);
14110 		fcp_log(CE_CONT, pptr->port_dip,
14111 		    "Can not pass_to_hp: what: %d; D_ID=%x, LUN=%x\n",
14112 		    what, plun->lun_tgt->tgt_d_id, plun->lun_num);
14113 		return (NDI_FAILURE);
14114 	}
14115 	mutex_exit(&plun->lun_tgt->tgt_mutex);
14116 	mutex_enter(&elem->mutex);
14117 	if (elem->wait) {
14118 		while (elem->wait) {
14119 			cv_wait(&elem->cv, &elem->mutex);
14120 		}
14121 	}
14122 	rval = (elem->result);
14123 	mutex_exit(&elem->mutex);
14124 	mutex_destroy(&elem->mutex);
14125 	cv_destroy(&elem->cv);
14126 	kmem_free(elem, sizeof (struct fcp_hp_elem));
14127 	return (rval);
14128 }
14129 
14130 /*
14131  * pass an element to the hotplug list, and then
14132  * kick the hotplug thread
14133  *
14134  * return Boolean success, i.e. non-zero if all goes well, else zero on error
14135  *
14136  * acquires/releases the hotplug mutex
14137  *
14138  * called with the target mutex owned
14139  *
14140  * memory acquired in NOSLEEP mode
14141  * NOTE: if wait is set to 1 then the caller is responsible for waiting on
14142  *       for the hp daemon to process the request and is responsible for
14143  *	 freeing the element
14144  */
14145 static struct fcp_hp_elem *
14146 fcp_pass_to_hp(struct fcp_port *pptr, struct fcp_lun *plun,
14147     child_info_t *cip, int what, int link_cnt, int tgt_cnt, int flags, int wait)
14148 {
14149 	struct fcp_hp_elem	*elem;
14150 	dev_info_t *pdip;
14151 
14152 	ASSERT(pptr != NULL);
14153 	ASSERT(plun != NULL);
14154 	ASSERT(plun->lun_tgt != NULL);
14155 	ASSERT(mutex_owned(&plun->lun_tgt->tgt_mutex));
14156 
14157 	/* create space for a hotplug element */
14158 	if ((elem = kmem_zalloc(sizeof (struct fcp_hp_elem), KM_NOSLEEP))
14159 	    == NULL) {
14160 		fcp_log(CE_WARN, NULL,
14161 		    "!can't allocate memory for hotplug element");
14162 		return (NULL);
14163 	}
14164 
14165 	/* fill in hotplug element */
14166 	elem->port = pptr;
14167 	elem->lun = plun;
14168 	elem->cip = cip;
14169 	elem->what = what;
14170 	elem->flags = flags;
14171 	elem->link_cnt = link_cnt;
14172 	elem->tgt_cnt = tgt_cnt;
14173 	elem->wait = wait;
14174 	mutex_init(&elem->mutex, NULL, MUTEX_DRIVER, NULL);
14175 	cv_init(&elem->cv, NULL, CV_DRIVER, NULL);
14176 
14177 	/* schedule the hotplug task */
14178 	pdip = pptr->port_dip;
14179 	mutex_enter(&plun->lun_mutex);
14180 	if (elem->what == FCP_ONLINE || elem->what == FCP_OFFLINE) {
14181 		plun->lun_event_count++;
14182 		elem->event_cnt = plun->lun_event_count;
14183 	}
14184 	mutex_exit(&plun->lun_mutex);
14185 	if (taskq_dispatch(DEVI(pdip)->devi_taskq, fcp_hp_task,
14186 	    (void *)elem, KM_NOSLEEP) == NULL) {
14187 		mutex_enter(&plun->lun_mutex);
14188 		if (elem->what == FCP_ONLINE || elem->what == FCP_OFFLINE)
14189 			plun->lun_event_count--;
14190 		mutex_exit(&plun->lun_mutex);
14191 		kmem_free(elem, sizeof (*elem));
14192 		return (0);
14193 	}
14194 
14195 	return (elem);
14196 }
14197 
14198 
14199 static void
14200 fcp_retransport_cmd(struct fcp_port *pptr, struct fcp_pkt *cmd)
14201 {
14202 	int			rval;
14203 	struct scsi_address 	*ap;
14204 	struct fcp_lun 	*plun;
14205 	struct fcp_tgt	*ptgt;
14206 	fc_packet_t	*fpkt;
14207 
14208 	ap = &cmd->cmd_pkt->pkt_address;
14209 	plun = ADDR2LUN(ap);
14210 	ptgt = plun->lun_tgt;
14211 
14212 	ASSERT(cmd->cmd_flags & CFLAG_IN_QUEUE);
14213 
14214 	cmd->cmd_state = FCP_PKT_IDLE;
14215 
14216 	mutex_enter(&pptr->port_mutex);
14217 	mutex_enter(&ptgt->tgt_mutex);
14218 	if (((plun->lun_state & (FCP_LUN_BUSY | FCP_LUN_OFFLINE)) == 0) &&
14219 	    (!(pptr->port_state & FCP_STATE_ONLINING))) {
14220 		fc_ulp_rscn_info_t *rscnp;
14221 
14222 		cmd->cmd_state = FCP_PKT_ISSUED;
14223 
14224 		/*
14225 		 * It is possible for pkt_pd to be NULL if tgt_pd_handle was
14226 		 * originally NULL, hence we try to set it to the pd pointed
14227 		 * to by the SCSI device we're trying to get to.
14228 		 */
14229 
14230 		fpkt = cmd->cmd_fp_pkt;
14231 		if ((fpkt->pkt_pd == NULL) && (ptgt->tgt_pd_handle != NULL)) {
14232 			fpkt->pkt_pd = ptgt->tgt_pd_handle;
14233 			/*
14234 			 * We need to notify the transport that we now have a
14235 			 * reference to the remote port handle.
14236 			 */
14237 			fc_ulp_hold_remote_port(ptgt->tgt_pd_handle);
14238 		}
14239 
14240 		mutex_exit(&ptgt->tgt_mutex);
14241 		mutex_exit(&pptr->port_mutex);
14242 
14243 		ASSERT((cmd->cmd_pkt->pkt_flags & FLAG_NOINTR) == 0);
14244 
14245 		/* prepare the packet */
14246 
14247 		fcp_prepare_pkt(pptr, cmd, plun);
14248 
14249 		rscnp = (fc_ulp_rscn_info_t *)cmd->cmd_fp_pkt->
14250 		    pkt_ulp_rscn_infop;
14251 
14252 		cmd->cmd_timeout = cmd->cmd_pkt->pkt_time ?
14253 		    fcp_watchdog_time + cmd->cmd_pkt->pkt_time : 0;
14254 
14255 		if (rscnp != NULL) {
14256 			rscnp->ulp_rscn_count =
14257 			    fc_ulp_get_rscn_count(pptr->
14258 			    port_fp_handle);
14259 		}
14260 
14261 		rval = fcp_transport(pptr->port_fp_handle,
14262 		    cmd->cmd_fp_pkt, 0);
14263 
14264 		if (rval == FC_SUCCESS) {
14265 			return;
14266 		}
14267 		cmd->cmd_state &= ~FCP_PKT_ISSUED;
14268 	} else {
14269 		mutex_exit(&ptgt->tgt_mutex);
14270 		mutex_exit(&pptr->port_mutex);
14271 	}
14272 
14273 	fcp_queue_pkt(pptr, cmd);
14274 }
14275 
14276 
14277 static void
14278 fcp_fail_cmd(struct fcp_pkt *cmd, uchar_t reason, uint_t statistics)
14279 {
14280 	ASSERT(cmd->cmd_flags & CFLAG_IN_QUEUE);
14281 
14282 	cmd->cmd_flags &= ~CFLAG_IN_QUEUE;
14283 	cmd->cmd_state = FCP_PKT_IDLE;
14284 
14285 	cmd->cmd_pkt->pkt_reason = reason;
14286 	cmd->cmd_pkt->pkt_state = 0;
14287 	cmd->cmd_pkt->pkt_statistics = statistics;
14288 
14289 	fcp_post_callback(cmd);
14290 }
14291 
14292 /*
14293  *     Function: fcp_queue_pkt
14294  *
14295  *  Description: This function queues the packet passed by the caller into
14296  *		 the list of packets of the FCP port.
14297  *
14298  *     Argument: *pptr		FCP port.
14299  *		 *cmd		FCP packet to queue.
14300  *
14301  * Return Value: None
14302  *
14303  *      Context: User, Kernel and Interrupt context.
14304  */
14305 static void
14306 fcp_queue_pkt(struct fcp_port *pptr, struct fcp_pkt *cmd)
14307 {
14308 	ASSERT((cmd->cmd_pkt->pkt_flags & FLAG_NOQUEUE) == NULL);
14309 
14310 	mutex_enter(&pptr->port_pkt_mutex);
14311 	cmd->cmd_flags |= CFLAG_IN_QUEUE;
14312 	ASSERT(cmd->cmd_state != FCP_PKT_ISSUED);
14313 	cmd->cmd_timeout = fcp_watchdog_time + FCP_QUEUE_DELAY;
14314 
14315 	/*
14316 	 * zero pkt_time means hang around for ever
14317 	 */
14318 	if (cmd->cmd_pkt->pkt_time) {
14319 		if (cmd->cmd_fp_pkt->pkt_timeout > FCP_QUEUE_DELAY) {
14320 			cmd->cmd_fp_pkt->pkt_timeout -= FCP_QUEUE_DELAY;
14321 		} else {
14322 			/*
14323 			 * Indicate the watch thread to fail the
14324 			 * command by setting it to highest value
14325 			 */
14326 			cmd->cmd_timeout = fcp_watchdog_time;
14327 			cmd->cmd_fp_pkt->pkt_timeout = FCP_INVALID_TIMEOUT;
14328 		}
14329 	}
14330 
14331 	if (pptr->port_pkt_head) {
14332 		ASSERT(pptr->port_pkt_tail != NULL);
14333 
14334 		pptr->port_pkt_tail->cmd_next = cmd;
14335 		pptr->port_pkt_tail = cmd;
14336 	} else {
14337 		ASSERT(pptr->port_pkt_tail == NULL);
14338 
14339 		pptr->port_pkt_head = pptr->port_pkt_tail = cmd;
14340 	}
14341 	cmd->cmd_next = NULL;
14342 	mutex_exit(&pptr->port_pkt_mutex);
14343 }
14344 
14345 /*
14346  *     Function: fcp_update_targets
14347  *
14348  *  Description: This function applies the specified change of state to all
14349  *		 the targets listed.  The operation applied is 'set'.
14350  *
14351  *     Argument: *pptr		FCP port.
14352  *		 *dev_list	Array of fc_portmap_t structures.
14353  *		 count		Length of dev_list.
14354  *		 state		State bits to update.
14355  *		 cause		Reason for the update.
14356  *
14357  * Return Value: None
14358  *
14359  *      Context: User, Kernel and Interrupt context.
14360  *		 The mutex pptr->port_mutex must be held.
14361  */
14362 static void
14363 fcp_update_targets(struct fcp_port *pptr, fc_portmap_t *dev_list,
14364     uint32_t count, uint32_t state, int cause)
14365 {
14366 	fc_portmap_t		*map_entry;
14367 	struct fcp_tgt	*ptgt;
14368 
14369 	ASSERT(MUTEX_HELD(&pptr->port_mutex));
14370 
14371 	while (count--) {
14372 		map_entry = &(dev_list[count]);
14373 		ptgt = fcp_lookup_target(pptr,
14374 		    (uchar_t *)&(map_entry->map_pwwn));
14375 		if (ptgt == NULL) {
14376 			continue;
14377 		}
14378 
14379 		mutex_enter(&ptgt->tgt_mutex);
14380 		ptgt->tgt_trace = 0;
14381 		ptgt->tgt_change_cnt++;
14382 		ptgt->tgt_statec_cause = cause;
14383 		ptgt->tgt_tmp_cnt = 1;
14384 		fcp_update_tgt_state(ptgt, FCP_SET, state);
14385 		mutex_exit(&ptgt->tgt_mutex);
14386 	}
14387 }
14388 
14389 static int
14390 fcp_call_finish_init(struct fcp_port *pptr, struct fcp_tgt *ptgt,
14391     int lcount, int tcount, int cause)
14392 {
14393 	int rval;
14394 
14395 	mutex_enter(&pptr->port_mutex);
14396 	rval = fcp_call_finish_init_held(pptr, ptgt, lcount, tcount, cause);
14397 	mutex_exit(&pptr->port_mutex);
14398 
14399 	return (rval);
14400 }
14401 
14402 
14403 static int
14404 fcp_call_finish_init_held(struct fcp_port *pptr, struct fcp_tgt *ptgt,
14405     int lcount, int tcount, int cause)
14406 {
14407 	int	finish_init = 0;
14408 	int 	finish_tgt = 0;
14409 	int	do_finish_init = 0;
14410 	int	rval = FCP_NO_CHANGE;
14411 
14412 	if (cause == FCP_CAUSE_LINK_CHANGE ||
14413 	    cause == FCP_CAUSE_LINK_DOWN) {
14414 		do_finish_init = 1;
14415 	}
14416 
14417 	if (ptgt != NULL) {
14418 		FCP_TRACE(fcp_logq, pptr->port_instbuf, fcp_trace,
14419 		    FCP_BUF_LEVEL_2, 0,
14420 		    "link_cnt: %d,%d; tgt_cnt: %d,%d; tmp_cnt: %d,%d;"
14421 		    " cause = %d, d_id = 0x%x, tgt_done = %d",
14422 		    pptr->port_link_cnt, lcount, ptgt->tgt_change_cnt, tcount,
14423 		    pptr->port_tmp_cnt, ptgt->tgt_tmp_cnt, cause,
14424 		    ptgt->tgt_d_id, ptgt->tgt_done);
14425 
14426 		mutex_enter(&ptgt->tgt_mutex);
14427 
14428 		if (tcount && (ptgt->tgt_change_cnt != tcount)) {
14429 			rval = FCP_DEV_CHANGE;
14430 			if (do_finish_init && ptgt->tgt_done == 0) {
14431 				ptgt->tgt_done++;
14432 				finish_init = 1;
14433 			}
14434 		} else {
14435 			if (--ptgt->tgt_tmp_cnt <= 0) {
14436 				ptgt->tgt_tmp_cnt = 0;
14437 				finish_tgt = 1;
14438 
14439 				if (do_finish_init) {
14440 					finish_init = 1;
14441 				}
14442 			}
14443 		}
14444 		mutex_exit(&ptgt->tgt_mutex);
14445 	} else {
14446 		FCP_TRACE(fcp_logq, pptr->port_instbuf, fcp_trace,
14447 		    FCP_BUF_LEVEL_2, 0,
14448 		    "Call Finish Init for NO target");
14449 
14450 		if (do_finish_init) {
14451 			finish_init = 1;
14452 		}
14453 	}
14454 
14455 	if (finish_tgt) {
14456 		ASSERT(ptgt != NULL);
14457 
14458 		mutex_enter(&ptgt->tgt_mutex);
14459 #ifdef	DEBUG
14460 		bzero(ptgt->tgt_tmp_cnt_stack,
14461 		    sizeof (ptgt->tgt_tmp_cnt_stack));
14462 
14463 		ptgt->tgt_tmp_cnt_depth = getpcstack(ptgt->tgt_tmp_cnt_stack,
14464 		    FCP_STACK_DEPTH);
14465 #endif /* DEBUG */
14466 		mutex_exit(&ptgt->tgt_mutex);
14467 
14468 		(void) fcp_finish_tgt(pptr, ptgt, lcount, tcount, cause);
14469 	}
14470 
14471 	if (finish_init && lcount == pptr->port_link_cnt) {
14472 		ASSERT(pptr->port_tmp_cnt > 0);
14473 		if (--pptr->port_tmp_cnt == 0) {
14474 			fcp_finish_init(pptr);
14475 		}
14476 	} else if (lcount != pptr->port_link_cnt) {
14477 		FCP_TRACE(fcp_logq, pptr->port_instbuf,
14478 		    fcp_trace, FCP_BUF_LEVEL_2, 0,
14479 		    "fcp_call_finish_init_held,1: state change occured"
14480 		    " for D_ID=0x%x", (ptgt) ? ptgt->tgt_d_id : 0);
14481 	}
14482 
14483 	return (rval);
14484 }
14485 
14486 
14487 static void
14488 fcp_reconfigure_luns(void * tgt_handle)
14489 {
14490 	uint32_t		dev_cnt;
14491 	fc_portmap_t 		*devlist;
14492 	struct fcp_tgt 	*ptgt = (struct fcp_tgt *)tgt_handle;
14493 	struct fcp_port 	*pptr = ptgt->tgt_port;
14494 
14495 	/*
14496 	 * If the timer that fires this off got canceled too late, the
14497 	 * target could have been destroyed.
14498 	 */
14499 
14500 	if (ptgt->tgt_tid == NULL) {
14501 		return;
14502 	}
14503 
14504 	devlist = kmem_zalloc(sizeof (*devlist), KM_NOSLEEP);
14505 	if (devlist == NULL) {
14506 		fcp_log(CE_WARN, pptr->port_dip,
14507 		    "!fcp%d: failed to allocate for portmap",
14508 		    pptr->port_instance);
14509 		return;
14510 	}
14511 
14512 	dev_cnt = 1;
14513 	devlist->map_pd = ptgt->tgt_pd_handle;
14514 	devlist->map_hard_addr.hard_addr = ptgt->tgt_hard_addr;
14515 	devlist->map_did.port_id = ptgt->tgt_d_id;
14516 
14517 	bcopy(&ptgt->tgt_node_wwn.raw_wwn[0], &devlist->map_nwwn, FC_WWN_SIZE);
14518 	bcopy(&ptgt->tgt_port_wwn.raw_wwn[0], &devlist->map_pwwn, FC_WWN_SIZE);
14519 
14520 	devlist->map_state = PORT_DEVICE_LOGGED_IN;
14521 	devlist->map_type = PORT_DEVICE_NEW;
14522 	devlist->map_flags = 0;
14523 
14524 	fcp_statec_callback(NULL, pptr->port_fp_handle, FC_STATE_DEVICE_CHANGE,
14525 	    pptr->port_topology, devlist, dev_cnt, pptr->port_id);
14526 
14527 	/*
14528 	 * Clear the tgt_tid after no more references to
14529 	 * the fcp_tgt
14530 	 */
14531 	mutex_enter(&ptgt->tgt_mutex);
14532 	ptgt->tgt_tid = NULL;
14533 	mutex_exit(&ptgt->tgt_mutex);
14534 
14535 	kmem_free(devlist, sizeof (*devlist));
14536 }
14537 
14538 
14539 static void
14540 fcp_free_targets(struct fcp_port *pptr)
14541 {
14542 	int 			i;
14543 	struct fcp_tgt 	*ptgt;
14544 
14545 	mutex_enter(&pptr->port_mutex);
14546 	for (i = 0; i < FCP_NUM_HASH; i++) {
14547 		ptgt = pptr->port_tgt_hash_table[i];
14548 		while (ptgt != NULL) {
14549 			struct fcp_tgt *next_tgt = ptgt->tgt_next;
14550 
14551 			fcp_free_target(ptgt);
14552 			ptgt = next_tgt;
14553 		}
14554 	}
14555 	mutex_exit(&pptr->port_mutex);
14556 }
14557 
14558 
14559 static void
14560 fcp_free_target(struct fcp_tgt *ptgt)
14561 {
14562 	struct fcp_lun 	*plun;
14563 	timeout_id_t		tid;
14564 
14565 	mutex_enter(&ptgt->tgt_mutex);
14566 	tid = ptgt->tgt_tid;
14567 
14568 	/*
14569 	 * Cancel any pending timeouts for this target.
14570 	 */
14571 
14572 	if (tid != NULL) {
14573 		/*
14574 		 * Set tgt_tid to NULL first to avoid a race in the callback.
14575 		 * If tgt_tid is NULL, the callback will simply return.
14576 		 */
14577 		ptgt->tgt_tid = NULL;
14578 		mutex_exit(&ptgt->tgt_mutex);
14579 		(void) untimeout(tid);
14580 		mutex_enter(&ptgt->tgt_mutex);
14581 	}
14582 
14583 	plun = ptgt->tgt_lun;
14584 	while (plun != NULL) {
14585 		struct fcp_lun *next_lun = plun->lun_next;
14586 
14587 		fcp_dealloc_lun(plun);
14588 		plun = next_lun;
14589 	}
14590 
14591 	mutex_exit(&ptgt->tgt_mutex);
14592 	fcp_dealloc_tgt(ptgt);
14593 }
14594 
14595 /*
14596  *     Function: fcp_is_retryable
14597  *
14598  *  Description: Indicates if the internal packet is retryable.
14599  *
14600  *     Argument: *icmd		FCP internal packet.
14601  *
14602  * Return Value: 0	Not retryable
14603  *		 1	Retryable
14604  *
14605  *      Context: User, Kernel and Interrupt context
14606  */
14607 static int
14608 fcp_is_retryable(struct fcp_ipkt *icmd)
14609 {
14610 	if (icmd->ipkt_port->port_state & (FCP_STATE_SUSPENDED |
14611 	    FCP_STATE_DETACHING | FCP_STATE_POWER_DOWN))
14612 		return (0);
14613 
14614 	return (((fcp_watchdog_time + icmd->ipkt_fpkt->pkt_timeout) <
14615 		    icmd->ipkt_port->port_deadline) ? 1 : 0);
14616 }
14617 
14618 /*
14619  *     Function: fcp_create_on_demand
14620  *
14621  *     Argument: *pptr		FCP port.
14622  *		 *pwwn		Port WWN.
14623  *
14624  * Return Value: 0	Success
14625  *		 EIO
14626  *		 ENOMEM
14627  *		 EBUSY
14628  *		 EINVAL
14629  *
14630  *      Context: User and Kernel context
14631  */
14632 static int
14633 fcp_create_on_demand(struct fcp_port *pptr, uchar_t *pwwn)
14634 {
14635 	int			wait_ms;
14636 	int			tcount;
14637 	int			lcount;
14638 	int 			ret;
14639 	int			error;
14640 	int 			rval = EIO;
14641 	int 			ntries;
14642 	fc_portmap_t 		*devlist;
14643 	opaque_t		pd;
14644 	struct fcp_lun		*plun;
14645 	struct fcp_tgt		*ptgt;
14646 	int			old_manual = 0;
14647 
14648 	/* Allocates the fc_portmap_t structure. */
14649 	devlist = kmem_zalloc(sizeof (*devlist), KM_SLEEP);
14650 
14651 	/*
14652 	 * If FC_INVALID_RSCN_COUNT is non-zero, we will have to init as shown
14653 	 * in the commented statement below:
14654 	 *
14655 	 * devlist->map_rscn_info.ulp_rscn_count = FC_INVALID_RSCN_COUNT;
14656 	 *
14657 	 * Below, the deadline for the discovery process is set.
14658 	 */
14659 	mutex_enter(&pptr->port_mutex);
14660 	pptr->port_deadline = fcp_watchdog_time + FCP_ICMD_DEADLINE;
14661 	mutex_exit(&pptr->port_mutex);
14662 
14663 	/*
14664 	 * We try to find the remote port based on the WWN provided by the
14665 	 * caller.  We actually ask fp/fctl if it has it.
14666 	 */
14667 	pd = fc_ulp_get_remote_port(pptr->port_fp_handle,
14668 	    (la_wwn_t *)pwwn, &error, 1);
14669 
14670 	if (pd == NULL) {
14671 		kmem_free(devlist, sizeof (*devlist));
14672 		return (rval);
14673 	}
14674 
14675 	/*
14676 	 * The remote port was found.  We ask fp/fctl to update our
14677 	 * fc_portmap_t structure.
14678 	 */
14679 	ret = fc_ulp_pwwn_to_portmap(pptr->port_fp_handle,
14680 	    (la_wwn_t *)pwwn, devlist);
14681 	if (ret != FC_SUCCESS) {
14682 		kmem_free(devlist, sizeof (*devlist));
14683 		return (rval);
14684 	}
14685 
14686 	/*
14687 	 * The map flag field is set to indicates that the creation is being
14688 	 * done at the user request (Ioclt probably luxadm or cfgadm).
14689 	 */
14690 	devlist->map_type = PORT_DEVICE_USER_CREATE;
14691 
14692 	mutex_enter(&pptr->port_mutex);
14693 
14694 	/*
14695 	 * We check to see if fcp already has a target that describes the
14696 	 * device being created.  If not it is created.
14697 	 */
14698 	ptgt = fcp_lookup_target(pptr, pwwn);
14699 	if (ptgt == NULL) {
14700 		lcount = pptr->port_link_cnt;
14701 		mutex_exit(&pptr->port_mutex);
14702 
14703 		ptgt = fcp_alloc_tgt(pptr, devlist, lcount);
14704 		if (ptgt == NULL) {
14705 			fcp_log(CE_WARN, pptr->port_dip,
14706 			    "!FC target allocation failed");
14707 			return (ENOMEM);
14708 		}
14709 
14710 		mutex_enter(&pptr->port_mutex);
14711 	}
14712 
14713 	mutex_enter(&ptgt->tgt_mutex);
14714 	ptgt->tgt_statec_cause = FCP_CAUSE_USER_CREATE;
14715 	ptgt->tgt_tmp_cnt = 1;
14716 	ptgt->tgt_device_created = 0;
14717 	/*
14718 	 * If fabric and auto config is set but the target was
14719 	 * manually unconfigured then reset to the manual_config_only to
14720 	 * 0 so the device will get configured.
14721 	 */
14722 	if (FC_TOP_EXTERNAL(pptr->port_topology) &&
14723 		fcp_enable_auto_configuration &&
14724 		    ptgt->tgt_manual_config_only == 1) {
14725 		old_manual = 1;
14726 		ptgt->tgt_manual_config_only = 0;
14727 	}
14728 	mutex_exit(&ptgt->tgt_mutex);
14729 
14730 	fcp_update_targets(pptr, devlist, 1,
14731 	    FCP_LUN_BUSY | FCP_LUN_MARK, FCP_CAUSE_USER_CREATE);
14732 
14733 	lcount = pptr->port_link_cnt;
14734 	tcount = ptgt->tgt_change_cnt;
14735 
14736 	if (fcp_handle_mapflags(pptr, ptgt, devlist, lcount,
14737 	    tcount, FCP_CAUSE_USER_CREATE) == TRUE) {
14738 		if (FC_TOP_EXTERNAL(pptr->port_topology) &&
14739 			fcp_enable_auto_configuration && old_manual) {
14740 			mutex_enter(&ptgt->tgt_mutex);
14741 			ptgt->tgt_manual_config_only = 1;
14742 			mutex_exit(&ptgt->tgt_mutex);
14743 		}
14744 
14745 		if (pptr->port_link_cnt != lcount ||
14746 		    ptgt->tgt_change_cnt != tcount) {
14747 			rval = EBUSY;
14748 		}
14749 		mutex_exit(&pptr->port_mutex);
14750 
14751 		FCP_TRACE(fcp_logq, pptr->port_instbuf, fcp_trace,
14752 		    FCP_BUF_LEVEL_3, 0,
14753 		    "fcp_create_on_demand: mapflags ptgt=%x, "
14754 		    "lcount=%x::port_link_cnt=%x, "
14755 		    "tcount=%x: tgt_change_cnt=%x, rval=%x",
14756 		    ptgt, lcount, pptr->port_link_cnt,
14757 		    tcount, ptgt->tgt_change_cnt, rval);
14758 		return (rval);
14759 	}
14760 
14761 	/*
14762 	 * Due to lack of synchronization mechanisms, we perform
14763 	 * periodic monitoring of our request; Because requests
14764 	 * get dropped when another one supercedes (either because
14765 	 * of a link change or a target change), it is difficult to
14766 	 * provide a clean synchronization mechanism (such as a
14767 	 * semaphore or a conditional variable) without exhaustively
14768 	 * rewriting the mainline discovery code of this driver.
14769 	 */
14770 	wait_ms = 500;
14771 
14772 	ntries = fcp_max_target_retries;
14773 
14774 	FCP_TRACE(fcp_logq, pptr->port_instbuf, fcp_trace,
14775 	    FCP_BUF_LEVEL_3, 0,
14776 	    "fcp_create_on_demand(1): ntries=%x, ptgt=%x, "
14777 	    "lcount=%x::port_link_cnt=%x, "
14778 	    "tcount=%x::tgt_change_cnt=%x, rval=%x, tgt_device_created=%x "
14779 	    "tgt_tmp_cnt =%x",
14780 	    ntries, ptgt, lcount, pptr->port_link_cnt,
14781 	    tcount, ptgt->tgt_change_cnt, rval, ptgt->tgt_device_created,
14782 	    ptgt->tgt_tmp_cnt);
14783 
14784 	mutex_enter(&ptgt->tgt_mutex);
14785 	while (ntries-- != 0 && pptr->port_link_cnt == lcount &&
14786 	    ptgt->tgt_change_cnt == tcount && ptgt->tgt_device_created == 0) {
14787 		mutex_exit(&ptgt->tgt_mutex);
14788 		mutex_exit(&pptr->port_mutex);
14789 
14790 		delay(drv_usectohz(wait_ms * 1000));
14791 
14792 		mutex_enter(&pptr->port_mutex);
14793 		mutex_enter(&ptgt->tgt_mutex);
14794 	}
14795 
14796 
14797 	if (pptr->port_link_cnt != lcount || ptgt->tgt_change_cnt != tcount) {
14798 		rval = EBUSY;
14799 	} else {
14800 		if (ptgt->tgt_tmp_cnt == 0 && ptgt->tgt_node_state ==
14801 		    FCP_TGT_NODE_PRESENT) {
14802 			rval = 0;
14803 		}
14804 	}
14805 
14806 	FCP_TRACE(fcp_logq, pptr->port_instbuf, fcp_trace,
14807 	    FCP_BUF_LEVEL_3, 0,
14808 	    "fcp_create_on_demand(2): ntries=%x, ptgt=%x, "
14809 	    "lcount=%x::port_link_cnt=%x, "
14810 	    "tcount=%x::tgt_change_cnt=%x, rval=%x, tgt_device_created=%x "
14811 	    "tgt_tmp_cnt =%x",
14812 	    ntries, ptgt, lcount, pptr->port_link_cnt,
14813 	    tcount, ptgt->tgt_change_cnt, rval, ptgt->tgt_device_created,
14814 	    ptgt->tgt_tmp_cnt);
14815 
14816 	if (rval) {
14817 		if (FC_TOP_EXTERNAL(pptr->port_topology) &&
14818 			fcp_enable_auto_configuration && old_manual) {
14819 			ptgt->tgt_manual_config_only = 1;
14820 		}
14821 		mutex_exit(&ptgt->tgt_mutex);
14822 		mutex_exit(&pptr->port_mutex);
14823 		kmem_free(devlist, sizeof (*devlist));
14824 
14825 		FCP_TRACE(fcp_logq, pptr->port_instbuf, fcp_trace,
14826 		    FCP_BUF_LEVEL_3, 0,
14827 		    "fcp_create_on_demand(3): ntries=%x, ptgt=%x, "
14828 		    "lcount=%x::port_link_cnt=%x, "
14829 		    "tcount=%x::tgt_change_cnt=%x, rval=%x, "
14830 		    "tgt_device_created=%x, tgt D_ID=%x",
14831 		    ntries, ptgt, lcount, pptr->port_link_cnt,
14832 		    tcount, ptgt->tgt_change_cnt, rval,
14833 		    ptgt->tgt_device_created, ptgt->tgt_d_id);
14834 		return (rval);
14835 	}
14836 
14837 	if ((plun = ptgt->tgt_lun) != NULL) {
14838 		tcount = plun->lun_tgt->tgt_change_cnt;
14839 	} else {
14840 		rval = EINVAL;
14841 	}
14842 	lcount = pptr->port_link_cnt;
14843 
14844 	/*
14845 	 * Configuring the target with no LUNs will fail. We
14846 	 * should reset the node state so that it is not
14847 	 * automatically configured when the LUNs are added
14848 	 * to this target.
14849 	 */
14850 	if (ptgt->tgt_lun_cnt == 0) {
14851 		ptgt->tgt_node_state = FCP_TGT_NODE_NONE;
14852 	}
14853 	mutex_exit(&ptgt->tgt_mutex);
14854 	mutex_exit(&pptr->port_mutex);
14855 
14856 	while (plun) {
14857 		child_info_t	*cip;
14858 
14859 		mutex_enter(&plun->lun_mutex);
14860 		cip = plun->lun_cip;
14861 		mutex_exit(&plun->lun_mutex);
14862 
14863 		mutex_enter(&ptgt->tgt_mutex);
14864 		if (!(plun->lun_state & FCP_LUN_OFFLINE)) {
14865 			mutex_exit(&ptgt->tgt_mutex);
14866 
14867 			rval = fcp_pass_to_hp_and_wait(pptr, plun, cip,
14868 				FCP_ONLINE, lcount, tcount,
14869 				NDI_ONLINE_ATTACH);
14870 			if (rval != NDI_SUCCESS) {
14871 				FCP_TRACE(fcp_logq,
14872 				    pptr->port_instbuf, fcp_trace,
14873 				    FCP_BUF_LEVEL_3, 0,
14874 				    "fcp_create_on_demand: "
14875 				    "pass_to_hp_and_wait failed "
14876 				    "rval=%x", rval);
14877 				rval = EIO;
14878 			} else {
14879 				mutex_enter(&LUN_TGT->tgt_mutex);
14880 				plun->lun_state &= ~(FCP_LUN_OFFLINE |
14881 					FCP_LUN_BUSY);
14882 				mutex_exit(&LUN_TGT->tgt_mutex);
14883 			}
14884 			mutex_enter(&ptgt->tgt_mutex);
14885 		}
14886 
14887 		plun = plun->lun_next;
14888 		mutex_exit(&ptgt->tgt_mutex);
14889 	}
14890 
14891 	kmem_free(devlist, sizeof (*devlist));
14892 
14893 	if (FC_TOP_EXTERNAL(pptr->port_topology) &&
14894 		fcp_enable_auto_configuration && old_manual) {
14895 		mutex_enter(&ptgt->tgt_mutex);
14896 		/* if successful then set manual to 0 */
14897 		if (rval == 0) {
14898 			ptgt->tgt_manual_config_only = 0;
14899 		} else {
14900 			/* reset to 1 so the user has to do the config */
14901 			ptgt->tgt_manual_config_only = 1;
14902 		}
14903 		mutex_exit(&ptgt->tgt_mutex);
14904 	}
14905 
14906 	return (rval);
14907 }
14908 
14909 
14910 static void
14911 fcp_ascii_to_wwn(caddr_t string, uchar_t bytes[], unsigned int byte_len)
14912 {
14913 	int		count;
14914 	uchar_t		byte;
14915 
14916 	count = 0;
14917 	while (*string) {
14918 		byte = FCP_ATOB(*string); string++;
14919 		byte = byte << 4 | FCP_ATOB(*string); string++;
14920 		bytes[count++] = byte;
14921 
14922 		if (count >= byte_len) {
14923 			break;
14924 		}
14925 	}
14926 }
14927 
14928 static void
14929 fcp_wwn_to_ascii(uchar_t wwn[], char *string)
14930 {
14931 	int		i;
14932 
14933 	for (i = 0; i < FC_WWN_SIZE; i++) {
14934 		(void) sprintf(string + (i * 2),
14935 		    "%02x", wwn[i]);
14936 	}
14937 
14938 }
14939 
14940 static void
14941 fcp_print_error(fc_packet_t *fpkt)
14942 {
14943 	struct fcp_ipkt	*icmd = (struct fcp_ipkt *)
14944 				    fpkt->pkt_ulp_private;
14945 	struct fcp_port	*pptr;
14946 	struct fcp_tgt	*ptgt;
14947 	struct fcp_lun	*plun;
14948 	caddr_t 		buf;
14949 	int 			scsi_cmd = 0;
14950 
14951 	ptgt = icmd->ipkt_tgt;
14952 	plun = icmd->ipkt_lun;
14953 	pptr = ptgt->tgt_port;
14954 
14955 	buf = kmem_zalloc(256, KM_NOSLEEP);
14956 	if (buf == NULL) {
14957 		return;
14958 	}
14959 
14960 	switch (icmd->ipkt_opcode) {
14961 	case SCMD_REPORT_LUN:
14962 		(void) sprintf(buf, "!REPORT LUN to D_ID=0x%%x"
14963 		    " lun=0x%%x failed");
14964 		scsi_cmd++;
14965 		break;
14966 
14967 	case SCMD_INQUIRY_PAGE83:
14968 		(void) sprintf(buf, "!INQUIRY-83 to D_ID=0x%%x"
14969 		    " lun=0x%%x failed");
14970 		scsi_cmd++;
14971 		break;
14972 
14973 	case SCMD_INQUIRY:
14974 		(void) sprintf(buf, "!INQUIRY to D_ID=0x%%x"
14975 		    " lun=0x%%x failed");
14976 		scsi_cmd++;
14977 		break;
14978 
14979 	case LA_ELS_PLOGI:
14980 		(void) sprintf(buf, "!PLOGI to D_ID=0x%%x failed");
14981 		break;
14982 
14983 	case LA_ELS_PRLI:
14984 		(void) sprintf(buf, "!PRLI to D_ID=0x%%x failed");
14985 		break;
14986 	}
14987 
14988 	if (scsi_cmd && fpkt->pkt_state == FC_PKT_SUCCESS) {
14989 		struct fcp_rsp		response, *rsp;
14990 		uchar_t			asc, ascq;
14991 		caddr_t			sense_key = NULL;
14992 		struct fcp_rsp_info	fcp_rsp_err, *bep;
14993 
14994 		if (icmd->ipkt_nodma) {
14995 			rsp = (struct fcp_rsp *)fpkt->pkt_resp;
14996 			bep = (struct fcp_rsp_info *)((caddr_t)rsp +
14997 			    sizeof (struct fcp_rsp));
14998 		} else {
14999 			rsp = &response;
15000 			bep = &fcp_rsp_err;
15001 
15002 			FCP_CP_IN(fpkt->pkt_resp, rsp, fpkt->pkt_resp_acc,
15003 			    sizeof (struct fcp_rsp));
15004 
15005 			FCP_CP_IN(fpkt->pkt_resp + sizeof (struct fcp_rsp),
15006 			    bep, fpkt->pkt_resp_acc,
15007 			    sizeof (struct fcp_rsp_info));
15008 		}
15009 
15010 
15011 		if (fcp_validate_fcp_response(rsp, pptr) != FC_SUCCESS) {
15012 			(void) sprintf(buf + strlen(buf),
15013 			    " : Bad FCP response values rsvd1=%%x, rsvd2=%%x,"
15014 			    " sts-rsvd1=%%x, sts-rsvd2=%%x, rsplen=%%x,"
15015 			    " senselen=%%x. Giving up");
15016 
15017 			fcp_log(CE_WARN, pptr->port_dip, buf,
15018 			    ptgt->tgt_d_id, plun->lun_num, rsp->reserved_0,
15019 			    rsp->reserved_1, rsp->fcp_u.fcp_status.reserved_0,
15020 			    rsp->fcp_u.fcp_status.reserved_1,
15021 			    rsp->fcp_response_len, rsp->fcp_sense_len);
15022 
15023 			kmem_free(buf, 256);
15024 			return;
15025 		}
15026 
15027 		if (rsp->fcp_u.fcp_status.rsp_len_set &&
15028 		    bep->rsp_code != FCP_NO_FAILURE) {
15029 			(void) sprintf(buf + strlen(buf),
15030 			    " FCP Response code = 0x%x", bep->rsp_code);
15031 		}
15032 
15033 		if (rsp->fcp_u.fcp_status.scsi_status & STATUS_CHECK) {
15034 			struct scsi_extended_sense sense_info, *sense_ptr;
15035 
15036 			if (icmd->ipkt_nodma) {
15037 				sense_ptr = (struct scsi_extended_sense *)
15038 				    ((caddr_t)fpkt->pkt_resp +
15039 				    sizeof (struct fcp_rsp) +
15040 				    rsp->fcp_response_len);
15041 			} else {
15042 				sense_ptr = &sense_info;
15043 
15044 				FCP_CP_IN(fpkt->pkt_resp +
15045 				    sizeof (struct fcp_rsp) +
15046 				    rsp->fcp_response_len, &sense_info,
15047 				    fpkt->pkt_resp_acc,
15048 				    sizeof (struct scsi_extended_sense));
15049 			}
15050 
15051 			if (sense_ptr->es_key < NUM_SENSE_KEYS +
15052 			    NUM_IMPL_SENSE_KEYS) {
15053 				sense_key = sense_keys[sense_ptr->es_key];
15054 			} else {
15055 				sense_key = "Undefined";
15056 			}
15057 
15058 			asc = sense_ptr->es_add_code;
15059 			ascq = sense_ptr->es_qual_code;
15060 
15061 			(void) sprintf(buf + strlen(buf),
15062 			    ": sense key=%%s, ASC=%%x," " ASCQ=%%x."
15063 			    " Giving up");
15064 
15065 			fcp_log(CE_WARN, pptr->port_dip, buf,
15066 			    ptgt->tgt_d_id, plun->lun_num, sense_key,
15067 			    asc, ascq);
15068 		} else {
15069 			(void) sprintf(buf + strlen(buf),
15070 			    " : SCSI status=%%x. Giving up");
15071 
15072 			fcp_log(CE_WARN, pptr->port_dip, buf,
15073 			    ptgt->tgt_d_id, plun->lun_num,
15074 			    rsp->fcp_u.fcp_status.scsi_status);
15075 		}
15076 	} else {
15077 		caddr_t state, reason, action, expln;
15078 
15079 		(void) fc_ulp_pkt_error(fpkt, &state, &reason,
15080 		    &action, &expln);
15081 
15082 		(void) sprintf(buf + strlen(buf), ": State:%%s,"
15083 		    " Reason:%%s. Giving up");
15084 
15085 		if (scsi_cmd) {
15086 			fcp_log(CE_WARN, pptr->port_dip, buf,
15087 			    ptgt->tgt_d_id, plun->lun_num, state, reason);
15088 		} else {
15089 			fcp_log(CE_WARN, pptr->port_dip, buf,
15090 			    ptgt->tgt_d_id, state, reason);
15091 		}
15092 	}
15093 
15094 	kmem_free(buf, 256);
15095 }
15096 
15097 
15098 static int
15099 fcp_handle_ipkt_errors(struct fcp_port *pptr, struct fcp_tgt *ptgt,
15100     struct fcp_ipkt *icmd, int rval, caddr_t op)
15101 {
15102 	int	ret = DDI_FAILURE;
15103 	char 	*error;
15104 
15105 	switch (rval) {
15106 	case FC_DEVICE_BUSY_NEW_RSCN:
15107 		/*
15108 		 * This means that there was a new RSCN that the transport
15109 		 * knows about (which the ULP *may* know about too) but the
15110 		 * pkt that was sent down was related to an older RSCN. So, we
15111 		 * are just going to reset the retry count and deadline and
15112 		 * continue to retry. The idea is that transport is currently
15113 		 * working on the new RSCN and will soon let the ULPs know
15114 		 * about it and when it does the existing logic will kick in
15115 		 * where it will change the tcount to indicate that something
15116 		 * changed on the target. So, rediscovery will start and there
15117 		 * will not be an infinite retry.
15118 		 *
15119 		 * For a full flow of how the RSCN info is transferred back and
15120 		 * forth, see fp.c
15121 		 */
15122 		icmd->ipkt_retries = 0;
15123 		icmd->ipkt_port->port_deadline = fcp_watchdog_time +
15124 		    FCP_ICMD_DEADLINE;
15125 
15126 		FCP_TRACE(fcp_logq, pptr->port_instbuf, fcp_trace,
15127 		    FCP_BUF_LEVEL_3, 0,
15128 		    "fcp_handle_ipkt_errors: rval=%x  for D_ID=%x",
15129 		    rval, ptgt->tgt_d_id);
15130 		/* FALLTHROUGH */
15131 
15132 	case FC_STATEC_BUSY:
15133 	case FC_DEVICE_BUSY:
15134 	case FC_PBUSY:
15135 	case FC_FBUSY:
15136 	case FC_TRAN_BUSY:
15137 	case FC_OFFLINE:
15138 		FCP_TRACE(fcp_logq, pptr->port_instbuf, fcp_trace,
15139 		    FCP_BUF_LEVEL_3, 0,
15140 		    "fcp_handle_ipkt_errors: rval=%x  for D_ID=%x",
15141 		    rval, ptgt->tgt_d_id);
15142 		if (icmd->ipkt_retries < FCP_MAX_RETRIES &&
15143 		    fcp_is_retryable(icmd)) {
15144 			fcp_queue_ipkt(pptr, icmd->ipkt_fpkt);
15145 			ret = DDI_SUCCESS;
15146 		}
15147 		break;
15148 
15149 	case FC_LOGINREQ:
15150 		/*
15151 		 * FC_LOGINREQ used to be handled just like all the cases
15152 		 * above. It has been changed to handled a PRLI that fails
15153 		 * with FC_LOGINREQ different than other ipkts that fail
15154 		 * with FC_LOGINREQ. If a PRLI fails with FC_LOGINREQ it is
15155 		 * a simple matter to turn it into a PLOGI instead, so that's
15156 		 * exactly what we do here.
15157 		 */
15158 		if (icmd->ipkt_opcode == LA_ELS_PRLI) {
15159 			ret = fcp_send_els(icmd->ipkt_port, icmd->ipkt_tgt,
15160 			    icmd, LA_ELS_PLOGI, icmd->ipkt_link_cnt,
15161 			    icmd->ipkt_change_cnt, icmd->ipkt_cause);
15162 		} else {
15163 			FCP_TRACE(fcp_logq, pptr->port_instbuf, fcp_trace,
15164 			    FCP_BUF_LEVEL_3, 0,
15165 			    "fcp_handle_ipkt_errors: rval=%x  for D_ID=%x",
15166 			    rval, ptgt->tgt_d_id);
15167 			if (icmd->ipkt_retries < FCP_MAX_RETRIES &&
15168 			    fcp_is_retryable(icmd)) {
15169 				fcp_queue_ipkt(pptr, icmd->ipkt_fpkt);
15170 				ret = DDI_SUCCESS;
15171 			}
15172 		}
15173 		break;
15174 
15175 	default:
15176 		mutex_enter(&pptr->port_mutex);
15177 		mutex_enter(&ptgt->tgt_mutex);
15178 		if (!FCP_STATE_CHANGED(pptr, ptgt, icmd)) {
15179 			mutex_exit(&ptgt->tgt_mutex);
15180 			mutex_exit(&pptr->port_mutex);
15181 
15182 			(void) fc_ulp_error(rval, &error);
15183 			fcp_log(CE_WARN, pptr->port_dip,
15184 			    "!Failed to send %s to D_ID=%x error=%s",
15185 			    op, ptgt->tgt_d_id, error);
15186 		} else {
15187 			FCP_TRACE(fcp_logq, pptr->port_instbuf,
15188 			    fcp_trace, FCP_BUF_LEVEL_2, 0,
15189 			    "fcp_handle_ipkt_errors,1: state change occured"
15190 			    " for D_ID=0x%x", ptgt->tgt_d_id);
15191 			mutex_exit(&ptgt->tgt_mutex);
15192 			mutex_exit(&pptr->port_mutex);
15193 		}
15194 		break;
15195 	}
15196 
15197 	return (ret);
15198 }
15199 
15200 
15201 /*
15202  * Check of outstanding commands on any LUN for this target
15203  */
15204 static int
15205 fcp_outstanding_lun_cmds(struct fcp_tgt *ptgt)
15206 {
15207 	struct	fcp_lun	*plun;
15208 	struct	fcp_pkt	*cmd;
15209 
15210 	for (plun = ptgt->tgt_lun; plun != NULL; plun = plun->lun_next) {
15211 		mutex_enter(&plun->lun_mutex);
15212 		for (cmd = plun->lun_pkt_head; cmd != NULL;
15213 		    cmd = cmd->cmd_forw) {
15214 			if (cmd->cmd_state == FCP_PKT_ISSUED) {
15215 				mutex_exit(&plun->lun_mutex);
15216 				return (FC_SUCCESS);
15217 			}
15218 		}
15219 		mutex_exit(&plun->lun_mutex);
15220 	}
15221 
15222 	return (FC_FAILURE);
15223 }
15224 
15225 static fc_portmap_t *
15226 fcp_construct_map(struct fcp_port *pptr, uint32_t *dev_cnt)
15227 {
15228 	int			i;
15229 	fc_portmap_t 		*devlist;
15230 	fc_portmap_t 		*devptr = NULL;
15231 	struct fcp_tgt 	*ptgt;
15232 
15233 	mutex_enter(&pptr->port_mutex);
15234 	for (i = 0, *dev_cnt = 0; i < FCP_NUM_HASH; i++) {
15235 		for (ptgt = pptr->port_tgt_hash_table[i]; ptgt != NULL;
15236 		    ptgt = ptgt->tgt_next) {
15237 			if (!(ptgt->tgt_state & FCP_TGT_ORPHAN)) {
15238 				++*dev_cnt;
15239 			}
15240 		}
15241 	}
15242 
15243 	devptr = devlist = kmem_zalloc(sizeof (*devlist) * *dev_cnt,
15244 	    KM_NOSLEEP);
15245 	if (devlist == NULL) {
15246 		mutex_exit(&pptr->port_mutex);
15247 		fcp_log(CE_WARN, pptr->port_dip,
15248 		    "!fcp%d: failed to allocate for portmap for construct map",
15249 		    pptr->port_instance);
15250 		return (devptr);
15251 	}
15252 
15253 	for (i = 0; i < FCP_NUM_HASH; i++) {
15254 		for (ptgt = pptr->port_tgt_hash_table[i]; ptgt != NULL;
15255 		    ptgt = ptgt->tgt_next) {
15256 			if (!(ptgt->tgt_state & FCP_TGT_ORPHAN)) {
15257 				int ret;
15258 
15259 				ret = fc_ulp_pwwn_to_portmap(
15260 				    pptr->port_fp_handle,
15261 				    (la_wwn_t *)&ptgt->tgt_port_wwn.raw_wwn[0],
15262 				    devlist);
15263 
15264 				if (ret == FC_SUCCESS) {
15265 					devlist++;
15266 					continue;
15267 				}
15268 
15269 				devlist->map_pd = NULL;
15270 				devlist->map_did.port_id = ptgt->tgt_d_id;
15271 				devlist->map_hard_addr.hard_addr =
15272 				    ptgt->tgt_hard_addr;
15273 
15274 				devlist->map_state = PORT_DEVICE_INVALID;
15275 				devlist->map_type = PORT_DEVICE_OLD;
15276 
15277 				bcopy(&ptgt->tgt_node_wwn.raw_wwn[0],
15278 				    &devlist->map_nwwn, FC_WWN_SIZE);
15279 
15280 				bcopy(&ptgt->tgt_port_wwn.raw_wwn[0],
15281 				    &devlist->map_pwwn, FC_WWN_SIZE);
15282 
15283 				devlist++;
15284 			}
15285 		}
15286 	}
15287 
15288 	mutex_exit(&pptr->port_mutex);
15289 
15290 	return (devptr);
15291 }
15292 /*
15293  * Inimate MPxIO that the lun is busy and cannot accept regular IO
15294  */
15295 static void
15296 fcp_update_mpxio_path_verifybusy(struct fcp_port *pptr)
15297 {
15298 	int i;
15299 	struct fcp_tgt	*ptgt;
15300 	struct fcp_lun	*plun;
15301 
15302 	for (i = 0; i < FCP_NUM_HASH; i++) {
15303 		for (ptgt = pptr->port_tgt_hash_table[i]; ptgt != NULL;
15304 		    ptgt = ptgt->tgt_next) {
15305 			mutex_enter(&ptgt->tgt_mutex);
15306 			for (plun = ptgt->tgt_lun; plun != NULL;
15307 			    plun = plun->lun_next) {
15308 				if (plun->lun_mpxio &&
15309 				    plun->lun_state & FCP_LUN_BUSY) {
15310 					if (!fcp_pass_to_hp(pptr, plun,
15311 					    plun->lun_cip,
15312 					    FCP_MPXIO_PATH_SET_BUSY,
15313 					    pptr->port_link_cnt,
15314 					    ptgt->tgt_change_cnt, 0, 0)) {
15315 						FCP_TRACE(fcp_logq,
15316 						    pptr->port_instbuf,
15317 						    fcp_trace,
15318 						    FCP_BUF_LEVEL_2, 0,
15319 						    "path_verifybusy: "
15320 						    "disable lun %p failed!",
15321 						    plun);
15322 					}
15323 				}
15324 			}
15325 			mutex_exit(&ptgt->tgt_mutex);
15326 		}
15327 	}
15328 }
15329 
15330 static int
15331 fcp_update_mpxio_path(struct fcp_lun *plun, child_info_t *cip, int what)
15332 {
15333 	dev_info_t		*cdip = NULL;
15334 	dev_info_t		*pdip = NULL;
15335 
15336 	ASSERT(plun);
15337 
15338 	mutex_enter(&plun->lun_mutex);
15339 	if (fcp_is_child_present(plun, cip) == FC_FAILURE) {
15340 		mutex_exit(&plun->lun_mutex);
15341 		return (NDI_FAILURE);
15342 	}
15343 	mutex_exit(&plun->lun_mutex);
15344 	cdip = mdi_pi_get_client(PIP(cip));
15345 	pdip = mdi_pi_get_phci(PIP(cip));
15346 
15347 	ASSERT(cdip != NULL);
15348 	ASSERT(pdip != NULL);
15349 
15350 	if (what == FCP_MPXIO_PATH_CLEAR_BUSY) {
15351 		/* LUN ready for IO */
15352 		(void) mdi_pi_enable_path(PIP(cip), DRIVER_DISABLE_TRANSIENT);
15353 	} else {
15354 		/* LUN busy to accept IO */
15355 		(void) mdi_pi_disable_path(PIP(cip), DRIVER_DISABLE_TRANSIENT);
15356 	}
15357 	return (NDI_SUCCESS);
15358 }
15359 
15360 /*
15361  * Caller must free the returned string of MAXPATHLEN len
15362  * If the device is offline (-1 instance number) NULL
15363  * will be returned.
15364  */
15365 static char *
15366 fcp_get_lun_path(struct fcp_lun *plun) {
15367 	dev_info_t	*dip = NULL;
15368 	char	*path = NULL;
15369 	if (plun == NULL) {
15370 	    return (NULL);
15371 	}
15372 	if (plun->lun_mpxio == 0) {
15373 	    dip = DIP(plun->lun_cip);
15374 	} else {
15375 	    dip = mdi_pi_get_client(PIP(plun->lun_cip));
15376 	}
15377 	if (dip == NULL) {
15378 	    return (NULL);
15379 	}
15380 	if (ddi_get_instance(dip) < 0) {
15381 	    return (NULL);
15382 	}
15383 	path = kmem_alloc(MAXPATHLEN, KM_SLEEP);
15384 	if (path == NULL) {
15385 	    return (NULL);
15386 	}
15387 
15388 	(void) ddi_pathname(dip, path);
15389 	/*
15390 	 * In reality, the user wants a fully valid path (one they can open)
15391 	 * but this string is lacking the mount point, and the minor node.
15392 	 * It would be nice if we could "figure these out" somehow
15393 	 * and fill them in.  Otherwise, the userland code has to understand
15394 	 * driver specific details of which minor node is the "best" or
15395 	 * "right" one to expose.  (Ex: which slice is the whole disk, or
15396 	 * which tape doesn't rewind)
15397 	 */
15398 	return (path);
15399 }
15400 
15401 static int
15402 fcp_scsi_bus_config(dev_info_t *parent, uint_t flag,
15403     ddi_bus_config_op_t op, void *arg, dev_info_t **childp)
15404 {
15405 	int64_t reset_delay;
15406 	int rval, retry = 0;
15407 	struct fcp_port *pptr = fcp_dip2port(parent);
15408 
15409 	reset_delay = (int64_t)(USEC_TO_TICK(FCP_INIT_WAIT_TIMEOUT)) -
15410 		(lbolt64 - pptr->port_attach_time);
15411 	if (reset_delay < 0)
15412 		reset_delay = 0;
15413 
15414 	if (fcp_bus_config_debug)
15415 		flag |= NDI_DEVI_DEBUG;
15416 
15417 	switch (op) {
15418 	case BUS_CONFIG_ONE:
15419 		/*
15420 		 * Retry the command since we need to ensure
15421 		 * the fabric devices are available for root
15422 		 */
15423 		while (retry++ < fcp_max_bus_config_retries) {
15424 			rval =  (ndi_busop_bus_config(parent,
15425 				    flag | NDI_MDI_FALLBACK, op,
15426 				    arg, childp, (clock_t)reset_delay));
15427 			if (rval == 0)
15428 				return (rval);
15429 		}
15430 
15431 		/*
15432 		 * drain taskq to make sure nodes are created and then
15433 		 * try again.
15434 		 */
15435 		taskq_wait(DEVI(parent)->devi_taskq);
15436 		return (ndi_busop_bus_config(parent, flag | NDI_MDI_FALLBACK,
15437 		    op, arg, childp, 0));
15438 
15439 	case BUS_CONFIG_DRIVER:
15440 	case BUS_CONFIG_ALL: {
15441 		/*
15442 		 * delay till all devices report in (port_tmp_cnt == 0)
15443 		 * or FCP_INIT_WAIT_TIMEOUT
15444 		 */
15445 		mutex_enter(&pptr->port_mutex);
15446 		while ((reset_delay > 0) && pptr->port_tmp_cnt) {
15447 			(void) cv_timedwait(&pptr->port_config_cv,
15448 			    &pptr->port_mutex,
15449 			    ddi_get_lbolt() + (clock_t)reset_delay);
15450 			reset_delay =
15451 			    (int64_t)(USEC_TO_TICK(FCP_INIT_WAIT_TIMEOUT)) -
15452 			    (lbolt64 - pptr->port_attach_time);
15453 		}
15454 		mutex_exit(&pptr->port_mutex);
15455 		/* drain taskq to make sure nodes are created */
15456 		taskq_wait(DEVI(parent)->devi_taskq);
15457 		return (ndi_busop_bus_config(parent, flag, op,
15458 		    arg, childp, 0));
15459 	}
15460 
15461 	default:
15462 		return (NDI_FAILURE);
15463 	}
15464 	/*NOTREACHED*/
15465 }
15466 
15467 static int
15468 fcp_scsi_bus_unconfig(dev_info_t *parent, uint_t flag,
15469     ddi_bus_config_op_t op, void *arg)
15470 {
15471 	if (fcp_bus_config_debug)
15472 		flag |= NDI_DEVI_DEBUG;
15473 
15474 	return (ndi_busop_bus_unconfig(parent, flag, op, arg));
15475 }
15476 
15477 
15478 /*
15479  * Routine to copy GUID into the lun structure.
15480  * returns 0 if copy was successful and 1 if encountered a
15481  * failure and did not copy the guid.
15482  */
15483 static int
15484 fcp_copy_guid_2_lun_block(struct fcp_lun *plun, char *guidp)
15485 {
15486 
15487 	int retval = 0;
15488 
15489 	/* add one for the null terminator */
15490 	const unsigned int len = strlen(guidp) + 1;
15491 
15492 	if ((guidp == NULL) || (plun == NULL)) {
15493 		return (1);
15494 	}
15495 
15496 	/*
15497 	 * if the plun->lun_guid already has been allocated,
15498 	 * then check the size. if the size is exact, reuse
15499 	 * it....if not free it an allocate the required size.
15500 	 * The reallocation should NOT typically happen
15501 	 * unless the GUIDs reported changes between passes.
15502 	 * We free up and alloc again even if the
15503 	 * size was more than required. This is due to the
15504 	 * fact that the field lun_guid_size - serves
15505 	 * dual role of indicating the size of the wwn
15506 	 * size and ALSO the allocation size.
15507 	 */
15508 	if (plun->lun_guid) {
15509 		if (plun->lun_guid_size != len) {
15510 			/*
15511 			 * free the allocated memory and
15512 			 * initialize the field
15513 			 * lun_guid_size to 0.
15514 			 */
15515 			kmem_free(plun->lun_guid, plun->lun_guid_size);
15516 			plun->lun_guid = NULL;
15517 			plun->lun_guid_size = 0;
15518 		}
15519 	}
15520 	/*
15521 	 * alloc only if not already done.
15522 	 */
15523 	if (plun->lun_guid == NULL) {
15524 		plun->lun_guid = kmem_zalloc(len, KM_NOSLEEP);
15525 		if (plun->lun_guid == NULL) {
15526 			cmn_err(CE_WARN, "fcp_copy_guid_2_lun_block:"
15527 				"Unable to allocate"
15528 				"Memory for GUID!!! size %d", len);
15529 			retval = 1;
15530 		} else {
15531 			plun->lun_guid_size = len;
15532 		}
15533 	}
15534 	if (plun->lun_guid) {
15535 		/*
15536 		 * now copy the GUID
15537 		 */
15538 		bcopy(guidp, plun->lun_guid, plun->lun_guid_size);
15539 	}
15540 	return (retval);
15541 }
15542 
15543 /*
15544  * fcp_reconfig_wait
15545  *
15546  * Wait for a rediscovery/reconfiguration to complete before continuing.
15547  */
15548 
15549 static void
15550 fcp_reconfig_wait(struct fcp_port *pptr)
15551 {
15552 	clock_t		reconfig_start, wait_timeout;
15553 
15554 	/*
15555 	 * Quick check.  If pptr->port_tmp_cnt is 0, there is no
15556 	 * reconfiguration in progress.
15557 	 */
15558 
15559 	mutex_enter(&pptr->port_mutex);
15560 	if (pptr->port_tmp_cnt == 0) {
15561 		mutex_exit(&pptr->port_mutex);
15562 		return;
15563 	}
15564 	mutex_exit(&pptr->port_mutex);
15565 
15566 	/*
15567 	 * If we cause a reconfig by raising power, delay until all devices
15568 	 * report in (port_tmp_cnt returns to 0)
15569 	 */
15570 
15571 	reconfig_start = ddi_get_lbolt();
15572 	wait_timeout = drv_usectohz(FCP_INIT_WAIT_TIMEOUT);
15573 
15574 	mutex_enter(&pptr->port_mutex);
15575 
15576 	while (((ddi_get_lbolt() - reconfig_start) < wait_timeout) &&
15577 		pptr->port_tmp_cnt) {
15578 
15579 		(void) cv_timedwait(&pptr->port_config_cv, &pptr->port_mutex,
15580 		    reconfig_start + wait_timeout);
15581 	}
15582 
15583 	mutex_exit(&pptr->port_mutex);
15584 
15585 	/*
15586 	 * Even if fcp_tmp_count isn't 0, continue without error.  The port
15587 	 * we want may still be ok.  If not, it will error out later
15588 	 */
15589 }
15590 
15591 /*
15592  * Read masking info from fp.conf and construct the global fcp_lun_blacklist.
15593  * We rely on the fcp_global_mutex to provide protection against changes to
15594  * the fcp_lun_blacklist.
15595  *
15596  * You can describe a list of target port WWNs and LUN numbers which will
15597  * not be configured. LUN numbers will be interpreted as decimal. White
15598  * spaces and ',' can be used in the list of LUN numbers.
15599  *
15600  * To prevent LUNs 1 and 2 from being configured for target
15601  * port 510000f010fd92a1 and target port 510000e012079df1, set:
15602  *
15603  * pwwn-lun-blacklist=
15604  * "510000f010fd92a1,1,2",
15605  * "510000e012079df1,1,2";
15606  */
15607 static void
15608 fcp_read_blacklist(dev_info_t *dip,
15609     struct fcp_black_list_entry **pplun_blacklist) {
15610 	char **prop_array	= NULL;
15611 	char *curr_pwwn		= NULL;
15612 	char *curr_lun		= NULL;
15613 	uint32_t prop_item	= 0;
15614 	int idx			= 0;
15615 	int len			= 0;
15616 
15617 	ASSERT(mutex_owned(&fcp_global_mutex));
15618 	if (ddi_prop_lookup_string_array(DDI_DEV_T_ANY, dip,
15619 	    DDI_PROP_DONTPASS | DDI_PROP_NOTPROM,
15620 	    LUN_BLACKLIST_PROP, &prop_array, &prop_item) != DDI_PROP_SUCCESS) {
15621 		return;
15622 	}
15623 
15624 	for (idx = 0; idx < prop_item; idx++) {
15625 
15626 		curr_pwwn = prop_array[idx];
15627 		while (*curr_pwwn == ' ') {
15628 			curr_pwwn++;
15629 		}
15630 		if (strlen(curr_pwwn) <= (sizeof (la_wwn_t) * 2 + 1)) {
15631 			fcp_log(CE_WARN, NULL, "Invalid WWN %s in the blacklist"
15632 			    ", please check.", curr_pwwn);
15633 			continue;
15634 		}
15635 		if ((*(curr_pwwn + sizeof (la_wwn_t) * 2) != ' ') &&
15636 		    (*(curr_pwwn + sizeof (la_wwn_t) * 2) != ',')) {
15637 			fcp_log(CE_WARN, NULL, "Invalid WWN %s in the blacklist"
15638 			    ", please check.", curr_pwwn);
15639 			continue;
15640 		}
15641 		for (len = 0; len < sizeof (la_wwn_t) * 2; len++) {
15642 			if (isxdigit(curr_pwwn[len]) != TRUE) {
15643 				fcp_log(CE_WARN, NULL, "Invalid WWN %s in the "
15644 				    "blacklist, please check.", curr_pwwn);
15645 				break;
15646 			}
15647 		}
15648 		if (len != sizeof (la_wwn_t) * 2) {
15649 			continue;
15650 		}
15651 
15652 		curr_lun = curr_pwwn + sizeof (la_wwn_t) * 2 + 1;
15653 		*(curr_lun - 1) = '\0';
15654 		fcp_mask_pwwn_lun(curr_pwwn, curr_lun, pplun_blacklist);
15655 	}
15656 
15657 	ddi_prop_free(prop_array);
15658 }
15659 
15660 /*
15661  * Get the masking info about one remote target port designated by wwn.
15662  * Lun ids could be separated by ',' or white spaces.
15663  */
15664 static void
15665 fcp_mask_pwwn_lun(char *curr_pwwn, char *curr_lun,
15666     struct fcp_black_list_entry **pplun_blacklist) {
15667 	int 		idx			= 0;
15668 	uint32_t	offset			= 0;
15669 	unsigned long	lun_id			= 0;
15670 	char		lunid_buf[16];
15671 	char		*pend			= NULL;
15672 	int		illegal_digit		= 0;
15673 
15674 	while (offset < strlen(curr_lun)) {
15675 		while ((curr_lun[offset + idx] != ',') &&
15676 		    (curr_lun[offset + idx] != '\0') &&
15677 		    (curr_lun[offset + idx] != ' ')) {
15678 			if (isdigit(curr_lun[offset + idx]) == 0) {
15679 				illegal_digit++;
15680 			}
15681 			idx++;
15682 		}
15683 		if (illegal_digit > 0) {
15684 			offset += (idx+1);	/* To the start of next lun */
15685 			idx = 0;
15686 			illegal_digit = 0;
15687 			fcp_log(CE_WARN, NULL, "Invalid LUN %s for WWN %s in "
15688 			    "the blacklist, please check digits.",
15689 			    curr_lun, curr_pwwn);
15690 			continue;
15691 		}
15692 		if (idx >= (sizeof (lunid_buf) / sizeof (lunid_buf[0]))) {
15693 			fcp_log(CE_WARN, NULL, "Invalid LUN %s for WWN %s in "
15694 			    "the blacklist, please check the length of LUN#.",
15695 			    curr_lun, curr_pwwn);
15696 			break;
15697 		}
15698 		if (idx == 0) {	/* ignore ' ' or ',' or '\0' */
15699 			offset++;
15700 			continue;
15701 		}
15702 
15703 		bcopy(curr_lun + offset, lunid_buf, idx);
15704 		lunid_buf[idx] = '\0';
15705 		if (ddi_strtoul(lunid_buf, &pend, 10, &lun_id) == 0) {
15706 			fcp_add_one_mask(curr_pwwn, lun_id, pplun_blacklist);
15707 		} else {
15708 			fcp_log(CE_WARN, NULL, "Invalid LUN %s for WWN %s in "
15709 			    "the blacklist, please check %s.",
15710 			    curr_lun, curr_pwwn, lunid_buf);
15711 		}
15712 		offset += (idx+1);	/* To the start of next lun */
15713 		idx = 0;
15714 	}
15715 }
15716 
15717 /*
15718  * Add one masking record
15719  */
15720 static void
15721 fcp_add_one_mask(char *curr_pwwn, uint32_t lun_id,
15722     struct fcp_black_list_entry **pplun_blacklist) {
15723 	struct fcp_black_list_entry	*tmp_entry	= *pplun_blacklist;
15724 	struct fcp_black_list_entry	*new_entry	= NULL;
15725 	la_wwn_t			wwn;
15726 
15727 	fcp_ascii_to_wwn(curr_pwwn, wwn.raw_wwn, sizeof (la_wwn_t));
15728 	while (tmp_entry) {
15729 		if ((bcmp(&tmp_entry->wwn, &wwn,
15730 		    sizeof (la_wwn_t)) == 0) && (tmp_entry->lun == lun_id)) {
15731 			return;
15732 		}
15733 
15734 		tmp_entry = tmp_entry->next;
15735 	}
15736 
15737 	/* add to black list */
15738 	new_entry = (struct fcp_black_list_entry *)kmem_zalloc
15739 	    (sizeof (struct fcp_black_list_entry), KM_SLEEP);
15740 	bcopy(&wwn, &new_entry->wwn, sizeof (la_wwn_t));
15741 	new_entry->lun = lun_id;
15742 	new_entry->masked = 0;
15743 	new_entry->next = *pplun_blacklist;
15744 	*pplun_blacklist = new_entry;
15745 }
15746 
15747 /*
15748  * Check if we should mask the specified lun of this fcp_tgt
15749  */
15750 static int
15751 fcp_should_mask(la_wwn_t *wwn, uint32_t lun_id) {
15752 	struct fcp_black_list_entry *remote_port;
15753 
15754 	remote_port = fcp_lun_blacklist;
15755 	while (remote_port != NULL) {
15756 		if (bcmp(wwn, &remote_port->wwn, sizeof (la_wwn_t)) == 0) {
15757 			if (remote_port->lun == lun_id) {
15758 				remote_port->masked++;
15759 				if (remote_port->masked == 1) {
15760 					fcp_log(CE_NOTE, NULL, "LUN %d of port "
15761 	"%02x%02x%02x%02x%02x%02x%02x%02x is masked due to black listing.\n",
15762 	lun_id, wwn->raw_wwn[0], wwn->raw_wwn[1], wwn->raw_wwn[2],
15763 	wwn->raw_wwn[3], wwn->raw_wwn[4], wwn->raw_wwn[5], wwn->raw_wwn[6],
15764 	wwn->raw_wwn[7]);
15765 				}
15766 				return (TRUE);
15767 			}
15768 		}
15769 		remote_port = remote_port->next;
15770 	}
15771 	return (FALSE);
15772 }
15773 
15774 /*
15775  * Release all allocated resources
15776  */
15777 static void
15778 fcp_cleanup_blacklist(struct fcp_black_list_entry **pplun_blacklist) {
15779 	struct fcp_black_list_entry	*tmp_entry	= *pplun_blacklist;
15780 	struct fcp_black_list_entry	*current_entry	= NULL;
15781 
15782 	ASSERT(mutex_owned(&fcp_global_mutex));
15783 	/*
15784 	 * Traverse all luns
15785 	 */
15786 	while (tmp_entry) {
15787 		current_entry = tmp_entry;
15788 		tmp_entry = tmp_entry->next;
15789 		kmem_free(current_entry, sizeof (struct fcp_black_list_entry));
15790 	}
15791 	*pplun_blacklist = NULL;
15792 }
15793