xref: /titanic_51/usr/src/uts/common/io/fibre-channel/ulp/fcp.c (revision 65cf7c958eb9457cf2c83a853d128e4ecfc187a6)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 /*
22  * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
23  * Use is subject to license terms.
24  *
25  * Fibre Channel SCSI ULP Mapping driver
26  */
27 
28 #include <sys/scsi/scsi.h>
29 #include <sys/types.h>
30 #include <sys/varargs.h>
31 #include <sys/devctl.h>
32 #include <sys/thread.h>
33 #include <sys/thread.h>
34 #include <sys/open.h>
35 #include <sys/file.h>
36 #include <sys/sunndi.h>
37 #include <sys/console.h>
38 #include <sys/proc.h>
39 #include <sys/time.h>
40 #include <sys/utsname.h>
41 #include <sys/scsi/impl/scsi_reset_notify.h>
42 #include <sys/ndi_impldefs.h>
43 #include <sys/byteorder.h>
44 #include <sys/fs/dv_node.h>
45 #include <sys/ctype.h>
46 #include <sys/sunmdi.h>
47 
48 #include <sys/fibre-channel/fc.h>
49 #include <sys/fibre-channel/impl/fc_ulpif.h>
50 #include <sys/fibre-channel/ulp/fcpvar.h>
51 
52 /*
53  * Discovery Process
54  * =================
55  *
56  *    The discovery process is a major function of FCP.	 In order to help
57  * understand that function a flow diagram is given here.  This diagram
58  * doesn't claim to cover all the cases and the events that can occur during
59  * the discovery process nor the subtleties of the code.  The code paths shown
60  * are simplified.  Its purpose is to help the reader (and potentially bug
61  * fixer) have an overall view of the logic of the code.  For that reason the
62  * diagram covers the simple case of the line coming up cleanly or of a new
63  * port attaching to FCP with the link being up.  The reader must keep in
64  * that:
65  *
66  *	- There are special cases where bringing devices online and offline
67  *	  is driven by Ioctl.
68  *
69  *	- The behavior of the discovery process can be modified through the
70  *	  .conf file.
71  *
72  *	- The line can go down and come back up at any time during the
73  *	  discovery process which explains some of the complexity of the code.
74  *
75  * ............................................................................
76  *
77  * STEP 1: The line comes up or a new Fibre Channel port attaches to FCP.
78  *
79  *
80  *			+-------------------------+
81  *   fp/fctl module --->|    fcp_port_attach	  |
82  *			+-------------------------+
83  *	   |			     |
84  *	   |			     |
85  *	   |			     v
86  *	   |		+-------------------------+
87  *	   |		| fcp_handle_port_attach  |
88  *	   |		+-------------------------+
89  *	   |				|
90  *	   |				|
91  *	   +--------------------+	|
92  *				|	|
93  *				v	v
94  *			+-------------------------+
95  *			|   fcp_statec_callback	  |
96  *			+-------------------------+
97  *				    |
98  *				    |
99  *				    v
100  *			+-------------------------+
101  *			|    fcp_handle_devices	  |
102  *			+-------------------------+
103  *				    |
104  *				    |
105  *				    v
106  *			+-------------------------+
107  *			|   fcp_handle_mapflags	  |
108  *			+-------------------------+
109  *				    |
110  *				    |
111  *				    v
112  *			+-------------------------+
113  *			|     fcp_send_els	  |
114  *			|			  |
115  *			| PLOGI or PRLI To all the|
116  *			| reachable devices.	  |
117  *			+-------------------------+
118  *
119  *
120  * ............................................................................
121  *
122  * STEP 2: The callback functions of the PLOGI and/or PRLI requests sent during
123  *	   STEP 1 are called (it is actually the same function).
124  *
125  *
126  *			+-------------------------+
127  *			|    fcp_icmd_callback	  |
128  *   fp/fctl module --->|			  |
129  *			| callback for PLOGI and  |
130  *			| PRLI.			  |
131  *			+-------------------------+
132  *				     |
133  *				     |
134  *	    Received PLOGI Accept   /-\	  Received PRLI Accept
135  *		       _ _ _ _ _ _ /   \_ _ _ _ _ _
136  *		      |		   \   /	   |
137  *		      |		    \-/		   |
138  *		      |				   |
139  *		      v				   v
140  *	+-------------------------+	+-------------------------+
141  *	|     fcp_send_els	  |	|     fcp_send_scsi	  |
142  *	|			  |	|			  |
143  *	|	  PRLI		  |	|	REPORT_LUN	  |
144  *	+-------------------------+	+-------------------------+
145  *
146  * ............................................................................
147  *
148  * STEP 3: The callback functions of the SCSI commands issued by FCP are called
149  *	   (It is actually the same function).
150  *
151  *
152  *			    +-------------------------+
153  *   fp/fctl module ------->|	 fcp_scsi_callback    |
154  *			    +-------------------------+
155  *					|
156  *					|
157  *					|
158  *	Receive REPORT_LUN reply       /-\	Receive INQUIRY PAGE83 reply
159  *		  _ _ _ _ _ _ _ _ _ _ /	  \_ _ _ _ _ _ _ _ _ _ _ _
160  *		 |		      \	  /			  |
161  *		 |		       \-/			  |
162  *		 |			|			  |
163  *		 | Receive INQUIRY reply|			  |
164  *		 |			|			  |
165  *		 v			v			  v
166  * +------------------------+ +----------------------+ +----------------------+
167  * |  fcp_handle_reportlun  | |	 fcp_handle_inquiry  | |  fcp_handle_page83   |
168  * |(Called for each Target)| | (Called for each LUN)| |(Called for each LUN) |
169  * +------------------------+ +----------------------+ +----------------------+
170  *		 |			|			  |
171  *		 |			|			  |
172  *		 |			|			  |
173  *		 v			v			  |
174  *     +-----------------+	+-----------------+		  |
175  *     |  fcp_send_scsi	 |	|  fcp_send_scsi  |		  |
176  *     |		 |	|		  |		  |
177  *     |     INQUIRY	 |	| INQUIRY PAGE83  |		  |
178  *     |  (To each LUN)	 |	+-----------------+		  |
179  *     +-----------------+					  |
180  *								  |
181  *								  v
182  *						      +------------------------+
183  *						      |	 fcp_call_finish_init  |
184  *						      +------------------------+
185  *								  |
186  *								  v
187  *						 +-----------------------------+
188  *						 |  fcp_call_finish_init_held  |
189  *						 +-----------------------------+
190  *								  |
191  *								  |
192  *			   All LUNs scanned			 /-\
193  *			       _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ __ /   \
194  *			      |					\   /
195  *			      |					 \-/
196  *			      v					  |
197  *		     +------------------+			  |
198  *		     |	fcp_finish_tgt	|			  |
199  *		     +------------------+			  |
200  *			      |	  Target Not Offline and	  |
201  *  Target Not Offline and    |	  not marked and tgt_node_state	  |
202  *  marked		     /-\  not FCP_TGT_NODE_ON_DEMAND	  |
203  *		_ _ _ _ _ _ /	\_ _ _ _ _ _ _ _		  |
204  *	       |	    \	/		|		  |
205  *	       |	     \-/		|		  |
206  *	       v				v		  |
207  * +----------------------------+     +-------------------+	  |
208  * |	 fcp_offline_target	|     |	 fcp_create_luns  |	  |
209  * |				|     +-------------------+	  |
210  * | A structure fcp_tgt_elem	|		|		  |
211  * | is created and queued in	|		v		  |
212  * | the FCP port list		|     +-------------------+	  |
213  * | port_offline_tgts.	 It	|     |	 fcp_pass_to_hp	  |	  |
214  * | will be unqueued by the	|     |			  |	  |
215  * | watchdog timer.		|     | Called for each	  |	  |
216  * +----------------------------+     | LUN. Dispatches	  |	  |
217  *		  |		      | fcp_hp_task	  |	  |
218  *		  |		      +-------------------+	  |
219  *		  |				|		  |
220  *		  |				|		  |
221  *		  |				|		  |
222  *		  |				+---------------->|
223  *		  |						  |
224  *		  +---------------------------------------------->|
225  *								  |
226  *								  |
227  *		All the targets (devices) have been scanned	 /-\
228  *				_ _ _ _	_ _ _ _	_ _ _ _ _ _ _ _ /   \
229  *			       |				\   /
230  *			       |				 \-/
231  *	    +-------------------------------------+		  |
232  *	    |		fcp_finish_init		  |		  |
233  *	    |					  |		  |
234  *	    | Signal broadcasts the condition	  |		  |
235  *	    | variable port_config_cv of the FCP  |		  |
236  *	    | port.  One potential code sequence  |		  |
237  *	    | waiting on the condition variable	  |		  |
238  *	    | is the sequence handling  	  |		  |
239  *	    | BUS_CONFIG_ALL and BUS_CONFIG_DRIVER|		  |
240  *	    | The other is in the function	  |		  |
241  *	    | fcp_reconfig_wait which is called	  |		  |
242  *	    | in the transmit path preventing IOs |		  |
243  *	    | from going through till the disco-  |		  |
244  *	    | very process is over.		  |		  |
245  *	    +-------------------------------------+		  |
246  *			       |				  |
247  *			       |				  |
248  *			       +--------------------------------->|
249  *								  |
250  *								  v
251  *								Return
252  *
253  * ............................................................................
254  *
255  * STEP 4: The hot plug task is called (for each fcp_hp_elem).
256  *
257  *
258  *			+-------------------------+
259  *			|      fcp_hp_task	  |
260  *			+-------------------------+
261  *				     |
262  *				     |
263  *				     v
264  *			+-------------------------+
265  *			|     fcp_trigger_lun	  |
266  *			+-------------------------+
267  *				     |
268  *				     |
269  *				     v
270  *		   Bring offline    /-\	 Bring online
271  *		  _ _ _ _ _ _ _ _ _/   \_ _ _ _ _ _ _ _ _ _
272  *		 |		   \   /		   |
273  *		 |		    \-/			   |
274  *		 v					   v
275  *    +---------------------+			+-----------------------+
276  *    |	 fcp_offline_child  |			|      fcp_get_cip	|
277  *    +---------------------+			|			|
278  *						| Creates a dev_info_t	|
279  *						| or a mdi_pathinfo_t	|
280  *						| depending on whether	|
281  *						| mpxio is on or off.	|
282  *						+-----------------------+
283  *							   |
284  *							   |
285  *							   v
286  *						+-----------------------+
287  *						|  fcp_online_child	|
288  *						|			|
289  *						| Set device online	|
290  *						| using NDI or MDI.	|
291  *						+-----------------------+
292  *
293  * ............................................................................
294  *
295  * STEP 5: The watchdog timer expires.	The watchdog timer does much more than
296  *	   what is described here.  We only show the target offline path.
297  *
298  *
299  *			 +--------------------------+
300  *			 |	  fcp_watch	    |
301  *			 +--------------------------+
302  *				       |
303  *				       |
304  *				       v
305  *			 +--------------------------+
306  *			 |  fcp_scan_offline_tgts   |
307  *			 +--------------------------+
308  *				       |
309  *				       |
310  *				       v
311  *			 +--------------------------+
312  *			 |  fcp_offline_target_now  |
313  *			 +--------------------------+
314  *				       |
315  *				       |
316  *				       v
317  *			 +--------------------------+
318  *			 |   fcp_offline_tgt_luns   |
319  *			 +--------------------------+
320  *				       |
321  *				       |
322  *				       v
323  *			 +--------------------------+
324  *			 |     fcp_offline_lun	    |
325  *			 +--------------------------+
326  *				       |
327  *				       |
328  *				       v
329  *		     +----------------------------------+
330  *		     |	     fcp_offline_lun_now	|
331  *		     |					|
332  *		     | A request (or two if mpxio) is	|
333  *		     | sent to the hot plug task using	|
334  *		     | a fcp_hp_elem structure.		|
335  *		     +----------------------------------+
336  */
337 
338 /*
339  * Functions registered with DDI framework
340  */
341 static int fcp_attach(dev_info_t *devi, ddi_attach_cmd_t cmd);
342 static int fcp_detach(dev_info_t *devi, ddi_detach_cmd_t cmd);
343 static int fcp_open(dev_t *devp, int flag, int otype, cred_t *credp);
344 static int fcp_close(dev_t dev, int flag, int otype, cred_t *credp);
345 static int fcp_ioctl(dev_t dev, int cmd, intptr_t data, int mode,
346     cred_t *credp, int *rval);
347 
348 /*
349  * Functions registered with FC Transport framework
350  */
351 static int fcp_port_attach(opaque_t ulph, fc_ulp_port_info_t *pinfo,
352     fc_attach_cmd_t cmd,  uint32_t s_id);
353 static int fcp_port_detach(opaque_t ulph, fc_ulp_port_info_t *info,
354     fc_detach_cmd_t cmd);
355 static int fcp_port_ioctl(opaque_t ulph, opaque_t port_handle, dev_t dev,
356     int cmd, intptr_t data, int mode, cred_t *credp, int *rval,
357     uint32_t claimed);
358 static int fcp_els_callback(opaque_t ulph, opaque_t port_handle,
359     fc_unsol_buf_t *buf, uint32_t claimed);
360 static int fcp_data_callback(opaque_t ulph, opaque_t port_handle,
361     fc_unsol_buf_t *buf, uint32_t claimed);
362 static void fcp_statec_callback(opaque_t ulph, opaque_t port_handle,
363     uint32_t port_state, uint32_t port_top, fc_portmap_t *devlist,
364     uint32_t  dev_cnt, uint32_t port_sid);
365 
366 /*
367  * Functions registered with SCSA framework
368  */
369 static int fcp_phys_tgt_init(dev_info_t *hba_dip, dev_info_t *tgt_dip,
370     scsi_hba_tran_t *hba_tran, struct scsi_device *sd);
371 static int fcp_scsi_tgt_init(dev_info_t *hba_dip, dev_info_t *tgt_dip,
372     scsi_hba_tran_t *hba_tran, struct scsi_device *sd);
373 static void fcp_scsi_tgt_free(dev_info_t *hba_dip, dev_info_t *tgt_dip,
374     scsi_hba_tran_t *hba_tran, struct scsi_device *sd);
375 static int fcp_scsi_start(struct scsi_address *ap, struct scsi_pkt *pkt);
376 static int fcp_scsi_abort(struct scsi_address *ap, struct scsi_pkt *pkt);
377 static int fcp_scsi_reset(struct scsi_address *ap, int level);
378 static int fcp_scsi_getcap(struct scsi_address *ap, char *cap, int whom);
379 static int fcp_scsi_setcap(struct scsi_address *ap, char *cap, int value,
380     int whom);
381 static void fcp_pkt_teardown(struct scsi_pkt *pkt);
382 static int fcp_scsi_reset_notify(struct scsi_address *ap, int flag,
383     void (*callback)(caddr_t), caddr_t arg);
384 static int fcp_scsi_bus_get_eventcookie(dev_info_t *dip, dev_info_t *rdip,
385     char *name, ddi_eventcookie_t *event_cookiep);
386 static int fcp_scsi_bus_add_eventcall(dev_info_t *dip, dev_info_t *rdip,
387     ddi_eventcookie_t eventid, void (*callback)(), void *arg,
388     ddi_callback_id_t *cb_id);
389 static int fcp_scsi_bus_remove_eventcall(dev_info_t *devi,
390     ddi_callback_id_t cb_id);
391 static int fcp_scsi_bus_post_event(dev_info_t *dip, dev_info_t *rdip,
392     ddi_eventcookie_t eventid, void *impldata);
393 static int fcp_scsi_bus_config(dev_info_t *parent, uint_t flag,
394     ddi_bus_config_op_t op, void *arg, dev_info_t **childp);
395 static int fcp_scsi_bus_unconfig(dev_info_t *parent, uint_t flag,
396     ddi_bus_config_op_t op, void *arg);
397 
398 /*
399  * Internal functions
400  */
401 static int fcp_setup_device_data_ioctl(int cmd, struct fcp_ioctl *data,
402     int mode, int *rval);
403 
404 static int fcp_setup_scsi_ioctl(struct fcp_scsi_cmd *u_fscsi,
405     int mode, int *rval);
406 static int fcp_copyin_scsi_cmd(caddr_t base_addr,
407     struct fcp_scsi_cmd *fscsi, int mode);
408 static int fcp_copyout_scsi_cmd(struct fcp_scsi_cmd *fscsi,
409     caddr_t base_addr, int mode);
410 static int fcp_send_scsi_ioctl(struct fcp_scsi_cmd *fscsi);
411 
412 static struct fcp_tgt *fcp_port_create_tgt(struct fcp_port *pptr,
413     la_wwn_t *pwwn, int	*ret_val, int *fc_status, int *fc_pkt_state,
414     int *fc_pkt_reason, int *fc_pkt_action);
415 static int fcp_tgt_send_plogi(struct fcp_tgt *ptgt, int *fc_status,
416     int *fc_pkt_state, int *fc_pkt_reason, int *fc_pkt_action);
417 static int fcp_tgt_send_prli(struct fcp_tgt	*ptgt, int *fc_status,
418     int *fc_pkt_state, int *fc_pkt_reason, int *fc_pkt_action);
419 static void fcp_ipkt_sema_init(struct fcp_ipkt *icmd);
420 static int fcp_ipkt_sema_wait(struct fcp_ipkt *icmd);
421 static void fcp_ipkt_sema_callback(struct fc_packet *fpkt);
422 static void fcp_ipkt_sema_cleanup(struct fcp_ipkt *icmd);
423 
424 static void fcp_handle_devices(struct fcp_port *pptr,
425     fc_portmap_t devlist[], uint32_t dev_cnt, int link_cnt,
426     fcp_map_tag_t *map_tag, int cause);
427 static int fcp_handle_mapflags(struct fcp_port *pptr,
428     struct fcp_tgt *ptgt, fc_portmap_t *map_entry, int link_cnt,
429     int tgt_cnt, int cause);
430 static int fcp_send_els(struct fcp_port *pptr, struct fcp_tgt *ptgt,
431     struct fcp_ipkt *icmd, uchar_t opcode, int lcount, int tcount, int cause);
432 static void fcp_update_state(struct fcp_port *pptr, uint32_t state,
433     int cause);
434 static void fcp_update_tgt_state(struct fcp_tgt *ptgt, int flag,
435     uint32_t state);
436 static struct fcp_port *fcp_get_port(opaque_t port_handle);
437 static void fcp_unsol_callback(fc_packet_t *fpkt);
438 static void fcp_unsol_resp_init(fc_packet_t *pkt, fc_unsol_buf_t *buf,
439     uchar_t r_ctl, uchar_t type);
440 static int fcp_unsol_prli(struct fcp_port *pptr, fc_unsol_buf_t *buf);
441 static struct fcp_ipkt *fcp_icmd_alloc(struct fcp_port *pptr,
442     struct fcp_tgt *ptgt, int cmd_len, int resp_len, int data_len,
443     int nodma, int lcount, int tcount, int cause, uint32_t rscn_count);
444 static void fcp_icmd_free(struct fcp_port *pptr, struct fcp_ipkt *icmd);
445 static int fcp_alloc_dma(struct fcp_port *pptr, struct fcp_ipkt *icmd,
446     int nodma, int flags);
447 static void fcp_free_dma(struct fcp_port *pptr, struct fcp_ipkt *icmd);
448 static struct fcp_tgt *fcp_lookup_target(struct fcp_port *pptr,
449     uchar_t *wwn);
450 static struct fcp_tgt *fcp_get_target_by_did(struct fcp_port *pptr,
451     uint32_t d_id);
452 static void fcp_icmd_callback(fc_packet_t *fpkt);
453 static int fcp_send_scsi(struct fcp_lun *plun, uchar_t opcode,
454     int len, int lcount, int tcount, int cause, uint32_t rscn_count);
455 static int fcp_check_reportlun(struct fcp_rsp *rsp, fc_packet_t *fpkt);
456 static void fcp_scsi_callback(fc_packet_t *fpkt);
457 static void fcp_retry_scsi_cmd(fc_packet_t *fpkt);
458 static void fcp_handle_inquiry(fc_packet_t *fpkt, struct fcp_ipkt *icmd);
459 static void fcp_handle_reportlun(fc_packet_t *fpkt, struct fcp_ipkt *icmd);
460 static struct fcp_lun *fcp_get_lun(struct fcp_tgt *ptgt,
461     uint16_t lun_num);
462 static int fcp_finish_tgt(struct fcp_port *pptr, struct fcp_tgt *ptgt,
463     int link_cnt, int tgt_cnt, int cause);
464 static void fcp_finish_init(struct fcp_port *pptr);
465 static void fcp_create_luns(struct fcp_tgt *ptgt, int link_cnt,
466     int tgt_cnt, int cause);
467 static int fcp_trigger_lun(struct fcp_lun *plun, child_info_t *cip,
468     int online, int link_cnt, int tgt_cnt, int flags);
469 static int fcp_offline_target(struct fcp_port *pptr, struct fcp_tgt *ptgt,
470     int link_cnt, int tgt_cnt, int nowait, int flags);
471 static void fcp_offline_target_now(struct fcp_port *pptr,
472     struct fcp_tgt *ptgt, int link_cnt, int tgt_cnt, int flags);
473 static void fcp_offline_tgt_luns(struct fcp_tgt *ptgt, int link_cnt,
474     int tgt_cnt, int flags);
475 static void fcp_offline_lun(struct fcp_lun *plun, int link_cnt, int tgt_cnt,
476     int nowait, int flags);
477 static void fcp_prepare_offline_lun(struct fcp_lun *plun, int link_cnt,
478     int tgt_cnt);
479 static void fcp_offline_lun_now(struct fcp_lun *plun, int link_cnt,
480     int tgt_cnt, int flags);
481 static void fcp_scan_offline_luns(struct fcp_port *pptr);
482 static void fcp_scan_offline_tgts(struct fcp_port *pptr);
483 static void fcp_update_offline_flags(struct fcp_lun *plun);
484 static struct fcp_pkt *fcp_scan_commands(struct fcp_lun *plun);
485 static void fcp_abort_commands(struct fcp_pkt *head, struct
486     fcp_port *pptr);
487 static void fcp_cmd_callback(fc_packet_t *fpkt);
488 static void fcp_complete_pkt(fc_packet_t *fpkt);
489 static int fcp_validate_fcp_response(struct fcp_rsp *rsp,
490     struct fcp_port *pptr);
491 static int fcp_device_changed(struct fcp_port *pptr, struct fcp_tgt *ptgt,
492     fc_portmap_t *map_entry, int link_cnt, int tgt_cnt, int cause);
493 static struct fcp_lun *fcp_alloc_lun(struct fcp_tgt *ptgt);
494 static void fcp_dealloc_lun(struct fcp_lun *plun);
495 static struct fcp_tgt *fcp_alloc_tgt(struct fcp_port *pptr,
496     fc_portmap_t *map_entry, int link_cnt);
497 static void fcp_dealloc_tgt(struct fcp_tgt *ptgt);
498 static void fcp_queue_ipkt(struct fcp_port *pptr, fc_packet_t *fpkt);
499 static int fcp_transport(opaque_t port_handle, fc_packet_t *fpkt,
500     int internal);
501 static void fcp_log(int level, dev_info_t *dip, const char *fmt, ...);
502 static int fcp_handle_port_attach(opaque_t ulph, fc_ulp_port_info_t *pinfo,
503     uint32_t s_id, int instance);
504 static int fcp_handle_port_detach(struct fcp_port *pptr, int flag,
505     int instance);
506 static void fcp_cleanup_port(struct fcp_port *pptr, int instance);
507 static int fcp_kmem_cache_constructor(struct scsi_pkt *, scsi_hba_tran_t *,
508     int);
509 static void fcp_kmem_cache_destructor(struct  scsi_pkt *, scsi_hba_tran_t *);
510 static int fcp_pkt_setup(struct scsi_pkt *, int (*)(), caddr_t);
511 static int fcp_alloc_cmd_resp(struct fcp_port *pptr, fc_packet_t *fpkt,
512     int flags);
513 static void fcp_free_cmd_resp(struct fcp_port *pptr, fc_packet_t *fpkt);
514 static int fcp_reset_target(struct scsi_address *ap, int level);
515 static int fcp_commoncap(struct scsi_address *ap, char *cap,
516     int val, int tgtonly, int doset);
517 static int fcp_scsi_get_name(struct scsi_device *sd, char *name, int len);
518 static int fcp_scsi_get_bus_addr(struct scsi_device *sd, char *name, int len);
519 static int fcp_linkreset(struct fcp_port *pptr, struct scsi_address *ap,
520     int sleep);
521 static int fcp_handle_port_resume(opaque_t ulph, fc_ulp_port_info_t *pinfo,
522     uint32_t s_id, fc_attach_cmd_t cmd, int instance);
523 static void fcp_cp_pinfo(struct fcp_port *pptr, fc_ulp_port_info_t *pinfo);
524 static void fcp_process_elem(struct fcp_hp_elem *elem, int result);
525 static child_info_t *fcp_get_cip(struct fcp_lun *plun, child_info_t *cip,
526     int lcount, int tcount);
527 static int fcp_is_dip_present(struct fcp_lun *plun, dev_info_t *cdip);
528 static int fcp_is_child_present(struct fcp_lun *plun, child_info_t *cip);
529 static dev_info_t *fcp_create_dip(struct fcp_lun *plun, int link_cnt,
530     int tgt_cnt);
531 static dev_info_t *fcp_find_existing_dip(struct fcp_lun *plun,
532     dev_info_t *pdip, caddr_t name);
533 static int fcp_online_child(struct fcp_lun *plun, child_info_t *cip,
534     int lcount, int tcount, int flags, int *circ);
535 static int fcp_offline_child(struct fcp_lun *plun, child_info_t *cip,
536     int lcount, int tcount, int flags, int *circ);
537 static void fcp_remove_child(struct fcp_lun *plun);
538 static void fcp_watch(void *arg);
539 static void fcp_check_reset_delay(struct fcp_port *pptr);
540 static void fcp_abort_all(struct fcp_port *pptr, struct fcp_tgt *ttgt,
541     struct fcp_lun *rlun, int tgt_cnt);
542 struct fcp_port *fcp_soft_state_unlink(struct fcp_port *pptr);
543 static struct fcp_lun *fcp_lookup_lun(struct fcp_port *pptr,
544     uchar_t *wwn, uint16_t lun);
545 static void fcp_prepare_pkt(struct fcp_port *pptr, struct fcp_pkt *cmd,
546     struct fcp_lun *plun);
547 static void fcp_post_callback(struct fcp_pkt *cmd);
548 static int fcp_dopoll(struct fcp_port *pptr, struct fcp_pkt *cmd);
549 static struct fcp_port *fcp_dip2port(dev_info_t *dip);
550 struct fcp_lun *fcp_get_lun_from_cip(struct fcp_port *pptr,
551     child_info_t *cip);
552 static int fcp_pass_to_hp_and_wait(struct fcp_port *pptr,
553     struct fcp_lun *plun, child_info_t *cip, int what, int link_cnt,
554     int tgt_cnt, int flags);
555 static struct fcp_hp_elem *fcp_pass_to_hp(struct fcp_port *pptr,
556     struct fcp_lun *plun, child_info_t *cip, int what, int link_cnt,
557     int tgt_cnt, int flags, int wait);
558 static void fcp_retransport_cmd(struct fcp_port *pptr,
559     struct fcp_pkt *cmd);
560 static void fcp_fail_cmd(struct fcp_pkt *cmd, uchar_t reason,
561     uint_t statistics);
562 static void fcp_queue_pkt(struct fcp_port *pptr, struct fcp_pkt *cmd);
563 static void fcp_update_targets(struct fcp_port *pptr,
564     fc_portmap_t *dev_list, uint32_t count, uint32_t state, int cause);
565 static int fcp_call_finish_init(struct fcp_port *pptr,
566     struct fcp_tgt *ptgt, int lcount, int tcount, int cause);
567 static int fcp_call_finish_init_held(struct fcp_port *pptr,
568     struct fcp_tgt *ptgt, int lcount, int tcount, int cause);
569 static void fcp_reconfigure_luns(void * tgt_handle);
570 static void fcp_free_targets(struct fcp_port *pptr);
571 static void fcp_free_target(struct fcp_tgt *ptgt);
572 static int fcp_is_retryable(struct fcp_ipkt *icmd);
573 static int fcp_create_on_demand(struct fcp_port *pptr, uchar_t *pwwn);
574 static void fcp_ascii_to_wwn(caddr_t string, uchar_t bytes[], unsigned int);
575 static void fcp_wwn_to_ascii(uchar_t bytes[], char *string);
576 static void fcp_print_error(fc_packet_t *fpkt);
577 static int fcp_handle_ipkt_errors(struct fcp_port *pptr,
578     struct fcp_tgt *ptgt, struct fcp_ipkt *icmd, int rval, caddr_t op);
579 static int fcp_outstanding_lun_cmds(struct fcp_tgt *ptgt);
580 static fc_portmap_t *fcp_construct_map(struct fcp_port *pptr,
581     uint32_t *dev_cnt);
582 static void fcp_offline_all(struct fcp_port *pptr, int lcount, int cause);
583 static int fcp_get_statec_count(struct fcp_ioctl *data, int mode, int *rval);
584 static int fcp_copyin_fcp_ioctl_data(struct fcp_ioctl *, int, int *,
585     struct fcp_ioctl *, struct fcp_port **);
586 static char *fcp_get_lun_path(struct fcp_lun *plun);
587 static int fcp_get_target_mappings(struct fcp_ioctl *data, int mode,
588     int *rval);
589 static int fcp_do_ns_registry(struct fcp_port *pptr, uint32_t s_id);
590 static void fcp_retry_ns_registry(struct fcp_port *pptr, uint32_t s_id);
591 static char *fcp_get_lun_path(struct fcp_lun *plun);
592 static int fcp_get_target_mappings(struct fcp_ioctl *data, int mode,
593     int *rval);
594 static void fcp_reconfig_wait(struct fcp_port *pptr);
595 
596 /*
597  * New functions added for mpxio support
598  */
599 static int fcp_virt_tgt_init(dev_info_t *hba_dip, dev_info_t *tgt_dip,
600     scsi_hba_tran_t *hba_tran, struct scsi_device *sd);
601 static mdi_pathinfo_t *fcp_create_pip(struct fcp_lun *plun, int lcount,
602     int tcount);
603 static mdi_pathinfo_t *fcp_find_existing_pip(struct fcp_lun *plun,
604     dev_info_t *pdip);
605 static int fcp_is_pip_present(struct fcp_lun *plun, mdi_pathinfo_t *pip);
606 static void fcp_handle_page83(fc_packet_t *, struct fcp_ipkt *, int);
607 static void fcp_update_mpxio_path_verifybusy(struct fcp_port *pptr);
608 static int fcp_copy_guid_2_lun_block(struct fcp_lun *plun, char *guidp);
609 static int fcp_update_mpxio_path(struct fcp_lun *plun, child_info_t *cip,
610     int what);
611 static int fcp_is_reconfig_needed(struct fcp_tgt *ptgt,
612     fc_packet_t *fpkt);
613 static int fcp_symmetric_device_probe(struct fcp_lun *plun);
614 
615 /*
616  * New functions added for lun masking support
617  */
618 static void fcp_read_blacklist(dev_info_t *dip,
619     struct fcp_black_list_entry **pplun_blacklist);
620 static void fcp_mask_pwwn_lun(char *curr_pwwn, char *curr_lun,
621     struct fcp_black_list_entry **pplun_blacklist);
622 static void fcp_add_one_mask(char *curr_pwwn, uint32_t lun_id,
623     struct fcp_black_list_entry **pplun_blacklist);
624 static int fcp_should_mask(la_wwn_t *wwn, uint32_t lun_id);
625 static void fcp_cleanup_blacklist(struct fcp_black_list_entry **lun_blacklist);
626 
627 extern struct mod_ops	mod_driverops;
628 /*
629  * This variable is defined in modctl.c and set to '1' after the root driver
630  * and fs are loaded.  It serves as an indication that the root filesystem can
631  * be used.
632  */
633 extern int		modrootloaded;
634 /*
635  * This table contains strings associated with the SCSI sense key codes.  It
636  * is used by FCP to print a clear explanation of the code returned in the
637  * sense information by a device.
638  */
639 extern char		*sense_keys[];
640 /*
641  * This device is created by the SCSI pseudo nexus driver (SCSI vHCI).	It is
642  * under this device that the paths to a physical device are created when
643  * MPxIO is used.
644  */
645 extern dev_info_t	*scsi_vhci_dip;
646 
647 /*
648  * Report lun processing
649  */
650 #define	FCP_LUN_ADDRESSING		0x80
651 #define	FCP_PD_ADDRESSING		0x00
652 #define	FCP_VOLUME_ADDRESSING		0x40
653 
654 #define	FCP_SVE_THROTTLE		0x28 /* Vicom */
655 #define	MAX_INT_DMA			0x7fffffff
656 #define	FCP_MAX_SENSE_LEN		252
657 #define	FCP_MAX_RESPONSE_LEN		0xffffff
658 /*
659  * Property definitions
660  */
661 #define	NODE_WWN_PROP	(char *)fcp_node_wwn_prop
662 #define	PORT_WWN_PROP	(char *)fcp_port_wwn_prop
663 #define	TARGET_PROP	(char *)fcp_target_prop
664 #define	LUN_PROP	(char *)fcp_lun_prop
665 #define	SAM_LUN_PROP	(char *)fcp_sam_lun_prop
666 #define	CONF_WWN_PROP	(char *)fcp_conf_wwn_prop
667 #define	OBP_BOOT_WWN	(char *)fcp_obp_boot_wwn
668 #define	MANUAL_CFG_ONLY	(char *)fcp_manual_config_only
669 #define	INIT_PORT_PROP	(char *)fcp_init_port_prop
670 #define	TGT_PORT_PROP	(char *)fcp_tgt_port_prop
671 #define	LUN_BLACKLIST_PROP	(char *)fcp_lun_blacklist_prop
672 /*
673  * Short hand macros.
674  */
675 #define	LUN_PORT	(plun->lun_tgt->tgt_port)
676 #define	LUN_TGT		(plun->lun_tgt)
677 
678 /*
679  * Driver private macros
680  */
681 #define	FCP_ATOB(x)	(((x) >= '0' && (x) <= '9') ? ((x) - '0') :	\
682 			((x) >= 'a' && (x) <= 'f') ?			\
683 			((x) - 'a' + 10) : ((x) - 'A' + 10))
684 
685 #define	FCP_MAX(a, b)	((a) > (b) ? (a) : (b))
686 
687 #define	FCP_N_NDI_EVENTS						\
688 	(sizeof (fcp_ndi_event_defs) / sizeof (ndi_event_definition_t))
689 
690 #define	FCP_LINK_STATE_CHANGED(p, c)			\
691 	((p)->port_link_cnt != (c)->ipkt_link_cnt)
692 
693 #define	FCP_TGT_STATE_CHANGED(t, c)			\
694 	((t)->tgt_change_cnt != (c)->ipkt_change_cnt)
695 
696 #define	FCP_STATE_CHANGED(p, t, c)		\
697 	(FCP_TGT_STATE_CHANGED(t, c))
698 
699 #define	FCP_MUST_RETRY(fpkt)				\
700 	((fpkt)->pkt_state == FC_PKT_LOCAL_BSY ||	\
701 	(fpkt)->pkt_state == FC_PKT_LOCAL_RJT ||	\
702 	(fpkt)->pkt_state == FC_PKT_TRAN_BSY ||	\
703 	(fpkt)->pkt_state == FC_PKT_ELS_IN_PROGRESS ||	\
704 	(fpkt)->pkt_state == FC_PKT_NPORT_BSY ||	\
705 	(fpkt)->pkt_state == FC_PKT_FABRIC_BSY ||	\
706 	(fpkt)->pkt_state == FC_PKT_PORT_OFFLINE ||	\
707 	(fpkt)->pkt_reason == FC_REASON_OFFLINE)
708 
709 #define	FCP_SENSE_REPORTLUN_CHANGED(es)		\
710 	((es)->es_key == KEY_UNIT_ATTENTION &&	\
711 	(es)->es_add_code == 0x3f &&		\
712 	(es)->es_qual_code == 0x0e)
713 
714 #define	FCP_SENSE_NO_LUN(es)			\
715 	((es)->es_key == KEY_ILLEGAL_REQUEST &&	\
716 	(es)->es_add_code == 0x25 &&		\
717 	(es)->es_qual_code == 0x0)
718 
719 #define	FCP_VERSION		"1.186"
720 #define	FCP_NAME_VERSION	"SunFC FCP v" FCP_VERSION
721 
722 #define	FCP_NUM_ELEMENTS(array)			\
723 	(sizeof (array) / sizeof ((array)[0]))
724 
725 /*
726  * Debugging, Error reporting, and tracing
727  */
728 #define	FCP_LOG_SIZE		(1024 * 1024)	/* parenthesized: unguarded */
							/* `1024 * 1024` breaks under */
							/* operator precedence, e.g. */
							/* `x % FCP_LOG_SIZE` */
729 
730 #define	FCP_LEVEL_1		0x00001		/* attach/detach PM CPR */
731 #define	FCP_LEVEL_2		0x00002		/* failures/Invalid data */
732 #define	FCP_LEVEL_3		0x00004		/* state change, discovery */
733 #define	FCP_LEVEL_4		0x00008		/* ULP messages */
734 #define	FCP_LEVEL_5		0x00010		/* ELS/SCSI cmds */
735 #define	FCP_LEVEL_6		0x00020		/* Transport failures */
736 #define	FCP_LEVEL_7		0x00040
737 #define	FCP_LEVEL_8		0x00080		/* I/O tracing */
738 #define	FCP_LEVEL_9		0x00100		/* I/O tracing */
739 
740 
741 
742 /*
743  * Log contents to system messages file
744  */
745 #define	FCP_MSG_LEVEL_1	(FCP_LEVEL_1 | FC_TRACE_LOG_MSG)
746 #define	FCP_MSG_LEVEL_2	(FCP_LEVEL_2 | FC_TRACE_LOG_MSG)
747 #define	FCP_MSG_LEVEL_3	(FCP_LEVEL_3 | FC_TRACE_LOG_MSG)
748 #define	FCP_MSG_LEVEL_4	(FCP_LEVEL_4 | FC_TRACE_LOG_MSG)
749 #define	FCP_MSG_LEVEL_5	(FCP_LEVEL_5 | FC_TRACE_LOG_MSG)
750 #define	FCP_MSG_LEVEL_6	(FCP_LEVEL_6 | FC_TRACE_LOG_MSG)
751 #define	FCP_MSG_LEVEL_7	(FCP_LEVEL_7 | FC_TRACE_LOG_MSG)
752 #define	FCP_MSG_LEVEL_8	(FCP_LEVEL_8 | FC_TRACE_LOG_MSG)
753 #define	FCP_MSG_LEVEL_9	(FCP_LEVEL_9 | FC_TRACE_LOG_MSG)
754 
755 
756 /*
757  * Log contents to trace buffer
758  */
759 #define	FCP_BUF_LEVEL_1	(FCP_LEVEL_1 | FC_TRACE_LOG_BUF)
760 #define	FCP_BUF_LEVEL_2	(FCP_LEVEL_2 | FC_TRACE_LOG_BUF)
761 #define	FCP_BUF_LEVEL_3	(FCP_LEVEL_3 | FC_TRACE_LOG_BUF)
762 #define	FCP_BUF_LEVEL_4	(FCP_LEVEL_4 | FC_TRACE_LOG_BUF)
763 #define	FCP_BUF_LEVEL_5	(FCP_LEVEL_5 | FC_TRACE_LOG_BUF)
764 #define	FCP_BUF_LEVEL_6	(FCP_LEVEL_6 | FC_TRACE_LOG_BUF)
765 #define	FCP_BUF_LEVEL_7	(FCP_LEVEL_7 | FC_TRACE_LOG_BUF)
766 #define	FCP_BUF_LEVEL_8	(FCP_LEVEL_8 | FC_TRACE_LOG_BUF)
767 #define	FCP_BUF_LEVEL_9	(FCP_LEVEL_9 | FC_TRACE_LOG_BUF)
768 
769 
770 /*
771  * Log contents to both system messages file and trace buffer
772  */
773 #define	FCP_MSG_BUF_LEVEL_1	(FCP_LEVEL_1 | FC_TRACE_LOG_BUF |	\
774 				FC_TRACE_LOG_MSG)
775 #define	FCP_MSG_BUF_LEVEL_2	(FCP_LEVEL_2 | FC_TRACE_LOG_BUF |	\
776 				FC_TRACE_LOG_MSG)
777 #define	FCP_MSG_BUF_LEVEL_3	(FCP_LEVEL_3 | FC_TRACE_LOG_BUF |	\
778 				FC_TRACE_LOG_MSG)
779 #define	FCP_MSG_BUF_LEVEL_4	(FCP_LEVEL_4 | FC_TRACE_LOG_BUF |	\
780 				FC_TRACE_LOG_MSG)
781 #define	FCP_MSG_BUF_LEVEL_5	(FCP_LEVEL_5 | FC_TRACE_LOG_BUF |	\
782 				FC_TRACE_LOG_MSG)
783 #define	FCP_MSG_BUF_LEVEL_6	(FCP_LEVEL_6 | FC_TRACE_LOG_BUF |	\
784 				FC_TRACE_LOG_MSG)
785 #define	FCP_MSG_BUF_LEVEL_7	(FCP_LEVEL_7 | FC_TRACE_LOG_BUF |	\
786 				FC_TRACE_LOG_MSG)
787 #define	FCP_MSG_BUF_LEVEL_8	(FCP_LEVEL_8 | FC_TRACE_LOG_BUF |	\
788 				FC_TRACE_LOG_MSG)
789 #define	FCP_MSG_BUF_LEVEL_9	(FCP_LEVEL_9 | FC_TRACE_LOG_BUF |	\
790 				FC_TRACE_LOG_MSG)
791 #ifdef DEBUG
792 #define	FCP_DTRACE	fc_trace_debug
793 #else
794 #define	FCP_DTRACE
795 #endif
796 
797 #define	FCP_TRACE	fc_trace_debug
798 
/*
 * Character-device entry points for the FCP pseudo device (the "fcp"
 * minor node created in fcp_attach()).  Only open, close and ioctl are
 * implemented; all other operations are nodev.
 */
static struct cb_ops fcp_cb_ops = {
	fcp_open,			/* open */
	fcp_close,			/* close */
	nodev,				/* strategy */
	nodev,				/* print */
	nodev,				/* dump */
	nodev,				/* read */
	nodev,				/* write */
	fcp_ioctl,			/* ioctl */
	nodev,				/* devmap */
	nodev,				/* mmap */
	nodev,				/* segmap */
	nochpoll,			/* chpoll */
	ddi_prop_op,			/* cb_prop_op */
	0,				/* streamtab */
	D_NEW | D_MP | D_HOTPLUG,	/* cb_flag */
	CB_REV,				/* rev */
	nodev,				/* aread */
	nodev				/* awrite */
};
819 
820 
/*
 * Device operations vector.  Only attach/detach plus the character
 * entry points above are provided; getinfo uses the 1:1 helper.
 */
static struct dev_ops fcp_ops = {
	DEVO_REV,
	0,
	ddi_getinfo_1to1,
	nulldev,		/* identify */
	nulldev,		/* probe */
	fcp_attach,		/* attach and detach are mandatory */
	fcp_detach,
	nodev,			/* reset */
	&fcp_cb_ops,		/* cb_ops */
	NULL,			/* bus_ops */
	NULL,			/* power */
};
834 
835 
/* Non-static so the version string is easy to locate in a crash dump. */
char *fcp_version = FCP_NAME_VERSION;

/* Loadable-module linkage for mod_install(9F)/mod_remove(9F). */
static struct modldrv modldrv = {
	&mod_driverops,
	FCP_NAME_VERSION,
	&fcp_ops
};


static struct modlinkage modlinkage = {
	MODREV_1,
	&modldrv,
	NULL
};
850 
851 
/*
 * Registration of fcp as an upper-layer protocol (ULP) with the FC
 * transport: rev/type, the state-change mask and the port attach/
 * detach/ioctl and callback entry points handed to fc_ulp_add().
 */
static fc_ulp_modinfo_t fcp_modinfo = {
	&fcp_modinfo,			/* ulp_handle */
	FCTL_ULP_MODREV_4,		/* ulp_rev */
	FC4_SCSI_FCP,			/* ulp_type */
	"fcp",				/* ulp_name */
	FCP_STATEC_MASK,		/* ulp_statec_mask */
	fcp_port_attach,		/* ulp_port_attach */
	fcp_port_detach,		/* ulp_port_detach */
	fcp_port_ioctl,			/* ulp_port_ioctl */
	fcp_els_callback,		/* ulp_els_callback */
	fcp_data_callback,		/* ulp_data_callback */
	fcp_statec_callback		/* ulp_statec_callback */
};
865 
/*
 * Default trace mask (levels 1-7 plus the log mask).
 * NOTE(review): the DEBUG and non-DEBUG definitions are currently
 * identical; the #ifdef only preserves a place for them to diverge.
 */
#ifdef	DEBUG
#define	FCP_TRACE_DEFAULT	(FC_TRACE_LOG_MASK | FCP_LEVEL_1 |	\
				FCP_LEVEL_2 | FCP_LEVEL_3 |		\
				FCP_LEVEL_4 | FCP_LEVEL_5 |		\
				FCP_LEVEL_6 | FCP_LEVEL_7)
#else
#define	FCP_TRACE_DEFAULT	(FC_TRACE_LOG_MASK | FCP_LEVEL_1 |	\
				FCP_LEVEL_2 | FCP_LEVEL_3 |		\
				FCP_LEVEL_4 | FCP_LEVEL_5 |		\
				FCP_LEVEL_6 | FCP_LEVEL_7)
#endif
877 
/* FCP global variables */
int			fcp_bus_config_debug = 0;
static int		fcp_log_size = FCP_LOG_SIZE;
static int		fcp_trace = FCP_TRACE_DEFAULT;
static fc_trace_logq_t	*fcp_logq = NULL;
static struct fcp_black_list_entry	*fcp_lun_blacklist = NULL;
/*
 * The auto-configuration is set by default.  The only way of disabling it is
 * through the property MANUAL_CFG_ONLY in the fcp.conf file.
 */
static int		fcp_enable_auto_configuration = 1;
static int		fcp_max_bus_config_retries	= 4;
static int		fcp_lun_ready_retry = 300;
/*
 * The value assigned to the following variable has changed several times due
 * to a problem with the data underruns reporting of some firmware(s).	The
 * current value of 50 gives a timeout value of 25 seconds for a max number
 * of 256 LUNs.
 */
static int		fcp_max_target_retries = 50;
/*
 * Watchdog variables
 * ------------------
 *
 * fcp_watchdog_init
 *
 *	Indicates if the watchdog timer is running or not.  This is actually
 *	a counter of the number of Fibre Channel ports that attached.  When
 *	the first port attaches the watchdog is started.  When the last port
 *	detaches the watchdog timer is stopped.
 *
 * fcp_watchdog_time
 *
 *	This is the watchdog clock counter.  It is incremented by
 *	fcp_watchdog_timeout each time the watchdog timer expires.
 *
 * fcp_watchdog_timeout
 *
 *	Increment value of the variable fcp_watchdog_time as well as the
 *	timeout value of the watchdog timer.  The unit is 1 second.	 It
 *	is strange that this is not a #define	but a variable since the code
 *	never changes this value.  The reason why it can be said that the
 *	unit is 1 second is because the number of ticks for the watchdog
 *	timer is determined like this:
 *
 *	    fcp_watchdog_tick = fcp_watchdog_timeout *
 *				  drv_usectohz(1000000);
 *
 *	The value 1000000 is hard coded in the code.
 *
 * fcp_watchdog_tick
 *
 *	Watchdog timer value in ticks.
 */
static int		fcp_watchdog_init = 0;
static int		fcp_watchdog_time = 0;
static int		fcp_watchdog_timeout = 1;
static int		fcp_watchdog_tick;
936 
937 /*
938  * fcp_offline_delay is a global variable to enable customisation of
939  * the timeout on link offlines or RSCNs. The default value is set
940  * to match FCP_OFFLINE_DELAY (20sec), which is 2*RA_TOV_els as
941  * specified in FCP4 Chapter 11 (see www.t10.org).
942  *
943  * The variable fcp_offline_delay is specified in SECONDS.
944  *
945  * If we made this a static var then the user would not be able to
946  * change it. This variable is set in fcp_attach().
947  */
948 unsigned int		fcp_offline_delay = FCP_OFFLINE_DELAY;
949 
950 static void		*fcp_softstate = NULL; /* for soft state */
951 static uchar_t		fcp_oflag = FCP_IDLE; /* open flag */
952 static kmutex_t		fcp_global_mutex;
953 static kmutex_t		fcp_ioctl_mutex;
954 static dev_info_t	*fcp_global_dip = NULL;
955 static timeout_id_t	fcp_watchdog_id;
956 const char		*fcp_lun_prop = "lun";
957 const char		*fcp_sam_lun_prop = "sam-lun";
958 const char		*fcp_target_prop = "target";
959 /*
960  * NOTE: consumers of "node-wwn" property include stmsboot in ON
961  * consolidation.
962  */
963 const char		*fcp_node_wwn_prop = "node-wwn";
964 const char		*fcp_port_wwn_prop = "port-wwn";
965 const char		*fcp_conf_wwn_prop = "fc-port-wwn";
966 const char		*fcp_obp_boot_wwn = "fc-boot-dev-portwwn";
967 const char		*fcp_manual_config_only = "manual_configuration_only";
968 const char		*fcp_init_port_prop = "initiator-port";
969 const char		*fcp_tgt_port_prop = "target-port";
970 const char		*fcp_lun_blacklist_prop = "pwwn-lun-blacklist";
971 
972 static struct fcp_port	*fcp_port_head = NULL;
973 static ddi_eventcookie_t	fcp_insert_eid;
974 static ddi_eventcookie_t	fcp_remove_eid;
975 
976 static ndi_event_definition_t	fcp_ndi_event_defs[] = {
977 	{ FCP_EVENT_TAG_INSERT, FCAL_INSERT_EVENT, EPL_KERNEL },
978 	{ FCP_EVENT_TAG_REMOVE, FCAL_REMOVE_EVENT, EPL_INTERRUPT }
979 };
980 
981 /*
982  * List of valid commands for the scsi_ioctl call
983  */
984 static uint8_t scsi_ioctl_list[] = {
985 	SCMD_INQUIRY,
986 	SCMD_REPORT_LUN,
987 	SCMD_READ_CAPACITY
988 };
989 
990 /*
991  * this is used to dummy up a report lun response for cases
992  * where the target doesn't support it
993  */
994 static uchar_t fcp_dummy_lun[] = {
995 	0x00,		/* MSB length (length = no of luns * 8) */
996 	0x00,
997 	0x00,
998 	0x08,		/* LSB length */
999 	0x00,		/* MSB reserved */
1000 	0x00,
1001 	0x00,
1002 	0x00,		/* LSB reserved */
1003 	FCP_PD_ADDRESSING,
1004 	0x00,		/* LUN is ZERO at the first level */
1005 	0x00,
1006 	0x00,		/* second level is zero */
1007 	0x00,
1008 	0x00,		/* third level is zero */
1009 	0x00,
1010 	0x00		/* fourth level is zero */
1011 };
1012 
/*
 * Table indexed by an FC-AL AL_PA value (0..0xEF shown here).
 * NOTE(review): the entries appear to map each valid AL_PA to its loop
 * index, with 0x00 marking an invalid AL_PA -- confirm against the
 * FC-AL arbitrated-loop address table in the spec before relying on it.
 */
static uchar_t fcp_alpa_to_switch[] = {
	0x00, 0x7d, 0x7c, 0x00, 0x7b, 0x00, 0x00, 0x00, 0x7a, 0x00,
	0x00, 0x00, 0x00, 0x00, 0x00, 0x79, 0x78, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x77, 0x76, 0x00, 0x00, 0x75, 0x00, 0x74,
	0x73, 0x72, 0x00, 0x00, 0x00, 0x71, 0x00, 0x70, 0x6f, 0x6e,
	0x00, 0x6d, 0x6c, 0x6b, 0x6a, 0x69, 0x68, 0x00, 0x00, 0x67,
	0x66, 0x65, 0x64, 0x63, 0x62, 0x00, 0x00, 0x61, 0x60, 0x00,
	0x5f, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x5e, 0x00, 0x5d,
	0x5c, 0x5b, 0x00, 0x5a, 0x59, 0x58, 0x57, 0x56, 0x55, 0x00,
	0x00, 0x54, 0x53, 0x52, 0x51, 0x50, 0x4f, 0x00, 0x00, 0x4e,
	0x4d, 0x00, 0x4c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x4b,
	0x00, 0x4a, 0x49, 0x48, 0x00, 0x47, 0x46, 0x45, 0x44, 0x43,
	0x42, 0x00, 0x00, 0x41, 0x40, 0x3f, 0x3e, 0x3d, 0x3c, 0x00,
	0x00, 0x3b, 0x3a, 0x00, 0x39, 0x00, 0x00, 0x00, 0x38, 0x37,
	0x36, 0x00, 0x35, 0x00, 0x00, 0x00, 0x34, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x33, 0x32, 0x00, 0x00, 0x00, 0x00, 0x00,
	0x00, 0x31, 0x30, 0x00, 0x00, 0x2f, 0x00, 0x2e, 0x2d, 0x2c,
	0x00, 0x00, 0x00, 0x2b, 0x00, 0x2a, 0x29, 0x28, 0x00, 0x27,
	0x26, 0x25, 0x24, 0x23, 0x22, 0x00, 0x00, 0x21, 0x20, 0x1f,
	0x1e, 0x1d, 0x1c, 0x00, 0x00, 0x1b, 0x1a, 0x00, 0x19, 0x00,
	0x00, 0x00, 0x00, 0x00, 0x00, 0x18, 0x00, 0x17, 0x16, 0x15,
	0x00, 0x14, 0x13, 0x12, 0x11, 0x10, 0x0f, 0x00, 0x00, 0x0e,
	0x0d, 0x0c, 0x0b, 0x0a, 0x09, 0x00, 0x00, 0x08, 0x07, 0x00,
	0x06, 0x00, 0x00, 0x00, 0x05, 0x04, 0x03, 0x00, 0x02, 0x00,
	0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
};
1039 
/*
 * Blank-padded SCSI product-id string for the VICOM SVE box (see the
 * "SUN SESS01" entry in fcp_symmetric_disk_table below).
 * NOTE(review): presumably compared against inquiry data at the use
 * site -- confirm there.
 */
static caddr_t pid = "SESS01	      ";

#if	!defined(lint)

/* Annotations for Sun's lint-based static locking analysis. */
_NOTE(MUTEX_PROTECTS_DATA(fcp_global_mutex,
    fcp_port::fcp_next fcp_watchdog_id))

_NOTE(DATA_READABLE_WITHOUT_LOCK(fcp_watchdog_time))

_NOTE(SCHEME_PROTECTS_DATA("Unshared",
    fcp_insert_eid
    fcp_remove_eid
    fcp_watchdog_time))

_NOTE(SCHEME_PROTECTS_DATA("Unshared",
    fcp_cb_ops
    fcp_ops
    callb_cpr))

#endif /* lint */
1060 
1061 /*
1062  * This table is used to determine whether or not it's safe to copy in
1063  * the target node name for a lun.  Since all luns behind the same target
1064  * have the same wwnn, only tagets that do not support multiple luns are
1065  * eligible to be enumerated under mpxio if they aren't page83 compliant.
1066  */
1067 
1068 char *fcp_symmetric_disk_table[] = {
1069 	"SEAGATE ST",
1070 	"IBM	 DDYFT",
1071 	"SUNW	 SUNWGS",	/* Daktari enclosure */
1072 	"SUN	 SENA",		/* SES device */
1073 	"SUN	 SESS01"	/* VICOM SVE box */
1074 };
1075 
1076 int fcp_symmetric_disk_table_size =
1077 	sizeof (fcp_symmetric_disk_table)/sizeof (char *);
1078 
1079 /*
1080  * The _init(9e) return value should be that of mod_install(9f). Under
1081  * some circumstances, a failure may not be related mod_install(9f) and
1082  * one would then require a return value to indicate the failure. Looking
1083  * at mod_install(9f), it is expected to return 0 for success and non-zero
1084  * for failure. mod_install(9f) for device drivers, further goes down the
1085  * calling chain and ends up in ddi_installdrv(), whose return values are
1086  * DDI_SUCCESS and DDI_FAILURE - There are also other functions in the
1087  * calling chain of mod_install(9f) which return values like EINVAL and
1088  * in some even return -1.
1089  *
1090  * To work around the vagaries of the mod_install() calling chain, return
1091  * either 0 or ENODEV depending on the success or failure of mod_install()
1092  */
1093 int
1094 _init(void)
1095 {
1096 	int rval;
1097 
1098 	/*
1099 	 * Allocate soft state and prepare to do ddi_soft_state_zalloc()
1100 	 * before registering with the transport first.
1101 	 */
1102 	if (ddi_soft_state_init(&fcp_softstate,
1103 	    sizeof (struct fcp_port), FCP_INIT_ITEMS) != 0) {
1104 		return (EINVAL);
1105 	}
1106 
1107 	mutex_init(&fcp_global_mutex, NULL, MUTEX_DRIVER, NULL);
1108 	mutex_init(&fcp_ioctl_mutex, NULL, MUTEX_DRIVER, NULL);
1109 
1110 	if ((rval = fc_ulp_add(&fcp_modinfo)) != FC_SUCCESS) {
1111 		cmn_err(CE_WARN, "fcp: fc_ulp_add failed");
1112 		mutex_destroy(&fcp_global_mutex);
1113 		mutex_destroy(&fcp_ioctl_mutex);
1114 		ddi_soft_state_fini(&fcp_softstate);
1115 		return (ENODEV);
1116 	}
1117 
1118 	fcp_logq = fc_trace_alloc_logq(fcp_log_size);
1119 
1120 	if ((rval = mod_install(&modlinkage)) != 0) {
1121 		fc_trace_free_logq(fcp_logq);
1122 		(void) fc_ulp_remove(&fcp_modinfo);
1123 		mutex_destroy(&fcp_global_mutex);
1124 		mutex_destroy(&fcp_ioctl_mutex);
1125 		ddi_soft_state_fini(&fcp_softstate);
1126 		rval = ENODEV;
1127 	}
1128 
1129 	return (rval);
1130 }
1131 
1132 
1133 /*
1134  * the system is done with us as a driver, so clean up
1135  */
1136 int
1137 _fini(void)
1138 {
1139 	int rval;
1140 
1141 	/*
1142 	 * don't start cleaning up until we know that the module remove
1143 	 * has worked  -- if this works, then we know that each instance
1144 	 * has successfully been DDI_DETACHed
1145 	 */
1146 	if ((rval = mod_remove(&modlinkage)) != 0) {
1147 		return (rval);
1148 	}
1149 
1150 	(void) fc_ulp_remove(&fcp_modinfo);
1151 
1152 	ddi_soft_state_fini(&fcp_softstate);
1153 	mutex_destroy(&fcp_global_mutex);
1154 	mutex_destroy(&fcp_ioctl_mutex);
1155 	fc_trace_free_logq(fcp_logq);
1156 
1157 	return (rval);
1158 }
1159 
1160 
/*
 * _info(9E): report module information via mod_info(9F).
 */
int
_info(struct modinfo *modinfop)
{
	return (mod_info(&modlinkage, modinfop));
}
1166 
1167 
1168 /*
1169  * attach the module
1170  */
1171 static int
1172 fcp_attach(dev_info_t *devi, ddi_attach_cmd_t cmd)
1173 {
1174 	int rval = DDI_SUCCESS;
1175 
1176 	FCP_DTRACE(fcp_logq, "fcp", fcp_trace,
1177 	    FCP_BUF_LEVEL_8, 0, "fcp module attach: cmd=0x%x", cmd);
1178 
1179 	if (cmd == DDI_ATTACH) {
1180 		/* The FCP pseudo device is created here. */
1181 		mutex_enter(&fcp_global_mutex);
1182 		fcp_global_dip = devi;
1183 		mutex_exit(&fcp_global_mutex);
1184 
1185 		if (ddi_create_minor_node(fcp_global_dip, "fcp", S_IFCHR,
1186 		    0, DDI_PSEUDO, 0) == DDI_SUCCESS) {
1187 			ddi_report_dev(fcp_global_dip);
1188 		} else {
1189 			cmn_err(CE_WARN, "FCP: Cannot create minor node");
1190 			mutex_enter(&fcp_global_mutex);
1191 			fcp_global_dip = NULL;
1192 			mutex_exit(&fcp_global_mutex);
1193 
1194 			rval = DDI_FAILURE;
1195 		}
1196 		/*
1197 		 * We check the fcp_offline_delay property at this
1198 		 * point. This variable is global for the driver,
1199 		 * not specific to an instance.
1200 		 *
1201 		 * We do not recommend setting the value to less
1202 		 * than 10 seconds (RA_TOV_els), or greater than
1203 		 * 60 seconds.
1204 		 */
1205 		fcp_offline_delay = ddi_prop_get_int(DDI_DEV_T_ANY,
1206 		    devi, DDI_PROP_DONTPASS | DDI_PROP_NOTPROM,
1207 		    "fcp_offline_delay", FCP_OFFLINE_DELAY);
1208 		if ((fcp_offline_delay < 10) ||
1209 		    (fcp_offline_delay > 60)) {
1210 			cmn_err(CE_WARN, "Setting fcp_offline_delay "
1211 			    "to %d second(s). This is outside the "
1212 			    "recommended range of 10..60 seconds.",
1213 			    fcp_offline_delay);
1214 		}
1215 	}
1216 
1217 	return (rval);
1218 }
1219 
1220 
/*
 * fcp_detach
 *	detach(9E) entry point.  Succeeds (and removes the minor node)
 *	only when no Fibre Channel ports are attached; otherwise the
 *	detach is refused.  Non-DDI_DETACH cmds return DDI_SUCCESS.
 */
/*ARGSUSED*/
static int
fcp_detach(dev_info_t *devi, ddi_detach_cmd_t cmd)
{
	int	res = DDI_SUCCESS;

	FCP_DTRACE(fcp_logq, "fcp", fcp_trace,
	    FCP_BUF_LEVEL_8, 0,	 "module detach: cmd=0x%x", cmd);

	if (cmd == DDI_DETACH) {
		/*
		 * Check if there are active ports/threads. If there
		 * are any, we will fail, else we will succeed (there
		 * should not be much to clean up)
		 */
		mutex_enter(&fcp_global_mutex);
		FCP_DTRACE(fcp_logq, "fcp",
		    fcp_trace, FCP_BUF_LEVEL_8, 0,  "port_head=%p",
		    (void *) fcp_port_head);

		if (fcp_port_head == NULL) {
			ddi_remove_minor_node(fcp_global_dip, NULL);
			fcp_global_dip = NULL;
			mutex_exit(&fcp_global_mutex);
		} else {
			mutex_exit(&fcp_global_mutex);
			res = DDI_FAILURE;
		}
	}
	FCP_DTRACE(fcp_logq, "fcp", fcp_trace,
	    FCP_BUF_LEVEL_8, 0,	 "module detach returning %d", res);

	return (res);
}
1255 
1256 
1257 /* ARGSUSED */
1258 static int
1259 fcp_open(dev_t *devp, int flag, int otype, cred_t *credp)
1260 {
1261 	if (otype != OTYP_CHR) {
1262 		return (EINVAL);
1263 	}
1264 
1265 	/*
1266 	 * Allow only root to talk;
1267 	 */
1268 	if (drv_priv(credp)) {
1269 		return (EPERM);
1270 	}
1271 
1272 	mutex_enter(&fcp_global_mutex);
1273 	if (fcp_oflag & FCP_EXCL) {
1274 		mutex_exit(&fcp_global_mutex);
1275 		return (EBUSY);
1276 	}
1277 
1278 	if (flag & FEXCL) {
1279 		if (fcp_oflag & FCP_OPEN) {
1280 			mutex_exit(&fcp_global_mutex);
1281 			return (EBUSY);
1282 		}
1283 		fcp_oflag |= FCP_EXCL;
1284 	}
1285 	fcp_oflag |= FCP_OPEN;
1286 	mutex_exit(&fcp_global_mutex);
1287 
1288 	return (0);
1289 }
1290 
1291 
1292 /* ARGSUSED */
1293 static int
1294 fcp_close(dev_t dev, int flag, int otype, cred_t *credp)
1295 {
1296 	if (otype != OTYP_CHR) {
1297 		return (EINVAL);
1298 	}
1299 
1300 	mutex_enter(&fcp_global_mutex);
1301 	if (!(fcp_oflag & FCP_OPEN)) {
1302 		mutex_exit(&fcp_global_mutex);
1303 		return (ENODEV);
1304 	}
1305 	fcp_oflag = FCP_IDLE;
1306 	mutex_exit(&fcp_global_mutex);
1307 
1308 	return (0);
1309 }
1310 
1311 
1312 /*
1313  * fcp_ioctl
1314  *	Entry point for the FCP ioctls
1315  *
1316  * Input:
1317  *	See ioctl(9E)
1318  *
1319  * Output:
1320  *	See ioctl(9E)
1321  *
1322  * Returns:
1323  *	See ioctl(9E)
1324  *
1325  * Context:
1326  *	Kernel context.
1327  */
1328 /* ARGSUSED */
1329 static int
1330 fcp_ioctl(dev_t dev, int cmd, intptr_t data, int mode, cred_t *credp,
1331     int *rval)
1332 {
1333 	int			ret = 0;
1334 
1335 	mutex_enter(&fcp_global_mutex);
1336 	if (!(fcp_oflag & FCP_OPEN)) {
1337 		mutex_exit(&fcp_global_mutex);
1338 		return (ENXIO);
1339 	}
1340 	mutex_exit(&fcp_global_mutex);
1341 
1342 	switch (cmd) {
1343 	case FCP_TGT_INQUIRY:
1344 	case FCP_TGT_CREATE:
1345 	case FCP_TGT_DELETE:
1346 		ret = fcp_setup_device_data_ioctl(cmd,
1347 		    (struct fcp_ioctl *)data, mode, rval);
1348 		break;
1349 
1350 	case FCP_TGT_SEND_SCSI:
1351 		mutex_enter(&fcp_ioctl_mutex);
1352 		ret = fcp_setup_scsi_ioctl(
1353 		    (struct fcp_scsi_cmd *)data, mode, rval);
1354 		mutex_exit(&fcp_ioctl_mutex);
1355 		break;
1356 
1357 	case FCP_STATE_COUNT:
1358 		ret = fcp_get_statec_count((struct fcp_ioctl *)data,
1359 		    mode, rval);
1360 		break;
1361 	case FCP_GET_TARGET_MAPPINGS:
1362 		ret = fcp_get_target_mappings((struct fcp_ioctl *)data,
1363 		    mode, rval);
1364 		break;
1365 	default:
1366 		fcp_log(CE_WARN, NULL,
1367 		    "!Invalid ioctl opcode = 0x%x", cmd);
1368 		ret	= EINVAL;
1369 	}
1370 
1371 	return (ret);
1372 }
1373 
1374 
1375 /*
1376  * fcp_setup_device_data_ioctl
1377  *	Setup handler for the "device data" style of
1378  *	ioctl for FCP.	See "fcp_util.h" for data structure
1379  *	definition.
1380  *
1381  * Input:
1382  *	cmd	= FCP ioctl command
1383  *	data	= ioctl data
1384  *	mode	= See ioctl(9E)
1385  *
1386  * Output:
1387  *	data	= ioctl data
1388  *	rval	= return value - see ioctl(9E)
1389  *
1390  * Returns:
1391  *	See ioctl(9E)
1392  *
1393  * Context:
1394  *	Kernel context.
1395  */
1396 /* ARGSUSED */
1397 static int
1398 fcp_setup_device_data_ioctl(int cmd, struct fcp_ioctl *data, int mode,
1399     int *rval)
1400 {
1401 	struct fcp_port	*pptr;
1402 	struct	device_data	*dev_data;
1403 	uint32_t		link_cnt;
1404 	la_wwn_t		*wwn_ptr = NULL;
1405 	struct fcp_tgt		*ptgt = NULL;
1406 	struct fcp_lun		*plun = NULL;
1407 	int			i, error;
1408 	struct fcp_ioctl	fioctl;
1409 
1410 #ifdef	_MULTI_DATAMODEL
1411 	switch (ddi_model_convert_from(mode & FMODELS)) {
1412 	case DDI_MODEL_ILP32: {
1413 		struct fcp32_ioctl f32_ioctl;
1414 
1415 		if (ddi_copyin((void *)data, (void *)&f32_ioctl,
1416 		    sizeof (struct fcp32_ioctl), mode)) {
1417 			return (EFAULT);
1418 		}
1419 		fioctl.fp_minor = f32_ioctl.fp_minor;
1420 		fioctl.listlen = f32_ioctl.listlen;
1421 		fioctl.list = (caddr_t)(long)f32_ioctl.list;
1422 		break;
1423 	}
1424 	case DDI_MODEL_NONE:
1425 		if (ddi_copyin((void *)data, (void *)&fioctl,
1426 		    sizeof (struct fcp_ioctl), mode)) {
1427 			return (EFAULT);
1428 		}
1429 		break;
1430 	}
1431 
1432 #else	/* _MULTI_DATAMODEL */
1433 	if (ddi_copyin((void *)data, (void *)&fioctl,
1434 	    sizeof (struct fcp_ioctl), mode)) {
1435 		return (EFAULT);
1436 	}
1437 #endif	/* _MULTI_DATAMODEL */
1438 
1439 	/*
1440 	 * Right now we can assume that the minor number matches with
1441 	 * this instance of fp. If this changes we will need to
1442 	 * revisit this logic.
1443 	 */
1444 	mutex_enter(&fcp_global_mutex);
1445 	pptr = fcp_port_head;
1446 	while (pptr) {
1447 		if (pptr->port_instance == (uint32_t)fioctl.fp_minor) {
1448 			break;
1449 		} else {
1450 			pptr = pptr->port_next;
1451 		}
1452 	}
1453 	mutex_exit(&fcp_global_mutex);
1454 	if (pptr == NULL) {
1455 		return (ENXIO);
1456 	}
1457 	mutex_enter(&pptr->port_mutex);
1458 
1459 
1460 	if ((dev_data = kmem_zalloc((sizeof (struct device_data)) *
1461 	    fioctl.listlen, KM_NOSLEEP)) == NULL) {
1462 		mutex_exit(&pptr->port_mutex);
1463 		return (ENOMEM);
1464 	}
1465 
1466 	if (ddi_copyin(fioctl.list, dev_data,
1467 	    (sizeof (struct device_data)) * fioctl.listlen, mode)) {
1468 		kmem_free(dev_data, sizeof (*dev_data) * fioctl.listlen);
1469 		mutex_exit(&pptr->port_mutex);
1470 		return (EFAULT);
1471 	}
1472 	link_cnt = pptr->port_link_cnt;
1473 
1474 	if (cmd == FCP_TGT_INQUIRY) {
1475 		wwn_ptr = (la_wwn_t *)&(dev_data[0].dev_pwwn);
1476 		if (bcmp(wwn_ptr->raw_wwn, pptr->port_pwwn.raw_wwn,
1477 		    sizeof (wwn_ptr->raw_wwn)) == 0) {
1478 			/* This ioctl is requesting INQ info of local HBA */
1479 			mutex_exit(&pptr->port_mutex);
1480 			dev_data[0].dev0_type = DTYPE_UNKNOWN;
1481 			dev_data[0].dev_status = 0;
1482 			if (ddi_copyout(dev_data, fioctl.list,
1483 			    (sizeof (struct device_data)) * fioctl.listlen,
1484 			    mode)) {
1485 				kmem_free(dev_data,
1486 				    sizeof (*dev_data) * fioctl.listlen);
1487 				return (EFAULT);
1488 			}
1489 			kmem_free(dev_data,
1490 			    sizeof (*dev_data) * fioctl.listlen);
1491 #ifdef	_MULTI_DATAMODEL
1492 			switch (ddi_model_convert_from(mode & FMODELS)) {
1493 			case DDI_MODEL_ILP32: {
1494 				struct fcp32_ioctl f32_ioctl;
1495 				f32_ioctl.fp_minor = fioctl.fp_minor;
1496 				f32_ioctl.listlen = fioctl.listlen;
1497 				f32_ioctl.list = (caddr32_t)(long)fioctl.list;
1498 				if (ddi_copyout((void *)&f32_ioctl,
1499 				    (void *)data,
1500 				    sizeof (struct fcp32_ioctl), mode)) {
1501 					return (EFAULT);
1502 				}
1503 				break;
1504 			}
1505 			case DDI_MODEL_NONE:
1506 				if (ddi_copyout((void *)&fioctl, (void *)data,
1507 				    sizeof (struct fcp_ioctl), mode)) {
1508 					return (EFAULT);
1509 				}
1510 				break;
1511 			}
1512 #else	/* _MULTI_DATAMODEL */
1513 			if (ddi_copyout((void *)&fioctl, (void *)data,
1514 			    sizeof (struct fcp_ioctl), mode)) {
1515 				return (EFAULT);
1516 			}
1517 #endif	/* _MULTI_DATAMODEL */
1518 			return (0);
1519 		}
1520 	}
1521 
1522 	if (pptr->port_state & (FCP_STATE_INIT | FCP_STATE_OFFLINE)) {
1523 		kmem_free(dev_data, sizeof (*dev_data) * fioctl.listlen);
1524 		mutex_exit(&pptr->port_mutex);
1525 		return (ENXIO);
1526 	}
1527 
1528 	for (i = 0; (i < fioctl.listlen) && (link_cnt == pptr->port_link_cnt);
1529 	    i++) {
1530 		wwn_ptr = (la_wwn_t *)&(dev_data[i].dev_pwwn);
1531 
1532 		dev_data[i].dev0_type = DTYPE_UNKNOWN;
1533 
1534 
1535 		dev_data[i].dev_status = ENXIO;
1536 
1537 		if ((ptgt = fcp_lookup_target(pptr,
1538 		    (uchar_t *)wwn_ptr)) == NULL) {
1539 			mutex_exit(&pptr->port_mutex);
1540 			if (fc_ulp_get_remote_port(pptr->port_fp_handle,
1541 			    wwn_ptr, &error, 0) == NULL) {
1542 				dev_data[i].dev_status = ENODEV;
1543 				mutex_enter(&pptr->port_mutex);
1544 				continue;
1545 			} else {
1546 
1547 				dev_data[i].dev_status = EAGAIN;
1548 
1549 				mutex_enter(&pptr->port_mutex);
1550 				continue;
1551 			}
1552 		} else {
1553 			mutex_enter(&ptgt->tgt_mutex);
1554 			if (ptgt->tgt_state & (FCP_TGT_MARK |
1555 			    FCP_TGT_BUSY)) {
1556 				dev_data[i].dev_status = EAGAIN;
1557 				mutex_exit(&ptgt->tgt_mutex);
1558 				continue;
1559 			}
1560 
1561 			if (ptgt->tgt_state & FCP_TGT_OFFLINE) {
1562 				if (ptgt->tgt_icap && !ptgt->tgt_tcap) {
1563 					dev_data[i].dev_status = ENOTSUP;
1564 				} else {
1565 					dev_data[i].dev_status = ENXIO;
1566 				}
1567 				mutex_exit(&ptgt->tgt_mutex);
1568 				continue;
1569 			}
1570 
1571 			switch (cmd) {
1572 			case FCP_TGT_INQUIRY:
1573 				/*
1574 				 * The reason we give device type of
1575 				 * lun 0 only even though in some
1576 				 * cases(like maxstrat) lun 0 device
1577 				 * type may be 0x3f(invalid) is that
1578 				 * for bridge boxes target will appear
1579 				 * as luns and the first lun could be
1580 				 * a device that utility may not care
1581 				 * about (like a tape device).
1582 				 */
1583 				dev_data[i].dev_lun_cnt = ptgt->tgt_lun_cnt;
1584 				dev_data[i].dev_status = 0;
1585 				mutex_exit(&ptgt->tgt_mutex);
1586 
1587 				if ((plun = fcp_get_lun(ptgt, 0)) == NULL) {
1588 					dev_data[i].dev0_type = DTYPE_UNKNOWN;
1589 				} else {
1590 					dev_data[i].dev0_type = plun->lun_type;
1591 				}
1592 				mutex_enter(&ptgt->tgt_mutex);
1593 				break;
1594 
1595 			case FCP_TGT_CREATE:
1596 				mutex_exit(&ptgt->tgt_mutex);
1597 				mutex_exit(&pptr->port_mutex);
1598 
1599 				/*
1600 				 * serialize state change call backs.
1601 				 * only one call back will be handled
1602 				 * at a time.
1603 				 */
1604 				mutex_enter(&fcp_global_mutex);
1605 				if (fcp_oflag & FCP_BUSY) {
1606 					mutex_exit(&fcp_global_mutex);
1607 					if (dev_data) {
1608 						kmem_free(dev_data,
1609 						    sizeof (*dev_data) *
1610 						    fioctl.listlen);
1611 					}
1612 					return (EBUSY);
1613 				}
1614 				fcp_oflag |= FCP_BUSY;
1615 				mutex_exit(&fcp_global_mutex);
1616 
1617 				dev_data[i].dev_status =
1618 				    fcp_create_on_demand(pptr,
1619 				    wwn_ptr->raw_wwn);
1620 
1621 				if (dev_data[i].dev_status != 0) {
1622 					char	buf[25];
1623 
1624 					for (i = 0; i < FC_WWN_SIZE; i++) {
1625 						(void) sprintf(&buf[i << 1],
1626 						    "%02x",
1627 						    wwn_ptr->raw_wwn[i]);
1628 					}
1629 
1630 					fcp_log(CE_WARN, pptr->port_dip,
1631 					    "!Failed to create nodes for"
1632 					    " pwwn=%s; error=%x", buf,
1633 					    dev_data[i].dev_status);
1634 				}
1635 
1636 				/* allow state change call backs again */
1637 				mutex_enter(&fcp_global_mutex);
1638 				fcp_oflag &= ~FCP_BUSY;
1639 				mutex_exit(&fcp_global_mutex);
1640 
1641 				mutex_enter(&pptr->port_mutex);
1642 				mutex_enter(&ptgt->tgt_mutex);
1643 
1644 				break;
1645 
1646 			case FCP_TGT_DELETE:
1647 				break;
1648 
1649 			default:
1650 				fcp_log(CE_WARN, pptr->port_dip,
1651 				    "!Invalid device data ioctl "
1652 				    "opcode = 0x%x", cmd);
1653 			}
1654 			mutex_exit(&ptgt->tgt_mutex);
1655 		}
1656 	}
1657 	mutex_exit(&pptr->port_mutex);
1658 
1659 	if (ddi_copyout(dev_data, fioctl.list,
1660 	    (sizeof (struct device_data)) * fioctl.listlen, mode)) {
1661 		kmem_free(dev_data, sizeof (*dev_data) * fioctl.listlen);
1662 		return (EFAULT);
1663 	}
1664 	kmem_free(dev_data, sizeof (*dev_data) * fioctl.listlen);
1665 
1666 #ifdef	_MULTI_DATAMODEL
1667 	switch (ddi_model_convert_from(mode & FMODELS)) {
1668 	case DDI_MODEL_ILP32: {
1669 		struct fcp32_ioctl f32_ioctl;
1670 
1671 		f32_ioctl.fp_minor = fioctl.fp_minor;
1672 		f32_ioctl.listlen = fioctl.listlen;
1673 		f32_ioctl.list = (caddr32_t)(long)fioctl.list;
1674 		if (ddi_copyout((void *)&f32_ioctl, (void *)data,
1675 		    sizeof (struct fcp32_ioctl), mode)) {
1676 			return (EFAULT);
1677 		}
1678 		break;
1679 	}
1680 	case DDI_MODEL_NONE:
1681 		if (ddi_copyout((void *)&fioctl, (void *)data,
1682 		    sizeof (struct fcp_ioctl), mode)) {
1683 			return (EFAULT);
1684 		}
1685 		break;
1686 	}
1687 #else	/* _MULTI_DATAMODEL */
1688 
1689 	if (ddi_copyout((void *)&fioctl, (void *)data,
1690 	    sizeof (struct fcp_ioctl), mode)) {
1691 		return (EFAULT);
1692 	}
1693 #endif	/* _MULTI_DATAMODEL */
1694 
1695 	return (0);
1696 }
1697 
1698 /*
1699  * Fetch the target mappings (path, etc.) for all LUNs
1700  * on this port.
1701  */
1702 /* ARGSUSED */
1703 static int
1704 fcp_get_target_mappings(struct fcp_ioctl *data,
1705     int mode, int *rval)
1706 {
1707 	struct fcp_port	    *pptr;
1708 	fc_hba_target_mappings_t    *mappings;
1709 	fc_hba_mapping_entry_t	    *map;
1710 	struct fcp_tgt	    *ptgt = NULL;
1711 	struct fcp_lun	    *plun = NULL;
1712 	int			    i, mapIndex, mappingSize;
1713 	int			    listlen;
1714 	struct fcp_ioctl	    fioctl;
1715 	char			    *path;
1716 	fcp_ent_addr_t		    sam_lun_addr;
1717 
1718 #ifdef	_MULTI_DATAMODEL
1719 	switch (ddi_model_convert_from(mode & FMODELS)) {
1720 	case DDI_MODEL_ILP32: {
1721 		struct fcp32_ioctl f32_ioctl;
1722 
1723 		if (ddi_copyin((void *)data, (void *)&f32_ioctl,
1724 		    sizeof (struct fcp32_ioctl), mode)) {
1725 			return (EFAULT);
1726 		}
1727 		fioctl.fp_minor = f32_ioctl.fp_minor;
1728 		fioctl.listlen = f32_ioctl.listlen;
1729 		fioctl.list = (caddr_t)(long)f32_ioctl.list;
1730 		break;
1731 	}
1732 	case DDI_MODEL_NONE:
1733 		if (ddi_copyin((void *)data, (void *)&fioctl,
1734 		    sizeof (struct fcp_ioctl), mode)) {
1735 			return (EFAULT);
1736 		}
1737 		break;
1738 	}
1739 
1740 #else	/* _MULTI_DATAMODEL */
1741 	if (ddi_copyin((void *)data, (void *)&fioctl,
1742 	    sizeof (struct fcp_ioctl), mode)) {
1743 		return (EFAULT);
1744 	}
1745 #endif	/* _MULTI_DATAMODEL */
1746 
1747 	/*
1748 	 * Right now we can assume that the minor number matches with
1749 	 * this instance of fp. If this changes we will need to
1750 	 * revisit this logic.
1751 	 */
1752 	mutex_enter(&fcp_global_mutex);
1753 	pptr = fcp_port_head;
1754 	while (pptr) {
1755 		if (pptr->port_instance == (uint32_t)fioctl.fp_minor) {
1756 			break;
1757 		} else {
1758 			pptr = pptr->port_next;
1759 		}
1760 	}
1761 	mutex_exit(&fcp_global_mutex);
1762 	if (pptr == NULL) {
1763 		cmn_err(CE_NOTE, "target mappings: unknown instance number: %d",
1764 		    fioctl.fp_minor);
1765 		return (ENXIO);
1766 	}
1767 
1768 
1769 	/* We use listlen to show the total buffer size */
1770 	mappingSize = fioctl.listlen;
1771 
1772 	/* Now calculate how many mapping entries will fit */
1773 	listlen = fioctl.listlen + sizeof (fc_hba_mapping_entry_t)
1774 	    - sizeof (fc_hba_target_mappings_t);
1775 	if (listlen <= 0) {
1776 		cmn_err(CE_NOTE, "target mappings: Insufficient buffer");
1777 		return (ENXIO);
1778 	}
1779 	listlen = listlen / sizeof (fc_hba_mapping_entry_t);
1780 
1781 	if ((mappings = kmem_zalloc(mappingSize, KM_SLEEP)) == NULL) {
1782 		return (ENOMEM);
1783 	}
1784 	mappings->version = FC_HBA_TARGET_MAPPINGS_VERSION;
1785 
1786 	/* Now get to work */
1787 	mapIndex = 0;
1788 
1789 	mutex_enter(&pptr->port_mutex);
1790 	/* Loop through all targets on this port */
1791 	for (i = 0; i < FCP_NUM_HASH; i++) {
1792 		for (ptgt = pptr->port_tgt_hash_table[i]; ptgt != NULL;
1793 		    ptgt = ptgt->tgt_next) {
1794 
1795 
1796 			/* Loop through all LUNs on this target */
1797 			for (plun = ptgt->tgt_lun; plun != NULL;
1798 			    plun = plun->lun_next) {
1799 				if (plun->lun_state & FCP_LUN_OFFLINE) {
1800 					continue;
1801 				}
1802 
1803 				path = fcp_get_lun_path(plun);
1804 				if (path == NULL) {
1805 					continue;
1806 				}
1807 
1808 				if (mapIndex >= listlen) {
1809 					mapIndex ++;
1810 					kmem_free(path, MAXPATHLEN);
1811 					continue;
1812 				}
1813 				map = &mappings->entries[mapIndex++];
1814 				bcopy(path, map->targetDriver,
1815 				    sizeof (map->targetDriver));
1816 				map->d_id = ptgt->tgt_d_id;
1817 				map->busNumber = 0;
1818 				map->targetNumber = ptgt->tgt_d_id;
1819 				map->osLUN = plun->lun_num;
1820 
1821 				/*
1822 				 * We had swapped lun when we stored it in
1823 				 * lun_addr. We need to swap it back before
1824 				 * returning it to user land
1825 				 */
1826 
1827 				sam_lun_addr.ent_addr_0 =
1828 				    BE_16(plun->lun_addr.ent_addr_0);
1829 				sam_lun_addr.ent_addr_1 =
1830 				    BE_16(plun->lun_addr.ent_addr_1);
1831 				sam_lun_addr.ent_addr_2 =
1832 				    BE_16(plun->lun_addr.ent_addr_2);
1833 				sam_lun_addr.ent_addr_3 =
1834 				    BE_16(plun->lun_addr.ent_addr_3);
1835 
1836 				bcopy(&sam_lun_addr, &map->samLUN,
1837 				    FCP_LUN_SIZE);
1838 				bcopy(ptgt->tgt_node_wwn.raw_wwn,
1839 				    map->NodeWWN.raw_wwn, sizeof (la_wwn_t));
1840 				bcopy(ptgt->tgt_port_wwn.raw_wwn,
1841 				    map->PortWWN.raw_wwn, sizeof (la_wwn_t));
1842 
1843 				if (plun->lun_guid) {
1844 
1845 					/* convert ascii wwn to bytes */
1846 					fcp_ascii_to_wwn(plun->lun_guid,
1847 					    map->guid, sizeof (map->guid));
1848 
1849 					if ((sizeof (map->guid)) <
1850 					    plun->lun_guid_size / 2) {
1851 						cmn_err(CE_WARN,
1852 						    "fcp_get_target_mappings:"
1853 						    "guid copy space "
1854 						    "insufficient."
1855 						    "Copy Truncation - "
1856 						    "available %d; need %d",
1857 						    (int)sizeof (map->guid),
1858 						    (int)
1859 						    plun->lun_guid_size / 2);
1860 					}
1861 				}
1862 				kmem_free(path, MAXPATHLEN);
1863 			}
1864 		}
1865 	}
1866 	mutex_exit(&pptr->port_mutex);
1867 	mappings->numLuns = mapIndex;
1868 
1869 	if (ddi_copyout(mappings, fioctl.list, mappingSize, mode)) {
1870 		kmem_free(mappings, mappingSize);
1871 		return (EFAULT);
1872 	}
1873 	kmem_free(mappings, mappingSize);
1874 
1875 #ifdef	_MULTI_DATAMODEL
1876 	switch (ddi_model_convert_from(mode & FMODELS)) {
1877 	case DDI_MODEL_ILP32: {
1878 		struct fcp32_ioctl f32_ioctl;
1879 
1880 		f32_ioctl.fp_minor = fioctl.fp_minor;
1881 		f32_ioctl.listlen = fioctl.listlen;
1882 		f32_ioctl.list = (caddr32_t)(long)fioctl.list;
1883 		if (ddi_copyout((void *)&f32_ioctl, (void *)data,
1884 		    sizeof (struct fcp32_ioctl), mode)) {
1885 			return (EFAULT);
1886 		}
1887 		break;
1888 	}
1889 	case DDI_MODEL_NONE:
1890 		if (ddi_copyout((void *)&fioctl, (void *)data,
1891 		    sizeof (struct fcp_ioctl), mode)) {
1892 			return (EFAULT);
1893 		}
1894 		break;
1895 	}
1896 #else	/* _MULTI_DATAMODEL */
1897 
1898 	if (ddi_copyout((void *)&fioctl, (void *)data,
1899 	    sizeof (struct fcp_ioctl), mode)) {
1900 		return (EFAULT);
1901 	}
1902 #endif	/* _MULTI_DATAMODEL */
1903 
1904 	return (0);
1905 }
1906 
1907 /*
1908  * fcp_setup_scsi_ioctl
1909  *	Setup handler for the "scsi passthru" style of
1910  *	ioctl for FCP.	See "fcp_util.h" for data structure
1911  *	definition.
1912  *
1913  * Input:
1914  *	u_fscsi	= ioctl data (user address space)
1915  *	mode	= See ioctl(9E)
1916  *
1917  * Output:
1918  *	u_fscsi	= ioctl data (user address space)
1919  *	rval	= return value - see ioctl(9E)
1920  *
1921  * Returns:
1922  *	0	= OK
1923  *	EAGAIN	= See errno.h
1924  *	EBUSY	= See errno.h
1925  *	EFAULT	= See errno.h
1926  *	EINTR	= See errno.h
1927  *	EINVAL	= See errno.h
1928  *	EIO	= See errno.h
1929  *	ENOMEM	= See errno.h
1930  *	ENXIO	= See errno.h
1931  *
1932  * Context:
1933  *	Kernel context.
1934  */
1935 /* ARGSUSED */
1936 static int
1937 fcp_setup_scsi_ioctl(struct fcp_scsi_cmd *u_fscsi,
1938     int mode, int *rval)
1939 {
1940 	int			ret		= 0;
1941 	int			temp_ret;
1942 	caddr_t			k_cdbbufaddr	= NULL;
1943 	caddr_t			k_bufaddr	= NULL;
1944 	caddr_t			k_rqbufaddr	= NULL;
1945 	caddr_t			u_cdbbufaddr;
1946 	caddr_t			u_bufaddr;
1947 	caddr_t			u_rqbufaddr;
1948 	struct fcp_scsi_cmd	k_fscsi;
1949 
1950 	/*
1951 	 * Get fcp_scsi_cmd array element from user address space
1952 	 */
1953 	if ((ret = fcp_copyin_scsi_cmd((caddr_t)u_fscsi, &k_fscsi, mode))
1954 	    != 0) {
1955 		return (ret);
1956 	}
1957 
1958 
1959 	/*
1960 	 * Even though kmem_alloc() checks the validity of the
1961 	 * buffer length, this check is needed when the
1962 	 * kmem_flags set and the zero buffer length is passed.
1963 	 */
1964 	if ((k_fscsi.scsi_cdblen <= 0) ||
1965 	    (k_fscsi.scsi_buflen <= 0) ||
1966 	    (k_fscsi.scsi_buflen > FCP_MAX_RESPONSE_LEN) ||
1967 	    (k_fscsi.scsi_rqlen <= 0) ||
1968 	    (k_fscsi.scsi_rqlen > FCP_MAX_SENSE_LEN)) {
1969 		return (EINVAL);
1970 	}
1971 
1972 	/*
1973 	 * Allocate data for fcp_scsi_cmd pointer fields
1974 	 */
1975 	if (ret == 0) {
1976 		k_cdbbufaddr = kmem_alloc(k_fscsi.scsi_cdblen, KM_NOSLEEP);
1977 		k_bufaddr    = kmem_alloc(k_fscsi.scsi_buflen, KM_NOSLEEP);
1978 		k_rqbufaddr  = kmem_alloc(k_fscsi.scsi_rqlen,  KM_NOSLEEP);
1979 
1980 		if (k_cdbbufaddr == NULL ||
1981 		    k_bufaddr	 == NULL ||
1982 		    k_rqbufaddr	 == NULL) {
1983 			ret = ENOMEM;
1984 		}
1985 	}
1986 
1987 	/*
1988 	 * Get fcp_scsi_cmd pointer fields from user
1989 	 * address space
1990 	 */
1991 	if (ret == 0) {
1992 		u_cdbbufaddr = k_fscsi.scsi_cdbbufaddr;
1993 		u_bufaddr    = k_fscsi.scsi_bufaddr;
1994 		u_rqbufaddr  = k_fscsi.scsi_rqbufaddr;
1995 
1996 		if (ddi_copyin(u_cdbbufaddr,
1997 		    k_cdbbufaddr,
1998 		    k_fscsi.scsi_cdblen,
1999 		    mode)) {
2000 			ret = EFAULT;
2001 		} else if (ddi_copyin(u_bufaddr,
2002 		    k_bufaddr,
2003 		    k_fscsi.scsi_buflen,
2004 		    mode)) {
2005 			ret = EFAULT;
2006 		} else if (ddi_copyin(u_rqbufaddr,
2007 		    k_rqbufaddr,
2008 		    k_fscsi.scsi_rqlen,
2009 		    mode)) {
2010 			ret = EFAULT;
2011 		}
2012 	}
2013 
2014 	/*
2015 	 * Send scsi command (blocking)
2016 	 */
2017 	if (ret == 0) {
2018 		/*
2019 		 * Prior to sending the scsi command, the
2020 		 * fcp_scsi_cmd data structure must contain kernel,
2021 		 * not user, addresses.
2022 		 */
2023 		k_fscsi.scsi_cdbbufaddr	= k_cdbbufaddr;
2024 		k_fscsi.scsi_bufaddr	= k_bufaddr;
2025 		k_fscsi.scsi_rqbufaddr	= k_rqbufaddr;
2026 
2027 		ret = fcp_send_scsi_ioctl(&k_fscsi);
2028 
2029 		/*
2030 		 * After sending the scsi command, the
2031 		 * fcp_scsi_cmd data structure must contain user,
2032 		 * not kernel, addresses.
2033 		 */
2034 		k_fscsi.scsi_cdbbufaddr	= u_cdbbufaddr;
2035 		k_fscsi.scsi_bufaddr	= u_bufaddr;
2036 		k_fscsi.scsi_rqbufaddr	= u_rqbufaddr;
2037 	}
2038 
2039 	/*
2040 	 * Put fcp_scsi_cmd pointer fields to user address space
2041 	 */
2042 	if (ret == 0) {
2043 		if (ddi_copyout(k_cdbbufaddr,
2044 		    u_cdbbufaddr,
2045 		    k_fscsi.scsi_cdblen,
2046 		    mode)) {
2047 			ret = EFAULT;
2048 		} else if (ddi_copyout(k_bufaddr,
2049 		    u_bufaddr,
2050 		    k_fscsi.scsi_buflen,
2051 		    mode)) {
2052 			ret = EFAULT;
2053 		} else if (ddi_copyout(k_rqbufaddr,
2054 		    u_rqbufaddr,
2055 		    k_fscsi.scsi_rqlen,
2056 		    mode)) {
2057 			ret = EFAULT;
2058 		}
2059 	}
2060 
2061 	/*
2062 	 * Free data for fcp_scsi_cmd pointer fields
2063 	 */
2064 	if (k_cdbbufaddr != NULL) {
2065 		kmem_free(k_cdbbufaddr, k_fscsi.scsi_cdblen);
2066 	}
2067 	if (k_bufaddr != NULL) {
2068 		kmem_free(k_bufaddr, k_fscsi.scsi_buflen);
2069 	}
2070 	if (k_rqbufaddr != NULL) {
2071 		kmem_free(k_rqbufaddr, k_fscsi.scsi_rqlen);
2072 	}
2073 
2074 	/*
2075 	 * Put fcp_scsi_cmd array element to user address space
2076 	 */
2077 	temp_ret = fcp_copyout_scsi_cmd(&k_fscsi, (caddr_t)u_fscsi, mode);
2078 	if (temp_ret != 0) {
2079 		ret = temp_ret;
2080 	}
2081 
2082 	/*
2083 	 * Return status
2084 	 */
2085 	return (ret);
2086 }
2087 
2088 
2089 /*
2090  * fcp_copyin_scsi_cmd
2091  *	Copy in fcp_scsi_cmd data structure from user address space.
2092  *	The data may be in 32 bit or 64 bit modes.
2093  *
2094  * Input:
2095  *	base_addr	= from address (user address space)
2096  *	mode		= See ioctl(9E) and ddi_copyin(9F)
2097  *
2098  * Output:
2099  *	fscsi		= to address (kernel address space)
2100  *
2101  * Returns:
2102  *	0	= OK
2103  *	EFAULT	= Error
2104  *
2105  * Context:
2106  *	Kernel context.
2107  */
2108 static int
2109 fcp_copyin_scsi_cmd(caddr_t base_addr, struct fcp_scsi_cmd *fscsi, int mode)
2110 {
2111 #ifdef	_MULTI_DATAMODEL
2112 	struct fcp32_scsi_cmd	f32scsi;
2113 
2114 	switch (ddi_model_convert_from(mode & FMODELS)) {
2115 	case DDI_MODEL_ILP32:
2116 		/*
2117 		 * Copy data from user address space
2118 		 */
2119 		if (ddi_copyin((void *)base_addr,
2120 		    &f32scsi,
2121 		    sizeof (struct fcp32_scsi_cmd),
2122 		    mode)) {
2123 			return (EFAULT);
2124 		}
2125 		/*
2126 		 * Convert from 32 bit to 64 bit
2127 		 */
2128 		FCP32_SCSI_CMD_TO_FCP_SCSI_CMD(&f32scsi, fscsi);
2129 		break;
2130 	case DDI_MODEL_NONE:
2131 		/*
2132 		 * Copy data from user address space
2133 		 */
2134 		if (ddi_copyin((void *)base_addr,
2135 		    fscsi,
2136 		    sizeof (struct fcp_scsi_cmd),
2137 		    mode)) {
2138 			return (EFAULT);
2139 		}
2140 		break;
2141 	}
2142 #else	/* _MULTI_DATAMODEL */
2143 	/*
2144 	 * Copy data from user address space
2145 	 */
2146 	if (ddi_copyin((void *)base_addr,
2147 	    fscsi,
2148 	    sizeof (struct fcp_scsi_cmd),
2149 	    mode)) {
2150 		return (EFAULT);
2151 	}
2152 #endif	/* _MULTI_DATAMODEL */
2153 
2154 	return (0);
2155 }
2156 
2157 
2158 /*
2159  * fcp_copyout_scsi_cmd
2160  *	Copy out fcp_scsi_cmd data structure to user address space.
2161  *	The data may be in 32 bit or 64 bit modes.
2162  *
2163  * Input:
2164  *	fscsi		= to address (kernel address space)
2165  *	mode		= See ioctl(9E) and ddi_copyin(9F)
2166  *
2167  * Output:
2168  *	base_addr	= from address (user address space)
2169  *
2170  * Returns:
2171  *	0	= OK
2172  *	EFAULT	= Error
2173  *
2174  * Context:
2175  *	Kernel context.
2176  */
2177 static int
2178 fcp_copyout_scsi_cmd(struct fcp_scsi_cmd *fscsi, caddr_t base_addr, int mode)
2179 {
2180 #ifdef	_MULTI_DATAMODEL
2181 	struct fcp32_scsi_cmd	f32scsi;
2182 
2183 	switch (ddi_model_convert_from(mode & FMODELS)) {
2184 	case DDI_MODEL_ILP32:
2185 		/*
2186 		 * Convert from 64 bit to 32 bit
2187 		 */
2188 		FCP_SCSI_CMD_TO_FCP32_SCSI_CMD(fscsi, &f32scsi);
2189 		/*
2190 		 * Copy data to user address space
2191 		 */
2192 		if (ddi_copyout(&f32scsi,
2193 		    (void *)base_addr,
2194 		    sizeof (struct fcp32_scsi_cmd),
2195 		    mode)) {
2196 			return (EFAULT);
2197 		}
2198 		break;
2199 	case DDI_MODEL_NONE:
2200 		/*
2201 		 * Copy data to user address space
2202 		 */
2203 		if (ddi_copyout(fscsi,
2204 		    (void *)base_addr,
2205 		    sizeof (struct fcp_scsi_cmd),
2206 		    mode)) {
2207 			return (EFAULT);
2208 		}
2209 		break;
2210 	}
2211 #else	/* _MULTI_DATAMODEL */
2212 	/*
2213 	 * Copy data to user address space
2214 	 */
2215 	if (ddi_copyout(fscsi,
2216 	    (void *)base_addr,
2217 	    sizeof (struct fcp_scsi_cmd),
2218 	    mode)) {
2219 		return (EFAULT);
2220 	}
2221 #endif	/* _MULTI_DATAMODEL */
2222 
2223 	return (0);
2224 }
2225 
2226 
2227 /*
2228  * fcp_send_scsi_ioctl
2229  *	Sends the SCSI command in blocking mode.
2230  *
2231  * Input:
2232  *	fscsi		= SCSI command data structure
2233  *
2234  * Output:
2235  *	fscsi		= SCSI command data structure
2236  *
2237  * Returns:
2238  *	0	= OK
2239  *	EAGAIN	= See errno.h
2240  *	EBUSY	= See errno.h
2241  *	EINTR	= See errno.h
2242  *	EINVAL	= See errno.h
2243  *	EIO	= See errno.h
2244  *	ENOMEM	= See errno.h
2245  *	ENXIO	= See errno.h
2246  *
2247  * Context:
2248  *	Kernel context.
2249  */
static int
fcp_send_scsi_ioctl(struct fcp_scsi_cmd *fscsi)
{
	struct fcp_lun	*plun		= NULL;
	struct fcp_port	*pptr		= NULL;
	struct fcp_tgt	*ptgt		= NULL;
	fc_packet_t		*fpkt		= NULL;
	struct fcp_ipkt	*icmd		= NULL;
	int			target_created	= FALSE;
	fc_frame_hdr_t		*hp;
	struct fcp_cmd		fcp_cmd;
	struct fcp_cmd		*fcmd;
	union scsi_cdb		*scsi_cdb;
	la_wwn_t		*wwn_ptr;
	int			nodma;
	struct fcp_rsp		*rsp;
	struct fcp_rsp_info	*rsp_info;
	caddr_t			rsp_sense;
	int			buf_len;
	int			info_len;
	int			sense_len;
	struct scsi_extended_sense	*sense_to = NULL;
	timeout_id_t		tid;
	uint8_t			reconfig_lun = FALSE;
	uint8_t			reconfig_pending = FALSE;
	uint8_t			scsi_cmd;
	int			rsp_len;
	int			cmd_index;
	int			fc_status;
	int			pkt_state;
	int			pkt_action;
	int			pkt_reason;
	int			ret, xport_retval = ~FC_SUCCESS;
	int			lcount;
	int			tcount;
	int			reconfig_status;
	int			port_busy = FALSE;
	uchar_t			*lun_string;

	/*
	 * Check valid SCSI command: the opcode (first CDB byte) must be
	 * one of the commands whitelisted in scsi_ioctl_list[].
	 */
	scsi_cmd = ((uint8_t *)fscsi->scsi_cdbbufaddr)[0];
	ret = EINVAL;
	for (cmd_index = 0;
	    cmd_index < FCP_NUM_ELEMENTS(scsi_ioctl_list) &&
	    ret != 0;
	    cmd_index++) {
		/*
		 * First byte of CDB is the SCSI command
		 */
		if (scsi_ioctl_list[cmd_index] == scsi_cmd) {
			ret = 0;
		}
	}

	/*
	 * Check inputs: only read-style passthru is supported, and the
	 * CDB must fit a standard FCP CDB.
	 */
	if (fscsi->scsi_flags != FCP_SCSI_READ) {
		ret = EINVAL;
	} else if (fscsi->scsi_cdblen > FCP_CDB_SIZE) {
		/* no larger than */
		ret = EINVAL;
	}


	/*
	 * Find FC port
	 */
	if (ret == 0) {
		/*
		 * Acquire global mutex
		 */
		mutex_enter(&fcp_global_mutex);

		pptr = fcp_port_head;
		while (pptr) {
			if (pptr->port_instance ==
			    (uint32_t)fscsi->scsi_fc_port_num) {
				break;
			} else {
				pptr = pptr->port_next;
			}
		}

		if (pptr == NULL) {
			ret = ENXIO;
		} else {
			/*
			 * fc_ulp_busy_port can raise power
			 *  so, we must not hold any mutexes involved in PM
			 */
			mutex_exit(&fcp_global_mutex);
			ret = fc_ulp_busy_port(pptr->port_fp_handle);
		}

		if (ret == 0) {

			/* remember port is busy, so we will release later */
			port_busy = TRUE;

			/*
			 * If there is a reconfiguration in progress, wait
			 * for it to complete.
			 */

			fcp_reconfig_wait(pptr);

			/* reacquire mutexes in order */
			mutex_enter(&fcp_global_mutex);
			mutex_enter(&pptr->port_mutex);

			/*
			 * Will port accept DMA?
			 */
			nodma = (pptr->port_fcp_dma == FC_NO_DVMA_SPACE)
			    ? 1 : 0;

			/*
			 * If init or offline, device not known
			 *
			 * If we are discovering (onlining), we can
			 * NOT obviously provide reliable data about
			 * devices until it is complete
			 */
			if (pptr->port_state &	  (FCP_STATE_INIT |
			    FCP_STATE_OFFLINE)) {
				ret = ENXIO;
			} else if (pptr->port_state & FCP_STATE_ONLINING) {
				ret = EBUSY;
			} else {
				/*
				 * Find target from pwwn
				 *
				 * The wwn must be put into a local
				 * variable to ensure alignment.
				 */
				wwn_ptr = (la_wwn_t *)&(fscsi->scsi_fc_pwwn);
				ptgt = fcp_lookup_target(pptr,
				    (uchar_t *)wwn_ptr);

				/*
				 * If target not found,
				 */
				if (ptgt == NULL) {
					/*
					 * Note: Still have global &
					 * port mutexes
					 */
					mutex_exit(&pptr->port_mutex);
					ptgt = fcp_port_create_tgt(pptr,
					    wwn_ptr, &ret, &fc_status,
					    &pkt_state, &pkt_action,
					    &pkt_reason);
					mutex_enter(&pptr->port_mutex);

					fscsi->scsi_fc_status  = fc_status;
					fscsi->scsi_pkt_state  =
					    (uchar_t)pkt_state;
					fscsi->scsi_pkt_reason = pkt_reason;
					fscsi->scsi_pkt_action =
					    (uchar_t)pkt_action;

					if (ptgt != NULL) {
						target_created = TRUE;
					} else if (ret == 0) {
						ret = ENOMEM;
					}
				}

				if (ret == 0) {
					/*
					 * Acquire target
					 */
					mutex_enter(&ptgt->tgt_mutex);

					/*
					 * If target is mark or busy,
					 * then target can not be used
					 */
					if (ptgt->tgt_state &
					    (FCP_TGT_MARK |
					    FCP_TGT_BUSY)) {
						ret = EBUSY;
					} else {
						/*
						 * Mark target as busy
						 */
						ptgt->tgt_state |=
						    FCP_TGT_BUSY;
					}

					/*
					 * Release target.  Snapshot the
					 * link/change counts used later to
					 * detect state changes under us.
					 */
					lcount = pptr->port_link_cnt;
					tcount = ptgt->tgt_change_cnt;
					mutex_exit(&ptgt->tgt_mutex);
				}
			}

			/*
			 * Release port
			 */
			mutex_exit(&pptr->port_mutex);
		}

		/*
		 * Release global mutex
		 */
		mutex_exit(&fcp_global_mutex);
	}

	if (ret == 0) {
		uint64_t belun = BE_64(fscsi->scsi_lun);

		/*
		 * If it's a target device, find lun from pwwn
		 * The wwn must be put into a local
		 * variable to ensure alignment.
		 */
		mutex_enter(&pptr->port_mutex);
		wwn_ptr = (la_wwn_t *)&(fscsi->scsi_fc_pwwn);
		if (!ptgt->tgt_tcap && ptgt->tgt_icap) {
			/* this is not a target */
			fscsi->scsi_fc_status = FC_DEVICE_NOT_TGT;
			ret = ENXIO;
		} else if ((belun << 16) != 0) {
			/*
			 * Since fcp only support PD and LU addressing method
			 * so far, the last 6 bytes of a valid LUN are expected
			 * to be filled with 00h.
			 */
			fscsi->scsi_fc_status = FC_INVALID_LUN;
			cmn_err(CE_WARN, "fcp: Unsupported LUN addressing"
			    " method 0x%02x with LUN number 0x%016" PRIx64,
			    (uint8_t)(belun >> 62), belun);
			ret = ENXIO;
		} else if ((plun = fcp_lookup_lun(pptr, (uchar_t *)wwn_ptr,
		    (uint16_t)((belun >> 48) & 0x3fff))) == NULL) {
			/*
			 * This is a SCSI target, but no LUN at this
			 * address.
			 *
			 * In the future, we may want to send this to
			 * the target, and let it respond
			 * appropriately
			 */
			ret = ENXIO;
		}
		mutex_exit(&pptr->port_mutex);
	}

	/*
	 * Finished grabbing external resources
	 * Allocate internal packet (icmd)
	 */
	if (ret == 0) {
		/*
		 * Calc rsp len assuming rsp info included
		 */
		rsp_len = sizeof (struct fcp_rsp) +
		    sizeof (struct fcp_rsp_info) + fscsi->scsi_rqlen;

		icmd = fcp_icmd_alloc(pptr, ptgt,
		    sizeof (struct fcp_cmd),
		    rsp_len,
		    fscsi->scsi_buflen,
		    nodma,
		    lcount,			/* ipkt_link_cnt */
		    tcount,			/* ipkt_change_cnt */
		    0,				/* cause */
		    FC_INVALID_RSCN_COUNT);	/* invalidate the count */

		if (icmd == NULL) {
			ret = ENOMEM;
		} else {
			/*
			 * Setup internal packet as sema sync
			 */
			fcp_ipkt_sema_init(icmd);
		}
	}

	if (ret == 0) {
		/*
		 * Init fpkt pointer for use.
		 */

		fpkt = icmd->ipkt_fpkt;

		fpkt->pkt_tran_flags	= FC_TRAN_CLASS3 | FC_TRAN_INTR;
		fpkt->pkt_tran_type	= FC_PKT_FCP_READ; /* only rd for now */
		fpkt->pkt_timeout	= fscsi->scsi_timeout;

		/*
		 * Init fcmd pointer for use by SCSI command
		 */

		if (nodma) {
			fcmd = (struct fcp_cmd *)fpkt->pkt_cmd;
		} else {
			fcmd = &fcp_cmd;
		}
		bzero(fcmd, sizeof (struct fcp_cmd));
		ptgt = plun->lun_tgt;

		lun_string = (uchar_t *)&fscsi->scsi_lun;

		fcmd->fcp_ent_addr.ent_addr_0 =
		    BE_16(*(uint16_t *)&(lun_string[0]));
		fcmd->fcp_ent_addr.ent_addr_1 =
		    BE_16(*(uint16_t *)&(lun_string[2]));
		fcmd->fcp_ent_addr.ent_addr_2 =
		    BE_16(*(uint16_t *)&(lun_string[4]));
		fcmd->fcp_ent_addr.ent_addr_3 =
		    BE_16(*(uint16_t *)&(lun_string[6]));

		/*
		 * Setup internal packet(icmd)
		 */
		icmd->ipkt_lun		= plun;
		icmd->ipkt_restart	= 0;
		icmd->ipkt_retries	= 0;
		icmd->ipkt_opcode	= 0;

		/*
		 * Init the frame HEADER Pointer for use
		 */
		hp = &fpkt->pkt_cmd_fhdr;

		hp->s_id	= pptr->port_id;
		hp->d_id	= ptgt->tgt_d_id;
		hp->r_ctl	= R_CTL_COMMAND;
		hp->type	= FC_TYPE_SCSI_FCP;
		hp->f_ctl	= F_CTL_SEQ_INITIATIVE | F_CTL_FIRST_SEQ;
		hp->rsvd	= 0;
		hp->seq_id	= 0;
		hp->seq_cnt	= 0;
		hp->ox_id	= 0xffff;
		hp->rx_id	= 0xffff;
		hp->ro		= 0;

		fcmd->fcp_cntl.cntl_qtype	= FCP_QTYPE_SIMPLE;
		fcmd->fcp_cntl.cntl_read_data	= 1;	/* only rd for now */
		fcmd->fcp_cntl.cntl_write_data	= 0;
		fcmd->fcp_data_len	= fscsi->scsi_buflen;

		scsi_cdb = (union scsi_cdb *)fcmd->fcp_cdb;
		bcopy((char *)fscsi->scsi_cdbbufaddr, (char *)scsi_cdb,
		    fscsi->scsi_cdblen);

		if (!nodma) {
			FCP_CP_OUT((uint8_t *)fcmd, fpkt->pkt_cmd,
			    fpkt->pkt_cmd_acc, sizeof (struct fcp_cmd));
		}

		/*
		 * Send SCSI command to FC transport
		 */

		if (ret == 0) {
			mutex_enter(&ptgt->tgt_mutex);

			if (!FCP_TGT_STATE_CHANGED(ptgt, icmd)) {
				mutex_exit(&ptgt->tgt_mutex);
				fscsi->scsi_fc_status = xport_retval =
				    fc_ulp_transport(pptr->port_fp_handle,
				    fpkt);
				if (fscsi->scsi_fc_status != FC_SUCCESS) {
					ret = EIO;
				}
			} else {
				mutex_exit(&ptgt->tgt_mutex);
				ret = EBUSY;
			}
		}
	}

	/*
	 * Wait for completion only if fc_ulp_transport was called and it
	 * returned a success. This is the only time callback will happen.
	 * Otherwise, there is no point in waiting
	 */
	if ((ret == 0) && (xport_retval == FC_SUCCESS)) {
		ret = fcp_ipkt_sema_wait(icmd);
	}

	/*
	 * Copy data to IOCTL data structures
	 */
	rsp = NULL;
	if ((ret == 0) && (xport_retval == FC_SUCCESS)) {
		rsp = (struct fcp_rsp *)fpkt->pkt_resp;

		if (fcp_validate_fcp_response(rsp, pptr) != FC_SUCCESS) {
			fcp_log(CE_WARN, pptr->port_dip,
			    "!SCSI command to d_id=0x%x lun=0x%x"
			    " failed, Bad FCP response values:"
			    " rsvd1=%x, rsvd2=%x, sts-rsvd1=%x,"
			    " sts-rsvd2=%x, rsplen=%x, senselen=%x",
			    ptgt->tgt_d_id, plun->lun_num,
			    rsp->reserved_0, rsp->reserved_1,
			    rsp->fcp_u.fcp_status.reserved_0,
			    rsp->fcp_u.fcp_status.reserved_1,
			    rsp->fcp_response_len, rsp->fcp_sense_len);

			ret = EIO;
		}
	}

	if ((ret == 0) && (rsp != NULL)) {
		/*
		 * Calc response lengths
		 */
		sense_len = 0;
		info_len = 0;

		if (rsp->fcp_u.fcp_status.rsp_len_set) {
			info_len = rsp->fcp_response_len;
		}

		rsp_info   = (struct fcp_rsp_info *)
		    ((uint8_t *)rsp + sizeof (struct fcp_rsp));

		/*
		 * Get SCSI status
		 */
		fscsi->scsi_bufstatus = rsp->fcp_u.fcp_status.scsi_status;
		/*
		 * If a lun was just added or removed and the next command
		 * comes through this interface, we need to capture the check
		 * condition so we can discover the new topology.
		 */
		if (fscsi->scsi_bufstatus != STATUS_GOOD &&
		    rsp->fcp_u.fcp_status.sense_len_set) {
			sense_len = rsp->fcp_sense_len;
			rsp_sense  = (caddr_t)((uint8_t *)rsp_info + info_len);
			sense_to = (struct scsi_extended_sense *)rsp_sense;
			if ((FCP_SENSE_REPORTLUN_CHANGED(sense_to)) ||
			    (FCP_SENSE_NO_LUN(sense_to))) {
				reconfig_lun = TRUE;
			}
		}

		if (fscsi->scsi_bufstatus == STATUS_GOOD && (ptgt != NULL) &&
		    (reconfig_lun || (scsi_cdb->scc_cmd == SCMD_REPORT_LUN))) {
			if (reconfig_lun == FALSE) {
				reconfig_status =
				    fcp_is_reconfig_needed(ptgt, fpkt);
			}

			/*
			 * Note: the || below short-circuits, so
			 * reconfig_status is only read when reconfig_lun
			 * is FALSE, i.e. after it has been set just above.
			 */
			if ((reconfig_lun == TRUE) ||
			    (reconfig_status == TRUE)) {
				mutex_enter(&ptgt->tgt_mutex);
				if (ptgt->tgt_tid == NULL) {
					/*
					 * Either we've been notified the
					 * REPORT_LUN data has changed, or
					 * we've determined on our own that
					 * we're out of date.  Kick off
					 * rediscovery.
					 */
					tid = timeout(fcp_reconfigure_luns,
					    (caddr_t)ptgt, drv_usectohz(1));

					ptgt->tgt_tid = tid;
					ptgt->tgt_state |= FCP_TGT_BUSY;
					ret = EBUSY;
					reconfig_pending = TRUE;
				}
				mutex_exit(&ptgt->tgt_mutex);
			}
		}

		/*
		 * Calc residuals and buffer lengths
		 */

		if (ret == 0) {
			buf_len = fscsi->scsi_buflen;
			fscsi->scsi_bufresid	= 0;
			if (rsp->fcp_u.fcp_status.resid_under) {
				if (rsp->fcp_resid <= fscsi->scsi_buflen) {
					fscsi->scsi_bufresid = rsp->fcp_resid;
				} else {
					cmn_err(CE_WARN, "fcp: bad residue %x "
					    "for txfer len %x", rsp->fcp_resid,
					    fscsi->scsi_buflen);
					fscsi->scsi_bufresid =
					    fscsi->scsi_buflen;
				}
				buf_len -= fscsi->scsi_bufresid;
			}
			if (rsp->fcp_u.fcp_status.resid_over) {
				fscsi->scsi_bufresid = -rsp->fcp_resid;
			}

			fscsi->scsi_rqresid	= fscsi->scsi_rqlen - sense_len;
			if (fscsi->scsi_rqlen < sense_len) {
				sense_len = fscsi->scsi_rqlen;
			}

			fscsi->scsi_fc_rspcode	= 0;
			if (rsp->fcp_u.fcp_status.rsp_len_set) {
				fscsi->scsi_fc_rspcode	= rsp_info->rsp_code;
			}
			fscsi->scsi_pkt_state	= fpkt->pkt_state;
			fscsi->scsi_pkt_action	= fpkt->pkt_action;
			fscsi->scsi_pkt_reason	= fpkt->pkt_reason;

			/*
			 * Copy data and request sense
			 *
			 * Data must be copied by using the FCP_CP_IN macro.
			 * This will ensure the proper byte order since the data
			 * is being copied directly from the memory mapped
			 * device register.
			 *
			 * The response (and request sense) will be in the
			 * correct byte order.	No special copy is necessary.
			 */

			if (buf_len) {
				FCP_CP_IN(fpkt->pkt_data,
				    fscsi->scsi_bufaddr,
				    fpkt->pkt_data_acc,
				    buf_len);
			}
			bcopy((void *)rsp_sense,
			    (void *)fscsi->scsi_rqbufaddr,
			    sense_len);
		}
	}

	/*
	 * Cleanup transport data structures if icmd was alloc-ed
	 * So, cleanup happens in the same thread that icmd was alloc-ed
	 */
	if (icmd != NULL) {
		fcp_ipkt_sema_cleanup(icmd);
	}

	/* restore pm busy/idle status */
	if (port_busy) {
		fc_ulp_idle_port(pptr->port_fp_handle);
	}

	/*
	 * Cleanup target.  if a reconfig is pending, don't clear the BUSY
	 * flag, it'll be cleared when the reconfig is complete.
	 */
	if ((ptgt != NULL) && !reconfig_pending) {
		/*
		 * NOTE(review): both branches below are identical; the
		 * split on target_created appears vestigial.
		 */
		/*
		 * If target was created,
		 */
		if (target_created) {
			mutex_enter(&ptgt->tgt_mutex);
			ptgt->tgt_state &= ~FCP_TGT_BUSY;
			mutex_exit(&ptgt->tgt_mutex);
		} else {
			/*
			 * De-mark target as busy
			 */
			mutex_enter(&ptgt->tgt_mutex);
			ptgt->tgt_state &= ~FCP_TGT_BUSY;
			mutex_exit(&ptgt->tgt_mutex);
		}
	}
	return (ret);
}
2822 
2823 
/*
 * fcp_is_reconfig_needed
 *	Compare the REPORT_LUN response carried in fpkt against the LUNs
 *	currently tracked on ptgt and decide whether LUN rediscovery is
 *	required.  LUNs masked by fcp_lun_blacklist are treated as
 *	known and do not trigger reconfiguration.
 *
 * Returns:
 *	TRUE	= the reported LUN set differs from what we track
 *	FALSE	= no change detected
 *
 * Context:
 *	Kernel context.  Allocates with KM_SLEEP; acquires tgt_mutex.
 */
static int
fcp_is_reconfig_needed(struct fcp_tgt *ptgt,
    fc_packet_t	*fpkt)
{
	uchar_t			*lun_string;
	uint16_t		lun_num, i;
	int			num_luns;
	int			actual_luns;
	int			num_masked_luns;
	int			lun_buflen;
	struct fcp_lun	*plun	= NULL;
	struct fcp_reportlun_resp	*report_lun;
	uint8_t			reconfig_needed = FALSE;
	uint8_t			lun_exists = FALSE;

	/* Pull the REPORT_LUN payload out of the packet's data area */
	report_lun = kmem_zalloc(fpkt->pkt_datalen, KM_SLEEP);

	FCP_CP_IN(fpkt->pkt_data, report_lun, fpkt->pkt_data_acc,
	    fpkt->pkt_datalen);

	/* get number of luns (which is supplied as LUNS * 8) */
	num_luns = BE_32(report_lun->num_lun) >> 3;

	/*
	 * Figure out exactly how many lun strings our response buffer
	 * can hold.
	 */
	lun_buflen = (fpkt->pkt_datalen -
	    2 * sizeof (uint32_t)) / sizeof (longlong_t);

	/*
	 * Is our response buffer full or not? We don't want to
	 * potentially walk beyond the number of luns we have.
	 */
	if (num_luns <= lun_buflen) {
		actual_luns = num_luns;
	} else {
		actual_luns = lun_buflen;
	}

	mutex_enter(&ptgt->tgt_mutex);

	/* Scan each lun to see if we have masked it. */
	num_masked_luns = 0;
	if (fcp_lun_blacklist != NULL) {
		for (i = 0; i < actual_luns; i++) {
			lun_string = (uchar_t *)&(report_lun->lun_string[i]);
			/* Top two bits of byte 0 select the addressing method */
			switch (lun_string[0] & 0xC0) {
			case FCP_LUN_ADDRESSING:
			case FCP_PD_ADDRESSING:
				/* 14-bit LUN number from bytes 0-1 */
				lun_num = ((lun_string[0] & 0x3F) << 8)
				    | lun_string[1];
				if (fcp_should_mask(&ptgt->tgt_port_wwn,
				    lun_num) == TRUE) {
					num_masked_luns++;
				}
				break;
			default:
				break;
			}
		}
	}

	/*
	 * The quick and easy check.  If the number of LUNs reported
	 * doesn't match the number we currently know about, we need
	 * to reconfigure.
	 */
	if (num_luns && num_luns != (ptgt->tgt_lun_cnt + num_masked_luns)) {
		mutex_exit(&ptgt->tgt_mutex);
		kmem_free(report_lun, fpkt->pkt_datalen);
		return (TRUE);
	}

	/*
	 * If the quick and easy check doesn't turn up anything, we walk
	 * the list of luns from the REPORT_LUN response and look for
	 * any luns we don't know about.  If we find one, we know we need
	 * to reconfigure. We will skip LUNs that are masked because of the
	 * blacklist.
	 */
	for (i = 0; i < actual_luns; i++) {
		lun_string = (uchar_t *)&(report_lun->lun_string[i]);
		lun_exists = FALSE;
		switch (lun_string[0] & 0xC0) {
		case FCP_LUN_ADDRESSING:
		case FCP_PD_ADDRESSING:
			lun_num = ((lun_string[0] & 0x3F) << 8) | lun_string[1];

			/* Masked LUNs count as "known" */
			if ((fcp_lun_blacklist != NULL) && (fcp_should_mask(
			    &ptgt->tgt_port_wwn, lun_num) == TRUE)) {
				lun_exists = TRUE;
				break;
			}

			for (plun = ptgt->tgt_lun; plun;
			    plun = plun->lun_next) {
				if (plun->lun_num == lun_num) {
					lun_exists = TRUE;
					break;
				}
			}
			break;
		default:
			/*
			 * Unsupported addressing method: lun_exists stays
			 * FALSE, which forces a reconfiguration below.
			 */
			break;
		}

		if (lun_exists == FALSE) {
			reconfig_needed = TRUE;
			break;
		}
	}

	mutex_exit(&ptgt->tgt_mutex);
	kmem_free(report_lun, fpkt->pkt_datalen);

	return (reconfig_needed);
}
2942 
2943 /*
2944  * This function is called by fcp_handle_page83 and uses inquiry response data
2945  * stored in plun->lun_inq to determine whether or not a device is a member of
2946  * the table fcp_symmetric_disk_table_size. We return 0 if it is in the table,
2947  * otherwise 1.
2948  */
2949 static int
2950 fcp_symmetric_device_probe(struct fcp_lun *plun)
2951 {
2952 	struct scsi_inquiry	*stdinq = &plun->lun_inq;
2953 	char			*devidptr;
2954 	int			i, len;
2955 
2956 	for (i = 0; i < fcp_symmetric_disk_table_size; i++) {
2957 		devidptr = fcp_symmetric_disk_table[i];
2958 		len = (int)strlen(devidptr);
2959 
2960 		if (bcmp(stdinq->inq_vid, devidptr, len) == 0) {
2961 			return (0);
2962 		}
2963 	}
2964 	return (1);
2965 }
2966 
2967 
2968 /*
2969  * This function is called by fcp_ioctl for the FCP_STATE_COUNT ioctl
2970  * It basically returns the current count of # of state change callbacks
2971  * i.e the value of tgt_change_cnt.
2972  *
2973  * INPUT:
2974  *   fcp_ioctl.fp_minor -> The minor # of the fp port
2975  *   fcp_ioctl.listlen	-> 1
2976  *   fcp_ioctl.list	-> Pointer to a 32 bit integer
2977  */
/*ARGSUSED2*/
static int
fcp_get_statec_count(struct fcp_ioctl *data, int mode, int *rval)
{
	int			ret;
	uint32_t		link_cnt;
	struct fcp_ioctl	fioctl;
	struct fcp_port	*pptr = NULL;

	/* Copy in the request and resolve fp_minor to a port instance. */
	if ((ret = fcp_copyin_fcp_ioctl_data(data, mode, rval, &fioctl,
	    &pptr)) != 0) {
		return (ret);
	}

	ASSERT(pptr != NULL);

	/* The caller must supply room for exactly one 32-bit counter. */
	if (fioctl.listlen != 1) {
		return (EINVAL);
	}

	/* An offline port has no meaningful state change count. */
	mutex_enter(&pptr->port_mutex);
	if (pptr->port_state & FCP_STATE_OFFLINE) {
		mutex_exit(&pptr->port_mutex);
		return (ENXIO);
	}

	/*
	 * FCP_STATE_INIT is set in 2 cases (not sure why it is overloaded):
	 * When the fcp initially attaches to the port and there are nothing
	 * hanging out of the port or if there was a repeat offline state change
	 * callback (refer fcp_statec_callback() FC_STATE_OFFLINE case).
	 * In the latter case, port_tmp_cnt will be non-zero and that is how we
	 * will differentiate the 2 cases.
	 */
	if ((pptr->port_state & FCP_STATE_INIT) && pptr->port_tmp_cnt) {
		mutex_exit(&pptr->port_mutex);
		return (ENXIO);
	}

	/* Snapshot the link count while still holding the port lock. */
	link_cnt = pptr->port_link_cnt;
	mutex_exit(&pptr->port_mutex);

	/* Copy the counter out to the user-supplied list buffer. */
	if (ddi_copyout(&link_cnt, fioctl.list, (sizeof (uint32_t)), mode)) {
		return (EFAULT);
	}

#ifdef	_MULTI_DATAMODEL
	/* Copy the ioctl structure back out in the caller's data model. */
	switch (ddi_model_convert_from(mode & FMODELS)) {
	case DDI_MODEL_ILP32: {
		struct fcp32_ioctl f32_ioctl;

		f32_ioctl.fp_minor = fioctl.fp_minor;
		f32_ioctl.listlen = fioctl.listlen;
		f32_ioctl.list = (caddr32_t)(long)fioctl.list;
		if (ddi_copyout((void *)&f32_ioctl, (void *)data,
		    sizeof (struct fcp32_ioctl), mode)) {
			return (EFAULT);
		}
		break;
	}
	case DDI_MODEL_NONE:
		if (ddi_copyout((void *)&fioctl, (void *)data,
		    sizeof (struct fcp_ioctl), mode)) {
			return (EFAULT);
		}
		break;
	}
#else	/* _MULTI_DATAMODEL */

	if (ddi_copyout((void *)&fioctl, (void *)data,
	    sizeof (struct fcp_ioctl), mode)) {
		return (EFAULT);
	}
#endif	/* _MULTI_DATAMODEL */

	return (0);
}
3055 
3056 /*
3057  * This function copies the fcp_ioctl structure passed in from user land
3058  * into kernel land. Handles 32 bit applications.
3059  */
/*ARGSUSED*/
static int
fcp_copyin_fcp_ioctl_data(struct fcp_ioctl *data, int mode, int *rval,
    struct fcp_ioctl *fioctl, struct fcp_port **pptr)
{
	struct fcp_port	*t_pptr;

#ifdef	_MULTI_DATAMODEL
	/* Convert from the 32-bit ioctl layout when the caller is ILP32. */
	switch (ddi_model_convert_from(mode & FMODELS)) {
	case DDI_MODEL_ILP32: {
		struct fcp32_ioctl f32_ioctl;

		if (ddi_copyin((void *)data, (void *)&f32_ioctl,
		    sizeof (struct fcp32_ioctl), mode)) {
			return (EFAULT);
		}
		fioctl->fp_minor = f32_ioctl.fp_minor;
		fioctl->listlen = f32_ioctl.listlen;
		fioctl->list = (caddr_t)(long)f32_ioctl.list;
		break;
	}
	case DDI_MODEL_NONE:
		if (ddi_copyin((void *)data, (void *)fioctl,
		    sizeof (struct fcp_ioctl), mode)) {
			return (EFAULT);
		}
		break;
	}

#else	/* _MULTI_DATAMODEL */
	if (ddi_copyin((void *)data, (void *)fioctl,
	    sizeof (struct fcp_ioctl), mode)) {
		return (EFAULT);
	}
#endif	/* _MULTI_DATAMODEL */

	/*
	 * Right now we can assume that the minor number matches with
	 * this instance of fp. If this changes we will need to
	 * revisit this logic.
	 */
	mutex_enter(&fcp_global_mutex);
	t_pptr = fcp_port_head;
	while (t_pptr) {
		if (t_pptr->port_instance == (uint32_t)fioctl->fp_minor) {
			break;
		} else {
			t_pptr = t_pptr->port_next;
		}
	}
	*pptr = t_pptr;
	mutex_exit(&fcp_global_mutex);
	/* No port with a matching instance number was found. */
	if (t_pptr == NULL) {
		return (ENXIO);
	}

	return (0);
}
3118 
3119 /*
3120  *     Function: fcp_port_create_tgt
3121  *
 *  Description: As the name suggests, this function creates the target context
 *		 specified by the WWN provided by the caller.  If the
3124  *		 creation goes well and the target is known by fp/fctl a PLOGI
3125  *		 followed by a PRLI are issued.
3126  *
3127  *     Argument: pptr		fcp port structure
3128  *		 pwwn		WWN of the target
3129  *		 ret_val	Address of the return code.  It could be:
3130  *				EIO, ENOMEM or 0.
3131  *		 fc_status	PLOGI or PRLI status completion
3132  *		 fc_pkt_state	PLOGI or PRLI state completion
3133  *		 fc_pkt_reason	PLOGI or PRLI reason completion
3134  *		 fc_pkt_action	PLOGI or PRLI action completion
3135  *
3136  * Return Value: NULL if it failed
3137  *		 Target structure address if it succeeds
3138  */
static struct fcp_tgt *
fcp_port_create_tgt(struct fcp_port *pptr, la_wwn_t *pwwn, int *ret_val,
    int *fc_status, int *fc_pkt_state, int *fc_pkt_reason, int *fc_pkt_action)
{
	struct fcp_tgt	*ptgt = NULL;
	fc_portmap_t		devlist;
	int			lcount;
	int			error;

	*ret_val = 0;

	/*
	 * Check FC port device & get port map
	 */
	if (fc_ulp_get_remote_port(pptr->port_fp_handle, pwwn,
	    &error, 1) == NULL) {
		*ret_val = EIO;
	} else {
		if (fc_ulp_pwwn_to_portmap(pptr->port_fp_handle, pwwn,
		    &devlist) != FC_SUCCESS) {
			*ret_val = EIO;
		}
	}

	/*
	 * Set port map flags.  NOTE(review): this store runs even when the
	 * lookups above failed and devlist is otherwise uninitialized; it
	 * is harmless because devlist is only consumed when *ret_val == 0.
	 */
	devlist.map_type = PORT_DEVICE_USER_CREATE;

	/* Allocate target */
	if (*ret_val == 0) {
		lcount = pptr->port_link_cnt;
		ptgt = fcp_alloc_tgt(pptr, &devlist, lcount);
		if (ptgt == NULL) {
			fcp_log(CE_WARN, pptr->port_dip,
			    "!FC target allocation failed");
			*ret_val = ENOMEM;
		} else {
			/* Setup target from the port map entry. */
			mutex_enter(&ptgt->tgt_mutex);

			ptgt->tgt_statec_cause	= FCP_CAUSE_TGT_CHANGE;
			ptgt->tgt_tmp_cnt	= 1;
			ptgt->tgt_d_id		= devlist.map_did.port_id;
			ptgt->tgt_hard_addr	=
			    devlist.map_hard_addr.hard_addr;
			ptgt->tgt_pd_handle	= devlist.map_pd;
			ptgt->tgt_fca_dev	= NULL;

			/* Record the node and port WWNs of the new target. */
			bcopy(&devlist.map_nwwn, &ptgt->tgt_node_wwn.raw_wwn[0],
			    FC_WWN_SIZE);
			bcopy(&devlist.map_pwwn, &ptgt->tgt_port_wwn.raw_wwn[0],
			    FC_WWN_SIZE);

			mutex_exit(&ptgt->tgt_mutex);
		}
	}

	/*
	 * Release global mutex for PLOGI and PRLI.  The exit/enter pair
	 * here implies the caller holds fcp_global_mutex on entry; the
	 * lock is dropped across the blocking login calls below and
	 * re-acquired before returning.
	 */
	mutex_exit(&fcp_global_mutex);

	/* Send PLOGI (If necessary) */
	if (*ret_val == 0) {
		*ret_val = fcp_tgt_send_plogi(ptgt, fc_status,
		    fc_pkt_state, fc_pkt_reason, fc_pkt_action);
	}

	/* Send PRLI (If necessary) */
	if (*ret_val == 0) {
		*ret_val = fcp_tgt_send_prli(ptgt, fc_status,
		    fc_pkt_state, fc_pkt_reason, fc_pkt_action);
	}

	mutex_enter(&fcp_global_mutex);

	return (ptgt);
}
3214 
3215 /*
3216  *     Function: fcp_tgt_send_plogi
3217  *
3218  *  Description: This function sends a PLOGI to the target specified by the
3219  *		 caller and waits till it completes.
3220  *
3221  *     Argument: ptgt		Target to send the plogi to.
3222  *		 fc_status	Status returned by fp/fctl in the PLOGI request.
3223  *		 fc_pkt_state	State returned by fp/fctl in the PLOGI request.
3224  *		 fc_pkt_reason	Reason returned by fp/fctl in the PLOGI request.
3225  *		 fc_pkt_action	Action returned by fp/fctl in the PLOGI request.
3226  *
3227  * Return Value: 0
3228  *		 ENOMEM
3229  *		 EIO
3230  *
3231  *	Context: User context.
3232  */
static int
fcp_tgt_send_plogi(struct fcp_tgt *ptgt, int *fc_status, int *fc_pkt_state,
    int *fc_pkt_reason, int *fc_pkt_action)
{
	struct fcp_port	*pptr;
	struct fcp_ipkt	*icmd;
	struct fc_packet	*fpkt;
	fc_frame_hdr_t		*hp;
	struct la_els_logi	logi;
	int			tcount;
	int			lcount;
	/*
	 * login_retval starts as "not FC_SUCCESS" so we only wait on the
	 * semaphore below if fc_ulp_login() was actually issued and
	 * succeeded (i.e. a completion callback will post it).
	 */
	int			ret, login_retval = ~FC_SUCCESS;

	ret = 0;

	pptr = ptgt->tgt_port;

	/* Snapshot link/target change counters for the packet allocation. */
	lcount = pptr->port_link_cnt;
	tcount = ptgt->tgt_change_cnt;

	/* Alloc internal packet */
	icmd = fcp_icmd_alloc(pptr, ptgt, sizeof (la_els_logi_t),
	    sizeof (la_els_logi_t), 0, 0, lcount, tcount, 0,
	    FC_INVALID_RSCN_COUNT);

	if (icmd == NULL) {
		ret = ENOMEM;
	} else {
		/*
		 * Setup internal packet as sema sync
		 */
		fcp_ipkt_sema_init(icmd);

		/*
		 * Setup internal packet (icmd)
		 */
		icmd->ipkt_lun		= NULL;
		icmd->ipkt_restart	= 0;
		icmd->ipkt_retries	= 0;
		icmd->ipkt_opcode	= LA_ELS_PLOGI;

		/*
		 * Setup fc_packet
		 */
		fpkt = icmd->ipkt_fpkt;

		fpkt->pkt_tran_flags	= FC_TRAN_CLASS3 | FC_TRAN_INTR;
		fpkt->pkt_tran_type	= FC_PKT_EXCHANGE;
		fpkt->pkt_timeout	= FCP_ELS_TIMEOUT;

		/*
		 * Setup FC frame header
		 */
		hp = &fpkt->pkt_cmd_fhdr;

		hp->s_id	= pptr->port_id;	/* source ID */
		hp->d_id	= ptgt->tgt_d_id;	/* dest ID */
		hp->r_ctl	= R_CTL_ELS_REQ;
		hp->type	= FC_TYPE_EXTENDED_LS;
		hp->f_ctl	= F_CTL_SEQ_INITIATIVE | F_CTL_FIRST_SEQ;
		hp->seq_id	= 0;
		hp->rsvd	= 0;
		hp->df_ctl	= 0;
		hp->seq_cnt	= 0;
		hp->ox_id	= 0xffff;		/* i.e. none */
		hp->rx_id	= 0xffff;		/* i.e. none */
		hp->ro		= 0;

		/*
		 * Setup PLOGI
		 */
		bzero(&logi, sizeof (struct la_els_logi));
		logi.ls_code.ls_code = LA_ELS_PLOGI;

		FCP_CP_OUT((uint8_t *)&logi, fpkt->pkt_cmd,
		    fpkt->pkt_cmd_acc, sizeof (struct la_els_logi));

		/*
		 * Send PLOGI
		 */
		*fc_status = login_retval =
		    fc_ulp_login(pptr->port_fp_handle, &fpkt, 1);
		if (*fc_status != FC_SUCCESS) {
			ret = EIO;
		}
	}

	/*
	 * Wait for completion.  Only safe when the login was issued
	 * successfully; otherwise no callback will post the semaphore.
	 */
	if ((ret == 0) && (login_retval == FC_SUCCESS)) {
		ret = fcp_ipkt_sema_wait(icmd);

		*fc_pkt_state	= fpkt->pkt_state;
		*fc_pkt_reason	= fpkt->pkt_reason;
		*fc_pkt_action	= fpkt->pkt_action;
	}

	/*
	 * Cleanup transport data structures if icmd was alloc-ed.  The
	 * completion callback (fcp_ipkt_sema_callback) only posts the
	 * semaphore, so the packet is always torn down here once any
	 * wait above has finished.
	 */
	if (icmd != NULL) {
		fcp_ipkt_sema_cleanup(icmd);
	}

	return (ret);
}
3342 
3343 /*
3344  *     Function: fcp_tgt_send_prli
3345  *
3346  *  Description: Does nothing as of today.
3347  *
3348  *     Argument: ptgt		Target to send the prli to.
3349  *		 fc_status	Status returned by fp/fctl in the PRLI request.
3350  *		 fc_pkt_state	State returned by fp/fctl in the PRLI request.
3351  *		 fc_pkt_reason	Reason returned by fp/fctl in the PRLI request.
3352  *		 fc_pkt_action	Action returned by fp/fctl in the PRLI request.
3353  *
3354  * Return Value: 0
3355  */
/*ARGSUSED*/
static int
fcp_tgt_send_prli(struct fcp_tgt *ptgt, int *fc_status, int *fc_pkt_state,
    int *fc_pkt_reason, int *fc_pkt_action)
{
	/* Placeholder: no PRLI is issued today; always report success. */
	return (0);
}
3363 
3364 /*
3365  *     Function: fcp_ipkt_sema_init
3366  *
3367  *  Description: Initializes the semaphore contained in the internal packet.
3368  *
3369  *     Argument: icmd	Internal packet the semaphore of which must be
3370  *			initialized.
3371  *
3372  * Return Value: None
3373  *
3374  *	Context: User context only.
3375  */
3376 static void
3377 fcp_ipkt_sema_init(struct fcp_ipkt *icmd)
3378 {
3379 	struct fc_packet	*fpkt;
3380 
3381 	fpkt = icmd->ipkt_fpkt;
3382 
3383 	/* Create semaphore for sync */
3384 	sema_init(&(icmd->ipkt_sema), 0, NULL, SEMA_DRIVER, NULL);
3385 
3386 	/* Setup the completion callback */
3387 	fpkt->pkt_comp = fcp_ipkt_sema_callback;
3388 }
3389 
3390 /*
3391  *     Function: fcp_ipkt_sema_wait
3392  *
3393  *  Description: Wait on the semaphore embedded in the internal packet.	 The
3394  *		 semaphore is released in the callback.
3395  *
3396  *     Argument: icmd	Internal packet to wait on for completion.
3397  *
3398  * Return Value: 0
3399  *		 EIO
3400  *		 EBUSY
3401  *		 EAGAIN
3402  *
3403  *	Context: User context only.
3404  *
3405  * This function does a conversion between the field pkt_state of the fc_packet
3406  * embedded in the internal packet (icmd) and the code it returns.
3407  */
3408 static int
3409 fcp_ipkt_sema_wait(struct fcp_ipkt *icmd)
3410 {
3411 	struct fc_packet	*fpkt;
3412 	int	ret;
3413 
3414 	ret = EIO;
3415 	fpkt = icmd->ipkt_fpkt;
3416 
3417 	/*
3418 	 * Wait on semaphore
3419 	 */
3420 	sema_p(&(icmd->ipkt_sema));
3421 
3422 	/*
3423 	 * Check the status of the FC packet
3424 	 */
3425 	switch (fpkt->pkt_state) {
3426 	case FC_PKT_SUCCESS:
3427 		ret = 0;
3428 		break;
3429 	case FC_PKT_LOCAL_RJT:
3430 		switch (fpkt->pkt_reason) {
3431 		case FC_REASON_SEQ_TIMEOUT:
3432 		case FC_REASON_RX_BUF_TIMEOUT:
3433 			ret = EAGAIN;
3434 			break;
3435 		case FC_REASON_PKT_BUSY:
3436 			ret = EBUSY;
3437 			break;
3438 		}
3439 		break;
3440 	case FC_PKT_TIMEOUT:
3441 		ret = EAGAIN;
3442 		break;
3443 	case FC_PKT_LOCAL_BSY:
3444 	case FC_PKT_TRAN_BSY:
3445 	case FC_PKT_NPORT_BSY:
3446 	case FC_PKT_FABRIC_BSY:
3447 		ret = EBUSY;
3448 		break;
3449 	case FC_PKT_LS_RJT:
3450 	case FC_PKT_BA_RJT:
3451 		switch (fpkt->pkt_reason) {
3452 		case FC_REASON_LOGICAL_BSY:
3453 			ret = EBUSY;
3454 			break;
3455 		}
3456 		break;
3457 	case FC_PKT_FS_RJT:
3458 		switch (fpkt->pkt_reason) {
3459 		case FC_REASON_FS_LOGICAL_BUSY:
3460 			ret = EBUSY;
3461 			break;
3462 		}
3463 		break;
3464 	}
3465 
3466 	return (ret);
3467 }
3468 
3469 /*
3470  *     Function: fcp_ipkt_sema_callback
3471  *
3472  *  Description: Registered as the completion callback function for the FC
3473  *		 transport when the ipkt semaphore is used for sync. This will
3474  *		 cleanup the used data structures, if necessary and wake up
3475  *		 the user thread to complete the transaction.
3476  *
3477  *     Argument: fpkt	FC packet (points to the icmd)
3478  *
3479  * Return Value: None
3480  *
3481  *	Context: User context only
3482  */
3483 static void
3484 fcp_ipkt_sema_callback(struct fc_packet *fpkt)
3485 {
3486 	struct fcp_ipkt	*icmd;
3487 
3488 	icmd = (struct fcp_ipkt *)fpkt->pkt_ulp_private;
3489 
3490 	/*
3491 	 * Wake up user thread
3492 	 */
3493 	sema_v(&(icmd->ipkt_sema));
3494 }
3495 
3496 /*
3497  *     Function: fcp_ipkt_sema_cleanup
3498  *
3499  *  Description: Called to cleanup (if necessary) the data structures used
3500  *		 when ipkt sema is used for sync.  This function will detect
3501  *		 whether the caller is the last thread (via counter) and
3502  *		 cleanup only if necessary.
3503  *
3504  *     Argument: icmd	Internal command packet
3505  *
3506  * Return Value: None
3507  *
3508  *	Context: User context only
3509  */
3510 static void
3511 fcp_ipkt_sema_cleanup(struct fcp_ipkt *icmd)
3512 {
3513 	struct fcp_tgt	*ptgt;
3514 	struct fcp_port	*pptr;
3515 
3516 	ptgt = icmd->ipkt_tgt;
3517 	pptr = icmd->ipkt_port;
3518 
3519 	/*
3520 	 * Acquire data structure
3521 	 */
3522 	mutex_enter(&ptgt->tgt_mutex);
3523 
3524 	/*
3525 	 * Destroy semaphore
3526 	 */
3527 	sema_destroy(&(icmd->ipkt_sema));
3528 
3529 	/*
3530 	 * Cleanup internal packet
3531 	 */
3532 	mutex_exit(&ptgt->tgt_mutex);
3533 	fcp_icmd_free(pptr, icmd);
3534 }
3535 
3536 /*
3537  *     Function: fcp_port_attach
3538  *
3539  *  Description: Called by the transport framework to resume, suspend or
3540  *		 attach a new port.
3541  *
3542  *     Argument: ulph		Port handle
3543  *		 *pinfo		Port information
3544  *		 cmd		Command
3545  *		 s_id		Port ID
3546  *
3547  * Return Value: FC_FAILURE or FC_SUCCESS
3548  */
3549 /*ARGSUSED*/
3550 static int
3551 fcp_port_attach(opaque_t ulph, fc_ulp_port_info_t *pinfo,
3552     fc_attach_cmd_t cmd, uint32_t s_id)
3553 {
3554 	int	instance;
3555 	int	res = FC_FAILURE; /* default result */
3556 
3557 	ASSERT(pinfo != NULL);
3558 
3559 	instance = ddi_get_instance(pinfo->port_dip);
3560 
3561 	switch (cmd) {
3562 	case FC_CMD_ATTACH:
3563 		/*
3564 		 * this port instance attaching for the first time (or after
3565 		 * being detached before)
3566 		 */
3567 		if (fcp_handle_port_attach(ulph, pinfo, s_id,
3568 		    instance) == DDI_SUCCESS) {
3569 			res = FC_SUCCESS;
3570 		} else {
3571 			ASSERT(ddi_get_soft_state(fcp_softstate,
3572 			    instance) == NULL);
3573 		}
3574 		break;
3575 
3576 	case FC_CMD_RESUME:
3577 	case FC_CMD_POWER_UP:
3578 		/*
3579 		 * this port instance was attached and the suspended and
3580 		 * will now be resumed
3581 		 */
3582 		if (fcp_handle_port_resume(ulph, pinfo, s_id, cmd,
3583 		    instance) == DDI_SUCCESS) {
3584 			res = FC_SUCCESS;
3585 		}
3586 		break;
3587 
3588 	default:
3589 		/* shouldn't happen */
3590 		FCP_TRACE(fcp_logq, "fcp",
3591 		    fcp_trace, FCP_BUF_LEVEL_2, 0,
3592 		    "port_attach: unknown cmdcommand: %d", cmd);
3593 		break;
3594 	}
3595 
3596 	/* return result */
3597 	FCP_DTRACE(fcp_logq, "fcp", fcp_trace,
3598 	    FCP_BUF_LEVEL_1, 0, "fcp_port_attach returning %d", res);
3599 
3600 	return (res);
3601 }
3602 
3603 
3604 /*
3605  * detach or suspend this port instance
3606  *
3607  * acquires and releases the global mutex
3608  *
3609  * acquires and releases the mutex for this port
3610  *
3611  * acquires and releases the hotplug mutex for this port
3612  */
/*ARGSUSED*/
static int
fcp_port_detach(opaque_t ulph, fc_ulp_port_info_t *info,
    fc_detach_cmd_t cmd)
{
	int			flag;
	int			instance;
	struct fcp_port		*pptr;

	instance = ddi_get_instance(info->port_dip);
	/*
	 * NOTE(review): pptr may be NULL if the instance never attached;
	 * presumably fcp_handle_port_detach() tolerates that -- confirm.
	 */
	pptr = ddi_get_soft_state(fcp_softstate, instance);

	/* Translate the transport command into the port-state flag. */
	switch (cmd) {
	case FC_CMD_SUSPEND:
		FCP_DTRACE(fcp_logq, "fcp",
		    fcp_trace, FCP_BUF_LEVEL_8, 0,
		    "port suspend called for port %d", instance);
		flag = FCP_STATE_SUSPENDED;
		break;

	case FC_CMD_POWER_DOWN:
		FCP_DTRACE(fcp_logq, "fcp",
		    fcp_trace, FCP_BUF_LEVEL_8, 0,
		    "port power down called for port %d", instance);
		flag = FCP_STATE_POWER_DOWN;
		break;

	case FC_CMD_DETACH:
		FCP_DTRACE(fcp_logq, "fcp",
		    fcp_trace, FCP_BUF_LEVEL_8, 0,
		    "port detach called for port %d", instance);
		flag = FCP_STATE_DETACHING;
		break;

	default:
		/* shouldn't happen */
		return (FC_FAILURE);
	}
	FCP_DTRACE(fcp_logq, "fcp", fcp_trace,
	    FCP_BUF_LEVEL_1, 0, "fcp_port_detach returning");

	/* The common handler performs the actual suspend/detach work. */
	return (fcp_handle_port_detach(pptr, flag, instance));
}
3656 
3657 
3658 /*
3659  * called for ioctls on the transport's devctl interface, and the transport
3660  * has passed it to us
3661  *
3662  * this will only be called for device control ioctls (i.e. hotplugging stuff)
3663  *
3664  * return FC_SUCCESS if we decide to claim the ioctl,
3665  * else return FC_UNCLAIMED
3666  *
3667  * *rval is set iff we decide to claim the ioctl
3668  */
3669 /*ARGSUSED*/
3670 static int
3671 fcp_port_ioctl(opaque_t ulph, opaque_t port_handle, dev_t dev, int cmd,
3672     intptr_t data, int mode, cred_t *credp, int *rval, uint32_t claimed)
3673 {
3674 	int			retval = FC_UNCLAIMED;	/* return value */
3675 	struct fcp_port		*pptr = NULL;		/* our soft state */
3676 	struct devctl_iocdata	*dcp = NULL;		/* for devctl */
3677 	dev_info_t		*cdip;
3678 	mdi_pathinfo_t		*pip = NULL;
3679 	char			*ndi_nm;		/* NDI name */
3680 	char			*ndi_addr;		/* NDI addr */
3681 	int			is_mpxio, circ;
3682 	int			devi_entered = 0;
3683 	time_t			end_time;
3684 
3685 	ASSERT(rval != NULL);
3686 
3687 	FCP_DTRACE(fcp_logq, "fcp",
3688 	    fcp_trace, FCP_BUF_LEVEL_8, 0,
3689 	    "fcp_port_ioctl(cmd=0x%x, claimed=%d)", cmd, claimed);
3690 
3691 	/* if already claimed then forget it */
3692 	if (claimed) {
3693 		/*
3694 		 * for now, if this ioctl has already been claimed, then
3695 		 * we just ignore it
3696 		 */
3697 		return (retval);
3698 	}
3699 
3700 	/* get our port info */
3701 	if ((pptr = fcp_get_port(port_handle)) == NULL) {
3702 		fcp_log(CE_WARN, NULL,
3703 		    "!fcp:Invalid port handle handle in ioctl");
3704 		*rval = ENXIO;
3705 		return (retval);
3706 	}
3707 	is_mpxio = pptr->port_mpxio;
3708 
3709 	switch (cmd) {
3710 	case DEVCTL_BUS_GETSTATE:
3711 	case DEVCTL_BUS_QUIESCE:
3712 	case DEVCTL_BUS_UNQUIESCE:
3713 	case DEVCTL_BUS_RESET:
3714 	case DEVCTL_BUS_RESETALL:
3715 
3716 	case DEVCTL_BUS_DEV_CREATE:
3717 		if (ndi_dc_allochdl((void *)data, &dcp) != NDI_SUCCESS) {
3718 			return (retval);
3719 		}
3720 		break;
3721 
3722 	case DEVCTL_DEVICE_GETSTATE:
3723 	case DEVCTL_DEVICE_OFFLINE:
3724 	case DEVCTL_DEVICE_ONLINE:
3725 	case DEVCTL_DEVICE_REMOVE:
3726 	case DEVCTL_DEVICE_RESET:
3727 		if (ndi_dc_allochdl((void *)data, &dcp) != NDI_SUCCESS) {
3728 			return (retval);
3729 		}
3730 
3731 		ASSERT(dcp != NULL);
3732 
3733 		/* ensure we have a name and address */
3734 		if (((ndi_nm = ndi_dc_getname(dcp)) == NULL) ||
3735 		    ((ndi_addr = ndi_dc_getaddr(dcp)) == NULL)) {
3736 			FCP_TRACE(fcp_logq, pptr->port_instbuf,
3737 			    fcp_trace, FCP_BUF_LEVEL_2, 0,
3738 			    "ioctl: can't get name (%s) or addr (%s)",
3739 			    ndi_nm ? ndi_nm : "<null ptr>",
3740 			    ndi_addr ? ndi_addr : "<null ptr>");
3741 			ndi_dc_freehdl(dcp);
3742 			return (retval);
3743 		}
3744 
3745 
3746 		/* get our child's DIP */
3747 		ASSERT(pptr != NULL);
3748 		if (is_mpxio) {
3749 			mdi_devi_enter(pptr->port_dip, &circ);
3750 		} else {
3751 			ndi_devi_enter(pptr->port_dip, &circ);
3752 		}
3753 		devi_entered = 1;
3754 
3755 		if ((cdip = ndi_devi_find(pptr->port_dip, ndi_nm,
3756 		    ndi_addr)) == NULL) {
3757 			/* Look for virtually enumerated devices. */
3758 			pip = mdi_pi_find(pptr->port_dip, NULL, ndi_addr);
3759 			if (pip == NULL ||
3760 			    ((cdip = mdi_pi_get_client(pip)) == NULL)) {
3761 				*rval = ENXIO;
3762 				goto out;
3763 			}
3764 		}
3765 		break;
3766 
3767 	default:
3768 		*rval = ENOTTY;
3769 		return (retval);
3770 	}
3771 
3772 	/* this ioctl is ours -- process it */
3773 
3774 	retval = FC_SUCCESS;		/* just means we claim the ioctl */
3775 
3776 	/* we assume it will be a success; else we'll set error value */
3777 	*rval = 0;
3778 
3779 
3780 	FCP_DTRACE(fcp_logq, pptr->port_instbuf,
3781 	    fcp_trace, FCP_BUF_LEVEL_8, 0,
3782 	    "ioctl: claiming this one");
3783 
3784 	/* handle ioctls now */
3785 	switch (cmd) {
3786 	case DEVCTL_DEVICE_GETSTATE:
3787 		ASSERT(cdip != NULL);
3788 		ASSERT(dcp != NULL);
3789 		if (ndi_dc_return_dev_state(cdip, dcp) != NDI_SUCCESS) {
3790 			*rval = EFAULT;
3791 		}
3792 		break;
3793 
3794 	case DEVCTL_DEVICE_REMOVE:
3795 	case DEVCTL_DEVICE_OFFLINE: {
3796 		int			flag = 0;
3797 		int			lcount;
3798 		int			tcount;
3799 		struct fcp_pkt	*head = NULL;
3800 		struct fcp_lun	*plun;
3801 		child_info_t		*cip = CIP(cdip);
3802 		int			all = 1;
3803 		struct fcp_lun	*tplun;
3804 		struct fcp_tgt	*ptgt;
3805 
3806 		ASSERT(pptr != NULL);
3807 		ASSERT(cdip != NULL);
3808 
3809 		mutex_enter(&pptr->port_mutex);
3810 		if (pip != NULL) {
3811 			cip = CIP(pip);
3812 		}
3813 		if ((plun = fcp_get_lun_from_cip(pptr, cip)) == NULL) {
3814 			mutex_exit(&pptr->port_mutex);
3815 			*rval = ENXIO;
3816 			break;
3817 		}
3818 
3819 		head = fcp_scan_commands(plun);
3820 		if (head != NULL) {
3821 			fcp_abort_commands(head, LUN_PORT);
3822 		}
3823 		lcount = pptr->port_link_cnt;
3824 		tcount = plun->lun_tgt->tgt_change_cnt;
3825 		mutex_exit(&pptr->port_mutex);
3826 
3827 		if (cmd == DEVCTL_DEVICE_REMOVE) {
3828 			flag = NDI_DEVI_REMOVE;
3829 		}
3830 
3831 		if (is_mpxio) {
3832 			mdi_devi_exit(pptr->port_dip, circ);
3833 		} else {
3834 			ndi_devi_exit(pptr->port_dip, circ);
3835 		}
3836 		devi_entered = 0;
3837 
3838 		*rval = fcp_pass_to_hp_and_wait(pptr, plun, cip,
3839 		    FCP_OFFLINE, lcount, tcount, flag);
3840 
3841 		if (*rval != NDI_SUCCESS) {
3842 			*rval = (*rval == NDI_BUSY) ? EBUSY : EIO;
3843 			break;
3844 		}
3845 
3846 		fcp_update_offline_flags(plun);
3847 
3848 		ptgt = plun->lun_tgt;
3849 		mutex_enter(&ptgt->tgt_mutex);
3850 		for (tplun = ptgt->tgt_lun; tplun != NULL; tplun =
3851 		    tplun->lun_next) {
3852 			mutex_enter(&tplun->lun_mutex);
3853 			if (!(tplun->lun_state & FCP_LUN_OFFLINE)) {
3854 				all = 0;
3855 			}
3856 			mutex_exit(&tplun->lun_mutex);
3857 		}
3858 
3859 		if (all) {
3860 			ptgt->tgt_node_state = FCP_TGT_NODE_NONE;
3861 			/*
3862 			 * The user is unconfiguring/offlining the device.
3863 			 * If fabric and the auto configuration is set
3864 			 * then make sure the user is the only one who
3865 			 * can reconfigure the device.
3866 			 */
3867 			if (FC_TOP_EXTERNAL(pptr->port_topology) &&
3868 			    fcp_enable_auto_configuration) {
3869 				ptgt->tgt_manual_config_only = 1;
3870 			}
3871 		}
3872 		mutex_exit(&ptgt->tgt_mutex);
3873 		break;
3874 	}
3875 
3876 	case DEVCTL_DEVICE_ONLINE: {
3877 		int			lcount;
3878 		int			tcount;
3879 		struct fcp_lun	*plun;
3880 		child_info_t		*cip = CIP(cdip);
3881 
3882 		ASSERT(cdip != NULL);
3883 		ASSERT(pptr != NULL);
3884 
3885 		mutex_enter(&pptr->port_mutex);
3886 		if (pip != NULL) {
3887 			cip = CIP(pip);
3888 		}
3889 		if ((plun = fcp_get_lun_from_cip(pptr, cip)) == NULL) {
3890 			mutex_exit(&pptr->port_mutex);
3891 			*rval = ENXIO;
3892 			break;
3893 		}
3894 		lcount = pptr->port_link_cnt;
3895 		tcount = plun->lun_tgt->tgt_change_cnt;
3896 		mutex_exit(&pptr->port_mutex);
3897 
3898 		/*
3899 		 * The FCP_LUN_ONLINING flag is used in fcp_scsi_start()
3900 		 * to allow the device attach to occur when the device is
3901 		 * FCP_LUN_OFFLINE (so we don't reject the INQUIRY command
3902 		 * from the scsi_probe()).
3903 		 */
3904 		mutex_enter(&LUN_TGT->tgt_mutex);
3905 		plun->lun_state |= FCP_LUN_ONLINING;
3906 		mutex_exit(&LUN_TGT->tgt_mutex);
3907 
3908 		if (is_mpxio) {
3909 			mdi_devi_exit(pptr->port_dip, circ);
3910 		} else {
3911 			ndi_devi_exit(pptr->port_dip, circ);
3912 		}
3913 		devi_entered = 0;
3914 
3915 		*rval = fcp_pass_to_hp_and_wait(pptr, plun, cip,
3916 		    FCP_ONLINE, lcount, tcount, 0);
3917 
3918 		if (*rval != NDI_SUCCESS) {
3919 			/* Reset the FCP_LUN_ONLINING bit */
3920 			mutex_enter(&LUN_TGT->tgt_mutex);
3921 			plun->lun_state &= ~FCP_LUN_ONLINING;
3922 			mutex_exit(&LUN_TGT->tgt_mutex);
3923 			*rval = EIO;
3924 			break;
3925 		}
3926 		mutex_enter(&LUN_TGT->tgt_mutex);
3927 		plun->lun_state &= ~(FCP_LUN_OFFLINE | FCP_LUN_BUSY |
3928 		    FCP_LUN_ONLINING);
3929 		mutex_exit(&LUN_TGT->tgt_mutex);
3930 		break;
3931 	}
3932 
3933 	case DEVCTL_BUS_DEV_CREATE: {
3934 		uchar_t			*bytes = NULL;
3935 		uint_t			nbytes;
3936 		struct fcp_tgt		*ptgt = NULL;
3937 		struct fcp_lun		*plun = NULL;
3938 		dev_info_t		*useless_dip = NULL;
3939 
3940 		*rval = ndi_dc_devi_create(dcp, pptr->port_dip,
3941 		    DEVCTL_CONSTRUCT, &useless_dip);
3942 		if (*rval != 0 || useless_dip == NULL) {
3943 			break;
3944 		}
3945 
3946 		if ((ddi_prop_lookup_byte_array(DDI_DEV_T_ANY, useless_dip,
3947 		    DDI_PROP_DONTPASS | DDI_PROP_NOTPROM, PORT_WWN_PROP, &bytes,
3948 		    &nbytes) != DDI_PROP_SUCCESS) || nbytes != FC_WWN_SIZE) {
3949 			*rval = EINVAL;
3950 			(void) ndi_devi_free(useless_dip);
3951 			if (bytes != NULL) {
3952 				ddi_prop_free(bytes);
3953 			}
3954 			break;
3955 		}
3956 
3957 		*rval = fcp_create_on_demand(pptr, bytes);
3958 		if (*rval == 0) {
3959 			mutex_enter(&pptr->port_mutex);
3960 			ptgt = fcp_lookup_target(pptr, (uchar_t *)bytes);
3961 			if (ptgt) {
3962 				/*
3963 				 * We now have a pointer to the target that
3964 				 * was created. Lets point to the first LUN on
3965 				 * this new target.
3966 				 */
3967 				mutex_enter(&ptgt->tgt_mutex);
3968 
3969 				plun = ptgt->tgt_lun;
3970 				/*
3971 				 * There may be stale/offline LUN entries on
3972 				 * this list (this is by design) and so we have
3973 				 * to make sure we point to the first online
3974 				 * LUN
3975 				 */
3976 				while (plun &&
3977 				    plun->lun_state & FCP_LUN_OFFLINE) {
3978 					plun = plun->lun_next;
3979 				}
3980 
3981 				mutex_exit(&ptgt->tgt_mutex);
3982 			}
3983 			mutex_exit(&pptr->port_mutex);
3984 		}
3985 
3986 		if (*rval == 0 && ptgt && plun) {
3987 			mutex_enter(&plun->lun_mutex);
3988 			/*
3989 			 * Allow up to fcp_lun_ready_retry seconds to
3990 			 * configure all the luns behind the target.
3991 			 *
3992 			 * The intent here is to allow targets with long
3993 			 * reboot/reset-recovery times to become available
3994 			 * while limiting the maximum wait time for an
3995 			 * unresponsive target.
3996 			 */
3997 			end_time = ddi_get_lbolt() +
3998 			    SEC_TO_TICK(fcp_lun_ready_retry);
3999 
4000 			while (ddi_get_lbolt() < end_time) {
4001 				retval = FC_SUCCESS;
4002 
4003 				/*
4004 				 * The new ndi interfaces for on-demand creation
4005 				 * are inflexible, Do some more work to pass on
4006 				 * a path name of some LUN (design is broken !)
4007 				 */
4008 				if (plun->lun_cip) {
4009 					if (plun->lun_mpxio == 0) {
4010 						cdip = DIP(plun->lun_cip);
4011 					} else {
4012 						cdip = mdi_pi_get_client(
4013 						    PIP(plun->lun_cip));
4014 					}
4015 					if (cdip == NULL) {
4016 						*rval = ENXIO;
4017 						break;
4018 					}
4019 
4020 					if (!i_ddi_devi_attached(cdip)) {
4021 						mutex_exit(&plun->lun_mutex);
4022 						delay(drv_usectohz(1000000));
4023 						mutex_enter(&plun->lun_mutex);
4024 					} else {
4025 						/*
4026 						 * This Lun is ready, lets
4027 						 * check the next one.
4028 						 */
4029 						mutex_exit(&plun->lun_mutex);
4030 						plun = plun->lun_next;
4031 						while (plun && (plun->lun_state
4032 						    & FCP_LUN_OFFLINE)) {
4033 							plun = plun->lun_next;
4034 						}
4035 						if (!plun) {
4036 							break;
4037 						}
4038 						mutex_enter(&plun->lun_mutex);
4039 					}
4040 				} else {
4041 					/*
4042 					 * lun_cip field for a valid lun
4043 					 * should never be NULL. Fail the
4044 					 * command.
4045 					 */
4046 					*rval = ENXIO;
4047 					break;
4048 				}
4049 			}
4050 			if (plun) {
4051 				mutex_exit(&plun->lun_mutex);
4052 			} else {
4053 				char devnm[MAXNAMELEN];
4054 				int nmlen;
4055 
4056 				nmlen = snprintf(devnm, MAXNAMELEN, "%s@%s",
4057 				    ddi_node_name(cdip),
4058 				    ddi_get_name_addr(cdip));
4059 
4060 				if (copyout(&devnm, dcp->cpyout_buf, nmlen) !=
4061 				    0) {
4062 					*rval = EFAULT;
4063 				}
4064 			}
4065 		} else {
4066 			int	i;
4067 			char	buf[25];
4068 
4069 			for (i = 0; i < FC_WWN_SIZE; i++) {
4070 				(void) sprintf(&buf[i << 1], "%02x", bytes[i]);
4071 			}
4072 
4073 			fcp_log(CE_WARN, pptr->port_dip,
4074 			    "!Failed to create nodes for pwwn=%s; error=%x",
4075 			    buf, *rval);
4076 		}
4077 
4078 		(void) ndi_devi_free(useless_dip);
4079 		ddi_prop_free(bytes);
4080 		break;
4081 	}
4082 
4083 	case DEVCTL_DEVICE_RESET: {
4084 		struct fcp_lun	*plun;
4085 		struct scsi_address	ap;
4086 		child_info_t		*cip = CIP(cdip);
4087 
4088 		ASSERT(cdip != NULL);
4089 		ASSERT(pptr != NULL);
4090 		mutex_enter(&pptr->port_mutex);
4091 		if (pip != NULL) {
4092 			cip = CIP(pip);
4093 		}
4094 		if ((plun = fcp_get_lun_from_cip(pptr, cip)) == NULL) {
4095 			mutex_exit(&pptr->port_mutex);
4096 			*rval = ENXIO;
4097 			break;
4098 		}
4099 		mutex_exit(&pptr->port_mutex);
4100 
4101 		mutex_enter(&plun->lun_tgt->tgt_mutex);
4102 		if (!(plun->lun_state & FCP_SCSI_LUN_TGT_INIT)) {
4103 			mutex_exit(&plun->lun_tgt->tgt_mutex);
4104 			*rval = ENXIO;
4105 			break;
4106 		}
4107 		ap.a_hba_tran = plun->lun_tran;
4108 		ASSERT(pptr->port_tran != NULL);
4109 		mutex_exit(&plun->lun_tgt->tgt_mutex);
4110 
4111 		/*
4112 		 * There is a chance lun_tran is NULL at this point. So check
4113 		 * for it. If it is NULL, it basically means that the tgt has
4114 		 * been freed. So, just return a "No such device or address"
4115 		 * error.
4116 		 */
4117 		if (ap.a_hba_tran == NULL) {
4118 			*rval = ENXIO;
4119 			break;
4120 		}
4121 
4122 		/*
4123 		 * set up ap so that fcp_reset can figure out
4124 		 * which target to reset
4125 		 */
4126 		if (fcp_scsi_reset(&ap, RESET_TARGET) == FALSE) {
4127 			*rval = EIO;
4128 		}
4129 		break;
4130 	}
4131 
4132 	case DEVCTL_BUS_GETSTATE:
4133 		ASSERT(dcp != NULL);
4134 		ASSERT(pptr != NULL);
4135 		ASSERT(pptr->port_dip != NULL);
4136 		if (ndi_dc_return_bus_state(pptr->port_dip, dcp) !=
4137 		    NDI_SUCCESS) {
4138 			*rval = EFAULT;
4139 		}
4140 		break;
4141 
4142 	case DEVCTL_BUS_QUIESCE:
4143 	case DEVCTL_BUS_UNQUIESCE:
4144 		*rval = ENOTSUP;
4145 		break;
4146 
4147 	case DEVCTL_BUS_RESET:
4148 	case DEVCTL_BUS_RESETALL:
4149 		ASSERT(pptr != NULL);
4150 		(void) fcp_linkreset(pptr, NULL,  KM_SLEEP);
4151 		break;
4152 
4153 	default:
4154 		ASSERT(dcp != NULL);
4155 		*rval = ENOTTY;
4156 		break;
4157 	}
4158 
4159 	/* all done -- clean up and return */
4160 out:	if (devi_entered) {
4161 		if (is_mpxio) {
4162 			mdi_devi_exit(pptr->port_dip, circ);
4163 		} else {
4164 			ndi_devi_exit(pptr->port_dip, circ);
4165 		}
4166 	}
4167 
4168 	if (dcp != NULL) {
4169 		ndi_dc_freehdl(dcp);
4170 	}
4171 
4172 	return (retval);
4173 }
4174 
4175 
4176 /*ARGSUSED*/
4177 static int
4178 fcp_els_callback(opaque_t ulph, opaque_t port_handle, fc_unsol_buf_t *buf,
4179     uint32_t claimed)
4180 {
4181 	uchar_t			r_ctl;
4182 	uchar_t			ls_code;
4183 	struct fcp_port	*pptr;
4184 
4185 	if ((pptr = fcp_get_port(port_handle)) == NULL || claimed) {
4186 		return (FC_UNCLAIMED);
4187 	}
4188 
4189 	mutex_enter(&pptr->port_mutex);
4190 	if (pptr->port_state & (FCP_STATE_DETACHING |
4191 	    FCP_STATE_SUSPENDED | FCP_STATE_POWER_DOWN)) {
4192 		mutex_exit(&pptr->port_mutex);
4193 		return (FC_UNCLAIMED);
4194 	}
4195 	mutex_exit(&pptr->port_mutex);
4196 
4197 	r_ctl = buf->ub_frame.r_ctl;
4198 
4199 	switch (r_ctl & R_CTL_ROUTING) {
4200 	case R_CTL_EXTENDED_SVC:
4201 		if (r_ctl == R_CTL_ELS_REQ) {
4202 			ls_code = buf->ub_buffer[0];
4203 
4204 			switch (ls_code) {
4205 			case LA_ELS_PRLI:
4206 				/*
4207 				 * We really don't care if something fails.
4208 				 * If the PRLI was not sent out, then the
4209 				 * other end will time it out.
4210 				 */
4211 				if (fcp_unsol_prli(pptr, buf) == FC_SUCCESS) {
4212 					return (FC_SUCCESS);
4213 				}
4214 				return (FC_UNCLAIMED);
4215 				/* NOTREACHED */
4216 
4217 			default:
4218 				break;
4219 			}
4220 		}
4221 		/* FALLTHROUGH */
4222 
4223 	default:
4224 		return (FC_UNCLAIMED);
4225 	}
4226 }
4227 
4228 
4229 /*ARGSUSED*/
4230 static int
4231 fcp_data_callback(opaque_t ulph, opaque_t port_handle, fc_unsol_buf_t *buf,
4232     uint32_t claimed)
4233 {
4234 	return (FC_UNCLAIMED);
4235 }
4236 
4237 /*
4238  *     Function: fcp_statec_callback
4239  *
4240  *  Description: The purpose of this function is to handle a port state change.
4241  *		 It is called from fp/fctl and, in a few instances, internally.
4242  *
4243  *     Argument: ulph		fp/fctl port handle
4244  *		 port_handle	fcp_port structure
4245  *		 port_state	Physical state of the port
4246  *		 port_top	Topology
4247  *		 *devlist	Pointer to the first entry of a table
4248  *				containing the remote ports that can be
4249  *				reached.
4250  *		 dev_cnt	Number of entries pointed by devlist.
4251  *		 port_sid	Port ID of the local port.
4252  *
4253  * Return Value: None
4254  */
/*ARGSUSED*/
static void
fcp_statec_callback(opaque_t ulph, opaque_t port_handle,
    uint32_t port_state, uint32_t port_top, fc_portmap_t *devlist,
    uint32_t dev_cnt, uint32_t port_sid)
{
	uint32_t		link_count;
	int			map_len = 0;
	struct fcp_port	*pptr;
	fcp_map_tag_t		*map_tag = NULL;

	if ((pptr = fcp_get_port(port_handle)) == NULL) {
		fcp_log(CE_WARN, NULL, "!Invalid port handle in callback");
		return;			/* nothing to work with! */
	}

	FCP_TRACE(fcp_logq, pptr->port_instbuf,
	    fcp_trace, FCP_BUF_LEVEL_2, 0,
	    "fcp_statec_callback: port state/dev_cnt/top ="
	    "%d/%d/%d", FC_PORT_STATE_MASK(port_state),
	    dev_cnt, port_top);

	mutex_enter(&pptr->port_mutex);

	/*
	 * If a thread is in detach, don't do anything.
	 */
	if (pptr->port_state & (FCP_STATE_DETACHING |
	    FCP_STATE_SUSPENDED | FCP_STATE_POWER_DOWN)) {
		mutex_exit(&pptr->port_mutex);
		return;
	}

	/*
	 * First thing we do is set the FCP_STATE_IN_CB_DEVC flag so that if
	 * init_pkt is called, it knows whether or not the target's status
	 * (or pd) might be changing.
	 */

	if (FC_PORT_STATE_MASK(port_state) == FC_STATE_DEVICE_CHANGE) {
		pptr->port_state |= FCP_STATE_IN_CB_DEVC;
	}

	/*
	 * the transport doesn't allocate or probe unless being
	 * asked to by either the applications or ULPs
	 *
	 * in cases where the port is OFFLINE at the time of port
	 * attach callback and the link comes ONLINE later, for
	 * easier automatic node creation (i.e. without you having to
	 * go out and run the utility to perform LOGINs) the
	 * following conditional is helpful
	 */
	pptr->port_phys_state = port_state;

	if (dev_cnt) {
		mutex_exit(&pptr->port_mutex);

		/*
		 * One tag per entry in the new device map.  The tags are
		 * later filled with each target's tgt_change_cnt (see
		 * fcp_handle_devices) so downstream processing can detect
		 * intervening target state changes.  KM_NOSLEEP: on
		 * allocation failure the entire state change is dropped
		 * (logged below) rather than blocking in this callback.
		 */
		map_len = sizeof (*map_tag) * dev_cnt;
		map_tag = kmem_alloc(map_len, KM_NOSLEEP);
		if (map_tag == NULL) {
			fcp_log(CE_WARN, pptr->port_dip,
			    "!fcp%d: failed to allocate for map tags; "
			    " state change will not be processed",
			    pptr->port_instance);

			mutex_enter(&pptr->port_mutex);
			pptr->port_state &= ~FCP_STATE_IN_CB_DEVC;
			mutex_exit(&pptr->port_mutex);

			return;
		}

		mutex_enter(&pptr->port_mutex);
	}

	if (pptr->port_id != port_sid) {
		FCP_TRACE(fcp_logq, pptr->port_instbuf,
		    fcp_trace, FCP_BUF_LEVEL_3, 0,
		    "fcp: Port S_ID=0x%x => 0x%x", pptr->port_id,
		    port_sid);
		/*
		 * The local port changed ID. It is the first time a port ID
		 * is assigned or something drastic happened.  We might have
		 * been unplugged and replugged on another loop or fabric port
		 * or somebody grabbed the AL_PA we had or somebody rezoned
		 * the fabric we were plugged into.
		 */
		pptr->port_id = port_sid;
	}

	/*
	 * Every arm of this switch is entered with pptr->port_mutex held
	 * and is responsible for dropping it before breaking out.
	 */
	switch (FC_PORT_STATE_MASK(port_state)) {
	case FC_STATE_OFFLINE:
	case FC_STATE_RESET_REQUESTED:
		/*
		 * link has gone from online to offline -- just update the
		 * state of this port to BUSY and MARKed to go offline
		 */
		FCP_TRACE(fcp_logq, pptr->port_instbuf,
		    fcp_trace, FCP_BUF_LEVEL_3, 0,
		    "link went offline");
		if ((pptr->port_state & FCP_STATE_OFFLINE) && dev_cnt) {
			/*
			 * We were offline a while ago and this one
			 * seems to indicate that the loop has gone
			 * dead forever.
			 */
			pptr->port_tmp_cnt += dev_cnt;
			pptr->port_state &= ~FCP_STATE_OFFLINE;
			pptr->port_state |= FCP_STATE_INIT;
			link_count = pptr->port_link_cnt;
			fcp_handle_devices(pptr, devlist, dev_cnt,
			    link_count, map_tag, FCP_CAUSE_LINK_DOWN);
		} else {
			pptr->port_link_cnt++;
			ASSERT(!(pptr->port_state & FCP_STATE_SUSPENDED));
			fcp_update_state(pptr, (FCP_LUN_BUSY |
			    FCP_LUN_MARK), FCP_CAUSE_LINK_DOWN);
			if (pptr->port_mpxio) {
				fcp_update_mpxio_path_verifybusy(pptr);
			}
			pptr->port_state |= FCP_STATE_OFFLINE;
			pptr->port_state &=
			    ~(FCP_STATE_ONLINING | FCP_STATE_ONLINE);
			pptr->port_tmp_cnt = 0;
		}
		mutex_exit(&pptr->port_mutex);
		break;

	case FC_STATE_ONLINE:
	case FC_STATE_LIP:
	case FC_STATE_LIP_LBIT_SET:
		/*
		 * link has gone from offline to online
		 */
		FCP_TRACE(fcp_logq, pptr->port_instbuf,
		    fcp_trace, FCP_BUF_LEVEL_3, 0,
		    "link went online");

		pptr->port_link_cnt++;

		/*
		 * Drain all outstanding internal packets (polling once
		 * a second, with port_mutex dropped across each delay)
		 * before reworking target/LUN state below.
		 */
		while (pptr->port_ipkt_cnt) {
			mutex_exit(&pptr->port_mutex);
			delay(drv_usectohz(1000000));
			mutex_enter(&pptr->port_mutex);
		}

		pptr->port_topology = port_top;

		/*
		 * The state of the targets and luns accessible through this
		 * port is updated.
		 */
		fcp_update_state(pptr, FCP_LUN_BUSY | FCP_LUN_MARK,
		    FCP_CAUSE_LINK_CHANGE);

		pptr->port_state &= ~(FCP_STATE_INIT | FCP_STATE_OFFLINE);
		pptr->port_state |= FCP_STATE_ONLINING;
		pptr->port_tmp_cnt = dev_cnt;
		link_count = pptr->port_link_cnt;

		pptr->port_deadline = fcp_watchdog_time +
		    FCP_ICMD_DEADLINE;

		if (!dev_cnt) {
			/*
			 * We go directly to the online state if no remote
			 * ports were discovered.
			 */
			FCP_TRACE(fcp_logq, pptr->port_instbuf,
			    fcp_trace, FCP_BUF_LEVEL_3, 0,
			    "No remote ports discovered");

			pptr->port_state &= ~FCP_STATE_ONLINING;
			pptr->port_state |= FCP_STATE_ONLINE;
		}

		switch (port_top) {
		case FC_TOP_FABRIC:
		case FC_TOP_PUBLIC_LOOP:
		case FC_TOP_PRIVATE_LOOP:
		case FC_TOP_PT_PT:

			if (pptr->port_state & FCP_STATE_NS_REG_FAILED) {
				fcp_retry_ns_registry(pptr, port_sid);
			}

			fcp_handle_devices(pptr, devlist, dev_cnt, link_count,
			    map_tag, FCP_CAUSE_LINK_CHANGE);
			break;

		default:
			/*
			 * We got here because we were provided with an unknown
			 * topology.
			 */
			if (pptr->port_state & FCP_STATE_NS_REG_FAILED) {
				pptr->port_state &= ~FCP_STATE_NS_REG_FAILED;
			}

			pptr->port_tmp_cnt -= dev_cnt;
			fcp_log(CE_WARN, pptr->port_dip,
			    "!unknown/unsupported topology (0x%x)", port_top);
			break;
		}
		FCP_TRACE(fcp_logq, pptr->port_instbuf,
		    fcp_trace, FCP_BUF_LEVEL_3, 0,
		    "Notify ssd of the reset to reinstate the reservations");

		scsi_hba_reset_notify_callback(&pptr->port_mutex,
		    &pptr->port_reset_notify_listf);

		mutex_exit(&pptr->port_mutex);

		break;

	case FC_STATE_RESET:
		ASSERT(pptr->port_state & FCP_STATE_OFFLINE);
		FCP_TRACE(fcp_logq, pptr->port_instbuf,
		    fcp_trace, FCP_BUF_LEVEL_3, 0,
		    "RESET state, waiting for Offline/Online state_cb");
		mutex_exit(&pptr->port_mutex);
		break;

	case FC_STATE_DEVICE_CHANGE:
		/*
		 * We come here when an application has requested
		 * Dynamic node creation/deletion in Fabric connectivity.
		 */
		if (pptr->port_state & (FCP_STATE_OFFLINE |
		    FCP_STATE_INIT)) {
			/*
			 * This case can happen when the FCTL is in the
			 * process of giving us on online and the host on
			 * the other side issues a PLOGI/PLOGO. Ideally
			 * the state changes should be serialized unless
			 * they are opposite (online-offline).
			 * The transport will give us a final state change
			 * so we can ignore this for the time being.
			 */
			pptr->port_state &= ~FCP_STATE_IN_CB_DEVC;
			mutex_exit(&pptr->port_mutex);
			break;
		}

		if (pptr->port_state & FCP_STATE_NS_REG_FAILED) {
			fcp_retry_ns_registry(pptr, port_sid);
		}

		/*
		 * Extend the deadline under steady state conditions
		 * to provide more time for the device-change-commands
		 */
		if (!pptr->port_ipkt_cnt) {
			pptr->port_deadline = fcp_watchdog_time +
			    FCP_ICMD_DEADLINE;
		}

		/*
		 * There is another race condition here, where if we were
		 * in ONLINEING state and a devices in the map logs out,
		 * fp will give another state change as DEVICE_CHANGE
		 * and OLD. This will result in that target being offlined.
		 * The pd_handle is freed. If from the first statec callback
		 * we were going to fire a PLOGI/PRLI, the system will
		 * panic in fc_ulp_transport with invalid pd_handle.
		 * The fix is to check for the link_cnt before issuing
		 * any command down.
		 */
		fcp_update_targets(pptr, devlist, dev_cnt,
		    FCP_LUN_BUSY | FCP_LUN_MARK, FCP_CAUSE_TGT_CHANGE);

		link_count = pptr->port_link_cnt;

		fcp_handle_devices(pptr, devlist, dev_cnt,
		    link_count, map_tag, FCP_CAUSE_TGT_CHANGE);

		pptr->port_state &= ~FCP_STATE_IN_CB_DEVC;

		mutex_exit(&pptr->port_mutex);
		break;

	case FC_STATE_TARGET_PORT_RESET:
		if (pptr->port_state & FCP_STATE_NS_REG_FAILED) {
			fcp_retry_ns_registry(pptr, port_sid);
		}

		/* Do nothing else */
		mutex_exit(&pptr->port_mutex);
		break;

	default:
		fcp_log(CE_WARN, pptr->port_dip,
		    "!Invalid state change=0x%x", port_state);
		mutex_exit(&pptr->port_mutex);
		break;
	}

	/* The map tags are only needed for the duration of this call. */
	if (map_tag) {
		kmem_free(map_tag, map_len);
	}
}
4557 
4558 /*
4559  *     Function: fcp_handle_devices
4560  *
4561  *  Description: This function updates the devices currently known by
4562  *		 walking the list provided by the caller.  The list passed
4563  *		 by the caller is supposed to be the list of reachable
4564  *		 devices.
4565  *
4566  *     Argument: *pptr		Fcp port structure.
4567  *		 *devlist	Pointer to the first entry of a table
4568  *				containing the remote ports that can be
4569  *				reached.
4570  *		 dev_cnt	Number of entries pointed by devlist.
4571  *		 link_cnt	Link state count.
4572  *		 *map_tag	Array of fcp_map_tag_t structures.
4573  *		 cause		What caused this function to be called.
4574  *
4575  * Return Value: None
4576  *
4577  *	  Notes: The pptr->port_mutex must be held.
4578  */
static void
fcp_handle_devices(struct fcp_port *pptr, fc_portmap_t devlist[],
    uint32_t dev_cnt, int link_cnt, fcp_map_tag_t *map_tag, int cause)
{
	int			i;
	/*
	 * Non-zero when the previously processed map entry still needs
	 * fcp_call_finish_init_held() run against it (done at the top of
	 * the next iteration, or after the loop for the last entry).
	 */
	int			check_finish_init = 0;
	fc_portmap_t		*map_entry;
	struct fcp_tgt	*ptgt = NULL;

	FCP_TRACE(fcp_logq, pptr->port_instbuf,
	    fcp_trace, FCP_BUF_LEVEL_3, 0,
	    "fcp_handle_devices: called for %d dev(s)", dev_cnt);

	if (dev_cnt) {
		ASSERT(map_tag != NULL);
	}

	/*
	 * The following code goes through the list of remote ports that are
	 * accessible through this (pptr) local port (The list walked is the
	 * one provided by the caller which is the list of the remote ports
	 * currently reachable).  It checks if any of them was already
	 * known by looking for the corresponding target structure based on
	 * the world wide name.	 If a target is part of the list it is tagged
	 * (ptgt->tgt_aux_state = FCP_TGT_TAGGED).
	 *
	 * Old comment
	 * -----------
	 * Before we drop port mutex; we MUST get the tags updated; This
	 * two step process is somewhat slow, but more reliable.
	 */
	for (i = 0; (i < dev_cnt) && (pptr->port_link_cnt == link_cnt); i++) {
		map_entry = &(devlist[i]);

		/*
		 * get ptr to this map entry in our port's
		 * list (if any)
		 */
		ptgt = fcp_lookup_target(pptr,
		    (uchar_t *)&(map_entry->map_pwwn));

		if (ptgt) {
			/* Snapshot the change count for later comparison. */
			map_tag[i] = ptgt->tgt_change_cnt;
			if (cause == FCP_CAUSE_LINK_CHANGE) {
				ptgt->tgt_aux_state = FCP_TGT_TAGGED;
			}
		}
	}

	/*
	 * At this point we know which devices of the new list were already
	 * known (The field tgt_aux_state of the target structure has been
	 * set to FCP_TGT_TAGGED).
	 *
	 * The following code goes through the list of targets currently known
	 * by the local port (the list is actually a hashing table).  If a
	 * target is found and is not tagged, it means the target cannot
	 * be reached anymore through the local port (pptr).  It is offlined.
	 * The offlining only occurs if the cause is FCP_CAUSE_LINK_CHANGE.
	 */
	for (i = 0; i < FCP_NUM_HASH; i++) {
		for (ptgt = pptr->port_tgt_hash_table[i]; ptgt != NULL;
		    ptgt = ptgt->tgt_next) {
			mutex_enter(&ptgt->tgt_mutex);
			if ((ptgt->tgt_aux_state != FCP_TGT_TAGGED) &&
			    (cause == FCP_CAUSE_LINK_CHANGE) &&
			    !(ptgt->tgt_state & FCP_TGT_OFFLINE)) {
				fcp_offline_target_now(pptr, ptgt,
				    link_cnt, ptgt->tgt_change_cnt, 0);
			}
			mutex_exit(&ptgt->tgt_mutex);
		}
	}

	/*
	 * At this point, the devices that were known but cannot be reached
	 * anymore, have most likely been offlined.
	 *
	 * The following section of code seems to go through the list of
	 * remote ports that can now be reached.  For every single one it
	 * checks if it is already known or if it is a new port.
	 */
	for (i = 0; (i < dev_cnt) && (pptr->port_link_cnt == link_cnt); i++) {

		if (check_finish_init) {
			ASSERT(i > 0);
			/* Complete processing of the previous map entry. */
			(void) fcp_call_finish_init_held(pptr, ptgt, link_cnt,
			    map_tag[i - 1], cause);
			check_finish_init = 0;
		}

		/* get a pointer to this map entry */
		map_entry = &(devlist[i]);

		/*
		 * Check for the duplicate map entry flag. If we have marked
		 * this entry as a duplicate we skip it since the correct
		 * (perhaps even same) state change will be encountered
		 * later in the list.
		 */
		if (map_entry->map_flags & PORT_DEVICE_DUPLICATE_MAP_ENTRY) {
			continue;
		}

		/* get ptr to this map entry in our port's list (if any) */
		ptgt = fcp_lookup_target(pptr,
		    (uchar_t *)&(map_entry->map_pwwn));

		if (ptgt) {
			/*
			 * This device was already known.  The field
			 * tgt_aux_state is reset (was probably set to
			 * FCP_TGT_TAGGED previously in this routine).
			 */
			ptgt->tgt_aux_state = 0;
			FCP_TRACE(fcp_logq, pptr->port_instbuf,
			    fcp_trace, FCP_BUF_LEVEL_3, 0,
			    "handle_devices: map did/state/type/flags = "
			    "0x%x/0x%x/0x%x/0x%x, tgt_d_id=0x%x, "
			    "tgt_state=%d",
			    map_entry->map_did.port_id, map_entry->map_state,
			    map_entry->map_type, map_entry->map_flags,
			    ptgt->tgt_d_id, ptgt->tgt_state);
		}

		if (map_entry->map_type == PORT_DEVICE_OLD ||
		    map_entry->map_type == PORT_DEVICE_NEW ||
		    map_entry->map_type == PORT_DEVICE_CHANGED) {
			FCP_TRACE(fcp_logq, pptr->port_instbuf,
			    fcp_trace, FCP_BUF_LEVEL_2, 0,
			    "map_type=%x, did = %x",
			    map_entry->map_type,
			    map_entry->map_did.port_id);
		}

		switch (map_entry->map_type) {
		case PORT_DEVICE_NOCHANGE:
		case PORT_DEVICE_USER_CREATE:
		case PORT_DEVICE_USER_LOGIN:
		case PORT_DEVICE_NEW:
			/*
			 * NOTE(review): when ptgt is NULL (brand new
			 * device), map_tag[i] was never initialized in the
			 * tagging pass above — presumably FCP_TGT_TRACE
			 * tolerates/ignores the tag in that case (it is a
			 * trace macro); confirm.
			 */
			FCP_TGT_TRACE(ptgt, map_tag[i], FCP_TGT_TRACE_1);

			if (fcp_handle_mapflags(pptr, ptgt, map_entry,
			    link_cnt, (ptgt) ? map_tag[i] : 0,
			    cause) == TRUE) {

				FCP_TGT_TRACE(ptgt, map_tag[i],
				    FCP_TGT_TRACE_2);
				check_finish_init++;
			}
			break;

		case PORT_DEVICE_OLD:
			if (ptgt != NULL) {
				FCP_TGT_TRACE(ptgt, map_tag[i],
				    FCP_TGT_TRACE_3);

				mutex_enter(&ptgt->tgt_mutex);
				if (!(ptgt->tgt_state & FCP_TGT_OFFLINE)) {
					/*
					 * Must do an in-line wait for I/Os
					 * to get drained
					 */
					/*
					 * Both locks are dropped for the
					 * drain (1 second polls) and then
					 * reacquired in the original
					 * port-then-target order.
					 */
					mutex_exit(&ptgt->tgt_mutex);
					mutex_exit(&pptr->port_mutex);

					mutex_enter(&ptgt->tgt_mutex);
					while (ptgt->tgt_ipkt_cnt ||
					    fcp_outstanding_lun_cmds(ptgt)
					    == FC_SUCCESS) {
						mutex_exit(&ptgt->tgt_mutex);
						delay(drv_usectohz(1000000));
						mutex_enter(&ptgt->tgt_mutex);
					}
					mutex_exit(&ptgt->tgt_mutex);

					mutex_enter(&pptr->port_mutex);
					mutex_enter(&ptgt->tgt_mutex);

					(void) fcp_offline_target(pptr, ptgt,
					    link_cnt, map_tag[i], 0, 0);
				}
				mutex_exit(&ptgt->tgt_mutex);
			}
			check_finish_init++;
			break;

		case PORT_DEVICE_USER_DELETE:
		case PORT_DEVICE_USER_LOGOUT:
			if (ptgt != NULL) {
				FCP_TGT_TRACE(ptgt, map_tag[i],
				    FCP_TGT_TRACE_4);

				mutex_enter(&ptgt->tgt_mutex);
				if (!(ptgt->tgt_state & FCP_TGT_OFFLINE)) {
					(void) fcp_offline_target(pptr, ptgt,
					    link_cnt, map_tag[i], 1, 0);
				}
				mutex_exit(&ptgt->tgt_mutex);
			}
			check_finish_init++;
			break;

		case PORT_DEVICE_CHANGED:
			if (ptgt != NULL) {
				FCP_TGT_TRACE(ptgt, map_tag[i],
				    FCP_TGT_TRACE_5);

				if (fcp_device_changed(pptr, ptgt,
				    map_entry, link_cnt, map_tag[i],
				    cause) == TRUE) {
					check_finish_init++;
				}
			} else {
				if (fcp_handle_mapflags(pptr, ptgt,
				    map_entry, link_cnt, 0, cause) == TRUE) {
					check_finish_init++;
				}
			}
			break;

		default:
			fcp_log(CE_WARN, pptr->port_dip,
			    "!Invalid map_type=0x%x", map_entry->map_type);
			check_finish_init++;
			break;
		}
	}

	/* Finish-init for the last entry, or offline everything if empty. */
	if (check_finish_init && pptr->port_link_cnt == link_cnt) {
		ASSERT(i > 0);
		(void) fcp_call_finish_init_held(pptr, ptgt, link_cnt,
		    map_tag[i-1], cause);
	} else if (dev_cnt == 0 && pptr->port_link_cnt == link_cnt) {
		fcp_offline_all(pptr, link_cnt, cause);
	}
}
4816 
4817 /*
4818  *     Function: fcp_handle_mapflags
4819  *
4820  *  Description: This function creates a target structure if the ptgt passed
4821  *		 is NULL.  It also kicks off the PLOGI if we are not logged
4822  *		 into the target yet or the PRLI if we are logged into the
4823  *		 target already.  The rest of the treatment is done in the
4824  *		 callbacks of the PLOGI or PRLI.
4825  *
4826  *     Argument: *pptr		FCP Port structure.
4827  *		 *ptgt		Target structure.
4828  *		 *map_entry	Array of fc_portmap_t structures.
4829  *		 link_cnt	Link state count.
4830  *		 tgt_cnt	Target state count.
4831  *		 cause		What caused this function to be called.
4832  *
4833  * Return Value: TRUE	Failed
4834  *		 FALSE	Succeeded
4835  *
4836  *	  Notes: pptr->port_mutex must be owned.
4837  */
static int
fcp_handle_mapflags(struct fcp_port	*pptr, struct fcp_tgt	*ptgt,
    fc_portmap_t *map_entry, int link_cnt, int tgt_cnt, int cause)
{
	int			lcount;
	int			tcount;
	int			ret = TRUE;
	int			alloc;
	struct fcp_ipkt	*icmd;
	struct fcp_lun	*pseq_lun = NULL;
	uchar_t			opcode;
	int			valid_ptgt_was_passed = FALSE;

	ASSERT(mutex_owned(&pptr->port_mutex));

	/*
	 * This case is possible where the FCTL has come up and done discovery
	 * before FCP was loaded and attached. FCTL would have discovered the
	 * devices and later the ULP came online. In this case ULP's would get
	 * PORT_DEVICE_NOCHANGE but target would be NULL.
	 */
	if (ptgt == NULL) {
		/* don't already have a target */
		/* port_mutex is dropped around the (blocking) allocation */
		mutex_exit(&pptr->port_mutex);
		ptgt = fcp_alloc_tgt(pptr, map_entry, link_cnt);
		mutex_enter(&pptr->port_mutex);

		if (ptgt == NULL) {
			fcp_log(CE_WARN, pptr->port_dip,
			    "!FC target allocation failed");
			return (ret);
		}
		mutex_enter(&ptgt->tgt_mutex);
		ptgt->tgt_statec_cause = cause;
		ptgt->tgt_tmp_cnt = 1;
		mutex_exit(&ptgt->tgt_mutex);
	} else {
		valid_ptgt_was_passed = TRUE;
	}

	/*
	 * Copy in the target parameters
	 */
	mutex_enter(&ptgt->tgt_mutex);
	ptgt->tgt_d_id = map_entry->map_did.port_id;
	ptgt->tgt_hard_addr = map_entry->map_hard_addr.hard_addr;
	/*
	 * Cached pd/fca handles are refreshed from the new map entry;
	 * tgt_fca_dev is invalidated here (presumably re-established
	 * later in discovery — confirm).
	 */
	ptgt->tgt_pd_handle = map_entry->map_pd;
	ptgt->tgt_fca_dev = NULL;

	/* Copy port and node WWNs */
	bcopy(&map_entry->map_nwwn, &ptgt->tgt_node_wwn.raw_wwn[0],
	    FC_WWN_SIZE);
	bcopy(&map_entry->map_pwwn, &ptgt->tgt_port_wwn.raw_wwn[0],
	    FC_WWN_SIZE);

	if (!(map_entry->map_flags & PORT_DEVICE_NO_SKIP_DEVICE_DISCOVERY) &&
	    (map_entry->map_type == PORT_DEVICE_NOCHANGE) &&
	    (map_entry->map_state == PORT_DEVICE_LOGGED_IN) &&
	    valid_ptgt_was_passed) {
		/*
		 * determine if there are any tape LUNs on this target
		 */
		for (pseq_lun = ptgt->tgt_lun;
		    pseq_lun != NULL;
		    pseq_lun = pseq_lun->lun_next) {
			if ((pseq_lun->lun_type == DTYPE_SEQUENTIAL) &&
			    !(pseq_lun->lun_state & FCP_LUN_OFFLINE)) {
				/*
				 * An online tape LUN exists: skip rediscovery
				 * of this unchanged, logged-in target and
				 * return TRUE (target intentionally skipped).
				 */
				fcp_update_tgt_state(ptgt, FCP_RESET,
				    FCP_LUN_MARK);
				mutex_exit(&ptgt->tgt_mutex);
				return (ret);
			}
		}
	}

	/*
	 * If ptgt was NULL when this function was entered, then tgt_node_state
	 * was never specifically initialized but zeroed out which means
	 * FCP_TGT_NODE_NONE.
	 */
	switch (ptgt->tgt_node_state) {
	case FCP_TGT_NODE_NONE:
	case FCP_TGT_NODE_ON_DEMAND:
		if (FC_TOP_EXTERNAL(pptr->port_topology) &&
		    !fcp_enable_auto_configuration &&
		    map_entry->map_type != PORT_DEVICE_USER_CREATE) {
			ptgt->tgt_node_state = FCP_TGT_NODE_ON_DEMAND;
		} else if (FC_TOP_EXTERNAL(pptr->port_topology) &&
		    fcp_enable_auto_configuration &&
		    (ptgt->tgt_manual_config_only == 1) &&
		    map_entry->map_type != PORT_DEVICE_USER_CREATE) {
			/*
			 * If auto configuration is set and
			 * the tgt_manual_config_only flag is set then
			 * we only want the user to be able to change
			 * the state through create_on_demand.
			 */
			ptgt->tgt_node_state = FCP_TGT_NODE_ON_DEMAND;
		} else {
			ptgt->tgt_node_state = FCP_TGT_NODE_NONE;
		}
		break;

	case FCP_TGT_NODE_PRESENT:
		break;
	}
	/*
	 * If we are booting from a fabric device, make sure we
	 * mark the node state appropriately for this target to be
	 * enumerated
	 */
	if (FC_TOP_EXTERNAL(pptr->port_topology) && pptr->port_boot_wwn[0]) {
		if (bcmp((caddr_t)pptr->port_boot_wwn,
		    (caddr_t)&ptgt->tgt_port_wwn.raw_wwn[0],
		    sizeof (ptgt->tgt_port_wwn)) == 0) {
			ptgt->tgt_node_state = FCP_TGT_NODE_NONE;
		}
	}
	mutex_exit(&ptgt->tgt_mutex);

	FCP_TRACE(fcp_logq, pptr->port_instbuf,
	    fcp_trace, FCP_BUF_LEVEL_3, 0,
	    "map_pd=%p, map_type=%x, did = %x, ulp_rscn_count=0x%x",
	    map_entry->map_pd, map_entry->map_type, map_entry->map_did.port_id,
	    map_entry->map_rscn_info.ulp_rscn_count);

	mutex_enter(&ptgt->tgt_mutex);

	/*
	 * Reset target OFFLINE state and mark the target BUSY
	 */
	ptgt->tgt_state &= ~FCP_TGT_OFFLINE;
	ptgt->tgt_state |= (FCP_TGT_BUSY | FCP_TGT_MARK);

	/* Use the caller's counts when provided, else the target's own. */
	tcount = tgt_cnt ? tgt_cnt : ptgt->tgt_change_cnt;
	lcount = link_cnt;

	mutex_exit(&ptgt->tgt_mutex);
	mutex_exit(&pptr->port_mutex);

	/*
	 * if we are already logged in, then we do a PRLI, else
	 * we do a PLOGI first (to get logged in)
	 *
	 * We will not check if we are the PLOGI initiator
	 */
	opcode = (map_entry->map_state == PORT_DEVICE_LOGGED_IN &&
	    map_entry->map_pd != NULL) ? LA_ELS_PRLI : LA_ELS_PLOGI;

	/* Buffer sized for the larger of the two possible ELS payloads. */
	alloc = FCP_MAX(sizeof (la_els_logi_t), sizeof (la_els_prli_t));

	icmd = fcp_icmd_alloc(pptr, ptgt, alloc, alloc, 0, 0, lcount, tcount,
	    cause, map_entry->map_rscn_info.ulp_rscn_count);

	if (icmd == NULL) {
		FCP_TGT_TRACE(ptgt, tgt_cnt, FCP_TGT_TRACE_29);
		/*
		 * We've exited port_mutex before calling fcp_icmd_alloc,
		 * we need to make sure we reacquire it before returning.
		 */
		/*
		 * NOTE(review): FALSE (i.e. "not skipped") is returned even
		 * though the ELS could not be allocated — confirm callers
		 * treat this path as handled.
		 */
		mutex_enter(&pptr->port_mutex);
		return (FALSE);
	}

	/* TRUE is only returned while target is intended skipped */
	ret = FALSE;
	/* discover info about this target */
	if ((fcp_send_els(pptr, ptgt, icmd, opcode,
	    lcount, tcount, cause)) == DDI_SUCCESS) {
		FCP_TGT_TRACE(ptgt, tgt_cnt, FCP_TGT_TRACE_9);
	} else {
		/* ELS could not be sent: release the internal packet. */
		fcp_icmd_free(pptr, icmd);
		ret = TRUE;
	}
	mutex_enter(&pptr->port_mutex);

	return (ret);
}
5016 
5017 /*
5018  *     Function: fcp_send_els
5019  *
5020  *  Description: Sends an ELS to the target specified by the caller.  Supports
5021  *		 PLOGI and PRLI.
5022  *
5023  *     Argument: *pptr		Fcp port.
5024  *		 *ptgt		Target to send the ELS to.
5025  *		 *icmd		Internal packet
5026  *		 opcode		ELS opcode
5027  *		 lcount		Link state change counter
5028  *		 tcount		Target state change counter
5029  *		 cause		What caused the call
5030  *
5031  * Return Value: DDI_SUCCESS
5032  *		 Others
5033  */
static int
fcp_send_els(struct fcp_port *pptr, struct fcp_tgt *ptgt,
    struct fcp_ipkt *icmd, uchar_t opcode, int lcount, int tcount, int cause)
{
	fc_packet_t		*fpkt;
	fc_frame_hdr_t		*hp;
	int			internal = 0;	/* set if icmd allocated here */
	int			alloc;
	int			cmd_len;
	int			resp_len;
	int			res = DDI_FAILURE; /* default result */
	int			rval = DDI_FAILURE;

	ASSERT(opcode == LA_ELS_PLOGI || opcode == LA_ELS_PRLI);
	ASSERT(ptgt->tgt_port == pptr);

	FCP_TRACE(fcp_logq, pptr->port_instbuf,
	    fcp_trace, FCP_BUF_LEVEL_5, 0,
	    "fcp_send_els: d_id=0x%x ELS 0x%x (%s)", ptgt->tgt_d_id, opcode,
	    (opcode == LA_ELS_PLOGI) ? "PLOGI" : "PRLI");

	/* Command and response payload sizes depend on the ELS being sent. */
	if (opcode == LA_ELS_PLOGI) {
		cmd_len = sizeof (la_els_logi_t);
		resp_len = sizeof (la_els_logi_t);
	} else {
		ASSERT(opcode == LA_ELS_PRLI);
		cmd_len = sizeof (la_els_prli_t);
		resp_len = sizeof (la_els_prli_t);
	}

	/*
	 * No packet supplied by the caller: allocate one sized for the
	 * larger of the two ELS payloads and remember (via "internal")
	 * that this function owns it and must free it on failure.
	 */
	if (icmd == NULL) {
		alloc = FCP_MAX(sizeof (la_els_logi_t),
		    sizeof (la_els_prli_t));
		icmd = fcp_icmd_alloc(pptr, ptgt, alloc, alloc, 0, 0,
		    lcount, tcount, cause, FC_INVALID_RSCN_COUNT);
		if (icmd == NULL) {
			FCP_TGT_TRACE(ptgt, tcount, FCP_TGT_TRACE_10);
			return (res);
		}
		internal++;
	}
	fpkt = icmd->ipkt_fpkt;

	fpkt->pkt_cmdlen = cmd_len;
	fpkt->pkt_rsplen = resp_len;
	fpkt->pkt_datalen = 0;
	icmd->ipkt_retries = 0;

	/* fill in fpkt info */
	fpkt->pkt_tran_flags = FC_TRAN_CLASS3 | FC_TRAN_INTR;
	fpkt->pkt_tran_type = FC_PKT_EXCHANGE;
	fpkt->pkt_timeout = FCP_ELS_TIMEOUT;

	/* get ptr to frame hdr in fpkt */
	hp = &fpkt->pkt_cmd_fhdr;

	/*
	 * fill in frame hdr
	 */
	hp->r_ctl = R_CTL_ELS_REQ;
	hp->s_id = pptr->port_id;	/* source ID */
	hp->d_id = ptgt->tgt_d_id;	/* dest ID */
	hp->type = FC_TYPE_EXTENDED_LS;
	hp->f_ctl = F_CTL_SEQ_INITIATIVE | F_CTL_FIRST_SEQ;
	hp->seq_id = 0;
	hp->rsvd = 0;
	hp->df_ctl  = 0;
	hp->seq_cnt = 0;
	hp->ox_id = 0xffff;		/* i.e. none */
	hp->rx_id = 0xffff;		/* i.e. none */
	hp->ro = 0;

	/*
	 * at this point we have a filled in cmd pkt
	 *
	 * fill in the respective info, then use the transport to send
	 * the packet
	 *
	 * for a PLOGI call fc_ulp_login(), and
	 * for a PRLI call fc_ulp_issue_els()
	 */
	switch (opcode) {
	case LA_ELS_PLOGI: {
		struct la_els_logi logi;

		bzero(&logi, sizeof (struct la_els_logi));

		hp = &fpkt->pkt_cmd_fhdr;
		hp->r_ctl = R_CTL_ELS_REQ;
		logi.ls_code.ls_code = LA_ELS_PLOGI;
		logi.ls_code.mbz = 0;

		/* Copy the PLOGI payload out to the DMA-able cmd buffer. */
		FCP_CP_OUT((uint8_t *)&logi, fpkt->pkt_cmd,
		    fpkt->pkt_cmd_acc, sizeof (struct la_els_logi));

		icmd->ipkt_opcode = LA_ELS_PLOGI;

		/*
		 * Recheck the link/target generation counters under
		 * port_mutex; only issue the ELS if no state change has
		 * happened since the packet was set up.
		 */
		mutex_enter(&pptr->port_mutex);
		if (!FCP_TGT_STATE_CHANGED(ptgt, icmd)) {

			mutex_exit(&pptr->port_mutex);

			rval = fc_ulp_login(pptr->port_fp_handle, &fpkt, 1);
			if (rval == FC_SUCCESS) {
				res = DDI_SUCCESS;
				break;
			}

			FCP_TGT_TRACE(ptgt, tcount, FCP_TGT_TRACE_11);

			/* Transport refused the request; try to recover. */
			res = fcp_handle_ipkt_errors(pptr, ptgt, icmd,
			    rval, "PLOGI");
		} else {
			FCP_TRACE(fcp_logq, pptr->port_instbuf,
			    fcp_trace, FCP_BUF_LEVEL_5, 0,
			    "fcp_send_els1: state change occured"
			    " for D_ID=0x%x", ptgt->tgt_d_id);
			mutex_exit(&pptr->port_mutex);
			FCP_TGT_TRACE(ptgt, tcount, FCP_TGT_TRACE_12);
		}
		break;
	}

	case LA_ELS_PRLI: {
		struct la_els_prli	prli;
		struct fcp_prli		*fprli;

		bzero(&prli, sizeof (struct la_els_prli));

		hp = &fpkt->pkt_cmd_fhdr;
		hp->r_ctl = R_CTL_ELS_REQ;

		/* fill in PRLI cmd ELS fields */
		prli.ls_code = LA_ELS_PRLI;
		prli.page_length = 0x10;	/* huh? */
		prli.payload_length = sizeof (struct la_els_prli);

		icmd->ipkt_opcode = LA_ELS_PRLI;

		/* get ptr to PRLI service params */
		fprli = (struct fcp_prli *)prli.service_params;

		/* fill in service params */
		fprli->type = 0x08;
		fprli->resvd1 = 0;
		fprli->orig_process_assoc_valid = 0;
		fprli->resp_process_assoc_valid = 0;
		fprli->establish_image_pair = 1;
		fprli->resvd2 = 0;
		fprli->resvd3 = 0;
		fprli->obsolete_1 = 0;
		fprli->obsolete_2 = 0;
		fprli->data_overlay_allowed = 0;
		fprli->initiator_fn = 1;
		fprli->confirmed_compl_allowed = 1;

		/*
		 * Advertise target capability only if the "ltct" target
		 * driver is present on this system.
		 */
		if (fc_ulp_is_name_present("ltct") == FC_SUCCESS) {
			fprli->target_fn = 1;
		} else {
			fprli->target_fn = 0;
		}

		fprli->retry = 1;
		fprli->read_xfer_rdy_disabled = 1;
		fprli->write_xfer_rdy_disabled = 0;

		FCP_CP_OUT((uint8_t *)&prli, fpkt->pkt_cmd,
		    fpkt->pkt_cmd_acc, sizeof (struct la_els_prli));

		/* issue the PRLI request */

		/*
		 * Same generation-counter recheck as the PLOGI case: do
		 * not issue the ELS if link/target state has changed.
		 */
		mutex_enter(&pptr->port_mutex);
		if (!FCP_TGT_STATE_CHANGED(ptgt, icmd)) {

			mutex_exit(&pptr->port_mutex);

			rval = fc_ulp_issue_els(pptr->port_fp_handle, fpkt);
			if (rval == FC_SUCCESS) {
				res = DDI_SUCCESS;
				break;
			}

			FCP_TGT_TRACE(ptgt, tcount, FCP_TGT_TRACE_13);

			res = fcp_handle_ipkt_errors(pptr, ptgt, icmd,
			    rval, "PRLI");
		} else {
			mutex_exit(&pptr->port_mutex);
			FCP_TGT_TRACE(ptgt, tcount, FCP_TGT_TRACE_14);
		}
		break;
	}

	default:
		fcp_log(CE_WARN, NULL, "!invalid ELS opcode=0x%x", opcode);
		break;
	}

	FCP_TRACE(fcp_logq, pptr->port_instbuf,
	    fcp_trace, FCP_BUF_LEVEL_5, 0,
	    "fcp_send_els: returning %d", res);

	/*
	 * On failure, free the packet only if it was allocated by this
	 * function; a caller-supplied icmd remains the caller's to free.
	 */
	if (res != DDI_SUCCESS) {
		if (internal) {
			fcp_icmd_free(pptr, icmd);
		}
	}

	return (res);
}
5244 
5245 
5246 /*
5247  * called internally update the state of all of the tgts and each LUN
5248  * for this port (i.e. each target  known to be attached to this port)
5249  * if they are not already offline
5250  *
5251  * must be called with the port mutex owned
5252  *
5253  * acquires and releases the target mutexes for each target attached
5254  * to this port
5255  */
5256 void
5257 fcp_update_state(struct fcp_port *pptr, uint32_t state, int cause)
5258 {
5259 	int i;
5260 	struct fcp_tgt *ptgt;
5261 
5262 	ASSERT(mutex_owned(&pptr->port_mutex));
5263 
5264 	for (i = 0; i < FCP_NUM_HASH; i++) {
5265 		for (ptgt = pptr->port_tgt_hash_table[i]; ptgt != NULL;
5266 		    ptgt = ptgt->tgt_next) {
5267 			mutex_enter(&ptgt->tgt_mutex);
5268 			fcp_update_tgt_state(ptgt, FCP_SET, state);
5269 			ptgt->tgt_change_cnt++;
5270 			ptgt->tgt_statec_cause = cause;
5271 			ptgt->tgt_tmp_cnt = 1;
5272 			ptgt->tgt_done = 0;
5273 			mutex_exit(&ptgt->tgt_mutex);
5274 		}
5275 	}
5276 }
5277 
5278 
5279 static void
5280 fcp_offline_all(struct fcp_port *pptr, int lcount, int cause)
5281 {
5282 	int i;
5283 	int ndevs;
5284 	struct fcp_tgt *ptgt;
5285 
5286 	ASSERT(mutex_owned(&pptr->port_mutex));
5287 
5288 	for (ndevs = 0, i = 0; i < FCP_NUM_HASH; i++) {
5289 		for (ptgt = pptr->port_tgt_hash_table[i]; ptgt != NULL;
5290 		    ptgt = ptgt->tgt_next) {
5291 			ndevs++;
5292 		}
5293 	}
5294 
5295 	if (ndevs == 0) {
5296 		return;
5297 	}
5298 	pptr->port_tmp_cnt = ndevs;
5299 
5300 	for (i = 0; i < FCP_NUM_HASH; i++) {
5301 		for (ptgt = pptr->port_tgt_hash_table[i]; ptgt != NULL;
5302 		    ptgt = ptgt->tgt_next) {
5303 			(void) fcp_call_finish_init_held(pptr, ptgt,
5304 			    lcount, ptgt->tgt_change_cnt, cause);
5305 		}
5306 	}
5307 }
5308 
5309 /*
5310  *     Function: fcp_update_tgt_state
5311  *
5312  *  Description: This function updates the field tgt_state of a target.	 That
5313  *		 field is a bitmap and which bit can be set or reset
5314  *		 individually.	The action applied to the target state is also
5315  *		 applied to all the LUNs belonging to the target (provided the
5316  *		 LUN is not offline).  A side effect of applying the state
5317  *		 modification to the target and the LUNs is the field tgt_trace
5318  *		 of the target and lun_trace of the LUNs is set to zero.
5319  *
5320  *
5321  *     Argument: *ptgt	Target structure.
5322  *		 flag	Flag indication what action to apply (set/reset).
5323  *		 state	State bits to update.
5324  *
5325  * Return Value: None
5326  *
5327  *	Context: Interrupt, Kernel or User context.
5328  *		 The mutex of the target (ptgt->tgt_mutex) must be owned when
5329  *		 calling this function.
5330  */
5331 void
5332 fcp_update_tgt_state(struct fcp_tgt *ptgt, int flag, uint32_t state)
5333 {
5334 	struct fcp_lun *plun;
5335 
5336 	ASSERT(mutex_owned(&ptgt->tgt_mutex));
5337 
5338 	if (!(ptgt->tgt_state & FCP_TGT_OFFLINE)) {
5339 		/* The target is not offline. */
5340 		if (flag == FCP_SET) {
5341 			ptgt->tgt_state |= state;
5342 			ptgt->tgt_trace = 0;
5343 		} else {
5344 			ptgt->tgt_state &= ~state;
5345 		}
5346 
5347 		for (plun = ptgt->tgt_lun; plun != NULL;
5348 		    plun = plun->lun_next) {
5349 			if (!(plun->lun_state & FCP_LUN_OFFLINE)) {
5350 				/* The LUN is not offline. */
5351 				if (flag == FCP_SET) {
5352 					plun->lun_state |= state;
5353 					plun->lun_trace = 0;
5354 				} else {
5355 					plun->lun_state &= ~state;
5356 				}
5357 			}
5358 		}
5359 	}
5360 }
5361 
5362 /*
5363  *     Function: fcp_update_tgt_state
5364  *
5365  *  Description: This function updates the field lun_state of a LUN.  That
5366  *		 field is a bitmap and which bit can be set or reset
5367  *		 individually.
5368  *
5369  *     Argument: *plun	LUN structure.
5370  *		 flag	Flag indication what action to apply (set/reset).
5371  *		 state	State bits to update.
5372  *
5373  * Return Value: None
5374  *
5375  *	Context: Interrupt, Kernel or User context.
5376  *		 The mutex of the target (ptgt->tgt_mutex) must be owned when
5377  *		 calling this function.
5378  */
5379 void
5380 fcp_update_lun_state(struct fcp_lun *plun, int flag, uint32_t state)
5381 {
5382 	struct fcp_tgt	*ptgt = plun->lun_tgt;
5383 
5384 	ASSERT(mutex_owned(&ptgt->tgt_mutex));
5385 
5386 	if (!(plun->lun_state & FCP_TGT_OFFLINE)) {
5387 		if (flag == FCP_SET) {
5388 			plun->lun_state |= state;
5389 		} else {
5390 			plun->lun_state &= ~state;
5391 		}
5392 	}
5393 }
5394 
5395 /*
5396  *     Function: fcp_get_port
5397  *
5398  *  Description: This function returns the fcp_port structure from the opaque
5399  *		 handle passed by the caller.  That opaque handle is the handle
5400  *		 used by fp/fctl to identify a particular local port.  That
5401  *		 handle has been stored in the corresponding fcp_port
5402  *		 structure.  This function is going to walk the global list of
5403  *		 fcp_port structures till one has a port_fp_handle that matches
5404  *		 the handle passed by the caller.  This function enters the
5405  *		 mutex fcp_global_mutex while walking the global list and then
5406  *		 releases it.
5407  *
5408  *     Argument: port_handle	Opaque handle that fp/fctl uses to identify a
5409  *				particular port.
5410  *
5411  * Return Value: NULL		Not found.
5412  *		 Not NULL	Pointer to the fcp_port structure.
5413  *
5414  *	Context: Interrupt, Kernel or User context.
5415  */
5416 static struct fcp_port *
5417 fcp_get_port(opaque_t port_handle)
5418 {
5419 	struct fcp_port *pptr;
5420 
5421 	ASSERT(port_handle != NULL);
5422 
5423 	mutex_enter(&fcp_global_mutex);
5424 	for (pptr = fcp_port_head; pptr != NULL; pptr = pptr->port_next) {
5425 		if (pptr->port_fp_handle == port_handle) {
5426 			break;
5427 		}
5428 	}
5429 	mutex_exit(&fcp_global_mutex);
5430 
5431 	return (pptr);
5432 }
5433 
5434 
5435 static void
5436 fcp_unsol_callback(fc_packet_t *fpkt)
5437 {
5438 	struct fcp_ipkt *icmd = (struct fcp_ipkt *)fpkt->pkt_ulp_private;
5439 	struct fcp_port *pptr = icmd->ipkt_port;
5440 
5441 	if (fpkt->pkt_state != FC_PKT_SUCCESS) {
5442 		caddr_t state, reason, action, expln;
5443 
5444 		(void) fc_ulp_pkt_error(fpkt, &state, &reason,
5445 		    &action, &expln);
5446 
5447 		fcp_log(CE_WARN, pptr->port_dip,
5448 		    "!couldn't post response to unsolicited request: "
5449 		    " state=%s reason=%s rx_id=%x ox_id=%x",
5450 		    state, reason, fpkt->pkt_cmd_fhdr.ox_id,
5451 		    fpkt->pkt_cmd_fhdr.rx_id);
5452 	}
5453 	fcp_icmd_free(pptr, icmd);
5454 }
5455 
5456 
5457 /*
5458  * Perform general purpose preparation of a response to an unsolicited request
5459  */
5460 static void
5461 fcp_unsol_resp_init(fc_packet_t *pkt, fc_unsol_buf_t *buf,
5462     uchar_t r_ctl, uchar_t type)
5463 {
5464 	pkt->pkt_cmd_fhdr.r_ctl = r_ctl;
5465 	pkt->pkt_cmd_fhdr.d_id = buf->ub_frame.s_id;
5466 	pkt->pkt_cmd_fhdr.s_id = buf->ub_frame.d_id;
5467 	pkt->pkt_cmd_fhdr.type = type;
5468 	pkt->pkt_cmd_fhdr.f_ctl = F_CTL_LAST_SEQ | F_CTL_XCHG_CONTEXT;
5469 	pkt->pkt_cmd_fhdr.seq_id = buf->ub_frame.seq_id;
5470 	pkt->pkt_cmd_fhdr.df_ctl  = buf->ub_frame.df_ctl;
5471 	pkt->pkt_cmd_fhdr.seq_cnt = buf->ub_frame.seq_cnt;
5472 	pkt->pkt_cmd_fhdr.ox_id = buf->ub_frame.ox_id;
5473 	pkt->pkt_cmd_fhdr.rx_id = buf->ub_frame.rx_id;
5474 	pkt->pkt_cmd_fhdr.ro = 0;
5475 	pkt->pkt_cmd_fhdr.rsvd = 0;
5476 	pkt->pkt_comp = fcp_unsol_callback;
5477 	pkt->pkt_pd = NULL;
5478 }
5479 
5480 
5481 /*ARGSUSED*/
/*
 * Build and send an LS_ACC response to an unsolicited PRLI request.
 * The accept payload advertises this port's FCP service parameters.
 * Returns FC_SUCCESS when the response was issued (or queued for a
 * busy/offline link) and FC_FAILURE otherwise.
 */
static int
fcp_unsol_prli(struct fcp_port *pptr, fc_unsol_buf_t *buf)
{
	fc_packet_t		*fpkt;
	struct la_els_prli	prli;
	struct fcp_prli		*fprli;
	struct fcp_ipkt	*icmd;
	struct la_els_prli	*from;
	struct fcp_prli		*orig;
	struct fcp_tgt	*ptgt;
	int			tcount = 0;
	int			lcount;

	from = (struct la_els_prli *)buf->ub_buffer;
	orig = (struct fcp_prli *)from->service_params;

	/*
	 * Snapshot the sender's target change counter (if we know the
	 * sender) and the link counter, so the internal packet can be
	 * tied to the current generation of the link/target state.
	 */
	if ((ptgt = fcp_get_target_by_did(pptr, buf->ub_frame.s_id)) !=
	    NULL) {
		mutex_enter(&ptgt->tgt_mutex);
		tcount = ptgt->tgt_change_cnt;
		mutex_exit(&ptgt->tgt_mutex);
	}
	mutex_enter(&pptr->port_mutex);
	lcount = pptr->port_link_cnt;
	mutex_exit(&pptr->port_mutex);

	if ((icmd = fcp_icmd_alloc(pptr, ptgt, sizeof (la_els_prli_t),
	    sizeof (la_els_prli_t), 0, 0, lcount, tcount, 0,
	    FC_INVALID_RSCN_COUNT)) == NULL) {
		return (FC_FAILURE);
	}
	fpkt = icmd->ipkt_fpkt;
	fpkt->pkt_tran_flags = FC_TRAN_CLASS3 | FC_TRAN_INTR;
	fpkt->pkt_tran_type = FC_PKT_OUTBOUND;
	fpkt->pkt_timeout = FCP_ELS_TIMEOUT;
	fpkt->pkt_cmdlen = sizeof (la_els_prli_t);
	fpkt->pkt_rsplen = 0;
	fpkt->pkt_datalen = 0;

	icmd->ipkt_opcode = LA_ELS_PRLI;

	/* Build the LS_ACC payload for the PRLI accept. */
	bzero(&prli, sizeof (struct la_els_prli));
	fprli = (struct fcp_prli *)prli.service_params;
	prli.ls_code = LA_ELS_ACC;
	prli.page_length = 0x10;
	prli.payload_length = sizeof (struct la_els_prli);

	/* fill in service params */
	fprli->type = 0x08;
	fprli->resvd1 = 0;
	/* Echo the originator's process-associator information. */
	fprli->orig_process_assoc_valid = orig->orig_process_assoc_valid;
	fprli->orig_process_associator = orig->orig_process_associator;
	fprli->resp_process_assoc_valid = 0;
	fprli->establish_image_pair = 1;
	fprli->resvd2 = 0;
	fprli->resvd3 = 0;
	fprli->obsolete_1 = 0;
	fprli->obsolete_2 = 0;
	fprli->data_overlay_allowed = 0;
	fprli->initiator_fn = 1;
	fprli->confirmed_compl_allowed = 1;

	/* Advertise target capability only if "ltct" is present. */
	if (fc_ulp_is_name_present("ltct") == FC_SUCCESS) {
		fprli->target_fn = 1;
	} else {
		fprli->target_fn = 0;
	}

	fprli->retry = 1;
	fprli->read_xfer_rdy_disabled = 1;
	fprli->write_xfer_rdy_disabled = 0;

	/* save the unsol prli payload first */
	FCP_CP_OUT((uint8_t *)from, fpkt->pkt_resp,
	    fpkt->pkt_resp_acc, sizeof (struct la_els_prli));

	FCP_CP_OUT((uint8_t *)&prli, fpkt->pkt_cmd,
	    fpkt->pkt_cmd_acc, sizeof (struct la_els_prli));

	fcp_unsol_resp_init(fpkt, buf, R_CTL_ELS_RSP, FC_TYPE_EXTENDED_LS);

	/*
	 * Issue the accept only if the link has not changed since the
	 * internal packet was created.
	 */
	mutex_enter(&pptr->port_mutex);
	if (!FCP_LINK_STATE_CHANGED(pptr, icmd)) {
		int rval;
		mutex_exit(&pptr->port_mutex);

		if ((rval = fc_ulp_issue_els(pptr->port_fp_handle, fpkt)) !=
		    FC_SUCCESS) {
			if (rval == FC_STATEC_BUSY || rval == FC_OFFLINE) {
				/*
				 * Link busy/offline: queue for retry.
				 * NOTE(review): this path returns before
				 * the fc_ulp_ubrelease() below -- the
				 * unsolicited buffer appears to be held
				 * until the queued packet completes;
				 * confirm against the fp/fctl contract.
				 */
				fcp_queue_ipkt(pptr, fpkt);
				return (FC_SUCCESS);
			}
			/* Let it timeout */
			fcp_icmd_free(pptr, icmd);
			return (FC_FAILURE);
		}
	} else {
		mutex_exit(&pptr->port_mutex);
		fcp_icmd_free(pptr, icmd);
		return (FC_FAILURE);
	}

	/* Response issued: give the unsolicited buffer back to fp/fctl. */
	(void) fc_ulp_ubrelease(pptr->port_fp_handle, 1, &buf->ub_token);

	return (FC_SUCCESS);
}
5588 
5589 /*
5590  *     Function: fcp_icmd_alloc
5591  *
 *  Description: This function allocates a fcp_ipkt structure.	The pkt_comp
5593  *		 field is initialized to fcp_icmd_callback.  Sometimes it is
5594  *		 modified by the caller (such as fcp_send_scsi).  The
5595  *		 structure is also tied to the state of the line and of the
5596  *		 target at a particular time.  That link is established by
5597  *		 setting the fields ipkt_link_cnt and ipkt_change_cnt to lcount
5598  *		 and tcount which came respectively from pptr->link_cnt and
5599  *		 ptgt->tgt_change_cnt.
5600  *
5601  *     Argument: *pptr		Fcp port.
5602  *		 *ptgt		Target (destination of the command).
5603  *		 cmd_len	Length of the command.
5604  *		 resp_len	Length of the expected response.
5605  *		 data_len	Length of the data.
 *		 nodma		Indicates whether the command and response
 *				will be transferred through DMA or not.
5608  *		 lcount		Link state change counter.
5609  *		 tcount		Target state change counter.
5610  *		 cause		Reason that lead to this call.
5611  *
5612  * Return Value: NULL		Failed.
5613  *		 Not NULL	Internal packet address.
5614  */
static struct fcp_ipkt *
fcp_icmd_alloc(struct fcp_port *pptr, struct fcp_tgt *ptgt, int cmd_len,
    int resp_len, int data_len, int nodma, int lcount, int tcount, int cause,
    uint32_t rscn_count)
{
	int			dma_setup = 0;	/* set once DMA is allocated */
	fc_packet_t		*fpkt;
	struct fcp_ipkt	*icmd = NULL;

	/*
	 * One allocation holds the fcp_ipkt, the data DMA cookie array,
	 * and the FCA private area, laid out in that order.  KM_NOSLEEP
	 * is used throughout, so allocation may fail under pressure.
	 */
	icmd = kmem_zalloc(sizeof (struct fcp_ipkt) +
	    pptr->port_dmacookie_sz + pptr->port_priv_pkt_len,
	    KM_NOSLEEP);
	if (icmd == NULL) {
		fcp_log(CE_WARN, pptr->port_dip,
		    "!internal packet allocation failed");
		return (NULL);
	}

	/*
	 * initialize the allocated packet
	 */
	icmd->ipkt_nodma = nodma;
	icmd->ipkt_next = icmd->ipkt_prev = NULL;
	icmd->ipkt_lun = NULL;

	/* Tie the packet to the current link/target generation. */
	icmd->ipkt_link_cnt = lcount;
	icmd->ipkt_change_cnt = tcount;
	icmd->ipkt_cause = cause;

	mutex_enter(&pptr->port_mutex);
	icmd->ipkt_port = pptr;
	mutex_exit(&pptr->port_mutex);

	/* keep track of amt of data to be sent in pkt */
	icmd->ipkt_cmdlen = cmd_len;
	icmd->ipkt_resplen = resp_len;
	icmd->ipkt_datalen = data_len;

	/* set up pkt's ptr to the fc_packet_t struct, just after the ipkt */
	icmd->ipkt_fpkt = (fc_packet_t *)(&icmd->ipkt_fc_packet);

	/* set pkt's private ptr to point to cmd pkt */
	icmd->ipkt_fpkt->pkt_ulp_private = (opaque_t)icmd;

	/* set FCA private ptr to memory just beyond */
	icmd->ipkt_fpkt->pkt_fca_private = (opaque_t)
	    ((char *)icmd + sizeof (struct fcp_ipkt) +
	    pptr->port_dmacookie_sz);

	/* get ptr to fpkt substruct and fill it in */
	fpkt = icmd->ipkt_fpkt;
	fpkt->pkt_data_cookie = (ddi_dma_cookie_t *)((caddr_t)icmd +
	    sizeof (struct fcp_ipkt));

	if (ptgt != NULL) {
		icmd->ipkt_tgt = ptgt;
		fpkt->pkt_fca_device = ptgt->tgt_fca_dev;
	}

	fpkt->pkt_comp = fcp_icmd_callback;
	fpkt->pkt_tran_flags = (FC_TRAN_CLASS3 | FC_TRAN_INTR);
	fpkt->pkt_cmdlen = cmd_len;
	fpkt->pkt_rsplen = resp_len;
	fpkt->pkt_datalen = data_len;

	/*
	 * The pkt_ulp_rscn_infop (aka pkt_ulp_rsvd1) field is used to pass the
	 * rscn_count as fcp knows down to the transport. If a valid count was
	 * passed into this function, we allocate memory to actually pass down
	 * this info.
	 *
	 * BTW, if the kmem_zalloc fails, we won't try too hard. This will
	 * basically mean that fcp will not be able to help transport
	 * distinguish if a new RSCN has come after fcp was last informed about
	 * it. In such cases, it might lead to the problem mentioned in CR/bug #
	 * 5068068 where the device might end up going offline in case of RSCN
	 * storms.
	 */
	fpkt->pkt_ulp_rscn_infop = NULL;
	if (rscn_count != FC_INVALID_RSCN_COUNT) {
		fpkt->pkt_ulp_rscn_infop = kmem_zalloc(
		    sizeof (fc_ulp_rscn_info_t), KM_NOSLEEP);
		if (fpkt->pkt_ulp_rscn_infop == NULL) {
			FCP_TRACE(fcp_logq, pptr->port_instbuf,
			    fcp_trace, FCP_BUF_LEVEL_6, 0,
			    "Failed to alloc memory to pass rscn info");
		}
	}

	if (fpkt->pkt_ulp_rscn_infop != NULL) {
		fc_ulp_rscn_info_t	*rscnp;

		rscnp = (fc_ulp_rscn_info_t *)fpkt->pkt_ulp_rscn_infop;
		rscnp->ulp_rscn_count = rscn_count;
	}

	/* Allocate cmd/resp (and optional data) buffers and DMA. */
	if (fcp_alloc_dma(pptr, icmd, nodma, KM_NOSLEEP) != FC_SUCCESS) {
		goto fail;
	}
	dma_setup++;

	/*
	 * Must hold target mutex across setting of pkt_pd and call to
	 * fc_ulp_init_packet to ensure the handle to the target doesn't go
	 * away while we're not looking.
	 */
	if (ptgt != NULL) {
		mutex_enter(&ptgt->tgt_mutex);
		fpkt->pkt_pd = ptgt->tgt_pd_handle;

		/* ask transport to do its initialization on this pkt */
		if (fc_ulp_init_packet(pptr->port_fp_handle, fpkt, KM_NOSLEEP)
		    != FC_SUCCESS) {
			FCP_TRACE(fcp_logq, pptr->port_instbuf,
			    fcp_trace, FCP_BUF_LEVEL_6, 0,
			    "fc_ulp_init_packet failed");
			mutex_exit(&ptgt->tgt_mutex);
			goto fail;
		}
		mutex_exit(&ptgt->tgt_mutex);
	} else {
		if (fc_ulp_init_packet(pptr->port_fp_handle, fpkt, KM_NOSLEEP)
		    != FC_SUCCESS) {
			FCP_TRACE(fcp_logq, pptr->port_instbuf,
			    fcp_trace, FCP_BUF_LEVEL_6, 0,
			    "fc_ulp_init_packet failed");
			goto fail;
		}
	}

	/*
	 * Refuse new internal packets if the port is going away; undo
	 * the transport initialization before bailing out.
	 */
	mutex_enter(&pptr->port_mutex);
	if (pptr->port_state & (FCP_STATE_DETACHING |
	    FCP_STATE_SUSPENDED | FCP_STATE_POWER_DOWN)) {
		int rval;

		mutex_exit(&pptr->port_mutex);

		rval = fc_ulp_uninit_packet(pptr->port_fp_handle, fpkt);
		ASSERT(rval == FC_SUCCESS);

		goto fail;
	}

	/* Account for the outstanding internal packet. */
	if (ptgt != NULL) {
		mutex_enter(&ptgt->tgt_mutex);
		ptgt->tgt_ipkt_cnt++;
		mutex_exit(&ptgt->tgt_mutex);
	}

	pptr->port_ipkt_cnt++;

	mutex_exit(&pptr->port_mutex);

	return (icmd);

fail:
	/* Unwind in reverse order of acquisition. */
	if (fpkt->pkt_ulp_rscn_infop != NULL) {
		kmem_free(fpkt->pkt_ulp_rscn_infop,
		    sizeof (fc_ulp_rscn_info_t));
		fpkt->pkt_ulp_rscn_infop = NULL;
	}

	if (dma_setup) {
		fcp_free_dma(pptr, icmd);
	}
	kmem_free(icmd, sizeof (struct fcp_ipkt) + pptr->port_priv_pkt_len +
	    (size_t)pptr->port_dmacookie_sz);

	return (NULL);
}
5785 
5786 /*
5787  *     Function: fcp_icmd_free
5788  *
5789  *  Description: Frees the internal command passed by the caller.
5790  *
5791  *     Argument: *pptr		Fcp port.
5792  *		 *icmd		Internal packet to free.
5793  *
5794  * Return Value: None
5795  */
5796 static void
5797 fcp_icmd_free(struct fcp_port *pptr, struct fcp_ipkt *icmd)
5798 {
5799 	struct fcp_tgt	*ptgt = icmd->ipkt_tgt;
5800 
5801 	/* Let the underlying layers do their cleanup. */
5802 	(void) fc_ulp_uninit_packet(pptr->port_fp_handle,
5803 	    icmd->ipkt_fpkt);
5804 
5805 	if (icmd->ipkt_fpkt->pkt_ulp_rscn_infop) {
5806 		kmem_free(icmd->ipkt_fpkt->pkt_ulp_rscn_infop,
5807 		    sizeof (fc_ulp_rscn_info_t));
5808 	}
5809 
5810 	fcp_free_dma(pptr, icmd);
5811 
5812 	kmem_free(icmd, sizeof (struct fcp_ipkt) + pptr->port_priv_pkt_len +
5813 	    (size_t)pptr->port_dmacookie_sz);
5814 
5815 	mutex_enter(&pptr->port_mutex);
5816 
5817 	if (ptgt) {
5818 		mutex_enter(&ptgt->tgt_mutex);
5819 		ptgt->tgt_ipkt_cnt--;
5820 		mutex_exit(&ptgt->tgt_mutex);
5821 	}
5822 
5823 	pptr->port_ipkt_cnt--;
5824 	mutex_exit(&pptr->port_mutex);
5825 }
5826 
5827 /*
5828  *     Function: fcp_alloc_dma
5829  *
 *  Description: Allocates the DMA resources required for the internal
5831  *		 packet.
5832  *
5833  *     Argument: *pptr	FCP port.
5834  *		 *icmd	Internal FCP packet.
5835  *		 nodma	Indicates if the Cmd and Resp will be DMAed.
5836  *		 flags	Allocation flags (Sleep or NoSleep).
5837  *
5838  * Return Value: FC_SUCCESS
5839  *		 FC_NOMEM
5840  */
static int
fcp_alloc_dma(struct fcp_port *pptr, struct fcp_ipkt *icmd,
    int nodma, int flags)
{
	int		rval;
	size_t		real_size;
	uint_t		ccount;
	int		bound = 0;	/* data DMA handle is bound */
	int		cmd_resp = 0;	/* cmd/resp pool buffers allocated */
	fc_packet_t	*fpkt;
	ddi_dma_cookie_t	pkt_data_cookie;
	ddi_dma_cookie_t	*cp;
	uint32_t		cnt;

	fpkt = &icmd->ipkt_fc_packet;

	ASSERT(fpkt->pkt_cmd_dma == NULL && fpkt->pkt_data_dma == NULL &&
	    fpkt->pkt_resp_dma == NULL);

	icmd->ipkt_nodma = nodma;

	if (nodma) {
		/* Plain kernel memory for cmd/resp; no DMA mapping. */
		fpkt->pkt_cmd = kmem_zalloc(fpkt->pkt_cmdlen, flags);
		if (fpkt->pkt_cmd == NULL) {
			goto fail;
		}

		fpkt->pkt_resp = kmem_zalloc(fpkt->pkt_rsplen, flags);
		if (fpkt->pkt_resp == NULL) {
			goto fail;
		}
	} else {
		ASSERT(fpkt->pkt_cmdlen && fpkt->pkt_rsplen);

		/* DMA-able cmd/resp buffers from the port's pool. */
		rval = fcp_alloc_cmd_resp(pptr, fpkt, flags);
		if (rval == FC_FAILURE) {
			ASSERT(fpkt->pkt_cmd_dma == NULL &&
			    fpkt->pkt_resp_dma == NULL);
			goto fail;
		}
		cmd_resp++;
	}

	if (fpkt->pkt_datalen != 0) {
		/*
		 * set up DMA handle and memory for the data in this packet
		 */
		if (ddi_dma_alloc_handle(pptr->port_dip,
		    &pptr->port_data_dma_attr, DDI_DMA_DONTWAIT,
		    NULL, &fpkt->pkt_data_dma) != DDI_SUCCESS) {
			goto fail;
		}

		if (ddi_dma_mem_alloc(fpkt->pkt_data_dma, fpkt->pkt_datalen,
		    &pptr->port_dma_acc_attr, DDI_DMA_CONSISTENT,
		    DDI_DMA_DONTWAIT, NULL, &fpkt->pkt_data,
		    &real_size, &fpkt->pkt_data_acc) != DDI_SUCCESS) {
			goto fail;
		}

		/* was DMA mem size gotten < size asked for/needed ?? */
		if (real_size < fpkt->pkt_datalen) {
			goto fail;
		}

		/* bind DMA address and handle together */
		if (ddi_dma_addr_bind_handle(fpkt->pkt_data_dma,
		    NULL, fpkt->pkt_data, real_size, DDI_DMA_READ |
		    DDI_DMA_CONSISTENT, DDI_DMA_DONTWAIT, NULL,
		    &pkt_data_cookie, &ccount) != DDI_DMA_MAPPED) {
			goto fail;
		}
		bound++;

		/* More cookies than the device attributes allow. */
		if (ccount > pptr->port_data_dma_attr.dma_attr_sgllen) {
			goto fail;
		}

		fpkt->pkt_data_cookie_cnt = ccount;

		/*
		 * Copy all the cookies into the pre-allocated array at
		 * fpkt->pkt_data_cookie (the sgllen check above bounds
		 * the count).
		 */
		cp = fpkt->pkt_data_cookie;
		*cp = pkt_data_cookie;
		cp++;

		for (cnt = 1; cnt < ccount; cnt++, cp++) {
			ddi_dma_nextcookie(fpkt->pkt_data_dma,
			    &pkt_data_cookie);
			*cp = pkt_data_cookie;
		}

	}

	return (FC_SUCCESS);

fail:
	/* Unwind whatever was acquired, in reverse order. */
	if (bound) {
		(void) ddi_dma_unbind_handle(fpkt->pkt_data_dma);
	}

	if (fpkt->pkt_data_dma) {
		if (fpkt->pkt_data) {
			ddi_dma_mem_free(&fpkt->pkt_data_acc);
		}
		ddi_dma_free_handle(&fpkt->pkt_data_dma);
	}

	if (nodma) {
		if (fpkt->pkt_cmd) {
			kmem_free(fpkt->pkt_cmd, fpkt->pkt_cmdlen);
		}
		if (fpkt->pkt_resp) {
			kmem_free(fpkt->pkt_resp, fpkt->pkt_rsplen);
		}
	} else {
		if (cmd_resp) {
			fcp_free_cmd_resp(pptr, fpkt);
		}
	}

	return (FC_NOMEM);
}
5962 
5963 
5964 static void
5965 fcp_free_dma(struct fcp_port *pptr, struct fcp_ipkt *icmd)
5966 {
5967 	fc_packet_t *fpkt = icmd->ipkt_fpkt;
5968 
5969 	if (fpkt->pkt_data_dma) {
5970 		(void) ddi_dma_unbind_handle(fpkt->pkt_data_dma);
5971 		if (fpkt->pkt_data) {
5972 			ddi_dma_mem_free(&fpkt->pkt_data_acc);
5973 		}
5974 		ddi_dma_free_handle(&fpkt->pkt_data_dma);
5975 	}
5976 
5977 	if (icmd->ipkt_nodma) {
5978 		if (fpkt->pkt_cmd) {
5979 			kmem_free(fpkt->pkt_cmd, icmd->ipkt_cmdlen);
5980 		}
5981 		if (fpkt->pkt_resp) {
5982 			kmem_free(fpkt->pkt_resp, icmd->ipkt_resplen);
5983 		}
5984 	} else {
5985 		ASSERT(fpkt->pkt_resp_dma != NULL && fpkt->pkt_cmd_dma != NULL);
5986 
5987 		fcp_free_cmd_resp(pptr, fpkt);
5988 	}
5989 }
5990 
5991 /*
5992  *     Function: fcp_lookup_target
5993  *
5994  *  Description: Finds a target given a WWN.
5995  *
5996  *     Argument: *pptr	FCP port.
5997  *		 *wwn	World Wide Name of the device to look for.
5998  *
5999  * Return Value: NULL		No target found
6000  *		 Not NULL	Target structure
6001  *
6002  *	Context: Interrupt context.
6003  *		 The mutex pptr->port_mutex must be owned.
6004  */
6005 /* ARGSUSED */
6006 static struct fcp_tgt *
6007 fcp_lookup_target(struct fcp_port *pptr, uchar_t *wwn)
6008 {
6009 	int			hash;
6010 	struct fcp_tgt	*ptgt;
6011 
6012 	ASSERT(mutex_owned(&pptr->port_mutex));
6013 
6014 	hash = FCP_HASH(wwn);
6015 
6016 	for (ptgt = pptr->port_tgt_hash_table[hash]; ptgt != NULL;
6017 	    ptgt = ptgt->tgt_next) {
6018 		if (!(ptgt->tgt_state & FCP_TGT_ORPHAN) &&
6019 		    bcmp((caddr_t)wwn, (caddr_t)&ptgt->tgt_port_wwn.raw_wwn[0],
6020 		    sizeof (ptgt->tgt_port_wwn)) == 0) {
6021 			break;
6022 		}
6023 	}
6024 
6025 	return (ptgt);
6026 }
6027 
6028 
6029 /*
6030  * Find target structure given a port identifier
6031  */
6032 static struct fcp_tgt *
6033 fcp_get_target_by_did(struct fcp_port *pptr, uint32_t d_id)
6034 {
6035 	fc_portid_t		port_id;
6036 	la_wwn_t		pwwn;
6037 	struct fcp_tgt	*ptgt = NULL;
6038 
6039 	port_id.priv_lilp_posit = 0;
6040 	port_id.port_id = d_id;
6041 	if (fc_ulp_get_pwwn_by_did(pptr->port_fp_handle, port_id,
6042 	    &pwwn) == FC_SUCCESS) {
6043 		mutex_enter(&pptr->port_mutex);
6044 		ptgt = fcp_lookup_target(pptr, pwwn.raw_wwn);
6045 		mutex_exit(&pptr->port_mutex);
6046 	}
6047 
6048 	return (ptgt);
6049 }
6050 
6051 
/*
 *     Function: fcp_icmd_callback
 *
 *  Description: Packet completion callback for internal ELS commands,
 *		 i.e. fpkt carries the response to either a PLOGI or a
 *		 PRLI.	On a successful PLOGI the matching PRLI is sent,
 *		 reusing the same internal packet.  On a successful PRLI
 *		 the target is brought out of the offline state and
 *		 discovery continues with a REPORT LUNS to LUN 0.  On
 *		 failure the command is retried (when fcp_is_retryable()
 *		 and the retry count allow it); otherwise the internal
 *		 packet is released and the initialization accounting is
 *		 completed through fcp_call_finish_init().
 *
 *     Argument: *fpkt	 FC packet used to convey the ELS command.
 *
 * Return Value: None
 *
 *	Context: Interrupt context (completion callback).
 */
static void
fcp_icmd_callback(fc_packet_t *fpkt)
{
	struct fcp_ipkt	*icmd;
	struct fcp_port	*pptr;
	struct fcp_tgt	*ptgt;
	struct la_els_prli	*prli;
	struct la_els_prli	prli_s;
	struct fcp_prli		*fprli;
	struct fcp_lun	*plun;
	int		free_pkt = 1;	/* cleared once icmd ownership is handed off */
	int		rval;
	ls_code_t	resp;
	uchar_t		prli_acc = 0;
	uint32_t	rscn_count = FC_INVALID_RSCN_COUNT;
	int		lun0_newalloc;

	icmd = (struct fcp_ipkt *)fpkt->pkt_ulp_private;

	/* get ptrs to the port and target structs for the cmd */
	pptr = icmd->ipkt_port;
	ptgt = icmd->ipkt_tgt;

	/* pull in the ls_code of the ELS response payload */
	FCP_CP_IN(fpkt->pkt_resp, &resp, fpkt->pkt_resp_acc, sizeof (resp));

	if (icmd->ipkt_opcode == LA_ELS_PRLI) {
		/*
		 * NOTE(review): the ACC is read back from the *command*
		 * buffer here, not the response buffer — presumably some
		 * FCAs deposit the PRLI ACC in pkt_cmd; confirm against
		 * the transport before changing.
		 */
		FCP_CP_IN(fpkt->pkt_cmd, &prli_s, fpkt->pkt_cmd_acc,
		    sizeof (prli_s));
		prli_acc = (prli_s.ls_code == LA_ELS_ACC);
	}

	FCP_TRACE(fcp_logq, pptr->port_instbuf,
	    fcp_trace, FCP_BUF_LEVEL_2, 0,
	    "ELS (%x) callback state=0x%x reason=0x%x for %x",
	    icmd->ipkt_opcode, fpkt->pkt_state, fpkt->pkt_reason,
	    ptgt->tgt_d_id);

	if ((fpkt->pkt_state == FC_PKT_SUCCESS) &&
	    ((resp.ls_code == LA_ELS_ACC) || prli_acc)) {

		mutex_enter(&ptgt->tgt_mutex);
		if (ptgt->tgt_pd_handle == NULL) {
			/*
			 * in a fabric environment the port device handles
			 * get created only after successful LOGIN into the
			 * transport, so the transport makes this port
			 * device (pd) handle available in this packet, so
			 * save it now
			 */
			ASSERT(fpkt->pkt_pd != NULL);
			ptgt->tgt_pd_handle = fpkt->pkt_pd;
		}
		mutex_exit(&ptgt->tgt_mutex);

		/* which ELS cmd is this response for ?? */
		switch (icmd->ipkt_opcode) {
		case LA_ELS_PLOGI:
			FCP_TRACE(fcp_logq, pptr->port_instbuf,
			    fcp_trace, FCP_BUF_LEVEL_5, 0,
			    "PLOGI to d_id=0x%x succeeded, wwn=%08x%08x",
			    ptgt->tgt_d_id,
			    *((int *)&ptgt->tgt_port_wwn.raw_wwn[0]),
			    *((int *)&ptgt->tgt_port_wwn.raw_wwn[4]));

			FCP_TGT_TRACE(ptgt, icmd->ipkt_change_cnt,
			    FCP_TGT_TRACE_15);

			/* Note that we are not allocating a new icmd */
			if (fcp_send_els(pptr, ptgt, icmd, LA_ELS_PRLI,
			    icmd->ipkt_link_cnt, icmd->ipkt_change_cnt,
			    icmd->ipkt_cause) != DDI_SUCCESS) {
				FCP_TGT_TRACE(ptgt, icmd->ipkt_change_cnt,
				    FCP_TGT_TRACE_16);
				goto fail;
			}
			break;

		case LA_ELS_PRLI:
			FCP_TRACE(fcp_logq, pptr->port_instbuf,
			    fcp_trace, FCP_BUF_LEVEL_5, 0,
			    "PRLI to d_id=0x%x succeeded", ptgt->tgt_d_id);

			FCP_TGT_TRACE(ptgt, icmd->ipkt_change_cnt,
			    FCP_TGT_TRACE_17);

			prli = &prli_s;

			/* reread the full PRLI ACC from the response buffer */
			FCP_CP_IN(fpkt->pkt_resp, prli, fpkt->pkt_resp_acc,
			    sizeof (prli_s));

			fprli = (struct fcp_prli *)prli->service_params;

			/* record the peer's initiator/target capabilities */
			mutex_enter(&ptgt->tgt_mutex);
			ptgt->tgt_icap = fprli->initiator_fn;
			ptgt->tgt_tcap = fprli->target_fn;
			mutex_exit(&ptgt->tgt_mutex);

			if ((fprli->type != 0x08) || (fprli->target_fn != 1)) {
				/*
				 * this FCP device does not support target mode
				 */
				FCP_TGT_TRACE(ptgt, icmd->ipkt_change_cnt,
				    FCP_TGT_TRACE_18);
				goto fail;
			}
			if (fprli->retry == 1) {
				fc_ulp_disable_relogin(pptr->port_fp_handle,
				    &ptgt->tgt_port_wwn);
			}

			/* target is no longer offline */
			mutex_enter(&pptr->port_mutex);
			mutex_enter(&ptgt->tgt_mutex);
			if (!FCP_TGT_STATE_CHANGED(ptgt, icmd)) {
				ptgt->tgt_state &= ~(FCP_TGT_OFFLINE |
				    FCP_TGT_MARK);
			} else {
				FCP_TRACE(fcp_logq, pptr->port_instbuf,
				    fcp_trace, FCP_BUF_LEVEL_2, 0,
				    "fcp_icmd_callback,1: state change "
				    " occured for D_ID=0x%x", ptgt->tgt_d_id);
				mutex_exit(&ptgt->tgt_mutex);
				mutex_exit(&pptr->port_mutex);
				goto fail;
			}
			mutex_exit(&ptgt->tgt_mutex);
			mutex_exit(&pptr->port_mutex);

			/*
			 * lun 0 should always respond to inquiry, so
			 * get the LUN struct for LUN 0
			 *
			 * Currently we deal with first level of addressing.
			 * If / when we start supporting 0x device types
			 * (DTYPE_ARRAY_CTRL, i.e. array controllers)
			 * this logic will need revisiting.
			 */
			lun0_newalloc = 0;
			if ((plun = fcp_get_lun(ptgt, 0)) == NULL) {
				/*
				 * no LUN struct for LUN 0 yet exists,
				 * so create one
				 */
				plun = fcp_alloc_lun(ptgt);
				if (plun == NULL) {
					fcp_log(CE_WARN, pptr->port_dip,
					    "!Failed to allocate lun 0 for"
					    " D_ID=%x", ptgt->tgt_d_id);
					goto fail;
				}
				lun0_newalloc = 1;
			}

			/* fill in LUN info */
			mutex_enter(&ptgt->tgt_mutex);
			/*
			 * consider lun 0 as device not connected if it is
			 * offlined or newly allocated
			 */
			if ((plun->lun_state & FCP_LUN_OFFLINE) ||
			    lun0_newalloc) {
				plun->lun_state |= FCP_LUN_DEVICE_NOT_CONNECTED;
			}
			plun->lun_state |= (FCP_LUN_BUSY | FCP_LUN_MARK);
			plun->lun_state &= ~FCP_LUN_OFFLINE;
			ptgt->tgt_lun_cnt = 1;
			ptgt->tgt_report_lun_cnt = 0;
			mutex_exit(&ptgt->tgt_mutex);

			/* Retrieve the rscn count (if a valid one exists) */
			if (icmd->ipkt_fpkt->pkt_ulp_rscn_infop != NULL) {
				rscn_count = ((fc_ulp_rscn_info_t *)
				    (icmd->ipkt_fpkt->pkt_ulp_rscn_infop))
				    ->ulp_rscn_count;
			} else {
				rscn_count = FC_INVALID_RSCN_COUNT;
			}

			/* send Report Lun request to target */
			if (fcp_send_scsi(plun, SCMD_REPORT_LUN,
			    sizeof (struct fcp_reportlun_resp),
			    icmd->ipkt_link_cnt, icmd->ipkt_change_cnt,
			    icmd->ipkt_cause, rscn_count) != DDI_SUCCESS) {
				mutex_enter(&pptr->port_mutex);
				if (!FCP_TGT_STATE_CHANGED(ptgt, icmd)) {
					fcp_log(CE_WARN, pptr->port_dip,
					    "!Failed to send REPORT LUN to"
					    "  D_ID=%x", ptgt->tgt_d_id);
				} else {
					FCP_TRACE(fcp_logq,
					    pptr->port_instbuf, fcp_trace,
					    FCP_BUF_LEVEL_5, 0,
					    "fcp_icmd_callback,2:state change"
					    " occured for D_ID=0x%x",
					    ptgt->tgt_d_id);
				}
				mutex_exit(&pptr->port_mutex);

				FCP_TGT_TRACE(ptgt, icmd->ipkt_change_cnt,
				    FCP_TGT_TRACE_19);

				goto fail;
			} else {
				/*
				 * REPORT LUNS is in flight; this icmd is
				 * done, free it here but do NOT run the
				 * common fail/finish path below.
				 */
				free_pkt = 0;
				fcp_icmd_free(pptr, icmd);
			}
			break;

		default:
			fcp_log(CE_WARN, pptr->port_dip,
			    "!fcp_icmd_callback Invalid opcode");
			goto fail;
		}

		return;
	}


	/*
	 * Other PLOGI failures are not retried as the
	 * transport does it already
	 */
	if (icmd->ipkt_opcode != LA_ELS_PLOGI) {
		if (fcp_is_retryable(icmd) &&
		    icmd->ipkt_retries++ < FCP_MAX_RETRIES) {

			if (FCP_MUST_RETRY(fpkt)) {
				fcp_queue_ipkt(pptr, fpkt);
				return;
			}

			FCP_TRACE(fcp_logq, pptr->port_instbuf,
			    fcp_trace, FCP_BUF_LEVEL_2, 0,
			    "ELS PRLI is retried for d_id=0x%x, state=%x,"
			    " reason= %x", ptgt->tgt_d_id, fpkt->pkt_state,
			    fpkt->pkt_reason);

			/*
			 * Retry by recalling the routine that
			 * originally queued this packet
			 */
			mutex_enter(&pptr->port_mutex);
			if (!FCP_TGT_STATE_CHANGED(ptgt, icmd)) {
				caddr_t msg;

				mutex_exit(&pptr->port_mutex);

				ASSERT(icmd->ipkt_opcode != LA_ELS_PLOGI);

				/* give a timed-out command more time */
				if (fpkt->pkt_state == FC_PKT_TIMEOUT) {
					fpkt->pkt_timeout +=
					    FCP_TIMEOUT_DELTA;
				}

				rval = fc_ulp_issue_els(pptr->port_fp_handle,
				    fpkt);
				if (rval == FC_SUCCESS) {
					return;
				}

				/* transient transport state: park for later */
				if (rval == FC_STATEC_BUSY ||
				    rval == FC_OFFLINE) {
					fcp_queue_ipkt(pptr, fpkt);
					return;
				}
				(void) fc_ulp_error(rval, &msg);

				fcp_log(CE_NOTE, pptr->port_dip,
				    "!ELS 0x%x failed to d_id=0x%x;"
				    " %s", icmd->ipkt_opcode,
				    ptgt->tgt_d_id, msg);
			} else {
				FCP_TRACE(fcp_logq, pptr->port_instbuf,
				    fcp_trace, FCP_BUF_LEVEL_2, 0,
				    "fcp_icmd_callback,3: state change "
				    " occured for D_ID=0x%x", ptgt->tgt_d_id);
				mutex_exit(&pptr->port_mutex);
			}
		}
	} else {
		if (fcp_is_retryable(icmd) &&
		    icmd->ipkt_retries++ < FCP_MAX_RETRIES) {
			if (FCP_MUST_RETRY(fpkt)) {
				fcp_queue_ipkt(pptr, fpkt);
				return;
			}
		}
		mutex_enter(&pptr->port_mutex);
		if (!FCP_TGT_STATE_CHANGED(ptgt, icmd) &&
		    fpkt->pkt_state != FC_PKT_PORT_OFFLINE) {
			mutex_exit(&pptr->port_mutex);
			fcp_print_error(fpkt);
		} else {
			FCP_TRACE(fcp_logq, pptr->port_instbuf,
			    fcp_trace, FCP_BUF_LEVEL_2, 0,
			    "fcp_icmd_callback,4: state change occured"
			    " for D_ID=0x%x", ptgt->tgt_d_id);
			mutex_exit(&pptr->port_mutex);
		}
	}

fail:
	/*
	 * Common exit: complete the initialization accounting and release
	 * the internal packet, unless ownership was already handed off
	 * (free_pkt cleared above).
	 */
	if (free_pkt) {
		(void) fcp_call_finish_init(pptr, ptgt, icmd->ipkt_link_cnt,
		    icmd->ipkt_change_cnt, icmd->ipkt_cause);
		fcp_icmd_free(pptr, icmd);
	}
}
6368 
6369 
6370 /*
6371  * called internally to send an info cmd using the transport
6372  *
6373  * sends either an INQ or a REPORT_LUN
6374  *
6375  * when the packet is completed fcp_scsi_callback is called
6376  */
6377 static int
6378 fcp_send_scsi(struct fcp_lun *plun, uchar_t opcode, int alloc_len,
6379     int lcount, int tcount, int cause, uint32_t rscn_count)
6380 {
6381 	int			nodma;
6382 	struct fcp_ipkt		*icmd;
6383 	struct fcp_tgt		*ptgt;
6384 	struct fcp_port		*pptr;
6385 	fc_frame_hdr_t		*hp;
6386 	fc_packet_t		*fpkt;
6387 	struct fcp_cmd		fcp_cmd;
6388 	struct fcp_cmd		*fcmd;
6389 	union scsi_cdb		*scsi_cdb;
6390 
6391 	ASSERT(plun != NULL);
6392 
6393 	ptgt = plun->lun_tgt;
6394 	ASSERT(ptgt != NULL);
6395 
6396 	pptr = ptgt->tgt_port;
6397 	ASSERT(pptr != NULL);
6398 
6399 	FCP_TRACE(fcp_logq, pptr->port_instbuf,
6400 	    fcp_trace, FCP_BUF_LEVEL_5, 0,
6401 	    "fcp_send_scsi: d_id=0x%x opcode=0x%x", ptgt->tgt_d_id, opcode);
6402 
6403 	nodma = (pptr->port_fcp_dma == FC_NO_DVMA_SPACE) ? 1 : 0;
6404 
6405 	icmd = fcp_icmd_alloc(pptr, ptgt, sizeof (struct fcp_cmd),
6406 	    FCP_MAX_RSP_IU_SIZE, alloc_len, nodma, lcount, tcount, cause,
6407 	    rscn_count);
6408 
6409 	if (icmd == NULL) {
6410 		return (DDI_FAILURE);
6411 	}
6412 
6413 	fpkt = icmd->ipkt_fpkt;
6414 	fpkt->pkt_tran_flags = FC_TRAN_CLASS3 | FC_TRAN_INTR;
6415 	icmd->ipkt_retries = 0;
6416 	icmd->ipkt_opcode = opcode;
6417 	icmd->ipkt_lun = plun;
6418 
6419 	if (nodma) {
6420 		fcmd = (struct fcp_cmd *)fpkt->pkt_cmd;
6421 	} else {
6422 		fcmd = &fcp_cmd;
6423 	}
6424 	bzero(fcmd, sizeof (struct fcp_cmd));
6425 
6426 	fpkt->pkt_timeout = FCP_SCSI_CMD_TIMEOUT;
6427 
6428 	hp = &fpkt->pkt_cmd_fhdr;
6429 
6430 	hp->s_id = pptr->port_id;
6431 	hp->d_id = ptgt->tgt_d_id;
6432 	hp->r_ctl = R_CTL_COMMAND;
6433 	hp->type = FC_TYPE_SCSI_FCP;
6434 	hp->f_ctl = F_CTL_SEQ_INITIATIVE | F_CTL_FIRST_SEQ;
6435 	hp->rsvd = 0;
6436 	hp->seq_id = 0;
6437 	hp->seq_cnt = 0;
6438 	hp->ox_id = 0xffff;
6439 	hp->rx_id = 0xffff;
6440 	hp->ro = 0;
6441 
6442 	bcopy(&(plun->lun_addr), &(fcmd->fcp_ent_addr), FCP_LUN_SIZE);
6443 
6444 	/*
6445 	 * Request SCSI target for expedited processing
6446 	 */
6447 
6448 	/*
6449 	 * Set up for untagged queuing because we do not
6450 	 * know if the fibre device supports queuing.
6451 	 */
6452 	fcmd->fcp_cntl.cntl_reserved_0 = 0;
6453 	fcmd->fcp_cntl.cntl_reserved_1 = 0;
6454 	fcmd->fcp_cntl.cntl_reserved_2 = 0;
6455 	fcmd->fcp_cntl.cntl_reserved_3 = 0;
6456 	fcmd->fcp_cntl.cntl_reserved_4 = 0;
6457 	fcmd->fcp_cntl.cntl_qtype = FCP_QTYPE_UNTAGGED;
6458 	scsi_cdb = (union scsi_cdb *)fcmd->fcp_cdb;
6459 
6460 	switch (opcode) {
6461 	case SCMD_INQUIRY_PAGE83:
6462 		/*
6463 		 * Prepare to get the Inquiry VPD page 83 information
6464 		 */
6465 		fcmd->fcp_cntl.cntl_read_data = 1;
6466 		fcmd->fcp_cntl.cntl_write_data = 0;
6467 		fcmd->fcp_data_len = alloc_len;
6468 
6469 		fpkt->pkt_tran_type = FC_PKT_FCP_READ;
6470 		fpkt->pkt_comp = fcp_scsi_callback;
6471 
6472 		scsi_cdb->scc_cmd = SCMD_INQUIRY;
6473 		scsi_cdb->g0_addr2 = 0x01;
6474 		scsi_cdb->g0_addr1 = 0x83;
6475 		scsi_cdb->g0_count0 = (uchar_t)alloc_len;
6476 		break;
6477 
6478 	case SCMD_INQUIRY:
6479 		fcmd->fcp_cntl.cntl_read_data = 1;
6480 		fcmd->fcp_cntl.cntl_write_data = 0;
6481 		fcmd->fcp_data_len = alloc_len;
6482 
6483 		fpkt->pkt_tran_type = FC_PKT_FCP_READ;
6484 		fpkt->pkt_comp = fcp_scsi_callback;
6485 
6486 		scsi_cdb->scc_cmd = SCMD_INQUIRY;
6487 		scsi_cdb->g0_count0 = SUN_INQSIZE;
6488 		break;
6489 
6490 	case SCMD_REPORT_LUN: {
6491 		fc_portid_t	d_id;
6492 		opaque_t	fca_dev;
6493 
6494 		ASSERT(alloc_len >= 16);
6495 
6496 		d_id.priv_lilp_posit = 0;
6497 		d_id.port_id = ptgt->tgt_d_id;
6498 
6499 		fca_dev = fc_ulp_get_fca_device(pptr->port_fp_handle, d_id);
6500 
6501 		mutex_enter(&ptgt->tgt_mutex);
6502 		ptgt->tgt_fca_dev = fca_dev;
6503 		mutex_exit(&ptgt->tgt_mutex);
6504 
6505 		fcmd->fcp_cntl.cntl_read_data = 1;
6506 		fcmd->fcp_cntl.cntl_write_data = 0;
6507 		fcmd->fcp_data_len = alloc_len;
6508 
6509 		fpkt->pkt_tran_type = FC_PKT_FCP_READ;
6510 		fpkt->pkt_comp = fcp_scsi_callback;
6511 
6512 		scsi_cdb->scc_cmd = SCMD_REPORT_LUN;
6513 		scsi_cdb->scc5_count0 = alloc_len & 0xff;
6514 		scsi_cdb->scc5_count1 = (alloc_len >> 8) & 0xff;
6515 		scsi_cdb->scc5_count2 = (alloc_len >> 16) & 0xff;
6516 		scsi_cdb->scc5_count3 = (alloc_len >> 24) & 0xff;
6517 		break;
6518 	}
6519 
6520 	default:
6521 		fcp_log(CE_WARN, pptr->port_dip,
6522 		    "!fcp_send_scsi Invalid opcode");
6523 		break;
6524 	}
6525 
6526 	if (!nodma) {
6527 		FCP_CP_OUT((uint8_t *)fcmd, fpkt->pkt_cmd,
6528 		    fpkt->pkt_cmd_acc, sizeof (struct fcp_cmd));
6529 	}
6530 
6531 	mutex_enter(&pptr->port_mutex);
6532 	if (!FCP_TGT_STATE_CHANGED(ptgt, icmd)) {
6533 
6534 		mutex_exit(&pptr->port_mutex);
6535 		if (fcp_transport(pptr->port_fp_handle, fpkt, 1) !=
6536 		    FC_SUCCESS) {
6537 			fcp_icmd_free(pptr, icmd);
6538 			return (DDI_FAILURE);
6539 		}
6540 		return (DDI_SUCCESS);
6541 	} else {
6542 		FCP_TRACE(fcp_logq, pptr->port_instbuf,
6543 		    fcp_trace, FCP_BUF_LEVEL_2, 0,
6544 		    "fcp_send_scsi,1: state change occured"
6545 		    " for D_ID=0x%x", ptgt->tgt_d_id);
6546 		mutex_exit(&pptr->port_mutex);
6547 		fcp_icmd_free(pptr, icmd);
6548 		return (DDI_FAILURE);
6549 	}
6550 }
6551 
6552 
6553 /*
6554  * called by fcp_scsi_callback to check to handle the case where
6555  * REPORT_LUN returns ILLEGAL REQUEST or a UNIT ATTENTION
6556  */
6557 static int
6558 fcp_check_reportlun(struct fcp_rsp *rsp, fc_packet_t *fpkt)
6559 {
6560 	uchar_t				rqlen;
6561 	int				rval = DDI_FAILURE;
6562 	struct scsi_extended_sense	sense_info, *sense;
6563 	struct fcp_ipkt		*icmd = (struct fcp_ipkt *)
6564 	    fpkt->pkt_ulp_private;
6565 	struct fcp_tgt		*ptgt = icmd->ipkt_tgt;
6566 	struct fcp_port		*pptr = ptgt->tgt_port;
6567 
6568 	ASSERT(icmd->ipkt_opcode == SCMD_REPORT_LUN);
6569 
6570 	if (rsp->fcp_u.fcp_status.scsi_status == STATUS_RESERVATION_CONFLICT) {
6571 		/*
6572 		 * SCSI-II Reserve Release support. Some older FC drives return
6573 		 * Reservation conflict for Report Luns command.
6574 		 */
6575 		if (icmd->ipkt_nodma) {
6576 			rsp->fcp_u.fcp_status.rsp_len_set = 0;
6577 			rsp->fcp_u.fcp_status.sense_len_set = 0;
6578 			rsp->fcp_u.fcp_status.scsi_status = STATUS_GOOD;
6579 		} else {
6580 			fcp_rsp_t	new_resp;
6581 
6582 			FCP_CP_IN(fpkt->pkt_resp, &new_resp,
6583 			    fpkt->pkt_resp_acc, sizeof (new_resp));
6584 
6585 			new_resp.fcp_u.fcp_status.rsp_len_set = 0;
6586 			new_resp.fcp_u.fcp_status.sense_len_set = 0;
6587 			new_resp.fcp_u.fcp_status.scsi_status = STATUS_GOOD;
6588 
6589 			FCP_CP_OUT(&new_resp, fpkt->pkt_resp,
6590 			    fpkt->pkt_resp_acc, sizeof (new_resp));
6591 		}
6592 
6593 		FCP_CP_OUT(fcp_dummy_lun, fpkt->pkt_data,
6594 		    fpkt->pkt_data_acc, sizeof (fcp_dummy_lun));
6595 
6596 		return (DDI_SUCCESS);
6597 	}
6598 
6599 	sense = &sense_info;
6600 	if (!rsp->fcp_u.fcp_status.sense_len_set) {
6601 		/* no need to continue if sense length is not set */
6602 		return (rval);
6603 	}
6604 
6605 	/* casting 64-bit integer to 8-bit */
6606 	rqlen = (uchar_t)min(rsp->fcp_sense_len,
6607 	    sizeof (struct scsi_extended_sense));
6608 
6609 	if (rqlen < 14) {
6610 		/* no need to continue if request length isn't long enough */
6611 		return (rval);
6612 	}
6613 
6614 	if (icmd->ipkt_nodma) {
6615 		/*
6616 		 * We can safely use fcp_response_len here since the
6617 		 * only path that calls fcp_check_reportlun,
6618 		 * fcp_scsi_callback, has already called
6619 		 * fcp_validate_fcp_response.
6620 		 */
6621 		sense = (struct scsi_extended_sense *)(fpkt->pkt_resp +
6622 		    sizeof (struct fcp_rsp) + rsp->fcp_response_len);
6623 	} else {
6624 		FCP_CP_IN(fpkt->pkt_resp + sizeof (struct fcp_rsp) +
6625 		    rsp->fcp_response_len, sense, fpkt->pkt_resp_acc,
6626 		    sizeof (struct scsi_extended_sense));
6627 	}
6628 
6629 	if (!FCP_SENSE_NO_LUN(sense)) {
6630 		mutex_enter(&ptgt->tgt_mutex);
6631 		/* clear the flag if any */
6632 		ptgt->tgt_state &= ~FCP_TGT_ILLREQ;
6633 		mutex_exit(&ptgt->tgt_mutex);
6634 	}
6635 
6636 	if ((sense->es_key == KEY_ILLEGAL_REQUEST) &&
6637 	    (sense->es_add_code == 0x20)) {
6638 		if (icmd->ipkt_nodma) {
6639 			rsp->fcp_u.fcp_status.rsp_len_set = 0;
6640 			rsp->fcp_u.fcp_status.sense_len_set = 0;
6641 			rsp->fcp_u.fcp_status.scsi_status = STATUS_GOOD;
6642 		} else {
6643 			fcp_rsp_t	new_resp;
6644 
6645 			FCP_CP_IN(fpkt->pkt_resp, &new_resp,
6646 			    fpkt->pkt_resp_acc, sizeof (new_resp));
6647 
6648 			new_resp.fcp_u.fcp_status.rsp_len_set = 0;
6649 			new_resp.fcp_u.fcp_status.sense_len_set = 0;
6650 			new_resp.fcp_u.fcp_status.scsi_status = STATUS_GOOD;
6651 
6652 			FCP_CP_OUT(&new_resp, fpkt->pkt_resp,
6653 			    fpkt->pkt_resp_acc, sizeof (new_resp));
6654 		}
6655 
6656 		FCP_CP_OUT(fcp_dummy_lun, fpkt->pkt_data,
6657 		    fpkt->pkt_data_acc, sizeof (fcp_dummy_lun));
6658 
6659 		return (DDI_SUCCESS);
6660 	}
6661 
6662 	/*
6663 	 * This is for the STK library which returns a check condition,
6664 	 * to indicate device is not ready, manual assistance needed.
6665 	 * This is to a report lun command when the door is open.
6666 	 */
6667 	if ((sense->es_key == KEY_NOT_READY) && (sense->es_add_code == 0x04)) {
6668 		if (icmd->ipkt_nodma) {
6669 			rsp->fcp_u.fcp_status.rsp_len_set = 0;
6670 			rsp->fcp_u.fcp_status.sense_len_set = 0;
6671 			rsp->fcp_u.fcp_status.scsi_status = STATUS_GOOD;
6672 		} else {
6673 			fcp_rsp_t	new_resp;
6674 
6675 			FCP_CP_IN(fpkt->pkt_resp, &new_resp,
6676 			    fpkt->pkt_resp_acc, sizeof (new_resp));
6677 
6678 			new_resp.fcp_u.fcp_status.rsp_len_set = 0;
6679 			new_resp.fcp_u.fcp_status.sense_len_set = 0;
6680 			new_resp.fcp_u.fcp_status.scsi_status = STATUS_GOOD;
6681 
6682 			FCP_CP_OUT(&new_resp, fpkt->pkt_resp,
6683 			    fpkt->pkt_resp_acc, sizeof (new_resp));
6684 		}
6685 
6686 		FCP_CP_OUT(fcp_dummy_lun, fpkt->pkt_data,
6687 		    fpkt->pkt_data_acc, sizeof (fcp_dummy_lun));
6688 
6689 		return (DDI_SUCCESS);
6690 	}
6691 
6692 	if ((FCP_SENSE_REPORTLUN_CHANGED(sense)) ||
6693 	    (FCP_SENSE_NO_LUN(sense))) {
6694 		mutex_enter(&ptgt->tgt_mutex);
6695 		if ((FCP_SENSE_NO_LUN(sense)) &&
6696 		    (ptgt->tgt_state & FCP_TGT_ILLREQ)) {
6697 			ptgt->tgt_state &= ~FCP_TGT_ILLREQ;
6698 			mutex_exit(&ptgt->tgt_mutex);
6699 			/*
6700 			 * reconfig was triggred by ILLEGAL REQUEST but
6701 			 * got ILLEGAL REQUEST again
6702 			 */
6703 			FCP_TRACE(fcp_logq, pptr->port_instbuf,
6704 			    fcp_trace, FCP_BUF_LEVEL_3, 0,
6705 			    "!FCP: Unable to obtain Report Lun data"
6706 			    " target=%x", ptgt->tgt_d_id);
6707 		} else {
6708 			if (ptgt->tgt_tid == NULL) {
6709 				timeout_id_t	tid;
6710 				/*
6711 				 * REPORT LUN data has changed.	 Kick off
6712 				 * rediscovery
6713 				 */
6714 				tid = timeout(fcp_reconfigure_luns,
6715 				    (caddr_t)ptgt, (clock_t)drv_usectohz(1));
6716 
6717 				ptgt->tgt_tid = tid;
6718 				ptgt->tgt_state |= FCP_TGT_BUSY;
6719 			}
6720 			if (FCP_SENSE_NO_LUN(sense)) {
6721 				ptgt->tgt_state |= FCP_TGT_ILLREQ;
6722 			}
6723 			mutex_exit(&ptgt->tgt_mutex);
6724 			if (FCP_SENSE_REPORTLUN_CHANGED(sense)) {
6725 				FCP_TRACE(fcp_logq, pptr->port_instbuf,
6726 				    fcp_trace, FCP_BUF_LEVEL_3, 0,
6727 				    "!FCP:Report Lun Has Changed"
6728 				    " target=%x", ptgt->tgt_d_id);
6729 			} else if (FCP_SENSE_NO_LUN(sense)) {
6730 				FCP_TRACE(fcp_logq, pptr->port_instbuf,
6731 				    fcp_trace, FCP_BUF_LEVEL_3, 0,
6732 				    "!FCP:LU Not Supported"
6733 				    " target=%x", ptgt->tgt_d_id);
6734 			}
6735 		}
6736 		rval = DDI_SUCCESS;
6737 	}
6738 
6739 	FCP_TRACE(fcp_logq, pptr->port_instbuf,
6740 	    fcp_trace, FCP_BUF_LEVEL_5, 0,
6741 	    "D_ID=%x, sense=%x, status=%x",
6742 	    fpkt->pkt_cmd_fhdr.d_id, sense->es_key,
6743 	    rsp->fcp_u.fcp_status.scsi_status);
6744 
6745 	return (rval);
6746 }
6747 
/*
 *     Function: fcp_scsi_callback
 *
 *  Description: This is the callback routine set by fcp_send_scsi() after
 *		 it calls fcp_icmd_alloc().  The SCSI commands completed here
 *		 and autogenerated by FCP are:	REPORT_LUN, INQUIRY and
 *		 INQUIRY_PAGE83.  Transport failures are retried or queued;
 *		 successful completions are dispatched to the matching
 *		 fcp_handle_*() routine.
 *
 *     Argument: *fpkt	 FC packet used to convey the command
 *
 * Return Value: None
 *
 *	Context: Interrupt context (completion callback).
 */
static void
fcp_scsi_callback(fc_packet_t *fpkt)
{
	struct fcp_ipkt	*icmd = (struct fcp_ipkt *)
	    fpkt->pkt_ulp_private;
	struct fcp_rsp_info	fcp_rsp_err, *bep;
	struct fcp_port	*pptr;
	struct fcp_tgt	*ptgt;
	struct fcp_lun	*plun;
	struct fcp_rsp		response, *rsp;

	/* read the FCP response IU; in nodma mode it is mapped in place */
	if (icmd->ipkt_nodma) {
		rsp = (struct fcp_rsp *)fpkt->pkt_resp;
	} else {
		rsp = &response;
		FCP_CP_IN(fpkt->pkt_resp, rsp, fpkt->pkt_resp_acc,
		    sizeof (struct fcp_rsp));
	}

	ptgt = icmd->ipkt_tgt;
	pptr = ptgt->tgt_port;
	plun = icmd->ipkt_lun;

	FCP_TRACE(fcp_logq, pptr->port_instbuf,
	    fcp_trace, FCP_BUF_LEVEL_2, 0,
	    "SCSI callback state=0x%x for %x, op_code=0x%x, "
	    "status=%x, lun num=%x",
	    fpkt->pkt_state, ptgt->tgt_d_id, icmd->ipkt_opcode,
	    rsp->fcp_u.fcp_status.scsi_status, plun->lun_num);

	/*
	 * Pre-init LUN GUID with NWWN if it is not a device that
	 * supports multiple luns and we know it's not page83
	 * compliant.  Although using a NWWN is not lun unique,
	 * we will be fine since there is only one lun behind the target
	 * in this case.
	 */
	if ((plun->lun_guid_size == 0) &&
	    (icmd->ipkt_opcode == SCMD_INQUIRY_PAGE83) &&
	    (fcp_symmetric_device_probe(plun) == 0)) {

		char ascii_wwn[FC_WWN_SIZE*2+1];
		fcp_wwn_to_ascii(&ptgt->tgt_node_wwn.raw_wwn[0], ascii_wwn);
		(void) fcp_copy_guid_2_lun_block(plun, ascii_wwn);
	}

	/*
	 * Some old FC tapes and FC <-> SCSI bridge devices return overrun
	 * when they have more data than what is asked in CDB. An overrun
	 * is really when FCP_DL is smaller than the data length in CDB.
	 * In the case here we know that REPORT LUN command we formed within
	 * this binary has correct FCP_DL. So this OVERRUN is due to bad device
	 * behavior. In reality this is FC_SUCCESS.
	 */
	if ((fpkt->pkt_state != FC_PKT_SUCCESS) &&
	    (fpkt->pkt_reason == FC_REASON_OVERRUN) &&
	    (icmd->ipkt_opcode == SCMD_REPORT_LUN)) {
		fpkt->pkt_state = FC_PKT_SUCCESS;
	}

	if (fpkt->pkt_state != FC_PKT_SUCCESS) {
		FCP_TRACE(fcp_logq, pptr->port_instbuf,
		    fcp_trace, FCP_BUF_LEVEL_2, 0,
		    "icmd failed with state=0x%x for %x", fpkt->pkt_state,
		    ptgt->tgt_d_id);

		if (fpkt->pkt_reason == FC_REASON_CRC_ERROR) {
			/*
			 * Inquiry VPD page command on A5K SES devices would
			 * result in data CRC errors.
			 */
			if (icmd->ipkt_opcode == SCMD_INQUIRY_PAGE83) {
				(void) fcp_handle_page83(fpkt, icmd, 1);
				return;
			}
		}
		/* timed out or transient failure: extend timeout and retry */
		if (fpkt->pkt_state == FC_PKT_TIMEOUT ||
		    FCP_MUST_RETRY(fpkt)) {
			fpkt->pkt_timeout += FCP_TIMEOUT_DELTA;
			fcp_retry_scsi_cmd(fpkt);
			return;
		}

		FCP_TGT_TRACE(ptgt, icmd->ipkt_change_cnt,
		    FCP_TGT_TRACE_20);

		/* only report the error if no state change raced us */
		mutex_enter(&pptr->port_mutex);
		mutex_enter(&ptgt->tgt_mutex);
		if (!FCP_STATE_CHANGED(pptr, ptgt, icmd)) {
			mutex_exit(&ptgt->tgt_mutex);
			mutex_exit(&pptr->port_mutex);
			fcp_print_error(fpkt);
		} else {
			FCP_TRACE(fcp_logq, pptr->port_instbuf,
			    fcp_trace, FCP_BUF_LEVEL_2, 0,
			    "fcp_scsi_callback,1: state change occured"
			    " for D_ID=0x%x", ptgt->tgt_d_id);
			mutex_exit(&ptgt->tgt_mutex);
			mutex_exit(&pptr->port_mutex);
		}
		(void) fcp_call_finish_init(pptr, ptgt, icmd->ipkt_link_cnt,
		    icmd->ipkt_change_cnt, icmd->ipkt_cause);
		fcp_icmd_free(pptr, icmd);
		return;
	}

	FCP_TGT_TRACE(ptgt, icmd->ipkt_change_cnt, FCP_TGT_TRACE_21);

	/* abandon the completion if the link/target state moved under us */
	mutex_enter(&pptr->port_mutex);
	mutex_enter(&ptgt->tgt_mutex);
	if (FCP_STATE_CHANGED(pptr, ptgt, icmd)) {
		FCP_TRACE(fcp_logq, pptr->port_instbuf,
		    fcp_trace, FCP_BUF_LEVEL_2, 0,
		    "fcp_scsi_callback,2: state change occured"
		    " for D_ID=0x%x", ptgt->tgt_d_id);
		mutex_exit(&ptgt->tgt_mutex);
		mutex_exit(&pptr->port_mutex);
		(void) fcp_call_finish_init(pptr, ptgt, icmd->ipkt_link_cnt,
		    icmd->ipkt_change_cnt, icmd->ipkt_cause);
		fcp_icmd_free(pptr, icmd);
		return;
	}
	ASSERT((ptgt->tgt_state & FCP_TGT_MARK) == 0);

	mutex_exit(&ptgt->tgt_mutex);
	mutex_exit(&pptr->port_mutex);

	/* locate the FCP_RSP_INFO block that follows the response IU */
	if (icmd->ipkt_nodma) {
		bep = (struct fcp_rsp_info *)(fpkt->pkt_resp +
		    sizeof (struct fcp_rsp));
	} else {
		bep = &fcp_rsp_err;
		FCP_CP_IN(fpkt->pkt_resp + sizeof (struct fcp_rsp), bep,
		    fpkt->pkt_resp_acc, sizeof (struct fcp_rsp_info));
	}

	if (fcp_validate_fcp_response(rsp, pptr) != FC_SUCCESS) {
		fcp_retry_scsi_cmd(fpkt);
		return;
	}

	if (rsp->fcp_u.fcp_status.rsp_len_set && bep->rsp_code !=
	    FCP_NO_FAILURE) {
		FCP_TRACE(fcp_logq, pptr->port_instbuf,
		    fcp_trace, FCP_BUF_LEVEL_2, 0,
		    "rsp_code=0x%x, rsp_len_set=0x%x",
		    bep->rsp_code, rsp->fcp_u.fcp_status.rsp_len_set);
		fcp_retry_scsi_cmd(fpkt);
		return;
	}

	/* the device is busy right now; park the packet for later */
	if (rsp->fcp_u.fcp_status.scsi_status == STATUS_QFULL ||
	    rsp->fcp_u.fcp_status.scsi_status == STATUS_BUSY) {
		fcp_queue_ipkt(pptr, fpkt);
		return;
	}

	/*
	 * Devices that do not support INQUIRY_PAGE83, return check condition
	 * with illegal request as per SCSI spec.
	 * Crossbridge is one such device and Daktari's SES node is another.
	 * We want to ideally enumerate these devices as a non-mpxio devices.
	 * SES nodes (Daktari only currently) are an exception to this.
	 */
	if ((icmd->ipkt_opcode == SCMD_INQUIRY_PAGE83) &&
	    (rsp->fcp_u.fcp_status.scsi_status & STATUS_CHECK)) {

		FCP_TRACE(fcp_logq, pptr->port_instbuf,
		    fcp_trace, FCP_BUF_LEVEL_3, 0,
		    "INQUIRY_PAGE83 for d_id %x (dtype:0x%x) failed with "
		    "check condition. May enumerate as non-mpxio device",
		    ptgt->tgt_d_id, plun->lun_type);

		/*
		 * If we let Daktari's SES be enumerated as a non-mpxio
		 * device, there will be a discrepancy in that the other
		 * internal FC disks will get enumerated as mpxio devices.
		 * Applications like luxadm expect this to be consistent.
		 *
		 * So, we put in a hack here to check if this is an SES device
		 * and handle it here.
		 */
		if (plun->lun_type == DTYPE_ESI) {
			/*
			 * Since, pkt_state is actually FC_PKT_SUCCESS
			 * at this stage, we fake a failure here so that
			 * fcp_handle_page83 will create a device path using
			 * the WWN instead of the GUID which is not there anyway
			 */
			fpkt->pkt_state = FC_PKT_LOCAL_RJT;
			(void) fcp_handle_page83(fpkt, icmd, 1);
			return;
		}

		mutex_enter(&ptgt->tgt_mutex);
		plun->lun_state &= ~(FCP_LUN_OFFLINE |
		    FCP_LUN_MARK | FCP_LUN_BUSY);
		mutex_exit(&ptgt->tgt_mutex);

		(void) fcp_call_finish_init(pptr, ptgt,
		    icmd->ipkt_link_cnt, icmd->ipkt_change_cnt,
		    icmd->ipkt_cause);
		fcp_icmd_free(pptr, icmd);
		return;
	}

	if (rsp->fcp_u.fcp_status.scsi_status != STATUS_GOOD) {
		int rval = DDI_FAILURE;

		/*
		 * handle cases where report lun isn't supported
		 * by faking up our own REPORT_LUN response or
		 * UNIT ATTENTION
		 */
		if (icmd->ipkt_opcode == SCMD_REPORT_LUN) {
			rval = fcp_check_reportlun(rsp, fpkt);

			/*
			 * fcp_check_reportlun might have modified the
			 * FCP response. Copy it in again to get an updated
			 * FCP response
			 */
			if (rval == DDI_SUCCESS && icmd->ipkt_nodma == 0) {
				rsp = &response;

				FCP_CP_IN(fpkt->pkt_resp, rsp,
				    fpkt->pkt_resp_acc,
				    sizeof (struct fcp_rsp));
			}
		}

		if (rsp->fcp_u.fcp_status.scsi_status != STATUS_GOOD) {
			if (rval == DDI_SUCCESS) {
				(void) fcp_call_finish_init(pptr, ptgt,
				    icmd->ipkt_link_cnt, icmd->ipkt_change_cnt,
				    icmd->ipkt_cause);
				fcp_icmd_free(pptr, icmd);
			} else {
				fcp_retry_scsi_cmd(fpkt);
			}

			return;
		}
	} else {
		if (icmd->ipkt_opcode == SCMD_REPORT_LUN) {
			mutex_enter(&ptgt->tgt_mutex);
			ptgt->tgt_state &= ~FCP_TGT_ILLREQ;
			mutex_exit(&ptgt->tgt_mutex);
		}
	}

	ASSERT(rsp->fcp_u.fcp_status.scsi_status == STATUS_GOOD);

	/* make the DMA'd data visible to the CPU before parsing it */
	(void) ddi_dma_sync(fpkt->pkt_data_dma, 0, 0, DDI_DMA_SYNC_FORCPU);

	switch (icmd->ipkt_opcode) {
	case SCMD_INQUIRY:
		FCP_LUN_TRACE(plun, FCP_LUN_TRACE_1);
		fcp_handle_inquiry(fpkt, icmd);
		break;

	case SCMD_REPORT_LUN:
		FCP_TGT_TRACE(ptgt, icmd->ipkt_change_cnt,
		    FCP_TGT_TRACE_22);
		fcp_handle_reportlun(fpkt, icmd);
		break;

	case SCMD_INQUIRY_PAGE83:
		FCP_LUN_TRACE(plun, FCP_LUN_TRACE_2);
		(void) fcp_handle_page83(fpkt, icmd, 0);
		break;

	default:
		fcp_log(CE_WARN, NULL, "!Invalid SCSI opcode");
		(void) fcp_call_finish_init(pptr, ptgt, icmd->ipkt_link_cnt,
		    icmd->ipkt_change_cnt, icmd->ipkt_cause);
		fcp_icmd_free(pptr, icmd);
		break;
	}
}
7040 
7041 
7042 static void
7043 fcp_retry_scsi_cmd(fc_packet_t *fpkt)
7044 {
7045 	struct fcp_ipkt	*icmd = (struct fcp_ipkt *)
7046 	    fpkt->pkt_ulp_private;
7047 	struct fcp_tgt	*ptgt = icmd->ipkt_tgt;
7048 	struct fcp_port	*pptr = ptgt->tgt_port;
7049 
7050 	if (icmd->ipkt_retries < FCP_MAX_RETRIES &&
7051 	    fcp_is_retryable(icmd)) {
7052 		mutex_enter(&pptr->port_mutex);
7053 		if (!FCP_TGT_STATE_CHANGED(ptgt, icmd)) {
7054 			mutex_exit(&pptr->port_mutex);
7055 			FCP_TRACE(fcp_logq, pptr->port_instbuf,
7056 			    fcp_trace, FCP_BUF_LEVEL_3, 0,
7057 			    "Retrying %s to %x; state=%x, reason=%x",
7058 			    (icmd->ipkt_opcode == SCMD_REPORT_LUN) ?
7059 			    "Report LUN" : "INQUIRY", ptgt->tgt_d_id,
7060 			    fpkt->pkt_state, fpkt->pkt_reason);
7061 
7062 			fcp_queue_ipkt(pptr, fpkt);
7063 		} else {
7064 			FCP_TRACE(fcp_logq, pptr->port_instbuf,
7065 			    fcp_trace, FCP_BUF_LEVEL_3, 0,
7066 			    "fcp_retry_scsi_cmd,1: state change occured"
7067 			    " for D_ID=0x%x", ptgt->tgt_d_id);
7068 			mutex_exit(&pptr->port_mutex);
7069 			(void) fcp_call_finish_init(pptr, ptgt,
7070 			    icmd->ipkt_link_cnt, icmd->ipkt_change_cnt,
7071 			    icmd->ipkt_cause);
7072 			fcp_icmd_free(pptr, icmd);
7073 		}
7074 	} else {
7075 		fcp_print_error(fpkt);
7076 		(void) fcp_call_finish_init(pptr, ptgt, icmd->ipkt_link_cnt,
7077 		    icmd->ipkt_change_cnt, icmd->ipkt_cause);
7078 		fcp_icmd_free(pptr, icmd);
7079 	}
7080 }
7081 
7082 /*
7083  *     Function: fcp_handle_page83
7084  *
7085  *  Description: Treats the response to INQUIRY_PAGE83.
7086  *
7087  *     Argument: *fpkt	FC packet used to convey the command.
7088  *		 *icmd	Original fcp_ipkt structure.
7089  *		 ignore_page83_data
 *			if it is 1, the response comes from a special
 *			device's page83 data; the LUN should be enumerated
 *			under mpxio
7092  *
7093  * Return Value: None
7094  */
7095 static void
7096 fcp_handle_page83(fc_packet_t *fpkt, struct fcp_ipkt *icmd,
7097     int ignore_page83_data)
7098 {
7099 	struct fcp_port	*pptr;
7100 	struct fcp_lun	*plun;
7101 	struct fcp_tgt	*ptgt;
7102 	uchar_t			dev_id_page[SCMD_MAX_INQUIRY_PAGE83_SIZE];
7103 	int			fail = 0;
7104 	ddi_devid_t		devid;
7105 	char			*guid = NULL;
7106 	int			ret;
7107 
7108 	ASSERT(icmd != NULL && fpkt != NULL);
7109 
7110 	pptr = icmd->ipkt_port;
7111 	ptgt = icmd->ipkt_tgt;
7112 	plun = icmd->ipkt_lun;
7113 
7114 	if (fpkt->pkt_state == FC_PKT_SUCCESS) {
7115 		FCP_LUN_TRACE(plun, FCP_LUN_TRACE_7);
7116 
7117 		FCP_CP_IN(fpkt->pkt_data, dev_id_page, fpkt->pkt_data_acc,
7118 		    SCMD_MAX_INQUIRY_PAGE83_SIZE);
7119 
7120 		FCP_TRACE(fcp_logq, pptr->port_instbuf,
7121 		    fcp_trace, FCP_BUF_LEVEL_5, 0,
7122 		    "fcp_handle_page83: port=%d, tgt D_ID=0x%x, "
7123 		    "dtype=0x%x, lun num=%x",
7124 		    pptr->port_instance, ptgt->tgt_d_id,
7125 		    dev_id_page[0], plun->lun_num);
7126 
7127 		ret = ddi_devid_scsi_encode(
7128 		    DEVID_SCSI_ENCODE_VERSION_LATEST,
7129 		    NULL,		/* driver name */
7130 		    (unsigned char *) &plun->lun_inq, /* standard inquiry */
7131 		    sizeof (plun->lun_inq), /* size of standard inquiry */
7132 		    NULL,		/* page 80 data */
7133 		    0,		/* page 80 len */
7134 		    dev_id_page,	/* page 83 data */
7135 		    SCMD_MAX_INQUIRY_PAGE83_SIZE, /* page 83 data len */
7136 		    &devid);
7137 
7138 		if (ret == DDI_SUCCESS) {
7139 
7140 			guid = ddi_devid_to_guid(devid);
7141 
7142 			if (guid) {
7143 				/*
7144 				 * Check our current guid.  If it's non null
7145 				 * and it has changed, we need to copy it into
7146 				 * lun_old_guid since we might still need it.
7147 				 */
7148 				if (plun->lun_guid &&
7149 				    strcmp(guid, plun->lun_guid)) {
7150 					unsigned int len;
7151 
7152 					/*
7153 					 * If the guid of the LUN changes,
7154 					 * reconfiguration should be triggered
7155 					 * to reflect the changes.
7156 					 * i.e. we should offline the LUN with
7157 					 * the old guid, and online the LUN with
7158 					 * the new guid.
7159 					 */
7160 					plun->lun_state |= FCP_LUN_CHANGED;
7161 
7162 					if (plun->lun_old_guid) {
7163 						kmem_free(plun->lun_old_guid,
7164 						    plun->lun_old_guid_size);
7165 					}
7166 
7167 					len = plun->lun_guid_size;
7168 					plun->lun_old_guid_size = len;
7169 
7170 					plun->lun_old_guid = kmem_zalloc(len,
7171 					    KM_NOSLEEP);
7172 
7173 					if (plun->lun_old_guid) {
7174 						/*
7175 						 * The alloc was successful then
7176 						 * let's do the copy.
7177 						 */
7178 						bcopy(plun->lun_guid,
7179 						    plun->lun_old_guid, len);
7180 					} else {
7181 						fail = 1;
7182 						plun->lun_old_guid_size = 0;
7183 					}
7184 				}
7185 				if (!fail) {
7186 					if (fcp_copy_guid_2_lun_block(
7187 					    plun, guid)) {
7188 						fail = 1;
7189 					}
7190 				}
7191 				ddi_devid_free_guid(guid);
7192 
7193 			} else {
7194 				FCP_TRACE(fcp_logq, pptr->port_instbuf,
7195 				    fcp_trace, FCP_BUF_LEVEL_2, 0,
7196 				    "fcp_handle_page83: unable to create "
7197 				    "GUID");
7198 
7199 				/* couldn't create good guid from devid */
7200 				fail = 1;
7201 			}
7202 			ddi_devid_free(devid);
7203 
7204 		} else if (ret == DDI_NOT_WELL_FORMED) {
7205 			/* NULL filled data for page 83 */
7206 			FCP_TRACE(fcp_logq, pptr->port_instbuf,
7207 			    fcp_trace, FCP_BUF_LEVEL_2, 0,
7208 			    "fcp_handle_page83: retry GUID");
7209 
7210 			icmd->ipkt_retries = 0;
7211 			fcp_retry_scsi_cmd(fpkt);
7212 			return;
7213 		} else {
7214 			FCP_TRACE(fcp_logq, pptr->port_instbuf,
7215 			    fcp_trace, FCP_BUF_LEVEL_2, 0,
7216 			    "fcp_handle_page83: bad ddi_devid_scsi_encode %x",
7217 			    ret);
7218 			/*
7219 			 * Since the page83 validation
7220 			 * introduced late, we are being
7221 			 * tolerant to the existing devices
7222 			 * that already found to be working
7223 			 * under mpxio, like A5200's SES device,
7224 			 * its page83 response will not be standard-compliant,
7225 			 * but we still want it to be enumerated under mpxio.
7226 			 */
7227 			if (fcp_symmetric_device_probe(plun) != 0) {
7228 				fail = 1;
7229 			}
7230 		}
7231 
7232 	} else {
7233 		/* bad packet state */
7234 		FCP_LUN_TRACE(plun, FCP_LUN_TRACE_8);
7235 
7236 		/*
7237 		 * For some special devices (A5K SES and Daktari's SES devices),
7238 		 * they should be enumerated under mpxio
7239 		 * or "luxadm dis" will fail
7240 		 */
7241 		if (ignore_page83_data) {
7242 			fail = 0;
7243 		} else {
7244 			fail = 1;
7245 		}
7246 		FCP_TRACE(fcp_logq, pptr->port_instbuf,
7247 		    fcp_trace, FCP_BUF_LEVEL_2, 0,
7248 		    "!Devid page cmd failed. "
7249 		    "fpkt_state: %x fpkt_reason: %x",
7250 		    "ignore_page83: %d",
7251 		    fpkt->pkt_state, fpkt->pkt_reason,
7252 		    ignore_page83_data);
7253 	}
7254 
7255 	mutex_enter(&pptr->port_mutex);
7256 	mutex_enter(&plun->lun_mutex);
7257 	/*
7258 	 * If lun_cip is not NULL, then we needn't update lun_mpxio to avoid
7259 	 * mismatch between lun_cip and lun_mpxio.
7260 	 */
7261 	if (plun->lun_cip == NULL) {
7262 		/*
7263 		 * If we don't have a guid for this lun it's because we were
7264 		 * unable to glean one from the page 83 response.  Set the
7265 		 * control flag to 0 here to make sure that we don't attempt to
7266 		 * enumerate it under mpxio.
7267 		 */
7268 		if (fail || pptr->port_mpxio == 0) {
7269 			plun->lun_mpxio = 0;
7270 		} else {
7271 			plun->lun_mpxio = 1;
7272 		}
7273 	}
7274 	mutex_exit(&plun->lun_mutex);
7275 	mutex_exit(&pptr->port_mutex);
7276 
7277 	mutex_enter(&ptgt->tgt_mutex);
7278 	plun->lun_state &=
7279 	    ~(FCP_LUN_OFFLINE | FCP_LUN_MARK | FCP_LUN_BUSY);
7280 	mutex_exit(&ptgt->tgt_mutex);
7281 
7282 	(void) fcp_call_finish_init(pptr, ptgt, icmd->ipkt_link_cnt,
7283 	    icmd->ipkt_change_cnt, icmd->ipkt_cause);
7284 
7285 	fcp_icmd_free(pptr, icmd);
7286 }
7287 
7288 /*
7289  *     Function: fcp_handle_inquiry
7290  *
7291  *  Description: Called by fcp_scsi_callback to handle the response to an
7292  *		 INQUIRY request.
7293  *
7294  *     Argument: *fpkt	FC packet used to convey the command.
7295  *		 *icmd	Original fcp_ipkt structure.
7296  *
7297  * Return Value: None
7298  */
7299 static void
7300 fcp_handle_inquiry(fc_packet_t *fpkt, struct fcp_ipkt *icmd)
7301 {
7302 	struct fcp_port	*pptr;
7303 	struct fcp_lun	*plun;
7304 	struct fcp_tgt	*ptgt;
7305 	uchar_t		dtype;
7306 	uchar_t		pqual;
7307 	uint32_t	rscn_count = FC_INVALID_RSCN_COUNT;
7308 
7309 	ASSERT(icmd != NULL && fpkt != NULL);
7310 
7311 	pptr = icmd->ipkt_port;
7312 	ptgt = icmd->ipkt_tgt;
7313 	plun = icmd->ipkt_lun;
7314 
7315 	FCP_CP_IN(fpkt->pkt_data, &plun->lun_inq, fpkt->pkt_data_acc,
7316 	    sizeof (struct scsi_inquiry));
7317 
7318 	dtype = plun->lun_inq.inq_dtype & DTYPE_MASK;
7319 	pqual = plun->lun_inq.inq_dtype >> 5;
7320 
7321 	FCP_TRACE(fcp_logq, pptr->port_instbuf,
7322 	    fcp_trace, FCP_BUF_LEVEL_5, 0,
7323 	    "fcp_handle_inquiry: port=%d, tgt D_ID=0x%x, lun=0x%x, "
7324 	    "dtype=0x%x pqual: 0x%x", pptr->port_instance, ptgt->tgt_d_id,
7325 	    plun->lun_num, dtype, pqual);
7326 
7327 	if (pqual != 0) {
7328 		/*
7329 		 * Non-zero peripheral qualifier
7330 		 */
7331 		fcp_log(CE_CONT, pptr->port_dip,
7332 		    "!Target 0x%x lun 0x%x: Nonzero peripheral qualifier: "
7333 		    "Device type=0x%x Peripheral qual=0x%x\n",
7334 		    ptgt->tgt_d_id, plun->lun_num, dtype, pqual);
7335 
7336 		FCP_TRACE(fcp_logq, pptr->port_instbuf,
7337 		    fcp_trace, FCP_BUF_LEVEL_5, 0,
7338 		    "!Target 0x%x lun 0x%x: Nonzero peripheral qualifier: "
7339 		    "Device type=0x%x Peripheral qual=0x%x\n",
7340 		    ptgt->tgt_d_id, plun->lun_num, dtype, pqual);
7341 
7342 		FCP_LUN_TRACE(plun, FCP_LUN_TRACE_3);
7343 
7344 		(void) fcp_call_finish_init(pptr, ptgt, icmd->ipkt_link_cnt,
7345 		    icmd->ipkt_change_cnt, icmd->ipkt_cause);
7346 		fcp_icmd_free(pptr, icmd);
7347 		return;
7348 	}
7349 
7350 	/*
7351 	 * If the device is already initialized, check the dtype
7352 	 * for a change. If it has changed then update the flags
7353 	 * so the create_luns will offline the old device and
7354 	 * create the new device. Refer to bug: 4764752
7355 	 */
7356 	if ((plun->lun_state & FCP_LUN_INIT) && dtype != plun->lun_type) {
7357 		plun->lun_state |= FCP_LUN_CHANGED;
7358 	}
7359 	plun->lun_type = plun->lun_inq.inq_dtype;
7360 
7361 	/*
7362 	 * This code is setting/initializing the throttling in the FCA
7363 	 * driver.
7364 	 */
7365 	mutex_enter(&pptr->port_mutex);
7366 	if (!pptr->port_notify) {
7367 		if (bcmp(plun->lun_inq.inq_pid, pid, strlen(pid)) == 0) {
7368 			uint32_t cmd = 0;
7369 			cmd = ((cmd & 0xFF | FC_NOTIFY_THROTTLE) |
7370 			    ((cmd & 0xFFFFFF00 >> 8) |
7371 			    FCP_SVE_THROTTLE << 8));
7372 			pptr->port_notify = 1;
7373 			mutex_exit(&pptr->port_mutex);
7374 			(void) fc_ulp_port_notify(pptr->port_fp_handle, cmd);
7375 			mutex_enter(&pptr->port_mutex);
7376 		}
7377 	}
7378 
7379 	if (FCP_TGT_STATE_CHANGED(ptgt, icmd)) {
7380 		FCP_TRACE(fcp_logq, pptr->port_instbuf,
7381 		    fcp_trace, FCP_BUF_LEVEL_2, 0,
7382 		    "fcp_handle_inquiry,1:state change occured"
7383 		    " for D_ID=0x%x", ptgt->tgt_d_id);
7384 		mutex_exit(&pptr->port_mutex);
7385 
7386 		FCP_LUN_TRACE(plun, FCP_LUN_TRACE_5);
7387 		(void) fcp_call_finish_init(pptr, ptgt,
7388 		    icmd->ipkt_link_cnt, icmd->ipkt_change_cnt,
7389 		    icmd->ipkt_cause);
7390 		fcp_icmd_free(pptr, icmd);
7391 		return;
7392 	}
7393 	ASSERT((ptgt->tgt_state & FCP_TGT_MARK) == 0);
7394 	mutex_exit(&pptr->port_mutex);
7395 
7396 	/* Retrieve the rscn count (if a valid one exists) */
7397 	if (icmd->ipkt_fpkt->pkt_ulp_rscn_infop != NULL) {
7398 		rscn_count = ((fc_ulp_rscn_info_t *)
7399 		    (icmd->ipkt_fpkt->pkt_ulp_rscn_infop))->ulp_rscn_count;
7400 	} else {
7401 		rscn_count = FC_INVALID_RSCN_COUNT;
7402 	}
7403 
7404 	if (fcp_send_scsi(plun, SCMD_INQUIRY_PAGE83,
7405 	    SCMD_MAX_INQUIRY_PAGE83_SIZE,
7406 	    icmd->ipkt_link_cnt, icmd->ipkt_change_cnt,
7407 	    icmd->ipkt_cause, rscn_count) != DDI_SUCCESS) {
7408 		fcp_log(CE_WARN, NULL, "!failed to send page 83");
7409 		FCP_LUN_TRACE(plun, FCP_LUN_TRACE_6);
7410 		(void) fcp_call_finish_init(pptr, ptgt,
7411 		    icmd->ipkt_link_cnt, icmd->ipkt_change_cnt,
7412 		    icmd->ipkt_cause);
7413 	}
7414 
7415 	/*
7416 	 * Read Inquiry VPD Page 0x83 to uniquely
7417 	 * identify this logical unit.
7418 	 */
7419 	fcp_icmd_free(pptr, icmd);
7420 }
7421 
7422 /*
7423  *     Function: fcp_handle_reportlun
7424  *
7425  *  Description: Called by fcp_scsi_callback to handle the response to a
7426  *		 REPORT_LUN request.
7427  *
7428  *     Argument: *fpkt	FC packet used to convey the command.
7429  *		 *icmd	Original fcp_ipkt structure.
7430  *
7431  * Return Value: None
7432  */
static void
fcp_handle_reportlun(fc_packet_t *fpkt, struct fcp_ipkt *icmd)
{
	int				i;
	int				nluns_claimed;	/* LUN count reported by device */
	int				nluns_bufmax;	/* LUN count our buffer can hold */
	int				len;
	uint16_t			lun_num;
	uint32_t			rscn_count = FC_INVALID_RSCN_COUNT;
	struct fcp_port			*pptr;
	struct fcp_tgt			*ptgt;
	struct fcp_lun			*plun;
	struct fcp_reportlun_resp	*report_lun;

	pptr = icmd->ipkt_port;
	ptgt = icmd->ipkt_tgt;
	len = fpkt->pkt_datalen;

	/*
	 * A response shorter than the REPORT LUNS header is unusable, and
	 * the copy buffer allocation may fail (KM_NOSLEEP); either way
	 * finish out this discovery step and release the internal packet.
	 */
	if ((len < FCP_LUN_HEADER) ||
	    ((report_lun = kmem_zalloc(len, KM_NOSLEEP)) == NULL)) {
		(void) fcp_call_finish_init(pptr, ptgt, icmd->ipkt_link_cnt,
		    icmd->ipkt_change_cnt, icmd->ipkt_cause);
		fcp_icmd_free(pptr, icmd);
		return;
	}

	/* Copy the REPORT LUNS response out of the DMA buffer. */
	FCP_CP_IN(fpkt->pkt_data, report_lun, fpkt->pkt_data_acc,
	    fpkt->pkt_datalen);

	FCP_TRACE(fcp_logq, pptr->port_instbuf,
	    fcp_trace, FCP_BUF_LEVEL_5, 0,
	    "fcp_handle_reportlun: port=%d, tgt D_ID=0x%x",
	    pptr->port_instance, ptgt->tgt_d_id);

	/*
	 * Get the number of luns (which is supplied as LUNS * 8) the
	 * device claims it has.
	 */
	nluns_claimed = BE_32(report_lun->num_lun) >> 3;

	/*
	 * Get the maximum number of luns the buffer submitted can hold.
	 */
	nluns_bufmax = (fpkt->pkt_datalen - FCP_LUN_HEADER) / FCP_LUN_SIZE;

	/*
	 * Due to limitations of certain hardware, we support only 16 bit LUNs
	 */
	if (nluns_claimed > FCP_MAX_LUNS_SUPPORTED) {
		kmem_free(report_lun, len);

		fcp_log(CE_NOTE, pptr->port_dip, "!Can not support"
		    " 0x%x number of LUNs for target=%x", nluns_claimed,
		    ptgt->tgt_d_id);

		(void) fcp_call_finish_init(pptr, ptgt, icmd->ipkt_link_cnt,
		    icmd->ipkt_change_cnt, icmd->ipkt_cause);
		fcp_icmd_free(pptr, icmd);
		return;
	}

	/*
	 * If there are more LUNs than we have allocated memory for,
	 * allocate more space and send down yet another report lun if
	 * the maximum number of attempts hasn't been reached.
	 */
	mutex_enter(&ptgt->tgt_mutex);

	if ((nluns_claimed > nluns_bufmax) &&
	    (ptgt->tgt_report_lun_cnt < FCP_MAX_REPORTLUNS_ATTEMPTS)) {

		struct fcp_lun *plun;

		ptgt->tgt_report_lun_cnt++;
		plun = ptgt->tgt_lun;
		ASSERT(plun != NULL);
		mutex_exit(&ptgt->tgt_mutex);

		kmem_free(report_lun, len);

		FCP_TRACE(fcp_logq, pptr->port_instbuf,
		    fcp_trace, FCP_BUF_LEVEL_5, 0,
		    "!Dynamically discovered %d LUNs for D_ID=%x",
		    nluns_claimed, ptgt->tgt_d_id);

		/* Retrieve the rscn count (if a valid one exists) */
		if (icmd->ipkt_fpkt->pkt_ulp_rscn_infop != NULL) {
			rscn_count = ((fc_ulp_rscn_info_t *)
			    (icmd->ipkt_fpkt->pkt_ulp_rscn_infop))->
			    ulp_rscn_count;
		} else {
			rscn_count = FC_INVALID_RSCN_COUNT;
		}

		/*
		 * Reissue REPORT LUNS with a buffer sized for everything
		 * the device claims; this icmd is done either way.
		 */
		if (fcp_send_scsi(icmd->ipkt_lun, SCMD_REPORT_LUN,
		    FCP_LUN_HEADER + (nluns_claimed * FCP_LUN_SIZE),
		    icmd->ipkt_link_cnt, icmd->ipkt_change_cnt,
		    icmd->ipkt_cause, rscn_count) != DDI_SUCCESS) {
			(void) fcp_call_finish_init(pptr, ptgt,
			    icmd->ipkt_link_cnt, icmd->ipkt_change_cnt,
			    icmd->ipkt_cause);
		}

		fcp_icmd_free(pptr, icmd);
		return;
	}

	/*
	 * Retry attempts exhausted: proceed with however many LUNs fit
	 * in the buffer and log the ones we are losing.
	 */
	if (nluns_claimed > nluns_bufmax) {
		FCP_TRACE(fcp_logq, pptr->port_instbuf,
		    fcp_trace, FCP_BUF_LEVEL_5, 0,
		    "Target=%x:%x:%x:%x:%x:%x:%x:%x"
		    "	 Number of LUNs lost=%x",
		    ptgt->tgt_port_wwn.raw_wwn[0],
		    ptgt->tgt_port_wwn.raw_wwn[1],
		    ptgt->tgt_port_wwn.raw_wwn[2],
		    ptgt->tgt_port_wwn.raw_wwn[3],
		    ptgt->tgt_port_wwn.raw_wwn[4],
		    ptgt->tgt_port_wwn.raw_wwn[5],
		    ptgt->tgt_port_wwn.raw_wwn[6],
		    ptgt->tgt_port_wwn.raw_wwn[7],
		    nluns_claimed - nluns_bufmax);

		nluns_claimed = nluns_bufmax;
	}
	ptgt->tgt_lun_cnt = nluns_claimed;

	/*
	 * Identify missing LUNs and print warning messages
	 */
	for (plun = ptgt->tgt_lun; plun; plun = plun->lun_next) {
		int offline;
		int exists = 0;

		offline = (plun->lun_state & FCP_LUN_OFFLINE) ? 1 : 0;

		/* Does this known LUN still appear in the response? */
		for (i = 0; i < nluns_claimed && exists == 0; i++) {
			uchar_t		*lun_string;

			lun_string = (uchar_t *)&(report_lun->lun_string[i]);

			switch (lun_string[0] & 0xC0) {
			case FCP_LUN_ADDRESSING:
			case FCP_PD_ADDRESSING:
				lun_num = ((lun_string[0] & 0x3F) << 8) |
				    lun_string[1];
				if (plun->lun_num == lun_num) {
					exists++;
					break;
				}
				break;

			default:
				break;
			}
		}

		if (!exists && !offline) {
			mutex_exit(&ptgt->tgt_mutex);

			/* Re-take locks in port-then-target order. */
			mutex_enter(&pptr->port_mutex);
			mutex_enter(&ptgt->tgt_mutex);
			if (!FCP_STATE_CHANGED(pptr, ptgt, icmd)) {
				/*
				 * set disappear flag when device was connected
				 */
				if (!(plun->lun_state &
				    FCP_LUN_DEVICE_NOT_CONNECTED)) {
					plun->lun_state |= FCP_LUN_DISAPPEARED;
				}
				mutex_exit(&ptgt->tgt_mutex);
				mutex_exit(&pptr->port_mutex);
				if (!(plun->lun_state &
				    FCP_LUN_DEVICE_NOT_CONNECTED)) {
					fcp_log(CE_NOTE, pptr->port_dip,
					    "!Lun=%x for target=%x disappeared",
					    plun->lun_num, ptgt->tgt_d_id);
				}
				mutex_enter(&ptgt->tgt_mutex);
			} else {
				/* Port/target state changed; abandon scan. */
				FCP_TRACE(fcp_logq, pptr->port_instbuf,
				    fcp_trace, FCP_BUF_LEVEL_5, 0,
				    "fcp_handle_reportlun,1: state change"
				    " occured for D_ID=0x%x", ptgt->tgt_d_id);
				mutex_exit(&ptgt->tgt_mutex);
				mutex_exit(&pptr->port_mutex);
				kmem_free(report_lun, len);
				(void) fcp_call_finish_init(pptr, ptgt,
				    icmd->ipkt_link_cnt, icmd->ipkt_change_cnt,
				    icmd->ipkt_cause);
				fcp_icmd_free(pptr, icmd);
				return;
			}
		} else if (exists) {
			/*
			 * clear FCP_LUN_DEVICE_NOT_CONNECTED when lun 0
			 * actually exists in REPORT_LUN response
			 */
			if (plun->lun_state & FCP_LUN_DEVICE_NOT_CONNECTED) {
				plun->lun_state &=
				    ~FCP_LUN_DEVICE_NOT_CONNECTED;
			}
			if (offline || plun->lun_num == 0) {
				if (plun->lun_state & FCP_LUN_DISAPPEARED)  {
					plun->lun_state &= ~FCP_LUN_DISAPPEARED;
					mutex_exit(&ptgt->tgt_mutex);
					fcp_log(CE_NOTE, pptr->port_dip,
					    "!Lun=%x for target=%x reappeared",
					    plun->lun_num, ptgt->tgt_d_id);
					mutex_enter(&ptgt->tgt_mutex);
				}
			}
		}
	}

	/* tmp_cnt counts the per-LUN discovery steps still outstanding. */
	ptgt->tgt_tmp_cnt = nluns_claimed ? nluns_claimed : 1;
	mutex_exit(&ptgt->tgt_mutex);

	FCP_TRACE(fcp_logq, pptr->port_instbuf,
	    fcp_trace, FCP_BUF_LEVEL_5, 0,
	    "fcp_handle_reportlun: port=%d, tgt D_ID=0x%x, %d LUN(s)",
	    pptr->port_instance, ptgt->tgt_d_id, nluns_claimed);

	/* scan each lun */
	for (i = 0; i < nluns_claimed; i++) {
		uchar_t	*lun_string;

		lun_string = (uchar_t *)&(report_lun->lun_string[i]);

		FCP_TRACE(fcp_logq, pptr->port_instbuf,
		    fcp_trace, FCP_BUF_LEVEL_5, 0,
		    "handle_reportlun: d_id=%x, LUN ind=%d, LUN=%d,"
		    " addr=0x%x", ptgt->tgt_d_id, i, lun_string[1],
		    lun_string[0]);

		switch (lun_string[0] & 0xC0) {
		case FCP_LUN_ADDRESSING:
		case FCP_PD_ADDRESSING:
			lun_num = ((lun_string[0] & 0x3F) << 8) | lun_string[1];

			/* We will skip masked LUNs because of the blacklist. */
			if (fcp_lun_blacklist != NULL) {
				mutex_enter(&ptgt->tgt_mutex);
				if (fcp_should_mask(&ptgt->tgt_port_wwn,
				    lun_num) == TRUE) {
					ptgt->tgt_lun_cnt--;
					mutex_exit(&ptgt->tgt_mutex);
					break;
				}
				mutex_exit(&ptgt->tgt_mutex);
			}

			/* see if this LUN is already allocated */
			if ((plun = fcp_get_lun(ptgt, lun_num)) == NULL) {
				plun = fcp_alloc_lun(ptgt);
				if (plun == NULL) {
					fcp_log(CE_NOTE, pptr->port_dip,
					    "!Lun allocation failed"
					    " target=%x lun=%x",
					    ptgt->tgt_d_id, lun_num);
					break;
				}
			}

			mutex_enter(&plun->lun_tgt->tgt_mutex);
			/* convert to LUN */
			plun->lun_addr.ent_addr_0 =
			    BE_16(*(uint16_t *)&(lun_string[0]));
			plun->lun_addr.ent_addr_1 =
			    BE_16(*(uint16_t *)&(lun_string[2]));
			plun->lun_addr.ent_addr_2 =
			    BE_16(*(uint16_t *)&(lun_string[4]));
			plun->lun_addr.ent_addr_3 =
			    BE_16(*(uint16_t *)&(lun_string[6]));

			plun->lun_num = lun_num;
			plun->lun_state |= FCP_LUN_BUSY | FCP_LUN_MARK;
			plun->lun_state &= ~FCP_LUN_OFFLINE;
			mutex_exit(&plun->lun_tgt->tgt_mutex);

			/* Retrieve the rscn count (if a valid one exists) */
			if (icmd->ipkt_fpkt->pkt_ulp_rscn_infop != NULL) {
				rscn_count = ((fc_ulp_rscn_info_t *)
				    (icmd->ipkt_fpkt->pkt_ulp_rscn_infop))->
				    ulp_rscn_count;
			} else {
				rscn_count = FC_INVALID_RSCN_COUNT;
			}

			/*
			 * Kick off INQUIRY for this LUN.  On success the
			 * new packet owns the tmp_cnt reference, so skip
			 * the fcp_call_finish_init() at the loop bottom.
			 */
			if (fcp_send_scsi(plun, SCMD_INQUIRY, SUN_INQSIZE,
			    icmd->ipkt_link_cnt, icmd->ipkt_change_cnt,
			    icmd->ipkt_cause, rscn_count) != DDI_SUCCESS) {
				mutex_enter(&pptr->port_mutex);
				mutex_enter(&plun->lun_tgt->tgt_mutex);
				if (!FCP_STATE_CHANGED(pptr, ptgt, icmd)) {
					fcp_log(CE_NOTE, pptr->port_dip,
					    "!failed to send INQUIRY"
					    " target=%x lun=%x",
					    ptgt->tgt_d_id, plun->lun_num);
				} else {
					FCP_TRACE(fcp_logq,
					    pptr->port_instbuf, fcp_trace,
					    FCP_BUF_LEVEL_5, 0,
					    "fcp_handle_reportlun,2: state"
					    " change occured for D_ID=0x%x",
					    ptgt->tgt_d_id);
				}
				mutex_exit(&plun->lun_tgt->tgt_mutex);
				mutex_exit(&pptr->port_mutex);
			} else {
				continue;
			}
			break;

		case FCP_VOLUME_ADDRESSING:
			/* FALLTHROUGH */
		default:
			fcp_log(CE_WARN, NULL,
			    "!Unsupported LUN Addressing method %x "
			    "in response to REPORT_LUN", lun_string[0]);
			break;
		}

		/*
		 * each time through this loop we should decrement
		 * the tmp_cnt by one -- since we go through this loop
		 * one time for each LUN, the tmp_cnt should never be <=0
		 */
		(void) fcp_call_finish_init(pptr, ptgt, icmd->ipkt_link_cnt,
		    icmd->ipkt_change_cnt, icmd->ipkt_cause);
	}

	/* An empty LUN list still consumes the single tmp_cnt reference. */
	if (i == 0) {
		fcp_log(CE_WARN, pptr->port_dip,
		    "!FCP: target=%x reported NO Luns", ptgt->tgt_d_id);
		(void) fcp_call_finish_init(pptr, ptgt, icmd->ipkt_link_cnt,
		    icmd->ipkt_change_cnt, icmd->ipkt_cause);
	}

	kmem_free(report_lun, len);
	fcp_icmd_free(pptr, icmd);
}
7774 
7775 
7776 /*
7777  * called internally to return a LUN given a target and a LUN number
7778  */
7779 static struct fcp_lun *
7780 fcp_get_lun(struct fcp_tgt *ptgt, uint16_t lun_num)
7781 {
7782 	struct fcp_lun	*plun;
7783 
7784 	mutex_enter(&ptgt->tgt_mutex);
7785 	for (plun = ptgt->tgt_lun; plun != NULL; plun = plun->lun_next) {
7786 		if (plun->lun_num == lun_num) {
7787 			mutex_exit(&ptgt->tgt_mutex);
7788 			return (plun);
7789 		}
7790 	}
7791 	mutex_exit(&ptgt->tgt_mutex);
7792 
7793 	return (NULL);
7794 }
7795 
7796 
7797 /*
7798  * handle finishing one target for fcp_finish_init
7799  *
7800  * return true (non-zero) if we want finish_init to continue with the
7801  * next target
7802  *
7803  * called with the port mutex held
7804  */
7805 /*ARGSUSED*/
static int
fcp_finish_tgt(struct fcp_port *pptr, struct fcp_tgt *ptgt,
    int link_cnt, int tgt_cnt, int cause)
{
	int	rval = 1;	/* default: tell caller to continue */
	ASSERT(pptr != NULL);
	ASSERT(ptgt != NULL);

	FCP_TRACE(fcp_logq, pptr->port_instbuf,
	    fcp_trace, FCP_BUF_LEVEL_5, 0,
	    "finish_tgt: D_ID/state = 0x%x/0x%x", ptgt->tgt_d_id,
	    ptgt->tgt_state);

	ASSERT(mutex_owned(&pptr->port_mutex));

	/*
	 * Verify the generation counts recorded when discovery started
	 * still match; a mismatch means this pass is stale.
	 */
	if ((pptr->port_link_cnt != link_cnt) ||
	    (tgt_cnt && ptgt->tgt_change_cnt != tgt_cnt)) {
		/*
		 * oh oh -- another link reset or target change
		 * must have occurred while we are in here
		 */
		FCP_TGT_TRACE(ptgt, tgt_cnt, FCP_TGT_TRACE_23);

		return (0);
	} else {
		FCP_TGT_TRACE(ptgt, tgt_cnt, FCP_TGT_TRACE_24);
	}

	mutex_enter(&ptgt->tgt_mutex);

	if (!(ptgt->tgt_state & FCP_TGT_OFFLINE)) {
		/*
		 * tgt is not offline -- is it marked (i.e. needs
		 * to be offlined) ??
		 */
		if (ptgt->tgt_state & FCP_TGT_MARK) {
			/*
			 * this target not offline *and*
			 * marked
			 */
			ptgt->tgt_state &= ~FCP_TGT_MARK;
			rval = fcp_offline_target(pptr, ptgt, link_cnt,
			    tgt_cnt, 0, 0);
		} else {
			ptgt->tgt_state &= ~FCP_TGT_BUSY;

			/* create the LUNs */
			if (ptgt->tgt_node_state != FCP_TGT_NODE_ON_DEMAND) {
				ptgt->tgt_node_state = FCP_TGT_NODE_PRESENT;
				fcp_create_luns(ptgt, link_cnt, tgt_cnt,
				    cause);
				ptgt->tgt_device_created = 1;
			} else {
				/* on-demand node: just refresh LUN state */
				fcp_update_tgt_state(ptgt, FCP_RESET,
				    FCP_LUN_BUSY);
			}
		}
	}

	mutex_exit(&ptgt->tgt_mutex);

	return (rval);
}
7869 
7870 
7871 /*
7872  * this routine is called to finish port initialization
7873  *
7874  * Each port has a "temp" counter -- when a state change happens (e.g.
7875  * port online), the temp count is set to the number of devices in the map.
7876  * Then, as each device gets "discovered", the temp counter is decremented
7877  * by one.  When this count reaches zero we know that all of the devices
7878  * in the map have been discovered (or an error has occurred), so we can
7879  * then finish initialization -- which is done by this routine (well, this
 * and fcp_finish_tgt())
7881  *
7882  * acquires and releases the global mutex
7883  *
7884  * called with the port mutex owned
7885  */
7886 static void
7887 fcp_finish_init(struct fcp_port *pptr)
7888 {
7889 #ifdef	DEBUG
7890 	bzero(pptr->port_finish_stack, sizeof (pptr->port_finish_stack));
7891 	pptr->port_finish_depth = getpcstack(pptr->port_finish_stack,
7892 	    FCP_STACK_DEPTH);
7893 #endif /* DEBUG */
7894 
7895 	ASSERT(mutex_owned(&pptr->port_mutex));
7896 
7897 	FCP_TRACE(fcp_logq, pptr->port_instbuf,
7898 	    fcp_trace, FCP_BUF_LEVEL_2, 0, "finish_init:"
7899 	    " entering; ipkt count=%d", pptr->port_ipkt_cnt);
7900 
7901 	if ((pptr->port_state & FCP_STATE_ONLINING) &&
7902 	    !(pptr->port_state & (FCP_STATE_SUSPENDED |
7903 	    FCP_STATE_DETACHING | FCP_STATE_POWER_DOWN))) {
7904 		pptr->port_state &= ~FCP_STATE_ONLINING;
7905 		pptr->port_state |= FCP_STATE_ONLINE;
7906 	}
7907 
7908 	/* Wake up threads waiting on config done */
7909 	cv_broadcast(&pptr->port_config_cv);
7910 }
7911 
7912 
7913 /*
7914  * called from fcp_finish_init to create the LUNs for a target
7915  *
7916  * called with the port mutex owned
7917  */
static void
fcp_create_luns(struct fcp_tgt *ptgt, int link_cnt, int tgt_cnt, int cause)
{
	struct fcp_lun	*plun;
	struct fcp_port	*pptr;
	child_info_t		*cip = NULL;	/* no existing child info */

	ASSERT(ptgt != NULL);
	ASSERT(mutex_owned(&ptgt->tgt_mutex));

	pptr = ptgt->tgt_port;

	ASSERT(pptr != NULL);

	/* scan all LUNs for this target */
	for (plun = ptgt->tgt_lun; plun != NULL; plun = plun->lun_next) {
		if (plun->lun_state & FCP_LUN_OFFLINE) {
			continue;
		}

		/* A marked LUN was not seen in discovery: offline it. */
		if (plun->lun_state & FCP_LUN_MARK) {
			FCP_TRACE(fcp_logq, pptr->port_instbuf,
			    fcp_trace, FCP_BUF_LEVEL_2, 0,
			    "fcp_create_luns: offlining marked LUN!");
			fcp_offline_lun(plun, link_cnt, tgt_cnt, 1, 0);
			continue;
		}

		plun->lun_state &= ~FCP_LUN_BUSY;

		/*
		 * There are conditions in which FCP_LUN_INIT flag is cleared
		 * but we have a valid plun->lun_cip. To cover this case also
		 * CLEAR_BUSY whenever we have a valid lun_cip.
		 */
		if (plun->lun_mpxio && plun->lun_cip &&
		    (!fcp_pass_to_hp(pptr, plun, plun->lun_cip,
		    FCP_MPXIO_PATH_CLEAR_BUSY, link_cnt, tgt_cnt,
		    0, 0))) {
			FCP_TRACE(fcp_logq, pptr->port_instbuf,
			    fcp_trace, FCP_BUF_LEVEL_2, 0,
			    "fcp_create_luns: enable lun %p failed!",
			    plun);
		}

		/* Already initialized and unchanged: nothing to do. */
		if (plun->lun_state & FCP_LUN_INIT &&
		    !(plun->lun_state & FCP_LUN_CHANGED)) {
			continue;
		}

		/* User-created node: onlining is handled elsewhere. */
		if (cause == FCP_CAUSE_USER_CREATE) {
			continue;
		}

		FCP_TRACE(fcp_logq, pptr->port_instbuf,
		    fcp_trace, FCP_BUF_LEVEL_6, 0,
		    "create_luns: passing ONLINE elem to HP thread");

		/*
		 * If lun has changed, prepare for offlining the old path.
		 * Do not offline the old path right now, since it may be
		 * still opened.
		 */
		if (plun->lun_cip && (plun->lun_state & FCP_LUN_CHANGED)) {
			fcp_prepare_offline_lun(plun, link_cnt, tgt_cnt);
		}

		/* pass an ONLINE element to the hotplug thread */
		if (!fcp_pass_to_hp(pptr, plun, cip, FCP_ONLINE,
		    link_cnt, tgt_cnt, NDI_ONLINE_ATTACH, 0)) {

			/*
			 * We can not synchronous attach (i.e pass
			 * NDI_ONLINE_ATTACH) here as we might be
			 * coming from an interrupt or callback
			 * thread.
			 */
			if (!fcp_pass_to_hp(pptr, plun, cip, FCP_ONLINE,
			    link_cnt, tgt_cnt, 0, 0)) {
				fcp_log(CE_CONT, pptr->port_dip,
				    "Can not ONLINE LUN; D_ID=%x, LUN=%x\n",
				    plun->lun_tgt->tgt_d_id, plun->lun_num);
			}
		}
	}
}
8004 
8005 
8006 /*
8007  * function to online/offline devices
8008  */
8009 static int
8010 fcp_trigger_lun(struct fcp_lun *plun, child_info_t *cip, int online,
8011     int lcount, int tcount, int flags)
8012 {
8013 	int			rval = NDI_FAILURE;
8014 	int			circ;
8015 	child_info_t		*ccip;
8016 	struct fcp_port		*pptr = plun->lun_tgt->tgt_port;
8017 	int			is_mpxio = pptr->port_mpxio;
8018 	dev_info_t		*cdip, *pdip;
8019 	char			*devname;
8020 
8021 	FCP_TRACE(fcp_logq, pptr->port_instbuf,
8022 	    fcp_trace, FCP_BUF_LEVEL_2, 0,
8023 	    "fcp_trigger_lun: plun=%p target=%x lun=%d cip=%p what=%x "
8024 	    "flags=%x mpxio=%x\n",
8025 	    plun, LUN_TGT->tgt_d_id, plun->lun_num, cip, online, flags,
8026 	    plun->lun_mpxio);
8027 
8028 	/*
8029 	 * lun_mpxio needs checking here because we can end up in a race
8030 	 * condition where this task has been dispatched while lun_mpxio is
8031 	 * set, but an earlier FCP_ONLINE task for the same LUN tried to
8032 	 * enable MPXIO for the LUN, but was unable to, and hence cleared
8033 	 * the flag. We rely on the serialization of the tasks here. We return
8034 	 * NDI_SUCCESS so any callers continue without reporting spurious
8035 	 * errors, and the still think we're an MPXIO LUN.
8036 	 */
8037 
8038 	if (online == FCP_MPXIO_PATH_CLEAR_BUSY ||
8039 	    online == FCP_MPXIO_PATH_SET_BUSY) {
8040 		if (plun->lun_mpxio) {
8041 			rval = fcp_update_mpxio_path(plun, cip, online);
8042 		} else {
8043 			rval = NDI_SUCCESS;
8044 		}
8045 		return (rval);
8046 	}
8047 
8048 	/*
8049 	 * Explicit devfs_clean() due to ndi_devi_offline() not
8050 	 * executing devfs_clean() if parent lock is held.
8051 	 */
8052 	ASSERT(!servicing_interrupt());
8053 	if (online == FCP_OFFLINE) {
8054 		if (plun->lun_mpxio == 0) {
8055 			if (plun->lun_cip == cip) {
8056 				cdip = DIP(plun->lun_cip);
8057 			} else {
8058 				cdip = DIP(cip);
8059 			}
8060 		} else if ((plun->lun_cip == cip) && plun->lun_cip) {
8061 			cdip = mdi_pi_get_client(PIP(plun->lun_cip));
8062 		} else if ((plun->lun_cip != cip) && cip) {
8063 			/*
8064 			 * This means a DTYPE/GUID change, we shall get the
8065 			 * dip of the old cip instead of the current lun_cip.
8066 			 */
8067 			cdip = mdi_pi_get_client(PIP(cip));
8068 		}
8069 		if (cdip) {
8070 			if (i_ddi_devi_attached(cdip)) {
8071 				pdip = ddi_get_parent(cdip);
8072 				devname = kmem_alloc(MAXNAMELEN + 1, KM_SLEEP);
8073 				ndi_devi_enter(pdip, &circ);
8074 				(void) ddi_deviname(cdip, devname);
8075 				ndi_devi_exit(pdip, circ);
8076 				/*
8077 				 * Release parent lock before calling
8078 				 * devfs_clean().
8079 				 */
8080 				rval = devfs_clean(pdip, devname + 1,
8081 				    DV_CLEAN_FORCE);
8082 				kmem_free(devname, MAXNAMELEN + 1);
8083 				/*
8084 				 * Return if devfs_clean() fails for
8085 				 * non-MPXIO case.
8086 				 * For MPXIO case, another path could be
8087 				 * offlined.
8088 				 */
8089 				if (rval && plun->lun_mpxio == 0) {
8090 					FCP_TRACE(fcp_logq, pptr->port_instbuf,
8091 					    fcp_trace, FCP_BUF_LEVEL_3, 0,
8092 					    "fcp_trigger_lun: devfs_clean "
8093 					    "failed rval=%x  dip=%p",
8094 					    rval, pdip);
8095 					return (NDI_FAILURE);
8096 				}
8097 			}
8098 		}
8099 	}
8100 
8101 	if (fc_ulp_busy_port(pptr->port_fp_handle) != 0) {
8102 		return (NDI_FAILURE);
8103 	}
8104 
8105 	if (is_mpxio) {
8106 		mdi_devi_enter(pptr->port_dip, &circ);
8107 	} else {
8108 		ndi_devi_enter(pptr->port_dip, &circ);
8109 	}
8110 
8111 	mutex_enter(&pptr->port_mutex);
8112 	mutex_enter(&plun->lun_mutex);
8113 
8114 	if (online == FCP_ONLINE) {
8115 		ccip = fcp_get_cip(plun, cip, lcount, tcount);
8116 		if (ccip == NULL) {
8117 			goto fail;
8118 		}
8119 	} else {
8120 		if (fcp_is_child_present(plun, cip) != FC_SUCCESS) {
8121 			goto fail;
8122 		}
8123 		ccip = cip;
8124 	}
8125 
8126 	if (online == FCP_ONLINE) {
8127 		rval = fcp_online_child(plun, ccip, lcount, tcount, flags,
8128 		    &circ);
8129 		fc_ulp_log_device_event(pptr->port_fp_handle,
8130 		    FC_ULP_DEVICE_ONLINE);
8131 	} else {
8132 		rval = fcp_offline_child(plun, ccip, lcount, tcount, flags,
8133 		    &circ);
8134 		fc_ulp_log_device_event(pptr->port_fp_handle,
8135 		    FC_ULP_DEVICE_OFFLINE);
8136 	}
8137 
8138 fail:	mutex_exit(&plun->lun_mutex);
8139 	mutex_exit(&pptr->port_mutex);
8140 
8141 	if (is_mpxio) {
8142 		mdi_devi_exit(pptr->port_dip, circ);
8143 	} else {
8144 		ndi_devi_exit(pptr->port_dip, circ);
8145 	}
8146 
8147 	fc_ulp_idle_port(pptr->port_fp_handle);
8148 
8149 	return (rval);
8150 }
8151 
8152 
8153 /*
8154  * take a target offline by taking all of its LUNs offline
8155  */
8156 /*ARGSUSED*/
static int
fcp_offline_target(struct fcp_port *pptr, struct fcp_tgt *ptgt,
    int link_cnt, int tgt_cnt, int nowait, int flags)
{
	struct fcp_tgt_elem	*elem;

	ASSERT(mutex_owned(&pptr->port_mutex));
	ASSERT(mutex_owned(&ptgt->tgt_mutex));

	ASSERT(!(ptgt->tgt_state & FCP_TGT_OFFLINE));

	/*
	 * Bail out (returning 0) if the link or target generation counts
	 * show this request is stale, i.e. a newer link/target state
	 * change has superseded it.  A tgt_cnt of zero means "don't
	 * check the target count".
	 */
	if (link_cnt != pptr->port_link_cnt || (tgt_cnt && tgt_cnt !=
	    ptgt->tgt_change_cnt)) {
		/*
		 * tgt_mutex is dropped only so FCP_TGT_TRACE can run
		 * without it held; it is reacquired immediately.
		 */
		mutex_exit(&ptgt->tgt_mutex);
		FCP_TGT_TRACE(ptgt, tgt_cnt, FCP_TGT_TRACE_25);
		mutex_enter(&ptgt->tgt_mutex);

		return (0);
	}

	ptgt->tgt_pd_handle = NULL;
	mutex_exit(&ptgt->tgt_mutex);
	FCP_TGT_TRACE(ptgt, tgt_cnt, FCP_TGT_TRACE_26);
	mutex_enter(&ptgt->tgt_mutex);

	/* default to the current change count if the caller passed zero */
	tgt_cnt = tgt_cnt ? tgt_cnt : ptgt->tgt_change_cnt;

	/*
	 * If tgt_tcap is set and memory is available, queue a deferred
	 * offline element on the port's offline-target list (processed by
	 * the watchdog via fcp_scan_offline_tgts); with nowait == 0 the
	 * offline is delayed by fcp_offline_delay.  Otherwise (no tcap or
	 * allocation failure) offline the target immediately.
	 */
	if (ptgt->tgt_tcap &&
	    (elem = kmem_zalloc(sizeof (*elem), KM_NOSLEEP)) != NULL) {
		elem->flags = flags;
		elem->time = fcp_watchdog_time;
		if (nowait == 0) {
			elem->time += fcp_offline_delay;
		}
		elem->ptgt = ptgt;
		elem->link_cnt = link_cnt;
		elem->tgt_cnt = tgt_cnt;
		elem->next = pptr->port_offline_tgts;
		pptr->port_offline_tgts = elem;
	} else {
		fcp_offline_target_now(pptr, ptgt, link_cnt, tgt_cnt, flags);
	}

	/* 1 == the offline was accepted (queued or done) */
	return (1);
}
8202 
8203 
/*
 * Immediately take a target offline: re-enable fabric relogin for its
 * port WWN, mark the target offline, drop its port device handle and
 * then offline all of its LUNs.  The call order matters: relogin is
 * enabled before the state/handle are cleared and before the LUNs are
 * torn down.
 *
 * Called (and returns) with both the port and target mutexes held.
 */
static void
fcp_offline_target_now(struct fcp_port *pptr, struct fcp_tgt *ptgt,
    int link_cnt, int tgt_cnt, int flags)
{
	ASSERT(mutex_owned(&pptr->port_mutex));
	ASSERT(mutex_owned(&ptgt->tgt_mutex));

	fc_ulp_enable_relogin(pptr->port_fp_handle, &ptgt->tgt_port_wwn);
	ptgt->tgt_state = FCP_TGT_OFFLINE;
	ptgt->tgt_pd_handle = NULL;
	fcp_offline_tgt_luns(ptgt, link_cnt, tgt_cnt, flags);
}
8216 
8217 
8218 static void
8219 fcp_offline_tgt_luns(struct fcp_tgt *ptgt, int link_cnt, int tgt_cnt,
8220     int flags)
8221 {
8222 	struct	fcp_lun	*plun;
8223 
8224 	ASSERT(mutex_owned(&ptgt->tgt_port->port_mutex));
8225 	ASSERT(mutex_owned(&ptgt->tgt_mutex));
8226 
8227 	for (plun = ptgt->tgt_lun; plun != NULL; plun = plun->lun_next) {
8228 		if (!(plun->lun_state & FCP_LUN_OFFLINE)) {
8229 			fcp_offline_lun(plun, link_cnt, tgt_cnt, 1, flags);
8230 		}
8231 	}
8232 }
8233 
8234 
8235 /*
8236  * take a LUN offline
8237  *
8238  * enters and leaves with the target mutex held, releasing it in the process
8239  *
8240  * allocates memory in non-sleep mode
8241  */
8242 static void
8243 fcp_offline_lun(struct fcp_lun *plun, int link_cnt, int tgt_cnt,
8244     int nowait, int flags)
8245 {
8246 	struct fcp_port	*pptr = plun->lun_tgt->tgt_port;
8247 	struct fcp_lun_elem	*elem;
8248 
8249 	ASSERT(plun != NULL);
8250 	ASSERT(mutex_owned(&LUN_TGT->tgt_mutex));
8251 
8252 	if (nowait) {
8253 		fcp_offline_lun_now(plun, link_cnt, tgt_cnt, flags);
8254 		return;
8255 	}
8256 
8257 	if ((elem = kmem_zalloc(sizeof (*elem), KM_NOSLEEP)) != NULL) {
8258 		elem->flags = flags;
8259 		elem->time = fcp_watchdog_time;
8260 		if (nowait == 0) {
8261 			elem->time += fcp_offline_delay;
8262 		}
8263 		elem->plun = plun;
8264 		elem->link_cnt = link_cnt;
8265 		elem->tgt_cnt = plun->lun_tgt->tgt_change_cnt;
8266 		elem->next = pptr->port_offline_luns;
8267 		pptr->port_offline_luns = elem;
8268 	} else {
8269 		fcp_offline_lun_now(plun, link_cnt, tgt_cnt, flags);
8270 	}
8271 }
8272 
8273 
/*
 * Prepare a LUN for offlining: abort all queued (not-yet-issued)
 * commands for it and, for an MPxIO LUN, clear the path-busy state and
 * disable the path at the MDI layer.
 *
 * Called (and returns) with the target mutex held; the mutex is
 * temporarily dropped around the command scan/abort and the MDI path
 * disable, both of which must run without it.
 */
static void
fcp_prepare_offline_lun(struct fcp_lun *plun, int link_cnt, int tgt_cnt)
{
	struct fcp_pkt	*head = NULL;

	ASSERT(mutex_owned(&LUN_TGT->tgt_mutex));

	mutex_exit(&LUN_TGT->tgt_mutex);

	/* pull this LUN's queued commands off the port list and fail them */
	head = fcp_scan_commands(plun);
	if (head != NULL) {
		fcp_abort_commands(head, LUN_PORT);
	}

	mutex_enter(&LUN_TGT->tgt_mutex);

	if (plun->lun_cip && plun->lun_mpxio) {
		/*
		 * Intimate MPxIO lun busy is cleared
		 */
		if (!fcp_pass_to_hp(LUN_PORT, plun, plun->lun_cip,
		    FCP_MPXIO_PATH_CLEAR_BUSY, link_cnt, tgt_cnt,
		    0, 0)) {
			fcp_log(CE_NOTE, LUN_PORT->port_dip,
			    "Can not ENABLE LUN; D_ID=%x, LUN=%x",
			    LUN_TGT->tgt_d_id, plun->lun_num);
		}
		/*
		 * Intimate MPxIO that the lun is now marked for offline
		 */
		mutex_exit(&LUN_TGT->tgt_mutex);
		(void) mdi_pi_disable_path(PIP(plun->lun_cip), DRIVER_DISABLE);
		mutex_enter(&LUN_TGT->tgt_mutex);
	}
}
8309 
/*
 * Offline a LUN immediately: update its state flags, abort its queued
 * commands (via fcp_prepare_offline_lun) and hand an OFFLINE element to
 * the hotplug thread for the actual child teardown.
 *
 * Called (and returns) with the target mutex held; dropped briefly
 * around fcp_update_offline_flags, which takes it itself.
 */
static void
fcp_offline_lun_now(struct fcp_lun *plun, int link_cnt, int tgt_cnt,
    int flags)
{
	ASSERT(mutex_owned(&LUN_TGT->tgt_mutex));

	mutex_exit(&LUN_TGT->tgt_mutex);
	fcp_update_offline_flags(plun);
	mutex_enter(&LUN_TGT->tgt_mutex);

	fcp_prepare_offline_lun(plun, link_cnt, tgt_cnt);

	FCP_TRACE(fcp_logq, LUN_PORT->port_instbuf,
	    fcp_trace, FCP_BUF_LEVEL_4, 0,
	    "offline_lun: passing OFFLINE elem to HP thread");

	/* only LUNs that have a child (dip/pip) need the hotplug pass */
	if (plun->lun_cip) {
		fcp_log(CE_NOTE, LUN_PORT->port_dip,
		    "!offlining lun=%x (trace=%x), target=%x (trace=%x)",
		    plun->lun_num, plun->lun_trace, LUN_TGT->tgt_d_id,
		    LUN_TGT->tgt_trace);

		if (!fcp_pass_to_hp(LUN_PORT, plun, plun->lun_cip, FCP_OFFLINE,
		    link_cnt, tgt_cnt, flags, 0)) {
			fcp_log(CE_CONT, LUN_PORT->port_dip,
			    "Can not OFFLINE LUN; D_ID=%x, LUN=%x\n",
			    LUN_TGT->tgt_d_id, plun->lun_num);
		}
	}
}
8340 
/*
 * Watchdog-driven scan of the port's deferred offline-LUN list.  Each
 * element whose deadline has passed is removed from the list; if the
 * link/target generation counts still match (the request is not stale)
 * and the LUN is not already offline, the LUN is offlined now.
 *
 * Called with the port mutex held.
 */
static void
fcp_scan_offline_luns(struct fcp_port *pptr)
{
	struct fcp_lun_elem	*elem;
	struct fcp_lun_elem	*prev;
	struct fcp_lun_elem	*next;

	ASSERT(MUTEX_HELD(&pptr->port_mutex));

	prev = NULL;
	elem = pptr->port_offline_luns;
	while (elem) {
		next = elem->next;
		if (elem->time <= fcp_watchdog_time) {
			int			changed = 1;
			struct fcp_tgt	*ptgt = elem->plun->lun_tgt;

			mutex_enter(&ptgt->tgt_mutex);
			if (pptr->port_link_cnt == elem->link_cnt &&
			    ptgt->tgt_change_cnt == elem->tgt_cnt) {
				changed = 0;
			}

			/*
			 * NOTE(review): lun_state is tested against
			 * FCP_TGT_OFFLINE, a tgt_state flag, rather than
			 * FCP_LUN_OFFLINE.  This is only correct if the
			 * two flags happen to share the same bit value —
			 * verify against fcpvar.h.
			 */
			if (!changed &&
			    !(elem->plun->lun_state & FCP_TGT_OFFLINE)) {
				fcp_offline_lun_now(elem->plun,
				    elem->link_cnt, elem->tgt_cnt, elem->flags);
			}
			mutex_exit(&ptgt->tgt_mutex);

			kmem_free(elem, sizeof (*elem));

			/* unlink the expired element from the list */
			if (prev) {
				prev->next = next;
			} else {
				pptr->port_offline_luns = next;
			}
		} else {
			prev = elem;
		}
		elem = next;
	}
}
8384 
8385 
/*
 * Watchdog-driven scan of the port's deferred offline-target list, the
 * target-level analogue of fcp_scan_offline_luns().  Expired elements
 * are unlinked and freed; non-stale, not-yet-offline targets are taken
 * offline immediately.
 *
 * Called with the port mutex held.
 */
static void
fcp_scan_offline_tgts(struct fcp_port *pptr)
{
	struct fcp_tgt_elem	*elem;
	struct fcp_tgt_elem	*prev;
	struct fcp_tgt_elem	*next;

	ASSERT(MUTEX_HELD(&pptr->port_mutex));

	prev = NULL;
	elem = pptr->port_offline_tgts;
	while (elem) {
		next = elem->next;
		if (elem->time <= fcp_watchdog_time) {
			int			changed = 1;
			struct fcp_tgt	*ptgt = elem->ptgt;

			/*
			 * NOTE(review): tgt_change_cnt is read here before
			 * tgt_mutex is acquired, unlike the LUN variant
			 * which checks under the mutex — presumably safe
			 * because the port mutex serializes changes, but
			 * worth confirming.
			 */
			if (ptgt->tgt_change_cnt == elem->tgt_cnt) {
				changed = 0;
			}

			mutex_enter(&ptgt->tgt_mutex);
			if (!changed && !(ptgt->tgt_state &
			    FCP_TGT_OFFLINE)) {
				fcp_offline_target_now(pptr,
				    ptgt, elem->link_cnt, elem->tgt_cnt,
				    elem->flags);
			}
			mutex_exit(&ptgt->tgt_mutex);

			kmem_free(elem, sizeof (*elem));

			/* unlink the expired element from the list */
			if (prev) {
				prev->next = next;
			} else {
				pptr->port_offline_tgts = next;
			}
		} else {
			prev = elem;
		}
		elem = next;
	}
}
8429 
8430 
/*
 * Mark a LUN offline in its state flags and, if it has an initialized
 * child, fire the FCAL_REMOVE_EVENT NDI event on the child's dip so
 * interested consumers learn of the removal.
 *
 * Acquires and releases the target and LUN mutexes internally; note the
 * asymmetric unlock order — the target mutex is dropped early in the
 * event branch so the NDI event calls run without it held.
 */
static void
fcp_update_offline_flags(struct fcp_lun *plun)
{
	struct fcp_port	*pptr = LUN_PORT;
	ASSERT(plun != NULL);

	mutex_enter(&LUN_TGT->tgt_mutex);
	plun->lun_state |= FCP_LUN_OFFLINE;
	plun->lun_state &= ~(FCP_LUN_INIT | FCP_LUN_BUSY | FCP_LUN_MARK);

	mutex_enter(&plun->lun_mutex);
	if (plun->lun_cip && plun->lun_state & FCP_SCSI_LUN_TGT_INIT) {
		dev_info_t *cdip = NULL;

		mutex_exit(&LUN_TGT->tgt_mutex);

		/* resolve the child dip: direct for non-MPxIO, via MDI else */
		if (plun->lun_mpxio == 0) {
			cdip = DIP(plun->lun_cip);
		} else if (plun->lun_cip) {
			cdip = mdi_pi_get_client(PIP(plun->lun_cip));
		}

		mutex_exit(&plun->lun_mutex);
		if (cdip) {
			(void) ndi_event_retrieve_cookie(
			    pptr->port_ndi_event_hdl, cdip, FCAL_REMOVE_EVENT,
			    &fcp_remove_eid, NDI_EVENT_NOPASS);
			(void) ndi_event_run_callbacks(
			    pptr->port_ndi_event_hdl, cdip,
			    fcp_remove_eid, NULL);
		}
	} else {
		mutex_exit(&plun->lun_mutex);
		mutex_exit(&LUN_TGT->tgt_mutex);
	}
}
8467 
8468 
8469 /*
8470  * Scan all of the command pkts for this port, moving pkts that
8471  * match our LUN onto our own list (headed by "head")
8472  */
static struct fcp_pkt *
fcp_scan_commands(struct fcp_lun *plun)
{
	struct fcp_port	*pptr = LUN_PORT;

	struct fcp_pkt	*cmd = NULL;	/* pkt cmd ptr */
	struct fcp_pkt	*ncmd = NULL;	/* next pkt ptr */
	struct fcp_pkt	*pcmd = NULL;	/* the previous command */

	struct fcp_pkt	*head = NULL;	/* head of our list */
	struct fcp_pkt	*tail = NULL;	/* tail of our list */

	int			cmds_found = 0;

	/* walk the port's queued-packet list under the pkt mutex */
	mutex_enter(&pptr->port_pkt_mutex);
	for (cmd = pptr->port_pkt_head; cmd != NULL; cmd = ncmd) {
		struct fcp_lun *tlun =
		    ADDR2LUN(&cmd->cmd_pkt->pkt_address);

		ncmd = cmd->cmd_next;	/* set next command */

		/*
		 * if this pkt is for a different LUN  or the
		 * command is sent down, skip it.
		 */
		if (tlun != plun || cmd->cmd_state == FCP_PKT_ISSUED ||
		    (cmd->cmd_pkt->pkt_flags & FLAG_NOINTR)) {
			pcmd = cmd;
			continue;
		}
		cmds_found++;
		/* unlink cmd from the port list (head or interior) */
		if (pcmd != NULL) {
			ASSERT(pptr->port_pkt_head != cmd);
			pcmd->cmd_next = cmd->cmd_next;
		} else {
			ASSERT(cmd == pptr->port_pkt_head);
			pptr->port_pkt_head = cmd->cmd_next;
		}

		/* fix up the port list tail if we removed the last entry */
		if (cmd == pptr->port_pkt_tail) {
			pptr->port_pkt_tail = pcmd;
			if (pcmd) {
				pcmd->cmd_next = NULL;
			}
		}

		/* append cmd to our private list */
		if (head == NULL) {
			head = tail = cmd;
		} else {
			ASSERT(tail != NULL);

			tail->cmd_next = cmd;
			tail = cmd;
		}
		cmd->cmd_next = NULL;
	}
	mutex_exit(&pptr->port_pkt_mutex);

	FCP_DTRACE(fcp_logq, pptr->port_instbuf,
	    fcp_trace, FCP_BUF_LEVEL_8, 0,
	    "scan commands: %d cmd(s) found", cmds_found);

	return (head);
}
8537 
8538 
8539 /*
8540  * Abort all the commands in the command queue
8541  */
static void
fcp_abort_commands(struct fcp_pkt *head, struct fcp_port *pptr)
{
	struct fcp_pkt	*cmd = NULL;	/* pkt cmd ptr */
	struct	fcp_pkt	*ncmd = NULL;	/* next pkt ptr */

	ASSERT(mutex_owned(&pptr->port_mutex));

	/* scan through the pkts and invalid them */
	for (cmd = head; cmd != NULL; cmd = ncmd) {
		struct scsi_pkt *pkt = cmd->cmd_pkt;

		/* save the link first: the completion may recycle cmd */
		ncmd = cmd->cmd_next;
		ASSERT(pkt != NULL);

		/*
		 * The lun is going to be marked offline. Indicate
		 * the target driver not to requeue or retry this command
		 * as the device is going to be offlined pretty soon.
		 */
		pkt->pkt_reason = CMD_DEV_GONE;
		pkt->pkt_statistics = 0;
		pkt->pkt_state = 0;

		/* reset cmd flags/state */
		cmd->cmd_flags &= ~CFLAG_IN_QUEUE;
		cmd->cmd_state = FCP_PKT_IDLE;

		/*
		 * ensure we have a packet completion routine,
		 * then call it.
		 */
		ASSERT(pkt->pkt_comp != NULL);

		/* drop the port mutex across the completion callback */
		mutex_exit(&pptr->port_mutex);
		fcp_post_callback(cmd);
		mutex_enter(&pptr->port_mutex);
	}
}
8581 
8582 
8583 /*
8584  * the pkt_comp callback for command packets
8585  */
static void
fcp_cmd_callback(fc_packet_t *fpkt)
{
	struct fcp_pkt *cmd = (struct fcp_pkt *)fpkt->pkt_ulp_private;
	struct scsi_pkt *pkt = cmd->cmd_pkt;
	struct fcp_port *pptr = ADDR2FCP(&pkt->pkt_address);

	ASSERT(cmd->cmd_state != FCP_PKT_IDLE);

	/*
	 * Duplicate of the ASSERT above so the double-completion check
	 * still fires (as a panic) on non-DEBUG kernels where ASSERT
	 * compiles away.
	 */
	if (cmd->cmd_state == FCP_PKT_IDLE) {
		cmn_err(CE_PANIC, "Packet already completed %p",
		    (void *)cmd);
	}

	/*
	 * Watch thread should be freeing the packet, ignore the pkt.
	 */
	if (cmd->cmd_state == FCP_PKT_ABORTING) {
		fcp_log(CE_CONT, pptr->port_dip,
		    "!FCP: Pkt completed while aborting\n");
		return;
	}
	cmd->cmd_state = FCP_PKT_IDLE;

	/* translate the FC transport status into SCSI pkt fields */
	fcp_complete_pkt(fpkt);

#ifdef	DEBUG
	mutex_enter(&pptr->port_pkt_mutex);
	pptr->port_npkts--;
	mutex_exit(&pptr->port_pkt_mutex);
#endif /* DEBUG */

	/* hand the finished packet back to the target driver */
	fcp_post_callback(cmd);
}
8620 
8621 
/*
 * Translate the transport-level completion status carried in fpkt into
 * the SCSI pkt_state/pkt_reason/pkt_statistics/pkt_resid fields the
 * target driver understands, copying in FCP response and sense data as
 * needed.  On certain sense keys (REPORT LUNS changed / LU not
 * supported) it also schedules a LUN reconfiguration for the target.
 */
static void
fcp_complete_pkt(fc_packet_t *fpkt)
{
	int			error = 0;
	struct fcp_pkt	*cmd = (struct fcp_pkt *)
	    fpkt->pkt_ulp_private;
	struct scsi_pkt		*pkt = cmd->cmd_pkt;
	struct fcp_port		*pptr = ADDR2FCP(&pkt->pkt_address);
	struct fcp_lun	*plun;
	struct fcp_tgt	*ptgt;
	struct fcp_rsp		*rsp;
	struct scsi_address	save;

	/*
	 * 'save' is only used by the ASSERT at the bottom, so it is only
	 * assigned under DEBUG (the ASSERT compiles away otherwise).
	 */
#ifdef	DEBUG
	save = pkt->pkt_address;
#endif /* DEBUG */

	rsp = (struct fcp_rsp *)cmd->cmd_fcp_rsp;

	if (fpkt->pkt_state == FC_PKT_SUCCESS) {
		/* copy in the FCP response IU if the FCA used DVMA space */
		if (pptr->port_fcp_dma != FC_NO_DVMA_SPACE) {
			FCP_CP_IN(fpkt->pkt_resp, rsp, fpkt->pkt_resp_acc,
			    sizeof (struct fcp_rsp));
		}

		pkt->pkt_state = STATE_GOT_BUS | STATE_GOT_TARGET |
		    STATE_SENT_CMD | STATE_GOT_STATUS;

		pkt->pkt_resid = 0;

		if (cmd->cmd_pkt->pkt_numcookies) {
			pkt->pkt_state |= STATE_XFERRED_DATA;
			if (fpkt->pkt_data_resid) {
				error++;
			}
		}

		if ((pkt->pkt_scbp != NULL) && ((*(pkt->pkt_scbp) =
		    rsp->fcp_u.fcp_status.scsi_status) != STATUS_GOOD)) {
			/*
			 * The next two checks make sure that if there
			 * is no sense data or a valid response and
			 * the command came back with check condition,
			 * the command should be retried.
			 */
			if (!rsp->fcp_u.fcp_status.rsp_len_set &&
			    !rsp->fcp_u.fcp_status.sense_len_set) {
				pkt->pkt_state &= ~STATE_XFERRED_DATA;
				pkt->pkt_resid = cmd->cmd_dmacount;
			}
		}

		/* fully clean completion: nothing more to decode */
		if ((error | rsp->fcp_u.i_fcp_status | rsp->fcp_resid) == 0) {
			return;
		}

		plun = ADDR2LUN(&pkt->pkt_address);
		ptgt = plun->lun_tgt;
		ASSERT(ptgt != NULL);

		/*
		 * Update the transfer resid, if appropriate
		 */
		if (rsp->fcp_u.fcp_status.resid_over ||
		    rsp->fcp_u.fcp_status.resid_under) {
			pkt->pkt_resid = rsp->fcp_resid;
		}

		/*
		 * First see if we got a FCP protocol error.
		 */
		if (rsp->fcp_u.fcp_status.rsp_len_set) {
			struct fcp_rsp_info	*bep;
			bep = (struct fcp_rsp_info *)(cmd->cmd_fcp_rsp +
			    sizeof (struct fcp_rsp));

			if (fcp_validate_fcp_response(rsp, pptr) !=
			    FC_SUCCESS) {
				pkt->pkt_reason = CMD_CMPLT;
				*(pkt->pkt_scbp) = STATUS_CHECK;

				fcp_log(CE_WARN, pptr->port_dip,
				    "!SCSI command to d_id=0x%x lun=0x%x"
				    " failed, Bad FCP response values:"
				    " rsvd1=%x, rsvd2=%x, sts-rsvd1=%x,"
				    " sts-rsvd2=%x, rsplen=%x, senselen=%x",
				    ptgt->tgt_d_id, plun->lun_num,
				    rsp->reserved_0, rsp->reserved_1,
				    rsp->fcp_u.fcp_status.reserved_0,
				    rsp->fcp_u.fcp_status.reserved_1,
				    rsp->fcp_response_len, rsp->fcp_sense_len);

				return;
			}

			if (pptr->port_fcp_dma != FC_NO_DVMA_SPACE) {
				FCP_CP_IN(fpkt->pkt_resp +
				    sizeof (struct fcp_rsp), bep,
				    fpkt->pkt_resp_acc,
				    sizeof (struct fcp_rsp_info));
			}

			if (bep->rsp_code != FCP_NO_FAILURE) {
				child_info_t	*cip;

				pkt->pkt_reason = CMD_TRAN_ERR;

				mutex_enter(&plun->lun_mutex);
				cip = plun->lun_cip;
				mutex_exit(&plun->lun_mutex);

				FCP_TRACE(fcp_logq, pptr->port_instbuf,
				    fcp_trace, FCP_BUF_LEVEL_2, 0,
				    "FCP response error on cmd=%p"
				    " target=0x%x, cip=%p", cmd,
				    ptgt->tgt_d_id, cip);
			}
		}

		/*
		 * See if we got a SCSI error with sense data
		 */
		if (rsp->fcp_u.fcp_status.sense_len_set) {
			uchar_t				rqlen;
			caddr_t				sense_from;
			child_info_t			*cip;
			timeout_id_t			tid;
			struct scsi_arq_status		*arq;
			struct scsi_extended_sense	*sense_to;

			arq = (struct scsi_arq_status *)pkt->pkt_scbp;
			sense_to = &arq->sts_sensedata;

			rqlen = (uchar_t)min(rsp->fcp_sense_len,
			    sizeof (struct scsi_extended_sense));

			/* sense data follows the rsp info in the response IU */
			sense_from = (caddr_t)fpkt->pkt_resp +
			    sizeof (struct fcp_rsp) + rsp->fcp_response_len;

			if (fcp_validate_fcp_response(rsp, pptr) !=
			    FC_SUCCESS) {
				pkt->pkt_reason = CMD_CMPLT;
				*(pkt->pkt_scbp) = STATUS_CHECK;

				fcp_log(CE_WARN, pptr->port_dip,
				    "!SCSI command to d_id=0x%x lun=0x%x"
				    " failed, Bad FCP response values:"
				    " rsvd1=%x, rsvd2=%x, sts-rsvd1=%x,"
				    " sts-rsvd2=%x, rsplen=%x, senselen=%x",
				    ptgt->tgt_d_id, plun->lun_num,
				    rsp->reserved_0, rsp->reserved_1,
				    rsp->fcp_u.fcp_status.reserved_0,
				    rsp->fcp_u.fcp_status.reserved_1,
				    rsp->fcp_response_len, rsp->fcp_sense_len);

				return;
			}

			/*
			 * copy in sense information
			 */
			if (pptr->port_fcp_dma != FC_NO_DVMA_SPACE) {
				FCP_CP_IN(sense_from, sense_to,
				    fpkt->pkt_resp_acc, rqlen);
			} else {
				bcopy(sense_from, sense_to, rqlen);
			}

			/*
			 * Certain sense keys mean the LUN inventory has
			 * changed; schedule a rediscovery (once) for the
			 * target and mark it busy meanwhile.
			 */
			if ((FCP_SENSE_REPORTLUN_CHANGED(sense_to)) ||
			    (FCP_SENSE_NO_LUN(sense_to))) {
				mutex_enter(&ptgt->tgt_mutex);
				if (ptgt->tgt_tid == NULL) {
					/*
					 * Kick off rediscovery
					 */
					tid = timeout(fcp_reconfigure_luns,
					    (caddr_t)ptgt, drv_usectohz(1));

					ptgt->tgt_tid = tid;
					ptgt->tgt_state |= FCP_TGT_BUSY;
				}
				mutex_exit(&ptgt->tgt_mutex);
				if (FCP_SENSE_REPORTLUN_CHANGED(sense_to)) {
					FCP_TRACE(fcp_logq, pptr->port_instbuf,
					    fcp_trace, FCP_BUF_LEVEL_3, 0,
					    "!FCP: Report Lun Has Changed"
					    " target=%x", ptgt->tgt_d_id);
				} else if (FCP_SENSE_NO_LUN(sense_to)) {
					FCP_TRACE(fcp_logq, pptr->port_instbuf,
					    fcp_trace, FCP_BUF_LEVEL_3, 0,
					    "!FCP: LU Not Supported"
					    " target=%x", ptgt->tgt_d_id);
				}
			}
			ASSERT(pkt->pkt_scbp != NULL);

			/* fill in the auto-request-sense status area */
			pkt->pkt_state |= STATE_ARQ_DONE;

			arq->sts_rqpkt_resid = SENSE_LENGTH - rqlen;

			*((uchar_t *)&arq->sts_rqpkt_status) = STATUS_GOOD;
			arq->sts_rqpkt_reason = 0;
			arq->sts_rqpkt_statistics = 0;

			arq->sts_rqpkt_state = STATE_GOT_BUS |
			    STATE_GOT_TARGET | STATE_SENT_CMD |
			    STATE_GOT_STATUS | STATE_ARQ_DONE |
			    STATE_XFERRED_DATA;

			mutex_enter(&plun->lun_mutex);
			cip = plun->lun_cip;
			mutex_exit(&plun->lun_mutex);

			FCP_DTRACE(fcp_logq, pptr->port_instbuf,
			    fcp_trace, FCP_BUF_LEVEL_8, 0,
			    "SCSI Check condition on cmd=%p target=0x%x"
			    " LUN=%p, cmd=%x SCSI status=%x, es key=%x"
			    " ASC=%x ASCQ=%x", cmd, ptgt->tgt_d_id, cip,
			    cmd->cmd_fcp_cmd.fcp_cdb[0],
			    rsp->fcp_u.fcp_status.scsi_status,
			    sense_to->es_key, sense_to->es_add_code,
			    sense_to->es_qual_code);
		}
	} else {
		/* transport-level failure: map FC state/reason to SCSI */
		plun = ADDR2LUN(&pkt->pkt_address);
		ptgt = plun->lun_tgt;
		ASSERT(ptgt != NULL);

		/*
		 * Work harder to translate errors into target driver
		 * understandable ones. Note with despair that the target
		 * drivers don't decode pkt_state and pkt_reason exhaustively
		 * They resort to using the big hammer most often, which
		 * may not get fixed in the life time of this driver.
		 */
		pkt->pkt_state = 0;
		pkt->pkt_statistics = 0;

		switch (fpkt->pkt_state) {
		case FC_PKT_TRAN_ERROR:
			switch (fpkt->pkt_reason) {
			case FC_REASON_OVERRUN:
				pkt->pkt_reason = CMD_CMD_OVR;
				pkt->pkt_statistics |= STAT_ABORTED;
				break;

			case FC_REASON_XCHG_BSY: {
				caddr_t ptr;

				pkt->pkt_reason = CMD_CMPLT;	/* Lie */

				ptr = (caddr_t)pkt->pkt_scbp;
				if (ptr) {
					*ptr = STATUS_BUSY;
				}
				break;
			}

			case FC_REASON_ABORTED:
				pkt->pkt_reason = CMD_TRAN_ERR;
				pkt->pkt_statistics |= STAT_ABORTED;
				break;

			case FC_REASON_ABORT_FAILED:
				pkt->pkt_reason = CMD_ABORT_FAIL;
				break;

			case FC_REASON_NO_SEQ_INIT:
			case FC_REASON_CRC_ERROR:
				pkt->pkt_reason = CMD_TRAN_ERR;
				pkt->pkt_statistics |= STAT_ABORTED;
				break;
			default:
				pkt->pkt_reason = CMD_TRAN_ERR;
				break;
			}
			break;

		case FC_PKT_PORT_OFFLINE: {
			dev_info_t	*cdip = NULL;
			caddr_t		ptr;

			if (fpkt->pkt_reason == FC_REASON_LOGIN_REQUIRED) {
				FCP_DTRACE(fcp_logq, pptr->port_instbuf,
				    fcp_trace, FCP_BUF_LEVEL_8, 0,
				    "SCSI cmd; LOGIN REQUIRED from FCA for %x",
				    ptgt->tgt_d_id);
			}

			/* resolve the child dip (direct or via MDI) */
			mutex_enter(&plun->lun_mutex);
			if (plun->lun_mpxio == 0) {
				cdip = DIP(plun->lun_cip);
			} else if (plun->lun_cip) {
				cdip = mdi_pi_get_client(PIP(plun->lun_cip));
			}

			mutex_exit(&plun->lun_mutex);

			/* notify consumers of the device's removal */
			if (cdip) {
				(void) ndi_event_retrieve_cookie(
				    pptr->port_ndi_event_hdl, cdip,
				    FCAL_REMOVE_EVENT, &fcp_remove_eid,
				    NDI_EVENT_NOPASS);
				(void) ndi_event_run_callbacks(
				    pptr->port_ndi_event_hdl, cdip,
				    fcp_remove_eid, NULL);
			}

			/*
			 * If the link goes off-line for a lip,
			 * this will cause a error to the ST SG
			 * SGEN drivers. By setting BUSY we will
			 * give the drivers the chance to retry
			 * before it blows of the job. ST will
			 * remember how many times it has retried.
			 */

			if ((plun->lun_type == DTYPE_SEQUENTIAL) ||
			    (plun->lun_type == DTYPE_CHANGER)) {
				pkt->pkt_reason = CMD_CMPLT;	/* Lie */
				ptr = (caddr_t)pkt->pkt_scbp;
				if (ptr) {
					*ptr = STATUS_BUSY;
				}
			} else {
				pkt->pkt_reason = CMD_TRAN_ERR;
				pkt->pkt_statistics |= STAT_BUS_RESET;
			}
			break;
		}

		case FC_PKT_TRAN_BSY:
			/*
			 * Use the ssd Qfull handling here.
			 */
			*pkt->pkt_scbp = STATUS_INTERMEDIATE;
			pkt->pkt_state = STATE_GOT_BUS;
			break;

		case FC_PKT_TIMEOUT:
			pkt->pkt_reason = CMD_TIMEOUT;
			if (fpkt->pkt_reason == FC_REASON_ABORT_FAILED) {
				pkt->pkt_statistics |= STAT_TIMEOUT;
			} else {
				pkt->pkt_statistics |= STAT_ABORTED;
			}
			break;

		case FC_PKT_LOCAL_RJT:
			switch (fpkt->pkt_reason) {
			case FC_REASON_OFFLINE: {
				dev_info_t	*cdip = NULL;

				mutex_enter(&plun->lun_mutex);
				if (plun->lun_mpxio == 0) {
					cdip = DIP(plun->lun_cip);
				} else if (plun->lun_cip) {
					cdip = mdi_pi_get_client(
					    PIP(plun->lun_cip));
				}
				mutex_exit(&plun->lun_mutex);

				if (cdip) {
					(void) ndi_event_retrieve_cookie(
					    pptr->port_ndi_event_hdl, cdip,
					    FCAL_REMOVE_EVENT,
					    &fcp_remove_eid,
					    NDI_EVENT_NOPASS);
					(void) ndi_event_run_callbacks(
					    pptr->port_ndi_event_hdl,
					    cdip, fcp_remove_eid, NULL);
				}

				pkt->pkt_reason = CMD_TRAN_ERR;
				pkt->pkt_statistics |= STAT_BUS_RESET;

				break;
			}

			case FC_REASON_NOMEM:
			case FC_REASON_QFULL: {
				caddr_t ptr;

				pkt->pkt_reason = CMD_CMPLT;	/* Lie */
				ptr = (caddr_t)pkt->pkt_scbp;
				if (ptr) {
					*ptr = STATUS_BUSY;
				}
				break;
			}

			case FC_REASON_DMA_ERROR:
				pkt->pkt_reason = CMD_DMA_DERR;
				pkt->pkt_statistics |= STAT_ABORTED;
				break;

			case FC_REASON_CRC_ERROR:
			case FC_REASON_UNDERRUN: {
				uchar_t		status;
				/*
				 * Work around for Bugid: 4240945.
				 * IB on A5k doesn't set the Underrun bit
				 * in the fcp status, when it is transferring
				 * less than requested amount of data. Work
				 * around the ses problem to keep luxadm
				 * happy till ibfirmware is fixed.
				 */
				if (pptr->port_fcp_dma != FC_NO_DVMA_SPACE) {
					FCP_CP_IN(fpkt->pkt_resp, rsp,
					    fpkt->pkt_resp_acc,
					    sizeof (struct fcp_rsp));
				}
				status = rsp->fcp_u.fcp_status.scsi_status;
				if (((plun->lun_type & DTYPE_MASK) ==
				    DTYPE_ESI) && (status == STATUS_GOOD)) {
					pkt->pkt_reason = CMD_CMPLT;
					*pkt->pkt_scbp = status;
					pkt->pkt_resid = 0;
				} else {
					pkt->pkt_reason = CMD_TRAN_ERR;
					pkt->pkt_statistics |= STAT_ABORTED;
				}
				break;
			}

			case FC_REASON_NO_CONNECTION:
			case FC_REASON_UNSUPPORTED:
			case FC_REASON_ILLEGAL_REQ:
			case FC_REASON_BAD_SID:
			case FC_REASON_DIAG_BUSY:
			case FC_REASON_FCAL_OPN_FAIL:
			case FC_REASON_BAD_XID:
			default:
				pkt->pkt_reason = CMD_TRAN_ERR;
				pkt->pkt_statistics |= STAT_ABORTED;
				break;

			}
			break;

		case FC_PKT_NPORT_RJT:
		case FC_PKT_FABRIC_RJT:
		case FC_PKT_NPORT_BSY:
		case FC_PKT_FABRIC_BSY:
		default:
			FCP_DTRACE(fcp_logq, pptr->port_instbuf,
			    fcp_trace, FCP_BUF_LEVEL_8, 0,
			    "FC Status 0x%x, reason 0x%x",
			    fpkt->pkt_state, fpkt->pkt_reason);
			pkt->pkt_reason = CMD_TRAN_ERR;
			pkt->pkt_statistics |= STAT_ABORTED;
			break;
		}

		FCP_DTRACE(fcp_logq, pptr->port_instbuf,
		    fcp_trace, FCP_BUF_LEVEL_9, 0,
		    "!FC error on cmd=%p target=0x%x: pkt state=0x%x "
		    " pkt reason=0x%x", cmd, ptgt->tgt_d_id, fpkt->pkt_state,
		    fpkt->pkt_reason);
	}

	/* sanity: the packet's address must not have changed (DEBUG only) */
	ASSERT(save.a_hba_tran == pkt->pkt_address.a_hba_tran);
}
9085 
9086 
9087 static int
9088 fcp_validate_fcp_response(struct fcp_rsp *rsp, struct fcp_port *pptr)
9089 {
9090 	if (rsp->reserved_0 || rsp->reserved_1 ||
9091 	    rsp->fcp_u.fcp_status.reserved_0 ||
9092 	    rsp->fcp_u.fcp_status.reserved_1) {
9093 		/*
9094 		 * These reserved fields should ideally be zero. FCP-2 does say
9095 		 * that the recipient need not check for reserved fields to be
9096 		 * zero. If they are not zero, we will not make a fuss about it
9097 		 * - just log it (in debug to both trace buffer and messages
9098 		 * file and to trace buffer only in non-debug) and move on.
9099 		 *
9100 		 * Non-zero reserved fields were seen with minnows.
9101 		 *
9102 		 * qlc takes care of some of this but we cannot assume that all
9103 		 * FCAs will do so.
9104 		 */
9105 		FCP_TRACE(fcp_logq, pptr->port_instbuf, fcp_trace,
9106 		    FCP_BUF_LEVEL_5, 0,
9107 		    "Got fcp response packet with non-zero reserved fields "
9108 		    "rsp->reserved_0:0x%x, rsp_reserved_1:0x%x, "
9109 		    "status.reserved_0:0x%x, status.reserved_1:0x%x",
9110 		    rsp->reserved_0, rsp->reserved_1,
9111 		    rsp->fcp_u.fcp_status.reserved_0,
9112 		    rsp->fcp_u.fcp_status.reserved_1);
9113 	}
9114 
9115 	if (rsp->fcp_u.fcp_status.rsp_len_set && (rsp->fcp_response_len >
9116 	    (FCP_MAX_RSP_IU_SIZE - sizeof (struct fcp_rsp)))) {
9117 		return (FC_FAILURE);
9118 	}
9119 
9120 	if (rsp->fcp_u.fcp_status.sense_len_set && rsp->fcp_sense_len >
9121 	    (FCP_MAX_RSP_IU_SIZE - rsp->fcp_response_len -
9122 	    sizeof (struct fcp_rsp))) {
9123 		return (FC_FAILURE);
9124 	}
9125 
9126 	return (FC_SUCCESS);
9127 }
9128 
9129 
9130 /*
9131  * This is called when there is a change the in device state. The case we're
9132  * handling here is, if the d_id s does not match, offline this tgt and online
9133  * a new tgt with the new d_id.	 called from fcp_handle_devices with
9134  * port_mutex held.
9135  */
static int
fcp_device_changed(struct fcp_port *pptr, struct fcp_tgt *ptgt,
    fc_portmap_t *map_entry, int link_cnt, int tgt_cnt, int cause)
{
	ASSERT(mutex_owned(&pptr->port_mutex));

	FCP_TRACE(fcp_logq, pptr->port_instbuf,
	    fcp_trace, FCP_BUF_LEVEL_3, 0,
	    "Starting fcp_device_changed...");

	/*
	 * The two cases where fcp_device_changed is called are when the
	 * device changes its d_id or its hard address.  The hard address
	 * is only relevant on external topologies (FC_TOP_EXTERNAL).
	 */
	if ((ptgt->tgt_d_id != map_entry->map_did.port_id) ||
	    (FC_TOP_EXTERNAL(pptr->port_topology) &&
	    (ptgt->tgt_hard_addr != map_entry->map_hard_addr.hard_addr))) {

		/* offline this target (no-op if it is already offline) */
		mutex_enter(&ptgt->tgt_mutex);
		if (!(ptgt->tgt_state & FCP_TGT_OFFLINE)) {
			(void) fcp_offline_target(pptr, ptgt, link_cnt,
			    0, 1, NDI_DEVI_REMOVE);
		}
		mutex_exit(&ptgt->tgt_mutex);

		fcp_log(CE_NOTE, pptr->port_dip,
		    "Change in target properties: Old D_ID=%x New D_ID=%x"
		    " Old HA=%x New HA=%x", ptgt->tgt_d_id,
		    map_entry->map_did.port_id, ptgt->tgt_hard_addr,
		    map_entry->map_hard_addr.hard_addr);
	}

	/* let the mapflags handler online the target under its new identity */
	return (fcp_handle_mapflags(pptr, ptgt, map_entry,
	    link_cnt, tgt_cnt, cause));
}
9172 
9173 /*
9174  *     Function: fcp_alloc_lun
9175  *
9176  *  Description: Creates a new lun structure and adds it to the list
9177  *		 of luns of the target.
9178  *
9179  *     Argument: ptgt		Target the lun will belong to.
9180  *
9181  * Return Value: NULL		Failed
9182  *		 Not NULL	Succeeded
9183  *
9184  *	Context: Kernel context
9185  */
9186 static struct fcp_lun *
9187 fcp_alloc_lun(struct fcp_tgt *ptgt)
9188 {
9189 	struct fcp_lun *plun;
9190 
9191 	plun = kmem_zalloc(sizeof (struct fcp_lun), KM_NOSLEEP);
9192 	if (plun != NULL) {
9193 		/*
9194 		 * Initialize the mutex before putting in the target list
9195 		 * especially before releasing the target mutex.
9196 		 */
9197 		mutex_init(&plun->lun_mutex, NULL, MUTEX_DRIVER, NULL);
9198 		plun->lun_tgt = ptgt;
9199 
9200 		mutex_enter(&ptgt->tgt_mutex);
9201 		plun->lun_next = ptgt->tgt_lun;
9202 		ptgt->tgt_lun = plun;
9203 		plun->lun_old_guid = NULL;
9204 		plun->lun_old_guid_size = 0;
9205 		mutex_exit(&ptgt->tgt_mutex);
9206 	}
9207 
9208 	return (plun);
9209 }
9210 
9211 /*
9212  *     Function: fcp_dealloc_lun
9213  *
9214  *  Description: Frees the LUN structure passed by the caller.
9215  *
9216  *     Argument: plun		LUN structure to free.
9217  *
9218  * Return Value: None
9219  *
9220  *	Context: Kernel context.
9221  */
9222 static void
9223 fcp_dealloc_lun(struct fcp_lun *plun)
9224 {
9225 	mutex_enter(&plun->lun_mutex);
9226 	if (plun->lun_cip) {
9227 		fcp_remove_child(plun);
9228 	}
9229 	mutex_exit(&plun->lun_mutex);
9230 
9231 	mutex_destroy(&plun->lun_mutex);
9232 	if (plun->lun_guid) {
9233 		kmem_free(plun->lun_guid, plun->lun_guid_size);
9234 	}
9235 	if (plun->lun_old_guid) {
9236 		kmem_free(plun->lun_old_guid, plun->lun_old_guid_size);
9237 	}
9238 	kmem_free(plun, sizeof (*plun));
9239 }
9240 
9241 /*
9242  *     Function: fcp_alloc_tgt
9243  *
9244  *  Description: Creates a new target structure and adds it to the port
9245  *		 hash list.
9246  *
9247  *     Argument: pptr		fcp port structure
9248  *		 *map_entry	entry describing the target to create
9249  *		 link_cnt	Link state change counter
9250  *
9251  * Return Value: NULL		Failed
9252  *		 Not NULL	Succeeded
9253  *
9254  *	Context: Kernel context.
9255  */
9256 static struct fcp_tgt *
9257 fcp_alloc_tgt(struct fcp_port *pptr, fc_portmap_t *map_entry, int link_cnt)
9258 {
9259 	int			hash;
9260 	uchar_t			*wwn;
9261 	struct fcp_tgt	*ptgt;
9262 
9263 	ptgt = kmem_zalloc(sizeof (*ptgt), KM_NOSLEEP);
9264 	if (ptgt != NULL) {
9265 		mutex_enter(&pptr->port_mutex);
9266 		if (link_cnt != pptr->port_link_cnt) {
9267 			/*
9268 			 * oh oh -- another link reset
9269 			 * in progress -- give up
9270 			 */
9271 			mutex_exit(&pptr->port_mutex);
9272 			kmem_free(ptgt, sizeof (*ptgt));
9273 			ptgt = NULL;
9274 		} else {
9275 			/*
9276 			 * initialize the mutex before putting in the port
9277 			 * wwn list, especially before releasing the port
9278 			 * mutex.
9279 			 */
9280 			mutex_init(&ptgt->tgt_mutex, NULL, MUTEX_DRIVER, NULL);
9281 
9282 			/* add new target entry to the port's hash list */
9283 			wwn = (uchar_t *)&map_entry->map_pwwn;
9284 			hash = FCP_HASH(wwn);
9285 
9286 			ptgt->tgt_next = pptr->port_tgt_hash_table[hash];
9287 			pptr->port_tgt_hash_table[hash] = ptgt;
9288 
9289 			/* save cross-ptr */
9290 			ptgt->tgt_port = pptr;
9291 
9292 			ptgt->tgt_change_cnt = 1;
9293 
9294 			/* initialize the target manual_config_only flag */
9295 			if (fcp_enable_auto_configuration) {
9296 				ptgt->tgt_manual_config_only = 0;
9297 			} else {
9298 				ptgt->tgt_manual_config_only = 1;
9299 			}
9300 
9301 			mutex_exit(&pptr->port_mutex);
9302 		}
9303 	}
9304 
9305 	return (ptgt);
9306 }
9307 
9308 /*
9309  *     Function: fcp_dealloc_tgt
9310  *
9311  *  Description: Frees the target structure passed by the caller.
9312  *
9313  *     Argument: ptgt		Target structure to free.
9314  *
9315  * Return Value: None
9316  *
9317  *	Context: Kernel context.
9318  */
static void
fcp_dealloc_tgt(struct fcp_tgt *ptgt)
{
	/*
	 * NOTE(review): assumes the caller already unlinked ptgt from the
	 * port's target hash table -- confirm at call sites.
	 */
	mutex_destroy(&ptgt->tgt_mutex);
	kmem_free(ptgt, sizeof (*ptgt));
}
9325 
9326 
9327 /*
9328  * Handle STATUS_QFULL and STATUS_BUSY by performing delayed retry
9329  *
9330  *	Device discovery commands will not be retried for-ever as
9331  *	this will have repercussions on other devices that need to
9332  *	be submitted to the hotplug thread. After a quick glance
9333  *	at the SCSI-3 spec, it was found that the spec doesn't
9334  *	mandate a forever retry, rather recommends a delayed retry.
9335  *
9336  *	Since Photon IB is single threaded, STATUS_BUSY is common
9337  *	in a 4+initiator environment. Make sure the total time
 *	spent on retries (including command timeout) does not
 *	exceed 60 seconds.
9340  */
static void
fcp_queue_ipkt(struct fcp_port *pptr, fc_packet_t *fpkt)
{
	struct fcp_ipkt *icmd = (struct fcp_ipkt *)fpkt->pkt_ulp_private;
	struct fcp_tgt *ptgt = icmd->ipkt_tgt;

	mutex_enter(&pptr->port_mutex);
	mutex_enter(&ptgt->tgt_mutex);
	if (FCP_STATE_CHANGED(pptr, ptgt, icmd)) {
		/*
		 * A link/target state change happened since this internal
		 * command was issued; drop it instead of queueing a retry.
		 */
		FCP_TRACE(fcp_logq, pptr->port_instbuf,
		    fcp_trace, FCP_BUF_LEVEL_2, 0,
		    "fcp_queue_ipkt,1:state change occured"
		    " for D_ID=0x%x", ptgt->tgt_d_id);
		mutex_exit(&ptgt->tgt_mutex);
		mutex_exit(&pptr->port_mutex);
		(void) fcp_call_finish_init(pptr, ptgt, icmd->ipkt_link_cnt,
		    icmd->ipkt_change_cnt, icmd->ipkt_cause);
		fcp_icmd_free(pptr, icmd);
		return;
	}
	mutex_exit(&ptgt->tgt_mutex);

	/* delay before re-issue grows with the retry count */
	icmd->ipkt_restart = fcp_watchdog_time + icmd->ipkt_retries++;

	if (pptr->port_ipkt_list != NULL) {
		/* add pkt to front of doubly-linked list */
		pptr->port_ipkt_list->ipkt_prev = icmd;
		icmd->ipkt_next = pptr->port_ipkt_list;
		pptr->port_ipkt_list = icmd;
		icmd->ipkt_prev = NULL;
	} else {
		/* this is the first/only pkt on the list */
		pptr->port_ipkt_list = icmd;
		icmd->ipkt_next = NULL;
		icmd->ipkt_prev = NULL;
	}
	mutex_exit(&pptr->port_mutex);
}
9379 
9380 /*
9381  *     Function: fcp_transport
9382  *
 *  Description: This function submits the Fibre Channel packet to the
 *		 transport
9384  *		 layer by calling fc_ulp_transport().  If fc_ulp_transport()
9385  *		 fails the submission, the treatment depends on the value of
9386  *		 the variable internal.
9387  *
9388  *     Argument: port_handle	fp/fctl port handle.
9389  *		 *fpkt		Packet to submit to the transport layer.
9390  *		 internal	Not zero when it's an internal packet.
9391  *
9392  * Return Value: FC_TRAN_BUSY
9393  *		 FC_STATEC_BUSY
9394  *		 FC_OFFLINE
9395  *		 FC_LOGINREQ
9396  *		 FC_DEVICE_BUSY
9397  *		 FC_SUCCESS
9398  */
static int
fcp_transport(opaque_t port_handle, fc_packet_t *fpkt, int internal)
{
	int	rval;

	rval = fc_ulp_transport(port_handle, fpkt);
	if (rval == FC_SUCCESS) {
		return (rval);
	}

	/*
	 * LUN isn't marked BUSY or OFFLINE, so we got here to transport
	 * a command, if the underlying modules see that there is a state
	 * change, or if a port is OFFLINE, that means, that state change
	 * hasn't reached FCP yet, so re-queue the command for deferred
	 * submission.
	 */
	if ((rval == FC_STATEC_BUSY) || (rval == FC_OFFLINE) ||
	    (rval == FC_LOGINREQ) || (rval == FC_DEVICE_BUSY) ||
	    (rval == FC_DEVICE_BUSY_NEW_RSCN) || (rval == FC_TRAN_BUSY)) {
		/*
		 * Defer packet re-submission. Life hang is possible on
		 * internal commands if the port driver sends FC_STATEC_BUSY
		 * for ever, but that shouldn't happen in a good environment.
		 * Limiting re-transport for internal commands is probably a
		 * good idea..
		 * A race condition can happen when a port sees barrage of
		 * link transitions offline to online. If the FCTL has
		 * returned FC_STATEC_BUSY or FC_OFFLINE then none of the
		 * internal commands should be queued to do the discovery.
		 * The race condition is when an online comes and FCP starts
		 * its internal discovery and the link goes offline. It is
		 * possible that the statec_callback has not reached FCP
		 * and FCP is carrying on with its internal discovery.
		 * FC_STATEC_BUSY or FC_OFFLINE will be the first indication
		 * that the link has gone offline. At this point FCP should
		 * drop all the internal commands and wait for the
		 * statec_callback. It will be facilitated by incrementing
		 * port_link_cnt.
		 *
		 * For external commands, the (FC)pkt_timeout is decremented
		 * by the QUEUE Delay added by our driver, Care is taken to
		 * ensure that it doesn't become zero (zero means no timeout)
		 * If the time expires right inside driver queue itself,
		 * the watch thread will return it to the original caller
		 * indicating that the command has timed-out.
		 */
		if (internal) {
			/*
			 * Internal (discovery) command: identify the opcode
			 * for logging and let the ipkt error handler decide
			 * whether the command can be retried.
			 */
			char			*op;
			struct fcp_ipkt	*icmd;

			icmd = (struct fcp_ipkt *)fpkt->pkt_ulp_private;
			switch (icmd->ipkt_opcode) {
			case SCMD_REPORT_LUN:
				op = "REPORT LUN";
				break;

			case SCMD_INQUIRY:
				op = "INQUIRY";
				break;

			case SCMD_INQUIRY_PAGE83:
				op = "INQUIRY-83";
				break;

			default:
				op = "Internal SCSI COMMAND";
				break;
			}

			if (fcp_handle_ipkt_errors(icmd->ipkt_port,
			    icmd->ipkt_tgt, icmd, rval, op) == DDI_SUCCESS) {
				rval = FC_SUCCESS;
			}
		} else {
			/*
			 * External command: either fail it immediately if
			 * the caller forbids queueing (FLAG_NOQUEUE), or
			 * park it on the port's deferred queue.
			 */
			struct fcp_pkt *cmd;
			struct fcp_port *pptr;

			cmd = (struct fcp_pkt *)fpkt->pkt_ulp_private;
			cmd->cmd_state = FCP_PKT_IDLE;
			pptr = ADDR2FCP(&cmd->cmd_pkt->pkt_address);

			if (cmd->cmd_pkt->pkt_flags & FLAG_NOQUEUE) {
				FCP_DTRACE(fcp_logq, pptr->port_instbuf,
				    fcp_trace, FCP_BUF_LEVEL_9, 0,
				    "fcp_transport: xport busy for pkt %p",
				    cmd->cmd_pkt);
				rval = FC_TRAN_BUSY;
			} else {
				fcp_queue_pkt(pptr, cmd);
				rval = FC_SUCCESS;
			}
		}
	}

	return (rval);
}
9496 
9497 /*VARARGS3*/
9498 static void
9499 fcp_log(int level, dev_info_t *dip, const char *fmt, ...)
9500 {
9501 	char		buf[256];
9502 	va_list		ap;
9503 
9504 	if (dip == NULL) {
9505 		dip = fcp_global_dip;
9506 	}
9507 
9508 	va_start(ap, fmt);
9509 	(void) vsprintf(buf, fmt, ap);
9510 	va_end(ap);
9511 
9512 	scsi_log(dip, "fcp", level, buf);
9513 }
9514 
9515 /*
9516  * This function retries NS registry of FC4 type.
9517  * It assumes that fcp_mutex is held.
9518  * The function does nothing if topology is not fabric
9519  * So, the topology has to be set before this function can be called
9520  */
9521 static void
9522 fcp_retry_ns_registry(struct fcp_port *pptr, uint32_t s_id)
9523 {
9524 	int	rval;
9525 
9526 	ASSERT(MUTEX_HELD(&pptr->port_mutex));
9527 
9528 	if (((pptr->port_state & FCP_STATE_NS_REG_FAILED) == 0) ||
9529 	    ((pptr->port_topology != FC_TOP_FABRIC) &&
9530 	    (pptr->port_topology != FC_TOP_PUBLIC_LOOP))) {
9531 		if (pptr->port_state & FCP_STATE_NS_REG_FAILED) {
9532 			pptr->port_state &= ~FCP_STATE_NS_REG_FAILED;
9533 		}
9534 		return;
9535 	}
9536 	mutex_exit(&pptr->port_mutex);
9537 	rval = fcp_do_ns_registry(pptr, s_id);
9538 	mutex_enter(&pptr->port_mutex);
9539 
9540 	if (rval == 0) {
9541 		/* Registry successful. Reset flag */
9542 		pptr->port_state &= ~(FCP_STATE_NS_REG_FAILED);
9543 	}
9544 }
9545 
9546 /*
9547  * This function registers the ULP with the switch by calling transport i/f
9548  */
9549 static int
9550 fcp_do_ns_registry(struct fcp_port *pptr, uint32_t s_id)
9551 {
9552 	fc_ns_cmd_t		ns_cmd;
9553 	ns_rfc_type_t		rfc;
9554 	uint32_t		types[8];
9555 
9556 	/*
9557 	 * Prepare the Name server structure to
9558 	 * register with the transport in case of
9559 	 * Fabric configuration.
9560 	 */
9561 	bzero(&rfc, sizeof (rfc));
9562 	bzero(types, sizeof (types));
9563 
9564 	types[FC4_TYPE_WORD_POS(FC_TYPE_SCSI_FCP)] =
9565 	    (1 << FC4_TYPE_BIT_POS(FC_TYPE_SCSI_FCP));
9566 
9567 	rfc.rfc_port_id.port_id = s_id;
9568 	bcopy(types, rfc.rfc_types, sizeof (types));
9569 
9570 	ns_cmd.ns_flags = 0;
9571 	ns_cmd.ns_cmd = NS_RFT_ID;
9572 	ns_cmd.ns_req_len = sizeof (rfc);
9573 	ns_cmd.ns_req_payload = (caddr_t)&rfc;
9574 	ns_cmd.ns_resp_len = 0;
9575 	ns_cmd.ns_resp_payload = NULL;
9576 
9577 	/*
9578 	 * Perform the Name Server Registration for SCSI_FCP FC4 Type.
9579 	 */
9580 	if (fc_ulp_port_ns(pptr->port_fp_handle, NULL, &ns_cmd)) {
9581 		fcp_log(CE_WARN, pptr->port_dip,
9582 		    "!ns_registry: failed name server registration");
9583 		return (1);
9584 	}
9585 
9586 	return (0);
9587 }
9588 
9589 /*
9590  *     Function: fcp_handle_port_attach
9591  *
9592  *  Description: This function is called from fcp_port_attach() to attach a
9593  *		 new port. This routine does the following:
9594  *
9595  *		1) Allocates an fcp_port structure and initializes it.
 *		2) Tries to register the new FC-4 (FCP) capability with the name
9597  *		   server.
9598  *		3) Kicks off the enumeration of the targets/luns visible
9599  *		   through this new port.  That is done by calling
9600  *		   fcp_statec_callback() if the port is online.
9601  *
9602  *     Argument: ulph		fp/fctl port handle.
9603  *		 *pinfo		Port information.
9604  *		 s_id		Port ID.
9605  *		 instance	Device instance number for the local port
9606  *				(returned by ddi_get_instance()).
9607  *
9608  * Return Value: DDI_SUCCESS
9609  *		 DDI_FAILURE
9610  *
9611  *	Context: User and Kernel context.
9612  */
/*ARGSUSED*/
int
fcp_handle_port_attach(opaque_t ulph, fc_ulp_port_info_t *pinfo,
    uint32_t s_id, int instance)
{
	int			res = DDI_FAILURE;
	scsi_hba_tran_t		*tran;
	int			mutex_initted = FALSE;
	int			hba_attached = FALSE;
	int			soft_state_linked = FALSE;
	int			event_bind = FALSE;
	struct fcp_port		*pptr;
	fc_portmap_t		*tmp_list = NULL;
	uint32_t		max_cnt, alloc_cnt;
	uchar_t			*boot_wwn = NULL;
	uint_t			nbytes;
	int			manual_cfg;

	/*
	 * this port instance attaching for the first time (or after
	 * being detached before)
	 */
	FCP_TRACE(fcp_logq, "fcp", fcp_trace,
	    FCP_BUF_LEVEL_3, 0, "port attach: for port %d", instance);

	if (ddi_soft_state_zalloc(fcp_softstate, instance) != DDI_SUCCESS) {
		cmn_err(CE_WARN, "fcp: Softstate struct alloc failed"
		    "parent dip: %p; instance: %d", (void *)pinfo->port_dip,
		    instance);
		return (res);
	}

	if ((pptr = ddi_get_soft_state(fcp_softstate, instance)) == NULL) {
		/* this shouldn't happen */
		ddi_soft_state_free(fcp_softstate, instance);
		cmn_err(CE_WARN, "fcp: bad soft state");
		return (res);
	}

	/* instance name used as the prefix in trace/log messages */
	(void) sprintf(pptr->port_instbuf, "fcp(%d)", instance);

	/*
	 * Make a copy of ulp_port_info as fctl allocates
	 * a temp struct.
	 */
	(void) fcp_cp_pinfo(pptr, pinfo);

	/*
	 * Check for manual_configuration_only property.
	 * Enable manual configuration if the property is
	 * set to 1, otherwise disable manual configuration.
	 */
	if ((manual_cfg = ddi_prop_get_int(DDI_DEV_T_ANY, pptr->port_dip,
	    DDI_PROP_NOTPROM | DDI_PROP_DONTPASS,
	    MANUAL_CFG_ONLY,
	    -1)) != -1) {
		if (manual_cfg == 1) {
			char	*pathname;
			pathname = kmem_zalloc(MAXPATHLEN, KM_SLEEP);
			(void) ddi_pathname(pptr->port_dip, pathname);
			cmn_err(CE_NOTE,
			    "%s (%s%d) %s is enabled via %s.conf.",
			    pathname,
			    ddi_driver_name(pptr->port_dip),
			    ddi_get_instance(pptr->port_dip),
			    MANUAL_CFG_ONLY,
			    ddi_driver_name(pptr->port_dip));
			/* note: this switch is global, not per-port */
			fcp_enable_auto_configuration = 0;
			kmem_free(pathname, MAXPATHLEN);
		}
	}
	_NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(pptr->port_link_cnt))
	pptr->port_link_cnt = 1;
	_NOTE(NOW_VISIBLE_TO_OTHER_THREADS(pptr->port_link_cnt))
	pptr->port_id = s_id;
	pptr->port_instance = instance;
	_NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(pptr->port_state))
	pptr->port_state = FCP_STATE_INIT;
	_NOTE(NOW_VISIBLE_TO_OTHER_THREADS(pptr->port_state))

	pptr->port_dmacookie_sz = (pptr->port_data_dma_attr.dma_attr_sgllen *
	    sizeof (ddi_dma_cookie_t));

	/*
	 * The two mutexes of fcp_port are initialized.	 The variable
	 * mutex_initted is incremented to remember that fact.	That variable
	 * is checked when the routine fails and the mutexes have to be
	 * destroyed.
	 */
	mutex_init(&pptr->port_mutex, NULL, MUTEX_DRIVER, NULL);
	mutex_init(&pptr->port_pkt_mutex, NULL, MUTEX_DRIVER, NULL);
	mutex_initted++;

	/*
	 * The SCSI tran structure is allocate and initialized now.
	 */
	if ((tran = scsi_hba_tran_alloc(pptr->port_dip, 0)) == NULL) {
		fcp_log(CE_WARN, pptr->port_dip,
		    "!fcp%d: scsi_hba_tran_alloc failed", instance);
		goto fail;
	}

	/* link in the transport structure then fill it in */
	pptr->port_tran = tran;
	tran->tran_hba_private		= pptr;
	tran->tran_tgt_init		= fcp_scsi_tgt_init;
	tran->tran_tgt_probe		= NULL;
	tran->tran_tgt_free		= fcp_scsi_tgt_free;
	tran->tran_start		= fcp_scsi_start;
	tran->tran_reset		= fcp_scsi_reset;
	tran->tran_abort		= fcp_scsi_abort;
	tran->tran_getcap		= fcp_scsi_getcap;
	tran->tran_setcap		= fcp_scsi_setcap;
	tran->tran_init_pkt		= NULL;
	tran->tran_destroy_pkt		= NULL;
	tran->tran_dmafree		= NULL;
	tran->tran_sync_pkt		= NULL;
	tran->tran_reset_notify		= fcp_scsi_reset_notify;
	tran->tran_get_bus_addr		= fcp_scsi_get_bus_addr;
	tran->tran_get_name		= fcp_scsi_get_name;
	tran->tran_clear_aca		= NULL;
	tran->tran_clear_task_set	= NULL;
	tran->tran_terminate_task	= NULL;
	tran->tran_get_eventcookie	= fcp_scsi_bus_get_eventcookie;
	tran->tran_add_eventcall	= fcp_scsi_bus_add_eventcall;
	tran->tran_remove_eventcall	= fcp_scsi_bus_remove_eventcall;
	tran->tran_post_event		= fcp_scsi_bus_post_event;
	tran->tran_quiesce		= NULL;
	tran->tran_unquiesce		= NULL;
	tran->tran_bus_reset		= NULL;
	tran->tran_bus_config		= fcp_scsi_bus_config;
	tran->tran_bus_unconfig		= fcp_scsi_bus_unconfig;
	tran->tran_bus_power		= NULL;
	tran->tran_interconnect_type	= INTERCONNECT_FABRIC;

	tran->tran_pkt_constructor	= fcp_kmem_cache_constructor;
	tran->tran_pkt_destructor	= fcp_kmem_cache_destructor;
	tran->tran_setup_pkt		= fcp_pkt_setup;
	tran->tran_teardown_pkt		= fcp_pkt_teardown;
	tran->tran_hba_len		= pptr->port_priv_pkt_len +
	    sizeof (struct fcp_pkt) + pptr->port_dmacookie_sz;

	/*
	 * Allocate an ndi event handle
	 */
	pptr->port_ndi_event_defs = (ndi_event_definition_t *)
	    kmem_zalloc(sizeof (fcp_ndi_event_defs), KM_SLEEP);

	bcopy(fcp_ndi_event_defs, pptr->port_ndi_event_defs,
	    sizeof (fcp_ndi_event_defs));

	(void) ndi_event_alloc_hdl(pptr->port_dip, NULL,
	    &pptr->port_ndi_event_hdl, NDI_SLEEP);

	pptr->port_ndi_events.ndi_events_version = NDI_EVENTS_REV1;
	pptr->port_ndi_events.ndi_n_events = FCP_N_NDI_EVENTS;
	pptr->port_ndi_events.ndi_event_defs = pptr->port_ndi_event_defs;

	if (DEVI_IS_ATTACHING(pptr->port_dip) &&
	    (ndi_event_bind_set(pptr->port_ndi_event_hdl,
	    &pptr->port_ndi_events, NDI_SLEEP) != NDI_SUCCESS)) {
		goto fail;
	}
	event_bind++;	/* Checked in fail case */

	if (scsi_hba_attach_setup(pptr->port_dip, &pptr->port_data_dma_attr,
	    tran, SCSI_HBA_ADDR_COMPLEX | SCSI_HBA_TRAN_SCB)
	    != DDI_SUCCESS) {
		fcp_log(CE_WARN, pptr->port_dip,
		    "!fcp%d: scsi_hba_attach_setup failed", instance);
		goto fail;
	}
	hba_attached++;	/* Checked in fail case */

	/* register with mpxio; port_mpxio records success for cleanup */
	pptr->port_mpxio = 0;
	if (mdi_phci_register(MDI_HCI_CLASS_SCSI, pptr->port_dip, 0) ==
	    MDI_SUCCESS) {
		pptr->port_mpxio++;
	}

	/*
	 * The following code is putting the new port structure in the global
	 * list of ports and, if it is the first port to attach, it start the
	 * fcp_watchdog_tick.
	 *
	 * Why put this new port in the global before we are done attaching it?
	 * We are actually making the structure globally known before we are
	 * done attaching it.  The reason for that is: because of the code that
	 * follows.  At this point the resources to handle the port are
	 * allocated.  This function is now going to do the following:
	 *
	 *   1) It is going to try to register with the name server advertising
	 *	the new FCP capability of the port.
	 *   2) It is going to play the role of the fp/fctl layer by building
	 *	a list of worldwide names reachable through this port and call
	 *	itself on fcp_statec_callback().  That requires the port to
	 *	be part of the global list.
	 */
	mutex_enter(&fcp_global_mutex);
	if (fcp_port_head == NULL) {
		fcp_read_blacklist(pinfo->port_dip, &fcp_lun_blacklist);
	}
	pptr->port_next = fcp_port_head;
	fcp_port_head = pptr;
	soft_state_linked++;

	if (fcp_watchdog_init++ == 0) {
		fcp_watchdog_tick = fcp_watchdog_timeout *
		    drv_usectohz(1000000);
		fcp_watchdog_id = timeout(fcp_watch, NULL,
		    fcp_watchdog_tick);
	}
	mutex_exit(&fcp_global_mutex);

	/*
	 * Here an attempt is made to register with the name server, the new
	 * FCP capability.  That is done using an RFT_ID to the name server.
	 * It is done synchronously.  The function fcp_do_ns_registry()
	 * doesn't return till the name server responded.
	 * On failures, just ignore it for now and it will get retried during
	 * state change callbacks. We'll set a flag to show this failure
	 */
	if (fcp_do_ns_registry(pptr, s_id)) {
		mutex_enter(&pptr->port_mutex);
		pptr->port_state |= FCP_STATE_NS_REG_FAILED;
		mutex_exit(&pptr->port_mutex);
	} else {
		mutex_enter(&pptr->port_mutex);
		pptr->port_state &= ~(FCP_STATE_NS_REG_FAILED);
		mutex_exit(&pptr->port_mutex);
	}

	/*
	 * Lookup for boot WWN property
	 */
	if (modrootloaded != 1) {
		if ((ddi_prop_lookup_byte_array(DDI_DEV_T_ANY,
		    ddi_get_parent(pinfo->port_dip),
		    DDI_PROP_DONTPASS, OBP_BOOT_WWN,
		    &boot_wwn, &nbytes) == DDI_PROP_SUCCESS) &&
		    (nbytes == FC_WWN_SIZE)) {
			bcopy(boot_wwn, pptr->port_boot_wwn, FC_WWN_SIZE);
		}
		if (boot_wwn) {
			ddi_prop_free(boot_wwn);
		}
	}

	/*
	 * Handle various topologies and link states.
	 */
	switch (FC_PORT_STATE_MASK(pptr->port_phys_state)) {
	case FC_STATE_OFFLINE:

		/*
		 * we're attaching a port where the link is offline
		 *
		 * Wait for ONLINE, at which time a state
		 * change will cause a statec_callback
		 *
		 * in the mean time, do not do anything
		 */
		res = DDI_SUCCESS;
		pptr->port_state |= FCP_STATE_OFFLINE;
		break;

	case FC_STATE_ONLINE: {
		if (pptr->port_topology == FC_TOP_UNKNOWN) {
			(void) fcp_linkreset(pptr, NULL, KM_NOSLEEP);
			res = DDI_SUCCESS;
			break;
		}
		/*
		 * discover devices and create nodes (a private
		 * loop or point-to-point)
		 */
		ASSERT(pptr->port_topology != FC_TOP_UNKNOWN);

		/*
		 * At this point we are going to build a list of all the ports
		 * that	can be reached through this local port.	 It looks like
		 * we cannot handle more than FCP_MAX_DEVICES per local port
		 * (128).
		 */
		if ((tmp_list = (fc_portmap_t *)kmem_zalloc(
		    sizeof (fc_portmap_t) * FCP_MAX_DEVICES,
		    KM_NOSLEEP)) == NULL) {
			fcp_log(CE_WARN, pptr->port_dip,
			    "!fcp%d: failed to allocate portmap",
			    instance);
			goto fail;
		}

		/*
		 * fc_ulp_getportmap() is going to provide us with the list of
		 * remote ports in the buffer we just allocated.  The way the
		 * list is going to be retrieved depends on the topology.
		 * However, if we are connected to a Fabric, a name server
		 * request may be sent to get the list of FCP capable ports.
		 * It should be noted that is the case the request is
		 * synchronous.	 This means we are stuck here till the name
		 * server replies.  A lot of things can change during that time
		 * and including, may be, being called on
		 * fcp_statec_callback() for different reasons. I'm not sure
		 * the code can handle that.
		 */
		max_cnt = FCP_MAX_DEVICES;
		alloc_cnt = FCP_MAX_DEVICES;
		if ((res = fc_ulp_getportmap(pptr->port_fp_handle,
		    &tmp_list, &max_cnt, FC_ULP_PLOGI_PRESERVE)) !=
		    FC_SUCCESS) {
			caddr_t msg;

			(void) fc_ulp_error(res, &msg);

			/*
			 * this	 just means the transport is
			 * busy perhaps building a portmap so,
			 * for now, succeed this port attach
			 * when the transport has a new map,
			 * it'll send us a state change then
			 */
			fcp_log(CE_WARN, pptr->port_dip,
			    "!failed to get port map : %s", msg);

			res = DDI_SUCCESS;
			break;	/* go return result */
		}
		/*
		 * NOTE(review): this appears to track a map buffer that
		 * fc_ulp_getportmap() grew beyond our allocation, so that
		 * the kmem_free() below uses the right size -- confirm
		 * against fc_ulp_getportmap()'s contract.
		 */
		if (max_cnt > alloc_cnt) {
			alloc_cnt = max_cnt;
		}

		/*
		 * We are now going to call fcp_statec_callback() ourselves.
		 * By issuing this call we are trying to kick off the enumera-
		 * tion process.
		 */
		/*
		 * let the state change callback do the SCSI device
		 * discovery and create the devinfos
		 */
		fcp_statec_callback(ulph, pptr->port_fp_handle,
		    pptr->port_phys_state, pptr->port_topology, tmp_list,
		    max_cnt, pptr->port_id);

		res = DDI_SUCCESS;
		break;
	}

	default:
		/* unknown port state */
		fcp_log(CE_WARN, pptr->port_dip,
		    "!fcp%d: invalid port state at attach=0x%x",
		    instance, pptr->port_phys_state);

		mutex_enter(&pptr->port_mutex);
		pptr->port_phys_state = FCP_STATE_OFFLINE;
		mutex_exit(&pptr->port_mutex);

		res = DDI_SUCCESS;
		break;
	}

	/* free temp list if used */
	if (tmp_list != NULL) {
		kmem_free(tmp_list, sizeof (fc_portmap_t) * alloc_cnt);
	}

	/* note the attach time */
	pptr->port_attach_time = lbolt64;

	/* all done */
	return (res);

	/* a failure we have to clean up after */
fail:
	fcp_log(CE_WARN, pptr->port_dip, "!failed to attach to port");

	if (soft_state_linked) {
		/* remove this fcp_port from the linked list */
		(void) fcp_soft_state_unlink(pptr);
	}

	/* unbind and free event set */
	if (pptr->port_ndi_event_hdl) {
		if (event_bind) {
			(void) ndi_event_unbind_set(pptr->port_ndi_event_hdl,
			    &pptr->port_ndi_events, NDI_SLEEP);
		}
		(void) ndi_event_free_hdl(pptr->port_ndi_event_hdl);
	}

	if (pptr->port_ndi_event_defs) {
		(void) kmem_free(pptr->port_ndi_event_defs,
		    sizeof (fcp_ndi_event_defs));
	}

	/*
	 * Clean up mpxio stuff
	 */
	if (pptr->port_mpxio) {
		(void) mdi_phci_unregister(pptr->port_dip, 0);
		pptr->port_mpxio--;
	}

	/* undo SCSI HBA setup */
	if (hba_attached) {
		(void) scsi_hba_detach(pptr->port_dip);
	}
	if (pptr->port_tran != NULL) {
		scsi_hba_tran_free(pptr->port_tran);
	}

	mutex_enter(&fcp_global_mutex);

	/*
	 * We check soft_state_linked, because it is incremented right before
	 * we call increment fcp_watchdog_init.	 Therefore, we know if
	 * soft_state_linked is still FALSE, we do not want to decrement
	 * fcp_watchdog_init or possibly call untimeout.
	 */

	if (soft_state_linked) {
		if (--fcp_watchdog_init == 0) {
			timeout_id_t	tid = fcp_watchdog_id;

			/* drop the lock before the blocking untimeout() */
			mutex_exit(&fcp_global_mutex);
			(void) untimeout(tid);
		} else {
			mutex_exit(&fcp_global_mutex);
		}
	} else {
		mutex_exit(&fcp_global_mutex);
	}

	if (mutex_initted) {
		mutex_destroy(&pptr->port_mutex);
		mutex_destroy(&pptr->port_pkt_mutex);
	}

	if (tmp_list != NULL) {
		kmem_free(tmp_list, sizeof (fc_portmap_t) * alloc_cnt);
	}

	/* this makes pptr invalid */
	ddi_soft_state_free(fcp_softstate, instance);

	return (DDI_FAILURE);
}
10062 
10063 
/*
 *     Function: fcp_handle_port_detach
 *
 *  Description: Quiesces a port for detach, suspend or power down.  The
 *		 requested state flag is set, outstanding reconfiguration
 *		 and internal-packet activity is drained, the port is taken
 *		 offline, and (for a real detach) the port is unlinked from
 *		 the global soft state list and torn down.
 *
 *     Argument: *pptr		Port being taken down.
 *		 flag		FCP_STATE_DETACHING, FCP_STATE_SUSPENDED or
 *				FCP_STATE_POWER_DOWN.
 *		 instance	Soft state instance number, passed through to
 *				fcp_cleanup_port() on a real detach.
 *
 * Return Value: FC_SUCCESS	Port was quiesced and marked with 'flag'.
 *		 FC_FAILURE	Port is busy with MDI, or outstanding
 *				reconfig/ipkt/watchdog activity did not
 *				drain within FCP_ICMD_DEADLINE seconds.
 *
 *	  Notes: On a successful FCP_STATE_DETACHING call pptr is freed by
 *		 fcp_cleanup_port() and must not be referenced afterwards.
 */
static int
fcp_handle_port_detach(struct fcp_port *pptr, int flag, int instance)
{
	int count = 0;

	mutex_enter(&pptr->port_mutex);

	/*
	 * if the port is powered down or suspended, nothing else
	 * to do; just return.
	 */
	if (flag != FCP_STATE_DETACHING) {
		if (pptr->port_state & (FCP_STATE_POWER_DOWN |
		    FCP_STATE_SUSPENDED)) {
			pptr->port_state |= flag;
			mutex_exit(&pptr->port_mutex);
			return (FC_SUCCESS);
		}
	}

	/* Refuse to take the port down while MDI is using it. */
	if (pptr->port_state & FCP_STATE_IN_MDI) {
		mutex_exit(&pptr->port_mutex);
		return (FC_FAILURE);
	}

	FCP_TRACE(fcp_logq, pptr->port_instbuf,
	    fcp_trace, FCP_BUF_LEVEL_2, 0,
	    "fcp_handle_port_detach: port is detaching");

	pptr->port_state |= flag;

	/*
	 * Wait for any ongoing reconfig/ipkt to complete, that
	 * ensures the freeing to targets/luns is safe.
	 * No more ref to this port should happen from statec/ioctl
	 * after that as it was removed from the global port list.
	 */
	while (pptr->port_tmp_cnt || pptr->port_ipkt_cnt ||
	    (pptr->port_state & FCP_STATE_IN_WATCHDOG)) {
		/*
		 * Let's give sufficient time for reconfig/ipkt
		 * to complete.
		 */
		if (count++ >= FCP_ICMD_DEADLINE) {
			break;
		}
		/* Drop the mutex while sleeping so the counts can change. */
		mutex_exit(&pptr->port_mutex);
		delay(drv_usectohz(1000000));
		mutex_enter(&pptr->port_mutex);
	}

	/*
	 * if the driver is still busy then fail to
	 * suspend/power down.
	 */
	if (pptr->port_tmp_cnt || pptr->port_ipkt_cnt ||
	    (pptr->port_state & FCP_STATE_IN_WATCHDOG)) {
		/* Undo the state change made above before failing. */
		pptr->port_state &= ~flag;
		mutex_exit(&pptr->port_mutex);
		return (FC_FAILURE);
	}

	if (flag == FCP_STATE_DETACHING) {
		/*
		 * Remove the port from the global list so no new
		 * statec/ioctl references can be taken on it.
		 */
		pptr = fcp_soft_state_unlink(pptr);
		ASSERT(pptr != NULL);
	}

	/* Take the port offline: bump the link generation count. */
	pptr->port_link_cnt++;
	pptr->port_state |= FCP_STATE_OFFLINE;
	pptr->port_state &= ~(FCP_STATE_ONLINING | FCP_STATE_ONLINE);

	fcp_update_state(pptr, (FCP_LUN_BUSY | FCP_LUN_MARK),
	    FCP_CAUSE_LINK_DOWN);
	mutex_exit(&pptr->port_mutex);

	/* kill watch dog timer if we're the last */
	mutex_enter(&fcp_global_mutex);
	if (--fcp_watchdog_init == 0) {
		timeout_id_t	tid = fcp_watchdog_id;
		/* Drop the global mutex before untimeout to avoid deadlock. */
		mutex_exit(&fcp_global_mutex);
		(void) untimeout(tid);
	} else {
		mutex_exit(&fcp_global_mutex);
	}

	/* clean up the port structures */
	if (flag == FCP_STATE_DETACHING) {
		fcp_cleanup_port(pptr, instance);
	}

	return (FC_SUCCESS);
}
10156 
10157 
/*
 *     Function: fcp_cleanup_port
 *
 *  Description: Releases the resources still held by a port once it has
 *		 been quiesced by fcp_handle_port_detach(): the NDI event
 *		 set and handle, the event definitions, all target/LUN
 *		 structures, the mpxio pHCI registration, the SCSA
 *		 transport, the port mutexes and finally the soft state
 *		 itself.
 *
 *     Argument: *pptr		Port to tear down.
 *		 instance	Soft state instance number to free.
 *
 * Return Value: None
 *
 *	  Notes: pptr is freed by the ddi_soft_state_free() call at the
 *		 bottom and must not be referenced after this returns.
 */
static void
fcp_cleanup_port(struct fcp_port *pptr, int instance)
{
	ASSERT(pptr != NULL);

	/* unbind and free event set */
	if (pptr->port_ndi_event_hdl) {
		(void) ndi_event_unbind_set(pptr->port_ndi_event_hdl,
		    &pptr->port_ndi_events, NDI_SLEEP);
		(void) ndi_event_free_hdl(pptr->port_ndi_event_hdl);
	}

	if (pptr->port_ndi_event_defs) {
		(void) kmem_free(pptr->port_ndi_event_defs,
		    sizeof (fcp_ndi_event_defs));
	}

	/* free the lun/target structures and devinfos */
	fcp_free_targets(pptr);

	/*
	 * Clean up mpxio stuff
	 */
	if (pptr->port_mpxio) {
		(void) mdi_phci_unregister(pptr->port_dip, 0);
		pptr->port_mpxio--;
	}

	/* clean up SCSA stuff */
	(void) scsi_hba_detach(pptr->port_dip);
	if (pptr->port_tran != NULL) {
		scsi_hba_tran_free(pptr->port_tran);
	}

#ifdef	KSTATS_CODE
	/* clean up kstats */
	if (pptr->fcp_ksp != NULL) {
		kstat_delete(pptr->fcp_ksp);
	}
#endif

	/* clean up soft state mutexes/condition variables */
	mutex_destroy(&pptr->port_mutex);
	mutex_destroy(&pptr->port_pkt_mutex);

	/* all done with soft state */
	ddi_soft_state_free(fcp_softstate, instance);
}
10206 
10207 /*
10208  *     Function: fcp_kmem_cache_constructor
10209  *
10210  *  Description: This function allocates and initializes the resources required
10211  *		 to build a scsi_pkt structure the target driver.  The result
10212  *		 of the allocation and initialization will be cached in the
10213  *		 memory cache.	As DMA resources may be allocated here, that
10214  *		 means DMA resources will be tied up in the cache manager.
10215  *		 This is a tradeoff that has been made for performance reasons.
10216  *
10217  *     Argument: *buf		Memory to preinitialize.
10218  *		 *arg		FCP port structure (fcp_port).
10219  *		 kmflags	Value passed to kmem_cache_alloc() and
10220  *				propagated to the constructor.
10221  *
10222  * Return Value: 0	Allocation/Initialization was successful.
10223  *		 -1	Allocation or Initialization failed.
10224  *
10225  *
10226  * If the returned value is 0, the buffer is initialized like this:
10227  *
10228  *		    +================================+
10229  *	     +----> |	      struct scsi_pkt	     |
10230  *	     |	    |				     |
10231  *	     | +--- | pkt_ha_private		     |
10232  *	     | |    |				     |
10233  *	     | |    +================================+
10234  *	     | |
10235  *	     | |    +================================+
10236  *	     | +--> |	    struct fcp_pkt	     | <---------+
10237  *	     |	    |				     |		 |
10238  *	     +----- | cmd_pkt			     |		 |
10239  *		    |			  cmd_fp_pkt | ---+	 |
10240  *	  +-------->| cmd_fcp_rsp[]		     |	  |	 |
10241  *	  |    +--->| cmd_fcp_cmd[]		     |	  |	 |
10242  *	  |    |    |--------------------------------|	  |	 |
10243  *	  |    |    |	      struct fc_packet	     | <--+	 |
10244  *	  |    |    |				     |		 |
10245  *	  |    |    |		     pkt_ulp_private | ----------+
10246  *	  |    |    |		     pkt_fca_private | -----+
10247  *	  |    |    |		     pkt_data_cookie | ---+ |
10248  *	  |    |    | pkt_cmdlen		     |	  | |
10249  *	  |    |(a) | pkt_rsplen		     |	  | |
10250  *	  |    +----| .......... pkt_cmd ........... | ---|-|---------------+
10251  *	  |	(b) |		      pkt_cmd_cookie | ---|-|----------+    |
10252  *	  +---------| .......... pkt_resp .......... | ---|-|------+   |    |
10253  *		    |		     pkt_resp_cookie | ---|-|--+   |   |    |
10254  *		    | pkt_cmd_dma		     |	  | |  |   |   |    |
10255  *		    | pkt_cmd_acc		     |	  | |  |   |   |    |
10256  *		    +================================+	  | |  |   |   |    |
10257  *		    |	      dma_cookies	     | <--+ |  |   |   |    |
10258  *		    |				     |	    |  |   |   |    |
10259  *		    +================================+	    |  |   |   |    |
10260  *		    |	      fca_private	     | <----+  |   |   |    |
10261  *		    |				     |	       |   |   |    |
10262  *		    +================================+	       |   |   |    |
10263  *							       |   |   |    |
10264  *							       |   |   |    |
10265  *		    +================================+	 (d)   |   |   |    |
10266  *		    |	     fcp_resp cookies	     | <-------+   |   |    |
10267  *		    |				     |		   |   |    |
10268  *		    +================================+		   |   |    |
10269  *								   |   |    |
10270  *		    +================================+	 (d)	   |   |    |
10271  *		    |		fcp_resp	     | <-----------+   |    |
10272  *		    |	(DMA resources associated)   |		       |    |
10273  *		    +================================+		       |    |
10274  *								       |    |
10275  *								       |    |
10276  *								       |    |
10277  *		    +================================+	 (c)	       |    |
10278  *		    |	     fcp_cmd cookies	     | <---------------+    |
10279  *		    |				     |			    |
10280  *		    +================================+			    |
10281  *									    |
10282  *		    +================================+	 (c)		    |
10283  *		    |		 fcp_cmd	     | <--------------------+
10284  *		    |	(DMA resources associated)   |
10285  *		    +================================+
10286  *
10287  * (a) Only if DMA is NOT used for the FCP_CMD buffer.
10288  * (b) Only if DMA is NOT used for the FCP_RESP buffer
10289  * (c) Only if DMA is used for the FCP_CMD buffer.
10290  * (d) Only if DMA is used for the FCP_RESP buffer
10291  */
static int
fcp_kmem_cache_constructor(struct scsi_pkt *pkt, scsi_hba_tran_t *tran,
    int kmflags)
{
	struct fcp_pkt	*cmd;
	struct fcp_port	*pptr;
	fc_packet_t	*fpkt;

	pptr = (struct fcp_port *)tran->tran_hba_private;
	cmd = (struct fcp_pkt *)pkt->pkt_ha_private;
	/* Start from a clean slate: zero the whole HBA-private area. */
	bzero(cmd, tran->tran_hba_len);

	/*
	 * Cross-link the scsi_pkt, fcp_pkt and fc_packet structures as
	 * shown in the layout diagram above this function.
	 */
	cmd->cmd_pkt = pkt;
	pkt->pkt_cdbp = cmd->cmd_fcp_cmd.fcp_cdb;
	fpkt = (fc_packet_t *)&cmd->cmd_fc_packet;
	cmd->cmd_fp_pkt = fpkt;

	cmd->cmd_pkt->pkt_ha_private = (opaque_t)cmd;
	cmd->cmd_fp_pkt->pkt_ulp_private = (opaque_t)cmd;
	/*
	 * The FCA private area follows the fcp_pkt and the data cookie
	 * array (port_dmacookie_sz bytes) in the same allocation.
	 */
	cmd->cmd_fp_pkt->pkt_fca_private = (opaque_t)((caddr_t)cmd +
	    sizeof (struct fcp_pkt) + pptr->port_dmacookie_sz);

	/* The data cookie array sits immediately after the fcp_pkt. */
	fpkt->pkt_data_cookie = (ddi_dma_cookie_t *)((caddr_t)cmd +
	    sizeof (struct fcp_pkt));

	fpkt->pkt_cmdlen = sizeof (struct fcp_cmd);
	fpkt->pkt_rsplen = FCP_MAX_RSP_IU_SIZE;

	if (pptr->port_fcp_dma == FC_NO_DVMA_SPACE) {
		/*
		 * The underlying HBA doesn't want to DMA the fcp_cmd or
		 * fcp_resp.  The transfer of information will be done by
		 * bcopy.
		 * The naming of the flags (that is actually a value) is
		 * unfortunate.	 FC_NO_DVMA_SPACE doesn't mean "NO VIRTUAL
		 * DMA" but instead "NO DMA".
		 */
		fpkt->pkt_resp_acc = fpkt->pkt_cmd_acc = NULL;
		fpkt->pkt_cmd = (caddr_t)&cmd->cmd_fcp_cmd;
		fpkt->pkt_resp = cmd->cmd_fcp_rsp;
	} else {
		/*
		 * The underlying HBA will DMA the fcp_cmd and fcp_resp
		 * buffers.  A buffer is allocated and bound for each one
		 * using the ddi_dma_* interfaces.
		 */
		if (fcp_alloc_cmd_resp(pptr, fpkt, kmflags) != FC_SUCCESS) {
			return (-1);
		}
	}

	return (0);
}
10345 
10346 /*
10347  *     Function: fcp_kmem_cache_destructor
10348  *
10349  *  Description: Called by the destructor of the cache managed by SCSA.
10350  *		 All the resources pre-allocated in fcp_pkt_constructor
10351  *		 and the data also pre-initialized in fcp_pkt_constructor
10352  *		 are freed and uninitialized here.
10353  *
10354  *     Argument: *buf		Memory to uninitialize.
10355  *		 *arg		FCP port structure (fcp_port).
10356  *
10357  * Return Value: None
10358  *
10359  *	Context: kernel
10360  */
10361 static void
10362 fcp_kmem_cache_destructor(struct scsi_pkt *pkt, scsi_hba_tran_t *tran)
10363 {
10364 	struct fcp_pkt	*cmd;
10365 	struct fcp_port	*pptr;
10366 
10367 	pptr = (struct fcp_port *)(tran->tran_hba_private);
10368 	cmd = pkt->pkt_ha_private;
10369 
10370 	if (pptr->port_fcp_dma != FC_NO_DVMA_SPACE) {
10371 		/*
10372 		 * If DMA was used to transfer the FCP_CMD and FCP_RESP, the
10373 		 * buffer and DMA resources allocated to do so are released.
10374 		 */
10375 		fcp_free_cmd_resp(pptr, cmd->cmd_fp_pkt);
10376 	}
10377 }
10378 
10379 /*
10380  *     Function: fcp_alloc_cmd_resp
10381  *
10382  *  Description: This function allocated an FCP_CMD and FCP_RESP buffer that
10383  *		 will be DMAed by the HBA.  The buffer is allocated applying
10384  *		 the DMA requirements for the HBA.  The buffers allocated will
10385  *		 also be bound.	 DMA resources are allocated in the process.
10386  *		 They will be released by fcp_free_cmd_resp().
10387  *
10388  *     Argument: *pptr	FCP port.
10389  *		 *fpkt	fc packet for which the cmd and resp packet should be
10390  *			allocated.
10391  *		 flags	Allocation flags.
10392  *
10393  * Return Value: FC_FAILURE
10394  *		 FC_SUCCESS
10395  *
10396  *	Context: User or Kernel context only if flags == KM_SLEEP.
10397  *		 Interrupt context if the KM_SLEEP is not specified.
10398  */
10399 static int
10400 fcp_alloc_cmd_resp(struct fcp_port *pptr, fc_packet_t *fpkt, int flags)
10401 {
10402 	int			rval;
10403 	int			cmd_len;
10404 	int			resp_len;
10405 	ulong_t			real_len;
10406 	int			(*cb) (caddr_t);
10407 	ddi_dma_cookie_t	pkt_cookie;
10408 	ddi_dma_cookie_t	*cp;
10409 	uint32_t		cnt;
10410 
10411 	cb = (flags == KM_SLEEP) ? DDI_DMA_SLEEP : DDI_DMA_DONTWAIT;
10412 
10413 	cmd_len = fpkt->pkt_cmdlen;
10414 	resp_len = fpkt->pkt_rsplen;
10415 
10416 	ASSERT(fpkt->pkt_cmd_dma == NULL);
10417 
10418 	/* Allocation of a DMA handle used in subsequent calls. */
10419 	if (ddi_dma_alloc_handle(pptr->port_dip, &pptr->port_cmd_dma_attr,
10420 	    cb, NULL, &fpkt->pkt_cmd_dma) != DDI_SUCCESS) {
10421 		return (FC_FAILURE);
10422 	}
10423 
10424 	/* A buffer is allocated that satisfies the DMA requirements. */
10425 	rval = ddi_dma_mem_alloc(fpkt->pkt_cmd_dma, cmd_len,
10426 	    &pptr->port_dma_acc_attr, DDI_DMA_CONSISTENT, cb, NULL,
10427 	    (caddr_t *)&fpkt->pkt_cmd, &real_len, &fpkt->pkt_cmd_acc);
10428 
10429 	if (rval != DDI_SUCCESS) {
10430 		ddi_dma_free_handle(&fpkt->pkt_cmd_dma);
10431 		return (FC_FAILURE);
10432 	}
10433 
10434 	if (real_len < cmd_len) {
10435 		ddi_dma_mem_free(&fpkt->pkt_cmd_acc);
10436 		ddi_dma_free_handle(&fpkt->pkt_cmd_dma);
10437 		return (FC_FAILURE);
10438 	}
10439 
10440 	/* The buffer allocated is DMA bound. */
10441 	rval = ddi_dma_addr_bind_handle(fpkt->pkt_cmd_dma, NULL,
10442 	    fpkt->pkt_cmd, real_len, DDI_DMA_WRITE | DDI_DMA_CONSISTENT,
10443 	    cb, NULL, &pkt_cookie, &fpkt->pkt_cmd_cookie_cnt);
10444 
10445 	if (rval != DDI_DMA_MAPPED) {
10446 		ddi_dma_mem_free(&fpkt->pkt_cmd_acc);
10447 		ddi_dma_free_handle(&fpkt->pkt_cmd_dma);
10448 		return (FC_FAILURE);
10449 	}
10450 
10451 	if (fpkt->pkt_cmd_cookie_cnt >
10452 	    pptr->port_cmd_dma_attr.dma_attr_sgllen) {
10453 		(void) ddi_dma_unbind_handle(fpkt->pkt_cmd_dma);
10454 		ddi_dma_mem_free(&fpkt->pkt_cmd_acc);
10455 		ddi_dma_free_handle(&fpkt->pkt_cmd_dma);
10456 		return (FC_FAILURE);
10457 	}
10458 
10459 	ASSERT(fpkt->pkt_cmd_cookie_cnt != 0);
10460 
10461 	/*
10462 	 * The buffer where the scatter/gather list is going to be built is
10463 	 * allocated.
10464 	 */
10465 	cp = fpkt->pkt_cmd_cookie = (ddi_dma_cookie_t *)kmem_alloc(
10466 	    fpkt->pkt_cmd_cookie_cnt * sizeof (pkt_cookie),
10467 	    KM_NOSLEEP);
10468 
10469 	if (cp == NULL) {
10470 		(void) ddi_dma_unbind_handle(fpkt->pkt_cmd_dma);
10471 		ddi_dma_mem_free(&fpkt->pkt_cmd_acc);
10472 		ddi_dma_free_handle(&fpkt->pkt_cmd_dma);
10473 		return (FC_FAILURE);
10474 	}
10475 
10476 	/*
10477 	 * The scatter/gather list for the buffer we just allocated is built
10478 	 * here.
10479 	 */
10480 	*cp = pkt_cookie;
10481 	cp++;
10482 
10483 	for (cnt = 1; cnt < fpkt->pkt_cmd_cookie_cnt; cnt++, cp++) {
10484 		ddi_dma_nextcookie(fpkt->pkt_cmd_dma,
10485 		    &pkt_cookie);
10486 		*cp = pkt_cookie;
10487 	}
10488 
10489 	ASSERT(fpkt->pkt_resp_dma == NULL);
10490 	if (ddi_dma_alloc_handle(pptr->port_dip, &pptr->port_resp_dma_attr,
10491 	    cb, NULL, &fpkt->pkt_resp_dma) != DDI_SUCCESS) {
10492 		(void) ddi_dma_unbind_handle(fpkt->pkt_cmd_dma);
10493 		ddi_dma_mem_free(&fpkt->pkt_cmd_acc);
10494 		ddi_dma_free_handle(&fpkt->pkt_cmd_dma);
10495 		return (FC_FAILURE);
10496 	}
10497 
10498 	rval = ddi_dma_mem_alloc(fpkt->pkt_resp_dma, resp_len,
10499 	    &pptr->port_dma_acc_attr, DDI_DMA_CONSISTENT, cb, NULL,
10500 	    (caddr_t *)&fpkt->pkt_resp, &real_len,
10501 	    &fpkt->pkt_resp_acc);
10502 
10503 	if (rval != DDI_SUCCESS) {
10504 		ddi_dma_free_handle(&fpkt->pkt_resp_dma);
10505 		(void) ddi_dma_unbind_handle(fpkt->pkt_cmd_dma);
10506 		ddi_dma_mem_free(&fpkt->pkt_cmd_acc);
10507 		ddi_dma_free_handle(&fpkt->pkt_cmd_dma);
10508 		kmem_free(fpkt->pkt_cmd_cookie,
10509 		    fpkt->pkt_cmd_cookie_cnt * sizeof (pkt_cookie));
10510 		return (FC_FAILURE);
10511 	}
10512 
10513 	if (real_len < resp_len) {
10514 		ddi_dma_mem_free(&fpkt->pkt_resp_acc);
10515 		ddi_dma_free_handle(&fpkt->pkt_resp_dma);
10516 		(void) ddi_dma_unbind_handle(fpkt->pkt_cmd_dma);
10517 		ddi_dma_mem_free(&fpkt->pkt_cmd_acc);
10518 		ddi_dma_free_handle(&fpkt->pkt_cmd_dma);
10519 		kmem_free(fpkt->pkt_cmd_cookie,
10520 		    fpkt->pkt_cmd_cookie_cnt * sizeof (pkt_cookie));
10521 		return (FC_FAILURE);
10522 	}
10523 
10524 	rval = ddi_dma_addr_bind_handle(fpkt->pkt_resp_dma, NULL,
10525 	    fpkt->pkt_resp, real_len, DDI_DMA_READ | DDI_DMA_CONSISTENT,
10526 	    cb, NULL, &pkt_cookie, &fpkt->pkt_resp_cookie_cnt);
10527 
10528 	if (rval != DDI_DMA_MAPPED) {
10529 		ddi_dma_mem_free(&fpkt->pkt_resp_acc);
10530 		ddi_dma_free_handle(&fpkt->pkt_resp_dma);
10531 		(void) ddi_dma_unbind_handle(fpkt->pkt_cmd_dma);
10532 		ddi_dma_mem_free(&fpkt->pkt_cmd_acc);
10533 		ddi_dma_free_handle(&fpkt->pkt_cmd_dma);
10534 		kmem_free(fpkt->pkt_cmd_cookie,
10535 		    fpkt->pkt_cmd_cookie_cnt * sizeof (pkt_cookie));
10536 		return (FC_FAILURE);
10537 	}
10538 
10539 	if (fpkt->pkt_resp_cookie_cnt >
10540 	    pptr->port_resp_dma_attr.dma_attr_sgllen) {
10541 		ddi_dma_mem_free(&fpkt->pkt_resp_acc);
10542 		ddi_dma_free_handle(&fpkt->pkt_resp_dma);
10543 		(void) ddi_dma_unbind_handle(fpkt->pkt_cmd_dma);
10544 		ddi_dma_mem_free(&fpkt->pkt_cmd_acc);
10545 		ddi_dma_free_handle(&fpkt->pkt_cmd_dma);
10546 		kmem_free(fpkt->pkt_cmd_cookie,
10547 		    fpkt->pkt_cmd_cookie_cnt * sizeof (pkt_cookie));
10548 		return (FC_FAILURE);
10549 	}
10550 
10551 	ASSERT(fpkt->pkt_resp_cookie_cnt != 0);
10552 
10553 	cp = fpkt->pkt_resp_cookie = (ddi_dma_cookie_t *)kmem_alloc(
10554 	    fpkt->pkt_resp_cookie_cnt * sizeof (pkt_cookie),
10555 	    KM_NOSLEEP);
10556 
10557 	if (cp == NULL) {
10558 		ddi_dma_mem_free(&fpkt->pkt_resp_acc);
10559 		ddi_dma_free_handle(&fpkt->pkt_resp_dma);
10560 		(void) ddi_dma_unbind_handle(fpkt->pkt_cmd_dma);
10561 		ddi_dma_mem_free(&fpkt->pkt_cmd_acc);
10562 		ddi_dma_free_handle(&fpkt->pkt_cmd_dma);
10563 		kmem_free(fpkt->pkt_cmd_cookie,
10564 		    fpkt->pkt_cmd_cookie_cnt * sizeof (pkt_cookie));
10565 		return (FC_FAILURE);
10566 	}
10567 
10568 	*cp = pkt_cookie;
10569 	cp++;
10570 
10571 	for (cnt = 1; cnt < fpkt->pkt_resp_cookie_cnt; cnt++, cp++) {
10572 		ddi_dma_nextcookie(fpkt->pkt_resp_dma,
10573 		    &pkt_cookie);
10574 		*cp = pkt_cookie;
10575 	}
10576 
10577 	return (FC_SUCCESS);
10578 }
10579 
10580 /*
10581  *     Function: fcp_free_cmd_resp
10582  *
10583  *  Description: This function releases the FCP_CMD and FCP_RESP buffer
10584  *		 allocated by fcp_alloc_cmd_resp() and all the resources
10585  *		 associated with them.	That includes the DMA resources and the
10586  *		 buffer allocated for the cookies of each one of them.
10587  *
10588  *     Argument: *pptr		FCP port context.
10589  *		 *fpkt		fc packet containing the cmd and resp packet
10590  *				to be released.
10591  *
10592  * Return Value: None
10593  *
10594  *	Context: Interrupt, User and Kernel context.
10595  */
10596 /* ARGSUSED */
10597 static void
10598 fcp_free_cmd_resp(struct fcp_port *pptr, fc_packet_t *fpkt)
10599 {
10600 	ASSERT(fpkt->pkt_resp_dma != NULL && fpkt->pkt_cmd_dma != NULL);
10601 
10602 	if (fpkt->pkt_resp_dma) {
10603 		(void) ddi_dma_unbind_handle(fpkt->pkt_resp_dma);
10604 		ddi_dma_mem_free(&fpkt->pkt_resp_acc);
10605 		ddi_dma_free_handle(&fpkt->pkt_resp_dma);
10606 	}
10607 
10608 	if (fpkt->pkt_resp_cookie) {
10609 		kmem_free(fpkt->pkt_resp_cookie,
10610 		    fpkt->pkt_resp_cookie_cnt * sizeof (ddi_dma_cookie_t));
10611 		fpkt->pkt_resp_cookie = NULL;
10612 	}
10613 
10614 	if (fpkt->pkt_cmd_dma) {
10615 		(void) ddi_dma_unbind_handle(fpkt->pkt_cmd_dma);
10616 		ddi_dma_mem_free(&fpkt->pkt_cmd_acc);
10617 		ddi_dma_free_handle(&fpkt->pkt_cmd_dma);
10618 	}
10619 
10620 	if (fpkt->pkt_cmd_cookie) {
10621 		kmem_free(fpkt->pkt_cmd_cookie,
10622 		    fpkt->pkt_cmd_cookie_cnt * sizeof (ddi_dma_cookie_t));
10623 		fpkt->pkt_cmd_cookie = NULL;
10624 	}
10625 }
10626 
10627 
10628 /*
10629  * called by the transport to do our own target initialization
10630  *
10631  * can acquire and release the global mutex
10632  */
10633 /* ARGSUSED */
10634 static int
10635 fcp_phys_tgt_init(dev_info_t *hba_dip, dev_info_t *tgt_dip,
10636     scsi_hba_tran_t *hba_tran, struct scsi_device *sd)
10637 {
10638 	int			*words;
10639 	uchar_t			*bytes;
10640 	uint_t			nbytes;
10641 	uint_t			nwords;
10642 	struct fcp_tgt	*ptgt;
10643 	struct fcp_lun	*plun;
10644 	struct fcp_port	*pptr = (struct fcp_port *)
10645 	    hba_tran->tran_hba_private;
10646 
10647 	ASSERT(pptr != NULL);
10648 
10649 	FCP_DTRACE(fcp_logq, pptr->port_instbuf, fcp_trace,
10650 	    FCP_BUF_LEVEL_8, 0,
10651 	    "fcp_phys_tgt_init: called for %s (instance %d)",
10652 	    ddi_get_name(tgt_dip), ddi_get_instance(tgt_dip));
10653 
10654 	/* get our port WWN property */
10655 	bytes = NULL;
10656 	if ((ddi_prop_lookup_byte_array(DDI_DEV_T_ANY, tgt_dip,
10657 	    DDI_PROP_DONTPASS | DDI_PROP_NOTPROM, PORT_WWN_PROP, &bytes,
10658 	    &nbytes) != DDI_PROP_SUCCESS) || nbytes != FC_WWN_SIZE) {
10659 		/* no port WWN property */
10660 		FCP_DTRACE(fcp_logq, pptr->port_instbuf, fcp_trace,
10661 		    FCP_BUF_LEVEL_8, 0,
10662 		    "fcp_phys_tgt_init: Returning DDI_NOT_WELL_FORMED"
10663 		    " for %s (instance %d): bytes=%p nbytes=%x",
10664 		    ddi_get_name(tgt_dip), ddi_get_instance(tgt_dip), bytes,
10665 		    nbytes);
10666 
10667 		if (bytes != NULL) {
10668 			ddi_prop_free(bytes);
10669 		}
10670 
10671 		return (DDI_NOT_WELL_FORMED);
10672 	}
10673 
10674 	words = NULL;
10675 	if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY, tgt_dip,
10676 	    DDI_PROP_DONTPASS | DDI_PROP_NOTPROM,
10677 	    LUN_PROP, &words, &nwords) != DDI_PROP_SUCCESS) {
10678 		ASSERT(bytes != NULL);
10679 
10680 		FCP_DTRACE(fcp_logq, pptr->port_instbuf, fcp_trace,
10681 		    FCP_BUF_LEVEL_8, 0,
10682 		    "fcp_phys_tgt_init: Returning DDI_FAILURE:lun"
10683 		    " for %s (instance %d)", ddi_get_name(tgt_dip),
10684 		    ddi_get_instance(tgt_dip));
10685 
10686 		ddi_prop_free(bytes);
10687 
10688 		return (DDI_NOT_WELL_FORMED);
10689 	}
10690 
10691 	if (nwords == 0) {
10692 		ddi_prop_free(bytes);
10693 		ddi_prop_free(words);
10694 		return (DDI_NOT_WELL_FORMED);
10695 	}
10696 
10697 	ASSERT(bytes != NULL && words != NULL);
10698 
10699 	mutex_enter(&pptr->port_mutex);
10700 	if ((plun = fcp_lookup_lun(pptr, bytes, *words)) == NULL) {
10701 		mutex_exit(&pptr->port_mutex);
10702 		FCP_DTRACE(fcp_logq, pptr->port_instbuf, fcp_trace,
10703 		    FCP_BUF_LEVEL_8, 0,
10704 		    "fcp_phys_tgt_init: Returning DDI_FAILURE: No Lun"
10705 		    " for %s (instance %d)", ddi_get_name(tgt_dip),
10706 		    ddi_get_instance(tgt_dip));
10707 
10708 		ddi_prop_free(bytes);
10709 		ddi_prop_free(words);
10710 
10711 		return (DDI_FAILURE);
10712 	}
10713 
10714 	ASSERT(bcmp(plun->lun_tgt->tgt_port_wwn.raw_wwn, bytes,
10715 	    FC_WWN_SIZE) == 0);
10716 	ASSERT(plun->lun_num == (uint16_t)*words);
10717 
10718 	ddi_prop_free(bytes);
10719 	ddi_prop_free(words);
10720 
10721 	ptgt = plun->lun_tgt;
10722 
10723 	mutex_enter(&ptgt->tgt_mutex);
10724 	plun->lun_tgt_count++;
10725 	scsi_device_hba_private_set(sd, plun);
10726 	plun->lun_state |= FCP_SCSI_LUN_TGT_INIT;
10727 	plun->lun_tran = hba_tran;
10728 	mutex_exit(&ptgt->tgt_mutex);
10729 	mutex_exit(&pptr->port_mutex);
10730 
10731 	return (DDI_SUCCESS);
10732 }
10733 
/*
 *     Function: fcp_virt_tgt_init
 *
 *  Description: tran_tgt_init(9E) entry point for mpxio-managed (virtual)
 *		 child nodes.  Reads the port-WWN and LUN properties off the
 *		 pathinfo node, looks up the matching fcp_lun on this port
 *		 and wires the scsi_device to it.
 *
 * Return Value: DDI_SUCCESS		LUN found and initialized.
 *		 DDI_NOT_WELL_FORMED	No pathinfo, or properties missing
 *					or bad.
 *		 DDI_FAILURE		No matching LUN on this port.
 */
/*ARGSUSED*/
static int
fcp_virt_tgt_init(dev_info_t *hba_dip, dev_info_t *tgt_dip,
    scsi_hba_tran_t *hba_tran, struct scsi_device *sd)
{
	int			words;
	uchar_t			*bytes;
	uint_t			nbytes;
	struct fcp_tgt	*ptgt;
	struct fcp_lun	*plun;
	struct fcp_port	*pptr = (struct fcp_port *)
	    hba_tran->tran_hba_private;
	child_info_t		*cip;

	ASSERT(pptr != NULL);

	FCP_DTRACE(fcp_logq, pptr->port_instbuf,
	    fcp_trace, FCP_BUF_LEVEL_8, 0,
	    "fcp_virt_tgt_init: called for %s (instance %d) (hba_dip %p),"
	    " (tgt_dip %p)", ddi_get_name(tgt_dip),
	    ddi_get_instance(tgt_dip), hba_dip, tgt_dip);

	/* A virtual child must have a pathinfo node. */
	cip = (child_info_t *)sd->sd_pathinfo;
	if (cip == NULL) {
		FCP_DTRACE(fcp_logq, pptr->port_instbuf,
		    fcp_trace, FCP_BUF_LEVEL_8, 0,
		    "fcp_virt_tgt_init: Returning DDI_NOT_WELL_FORMED"
		    " for %s (instance %d)", ddi_get_name(tgt_dip),
		    ddi_get_instance(tgt_dip));

		return (DDI_NOT_WELL_FORMED);
	}

	/* get our port WWN property */
	bytes = NULL;
	if ((mdi_prop_lookup_byte_array(PIP(cip), PORT_WWN_PROP, &bytes,
	    &nbytes) != DDI_PROP_SUCCESS) || nbytes != FC_WWN_SIZE) {
		/* lookup may succeed with a wrong-sized property */
		if (bytes) {
			(void) mdi_prop_free(bytes);
		}
		return (DDI_NOT_WELL_FORMED);
	}

	/* get our LUN property */
	words = 0;
	if (mdi_prop_lookup_int(PIP(cip), LUN_PROP, &words) !=
	    DDI_PROP_SUCCESS) {
		ASSERT(bytes != NULL);

		FCP_DTRACE(fcp_logq, pptr->port_instbuf,
		    fcp_trace, FCP_BUF_LEVEL_8, 0,
		    "fcp_virt_tgt_init: Returning DDI_FAILURE:lun"
		    " for %s (instance %d)", ddi_get_name(tgt_dip),
		    ddi_get_instance(tgt_dip));

		(void) mdi_prop_free(bytes);
		return (DDI_NOT_WELL_FORMED);
	}

	ASSERT(bytes != NULL);

	/* Match the WWN/LUN against the LUNs known on this port. */
	mutex_enter(&pptr->port_mutex);
	if ((plun = fcp_lookup_lun(pptr, bytes, words)) == NULL) {
		mutex_exit(&pptr->port_mutex);
		FCP_DTRACE(fcp_logq, pptr->port_instbuf,
		    fcp_trace, FCP_BUF_LEVEL_8, 0,
		    "fcp_virt_tgt_init: Returning DDI_FAILURE: No Lun"
		    " for %s (instance %d)", ddi_get_name(tgt_dip),
		    ddi_get_instance(tgt_dip));

		(void) mdi_prop_free(bytes);
		(void) mdi_prop_free(&words);

		return (DDI_FAILURE);
	}

	ASSERT(bcmp(plun->lun_tgt->tgt_port_wwn.raw_wwn, bytes,
	    FC_WWN_SIZE) == 0);
	ASSERT(plun->lun_num == (uint16_t)words);

	(void) mdi_prop_free(bytes);
	(void) mdi_prop_free(&words);

	ptgt = plun->lun_tgt;

	/*
	 * Wire the scsi_device to the LUN; port_mutex is still held so the
	 * LUN can't disappear while we take the target mutex.
	 */
	mutex_enter(&ptgt->tgt_mutex);
	plun->lun_tgt_count++;
	scsi_device_hba_private_set(sd, plun);
	plun->lun_state |= FCP_SCSI_LUN_TGT_INIT;
	plun->lun_tran = hba_tran;
	mutex_exit(&ptgt->tgt_mutex);
	mutex_exit(&pptr->port_mutex);

	return (DDI_SUCCESS);
}
10828 
10829 
10830 /*
10831  * called by the transport to do our own target initialization
10832  *
10833  * can acquire and release the global mutex
10834  */
10835 /* ARGSUSED */
10836 static int
10837 fcp_scsi_tgt_init(dev_info_t *hba_dip, dev_info_t *tgt_dip,
10838     scsi_hba_tran_t *hba_tran, struct scsi_device *sd)
10839 {
10840 	struct fcp_port	*pptr = (struct fcp_port *)
10841 	    hba_tran->tran_hba_private;
10842 	int			rval;
10843 
10844 	ASSERT(pptr != NULL);
10845 
10846 	/*
10847 	 * Child node is getting initialized.  Look at the mpxio component
10848 	 * type on the child device to see if this device is mpxio managed
10849 	 * or not.
10850 	 */
10851 	if (mdi_component_is_client(tgt_dip, NULL) == MDI_SUCCESS) {
10852 		rval = fcp_virt_tgt_init(hba_dip, tgt_dip, hba_tran, sd);
10853 	} else {
10854 		rval = fcp_phys_tgt_init(hba_dip, tgt_dip, hba_tran, sd);
10855 	}
10856 
10857 	return (rval);
10858 }
10859 
10860 
10861 /* ARGSUSED */
10862 static void
10863 fcp_scsi_tgt_free(dev_info_t *hba_dip, dev_info_t *tgt_dip,
10864     scsi_hba_tran_t *hba_tran, struct scsi_device *sd)
10865 {
10866 	struct fcp_lun	*plun = scsi_device_hba_private_get(sd);
10867 	struct fcp_tgt	*ptgt;
10868 
10869 	FCP_DTRACE(fcp_logq, LUN_PORT->port_instbuf,
10870 	    fcp_trace, FCP_BUF_LEVEL_8, 0,
10871 	    "fcp_scsi_tgt_free: called for tran %s%d, dev %s%d",
10872 	    ddi_get_name(hba_dip), ddi_get_instance(hba_dip),
10873 	    ddi_get_name(tgt_dip), ddi_get_instance(tgt_dip));
10874 
10875 	if (plun == NULL) {
10876 		return;
10877 	}
10878 	ptgt = plun->lun_tgt;
10879 
10880 	ASSERT(ptgt != NULL);
10881 
10882 	mutex_enter(&ptgt->tgt_mutex);
10883 	ASSERT(plun->lun_tgt_count > 0);
10884 
10885 	if (--plun->lun_tgt_count == 0) {
10886 		plun->lun_state &= ~FCP_SCSI_LUN_TGT_INIT;
10887 	}
10888 	plun->lun_tran = NULL;
10889 	mutex_exit(&ptgt->tgt_mutex);
10890 }
10891 
10892 /*
10893  *     Function: fcp_scsi_start
10894  *
10895  *  Description: This function is called by the target driver to request a
10896  *		 command to be sent.
10897  *
10898  *     Argument: *ap		SCSI address of the device.
10899  *		 *pkt		SCSI packet containing the cmd to send.
10900  *
10901  * Return Value: TRAN_ACCEPT
10902  *		 TRAN_BUSY
10903  *		 TRAN_BADPKT
10904  *		 TRAN_FATAL_ERROR
10905  */
10906 static int
10907 fcp_scsi_start(struct scsi_address *ap, struct scsi_pkt *pkt)
10908 {
10909 	struct fcp_port	*pptr = ADDR2FCP(ap);
10910 	struct fcp_lun	*plun = ADDR2LUN(ap);
10911 	struct fcp_pkt	*cmd = PKT2CMD(pkt);
10912 	struct fcp_tgt	*ptgt = plun->lun_tgt;
10913 	int			rval;
10914 
10915 	/* ensure command isn't already issued */
10916 	ASSERT(cmd->cmd_state != FCP_PKT_ISSUED);
10917 
10918 	FCP_DTRACE(fcp_logq, pptr->port_instbuf,
10919 	    fcp_trace, FCP_BUF_LEVEL_9, 0,
10920 	    "fcp_transport Invoked for %x", plun->lun_tgt->tgt_d_id);
10921 
10922 	/*
10923 	 * It is strange that we enter the fcp_port mutex and the target
10924 	 * mutex to check the lun state (which has a mutex of its own).
10925 	 */
10926 	mutex_enter(&pptr->port_mutex);
10927 	mutex_enter(&ptgt->tgt_mutex);
10928 
10929 	/*
10930 	 * If the device is offline and is not in the process of coming
10931 	 * online, fail the request.
10932 	 */
10933 
10934 	if ((plun->lun_state & FCP_LUN_OFFLINE) &&
10935 	    !(plun->lun_state & FCP_LUN_ONLINING)) {
10936 		mutex_exit(&ptgt->tgt_mutex);
10937 		mutex_exit(&pptr->port_mutex);
10938 
10939 		if (cmd->cmd_fp_pkt->pkt_pd == NULL) {
10940 			pkt->pkt_reason = CMD_DEV_GONE;
10941 		}
10942 
10943 		return (TRAN_FATAL_ERROR);
10944 	}
10945 	cmd->cmd_fp_pkt->pkt_timeout = pkt->pkt_time;
10946 
10947 	/*
10948 	 * If we are suspended, kernel is trying to dump, so don't
10949 	 * block, fail or defer requests - send them down right away.
10950 	 * NOTE: If we are in panic (i.e. trying to dump), we can't
10951 	 * assume we have been suspended.  There is hardware such as
10952 	 * the v880 that doesn't do PM.	 Thus, the check for
10953 	 * ddi_in_panic.
10954 	 *
10955 	 * If FCP_STATE_IN_CB_DEVC is set, devices are in the process
10956 	 * of changing.	 So, if we can queue the packet, do it.	 Eventually,
10957 	 * either the device will have gone away or changed and we can fail
10958 	 * the request, or we can proceed if the device didn't change.
10959 	 *
10960 	 * If the pd in the target or the packet is NULL it's probably
10961 	 * because the device has gone away, we allow the request to be
10962 	 * put on the internal queue here in case the device comes back within
10963 	 * the offline timeout. fctl will fix up the pd's if the tgt_pd_handle
10964 	 * has gone NULL, while fcp deals cases where pkt_pd is NULL. pkt_pd
10965 	 * could be NULL because the device was disappearing during or since
10966 	 * packet initialization.
10967 	 */
10968 
10969 	if (((plun->lun_state & FCP_LUN_BUSY) && (!(pptr->port_state &
10970 	    FCP_STATE_SUSPENDED)) && !ddi_in_panic()) ||
10971 	    (pptr->port_state & (FCP_STATE_ONLINING | FCP_STATE_IN_CB_DEVC)) ||
10972 	    (ptgt->tgt_pd_handle == NULL) ||
10973 	    (cmd->cmd_fp_pkt->pkt_pd == NULL)) {
10974 		/*
10975 		 * If ((LUN is busy AND
10976 		 *	LUN not suspended AND
10977 		 *	The system is not in panic state) OR
10978 		 *	(The port is coming up))
10979 		 *
10980 		 * We check to see if the any of the flags FLAG_NOINTR or
10981 		 * FLAG_NOQUEUE is set.	 If one of them is set the value
10982 		 * returned will be TRAN_BUSY.	If not, the request is queued.
10983 		 */
10984 		mutex_exit(&ptgt->tgt_mutex);
10985 		mutex_exit(&pptr->port_mutex);
10986 
10987 		/* see if using interrupts is allowed (so queueing'll work) */
10988 		if (pkt->pkt_flags & FLAG_NOINTR) {
10989 			pkt->pkt_resid = 0;
10990 			return (TRAN_BUSY);
10991 		}
10992 		if (pkt->pkt_flags & FLAG_NOQUEUE) {
10993 			FCP_DTRACE(fcp_logq, pptr->port_instbuf,
10994 			    fcp_trace, FCP_BUF_LEVEL_9, 0,
10995 			    "fcp_scsi_start: lun busy for pkt %p", pkt);
10996 			return (TRAN_BUSY);
10997 		}
10998 #ifdef	DEBUG
10999 		mutex_enter(&pptr->port_pkt_mutex);
11000 		pptr->port_npkts++;
11001 		mutex_exit(&pptr->port_pkt_mutex);
11002 #endif /* DEBUG */
11003 
11004 		/* got queue up the pkt for later */
11005 		fcp_queue_pkt(pptr, cmd);
11006 		return (TRAN_ACCEPT);
11007 	}
11008 	cmd->cmd_state = FCP_PKT_ISSUED;
11009 
11010 	mutex_exit(&ptgt->tgt_mutex);
11011 	mutex_exit(&pptr->port_mutex);
11012 
11013 	/*
11014 	 * Now that we released the mutexes, what was protected by them can
11015 	 * change.
11016 	 */
11017 
11018 	/*
11019 	 * If there is a reconfiguration in progress, wait for it to complete.
11020 	 */
11021 	fcp_reconfig_wait(pptr);
11022 
11023 	cmd->cmd_timeout = pkt->pkt_time ? fcp_watchdog_time +
11024 	    pkt->pkt_time : 0;
11025 
11026 	/* prepare the packet */
11027 
11028 	fcp_prepare_pkt(pptr, cmd, plun);
11029 
11030 	if (cmd->cmd_pkt->pkt_time) {
11031 		cmd->cmd_fp_pkt->pkt_timeout = cmd->cmd_pkt->pkt_time;
11032 	} else {
11033 		cmd->cmd_fp_pkt->pkt_timeout = 5 * 60 * 60;
11034 	}
11035 
11036 	/*
11037 	 * if interrupts aren't allowed (e.g. at dump time) then we'll
11038 	 * have to do polled I/O
11039 	 */
11040 	if (pkt->pkt_flags & FLAG_NOINTR) {
11041 		cmd->cmd_state &= ~FCP_PKT_ISSUED;
11042 		return (fcp_dopoll(pptr, cmd));
11043 	}
11044 
11045 #ifdef	DEBUG
11046 	mutex_enter(&pptr->port_pkt_mutex);
11047 	pptr->port_npkts++;
11048 	mutex_exit(&pptr->port_pkt_mutex);
11049 #endif /* DEBUG */
11050 
11051 	rval = fcp_transport(pptr->port_fp_handle, cmd->cmd_fp_pkt, 0);
11052 	if (rval == FC_SUCCESS) {
11053 		FCP_DTRACE(fcp_logq, pptr->port_instbuf,
11054 		    fcp_trace, FCP_BUF_LEVEL_9, 0,
11055 		    "fcp_transport success for %x", plun->lun_tgt->tgt_d_id);
11056 		return (TRAN_ACCEPT);
11057 	}
11058 
11059 	cmd->cmd_state = FCP_PKT_IDLE;
11060 
11061 #ifdef	DEBUG
11062 	mutex_enter(&pptr->port_pkt_mutex);
11063 	pptr->port_npkts--;
11064 	mutex_exit(&pptr->port_pkt_mutex);
11065 #endif /* DEBUG */
11066 
11067 	/*
11068 	 * For lack of clearer definitions, choose
11069 	 * between TRAN_BUSY and TRAN_FATAL_ERROR.
11070 	 */
11071 
11072 	if (rval == FC_TRAN_BUSY) {
11073 		pkt->pkt_resid = 0;
11074 		rval = TRAN_BUSY;
11075 	} else {
11076 		mutex_enter(&ptgt->tgt_mutex);
11077 		if (plun->lun_state & FCP_LUN_OFFLINE) {
11078 			child_info_t	*cip;
11079 
11080 			mutex_enter(&plun->lun_mutex);
11081 			cip = plun->lun_cip;
11082 			mutex_exit(&plun->lun_mutex);
11083 
11084 			FCP_TRACE(fcp_logq, pptr->port_instbuf,
11085 			    fcp_trace, FCP_BUF_LEVEL_6, 0,
11086 			    "fcp_transport failed 2 for %x: %x; dip=%p",
11087 			    plun->lun_tgt->tgt_d_id, rval, cip);
11088 
11089 			rval = TRAN_FATAL_ERROR;
11090 		} else {
11091 			if (pkt->pkt_flags & FLAG_NOQUEUE) {
11092 				FCP_DTRACE(fcp_logq, pptr->port_instbuf,
11093 				    fcp_trace, FCP_BUF_LEVEL_9, 0,
11094 				    "fcp_scsi_start: FC_BUSY for pkt %p",
11095 				    pkt);
11096 				rval = TRAN_BUSY;
11097 			} else {
11098 				rval = TRAN_ACCEPT;
11099 				fcp_queue_pkt(pptr, cmd);
11100 			}
11101 		}
11102 		mutex_exit(&ptgt->tgt_mutex);
11103 	}
11104 
11105 	return (rval);
11106 }
11107 
11108 /*
11109  * called by the transport to abort a packet
11110  */
11111 /*ARGSUSED*/
11112 static int
11113 fcp_scsi_abort(struct scsi_address *ap, struct scsi_pkt *pkt)
11114 {
11115 	int tgt_cnt;
11116 	struct fcp_port		*pptr = ADDR2FCP(ap);
11117 	struct fcp_lun	*plun = ADDR2LUN(ap);
11118 	struct fcp_tgt	*ptgt = plun->lun_tgt;
11119 
11120 	if (pkt == NULL) {
11121 		if (ptgt) {
11122 			mutex_enter(&ptgt->tgt_mutex);
11123 			tgt_cnt = ptgt->tgt_change_cnt;
11124 			mutex_exit(&ptgt->tgt_mutex);
11125 			fcp_abort_all(pptr, ptgt, plun, tgt_cnt);
11126 			return (TRUE);
11127 		}
11128 	}
11129 	return (FALSE);
11130 }
11131 
11132 
11133 /*
11134  * Perform reset
11135  */
11136 int
11137 fcp_scsi_reset(struct scsi_address *ap, int level)
11138 {
11139 	int			rval = 0;
11140 	struct fcp_port		*pptr = ADDR2FCP(ap);
11141 	struct fcp_lun	*plun = ADDR2LUN(ap);
11142 	struct fcp_tgt	*ptgt = plun->lun_tgt;
11143 
11144 	if (level == RESET_ALL) {
11145 		if (fcp_linkreset(pptr, ap, KM_NOSLEEP) == FC_SUCCESS) {
11146 			rval = 1;
11147 		}
11148 	} else if (level == RESET_TARGET || level == RESET_LUN) {
11149 		/*
11150 		 * If we are in the middle of discovery, return
11151 		 * SUCCESS as this target will be rediscovered
11152 		 * anyway
11153 		 */
11154 		mutex_enter(&ptgt->tgt_mutex);
11155 		if (ptgt->tgt_state & (FCP_TGT_OFFLINE | FCP_TGT_BUSY)) {
11156 			mutex_exit(&ptgt->tgt_mutex);
11157 			return (1);
11158 		}
11159 		mutex_exit(&ptgt->tgt_mutex);
11160 
11161 		if (fcp_reset_target(ap, level) == FC_SUCCESS) {
11162 			rval = 1;
11163 		}
11164 	}
11165 	return (rval);
11166 }
11167 
11168 
11169 /*
11170  * called by the framework to get a SCSI capability
11171  */
11172 static int
11173 fcp_scsi_getcap(struct scsi_address *ap, char *cap, int whom)
11174 {
11175 	return (fcp_commoncap(ap, cap, 0, whom, 0));
11176 }
11177 
11178 
11179 /*
11180  * called by the framework to set a SCSI capability
11181  */
11182 static int
11183 fcp_scsi_setcap(struct scsi_address *ap, char *cap, int value, int whom)
11184 {
11185 	return (fcp_commoncap(ap, cap, value, whom, 1));
11186 }
11187 
11188 /*
11189  *     Function: fcp_pkt_setup
11190  *
11191  *  Description: This function sets up the scsi_pkt structure passed by the
11192  *		 caller. This function assumes fcp_pkt_constructor has been
11193  *		 called previously for the packet passed by the caller.	 If
11194  *		 successful this call will have the following results:
11195  *
11196  *		   - The resources needed that will be constant through out
11197  *		     the whole transaction are allocated.
11198  *		   - The fields that will be constant through out the whole
11199  *		     transaction are initialized.
11200  *		   - The scsi packet will be linked to the LUN structure
11201  *		     addressed by the transaction.
11202  *
11203  *     Argument:
11204  *		 *pkt		Pointer to a scsi_pkt structure.
11205  *		 callback
11206  *		 arg
11207  *
11208  * Return Value: 0	Success
11209  *		 !0	Failure
11210  *
11211  *	Context: Kernel context or interrupt context
11212  */
11213 /* ARGSUSED */
11214 static int
11215 fcp_pkt_setup(struct scsi_pkt *pkt,
11216     int (*callback)(caddr_t arg),
11217     caddr_t arg)
11218 {
11219 	struct fcp_pkt	*cmd;
11220 	struct fcp_port	*pptr;
11221 	struct fcp_lun	*plun;
11222 	struct fcp_tgt	*ptgt;
11223 	int		kf;
11224 	fc_packet_t	*fpkt;
11225 	fc_frame_hdr_t	*hp;
11226 
11227 	pptr = ADDR2FCP(&pkt->pkt_address);
11228 	plun = ADDR2LUN(&pkt->pkt_address);
11229 	ptgt = plun->lun_tgt;
11230 
11231 	cmd = (struct fcp_pkt *)pkt->pkt_ha_private;
11232 	fpkt = cmd->cmd_fp_pkt;
11233 
11234 	/*
11235 	 * this request is for dma allocation only
11236 	 */
11237 	/*
11238 	 * First step of fcp_scsi_init_pkt: pkt allocation
11239 	 * We determine if the caller is willing to wait for the
11240 	 * resources.
11241 	 */
11242 	kf = (callback == SLEEP_FUNC) ? KM_SLEEP: KM_NOSLEEP;
11243 
11244 	/*
11245 	 * Selective zeroing of the pkt.
11246 	 */
11247 	cmd->cmd_back = NULL;
11248 	cmd->cmd_next = NULL;
11249 
11250 	/*
11251 	 * Zero out fcp command
11252 	 */
11253 	bzero(&cmd->cmd_fcp_cmd, sizeof (cmd->cmd_fcp_cmd));
11254 
11255 	cmd->cmd_state = FCP_PKT_IDLE;
11256 
11257 	fpkt = cmd->cmd_fp_pkt;
11258 	fpkt->pkt_data_acc = NULL;
11259 
11260 	mutex_enter(&ptgt->tgt_mutex);
11261 	fpkt->pkt_pd = ptgt->tgt_pd_handle;
11262 
11263 	if (fc_ulp_init_packet(pptr->port_fp_handle, fpkt, kf)
11264 	    != FC_SUCCESS) {
11265 		mutex_exit(&ptgt->tgt_mutex);
11266 		return (-1);
11267 	}
11268 
11269 	mutex_exit(&ptgt->tgt_mutex);
11270 
11271 	/* Fill in the Fabric Channel Header */
11272 	hp = &fpkt->pkt_cmd_fhdr;
11273 	hp->r_ctl = R_CTL_COMMAND;
11274 	hp->rsvd = 0;
11275 	hp->type = FC_TYPE_SCSI_FCP;
11276 	hp->f_ctl = F_CTL_SEQ_INITIATIVE | F_CTL_FIRST_SEQ;
11277 	hp->seq_id = 0;
11278 	hp->df_ctl  = 0;
11279 	hp->seq_cnt = 0;
11280 	hp->ox_id = 0xffff;
11281 	hp->rx_id = 0xffff;
11282 	hp->ro = 0;
11283 
11284 	/*
11285 	 * A doubly linked list (cmd_forw, cmd_back) is built
11286 	 * out of every allocated packet on a per-lun basis
11287 	 *
11288 	 * The packets are maintained in the list so as to satisfy
11289 	 * scsi_abort() requests. At present (which is unlikely to
11290 	 * change in the future) nobody performs a real scsi_abort
11291 	 * in the SCSI target drivers (as they don't keep the packets
11292 	 * after doing scsi_transport - so they don't know how to
11293 	 * abort a packet other than sending a NULL to abort all
11294 	 * outstanding packets)
11295 	 */
11296 	mutex_enter(&plun->lun_mutex);
11297 	if ((cmd->cmd_forw = plun->lun_pkt_head) != NULL) {
11298 		plun->lun_pkt_head->cmd_back = cmd;
11299 	} else {
11300 		plun->lun_pkt_tail = cmd;
11301 	}
11302 	plun->lun_pkt_head = cmd;
11303 	mutex_exit(&plun->lun_mutex);
11304 	return (0);
11305 }
11306 
11307 /*
11308  *     Function: fcp_pkt_teardown
11309  *
11310  *  Description: This function releases a scsi_pkt structure and all the
11311  *		 resources attached to it.
11312  *
11313  *     Argument: *pkt		Pointer to a scsi_pkt structure.
11314  *
11315  * Return Value: None
11316  *
11317  *	Context: User, Kernel or Interrupt context.
11318  */
11319 static void
11320 fcp_pkt_teardown(struct scsi_pkt *pkt)
11321 {
11322 	struct fcp_port	*pptr = ADDR2FCP(&pkt->pkt_address);
11323 	struct fcp_lun	*plun = ADDR2LUN(&pkt->pkt_address);
11324 	struct fcp_pkt	*cmd = (struct fcp_pkt *)pkt->pkt_ha_private;
11325 
11326 	/*
11327 	 * Remove the packet from the per-lun list
11328 	 */
11329 	mutex_enter(&plun->lun_mutex);
11330 	if (cmd->cmd_back) {
11331 		ASSERT(cmd != plun->lun_pkt_head);
11332 		cmd->cmd_back->cmd_forw = cmd->cmd_forw;
11333 	} else {
11334 		ASSERT(cmd == plun->lun_pkt_head);
11335 		plun->lun_pkt_head = cmd->cmd_forw;
11336 	}
11337 
11338 	if (cmd->cmd_forw) {
11339 		cmd->cmd_forw->cmd_back = cmd->cmd_back;
11340 	} else {
11341 		ASSERT(cmd == plun->lun_pkt_tail);
11342 		plun->lun_pkt_tail = cmd->cmd_back;
11343 	}
11344 
11345 	mutex_exit(&plun->lun_mutex);
11346 
11347 	(void) fc_ulp_uninit_packet(pptr->port_fp_handle, cmd->cmd_fp_pkt);
11348 }
11349 
11350 /*
11351  * Routine for reset notification setup, to register or cancel.
11352  * This function is called by SCSA
11353  */
11354 /*ARGSUSED*/
11355 static int
11356 fcp_scsi_reset_notify(struct scsi_address *ap, int flag,
11357     void (*callback)(caddr_t), caddr_t arg)
11358 {
11359 	struct fcp_port *pptr = ADDR2FCP(ap);
11360 
11361 	return (scsi_hba_reset_notify_setup(ap, flag, callback, arg,
11362 	    &pptr->port_mutex, &pptr->port_reset_notify_listf));
11363 }
11364 
11365 
11366 static int
11367 fcp_scsi_bus_get_eventcookie(dev_info_t *dip, dev_info_t *rdip, char *name,
11368     ddi_eventcookie_t *event_cookiep)
11369 {
11370 	struct fcp_port *pptr = fcp_dip2port(dip);
11371 
11372 	if (pptr == NULL) {
11373 		return (DDI_FAILURE);
11374 	}
11375 
11376 	return (ndi_event_retrieve_cookie(pptr->port_ndi_event_hdl, rdip, name,
11377 	    event_cookiep, NDI_EVENT_NOPASS));
11378 }
11379 
11380 
11381 static int
11382 fcp_scsi_bus_add_eventcall(dev_info_t *dip, dev_info_t *rdip,
11383     ddi_eventcookie_t eventid, void (*callback)(), void *arg,
11384     ddi_callback_id_t *cb_id)
11385 {
11386 	struct fcp_port *pptr = fcp_dip2port(dip);
11387 
11388 	if (pptr == NULL) {
11389 		return (DDI_FAILURE);
11390 	}
11391 
11392 	return (ndi_event_add_callback(pptr->port_ndi_event_hdl, rdip,
11393 	    eventid, callback, arg, NDI_SLEEP, cb_id));
11394 }
11395 
11396 
11397 static int
11398 fcp_scsi_bus_remove_eventcall(dev_info_t *dip, ddi_callback_id_t cb_id)
11399 {
11400 
11401 	struct fcp_port *pptr = fcp_dip2port(dip);
11402 
11403 	if (pptr == NULL) {
11404 		return (DDI_FAILURE);
11405 	}
11406 	return (ndi_event_remove_callback(pptr->port_ndi_event_hdl, cb_id));
11407 }
11408 
11409 
11410 /*
11411  * called by the transport to post an event
11412  */
11413 static int
11414 fcp_scsi_bus_post_event(dev_info_t *dip, dev_info_t *rdip,
11415     ddi_eventcookie_t eventid, void *impldata)
11416 {
11417 	struct fcp_port *pptr = fcp_dip2port(dip);
11418 
11419 	if (pptr == NULL) {
11420 		return (DDI_FAILURE);
11421 	}
11422 
11423 	return (ndi_event_run_callbacks(pptr->port_ndi_event_hdl, rdip,
11424 	    eventid, impldata));
11425 }
11426 
11427 
11428 /*
 * A target in many cases in Fibre Channel has a one to one relation
11430  * with a port identifier (which is also known as D_ID and also as AL_PA
11431  * in private Loop) On Fibre Channel-to-SCSI bridge boxes a target reset
11432  * will most likely result in resetting all LUNs (which means a reset will
11433  * occur on all the SCSI devices connected at the other end of the bridge)
11434  * That is the latest favorite topic for discussion, for, one can debate as
11435  * hot as one likes and come up with arguably a best solution to one's
11436  * satisfaction
11437  *
11438  * To stay on track and not digress much, here are the problems stated
11439  * briefly:
11440  *
11441  *	SCSA doesn't define RESET_LUN, It defines RESET_TARGET, but the
11442  *	target drivers use RESET_TARGET even if their instance is on a
11443  *	LUN. Doesn't that sound a bit broken ?
11444  *
11445  *	FCP SCSI (the current spec) only defines RESET TARGET in the
11446  *	control fields of an FCP_CMND structure. It should have been
11447  *	fixed right there, giving flexibility to the initiators to
11448  *	minimize havoc that could be caused by resetting a target.
11449  */
11450 static int
11451 fcp_reset_target(struct scsi_address *ap, int level)
11452 {
11453 	int			rval = FC_FAILURE;
11454 	char			lun_id[25];
11455 	struct fcp_port		*pptr = ADDR2FCP(ap);
11456 	struct fcp_lun	*plun = ADDR2LUN(ap);
11457 	struct fcp_tgt	*ptgt = plun->lun_tgt;
11458 	struct scsi_pkt		*pkt;
11459 	struct fcp_pkt	*cmd;
11460 	struct fcp_rsp		*rsp;
11461 	uint32_t		tgt_cnt;
11462 	struct fcp_rsp_info	*rsp_info;
11463 	struct fcp_reset_elem	*p;
11464 	int			bval;
11465 
11466 	if ((p = kmem_alloc(sizeof (struct fcp_reset_elem),
11467 	    KM_NOSLEEP)) == NULL) {
11468 		return (rval);
11469 	}
11470 
11471 	mutex_enter(&ptgt->tgt_mutex);
11472 	if (level == RESET_TARGET) {
11473 		if (ptgt->tgt_state & (FCP_TGT_OFFLINE | FCP_TGT_BUSY)) {
11474 			mutex_exit(&ptgt->tgt_mutex);
11475 			kmem_free(p, sizeof (struct fcp_reset_elem));
11476 			return (rval);
11477 		}
11478 		fcp_update_tgt_state(ptgt, FCP_SET, FCP_LUN_BUSY);
11479 		(void) strcpy(lun_id, " ");
11480 	} else {
11481 		if (plun->lun_state & (FCP_LUN_OFFLINE | FCP_LUN_BUSY)) {
11482 			mutex_exit(&ptgt->tgt_mutex);
11483 			kmem_free(p, sizeof (struct fcp_reset_elem));
11484 			return (rval);
11485 		}
11486 		fcp_update_lun_state(plun, FCP_SET, FCP_LUN_BUSY);
11487 
11488 		(void) sprintf(lun_id, ", LUN=%d", plun->lun_num);
11489 	}
11490 	tgt_cnt = ptgt->tgt_change_cnt;
11491 
11492 	mutex_exit(&ptgt->tgt_mutex);
11493 
11494 	if ((pkt = scsi_init_pkt(ap, NULL, NULL, 0, 0,
11495 	    0, 0, NULL, 0)) == NULL) {
11496 		kmem_free(p, sizeof (struct fcp_reset_elem));
11497 		mutex_enter(&ptgt->tgt_mutex);
11498 		fcp_update_tgt_state(ptgt, FCP_RESET, FCP_LUN_BUSY);
11499 		mutex_exit(&ptgt->tgt_mutex);
11500 		return (rval);
11501 	}
11502 	pkt->pkt_time = FCP_POLL_TIMEOUT;
11503 
11504 	/* fill in cmd part of packet */
11505 	cmd = PKT2CMD(pkt);
11506 	if (level == RESET_TARGET) {
11507 		cmd->cmd_fcp_cmd.fcp_cntl.cntl_reset_tgt = 1;
11508 	} else {
11509 		cmd->cmd_fcp_cmd.fcp_cntl.cntl_reset_lun = 1;
11510 	}
11511 	cmd->cmd_fp_pkt->pkt_comp = NULL;
11512 	cmd->cmd_pkt->pkt_flags |= FLAG_NOINTR;
11513 
11514 	/* prepare a packet for transport */
11515 	fcp_prepare_pkt(pptr, cmd, plun);
11516 
11517 	if (cmd->cmd_pkt->pkt_time) {
11518 		cmd->cmd_fp_pkt->pkt_timeout = cmd->cmd_pkt->pkt_time;
11519 	} else {
11520 		cmd->cmd_fp_pkt->pkt_timeout = 5 * 60 * 60;
11521 	}
11522 
11523 	(void) fc_ulp_busy_port(pptr->port_fp_handle);
11524 	bval = fcp_dopoll(pptr, cmd);
11525 	fc_ulp_idle_port(pptr->port_fp_handle);
11526 
11527 	/* submit the packet */
11528 	if (bval == TRAN_ACCEPT) {
11529 		int error = 3;
11530 
11531 		rsp = (struct fcp_rsp *)cmd->cmd_fcp_rsp;
11532 		rsp_info = (struct fcp_rsp_info *)(cmd->cmd_fcp_rsp +
11533 		    sizeof (struct fcp_rsp));
11534 
11535 		if (rsp->fcp_u.fcp_status.rsp_len_set) {
11536 			if (fcp_validate_fcp_response(rsp, pptr) ==
11537 			    FC_SUCCESS) {
11538 				if (pptr->port_fcp_dma != FC_NO_DVMA_SPACE) {
11539 					FCP_CP_IN(cmd->cmd_fp_pkt->pkt_resp +
11540 					    sizeof (struct fcp_rsp), rsp_info,
11541 					    cmd->cmd_fp_pkt->pkt_resp_acc,
11542 					    sizeof (struct fcp_rsp_info));
11543 				}
11544 				if (rsp_info->rsp_code == FCP_NO_FAILURE) {
11545 					rval = FC_SUCCESS;
11546 					error = 0;
11547 				} else {
11548 					error = 1;
11549 				}
11550 			} else {
11551 				error = 2;
11552 			}
11553 		}
11554 
11555 		switch (error) {
11556 		case 0:
11557 			fcp_log(CE_WARN, pptr->port_dip,
11558 			    "!FCP: WWN 0x%08x%08x %s reset successfully",
11559 			    *((int *)&ptgt->tgt_port_wwn.raw_wwn[0]),
11560 			    *((int *)&ptgt->tgt_port_wwn.raw_wwn[4]), lun_id);
11561 			break;
11562 
11563 		case 1:
11564 			fcp_log(CE_WARN, pptr->port_dip,
11565 			    "!FCP: Reset to WWN	 0x%08x%08x %s failed,"
11566 			    " response code=%x",
11567 			    *((int *)&ptgt->tgt_port_wwn.raw_wwn[0]),
11568 			    *((int *)&ptgt->tgt_port_wwn.raw_wwn[4]), lun_id,
11569 			    rsp_info->rsp_code);
11570 			break;
11571 
11572 		case 2:
11573 			fcp_log(CE_WARN, pptr->port_dip,
11574 			    "!FCP: Reset to WWN 0x%08x%08x %s failed,"
11575 			    " Bad FCP response values: rsvd1=%x,"
11576 			    " rsvd2=%x, sts-rsvd1=%x, sts-rsvd2=%x,"
11577 			    " rsplen=%x, senselen=%x",
11578 			    *((int *)&ptgt->tgt_port_wwn.raw_wwn[0]),
11579 			    *((int *)&ptgt->tgt_port_wwn.raw_wwn[4]), lun_id,
11580 			    rsp->reserved_0, rsp->reserved_1,
11581 			    rsp->fcp_u.fcp_status.reserved_0,
11582 			    rsp->fcp_u.fcp_status.reserved_1,
11583 			    rsp->fcp_response_len, rsp->fcp_sense_len);
11584 			break;
11585 
11586 		default:
11587 			fcp_log(CE_WARN, pptr->port_dip,
11588 			    "!FCP: Reset to WWN	 0x%08x%08x %s failed",
11589 			    *((int *)&ptgt->tgt_port_wwn.raw_wwn[0]),
11590 			    *((int *)&ptgt->tgt_port_wwn.raw_wwn[4]), lun_id);
11591 			break;
11592 		}
11593 	}
11594 	scsi_destroy_pkt(pkt);
11595 
11596 	if (rval == FC_FAILURE) {
11597 		mutex_enter(&ptgt->tgt_mutex);
11598 		if (level == RESET_TARGET) {
11599 			fcp_update_tgt_state(ptgt, FCP_RESET, FCP_LUN_BUSY);
11600 		} else {
11601 			fcp_update_lun_state(plun, FCP_RESET, FCP_LUN_BUSY);
11602 		}
11603 		mutex_exit(&ptgt->tgt_mutex);
11604 		kmem_free(p, sizeof (struct fcp_reset_elem));
11605 		return (rval);
11606 	}
11607 
11608 	mutex_enter(&pptr->port_mutex);
11609 	if (level == RESET_TARGET) {
11610 		p->tgt = ptgt;
11611 		p->lun = NULL;
11612 	} else {
11613 		p->tgt = NULL;
11614 		p->lun = plun;
11615 	}
11616 	p->tgt = ptgt;
11617 	p->tgt_cnt = tgt_cnt;
11618 	p->timeout = fcp_watchdog_time + FCP_RESET_DELAY;
11619 	p->next = pptr->port_reset_list;
11620 	pptr->port_reset_list = p;
11621 
11622 	FCP_TRACE(fcp_logq, pptr->port_instbuf,
11623 	    fcp_trace, FCP_BUF_LEVEL_3, 0,
11624 	    "Notify ssd of the reset to reinstate the reservations");
11625 
11626 	scsi_hba_reset_notify_callback(&pptr->port_mutex,
11627 	    &pptr->port_reset_notify_listf);
11628 
11629 	mutex_exit(&pptr->port_mutex);
11630 
11631 	return (rval);
11632 }
11633 
11634 
11635 /*
11636  * called by fcp_getcap and fcp_setcap to get and set (respectively)
11637  * SCSI capabilities
11638  */
11639 /* ARGSUSED */
11640 static int
11641 fcp_commoncap(struct scsi_address *ap, char *cap,
11642     int val, int tgtonly, int doset)
11643 {
11644 	struct fcp_port		*pptr = ADDR2FCP(ap);
11645 	struct fcp_lun	*plun = ADDR2LUN(ap);
11646 	struct fcp_tgt	*ptgt = plun->lun_tgt;
11647 	int			cidx;
11648 	int			rval = FALSE;
11649 
11650 	if (cap == (char *)0) {
11651 		FCP_TRACE(fcp_logq, pptr->port_instbuf,
11652 		    fcp_trace, FCP_BUF_LEVEL_3, 0,
11653 		    "fcp_commoncap: invalid arg");
11654 		return (rval);
11655 	}
11656 
11657 	if ((cidx = scsi_hba_lookup_capstr(cap)) == -1) {
11658 		return (UNDEFINED);
11659 	}
11660 
11661 	/*
11662 	 * Process setcap request.
11663 	 */
11664 	if (doset) {
11665 		/*
11666 		 * At present, we can only set binary (0/1) values
11667 		 */
11668 		switch (cidx) {
11669 		case SCSI_CAP_ARQ:
11670 			if (val == 0) {
11671 				rval = FALSE;
11672 			} else {
11673 				rval = TRUE;
11674 			}
11675 			break;
11676 
11677 		case SCSI_CAP_LUN_RESET:
11678 			if (val) {
11679 				plun->lun_cap |= FCP_LUN_CAP_RESET;
11680 			} else {
11681 				plun->lun_cap &= ~FCP_LUN_CAP_RESET;
11682 			}
11683 			rval = TRUE;
11684 			break;
11685 
11686 		case SCSI_CAP_SECTOR_SIZE:
11687 			rval = TRUE;
11688 			break;
11689 		default:
11690 			FCP_TRACE(fcp_logq, pptr->port_instbuf,
11691 			    fcp_trace, FCP_BUF_LEVEL_4, 0,
11692 			    "fcp_setcap: unsupported %d", cidx);
11693 			rval = UNDEFINED;
11694 			break;
11695 		}
11696 
11697 		FCP_TRACE(fcp_logq, pptr->port_instbuf,
11698 		    fcp_trace, FCP_BUF_LEVEL_5, 0,
11699 		    "set cap: cap=%s, val/tgtonly/doset/rval = "
11700 		    "0x%x/0x%x/0x%x/%d",
11701 		    cap, val, tgtonly, doset, rval);
11702 
11703 	} else {
11704 		/*
11705 		 * Process getcap request.
11706 		 */
11707 		switch (cidx) {
11708 		case SCSI_CAP_DMA_MAX:
11709 			rval = (int)pptr->port_data_dma_attr.dma_attr_maxxfer;
11710 
11711 			/*
11712 			 * Need to make an adjustment qlc is uint_t 64
11713 			 * st is int, so we will make the adjustment here
11714 			 * being as nobody wants to touch this.
11715 			 * It still leaves the max single block length
11716 			 * of 2 gig. This should last .
11717 			 */
11718 
11719 			if (rval == -1) {
11720 				rval = MAX_INT_DMA;
11721 			}
11722 
11723 			break;
11724 
11725 		case SCSI_CAP_INITIATOR_ID:
11726 			rval = pptr->port_id;
11727 			break;
11728 
11729 		case SCSI_CAP_ARQ:
11730 		case SCSI_CAP_RESET_NOTIFICATION:
11731 		case SCSI_CAP_TAGGED_QING:
11732 			rval = TRUE;
11733 			break;
11734 
11735 		case SCSI_CAP_SCSI_VERSION:
11736 			rval = 3;
11737 			break;
11738 
11739 		case SCSI_CAP_INTERCONNECT_TYPE:
11740 			if (FC_TOP_EXTERNAL(pptr->port_topology) ||
11741 			    (ptgt->tgt_hard_addr == 0)) {
11742 				rval = INTERCONNECT_FABRIC;
11743 			} else {
11744 				rval = INTERCONNECT_FIBRE;
11745 			}
11746 			break;
11747 
11748 		case SCSI_CAP_LUN_RESET:
11749 			rval = ((plun->lun_cap & FCP_LUN_CAP_RESET) != 0) ?
11750 			    TRUE : FALSE;
11751 			break;
11752 
11753 		default:
11754 			FCP_TRACE(fcp_logq, pptr->port_instbuf,
11755 			    fcp_trace, FCP_BUF_LEVEL_4, 0,
11756 			    "fcp_getcap: unsupported %d", cidx);
11757 			rval = UNDEFINED;
11758 			break;
11759 		}
11760 
11761 		FCP_TRACE(fcp_logq, pptr->port_instbuf,
11762 		    fcp_trace, FCP_BUF_LEVEL_8, 0,
11763 		    "get cap: cap=%s, val/tgtonly/doset/rval = "
11764 		    "0x%x/0x%x/0x%x/%d",
11765 		    cap, val, tgtonly, doset, rval);
11766 	}
11767 
11768 	return (rval);
11769 }
11770 
11771 /*
11772  * called by the transport to get the port-wwn and lun
11773  * properties of this device, and to create a "name" based on them
11774  *
11775  * these properties don't exist on sun4m
11776  *
11777  * return 1 for success else return 0
11778  */
11779 /* ARGSUSED */
11780 static int
11781 fcp_scsi_get_name(struct scsi_device *sd, char *name, int len)
11782 {
11783 	int			i;
11784 	int			*lun;
11785 	int			numChars;
11786 	uint_t			nlun;
11787 	uint_t			count;
11788 	uint_t			nbytes;
11789 	uchar_t			*bytes;
11790 	uint16_t		lun_num;
11791 	uint32_t		tgt_id;
11792 	char			**conf_wwn;
11793 	char			tbuf[(FC_WWN_SIZE << 1) + 1];
11794 	uchar_t			barray[FC_WWN_SIZE];
11795 	dev_info_t		*tgt_dip;
11796 	struct fcp_tgt	*ptgt;
11797 	struct fcp_port	*pptr;
11798 	struct fcp_lun	*plun;
11799 
11800 	ASSERT(sd != NULL);
11801 	ASSERT(name != NULL);
11802 
11803 	tgt_dip = sd->sd_dev;
11804 	pptr = ddi_get_soft_state(fcp_softstate,
11805 	    ddi_get_instance(ddi_get_parent(tgt_dip)));
11806 	if (pptr == NULL) {
11807 		return (0);
11808 	}
11809 
11810 	ASSERT(tgt_dip != NULL);
11811 
11812 	if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY, sd->sd_dev,
11813 	    DDI_PROP_DONTPASS | DDI_PROP_NOTPROM,
11814 	    LUN_PROP, &lun, &nlun) != DDI_SUCCESS) {
11815 		name[0] = '\0';
11816 		return (0);
11817 	}
11818 
11819 	if (nlun == 0) {
11820 		ddi_prop_free(lun);
11821 		return (0);
11822 	}
11823 
11824 	lun_num = lun[0];
11825 	ddi_prop_free(lun);
11826 
11827 	/*
11828 	 * Lookup for .conf WWN property
11829 	 */
11830 	if (ddi_prop_lookup_string_array(DDI_DEV_T_ANY, tgt_dip,
11831 	    DDI_PROP_DONTPASS | DDI_PROP_NOTPROM, CONF_WWN_PROP,
11832 	    &conf_wwn, &count) == DDI_PROP_SUCCESS) {
11833 		ASSERT(count >= 1);
11834 
11835 		fcp_ascii_to_wwn(conf_wwn[0], barray, FC_WWN_SIZE);
11836 		ddi_prop_free(conf_wwn);
11837 		mutex_enter(&pptr->port_mutex);
11838 		if ((plun = fcp_lookup_lun(pptr, barray, lun_num)) == NULL) {
11839 			mutex_exit(&pptr->port_mutex);
11840 			return (0);
11841 		}
11842 		ptgt = plun->lun_tgt;
11843 		mutex_exit(&pptr->port_mutex);
11844 
11845 		(void) ndi_prop_update_byte_array(DDI_DEV_T_NONE,
11846 		    tgt_dip, PORT_WWN_PROP, barray, FC_WWN_SIZE);
11847 
11848 		if (!FC_TOP_EXTERNAL(pptr->port_topology) &&
11849 		    ptgt->tgt_hard_addr != 0) {
11850 			tgt_id = (uint32_t)fcp_alpa_to_switch[
11851 			    ptgt->tgt_hard_addr];
11852 		} else {
11853 			tgt_id = ptgt->tgt_d_id;
11854 		}
11855 
11856 		(void) ndi_prop_update_int(DDI_DEV_T_NONE, tgt_dip,
11857 		    TARGET_PROP, tgt_id);
11858 	}
11859 
11860 	/* get the our port-wwn property */
11861 	bytes = NULL;
11862 	if ((ddi_prop_lookup_byte_array(DDI_DEV_T_ANY, tgt_dip,
11863 	    DDI_PROP_DONTPASS | DDI_PROP_NOTPROM, PORT_WWN_PROP, &bytes,
11864 	    &nbytes) != DDI_PROP_SUCCESS) || nbytes != FC_WWN_SIZE) {
11865 		if (bytes != NULL) {
11866 			ddi_prop_free(bytes);
11867 		}
11868 		return (0);
11869 	}
11870 
11871 	for (i = 0; i < FC_WWN_SIZE; i++) {
11872 		(void) sprintf(&tbuf[i << 1], "%02x", *(bytes + i));
11873 	}
11874 
11875 	/* Stick in the address of the form "wWWN,LUN" */
11876 	numChars = snprintf(name, len, "w%s,%x", tbuf, lun_num);
11877 
11878 	ASSERT(numChars < len);
11879 	if (numChars >= len) {
11880 		fcp_log(CE_WARN, pptr->port_dip,
11881 		    "!fcp_scsi_get_name: "
11882 		    "name parameter length too small, it needs to be %d",
11883 		    numChars+1);
11884 	}
11885 
11886 	ddi_prop_free(bytes);
11887 
11888 	return (1);
11889 }
11890 
11891 
11892 /*
11893  * called by the transport to get the SCSI target id value, returning
11894  * it in "name"
11895  *
11896  * this isn't needed/used on sun4m
11897  *
11898  * return 1 for success else return 0
11899  */
11900 /* ARGSUSED */
11901 static int
11902 fcp_scsi_get_bus_addr(struct scsi_device *sd, char *name, int len)
11903 {
11904 	struct fcp_lun	*plun = ADDR2LUN(&sd->sd_address);
11905 	struct fcp_tgt	*ptgt;
11906 	int    numChars;
11907 
11908 	if (plun == NULL) {
11909 		return (0);
11910 	}
11911 
11912 	if ((ptgt = plun->lun_tgt) == NULL) {
11913 		return (0);
11914 	}
11915 
11916 	numChars = snprintf(name, len, "%x", ptgt->tgt_d_id);
11917 
11918 	ASSERT(numChars < len);
11919 	if (numChars >= len) {
11920 		fcp_log(CE_WARN, NULL,
11921 		    "!fcp_scsi_get_bus_addr: "
11922 		    "name parameter length too small, it needs to be %d",
11923 		    numChars+1);
11924 	}
11925 
11926 	return (1);
11927 }
11928 
11929 
11930 /*
11931  * called internally to reset the link where the specified port lives
11932  */
11933 static int
11934 fcp_linkreset(struct fcp_port *pptr, struct scsi_address *ap, int sleep)
11935 {
11936 	la_wwn_t		wwn;
11937 	struct fcp_lun	*plun;
11938 	struct fcp_tgt	*ptgt;
11939 
11940 	/* disable restart of lip if we're suspended */
11941 	mutex_enter(&pptr->port_mutex);
11942 
11943 	if (pptr->port_state & (FCP_STATE_SUSPENDED |
11944 	    FCP_STATE_POWER_DOWN)) {
11945 		mutex_exit(&pptr->port_mutex);
11946 		FCP_TRACE(fcp_logq, pptr->port_instbuf,
11947 		    fcp_trace, FCP_BUF_LEVEL_2, 0,
11948 		    "fcp_linkreset, fcp%d: link reset "
11949 		    "disabled due to DDI_SUSPEND",
11950 		    ddi_get_instance(pptr->port_dip));
11951 		return (FC_FAILURE);
11952 	}
11953 
11954 	if (pptr->port_state & (FCP_STATE_OFFLINE | FCP_STATE_ONLINING)) {
11955 		mutex_exit(&pptr->port_mutex);
11956 		return (FC_SUCCESS);
11957 	}
11958 
11959 	FCP_DTRACE(fcp_logq, pptr->port_instbuf,
11960 	    fcp_trace, FCP_BUF_LEVEL_8, 0, "Forcing link reset");
11961 
11962 	/*
11963 	 * If ap == NULL assume local link reset.
11964 	 */
11965 	if (FC_TOP_EXTERNAL(pptr->port_topology) && (ap != NULL)) {
11966 		plun = ADDR2LUN(ap);
11967 		ptgt = plun->lun_tgt;
11968 		bcopy(&ptgt->tgt_port_wwn.raw_wwn[0], &wwn, sizeof (wwn));
11969 	} else {
11970 		bzero((caddr_t)&wwn, sizeof (wwn));
11971 	}
11972 	mutex_exit(&pptr->port_mutex);
11973 
11974 	return (fc_ulp_linkreset(pptr->port_fp_handle, &wwn, sleep));
11975 }
11976 
11977 
11978 /*
11979  * called from fcp_port_attach() to resume a port
11980  * return DDI_* success/failure status
11981  * acquires and releases the global mutex
11982  * acquires and releases the port mutex
11983  */
11984 /*ARGSUSED*/
11985 
static int
fcp_handle_port_resume(opaque_t ulph, fc_ulp_port_info_t *pinfo,
    uint32_t s_id, fc_attach_cmd_t cmd, int instance)
{
	int			res = DDI_FAILURE; /* default result */
	struct fcp_port	*pptr;		/* port state ptr */
	uint32_t		alloc_cnt;
	uint32_t		max_cnt;
	fc_portmap_t		*tmp_list = NULL;

	FCP_DTRACE(fcp_logq, "fcp", fcp_trace,
	    FCP_BUF_LEVEL_8, 0, "port resume: for port %d",
	    instance);

	if ((pptr = ddi_get_soft_state(fcp_softstate, instance)) == NULL) {
		cmn_err(CE_WARN, "fcp: bad soft state");
		return (res);
	}

	mutex_enter(&pptr->port_mutex);
	switch (cmd) {
	case FC_CMD_RESUME:
		/* DDI_RESUME: the port must not also be powered down */
		ASSERT((pptr->port_state & FCP_STATE_POWER_DOWN) == 0);
		pptr->port_state &= ~FCP_STATE_SUSPENDED;
		break;

	case FC_CMD_POWER_UP:
		/*
		 * If the port is DDI_SUSPENded, defer rediscovery
		 * until DDI_RESUME occurs
		 */
		if (pptr->port_state & FCP_STATE_SUSPENDED) {
			pptr->port_state &= ~FCP_STATE_POWER_DOWN;
			mutex_exit(&pptr->port_mutex);
			return (DDI_SUCCESS);
		}
		pptr->port_state &= ~FCP_STATE_POWER_DOWN;
	}
	/* reset the port state in preparation for rediscovery */
	pptr->port_id = s_id;
	pptr->port_state = FCP_STATE_INIT;
	mutex_exit(&pptr->port_mutex);

	/*
	 * Make a copy of ulp_port_info as fctl allocates
	 * a temp struct.
	 */
	(void) fcp_cp_pinfo(pptr, pinfo);

	/*
	 * fcp_watchdog_init is a reference count of active ports; (re)arm
	 * the watchdog timeout when this is the first one.
	 */
	mutex_enter(&fcp_global_mutex);
	if (fcp_watchdog_init++ == 0) {
		fcp_watchdog_tick = fcp_watchdog_timeout *
		    drv_usectohz(1000000);
		fcp_watchdog_id = timeout(fcp_watch,
		    NULL, fcp_watchdog_tick);
	}
	mutex_exit(&fcp_global_mutex);

	/*
	 * Handle various topologies and link states.
	 */
	switch (FC_PORT_STATE_MASK(pptr->port_phys_state)) {
	case FC_STATE_OFFLINE:
		/*
		 * Wait for ONLINE, at which time a state
		 * change will cause a statec_callback
		 */
		res = DDI_SUCCESS;
		break;

	case FC_STATE_ONLINE:

		if (pptr->port_topology == FC_TOP_UNKNOWN) {
			/* topology not yet known: force a link reset */
			(void) fcp_linkreset(pptr, NULL, KM_NOSLEEP);
			res = DDI_SUCCESS;
			break;
		}

		if (FC_TOP_EXTERNAL(pptr->port_topology) &&
		    !fcp_enable_auto_configuration) {
			/*
			 * Fabric topology with manual configuration:
			 * build the device map from configuration data.
			 */
			tmp_list = fcp_construct_map(pptr, &alloc_cnt);
			if (tmp_list == NULL) {
				if (!alloc_cnt) {
					/* an empty map is not a failure */
					res = DDI_SUCCESS;
				}
				break;
			}
			max_cnt = alloc_cnt;
		} else {
			ASSERT(pptr->port_topology != FC_TOP_UNKNOWN);

			alloc_cnt = FCP_MAX_DEVICES;

			if ((tmp_list = (fc_portmap_t *)kmem_zalloc(
			    (sizeof (fc_portmap_t)) * alloc_cnt,
			    KM_NOSLEEP)) == NULL) {
				fcp_log(CE_WARN, pptr->port_dip,
				    "!fcp%d: failed to allocate portmap",
				    instance);
				break;
			}

			max_cnt = alloc_cnt;
			if ((res = fc_ulp_getportmap(pptr->port_fp_handle,
			    &tmp_list, &max_cnt, FC_ULP_PLOGI_PRESERVE)) !=
			    FC_SUCCESS) {
				caddr_t msg;

				(void) fc_ulp_error(res, &msg);

				FCP_TRACE(fcp_logq, pptr->port_instbuf,
				    fcp_trace, FCP_BUF_LEVEL_2, 0,
				    "resume failed getportmap: reason=0x%x",
				    res);

				fcp_log(CE_WARN, pptr->port_dip,
				    "!failed to get port map : %s", msg);
				break;
			}
			if (max_cnt > alloc_cnt) {
				/*
				 * fc_ulp_getportmap() may grow the list;
				 * track the new size so the kmem_free()
				 * below matches the actual allocation.
				 */
				alloc_cnt = max_cnt;
			}
		}

		/*
		 * do the SCSI device discovery and create
		 * the devinfos
		 */
		fcp_statec_callback(ulph, pptr->port_fp_handle,
		    pptr->port_phys_state, pptr->port_topology, tmp_list,
		    max_cnt, pptr->port_id);

		res = DDI_SUCCESS;
		break;

	default:
		fcp_log(CE_WARN, pptr->port_dip,
		    "!fcp%d: invalid port state at attach=0x%x",
		    instance, pptr->port_phys_state);

		mutex_enter(&pptr->port_mutex);
		pptr->port_phys_state = FCP_STATE_OFFLINE;
		mutex_exit(&pptr->port_mutex);
		res = DDI_SUCCESS;

		break;
	}

	if (tmp_list != NULL) {
		kmem_free(tmp_list, sizeof (fc_portmap_t) * alloc_cnt);
	}

	return (res);
}
12139 
12140 
12141 static void
12142 fcp_cp_pinfo(struct fcp_port *pptr, fc_ulp_port_info_t *pinfo)
12143 {
12144 	pptr->port_fp_modlinkage = *pinfo->port_linkage;
12145 	pptr->port_dip = pinfo->port_dip;
12146 	pptr->port_fp_handle = pinfo->port_handle;
12147 	pptr->port_data_dma_attr = *pinfo->port_data_dma_attr;
12148 	pptr->port_cmd_dma_attr = *pinfo->port_cmd_dma_attr;
12149 	pptr->port_resp_dma_attr = *pinfo->port_resp_dma_attr;
12150 	pptr->port_dma_acc_attr = *pinfo->port_acc_attr;
12151 	pptr->port_priv_pkt_len = pinfo->port_fca_pkt_size;
12152 	pptr->port_max_exch = pinfo->port_fca_max_exch;
12153 	pptr->port_phys_state = pinfo->port_state;
12154 	pptr->port_topology = pinfo->port_flags;
12155 	pptr->port_reset_action = pinfo->port_reset_action;
12156 	pptr->port_cmds_dma_flags = pinfo->port_dma_behavior;
12157 	pptr->port_fcp_dma = pinfo->port_fcp_dma;
12158 	bcopy(&pinfo->port_nwwn, &pptr->port_nwwn, sizeof (la_wwn_t));
12159 	bcopy(&pinfo->port_pwwn, &pptr->port_pwwn, sizeof (la_wwn_t));
12160 }
12161 
12162 /*
12163  * If the elements wait field is set to 1 then
12164  * another thread is waiting for the operation to complete. Once
12165  * it is complete, the waiting thread is signaled and the element is
12166  * freed by the waiting thread. If the elements wait field is set to 0
12167  * the element is freed.
12168  */
12169 static void
12170 fcp_process_elem(struct fcp_hp_elem *elem, int result)
12171 {
12172 	ASSERT(elem != NULL);
12173 	mutex_enter(&elem->mutex);
12174 	elem->result = result;
12175 	if (elem->wait) {
12176 		elem->wait = 0;
12177 		cv_signal(&elem->cv);
12178 		mutex_exit(&elem->mutex);
12179 	} else {
12180 		mutex_exit(&elem->mutex);
12181 		cv_destroy(&elem->cv);
12182 		mutex_destroy(&elem->mutex);
12183 		kmem_free(elem, sizeof (struct fcp_hp_elem));
12184 	}
12185 }
12186 
12187 /*
12188  * This function is invoked from the taskq thread to allocate
12189  * devinfo nodes and to online/offline them.
12190  */
static void
fcp_hp_task(void *arg)
{
	struct fcp_hp_elem	*elem = (struct fcp_hp_elem *)arg;
	struct fcp_lun	*plun = elem->lun;
	struct fcp_port		*pptr = elem->port;
	int			result;

	ASSERT(elem->what == FCP_ONLINE ||
	    elem->what == FCP_OFFLINE ||
	    elem->what == FCP_MPXIO_PATH_CLEAR_BUSY ||
	    elem->what == FCP_MPXIO_PATH_SET_BUSY);

	/*
	 * Abort the request if the LUN saw another online/offline event
	 * after this one was queued (stale event count), or if the port
	 * is being suspended, detached or powered down.
	 */
	mutex_enter(&pptr->port_mutex);
	mutex_enter(&plun->lun_mutex);
	if (((elem->what == FCP_ONLINE || elem->what == FCP_OFFLINE) &&
	    plun->lun_event_count != elem->event_cnt) ||
	    pptr->port_state & (FCP_STATE_SUSPENDED |
	    FCP_STATE_DETACHING | FCP_STATE_POWER_DOWN)) {
		mutex_exit(&plun->lun_mutex);
		mutex_exit(&pptr->port_mutex);
		fcp_process_elem(elem, NDI_FAILURE);
		return;
	}
	mutex_exit(&plun->lun_mutex);
	mutex_exit(&pptr->port_mutex);

	/* perform the online/offline/path-busy operation itself */
	result = fcp_trigger_lun(plun, elem->cip, elem->what,
	    elem->link_cnt, elem->tgt_cnt, elem->flags);
	/* fcp_process_elem() signals any waiter or frees the element */
	fcp_process_elem(elem, result);
}
12222 
12223 
12224 static child_info_t *
12225 fcp_get_cip(struct fcp_lun *plun, child_info_t *cip, int lcount,
12226     int tcount)
12227 {
12228 	ASSERT(MUTEX_HELD(&plun->lun_mutex));
12229 
12230 	if (fcp_is_child_present(plun, cip) == FC_FAILURE) {
12231 		struct fcp_port *pptr = plun->lun_tgt->tgt_port;
12232 
12233 		ASSERT(MUTEX_HELD(&pptr->port_mutex));
12234 		/*
12235 		 * Child has not been created yet. Create the child device
12236 		 * based on the per-Lun flags.
12237 		 */
12238 		if (pptr->port_mpxio == 0 || plun->lun_mpxio == 0) {
12239 			plun->lun_cip =
12240 			    CIP(fcp_create_dip(plun, lcount, tcount));
12241 			plun->lun_mpxio = 0;
12242 		} else {
12243 			plun->lun_cip =
12244 			    CIP(fcp_create_pip(plun, lcount, tcount));
12245 			plun->lun_mpxio = 1;
12246 		}
12247 	} else {
12248 		plun->lun_cip = cip;
12249 	}
12250 
12251 	return (plun->lun_cip);
12252 }
12253 
12254 
12255 static int
12256 fcp_is_dip_present(struct fcp_lun *plun, dev_info_t *cdip)
12257 {
12258 	int		rval = FC_FAILURE;
12259 	dev_info_t	*pdip;
12260 	struct dev_info	*dip;
12261 	int		circular;
12262 
12263 	ASSERT(MUTEX_HELD(&plun->lun_mutex));
12264 
12265 	pdip = plun->lun_tgt->tgt_port->port_dip;
12266 
12267 	if (plun->lun_cip == NULL) {
12268 		FCP_TRACE(fcp_logq, LUN_PORT->port_instbuf,
12269 		    fcp_trace, FCP_BUF_LEVEL_3, 0,
12270 		    "fcp_is_dip_present: plun->lun_cip is NULL: "
12271 		    "plun: %p lun state: %x num: %d target state: %x",
12272 		    plun, plun->lun_state, plun->lun_num,
12273 		    plun->lun_tgt->tgt_port->port_state);
12274 		return (rval);
12275 	}
12276 	ndi_devi_enter(pdip, &circular);
12277 	dip = DEVI(pdip)->devi_child;
12278 	while (dip) {
12279 		if (dip == DEVI(cdip)) {
12280 			rval = FC_SUCCESS;
12281 			break;
12282 		}
12283 		dip = dip->devi_sibling;
12284 	}
12285 	ndi_devi_exit(pdip, circular);
12286 	return (rval);
12287 }
12288 
12289 static int
12290 fcp_is_child_present(struct fcp_lun *plun, child_info_t *cip)
12291 {
12292 	int		rval = FC_FAILURE;
12293 
12294 	ASSERT(plun != NULL);
12295 	ASSERT(MUTEX_HELD(&plun->lun_mutex));
12296 
12297 	if (plun->lun_mpxio == 0) {
12298 		rval = fcp_is_dip_present(plun, DIP(cip));
12299 	} else {
12300 		rval = fcp_is_pip_present(plun, PIP(cip));
12301 	}
12302 
12303 	return (rval);
12304 }
12305 
12306 /*
12307  *     Function: fcp_create_dip
12308  *
12309  *  Description: Creates a dev_info_t structure for the LUN specified by the
12310  *		 caller.
12311  *
12312  *     Argument: plun		Lun structure
12313  *		 link_cnt	Link state count.
12314  *		 tgt_cnt	Target state change count.
12315  *
12316  * Return Value: NULL if it failed
12317  *		 dev_info_t structure address if it succeeded
12318  *
12319  *	Context: Kernel context
12320  */
static dev_info_t *
fcp_create_dip(struct fcp_lun *plun, int link_cnt, int tgt_cnt)
{
	int			failure = 0;
	uint32_t		tgt_id;
	uint64_t		sam_lun;
	struct fcp_tgt	*ptgt = plun->lun_tgt;
	struct fcp_port	*pptr = ptgt->tgt_port;
	dev_info_t		*pdip = pptr->port_dip;
	dev_info_t		*cdip = NULL;
	dev_info_t		*old_dip = DIP(plun->lun_cip);
	char			*nname = NULL;
	char			**compatible = NULL;
	int			ncompatible;
	char			*scsi_binding_set;
	char			t_pwwn[17];	/* 16 hex digits + NUL */

	ASSERT(MUTEX_HELD(&plun->lun_mutex));
	ASSERT(MUTEX_HELD(&pptr->port_mutex));

	/* get the 'scsi-binding-set' property */
	if (ddi_prop_lookup_string(DDI_DEV_T_ANY, pdip,
	    DDI_PROP_NOTPROM | DDI_PROP_DONTPASS, "scsi-binding-set",
	    &scsi_binding_set) != DDI_PROP_SUCCESS) {
		scsi_binding_set = NULL;
	}

	/* determine the node name and compatible */
	scsi_hba_nodename_compatible_get(&plun->lun_inq, scsi_binding_set,
	    plun->lun_inq.inq_dtype, NULL, &nname, &compatible, &ncompatible);
	if (scsi_binding_set) {
		ddi_prop_free(scsi_binding_set);
	}

	if (nname == NULL) {
		/* no driver binds this device type: fail the creation */
#ifdef	DEBUG
		cmn_err(CE_WARN, "%s%d: no driver for "
		    "device @w%02x%02x%02x%02x%02x%02x%02x%02x,%d:"
		    "	 compatible: %s",
		    ddi_driver_name(pdip), ddi_get_instance(pdip),
		    ptgt->tgt_port_wwn.raw_wwn[0],
		    ptgt->tgt_port_wwn.raw_wwn[1],
		    ptgt->tgt_port_wwn.raw_wwn[2],
		    ptgt->tgt_port_wwn.raw_wwn[3],
		    ptgt->tgt_port_wwn.raw_wwn[4],
		    ptgt->tgt_port_wwn.raw_wwn[5],
		    ptgt->tgt_port_wwn.raw_wwn[6],
		    ptgt->tgt_port_wwn.raw_wwn[7], plun->lun_num,
		    *compatible);
#endif	/* DEBUG */
		failure++;
		goto end_of_fcp_create_dip;
	}

	cdip = fcp_find_existing_dip(plun, pdip, nname);

	/*
	 * if the old_dip does not match the cdip, that means there is
	 * some property change. since we'll be using the cdip, we need
	 * to offline the old_dip. If the state contains FCP_LUN_CHANGED
	 * then the dtype for the device has been updated. Offline the
	 * old device and create a new device with the new device type
	 * Refer to bug: 4764752
	 */
	if (old_dip && (cdip != old_dip ||
	    plun->lun_state & FCP_LUN_CHANGED)) {
		plun->lun_state &= ~(FCP_LUN_INIT);
		/*
		 * Drop the lun/port locks before queueing the offline;
		 * fcp_pass_to_hp() requires the tgt_mutex.
		 */
		mutex_exit(&plun->lun_mutex);
		mutex_exit(&pptr->port_mutex);

		mutex_enter(&ptgt->tgt_mutex);
		(void) fcp_pass_to_hp(pptr, plun, CIP(old_dip), FCP_OFFLINE,
		    link_cnt, tgt_cnt, NDI_DEVI_REMOVE, 0);
		mutex_exit(&ptgt->tgt_mutex);

#ifdef DEBUG
		if (cdip != NULL) {
			FCP_TRACE(fcp_logq, pptr->port_instbuf,
			    fcp_trace, FCP_BUF_LEVEL_2, 0,
			    "Old dip=%p; New dip=%p don't match", old_dip,
			    cdip);
		} else {
			FCP_TRACE(fcp_logq, pptr->port_instbuf,
			    fcp_trace, FCP_BUF_LEVEL_2, 0,
			    "Old dip=%p; New dip=NULL don't match", old_dip);
		}
#endif

		mutex_enter(&pptr->port_mutex);
		mutex_enter(&plun->lun_mutex);
	}

	/* allocate a fresh node if none was found or the dtype changed */
	if (cdip == NULL || plun->lun_state & FCP_LUN_CHANGED) {
		plun->lun_state &= ~(FCP_LUN_CHANGED);
		if (ndi_devi_alloc(pptr->port_dip, nname,
		    DEVI_SID_NODEID, &cdip) != NDI_SUCCESS) {
			failure++;
			goto end_of_fcp_create_dip;
		}
	}

	/*
	 * Previously all the properties for the devinfo were destroyed here
	 * with a call to ndi_prop_remove_all(). Since this may cause loss of
	 * the devid property (and other properties established by the target
	 * driver or framework) which the code does not always recreate, this
	 * call was removed.
	 * This opens a theoretical possibility that we may return with a
	 * stale devid on the node if the scsi entity behind the fibre channel
	 * lun has changed.
	 */

	/* decorate the node with compatible */
	if (ndi_prop_update_string_array(DDI_DEV_T_NONE, cdip,
	    "compatible", compatible, ncompatible) != DDI_PROP_SUCCESS) {
		failure++;
		goto end_of_fcp_create_dip;
	}

	if (ndi_prop_update_byte_array(DDI_DEV_T_NONE, cdip, NODE_WWN_PROP,
	    ptgt->tgt_node_wwn.raw_wwn, FC_WWN_SIZE) != DDI_PROP_SUCCESS) {
		failure++;
		goto end_of_fcp_create_dip;
	}

	if (ndi_prop_update_byte_array(DDI_DEV_T_NONE, cdip, PORT_WWN_PROP,
	    ptgt->tgt_port_wwn.raw_wwn, FC_WWN_SIZE) != DDI_PROP_SUCCESS) {
		failure++;
		goto end_of_fcp_create_dip;
	}

	/* target-port property: the port WWN as an ASCII hex string */
	fcp_wwn_to_ascii(ptgt->tgt_port_wwn.raw_wwn, t_pwwn);
	t_pwwn[16] = '\0';
	if (ndi_prop_update_string(DDI_DEV_T_NONE, cdip, TGT_PORT_PROP, t_pwwn)
	    != DDI_PROP_SUCCESS) {
		failure++;
		goto end_of_fcp_create_dip;
	}

	/*
	 * If there is no hard address - We might have to deal with
	 * that by using WWN - Having said that it is important to
	 * recognize this problem early so ssd can be informed of
	 * the right interconnect type.
	 */
	if (!FC_TOP_EXTERNAL(pptr->port_topology) && ptgt->tgt_hard_addr != 0) {
		tgt_id = (uint32_t)fcp_alpa_to_switch[ptgt->tgt_hard_addr];
	} else {
		tgt_id = ptgt->tgt_d_id;
	}

	if (ndi_prop_update_int(DDI_DEV_T_NONE, cdip, TARGET_PROP,
	    tgt_id) != DDI_PROP_SUCCESS) {
		failure++;
		goto end_of_fcp_create_dip;
	}

	if (ndi_prop_update_int(DDI_DEV_T_NONE, cdip, LUN_PROP,
	    (int)plun->lun_num) != DDI_PROP_SUCCESS) {
		failure++;
		goto end_of_fcp_create_dip;
	}
	/* the 64-bit SAM LUN address, copied from the FCP LUN bytes */
	bcopy(&plun->lun_addr, &sam_lun, FCP_LUN_SIZE);
	if (ndi_prop_update_int64(DDI_DEV_T_NONE, cdip, SAM_LUN_PROP,
	    sam_lun) != DDI_PROP_SUCCESS) {
		failure++;
		goto end_of_fcp_create_dip;
	}

end_of_fcp_create_dip:
	scsi_hba_nodename_compatible_free(nname, compatible);

	/* on any failure, undo the partially decorated node */
	if (cdip != NULL && failure) {
		(void) ndi_prop_remove_all(cdip);
		(void) ndi_devi_free(cdip);
		cdip = NULL;
	}

	return (cdip);
}
12501 
12502 /*
12503  *     Function: fcp_create_pip
12504  *
12505  *  Description: Creates a Path Id for the LUN specified by the caller.
12506  *
12507  *     Argument: plun		Lun structure
12508  *		 link_cnt	Link state count.
12509  *		 tgt_cnt	Target state count.
12510  *
12511  * Return Value: NULL if it failed
12512  *		 mdi_pathinfo_t structure address if it succeeded
12513  *
12514  *	Context: Kernel context
12515  */
12516 static mdi_pathinfo_t *
12517 fcp_create_pip(struct fcp_lun *plun, int lcount, int tcount)
12518 {
12519 	int			i;
12520 	char			buf[MAXNAMELEN];
12521 	char			uaddr[MAXNAMELEN];
12522 	int			failure = 0;
12523 	uint32_t		tgt_id;
12524 	uint64_t		sam_lun;
12525 	struct fcp_tgt	*ptgt = plun->lun_tgt;
12526 	struct fcp_port	*pptr = ptgt->tgt_port;
12527 	dev_info_t		*pdip = pptr->port_dip;
12528 	mdi_pathinfo_t		*pip = NULL;
12529 	mdi_pathinfo_t		*old_pip = PIP(plun->lun_cip);
12530 	char			*nname = NULL;
12531 	char			**compatible = NULL;
12532 	int			ncompatible;
12533 	char			*scsi_binding_set;
12534 	char			t_pwwn[17];
12535 
12536 	ASSERT(MUTEX_HELD(&plun->lun_mutex));
12537 	ASSERT(MUTEX_HELD(&pptr->port_mutex));
12538 
12539 	scsi_binding_set = "vhci";
12540 
12541 	/* determine the node name and compatible */
12542 	scsi_hba_nodename_compatible_get(&plun->lun_inq, scsi_binding_set,
12543 	    plun->lun_inq.inq_dtype, NULL, &nname, &compatible, &ncompatible);
12544 
12545 	if (nname == NULL) {
12546 #ifdef	DEBUG
12547 		cmn_err(CE_WARN, "fcp_create_dip: %s%d: no driver for "
12548 		    "device @w%02x%02x%02x%02x%02x%02x%02x%02x,%d:"
12549 		    "	 compatible: %s",
12550 		    ddi_driver_name(pdip), ddi_get_instance(pdip),
12551 		    ptgt->tgt_port_wwn.raw_wwn[0],
12552 		    ptgt->tgt_port_wwn.raw_wwn[1],
12553 		    ptgt->tgt_port_wwn.raw_wwn[2],
12554 		    ptgt->tgt_port_wwn.raw_wwn[3],
12555 		    ptgt->tgt_port_wwn.raw_wwn[4],
12556 		    ptgt->tgt_port_wwn.raw_wwn[5],
12557 		    ptgt->tgt_port_wwn.raw_wwn[6],
12558 		    ptgt->tgt_port_wwn.raw_wwn[7], plun->lun_num,
12559 		    *compatible);
12560 #endif	/* DEBUG */
12561 		failure++;
12562 		goto end_of_fcp_create_pip;
12563 	}
12564 
12565 	pip = fcp_find_existing_pip(plun, pdip);
12566 
12567 	/*
12568 	 * if the old_dip does not match the cdip, that means there is
12569 	 * some property change. since we'll be using the cdip, we need
12570 	 * to offline the old_dip. If the state contains FCP_LUN_CHANGED
12571 	 * then the dtype for the device has been updated. Offline the
12572 	 * the old device and create a new device with the new device type
12573 	 * Refer to bug: 4764752
12574 	 */
12575 	if (old_pip && (pip != old_pip ||
12576 	    plun->lun_state & FCP_LUN_CHANGED)) {
12577 		plun->lun_state &= ~(FCP_LUN_INIT);
12578 		mutex_exit(&plun->lun_mutex);
12579 		mutex_exit(&pptr->port_mutex);
12580 
12581 		mutex_enter(&ptgt->tgt_mutex);
12582 		(void) fcp_pass_to_hp(pptr, plun, CIP(old_pip),
12583 		    FCP_OFFLINE, lcount, tcount,
12584 		    NDI_DEVI_REMOVE, 0);
12585 		mutex_exit(&ptgt->tgt_mutex);
12586 
12587 		if (pip != NULL) {
12588 			FCP_TRACE(fcp_logq, pptr->port_instbuf,
12589 			    fcp_trace, FCP_BUF_LEVEL_2, 0,
12590 			    "Old pip=%p; New pip=%p don't match",
12591 			    old_pip, pip);
12592 		} else {
12593 			FCP_TRACE(fcp_logq, pptr->port_instbuf,
12594 			    fcp_trace, FCP_BUF_LEVEL_2, 0,
12595 			    "Old pip=%p; New pip=NULL don't match",
12596 			    old_pip);
12597 		}
12598 
12599 		mutex_enter(&pptr->port_mutex);
12600 		mutex_enter(&plun->lun_mutex);
12601 	}
12602 
12603 	/*
12604 	 * Since FC_WWN_SIZE is 8 bytes and its not like the
12605 	 * lun_guid_size which is dependent on the target, I don't
12606 	 * believe the same trancation happens here UNLESS the standards
12607 	 * change the FC_WWN_SIZE value to something larger than
12608 	 * MAXNAMELEN(currently 255 bytes).
12609 	 */
12610 
12611 	for (i = 0; i < FC_WWN_SIZE; i++) {
12612 		(void) sprintf(&buf[i << 1], "%02x",
12613 		    ptgt->tgt_port_wwn.raw_wwn[i]);
12614 	}
12615 
12616 	(void) snprintf(uaddr, MAXNAMELEN, "w%s,%x",
12617 	    buf, plun->lun_num);
12618 
12619 	if (pip == NULL || plun->lun_state & FCP_LUN_CHANGED) {
12620 		/*
12621 		 * Release the locks before calling into
12622 		 * mdi_pi_alloc_compatible() since this can result in a
12623 		 * callback into fcp which can result in a deadlock
12624 		 * (see bug # 4870272).
12625 		 *
12626 		 * Basically, what we are trying to avoid is the scenario where
12627 		 * one thread does ndi_devi_enter() and tries to grab
12628 		 * fcp_mutex and another does it the other way round.
12629 		 *
12630 		 * But before we do that, make sure that nobody releases the
12631 		 * port in the meantime. We can do this by setting a flag.
12632 		 */
12633 		plun->lun_state &= ~(FCP_LUN_CHANGED);
12634 		pptr->port_state |= FCP_STATE_IN_MDI;
12635 		mutex_exit(&plun->lun_mutex);
12636 		mutex_exit(&pptr->port_mutex);
12637 		if (mdi_pi_alloc_compatible(pdip, nname, plun->lun_guid,
12638 		    uaddr, compatible, ncompatible, 0, &pip) != MDI_SUCCESS) {
12639 			fcp_log(CE_WARN, pptr->port_dip,
12640 			    "!path alloc failed:0x%x", plun);
12641 			mutex_enter(&pptr->port_mutex);
12642 			mutex_enter(&plun->lun_mutex);
12643 			pptr->port_state &= ~FCP_STATE_IN_MDI;
12644 			failure++;
12645 			goto end_of_fcp_create_pip;
12646 		}
12647 		mutex_enter(&pptr->port_mutex);
12648 		mutex_enter(&plun->lun_mutex);
12649 		pptr->port_state &= ~FCP_STATE_IN_MDI;
12650 	} else {
12651 		(void) mdi_prop_remove(pip, NULL);
12652 	}
12653 
12654 	mdi_pi_set_phci_private(pip, (caddr_t)plun);
12655 
12656 	if (mdi_prop_update_byte_array(pip, NODE_WWN_PROP,
12657 	    ptgt->tgt_node_wwn.raw_wwn, FC_WWN_SIZE)
12658 	    != DDI_PROP_SUCCESS) {
12659 		failure++;
12660 		goto end_of_fcp_create_pip;
12661 	}
12662 
12663 	if (mdi_prop_update_byte_array(pip, PORT_WWN_PROP,
12664 	    ptgt->tgt_port_wwn.raw_wwn, FC_WWN_SIZE)
12665 	    != DDI_PROP_SUCCESS) {
12666 		failure++;
12667 		goto end_of_fcp_create_pip;
12668 	}
12669 
12670 	fcp_wwn_to_ascii(ptgt->tgt_port_wwn.raw_wwn, t_pwwn);
12671 	t_pwwn[16] = '\0';
12672 	if (mdi_prop_update_string(pip, TGT_PORT_PROP, t_pwwn)
12673 	    != DDI_PROP_SUCCESS) {
12674 		failure++;
12675 		goto end_of_fcp_create_pip;
12676 	}
12677 
12678 	/*
12679 	 * If there is no hard address - We might have to deal with
12680 	 * that by using WWN - Having said that it is important to
12681 	 * recognize this problem early so ssd can be informed of
12682 	 * the right interconnect type.
12683 	 */
12684 	if (!FC_TOP_EXTERNAL(pptr->port_topology) &&
12685 	    ptgt->tgt_hard_addr != 0) {
12686 		tgt_id = (uint32_t)
12687 		    fcp_alpa_to_switch[ptgt->tgt_hard_addr];
12688 	} else {
12689 		tgt_id = ptgt->tgt_d_id;
12690 	}
12691 
12692 	if (mdi_prop_update_int(pip, TARGET_PROP, tgt_id)
12693 	    != DDI_PROP_SUCCESS) {
12694 		failure++;
12695 		goto end_of_fcp_create_pip;
12696 	}
12697 
12698 	if (mdi_prop_update_int(pip, LUN_PROP, (int)plun->lun_num)
12699 	    != DDI_PROP_SUCCESS) {
12700 		failure++;
12701 		goto end_of_fcp_create_pip;
12702 	}
12703 	bcopy(&plun->lun_addr, &sam_lun, FCP_LUN_SIZE);
12704 	if (mdi_prop_update_int64(pip, SAM_LUN_PROP, sam_lun)
12705 	    != DDI_PROP_SUCCESS) {
12706 		failure++;
12707 		goto end_of_fcp_create_pip;
12708 	}
12709 
12710 end_of_fcp_create_pip:
12711 	scsi_hba_nodename_compatible_free(nname, compatible);
12712 
12713 	if (pip != NULL && failure) {
12714 		(void) mdi_prop_remove(pip, NULL);
12715 		mutex_exit(&plun->lun_mutex);
12716 		mutex_exit(&pptr->port_mutex);
12717 		(void) mdi_pi_free(pip, 0);
12718 		mutex_enter(&pptr->port_mutex);
12719 		mutex_enter(&plun->lun_mutex);
12720 		pip = NULL;
12721 	}
12722 
12723 	return (pip);
12724 }
12725 
12726 static dev_info_t *
12727 fcp_find_existing_dip(struct fcp_lun *plun, dev_info_t *pdip, caddr_t name)
12728 {
12729 	uint_t			nbytes;
12730 	uchar_t			*bytes;
12731 	uint_t			nwords;
12732 	uint32_t		tgt_id;
12733 	int			*words;
12734 	dev_info_t		*cdip;
12735 	dev_info_t		*ndip;
12736 	struct fcp_tgt	*ptgt = plun->lun_tgt;
12737 	struct fcp_port	*pptr = ptgt->tgt_port;
12738 	int			circular;
12739 
12740 	ndi_devi_enter(pdip, &circular);
12741 
12742 	ndip = (dev_info_t *)DEVI(pdip)->devi_child;
12743 	while ((cdip = ndip) != NULL) {
12744 		ndip = (dev_info_t *)DEVI(cdip)->devi_sibling;
12745 
12746 		if (strcmp(DEVI(cdip)->devi_node_name, name)) {
12747 			continue;
12748 		}
12749 
12750 		if (ddi_prop_lookup_byte_array(DDI_DEV_T_ANY, cdip,
12751 		    DDI_PROP_DONTPASS | DDI_PROP_NOTPROM, NODE_WWN_PROP, &bytes,
12752 		    &nbytes) != DDI_PROP_SUCCESS) {
12753 			continue;
12754 		}
12755 
12756 		if (nbytes != FC_WWN_SIZE || bytes == NULL) {
12757 			if (bytes != NULL) {
12758 				ddi_prop_free(bytes);
12759 			}
12760 			continue;
12761 		}
12762 		ASSERT(bytes != NULL);
12763 
12764 		if (bcmp(bytes, ptgt->tgt_node_wwn.raw_wwn, nbytes) != 0) {
12765 			ddi_prop_free(bytes);
12766 			continue;
12767 		}
12768 
12769 		ddi_prop_free(bytes);
12770 
12771 		if (ddi_prop_lookup_byte_array(DDI_DEV_T_ANY, cdip,
12772 		    DDI_PROP_DONTPASS | DDI_PROP_NOTPROM, PORT_WWN_PROP, &bytes,
12773 		    &nbytes) != DDI_PROP_SUCCESS) {
12774 			continue;
12775 		}
12776 
12777 		if (nbytes != FC_WWN_SIZE || bytes == NULL) {
12778 			if (bytes != NULL) {
12779 				ddi_prop_free(bytes);
12780 			}
12781 			continue;
12782 		}
12783 		ASSERT(bytes != NULL);
12784 
12785 		if (bcmp(bytes, ptgt->tgt_port_wwn.raw_wwn, nbytes) != 0) {
12786 			ddi_prop_free(bytes);
12787 			continue;
12788 		}
12789 
12790 		ddi_prop_free(bytes);
12791 
12792 		if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY, cdip,
12793 		    DDI_PROP_DONTPASS | DDI_PROP_NOTPROM, TARGET_PROP, &words,
12794 		    &nwords) != DDI_PROP_SUCCESS) {
12795 			continue;
12796 		}
12797 
12798 		if (nwords != 1 || words == NULL) {
12799 			if (words != NULL) {
12800 				ddi_prop_free(words);
12801 			}
12802 			continue;
12803 		}
12804 		ASSERT(words != NULL);
12805 
12806 		/*
12807 		 * If there is no hard address - We might have to deal with
12808 		 * that by using WWN - Having said that it is important to
12809 		 * recognize this problem early so ssd can be informed of
12810 		 * the right interconnect type.
12811 		 */
12812 		if (!FC_TOP_EXTERNAL(pptr->port_topology) &&
12813 		    ptgt->tgt_hard_addr != 0) {
12814 			tgt_id =
12815 			    (uint32_t)fcp_alpa_to_switch[ptgt->tgt_hard_addr];
12816 		} else {
12817 			tgt_id = ptgt->tgt_d_id;
12818 		}
12819 
12820 		if (tgt_id != (uint32_t)*words) {
12821 			ddi_prop_free(words);
12822 			continue;
12823 		}
12824 		ddi_prop_free(words);
12825 
12826 		if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY, cdip,
12827 		    DDI_PROP_DONTPASS | DDI_PROP_NOTPROM, LUN_PROP, &words,
12828 		    &nwords) != DDI_PROP_SUCCESS) {
12829 			continue;
12830 		}
12831 
12832 		if (nwords != 1 || words == NULL) {
12833 			if (words != NULL) {
12834 				ddi_prop_free(words);
12835 			}
12836 			continue;
12837 		}
12838 		ASSERT(words != NULL);
12839 
12840 		if (plun->lun_num == (uint16_t)*words) {
12841 			ddi_prop_free(words);
12842 			break;
12843 		}
12844 		ddi_prop_free(words);
12845 	}
12846 	ndi_devi_exit(pdip, circular);
12847 
12848 	return (cdip);
12849 }
12850 
12851 
12852 static int
12853 fcp_is_pip_present(struct fcp_lun *plun, mdi_pathinfo_t *pip)
12854 {
12855 	dev_info_t	*pdip;
12856 	char		buf[MAXNAMELEN];
12857 	char		uaddr[MAXNAMELEN];
12858 	int		rval = FC_FAILURE;
12859 
12860 	ASSERT(MUTEX_HELD(&plun->lun_mutex));
12861 
12862 	pdip = plun->lun_tgt->tgt_port->port_dip;
12863 
12864 	/*
12865 	 * Check if pip (and not plun->lun_cip) is NULL. plun->lun_cip can be
12866 	 * non-NULL even when the LUN is not there as in the case when a LUN is
12867 	 * configured and then deleted on the device end (for T3/T4 case). In
12868 	 * such cases, pip will be NULL.
12869 	 *
12870 	 * If the device generates an RSCN, it will end up getting offlined when
12871 	 * it disappeared and a new LUN will get created when it is rediscovered
12872 	 * on the device. If we check for lun_cip here, the LUN will not end
12873 	 * up getting onlined since this function will end up returning a
12874 	 * FC_SUCCESS.
12875 	 *
12876 	 * The behavior is different on other devices. For instance, on a HDS,
12877 	 * there was no RSCN generated by the device but the next I/O generated
12878 	 * a check condition and rediscovery got triggered that way. So, in
12879 	 * such cases, this path will not be exercised
12880 	 */
12881 	if (pip == NULL) {
12882 		FCP_TRACE(fcp_logq, LUN_PORT->port_instbuf,
12883 		    fcp_trace, FCP_BUF_LEVEL_4, 0,
12884 		    "fcp_is_pip_present: plun->lun_cip is NULL: "
12885 		    "plun: %p lun state: %x num: %d target state: %x",
12886 		    plun, plun->lun_state, plun->lun_num,
12887 		    plun->lun_tgt->tgt_port->port_state);
12888 		return (rval);
12889 	}
12890 
12891 	fcp_wwn_to_ascii(plun->lun_tgt->tgt_port_wwn.raw_wwn, buf);
12892 
12893 	(void) snprintf(uaddr, MAXNAMELEN, "w%s,%x", buf, plun->lun_num);
12894 
12895 	if (plun->lun_old_guid) {
12896 		if (mdi_pi_find(pdip, plun->lun_old_guid, uaddr) == pip) {
12897 			rval = FC_SUCCESS;
12898 		}
12899 	} else {
12900 		if (mdi_pi_find(pdip, plun->lun_guid, uaddr) == pip) {
12901 			rval = FC_SUCCESS;
12902 		}
12903 	}
12904 	return (rval);
12905 }
12906 
12907 static mdi_pathinfo_t *
12908 fcp_find_existing_pip(struct fcp_lun *plun, dev_info_t *pdip)
12909 {
12910 	char			buf[MAXNAMELEN];
12911 	char			uaddr[MAXNAMELEN];
12912 	mdi_pathinfo_t		*pip;
12913 	struct fcp_tgt	*ptgt = plun->lun_tgt;
12914 	struct fcp_port	*pptr = ptgt->tgt_port;
12915 
12916 	ASSERT(MUTEX_HELD(&pptr->port_mutex));
12917 
12918 	fcp_wwn_to_ascii(ptgt->tgt_port_wwn.raw_wwn, buf);
12919 	(void) snprintf(uaddr, MAXNAMELEN, "w%s,%x", buf, plun->lun_num);
12920 
12921 	pip = mdi_pi_find(pdip, plun->lun_guid, uaddr);
12922 
12923 	return (pip);
12924 }
12925 
12926 
12927 static int
12928 fcp_online_child(struct fcp_lun *plun, child_info_t *cip, int lcount,
12929     int tcount, int flags, int *circ)
12930 {
12931 	int			rval;
12932 	struct fcp_port		*pptr = plun->lun_tgt->tgt_port;
12933 	struct fcp_tgt	*ptgt = plun->lun_tgt;
12934 	dev_info_t		*cdip = NULL;
12935 
12936 	ASSERT(MUTEX_HELD(&pptr->port_mutex));
12937 	ASSERT(MUTEX_HELD(&plun->lun_mutex));
12938 
12939 	if (plun->lun_cip == NULL) {
12940 		FCP_TRACE(fcp_logq, pptr->port_instbuf,
12941 		    fcp_trace, FCP_BUF_LEVEL_3, 0,
12942 		    "fcp_online_child: plun->lun_cip is NULL: "
12943 		    "plun: %p state: %x num: %d target state: %x",
12944 		    plun, plun->lun_state, plun->lun_num,
12945 		    plun->lun_tgt->tgt_port->port_state);
12946 		return (NDI_FAILURE);
12947 	}
12948 again:
12949 	if (plun->lun_mpxio == 0) {
12950 		cdip = DIP(cip);
12951 		mutex_exit(&plun->lun_mutex);
12952 		mutex_exit(&pptr->port_mutex);
12953 
12954 		FCP_TRACE(fcp_logq, pptr->port_instbuf,
12955 		    fcp_trace, FCP_BUF_LEVEL_3, 0,
12956 		    "!Invoking ndi_devi_online for %s: target=%x lun=%x",
12957 		    ddi_get_name(cdip), ptgt->tgt_d_id, plun->lun_num);
12958 
12959 		/*
12960 		 * We could check for FCP_LUN_INIT here but chances
12961 		 * of getting here when it's already in FCP_LUN_INIT
12962 		 * is rare and a duplicate ndi_devi_online wouldn't
12963 		 * hurt either (as the node would already have been
12964 		 * in CF2)
12965 		 */
12966 		if (!i_ddi_devi_attached(ddi_get_parent(cdip))) {
12967 			rval = ndi_devi_bind_driver(cdip, flags);
12968 		} else {
12969 			rval = ndi_devi_online(cdip, flags);
12970 		}
12971 		/*
12972 		 * We log the message into trace buffer if the device
12973 		 * is "ses" and into syslog for any other device
12974 		 * type. This is to prevent the ndi_devi_online failure
12975 		 * message that appears for V880/A5K ses devices.
12976 		 */
12977 		if (rval == NDI_SUCCESS) {
12978 			mutex_enter(&ptgt->tgt_mutex);
12979 			plun->lun_state |= FCP_LUN_INIT;
12980 			mutex_exit(&ptgt->tgt_mutex);
12981 		} else if (strncmp(ddi_node_name(cdip), "ses", 3) != 0) {
12982 			fcp_log(CE_NOTE, pptr->port_dip,
12983 			    "!ndi_devi_online:"
12984 			    " failed for %s: target=%x lun=%x %x",
12985 			    ddi_get_name(cdip), ptgt->tgt_d_id,
12986 			    plun->lun_num, rval);
12987 		} else {
12988 			FCP_TRACE(fcp_logq, pptr->port_instbuf,
12989 			    fcp_trace, FCP_BUF_LEVEL_3, 0,
12990 			    " !ndi_devi_online:"
12991 			    " failed for %s: target=%x lun=%x %x",
12992 			    ddi_get_name(cdip), ptgt->tgt_d_id,
12993 			    plun->lun_num, rval);
12994 		}
12995 	} else {
12996 		cdip = mdi_pi_get_client(PIP(cip));
12997 		mutex_exit(&plun->lun_mutex);
12998 		mutex_exit(&pptr->port_mutex);
12999 
13000 		FCP_TRACE(fcp_logq, pptr->port_instbuf,
13001 		    fcp_trace, FCP_BUF_LEVEL_3, 0,
13002 		    "!Invoking mdi_pi_online for %s: target=%x lun=%x",
13003 		    ddi_get_name(cdip), ptgt->tgt_d_id, plun->lun_num);
13004 
13005 		/*
13006 		 * Hold path and exit phci to avoid deadlock with power
13007 		 * management code during mdi_pi_online.
13008 		 */
13009 		mdi_hold_path(PIP(cip));
13010 		mdi_devi_exit_phci(pptr->port_dip, *circ);
13011 
13012 		rval = mdi_pi_online(PIP(cip), flags);
13013 
13014 		mdi_devi_enter_phci(pptr->port_dip, circ);
13015 		mdi_rele_path(PIP(cip));
13016 
13017 		if (rval == MDI_SUCCESS) {
13018 			mutex_enter(&ptgt->tgt_mutex);
13019 			plun->lun_state |= FCP_LUN_INIT;
13020 			mutex_exit(&ptgt->tgt_mutex);
13021 
13022 			/*
13023 			 * Clear MPxIO path permanent disable in case
13024 			 * fcp hotplug dropped the offline event.
13025 			 */
13026 			(void) mdi_pi_enable_path(PIP(cip), DRIVER_DISABLE);
13027 
13028 		} else if (rval == MDI_NOT_SUPPORTED) {
13029 			child_info_t	*old_cip = cip;
13030 
13031 			/*
13032 			 * MPxIO does not support this device yet.
13033 			 * Enumerate in legacy mode.
13034 			 */
13035 			mutex_enter(&pptr->port_mutex);
13036 			mutex_enter(&plun->lun_mutex);
13037 			plun->lun_mpxio = 0;
13038 			plun->lun_cip = NULL;
13039 			cdip = fcp_create_dip(plun, lcount, tcount);
13040 			plun->lun_cip = cip = CIP(cdip);
13041 			if (cip == NULL) {
13042 				fcp_log(CE_WARN, pptr->port_dip,
13043 				    "!fcp_online_child: "
13044 				    "Create devinfo failed for LU=%p", plun);
13045 				mutex_exit(&plun->lun_mutex);
13046 
13047 				mutex_enter(&ptgt->tgt_mutex);
13048 				plun->lun_state |= FCP_LUN_OFFLINE;
13049 				mutex_exit(&ptgt->tgt_mutex);
13050 
13051 				mutex_exit(&pptr->port_mutex);
13052 
13053 				/*
13054 				 * free the mdi_pathinfo node
13055 				 */
13056 				(void) mdi_pi_free(PIP(old_cip), 0);
13057 			} else {
13058 				FCP_TRACE(fcp_logq, pptr->port_instbuf,
13059 				    fcp_trace, FCP_BUF_LEVEL_3, 0,
13060 				    "fcp_online_child: creating devinfo "
13061 				    "node 0x%p for plun 0x%p",
13062 				    cip, plun);
13063 				mutex_exit(&plun->lun_mutex);
13064 				mutex_exit(&pptr->port_mutex);
13065 				/*
13066 				 * free the mdi_pathinfo node
13067 				 */
13068 				(void) mdi_pi_free(PIP(old_cip), 0);
13069 				mutex_enter(&pptr->port_mutex);
13070 				mutex_enter(&plun->lun_mutex);
13071 				goto again;
13072 			}
13073 		} else {
13074 			if (cdip) {
13075 				fcp_log(CE_NOTE, pptr->port_dip,
13076 				    "!fcp_online_child: mdi_pi_online:"
13077 				    " failed for %s: target=%x lun=%x %x",
13078 				    ddi_get_name(cdip), ptgt->tgt_d_id,
13079 				    plun->lun_num, rval);
13080 			}
13081 		}
13082 		rval = (rval == MDI_SUCCESS) ? NDI_SUCCESS : NDI_FAILURE;
13083 	}
13084 
13085 	if (rval == NDI_SUCCESS) {
13086 		if (cdip) {
13087 			(void) ndi_event_retrieve_cookie(
13088 			    pptr->port_ndi_event_hdl, cdip, FCAL_INSERT_EVENT,
13089 			    &fcp_insert_eid, NDI_EVENT_NOPASS);
13090 			(void) ndi_event_run_callbacks(pptr->port_ndi_event_hdl,
13091 			    cdip, fcp_insert_eid, NULL);
13092 		}
13093 	}
13094 	mutex_enter(&pptr->port_mutex);
13095 	mutex_enter(&plun->lun_mutex);
13096 	return (rval);
13097 }
13098 
/*
 *     Function: fcp_offline_child
 *
 *  Description: Takes the child node of a LUN offline.  Depending on
 *		 plun->lun_mpxio the child is either a regular devinfo
 *		 node (offlined through ndi_devi_offline()) or an MPxIO
 *		 pathinfo node (offlined through mdi_pi_offline()).  The
 *		 LUN and port mutexes are dropped around the NDI/MDI calls
 *		 and reacquired before returning.  lcount and tcount are
 *		 unused here (hence ARGSUSED).
 *
 *     Argument: *plun		LUN whose child node is to be offlined.
 *		 *cip		Child node (devinfo or pathinfo; DIP()/PIP()).
 *		 flags		NDI/MDI offline flags (e.g. NDI_DEVI_REMOVE).
 *		 *circ		pHCI circular-enter cookie used to exit and
 *				re-enter the pHCI around mdi_pi_offline().
 *
 * Return Value: NDI_SUCCESS or NDI_FAILURE.
 *
 *	Context: Caller holds plun->lun_mutex and the port mutex; both are
 *		 held again on return.
 */
/* ARGSUSED */
static int
fcp_offline_child(struct fcp_lun *plun, child_info_t *cip, int lcount,
    int tcount, int flags, int *circ)
{
	int rval;
	struct fcp_port		*pptr = plun->lun_tgt->tgt_port;
	struct fcp_tgt	*ptgt = plun->lun_tgt;
	dev_info_t		*cdip;

	ASSERT(MUTEX_HELD(&plun->lun_mutex));
	ASSERT(MUTEX_HELD(&pptr->port_mutex));

	/* Nothing to offline if the LUN never got a child node. */
	if (plun->lun_cip == NULL) {
		FCP_TRACE(fcp_logq, pptr->port_instbuf,
		    fcp_trace, FCP_BUF_LEVEL_3, 0,
		    "fcp_offline_child: plun->lun_cip is NULL: "
		    "plun: %p lun state: %x num: %d target state: %x",
		    plun, plun->lun_state, plun->lun_num,
		    plun->lun_tgt->tgt_port->port_state);
		return (NDI_FAILURE);
	}

	if (plun->lun_mpxio == 0) {
		/* Legacy (non-MPxIO) devinfo child. */
		cdip = DIP(cip);
		mutex_exit(&plun->lun_mutex);
		mutex_exit(&pptr->port_mutex);
		rval = ndi_devi_offline(DIP(cip), flags);
		if (rval != NDI_SUCCESS) {
			FCP_TRACE(fcp_logq, pptr->port_instbuf,
			    fcp_trace, FCP_BUF_LEVEL_3, 0,
			    "fcp_offline_child: ndi_devi_offline failed "
			    "rval=%x cip=%p", rval, cip);
		}
	} else {
		/* MPxIO path: cdip is the client node, used for tracing. */
		cdip = mdi_pi_get_client(PIP(cip));
		mutex_exit(&plun->lun_mutex);
		mutex_exit(&pptr->port_mutex);

		/*
		 * Exit phci to avoid deadlock with power management code
		 * during mdi_pi_offline
		 */
		mdi_hold_path(PIP(cip));
		mdi_devi_exit_phci(pptr->port_dip, *circ);

		rval = mdi_pi_offline(PIP(cip), flags);

		mdi_devi_enter_phci(pptr->port_dip, circ);
		mdi_rele_path(PIP(cip));

		if (rval == MDI_SUCCESS) {
			/*
			 * Clear MPxIO path permanent disable as the path is
			 * already offlined.
			 */
			(void) mdi_pi_enable_path(PIP(cip), DRIVER_DISABLE);

			if (flags & NDI_DEVI_REMOVE) {
				(void) mdi_pi_free(PIP(cip), 0);
			}
		} else {
			FCP_TRACE(fcp_logq, pptr->port_instbuf,
			    fcp_trace, FCP_BUF_LEVEL_3, 0,
			    "fcp_offline_child: mdi_pi_offline failed "
			    "rval=%x cip=%p", rval, cip);
		}
		/* Map the MDI result onto the NDI codes callers expect. */
		rval = (rval == MDI_SUCCESS) ? NDI_SUCCESS : NDI_FAILURE;
	}

	/* The LUN is no longer initialized, whatever the offline result. */
	mutex_enter(&ptgt->tgt_mutex);
	plun->lun_state &= ~FCP_LUN_INIT;
	mutex_exit(&ptgt->tgt_mutex);

	mutex_enter(&pptr->port_mutex);
	mutex_enter(&plun->lun_mutex);

	if (rval == NDI_SUCCESS) {
		/* On success cdip is cleared so no failure message below. */
		cdip = NULL;
		if (flags & NDI_DEVI_REMOVE) {
			/*
			 * If the guid of the LUN changes, lun_cip will not
			 * equal to cip, and after offlining the LUN with the
			 * old guid, we should keep lun_cip since it's the cip
			 * of the LUN with the new guid.
			 * Otherwise remove our reference to child node.
			 */
			if (plun->lun_cip == cip) {
				plun->lun_cip = NULL;
			}
			if (plun->lun_old_guid) {
				kmem_free(plun->lun_old_guid,
				    plun->lun_old_guid_size);
				plun->lun_old_guid = NULL;
				plun->lun_old_guid_size = 0;
			}
		}
	}

	/* cdip is still set only when the offline attempt failed. */
	if (cdip) {
		FCP_TRACE(fcp_logq, pptr->port_instbuf,
		    fcp_trace, FCP_BUF_LEVEL_3, 0, "!%s failed for %s:"
		    " target=%x lun=%x", "ndi_offline",
		    ddi_get_name(cdip), ptgt->tgt_d_id, plun->lun_num);
	}

	return (rval);
}
13207 
/*
 *     Function: fcp_remove_child
 *
 *  Description: Frees the child node (devinfo or MPxIO pathinfo) still
 *		 attached to the LUN, if any, and clears lun_cip.  In the
 *		 MPxIO case the three nested mutexes (LUN, target, port)
 *		 are dropped around the MDI calls and reacquired in
 *		 port -> target -> LUN order afterwards.
 *
 *     Argument: *plun		LUN whose child node is removed.
 *
 *	Context: Caller holds plun->lun_mutex; for the MPxIO branch the
 *		 target and port mutexes must be held too since they are
 *		 exited here.
 */
static void
fcp_remove_child(struct fcp_lun *plun)
{
	ASSERT(MUTEX_HELD(&plun->lun_mutex));

	if (fcp_is_child_present(plun, plun->lun_cip) == FC_SUCCESS) {
		if (plun->lun_mpxio == 0) {
			/* Legacy devinfo node: strip properties, then free. */
			(void) ndi_prop_remove_all(DIP(plun->lun_cip));
			(void) ndi_devi_free(DIP(plun->lun_cip));
		} else {
			/*
			 * MPxIO pathinfo node: drop the whole lock
			 * hierarchy before calling into MDI, then take
			 * it back top-down.
			 */
			mutex_exit(&plun->lun_mutex);
			mutex_exit(&plun->lun_tgt->tgt_mutex);
			mutex_exit(&plun->lun_tgt->tgt_port->port_mutex);
			FCP_TRACE(fcp_logq,
			    plun->lun_tgt->tgt_port->port_instbuf,
			    fcp_trace, FCP_BUF_LEVEL_3, 0,
			    "lun=%p pip freed %p", plun, plun->lun_cip);
			(void) mdi_prop_remove(PIP(plun->lun_cip), NULL);
			(void) mdi_pi_free(PIP(plun->lun_cip), 0);
			mutex_enter(&plun->lun_tgt->tgt_port->port_mutex);
			mutex_enter(&plun->lun_tgt->tgt_mutex);
			mutex_enter(&plun->lun_mutex);
		}
	}

	/* No child node is associated with this LUN any longer. */
	plun->lun_cip = NULL;
}
13235 
13236 /*
13237  * called when a timeout occurs
13238  *
13239  * can be scheduled during an attach or resume (if not already running)
13240  *
13241  * one timeout is set up for all ports
13242  *
13243  * acquires and releases the global mutex
13244  */
13245 /*ARGSUSED*/
13246 static void
13247 fcp_watch(void *arg)
13248 {
13249 	struct fcp_port	*pptr;
13250 	struct fcp_ipkt	*icmd;
13251 	struct fcp_ipkt	*nicmd;
13252 	struct fcp_pkt	*cmd;
13253 	struct fcp_pkt	*ncmd;
13254 	struct fcp_pkt	*tail;
13255 	struct fcp_pkt	*pcmd;
13256 	struct fcp_pkt	*save_head;
13257 	struct fcp_port	*save_port;
13258 
13259 	/* increment global watchdog time */
13260 	fcp_watchdog_time += fcp_watchdog_timeout;
13261 
13262 	mutex_enter(&fcp_global_mutex);
13263 
13264 	/* scan each port in our list */
13265 	for (pptr = fcp_port_head; pptr != NULL; pptr = pptr->port_next) {
13266 		save_port = fcp_port_head;
13267 		pptr->port_state |= FCP_STATE_IN_WATCHDOG;
13268 		mutex_exit(&fcp_global_mutex);
13269 
13270 		mutex_enter(&pptr->port_mutex);
13271 		if (pptr->port_ipkt_list == NULL &&
13272 		    (pptr->port_state & (FCP_STATE_SUSPENDED |
13273 		    FCP_STATE_DETACHING | FCP_STATE_POWER_DOWN))) {
13274 			pptr->port_state &= ~FCP_STATE_IN_WATCHDOG;
13275 			mutex_exit(&pptr->port_mutex);
13276 			mutex_enter(&fcp_global_mutex);
13277 			goto end_of_watchdog;
13278 		}
13279 
13280 		/*
13281 		 * We check if a list of targets need to be offlined.
13282 		 */
13283 		if (pptr->port_offline_tgts) {
13284 			fcp_scan_offline_tgts(pptr);
13285 		}
13286 
13287 		/*
13288 		 * We check if a list of luns need to be offlined.
13289 		 */
13290 		if (pptr->port_offline_luns) {
13291 			fcp_scan_offline_luns(pptr);
13292 		}
13293 
13294 		/*
13295 		 * We check if a list of targets or luns need to be reset.
13296 		 */
13297 		if (pptr->port_reset_list) {
13298 			fcp_check_reset_delay(pptr);
13299 		}
13300 
13301 		mutex_exit(&pptr->port_mutex);
13302 
13303 		/*
13304 		 * This is where the pending commands (pkt) are checked for
13305 		 * timeout.
13306 		 */
13307 		mutex_enter(&pptr->port_pkt_mutex);
13308 		tail = pptr->port_pkt_tail;
13309 
13310 		for (pcmd = NULL, cmd = pptr->port_pkt_head;
13311 		    cmd != NULL; cmd = ncmd) {
13312 			ncmd = cmd->cmd_next;
13313 			/*
13314 			 * If a command is in this queue the bit CFLAG_IN_QUEUE
13315 			 * must be set.
13316 			 */
13317 			ASSERT(cmd->cmd_flags & CFLAG_IN_QUEUE);
13318 			/*
13319 			 * FCP_INVALID_TIMEOUT will be set for those
13320 			 * command that need to be failed. Mostly those
13321 			 * cmds that could not be queued down for the
13322 			 * "timeout" value. cmd->cmd_timeout is used
13323 			 * to try and requeue the command regularly.
13324 			 */
13325 			if (cmd->cmd_timeout >= fcp_watchdog_time) {
13326 				/*
13327 				 * This command hasn't timed out yet.  Let's
13328 				 * go to the next one.
13329 				 */
13330 				pcmd = cmd;
13331 				goto end_of_loop;
13332 			}
13333 
13334 			if (cmd == pptr->port_pkt_head) {
13335 				ASSERT(pcmd == NULL);
13336 				pptr->port_pkt_head = cmd->cmd_next;
13337 			} else {
13338 				ASSERT(pcmd != NULL);
13339 				pcmd->cmd_next = cmd->cmd_next;
13340 			}
13341 
13342 			if (cmd == pptr->port_pkt_tail) {
13343 				ASSERT(cmd->cmd_next == NULL);
13344 				pptr->port_pkt_tail = pcmd;
13345 				if (pcmd) {
13346 					pcmd->cmd_next = NULL;
13347 				}
13348 			}
13349 			cmd->cmd_next = NULL;
13350 
13351 			/*
13352 			 * save the current head before dropping the
13353 			 * mutex - If the head doesn't remain the
13354 			 * same after re acquiring the mutex, just
13355 			 * bail out and revisit on next tick.
13356 			 *
13357 			 * PS: The tail pointer can change as the commands
13358 			 * get requeued after failure to retransport
13359 			 */
13360 			save_head = pptr->port_pkt_head;
13361 			mutex_exit(&pptr->port_pkt_mutex);
13362 
13363 			if (cmd->cmd_fp_pkt->pkt_timeout ==
13364 			    FCP_INVALID_TIMEOUT) {
13365 				struct scsi_pkt		*pkt = cmd->cmd_pkt;
13366 				struct fcp_lun	*plun;
13367 				struct fcp_tgt	*ptgt;
13368 
13369 				plun = ADDR2LUN(&pkt->pkt_address);
13370 				ptgt = plun->lun_tgt;
13371 
13372 				FCP_TRACE(fcp_logq, pptr->port_instbuf,
13373 				    fcp_trace, FCP_BUF_LEVEL_2, 0,
13374 				    "SCSI cmd 0x%x to D_ID=%x timed out",
13375 				    pkt->pkt_cdbp[0], ptgt->tgt_d_id);
13376 
13377 				cmd->cmd_state == FCP_PKT_ABORTING ?
13378 				    fcp_fail_cmd(cmd, CMD_RESET,
13379 				    STAT_DEV_RESET) : fcp_fail_cmd(cmd,
13380 				    CMD_TIMEOUT, STAT_ABORTED);
13381 			} else {
13382 				fcp_retransport_cmd(pptr, cmd);
13383 			}
13384 			mutex_enter(&pptr->port_pkt_mutex);
13385 			if (save_head && save_head != pptr->port_pkt_head) {
13386 				/*
13387 				 * Looks like linked list got changed (mostly
13388 				 * happens when an an OFFLINE LUN code starts
13389 				 * returning overflow queue commands in
13390 				 * parallel. So bail out and revisit during
13391 				 * next tick
13392 				 */
13393 				break;
13394 			}
13395 		end_of_loop:
13396 			/*
13397 			 * Scan only upto the previously known tail pointer
13398 			 * to avoid excessive processing - lots of new packets
13399 			 * could have been added to the tail or the old ones
13400 			 * re-queued.
13401 			 */
13402 			if (cmd == tail) {
13403 				break;
13404 			}
13405 		}
13406 		mutex_exit(&pptr->port_pkt_mutex);
13407 
13408 		mutex_enter(&pptr->port_mutex);
13409 		for (icmd = pptr->port_ipkt_list; icmd != NULL; icmd = nicmd) {
13410 			struct fcp_tgt *ptgt = icmd->ipkt_tgt;
13411 
13412 			nicmd = icmd->ipkt_next;
13413 			if ((icmd->ipkt_restart != 0) &&
13414 			    (icmd->ipkt_restart >= fcp_watchdog_time)) {
13415 				/* packet has not timed out */
13416 				continue;
13417 			}
13418 
13419 			/* time for packet re-transport */
13420 			if (icmd == pptr->port_ipkt_list) {
13421 				pptr->port_ipkt_list = icmd->ipkt_next;
13422 				if (pptr->port_ipkt_list) {
13423 					pptr->port_ipkt_list->ipkt_prev =
13424 					    NULL;
13425 				}
13426 			} else {
13427 				icmd->ipkt_prev->ipkt_next = icmd->ipkt_next;
13428 				if (icmd->ipkt_next) {
13429 					icmd->ipkt_next->ipkt_prev =
13430 					    icmd->ipkt_prev;
13431 				}
13432 			}
13433 			icmd->ipkt_next = NULL;
13434 			icmd->ipkt_prev = NULL;
13435 			mutex_exit(&pptr->port_mutex);
13436 
13437 			if (fcp_is_retryable(icmd)) {
13438 				fc_ulp_rscn_info_t *rscnp =
13439 				    (fc_ulp_rscn_info_t *)icmd->ipkt_fpkt->
13440 				    pkt_ulp_rscn_infop;
13441 
13442 				FCP_TRACE(fcp_logq, pptr->port_instbuf,
13443 				    fcp_trace, FCP_BUF_LEVEL_2, 0,
13444 				    "%x to D_ID=%x Retrying..",
13445 				    icmd->ipkt_opcode,
13446 				    icmd->ipkt_fpkt->pkt_cmd_fhdr.d_id);
13447 
13448 				/*
13449 				 * Update the RSCN count in the packet
13450 				 * before resending.
13451 				 */
13452 
13453 				if (rscnp != NULL) {
13454 					rscnp->ulp_rscn_count =
13455 					    fc_ulp_get_rscn_count(pptr->
13456 					    port_fp_handle);
13457 				}
13458 
13459 				mutex_enter(&pptr->port_mutex);
13460 				mutex_enter(&ptgt->tgt_mutex);
13461 				if (!FCP_STATE_CHANGED(pptr, ptgt, icmd)) {
13462 					mutex_exit(&ptgt->tgt_mutex);
13463 					mutex_exit(&pptr->port_mutex);
13464 					switch (icmd->ipkt_opcode) {
13465 						int rval;
13466 					case LA_ELS_PLOGI:
13467 						if ((rval = fc_ulp_login(
13468 						    pptr->port_fp_handle,
13469 						    &icmd->ipkt_fpkt, 1)) ==
13470 						    FC_SUCCESS) {
13471 							mutex_enter(
13472 							    &pptr->port_mutex);
13473 							continue;
13474 						}
13475 						if (fcp_handle_ipkt_errors(
13476 						    pptr, ptgt, icmd, rval,
13477 						    "PLOGI") == DDI_SUCCESS) {
13478 							mutex_enter(
13479 							    &pptr->port_mutex);
13480 							continue;
13481 						}
13482 						break;
13483 
13484 					case LA_ELS_PRLI:
13485 						if ((rval = fc_ulp_issue_els(
13486 						    pptr->port_fp_handle,
13487 						    icmd->ipkt_fpkt)) ==
13488 						    FC_SUCCESS) {
13489 							mutex_enter(
13490 							    &pptr->port_mutex);
13491 							continue;
13492 						}
13493 						if (fcp_handle_ipkt_errors(
13494 						    pptr, ptgt, icmd, rval,
13495 						    "PRLI") == DDI_SUCCESS) {
13496 							mutex_enter(
13497 							    &pptr->port_mutex);
13498 							continue;
13499 						}
13500 						break;
13501 
13502 					default:
13503 						if ((rval = fcp_transport(
13504 						    pptr->port_fp_handle,
13505 						    icmd->ipkt_fpkt, 1)) ==
13506 						    FC_SUCCESS) {
13507 							mutex_enter(
13508 							    &pptr->port_mutex);
13509 							continue;
13510 						}
13511 						if (fcp_handle_ipkt_errors(
13512 						    pptr, ptgt, icmd, rval,
13513 						    "PRLI") == DDI_SUCCESS) {
13514 							mutex_enter(
13515 							    &pptr->port_mutex);
13516 							continue;
13517 						}
13518 						break;
13519 					}
13520 				} else {
13521 					mutex_exit(&ptgt->tgt_mutex);
13522 					mutex_exit(&pptr->port_mutex);
13523 				}
13524 			} else {
13525 				fcp_print_error(icmd->ipkt_fpkt);
13526 			}
13527 
13528 			(void) fcp_call_finish_init(pptr, ptgt,
13529 			    icmd->ipkt_link_cnt, icmd->ipkt_change_cnt,
13530 			    icmd->ipkt_cause);
13531 			fcp_icmd_free(pptr, icmd);
13532 			mutex_enter(&pptr->port_mutex);
13533 		}
13534 
13535 		pptr->port_state &= ~FCP_STATE_IN_WATCHDOG;
13536 		mutex_exit(&pptr->port_mutex);
13537 		mutex_enter(&fcp_global_mutex);
13538 
13539 	end_of_watchdog:
13540 		/*
13541 		 * Bail out early before getting into trouble
13542 		 */
13543 		if (save_port != fcp_port_head) {
13544 			break;
13545 		}
13546 	}
13547 
13548 	if (fcp_watchdog_init > 0) {
13549 		/* reschedule timeout to go again */
13550 		fcp_watchdog_id =
13551 		    timeout(fcp_watch, NULL, fcp_watchdog_tick);
13552 	}
13553 	mutex_exit(&fcp_global_mutex);
13554 }
13555 
13556 
/*
 *     Function: fcp_check_reset_delay
 *
 *  Description: Walks the port's reset-delay list (built when a target or
 *		 LUN reset was issued).  Entries selected by the timeout
 *		 comparison below are unlinked, freed, and — if the target
 *		 generation (tgt_change_cnt) has not moved — the BUSY state
 *		 set at reset time is cleared and outstanding commands are
 *		 aborted via fcp_abort_all().
 *
 *     Argument: *pptr		FCP port whose reset list is scanned.
 *
 *	Context: Called from fcp_watch() with the port mutex held; the
 *		 mutex is dropped around fcp_abort_all().
 */
static void
fcp_check_reset_delay(struct fcp_port *pptr)
{
	uint32_t		tgt_cnt;
	int			level;
	struct fcp_tgt	*ptgt;
	struct fcp_lun	*plun;
	struct fcp_reset_elem *cur = NULL;
	struct fcp_reset_elem *next = NULL;
	struct fcp_reset_elem *prev = NULL;

	ASSERT(mutex_owned(&pptr->port_mutex));

	next = pptr->port_reset_list;
	while ((cur = next) != NULL) {
		next = cur->next;

		/*
		 * NOTE(review): this retains entries whose timeout is
		 * earlier than the current watchdog time and processes
		 * the others; confirm the intended expiry semantics
		 * against the code that sets cur->timeout.
		 */
		if (cur->timeout < fcp_watchdog_time) {
			prev = cur;
			continue;
		}

		ptgt = cur->tgt;
		plun = cur->lun;
		tgt_cnt = cur->tgt_cnt;

		/* a target pointer means target-level reset, else LUN */
		if (ptgt) {
			level = RESET_TARGET;
		} else {
			ASSERT(plun != NULL);
			level = RESET_LUN;
			ptgt = plun->lun_tgt;
		}
		if (prev) {
			prev->next = next;
		} else {
			/*
			 * Because we drop port mutex while doing aborts for
			 * packets, we can't rely on reset_list pointing to
			 * our head
			 */
			if (cur == pptr->port_reset_list) {
				pptr->port_reset_list = next;
			} else {
				struct fcp_reset_elem *which;

				which = pptr->port_reset_list;
				while (which && which->next != cur) {
					which = which->next;
				}
				ASSERT(which != NULL);

				which->next = next;
				prev = which;
			}
		}

		kmem_free(cur, sizeof (*cur));

		/* only act if the target generation is unchanged */
		if (tgt_cnt == ptgt->tgt_change_cnt) {
			mutex_enter(&ptgt->tgt_mutex);
			if (level == RESET_TARGET) {
				fcp_update_tgt_state(ptgt,
				    FCP_RESET, FCP_LUN_BUSY);
			} else {
				fcp_update_lun_state(plun,
				    FCP_RESET, FCP_LUN_BUSY);
			}
			mutex_exit(&ptgt->tgt_mutex);

			/* port mutex must be dropped across the aborts */
			mutex_exit(&pptr->port_mutex);
			fcp_abort_all(pptr, ptgt, plun, tgt_cnt);
			mutex_enter(&pptr->port_mutex);
		}
	}
}
13633 
13634 
/*
 *     Function: fcp_abort_all
 *
 *  Description: After a target or LUN reset, completes every matching
 *		 command still queued on the port's overflow queue with
 *		 CMD_RESET, and (unless the FCA returns its whole queue
 *		 itself) walks the target's LUNs aborting each ISSUED
 *		 packet via fc_ulp_abort().  Packets whose abort fails are
 *		 parked on the overflow queue with FCP_INVALID_TIMEOUT so
 *		 fcp_watch() will finish them later.
 *
 *     Argument: *pptr		FCP port.
 *		 *ttgt		Target being reset (may be NULL for a
 *				LUN-level reset; derived from rlun then).
 *		 *rlun		LUN being reset (NULL for target reset).
 *		 tgt_cnt	Target generation count captured at reset
 *				time; a mismatch aborts the walk.
 *
 *	Context: Called without the port mutex held (it is taken and
 *		 dropped internally).
 */
static void
fcp_abort_all(struct fcp_port *pptr, struct fcp_tgt *ttgt,
    struct fcp_lun *rlun, int tgt_cnt)
{
	int			rval;
	struct fcp_lun	*tlun, *nlun;
	struct fcp_pkt	*pcmd = NULL, *ncmd = NULL,
	    *cmd = NULL, *head = NULL,
	    *tail = NULL;

	/*
	 * Pass 1: pull matching commands off the overflow queue onto a
	 * private head/tail list while holding the queue mutex.
	 */
	mutex_enter(&pptr->port_pkt_mutex);
	for (cmd = pptr->port_pkt_head; cmd != NULL; cmd = ncmd) {
		struct fcp_lun *plun = ADDR2LUN(&cmd->cmd_pkt->pkt_address);
		struct fcp_tgt *ptgt = plun->lun_tgt;

		ncmd = cmd->cmd_next;

		/* keep commands that match neither the target nor the LUN */
		if (ptgt != ttgt && plun != rlun) {
			pcmd = cmd;
			continue;
		}

		if (pcmd != NULL) {
			ASSERT(pptr->port_pkt_head != cmd);
			pcmd->cmd_next = ncmd;
		} else {
			ASSERT(cmd == pptr->port_pkt_head);
			pptr->port_pkt_head = ncmd;
		}
		if (pptr->port_pkt_tail == cmd) {
			ASSERT(cmd->cmd_next == NULL);
			pptr->port_pkt_tail = pcmd;
			if (pcmd != NULL) {
				pcmd->cmd_next = NULL;
			}
		}

		/* append to the private list of commands to complete */
		if (head == NULL) {
			head = tail = cmd;
		} else {
			ASSERT(tail != NULL);
			tail->cmd_next = cmd;
			tail = cmd;
		}
		cmd->cmd_next = NULL;
	}
	mutex_exit(&pptr->port_pkt_mutex);

	/*
	 * Pass 2: complete the collected commands with CMD_RESET, but
	 * only while the target generation still matches.
	 */
	for (cmd = head; cmd != NULL; cmd = ncmd) {
		struct scsi_pkt *pkt = cmd->cmd_pkt;

		ncmd = cmd->cmd_next;
		ASSERT(pkt != NULL);

		mutex_enter(&pptr->port_mutex);
		if (ttgt->tgt_change_cnt == tgt_cnt) {
			mutex_exit(&pptr->port_mutex);
			cmd->cmd_flags &= ~CFLAG_IN_QUEUE;
			pkt->pkt_reason = CMD_RESET;
			pkt->pkt_statistics |= STAT_DEV_RESET;
			cmd->cmd_state = FCP_PKT_IDLE;
			fcp_post_callback(cmd);
		} else {
			mutex_exit(&pptr->port_mutex);
		}
	}

	/*
	 * If the FCA will return all the commands in its queue then our
	 * work is easy, just return.
	 */

	if (pptr->port_reset_action == FC_RESET_RETURN_ALL) {
		return;
	}

	/*
	 * For RESET_LUN get hold of target pointer
	 */
	if (ttgt == NULL) {
		ASSERT(rlun != NULL);

		ttgt = rlun->lun_tgt;

		ASSERT(ttgt != NULL);
	}

	/*
	 * There are some severe race conditions here.
	 * While we are trying to abort the pkt, it might be completing
	 * so mark it aborted and if the abort does not succeed then
	 * handle it in the watch thread.
	 */
	mutex_enter(&ttgt->tgt_mutex);
	nlun = ttgt->tgt_lun;
	mutex_exit(&ttgt->tgt_mutex);
	while ((tlun = nlun) != NULL) {
		int restart = 0;
		/* for a LUN reset, only the named LUN is aborted */
		if (rlun && rlun != tlun) {
			mutex_enter(&ttgt->tgt_mutex);
			nlun = tlun->lun_next;
			mutex_exit(&ttgt->tgt_mutex);
			continue;
		}
		mutex_enter(&tlun->lun_mutex);
		cmd = tlun->lun_pkt_head;
		while (cmd != NULL) {
			if (cmd->cmd_state == FCP_PKT_ISSUED) {
				struct scsi_pkt *pkt;

				restart = 1;
				cmd->cmd_state = FCP_PKT_ABORTING;
				mutex_exit(&tlun->lun_mutex);
				rval = fc_ulp_abort(pptr->port_fp_handle,
				    cmd->cmd_fp_pkt, KM_SLEEP);
				if (rval == FC_SUCCESS) {
					pkt = cmd->cmd_pkt;
					pkt->pkt_reason = CMD_RESET;
					pkt->pkt_statistics |= STAT_DEV_RESET;
					cmd->cmd_state = FCP_PKT_IDLE;
					fcp_post_callback(cmd);
				} else {
					caddr_t msg;

					(void) fc_ulp_error(rval, &msg);

					/*
					 * This part is tricky. The abort
					 * failed and now the command could
					 * be completing.  The cmd_state ==
					 * FCP_PKT_ABORTING should save
					 * us in fcp_cmd_callback. If we
					 * are already aborting ignore the
					 * command in fcp_cmd_callback.
					 * Here we leave this packet for 20
					 * sec to be aborted in the
					 * fcp_watch thread.
					 */
					fcp_log(CE_WARN, pptr->port_dip,
					    "!Abort failed after reset %s",
					    msg);

					cmd->cmd_timeout =
					    fcp_watchdog_time +
					    cmd->cmd_pkt->pkt_time +
					    FCP_FAILED_DELAY;

					cmd->cmd_fp_pkt->pkt_timeout =
					    FCP_INVALID_TIMEOUT;
					/*
					 * This is a hack, cmd is put in the
					 * overflow queue so that it can be
					 * timed out finally
					 */
					cmd->cmd_flags |= CFLAG_IN_QUEUE;

					mutex_enter(&pptr->port_pkt_mutex);
					if (pptr->port_pkt_head) {
						ASSERT(pptr->port_pkt_tail
						    != NULL);
						pptr->port_pkt_tail->cmd_next
						    = cmd;
						pptr->port_pkt_tail = cmd;
					} else {
						ASSERT(pptr->port_pkt_tail
						    == NULL);
						pptr->port_pkt_head =
						    pptr->port_pkt_tail
						    = cmd;
					}
					cmd->cmd_next = NULL;
					mutex_exit(&pptr->port_pkt_mutex);
				}
				/*
				 * the LUN mutex was dropped, so restart
				 * the scan from the head of the list
				 */
				mutex_enter(&tlun->lun_mutex);
				cmd = tlun->lun_pkt_head;
			} else {
				cmd = cmd->cmd_forw;
			}
		}
		mutex_exit(&tlun->lun_mutex);

		/* if any abort was attempted, rescan from the first LUN */
		mutex_enter(&ttgt->tgt_mutex);
		restart == 1 ? (nlun = ttgt->tgt_lun) : (nlun = tlun->lun_next);
		mutex_exit(&ttgt->tgt_mutex);

		/* a target generation change means we are done here */
		mutex_enter(&pptr->port_mutex);
		if (tgt_cnt != ttgt->tgt_change_cnt) {
			mutex_exit(&pptr->port_mutex);
			return;
		} else {
			mutex_exit(&pptr->port_mutex);
		}
	}
}
13829 
13830 
13831 /*
13832  * unlink the soft state, returning the soft state found (if any)
13833  *
13834  * acquires and releases the global mutex
13835  */
13836 struct fcp_port *
13837 fcp_soft_state_unlink(struct fcp_port *pptr)
13838 {
13839 	struct fcp_port	*hptr;		/* ptr index */
13840 	struct fcp_port	*tptr;		/* prev hptr */
13841 
13842 	mutex_enter(&fcp_global_mutex);
13843 	for (hptr = fcp_port_head, tptr = NULL;
13844 	    hptr != NULL;
13845 	    tptr = hptr, hptr = hptr->port_next) {
13846 		if (hptr == pptr) {
13847 			/* we found a match -- remove this item */
13848 			if (tptr == NULL) {
13849 				/* we're at the head of the list */
13850 				fcp_port_head = hptr->port_next;
13851 			} else {
13852 				tptr->port_next = hptr->port_next;
13853 			}
13854 			break;			/* success */
13855 		}
13856 	}
13857 	if (fcp_port_head == NULL) {
13858 		fcp_cleanup_blacklist(&fcp_lun_blacklist);
13859 	}
13860 	mutex_exit(&fcp_global_mutex);
13861 	return (hptr);
13862 }
13863 
13864 
13865 /*
13866  * called by fcp_scsi_hba_tgt_init to find a LUN given a
13867  * WWN and a LUN number
13868  */
13869 /* ARGSUSED */
13870 static struct fcp_lun *
13871 fcp_lookup_lun(struct fcp_port *pptr, uchar_t *wwn, uint16_t lun)
13872 {
13873 	int hash;
13874 	struct fcp_tgt *ptgt;
13875 	struct fcp_lun *plun;
13876 
13877 	ASSERT(mutex_owned(&pptr->port_mutex));
13878 
13879 	hash = FCP_HASH(wwn);
13880 	for (ptgt = pptr->port_tgt_hash_table[hash]; ptgt != NULL;
13881 	    ptgt = ptgt->tgt_next) {
13882 		if (bcmp((caddr_t)wwn, (caddr_t)&ptgt->tgt_port_wwn.raw_wwn[0],
13883 		    sizeof (ptgt->tgt_port_wwn)) == 0) {
13884 			mutex_enter(&ptgt->tgt_mutex);
13885 			for (plun = ptgt->tgt_lun;
13886 			    plun != NULL;
13887 			    plun = plun->lun_next) {
13888 				if (plun->lun_num == lun) {
13889 					mutex_exit(&ptgt->tgt_mutex);
13890 					return (plun);
13891 				}
13892 			}
13893 			mutex_exit(&ptgt->tgt_mutex);
13894 			return (NULL);
13895 		}
13896 	}
13897 	return (NULL);
13898 }
13899 
13900 /*
13901  *     Function: fcp_prepare_pkt
13902  *
13903  *  Description: This function prepares the SCSI cmd pkt, passed by the caller,
13904  *		 for fcp_start(). It binds the data or partially maps it.
13905  *		 Builds the FCP header and starts the initialization of the
13906  *		 Fibre Channel header.
13907  *
13908  *     Argument: *pptr		FCP port.
13909  *		 *cmd		FCP packet.
13910  *		 *plun		LUN the command will be sent to.
13911  *
13912  *	Context: User, Kernel and Interrupt context.
13913  */
13914 static void
13915 fcp_prepare_pkt(struct fcp_port *pptr, struct fcp_pkt *cmd,
13916     struct fcp_lun *plun)
13917 {
13918 	fc_packet_t		*fpkt = cmd->cmd_fp_pkt;
13919 	struct fcp_tgt		*ptgt = plun->lun_tgt;
13920 	struct fcp_cmd		*fcmd = &cmd->cmd_fcp_cmd;
13921 
13922 	ASSERT(cmd->cmd_pkt->pkt_comp ||
13923 	    (cmd->cmd_pkt->pkt_flags & FLAG_NOINTR));
13924 
13925 	if (cmd->cmd_pkt->pkt_numcookies) {
13926 		if (cmd->cmd_pkt->pkt_dma_flags & DDI_DMA_READ) {
13927 			fcmd->fcp_cntl.cntl_read_data = 1;
13928 			fcmd->fcp_cntl.cntl_write_data = 0;
13929 			fpkt->pkt_tran_type = FC_PKT_FCP_READ;
13930 		} else {
13931 			fcmd->fcp_cntl.cntl_read_data = 0;
13932 			fcmd->fcp_cntl.cntl_write_data = 1;
13933 			fpkt->pkt_tran_type = FC_PKT_FCP_WRITE;
13934 		}
13935 
13936 		fpkt->pkt_data_cookie = cmd->cmd_pkt->pkt_cookies;
13937 
13938 		fpkt->pkt_data_cookie_cnt = cmd->cmd_pkt->pkt_numcookies;
13939 		ASSERT(fpkt->pkt_data_cookie_cnt <=
13940 		    pptr->port_data_dma_attr.dma_attr_sgllen);
13941 
13942 		cmd->cmd_dmacount = cmd->cmd_pkt->pkt_dma_len;
13943 
13944 		/* FCA needs pkt_datalen to be set */
13945 		fpkt->pkt_datalen = cmd->cmd_dmacount;
13946 		fcmd->fcp_data_len = cmd->cmd_dmacount;
13947 	} else {
13948 		fcmd->fcp_cntl.cntl_read_data = 0;
13949 		fcmd->fcp_cntl.cntl_write_data = 0;
13950 		fpkt->pkt_tran_type = FC_PKT_EXCHANGE;
13951 		fpkt->pkt_datalen = 0;
13952 		fcmd->fcp_data_len = 0;
13953 	}
13954 
13955 	/* set up the Tagged Queuing type */
13956 	if (cmd->cmd_pkt->pkt_flags & FLAG_HTAG) {
13957 		fcmd->fcp_cntl.cntl_qtype = FCP_QTYPE_HEAD_OF_Q;
13958 	} else if (cmd->cmd_pkt->pkt_flags & FLAG_OTAG) {
13959 		fcmd->fcp_cntl.cntl_qtype = FCP_QTYPE_ORDERED;
13960 	} else if (cmd->cmd_pkt->pkt_flags & FLAG_STAG) {
13961 		fcmd->fcp_cntl.cntl_qtype = FCP_QTYPE_SIMPLE;
13962 	} else {
13963 		fcmd->fcp_cntl.cntl_qtype = FCP_QTYPE_UNTAGGED;
13964 	}
13965 
13966 	fcmd->fcp_ent_addr = plun->lun_addr;
13967 
13968 	if (pptr->port_fcp_dma != FC_NO_DVMA_SPACE) {
13969 		FCP_CP_OUT((uint8_t *)fcmd, fpkt->pkt_cmd,
13970 		    fpkt->pkt_cmd_acc, sizeof (struct fcp_cmd));
13971 	} else {
13972 		ASSERT(fpkt->pkt_cmd_dma == NULL && fpkt->pkt_resp_dma == NULL);
13973 	}
13974 
13975 	cmd->cmd_pkt->pkt_reason = CMD_CMPLT;
13976 	cmd->cmd_pkt->pkt_state = 0;
13977 	cmd->cmd_pkt->pkt_statistics = 0;
13978 	cmd->cmd_pkt->pkt_resid = 0;
13979 
13980 	cmd->cmd_fp_pkt->pkt_data_dma = cmd->cmd_pkt->pkt_handle;
13981 
13982 	if (cmd->cmd_pkt->pkt_flags & FLAG_NOINTR) {
13983 		fpkt->pkt_tran_flags = (FC_TRAN_CLASS3 | FC_TRAN_NO_INTR);
13984 		fpkt->pkt_comp = NULL;
13985 	} else {
13986 		fpkt->pkt_tran_flags = (FC_TRAN_CLASS3 | FC_TRAN_INTR);
13987 		if (cmd->cmd_pkt->pkt_flags & FLAG_IMMEDIATE_CB) {
13988 			fpkt->pkt_tran_flags |= FC_TRAN_IMMEDIATE_CB;
13989 		}
13990 		fpkt->pkt_comp = fcp_cmd_callback;
13991 	}
13992 
13993 	mutex_enter(&pptr->port_mutex);
13994 	if (pptr->port_state & FCP_STATE_SUSPENDED) {
13995 		fpkt->pkt_tran_flags |= FC_TRAN_DUMPING;
13996 	}
13997 	mutex_exit(&pptr->port_mutex);
13998 
13999 	fpkt->pkt_cmd_fhdr.d_id = ptgt->tgt_d_id;
14000 	fpkt->pkt_cmd_fhdr.s_id = pptr->port_id;
14001 
14002 	/*
14003 	 * Save a few kernel cycles here
14004 	 */
14005 #ifndef	__lock_lint
14006 	fpkt->pkt_fca_device = ptgt->tgt_fca_dev;
14007 #endif /* __lock_lint */
14008 }
14009 
14010 static void
14011 fcp_post_callback(struct fcp_pkt *cmd)
14012 {
14013 	if (cmd->cmd_pkt->pkt_comp) {
14014 		(*cmd->cmd_pkt->pkt_comp) (cmd->cmd_pkt);
14015 	}
14016 }
14017 
14018 
14019 /*
14020  * called to do polled I/O by fcp_start()
14021  *
14022  * return a transport status value, i.e. TRAN_ACCECPT for success
14023  */
static int
fcp_dopoll(struct fcp_port *pptr, struct fcp_pkt *cmd)
{
	int	rval;

#ifdef	DEBUG
	/* Debug builds track the number of outstanding packets per port. */
	mutex_enter(&pptr->port_pkt_mutex);
	pptr->port_npkts++;
	mutex_exit(&pptr->port_pkt_mutex);
#endif /* DEBUG */

	/*
	 * Honor a non-zero timeout already present on the FC packet by
	 * refreshing it from the SCSI packet; otherwise use the default
	 * polling timeout.
	 */
	if (cmd->cmd_fp_pkt->pkt_timeout) {
		cmd->cmd_fp_pkt->pkt_timeout = cmd->cmd_pkt->pkt_time;
	} else {
		cmd->cmd_fp_pkt->pkt_timeout = FCP_POLL_TIMEOUT;
	}

	/* Polled packets must not have a completion routine attached. */
	ASSERT(cmd->cmd_fp_pkt->pkt_comp == NULL);

	cmd->cmd_state = FCP_PKT_ISSUED;

	/*
	 * For a polled packet, pkt_state is expected to be valid when
	 * fc_ulp_transport() returns (it is inspected below on success).
	 */
	rval = fc_ulp_transport(pptr->port_fp_handle, cmd->cmd_fp_pkt);

#ifdef	DEBUG
	mutex_enter(&pptr->port_pkt_mutex);
	pptr->port_npkts--;
	mutex_exit(&pptr->port_pkt_mutex);
#endif /* DEBUG */

	cmd->cmd_state = FCP_PKT_IDLE;

	/* Map the FC transport result onto a SCSA transport return code. */
	switch (rval) {
	case FC_SUCCESS:
		if (cmd->cmd_fp_pkt->pkt_state == FC_PKT_SUCCESS) {
			fcp_complete_pkt(cmd->cmd_fp_pkt);
			rval = TRAN_ACCEPT;
		} else {
			rval = TRAN_FATAL_ERROR;
		}
		break;

	case FC_TRAN_BUSY:
		rval = TRAN_BUSY;
		cmd->cmd_pkt->pkt_resid = 0;
		break;

	case FC_BADPACKET:
		rval = TRAN_BADPKT;
		break;

	default:
		rval = TRAN_FATAL_ERROR;
		break;
	}

	return (rval);
}
14081 
14082 
14083 /*
14084  * called by some of the following transport-called routines to convert
14085  * a supplied dip ptr to a port struct ptr (i.e. to the soft state)
14086  */
14087 static struct fcp_port *
14088 fcp_dip2port(dev_info_t *dip)
14089 {
14090 	int	instance;
14091 
14092 	instance = ddi_get_instance(dip);
14093 	return (ddi_get_soft_state(fcp_softstate, instance));
14094 }
14095 
14096 
14097 /*
14098  * called internally to return a LUN given a dip
14099  */
14100 struct fcp_lun *
14101 fcp_get_lun_from_cip(struct fcp_port *pptr, child_info_t *cip)
14102 {
14103 	struct fcp_tgt *ptgt;
14104 	struct fcp_lun *plun;
14105 	int i;
14106 
14107 
14108 	ASSERT(mutex_owned(&pptr->port_mutex));
14109 
14110 	for (i = 0; i < FCP_NUM_HASH; i++) {
14111 		for (ptgt = pptr->port_tgt_hash_table[i];
14112 		    ptgt != NULL;
14113 		    ptgt = ptgt->tgt_next) {
14114 			mutex_enter(&ptgt->tgt_mutex);
14115 			for (plun = ptgt->tgt_lun; plun != NULL;
14116 			    plun = plun->lun_next) {
14117 				mutex_enter(&plun->lun_mutex);
14118 				if (plun->lun_cip == cip) {
14119 					mutex_exit(&plun->lun_mutex);
14120 					mutex_exit(&ptgt->tgt_mutex);
14121 					return (plun); /* match found */
14122 				}
14123 				mutex_exit(&plun->lun_mutex);
14124 			}
14125 			mutex_exit(&ptgt->tgt_mutex);
14126 		}
14127 	}
14128 	return (NULL);				/* no LUN found */
14129 }
14130 
14131 /*
14132  * pass an element to the hotplug list, kick the hotplug thread
14133  * and wait for the element to get processed by the hotplug thread.
14134  * on return the element is freed.
14135  *
14136  * return zero success and non-zero on failure
14137  *
14138  * acquires/releases the target mutex
14139  *
14140  */
14141 static int
14142 fcp_pass_to_hp_and_wait(struct fcp_port *pptr, struct fcp_lun *plun,
14143     child_info_t *cip, int what, int link_cnt, int tgt_cnt, int flags)
14144 {
14145 	struct fcp_hp_elem	*elem;
14146 	int			rval;
14147 
14148 	mutex_enter(&plun->lun_tgt->tgt_mutex);
14149 	if ((elem = fcp_pass_to_hp(pptr, plun, cip,
14150 	    what, link_cnt, tgt_cnt, flags, 1)) == NULL) {
14151 		mutex_exit(&plun->lun_tgt->tgt_mutex);
14152 		fcp_log(CE_CONT, pptr->port_dip,
14153 		    "Can not pass_to_hp: what: %d; D_ID=%x, LUN=%x\n",
14154 		    what, plun->lun_tgt->tgt_d_id, plun->lun_num);
14155 		return (NDI_FAILURE);
14156 	}
14157 	mutex_exit(&plun->lun_tgt->tgt_mutex);
14158 	mutex_enter(&elem->mutex);
14159 	if (elem->wait) {
14160 		while (elem->wait) {
14161 			cv_wait(&elem->cv, &elem->mutex);
14162 		}
14163 	}
14164 	rval = (elem->result);
14165 	mutex_exit(&elem->mutex);
14166 	mutex_destroy(&elem->mutex);
14167 	cv_destroy(&elem->cv);
14168 	kmem_free(elem, sizeof (struct fcp_hp_elem));
14169 	return (rval);
14170 }
14171 
14172 /*
14173  * pass an element to the hotplug list, and then
14174  * kick the hotplug thread
14175  *
14176  * return Boolean success, i.e. non-zero if all goes well, else zero on error
14177  *
14178  * acquires/releases the hotplug mutex
14179  *
14180  * called with the target mutex owned
14181  *
14182  * memory acquired in NOSLEEP mode
14183  * NOTE: if wait is set to 1 then the caller is responsible for waiting on
14184  *	 for the hp daemon to process the request and is responsible for
14185  *	 freeing the element
14186  */
14187 static struct fcp_hp_elem *
14188 fcp_pass_to_hp(struct fcp_port *pptr, struct fcp_lun *plun,
14189     child_info_t *cip, int what, int link_cnt, int tgt_cnt, int flags, int wait)
14190 {
14191 	struct fcp_hp_elem	*elem;
14192 	dev_info_t *pdip;
14193 
14194 	ASSERT(pptr != NULL);
14195 	ASSERT(plun != NULL);
14196 	ASSERT(plun->lun_tgt != NULL);
14197 	ASSERT(mutex_owned(&plun->lun_tgt->tgt_mutex));
14198 
14199 	/* create space for a hotplug element */
14200 	if ((elem = kmem_zalloc(sizeof (struct fcp_hp_elem), KM_NOSLEEP))
14201 	    == NULL) {
14202 		fcp_log(CE_WARN, NULL,
14203 		    "!can't allocate memory for hotplug element");
14204 		return (NULL);
14205 	}
14206 
14207 	/* fill in hotplug element */
14208 	elem->port = pptr;
14209 	elem->lun = plun;
14210 	elem->cip = cip;
14211 	elem->what = what;
14212 	elem->flags = flags;
14213 	elem->link_cnt = link_cnt;
14214 	elem->tgt_cnt = tgt_cnt;
14215 	elem->wait = wait;
14216 	mutex_init(&elem->mutex, NULL, MUTEX_DRIVER, NULL);
14217 	cv_init(&elem->cv, NULL, CV_DRIVER, NULL);
14218 
14219 	/* schedule the hotplug task */
14220 	pdip = pptr->port_dip;
14221 	mutex_enter(&plun->lun_mutex);
14222 	if (elem->what == FCP_ONLINE || elem->what == FCP_OFFLINE) {
14223 		plun->lun_event_count++;
14224 		elem->event_cnt = plun->lun_event_count;
14225 	}
14226 	mutex_exit(&plun->lun_mutex);
14227 	if (taskq_dispatch(DEVI(pdip)->devi_taskq, fcp_hp_task,
14228 	    (void *)elem, KM_NOSLEEP) == NULL) {
14229 		mutex_enter(&plun->lun_mutex);
14230 		if (elem->what == FCP_ONLINE || elem->what == FCP_OFFLINE) {
14231 			plun->lun_event_count--;
14232 		}
14233 		mutex_exit(&plun->lun_mutex);
14234 		kmem_free(elem, sizeof (*elem));
14235 		return (0);
14236 	}
14237 
14238 	return (elem);
14239 }
14240 
14241 
/*
 * Try to (re)issue a command that is sitting on the port's retry queue.
 * If the LUN is neither busy nor offline and the port is not onlining,
 * the packet is re-prepared and handed back to the FC transport.  On
 * transport failure, or when the LUN/port state forbids the attempt,
 * the command is re-queued via fcp_queue_pkt().
 */
static void
fcp_retransport_cmd(struct fcp_port *pptr, struct fcp_pkt *cmd)
{
	int			rval;
	struct scsi_address	*ap;
	struct fcp_lun	*plun;
	struct fcp_tgt	*ptgt;
	fc_packet_t	*fpkt;

	ap = &cmd->cmd_pkt->pkt_address;
	plun = ADDR2LUN(ap);
	ptgt = plun->lun_tgt;

	/* Only commands marked as queued may be retransported. */
	ASSERT(cmd->cmd_flags & CFLAG_IN_QUEUE);

	cmd->cmd_state = FCP_PKT_IDLE;

	mutex_enter(&pptr->port_mutex);
	mutex_enter(&ptgt->tgt_mutex);
	if (((plun->lun_state & (FCP_LUN_BUSY | FCP_LUN_OFFLINE)) == 0) &&
	    (!(pptr->port_state & FCP_STATE_ONLINING))) {
		fc_ulp_rscn_info_t *rscnp;

		cmd->cmd_state = FCP_PKT_ISSUED;

		/*
		 * It is possible for pkt_pd to be NULL if tgt_pd_handle was
		 * originally NULL, hence we try to set it to the pd pointed
		 * to by the SCSI device we're trying to get to.
		 */

		fpkt = cmd->cmd_fp_pkt;
		if ((fpkt->pkt_pd == NULL) && (ptgt->tgt_pd_handle != NULL)) {
			fpkt->pkt_pd = ptgt->tgt_pd_handle;
			/*
			 * We need to notify the transport that we now have a
			 * reference to the remote port handle.
			 */
			fc_ulp_hold_remote_port(ptgt->tgt_pd_handle);
		}

		mutex_exit(&ptgt->tgt_mutex);
		mutex_exit(&pptr->port_mutex);

		ASSERT((cmd->cmd_pkt->pkt_flags & FLAG_NOINTR) == 0);

		/* prepare the packet */

		fcp_prepare_pkt(pptr, cmd, plun);

		rscnp = (fc_ulp_rscn_info_t *)cmd->cmd_fp_pkt->
		    pkt_ulp_rscn_infop;

		/* A zero pkt_time means the command never times out. */
		cmd->cmd_timeout = cmd->cmd_pkt->pkt_time ?
		    fcp_watchdog_time + cmd->cmd_pkt->pkt_time : 0;

		if (rscnp != NULL) {
			rscnp->ulp_rscn_count =
			    fc_ulp_get_rscn_count(pptr->
			    port_fp_handle);
		}

		rval = fcp_transport(pptr->port_fp_handle,
		    cmd->cmd_fp_pkt, 0);

		if (rval == FC_SUCCESS) {
			return;
		}
		/*
		 * NOTE(review): cmd_state is assigned discrete values
		 * elsewhere in this function but is manipulated as a bit
		 * mask here (clearing FCP_PKT_ISSUED) rather than being
		 * set to FCP_PKT_IDLE -- confirm this is intentional.
		 */
		cmd->cmd_state &= ~FCP_PKT_ISSUED;
	} else {
		mutex_exit(&ptgt->tgt_mutex);
		mutex_exit(&pptr->port_mutex);
	}

	/* Could not send it now; leave it on the retry queue. */
	fcp_queue_pkt(pptr, cmd);
}
14318 
14319 
14320 static void
14321 fcp_fail_cmd(struct fcp_pkt *cmd, uchar_t reason, uint_t statistics)
14322 {
14323 	ASSERT(cmd->cmd_flags & CFLAG_IN_QUEUE);
14324 
14325 	cmd->cmd_flags &= ~CFLAG_IN_QUEUE;
14326 	cmd->cmd_state = FCP_PKT_IDLE;
14327 
14328 	cmd->cmd_pkt->pkt_reason = reason;
14329 	cmd->cmd_pkt->pkt_state = 0;
14330 	cmd->cmd_pkt->pkt_statistics = statistics;
14331 
14332 	fcp_post_callback(cmd);
14333 }
14334 
14335 /*
14336  *     Function: fcp_queue_pkt
14337  *
14338  *  Description: This function queues the packet passed by the caller into
14339  *		 the list of packets of the FCP port.
14340  *
14341  *     Argument: *pptr		FCP port.
14342  *		 *cmd		FCP packet to queue.
14343  *
14344  * Return Value: None
14345  *
14346  *	Context: User, Kernel and Interrupt context.
14347  */
14348 static void
14349 fcp_queue_pkt(struct fcp_port *pptr, struct fcp_pkt *cmd)
14350 {
14351 	ASSERT((cmd->cmd_pkt->pkt_flags & FLAG_NOQUEUE) == NULL);
14352 
14353 	mutex_enter(&pptr->port_pkt_mutex);
14354 	cmd->cmd_flags |= CFLAG_IN_QUEUE;
14355 	ASSERT(cmd->cmd_state != FCP_PKT_ISSUED);
14356 	cmd->cmd_timeout = fcp_watchdog_time + FCP_QUEUE_DELAY;
14357 
14358 	/*
14359 	 * zero pkt_time means hang around for ever
14360 	 */
14361 	if (cmd->cmd_pkt->pkt_time) {
14362 		if (cmd->cmd_fp_pkt->pkt_timeout > FCP_QUEUE_DELAY) {
14363 			cmd->cmd_fp_pkt->pkt_timeout -= FCP_QUEUE_DELAY;
14364 		} else {
14365 			/*
14366 			 * Indicate the watch thread to fail the
14367 			 * command by setting it to highest value
14368 			 */
14369 			cmd->cmd_timeout = fcp_watchdog_time;
14370 			cmd->cmd_fp_pkt->pkt_timeout = FCP_INVALID_TIMEOUT;
14371 		}
14372 	}
14373 
14374 	if (pptr->port_pkt_head) {
14375 		ASSERT(pptr->port_pkt_tail != NULL);
14376 
14377 		pptr->port_pkt_tail->cmd_next = cmd;
14378 		pptr->port_pkt_tail = cmd;
14379 	} else {
14380 		ASSERT(pptr->port_pkt_tail == NULL);
14381 
14382 		pptr->port_pkt_head = pptr->port_pkt_tail = cmd;
14383 	}
14384 	cmd->cmd_next = NULL;
14385 	mutex_exit(&pptr->port_pkt_mutex);
14386 }
14387 
14388 /*
14389  *     Function: fcp_update_targets
14390  *
14391  *  Description: This function applies the specified change of state to all
14392  *		 the targets listed.  The operation applied is 'set'.
14393  *
14394  *     Argument: *pptr		FCP port.
14395  *		 *dev_list	Array of fc_portmap_t structures.
14396  *		 count		Length of dev_list.
14397  *		 state		State bits to update.
14398  *		 cause		Reason for the update.
14399  *
14400  * Return Value: None
14401  *
14402  *	Context: User, Kernel and Interrupt context.
14403  *		 The mutex pptr->port_mutex must be held.
14404  */
14405 static void
14406 fcp_update_targets(struct fcp_port *pptr, fc_portmap_t *dev_list,
14407     uint32_t count, uint32_t state, int cause)
14408 {
14409 	fc_portmap_t		*map_entry;
14410 	struct fcp_tgt	*ptgt;
14411 
14412 	ASSERT(MUTEX_HELD(&pptr->port_mutex));
14413 
14414 	while (count--) {
14415 		map_entry = &(dev_list[count]);
14416 		ptgt = fcp_lookup_target(pptr,
14417 		    (uchar_t *)&(map_entry->map_pwwn));
14418 		if (ptgt == NULL) {
14419 			continue;
14420 		}
14421 
14422 		mutex_enter(&ptgt->tgt_mutex);
14423 		ptgt->tgt_trace = 0;
14424 		ptgt->tgt_change_cnt++;
14425 		ptgt->tgt_statec_cause = cause;
14426 		ptgt->tgt_tmp_cnt = 1;
14427 		fcp_update_tgt_state(ptgt, FCP_SET, state);
14428 		mutex_exit(&ptgt->tgt_mutex);
14429 	}
14430 }
14431 
14432 static int
14433 fcp_call_finish_init(struct fcp_port *pptr, struct fcp_tgt *ptgt,
14434     int lcount, int tcount, int cause)
14435 {
14436 	int rval;
14437 
14438 	mutex_enter(&pptr->port_mutex);
14439 	rval = fcp_call_finish_init_held(pptr, ptgt, lcount, tcount, cause);
14440 	mutex_exit(&pptr->port_mutex);
14441 
14442 	return (rval);
14443 }
14444 
14445 
/*
 * Bookkeeping step of the discovery process: decrement the per-target
 * and per-port "work outstanding" counters and trigger fcp_finish_tgt()
 * and/or fcp_finish_init() when they reach zero.
 *
 * Returns FCP_DEV_CHANGE when the target's change count no longer
 * matches tcount (a newer event superseded this one), FCP_NO_CHANGE
 * otherwise.  Called with pptr->port_mutex held.
 */
static int
fcp_call_finish_init_held(struct fcp_port *pptr, struct fcp_tgt *ptgt,
    int lcount, int tcount, int cause)
{
	int	finish_init = 0;
	int	finish_tgt = 0;
	int	do_finish_init = 0;
	int	rval = FCP_NO_CHANGE;

	/* Only link-level events may drive the port-wide finish step. */
	if (cause == FCP_CAUSE_LINK_CHANGE ||
	    cause == FCP_CAUSE_LINK_DOWN) {
		do_finish_init = 1;
	}

	if (ptgt != NULL) {
		FCP_TRACE(fcp_logq, pptr->port_instbuf, fcp_trace,
		    FCP_BUF_LEVEL_2, 0,
		    "link_cnt: %d,%d; tgt_cnt: %d,%d; tmp_cnt: %d,%d;"
		    " cause = %d, d_id = 0x%x, tgt_done = %d",
		    pptr->port_link_cnt, lcount, ptgt->tgt_change_cnt, tcount,
		    pptr->port_tmp_cnt, ptgt->tgt_tmp_cnt, cause,
		    ptgt->tgt_d_id, ptgt->tgt_done);

		mutex_enter(&ptgt->tgt_mutex);

		if (tcount && (ptgt->tgt_change_cnt != tcount)) {
			/*
			 * A newer target event occurred; report the change
			 * and, for link events, mark this target done once.
			 */
			rval = FCP_DEV_CHANGE;
			if (do_finish_init && ptgt->tgt_done == 0) {
				ptgt->tgt_done++;
				finish_init = 1;
			}
		} else {
			/* Last outstanding unit of work for this target? */
			if (--ptgt->tgt_tmp_cnt <= 0) {
				ptgt->tgt_tmp_cnt = 0;
				finish_tgt = 1;

				if (do_finish_init) {
					finish_init = 1;
				}
			}
		}
		mutex_exit(&ptgt->tgt_mutex);
	} else {
		FCP_TRACE(fcp_logq, pptr->port_instbuf, fcp_trace,
		    FCP_BUF_LEVEL_2, 0,
		    "Call Finish Init for NO target");

		if (do_finish_init) {
			finish_init = 1;
		}
	}

	if (finish_tgt) {
		ASSERT(ptgt != NULL);

		mutex_enter(&ptgt->tgt_mutex);
#ifdef	DEBUG
		/* Record who drained tgt_tmp_cnt for post-mortem debugging. */
		bzero(ptgt->tgt_tmp_cnt_stack,
		    sizeof (ptgt->tgt_tmp_cnt_stack));

		ptgt->tgt_tmp_cnt_depth = getpcstack(ptgt->tgt_tmp_cnt_stack,
		    FCP_STACK_DEPTH);
#endif /* DEBUG */
		mutex_exit(&ptgt->tgt_mutex);

		(void) fcp_finish_tgt(pptr, ptgt, lcount, tcount, cause);
	}

	/*
	 * Finish the port-wide init only if no link state change occurred
	 * since this work item was created (lcount still current).
	 */
	if (finish_init && lcount == pptr->port_link_cnt) {
		ASSERT(pptr->port_tmp_cnt > 0);
		if (--pptr->port_tmp_cnt == 0) {
			fcp_finish_init(pptr);
		}
	} else if (lcount != pptr->port_link_cnt) {
		FCP_TRACE(fcp_logq, pptr->port_instbuf,
		    fcp_trace, FCP_BUF_LEVEL_2, 0,
		    "fcp_call_finish_init_held,1: state change occured"
		    " for D_ID=0x%x", (ptgt) ? ptgt->tgt_d_id : 0);
	}

	return (rval);
}
14528 
14529 
/*
 * Timeout callback that re-drives LUN discovery for one target by
 * synthesizing a single-entry device list and feeding it through
 * fcp_statec_callback() as a DEVICE_CHANGE event.
 */
static void
fcp_reconfigure_luns(void * tgt_handle)
{
	uint32_t		dev_cnt;
	fc_portmap_t		*devlist;
	struct fcp_tgt	*ptgt = (struct fcp_tgt *)tgt_handle;
	struct fcp_port		*pptr = ptgt->tgt_port;

	/*
	 * If the timer that fires this off got canceled too late, the
	 * target could have been destroyed.
	 */

	/*
	 * NOTE(review): tgt_tid is read here without tgt_mutex; presumably
	 * untimeout()/timeout interlocking elsewhere makes this safe --
	 * confirm against fcp_free_target().
	 */
	if (ptgt->tgt_tid == NULL) {
		return;
	}

	/* Timeout context: allocation must not sleep. */
	devlist = kmem_zalloc(sizeof (*devlist), KM_NOSLEEP);
	if (devlist == NULL) {
		fcp_log(CE_WARN, pptr->port_dip,
		    "!fcp%d: failed to allocate for portmap",
		    pptr->port_instance);
		return;
	}

	/* Build a one-entry portmap describing this target as a new device. */
	dev_cnt = 1;
	devlist->map_pd = ptgt->tgt_pd_handle;
	devlist->map_hard_addr.hard_addr = ptgt->tgt_hard_addr;
	devlist->map_did.port_id = ptgt->tgt_d_id;

	bcopy(&ptgt->tgt_node_wwn.raw_wwn[0], &devlist->map_nwwn, FC_WWN_SIZE);
	bcopy(&ptgt->tgt_port_wwn.raw_wwn[0], &devlist->map_pwwn, FC_WWN_SIZE);

	devlist->map_state = PORT_DEVICE_LOGGED_IN;
	devlist->map_type = PORT_DEVICE_NEW;
	devlist->map_flags = 0;

	fcp_statec_callback(NULL, pptr->port_fp_handle, FC_STATE_DEVICE_CHANGE,
	    pptr->port_topology, devlist, dev_cnt, pptr->port_id);

	/*
	 * Clear the tgt_tid after no more references to
	 * the fcp_tgt
	 */
	mutex_enter(&ptgt->tgt_mutex);
	ptgt->tgt_tid = NULL;
	mutex_exit(&ptgt->tgt_mutex);

	kmem_free(devlist, sizeof (*devlist));
}
14580 
14581 
14582 static void
14583 fcp_free_targets(struct fcp_port *pptr)
14584 {
14585 	int			i;
14586 	struct fcp_tgt	*ptgt;
14587 
14588 	mutex_enter(&pptr->port_mutex);
14589 	for (i = 0; i < FCP_NUM_HASH; i++) {
14590 		ptgt = pptr->port_tgt_hash_table[i];
14591 		while (ptgt != NULL) {
14592 			struct fcp_tgt *next_tgt = ptgt->tgt_next;
14593 
14594 			fcp_free_target(ptgt);
14595 			ptgt = next_tgt;
14596 		}
14597 	}
14598 	mutex_exit(&pptr->port_mutex);
14599 }
14600 
14601 
/*
 * Tear down a single target: cancel its pending timeout (if any),
 * deallocate every LUN hanging off it, then deallocate the target
 * itself.
 */
static void
fcp_free_target(struct fcp_tgt *ptgt)
{
	struct fcp_lun	*plun;
	timeout_id_t		tid;

	mutex_enter(&ptgt->tgt_mutex);
	tid = ptgt->tgt_tid;

	/*
	 * Cancel any pending timeouts for this target.
	 */

	if (tid != NULL) {
		/*
		 * Set tgt_tid to NULL first to avoid a race in the callback.
		 * If tgt_tid is NULL, the callback will simply return.
		 */
		ptgt->tgt_tid = NULL;
		mutex_exit(&ptgt->tgt_mutex);
		/*
		 * Must drop tgt_mutex across untimeout(): it can block
		 * waiting for an in-flight callback that takes the mutex.
		 */
		(void) untimeout(tid);
		mutex_enter(&ptgt->tgt_mutex);
	}

	/* Free every LUN on the target's list. */
	plun = ptgt->tgt_lun;
	while (plun != NULL) {
		/* Save the link before the LUN is deallocated. */
		struct fcp_lun *next_lun = plun->lun_next;

		fcp_dealloc_lun(plun);
		plun = next_lun;
	}

	mutex_exit(&ptgt->tgt_mutex);
	fcp_dealloc_tgt(ptgt);
}
14637 
14638 /*
14639  *     Function: fcp_is_retryable
14640  *
14641  *  Description: Indicates if the internal packet is retryable.
14642  *
14643  *     Argument: *icmd		FCP internal packet.
14644  *
14645  * Return Value: 0	Not retryable
14646  *		 1	Retryable
14647  *
14648  *	Context: User, Kernel and Interrupt context
14649  */
14650 static int
14651 fcp_is_retryable(struct fcp_ipkt *icmd)
14652 {
14653 	if (icmd->ipkt_port->port_state & (FCP_STATE_SUSPENDED |
14654 	    FCP_STATE_DETACHING | FCP_STATE_POWER_DOWN)) {
14655 		return (0);
14656 	}
14657 
14658 	return (((fcp_watchdog_time + icmd->ipkt_fpkt->pkt_timeout) <
14659 	    icmd->ipkt_port->port_deadline) ? 1 : 0);
14660 }
14661 
14662 /*
14663  *     Function: fcp_create_on_demand
14664  *
14665  *     Argument: *pptr		FCP port.
14666  *		 *pwwn		Port WWN.
14667  *
14668  * Return Value: 0	Success
14669  *		 EIO
14670  *		 ENOMEM
14671  *		 EBUSY
14672  *		 EINVAL
14673  *
14674  *	Context: User and Kernel context
14675  */
14676 static int
14677 fcp_create_on_demand(struct fcp_port *pptr, uchar_t *pwwn)
14678 {
14679 	int			wait_ms;
14680 	int			tcount;
14681 	int			lcount;
14682 	int			ret;
14683 	int			error;
14684 	int			rval = EIO;
14685 	int			ntries;
14686 	fc_portmap_t		*devlist;
14687 	opaque_t		pd;
14688 	struct fcp_lun		*plun;
14689 	struct fcp_tgt		*ptgt;
14690 	int			old_manual = 0;
14691 
14692 	/* Allocates the fc_portmap_t structure. */
14693 	devlist = kmem_zalloc(sizeof (*devlist), KM_SLEEP);
14694 
14695 	/*
14696 	 * If FC_INVALID_RSCN_COUNT is non-zero, we will have to init as shown
14697 	 * in the commented statement below:
14698 	 *
14699 	 * devlist->map_rscn_info.ulp_rscn_count = FC_INVALID_RSCN_COUNT;
14700 	 *
14701 	 * Below, the deadline for the discovery process is set.
14702 	 */
14703 	mutex_enter(&pptr->port_mutex);
14704 	pptr->port_deadline = fcp_watchdog_time + FCP_ICMD_DEADLINE;
14705 	mutex_exit(&pptr->port_mutex);
14706 
14707 	/*
14708 	 * We try to find the remote port based on the WWN provided by the
14709 	 * caller.  We actually ask fp/fctl if it has it.
14710 	 */
14711 	pd = fc_ulp_get_remote_port(pptr->port_fp_handle,
14712 	    (la_wwn_t *)pwwn, &error, 1);
14713 
14714 	if (pd == NULL) {
14715 		kmem_free(devlist, sizeof (*devlist));
14716 		return (rval);
14717 	}
14718 
14719 	/*
14720 	 * The remote port was found.  We ask fp/fctl to update our
14721 	 * fc_portmap_t structure.
14722 	 */
14723 	ret = fc_ulp_pwwn_to_portmap(pptr->port_fp_handle,
14724 	    (la_wwn_t *)pwwn, devlist);
14725 	if (ret != FC_SUCCESS) {
14726 		kmem_free(devlist, sizeof (*devlist));
14727 		return (rval);
14728 	}
14729 
14730 	/*
14731 	 * The map flag field is set to indicates that the creation is being
14732 	 * done at the user request (Ioclt probably luxadm or cfgadm).
14733 	 */
14734 	devlist->map_type = PORT_DEVICE_USER_CREATE;
14735 
14736 	mutex_enter(&pptr->port_mutex);
14737 
14738 	/*
14739 	 * We check to see if fcp already has a target that describes the
14740 	 * device being created.  If not it is created.
14741 	 */
14742 	ptgt = fcp_lookup_target(pptr, pwwn);
14743 	if (ptgt == NULL) {
14744 		lcount = pptr->port_link_cnt;
14745 		mutex_exit(&pptr->port_mutex);
14746 
14747 		ptgt = fcp_alloc_tgt(pptr, devlist, lcount);
14748 		if (ptgt == NULL) {
14749 			fcp_log(CE_WARN, pptr->port_dip,
14750 			    "!FC target allocation failed");
14751 			return (ENOMEM);
14752 		}
14753 
14754 		mutex_enter(&pptr->port_mutex);
14755 	}
14756 
14757 	mutex_enter(&ptgt->tgt_mutex);
14758 	ptgt->tgt_statec_cause = FCP_CAUSE_USER_CREATE;
14759 	ptgt->tgt_tmp_cnt = 1;
14760 	ptgt->tgt_device_created = 0;
14761 	/*
14762 	 * If fabric and auto config is set but the target was
14763 	 * manually unconfigured then reset to the manual_config_only to
14764 	 * 0 so the device will get configured.
14765 	 */
14766 	if (FC_TOP_EXTERNAL(pptr->port_topology) &&
14767 	    fcp_enable_auto_configuration &&
14768 	    ptgt->tgt_manual_config_only == 1) {
14769 		old_manual = 1;
14770 		ptgt->tgt_manual_config_only = 0;
14771 	}
14772 	mutex_exit(&ptgt->tgt_mutex);
14773 
14774 	fcp_update_targets(pptr, devlist, 1,
14775 	    FCP_LUN_BUSY | FCP_LUN_MARK, FCP_CAUSE_USER_CREATE);
14776 
14777 	lcount = pptr->port_link_cnt;
14778 	tcount = ptgt->tgt_change_cnt;
14779 
14780 	if (fcp_handle_mapflags(pptr, ptgt, devlist, lcount,
14781 	    tcount, FCP_CAUSE_USER_CREATE) == TRUE) {
14782 		if (FC_TOP_EXTERNAL(pptr->port_topology) &&
14783 		    fcp_enable_auto_configuration && old_manual) {
14784 			mutex_enter(&ptgt->tgt_mutex);
14785 			ptgt->tgt_manual_config_only = 1;
14786 			mutex_exit(&ptgt->tgt_mutex);
14787 		}
14788 
14789 		if (pptr->port_link_cnt != lcount ||
14790 		    ptgt->tgt_change_cnt != tcount) {
14791 			rval = EBUSY;
14792 		}
14793 		mutex_exit(&pptr->port_mutex);
14794 
14795 		FCP_TRACE(fcp_logq, pptr->port_instbuf, fcp_trace,
14796 		    FCP_BUF_LEVEL_3, 0,
14797 		    "fcp_create_on_demand: mapflags ptgt=%x, "
14798 		    "lcount=%x::port_link_cnt=%x, "
14799 		    "tcount=%x: tgt_change_cnt=%x, rval=%x",
14800 		    ptgt, lcount, pptr->port_link_cnt,
14801 		    tcount, ptgt->tgt_change_cnt, rval);
14802 		return (rval);
14803 	}
14804 
14805 	/*
14806 	 * Due to lack of synchronization mechanisms, we perform
14807 	 * periodic monitoring of our request; Because requests
14808 	 * get dropped when another one supercedes (either because
14809 	 * of a link change or a target change), it is difficult to
14810 	 * provide a clean synchronization mechanism (such as a
14811 	 * semaphore or a conditional variable) without exhaustively
14812 	 * rewriting the mainline discovery code of this driver.
14813 	 */
14814 	wait_ms = 500;
14815 
14816 	ntries = fcp_max_target_retries;
14817 
14818 	FCP_TRACE(fcp_logq, pptr->port_instbuf, fcp_trace,
14819 	    FCP_BUF_LEVEL_3, 0,
14820 	    "fcp_create_on_demand(1): ntries=%x, ptgt=%x, "
14821 	    "lcount=%x::port_link_cnt=%x, "
14822 	    "tcount=%x::tgt_change_cnt=%x, rval=%x, tgt_device_created=%x "
14823 	    "tgt_tmp_cnt =%x",
14824 	    ntries, ptgt, lcount, pptr->port_link_cnt,
14825 	    tcount, ptgt->tgt_change_cnt, rval, ptgt->tgt_device_created,
14826 	    ptgt->tgt_tmp_cnt);
14827 
14828 	mutex_enter(&ptgt->tgt_mutex);
14829 	while (ntries-- != 0 && pptr->port_link_cnt == lcount &&
14830 	    ptgt->tgt_change_cnt == tcount && ptgt->tgt_device_created == 0) {
14831 		mutex_exit(&ptgt->tgt_mutex);
14832 		mutex_exit(&pptr->port_mutex);
14833 
14834 		delay(drv_usectohz(wait_ms * 1000));
14835 
14836 		mutex_enter(&pptr->port_mutex);
14837 		mutex_enter(&ptgt->tgt_mutex);
14838 	}
14839 
14840 
14841 	if (pptr->port_link_cnt != lcount || ptgt->tgt_change_cnt != tcount) {
14842 		rval = EBUSY;
14843 	} else {
14844 		if (ptgt->tgt_tmp_cnt == 0 && ptgt->tgt_node_state ==
14845 		    FCP_TGT_NODE_PRESENT) {
14846 			rval = 0;
14847 		}
14848 	}
14849 
14850 	FCP_TRACE(fcp_logq, pptr->port_instbuf, fcp_trace,
14851 	    FCP_BUF_LEVEL_3, 0,
14852 	    "fcp_create_on_demand(2): ntries=%x, ptgt=%x, "
14853 	    "lcount=%x::port_link_cnt=%x, "
14854 	    "tcount=%x::tgt_change_cnt=%x, rval=%x, tgt_device_created=%x "
14855 	    "tgt_tmp_cnt =%x",
14856 	    ntries, ptgt, lcount, pptr->port_link_cnt,
14857 	    tcount, ptgt->tgt_change_cnt, rval, ptgt->tgt_device_created,
14858 	    ptgt->tgt_tmp_cnt);
14859 
14860 	if (rval) {
14861 		if (FC_TOP_EXTERNAL(pptr->port_topology) &&
14862 		    fcp_enable_auto_configuration && old_manual) {
14863 			ptgt->tgt_manual_config_only = 1;
14864 		}
14865 		mutex_exit(&ptgt->tgt_mutex);
14866 		mutex_exit(&pptr->port_mutex);
14867 		kmem_free(devlist, sizeof (*devlist));
14868 
14869 		FCP_TRACE(fcp_logq, pptr->port_instbuf, fcp_trace,
14870 		    FCP_BUF_LEVEL_3, 0,
14871 		    "fcp_create_on_demand(3): ntries=%x, ptgt=%x, "
14872 		    "lcount=%x::port_link_cnt=%x, "
14873 		    "tcount=%x::tgt_change_cnt=%x, rval=%x, "
14874 		    "tgt_device_created=%x, tgt D_ID=%x",
14875 		    ntries, ptgt, lcount, pptr->port_link_cnt,
14876 		    tcount, ptgt->tgt_change_cnt, rval,
14877 		    ptgt->tgt_device_created, ptgt->tgt_d_id);
14878 		return (rval);
14879 	}
14880 
14881 	if ((plun = ptgt->tgt_lun) != NULL) {
14882 		tcount = plun->lun_tgt->tgt_change_cnt;
14883 	} else {
14884 		rval = EINVAL;
14885 	}
14886 	lcount = pptr->port_link_cnt;
14887 
14888 	/*
14889 	 * Configuring the target with no LUNs will fail. We
14890 	 * should reset the node state so that it is not
14891 	 * automatically configured when the LUNs are added
14892 	 * to this target.
14893 	 */
14894 	if (ptgt->tgt_lun_cnt == 0) {
14895 		ptgt->tgt_node_state = FCP_TGT_NODE_NONE;
14896 	}
14897 	mutex_exit(&ptgt->tgt_mutex);
14898 	mutex_exit(&pptr->port_mutex);
14899 
14900 	while (plun) {
14901 		child_info_t	*cip;
14902 
14903 		mutex_enter(&plun->lun_mutex);
14904 		cip = plun->lun_cip;
14905 		mutex_exit(&plun->lun_mutex);
14906 
14907 		mutex_enter(&ptgt->tgt_mutex);
14908 		if (!(plun->lun_state & FCP_LUN_OFFLINE)) {
14909 			mutex_exit(&ptgt->tgt_mutex);
14910 
14911 			rval = fcp_pass_to_hp_and_wait(pptr, plun, cip,
14912 			    FCP_ONLINE, lcount, tcount,
14913 			    NDI_ONLINE_ATTACH);
14914 			if (rval != NDI_SUCCESS) {
14915 				FCP_TRACE(fcp_logq,
14916 				    pptr->port_instbuf, fcp_trace,
14917 				    FCP_BUF_LEVEL_3, 0,
14918 				    "fcp_create_on_demand: "
14919 				    "pass_to_hp_and_wait failed "
14920 				    "rval=%x", rval);
14921 				rval = EIO;
14922 			} else {
14923 				mutex_enter(&LUN_TGT->tgt_mutex);
14924 				plun->lun_state &= ~(FCP_LUN_OFFLINE |
14925 				    FCP_LUN_BUSY);
14926 				mutex_exit(&LUN_TGT->tgt_mutex);
14927 			}
14928 			mutex_enter(&ptgt->tgt_mutex);
14929 		}
14930 
14931 		plun = plun->lun_next;
14932 		mutex_exit(&ptgt->tgt_mutex);
14933 	}
14934 
14935 	kmem_free(devlist, sizeof (*devlist));
14936 
14937 	if (FC_TOP_EXTERNAL(pptr->port_topology) &&
14938 	    fcp_enable_auto_configuration && old_manual) {
14939 		mutex_enter(&ptgt->tgt_mutex);
14940 		/* if successful then set manual to 0 */
14941 		if (rval == 0) {
14942 			ptgt->tgt_manual_config_only = 0;
14943 		} else {
14944 			/* reset to 1 so the user has to do the config */
14945 			ptgt->tgt_manual_config_only = 1;
14946 		}
14947 		mutex_exit(&ptgt->tgt_mutex);
14948 	}
14949 
14950 	return (rval);
14951 }
14952 
14953 
14954 static void
14955 fcp_ascii_to_wwn(caddr_t string, uchar_t bytes[], unsigned int byte_len)
14956 {
14957 	int		count;
14958 	uchar_t		byte;
14959 
14960 	count = 0;
14961 	while (*string) {
14962 		byte = FCP_ATOB(*string); string++;
14963 		byte = byte << 4 | FCP_ATOB(*string); string++;
14964 		bytes[count++] = byte;
14965 
14966 		if (count >= byte_len) {
14967 			break;
14968 		}
14969 	}
14970 }
14971 
14972 static void
14973 fcp_wwn_to_ascii(uchar_t wwn[], char *string)
14974 {
14975 	int		i;
14976 
14977 	for (i = 0; i < FC_WWN_SIZE; i++) {
14978 		(void) sprintf(string + (i * 2),
14979 		    "%02x", wwn[i]);
14980 	}
14981 
14982 }
14983 
/*
 * Log a descriptive warning for a failed internal FCP command.  A
 * format template is built in a scratch buffer (with %% so that the
 * conversions survive the sprintf and are consumed later by fcp_log),
 * then extended with FCP response, sense, or transport error details
 * before being handed to fcp_log() with the matching arguments.
 */
static void
fcp_print_error(fc_packet_t *fpkt)
{
	struct fcp_ipkt	*icmd = (struct fcp_ipkt *)
	    fpkt->pkt_ulp_private;
	struct fcp_port	*pptr;
	struct fcp_tgt	*ptgt;
	struct fcp_lun	*plun;
	caddr_t			buf;
	int			scsi_cmd = 0;

	ptgt = icmd->ipkt_tgt;
	plun = icmd->ipkt_lun;
	pptr = ptgt->tgt_port;

	/* May run in interrupt context, so the allocation must not sleep. */
	buf = kmem_zalloc(256, KM_NOSLEEP);
	if (buf == NULL) {
		return;
	}

	/*
	 * Seed the template with the command name.  scsi_cmd records
	 * whether this was a SCSI command (which carries a LUN argument)
	 * as opposed to an ELS.
	 */
	switch (icmd->ipkt_opcode) {
	case SCMD_REPORT_LUN:
		(void) sprintf(buf, "!REPORT LUN to D_ID=0x%%x"
		    " lun=0x%%x failed");
		scsi_cmd++;
		break;

	case SCMD_INQUIRY_PAGE83:
		(void) sprintf(buf, "!INQUIRY-83 to D_ID=0x%%x"
		    " lun=0x%%x failed");
		scsi_cmd++;
		break;

	case SCMD_INQUIRY:
		(void) sprintf(buf, "!INQUIRY to D_ID=0x%%x"
		    " lun=0x%%x failed");
		scsi_cmd++;
		break;

	case LA_ELS_PLOGI:
		(void) sprintf(buf, "!PLOGI to D_ID=0x%%x failed");
		break;

	case LA_ELS_PRLI:
		(void) sprintf(buf, "!PRLI to D_ID=0x%%x failed");
		break;
	}

	if (scsi_cmd && fpkt->pkt_state == FC_PKT_SUCCESS) {
		struct fcp_rsp		response, *rsp;
		uchar_t			asc, ascq;
		caddr_t			sense_key = NULL;
		struct fcp_rsp_info	fcp_rsp_err, *bep;

		/*
		 * With no DVMA, the response can be read in place;
		 * otherwise it must be copied out of DMA memory first.
		 */
		if (icmd->ipkt_nodma) {
			rsp = (struct fcp_rsp *)fpkt->pkt_resp;
			bep = (struct fcp_rsp_info *)((caddr_t)rsp +
			    sizeof (struct fcp_rsp));
		} else {
			rsp = &response;
			bep = &fcp_rsp_err;

			FCP_CP_IN(fpkt->pkt_resp, rsp, fpkt->pkt_resp_acc,
			    sizeof (struct fcp_rsp));

			FCP_CP_IN(fpkt->pkt_resp + sizeof (struct fcp_rsp),
			    bep, fpkt->pkt_resp_acc,
			    sizeof (struct fcp_rsp_info));
		}


		if (fcp_validate_fcp_response(rsp, pptr) != FC_SUCCESS) {
			(void) sprintf(buf + strlen(buf),
			    " : Bad FCP response values rsvd1=%%x, rsvd2=%%x,"
			    " sts-rsvd1=%%x, sts-rsvd2=%%x, rsplen=%%x,"
			    " senselen=%%x. Giving up");

			fcp_log(CE_WARN, pptr->port_dip, buf,
			    ptgt->tgt_d_id, plun->lun_num, rsp->reserved_0,
			    rsp->reserved_1, rsp->fcp_u.fcp_status.reserved_0,
			    rsp->fcp_u.fcp_status.reserved_1,
			    rsp->fcp_response_len, rsp->fcp_sense_len);

			kmem_free(buf, 256);
			return;
		}

		if (rsp->fcp_u.fcp_status.rsp_len_set &&
		    bep->rsp_code != FCP_NO_FAILURE) {
			(void) sprintf(buf + strlen(buf),
			    " FCP Response code = 0x%x", bep->rsp_code);
		}

		if (rsp->fcp_u.fcp_status.scsi_status & STATUS_CHECK) {
			struct scsi_extended_sense sense_info, *sense_ptr;

			/* Sense data follows the FCP response data. */
			if (icmd->ipkt_nodma) {
				sense_ptr = (struct scsi_extended_sense *)
				    ((caddr_t)fpkt->pkt_resp +
				    sizeof (struct fcp_rsp) +
				    rsp->fcp_response_len);
			} else {
				sense_ptr = &sense_info;

				FCP_CP_IN(fpkt->pkt_resp +
				    sizeof (struct fcp_rsp) +
				    rsp->fcp_response_len, &sense_info,
				    fpkt->pkt_resp_acc,
				    sizeof (struct scsi_extended_sense));
			}

			/* Map the sense key to its printable name. */
			if (sense_ptr->es_key < NUM_SENSE_KEYS +
			    NUM_IMPL_SENSE_KEYS) {
				sense_key = sense_keys[sense_ptr->es_key];
			} else {
				sense_key = "Undefined";
			}

			asc = sense_ptr->es_add_code;
			ascq = sense_ptr->es_qual_code;

			(void) sprintf(buf + strlen(buf),
			    ": sense key=%%s, ASC=%%x," " ASCQ=%%x."
			    " Giving up");

			fcp_log(CE_WARN, pptr->port_dip, buf,
			    ptgt->tgt_d_id, plun->lun_num, sense_key,
			    asc, ascq);
		} else {
			(void) sprintf(buf + strlen(buf),
			    " : SCSI status=%%x. Giving up");

			fcp_log(CE_WARN, pptr->port_dip, buf,
			    ptgt->tgt_d_id, plun->lun_num,
			    rsp->fcp_u.fcp_status.scsi_status);
		}
	} else {
		caddr_t state, reason, action, expln;

		/* Transport-level failure: ask fp/fctl for the details. */
		(void) fc_ulp_pkt_error(fpkt, &state, &reason,
		    &action, &expln);

		(void) sprintf(buf + strlen(buf), ": State:%%s,"
		    " Reason:%%s. Giving up");

		/* SCSI templates carry a LUN argument; ELS templates don't. */
		if (scsi_cmd) {
			fcp_log(CE_WARN, pptr->port_dip, buf,
			    ptgt->tgt_d_id, plun->lun_num, state, reason);
		} else {
			fcp_log(CE_WARN, pptr->port_dip, buf,
			    ptgt->tgt_d_id, state, reason);
		}
	}

	kmem_free(buf, 256);
}
15140 
15141 
/*
 * fcp_handle_ipkt_errors
 *	Handle a transport-level failure (rval) of an internal packet
 *	(icmd) sent to target ptgt on port pptr.  op is a human-readable
 *	operation name used only in log messages.
 *
 *	Returns DDI_SUCCESS when the packet was requeued for retry (or,
 *	for a PRLI failing with FC_LOGINREQ, reissued as a PLOGI via
 *	fcp_send_els, whose return value is passed through); otherwise
 *	returns DDI_FAILURE and the caller must dispose of icmd.
 */
static int
fcp_handle_ipkt_errors(struct fcp_port *pptr, struct fcp_tgt *ptgt,
    struct fcp_ipkt *icmd, int rval, caddr_t op)
{
	int	ret = DDI_FAILURE;
	char	*error;

	switch (rval) {
	case FC_DEVICE_BUSY_NEW_RSCN:
		/*
		 * This means that there was a new RSCN that the transport
		 * knows about (which the ULP *may* know about too) but the
		 * pkt that was sent down was related to an older RSCN. So, we
		 * are just going to reset the retry count and deadline and
		 * continue to retry. The idea is that transport is currently
		 * working on the new RSCN and will soon let the ULPs know
		 * about it and when it does the existing logic will kick in
		 * where it will change the tcount to indicate that something
		 * changed on the target. So, rediscovery will start and there
		 * will not be an infinite retry.
		 *
		 * For a full flow of how the RSCN info is transferred back and
		 * forth, see fp.c
		 */
		icmd->ipkt_retries = 0;
		icmd->ipkt_port->port_deadline = fcp_watchdog_time +
		    FCP_ICMD_DEADLINE;

		FCP_TRACE(fcp_logq, pptr->port_instbuf, fcp_trace,
		    FCP_BUF_LEVEL_3, 0,
		    "fcp_handle_ipkt_errors: rval=%x  for D_ID=%x",
		    rval, ptgt->tgt_d_id);
		/* FALLTHROUGH */

	/* Transient busy/offline conditions: requeue if still retryable. */
	case FC_STATEC_BUSY:
	case FC_DEVICE_BUSY:
	case FC_PBUSY:
	case FC_FBUSY:
	case FC_TRAN_BUSY:
	case FC_OFFLINE:
		FCP_TRACE(fcp_logq, pptr->port_instbuf, fcp_trace,
		    FCP_BUF_LEVEL_3, 0,
		    "fcp_handle_ipkt_errors: rval=%x  for D_ID=%x",
		    rval, ptgt->tgt_d_id);
		if (icmd->ipkt_retries < FCP_MAX_RETRIES &&
		    fcp_is_retryable(icmd)) {
			fcp_queue_ipkt(pptr, icmd->ipkt_fpkt);
			ret = DDI_SUCCESS;
		}
		break;

	case FC_LOGINREQ:
		/*
		 * FC_LOGINREQ used to be handled just like all the cases
		 * above. It has been changed to handled a PRLI that fails
		 * with FC_LOGINREQ different than other ipkts that fail
		 * with FC_LOGINREQ. If a PRLI fails with FC_LOGINREQ it is
		 * a simple matter to turn it into a PLOGI instead, so that's
		 * exactly what we do here.
		 */
		if (icmd->ipkt_opcode == LA_ELS_PRLI) {
			ret = fcp_send_els(icmd->ipkt_port, icmd->ipkt_tgt,
			    icmd, LA_ELS_PLOGI, icmd->ipkt_link_cnt,
			    icmd->ipkt_change_cnt, icmd->ipkt_cause);
		} else {
			/* Non-PRLI packets: same retry policy as the busy */
			/* cases above. */
			FCP_TRACE(fcp_logq, pptr->port_instbuf, fcp_trace,
			    FCP_BUF_LEVEL_3, 0,
			    "fcp_handle_ipkt_errors: rval=%x  for D_ID=%x",
			    rval, ptgt->tgt_d_id);
			if (icmd->ipkt_retries < FCP_MAX_RETRIES &&
			    fcp_is_retryable(icmd)) {
				fcp_queue_ipkt(pptr, icmd->ipkt_fpkt);
				ret = DDI_SUCCESS;
			}
		}
		break;

	default:
		/*
		 * Hard failure.  Lock order is port_mutex before tgt_mutex;
		 * both are dropped (in reverse order) before logging so the
		 * fcp_log call is not made with locks held.
		 */
		mutex_enter(&pptr->port_mutex);
		mutex_enter(&ptgt->tgt_mutex);
		if (!FCP_STATE_CHANGED(pptr, ptgt, icmd)) {
			mutex_exit(&ptgt->tgt_mutex);
			mutex_exit(&pptr->port_mutex);

			(void) fc_ulp_error(rval, &error);
			fcp_log(CE_WARN, pptr->port_dip,
			    "!Failed to send %s to D_ID=%x error=%s",
			    op, ptgt->tgt_d_id, error);
		} else {
			/*
			 * Link/target state changed while the packet was in
			 * flight; the failure is stale, so only trace it.
			 */
			FCP_TRACE(fcp_logq, pptr->port_instbuf,
			    fcp_trace, FCP_BUF_LEVEL_2, 0,
			    "fcp_handle_ipkt_errors,1: state change occured"
			    " for D_ID=0x%x", ptgt->tgt_d_id);
			mutex_exit(&ptgt->tgt_mutex);
			mutex_exit(&pptr->port_mutex);
		}
		break;
	}

	return (ret);
}
15243 
15244 
15245 /*
15246  * Check of outstanding commands on any LUN for this target
15247  */
15248 static int
15249 fcp_outstanding_lun_cmds(struct fcp_tgt *ptgt)
15250 {
15251 	struct	fcp_lun	*plun;
15252 	struct	fcp_pkt	*cmd;
15253 
15254 	for (plun = ptgt->tgt_lun; plun != NULL; plun = plun->lun_next) {
15255 		mutex_enter(&plun->lun_mutex);
15256 		for (cmd = plun->lun_pkt_head; cmd != NULL;
15257 		    cmd = cmd->cmd_forw) {
15258 			if (cmd->cmd_state == FCP_PKT_ISSUED) {
15259 				mutex_exit(&plun->lun_mutex);
15260 				return (FC_SUCCESS);
15261 			}
15262 		}
15263 		mutex_exit(&plun->lun_mutex);
15264 	}
15265 
15266 	return (FC_FAILURE);
15267 }
15268 
/*
 * fcp_construct_map
 *	Build an fc_portmap_t array describing every non-orphan target
 *	currently hashed on this port; *dev_cnt is set to the number of
 *	entries.  Returns NULL if the array could not be allocated
 *	(KM_NOSLEEP).  The caller presumably frees the returned array
 *	(sizeof (fc_portmap_t) * *dev_cnt bytes) -- verify at call site.
 *
 *	NOTE(review): if no targets qualify, *dev_cnt is 0 and
 *	kmem_zalloc is invoked with a size of 0 -- confirm that the
 *	callers never reach here with an empty hash table.
 */
static fc_portmap_t *
fcp_construct_map(struct fcp_port *pptr, uint32_t *dev_cnt)
{
	int			i;
	fc_portmap_t		*devlist;
	fc_portmap_t		*devptr = NULL;
	struct fcp_tgt	*ptgt;

	mutex_enter(&pptr->port_mutex);
	/* First pass: count the non-orphan targets across all hash chains. */
	for (i = 0, *dev_cnt = 0; i < FCP_NUM_HASH; i++) {
		for (ptgt = pptr->port_tgt_hash_table[i]; ptgt != NULL;
		    ptgt = ptgt->tgt_next) {
			if (!(ptgt->tgt_state & FCP_TGT_ORPHAN)) {
				++*dev_cnt;
			}
		}
	}

	/* KM_NOSLEEP because port_mutex is held across the allocation. */
	devptr = devlist = kmem_zalloc(sizeof (*devlist) * *dev_cnt,
	    KM_NOSLEEP);
	if (devlist == NULL) {
		mutex_exit(&pptr->port_mutex);
		fcp_log(CE_WARN, pptr->port_dip,
		    "!fcp%d: failed to allocate for portmap for construct map",
		    pptr->port_instance);
		return (devptr);
	}

	/* Second pass: fill one portmap entry per non-orphan target. */
	for (i = 0; i < FCP_NUM_HASH; i++) {
		for (ptgt = pptr->port_tgt_hash_table[i]; ptgt != NULL;
		    ptgt = ptgt->tgt_next) {
			if (!(ptgt->tgt_state & FCP_TGT_ORPHAN)) {
				int ret;

				/* Let the transport fill the entry by pwwn. */
				ret = fc_ulp_pwwn_to_portmap(
				    pptr->port_fp_handle,
				    (la_wwn_t *)&ptgt->tgt_port_wwn.raw_wwn[0],
				    devlist);

				if (ret == FC_SUCCESS) {
					devlist++;
					continue;
				}

				/*
				 * Transport lookup failed: construct the
				 * entry by hand from cached target state and
				 * mark it invalid/old.
				 */
				devlist->map_pd = NULL;
				devlist->map_did.port_id = ptgt->tgt_d_id;
				devlist->map_hard_addr.hard_addr =
				    ptgt->tgt_hard_addr;

				devlist->map_state = PORT_DEVICE_INVALID;
				devlist->map_type = PORT_DEVICE_OLD;

				bcopy(&ptgt->tgt_node_wwn.raw_wwn[0],
				    &devlist->map_nwwn, FC_WWN_SIZE);

				bcopy(&ptgt->tgt_port_wwn.raw_wwn[0],
				    &devlist->map_pwwn, FC_WWN_SIZE);

				devlist++;
			}
		}
	}

	mutex_exit(&pptr->port_mutex);

	return (devptr);
}
15336 /*
15337  * Inimate MPxIO that the lun is busy and cannot accept regular IO
15338  */
15339 static void
15340 fcp_update_mpxio_path_verifybusy(struct fcp_port *pptr)
15341 {
15342 	int i;
15343 	struct fcp_tgt	*ptgt;
15344 	struct fcp_lun	*plun;
15345 
15346 	for (i = 0; i < FCP_NUM_HASH; i++) {
15347 		for (ptgt = pptr->port_tgt_hash_table[i]; ptgt != NULL;
15348 		    ptgt = ptgt->tgt_next) {
15349 			mutex_enter(&ptgt->tgt_mutex);
15350 			for (plun = ptgt->tgt_lun; plun != NULL;
15351 			    plun = plun->lun_next) {
15352 				if (plun->lun_mpxio &&
15353 				    plun->lun_state & FCP_LUN_BUSY) {
15354 					if (!fcp_pass_to_hp(pptr, plun,
15355 					    plun->lun_cip,
15356 					    FCP_MPXIO_PATH_SET_BUSY,
15357 					    pptr->port_link_cnt,
15358 					    ptgt->tgt_change_cnt, 0, 0)) {
15359 						FCP_TRACE(fcp_logq,
15360 						    pptr->port_instbuf,
15361 						    fcp_trace,
15362 						    FCP_BUF_LEVEL_2, 0,
15363 						    "path_verifybusy: "
15364 						    "disable lun %p failed!",
15365 						    plun);
15366 					}
15367 				}
15368 			}
15369 			mutex_exit(&ptgt->tgt_mutex);
15370 		}
15371 	}
15372 }
15373 
15374 static int
15375 fcp_update_mpxio_path(struct fcp_lun *plun, child_info_t *cip, int what)
15376 {
15377 	dev_info_t		*cdip = NULL;
15378 	dev_info_t		*pdip = NULL;
15379 
15380 	ASSERT(plun);
15381 
15382 	mutex_enter(&plun->lun_mutex);
15383 	if (fcp_is_child_present(plun, cip) == FC_FAILURE) {
15384 		mutex_exit(&plun->lun_mutex);
15385 		return (NDI_FAILURE);
15386 	}
15387 	mutex_exit(&plun->lun_mutex);
15388 	cdip = mdi_pi_get_client(PIP(cip));
15389 	pdip = mdi_pi_get_phci(PIP(cip));
15390 
15391 	ASSERT(cdip != NULL);
15392 	ASSERT(pdip != NULL);
15393 
15394 	if (what == FCP_MPXIO_PATH_CLEAR_BUSY) {
15395 		/* LUN ready for IO */
15396 		(void) mdi_pi_enable_path(PIP(cip), DRIVER_DISABLE_TRANSIENT);
15397 	} else {
15398 		/* LUN busy to accept IO */
15399 		(void) mdi_pi_disable_path(PIP(cip), DRIVER_DISABLE_TRANSIENT);
15400 	}
15401 	return (NDI_SUCCESS);
15402 }
15403 
15404 /*
15405  * Caller must free the returned string of MAXPATHLEN len
15406  * If the device is offline (-1 instance number) NULL
15407  * will be returned.
15408  */
15409 static char *
15410 fcp_get_lun_path(struct fcp_lun *plun) {
15411 	dev_info_t	*dip = NULL;
15412 	char	*path = NULL;
15413 	if (plun == NULL) {
15414 		return (NULL);
15415 	}
15416 	if (plun->lun_mpxio == 0) {
15417 		dip = DIP(plun->lun_cip);
15418 	} else {
15419 		dip = mdi_pi_get_client(PIP(plun->lun_cip));
15420 	}
15421 	if (dip == NULL) {
15422 		return (NULL);
15423 	}
15424 	if (ddi_get_instance(dip) < 0) {
15425 		return (NULL);
15426 	}
15427 	path = kmem_alloc(MAXPATHLEN, KM_SLEEP);
15428 	if (path == NULL) {
15429 		return (NULL);
15430 	}
15431 
15432 	(void) ddi_pathname(dip, path);
15433 	/*
15434 	 * In reality, the user wants a fully valid path (one they can open)
15435 	 * but this string is lacking the mount point, and the minor node.
15436 	 * It would be nice if we could "figure these out" somehow
15437 	 * and fill them in.  Otherwise, the userland code has to understand
15438 	 * driver specific details of which minor node is the "best" or
15439 	 * "right" one to expose.  (Ex: which slice is the whole disk, or
15440 	 * which tape doesn't rewind)
15441 	 */
15442 	return (path);
15443 }
15444 
/*
 * fcp_scsi_bus_config
 *	bus_config entry point for the fcp nexus.  Delays/retries
 *	configuration so fabric devices have a chance to report in
 *	before the request is resolved (important for devices on the
 *	boot path).
 */
static int
fcp_scsi_bus_config(dev_info_t *parent, uint_t flag,
    ddi_bus_config_op_t op, void *arg, dev_info_t **childp)
{
	int64_t reset_delay;
	int rval, retry = 0;
	struct fcp_port *pptr = fcp_dip2port(parent);

	/*
	 * Time remaining (in ticks) in the port's initial discovery
	 * window: FCP_INIT_WAIT_TIMEOUT minus the time elapsed since
	 * attach, clamped at zero.
	 */
	reset_delay = (int64_t)(USEC_TO_TICK(FCP_INIT_WAIT_TIMEOUT)) -
	    (lbolt64 - pptr->port_attach_time);
	if (reset_delay < 0) {
		reset_delay = 0;
	}

	if (fcp_bus_config_debug) {
		flag |= NDI_DEVI_DEBUG;
	}

	switch (op) {
	case BUS_CONFIG_ONE:
		/*
		 * Retry the command since we need to ensure
		 * the fabric devices are available for root
		 */
		while (retry++ < fcp_max_bus_config_retries) {
			rval =	(ndi_busop_bus_config(parent,
			    flag | NDI_MDI_FALLBACK, op,
			    arg, childp, (clock_t)reset_delay));
			if (rval == 0) {
				return (rval);
			}
		}

		/*
		 * drain taskq to make sure nodes are created and then
		 * try again.
		 */
		taskq_wait(DEVI(parent)->devi_taskq);
		return (ndi_busop_bus_config(parent, flag | NDI_MDI_FALLBACK,
		    op, arg, childp, 0));

	case BUS_CONFIG_DRIVER:
	case BUS_CONFIG_ALL: {
		/*
		 * delay till all devices report in (port_tmp_cnt == 0)
		 * or FCP_INIT_WAIT_TIMEOUT
		 */
		mutex_enter(&pptr->port_mutex);
		while ((reset_delay > 0) && pptr->port_tmp_cnt) {
			/* Signalled on port_config_cv as devices report in. */
			(void) cv_timedwait(&pptr->port_config_cv,
			    &pptr->port_mutex,
			    ddi_get_lbolt() + (clock_t)reset_delay);
			/* Recompute the remaining window after each wakeup. */
			reset_delay =
			    (int64_t)(USEC_TO_TICK(FCP_INIT_WAIT_TIMEOUT)) -
			    (lbolt64 - pptr->port_attach_time);
		}
		mutex_exit(&pptr->port_mutex);
		/* drain taskq to make sure nodes are created */
		taskq_wait(DEVI(parent)->devi_taskq);
		return (ndi_busop_bus_config(parent, flag, op,
		    arg, childp, 0));
	}

	default:
		return (NDI_FAILURE);
	}
	/*NOTREACHED*/
}
15513 
15514 static int
15515 fcp_scsi_bus_unconfig(dev_info_t *parent, uint_t flag,
15516     ddi_bus_config_op_t op, void *arg)
15517 {
15518 	if (fcp_bus_config_debug) {
15519 		flag |= NDI_DEVI_DEBUG;
15520 	}
15521 
15522 	return (ndi_busop_bus_unconfig(parent, flag, op, arg));
15523 }
15524 
15525 
15526 /*
15527  * Routine to copy GUID into the lun structure.
15528  * returns 0 if copy was successful and 1 if encountered a
15529  * failure and did not copy the guid.
15530  */
15531 static int
15532 fcp_copy_guid_2_lun_block(struct fcp_lun *plun, char *guidp)
15533 {
15534 
15535 	int retval = 0;
15536 
15537 	/* add one for the null terminator */
15538 	const unsigned int len = strlen(guidp) + 1;
15539 
15540 	if ((guidp == NULL) || (plun == NULL)) {
15541 		return (1);
15542 	}
15543 
15544 	/*
15545 	 * if the plun->lun_guid already has been allocated,
15546 	 * then check the size. if the size is exact, reuse
15547 	 * it....if not free it an allocate the required size.
15548 	 * The reallocation should NOT typically happen
15549 	 * unless the GUIDs reported changes between passes.
15550 	 * We free up and alloc again even if the
15551 	 * size was more than required. This is due to the
15552 	 * fact that the field lun_guid_size - serves
15553 	 * dual role of indicating the size of the wwn
15554 	 * size and ALSO the allocation size.
15555 	 */
15556 	if (plun->lun_guid) {
15557 		if (plun->lun_guid_size != len) {
15558 			/*
15559 			 * free the allocated memory and
15560 			 * initialize the field
15561 			 * lun_guid_size to 0.
15562 			 */
15563 			kmem_free(plun->lun_guid, plun->lun_guid_size);
15564 			plun->lun_guid = NULL;
15565 			plun->lun_guid_size = 0;
15566 		}
15567 	}
15568 	/*
15569 	 * alloc only if not already done.
15570 	 */
15571 	if (plun->lun_guid == NULL) {
15572 		plun->lun_guid = kmem_zalloc(len, KM_NOSLEEP);
15573 		if (plun->lun_guid == NULL) {
15574 			cmn_err(CE_WARN, "fcp_copy_guid_2_lun_block:"
15575 			    "Unable to allocate"
15576 			    "Memory for GUID!!! size %d", len);
15577 			retval = 1;
15578 		} else {
15579 			plun->lun_guid_size = len;
15580 		}
15581 	}
15582 	if (plun->lun_guid) {
15583 		/*
15584 		 * now copy the GUID
15585 		 */
15586 		bcopy(guidp, plun->lun_guid, plun->lun_guid_size);
15587 	}
15588 	return (retval);
15589 }
15590 
15591 /*
15592  * fcp_reconfig_wait
15593  *
15594  * Wait for a rediscovery/reconfiguration to complete before continuing.
15595  */
15596 
15597 static void
15598 fcp_reconfig_wait(struct fcp_port *pptr)
15599 {
15600 	clock_t		reconfig_start, wait_timeout;
15601 
15602 	/*
15603 	 * Quick check.	 If pptr->port_tmp_cnt is 0, there is no
15604 	 * reconfiguration in progress.
15605 	 */
15606 
15607 	mutex_enter(&pptr->port_mutex);
15608 	if (pptr->port_tmp_cnt == 0) {
15609 		mutex_exit(&pptr->port_mutex);
15610 		return;
15611 	}
15612 	mutex_exit(&pptr->port_mutex);
15613 
15614 	/*
15615 	 * If we cause a reconfig by raising power, delay until all devices
15616 	 * report in (port_tmp_cnt returns to 0)
15617 	 */
15618 
15619 	reconfig_start = ddi_get_lbolt();
15620 	wait_timeout = drv_usectohz(FCP_INIT_WAIT_TIMEOUT);
15621 
15622 	mutex_enter(&pptr->port_mutex);
15623 
15624 	while (((ddi_get_lbolt() - reconfig_start) < wait_timeout) &&
15625 	    pptr->port_tmp_cnt) {
15626 
15627 		(void) cv_timedwait(&pptr->port_config_cv, &pptr->port_mutex,
15628 		    reconfig_start + wait_timeout);
15629 	}
15630 
15631 	mutex_exit(&pptr->port_mutex);
15632 
15633 	/*
15634 	 * Even if fcp_tmp_count isn't 0, continue without error.  The port
15635 	 * we want may still be ok.  If not, it will error out later
15636 	 */
15637 }
15638 
15639 /*
15640  * Read masking info from fp.conf and construct the global fcp_lun_blacklist.
15641  * We rely on the fcp_global_mutex to provide protection against changes to
15642  * the fcp_lun_blacklist.
15643  *
15644  * You can describe a list of target port WWNs and LUN numbers which will
15645  * not be configured. LUN numbers will be interpreted as decimal. White
15646  * spaces and ',' can be used in the list of LUN numbers.
15647  *
15648  * To prevent LUNs 1 and 2 from being configured for target
15649  * port 510000f010fd92a1 and target port 510000e012079df1, set:
15650  *
15651  * pwwn-lun-blacklist=
15652  * "510000f010fd92a1,1,2",
15653  * "510000e012079df1,1,2";
15654  */
static void
fcp_read_blacklist(dev_info_t *dip,
    struct fcp_black_list_entry **pplun_blacklist) {
	char **prop_array	= NULL;
	char *curr_pwwn		= NULL;
	char *curr_lun		= NULL;
	uint32_t prop_item	= 0;
	int idx			= 0;
	int len			= 0;

	ASSERT(mutex_owned(&fcp_global_mutex));
	/* No property configured means no blacklist; nothing to do. */
	if (ddi_prop_lookup_string_array(DDI_DEV_T_ANY, dip,
	    DDI_PROP_DONTPASS | DDI_PROP_NOTPROM,
	    LUN_BLACKLIST_PROP, &prop_array, &prop_item) != DDI_PROP_SUCCESS) {
		return;
	}

	/*
	 * Each array element is expected to look like
	 * "<16 hex digit pwwn><separator><lun list>".  Validate each one
	 * and hand the pwwn/lun halves to fcp_mask_pwwn_lun().
	 */
	for (idx = 0; idx < prop_item; idx++) {

		/* Skip leading spaces before the WWN. */
		curr_pwwn = prop_array[idx];
		while (*curr_pwwn == ' ') {
			curr_pwwn++;
		}
		/* Need at least 16 hex chars, a separator, and a lun list. */
		if (strlen(curr_pwwn) <= (sizeof (la_wwn_t) * 2 + 1)) {
			fcp_log(CE_WARN, NULL, "Invalid WWN %s in the blacklist"
			    ", please check.", curr_pwwn);
			continue;
		}
		/* The WWN must be followed by a ' ' or ',' separator. */
		if ((*(curr_pwwn + sizeof (la_wwn_t) * 2) != ' ') &&
		    (*(curr_pwwn + sizeof (la_wwn_t) * 2) != ',')) {
			fcp_log(CE_WARN, NULL, "Invalid WWN %s in the blacklist"
			    ", please check.", curr_pwwn);
			continue;
		}
		/* All 16 WWN characters must be hex digits. */
		for (len = 0; len < sizeof (la_wwn_t) * 2; len++) {
			if (isxdigit(curr_pwwn[len]) != TRUE) {
				fcp_log(CE_WARN, NULL, "Invalid WWN %s in the "
				    "blacklist, please check.", curr_pwwn);
				break;
			}
		}
		if (len != sizeof (la_wwn_t) * 2) {
			continue;
		}

		/* Split the string: NUL-terminate the WWN, parse the luns. */
		curr_lun = curr_pwwn + sizeof (la_wwn_t) * 2 + 1;
		*(curr_lun - 1) = '\0';
		fcp_mask_pwwn_lun(curr_pwwn, curr_lun, pplun_blacklist);
	}

	ddi_prop_free(prop_array);
}
15707 
15708 /*
15709  * Get the masking info about one remote target port designated by wwn.
15710  * Lun ids could be separated by ',' or white spaces.
15711  */
15712 static void
15713 fcp_mask_pwwn_lun(char *curr_pwwn, char *curr_lun,
15714     struct fcp_black_list_entry **pplun_blacklist) {
15715 	int		idx			= 0;
15716 	uint32_t	offset			= 0;
15717 	unsigned long	lun_id			= 0;
15718 	char		lunid_buf[16];
15719 	char		*pend			= NULL;
15720 	int		illegal_digit		= 0;
15721 
15722 	while (offset < strlen(curr_lun)) {
15723 		while ((curr_lun[offset + idx] != ',') &&
15724 		    (curr_lun[offset + idx] != '\0') &&
15725 		    (curr_lun[offset + idx] != ' ')) {
15726 			if (isdigit(curr_lun[offset + idx]) == 0) {
15727 				illegal_digit++;
15728 			}
15729 			idx++;
15730 		}
15731 		if (illegal_digit > 0) {
15732 			offset += (idx+1);	/* To the start of next lun */
15733 			idx = 0;
15734 			illegal_digit = 0;
15735 			fcp_log(CE_WARN, NULL, "Invalid LUN %s for WWN %s in "
15736 			    "the blacklist, please check digits.",
15737 			    curr_lun, curr_pwwn);
15738 			continue;
15739 		}
15740 		if (idx >= (sizeof (lunid_buf) / sizeof (lunid_buf[0]))) {
15741 			fcp_log(CE_WARN, NULL, "Invalid LUN %s for WWN %s in "
15742 			    "the blacklist, please check the length of LUN#.",
15743 			    curr_lun, curr_pwwn);
15744 			break;
15745 		}
15746 		if (idx == 0) {	/* ignore ' ' or ',' or '\0' */
15747 		    offset++;
15748 		    continue;
15749 		}
15750 
15751 		bcopy(curr_lun + offset, lunid_buf, idx);
15752 		lunid_buf[idx] = '\0';
15753 		if (ddi_strtoul(lunid_buf, &pend, 10, &lun_id) == 0) {
15754 			fcp_add_one_mask(curr_pwwn, lun_id, pplun_blacklist);
15755 		} else {
15756 			fcp_log(CE_WARN, NULL, "Invalid LUN %s for WWN %s in "
15757 			    "the blacklist, please check %s.",
15758 			    curr_lun, curr_pwwn, lunid_buf);
15759 		}
15760 		offset += (idx+1);	/* To the start of next lun */
15761 		idx = 0;
15762 	}
15763 }
15764 
15765 /*
15766  * Add one masking record
15767  */
15768 static void
15769 fcp_add_one_mask(char *curr_pwwn, uint32_t lun_id,
15770     struct fcp_black_list_entry **pplun_blacklist) {
15771 	struct fcp_black_list_entry	*tmp_entry	= *pplun_blacklist;
15772 	struct fcp_black_list_entry	*new_entry	= NULL;
15773 	la_wwn_t			wwn;
15774 
15775 	fcp_ascii_to_wwn(curr_pwwn, wwn.raw_wwn, sizeof (la_wwn_t));
15776 	while (tmp_entry) {
15777 		if ((bcmp(&tmp_entry->wwn, &wwn,
15778 		    sizeof (la_wwn_t)) == 0) && (tmp_entry->lun == lun_id)) {
15779 			return;
15780 		}
15781 
15782 		tmp_entry = tmp_entry->next;
15783 	}
15784 
15785 	/* add to black list */
15786 	new_entry = (struct fcp_black_list_entry *)kmem_zalloc
15787 	    (sizeof (struct fcp_black_list_entry), KM_SLEEP);
15788 	bcopy(&wwn, &new_entry->wwn, sizeof (la_wwn_t));
15789 	new_entry->lun = lun_id;
15790 	new_entry->masked = 0;
15791 	new_entry->next = *pplun_blacklist;
15792 	*pplun_blacklist = new_entry;
15793 }
15794 
15795 /*
15796  * Check if we should mask the specified lun of this fcp_tgt
15797  */
15798 static int
15799 fcp_should_mask(la_wwn_t *wwn, uint32_t lun_id) {
15800 	struct fcp_black_list_entry *remote_port;
15801 
15802 	remote_port = fcp_lun_blacklist;
15803 	while (remote_port != NULL) {
15804 		if (bcmp(wwn, &remote_port->wwn, sizeof (la_wwn_t)) == 0) {
15805 			if (remote_port->lun == lun_id) {
15806 				remote_port->masked++;
15807 				if (remote_port->masked == 1) {
15808 					fcp_log(CE_NOTE, NULL, "LUN %d of port "
15809 					    "%02x%02x%02x%02x%02x%02x%02x%02x "
15810 					    "is masked due to black listing.\n",
15811 					    lun_id, wwn->raw_wwn[0],
15812 					    wwn->raw_wwn[1], wwn->raw_wwn[2],
15813 					    wwn->raw_wwn[3], wwn->raw_wwn[4],
15814 					    wwn->raw_wwn[5], wwn->raw_wwn[6],
15815 					    wwn->raw_wwn[7]);
15816 				}
15817 				return (TRUE);
15818 			}
15819 		}
15820 		remote_port = remote_port->next;
15821 	}
15822 	return (FALSE);
15823 }
15824 
15825 /*
15826  * Release all allocated resources
15827  */
15828 static void
15829 fcp_cleanup_blacklist(struct fcp_black_list_entry **pplun_blacklist) {
15830 	struct fcp_black_list_entry	*tmp_entry	= *pplun_blacklist;
15831 	struct fcp_black_list_entry	*current_entry	= NULL;
15832 
15833 	ASSERT(mutex_owned(&fcp_global_mutex));
15834 	/*
15835 	 * Traverse all luns
15836 	 */
15837 	while (tmp_entry) {
15838 		current_entry = tmp_entry;
15839 		tmp_entry = tmp_entry->next;
15840 		kmem_free(current_entry, sizeof (struct fcp_black_list_entry));
15841 	}
15842 	*pplun_blacklist = NULL;
15843 }
15844